commit b599308d691ccbc2b3cb3755593015adaa5c3a2f Author: Paul Stemmet Date: Sat Apr 20 12:23:50 2024 +0000 New upstream version 1.14.8 diff --git a/.copywrite.hcl b/.copywrite.hcl new file mode 100644 index 0000000..ab9fa58 --- /dev/null +++ b/.copywrite.hcl @@ -0,0 +1,16 @@ +schema_version = 1 + +project { + license = "MPL-2.0" + copyright_year = 2015 + + # (OPTIONAL) A list of globs that should not have copyright/license headers. + # Supports doublestar glob patterns for more flexibility in defining which + # files or folders should be ignored + header_ignore = [ + "builtin/credential/aws/pkcs7/**", + "ui/node_modules/**", + "enos/modules/k8s_deploy_vault/raft-config.hcl", + "plugins/database/postgresql/scram/**" + ] +} diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..d4431eb --- /dev/null +++ b/.gitattributes @@ -0,0 +1,26 @@ +vendor/* linguist-vendored +website/* linguist-documentation + +/packagespec.mk linguist-generated +*.ber filter=lfs diff=lfs merge=lfs -text +*.DS_Store filter=lfs diff=lfs merge=lfs -text +*.eot filter=lfs diff=lfs merge=lfs -text +*.gif filter=lfs diff=lfs merge=lfs -text +*.ico filter=lfs diff=lfs merge=lfs -text +*.jks filter=lfs diff=lfs merge=lfs -text +*.jpg filter=lfs diff=lfs merge=lfs -text +*.lzma filter=lfs diff=lfs merge=lfs -text +*.p12 filter=lfs diff=lfs merge=lfs -text +*.pdf filter=lfs diff=lfs merge=lfs -text +*.png filter=lfs diff=lfs merge=lfs -text +*.snap filter=lfs diff=lfs merge=lfs -text +*.ttf filter=lfs diff=lfs merge=lfs -text +*.woff filter=lfs diff=lfs merge=lfs -text +*.woff2 filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +bin/codechecker filter=lfs diff=lfs merge=lfs -text +Godeps/_workspace/src/github.com/gocql/gocql/testdata/pki/.keystore filter=lfs diff=lfs merge=lfs -text +Godeps/_workspace/src/github.com/gocql/gocql/testdata/pki/.truststore filter=lfs diff=lfs merge=lfs -text +plugins/database/cassandra/test-fixtures/with_tls/stores/keystore filter=lfs diff=lfs merge=lfs -text +plugins/database/cassandra/test-fixtures/with_tls/stores/truststore filter=lfs diff=lfs merge=lfs -text diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..0c8b092 --- /dev/null +++ b/.github/CODE_OF_CONDUCT.md @@ -0,0 +1,5 @@ +# Code of Conduct + +HashiCorp Community Guidelines apply to you when interacting with the community here on GitHub and contributing code. + +Please read the full text at https://www.hashicorp.com/community-guidelines diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 0000000..846293c --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,48 @@ + + + + + +**Environment:** + + +* Vault Version: +* Operating System/Architecture: + +**Vault Config File:** + + +```hcl +# Paste your Vault config here. +# Be sure to scrub any sensitive values +``` + +**Startup Log Output:** + + +```text +# Paste your log output here +``` + +**Expected Behavior:** + + +**Actual Behavior:** + + +**Steps to Reproduce:** + + +**Important Factoids:** + + +**References:** + diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000..23f56cb --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,43 @@ +--- +name: Bug report +about: Let us know about a bug! +title: '' +labels: '' +assignees: '' + +--- + + + +**Describe the bug** +A clear and concise description of what the bug is. 
+ +**To Reproduce** +Steps to reproduce the behavior: +1. Run `vault write ...` +2. Run `vault login....` +3. See error + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Environment:** +* Vault Server Version (retrieve with `vault status`): +* Vault CLI Version (retrieve with `vault version`): +* Server Operating System/Architecture: + +Vault server configuration file(s): + +```hcl +# Paste your Vault config here. +# Be sure to scrub any sensitive values +``` + +**Additional context** +Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000..23958d8 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,7 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +contact_links: + - name: Ask a question + url: https://discuss.hashicorp.com/c/vault + about: For increased visibility, please post questions on the discussion forum. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000..7775cce --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,23 @@ +--- +name: Feature request +about: Suggest something! +title: '' +labels: '' +assignees: '' + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Explain any additional use-cases** +If there are any use-cases that would help us understand the use/need/value please share them as they can help us decide on acceptance and prioritization. + +**Additional context** +Add any other context or screenshots about the feature request here. diff --git a/.github/ISSUE_TEMPLATE/plugin-submission.md b/.github/ISSUE_TEMPLATE/plugin-submission.md new file mode 100644 index 0000000..8bed55a --- /dev/null +++ b/.github/ISSUE_TEMPLATE/plugin-submission.md @@ -0,0 +1,15 @@ +--- +name: Plugin Submission +about: Submit a community Vault plugin! +title: "[Plugin Portal] Plugin Submission - " +labels: ecosystem/plugin +assignees: '' + +--- + +Please provide details for the plugin to be listed. All fields are required for a submission to be included in the [Plugin Portal](https://www.vaultproject.io/docs/plugin-portal) page. + +**Plugin Information** +Name as it would appear listed: +Plugin type (secrets/auth/database): +Repository link: diff --git a/.github/actionlint.yaml b/.github/actionlint.yaml new file mode 100644 index 0000000..377acf0 --- /dev/null +++ b/.github/actionlint.yaml @@ -0,0 +1,19 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0
+
+self-hosted-runner:
+  # Labels of self-hosted runner in array of string
+  labels:
+    - small
+    - medium
+    - large
+    - ondemand
+    - disk_gb=64
+    - os=linux
+    - type=m5.2xlarge
+    - type=c6a.xlarge
+    - type=c6a.4xlarge
+    - ubuntu-20.04
+    - custom-linux-small-vault-latest
+    - custom-linux-medium-vault-latest
+    - custom-linux-xl-vault-latest
diff --git a/.github/actions/set-up-go/action.yml b/.github/actions/set-up-go/action.yml
new file mode 100644
index 0000000..e6289b4
--- /dev/null
+++ b/.github/actions/set-up-go/action.yml
@@ -0,0 +1,71 @@
+---
+name: Set up Go with a shared module cache
+description: Set up Go with a shared module cache
+
+inputs:
+  github-token:
+    description: "An elevated Github token to access private modules if necessary"
+    type: string
+  no-restore:
+    description: "Whether or not to restore the Go module cache on a cache hit"
+    type: boolean
+    default: false
+
+outputs:
+  cache-key:
+    description: "The Go modules cache key"
+    value: ${{ steps.metadata.outputs.cache-key }}
+  cache-path:
+    description: "The GOMODCACHE path"
+    value: ${{ steps.metadata.outputs.cache-path }}
+  go-version:
+    description: "The version of Go in the .go-version file"
+    value: ${{ steps.go-version.outputs.go-version }}
+
+runs:
+  using: composite
+  steps:
+    - id: go-version
+      shell: bash
+      run: echo "go-version=$(cat ./.go-version)" >> "$GITHUB_OUTPUT"
+    - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1
+      with:
+        go-version: ${{ steps.go-version.outputs.go-version }}
+        cache: false # We use our own caching strategy
+    - id: metadata
+      shell: bash
+      run: |
+        echo "cache-path=$(go env GOMODCACHE)" >> "$GITHUB_OUTPUT"
+        echo "cache-key=go-modules-${{ hashFiles('**/go.sum') }}" >> "$GITHUB_OUTPUT"
+    - id: cache-modules
+      uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
+      with:
+        enableCrossOsArchive: true
+        lookup-only: ${{ inputs.no-restore }}
+        # We need to be very considerate of our caching strategy because Github only allows 10gb
+        # of caches per repository before it starts to evict older caches. This is usually fine
+        # if you only use the actions cache for cache, but we also use it for Go test time results.
+        # These results are used to balance our Go test groups, without which we could have
+        # painfully unbalanced Go test execution times. We have to ensure current caches for all
+        # active release branches and main do not exceed 10gb. Ideally we'd cache Go modules
+        # and Go build cache on a per version/platform/architecture/tag/module basis, but that
+        # would result in several hundred GB over all of our build workflows and release branches.
+        # Instead, we've chosen a middle ground approach where we share Go modules between build
+        # workflows but lose the Go build cache.
+        # We intentionally do not use partial restore keys. If we don't get an exact cache hit,
+        # we only want to download the latest modules, not append them to a prior cache. This
+        # keeps cache upload time, download time, and storage size to a minimum.
+        path: ${{ steps.metadata.outputs.cache-path }}
+        key: ${{ steps.metadata.outputs.cache-key }}
+    - if: steps.cache-modules.outputs.cache-hit != 'true'
+      name: Download go modules
+      shell: bash
+      run: |
+        git config --global url."https://${{ inputs.github-token }}@github.com".insteadOf https://github.com
+        for mod in $(find . -type f -name go.mod); do
+          pushd "$(dirname "$mod")"
+          go list ./...
+          go list -test ./...
+          go mod download
+          popd
+        done
diff --git a/.github/actions/set-up-gotestsum/action.yml b/.github/actions/set-up-gotestsum/action.yml
new file mode 100644
index 0000000..bb48819
--- /dev/null
+++ b/.github/actions/set-up-gotestsum/action.yml
@@ -0,0 +1,52 @@
+---
+name: Set up gotestsum from Github releases
+description: Set up gotestsum from Github releases
+
+inputs:
+  destination:
+    description: "Where to install the gotestsum binary (default: $HOME/bin/gotestsum)"
+    type: string
+    default: "$HOME/bin"
+  version:
+    description: "The version to install (default: latest)"
+    type: string
+    default: Latest
+
+outputs:
+  destination:
+    description: Where the installed gotestsum binary is
+    value: ${{ steps.install.outputs.destination }}
+  destination-dir:
+    description: The directory where the installed gotestsum binary is
+    value: ${{ steps.install.outputs.destination-dir }}
+  version:
+    description: The installed version of gotestsum
+    value: ${{ steps.install.outputs.version }}
+
+runs:
+  using: composite
+  steps:
+    - id: install
+      shell: bash
+      env:
+        GH_TOKEN: ${{ github.token }}
+      run: |
+        VERSION=$(gh release list -R gotestyourself/gotestsum --exclude-drafts --exclude-pre-releases | grep Latest | cut -f1)
+        echo "version=$VERSION" >> "$GITHUB_OUTPUT"
+
+        # The binary is installed as $HOME/bin/gotestsum so that the destination
+        # output matches the documented default and the install dir is on PATH.
+        mkdir -p "$HOME/bin"
+        DESTINATION="$(readlink -f "$HOME/bin")/gotestsum"
+        echo "destination=$DESTINATION" >> "$GITHUB_OUTPUT"
+        DESTINATION_DIR="$(dirname "$DESTINATION")"
+        echo "$DESTINATION_DIR" >> "$GITHUB_PATH"
+        echo "destination-dir=$DESTINATION_DIR" >> "$GITHUB_OUTPUT"
+
+        OS="$(echo "$RUNNER_OS" | tr '[:upper:]' '[:lower:]')"
+        ARCH="$(echo "$RUNNER_ARCH" | tr '[:upper:]' '[:lower:]')"
+        if [ "$ARCH" = "x64" ]; then
+          export ARCH="amd64"
+        fi
+
+        gh release download "$VERSION" -p "*${OS}_${ARCH}.tar.gz" -O gotestsum.tgz -R gotestyourself/gotestsum
+        tar -xvf gotestsum.tgz
+        mv gotestsum "${DESTINATION_DIR}/gotestsum"
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000..81bae9a
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,7 @@
+version: 2
+
+updates:
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "daily"
diff --git a/.github/scripts/gh_comment.sh b/.github/scripts/gh_comment.sh
new file mode 100644
index 0000000..b47df54
--- /dev/null
+++ b/.github/scripts/gh_comment.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+set -e
+
+
+# Usage: update_or_create_comment <repo> <pr-number> <search-key> <body>
+function update_or_create_comment {
+  REPO=$1
+  PR_NUMBER=$2
+  SEARCH_KEY=$3
+  BODY=$4
+
+  # We only want the GH bot to place one comment to report build failures,
+  # and if we rerun a job, that comment needs to be updated.
+  # Let's try to find if the GH bot has placed a similar comment
+  comment_id=$(gh api \
+    -H "Accept: application/vnd.github+json" \
+    -H "X-GitHub-Api-Version: 2022-11-28" \
+    --paginate \
+    /repos/hashicorp/"$REPO"/issues/"$PR_NUMBER"/comments | jq -r --arg SEARCH_KEY "$SEARCH_KEY" '.[] | select (.body | contains($SEARCH_KEY)) | .id')
+
+  if [[ "$comment_id" != "" ]]; then
+    # update the comment with the new body
+    gh api \
+      --method PATCH \
+      -H "Accept: application/vnd.github+json" \
+      -H "X-GitHub-Api-Version: 2022-11-28" \
+      /repos/hashicorp/"$REPO"/issues/comments/"$comment_id" \
+      -f body="$BODY"
+  else
+    # create a comment with the new body
+    gh api \
+      --method POST \
+      -H "Accept: application/vnd.github+json" \
+      -H "X-GitHub-Api-Version: 2022-11-28" \
+      /repos/hashicorp/"$REPO"/issues/"$PR_NUMBER"/comments \
+      -f body="$BODY"
+  fi
+}
\ No newline at end of file
diff --git a/.github/scripts/report_failed_builds.sh b/.github/scripts/report_failed_builds.sh
new file mode 100755
index 0000000..b4cd348
--- /dev/null
+++ b/.github/scripts/report_failed_builds.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+set -e
+
+# this script expects the following env vars to be set
+# error if these are not set
+[ ${GITHUB_TOKEN:?} ]
+[ ${RUN_ID:?} ]
+[ ${REPO:?} ]
+[ ${PR_NUMBER:?} ]
+# list of build jobs
+[ ${BUILD_OTHER:?} ]
+[ ${BUILD_LINUX:?} ]
+[ ${BUILD_DARWIN:?} ]
+[ ${BUILD_DOCKER:?} ]
+[ ${BUILD_UBI:?} ]
+[ ${TEST:?} ]
+[ ${TEST_DOCKER_K8S:?} ]
+
+# listing out all of the jobs with their status
+jobs=( "build-other:$BUILD_OTHER" "build-linux:$BUILD_LINUX" "build-darwin:$BUILD_DARWIN" "build-docker:$BUILD_DOCKER" "build-ubi:$BUILD_UBI" "test:$TEST" "test-docker-k8s:$TEST_DOCKER_K8S" )
+
+# there is a case where even if a job failed, it reports as cancelled. So, we look for both.
+failed_jobs=()
+for job in "${jobs[@]}";do
+  if [[ "$job" == *"failure"* || "$job" == *"cancelled"* ]]; then
+    failed_jobs+=("$job")
+  fi
+done
+
+# Create a comment to be posted on the PR
+# This comment reports failed jobs and the url to the failed workflow
+if [ ${#failed_jobs[@]} -eq 0 ]; then
+  new_body="Build Results:
+All builds succeeded! :white_check_mark:"
+else
+  new_body="Build Results:
+Build failed for these jobs: ${failed_jobs[*]}. Please refer to this workflow to learn more: https://github.com/hashicorp/vault/actions/runs/$RUN_ID"
+fi
+
+
+source ./.github/scripts/gh_comment.sh
+
+update_or_create_comment "$REPO" "$PR_NUMBER" "Build Results:" "$new_body"
\ No newline at end of file
diff --git a/.github/scripts/report_failed_tests.sh b/.github/scripts/report_failed_tests.sh
new file mode 100755
index 0000000..d69d43a
--- /dev/null
+++ b/.github/scripts/report_failed_tests.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+set -e
+MAX_TESTS=10
+# this script expects the following env vars to be set
+# error if these are not set
+[ ${GITHUB_TOKEN:?} ]
+[ ${RUN_ID:?} ]
+[ ${REPO:?} ]
+[ ${PR_NUMBER:?} ]
+if [ -z "$TABLE_DATA" ]; then
+  BODY="CI Results:
+All Go tests succeeded! :white_check_mark:"
+else
+  # Remove any rows that don't have a test name
+  # Only keep the test type, test package, test name, and logs column
+  # Remove the scroll emoji
+  # Remove "github.com/hashicorp/vault" from the package name
+  TABLE_DATA=$(echo "$TABLE_DATA" | awk -F\| '{if ($4 != " - ") { print "|" $2 "|" $3 "|" $4 "|" $7 }}' | sed -r 's/ :scroll://' | sed -r 's/github.com\/hashicorp\/vault\///')
+  NUM_FAILURES=$(wc -l <<< "$TABLE_DATA")
+
+  # Check if the number of failures is greater than the maximum tests to display
+  # If so, limit the table to MAX_TESTS number of results
+  if [ "$NUM_FAILURES" -gt "$MAX_TESTS" ]; then
+    TABLE_DATA=$(echo "$TABLE_DATA" | head -n "$MAX_TESTS")
+    NUM_OTHER=$(( NUM_FAILURES - MAX_TESTS ))
+    TABLE_DATA="$TABLE_DATA
+
+and $NUM_OTHER other tests"
+  fi
+
+  # Add the header for the table
+  BODY="CI Results:
+Failures:
+| Test Type | Package | Test | Logs |
+| --------- | ------- | ---- | ---- |
+${TABLE_DATA}"
+fi
+
+source ./.github/scripts/gh_comment.sh
+
+update_or_create_comment "$REPO" "$PR_NUMBER" "CI Results:" "$BODY"
\ No newline at end of file
diff --git a/.github/scripts/verify_changes.sh b/.github/scripts/verify_changes.sh
new file mode 100755
index 0000000..ad5a030
--- /dev/null
+++ b/.github/scripts/verify_changes.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+# This script validates if the git diff contains only docs/ui changes
+
+event_type=$1 # GH event type (pull_request)
+ref_name=$2 # branch reference that triggered the workflow
+base_ref=$3 # PR branch base ref
+
+contains() {
+  target=$1; shift
+  for i; do
+    if [[ "$i" == "$target" ]]; then
+      return 0
+    fi
+  done
+  return 1
+}
+
+if [[ "$event_type" == "pull_request" ]]; then
+  git fetch --no-tags --prune origin $base_ref
+  head_commit="HEAD"
+  base_commit="origin/$base_ref"
+else
+  git fetch --no-tags --prune origin $ref_name
+  head_commit=$(git log origin/$ref_name --oneline | head -1 | awk '{print $1}')
+  base_commit=$(git log origin/$ref_name --oneline | head -2 | awk 'NR==2 {print $1}')
+fi
+
+# git diff with ... shows the differences between base_commit and head_commit starting at the last common commit
+# excluding the changelog directory
+changed_dir=$(git diff $base_commit...$head_commit --name-only | awk -F"/" '{ print $1}' | uniq | sed '/changelog/d')
+change_count=$(git diff $base_commit...$head_commit --name-only | awk -F"/" '{ print $1}' | uniq | sed '/changelog/d' | wc -l)
+
+# There are 4 main conditions to check:
+#
+# 1. more than two changes found, set the flags to false
+# 2. doc only change
+# 3. ui only change
+# 4. two changes found, if either doc or ui does not exist in the changes, set both flags to false
+
+if [[ $change_count -gt 2 ]]; then
+  echo "is_docs_change=false" >> "$GITHUB_OUTPUT"
+  echo "is_ui_change=false" >> "$GITHUB_OUTPUT"
+elif [[ $change_count -eq 1 && "$changed_dir" == "website" ]]; then
+  echo "is_docs_change=true" >> "$GITHUB_OUTPUT"
+  echo "is_ui_change=false" >> "$GITHUB_OUTPUT"
+elif [[ $change_count -eq 1 && "$changed_dir" == "ui" ]]; then
+  echo "is_ui_change=true" >> "$GITHUB_OUTPUT"
+  echo "is_docs_change=false" >> "$GITHUB_OUTPUT"
+else
+  if ! contains "website" ${changed_dir[@]} || ! contains "ui" ${changed_dir[@]}; then
+    echo "is_docs_change=false" >> "$GITHUB_OUTPUT"
+    echo "is_ui_change=false" >> "$GITHUB_OUTPUT"
+  else
+    echo "is_docs_change=true" >> "$GITHUB_OUTPUT"
+    echo "is_ui_change=true" >> "$GITHUB_OUTPUT"
+  fi
+fi
diff --git a/.github/workflows/actionlint.yml b/.github/workflows/actionlint.yml
new file mode 100644
index 0000000..8b466e7
--- /dev/null
+++ b/.github/workflows/actionlint.yml
@@ -0,0 +1,17 @@
+name: Lint GitHub Actions Workflows
+on:
+  pull_request:
+    paths:
+      - '.github/**'
+    types: [opened, synchronize, reopened, ready_for_review]
+
+jobs:
+  actionlint:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+      - name: "Check workflow files"
+        uses: docker://docker.mirror.hashicorp.services/rhysd/actionlint@sha256:93834930f56ca380be3e9a3377670d7aa5921be251b9c774891a39b3629b83b8
+        with:
+          # milestoned and demilestoned work (https://github.com/github/docs/issues/23909) but they aren't listed in the github documentation, so actionlint complains about them
+          args: "-ignore=\"invalid activity type \\\"demilestoned\\\" for \\\"pull_request\\\" Webhook event\" -ignore=\"invalid activity type \\\"milestoned\\\" for \\\"pull_request\\\" Webhook event\""
diff --git a/.github/workflows/add-hashicorp-contributed-label.yml b/.github/workflows/add-hashicorp-contributed-label.yml
new file mode 100644
index 0000000..379b8cc
--- /dev/null
+++ b/.github/workflows/add-hashicorp-contributed-label.yml
@@ -0,0 +1,26 @@
+name: Add HashiCorp contributed label
+
+# The purpose of this job is to label all HashiCorp contributed PRs, so that
+# we can more easily identify community contributed PRs (anything that doesn't
+# have this label).
+# While it might seem like this is the 'reverse' of what we should do, GitHub
+# (rightly) does not allow branches from forks to have write permissions, so
+# making PRs from forks self-label themselves as community-contributed is not possible.
+ +on: + # On every pull request, on every branch + pull_request: + types: [opened, synchronize, reopened] + +jobs: + add-hashicorp-contributed-label: + # Only run if this is NOT coming from a fork of hashicorp/vault (if this is not true, it's community contributed) + if: ${{ github.repository == 'hashicorp/vault' && (github.event.pull_request.head.repo.full_name == github.event.pull_request.base.repo.full_name) }} + runs-on: ubuntu-latest + steps: + - name: "Add label to PR" + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PR: ${{ github.event.pull_request.html_url }} + run: gh pr edit "$PR" --add-label 'hashicorp-contributed-pr' diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml new file mode 100644 index 0000000..f78464a --- /dev/null +++ b/.github/workflows/backport.yml @@ -0,0 +1,23 @@ +--- +name: Backport Assistant Runner (for OSS & ENT) + +on: + pull_request_target: + types: + - closed + - labeled + +jobs: + backport-targeted-release-branch: + if: github.event.pull_request.merged + runs-on: ubuntu-latest + container: hashicorpdev/backport-assistant:0.3.3 + steps: + - name: Backport changes to targeted release branch + run: | + backport-assistant backport -merge-method=squash -gh-automerge + env: + BACKPORT_LABEL_REGEXP: "backport/(?P\\d+\\.\\d+\\.[+\\w]+)" + BACKPORT_TARGET_TEMPLATE: "release/{{.target}}" + BACKPORT_MERGE_COMMIT: true + GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} diff --git a/.github/workflows/build-vault-ce.yml b/.github/workflows/build-vault-ce.yml new file mode 100644 index 0000000..5d0f2d2 --- /dev/null +++ b/.github/workflows/build-vault-ce.yml @@ -0,0 +1,110 @@ +--- +name: build_vault + +# This workflow is intended to be called by the build workflow for each Vault +# binary that needs to be built and packaged. The ci make targets that are +# utilized automatically determine build metadata and handle building and +# packing vault. + +on: + workflow_call: + inputs: + cgo-enabled: + type: string + default: 0 + create-packages: + type: boolean + default: true + goos: + required: true + type: string + goarch: + required: true + type: string + go-tags: + type: string + package-name: + type: string + default: vault + vault-version: + type: string + required: true + web-ui-cache-key: + type: string + required: true + +jobs: + build: + runs-on: custom-linux-xl-vault-latest + name: Vault ${{ inputs.goos }} ${{ inputs.goarch }} v${{ inputs.vault-version }} + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: ./.github/actions/set-up-go + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + - name: Restore UI from cache + uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 + with: + # Restore the UI asset from the UI build workflow. Never use a partial restore key. 
+ enableCrossOsArchive: true + fail-on-cache-miss: true + path: http/web_ui + key: ${{ inputs.web-ui-cache-key }} + - name: Build Vault + env: + GO_TAGS: ${{ inputs.go-tags }} + CGO_ENABLED: ${{ inputs.cgo-enabled }} + GOARCH: ${{ inputs.goarch }} + GOOS: ${{ inputs.goos }} + VERSION: ${{ inputs.vault-version }} + run: + make ci-build + - name: Determine artifact basename + env: + GOARCH: ${{ inputs.goarch }} + GOOS: ${{ inputs.goos }} + VERSION: ${{ inputs.vault-version }} + run: echo "ARTIFACT_BASENAME=$(make ci-get-artifact-basename)" >> "$GITHUB_ENV" + - name: Bundle Vault + env: + BUNDLE_PATH: out/${{ env.ARTIFACT_BASENAME }}.zip + run: make ci-bundle + - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + with: + name: ${{ env.ARTIFACT_BASENAME }}.zip + path: out/${{ env.ARTIFACT_BASENAME }}.zip + if-no-files-found: error + - if: ${{ inputs.create-packages }} + uses: hashicorp/actions-packaging-linux@v1 + with: + name: ${{ github.event.repository.name }} + description: Vault is a tool for secrets management, encryption as a service, and privileged access management. + arch: ${{ inputs.goarch }} + version: ${{ inputs.vault-version }} + maintainer: HashiCorp + homepage: https://github.com/hashicorp/vault + license: MPL-2.0 + binary: dist/${{ inputs.package-name }} + deb_depends: openssl + rpm_depends: openssl + config_dir: .release/linux/package/ + preinstall: .release/linux/preinst + postinstall: .release/linux/postinst + postremove: .release/linux/postrm + - if: ${{ inputs.create-packages }} + name: Determine package file names + run: | + echo "RPM_PACKAGE=$(basename out/*.rpm)" >> "$GITHUB_ENV" + echo "DEB_PACKAGE=$(basename out/*.deb)" >> "$GITHUB_ENV" + - if: ${{ inputs.create-packages }} + uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + with: + name: ${{ env.RPM_PACKAGE }} + path: out/${{ env.RPM_PACKAGE }} + if-no-files-found: error + - if: ${{ inputs.create-packages }} + uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + with: + name: ${{ env.DEB_PACKAGE }} + path: out/${{ env.DEB_PACKAGE }} + if-no-files-found: error diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 0000000..3e69ab5 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,456 @@ +name: build + +on: + workflow_dispatch: + pull_request: + # The default types for pull_request are [ opened, synchronize, reopened ]. + # This is insufficient for our needs, since we're skipping stuff on PRs in + # draft mode. By adding the ready_for_review type, when a draft pr is marked + # ready, we run everything, including the stuff we'd have skipped up until now. 
+ types: [opened, synchronize, reopened, ready_for_review] + push: + branches: + - main + - release/** + +concurrency: + group: ${{ github.head_ref || github.run_id }}-build + cancel-in-progress: true + +jobs: + # verify-changes determines if the changes are only for docs (website) + verify-changes: + uses: ./.github/workflows/verify_changes.yml + + product-metadata: + # do not run build and test steps for docs changes + # Following https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/managing-protected-branches/troubleshooting-required-status-checks#handling-skipped-but-required-checks + # we conditionally skip the build and tests for docs(website) changes + if: | + github.event.pull_request.draft == false && + needs.verify-changes.outputs.is_docs_change == 'false' + runs-on: ubuntu-latest + needs: verify-changes + outputs: + build-date: ${{ steps.get-metadata.outputs.build-date }} + filepath: ${{ steps.generate-metadata-file.outputs.filepath }} + package-name: ${{ steps.get-metadata.outputs.package-name }} + vault-revision: ${{ steps.get-metadata.outputs.vault-revision }} + vault-version: ${{ steps.set-product-version.outputs.product-version }} + vault-version-package: ${{ steps.get-metadata.outputs.vault-version-package }} + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - name: Ensure Go modules are cached + uses: ./.github/actions/set-up-go + id: set-up-go + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + no-restore: true # don't download them on a cache hit + - name: Set Product version + id: set-product-version + uses: hashicorp/actions-set-product-version@v1 + - name: Get metadata + id: get-metadata + env: + VAULT_VERSION: ${{ steps.set-product-version.outputs.product-version }} + run: | + # shellcheck disable=SC2129 + echo "build-date=$(make ci-get-date)" >> "$GITHUB_OUTPUT" + echo "package-name=vault" >> "$GITHUB_OUTPUT" + echo "vault-revision=$(make ci-get-revision)" >> "$GITHUB_OUTPUT" + echo "vault-version-package=$(make ci-get-version-package)" >> "$GITHUB_OUTPUT" + - uses: hashicorp/actions-generate-metadata@v1 + id: generate-metadata-file + with: + version: ${{ steps.set-product-version.outputs.product-version }} + product: ${{ steps.get-metadata.outputs.package-name }} + - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + with: + name: metadata.json + path: ${{ steps.generate-metadata-file.outputs.filepath }} + if-no-files-found: error + + build-ui: + name: UI + runs-on: custom-linux-xl-vault-latest + outputs: + cache-key: ui-${{ steps.ui-hash.outputs.ui-hash }} + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - name: Get UI hash + id: ui-hash + run: echo "ui-hash=$(git ls-tree HEAD ui --object-only)" >> "$GITHUB_OUTPUT" + - name: Set up UI asset cache + id: cache-ui-assets + uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 + with: + enableCrossOsArchive: true + lookup-only: true + path: http/web_ui + # Only restore the UI asset cache if we haven't modified anything in the ui directory. + # Never do a partial restore of the web_ui if we don't get a cache hit. 
+ key: ui-${{ steps.ui-hash.outputs.ui-hash }} + - if: steps.cache-ui-assets.outputs.cache-hit != 'true' + name: Set up node and yarn + uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + with: + node-version-file: ui/package.json + cache: yarn + cache-dependency-path: ui/yarn.lock + - if: steps.cache-ui-assets.outputs.cache-hit != 'true' + name: Build UI + run: make ci-build-ui + + build-other: + name: Other + needs: + - product-metadata + - build-ui + strategy: + matrix: + goos: [freebsd, windows, netbsd, openbsd, solaris] + goarch: [386, amd64, arm] + exclude: + - goos: solaris + goarch: 386 + - goos: solaris + goarch: arm + - goos: windows + goarch: arm + fail-fast: true + uses: ./.github/workflows/build-vault-ce.yml + with: + create-packages: false + goarch: ${{ matrix.goarch }} + goos: ${{ matrix.goos }} + go-tags: ui + package-name: ${{ needs.product-metadata.outputs.package-name }} + web-ui-cache-key: ${{ needs.build-ui.outputs.cache-key }} + vault-version: ${{ needs.product-metadata.outputs.vault-version }} + secrets: inherit + + build-linux: + name: Linux + needs: + - product-metadata + - build-ui + strategy: + matrix: + goos: [linux] + goarch: [arm, arm64, 386, amd64] + fail-fast: true + uses: ./.github/workflows/build-vault-ce.yml + with: + goarch: ${{ matrix.goarch }} + goos: ${{ matrix.goos }} + go-tags: ui + package-name: ${{ needs.product-metadata.outputs.package-name }} + web-ui-cache-key: ${{ needs.build-ui.outputs.cache-key }} + vault-version: ${{ needs.product-metadata.outputs.vault-version }} + secrets: inherit + + build-darwin: + name: Darwin + needs: + - product-metadata + - build-ui + strategy: + matrix: + goos: [darwin] + goarch: [amd64, arm64] + fail-fast: true + uses: ./.github/workflows/build-vault-ce.yml + with: + create-packages: false + goarch: ${{ matrix.goarch }} + goos: ${{ matrix.goos }} + go-tags: ui + package-name: ${{ needs.product-metadata.outputs.package-name }} + web-ui-cache-key: ${{ needs.build-ui.outputs.cache-key }} + vault-version: ${{ needs.product-metadata.outputs.vault-version }} + secrets: inherit + + build-docker: + name: Docker image + needs: + - product-metadata + - build-linux + runs-on: ubuntu-latest + strategy: + matrix: + arch: [arm, arm64, 386, amd64] + env: + repo: ${{ github.event.repository.name }} + version: ${{ needs.product-metadata.outputs.vault-version }} + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: hashicorp/actions-docker-build@v1 + with: + version: ${{ env.version }} + target: default + arch: ${{ matrix.arch }} + zip_artifact_name: vault_${{ env.version }}_linux_${{ matrix.arch }}.zip + tags: | + docker.io/hashicorp/${{ env.repo }}:${{ env.version }} + public.ecr.aws/hashicorp/${{ env.repo }}:${{ env.version }} + + build-ubi: + name: UBI image + needs: + - product-metadata + - build-linux + runs-on: ubuntu-latest + strategy: + matrix: + arch: [amd64] + env: + repo: ${{ github.event.repository.name }} + version: ${{ needs.product-metadata.outputs.vault-version }} + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: hashicorp/actions-docker-build@v1 + with: + version: ${{ env.version }} + target: ubi + arch: ${{ matrix.arch }} + zip_artifact_name: vault_${{ env.version }}_linux_${{ matrix.arch }}.zip + # The redhat_tag differs on CE and ENT editions. Be mindful when resolving merge conflicts. 
+ redhat_tag: quay.io/redhat-isv-containers/5f89bb5e0b94cf64cfeb500a:${{ env.version }}-ubi + + test: + name: Test ${{ matrix.build-artifact-name }} + # Only run the Enos workflow against branches that are created from the + # hashicorp/vault repository. This has the effect of limiting execution of + # Enos scenarios to branches that originate from authors that have write + # access to hashicorp/vault repository. This is required as Github Actions + # will not populate the required secrets for branches created by outside + # contributors in order to protect the secrets integrity. + # This condition can be removed in future if enos workflow is updated to + # workflow_run event + if: "! github.event.pull_request.head.repo.fork" + needs: + - product-metadata + - build-linux + uses: ./.github/workflows/test-run-enos-scenario-matrix.yml + strategy: + fail-fast: false + matrix: + include: + - sample-name: build_ce_linux_amd64_deb + build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_amd64.deb + - sample-name: build_ce_linux_arm64_deb + build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_arm64.deb + - sample-name: build_ce_linux_amd64_rpm + build-artifact-name: vault-${{ needs.product-metadata.outputs.vault-version-package }}-1.x86_64.rpm + - sample-name: build_ce_linux_arm64_rpm + build-artifact-name: vault-${{ needs.product-metadata.outputs.vault-version-package }}-1.aarch64.rpm + - sample-name: build_ce_linux_amd64_zip + build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_amd64.zip + - sample-name: build_ce_linux_arm64_zip + build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_arm64.zip + with: + build-artifact-name: ${{ matrix.build-artifact-name }} + sample-max: 1 + sample-name: ${{ matrix.sample-name }} + ssh-key-name: ${{ github.event.repository.name }}-ci-ssh-key + vault-revision: ${{ needs.product-metadata.outputs.vault-revision }} + vault-version: ${{ needs.product-metadata.outputs.vault-version }} + secrets: inherit + + test-docker-k8s: + name: Test Docker K8s + # Only run the Enos workflow against branches that are created from the + # hashicorp/vault repository. This has the effect of limiting execution of + # Enos scenarios to branches that originate from authors that have write + # access to hashicorp/vault repository. This is required as Github Actions + # will not populate the required secrets for branches created by outside + # contributors in order to protect the secrets integrity. + # GHA secrets are only ready on workflow_run for public repo + # This condition can be removed in future if enos workflow is updated to + # workflow_run event + if: "! 
github.event.pull_request.head.repo.fork" + needs: + - product-metadata + - build-docker + uses: ./.github/workflows/enos-run-k8s.yml + with: + artifact-build-date: ${{ needs.product-metadata.outputs.build-date }} + artifact-name: ${{ github.event.repository.name }}_default_linux_amd64_${{ needs.product-metadata.outputs.vault-version }}_${{ needs.product-metadata.outputs.vault-revision }}.docker.tar + artifact-revision: ${{ needs.product-metadata.outputs.vault-revision }} + artifact-version: ${{ needs.product-metadata.outputs.vault-version }} + secrets: inherit + + report-build-failures: + name: Report Build Failures + needs: + - build-other + - build-linux + - build-darwin + - build-docker + - build-ubi + - test + - test-docker-k8s + if: (success() || failure()) && github.head_ref != '' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - name: Build Status + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PR_NUMBER: ${{ github.event.pull_request.number }} + RUN_ID: ${{ github.run_id }} + REPO: ${{ github.event.repository.name }} + BUILD_OTHER: ${{ needs.build-other.result }} + BUILD_LINUX: ${{ needs.build-linux.result }} + BUILD_DARWIN: ${{ needs.build-darwin.result }} + BUILD_DOCKER: ${{ needs.build-docker.result }} + BUILD_UBI: ${{ needs.build-ubi.result }} + TEST: ${{ needs.test.result }} + TEST_DOCKER_K8S: ${{ needs.test-docker-k8s.result }} + run: ./.github/scripts/report_failed_builds.sh + + completed-successfully: + # We force a failure if any of the dependent jobs fail, + # this is a workaround for the issue reported https://github.com/actions/runner/issues/2566 + if: always() + runs-on: ubuntu-latest + needs: + - build-other + - build-linux + - build-darwin + - build-docker + - build-ubi + - test + - test-docker-k8s + steps: + - run: | + tr -d '\n' <<< '${{ toJSON(needs.*.result) }}' | grep -q -v -E '(failure|cancelled)' + + notify-completed-successfully-failures-ce: + if: ${{ always() && github.repository == 'hashicorp/vault' && needs.completed-successfully.result == 'failure' && (github.ref_name == 'main' || startsWith(github.ref_name, 'release/')) }} + runs-on: ubuntu-latest + permissions: + id-token: write + contents: read + strategy: + fail-fast: false + needs: + - completed-successfully + - build-other + - build-linux + - build-darwin + - build-docker + - build-ubi + - test + - test-docker-k8s + steps: + - name: send-notification + uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 + # We intentionally aren't using the following here since it's from an internal repo + # uses: hashicorp/cloud-gha-slack-notifier@730a033037b8e603adf99ebd3085f0fdfe75e2f4 #v1 + env: + SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} + with: + channel-id: "C05AABYEA9Y" # sent to #feed-vault-ci-official, use "C05Q4D5V89W"/test-vault-ci-slack-integration for testing + payload: | + { + "text": "CE build failures on ${{ github.ref_name }}", + "blocks": [ + { + "type": "header", + "text": { + "type": "plain_text", + "text": ":rotating_light: CE build failures on ${{ github.ref_name }} :rotating_light:", + "emoji": true + } + }, + { + "type": "divider" + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "${{ (needs.build-other.result != 'failure' && needs.build-linux.result != 'failure' && needs.build-darwin.result != 'failure' && needs.build-docker.result != 'failure' && needs.build-ubi.result != 'failure') && ':white_check_mark:' || ':x:' }} Build results\n${{ (needs.test.result != 
'failure' && needs.test-docker-k8s.result != 'failure') && ':white_check_mark:' || ':x:' }} Enos tests" + }, + "accessory": { + "type": "button", + "text": { + "type": "plain_text", + "text": "View Failing Workflow", + "emoji": true + }, + "url": "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + } + } + ] + } + + notify-completed-successfully-failures-ent: + if: ${{ always() && github.repository == 'hashicorp/vault-enterprise' && needs.completed-successfully.result == 'failure' && (github.ref_name == 'main' || startsWith(github.ref_name, 'release/')) }} + runs-on: ['self-hosted', 'linux', 'small'] + permissions: + id-token: write + contents: read + strategy: + fail-fast: false + needs: + - completed-successfully + - build-other + - build-linux + - build-darwin + - build-docker + - build-ubi + - test + - test-docker-k8s + steps: + - id: vault-auth + name: Vault Authenticate + run: vault-auth + - id: secrets + name: Fetch Vault Secrets + uses: hashicorp/vault-action@130d1f5f4fe645bb6c83e4225c04d64cfb62de6e + with: + url: ${{ steps.vault-auth.outputs.addr }} + caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }} + token: ${{ steps.vault-auth.outputs.token }} + secrets: | + kv/data/github/${{ github.repository }}/github_actions_notifications_bot token | SLACK_BOT_TOKEN; + - name: send-notification + uses: hashicorp/cloud-gha-slack-notifier@730a033037b8e603adf99ebd3085f0fdfe75e2f4 #v1 + with: + channel-id: "C05AABYEA9Y" # sent to #feed-vault-ci-official, use "C05Q4D5V89W"/test-vault-ci-slack-integration for testing + slack-bot-token: ${{ steps.secrets.outputs.SLACK_BOT_TOKEN }} + payload: | + { + "text": "Enterprise build failures on ${{ github.ref_name }}", + "blocks": [ + { + "type": "header", + "text": { + "type": "plain_text", + "text": ":rotating_light: Enterprise build failures on ${{ github.ref_name }} :rotating_light:", + "emoji": true + } + }, + { + "type": "divider" + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": "${{ (needs.build-other.result != 'failure' && needs.build-linux.result != 'failure' && needs.build-darwin.result != 'failure' && needs.build-docker.result != 'failure' && needs.build-ubi.result != 'failure') && ':white_check_mark:' || ':x:' }} Build results\n${{ (needs.test.result != 'failure' && needs.test-docker-k8s.result != 'failure') && ':white_check_mark:' || ':x:' }} Enos tests" + }, + "accessory": { + "type": "button", + "text": { + "type": "plain_text", + "text": "View Failing Workflow", + "emoji": true + }, + "url": "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" + } + } + ] + } \ No newline at end of file diff --git a/.github/workflows/changelog-checker.yml b/.github/workflows/changelog-checker.yml new file mode 100644 index 0000000..4db25c1 --- /dev/null +++ b/.github/workflows/changelog-checker.yml @@ -0,0 +1,90 @@ +# This workflow checks that there is either a 'pr/no-changelog' label applied to a PR +# or there is a changelog/.txt file associated with a PR for a changelog entry + +name: Check Changelog + +on: + pull_request: + types: [opened, synchronize, labeled, unlabeled] + # Runs on PRs to main + branches: + - main + +jobs: + # checks that a changelog entry is present for a PR + changelog-check: + # If there a `pr/no-changelog` label we ignore this check + if: "!contains(github.event.pull_request.labels.*.name, 'pr/no-changelog')" + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + with: 
+ ref: ${{ github.event.pull_request.head.sha }} + fetch-depth: 0 # by default the checkout action doesn't checkout all branches + - name: Check for changelog entry in diff + run: | + # Check if there is a diff in the changelog directory. + # + # Try to identify the expected changelog file name based on PR + # number. This won't work for Go version updates though. + if [ ${{ github.event.repository.name }} == "vault-enterprise" ]; then + expected_changelog_file=changelog/_${{ github.event.pull_request.number }}.txt + else + expected_changelog_file=changelog/${{ github.event.pull_request.number }}.txt + fi + + echo "looking for changelog file ${expected_changelog_file}" + changelog_files=$(git --no-pager diff --name-only HEAD "$(git merge-base HEAD "origin/${{ github.event.pull_request.base.ref }}")" -- ${expected_changelog_file}) + + if [ -z "$changelog_files" ]; then + echo "Not found." + echo "looking for changelog file matching changelog/_go-ver-*.txt" + # If we do not find a file matching the PR # in changelog/, we fail the check + # unless we did a Go toolchain version update, in which case we check the + # alternative name. + toolchain_files=$(git --no-pager diff --name-only HEAD "$(git merge-base HEAD "origin/${{ github.event.pull_request.base.ref }}")" -- 'changelog/_go-ver-*.txt') + if [ -z "$toolchain_files" ]; then + echo "Not found." + echo "" + echo "Did not find a changelog entry named ${expected_changelog_file}" + echo "If your changelog file is correct, skip this check with the 'pr/no-changelog' label" + echo "Reference - https://github.com/hashicorp/vault/pull/10363 and https://github.com/hashicorp/vault/pull/11894" + exit 1 + fi + + # Else, we found some toolchain files. Let's make sure the contents are correct. + if ! grep -q 'release-note:change' "$toolchain_files" || ! grep -q '^core: Bump Go version to' "$toolchain_files"; then + echo "Invalid format for changelog. Expected format:" + echo '```release-note:change' + echo "core: Bump Go version to x.y.z." + echo '```' + exit 1 + else + echo "Found Go toolchain changelog entry in PR!" + fi + elif grep -q ':enhancement$' "$changelog_files"; then + # "Enhancement is not a valid type of changelog entry, but it's a common mistake. + echo "Found invalid type (enhancement) in changelog - did you mean improvement?" + exit 1 + elif grep -q ':changes$' "$changelog_files"; then + echo "Found invalid type (changes) in changelog - did you mean change?" + exit 1 + elif grep -q ':bugs$' "$changelog_files"; then + echo "Found invalid type (bugs) in changelog - did you mean bug?" + exit 1 + elif grep -q ':fix$' "$changelog_files"; then + echo "Found invalid type (fix) in changelog - did you mean bug?" + exit 1 + elif ! grep -q '```release-note:' "$changelog_files"; then + # People often make changelog files like ```changelog:, which is incorrect. + echo "Changelog file did not contain 'release-note' heading - check formatting." + exit 1 + elif grep -q '^core: Bump Go version' "$changelog_files"; then + echo "Don't use PR numbered changelog entries for Go version bumps!" + echo "Please use the format changelog/_go-ver-.txt instead." + echo "Example: _go-ver-1110.txt for Vault 1.11.0" + exit 1 + else + echo "Found changelog entry in PR!" + fi diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..105ed94 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,435 @@ +name: CI +on: + pull_request: + # The default types for pull_request are [ opened, synchronize, reopened ]. 
+ # This is insufficient for our needs, since we're skipping stuff on PRs in + # draft mode. By adding the ready_for_review type, when a draft pr is marked + # ready, we run everything, including the stuff we'd have skipped up until now. + types: [opened, synchronize, reopened, ready_for_review] + push: + branches: + - main + - release/** + workflow_dispatch: + +concurrency: + group: ${{ github.head_ref || github.run_id }}-ci + cancel-in-progress: true + +jobs: + setup: + name: Setup + runs-on: ubuntu-latest + outputs: + compute-small: ${{ steps.setup-outputs.outputs.compute-small }} + compute-medium: ${{ steps.setup-outputs.outputs.compute-medium }} + compute-large: ${{ steps.setup-outputs.outputs.compute-large }} + compute-largem: ${{ steps.setup-outputs.outputs.compute-largem }} + compute-xlarge: ${{ steps.setup-outputs.outputs.compute-xlarge }} + enterprise: ${{ steps.setup-outputs.outputs.enterprise }} + go-tags: ${{ steps.setup-outputs.outputs.go-tags }} + checkout-ref: ${{ steps.checkout-ref-output.outputs.checkout-ref }} + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - id: setup-outputs + name: Setup outputs + run: | + github_repository="${{ github.repository }}" + + if [ "${github_repository##*/}" == "vault-enterprise" ] ; then + # shellcheck disable=SC2129 + echo 'compute-small=["self-hosted","ondemand","linux","type=c6a.large"]' >> "$GITHUB_OUTPUT" # 2x vCPUs, 4 GiB RAM, + echo 'compute-medium=["self-hosted","ondemand","linux","type=c6a.xlarge"]' >> "$GITHUB_OUTPUT" # 4x vCPUs, 8 GiB RAM, + echo 'compute-large=["self-hosted","ondemand","linux","type=c6a.2xlarge","disk_gb=64"]' >> "$GITHUB_OUTPUT" # 8x vCPUs, 16 GiB RAM, + echo 'compute-largem=["self-hosted","ondemand","linux","type=m6a.2xlarge"]' >> "$GITHUB_OUTPUT" # 8x vCPUs, 32 GiB RAM, + echo 'compute-xlarge=["self-hosted","ondemand","linux","type=c6a.4xlarge"]' >> "$GITHUB_OUTPUT" # 16x vCPUs, 32 GiB RAM, + echo 'enterprise=1' >> "$GITHUB_OUTPUT" + echo 'go-tags=ent,enterprise' >> "$GITHUB_OUTPUT" + else + # shellcheck disable=SC2129 + echo 'compute-small="ubuntu-latest"' >> "$GITHUB_OUTPUT" # 2x vCPUs, 7 GiB RAM, 14 GB SSD + echo 'compute-medium="custom-linux-small-vault-latest"' >> "$GITHUB_OUTPUT" # 8x vCPUs, 32 GiB RAM, 300 GB SSD + echo 'compute-large="custom-linux-medium-vault-latest"' >> "$GITHUB_OUTPUT" # 16x vCPUs, 64 GiB RAM, 600 GB SSD + echo 'compute-largem="custom-linux-medium-vault-latest"' >> "$GITHUB_OUTPUT" # 16x vCPUs, 64 GiB RAM, 600 GB SSD + echo 'compute-xlarge="custom-linux-xl-vault-latest"' >> "$GITHUB_OUTPUT" # 32x vCPUs, 128 GiB RAM, 1200 GB SSD + echo 'enterprise=' >> "$GITHUB_OUTPUT" + echo 'go-tags=' >> "$GITHUB_OUTPUT" + fi + - name: Ensure Go modules are cached + uses: ./.github/actions/set-up-go + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + no-restore: true # don't download them on a cache hit + # control checking out head instead of default ref by a GH label + # if checkout-head label is added to a PR, checkout HEAD otherwise checkout ref + - if: ${{ !contains(github.event.pull_request.labels.*.name, 'checkout-head') }} + run: echo "CHECKOUT_REF=${{ github.ref }}" >> "$GITHUB_ENV" + - if: ${{ contains(github.event.pull_request.labels.*.name, 'checkout-head') }} + run: echo "CHECKOUT_REF=${{ github.event.pull_request.head.sha }}" >> "$GITHUB_ENV" + - id: checkout-ref-output + run: echo "checkout-ref=${{ env.CHECKOUT_REF }}" >> "$GITHUB_OUTPUT" + + diff-oss-ci: + name: Diff OSS + needs: + - setup + if: ${{ needs.setup.outputs.enterprise 
!= '' && github.base_ref != '' }} + runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }} + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + with: + fetch-depth: 0 + - id: determine-branch + run: | + branch="${{ github.base_ref }}" + + if [[ $branch = release/* ]] ; then + branch=${branch%%+ent} + + # Add OSS remote + git config --global user.email "github-team-secret-vault-core@hashicorp.com" + git config --global user.name "hc-github-team-secret-vault-core" + git remote add oss https://github.com/hashicorp/vault.git + git fetch oss "$branch" + + branch="oss/$branch" + else + branch="origin/$branch" + fi + + echo "BRANCH=$branch" >> "$GITHUB_OUTPUT" + - id: diff + run: | + ./.github/scripts/oss-diff.sh ${{ steps.determine-branch.outputs.BRANCH }} HEAD + + verify-changes: + name: Verify doc-ui only PRs + uses: ./.github/workflows/verify_changes.yml + + test-go: + name: Run Go tests + needs: + - setup + - verify-changes + # Don't run this job for docs/ui only PRs + if: | + needs.verify-changes.outputs.is_docs_change == 'false' && + needs.verify-changes.outputs.is_ui_change == 'false' + uses: ./.github/workflows/test-go.yml + with: + # The regular Go tests use an extra runner to execute the + # binary-dependent tests. We isolate them there so that the + # other tests aren't slowed down waiting for a binary build. + binary-tests: true + total-runners: 16 + go-arch: amd64 + go-tags: '${{ needs.setup.outputs.go-tags }},deadlock' + runs-on: ${{ needs.setup.outputs.compute-large }} + enterprise: ${{ needs.setup.outputs.enterprise }} + checkout-ref: ${{ needs.setup.outputs.checkout-ref }} + secrets: inherit + + test-go-testonly: + name: Run Go tests tagged with testonly + needs: + - setup + - verify-changes + # Don't run this job for docs/ui only PRs + if: | + needs.verify-changes.outputs.is_docs_change == 'false' && + needs.verify-changes.outputs.is_ui_change == 'false' + uses: ./.github/workflows/test-go.yml + with: + testonly: true + total-runners: 2 # test runners cannot be less than 2 + go-arch: amd64 + go-tags: '${{ needs.setup.outputs.go-tags }},deadlock,testonly' + runs-on: ${{ needs.setup.outputs.compute-large }} + enterprise: ${{ needs.setup.outputs.enterprise }} + secrets: inherit + + test-go-race: + name: Run Go tests with data race detection + needs: + - setup + - verify-changes + # Don't run this job for docs/ui only PRs + if: | + github.event.pull_request.draft == false && + needs.verify-changes.outputs.is_docs_change == 'false' && + needs.verify-changes.outputs.is_ui_change == 'false' + uses: ./.github/workflows/test-go.yml + with: + total-runners: 16 + env-vars: | + { + "VAULT_CI_GO_TEST_RACE": 1 + } + extra-flags: '-race' + go-arch: amd64 + go-tags: ${{ needs.setup.outputs.go-tags }} + runs-on: ${{ needs.setup.outputs.compute-large }} + enterprise: ${{ needs.setup.outputs.enterprise }} + name: "race" + checkout-ref: ${{ needs.setup.outputs.checkout-ref }} + secrets: inherit + + test-go-fips: + name: Run Go tests with FIPS configuration + # Only run fips on the enterprise repo, and only if it's main or a release branch + # (i.e. 
not a PR), or is a PR with the label "fips" + if: | + needs.setup.outputs.enterprise == 1 && + needs.verify-changes.outputs.is_docs_change == 'false' && + needs.verify-changes.outputs.is_ui_change == 'false' && + (contains(github.event.pull_request.labels.*.name, 'fips') || github.ref_name == 'main' || startsWith(github.ref_name, 'release/')) + needs: + - setup + - verify-changes + uses: ./.github/workflows/test-go.yml + with: + total-runners: 16 + env-vars: | + { + "GOEXPERIMENT": "boringcrypto" + } + go-arch: amd64 + go-tags: '${{ needs.setup.outputs.go-tags }},deadlock,cgo,fips,fips_140_2' + runs-on: ${{ needs.setup.outputs.compute-large }} + enterprise: ${{ needs.setup.outputs.enterprise }} + name: "fips" + checkout-ref: ${{ needs.setup.outputs.checkout-ref }} + secrets: inherit + + test-ui: + name: Test UI + # The test-ui job is only run on: + # - pushes to main and branches starting with "release/" + # - PRs where the branch starts with "ui/", "backport/ui/", "merge", or when base branch starts with "release/" + # - PRs with the "ui" label on GitHub + if: | + github.ref_name == 'main' || + startsWith(github.ref_name, 'release/') || + startsWith(github.head_ref, 'ui/') || + startsWith(github.head_ref, 'backport/ui/') || + startsWith(github.head_ref, 'merge') || + contains(github.event.pull_request.labels.*.name, 'ui') + needs: + - setup + permissions: + id-token: write + contents: read + runs-on: ${{ fromJSON(needs.setup.outputs.compute-largem) }} + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: ./.github/actions/set-up-go + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + # Setup node.js without caching to allow running npm install -g yarn (next step) + - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + with: + node-version-file: './ui/package.json' + - id: install-yarn + run: | + npm install -g yarn + # Setup node.js with caching using the yarn.lock file + - uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + with: + node-version-file: './ui/package.json' + cache: yarn + cache-dependency-path: ui/yarn.lock + - id: install-browser + uses: browser-actions/setup-chrome@c485fa3bab6be59dce18dbc18ef6ab7cbc8ff5f1 # v1.2.0 + - id: ui-dependencies + name: ui-dependencies + working-directory: ./ui + run: | + yarn install --frozen-lockfile + npm rebuild node-sass + - id: vault-auth + name: Authenticate to Vault + if: github.repository == 'hashicorp/vault-enterprise' + run: vault-auth + - id: secrets + name: Fetch secrets + if: github.repository == 'hashicorp/vault-enterprise' + uses: hashicorp/vault-action@130d1f5f4fe645bb6c83e4225c04d64cfb62de6e + with: + url: ${{ steps.vault-auth.outputs.addr }} + caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }} + token: ${{ steps.vault-auth.outputs.token }} + secrets: | + kv/data/github/hashicorp/vault-enterprise/github-token token | PRIVATE_REPO_GITHUB_TOKEN; + kv/data/github/hashicorp/vault-enterprise/license license_1 | VAULT_LICENSE; + - id: setup-git + name: Setup Git + if: github.repository == 'hashicorp/vault-enterprise' + run: | + git config --global url."https://${{ steps.secrets.outputs.PRIVATE_REPO_GITHUB_TOKEN }}@github.com".insteadOf https://github.com + - id: build-go-dev + name: build-go-dev + run: | + rm -rf ./pkg + mkdir ./pkg + + make ci-bootstrap dev + - id: test-ui + name: test-ui + env: + VAULT_LICENSE: ${{ steps.secrets.outputs.VAULT_LICENSE }} + run: | + export PATH="${PWD}/bin:${PATH}" + + if [ "${{ 
github.repository }}" == 'hashicorp/vault' ] ; then + export VAULT_LICENSE="${{ secrets.VAULT_LICENSE }}" + fi + + # Run Ember tests + cd ui + mkdir -p test-results/qunit + yarn test:oss + - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + with: + name: test-results-ui + path: ui/test-results + if: success() || failure() + - uses: test-summary/action@62bc5c68de2a6a0d02039763b8c754569df99e3f # TSCCR: no entry for repository "test-summary/action" + with: + paths: "ui/test-results/qunit/results.xml" + show: "fail" + if: always() + + tests-completed: + needs: + - setup + - test-go + - test-ui + if: always() + runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }} + steps: + - run: | + tr -d '\n' <<< '${{ toJSON(needs.*.result) }}' | grep -q -v -E '(failure|cancelled)' + + notify-tests-completed-failures-oss: + if: | + always() && + github.repository == 'hashicorp/vault' && + (needs.test-go.result == 'failure' || + needs.test-go-fips.result == 'failure' || + needs.test-go-race.result == 'failure') && + (github.ref_name == 'main' || startsWith(github.ref_name, 'release/')) + runs-on: ubuntu-latest + permissions: + id-token: write + contents: read + strategy: + fail-fast: false + needs: + - test-go + - test-go-fips + - test-go-race + steps: + - name: send-notification + uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0 + # We intentionally aren't using the following here since it's from an internal repo + # uses: hashicorp/cloud-gha-slack-notifier@730a033037b8e603adf99ebd3085f0fdfe75e2f4 #v1 + env: + SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} + with: + channel-id: "C05AABYEA9Y" # sent to #feed-vault-ci-official + payload: | + {"text":"OSS test failures on ${{ github.ref_name }}","blocks":[{"type":"header","text":{"type":"plain_text","text":":rotating_light: OSS test failures :rotating_light:","emoji":true}},{"type":"divider"},{"type":"section","text":{"type":"mrkdwn","text":"test(s) failed on ${{ github.ref_name }}"},"accessory":{"type":"button","text":{"type":"plain_text","text":"View Failing Workflow","emoji":true},"url":"${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"}}]} + + notify-tests-completed-failures-ent: + if: | + always() && + github.repository == 'hashicorp/vault-enterprise' && + (needs.test-go.result == 'failure' || + needs.test-go-fips.result == 'failure' || + needs.test-go-race.result == 'failure') && + (github.ref_name == 'main' || startsWith(github.ref_name, 'release/')) + runs-on: ['self-hosted', 'linux', 'small'] + permissions: + id-token: write + contents: read + strategy: + fail-fast: false + needs: + - test-go + - test-go-fips + - test-go-race + steps: + - id: vault-auth + name: Vault Authenticate + run: vault-auth + - id: secrets + name: Fetch Vault Secrets + uses: hashicorp/vault-action@130d1f5f4fe645bb6c83e4225c04d64cfb62de6e + with: + url: ${{ steps.vault-auth.outputs.addr }} + caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }} + token: ${{ steps.vault-auth.outputs.token }} + secrets: | + kv/data/github/${{ github.repository }}/github_actions_notifications_bot token | SLACK_BOT_TOKEN; + - name: send-notification + uses: hashicorp/cloud-gha-slack-notifier@730a033037b8e603adf99ebd3085f0fdfe75e2f4 #v1 + with: + channel-id: "C05AABYEA9Y" # sent to #feed-vault-ci-official + slack-bot-token: ${{ steps.secrets.outputs.SLACK_BOT_TOKEN }} + payload: | + {"text":"Enterprise test failures on ${{ github.ref_name 
}}","blocks":[{"type":"header","text":{"type":"plain_text","text":":rotating_light: Enterprise test failures :rotating_light:","emoji":true}},{"type":"divider"},{"type":"section","text":{"type":"mrkdwn","text":"test(s) failed on ${{ github.ref_name }}"},"accessory":{"type":"button","text":{"type":"plain_text","text":"View Failing Workflow","emoji":true},"url":"${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"}}]} + + test-summary: + name: Go test failures + runs-on: ubuntu-latest + if: | + always() && + (needs.test-go.result == 'success' || + needs.test-go.result == 'failure' || + needs.test-go-fips.result == 'success' || + needs.test-go-fips.result == 'failure' || + needs.test-go-race.result == 'success' || + needs.test-go-race.result == 'failure') + needs: + - test-go + - test-go-fips + - test-go-race + steps: + - name: Download failure summary + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + with: + name: failure-summary + - name: Prepare failure summary + run: | + # Sort all of the summary table rows and push them to a temp file. + temp_file_name=temp-$(date +%s) + cat failure-summary-*.md | sort >> "$temp_file_name" + + # If there are test failures, present them in a format of a GitHub Markdown table. + if [ -s "$temp_file_name" ]; then + # shellcheck disable=SC2129 + # Here we create the headings for the summary table + echo "| Test Type | Package | Test | Elapsed | Runner Index | Logs |" >> "$GITHUB_STEP_SUMMARY" + echo "| --------- | ------- | ---- | ------- | ------------ | ---- |" >> "$GITHUB_STEP_SUMMARY" + # shellcheck disable=SC2002 + cat "$temp_file_name" >> "$GITHUB_STEP_SUMMARY" + else + echo "### All Go tests passed! :white_check_mark:" >> "$GITHUB_STEP_SUMMARY" + fi + + # the random EOF is needed for a multiline environment variable + EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64) + # shellcheck disable=SC2129 + echo "TABLE_TEST_RESULTS<<$EOF" >> "$GITHUB_ENV" + cat "$temp_file_name" >> "$GITHUB_ENV" + echo "$EOF" >> "$GITHUB_ENV" + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - name: Create comment + if: github.head_ref != '' + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PR_NUMBER: ${{ github.event.pull_request.number }} + RUN_ID: ${{ github.run_id }} + REPO: ${{ github.event.repository.name }} + TABLE_DATA: ${{ env.TABLE_TEST_RESULTS }} + run: ./.github/scripts/report_failed_tests.sh diff --git a/.github/workflows/code-checker.yml b/.github/workflows/code-checker.yml new file mode 100644 index 0000000..f9f5ab2 --- /dev/null +++ b/.github/workflows/code-checker.yml @@ -0,0 +1,74 @@ +name: Run linters + +on: + pull_request: + types: [opened, synchronize, reopened, ready_for_review] + push: + branches: + - main + - release/** + +concurrency: + group: ${{ github.head_ref || github.run_id }}-lint + cancel-in-progress: true + +jobs: + deprecations: + name: Deprecated functions + runs-on: ubuntu-latest + if: github.base_ref == 'main' + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + with: + fetch-depth: 0 + - uses: ./.github/actions/set-up-go + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + - run: make ci-deprecations + name: Check deprecations + + codechecker: + name: Code checks + runs-on: ubuntu-latest + if: github.base_ref == 'main' + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + with: + fetch-depth: 0 + - uses: ./.github/actions/set-up-go + with: + 
github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + # Note: if there is a function we want to ignore the nilnil check for, + # You can add 'ignore-nil-nil-function-check' somewhere in the + # godoc for the function. + - run: make ci-vet-codechecker + name: Check custom linters + + format: + name: Format + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: ./.github/actions/set-up-go + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + - name: Go format + run: | + make ci-bootstrap + echo "Using gofumpt version $(go run mvdan.cc/gofumpt -version)" + make fmt + if ! git diff --exit-code; then + echo "Code has formatting errors. Run 'make fmt' to fix" + exit 1 + fi + + semgrep: + name: Semgrep + runs-on: ubuntu-latest + container: + image: returntocorp/semgrep@sha256:ffc6f3567654f9431456d49fd059dfe548f007c494a7eb6cd5a1a3e50d813fb3 + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - name: Run Semgrep Rules + id: semgrep + run: semgrep ci --include '*.go' --config 'tools/semgrep/ci' diff --git a/.github/workflows/enos-lint.yml b/.github/workflows/enos-lint.yml new file mode 100644 index 0000000..616b3e0 --- /dev/null +++ b/.github/workflows/enos-lint.yml @@ -0,0 +1,53 @@ +--- +name: lint-enos + +on: + pull_request: + paths: + - enos/** + +jobs: + metadata: + # Only run this workflow on pull requests from hashicorp/vault branches + # as we need secrets to install enos. + if: "! github.event.pull_request.head.repo.fork" + name: metadata + runs-on: ubuntu-latest + outputs: + runs-on: ${{ steps.metadata.outputs.runs-on }} + version: ${{ steps.metadata.outputs.version }} + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - id: set-product-version + uses: hashicorp/actions-set-product-version@v1 + - id: metadata + run: | + echo "version=${{ steps.set-product-version.outputs.product-version }}" >> "$GITHUB_OUTPUT" + github_repository="${{ github.repository }}" + if [ "${github_repository##*/}" == "vault-enterprise" ] ; then + echo 'runs-on=["self-hosted","ondemand","linux","type=c6a.4xlarge"]' >> "$GITHUB_OUTPUT" + else + echo 'runs-on="custom-linux-xl-vault-latest"' >> "$GITHUB_OUTPUT" + fi + + lint: + needs: metadata + runs-on: ${{ fromJSON(needs.metadata.outputs.runs-on) }} + env: + GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }} + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: hashicorp/setup-terraform@v2 + with: + terraform_wrapper: false + - uses: hashicorp/action-setup-enos@v1 + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + - name: Ensure shellcheck is available for linting + run: which shellcheck || (sudo apt update && sudo apt install -y shellcheck) + - name: lint + working-directory: ./enos + env: + ENOS_VAR_vault_product_version: ${{ needs.metadata.outputs.version }} + run: make lint diff --git a/.github/workflows/enos-release-testing-oss.yml b/.github/workflows/enos-release-testing-oss.yml new file mode 100644 index 0000000..27558ee --- /dev/null +++ b/.github/workflows/enos-release-testing-oss.yml @@ -0,0 +1,72 @@ +name: enos-release-testing-oss + +on: + repository_dispatch: + types: + - enos-release-testing-oss + - enos-release-testing-oss::* + +jobs: + product-metadata: + if: ${{ startsWith(github.event.client_payload.payload.branch, 'release/') }} + runs-on: ubuntu-latest + outputs: + vault-revision: ${{ 
github.event.client_payload.payload.sha }} + vault-version: ${{ github.event.client_payload.payload.version }} + vault-version-package: ${{ steps.get-metadata.outputs.vault-version-package }} + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + with: + # Check out the repository at the same Git SHA that was used to create + # the artifacts to get the correct metadata. + ref: ${{ github.event.client_payload.payload.sha }} + - id: get-metadata + env: + VAULT_VERSION: ${{ github.event.client_payload.payload.version }} + run: | + echo "vault-version-package=$(make ci-get-version-package)" >> "$GITHUB_OUTPUT" + - name: Release Artifact Info + run: | + # shellcheck disable=SC2129 + echo "__Product:__ ${{ github.event.client_payload.payload.product }}" >> "$GITHUB_STEP_SUMMARY" + echo "__Version:__ ${{ github.event.client_payload.payload.version }}" >> "$GITHUB_STEP_SUMMARY" + echo "__Commit:__ ${{ github.event.client_payload.payload.sha }}" >> "$GITHUB_STEP_SUMMARY" + echo "" >> "$GITHUB_STEP_SUMMARY" + echo "[Build Workflow](https://github.com/${{github.event.client_payload.payload.org}}/${{github.event.client_payload.payload.repo}}/actions/runs/${{github.event.client_payload.payload.buildworkflowid}})" >> "$GITHUB_STEP_SUMMARY" + + test: + name: Test ${{ matrix.build-artifact-name }} + if: ${{ startsWith(github.event.client_payload.payload.branch, 'release/') }} + needs: product-metadata + uses: ./.github/workflows/test-run-enos-scenario-matrix.yml + strategy: + fail-fast: false + matrix: + include: + - sample-name: release_ce_linux_amd64_deb + build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_amd64.deb + - sample-name: release_ce_linux_arm64_deb + build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_arm64.deb + - sample-name: release_ce_linux_amd64_rpm + build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1.x86_64.rpm + - sample-name: release_ce_linux_arm64_rpm + build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1.aarch64.rpm + - sample-name: release_ce_linux_amd64_zip + build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_amd64.zip + - sample-name: release_ce_linux_arm64_zip + build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_arm64.zip + with: + build-artifact-name: ${{ matrix.build-artifact-name }} + sample-max: 2 + sample-name: ${{ matrix.sample-name }} + vault-revision: ${{ needs.product-metadata.outputs.vault-revision }} + vault-version: ${{ needs.product-metadata.outputs.vault-version }} + secrets: inherit + + save-metadata: + runs-on: linux + if: always() + needs: test + steps: + - name: Persist metadata + uses: hashicorp/actions-persist-metadata@v1 diff --git a/.github/workflows/enos-run-k8s.yml b/.github/workflows/enos-run-k8s.yml new file mode 100644 index 0000000..adddba4 --- /dev/null +++ b/.github/workflows/enos-run-k8s.yml @@ -0,0 +1,113 @@ +--- +name: enos-k8s + +on: + workflow_call: + inputs: + artifact-build-date: + required: false + type: string + artifact-name: + required: true + type: string + artifact-revision: + required: true + type: string + artifact-version: + required: true + type: string + +env: + ARTIFACT_BUILD_DATE: ${{ inputs.artifact-build-date }} + ARTIFACT_NAME: ${{ inputs.artifact-name }} + ARTIFACT_REVISION: ${{ inputs.artifact-revision }} + ARTIFACT_VERSION: ${{ inputs.artifact-version }} + +jobs: + enos: + name: 
Integration + runs-on: ubuntu-latest + env: + GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + steps: + - name: Checkout + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - name: Set up Terraform + uses: hashicorp/setup-terraform@v2 + with: + # the Terraform wrapper will break Terraform execution in Enos because + # it changes the output to text when we expect it to be JSON. + terraform_wrapper: false + - name: Set up Enos + uses: hashicorp/action-setup-enos@v1 + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + - name: Download Docker Image + id: download + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + with: + name: ${{ inputs.artifact-name }} + path: ./enos/support/downloads + - name: Prepare for scenario execution + env: + IS_ENT: ${{ startsWith(env.ARTIFACT_NAME, 'vault-enterprise' ) }} + run: | + mkdir -p ./enos/support/terraform-plugin-cache + if [ "$IS_ENT" == true ]; then + echo "${{ secrets.VAULT_LICENSE }}" > ./enos/support/vault.hclic || true + echo "edition=ent" >> "$GITHUB_ENV" + echo "edition set to 'ent'" + echo "image_repo=hashicorp/vault-enterprise" >> "$GITHUB_ENV" + echo "image repo set to 'hashicorp/vault-enterprise'" + else + echo "edition=ce" >> "$GITHUB_ENV" + echo "edition set to 'ce'" + echo "image_repo=hashicorp/vault" >> "$GITHUB_ENV" + echo "image repo set to 'hashicorp/vault'" + fi + - name: Run Enos scenario + id: run + # Continue once and retry to handle occasional blips when creating + # infrastructure. + continue-on-error: true + env: + ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }} + ENOS_VAR_terraform_plugin_cache_dir: ../support/terraform-plugin-cache + ENOS_VAR_vault_build_date: ${{ env.ARTIFACT_BUILD_DATE }} + ENOS_VAR_vault_product_version: ${{ env.ARTIFACT_VERSION }} + ENOS_VAR_vault_product_revision: ${{ env.ARTIFACT_REVISION }} + ENOS_VAR_vault_docker_image_archive: ${{steps.download.outputs.download-path}}/${{ env.ARTIFACT_NAME }} + ENOS_VAR_vault_image_repository: ${{ env.image_repo }} + run: | + enos scenario run --timeout 10m0s --chdir ./enos/k8s edition:${{ env.edition }} + - name: Retry Enos scenario + id: run_retry + if: steps.run.outcome == 'failure' + env: + ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }} + ENOS_VAR_terraform_plugin_cache_dir: ../support/terraform-plugin-cache + ENOS_VAR_vault_build_date: ${{ env.ARTIFACT_BUILD_DATE }} + ENOS_VAR_vault_product_version: ${{ env.ARTIFACT_VERSION }} + ENOS_VAR_vault_product_revision: ${{ env.ARTIFACT_REVISION }} + ENOS_VAR_vault_docker_image_archive: ${{steps.download.outputs.download-path}}/${{ env.ARTIFACT_NAME }} + ENOS_VAR_vault_image_repository: ${{ env.image_repo }} + run: | + enos scenario run --timeout 10m0s --chdir ./enos/k8s edition:${{ env.edition }} + - name: Destroy Enos scenario + if: ${{ always() }} + env: + ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }} + ENOS_VAR_terraform_plugin_cache_dir: ./support/terraform-plugin-cache + ENOS_VAR_vault_build_date: ${{ env.ARTIFACT_BUILD_DATE }} + ENOS_VAR_vault_product_version: ${{ env.ARTIFACT_VERSION }} + ENOS_VAR_vault_product_revision: ${{ env.ARTIFACT_REVISION }} + ENOS_VAR_vault_docker_image_archive: ${{steps.download.outputs.download-path}} + ENOS_VAR_vault_image_repository: ${{ env.image_repo }} + run: | + enos scenario destroy --timeout 10m0s --chdir ./enos/k8s edition:${{ env.edition }} + - name: Cleanup Enos runtime directories + if: ${{ always() }} + run: | + rm -rf /tmp/enos* + rm -rf ./enos/support + rm -rf ./enos/k8s/.enos diff 
--git a/.github/workflows/milestone-checker.yml b/.github/workflows/milestone-checker.yml new file mode 100644 index 0000000..0cfaab4 --- /dev/null +++ b/.github/workflows/milestone-checker.yml @@ -0,0 +1,23 @@ +# This workflow checks that there is either a 'pr/no-milestone' label applied to a PR +# or there is a milestone associated with a PR + +name: Check Milestone + +on: + pull_request: + # milestoned and demilestoned work (https://github.com/github/docs/issues/23909) but they aren't listed in the github documentation + types: [opened, synchronize, labeled, unlabeled, milestoned, demilestoned] + # Runs on PRs to main and release branches + branches: + - main + - release/** + +jobs: + # checks that a milestone entry is present for a PR + milestone-check: + # If there is a `pr/no-milestone` label we ignore this check + if: "!contains(github.event.pull_request.labels.*.name, 'pr/no-milestone')" + runs-on: ubuntu-latest + steps: + - name: Check milestone + run: ${{ github.event.pull_request.milestone != null }} diff --git a/.github/workflows/oss.yml b/.github/workflows/oss.yml new file mode 100644 index 0000000..3eaa1f9 --- /dev/null +++ b/.github/workflows/oss.yml @@ -0,0 +1,128 @@ +# Open Source Community Workflows + +name: Project triage +on: + pull_request: + types: [opened, reopened] + # Runs on PRs to main + branches: + - main + + issues: + types: [opened, reopened] + +jobs: + add-to-projects: + # exclude internal PRs + if: github.event.pull_request.head.repo.owner.login != 'hashicorp' && ((github.event.action == 'reopened') || (github.event.action == 'opened')) + name: Add issue or PR to projects + runs-on: ubuntu-latest + steps: + - if: github.event.pull_request != null + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - if: github.event.pull_request != null + uses: dorny/paths-filter@4512585405083f25c027a35db413c2b3b9006d50 # v2.11.1 + id: changes + with: + # derived from CODEOWNERS + filters: | + cryptosec: + - 'builtin/logical/pki/**' + - 'builtin/logical/ssh/**' + - 'builtin/logical/totp/**' + - 'builtin/logical/transit/**' + ecosystem: + - 'builtin/credential/aws/**' + - 'builtin/credential/github/**' + - 'builtin/credential/ldap/**' + - 'builtin/credential/okta/**' + - 'builtin/logical/aws/**' + - 'builtin/logical/cassandra/**' + - 'builtin/logical/consul/**' + - 'builtin/logical/database/**' + - 'builtin/logical/mongodb/**' + - 'builtin/logical/mssql/**' + - 'builtin/logical/mysql/**' + - 'builtin/logical/nomad/**' + - 'builtin/logical/postgresql/**' + - 'builtin/logical/rabbitmq/**' + - 'command/agent/**' + - 'plugins/**' + - 'vault/plugin_catalog.go' + - 'ui/app/components/auth-jwt.js' + - 'ui/app/routes/vault/cluster/oidc-*.js' + devex: + - 'api/**' + - 'command/**' + ui: + - 'ui/**' + + - name: "Default to core board" + run: echo "PROJECT=170" >> "$GITHUB_ENV" + - if: github.event.pull_request != null && steps.changes.outputs.cryptosec == 'true' + run: echo "PROJECT=172" >> "$GITHUB_ENV" + - if: github.event.pull_request != null && steps.changes.outputs.ecosystem == 'true' + run: echo "PROJECT=169" >> "$GITHUB_ENV" + - if: github.event.pull_request != null && steps.changes.outputs.devex == 'true' + run: echo "PROJECT=176" >> "$GITHUB_ENV" + - if: github.event.pull_request != null && steps.changes.outputs.ui == 'true' + run: echo "PROJECT=171" >> "$GITHUB_ENV" + + - uses: actions/add-to-project@a9f041ddd462ed185893ea1024cec954f50dbe42 # v0.3.0 # TSCCR: no entry for repository "actions/add-to-project" + with: + project-url: 
https://github.com/orgs/hashicorp/projects/${{ env.PROJECT }} + github-token: ${{ secrets.TRIAGE_GITHUB_TOKEN }} + + # example of something more complicated: deleting an issue or PR automatically (though this is done in the project workflows already) + # we have to use the GraphQL API for anything involving projects. + # + # get-project: + # name: Get project data + # runs-on: ubuntu-latest + # if: github.event.action == 'closed' || github.event.action == 'deleted' + # outputs: + # project_id: ${{ steps.get-project.outputs.project_id }} + # steps: + # - id: get-project + # name: Get project data + # env: + # GITHUB_TOKEN: ${{ secrets.TRIAGE_GITHUB_TOKEN }} + # ORGANIZATION: hashicorp + # PROJECT_NUMBER: 169 + # run: | + # gh api graphql -f query=' + # query($org: String!, $number: Int!) { + # organization(login: $org){ + # projectV2(number: $number) { + # id + # } + # } + # }' -f org=$ORGANIZATION -F number=$PROJECT_NUMBER > project_data.json + # echo "::set-output name=project_id::$(jq '.data.organization.projectV2.id' project_data.json)" + + # delete-from-project: + # name: Remove issue or PR from project + # needs: [get-project] + # if: github.event.action == 'closed' || github.event.action == 'deleted' + # runs-on: ubuntu-latest + # steps: + # - name: Remove issue or PR + # env: + # GITHUB_TOKEN: ${{ secrets.TRIAGE_GITHUB_TOKEN }} + # run: | + # PROJECT_ID=${{ needs.get-project.outputs.project_id }} + # item_id=${{ github.event.issue.node_id }} + # if [ -z "$item_id" ]; then + # item_id=${{ github.event.pull_request.node_id }} + # fi + # gh api graphql -f query=' + # mutation($project_id: ID!, $item_id: ID!) { + # deleteProjectV2Item( + # input: { + # projectId: $project_id + # itemId: $item_id + # } + # ) { + # deletedItemId + # } + # }' -f project_id=$PROJECT_ID -f item_id=$item_id || true diff --git a/.github/workflows/remove-labels.yml b/.github/workflows/remove-labels.yml new file mode 100644 index 0000000..014b675 --- /dev/null +++ b/.github/workflows/remove-labels.yml @@ -0,0 +1,19 @@ +name: Autoremove Labels + +on: + issues: + types: [closed] + pull_request_target: + types: [closed] + +jobs: + + RemoveWaitingLabelFromClosedIssueOrPR: + if: github.event.action == 'closed' + runs-on: ubuntu-latest + steps: + - name: Remove triaging labels from closed issues and PRs + uses: actions-ecosystem/action-remove-labels@2ce5d41b4b6aa8503e285553f75ed56e0a40bae0 # v1.3.0 + with: + labels: | + waiting-for-response \ No newline at end of file diff --git a/.github/workflows/security-scan.yml b/.github/workflows/security-scan.yml new file mode 100644 index 0000000..3618601 --- /dev/null +++ b/.github/workflows/security-scan.yml @@ -0,0 +1,85 @@ +name: Security Scan + +on: + push: + branches: [main] + pull_request: + branches: + - 'main' + - '!oss-merge-main*' + +jobs: + scan: + runs-on: ['linux', 'large'] + if: ${{ github.actor != 'dependabot[bot]' || github.actor != 'hc-github-team-secure-vault-core' }} + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + + - name: Set up Go + uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 + with: + cache: false # save cache space for vault builds: https://github.com/hashicorp/vault/pull/21764 + go-version-file: .go-version + + - name: Set up Python + uses: actions/setup-python@bd6b4b6205c4dbad673328db7b31b7fab9e241c0 # v4.6.1 + with: + python-version: 3.x + + - name: Clone Security Scanner repo + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + with: + repository: 
hashicorp/security-scanner + token: ${{ secrets.HASHIBOT_PRODSEC_GITHUB_TOKEN }} + path: security-scanner + ref: 52d94588851f38a416f11c1e727131b3c8b0dd4d + + - name: Install dependencies + shell: bash + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + mkdir "$HOME/.bin" + cd "$GITHUB_WORKSPACE/security-scanner/pkg/sdk/examples/scan-plugin-semgrep" + go build -o scan-plugin-semgrep . + mv scan-plugin-semgrep "$HOME/.bin" + + cd "$GITHUB_WORKSPACE/security-scanner/pkg/sdk/examples/scan-plugin-codeql" + go build -o scan-plugin-codeql . + mv scan-plugin-codeql "$HOME/.bin" + + # Semgrep + python3 -m pip install semgrep + + # CodeQL + LATEST=$(gh release list --repo https://github.com/github/codeql-action | cut -f 3 | sort --version-sort | tail -n1) + gh release download --repo https://github.com/github/codeql-action --pattern codeql-bundle-linux64.tar.gz "$LATEST" + tar xf codeql-bundle-linux64.tar.gz -C "$HOME/.bin" + + # Add to PATH + echo "$HOME/.bin" >> "$GITHUB_PATH" + echo "$HOME/.bin/codeql" >> "$GITHUB_PATH" + + - name: Scan + id: scan + uses: ./security-scanner + # env: + # Note: this _should_ work, but causes some issues with Semgrep. + # Instead, rely on filtering in the SARIF Output step. + #SEMGREP_BASELINE_REF: ${{ github.base_ref }} + with: + repository: "$PWD" + cache-build: true + cache-go-modules: false + + - name: SARIF Output + shell: bash + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + cat results.sarif + + - name: Upload SARIF file + uses: github/codeql-action/upload-sarif@46a6823b81f2d7c67ddf123851eea88365bc8a67 # codeql-bundle-v2.13.5 + with: + sarif_file: results.sarif diff --git a/.github/workflows/stable-website.yaml b/.github/workflows/stable-website.yaml new file mode 100644 index 0000000..2fa1833 --- /dev/null +++ b/.github/workflows/stable-website.yaml @@ -0,0 +1,21 @@ +on: + pull_request: + types: + - closed + +jobs: + stable_website_cherry_pick: + if: github.event.pull_request.merged && contains(github.event.pull_request.labels.*.name, 'docs-cherrypick') + runs-on: ubuntu-latest + name: Cherry pick to stable-website branch + steps: + - name: Checkout + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + with: + ref: stable-website + - run: | + git fetch --no-tags --prune origin main + git config --local user.email "action@github.com" + git config --local user.name "GitHub Action" + git cherry-pick ${{ github.sha }} + git push origin stable-website diff --git a/.github/workflows/test-acc-dockeronly-nightly.yml b/.github/workflows/test-acc-dockeronly-nightly.yml new file mode 100644 index 0000000..4a78bb6 --- /dev/null +++ b/.github/workflows/test-acc-dockeronly-nightly.yml @@ -0,0 +1,31 @@ +name: test-go-acceptance-nightly + +on: + # Change to nightly cadence once API-credential-requiring tests are added to the jobs + workflow_dispatch: + +# Currently the jobs here are only for acceptance tests that have no dependencies except for Docker +jobs: + plugins-database: + uses: ./.github/workflows/test-run-acc-tests-for-path.yml + strategy: + matrix: + name: [mongodb, mysql, postgresql] + with: + name: plugins-database-${{ matrix.name }} + path: plugins/database/${{ matrix.name }} + + external: + uses: ./.github/workflows/test-run-acc-tests-for-path.yml + strategy: + matrix: + name: [api, identity, token] + with: + name: external-${{ matrix.name }} + path: vault/external_tests/${{ matrix.name }} + + # Suggestions and tips for adding more acceptance test jobs: + # - the job name is up to you, but it should be derived from the 
path that the tests are found in + # - for instance, "plugins-database" is a job for acceptance tests in the plugins/database path + # - the path will be used with go test wildcards, but don't include the preceding "./" or following "/..." + # - the name parameter is used to construct the log artifact's name, make it something that is related to the path diff --git a/.github/workflows/test-ci-bootstrap.yml b/.github/workflows/test-ci-bootstrap.yml new file mode 100644 index 0000000..4a81261 --- /dev/null +++ b/.github/workflows/test-ci-bootstrap.yml @@ -0,0 +1,51 @@ +name: test-ci-bootstrap + +on: + workflow_dispatch: + pull_request: + branches: + - main + paths: + - enos/ci/** + - .github/workflows/test-ci-bootstrap.yml + push: + branches: + - main + paths: + - enos/ci/** + - .github/workflows/test-ci-bootstrap.yml + +jobs: + bootstrap-ci: + runs-on: ubuntu-latest + env: + TF_WORKSPACE: "${{ github.event.repository.name }}-ci-enos-bootstrap" + TF_VAR_repository: ${{ github.event.repository.name }} + TF_VAR_aws_ssh_public_key: ${{ secrets.SSH_KEY_PUBLIC_CI }} + TF_TOKEN_app_terraform_io: ${{ secrets.TF_API_TOKEN }} + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - name: Set up Terraform + uses: hashicorp/setup-terraform@v2 + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@5fd3084fc36e372ff1fff382a39b10d03659f355 # v2.2.0 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_CI }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_CI }} + aws-region: us-east-1 + role-to-assume: ${{ secrets.AWS_ROLE_ARN_CI }} + role-skip-session-tagging: true + role-duration-seconds: 3600 + - name: Init Terraform + id: tf_init + run: | + terraform -chdir=enos/ci/bootstrap init + - name: Plan Terraform + id: tf_plan + run: | + terraform -chdir=enos/ci/bootstrap plan + - name: Apply Terraform + if: ${{ github.ref == 'refs/heads/main' }} + id: tf_apply + run: | + terraform -chdir=enos/ci/bootstrap apply -auto-approve diff --git a/.github/workflows/test-ci-cleanup.yml b/.github/workflows/test-ci-cleanup.yml new file mode 100644 index 0000000..731a968 --- /dev/null +++ b/.github/workflows/test-ci-cleanup.yml @@ -0,0 +1,88 @@ +name: test-ci-cleanup +on: + schedule: + # * is a special character in YAML so you have to quote this string + - cron: '05 02 * * *' + +jobs: + setup: + runs-on: ubuntu-latest + outputs: + regions: ${{steps.setup.outputs.regions}} + steps: + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@5fd3084fc36e372ff1fff382a39b10d03659f355 # v2.2.0 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_CI }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_CI }} + aws-region: us-east-1 + role-to-assume: ${{ secrets.AWS_ROLE_ARN_CI }} + role-skip-session-tagging: true + role-duration-seconds: 3600 + - name: Get all regions + id: setup + run: | + echo "regions=$(aws ec2 describe-regions --region us-east-1 --output json --query 'Regions[].RegionName' | tr -d '\n ')" >> "$GITHUB_OUTPUT" + + aws-nuke: + needs: setup + runs-on: ubuntu-latest + container: + image: rebuy/aws-nuke + options: + --user root + -t + env: + AWS_ACCESS_KEY_ID: ${{ env.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ env.AWS_SECRET_ACCESS_KEY }} + TIME_LIMIT: "72h" + timeout-minutes: 60 + steps: + - name: Configure AWS credentials + id: aws-configure + uses: aws-actions/configure-aws-credentials@5fd3084fc36e372ff1fff382a39b10d03659f355 # v2.2.0 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_CI 
}}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_CI }}
+          aws-region: us-east-1
+          role-to-assume: ${{ secrets.AWS_ROLE_ARN_CI }}
+          role-skip-session-tagging: true
+          role-duration-seconds: 3600
+          mask-aws-account-id: false
+      - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+      - name: Configure
+        run: |
+          cp enos/ci/aws-nuke.yml .
+          sed -i "s/ACCOUNT_NUM/${{ steps.aws-configure.outputs.aws-account-id }}/g" aws-nuke.yml
+          sed -i "s/TIME_LIMIT/${TIME_LIMIT}/g" aws-nuke.yml
+      # We don't care if cleanup succeeds or fails, because dependencies will be dependencies;
+      # we'll fail on actually actionable things in the quota step afterwards.
+      - name: Clean up abandoned resources
+        # Filter STDERR because it's super noisy about things we don't have access to
+        run: |
+          aws-nuke -c aws-nuke.yml -q --no-dry-run --force 2>/tmp/aws-nuke-error.log || true
+
+  check-quotas:
+    needs: [ setup, aws-nuke ]
+    runs-on: ubuntu-latest
+    container:
+      image: jantman/awslimitchecker
+      env:
+        AWS_ACCESS_KEY_ID: ${{ env.AWS_ACCESS_KEY_ID_CI }}
+        AWS_SECRET_ACCESS_KEY: ${{ env.AWS_SECRET_ACCESS_KEY_CI }}
+    strategy:
+      matrix:
+        region: ${{ fromJSON(needs.setup.outputs.regions) }}
+    steps:
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@5fd3084fc36e372ff1fff382a39b10d03659f355 # v2.2.0
+        with:
+          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_CI }}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_CI }}
+          aws-region: us-east-1
+          role-to-assume: ${{ secrets.AWS_ROLE_ARN_CI }}
+          role-skip-session-tagging: true
+          role-duration-seconds: 3600
+      # Currently just checking VPC limits across all regions, can add more checks here in future
+      - name: Check AWS Quotas
+        run: awslimitchecker -S "VPC" -r ${{matrix.region}}
diff --git a/.github/workflows/test-enos-scenario-ui.yml b/.github/workflows/test-enos-scenario-ui.yml
new file mode 100644
index 0000000..fc348ce
--- /dev/null
+++ b/.github/workflows/test-enos-scenario-ui.yml
@@ -0,0 +1,145 @@
+---
+name: Vault UI Tests
+
+on:
+  workflow_call:
+    inputs:
+      test_filter:
+        type: string
+        description: "A filter to limit the ui tests to. Will be appended to the ember test command as '-f='"
+        required: false
+      storage_backend:
+        type: string
+        description: "The storage backend to use, either 'raft' or 'consul'"
+        default: raft
+  workflow_dispatch:
+    inputs:
+      test_filter:
+        type: string
+        description: "A filter to limit the ui tests to.
Will be appended to the ember test command as '-f='" + required: false + storage_backend: + description: "The storage backend to use, either 'raft' or 'consul'" + required: true + default: raft + type: choice + options: + - raft + - consul + +jobs: + get-metadata: + name: Get metadata + runs-on: ubuntu-latest + outputs: + runs-on: ${{ steps.get-metadata.outputs.runs-on }} + vault_edition: ${{ steps.get-metadata.outputs.vault_edition }} + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - id: get-metadata + env: + IS_ENT: ${{ startsWith(github.event.repository.name, 'vault-enterprise' ) }} + run: | + if [ "$IS_ENT" == true ]; then + echo "detected vault_edition=ent" + echo "runs-on=['self-hosted', 'ondemand', 'os=linux', 'type=m5d.4xlarge']" >> "$GITHUB_OUTPUT" + echo "vault_edition=ent" >> "$GITHUB_OUTPUT" + else + echo "detected vault_edition=oss" + echo "runs-on=\"custom-linux-xl-vault-latest\"" >> "$GITHUB_OUTPUT" + echo "vault_edition=oss" >> "$GITHUB_OUTPUT" + fi + + run-ui-tests: + name: Run UI Tests + needs: get-metadata + runs-on: ${{ fromJSON(needs.get-metadata.outputs.runs-on) }} + timeout-minutes: 90 + env: + GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + # Pass in enos variables + ENOS_VAR_aws_region: us-east-1 + ENOS_VAR_aws_ssh_keypair_name: ${{ github.event.repository.name }}-ci-ssh-key + ENOS_VAR_aws_ssh_private_key_path: ./support/private_key.pem + ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }} + ENOS_VAR_terraform_plugin_cache_dir: ./support/terraform-plugin-cache + ENOS_VAR_vault_license_path: ./support/vault.hclic + GOPRIVATE: github.com/hashicorp + steps: + - name: Checkout + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: ./.github/actions/set-up-go + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + - uses: hashicorp/action-setup-enos@v1 + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + - name: Set Up Git + run: git config --global url."https://${{ secrets.elevated_github_token }}:@github.com".insteadOf "https://github.com" + - name: Set Up Node + uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 + with: + node-version-file: './ui/package.json' + - name: Set Up Terraform + uses: hashicorp/setup-terraform@v2 + with: + cli_config_credentials_token: ${{ secrets.TF_API_TOKEN }} + terraform_wrapper: false + - name: Prepare scenario dependencies + run: | + mkdir -p ./enos/support/terraform-plugin-cache + echo "${{ secrets.SSH_KEY_PRIVATE_CI }}" > ./enos/support/private_key.pem + chmod 600 ./enos/support/private_key.pem + - name: Set Up Vault Enterprise License + if: contains(github.event.repository.name, 'ent') + run: echo "${{ secrets.VAULT_LICENSE }}" > ./enos/support/vault.hclic || true + - name: Check Chrome Installed + id: chrome-check + run: echo "chrome-version=$(chrome --version 2> /dev/null || google-chrome --version 2> /dev/null || google-chrome-stable --version 2> /dev/null || echo 'not-installed')" >> "$GITHUB_OUTPUT" + - name: Install Chrome Dependencies + if: steps.chrome-check.outputs.chrome-version == 'not-installed' + run: | + sudo apt update + sudo apt install -y libnss3-dev libgdk-pixbuf2.0-dev libgtk-3-dev libxss-dev libasound2 + - name: Install Chrome + if: steps.chrome-check.outputs.chrome-version == 'not-installed' + uses: browser-actions/setup-chrome@c485fa3bab6be59dce18dbc18ef6ab7cbc8ff5f1 # v1.2.0 + - name: Installed Chrome Version + run: | + echo "Installed Chrome Version = [$(chrome --version 2> /dev/null || 
google-chrome --version 2> /dev/null || google-chrome-stable --version 2> /dev/null)]" + - name: Configure AWS credentials from Test account + uses: aws-actions/configure-aws-credentials@5fd3084fc36e372ff1fff382a39b10d03659f355 # v2.2.0 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_CI }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_CI }} + aws-region: us-east-1 + role-to-assume: ${{ secrets.AWS_ROLE_ARN_CI }} + role-skip-session-tagging: true + role-duration-seconds: 3600 + - name: Set Up Cluster + id: setup_cluster + env: + ENOS_VAR_ui_run_tests: false + # Continue once and retry to handle occasional blips when creating infrastructure. + continue-on-error: true + run: enos scenario launch --timeout 60m0s --chdir ./enos ui edition:${{ needs.get-metadata.outputs.vault_edition }} backend:${{ inputs.storage_backend }} + - name: Retry Set Up Cluster + id: setup_cluster_retry + if: steps.setup_cluster.outcome == 'failure' + env: + ENOS_VAR_ui_run_tests: false + run: enos scenario launch --timeout 60m0s --chdir ./enos ui edition:${{ needs.get-metadata.outputs.vault_edition }} backend:${{ inputs.storage_backend }} + - name: Run UI Tests + id: run_ui_tests + env: + ENOS_VAR_ui_test_filter: "${{ inputs.test_filter }}" + run: enos scenario run --timeout 60m0s --chdir ./enos ui edition:${{ needs.get-metadata.outputs.vault_edition }} backend:${{ inputs.storage_backend }} + - name: Ensure scenario has been destroyed + if: ${{ always() }} + run: enos scenario destroy --timeout 60m0s --chdir ./enos ui edition:${{ needs.get-metadata.outputs.vault_edition }} backend:${{ inputs.storage_backend }} + - name: Clean up Enos runtime directories + if: ${{ always() }} + run: | + rm -rf /tmp/enos* + rm -rf ./enos/support + rm -rf ./enos/.enos diff --git a/.github/workflows/test-go.yml b/.github/workflows/test-go.yml new file mode 100644 index 0000000..0f8c0f0 --- /dev/null +++ b/.github/workflows/test-go.yml @@ -0,0 +1,424 @@ +on: + workflow_call: + inputs: + go-arch: + description: The execution architecture (arm, amd64, etc.) + required: true + type: string + enterprise: + description: A flag indicating if this workflow is executing for the enterprise repository. + required: true + type: string + total-runners: + description: Number of runners to use for executing non-binary tests. + required: true + type: string + binary-tests: + description: Whether to run the binary tests. + required: false + default: false + type: boolean + env-vars: + description: A map of environment variables as JSON. + required: false + type: string + default: '{}' + extra-flags: + description: A space-separated list of additional build flags. + required: false + type: string + default: '' + runs-on: + description: An expression indicating which kind of runners to use. + required: false + type: string + default: ubuntu-latest + go-tags: + description: A comma-separated list of additional build tags to consider satisfied during the build. + required: false + type: string + name: + description: A suffix to append to archived test results + required: false + default: '' + type: string + go-test-parallelism: + description: The parallelism parameter for Go tests + required: false + default: 20 + type: number + timeout-minutes: + description: The maximum number of minutes that this workflow should run + required: false + default: 60 + type: number + testonly: + description: Whether to run the tests tagged with testonly. 
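+        # Illustrative only: the testonly gating is done with a Go build tag,
+        # so a comparable local invocation would look something like
+        #   go test -tags=testonly ./vault/external_tests/...
+        # Files guarded by the testonly constraint are compiled only when the
+        # tag is supplied; the CI package list itself is assembled by the
+        # test-matrix job below.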
+ required: false + default: false + type: boolean + checkout-ref: + description: The ref to use for checkout. + required: false + default: ${{ github.ref }} + type: string + +env: ${{ fromJSON(inputs.env-vars) }} + +jobs: + test-matrix: + permissions: + id-token: write # Note: this permission is explicitly required for Vault auth + contents: read + runs-on: ${{ fromJSON(inputs.runs-on) }} + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + with: + ref: ${{ inputs.checkout-ref }} + - uses: ./.github/actions/set-up-go + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + - name: Authenticate to Vault + id: vault-auth + if: github.repository == 'hashicorp/vault-enterprise' + run: vault-auth + - name: Fetch Secrets + id: secrets + if: github.repository == 'hashicorp/vault-enterprise' + uses: hashicorp/vault-action@130d1f5f4fe645bb6c83e4225c04d64cfb62de6e + with: + url: ${{ steps.vault-auth.outputs.addr }} + caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }} + token: ${{ steps.vault-auth.outputs.token }} + secrets: | + kv/data/github/${{ github.repository }}/datadog-ci DATADOG_API_KEY; + kv/data/github/${{ github.repository }}/github-token username-and-token | github-token; + kv/data/github/${{ github.repository }}/license license_1 | VAULT_LICENSE_CI; + kv/data/github/${{ github.repository }}/license license_2 | VAULT_LICENSE_2; + kv/data/github/${{ github.repository }}/hcp-link HCP_API_ADDRESS; + kv/data/github/${{ github.repository }}/hcp-link HCP_AUTH_URL; + kv/data/github/${{ github.repository }}/hcp-link HCP_CLIENT_ID; + kv/data/github/${{ github.repository }}/hcp-link HCP_CLIENT_SECRET; + kv/data/github/${{ github.repository }}/hcp-link HCP_RESOURCE_ID; + - id: setup-git-private + name: Setup Git configuration (private) + if: github.repository == 'hashicorp/vault-enterprise' + run: | + git config --global url."https://${{ steps.secrets.outputs.github-token }}@github.com".insteadOf https://github.com + - id: setup-git-public + name: Setup Git configuration (public) + if: github.repository != 'hashicorp/vault-enterprise' + run: | + git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN}}@github.com".insteadOf https://github.com + - uses: ./.github/actions/set-up-gotestsum + - run: mkdir -p test-results/go-test + - uses: actions/cache/restore@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 + with: + path: test-results/go-test + key: go-test-reports-${{ github.run_number }} + restore-keys: go-test-reports- + - name: List cached results + id: list-cached-results + run: ls -lhR test-results/go-test + - name: Build matrix excluding binary, integration, and testonly tests + id: build-non-binary + if: ${{ !inputs.testonly }} + env: + GOPRIVATE: github.com/hashicorp/* + run: | + # testonly tests need additional build tag though let's exclude them anyway for clarity + ( + go list ./... 
| grep -v "_binary" | grep -v "vault/integ" | grep -v "testonly" | gotestsum tool ci-matrix --debug \ + --partitions "${{ inputs.total-runners }}" \ + --timing-files 'test-results/go-test/*.json' > matrix.json + ) + - name: Build matrix for tests tagged with testonly + if: ${{ inputs.testonly }} + env: + GOPRIVATE: github.com/hashicorp/* + run: | + set -exo pipefail + # enable glob expansion + shopt -s nullglob + # testonly tagged tests need an additional tag to be included + # also running some extra tests for sanity checking with the testonly build tag + ( + go list -tags=testonly ./vault/external_tests/{kv,token,*replication-perf*,*testonly*} ./vault/ | gotestsum tool ci-matrix --debug \ + --partitions "${{ inputs.total-runners }}" \ + --timing-files 'test-results/go-test/*.json' > matrix.json + ) + # disable glob expansion + shopt -u nullglob + - name: Capture list of binary tests + if: inputs.binary-tests + id: list-binary-tests + run: | + LIST="$(go list ./... | grep "_binary" | xargs)" + echo "list=$LIST" >> "$GITHUB_OUTPUT" + - name: Build complete matrix + id: build + run: | + set -exo pipefail + matrix_file="matrix.json" + if [ "${{ inputs.binary-tests}}" == "true" ] && [ -n "${{ steps.list-binary-tests.outputs.list }}" ]; then + export BINARY_TESTS="${{ steps.list-binary-tests.outputs.list }}" + jq --arg BINARY "${BINARY_TESTS}" --arg BINARY_INDEX "${{ inputs.total-runners }}" \ + '.include += [{ + "id": $BINARY_INDEX, + "estimatedRuntime": "N/A", + "packages": $BINARY, + "description": "partition $BINARY_INDEX - binary test packages" + }]' matrix.json > new-matrix.json + matrix_file="new-matrix.json" + fi + # convert the json to a map keyed by id + ( + echo -n "matrix=" + jq -c \ + '.include | map( { (.id|tostring): . } ) | add' "$matrix_file" + ) >> "$GITHUB_OUTPUT" + # extract an array of ids from the json + ( + echo -n "matrix_ids=" + jq -c \ + '[ .include[].id | tostring ]' "$matrix_file" + ) >> "$GITHUB_OUTPUT" + outputs: + matrix: ${{ steps.build.outputs.matrix }} + matrix_ids: ${{ steps.build.outputs.matrix_ids }} + + test-go: + needs: test-matrix + permissions: + actions: read + contents: read + id-token: write # Note: this permission is explicitly required for Vault auth + runs-on: ${{ fromJSON(inputs.runs-on) }} + strategy: + fail-fast: false + matrix: + id: ${{ fromJSON(needs.test-matrix.outputs.matrix_ids) }} + env: + GOPRIVATE: github.com/hashicorp/* + TIMEOUT_IN_MINUTES: ${{ inputs.timeout-minutes }} + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + with: + ref: ${{ inputs.checkout-ref }} + - uses: ./.github/actions/set-up-go + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + - name: Authenticate to Vault + id: vault-auth + if: github.repository == 'hashicorp/vault-enterprise' + run: vault-auth + - name: Fetch Secrets + id: secrets + if: github.repository == 'hashicorp/vault-enterprise' + uses: hashicorp/vault-action@130d1f5f4fe645bb6c83e4225c04d64cfb62de6e + with: + url: ${{ steps.vault-auth.outputs.addr }} + caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }} + token: ${{ steps.vault-auth.outputs.token }} + secrets: | + kv/data/github/${{ github.repository }}/datadog-ci DATADOG_API_KEY; + kv/data/github/${{ github.repository }}/github-token username-and-token | github-token; + kv/data/github/${{ github.repository }}/license license_1 | VAULT_LICENSE_CI; + kv/data/github/${{ github.repository }}/license license_2 | VAULT_LICENSE_2; + kv/data/github/${{ github.repository }}/hcp-link HCP_API_ADDRESS; + 
kv/data/github/${{ github.repository }}/hcp-link HCP_AUTH_URL; + kv/data/github/${{ github.repository }}/hcp-link HCP_CLIENT_ID; + kv/data/github/${{ github.repository }}/hcp-link HCP_CLIENT_SECRET; + kv/data/github/${{ github.repository }}/hcp-link HCP_RESOURCE_ID; + - id: setup-git-private + name: Setup Git configuration (private) + if: github.repository == 'hashicorp/vault-enterprise' + run: | + git config --global url."https://${{ steps.secrets.outputs.github-token }}@github.com".insteadOf https://github.com + - id: setup-git-public + name: Setup Git configuration (public) + if: github.repository != 'hashicorp/vault-enterprise' + run: | + git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN}}@github.com".insteadOf https://github.com + - id: build + if: inputs.binary-tests && matrix.id == inputs.total-runners + env: + GOPRIVATE: github.com/hashicorp/* + run: time make ci-bootstrap dev + - uses: ./.github/actions/set-up-gotestsum + - id: run-go-tests + name: Run Go tests + timeout-minutes: ${{ fromJSON(env.TIMEOUT_IN_MINUTES) }} + env: + COMMIT_SHA: ${{ github.sha }} + run: | + set -exo pipefail + + # Build the dynamically generated source files. + make prep + + packages=$(echo "${{ toJSON(needs.test-matrix.outputs.matrix) }}" | jq -c -r --arg id "${{ matrix.id }}" '.[$id] | .packages') + + if [ -z "$packages" ]; then + echo "no test packages to run" + exit 1 + fi + # We don't want VAULT_LICENSE set when running Go tests, because that's + # not what developers have in their environments and it could break some + # tests; it would be like setting VAULT_TOKEN. However some non-Go + # CI commands, like the UI tests, shouldn't have to worry about licensing. + # So we provide the tests which want an externally supplied license with licenses + # via the VAULT_LICENSE_CI and VAULT_LICENSE_2 environment variables, and here we unset it. + # shellcheck disable=SC2034 + VAULT_LICENSE= + + # Assign test licenses to relevant variables if they aren't already + if [[ ${{ github.repository }} == 'hashicorp/vault' ]]; then + export VAULT_LICENSE_CI=${{ secrets.ci_license }} + export VAULT_LICENSE_2=${{ secrets.ci_license_2 }} + export HCP_API_ADDRESS=${{ secrets.HCP_API_ADDRESS }} + export HCP_AUTH_URL=${{ secrets.HCP_AUTH_URL }} + export HCP_CLIENT_ID=${{ secrets.HCP_CLIENT_ID }} + export HCP_CLIENT_SECRET=${{ secrets.HCP_CLIENT_SECRET }} + export HCP_RESOURCE_ID=${{ secrets.HCP_RESOURCE_ID }} + # Temporarily removing this variable to cause HCP Link tests + # to be skipped. 
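+            # (Presumably the HCP Link tests detect that HCP_SCADA_ADDRESS is
+            # unset and skip themselves, so keeping this export commented out
+            # disables them without any further configuration change.)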
+            #export HCP_SCADA_ADDRESS=${{ secrets.HCP_SCADA_ADDRESS }}
+          fi
+
+          if [ -f bin/vault ]; then
+            VAULT_BINARY="$(pwd)/bin/vault"
+            export VAULT_BINARY
+          fi
+
+          # On a release branch, add a flag to rerun failed tests
+          # shellcheck disable=SC2193 # can get false positive for this comparison
+          if [[ "${{ github.base_ref }}" == release/* ]] || [[ -z "${{ github.base_ref }}" && "${{ github.ref_name }}" == release/* ]]
+          then
+            RERUN_FAILS="--rerun-fails"
+          fi
+
+          # shellcheck disable=SC2086 # can't quote RERUN_FAILS
+          GOARCH=${{ inputs.go-arch }} \
+            gotestsum --format=short-verbose \
+              --junitfile test-results/go-test/results-${{ matrix.id }}.xml \
+              --jsonfile test-results/go-test/results-${{ matrix.id }}.json \
+              --jsonfile-timing-events failure-summary-${{ matrix.id }}${{ inputs.name != '' && '-' || '' }}${{ inputs.name }}.json \
+              $RERUN_FAILS \
+              --packages "$packages" \
+              -- \
+              -tags "${{ inputs.go-tags }}" \
+              -timeout=${{ env.TIMEOUT_IN_MINUTES }}m \
+              -parallel=${{ inputs.go-test-parallelism }} \
+              ${{ inputs.extra-flags }} \
+      - name: Prepare datadog-ci
+        if: github.repository == 'hashicorp/vault' && (success() || failure())
+        continue-on-error: true
+        run: |
+          curl -L --fail "https://github.com/DataDog/datadog-ci/releases/latest/download/datadog-ci_linux-x64" --output "/usr/local/bin/datadog-ci"
+          chmod +x /usr/local/bin/datadog-ci
+      - name: Upload test results to DataDog
+        continue-on-error: true
+        env:
+          DD_ENV: ci
+        run: |
+          if [[ ${{ github.repository }} == 'hashicorp/vault' ]]; then
+            export DATADOG_API_KEY=${{ secrets.DATADOG_API_KEY }}
+          fi
+          datadog-ci junit upload --service "$GITHUB_REPOSITORY" test-results/go-test/results-${{ matrix.id }}.xml
+        if: success() || failure()
+      - name: Archive test results
+        uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
+        with:
+          name: test-results${{ inputs.name != '' && '-' || '' }}${{ inputs.name }}
+          path: test-results/go-test
+        if: success() || failure()
+      # GitHub Actions doesn't expose the job ID or the URL to the job execution,
+      # so we have to fetch it from the API
+      - name: Fetch job logs URL
+        uses: actions/github-script@d7906e4ad0b1822421a7e6a35d5ca353c962f410 # v6.4.1
+        if: success() || failure()
+        continue-on-error: true
+        with:
+          retries: 3
+          script: |
+            // We surround the whole script with a try-catch block, to avoid each of the matrix jobs
+            // displaying an error in the GHA workflow run annotations, which gets very noisy.
+            // If an error occurs, it will be logged so that we don't lose any information about the reason for failure.
+            try {
+              const fs = require("fs");
+              const result = await github.rest.actions.listJobsForWorkflowRun({
+                owner: context.repo.owner,
+                per_page: 100,
+                repo: context.repo.repo,
+                run_id: context.runId,
+              });
+
+              // Determine what job name to use for the query. These values are hardcoded, because GHA doesn't
+              // expose them in any of the contexts available within a workflow run.
+              let prefixToSearchFor;
+              switch ("${{ inputs.name }}") {
+                case "race":
+                  prefixToSearchFor = 'Run Go tests with data race detection / test-go (${{ matrix.id }})'
+                  break
+                case "fips":
+                  prefixToSearchFor = 'Run Go tests with FIPS configuration / test-go (${{ matrix.id }})'
+                  break
+                default:
+                  prefixToSearchFor = 'Run Go tests / test-go (${{ matrix.id }})'
+              }
+
+              const jobData = result.data.jobs.filter(
+                (job) => job.name.startsWith(prefixToSearchFor)
+              );
+              const url = jobData[0].html_url;
+              const envVarName = "GH_JOB_URL";
+              const envVar = envVarName + "=" + url;
+              const envFile = process.env.GITHUB_ENV;
+
+              fs.appendFile(envFile, envVar, (err) => {
+                if (err) throw err;
+                console.log("Successfully set " + envVarName + " to: " + url);
+              });
+            } catch (error) {
+              console.log("Error: " + error);
+              return
+            }
+      - name: Prepare failure summary
+        if: success() || failure()
+        continue-on-error: true
+        run: |
+          # This jq query filters out successful tests, leaving only the failures.
+          # Then, it formats the results into rows of a Markdown table.
+          # An example row will resemble this:
+          # | github.com/hashicorp/vault/package | TestName | fips | 0 | 2 | [view results](github.com/link-to-logs) |
+          jq -r -n 'inputs
+          | select(.Action == "fail")
+          | "| ${{inputs.name}} | \(.Package) | \(.Test // "-") | \(.Elapsed) | ${{ matrix.id }} | [view test results :scroll:](${{ env.GH_JOB_URL }}) |"' \
+          failure-summary-${{ matrix.id }}${{ inputs.name != '' && '-' || '' }}${{inputs.name}}.json \
+          >> failure-summary-${{ matrix.id }}${{ inputs.name != '' && '-' || '' }}${{inputs.name}}.md
+      - name: Upload failure summary
+        uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
+        if: success() || failure()
+        with:
+          name: failure-summary
+          path: failure-summary-${{ matrix.id }}${{ inputs.name != '' && '-' || '' }}${{inputs.name}}.md
+
+  test-collect-reports:
+    if: ${{ ! cancelled() }}
+    needs: test-go
+    runs-on: ${{ fromJSON(inputs.runs-on) }}
+    steps:
+      - uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1
+        with:
+          path: test-results/go-test
+          key: go-test-reports-${{ github.run_number }}
+          restore-keys: go-test-reports-
+      - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
+        with:
+          name: test-results
+          path: test-results/go-test
+      - run: |
+          ls -lhR test-results/go-test
+          find test-results/go-test -mindepth 1 -mtime +3 -delete
+          ls -lhR test-results/go-test
diff --git a/.github/workflows/test-run-acc-tests-for-path.yml b/.github/workflows/test-run-acc-tests-for-path.yml
new file mode 100644
index 0000000..b3096a3
--- /dev/null
+++ b/.github/workflows/test-run-acc-tests-for-path.yml
@@ -0,0 +1,32 @@
+name: test-run-go-tests-for-path
+
+on:
+  workflow_call:
+    inputs:
+      name:
+        description: 'The name to use that will appear in the output log file artifact'
+        required: true
+        type: string
+      path:
+        description: 'The path to the test without the preceding "./" or following "/..." e.g. go test -v ./$path/...'
+        required: true
+        type: string
+  # We will need to add the capacity for receiving passed secrets once we get to the tests that require API credentials
+
+env:
+  VAULT_ACC: 1
+
+jobs:
+  go-test:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+      - uses: ./.github/actions/set-up-go
+        with:
+          github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
+      - run: go test -v ./${{ inputs.path }}/... 2>&1 | tee ${{ inputs.name }}.txt
+      - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
+        with:
+          name: ${{ inputs.name }}-output
+          path: ${{ inputs.name }}.txt
+          retention-days: 2
diff --git a/.github/workflows/test-run-enos-scenario-matrix.yml b/.github/workflows/test-run-enos-scenario-matrix.yml
new file mode 100644
index 0000000..c05b429
--- /dev/null
+++ b/.github/workflows/test-run-enos-scenario-matrix.yml
@@ -0,0 +1,197 @@
+---
+name: enos
+
+on:
+  # Only trigger this workflow using workflow_call. This workflow requires many
+  # secrets that must be inherited from the caller workflow.
+  workflow_call:
+    inputs:
+      # The name of the artifact that we're going to use for testing. This should
+      # match exactly to build artifacts uploaded to GitHub and Artifactory.
+      build-artifact-name:
+        required: true
+        type: string
+      # The maximum number of scenarios to include in the test sample.
+      sample-max:
+        default: 1
+        type: number
+      # The name of the enos scenario sample that defines compatible scenarios we
+      # can test with.
+      sample-name:
+        required: true
+        type: string
+      runs-on:
+        # NOTE: The value should be JSON encoded as that's the only way we can
+        # pass arrays with workflow_call.
+        type: string
+        required: false
+        default: '"ubuntu-latest"'
+      ssh-key-name:
+        type: string
+        default: ${{ github.event.repository.name }}-ci-ssh-key
+      vault-edition:
+        required: false
+        type: string
+        default: oss
+      # The Git commit SHA used as the revision when building vault
+      vault-revision:
+        required: true
+        type: string
+      vault-version:
+        required: true
+        type: string
+
+jobs:
+  metadata:
+    runs-on: ${{ fromJSON(inputs.runs-on) }}
+    outputs:
+      build-date: ${{ steps.metadata.outputs.build-date }}
+      sample: ${{ steps.metadata.outputs.sample }}
+      vault-version: ${{ steps.metadata.outputs.vault-version }}
+    steps:
+      - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+        with:
+          ref: ${{ inputs.vault-revision }}
+      - uses: hashicorp/action-setup-enos@v1
+        with:
+          github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
+      - id: metadata
+        run: |
+          echo "build-date=$(make ci-get-date)" >> "$GITHUB_OUTPUT"
+          sample="$(enos scenario sample observe ${{ inputs.sample-name }} --chdir ./enos --min 1 --max ${{ inputs.sample-max }} --seed "$(date +%s%N)" --format json | jq -c ".observation.elements")"
+          echo "sample=$sample"
+          echo "sample=$sample" >> "$GITHUB_OUTPUT"
+          if [[ "${{ inputs.vault-edition }}" == "oss" ]]; then
+            echo "vault-version=${{ inputs.vault-version }}" >> "$GITHUB_OUTPUT"
+          else
+            # shellcheck disable=2001
+            vault_version="$(sed 's/+ent/+${{ inputs.vault-edition }}/g' <<< '${{ inputs.vault-version }}')"
+            echo "vault-version=$vault_version"
+            echo "vault-version=$vault_version" >> "$GITHUB_OUTPUT"
+          fi
+
+  # Run the Enos test scenario(s)
+  run:
+    needs: metadata
+    name: run ${{ matrix.scenario.id.filter }}
+    strategy:
+      fail-fast: false # don't fail as that can skip required cleanup steps for jobs
+      matrix:
+        include: ${{ fromJSON(needs.metadata.outputs.sample) }}
+    runs-on: ${{ fromJSON(inputs.runs-on) }}
+    env:
+      GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
+      # Pass in enos variables
+      ENOS_VAR_aws_region: ${{ matrix.attributes.aws_region }}
+      ENOS_VAR_aws_ssh_keypair_name: ${{ inputs.ssh-key-name }}
+      ENOS_VAR_aws_ssh_private_key_path: ./support/private_key.pem
+      ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }}
+      ENOS_VAR_artifactory_username: ${{ secrets.ARTIFACTORY_USER }}
+      ENOS_VAR_artifactory_token: ${{ secrets.ARTIFACTORY_TOKEN
}} + ENOS_VAR_terraform_plugin_cache_dir: ./support/terraform-plugin-cache + ENOS_VAR_vault_artifact_path: ./support/downloads/${{ inputs.build-artifact-name }} + ENOS_VAR_vault_build_date: ${{ needs.metadata.outputs.build-date }} + ENOS_VAR_vault_product_version: ${{ needs.metadata.outputs.vault-version }} + ENOS_VAR_vault_revision: ${{ inputs.vault-revision }} + ENOS_VAR_vault_license_path: ./support/vault.hclic + ENOS_DEBUG_DATA_ROOT_DIR: /tmp/enos-debug-data + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - uses: hashicorp/setup-terraform@v2 + with: + # the Terraform wrapper will break Terraform execution in Enos because + # it changes the output to text when we expect it to be JSON. + terraform_wrapper: false + - uses: aws-actions/configure-aws-credentials@5fd3084fc36e372ff1fff382a39b10d03659f355 # v2.2.0 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_CI }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_CI }} + aws-region: ${{ matrix.attributes.aws_region }} + role-to-assume: ${{ secrets.AWS_ROLE_ARN_CI }} + role-skip-session-tagging: true + role-duration-seconds: 3600 + - uses: hashicorp/action-setup-enos@v1 + with: + github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} + - name: Prepare scenario dependencies + id: prepare_scenario + run: | + mkdir -p "./enos/support/terraform-plugin-cache" + echo "${{ secrets.SSH_KEY_PRIVATE_CI }}" > "./enos/support/private_key.pem" + chmod 600 "./enos/support/private_key.pem" + echo "debug_data_artifact_name=enos-debug-data_$(echo "${{ matrix.scenario }}" | sed -e 's/ /_/g' | sed -e 's/:/=/g')" >> "$GITHUB_OUTPUT" + - if: contains(inputs.sample-name, 'build') + uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + with: + name: ${{ inputs.build-artifact-name }} + path: ./enos/support/downloads + - if: contains(inputs.sample-name, 'ent') + name: Configure Vault license + run: echo "${{ secrets.VAULT_LICENSE }}" > ./enos/support/vault.hclic || true + - id: launch + name: enos scenario launch ${{ matrix.scenario.id.filter }} + # Continue once and retry to handle occasional blips when creating infrastructure. + continue-on-error: true + run: enos scenario launch --timeout 60m0s --chdir ./enos ${{ matrix.scenario.id.filter }} + - if: steps.launch.outcome == 'failure' + id: launch_retry + name: Retry enos scenario launch ${{ matrix.scenario.id.filter }} + run: enos scenario launch --timeout 60m0s --chdir ./enos ${{ matrix.scenario.id.filter }} + - name: Upload Debug Data + if: failure() + uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 + with: + # The name of the artifact is the same as the matrix scenario name with the spaces replaced with underscores and colons replaced by equals. 
+          name: ${{ steps.prepare_scenario.outputs.debug_data_artifact_name }}
+          path: ${{ env.ENOS_DEBUG_DATA_ROOT_DIR }}
+          retention-days: 30
+        continue-on-error: true
+      - if: ${{ always() }}
+        id: destroy
+        name: enos scenario destroy ${{ matrix.scenario.id.filter }}
+        continue-on-error: true
+        run: enos scenario destroy --timeout 60m0s --chdir ./enos ${{ matrix.scenario.id.filter }}
+      - if: steps.destroy.outcome == 'failure'
+        id: destroy_retry
+        name: Retry enos scenario destroy ${{ matrix.scenario.id.filter }}
+        continue-on-error: true
+        run: enos scenario destroy --timeout 60m0s --chdir ./enos ${{ matrix.scenario.id.filter }}
+      - name: Clean up Enos runtime directories
+        id: cleanup
+        if: ${{ always() }}
+        continue-on-error: true
+        run: |
+          rm -rf /tmp/enos*
+          rm -rf ./enos/support
+          rm -rf ./enos/.enos
+      # Send slack notifications to #feed-vault-enos-failures if any of our enos scenario commands fail.
+      # There is an incoming webhook set up on the "Enos Vault Failure Bot" Slackbot:
+      # https://api.slack.com/apps/A05E31CH1LG/incoming-webhooks
+      - if: ${{ always() && ! cancelled() }}
+        name: Notify launch failed
+        uses: hashicorp/actions-slack-status@v1
+        with:
+          failure-message: "enos scenario launch ${{ matrix.scenario.id.filter }} failed. \nTriggering event: `${{ github.event_name }}` \nActor: `${{ github.actor }}`"
+          status: ${{ steps.launch.outcome }}
+          slack-webhook-url: ${{ secrets.SLACK_WEBHOOK_URL }}
+      - if: ${{ always() && ! cancelled() }}
+        name: Notify retry launch failed
+        uses: hashicorp/actions-slack-status@v1
+        with:
+          failure-message: "retry enos scenario launch ${{ matrix.scenario.id.filter }} failed. \nTriggering event: `${{ github.event_name }}` \nActor: `${{ github.actor }}`"
+          status: ${{ steps.launch_retry.outcome }}
+          slack-webhook-url: ${{ secrets.SLACK_WEBHOOK_URL }}
+      - if: ${{ always() && ! cancelled() }}
+        name: Notify destroy failed
+        uses: hashicorp/actions-slack-status@v1
+        with:
+          failure-message: "enos scenario destroy ${{ matrix.scenario.id.filter }} failed. \nTriggering event: `${{ github.event_name }}` \nActor: `${{ github.actor }}`"
+          status: ${{ steps.destroy.outcome }}
+          slack-webhook-url: ${{ secrets.SLACK_WEBHOOK_URL }}
+      - if: ${{ always() && ! cancelled() }}
+        name: Notify retry destroy failed
+        uses: hashicorp/actions-slack-status@v1
+        with:
+          failure-message: "retry enos scenario destroy ${{ matrix.scenario.id.filter }} failed.
\nTriggering event: `${{ github.event_name }}` \nActor: `${{ github.actor }}`"
+          status: ${{ steps.destroy_retry.outcome }}
+          slack-webhook-url: ${{ secrets.SLACK_WEBHOOK_URL }}
diff --git a/.github/workflows/verify_changes.yml b/.github/workflows/verify_changes.yml
new file mode 100644
index 0000000..3cb0d19
--- /dev/null
+++ b/.github/workflows/verify_changes.yml
@@ -0,0 +1,31 @@
+name: verify_changes
+
+on:
+  workflow_call:
+    outputs:
+      is_docs_change:
+        description: "determines if the changes contain docs"
+        value: ${{ jobs.verify-doc-ui-changes.outputs.is_docs_change }}
+      is_ui_change:
+        description: "determines if the changes contain ui"
+        value: ${{ jobs.verify-doc-ui-changes.outputs.is_ui_change }}
+
+jobs:
+  # verify-doc-ui-changes determines if the changes are only for docs (website) and/or ui
+  verify-doc-ui-changes:
+    runs-on: ubuntu-latest
+    outputs:
+      is_docs_change: ${{ steps.get-changeddir.outputs.is_docs_change }}
+      is_ui_change: ${{ steps.get-changeddir.outputs.is_ui_change }}
+    steps:
+      - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
+          fetch-depth: 0 # Use fetch depth 0 for comparing changes to base branch
+      - name: Get changed directories
+        id: get-changeddir
+        env:
+          TYPE: ${{ github.event_name }}
+          REF_NAME: ${{ github.ref_name }}
+          BASE: ${{ github.base_ref }}
+        run: ./.github/scripts/verify_changes.sh ${{ env.TYPE }} ${{ env.REF_NAME }} ${{ env.BASE }}
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..c320896
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,133 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+.cover
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
+# Other dirs
+/bin/
+/pkg/
+
+# Generated Web UI goes here
+/http/web_ui/*.*
+/http/web_ui/**/*.*
+
+# Vault-specific
+example.hcl
+example.vault.d
+
+# Without this, the *.[568vq] above ignores this folder.
+!**/graphrbac/1.6
+
+# Ruby
+website/vendor
+website/.bundle
+website/build
+website/tmp
+
+# Vagrant
+.vagrant/
+Vagrantfile
+
+# Configs
+*.hcl
+!.copywrite.hcl
+!.release/ci.hcl
+!.release/security-scan.hcl
+!.release/linux/package/etc/vault.d/vault.hcl
+!command/agent/config/test-fixtures/*.hcl
+!command/server/test-fixtures/**/*.hcl
+!enos/**/*.hcl
+
+# Enos
+.enos
+enos-local.vars.hcl
+enos/**/support
+enos/**/kubeconfig
+.terraform
+.terraform.lock.hcl
+.tfstate.*
+
+.DS_Store
+.idea
+.vscode
+
+dist/*
+
+# ignore ctags
+./tags
+
+# Editor backups
+*~
+*.sw[a-z]
+
+# IntelliJ IDEA project files
+.idea
+*.ipr
+*.iml
+
+# compiled output
+ui/dist
+ui/tmp
+ui/root
+
+# dependencies
+ui/node_modules
+ui/bower_components
+
+# misc
+ui/.DS_Store
+ui/.sass-cache
+ui/connect.lock
+ui/coverage/*
+ui/libpeerconnection.log
+ui/npm-debug.log
+ui/test-reports/*
+ui/testem.log
+
+# used for JS acceptance tests
+ui/tests/helpers/vault-keys.js
+ui/vault-ui-integration-server.pid
+
+# for building static assets
+node_modules
+
+# Website
+website/.bundle
+website/build/
+website/npm-debug.log
+website/vendor
+website/.bundle
+website/.cache
+website/assets/node_modules
+website/assets/public
+website/components/node_modules
+
+.buildcache/
+.releaser/
+*.log
+
+tools/godoctests/.bin
+tools/gonilnilfunctions/.bin
+tools/codechecker/.bin
+.ci-bootstrap
diff --git a/.go-version b/.go-version
new file mode 100644
index 0000000..4bb1a22
--- /dev/null
+++ b/.go-version
@@ -0,0 +1 @@
+1.20.11
diff --git a/.hooks/pre-commit b/.hooks/pre-commit
new file mode 100755
index 0000000..d2d52a7
--- /dev/null
+++ b/.hooks/pre-commit
@@ -0,0 +1,78 @@
+#!/usr/bin/env bash
+
+# READ THIS BEFORE MAKING CHANGES:
+#
+# If you want to add a new pre-commit check, here are the rules:
+#
+#   1. Create a bash function for your check (see e.g. ui_lint below).
+#      NOTE: Each function will be called in a sub-shell so you can freely
+#      change directory without worrying about interference.
+#   2. Add the name of the function to the CHECKS variable.
+#   3. If no changes relevant to your new check are staged, then
+#      do not output anything at all - this would be annoying noise.
+#      In this case, call 'return 0' from your check function to return
+#      early without blocking the commit.
+#   4. If any non-trivial check-specific thing has to be invoked,
+#      then output '==> [check description]' as the first line of
+#      output. Each sub-check should output '--> [subcheck description]'
+#      after it has run, indicating success or failure.
+#   5. Call 'block [reason]' to block the commit. This ensures the last
+#      line of output calls out that the commit was blocked - which may not
+#      be obvious from random error messages generated in 4.
+#
+# At the moment, there are no automated tests for this hook, so please run it
+# locally to check you have not broken anything - breaking this will interfere
+# with other people's workflows significantly, so be sure to check everything twice.
+
+set -euo pipefail
+
+# Call block to block the commit with a message.
+block() {
+  echo "$@"
+  echo "Commit blocked - see errors above."
+  exit 1
+}
+
+# Add all check functions to this space separated list.
+# They are executed in this order (see end of file).
+CHECKS="ui_lint backend_lint"
+
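+# As an illustrative sketch only (not a check in this hook), a brand-new check
+# following rules 1-5 above would look roughly like this, where
+# scripts/docs-check.sh is a hypothetical helper script:
+#
+#   docs_lint() {
+#     # Rule 3: silently succeed if no docs changes are staged.
+#     if git diff --name-only --cached --exit-code -- website/ > /dev/null; then
+#       return 0
+#     fi
+#     echo "==> Changes detected in website/: Running docs check..."
+#     ./scripts/docs-check.sh || block "Docs check failed"
+#   }
+#
+# ...and CHECKS above would become "ui_lint backend_lint docs_lint".
+
+# Run ui linter if changes in that dir detected.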
+ui_lint() { + local DIR=ui LINTER=node_modules/.bin/lint-staged + + # Silently succeed if no changes staged for $DIR + if git diff --name-only --cached --exit-code -- $DIR/; then + return 0 + fi + + # Silently succeed if the linter has not been installed. + # We assume that if you're doing UI dev, you will have installed the linter + # by running yarn. + if [ ! -x $DIR/$LINTER ]; then + return 0 + fi + + echo "==> Changes detected in $DIR/: Running linter..." + + # Run the linter from the UI dir. + cd $DIR + $LINTER || block "UI lint failed" +} + +backend_lint() { + # Silently succeed if no changes staged for Go code files. + staged=$(git diff --name-only --cached --exit-code -- '*.go') + ret=$? + if [ $ret -eq 0 ]; then + return 0 + fi + + # Only run fmtcheck on staged files + ./scripts/gofmtcheck.sh "${staged}" || block "Backend linting failed; run 'make fmt' to fix." +} + +for CHECK in $CHECKS; do + # Force each check into a subshell to avoid crosstalk. + ( $CHECK ) || exit $? +done diff --git a/.hooks/pre-push b/.hooks/pre-push new file mode 100755 index 0000000..e760921 --- /dev/null +++ b/.hooks/pre-push @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +remote="$1" +remote_url=$(git remote get-url $remote) + +if [[ $remote_url == *"vault-enterprise"* ]]; then + exit 0 +fi + +if [ "$remote" = "enterprise" ]; then + exit 0 +fi + +if [ "$remote" = "ent" ]; then + exit 0 +fi + +if [ -f command/version_ent.go ]; then + echo "Found enterprise version file while pushing to oss remote" + exit 1 +fi + +exit 0 diff --git a/.release/ci.hcl b/.release/ci.hcl new file mode 100644 index 0000000..8cd7eb8 --- /dev/null +++ b/.release/ci.hcl @@ -0,0 +1,176 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +schema = "1" + +project "vault" { + team = "vault" + slack { + notification_channel = "C03RXFX5M4L" // #feed-vault-releases + } + github { + organization = "hashicorp" + repository = "vault" + release_branches = [ + "main", + "release/**", + ] + } +} + +event "merge" { + // "entrypoint" to use if build is not run automatically + // i.e. send "merge" complete signal to orchestrator to trigger build +} + +event "build" { + depends = ["merge"] + action "build" { + organization = "hashicorp" + repository = "vault" + workflow = "build" + } +} + +event "prepare" { + depends = ["build"] + action "prepare" { + organization = "hashicorp" + repository = "crt-workflows-common" + workflow = "prepare" + depends = ["build"] + } + + notification { + on = "fail" + } +} + +event "enos-release-testing-oss" { + depends = ["prepare"] + action "enos-release-testing-oss" { + organization = "hashicorp" + repository = "vault" + workflow = "enos-release-testing-oss" + } + + notification { + on = "fail" + } +} + +## These events are publish and post-publish events and should be added to the end of the file +## after the verify event stanza. + +event "trigger-staging" { +// This event is dispatched by the bob trigger-promotion command +// and is required - do not delete. 
+} + +event "promote-staging" { + depends = ["trigger-staging"] + action "promote-staging" { + organization = "hashicorp" + repository = "crt-workflows-common" + workflow = "promote-staging" + config = "release-metadata.hcl" + } + + notification { + on = "always" + } +} + +event "promote-staging-docker" { + depends = ["promote-staging"] + action "promote-staging-docker" { + organization = "hashicorp" + repository = "crt-workflows-common" + workflow = "promote-staging-docker" + } + + notification { + on = "always" + } +} + +event "trigger-production" { +// This event is dispatched by the bob trigger-promotion command +// and is required - do not delete. +} + +event "promote-production" { + depends = ["trigger-production"] + action "promote-production" { + organization = "hashicorp" + repository = "crt-workflows-common" + workflow = "promote-production" + } + + notification { + on = "always" + } +} + +event "promote-production-docker" { + depends = ["promote-production"] + action "promote-production-docker" { + organization = "hashicorp" + repository = "crt-workflows-common" + workflow = "promote-production-docker" + } + + notification { + on = "always" + } +} + +event "promote-production-packaging" { + depends = ["promote-production-docker"] + action "promote-production-packaging" { + organization = "hashicorp" + repository = "crt-workflows-common" + workflow = "promote-production-packaging" + } + + notification { + on = "always" + } +} + +# The post-publish-website event should not be merged into the enterprise repo. +# It is for OSS use only. +event "post-publish-website" { + depends = ["promote-production-packaging"] + action "post-publish-website" { + organization = "hashicorp" + repository = "crt-workflows-common" + workflow = "post-publish-website" + } + + notification { + on = "always" + } +} + +event "bump-version" { + depends = ["post-publish-website"] + action "bump-version" { + organization = "hashicorp" + repository = "crt-workflows-common" + workflow = "bump-version" + } +} + +event "update-ironbank" { + depends = ["bump-version"] + action "update-ironbank" { + organization = "hashicorp" + repository = "crt-workflows-common" + workflow = "update-ironbank" + } + + notification { + on = "fail" + } +} diff --git a/.release/docker/docker-entrypoint.sh b/.release/docker/docker-entrypoint.sh new file mode 100755 index 0000000..2b9b8f3 --- /dev/null +++ b/.release/docker/docker-entrypoint.sh @@ -0,0 +1,107 @@ +#!/usr/bin/dumb-init /bin/sh +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +set -e + +# Note above that we run dumb-init as PID 1 in order to reap zombie processes +# as well as forward signals to all processes in its session. Normally, sh +# wouldn't do either of these functions so we'd leak zombies as well as do +# unclean termination of all our sub-processes. + +# Prevent core dumps +ulimit -c 0 + +# Allow setting VAULT_REDIRECT_ADDR and VAULT_CLUSTER_ADDR using an interface +# name instead of an IP address. The interface name is specified using +# VAULT_REDIRECT_INTERFACE and VAULT_CLUSTER_INTERFACE environment variables. If +# VAULT_*_ADDR is also set, the resulting URI will combine the protocol and port +# number with the IP of the named interface. 
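+#
+# For example (illustrative addresses): with VAULT_CLUSTER_INTERFACE=eth0, an
+# eth0 address of 172.17.0.2, and the default template https://0.0.0.0:8201,
+# the function below yields VAULT_CLUSTER_ADDR=https://172.17.0.2:8201.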
+get_addr () { + local if_name=$1 + local uri_template=$2 + ip addr show dev $if_name | awk -v uri=$uri_template '/\s*inet\s/ { \ + ip=gensub(/(.+)\/.+/, "\\1", "g", $2); \ + print gensub(/^(.+:\/\/).+(:.+)$/, "\\1" ip "\\2", "g", uri); \ + exit}' +} + +if [ -n "$VAULT_REDIRECT_INTERFACE" ]; then + export VAULT_REDIRECT_ADDR=$(get_addr $VAULT_REDIRECT_INTERFACE ${VAULT_REDIRECT_ADDR:-"http://0.0.0.0:8200"}) + echo "Using $VAULT_REDIRECT_INTERFACE for VAULT_REDIRECT_ADDR: $VAULT_REDIRECT_ADDR" +fi +if [ -n "$VAULT_CLUSTER_INTERFACE" ]; then + export VAULT_CLUSTER_ADDR=$(get_addr $VAULT_CLUSTER_INTERFACE ${VAULT_CLUSTER_ADDR:-"https://0.0.0.0:8201"}) + echo "Using $VAULT_CLUSTER_INTERFACE for VAULT_CLUSTER_ADDR: $VAULT_CLUSTER_ADDR" +fi + +# VAULT_CONFIG_DIR isn't exposed as a volume but you can compose additional +# config files in there if you use this image as a base, or use +# VAULT_LOCAL_CONFIG below. +VAULT_CONFIG_DIR=/vault/config + +# You can also set the VAULT_LOCAL_CONFIG environment variable to pass some +# Vault configuration JSON without having to bind any volumes. +if [ -n "$VAULT_LOCAL_CONFIG" ]; then + echo "$VAULT_LOCAL_CONFIG" > "$VAULT_CONFIG_DIR/local.json" +fi + +# If the user is trying to run Vault directly with some arguments, then +# pass them to Vault. +if [ "${1:0:1}" = '-' ]; then + set -- vault "$@" +fi + +# Look for Vault subcommands. +if [ "$1" = 'server' ]; then + shift + set -- vault server \ + -config="$VAULT_CONFIG_DIR" \ + -dev-root-token-id="$VAULT_DEV_ROOT_TOKEN_ID" \ + -dev-listen-address="${VAULT_DEV_LISTEN_ADDRESS:-"0.0.0.0:8200"}" \ + "$@" +elif [ "$1" = 'version' ]; then + # This needs a special case because there's no help output. + set -- vault "$@" +elif vault --help "$1" 2>&1 | grep -q "vault $1"; then + # We can't use the return code to check for the existence of a subcommand, so + # we have to use grep to look for a pattern in the help output. + set -- vault "$@" +fi + +# If we are running Vault, make sure it executes as the proper user. +if [ "$1" = 'vault' ]; then + if [ -z "$SKIP_CHOWN" ]; then + # If the config dir is bind mounted then chown it + if [ "$(stat -c %u /vault/config)" != "$(id -u vault)" ]; then + chown -R vault:vault /vault/config || echo "Could not chown /vault/config (may not have appropriate permissions)" + fi + + # If the logs dir is bind mounted then chown it + if [ "$(stat -c %u /vault/logs)" != "$(id -u vault)" ]; then + chown -R vault:vault /vault/logs + fi + + # If the file dir is bind mounted then chown it + if [ "$(stat -c %u /vault/file)" != "$(id -u vault)" ]; then + chown -R vault:vault /vault/file + fi + fi + + if [ -z "$SKIP_SETCAP" ]; then + # Allow mlock to avoid swapping Vault memory to disk + setcap cap_ipc_lock=+ep $(readlink -f $(which vault)) + + # In the case vault has been started in a container without IPC_LOCK privileges + if ! vault -version 1>/dev/null 2>/dev/null; then + >&2 echo "Couldn't start vault with IPC_LOCK. Disabling IPC_LOCK, please use --cap-add IPC_LOCK" + setcap cap_ipc_lock=-ep $(readlink -f $(which vault)) + fi + fi + + if [ "$(id -u)" = '0' ]; then + set -- su-exec vault "$@" + fi +fi + +exec "$@" diff --git a/.release/docker/ubi-docker-entrypoint.sh b/.release/docker/ubi-docker-entrypoint.sh new file mode 100755 index 0000000..794e69c --- /dev/null +++ b/.release/docker/ubi-docker-entrypoint.sh @@ -0,0 +1,116 @@ +#!/bin/sh +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +set -e + +# Prevent core dumps +ulimit -c 0 + +# Allow setting VAULT_REDIRECT_ADDR and VAULT_CLUSTER_ADDR using an interface +# name instead of an IP address. The interface name is specified using +# VAULT_REDIRECT_INTERFACE and VAULT_CLUSTER_INTERFACE environment variables. If +# VAULT_*_ADDR is also set, the resulting URI will combine the protocol and port +# number with the IP of the named interface. +get_addr () { + local if_name=$1 + local uri_template=$2 + ip addr show dev $if_name | awk -v uri=$uri_template '/\s*inet\s/ { \ + ip=gensub(/(.+)\/.+/, "\\1", "g", $2); \ + print gensub(/^(.+:\/\/).+(:.+)$/, "\\1" ip "\\2", "g", uri); \ + exit}' +} + +if [ -n "$VAULT_REDIRECT_INTERFACE" ]; then + export VAULT_REDIRECT_ADDR=$(get_addr $VAULT_REDIRECT_INTERFACE ${VAULT_REDIRECT_ADDR:-"http://0.0.0.0:8200"}) + echo "Using $VAULT_REDIRECT_INTERFACE for VAULT_REDIRECT_ADDR: $VAULT_REDIRECT_ADDR" +fi +if [ -n "$VAULT_CLUSTER_INTERFACE" ]; then + export VAULT_CLUSTER_ADDR=$(get_addr $VAULT_CLUSTER_INTERFACE ${VAULT_CLUSTER_ADDR:-"https://0.0.0.0:8201"}) + echo "Using $VAULT_CLUSTER_INTERFACE for VAULT_CLUSTER_ADDR: $VAULT_CLUSTER_ADDR" +fi + +# VAULT_CONFIG_DIR isn't exposed as a volume but you can compose additional +# config files in there if you use this image as a base, or use +# VAULT_LOCAL_CONFIG below. +VAULT_CONFIG_DIR=/vault/config + +# You can also set the VAULT_LOCAL_CONFIG environment variable to pass some +# Vault configuration JSON without having to bind any volumes. +if [ -n "$VAULT_LOCAL_CONFIG" ]; then + echo "$VAULT_LOCAL_CONFIG" > "$VAULT_CONFIG_DIR/local.json" +fi + +# Due to OpenShift environment compatibility, we have to allow group write +# access to the Vault configuration. This requires us to disable the stricter +# file permissions checks introduced in Vault v1.11.0. +export VAULT_DISABLE_FILE_PERMISSIONS_CHECK=true + +# If the user is trying to run Vault directly with some arguments, then +# pass them to Vault. +if [ "${1:0:1}" = '-' ]; then + set -- vault "$@" +fi + +# Look for Vault subcommands. +if [ "$1" = 'server' ]; then + shift + set -- vault server \ + -config="$VAULT_CONFIG_DIR" \ + -dev-root-token-id="$VAULT_DEV_ROOT_TOKEN_ID" \ + -dev-listen-address="${VAULT_DEV_LISTEN_ADDRESS:-"0.0.0.0:8200"}" \ + "$@" +elif [ "$1" = 'version' ]; then + # This needs a special case because there's no help output. + set -- vault "$@" +elif vault --help "$1" 2>&1 | grep -q "vault $1"; then + # We can't use the return code to check for the existence of a subcommand, so + # we have to use grep to look for a pattern in the help output. + set -- vault "$@" +fi + +# If we are running Vault, make sure it executes as the proper user. 
+if [ "$1" = 'vault' ]; then + if [ -z "$SKIP_CHOWN" ]; then + # If the config dir is bind mounted then chown it + if [ "$(stat -c %u /vault/config)" != "$(id -u vault)" ]; then + chown -R vault:vault /vault/config || echo "Could not chown /vault/config (may not have appropriate permissions)" + fi + + # If the logs dir is bind mounted then chown it + if [ "$(stat -c %u /vault/logs)" != "$(id -u vault)" ]; then + chown -R vault:vault /vault/logs + fi + + # If the file dir is bind mounted then chown it + if [ "$(stat -c %u /vault/file)" != "$(id -u vault)" ]; then + chown -R vault:vault /vault/file + fi + fi + + if [ -z "$SKIP_SETCAP" ]; then + # Allow mlock to avoid swapping Vault memory to disk + setcap cap_ipc_lock=+ep $(readlink -f /bin/vault) + + # In the case vault has been started in a container without IPC_LOCK privileges + if ! vault -version 1>/dev/null 2>/dev/null; then + >&2 echo "Couldn't start vault with IPC_LOCK. Disabling IPC_LOCK, please use --cap-add IPC_LOCK" + setcap cap_ipc_lock=-ep $(readlink -f /bin/vault) + fi + fi +fi + +# In case of Docker, where swap may be enabled, we +# still require mlocking to be available. So this script +# was executed as root to make this happen, however, +# we're now rerunning the entrypoint script as the Vault +# user but no longer need to run setup code for setcap +# or chowning directories (previously done on the first run). +if [[ "$(id -u)" == '0' ]] +then + export SKIP_CHOWN="true" + export SKIP_SETCAP="true" + exec su vault -p "$0" -- "$@" +else + exec "$@" +fi diff --git a/.release/linux/package/etc/vault.d/vault.env b/.release/linux/package/etc/vault.d/vault.env new file mode 100644 index 0000000..e69de29 diff --git a/.release/linux/package/etc/vault.d/vault.hcl b/.release/linux/package/etc/vault.d/vault.hcl new file mode 100644 index 0000000..4a59d36 --- /dev/null +++ b/.release/linux/package/etc/vault.d/vault.hcl @@ -0,0 +1,50 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +# Full configuration options can be found at https://www.vaultproject.io/docs/configuration + +ui = true + +#mlock = true +#disable_mlock = true + +storage "file" { + path = "/opt/vault/data" +} + +#storage "consul" { +# address = "127.0.0.1:8500" +# path = "vault" +#} + +# HTTP listener +#listener "tcp" { +# address = "127.0.0.1:8200" +# tls_disable = 1 +#} + +# HTTPS listener +listener "tcp" { + address = "0.0.0.0:8200" + tls_cert_file = "/opt/vault/tls/tls.crt" + tls_key_file = "/opt/vault/tls/tls.key" +} + +# Enterprise license_path +# This will be required for enterprise as of v1.8 +#license_path = "/etc/vault.d/vault.hclic" + +# Example AWS KMS auto unseal +#seal "awskms" { +# region = "us-east-1" +# kms_key_id = "REPLACE-ME" +#} + +# Example HSM auto unseal +#seal "pkcs11" { +# lib = "/usr/vault/lib/libCryptoki2_64.so" +# slot = "0" +# pin = "AAAA-BBBB-CCCC-DDDD" +# key_label = "vault-hsm-key" +# hmac_key_label = "vault-hsm-hmac-key" +#} diff --git a/.release/linux/package/usr/lib/systemd/system/vault.service b/.release/linux/package/usr/lib/systemd/system/vault.service new file mode 100644 index 0000000..45c896b --- /dev/null +++ b/.release/linux/package/usr/lib/systemd/system/vault.service @@ -0,0 +1,34 @@ +[Unit] +Description="HashiCorp Vault - A tool for managing secrets" +Documentation=https://www.vaultproject.io/docs/ +Requires=network-online.target +After=network-online.target +ConditionFileNotEmpty=/etc/vault.d/vault.hcl +StartLimitIntervalSec=60 +StartLimitBurst=3 + +[Service] +Type=notify +EnvironmentFile=/etc/vault.d/vault.env +User=vault +Group=vault +ProtectSystem=full +ProtectHome=read-only +PrivateTmp=yes +PrivateDevices=yes +SecureBits=keep-caps +AmbientCapabilities=CAP_IPC_LOCK +CapabilityBoundingSet=CAP_SYSLOG CAP_IPC_LOCK +NoNewPrivileges=yes +ExecStart=/usr/bin/vault server -config=/etc/vault.d/vault.hcl +ExecReload=/bin/kill --signal HUP $MAINPID +KillMode=process +KillSignal=SIGINT +Restart=on-failure +RestartSec=5 +TimeoutStopSec=30 +LimitNOFILE=65536 +LimitMEMLOCK=infinity + +[Install] +WantedBy=multi-user.target diff --git a/.release/linux/postinst b/.release/linux/postinst new file mode 100644 index 0000000..2a08b7a --- /dev/null +++ b/.release/linux/postinst @@ -0,0 +1,47 @@ +#!/bin/bash + +if [[ -f /opt/vault/tls/tls.crt ]] && [[ -f /opt/vault/tls/tls.key ]]; then + echo "Vault TLS key and certificate already exist. Exiting." + exit 0 +fi + +echo "Generating Vault TLS key and self-signed certificate..." + +# Create TLS and Data directory +mkdir --parents /opt/vault/tls +mkdir --parents /opt/vault/data + +# Generate TLS key and certificate +cd /opt/vault/tls +openssl req \ + -out tls.crt \ + -new \ + -keyout tls.key \ + -newkey rsa:4096 \ + -nodes \ + -sha256 \ + -x509 \ + -subj "/O=HashiCorp/CN=Vault" \ + -days 1095 # 3 years + +# Update file permissions +chown --recursive vault:vault /etc/vault.d +chown --recursive vault:vault /opt/vault +chmod 600 /opt/vault/tls/tls.crt /opt/vault/tls/tls.key +chmod 700 /opt/vault/tls + +echo "Vault TLS key and self-signed certificate have been generated in '/opt/vault/tls'." 
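+
+# To inspect the generated certificate by hand (illustrative; not executed by
+# this script):
+#   openssl x509 -in /opt/vault/tls/tls.crt -noout -subject -dates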
+ +# Set IPC_LOCK capabilities on vault +setcap cap_ipc_lock=+ep /usr/bin/vault + +if [ -d /run/systemd/system ]; then + systemctl --system daemon-reload >/dev/null || true +fi + +if [[ $(vault version) == *+ent* ]]; then +echo " +The following shall apply unless your organization has a separately signed Enterprise License Agreement or Evaluation Agreement governing your use of the software: +Software in this repository is subject to the license terms located in the software, copies of which are also available at https://eula.hashicorp.com/ClickThruELA-Global.pdf or https://www.hashicorp.com/terms-of-evaluation as applicable. Please read the license terms prior to using the software. Your installation and use of the software constitutes your acceptance of these terms. If you do not accept the terms, do not use the software. +" +fi diff --git a/.release/linux/postrm b/.release/linux/postrm new file mode 100644 index 0000000..64dd1e5 --- /dev/null +++ b/.release/linux/postrm @@ -0,0 +1,8 @@ +#!/bin/bash + +if [ "$1" = "purge" ] +then + userdel vault +fi + +exit 0 diff --git a/.release/linux/preinst b/.release/linux/preinst new file mode 100644 index 0000000..6de6e2e --- /dev/null +++ b/.release/linux/preinst @@ -0,0 +1,13 @@ +#!/bin/bash + +set -eu + +USER="vault" + +if ! id -u $USER > /dev/null 2>&1; then + useradd \ + --system \ + --user-group \ + --shell /bin/false \ + $USER +fi diff --git a/.release/release-metadata.hcl b/.release/release-metadata.hcl new file mode 100644 index 0000000..3a49b69 --- /dev/null +++ b/.release/release-metadata.hcl @@ -0,0 +1,9 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +url_docker_registry_dockerhub = "https://hub.docker.com/r/hashicorp/vault" +url_docker_registry_ecr = "https://gallery.ecr.aws/hashicorp/vault" +url_license = "https://github.com/hashicorp/vault/blob/main/LICENSE" +url_project_website = "https://www.vaultproject.io/" +url_source_repository = "https://github.com/hashicorp/vault" +url_release_notes = "https://www.vaultproject.io/docs/release-notes" diff --git a/.release/security-scan.hcl b/.release/security-scan.hcl new file mode 100644 index 0000000..62460e4 --- /dev/null +++ b/.release/security-scan.hcl @@ -0,0 +1,16 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +container { + dependencies = true + alpine_secdb = true + secrets = true +} + +binary { + secrets = false + go_modules = false + osv = true + oss_index = true + nvd = false +} diff --git a/.yamllint b/.yamllint new file mode 100644 index 0000000..fe62ee8 --- /dev/null +++ b/.yamllint @@ -0,0 +1,10 @@ +--- + +extends: relaxed + +rules: + comments: disable + comments-indentation: disable + line-length: disable + commas: + max-spaces-after: -1 diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..9e8b0fc --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,8800 @@ +## 1.13.2 +### April 26, 2023 + +CHANGES: + +* core: Bump Go version to 1.20.3. + +SECURITY: + +* core/seal: Fix handling of HMACing of seal-wrapped storage entries from HSMs using CKM_AES_CBC or CKM_AES_CBC_PAD which may have allowed an attacker to conduct a padding oracle attack. This vulnerability, CVE-2023-2197, affects Vault from 1.13.0 up to 1.13.1 and was fixed in 1.13.2. 
[[HCSEC-2023-14](https://discuss.hashicorp.com/t/hcsec-2023-14-vault-enterprise-vulnerable-to-padding-oracle-attacks-when-using-a-cbc-based-encryption-mechanism-with-a-hsm/53322)]
+
+IMPROVEMENTS:
+
+* Add debug symbols back to builds to fix Dynatrace support [[GH-20294](https://github.com/hashicorp/vault/pull/20294)]
+* cli/namespace: Add detailed flag to output additional namespace information
+such as namespace IDs and custom metadata. [[GH-20243](https://github.com/hashicorp/vault/pull/20243)]
+* core/activity: add an endpoint to write test activity log data, guarded by a build flag [[GH-20019](https://github.com/hashicorp/vault/pull/20019)]
+* core: Add a `raft` sub-field to the `storage` and `ha_storage` details provided by the
+`/sys/config/state/sanitized` endpoint in order to include the `max_entry_size`. [[GH-20044](https://github.com/hashicorp/vault/pull/20044)]
+* core: include reason for ErrReadOnly on PBPWF writing failures
+* sdk/ldaputil: added `connection_timeout` to tune connection timeout duration
+for all LDAP plugins. [[GH-20144](https://github.com/hashicorp/vault/pull/20144)]
+* secrets/pki: Decrease size and improve compatibility of OCSP responses by removing issuer certificate. [[GH-20201](https://github.com/hashicorp/vault/pull/20201)]
+* sys/wrapping: Add example how to unwrap without authentication in Vault [[GH-20109](https://github.com/hashicorp/vault/pull/20109)]
+* ui: Allows license-banners to be dismissed. Saves preferences in localStorage. [[GH-19116](https://github.com/hashicorp/vault/pull/19116)]
+
+BUG FIXES:
+
+* auth/ldap: Add max_page_size configurable to LDAP configuration [[GH-19032](https://github.com/hashicorp/vault/pull/19032)]
+* command/server: Fix incorrect paths in generated config for `-dev-tls` flag on Windows [[GH-20257](https://github.com/hashicorp/vault/pull/20257)]
+* core (enterprise): Fix intermittent issue with token entries sometimes not being found when using a newly created token in a request to a secondary, even when SSCT `new_token` forwarding is set. When this occurred, this would result in the following error to the client: `error performing token check: no lease entry found for token that ought to have one, possible eventual consistency issue`.
+* core (enterprise): Fix read on perf standbys failing with 412 after leadership change, unseal, restores or restarts when no writes occur
+* core/ssct (enterprise): Fixed race condition where a newly promoted DR may revert `sscGenCounter`
+resulting in 412 errors.
+* core: Fix regression breaking non-raft clusters whose nodes share the same cluster_addr/api_addr. [[GH-19721](https://github.com/hashicorp/vault/pull/19721)]
+* helper/random: Fix race condition in string generator helper [[GH-19875](https://github.com/hashicorp/vault/pull/19875)]
+* kmip (enterprise): Fix a problem decrypting with keys that have no Process Start Date attribute.
+* pki: Fix automatically turning off CRL signing on upgrade to Vault >= 1.12, if CA Key Usage disallows it [[GH-20220](https://github.com/hashicorp/vault/pull/20220)]
+* replication (enterprise): Fix a caching issue when replicating filtered data to
+a performance secondary. This resulted in the data being set to nil in the cache
+and an "invalid value" error being returned from the API.
+* replication (enterprise): Fix replication status for Primary clusters showing its primary cluster's information (in case of DR) in secondaries field when known_secondaries field is nil +* sdk/helper/ocsp: Workaround bug in Go's ocsp.ParseResponse(...), causing validation to fail with embedded CA certificates. +auth/cert: Fix OCSP validation against Vault's PKI engine. [[GH-20181](https://github.com/hashicorp/vault/pull/20181)] +* secrets/aws: Revert changes that removed the lease on STS credentials, while leaving the new ttl field in place. [[GH-20034](https://github.com/hashicorp/vault/pull/20034)] +* secrets/pki: Ensure cross-cluster delta WAL write failure only logs to avoid unattended forwarding. [[GH-20057](https://github.com/hashicorp/vault/pull/20057)] +* secrets/pki: Fix building of unified delta CRLs and recovery during unified delta WAL write failures. [[GH-20058](https://github.com/hashicorp/vault/pull/20058)] +* secrets/pki: Fix patching of leaf_not_after_behavior on issuers. [[GH-20341](https://github.com/hashicorp/vault/pull/20341)] +* secrets/transform (enterprise): Address SQL connection leak when cleaning expired tokens +* ui: Fix OIDC provider logo showing when domain doesn't match [[GH-20263](https://github.com/hashicorp/vault/pull/20263)] +* ui: Fix bad link to namespace when namespace name includes `.` [[GH-19799](https://github.com/hashicorp/vault/pull/19799)] +* ui: fixes browser console formatting for help command output [[GH-20064](https://github.com/hashicorp/vault/pull/20064)] +* ui: fixes remaining doc links to include /vault in path [[GH-20070](https://github.com/hashicorp/vault/pull/20070)] +* ui: remove use of htmlSafe except when first sanitized [[GH-20235](https://github.com/hashicorp/vault/pull/20235)] +* website/docs: Fix Kubernetes Auth Code Example to use the correct whitespace in import. [[GH-20216](https://github.com/hashicorp/vault/pull/20216)] + +## 1.13.1 +### March 29, 2023 + +SECURITY: + +* storage/mssql: When using Vault’s community-supported Microsoft SQL (MSSQL) database storage backend, a privileged attacker with the ability to write arbitrary data to Vault’s configuration may be able to perform arbitrary SQL commands on the underlying database server through Vault. This vulnerability, CVE-2023-0620, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-12](https://discuss.hashicorp.com/t/hcsec-2023-12-vault-s-microsoft-sql-database-storage-backend-vulnerable-to-sql-injection-via-configuration-file/52080)] +* secrets/pki: Vault’s PKI mount issuer endpoints did not correctly authorize access to remove an issuer or modify issuer metadata, potentially resulting in denial of service of the PKI mount. This bug did not affect public or private key material, trust chains or certificate issuance. This vulnerability, CVE-2023-0665, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-11](https://discuss.hashicorp.com/t/hcsec-2023-11-vault-s-pki-issuer-endpoint-did-not-correctly-authorize-access-to-issuer-metadata/52079)] +* core: HashiCorp Vault’s implementation of Shamir’s secret sharing used precomputed table lookups, and was vulnerable to cache-timing attacks. An attacker with access to, and the ability to observe a large number of unseal operations on the host through a side channel may reduce the search space of a brute force effort to recover the Shamir shares. This vulnerability, CVE-2023-25000, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. 
[[HCSEC-2023-10](https://discuss.hashicorp.com/t/hcsec-2023-10-vault-vulnerable-to-cache-timing-attacks-during-seal-and-unseal-operations/52078)]
+
+IMPROVEMENTS:
+
+* auth/github: Allow for an optional Github auth token environment variable to make authenticated requests when fetching org id
+website/docs: Add docs for `VAULT_AUTH_CONFIG_GITHUB_TOKEN` environment variable when writing Github config [[GH-19244](https://github.com/hashicorp/vault/pull/19244)]
+* core: Allow overriding gRPC connect timeout via VAULT_GRPC_MIN_CONNECT_TIMEOUT. This is an env var rather than a config setting because we don't expect this to ever be needed. It's being added as a last-ditch
+option in case all else fails for some replication issues we may not have fully reproduced. [[GH-19676](https://github.com/hashicorp/vault/pull/19676)]
+* core: validate name identifiers in mssql physical storage backend prior to use [[GH-19591](https://github.com/hashicorp/vault/pull/19591)]
+* database/elasticsearch: Update error messages resulting from Elasticsearch API errors [[GH-19545](https://github.com/hashicorp/vault/pull/19545)]
+* events: Suppress log warnings triggered when events are sent but the events system is not enabled. [[GH-19593](https://github.com/hashicorp/vault/pull/19593)]
+
+BUG FIXES:
+
+* agent: Fix panic when SIGHUP is issued to Agent while it has a non-TLS listener. [[GH-19483](https://github.com/hashicorp/vault/pull/19483)]
+* core (enterprise): Attempt to reconnect to a PKCS#11 HSM if we retrieve a CKR_FUNCTION_FAILED error.
+* core: Fixed issue with remounting mounts that have a non-trailing space in the 'to' or 'from' paths. [[GH-19585](https://github.com/hashicorp/vault/pull/19585)]
+* kmip (enterprise): Do not require attribute Cryptographic Usage Mask when registering Secret Data managed objects.
+* kmip (enterprise): Fix a problem forwarding some requests to the active node.
+* openapi: Fix logic for labeling unauthenticated/sudo paths. [[GH-19600](https://github.com/hashicorp/vault/pull/19600)]
+* secrets/ldap: Invalidates WAL entry for static role if `password_policy` has changed. [[GH-19640](https://github.com/hashicorp/vault/pull/19640)]
+* secrets/pki: Fix PKI revocation request forwarding from standby nodes due to an error wrapping bug [[GH-19624](https://github.com/hashicorp/vault/pull/19624)]
+* secrets/transform (enterprise): Fix persistence problem with rotated tokenization key versions
+* ui: Fixes crypto.randomUUID error in insecure contexts from third party ember-data library [[GH-19428](https://github.com/hashicorp/vault/pull/19428)]
+* ui: fixes SSH engine config deletion [[GH-19448](https://github.com/hashicorp/vault/pull/19448)]
+* ui: fixes issue navigating back a level using the breadcrumb from secret metadata view [[GH-19703](https://github.com/hashicorp/vault/pull/19703)]
+* ui: fixes oidc tabs in auth form submitting with the root's default_role value after a namespace has been entered [[GH-19541](https://github.com/hashicorp/vault/pull/19541)]
+* ui: pass encodeBase64 param to HMAC transit-key-actions. [[GH-19429](https://github.com/hashicorp/vault/pull/19429)]
+* ui: use URLSearchParams interface to capture namespace param from SSOs (ex. ADFS) with decoded state param in callback url [[GH-19460](https://github.com/hashicorp/vault/pull/19460)]
+
+## 1.13.0
+### March 01, 2023
+
+SECURITY:
+
+* secrets/ssh: removal of the deprecated dynamic keys mode.
**When any remaining dynamic key leases expire**, an error stating `secret is unsupported by this backend` will be thrown by the lease manager. [[GH-18874](https://github.com/hashicorp/vault/pull/18874)]
+* auth/approle: When using the Vault and Vault Enterprise (Vault) approle auth method, any authenticated user with access to the /auth/approle/role/:role_name/secret-id-accessor/destroy endpoint can destroy the secret ID of any other role by providing the secret ID accessor. This vulnerability, CVE-2023-24999, has been fixed in Vault 1.13.0, 1.12.4, 1.11.8, 1.10.11 and above. [[HCSEC-2023-07](https://discuss.hashicorp.com/t/hcsec-2023-07-vault-fails-to-verify-if-approle-secretid-belongs-to-role-during-a-destroy-operation/51305)]
+
+CHANGES:
+
+* auth/alicloud: require the `role` field on login [[GH-19005](https://github.com/hashicorp/vault/pull/19005)]
+* auth/approle: Add maximum length of 4096 for approle role_names, as this value results in HMAC calculation [[GH-17768](https://github.com/hashicorp/vault/pull/17768)]
+* auth: Returns invalid credentials for ldap, userpass and approle when wrong credentials are provided for existent users.
+This will only be used internally for implementing user lockout. [[GH-17104](https://github.com/hashicorp/vault/pull/17104)]
+* core: Bump Go version to 1.20.1.
+* core: Vault version has been moved out of sdk and into main vault module.
+Plugins using sdk/useragent.String must instead use sdk/useragent.PluginString. [[GH-14229](https://github.com/hashicorp/vault/pull/14229)]
+* logging: Removed legacy environment variable for log format ('LOGXI_FORMAT'), should use 'VAULT_LOG_FORMAT' instead [[GH-17822](https://github.com/hashicorp/vault/pull/17822)]
+* plugins: Mounts can no longer be pinned to a specific _builtin_ version. Mounts previously pinned to a specific builtin version will now automatically upgrade to the latest builtin version, and may now be overridden if an unversioned plugin of the same name and type is registered. Mounts using plugin versions without `builtin` in their metadata remain unaffected. [[GH-18051](https://github.com/hashicorp/vault/pull/18051)]
+* plugins: `GET /database/config/:name` endpoint now returns an additional `plugin_version` field in the response data. [[GH-16982](https://github.com/hashicorp/vault/pull/16982)]
+* plugins: `GET /sys/auth/:path/tune` and `GET /sys/mounts/:path/tune` endpoints may now return an additional `plugin_version` field in the response data if set. [[GH-17167](https://github.com/hashicorp/vault/pull/17167)]
+* plugins: `GET` for `/sys/auth`, `/sys/auth/:path`, `/sys/mounts`, and `/sys/mounts/:path` paths now return additional `plugin_version`, `running_plugin_version` and `running_sha256` fields in the response data for each mount. [[GH-17167](https://github.com/hashicorp/vault/pull/17167)]
+* sdk: Remove version package, make useragent.String versionless. [[GH-19068](https://github.com/hashicorp/vault/pull/19068)]
+* secrets/aws: do not create leases for non-renewable/non-revocable STS credentials to reduce storage calls [[GH-15869](https://github.com/hashicorp/vault/pull/15869)]
+* secrets/gcpkms: Updated plugin from v0.13.0 to v0.14.0 [[GH-19063](https://github.com/hashicorp/vault/pull/19063)]
+* sys/internal/inspect: Turns off this endpoint by default. A SIGHUP can now be used to reload the configs and turn this endpoint on.
+* ui: Upgrade Ember to version 4.4.0 [[GH-17086](https://github.com/hashicorp/vault/pull/17086)]
+
+FEATURES:
+
+* **User lockout**: Ignore repeated bad credentials from the same user for a configured period of time. Enabled by default.
+* **Azure Auth Managed Identities**: Allow any Azure resource that supports managed identities to authenticate with Vault [[GH-19077](https://github.com/hashicorp/vault/pull/19077)]
+* **Azure Auth Rotate Root**: Add support for rotate root in Azure Auth engine [[GH-19077](https://github.com/hashicorp/vault/pull/19077)]
+* **Event System (Alpha)**: Vault has a new opt-in experimental event system. Not yet suitable for production use. Events are currently only generated on writes to the KV secrets engine, but external plugins can also be updated to start generating events. [[GH-19194](https://github.com/hashicorp/vault/pull/19194)]
+* **GCP Secrets Impersonated Account Support**: Add support for GCP service account impersonation, allowing callers to generate a GCP access token without requiring Vault to store or retrieve a GCP service account key for each role. [[GH-19018](https://github.com/hashicorp/vault/pull/19018)]
+* **Kubernetes Secrets Engine UI**: Kubernetes is now available in the UI as a supported secrets engine. [[GH-17893](https://github.com/hashicorp/vault/pull/17893)]
+* **New PKI UI**: Add beta support for new and improved PKI UI [[GH-18842](https://github.com/hashicorp/vault/pull/18842)]
+* **PKI Cross-Cluster Revocations**: Revocation information can now be
+synchronized across primary and performance replica clusters offering
+a unified CRL/OCSP view of revocations across cluster boundaries. [[GH-19196](https://github.com/hashicorp/vault/pull/19196)]
+* **Server UDS Listener**: Adds a listener to the Vault server to serve HTTP requests via a unix domain socket [[GH-18227](https://github.com/hashicorp/vault/pull/18227)]
+* **Transit managed keys**: The transit secrets engine now supports configuring and using managed keys
+* **User Lockout**: Adds support to configure the user-lockout behaviour for failed logins to prevent
+brute force attacks for userpass, approle and ldap auth methods. [[GH-19230](https://github.com/hashicorp/vault/pull/19230)]
+* **VMSS Flex Authentication**: Adds support for Virtual Machine Scale Set Flex Authentication [[GH-19077](https://github.com/hashicorp/vault/pull/19077)]
+* **Namespaces (enterprise)**: Added the ability to allow access to secrets and more to be shared across namespaces that do not share a namespace hierarchy. Using the new `sys/config/group-policy-application` API, policies can be configured to apply outside of namespace hierarchy, allowing this kind of cross-namespace sharing.
+* **OpenAPI-based Go & .NET Client Libraries (Beta)**: We have now made available two new [[OpenAPI-based Go](https://github.com/hashicorp/vault-client-go/)] & [[OpenAPI-based .NET](https://github.com/hashicorp/vault-client-dotnet/)] Client libraries (beta). You can use them to perform various secret management operations easily from your applications.
+
+IMPROVEMENTS:
+
+* **Redis ElastiCache DB Engine**: Renamed configuration parameters for disambiguation; old parameters still supported for compatibility.
[[GH-18752](https://github.com/hashicorp/vault/pull/18752)]
+* Bump github.com/hashicorp/go-plugin version from 1.4.5 to 1.4.8 [[GH-19100](https://github.com/hashicorp/vault/pull/19100)]
+* Reduced binary size [[GH-17678](https://github.com/hashicorp/vault/pull/17678)]
+* agent/config: Allow config directories to be specified with -config, and allow multiple -configs to be supplied. [[GH-18403](https://github.com/hashicorp/vault/pull/18403)]
+* agent: Add note in logs when starting Vault Agent indicating if the version differs from the Vault Server. [[GH-18684](https://github.com/hashicorp/vault/pull/18684)]
+* agent: Added `token_file` auto-auth configuration to allow using a pre-existing token for Vault Agent. [[GH-18740](https://github.com/hashicorp/vault/pull/18740)]
+* agent: Agent listeners can now be set to the `metrics_only` role, serving only metrics, as part of the listener's new top level `role` option. [[GH-18101](https://github.com/hashicorp/vault/pull/18101)]
+* agent: Configured Vault Agent listeners now listen without the need for caching to be configured. [[GH-18137](https://github.com/hashicorp/vault/pull/18137)]
+* agent: allows some parts of config to be reloaded without requiring a restart. [[GH-18638](https://github.com/hashicorp/vault/pull/18638)]
+* agent: fix incorrectly used loop variables in parallel tests and when finalizing seals [[GH-16872](https://github.com/hashicorp/vault/pull/16872)]
+* api: Remove dependency on sdk module. [[GH-18962](https://github.com/hashicorp/vault/pull/18962)]
+* api: Support VAULT_DISABLE_REDIRECTS environment variable (and --disable-redirects flag) to disable default client behavior and prevent the client following any redirection responses. [[GH-17352](https://github.com/hashicorp/vault/pull/17352)]
+* audit: Add `elide_list_responses` option, providing a countermeasure for a common source of oversized audit log entries [[GH-18128](https://github.com/hashicorp/vault/pull/18128)]
+* audit: Include stack trace when audit logging recovers from a panic. [[GH-18121](https://github.com/hashicorp/vault/pull/18121)]
+* auth/alicloud: upgrades dependencies [[GH-18021](https://github.com/hashicorp/vault/pull/18021)]
+* auth/azure: Adds support for authentication with Managed Service Identity (MSI) from a
+Virtual Machine Scale Set (VMSS) in flexible orchestration mode. [[GH-17540](https://github.com/hashicorp/vault/pull/17540)]
+* auth/azure: upgrades dependencies [[GH-17857](https://github.com/hashicorp/vault/pull/17857)]
+* auth/cert: Add configurable support for validating client certs with OCSP. [[GH-17093](https://github.com/hashicorp/vault/pull/17093)]
+* auth/cert: Support listing provisioned CRLs within the mount. [[GH-18043](https://github.com/hashicorp/vault/pull/18043)]
+* auth/cf: Remove incorrect usage of CreateOperation from path_config [[GH-19098](https://github.com/hashicorp/vault/pull/19098)]
+* auth/gcp: Upgrades dependencies [[GH-17858](https://github.com/hashicorp/vault/pull/17858)]
+* auth/oidc: Adds `abort_on_error` parameter to CLI login command to help in non-interactive contexts [[GH-19076](https://github.com/hashicorp/vault/pull/19076)]
+* auth/oidc: Adds ability to set Google Workspace domain for groups search [[GH-19076](https://github.com/hashicorp/vault/pull/19076)]
+* auth/token (enterprise): Allow batch token creation in perfStandby nodes
+* auth: Allow naming login MFA methods and using those names instead of IDs in satisfying MFA requirement for requests.
Make passcode arguments consistent across login MFA method types. [[GH-18610](https://github.com/hashicorp/vault/pull/18610)]
+* auth: Provide an IP address of the requests from Vault to a Duo challenge after successful authentication. [[GH-18811](https://github.com/hashicorp/vault/pull/18811)]
+* autopilot: Update version to v0.2.0 to add better support for respecting min quorum
+* cli/kv: improve kv CLI to remove data or custom metadata using kv patch [[GH-18067](https://github.com/hashicorp/vault/pull/18067)]
+* cli/pki: Add List-Intermediates functionality to pki client. [[GH-18463](https://github.com/hashicorp/vault/pull/18463)]
+* cli/pki: Add health-check subcommand to evaluate the health of a PKI instance. [[GH-17750](https://github.com/hashicorp/vault/pull/17750)]
+* cli/pki: Add pki issue command, which creates a CSR, has a vault mount sign it, then reimports it. [[GH-18467](https://github.com/hashicorp/vault/pull/18467)]
+* cli/pki: Added "Reissue" command which allows extracting fields from an existing certificate to create a new certificate. [[GH-18499](https://github.com/hashicorp/vault/pull/18499)]
+* cli/pki: Change the pki health-check --list default config output to JSON so it's a usable configuration file [[GH-19269](https://github.com/hashicorp/vault/pull/19269)]
+* cli: Add support for creating requests to existing non-KVv2 PATCH-capable endpoints. [[GH-17650](https://github.com/hashicorp/vault/pull/17650)]
+* cli: Add transit import key helper commands for BYOK to Transit/Transform. [[GH-18887](https://github.com/hashicorp/vault/pull/18887)]
+* cli: Support the -format=raw option, to read non-JSON Vault endpoints and original response bodies. [[GH-14945](https://github.com/hashicorp/vault/pull/14945)]
+* cli: updated `vault operator rekey` prompts to describe recovery keys when `-target=recovery` [[GH-18892](https://github.com/hashicorp/vault/pull/18892)]
+* client/pki: Add a new command verify-sign which checks the relationship between two certificates. [[GH-18437](https://github.com/hashicorp/vault/pull/18437)]
+* command/server: Environment variable keys are now logged at startup. [[GH-18125](https://github.com/hashicorp/vault/pull/18125)]
+* core/fips: use upstream toolchain for FIPS 140-2 compliance again; this will appear as X=boringcrypto on the Go version in Vault server logs.
+* core/identity: Add machine-readable output to body of response upon alias clash during entity merge [[GH-17459](https://github.com/hashicorp/vault/pull/17459)]
+* core/server: Added an environment variable to write goroutine stacktraces to a
+temporary file for SIGUSR2 signals. [[GH-17929](https://github.com/hashicorp/vault/pull/17929)]
+* core: Add RPCs to read and update userFailedLoginInfo map
+* core: Add experiments system and `events.alpha1` experiment. [[GH-18682](https://github.com/hashicorp/vault/pull/18682)]
+* core: Add read support to `sys/loggers` and `sys/loggers/:name` endpoints [[GH-17979](https://github.com/hashicorp/vault/pull/17979)]
+* core: Add user lockout field to config and configuring this for auth mount using auth tune to prevent brute forcing in auth methods [[GH-17338](https://github.com/hashicorp/vault/pull/17338)]
+* core: Add vault.core.locked_users telemetry metric to emit information about total number of locked users. [[GH-18718](https://github.com/hashicorp/vault/pull/18718)]
+* core: Added sys/locked-users endpoint to list locked users.
Changed API endpoint from
+sys/lockedusers/[mount_accessor]/unlock/[alias_identifier] to sys/locked-users/[mount_accessor]/unlock/[alias_identifier]. [[GH-18675](https://github.com/hashicorp/vault/pull/18675)]
+* core: Added sys/lockedusers/[mount_accessor]/unlock/[alias_identifier] endpoint to unlock a user
+with given mount_accessor and alias_identifier if locked [[GH-18279](https://github.com/hashicorp/vault/pull/18279)]
+* core: Added warning to /sys/seal-status and vault status command if potentially dangerous behaviour overrides are being used. [[GH-17855](https://github.com/hashicorp/vault/pull/17855)]
+* core: Implemented background thread to update locked user entries every 15 minutes to prevent brute forcing in auth methods. [[GH-18673](https://github.com/hashicorp/vault/pull/18673)]
+* core: License location is no longer cache exempt, meaning sys/health will not contribute as greatly to storage load when using consul as a storage backend. [[GH-17265](https://github.com/hashicorp/vault/pull/17265)]
+* core: Update protoc from 3.21.5 to 3.21.7 [[GH-17499](https://github.com/hashicorp/vault/pull/17499)]
+* core: add `detect_deadlocks` config to optionally detect core state deadlocks [[GH-18604](https://github.com/hashicorp/vault/pull/18604)]
+* core: added changes for user lockout workflow. [[GH-17951](https://github.com/hashicorp/vault/pull/17951)]
+* core: parallelize backend initialization to improve startup time for large numbers of mounts. [[GH-18244](https://github.com/hashicorp/vault/pull/18244)]
+* database/postgres: Support multiline strings for revocation statements. [[GH-18632](https://github.com/hashicorp/vault/pull/18632)]
+* database/redis-elasticache: changed config argument names for disambiguation [[GH-19044](https://github.com/hashicorp/vault/pull/19044)]
+* database/snowflake: Allow parallel requests to Snowflake [[GH-17593](https://github.com/hashicorp/vault/pull/17593)]
+* hcp/connectivity: Add foundational OSS support for opt-in secure communication between self-managed Vault nodes and [HashiCorp Cloud Platform](https://cloud.hashicorp.com) [[GH-18228](https://github.com/hashicorp/vault/pull/18228)]
+* hcp/connectivity: Include HCP organization, project, and resource ID in server startup logs [[GH-18315](https://github.com/hashicorp/vault/pull/18315)]
+* hcp/connectivity: Only update SCADA session metadata if status changes [[GH-18585](https://github.com/hashicorp/vault/pull/18585)]
+* hcp/status: Add cluster-level status information [[GH-18351](https://github.com/hashicorp/vault/pull/18351)]
+* hcp/status: Expand node-level status information [[GH-18302](https://github.com/hashicorp/vault/pull/18302)]
+* logging: Vault Agent supports logging to a specified file path via environment variable, CLI or config [[GH-17841](https://github.com/hashicorp/vault/pull/17841)]
+* logging: Vault agent and server commands support log file and log rotation. [[GH-18031](https://github.com/hashicorp/vault/pull/18031)]
+* migration: allow parallelization of key migration for `vault operator migrate` in order to speed up a migration. [[GH-18817](https://github.com/hashicorp/vault/pull/18817)]
+* namespaces (enterprise): Add new API, `sys/config/group-policy-application`, to allow group policies to be configurable
+to apply to a group in `any` namespace. The default, `within_namespace_hierarchy`, is the current behaviour.
+* openapi: Add default values to thing_mount_path parameters [[GH-18935](https://github.com/hashicorp/vault/pull/18935)]
+* openapi: Add logic to generate openapi response structures [[GH-18192](https://github.com/hashicorp/vault/pull/18192)]
+* openapi: Add openapi response definitions to approle/path_login.go & approle/path_tidy_user_id.go [[GH-18772](https://github.com/hashicorp/vault/pull/18772)]
+* openapi: Add openapi response definitions to approle/path_role.go [[GH-18198](https://github.com/hashicorp/vault/pull/18198)]
+* openapi: Change gen_openapi.sh to generate schema with generic mount paths [[GH-18934](https://github.com/hashicorp/vault/pull/18934)]
+* openapi: Mark request body objects as required [[GH-17909](https://github.com/hashicorp/vault/pull/17909)]
+* openapi: add openapi response definitions to /sys/audit endpoints [[GH-18456](https://github.com/hashicorp/vault/pull/18456)]
+* openapi: generic_mount_paths: Move implementation fully into server, rather than partially in plugin framework; recognize all 4 singleton mounts (auth/token, cubbyhole, identity, system) rather than just 2; change parameter from `{mountPath}` to `{_mount_path}` [[GH-18663](https://github.com/hashicorp/vault/pull/18663)]
+* plugins: Add plugin version information to key plugin lifecycle log lines. [[GH-17430](https://github.com/hashicorp/vault/pull/17430)]
+* plugins: Allow selecting builtin plugins by their reported semantic version of the form `vX.Y.Z+builtin` or `vX.Y.Z+builtin.vault`. [[GH-17289](https://github.com/hashicorp/vault/pull/17289)]
+* plugins: Let Vault unseal and mount deprecated builtin plugins in a
+deactivated state if this is not the first unseal after an upgrade. [[GH-17879](https://github.com/hashicorp/vault/pull/17879)]
+* plugins: Mark app-id auth method Removed and remove the plugin code. [[GH-18039](https://github.com/hashicorp/vault/pull/18039)]
+* plugins: Mark logical database plugins Removed and remove the plugin code. [[GH-18039](https://github.com/hashicorp/vault/pull/18039)]
+* sdk/ldap: Added support for paging when searching for groups using group filters [[GH-17640](https://github.com/hashicorp/vault/pull/17640)]
+* sdk: Add response schema validation method framework/FieldData.ValidateStrict and two test helpers (ValidateResponse, ValidateResponseData) [[GH-18635](https://github.com/hashicorp/vault/pull/18635)]
+* sdk: Adding FindResponseSchema test helper to assist with response schema validation in tests [[GH-18636](https://github.com/hashicorp/vault/pull/18636)]
+* secrets/aws: Update dependencies [[PR-17747](https://github.com/hashicorp/vault/pull/17747)] [[GH-17747](https://github.com/hashicorp/vault/pull/17747)]
+* secrets/azure: Adds ability to persist an application for the lifetime of a role.
[[GH-19096](https://github.com/hashicorp/vault/pull/19096)] +* secrets/azure: upgrades dependencies [[GH-17964](https://github.com/hashicorp/vault/pull/17964)] +* secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters [[GH-18799](https://github.com/hashicorp/vault/pull/18799)] +* secrets/gcp: Upgrades dependencies [[GH-17871](https://github.com/hashicorp/vault/pull/17871)] +* secrets/kubernetes: Add /check endpoint to determine if environment variables are set [[GH-18](https://github.com/hashicorp/vault-plugin-secrets-kubernetes/pull/18)] [[GH-18587](https://github.com/hashicorp/vault/pull/18587)] +* secrets/kubernetes: add /check endpoint to determine if environment variables are set [[GH-19084](https://github.com/hashicorp/vault/pull/19084)] +* secrets/kv: Emit events on write if events system enabled [[GH-19145](https://github.com/hashicorp/vault/pull/19145)] +* secrets/kv: make upgrade synchronous when no keys to upgrade [[GH-19056](https://github.com/hashicorp/vault/pull/19056)] +* secrets/kv: new KVv2 mounts and KVv1 mounts without any keys will upgrade synchronously, allowing for instant use [[GH-17406](https://github.com/hashicorp/vault/pull/17406)] +* secrets/pki: Add a new API that returns the serial numbers of revoked certificates on the local cluster [[GH-17779](https://github.com/hashicorp/vault/pull/17779)] +* secrets/pki: Add support to specify signature bits when generating CSRs through intermediate/generate apis [[GH-17388](https://github.com/hashicorp/vault/pull/17388)] +* secrets/pki: Added a new API that allows external actors to craft a CRL through JSON parameters [[GH-18040](https://github.com/hashicorp/vault/pull/18040)] +* secrets/pki: Allow UserID Field (https://www.rfc-editor.org/rfc/rfc1274#section-9.3.1) to be set on Certificates when +allowed by role [[GH-18397](https://github.com/hashicorp/vault/pull/18397)] +* secrets/pki: Allow issuer creation, import to change default issuer via `default_follows_latest_issuer`. [[GH-17824](https://github.com/hashicorp/vault/pull/17824)] +* secrets/pki: Allow templating performance replication cluster- and issuer-specific AIA URLs. [[GH-18199](https://github.com/hashicorp/vault/pull/18199)] +* secrets/pki: Allow tidying of expired issuer certificates. [[GH-17823](https://github.com/hashicorp/vault/pull/17823)] +* secrets/pki: Allow tidying of the legacy ca_bundle, improving startup on post-migrated, seal-wrapped PKI mounts. [[GH-18645](https://github.com/hashicorp/vault/pull/18645)] +* secrets/pki: Respond with written data to `config/auto-tidy`, `config/crl`, and `roles/:role`. [[GH-18222](https://github.com/hashicorp/vault/pull/18222)] +* secrets/pki: Return issuer_id and issuer_name on /issuer/:issuer_ref/json endpoint. [[GH-18482](https://github.com/hashicorp/vault/pull/18482)] +* secrets/pki: Return new fields revocation_time_rfc3339 and issuer_id to existing certificate serial lookup api if it is revoked [[GH-17774](https://github.com/hashicorp/vault/pull/17774)] +* secrets/ssh: Allow removing SSH host keys from the dynamic keys feature. [[GH-18939](https://github.com/hashicorp/vault/pull/18939)] +* secrets/ssh: Evaluate ssh validprincipals user template before splitting [[GH-16622](https://github.com/hashicorp/vault/pull/16622)] +* secrets/transit: Add an optional reference field to batch operation items +which is repeated on batch responses to help more easily correlate inputs with outputs. 
[[GH-18243](https://github.com/hashicorp/vault/pull/18243)] (see the Go sketch after this list)
+* secrets/transit: Add associated_data parameter for additional authenticated data in AEAD ciphers [[GH-17638](https://github.com/hashicorp/vault/pull/17638)]
+* secrets/transit: Add support for PKCSv1_5_NoOID RSA signatures [[GH-17636](https://github.com/hashicorp/vault/pull/17636)]
+* secrets/transit: Allow configuring whether upsert of keys is allowed. [[GH-18272](https://github.com/hashicorp/vault/pull/18272)]
+* storage/raft: Add `retry_join_as_non_voter` config option. [[GH-18030](https://github.com/hashicorp/vault/pull/18030)]
+* storage/raft: add additional raft metrics relating to applied index and heartbeating; also ensure OSS standbys emit periodic metrics. [[GH-12166](https://github.com/hashicorp/vault/pull/12166)]
+* sys/internal/inspect: Creates an endpoint to inspect internal subsystems. [[GH-17789](https://github.com/hashicorp/vault/pull/17789)]
+* ui: Add algorithm-signer as an SSH Secrets Engine UI field [[GH-10299](https://github.com/hashicorp/vault/pull/10299)]
+* ui: Add inline policy creation when creating an identity entity or group [[GH-17749](https://github.com/hashicorp/vault/pull/17749)]
+* ui: Added JWT authentication warning message about blocked pop-up windows and web browser settings. [[GH-18787](https://github.com/hashicorp/vault/pull/18787)]
+* ui: Enable typescript for future development [[GH-17927](https://github.com/hashicorp/vault/pull/17927)]
+* ui: Prepends "passcode=" if not provided in user input for duo totp mfa method authentication [[GH-18342](https://github.com/hashicorp/vault/pull/18342)]
+* ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] [[GH-18350](https://github.com/hashicorp/vault/pull/18350)]
+* ui: adds allowed_response_headers as param for secret engine mount config [[GH-19216](https://github.com/hashicorp/vault/pull/19216)]
+* ui: consolidate all tag usage [[GH-17866](https://github.com/hashicorp/vault/pull/17866)]
+* ui: mfa: use proper request id generation [[GH-17835](https://github.com/hashicorp/vault/pull/17835)]
+* ui: remove wizard [[GH-19220](https://github.com/hashicorp/vault/pull/19220)]
+* ui: update DocLink component to use new host url: developer.hashicorp.com [[GH-18374](https://github.com/hashicorp/vault/pull/18374)]
+* ui: update TTL picker for consistency [[GH-18114](https://github.com/hashicorp/vault/pull/18114)]
+* ui: use the combined activity log (partial + historic) API for client count dashboard and remove use of monthly endpoint [[GH-17575](https://github.com/hashicorp/vault/pull/17575)]
+* vault/diagnose: Upgrade `go.opentelemetry.io/otel`, `go.opentelemetry.io/otel/sdk`, `go.opentelemetry.io/otel/trace` to v1.11.2 [[GH-18589](https://github.com/hashicorp/vault/pull/18589)]
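+
+The transit `reference` field noted above can be exercised with a short Go sketch; the `transit/` mount path and the `my-key` key name are placeholders, and the response handling assumes the documented `batch_results` shape:
+
+```go
+package main
+
+import (
+    "encoding/base64"
+    "fmt"
+    "log"
+
+    vault "github.com/hashicorp/vault/api"
+)
+
+func main() {
+    client, err := vault.NewClient(vault.DefaultConfig())
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    b64 := base64.StdEncoding.EncodeToString
+    // Each batch item carries a caller-chosen reference, which the server
+    // repeats on the matching batch_results entry.
+    secret, err := client.Logical().Write("transit/encrypt/my-key", map[string]interface{}{
+        "batch_input": []map[string]interface{}{
+            {"plaintext": b64([]byte("first")), "reference": "item-1"},
+            {"plaintext": b64([]byte("second")), "reference": "item-2"},
+        },
+    })
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    results, ok := secret.Data["batch_results"].([]interface{})
+    if !ok {
+        log.Fatal("unexpected response shape")
+    }
+    for _, r := range results {
+        item := r.(map[string]interface{})
+        fmt.Println(item["reference"], item["ciphertext"])
+    }
+}
+```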
+
+DEPRECATIONS:
+
+* secrets/ad: Marks the Active Directory (AD) secrets engine as deprecated. [[GH-19334](https://github.com/hashicorp/vault/pull/19334)]
+
+BUG FIXES:
+
+* api: Remove timeout logic from ReadRaw functions and add ReadRawWithContext [[GH-18708](https://github.com/hashicorp/vault/pull/18708)] (see the Go sketch below)
+* auth/alicloud: fix regression in vault login command that caused login to fail [[GH-19005](https://github.com/hashicorp/vault/pull/19005)]
+* auth/approle: Add nil check for the secret ID entry when deleting via secret ID accessor, preventing cross-role secret ID deletion [[GH-19186](https://github.com/hashicorp/vault/pull/19186)]
+* auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID [[GH-18145](https://github.com/hashicorp/vault/pull/18145)]
+* auth/cert: Address a race condition accessing the loaded crls without a lock [[GH-18945](https://github.com/hashicorp/vault/pull/18945)]
+* auth/kubernetes: Ensure a consistent TLS configuration for all k8s API requests [[#173](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/173)] [[GH-18716](https://github.com/hashicorp/vault/pull/18716)]
+* auth/kubernetes: fixes and dep updates for the auth-kubernetes plugin (see plugin changelog for details) [[GH-19094](https://github.com/hashicorp/vault/pull/19094)]
+* auth/okta: fix a panic for AuthRenew in Okta [[GH-18011](https://github.com/hashicorp/vault/pull/18011)]
+* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)]
+* cli/kv: skip formatting of nil secrets for patch and put with field parameter set [[GH-18163](https://github.com/hashicorp/vault/pull/18163)]
+* cli/pki: Decode integer values properly in health-check configuration file [[GH-19265](https://github.com/hashicorp/vault/pull/19265)]
+* cli/pki: Fix path for role health-check warning messages [[GH-19274](https://github.com/hashicorp/vault/pull/19274)]
+* cli/pki: Properly report permission issues within health-check mount tune checks [[GH-19276](https://github.com/hashicorp/vault/pull/19276)]
+* cli/transit: Fix import, import-version command invocation [[GH-19373](https://github.com/hashicorp/vault/pull/19373)]
+* cli: Fix issue preventing kv commands from executing properly when the mount path provided by `-mount` flag and secret key path are the same. [[GH-17679](https://github.com/hashicorp/vault/pull/17679)]
+* cli: Fix vault read handling to return raw data as secret.Data when there is no top-level data object from api response. [[GH-17913](https://github.com/hashicorp/vault/pull/17913)]
+* cli: Remove empty table heading for `vault secrets list -detailed` output. [[GH-17577](https://github.com/hashicorp/vault/pull/17577)]
+* command/namespace: Fix vault cli namespace patch examples in help text. [[GH-18143](https://github.com/hashicorp/vault/pull/18143)]
+* core (enterprise): Fix missing quotation mark in error message
+* core (enterprise): Fix panic that could occur with SSCT alongside invoking external plugins for revocation.
+* core (enterprise): Fix panic when using invalid accessor for control-group request
+* core (enterprise): Fix perf standby WAL streaming silent failures when replication setup happens at a bad time.
+* core (enterprise): Supported storage check in `vault server` command will no longer prevent startup. Instead, a warning will be logged if configured to use a storage backend other than `raft` or `consul`.
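+
+A short Go sketch of the `ReadRawWithContext` addition noted at the top of this list; the caller's context, rather than the removed ReadRaw timeout logic, now bounds the request:
+
+```go
+package main
+
+import (
+    "context"
+    "fmt"
+    "io"
+    "log"
+    "time"
+
+    vault "github.com/hashicorp/vault/api"
+)
+
+func main() {
+    client, err := vault.NewClient(vault.DefaultConfig())
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    // The caller's context, not client-side timeout logic, bounds the read.
+    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+    defer cancel()
+
+    resp, err := client.Logical().ReadRawWithContext(ctx, "sys/health")
+    if err != nil {
+        log.Fatal(err)
+    }
+    defer resp.Body.Close()
+
+    body, err := io.ReadAll(resp.Body)
+    if err != nil {
+        log.Fatal(err)
+    }
+    fmt.Println(string(body))
+}
+```
+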
+* core/activity: add namespace breakdown for new clients when date range spans multiple months, including the current month. [[GH-18766](https://github.com/hashicorp/vault/pull/18766)]
+* core/activity: de-duplicate namespaces when historical and current month data are mixed [[GH-18452](https://github.com/hashicorp/vault/pull/18452)]
+* core/activity: fix the end_date returned from the activity log endpoint when partial counts are computed [[GH-17856](https://github.com/hashicorp/vault/pull/17856)]
+* core/activity: include mount counts when de-duplicating current and historical month data [[GH-18598](https://github.com/hashicorp/vault/pull/18598)]
+* core/activity: report mount paths (rather than mount accessors) in current month activity log counts and include deleted mount paths in precomputed queries. [[GH-18916](https://github.com/hashicorp/vault/pull/18916)]
+* core/activity: return partial month counts when querying a historical date range and no historical data exists. [[GH-17935](https://github.com/hashicorp/vault/pull/17935)]
+* core/auth: Return a 403 instead of a 500 for wrapping requests when token is not provided [[GH-18859](https://github.com/hashicorp/vault/pull/18859)]
+* core/managed-keys (enterprise): Limit verification checks to mounts in a key's namespace
+* core/managed-keys (enterprise): Return better error messages when encountering key creation failures
+* core/managed-keys (enterprise): Switch to using hash length as PSS Salt length within the test/sign api for better PKCS#11 compatibility
+* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas.
+* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas.
+* core/quotas: Fix issue with improper application of default rate limit quota exempt paths [[GH-18273](https://github.com/hashicorp/vault/pull/18273)]
+* core/seal: Fix regression handling of the key_id parameter in seal configuration HCL. [[GH-17612](https://github.com/hashicorp/vault/pull/17612)]
+* core: Fix panic caused in Vault Agent when rendering certificate templates [[GH-17419](https://github.com/hashicorp/vault/pull/17419)]
+* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)]
+* core: Fix spurious `permission denied` for all HelpOperations on sudo-protected paths [[GH-18568](https://github.com/hashicorp/vault/pull/18568)]
+* core: Fix vault operator init command to show the right curl string with -output-curl-string and the right policy HCL with -output-policy [[GH-17514](https://github.com/hashicorp/vault/pull/17514)]
+* core: Fixes spurious warnings being emitted relating to "unknown or unsupported fields" for JSON config [[GH-17660](https://github.com/hashicorp/vault/pull/17660)]
+* core: Linux packages now have a vendor label and set the default label to HashiCorp.
+This fix is implemented for any future releases, but will not be updated for historical releases.
+* core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null [[GH-18951](https://github.com/hashicorp/vault/pull/18951)]
+* core: Refactor lock grabbing code to simplify stateLock deadlock investigations [[GH-17187](https://github.com/hashicorp/vault/pull/17187)]
+* core: fix GPG encryption to support subkeys.
[[GH-16224](https://github.com/hashicorp/vault/pull/16224)] +* core: fix a start up race condition where performance standbys could go into a +mount loop if default policies are not yet synced from the active node. [[GH-17801](https://github.com/hashicorp/vault/pull/17801)] +* core: fix bug where context cancellations weren't forwarded to active node from performance standbys. +* core: fix race when using SystemView.ReplicationState outside of a request context [[GH-17186](https://github.com/hashicorp/vault/pull/17186)] +* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] +* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] +* core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted [[GH-18923](https://github.com/hashicorp/vault/pull/18923)] +* core: trying to unseal with the wrong key now returns HTTP 400 [[GH-17836](https://github.com/hashicorp/vault/pull/17836)] +* credential/cert: adds error message if no tls connection is found during the AliasLookahead operation [[GH-17904](https://github.com/hashicorp/vault/pull/17904)] +* database/mongodb: Fix writeConcern set to be applied to any query made on the database [[GH-18546](https://github.com/hashicorp/vault/pull/18546)] +* expiration: Prevent panics on perf standbys when an irrevocable lease gets deleted. [[GH-18401](https://github.com/hashicorp/vault/pull/18401)] +* kmip (enterprise): Fix a problem with some multi-part MAC Verify operations. +* kmip (enterprise): Only require data to be full blocks on encrypt/decrypt operations using CBC and ECB block cipher modes. +* license (enterprise): Fix bug where license would update even if the license didn't change. +* licensing (enterprise): update autoloaded license cache after reload +* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)] +* openapi: Fix many incorrect details in generated API spec, by using better techniques to parse path regexps [[GH-18554](https://github.com/hashicorp/vault/pull/18554)] +* openapi: fix gen_openapi.sh script to correctly load vault plugins [[GH-17752](https://github.com/hashicorp/vault/pull/17752)] +* plugins/kv: KV v2 returns 404 instead of 500 for request paths that incorrectly include a trailing slash. [[GH-17339](https://github.com/hashicorp/vault/pull/17339)] +* plugins: Allow running external plugins which override deprecated builtins. [[GH-17879](https://github.com/hashicorp/vault/pull/17879)] +* plugins: Corrected the path to check permissions on when the registered plugin name does not match the plugin binary's filename. [[GH-17340](https://github.com/hashicorp/vault/pull/17340)] +* plugins: Listing all plugins while audit logging is enabled will no longer result in an internal server error. [[GH-18173](https://github.com/hashicorp/vault/pull/18173)] +* plugins: Only report deprecation status for builtin plugins. [[GH-17816](https://github.com/hashicorp/vault/pull/17816)] +* plugins: Skip loading but still mount data associated with missing plugins on unseal. [[GH-18189](https://github.com/hashicorp/vault/pull/18189)] +* plugins: Vault upgrades will no longer fail if a mount has been created using an explicit builtin plugin version. 
[[GH-18051](https://github.com/hashicorp/vault/pull/18051)]
+* replication (enterprise): Fix bug where reloading an external plugin on a secondary would
+break replication.
+* sdk: Don't panic if system view or storage methods are called during plugin setup. [[GH-18210](https://github.com/hashicorp/vault/pull/18210)]
+* secret/pki: fix bug with initial legacy bundle migration (from < 1.11 into 1.11+) and missing issuers from ca_chain [[GH-17772](https://github.com/hashicorp/vault/pull/17772)]
+* secrets/ad: Fix bug where updates to config would fail if password isn't provided [[GH-19061](https://github.com/hashicorp/vault/pull/19061)]
+* secrets/gcp: fix issue where IAM bindings were not preserved during policy update [[GH-19018](https://github.com/hashicorp/vault/pull/19018)]
+* secrets/mongodb-atlas: Fix a bug that did not allow WAL rollback to handle partial failures when creating API keys [[GH-19111](https://github.com/hashicorp/vault/pull/19111)]
+* secrets/pki: Address nil panic when an empty POST request is sent to the OCSP handler [[GH-18184](https://github.com/hashicorp/vault/pull/18184)]
+* secrets/pki: Allow patching issuer to set an empty issuer name. [[GH-18466](https://github.com/hashicorp/vault/pull/18466)]
+* secrets/pki: Do not read revoked certificates from backend when CRL is disabled [[GH-17385](https://github.com/hashicorp/vault/pull/17385)]
+* secrets/pki: Fix upgrade of missing expiry, delta_rebuild_interval by setting them to the default. [[GH-17693](https://github.com/hashicorp/vault/pull/17693)]
+* secrets/pki: Fixes duplicate otherName in certificates created by the sign-verbatim endpoint. [[GH-16700](https://github.com/hashicorp/vault/pull/16700)]
+* secrets/pki: OCSP GET request parameter was not being URL unescaped before processing. [[GH-18938](https://github.com/hashicorp/vault/pull/18938)]
+* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. [[GH-17497](https://github.com/hashicorp/vault/pull/17497)]
+* secrets/pki: Revert fix for PR [18938](https://github.com/hashicorp/vault/pull/18938) [[GH-19037](https://github.com/hashicorp/vault/pull/19037)]
+* secrets/pki: consistently use UTC for CA's notAfter exceeded error message [[GH-18984](https://github.com/hashicorp/vault/pull/18984)]
+* secrets/pki: fix race between tidy's cert counting and tidy status reporting. [[GH-18899](https://github.com/hashicorp/vault/pull/18899)]
+* secrets/transit: Do not warn about unrecognized parameter 'batch_input' [[GH-18299](https://github.com/hashicorp/vault/pull/18299)]
+* secrets/transit: Honor `partial_success_response_code` on decryption failures. [[GH-18310](https://github.com/hashicorp/vault/pull/18310)]
+* server/config: Use file.Stat when checking file permissions when VAULT_ENABLE_FILE_PERMISSIONS_CHECK is enabled [[GH-19311](https://github.com/hashicorp/vault/pull/19311)]
+* storage/raft (enterprise): An already joined node can rejoin by wiping storage
+and re-issuing a join request, but in doing so could transiently become a
+non-voter. In some scenarios this resulted in loss of quorum. [[GH-18263](https://github.com/hashicorp/vault/pull/18263)]
+* storage/raft: Don't panic on unknown raft ops [[GH-17732](https://github.com/hashicorp/vault/pull/17732)]
+* storage/raft: Fix race with follower heartbeat tracker during teardown. [[GH-18704](https://github.com/hashicorp/vault/pull/18704)]
+* ui/keymgmt: Sets the defaultValue for type when creating a key.
[[GH-17407](https://github.com/hashicorp/vault/pull/17407)]
+* ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode [[GH-19071](https://github.com/hashicorp/vault/pull/19071)]
+* ui: Fixes issue with not being able to download raft snapshot via service worker [[GH-17769](https://github.com/hashicorp/vault/pull/17769)]
+* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)]
+* ui: Remove `default` and add `default-service` and `default-batch` to UI token_type for auth mount and tuning. [[GH-19290](https://github.com/hashicorp/vault/pull/19290)]
+* ui: Remove the default value of 30 from TtlPicker2 if no value is passed in. [[GH-17376](https://github.com/hashicorp/vault/pull/17376)]
+* ui: allow selection of "default" for ssh algorithm_signer in web interface [[GH-17894](https://github.com/hashicorp/vault/pull/17894)]
+* ui: cleanup unsaved auth method ember data record when navigating away from mount backend form [[GH-18651](https://github.com/hashicorp/vault/pull/18651)]
+* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)]
+* ui: fixes query parameters not passed in api explorer test requests [[GH-18743](https://github.com/hashicorp/vault/pull/18743)]
+* ui: fixes reliance on secure context (https) by removing methods using the Crypto interface [[GH-19403](https://github.com/hashicorp/vault/pull/19403)]
+* ui: show Get credentials button for static roles detail page when a user has the proper permissions. [[GH-19190](https://github.com/hashicorp/vault/pull/19190)]
+
+## 1.12.6
+### April 26, 2023
+
+CHANGES:
+
+* core: Bump Go version to 1.19.8.
+
+IMPROVEMENTS:
+
+* cli/namespace: Add a detailed flag to output additional namespace information
+such as namespace IDs and custom metadata. [[GH-20243](https://github.com/hashicorp/vault/pull/20243)]
+* core/activity: add an endpoint to write test activity log data, guarded by a build flag [[GH-20019](https://github.com/hashicorp/vault/pull/20019)]
+* core: Add a `raft` sub-field to the `storage` and `ha_storage` details provided by the
+`/sys/config/state/sanitized` endpoint in order to include the `max_entry_size` (see the Go sketch below). [[GH-20044](https://github.com/hashicorp/vault/pull/20044)]
+* sdk/ldaputil: added `connection_timeout` to tune connection timeout duration
+for all LDAP plugins. [[GH-20144](https://github.com/hashicorp/vault/pull/20144)]
+* secrets/pki: Decrease size and improve compatibility of OCSP responses by removing issuer certificate. [[GH-20201](https://github.com/hashicorp/vault/pull/20201)]
+
+BUG FIXES:
+
+* auth/ldap: Add a configurable max_page_size to the LDAP configuration [[GH-19032](https://github.com/hashicorp/vault/pull/19032)]
+* command/server: Fix incorrect paths in generated config for `-dev-tls` flag on Windows [[GH-20257](https://github.com/hashicorp/vault/pull/20257)]
+* core (enterprise): Fix intermittent issue with token entries sometimes not being found when using a newly created token in a request to a secondary, even when SSCT `new_token` forwarding is set. When this occurred, this would result in the following error to the client: `error performing token check: no lease entry found for token that ought to have one, possible eventual consistency issue`.
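+
+A quick Go sketch of reading the new `raft` sub-field from the sanitized configuration endpoint mentioned in the improvements above; the nested map shape beyond the documented `storage`/`raft` keys is an assumption:
+
+```go
+package main
+
+import (
+    "fmt"
+    "log"
+
+    vault "github.com/hashicorp/vault/api"
+)
+
+func main() {
+    client, err := vault.NewClient(vault.DefaultConfig())
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    secret, err := client.Logical().Read("sys/config/state/sanitized")
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    // The storage details are expected to carry a raft sub-field that
+    // includes max_entry_size; the nesting here is an assumption.
+    if storage, ok := secret.Data["storage"].(map[string]interface{}); ok {
+        if raft, ok := storage["raft"].(map[string]interface{}); ok {
+            fmt.Println("raft max_entry_size:", raft["max_entry_size"])
+        }
+    }
+}
+```
+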
+* core (enterprise): Fix read on perf standbys failing with 412 after leadership change, unseal, restores or restarts when no writes occur
+* core/ssct (enterprise): Fixed race condition where a newly promoted DR may revert `sscGenCounter`
+resulting in 412 errors.
+* core: Fix regression breaking non-raft clusters whose nodes share the same cluster_addr/api_addr. [[GH-19721](https://github.com/hashicorp/vault/pull/19721)]
+* helper/random: Fix race condition in string generator helper [[GH-19875](https://github.com/hashicorp/vault/pull/19875)]
+* kmip (enterprise): Fix a problem decrypting with keys that have no Process Start Date attribute.
+* openapi: Fix many incorrect details in generated API spec, by using better techniques to parse path regexps [[GH-18554](https://github.com/hashicorp/vault/pull/18554)]
+* pki: Fix automatically turning off CRL signing on upgrade to Vault >= 1.12, if CA Key Usage disallows it [[GH-20220](https://github.com/hashicorp/vault/pull/20220)]
+* replication (enterprise): Fix a caching issue when replicating filtered data to
+a performance secondary. This resulted in the data being set to nil in the cache
+and an "invalid value" error being returned from the API.
+* replication (enterprise): Fix replication status for Primary clusters showing their primary cluster's information (in case of DR) in the secondaries field when the known_secondaries field is nil
+* secrets/pki: Fix patching of leaf_not_after_behavior on issuers. [[GH-20341](https://github.com/hashicorp/vault/pull/20341)]
+* secrets/transform (enterprise): Address SQL connection leak when cleaning expired tokens
+* ui: Fix OIDC provider logo showing when domain doesn't match [[GH-20263](https://github.com/hashicorp/vault/pull/20263)]
+* ui: Fix bad link to namespace when namespace name includes `.` [[GH-19799](https://github.com/hashicorp/vault/pull/19799)]
+* ui: fixes browser console formatting for help command output [[GH-20064](https://github.com/hashicorp/vault/pull/20064)]
+* ui: remove use of htmlSafe except when first sanitized [[GH-20235](https://github.com/hashicorp/vault/pull/20235)]
+
+## 1.12.5
+### March 29, 2023
+
+SECURITY:
+
+* storage/mssql: When using Vault’s community-supported Microsoft SQL (MSSQL) database storage backend, a privileged attacker with the ability to write arbitrary data to Vault’s configuration may be able to perform arbitrary SQL commands on the underlying database server through Vault. This vulnerability, CVE-2023-0620, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-12](https://discuss.hashicorp.com/t/hcsec-2023-12-vault-s-microsoft-sql-database-storage-backend-vulnerable-to-sql-injection-via-configuration-file/52080)]
+* secrets/pki: Vault’s PKI mount issuer endpoints did not correctly authorize access to remove an issuer or modify issuer metadata, potentially resulting in denial of service of the PKI mount. This bug did not affect public or private key material, trust chains or certificate issuance. This vulnerability, CVE-2023-0665, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-11](https://discuss.hashicorp.com/t/hcsec-2023-11-vault-s-pki-issuer-endpoint-did-not-correctly-authorize-access-to-issuer-metadata/52079)]
+* core: HashiCorp Vault’s implementation of Shamir’s secret sharing used precomputed table lookups, and was vulnerable to cache-timing attacks.
An attacker with access to, and the ability to observe, a large number of unseal operations on the host through a side channel may reduce the search space of a brute force effort to recover the Shamir shares. This vulnerability, CVE-2023-25000, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-10](https://discuss.hashicorp.com/t/hcsec-2023-10-vault-vulnerable-to-cache-timing-attacks-during-seal-and-unseal-operations/52078)]
+
+IMPROVEMENTS:
+
+* auth/github: Allow for an optional Github auth token environment variable to make authenticated requests when fetching org id
+website/docs: Add docs for `VAULT_AUTH_CONFIG_GITHUB_TOKEN` environment variable when writing Github config [[GH-19244](https://github.com/hashicorp/vault/pull/19244)]
+* core: Allow overriding gRPC connect timeout via VAULT_GRPC_MIN_CONNECT_TIMEOUT. This is an env var rather than a config setting because we don't expect this to ever be needed. It's being added as a last-ditch
+option in case all else fails for some replication issues we may not have fully reproduced. [[GH-19676](https://github.com/hashicorp/vault/pull/19676)]
+* core: validate name identifiers in mssql physical storage backend prior to use [[GH-19591](https://github.com/hashicorp/vault/pull/19591)]
+
+BUG FIXES:
+
+* cli: Fix vault read handling to return raw data as secret.Data when there is no top-level data object from api response. [[GH-17913](https://github.com/hashicorp/vault/pull/17913)]
+* core (enterprise): Attempt to reconnect to a PKCS#11 HSM if we retrieve a CKR_FUNCTION_FAILED error.
+* core: Fixed issue with remounting mounts that have a non-trailing space in the 'to' or 'from' paths. [[GH-19585](https://github.com/hashicorp/vault/pull/19585)]
+* kmip (enterprise): Do not require attribute Cryptographic Usage Mask when registering Secret Data managed objects.
+* kmip (enterprise): Fix a problem forwarding some requests to the active node.
+* openapi: Fix logic for labeling unauthenticated/sudo paths. [[GH-19600](https://github.com/hashicorp/vault/pull/19600)]
+* secrets/ldap: Invalidates WAL entry for static role if `password_policy` has changed. [[GH-19641](https://github.com/hashicorp/vault/pull/19641)]
+* secrets/transform (enterprise): Fix persistence problem with rotated tokenization key versions
+* ui: fixes issue navigating back a level using the breadcrumb from secret metadata view [[GH-19703](https://github.com/hashicorp/vault/pull/19703)]
+* ui: pass encodeBase64 param to HMAC transit-key-actions. [[GH-19429](https://github.com/hashicorp/vault/pull/19429)]
+* ui: use URLSearchParams interface to capture namespace param from SSOs (ex. ADFS) with decoded state param in callback url [[GH-19460](https://github.com/hashicorp/vault/pull/19460)]
+
+## 1.12.4
+### March 01, 2023
+
+SECURITY:
+
+* auth/approle: When using the Vault and Vault Enterprise (Vault) approle auth method, any authenticated user with access to the /auth/approle/role/:role_name/secret-id-accessor/destroy endpoint can destroy the secret ID of any other role by providing the secret ID accessor. This vulnerability, CVE-2023-24999, has been fixed in Vault 1.13.0, 1.12.4, 1.11.8, 1.10.11 and above. [[HSEC-2023-07](https://discuss.hashicorp.com/t/hcsec-2023-07-vault-fails-to-verify-if-approle-secretid-belongs-to-role-during-a-destroy-operation/51305)]
+
+CHANGES:
+
+* core: Bump Go version to 1.19.6.
+
+IMPROVEMENTS:
+
+* secrets/database: Adds error message requiring password on root credential rotation.
[[GH-19103](https://github.com/hashicorp/vault/pull/19103)] +* ui: remove wizard [[GH-19220](https://github.com/hashicorp/vault/pull/19220)] + +BUG FIXES: + +* auth/approle: Add nil check for the secret ID entry when deleting via secret id accessor preventing cross role secret id deletion [[GH-19186](https://github.com/hashicorp/vault/pull/19186)] +* core (enterprise): Fix panic when using invalid accessor for control-group request +* core (enterprise): Fix perf standby WAL streaming silently failures when replication setup happens at a bad time. +* core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null [[GH-18951](https://github.com/hashicorp/vault/pull/18951)] +* license (enterprise): Fix bug where license would update even if the license didn't change. +* replication (enterprise): Fix bug where reloading external plugin on a secondary would +break replication. +* secrets/ad: Fix bug where config couldn't be updated unless binddn/bindpass were included in the update. [[GH-18207](https://github.com/hashicorp/vault/pull/18207)] +* secrets/pki: Revert fix for PR [18938](https://github.com/hashicorp/vault/pull/18938) [[GH-19037](https://github.com/hashicorp/vault/pull/19037)] +* server/config: Use file.Stat when checking file permissions when VAULT_ENABLE_FILE_PERMISSIONS_CHECK is enabled [[GH-19311](https://github.com/hashicorp/vault/pull/19311)] +* ui (enterprise): Fix cancel button from transform engine role creation page [[GH-19135](https://github.com/hashicorp/vault/pull/19135)] +* ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode [[GH-19071](https://github.com/hashicorp/vault/pull/19071)] +* ui: fixes reliance on secure context (https) by removing methods using the Crypto interface [[GH-19410](https://github.com/hashicorp/vault/pull/19410)] +* ui: show Get credentials button for static roles detail page when a user has the proper permissions. [[GH-19190](https://github.com/hashicorp/vault/pull/19190)] + +## 1.12.3 +### February 6, 2023 + +CHANGES: + +* core: Bump Go version to 1.19.4. + +IMPROVEMENTS: + +* audit: Include stack trace when audit logging recovers from a panic. [[GH-18121](https://github.com/hashicorp/vault/pull/18121)] +* command/server: Environment variable keys are now logged at startup. [[GH-18125](https://github.com/hashicorp/vault/pull/18125)] +* core/fips: use upstream toolchain for FIPS 140-2 compliance again; this will appear as X=boringcrypto on the Go version in Vault server logs. +* core: Add read support to `sys/loggers` and `sys/loggers/:name` endpoints [[GH-17979](https://github.com/hashicorp/vault/pull/17979)] +* plugins: Let Vault unseal and mount deprecated builtin plugins in a +deactivated state if this is not the first unseal after an upgrade. [[GH-17879](https://github.com/hashicorp/vault/pull/17879)] +* secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters [[GH-18799](https://github.com/hashicorp/vault/pull/18799)] +* secrets/kv: new KVv2 mounts and KVv1 mounts without any keys will upgrade synchronously, allowing for instant use [[GH-17406](https://github.com/hashicorp/vault/pull/17406)] +* storage/raft: add additional raft metrics relating to applied index and heartbeating; also ensure OSS standbys emit periodic metrics. [[GH-12166](https://github.com/hashicorp/vault/pull/12166)] +* ui: Added JWT authentication warning message about blocked pop-up windows and web browser settings. 
[[GH-18787](https://github.com/hashicorp/vault/pull/18787)] +* ui: Prepends "passcode=" if not provided in user input for duo totp mfa method authentication [[GH-18342](https://github.com/hashicorp/vault/pull/18342)] +* ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] [[GH-18350](https://github.com/hashicorp/vault/pull/18350)] + +BUG FIXES: + +* auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID [[GH-18145](https://github.com/hashicorp/vault/pull/18145)] +* auth/cert: Address a race condition accessing the loaded crls without a lock [[GH-18945](https://github.com/hashicorp/vault/pull/18945)] +* auth/kubernetes: Ensure a consistent TLS configuration for all k8s API requests [[#173](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/173)] [[GH-18716](https://github.com/hashicorp/vault/pull/18716)] +* cli/kv: skip formatting of nil secrets for patch and put with field parameter set [[GH-18163](https://github.com/hashicorp/vault/pull/18163)] +* command/namespace: Fix vault cli namespace patch examples in help text. [[GH-18143](https://github.com/hashicorp/vault/pull/18143)] +* core (enterprise): Fix a race condition resulting in login errors to PKCS#11 modules under high concurrency. +* core/managed-keys (enterprise): Limit verification checks to mounts in a key's namespace +* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas. +* core/quotas: Fix issue with improper application of default rate limit quota exempt paths [[GH-18273](https://github.com/hashicorp/vault/pull/18273)] +* core/seal: Fix regression handling of the key_id parameter in seal configuration HCL. [[GH-17612](https://github.com/hashicorp/vault/pull/17612)] +* core: fix bug where context cancellations weren't forwarded to active node from performance standbys. +* core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted [[GH-18923](https://github.com/hashicorp/vault/pull/18923)] +* database/mongodb: Fix writeConcern set to be applied to any query made on the database [[GH-18546](https://github.com/hashicorp/vault/pull/18546)] +* expiration: Prevent panics on perf standbys when an irrevocable lease gets deleted. [[GH-18401](https://github.com/hashicorp/vault/pull/18401)] +* kmip (enterprise): Fix Destroy operation response that omitted Unique Identifier on some batched responses. +* kmip (enterprise): Fix Locate operation response incompatibility with clients using KMIP versions prior to 1.3. +* kmip (enterprise): Fix Query operation response that omitted streaming capability and supported profiles. +* licensing (enterprise): update autoloaded license cache after reload +* plugins: Allow running external plugins which override deprecated builtins. [[GH-17879](https://github.com/hashicorp/vault/pull/17879)] +* plugins: Listing all plugins while audit logging is enabled will no longer result in an internal server error. [[GH-18173](https://github.com/hashicorp/vault/pull/18173)] +* plugins: Skip loading but still mount data associated with missing plugins on unseal. [[GH-18189](https://github.com/hashicorp/vault/pull/18189)] +* sdk: Don't panic if system view or storage methods called during plugin setup. 
[[GH-18210](https://github.com/hashicorp/vault/pull/18210)]
+* secrets/pki: Address nil panic when an empty POST request is sent to the OCSP handler [[GH-18184](https://github.com/hashicorp/vault/pull/18184)]
+* secrets/pki: Allow patching issuer to set an empty issuer name. [[GH-18466](https://github.com/hashicorp/vault/pull/18466)]
+* secrets/pki: OCSP GET request parameter was not being URL unescaped before processing. [[GH-18938](https://github.com/hashicorp/vault/pull/18938)]
+* secrets/pki: fix race between tidy's cert counting and tidy status reporting. [[GH-18899](https://github.com/hashicorp/vault/pull/18899)]
+* secrets/transit: Do not warn about unrecognized parameter 'batch_input' [[GH-18299](https://github.com/hashicorp/vault/pull/18299)]
+* secrets/transit: Honor `partial_success_response_code` on decryption failures. [[GH-18310](https://github.com/hashicorp/vault/pull/18310)]
+* storage/raft (enterprise): An already joined node can rejoin by wiping storage
+and re-issuing a join request, but in doing so could transiently become a
+non-voter. In some scenarios this resulted in loss of quorum. [[GH-18263](https://github.com/hashicorp/vault/pull/18263)]
+* storage/raft: Don't panic on unknown raft ops [[GH-17732](https://github.com/hashicorp/vault/pull/17732)]
+* ui: cleanup unsaved auth method ember data record when navigating away from mount backend form [[GH-18651](https://github.com/hashicorp/vault/pull/18651)]
+* ui: fixes query parameters not passed in api explorer test requests [[GH-18743](https://github.com/hashicorp/vault/pull/18743)]
+
+## 1.12.2
+### November 30, 2022
+
+CHANGES:
+
+* core: Bump Go version to 1.19.3.
+* plugins: Mounts can no longer be pinned to a specific _builtin_ version. Mounts previously pinned to a specific builtin version will now automatically upgrade to the latest builtin version, and may now be overridden if an unversioned plugin of the same name and type is registered. Mounts using plugin versions without `builtin` in their metadata remain unaffected. [[GH-18051](https://github.com/hashicorp/vault/pull/18051)]
+
+IMPROVEMENTS:
+
+* secrets/pki: Allow issuer creation, import to change default issuer via `default_follows_latest_issuer`. [[GH-17824](https://github.com/hashicorp/vault/pull/17824)]
+* storage/raft: Add `retry_join_as_non_voter` config option. [[GH-18030](https://github.com/hashicorp/vault/pull/18030)]
+
+BUG FIXES:
+
+* auth/okta: fix a panic for AuthRenew in Okta [[GH-18011](https://github.com/hashicorp/vault/pull/18011)]
+* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)]
+* cli: Fix issue preventing kv commands from executing properly when the mount path provided by `-mount` flag and secret key path are the same. [[GH-17679](https://github.com/hashicorp/vault/pull/17679)]
+* core (enterprise): Supported storage check in `vault server` command will no longer prevent startup. Instead, a warning will be logged if configured to use a storage backend other than `raft` or `consul`.
+* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas.
+* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)]
+* core: fix a start up race condition where performance standbys could go into a
+mount loop if default policies are not yet synced from the active node.
[[GH-17801](https://github.com/hashicorp/vault/pull/17801)] +* plugins: Only report deprecation status for builtin plugins. [[GH-17816](https://github.com/hashicorp/vault/pull/17816)] +* plugins: Vault upgrades will no longer fail if a mount has been created using an explicit builtin plugin version. [[GH-18051](https://github.com/hashicorp/vault/pull/18051)] +* secret/pki: fix bug with initial legacy bundle migration (from < 1.11 into 1.11+) and missing issuers from ca_chain [[GH-17772](https://github.com/hashicorp/vault/pull/17772)] +* secrets/azure: add WAL to clean up role assignments if errors occur [[GH-18086](https://github.com/hashicorp/vault/pull/18086)] +* secrets/gcp: Fixes duplicate service account key for rotate root on standby or secondary [[GH-18111](https://github.com/hashicorp/vault/pull/18111)] +* secrets/pki: Fix upgrade of missing expiry, delta_rebuild_interval by setting them to the default. [[GH-17693](https://github.com/hashicorp/vault/pull/17693)] +* ui: Fixes issue with not being able to download raft snapshot via service worker [[GH-17769](https://github.com/hashicorp/vault/pull/17769)] +* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)] + +## 1.12.1 +### November 2, 2022 + +IMPROVEMENTS: + +* api: Support VAULT_DISABLE_REDIRECTS environment variable (and --disable-redirects flag) to disable default client behavior and prevent the client following any redirection responses. [[GH-17352](https://github.com/hashicorp/vault/pull/17352)] +* database/snowflake: Allow parallel requests to Snowflake [[GH-17593](https://github.com/hashicorp/vault/pull/17593)] +* plugins: Add plugin version information to key plugin lifecycle log lines. [[GH-17430](https://github.com/hashicorp/vault/pull/17430)] +* sdk/ldap: Added support for paging when searching for groups using group filters [[GH-17640](https://github.com/hashicorp/vault/pull/17640)] + +BUG FIXES: + +* cli: Remove empty table heading for `vault secrets list -detailed` output. [[GH-17577](https://github.com/hashicorp/vault/pull/17577)] +* core/managed-keys (enterprise): Return better error messages when encountering key creation failures +* core/managed-keys (enterprise): Switch to using hash length as PSS Salt length within the test/sign api for better PKCS#11 compatibility +* core: Fix panic caused in Vault Agent when rendering certificate templates [[GH-17419](https://github.com/hashicorp/vault/pull/17419)] +* core: Fixes spurious warnings being emitted relating to "unknown or unsupported fields" for JSON config [[GH-17660](https://github.com/hashicorp/vault/pull/17660)] +* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] +* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] +* kmip (enterprise): Fix a problem in the handling of attributes that caused Import operations to fail. +* kmip (enterprise): Fix selection of Cryptographic Parameters for Encrypt/Decrypt operations. +* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)] +* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. 
[[GH-17497](https://github.com/hashicorp/vault/pull/17497)]
+* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)]
+
+## 1.12.0
+### October 13, 2022
+
+SECURITY:
+
+* secrets/pki: Vault’s TLS certificate auth method did not initially load the optionally-configured CRL issued by the role’s CA into memory on startup, resulting in the revocation list not being checked, if the CRL has not yet been retrieved. This vulnerability, CVE-2022-41316, is fixed in Vault 1.12.0, 1.11.4, 1.10.7, and 1.9.10. [[HSEC-2022-24](https://discuss.hashicorp.com/t/hcsec-2022-24-vaults-tls-cert-auth-method-only-loaded-crl-after-first-request/45483)]
+
+CHANGES:
+
+* api: Exclusively use `GET /sys/plugins/catalog` endpoint for listing plugins, and add `details` field to list responses. [[GH-17347](https://github.com/hashicorp/vault/pull/17347)]
+* auth: `GET /sys/auth/:name` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)]
+* auth: `GET /sys/auth` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)]
+* auth: `POST /sys/auth/:type` endpoint response contains a warning for `Deprecated` auth methods. [[GH-17058](https://github.com/hashicorp/vault/pull/17058)]
+* auth: `auth enable` returns an error and `POST /sys/auth/:type` endpoint reports an error for `Pending Removal` auth methods. [[GH-17005](https://github.com/hashicorp/vault/pull/17005)]
+* core/entities: Fixed stranding of aliases upon entity merge, and now requires explicit selection of which aliases should be kept when some must be deleted [[GH-16539](https://github.com/hashicorp/vault/pull/16539)]
+* core: Bump Go version to 1.19.2.
+* core: Validate input parameters for vault operator init command. The Vault 1.12 CLI version is now required to run operator init. [[GH-16379](https://github.com/hashicorp/vault/pull/16379)]
+* identity: a request to `/identity/group` that includes `member_group_ids` that contains a cycle will now be responded to with a 400 rather than 500 [[GH-15912](https://github.com/hashicorp/vault/pull/15912)]
+* licensing (enterprise): Terminated licenses will no longer result in shutdown. Instead, upgrades will not be allowed if the license expiration time is before the build date of the binary.
+* plugins: Add plugin version to auth register, list, and mount table [[GH-16856](https://github.com/hashicorp/vault/pull/16856)]
+* plugins: `GET /sys/plugins/catalog/:type/:name` endpoint contains deprecation status for builtin plugins. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)]
+* plugins: `GET /sys/plugins/catalog/:type/:name` endpoint now returns an additional `version` field in the response data. [[GH-16688](https://github.com/hashicorp/vault/pull/16688)]
+* plugins: `GET /sys/plugins/catalog/` endpoint contains deprecation status in `detailed` list. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)]
+* plugins: `GET /sys/plugins/catalog` endpoint now returns an additional `detailed` field in the response data with a list of additional plugin metadata. [[GH-16688](https://github.com/hashicorp/vault/pull/16688)]
+* plugins: `plugin info` displays deprecation status for builtin plugins.
[[GH-17077](https://github.com/hashicorp/vault/pull/17077)]
+* plugins: `plugin list` now accepts a `-detailed` flag, which displays deprecation status and version info. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)]
+* secrets/azure: Removed deprecated AAD graph API support from the secrets engine. [[GH-17180](https://github.com/hashicorp/vault/pull/17180)]
+* secrets: All database-specific (standalone DB) secrets engines are now marked `Pending Removal`. [[GH-17038](https://github.com/hashicorp/vault/pull/17038)]
+* secrets: `GET /sys/mounts/:name` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)]
+* secrets: `GET /sys/mounts` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)]
+* secrets: `POST /sys/mounts/:type` endpoint response contains a warning for `Deprecated` secrets engines. [[GH-17058](https://github.com/hashicorp/vault/pull/17058)]
+* secrets: `secrets enable` returns an error and `POST /sys/mounts/:type` endpoint reports an error for `Pending Removal` secrets engines. [[GH-17005](https://github.com/hashicorp/vault/pull/17005)]
+
+FEATURES:
+
+* **GCP Cloud KMS support for managed keys**: Managed keys now support using GCP Cloud KMS keys
+* **LDAP Secrets Engine**: Adds the `ldap` secrets engine with service account check-out functionality for all supported schemas. [[GH-17152](https://github.com/hashicorp/vault/pull/17152)]
+* **OCSP Responder**: PKI mounts now have an OCSP responder that implements a subset of RFC6960, answering single serial number OCSP requests for a specific cluster's revoked certificates in a mount. [[GH-16723](https://github.com/hashicorp/vault/pull/16723)]
+* **Redis DB Engine**: Adds the new Redis database engine that supports the generation of static and dynamic user roles and root credential rotation on a standalone Redis server. [[GH-17070](https://github.com/hashicorp/vault/pull/17070)]
+* **Redis ElastiCache DB Plugin**: Added Redis ElastiCache as a built-in plugin. [[GH-17075](https://github.com/hashicorp/vault/pull/17075)]
+* **Secrets/auth plugin multiplexing**: manage multiple plugin configurations with a single plugin process [[GH-14946](https://github.com/hashicorp/vault/pull/14946)]
+* **Transform Key Import (BYOK)**: The transform secrets engine now supports importing keys for tokenization and FPE transformations
+* HCP (enterprise): Adding foundational support for self-managed vault nodes to securely communicate with [HashiCorp Cloud Platform](https://cloud.hashicorp.com) as an opt-in feature
+* ui: UI support for Okta Number Challenge. [[GH-15998](https://github.com/hashicorp/vault/pull/15998)]
+* **Plugin Versioning**: Vault supports registering, managing, and running plugins with semantic versions specified.
+
+IMPROVEMENTS:
+
+* core/managed-keys (enterprise): Allow operators to specify PSS signatures and/or hash algorithm for the test/sign api
+* activity (enterprise): Added new clients unit tests to test accuracy of estimates
+* agent/auto-auth: Add `exit_on_err`, which, when set to true, will cause Agent to exit if any errors are encountered during authentication. [[GH-17091](https://github.com/hashicorp/vault/pull/17091)]
+* agent: Added `disable_idle_connections` configuration to disable leaving idle connections open in auto-auth, caching and templating.
[[GH-15986](https://github.com/hashicorp/vault/pull/15986)]
+* agent: Added `disable_keep_alives` configuration to disable keep alives in auto-auth, caching and templating. [[GH-16479](https://github.com/hashicorp/vault/pull/16479)]
+* agent: JWT auto auth now supports a `remove_jwt_after_reading` config option which defaults to true. [[GH-11969](https://github.com/hashicorp/vault/pull/11969)]
+* agent: Send notifications to systemd on start and stop. [[GH-9802](https://github.com/hashicorp/vault/pull/9802)]
+* api/mfa: Add namespace path to the MFA read/list endpoint [[GH-16911](https://github.com/hashicorp/vault/pull/16911)]
+* api: Add a sentinel error for missing KV secrets [[GH-16699](https://github.com/hashicorp/vault/pull/16699)]
+* auth/alicloud: Enables AliCloud roles to be compatible with Vault's role based quotas. [[GH-17251](https://github.com/hashicorp/vault/pull/17251)]
+* auth/approle: SecretIDs can now be generated with a per-request specified TTL and num_uses.
+When the ttl or num_uses field is not specified, the role's configuration is used (see the Go sketch below). [[GH-14474](https://github.com/hashicorp/vault/pull/14474)]
+* auth/aws: PKCS7 signatures will now use SHA256 by default in prep for Go 1.18 [[GH-16455](https://github.com/hashicorp/vault/pull/16455)]
+* auth/azure: Enables Azure roles to be compatible with Vault's role based quotas. [[GH-17194](https://github.com/hashicorp/vault/pull/17194)]
+* auth/cert: Add metadata to identity-alias [[GH-14751](https://github.com/hashicorp/vault/pull/14751)]
+* auth/cert: Operators can now specify a CRL distribution point URL, in which case the cert auth engine will fetch and use the CRL from that location rather than needing to push CRLs directly to auth/cert. [[GH-17136](https://github.com/hashicorp/vault/pull/17136)]
+* auth/cf: Enables CF roles to be compatible with Vault's role based quotas. [[GH-17196](https://github.com/hashicorp/vault/pull/17196)]
+* auth/gcp: Add support for GCE regional instance groups [[GH-16435](https://github.com/hashicorp/vault/pull/16435)]
+* auth/gcp: Updates dependencies: `google.golang.org/api@v0.83.0`, `github.com/hashicorp/go-gcp-common@v0.8.0`. [[GH-17160](https://github.com/hashicorp/vault/pull/17160)]
+* auth/jwt: Adds support for Microsoft US Gov L4 to the Azure provider for groups fetching. [[GH-16525](https://github.com/hashicorp/vault/pull/16525)]
+* auth/jwt: Improves detection of Windows Subsystem for Linux (WSL) for CLI-based logins. [[GH-16525](https://github.com/hashicorp/vault/pull/16525)]
+* auth/kerberos: add `add_group_aliases` config to include LDAP groups in Vault group aliases [[GH-16890](https://github.com/hashicorp/vault/pull/16890)]
+* auth/kerberos: add `remove_instance_name` parameter to the login CLI and the Kerberos config in Vault. This removes any instance names found in the keytab service principal name. [[GH-16594](https://github.com/hashicorp/vault/pull/16594)]
+* auth/kubernetes: Role resolution for K8S Auth [[GH-156](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/156)] [[GH-17161](https://github.com/hashicorp/vault/pull/17161)]
+* auth/oci: Add support for role resolution. [[GH-17212](https://github.com/hashicorp/vault/pull/17212)]
+* auth/oidc: Adds support for group membership parsing when using SecureAuth as an OIDC provider. [[GH-16274](https://github.com/hashicorp/vault/pull/16274)]
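+
+A minimal Go sketch of the per-request SecretID TTL and num_uses described in the approle entry above, assuming an approle auth method mounted at `approle/` with an existing role named `my-role` (both placeholders):
+
+```go
+package main
+
+import (
+    "fmt"
+    "log"
+
+    vault "github.com/hashicorp/vault/api"
+)
+
+func main() {
+    client, err := vault.NewClient(vault.DefaultConfig())
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    // Override the role's configured values for this one SecretID;
+    // omitting ttl and num_uses falls back to the role's configuration.
+    secret, err := client.Logical().Write("auth/approle/role/my-role/secret-id", map[string]interface{}{
+        "ttl":      "15m",
+        "num_uses": 3,
+    })
+    if err != nil {
+        log.Fatal(err)
+    }
+    fmt.Println(secret.Data["secret_id"], secret.Data["secret_id_ttl"])
+}
+```
+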
+* cli: CLI commands will print a warning if flags will be ignored because they are passed after positional arguments. [[GH-16441](https://github.com/hashicorp/vault/pull/16441)]
+* cli: `auth` and `secrets` list `-detailed` commands now show Deprecation Status for builtin plugins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)]
+* cli: `vault plugin list` now has a `details` field in JSON format, and version and type information in table format. [[GH-17347](https://github.com/hashicorp/vault/pull/17347)]
+* command/audit: Improve missing type error message [[GH-16409](https://github.com/hashicorp/vault/pull/16409)]
+* command/server: add `-dev-tls` and `-dev-tls-cert-dir` subcommands to create a Vault dev server with generated certificates and private key. [[GH-16421](https://github.com/hashicorp/vault/pull/16421)]
+* command: Fix shell completion for KV v2 mounts [[GH-16553](https://github.com/hashicorp/vault/pull/16553)]
+* core (enterprise): Add HTTP PATCH support for namespaces with an associated `namespace patch` CLI command
+* core (enterprise): Add check to `vault server` command to ensure configured storage backend is supported.
+* core (enterprise): Add custom metadata support for namespaces
+* core/activity: generate hyperloglogs containing clientIds for each month during precomputation [[GH-16146](https://github.com/hashicorp/vault/pull/16146)]
+* core/activity: refactor activity log api to reuse partial api functions in activity endpoint when current month is specified [[GH-16162](https://github.com/hashicorp/vault/pull/16162)]
+* core/activity: use monthly hyperloglogs to calculate new clients approximation for current month [[GH-16184](https://github.com/hashicorp/vault/pull/16184)]
+* core/quotas (enterprise): Added ability to add path suffixes for lease-count resource quotas
+* core/quotas (enterprise): Added ability to add role information for lease-count resource quotas, to limit login requests on auth mounts made using that role
+* core/quotas: Added ability to add path suffixes for rate-limit resource quotas [[GH-15989](https://github.com/hashicorp/vault/pull/15989)]
+* core/quotas: Added ability to add role information for rate-limit resource quotas, to limit login requests on auth mounts made using that role [[GH-16115](https://github.com/hashicorp/vault/pull/16115)]
+* core: Activity log goroutine management improvements to allow tests to be more deterministic. [[GH-17028](https://github.com/hashicorp/vault/pull/17028)]
+* core: Add `sys/loggers` and `sys/loggers/:name` endpoints to provide ability to modify logging verbosity [[GH-16111](https://github.com/hashicorp/vault/pull/16111)]
+* core: Handle and log deprecated builtin mounts. Introduces `VAULT_ALLOW_PENDING_REMOVAL_MOUNTS` to override shutdown and error when attempting to mount `Pending Removal` builtin plugins. [[GH-17005](https://github.com/hashicorp/vault/pull/17005)]
+* core: Limit activity log client count usage by namespaces [[GH-16000](https://github.com/hashicorp/vault/pull/16000)]
+* core: Upgrade github.com/hashicorp/raft [[GH-16609](https://github.com/hashicorp/vault/pull/16609)]
+* core: remove gox [[GH-16353](https://github.com/hashicorp/vault/pull/16353)]
+* docs: Clarify the behaviour of local mounts in the context of DR replication [[GH-16218](https://github.com/hashicorp/vault/pull/16218)]
+* identity/oidc: Adds support for detailed listing of clients and providers. [[GH-16567](https://github.com/hashicorp/vault/pull/16567)]
+* identity/oidc: Adds the `client_secret_post` token endpoint authentication method.
+* identity/oidc: allows filtering the list providers response by an allowed_client_id [[GH-16181](https://github.com/hashicorp/vault/pull/16181)]
+* identity: Prevent possibility of data races on entity creation. [[GH-16487](https://github.com/hashicorp/vault/pull/16487)]
+* physical/postgresql: pass context to queries to propagate timeouts and cancellations on requests. [[GH-15866](https://github.com/hashicorp/vault/pull/15866)]
+* plugins/multiplexing: Added multiplexing support to database plugins if run as external plugins [[GH-16995](https://github.com/hashicorp/vault/pull/16995)]
+* plugins: Add Deprecation Status method to builtinregistry. [[GH-16846](https://github.com/hashicorp/vault/pull/16846)]
+* plugins: Added an environment variable flag to opt specific plugins out of multiplexing [[GH-16972](https://github.com/hashicorp/vault/pull/16972)]
+* plugins: Adding version to plugin GRPC interface [[GH-17088](https://github.com/hashicorp/vault/pull/17088)]
+* plugins: Plugin catalog supports registering and managing plugins with semantic version information. [[GH-16688](https://github.com/hashicorp/vault/pull/16688)]
+* replication (enterprise): Fix race in merkle sync that can prevent streaming by returning key value matching provided hash if found in log shipper buffer.
+* secret/nomad: allow reading CA and client auth certificate from /nomad/config/access [[GH-15809](https://github.com/hashicorp/vault/pull/15809)]
+* secret/pki: Add RSA PSS signature support for issuing certificates, signing CRLs [[GH-16519](https://github.com/hashicorp/vault/pull/16519)]
+* secret/pki: Add signature_bits to sign-intermediate, sign-verbatim endpoints [[GH-16124](https://github.com/hashicorp/vault/pull/16124)]
+* secret/pki: Allow issuing certificates with non-domain, non-email Common Names from roles, sign-verbatim, and as issuers (`cn_validations`). [[GH-15996](https://github.com/hashicorp/vault/pull/15996)]
+* secret/pki: Allow specifying SKID for cross-signed issuance from older Vault versions. [[GH-16494](https://github.com/hashicorp/vault/pull/16494)]
+* secret/transit: Allow importing Ed25519 keys from PKCS#8 with inner RFC 5915 ECPrivateKey blobs (NSS-wrapped keys). [[GH-15742](https://github.com/hashicorp/vault/pull/15742)]
+* secrets/ad: set config default length only if password_policy is missing [[GH-16140](https://github.com/hashicorp/vault/pull/16140)]
+* secrets/azure: Adds option to permanently delete AzureAD objects created by Vault. [[GH-17045](https://github.com/hashicorp/vault/pull/17045)]
+* secrets/database/hana: Add ability to customize dynamic usernames [[GH-16631](https://github.com/hashicorp/vault/pull/16631)]
+* secrets/database/snowflake: Add multiplexing support [[GH-17159](https://github.com/hashicorp/vault/pull/17159)]
+* secrets/gcp: Updates dependencies: `google.golang.org/api@v0.83.0`, `github.com/hashicorp/go-gcp-common@v0.8.0`. [[GH-17174](https://github.com/hashicorp/vault/pull/17174)]
+* secrets/gcpkms: Update dependencies: google.golang.org/api@v0.83.0. [[GH-17199](https://github.com/hashicorp/vault/pull/17199)]
+* secrets/kubernetes: upgrade to v0.2.0 [[GH-17164](https://github.com/hashicorp/vault/pull/17164)]
+* secrets/pki/tidy: Add another pair of metrics counting certificates not deleted by the tidy operation.
[[GH-16702](https://github.com/hashicorp/vault/pull/16702)] +* secrets/pki: Add a new flag to issue/sign APIs which can filter out root CAs from the returned ca_chain field [[GH-16935](https://github.com/hashicorp/vault/pull/16935)] +* secrets/pki: Add a warning to any successful response when the requested TTL is overwritten by MaxTTL [[GH-17073](https://github.com/hashicorp/vault/pull/17073)] +* secrets/pki: Add ability to cancel tidy operations, control tidy resource usage. [[GH-16958](https://github.com/hashicorp/vault/pull/16958)] +* secrets/pki: Add ability to periodically rebuild CRL before expiry [[GH-16762](https://github.com/hashicorp/vault/pull/16762)] +* secrets/pki: Add ability to periodically run tidy operations to remove expired certificates. [[GH-16900](https://github.com/hashicorp/vault/pull/16900)] +* secrets/pki: Add support for per-issuer Authority Information Access (AIA) URLs [[GH-16563](https://github.com/hashicorp/vault/pull/16563)] +* secrets/pki: Add support to specify signature bits when generating CSRs through intermediate/generate apis [[GH-17388](https://github.com/hashicorp/vault/pull/17388)] +* secrets/pki: Added gauge metrics "secrets.pki.total_revoked_certificates_stored" and "secrets.pki.total_certificates_stored" to track the number of certificates in storage. [[GH-16676](https://github.com/hashicorp/vault/pull/16676)] +* secrets/pki: Allow revocation of certificates with explicitly provided certificate (bring your own certificate / BYOC). [[GH-16564](https://github.com/hashicorp/vault/pull/16564)] +* secrets/pki: Allow revocation via proving possession of certificate's private key [[GH-16566](https://github.com/hashicorp/vault/pull/16566)] +* secrets/pki: Allow tidy to associate revoked certs with their issuers for OCSP performance [[GH-16871](https://github.com/hashicorp/vault/pull/16871)] +* secrets/pki: Honor If-Modified-Since header on CA, CRL fetch; requires passthrough_request_headers modification on the mount point. [[GH-16249](https://github.com/hashicorp/vault/pull/16249)] +* secrets/pki: Improve stability of association of revoked cert with its parent issuer; when an issuer loses crl-signing usage, do not place certs on default issuer's CRL. [[GH-16874](https://github.com/hashicorp/vault/pull/16874)] +* secrets/pki: Support generating delta CRLs for up-to-date CRLs when auto-building is enabled. [[GH-16773](https://github.com/hashicorp/vault/pull/16773)] +* secrets/ssh: Add allowed_domains_template to allow templating of allowed_domains. [[GH-16056](https://github.com/hashicorp/vault/pull/16056)] +* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)] +* secrets/ssh: Allow the use of Identity templates in the `default_user` field [[GH-16351](https://github.com/hashicorp/vault/pull/16351)] +* secrets/transit: Add a dedicated HMAC key type, which can be used with key import. [[GH-16668](https://github.com/hashicorp/vault/pull/16668)] +* secrets/transit: Added a parameter to encrypt/decrypt batch operations to allow the caller to override the HTTP response code in case of partial user-input failures. [[GH-17118](https://github.com/hashicorp/vault/pull/17118)] +* secrets/transit: Allow configuring the possible salt lengths for RSA PSS signatures. 
[[GH-16549](https://github.com/hashicorp/vault/pull/16549)]
+* ssh: Addition of an endpoint `ssh/issue/:role` to allow the creation of signed key pairs [[GH-15561](https://github.com/hashicorp/vault/pull/15561)]
+* storage/cassandra: Add tuning parameters for clustered environments: `connection_timeout`, `initial_connection_timeout`, `simple_retry_policy_retries`. [[GH-10467](https://github.com/hashicorp/vault/pull/10467)]
+* storage/gcs: Add documentation explaining how to configure the gcs backend using environment variables instead of options in the configuration stanza [[GH-14455](https://github.com/hashicorp/vault/pull/14455)]
+* ui: Changed the tokenBoundCidrs tooltip content to clarify that comma separated values are not accepted in this field. [[GH-15852](https://github.com/hashicorp/vault/pull/15852)]
+* ui: Prevents requests to /sys/internal/ui/resultant-acl endpoint when unauthenticated [[GH-17139](https://github.com/hashicorp/vault/pull/17139)]
+* ui: Removed deprecated version of core-js 2.6.11 [[GH-15898](https://github.com/hashicorp/vault/pull/15898)]
+* ui: Renamed the labels under Tools for wrap, lookup, rewrap and unwrap, and added descriptions. [[GH-16489](https://github.com/hashicorp/vault/pull/16489)]
+* ui: Replaces non-inclusive terms [[GH-17116](https://github.com/hashicorp/vault/pull/17116)]
+* ui: redirect_to param forwards from auth route when authenticated [[GH-16821](https://github.com/hashicorp/vault/pull/16821)]
+* website/docs: API generate-recovery-token documentation. [[GH-16213](https://github.com/hashicorp/vault/pull/16213)]
+* website/docs: Add documentation on the performance cost of creating many lease count quotas in a short period [[GH-16950](https://github.com/hashicorp/vault/pull/16950)]
+* website/docs: Removes mentions of unauthenticated from internal ui resultant-acl doc [[GH-17139](https://github.com/hashicorp/vault/pull/17139)]
+* website/docs: Update replication docs to mention Integrated Storage [[GH-16063](https://github.com/hashicorp/vault/pull/16063)]
+* website/docs: changed string examples to use echo instead of the (<<<) here-string. [[GH-9081](https://github.com/hashicorp/vault/pull/9081)]
+
+BUG FIXES:
+
+* agent/template: Fix parsing error for the exec stanza [[GH-16231](https://github.com/hashicorp/vault/pull/16231)]
+* agent: Agent will now respect `max_retries` retry configuration even when caching is set. [[GH-16970](https://github.com/hashicorp/vault/pull/16970)]
+* agent: Update consul-template for pkiCert bug fixes [[GH-16087](https://github.com/hashicorp/vault/pull/16087)]
+* api/sys/internal/specs/openapi: support a new "dynamic" query parameter to generate generic mountpaths [[GH-15835](https://github.com/hashicorp/vault/pull/15835)]
+* api: Fixed erroneous warnings of unrecognized parameters when unwrapping data. [[GH-16794](https://github.com/hashicorp/vault/pull/16794)]
+* api: Fixed issue with internal/ui/mounts and internal/ui/mounts/(?P.+) endpoints where it was not properly handling /auth/ [[GH-15552](https://github.com/hashicorp/vault/pull/15552)]
+* api: properly handle switching to/from unix domain socket when changing client address (example below) [[GH-11904](https://github.com/hashicorp/vault/pull/11904)]
+* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. [[GH-17138](https://github.com/hashicorp/vault/pull/17138)]
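+
+As a small illustration of the unix-socket handling fix above, the same client can now move between socket and TCP addresses; the socket path here is hypothetical:
+
+```shell
+# Point the CLI at a Vault listener on a unix domain socket, then back at TCP.
+VAULT_ADDR='unix:///run/vault/vault.sock' vault status
+VAULT_ADDR='https://127.0.0.1:8200' vault status
+```
+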
+* auth/kerberos: Maintain headers set by the client [[GH-16636](https://github.com/hashicorp/vault/pull/16636)]
+* auth/kubernetes: Restore support for JWT signature algorithm ES384 [[GH-160](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/160)] [[GH-17161](https://github.com/hashicorp/vault/pull/17161)]
+* auth/token: Fix ignored parameter warnings for valid parameters on token create [[GH-16938](https://github.com/hashicorp/vault/pull/16938)]
+* command/debug: fix bug where monitor was not honoring configured duration [[GH-16834](https://github.com/hashicorp/vault/pull/16834)]
+* core (enterprise): Fix bug where wrapping token lookup does not work within namespaces. [[GH-15583](https://github.com/hashicorp/vault/pull/15583)]
+* core (enterprise): Fix creation of duplicate entities via alias metadata changes on local auth mounts.
+* core/auth: Return a 403 instead of a 500 for a malformed SSCT [[GH-16112](https://github.com/hashicorp/vault/pull/16112)]
+* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)]
+* core/license (enterprise): Always remove stored license and allow unseal to complete when license cleanup fails
+* core/managed-keys (enterprise): fix panic when `cache_disable` is set to true
+* core/quotas (enterprise): Fixed issue with improper counting of leases if lease count quota created after leases
+* core/quotas: Added globbing functionality on the end of path suffix quota paths [[GH-16386](https://github.com/hashicorp/vault/pull/16386)]
+* core/quotas: Fix goroutine leak caused by the seal process not fully cleaning up Rate Limit Quotas. [[GH-17281](https://github.com/hashicorp/vault/pull/17281)]
+* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty
+* core/seal: Fix possible keyring truncation when using the file backend. [[GH-15946](https://github.com/hashicorp/vault/pull/15946)]
+* core: Fix panic when the plugin catalog returns neither a plugin nor an error. [[GH-17204](https://github.com/hashicorp/vault/pull/17204)]
+* core: Fixes parsing boolean values for ha_storage backends in config [[GH-15900](https://github.com/hashicorp/vault/pull/15900)]
+* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)]
+* core: Prevent two or more DR failovers from invalidating SSCT tokens generated on the previous primaries. [[GH-16956](https://github.com/hashicorp/vault/pull/16956)]
+* database: Invalidate queue should cancel context first to avoid deadlock [[GH-15933](https://github.com/hashicorp/vault/pull/15933)]
+* debug: Fix panic when capturing debug bundle on Windows [[GH-14399](https://github.com/hashicorp/vault/pull/14399)]
+* debug: Remove extra empty lines from vault.log when debug command is run [[GH-16714](https://github.com/hashicorp/vault/pull/16714)]
+* identity (enterprise): Fix a data race when creating an entity for a local alias.
+* identity/oidc: Adds `claims_supported` to discovery document. [[GH-16992](https://github.com/hashicorp/vault/pull/16992)]
+* identity/oidc: Change the `state` parameter of the Authorization Endpoint to optional. [[GH-16599](https://github.com/hashicorp/vault/pull/16599)]
+* identity/oidc: Detect invalid `redirect_uri` values sooner in validation of the Authorization Endpoint.
[[GH-16601](https://github.com/hashicorp/vault/pull/16601)]
+* identity/oidc: Fixes validation of the `request` and `request_uri` parameters. [[GH-16600](https://github.com/hashicorp/vault/pull/16600)]
+* openapi: Fixed issue where information about /auth/token endpoints was not present with explicit policy permissions [[GH-15552](https://github.com/hashicorp/vault/pull/15552)]
+* plugin/multiplexing: Fix panic when id doesn't exist in connection map [[GH-16094](https://github.com/hashicorp/vault/pull/16094)]
+* plugin/secrets/auth: Fix a bug with aliased backends such as aws-ec2 or generic [[GH-16673](https://github.com/hashicorp/vault/pull/16673)]
+* plugins: Corrected the path to check permissions on when the registered plugin name does not match the plugin binary's filename. [[GH-17340](https://github.com/hashicorp/vault/pull/17340)]
+* quotas/lease-count: Fix lease-count quotas on mounts not properly being enforced when the lease-generating request is a read [[GH-15735](https://github.com/hashicorp/vault/pull/15735)]
+* replication (enterprise): Fix data race in SaveCheckpoint()
+* replication (enterprise): Fix possible data race during merkle diff/sync
+* secret/pki: Do not fail validation with a legacy key_bits default value and key_type=any when signing CSRs [[GH-16246](https://github.com/hashicorp/vault/pull/16246)]
+* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)]
+* secrets/gcp: Fixes duplicate static account key creation from performance secondary clusters. [[GH-16534](https://github.com/hashicorp/vault/pull/16534)]
+* secrets/kv: Fix `kv get` issue preventing the ability to read a secret when providing a leading slash [[GH-16443](https://github.com/hashicorp/vault/pull/16443)]
+* secrets/pki: Allow import of issuers without CRLSign KeyUsage; prohibit setting crl-signing usage on such issuers [[GH-16865](https://github.com/hashicorp/vault/pull/16865)]
+* secrets/pki: Do not ignore provided signature bits value when signing intermediate and leaf certificates with a managed key [[GH-17328](https://github.com/hashicorp/vault/pull/17328)]
+* secrets/pki: Do not read revoked certificates from backend when CRL is disabled [[GH-17385](https://github.com/hashicorp/vault/pull/17385)]
+* secrets/pki: Fix migration to properly handle mounts that contain only keys, no certificates [[GH-16813](https://github.com/hashicorp/vault/pull/16813)]
+* secrets/pki: Ignore EC PARAMETER PEM blocks during issuer import (/config/ca, /issuers/import/*, and /intermediate/set-signed) [[GH-16721](https://github.com/hashicorp/vault/pull/16721)]
+* secrets/pki: LIST issuers endpoint is now unauthenticated (example below). [[GH-16830](https://github.com/hashicorp/vault/pull/16830)]
+* secrets/transform (enterprise): Fix an issue loading tokenization transform configuration after a specific sequence of reconfigurations.
+* secrets/transform (enterprise): Fix persistence problem with tokenization store credentials.
+* storage/raft (enterprise): Fix some storage-modifying RPCs used by perf standbys that weren't returning the resulting WAL state.
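+
+For instance, issuer listing on a PKI mount no longer requires a token; the mount path is hypothetical:
+
+```shell
+# LIST pki/issuers succeeds without authentication.
+curl --request LIST "$VAULT_ADDR/v1/pki/issuers"
+```
+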
+* storage/raft (enterprise): Prevent unauthenticated voter status change with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)] +* storage/raft: Fix retry_join initialization failure [[GH-16550](https://github.com/hashicorp/vault/pull/16550)] +* storage/raft: Nodes no longer get demoted to nonvoter if we don't know their version due to missing heartbeats. [[GH-17019](https://github.com/hashicorp/vault/pull/17019)] +* ui/keymgmt: Sets the defaultValue for type when creating a key. [[GH-17407](https://github.com/hashicorp/vault/pull/17407)] +* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] +* ui: Fix info tooltip submitting form [[GH-16659](https://github.com/hashicorp/vault/pull/16659)] +* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] +* ui: Fix lease force revoke action [[GH-16930](https://github.com/hashicorp/vault/pull/16930)] +* ui: Fix naming of permitted_dns_domains form parameter on CA creation (root generation and sign intermediate). [[GH-16739](https://github.com/hashicorp/vault/pull/16739)] +* ui: Fixed bug where red spellcheck underline appears in sensitive/secret kv values when it should not appear [[GH-15681](https://github.com/hashicorp/vault/pull/15681)] +* ui: Fixes secret version and status menu links transitioning to auth screen [[GH-16983](https://github.com/hashicorp/vault/pull/16983)] +* ui: OIDC login type uses localStorage instead of sessionStorage [[GH-16170](https://github.com/hashicorp/vault/pull/16170)] +* vault: Fix a bug where duplicate policies could be added to an identity group. [[GH-15638](https://github.com/hashicorp/vault/pull/15638)] + +## 1.11.10 +### April 26, 2023 + +CHANGES: + +* core: Bump Go version to 1.19.8. + +IMPROVEMENTS: + +* cli/namespace: Add detailed flag to output additional namespace information +such as namespace IDs and custom metadata. [[GH-20243](https://github.com/hashicorp/vault/pull/20243)] +* core/activity: add an endpoint to write test activity log data, guarded by a build flag [[GH-20019](https://github.com/hashicorp/vault/pull/20019)] +* core: Add a `raft` sub-field to the `storage` and `ha_storage` details provided by the +`/sys/config/state/sanitized` endpoint in order to include the `max_entry_size`. [[GH-20044](https://github.com/hashicorp/vault/pull/20044)] +* sdk/ldaputil: added `connection_timeout` to tune connection timeout duration +for all LDAP plugins. [[GH-20144](https://github.com/hashicorp/vault/pull/20144)] + +BUG FIXES: + +* auth/ldap: Add max_page_size configurable to LDAP configuration [[GH-19032](https://github.com/hashicorp/vault/pull/19032)] +* core (enterprise): Fix intermittent issue with token entries sometimes not being found when using a newly created token in a request to a secondary, even when SSCT `new_token` forwarding is set. When this occurred, this would result in the following error to the client: `error performing token check: no lease entry found for token that ought to have one, possible eventual consistency issue`. +* core (enterprise): Fix read on perf standbys failing with 412 after leadership change, unseal, restores or restarts when no writes occur +* core/ssct (enterprise): Fixed race condition where a newly promoted DR may revert `sscGenCounter` +resulting in 412 errors. +* core: Fix regression breaking non-raft clusters whose nodes share the same cluster_addr/api_addr. 
[[GH-19721](https://github.com/hashicorp/vault/pull/19721)] +* helper/random: Fix race condition in string generator helper [[GH-19875](https://github.com/hashicorp/vault/pull/19875)] +* openapi: Fix many incorrect details in generated API spec, by using better techniques to parse path regexps [[GH-18554](https://github.com/hashicorp/vault/pull/18554)] +* replication (enterprise): Fix replication status for Primary clusters showing its primary cluster's information (in case of DR) in secondaries field when known_secondaries field is nil +* secrets/pki: Fix patching of leaf_not_after_behavior on issuers. [[GH-20341](https://github.com/hashicorp/vault/pull/20341)] +* secrets/transform (enterprise): Address SQL connection leak when cleaning expired tokens +* ui: Fix OIDC provider logo showing when domain doesn't match [[GH-20263](https://github.com/hashicorp/vault/pull/20263)] +* ui: Fix bad link to namespace when namespace name includes `.` [[GH-19799](https://github.com/hashicorp/vault/pull/19799)] +* ui: fixes browser console formatting for help command output [[GH-20064](https://github.com/hashicorp/vault/pull/20064)] +* ui: remove use of htmlSafe except when first sanitized [[GH-20235](https://github.com/hashicorp/vault/pull/20235)] + +## 1.11.9 +### March 29, 2023 + +SECURITY: + +* storage/mssql: When using Vault’s community-supported Microsoft SQL (MSSQL) database storage backend, a privileged attacker with the ability to write arbitrary data to Vault’s configuration may be able to perform arbitrary SQL commands on the underlying database server through Vault. This vulnerability, CVE-2023-0620, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-12](https://discuss.hashicorp.com/t/hcsec-2023-12-vault-s-microsoft-sql-database-storage-backend-vulnerable-to-sql-injection-via-configuration-file/52080)] +* secrets/pki: Vault’s PKI mount issuer endpoints did not correctly authorize access to remove an issuer or modify issuer metadata, potentially resulting in denial of service of the PKI mount. This bug did not affect public or private key material, trust chains or certificate issuance. This vulnerability, CVE-2023-0665, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-11](https://discuss.hashicorp.com/t/hcsec-2023-11-vault-s-pki-issuer-endpoint-did-not-correctly-authorize-access-to-issuer-metadata/52079)] +* core: HashiCorp Vault’s implementation of Shamir’s secret sharing used precomputed table lookups, and was vulnerable to cache-timing attacks. An attacker with access to, and the ability to observe a large number of unseal operations on the host through a side channel may reduce the search space of a brute force effort to recover the Shamir shares. This vulnerability, CVE-2023-25000, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-10](https://discuss.hashicorp.com/t/hcsec-2023-10-vault-vulnerable-to-cache-timing-attacks-during-seal-and-unseal-operations/52078)] + +IMPROVEMENTS: + +* auth/github: Allow for an optional Github auth token environment variable to make authenticated requests when fetching org id +website/docs: Add docs for `VAULT_AUTH_CONFIG_GITHUB_TOKEN` environment variable when writing Github config [[GH-19244](https://github.com/hashicorp/vault/pull/19244)] +* core: Allow overriding gRPC connect timeout via VAULT_GRPC_MIN_CONNECT_TIMEOUT. This is an env var rather than a config setting because we don't expect this to ever be needed. 
It's being added as a last-ditch
+option in case all else fails for some replication issues we may not have fully reproduced. [[GH-19676](https://github.com/hashicorp/vault/pull/19676)]
+* core: validate name identifiers in mssql physical storage backend prior to use [[GH-19591](https://github.com/hashicorp/vault/pull/19591)]
+
+BUG FIXES:
+
+* auth/kubernetes: Ensure a consistent TLS configuration for all k8s API requests [[#190](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/190)] [[GH-19720](https://github.com/hashicorp/vault/pull/19720)]
+* cli: Fix vault read handling to return raw data as secret.Data when there is no top-level data object from api response. [[GH-17913](https://github.com/hashicorp/vault/pull/17913)]
+* core (enterprise): Attempt to reconnect to a PKCS#11 HSM if we retrieve a CKR_FUNCTION_FAILED error.
+* core: Fixed issue with remounting mounts that have a non-trailing space in the 'to' or 'from' paths. [[GH-19585](https://github.com/hashicorp/vault/pull/19585)]
+* openapi: Fix logic for labeling unauthenticated/sudo paths. [[GH-19600](https://github.com/hashicorp/vault/pull/19600)]
+* secrets/transform (enterprise): Fix persistence problem with rotated tokenization key versions
+* ui: fixes issue navigating back a level using the breadcrumb from secret metadata view [[GH-19703](https://github.com/hashicorp/vault/pull/19703)]
+* ui: pass encodeBase64 param to HMAC transit-key-actions. [[GH-19429](https://github.com/hashicorp/vault/pull/19429)]
+* ui: use URLSearchParams interface to capture namespace param from SSOs (ex. ADFS) with decoded state param in callback url [[GH-19460](https://github.com/hashicorp/vault/pull/19460)]
+
+## 1.11.8
+### March 01, 2023
+
+SECURITY:
+
+* auth/approle: When using the Vault and Vault Enterprise (Vault) approle auth method, any authenticated user with access to the /auth/approle/role/:role_name/secret-id-accessor/destroy endpoint can destroy the secret ID of any other role by providing the secret ID accessor. This vulnerability, CVE-2023-24999, has been fixed in Vault 1.13.0, 1.12.4, 1.11.8, 1.10.11 and above. [[HSEC-2023-07](https://discuss.hashicorp.com/t/hcsec-2023-07-vault-fails-to-verify-if-approle-secretid-belongs-to-role-during-a-destroy-operation/51305)]
+
+CHANGES:
+
+* core: Bump Go version to 1.19.6.
+
+IMPROVEMENTS:
+
+* secrets/database: Adds error message requiring password on root credential rotation. [[GH-19103](https://github.com/hashicorp/vault/pull/19103)]
+
+BUG FIXES:
+
+* auth/approle: Add nil check for the secret ID entry when deleting via secret ID accessor, preventing cross-role secret ID deletion [[GH-19186](https://github.com/hashicorp/vault/pull/19186)]
+* core (enterprise): Fix panic when using invalid accessor for control-group request
+* core (enterprise): Fix perf standby WAL streaming silent failures when replication setup happens at a bad time.
+* core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null [[GH-18951](https://github.com/hashicorp/vault/pull/18951)]
+* license (enterprise): Fix bug where license would update even if the license didn't change.
+* replication (enterprise): Fix bug where reloading external plugin on a secondary would
+break replication.
+* secrets/ad: Fix bug where config couldn't be updated unless binddn/bindpass were included in the update (example below). [[GH-18208](https://github.com/hashicorp/vault/pull/18208)]
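+
+With this fix, a single AD config field can be updated without resending the bind credentials; the URL value is hypothetical:
+
+```shell
+# Update only the LDAP URL; binddn/bindpass no longer have to be resent.
+vault write ad/config url="ldaps://ad.example.com"
+```
+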
+* ui (enterprise): Fix cancel button from transform engine role creation page [[GH-19135](https://github.com/hashicorp/vault/pull/19135)]
+* ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode [[GH-19071](https://github.com/hashicorp/vault/pull/19071)]
+* ui: show Get credentials button for static roles detail page when a user has the proper permissions. [[GH-19190](https://github.com/hashicorp/vault/pull/19190)]
+
+## 1.11.7
+### February 6, 2023
+
+CHANGES:
+
+* core: Bump Go version to 1.19.4.
+
+IMPROVEMENTS:
+
+* command/server: Environment variable keys are now logged at startup. [[GH-18125](https://github.com/hashicorp/vault/pull/18125)]
+* core/fips: use upstream toolchain for FIPS 140-2 compliance again; this will appear as X=boringcrypto on the Go version in Vault server logs.
+* secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters [[GH-18799](https://github.com/hashicorp/vault/pull/18799)]
+* ui: Prepends "passcode=" if not provided in user input for duo totp mfa method authentication [[GH-18342](https://github.com/hashicorp/vault/pull/18342)]
+* ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] [[GH-18350](https://github.com/hashicorp/vault/pull/18350)]
+
+BUG FIXES:
+
+* auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID [[GH-18145](https://github.com/hashicorp/vault/pull/18145)]
+* cli/kv: skip formatting of nil secrets for patch and put with field parameter set [[GH-18163](https://github.com/hashicorp/vault/pull/18163)]
+* core (enterprise): Fix a race condition resulting in login errors to PKCS#11 modules under high concurrency.
+* core/managed-keys (enterprise): Limit verification checks to mounts in a key's namespace
+* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas.
+* core/quotas: Fix issue with improper application of default rate limit quota exempt paths [[GH-18273](https://github.com/hashicorp/vault/pull/18273)]
+* core: fix bug where context cancellations weren't forwarded to active node from performance standbys.
+* core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted [[GH-18923](https://github.com/hashicorp/vault/pull/18923)]
+* database/mongodb: Fix writeConcern so that it is applied to any query made on the database [[GH-18546](https://github.com/hashicorp/vault/pull/18546)]
+* identity (enterprise): Fix a data race when creating an entity for a local alias.
+* kmip (enterprise): Fix Destroy operation response that omitted Unique Identifier on some batched responses.
+* kmip (enterprise): Fix Locate operation response incompatibility with clients using KMIP versions prior to 1.3.
+* kmip (enterprise): Fix Query operation response that omitted streaming capability and supported profiles.
+* licensing (enterprise): update autoloaded license cache after reload
+* secrets/pki: Allow patching issuer to set an empty issuer name. [[GH-18466](https://github.com/hashicorp/vault/pull/18466)]
+* secrets/transit: Do not warn about unrecognized parameter 'batch_input' [[GH-18299](https://github.com/hashicorp/vault/pull/18299)]
+* storage/raft (enterprise): An already joined node can rejoin by wiping storage
+and re-issuing a join request, but in doing so could transiently become a
+non-voter. In some scenarios this resulted in loss of quorum.
[[GH-18263](https://github.com/hashicorp/vault/pull/18263)] +* storage/raft (enterprise): Fix some storage-modifying RPCs used by perf standbys that weren't returning the resulting WAL state. +* storage/raft: Don't panic on unknown raft ops [[GH-17732](https://github.com/hashicorp/vault/pull/17732)] +* ui: fixes query parameters not passed in api explorer test requests [[GH-18743](https://github.com/hashicorp/vault/pull/18743)] + +## 1.11.6 +### November 30, 2022 + +IMPROVEMENTS: + +* secrets/pki: Allow issuer creation, import to change default issuer via `default_follows_latest_issuer`. [[GH-17824](https://github.com/hashicorp/vault/pull/17824)] + +BUG FIXES: + +* auth/okta: fix a panic for AuthRenew in Okta [[GH-18011](https://github.com/hashicorp/vault/pull/18011)] +* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)] +* cli: Fix issue preventing kv commands from executing properly when the mount path provided by `-mount` flag and secret key path are the same. [[GH-17679](https://github.com/hashicorp/vault/pull/17679)] +* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas. +* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)] +* core: fix a start up race condition where performance standbys could go into a + mount loop if default policies are not yet synced from the active node. [[GH-17801](https://github.com/hashicorp/vault/pull/17801)] +* secret/pki: fix bug with initial legacy bundle migration (from < 1.11 into 1.11+) and missing issuers from ca_chain [[GH-17772](https://github.com/hashicorp/vault/pull/17772)] +* secrets/azure: add WAL to clean up role assignments if errors occur [[GH-18085](https://github.com/hashicorp/vault/pull/18085)] +* secrets/gcp: Fixes duplicate service account key for rotate root on standby or secondary [[GH-18110](https://github.com/hashicorp/vault/pull/18110)] +* ui: Fixes issue with not being able to download raft snapshot via service worker [[GH-17769](https://github.com/hashicorp/vault/pull/17769)] +* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)] + +## 1.11.5 +### November 2, 2022 + +IMPROVEMENTS: + +* database/snowflake: Allow parallel requests to Snowflake [[GH-17594](https://github.com/hashicorp/vault/pull/17594)] +* sdk/ldap: Added support for paging when searching for groups using group filters [[GH-17640](https://github.com/hashicorp/vault/pull/17640)] + +BUG FIXES: + +* core/managed-keys (enterprise): Return better error messages when encountering key creation failures +* core/managed-keys (enterprise): fix panic when having `cache_disable` true +* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] +* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] +* kmip (enterprise): Fix a problem in the handling of attributes that caused Import operations to fail. 
+* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)]
+* secrets/pki: Do not ignore provided signature bits value when signing intermediate and leaf certificates with a managed key [[GH-17328](https://github.com/hashicorp/vault/pull/17328)]
+* secrets/pki: Do not read revoked certificates from backend when CRL is disabled [[GH-17384](https://github.com/hashicorp/vault/pull/17384)]
+* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. [[GH-17497](https://github.com/hashicorp/vault/pull/17497)]
+* ui/keymgmt: Sets the defaultValue for type when creating a key. [[GH-17407](https://github.com/hashicorp/vault/pull/17407)]
+* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)]
+
+## 1.11.4
+### September 30, 2022
+
+SECURITY:
+
+* secrets/pki: Vault’s TLS certificate auth method did not initially load the optionally-configured CRL issued by the role’s CA into memory on startup, resulting in the revocation list not being checked, if the CRL has not yet been retrieved. This vulnerability, CVE-2022-41316, is fixed in Vault 1.12.0, 1.11.4, 1.10.7, and 1.9.10. [[HSEC-2022-24](https://discuss.hashicorp.com/t/hcsec-2022-24-vaults-tls-cert-auth-method-only-loaded-crl-after-first-request/45483)]
+
+IMPROVEMENTS:
+
+* agent/auto-auth: Add `exit_on_err`, which, when set to true, will cause Agent to exit if any errors are encountered during authentication. [[GH-17091](https://github.com/hashicorp/vault/pull/17091)]
+* agent: Send notifications to systemd on start and stop. [[GH-9802](https://github.com/hashicorp/vault/pull/9802)]
+
+BUG FIXES:
+
+* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. [[GH-17138](https://github.com/hashicorp/vault/pull/17138)]
+* auth/kubernetes: Restore support for JWT signature algorithm ES384 [[GH-160](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/160)] [[GH-17162](https://github.com/hashicorp/vault/pull/17162)]
+* auth/token: Fix ignored parameter warnings for valid parameters on token create [[GH-16938](https://github.com/hashicorp/vault/pull/16938)]
+* core/quotas: Fix goroutine leak caused by the seal process not fully cleaning up Rate Limit Quotas. [[GH-17281](https://github.com/hashicorp/vault/pull/17281)]
+* core: Prevent two or more DR failovers from invalidating SSCT tokens generated on the previous primaries. [[GH-16956](https://github.com/hashicorp/vault/pull/16956)]
+* identity/oidc: Adds `claims_supported` to discovery document. [[GH-16992](https://github.com/hashicorp/vault/pull/16992)]
+* replication (enterprise): Fix data race in SaveCheckpoint()
+* secrets/transform (enterprise): Fix an issue loading tokenization transform configuration after a specific sequence of reconfigurations.
+* secrets/transform (enterprise): Fix persistence problem with tokenization store credentials.
+* ui: Fixes secret version and status menu links transitioning to auth screen [[GH-16983](https://github.com/hashicorp/vault/pull/16983)]
+
+## 1.11.3
+### August 31, 2022
+
+SECURITY:
+
+* core: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases.
This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. This vulnerability, CVE-2022-40186, is fixed in 1.11.3, 1.10.6, and 1.9.9. [[HSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] + +CHANGES: + +* core: Bump Go version to 1.17.13. + +IMPROVEMENTS: + +* auth/kerberos: add `add_group_aliases` config to include LDAP groups in Vault group aliases [[GH-16890](https://github.com/hashicorp/vault/pull/16890)] +* auth/kerberos: add `remove_instance_name` parameter to the login CLI and the +Kerberos config in Vault. This removes any instance names found in the keytab +service principal name. [[GH-16594](https://github.com/hashicorp/vault/pull/16594)] +* identity/oidc: Adds the `client_secret_post` token endpoint authentication method. [[GH-16598](https://github.com/hashicorp/vault/pull/16598)] +* storage/gcs: Add documentation explaining how to configure the gcs backend using environment variables instead of options in the configuration stanza [[GH-14455](https://github.com/hashicorp/vault/pull/14455)] + +BUG FIXES: + +* api: Fixed erroneous warnings of unrecognized parameters when unwrapping data. [[GH-16794](https://github.com/hashicorp/vault/pull/16794)] +* auth/gcp: Fixes the ability to reset the configuration's credentials to use application default credentials. [[GH-16523](https://github.com/hashicorp/vault/pull/16523)] +* auth/kerberos: Maintain headers set by the client [[GH-16636](https://github.com/hashicorp/vault/pull/16636)] +* command/debug: fix bug where monitor was not honoring configured duration [[GH-16834](https://github.com/hashicorp/vault/pull/16834)] +* core/license (enterprise): Always remove stored license and allow unseal to complete when license cleanup fails +* database/elasticsearch: Fixes a bug in boolean parsing for initialize [[GH-16526](https://github.com/hashicorp/vault/pull/16526)] +* identity/oidc: Change the `state` parameter of the Authorization Endpoint to optional. [[GH-16599](https://github.com/hashicorp/vault/pull/16599)] +* identity/oidc: Detect invalid `redirect_uri` values sooner in validation of the +Authorization Endpoint. [[GH-16601](https://github.com/hashicorp/vault/pull/16601)] +* identity/oidc: Fixes validation of the `request` and `request_uri` parameters. [[GH-16600](https://github.com/hashicorp/vault/pull/16600)] +* plugin/secrets/auth: Fix a bug with aliased backends such as aws-ec2 or generic [[GH-16673](https://github.com/hashicorp/vault/pull/16673)] +* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)] +* secrets/gcp: Fixes duplicate static account key creation from performance secondary clusters. [[GH-16534](https://github.com/hashicorp/vault/pull/16534)] +* secrets/pki: Fix migration to properly handle mounts that contain only keys, no certificates [[GH-16813](https://github.com/hashicorp/vault/pull/16813)] +* secrets/pki: Ignore EC PARAMETER PEM blocks during issuer import (/config/ca, /issuers/import/*, and /intermediate/set-signed) [[GH-16721](https://github.com/hashicorp/vault/pull/16721)] +* secrets/pki: LIST issuers endpoint is now unauthenticated. 
[[GH-16830](https://github.com/hashicorp/vault/pull/16830)]
+* storage/raft: Fix retry_join initialization failure [[GH-16550](https://github.com/hashicorp/vault/pull/16550)]
+* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)]
+* ui: Fix info tooltip submitting form [[GH-16659](https://github.com/hashicorp/vault/pull/16659)]
+* ui: Fix naming of permitted_dns_domains form parameter on CA creation (root generation and sign intermediate). [[GH-16739](https://github.com/hashicorp/vault/pull/16739)]
+
+## 1.11.2
+### August 2, 2022
+
+IMPROVEMENTS:
+
+* agent: Added `disable_keep_alives` configuration to disable keep-alives in auto-auth, caching and templating. [[GH-16479](https://github.com/hashicorp/vault/pull/16479)]
+
+BUG FIXES:
+
+* core/auth: Return a 403 instead of a 500 for a malformed SSCT [[GH-16112](https://github.com/hashicorp/vault/pull/16112)]
+* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)]
+* secrets/kv: Fix `kv get` issue preventing the ability to read a secret when providing a leading slash [[GH-16443](https://github.com/hashicorp/vault/pull/16443)]
+* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)]
+
+## 1.11.1
+### July 21, 2022
+
+SECURITY:
+
+* storage/raft: Vault Enterprise (“Vault”) clusters using Integrated Storage expose an unauthenticated API endpoint that could be abused to override the voter status of a node within a Vault HA cluster, introducing potential for future data loss or catastrophic failure. This vulnerability, CVE-2022-36129, was fixed in Vault 1.9.8, 1.10.5, and 1.11.1. [[HSEC-2022-15](https://discuss.hashicorp.com/t/hcsec-2022-15-vault-enterprise-does-not-verify-existing-voter-status-when-joining-an-integrated-storage-ha-node/42420)]
+
+CHANGES:
+
+* core: Bump Go version to 1.17.12.
+
+IMPROVEMENTS:
+
+* agent: Added `disable_idle_connections` configuration to disable leaving idle connections open in auto-auth, caching and templating (example below). [[GH-15986](https://github.com/hashicorp/vault/pull/15986)]
+* core: Add `sys/loggers` and `sys/loggers/:name` endpoints to provide ability to modify logging verbosity [[GH-16111](https://github.com/hashicorp/vault/pull/16111)]
+* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)]
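+
+A minimal sketch of the new agent option; the subsystem list and file name are assumptions based on the entry above, and a full agent config still needs its usual auto_auth and vault stanzas:
+
+```shell
+# Assumed: the option takes a list of agent subsystems to stop holding
+# idle connections open for.
+cat > agent.hcl <<'EOF'
+disable_idle_connections = ["auto-auth", "caching", "templating"]
+EOF
+vault agent -config=agent.hcl
+```
+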
+
+BUG FIXES:
+
+* agent/template: Fix parsing error for the exec stanza [[GH-16231](https://github.com/hashicorp/vault/pull/16231)]
+* agent: Update consul-template for pkiCert bug fixes [[GH-16087](https://github.com/hashicorp/vault/pull/16087)]
+* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)]
+* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty
+* core/seal: Fix possible keyring truncation when using the file backend. [[GH-15946](https://github.com/hashicorp/vault/pull/15946)]
+* kmip (enterprise): Return SecretData as supported Object Type.
+* plugin/multiplexing: Fix panic when id doesn't exist in connection map [[GH-16094](https://github.com/hashicorp/vault/pull/16094)]
+* secret/pki: Do not fail validation with a legacy key_bits default value and key_type=any when signing CSRs [[GH-16246](https://github.com/hashicorp/vault/pull/16246)]
+* storage/raft (enterprise): Prevent unauthenticated voter status change with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)]
+* transform (enterprise): Fix a bug in the handling of nested or unmatched capture groups in FPE transformations.
+* ui: OIDC login type uses localStorage instead of sessionStorage [[GH-16170](https://github.com/hashicorp/vault/pull/16170)]
+
+## 1.11.0
+### June 20, 2022
+
+CHANGES:
+
+* auth/aws: Add RoleSession to DisplayName when using assumeRole for authentication [[GH-14954](https://github.com/hashicorp/vault/pull/14954)]
+* auth/kubernetes: If `kubernetes_ca_cert` is unset, and there is no pod-local CA available, an error will be surfaced when writing config instead of waiting for login. [[GH-15584](https://github.com/hashicorp/vault/pull/15584)]
+* auth: Remove support for legacy MFA
+(https://www.vaultproject.io/docs/v1.10.x/auth/mfa) [[GH-14869](https://github.com/hashicorp/vault/pull/14869)]
+* core/fips: Disable and warn about entropy augmentation in FIPS 140-2 Inside mode [[GH-15858](https://github.com/hashicorp/vault/pull/15858)]
+* core: A request that fails path validation due to relative path check will now be responded to with a 400 rather than 500. [[GH-14328](https://github.com/hashicorp/vault/pull/14328)]
+* core: Bump Go version to 1.17.11. [[GH-go-ver-1110](https://github.com/hashicorp/vault/pull/go-ver-1110)]
+* database & storage: Change underlying driver library from [lib/pq](https://github.com/lib/pq) to [pgx](https://github.com/jackc/pgx).
This change affects Redshift & Postgres database secrets engines, and CockroachDB & Postgres storage engines [[GH-15343](https://github.com/hashicorp/vault/pull/15343)]
+* licensing (enterprise): Remove support for stored licenses and associated `sys/license` and `sys/license/signed`
+endpoints in favor of [autoloaded licenses](https://www.vaultproject.io/docs/enterprise/license/autoloading).
+* replication (enterprise): The `/sys/replication/performance/primary/mount-filter` endpoint has been removed. Please use [Paths Filter](https://www.vaultproject.io/api-docs/system/replication/replication-performance#create-paths-filter) instead.
+* secret/pki: Remove unused signature_bits parameter from intermediate CSR generation; this parameter doesn't control the final certificate's signature algorithm selection as that is up to the signing CA [[GH-15478](https://github.com/hashicorp/vault/pull/15478)]
+* secrets/kubernetes: Split `additional_metadata` into `extra_annotations` and `extra_labels` parameters [[GH-15655](https://github.com/hashicorp/vault/pull/15655)]
+* secrets/pki: A new aliased API path (/pki/issuer/:issuer_ref/sign-self-issued)
+provides the same functionality as the existing API (/pki/root/sign-self-issued)
+but does not require sudo capabilities; the latter still requires sudo in an
+effort to maintain backwards compatibility. [[GH-15211](https://github.com/hashicorp/vault/pull/15211)]
+* secrets/pki: Err on unknown role during sign-verbatim. [[GH-15543](https://github.com/hashicorp/vault/pull/15543)]
+* secrets/pki: Existing CRL API (/pki/crl) now returns an X.509 v2 CRL instead
+of a v1 CRL. [[GH-15100](https://github.com/hashicorp/vault/pull/15100)]
+* secrets/pki: The `ca_chain` response field within issuing (/pki/issue/:role)
+and signing APIs will now include the root CA certificate if the mount is
+aware of it. [[GH-15155](https://github.com/hashicorp/vault/pull/15155)]
+* secrets/pki: existing Delete Root API (pki/root) will now delete all issuers
+and keys within the mount path. [[GH-15004](https://github.com/hashicorp/vault/pull/15004)]
+* secrets/pki: existing Generate Root (pki/root/generate/:type),
+Set Signed Intermediate (/pki/intermediate/set-signed) APIs will
+add new issuers/keys to a mount instead of warning that an existing CA exists [[GH-14975](https://github.com/hashicorp/vault/pull/14975)]
+* secrets/pki: the signed CA certificate from the sign-intermediate api will now appear within the ca_chain
+response field along with the issuer's ca chain. [[GH-15524](https://github.com/hashicorp/vault/pull/15524)]
+* ui: Upgrade Ember to version 3.28 [[GH-14763](https://github.com/hashicorp/vault/pull/14763)]
+
+FEATURES:
+
+* **Autopilot Improvements (Enterprise)**: Autopilot on Vault Enterprise now supports automated upgrades and redundancy zones when using integrated storage.
+* **KeyMgmt UI**: Add UI support for managing the Key Management Secrets Engine [[GH-15523](https://github.com/hashicorp/vault/pull/15523)]
+* **Kubernetes Secrets Engine**: This new secrets engine generates Kubernetes service account tokens, service accounts, role bindings, and roles dynamically. [[GH-15551](https://github.com/hashicorp/vault/pull/15551)]
+* **Non-Disruptive Intermediate/Root Certificate Rotation**: This allows
+import, generation and configuration of any number of keys and/or issuers
+within a PKI mount, providing operators the ability to rotate certificates
+in place without affecting existing client configurations.
[[GH-15277](https://github.com/hashicorp/vault/pull/15277)] +* **Print minimum required policy for any command**: The global CLI flag `-output-policy` can now be used with any command to print out the minimum required policy HCL for that operation, including whether the given path requires the "sudo" capability. [[GH-14899](https://github.com/hashicorp/vault/pull/14899)] +* **Snowflake Database Plugin**: Adds ability to manage RSA key pair credentials for dynamic and static Snowflake users. [[GH-15376](https://github.com/hashicorp/vault/pull/15376)] +* **Transit BYOK**: Allow import of externally-generated keys into the Transit secrets engine. [[GH-15414](https://github.com/hashicorp/vault/pull/15414)] +* nomad: Bootstrap Nomad ACL system if no token is provided [[GH-12451](https://github.com/hashicorp/vault/pull/12451)] +* storage/dynamodb: Added `AWS_DYNAMODB_REGION` environment variable. [[GH-15054](https://github.com/hashicorp/vault/pull/15054)] + +IMPROVEMENTS: + +* activity: return nil response months in activity log API when no month data exists [[GH-15420](https://github.com/hashicorp/vault/pull/15420)] +* agent/auto-auth: Add `min_backoff` to the method stanza for configuring initial backoff duration. [[GH-15204](https://github.com/hashicorp/vault/pull/15204)] +* agent: Update consul-template to v0.29.0 [[GH-15293](https://github.com/hashicorp/vault/pull/15293)] +* agent: Upgrade hashicorp/consul-template version for sprig template functions and improved writeTo function [[GH-15092](https://github.com/hashicorp/vault/pull/15092)] +* api/monitor: Add log_format option to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] +* api: Add ability to pass certificate as PEM bytes to api.Client. [[GH-14753](https://github.com/hashicorp/vault/pull/14753)] +* api: Add context-aware functions to vault/api for each API wrapper function. [[GH-14388](https://github.com/hashicorp/vault/pull/14388)] +* api: Added MFALogin() for handling MFA flow when using login helpers. [[GH-14900](https://github.com/hashicorp/vault/pull/14900)] +* api: If the parameters supplied over the API payload are ignored due to not +being what the endpoints were expecting, or if the parameters supplied get +replaced by the values in the endpoint's path itself, warnings will be added to +the non-empty responses listing all the ignored and replaced parameters. [[GH-14962](https://github.com/hashicorp/vault/pull/14962)] +* api: KV helper methods to simplify the common use case of reading and writing KV secrets [[GH-15305](https://github.com/hashicorp/vault/pull/15305)] +* api: Provide a helper method WithNamespace to create a cloned client with a new NS [[GH-14963](https://github.com/hashicorp/vault/pull/14963)] +* api: Support VAULT_PROXY_ADDR environment variable to allow overriding the Vault client's HTTP proxy. [[GH-15377](https://github.com/hashicorp/vault/pull/15377)] +* api: Use the context passed to the api/auth Login helpers. [[GH-14775](https://github.com/hashicorp/vault/pull/14775)] +* api: make ListPlugins parse only known plugin types [[GH-15434](https://github.com/hashicorp/vault/pull/15434)] +* audit: Add a policy_results block into the audit log that contains the set of +policies that granted this request access. 
[[GH-15457](https://github.com/hashicorp/vault/pull/15457)] +* audit: Include mount_accessor in audit request and response logs [[GH-15342](https://github.com/hashicorp/vault/pull/15342)] +* audit: added entity_created boolean to audit log, set when login operations create an entity [[GH-15487](https://github.com/hashicorp/vault/pull/15487)] +* auth/aws: Add rsa2048 signature type to API [[GH-15719](https://github.com/hashicorp/vault/pull/15719)] +* auth/gcp: Enable the Google service endpoints used by the underlying client to be customized [[GH-15592](https://github.com/hashicorp/vault/pull/15592)] +* auth/gcp: Vault CLI now infers the service account email when running on Google Cloud [[GH-15592](https://github.com/hashicorp/vault/pull/15592)] +* auth/jwt: Adds ability to use JSON pointer syntax for the `user_claim` value. [[GH-15593](https://github.com/hashicorp/vault/pull/15593)] +* auth/okta: Add support for Google provider TOTP type in the Okta auth method [[GH-14985](https://github.com/hashicorp/vault/pull/14985)] +* auth/okta: Add support for performing [the number +challenge](https://help.okta.com/en-us/Content/Topics/Mobile/ov-admin-config.htm?cshid=csh-okta-verify-number-challenge-v1#enable-number-challenge) +during an Okta Verify push challenge [[GH-15361](https://github.com/hashicorp/vault/pull/15361)] +* auth: Globally scoped Login MFA method Get/List endpoints [[GH-15248](https://github.com/hashicorp/vault/pull/15248)] +* auth: enforce a rate limit for TOTP passcode validation attempts [[GH-14864](https://github.com/hashicorp/vault/pull/14864)] +* auth: forward cached MFA auth response to the leader using RPC instead of forwarding all login requests [[GH-15469](https://github.com/hashicorp/vault/pull/15469)] +* cli/debug: added support for retrieving metrics from DR clusters if `unauthenticated_metrics_access` is enabled [[GH-15316](https://github.com/hashicorp/vault/pull/15316)] +* cli/vault: warn when policy name contains upper-case letter [[GH-14670](https://github.com/hashicorp/vault/pull/14670)] +* cli: Alternative flag-based syntax for KV to mitigate confusion from automatically appended /data [[GH-14807](https://github.com/hashicorp/vault/pull/14807)] +* cockroachdb: add high-availability support [[GH-12965](https://github.com/hashicorp/vault/pull/12965)] +* command/debug: Add log_format flag to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] +* command: Support optional '-log-level' flag to be passed to 'operator migrate' command (defaults to info). Also support VAULT_LOG_LEVEL env var. [[GH-15405](https://github.com/hashicorp/vault/pull/15405)] +* command: Support the optional '-detailed' flag to be passed to 'vault list' command to show ListResponseWithInfo data. Also supports the VAULT_DETAILED env var. [[GH-15417](https://github.com/hashicorp/vault/pull/15417)] +* core (enterprise): Include `termination_time` in `sys/license/status` response +* core (enterprise): Include termination time in `license inspect` command output +* core,transit: Allow callers to choose random byte source including entropy augmentation sources for the sys/tools/random and transit/random endpoints. 
[[GH-15213](https://github.com/hashicorp/vault/pull/15213)]
+* core/activity: Order month data in ascending order of timestamps [[GH-15259](https://github.com/hashicorp/vault/pull/15259)]
+* core/activity: allow client counts to be precomputed and queried on non-contiguous chunks of data [[GH-15352](https://github.com/hashicorp/vault/pull/15352)]
+* core/managed-keys (enterprise): Allow configuring the number of parallel operations to PKCS#11 managed keys.
+* core: Add an export API for historical activity log data [[GH-15586](https://github.com/hashicorp/vault/pull/15586)]
+* core: Add new DB methods that do not prepare statements. [[GH-15166](https://github.com/hashicorp/vault/pull/15166)]
+* core: check uid and permissions of config dir, config file, plugin dir and plugin binaries [[GH-14817](https://github.com/hashicorp/vault/pull/14817)]
+* core: Fix some identity data races found by Go race detector (no known impact yet). [[GH-15123](https://github.com/hashicorp/vault/pull/15123)]
+* core: Include build date in `sys/seal-status` and `sys/version-history` endpoints. [[GH-14957](https://github.com/hashicorp/vault/pull/14957)]
+* core: Upgrade golang.org/x/crypto/ssh [[GH-15125](https://github.com/hashicorp/vault/pull/15125)]
+* kmip (enterprise): Implement operations Query, Import, Encrypt and Decrypt. Improve operations Locate, Add Attribute, Get Attributes and Get Attribute List to handle most supported attributes.
+* mfa/okta: migrate to use official Okta SDK [[GH-15355](https://github.com/hashicorp/vault/pull/15355)]
+* sdk: Change OpenAPI code generator to extract request objects into /components/schemas and reference them by name. [[GH-14217](https://github.com/hashicorp/vault/pull/14217)]
+* secrets/consul: Add support for Consul node-identities and service-identities [[GH-15295](https://github.com/hashicorp/vault/pull/15295)]
+* secrets/consul: Vault is now able to automatically bootstrap the Consul ACL system. [[GH-10751](https://github.com/hashicorp/vault/pull/10751)]
+* secrets/database/elasticsearch: Use the new /_security base API path instead of /_xpack/security when managing elasticsearch. [[GH-15614](https://github.com/hashicorp/vault/pull/15614)]
+* secrets/pki: Add not_before_duration to root CA generation, intermediate CA signing paths. [[GH-14178](https://github.com/hashicorp/vault/pull/14178)]
+* secrets/pki: Add support for CPS URLs and User Notice to Policy Information [[GH-15751](https://github.com/hashicorp/vault/pull/15751)]
+* secrets/pki: Allow operators to control the issuing certificate behavior when
+the requested TTL is beyond the NotAfter value of the signing certificate [[GH-15152](https://github.com/hashicorp/vault/pull/15152)]
+* secrets/pki: Always return CRLs, URLs configurations, even if using the default value. [[GH-15470](https://github.com/hashicorp/vault/pull/15470)]
+* secrets/pki: Enable Patch Functionality for Roles and Issuers (API only; example below) [[GH-15510](https://github.com/hashicorp/vault/pull/15510)]
+* secrets/pki: Have pki/sign-verbatim use the not_before_duration field defined in the role [[GH-15429](https://github.com/hashicorp/vault/pull/15429)]
+* secrets/pki: Warn on empty Subject field during issuer generation (root/generate and root/sign-intermediate). [[GH-15494](https://github.com/hashicorp/vault/pull/15494)]
+* secrets/pki: Warn on missing AIA access information when generating issuers (config/urls). [[GH-15509](https://github.com/hashicorp/vault/pull/15509)]
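+
+A sketch of the new PATCH support on PKI roles and issuers; the mount, role name, and field are hypothetical:
+
+```shell
+# HTTP PATCH with a JSON merge patch updates a single field in place.
+curl --request PATCH \
+    --header "X-Vault-Token: $VAULT_TOKEN" \
+    --header "Content-Type: application/merge-patch+json" \
+    --data '{"ttl": "72h"}' \
+    "$VAULT_ADDR/v1/pki/roles/example-role"
+```
+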
[[GH-15509](https://github.com/hashicorp/vault/pull/15509)]
+* secrets/pki: Warn when `generate_lease` and `no_store` are both set to `true` on requests. [[GH-14292](https://github.com/hashicorp/vault/pull/14292)]
+* secrets/ssh: Add connection timeout of 1 minute for outbound SSH connection in deprecated Dynamic SSH Keys mode. [[GH-15440](https://github.com/hashicorp/vault/pull/15440)]
+* secrets/ssh: Support for `not_before_duration` in SSH [[GH-15250](https://github.com/hashicorp/vault/pull/15250)]
+* sentinel (enterprise): Upgrade sentinel to [v0.18.5](https://docs.hashicorp.com/sentinel/changelog#0-18-5-january-14-2022) to avoid potential naming collisions in the remote installer
+* storage/raft: Use larger timeouts at startup to reduce likelihood of inducing elections. [[GH-15042](https://github.com/hashicorp/vault/pull/15042)]
+* ui: Allow namespace param to be parsed from state queryParam [[GH-15378](https://github.com/hashicorp/vault/pull/15378)]
+* ui: Default auto-rotation period in transit is 30 days [[GH-15474](https://github.com/hashicorp/vault/pull/15474)]
+* ui: Parse schema refs from OpenAPI [[GH-14508](https://github.com/hashicorp/vault/pull/14508)]
+* ui: Remove stored license references [[GH-15513](https://github.com/hashicorp/vault/pull/15513)]
+* ui: Remove storybook. [[GH-15074](https://github.com/hashicorp/vault/pull/15074)]
+* ui: Replaces the IvyCodemirror wrapper with a custom ember modifier. [[GH-14659](https://github.com/hashicorp/vault/pull/14659)]
+* website/docs: Add usage documentation for Kubernetes Secrets Engine [[GH-15527](https://github.com/hashicorp/vault/pull/15527)]
+* website/docs: added a link to an Enigma secret plugin. [[GH-14389](https://github.com/hashicorp/vault/pull/14389)]
+
+DEPRECATIONS:
+
+* docs: Document removal of X.509 certificates with signatures that use SHA-1 in Vault 1.12 [[GH-15581](https://github.com/hashicorp/vault/pull/15581)]
+* secrets/consul: Deprecate old parameters "token_type" and "policy" [[GH-15550](https://github.com/hashicorp/vault/pull/15550)]
+* secrets/consul: Deprecate parameter "policies" in favor of "consul_policies" for consistency [[GH-15400](https://github.com/hashicorp/vault/pull/15400)]
+
+BUG FIXES:
+
+* Fixed panic when adding or modifying a Duo MFA Method in Enterprise
+* agent: Fix log level mismatch between ERR and ERROR [[GH-14424](https://github.com/hashicorp/vault/pull/14424)]
+* agent: Redact auto auth token from renew endpoints [[GH-15380](https://github.com/hashicorp/vault/pull/15380)]
+* api/sys/raft: Update RaftSnapshotRestore to use net/http client allowing bodies larger than allocated memory to be streamed [[GH-14269](https://github.com/hashicorp/vault/pull/14269)]
+* api: Fixes bug where OutputCurlString field was unintentionally being copied over during client cloning [[GH-14968](https://github.com/hashicorp/vault/pull/14968)]
+* api: Respect increment value in grace period calculations in LifetimeWatcher [[GH-14836](https://github.com/hashicorp/vault/pull/14836)]
+* auth/approle: Add maximum length for input values that result in SHA-256 HMAC calculation [[GH-14746](https://github.com/hashicorp/vault/pull/14746)]
+* auth/kubernetes: Fix error code when using the wrong service account [[GH-15584](https://github.com/hashicorp/vault/pull/15584)]
+* auth/ldap: The logic for setting the entity alias when `username_as_alias` is set
+has been fixed. The previous behavior would make a request to the LDAP server to
+get `user_attr` before discarding it and using the username instead.
This would +make it impossible for a user to connect if this attribute was missing or had +multiple values, even though it would not be used anyway. This has been fixed +and the username is now used without making superfluous LDAP searches. [[GH-15525](https://github.com/hashicorp/vault/pull/15525)] +* auth: Fixed erroneous success message when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] +* auth: Fixed erroneous token information being displayed when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] +* auth: Fixed two-phase MFA information missing from table format when using vault login [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] +* auth: Prevent deleting a valid MFA method ID using the endpoint for a different MFA method type [[GH-15482](https://github.com/hashicorp/vault/pull/15482)] +* auth: forward requests subject to login MFA from perfStandby to Active node [[GH-15009](https://github.com/hashicorp/vault/pull/15009)] +* auth: load login MFA configuration upon restart [[GH-15261](https://github.com/hashicorp/vault/pull/15261)] +* cassandra: Update gocql Cassandra client to fix "no hosts available in the pool" error [[GH-14973](https://github.com/hashicorp/vault/pull/14973)] +* cli: Fix panic caused by parsing key=value fields whose value is a single backslash [[GH-14523](https://github.com/hashicorp/vault/pull/14523)] +* cli: kv get command now honors trailing spaces to retrieve secrets [[GH-15188](https://github.com/hashicorp/vault/pull/15188)] +* command: do not report listener and storage types as key not found warnings [[GH-15383](https://github.com/hashicorp/vault/pull/15383)] +* core (enterprise): Allow local alias create RPCs to persist alias metadata +* core (enterprise): Fix overcounting of lease count quota usage at startup. +* core (enterprise): Fix some races in merkle index flushing code found in testing +* core (enterprise): Handle additional edge cases reinitializing PKCS#11 libraries after login errors. +* core/config: Only ask the system about network interfaces when address configs contain a template having the format: {{ ... }} [[GH-15224](https://github.com/hashicorp/vault/pull/15224)] +* core/managed-keys (enterprise): Allow PKCS#11 managed keys to use 0 as a slot number +* core/metrics: Fix incorrect table size metric for local mounts [[GH-14755](https://github.com/hashicorp/vault/pull/14755)] +* core: Fix double counting for "route" metrics [[GH-12763](https://github.com/hashicorp/vault/pull/12763)] +* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited integers [[GH-15072](https://github.com/hashicorp/vault/pull/15072)] +* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited strings [[GH-14522](https://github.com/hashicorp/vault/pull/14522)] +* core: Fix panic caused by parsing policies with empty slice values. [[GH-14501](https://github.com/hashicorp/vault/pull/14501)] +* core: Fix panic for help request URL paths without /v1/ prefix [[GH-14704](https://github.com/hashicorp/vault/pull/14704)] +* core: Limit SSCT WAL checks on perf standbys to raft backends only [[GH-15879](https://github.com/hashicorp/vault/pull/15879)] +* core: Prevent changing file permissions of audit logs when mode 0000 is used. [[GH-15759](https://github.com/hashicorp/vault/pull/15759)] +* core: Prevent metrics generation from causing deadlocks. 
[[GH-15693](https://github.com/hashicorp/vault/pull/15693)] +* core: fixed systemd reloading notification [[GH-15041](https://github.com/hashicorp/vault/pull/15041)] +* core: fixing excessive unix file permissions [[GH-14791](https://github.com/hashicorp/vault/pull/14791)] +* core: fixing excessive unix file permissions on dir, files and archive created by vault debug command [[GH-14846](https://github.com/hashicorp/vault/pull/14846)] +* core: pre-calculate namespace specific paths when tainting a route during postUnseal [[GH-15067](https://github.com/hashicorp/vault/pull/15067)] +* core: renaming the environment variable VAULT_DISABLE_FILE_PERMISSIONS_CHECK to VAULT_ENABLE_FILE_PERMISSIONS_CHECK and adjusting the logic [[GH-15452](https://github.com/hashicorp/vault/pull/15452)] +* core: report unused or redundant keys in server configuration [[GH-14752](https://github.com/hashicorp/vault/pull/14752)] +* core: time.After() used in a select statement can lead to memory leak [[GH-14814](https://github.com/hashicorp/vault/pull/14814)] +* identity: deduplicate policies when creating/updating identity groups [[GH-15055](https://github.com/hashicorp/vault/pull/15055)] +* mfa/okta: disable client side rate limiting causing delays in push notifications [[GH-15369](https://github.com/hashicorp/vault/pull/15369)] +* plugin: Fix a bug where plugin reload would falsely report success in certain scenarios. [[GH-15579](https://github.com/hashicorp/vault/pull/15579)] +* raft: fix Raft TLS key rotation panic that occurs if active key is more than 24 hours old [[GH-15156](https://github.com/hashicorp/vault/pull/15156)] +* raft: Ensure initialMmapSize is set to 0 on Windows [[GH-14977](https://github.com/hashicorp/vault/pull/14977)] +* replication (enterprise): fix panic due to missing entity during invalidation of local aliases. [[GH-14622](https://github.com/hashicorp/vault/pull/14622)] +* sdk/cidrutil: Only check if cidr contains remote address for IP addresses [[GH-14487](https://github.com/hashicorp/vault/pull/14487)] +* sdk: Fix OpenApi spec generator to properly convert TypeInt64 to OAS supported int64 [[GH-15104](https://github.com/hashicorp/vault/pull/15104)] +* sdk: Fix OpenApi spec generator to remove duplicate sha_256 parameter [[GH-15163](https://github.com/hashicorp/vault/pull/15163)] +* secrets/database: Ensure that a `connection_url` password is redacted in all cases. [[GH-14744](https://github.com/hashicorp/vault/pull/14744)] +* secrets/kv: Fix issue preventing the ability to reset the `delete_version_after` key metadata field to 0s via HTTP `PATCH`. [[GH-15792](https://github.com/hashicorp/vault/pull/15792)] +* secrets/pki: CRLs on performance secondary clusters are now automatically +rebuilt upon changes to the list of issuers. [[GH-15179](https://github.com/hashicorp/vault/pull/15179)] +* secrets/pki: Fix handling of "any" key type with default zero signature bits value. [[GH-14875](https://github.com/hashicorp/vault/pull/14875)] +* secrets/pki: Fixed bug where larger SHA-2 hashes were truncated with shorter ECDSA CA certificates [[GH-14943](https://github.com/hashicorp/vault/pull/14943)] +* secrets/ssh: Convert role field not_before_duration to seconds before returning it [[GH-15559](https://github.com/hashicorp/vault/pull/15559)] +* storage/raft (enterprise): Auto-snapshot configuration now forbids slashes in file prefixes for all types, and "/" in path prefix for local storage type. Strip leading prefix in path prefix for AWS. Improve error handling/reporting. 
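+
+As an illustrative aside for the `delete_version_after` fix above (GH-15792), a minimal sketch, assuming the official `github.com/hashicorp/vault/api` Go client, a KV-v2 engine mounted at `secret/`, and a hypothetical key `my-app`, of resetting the field to `0s` via HTTP `PATCH`:
+
+```go
+package main
+
+import (
+    "context"
+    "log"
+
+    vault "github.com/hashicorp/vault/api"
+)
+
+func main() {
+    // DefaultConfig reads VAULT_ADDR; NewClient picks up VAULT_TOKEN.
+    client, err := vault.NewClient(vault.DefaultConfig())
+    if err != nil {
+        log.Fatal(err)
+    }
+    // JSONMergePatch issues an HTTP PATCH; before this fix, a "0s" value
+    // here could not clear a previously set delete_version_after.
+    _, err = client.Logical().JSONMergePatch(context.Background(),
+        "secret/metadata/my-app",
+        map[string]interface{}{"delete_version_after": "0s"})
+    if err != nil {
+        log.Fatal(err)
+    }
+}
+```
+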
+* storage/raft: Forward autopilot state requests on perf standbys to active node. [[GH-15493](https://github.com/hashicorp/vault/pull/15493)]
+* storage/raft: joining a node to a cluster now ignores any VAULT_NAMESPACE environment variable set on the server process [[GH-15519](https://github.com/hashicorp/vault/pull/15519)]
+* ui: Fix Generated Token's Policies helpText to clarify that comma separated values are not accepted in this field. [[GH-15046](https://github.com/hashicorp/vault/pull/15046)]
+* ui: Fix KV secret showing in the edit form after a user creates a new version but doesn't have read capabilities [[GH-14794](https://github.com/hashicorp/vault/pull/14794)]
+* ui: Fix inconsistent behavior in client count calendar widget [[GH-15789](https://github.com/hashicorp/vault/pull/15789)]
+* ui: Fix issue where metadata tab is hidden even though policy grants access [[GH-15824](https://github.com/hashicorp/vault/pull/15824)]
+* ui: Fix issue with KV not recomputing model when you changed versions. [[GH-14941](https://github.com/hashicorp/vault/pull/14941)]
+* ui: Fixed client count timezone for start and end months [[GH-15167](https://github.com/hashicorp/vault/pull/15167)]
+* ui: Fixed unsupported revocation statements field for DB roles [[GH-15573](https://github.com/hashicorp/vault/pull/15573)]
+* ui: Fixes edit auth method capabilities issue [[GH-14966](https://github.com/hashicorp/vault/pull/14966)]
+* ui: Fixes issue logging in with OIDC from a listed auth mounts tab [[GH-14916](https://github.com/hashicorp/vault/pull/14916)]
+* ui: Revert using localStorage in favor of sessionStorage [[GH-15769](https://github.com/hashicorp/vault/pull/15769)]
+* ui: Updated `leasId` to `leaseId` in the "Copy Credentials" section of "Generate AWS Credentials" [[GH-15685](https://github.com/hashicorp/vault/pull/15685)]
+* ui: fix firefox inability to recognize file format of client count csv export [[GH-15364](https://github.com/hashicorp/vault/pull/15364)]
+* ui: fix form validations ignoring default values and disabling submit button [[GH-15560](https://github.com/hashicorp/vault/pull/15560)]
+* ui: fix search-select component showing blank selections when editing group member entity [[GH-15058](https://github.com/hashicorp/vault/pull/15058)]
+* ui: masked values no longer give away length or location of special characters [[GH-15025](https://github.com/hashicorp/vault/pull/15025)]
+
+## 1.10.11
+### March 01, 2023
+
+SECURITY:
+
+* auth/approle: When using the Vault and Vault Enterprise (Vault) approle auth method, any authenticated user with access to the /auth/approle/role/:role_name/secret-id-accessor/destroy endpoint can destroy the secret ID of any other role by providing the secret ID accessor. This vulnerability, CVE-2023-24999, has been fixed in Vault 1.13.0, 1.12.4, 1.11.8, 1.10.11 and above. [[HSEC-2023-07](https://discuss.hashicorp.com/t/hcsec-2023-07-vault-fails-to-verify-if-approle-secretid-belongs-to-role-during-a-destroy-operation/51305)]
+
+CHANGES:
+
+* core: Bump Go version to 1.19.6.
+
+IMPROVEMENTS:
+
+* secrets/database: Adds error message requiring password on root credential rotation.
[[GH-19103](https://github.com/hashicorp/vault/pull/19103)] + +BUG FIXES: + +* auth/approle: Add nil check for the secret ID entry when deleting via secret id accessor preventing cross role secret id deletion [[GH-19186](https://github.com/hashicorp/vault/pull/19186)] +* core (enterprise): Fix panic when using invalid accessor for control-group request +* core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null [[GH-18951](https://github.com/hashicorp/vault/pull/18951)] +* replication (enterprise): Fix bug where reloading external plugin on a secondary would +break replication. +* secrets/ad: Fix bug where config couldn't be updated unless binddn/bindpass were included in the update. [[GH-18209](https://github.com/hashicorp/vault/pull/18209)] +* ui (enterprise): Fix cancel button from transform engine role creation page [[GH-19135](https://github.com/hashicorp/vault/pull/19135)] +* ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode [[GH-19071](https://github.com/hashicorp/vault/pull/19071)] + +## 1.10.10 +### February 6, 2023 + +CHANGES: + +* core: Bump Go version to 1.19.4. + +IMPROVEMENTS: + +* command/server: Environment variable keys are now logged at startup. [[GH-18125](https://github.com/hashicorp/vault/pull/18125)] +* core/fips: use upstream toolchain for FIPS 140-2 compliance again; this will appear as X=boringcrypto on the Go version in Vault server logs. +* secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters [[GH-18799](https://github.com/hashicorp/vault/pull/18799)] +* ui: Prepends "passcode=" if not provided in user input for duo totp mfa method authentication [[GH-18342](https://github.com/hashicorp/vault/pull/18342)] +* ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] [[GH-18350](https://github.com/hashicorp/vault/pull/18350)] + +BUG FIXES: + +* auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID [[GH-18145](https://github.com/hashicorp/vault/pull/18145)] +* auth/token: Fix ignored parameter warnings for valid parameters on token create [[GH-16938](https://github.com/hashicorp/vault/pull/16938)] +* cli/kv: skip formatting of nil secrets for patch and put with field parameter set [[GH-18163](https://github.com/hashicorp/vault/pull/18163)] +* core (enterprise): Fix a race condition resulting in login errors to PKCS#11 modules under high concurrency. +* core/managed-keys (enterprise): Limit verification checks to mounts in a key's namespace +* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas. +* core/quotas: Fix issue with improper application of default rate limit quota exempt paths [[GH-18273](https://github.com/hashicorp/vault/pull/18273)] +* core: fix bug where context cancellations weren't forwarded to active node from performance standbys. +* core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted [[GH-18923](https://github.com/hashicorp/vault/pull/18923)] +* database/mongodb: Fix writeConcern set to be applied to any query made on the database [[GH-18546](https://github.com/hashicorp/vault/pull/18546)] +* identity (enterprise): Fix a data race when creating an entity for a local alias. +* kmip (enterprise): Fix Destroy operation response that omitted Unique Identifier on some batched responses. 
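+
+A minimal sketch, assuming the official `github.com/hashicorp/vault/api` Go client and a hypothetical database secrets engine mount and connection name, of supplying the new `tls_server_name` and `tls_skip_verify` parameters noted in the improvements above (GH-18799):
+
+```go
+package main
+
+import (
+    "log"
+
+    vault "github.com/hashicorp/vault/api"
+)
+
+func main() {
+    client, err := vault.NewClient(vault.DefaultConfig())
+    if err != nil {
+        log.Fatal(err)
+    }
+    // Hypothetical engine mounted at "database/" with connection "my-mysql".
+    _, err = client.Logical().Write("database/config/my-mysql", map[string]interface{}{
+        "plugin_name":     "mysql-database-plugin",
+        "connection_url":  "{{username}}:{{password}}@tcp(db.internal:3306)/",
+        "username":        "vault",
+        "password":        "example-password",
+        "tls_server_name": "db.internal", // new parameter
+        "tls_skip_verify": false,         // new parameter
+    })
+    if err != nil {
+        log.Fatal(err)
+    }
+}
+```
+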
+* kmip (enterprise): Fix Locate operation response incompatibility with clients using KMIP versions prior to 1.3.
+* licensing (enterprise): update autoloaded license cache after reload
+* storage/raft (enterprise): Fix some storage-modifying RPCs used by perf standbys that weren't returning the resulting WAL state.
+* ui: fixes query parameters not passed in api explorer test requests [[GH-18743](https://github.com/hashicorp/vault/pull/18743)]
+
+## 1.10.9
+### November 30, 2022
+
+BUG FIXES:
+
+* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)]
+* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas.
+* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)]
+* core: fix a startup race condition where performance standbys could go into a
+  mount loop if default policies are not yet synced from the active node. [[GH-17801](https://github.com/hashicorp/vault/pull/17801)]
+* secrets/azure: add WAL to clean up role assignments if errors occur [[GH-18084](https://github.com/hashicorp/vault/pull/18084)]
+* secrets/gcp: Fixes duplicate service account key for rotate root on standby or secondary [[GH-18109](https://github.com/hashicorp/vault/pull/18109)]
+* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)]
+
+## 1.10.8
+### November 2, 2022
+
+BUG FIXES:
+
+* core/managed-keys (enterprise): Return better error messages when encountering key creation failures
+* core/managed-keys (enterprise): fix panic when having `cache_disable` true
+* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)]
+* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)]
+* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)]
+* secrets/pki: Do not ignore provided signature bits value when signing intermediate and leaf certificates with a managed key [[GH-17328](https://github.com/hashicorp/vault/pull/17328)]
+* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. [[GH-17497](https://github.com/hashicorp/vault/pull/17497)]
+* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)]
+
+## 1.10.7
+### September 30, 2022
+
+SECURITY:
+
+* secrets/pki: Vault’s TLS certificate auth method did not initially load the optionally-configured CRL issued by the role’s CA into memory on startup, resulting in the revocation list not being checked, if the CRL has not yet been retrieved. This vulnerability, CVE-2022-41316, is fixed in Vault 1.12.0, 1.11.4, 1.10.7, and 1.9.10. [[HSEC-2022-24](https://discuss.hashicorp.com/t/hcsec-2022-24-vaults-tls-cert-auth-method-only-loaded-crl-after-first-request/45483)]
+
+BUG FIXES:
+
+* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. [[GH-17138](https://github.com/hashicorp/vault/pull/17138)]
+* core/quotas: Fix goroutine leak caused by the seal process not fully cleaning up Rate Limit Quotas.
[[GH-17281](https://github.com/hashicorp/vault/pull/17281)] +* core: Prevent two or more DR failovers from invalidating SSCT tokens generated on the previous primaries. [[GH-16956](https://github.com/hashicorp/vault/pull/16956)] +* identity/oidc: Adds `claims_supported` to discovery document. [[GH-16992](https://github.com/hashicorp/vault/pull/16992)] +* replication (enterprise): Fix data race in SaveCheckpoint() +* secrets/transform (enterprise): Fix an issue loading tokenization transform configuration after a specific sequence of reconfigurations. +* secrets/transform (enterprise): Fix persistence problem with tokenization store credentials. +* ui: Fix lease force revoke action [[GH-16930](https://github.com/hashicorp/vault/pull/16930)] + +## 1.10.6 +### August 31, 2022 + +SECURITY: + +* core: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. This vulnerability, CVE-2022-40186, is fixed in 1.11.3, 1.10.6, and 1.9.9. [[HSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] + +CHANGES: + +* core: Bump Go version to 1.17.13. + +IMPROVEMENTS: + +* identity/oidc: Adds the `client_secret_post` token endpoint authentication method. [[GH-16598](https://github.com/hashicorp/vault/pull/16598)] + +BUG FIXES: + +* auth/gcp: Fixes the ability to reset the configuration's credentials to use application default credentials. [[GH-16524](https://github.com/hashicorp/vault/pull/16524)] +* command/debug: fix bug where monitor was not honoring configured duration [[GH-16834](https://github.com/hashicorp/vault/pull/16834)] +* core/auth: Return a 403 instead of a 500 for a malformed SSCT [[GH-16112](https://github.com/hashicorp/vault/pull/16112)] +* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)] +* database: Invalidate queue should cancel context first to avoid deadlock [[GH-15933](https://github.com/hashicorp/vault/pull/15933)] +* identity/oidc: Change the `state` parameter of the Authorization Endpoint to optional. [[GH-16599](https://github.com/hashicorp/vault/pull/16599)] +* identity/oidc: Detect invalid `redirect_uri` values sooner in validation of the +Authorization Endpoint. [[GH-16601](https://github.com/hashicorp/vault/pull/16601)] +* identity/oidc: Fixes validation of the `request` and `request_uri` parameters. [[GH-16600](https://github.com/hashicorp/vault/pull/16600)] +* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)] +* secrets/gcp: Fixes duplicate static account key creation from performance secondary clusters. [[GH-16534](https://github.com/hashicorp/vault/pull/16534)] +* storage/raft: Fix retry_join initialization failure [[GH-16550](https://github.com/hashicorp/vault/pull/16550)] +* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] +* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] +* ui: Fix naming of permitted_dns_domains form parameter on CA creation (root generation and sign intermediate). 
[[GH-16739](https://github.com/hashicorp/vault/pull/16739)]
+
+## 1.10.5
+### July 21, 2022
+
+SECURITY:
+
+* storage/raft: Vault Enterprise (“Vault”) clusters using Integrated Storage expose an unauthenticated API endpoint that could be abused to override the voter status of a node within a Vault HA cluster, introducing potential for future data loss or catastrophic failure. This vulnerability, CVE-2022-36129, was fixed in Vault 1.9.8, 1.10.5, and 1.11.1. [[HSEC-2022-15](https://discuss.hashicorp.com/t/hcsec-2022-15-vault-enterprise-does-not-verify-existing-voter-status-when-joining-an-integrated-storage-ha-node/42420)]
+
+CHANGES:
+
+* core/fips: Disable and warn about entropy augmentation in FIPS 140-2 Inside mode [[GH-15858](https://github.com/hashicorp/vault/pull/15858)]
+* core: Bump Go version to 1.17.12.
+
+IMPROVEMENTS:
+
+* core: Add `sys/loggers` and `sys/loggers/:name` endpoints to provide ability to modify logging verbosity [[GH-16111](https://github.com/hashicorp/vault/pull/16111)]
+* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)]
+
+BUG FIXES:
+
+* agent/template: Fix parsing error for the exec stanza [[GH-16231](https://github.com/hashicorp/vault/pull/16231)]
+* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)]
+* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty
+* core/seal: Fix possible keyring truncation when using the file backend. [[GH-15946](https://github.com/hashicorp/vault/pull/15946)]
+* core: Limit SSCT WAL checks on perf standbys to raft backends only [[GH-15879](https://github.com/hashicorp/vault/pull/15879)]
+* plugin/multiplexing: Fix panic when id doesn't exist in connection map [[GH-16094](https://github.com/hashicorp/vault/pull/16094)]
+* secret/pki: Do not fail validation with a legacy key_bits default value and key_type=any when signing CSRs [[GH-16246](https://github.com/hashicorp/vault/pull/16246)]
+* storage/raft (enterprise): Prevent unauthenticated voter status with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)]
+* transform (enterprise): Fix a bug in the handling of nested or unmatched capture groups in FPE transformations.
+* ui: Fix issue where metadata tab is hidden even though policy grants access [[GH-15824](https://github.com/hashicorp/vault/pull/15824)]
+* ui: Revert using localStorage in favor of sessionStorage [[GH-16169](https://github.com/hashicorp/vault/pull/16169)]
+* ui: Updated `leasId` to `leaseId` in the "Copy Credentials" section of "Generate AWS Credentials" [[GH-15685](https://github.com/hashicorp/vault/pull/15685)]
+
+## 1.10.4
+### June 10, 2022
+
+CHANGES:
+
+* core: Bump Go version to 1.17.11.
+
+IMPROVEMENTS:
+
+* api/monitor: Add log_format option to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)]
+* auth: Globally scoped Login MFA method Get/List endpoints [[GH-15248](https://github.com/hashicorp/vault/pull/15248)]
+* auth: forward cached MFA auth response to the leader using RPC instead of forwarding all login requests [[GH-15469](https://github.com/hashicorp/vault/pull/15469)]
+* cli/debug: added support for retrieving metrics from DR clusters if `unauthenticated_metrics_access` is enabled [[GH-15316](https://github.com/hashicorp/vault/pull/15316)]
+* command/debug: Add log_format flag to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)]
+* core: Fix some identity data races found by Go race detector (no known impact yet). [[GH-15123](https://github.com/hashicorp/vault/pull/15123)]
+* storage/raft: Use larger timeouts at startup to reduce likelihood of inducing elections. [[GH-15042](https://github.com/hashicorp/vault/pull/15042)]
+* ui: Allow namespace param to be parsed from state queryParam [[GH-15378](https://github.com/hashicorp/vault/pull/15378)]
+
+BUG FIXES:
+
+* agent: Redact auto auth token from renew endpoints [[GH-15380](https://github.com/hashicorp/vault/pull/15380)]
+* auth/kubernetes: Fix error code when using the wrong service account [[GH-15585](https://github.com/hashicorp/vault/pull/15585)]
+* auth/ldap: The logic for setting the entity alias when `username_as_alias` is set
+has been fixed. The previous behavior would make a request to the LDAP server to
+get `user_attr` before discarding it and using the username instead. This would
+make it impossible for a user to connect if this attribute was missing or had
+multiple values, even though it would not be used anyway. This has been fixed
+and the username is now used without making superfluous LDAP searches. [[GH-15525](https://github.com/hashicorp/vault/pull/15525)]
+* auth: Fixed erroneous success message when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)]
+* auth: Fixed erroneous token information being displayed when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)]
+* auth: Fixed two-phase MFA information missing from table format when using vault login [[GH-15428](https://github.com/hashicorp/vault/pull/15428)]
+* auth: Prevent deleting a valid MFA method ID using the endpoint for a different MFA method type [[GH-15482](https://github.com/hashicorp/vault/pull/15482)]
+* core (enterprise): Fix overcounting of lease count quota usage at startup.
+* core: Prevent changing file permissions of audit logs when mode 0000 is used. [[GH-15759](https://github.com/hashicorp/vault/pull/15759)]
+* core: Prevent metrics generation from causing deadlocks. [[GH-15693](https://github.com/hashicorp/vault/pull/15693)]
+* core: fixed systemd reloading notification [[GH-15041](https://github.com/hashicorp/vault/pull/15041)]
+* mfa/okta: disable client side rate limiting causing delays in push notifications [[GH-15369](https://github.com/hashicorp/vault/pull/15369)]
+* storage/raft (enterprise): Auto-snapshot configuration now forbids slashes in file prefixes for all types, and "/" in path prefix for local storage type. Strip leading prefix in path prefix for AWS. Improve error handling/reporting.
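+
+A minimal sketch, assuming the official `github.com/hashicorp/vault/api` Go client and a hypothetical log path, of enabling a file audit device with `mode` `0000`, which, per the audit-log permissions fix above (GH-15759), now leaves the existing file permissions untouched:
+
+```go
+package main
+
+import (
+    "log"
+
+    vault "github.com/hashicorp/vault/api"
+)
+
+func main() {
+    client, err := vault.NewClient(vault.DefaultConfig())
+    if err != nil {
+        log.Fatal(err)
+    }
+    err = client.Sys().EnableAuditWithOptions("file", &vault.EnableAuditOptions{
+        Type: "file",
+        Options: map[string]string{
+            "file_path": "/var/log/vault_audit.log", // hypothetical path
+            "mode":      "0000",                     // preserve the file's existing permissions
+        },
+    })
+    if err != nil {
+        log.Fatal(err)
+    }
+}
+```
+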
+* transform (enterprise): Fix non-overridable column default value causing tokenization tokens to expire prematurely when using the MySQL storage backend.
+* ui: Fix inconsistent behavior in client count calendar widget [[GH-15789](https://github.com/hashicorp/vault/pull/15789)]
+* ui: Fixed client count timezone for start and end months [[GH-15167](https://github.com/hashicorp/vault/pull/15167)]
+* ui: fix firefox inability to recognize file format of client count csv export [[GH-15364](https://github.com/hashicorp/vault/pull/15364)]
+
+## 1.10.3
+### May 11, 2022
+
+SECURITY:
+
+* auth: A vulnerability was identified in Vault and Vault Enterprise (“Vault”) from 1.10.0 to 1.10.2 where MFA may not be enforced on user logins after a server restart. This vulnerability, CVE-2022-30689, was fixed in Vault 1.10.3.
+
+BUG FIXES:
+
+* auth: load login MFA configuration upon restart [[GH-15261](https://github.com/hashicorp/vault/pull/15261)]
+* core/config: Only ask the system about network interfaces when address configs contain a template having the format: {{ ... }} [[GH-15224](https://github.com/hashicorp/vault/pull/15224)]
+* core: pre-calculate namespace specific paths when tainting a route during postUnseal [[GH-15067](https://github.com/hashicorp/vault/pull/15067)]
+
+## 1.10.2
+### April 29, 2022
+
+BUG FIXES:
+
+* raft: fix Raft TLS key rotation panic that occurs if active key is more than 24 hours old [[GH-15156](https://github.com/hashicorp/vault/pull/15156)]
+* sdk: Fix OpenApi spec generator to properly convert TypeInt64 to OAS supported int64 [[GH-15104](https://github.com/hashicorp/vault/pull/15104)]
+
+## 1.10.1
+### April 22, 2022
+
+CHANGES:
+
+* core: A request that fails path validation due to relative path check will now be responded to with a 400 rather than 500. [[GH-14328](https://github.com/hashicorp/vault/pull/14328)]
+* core: Bump Go version to 1.17.9.
[[GH-15044](https://github.com/hashicorp/vault/pull/15044)]
+
+IMPROVEMENTS:
+
+* agent: Upgrade hashicorp/consul-template version for sprig template functions and improved writeTo function [[GH-15092](https://github.com/hashicorp/vault/pull/15092)]
+* auth: enforce a rate limit for TOTP passcode validation attempts [[GH-14864](https://github.com/hashicorp/vault/pull/14864)]
+* cli/vault: warn when policy name contains upper-case letter [[GH-14670](https://github.com/hashicorp/vault/pull/14670)]
+* cockroachdb: add high-availability support [[GH-12965](https://github.com/hashicorp/vault/pull/12965)]
+* sentinel (enterprise): Upgrade sentinel to [v0.18.5](https://docs.hashicorp.com/sentinel/changelog#0-18-5-january-14-2022) to avoid potential naming collisions in the remote installer
+
+BUG FIXES:
+
+* Fixed panic when adding or modifying a Duo MFA Method in Enterprise
+* agent: Fix log level mismatch between ERR and ERROR [[GH-14424](https://github.com/hashicorp/vault/pull/14424)]
+* api/sys/raft: Update RaftSnapshotRestore to use net/http client allowing bodies larger than allocated memory to be streamed [[GH-14269](https://github.com/hashicorp/vault/pull/14269)]
+* api: Respect increment value in grace period calculations in LifetimeWatcher [[GH-14836](https://github.com/hashicorp/vault/pull/14836)]
+* auth/approle: Add maximum length for input values that result in SHA-256 HMAC calculation [[GH-14746](https://github.com/hashicorp/vault/pull/14746)]
+* auth: forward requests subject to login MFA from perfStandby to Active node [[GH-15009](https://github.com/hashicorp/vault/pull/15009)]
+* cassandra: Update gocql Cassandra client to fix "no hosts available in the pool" error [[GH-14973](https://github.com/hashicorp/vault/pull/14973)]
+* cli: Fix panic caused by parsing key=value fields whose value is a single backslash [[GH-14523](https://github.com/hashicorp/vault/pull/14523)]
+* core (enterprise): Allow local alias create RPCs to persist alias metadata
+* core/managed-keys (enterprise): Allow PKCS#11 managed keys to use 0 as a slot number
+* core/metrics: Fix incorrect table size metric for local mounts [[GH-14755](https://github.com/hashicorp/vault/pull/14755)]
+* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited integers [[GH-15072](https://github.com/hashicorp/vault/pull/15072)]
+* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited strings [[GH-14522](https://github.com/hashicorp/vault/pull/14522)]
+* core: Fix panic caused by parsing policies with empty slice values.
[[GH-14501](https://github.com/hashicorp/vault/pull/14501)]
+* core: Fix panic for help request URL paths without /v1/ prefix [[GH-14704](https://github.com/hashicorp/vault/pull/14704)]
+* core: fixing excessive unix file permissions [[GH-14791](https://github.com/hashicorp/vault/pull/14791)]
+* core: fixing excessive unix file permissions on dir, files and archive created by vault debug command [[GH-14846](https://github.com/hashicorp/vault/pull/14846)]
+* core: report unused or redundant keys in server configuration [[GH-14752](https://github.com/hashicorp/vault/pull/14752)]
+* core: time.After() used in a select statement can lead to memory leak [[GH-14814](https://github.com/hashicorp/vault/pull/14814)]
+* raft: Ensure initialMmapSize is set to 0 on Windows [[GH-14977](https://github.com/hashicorp/vault/pull/14977)]
+* replication (enterprise): fix panic due to missing entity during invalidation of local aliases. [[GH-14622](https://github.com/hashicorp/vault/pull/14622)]
+* secrets/database: Ensure that a `connection_url` password is redacted in all cases. [[GH-14744](https://github.com/hashicorp/vault/pull/14744)]
+* secrets/pki: Fix handling of "any" key type with default zero signature bits value. [[GH-14875](https://github.com/hashicorp/vault/pull/14875)]
+* secrets/pki: Fixed bug where larger SHA-2 hashes were truncated with shorter ECDSA CA certificates [[GH-14943](https://github.com/hashicorp/vault/pull/14943)]
+* ui: Fix Generated Token's Policies helpText to clarify that comma separated values are not accepted in this field. [[GH-15046](https://github.com/hashicorp/vault/pull/15046)]
+* ui: Fixes edit auth method capabilities issue [[GH-14966](https://github.com/hashicorp/vault/pull/14966)]
+* ui: Fixes issue logging in with OIDC from a listed auth mounts tab [[GH-14916](https://github.com/hashicorp/vault/pull/14916)]
+* ui: fix search-select component showing blank selections when editing group member entity [[GH-15058](https://github.com/hashicorp/vault/pull/15058)]
+* ui: masked values no longer give away length or location of special characters [[GH-15025](https://github.com/hashicorp/vault/pull/15025)]
+
+## 1.10.0
+### March 23, 2022
+
+CHANGES:
+
+* core (enterprise): requests with newly generated tokens to perf standbys which are lagging behind the active node return http 412 instead of 400/403/50x.
+* core: Changes the unit of `default_lease_ttl` and `max_lease_ttl` values returned by
+the `/sys/config/state/sanitized` endpoint from nanoseconds to seconds. [[GH-14206](https://github.com/hashicorp/vault/pull/14206)]
+* core: Bump Go version to 1.17.7. [[GH-14232](https://github.com/hashicorp/vault/pull/14232)]
+* plugin/database: The return value from `POST /database/config/:name` has been updated to "204 No Content" [[GH-14033](https://github.com/hashicorp/vault/pull/14033)]
+* secrets/azure: Changes the configuration parameter `use_microsoft_graph_api` to use the Microsoft
+Graph API by default. [[GH-14130](https://github.com/hashicorp/vault/pull/14130)]
+* storage/etcd: Remove support for v2. [[GH-14193](https://github.com/hashicorp/vault/pull/14193)]
+* ui: Upgrade Ember to version 3.24 [[GH-13443](https://github.com/hashicorp/vault/pull/13443)]
+
+FEATURES:
+
+* **Database plugin multiplexing**: manage multiple database connections with a single plugin process [[GH-14033](https://github.com/hashicorp/vault/pull/14033)]
+* **Login MFA**: Single and two phase MFA is now available when authenticating to Vault.
[[GH-14025](https://github.com/hashicorp/vault/pull/14025)]
+* **Mount Migration**: Vault supports moving secrets and auth mounts both within and across namespaces.
+* **Postgres in the UI**: Postgres DB is now supported by the UI [[GH-12945](https://github.com/hashicorp/vault/pull/12945)]
+* **Report in-flight requests**: Adding a trace capability to show in-flight requests, and a new gauge metric to show the total number of in-flight requests [[GH-13024](https://github.com/hashicorp/vault/pull/13024)]
+* **Server Side Consistent Tokens**: Service tokens have been updated to be longer (a minimum of 95 bytes) and token prefixes for all token types are updated from s., b., and r. to hvs., hvb., and hvr. for service, batch, and recovery tokens respectively. Vault clusters with integrated storage will now have read-after-write consistency by default. [[GH-14109](https://github.com/hashicorp/vault/pull/14109)]
+* **Transit SHA-3 Support**: Add support for SHA-3 in the Transit backend. [[GH-13367](https://github.com/hashicorp/vault/pull/13367)]
+* **Transit Time-Based Key Autorotation**: Add support for automatic, time-based key rotation to transit secrets engine, including in the UI. [[GH-13691](https://github.com/hashicorp/vault/pull/13691)]
+* **UI Client Count Improvements**: Restructures client count dashboard, making use of billing start date to improve accuracy. Adds mount-level distribution and filtering.
+* **Agent Telemetry**: The Vault Agent can now collect and return telemetry information at the `/agent/v1/metrics` endpoint.
+
+IMPROVEMENTS:
+
+* agent: Adds ability to configure specific user-assigned managed identities for Azure auto-auth. [[GH-14214](https://github.com/hashicorp/vault/pull/14214)]
+* agent: The `agent/v1/quit` endpoint can now be used to stop the Vault Agent remotely [[GH-14223](https://github.com/hashicorp/vault/pull/14223)]
+* api: Allow cloning `api.Client` tokens via `api.Config.CloneToken` or `api.Client.SetCloneToken()`. [[GH-13515](https://github.com/hashicorp/vault/pull/13515)]
+* api: Define constants for X-Vault-Forward and X-Vault-Inconsistent headers [[GH-14067](https://github.com/hashicorp/vault/pull/14067)]
+* api: Implements Login method in Go client libraries for GCP and Azure auth methods [[GH-13022](https://github.com/hashicorp/vault/pull/13022)]
+* api: Implements Login method in Go client libraries for LDAP auth methods [[GH-13841](https://github.com/hashicorp/vault/pull/13841)]
+* api: Trim newline character from wrapping token in logical.Unwrap from the api package [[GH-13044](https://github.com/hashicorp/vault/pull/13044)]
+* api: add api method for modifying raft autopilot configuration [[GH-12428](https://github.com/hashicorp/vault/pull/12428)]
+* api: respect WithWrappingToken() option during AppRole login authentication when used with secret ID specified from environment or from string [[GH-13241](https://github.com/hashicorp/vault/pull/13241)]
+* audit: The audit logs now contain the port used by the client [[GH-12790](https://github.com/hashicorp/vault/pull/12790)]
+* auth/aws: Enable region detection in the CLI by specifying the region as `auto` [[GH-14051](https://github.com/hashicorp/vault/pull/14051)]
+* auth/cert: Add certificate extensions as metadata [[GH-13348](https://github.com/hashicorp/vault/pull/13348)]
+* auth/jwt: The Authorization Code flow makes use of the Proof Key for Code Exchange (PKCE) extension.
[[GH-13365](https://github.com/hashicorp/vault/pull/13365)] +* auth/kubernetes: Added support for dynamically reloading short-lived tokens for better Kubernetes 1.21+ compatibility [[GH-13595](https://github.com/hashicorp/vault/pull/13595)] +* auth/ldap: Add a response warning and server log whenever the config is accessed +if `userfilter` doesn't consider `userattr` [[GH-14095](https://github.com/hashicorp/vault/pull/14095)] +* auth/ldap: Add username to alias metadata [[GH-13669](https://github.com/hashicorp/vault/pull/13669)] +* auth/ldap: Add username_as_alias configurable to change how aliases are named [[GH-14324](https://github.com/hashicorp/vault/pull/14324)] +* auth/okta: Update [okta-sdk-golang](https://github.com/okta/okta-sdk-golang) dependency to version v2.9.1 for improved request backoff handling [[GH-13439](https://github.com/hashicorp/vault/pull/13439)] +* auth/token: The `auth/token/revoke-accessor` endpoint is now idempotent and will +not error out if the token has already been revoked. [[GH-13661](https://github.com/hashicorp/vault/pull/13661)] +* auth: reading `sys/auth/:path` now returns the configuration for the auth engine mounted at the given path [[GH-12793](https://github.com/hashicorp/vault/pull/12793)] +* cli: interactive CLI for login mfa [[GH-14131](https://github.com/hashicorp/vault/pull/14131)] +* command (enterprise): "vault license get" now uses non-deprecated endpoint /sys/license/status +* core/ha: Add new mechanism for keeping track of peers talking to active node, and new 'operator members' command to view them. [[GH-13292](https://github.com/hashicorp/vault/pull/13292)] +* core/identity: Support updating an alias' `custom_metadata` to be empty. [[GH-13395](https://github.com/hashicorp/vault/pull/13395)] +* core/pki: Support Y10K value in notAfter field to be compliant with IEEE 802.1AR-2018 standard [[GH-12795](https://github.com/hashicorp/vault/pull/12795)] +* core/pki: Support Y10K value in notAfter field when signing non-CA certificates [[GH-13736](https://github.com/hashicorp/vault/pull/13736)] +* core: Add duration and start_time to completed requests log entries [[GH-13682](https://github.com/hashicorp/vault/pull/13682)] +* core: Add support to list password policies at `sys/policies/password` [[GH-12787](https://github.com/hashicorp/vault/pull/12787)] +* core: Add support to list version history via API at `sys/version-history` and via CLI with `vault version-history` [[GH-13766](https://github.com/hashicorp/vault/pull/13766)] +* core: Fixes code scanning alerts [[GH-13667](https://github.com/hashicorp/vault/pull/13667)] +* core: Periodically test the health of connectivity to auto-seal backends [[GH-13078](https://github.com/hashicorp/vault/pull/13078)] +* core: Reading `sys/mounts/:path` now returns the configuration for the secret engine at the given path [[GH-12792](https://github.com/hashicorp/vault/pull/12792)] +* core: Replace "master key" terminology with "root key" [[GH-13324](https://github.com/hashicorp/vault/pull/13324)] +* core: Small changes to ensure goroutines terminate in tests [[GH-14197](https://github.com/hashicorp/vault/pull/14197)] +* core: Systemd unit file included with the Linux packages now sets the service type to notify. [[GH-14385](https://github.com/hashicorp/vault/pull/14385)] +* core: Update github.com/prometheus/client_golang to fix security vulnerability CVE-2022-21698. [[GH-14190](https://github.com/hashicorp/vault/pull/14190)] +* core: Vault now supports the PROXY protocol v2. 
Support for UNKNOWN connections
+has also been added to the PROXY protocol v1. [[GH-13540](https://github.com/hashicorp/vault/pull/13540)]
+* http (enterprise): Serve /sys/license/status endpoint within namespaces
+* identity/oidc: Adds a default OIDC provider [[GH-14119](https://github.com/hashicorp/vault/pull/14119)]
+* identity/oidc: Adds a default key for OIDC clients [[GH-14119](https://github.com/hashicorp/vault/pull/14119)]
+* identity/oidc: Adds an `allow_all` assignment that permits all entities to authenticate via an OIDC client [[GH-14119](https://github.com/hashicorp/vault/pull/14119)]
+* identity/oidc: Adds proof key for code exchange (PKCE) support to OIDC providers. [[GH-13917](https://github.com/hashicorp/vault/pull/13917)]
+* sdk: Add helper for decoding root tokens [[GH-10505](https://github.com/hashicorp/vault/pull/10505)]
+* secrets/azure: Adds support for rotate-root. [#70](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/70) [[GH-13034](https://github.com/hashicorp/vault/pull/13034)]
+* secrets/consul: Add support for consul enterprise namespaces and admin partitions. [[GH-13850](https://github.com/hashicorp/vault/pull/13850)]
+* secrets/consul: Add support for consul roles. [[GH-14014](https://github.com/hashicorp/vault/pull/14014)]
+* secrets/database/influxdb: Switch/upgrade to the `influxdb1-client` module [[GH-12262](https://github.com/hashicorp/vault/pull/12262)]
+* secrets/database: Add database configuration parameter 'disable_escaping' for username and password when connecting to a database. [[GH-13414](https://github.com/hashicorp/vault/pull/13414)]
+* secrets/kv: add full secret path output to table-formatted responses [[GH-14301](https://github.com/hashicorp/vault/pull/14301)]
+* secrets/kv: add patch support for KVv2 key metadata [[GH-13215](https://github.com/hashicorp/vault/pull/13215)]
+* secrets/kv: add subkeys endpoint to retrieve a secret's structure without its values [[GH-13893](https://github.com/hashicorp/vault/pull/13893)]
+* secrets/pki: Add ability to fetch individual certificate as DER or PEM [[GH-10948](https://github.com/hashicorp/vault/pull/10948)]
+* secrets/pki: Add count and duration metrics to PKI issue and revoke calls. [[GH-13889](https://github.com/hashicorp/vault/pull/13889)]
+* secrets/pki: Add error handling for error types other than UserError or InternalError [[GH-14195](https://github.com/hashicorp/vault/pull/14195)]
+* secrets/pki: Allow URI SAN templates in allowed_uri_sans when allowed_uri_sans_template is set to true. [[GH-10249](https://github.com/hashicorp/vault/pull/10249)]
+* secrets/pki: Allow other_sans in sign-intermediate and sign-verbatim [[GH-13958](https://github.com/hashicorp/vault/pull/13958)]
+* secrets/pki: Calculate the Subject Key Identifier as suggested in [RFC 5280, Section 4.2.1.2](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.2). [[GH-11218](https://github.com/hashicorp/vault/pull/11218)]
+* secrets/pki: Restrict issuance of wildcard certificates via role parameter (`allow_wildcard_certificates`) [[GH-14238](https://github.com/hashicorp/vault/pull/14238)]
+* secrets/pki: Return complete chain (in `ca_chain` field) on calls to `pki/cert/ca_chain` [[GH-13935](https://github.com/hashicorp/vault/pull/13935)]
+* secrets/pki: Use application/pem-certificate-chain for PEM certificates, application/x-pem-file for PEM CRLs [[GH-13927](https://github.com/hashicorp/vault/pull/13927)]
+* secrets/pki: select appropriate signature algorithm for ECDSA signature on certificates.
[[GH-11216](https://github.com/hashicorp/vault/pull/11216)] +* secrets/ssh: Add support for generating non-RSA SSH CAs [[GH-14008](https://github.com/hashicorp/vault/pull/14008)] +* secrets/ssh: Allow specifying multiple approved key lengths for a single algorithm [[GH-13991](https://github.com/hashicorp/vault/pull/13991)] +* secrets/ssh: Use secure default for algorithm signer (rsa-sha2-256) with RSA SSH CA keys on new roles [[GH-14006](https://github.com/hashicorp/vault/pull/14006)] +* secrets/transit: Don't abort transit encrypt or decrypt batches on single item failure. [[GH-13111](https://github.com/hashicorp/vault/pull/13111)] +* storage/aerospike: Upgrade `aerospike-client-go` to v5.6.0. [[GH-12165](https://github.com/hashicorp/vault/pull/12165)] +* storage/raft: Set InitialMmapSize to 100GB on 64bit architectures [[GH-13178](https://github.com/hashicorp/vault/pull/13178)] +* storage/raft: When using retry_join stanzas, join against all of them in parallel. [[GH-13606](https://github.com/hashicorp/vault/pull/13606)] +* sys/raw: Enhance sys/raw to read and write values that cannot be encoded in json. [[GH-13537](https://github.com/hashicorp/vault/pull/13537)] +* ui: Add support for ECDSA and Ed25519 certificate views [[GH-13894](https://github.com/hashicorp/vault/pull/13894)] +* ui: Add version diff view for KV V2 [[GH-13000](https://github.com/hashicorp/vault/pull/13000)] +* ui: Added client side paging for namespace list view [[GH-13195](https://github.com/hashicorp/vault/pull/13195)] +* ui: Adds flight icons to UI [[GH-12976](https://github.com/hashicorp/vault/pull/12976)] +* ui: Adds multi-factor authentication support [[GH-14049](https://github.com/hashicorp/vault/pull/14049)] +* ui: Allow static role credential rotation in Database secrets engines [[GH-14268](https://github.com/hashicorp/vault/pull/14268)] +* ui: Display badge for all versions in secrets engine header [[GH-13015](https://github.com/hashicorp/vault/pull/13015)] +* ui: Swap browser localStorage in favor of sessionStorage [[GH-14054](https://github.com/hashicorp/vault/pull/14054)] +* ui: The integrated web terminal now accepts both `-f` and `--force` as aliases +for `-force` for the `write` command. [[GH-13683](https://github.com/hashicorp/vault/pull/13683)] +* ui: Transform advanced templating with encode/decode format support [[GH-13908](https://github.com/hashicorp/vault/pull/13908)] +* ui: Updates ember blueprints to glimmer components [[GH-13149](https://github.com/hashicorp/vault/pull/13149)] +* ui: customizes empty state messages for transit and transform [[GH-13090](https://github.com/hashicorp/vault/pull/13090)] + +BUG FIXES: + +* Fixed bug where auth method only considers system-identity when multiple identities are available. [#50](https://github.com/hashicorp/vault-plugin-auth-azure/pull/50) [[GH-14138](https://github.com/hashicorp/vault/pull/14138)] +* activity log (enterprise): allow partial monthly client count to be accessed from namespaces [[GH-13086](https://github.com/hashicorp/vault/pull/13086)] +* agent: Fixes bug where vault agent is unaware of the namespace in the config when wrapping token +* api/client: Fixes an issue where the `replicateStateStore` was being set to `nil` upon consecutive calls to `client.SetReadYourWrites(true)`. [[GH-13486](https://github.com/hashicorp/vault/pull/13486)] +* auth/approle: Fix regression where unset cidrlist is returned as nil instead of zero-length array. 
[[GH-13235](https://github.com/hashicorp/vault/pull/13235)]
+* auth/approle: Fix wrapping of nil errors in `login` endpoint [[GH-14107](https://github.com/hashicorp/vault/pull/14107)]
+* auth/github: Use the Organization ID instead of the Organization name to verify the org membership. [[GH-13332](https://github.com/hashicorp/vault/pull/13332)]
+* auth/kubernetes: Properly handle the migration of role storage entries containing an empty `alias_name_source` [[GH-13925](https://github.com/hashicorp/vault/pull/13925)]
+* auth/kubernetes: ensure valid entity alias names created for projected volume tokens [[GH-14144](https://github.com/hashicorp/vault/pull/14144)]
+* auth/oidc: Fixes OIDC auth from the Vault UI when using the implicit flow and `form_post` response mode. [[GH-13492](https://github.com/hashicorp/vault/pull/13492)]
+* cli: Fix using kv patch with older server versions that don't support HTTP PATCH. [[GH-13615](https://github.com/hashicorp/vault/pull/13615)]
+* core (enterprise): Fix a data race in logshipper.
+* core (enterprise): Workaround AWS CloudHSM v5 SDK issue not allowing read-only sessions
+* core/api: Fix overwriting of request headers when using JSONMergePatch. [[GH-14222](https://github.com/hashicorp/vault/pull/14222)]
+* core/identity: Address a data race condition between local updates to aliases and invalidations [[GH-13093](https://github.com/hashicorp/vault/pull/13093)]
+* core/identity: Address a data race condition between local updates to aliases and invalidations [[GH-13476](https://github.com/hashicorp/vault/pull/13476)]
+* core/token: Fix null token panic from 'v1/auth/token/' endpoints and return proper error response. [[GH-13233](https://github.com/hashicorp/vault/pull/13233)]
+* core/token: Fix null token_type panic resulting from 'v1/auth/token/roles/{role_name}' endpoint [[GH-13236](https://github.com/hashicorp/vault/pull/13236)]
+* core: Fix warnings logged on perf standbys regarding stored versions [[GH-13042](https://github.com/hashicorp/vault/pull/13042)]
+* core: `-output-curl-string` now properly sets cURL options for client and CA
+certificates. [[GH-13660](https://github.com/hashicorp/vault/pull/13660)]
+* core: add support for go-sockaddr templates in the top-level cluster_addr field [[GH-13678](https://github.com/hashicorp/vault/pull/13678)]
+* core: authentication to "login" endpoint for non-existent mount path returns permission denied with status code 403 [[GH-13162](https://github.com/hashicorp/vault/pull/13162)]
+* core: revert some unintentionally downgraded dependencies from 1.9.0-rc1 [[GH-13168](https://github.com/hashicorp/vault/pull/13168)]
+* ha (enterprise): Prevents performance standby nodes from serving and caching stale data immediately after performance standby election completes
+* http (enterprise): Always forward internal/counters endpoints from perf standbys to active node
+* http: Fix /sys/monitor endpoint returning streaming not supported [[GH-13200](https://github.com/hashicorp/vault/pull/13200)]
+* identity/oidc: Adds support for port-agnostic validation of loopback IP redirect URIs. [[GH-13871](https://github.com/hashicorp/vault/pull/13871)]
+* identity/oidc: Check for a nil signing key on rotation to prevent panics.
[[GH-13716](https://github.com/hashicorp/vault/pull/13716)]
+* identity/oidc: Fixes inherited group membership when evaluating client assignments [[GH-14013](https://github.com/hashicorp/vault/pull/14013)]
+* identity/oidc: Fixes potential write to readonly storage on performance secondary clusters during key rotation [[GH-14426](https://github.com/hashicorp/vault/pull/14426)]
+* identity/oidc: Make the `nonce` parameter optional for the Authorization Endpoint of OIDC providers. [[GH-13231](https://github.com/hashicorp/vault/pull/13231)]
+* identity/token: Fixes a bug where duplicate public keys could appear in the .well-known JWKS [[GH-14543](https://github.com/hashicorp/vault/pull/14543)]
+* identity: Fix possible nil pointer dereference. [[GH-13318](https://github.com/hashicorp/vault/pull/13318)]
+* identity: Fix regression preventing startup when aliases were created pre-1.9. [[GH-13169](https://github.com/hashicorp/vault/pull/13169)]
+* identity: Fixes a panic in the OIDC key rotation due to a missing nil check. [[GH-13298](https://github.com/hashicorp/vault/pull/13298)]
+* kmip (enterprise): Fix locate-by-name operations failing to find keys after a rekey operation.
+* licensing (enterprise): Revert accidental inclusion of the TDE feature from the `prem` build.
+* metrics/autosnapshots (enterprise): Fix bug that could cause
+vault.autosnapshots.save.errors to not be incremented when there is an
+autosnapshot save error.
+* physical/mysql: Create table with wider `vault_key` column when initializing database tables. [[GH-14231](https://github.com/hashicorp/vault/pull/14231)]
+* plugin/couchbase: Fix an issue in which the locking patterns did not allow parallel requests. [[GH-13033](https://github.com/hashicorp/vault/pull/13033)]
+* replication (enterprise): When using encrypted secondary tokens, only clear the
+private key after a successful connection to the primary cluster
+* sdk/framework: Generate proper OpenAPI specs for path patterns that use an alternation as the root. [[GH-13487](https://github.com/hashicorp/vault/pull/13487)]
+* sdk/helper/ldaputil: properly escape a trailing escape character to prevent panics. [[GH-13452](https://github.com/hashicorp/vault/pull/13452)]
+* sdk/queue: move lock before length check to prevent panics. [[GH-13146](https://github.com/hashicorp/vault/pull/13146)]
+* sdk: Fixes OpenAPI to distinguish between paths that can do only List, or both List and Read. [[GH-13643](https://github.com/hashicorp/vault/pull/13643)]
+* secrets/azure: Fixed bug where Azure environment did not change Graph URL [[GH-13973](https://github.com/hashicorp/vault/pull/13973)]
+* secrets/azure: Fixes service principal generation when assigning roles that have [DataActions](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-definitions#dataactions). [[GH-13277](https://github.com/hashicorp/vault/pull/13277)]
+* secrets/azure: Fixes the [rotate root](https://www.vaultproject.io/api-docs/secret/azure#rotate-root)
+operation for upgraded configurations with a `root_password_ttl` of zero. [[GH-14130](https://github.com/hashicorp/vault/pull/14130)]
+* secrets/database/cassandra: change connect_timeout to 5s as documentation says [[GH-12443](https://github.com/hashicorp/vault/pull/12443)]
+* secrets/database/mssql: Accept a boolean for `contained_db`, rather than just a string.
[[GH-13469](https://github.com/hashicorp/vault/pull/13469)] +* secrets/gcp: Fixed bug where error was not reported for invalid bindings [[GH-13974](https://github.com/hashicorp/vault/pull/13974)] +* secrets/gcp: Fixes role bindings for BigQuery dataset resources. [[GH-13548](https://github.com/hashicorp/vault/pull/13548)] +* secrets/openldap: Fix panic from nil logger in backend [[GH-14171](https://github.com/hashicorp/vault/pull/14171)] +* secrets/pki: Default value for key_bits changed to 0, enabling key_type=ec key generation with default value [[GH-13080](https://github.com/hashicorp/vault/pull/13080)] +* secrets/pki: Fix issuance of wildcard certificates matching glob patterns [[GH-14235](https://github.com/hashicorp/vault/pull/14235)] +* secrets/pki: Fix regression causing performance secondaries to forward certificate generation to the primary. [[GH-13759](https://github.com/hashicorp/vault/pull/13759)] +* secrets/pki: Fix regression causing performance secondaries to forward certificate generation to the primary. [[GH-2456](https://github.com/hashicorp/vault/pull/2456)] +* secrets/pki: Fixes around NIST P-curve signature hash length, default value for signature_bits changed to 0. [[GH-12872](https://github.com/hashicorp/vault/pull/12872)] +* secrets/pki: Recognize ed25519 when requesting a response in PKCS8 format [[GH-13257](https://github.com/hashicorp/vault/pull/13257)] +* secrets/pki: Skip signature bits validation for ed25519 curve key type [[GH-13254](https://github.com/hashicorp/vault/pull/13254)] +* secrets/transit: Ensure that Vault does not panic for invalid nonce size when we aren't in convergent encryption mode. [[GH-13690](https://github.com/hashicorp/vault/pull/13690)] +* secrets/transit: Return an error if any required parameter is missing. [[GH-14074](https://github.com/hashicorp/vault/pull/14074)] +* storage/raft: Fix a panic when trying to store a key > 32KB in a transaction. [[GH-13286](https://github.com/hashicorp/vault/pull/13286)] +* storage/raft: Fix a panic when trying to write a key > 32KB [[GH-13282](https://github.com/hashicorp/vault/pull/13282)] +* storage/raft: Fix issues allowing invalid nodes to become leadership candidates. [[GH-13703](https://github.com/hashicorp/vault/pull/13703)] +* storage/raft: Fix regression in 1.9.0-rc1 that changed how time is represented in Raft logs; this prevented using a raft db created pre-1.9. [[GH-13165](https://github.com/hashicorp/vault/pull/13165)] +* storage/raft: On linux, use map_populate for bolt files to improve startup time. 
[[GH-13573](https://github.com/hashicorp/vault/pull/13573)] +* storage/raft: Units for bolt metrics now given in milliseconds instead of nanoseconds [[GH-13749](https://github.com/hashicorp/vault/pull/13749)] +* ui: Adds pagination to auth methods list view [[GH-13054](https://github.com/hashicorp/vault/pull/13054)] +* ui: Do not show verify connection value on database connection config page [[GH-13152](https://github.com/hashicorp/vault/pull/13152)] +* ui: Fix client count current month data not showing unless monthly history data exists [[GH-13396](https://github.com/hashicorp/vault/pull/13396)] +* ui: Fix default TTL display and set on database role [[GH-14224](https://github.com/hashicorp/vault/pull/14224)] +* ui: Fix incorrect validity message on transit secrets engine [[GH-14233](https://github.com/hashicorp/vault/pull/14233)] +* ui: Fix issue where UI incorrectly handled API errors when mounting backends [[GH-14551](https://github.com/hashicorp/vault/pull/14551)] +* ui: Fix kv engine access bug [[GH-13872](https://github.com/hashicorp/vault/pull/13872)] +* ui: Fixes breadcrumb bug for secrets navigation [[GH-13604](https://github.com/hashicorp/vault/pull/13604)] +* ui: Fixes caching issue on kv new version create [[GH-14489](https://github.com/hashicorp/vault/pull/14489)] +* ui: Fixes displaying empty masked values in PKI engine [[GH-14400](https://github.com/hashicorp/vault/pull/14400)] +* ui: Fixes horizontal bar chart hover issue when filtering namespaces and mounts [[GH-14493](https://github.com/hashicorp/vault/pull/14493)] +* ui: Fixes issue logging out with wrapped token query parameter [[GH-14329](https://github.com/hashicorp/vault/pull/14329)] +* ui: Fixes issue removing raft storage peer via cli not reflected in UI until refresh [[GH-13098](https://github.com/hashicorp/vault/pull/13098)] +* ui: Fixes issue restoring raft storage snapshot [[GH-13107](https://github.com/hashicorp/vault/pull/13107)] +* ui: Fixes issue saving KMIP role correctly [[GH-13585](https://github.com/hashicorp/vault/pull/13585)] +* ui: Fixes issue with OIDC auth workflow when using MetaMask Chrome extension [[GH-13133](https://github.com/hashicorp/vault/pull/13133)] +* ui: Fixes issue with SearchSelect component not holding focus [[GH-13590](https://github.com/hashicorp/vault/pull/13590)] +* ui: Fixes issue with automate secret deletion value not displaying initially if set in secret metadata edit view [[GH-13177](https://github.com/hashicorp/vault/pull/13177)] +* ui: Fixes issue with correct auth method not selected when logging out from OIDC or JWT methods [[GH-14545](https://github.com/hashicorp/vault/pull/14545)] +* ui: Fixes issue with placeholder not displaying for automatically deleted secrets when deletion time has passed [[GH-13166](https://github.com/hashicorp/vault/pull/13166)] +* ui: Fixes issue with the number of PGP Key inputs not matching the key shares number in the initialization form on change [[GH-13038](https://github.com/hashicorp/vault/pull/13038)] +* ui: Fixes long secret key names overlapping masked values [[GH-13032](https://github.com/hashicorp/vault/pull/13032)] +* ui: Fixes node-forge error when parsing EC (elliptic curve) certs [[GH-13238](https://github.com/hashicorp/vault/pull/13238)] +* ui: Redirects to managed namespace if incorrect namespace in URL param [[GH-14422](https://github.com/hashicorp/vault/pull/14422)] +* ui: Removes ability to tune token_type for token auth methods [[GH-12904](https://github.com/hashicorp/vault/pull/12904)] +* ui: Trigger background token self-renewal
if inactive and half of TTL has passed [[GH-13950](https://github.com/hashicorp/vault/pull/13950)] + +## 1.9.10 +### September 30, 2022 + +SECURITY: + +* secrets/pki: Vault’s TLS certificate auth method did not initially load the optionally-configured CRL issued by the role’s CA into memory on startup, resulting in the revocation list not being checked if the CRL had not yet been retrieved. This vulnerability, CVE-2022-41316, is fixed in Vault 1.12.0, 1.11.4, 1.10.7, and 1.9.10. [[HCSEC-2022-24](https://discuss.hashicorp.com/t/hcsec-2022-24-vaults-tls-cert-auth-method-only-loaded-crl-after-first-request/45483)] + +BUG FIXES: + +* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. [[GH-17138](https://github.com/hashicorp/vault/pull/17138)] +* replication (enterprise): Fix data race in SaveCheckpoint() +* ui: Fix lease force revoke action [[GH-16930](https://github.com/hashicorp/vault/pull/16930)] + +## 1.9.9 +### August 31, 2022 + +SECURITY: + +* core: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. This vulnerability, CVE-2022-40186, is fixed in 1.11.3, 1.10.6, and 1.9.9. [[HCSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] + +CHANGES: + +* core: Bump Go version to 1.17.13. + +BUG FIXES: + +* core (enterprise): Fix some races in merkle index flushing code found in testing +* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)] +* database: Invalidate queue should cancel context first to avoid deadlock [[GH-15933](https://github.com/hashicorp/vault/pull/15933)] +* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)] +* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] +* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] + +## 1.9.8 +### July 21, 2022 + +SECURITY: + +* storage/raft: Vault Enterprise (“Vault”) clusters using Integrated Storage expose an unauthenticated API endpoint that could be abused to override the voter status of a node within a Vault HA cluster, introducing potential for future data loss or catastrophic failure. This vulnerability, CVE-2022-36129, was fixed in Vault 1.9.8, 1.10.5, and 1.11.1. [[HCSEC-2022-15](https://discuss.hashicorp.com/t/hcsec-2022-15-vault-enterprise-does-not-verify-existing-voter-status-when-joining-an-integrated-storage-ha-node/42420)] + +CHANGES: + +* core: Bump Go version to 1.17.12.
+ +IMPROVEMENTS: + +* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)] + +BUG FIXES: + +* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)] +* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty +* core/seal: Fix possible keyring truncation when using the file backend. [[GH-15946](https://github.com/hashicorp/vault/pull/15946)] +* storage/raft (enterprise): Prevent unauthenticated voter status change with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)] +* transform (enterprise): Fix a bug in the handling of nested or unmatched capture groups in FPE transformations. +* ui: Fix issue where metadata tab is hidden even though policy grants access [[GH-15824](https://github.com/hashicorp/vault/pull/15824)] +* ui: Updated `leasId` to `leaseId` in the "Copy Credentials" section of "Generate AWS Credentials" [[GH-15685](https://github.com/hashicorp/vault/pull/15685)] + +## 1.9.7 +### June 10, 2022 + +CHANGES: + +* core: Bump Go version to 1.17.11. [[GH-go-ver-197](https://github.com/hashicorp/vault/pull/go-ver-197)] + +IMPROVEMENTS: + +* ui: Allow namespace param to be parsed from state queryParam [[GH-15378](https://github.com/hashicorp/vault/pull/15378)] + +BUG FIXES: + +* agent: Redact auto auth token from renew endpoints [[GH-15380](https://github.com/hashicorp/vault/pull/15380)] +* auth/ldap: The logic for setting the entity alias when `username_as_alias` is set +has been fixed. The previous behavior would make a request to the LDAP server to +get `user_attr` before discarding it and using the username instead. This would +make it impossible for a user to connect if this attribute was missing or had +multiple values, even though it would not be used anyway. This has been fixed +and the username is now used without making superfluous LDAP searches. [[GH-15525](https://github.com/hashicorp/vault/pull/15525)] +* core (enterprise): Fix overcounting of lease count quota usage at startup. +* core/config: Only ask the system about network interfaces when address configs contain a template having the format: {{ ... }} [[GH-15224](https://github.com/hashicorp/vault/pull/15224)] +* core: Prevent changing file permissions of audit logs when mode 0000 is used. [[GH-15759](https://github.com/hashicorp/vault/pull/15759)] +* core: Prevent metrics generation from causing deadlocks. [[GH-15693](https://github.com/hashicorp/vault/pull/15693)] +* core: fixed systemd reloading notification [[GH-15041](https://github.com/hashicorp/vault/pull/15041)] +* core: pre-calculate namespace specific paths when tainting a route during postUnseal [[GH-15067](https://github.com/hashicorp/vault/pull/15067)] +* storage/raft (enterprise): Auto-snapshot configuration now forbids slashes in file prefixes for all types, and "/" in path prefix for local storage type. Strip leading prefix in path prefix for AWS. Improve error handling/reporting. +* transform (enterprise): Fix non-overridable column default value causing tokenization tokens to expire prematurely when using the MySQL storage backend. 
+* ui: Fixes client count timezone bug [[GH-15743](https://github.com/hashicorp/vault/pull/15743)] +* ui: Fixes issue logging in with OIDC from a listed auth mounts tab [[GH-15666](https://github.com/hashicorp/vault/pull/15666)] + +## 1.9.6 +### April 29, 2022 + +BUG FIXES: + +* raft: fix Raft TLS key rotation panic that occurs if active key is more than 24 hours old [[GH-15156](https://github.com/hashicorp/vault/pull/15156)] +* sdk: Fix OpenApi spec generator to properly convert TypeInt64 to OAS supported int64 [[GH-15104](https://github.com/hashicorp/vault/pull/15104)] + + +## 1.9.5 +### April 22, 2022 + +CHANGES: + +* core: A request that fails path validation due to relative path check will now be responded to with a 400 rather than 500. [[GH-14328](https://github.com/hashicorp/vault/pull/14328)] +* core: Bump Go version to 1.17.9. [[GH-15045](https://github.com/hashicorp/vault/pull/15045)] + +IMPROVEMENTS: + +* auth/ldap: Add username_as_alias configurable to change how aliases are named [[GH-14324](https://github.com/hashicorp/vault/pull/14324)] +* core: Systemd unit file included with the Linux packages now sets the service type to notify. [[GH-14385](https://github.com/hashicorp/vault/pull/14385)] +* sentinel (enterprise): Upgrade sentinel to [v0.18.5](https://docs.hashicorp.com/sentinel/changelog#0-18-5-january-14-2022) to avoid potential naming collisions in the remote installer +* website/docs: added a link to an Enigma secret plugin. [[GH-14389](https://github.com/hashicorp/vault/pull/14389)] + +BUG FIXES: + +* api/sys/raft: Update RaftSnapshotRestore to use net/http client allowing bodies larger than allocated memory to be streamed [[GH-14269](https://github.com/hashicorp/vault/pull/14269)] +* api: Respect increment value in grace period calculations in LifetimeWatcher (see the Go sketch below) [[GH-14836](https://github.com/hashicorp/vault/pull/14836)] +* auth/approle: Add maximum length for input values that result in SHA256 HMAC calculation [[GH-14746](https://github.com/hashicorp/vault/pull/14746)] +* cassandra: Update gocql Cassandra client to fix "no hosts available in the pool" error [[GH-14973](https://github.com/hashicorp/vault/pull/14973)] +* cli: Fix panic caused by parsing key=value fields whose value is a single backslash [[GH-14523](https://github.com/hashicorp/vault/pull/14523)] +* core (enterprise): Allow local alias create RPCs to persist alias metadata +* core/metrics: Fix incorrect table size metric for local mounts [[GH-14755](https://github.com/hashicorp/vault/pull/14755)] +* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited integers [[GH-15072](https://github.com/hashicorp/vault/pull/15072)] +* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited strings [[GH-14522](https://github.com/hashicorp/vault/pull/14522)] +* core: Fix panic caused by parsing policies with empty slice values. [[GH-14501](https://github.com/hashicorp/vault/pull/14501)]
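The LifetimeWatcher entry above concerns lease renewal in the Go client. A hedged sketch of how a caller requests a specific renewal increment; the package name, one-hour value, and log output are illustrative, not taken from the changelog:

```go
package vaultutil // illustrative package name

import (
	"log"

	"github.com/hashicorp/vault/api"
)

// renewLease renews a leased secret until renewal stops. Per GH-14836,
// the requested Increment now also feeds into the watcher's grace
// period calculations.
func renewLease(client *api.Client, secret *api.Secret) error {
	watcher, err := client.NewLifetimeWatcher(&api.LifetimeWatcherInput{
		Secret:    secret,
		Increment: 3600, // seconds; illustrative value
	})
	if err != nil {
		return err
	}

	go watcher.Start()
	defer watcher.Stop()

	for {
		select {
		case err := <-watcher.DoneCh():
			// Renewal has stopped: the lease expired or renewal failed.
			return err
		case renewal := <-watcher.RenewCh():
			log.Printf("renewed lease; new duration: %ds", renewal.Secret.LeaseDuration)
		}
	}
}
```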
+* core: Fix panic for help request URL paths without /v1/ prefix [[GH-14704](https://github.com/hashicorp/vault/pull/14704)] +* core: fixing excessive unix file permissions [[GH-14791](https://github.com/hashicorp/vault/pull/14791)] +* core: fixing excessive unix file permissions on dir, files and archive created by vault debug command [[GH-14846](https://github.com/hashicorp/vault/pull/14846)] +* core: report unused or redundant keys in server configuration [[GH-14752](https://github.com/hashicorp/vault/pull/14752)] +* core: Fix memory leak caused by time.After() used in a select statement [[GH-14814](https://github.com/hashicorp/vault/pull/14814)] +* identity/token: Fixes a bug where duplicate public keys could appear in the .well-known JWKS [[GH-14543](https://github.com/hashicorp/vault/pull/14543)] +* metrics/autosnapshots (enterprise): Fix bug that could cause +vault.autosnapshots.save.errors to not be incremented when there is an +autosnapshot save error. +* replication (enterprise): fix panic due to missing entity during invalidation of local aliases. [[GH-14622](https://github.com/hashicorp/vault/pull/14622)] +* ui: Fix Generated Token's Policies helpText to clarify that comma separated values are not accepted in this field. [[GH-15046](https://github.com/hashicorp/vault/pull/15046)] +* ui: Fix issue where UI incorrectly handled API errors when mounting backends [[GH-14551](https://github.com/hashicorp/vault/pull/14551)] +* ui: Fixes caching issue on kv new version create [[GH-14489](https://github.com/hashicorp/vault/pull/14489)] +* ui: Fixes edit auth method capabilities issue [[GH-14966](https://github.com/hashicorp/vault/pull/14966)] +* ui: Fixes issue logging out with wrapped token query parameter [[GH-14329](https://github.com/hashicorp/vault/pull/14329)] +* ui: Fixes issue with correct auth method not selected when logging out from OIDC or JWT methods [[GH-14545](https://github.com/hashicorp/vault/pull/14545)] +* ui: Redirects to managed namespace if incorrect namespace in URL param [[GH-14422](https://github.com/hashicorp/vault/pull/14422)] +* ui: fix search-select component showing blank selections when editing group member entity [[GH-15058](https://github.com/hashicorp/vault/pull/15058)] +* ui: masked values no longer give away length or location of special characters [[GH-15025](https://github.com/hashicorp/vault/pull/15025)] + + +## 1.9.4 +### March 3, 2022 + +SECURITY: + +* secrets/pki: Vault and Vault Enterprise (“Vault”) allowed the PKI secrets engine under certain configurations to issue wildcard certificates to authorized users for a specified domain, even if the PKI role policy attribute allow_subdomains is set to false. This vulnerability, CVE-2022-25243, was fixed in Vault 1.8.9 and 1.9.4. +* transform (enterprise): Vault Enterprise (“Vault”) clusters using the tokenization transform feature can expose the tokenization key through the tokenization key configuration endpoint to authorized operators with read permissions on this endpoint. This vulnerability, CVE-2022-25244, was fixed in Vault Enterprise 1.7.10, 1.8.9, and 1.9.4. + +CHANGES: + +* secrets/azure: Changes the configuration parameter `use_microsoft_graph_api` to use the Microsoft +Graph API by default. [[GH-14130](https://github.com/hashicorp/vault/pull/14130)] + +IMPROVEMENTS: + +* core: Bump Go version to 1.17.7.
[[GH-14232](https://github.com/hashicorp/vault/pull/14232)] +* secrets/pki: Restrict issuance of wildcard certificates via role parameter (`allow_wildcard_certificates`) [[GH-14238](https://github.com/hashicorp/vault/pull/14238)] + +BUG FIXES: + +* auth/azure: Fixed bug where auth method only considers system-identity when multiple identities are available. [#50](https://github.com/hashicorp/vault-plugin-auth-azure/pull/50) [[GH-14138](https://github.com/hashicorp/vault/pull/14138)] +* auth/kubernetes: Properly handle the migration of role storage entries containing an empty `alias_name_source` [[GH-13925](https://github.com/hashicorp/vault/pull/13925)] +* auth/kubernetes: ensure valid entity alias names created for projected volume tokens [[GH-14144](https://github.com/hashicorp/vault/pull/14144)] +* identity/oidc: Adds support for port-agnostic validation of loopback IP redirect URIs. [[GH-13871](https://github.com/hashicorp/vault/pull/13871)] +* identity/oidc: Fixes inherited group membership when evaluating client assignments [[GH-14013](https://github.com/hashicorp/vault/pull/14013)] +* secrets/azure: Fixed bug where Azure environment did not change Graph URL [[GH-13973](https://github.com/hashicorp/vault/pull/13973)] +* secrets/azure: Fixes the [rotate root](https://www.vaultproject.io/api-docs/secret/azure#rotate-root) +operation for upgraded configurations with a `root_password_ttl` of zero. [[GH-14130](https://github.com/hashicorp/vault/pull/14130)] +* secrets/gcp: Fixed bug where error was not reported for invalid bindings [[GH-13974](https://github.com/hashicorp/vault/pull/13974)] +* secrets/openldap: Fix panic from nil logger in backend [[GH-14171](https://github.com/hashicorp/vault/pull/14171)] +* secrets/pki: Fix issuance of wildcard certificates matching glob patterns [[GH-14235](https://github.com/hashicorp/vault/pull/14235)] +* storage/raft: Fix issues allowing invalid nodes to become leadership candidates. [[GH-13703](https://github.com/hashicorp/vault/pull/13703)] +* ui: Fix default TTL display and set on database role [[GH-14224](https://github.com/hashicorp/vault/pull/14224)] +* ui: Fix incorrect validity message on transit secrets engine [[GH-14233](https://github.com/hashicorp/vault/pull/14233)] +* ui: Fix kv engine access bug [[GH-13872](https://github.com/hashicorp/vault/pull/13872)] +* ui: Fix issue removing raft storage peer via cli not reflected in UI until refresh [[GH-13098](https://github.com/hashicorp/vault/pull/13098)] +* ui: Trigger background token self-renewal if inactive and half of TTL has passed [[GH-13950](https://github.com/hashicorp/vault/pull/13950)] + +## 1.9.3 +### January 27, 2022 + +IMPROVEMENTS: + +* auth/kubernetes: Added support for dynamically reloading short-lived tokens for better Kubernetes 1.21+ compatibility [[GH-13698](https://github.com/hashicorp/vault/pull/13698)] +* auth/ldap: Add username to alias metadata [[GH-13669](https://github.com/hashicorp/vault/pull/13669)] +* core/identity: Support updating an alias' `custom_metadata` to be empty. [[GH-13395](https://github.com/hashicorp/vault/pull/13395)] +* core: Fixes code scanning alerts [[GH-13667](https://github.com/hashicorp/vault/pull/13667)] +* http (enterprise): Serve /sys/license/status endpoint within namespaces + +BUG FIXES: + +* auth/oidc: Fixes OIDC auth from the Vault UI when using the implicit flow and `form_post` response mode. [[GH-13492](https://github.com/hashicorp/vault/pull/13492)] +* cli: Fix using kv patch with older server versions that don't support HTTP PATCH.
[[GH-13615](https://github.com/hashicorp/vault/pull/13615)] +* core (enterprise): Workaround AWS CloudHSM v5 SDK issue not allowing read-only sessions +* core/identity: Address a data race condition between local updates to aliases and invalidations [[GH-13476](https://github.com/hashicorp/vault/pull/13476)] +* core: add support for go-sockaddr templates in the top-level cluster_addr field [[GH-13678](https://github.com/hashicorp/vault/pull/13678)] +* identity/oidc: Check for a nil signing key on rotation to prevent panics. [[GH-13716](https://github.com/hashicorp/vault/pull/13716)] +* kmip (enterprise): Fix locate-by-name operations failing to find a key after a rekey operation. +* secrets/database/mssql: Accept a boolean for `contained_db`, rather than just a string. [[GH-13469](https://github.com/hashicorp/vault/pull/13469)] +* secrets/gcp: Fixes role bindings for BigQuery dataset resources. [[GH-13548](https://github.com/hashicorp/vault/pull/13548)] +* secrets/pki: Fix regression causing performance secondaries to forward certificate generation to the primary. [[GH-13759](https://github.com/hashicorp/vault/pull/13759)] +* storage/raft: On linux, use map_populate for bolt files to improve startup time. [[GH-13573](https://github.com/hashicorp/vault/pull/13573)] +* storage/raft: Units for bolt metrics now given in milliseconds instead of nanoseconds [[GH-13749](https://github.com/hashicorp/vault/pull/13749)] +* ui: Fixes breadcrumb bug for secrets navigation [[GH-13604](https://github.com/hashicorp/vault/pull/13604)] +* ui: Fixes issue saving KMIP role correctly [[GH-13585](https://github.com/hashicorp/vault/pull/13585)] + +## 1.9.2 +### December 21, 2021 + +CHANGES: + +* go: Update go version to 1.17.5 [[GH-13408](https://github.com/hashicorp/vault/pull/13408)] + +IMPROVEMENTS: + +* auth/jwt: The Authorization Code flow makes use of the Proof Key for Code Exchange (PKCE) extension. [[GH-13365](https://github.com/hashicorp/vault/pull/13365)] + +BUG FIXES: + +* ui: Fix client count current month data not showing unless monthly history data exists [[GH-13396](https://github.com/hashicorp/vault/pull/13396)] + +## 1.9.1 +### December 9, 2021 + +SECURITY: + +* storage/raft: Integrated Storage backend could be caused to crash by an authenticated user with write permissions to the KV secrets engine. This vulnerability, CVE-2021-45042, was fixed in Vault 1.7.7, 1.8.6, and 1.9.1. + +IMPROVEMENTS: + +* storage/aerospike: Upgrade `aerospike-client-go` to v5.6.0. [[GH-12165](https://github.com/hashicorp/vault/pull/12165)] + +BUG FIXES: + +* auth/approle: Fix regression where unset cidrlist is returned as nil instead of zero-length array. [[GH-13235](https://github.com/hashicorp/vault/pull/13235)] +* ha (enterprise): Prevents performance standby nodes from serving and caching stale data immediately after performance standby election completes +* http: Fix /sys/monitor endpoint returning streaming not supported [[GH-13200](https://github.com/hashicorp/vault/pull/13200)] +* identity/oidc: Make the `nonce` parameter optional for the Authorization Endpoint of OIDC providers. [[GH-13231](https://github.com/hashicorp/vault/pull/13231)] +* identity: Fixes a panic in the OIDC key rotation due to a missing nil check. [[GH-13298](https://github.com/hashicorp/vault/pull/13298)] +* sdk/queue: move lock before length check to prevent panics. [[GH-13146](https://github.com/hashicorp/vault/pull/13146)]
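The sdk/queue entry above is a classic lock-ordering bug. A standalone Go sketch (not Vault's actual sdk/queue code) of why the lock must be taken before the length check:

```go
package queuesketch

import "sync"

// queue is a minimal stand-in type, not Vault's sdk/queue implementation.
type queue struct {
	mu    sync.Mutex
	items []string
}

// pop acquires the lock before inspecting len(q.items). Checking the
// length first and locking afterwards would race with concurrent
// pushes/pops and could index into an empty slice, which is the kind
// of panic GH-13146 guards against.
func (q *queue) pop() (string, bool) {
	q.mu.Lock()
	defer q.mu.Unlock()
	if len(q.items) == 0 {
		return "", false
	}
	item := q.items[0]
	q.items = q.items[1:]
	return item, true
}
```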
+* secrets/azure: Fixes service principal generation when assigning roles that have [DataActions](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-definitions#dataactions). [[GH-13277](https://github.com/hashicorp/vault/pull/13277)] +* secrets/pki: Recognize ed25519 when requesting a response in PKCS8 format [[GH-13257](https://github.com/hashicorp/vault/pull/13257)] +* storage/raft: Fix a panic when trying to store a key > 32KB in a transaction. [[GH-13286](https://github.com/hashicorp/vault/pull/13286)] +* storage/raft: Fix a panic when trying to write a key > 32KB [[GH-13282](https://github.com/hashicorp/vault/pull/13282)] +* ui: Do not show verify connection value on database connection config page [[GH-13152](https://github.com/hashicorp/vault/pull/13152)] +* ui: Fixes issue restoring raft storage snapshot [[GH-13107](https://github.com/hashicorp/vault/pull/13107)] +* ui: Fixes issue with OIDC auth workflow when using MetaMask Chrome extension [[GH-13133](https://github.com/hashicorp/vault/pull/13133)] +* ui: Fixes issue with automate secret deletion value not displaying initially if set in secret metadata edit view [[GH-13177](https://github.com/hashicorp/vault/pull/13177)] +* ui: Fixes issue with placeholder not displaying for automatically deleted secrets when deletion time has passed [[GH-13166](https://github.com/hashicorp/vault/pull/13166)] +* ui: Fixes node-forge error when parsing EC (elliptic curve) certs [[GH-13238](https://github.com/hashicorp/vault/pull/13238)] + +## 1.9.0 +### November 17, 2021 + +CHANGES: + +* auth/kubernetes: `disable_iss_validation` defaults to true. [#127](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/127) [[GH-12975](https://github.com/hashicorp/vault/pull/12975)] +* expiration: VAULT_16_REVOKE_PERMITPOOL environment variable has been removed. [[GH-12888](https://github.com/hashicorp/vault/pull/12888)] +* expiration: VAULT_LEASE_USE_LEGACY_REVOCATION_STRATEGY environment variable has +been removed. [[GH-12888](https://github.com/hashicorp/vault/pull/12888)] +* go: Update go version to 1.17.2 +* secrets/ssh: Roles with empty allowed_extensions will now forbid end-users +specifying extensions when requesting ssh key signing. Update roles setting +allowed_extensions to `*` to permit any extension to be specified by an end-user. [[GH-12847](https://github.com/hashicorp/vault/pull/12847)] + +FEATURES: + +* **Customizable HTTP Headers**: Add support to define custom HTTP headers for root path (`/`) and also on API endpoints (`/v1/*`) [[GH-12485](https://github.com/hashicorp/vault/pull/12485)] +* **Deduplicate Token With Entities in Activity Log**: Vault tokens without entities are now tracked with client IDs and deduplicated in the Activity Log [[GH-12820](https://github.com/hashicorp/vault/pull/12820)] +* **Elasticsearch Database UI**: The UI now supports adding and editing Elasticsearch connections in the database secret engine. [[GH-12672](https://github.com/hashicorp/vault/pull/12672)] +* **KV Custom Metadata**: Add ability in kv-v2 to specify version-agnostic custom key metadata via the +metadata endpoint. The data will be present in responses made to the data endpoint independent of the +calling token's `read` access to the metadata endpoint. [[GH-12907](https://github.com/hashicorp/vault/pull/12907)] +* **KV patch (Tech Preview)**: Add partial update support for the `/<mount>/data/:path` kv-v2 +endpoint through HTTP `PATCH`. A new `patch` ACL capability has been added and +is required to make such requests. [[GH-12687](https://github.com/hashicorp/vault/pull/12687)]
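As a rough illustration of the KV patch feature above: with a recent Go client, the partial update can be sent as a JSON merge patch. The mount and field names below are illustrative, and the calling token needs the new `patch` capability on the path:

```go
package main

import (
	"context"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Update a single field of a kv-v2 secret without first reading and
	// rewriting the whole secret. "secret" and "myapp" are placeholders.
	_, err = client.Logical().JSONMergePatch(context.Background(),
		"secret/data/myapp", map[string]interface{}{
			"data": map[string]interface{}{
				"api_key": "new-value", // only this key changes
			},
		})
	if err != nil {
		log.Fatal(err)
	}
}
```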
+* **Key Management Secrets Engine (Enterprise)**: Adds support for distributing and managing keys in GCP Cloud KMS. +* **Local Auth Mount Entities (enterprise)**: Logins on `local` auth mounts will +generate identity entities for the tokens issued. The aliases of the entity +resulting from local auth mounts (local-aliases) will be scoped by the cluster. +This means that the local-aliases will never leave the geographical boundary of +the cluster where they were issued. This is something to be mindful of for +those who have implemented local auth mounts to comply with GDPR guidelines. +* **Namespaces (Enterprise)**: Adds support for locking Vault API for particular namespaces. +* **OIDC Identity Provider (Tech Preview)**: Adds support for Vault to be an OpenID Connect (OIDC) provider. [[GH-12932](https://github.com/hashicorp/vault/pull/12932)] +* **Oracle Database UI**: The UI now supports adding and editing Oracle connections in the database secret engine. [[GH-12752](https://github.com/hashicorp/vault/pull/12752)] +* **Postgres Database UI**: The UI now supports adding and editing Postgres connections in the database secret engine. [[GH-12945](https://github.com/hashicorp/vault/pull/12945)] + +SECURITY: + +* core/identity: A Vault user with write permission to an entity alias ID sharing a mount accessor with another user may acquire this other user’s policies by merging their identities. This vulnerability, CVE-2021-41802, was fixed in Vault and Vault Enterprise 1.7.5, 1.8.4, and 1.9.0. +* core/identity: Templated ACL policies would always match the first-created entity alias if multiple entity aliases existed for a specified entity and mount combination, potentially resulting in incorrect policy enforcement. This vulnerability, CVE-2021-43998, was fixed in Vault and Vault Enterprise 1.7.6, 1.8.5, and 1.9.0. + +IMPROVEMENTS: + +* agent/cache: Process persistent cache leases in dependency order during restore to ensure child leases are always correctly restored [[GH-12843](https://github.com/hashicorp/vault/pull/12843)] +* agent/cache: Use an in-process listener between consul-template and vault-agent when caching is enabled and either templates or a listener is defined [[GH-12762](https://github.com/hashicorp/vault/pull/12762)] +* agent/cache: tolerate partial restore failure from persistent cache [[GH-12718](https://github.com/hashicorp/vault/pull/12718)] +* agent/template: add support for new 'writeToFile' template function [[GH-12505](https://github.com/hashicorp/vault/pull/12505)] +* api: Add configuration option for ensuring isolated read-after-write semantics for all Client requests. [[GH-12814](https://github.com/hashicorp/vault/pull/12814)]
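A short, hedged sketch of the read-after-write option from the entry above, assuming it is surfaced as the Go client's `SetReadYourWrites` toggle:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Opt in to read-after-write semantics: the client tracks the index
	// state returned by write responses and presents it on subsequent
	// requests, so its own reads are not served stale data.
	client.SetReadYourWrites(true)
}
```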
+* api: adds native Login method to Go client module with different auth method interfaces to support easier authentication (see the Go sketch below) [[GH-12796](https://github.com/hashicorp/vault/pull/12796)] +* api: Move mergeStates and other required utils from agent to api module [[GH-12731](https://github.com/hashicorp/vault/pull/12731)] +* api: Support VAULT_HTTP_PROXY environment variable to allow overriding the Vault client's HTTP proxy [[GH-12582](https://github.com/hashicorp/vault/pull/12582)] +* auth/approle: The `role/:name/secret-id-accessor/lookup` endpoint now returns a 404 status code when the `secret_id_accessor` cannot be found [[GH-12788](https://github.com/hashicorp/vault/pull/12788)] +* auth/approle: expose secret_id_accessor as WrappedAccessor when creating wrapped secret-id. [[GH-12425](https://github.com/hashicorp/vault/pull/12425)] +* auth/aws: add profile support for AWS credentials when using the AWS auth method [[GH-12621](https://github.com/hashicorp/vault/pull/12621)] +* auth/kubernetes: validate JWT against the provided role on alias look-ahead operations [[GH-12688](https://github.com/hashicorp/vault/pull/12688)] +* auth/kubernetes: Add ability to configure entity alias names based on the serviceaccount's namespace and name. [#110](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/110) [#112](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/112) [[GH-12633](https://github.com/hashicorp/vault/pull/12633)] +* auth/ldap: include support for an optional user filter field when searching for users [[GH-11000](https://github.com/hashicorp/vault/pull/11000)] +* auth/oidc: Adds the `skip_browser` CLI option to allow users to skip opening the default browser during the authentication flow. [[GH-12876](https://github.com/hashicorp/vault/pull/12876)] +* auth/okta: Send x-forwarded-for in Okta Push Factor request [[GH-12320](https://github.com/hashicorp/vault/pull/12320)] +* auth/token: Add `allowed_policies_glob` and `disallowed_policies_glob` fields to token roles to allow glob matching of policies [[GH-7277](https://github.com/hashicorp/vault/pull/7277)] +* cli: Operator diagnose now tests for missing or partial telemetry configurations. [[GH-12802](https://github.com/hashicorp/vault/pull/12802)] +* cli: add new http option: -header, which enables sending arbitrary headers with the cli [[GH-12508](https://github.com/hashicorp/vault/pull/12508)] +* command: operator generate-root -decode: allow passing encoded token via stdin [[GH-12881](https://github.com/hashicorp/vault/pull/12881)] +* core/token: Return the token_no_default_policy config on token role read if set [[GH-12565](https://github.com/hashicorp/vault/pull/12565)] +* core: Add support for go-sockaddr templated addresses in config. [[GH-9109](https://github.com/hashicorp/vault/pull/9109)] +* core: adds custom_metadata field for aliases [[GH-12502](https://github.com/hashicorp/vault/pull/12502)] +* core: Update Oracle Cloud library to enable seal integration with the uk-gov-london-1 region [[GH-12724](https://github.com/hashicorp/vault/pull/12724)] +* core: Update github.com/ulikunitz/xz to fix security vulnerability GHSA-25xm-hr59-7c27. [[GH-12253](https://github.com/hashicorp/vault/pull/12253)]
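The native Login entry above (GH-12796) is easiest to see in code. A hedged sketch using the AppRole helper package; the role-id and secret-id values are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/api/auth/approle"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Each auth helper implements the interface accepted by
	// client.Auth().Login; AppRole is just one example.
	auth, err := approle.NewAppRoleAuth(
		"my-role-id", // placeholder role_id
		&approle.SecretID{FromString: "my-secret-id"}, // placeholder secret_id
	)
	if err != nil {
		log.Fatal(err)
	}

	secret, err := client.Auth().Login(context.Background(), auth)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("logged in; token accessor: %s", secret.Auth.Accessor)
}
```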
+* core: Upgrade github.com/gogo/protobuf [[GH-12255](https://github.com/hashicorp/vault/pull/12255)] +* core: build with Go 1.17, and mitigate a breaking change in Go 1.17 that could impact how approle and ssh interpret IPs/CIDRs [[GH-12868](https://github.com/hashicorp/vault/pull/12868)] +* core: observe the client counts broken down by namespace for partial month client count [[GH-12393](https://github.com/hashicorp/vault/pull/12393)] +* core: Artifact builds will now only run on merges to the release branches or to `main` +* core: The [dockerfile](https://github.com/hashicorp/vault/blob/main/Dockerfile) that is used to build the vault docker image available at [hashicorp/vault](https://hub.docker.com/repository/docker/hashicorp/vault) now lives in the root of this repo, and the entrypoint is available under [.release/docker/docker-entrypoint.sh](https://github.com/hashicorp/vault/blob/main/.release/docker/docker-entrypoint.sh) +* core: The vault linux packaging service configs and pre/post install scripts are now available under [.release/linux](https://github.com/hashicorp/vault/blob/main/.release/linux) +* core: Vault linux packages are now available for all supported linux architectures including arm, arm64, 386, and amd64 +* db/cassandra: make the connect_timeout config option actually apply to connection timeouts, in addition to non-connection operations [[GH-12903](https://github.com/hashicorp/vault/pull/12903)] +* identity/token: Only return keys from the `.well-known/keys` endpoint that are being used by roles to sign/verify tokens. [[GH-12780](https://github.com/hashicorp/vault/pull/12780)] +* identity: fix issue where Cache-Control header causes stampede of requests for JWKS keys [[GH-12414](https://github.com/hashicorp/vault/pull/12414)] +* physical/etcd: Upgrade etcd3 client to v3.5.0 and etcd2 to v2.305.0. [[GH-11980](https://github.com/hashicorp/vault/pull/11980)] +* pki: adds signature_bits field to customize signature algorithm on CAs and certs signed by Vault [[GH-11245](https://github.com/hashicorp/vault/pull/11245)] +* plugin: update the couchbase gocb version in the couchbase plugin [[GH-12483](https://github.com/hashicorp/vault/pull/12483)] +* replication (enterprise): Add merkle.flushDirty.num_pages_outstanding metric which specifies number of +outstanding dirty pages that were not flushed. [[GH-2093](https://github.com/hashicorp/vault/pull/2093)] +* sdk/framework: The '+' wildcard is now supported for parameterizing unauthenticated paths (see the Go sketch below). [[GH-12668](https://github.com/hashicorp/vault/pull/12668)] +* secrets/aws: Add conditional template that allows custom usernames for both STS and IAM cases [[GH-12185](https://github.com/hashicorp/vault/pull/12185)] +* secrets/azure: Adds support for rotate-root. [#70](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/70) [[GH-13034](https://github.com/hashicorp/vault/pull/13034)] +* secrets/azure: Adds support for using Microsoft Graph API since Azure Active Directory API is being removed in 2022. [#67](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/67) [[GH-12629](https://github.com/hashicorp/vault/pull/12629)] +* secrets/database: Update MSSQL dependency github.com/denisenkom/go-mssqldb to v0.11.0 and include support for contained databases in MSSQL plugin [[GH-12839](https://github.com/hashicorp/vault/pull/12839)] +* secrets/pki: Allow signing of self-issued certs with a different signature algorithm.
[[GH-12514](https://github.com/hashicorp/vault/pull/12514)] +* secrets/pki: Use entropy augmentation when available when generating root and intermediate CA key material. [[GH-12559](https://github.com/hashicorp/vault/pull/12559)] +* secrets/pki: select appropriate signature algorithm for ECDSA signature on certificates. [[GH-11216](https://github.com/hashicorp/vault/pull/11216)] +* secrets/pki: Support ed25519 as a key for the pki backend [[GH-11780](https://github.com/hashicorp/vault/pull/11780)] +* secrets/rabbitmq: Update dependency github.com/michaelklishin/rabbit-hole to v2 and resolve UserInfo.tags regression from RabbitMQ v3.9 [[GH-12877](https://github.com/hashicorp/vault/pull/12877)] +* secrets/ssh: Let allowed_users template mix templated and non-templated parts. [[GH-10886](https://github.com/hashicorp/vault/pull/10886)] +* secrets/ssh: Use entropy augmentation when available for generation of the signing key. [[GH-12560](https://github.com/hashicorp/vault/pull/12560)] +* serviceregistration: add `external-source: "vault"` metadata value for Consul registration. [[GH-12163](https://github.com/hashicorp/vault/pull/12163)] +* storage/raft: Best-effort handling of cancelled contexts. [[GH-12162](https://github.com/hashicorp/vault/pull/12162)] +* transform (enterprise): Add advanced features for encoding and decoding for Transform FPE +* transform (enterprise): Add a `reference` field to batch items, and propagate it to the response +* ui: Add KV secret search box when no metadata list access. [[GH-12626](https://github.com/hashicorp/vault/pull/12626)] +* ui: Add custom metadata to KV secret engine and metadata to config [[GH-12169](https://github.com/hashicorp/vault/pull/12169)] +* ui: Creates new StatText component [[GH-12295](https://github.com/hashicorp/vault/pull/12295)] +* ui: client count monthly view [[GH-12554](https://github.com/hashicorp/vault/pull/12554)] +* ui: creates bar chart component for displaying client count data by namespace [[GH-12437](https://github.com/hashicorp/vault/pull/12437)] +* ui: Add creation time to KV 2 version history and version view [[GH-12663](https://github.com/hashicorp/vault/pull/12663)] +* ui: Added resize for JSON editor [[GH-12906](https://github.com/hashicorp/vault/pull/12906)] +* ui: Adds warning about white space in KV secret engine. [[GH-12921](https://github.com/hashicorp/vault/pull/12921)]
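For the sdk/framework wildcard entry in the list above, a hedged plugin-side sketch; the backend and the login/+/cert route are illustrative, not a real Vault plugin's paths:

```go
package pluginsketch

import (
	"github.com/hashicorp/vault/sdk/framework"
	"github.com/hashicorp/vault/sdk/logical"
)

// newBackend marks login/<one segment>/cert as callable without a
// token. The '+' wildcard matches exactly one path segment.
func newBackend() *framework.Backend {
	return &framework.Backend{
		PathsSpecial: &logical.Paths{
			Unauthenticated: []string{
				"login/+/cert",
			},
		},
	}
}
```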
+* ui: Click to copy database static role last rotation value in tooltip [[GH-12890](https://github.com/hashicorp/vault/pull/12890)] +* ui: Filter DB connection attributes so only relevant attrs POST to backend [[GH-12770](https://github.com/hashicorp/vault/pull/12770)] +* ui: Removes empty rows from DB config views [[GH-12819](https://github.com/hashicorp/vault/pull/12819)] +* ui: Standardizes toolbar presentation of destructive actions [[GH-12895](https://github.com/hashicorp/vault/pull/12895)] +* ui: Updates font for table row value fields [[GH-12908](https://github.com/hashicorp/vault/pull/12908)] +* ui: namespace search in client count views [[GH-12577](https://github.com/hashicorp/vault/pull/12577)] +* ui: parse and display pki cert metadata [[GH-12541](https://github.com/hashicorp/vault/pull/12541)] +* ui: replaces Vault's use of elazarl/go-bindata-assetfs in building the UI with Go's native Embed package [[GH-11208](https://github.com/hashicorp/vault/pull/11208)] +* ui: updated client tracking config view [[GH-12422](https://github.com/hashicorp/vault/pull/12422)] + +DEPRECATIONS: + +* auth/kubernetes: deprecate `disable_iss_validation` and `issuer` configuration fields [#127](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/127) [[GH-12975](https://github.com/hashicorp/vault/pull/12975)] + +BUG FIXES: + +* activity log (enterprise): allow partial monthly client count to be accessed from namespaces [[GH-13086](https://github.com/hashicorp/vault/pull/13086)] +* agent: Avoid possible `unexpected fault address` panic when using persistent cache. [[GH-12534](https://github.com/hashicorp/vault/pull/12534)] +* api: Fixes storage APIs returning incorrect error when parsing responses [[GH-12338](https://github.com/hashicorp/vault/pull/12338)] +* auth/aws: Fix ec2 auth on instances that have a cert in their PKCS7 signature [[GH-12519](https://github.com/hashicorp/vault/pull/12519)] +* auth/aws: Fixes ec2 login no longer supporting DSA signature verification [[GH-12340](https://github.com/hashicorp/vault/pull/12340)] +* auth/aws: fix config/rotate-root to store new key [[GH-12715](https://github.com/hashicorp/vault/pull/12715)] +* auth/jwt: Fixes OIDC auth from the Vault UI when using `form_post` as the `oidc_response_mode`. [[GH-12265](https://github.com/hashicorp/vault/pull/12265)] +* cli/api: Provide consistent handling of comma-separated parameters in auth/secret enable/tune [[GH-12126](https://github.com/hashicorp/vault/pull/12126)] +* cli: fixes CLI requests when namespace is both provided as argument and part of the path [[GH-12720](https://github.com/hashicorp/vault/pull/12720)] +* cli: fixes CLI requests when namespace is both provided as argument and part of the path [[GH-12911](https://github.com/hashicorp/vault/pull/12911)] +* cli: vault debug now puts newlines after every captured log line. [[GH-12175](https://github.com/hashicorp/vault/pull/12175)] +* core (enterprise): Allow deletion of stored licenses on DR secondary nodes +* core (enterprise): Disallow autogenerated licenses to be used in diagnose even when config is specified +* core (enterprise): Fix bug where password generation through password policies does not work on namespaces if performed outside a request callback or from an external plugin.
[[GH-12635](https://github.com/hashicorp/vault/pull/12635)] +* core (enterprise): Fix data race during perf standby sealing +* core (enterprise): Fixes reading raft auto-snapshot configuration from performance standby node [[GH-12317](https://github.com/hashicorp/vault/pull/12317)] +* core (enterprise): Only delete quotas on primary cluster. [[GH-12339](https://github.com/hashicorp/vault/pull/12339)] +* core (enterprise): namespace header included in responses, Go client uses it when displaying error messages [[GH-12196](https://github.com/hashicorp/vault/pull/12196)] +* core/api: Fix an arm64 bug converting a negative int to an unsigned int [[GH-12372](https://github.com/hashicorp/vault/pull/12372)] +* core/identity: Address a data race condition between local updates to aliases and invalidations [[GH-13093](https://github.com/hashicorp/vault/pull/13093)] +* core/identity: Cleanup alias in the in-memory entity after an alias deletion by ID [[GH-12834](https://github.com/hashicorp/vault/pull/12834)] +* core/identity: Disallow entity alias creation/update if a conflicting alias exists for the target entity and mount combination [[GH-12747](https://github.com/hashicorp/vault/pull/12747)] +* core: Fix a deadlock on HA leadership transfer [[GH-12691](https://github.com/hashicorp/vault/pull/12691)] +* core: Fix warnings logged on perf standbys re stored versions [[GH-13042](https://github.com/hashicorp/vault/pull/13042)] +* core: fix byte printing for diagnose disk checks [[GH-12229](https://github.com/hashicorp/vault/pull/12229)] +* core: revert some unintentionally downgraded dependencies from 1.9.0-rc1 [[GH-13168](https://github.com/hashicorp/vault/pull/13168)] +* database/couchbase: change default template to truncate username at 128 characters [[GH-12301](https://github.com/hashicorp/vault/pull/12301)] +* database/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. [[GH-12413](https://github.com/hashicorp/vault/pull/12413)] +* http (enterprise): Always forward internal/counters endpoints from perf standbys to active node +* http: removed unpublished true from logical_system path, making openapi spec consistent with documentation [[GH-12713](https://github.com/hashicorp/vault/pull/12713)] +* identity/token: Adds missing call to unlock mutex in key deletion error handling [[GH-12916](https://github.com/hashicorp/vault/pull/12916)] +* identity: Fail alias rename if the resulting (name,accessor) exists already [[GH-12473](https://github.com/hashicorp/vault/pull/12473)] +* identity: Fix a panic on arm64 platform when doing identity I/O. [[GH-12371](https://github.com/hashicorp/vault/pull/12371)] +* identity: Fix regression preventing startup when aliases were created pre-1.9. 
[[GH-13169](https://github.com/hashicorp/vault/pull/13169)] +* identity: dedup from_entity_ids when merging two entities [[GH-10101](https://github.com/hashicorp/vault/pull/10101)] +* identity: disallow creation of role without a key parameter [[GH-12208](https://github.com/hashicorp/vault/pull/12208)] +* identity: do not allow a role's token_ttl to be longer than the signing key's verification_ttl [[GH-12151](https://github.com/hashicorp/vault/pull/12151)] +* identity: merge associated entity groups when merging entities [[GH-10085](https://github.com/hashicorp/vault/pull/10085)] +* identity: suppress duplicate policies on entities [[GH-12812](https://github.com/hashicorp/vault/pull/12812)] +* kmip (enterprise): Fix handling of custom attributes when servicing GetAttributes requests +* kmip (enterprise): Fix handling of invalid role parameters within various vault api calls +* kmip (enterprise): Forward KMIP register operations to the active node +* license: ignore stored terminated license while autoloading is enabled [[GH-2104](https://github.com/hashicorp/vault/pull/2104)] +* licensing (enterprise): Revert accidental inclusion of the TDE feature from the `prem` build. +* physical/raft: Fix safeio.Rename error when restoring snapshots on windows [[GH-12377](https://github.com/hashicorp/vault/pull/12377)] +* pki: Fix regression preventing email addresses being used as a common name within certificates [[GH-12716](https://github.com/hashicorp/vault/pull/12716)] +* plugin/couchbase: Fix an issue in which the locking patterns did not allow parallel requests. [[GH-13033](https://github.com/hashicorp/vault/pull/13033)] +* plugin/snowflake: Fixed bug where plugin would crash on 32 bit systems [[GH-12378](https://github.com/hashicorp/vault/pull/12378)] +* raft (enterprise): Fix panic when updating auto-snapshot config +* replication (enterprise): Fix issue where merkle.flushDirty.num_pages metric is not emitted if number +of dirty pages is 0. [[GH-2093](https://github.com/hashicorp/vault/pull/2093)] +* replication (enterprise): Fix merkle.saveCheckpoint.num_dirty metric to accurately specify the number +of dirty pages in the merkle tree at time of checkpoint creation. [[GH-2093](https://github.com/hashicorp/vault/pull/2093)] +* sdk/database: Fix a DeleteUser error message on the gRPC client. [[GH-12351](https://github.com/hashicorp/vault/pull/12351)] +* secrets/db: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [[GH-12563](https://github.com/hashicorp/vault/pull/12563)] +* secrets/gcp: Fixes a potential panic in the service account policy rollback for rolesets. [[GH-12379](https://github.com/hashicorp/vault/pull/12379)] +* secrets/keymgmt (enterprise): Fix support for Azure Managed HSM Key Vault instances. [[GH-12934](https://github.com/hashicorp/vault/pull/12934)] +* secrets/openldap: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [#28](https://github.com/hashicorp/vault-plugin-secrets-openldap/pull/28) [[GH-12600](https://github.com/hashicorp/vault/pull/12600)] +* secrets/transit: Enforce minimum cache size for transit backend and init cache size on transit backend without restart. [[GH-12418](https://github.com/hashicorp/vault/pull/12418)] +* storage/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. 
[[GH-12413](https://github.com/hashicorp/vault/pull/12413)] +* storage/raft (enterprise): Ensure that raft autosnapshot backoff retry duration never hits 0s +* storage/raft: Detect incomplete raft snapshots in api.RaftSnapshot(), and thereby in `vault operator raft snapshot save`. [[GH-12388](https://github.com/hashicorp/vault/pull/12388)] +* storage/raft: Fix regression in 1.9.0-rc1 that changed how time is represented in Raft logs; this prevented using a raft db created pre-1.9. [[GH-13165](https://github.com/hashicorp/vault/pull/13165)] +* storage/raft: Support `addr_type=public_v6` in auto-join [[GH-12366](https://github.com/hashicorp/vault/pull/12366)] +* transform (enterprise): Enforce minimum cache size for Transform backend and reset cache size without a restart +* transform (enterprise): Fix an error where the decode response of an expired token is an empty result rather than an error. +* ui: Adds pagination to auth methods list view [[GH-13054](https://github.com/hashicorp/vault/pull/13054)] +* ui: Fix bug where capabilities check on secret-delete-menu was encoding the forward slashes. [[GH-12550](https://github.com/hashicorp/vault/pull/12550)] +* ui: Fix bug where edit role form on auth method is invalid by default [[GH-12646](https://github.com/hashicorp/vault/pull/12646)] +* ui: Fixed api explorer routing bug [[GH-12354](https://github.com/hashicorp/vault/pull/12354)] +* ui: Fixed text overflow in flash messages [[GH-12357](https://github.com/hashicorp/vault/pull/12357)] +* ui: Fixes issue with the number of PGP Key inputs not matching the key shares number in the initialization form on change [[GH-13038](https://github.com/hashicorp/vault/pull/13038)] +* ui: Fixes metrics page when read on counter config not allowed [[GH-12348](https://github.com/hashicorp/vault/pull/12348)] +* ui: Remove spinner after token renew [[GH-12887](https://github.com/hashicorp/vault/pull/12887)] +* ui: Removes ability to tune token_type for token auth methods [[GH-12904](https://github.com/hashicorp/vault/pull/12904)] +* ui: Show day of month instead of day of year in the expiration warning dialog [[GH-11984](https://github.com/hashicorp/vault/pull/11984)] +* ui: fix issue where tabbing in a MaskedInput on auth methods would clear the value. [[GH-12409](https://github.com/hashicorp/vault/pull/12409)] +* ui: fix missing navbar items on login to namespace [[GH-12478](https://github.com/hashicorp/vault/pull/12478)] +* ui: update bar chart when model changes [[GH-12622](https://github.com/hashicorp/vault/pull/12622)] +* ui: updating database TTL picker help text. [[GH-12212](https://github.com/hashicorp/vault/pull/12212)] + +## 1.8.12 +### June 10, 2022 + +BUG FIXES: + +* agent: Redact auto auth token from renew endpoints [[GH-15380](https://github.com/hashicorp/vault/pull/15380)] +* core: Prevent changing file permissions of audit logs when mode 0000 is used. [[GH-15759](https://github.com/hashicorp/vault/pull/15759)] +* core: fixed systemd reloading notification [[GH-15041](https://github.com/hashicorp/vault/pull/15041)] +* core: pre-calculate namespace specific paths when tainting a route during postUnseal [[GH-15067](https://github.com/hashicorp/vault/pull/15067)] +* storage/raft (enterprise): Auto-snapshot configuration now forbids slashes in file prefixes for all types, and "/" in path prefix for local storage type. Strip leading prefix in path prefix for AWS. Improve error handling/reporting.
+* transform (enterprise): Fix non-overridable column default value causing tokenization tokens to expire prematurely when using the MySQL storage backend. + +## 1.8.11 +### April 29, 2022 + +BUG FIXES: + +* raft: fix Raft TLS key rotation panic that occurs if active key is more than 24 hours old [[GH-15156](https://github.com/hashicorp/vault/pull/15156)] +* sdk: Fix OpenApi spec generator to properly convert TypeInt64 to OAS supported int64 [[GH-15104](https://github.com/hashicorp/vault/pull/15104)] + +## 1.8.10 +### April 22, 2022 + +CHANGES: + +* core: A request that fails path validation due to relative path check will now be responded to with a 400 rather than 500. [[GH-14328](https://github.com/hashicorp/vault/pull/14328)] +* core: Bump Go version to 1.16.15. [[GH-go-ver-1810](https://github.com/hashicorp/vault/pull/go-ver-1810)] + +IMPROVEMENTS: + +* auth/ldap: Add username_as_alias configurable to change how aliases are named [[GH-14324](https://github.com/hashicorp/vault/pull/14324)] +* core: Systemd unit file included with the Linux packages now sets the service type to notify. [[GH-14385](https://github.com/hashicorp/vault/pull/14385)] +* sentinel (enterprise): Upgrade sentinel to [v0.18.5](https://docs.hashicorp.com/sentinel/changelog#0-18-5-january-14-2022) to avoid potential naming collisions in the remote installer + +BUG FIXES: + +* api/sys/raft: Update RaftSnapshotRestore to use net/http client allowing bodies larger than allocated memory to be streamed [[GH-14269](https://github.com/hashicorp/vault/pull/14269)] +* auth/approle: Add maximum length for input values that result in SHA256 HMAC calculation [[GH-14746](https://github.com/hashicorp/vault/pull/14746)] +* cassandra: Update gocql Cassandra client to fix "no hosts available in the pool" error [[GH-14973](https://github.com/hashicorp/vault/pull/14973)] +* cli: Fix panic caused by parsing key=value fields whose value is a single backslash [[GH-14523](https://github.com/hashicorp/vault/pull/14523)] +* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited integers [[GH-15072](https://github.com/hashicorp/vault/pull/15072)] +* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited strings [[GH-14522](https://github.com/hashicorp/vault/pull/14522)] +* core: Fix panic caused by parsing policies with empty slice values. [[GH-14501](https://github.com/hashicorp/vault/pull/14501)] +* core: Fix panic for help request URL paths without /v1/ prefix [[GH-14704](https://github.com/hashicorp/vault/pull/14704)] +* core: fixing excessive unix file permissions [[GH-14791](https://github.com/hashicorp/vault/pull/14791)] +* core: fixing excessive unix file permissions on dir, files and archive created by vault debug command [[GH-14846](https://github.com/hashicorp/vault/pull/14846)] +* core: report unused or redundant keys in server configuration [[GH-14752](https://github.com/hashicorp/vault/pull/14752)] +* core: Fix memory leak caused by time.After() used in a select statement [[GH-14814](https://github.com/hashicorp/vault/pull/14814)] +* metrics/autosnapshots (enterprise): Fix bug that could cause +vault.autosnapshots.save.errors to not be incremented when there is an +autosnapshot save error. +* ui: Fix Generated Token's Policies helpText to clarify that comma separated values are not accepted in this field.
+* ui: Fixes edit auth method capabilities issue [[GH-14966](https://github.com/hashicorp/vault/pull/14966)]
+* ui: Fixes issue logging out with wrapped token query parameter [[GH-14329](https://github.com/hashicorp/vault/pull/14329)]
+* ui: Fixes issue with correct auth method not selected when logging out from OIDC or JWT methods [[GH-14545](https://github.com/hashicorp/vault/pull/14545)]
+* ui: fix search-select component showing blank selections when editing group member entity [[GH-15058](https://github.com/hashicorp/vault/pull/15058)]
+* ui: masked values no longer give away length or location of special characters [[GH-15025](https://github.com/hashicorp/vault/pull/15025)]
+
+## 1.8.9
+### March 3, 2022
+
+SECURITY:
+
+* secrets/pki: Vault and Vault Enterprise (“Vault”) allowed the PKI secrets engine under certain configurations to issue wildcard certificates to authorized users for a specified domain, even if the PKI role policy attribute allow_subdomains is set to false. This vulnerability, CVE-2022-25243, was fixed in Vault 1.8.9 and 1.9.4.
+* transform (enterprise): Vault Enterprise (“Vault”) clusters using the tokenization transform feature can expose the tokenization key through the tokenization key configuration endpoint to authorized operators with read permissions on this endpoint. This vulnerability, CVE-2022-25244, was fixed in Vault Enterprise 1.7.10, 1.8.9, and 1.9.4.
+
+IMPROVEMENTS:
+
+* secrets/pki: Restrict issuance of wildcard certificates via role parameter (`allow_wildcard_certificates`) [[GH-14238](https://github.com/hashicorp/vault/pull/14238)]
+
+BUG FIXES:
+
+* auth/aws: Fix ec2 auth on instances that have a cert in their PKCS7 signature [[GH-12519](https://github.com/hashicorp/vault/pull/12519)]
+* database/mssql: Removed string interpolation on internal queries and replaced them with inline queries using named parameters. [[GH-13799](https://github.com/hashicorp/vault/pull/13799)]
+* secrets/openldap: Fix panic from nil logger in backend [[GH-14170](https://github.com/hashicorp/vault/pull/14170)]
+* secrets/pki: Fix issuance of wildcard certificates matching glob patterns [[GH-14235](https://github.com/hashicorp/vault/pull/14235)]
+* ui: Fix issue removing raft storage peer via cli not reflected in UI until refresh [[GH-13098](https://github.com/hashicorp/vault/pull/13098)]
+* ui: Trigger background token self-renewal if inactive and half of TTL has passed [[GH-13950](https://github.com/hashicorp/vault/pull/13950)]
+
+## 1.8.8
+### January 27, 2022
+
+IMPROVEMENTS:
+
+* core: Fixes code scanning alerts [[GH-13667](https://github.com/hashicorp/vault/pull/13667)]
+
+BUG FIXES:
+
+* auth/oidc: Fixes OIDC auth from the Vault UI when using the implicit flow and `form_post` response mode. [[GH-13494](https://github.com/hashicorp/vault/pull/13494)]
+* core (enterprise): Workaround AWS CloudHSM v5 SDK issue not allowing read-only sessions
+* kmip (enterprise): Fix locate-by-name operations failing to find keys after a rekey operation.
+* secrets/gcp: Fixes role bindings for BigQuery dataset resources. [[GH-13549](https://github.com/hashicorp/vault/pull/13549)]
+* secrets/pki: Fix regression causing performance secondaries to forward certificate generation to the primary. [[GH-13759](https://github.com/hashicorp/vault/pull/13759)]
+* storage/raft: Fix issues allowing invalid nodes to become leadership candidates. [[GH-13703](https://github.com/hashicorp/vault/pull/13703)]
+* storage/raft: On Linux, use map_populate for bolt files to improve startup time. [[GH-13573](https://github.com/hashicorp/vault/pull/13573)]
+* storage/raft: Units for bolt metrics now given in milliseconds instead of nanoseconds [[GH-13749](https://github.com/hashicorp/vault/pull/13749)]
+* ui: Fixes breadcrumb bug for secrets navigation [[GH-13604](https://github.com/hashicorp/vault/pull/13604)]
+* ui: Fixes issue saving KMIP role correctly [[GH-13585](https://github.com/hashicorp/vault/pull/13585)]
+
+## 1.8.7
+### December 21, 2021
+
+CHANGES:
+
+* go: Update go version to 1.16.12 [[GH-13422](https://github.com/hashicorp/vault/pull/13422)]
+
+## 1.8.6
+### December 9, 2021
+
+CHANGES:
+
+* go: Update go version to 1.16.9 [[GH-13029](https://github.com/hashicorp/vault/pull/13029)]
+
+SECURITY:
+
+* storage/raft: Integrated Storage backend could be caused to crash by an authenticated user with write permissions to the KV secrets engine. This vulnerability, CVE-2021-45042, was fixed in Vault 1.7.7, 1.8.6, and 1.9.1.
+
+BUG FIXES:
+
+* ha (enterprise): Prevents performance standby nodes from serving and caching stale data immediately after performance standby election completes
+* storage/raft: Fix a panic when trying to store a key > 32KB in a transaction. [[GH-13286](https://github.com/hashicorp/vault/pull/13286)]
+* storage/raft: Fix a panic when trying to write a key > 32KB [[GH-13282](https://github.com/hashicorp/vault/pull/13282)]
+* ui: Adds pagination to auth methods list view [[GH-13054](https://github.com/hashicorp/vault/pull/13054)]
+* ui: Do not show verify connection value on database connection config page [[GH-13152](https://github.com/hashicorp/vault/pull/13152)]
+* ui: Fixes issue restoring raft storage snapshot [[GH-13107](https://github.com/hashicorp/vault/pull/13107)]
+* ui: Fixes issue with OIDC auth workflow when using MetaMask Chrome extension [[GH-13133](https://github.com/hashicorp/vault/pull/13133)]
+* ui: Fixes issue with the number of PGP Key inputs not matching the key shares number in the initialization form on change [[GH-13038](https://github.com/hashicorp/vault/pull/13038)]
+
+## 1.8.5
+### November 4, 2021
+
+SECURITY:
+
+* core/identity: Templated ACL policies would always match the first-created entity alias if multiple entity aliases existed for a specified entity and mount combination, potentially resulting in incorrect policy enforcement. This vulnerability, CVE-2021-43998, was fixed in Vault and Vault Enterprise 1.7.6, 1.8.5, and 1.9.0.
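+
+For context, the class of policy affected by CVE-2021-43998 is an identity-templated ACL policy along the following lines (a minimal sketch; the `secret/data` path layout and the `auth_userpass_abc123` mount accessor are illustrative assumptions, not taken from this changelog):
+
+```hcl
+# Grant each entity access to a KV subtree named after its alias on one auth mount.
+# With multiple aliases on the same entity and mount, this template could resolve
+# to the first-created alias rather than the alias that actually authenticated.
+path "secret/data/{{identity.entity.aliases.auth_userpass_abc123.name}}/*" {
+  capabilities = ["read", "list"]
+}
+```
+
+This is also why the fixes below disallow creating a second, conflicting alias for the same entity and mount combination.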
+
+BUG FIXES:
+
+* auth/aws: fix config/rotate-root to store new key [[GH-12715](https://github.com/hashicorp/vault/pull/12715)]
+* core/identity: Cleanup alias in the in-memory entity after an alias deletion by ID [[GH-12834](https://github.com/hashicorp/vault/pull/12834)]
+* core/identity: Disallow entity alias creation/update if a conflicting alias exists for the target entity and mount combination [[GH-12747](https://github.com/hashicorp/vault/pull/12747)]
+* http (enterprise): Always forward internal/counters endpoints from perf standbys to active node
+* identity/token: Adds missing call to unlock mutex in key deletion error handling [[GH-12916](https://github.com/hashicorp/vault/pull/12916)]
+* kmip (enterprise): Fix handling of custom attributes when servicing GetAttributes requests
+* kmip (enterprise): Fix handling of invalid role parameters within various Vault API calls
+* kmip (enterprise): Forward KMIP register operations to the active node
+* secrets/keymgmt (enterprise): Fix support for Azure Managed HSM Key Vault instances. [[GH-12952](https://github.com/hashicorp/vault/pull/12952)]
+* transform (enterprise): Fix an error where the decode response of an expired token is an empty result rather than an error.
+
+## 1.8.4
+### 6 October 2021
+
+SECURITY:
+
+* core/identity: A Vault user with write permission to an entity alias ID sharing a mount accessor with another user may acquire this other user’s policies by merging their identities. This vulnerability, CVE-2021-41802, was fixed in Vault and Vault Enterprise 1.7.5 and 1.8.4.
+
+IMPROVEMENTS:
+
+* core: Update Oracle Cloud library to enable seal integration with the uk-gov-london-1 region [[GH-12724](https://github.com/hashicorp/vault/pull/12724)]
+
+BUG FIXES:
+
+* core: Fix a deadlock on HA leadership transfer [[GH-12691](https://github.com/hashicorp/vault/pull/12691)]
+* database/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. [[GH-12413](https://github.com/hashicorp/vault/pull/12413)]
+* pki: Fix regression preventing email addresses being used as a common name within certificates [[GH-12716](https://github.com/hashicorp/vault/pull/12716)]
+* storage/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. [[GH-12413](https://github.com/hashicorp/vault/pull/12413)]
+* ui: Fix bug where edit role form on auth method is invalid by default [[GH-12646](https://github.com/hashicorp/vault/pull/12646)]
+
+## 1.8.3
+### 29 September 2021
+
+IMPROVEMENTS:
+
+* secrets/pki: Allow signing of self-issued certs with a different signature algorithm. [[GH-12514](https://github.com/hashicorp/vault/pull/12514)]
+
+BUG FIXES:
+
+* agent: Avoid possible `unexpected fault address` panic when using persistent cache. [[GH-12534](https://github.com/hashicorp/vault/pull/12534)]
+* core (enterprise): Allow deletion of stored licenses on DR secondary nodes
+* core (enterprise): Fix bug where password generation through password policies does not work on namespaces if performed outside a request callback or from an external plugin. [[GH-12635](https://github.com/hashicorp/vault/pull/12635)]
+* core (enterprise): Only delete quotas on primary cluster. [[GH-12339](https://github.com/hashicorp/vault/pull/12339)]
+* identity: Fail alias rename if the resulting (name,accessor) exists already [[GH-12473](https://github.com/hashicorp/vault/pull/12473)]
+* raft (enterprise): Fix panic when updating auto-snapshot config
+* secrets/db: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [[GH-12563](https://github.com/hashicorp/vault/pull/12563)]
+* secrets/openldap: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [#28](https://github.com/hashicorp/vault-plugin-secrets-openldap/pull/28) [[GH-12599](https://github.com/hashicorp/vault/pull/12599)]
+* secrets/transit: Enforce minimum cache size for transit backend and init cache size on transit backend without restart. [[GH-12418](https://github.com/hashicorp/vault/pull/12418)]
+* storage/raft: Detect incomplete raft snapshots in api.RaftSnapshot(), and thereby in `vault operator raft snapshot save`. [[GH-12388](https://github.com/hashicorp/vault/pull/12388)]
+* ui: Fix bug where capabilities check on secret-delete-menu was encoding the forward slashes. [[GH-12550](https://github.com/hashicorp/vault/pull/12550)]
+* ui: Show day of month instead of day of year in the expiration warning dialog [[GH-11984](https://github.com/hashicorp/vault/pull/11984)]
+
+## 1.8.2
+### 26 August 2021
+
+CHANGES:
+
+* Alpine: Docker images for Vault 1.6.6+, 1.7.4+, and 1.8.2+ are built with Alpine 3.14, due to CVE-2021-36159
+* go: Update go version to 1.16.7 [[GH-12408](https://github.com/hashicorp/vault/pull/12408)]
+
+BUG FIXES:
+
+* auth/aws: Fixes ec2 login no longer supporting DSA signature verification [[GH-12340](https://github.com/hashicorp/vault/pull/12340)]
+* cli: vault debug now puts newlines after every captured log line. [[GH-12175](https://github.com/hashicorp/vault/pull/12175)]
+* database/couchbase: change default template to truncate username at 128 characters [[GH-12300](https://github.com/hashicorp/vault/pull/12300)]
+* identity: Fix a panic on arm64 platform when doing identity I/O. [[GH-12371](https://github.com/hashicorp/vault/pull/12371)]
+* physical/raft: Fix safeio.Rename error when restoring snapshots on Windows [[GH-12377](https://github.com/hashicorp/vault/pull/12377)]
+* plugin/snowflake: Fixed bug where plugin would crash on 32 bit systems [[GH-12378](https://github.com/hashicorp/vault/pull/12378)]
+* sdk/database: Fix a DeleteUser error message on the gRPC client. [[GH-12351](https://github.com/hashicorp/vault/pull/12351)]
+* secrets/gcp: Fixes a potential panic in the service account policy rollback for rolesets. [[GH-12379](https://github.com/hashicorp/vault/pull/12379)]
+* ui: Fixed api explorer routing bug [[GH-12354](https://github.com/hashicorp/vault/pull/12354)]
+* ui: Fixes metrics page when read on counter config not allowed [[GH-12348](https://github.com/hashicorp/vault/pull/12348)]
+* ui: fix issue where tabbing in a MaskedInput field on auth methods would clear the value. [[GH-12409](https://github.com/hashicorp/vault/pull/12409)]
+
+## 1.8.1
+### August 5th, 2021
+
+CHANGES:
+
+* go: Update go version to 1.16.6 [[GH-12245](https://github.com/hashicorp/vault/pull/12245)]
+
+IMPROVEMENTS:
+
+* serviceregistration: add `external-source: "vault"` metadata value for Consul registration. [[GH-12163](https://github.com/hashicorp/vault/pull/12163)]
+
+BUG FIXES:
+
+* auth/aws: Remove warning stating AWS Token TTL will be capped by the Default Lease TTL. [[GH-12026](https://github.com/hashicorp/vault/pull/12026)]
+* auth/jwt: Fixes OIDC auth from the Vault UI when using `form_post` as the `oidc_response_mode`. [[GH-12258](https://github.com/hashicorp/vault/pull/12258)]
+* core (enterprise): Disallow autogenerated licenses to be used in diagnose even when config is specified
+* core: fix byte printing for diagnose disk checks [[GH-12229](https://github.com/hashicorp/vault/pull/12229)]
+* identity: do not allow a role's token_ttl to be longer than the signing key's verification_ttl [[GH-12151](https://github.com/hashicorp/vault/pull/12151)]
+
+## 1.8.0
+### July 28th, 2021
+
+CHANGES:
+
+* agent: Errors in the template engine will no longer cause agent to exit unless
+explicitly defined to do so. A new configuration parameter,
+`exit_on_retry_failure`, within the new top-level stanza, `template_config`, can
+be set to `true` in order to cause agent to exit. Note that for agent to exit if
+`template.error_on_missing_key` is set to `true`, `exit_on_retry_failure` must
+also be set to `true`. Otherwise, the template engine will log an error but then
+restart its internal runner (an illustrative `template_config` sketch appears
+further below). [[GH-11775](https://github.com/hashicorp/vault/pull/11775)]
+* agent: Update to use IAM Service Account Credentials endpoint for signing JWTs
+when using GCP Auto-Auth method [[GH-11473](https://github.com/hashicorp/vault/pull/11473)]
+* core (enterprise): License/EULA changes that ensure the presence of a valid HashiCorp license to
+start Vault. More information is available in the [Vault License FAQ](https://www.vaultproject.io/docs/enterprise/license/faqs)
+
+FEATURES:
+
+* **GCP Secrets Engine Static Accounts**: Adds ability to use existing service accounts for generation
+  of service account keys and access tokens. [[GH-12023](https://github.com/hashicorp/vault/pull/12023)]
+* **Key Management Secrets Engine (Enterprise)**: Adds general availability for distributing and managing keys in AWS KMS. [[GH-11958](https://github.com/hashicorp/vault/pull/11958)]
+* **License Autoloading (Enterprise)**: Licenses may now be automatically loaded from the environment or disk.
+* **MySQL Database UI**: The UI now supports adding and editing MySQL connections in the database secret engine [[GH-11532](https://github.com/hashicorp/vault/pull/11532)]
+* **Vault Diagnose**: A new `vault operator` command to detect common issues with Vault server setups.
+
+SECURITY:
+
+* storage/raft: When initializing Vault’s Integrated Storage backend, excessively broad filesystem permissions may be set for the underlying Bolt database used by Vault’s Raft implementation. This vulnerability, CVE-2021-38553, was fixed in Vault 1.8.0.
+* ui: The Vault UI erroneously cached and exposed user-viewed secrets between authenticated sessions in a single shared browser, if the browser window / tab was not refreshed or closed between logout and a subsequent login. This vulnerability, CVE-2021-38554, was fixed in Vault 1.8.0 and will be addressed in pending 1.7.4 / 1.6.6 releases.
+
+IMPROVEMENTS:
+
+* agent/template: Added static_secret_render_interval to specify how often to fetch non-leased secrets [[GH-11934](https://github.com/hashicorp/vault/pull/11934)]
+* agent: Allow Agent auto auth to read symlinked JWT files [[GH-11502](https://github.com/hashicorp/vault/pull/11502)]
+* api: Allow a leveled logger to be provided to `api.Client` through `SetLogger`. [[GH-11696](https://github.com/hashicorp/vault/pull/11696)]
+* auth/aws: Underlying error included in validation failure message. 
[[GH-11638](https://github.com/hashicorp/vault/pull/11638)] +* cli/api: Add lease lookup command [[GH-11129](https://github.com/hashicorp/vault/pull/11129)] +* core: Add `prefix_filter` to telemetry config [[GH-12025](https://github.com/hashicorp/vault/pull/12025)] +* core: Add a darwin/arm64 binary release supporting the Apple M1 CPU [[GH-12071](https://github.com/hashicorp/vault/pull/12071)] +* core: Add a small (<1s) exponential backoff to failed TCP listener Accept failures. [[GH-11588](https://github.com/hashicorp/vault/pull/11588)] +* core (enterprise): Add controlled capabilities to control group policy stanza +* core: Add metrics for standby node forwarding. [[GH-11366](https://github.com/hashicorp/vault/pull/11366)] +* core: Add metrics to report if a node is a perf standby, if a node is a dr secondary or primary, and if a node is a perf secondary or primary. [[GH-11472](https://github.com/hashicorp/vault/pull/11472)] +* core: Send notifications to systemd on start, stop, and configuration reload. [[GH-11517](https://github.com/hashicorp/vault/pull/11517)] +* core: add irrevocable lease list and count apis [[GH-11607](https://github.com/hashicorp/vault/pull/11607)] +* core: allow arbitrary length stack traces upon receiving SIGUSR2 (was 32MB) [[GH-11364](https://github.com/hashicorp/vault/pull/11364)] +* core: Improve renew/revoke performance using per-lease locks [[GH-11122](https://github.com/hashicorp/vault/pull/11122)] +* db/cassandra: Added tls_server_name to specify server name for TLS validation [[GH-11820](https://github.com/hashicorp/vault/pull/11820)] +* go: Update to Go 1.16.5 [[GH-11802](https://github.com/hashicorp/vault/pull/11802)] +* replication: Delay evaluation of X-Vault-Index headers until merkle sync completes. +* secrets/rabbitmq: Add ability to customize dynamic usernames [[GH-11899](https://github.com/hashicorp/vault/pull/11899)] +* secrets/ad: Add `rotate-role` endpoint to allow rotations of service accounts. [[GH-11942](https://github.com/hashicorp/vault/pull/11942)] +* secrets/aws: add IAM tagging support for iam_user roles [[GH-10953](https://github.com/hashicorp/vault/pull/10953)] +* secrets/aws: add ability to provide a role session name when generating STS credentials [[GH-11345](https://github.com/hashicorp/vault/pull/11345)] +* secrets/database/elasticsearch: Add ability to customize dynamic usernames [[GH-11957](https://github.com/hashicorp/vault/pull/11957)] +* secrets/database/influxdb: Add ability to customize dynamic usernames [[GH-11796](https://github.com/hashicorp/vault/pull/11796)] +* secrets/database/mongodb: Add ability to customize `SocketTimeout`, `ConnectTimeout`, and `ServerSelectionTimeout` [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] +* secrets/database/mongodb: Increased throughput by allowing for multiple request threads to simultaneously update users in MongoDB [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] +* secrets/database/mongodbatlas: Adds the ability to customize username generation for dynamic users in MongoDB Atlas. 
[[GH-11956](https://github.com/hashicorp/vault/pull/11956)] +* secrets/database/redshift: Add ability to customize dynamic usernames [[GH-12016](https://github.com/hashicorp/vault/pull/12016)] +* secrets/database/snowflake: Add ability to customize dynamic usernames [[GH-11997](https://github.com/hashicorp/vault/pull/11997)] +* ssh: add support for templated values in SSH CA DefaultExtensions [[GH-11495](https://github.com/hashicorp/vault/pull/11495)] +* storage/raft: Improve raft batch size selection [[GH-11907](https://github.com/hashicorp/vault/pull/11907)] +* storage/raft: change freelist type to map and set nofreelistsync to true [[GH-11895](https://github.com/hashicorp/vault/pull/11895)] +* storage/raft: Switch to shared raft-boltdb library and add boltdb metrics [[GH-11269](https://github.com/hashicorp/vault/pull/11269)] +* storage/raft: Support autopilot for HA only raft storage. [[GH-11260](https://github.com/hashicorp/vault/pull/11260)] +* storage/raft (enterprise): Enable Autopilot on DR secondary clusters +* ui: Add Validation to KV secret engine [[GH-11785](https://github.com/hashicorp/vault/pull/11785)] +* ui: Add database secret engine support for MSSQL [[GH-11231](https://github.com/hashicorp/vault/pull/11231)] +* ui: Add push notification message when selecting okta auth. [[GH-11442](https://github.com/hashicorp/vault/pull/11442)] +* ui: Add regex validation to Transform Template pattern input [[GH-11586](https://github.com/hashicorp/vault/pull/11586)] +* ui: Add specific error message if unseal fails due to license [[GH-11705](https://github.com/hashicorp/vault/pull/11705)] +* ui: Add validation support for open api form fields [[GH-11963](https://github.com/hashicorp/vault/pull/11963)] +* ui: Added auth method descriptions to UI login page [[GH-11795](https://github.com/hashicorp/vault/pull/11795)] +* ui: JSON fields on database can be cleared on edit [[GH-11708](https://github.com/hashicorp/vault/pull/11708)] +* ui: Obscure secret values on input and displayOnly fields like certificates. [[GH-11284](https://github.com/hashicorp/vault/pull/11284)] +* ui: Redesign of KV 2 Delete toolbar. [[GH-11530](https://github.com/hashicorp/vault/pull/11530)] +* ui: Replace tool partials with components. 
[[GH-11672](https://github.com/hashicorp/vault/pull/11672)]
+* ui: Show description on secret engine list [[GH-11995](https://github.com/hashicorp/vault/pull/11995)]
+* ui: Update ember to latest LTS and upgrade UI dependencies [[GH-11447](https://github.com/hashicorp/vault/pull/11447)]
+* ui: Update partials to components [[GH-11680](https://github.com/hashicorp/vault/pull/11680)]
+* ui: Updated ivy code mirror component for consistency [[GH-11500](https://github.com/hashicorp/vault/pull/11500)]
+* ui: Updated node to v14, latest stable build [[GH-12049](https://github.com/hashicorp/vault/pull/12049)]
+* ui: Updated search select component styling [[GH-11360](https://github.com/hashicorp/vault/pull/11360)]
+* ui: add transform secrets engine to features list [[GH-12003](https://github.com/hashicorp/vault/pull/12003)]
+* ui: add validations for duplicate path kv engine [[GH-11878](https://github.com/hashicorp/vault/pull/11878)]
+* ui: show site-wide banners for license warnings if applicable [[GH-11759](https://github.com/hashicorp/vault/pull/11759)]
+* ui: update license page with relevant autoload info [[GH-11778](https://github.com/hashicorp/vault/pull/11778)]
+
+DEPRECATIONS:
+
+* secrets/gcp: Deprecated the `/gcp/token/:roleset` and `/gcp/key/:roleset` paths for generating
+  secrets for rolesets. Use `/gcp/roleset/:roleset/token` and `/gcp/roleset/:roleset/key` instead. [[GH-12023](https://github.com/hashicorp/vault/pull/12023)]
+
+BUG FIXES:
+
+* activity: Omit wrapping tokens and control groups from client counts [[GH-11826](https://github.com/hashicorp/vault/pull/11826)]
+* agent/cert: Fix issue where the API client on agent was not honoring certificate
+  information from the auto-auth config map on renewals or retries. [[GH-11576](https://github.com/hashicorp/vault/pull/11576)]
+* agent/template: fix command shell quoting issue [[GH-11838](https://github.com/hashicorp/vault/pull/11838)]
+* agent: Fixed agent templating to use configured tls servername values [[GH-11288](https://github.com/hashicorp/vault/pull/11288)]
+* agent: fix timestamp format in log messages from the templating engine [[GH-11838](https://github.com/hashicorp/vault/pull/11838)]
+* auth/approle: fixing dereference of nil pointer [[GH-11864](https://github.com/hashicorp/vault/pull/11864)]
+* auth/jwt: Updates the [hashicorp/cap](https://github.com/hashicorp/cap) library to `v0.1.0` to
+  bring in a verification key caching fix. [[GH-11784](https://github.com/hashicorp/vault/pull/11784)]
+* auth/kubernetes: Fix AliasLookahead to correctly extract ServiceAccount UID when using ephemeral JWTs [[GH-12073](https://github.com/hashicorp/vault/pull/12073)]
+* auth/ldap: Fix a bug where the LDAP auth method does not return the request_timeout configuration parameter on config read. [[GH-11975](https://github.com/hashicorp/vault/pull/11975)]
+* cli: Add support for response wrapping in `vault list` and `vault kv list` with output format other than `table`. [[GH-12031](https://github.com/hashicorp/vault/pull/12031)]
+* cli: vault delete and vault kv delete now support the same output options (e.g. -format) as vault write. [[GH-11992](https://github.com/hashicorp/vault/pull/11992)]
+* core (enterprise): Fix orphan return value from auth methods executed on performance standby nodes.
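+
+As an illustration of the new `template_config` stanza referenced in this release's CHANGES and IMPROVEMENTS above, a minimal Vault Agent sketch might look as follows (the values shown are illustrative, not recommendations taken from this changelog):
+
+```hcl
+template_config {
+  # Exit agent when template retries are exhausted instead of
+  # restarting the template runner (defaults to false).
+  exit_on_retry_failure = true
+
+  # How often to re-fetch non-leased (static) secrets referenced by templates.
+  static_secret_render_interval = "10m"
+}
+```
+
+Both parameters ship in this release; `exit_on_retry_failure` interacts with `template.error_on_missing_key` as described in the CHANGES entry above.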
+* core (enterprise): Fix plugins mounted in namespaces being unable to use password policies [[GH-11596](https://github.com/hashicorp/vault/pull/11596)]
+* core (enterprise): serialize access to HSM entropy generation to avoid errors in concurrent key generation.
+* core/metrics: Add generic KV mount support for vault.kv.secret.count telemetry metric [[GH-12020](https://github.com/hashicorp/vault/pull/12020)]
+* core: Fix cleanup of storage entries from cubbyholes within namespaces. [[GH-11408](https://github.com/hashicorp/vault/pull/11408)]
+* core: Fix edge cases in the configuration endpoint for barrier key autorotation. [[GH-11541](https://github.com/hashicorp/vault/pull/11541)]
+* core: Fix goroutine leak when updating rate limit quota [[GH-11371](https://github.com/hashicorp/vault/pull/11371)]
+* core (enterprise): Fix panic on DR secondary when there are lease count quotas [[GH-11742](https://github.com/hashicorp/vault/pull/11742)]
+* core: Fix race that allowed remounting on path used by another mount [[GH-11453](https://github.com/hashicorp/vault/pull/11453)]
+* core: Fix storage entry leak when revoking leases created with non-orphan batch tokens. [[GH-11377](https://github.com/hashicorp/vault/pull/11377)]
+* core: Fixed double counting of http requests after operator stepdown [[GH-11970](https://github.com/hashicorp/vault/pull/11970)]
+* core: correct logic for renewal of leases nearing their expiration time. [[GH-11650](https://github.com/hashicorp/vault/pull/11650)]
+* identity: Use correct mount accessor when refreshing external group memberships. [[GH-11506](https://github.com/hashicorp/vault/pull/11506)]
+* mongo-db: default username template now strips invalid '.' characters [[GH-11872](https://github.com/hashicorp/vault/pull/11872)]
+* pki: Only remove revoked entry for certificates during tidy if they are past their NotAfter value [[GH-11367](https://github.com/hashicorp/vault/pull/11367)]
+* replication: Fix panic trying to update walState during identity group invalidation.
+* replication: Fix mounts created within a namespace that was part of an Allow
+  filtering rule not appearing on performance secondary if created after the rule
+  was defined.
+* secret/pki: use case insensitive domain name comparison as per RFC1035 section 2.3.3
+* secret: fix the bug where transit encrypt batch doesn't work with key_version [[GH-11628](https://github.com/hashicorp/vault/pull/11628)]
+* secrets/ad: Forward all creds requests to active node [[GH-76](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/76)] [[GH-11836](https://github.com/hashicorp/vault/pull/11836)]
+* secrets/database/cassandra: Fixed issue where hostnames were not being validated when using TLS [[GH-11365](https://github.com/hashicorp/vault/pull/11365)]
+* secrets/database/cassandra: Fixed issue where the PEM parsing logic of `pem_bundle` and `pem_json` didn't work for CA-only configurations [[GH-11861](https://github.com/hashicorp/vault/pull/11861)]
+* secrets/database/cassandra: Updated default statement for password rotation to allow for special characters. This applies to root and static credentials. [[GH-11262](https://github.com/hashicorp/vault/pull/11262)]
+* secrets/database: Fix marshalling to allow providing numeric arguments to external database plugins. [[GH-11451](https://github.com/hashicorp/vault/pull/11451)]
+* secrets/database: Fixed an issue that prevented external database plugin processes from restarting after a shutdown. 
[[GH-12087](https://github.com/hashicorp/vault/pull/12087)] +* secrets/database: Fixed minor race condition when rotate-root is called [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] +* secrets/database: Fixes issue for V4 database interface where `SetCredentials` wasn't falling back to using `RotateRootCredentials` if `SetCredentials` is `Unimplemented` [[GH-11585](https://github.com/hashicorp/vault/pull/11585)] +* secrets/openldap: Fix bug where schema was not compatible with rotate-root [#24](https://github.com/hashicorp/vault-plugin-secrets-openldap/pull/24) [[GH-12019](https://github.com/hashicorp/vault/pull/12019)] +* storage/dynamodb: Handle throttled batch write requests by retrying, without which writes could be lost. [[GH-10181](https://github.com/hashicorp/vault/pull/10181)] +* storage/raft: Support cluster address change for nodes in a cluster managed by autopilot [[GH-11247](https://github.com/hashicorp/vault/pull/11247)] +* storage/raft: Tweak creation of vault.db file [[GH-12034](https://github.com/hashicorp/vault/pull/12034)] +* storage/raft: leader_tls_servername wasn't used unless leader_ca_cert_file and/or mTLS were configured. [[GH-11252](https://github.com/hashicorp/vault/pull/11252)] +* tokenutil: Perform the num uses check before token type. [[GH-11647](https://github.com/hashicorp/vault/pull/11647)] +* transform (enterprise): Fix an issue with malformed transform configuration + storage when upgrading from 1.5 to 1.6. See Upgrade Notes for 1.6.x. +* ui: Add role from database connection automatically populates the database for new role [[GH-11119](https://github.com/hashicorp/vault/pull/11119)] +* ui: Add root rotation statements support to appropriate database secret engine plugins [[GH-11404](https://github.com/hashicorp/vault/pull/11404)] +* ui: Automatically refresh the page when user logs out [[GH-12035](https://github.com/hashicorp/vault/pull/12035)] +* ui: Fix Version History queryParams on LinkedBlock [[GH-12079](https://github.com/hashicorp/vault/pull/12079)] +* ui: Fix bug where database secret engines with custom names cannot delete connections [[GH-11127](https://github.com/hashicorp/vault/pull/11127)] +* ui: Fix bug where the UI does not recognize version 2 KV until refresh, and fix [object Object] error message [[GH-11258](https://github.com/hashicorp/vault/pull/11258)] +* ui: Fix database role CG access [[GH-12111](https://github.com/hashicorp/vault/pull/12111)] +* ui: Fix date display on expired token notice [[GH-11142](https://github.com/hashicorp/vault/pull/11142)] +* ui: Fix entity group membership and metadata not showing [[GH-11641](https://github.com/hashicorp/vault/pull/11641)] +* ui: Fix error message caused by control group [[GH-11143](https://github.com/hashicorp/vault/pull/11143)] +* ui: Fix footer URL linking to the correct version changelog. 
[[GH-11283](https://github.com/hashicorp/vault/pull/11283)]
+* ui: Fix issue where logging in without namespace input causes error [[GH-11094](https://github.com/hashicorp/vault/pull/11094)]
+* ui: Fix namespace-bug on login [[GH-11182](https://github.com/hashicorp/vault/pull/11182)]
+* ui: Fix status menu not showing on login [[GH-11213](https://github.com/hashicorp/vault/pull/11213)]
+* ui: Fix text link URL on database roles list [[GH-11597](https://github.com/hashicorp/vault/pull/11597)]
+* ui: Fixed and updated lease renewal picker [[GH-11256](https://github.com/hashicorp/vault/pull/11256)]
+* ui: fix control group access for database credential [[GH-12024](https://github.com/hashicorp/vault/pull/12024)]
+* ui: fix issue where select-one option was not showing in secrets database role creation [[GH-11294](https://github.com/hashicorp/vault/pull/11294)]
+* ui: fix oidc login with Safari [[GH-11884](https://github.com/hashicorp/vault/pull/11884)]
+
+## 1.7.10
+### March 3, 2022
+
+SECURITY:
+
+* transform (enterprise): Vault Enterprise (“Vault”) clusters using the tokenization transform feature can expose the tokenization key through the tokenization key configuration endpoint to authorized operators with read permissions on this endpoint. This vulnerability, CVE-2022-25244, was fixed in Vault Enterprise 1.7.10, 1.8.9, and 1.9.4.
+
+BUG FIXES:
+
+* database/mssql: Removed string interpolation on internal queries and replaced them with inline queries using named parameters. [[GH-13799](https://github.com/hashicorp/vault/pull/13799)]
+* ui: Fix issue removing raft storage peer via cli not reflected in UI until refresh [[GH-13098](https://github.com/hashicorp/vault/pull/13098)]
+* ui: Trigger background token self-renewal if inactive and half of TTL has passed [[GH-13950](https://github.com/hashicorp/vault/pull/13950)]
+
+## 1.7.9
+### January 27, 2022
+
+IMPROVEMENTS:
+
+* core: Fixes code scanning alerts [[GH-13667](https://github.com/hashicorp/vault/pull/13667)]
+
+BUG FIXES:
+
+* auth/oidc: Fixes OIDC auth from the Vault UI when using the implicit flow and `form_post` response mode. [[GH-13493](https://github.com/hashicorp/vault/pull/13493)]
+* secrets/gcp: Fixes role bindings for BigQuery dataset resources. [[GH-13735](https://github.com/hashicorp/vault/pull/13735)]
+* ui: Fixes breadcrumb bug for secrets navigation [[GH-13604](https://github.com/hashicorp/vault/pull/13604)]
+* ui: Fixes issue saving KMIP role correctly [[GH-13585](https://github.com/hashicorp/vault/pull/13585)]
+
+## 1.7.8
+### December 21, 2021
+
+CHANGES:
+
+* go: Update go version to 1.16.12 [[GH-13422](https://github.com/hashicorp/vault/pull/13422)]
+
+BUG FIXES:
+
+* auth/aws: Fixes ec2 login no longer supporting DSA signature verification [[GH-12340](https://github.com/hashicorp/vault/pull/12340)]
+* identity: Fix a panic on arm64 platform when doing identity I/O. [[GH-12371](https://github.com/hashicorp/vault/pull/12371)]
+
+## 1.7.7
+### December 9, 2021
+
+SECURITY:
+
+* storage/raft: Integrated Storage backend could be caused to crash by an authenticated user with write permissions to the KV secrets engine. This vulnerability, CVE-2021-45042, was fixed in Vault 1.7.7, 1.8.6, and 1.9.1.
+
+BUG FIXES:
+
+* ha (enterprise): Prevents performance standby nodes from serving and caching stale data immediately after performance standby election completes
+* storage/raft: Fix a panic when trying to store a key > 32KB in a transaction. [[GH-13286](https://github.com/hashicorp/vault/pull/13286)]
+* storage/raft: Fix a panic when trying to write a key > 32KB [[GH-13282](https://github.com/hashicorp/vault/pull/13282)]
+* ui: Fixes issue restoring raft storage snapshot [[GH-13107](https://github.com/hashicorp/vault/pull/13107)]
+* ui: Fixes issue with OIDC auth workflow when using MetaMask Chrome extension [[GH-13133](https://github.com/hashicorp/vault/pull/13133)]
+* ui: Fixes issue with the number of PGP Key inputs not matching the key shares number in the initialization form on change [[GH-13038](https://github.com/hashicorp/vault/pull/13038)]
+
+## 1.7.6
+### November 4, 2021
+
+SECURITY:
+
+* core/identity: Templated ACL policies would always match the first-created entity alias if multiple entity aliases existed for a specified entity and mount combination, potentially resulting in incorrect policy enforcement. This vulnerability, CVE-2021-43998, was fixed in Vault and Vault Enterprise 1.7.6, 1.8.5, and 1.9.0.
+
+BUG FIXES:
+
+* auth/aws: fix config/rotate-root to store new key [[GH-12715](https://github.com/hashicorp/vault/pull/12715)]
+* core/identity: Cleanup alias in the in-memory entity after an alias deletion by ID [[GH-12834](https://github.com/hashicorp/vault/pull/12834)]
+* core/identity: Disallow entity alias creation/update if a conflicting alias exists for the target entity and mount combination [[GH-12747](https://github.com/hashicorp/vault/pull/12747)]
+* core: Fix a deadlock on HA leadership transfer [[GH-12691](https://github.com/hashicorp/vault/pull/12691)]
+* http (enterprise): Always forward internal/counters endpoints from perf standbys to active node
+* kmip (enterprise): Fix handling of custom attributes when servicing GetAttributes requests
+* kmip (enterprise): Fix handling of invalid role parameters within various Vault API calls
+* kmip (enterprise): Forward KMIP register operations to the active node
+* secrets/keymgmt (enterprise): Fix support for Azure Managed HSM Key Vault instances. [[GH-12957](https://github.com/hashicorp/vault/pull/12957)]
+* storage/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. [[GH-12413](https://github.com/hashicorp/vault/pull/12413)]
+* database/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. [[GH-12413](https://github.com/hashicorp/vault/pull/12413)]
+* transform (enterprise): Fix an error where the decode response of an expired token is an empty result rather than an error.
+
+## 1.7.5
+### 29 September 2021
+
+SECURITY:
+
+* core/identity: A Vault user with write permission to an entity alias ID sharing a mount accessor with another user may acquire this other user’s policies by merging their identities. This vulnerability, CVE-2021-41802, was fixed in Vault and Vault Enterprise 1.7.5 and 1.8.4.
+
+IMPROVEMENTS:
+
+* secrets/pki: Allow signing of self-issued certs with a different signature algorithm. [[GH-12514](https://github.com/hashicorp/vault/pull/12514)]
+
+BUG FIXES:
+
+* agent: Avoid possible `unexpected fault address` panic when using persistent cache. [[GH-12534](https://github.com/hashicorp/vault/pull/12534)]
+* core (enterprise): Fix bug where password generation through password policies does not work on namespaces if performed outside a request callback or from an external plugin. 
[[GH-12635](https://github.com/hashicorp/vault/pull/12635)] +* core (enterprise): Only delete quotas on primary cluster. [[GH-12339](https://github.com/hashicorp/vault/pull/12339)] +* identity: Fail alias rename if the resulting (name,accessor) exists already [[GH-12473](https://github.com/hashicorp/vault/pull/12473)] +* raft (enterprise): Fix panic when updating auto-snapshot config +* secrets/db: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [[GH-12563](https://github.com/hashicorp/vault/pull/12563)] +* secrets/openldap: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [#28](https://github.com/hashicorp/vault-plugin-secrets-openldap/pull/28) [[GH-12598](https://github.com/hashicorp/vault/pull/12598)] +* storage/raft: Detect incomplete raft snapshots in api.RaftSnapshot(), and thereby in `vault operator raft snapshot save`. [[GH-12388](https://github.com/hashicorp/vault/pull/12388)] +* ui: Fixed api explorer routing bug [[GH-12354](https://github.com/hashicorp/vault/pull/12354)] + +## 1.7.4 +### 26 August 2021 + +SECURITY: + +* *UI Secret Caching*: The Vault UI erroneously cached and exposed user-viewed secrets between authenticated sessions in a single shared browser, if the browser window / tab was not refreshed or closed between logout and a subsequent login. This vulnerability, CVE-2021-38554, was fixed in Vault 1.8.0 and will be addressed in pending 1.7.4 / 1.6.6 releases. + +CHANGES: + +* Alpine: Docker images for Vault 1.6.6+, 1.7.4+, and 1.8.2+ are built with Alpine 3.14, due to CVE-2021-36159 +* go: Update go version to 1.15.15 [[GH-12411](https://github.com/hashicorp/vault/pull/12411)] + +IMPROVEMENTS: + +* ui: Updated node to v14, latest stable build [[GH-12049](https://github.com/hashicorp/vault/pull/12049)] + +BUG FIXES: + +* replication (enterprise): Fix a panic that could occur when checking the last wal and the log shipper buffer is empty. +* cli: vault debug now puts newlines after every captured log line. [[GH-12175](https://github.com/hashicorp/vault/pull/12175)] +* database/couchbase: change default template to truncate username at 128 characters [[GH-12299](https://github.com/hashicorp/vault/pull/12299)] +* physical/raft: Fix safeio.Rename error when restoring snapshots on windows [[GH-12377](https://github.com/hashicorp/vault/pull/12377)] +* secrets/database/cassandra: Fixed issue where the PEM parsing logic of `pem_bundle` and `pem_json` didn't work for CA-only configurations [[GH-11861](https://github.com/hashicorp/vault/pull/11861)] +* secrets/database: Fixed an issue that prevented external database plugin processes from restarting after a shutdown. 
[[GH-12087](https://github.com/hashicorp/vault/pull/12087)] +* ui: Automatically refresh the page when user logs out [[GH-12035](https://github.com/hashicorp/vault/pull/12035)] +* ui: Fix database role CG access [[GH-12111](https://github.com/hashicorp/vault/pull/12111)] +* ui: Fixes metrics page when read on counter config not allowed [[GH-12348](https://github.com/hashicorp/vault/pull/12348)] +* ui: fix control group access for database credential [[GH-12024](https://github.com/hashicorp/vault/pull/12024)] +* ui: fix oidc login with Safari [[GH-11884](https://github.com/hashicorp/vault/pull/11884)] + +## 1.7.3 +### June 16th, 2021 + +CHANGES: + +* go: Update go version to 1.15.13 [[GH-11857](https://github.com/hashicorp/vault/pull/11857)] + +IMPROVEMENTS: + +* db/cassandra: Added tls_server_name to specify server name for TLS validation [[GH-11820](https://github.com/hashicorp/vault/pull/11820)] +* ui: Add specific error message if unseal fails due to license [[GH-11705](https://github.com/hashicorp/vault/pull/11705)] + +BUG FIXES: + +* auth/jwt: Updates the [hashicorp/cap](https://github.com/hashicorp/cap) library to `v0.1.0` to +bring in a verification key caching fix. [[GH-11784](https://github.com/hashicorp/vault/pull/11784)] +* core (enterprise): serialize access to HSM entropy generation to avoid errors in concurrent key generation. +* secret: fix the bug where transit encrypt batch doesn't work with key_version [[GH-11628](https://github.com/hashicorp/vault/pull/11628)] +* secrets/ad: Forward all creds requests to active node [[GH-76](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/76)] [[GH-11836](https://github.com/hashicorp/vault/pull/11836)] +* tokenutil: Perform the num uses check before token type. [[GH-11647](https://github.com/hashicorp/vault/pull/11647)] + +## 1.7.2 +### May 20th, 2021 + +SECURITY: + +* Non-Expiring Leases: Vault and Vault Enterprise renewed nearly-expiring token +leases and dynamic secret leases with a zero-second TTL, causing them to be +treated as non-expiring, and never revoked. This issue affects Vault and Vault +Enterprise versions 0.10.0 through 1.7.1, and is fixed in 1.5.9, 1.6.5, and +1.7.2 (CVE-2021-32923). + +CHANGES: + +* agent: Update to use IAM Service Account Credentials endpoint for signing JWTs +when using GCP Auto-Auth method [[GH-11473](https://github.com/hashicorp/vault/pull/11473)] +* auth/gcp: Update to v0.9.1 to use IAM Service Account Credentials API for +signing JWTs [[GH-11494](https://github.com/hashicorp/vault/pull/11494)] + +IMPROVEMENTS: + +* api, agent: LifetimeWatcher now does more retries when renewal failures occur. This also impacts Agent auto-auth and leases managed via Agent caching. [[GH-11445](https://github.com/hashicorp/vault/pull/11445)] +* auth/aws: Underlying error included in validation failure message. 
[[GH-11638](https://github.com/hashicorp/vault/pull/11638)] +* http: Add optional HTTP response headers for hostname and raft node ID [[GH-11289](https://github.com/hashicorp/vault/pull/11289)] +* secrets/aws: add ability to provide a role session name when generating STS credentials [[GH-11345](https://github.com/hashicorp/vault/pull/11345)] +* secrets/database/mongodb: Add ability to customize `SocketTimeout`, `ConnectTimeout`, and `ServerSelectionTimeout` [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] +* secrets/database/mongodb: Increased throughput by allowing for multiple request threads to simultaneously update users in MongoDB [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] + +BUG FIXES: + +* agent/cert: Fix issue where the API client on agent was not honoring certificate +information from the auto-auth config map on renewals or retries. [[GH-11576](https://github.com/hashicorp/vault/pull/11576)] +* agent: Fixed agent templating to use configured tls servername values [[GH-11288](https://github.com/hashicorp/vault/pull/11288)] +* core (enterprise): Fix plugins mounted in namespaces being unable to use password policies [[GH-11596](https://github.com/hashicorp/vault/pull/11596)] +* core: correct logic for renewal of leases nearing their expiration time. [[GH-11650](https://github.com/hashicorp/vault/pull/11650)] +* identity: Use correct mount accessor when refreshing external group memberships. [[GH-11506](https://github.com/hashicorp/vault/pull/11506)] +* replication: Fix panic trying to update walState during identity group invalidation. [[GH-1865](https://github.com/hashicorp/vault/pull/1865)] +* secrets/database: Fix marshalling to allow providing numeric arguments to external database plugins. [[GH-11451](https://github.com/hashicorp/vault/pull/11451)] +* secrets/database: Fixed minor race condition when rotate-root is called [[GH-11600](https://github.com/hashicorp/vault/pull/11600)] +* secrets/database: Fixes issue for V4 database interface where `SetCredentials` wasn't falling back to using `RotateRootCredentials` if `SetCredentials` is `Unimplemented` [[GH-11585](https://github.com/hashicorp/vault/pull/11585)] +* secrets/keymgmt (enterprise): Fixes audit logging for the read key response. +* storage/raft: Support cluster address change for nodes in a cluster managed by autopilot [[GH-11247](https://github.com/hashicorp/vault/pull/11247)] +* ui: Fix entity group membership and metadata not showing [[GH-11641](https://github.com/hashicorp/vault/pull/11641)] +* ui: Fix text link URL on database roles list [[GH-11597](https://github.com/hashicorp/vault/pull/11597)] + +## 1.7.1 +### 21 April 2021 + +SECURITY: + +* The PKI Secrets Engine tidy functionality may cause Vault to exclude revoked-but-unexpired certificates from the + Vault CRL. This vulnerability affects Vault and Vault Enterprise 1.5.1 and newer and was fixed in versions + 1.5.8, 1.6.4, and 1.7.1. (CVE-2021-27668) +* The Cassandra Database and Storage backends were not correctly verifying TLS certificates. This issue affects all + versions of Vault and Vault Enterprise and was fixed in versions 1.6.4, and 1.7.1. (CVE-2021-27400) + +CHANGES: + +* go: Update to Go 1.15.11 [[GH-11395](https://github.com/hashicorp/vault/pull/11395)] + +IMPROVEMENTS: + +* auth/jwt: Adds ability to directly provide service account JSON in G Suite provider config. [[GH-11388](https://github.com/hashicorp/vault/pull/11388)] +* core: Add tls_max_version listener config option. 
[[GH-11226](https://github.com/hashicorp/vault/pull/11226)]
+* core: Add metrics for standby node forwarding. [[GH-11366](https://github.com/hashicorp/vault/pull/11366)]
+* core: allow arbitrary length stack traces upon receiving SIGUSR2 (was 32MB) [[GH-11364](https://github.com/hashicorp/vault/pull/11364)]
+* storage/raft: Support autopilot for HA only raft storage. [[GH-11260](https://github.com/hashicorp/vault/pull/11260)]
+
+BUG FIXES:
+
+* core: Fix cleanup of storage entries from cubbyholes within namespaces. [[GH-11408](https://github.com/hashicorp/vault/pull/11408)]
+* core: Fix goroutine leak when updating rate limit quota [[GH-11371](https://github.com/hashicorp/vault/pull/11371)]
+* core: Fix storage entry leak when revoking leases created with non-orphan batch tokens. [[GH-11377](https://github.com/hashicorp/vault/pull/11377)]
+* core: requests forwarded by standby weren't always timed out. [[GH-11322](https://github.com/hashicorp/vault/pull/11322)]
+* pki: Only remove revoked entry for certificates during tidy if they are past their NotAfter value [[GH-11367](https://github.com/hashicorp/vault/pull/11367)]
+* replication: Fix mounts created within a namespace that was part of an Allow
+  filtering rule not appearing on performance secondary if created after the rule
+  was defined.
+* replication: Perf standby nodes on newly enabled DR secondary sometimes couldn't connect to active node with TLS errors. [[GH-1823](https://github.com/hashicorp/vault/pull/1823)]
+* secrets/database/cassandra: Fixed issue where hostnames were not being validated when using TLS [[GH-11365](https://github.com/hashicorp/vault/pull/11365)]
+* secrets/database/cassandra: Updated default statement for password rotation to allow for special characters. This applies to root and static credentials. [[GH-11262](https://github.com/hashicorp/vault/pull/11262)]
+* storage/dynamodb: Handle throttled batch write requests by retrying, without which writes could be lost. [[GH-10181](https://github.com/hashicorp/vault/pull/10181)]
+* storage/raft: leader_tls_servername wasn't used unless leader_ca_cert_file and/or mTLS were configured. [[GH-11252](https://github.com/hashicorp/vault/pull/11252)]
+* storage/raft: using raft for ha_storage with a different storage backend was broken in 1.7.0, now fixed. [[GH-11340](https://github.com/hashicorp/vault/pull/11340)]
+* ui: Add root rotation statements support to appropriate database secret engine plugins [[GH-11404](https://github.com/hashicorp/vault/pull/11404)]
+* ui: Fix bug where the UI does not recognize version 2 KV until refresh, and fix [object Object] error message [[GH-11258](https://github.com/hashicorp/vault/pull/11258)]
+* ui: Fix OIDC bug seen when running on HCP [[GH-11283](https://github.com/hashicorp/vault/pull/11283)]
+* ui: Fix namespace-bug on login [[GH-11182](https://github.com/hashicorp/vault/pull/11182)]
+* ui: Fix status menu not showing on login [[GH-11213](https://github.com/hashicorp/vault/pull/11213)]
+* ui: fix issue where select-one option was not showing in secrets database role creation [[GH-11294](https://github.com/hashicorp/vault/pull/11294)]
+
+## 1.7.0
+### 24 March 2021
+
+CHANGES:
+
+* agent: Failed auto-auth attempts are now throttled by an exponential backoff instead of the
+~2 second retry delay. The maximum backoff may be configured with the new `max_backoff` parameter,
+which defaults to 5 minutes. [[GH-10964](https://github.com/hashicorp/vault/pull/10964)]
+* aws/auth: AWS Auth concepts and endpoints that use the "whitelist" and "blacklist" terms
+have been updated to more inclusive language (e.g. `/auth/aws/identity-whitelist` has been
+updated to `/auth/aws/identity-accesslist`). The old and new endpoints are aliases,
+sharing the same underlying data. The legacy endpoint names are considered **deprecated**
+and will be removed in a future release (not before Vault 1.9). The complete list of
+endpoint changes is available in the [AWS Auth API docs](/api-docs/auth/aws#deprecations-effective-in-vault-1-7).
+* go: Update Go version to 1.15.10 [[GH-11114](https://github.com/hashicorp/vault/pull/11114)] [[GH-11173](https://github.com/hashicorp/vault/pull/11173)]
+
+FEATURES:
+
+* **Aerospike Storage Backend**: Add support for using Aerospike as a storage backend [[GH-10131](https://github.com/hashicorp/vault/pull/10131)]
+* **Autopilot for Integrated Storage**: A set of features has been added to allow for automatic operator-friendly management of Vault servers. This is only applicable when integrated storage is in use.
+  * **Dead Server Cleanup**: Dead servers will periodically be cleaned up and removed from the Raft peer set, to prevent them from interfering with the quorum size and leader elections.
+  * **Server Health Checking**: An API has been added to track the state of servers, including their health.
+  * **New Server Stabilization**: When a new server is added to the cluster, there will be a waiting period where it must be healthy and stable for a certain amount of time before being promoted to a full, voting member.
+* **Tokenization Secrets Engine (Enterprise)**: The Tokenization Secrets Engine is now generally available. We have added support for MySQL, key rotation, and snapshot/restore.
+* replication (enterprise): The log shipper is now memory as well as length bound, and length and size can be separately configured.
+* agent: Support for persisting the agent cache to disk [[GH-10938](https://github.com/hashicorp/vault/pull/10938)]
+* auth/jwt: Adds `max_age` role parameter and `auth_time` claim validation. [[GH-10919](https://github.com/hashicorp/vault/pull/10919)]
+* core (enterprise): X-Vault-Index and related headers can be used by clients to manage eventual consistency.
+* kmip (enterprise): Use entropy augmentation to generate kmip certificates
+* sdk: Private key generation in the certutil package now allows custom io.Readers to be used. [[GH-10653](https://github.com/hashicorp/vault/pull/10653)]
+* secrets/aws: add IAM tagging support for iam_user roles [[GH-10953](https://github.com/hashicorp/vault/pull/10953)]
+* secrets/database/cassandra: Add ability to customize dynamic usernames [[GH-10906](https://github.com/hashicorp/vault/pull/10906)]
+* secrets/database/couchbase: Add ability to customize dynamic usernames [[GH-10995](https://github.com/hashicorp/vault/pull/10995)]
+* secrets/database/mongodb: Add ability to customize dynamic usernames [[GH-10858](https://github.com/hashicorp/vault/pull/10858)]
+* secrets/database/mssql: Add ability to customize dynamic usernames [[GH-10767](https://github.com/hashicorp/vault/pull/10767)]
+* secrets/database/mysql: Add ability to customize dynamic usernames [[GH-10834](https://github.com/hashicorp/vault/pull/10834)]
+* secrets/database/postgresql: Add ability to customize dynamic usernames [[GH-10766](https://github.com/hashicorp/vault/pull/10766)]
+* secrets/db/snowflake: Added support for Snowflake to the Database Secret Engine [[GH-10603](https://github.com/hashicorp/vault/pull/10603)]
+* secrets/keymgmt (enterprise): Adds beta support for distributing and managing keys in AWS KMS.
+* secrets/keymgmt (enterprise): Adds general availability for distributing and managing keys in Azure Key Vault.
+* secrets/openldap: Added dynamic roles to OpenLDAP similar to the combined database engine [[GH-10996](https://github.com/hashicorp/vault/pull/10996)]
+* secrets/terraform: New secret engine for managing Terraform Cloud API tokens [[GH-10931](https://github.com/hashicorp/vault/pull/10931)]
+* ui: Adds check for feature flag on application, and updates namespace toolbar on login if present [[GH-10588](https://github.com/hashicorp/vault/pull/10588)]
+* ui: Adds the wizard to the Database Secret Engine [[GH-10982](https://github.com/hashicorp/vault/pull/10982)]
+* ui: Database secrets engine, supporting MongoDB only [[GH-10655](https://github.com/hashicorp/vault/pull/10655)]
+
+IMPROVEMENTS:
+
+* agent: Add a `vault.retry` stanza that allows specifying number of retries on failure; this applies both to templating and proxied requests. [[GH-11113](https://github.com/hashicorp/vault/pull/11113)]
+* agent: Agent can now run as a Windows service. [[GH-10231](https://github.com/hashicorp/vault/pull/10231)]
+* agent: Better concurrent request handling on identical requests proxied through Agent. [[GH-10705](https://github.com/hashicorp/vault/pull/10705)]
+* agent: Route templating server through cache when persistent cache is enabled. [[GH-10927](https://github.com/hashicorp/vault/pull/10927)]
+* agent: change auto-auth to preload an existing token on start [[GH-10850](https://github.com/hashicorp/vault/pull/10850)]
+* auth/approle: Secret ID generation endpoint now returns `secret_id_ttl` as part of its response. [[GH-10826](https://github.com/hashicorp/vault/pull/10826)]
+* auth/ldap: Improve consistency in error messages [[GH-10537](https://github.com/hashicorp/vault/pull/10537)]
+* auth/okta: Adds support for Okta Verify TOTP MFA. [[GH-10942](https://github.com/hashicorp/vault/pull/10942)]
+* changelog: Add dependencies listed in dependencies/2-25-21 [[GH-11015](https://github.com/hashicorp/vault/pull/11015)]
+* command/debug: Now collects logs (at level `trace`) as a periodic output. [[GH-10609](https://github.com/hashicorp/vault/pull/10609)]
+* core (enterprise): "vault status" command works when a namespace is set. 
+* core (enterprise): Update Trial Enterprise license duration from 30 minutes to 6 hours
+* core/metrics: Added "vault operator usage" command. [[GH-10365](https://github.com/hashicorp/vault/pull/10365)]
+* core/metrics: New telemetry metrics reporting lease expirations by time interval and namespace [[GH-10375](https://github.com/hashicorp/vault/pull/10375)]
+* core: Added an "active since" timestamp to the status output of active nodes. [[GH-10489](https://github.com/hashicorp/vault/pull/10489)]
+* core: Check audit device with a test message before adding it. [[GH-10520](https://github.com/hashicorp/vault/pull/10520)]
+* core: Track barrier encryption count and automatically rotate after a large number of operations or on a schedule [[GH-10774](https://github.com/hashicorp/vault/pull/10774)]
+* core: add metrics for active entity count [[GH-10514](https://github.com/hashicorp/vault/pull/10514)]
+* core: add partial month client count API [[GH-11022](https://github.com/hashicorp/vault/pull/11022)]
+* core: dev mode listener allows unauthenticated sys/metrics requests [[GH-10992](https://github.com/hashicorp/vault/pull/10992)]
+* core: reduce memory used by leases [[GH-10726](https://github.com/hashicorp/vault/pull/10726)]
+* secrets/gcp: Truncate ServiceAccount display names longer than 100 characters. [[GH-10558](https://github.com/hashicorp/vault/pull/10558)]
+* storage/raft (enterprise): Listing of peers is now allowed on DR secondary
+cluster nodes, as an update operation that takes in a DR operation token to
+authenticate the request.
+* transform (enterprise): Improve FPE transformation performance
+* transform (enterprise): Use transactions with batch tokenization operations for improved performance
+* ui: Clarify language on usage metrics page empty state [[GH-10951](https://github.com/hashicorp/vault/pull/10951)]
+* ui: Customize MongoDB input fields on Database Secrets Engine [[GH-10949](https://github.com/hashicorp/vault/pull/10949)]
+* ui: Upgrade Ember-cli from 3.8 to 3.22. [[GH-9972](https://github.com/hashicorp/vault/pull/9972)]
+* ui: Upgrade Storybook from 5.3.19 to 6.1.17. [[GH-10904](https://github.com/hashicorp/vault/pull/10904)]
+* ui: Upgrade date-fns from 1.3.0 to 2.16.1. [[GH-10848](https://github.com/hashicorp/vault/pull/10848)]
+* ui: Upgrade dependencies to resolve potential JS vulnerabilities [[GH-10677](https://github.com/hashicorp/vault/pull/10677)]
+* ui: better errors on Database secrets engine role create [[GH-10980](https://github.com/hashicorp/vault/pull/10980)]
+
+BUG FIXES:
+
+* agent: Only set the namespace if the VAULT_NAMESPACE env var isn't present [[GH-10556](https://github.com/hashicorp/vault/pull/10556)]
+* agent: Set TokenParent correctly in the Index to be cached. [[GH-10833](https://github.com/hashicorp/vault/pull/10833)]
+* agent: Set namespace for template server in agent. [[GH-10757](https://github.com/hashicorp/vault/pull/10757)]
+* api/sys/config/ui: Fixes issue where multiple UI custom header values are ignored and only the first given value is used [[GH-10490](https://github.com/hashicorp/vault/pull/10490)]
+* api: Fixes CORS API methods that were outdated and invalid [[GH-10444](https://github.com/hashicorp/vault/pull/10444)]
+* auth/jwt: Fixes `bound_claims` validation for provider-specific group and user info fetching. [[GH-10546](https://github.com/hashicorp/vault/pull/10546)]
+* auth/jwt: Fixes an issue where JWT verification keys weren't updated after a `jwks_url` change. [[GH-10919](https://github.com/hashicorp/vault/pull/10919)]
+* auth/jwt: Fixes an issue where `jwt_supported_algs` were not being validated for JWT auth using
+`jwks_url` and `jwt_validation_pubkeys`. [[GH-10919](https://github.com/hashicorp/vault/pull/10919)]
+* auth/oci: Fixes alias name to use the role name, and not the literal string `name` [[GH-10](https://github.com/hashicorp/vault-plugin-auth-oci/pull/10)] [[GH-10952](https://github.com/hashicorp/vault/pull/10952)]
+* consul-template: Update consul-template vendor version and associated dependencies to master,
+pulling in https://github.com/hashicorp/consul-template/pull/1447 [[GH-10756](https://github.com/hashicorp/vault/pull/10756)]
+* core (enterprise): Limit entropy augmentation during token generation to root tokens. [[GH-10487](https://github.com/hashicorp/vault/pull/10487)]
+* core (enterprise): Vault EGP policies attached to path `*` were not correctly scoped to the namespace.
+* core/identity: Fix deadlock in entity merge endpoint. [[GH-10877](https://github.com/hashicorp/vault/pull/10877)]
+* core: Avoid disclosing IP addresses in the errors of unauthenticated requests [[GH-10579](https://github.com/hashicorp/vault/pull/10579)]
+* core: Fix client.Clone() to include the address [[GH-10077](https://github.com/hashicorp/vault/pull/10077)]
+* core: Fix duplicate quotas on performance standby nodes. [[GH-10855](https://github.com/hashicorp/vault/pull/10855)]
+* core: Fix rate limit resource quota migration from 1.5.x to 1.6.x by ensuring `purgeInterval` and
+`staleAge` are set appropriately. [[GH-10536](https://github.com/hashicorp/vault/pull/10536)]
+* core: Make all APIs that report init status consistent, and make them report
+initialized=true when a Raft join is in progress. [[GH-10498](https://github.com/hashicorp/vault/pull/10498)]
+* core: Make the response to an unauthenticated request to sys/internal endpoints consistent regardless of mount existence. [[GH-10650](https://github.com/hashicorp/vault/pull/10650)]
+* core: Turn off case sensitivity for the allowed entity alias check during the token create operation. [[GH-10743](https://github.com/hashicorp/vault/pull/10743)]
+* http: change max_request_size to be unlimited when the config value is less than 0 [[GH-10072](https://github.com/hashicorp/vault/pull/10072)]
+* license: Fix license caching issue that prevented new licenses from being picked up by the license manager [[GH-10424](https://github.com/hashicorp/vault/pull/10424)]
+* metrics: Protect emitMetrics from panicking during post-seal [[GH-10708](https://github.com/hashicorp/vault/pull/10708)]
+* quotas/rate-limit: Fix quotas enforcing old rate limit quota paths [[GH-10689](https://github.com/hashicorp/vault/pull/10689)]
+* replication (enterprise): Fix bug with not starting merkle sync while requests are in progress
+* secrets/database/influxdb: Fix issue where not all errors from InfluxDB were being handled [[GH-10384](https://github.com/hashicorp/vault/pull/10384)]
+* secrets/database/mysql: Fixes issue where the DisplayName within generated usernames was the incorrect length [[GH-10433](https://github.com/hashicorp/vault/pull/10433)]
+* secrets/database: Sanitize `private_key` field when reading database plugin config [[GH-10416](https://github.com/hashicorp/vault/pull/10416)]
+* secrets/gcp: Fix issue with account and iam_policy roleset WALs not being removed after attempts when GCP project no longer exists [[GH-10759](https://github.com/hashicorp/vault/pull/10759)]
+* secrets/transit: allow for null string to be used for optional parameters in encrypt and decrypt [[GH-10386](https://github.com/hashicorp/vault/pull/10386)]
+* serviceregistration: Fix race during shutdown of Consul service registration. [[GH-10901](https://github.com/hashicorp/vault/pull/10901)]
+* storage/raft (enterprise): Automated snapshots with Azure required specifying
+`azure_blob_environment`, which now defaults to `AZUREPUBLICCLOUD`.
+* storage/raft (enterprise): Reading a non-existent auto snapshot config now returns 404.
+* storage/raft (enterprise): The parameter `aws_s3_server_kms_key` was misnamed and
+didn't work. It has been renamed to `aws_s3_kms_key`; when provided, the given key
+is now used to encrypt the snapshot using AWS KMS.
+* transform (enterprise): Fix bug in tokenization handling of metadata on exportable stores
+* transform (enterprise): Fix bug where tokenization store changes are persisted but don't take effect
+* transform (enterprise): Fix transform configuration not handling `stores` parameter on the legacy path
+* transform (enterprise): Make expiration timestamps human readable
+* transform (enterprise): Return false for invalid tokens on the validate endpoint rather than returning an HTTP error
+* ui: Adding a role from a database connection now automatically populates the database for the new role [[GH-11119](https://github.com/hashicorp/vault/pull/11119)]
+* ui: Fix bug in Transform secret engine when a new role is added and then removed from a transformation [[GH-10417](https://github.com/hashicorp/vault/pull/10417)]
+* ui: Fix bug that double encodes secret route when there are spaces in the path and makes you unable to view the version history. [[GH-10596](https://github.com/hashicorp/vault/pull/10596)]
+* ui: Fix expected response from feature-flags endpoint [[GH-10684](https://github.com/hashicorp/vault/pull/10684)]
+* ui: Fix footer URL linking to the correct version changelog. [[GH-10491](https://github.com/hashicorp/vault/pull/10491)]
+
+DEPRECATIONS:
+* aws/auth: AWS Auth endpoints that use the "whitelist" and "blacklist" terms have been deprecated.
+Refer to the CHANGES section for additional details.
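+
+As an illustration of the new agent `vault.retry` stanza from this release's IMPROVEMENTS, here is a minimal Vault Agent config sketch; the address and retry count are illustrative values, not defaults:
+
+```hcl
+# Vault Agent configuration (sketch). The retry stanza applies both to
+# template rendering and to requests proxied through the Agent.
+vault {
+  address = "https://vault.example.com:8200"
+
+  retry {
+    num_retries = 5 # attempts made on failure
+  }
+}
+```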
+
+## 1.6.7
+### 29 September 2021
+
+BUG FIXES:
+
+* core (enterprise): Fix bug where password generation through password policies does not work on namespaces if performed outside a request callback or from an external plugin. [[GH-12635](https://github.com/hashicorp/vault/pull/12635)]
+* core (enterprise): Only delete quotas on primary cluster. [[GH-12339](https://github.com/hashicorp/vault/pull/12339)]
+* secrets/db: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [[GH-12563](https://github.com/hashicorp/vault/pull/12563)]
+* secrets/openldap: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [#28](https://github.com/hashicorp/vault-plugin-secrets-openldap/pull/28) [[GH-12597](https://github.com/hashicorp/vault/pull/12597)]
+
+## 1.6.6
+### 26 August 2021
+
+SECURITY:
+
+* *UI Secret Caching*: The Vault UI erroneously cached and exposed user-viewed secrets between authenticated sessions in a single shared browser, if the browser window / tab was not refreshed or closed between logout and a subsequent login. This vulnerability, CVE-2021-38554, was fixed in Vault 1.8.0 and is addressed in the 1.7.4 and 1.6.6 releases.
+
+CHANGES:
+
+* Alpine: Docker images for Vault 1.6.6+, 1.7.4+, and 1.8.2+ are built with Alpine 3.14, due to CVE-2021-36159
+* go: Update go version to 1.15.15 [[GH-12423](https://github.com/hashicorp/vault/pull/12423)]
+
+IMPROVEMENTS:
+
+* db/cassandra: Added tls_server_name to specify server name for TLS validation [[GH-11820](https://github.com/hashicorp/vault/pull/11820)]
+
+BUG FIXES:
+
+* physical/raft: Fix safeio.Rename error when restoring snapshots on Windows [[GH-12377](https://github.com/hashicorp/vault/pull/12377)]
+* secret: Fix bug where transit batch encryption didn't work with key_version [[GH-11628](https://github.com/hashicorp/vault/pull/11628)]
+* secrets/database: Fixed an issue that prevented external database plugin processes from restarting after a shutdown. [[GH-12087](https://github.com/hashicorp/vault/pull/12087)]
+* ui: Automatically refresh the page when user logs out [[GH-12035](https://github.com/hashicorp/vault/pull/12035)]
+* ui: Fixes metrics page when reading the counter config is not allowed [[GH-12348](https://github.com/hashicorp/vault/pull/12348)]
+* ui: Fix OIDC login with Safari [[GH-11884](https://github.com/hashicorp/vault/pull/11884)]
+
+## 1.6.5
+### May 20th, 2021
+
+SECURITY:
+
+* Non-Expiring Leases: Vault and Vault Enterprise renewed nearly-expiring token
+leases and dynamic secret leases with a zero-second TTL, causing them to be
+treated as non-expiring, and never revoked. This issue affects Vault and Vault
+Enterprise versions 0.10.0 through 1.7.1, and is fixed in 1.5.9, 1.6.5, and
+1.7.2 (CVE-2021-32923).
+
+CHANGES:
+
+* agent: Update to use IAM Service Account Credentials endpoint for signing JWTs
+when using GCP Auto-Auth method [[GH-11473](https://github.com/hashicorp/vault/pull/11473)]
+* auth/gcp: Update to v0.8.1 to use IAM Service Account Credentials API for
+signing JWTs [[GH-11498](https://github.com/hashicorp/vault/pull/11498)]
+
+BUG FIXES:
+
+* core (enterprise): Fix plugins mounted in namespaces being unable to use password policies [[GH-11596](https://github.com/hashicorp/vault/pull/11596)]
+* core: correct logic for renewal of leases nearing their expiration time. [[GH-11650](https://github.com/hashicorp/vault/pull/11650)]
+* secrets/database: Fix marshalling to allow providing numeric arguments to external database plugins. [[GH-11451](https://github.com/hashicorp/vault/pull/11451)]
+* secrets/database: Fixes issue for V4 database interface where `SetCredentials` wasn't falling back to using `RotateRootCredentials` if `SetCredentials` is `Unimplemented` [[GH-11585](https://github.com/hashicorp/vault/pull/11585)]
+* ui: Fix namespace bug on login [[GH-11182](https://github.com/hashicorp/vault/pull/11182)]
+
+## 1.6.4
+### 21 April 2021
+
+SECURITY:
+
+* The PKI Secrets Engine tidy functionality may cause Vault to exclude revoked-but-unexpired certificates from the
+  Vault CRL. This vulnerability affects Vault and Vault Enterprise 1.5.1 and newer and was fixed in versions
+  1.5.8, 1.6.4, and 1.7.1. (CVE-2021-27668)
+* The Cassandra Database and Storage backends were not correctly verifying TLS certificates. This issue affects all
+  versions of Vault and Vault Enterprise and was fixed in versions 1.6.4, and 1.7.1. (CVE-2021-27400)
+
+CHANGES:
+
+* go: Update to Go 1.15.11 [[GH-11396](https://github.com/hashicorp/vault/pull/11396)]
+
+IMPROVEMENTS:
+
+* command/debug: Now collects logs (at level `trace`) as a periodic output. [[GH-10609](https://github.com/hashicorp/vault/pull/10609)]
+* core: Add `tls_max_version` listener config option (see the sketch below). [[GH-11226](https://github.com/hashicorp/vault/pull/11226)]
+* core: allow arbitrary length stack traces upon receiving SIGUSR2 (was 32MB) [[GH-11364](https://github.com/hashicorp/vault/pull/11364)]
+
+BUG FIXES:
+
+* core: Fix cleanup of storage entries from cubbyholes within namespaces. [[GH-11408](https://github.com/hashicorp/vault/pull/11408)]
+* core: Fix goroutine leak when updating rate limit quota [[GH-11371](https://github.com/hashicorp/vault/pull/11371)]
+* core: Fix storage entry leak when revoking leases created with non-orphan batch tokens. [[GH-11377](https://github.com/hashicorp/vault/pull/11377)]
+* pki: Only remove revoked entry for certificates during tidy if they are past their NotAfter value [[GH-11367](https://github.com/hashicorp/vault/pull/11367)]
+* pki: Preserve ordering of all DN attribute values when issuing certificates [[GH-11259](https://github.com/hashicorp/vault/pull/11259)]
+* replication: Fix: mounts created within a namespace that was part of an Allow
+  filtering rule would not appear on a performance secondary if created after the rule
+  was defined.
+* secrets/database/cassandra: Fixed issue where hostnames were not being validated when using TLS [[GH-11365](https://github.com/hashicorp/vault/pull/11365)]
+* storage/raft: `leader_tls_servername` wasn't used unless `leader_ca_cert_file` and/or mTLS were configured. [[GH-11252](https://github.com/hashicorp/vault/pull/11252)]
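+
+A minimal listener sketch using the new `tls_max_version` option from the IMPROVEMENTS above; the address and file paths are illustrative:
+
+```hcl
+listener "tcp" {
+  address       = "0.0.0.0:8200"
+  tls_cert_file = "/etc/vault/tls/vault.crt"
+  tls_key_file  = "/etc/vault/tls/vault.key"
+
+  tls_min_version = "tls12"
+  tls_max_version = "tls12" # caps the maximum TLS version Vault will negotiate
+}
+```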
+
+## 1.6.3
+### February 25, 2021
+
+SECURITY:
+
+* Limited Unauthenticated License Metadata Read: We addressed a security vulnerability that allowed for the unauthenticated
+reading of Vault license metadata from DR Secondaries. This vulnerability affects Vault Enterprise and is
+fixed in 1.6.3 (CVE-2021-27668).
+
+CHANGES:
+
+* secrets/mongodbatlas: Move from whitelist to access list API [[GH-10966](https://github.com/hashicorp/vault/pull/10966)]
+
+IMPROVEMENTS:
+
+* ui: Clarify language on usage metrics page empty state [[GH-10951](https://github.com/hashicorp/vault/pull/10951)]
+
+BUG FIXES:
+
+* auth/kubernetes: Cancel API calls to TokenReview endpoint when request context
+is closed [[GH-10930](https://github.com/hashicorp/vault/pull/10930)]
+* core/identity: Fix deadlock in entity merge endpoint. [[GH-10877](https://github.com/hashicorp/vault/pull/10877)]
+* quotas: Fix duplicate quotas on performance standby nodes. [[GH-10855](https://github.com/hashicorp/vault/pull/10855)]
+* quotas/rate-limit: Fix quotas enforcing old rate limit quota paths [[GH-10689](https://github.com/hashicorp/vault/pull/10689)]
+* replication (enterprise): Don't write request count data on DR Secondaries.
+Fixes DR Secondaries becoming out of sync approximately every 30s. [[GH-10970](https://github.com/hashicorp/vault/pull/10970)]
+* secrets/azure (enterprise): Forward service principal credential creation to the
+primary cluster if called on a performance standby or performance secondary. [[GH-10902](https://github.com/hashicorp/vault/pull/10902)]
+
+## 1.6.2
+### January 29, 2021
+
+SECURITY:
+
+* IP Address Disclosure: We fixed a vulnerability where, under some error
+conditions, Vault would return an error message disclosing internal IP
+addresses. This vulnerability affects Vault and Vault Enterprise and is fixed in
+1.6.2 (CVE-2021-3024).
+* Limited Unauthenticated Remove Peer: As of Vault 1.6, the remove-peer command
+on DR secondaries did not require authentication. This issue impacts the
+stability of HA architecture, as a bad actor could remove all standby
+nodes from a DR secondary. This issue affects Vault Enterprise 1.6.0 and 1.6.1, and is fixed in
+1.6.2 (CVE-2021-3282).
+* Mount Path Disclosure: Vault previously returned different HTTP status codes for
+existent and non-existent mount paths. This behavior would allow unauthenticated
+brute force attacks to reveal which paths had valid mounts. This issue affects
+Vault and Vault Enterprise and is fixed in 1.6.2 (CVE-2020-25594).
+
+CHANGES:
+
+* go: Update go version to 1.15.7 [[GH-10730](https://github.com/hashicorp/vault/pull/10730)]
+
+FEATURES:
+
+* ui: Adds check for feature flag on application, and updates namespace toolbar on login if present [[GH-10588](https://github.com/hashicorp/vault/pull/10588)]
+
+IMPROVEMENTS:
+
+* core (enterprise): "vault status" command works when a namespace is set. [[GH-10725](https://github.com/hashicorp/vault/pull/10725)]
+* core: reduce memory used by leases [[GH-10726](https://github.com/hashicorp/vault/pull/10726)]
+* storage/raft (enterprise): Listing of peers is now allowed on DR secondary
+cluster nodes, as an update operation that takes in a DR operation token to
+authenticate the request.
+* core: allow setting `tls_servername` for raft retry/auto-join (see the sketch at the end of this release's notes) [[GH-10698](https://github.com/hashicorp/vault/pull/10698)]
+
+BUG FIXES:
+
+* agent: Set namespace for template server in agent. [[GH-10757](https://github.com/hashicorp/vault/pull/10757)]
+* core: Make the response to an unauthenticated request to sys/internal endpoints consistent regardless of mount existence. [[GH-10650](https://github.com/hashicorp/vault/pull/10650)]
+* metrics: Protect emitMetrics from panicking during post-seal [[GH-10708](https://github.com/hashicorp/vault/pull/10708)]
+* secrets/gcp: Fix issue with account and iam_policy roleset WALs not being removed after attempts when GCP project no longer exists [[GH-10759](https://github.com/hashicorp/vault/pull/10759)]
+* storage/raft (enterprise): Automated snapshots with Azure required specifying
+`azure_blob_environment`, which now defaults to `AZUREPUBLICCLOUD`.
+* storage/raft (enterprise): Auto-snapshot config and storage weren't excluded from
+performance replication, causing conflicts and errors.
+* ui: Fix bug that double encodes secret route when there are spaces in the path and makes you unable to view the version history. [[GH-10596](https://github.com/hashicorp/vault/pull/10596)]
+* ui: Fix expected response from feature-flags endpoint [[GH-10684](https://github.com/hashicorp/vault/pull/10684)]
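+
+A sketch of a raft `retry_join` stanza using the `tls_servername` support added above; the addresses, paths, and server name are illustrative:
+
+```hcl
+storage "raft" {
+  path    = "/opt/vault/data"
+  node_id = "node-2"
+
+  retry_join {
+    leader_api_addr       = "https://10.0.1.10:8200"
+    leader_ca_cert_file   = "/etc/vault/tls/ca.pem"
+    leader_tls_servername = "vault.example.com" # SNI presented when joining the cluster
+  }
+}
+```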
+
+## 1.6.1
+### December 16, 2020
+
+SECURITY:
+
+* LDAP Auth Method: We addressed an issue where error messages returned by the
+  LDAP auth method allowed user enumeration [[GH-10537](https://github.com/hashicorp/vault/pull/10537)]. This vulnerability affects Vault OSS and Vault
+  Enterprise and is fixed in 1.5.6 and 1.6.1 (CVE-2020-35177).
+* Sentinel EGP: We've fixed incorrect handling of namespace paths to prevent
+  users within namespaces from applying Sentinel EGP policies to paths above
+  their namespace. This vulnerability affects Vault Enterprise and is fixed in
+  1.5.6 and 1.6.1 (CVE-2020-35453).
+
+IMPROVEMENTS:
+
+* auth/ldap: Improve consistency in error messages [[GH-10537](https://github.com/hashicorp/vault/pull/10537)]
+* core/metrics: Added "vault operator usage" command. [[GH-10365](https://github.com/hashicorp/vault/pull/10365)]
+* secrets/gcp: Truncate ServiceAccount display names longer than 100 characters. [[GH-10558](https://github.com/hashicorp/vault/pull/10558)]
+
+BUG FIXES:
+
+* agent: Only set the namespace if the VAULT_NAMESPACE env var isn't present [[GH-10556](https://github.com/hashicorp/vault/pull/10556)]
+* auth/jwt: Fixes `bound_claims` validation for provider-specific group and user info fetching. [[GH-10546](https://github.com/hashicorp/vault/pull/10546)]
+* core (enterprise): Vault EGP policies attached to path `*` were not correctly scoped to the namespace.
+* core: Avoid deadlocks by ensuring that if grabLockOrStop returns stopped=true, the lock will not be held. [[GH-10456](https://github.com/hashicorp/vault/pull/10456)]
+* core: Fix client.Clone() to include the address [[GH-10077](https://github.com/hashicorp/vault/pull/10077)]
+* core: Fix rate limit resource quota migration from 1.5.x to 1.6.x by ensuring `purgeInterval` and
+`staleAge` are set appropriately. [[GH-10536](https://github.com/hashicorp/vault/pull/10536)]
+* core: Make all APIs that report init status consistent, and make them report
+initialized=true when a Raft join is in progress. [[GH-10498](https://github.com/hashicorp/vault/pull/10498)]
+* secrets/database/influxdb: Fix issue where not all errors from InfluxDB were being handled [[GH-10384](https://github.com/hashicorp/vault/pull/10384)]
+* secrets/database/mysql: Fixes issue where the DisplayName within generated usernames was the incorrect length [[GH-10433](https://github.com/hashicorp/vault/pull/10433)]
+* secrets/database: Sanitize `private_key` field when reading database plugin config [[GH-10416](https://github.com/hashicorp/vault/pull/10416)]
+* secrets/transit: allow for null string to be used for optional parameters in encrypt and decrypt [[GH-10386](https://github.com/hashicorp/vault/pull/10386)]
+* storage/raft (enterprise): The parameter `aws_s3_server_kms_key` was misnamed and didn't work. It has been renamed to `aws_s3_kms_key`; when provided, the given key is now used to encrypt the snapshot using AWS KMS.
+* transform (enterprise): Fix bug in tokenization handling of metadata on exportable stores
+* transform (enterprise): Fix transform configuration not handling `stores` parameter on the legacy path
+* transform (enterprise): Make expiration timestamps human readable
+* transform (enterprise): Return false for invalid tokens on the validate endpoint rather than returning an HTTP error
+* transform (enterprise): Fix bug where tokenization store changes are persisted but don't take effect
+* ui: Fix bug in Transform secret engine when a new role is added and then removed from a transformation [[GH-10417](https://github.com/hashicorp/vault/pull/10417)]
+* ui: Fix footer URL linking to the correct version changelog. [[GH-10491](https://github.com/hashicorp/vault/pull/10491)]
+* ui: Fix radio click on secrets and auth list pages. [[GH-10586](https://github.com/hashicorp/vault/pull/10586)]
+
+## 1.6.0
+### November 11th, 2020
+
+NOTE:
+
+Binaries for 32-bit macOS (i.e. the `darwin_386` build) will no longer be published. This target was dropped in the latest version of the Go compiler.
+
+CHANGES:
+
+* agent: Agent now properly returns a non-zero exit code on error, such as one due to template rendering failure. Using `error_on_missing_key` in the template config will cause agent to immediately exit on failure. In order to make agent properly exit due to continuous failure from template rendering errors, the old behavior of indefinitely restarting the template server is now changed to exit once the default retry attempt of 12 times (with exponential backoff) gets exhausted. [[GH-9670](https://github.com/hashicorp/vault/pull/9670)]
+* token: Periodic tokens generated by auth methods will have the period value stored in their token entries. [[GH-7885](https://github.com/hashicorp/vault/pull/7885)]
+* core: New telemetry metrics reporting mount table size and number of entries [[GH-10201](https://github.com/hashicorp/vault/pull/10201)]
+* go: Updated Go version to 1.15.4 [[GH-10366](https://github.com/hashicorp/vault/pull/10366)]
+
+FEATURES:
+
+* **Couchbase Secrets**: Vault can now manage static and dynamic credentials for Couchbase. [[GH-9664](https://github.com/hashicorp/vault/pull/9664)]
+* **Expanded Password Policy Support**: Custom password policies are now supported for all database engines.
+* **Integrated Storage Auto Snapshots (Enterprise)**: This feature enables an operator to schedule snapshots of the integrated storage backend and ensure those snapshots are persisted elsewhere.
+* **Integrated Storage Cloud Auto Join**: This feature for integrated storage enables Vault nodes running in the cloud to automatically discover and join a Vault cluster via operator-supplied metadata.
+* **Key Management Secrets Engine (Enterprise; Tech Preview)**: This new secrets engine allows securely distributing and managing keys in Azure cloud KMS services.
+* **Seal Migration**: With Vault 1.6, we will support migrating from an auto unseal mechanism to a different mechanism of the same type. For example, if you were using an AWS KMS key to automatically unseal, you can now migrate to a different AWS KMS key.
+* **Tokenization (Enterprise; Tech Preview)**: Tokenization supports creating irreversible “tokens” from sensitive data. Tokens can be used in less secure environments, protecting the original data.
+* **Vault Client Count**: Vault now counts the number of active entities (and non-entity tokens) per month and makes this information available via the "Metrics" section of the UI.
+
+IMPROVEMENTS:
+
+* auth/approle: Role names can now be referenced in templated policies through the `approle.metadata.role_name` property [[GH-9529](https://github.com/hashicorp/vault/pull/9529)]
+* auth/aws: Improve logic check on wildcard `BoundIamPrincipalARNs` and include role name on error messages on check failure [[GH-10036](https://github.com/hashicorp/vault/pull/10036)]
+* auth/jwt: Add support for fetching groups and user information from G Suite during authentication. [[GH-123](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/123)]
+* auth/jwt: Adding EdDSA (ed25519) to supported algorithms [[GH-129](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/129)]
+* auth/jwt: Improve CLI authorization error [[GH-137](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/137)]
+* auth/jwt: Add OIDC namespace_in_state option [[GH-140](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/140)]
+* secrets/transit: fix missing plaintext in bulk decrypt response [[GH-9991](https://github.com/hashicorp/vault/pull/9991)]
+* command/server: Delay informational messages in -dev mode until logs have settled. [[GH-9702](https://github.com/hashicorp/vault/pull/9702)]
+* command/server: Add environment variable support for `disable_mlock`. [[GH-9931](https://github.com/hashicorp/vault/pull/9931)]
+* core/metrics: Add metrics for storage cache [[GH-10079](https://github.com/hashicorp/vault/pull/10079)]
+* core/metrics: Add metrics for leader status [[GH-10147](https://github.com/hashicorp/vault/pull/10147)]
+* physical/azure: Add the ability to use Azure Instance Metadata Service to set the credentials for Azure Blob storage on the backend. [[GH-10189](https://github.com/hashicorp/vault/pull/10189)]
+* sdk/framework: Add a time type for API fields. [[GH-9911](https://github.com/hashicorp/vault/pull/9911)]
+* secrets/database: Added support for password policies to all databases [[GH-9641](https://github.com/hashicorp/vault/pull/9641),
+  [and more](https://github.com/hashicorp/vault/pulls?q=is%3Apr+is%3Amerged+dbpw)]
+* secrets/database/cassandra: Added support for static credential rotation [[GH-10051](https://github.com/hashicorp/vault/pull/10051)]
+* secrets/database/elasticsearch: Added support for static credential rotation [[GH-19](https://github.com/hashicorp/vault-plugin-database-elasticsearch/pull/19)]
+* secrets/database/hanadb: Added support for root credential & static credential rotation [[GH-10142](https://github.com/hashicorp/vault/pull/10142)]
+* secrets/database/hanadb: Default password generation now includes dashes. Custom statements may need to be updated
+  to include quotes around the password field [[GH-10142](https://github.com/hashicorp/vault/pull/10142)]
+* secrets/database/influxdb: Added support for static credential rotation [[GH-10118](https://github.com/hashicorp/vault/pull/10118)]
+* secrets/database/mongodbatlas: Added support for root credential rotation [[GH-14](https://github.com/hashicorp/vault-plugin-database-mongodbatlas/pull/14)]
+* secrets/database/mongodbatlas: Support scopes field in creation statements for MongoDB Atlas database plugin [[GH-15](https://github.com/hashicorp/vault-plugin-database-mongodbatlas/pull/15)]
+* seal/awskms: Add logging during awskms auto-unseal [[GH-9794](https://github.com/hashicorp/vault/pull/9794)]
+* storage/azure: Update SDK library to use [azure-storage-blob-go](https://github.com/Azure/azure-storage-blob-go) since the previous library has been deprecated. [[GH-9577](https://github.com/hashicorp/vault/pull/9577/)]
+* secrets/ad: `rotate-root` now supports POST requests like other secret engines [[GH-70](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/70)]
+* ui: Add UI functionality for the Transform Secret Engine [[GH-9665](https://github.com/hashicorp/vault/pull/9665)]
+* ui: Pricing metrics dashboard [[GH-10049](https://github.com/hashicorp/vault/pull/10049)]
+
+BUG FIXES:
+
+* auth/jwt: Fix bug preventing config edit UI from rendering [[GH-141](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/141)]
+* cli: Don't open or overwrite a raft snapshot file on an unsuccessful `vault operator raft snapshot` [[GH-9894](https://github.com/hashicorp/vault/pull/9894)]
+* core: Implement constant time version of Shamir GF(2^8) math [[GH-9932](https://github.com/hashicorp/vault/pull/9932)]
+* core: Fix resource leak in plugin API (plugin-dependent, not all plugins impacted) [[GH-9557](https://github.com/hashicorp/vault/pull/9557)]
+* core: Fix race involved in enabling certain features via a license change
+* core: Fix error handling in HCL parsing of objects with invalid syntax [[GH-410](https://github.com/hashicorp/hcl/pull/410)]
+* identity: Check for timeouts in entity API [[GH-9925](https://github.com/hashicorp/vault/pull/9925)]
+* secrets/database: Fix handling of TLS options in mongodb connection strings [[GH-9519](https://github.com/hashicorp/vault/pull/9519)]
+* secrets/gcp: Ensure that the IAM policy version is appropriately set after a roleset's bindings have changed. [[GH-93](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/93)]
+* ui: Mask LDAP bindpass while typing [[GH-10087](https://github.com/hashicorp/vault/pull/10087)]
+* ui: Update language in promote dr modal flow [[GH-10155](https://github.com/hashicorp/vault/pull/10155)]
+* ui: Update language on replication primary dashboard for clarity [[GH-10217](https://github.com/hashicorp/vault/pull/10217)]
+* core: Fix bug where updating an existing path quota could introduce a conflict. [[GH-10285](https://github.com/hashicorp/vault/pull/10285)]
+
+## 1.5.9
+### May 20th, 2021
+
+SECURITY:
+
+* Non-Expiring Leases: Vault and Vault Enterprise renewed nearly-expiring token
+leases and dynamic secret leases with a zero-second TTL, causing them to be
+treated as non-expiring, and never revoked. This issue affects Vault and Vault
+Enterprise versions 0.10.0 through 1.7.1, and is fixed in 1.5.9, 1.6.5, and
+1.7.2 (CVE-2021-32923).
+
+CHANGES:
+
+* agent: Update to use IAM Service Account Credentials endpoint for signing JWTs
+when using GCP Auto-Auth method [[GH-11473](https://github.com/hashicorp/vault/pull/11473)]
+* auth/gcp: Update to v0.7.2 to use IAM Service Account Credentials API for
+signing JWTs [[GH-11499](https://github.com/hashicorp/vault/pull/11499)]
+
+BUG FIXES:
+
+* core: correct logic for renewal of leases nearing their expiration time. [[GH-11650](https://github.com/hashicorp/vault/pull/11650)]
+
+## 1.5.8
+### 21 April 2021
+
+SECURITY:
+
+* The PKI Secrets Engine tidy functionality may cause Vault to exclude revoked-but-unexpired certificates from the
+  Vault CRL. This vulnerability affects Vault and Vault Enterprise 1.5.1 and newer and was fixed in versions
+  1.5.8, 1.6.4, and 1.7.1. (CVE-2021-27668)
+
+CHANGES:
+
+* go: Update to Go 1.14.15 [[GH-11397](https://github.com/hashicorp/vault/pull/11397)]
+
+IMPROVEMENTS:
+
+* core: Add `tls_max_version` listener config option. [[GH-11226](https://github.com/hashicorp/vault/pull/11226)]
+
+BUG FIXES:
+
+* core/identity: Fix deadlock in entity merge endpoint. [[GH-10877](https://github.com/hashicorp/vault/pull/10877)]
+* core: Fix cleanup of storage entries from cubbyholes within namespaces. [[GH-11408](https://github.com/hashicorp/vault/pull/11408)]
+* pki: Only remove revoked entry for certificates during tidy if they are past their NotAfter value [[GH-11367](https://github.com/hashicorp/vault/pull/11367)]
+* core: Avoid deadlocks by ensuring that if grabLockOrStop returns stopped=true, the lock will not be held. [[GH-10456](https://github.com/hashicorp/vault/pull/10456)]
+
+## 1.5.7
+### January 29, 2021
+
+SECURITY:
+
+* IP Address Disclosure: We fixed a vulnerability where, under some error
+conditions, Vault would return an error message disclosing internal IP
+addresses. This vulnerability affects Vault and Vault Enterprise and is fixed in
+1.6.2 and 1.5.7 (CVE-2021-3024).
+* Mount Path Disclosure: Vault previously returned different HTTP status codes for
+existent and non-existent mount paths. This behavior would allow unauthenticated
+brute force attacks to reveal which paths had valid mounts. This issue affects
+Vault and Vault Enterprise and is fixed in 1.6.2 and 1.5.7 (CVE-2020-25594).
+
+IMPROVEMENTS:
+
+* storage/raft (enterprise): Listing of peers is now allowed on DR secondary
+cluster nodes, as an update operation that takes in a DR operation token to
+authenticate the request.
+
+BUG FIXES:
+
+* core: Avoid disclosing IP addresses in the errors of unauthenticated requests [[GH-10579](https://github.com/hashicorp/vault/pull/10579)]
+* core: Make the response to an unauthenticated request to sys/internal endpoints consistent regardless of mount existence. [[GH-10650](https://github.com/hashicorp/vault/pull/10650)]
+
+## 1.5.6
+### December 16, 2020
+
+SECURITY:
+
+* LDAP Auth Method: We addressed an issue where error messages returned by the
+  LDAP auth method allowed user enumeration [[GH-10537](https://github.com/hashicorp/vault/pull/10537)]. This vulnerability affects Vault OSS and Vault
+  Enterprise and is fixed in 1.5.6 and 1.6.1 (CVE-2020-35177).
+* Sentinel EGP: We've fixed incorrect handling of namespace paths to prevent
+  users within namespaces from applying Sentinel EGP policies to paths above
+  their namespace. This vulnerability affects Vault Enterprise and is fixed in
+  1.5.6 and 1.6.1 (CVE-2020-35453).
+
+IMPROVEMENTS:
+
+* auth/ldap: Improve consistency in error messages [[GH-10537](https://github.com/hashicorp/vault/pull/10537)]
+
+BUG FIXES:
+
+* core (enterprise): Vault EGP policies attached to path `*` were not correctly scoped to the namespace.
+* core: Fix bug where updating an existing path quota could introduce a conflict. [[GH-10285](https://github.com/hashicorp/vault/pull/10285)]
+* core: Fix client.Clone() to include the address [[GH-10077](https://github.com/hashicorp/vault/pull/10077)]
+* quotas (enterprise): Reset cache before loading quotas in the db during startup
+* secrets/transit: allow for null string to be used for optional parameters in encrypt and decrypt [[GH-10386](https://github.com/hashicorp/vault/pull/10386)]
+
+## 1.5.5
+### October 21, 2020
+
+IMPROVEMENTS:
+
+* auth/aws, core/seal, secret/aws: Set default IMDS timeouts to match AWS SDK [[GH-10133](https://github.com/hashicorp/vault/pull/10133)]
+
+BUG FIXES:
+
+* auth/aws: Restrict region selection when in the aws-us-gov partition to avoid IAM errors [[GH-9947](https://github.com/hashicorp/vault/pull/9947)]
+* core (enterprise): Allow operators to add and remove (Raft) peers in a DR secondary cluster using Integrated Storage.
+* core (enterprise): Add DR operation token to the remove peer API and CLI command (when DR secondary).
+* core (enterprise): Fix deadlock in handling EGP policies
+* core (enterprise): Fix extraneous error messages in DR Cluster
+* secrets/mysql: Conditionally overwrite TLS parameters for MySQL secrets engine [[GH-9729](https://github.com/hashicorp/vault/pull/9729)]
+* secrets/ad: Fix bug where `password_policy` setting was not using correct key when `ad/config` was read [[GH-71](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/71)]
+* ui: Fix issue with listing roles and methods on the same auth methods with different names [[GH-10122](https://github.com/hashicorp/vault/pull/10122)]
+
+## 1.5.4
+### September 24th, 2020
+
+SECURITY:
+
+* Batch Token Expiry: We addressed an issue where batch token leases could outlive their TTL because we were not scheduling the expiration time correctly. This vulnerability affects Vault OSS and Vault Enterprise 1.0 and newer and is fixed in 1.4.7 and 1.5.4 (CVE-2020-25816).
+
+IMPROVEMENTS:
+
+* secrets/pki: Handle expiration of a cert not in storage as a success [[GH-9880](https://github.com/hashicorp/vault/pull/9880)]
+* auth/kubernetes: Add an option to disable defaulting to the local CA cert and service account JWT when running in a Kubernetes pod [[GH-97](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/97)]
+* secrets/gcp: Add check for 403 during rollback to prevent repeated deletion calls [[GH-97](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/97)]
+* core: Disable usage metrics collection on performance standby nodes. [[GH-9966](https://github.com/hashicorp/vault/pull/9966)]
+* credential/aws: Added X-Amz-Content-Sha256 as a default STS request header [[GH-10009](https://github.com/hashicorp/vault/pull/10009)]
+
+BUG FIXES:
+
+* agent: Fix `disable_fast_negotiation` not being set on the auth method when configured by user. [[GH-9892](https://github.com/hashicorp/vault/pull/9892)]
+* core (enterprise): Fix hang when cluster-wide plugin reload cleanup is slow on unseal
+* core (enterprise): Fix an error in cluster-wide plugin reload cleanup following such a reload
+* core: Fix crash when metrics collection encounters zero-length keys in KV store [[GH-9881](https://github.com/hashicorp/vault/pull/9881)]
+* mfa (enterprise): Fix incorrect handling of PingID responses that could result in auth requests failing
+* replication (enterprise): Improve race condition when using a newly created token on a performance standby node
+* replication (enterprise): Only write failover cluster addresses if they've changed
+* ui: fix bug where dropdown for identity/entity management did not reflect the actual policy [[GH-9958](https://github.com/hashicorp/vault/pull/9958)]
+
+## 1.5.3
+### August 27th, 2020
+
+NOTE:
+
+All security content from 1.5.2, 1.5.1, 1.4.5, 1.4.4, 1.3.9, 1.3.8, 1.2.6, and 1.2.5 has been made fully open source, and the git tags for 1.5.3, 1.4.6, 1.3.10, and 1.2.7 will build correctly for open source users.
+
+BUG FIXES:
+
+* auth/aws: Made header handling for IAM authentication more robust
+* secrets/ssh: Fixed a bug with role option for SSH signing algorithm to allow more than RSA signing
+
+## 1.5.2.1
+### August 21st, 2020
+### Enterprise Only
+
+NOTE:
+
+Includes the correct license in the HSM binary.
+
+## 1.5.2
+### August 20th, 2020
+
+NOTE:
+
+OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected.
+
+KNOWN ISSUES:
+
+* AWS IAM logins may return an error depending on the headers sent with the request.
+  For more details and a workaround, see the [1.5.2 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.5.2)
+* In versions 1.2.6, 1.3.9, 1.4.5, and 1.5.2, enterprise licenses on the HSM build were not incorporated correctly - enterprise
+  customers should use 1.2.6.1, 1.3.9.1, 1.4.5.1, and 1.5.2.1.
+
+
+## 1.5.1
+### August 20th, 2020
+
+SECURITY:
+
+* When using the IAM AWS Auth Method, under certain circumstances, values Vault uses to validate identities and roles can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.7.1 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16250) (Discovered by Felix Wilhelm of Google Project Zero)
+* When using the GCP GCE Auth Method, under certain circumstances, values Vault uses to validate GCE VMs can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.8.3 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16251) (Discovered by Felix Wilhelm of Google Project Zero)
+* When using Vault Agent with cert auto-auth and caching enabled, under certain circumstances, clients without permission to access agent's token may retrieve the token without login credentials. This vulnerability affects Vault Agent 1.1.0 and newer and is fixed in 1.5.1 (CVE-2020-17455)
+
+KNOWN ISSUES:
+
+* OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected.
+* AWS IAM logins may return an error depending on the headers sent with the request.
+  For more details and a workaround, see the [1.5.1 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.5.1)
+
+CHANGES:
+
+* pki: The tidy operation will now remove revoked certificates if the parameter `tidy_revoked_certs` is set to `true`. This will result in certificate entries being immediately removed, rather than waiting until their NotAfter time. Note that this only affects certificates that have already been revoked. [[GH-9609](https://github.com/hashicorp/vault/pull/9609)]
+* go: Updated Go version to 1.14.7
+
+IMPROVEMENTS:
+
+* auth/jwt: Add support for fetching groups and user information from G Suite during authentication. [[GH-9574](https://github.com/hashicorp/vault/pull/9574)]
+* auth/jwt: Add EdDSA to supported algorithms. [[GH-129](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/129)]
+* secrets/openldap: Add "ad" schema that allows the engine to correctly rotate AD passwords. [[GH-9740](https://github.com/hashicorp/vault/pull/9740)]
+* pki: Add an `allowed_domains_template` parameter that enables the use of identity templating within the `allowed_domains` parameter. [[GH-8509](https://github.com/hashicorp/vault/pull/8509)]
+* secret/azure: Use write-ahead-logs to cleanup any orphaned Service Principals [[GH-9773](https://github.com/hashicorp/vault/pull/9773)]
+* ui: The Wrap TTL option on the transit engine export action has been updated to use a new component. [[GH-9632](https://github.com/hashicorp/vault/pull/9632)]
+* ui: The Wrap Tool now uses the newest version of the TTL Picker component. [[GH-9691](https://github.com/hashicorp/vault/pull/9691)]
+
+BUG FIXES:
+
+* secrets/gcp: Ensure that the IAM policy version is appropriately set after a roleset's bindings have changed. [[GH-9603](https://github.com/hashicorp/vault/pull/9603)]
+* replication (enterprise): Fix status API output incorrectly stating replication is in `idle` state.
+* replication (enterprise): Use PrimaryClusterAddr if it's been set
+* core: Fix panic when printing over-long info fields at startup [[GH-9681](https://github.com/hashicorp/vault/pull/9681)]
+* core: Seal migration using the new minimal-downtime strategy didn't work properly with performance standbys. [[GH-9690](https://github.com/hashicorp/vault/pull/9690)]
+* core: Vault failed to start when there were non-string values in seal configuration [[GH-9555](https://github.com/hashicorp/vault/pull/9555)]
+* core: Handle a trailing slash in the API address used for enabling replication
+
+## 1.5.0
+### July 21st, 2020
+
+CHANGES:
+
+* audit: Token TTL and issue time are now provided in the auth portion of audit logs. [[GH-9091](https://github.com/hashicorp/vault/pull/9091)]
+* auth/gcp: Changes the default name of the entity alias that gets created to be the role ID for both IAM and GCE authentication. [[GH-99](https://github.com/hashicorp/vault-plugin-auth-gcp/pull/99)]
+* core: Remove the addition of newlines to parsed configuration when using integer/boolean values [[GH-8928](https://github.com/hashicorp/vault/pull/8928)]
+* cubbyhole: Reject reads and writes to an empty ("") path. [[GH-8971](https://github.com/hashicorp/vault/pull/8971)]
+* secrets/azure: Default password generation changed from uuid to cryptographically secure randomized string [[GH-40](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/40)]
+* storage/gcs: The `credentials_file` config option has been removed. The `GOOGLE_APPLICATION_CREDENTIALS` environment variable
+  or default credentials may be used instead [[GH-9424](https://github.com/hashicorp/vault/pull/9424)]
+* storage/raft: The storage configuration now accepts a new `max_entry_size` config that will limit
+  the total size in bytes of any entry committed via raft. It defaults to `"1048576"` (1 MiB); see the sketch at the end of this release's notes. [[GH-9027](https://github.com/hashicorp/vault/pull/9027)]
+* token: Token creation with custom token ID via `id` will no longer allow periods (`.`) as part of the input string.
+  The final generated token value may contain periods, such as the `s.` prefix for service token
+  indication. [[GH-8646](https://github.com/hashicorp/vault/pull/8646/files)]
+* token: Token renewals will now return token policies within the `token_policies`, identity policies within `identity_policies`, and the full policy set within `policies`. [[GH-8535](https://github.com/hashicorp/vault/pull/8535)]
+* go: Updated Go version to 1.14.4
+
+FEATURES:
+
+* **Monitoring**: We have released a Splunk App for Enterprise customers. The app is accompanied by an updated monitoring guide and a few new metrics to enable OSS users to effectively monitor Vault.
+* **Password Policies**: Allows operators to customize how passwords are generated for select secret engines (OpenLDAP, Active Directory, Azure, and RabbitMQ).
+* **Replication UI Improvements**: We have redesigned the replication UI to highlight the state and relationship between primaries and secondaries and improved management workflows, enabling a more holistic understanding of multiple Vault clusters.
+* **Resource Quotas**: As of 1.5, Vault supports specifying a quota to rate limit requests on OSS and Enterprise. Enterprise customers also have access to set quotas on the number of leases that can be generated on a path.
+* **OpenShift Support**: We have updated the Helm charts to allow users to install Vault onto their OpenShift clusters.
+* **Seal Migration**: We have made updates to allow migrations from auto unseal to Shamir unseal on Enterprise.
+* **AWS Auth Web Identity Support**: We've added support for AWS Web Identities, which will be used in the credentials chain if present.
+* **Vault Monitor**: Similar to the monitor command for Consul and Nomad, we have added the ability for Vault to stream logs from other Vault servers at varying log levels.
+* **AWS Secrets Groups Support**: IAM users generated by Vault may now be added to IAM Groups.
+* **Integrated Storage as HA Storage**: In Vault 1.5, it is possible to use Integrated Storage as HA Storage while using a different storage backend for regular storage.
+* **OIDC Auth Provider Extensions**: We've added support to OIDC Auth to incorporate IdP-specific extensions. Currently this includes expanded Azure AD groups support.
+* **GCP Secrets**: Support BigQuery dataset ACLs in the absence of IAM endpoints.
+* **KMIP**: Add support for signing client certificate requests (CSRs) rather than having them be generated entirely within Vault.
+
+IMPROVEMENTS:
+
+* audit: Replication status requests are no longer audited. [[GH-8877](https://github.com/hashicorp/vault/pull/8877)]
+* audit: Added mount_type field to requests and responses. [[GH-9167](https://github.com/hashicorp/vault/pull/9167)]
+* auth/aws: Add support for Web Identity credentials [[GH-7738](https://github.com/hashicorp/vault/pull/7738)]
+* auth/jwt: Support users that are members of more than 200 groups on Azure [[GH-120](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/120)]
+* auth/kerberos: Support identities without userPrincipalName [[GH-44](https://github.com/hashicorp/vault-plugin-auth-kerberos/issues/44)]
+* auth/kubernetes: Allow disabling `iss` validation [[GH-91](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/91)]
+* auth/kubernetes: Try reading the ca.crt and TokenReviewer JWT from the default service account [[GH-83](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/83)]
+* cli: Support reading TLS parameters from file for the `vault operator raft join` command. [[GH-9060](https://github.com/hashicorp/vault/pull/9060)]
+* cli: Add a new subcommand, `vault monitor`, for tailing server logs in the console. [[GH-8477](https://github.com/hashicorp/vault/pull/8477)]
+* core: Add the Go version used to build a Vault binary to the server message output. [[GH-9078](https://github.com/hashicorp/vault/pull/9078)]
+* core: Added Password Policies for user-configurable password generation [[GH-8637](https://github.com/hashicorp/vault/pull/8637)]
+* core: New telemetry metrics covering token counts, token creation, KV secret counts, lease creation. [[GH-9239](https://github.com/hashicorp/vault/pull/9239)] [[GH-9250](https://github.com/hashicorp/vault/pull/9250)] [[GH-9244](https://github.com/hashicorp/vault/pull/9244)] [[GH-9052](https://github.com/hashicorp/vault/pull/9052)]
+* physical/gcs: The storage backend now uses a dedicated client for HA lock updates to prevent lock table update failures when flooded by other client requests. [[GH-9424](https://github.com/hashicorp/vault/pull/9424)]
+* physical/spanner: The storage backend now uses a dedicated client for HA lock updates to prevent lock table update failures when flooded by other client requests. [[GH-9423](https://github.com/hashicorp/vault/pull/9423)]
+* plugin: Add SDK method, `Sys.ReloadPlugin`, and CLI command, `vault plugin reload`, for reloading plugins. [[GH-8777](https://github.com/hashicorp/vault/pull/8777)]
+* plugin (enterprise): Add a scope field to plugin reload, which, when global, reloads the plugin anywhere in a cluster. [[GH-9347](https://github.com/hashicorp/vault/pull/9347)]
+* sdk/framework: Support accepting TypeFloat parameters over the API [[GH-8923](https://github.com/hashicorp/vault/pull/8923)]
+* secrets/aws: Add iam_groups parameter to role create/update [[GH-8811](https://github.com/hashicorp/vault/pull/8811)]
+* secrets/database: Add static role rotation for MongoDB Atlas database plugin [[GH-11](https://github.com/hashicorp/vault-plugin-database-mongodbatlas/pull/11)]
+* secrets/database: Add static role rotation for MSSQL database plugin [[GH-9062](https://github.com/hashicorp/vault/pull/9062)]
+* secrets/database: Allow InfluxDB to use insecure TLS without cert bundle [[GH-8778](https://github.com/hashicorp/vault/pull/8778)]
+* secrets/gcp: Support BigQuery dataset ACLs in absence of IAM endpoints [[GH-78](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/78)]
+* secrets/pki: Allow 3072-bit RSA keys [[GH-8343](https://github.com/hashicorp/vault/pull/8343)]
+* secrets/ssh: Add a CA-mode role option to specify signing algorithm [[GH-9096](https://github.com/hashicorp/vault/pull/9096)]
+* secrets/ssh: The [Vault SSH Helper](https://github.com/hashicorp/vault-ssh-helper) can now be configured to reference a mount in a namespace [[GH-44](https://github.com/hashicorp/vault-ssh-helper/pull/44)]
+* secrets/transit: Transit requests that make use of keys now include a new field `key_version` in their responses [[GH-9100](https://github.com/hashicorp/vault/pull/9100)]
+* secrets/transit: Improve transit batch encrypt and decrypt latencies [[GH-8775](https://github.com/hashicorp/vault/pull/8775)]
+* sentinel: Add a sentinel config section, and "additional_enabled_modules", a list of Sentinel modules that may be imported in addition to the defaults.
+* ui: Update TTL picker styling on SSH secret engine [[GH-8891](https://github.com/hashicorp/vault/pull/8891)]
+* ui: Only render the JWT input field of the Vault login form on mounts configured for JWT auth [[GH-8952](https://github.com/hashicorp/vault/pull/8952)]
+* ui: Add replication dashboards. Improve replication management workflows. [[GH-8705](https://github.com/hashicorp/vault/pull/8705)]
+* ui: Update alert banners to match design system's black text. [[GH-9463](https://github.com/hashicorp/vault/pull/9463)]
+
+BUG FIXES:
+
+* auth/oci: Fix issue where users of the Oracle Cloud Infrastructure (OCI) auth method could not authenticate when the plugin backend was mounted at a non-default path. [[GH-7](https://github.com/hashicorp/vault-plugin-auth-oci/pull/7)]
+* core: Extend replicated cubbyhole fix in 1.4.0 to cover case where a performance primary is also a DR primary [[GH-9148](https://github.com/hashicorp/vault/pull/9148)]
+* replication (enterprise): Use the PrimaryClusterAddr if it's been set
+* seal/awskms: fix AWS KMS auto-unseal when AWS_ROLE_SESSION_NAME not set [[GH-9416](https://github.com/hashicorp/vault/pull/9416)]
+* sentinel: fix panic due to concurrent map access when rules iterate over metadata maps
+* secrets/aws: Fix issue where performance standbys weren't able to generate STS credentials after an IAM access key rotation in AWS and root IAM credential update in Vault [[GH-9186](https://github.com/hashicorp/vault/pull/9186)]
+* secrets/database: Fix issue where rotating root database credentials while Vault's storage backend is unavailable causes Vault to lose access to the database [[GH-8782](https://github.com/hashicorp/vault/pull/8782)]
+* secrets/database: Fix issue that prevents performance standbys from connecting to databases after a root credential rotation [[GH-9129](https://github.com/hashicorp/vault/pull/9129)]
+* secrets/database: Fix parsing of multi-line PostgreSQL statements [[GH-8512](https://github.com/hashicorp/vault/pull/8512)]
+* secrets/gcp: Fix issue where updates were not being applied to the `token_scopes` of a roleset. [[GH-90](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/90)]
+* secrets/kv: Return the value of delete_version_after when reading kv/config, even if it is set to the default. [[GH-42](https://github.com/hashicorp/vault-plugin-secrets-kv/pull/42)]
+* ui: Add Toggle component into core addon so it is available in KMIP and other Ember Engines. [[GH-8913](https://github.com/hashicorp/vault/pull/8913)]
+* ui: Disallow max versions value of larger than 9999999999999999 on kv2 secrets engine. [[GH-9242](https://github.com/hashicorp/vault/pull/9242)]
+* ui: Add and upgrade missing dependencies to resolve a failure with `make static-dist`. [[GH-9371](https://github.com/hashicorp/vault/pull/9371)]
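+
+A sketch of the new raft `max_entry_size` setting from this release's CHANGES; the path and node ID are illustrative, and the value shown is the documented default:
+
+```hcl
+storage "raft" {
+  path    = "/opt/vault/data"
+  node_id = "node-1"
+
+  # Maximum size, in bytes, of any entry committed via raft ("1048576" = 1 MiB).
+  max_entry_size = "1048576"
+}
+```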
+
+## 1.4.7.1
+### October 15th, 2020
+### Enterprise Only
+
+BUG FIXES:
+* replication (enterprise): Fix panic when old filter path evaluation fails
+
+## 1.4.7
+### September 24th, 2020
+
+SECURITY:
+
+* Batch Token Expiry: We addressed an issue where batch token leases could outlive their TTL because we were not scheduling the expiration time correctly. This vulnerability affects Vault OSS and Vault Enterprise 1.0 and newer and is fixed in 1.4.7 and 1.5.4 (CVE-2020-25816).
+
+IMPROVEMENTS:
+
+* secret/azure: Use write-ahead-logs to cleanup any orphaned Service Principals [[GH-9773](https://github.com/hashicorp/vault/pull/9773)]
+
+BUG FIXES:
+* replication (enterprise): Don't stop replication if old filter path evaluation fails
+
+## 1.4.6
+### August 27th, 2020
+
+NOTE:
+
+All security content from 1.5.2, 1.5.1, 1.4.5, 1.4.4, 1.3.9, 1.3.8, 1.2.6, and 1.2.5 has been made fully open source, and the git tags for 1.5.3, 1.4.6, 1.3.10, and 1.2.7 will build correctly for open source users.
+
+BUG FIXES:
+
+* auth/aws: Made header handling for IAM authentication more robust
+* secrets/ssh: Fixed a bug with role option for SSH signing algorithm to allow more than RSA signing [[GH-9824](https://github.com/hashicorp/vault/pull/9824)]
+
+## 1.4.5.1
+### August 21st, 2020
+### Enterprise Only
+
+NOTE:
+
+Includes the correct license in the HSM binary.
+
+## 1.4.5
+### August 20th, 2020
+
+NOTE:
+
+OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected.
+
+KNOWN ISSUES:
+
+* AWS IAM logins may return an error depending on the headers sent with the request.
+  For more details and a workaround, see the [1.4.5 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.4.5)
+* In versions 1.2.6, 1.3.9, 1.4.5, and 1.5.2, enterprise licenses on the HSM build were not incorporated correctly - enterprise
+  customers should use 1.2.6.1, 1.3.9.1, 1.4.5.1, and 1.5.2.1.
+
+
+## 1.4.4
+### August 20th, 2020
+
+SECURITY:
+
+* When using the IAM AWS Auth Method, under certain circumstances, values Vault uses to validate identities and roles can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.7.1 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16250) (Discovered by Felix Wilhelm of Google Project Zero)
+* When using the GCP GCE Auth Method, under certain circumstances, values Vault uses to validate GCE VMs can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.8.3 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16251) (Discovered by Felix Wilhelm of Google Project Zero)
+
+KNOWN ISSUES:
+
+* OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected.
+* AWS IAM logins may return an error depending on the headers sent with the request.
+  For more details and a workaround, see the [1.4.4 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.4.4)
+
+BUG FIXES:
+
+* auth/okta: fix bug introduced in 1.4.0: only 200 external groups were fetched even if the user belonged to more [[GH-9580](https://github.com/hashicorp/vault/pull/9580)]
+* seal/awskms: fix AWS KMS auto-unseal when AWS_ROLE_SESSION_NAME not set [[GH-9416](https://github.com/hashicorp/vault/pull/9416)]
+* secrets/aws: Fix possible issue creating access keys when using Performance Standbys [[GH-9606](https://github.com/hashicorp/vault/pull/9606)]
+
+IMPROVEMENTS:
+* auth/aws: Retry on transient failures during AWS IAM auth login attempts [[GH-8727](https://github.com/hashicorp/vault/pull/8727)]
+* ui: Add transit key algorithms aes128-gcm96, ecdsa-p384, ecdsa-p521 to the UI. [[GH-9070](https://github.com/hashicorp/vault/pull/9070)] & [[GH-9520](https://github.com/hashicorp/vault/pull/9520)]
+
+## 1.4.3
+### July 2nd, 2020
+
+IMPROVEMENTS:
+
+* auth/aws: Add support for Web Identity credentials [[GH-9251](https://github.com/hashicorp/vault/pull/9251)]
+* auth/kerberos: Support identities without userPrincipalName [[GH-44](https://github.com/hashicorp/vault-plugin-auth-kerberos/issues/44)]
+* core: Add the Go version used to build a Vault binary to the server message output.
[[GH-9078](https://github.com/hashicorp/vault/pull/9078)]
+* secrets/database: Add static role rotation for MongoDB Atlas database plugin [[GH-9311](https://github.com/hashicorp/vault/pull/9311)]
+* physical/mysql: Require TLS or plaintext flagging in MySQL configuration [[GH-9012](https://github.com/hashicorp/vault/pull/9012)]
+* ui: Link to the Vault Changelog in the UI footer [[GH-9216](https://github.com/hashicorp/vault/pull/9216)]
+
+BUG FIXES:
+
+* agent: Restart template server when it shuts down [[GH-9200](https://github.com/hashicorp/vault/pull/9200)]
+* auth/oci: Fix issue where users of the Oracle Cloud Infrastructure (OCI) auth method could not authenticate when the plugin backend was mounted at a non-default path. [[GH-9278](https://github.com/hashicorp/vault/pull/9278)]
+* replication: The issue causing cubbyholes in namespaces on performance secondaries to not work, which was fixed in 1.4.0, remained an issue when the primary was both a performance primary and a DR primary.
+* seal: (enterprise) Fix issue causing stored seal and recovery keys to be mistaken as sealwrapped values
+* secrets/aws: Fix issue where performance standbys weren't able to generate STS credentials after an IAM access key rotation in AWS and root IAM credential update in Vault [[GH-9207](https://github.com/hashicorp/vault/pull/9207)]
+* secrets/database: Fix issue that prevents performance standbys from connecting to databases after a root credential rotation [[GH-9208](https://github.com/hashicorp/vault/pull/9208)]
+* secrets/gcp: Fix issue where updates were not being applied to the `token_scopes` of a roleset. [[GH-9277](https://github.com/hashicorp/vault/pull/9277)]
+
+
+## 1.4.2 (May 21st, 2020)
+
+SECURITY:
+* core: Proxy environment variables are now redacted before being logged, in case the URLs include a username:password. This vulnerability, CVE-2020-13223, is fixed in 1.3.6 and 1.4.2, but affects 1.4.0 and 1.4.1, as well as older versions of Vault [[GH-9022](https://github.com/hashicorp/vault/pull/9022)]
+* secrets/gcp: Fix a regression in 1.4.0 where the system TTLs were being used instead of the configured backend TTLs for dynamic service accounts. This vulnerability is CVE-2020-12757. [[GH-85](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/85)]
+
+IMPROVEMENTS:
+
+* storage/raft: The storage stanza now accepts `leader_ca_cert_file`, `leader_client_cert_file`, and
+  `leader_client_key_file` parameters to read and parse TLS certificate information from paths on disk.
+  Existing non-path based parameters will continue to work, but their values will need to be provided as a
+  single-line string with newlines delimited by `\n`. [[GH-8894](https://github.com/hashicorp/vault/pull/8894)]
+* storage/raft: The `vault status` CLI command and the `sys/leader` API now contain the committed and applied
+  raft indexes. [[GH-9011](https://github.com/hashicorp/vault/pull/9011)]
+
+BUG FIXES:
+
+* auth/aws: Fix token renewal issues caused by the metadata changes in 1.4.1 [[GH-8991](https://github.com/hashicorp/vault/pull/8991)]
+* auth/ldap: Fix 1.4.0 regression that could result in auth failures when LDAP auth config includes upndomain.
[[GH-9041](https://github.com/hashicorp/vault/pull/9041)]
+* secrets/ad: Forward rotation requests from standbys to active clusters [[GH-66](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/66)]
+* secrets/database: Prevent generation of usernames that are not allowed by the MongoDB Atlas API [[GH-9](https://github.com/hashicorp/vault-plugin-database-mongodbatlas/pull/9)]
+* secrets/database: Return an error if a manual rotation of static account credentials fails [[GH-9035](https://github.com/hashicorp/vault/pull/9035)]
+* secrets/openldap: Forward all rotation requests from standbys to active clusters [[GH-9028](https://github.com/hashicorp/vault/pull/9028)]
+* secrets/transform (enterprise): Fix panic that could occur when accessing cached template entries, such as requests
+  that access templates directly or indirectly from a performance standby node.
+* serviceregistration: Fix a regression for Consul service registration that ignored using the listener address as
+  the redirect address unless api_addr was provided. It now properly uses the same redirect address as the one
+  used by Vault's Core object. [[GH-8976](https://github.com/hashicorp/vault/pull/8976)]
+* storage/raft: Advertise the configured cluster address to the rest of the nodes in the raft cluster. This fixes
+  an issue where a node advertising 0.0.0.0 is not using a unique hostname. [[GH-9008](https://github.com/hashicorp/vault/pull/9008)]
+* storage/raft: Fix panic when multiple nodes attempt to join the cluster at once. [[GH-9008](https://github.com/hashicorp/vault/pull/9008)]
+* sys: The path provided in `sys/internal/ui/mounts/:path` is now namespace-aware. This fixes an issue
+  with `vault kv` subcommands that had namespaces provided in the path returning permission denied all the time.
+  [[GH-8962](https://github.com/hashicorp/vault/pull/8962)]
+* ui: Fix snowman that appears when namespaces have more than one period [[GH-8910](https://github.com/hashicorp/vault/pull/8910)]
+
+## 1.4.1 (April 30th, 2020)
+
+CHANGES:
+
+* auth/aws: The default set of metadata fields added in 1.4.1 has been changed to `account_id` and `auth_type` [[GH-8783](https://github.com/hashicorp/vault/pull/8783)]
+* storage/raft: Disallow `ha_storage` to be specified if `raft` is set as the `storage` type; see the config sketch below.
[[GH-8707](https://github.com/hashicorp/vault/pull/8707)]
+
+IMPROVEMENTS:
+
+* auth/aws: The set of metadata stored during login is now configurable [[GH-8783](https://github.com/hashicorp/vault/pull/8783)]
+* auth/aws: Improve region selection to avoid errors seen if the account hasn't enabled some newer AWS regions [[GH-8679](https://github.com/hashicorp/vault/pull/8679)]
+* auth/azure: Enable login from Azure VMs with user-assigned identities [[GH-33](https://github.com/hashicorp/vault-plugin-auth-azure/pull/33)]
+* auth/gcp: The set of metadata stored during login is now configurable [[GH-92](https://github.com/hashicorp/vault-plugin-auth-gcp/pull/92)]
+* auth/gcp: The type of alias name used during login is now configurable [[GH-95](https://github.com/hashicorp/vault-plugin-auth-gcp/pull/95)]
+* auth/ldap: Improve error messages during LDAP operation failures [[GH-8740](https://github.com/hashicorp/vault/pull/8740)]
+* identity: Add a batch delete API for identity entities [[GH-8785](https://github.com/hashicorp/vault/pull/8785)]
+* identity: Improve performance of logins when no group updates are needed [[GH-8795](https://github.com/hashicorp/vault/pull/8795)]
+* metrics: Add `vault.identity.num_entities` metric [[GH-8816](https://github.com/hashicorp/vault/pull/8816)]
+* secrets/kv: Allow `delete-version-after` to be reset to 0 via the CLI [[GH-8635](https://github.com/hashicorp/vault/pull/8635)]
+* secrets/rabbitmq: Improve error handling and reporting [[GH-8619](https://github.com/hashicorp/vault/pull/8619)]
+* ui: Provide One Time Password during Operation Token generation process [[GH-8630](https://github.com/hashicorp/vault/pull/8630)]
+
+BUG FIXES:
+
+* auth/okta: Fix MFA regression (introduced in [GH-8143](https://github.com/hashicorp/vault/pull/8143)) from 1.4.0 [[GH-8807](https://github.com/hashicorp/vault/pull/8807)]
+* auth/userpass: Fix upgrade value for `token_bound_cidrs` being ignored due to incorrect key provided [[GH-8826](https://github.com/hashicorp/vault/pull/8826/files)]
+* config/seal: Fix segfault when seal block is removed [[GH-8517](https://github.com/hashicorp/vault/pull/8517)]
+* core: Fix an issue where users attempting to build Vault could receive Go module checksum errors [[GH-8770](https://github.com/hashicorp/vault/pull/8770)]
+* core: Fix blocked requests when a SIGHUP is issued while a long-running request holds the state lock.
+  Also fixes a deadlock that can happen if `vault debug` with the config target is run during this time.
+  [[GH-8755](https://github.com/hashicorp/vault/pull/8755)]
+* core: Always rewrite the .vault-token file as part of a `vault login` to ensure permissions and ownership are set correctly [[GH-8867](https://github.com/hashicorp/vault/pull/8867)]
+* database/mongodb: Fix context deadline error that may result due to retry attempts on failed commands
+  [[GH-8863](https://github.com/hashicorp/vault/pull/8863)]
+* http: Fix superfluous call messages from the http package on logs caused by missing returns after
+  `respondError` calls [[GH-8796](https://github.com/hashicorp/vault/pull/8796)]
+* namespace (enterprise): Fix namespace listing to return `key_info` when a scoping namespace is also provided.
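+
+As a rough config sketch of the raft storage change noted under CHANGES above (the path and node ID are illustrative, not from this release), a valid 1.4.1 config simply omits `ha_storage` when `storage "raft"` is used, since raft provides HA coordination itself:
+
+```hcl
+# Illustrative example only: as of 1.4.1, ha_storage may not be
+# combined with storage "raft".
+storage "raft" {
+  path    = "/opt/vault/data" # hypothetical data directory
+  node_id = "node_1"          # hypothetical node ID
+}
+```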
+
+* seal/gcpkms: Fix panic that could occur if all seal parameters were provided via environment
+  variables [[GH-8840](https://github.com/hashicorp/vault/pull/8840)]
+* storage/raft: Fix memory allocation and incorrect metadata tracking issues with snapshots [[GH-8793](https://github.com/hashicorp/vault/pull/8793)]
+* storage/raft: Fix panic that could occur if `disable_clustering` was set to true on Raft storage cluster [[GH-8784](https://github.com/hashicorp/vault/pull/8784)]
+* storage/raft: Handle errors returned from the API during snapshot operations [[GH-8861](https://github.com/hashicorp/vault/pull/8861)]
+* sys/wrapping: Allow unwrapping of wrapping tokens which contain nil data [[GH-8714](https://github.com/hashicorp/vault/pull/8714)]
+
+## 1.4.0 (April 7th, 2020)
+
+CHANGES:
+
+* cli: The raft configuration command has been renamed to list-peers to avoid
+  confusion.
+
+FEATURES:
+
+* **Kerberos Authentication**: Vault now supports Kerberos authentication using a SPNEGO token.
+  Login can be performed using the Vault CLI, API, or agent.
+* **Kubernetes Service Discovery**: A new Kubernetes service discovery feature where, if
+  configured, Vault will tag Vault pods with their current health status. For more, see [#8249](https://github.com/hashicorp/vault/pull/8249).
+* **MongoDB Atlas Secrets**: Vault can now generate dynamic credentials for both MongoDB Atlas databases
+  as well as the [Atlas programmatic interface](https://docs.atlas.mongodb.com/tutorial/manage-programmatic-access/).
+* **OpenLDAP Secrets Engine**: We now support password management of existing OpenLDAP user entries. For more, see [#8360](https://github.com/hashicorp/vault/pull/8360/).
+* **Redshift Database Secrets Engine**: The database secrets engine now supports static and dynamic secrets for the Amazon Web Services (AWS) Redshift service.
+* **Service Registration Config**: A newly introduced `service_registration` configuration stanza, that allows for service registration to be configured separately from the storage backend. For more, see [#7887](https://github.com/hashicorp/vault/pull/7887/).
+* **Transform Secrets Engine (Enterprise)**: A new secrets engine that handles secure data transformations against provided input values.
+* **Integrated Storage**: Promoted out of beta and into general availability for both open-source and enterprise workloads.
+
+IMPROVEMENTS:
+
+* agent: add option to force the use of the auto-auth token, and ignore the Vault token in the request [[GH-8101](https://github.com/hashicorp/vault/pull/8101)]
+* api: Restore and fix DNS SRV Lookup [[GH-8520](https://github.com/hashicorp/vault/pull/8520)]
+* audit: HMAC http_raw_body in audit log; this ensures that large authenticated Prometheus metrics responses get
+  replaced with short HMAC values [[GH-8130](https://github.com/hashicorp/vault/pull/8130)]
+* audit: Generate-root, generate-recovery-token, and generate-dr-operation-token requests and responses are now audited.
[[GH-8301](https://github.com/hashicorp/vault/pull/8301)]
+* auth/aws: Reduce the number of simultaneous STS client credentials needed [[GH-8161](https://github.com/hashicorp/vault/pull/8161)]
+* auth/azure: subscription ID, resource group, vm and vmss names are now stored in alias metadata [[GH-30](https://github.com/hashicorp/vault-plugin-auth-azure/pull/30)]
+* auth/jwt: Additional OIDC callback parameters available for CLI logins [[GH-80](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/80) & [GH-86](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/86)]
+* auth/jwt: Bound claims may be optionally configured using globs [[GH-89](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/89)]
+* auth/jwt: Timeout during OIDC CLI login if process doesn't complete within 2 minutes [[GH-97](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/97)]
+* auth/jwt: Add support for the `form_post` response mode [[GH-98](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/98)]
+* auth/jwt: add optional client_nonce to authorization flow [[GH-104](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/104)]
+* auth/okta: Upgrade okta sdk lib, which should improve handling of groups [[GH-8143](https://github.com/hashicorp/vault/pull/8143)]
+* aws: Add support for v2 of the instance metadata service (see [issue 7924](https://github.com/hashicorp/vault/issues/7924) for all linked PRs)
+* core: Separate out service discovery interface from storage interface to allow
+  new types of service discovery not coupled to storage [[GH-7887](https://github.com/hashicorp/vault/pull/7887)]
+* core: Add support for telemetry option `metrics_prefix` [[GH-8340](https://github.com/hashicorp/vault/pull/8340)]; see the config sketch below
+* core: Entropy Augmentation can now be used with AWS KMS and Vault Transit seals
+* core: Allow tls_min_version to be set to TLS 1.3 [[GH-8305](https://github.com/hashicorp/vault/pull/8305)]
+* cli: Incorrect TLS configuration will now correctly fail [[GH-8025](https://github.com/hashicorp/vault/pull/8025)]
+* identity: Allow specifying a custom `client_id` for identity tokens [[GH-8165](https://github.com/hashicorp/vault/pull/8165)]
+* metrics/prometheus: improve performance with high volume of metrics updates [[GH-8507](https://github.com/hashicorp/vault/pull/8507)]
+* replication (enterprise): Fix race condition causing clusters with high throughput writes to sometimes
+  fail to enter streaming-wal mode
+* replication (enterprise): Secondary clusters can now perform an extra gRPC call to all nodes in a primary
+  cluster in an attempt to resolve the active node's address
+* replication (enterprise): The replication status API now outputs `last_performance_wal`, `last_dr_wal`,
+  and `connection_state` values
+* replication (enterprise): DR secondary clusters can now be recovered by the `replication/dr/secondary/recover`
+  API
+* replication (enterprise): We now allow for an alternate means to create a Disaster Recovery token, by using a batch
+  token that is created with an ACL that allows for access to one or more of the DR endpoints.
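+
+As a minimal sketch of the `metrics_prefix` option noted above (the prefix value and the other sink settings are illustrative, not part of this change), metric names that previously began with the default `vault` prefix are emitted under the configured prefix instead:
+
+```hcl
+telemetry {
+  # Hypothetical prefix; replaces the default "vault" metric name prefix.
+  metrics_prefix = "mycompany_vault"
+
+  # Unrelated example sink settings, shown only for context.
+  prometheus_retention_time = "30s"
+  disable_hostname          = true
+}
+```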
+
+* secrets/database/mongodb: Switched internal MongoDB driver to mongo-driver [[GH-8140](https://github.com/hashicorp/vault/pull/8140)]
+* secrets/database/mongodb: Add support for x509 client authorization to MongoDB [[GH-8329](https://github.com/hashicorp/vault/pull/8329)]
+* secrets/database/oracle: Add support for static credential rotation [[GH-26](https://github.com/hashicorp/vault-plugin-database-oracle/pull/26)]
+* secrets/consul: Add support to specify TLS options per Consul backend [[GH-4800](https://github.com/hashicorp/vault/pull/4800)]
+* secrets/gcp: Allow specifying the TTL for a service key [[GH-54](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/54)]
+* secrets/gcp: Add support for rotating root keys [[GH-53](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/53)]
+* secrets/gcp: Handle version 3 policies for Resource Manager IAM requests [[GH-77](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/77)]
+* secrets/nomad: Add support to specify TLS options per Nomad backend [[GH-8083](https://github.com/hashicorp/vault/pull/8083)]
+* secrets/ssh: Allowed users can now be templated with identity information [[GH-7548](https://github.com/hashicorp/vault/pull/7548)]
+* secrets/transit: Adding RSA3072 key support [[GH-8151](https://github.com/hashicorp/vault/pull/8151)]
+* storage/consul: Vault now returns a more descriptive error message when only a client cert or
+  a client key has been provided [[GH-4930](https://github.com/hashicorp/vault/pull/8084)]
+* storage/raft: Nodes in the raft cluster can all be given possible leader
+  addresses, which they will continuously try to join, further automating
+  the join process [[GH-7856](https://github.com/hashicorp/vault/pull/7856)]
+* storage/raft: Fix a potential deadlock that could occur on leadership transition [[GH-8547](https://github.com/hashicorp/vault/pull/8547)]
+* storage/raft: Refresh TLS keyring on snapshot restore [[GH-8546](https://github.com/hashicorp/vault/pull/8546)]
+* storage/etcd: Bumped etcd client API SDK [[GH-7931](https://github.com/hashicorp/vault/pull/7931) & [GH-4961](https://github.com/hashicorp/vault/pull/4961) & [GH-4349](https://github.com/hashicorp/vault/pull/4349) & [GH-7582](https://github.com/hashicorp/vault/pull/7582)]
+* ui: Make Transit Key actions more prominent [[GH-8304](https://github.com/hashicorp/vault/pull/8304)]
+* ui: Add Core Usage Metrics [[GH-8347](https://github.com/hashicorp/vault/pull/8347)]
+* ui: Add refresh Namespace list on the Namespace dropdown, and redesign of Namespace dropdown menu [[GH-8442](https://github.com/hashicorp/vault/pull/8442)]
+* ui: Update transit actions to codeblocks & automatically encode plaintext unless indicated [[GH-8462](https://github.com/hashicorp/vault/pull/8462)]
+* ui: Display the results of transit key actions in a modal window [[GH-8462](https://github.com/hashicorp/vault/pull/8575)]
+* ui: Transit key version styling updates & ability to copy key from dropdown [[GH-8480](https://github.com/hashicorp/vault/pull/8480)]
+
+BUG FIXES:
+
+* agent: Fix issue where TLS options are ignored for agent template feature [[GH-7889](https://github.com/hashicorp/vault/pull/7889)]
+* auth/jwt: Use lower case role names for `default_role` to match the `role` case convention [[GH-100](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/100)]
+* auth/ldap: Fix a bug where the UPNDOMAIN parameter was wrongly used to look up the group
+  membership of the given user
[[GH-6325](https://github.com/hashicorp/vault/pull/8333)]
+* cli: Support autocompletion for nested mounts [[GH-8303](https://github.com/hashicorp/vault/pull/8303)]
+* cli: Fix CLI namespace autocompletion [[GH-8315](https://github.com/hashicorp/vault/pull/8315)]
+* identity: Fix incorrect caching of identity token JWKS responses [[GH-8412](https://github.com/hashicorp/vault/pull/8412)]
+* metrics/stackdriver: Fix issue that caused the stackdriver metrics library to create unnecessary stackdriver descriptors [[GH-8073](https://github.com/hashicorp/vault/pull/8073)]
+* replication (enterprise): Fix issue causing cubbyholes in namespaces on performance secondaries to not work.
+* replication (enterprise): Unmounting a dynamic secrets backend could sometimes lead to replication errors. Change the order of operations to prevent that.
+* seal (enterprise): Fix seal migration when transactional seal wrap backend is in use.
+* secrets/database/influxdb: Fix potential panic if connection to the InfluxDB database cannot be established [[GH-8282](https://github.com/hashicorp/vault/pull/8282)]
+* secrets/database/mysql: Ensures default static credential rotation statements are used [[GH-8240](https://github.com/hashicorp/vault/pull/8240)]
+* secrets/database/mysql: Fix inconsistent query parameter names: {{name}} or {{username}} for
+  different queries. Either is now accepted for backwards compatibility [[GH-8240](https://github.com/hashicorp/vault/pull/8240)]
+* secrets/database/postgres: Fix inconsistent query parameter names: {{name}} or {{username}} for
+  different queries. Either is now accepted for backwards compatibility [[GH-8240](https://github.com/hashicorp/vault/pull/8240)]
+* secrets/pki: Support FQDNs in DNS Name [[GH-8288](https://github.com/hashicorp/vault/pull/8288)]
+* storage/raft: Allow seal migration to be performed on Vault clusters using raft storage [[GH-8103](https://github.com/hashicorp/vault/pull/8103)]
+* telemetry: Prometheus requests on standby nodes will now return an error instead of forwarding
+  the request to the active node [[GH-8280](https://github.com/hashicorp/vault/pull/8280)]
+* ui: Fix broken popup menu on the transit secrets list page [[GH-8348](https://github.com/hashicorp/vault/pull/8348)]
+* ui: Update headless Chrome flag to fix `yarn run test:oss` [[GH-8035](https://github.com/hashicorp/vault/pull/8035)]
+* ui: Update CLI to accept empty strings as param value to reset previously-set values
+* ui: Fix bug where error states don't clear when moving between action tabs on Transit [[GH-8354](https://github.com/hashicorp/vault/pull/8354)]
+
+## 1.3.10
+### August 27th, 2020
+
+NOTE:
+
+All security content from 1.5.2, 1.5.1, 1.4.5, 1.4.4, 1.3.9, 1.3.8, 1.2.6, and 1.2.5 has been made fully open source, and the git tags for 1.5.3, 1.4.6, 1.3.10, and 1.2.7 will build correctly for open source users.
+
+BUG FIXES:
+
+* auth/aws: Made header handling for IAM authentication more robust
+
+## 1.3.9.1
+### August 21st, 2020
+### Enterprise Only
+
+NOTE:
+
+Includes correct license in the HSM binary.
+
+## 1.3.9
+### August 20th, 2020
+
+NOTE:
+
+OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected.
+
+KNOWN ISSUES:
+
+* AWS IAM logins may return an error depending on the headers sent with the request.
+ For more details and a workaround, see the [1.3.9 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.3.9) +* In versions 1.2.6, 1.3.9, 1.4.5, and 1.5.2, enterprise licenses on the HSM build were not incorporated correctly - enterprise + customers should use 1.2.6.1, 1.3.9.1, 1.4.5.1, and 1.5.2.1. + +## 1.3.8 +### August 20th, 2020 + +SECURITY: + +* When using the IAM AWS Auth Method, under certain circumstances, values Vault uses to validate identities and roles can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.7.1 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16250) (Discovered by Felix Wilhelm of Google Project Zero) +* When using the GCP GCE Auth Method, under certain circumstances, values Vault uses to validate GCE VMs can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.8.3 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16251) (Discovered by Felix Wilhelm of Google Project Zero) + +KNOWN ISSUES: + +* OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected. +* AWS IAM logins may return an error depending on the headers sent with the request. + For more details and a workaround, see the [1.3.8 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.3.8) + +## 1.3.7 +### July 2nd, 2020 + +BUG FIXES: + +* seal: (enterprise) Fix issue causing stored seal and recovery keys to be mistaken as sealwrapped values +* secrets/aws: Fix issue where performance standbys weren't able to generate STS credentials after an IAM access key rotation in AWS and root IAM credential update in Vault [[GH-9363](https://github.com/hashicorp/vault/pull/9363)] + +## 1.3.6 (May 21st, 2020) + +SECURITY: +* core: proxy environment variables are now redacted before being logged, in case the URLs include a username:password. This vulnerability, CVE-2020-13223, is fixed in 1.3.6 and 1.4.2, but affects 1.4 and 1.4.1, as well as older versions of Vault [[GH-9022](https://github.com/hashicorp/vault/pull/9022)] + +BUG FIXES: + +* auth/aws: Fix token renewal issues caused by the metadata changes in 1.3.5 [[GH-8991](https://github.com/hashicorp/vault/pull/8991)] +* replication: Fix mount filter bug that allowed replication filters to hide local mounts on a performance secondary + +## 1.3.5 (April 28th, 2020) + +CHANGES: + +* auth/aws: The default set of metadata fields added in 1.3.2 has been changed to `account_id` and `auth_type` [[GH-8783](https://github.com/hashicorp/vault/pull/8783)] + +IMPROVEMENTS: + +* auth/aws: The set of metadata stored during login is now configurable [[GH-8783](https://github.com/hashicorp/vault/pull/8783)] + +## 1.3.4 (March 19th, 2020) + +SECURITY: + +* A vulnerability was identified in Vault and Vault Enterprise such that, under certain circumstances, an Entity's Group membership may inadvertently include Groups the Entity no longer has permissions to. This vulnerability, CVE-2020-10660, affects Vault and Vault Enterprise versions 0.9.0 and newer, and is fixed in 1.3.4. [[GH-8606](https://github.com/hashicorp/vault/pull/8606)] +* A vulnerability was identified in Vault Enterprise such that, under certain circumstances, existing nested-path policies may give access to Namespaces created after-the-fact. This vulnerability, CVE-2020-10661, affects Vault Enterprise versions 0.11 and newer, and is fixed in 1.3.4. 
+
+## 1.3.3 (March 5th, 2020)
+
+BUG FIXES:
+
+* approle: Fix excessive locking during tidy, which could potentially block new approle logins for long enough to cause an outage [[GH-8418](https://github.com/hashicorp/vault/pull/8418)]
+* cli: Fix issue where Raft snapshots from standby nodes created an empty backup file [[GH-8097](https://github.com/hashicorp/vault/pull/8097)]
+* identity: Fix incorrect caching of identity token JWKS responses [[GH-8412](https://github.com/hashicorp/vault/pull/8412)]
+* kmip: role read now returns tls_client_ttl
+* kmip: fix panic when templateattr not provided in rekey request
+* secrets/database/influxdb: Fix potential panic if connection to the InfluxDB database cannot be established [[GH-8282](https://github.com/hashicorp/vault/pull/8282)]
+* storage/mysql: Fix potential crash when using MySQL for high availability coordination [[GH-8300](https://github.com/hashicorp/vault/pull/8300)]
+* storage/raft: Fix potential crash when using Raft for high availability coordination [[GH-8356](https://github.com/hashicorp/vault/pull/8356)]
+* ui: Fix missing License menu item [[GH-8230](https://github.com/hashicorp/vault/pull/8230)]
+* ui: Fix bug where the default auth method on login defaulted to an auth method with listing-visibility=unauth instead of “other” [[GH-8218](https://github.com/hashicorp/vault/pull/8218)]
+* ui: Fix bug where KMIP details were not shown in the UI Wizard [[GH-8255](https://github.com/hashicorp/vault/pull/8255)]
+* ui: Show Error messages on Auth Configuration page when you hit permission errors [[GH-8500](https://github.com/hashicorp/vault/pull/8500)]
+* ui: Remove duplicate form inputs for the GitHub config [[GH-8519](https://github.com/hashicorp/vault/pull/8519)]
+* ui: Correct HMAC capitalization [[GH-8528](https://github.com/hashicorp/vault/pull/8528)]
+* ui: Fix danger message in DR [[GH-8555](https://github.com/hashicorp/vault/pull/8555)]
+* ui: Fix certificate field for LDAP config [[GH-8573](https://github.com/hashicorp/vault/pull/8573)]
+
+## 1.3.2 (January 22nd, 2020)
+
+SECURITY:
+ * When deleting a namespace on Vault Enterprise, in certain circumstances, the deletion
+   process will fail to revoke dynamic secrets for a mount in that namespace. This will
+   leave any dynamic secrets in remote systems alive and will fail to clean them up. This
+   vulnerability, CVE-2020-7220, affects Vault Enterprise 0.11.0 and newer.
+ +IMPROVEMENTS: + * auth/aws: Add aws metadata to identity alias [[GH-7985](https://github.com/hashicorp/vault/pull/7985)] + * auth/kubernetes: Allow both names and namespaces to be set to "*" [[GH-78](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/78)] + +BUG FIXES: + +* auth/azure: Fix Azure compute client to use correct base URL [[GH-8072](https://github.com/hashicorp/vault/pull/8072)] +* auth/ldap: Fix renewal of tokens without configured policies that are + generated by an LDAP login [[GH-8072](https://github.com/hashicorp/vault/pull/8072)] +* auth/okta: Fix renewal of tokens without configured policies that are + generated by an Okta login [[GH-8072](https://github.com/hashicorp/vault/pull/8072)] +* core: Fix seal migration error when attempting to migrate from auto unseal to shamir [[GH-8172](https://github.com/hashicorp/vault/pull/8172)] +* core: Fix seal migration config issue when migrating from auto unseal to auto unseal [[GH-8172](https://github.com/hashicorp/vault/pull/8172)] +* plugin: Fix issue where a plugin unwrap request potentially used an expired token [[GH-8058](https://github.com/hashicorp/vault/pull/8058)] +* replication: Fix issue where a forwarded request from a performance/standby node could run into + a timeout +* secrets/database: Fix issue where a manual static role rotation could potentially panic [[GH-8098](https://github.com/hashicorp/vault/pull/8098)] +* secrets/database: Fix issue where a manual root credential rotation request is not forwarded + to the primary node [[GH-8125](https://github.com/hashicorp/vault/pull/8125)] +* secrets/database: Fix issue where a manual static role rotation request is not forwarded + to the primary node [[GH-8126](https://github.com/hashicorp/vault/pull/8126)] +* secrets/database/mysql: Fix issue where special characters for a MySQL password were encoded [[GH-8040](https://github.com/hashicorp/vault/pull/8040)] +* ui: Fix deleting namespaces [[GH-8132](https://github.com/hashicorp/vault/pull/8132)] +* ui: Fix Error handler on kv-secret edit and kv-secret view pages [[GH-8133](https://github.com/hashicorp/vault/pull/8133)] +* ui: Fix OIDC callback to check storage [[GH-7929](https://github.com/hashicorp/vault/pull/7929)]. +* ui: Change `.box-radio` height to min-height to prevent overflow issues [[GH-8065](https://github.com/hashicorp/vault/pull/8065)] + +## 1.3.1 (December 18th, 2019) + +IMPROVEMENTS: + +* agent: Add ability to set `exit-after-auth` via the CLI [[GH-7920](https://github.com/hashicorp/vault/pull/7920)] +* auth/ldap: Add a `request_timeout` configuration option to prevent connection + requests from hanging [[GH-7909](https://github.com/hashicorp/vault/pull/7909)] +* auth/kubernetes: Add audience to tokenreview API request for Kube deployments where issuer + is not Kube. 
[[GH-74](https://github.com/hashicorp/vault/pull/74)]
+* secrets/ad: Add a `request_timeout` configuration option to prevent connection
+  requests from hanging [[GH-59](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/59)]
+* storage/postgresql: Add support for setting `connection_url` from environment
+  variable `VAULT_PG_CONNECTION_URL` [[GH-7937](https://github.com/hashicorp/vault/pull/7937)]
+* telemetry: Add `enable_hostname_label` option to telemetry stanza [[GH-7902](https://github.com/hashicorp/vault/pull/7902)]
+* telemetry: Add accept header check for prometheus mime type [[GH-7958](https://github.com/hashicorp/vault/pull/7958)]
+
+BUG FIXES:
+
+* agent: Fix issue where Agent exits before all templates are rendered when
+  using `exit_after_auth` [[GH-7899](https://github.com/hashicorp/vault/pull/7899)]
+* auth/aws: Fixes region-related issues when using a custom `sts_endpoint` by adding
+  a `sts_region` parameter [[GH-7922](https://github.com/hashicorp/vault/pull/7922)]
+* auth/token: Fix panic when getting batch tokens on a performance standby from a role
+  that does not exist [[GH-8027](https://github.com/hashicorp/vault/pull/8027)]
+* core: Improve warning message for lease TTLs [[GH-7901](https://github.com/hashicorp/vault/pull/7901)]
+* identity: Fix identity token panic during invalidation [[GH-8043](https://github.com/hashicorp/vault/pull/8043)]
+* plugin: Fix a panic that could occur if a mount/auth entry was unable to
+  mount the plugin backend and a request that required the system view to be
+  retrieved was made [[GH-7991](https://github.com/hashicorp/vault/pull/7991)]
+* replication: Add `generate-public-key` endpoint to list of allowed endpoints
+  for existing DR secondaries
+* secrets/gcp: Fix panic if bindings aren't provided in roleset create/update. [[GH-56](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/56)]
+* secrets/pki: Prevent generating certificate on performance standby when storing
+  [[GH-7904](https://github.com/hashicorp/vault/pull/7904)]
+* secrets/transit: Prevent restoring keys to new names that are sub paths [[GH-7998](https://github.com/hashicorp/vault/pull/7998)]
+* storage/s3: Fix a bug in configurable S3 paths that was preventing use of S3 as
+  a source during `operator migrate` operations [[GH-7966](https://github.com/hashicorp/vault/pull/7966)]
+* ui: Ensure secrets with a period in their key can be viewed and copied [[GH-7926](https://github.com/hashicorp/vault/pull/7926)]
+* ui: Fix status menu after demotion [[GH-7997](https://github.com/hashicorp/vault/pull/7997)]
+* ui: Fix select dropdowns in Safari when running Mojave [[GH-8023](https://github.com/hashicorp/vault/pull/8023)]
+
+## 1.3 (November 14th, 2019)
+
+CHANGES:
+
+ * Secondary cluster activation: There has been a change to the way that activating
+   performance and DR secondary clusters works when using public keys for
+   encryption of the parameters rather than a wrapping token. This flow was
+   experimental and never documented. It is now officially supported and
+   documented but is not backwards compatible with older Vault releases.
+ * Cluster cipher suites: On its cluster port, Vault will no longer advertise
+   the full TLS 1.2 cipher suite list by default. Although this port is only
+   used for Vault-to-Vault communication and would always pick a strong cipher,
+   it could cause false flags on port scanners and other security utilities
+   that assumed insecure ciphers were being used.
The previous behavior can be
+   achieved by setting the value of the (undocumented) `cluster_cipher_suites`
+   config flag to `tls12`.
+ * API/Agent Renewal behavior: The API now allows multiple options for how it
+   deals with renewals. The legacy behavior in the Agent/API is for the renewer
+   (now called the lifetime watcher) to exit on a renew error, leading to a
+   reauthentication. The new default behavior is for the lifetime watcher to
+   ignore 5XX errors and simply retry as scheduled, using the existing lease
+   duration. It is also possible, within custom code, to disable renewals
+   entirely, which allows the lifetime watcher to simply return when it
+   believes it is time for your code to renew or reauthenticate.
+
+FEATURES:
+
+ * **Vault Debug**: A new top-level subcommand, `debug`, is added that allows
+   operators to retrieve debugging information related to a particular Vault
+   node. Operators can use this simple workflow to capture triaging information,
+   which can then be consumed programmatically or by support and engineering teams.
+   It has the ability to probe for config, host, metrics, pprof, server status,
+   and replication status.
+ * **Recovery Mode**: Vault server can be brought up in recovery mode to resolve
+   outages caused by the data store being in a bad state. This is a privileged mode
+   that allows `sys/raw` API calls to perform surgical corrections to the data
+   store. Bad storage state can be caused by bugs. However, this is usually
+   observed when known (and fixed) bugs are hit by older versions of Vault.
+ * **Entropy Augmentation (Enterprise)**: Vault now supports sourcing entropy from
+   an external source for critical security parameters. Currently an HSM that
+   supports PKCS#11 is the only supported source.
+ * **Active Directory Secret Check-In/Check-Out**: In the Active Directory secrets
+   engine, users or applications can check out a service account for use, and its
+   password will be rotated when it's checked back in.
+ * **Vault Agent Template**: Vault Agent now supports rendering templates containing
+   Vault secrets to disk, similar to Consul Template [[GH-7652](https://github.com/hashicorp/vault/pull/7652)]
+ * **Transit Key Type Support**: Signing and verification is now supported with the P-384
+   (secp384r1) and P-521 (secp521r1) ECDSA curves [[GH-7551](https://github.com/hashicorp/vault/pull/7551)] and encryption and
+   decryption is now supported via AES128-GCM96 [[GH-7555](https://github.com/hashicorp/vault/pull/7555)]
+ * **SSRF Protection for Vault Agent**: Vault Agent has a configuration option to
+   require a specific header before allowing requests [[GH-7627](https://github.com/hashicorp/vault/pull/7627)]
+ * **AWS Auth Method Root Rotation**: The credential used by the AWS auth method can
+   now be rotated, to ensure that only Vault knows the credentials it is using [[GH-7131](https://github.com/hashicorp/vault/pull/7131)]
+ * **New UI Features**: The UI now supports managing users and groups for the
+   Userpass, Cert, Okta, and Radius auth methods.
+ * **Shamir with Stored Master Key**: The on disk format for Shamir seals has changed,
+   allowing for a secondary cluster using Shamir downstream from a primary cluster
+   using Auto Unseal. [[GH-7694](https://github.com/hashicorp/vault/pull/7694)]
+ * **Stackdriver Metrics Sink**: Vault can now send metrics to
+   [Stackdriver](https://cloud.google.com/stackdriver/). See the [configuration
+   documentation](https://www.vaultproject.io/docs/config/index.html) for
+   details.
[[GH-6957](https://github.com/hashicorp/vault/pull/6957)]
+ * **Filtered Paths Replication (Enterprise)**: Based on the predecessor Filtered Mount Replication,
+   Filtered Paths Replication now allows filtering of namespaces in addition to mounts.
+   With this feature, Filtered Mount Replication should be considered deprecated.
+ * **Token Renewal via Accessor**: Tokens can now be renewed via the accessor value through
+   the new `auth/token/renew-accessor` endpoint if the caller's token has
+   permission to access that endpoint.
+ * **Improved Integrated Storage (Beta)**: Improved raft write performance, added support for
+   non-voter nodes, along with UI support for: using raft storage, joining a raft cluster,
+   and downloading and restoring a snapshot.
+
+IMPROVEMENTS:
+
+ * agent: Add ability to set the TLS SNI name used by Agent [[GH-7519](https://github.com/hashicorp/vault/pull/7519)]
+ * agent & api: Change default renewer behavior to ignore 5XX errors [[GH-7733](https://github.com/hashicorp/vault/pull/7733)]
+ * auth/jwt: The redirect callback host may now be specified for CLI logins
+   [[GH-71](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/71)]
+ * auth/jwt: Bound claims may now contain boolean values [[GH-73](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/73)]
+ * auth/jwt: CLI logins can now open the browser when running in WSL [[GH-77](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/77)]
+ * core: Exit ScanView if context has been cancelled [[GH-7419](https://github.com/hashicorp/vault/pull/7419)]
+ * core: re-encrypt barrier and recovery keys if the unseal key is updated
+   [[GH-7493](https://github.com/hashicorp/vault/pull/7493)]
+ * core: Don't advertise the full set of TLS 1.2 cipher suites on the cluster
+   port, even though only strong ciphers were used [[GH-7487](https://github.com/hashicorp/vault/pull/7487)]
+ * core (enterprise): Add background seal re-wrap
+ * core/metrics: Add config parameter to allow unauthenticated sys/metrics
+   access [[GH-7550](https://github.com/hashicorp/vault/pull/7550)]; see the listener sketch below
+ * metrics: Upgrade DataDog library to improve performance [[GH-7794](https://github.com/hashicorp/vault/pull/7794)]
+ * replication (enterprise): Write-Ahead-Log entries will not duplicate the
+   data belonging to the encompassing physical entries of the transaction,
+   thereby improving the performance and storage capacity.
+ * replication (enterprise): Added more replication metrics
+ * replication (enterprise): Reindex process now compares subpages for a more
+   accurate indexing process.
+ * replication (enterprise): Reindex API now accepts a new `skip_flush`
+   parameter indicating all the changes should not be flushed while the tree is
+   locked.
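+
+As a minimal sketch of the unauthenticated sys/metrics option noted above (the listener address is illustrative), the parameter lives in the listener's telemetry block:
+
+```hcl
+listener "tcp" {
+  address = "127.0.0.1:8200" # illustrative listener address
+
+  telemetry {
+    # Allow reads of /v1/sys/metrics without a Vault token.
+    unauthenticated_metrics_access = true
+  }
+}
+```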
+
+ * secrets/aws: The root config can now be read [[GH-7245](https://github.com/hashicorp/vault/pull/7245)]
+ * secrets/aws: Role paths may now contain the '@' character [[GH-7553](https://github.com/hashicorp/vault/pull/7553)]
+ * secrets/database/cassandra: Add ability to skip verification of connection
+   [[GH-7614](https://github.com/hashicorp/vault/pull/7614)]
+ * secrets/gcp: Fix panic during rollback if the roleset has been deleted
+   [[GH-52](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/52)]
+ * storage/azure: Add config parameter to Azure storage backend to allow
+   specifying the ARM endpoint [[GH-7567](https://github.com/hashicorp/vault/pull/7567)]
+ * storage/cassandra: Improve storage efficiency by eliminating unnecessary
+   copies of value data [[GH-7199](https://github.com/hashicorp/vault/pull/7199)]
+ * storage/raft: Improve raft write performance by utilizing FSM Batching
+   [[GH-7527](https://github.com/hashicorp/vault/pull/7527)]
+ * storage/raft: Add support for non-voter nodes [[GH-7634](https://github.com/hashicorp/vault/pull/7634)]
+ * sys: Add a new `sys/host-info` endpoint for querying information about
+   the host [[GH-7330](https://github.com/hashicorp/vault/pull/7330)]
+ * sys: Add a new set of endpoints under `sys/pprof/` that allows profiling
+   information to be extracted [[GH-7473](https://github.com/hashicorp/vault/pull/7473)]
+ * sys: Add endpoint that counts the total number of active identity entities
+   [[GH-7541](https://github.com/hashicorp/vault/pull/7541)]
+ * sys: `sys/seal-status` now has a `storage_type` field denoting what type of
+   storage the cluster is configured to use
+ * sys: Add a new `sys/internal/counters/tokens` endpoint, that counts the
+   total number of active service token accessors in the shared token storage.
+   [[GH-7541](https://github.com/hashicorp/vault/pull/7541)]
+ * sys/config: Add a new endpoint under `sys/config/state/sanitized` that
+   returns the configuration state of the server. It excludes config values
+   from `storage`, `ha_storage`, and `seal` stanzas and some values
+   from `telemetry` due to potential sensitive entries in those fields.
+ * ui: when using raft storage, you can now join a raft cluster, download a
+   snapshot, and restore a snapshot from the UI [[GH-7410](https://github.com/hashicorp/vault/pull/7410)]
+ * ui: clarify when secret version is deleted in the secret version history
+   dropdown [[GH-7714](https://github.com/hashicorp/vault/pull/7714)]
+
+BUG FIXES:
+
+ * agent: Fix a data race on the token value for inmemsink [[GH-7707](https://github.com/hashicorp/vault/pull/7707)]
+ * api: Fix Go API using lease revocation via URL instead of body [[GH-7777](https://github.com/hashicorp/vault/pull/7777)]
+ * api: Allow setting a function to control retry behavior [[GH-7331](https://github.com/hashicorp/vault/pull/7331)]
+ * auth/gcp: Fix a bug where region information in instance groups names could
+   cause an authorization attempt to fail [[GH-74](https://github.com/hashicorp/vault-plugin-auth-gcp/pull/74)]
+ * cli: Fix a bug where a token of an unknown format (e.g. in ~/.vault-token)
+   could cause confusing error messages during `vault login` [[GH-7508](https://github.com/hashicorp/vault/pull/7508)]
+ * cli: Fix a bug where the `namespace list` command with JSON formatting
+   always returned an empty object [[GH-7705](https://github.com/hashicorp/vault/pull/7705)]
+ * cli: Command timeouts are now always specified solely by the
+   `VAULT_CLIENT_TIMEOUT` value.
[[GH-7469](https://github.com/hashicorp/vault/pull/7469)]
+ * core: Don't allow registering a non-root zero TTL token lease. This is purely
+   defense in depth as the lease would be revoked immediately anyways, but
+   there's no real reason to allow registration. [[GH-7524](https://github.com/hashicorp/vault/pull/7524)]
+ * core: Correctly revoke the token that's present in the response auth from an
+   auth/token/ request if there's partial failure during the process. [[GH-7835](https://github.com/hashicorp/vault/pull/7835)]
+ * identity (enterprise): Fixed case-sensitive identity loading in secondary
+   clusters [[GH-7327](https://github.com/hashicorp/vault/pull/7327)]
+ * identity: Ensure only replication primary stores the identity case sensitivity state [[GH-7820](https://github.com/hashicorp/vault/pull/7820)]
+ * raft: Fixed VAULT_CLUSTER_ADDR env being ignored at startup [[GH-7619](https://github.com/hashicorp/vault/pull/7619)]
+ * secrets/pki: Don't allow duplicate SAN names in issued certs [[GH-7605](https://github.com/hashicorp/vault/pull/7605)]
+ * sys/health: Pay attention to the values provided for `standbyok` and
+   `perfstandbyok` rather than simply using their presence as a key to flip on
+   that behavior [[GH-7323](https://github.com/hashicorp/vault/pull/7323)]
+ * ui: using the `wrapped_token` query param will work with `redirect_to` and
+   will automatically log in as intended [[GH-7398](https://github.com/hashicorp/vault/pull/7398)]
+ * ui: fix an error when initializing from the UI using PGP keys [[GH-7542](https://github.com/hashicorp/vault/pull/7542)]
+ * ui: show all active kv v2 secret versions even when `delete_version_after` is configured [[GH-7685](https://github.com/hashicorp/vault/pull/7685)]
+ * ui: Ensure that items in the top navigation link to pages that users have access to [[GH-7590](https://github.com/hashicorp/vault/pull/7590)]
+
+## 1.2.7
+### August 27th, 2020
+
+NOTE:
+
+All security content from 1.5.2, 1.5.1, 1.4.5, 1.4.4, 1.3.9, 1.3.8, 1.2.6, and 1.2.5 has been made fully open source, and the git tags for 1.5.3, 1.4.6, 1.3.10, and 1.2.7 will build correctly for open source users.
+
+BUG FIXES:
+
+* auth/aws: Made header handling for IAM authentication more robust
+
+## 1.2.6.1
+### August 21st, 2020
+### Enterprise Only
+
+NOTE:
+
+Includes correct license in the HSM binary.
+
+## 1.2.6
+### August 20th, 2020
+
+NOTE:
+
+OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected.
+
+KNOWN ISSUES:
+
+* AWS IAM logins may return an error depending on the headers sent with the request.
+  For more details and a workaround, see the [1.2.6 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.2.6)
+* In versions 1.2.6, 1.3.9, 1.4.5, and 1.5.2, enterprise licenses on the HSM build were not incorporated correctly - enterprise
+  customers should use 1.2.6.1, 1.3.9.1, 1.4.5.1, and 1.5.2.1.
+
+## 1.2.5
+### August 20th, 2020
+
+SECURITY:
+
+ * When using the IAM AWS Auth Method, under certain circumstances, values Vault uses to validate identities and roles can be manipulated and bypassed. This vulnerability affects Vault and Vault Enterprise 0.7.1 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16250) (Discovered by Felix Wilhelm of Google Project Zero)
+ * When using the GCP GCE Auth Method, under certain circumstances, values Vault uses to validate GCE VMs can be manipulated and bypassed.
This vulnerability affects Vault and Vault Enterprise 0.8.3 and newer and is fixed in 1.2.5, 1.3.8, 1.4.4, and 1.5.1 (CVE-2020-16251) (Discovered by Felix Wilhelm of Google Project Zero)
+
+KNOWN ISSUES:
+
+* OSS binaries of 1.5.1, 1.4.4, 1.3.8, and 1.2.5 were built without the Vault UI. Enterprise binaries are not affected.
+* AWS IAM logins may return an error depending on the headers sent with the request.
+  For more details and a workaround, see the [1.2.5 Upgrade Guide](https://www.vaultproject.io/docs/upgrading/upgrade-to-1.2.5)
+
+BUG FIXES:
+* seal: (enterprise) Fix issue causing stored seal and recovery keys to be mistaken as sealwrapped values
+
+## 1.2.4 (November 7th, 2019)
+
+SECURITY:
+
+ * In a non-root namespace, revocation of a token scoped to a non-root
+   namespace did not trigger the expected revocation of dynamic secret leases
+   associated with that token. As a result, dynamic secret leases in non-root
+   namespaces may outlive the token that created them. This vulnerability,
+   CVE-2019-18616, affects Vault Enterprise 0.11.0 and newer.
+ * Disaster Recovery secondary clusters did not delete already-replicated data
+   after a mount filter has been created on an upstream Performance secondary
+   cluster. As a result, encrypted secrets may remain replicated on a Disaster
+   Recovery secondary cluster after application of a mount filter excluding
+   those secrets from replication. This vulnerability, CVE-2019-18617, affects
+   Vault Enterprise 0.8 and newer.
+ * Update version of Go to 1.12.12 to fix Go bug golang.org/issue/34960 which
+   corresponds to CVE-2019-17596.
+
+CHANGES:
+
+ * auth/aws: If a custom `sts_endpoint` is configured, Vault Agent and the CLI
+   should provide the corresponding region via the `region` parameter (which
+   already existed as a CLI parameter, and has now been added to Agent). The
+   automatic region detection added to the CLI and Agent in 1.2 has been
+   removed. See the Agent config sketch below.
+
+IMPROVEMENTS:
+
+ * cli: Ignore existing token during CLI login [[GH-7508](https://github.com/hashicorp/vault/pull/7508)]
+ * core: Log proxy settings from environment on startup [[GH-7528](https://github.com/hashicorp/vault/pull/7528)]
+ * core: Cache whether we've been initialized to reduce load on storage [[GH-7549](https://github.com/hashicorp/vault/pull/7549)]
+
+BUG FIXES:
+
+ * agent: Fix handling of gzipped responses [[GH-7470](https://github.com/hashicorp/vault/pull/7470)]
+ * cli: Fix panic when pgp keys list is empty [[GH-7546](https://github.com/hashicorp/vault/pull/7546)]
+ * cli: Command timeouts are now always specified solely by the
+   `VAULT_CLIENT_TIMEOUT` value. [[GH-7469](https://github.com/hashicorp/vault/pull/7469)]
+ * core: add hook for initializing seals for migration [[GH-7666](https://github.com/hashicorp/vault/pull/7666)]
+ * core (enterprise): Migrating from one auto unseal method to another never
+   worked on enterprise, now it does.
+ * identity: Add required field `response_types_supported` to identity token
+   `.well-known/openid-configuration` response [[GH-7533](https://github.com/hashicorp/vault/pull/7533)]
+ * identity: Fixed nil pointer panic when merging entities [[GH-7712](https://github.com/hashicorp/vault/pull/7712)]
+ * replication (Enterprise): Fix issue causing performance standby nodes to
+   disconnect when under high load.
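+
+As a minimal Vault Agent auto-auth sketch for the `region` parameter in the CHANGES entry above (the role name and region are illustrative assumptions, not from this release):
+
+```hcl
+auto_auth {
+  method "aws" {
+    config = {
+      type   = "iam"
+      role   = "dev-role"  # hypothetical Vault role name
+      # Should match the region of the custom sts_endpoint configured
+      # on the server's auth/aws client config.
+      region = "us-west-1"
+    }
+  }
+}
+```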
+
+ * secrets/azure: Fix panic that could occur if client retries timeout [[GH-7793](https://github.com/hashicorp/vault/pull/7793)]
+ * secrets/database: Fix bug in combined DB secrets engine that can result in
+   writes to static-roles endpoints timing out [[GH-7518](https://github.com/hashicorp/vault/pull/7518)]
+ * secrets/pki: Improve tidy to continue when value is nil [[GH-7589](https://github.com/hashicorp/vault/pull/7589)]
+ * ui (Enterprise): Allow kv v2 secrets that are gated by Control Groups to be
+   viewed in the UI [[GH-7504](https://github.com/hashicorp/vault/pull/7504)]
+
+## 1.2.3 (September 12, 2019)
+
+FEATURES:
+
+* **Oracle Cloud (OCI) Integration**: Vault now supports using Oracle Cloud for
+  storage, auto unseal, and authentication.
+
+IMPROVEMENTS:
+
+ * auth/jwt: Groups claim matching now treats a string response as a single
+   element list [[GH-63](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/63)]
+ * auth/kubernetes: enable better support for projected tokens API by allowing
+   user to specify issuer [[GH-65](https://github.com/hashicorp/vault/pull/65)]
+ * auth/pcf: The PCF auth plugin was renamed to the CF auth plugin, maintaining
+   full backwards compatibility [[GH-7346](https://github.com/hashicorp/vault/pull/7346)]
+ * replication: Premium packages now come with unlimited performance standby
+   nodes
+
+BUG FIXES:
+
+ * agent: Allow batch tokens and other non-renewable tokens to be used for
+   agent operations [[GH-7441](https://github.com/hashicorp/vault/pull/7441)]
+ * auth/jwt: Fix an error where newer (v1.2) token_* configuration parameters
+   were not being applied to tokens generated using the OIDC login flow
+   [[GH-67](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/67)]
+ * raft: Fix an incorrect JSON tag on `leader_ca_cert` in the join request [[GH-7393](https://github.com/hashicorp/vault/pull/7393)]
+ * seal/transit: Allow using Vault Agent for transit seal operations [[GH-7441](https://github.com/hashicorp/vault/pull/7441)]
+ * storage/couchdb: Fix a file descriptor leak [[GH-7345](https://github.com/hashicorp/vault/pull/7345)]
+ * ui: Fix a bug where the status menu would disappear when trying to revoke a
+   token [[GH-7337](https://github.com/hashicorp/vault/pull/7337)]
+ * ui: Fix a regression that prevented input of custom items in search-select
+   [[GH-7338](https://github.com/hashicorp/vault/pull/7338)]
+ * ui: Fix an issue with the namespace picker being unable to render nested
+   namespaces named with numbers and sorting of namespaces in the picker
+   [[GH-7333](https://github.com/hashicorp/vault/pull/7333)]
+
+## 1.2.2 (August 15, 2019)
+
+CHANGES:
+
+ * auth/pcf: The signature format has been updated to use the standard Base64
+   encoding instead of the URL-safe variant.
Signatures created using the + previous format will continue to be accepted [PCF-27] + * core: The http response code returned when an identity token key is not found + has been changed from 400 to 404 + +IMPROVEMENTS: + + * identity: Remove 512 entity limit for groups [[GH-7317](https://github.com/hashicorp/vault/pull/7317)] + +BUG FIXES: + + * auth/approle: Fix an error where an empty `token_type` string was not being + correctly handled as `TokenTypeDefault` [[GH-7273](https://github.com/hashicorp/vault/pull/7273)] + * auth/radius: Fix panic when logging in [[GH-7286](https://github.com/hashicorp/vault/pull/7286)] + * ui: the string-list widget will now honor multiline input [[GH-7254](https://github.com/hashicorp/vault/pull/7254)] + * ui: various visual bugs in the KV interface were addressed [[GH-7307](https://github.com/hashicorp/vault/pull/7307)] + * ui: fixed incorrect URL to access help in LDAP auth [[GH-7299](https://github.com/hashicorp/vault/pull/7299)] + +## 1.2.1 (August 6th, 2019) + +BUG FIXES: + + * agent: Fix a panic on creds pulling in some error conditions in `aws` and + `alicloud` auth methods [[GH-7238](https://github.com/hashicorp/vault/pull/7238)] + * auth/approle: Fix error reading role-id on a role created pre-1.2 [[GH-7231](https://github.com/hashicorp/vault/pull/7231)] + * auth/token: Fix sudo check in non-root namespaces on create [[GH-7224](https://github.com/hashicorp/vault/pull/7224)] + * core: Fix health checks with perfstandbyok=true returning the wrong status + code [[GH-7240](https://github.com/hashicorp/vault/pull/7240)] + * ui: The web CLI will now parse input as a shell string, with special + characters escaped [[GH-7206](https://github.com/hashicorp/vault/pull/7206)] + * ui: The UI will now redirect to a page after authentication [[GH-7088](https://github.com/hashicorp/vault/pull/7088)] + * ui (Enterprise): The list of namespaces is now cleared when logging + out [[GH-7186](https://github.com/hashicorp/vault/pull/7186)] + +## 1.2.0 (July 30th, 2019) + +CHANGES: + + * Token store roles use new, common token fields for the values + that overlap with other auth backends. `period`, `explicit_max_ttl`, and + `bound_cidrs` will continue to work, with priority being given to the + `token_` prefixed versions of those parameters. They will also be returned + when doing a read on the role if they were used to provide values initially; + however, in Vault 1.4 if `period` or `explicit_max_ttl` is zero they will no + longer be returned. (`explicit_max_ttl` was already not returned if empty.) + * Due to underlying changes in Go version 1.12 and Go > 1.11.5, Vault is now + stricter about what characters it will accept in path names. Whereas before + it would filter out unprintable characters (and this could be turned off), + control characters and other invalid characters are now rejected within Go's + HTTP library before the request is passed to Vault, and this cannot be + disabled. To continue using these (e.g. for already-written paths), they + must be properly percent-encoded (e.g. `\r` becomes `%0D`, `\x00` becomes + `%00`, and so on). + * The user-configured regions on the AWSKMS seal stanza will now be preferred + over regions set in the enclosing environment. This is a _breaking_ change. + * All values in audit logs now are omitted if they are empty. 
This helps
+   reduce the size of audit log entries by not reproducing keys in each entry
+   that commonly don't contain any value, which can help in cases where audit
+   log entries exceed the maximum UDP packet size, among others.
+ * Both PeriodicFunc and WALRollback functions will be called if both are
+   provided. Previously WALRollback would only be called if PeriodicFunc was
+   not set. See [[GH-6717](https://github.com/hashicorp/vault/pull/6717)] for
+   details.
+ * Vault now uses Go's official dependency management system, Go Modules, to
+   manage dependencies. To both reduce transitive dependencies for
+   API library users and plugin authors, and to work around various conflicts,
+   we have moved various helpers around, mostly under an `sdk/` submodule. A
+   couple of functions have also moved from plugin helper code to the `api/`
+   submodule. If you are a plugin author, take a look at some of our official
+   plugins and the paths they are importing for guidance.
+ * AppRole uses new, common token fields for values that overlap
+   with other auth backends. `period` and `policies` will continue to work,
+   with priority being given to the `token_` prefixed versions of those
+   parameters. They will also be returned when doing a read on the role if they
+   were used to provide values initially.
+ * In AppRole, `"default"` is no longer automatically added to the `policies`
+   parameter. This was a no-op since it would always be added anyway by
+   Vault's core; however, this can now be explicitly disabled with the new
+   `token_no_default_policy` field.
+ * In AppRole, `bound_cidr_list` is no longer returned when reading a role
+ * rollback: Rollback will no longer display log messages when it runs; it will
+   only display messages on error.
+ * Database plugins will now default to 4 `max_open_connections`
+   rather than 2.
+
+FEATURES:
+
+ * **Integrated Storage**: Vault 1.2 includes a _tech preview_ of a new way to
+   manage storage directly within a Vault cluster. This new integrated storage
+   solution is based on the Raft protocol, which is also used to back HashiCorp
+   Consul and HashiCorp Nomad.
+ * **Combined DB credential rotation**: Alternative mode for the Combined DB
+   Secret Engine to automatically rotate existing database account credentials
+   and set Vault as the source of truth for credentials.
+ * **Identity Tokens**: Vault's Identity system can now generate OIDC-compliant
+   ID tokens. These customizable tokens allow encapsulating a signed, verifiable
+   snapshot of identity information and metadata. They can be used by other
+   applications—even those without Vault authorization—as a way of establishing
+   identity based on a Vault entity.
+ * **Pivotal Cloud Foundry plugin**: New auth method using Pivotal Cloud
+   Foundry certificates for Vault authentication.
+ * **ElasticSearch database plugin**: New ElasticSearch database plugin issues
+   unique, short-lived ElasticSearch credentials.
+ * **New UI Features**: An HTTP Request Volume Page and new UI for editing LDAP
+   Users and Groups have been added.
+ * **HA support for Postgres**: PostgreSQL versions >= 9.5 may now be used as
+   an HA storage backend.
+ * **KMIP secrets engine (Enterprise)**: Allows Vault to operate as a KMIP
+   Server, seamlessly brokering cryptographic operations for traditional
+   infrastructure.
+ * Common Token Fields: Auth methods now use common fields for controlling
+   token behavior, making it easier to understand configuration across methods.
+ * **Vault API explorer**: The Vault UI now includes an embedded API explorer
+   where you can browse the endpoints available to you and make requests. To try
+   it out, open the Web CLI and type `api`.
+
+IMPROVEMENTS:
+
+ * agent: Allow EC2 nonce to be passed in [[GH-6953](https://github.com/hashicorp/vault/pull/6953)]
+ * agent: Add optional `namespace` parameter, which sets the default namespace
+   for the auto-auth functionality [[GH-6988](https://github.com/hashicorp/vault/pull/6988)]
+ * agent: Add cert auto-auth method [[GH-6652](https://github.com/hashicorp/vault/pull/6652)]
+ * api: Add support for passing data to delete operations via `DeleteWithData`
+   [[GH-7139](https://github.com/hashicorp/vault/pull/7139)]
+ * audit/file: Dramatically speed up file operations by changing
+   locking/marshaling order [[GH-7024](https://github.com/hashicorp/vault/pull/7024)]
+ * auth/jwt: A JWKS endpoint may now be configured for signature verification [[GH-43](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/43)]
+ * auth/jwt: A new `verbose_oidc_logging` role parameter has been added to help
+   troubleshoot OIDC configuration [[GH-57](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/57)]
+ * auth/jwt: `bound_claims` will now match received claims that are lists if any element
+   of the list is one of the expected values [[GH-50](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/50)]
+ * auth/jwt: Leeways for `nbf` and `exp` are now configurable, as is clock skew
+   leeway [[GH-53](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/53)]
+ * auth/kubernetes: Allow service names/namespaces to be configured as globs
+   [[GH-58](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/58)]
+ * auth/token: Add support for the identity system in the token backend
+   via token roles [[GH-6267](https://github.com/hashicorp/vault/pull/6267)]
+ * auth/token: Add a large set of token configuration options to token store
+   roles [[GH-6662](https://github.com/hashicorp/vault/pull/6662)]
+ * cli: `path-help` now allows `-format=json` to be specified, which will
+   output OpenAPI [[GH-7006](https://github.com/hashicorp/vault/pull/7006)]
+ * cli: Add support for passing parameters to `vault delete` operations
+   [[GH-7139](https://github.com/hashicorp/vault/pull/7139)]
+ * cli: Add a log-format CLI flag that can specify either "standard" or "json"
+   as the log format for the `vault server` command.
[[GH-6840](https://github.com/hashicorp/vault/pull/6840)]
+ * cli: Add `-dev-no-store-token` to allow dev servers to not store the
+   generated token at the tokenhelper location [[GH-7104](https://github.com/hashicorp/vault/pull/7104)]
+ * identity: Allow a group alias' canonical ID to be modified
+ * namespaces: Namespaces can now be created and deleted from performance
+   replication secondaries
+ * plugins: Change the default for `max_open_connections` for DB plugins to 4
+   [[GH-7093](https://github.com/hashicorp/vault/pull/7093)]
+ * replication: Client TLS authentication is now supported when enabling or
+   updating a replication secondary
+ * secrets/database: Cassandra operations will now cancel on client timeout
+   [[GH-6954](https://github.com/hashicorp/vault/pull/6954)]
+ * secrets/kv: Add optional `delete_version_after` parameter, which takes a
+   duration and can be set on the mount and/or the metadata for a specific key
+   [[GH-7005](https://github.com/hashicorp/vault/pull/7005)]
+ * storage/postgres: LIST now performs better on large datasets [[GH-6546](https://github.com/hashicorp/vault/pull/6546)]
+ * storage/s3: A new `path` parameter allows selecting the path within a bucket
+   for Vault data [[GH-7157](https://github.com/hashicorp/vault/pull/7157)]
+ * ui: KV v1 and v2 will now gracefully degrade, allowing a write-without-read
+   workflow in the UI [[GH-6570](https://github.com/hashicorp/vault/pull/6570)]
+ * ui: Many visual improvements with the addition of Toolbars [[GH-6626](https://github.com/hashicorp/vault/pull/6626)], the restyling
+   of the Confirm Action component [[GH-6741](https://github.com/hashicorp/vault/pull/6741)], and using a new set of glyphs for our
+   Icon component [[GH-6736](https://github.com/hashicorp/vault/pull/6736)]
+ * ui: Lazy loading parts of the application so that the total initial payload is
+   smaller [[GH-6718](https://github.com/hashicorp/vault/pull/6718)]
+ * ui: Tabbing to auto-complete in filters will first complete a common prefix if there
+   is one [[GH-6759](https://github.com/hashicorp/vault/pull/6759)]
+ * ui: Removing jQuery from the application makes the initial JS payload smaller [[GH-6768](https://github.com/hashicorp/vault/pull/6768)]
+
+BUG FIXES:
+
+ * audit: Log requests and responses when an invalid wrapping token is provided
+   [[GH-6541](https://github.com/hashicorp/vault/pull/6541)]
+ * audit: Fix bug preventing request counter queries from working with auditing
+   enabled [[GH-6767](https://github.com/hashicorp/vault/pull/6767)]
+ * auth/aws: AWS Roles are now upgraded and saved to the latest version just
+   after the AWS credential plugin is mounted.
[[GH-7025](https://github.com/hashicorp/vault/pull/7025)] + * auth/aws: Fix a case where a panic could stem from a malformed assumed-role ARN + when parsing this value [[GH-6917](https://github.com/hashicorp/vault/pull/6917)] + * auth/aws: Fix an error complaining about a read-only view that could occur + during updating of a role when on a performance replication secondary + [[GH-6926](https://github.com/hashicorp/vault/pull/6926)] + * auth/jwt: Fix a regression introduced in 1.1.1 that disabled checking of client_id + for OIDC logins [[GH-54](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/54)] + * auth/jwt: Fix a panic during OIDC CLI logins that could occur if the Vault server + response is empty [[GH-55](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/55)] + * auth/jwt: Fix issue where OIDC logins might intermittently fail when using + performance standbys [[GH-61](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/61)] + * identity: Fix a case where modifying aliases of an entity could end up + moving the entity into the wrong namespace + * namespaces: Fix a behavior (currently only known to be benign) where we + wouldn't delete policies through the official functions before wiping the + namespaces on deletion + * secrets/database: Escape username/password before using in connection URL + [[GH-7089](https://github.com/hashicorp/vault/pull/7089)] + * secrets/pki: Forward revocation requests to active node when on a + performance standby [[GH-7173](https://github.com/hashicorp/vault/pull/7173)] + * ui: Fix timestamp on some transit keys [[GH-6827](https://github.com/hashicorp/vault/pull/6827)] + * ui: Show Entities and Groups in Side Navigation [[GH-7138](https://github.com/hashicorp/vault/pull/7138)] + * ui: Ensure dropdown updates selected item on HTTP Request Metrics page + +## 1.1.4/1.1.5 (July 25th/30th, 2019) + +NOTE: + +Although 1.1.4 was tagged, we realized very soon after the tag was publicly +pushed that an intended fix was accidentally left out. As a result, 1.1.4 was +not officially announced and 1.1.5 should be used as the release after 1.1.3. 
+
+IMPROVEMENTS:
+
+ * identity: Allow a group alias' canonical ID to be modified
+ * namespaces: Improve namespace deletion performance [[GH-6939](https://github.com/hashicorp/vault/pull/6939)]
+ * namespaces: Namespaces can now be created and deleted from performance
+   replication secondaries
+
+BUG FIXES:
+
+ * api: Add backwards compat support for API env vars [[GH-7135](https://github.com/hashicorp/vault/pull/7135)]
+ * auth/aws: Fix a case where a panic could stem from a malformed assumed-role
+   ARN when parsing this value [[GH-6917](https://github.com/hashicorp/vault/pull/6917)]
+ * auth/ldap: Add `use_pre111_group_cn_behavior` flag to allow recovering from
+   a regression caused by a bug fix starting in 1.1.1 [[GH-7208](https://github.com/hashicorp/vault/pull/7208)]
+ * auth/aws: Use a role cache to avoid separate locking paths [[GH-6926](https://github.com/hashicorp/vault/pull/6926)]
+ * core: Fix a deadlock if a panic happens during request handling [[GH-6920](https://github.com/hashicorp/vault/pull/6920)]
+ * core: Fix an issue that may cause key upgrades to not be cleaned up properly
+   [[GH-6949](https://github.com/hashicorp/vault/pull/6949)]
+ * core: Don't shut down if key upgrades fail due to canceled context [[GH-7070](https://github.com/hashicorp/vault/pull/7070)]
+ * core: Fix panic caused by handling requests while vault is inactive
+ * identity: Fix reading entities and groups that have spaces in their names
+   [[GH-7055](https://github.com/hashicorp/vault/pull/7055)]
+ * identity: Ensure entity alias operations properly verify namespace [[GH-6886](https://github.com/hashicorp/vault/pull/6886)]
+ * mfa: Fix a nil pointer panic that could occur if invalid Duo credentials
+   were supplied
+ * replication: Forward step-down on perf standbys to match HA behavior
+ * replication: Fix various read-only storage errors on performance standbys
+ * replication: Stop forwarding before stopping replication to eliminate some
+   possible bad states
+ * secrets/database: Allow Cassandra queries to be canceled [[GH-6954](https://github.com/hashicorp/vault/pull/6954)]
+ * storage/consul: Fix a regression causing vault to not connect to consul over
+   unix sockets [[GH-6859](https://github.com/hashicorp/vault/pull/6859)]
+ * ui: Fix saving of TTL and string array fields generated by OpenAPI [[GH-7094](https://github.com/hashicorp/vault/pull/7094)]
+
+## 1.1.3 (June 5th, 2019)
+
+IMPROVEMENTS:
+
+ * agent: Now supports proxying request query parameters [[GH-6772](https://github.com/hashicorp/vault/pull/6772)]
+ * core: Mount table output now includes a UUID indicating the storage path [[GH-6633](https://github.com/hashicorp/vault/pull/6633)]
+ * core: HTTP server timeout values are now configurable [[GH-6666](https://github.com/hashicorp/vault/pull/6666)]
+ * replication: Improve performance of the reindex operation on secondary clusters
+   when mount filters are in use
+ * replication: Replication status API now returns the state and progress of a reindex
+
+BUG FIXES:
+
+ * api: Return the Entity ID in the secret output [[GH-6819](https://github.com/hashicorp/vault/pull/6819)]
+ * auth/jwt: Take bound claims into account when determining if there is at least one
+   bound constraint [[GH-49](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/49)]
+ * auth/okta: Fix handling of group names containing slashes [[GH-6665](https://github.com/hashicorp/vault/pull/6665)]
+ * cli: Add deprecated stored-shares flag back to the init command [[GH-6677](https://github.com/hashicorp/vault/pull/6677)]
+ * cli: Fix
a panic when the KV command would return no data [[GH-6675](https://github.com/hashicorp/vault/pull/6675)]
+ * cli: Fix issue causing CLI list operations to not return proper format when
+   there is an empty response [[GH-6776](https://github.com/hashicorp/vault/pull/6776)]
+ * core: Correctly honor non-HMAC request keys when auditing requests [[GH-6653](https://github.com/hashicorp/vault/pull/6653)]
+ * core: Fix the `x-vault-unauthenticated` value in OpenAPI for a number of
+   endpoints [[GH-6654](https://github.com/hashicorp/vault/pull/6654)]
+ * core: Fix issue where some OpenAPI parameters were incorrectly listed as
+   being sent as a header [[GH-6679](https://github.com/hashicorp/vault/pull/6679)]
+ * core: Fix issue that would allow duplicate mount names to be used [[GH-6771](https://github.com/hashicorp/vault/pull/6771)]
+ * namespaces: Fix behavior when using `root` instead of `root/` as the
+   namespace header value
+ * pki: fix a panic when a client submits a null value [[GH-5679](https://github.com/hashicorp/vault/pull/5679)]
+ * replication: Properly update mount entry cache on a secondary to apply all
+   new values after a tune
+ * replication: Properly close connection on bootstrap error
+ * replication: Fix an issue causing startup problems if a namespace policy
+   wasn't replicated properly
+ * replication: Fix longer than necessary WAL replay during an initial reindex
+ * replication: Fix error during mount filter invalidation on DR secondary clusters
+ * secrets/ad: Make time buffer configurable [AD-35]
+ * secrets/gcp: Check for nil config when getting credentials [[GH-35](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/35)]
+ * secrets/gcp: Fix error checking in some cases where the returned value could
+   be 403 instead of 404 [[GH-37](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/37)]
+ * secrets/gcpkms: Disable key rotation when deleting a key [[GH-10](https://github.com/hashicorp/vault-plugin-secrets-gcpkms/pull/10)]
+ * storage/consul: recognize `https://` address even if scheme not specified
+   [[GH-6602](https://github.com/hashicorp/vault/pull/6602)]
+ * storage/dynamodb: Fix an issue where a deleted lock key in DynamoDB (HA)
+   could cause constant switching of the active node [[GH-6637](https://github.com/hashicorp/vault/pull/6637)]
+ * storage/dynamodb: Eliminate a high-CPU condition that could occur if an
+   error was received from the DynamoDB API [[GH-6640](https://github.com/hashicorp/vault/pull/6640)]
+ * storage/gcs: Correctly use configured chunk size values [[GH-6655](https://github.com/hashicorp/vault/pull/6655)]
+ * storage/mssql: Use the correct database when pre-created schemas exist
+   [[GH-6356](https://github.com/hashicorp/vault/pull/6356)]
+ * ui: Fix issue with select arrows on drop down menus [[GH-6627](https://github.com/hashicorp/vault/pull/6627)]
+ * ui: Fix an issue where sensitive input values weren't being saved to the
+   server [[GH-6586](https://github.com/hashicorp/vault/pull/6586)]
+ * ui: Fix web cli parsing when using quoted values [[GH-6755](https://github.com/hashicorp/vault/pull/6755)]
+ * ui: Fix a namespace workflow mapping identities from external namespaces by
+   allowing arbitrary input in search-select component [[GH-6728](https://github.com/hashicorp/vault/pull/6728)]
+
+## 1.1.2 (April 18th, 2019)
+
+This is a bug fix release containing the two items below. It is otherwise
+unchanged from 1.1.1.
+
+BUG FIXES:
+
+ * auth/okta: Fix a potential dropped error [[GH-6592](https://github.com/hashicorp/vault/pull/6592)]
+ * secrets/kv: Fix a regression on upgrade where a KVv2 mount could fail to be
+   mounted on unseal if it had previously been mounted but not written to
+   [[GH-31](https://github.com/hashicorp/vault-plugin-secrets-kv/pull/31)]
+
+## 1.1.1 (April 11th, 2019)
+
+SECURITY:
+
+ * Given: (a) performance replication is enabled; (b) performance standbys are
+   in use on the performance replication secondary cluster; and (c) mount
+   filters are in use. If a mount that was previously available to a secondary
+   was updated to be filtered out, although the data would be removed from the
+   secondary cluster, the in-memory cache of the data would not be purged on
+   the performance standby nodes. As a result, the previously-available data
+   could still be read from memory if it was ever read from disk, and if this
+   included mount configuration data this could result in token or lease
+   issuance. The issue is fixed in this release; in prior releases either an
+   active node changeover (such as a step-down) or a restart of the standby
+   nodes is sufficient to cause the performance standby nodes to clear their
+   cache. A CVE is in the process of being issued; the number is
+   CVE-2019-11075.
+ * Roles in the JWT Auth backend using the OIDC login flow (i.e. role_type of
+   "oidc") were not enforcing bound_cidrs restrictions, if any were configured
+   for the role. This issue did not affect roles of type "jwt".
+
+CHANGES:
+
+ * auth/jwt: Disallow logins of role_type "oidc" via the `/login` path [[GH-38](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/38)]
+ * core/acl: New ordering defines which policy wins when there are multiple
+   inexact matches and at least one path contains `+`. `+*` is now illegal in
+   policy paths. The previous behavior simply selected any segment-wildcard
+   path that matched. [[GH-6532](https://github.com/hashicorp/vault/pull/6532)]
+ * replication: Due to technical limitations, mounting and unmounting was not
+   previously possible from a performance secondary. These limitations have been
+   resolved, and these operations may now be run from a performance secondary.
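+
+For illustration, the `+` segment-wildcard semantics described above can be
+exercised by writing a policy through the Go API client; a minimal sketch
+(the policy name and path below are hypothetical, not from this release):
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/hashicorp/vault/api"
+)
+
+func main() {
+	// Reads VAULT_ADDR and VAULT_TOKEN from the environment.
+	client, err := api.NewClient(api.DefaultConfig())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// `+` matches exactly one path segment; `+*` is now rejected as illegal.
+	rules := `
+path "secret/+/team-a" {
+  capabilities = ["read", "list"]
+}
+`
+	if err := client.Sys().PutPolicy("team-a-read", rules); err != nil {
+		log.Fatal(err)
+	}
+}
+```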
+
+IMPROVEMENTS:
+
+ * agent: Allow AppRole auto-auth without a secret-id [[GH-6324](https://github.com/hashicorp/vault/pull/6324)]
+ * auth/gcp: Cache clients to improve performance and reduce open file usage
+ * auth/jwt: Bound claims validation will now allow matching the received
+   claims against a list of expected values [[GH-41](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/41)]
+ * secret/gcp: Cache clients to improve performance and reduce open file usage
+ * replication: Mounting/unmounting/remounting/mount-tuning is now supported
+   from a performance secondary cluster
+ * ui: Support for authentication via the RADIUS auth method [[GH-6488](https://github.com/hashicorp/vault/pull/6488)]
+ * ui: Navigating away from secret list view will clear any page-specific
+   filter that was applied [[GH-6511](https://github.com/hashicorp/vault/pull/6511)]
+ * ui: Improved the display of OIDC auth errors [[GH-6553](https://github.com/hashicorp/vault/pull/6553)]
+
+BUG FIXES:
+
+ * agent: Allow auto-auth to be used with caching without having to define any
+   sinks [[GH-6468](https://github.com/hashicorp/vault/pull/6468)]
+ * agent: Disallow some nonsensical config file combinations [[GH-6471](https://github.com/hashicorp/vault/pull/6471)]
+ * auth/ldap: Fix CN check not working if CN was not all in uppercase [[GH-6518](https://github.com/hashicorp/vault/pull/6518)]
+ * auth/jwt: The CLI helper for OIDC logins will now open the browser to the correct
+   URL when running on Windows [[GH-37](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/37)]
+ * auth/jwt: Fix OIDC login issue where configured TLS certs weren't being used [[GH-40](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/40)]
+ * auth/jwt: Fix an issue where the `oidc_scopes` parameter was not being included in
+   the response to a role read request [[GH-35](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/35)]
+ * core: Fix seal migration case when migrating to Shamir and a seal block
+   wasn't explicitly specified [[GH-6455](https://github.com/hashicorp/vault/pull/6455)]
+ * core: Fix unwrapping when using namespaced wrapping tokens [[GH-6536](https://github.com/hashicorp/vault/pull/6536)]
+ * core: Fix incorrect representation of required properties in OpenAPI output
+   [[GH-6490](https://github.com/hashicorp/vault/pull/6490)]
+ * core: Fix deadlock that could happen when using the UI [[GH-6560](https://github.com/hashicorp/vault/pull/6560)]
+ * identity: Fix updating groups removing existing members [[GH-6527](https://github.com/hashicorp/vault/pull/6527)]
+ * identity: Properly invalidate group alias in performance secondary [[GH-6564](https://github.com/hashicorp/vault/pull/6564)]
+ * identity: Use namespace context when loading entities and groups to ensure
+   merging of duplicate entries works properly [[GH-6563](https://github.com/hashicorp/vault/pull/6563)]
+ * replication: Fix performance standby election failure [[GH-6561](https://github.com/hashicorp/vault/pull/6561)]
+ * replication: Fix mount filter invalidation on performance standby nodes
+ * replication: Fix license reloading on performance standby nodes
+ * replication: Fix handling of control groups on performance standby nodes
+ * replication: Fix some forwarding scenarios with request bodies using
+   performance standby nodes [[GH-6538](https://github.com/hashicorp/vault/pull/6538)]
+ * secret/gcp: Fix roleset binding when using JSON [[GH-27](https://github.com/hashicorp/vault-plugin-secrets-gcp/pull/27)]
+ * secret/pki: Use `uri_sans`
param when not using CSR parameters [[GH-6505](https://github.com/hashicorp/vault/pull/6505)]
+ * storage/dynamodb: Fix a race condition possible in HA configurations that could
+   leave the cluster without a leader [[GH-6512](https://github.com/hashicorp/vault/pull/6512)]
+ * ui: Fix an issue where in production builds OpenAPI model generation was
+   failing, causing any form using it to render labels with missing fields [[GH-6474](https://github.com/hashicorp/vault/pull/6474)]
+ * ui: Fix nav-hiding issue when moving between namespaces [[GH-6473](https://github.com/hashicorp/vault/pull/6473)]
+ * ui: Secrets will always show in the nav regardless of access to cubbyhole [[GH-6477](https://github.com/hashicorp/vault/pull/6477)]
+ * ui: fix SSH OTP generation [[GH-6540](https://github.com/hashicorp/vault/pull/6540)]
+ * ui: add polyfill to load UI in IE11 [[GH-6567](https://github.com/hashicorp/vault/pull/6567)]
+ * ui: Fix issue where some elements would fail to work properly if using ACLs
+   with segment-wildcard paths (`/+/` segments) [[GH-6525](https://github.com/hashicorp/vault/pull/6525)]
+
+## 1.1.0 (March 18th, 2019)
+
+CHANGES:
+
+ * auth/jwt: The `groups_claim_delimiter_pattern` field has been removed. If the
+   groups claim is not at the top level, it can now be specified as a
+   [JSONPointer](https://tools.ietf.org/html/rfc6901).
+ * auth/jwt: Roles now have a "role type" parameter with a default type of
+   "oidc". To configure new JWT roles, a role type of "jwt" must be explicitly
+   specified.
+ * cli: CLI commands deprecated in 0.9.2 are now removed. Please see the CLI
+   help/warning output in previous versions of Vault for updated commands.
+ * core: Vault no longer automatically mounts a K/V backend at the "secret/"
+   path when initializing Vault
+ * core: Vault's cluster port will now be open at all times on HA standby nodes
+ * plugins: Vault no longer supports running netRPC plugins. These were
+   deprecated in favor of gRPC based plugins and any plugin built since 0.9.4
+   defaults to gRPC. Older plugins may need to be recompiled against the latest
+   Vault dependencies.
+
+FEATURES:
+
+ * **Vault Agent Caching**: Vault Agent can now be configured to act as a
+   caching proxy to Vault. Clients can send requests to Vault Agent and the
+   request will be proxied to the Vault server and cached locally in Agent.
+   Currently Agent will cache generated leases and tokens and keep them
+   renewed. The proxy can also use the Auto Auth feature so clients do not need
+   to authenticate to Vault, but rather can make requests to Agent and have
+   Agent fully manage token lifecycle.
+ * **OIDC Redirect Flow Support**: The JWT auth backend now supports OIDC
+   roles. These allow authentication via an OIDC-compliant provider via the
+   user's browser. The login may be initiated from the Vault UI or through
+   the `vault login` command.
+ * **ACL Path Wildcard**: ACL paths can now use the `+` character to enable
+   wildcard matching for a single directory in the path definition.
+ * **Transit Auto Unseal**: Vault can now be configured to use the Transit
+   Secret Engine in another Vault cluster as an auto unseal provider.
+
+IMPROVEMENTS:
+
+ * auth/jwt: A default role can be set. It will be used during JWT/OIDC logins if
+   a role is not specified.
+ * auth/jwt: Arbitrary claims data can now be copied into token & alias metadata.
+ * auth/jwt: An arbitrary set of bound claims can now be configured for a role.
+ * auth/jwt: The name "oidc" has been added as an alias for the jwt backend.
Either
+   name may be specified in the `auth enable` command.
+ * command/server: A warning will be printed when 'tls_cipher_suites' includes a
+   blacklisted cipher suite or all cipher suites are blacklisted by the HTTP/2
+   specification [[GH-6300](https://github.com/hashicorp/vault/pull/6300)]
+ * core/metrics: Prometheus pull support using a new sys/metrics endpoint. [[GH-5308](https://github.com/hashicorp/vault/pull/5308)]
+ * core: On non-Windows platforms a SIGUSR2 will make the server log a dump of
+   all running goroutines' stack traces for debugging purposes [[GH-6240](https://github.com/hashicorp/vault/pull/6240)]
+ * replication: The initial replication indexing process on newly initialized or upgraded
+   clusters now runs asynchronously
+ * sentinel: Add token namespace id and path, available in rules as
+   token.namespace.id and token.namespace.path
+ * ui: The UI is now leveraging OpenAPI definitions to pull in fields for various forms.
+   This means it will not be necessary to add fields on the Go and JS sides in the future.
+   [[GH-6209](https://github.com/hashicorp/vault/pull/6209)]
+
+BUG FIXES:
+
+ * auth/jwt: Apply `bound_claims` validation across all login paths
+ * auth/jwt: Update `bound_audiences` validation during non-OIDC logins to accept
+   any matched audience, as documented and handled in OIDC logins [[GH-30](https://github.com/hashicorp/vault-plugin-auth-jwt/pull/30)]
+ * auth/token: Fix issue where empty values for token role update call were
+   ignored [[GH-6314](https://github.com/hashicorp/vault/pull/6314)]
+ * core: The `operator migrate` command will no longer hang on empty key names
+   [[GH-6371](https://github.com/hashicorp/vault/pull/6371)]
+ * identity: Fix a panic at login when external group has a nil alias [[GH-6230](https://github.com/hashicorp/vault/pull/6230)]
+ * namespaces: Clear out identity store items upon namespace deletion
+ * replication/perfstandby: Fixed a bug causing performance standbys to wait
+   longer than necessary after forwarding a write to the active node
+ * replication/mountfilter: Fix a deadlock that could occur when mount filters
+   were updated [[GH-6426](https://github.com/hashicorp/vault/pull/6426)]
+ * secret/kv: Fix issue where a v1→v2 upgrade could run on a performance
+   standby when using a local mount
+ * secret/ssh: Fix for a bug where attempting to delete the last ssh role
+   in the zeroaddress configuration could fail [[GH-6390](https://github.com/hashicorp/vault/pull/6390)]
+ * secret/totp: Uppercase provided keys so they don't fail base32 validation
+   [[GH-6400](https://github.com/hashicorp/vault/pull/6400)]
+ * secret/transit: Multiple HMAC, Sign or Verify operations can now be
+   performed with one API call using the new `batch_input` parameter [[GH-5875](https://github.com/hashicorp/vault/pull/5875)]
+ * sys: `sys/internal/ui/mounts` will no longer return secret or auth mounts
+   that have been filtered. Similarly, `sys/internal/ui/mount/:path` will
+   return an error response if a filtered mount path is requested.
[[GH-6412](https://github.com/hashicorp/vault/pull/6412)]
+ * ui: Fix for a bug where you couldn't access the data tab after clicking on
+   wrap details on the unwrap page [[GH-6404](https://github.com/hashicorp/vault/pull/6404)]
+ * ui: Fix an issue where the policies tab was erroneously hidden [[GH-6301](https://github.com/hashicorp/vault/pull/6301)]
+ * ui: Fix encoding issues with kv interfaces [[GH-6294](https://github.com/hashicorp/vault/pull/6294)]
+
+## 1.0.3.1 (March 14th, 2019) (Enterprise Only)
+
+SECURITY:
+
+ * A regression was fixed in replication mount filter code introduced in Vault
+   1.0 that caused the underlying filtered data to be replicated to
+   secondaries. This data was not accessible to users via Vault's API, but via a
+   combination of privileged configuration file changes/Vault commands it could
+   be read. Upgrading to this version or 1.1 will fix this issue and cause the
+   replicated data to be deleted from filtered secondaries. More information
+   was sent to customer contacts on file.
+
+## 1.0.3 (February 12th, 2019)
+
+CHANGES:
+
+ * New AWS authentication plugin mounts will default to using the generated
+   role ID as the Identity alias name. This applies to both EC2 and IAM auth.
+   Existing mounts that explicitly set this value will not be affected but
+   mounts that specified no preference will switch over on upgrade.
+ * The default policy now allows a token to look up its associated identity
+   entity either by name or by id [[GH-6105](https://github.com/hashicorp/vault/pull/6105)]
+ * The Vault UI's navigation and onboarding wizard now only displays items that
+   are permitted in a user's policy [[GH-5980](https://github.com/hashicorp/vault/pull/5980), [GH-6094](https://github.com/hashicorp/vault/pull/6094)]
+ * An issue was fixed that caused recovery keys to not work on secondary
+   clusters when using a different unseal mechanism/key than the primary. This
+   would be hit if the cluster was rekeyed or initialized after 1.0. We recommend
+   rekeying the recovery keys on the primary cluster if you meet the above
+   requirements.
+
+FEATURES:
+
+ * **cURL Command Output**: CLI commands can now use the `-output-curl-string`
+   flag to print out an equivalent cURL command.
+ * **Response Headers From Plugins**: Plugins can now send back headers that
+   will be included in the response to a client. The set of allowed headers can
+   be managed by the operator.
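+
+A rough sketch of the response-headers feature from a plugin author's
+perspective, assuming a custom backend built on the plugin framework of this
+era (`github.com/hashicorp/vault/logical`); the handler, data, and header
+below are illustrative rather than a definitive implementation:
+
+```go
+package mockplugin
+
+import (
+	"context"
+
+	"github.com/hashicorp/vault/logical"
+	"github.com/hashicorp/vault/logical/framework"
+)
+
+type backend struct {
+	*framework.Backend
+}
+
+// handleRead returns data plus an extra response header. Whether the header
+// actually reaches the client depends on the operator-managed allowed-headers
+// list described above.
+func (b *backend) handleRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	return &logical.Response{
+		Data:    map[string]interface{}{"value": "example"},
+		Headers: map[string][]string{"X-Example-Header": {"demo"}},
+	}, nil
+}
+```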
+
+IMPROVEMENTS:
+
+ * auth/aws: AWS EC2 authentication can optionally create entity aliases by
+   role ID [[GH-6133](https://github.com/hashicorp/vault/pull/6133)]
+ * auth/jwt: The supported set of signing algorithms is now configurable [JWT
+   plugin [GH-16](https://github.com/hashicorp/vault/pull/16)]
+ * core: When starting from an uninitialized state, HA nodes will now attempt
+   to auto-unseal using a configured auto-unseal mechanism after the active
+   node initializes Vault [[GH-6039](https://github.com/hashicorp/vault/pull/6039)]
+ * secret/database: Add socket keepalive option for Cassandra [[GH-6201](https://github.com/hashicorp/vault/pull/6201)]
+ * secret/ssh: Add signed key constraints, allowing enforcement of key types
+   and minimum key sizes [[GH-6030](https://github.com/hashicorp/vault/pull/6030)]
+ * secret/transit: ECDSA signatures can now be marshaled in JWS-compatible
+   fashion [[GH-6077](https://github.com/hashicorp/vault/pull/6077)]
+ * storage/etcd: Support SRV service names [[GH-6087](https://github.com/hashicorp/vault/pull/6087)]
+ * storage/aws: Support specifying a KMS key ID for server-side encryption
+   [[GH-5996](https://github.com/hashicorp/vault/pull/5996)]
+
+BUG FIXES:
+
+ * core: Fix a rare case where a standby whose connection to the active node is
+   entirely torn down, and which then reconnects to the same active node, may not
+   successfully resume operation [[GH-6167](https://github.com/hashicorp/vault/pull/6167)]
+ * cors: Don't duplicate headers when they're written [[GH-6207](https://github.com/hashicorp/vault/pull/6207)]
+ * identity: Persist merged entities only on the primary [[GH-6075](https://github.com/hashicorp/vault/pull/6075)]
+ * replication: Fix a potential race when a token is created and then used with
+   a performance standby very quickly, before an associated entity has been
+   replicated. If the entity is not found in this scenario, the request will
+   forward to the active node.
+ * replication: Fix issue where recovery keys would not work on secondary
+   clusters if using a different unseal mechanism than the primary.
+ * replication: Fix a "failed to register lease" error when using performance
+   standbys
+ * storage/postgresql: The `Get` method will now return an Entry object with
+   the `Key` member correctly populated with the full path that was requested
+   instead of just the last path element [[GH-6044](https://github.com/hashicorp/vault/pull/6044)]
+
+## 1.0.2 (January 15th, 2019)
+
+SECURITY:
+
+ * When creating a child token from a parent with `bound_cidrs`, the list of
+   CIDRs would not be propagated to the child token, allowing the child token
+   to be used from any address.
+
+CHANGES:
+
+ * secret/aws: Role now returns `credential_type` instead of `credential_types`
+   to match role input. If a legacy role can supply more than one
+   credential type, the types will be concatenated with a `,`.
+ * physical/dynamodb, autoseal/aws: Instead of Vault performing environment
+   variable handling, and overriding static (config file) values if found, we
+   use the default AWS SDK env handling behavior, which also looks for
+   deprecated values. If you were previously providing both config values and
+   environment values, please ensure the config values are unset if you want to
+   use environment values.
+ * Namespaces (Enterprise): Providing "root" as the header value for
+   `X-Vault-Namespace` will perform the request on the root namespace. This is
+   equivalent to providing an empty value.
Creating a namespace called "root" in + the root namespace is disallowed. + +FEATURES: + + * **InfluxDB Database Plugin**: Use Vault to dynamically create and manage InfluxDB + users + +IMPROVEMENTS: + + * auth/aws: AWS EC2 authentication can optionally create entity aliases by + image ID [[GH-5846](https://github.com/hashicorp/vault/pull/5846)] + * autoseal/gcpckms: Reduce the required permissions for the GCPCKMS autounseal + [[GH-5999](https://github.com/hashicorp/vault/pull/5999)] + * physical/foundationdb: TLS support added. [[GH-5800](https://github.com/hashicorp/vault/pull/5800)] + +BUG FIXES: + + * api: Fix a couple of places where we were using the `LIST` HTTP verb + (necessary to get the right method into the wrapping lookup function) and + not then modifying it to a `GET`; although this is officially the verb Vault + uses for listing and it's fully legal to use custom verbs, since many WAFs + and API gateways choke on anything outside of RFC-standardized verbs we fall + back to `GET` [[GH-6026](https://github.com/hashicorp/vault/pull/6026)] + * autoseal/aws: Fix reading session tokens when AWS access key/secret key are + also provided [[GH-5965](https://github.com/hashicorp/vault/pull/5965)] + * command/operator/rekey: Fix help output showing `-delete-backup` when it + should show `-backup-delete` [[GH-5981](https://github.com/hashicorp/vault/pull/5981)] + * core: Fix bound_cidrs not being propagated to child tokens + * replication: Correctly forward identity entity creation that originates from + performance standby nodes (Enterprise) + * secret/aws: Make input `credential_type` match the output type (string, not + array) [[GH-5972](https://github.com/hashicorp/vault/pull/5972)] + * secret/cubbyhole: Properly cleanup cubbyhole after token revocation [[GH-6006](https://github.com/hashicorp/vault/pull/6006)] + * secret/pki: Fix reading certificates on windows with the file storage backend [[GH-6013](https://github.com/hashicorp/vault/pull/6013)] + * ui (enterprise): properly display perf-standby count on the license page [[GH-5971](https://github.com/hashicorp/vault/pull/5971)] + * ui: fix disappearing nested secrets and go to the nearest parent when deleting + a secret - [[GH-5976](https://github.com/hashicorp/vault/pull/5976)] + * ui: fix error where deleting an item via the context menu would fail if the + item name contained dots [[GH-6018](https://github.com/hashicorp/vault/pull/6018)] + * ui: allow saving of kv secret after an errored save attempt [[GH-6022](https://github.com/hashicorp/vault/pull/6022)] + * ui: fix display of kv-v1 secret containing a key named "keys" [[GH-6023](https://github.com/hashicorp/vault/pull/6023)] + +## 1.0.1 (December 14th, 2018) + +SECURITY: + + * Update version of Go to 1.11.3 to fix Go bug + https://github.com/golang/go/issues/29233 which corresponds to + CVE-2018-16875 + * Database user revocation: If a client has configured custom revocation + statements for a role with a value of `""`, that statement would be executed + verbatim, resulting in a lack of actual revocation but success for the + operation. Vault will now strip empty statements from any provided; as a + result if an empty statement is provided, it will behave as if no statement + is provided, falling back to the default revocation statement. + +CHANGES: + + * secret/database: On role read, empty statements will be returned as empty + slices instead of potentially being returned as JSON null values. 
This makes + it more in line with other parts of Vault and makes it easier for statically + typed languages to interpret the values. + +IMPROVEMENTS: + + * cli: Strip iTerm extra characters from password manager input [[GH-5837](https://github.com/hashicorp/vault/pull/5837)] + * command/server: Setting default kv engine to v1 in -dev mode can now be + specified via -dev-kv-v1 [[GH-5919](https://github.com/hashicorp/vault/pull/5919)] + * core: Add operationId field to OpenAPI output [[GH-5876](https://github.com/hashicorp/vault/pull/5876)] + * ui: Added ability to search for Group and Policy IDs when creating Groups + and Entities instead of typing them in manually + +BUG FIXES: + + * auth/azure: Cache azure authorizer [15] + * auth/gcp: Remove explicit project for service account in GCE authorizer [[GH-58](https://github.com/hashicorp/vault-plugin-auth-gcp/pull/58)] + * cli: Show correct stored keys/threshold for autoseals [[GH-5910](https://github.com/hashicorp/vault/pull/5910)] + * cli: Fix backwards compatibility fallback when listing plugins [[GH-5913](https://github.com/hashicorp/vault/pull/5913)] + * core: Fix upgrades when the seal config had been created on early versions + of vault [[GH-5956](https://github.com/hashicorp/vault/pull/5956)] + * namespaces: Correctly reload the proper mount when tuning or reloading the + mount [[GH-5937](https://github.com/hashicorp/vault/pull/5937)] + * secret/azure: Cache azure authorizer [19] + * secret/database: Strip empty statements on user input [[GH-5955](https://github.com/hashicorp/vault/pull/5955)] + * secret/gcpkms: Add path for retrieving the public key [[GH-5](https://github.com/hashicorp/vault-plugin-secrets-gcpkms/pull/5)] + * secret/pki: Fix panic that could occur during tidy operation when malformed + data was found [[GH-5931](https://github.com/hashicorp/vault/pull/5931)] + * secret/pki: Strip empty line in ca_chain output [[GH-5779](https://github.com/hashicorp/vault/pull/5779)] + * ui: Fixed a bug where the web CLI was not usable via the `fullscreen` + command - [[GH-5909](https://github.com/hashicorp/vault/pull/5909)] + * ui: Fix a bug where you couldn't write a jwt auth method config [[GH-5936](https://github.com/hashicorp/vault/pull/5936)] + +## 0.11.6 (December 14th, 2018) + +This release contains the three security fixes from 1.0.0 and 1.0.1 and the +following bug fixes from 1.0.0/1.0.1: + + * namespaces: Correctly reload the proper mount when tuning or reloading the + mount [[GH-5937](https://github.com/hashicorp/vault/pull/5937)] + * replication/perfstandby: Fix audit table upgrade on standbys [[GH-5811](https://github.com/hashicorp/vault/pull/5811)] + * replication/perfstandby: Fix redirect on approle update [[GH-5820](https://github.com/hashicorp/vault/pull/5820)] + * secrets/kv: Fix issue where storage version would get incorrectly downgraded + [[GH-5809](https://github.com/hashicorp/vault/pull/5809)] + +It is otherwise identical to 0.11.5. + +## 1.0.0 (December 3rd, 2018) + +SECURITY: + + * When debugging a customer incident we discovered that in the case of + malformed data from an autoseal mechanism, Vault's master key could be + logged in Vault's server log. For this to happen, the data would need to be + modified by the autoseal mechanism after being submitted to it by Vault but + prior to encryption, or after decryption, prior to it being returned to + Vault. To put it another way, it requires the data that Vault submits for + encryption to not match the data returned after decryption. 
It is not
+   sufficient for the autoseal mechanism to return an error, and it cannot be
+   triggered by an outside attacker changing the on-disk ciphertext as all
+   autoseal mechanisms use authenticated encryption. We do not believe that
+   this is generally a cause for concern; since it involves the autoseal
+   mechanism returning bad data to Vault but with no error, in a working Vault
+   configuration this code path should never be hit. If this issue is hit,
+   Vault will not be unsealing properly anyway, so it will be obvious what is
+   happening, and an immediate rekey of the master key can be performed after
+   service is restored. We have filed for a CVE (CVE-2018-19786) and a CVSS V3
+   score of 5.2 has been assigned.
+
+CHANGES:
+
+ * Tokens are now prefixed by a designation to indicate what type of token they
+   are. Service tokens start with `s.` and batch tokens start with `b.`.
+   Existing tokens will still work (they are all of service type and will be
+   considered as such). Prefixing allows us to be more efficient when consuming
+   a token, which keeps the critical path of requests faster.
+ * Paths within `auth/token` that allow specifying a token or accessor in the
+   URL have been removed. These have been deprecated since March 2016 and
+   undocumented, but were retained for backwards compatibility. They shouldn't
+   be used due to the possibility of those paths being logged, so at this point
+   they are simply being removed.
+ * Vault will no longer accept updates when the storage key has invalid UTF-8
+   character encoding [[GH-5819](https://github.com/hashicorp/vault/pull/5819)]
+ * Mount/Auth tuning the `options` map on backends will now upsert any provided
+   values, and keep any of the existing values in place if not provided. The
+   options map itself cannot be unset once it's set, but the keypairs within the
+   map can be unset if an empty value is provided, with the exception of the
+   `version` keypair which is handled differently for KVv2 purposes.
+ * Agent no longer automatically reauthenticates when new credentials are
+   detected. It's not strictly necessary and in some cases was causing
+   reauthentication much more often than intended.
+ * HSM Regenerate Key Support Removed: Vault no longer supports destroying and
+   regenerating encryption keys on an HSM; it only supports creating them.
+   Although this has never been a source of a customer incident, it is simply a
+   code path that is too easy to activate by mistake, especially by mistyping
+   `regenerate_key` instead of `generate_key`.
+ * Barrier Config Upgrade (Enterprise): When upgrading from Vault 0.8.x, the
+   seal type in the barrier config storage entry will be upgraded from
+   "hsm-auto" to "awskms" or "pkcs11" upon unseal if using AWSKMS or HSM seals.
+   If performing seal migration, the barrier config should first be upgraded
+   prior to starting migration.
+ * Go API client uses pooled HTTP client: The Go API client now uses a
+   connection-pooling HTTP client by default. For CLI operations this makes no
+   difference but it should provide significant performance benefits for those
+   writing custom clients using the Go API library. As before, this can be
+   changed to any custom HTTP client by the caller.
+ * Builtin Secret Engines and Auth Methods are integrated deeper into the
+   plugin system. The plugin catalog can now override builtin plugins with
+   custom versions of the same name. Additionally, the plugin system now
+   requires a plugin `type` field when configuring plugins; this can be "auth",
+   "database", or "secret".
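+
+Since the Go API client now pools connections by default (per the change
+above), callers who need different transport behavior can still supply their
+own `http.Client`; a minimal sketch, where the timeout and transport settings
+are arbitrary examples:
+
+```go
+package main
+
+import (
+	"log"
+	"net/http"
+	"time"
+
+	"github.com/hashicorp/vault/api"
+)
+
+func main() {
+	config := api.DefaultConfig() // connection-pooling HTTP client by default
+
+	// Swap in a custom client, e.g. to disable keep-alives entirely.
+	config.HttpClient = &http.Client{
+		Timeout:   30 * time.Second,
+		Transport: &http.Transport{DisableKeepAlives: true},
+	}
+
+	client, err := api.NewClient(config)
+	if err != nil {
+		log.Fatal(err)
+	}
+	_ = client // use the client as usual
+}
+```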
+
+FEATURES:
+
+ * **Auto-Unseal in Open Source**: Cloud-based auto-unseal has been migrated
+   from Enterprise to Open Source. We've created a migrator to allow migrating
+   between Shamir seals and auto unseal methods.
+ * **Batch Tokens**: Batch tokens trade off some features of service tokens for no
+   storage overhead, and in most cases can be used across performance
+   replication clusters.
+ * **Replication Speed Improvements**: We've worked hard to speed up a lot of
+   operations when using Vault Enterprise Replication.
+ * **GCP KMS Secrets Engine**: This new secrets engine provides a Transit-like
+   pattern to keys stored within GCP Cloud KMS.
+ * **AppRole support in Vault Agent Auto-Auth**: You can now use AppRole
+   credentials when having Agent automatically authenticate to Vault
+ * **OpenAPI Support**: Descriptions of mounted backends can be served directly
+   from Vault
+ * **Kubernetes Projected Service Account Tokens**: Projected Service Account
+   Tokens are now supported in Kubernetes auth
+ * **Response Wrapping in UI**: Added ability to wrap secrets and easily copy
+   the wrap token or secret JSON in the UI
+
+IMPROVEMENTS:
+
+ * agent: Support for configuring the location of the kubernetes service account
+   [[GH-5725](https://github.com/hashicorp/vault/pull/5725)]
+ * auth/token: New tokens are indexed in storage via HMAC-SHA256 instead of SHA1
+ * secret/totp: Allow @ character to be part of key name [[GH-5652](https://github.com/hashicorp/vault/pull/5652)]
+ * secret/consul: Add support for new policy-based tokens added in Consul 1.4
+   [[GH-5586](https://github.com/hashicorp/vault/pull/5586)]
+ * ui: Improve the token auto-renew warning, and automatically begin renewal
+   when a user becomes active again [[GH-5662](https://github.com/hashicorp/vault/pull/5662)]
+ * ui: The unbundled UI page now has some styling [[GH-5665](https://github.com/hashicorp/vault/pull/5665)]
+ * ui: Improved banner and popup design [[GH-5672](https://github.com/hashicorp/vault/pull/5672)]
+ * ui: Added token type to auth method mount config [[GH-5723](https://github.com/hashicorp/vault/pull/5723)]
+ * ui: Display additional wrap info when unwrapping. [[GH-5664](https://github.com/hashicorp/vault/pull/5664)]
+ * ui: Empty states have updated styling and link to relevant actions and
+   documentation [[GH-5758](https://github.com/hashicorp/vault/pull/5758)]
+ * ui: Allow editing of KV V2 data when a token doesn't have capabilities to
+   read secret metadata [[GH-5879](https://github.com/hashicorp/vault/pull/5879)]
+
+BUG FIXES:
+
+ * agent: Fix auth when multiple redirects occur [[GH-5814](https://github.com/hashicorp/vault/pull/5814)]
+ * cli: Restore the `-policy-override` flag [[GH-5826](https://github.com/hashicorp/vault/pull/5826)]
+ * core: Fix rekey progress reset which did not happen under certain
+   circumstances.
[[GH-5743](https://github.com/hashicorp/vault/pull/5743)] + * core: Migration from autounseal to shamir will clean up old keys [[GH-5671](https://github.com/hashicorp/vault/pull/5671)] + * identity: Update group memberships when entity is deleted [[GH-5786](https://github.com/hashicorp/vault/pull/5786)] + * replication/perfstandby: Fix audit table upgrade on standbys [[GH-5811](https://github.com/hashicorp/vault/pull/5811)] + * replication/perfstandby: Fix redirect on approle update [[GH-5820](https://github.com/hashicorp/vault/pull/5820)] + * secrets/azure: Fix valid roles being rejected for duplicate ids despite + having distinct scopes + [[GH-16](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/16)] + * storage/gcs: Send md5 of values to GCS to avoid potential corruption + [[GH-5804](https://github.com/hashicorp/vault/pull/5804)] + * secrets/kv: Fix issue where storage version would get incorrectly downgraded + [[GH-5809](https://github.com/hashicorp/vault/pull/5809)] + * secrets/kv: Disallow empty paths on a `kv put` while accepting empty paths + for all other operations for backwards compatibility + [[GH-19](https://github.com/hashicorp/vault-plugin-secrets-kv/pull/19)] + * ui: Allow for secret creation in kv v2 when cas_required=true [[GH-5823](https://github.com/hashicorp/vault/pull/5823)] + * ui: Fix dr secondary operation token generation via the ui [[GH-5818](https://github.com/hashicorp/vault/pull/5818)] + * ui: Fix the PKI context menu so that items load [[GH-5824](https://github.com/hashicorp/vault/pull/5824)] + * ui: Update DR Secondary Token generation command [[GH-5857](https://github.com/hashicorp/vault/pull/5857)] + * ui: Fix pagination bug where controls would be rendered once for each + item when viewing policies [[GH-5866](https://github.com/hashicorp/vault/pull/5866)] + * ui: Fix bug where `sys/leases/revoke` required 'sudo' capability to show + the revoke button in the UI [[GH-5647](https://github.com/hashicorp/vault/pull/5647)] + * ui: Fix issue where certain pages wouldn't render in a namespace [[GH-5692](https://github.com/hashicorp/vault/pull/5692)] + +## 0.11.5 (November 13th, 2018) + +BUG FIXES: + + * agent: Fix issue when specifying two file sinks [[GH-5610](https://github.com/hashicorp/vault/pull/5610)] + * auth/userpass: Fix minor timing issue that could leak the presence of a + username [[GH-5614](https://github.com/hashicorp/vault/pull/5614)] + * autounseal/alicloud: Fix issue interacting with the API (Enterprise) + * autounseal/azure: Fix key version tracking (Enterprise) + * cli: Fix panic that could occur if parameters were not provided [[GH-5603](https://github.com/hashicorp/vault/pull/5603)] + * core: Fix buggy behavior if trying to remount into a namespace + * identity: Fix duplication of entity alias entity during alias transfer + between entities [[GH-5733](https://github.com/hashicorp/vault/pull/5733)] + * namespaces: Fix tuning of auth mounts in a namespace + * ui: Fix bug where editing secrets as JSON doesn't save properly [[GH-5660](https://github.com/hashicorp/vault/pull/5660)] + * ui: Fix issue where IE 11 didn't render the UI and also had a broken form + when trying to use tool/hash [[GH-5714](https://github.com/hashicorp/vault/pull/5714)] + +## 0.11.4 (October 23rd, 2018) + +CHANGES: + + * core: HA lock file is no longer copied during `operator migrate` [[GH-5503](https://github.com/hashicorp/vault/pull/5503)]. + We've categorized this as a change, but generally this can be considered + just a bug fix, and no action is needed. 
+ +FEATURES: + + * **Transit Key Trimming**: Keys in transit secret engine can now be trimmed to + remove older unused key versions + * **Web UI support for KV Version 2**: Browse, delete, undelete and destroy + individual secret versions in the UI + * **Azure Existing Service Principal Support**: Credentials can now be generated + against an existing service principal + +IMPROVEMENTS: + + * core: Add last WAL in leader/health output for easier debugging [[GH-5523](https://github.com/hashicorp/vault/pull/5523)] + * identity: Identity names will now be handled case insensitively by default. + This includes names of entities, aliases and groups [[GH-5404](https://github.com/hashicorp/vault/pull/5404)] + * secrets/aws: Added role-option max_sts_ttl to cap TTL for AWS STS + credentials [[GH-5500](https://github.com/hashicorp/vault/pull/5500)] + * secret/database: Allow Cassandra user to be non-superuser so long as it has + role creation permissions [[GH-5402](https://github.com/hashicorp/vault/pull/5402)] + * secret/radius: Allow setting the NAS Identifier value in the generated + packet [[GH-5465](https://github.com/hashicorp/vault/pull/5465)] + * secret/ssh: Allow usage of JSON arrays when setting zero addresses [[GH-5528](https://github.com/hashicorp/vault/pull/5528)] + * secret/transit: Allow trimming unused keys [[GH-5388](https://github.com/hashicorp/vault/pull/5388)] + * ui: Support KVv2 [[GH-5547](https://github.com/hashicorp/vault/pull/5547)], [[GH-5563](https://github.com/hashicorp/vault/pull/5563)] + * ui: Allow viewing and updating Vault license via the UI + * ui: Onboarding will now display your progress through the chosen tutorials + * ui: Dynamic secret backends obfuscate sensitive data by default and + visibility is toggleable + +BUG FIXES: + + * agent: Fix potential hang during agent shutdown [[GH-5026](https://github.com/hashicorp/vault/pull/5026)] + * auth/ldap: Fix listing of users/groups that contain slashes [[GH-5537](https://github.com/hashicorp/vault/pull/5537)] + * core: Fix memory leak during some expiration calls [[GH-5505](https://github.com/hashicorp/vault/pull/5505)] + * core: Fix generate-root operations requiring empty `otp` to be provided + instead of an empty body [[GH-5495](https://github.com/hashicorp/vault/pull/5495)] + * identity: Remove lookup check during alias removal from entity [[GH-5524](https://github.com/hashicorp/vault/pull/5524)] + * secret/pki: Fix TTL/MaxTTL check when using `sign-verbatim` [[GH-5549](https://github.com/hashicorp/vault/pull/5549)] + * secret/pki: Fix regression in 0.11.2+ causing the NotBefore value of + generated certificates to be set to the Unix epoch if the role value was not + set, instead of using the default of 30 seconds [[GH-5481](https://github.com/hashicorp/vault/pull/5481)] + * storage/mysql: Use `varbinary` instead of `varchar` when creating HA tables + [[GH-5529](https://github.com/hashicorp/vault/pull/5529)] + +## 0.11.3 (October 8th, 2018) + +SECURITY: + + * Revocation: A regression in 0.11.2 (OSS) and 0.11.0 (Enterprise) caused + lease IDs containing periods (`.`) to not be revoked properly. Upon startup + when revocation is tried again these should now revoke successfully. 
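+
+Operators who would rather re-run revocation for a specific lease than wait
+for the retry at startup can do so through the API; a sketch using the Go
+client, where the lease ID shown is hypothetical:
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/hashicorp/vault/api"
+)
+
+func main() {
+	client, err := api.NewClient(api.DefaultConfig())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Lease IDs containing periods now revoke correctly.
+	if err := client.Sys().Revoke("database/creds/readonly/abc.123"); err != nil {
+		log.Fatal(err)
+	}
+}
+```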
+
+IMPROVEMENTS:
+
+ * auth/ldap: Listing of users and groups returns absolute paths [[GH-5537](https://github.com/hashicorp/vault/pull/5537)]
+ * secret/pki: OID SANs can now specify `*` to allow any value [[GH-5459](https://github.com/hashicorp/vault/pull/5459)]
+
+BUG FIXES:
+
+ * auth/ldap: Fix panic if specific values were given to be escaped [[GH-5471](https://github.com/hashicorp/vault/pull/5471)]
+ * cli/auth: Fix panic if `vault auth` was given no parameters [[GH-5473](https://github.com/hashicorp/vault/pull/5473)]
+ * secret/database/mongodb: Fix panic that could occur at high load [[GH-5463](https://github.com/hashicorp/vault/pull/5463)]
+ * secret/pki: Fix CA generation not allowing OID SANs [[GH-5459](https://github.com/hashicorp/vault/pull/5459)]
+
+## 0.11.2 (October 2nd, 2018)
+
+CHANGES:
+
+ * `sys/seal-status` now includes an `initialized` boolean in the output. If
+   Vault is not initialized, it will return a `200` with this value set to
+   `false` instead of a `400`.
+ * `passthrough_request_headers` will now deny certain headers from being
+   provided to backends based on a global denylist.
+ * Token Format: Tokens are now represented as a base62 value; tokens in
+   namespaces will have the namespace identifier appended. (This appeared in
+   Enterprise in 0.11.0, but is only in OSS in 0.11.2.)
+
+FEATURES:
+
+ * **AWS Secret Engine Root Credential Rotation**: The credential used by the AWS
+   secret engine can now be rotated, to ensure that only Vault knows the
+   credentials it is using [[GH-5140](https://github.com/hashicorp/vault/pull/5140)]
+ * **Storage Backend Migrator**: A new `operator migrate` command allows offline
+   migration of data between two storage backends
+ * **AliCloud KMS Auto Unseal and Seal Wrap Support (Enterprise)**: AliCloud KMS can now be used as a seal for
+   Auto Unseal and Seal Wrapping
+
+BUG FIXES:
+
+ * auth/okta: Fix reading deprecated `token` parameter if a token was
+   previously set in the configuration [[GH-5409](https://github.com/hashicorp/vault/pull/5409)]
+ * core: Re-add deprecated capabilities information for now [[GH-5360](https://github.com/hashicorp/vault/pull/5360)]
+ * core: Fix handling of cyclic token relationships [[GH-4803](https://github.com/hashicorp/vault/pull/4803)]
+ * storage/mysql: Fix locking on MariaDB [[GH-5343](https://github.com/hashicorp/vault/pull/5343)]
+ * replication: Fix DR API when using a token [[GH-5398](https://github.com/hashicorp/vault/pull/5398)]
+ * identity: Ensure old group alias is removed when a new one is written [[GH-5350](https://github.com/hashicorp/vault/pull/5350)]
+ * storage/alicloud: Don't call uname on package init [[GH-5358](https://github.com/hashicorp/vault/pull/5358)]
+ * secrets/jwt: Fix issue where request context would be canceled too early
+ * ui: fix need to have update for aws iam creds generation [GH-5294]
+ * ui: fix calculation of token expiry [[GH-5435](https://github.com/hashicorp/vault/pull/5435)]
+
+IMPROVEMENTS:
+
+ * auth/aws: The identity alias name can now be configured to be either the IAM unique
+   ID of the IAM Principal, or the ARN of the caller identity [[GH-5247](https://github.com/hashicorp/vault/pull/5247)]
+ * auth/cert: Add allowed_organizational_units support [[GH-5252](https://github.com/hashicorp/vault/pull/5252)]
+ * cli: Format TTLs for non-secret responses [[GH-5367](https://github.com/hashicorp/vault/pull/5367)]
+ * identity: Support operating on entities and groups by their names [[GH-5355](https://github.com/hashicorp/vault/pull/5355)]
+ * plugins: Add `env`
+## 0.11.1.1 (September 17th, 2018) (Enterprise Only)
+
+BUG FIXES:
+
+ * agent: Fix auth handler-based wrapping of output tokens [[GH-5316](https://github.com/hashicorp/vault/pull/5316)]
+ * core: Properly store the replication checkpoint file if it's larger than the storage engine's per-item limit
+ * core: Improve WAL deletion rate
+ * core: Fix token creation on performance standby nodes
+ * core: Fix unwrapping inside a namespace
+ * core: Always forward tidy operations from performance standby nodes
+
+IMPROVEMENTS:
+
+ * auth/aws: Add support for key/value pairs or JSON values for `iam_request_headers` with the IAM auth method [[GH-5320](https://github.com/hashicorp/vault/pull/5320)]
+ * auth/aws, secret/aws: Throttling errors from the AWS API will now be reported as 502 errors by Vault, along with the original error [[GH-5270](https://github.com/hashicorp/vault/pull/5270)]
+ * replication: Start fetching during a sync from where it previously errored
+
+## 0.11.1 (September 6th, 2018)
+
+SECURITY:
+
+ * Random Byte Reading in Barrier: Prior to this release, Vault was not properly checking the error code when reading random bytes for the IV for AES operations in its cryptographic barrier. Specifically, this means that such an IV could potentially be zero multiple times, causing nonce re-use and weakening the security of the key. On most platforms this should never happen because reading from kernel random sources is non-blocking and always successful, but there may be platform-specific behavior that has not been accounted for. (Vault has tests to check exactly this, and the tests have never seen nonce re-use.)
+
+FEATURES:
+
+ * AliCloud Agent Support: Vault Agent can now authenticate against the AliCloud auth method (a configuration sketch follows this list).
+ * UI: Enable AliCloud auth method and Azure secrets engine via the UI.
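+
+A minimal Vault Agent sketch for the AliCloud support above; the method's `role` and `region` parameters and the sink path are assumptions for illustration, not confirmed by these notes:
+
+```shell
+# Hypothetical agent configuration using the alicloud auto-auth method.
+cat > agent.hcl <<'EOF'
+auto_auth {
+  method "alicloud" {
+    config = {
+      role   = "dev-role"
+      region = "us-east-1"
+    }
+  }
+
+  sink "file" {
+    config = {
+      path = "/tmp/vault-token"
+    }
+  }
+}
+EOF
+
+vault agent -config=agent.hcl
+```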
+
+IMPROVEMENTS:
+
+ * core: Logging level for most logs (not including secrets/auth plugins) can now be changed on-the-fly via `SIGHUP`, reading the desired value from Vault's config file [[GH-5280](https://github.com/hashicorp/vault/pull/5280)]
+
+BUG FIXES:
+
+ * core: Ensure we use a background context when stepping down [[GH-5290](https://github.com/hashicorp/vault/pull/5290)]
+ * core: Properly check error return from random byte reading [[GH-5277](https://github.com/hashicorp/vault/pull/5277)]
+ * core: Re-add `sys/` top-route injection for now [[GH-5241](https://github.com/hashicorp/vault/pull/5241)]
+ * core: Fix policies stored in minified JSON returning an error [[GH-5229](https://github.com/hashicorp/vault/pull/5229)]
+ * core: Evaluate templated policies in capabilities check [[GH-5250](https://github.com/hashicorp/vault/pull/5250)]
+ * identity: Update MemDB with identity group alias while loading groups [[GH-5289](https://github.com/hashicorp/vault/pull/5289)]
+ * secrets/database: Fix nil pointer when revoking some leases [[GH-5262](https://github.com/hashicorp/vault/pull/5262)]
+ * secrets/pki: Fix sign-verbatim losing extra Subject attributes [[GH-5245](https://github.com/hashicorp/vault/pull/5245)]
+ * secrets/pki: Remove certificates from store when tidying revoked certificates and simplify API [[GH-5231](https://github.com/hashicorp/vault/pull/5231)]
+ * ui: JSON editor will not coerce input to an object, and will now show an error about Vault expecting an object [[GH-5271](https://github.com/hashicorp/vault/pull/5271)]
+ * ui: Authentication form will now default to any methods that have been tuned to show up for unauthenticated users [[GH-5281](https://github.com/hashicorp/vault/pull/5281)]
+
+
+## 0.11.0 (August 28th, 2018)
+
+DEPRECATIONS/CHANGES:
+
+ * Request Timeouts: A default request timeout of 90s is now enforced. This setting can be overwritten in the config file. If you anticipate requests taking longer than 90s this setting should be updated before upgrading.
+ * `sys/` Top Level Injection: For the last two years, for backwards compatibility, data for various `sys/` routes has been injected into both the Secret's Data map and into the top level of the JSON response object. However, this has some subtle issues that pop up from time to time and is becoming increasingly complicated to maintain, so it's finally being removed. (NOTE: the injection will be re-added in 0.11.1, as its removal broke more than anticipated. There will be some further guidelines around when it will be removed again.)
+ * Path Fallback for List Operations: For a very long time Vault has automatically adjusted `list` operations to always end in a `/`, as list operations operate on prefixes, so all list operations by definition end with `/`. This was done server-side so it affects all clients. However, this has also led to a lot of confusion for users writing policies that assume that the path that they use in the CLI is the path used internally. Starting in 0.11, ACL policies gain a new fallback rule for listing: they will use a matching path ending in `/` if available, but if not found, they will look for the same path without a trailing `/`. This allows putting `list` capabilities in the same path block as most other capabilities for that path, while not providing any extra access if `list` wasn't actually provided there. (See the policy sketch after this list.)
+ * Performance Standbys On By Default: If your flavor/license of Vault Enterprise supports Performance Standbys, they are on by default. You can disable this behavior per-node with the `disable_performance_standby` configuration flag.
+ * AWS Secret Engine Roles: The AWS Secret Engine roles are now explicit about the type of AWS credential they are generating; this reduces ambiguity that existed previously as well as enables new features for specific credential types. Writing role data and generating credentials remain backwards compatible; however, the data returned when reading a role's configuration has changed in backwards-incompatible ways. Anything that depended on reading role data from the AWS secret engine will break until it is updated to work with the new format.
+ * Token Format (Enterprise): Tokens are now represented as a base62 value; tokens in namespaces will have the namespace identifier appended.
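+
+To make the list fallback concrete, a hedged sketch (the `secret/foo` path and policy name are hypothetical): with the fallback rule, the single block below now also grants `list` on `secret/foo/`.
+
+```shell
+# Hypothetical policy relying on the 0.11 list fallback: "list" granted
+# on "secret/foo" also matches list requests against "secret/foo/".
+cat > app-policy.hcl <<'EOF'
+path "secret/foo" {
+  capabilities = ["read", "list"]
+}
+EOF
+vault policy write app app-policy.hcl
+
+# With the policy above attached, listing the prefix now succeeds.
+vault list secret/foo
+```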
+FEATURES:
+
+ * **Namespaces (Enterprise)**: A set of features within Vault Enterprise that allows Vault environments to support *Secure Multi-tenancy* within a single Vault Enterprise infrastructure. Through namespaces, Vault administrators can support tenant isolation for teams and individuals as well as empower those individuals to self-manage their own tenant environment.
+ * **Performance Standbys (Enterprise)**: Standby nodes can now service requests that do not modify storage. This provides near-horizontal scaling of a cluster in some workloads, and is the intra-cluster analogue of the existing Performance Replication feature, which replicates to distinct clusters in other datacenters, geos, etc.
+ * **AliCloud OSS Storage**: AliCloud OSS can now be used for Vault storage.
+ * **AliCloud Auth Plugin**: AliCloud's identity services can now be used to grant access to Vault. See the [plugin repository](https://github.com/hashicorp/vault-plugin-auth-alicloud) for more information.
+ * **Azure Secrets Plugin**: There is now a plugin (pulled in to Vault) that allows generating credentials to allow access to Azure. See the [plugin repository](https://github.com/hashicorp/vault-plugin-secrets-azure) for more information.
+ * **HA Support for MySQL Storage**: MySQL storage now supports HA.
+ * **ACL Templating**: ACL policies can now be templated using identity Entity, Groups, and Metadata (a sketch follows the improvements list below).
+ * **UI Onboarding wizards**: The Vault UI can provide contextual help and guidance, linking out to relevant documentation and guides on vaultproject.io for various workflows in Vault.
+
+IMPROVEMENTS:
+
+ * agent: Add `exit_after_auth` to be able to use the Agent for a single authentication [[GH-5013](https://github.com/hashicorp/vault/pull/5013)]
+ * auth/approle: Add ability to set token bound CIDRs on individual Secret IDs [[GH-5034](https://github.com/hashicorp/vault/pull/5034)]
+ * cli: Add support for passing parameters to `vault read` operations [[GH-5093](https://github.com/hashicorp/vault/pull/5093)]
+ * secrets/aws: Make credential types more explicit [[GH-4360](https://github.com/hashicorp/vault/pull/4360)]
+ * secrets/nomad: Support for longer token names [[GH-5117](https://github.com/hashicorp/vault/pull/5117)]
+ * secrets/pki: Allow disabling CRL generation [[GH-5134](https://github.com/hashicorp/vault/pull/5134)]
+ * storage/azure: Add support for different Azure environments [[GH-4997](https://github.com/hashicorp/vault/pull/4997)]
+ * storage/file: Sort keys in list responses [[GH-5141](https://github.com/hashicorp/vault/pull/5141)]
+ * storage/mysql: Support special characters in database and table names.
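+
+A hedged sketch of ACL templating, assuming the `{{identity.entity.name}}` parameter form; the mount, path, and policy name are hypothetical:
+
+```shell
+# Hypothetical templated policy: each identity entity gets its own
+# KVv2 subtree keyed by its entity name.
+cat > user-kv.hcl <<'EOF'
+path "secret/data/{{identity.entity.name}}/*" {
+  capabilities = ["create", "update", "read", "delete", "list"]
+}
+EOF
+vault policy write user-kv user-kv.hcl
+```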
+
+BUG FIXES:
+
+ * auth/jwt: Always validate `aud` claim even if `bound_audiences` isn't set (IOW, error in this case)
+ * core: Prevent Go's HTTP library from interspersing logs in a different format and/or interleaving them [[GH-5135](https://github.com/hashicorp/vault/pull/5135)]
+ * identity: Properly populate `mount_path` and `mount_type` on group lookup [[GH-5074](https://github.com/hashicorp/vault/pull/5074)]
+ * identity: Fix persisting alias metadata [[GH-5188](https://github.com/hashicorp/vault/pull/5188)]
+ * identity: Fix carryover issue from previously fixed race condition that could cause Vault not to start up due to two entities referencing the same alias. These entities are now merged. [[GH-5000](https://github.com/hashicorp/vault/pull/5000)]
+ * replication: Fix issue causing some pages not to flush to storage
+ * secrets/database: Fix inability to update custom SQL statements on database roles. [[GH-5080](https://github.com/hashicorp/vault/pull/5080)]
+ * secrets/pki: Disallow putting the CA's serial on its CRL. While technically legal, doing so inherently means the CRL can't be trusted anyways, so it's not useful and easy to footgun. [[GH-5134](https://github.com/hashicorp/vault/pull/5134)]
+ * storage/gcp,spanner: Fix data races [[GH-5081](https://github.com/hashicorp/vault/pull/5081)]
+
+## 0.10.4 (July 25th, 2018)
+
+SECURITY:
+
+ * Control Groups: The Identity entity associated with a request was not being properly persisted. As a result, the same authorizer could provide more than one authorization.
+
+DEPRECATIONS/CHANGES:
+
+ * Revocations of dynamic secrets leases are now queued/asynchronous rather than synchronous. This allows Vault to take responsibility for revocation even if the initial attempt fails. The previous synchronous behavior can be attained via the `-sync` CLI flag or `sync` API parameter. When in synchronous mode, if the operation results in failure it is up to the user to retry.
+ * CLI Retries: The CLI will no longer retry commands on 5xx errors. This was a source of confusion to users as to why Vault would "hang" before returning a 5xx error. The Go API client still defaults to two retries.
+ * Identity Entity Alias metadata: You can no longer manually set metadata on entity aliases. All alias data (except the canonical entity ID it refers to) is intended to be managed by the plugin providing the alias information, so allowing it to be set manually didn't make sense.
+
+FEATURES:
+
+ * **JWT/OIDC Auth Method**: The new `jwt` auth method accepts JWTs and either validates signatures locally or uses OIDC Discovery to fetch the current set of keys for signature validation. Various claims can be specified for validation (in addition to the cryptographic signature) and a user and optional groups claim can be used to provide Identity information (a setup sketch follows this list).
+ * **FoundationDB Storage**: You can now use FoundationDB for storing Vault data.
+ * **UI Control Group Workflow (enterprise)**: The UI will now detect control group responses and provides a workflow to view the status of the request and to authorize requests.
+ * **Vault Agent (Beta)**: Vault Agent is a daemon that can automatically authenticate for you across a variety of authentication methods, provide tokens to clients, and keep the tokens renewed, reauthenticating as necessary.
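+
+A minimal setup sketch for the new `jwt` auth method; the discovery URL, role name, audience, and claim values are hypothetical placeholders:
+
+```shell
+vault auth enable jwt
+
+# Use OIDC Discovery to fetch the current signing keys.
+vault write auth/jwt/config \
+    oidc_discovery_url="https://accounts.example.com"
+
+# Validate the audience claim and map the "sub" claim to a user.
+vault write auth/jwt/role/webapp \
+    bound_audiences="vault" \
+    user_claim="sub" \
+    policies="default"
+
+# Log in with a JWT issued by the configured provider.
+vault write auth/jwt/login role=webapp jwt=@token.jwt
+```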
+
+IMPROVEMENTS:
+
+ * auth/azure: Add support for virtual machine scale sets
+ * auth/gcp: Support multiple bindings for region, zone, and instance group
+ * cli: Add subcommands for interacting with the plugin catalog [[GH-4911](https://github.com/hashicorp/vault/pull/4911)]
+ * cli: Add a `-description` flag to secrets and auth tune subcommands to allow updating an existing secret engine's or auth method's description. This change also allows the description to be unset by providing an empty string (see the sketch after this list).
+ * core: Add config flag to disable non-printable character check [[GH-4917](https://github.com/hashicorp/vault/pull/4917)]
+ * core: A `max_request_size` parameter can now be set per-listener to adjust the maximum allowed size per request [[GH-4824](https://github.com/hashicorp/vault/pull/4824)]
+ * core: Add control group request endpoint to default policy [[GH-4904](https://github.com/hashicorp/vault/pull/4904)]
+ * identity: Identity metadata is now passed through to plugins [[GH-4967](https://github.com/hashicorp/vault/pull/4967)]
+ * replication: Add additional safety checks and logging when replication is in a bad state
+ * secrets/kv: Add support for using `-field=data` to KVv2 when using `vault kv` [[GH-4895](https://github.com/hashicorp/vault/pull/4895)]
+ * secrets/pki: Add the ability to tidy revoked but unexpired certificates [[GH-4916](https://github.com/hashicorp/vault/pull/4916)]
+ * secrets/ssh: Allow Vault to work with single-argument SSH flags [[GH-4825](https://github.com/hashicorp/vault/pull/4825)]
+ * secrets/ssh: SSH executable path can now be configured in the CLI [[GH-4937](https://github.com/hashicorp/vault/pull/4937)]
+ * storage/swift: Add additional configuration options [[GH-4901](https://github.com/hashicorp/vault/pull/4901)]
+ * ui: Choose which auth methods to show to unauthenticated users via `listing_visibility` in the auth method edit forms [[GH-4854](https://github.com/hashicorp/vault/pull/4854)]
+ * ui: Authenticate users automatically by passing a wrapped token to the UI via the new `wrapped_token` query parameter [[GH-4854](https://github.com/hashicorp/vault/pull/4854)]
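+
+A quick sketch of the tune-related improvements above; mount paths are hypothetical, and the `-listing-visibility` flag name is an assumption based on the `listing_visibility` field:
+
+```shell
+# Update a mount's description in place, or clear it with an empty string.
+vault secrets tune -description="Team credentials" secret/
+vault auth tune -description="" userpass/
+
+# Show the userpass method on the UI's unauthenticated login page.
+vault auth tune -listing-visibility=unauth userpass/
+```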
+
+BUG FIXES:
+
+ * api: Fix response body being cleared too early [[GH-4987](https://github.com/hashicorp/vault/pull/4987)]
+ * auth/approle: Fix issue with tidy endpoint that would unnecessarily remove secret accessors [[GH-4981](https://github.com/hashicorp/vault/pull/4981)]
+ * auth/aws: Fix updating `max_retries` [[GH-4980](https://github.com/hashicorp/vault/pull/4980)]
+ * auth/kubernetes: Trim trailing whitespace when sending JWT
+ * cli: Fix parsing of environment variables for integer flags [[GH-4925](https://github.com/hashicorp/vault/pull/4925)]
+ * core: Fix returning 500 instead of 503 if a rekey is attempted when Vault is sealed [[GH-4874](https://github.com/hashicorp/vault/pull/4874)]
+ * core: Fix issue releasing the leader lock in some circumstances [[GH-4915](https://github.com/hashicorp/vault/pull/4915)]
+ * core: Fix a panic that could happen if the server was shut down while still starting up
+ * core: Fix deadlock that would occur if a leadership loss occurs at the same time as a seal operation [[GH-4932](https://github.com/hashicorp/vault/pull/4932)]
+ * core: Fix issue with auth mounts failing to renew tokens due to policies changing [[GH-4960](https://github.com/hashicorp/vault/pull/4960)]
+ * auth/radius: Fix issue where some radius logins were being canceled too early [[GH-4941](https://github.com/hashicorp/vault/pull/4941)]
+ * core: Fix accidental seal of Vault if we lose leadership during startup [[GH-4924](https://github.com/hashicorp/vault/pull/4924)]
+ * core: Fix standby not being able to forward requests larger than 4MB [[GH-4844](https://github.com/hashicorp/vault/pull/4844)]
+ * core: Avoid panic while processing group memberships [[GH-4841](https://github.com/hashicorp/vault/pull/4841)]
+ * identity: Fix a race condition creating aliases [[GH-4965](https://github.com/hashicorp/vault/pull/4965)]
+ * plugins: Fix being unable to send very large payloads to or from plugins [[GH-4958](https://github.com/hashicorp/vault/pull/4958)]
+ * physical/azure: Fix long list responses sometimes being truncated [[GH-4983](https://github.com/hashicorp/vault/pull/4983)]
+ * replication: Allow replication status requests to be processed while in merkle sync
+ * replication: Ensure merkle reindex flushes all changes to storage immediately
+ * replication: Fix a case where a network interruption could cause a secondary to be unable to reconnect to a primary
+ * secrets/pki: Fix permitted DNS domains performing improper validation [[GH-4863](https://github.com/hashicorp/vault/pull/4863)]
+ * secrets/database: Fix panic during DB creds revocation [[GH-4846](https://github.com/hashicorp/vault/pull/4846)]
+ * ui: Fix usage of cubbyhole backend in the UI [[GH-4851](https://github.com/hashicorp/vault/pull/4851)]
+ * ui: Fix toggle state when a secret is JSON-formatted [[GH-4913](https://github.com/hashicorp/vault/pull/4913)]
+ * ui: Fix coercion of falsey values to empty string when editing secrets as JSON [[GH-4977](https://github.com/hashicorp/vault/pull/4977)]
+
+## 0.10.3 (June 20th, 2018)
+
+DEPRECATIONS/CHANGES:
+
+ * In the audit log and in client responses, policies are now split into three parameters: policies that came only from tokens, policies that came only from Identity, and the combined set. Any previous location of policies via the API now contains the full, combined set.
+ * When a token is tied to an Identity entity and the entity is deleted, the token will no longer be usable, regardless of the validity of the token itself.
+ * When authentication succeeds but no policies were defined for that specific user, most auth methods would allow a token to be generated but a few would reject the authentication, namely `ldap`, `okta`, and `radius`. Since the `default` policy is added by Vault's core, this would incorrectly reject valid authentications before they would in fact be granted policies. This inconsistency has been addressed; valid authentications for these methods now succeed even if no policy was specifically defined in that method for that user.
+
+FEATURES:
+
+ * Root Rotation for Active Directory: You can now command Vault to rotate the configured root credentials used in the AD secrets engine, to ensure that only Vault knows the credentials it's using.
+ * URI SANs in PKI: You can now configure URI Subject Alternate Names in the `pki` backend. Roles can limit which SANs are allowed via globbing.
+ * `kv rollback` Command: You can now use `vault kv rollback` to roll a KVv2 path back to a previous non-deleted/non-destroyed version. The previous version becomes the next/newest version for the path (see the sketch after this list).
+ * Token Bound CIDRs in AppRole: You can now add CIDRs to which a token generated from AppRole will be bound.
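+
+A short sketch of `vault kv rollback`; the mount, path, and version number are hypothetical:
+
+```shell
+# Make version 2 of secret/my-app the newest version again. The restored
+# data is written as a new version rather than rewriting history.
+vault kv rollback -version=2 secret/my-app
+
+vault kv get secret/my-app
+```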
+
+IMPROVEMENTS:
+
+ * approle: Return 404 instead of 202 on invalid role names during POST operations [[GH-4778](https://github.com/hashicorp/vault/pull/4778)]
+ * core: Add idle and initial header read/TLS handshake timeouts to connections to ensure server resources are cleaned up [[GH-4760](https://github.com/hashicorp/vault/pull/4760)]
+ * core: Report policies in token, identity, and full sets [[GH-4747](https://github.com/hashicorp/vault/pull/4747)]
+ * secrets/databases: Add `create`/`update` distinction for connection configurations [[GH-3544](https://github.com/hashicorp/vault/pull/3544)]
+ * secrets/databases: Add `create`/`update` distinction for role configurations [[GH-3544](https://github.com/hashicorp/vault/pull/3544)]
+ * secrets/databases: Add best-effort revocation logic for use when a role has been deleted [[GH-4782](https://github.com/hashicorp/vault/pull/4782)]
+ * secrets/kv: Add `kv rollback` [[GH-4774](https://github.com/hashicorp/vault/pull/4774)]
+ * secrets/pki: Add URI SANs support [[GH-4675](https://github.com/hashicorp/vault/pull/4675)]
+ * secrets/ssh: Allow standard SSH command arguments to be used, without requiring username@hostname syntax [[GH-4710](https://github.com/hashicorp/vault/pull/4710)]
+ * storage/consul: Add context support so that requests are cancelable [[GH-4739](https://github.com/hashicorp/vault/pull/4739)]
+ * sys: Added `hidden` option to `listing_visibility` field on `sys/mounts` API [[GH-4827](https://github.com/hashicorp/vault/pull/4827)]
+ * ui: Secret values are obfuscated by default and visibility is toggleable [[GH-4422](https://github.com/hashicorp/vault/pull/4422)]
+
+BUG FIXES:
+
+ * auth/approle: Fix panic due to metadata being nil [[GH-4719](https://github.com/hashicorp/vault/pull/4719)]
+ * auth/aws: Fix delete path for tidy operations [[GH-4799](https://github.com/hashicorp/vault/pull/4799)]
+ * core: Optimizations to remove some speed regressions due to the security-related changes in 0.10.2
+ * storage/dynamodb: Fix errors seen when reading existing DynamoDB data [[GH-4721](https://github.com/hashicorp/vault/pull/4721)]
+ * secrets/database: Fix default MySQL root rotation statement [[GH-4748](https://github.com/hashicorp/vault/pull/4748)]
+ * secrets/gcp: Fix renewal for GCP account keys
+ * secrets/kv: Fix `vault kv` writes to the root of a KVv2 mount operating on a root+mount path instead of returning an error [[GH-4726](https://github.com/hashicorp/vault/pull/4726)]
+ * seal/pkcs11: Add `CKK_SHA256_HMAC` to the search list when finding HMAC keys, fixing lookup on some Thales devices
+ * replication: Fix issue enabling replication when a non-auth mount and auth mount have the same name
+ * auth/kubernetes: Fix issue verifying ECDSA signed JWTs
+ * ui: Add missing edit mode for auth method configs [[GH-4770](https://github.com/hashicorp/vault/pull/4770)]
+
+## 0.10.2 (June 6th, 2018)
+
+SECURITY:
+
+ * Tokens: A race condition was identified that could occur if a token's lease expired while Vault was not running. In this case, when Vault came back online, sometimes it would properly revoke the lease but other times it would not, leading to a Vault token that no longer had an expiration and had essentially unlimited lifetime. This race was per-token, not all-or-nothing for all tokens that may have expired during Vault's downtime. We have fixed the behavior and put extra checks in place to help prevent any similar future issues.
+   In addition, the logic we have put in place ensures that such lease-less tokens can no longer be used (unless they are root tokens that never had an expiration to begin with).
+ * Convergent Encryption: The version 2 algorithm used in `transit`'s convergent encryption feature is susceptible to offline plaintext-confirmation attacks. As a result, we are introducing a version 3 algorithm that mitigates this. If you are currently using convergent encryption, we recommend upgrading, rotating your encryption key (the new key version will use the new algorithm), and rewrapping your data (the `rewrap` endpoint can be used to allow a relatively non-privileged user to perform the rewrapping while never divulging the plaintext). A rotate/rewrap sketch follows this release's feature list.
+ * AppRole case-sensitive role name secret-id leaking: When using a mixed-case role name via AppRole, deleting a secret-id via accessor or other operations could end up leaving the secret-id behind, still valid but without an accessor. This has now been fixed, and we have put checks in place to prevent these secret-ids from being used.
+
+DEPRECATIONS/CHANGES:
+
+ * PKI duration return types: The PKI backend now returns durations (e.g. when reading a role) as an integer number of seconds instead of a Go-style string, in line with how the rest of Vault's API returns durations.
+
+FEATURES:
+
+ * Active Directory Secrets Engine: A new `ad` secrets engine has been created which allows Vault to rotate and provide credentials for configured AD accounts.
+ * Rekey Verification: Rekey operations can now require verification. This turns on a two-phase process where the existing key shares authorize generating a new master key, and a threshold of the new, returned key shares must be provided to verify that they have been successfully received in order for the actual master key to be rotated.
+ * CIDR restrictions for `cert`, `userpass`, and `kubernetes` auth methods: You can now limit authentication to specific CIDRs; these will also be encoded in resultant tokens to limit their use.
+ * Vault UI Browser CLI: The UI now supports usage of read/write/list/delete commands in a CLI that can be accessed from the nav bar. Complex inputs such as JSON files are not currently supported. This surfaces features otherwise unsupported in Vault's UI.
+ * Azure Key Vault Auto Unseal/Seal Wrap Support (Enterprise): Azure Key Vault can now be used as a seal for Auto Unseal and Seal Wrapping.
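+
+A hedged sketch of the recommended convergent-encryption migration; the key name, ciphertext, and base64 context are hypothetical placeholders:
+
+```shell
+# Rotate the convergent key; the new key version uses the v3 algorithm.
+vault write -f transit/keys/orders-key/rotate
+
+# Rewrap existing ciphertext to the newest key version without ever
+# divulging the plaintext to the caller.
+vault write transit/rewrap/orders-key \
+    ciphertext="vault:v1:AAAA..." \
+    context="cHJvZHVjdGlvbg=="
+```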
+
+IMPROVEMENTS:
+
+ * api: Close renewer's doneCh when the renewer is stopped, so that programs expecting a final value through doneCh behave correctly [[GH-4472](https://github.com/hashicorp/vault/pull/4472)]
+ * auth/cert: Break out `allowed_names` into component parts and add `allowed_uri_sans` [[GH-4231](https://github.com/hashicorp/vault/pull/4231)]
+ * auth/ldap: Obfuscate error messages pre-bind for greater security [[GH-4700](https://github.com/hashicorp/vault/pull/4700)]
+ * cli: `vault login` now supports a `-no-print` flag to suppress printing token information but still allow storing into the token helper [[GH-4454](https://github.com/hashicorp/vault/pull/4454)]
+ * core/pkcs11 (enterprise): Add support for CKM_AES_CBC_PAD, CKM_RSA_PKCS, and CKM_RSA_PKCS_OAEP mechanisms
+ * core/pkcs11 (enterprise): HSM slots can now be selected by token label instead of just slot number
+ * core/token: Optimize token revocation by removing unnecessary list call against the storage backend when calling revoke-orphan on tokens [[GH-4465](https://github.com/hashicorp/vault/pull/4465)]
+ * core/token: Refactor token revocation logic to not block on the call when underlying leases are pending revocation by moving the expiration logic to the expiration manager [[GH-4512](https://github.com/hashicorp/vault/pull/4512)]
+ * expiration: Allow revoke-prefix and revoke-force to work on single leases as well as prefixes [[GH-4450](https://github.com/hashicorp/vault/pull/4450)]
+ * identity: Return parent group info when reading a group [[GH-4648](https://github.com/hashicorp/vault/pull/4648)]
+ * identity: Provide more contextual key information when listing entities, groups, and aliases
+ * identity: Passthrough EntityID to backends [[GH-4663](https://github.com/hashicorp/vault/pull/4663)]
+ * identity: Adds ability to request entity information through system view [[GH-4681](https://github.com/hashicorp/vault/pull/4681)]
+ * secret/pki: Add custom extended key usages [[GH-4667](https://github.com/hashicorp/vault/pull/4667)]
+ * secret/pki: Add custom PKIX serial numbers [[GH-4694](https://github.com/hashicorp/vault/pull/4694)]
+ * secret/ssh: Use hostname instead of IP in OTP mode, similar to CA mode [[GH-4673](https://github.com/hashicorp/vault/pull/4673)]
+ * storage/file: Attempt in some error conditions to do more cleanup [[GH-4684](https://github.com/hashicorp/vault/pull/4684)]
+ * ui: Wrapping lookup now displays the path [[GH-4644](https://github.com/hashicorp/vault/pull/4644)]
+ * ui: Identity interface now has more inline actions to make editing and adding aliases to an entity or group easier [[GH-4502](https://github.com/hashicorp/vault/pull/4502)]
+ * ui: Identity interface now lists groups by name [[GH-4655](https://github.com/hashicorp/vault/pull/4655)]
+ * ui: Permission denied errors still render the sidebar in the Access section [[GH-4658](https://github.com/hashicorp/vault/pull/4658)]
+ * replication: Improve performance of index page flushes and WAL garbage collecting
+
+BUG FIXES:
+
+ * auth/approle: Make invalid role_id a 400 error instead of 500 [[GH-4470](https://github.com/hashicorp/vault/pull/4470)]
+ * auth/cert: Fix Identity alias using serial number instead of common name [[GH-4475](https://github.com/hashicorp/vault/pull/4475)]
+ * cli: Fix panic running `vault token capabilities` with multiple paths [[GH-4552](https://github.com/hashicorp/vault/pull/4552)]
+ * core: When using the `use_always` option with PROXY protocol support, do not require `authorized_addrs` to be set
[[GH-4065](https://github.com/hashicorp/vault/pull/4065)] + * core: Fix panic when certain combinations of policy paths and allowed/denied + parameters were used [[GH-4582](https://github.com/hashicorp/vault/pull/4582)] + * secret/gcp: Make `bound_region` able to use short names + * secret/kv: Fix response wrapping for KV v2 [[GH-4511](https://github.com/hashicorp/vault/pull/4511)] + * secret/kv: Fix address flag not being honored correctly [[GH-4617](https://github.com/hashicorp/vault/pull/4617)] + * secret/pki: Fix `safety_buffer` for tidy being allowed to be negative, + clearing all certs [[GH-4641](https://github.com/hashicorp/vault/pull/4641)] + * secret/pki: Fix `key_type` not being allowed to be set to `any` [[GH-4595](https://github.com/hashicorp/vault/pull/4595)] + * secret/pki: Fix path length parameter being ignored when using + `use_csr_values` and signing an intermediate CA cert [[GH-4459](https://github.com/hashicorp/vault/pull/4459)] + * secret/ssh: Only append UserKnownHostsFile to args when configured with a + value [[GH-4674](https://github.com/hashicorp/vault/pull/4674)] + * storage/dynamodb: Fix listing when one child is left within a nested path + [[GH-4570](https://github.com/hashicorp/vault/pull/4570)] + * storage/gcs: Fix swallowing an error on connection close [[GH-4691](https://github.com/hashicorp/vault/pull/4691)] + * ui: Fix HMAC algorithm in transit [[GH-4604](https://github.com/hashicorp/vault/pull/4604)] + * ui: Fix unwrap of auth responses via the UI's unwrap tool [[GH-4611](https://github.com/hashicorp/vault/pull/4611)] + * ui (enterprise): Fix parsing of version string that blocked some users from seeing + enterprise-specific pages in the UI [[GH-4547](https://github.com/hashicorp/vault/pull/4547)] + * ui: Fix incorrect capabilities path check when viewing policies [[GH-4566](https://github.com/hashicorp/vault/pull/4566)] + * replication: Fix error while running plugins on a newly created replication + secondary + * replication: Fix issue with token store lookups after a secondary's mount table + is invalidated. + * replication: Improve startup time when a large merkle index is in use. + * replication: Fix panic when storage becomes unreachable during unseal. + +## 0.10.1/0.9.7 (April 25th, 2018) + +The following two items are in both 0.9.7 and 0.10.1. They only affect +Enterprise, and as such 0.9.7 is an Enterprise-only release: + +SECURITY: + + * EGPs: A regression affecting 0.9.6 and 0.10.0 causes EGPs to not be applied + correctly if an EGP is updated in a running Vault after initial write or + after it is loaded on unseal. This has been fixed. + +BUG FIXES: + + * Fixed an upgrade issue affecting performance secondaries when migrating from + a version that did not include Identity to one that did. + +All other content in this release is for 0.10.1 only. + +DEPRECATIONS/CHANGES: + + * `vault kv` and Vault versions: In 0.10.1 some issues with `vault kv` against + v1 K/V engine mounts are fixed. However, using 0.10.1 for both the server + and CLI versions is required. + * Mount information visibility: Users that have access to any path within a + mount can now see information about that mount, such as its type and + options, via some API calls. + * Identity and Local Mounts: Local mounts would allow creating Identity + entities but these would not be able to be used successfully (even locally) + in replicated scenarios. We have now disallowed entities and groups from + being created for local mounts in the first place. 
+
+FEATURES:
+
+ * X-Forwarded-For support: `X-Forwarded-For` headers can now be used to set the client IP seen by Vault. See the [TCP listener configuration page](https://www.vaultproject.io/docs/configuration/listener/tcp.html) for details.
+ * CIDR IP Binding for Tokens: Tokens now support being bound to specific CIDR(s) for usage. Currently this is implemented in Token Roles; usage can be expanded to other authentication backends over time.
+ * `vault kv patch` command: A new `kv patch` helper command that allows modifying only some values in existing data at a K/V path, but uses check-and-set to ensure that this modification happens safely (see the sketch after the improvements list).
+ * AppRole Local Secret IDs: Roles can now be configured to generate secret IDs local to the cluster. This enables performance secondaries to generate and consume secret IDs without contacting the primary.
+ * AES-GCM Support for PKCS#11 [BETA] (Enterprise): For supporting HSMs, AES-GCM can now be used in lieu of AES-CBC/HMAC-SHA256. This has currently only been fully tested on AWS CloudHSM.
+ * Auto Unseal/Seal Wrap Key Rotation Support (Enterprise): Auto Unseal mechanisms, including PKCS#11 HSMs, now support rotation of encryption keys, and migration between key and encryption types, such as from AES-CBC to AES-GCM, can be performed at the same time (where supported).
+
+IMPROVEMENTS:
+
+ * auth/approle: Support for cluster local secret IDs. This enables secondaries to generate secret IDs without contacting the primary [[GH-4427](https://github.com/hashicorp/vault/pull/4427)]
+ * auth/token: Add the policies inherited due to identity associations to the token lookup response [[GH-4366](https://github.com/hashicorp/vault/pull/4366)]
+ * auth/token: Add CIDR binding to token roles [[GH-815](https://github.com/hashicorp/vault/pull/815)]
+ * cli: Add `vault kv patch` [[GH-4432](https://github.com/hashicorp/vault/pull/4432)]
+ * core: Add X-Forwarded-For support [[GH-4380](https://github.com/hashicorp/vault/pull/4380)]
+ * core: Add token CIDR-binding support [[GH-815](https://github.com/hashicorp/vault/pull/815)]
+ * identity: Add the ability to disable an entity. Disabling an entity does not revoke associated tokens, but while the entity is disabled they cannot be used. [[GH-4353](https://github.com/hashicorp/vault/pull/4353)]
+ * physical/consul: Allow tuning of session TTL and lock wait time [[GH-4352](https://github.com/hashicorp/vault/pull/4352)]
+ * replication: Dynamically adjust WAL cleanup over a period of time based on the rate of writes committed
+ * secret/ssh: Update dynamic key install script to use shell locking to avoid concurrent modifications [[GH-4358](https://github.com/hashicorp/vault/pull/4358)]
+ * ui: Access to `sys/mounts` is no longer needed to use the UI - the list of engines will show you the ones you implicitly have access to (because you have access to secrets in those engines) [[GH-4439](https://github.com/hashicorp/vault/pull/4439)]
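+
+A one-line sketch of `vault kv patch`; the mount, path, and field are hypothetical:
+
+```shell
+# Update a single field in place; other fields are preserved, and the
+# write uses check-and-set so concurrent writers cannot clobber it.
+vault kv patch secret/my-app api_key="new-value"
+```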
+
+BUG FIXES:
+
+ * cli: Fix `vault kv` backwards compatibility with KV v1 engine mounts [[GH-4430](https://github.com/hashicorp/vault/pull/4430)]
+ * identity: Persist entity memberships in external identity groups across mounts [[GH-4365](https://github.com/hashicorp/vault/pull/4365)]
+ * identity: Fix error preventing authentication using local mounts on performance secondary replication clusters [[GH-4407](https://github.com/hashicorp/vault/pull/4407)]
+ * replication: Fix issue causing secondaries to not connect properly to a pre-0.10 primary until the primary was upgraded
+ * secret/gcp: Fix panic on rollback when a roleset wasn't created properly [[GH-4344](https://github.com/hashicorp/vault/pull/4344)]
+ * secret/gcp: Fix panic on renewal
+ * ui: Fix IE11 form submissions in a few parts of the application [[GH-4378](https://github.com/hashicorp/vault/pull/4378)]
+ * ui: Fix IE file saving on policy pages and init screens [[GH-4376](https://github.com/hashicorp/vault/pull/4376)]
+ * ui: Fixed an issue where the AWS secret backend would show the wrong menu [[GH-4371](https://github.com/hashicorp/vault/pull/4371)]
+ * ui: Fixed an issue where policies with commas would not render in the interface properly [[GH-4398](https://github.com/hashicorp/vault/pull/4398)]
+ * ui: Corrected the saving of mount tune ttls for auth methods [[GH-4431](https://github.com/hashicorp/vault/pull/4431)]
+ * ui: Credentials generation no longer checks capabilities before making API calls. This should fix needing "update" capabilities to read IAM credentials in the AWS secrets engine [[GH-4446](https://github.com/hashicorp/vault/pull/4446)]
+
+## 0.10.0 (April 10th, 2018)
+
+SECURITY:
+
+ * Log sanitization for Combined Database Secret Engine: In certain failure scenarios with incorrectly formatted connection URLs, the raw connection errors were being returned to the user with the configured database credentials. Errors are now sanitized before being returned to the user.
+
+DEPRECATIONS/CHANGES:
+
+ * Database plugin compatibility: The database plugin interface was enhanced to support some additional functionality related to root credential rotation and supporting templated URL strings. The changes were made in a backwards-compatible way and all builtin plugins were updated with the new features. Custom plugins not built into Vault will need to be upgraded to support templated URL strings and root rotation. Additionally, the Initialize method was deprecated in favor of a new Init method that supports returning configuration modifications that occur in the plugin back to the primary data store.
+ * Removal of returned secret information: For a long time Vault has returned + configuration given to various secret engines and auth methods with secret + values (such as secret API keys or passwords) still intact, and with a + warning to the user on write that anyone with read access could see the + secret. This was mostly done to make it easy for tools like Terraform to + judge whether state had drifted. However, it also feels quite un-Vault-y to + do this and we've never felt very comfortable doing so. In 0.10 we have gone + through and removed this behavior from the various backends; fields which + contained secret values are simply no longer returned on read. We are + working with the Terraform team to make changes to their provider to + accommodate this as best as possible, and users of other tools may have to + make adjustments, but in the end we felt that the ends did not justify the + means and we needed to prioritize security over operational convenience. + * LDAP auth method case sensitivity: We now treat usernames and groups + configured locally for policy assignment in a case insensitive fashion by + default. Existing configurations will continue to work as they do now; + however, the next time a configuration is written `case_sensitive_names` + will need to be explicitly set to `true`. + * TTL handling within core: All lease TTL handling has been centralized within + the core of Vault to ensure consistency across all backends. Since this was + previously delegated to individual backends, there may be some slight + differences in TTLs generated from some backends. + * Removal of default `secret/` mount: In 0.12 we will stop mounting `secret/` + by default at initialization time (it will still be available in `dev` + mode). + +FEATURES: + + * OSS UI: The Vault UI is now fully open-source. Similarly to the CLI, some + features are only available with a supporting version of Vault, but the code + base is entirely open. + * Versioned K/V: The `kv` backend has been completely revamped, featuring + flexible versioning of values, check-and-set protections, and more. A new + `vault kv` subcommand allows friendly interactions with it. Existing mounts + of the `kv` backend can be upgraded to the new versioned mode (downgrades + are not currently supported). The old "passthrough" mode is still the + default for new mounts; versioning can be turned on by setting the + `-version=2` flag for the `vault secrets enable` command. + * Database Root Credential Rotation: Database configurations can now rotate + their own configured admin/root credentials, allowing configured credentials + for a database connection to be rotated immediately after sending them into + Vault, invalidating the old credentials and ensuring only Vault knows the + actual valid values. + * Azure Authentication Plugin: There is now a plugin (pulled in to Vault) that + allows authenticating Azure machines to Vault using Azure's Managed Service + Identity credentials. See the [plugin + repository](https://github.com/hashicorp/vault-plugin-auth-azure) for more + information. + * GCP Secrets Plugin: There is now a plugin (pulled in to Vault) that allows + generating secrets to allow access to GCP. See the [plugin + repository](https://github.com/hashicorp/vault-plugin-secrets-gcp) for more + information. + * Selective Audit HMACing of Request and Response Data Keys: HMACing in audit + logs can be turned off for specific keys in the request input map and + response `data` map on a per-mount basis. 
+ * Passthrough Request Headers: Request headers can now be selectively passed through to backends on a per-mount basis. This is useful in various cases when plugins are interacting with external services.
+ * HA for Google Cloud Storage: The GCS storage type now supports HA.
+ * UI support for identity: Add and edit entities, groups, and their associated aliases.
+ * UI auth method support: Enable, disable, and configure all of the built-in authentication methods.
+ * UI (Enterprise): View and edit Sentinel policies.
+
+IMPROVEMENTS:
+
+ * core: Centralize TTL generation for leases in core [[GH-4230](https://github.com/hashicorp/vault/pull/4230)]
+ * identity: API to update group-alias by ID [[GH-4237](https://github.com/hashicorp/vault/pull/4237)]
+ * secret/cassandra: Update Cassandra storage delete function to not use batch operations [[GH-4054](https://github.com/hashicorp/vault/pull/4054)]
+ * storage/mysql: Allow setting max idle connections and connection lifetime [[GH-4211](https://github.com/hashicorp/vault/pull/4211)]
+ * storage/gcs: Add HA support [[GH-4226](https://github.com/hashicorp/vault/pull/4226)]
+ * ui: Add Nomad to the list of available secret engines
+ * ui: Add ability to set static headers to be returned by the UI
+
+BUG FIXES:
+
+ * api: Fix retries not working [[GH-4322](https://github.com/hashicorp/vault/pull/4322)]
+ * auth/gcp: Invalidate clients on config change
+ * auth/token: Revoke-orphan and tidy operations now correctly clean up the parent prefix entry in the underlying storage backend. These operations also mark corresponding child tokens as orphans by removing the parent/secondary index from the entries. [[GH-4193](https://github.com/hashicorp/vault/pull/4193)]
+ * command: Re-add `-mfa` flag and migrate to OSS binary [[GH-4223](https://github.com/hashicorp/vault/pull/4223)]
+ * core: Fix issue occurring from mounting two auth backends with the same path with one mount having `auth/` in front [[GH-4206](https://github.com/hashicorp/vault/pull/4206)]
+ * mfa: Invalidation of MFA configurations (Enterprise)
+ * replication: Fix a panic on some non-64-bit platforms
+ * replication: Fix invalidation of policies on performance secondaries
+ * secret/pki: When tidying if a value is unexpectedly nil, delete it and move on [[GH-4214](https://github.com/hashicorp/vault/pull/4214)]
+ * storage/s3: Fix panic if S3 returns no Content-Length header [[GH-4222](https://github.com/hashicorp/vault/pull/4222)]
+ * ui: Fixed an issue where the UI was checking incorrect paths when operating on transit keys. Capabilities are now checked when attempting to encrypt / decrypt, etc.
+ * ui: Fixed IE 11 layout issues and JS errors that would stop the application from running.
+ * ui: Fixed the link that gets rendered when a user doesn't have permissions to view the root of a secret engine. The link now sends them back to the list of secret engines.
+ * replication: Fix issue with DR secondaries when using mount-specified local paths.
+ * cli: Fix an issue where generating a DR operation token would not output the token [[GH-4328](https://github.com/hashicorp/vault/pull/4328)]
+
+## 0.9.6 (March 20th, 2018)
+
+DEPRECATIONS/CHANGES:
+
+ * The AWS authentication backend now allows binds for inputs as either a comma-delimited string or a string array. However, to keep consistency with input and output, when reading a role the binds will now be returned as string arrays rather than strings.
+ * In order to prefix-match IAM role and instance profile ARNs in AWS auth backend, you now must explicitly opt-in by adding a `*` to the end of the ARN. Existing configurations will be upgraded automatically, but when writing a new role configuration the updated behavior will be used (see the sketch after this list).
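+
+A sketch of the new explicit wildcard opt-in for ARN prefix matching; the account ID, role name, and policy are hypothetical:
+
+```shell
+# A trailing "*" is now required to opt in to prefix matching.
+vault write auth/aws/role/dev-role \
+    auth_type=iam \
+    bound_iam_principal_arn="arn:aws:iam::123456789012:role/dev-*" \
+    policies="default"
+```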
+
+FEATURES:
+
+ * Replication Activation Enhancements: When activating a replication secondary, a public key can now be fetched first from the target cluster. This public key can be provided to the primary when requesting the activation token. If provided, the public key will be used to perform a Diffie-Hellman key exchange resulting in a shared key that encrypts the contents of the activation token. The purpose is to protect against accidental disclosure of the contents of the token if unwrapped by the wrong party, given that the contents of the token are highly sensitive. If accidentally unwrapped, the contents of the token are not usable by the unwrapping party. It is important to note that just as a malicious operator could unwrap the contents of the token, a malicious operator can pretend to be a secondary and complete the Diffie-Hellman exchange on their own; this feature provides defense in depth but still requires due diligence around replication activation, including multiple eyes on the commands/tokens and proper auditing.
+
+IMPROVEMENTS:
+
+ * api: Update renewer grace period logic. It is no longer static; instead, a grace period is dynamically calculated based on the current lease duration after each renew. [[GH-4090](https://github.com/hashicorp/vault/pull/4090)]
+ * auth/approle: Allow array input for bound_cidr_list [[GH-4078](https://github.com/hashicorp/vault/pull/4078)]
+ * auth/aws: Allow using lists in role bind parameters [[GH-3907](https://github.com/hashicorp/vault/pull/3907)]
+ * auth/aws: Allow binding by EC2 instance IDs [[GH-3816](https://github.com/hashicorp/vault/pull/3816)]
+ * auth/aws: Allow non-prefix-matched IAM role and instance profile ARNs [[GH-4071](https://github.com/hashicorp/vault/pull/4071)]
+ * auth/ldap: Set a very large size limit on queries [[GH-4169](https://github.com/hashicorp/vault/pull/4169)]
+ * core: Log info notifications of revoked leases for all leases/reasons, not just expirations [[GH-4164](https://github.com/hashicorp/vault/pull/4164)]
+ * physical/couchdb: Removed limit on the listing of items [[GH-4149](https://github.com/hashicorp/vault/pull/4149)]
+ * secret/pki: Support certificate policies [[GH-4125](https://github.com/hashicorp/vault/pull/4125)]
+ * secret/pki: Add ability to have CA:true encoded into intermediate CSRs, to improve compatibility with some ADFS scenarios [[GH-3883](https://github.com/hashicorp/vault/pull/3883)]
+ * secret/transit: Allow selecting signature algorithm as well as hash algorithm when signing/verifying [[GH-4018](https://github.com/hashicorp/vault/pull/4018)]
+ * server: Make sure `tls_disable_client_cert` is actually a true value rather than just set [[GH-4049](https://github.com/hashicorp/vault/pull/4049)]
+ * storage/dynamodb: Allow specifying max retries for dynamo client [[GH-4115](https://github.com/hashicorp/vault/pull/4115)]
+ * storage/gcs: Allow specifying chunk size for transfers, which can reduce memory utilization [[GH-4060](https://github.com/hashicorp/vault/pull/4060)]
+ * sys/capabilities: Add the ability to use multiple paths for capability checking [[GH-3663](https://github.com/hashicorp/vault/pull/3663)]
+
+BUG FIXES:
+
+ * auth/aws: Fix honoring `max_ttl` when a corresponding role `ttl` is not also set [[GH-4107](https://github.com/hashicorp/vault/pull/4107)]
+ * auth/okta: Fix honoring configured `max_ttl` value [[GH-4110](https://github.com/hashicorp/vault/pull/4110)]
+ * auth/token: If a periodic token being issued has a period greater than the max_lease_ttl configured on the token store mount, truncate it. This matches renewal behavior; before it was inconsistent between issuance and renewal. [[GH-4112](https://github.com/hashicorp/vault/pull/4112)]
+ * cli: Improve error messages around `vault auth help` when there is no CLI helper for a particular method [[GH-4056](https://github.com/hashicorp/vault/pull/4056)]
+ * cli: Fix autocomplete installation when using Fish as the shell [[GH-4094](https://github.com/hashicorp/vault/pull/4094)]
+ * secret/database: Properly honor mount-tuned max TTL [[GH-4051](https://github.com/hashicorp/vault/pull/4051)]
+ * secret/ssh: Return `key_bits` value when reading a role [[GH-4098](https://github.com/hashicorp/vault/pull/4098)]
+ * sys: When writing policies on a performance replication secondary, properly forward requests to the primary [[GH-4129](https://github.com/hashicorp/vault/pull/4129)]
+
+## 0.9.5 (February 26th, 2018)
+
+IMPROVEMENTS:
+
+ * auth: Allow sending default_lease_ttl and max_lease_ttl values when enabling auth methods. [[GH-4019](https://github.com/hashicorp/vault/pull/4019)]
+ * secret/database: Add list functionality to `database/config` endpoint [[GH-4026](https://github.com/hashicorp/vault/pull/4026)]
+ * physical/consul: Allow setting a specific service address [[GH-3971](https://github.com/hashicorp/vault/pull/3971)]
+ * replication: When bootstrapping a new secondary, if the initial cluster connection fails, Vault will attempt to roll back state so that bootstrapping can be tried again, rather than having to recreate the downstream cluster. This will still require fetching a new secondary activation token.
+
+BUG FIXES:
+
+ * auth/aws: Update libraries to fix regression verifying PKCS#7 identity documents [[GH-4014](https://github.com/hashicorp/vault/pull/4014)]
+ * listener: Revert to Go 1.9 for now to allow certificates with non-DNS names in their DNS SANs to be used for Vault's TLS connections [[GH-4028](https://github.com/hashicorp/vault/pull/4028)]
+ * replication: Fix issue with a performance secondary/DR primary node losing its DR primary status when performing an update-primary operation
+ * replication: Fix issue where performance secondaries could be unable to automatically connect to a performance primary after that performance primary has been promoted to a DR primary from a DR secondary
+ * ui: Fix behavior when a value contains a `.`
+
+## 0.9.4 (February 20th, 2018)
+
+SECURITY:
+
+ * Role Tags used with the EC2 style of AWS auth were being improperly parsed; as a result they were not being used to properly restrict values. Implementations following our suggestion of using these as defense-in-depth rather than the only source of restriction should not have significant impact.
+
+FEATURES:
+
+ * **ChaCha20-Poly1305 support in `transit`**: You can now encrypt and decrypt with ChaCha20-Poly1305 in `transit`. Key derivation and convergent encryption are also supported (see the sketch after this release's lists).
+ * **Okta Push support in Okta Auth Backend**: If a user account has MFA required within Okta, an Okta Push MFA flow can be used to successfully finish authentication.
+ * **PKI Improvements**: Custom OID subject alternate names can now be set, subject to allow restrictions that support globbing. Additionally, Country, Locality, Province, Street Address, and Postal Code can now be set in certificate subjects.
+ * **Manta Storage**: Joyent Triton Manta can now be used for Vault storage
+ * **Google Cloud Spanner Storage**: Google Cloud Spanner can now be used for Vault storage
+
+IMPROVEMENTS:
+
+ * auth/centrify: Add CLI helper
+ * audit: Always log failure metrics, even if zero, to ensure the values appear on dashboards [[GH-3937](https://github.com/hashicorp/vault/pull/3937)]
+ * cli: Disable color when output is not a TTY [[GH-3897](https://github.com/hashicorp/vault/pull/3897)]
+ * cli: Add `-format` flag to all subcommands [[GH-3897](https://github.com/hashicorp/vault/pull/3897)]
+ * cli: Do not display deprecation warnings when the format is not table [[GH-3897](https://github.com/hashicorp/vault/pull/3897)]
+ * core: If over a predefined lease count (256k), log a warning not more than once a minute. Too many leases can be problematic for many of the storage backends and often this number of leases is indicative of a need for workflow improvements. [[GH-3957](https://github.com/hashicorp/vault/pull/3957)]
+ * secret/nomad: Have generated ACL tokens cap out at 64 characters [[GH-4009](https://github.com/hashicorp/vault/pull/4009)]
+ * secret/pki: Country, Locality, Province, Street Address, and Postal Code can now be set on certificates [[GH-3992](https://github.com/hashicorp/vault/pull/3992)]
+ * secret/pki: UTF-8 Other Names can now be set in Subject Alternate Names in issued certs; allowed values can be set per role and support globbing [[GH-3889](https://github.com/hashicorp/vault/pull/3889)]
+ * secret/pki: Add a flag to make the common name optional on certs [[GH-3940](https://github.com/hashicorp/vault/pull/3940)]
+ * secret/pki: Ensure only DNS-compatible names go into DNS SANs; additionally, properly handle IDNA transformations for these DNS names [[GH-3953](https://github.com/hashicorp/vault/pull/3953)]
+ * secret/ssh: Add `valid-principals` flag to CLI for CA mode [[GH-3922](https://github.com/hashicorp/vault/pull/3922)]
+ * storage/manta: Add Manta storage [[GH-3270](https://github.com/hashicorp/vault/pull/3270)]
+ * ui (Enterprise): Support for ChaCha20-Poly1305 keys in the transit engine.
+
+BUG FIXES:
+
+ * api/renewer: Honor increment value in renew auth calls [[GH-3904](https://github.com/hashicorp/vault/pull/3904)]
+ * auth/approle: Fix inability to use limited-use-count secret IDs on replication performance secondaries
+ * auth/approle: Cleanup of secret ID accessors during tidy and removal of dangling accessor entries [[GH-3924](https://github.com/hashicorp/vault/pull/3924)]
+ * auth/aws-ec2: Avoid masking of role tag response [[GH-3941](https://github.com/hashicorp/vault/pull/3941)]
+ * auth/cert: Verify DNS SANs in the authenticating certificate [[GH-3982](https://github.com/hashicorp/vault/pull/3982)]
+ * auth/okta: Return configured durations as seconds, not nanoseconds [[GH-3871](https://github.com/hashicorp/vault/pull/3871)]
+ * auth/okta: Get all okta groups for a user vs. default 200 limit [[GH-4034](https://github.com/hashicorp/vault/pull/4034)]
+ * auth/token: Token creation via the CLI no longer forces periodic token creation. Passing an explicit zero value for the period no longer creates periodic tokens. [[GH-3880](https://github.com/hashicorp/vault/pull/3880)]
+ * command: Fix interpreted formatting directives when printing raw fields [[GH-4005](https://github.com/hashicorp/vault/pull/4005)]
+ * command: Correctly format output when using -field and -format flags at the same time [[GH-3987](https://github.com/hashicorp/vault/pull/3987)]
+ * command/rekey: Re-add lost `stored-shares` parameter [[GH-3974](https://github.com/hashicorp/vault/pull/3974)]
+ * command/ssh: Create and reuse the api client [[GH-3909](https://github.com/hashicorp/vault/pull/3909)]
+ * command/status: Fix panic when status returns 500 from leadership lookup [[GH-3998](https://github.com/hashicorp/vault/pull/3998)]
+ * identity: Fix race when creating entities [[GH-3932](https://github.com/hashicorp/vault/pull/3932)]
+ * plugin/gRPC: Fixed an issue with list requests and raw responses coming from plugins using gRPC transport [[GH-3881](https://github.com/hashicorp/vault/pull/3881)]
+ * plugin/gRPC: Fix panic when special paths are not set [[GH-3946](https://github.com/hashicorp/vault/pull/3946)]
+ * secret/pki: Verify a name is a valid hostname before adding to DNS SANs [[GH-3918](https://github.com/hashicorp/vault/pull/3918)]
+ * secret/transit: Fix auditing when reading a key after it has been backed up or restored [[GH-3919](https://github.com/hashicorp/vault/pull/3919)]
+ * secret/transit: Fix storage/memory consistency when persistence fails [[GH-3959](https://github.com/hashicorp/vault/pull/3959)]
+ * storage/consul: Validate that service names are RFC 1123 compliant [[GH-3960](https://github.com/hashicorp/vault/pull/3960)]
+ * storage/etcd3: Fix memory ballooning with standby instances [[GH-3798](https://github.com/hashicorp/vault/pull/3798)]
+ * storage/etcd3: Fix large lists (like token loading at startup) not being handled [[GH-3772](https://github.com/hashicorp/vault/pull/3772)]
+ * storage/postgresql: Fix compatibility with versions using custom string version tags [[GH-3949](https://github.com/hashicorp/vault/pull/3949)]
+ * storage/zookeeper: Update vendoring to fix freezing issues [[GH-3896](https://github.com/hashicorp/vault/pull/3896)]
+ * ui (Enterprise): Decoding the replication token should no longer error and prevent enabling of a secondary replication cluster via the UI.
+ * plugin/gRPC: Add connection info to the request object [[GH-3997](https://github.com/hashicorp/vault/pull/3997)]
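+
+A minimal sketch of the new transit cipher; the key name and plaintext are hypothetical:
+
+```shell
+# Create a transit key using ChaCha20-Poly1305.
+vault write -f transit/keys/app-key type=chacha20-poly1305
+
+# Encrypt as usual; transit expects base64-encoded plaintext.
+vault write transit/encrypt/app-key plaintext="$(base64 <<< 'secret data')"
+```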
+
+## 0.9.3 (January 28th, 2018)
+
+A regression from a feature merge disabled the Nomad secrets backend in 0.9.2. This release re-enables the Nomad secrets backend; it is otherwise identical to 0.9.2.
+
+## 0.9.2 (January 26th, 2018)
+
+SECURITY:
+
+ * Okta Auth Backend: While the Okta auth backend was successfully verifying usernames and passwords, it was not checking the returned state of the account, so accounts that had been marked locked out could still be used to log in. Only accounts in SUCCESS or PASSWORD_WARN states are now allowed.
+ * Periodic Tokens: A regression in 0.9.1 meant that periodic tokens created by the AppRole, AWS, and Cert auth backends would expire when the max TTL for the backend/mount/system was hit instead of their stated behavior of living as long as they are renewed. This is now fixed; existing tokens do not have to be reissued as this was purely a regression in the renewal logic.
+ * Seal Wrapping: During certain replication states, values that were written and marked for seal wrapping might not be wrapped on the secondaries.
This has been fixed, + and existing values will be wrapped on next read or write. This does not + affect the barrier keys. + +DEPRECATIONS/CHANGES: + + * `sys/health` DR Secondary Reporting: The `replication_dr_secondary` bool + returned by `sys/health` could be misleading since it would be `false` both + when a cluster was not a DR secondary but also when the node is a standby in + the cluster and has not yet fully received state from the active node. This + could cause health checks on LBs to decide that the node was acceptable for + traffic even though DR secondaries cannot handle normal Vault traffic. (In + other words, the bool could only convey "yes" or "no" but not "not sure + yet".) This has been replaced by `replication_dr_mode` and + `replication_perf_mode` which are string values that convey the current + state of the node; a value of `disabled` indicates that replication is + disabled or the state is still being discovered. As a result, an LB check + can positively verify that the node is both not `disabled` and is not a DR + secondary, and avoid sending traffic to it if either is true. + * PKI Secret Backend Roles parameter types: For `ou` and `organization` + in role definitions in the PKI secret backend, input can now be a + comma-separated string or an array of strings. Reading a role will + now return arrays for these parameters. + * Plugin API Changes: The plugin API has been updated to utilize golang's + context.Context package. Many function signatures now accept a context + object as the first parameter. Existing plugins will need to pull in the + latest Vault code and update their function signatures to begin using + context and the new gRPC transport. + +FEATURES: + + * **gRPC Backend Plugins**: Backend plugins now use gRPC for transport, + allowing them to be written in other languages. + * **Brand New CLI**: Vault has a brand new CLI interface that is significantly + streamlined, supports autocomplete, and is almost entirely backwards + compatible. + * **UI: PKI Secret Backend (Enterprise)**: Configure PKI secret backends, + create and browse roles and certificates, and issue and sign certificates via + the listed roles. + +IMPROVEMENTS: + + * auth/aws: Handle IAM headers produced by clients that formulate numbers as + ints rather than strings [[GH-3763](https://github.com/hashicorp/vault/pull/3763)] + * auth/okta: Support JSON lists when specifying groups and policies [[GH-3801](https://github.com/hashicorp/vault/pull/3801)] + * autoseal/hsm: Attempt reconnecting to the HSM on certain kinds of issues, + including HA scenarios for some Gemalto HSMs. 
+   (Enterprise)
+ * cli: Output password prompts to stderr to make it easier to pipe an output
+   token to another command [[GH-3782](https://github.com/hashicorp/vault/pull/3782)]
+ * core: Report replication status in `sys/health` [[GH-3810](https://github.com/hashicorp/vault/pull/3810)]
+ * physical/s3: Allow using paths with S3 for non-AWS deployments [[GH-3730](https://github.com/hashicorp/vault/pull/3730)]
+ * physical/s3: Add ability to disable SSL for non-AWS deployments [[GH-3730](https://github.com/hashicorp/vault/pull/3730)]
+ * plugins: Args for plugins can now be specified separately from the command,
+   allowing the same output format and input format for plugin information
+   [[GH-3778](https://github.com/hashicorp/vault/pull/3778)]
+ * secret/pki: `ou` and `organization` can now be specified as a
+   comma-separated string or an array of strings [[GH-3804](https://github.com/hashicorp/vault/pull/3804)]
+ * plugins: Plugins will fall back to using netrpc as the communication protocol
+   on older versions of Vault [[GH-3833](https://github.com/hashicorp/vault/pull/3833)]
+
+BUG FIXES:
+
+ * auth/(approle,aws,cert): Fix behavior where periodic tokens generated by
+   these backends could not have their TTL renewed beyond the system/mount max
+   TTL value [[GH-3803](https://github.com/hashicorp/vault/pull/3803)]
+ * auth/aws: Fix error returned if `bound_iam_principal_arn` was given to an
+   existing role update [[GH-3843](https://github.com/hashicorp/vault/pull/3843)]
+ * core/sealwrap: Speed improvements and bug fixes (Enterprise)
+ * identity: Delete group alias when an external group is deleted [[GH-3773](https://github.com/hashicorp/vault/pull/3773)]
+ * legacymfa/duo: Fix intermittent panic when Duo could not be reached
+   [[GH-2030](https://github.com/hashicorp/vault/pull/2030)]
+ * secret/database: Fix a location where a lock could potentially not be
+   released, leading to deadlock [[GH-3774](https://github.com/hashicorp/vault/pull/3774)]
+ * secret/(all databases): Fix behavior where if a max TTL was specified but no
+   default TTL was specified the system/mount default TTL would be used but not
+   be capped by the local max TTL [[GH-3814](https://github.com/hashicorp/vault/pull/3814)]
+ * secret/database: Fix an issue where plugins were not closed properly if they
+   failed to initialize [[GH-3768](https://github.com/hashicorp/vault/pull/3768)]
+ * ui: Mounting a secret backend will now properly set `max_lease_ttl` and
+   `default_lease_ttl` when specified; previously both fields set
+   `default_lease_ttl`.
+
+## 0.9.1 (December 21st, 2017)
+
+DEPRECATIONS/CHANGES:
+
+ * AppRole Case Sensitivity: In prior versions of Vault, `list` operations
+   against AppRole roles would require preserving case in the role name, even
+   though most other operations within AppRole are case-insensitive with
+   respect to the role name. This has been fixed; existing roles will behave as
+   they have in the past, but new roles will act case-insensitively in these
+   cases.
+ * Token Auth Backend Roles parameter types: For `allowed_policies` and
+   `disallowed_policies` in role definitions in the token auth backend, input
+   can now be a comma-separated string or an array of strings. Reading a role
+   will now return arrays for these parameters.
+ * Transit key exporting: You can now mark a key in the `transit` backend as
+   `exportable` at any time, rather than just at creation time; however, once
+   this value is set, it still cannot be unset.
+ * PKI Secret Backend Roles parameter types: For `allowed_domains` and
+   `key_usage` in role definitions in the PKI secret backend, input
+   can now be a comma-separated string or an array of strings. Reading a role
+   will now return arrays for these parameters.
+ * SSH Dynamic Keys Method Defaults to 2048-bit Keys: When using the dynamic
+   key method in the SSH backend, the default is now to use 2048-bit keys if no
+   specific key bit size is specified.
+ * Consul Secret Backend lease handling: The `consul` secret backend can now
+   accept both strings and integer numbers of seconds for its lease value. The
+   value returned on a role read will be an integer number of seconds instead
+   of a human-friendly string.
+ * Unprintable characters not allowed in API paths: Unprintable characters are
+   no longer allowed in names in the API (paths and path parameters), with an
+   extra restriction on whitespace characters. Allowed characters are those
+   that are considered printable by Unicode plus spaces.
+
+FEATURES:
+
+ * **Transit Backup/Restore**: The `transit` backend now supports a backup
+   operation that can export a given key, including all key versions and
+   configuration, as well as a restore operation allowing import into another
+   Vault.
+ * **gRPC Database Plugins**: Database plugins now use gRPC for transport,
+   allowing them to be written in other languages.
+ * **Nomad Secret Backend**: Nomad ACL tokens can now be generated and revoked
+   using Vault.
+ * **TLS Cert Auth Backend Improvements**: The `cert` auth backend can now
+   match against custom certificate extensions via exact or glob matching, and
+   additionally supports `max_ttl` and periodic token toggles.
+
+IMPROVEMENTS:
+
+ * auth/cert: Support custom certificate constraints [[GH-3634](https://github.com/hashicorp/vault/pull/3634)]
+ * auth/cert: Support setting `max_ttl` and `period` [[GH-3642](https://github.com/hashicorp/vault/pull/3642)]
+ * audit/file: Setting a file mode of `0000` will now prevent Vault from
+   automatically `chmod`ing the log file [[GH-3649](https://github.com/hashicorp/vault/pull/3649)]
+ * auth/github: The legacy MFA system can now be used with the GitHub auth
+   backend [[GH-3696](https://github.com/hashicorp/vault/pull/3696)]
+ * auth/okta: The legacy MFA system can now be used with the Okta auth backend
+   [[GH-3653](https://github.com/hashicorp/vault/pull/3653)]
+ * auth/token: `allowed_policies` and `disallowed_policies` can now be specified
+   as a comma-separated string or an array of strings [[GH-3641](https://github.com/hashicorp/vault/pull/3641)]
+ * command/server: The log level can now be specified with `VAULT_LOG_LEVEL`
+   [[GH-3721](https://github.com/hashicorp/vault/pull/3721)]
+ * core: Period values from auth backends will now be checked and applied to the
+   TTL value directly by core on login and renewal requests [[GH-3677](https://github.com/hashicorp/vault/pull/3677)]
+ * database/mongodb: Add optional `write_concern` parameter, which can be set
+   during database configuration.
This establishes a session-wide [write
+   concern](https://docs.mongodb.com/manual/reference/write-concern/) for the
+   lifecycle of the mount [[GH-3646](https://github.com/hashicorp/vault/pull/3646)]
+ * http: Request paths containing non-printable characters will return a 400 -
+   Bad Request [[GH-3697](https://github.com/hashicorp/vault/pull/3697)]
+ * mfa/okta: Allow specifying a given email address as a login filter, allowing
+   operation when the login email and account email are different
+ * plugins: Make Vault more resilient when unsealing when plugins are
+   unavailable [[GH-3686](https://github.com/hashicorp/vault/pull/3686)]
+ * secret/pki: `allowed_domains` and `key_usage` can now be specified
+   as a comma-separated string or an array of strings [[GH-3642](https://github.com/hashicorp/vault/pull/3642)]
+ * secret/ssh: Allow 4096-bit keys to be used in dynamic key method [[GH-3593](https://github.com/hashicorp/vault/pull/3593)]
+ * secret/consul: The Consul secret backend now uses the value of `lease` set
+   on the role, if set, when renewing a secret. [[GH-3796](https://github.com/hashicorp/vault/pull/3796)]
+ * storage/mysql: Don't attempt database creation if it exists, which can help
+   under certain permissions constraints [[GH-3716](https://github.com/hashicorp/vault/pull/3716)]
+
+BUG FIXES:
+
+ * api/status (Enterprise): Fix status reporting when using an auto seal
+ * auth/approle: Fix case-sensitive/insensitive comparison issue [[GH-3665](https://github.com/hashicorp/vault/pull/3665)]
+ * auth/cert: Return `allowed_names` on role read [[GH-3654](https://github.com/hashicorp/vault/pull/3654)]
+ * auth/ldap: Fix incorrect control information being sent [[GH-3402](https://github.com/hashicorp/vault/pull/3402)] [[GH-3496](https://github.com/hashicorp/vault/pull/3496)]
+   [[GH-3625](https://github.com/hashicorp/vault/pull/3625)] [[GH-3656](https://github.com/hashicorp/vault/pull/3656)]
+ * core: Fix seal status reporting when using an auto seal
+ * core: Add creation path to wrap info for a control group token
+ * core: Fix potential panic that could occur using plugins when a node
+   transitioned from active to standby [[GH-3638](https://github.com/hashicorp/vault/pull/3638)]
+ * core: Fix memory ballooning when a connection would connect to the cluster
+   port and then go away -- redux!
[[GH-3680](https://github.com/hashicorp/vault/pull/3680)]
+ * core: Replace recursive token revocation logic with depth-first logic, which
+   can avoid hitting stack depth limits in extreme cases [[GH-2348](https://github.com/hashicorp/vault/pull/2348)]
+ * core: When doing a read on configured audited-headers, properly handle case
+   insensitivity [[GH-3701](https://github.com/hashicorp/vault/pull/3701)]
+ * core/pkcs11 (Enterprise): Fix panic when PKCS#11 library is not readable
+ * database/mysql: Allow the creation statement to use commands that are not yet
+   supported by the prepare statement protocol [[GH-3619](https://github.com/hashicorp/vault/pull/3619)]
+ * plugin/auth-gcp: Fix IAM roles when using `allow_gce_inference` [VPAG-19]
+
+## 0.9.0.1 (November 21st, 2017) (Enterprise Only)
+
+IMPROVEMENTS:
+
+ * auth/gcp: Support seal wrapping of configuration parameters
+ * auth/kubernetes: Support seal wrapping of configuration parameters
+
+BUG FIXES:
+
+ * Fix an upgrade issue with some physical backends when migrating from legacy
+   HSM stored key support to the new Seal Wrap mechanism (Enterprise)
+ * mfa: Add the 'mfa' flag that was removed by mistake [[GH-4223](https://github.com/hashicorp/vault/pull/4223)]
+
+## 0.9.0 (November 14th, 2017)
+
+DEPRECATIONS/CHANGES:
+
+ * HSM config parameter requirements: When using Vault with an HSM, a new
+   parameter is required: `hmac_key_label`. This performs a similar function to
+   `key_label` but for the HMAC key Vault will use. Vault will generate a
+   suitable key if this value is specified and `generate_key` is set to true.
+ * API HTTP client behavior: When calling `NewClient` the API no longer
+   modifies the provided client/transport. In particular this means it will no
+   longer enable redirection limiting and HTTP/2 support on custom clients. It
+   is suggested that if you want to make changes to an HTTP client that you use
+   one created by `DefaultConfig` as a starting point.
+ * AWS EC2 client nonce behavior: The client nonce generated by the backend
+   that gets returned along with the authentication response will be audited in
+   plaintext. If this is undesired, clients can choose to supply a custom
+   nonce to the login endpoint. The custom nonce set by the client will no
+   longer be returned with the authentication response, and hence will not be
+   audit logged.
+ * AWS Auth role options: The API will now error when trying to create or
+   update a role with the mutually-exclusive options
+   `disallow_reauthentication` and `allow_instance_migration`.
+ * SSH CA role read changes: When reading back a role from the `ssh` backend,
+   the TTL/max TTL values will now be an integer number of seconds rather than
+   a string. This better matches the API elsewhere in Vault.
+ * SSH role list changes: When listing roles from the `ssh` backend via the API,
+   the response data will additionally return a `key_info` map that will contain
+   a map of each key with a corresponding object containing the `key_type`.
+ * More granularity in audit logs: Audit request and response entries are still
+   in RFC3339 format but now have a granularity of nanoseconds.
+ * High-availability-related values have been moved out of the `storage` and
+   `ha_storage` stanzas, and into the top-level configuration. `redirect_addr`
+   has been renamed to `api_addr`. The stanzas still support accepting
+   HA-related values to maintain backward compatibility, but top-level values
+   will take precedence.
+ * A new `seal` stanza has been added to the configuration file, which is
+   optional and enables configuration of the seal type to use for additional
+   data protection, such as using HSM or Cloud KMS solutions to encrypt and
+   decrypt data.
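+   As an illustrative sketch only (not part of the original release notes), a
+   configuration combining the relocated top-level HA values, the new `seal`
+   stanza, and the HSM key labels described above might look like the
+   following; all values are placeholders:
+
+   ```hcl
+   storage "consul" {
+     address = "127.0.0.1:8500"
+     path    = "vault/"
+   }
+
+   # HA-related values now live at the top level; `redirect_addr` is now
+   # `api_addr`.
+   api_addr     = "https://vault.example.com:8200"
+   cluster_addr = "https://vault.example.com:8201"
+
+   # Optional seal stanza; `key_label`/`hmac_key_label` correspond to the
+   # HSM parameters described in the first item of this list.
+   seal "pkcs11" {
+     lib            = "/usr/lib/softhsm/libsofthsm2.so"
+     slot           = "0"
+     pin            = "example-pin"
+     key_label      = "vault-key"
+     hmac_key_label = "vault-hmac-key"
+     generate_key   = "true"
+   }
+   ```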
+
+FEATURES:
+
+ * **RSA Support for Transit Backend**: Transit backend can now generate RSA
+   keys which can be used for encryption and signing. [[GH-3489](https://github.com/hashicorp/vault/pull/3489)]
+ * **Identity System**: Now in open source and with significant enhancements,
+   Identity is an integrated system for understanding users across tokens and
+   enabling easier management of users directly and via groups.
+ * **External Groups in Identity**: Vault can now automatically assign users
+   and systems to groups in Identity based on their membership in external
+   groups.
+ * **Seal Wrap / FIPS 140-2 Compatibility (Enterprise)**: Vault can now take
+   advantage of FIPS 140-2-certified HSMs to ensure that Critical Security
+   Parameters are protected in a compliant fashion. Vault's implementation has
+   received a statement of compliance from Leidos.
+ * **Control Groups (Enterprise)**: Require multiple members of an Identity
+   group to authorize a requested action before it is allowed to run.
+ * **Cloud Auto-Unseal (Enterprise)**: Automatically unseal Vault using AWS KMS
+   and GCP CKMS.
+ * **Sentinel Integration (Enterprise)**: Take advantage of HashiCorp Sentinel
+   to create extremely flexible access control policies -- even on
+   unauthenticated endpoints.
+ * **Barrier Rekey Support for Auto-Unseal (Enterprise)**: When using auto-unsealing
+   functionality, the `rekey` operation is now supported; it uses recovery keys
+   to authorize the master key rekey.
+ * **Operation Token for Disaster Recovery Actions (Enterprise)**: When using
+   Disaster Recovery replication, a token can be created that can be used to
+   authorize actions such as promotion and updating primary information, rather
+   than using recovery keys.
+ * **Trigger Auto-Unseal with Recovery Keys (Enterprise)**: When using
+   auto-unsealing, a request to unseal Vault can be triggered by a threshold of
+   recovery keys, rather than requiring the Vault process to be restarted.
+ * **UI Redesign (Enterprise)**: An all-new experience for the Vault Enterprise
+   UI. The look and feel has been completely redesigned to give users a better
+   experience and make managing secrets fast and easy.
+ * **UI: SSH Secret Backend (Enterprise)**: Configure an SSH secret backend,
+   create and browse roles, and use them to sign keys or generate one-time
+   passwords.
+ * **UI: AWS Secret Backend (Enterprise)**: You can now configure the AWS
+   backend via the Vault Enterprise UI. In addition, you can create roles,
+   browse them, and generate IAM credentials from them in the UI.
+
+IMPROVEMENTS:
+
+ * api: Add ability to set custom headers on each call [[GH-3394](https://github.com/hashicorp/vault/pull/3394)]
+ * command/server: Add config option to disable requesting client certificates
+   [[GH-3373](https://github.com/hashicorp/vault/pull/3373)]
+ * auth/aws: Max retries can now be customized for the AWS client [[GH-3965](https://github.com/hashicorp/vault/pull/3965)]
+ * core: Disallow mounting underneath an existing path, not just over [[GH-2919](https://github.com/hashicorp/vault/pull/2919)]
+ * physical/file: Use `700` as permissions when creating directories. The files
+   themselves were `600` and are all encrypted, but this doesn't hurt.
+ * secret/aws: Add ability to use custom IAM/STS endpoints [[GH-3416](https://github.com/hashicorp/vault/pull/3416)]
+ * secret/aws: Max retries can now be customized for the AWS client [[GH-3965](https://github.com/hashicorp/vault/pull/3965)]
+ * secret/cassandra: Work around Cassandra ignoring consistency levels for a
+   user listing query [[GH-3469](https://github.com/hashicorp/vault/pull/3469)]
+ * secret/pki: Private keys can now be marshalled as PKCS#8 [[GH-3518](https://github.com/hashicorp/vault/pull/3518)]
+ * secret/pki: Allow entering URLs for `pki` as both comma-separated strings and JSON
+   arrays [[GH-3409](https://github.com/hashicorp/vault/pull/3409)]
+ * secret/ssh: Role TTL/max TTL can now be specified as either a string or an
+   integer [[GH-3507](https://github.com/hashicorp/vault/pull/3507)]
+ * secret/transit: Sign and verify operations now support a `none` hash
+   algorithm to allow signing/verifying pre-hashed data [[GH-3448](https://github.com/hashicorp/vault/pull/3448)]
+ * secret/database: Add the ability to glob allowed roles in the Database Backend [[GH-3387](https://github.com/hashicorp/vault/pull/3387)]
+ * ui (Enterprise): Support for RSA keys in the transit backend
+ * ui (Enterprise): Support for DR Operation Token generation, promoting, and
+   updating primary on DR Secondary clusters
+
+BUG FIXES:
+
+ * api: Fix panic when setting a custom HTTP client but with a nil transport
+   [[GH-3435](https://github.com/hashicorp/vault/pull/3435)] [[GH-3437](https://github.com/hashicorp/vault/pull/3437)]
+ * api: Fix authenticating to the `cert` backend when the CA for the client cert
+   is not known to the server's listener [[GH-2946](https://github.com/hashicorp/vault/pull/2946)]
+ * auth/approle: Create role ID index during read if a role is missing one [[GH-3561](https://github.com/hashicorp/vault/pull/3561)]
+ * auth/aws: Don't allow mutually exclusive options [[GH-3291](https://github.com/hashicorp/vault/pull/3291)]
+ * auth/radius: Fix logging in under some situations [[GH-3461](https://github.com/hashicorp/vault/pull/3461)]
+ * core: Fix memleak when a connection would connect to the cluster port and
+   then go away [[GH-3513](https://github.com/hashicorp/vault/pull/3513)]
+ * core: Fix panic if a single-use token is used to step-down or seal [[GH-3497](https://github.com/hashicorp/vault/pull/3497)]
+ * core: Set rather than add headers to prevent some duplicated headers in
+   responses when requests were forwarded to the active node [[GH-3485](https://github.com/hashicorp/vault/pull/3485)]
+ * physical/etcd3: Fix some listing issues due to how etcd3 does prefix
+   matching [[GH-3406](https://github.com/hashicorp/vault/pull/3406)]
+ * physical/etcd3: Fix case where standbys can lose their etcd client lease
+   [[GH-3031](https://github.com/hashicorp/vault/pull/3031)]
+ * physical/file: Fix listing when underscores are the first component of a
+   path [[GH-3476](https://github.com/hashicorp/vault/pull/3476)]
+ * plugins: Allow response errors to be returned from backend plugins [[GH-3412](https://github.com/hashicorp/vault/pull/3412)]
+ * secret/transit: Fix panic if the length of the input ciphertext was less
+   than the expected nonce length [[GH-3521](https://github.com/hashicorp/vault/pull/3521)]
+ * ui (Enterprise): Reinstate support for generic secret backends - this was
+   erroneously removed in a previous release
+
+## 0.8.3 (September 19th, 2017)
+
+CHANGES:
+
+ * Policy input/output standardization: For all built-in authentication
+   backends, policies can now be specified as a comma-delimited string or an
+   array if using JSON as API input; on read, policies will be returned as an
+   array; and the `default` policy will not be forcefully added to policies
+   saved in configurations. Please note that the `default` policy will continue
+   to be added to generated tokens; however, rather than backends adding
+   `default` to the given set of input policies (in some cases, and not in
+   others), the stored set will reflect the user-specified set.
+ * `sign-self-issued` modifies Issuer in generated certificates: In 0.8.2 the
+   endpoint would not modify the Issuer in the generated certificate, leaving
+   the output self-issued. Although theoretically valid, in practice crypto
+   stacks were unhappy validating paths containing such certs. As a result,
+   `sign-self-issued` now encodes the signing CA's Subject DN into the Issuer
+   DN of the generated certificate.
+ * `sys/raw` requires enabling: While the `sys/raw` endpoint can be extremely
+   useful in break-glass or support scenarios, it is also extremely dangerous.
+   As of now, a configuration file option `raw_storage_endpoint` must be set in
+   order to enable this API endpoint. Once set, the available functionality has
+   been enhanced slightly; it now supports listing and decrypting most of
+   Vault's core data structures, except for the encryption keyring itself.
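+   As a minimal illustrative sketch (not from the original release notes), the
+   opt-in is a single top-level setting in the server configuration file:
+
+   ```hcl
+   # Explicitly opt in to the dangerous sys/raw endpoint.
+   raw_storage_endpoint = true
+   ```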
+ * `generic` is now `kv`: To better reflect its actual use, the `generic`
+   backend is now `kv`. Using `generic` will still work for backwards
+   compatibility.
+
+FEATURES:
+
+ * **GCE Support for GCP Auth**: GCE instances can now authenticate to Vault
+   using machine credentials.
+ * **Support for Kubernetes Service Account Auth**: Kubernetes Service Accounts
+   can now authenticate to Vault using JWT tokens.
+
+IMPROVEMENTS:
+
+ * configuration: Provide a config option to store Vault server's process ID
+   (PID) in a file [[GH-3321](https://github.com/hashicorp/vault/pull/3321)]
+ * mfa (Enterprise): Add the ability to use identity metadata in username format
+ * mfa/okta (Enterprise): Add support for configuring `base_url` for API calls
+ * secret/pki: `sign-intermediate` will now allow specifying a `ttl` value
+   longer than the signing CA certificate's NotAfter value. [[GH-3325](https://github.com/hashicorp/vault/pull/3325)]
+ * sys/raw: Raw storage access is now disabled by default [[GH-3329](https://github.com/hashicorp/vault/pull/3329)]
+
+BUG FIXES:
+
+ * auth/okta: Fix regression that removed the ability to set `base_url` [[GH-3313](https://github.com/hashicorp/vault/pull/3313)]
+ * core: Fix panic while loading leases at startup on ARM processors
+   [[GH-3314](https://github.com/hashicorp/vault/pull/3314)]
+ * secret/pki: Fix `sign-self-issued` encoding the wrong subject public key
+   [[GH-3325](https://github.com/hashicorp/vault/pull/3325)]
+
+## 0.8.2.1 (September 11th, 2017) (Enterprise Only)
+
+BUG FIXES:
+
+ * Fix an issue upgrading to 0.8.2 for Enterprise customers.
+
+## 0.8.2 (September 5th, 2017)
+
+SECURITY:
+
+* In prior versions of Vault, if authenticating via AWS IAM and requesting a
+  periodic token, the period was not properly respected. This could lead to
+  tokens expiring unexpectedly, or a token lifetime being longer than expected.
+  Upon token renewal with Vault 0.8.2 the period will be properly enforced.
+
+DEPRECATIONS/CHANGES:
+
+* `vault ssh` users should supply `-mode` and `-role` to reduce the number of
+  API calls. A future version of Vault will mark these optional values as
+  required.
Failure to supply `-mode` or `-role` will result in a warning. +* Vault plugins will first briefly run a restricted version of the plugin to + fetch metadata, and then lazy-load the plugin on first request to prevent + crash/deadlock of Vault during the unseal process. Plugins will need to be + built with the latest changes in order for them to run properly. + +FEATURES: + +* **Lazy Lease Loading**: On startup, Vault will now load leases from storage + in a lazy fashion (token checks and revocation/renewal requests still force + an immediate load). For larger installations this can significantly reduce + downtime when switching active nodes or bringing Vault up from cold start. +* **SSH CA Login with `vault ssh`**: `vault ssh` now supports the SSH CA + backend for authenticating to machines. It also supports remote host key + verification through the SSH CA backend, if enabled. +* **Signing of Self-Issued Certs in PKI**: The `pki` backend now supports + signing self-issued CA certs. This is useful when switching root CAs. + +IMPROVEMENTS: + + * audit/file: Allow specifying `stdout` as the `file_path` to log to standard + output [[GH-3235](https://github.com/hashicorp/vault/pull/3235)] + * auth/aws: Allow wildcards in `bound_iam_principal_arn` [[GH-3213](https://github.com/hashicorp/vault/pull/3213)] + * auth/okta: Compare groups case-insensitively since Okta is only + case-preserving [[GH-3240](https://github.com/hashicorp/vault/pull/3240)] + * auth/okta: Standardize Okta configuration APIs across backends [[GH-3245](https://github.com/hashicorp/vault/pull/3245)] + * cli: Add subcommand autocompletion that can be enabled with + `vault -autocomplete-install` [[GH-3223](https://github.com/hashicorp/vault/pull/3223)] + * cli: Add ability to handle wrapped responses when using `vault auth`. What + is output depends on the other given flags; see the help output for that + command for more information. 
[[GH-3263](https://github.com/hashicorp/vault/pull/3263)] + * core: TLS cipher suites used for cluster behavior can now be set via + `cluster_cipher_suites` in configuration [[GH-3228](https://github.com/hashicorp/vault/pull/3228)] + * core: The `plugin_name` can now either be specified directly as part of the + parameter or within the `config` object when mounting a secret or auth backend + via `sys/mounts/:path` or `sys/auth/:path` respectively [[GH-3202](https://github.com/hashicorp/vault/pull/3202)] + * core: It is now possible to update the `description` of a mount when + mount-tuning, although this must be done through the HTTP layer [[GH-3285](https://github.com/hashicorp/vault/pull/3285)] + * secret/databases/mongo: If an EOF is encountered, attempt reconnecting and + retrying the operation [[GH-3269](https://github.com/hashicorp/vault/pull/3269)] + * secret/pki: TTLs can now be specified as a string or an integer number of + seconds [[GH-3270](https://github.com/hashicorp/vault/pull/3270)] + * secret/pki: Self-issued certs can now be signed via + `pki/root/sign-self-issued` [[GH-3274](https://github.com/hashicorp/vault/pull/3274)] + * storage/gcp: Use application default credentials if they exist [[GH-3248](https://github.com/hashicorp/vault/pull/3248)] + +BUG FIXES: + + * auth/aws: Properly use role-set period values for IAM-derived token renewals + [[GH-3220](https://github.com/hashicorp/vault/pull/3220)] + * auth/okta: Fix updating organization/ttl/max_ttl after initial setting + [[GH-3236](https://github.com/hashicorp/vault/pull/3236)] + * core: Fix PROXY when underlying connection is TLS [[GH-3195](https://github.com/hashicorp/vault/pull/3195)] + * core: Policy-related commands would sometimes fail to act case-insensitively + [[GH-3210](https://github.com/hashicorp/vault/pull/3210)] + * storage/consul: Fix parsing TLS configuration when using a bare IPv6 address + [[GH-3268](https://github.com/hashicorp/vault/pull/3268)] + * plugins: Lazy-load plugins to prevent crash/deadlock during unseal process. + [[GH-3255](https://github.com/hashicorp/vault/pull/3255)] + * plugins: Skip mounting plugin-based secret and credential mounts when setting + up mounts if the plugin is no longer present in the catalog. [[GH-3255](https://github.com/hashicorp/vault/pull/3255)] + +## 0.8.1 (August 16th, 2017) + +DEPRECATIONS/CHANGES: + + * PKI Root Generation: Calling `pki/root/generate` when a CA cert/key already + exists will now return a `204` instead of overwriting an existing root. If + you want to recreate the root, first run a delete operation on `pki/root` + (requires `sudo` capability), then generate it again. + +FEATURES: + + * **Oracle Secret Backend**: There is now an external plugin to support leased + credentials for Oracle databases (distributed separately). + * **GCP IAM Auth Backend**: There is now an authentication backend that allows + using GCP IAM credentials to retrieve Vault tokens. This is available as + both a plugin and built-in to Vault. + * **PingID Push Support for Path-Based MFA (Enterprise)**: PingID Push can + now be used for MFA with the new path-based MFA introduced in Vault + Enterprise 0.8. + * **Permitted DNS Domains Support in PKI**: The `pki` backend now supports + specifying permitted DNS domains for CA certificates, allowing you to + narrowly scope the set of domains for which a CA can issue or sign child + certificates. 
+ * **Plugin Backend Reload Endpoint**: Plugin backends can now be triggered to
+   reload using the `sys/plugins/reload/backend` endpoint and providing either
+   the plugin name or the mounts to reload.
+ * **Self-Reloading Plugins**: The plugin system will now attempt to reload a
+   crashed or stopped plugin, once per request.
+
+IMPROVEMENTS:
+
+ * auth/approle: Allow array input for policies in addition to comma-delimited
+   strings [[GH-3163](https://github.com/hashicorp/vault/pull/3163)]
+ * plugins: Send logs through Vault's logger rather than stdout [[GH-3142](https://github.com/hashicorp/vault/pull/3142)]
+ * secret/pki: Add `pki/root` delete operation [[GH-3165](https://github.com/hashicorp/vault/pull/3165)]
+ * secret/pki: Don't overwrite an existing root cert/key when calling generate
+   [[GH-3165](https://github.com/hashicorp/vault/pull/3165)]
+
+BUG FIXES:
+
+ * aws: Don't prefer a nil HTTP client over an existing one [[GH-3159](https://github.com/hashicorp/vault/pull/3159)]
+ * core: If there is an error when checking for create/update existence, return
+   500 instead of 400 [[GH-3162](https://github.com/hashicorp/vault/pull/3162)]
+ * secret/database: Avoid creating usernames that are too long for legacy MySQL
+   [[GH-3138](https://github.com/hashicorp/vault/pull/3138)]
+
+## 0.8.0 (August 9th, 2017)
+
+SECURITY:
+
+ * We've added a note to the docs about the way the GitHub auth backend works
+   as it may not be readily apparent that GitHub personal access tokens, which
+   are used by the backend, can be used for unauthorized access if they are
+   stolen from third-party services and access to Vault is public.
+
+DEPRECATIONS/CHANGES:
+
+ * Database Plugin Backends: Passwords generated for these backends now
+   enforce stricter password requirements, as opposed to the previous behavior
+   of returning a randomized UUID. Passwords are of length 20, and have the
+   characters `A1a-` prepended to ensure stricter requirements. No regressions
+   are expected from this change. (For database backends that were previously
+   substituting underscores for hyphens in passwords, this will remain the
+   case.)
+ * Lease Endpoints: The endpoints `sys/renew`, `sys/revoke`, `sys/revoke-prefix`,
+   `sys/revoke-force` have been deprecated and relocated under `sys/leases`.
+   Additionally, the deprecated path `sys/revoke-force` now requires the `sudo`
+   capability.
+ * Response Wrapping Lookup Unauthenticated: The `sys/wrapping/lookup` endpoint
+   is now unauthenticated. This allows introspection of the wrapping info by
+   clients that only have the wrapping token without then invalidating the
+   token. Validation functions/checks are still performed on the token.
+
+FEATURES:
+
+ * **Cassandra Storage**: Cassandra can now be used for Vault storage
+ * **CockroachDB Storage**: CockroachDB can now be used for Vault storage
+ * **CouchDB Storage**: CouchDB can now be used for Vault storage
+ * **SAP HANA Database Plugin**: The `databases` backend can now manage users
+   for SAP HANA databases
+ * **Plugin Backends**: Vault now supports running secret and auth backends as
+   plugins. Plugins can be mounted like normal backends and can be developed
+   independently from Vault.
+ * **PROXY Protocol Support**: Vault listeners can now be configured to honor
+   PROXY protocol v1 information to allow passing real client IPs into Vault. A
+   list of authorized addresses (IPs or subnets) can be defined and
+   accept/reject behavior controlled.
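+   As an illustrative sketch (not part of the original release notes), a
+   listener honoring PROXY protocol information from known sources might be
+   configured as follows; the addresses are placeholders:
+
+   ```hcl
+   listener "tcp" {
+     address = "0.0.0.0:8200"
+
+     # Honor PROXY protocol v1 headers, but only from these sources.
+     proxy_protocol_behavior         = "allow_authorized"
+     proxy_protocol_authorized_addrs = "10.0.0.1,10.0.0.2"
+   }
+   ```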
+ * **Lease Lookup and Browsing in the Vault Enterprise UI**: Vault Enterprise UI + now supports lookup and listing of leases and the associated actions from the + `sys/leases` endpoints in the API. These are located in the new top level + navigation item "Leases". + * **Filtered Mounts for Performance Mode Replication**: Whitelists or + blacklists of mounts can be defined per-secondary to control which mounts + are actually replicated to that secondary. This can allow targeted + replication of specific sets of data to specific geolocations/datacenters. + * **Disaster Recovery Mode Replication (Enterprise Only)**: There is a new + replication mode, Disaster Recovery (DR), that performs full real-time + replication (including tokens and leases) to DR secondaries. DR secondaries + cannot handle client requests, but can be promoted to primary as needed for + failover. + * **Manage New Replication Features in the Vault Enterprise UI**: Support for + Replication features in Vault Enterprise UI has expanded to include new DR + Replication mode and management of Filtered Mounts in Performance Replication + mode. + * **Vault Identity (Enterprise Only)**: Vault's new Identity system allows + correlation of users across tokens. At present this is only used for MFA, + but will be the foundation of many other features going forward. + * **Duo Push, Okta Push, and TOTP MFA For All Authenticated Paths (Enterprise + Only)**: A brand new MFA system built on top of Identity allows MFA + (currently Duo Push, Okta Push, and TOTP) for any authenticated path within + Vault. MFA methods can be configured centrally, and TOTP keys live within + the user's Identity information to allow using the same key across tokens. + Specific MFA method(s) required for any given path within Vault can be + specified in normal ACL path statements. 
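+   As an illustrative sketch (not part of the original release notes), and
+   assuming an MFA method named `my_duo` has already been configured, a policy
+   path stanza requiring it might look like:
+
+   ```hcl
+   # Enterprise-only: require Duo push approval before this path can be read.
+   path "secret/very-sensitive" {
+     capabilities = ["read"]
+     mfa_methods  = ["my_duo"]
+   }
+   ```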
+ +IMPROVEMENTS: + + * api: Add client method for a secret renewer background process [[GH-2886](https://github.com/hashicorp/vault/pull/2886)] + * api: Add `RenewTokenAsSelf` [[GH-2886](https://github.com/hashicorp/vault/pull/2886)] + * api: Client timeout can now be adjusted with the `VAULT_CLIENT_TIMEOUT` env + var or with a new API function [[GH-2956](https://github.com/hashicorp/vault/pull/2956)] + * api/cli: Client will now attempt to look up SRV records for the given Vault + hostname [[GH-3035](https://github.com/hashicorp/vault/pull/3035)] + * audit/socket: Enhance reconnection logic and don't require the connection to + be established at unseal time [[GH-2934](https://github.com/hashicorp/vault/pull/2934)] + * audit/file: Opportunistically try re-opening the file on error [[GH-2999](https://github.com/hashicorp/vault/pull/2999)] + * auth/approle: Add role name to token metadata [[GH-2985](https://github.com/hashicorp/vault/pull/2985)] + * auth/okta: Allow specifying `ttl`/`max_ttl` inside the mount [[GH-2915](https://github.com/hashicorp/vault/pull/2915)] + * cli: Client timeout can now be adjusted with the `VAULT_CLIENT_TIMEOUT` env + var [[GH-2956](https://github.com/hashicorp/vault/pull/2956)] + * command/auth: Add `-token-only` flag to `vault auth` that returns only the + token on stdout and does not store it via the token helper [[GH-2855](https://github.com/hashicorp/vault/pull/2855)] + * core: CORS allowed origins can now be configured [[GH-2021](https://github.com/hashicorp/vault/pull/2021)] + * core: Add metrics counters for audit log failures [[GH-2863](https://github.com/hashicorp/vault/pull/2863)] + * cors: Allow setting allowed headers via the API instead of always using + wildcard [[GH-3023](https://github.com/hashicorp/vault/pull/3023)] + * secret/ssh: Allow specifying the key ID format using template values for CA + type [[GH-2888](https://github.com/hashicorp/vault/pull/2888)] + * server: Add `tls_client_ca_file` option for specifying a CA file to use for + client certificate verification when `tls_require_and_verify_client_cert` is + enabled [[GH-3034](https://github.com/hashicorp/vault/pull/3034)] + * storage/cockroachdb: Add CockroachDB storage backend [[GH-2713](https://github.com/hashicorp/vault/pull/2713)] + * storage/couchdb: Add CouchDB storage backend [[GH-2880](https://github.com/hashicorp/vault/pull/2880)] + * storage/mssql: Add `max_parallel` [[GH-3026](https://github.com/hashicorp/vault/pull/3026)] + * storage/postgresql: Add `max_parallel` [[GH-3026](https://github.com/hashicorp/vault/pull/3026)] + * storage/postgresql: Improve listing speed [[GH-2945](https://github.com/hashicorp/vault/pull/2945)] + * storage/s3: More efficient paging when an object has a lot of subobjects + [[GH-2780](https://github.com/hashicorp/vault/pull/2780)] + * sys/wrapping: Make `sys/wrapping/lookup` unauthenticated [[GH-3084](https://github.com/hashicorp/vault/pull/3084)] + * sys/wrapping: Wrapped tokens now store the original request path of the data + [[GH-3100](https://github.com/hashicorp/vault/pull/3100)] + * telemetry: Add support for DogStatsD [[GH-2490](https://github.com/hashicorp/vault/pull/2490)] + +BUG FIXES: + + * api/health: Don't treat standby `429` codes as an error [[GH-2850](https://github.com/hashicorp/vault/pull/2850)] + * api/leases: Fix lease lookup returning lease properties at the top level + * audit: Fix panic when audit logging a read operation on an asymmetric + `transit` key [[GH-2958](https://github.com/hashicorp/vault/pull/2958)] + * 
auth/approle: Fix panic when a secret ID and CIDR list are not provided in a
+   role [[GH-3075](https://github.com/hashicorp/vault/pull/3075)]
+ * auth/aws: Look up proper account ID on token renew [[GH-3012](https://github.com/hashicorp/vault/pull/3012)]
+ * auth/aws: Store IAM header in all cases when it changes [[GH-3004](https://github.com/hashicorp/vault/pull/3004)]
+ * auth/ldap: Verify given certificate is PEM encoded instead of failing
+   silently [[GH-3016](https://github.com/hashicorp/vault/pull/3016)]
+ * auth/token: Don't allow using the same token ID twice when manually
+   specifying [[GH-2916](https://github.com/hashicorp/vault/pull/2916)]
+ * cli: Fix issue with parsing keys that start with special characters [[GH-2998](https://github.com/hashicorp/vault/pull/2998)]
+ * core: Relocated `sys/leases/renew` returns same payload as original
+   `sys/leases` endpoint [[GH-2891](https://github.com/hashicorp/vault/pull/2891)]
+ * secret/ssh: Fix panic when signing with incorrect key type [[GH-3072](https://github.com/hashicorp/vault/pull/3072)]
+ * secret/totp: Ensure codes can only be used once. This makes some automated
+   workflows harder but complies with the RFC. [[GH-2908](https://github.com/hashicorp/vault/pull/2908)]
+ * secret/transit: Fix locking when creating a key with unsupported options
+   [[GH-2974](https://github.com/hashicorp/vault/pull/2974)]
+
+## 0.7.3 (June 7th, 2017)
+
+SECURITY:
+
+ * Cert auth backend now checks validity of individual certificates: In
+   previous versions of Vault, validity (e.g. expiration) of individual leaf
+   certificates added for authentication was not checked. This was done to make
+   it easier for administrators to control lifecycles of individual
+   certificates added to the backend, e.g. the authentication material being
+   checked was access to that specific certificate's private key rather than
+   all private keys signed by a CA. However, this behavior is often unexpected
+   and as a result can lead to insecure deployments, so we are now validating
+   these certificates as well.
+ * App-ID path salting was skipped in 0.7.1/0.7.2: A regression in 0.7.1/0.7.2
+   caused the HMACing of any App-ID information stored in paths (including
+   actual app-IDs and user-IDs) to be unsalted and written as-is from the API.
+   In 0.7.3 any such paths will be automatically changed to salted versions on
+   access (e.g. login or read); however, if you created new app-IDs or user-IDs
+   in 0.7.1/0.7.2, you may want to consider whether any users with access to
+   Vault's underlying data store may have intercepted these values, and
+   revoke/roll them.
+
+DEPRECATIONS/CHANGES:
+
+ * Step-Down is Forwarded: When a step-down is issued against a non-active node
+   in an HA cluster, it will now forward the request to the active node.
+
+FEATURES:
+
+ * **ed25519 Signing/Verification in Transit with Key Derivation**: The
+   `transit` backend now supports generating
+   [ed25519](https://ed25519.cr.yp.to/) keys for signing and verification
+   functionality. These keys support derivation, allowing you to modify the
+   actual encryption key used by supplying a `context` value.
+ * **Key Version Specification for Encryption in Transit**: You can now specify
+   the version of a key you wish to use to generate a signature, ciphertext, or
+   HMAC. This can be controlled by the `min_encryption_version` key
+   configuration property.
+ * **Replication Primary Discovery (Enterprise)**: Replication primaries will
+   now advertise the addresses of their local HA cluster members to replication
+   secondaries.
This helps recovery if the primary active node goes down and + neither service discovery nor load balancers are in use to steer clients. + +IMPROVEMENTS: + + * api/health: Add Sys().Health() [[GH-2805](https://github.com/hashicorp/vault/pull/2805)] + * audit: Add auth information to requests that error out [[GH-2754](https://github.com/hashicorp/vault/pull/2754)] + * command/auth: Add `-no-store` option that prevents the auth command from + storing the returned token into the configured token helper [[GH-2809](https://github.com/hashicorp/vault/pull/2809)] + * core/forwarding: Request forwarding now heartbeats to prevent unused + connections from being terminated by firewalls or proxies + * plugins/databases: Add MongoDB as an internal database plugin [[GH-2698](https://github.com/hashicorp/vault/pull/2698)] + * storage/dynamodb: Add a method for checking the existence of children, + speeding up deletion operations in the DynamoDB storage backend [[GH-2722](https://github.com/hashicorp/vault/pull/2722)] + * storage/mysql: Add max_parallel parameter to MySQL backend [[GH-2760](https://github.com/hashicorp/vault/pull/2760)] + * secret/databases: Support listing connections [[GH-2823](https://github.com/hashicorp/vault/pull/2823)] + * secret/databases: Support custom renewal statements in Postgres database + plugin [[GH-2788](https://github.com/hashicorp/vault/pull/2788)] + * secret/databases: Use the role name as part of generated credentials + [[GH-2812](https://github.com/hashicorp/vault/pull/2812)] + * ui (Enterprise): Transit key and secret browsing UI handle large lists better + * ui (Enterprise): root tokens are no longer persisted + * ui (Enterprise): support for mounting Database and TOTP secret backends + +BUG FIXES: + + * auth/app-id: Fix regression causing loading of salts to be skipped + * auth/aws: Improve EC2 describe instances performance [[GH-2766](https://github.com/hashicorp/vault/pull/2766)] + * auth/aws: Fix lookup of some instance profile ARNs [[GH-2802](https://github.com/hashicorp/vault/pull/2802)] + * auth/aws: Resolve ARNs to internal AWS IDs which makes lookup at various + points (e.g. 
renewal time) more robust [[GH-2814](https://github.com/hashicorp/vault/pull/2814)]
+ * auth/aws: Properly honor configured period when using IAM authentication
+   [[GH-2825](https://github.com/hashicorp/vault/pull/2825)]
+ * auth/aws: Check that a bound IAM principal is not empty (in the current
+   state of the role) before requiring it match the previously authenticated
+   client [[GH-2781](https://github.com/hashicorp/vault/pull/2781)]
+ * auth/cert: Fix panic on renewal [[GH-2749](https://github.com/hashicorp/vault/pull/2749)]
+ * auth/cert: Fix certificate verification for non-CA certs [[GH-2761](https://github.com/hashicorp/vault/pull/2761)]
+ * core/acl: Prevent race condition when compiling ACLs in some scenarios
+   [[GH-2826](https://github.com/hashicorp/vault/pull/2826)]
+ * secret/database: Increase wrapping token TTL; in a loaded scenario it could
+   be too short
+ * secret/generic: Allow integers to be set as the value of the `ttl` field as
+   the documentation claims is supported [[GH-2699](https://github.com/hashicorp/vault/pull/2699)]
+ * secret/ssh: Added host key callback to ssh client config [[GH-2752](https://github.com/hashicorp/vault/pull/2752)]
+ * storage/s3: Avoid a panic when some bad data is returned [[GH-2785](https://github.com/hashicorp/vault/pull/2785)]
+ * storage/dynamodb: Fix list functions working improperly on Windows [[GH-2789](https://github.com/hashicorp/vault/pull/2789)]
+ * storage/file: Don't leak file descriptors in some error cases
+ * storage/swift: Fix pre-v3 project/tenant name reading [[GH-2803](https://github.com/hashicorp/vault/pull/2803)]
+
+## 0.7.2 (May 8th, 2017)
+
+BUG FIXES:
+
+ * audit: Fix auditing entries containing certain kinds of time values
+   [[GH-2689](https://github.com/hashicorp/vault/pull/2689)]
+
+## 0.7.1 (May 5th, 2017)
+
+DEPRECATIONS/CHANGES:
+
+ * LDAP Auth Backend: Group membership queries will now run as the `binddn`
+   user when `binddn`/`bindpass` are configured, rather than as the
+   authenticating user as was the case previously.
+
+FEATURES:
+
+ * **AWS IAM Authentication**: IAM principals can get Vault tokens
+   automatically, opening AWS-based authentication to users, ECS containers,
+   Lambda instances, and more. Signed client identity information retrieved
+   using the AWS API `sts:GetCallerIdentity` is validated against the AWS STS
+   service before issuing a Vault token. This backend is unified with the
+   `aws-ec2` authentication backend under the name `aws`, and allows additional
+   EC2-related restrictions to be applied during the IAM authentication; the
+   previous EC2 behavior is also still available. [[GH-2441](https://github.com/hashicorp/vault/pull/2441)]
+ * **MSSQL Physical Backend**: You can now use Microsoft SQL Server as your
+   Vault physical data store [[GH-2546](https://github.com/hashicorp/vault/pull/2546)]
+ * **Lease Listing and Lookup**: You can now introspect a lease to get its
+   creation and expiration properties via `sys/leases/lookup`; with `sudo`
+   capability you can also list leases for lookup, renewal, or revocation via
+   that endpoint. Various lease functions (renew, revoke, revoke-prefix,
+   revoke-force) have also been relocated to `sys/leases/`, but they also work
+   at the old paths for compatibility. Reading (but not listing) leases via
+   `sys/leases/lookup` is now a part of the current `default` policy. [[GH-2650](https://github.com/hashicorp/vault/pull/2650)]
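+   As an illustrative sketch (not part of the original release notes), the
+   grant added to the `default` policy is along these lines; lease lookup is
+   an update operation that carries the `lease_id` in the request body:
+
+   ```hcl
+   path "sys/leases/lookup" {
+     capabilities = ["update"]
+   }
+   ```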
+ * **TOTP Secret Backend**: You can now store multi-factor authentication keys
+   in Vault and use the API to retrieve time-based one-time use passwords on
+   demand. The backend can also be used to generate a new key and validate
+   passwords generated by that key. [[GH-2492](https://github.com/hashicorp/vault/pull/2492)]
+ * **Database Secret Backend & Secure Plugins (Beta)**: This new secret backend
+   combines the functionality of the MySQL, PostgreSQL, MSSQL, and Cassandra
+   backends. It also provides a plugin interface for extensibility through
+   custom databases. [[GH-2200](https://github.com/hashicorp/vault/pull/2200)]
+
+IMPROVEMENTS:
+
+ * auth/cert: Support for constraints on subject Common Name and DNS/email
+   Subject Alternate Names in certificates [[GH-2595](https://github.com/hashicorp/vault/pull/2595)]
+ * auth/ldap: Use the binding credentials to search group membership rather
+   than the user credentials [[GH-2534](https://github.com/hashicorp/vault/pull/2534)]
+ * cli/revoke: Add `-self` option to allow revoking the currently active token
+   [[GH-2596](https://github.com/hashicorp/vault/pull/2596)]
+ * core: Randomize x coordinate in Shamir shares [[GH-2621](https://github.com/hashicorp/vault/pull/2621)]
+ * replication: Fix a bug when enabling `approle` on a primary before
+   secondaries were connected
+ * replication: Add heartbeating to ensure firewalls don't kill connections to
+   primaries
+ * secret/pki: Add `no_store` option that allows certificates to be issued
+   without being stored. This removes the ability to look up and/or add to a
+   CRL but helps with scaling to very large numbers of certificates. [[GH-2565](https://github.com/hashicorp/vault/pull/2565)]
+ * secret/pki: If used with a role parameter, the `sign-verbatim/`
+   endpoint honors the values of `generate_lease`, `no_store`, `ttl` and
+   `max_ttl` from the given role [[GH-2593](https://github.com/hashicorp/vault/pull/2593)]
+ * secret/pki: Add role parameter `allow_glob_domains` that enables defining
+   names in `allowed_domains` containing `*` glob patterns [[GH-2517](https://github.com/hashicorp/vault/pull/2517)]
+ * secret/pki: Update certificate storage to not use characters that are not
+   supported on some filesystems [[GH-2575](https://github.com/hashicorp/vault/pull/2575)]
+ * storage/etcd3: Add `discovery_srv` option to query for SRV records to find
+   servers [[GH-2521](https://github.com/hashicorp/vault/pull/2521)]
+ * storage/s3: Support `max_parallel` option to limit concurrent outstanding
+   requests [[GH-2466](https://github.com/hashicorp/vault/pull/2466)]
+ * storage/s3: Use pooled transport for http client [[GH-2481](https://github.com/hashicorp/vault/pull/2481)]
+ * storage/swift: Allow domain values for V3 authentication [[GH-2554](https://github.com/hashicorp/vault/pull/2554)]
+ * tidy: Improvements to `auth/token/tidy` and `sys/leases/tidy` to handle more
+   cleanup cases [[GH-2452](https://github.com/hashicorp/vault/pull/2452)]
+
+BUG FIXES:
+
+ * api: Respect a configured path in Vault's address [[GH-2588](https://github.com/hashicorp/vault/pull/2588)]
+ * auth/aws-ec2: New bounds added as criteria to allow role creation [[GH-2600](https://github.com/hashicorp/vault/pull/2600)]
+ * auth/ldap: Don't lowercase groups attached to users [[GH-2613](https://github.com/hashicorp/vault/pull/2613)]
+ * cli: Don't panic if `vault write` is used with the `force` flag but no path
+   [[GH-2674](https://github.com/hashicorp/vault/pull/2674)]
+ * core: Help operations now request forwarding to the active node since
+   standbys may not have the appropriate info [[GH-2677](https://github.com/hashicorp/vault/pull/2677)]
+ * replication: Fix enabling secondaries when certain mounts already existed on
+   the primary
+ * secret/mssql: Update mssql driver to support queries with colons [[GH-2610](https://github.com/hashicorp/vault/pull/2610)]
+ * secret/pki: Don't lowercase O/OU values in certs [[GH-2555](https://github.com/hashicorp/vault/pull/2555)]
+ * secret/pki: Don't attempt to validate IP SANs if none are provided [[GH-2574](https://github.com/hashicorp/vault/pull/2574)]
+ * secret/ssh: Don't automatically lowercase principals in issued SSH certs
+   [[GH-2591](https://github.com/hashicorp/vault/pull/2591)]
+ * storage/consul: Properly handle state events rather than timing out
+   [[GH-2548](https://github.com/hashicorp/vault/pull/2548)]
+ * storage/etcd3: Ensure locks are released if client is improperly shut down
+   [[GH-2526](https://github.com/hashicorp/vault/pull/2526)]
+
+## 0.7.0 (March 21st, 2017)
+
+SECURITY:
+
+ * Common name not being validated when `exclude_cn_from_sans` option used in
+   `pki` backend: When using a role in the `pki` backend that specified the
+   `exclude_cn_from_sans` option, the common name would not then be properly
+   validated against the role's constraints. This has been fixed. We recommend
+   that any users of this feature upgrade to 0.7 as soon as feasible.
+
+DEPRECATIONS/CHANGES:
+
+ * List Operations Always Use Trailing Slash: Any list operation, whether via
+   the `GET` or `LIST` HTTP verb, will now internally canonicalize the path to
+   have a trailing slash. This makes policy writing more predictable, as it
+   means requests will no longer succeed or fail based on which client or
+   which HTTP verb is used. However, it also means that policies allowing
+   `list` capability must be carefully checked to ensure that they contain a
+   trailing slash; some policies may need to be split into multiple stanzas to
+   accommodate.
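+   As an illustrative sketch (not part of the original release notes), a
+   policy granting `list` should therefore include the trailing slash:
+
+   ```hcl
+   # List requests against secret/foo are canonicalized to secret/foo/
+   path "secret/foo/" {
+     capabilities = ["list"]
+   }
+   ```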
+ * PKI Defaults to Unleased Certificates: When issuing certificates from the
+   PKI backend, by default, no leases will be issued. If you want to manually
+   revoke a certificate, its serial number can be used with the `pki/revoke`
+   endpoint. Issuing leases is still possible by enabling the `generate_lease`
+   toggle in PKI role entries (this will default to `true` for upgrades, to
+   keep existing behavior), which will allow using lease IDs to revoke
+   certificates. For installations issuing large numbers of certificates (tens
+   to hundreds of thousands, or millions), this will significantly improve
+   Vault startup time since leases associated with these certificates will not
+   have to be loaded; however note that it also means that revocation of a
+   token used to issue certificates will no longer add these certificates to a
+   CRL. If this behavior is desired or needed, consider keeping leases enabled
+   and ensuring lifetimes are reasonable, and issue long-lived certificates via
+   a different role with leases disabled.
+
+FEATURES:
+
+ * **Replication (Enterprise)**: Vault Enterprise now has support for creating
+   a multi-datacenter replication set between clusters. The current replication
+   offering is based on an asynchronous primary/secondary (1:N) model that
+   replicates static data while keeping dynamic data (leases, tokens)
+   cluster-local, focusing on horizontal scaling for high-throughput and
+   high-fanout deployments.
+ * **Response Wrapping & Replication in the Vault Enterprise UI**: Vault
+   Enterprise UI now supports looking up and rotating response wrapping tokens,
+   as well as creating tokens with arbitrary values inside. It also now
+   supports replication functionality, enabling the configuration of a
+   replication set in the UI.
+ * **Expanded Access Control Policies**: Access control policies can now
+   specify allowed and denied parameters -- and, optionally, their values -- to
+   control what a client can and cannot submit during an API call. Policies can
+   also specify minimum/maximum response wrapping TTLs to both enforce the use
+   of response wrapping and control the duration of resultant wrapping tokens.
+   See the [policies concepts
+   page](https://www.vaultproject.io/docs/concepts/policies.html) for more
+   information.
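+   As an illustrative sketch (not part of the original release notes), a
+   policy using these new parameter and wrapping controls might look like the
+   following; the path and values are placeholders:
+
+   ```hcl
+   path "secret/restricted" {
+     capabilities = ["create", "update"]
+
+     # Only these parameters may be submitted, and `visibility` may only
+     # take one of the listed values; an empty list allows any value.
+     allowed_parameters = {
+       "visibility" = ["internal", "public"]
+       "comment"    = []
+     }
+
+     # Require response wrapping with a TTL between the two bounds.
+     min_wrapping_ttl = "1s"
+     max_wrapping_ttl = "90s"
+   }
+   ```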
(February 7th, 2017)
+
+FEATURES:
+
+ * **Okta Authentication**: A new Okta authentication backend allows you to use
+ Okta usernames and passwords to authenticate to Vault. If provided with an
+ appropriate Okta API token, group membership can be queried to assign
+ policies; users and groups can be defined locally as well.
+ * **RADIUS Authentication**: A new RADIUS authentication backend allows using
+ a RADIUS server to authenticate to Vault. Policies can be configured for
+ specific users or for any authenticated user.
+ * **Exportable Transit Keys**: Keys in `transit` can now be marked as
+ `exportable` at creation time. This allows a properly ACL'd user to retrieve
+ the associated signing key, encryption key, or HMAC key. The `exportable`
+ value is returned on a key policy read and cannot be changed, so if a key is
+ marked `exportable` it will always be exportable, and if it is not it will
+ never be exportable.
+ * **Batch Transit Operations**: `encrypt`, `decrypt` and `rewrap` operations
+ in the transit backend now support processing multiple input items in one
+ call, returning the output of each item in the response.
+ * **Configurable Audited HTTP Headers**: You can now specify headers that you
+ want to have included in each audit entry, along with whether each header
+ should be HMAC'd or kept plaintext. This can be useful for adding additional
+ client or network metadata to the audit logs.
+ * **Transit Backend UI (Enterprise)**: Vault Enterprise UI now supports the transit
+ backend, allowing creation, viewing and editing of named keys as well as using
+ those keys to perform supported transit operations directly in the UI.
+ * **Socket Audit Backend**: A new socket audit backend allows audit logs to be sent
+ through TCP, UDP, or UNIX Sockets.
+
+IMPROVEMENTS:
+
+ * auth/aws-ec2: Add support for cross-account auth using STS [GH-2148]
+ * auth/aws-ec2: Support issuing periodic tokens [GH-2324]
+ * auth/github: Support listing teams and users [GH-2261]
+ * auth/ldap: Support adding policies to local users directly, in addition to
+ local groups [GH-2152]
+ * command/server: Add ability to select and prefer server cipher suites
+ [GH-2293]
+ * core: Add a nonce to unseal operations as a check (useful mostly for
+ support, not as a security principle) [GH-2276]
+ * duo: Added ability to supply extra context to Duo pushes [GH-2118]
+ * physical/consul: Add option for setting consistency mode on Consul gets
+ [GH-2282]
+ * physical/etcd: Full v3 API support; code will autodetect which API version
+ to use. The v3 code path is significantly less complicated and may be much
+ more stable. [GH-2168]
+ * secret/pki: Allow specifying OU entries in generated certificate subjects
+ [GH-2251]
+ * secret mount ui (Enterprise): the secret mount list now shows all mounted
+ backends even if the UI cannot browse them. Additional backends can now be
+ mounted from the UI as well.
+
+BUG FIXES:
+
+ * auth/token: Fix regression in 0.6.4 where using token store roles as a
+ blacklist (with only `disallowed_policies` set) would not work in most
+ circumstances [GH-2286]
+ * physical/s3: Page responses in client so list doesn't truncate [GH-2224]
+ * secret/cassandra: Stop a connection leak that could occur on active node
+ failover [GH-2313]
+ * secret/pki: When using `sign-verbatim`, don't require a role and use the
+ CSR's common name [GH-2243]
+
+## 0.6.4 (December 16, 2016)
+
+SECURITY:
+
+Further details about these security issues can be found in the 0.6.4 upgrade
+guide.
+
+ * `default` Policy Privilege Escalation: If a parent token did not have the
+ `default` policy attached to its token, it could still create children with
+ the `default` policy. This is no longer allowed (unless the parent has
+ `sudo` capability for the creation path). In most cases this is low severity
+ since the access grants in the `default` policy are meant to be access
+ grants that are acceptable for all tokens to have.
+ * Leases Not Expired When Limited Use Token Runs Out of Uses: When using
+ limited-use tokens to create leased secrets, if the limited-use token was
+ revoked due to running out of uses (rather than due to TTL expiration or
+ explicit revocation) it would fail to revoke the leased secrets. These
+ secrets would still be revoked when their TTL expired, limiting the severity
+ of this issue. An endpoint has been added (`auth/token/tidy`) that can
+ perform housekeeping tasks on the token store; one of its tasks can detect
+ this situation and revoke the associated leases.
+
+FEATURES:
+
+ * **Policy UI (Enterprise)**: Vault Enterprise UI now supports viewing,
+ creating, and editing policies.
+
+IMPROVEMENTS:
+
+ * http: Vault now sets a `no-store` cache control header to make it more
+ secure in setups that are not end-to-end encrypted [GH-2183]
+
+BUG FIXES:
+
+ * auth/ldap: Don't panic if dialing returns an error and starttls is enabled;
+ instead, return the error [GH-2188]
+ * ui (Enterprise): Submitting an unseal key now properly resets the
+ form so a browser refresh isn't required to continue.
+
+## 0.6.3 (December 6, 2016)
+
+DEPRECATIONS/CHANGES:
+
+ * Request size limitation: A maximum request size of 32MB is imposed to
+ prevent a denial of service attack with arbitrarily large requests [GH-2108]
+ * LDAP denies passwordless binds by default: In new LDAP mounts, or when
+ existing LDAP mounts are rewritten, passwordless binds will be denied by
+ default. The new `deny_null_bind` parameter can be set to `false` to allow
+ these. [GH-2103]
+ * Any audit backend activated satisfies conditions: Previously, when a new
+ Vault node was taking over service in an HA cluster, all audit backends were
+ required to be loaded successfully to take over active duty. This behavior
+ now matches the behavior of the audit logging system itself: at least one
+ audit backend must successfully be loaded. The server log contains an error
+ when this occurs. This helps keep a Vault HA cluster working when there is a
+ misconfiguration on a standby node. [GH-2083]
+
+FEATURES:
+
+ * **Web UI (Enterprise)**: Vault Enterprise now contains a built-in web UI
+ that offers access to a number of features, including init/unsealing/sealing,
+ authentication via userpass or LDAP, and K/V reading/writing. The capability
+ set of the UI will be expanding rapidly in further releases. To enable it,
+ set `ui = true` in the top level of Vault's configuration file and point a
+ web browser at your Vault address.
+ * **Google Cloud Storage Physical Backend**: You can now use GCS for storing
+ Vault data [GH-2099]
+
+IMPROVEMENTS:
+
+ * auth/github: Policies can now be assigned to users as well as to teams
+ [GH-2079]
+ * cli: Set the number of retries on 500 down to 0 by default (no retrying). It
+ can be very confusing to users when there is a pause while the retries
+ happen if they haven't explicitly set it. With request forwarding the need
+ for this is lessened anyway.
[GH-2093]
+ * core: Response wrapping is now allowed to be specified by backend responses
+ (requires backends gaining support) [GH-2088]
+ * physical/consul: When announcing service, use the scheme of the Vault server
+ rather than the Consul client [GH-2146]
+ * secret/consul: Added listing functionality to roles [GH-2065]
+ * secret/postgresql: Added `revocation_sql` parameter on the role endpoint to
+ enable customization of user revocation SQL statements [GH-2033]
+ * secret/transit: Add listing of keys [GH-1987]
+
+BUG FIXES:
+
+ * api/unwrap, command/unwrap: Increase compatibility of `unwrap` command with
+ Vault 0.6.1 and older [GH-2014]
+ * api/unwrap, command/unwrap: Fix error when no client token exists [GH-2077]
+ * auth/approle: Creating the index for the role_id properly [GH-2004]
+ * auth/aws-ec2: Handle the case of multiple upgrade attempts when setting the
+ instance-profile ARN [GH-2035]
+ * auth/ldap: Avoid leaking connections on login [GH-2130]
+ * command/path-help: Use the actual error generated by Vault rather than
+ always using 500 when there is a path help error [GH-2153]
+ * command/ssh: Use temporary file for identity and ensure its deletion before
+ the command returns [GH-2016]
+ * cli: Fix error printing values with `-field` if the values contained
+ formatting directives [GH-2109]
+ * command/server: Don't say mlock is supported on OSX when it isn't. [GH-2120]
+ * core: Fix bug where a failure to come up as active node (e.g. if an audit
+ backend failed) could lead to deadlock [GH-2083]
+ * physical/mysql: Fix potential crash during setup due to a query failure
+ [GH-2105]
+ * secret/consul: Fix panic on user error [GH-2145]
+
+## 0.6.2 (October 5, 2016)
+
+DEPRECATIONS/CHANGES:
+
+ * Convergent Encryption v2: New keys in `transit` using convergent mode will
+ use a new nonce derivation mechanism rather than require the user to supply
+ a nonce. While not explicitly increasing security, it minimizes the
+ likelihood that a user will use the mode improperly and impact the security
+ of their keys. Keys in convergent mode that were created in v0.6.1 will
+ continue to work with the same mechanism (user-supplied nonce).
+ * `etcd` HA off by default: Following in the footsteps of `dynamodb`, the
+ `etcd` storage backend now requires that `ha_enabled` be explicitly
+ specified in the configuration file. The backend currently has known broken
+ HA behavior, so this flag discourages its use unless it is explicitly
+ enabled. If you are using this functionality, when upgrading, you should
+ set `ha_enabled` to `"true"` *before* starting the new versions of Vault.
+ * Default/Max lease/token TTLs are now 32 days: In previous versions of Vault
+ the default was 30 days, but moving it to 32 days allows some operations
+ (e.g. reauthenticating, renewing, etc.) to be performed via a monthly cron
+ job.
+ * AppRole Secret ID endpoints changed: Secret ID and Secret ID accessors are
+ no longer part of request URLs. The GET and DELETE operations are now moved
+ to new endpoints (`/lookup` and `/destroy`) which consume the input from
+ the body and not the URL.
+ * AppRole requires at least one constraint: previously it was sufficient to
+ turn off all AppRole authentication constraints (secret ID, CIDR block) and
+ use the role ID only. It is now required that at least one additional
+ constraint is enabled. Existing roles are unaffected, but any new roles or
+ updated roles will require this.
+ * Reading wrapped responses from `cubbyhole/response` is deprecated.
The
+ `sys/wrapping/unwrap` endpoint should be used instead as it provides
+ additional security, auditing, and other benefits. The ability to read
+ directly will be removed in a future release.
+ * Request Forwarding is now on by default: in 0.6.1 this required toggling on,
+ but it is now enabled by default. This can be disabled via the
+ `"disable_clustering"` parameter in Vault's
+ [config](https://www.vaultproject.io/docs/config/index.html), or per-request
+ with the `X-Vault-No-Request-Forwarding` header.
+ * In prior versions a bug caused the `bound_iam_role_arn` value in the
+ `aws-ec2` authentication backend to actually use the instance profile ARN.
+ This has been corrected, but as a result there is a behavior change. To
+ match using the instance profile ARN, a new parameter
+ `bound_iam_instance_profile_arn` has been added. Existing roles will
+ automatically transfer the value over to the correct parameter, but the next
+ time the role is updated, the new meanings will take effect.
+
+FEATURES:
+
+ * **Secret ID CIDR Restrictions in `AppRole`**: Secret IDs generated under an
+ approle can now specify a list of CIDR blocks from which requests to
+ generate secret IDs must originate. If an approle already has CIDR
+ restrictions specified, the CIDR restrictions on the secret ID should be a
+ subset of those specified on the role [GH-1910]
+ * **Initial Root Token PGP Encryption**: Similar to `generate-root`, the root
+ token created at initialization time can now be PGP encrypted [GH-1883]
+ * **Support Chained Intermediate CAs in `pki`**: The `pki` backend now allows,
+ when a CA cert is being supplied as a signed root or intermediate, a trust
+ chain of arbitrary length. The chain is returned as a parameter at
+ certificate issue/sign time and is retrievable independently as well.
+ [GH-1694]
+ * **Response Wrapping Enhancements**: There are new endpoints to look up
+ response wrapped token parameters; wrap arbitrary values; rotate wrapping
+ tokens; and unwrap with enhanced validation. In addition, list operations
+ can now be response-wrapped.
[GH-1927]
+ * **Transit Features**: The `transit` backend now supports generating random
+ bytes and SHA sums; HMACs; and signing and verification functionality using
+ EC keys (P-256 curve).
+
+IMPROVEMENTS:
+
+ * api: Return error when an invalid (as opposed to incorrect) unseal key is
+ submitted, rather than ignoring it [GH-1782]
+ * api: Add method to call `auth/token/create-orphan` endpoint [GH-1834]
+ * api: Rekey operation now redirects from standbys to master [GH-1862]
+ * audit/file: Sending a `SIGHUP` to Vault now causes Vault to close and
+ re-open the log file, making it easier to rotate audit logs [GH-1953]
+ * auth/aws-ec2: EC2 instances can now authenticate by presenting the identity
+ document and its SHA256 RSA digest [GH-1961]
+ * auth/aws-ec2: IAM bound parameters on the aws-ec2 backend will perform a
+ prefix match instead of an exact match [GH-1943]
+ * auth/aws-ec2: Added a new constraint `bound_iam_instance_profile_arn` to
+ refer to IAM instance profile ARN and fixed the earlier `bound_iam_role_arn`
+ to refer to IAM role ARN instead of the instance profile ARN [GH-1913]
+ * auth/aws-ec2: Backend generates the nonce by default and clients can
+ explicitly disable reauthentication by setting an empty nonce [GH-1889]
+ * auth/token: Added warnings if tokens and accessors are used in URLs [GH-1806]
+ * command/format: The `format` flag on select CLI commands takes `yml` as an
+ alias for `yaml` [GH-1899]
+ * core: Allow the size of the read cache to be set via the config file, and
+ change the default value to 1MB (from 32KB) [GH-1784]
+ * core: Allow single and two-character path parameters for most places
+ [GH-1811]
+ * core: Allow list operations to be response-wrapped [GH-1814]
+ * core: Provide better protection against timing attacks in Shamir code
+ [GH-1877]
+ * core: Unmounting/disabling backends no longer returns an error if the mount
+ didn't exist. This is in line with elsewhere in Vault's API where `DELETE` is
+ an idempotent operation.
[GH-1903]
+ * credential/approle: At least one constraint is required to be enabled when
+ creating or updating a role [GH-1882]
+ * secret/cassandra: Added consistency level for use with roles [GH-1931]
+ * secret/mysql: SQL for revoking user can be configured on the role [GH-1914]
+ * secret/transit: Use HKDF (RFC 5869) as the key derivation function for new
+ keys [GH-1812]
+ * secret/transit: Empty plaintext values are now allowed [GH-1874]
+
+BUG FIXES:
+
+ * audit: Fix panic being caused by some values logging as underlying Go types
+ instead of formatted strings [GH-1912]
+ * auth/approle: Fixed panic on deleting approle that doesn't exist [GH-1920]
+ * auth/approle: Prevent secret IDs and secret ID accessors from being logged
+ in plaintext in audit logs [GH-1947]
+ * auth/aws-ec2: Allow authentication if the underlying host is in a bad state
+ but the instance is running [GH-1884]
+ * auth/token: Fixed metadata being omitted from the token lookup response by
+ gracefully handling token entry upgrades [GH-1924]
+ * cli: Don't error on newline in token file [GH-1774]
+ * core: Pass back content-type header for forwarded requests [GH-1791]
+ * core: Fix panic if the same key was given twice to `generate-root` [GH-1827]
+ * core: Fix potential deadlock on unmount/remount [GH-1793]
+ * physical/file: Remove empty directories from the `file` storage backend [GH-1821]
+ * physical/zookeeper: Remove empty directories from the `zookeeper` storage
+ backend and add a fix to the `file` storage backend's logic [GH-1964]
+ * secret/aws: Added update operation to `aws/sts` path to consider `ttl`
+ parameter [39b75c6]
+ * secret/aws: Mark STS secrets as non-renewable [GH-1804]
+ * secret/cassandra: Properly store session for re-use [GH-1802]
+ * secret/ssh: Fix panic when revoking SSH dynamic keys [GH-1781]
+
+## 0.6.1 (August 22, 2016)
+
+DEPRECATIONS/CHANGES:
+
+ * Once the active node is 0.6.1, standby nodes must also be 0.6.1 in order to
+ connect to the HA cluster. We recommend following our [general upgrade
+ instructions](https://www.vaultproject.io/docs/install/upgrade.html) in
+ addition to 0.6.1-specific upgrade instructions to ensure that this is not
+ an issue.
+ * Status codes for sealed/uninitialized Vaults have changed to `503`/`501`
+ respectively. See the [version-specific upgrade
+ guide](https://www.vaultproject.io/docs/install/upgrade-to-0.6.1.html) for
+ more details.
+ * Root tokens (tokens with the `root` policy) can no longer be created except
+ by another root token or the `generate-root` endpoint.
+ * Issued certificates from the `pki` backend against new roles created or
+ modified after upgrading will contain a set of default key usages.
+ * The `dynamodb` physical data store no longer supports HA by default. It has
+ some non-ideal behavior around failover that was causing confusion. See the
+ [documentation](https://www.vaultproject.io/docs/config/index.html#ha_enabled)
+ for information on enabling HA mode. It is very important that this
+ configuration is added _before upgrading_.
+ * The `ldap` backend no longer searches for `memberOf` groups as part of its
+ normal flow. Instead, the desired group filter must be specified. This fixes
+ some errors and increases speed for directories with different structures,
+ but if this behavior has been relied upon, ensure that you see the upgrade
+ notes _before upgrading_.
+ * `app-id` is now deprecated with the addition of the new AppRole backend.
+ There are no plans to remove it, but we encourage using AppRole whenever
+ possible, as it offers enhanced functionality and can accommodate many more
+ types of authentication paradigms.
+
+FEATURES:
+
+ * **AppRole Authentication Backend**: The `approle` backend is a
+ machine-oriented authentication backend that provides a similar concept to
+ App-ID while adding many missing features, including a pull model that
+ allows the backend to generate authentication credentials rather than
+ requiring operators or other systems to push credentials in. It should be
+ useful in many more situations than App-ID. The inclusion of this backend
+ deprecates App-ID. [GH-1426]
+ * **Request Forwarding**: Vault servers can now forward requests to each other
+ rather than redirecting clients. This feature is off by default in 0.6.1 but
+ will be on by default in the next release. See the [HA concepts
+ page](https://www.vaultproject.io/docs/concepts/ha.html) for information on
+ enabling and configuring it. [GH-443]
+ * **Convergent Encryption in `Transit`**: The `transit` backend now supports a
+ convergent encryption mode where the same plaintext will produce the same
+ ciphertext. Although very useful in some situations, this has potential
+ security implications, which are mostly mitigated by requiring the use of
+ key derivation when convergent encryption is enabled. See [the `transit`
+ backend
+ documentation](https://www.vaultproject.io/docs/secrets/transit/index.html)
+ for more details. [GH-1537]
+ * **Improved LDAP Group Filters**: The `ldap` auth backend now uses templates
+ to define group filters, providing the capability to support some
+ directories that could not easily be supported before (especially specific
+ Active Directory setups with nested groups). [GH-1388]
+ * **Key Usage Control in `PKI`**: Issued certificates from roles created or
+ modified after upgrading contain a set of default key usages for increased
+ compatibility with OpenVPN and some other software. This set can be changed
+ when writing a role definition. Existing roles are unaffected. [GH-1552]
+ * **Request Retrying in the CLI and Go API**: Requests that fail with a `5xx`
+ error code will now retry after a backoff. The maximum total number of
+ retries (including disabling this functionality) can be set with an
+ environment variable. See the [environment variable
+ documentation](https://www.vaultproject.io/docs/commands/environment.html)
+ for more details. [GH-1594]
+ * **Service Discovery in `vault init`**: The new `-auto` option on `vault init`
+ will perform service discovery using Consul. When only one node is discovered,
+ it will be initialized; when more than one node is discovered, they will
+ be output for easy selection. See `vault init --help` for more details. [GH-1642]
+ * **MongoDB Secret Backend**: Generate dynamic unique MongoDB database
+ credentials based on configured roles. Sponsored by
+ [CommerceHub](http://www.commercehub.com/). [GH-1414]
+ * **Circonus Metrics Integration**: Vault can now send metrics to
+ [Circonus](http://www.circonus.com/). See the [configuration
+ documentation](https://www.vaultproject.io/docs/config/index.html) for
+ details. [GH-1646]
+
+IMPROVEMENTS:
+
+ * audit: Added a unique identifier to each request, which will also be found
+ in the request portion of the response.
[GH-1650]
+ * auth/aws-ec2: Added a new constraint `bound_account_id` to the role
+ [GH-1523]
+ * auth/aws-ec2: Added a new constraint `bound_iam_role_arn` to the role
+ [GH-1522]
+ * auth/aws-ec2: Added `ttl` field for the role [GH-1703]
+ * auth/ldap, secret/cassandra, physical/consul: Clients with `tls.Config`
+ have the minimum TLS version set to 1.2 by default. This is configurable.
+ * auth/token: Added endpoint to list accessors [GH-1676]
+ * auth/token: Added `disallowed_policies` option to token store roles [GH-1681]
+ * auth/token: `root` or `sudo` tokens can now create periodic tokens via
+ `auth/token/create`; additionally, the same token can now be periodic and
+ have an explicit max TTL [GH-1725]
+ * build: Add support for building on Solaris/Illumos [GH-1726]
+ * cli: Output formatting in the presence of warnings in the response object
+ [GH-1533]
+ * cli: `vault auth` command supports a `-path` option to take in the path at
+ which the auth backend is enabled, thereby allowing authentication against
+ different paths using the command options [GH-1532]
+ * cli: `vault auth -methods` will now display the config settings of the mount
+ [GH-1531]
+ * cli: `vault read/write/unwrap -field` now allows selecting token response
+ fields [GH-1567]
+ * cli: `vault write -field` now allows selecting wrapped response fields
+ [GH-1567]
+ * command/status: Version information and cluster details added to the output
+ of `vault status` command [GH-1671]
+ * core: Response wrapping is now enabled for login endpoints [GH-1588]
+ * core: The duration of leadership is now exported via events through
+ telemetry [GH-1625]
+ * core: `sys/capabilities-self` is now accessible as part of the `default`
+ policy [GH-1695]
+ * core: `sys/renew` is now accessible as part of the `default` policy [GH-1701]
+ * core: Unseal keys will now be returned in both hex and base64 forms, and
+ either can be used [GH-1734]
+ * core: Responses from most `/sys` endpoints now return normal `api.Secret`
+ structs in addition to the values they carried before.
This means that
+ response wrapping can now be used with most authenticated `/sys` operations
+ [GH-1699]
+ * physical/etcd: Support `ETCD_ADDR` env var for specifying addresses [GH-1576]
+ * physical/consul: Allowing additional tags to be added to Consul service
+ registration via `service_tags` option [GH-1643]
+ * secret/aws: Listing of roles is supported now [GH-1546]
+ * secret/cassandra: Add `connect_timeout` value for Cassandra connection
+ configuration [GH-1581]
+ * secret/mssql,mysql,postgresql: Reading of connection settings is supported
+ in all the SQL backends [GH-1515]
+ * secret/mysql: Added optional maximum idle connections value to MySQL
+ connection configuration [GH-1635]
+ * secret/mysql: Use a combination of the role name and token display name in
+ generated user names and allow the length to be controlled [GH-1604]
+ * secret/{cassandra,mssql,mysql,postgresql}: SQL statements can now be passed
+ in via one of four ways: a semicolon-delimited string, a base64-delimited
+ string, a serialized JSON string array, or a base64-encoded serialized JSON
+ string array [GH-1686]
+ * secret/ssh: Added `allowed_roles` to vault-ssh-helper's config and return
+ the role name as part of the `verify` API response
+ * secret/ssh: Added passthrough of command line arguments to `ssh` [GH-1680]
+ * sys/health: Added version information to the response of health status
+ endpoint [GH-1647]
+ * sys/health: Cluster information is now returned as part of health status when
+ Vault is unsealed [GH-1671]
+ * sys/mounts: MountTable data is compressed before serializing to accommodate
+ thousands of mounts [GH-1693]
+ * website: The [token
+ concepts](https://www.vaultproject.io/docs/concepts/tokens.html) page has
+ been completely rewritten [GH-1725]
+
+BUG FIXES:
+
+ * auth/aws-ec2: Added a nil check for stored whitelist identity object
+ during renewal [GH-1542]
+ * auth/cert: Fix panic if no client certificate is supplied [GH-1637]
+ * auth/token: Don't report that a non-expiring root token is renewable, as
+ attempting to renew it results in an error [GH-1692]
+ * cli: Don't retry a command when a redirection is received [GH-1724]
+ * core: Fix regression causing status codes to be `400` in most non-5xx error
+ cases [GH-1553]
+ * core: Fix panic that could occur during a leadership transition [GH-1627]
+ * physical/postgres: Remove use of prepared statements as this causes
+ connection multiplexing software to break [GH-1548]
+ * physical/consul: Multiple Vault nodes on the same machine could lead to
+ check ID collisions, resulting in incorrect health check responses [GH-1628]
+ * physical/consul: Fix deregistration of health checks on exit [GH-1678]
+ * secret/postgresql: Check for existence of role before attempting deletion
+ [GH-1575]
+ * secret/postgresql: Handle revoking roles that have privileges on sequences
+ [GH-1573]
+ * secret/postgresql(,mysql,mssql): Fix incorrect use of the database object
+ instead of the transaction object, which could lead to connection exhaustion
+ [GH-1572]
+ * secret/pki: Fix parsing CA bundle containing trailing whitespace [GH-1634]
+ * secret/pki: Fix adding email addresses as SANs [GH-1688]
+ * secret/pki: Ensure that CRL values are always UTC, per RFC [GH-1727]
+ * sys/seal-status: Fixed nil Cluster object while checking seal status [GH-1715]
+
+## 0.6.0 (June 14th, 2016)
+
+SECURITY:
+
+ * Although `sys/revoke-prefix` was intended to revoke prefixes of secrets (via
+ lease IDs, which incorporate path information) and
+ `auth/token/revoke-prefix` was intended to revoke
prefixes of tokens (using
+ the tokens' paths and, since 0.5.2, role information), in implementation
+ they both behaved exactly the same way since a single component in Vault is
+ responsible for managing lifetimes of both, and the type of the tracked
+ lifetime was not being checked. The end result was that either endpoint
+ could revoke both secret leases and tokens. We consider this a very minor
+ security issue as there are a number of mitigating factors: both endpoints
+ require `sudo` capability in addition to write capability, preventing
+ blanket ACL path globs from providing access; both work by using the prefix
+ to revoke as a part of the endpoint path, allowing them to be properly
+ ACL'd; and both are intended for emergency scenarios and users should
+ already not generally have access to either one. In order to prevent
+ confusion, we have simply removed `auth/token/revoke-prefix` in 0.6, and
+ `sys/revoke-prefix` will be meant for both leases and tokens instead.
+
+DEPRECATIONS/CHANGES:
+
+ * `auth/token/revoke-prefix` has been removed. See the security notice for
+ details. [GH-1280]
+ * Vault will now automatically register itself as the `vault` service when
+ using the `consul` backend and will perform its own health checks. See
+ the Consul backend documentation for information on how to disable
+ auto-registration and service checks.
+ * List operations that do not find any keys now return a `404` status code
+ rather than an empty response object [GH-1365]
+ * CA certificates issued from the `pki` backend no longer have associated
+ leases, and any CA certs already issued will ignore revocation requests from
+ the lease manager. This is to prevent CA certificates from being revoked
+ when the token used to issue the certificate expires; it was not obvious
+ to users that they needed to ensure that the token lifetime was at least as
+ long as that of a potentially very long-lived CA cert.
+
+FEATURES:
+
+ * **AWS EC2 Auth Backend**: Provides a secure introduction mechanism for AWS
+ EC2 instances allowing automated retrieval of Vault tokens. Unlike most
+ Vault authentication backends, this backend does not require first deploying
+ or provisioning security-sensitive credentials (tokens, username/password,
+ client certificates, etc). Instead, it treats AWS as a Trusted Third Party
+ and uses the cryptographically signed dynamic metadata information that
+ uniquely represents each EC2 instance. [Vault
+ Enterprise](https://www.hashicorp.com/vault.html) customers have access to a
+ turnkey client that speaks the backend API and makes access to a Vault token
+ easy.
+ * **Response Wrapping**: Nearly any response within Vault can now be wrapped
+ inside a single-use, time-limited token's cubbyhole, taking the [Cubbyhole
+ Authentication
+ Principles](https://www.hashicorp.com/blog/vault-cubbyhole-principles.html)
+ mechanism to its logical conclusion. Retrieving the original response is as
+ simple as a single API command or the new `vault unwrap` command. This makes
+ secret distribution easier and more secure, including secure introduction.
+ * **Azure Physical Backend**: You can now use Azure blob object storage as
+ your Vault physical data store [GH-1266]
+ * **Swift Physical Backend**: You can now use Swift blob object storage as
+ your Vault physical data store [GH-1425]
+ * **Consul Backend Health Checks**: The Consul backend will automatically
+ register a `vault` service and perform its own health checking.
By default
+ the active node can be found at `active.vault.service.consul` and all
+ standby nodes at `standby.vault.service.consul`. Sealed vaults are marked
+ critical and are not listed by default in Consul's service discovery. See
+ the documentation for details. [GH-1349]
+ * **Explicit Maximum Token TTLs**: You can now set explicit maximum TTLs on
+ tokens that do not honor changes in the system- or mount-set values. This is
+ useful, for instance, when the max TTL of the system or the `auth/token`
+ mount must be set high to accommodate certain needs but you want more
+ granular restrictions on tokens being issued directly from the Token
+ authentication backend at `auth/token`. [GH-1399]
+ * **Non-Renewable Tokens**: When creating tokens directly through the token
+ authentication backend, you can now specify in both token store roles and
+ the API whether or not a token should be renewable, defaulting to `true`.
+ * **RabbitMQ Secret Backend**: Vault can now generate credentials for
+ RabbitMQ. Vhosts and tags can be defined within roles. [GH-788]
+
+IMPROVEMENTS:
+
+ * audit: Add the DisplayName value to the copy of the Request object embedded
+ in the associated Response, to match the original Request object [GH-1387]
+ * audit: Enable auditing of the `seal` and `step-down` commands [GH-1435]
+ * backends: Remove most `root`/`sudo` paths in favor of normal ACL mechanisms.
+ Particular exceptions are any current MFA paths. A few paths in `token` and
+ `sys` also require `root` or `sudo`. [GH-1478]
+ * command/auth: Restore the previous authenticated token if the `auth` command
+ fails to authenticate the provided token [GH-1233]
+ * command/write: `-format` and `-field` can now be used with the `write`
+ command [GH-1228]
+ * core: Add `mlock` support for FreeBSD, OpenBSD, and Darwin [GH-1297]
+ * core: Don't keep lease timers around when tokens are revoked [GH-1277]
+ * core: If using the `disable_cache` option, caches for the policy store and
+ the `transit` backend are now disabled as well [GH-1346]
+ * credential/cert: Renewal requests are rejected if the set of policies has
+ changed since the token was issued [GH-477]
+ * credential/cert: Check CRLs for specific non-CA certs configured in the
+ backend [GH-1404]
+ * credential/ldap: If `groupdn` is not configured, skip searching LDAP and
+ only return policies for local groups, plus a warning [GH-1283]
+ * credential/ldap: `vault list` support for users and groups [GH-1270]
+ * credential/ldap: Support for the `memberOf` attribute for group membership
+ searching [GH-1245]
+ * credential/userpass: Add list support for users [GH-911]
+ * credential/userpass: Remove user configuration paths from requiring sudo, in
+ favor of normal ACL mechanisms [GH-1312]
+ * credential/token: Sanitize policies and add `default` policies in appropriate
+ places [GH-1235]
+ * credential/token: Setting the renewable status of a token is now possible
+ via `vault token-create` and the API. The default is true, but tokens can be
+ specified as non-renewable. [GH-1499]
+ * secret/aws: Use chain credentials to allow environment/EC2 instance/shared
+ providers [GH-307]
+ * secret/aws: Support for STS AssumeRole functionality [GH-1318]
+ * secret/consul: Reading consul access configuration is supported.
The response
+ will contain non-sensitive information only [GH-1445]
+ * secret/pki: Added `exclude_cn_from_sans` field to prevent adding the CN to
+ DNS or Email Subject Alternate Names [GH-1220]
+ * secret/pki: Added list support for certificates [GH-1466]
+ * sys/capabilities: Enforce ACL checks for requests that query the capabilities
+ of a token on a given path [GH-1221]
+ * sys/health: Status information can now be retrieved with `HEAD` [GH-1509]
+
+BUG FIXES:
+
+ * command/read: Fix panic when using `-field` with a non-string value [GH-1308]
+ * command/token-lookup: Fix TTL showing as 0 depending on how a token was
+ created. This only affected the value shown at lookup, not the token
+ behavior itself. [GH-1306]
+ * command/various: Tell the JSON decoder to not convert all numbers to floats;
+ fixes various places where numbers were showing up in scientific
+ notation
+ * command/server: Prioritized `devRootTokenID` and `devListenAddress` flags
+ over their respective env vars [GH-1480]
+ * command/ssh: Provided option to disable host key checking. The automated
+ variant of the `vault ssh` command uses `sshpass`, which was failing to handle
+ host key checking presented by the `ssh` binary. [GH-1473]
+ * core: Properly persist mount-tuned TTLs for auth backends [GH-1371]
+ * core: Don't accidentally crosswire SIGINT to the reload handler [GH-1372]
+ * credential/github: Make organization comparison case-insensitive during
+ login [GH-1359]
+ * credential/github: Fix panic when renewing a token created with some earlier
+ versions of Vault [GH-1510]
+ * credential/github: The token used to log in via `vault auth` can now be
+ specified in the `VAULT_AUTH_GITHUB_TOKEN` environment variable [GH-1511]
+ * credential/ldap: Fix problem where certain error conditions when configuring
+ or opening LDAP connections would cause a panic instead of returning a useful
+ error message [GH-1262]
+ * credential/token: Fall back to normal parent-token semantics if
+ `allowed_policies` is empty for a role. Using `allowed_policies` of
+ `default` resulted in the same behavior anyway. [GH-1276]
+ * credential/token: Fix issues renewing tokens when using the "suffix"
+ capability of token roles [GH-1331]
+ * credential/token: Fix lookup via POST showing the request token instead of
+ the desired token [GH-1354]
+ * credential/various: Fix renewal conditions when `default` policy is not
+ contained in the backend config [GH-1256]
+ * physical/s3: Don't panic in certain error cases from bad S3 responses [GH-1353]
+ * secret/consul: Use non-pooled Consul API client to avoid leaving files open
+ [GH-1428]
+ * secret/pki: Don't check whether a certificate is destined to be a CA
+ certificate if the sign-verbatim endpoint is used [GH-1250]
+
+## 0.5.3 (May 27th, 2016)
+
+SECURITY:
+
+ * Consul ACL Token Revocation: An issue was reported to us indicating that
+ generated Consul ACL tokens were not being properly revoked. Upon
+ investigation, we found that this behavior was reproducible in a specific
+ scenario: when a generated lease for a Consul ACL token had been renewed
+ prior to revocation. In this case, the generated token was not being
+ properly persisted internally through the renewal function, leading to an
+ error during revocation due to the missing token. Unfortunately, this was
+ coded as a user error rather than an internal error, and the revocation
+ logic was expecting internal errors if revocation failed.
As a result, the
+ revocation logic believed the revocation to have succeeded when it in fact
+ failed, causing the lease to be dropped while the token was still valid
+ within Consul. In this release, the Consul backend properly persists the
+ token through renewals, and the revocation logic has been changed to
+ consider any error type to have been a failure to revoke, causing the lease
+ to persist and attempt to be revoked later.
+
+We have written an example shell script that searches through Consul's ACL
+tokens and looks for those generated by Vault, which can be used as a template
+for a revocation script as deemed necessary for any particular security
+response. The script is available at
+https://gist.github.com/jefferai/6233c2963f9407a858d84f9c27d725c0
+
+Please note that any outstanding leases for Consul tokens produced prior to
+0.5.3 that have been renewed will continue to exhibit this behavior. As a
+result, we recommend either revoking all tokens produced by the backend and
+issuing new ones, or if needed, a more advanced variant of the provided example
+could use the timestamp embedded in each generated token's name to decide which
+tokens are too old and should be deleted. This could then be run periodically
+up until the maximum lease time for any outstanding pre-0.5.3 tokens has
+expired.
+
+This is a security-only release. There are no other code changes since 0.5.2.
+The binaries have one additional change: they are built against Go 1.6.1 rather
+than Go 1.6, as Go 1.6.1 contains two security fixes to the Go programming
+language itself.
+
+## 0.5.2 (March 16th, 2016)
+
+FEATURES:
+
+ * **MSSQL Backend**: Generate dynamic unique MSSQL database credentials based
+ on configured roles [GH-998]
+ * **Token Accessors**: Vault now provides an accessor with each issued token.
+ This accessor is an identifier that can be used for a limited set of
+ actions, notably for token revocation. This value can be logged in
+ plaintext to audit logs, and in combination with the plaintext metadata
+ logged to audit logs, provides a searchable and straightforward way to
+ revoke particular users' or services' tokens in many cases. To enable
+ plaintext audit logging of these accessors, set `hmac_accessor=false` when
+ enabling an audit backend (see the sketch after this list).
+ * **Token Credential Backend Roles**: Roles can now be created in the `token`
+ credential backend that allow modifying token behavior in ways that are not
+ otherwise exposed or easily delegated. This allows creating tokens with a
+ fixed set (or subset) of policies (rather than a subset of the calling
+ token's), periodic tokens with a fixed TTL but no expiration, specified
+ prefixes, and orphans.
+ * **Listener Certificate Reloading**: Vault's configured listeners now reload
+ their TLS certificate and private key when the Vault process receives a
+ SIGHUP.
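+
+As a brief illustration of the token accessor workflow described above, here
+is a minimal sketch using the Go API client. It is only a sketch: it assumes
+a reachable Vault server with `VAULT_ADDR` and `VAULT_TOKEN` set, a
+sufficiently privileged client token, and the method names of the modern
+`github.com/hashicorp/vault/api` package (which postdate this release).
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/vault/api"
+)
+
+func main() {
+	// DefaultConfig and NewClient read VAULT_ADDR/VAULT_TOKEN from the
+	// environment (an assumption of this sketch).
+	client, err := api.NewClient(api.DefaultConfig())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Create a token; the response carries both the token and its accessor.
+	secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{
+		Policies: []string{"default"},
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	accessor := secret.Auth.Accessor
+
+	// The accessor alone is enough to look up the token's properties and
+	// to revoke it, without ever handling the token itself.
+	props, err := client.Auth().Token().LookupAccessor(accessor)
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println("policies:", props.Data["policies"])
+
+	if err := client.Auth().Token().RevokeAccessor(accessor); err != nil {
+		log.Fatal(err)
+	}
+}
+```
+
+Because only the accessor is handled, this pattern pairs naturally with
+plaintext accessor audit logging (`hmac_accessor=false`) described above.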
+
+IMPROVEMENTS:
+
+ * auth/token: Endpoints optionally accept tokens from the HTTP body rather
+ than just from the URLs [GH-1211]
+ * auth/token,sys/capabilities: Added new endpoints
+ `auth/token/lookup-accessor`, `auth/token/revoke-accessor` and
+ `sys/capabilities-accessor`, which enable performing the respective actions
+ with just the accessor of the tokens, without having access to the actual
+ token [GH-1188]
+ * core: Ignore leading `/` in policy paths [GH-1170]
+ * core: Ignore leading `/` in mount paths [GH-1172]
+ * command/policy-write: Provided HCL is now validated for format violations
+ and helpful information is provided about where the violation occurred
+ [GH-1200]
+ * command/server: The initial root token ID when running in `-dev` mode can
+ now be specified via `-dev-root-token-id` or the environment variable
+ `VAULT_DEV_ROOT_TOKEN_ID` [GH-1162]
+ * command/server: The listen address when running in `-dev` mode can now be
+ specified via `-dev-listen-address` or the environment variable
+ `VAULT_DEV_LISTEN_ADDRESS` [GH-1169]
+ * command/server: The configured listeners now reload their TLS
+ certificates/keys when Vault is SIGHUP'd [GH-1196]
+ * command/step-down: New `vault step-down` command and API endpoint to force
+ the targeted node to give up active status, but without sealing. The node
+ will wait ten seconds before attempting to grab the lock again. [GH-1146]
+ * command/token-renew: Allow no token to be passed in; use `renew-self` in
+ this case. Change the behavior for any token being passed in to use `renew`.
+ [GH-1150]
+ * credential/app-id: Allow `app-id` parameter to be given in the login path;
+ this causes the `app-id` to be part of the token path, making it easier to
+ use with `revoke-prefix` [GH-424]
+ * credential/cert: Non-CA certificates can be used for authentication. They
+ must be matched exactly (issuer and serial number), and the certificate must
+ carry the client authentication or 'any' extended usage attributes. [GH-1153]
+ * credential/cert: Subject and Authority key IDs are output in metadata; this
+ allows more flexible searching/revocation in the audit logs [GH-1183]
+ * credential/cert: Support listing configured certs [GH-1212]
+ * credential/userpass: Add support for `create`/`update` capability
+ distinction in user path, and add user-specific endpoints to allow changing
+ the password and policies [GH-1216]
+ * credential/token: Add roles [GH-1155]
+ * secret/mssql: Add MSSQL backend [GH-998]
+ * secret/pki: Add revocation time (zero or Unix epoch) to `pki/cert/SERIAL`
+ endpoint [GH-1180]
+ * secret/pki: Sanitize serial number in `pki/revoke` endpoint to allow some
+ other formats [GH-1187]
+ * secret/ssh: Added documentation for `ssh/config/zeroaddress` endpoint.
[GH-1154]
+ * sys: Added new endpoints `sys/capabilities` and `sys/capabilities-self` to
+ fetch the capabilities of a token on a given path [GH-1171]
+ * sys: Added `sys/revoke-force`, which enables a user to ignore backend errors
+ when revoking a lease, necessary in some emergency/failure scenarios
+ [GH-1168]
+ * sys: The return codes from `sys/health` can now be user-specified via query
+ parameters [GH-1199]
+
+BUG FIXES:
+
+ * logical/cassandra: Apply hyphen/underscore replacement to the entire
+ generated username, not just the UUID, in order to handle token display name
+ hyphens [GH-1140]
+ * physical/etcd: Output actual error when cluster sync fails [GH-1141]
+ * vault/expiration: No longer letting error responses from the backends be
+ skipped during renewals [GH-1176]
+
+## 0.5.1 (February 25th, 2016)
+
+DEPRECATIONS/CHANGES:
+
+ * RSA keys less than 2048 bits are no longer supported in the PKI backend.
+ 1024-bit keys are considered unsafe and are disallowed in the Internet PKI.
+ The `pki` backend has enforced SHA256 hashes in signatures from the
+ beginning, and software that can handle these hashes should be able to
+ handle larger key sizes. [GH-1095]
+ * The PKI backend now does not automatically delete expired certificates,
+ including from the CRL. Doing so could lead to a situation where a time
+ mismatch between the Vault server and clients could result in a certificate
+ that would not be considered expired by a client being removed from the CRL.
+ The new `pki/tidy` endpoint can be used to trigger expirations. [GH-1129]
+ * The `cert` backend now performs a variant of channel binding at renewal time
+ for increased security. In order to not overly burden clients, a notion of
+ identity is used. This functionality can be disabled. See the 0.5.1 upgrade
+ guide for more specific information [GH-1127]
+
+FEATURES:
+
+ * **Codebase Audit**: Vault's 0.5 codebase was audited by iSEC. (The terms of
+ the audit contract do not allow us to make the results public.) [GH-220]
+
+IMPROVEMENTS:
+
+ * api: The `VAULT_TLS_SERVER_NAME` environment variable can be used to control
+ the SNI header during TLS connections [GH-1131]
+ * api/health: Add the server's time in UTC to health responses [GH-1117]
+ * command/rekey and command/generate-root: These now return the status at
+ attempt initialization time, rather than requiring a separate fetch for the
+ nonce [GH-1054]
+ * credential/cert: Don't require root/sudo tokens for the `certs/` and `crls/`
+ paths; use normal ACL behavior instead [GH-468]
+ * credential/github: The validity of the token used for login will be checked
+ at renewal time [GH-1047]
+ * credential/github: The `config` endpoint no longer requires a root token;
+ normal ACL path matching applies
+ * deps: Use the standardized Go 1.6 vendoring system
+ * secret/aws: Inform users of AWS-imposed policy restrictions around STS
+ tokens if they attempt to use an invalid policy [GH-1113]
+ * secret/mysql: The MySQL backend now allows disabling verification of the
+ `connection_url` [GH-1096]
+ * secret/pki: Submitted CSRs are now verified to have the correct key type and
+ minimum number of bits according to the role. The exception is intermediate
+ CA signing and the `sign-verbatim` path [GH-1104]
+ * secret/pki: New `tidy` endpoint to allow expunging expired certificates.
[GH-1129]
+ * secret/postgresql: The PostgreSQL backend now allows disabling verification
+ of the `connection_url` [GH-1096]
+ * secret/ssh: When verifying an OTP, return 400 if it is not valid instead of
+ 204 [GH-1086]
+ * credential/app-id: App ID backend will check the validity of app-id and user-id
+ at renewal time [GH-1039]
+ * credential/cert: TLS Certificates backend, during renewal, will now match the
+ client identity with the client identity used during login [GH-1127]
+
+BUG FIXES:
+
+ * credential/ldap: Properly escape values being provided to search filters
+ [GH-1100]
+ * secret/aws: Cap the length of usernames for both IAM and STS types
+ [GH-1102]
+ * secret/pki: If a cert is not found during lookup of a serial number,
+ respond with a 400 rather than a 500 [GH-1085]
+ * secret/postgresql: Add extra revocation statements to better handle more
+ permission scenarios [GH-1053]
+ * secret/postgresql: Make `connection_url` work properly [GH-1112]
+
+## 0.5.0 (February 10, 2016)
+
+SECURITY:
+
+ * Previous versions of Vault could allow a malicious user to hijack the rekey
+ operation by canceling an operation in progress and starting a new one. The
+ practical application of this is very small. If the user was an unseal key
+ owner, they could attempt to do this in order to either receive unencrypted
+ reseal keys or to replace the PGP keys used for encryption with ones under
+ their control. However, since this would invalidate any rekey progress, they
+ would need other unseal key holders to resubmit, which would be rather
+ suspicious during this manual operation if they were not also the original
+ initiator of the rekey attempt. If the user was not an unseal key holder,
+ there is no benefit to be gained; the only outcome that could be attempted
+ would be a denial of service against a legitimate rekey operation by sending
+ cancel requests over and over. Thanks to Josh Snyder for the report!
+
+DEPRECATIONS/CHANGES:
+
+ * `s3` physical backend: Environment variables are now preferred over
+ configuration values. This makes it behave similarly to the rest of Vault,
+ which, in increasing order of preference, uses values from the configuration
+ file, environment variables, and CLI flags. [GH-871]
+ * `etcd` physical backend: `sync` functionality is now supported and turned on
+ by default. This can be disabled. [GH-921]
+ * `transit`: If a client attempts to encrypt a value with a key that does not
+ yet exist, what happens now depends on the capabilities set in the client's
+ ACL policies. If the client has `create` (or `create` and `update`)
+ capability, the key will upsert as in the past. If the client has `update`
+ capability, they will receive an error. [GH-1012]
+ * `token-renew` CLI command: If the token given for renewal is the same as the
+ client token, the `renew-self` endpoint will be used in the API. Given that
+ the `default` policy (by default) allows all clients access to the
+ `renew-self` endpoint, this makes it much more likely that the intended
+ operation will be successful. [GH-894]
+ * Token `lookup`: the `ttl` value in the response now reflects the actual
+ remaining TTL rather than the original TTL specified when the token was
+ created; this value is now located in `creation_ttl` [GH-986]
+ * Vault no longer uses grace periods on leases or token TTLs. Uncertainty
+ about the length of the grace period for any given backend could cause
+ confusion and uncertainty. [GH-1002]
+ * `rekey`: Rekey now requires a nonce to be supplied with key shares.
This
+ nonce is generated at the start of a rekey attempt and is unique for that
+ attempt.
+ * `status`: The exit code for the `status` CLI command is now `2` for an
+ uninitialized Vault instead of `1`. `1` is returned for errors. This better
+ matches the rest of the CLI.
+
+FEATURES:
+
+ * **Split Data/High Availability Physical Backends**: You can now configure
+ two separate physical backends: one to be used for High Availability
+ coordination and another to be used for encrypted data storage. See the
+ [configuration
+ documentation](https://vaultproject.io/docs/config/index.html) for details.
+ [GH-395]
+ * **Fine-Grained Access Control**: Policies can now use the `capabilities` set
+ to specify fine-grained control over operations allowed on a path, including
+ separation of `sudo` privileges from other privileges. These can be mixed
+ and matched in any way desired. The `policy` value is kept for backwards
+ compatibility. See the [updated policy
+ documentation](https://vaultproject.io/docs/concepts/policies.html) for
+ details; a brief sketch follows this list. [GH-914]
+ * **List Support**: Listing is now supported via the API and the new `vault
+ list` command. This currently supports listing keys in the `generic` and
+ `cubbyhole` backends and a few other places (noted in the IMPROVEMENTS
+ section below). Different parts of the API and backends will need to
+ implement list capabilities in ways that make sense to particular endpoints,
+ so further support will appear over time. [GH-617]
+ * **Root Token Generation via Unseal Keys**: You can now use the
+ `generate-root` CLI command to generate new orphaned, non-expiring root
+ tokens in case the original is lost or revoked (accidentally or
+ purposefully). This requires a quorum of unseal key holders. The output
+ value is protected via any PGP key of the initiator's choosing or a one-time
+ pad known only to the initiator (a suitable pad can be generated via the
+ `-genotp` flag to the command). [GH-915]
+ * **Unseal Key Archiving**: You can now optionally have Vault store your
+ unseal keys in your chosen physical store for disaster recovery purposes.
+ This option is only available when the keys are encrypted with PGP. [GH-907]
+ * **Keybase Support for PGP Encryption Keys**: You can now specify Keybase
+ users when passing in PGP keys to the `init`, `rekey`, and `generate-root`
+ CLI commands. Public keys for these users will be fetched automatically.
+ [GH-901]
+ * **DynamoDB HA Physical Backend**: There is now a new, community-supported
+ HA-enabled physical backend using Amazon DynamoDB. See the [configuration
+ documentation](https://vaultproject.io/docs/config/index.html) for details.
+ [GH-878]
+ * **PostgreSQL Physical Backend**: There is now a new, community-supported
+ physical backend using PostgreSQL. See the [configuration
+ documentation](https://vaultproject.io/docs/config/index.html) for details.
+ [GH-945]
+ * **STS Support in AWS Secret Backend**: You can now use the AWS secret
+ backend to fetch STS tokens rather than IAM users. [GH-927]
+ * **Speedups in the transit backend**: The `transit` backend has gained a
+ cache, and now loads only the working set of keys (e.g. from the
+ `min_decryption_version` to the current key version) into memory. This
+ provides large speedups and potential memory savings when the `rotate`
+ feature of the backend is used heavily.
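+
+To make the `capabilities`-based policies and the new list support concrete,
+here is a small, hedged sketch using the Go API client. The policy name
+`app-rw`, the rules body, and the `secret/` path are illustrative assumptions
+(the `generic` backend is mounted at `secret/` in a default dev server), not
+values prescribed by this release; the client methods are those of the modern
+`github.com/hashicorp/vault/api` package.
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/vault/api"
+)
+
+func main() {
+	// Assumes VAULT_ADDR and VAULT_TOKEN are set in the environment.
+	client, err := api.NewClient(api.DefaultConfig())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// A policy expressed with the `capabilities` set; `sudo` is deliberately
+	// not granted, keeping it separate from the normal privileges.
+	rules := `
+path "secret/*" {
+  capabilities = ["create", "read", "update", "delete", "list"]
+}`
+	if err := client.Sys().PutPolicy("app-rw", rules); err != nil {
+		log.Fatal(err)
+	}
+
+	// List keys under the generic backend; this surfaces as a LIST request
+	// in the API and as `vault list secret/` on the CLI.
+	secret, err := client.Logical().List("secret/")
+	if err != nil {
+		log.Fatal(err)
+	}
+	if secret != nil {
+		fmt.Println("keys:", secret.Data["keys"])
+	}
+}
+```
+
+`PutPolicy` simply uploads the policy rules; the `capabilities` list can be
+mixed and matched per path as described in the feature entry above.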
+ +IMPROVEMENTS: + + * cli: Output secrets sorted by key name [GH-830] + * cli: Support YAML as an output format [GH-832] + * cli: Show an error if the output format is incorrect, rather than falling + back to an empty table [GH-849] + * cli: Allow setting the `advertise_addr` for HA via the + `VAULT_ADVERTISE_ADDR` environment variable [GH-581] + * cli/generate-root: Add generate-root and associated functionality [GH-915] + * cli/init: Add `-check` flag that returns whether Vault is initialized + [GH-949] + * cli/server: Use internal functions for the token-helper rather than shelling + out, which fixes some problems with using a static binary in Docker or paths + with multiple spaces when launching in `-dev` mode [GH-850] + * cli/token-lookup: Add token-lookup command [GH-892] + * command/{init,rekey}: Allow ASCII-armored keychain files to be arguments for + `-pgp-keys` [GH-940] + * conf: Use normal bool values rather than empty/non-empty for the + `tls_disable` option [GH-802] + * credential/ldap: Add support for binding, both anonymously (to discover a + user DN) and via a username and password [GH-975] + * credential/token: Add `last_renewal_time` to token lookup calls [GH-896] + * credential/token: Change `ttl` to reflect the current remaining TTL; the + original value is in `creation_ttl` [GH-1007] + * helper/certutil: Add ability to parse PKCS#8 bundles [GH-829] + * logical/aws: You can now get STS tokens instead of IAM users [GH-927] + * logical/cassandra: Add `protocol_version` parameter to set the CQL proto + version [GH-1005] + * logical/cubbyhole: Add cubbyhole access to default policy [GH-936] + * logical/mysql: Add list support for roles path [GH-984] + * logical/pki: Fix up key usages being specified for CAs [GH-989] + * logical/pki: Add list support for roles path [GH-985] + * logical/pki: Allow `pem_bundle` to be specified as the format, which + provides a concatenated PEM bundle of returned values [GH-1008] + * logical/pki: Add 30 seconds of slack to the validity start period to + accommodate some clock skew in machines [GH-1036] + * logical/postgres: Add `max_idle_connections` parameter [GH-950] + * logical/postgres: Add list support for roles path + * logical/ssh: Add list support for roles path [GH-983] + * logical/transit: Keys are archived and only keys between the latest version + and `min_decryption_version` are loaded into the working set. This can + provide a very large speed increase when rotating keys very often. 
[GH-977] + * logical/transit: Keys are now cached, which should provide a large speedup + in most cases [GH-979] + * physical/cache: Use 2Q cache instead of straight LRU [GH-908] + * physical/etcd: Support basic auth [GH-859] + * physical/etcd: Support sync functionality and enable by default [GH-921] + +BUG FIXES: + + * api: Correct the HTTP verb used in the LookupSelf method [GH-887] + * api: Fix the output of `Sys().MountConfig(...)` to return proper values + [GH-1017] + * command/read: Fix panic when an empty argument was given [GH-923] + * command/ssh: Fix panic when username lookup fails [GH-886] + * core: When running in standalone mode, don't advertise that we are active + until post-unseal setup completes [GH-872] + * core: Update go-cleanhttp dependency to ensure idle connections aren't + leaked [GH-867] + * core: Don't allow tokens to have duplicate policies [GH-897] + * core: Fix regression in `sys/renew` that caused information stored in the + Secret part of the response to be lost [GH-912] + * physical: Use square brackets when setting an IPv6-based advertise address + as the auto-detected advertise address [GH-883] + * physical/s3: Use an initialized client when using IAM roles to fix a + regression introduced against newer versions of the AWS Go SDK [GH-836] + * secret/pki: Fix a condition where unmounting could fail if the CA + certificate was not properly loaded [GH-946] + * secret/ssh: Fix a problem where SSH connections were not always closed + properly [GH-942] + +MISC: + + * Clarified our stance on support for community-derived physical backends. + See the [configuration + documentation](https://vaultproject.io/docs/config/index.html) for details. + * Add `vault-java` to libraries [GH-851] + * Various minor documentation fixes and improvements [GH-839] [GH-854] + [GH-861] [GH-876] [GH-899] [GH-900] [GH-904] [GH-923] [GH-924] [GH-958] + [GH-959] [GH-981] [GH-990] [GH-1024] [GH-1025] + +BUILD NOTE: + + * The HashiCorp-provided binary release of Vault 0.5.0 is built against a + patched version of Go 1.5.3 containing two specific bug fixes affecting TLS + certificate handling. These fixes are in the Go 1.6 tree and were + cherry-picked on top of stock Go 1.5.3. If you want to examine the way in + which the releases were built, please look at our [cross-compilation + Dockerfile](https://github.com/hashicorp/vault/blob/v0.5.0/scripts/cross/Dockerfile-patched-1.5.3). + +## 0.4.1 (January 13, 2016) + +SECURITY: + + * Build against Go 1.5.3 to mitigate a security vulnerability introduced in + Go 1.5. For more information, please see + https://groups.google.com/forum/#!topic/golang-dev/MEATuOi_ei4 + +This is a security-only release; other than the version number and building +against Go 1.5.3, there are no changes from 0.4.0. + +## 0.4.0 (December 10, 2015) + +DEPRECATIONS/CHANGES: + + * Policy Name Casing: Policy names are now normalized to lower-case on write, + helping prevent accidental case mismatches. For backwards compatibility, + policy names are not currently normalized when reading or deleting. [GH-676] + * Default etcd port number: the default connection string for the `etcd` + physical store uses port 2379 instead of port 4001, which is the port used + by the supported version 2.x of etcd. [GH-753] + * As noted below in the FEATURES section, if your Vault installation contains + a policy called `default`, new tokens created will inherit this policy + automatically. 
+ * In the PKI backend there have been a few minor breaking changes:
+   * The token display name is no longer a valid option for providing a base
+     domain for issuance. Since this name is prepended with the name of the
+     authentication backend that issued it, it provided a faulty use-case at best
+     and a confusing experience at worst. We hope to figure out a better
+     per-token value in a future release.
+   * The `allowed_base_domain` parameter has been changed to `allowed_domains`,
+     which accepts a comma-separated list of domains. This allows issuing
+     certificates with DNS subjects across multiple domains. If you had a
+     configured `allowed_base_domain` parameter, it will be migrated
+     automatically when the role is read (either via a normal read, or via
+     issuing a certificate).
+
+FEATURES:
+
+ * **Significantly Enhanced PKI Backend**: The `pki` backend can now generate
+   and sign root CA certificates and intermediate CA CSRs. It can also now sign
+   submitted client CSRs, and it includes a significant number of other
+   enhancements. See the updated documentation for the full API. [GH-666]
+ * **CRL Checking for Certificate Authentication**: The `cert` backend now
+   supports pushing CRLs into the mount and using the contained serial numbers
+   for revocation checking. See the documentation for the `cert` backend for
+   more info. [GH-330]
+ * **Default Policy**: Vault now ensures that a policy named `default` is added
+   to every token. This policy cannot be deleted, but it can be modified
+   (including to an empty policy). There are three endpoints allowed in the
+   default `default` policy, related to token self-management: `lookup-self`,
+   which allows a token to retrieve its own information, and `revoke-self` and
+   `renew-self`, which are self-explanatory. If your existing Vault
+   installation contains a policy called `default`, it will not be overridden,
+   but it will be added to each new token created. You can override this
+   behavior when using manual token creation (i.e. not via an authentication
+   backend) by setting the "no_default_policy" flag to true. [GH-732]
+
+IMPROVEMENTS:
+
+ * api: API client now uses a 60 second timeout instead of indefinite [GH-681]
+ * api: Implement LookupSelf, RenewSelf, and RevokeSelf functions for auth
+   tokens [GH-739]
+ * api: Standardize environment variable reading logic inside the API; the CLI
+   now uses this but can still override via command-line parameters [GH-618]
+ * audit: HMAC-SHA256'd client tokens are now stored with each request entry.
+   Previously they were only displayed at creation time; this allows much
+   better traceability of client actions.
[GH-713]
+ * audit: There is now a `sys/audit-hash` endpoint that can be used to generate
+   an HMAC-SHA256'd value from provided data using the given audit backend's
+   salt [GH-784]
+ * core: The physical storage read cache can now be disabled via
+   "disable_cache" [GH-674]
+ * core: The unsealing process can now be reset midway through (this feature
+   was documented before, but not enabled) [GH-695]
+ * core: Tokens can now renew themselves [GH-455]
+ * core: Base64-encoded PGP keys can be used with the CLI for `init` and
+   `rekey` operations [GH-653]
+ * core: Print version on startup [GH-765]
+ * core: Access to `sys/policy` and `sys/mounts` now uses the normal ACL system
+   instead of requiring a root token [GH-769]
+ * credential/token: Display whether or not a token is an orphan in the output
+   of a lookup call [GH-766]
+ * logical: Allow `.` in path-based variables in many more locations [GH-244]
+ * logical: Responses now contain a "warnings" key containing a list of
+   warnings returned from the server. These are conditions that did not require
+   failing an operation, but of which the client should be aware. [GH-676]
+ * physical/(consul,etcd): Consul and etcd now use a connection pool to limit
+   the number of outstanding operations, improving behavior when a lot of
+   operations must happen at once [GH-677] [GH-780]
+ * physical/consul: The `datacenter` parameter was removed; it could not be
+   effective unless the Vault node (or the Consul node it was connecting to)
+   was in the datacenter specified, in which case it wasn't needed [GH-816]
+ * physical/etcd: Support TLS-encrypted connections and use a connection pool
+   to limit the number of outstanding operations [GH-780]
+ * physical/s3: The S3 endpoint can now be configured, allowing the use of
+   S3-API-compatible storage solutions [GH-750]
+ * physical/s3: The S3 bucket can now be configured with the `AWS_S3_BUCKET`
+   environment variable [GH-758]
+ * secret/consul: Management tokens can now be created [GH-714]
+
+BUG FIXES:
+
+ * api: API client now checks for a 301 response for redirects. Vault doesn't
+   generate these, but in certain conditions Go's internal HTTP handler can
+   generate them, leading to client errors.
+ * cli: `token-create` now supports the `ttl` parameter in addition to the
+   deprecated `lease` parameter. [GH-688]
+ * core: Return data from `generic` backends on the last use of a limited-use
+   token [GH-615]
+ * core: Fix upgrade path for leases created in `generic` prior to 0.3 [GH-673]
+ * core: Stale leader entries will now be reaped [GH-679]
+ * core: Using `mount-tune` on the auth/token path did not take effect.
+   [GH-688]
+ * core: Fix a potential race condition when (un)sealing the vault with metrics
+   enabled [GH-694]
+ * core: Fix an error that could happen in some failure scenarios where Vault
+   could fail to revert to a clean state [GH-733]
+ * core: Ensure secondary indexes are removed when a lease is expired [GH-749]
+ * core: Ensure rollback manager uses an up-to-date mounts table [GH-771]
+ * everywhere: Don't use http.DefaultClient, as it shares state implicitly and
+   is a source of hard-to-track-down bugs [GH-700]
+ * credential/token: Allow creating orphan tokens via an API path [GH-748]
+ * secret/generic: Validate given duration at write time, not just read time;
+   if stored durations are not parseable, return a warning and the default
+   duration rather than an error [GH-718]
+ * secret/generic: Return 400 instead of 500 when `generic` backend is written
+   to with no data fields [GH-825]
+ * secret/postgresql: Revoke permissions before dropping a user or revocation
+   may fail [GH-699]
+
+MISC:
+
+ * Various documentation fixes and improvements [GH-685] [GH-688] [GH-697]
+   [GH-710] [GH-715] [GH-831]
+
+## 0.3.1 (October 6, 2015)
+
+SECURITY:
+
+ * core: In certain failure scenarios, the full values of requests and
+   responses would be logged [GH-665]
+
+FEATURES:
+
+ * **Settable Maximum Open Connections**: The `mysql` and `postgresql` backends
+   now allow setting the number of maximum open connections to the database,
+   which was previously capped to 2. [GH-661]
+ * **Renewable Tokens for GitHub**: The `github` backend now supports
+   specifying a TTL, enabling renewable tokens. [GH-664]
+
+BUG FIXES:
+
+ * dist: linux-amd64 distribution was dynamically linked [GH-656]
+ * credential/github: Fix acceptance tests [GH-651]
+
+MISC:
+
+ * Various minor documentation fixes and improvements [GH-649] [GH-650]
+   [GH-654] [GH-663]
+
+## 0.3.0 (September 28, 2015)
+
+DEPRECATIONS/CHANGES:
+
+Note: deprecations and breaking changes in upcoming releases are announced
+ahead of time on the "vault-tool" mailing list.
+
+ * **Cookie Authentication Removed**: As of 0.3 the only way to authenticate is
+   via the X-Vault-Token header. Cookie authentication was hard to properly
+   test, could result in browsers/tools/applications saving tokens in plaintext
+   on disk, and had other issues. [GH-564]
+ * **Terminology/Field Names**: Vault is transitioning from overloading the
+   term "lease" to mean both "a set of metadata" and "the amount of time the
+   metadata is valid". The latter is now being referred to as TTL (or
+   "lease_duration" for backwards-compatibility); some parts of Vault have
+   already switched to using "ttl" and others will follow in upcoming releases.
+   In particular, the "token", "generic", and "pki" backends accept both "ttl"
+   and "lease" but in 0.4 only "ttl" will be accepted. [GH-528]
+ * **Downgrade Not Supported**: Due to enhancements in the storage subsystem,
+   values written by Vault 0.3+ cannot be read by prior versions of Vault.
+   There are no expected upgrade issues; however, as with all critical
+   infrastructure it is recommended to back up Vault's physical storage
+   before upgrading.
+
+FEATURES:
+
+ * **SSH Backend**: Vault can now be used to delegate SSH access to machines,
+   via a (recommended) One-Time Password approach or by issuing dynamic keys.
+   [GH-385]
+ * **Cubbyhole Backend**: This backend works similarly to the "generic" backend
+   but provides a per-token workspace.
This enables some additional
+   authentication workflows (especially for containers) and can be useful for
+   applications to, e.g., store local credentials while being restarted or
+   upgraded, rather than persisting to disk. [GH-612]
+ * **Transit Backend Improvements**: The transit backend now allows key
+   rotation and datakey generation. For rotation, data encrypted with previous
+   versions of the keys can still be decrypted, down to a (configurable)
+   minimum previous version; there is a rewrap function for manual upgrades of
+   ciphertext to newer versions. Additionally, the backend now allows
+   generating and returning high-entropy keys of a configurable bitsize
+   suitable for AES and other functions; this is returned wrapped by a named
+   key, or optionally both wrapped and plaintext for immediate use. [GH-626]
+ * **Global and Per-Mount Default/Max TTL Support**: You can now set the
+   default and maximum Time To Live for leases both globally and per-mount.
+   Per-mount settings override global settings. Not all backends honor these
+   settings yet, but the maximum is a hard limit enforced outside the backend.
+   See the documentation for "/sys/mounts/" for details on configuring
+   per-mount TTLs. [GH-469]
+ * **PGP Encryption for Unseal Keys**: When initializing or rotating Vault's
+   master key, PGP/GPG public keys can now be provided. The output keys will be
+   encrypted with the given keys, in order. [GH-570]
+ * **Duo Multifactor Authentication Support**: Backends that support MFA can
+   now use Duo as the mechanism. [GH-464]
+ * **Performance Improvements**: Users of the "generic" backend will see a
+   significant performance improvement as the backend no longer creates leases,
+   although it does return TTLs (global/mount default, or set per-item) as
+   before. [GH-631]
+ * **Codebase Audit**: Vault's codebase was audited by iSEC. (The terms of the
+   audit contract do not allow us to make the results public.) [GH-220]
+
+IMPROVEMENTS:
+
+ * audit: Log entries now contain a time field [GH-495]
+ * audit: Obfuscated audit entries now use HMAC-SHA256 instead of SHA-1 [GH-627]
+ * backends: Add ability for a cleanup function to be called on backend unmount
+   [GH-608]
+ * config: Allow specifying minimum acceptable TLS version [GH-447]
+ * core: If trying to mount in a location that is already mounted, be more
+   helpful about the error [GH-510]
+ * core: Be more explicit on failure if the issue is invalid JSON [GH-553]
+ * core: Tokens can now revoke themselves [GH-620]
+ * credential/app-id: Give a more specific error when sending a duplicate POST
+   to sys/auth/app-id [GH-392]
+ * credential/github: Support custom API endpoints (e.g. for GitHub Enterprise)
+   [GH-572]
+ * credential/ldap: Add per-user policies and option to login with
+   userPrincipalName [GH-420]
+ * credential/token: Allow root tokens to specify the ID of a token being
+   created from CLI [GH-502]
+ * credential/userpass: Enable renewals for login tokens [GH-623]
+ * scripts: Use /usr/bin/env to find Bash instead of hardcoding [GH-446]
+ * scripts: Use godep for build scripts to use same environment as tests
+   [GH-404]
+ * secret/mysql: Allow reading configuration data [GH-529]
+ * secret/pki: Split "allow_any_name" logic into that and "enforce_hostnames",
+   to allow for non-hostname values (e.g.
for client certificates) [GH-555] + * storage/consul: Allow specifying certificates used to talk to Consul + [GH-384] + * storage/mysql: Allow SSL encrypted connections [GH-439] + * storage/s3: Allow using temporary security credentials [GH-433] + * telemetry: Put telemetry object in configuration to allow more flexibility + [GH-419] + * testing: Disable mlock for testing of logical backends so as not to require + root [GH-479] + +BUG FIXES: + + * audit/file: Do not enable auditing if file permissions are invalid [GH-550] + * backends: Allow hyphens in endpoint patterns (fixes AWS and others) [GH-559] + * cli: Fixed missing setup of client TLS certificates if no custom CA was + provided + * cli/read: Do not include a carriage return when using raw field output + [GH-624] + * core: Bad input data could lead to a panic for that session, rather than + returning an error [GH-503] + * core: Allow SHA2-384/SHA2-512 hashed certificates [GH-448] + * core: Do not return a Secret if there are no uses left on a token (since it + will be unable to be used) [GH-615] + * core: Code paths that called lookup-self would decrement num_uses and + potentially immediately revoke a token [GH-552] + * core: Some /sys/ paths would not properly redirect from a standby to the + leader [GH-499] [GH-551] + * credential/aws: Translate spaces in a token's display name to avoid making + IAM unhappy [GH-567] + * credential/github: Integration failed if more than ten organizations or + teams [GH-489] + * credential/token: Tokens with sudo access to "auth/token/create" can now use + root-only options [GH-629] + * secret/cassandra: Work around backwards-incompatible change made in + Cassandra 2.2 preventing Vault from properly setting/revoking leases + [GH-549] + * secret/mysql: Use varbinary instead of varchar to avoid InnoDB/UTF-8 issues + [GH-522] + * secret/postgres: Explicitly set timezone in connections [GH-597] + * storage/etcd: Renew semaphore periodically to prevent leadership flapping + [GH-606] + * storage/zk: Fix collisions in storage that could lead to data unavailability + [GH-411] + +MISC: + + * Various documentation fixes and improvements [GH-412] [GH-474] [GH-476] + [GH-482] [GH-483] [GH-486] [GH-508] [GH-568] [GH-574] [GH-586] [GH-590] + [GH-591] [GH-592] [GH-595] [GH-613] [GH-637] + * Less "armon" in stack traces [GH-453] + * Sourcegraph integration [GH-456] + +## 0.2.0 (July 13, 2015) + +FEATURES: + + * **Key Rotation Support**: The `rotate` command can be used to rotate the + master encryption key used to write data to the storage (physical) backend. + [GH-277] + * **Rekey Support**: Rekey can be used to rotate the master key and change the + configuration of the unseal keys (number of shares, threshold required). + [GH-277] + * **New secret backend: `pki`**: Enable Vault to be a certificate authority + and generate signed TLS certificates. [GH-310] + * **New secret backend: `cassandra`**: Generate dynamic credentials for + Cassandra [GH-363] + * **New storage backend: `etcd`**: store physical data in etcd [GH-259] + [GH-297] + * **New storage backend: `s3`**: store physical data in S3. Does not support + HA. [GH-242] + * **New storage backend: `MySQL`**: store physical data in MySQL. Does not + support HA. 
[GH-324]
+ * `transit` secret backend supports derived keys for per-transaction unique
+   keys [GH-399]
+
+IMPROVEMENTS:
+
+ * cli/auth: Enable `cert` method [GH-380]
+ * cli/auth: read input from stdin [GH-250]
+ * cli/read: Ability to read a single field from a secret [GH-257]
+ * cli/write: Adding a force flag when no input required
+ * core: allow time duration format in place of seconds for some inputs
+ * core: audit log provides more useful information [GH-360]
+ * core: graceful shutdown for faster HA failover
+ * core: **change policy format** to use explicit globbing [GH-400]. Any
+   existing policy in Vault is automatically upgraded to avoid issues. All
+   policy files must be updated for future writes. Adding the explicit glob
+   character `*` to the path specification is all that is required.
+ * core: policy merging to give deny highest precedence [GH-400]
+ * credential/app-id: Protect against timing attack on app-id
+ * credential/cert: Record the common name in the metadata [GH-342]
+ * credential/ldap: Allow TLS verification to be disabled [GH-372]
+ * credential/ldap: More flexible names allowed [GH-245] [GH-379] [GH-367]
+ * credential/userpass: Protect against timing attack on password
+ * credential/userpass: Use bcrypt for password matching
+ * http: response codes improved to reflect error [GH-366]
+ * http: the `sys/health` endpoint supports `?standbyok` to return 200 on
+   standby [GH-389]
+ * secret/app-id: Support deleting AppID and UserIDs [GH-200]
+ * secret/consul: Fine grained lease control [GH-261]
+ * secret/transit: Decouple raw key from key management endpoint [GH-355]
+ * secret/transit: Upsert named key when encrypt is used [GH-355]
+ * storage/zk: Support for HA configuration [GH-252]
+ * storage/zk: Changing node representation. **Backwards incompatible**.
+   [GH-416]
+
+BUG FIXES:
+
+ * audit/file: fix removing TLS connection state
+ * audit/syslog: fix removing TLS connection state
+ * command/*: commands accepting `k=v` allow blank values
+ * core: Allow building on FreeBSD [GH-365]
+ * core: Fixed various panics when audit logging enabled
+ * core: Lease renewal does not create redundant lease
+ * core: fixed leases with negative duration [GH-354]
+ * core: token renewal does not create child token
+ * core: fixing panic when lease increment is null [GH-408]
+ * credential/app-id: Salt the paths in storage backend to avoid information
+   leak
+ * credential/cert: Fixing client certificate not being requested
+ * credential/cert: Fixing panic when no certificate match found [GH-361]
+ * http: Accept PUT as POST for sys/auth
+ * http: Accept PUT as POST for sys/mounts [GH-349]
+ * http: Return 503 when sealed [GH-225]
+ * secret/postgres: Username length is capped to avoid exceeding the limit
+ * server: Do not panic if backend not configured [GH-222]
+ * server: Explicitly check value of tls_disable [GH-201]
+ * storage/zk: Fixed issues with version conflicts [GH-190]
+
+MISC:
+
+ * cli/path-help: renamed from `help` to avoid confusion
+
+## 0.1.2 (May 11, 2015)
+
+FEATURES:
+
+ * **New physical backend: `zookeeper`**: store physical data in Zookeeper.
+   HA not supported yet.
+ * **New credential backend: `ldap`**: authenticate using LDAP credentials.
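+
+A minimal sketch of an `ldap` login through the modern Go API client
+(illustration only; it assumes the method is mounted at the default
+`auth/ldap` path, and the username and password are placeholders):
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/vault/api"
+)
+
+func main() {
+	// DefaultConfig picks up VAULT_ADDR (and related settings) from the
+	// environment.
+	client, err := api.NewClient(api.DefaultConfig())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Writing to auth/ldap/login/<username> performs the LDAP bind; the
+	// response's Auth block carries the issued client token.
+	secret, err := client.Logical().Write("auth/ldap/login/alice", // placeholder user
+		map[string]interface{}{"password": "placeholder-password"})
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	client.SetToken(secret.Auth.ClientToken)
+	fmt.Println("token policies:", secret.Auth.Policies)
+}
+```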
+
+IMPROVEMENTS:
+
+ * core: Auth backends can store internal data about auth creds
+ * audit: display name for auth is shown in logs [GH-176]
+ * command/*: `-insecure` has been renamed to `-tls-skip-verify` [GH-130]
+ * command/*: `VAULT_TOKEN` overrides local stored auth [GH-162]
+ * command/server: environment variables are copy-pastable
+ * credential/app-id: hash of app and user ID are in metadata [GH-176]
+ * http: HTTP API accepts `X-Vault-Token` as auth header [GH-124]
+ * logical/*: Generate help output even if no synopsis specified
+
+BUG FIXES:
+
+ * core: login endpoints should never return secrets
+ * core: Internal data should never be returned from core endpoints
+ * core: defer barrier initialization to as late as possible to avoid error
+   cases during init that corrupt data (no data loss)
+ * core: guard against invalid init config earlier
+ * audit/file: create file if it doesn't exist [GH-148]
+ * command/*: ignore directories when traversing CA paths [GH-181]
+ * credential/*: all policy mapping keys are case insensitive [GH-163]
+ * physical/consul: Fixing path for locking so HA works in every case
+
+## 0.1.1 (May 2, 2015)
+
+SECURITY CHANGES:
+
+ * physical/file: create the storage with 0600 permissions [GH-102]
+ * token/disk: write the token to disk with 0600 perms
+
+IMPROVEMENTS:
+
+ * core: Very verbose error if mlock fails [GH-59]
+ * command/*: On error with TLS oversized record, show more human-friendly
+   error message. [GH-123]
+ * command/read: `lease_renewable` is now outputted along with the secret to
+   show whether it is renewable or not
+ * command/server: Add configuration option to disable mlock
+ * command/server: Disable mlock for dev mode so it works on more systems
+
+BUG FIXES:
+
+ * core: if token helper isn't absolute, prepend with path to Vault
+   executable, not "vault" (which requires PATH) [GH-60]
+ * core: Any "mapping" routes allow hyphens in keys [GH-119]
+ * core: Validate `advertise_addr` is a valid URL with scheme [GH-106]
+ * command/auth: Using an invalid token won't crash [GH-75]
+ * credential/app-id: app and user IDs can have hyphens in keys [GH-119]
+ * helper/password: import proper DLL for Windows to ask password [GH-83]
+
+## 0.1.0 (April 28, 2015)
+
+ * Initial release
diff --git a/CODEOWNERS b/CODEOWNERS
new file mode 100644
index 0000000..e13a6b5
--- /dev/null
+++ b/CODEOWNERS
@@ -0,0 +1,84 @@
+# Each line is a file pattern followed by one or more owners. Being an owner
+# means those groups or individuals will be added as reviewers to PRs affecting
+# those areas of the code.
+# +# More on CODEOWNERS files: https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners + +# Select Auth engines are owned by Ecosystem +/builtin/credential/aws/ @hashicorp/vault-ecosystem +/builtin/credential/github/ @hashicorp/vault-ecosystem +/builtin/credential/ldap/ @hashicorp/vault-ecosystem +/builtin/credential/okta/ @hashicorp/vault-ecosystem + +# Secrets engines (pki, ssh, totp and transit omitted) +/builtin/logical/aws/ @hashicorp/vault-ecosystem +/builtin/logical/cassandra/ @hashicorp/vault-ecosystem +/builtin/logical/consul/ @hashicorp/vault-ecosystem +/builtin/logical/database/ @hashicorp/vault-ecosystem +/builtin/logical/mongodb/ @hashicorp/vault-ecosystem +/builtin/logical/mssql/ @hashicorp/vault-ecosystem +/builtin/logical/mysql/ @hashicorp/vault-ecosystem +/builtin/logical/nomad/ @hashicorp/vault-ecosystem +/builtin/logical/postgresql/ @hashicorp/vault-ecosystem +/builtin/logical/rabbitmq/ @hashicorp/vault-ecosystem + +/plugins/ @hashicorp/vault-ecosystem +/vault/plugin_catalog.go @hashicorp/vault-ecosystem + +/website/content/ @hashicorp/vault-education-approvers +/website/content/docs/plugin-portal.mdx @acahn @hashicorp/vault-education-approvers + +# Plugin docs +/website/content/docs/plugins/ @fairclothjm @hashicorp/vault-education-approvers +/website/content/docs/upgrading/plugins.mdx @fairclothjm @hashicorp/vault-education-approvers + +# UI code related to Vault's JWT/OIDC auth method and OIDC provider. +# Changes to these files often require coordination with backend code, +# so stewards of the backend code are added below for notification. +/ui/app/components/auth-jwt.js @austingebauer +/ui/app/routes/vault/cluster/oidc-*.js @austingebauer + +# Release config; service account is required for automation tooling. 
+/.release/ @hashicorp/release-engineering @hashicorp/github-secure-vault-core @hashicorp/quality-team
+/.github/workflows/build.yml @hashicorp/release-engineering @hashicorp/github-secure-vault-core @hashicorp/quality-team
+
+# Quality engineering
+/.github/ @hashicorp/quality-team
+/enos/ @hashicorp/quality-team
+
+# Cryptosec
+/builtin/logical/pki/ @hashicorp/vault-crypto
+/builtin/logical/pkiext/ @hashicorp/vault-crypto
+/website/content/docs/secrets/pki/ @hashicorp/vault-crypto
+/website/content/api-docs/secret/pki.mdx @hashicorp/vault-crypto
+/builtin/credential/cert/ @hashicorp/vault-crypto
+/website/content/docs/auth/cert.mdx @hashicorp/vault-crypto
+/website/content/api-docs/auth/cert.mdx @hashicorp/vault-crypto
+/builtin/logical/ssh/ @hashicorp/vault-crypto
+/website/content/docs/secrets/ssh/ @hashicorp/vault-crypto
+/website/content/api-docs/secret/ssh.mdx @hashicorp/vault-crypto
+/builtin/logical/transit/ @hashicorp/vault-crypto
+/website/content/docs/secrets/transit/ @hashicorp/vault-crypto
+/website/content/api-docs/secret/transit.mdx @hashicorp/vault-crypto
+/helper/random/ @hashicorp/vault-crypto
+/sdk/helper/certutil/ @hashicorp/vault-crypto
+/sdk/helper/cryptoutil/ @hashicorp/vault-crypto
+/sdk/helper/kdf/ @hashicorp/vault-crypto
+/sdk/helper/keysutil/ @hashicorp/vault-crypto
+/sdk/helper/ocsp/ @hashicorp/vault-crypto
+/sdk/helper/salt/ @hashicorp/vault-crypto
+/sdk/helper/tlsutil/ @hashicorp/vault-crypto
+/shamir/ @hashicorp/vault-crypto
+/vault/barrier* @hashicorp/vault-crypto
+/vault/managed_key* @hashicorp/vault-crypto
+/vault/seal* @hashicorp/vault-crypto
+/vault/seal/ @hashicorp/vault-crypto
+/website/content/docs/configuration/seal/ @hashicorp/vault-crypto
+/website/content/docs/enterprise/sealwrap.mdx @hashicorp/vault-crypto
+/website/content/api-docs/system/sealwrap-rewrap.mdx @hashicorp/vault-crypto
+/website/content/docs/secrets/transform/ @hashicorp/vault-crypto
+/website/content/api-docs/secret/transform.mdx @hashicorp/vault-crypto
+/website/content/docs/secrets/kmip-profiles.mdx @hashicorp/vault-crypto
+/website/content/docs/secrets/kmip.mdx @hashicorp/vault-crypto
+/website/content/api-docs/secret/kmip.mdx @hashicorp/vault-crypto
+/website/content/docs/enterprise/fips/ @hashicorp/vault-crypto
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..5878017
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,146 @@
+# Contributing to Vault
+
+**Please note:** We take Vault's security and our users' trust very seriously.
+If you believe you have found a security issue in Vault, please responsibly
+disclose by contacting us at security@hashicorp.com.
+
+**First:** if you're unsure or afraid of _anything_, just ask or submit the
+issue or pull request anyway. You won't be yelled at for giving it your best
+effort. The worst that can happen is that you'll be politely asked to change
+something. We appreciate any sort of contributions, and don't want a wall of
+rules to get in the way of that.
+
+That said, if you want to ensure that a pull request is likely to be merged,
+talk to us! You can find out our thoughts and ensure that your contribution
+won't clash with or be obviated by Vault's normal direction. A great way to do
+this is via the [Vault Discussion Forum][2].
+
+## Issues
+
+This section covers what we're looking for when issues are reported.
+
+Addressing all the points we're looking for raises the chances that we can
+quickly merge or address your contributions.
+
+### Reporting an Issue
+
+* Make sure you test against the latest released version. It is possible we
+  already fixed the bug you're experiencing. Even better is if you can test
+  against the `main` branch, as bugs are regularly fixed there but new versions
+  are only released every few months.
+
+* Provide steps to reproduce the issue, and if possible include the expected
+  results as well as the actual results. Please provide text, not screenshots!
+
+* If you are seeing an internal Vault error (a status code of 5xx), please be
+  sure to post relevant parts of (or the entire) Vault log, as often these
+  errors are logged on the server but not reported to the user.
+
+* If you experienced a panic, please create a [gist](https://gist.github.com)
+  of the *entire* generated crash log for us to look at. Double check that
+  no sensitive items were in the log.
+
+* Respond as promptly as possible to any questions the Vault team raises on
+  your issue.
+
+### Issue Lifecycle
+
+1. The issue is reported.
+
+2. The issue is verified and categorized by a Vault collaborator.
+   Categorization is done via tags. For example, bugs are marked as "bugs".
+
+3. Unless it is critical, the issue may be left for a period of time (sometimes
+   many weeks), giving outside contributors -- maybe you!? -- a chance to
+   address the issue.
+
+4. The issue is addressed in a pull request or commit. The issue will be
+   referenced in the commit message so that the code that fixes it is clearly
+   linked.
+
+5. The issue is closed.
+
+6. Issues that are not reproducible and/or have not received responses for a
+   long time are considered stale. In order to provide faster responses and
+   better engagement with the community, we strive to keep the issue tracker
+   clean and the issue count low. In this regard, our current policy is to
+   close stale issues after 30 days. Closed issues will still be indexed and
+   available for future viewers. If users feel that an issue is still
+   relevant, we encourage reopening it.
+
+## Pull requests
+
+When submitting a PR you should reference an existing issue. If no issue already exists,
+please create one. This can be skipped for trivial PRs like fixing typos.
+
+Creating an issue in advance of working on the PR can help to avoid duplication of effort,
+e.g. maybe we know of existing related work. Or it may be that we can provide guidance
+that will help with your approach.
+
+Your pull request should have a description of what it accomplishes, how it does so,
+and why you chose the approach you did. PRs should include unit tests that validate
+correctness, and the existing tests must pass. Follow-up work to fix tests
+does not need a fresh issue filed.
+
+Someone will do a first pass review on your PR making sure it follows the guidelines
+in this document. If it doesn't, we'll mark the PR incomplete and ask you to follow
+up on the missing requirements.
+
+### Changelog Entries
+Please include a file within your PR named `changelog/#.txt`, where `#` is your
+pull request ID. There are many examples under [changelog](changelog/), but
+the general format is
+
+````
+```release-note:CATEGORY
+COMPONENT: summary of change
+```
+````
+
+CATEGORY is one of `security`, `change`, `feature`, `improvement`, or `bug`.
+Your PR is almost certain to be one of `bug` or `improvement`, but don't
+worry too much about getting it exactly right; we'll tell you if a change is
+needed.
+
+To determine the relevant component, consult [CHANGELOG](CHANGELOG.md) and pick
+whichever one you see that seems the closest match.
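+
+For example, a hypothetical bug-fix entry (the PR number `12345` is made up)
+saved as `changelog/12345.txt` could read:
+
+````
+```release-note:bug
+core: fix a hypothetical panic when renewing an expired lease
+```
+````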
+
+You do not need to include the link at the end of the summary that appears in
+CHANGELOG.md; those are generated automatically by the changelog-building
+process.
+
+### Vault UI
+
+How you contribute to the UI depends on what you want to contribute. If that is
+a new feature, please submit an informational issue first. That issue
+should include a short description of the proposed feature, the use case,
+the approach you're taking, and the tests that would be written. A mockup
+is optional but encouraged.
+
+Bug fixes are welcome in PRs, but existing tests must pass and updated logic
+should be handled in new tests. You needn't submit an issue first to fix bugs.
+
+Keep in mind that the UI should be consistent with other areas of Vault.
+The UI should be user-centered, informative, and include edge cases and errors,
+including accommodations for users who may not have permissions to view or
+interact with your feature. If you are not comfortable with UI design, a Vault
+designer can take a look at your work; just be aware that this may add some
+time to the PR process.
+
+Finally, in your code, try to avoid logic-heavy templates (when possible,
+calculate values in the .js file instead of .hbs) and Ember anti-patterns.
+And most of all, if you have any questions, please ask!
+
+## Setting up Go to work on Vault
+
+If you have never worked with Go before, you will have to complete the
+steps listed in the README, under the section [Developing Vault][1].
+
+
+[1]: https://github.com/hashicorp/vault#developing-vault
+[2]: https://discuss.hashicorp.com/c/vault
+
+## Contributor License Agreement
+
+We require that all contributors sign our Contributor License Agreement ("CLA") before we can accept the contribution.
+
+[Learn more about why HashiCorp requires a CLA and what the CLA includes](https://www.hashicorp.com/cla)
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..18ad9ef
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,158 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+## DOCKERHUB DOCKERFILE ##
+FROM alpine:3.18 as default
+
+ARG BIN_NAME
+# NAME and PRODUCT_VERSION are the name of the software in releases.hashicorp.com
+# and the version to download. Example: NAME=vault PRODUCT_VERSION=1.2.3.
+ARG NAME=vault
+ARG PRODUCT_VERSION
+ARG PRODUCT_REVISION
+# TARGETARCH and TARGETOS are set automatically when --platform is provided.
+ARG TARGETOS TARGETARCH
+
+# Additional metadata labels used by container registries, platforms
+# and certification scanners.
+LABEL name="Vault" \
+      maintainer="Vault Team " \
+      vendor="HashiCorp" \
+      version=${PRODUCT_VERSION} \
+      release=${PRODUCT_REVISION} \
+      revision=${PRODUCT_REVISION} \
+      summary="Vault is a tool for securely accessing secrets." \
+      description="Vault is a tool for securely accessing secrets. A secret is anything that you want to tightly control access to, such as API keys, passwords, certificates, and more. Vault provides a unified interface to any secret, while providing tight access control and recording a detailed audit log."
+
+COPY LICENSE /licenses/mozilla.txt
+
+# Set ARGs as ENV so that they can be used in ENTRYPOINT/CMD
+ENV NAME=$NAME
+ENV VERSION=$PRODUCT_VERSION
+
+# Create a non-root user to run the software.
+RUN addgroup ${NAME} && adduser -S -G ${NAME} ${NAME}
+
+RUN apk add --no-cache libcap su-exec dumb-init tzdata
+
+COPY dist/$TARGETOS/$TARGETARCH/$BIN_NAME /bin/
+
+# /vault/logs is made available to use as a location to store audit logs, if
+# desired; /vault/file is made available to use as a location with the file
+# storage backend, if desired; the server will be started with /vault/config as
+# the configuration directory so you can add additional config files in that
+# location.
+RUN mkdir -p /vault/logs && \
+    mkdir -p /vault/file && \
+    mkdir -p /vault/config && \
+    chown -R ${NAME}:${NAME} /vault
+
+# Expose the logs directory as a volume since there's potentially long-running
+# state in there
+VOLUME /vault/logs
+
+# Expose the file directory as a volume since there's potentially long-running
+# state in there
+VOLUME /vault/file
+
+# 8200/tcp is the primary interface that applications use to interact with
+# Vault.
+EXPOSE 8200
+
+# The entry point script uses dumb-init as the top-level process to reap any
+# zombie processes created by Vault sub-processes.
+#
+# For production derivatives of this container, you should add the IPC_LOCK
+# capability so that Vault can mlock memory.
+COPY .release/docker/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
+ENTRYPOINT ["docker-entrypoint.sh"]
+
+
+# # By default you'll get a single-node development server that stores everything
+# # in RAM and bootstraps itself. Don't use this configuration for production.
+CMD ["server", "-dev"]
+
+
+## UBI DOCKERFILE ##
+FROM registry.access.redhat.com/ubi8/ubi-minimal:8.8 as ubi
+
+ARG BIN_NAME
+# PRODUCT_VERSION is the version built dist/$TARGETOS/$TARGETARCH/$BIN_NAME,
+# which we COPY in later. Example: PRODUCT_VERSION=1.2.3.
+ARG PRODUCT_VERSION
+ARG PRODUCT_REVISION
+# TARGETARCH and TARGETOS are set automatically when --platform is provided.
+ARG TARGETOS TARGETARCH
+
+# Additional metadata labels used by container registries, platforms
+# and certification scanners.
+LABEL name="Vault" \
+      maintainer="Vault Team " \
+      vendor="HashiCorp" \
+      version=${PRODUCT_VERSION} \
+      release=${PRODUCT_REVISION} \
+      revision=${PRODUCT_REVISION} \
+      summary="Vault is a tool for securely accessing secrets." \
+      description="Vault is a tool for securely accessing secrets. A secret is anything that you want to tightly control access to, such as API keys, passwords, certificates, and more. Vault provides a unified interface to any secret, while providing tight access control and recording a detailed audit log."
+
+COPY LICENSE /licenses/mozilla.txt
+
+# Set ARGs as ENV so that they can be used in ENTRYPOINT/CMD
+ENV NAME=$NAME
+ENV VERSION=$PRODUCT_VERSION
+
+# Set up certificates, our base tools, and Vault. Unlike the other version of
+# this (https://github.com/hashicorp/docker-vault/blob/master/ubi/Dockerfile),
+# we copy in the Vault binary from CRT.
+RUN set -eux; \
+    microdnf install -y ca-certificates gnupg openssl libcap tzdata procps shadow-utils util-linux
+
+# Create a non-root user to run the software.
+RUN groupadd --gid 1000 vault && \
+    adduser --uid 100 --system -g vault vault && \
+    usermod -a -G root vault
+
+# Copy in the new Vault from CRT pipeline, rather than fetching it from our
+# public releases.
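+# (Illustration, assuming a BuildKit-based build: passing --platform, e.g.
+# `docker buildx build --platform linux/amd64 ...`, populates
+# TARGETOS/TARGETARCH so the COPY below resolves to dist/linux/amd64/$BIN_NAME.)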
+COPY dist/$TARGETOS/$TARGETARCH/$BIN_NAME /bin/
+
+# /vault/logs is made available to use as a location to store audit logs, if
+# desired; /vault/file is made available to use as a location with the file
+# storage backend, if desired; the server will be started with /vault/config as
+# the configuration directory so you can add additional config files in that
+# location.
+ENV HOME /home/vault
+RUN mkdir -p /vault/logs && \
+    mkdir -p /vault/file && \
+    mkdir -p /vault/config && \
+    mkdir -p $HOME && \
+    chown -R vault /vault && chown -R vault $HOME && \
+    chgrp -R 0 $HOME && chmod -R g+rwX $HOME && \
+    chgrp -R 0 /vault && chmod -R g+rwX /vault
+
+# Expose the logs directory as a volume since there's potentially long-running
+# state in there
+VOLUME /vault/logs
+
+# Expose the file directory as a volume since there's potentially long-running
+# state in there
+VOLUME /vault/file
+
+# 8200/tcp is the primary interface that applications use to interact with
+# Vault.
+EXPOSE 8200
+
+# The entry point script uses dumb-init as the top-level process to reap any
+# zombie processes created by Vault sub-processes.
+#
+# For production derivatives of this container, you should add the IPC_LOCK
+# capability so that Vault can mlock memory.
+COPY .release/docker/ubi-docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
+ENTRYPOINT ["docker-entrypoint.sh"]
+
+# Use the Vault user as the default user for starting this container.
+USER vault
+
+# # By default you'll get a single-node development server that stores everything
+# # in RAM and bootstraps itself. Don't use this configuration for production.
+CMD ["server", "-dev"]
diff --git a/HCPV_badge.png b/HCPV_badge.png
new file mode 100644
index 0000000..243dc73
Binary files /dev/null and b/HCPV_badge.png differ
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..f4f97ee
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,365 @@
+Copyright (c) 2015 HashiCorp, Inc.
+
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+     means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of
+        version 1.1 or earlier of the License, but not also under the terms of
+        a Secondary License.
+
+1.6. "Executable Form"
+
+     means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+     means a work that combines Covered Software with other material, in a
+     separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+     means this document.
+
+1.9. "Licensable"
+
+     means having the right to grant, to the maximum extent possible, whether
+     at the time of the initial grant or subsequently, any and all of the
+     rights conveyed by this License.
+
+1.10. "Modifications"
+
+     means any of the following:
+
+     a.
any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. 
Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..eed53a4 --- /dev/null +++ b/Makefile @@ -0,0 +1,325 @@ +# Determine this makefile's path. +# Be sure to place this BEFORE `include` directives, if any. +THIS_FILE := $(lastword $(MAKEFILE_LIST)) + +TEST?=$$($(GO_CMD) list ./... | grep -v /vendor/ | grep -v /integ) +TEST_TIMEOUT?=45m +EXTENDED_TEST_TIMEOUT=60m +INTEG_TEST_TIMEOUT=120m +VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods -nilfunc -printf -rangeloops -shift -structtags -unsafeptr +EXTERNAL_TOOLS_CI=\ + golang.org/x/tools/cmd/goimports \ + github.com/golangci/revgrep/cmd/revgrep \ + mvdan.cc/gofumpt \ + honnef.co/go/tools/cmd/staticcheck +EXTERNAL_TOOLS=\ + github.com/client9/misspell/cmd/misspell +GOFMT_FILES?=$$(find . -name '*.go' | grep -v pb.go | grep -v vendor) +SED?=$(shell command -v gsed || command -v sed) + +GO_VERSION_MIN=$$(cat $(CURDIR)/.go-version) +PROTOC_VERSION_MIN=3.21.12 +GO_CMD?=go +CGO_ENABLED?=0 +ifneq ($(FDB_ENABLED), ) + CGO_ENABLED=1 + BUILD_TAGS+=foundationdb +endif + +default: dev + +# bin generates the releasable binaries for Vault +bin: prep + @CGO_ENABLED=$(CGO_ENABLED) BUILD_TAGS='$(BUILD_TAGS) ui' sh -c "'$(CURDIR)/scripts/build.sh'" + +# dev creates binaries for testing Vault locally. These are put +# into ./bin/ as well as $GOPATH/bin +dev: BUILD_TAGS+=testonly +dev: prep + @CGO_ENABLED=$(CGO_ENABLED) BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'" +dev-ui: BUILD_TAGS+=testonly +dev-ui: assetcheck prep + @CGO_ENABLED=$(CGO_ENABLED) BUILD_TAGS='$(BUILD_TAGS) ui' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'" +dev-dynamic: BUILD_TAGS+=testonly +dev-dynamic: prep + @CGO_ENABLED=1 BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'" + +# *-mem variants will enable memory profiling which will write snapshots of heap usage +# to $TMP/vaultprof every 5 minutes. These can be analyzed using `$ go tool pprof `. +# Note that any build can have profiling added via: `$ BUILD_TAGS=memprofiler make ...` +dev-mem: BUILD_TAGS+=memprofiler +dev-mem: dev +dev-ui-mem: BUILD_TAGS+=memprofiler +dev-ui-mem: assetcheck dev-ui +dev-dynamic-mem: BUILD_TAGS+=memprofiler +dev-dynamic-mem: dev-dynamic + +# Creates a Docker image by adding the compiled linux/amd64 binary found in ./bin. +# The resulting image is tagged "vault:dev". +docker-dev: BUILD_TAGS+=testonly +docker-dev: prep + docker build --build-arg VERSION=$(GO_VERSION_MIN) --build-arg BUILD_TAGS="$(BUILD_TAGS)" -f scripts/docker/Dockerfile -t vault:dev . + +docker-dev-ui: BUILD_TAGS+=testonly +docker-dev-ui: prep + docker build --build-arg VERSION=$(GO_VERSION_MIN) --build-arg BUILD_TAGS="$(BUILD_TAGS)" -f scripts/docker/Dockerfile.ui -t vault:dev-ui . 
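+
+# Example (illustration only; all three targets are defined in this Makefile):
+#   make bootstrap    # install the extra build tooling
+#   make dev          # compile a test binary into ./bin and $GOPATH/bin
+#   make docker-dev   # package the compiled linux/amd64 binary as vault:dev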
+
+# test runs the unit tests and vets the code
+test: BUILD_TAGS+=testonly
+test: prep
	@CGO_ENABLED=$(CGO_ENABLED) \
	VAULT_ADDR= \
	VAULT_TOKEN= \
	VAULT_DEV_ROOT_TOKEN_ID= \
	VAULT_ACC= \
	$(GO_CMD) test -tags='$(BUILD_TAGS)' $(TEST) $(TESTARGS) -timeout=$(TEST_TIMEOUT) -parallel=20

testcompile: BUILD_TAGS+=testonly
testcompile: prep
	@for pkg in $(TEST) ; do \
		$(GO_CMD) test -v -c -tags='$(BUILD_TAGS)' $$pkg -parallel=4 ; \
	done

# testacc runs acceptance tests
testacc: BUILD_TAGS+=testonly
testacc: prep
	@if [ "$(TEST)" = "./..." ]; then \
		echo "ERROR: Set TEST to a specific package"; \
		exit 1; \
	fi
	VAULT_ACC=1 $(GO_CMD) test -tags='$(BUILD_TAGS)' $(TEST) -v $(TESTARGS) -timeout=$(EXTENDED_TEST_TIMEOUT)

# testrace runs the race checker
testrace: BUILD_TAGS+=testonly
testrace: prep
	@CGO_ENABLED=1 \
	VAULT_ADDR= \
	VAULT_TOKEN= \
	VAULT_DEV_ROOT_TOKEN_ID= \
	VAULT_ACC= \
	$(GO_CMD) test -tags='$(BUILD_TAGS)' -race $(TEST) $(TESTARGS) -timeout=$(EXTENDED_TEST_TIMEOUT) -parallel=20

cover:
	./scripts/coverage.sh --html

# vet runs the Go source code static analysis tool `vet` to find
# any common errors.
vet:
	@$(GO_CMD) list -f '{{.Dir}}' ./... | grep -v /vendor/ \
		| grep -v '.*github.com/hashicorp/vault$$' \
		| xargs $(GO_CMD) vet ; if [ $$? -eq 1 ]; then \
			echo ""; \
			echo "Vet found suspicious constructs. Please check the reported constructs"; \
			echo "and fix them if necessary before submitting the code for review."; \
		fi

# deprecations runs the staticcheck tool to look for deprecations. It checks the
# entire codebase for deprecated functions, variables, constants, and fields.
deprecations: bootstrap prep
	@BUILD_TAGS='$(BUILD_TAGS)' ./scripts/deprecations-checker.sh ""

# ci-deprecations runs the staticcheck tool to look for deprecations. All output
# gets piped to revgrep, which will only return an error if changes not on main
# use a deprecated function, variable, constant, or field.
ci-deprecations: ci-bootstrap prep
	@BUILD_TAGS='$(BUILD_TAGS)' ./scripts/deprecations-checker.sh main

tools/codechecker/.bin/codechecker:
	@cd tools/codechecker && $(GO_CMD) build -o .bin/codechecker .

# vet-codechecker runs our custom linters on the test functions. All output gets
# piped to revgrep, which will only return an error if a new piece of code
# violates the check.
vet-codechecker: bootstrap tools/codechecker/.bin/codechecker prep
	@$(GO_CMD) vet -vettool=./tools/codechecker/.bin/codechecker -tags=$(BUILD_TAGS) ./... 2>&1 | revgrep

# ci-vet-codechecker runs our custom linters on the test functions. All output
# gets piped to revgrep, which will only return an error if a new piece of code
# that is not on main violates the check.
ci-vet-codechecker: ci-bootstrap tools/codechecker/.bin/codechecker prep
	@$(GO_CMD) vet -vettool=./tools/codechecker/.bin/codechecker -tags=$(BUILD_TAGS) ./... 2>&1 | revgrep origin/main

# lint runs vet plus a number of other checkers; it is more comprehensive, but louder
lint:
	@$(GO_CMD) list -f '{{.Dir}}' ./... | grep -v /vendor/ \
		| xargs golangci-lint run; if [ $$? -eq 1 ]; then \
			echo ""; \
			echo "Lint found suspicious constructs. Please check the reported constructs"; \
			echo "and fix them if necessary before submitting the code for review."; \
		fi
# for CI jobs, runs lint against the changed packages in the commit
ci-lint:
	@golangci-lint run --deadline 10m --new-from-rev=HEAD~

# prep runs `go generate` to build the dynamically generated
# source files.
+#
+# n.b.: prep used to depend on fmtcheck, but since fmtcheck is
+# now run as a pre-commit hook (and there's little value in
+# making every build run the formatter), we've removed that
+# dependency.
+prep:
+	@sh -c "'$(CURDIR)/scripts/goversioncheck.sh' '$(GO_VERSION_MIN)'"
+	@GOARCH= GOOS= $(GO_CMD) generate $$($(GO_CMD) list ./... | grep -v /vendor/)
+	@if [ -d .git/hooks ]; then cp .hooks/* .git/hooks/; fi
+
+# bootstrap the build by downloading additional tools needed to build
+ci-bootstrap: .ci-bootstrap
+.ci-bootstrap:
+	@for tool in $(EXTERNAL_TOOLS_CI) ; do \
+		echo "Installing/Updating $$tool" ; \
+		GO111MODULE=off $(GO_CMD) get -u $$tool; \
+	done
+	@touch .ci-bootstrap
+
+# bootstrap the build by downloading additional tools that may be used by devs
+bootstrap: ci-bootstrap
+	go generate -tags tools tools/tools.go
+
+# Note: if you have plugins in GOPATH you can update all of them via something like:
+# for i in $(ls | grep vault-plugin-); do cd $i; git remote update; git reset --hard origin/master; dep ensure -update; git add .; git commit; git push; cd ..; done
+update-plugins:
+	grep vault-plugin- go.mod | cut -d ' ' -f 1 | while read -r P; do echo "Updating $$P..."; go get -v "$$P"; done
+
+static-assets-dir:
+	@mkdir -p ./http/web_ui
+
+install-ui-dependencies:
+	@echo "--> Installing JavaScript assets"
+	@cd ui && yarn
+
+test-ember: install-ui-dependencies
+	@echo "--> Running ember tests"
+	@cd ui && yarn run test:oss
+
+test-ember-enos: install-ui-dependencies
+	@echo "--> Running ember tests with a real backend"
+	@cd ui && yarn run test:enos
+
+check-vault-in-path:
+	@VAULT_BIN=$$(command -v vault) || { echo "vault command not found"; exit 1; }; \
+		[ -x "$$VAULT_BIN" ] || { echo "$$VAULT_BIN not executable"; exit 1; }; \
+		printf "Using Vault at %s:\n\$$ vault version\n%s\n" "$$VAULT_BIN" "$$(vault version)"
+
+ember-dist: install-ui-dependencies
+	@cd ui && npm rebuild node-sass
+	@echo "--> Building Ember application"
+	@cd ui && yarn run build
+	@rm -rf ui/if-you-need-to-delete-this-open-an-issue-async-disk-cache
+
+ember-dist-dev: install-ui-dependencies
+	@cd ui && npm rebuild node-sass
+	@echo "--> Building Ember application"
+	@cd ui && yarn run build:dev
+
+static-dist: ember-dist
+static-dist-dev: ember-dist-dev
+
+proto: bootstrap
+	@sh -c "'$(CURDIR)/scripts/protocversioncheck.sh' '$(PROTOC_VERSION_MIN)'"
+	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative vault/*.proto
+	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative vault/activity/activity_log.proto
+	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative vault/activity/generation/generate_data.proto
+	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative helper/storagepacker/types.proto
+	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative helper/forwarding/types.proto
+	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative sdk/logical/*.proto
+	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative physical/raft/types.proto
+	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative helper/identity/mfa/types.proto
+	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative helper/identity/types.proto
	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative sdk/database/dbplugin/*.proto
+	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative sdk/database/dbplugin/v5/proto/*.proto
+	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative sdk/plugin/pb/*.proto
+	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative vault/tokens/token.proto
+	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative sdk/helper/pluginutil/*.proto
+	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative vault/hcp_link/proto/*/*.proto
+
+	# No additional sed expressions should be added to this list. Going forward
+	# we should just use the variable names chosen by protobuf. These are left
+	# here for backwards compatibility, namely for SDK compilation.
+	$(SED) -i -e 's/Id/ID/' -e 's/SPDX-License-IDentifier/SPDX-License-Identifier/' vault/request_forwarding_service.pb.go
+	$(SED) -i -e 's/Idp/IDP/' -e 's/Url/URL/' -e 's/Id/ID/' -e 's/IDentity/Identity/' -e 's/EntityId/EntityID/' -e 's/Api/API/' -e 's/Qr/QR/' -e 's/Totp/TOTP/' -e 's/Mfa/MFA/' -e 's/Pingid/PingID/' -e 's/namespaceId/namespaceID/' -e 's/Ttl/TTL/' -e 's/BoundCidrs/BoundCIDRs/' -e 's/SPDX-License-IDentifier/SPDX-License-Identifier/' helper/identity/types.pb.go helper/identity/mfa/types.pb.go helper/storagepacker/types.pb.go sdk/plugin/pb/backend.pb.go sdk/logical/identity.pb.go vault/activity/activity_log.pb.go
+
+	# This will inject the sentinel struct tags as decorated in the proto files.
+	protoc-go-inject-tag -input=./helper/identity/types.pb.go
+	protoc-go-inject-tag -input=./helper/identity/mfa/types.pb.go
+
+fmtcheck:
+	@sh -c "'$(CURDIR)/scripts/gofmtcheck.sh'"
+
+fmt: ci-bootstrap
+	find . -name '*.go' | grep -v pb.go | grep -v vendor | xargs go run mvdan.cc/gofumpt -w
+
+semgrep:
+	semgrep --include '*.go' --exclude 'vendor' -a -f tools/semgrep .
+
+semgrep-ci:
+	semgrep --error --include '*.go' --exclude 'vendor' -f tools/semgrep/ci .
+
+assetcheck:
+	@echo "==> Checking compiled UI assets..."
+	@sh -c "'$(CURDIR)/scripts/assetcheck.sh'"
+
+spellcheck:
+	@echo "==> Spell checking website..."
+	@misspell -error -source=text website/source
+
+mysql-database-plugin:
+	@CGO_ENABLED=0 $(GO_CMD) build -o bin/mysql-database-plugin ./plugins/database/mysql/mysql-database-plugin
+
+mysql-legacy-database-plugin:
+	@CGO_ENABLED=0 $(GO_CMD) build -o bin/mysql-legacy-database-plugin ./plugins/database/mysql/mysql-legacy-database-plugin
+
+cassandra-database-plugin:
+	@CGO_ENABLED=0 $(GO_CMD) build -o bin/cassandra-database-plugin ./plugins/database/cassandra/cassandra-database-plugin
+
+influxdb-database-plugin:
+	@CGO_ENABLED=0 $(GO_CMD) build -o bin/influxdb-database-plugin ./plugins/database/influxdb/influxdb-database-plugin
+
+postgresql-database-plugin:
+	@CGO_ENABLED=0 $(GO_CMD) build -o bin/postgresql-database-plugin ./plugins/database/postgresql/postgresql-database-plugin
+
+mssql-database-plugin:
+	@CGO_ENABLED=0 $(GO_CMD) build -o bin/mssql-database-plugin ./plugins/database/mssql/mssql-database-plugin
+
+hana-database-plugin:
+	@CGO_ENABLED=0 $(GO_CMD) build -o bin/hana-database-plugin ./plugins/database/hana/hana-database-plugin
+
+mongodb-database-plugin:
+	@CGO_ENABLED=0 $(GO_CMD) build -o bin/mongodb-database-plugin ./plugins/database/mongodb/mongodb-database-plugin
+
+.PHONY: bin default prep test vet bootstrap ci-bootstrap fmt fmtcheck mysql-database-plugin mysql-legacy-database-plugin cassandra-database-plugin influxdb-database-plugin postgresql-database-plugin mssql-database-plugin hana-database-plugin mongodb-database-plugin ember-dist ember-dist-dev static-dist static-dist-dev assetcheck check-vault-in-path packages build build-ci semgrep semgrep-ci vet-godoctests ci-vet-godoctests
+
+.NOTPARALLEL: ember-dist ember-dist-dev
+
+# These ci targets are used for building and testing in GitHub Actions
+# workflows and for Enos scenarios.
+.PHONY: ci-build
+ci-build:
+	@$(CURDIR)/scripts/ci-helper.sh build
+
+.PHONY: ci-build-ui
+ci-build-ui:
+	@$(CURDIR)/scripts/ci-helper.sh build-ui
+
+.PHONY: ci-bundle
+ci-bundle:
+	@$(CURDIR)/scripts/ci-helper.sh bundle
+
+.PHONY: ci-get-artifact-basename
+ci-get-artifact-basename:
+	@$(CURDIR)/scripts/ci-helper.sh artifact-basename
+
+.PHONY: ci-get-date
+ci-get-date:
+	@$(CURDIR)/scripts/ci-helper.sh date
+
+.PHONY: ci-get-revision
+ci-get-revision:
+	@$(CURDIR)/scripts/ci-helper.sh revision
+
+.PHONY: ci-get-version-package
+ci-get-version-package:
+	@$(CURDIR)/scripts/ci-helper.sh version-package
+
+.PHONY: ci-prepare-legal
+ci-prepare-legal:
+	@$(CURDIR)/scripts/ci-helper.sh prepare-legal
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..6b64066
--- /dev/null
+++ b/README.md
@@ -0,0 +1,288 @@
+# Vault [![CircleCI](https://circleci.com/gh/hashicorp/vault.svg?style=svg)](https://circleci.com/gh/hashicorp/vault) [![vault enterprise](https://img.shields.io/badge/vault-enterprise-yellow.svg?colorB=7c8797&colorA=000000)](https://www.hashicorp.com/products/vault/?utm_source=github&utm_medium=banner&utm_campaign=github-vault-enterprise)
+
+----
+
+**Please note**: We take Vault's security and our users' trust very seriously. If you believe you have found a security issue in Vault, _please responsibly disclose_ by contacting us at [security@hashicorp.com](mailto:security@hashicorp.com).
+ +---- + +- Website: https://www.vaultproject.io +- Announcement list: [Google Groups](https://groups.google.com/group/hashicorp-announce) +- Discussion forum: [Discuss](https://discuss.hashicorp.com/c/vault) +- Documentation: [https://www.vaultproject.io/docs/](https://www.vaultproject.io/docs/) +- Tutorials: [HashiCorp's Learn Platform](https://learn.hashicorp.com/vault) +- Certification Exam: [Vault Associate](https://www.hashicorp.com/certification/#hashicorp-certified-vault-associate) + +Vault Logo + +Vault is a tool for securely accessing secrets. A secret is anything that you want to tightly control access to, such as API keys, passwords, certificates, and more. Vault provides a unified interface to any secret, while providing tight access control and recording a detailed audit log. + +A modern system requires access to a multitude of secrets: database credentials, API keys for external services, credentials for service-oriented architecture communication, etc. Understanding who is accessing what secrets is already very difficult and platform-specific. Adding on key rolling, secure storage, and detailed audit logs is almost impossible without a custom solution. This is where Vault steps in. + +The key features of Vault are: + +* **Secure Secret Storage**: Arbitrary key/value secrets can be stored + in Vault. Vault encrypts these secrets prior to writing them to persistent + storage, so gaining access to the raw storage isn't enough to access + your secrets. Vault can write to disk, [Consul](https://www.consul.io), + and more. + +* **Dynamic Secrets**: Vault can generate secrets on-demand for some + systems, such as AWS or SQL databases. For example, when an application + needs to access an S3 bucket, it asks Vault for credentials, and Vault + will generate an AWS keypair with valid permissions on demand. After + creating these dynamic secrets, Vault will also automatically revoke them + after the lease is up. + +* **Data Encryption**: Vault can encrypt and decrypt data without storing + it. This allows security teams to define encryption parameters and + developers to store encrypted data in a location such as a SQL database without + having to design their own encryption methods. + +* **Leasing and Renewal**: All secrets in Vault have a _lease_ associated + with them. At the end of the lease, Vault will automatically revoke that + secret. Clients are able to renew leases via built-in renew APIs. + +* **Revocation**: Vault has built-in support for secret revocation. Vault + can revoke not only single secrets, but a tree of secrets, for example, + all secrets read by a specific user, or all secrets of a particular type. + Revocation assists in key rolling as well as locking down systems in the + case of an intrusion. + +Documentation, Getting Started, and Certification Exams +------------------------------- + +Documentation is available on the [Vault website](https://www.vaultproject.io/docs/). + +If you're new to Vault and want to get started with security automation, please +check out our [Getting Started guides](https://learn.hashicorp.com/collections/vault/getting-started) +on HashiCorp's learning platform. There are also [additional guides](https://learn.hashicorp.com/vault) +to continue your learning. + +For examples of how to interact with Vault from inside your application in different programming languages, see the [vault-examples](https://github.com/hashicorp/vault-examples) repo. 
An out-of-the-box [sample application](https://github.com/hashicorp/hello-vault-go) is also available.
+
+Show off your Vault knowledge by passing a certification exam. Visit the
+[certification page](https://www.hashicorp.com/certification/#hashicorp-certified-vault-associate)
+for information about exams and find [study materials](https://learn.hashicorp.com/collections/vault/certification)
+on HashiCorp's learning platform.
+
+Developing Vault
+--------------------
+
+If you wish to work on Vault itself or any of its built-in systems, you'll
+first need [Go](https://www.golang.org) installed on your machine.
+
+For local development, first make sure Go is properly installed, including setting up a
+[GOPATH](https://golang.org/doc/code.html#GOPATH). Ensure that `$GOPATH/bin` is in
+your path, as some distributions bundle old versions of the build tools. Next, clone this
+repository. Vault uses [Go Modules](https://github.com/golang/go/wiki/Modules),
+so it is recommended that you clone the repository ***outside*** of the GOPATH.
+You can then download any required build tools by bootstrapping your environment:
+
+```sh
+$ make bootstrap
+...
+```
+
+To compile a development version of Vault, run `make` or `make dev`. This will
+put the Vault binary in the `bin` and `$GOPATH/bin` folders:
+
+```sh
+$ make dev
+...
+$ bin/vault
+...
+```
+
+To compile a development version of Vault with the UI, run `make static-dist dev-ui`. This will
+put the Vault binary in the `bin` and `$GOPATH/bin` folders:
+
+```sh
+$ make static-dist dev-ui
+...
+$ bin/vault
+...
+```
+
+To run tests, type `make test`. Note: this requires Docker to be installed. If
+this exits with exit status 0, then everything is working!
+
+```sh
+$ make test
+...
+```
+
+If you're developing a specific package, you can run tests for just that
+package by specifying the `TEST` variable. In the example below, only the
+`vault` package tests will be run.
+
+```sh
+$ make test TEST=./vault
+...
+```
+
+### Importing Vault
+
+This repository publishes two libraries that may be imported by other projects:
+`github.com/hashicorp/vault/api` and `github.com/hashicorp/vault/sdk`.
+
+Note that this repository also contains Vault (the product), and as with most Go
+projects, Vault uses Go modules to manage its dependencies. The mechanism to do
+that is the [go.mod](./go.mod) file. As it happens, the presence of that file
+also makes it theoretically possible to import Vault as a dependency into other
+projects. Some other projects have made a practice of doing so in order to take
+advantage of testing tooling that was developed for testing Vault itself. This
+is not, and has never been, a supported way to use the Vault project. We aren't
+likely to fix bugs relating to failure to import `github.com/hashicorp/vault`
+into your project.
+
+See also the section "Docker-based Tests" below.
+
+### Acceptance Tests
+
+Vault has comprehensive [acceptance tests](https://en.wikipedia.org/wiki/Acceptance_testing)
+covering most of the features of the secret and auth methods.
+
+If you're working on a feature of a secret or auth method and want to
+verify it is functioning (and also hasn't broken anything else), we recommend
+running the acceptance tests.
+
+**Warning:** The acceptance tests create/destroy/modify *real resources*, which
+may incur real costs in some cases. In the presence of a bug, it is technically
+possible that broken backends could leave dangling data behind. Therefore,
+please run the acceptance tests at your own risk.
At the very least, +we recommend running them in their own private account for whatever backend +you're testing. + +To run the acceptance tests, invoke `make testacc`: + +```sh +$ make testacc TEST=./builtin/logical/consul +... +``` + +The `TEST` variable is required, and you should specify the folder where the +backend is. The `TESTARGS` variable is recommended to filter down to a specific +resource to test, since testing all of them at once can sometimes take a very +long time. + +Acceptance tests typically require other environment variables to be set for +things such as access keys. The test itself should error early and tell +you what to set, so it is not documented here. + +For more information on Vault Enterprise features, visit the [Vault Enterprise site](https://www.hashicorp.com/products/vault/?utm_source=github&utm_medium=referral&utm_campaign=github-vault-enterprise). + +### Docker-based Tests + +We have created an experimental new testing mechanism inspired by NewTestCluster. +An example of how to use it: + +```go +import ( + "testing" + "github.com/hashicorp/vault/sdk/helper/testcluster/docker" +) + +func Test_Something_With_Docker(t *testing.T) { + opts := &docker.DockerClusterOptions{ + ImageRepo: "hashicorp/vault", // or "hashicorp/vault-enterprise" + ImageTag: "latest", + } + cluster := docker.NewTestDockerCluster(t, opts) + defer cluster.Cleanup() + + client := cluster.Nodes()[0].APIClient() + _, err := client.Logical().Read("sys/storage/raft/configuration") + if err != nil { + t.Fatal(err) + } +} +``` + +Or for Enterprise: + +```go +import ( + "testing" + "github.com/hashicorp/vault/sdk/helper/testcluster/docker" +) + +func Test_Something_With_Docker(t *testing.T) { + opts := &docker.DockerClusterOptions{ + ImageRepo: "hashicorp/vault-enterprise", + ImageTag: "latest", + VaultLicense: licenseString, // not a path, the actual license bytes + } + cluster := docker.NewTestDockerCluster(t, opts) + defer cluster.Cleanup() +} +``` + +Here is a more realistic example of how we use it in practice. DefaultOptions uses +`hashicorp/vault`:`latest` as the repo and tag, but it also looks at the environment +variable VAULT_BINARY. If populated, it will copy the local file referenced by +VAULT_BINARY into the container. This is useful when testing local changes. + +Instead of setting the VaultLicense option, you can set the VAULT_LICENSE_CI environment +variable, which is better than committing a license to version control. + +Optionally you can set COMMIT_SHA, which will be appended to the image name we +build as a debugging convenience. + +```go +func Test_Custom_Build_With_Docker(t *testing.T) { + opts := docker.DefaultOptions(t) + cluster := docker.NewTestDockerCluster(t, opts) + defer cluster.Cleanup() +} +``` + +There are a variety of helpers in the `github.com/hashicorp/vault/sdk/helper/testcluster` +package, e.g. these tests below will create a pair of 3-node clusters and link them using +PR or DR replication respectively, and fail if the replication state doesn't become healthy +before the passed context expires. + +Again, as written, these depend on having a Vault Enterprise binary locally and the env +var VAULT_BINARY set to point to it, as well as having VAULT_LICENSE_CI set. 
+ +```go +func TestStandardPerfReplication_Docker(t *testing.T) { + opts := docker.DefaultOptions(t) + r, err := docker.NewReplicationSetDocker(t, opts) + if err != nil { + t.Fatal(err) + } + defer r.Cleanup() + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + err = r.StandardPerfReplication(ctx) + if err != nil { + t.Fatal(err) + } +} + +func TestStandardDRReplication_Docker(t *testing.T) { + opts := docker.DefaultOptions(t) + r, err := docker.NewReplicationSetDocker(t, opts) + if err != nil { + t.Fatal(err) + } + defer r.Cleanup() + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + err = r.StandardDRReplication(ctx) + if err != nil { + t.Fatal(err) + } +} +``` + +Finally, here's an example of running an existing OSS docker test with a custom binary: + +```bash +$ GOOS=linux make dev +$ VAULT_BINARY=$(pwd)/bin/vault go test -run 'TestRaft_Configuration_Docker' ./vault/external_tests/raft/raft_binary +ok github.com/hashicorp/vault/vault/external_tests/raft/raft_binary 20.960s +``` diff --git a/api/README.md b/api/README.md new file mode 100644 index 0000000..7230ce7 --- /dev/null +++ b/api/README.md @@ -0,0 +1,9 @@ +Vault API +================= + +This provides the `github.com/hashicorp/vault/api` package which contains code useful for interacting with a Vault server. + +For examples of how to use this module, see the [vault-examples](https://github.com/hashicorp/vault-examples) repo. +For a step-by-step walkthrough on using these client libraries, see the [developer quickstart](https://www.vaultproject.io/docs/get-started/developer-qs). + +[![GoDoc](https://godoc.org/github.com/hashicorp/vault/api?status.png)](https://godoc.org/github.com/hashicorp/vault/api) \ No newline at end of file diff --git a/api/api_test.go b/api/api_test.go new file mode 100644 index 0000000..8bf69e0 --- /dev/null +++ b/api/api_test.go @@ -0,0 +1,28 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "fmt" + "net" + "net/http" + "testing" +) + +// testHTTPServer creates a test HTTP server that handles requests until +// the listener returned is closed. +func testHTTPServer(t *testing.T, handler http.Handler) (*Config, net.Listener) { + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + server := &http.Server{Handler: handler} + go server.Serve(ln) + + config := DefaultConfig() + config.Address = fmt.Sprintf("http://%s", ln.Addr()) + + return config, ln +} diff --git a/api/auth.go b/api/auth.go new file mode 100644 index 0000000..c1ef7a7 --- /dev/null +++ b/api/auth.go @@ -0,0 +1,115 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "fmt" +) + +// Auth is used to perform credential backend related operations. +type Auth struct { + c *Client +} + +type AuthMethod interface { + Login(ctx context.Context, client *Client) (*Secret, error) +} + +// Auth is used to return the client for credential-backend API calls. +func (c *Client) Auth() *Auth { + return &Auth{c: c} +} + +// Login sets up the required request body for login requests to the given auth +// method's /login API endpoint, and then performs a write to it. After a +// successful login, this method will automatically set the client's token to +// the login response's ClientToken as well. 
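+//
+// As a minimal usage sketch (authMethod here stands for any AuthMethod
+// implementation, e.g. an *approle.AppRoleAuth):
+//
+//	secret, err := client.Auth().Login(context.Background(), authMethod)
+//	if err != nil {
+//		// handle the failed login
+//	}
+//	// the client's token is now set; subsequent requests are authenticated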
+//
+// The Secret returned is the authentication secret, which if desired can be
+// passed as input to the NewLifetimeWatcher method in order to start
+// automatically renewing the token.
+func (a *Auth) Login(ctx context.Context, authMethod AuthMethod) (*Secret, error) {
+	if authMethod == nil {
+		return nil, fmt.Errorf("no auth method provided for login")
+	}
+	return a.login(ctx, authMethod)
+}
+
+// MFALogin is a wrapper that helps satisfy Vault's MFA implementation.
+// If optional credentials are provided, a single-phase login will be attempted
+// and the resulting Secret will contain a ClientToken if the authentication is successful.
+// The client's token will also be set accordingly.
+//
+// If no credentials are provided, a two-phase MFA login will be assumed and the resulting
+// Secret will have an MFARequirement containing the MFARequestID to be used in a follow-up
+// call to `sys/mfa/validate` or by passing it to the method (*Auth).MFAValidate.
+func (a *Auth) MFALogin(ctx context.Context, authMethod AuthMethod, creds ...string) (*Secret, error) {
+	if len(creds) > 0 {
+		a.c.SetMFACreds(creds)
+		return a.login(ctx, authMethod)
+	}
+
+	return a.twoPhaseMFALogin(ctx, authMethod)
+}
+
+// MFAValidate validates an MFA request using the appropriate payload and a secret containing
+// Auth.MFARequirement, like the one returned by MFALogin when credentials are not provided.
+// Upon successful validation the client token will be set accordingly.
+//
+// The Secret returned is the authentication secret, which if desired can be
+// passed as input to the NewLifetimeWatcher method in order to start
+// automatically renewing the token.
+func (a *Auth) MFAValidate(ctx context.Context, mfaSecret *Secret, payload map[string]interface{}) (*Secret, error) {
+	if mfaSecret == nil || mfaSecret.Auth == nil || mfaSecret.Auth.MFARequirement == nil {
+		return nil, fmt.Errorf("secret does not contain MFARequirements")
+	}
+
+	s, err := a.c.Sys().MFAValidateWithContext(ctx, mfaSecret.Auth.MFARequirement.MFARequestID, payload)
+	if err != nil {
+		return nil, err
+	}
+
+	return a.checkAndSetToken(s)
+}
+
+// login performs the (*AuthMethod).Login() with the configured client and checks that a ClientToken is returned
+func (a *Auth) login(ctx context.Context, authMethod AuthMethod) (*Secret, error) {
+	s, err := authMethod.Login(ctx, a.c)
+	if err != nil {
+		return nil, fmt.Errorf("unable to log in to auth method: %w", err)
+	}
+
+	return a.checkAndSetToken(s)
+}
+
+// twoPhaseMFALogin performs the (*AuthMethod).Login() with the configured client
+// and checks that an MFARequirement is returned
+func (a *Auth) twoPhaseMFALogin(ctx context.Context, authMethod AuthMethod) (*Secret, error) {
+	s, err := authMethod.Login(ctx, a.c)
+	if err != nil {
+		return nil, fmt.Errorf("unable to log in: %w", err)
+	}
+	if s == nil || s.Auth == nil || s.Auth.MFARequirement == nil {
+		if s != nil {
+			s.Warnings = append(s.Warnings, "expected secret to contain MFARequirements")
+		}
+		return s, fmt.Errorf("assumed two-phase MFA login, returned secret is missing MFARequirements")
+	}
+
+	return s, nil
+}
+
+func (a *Auth) checkAndSetToken(s *Secret) (*Secret, error) {
+	if s == nil || s.Auth == nil || s.Auth.ClientToken == "" {
+		if s != nil {
+			s.Warnings = append(s.Warnings, "expected secret to contain ClientToken")
+		}
+		return s, fmt.Errorf("response did not return ClientToken, client token not set")
+	}
+
+	a.c.SetToken(s.Auth.ClientToken)
+
+	return s, nil
+}
diff --git a/api/auth/approle/approle.go
b/api/auth/approle/approle.go new file mode 100644 index 0000000..10d26b6 --- /dev/null +++ b/api/auth/approle/approle.go @@ -0,0 +1,208 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package approle + +import ( + "context" + "fmt" + "io" + "os" + "strings" + + "github.com/hashicorp/vault/api" +) + +type AppRoleAuth struct { + mountPath string + roleID string + secretID string + secretIDFile string + secretIDEnv string + unwrap bool +} + +var _ api.AuthMethod = (*AppRoleAuth)(nil) + +// SecretID is a struct that allows you to specify where your application is +// storing the secret ID required for login to the AppRole auth method. The +// recommended secure pattern is to use response-wrapping tokens rather than +// a plaintext value, by passing WithWrappingToken() to NewAppRoleAuth. +// https://learn.hashicorp.com/tutorials/vault/approle-best-practices?in=vault/auth-methods#secretid-delivery-best-practices +type SecretID struct { + // Path on the file system where the secret ID can be found. + FromFile string + // The name of the environment variable containing the application's + // secret ID. + FromEnv string + // The secret ID as a plaintext string value. + FromString string +} + +type LoginOption func(a *AppRoleAuth) error + +const ( + defaultMountPath = "approle" +) + +// NewAppRoleAuth initializes a new AppRole auth method interface to be +// passed as a parameter to the client.Auth().Login method. +// +// For a secret ID, the recommended secure pattern is to unwrap a one-time-use +// response-wrapping token that was placed here by a trusted orchestrator +// (https://learn.hashicorp.com/tutorials/vault/approle-best-practices?in=vault/auth-methods#secretid-delivery-best-practices) +// To indicate that the filepath points to this wrapping token and not just +// a plaintext secret ID, initialize NewAppRoleAuth with the +// WithWrappingToken LoginOption. 
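+//
+// As an illustrative sketch only (the role ID and file path below are
+// hypothetical), logging in with a response-wrapped secret ID looks like:
+//
+//	auth, err := approle.NewAppRoleAuth(
+//		"my-role-id",
+//		&approle.SecretID{FromFile: "/path/to/wrapping-token"},
+//		approle.WithWrappingToken(),
+//	)
+//	if err != nil {
+//		// handle error
+//	}
+//	secret, err := client.Auth().Login(context.Background(), auth)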
+// +// Supported options: WithMountPath, WithWrappingToken +func NewAppRoleAuth(roleID string, secretID *SecretID, opts ...LoginOption) (*AppRoleAuth, error) { + if roleID == "" { + return nil, fmt.Errorf("no role ID provided for login") + } + + if secretID == nil { + return nil, fmt.Errorf("no secret ID provided for login") + } + + err := secretID.validate() + if err != nil { + return nil, fmt.Errorf("invalid secret ID: %w", err) + } + + a := &AppRoleAuth{ + mountPath: defaultMountPath, + roleID: roleID, + } + + // secret ID will be read in at login time if it comes from a file or environment variable, in case the underlying value changes + if secretID.FromFile != "" { + a.secretIDFile = secretID.FromFile + } + + if secretID.FromEnv != "" { + a.secretIDEnv = secretID.FromEnv + } + + if secretID.FromString != "" { + a.secretID = secretID.FromString + } + + // Loop through each option + for _, opt := range opts { + // Call the option giving the instantiated + // *AppRoleAuth as the argument + err := opt(a) + if err != nil { + return nil, fmt.Errorf("error with login option: %w", err) + } + } + + // return the modified auth struct instance + return a, nil +} + +func (a *AppRoleAuth) Login(ctx context.Context, client *api.Client) (*api.Secret, error) { + if ctx == nil { + ctx = context.Background() + } + + loginData := map[string]interface{}{ + "role_id": a.roleID, + } + + var secretIDValue string + + switch { + case a.secretIDFile != "": + s, err := a.readSecretIDFromFile() + if err != nil { + return nil, fmt.Errorf("error reading secret ID: %w", err) + } + secretIDValue = s + case a.secretIDEnv != "": + s := os.Getenv(a.secretIDEnv) + if s == "" { + return nil, fmt.Errorf("secret ID was specified with an environment variable %q with an empty value", a.secretIDEnv) + } + secretIDValue = s + default: + secretIDValue = a.secretID + } + + // if the caller indicated that the value was actually a wrapping token, unwrap it first + if a.unwrap { + unwrappedToken, err := client.Logical().UnwrapWithContext(ctx, secretIDValue) + if err != nil { + return nil, fmt.Errorf("unable to unwrap response wrapping token: %w", err) + } + loginData["secret_id"] = unwrappedToken.Data["secret_id"] + } else { + loginData["secret_id"] = secretIDValue + } + + path := fmt.Sprintf("auth/%s/login", a.mountPath) + resp, err := client.Logical().WriteWithContext(ctx, path, loginData) + if err != nil { + return nil, fmt.Errorf("unable to log in with app role auth: %w", err) + } + + return resp, nil +} + +func WithMountPath(mountPath string) LoginOption { + return func(a *AppRoleAuth) error { + a.mountPath = mountPath + return nil + } +} + +func WithWrappingToken() LoginOption { + return func(a *AppRoleAuth) error { + a.unwrap = true + return nil + } +} + +func (a *AppRoleAuth) readSecretIDFromFile() (string, error) { + secretIDFile, err := os.Open(a.secretIDFile) + if err != nil { + return "", fmt.Errorf("unable to open file containing secret ID: %w", err) + } + defer secretIDFile.Close() + + limitedReader := io.LimitReader(secretIDFile, 1000) + secretIDBytes, err := io.ReadAll(limitedReader) + if err != nil { + return "", fmt.Errorf("unable to read secret ID: %w", err) + } + + secretIDValue := strings.TrimSuffix(string(secretIDBytes), "\n") + + return secretIDValue, nil +} + +func (secretID *SecretID) validate() error { + if secretID.FromFile == "" && secretID.FromEnv == "" && secretID.FromString == "" { + return fmt.Errorf("secret ID for AppRole must be provided with a source file, environment variable, or plaintext 
string") + } + + if secretID.FromFile != "" { + if secretID.FromEnv != "" || secretID.FromString != "" { + return fmt.Errorf("only one source for the secret ID should be specified") + } + } + + if secretID.FromEnv != "" { + if secretID.FromFile != "" || secretID.FromString != "" { + return fmt.Errorf("only one source for the secret ID should be specified") + } + } + + if secretID.FromString != "" { + if secretID.FromFile != "" || secretID.FromEnv != "" { + return fmt.Errorf("only one source for the secret ID should be specified") + } + } + return nil +} diff --git a/api/auth/approle/approle_test.go b/api/auth/approle/approle_test.go new file mode 100644 index 0000000..cdfb4e2 --- /dev/null +++ b/api/auth/approle/approle_test.go @@ -0,0 +1,135 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package approle + +import ( + "context" + "encoding/json" + "fmt" + "net" + "net/http" + "os" + "strings" + "testing" + + "github.com/hashicorp/vault/api" +) + +// testHTTPServer creates a test HTTP server that handles requests until +// the listener returned is closed. +func testHTTPServer( + t *testing.T, handler http.Handler, +) (*api.Config, net.Listener) { + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + server := &http.Server{Handler: handler} + go server.Serve(ln) + + config := api.DefaultConfig() + config.Address = fmt.Sprintf("http://%s", ln.Addr()) + + return config, ln +} + +func init() { + os.Setenv("VAULT_TOKEN", "") +} + +func TestLogin(t *testing.T) { + secretIDEnvVar := "APPROLE_SECRET_ID" + allowedRoleID := "my-role-id" + allowedSecretID := "my-secret-id" + + content := []byte(allowedSecretID) + tmpfile, err := os.CreateTemp("", "file-containing-secret-id") + if err != nil { + t.Fatalf("error creating temp file: %v", err) + } + defer os.Remove(tmpfile.Name()) // clean up + err = os.Setenv(secretIDEnvVar, allowedSecretID) + if err != nil { + t.Fatalf("error writing secret ID to env var: %v", err) + } + + if _, err := tmpfile.Write(content); err != nil { + t.Fatalf("error writing to temp file: %v", err) + } + if err := tmpfile.Close(); err != nil { + t.Fatalf("error closing temp file: %v", err) + } + + // a response to return if the correct values were passed to login + authSecret := &api.Secret{ + Auth: &api.SecretAuth{ + ClientToken: "a-client-token", + }, + } + + authBytes, err := json.Marshal(authSecret) + if err != nil { + t.Fatalf("error marshaling json: %v", err) + } + + handler := func(w http.ResponseWriter, req *http.Request) { + payload := make(map[string]interface{}) + err := json.NewDecoder(req.Body).Decode(&payload) + if err != nil { + t.Fatalf("error decoding json: %v", err) + } + if payload["role_id"] == allowedRoleID && payload["secret_id"] == allowedSecretID { + w.Write(authBytes) + } + } + + config, ln := testHTTPServer(t, http.HandlerFunc(handler)) + defer ln.Close() + + config.Address = strings.ReplaceAll(config.Address, "127.0.0.1", "localhost") + client, err := api.NewClient(config) + if err != nil { + t.Fatalf("error initializing Vault client: %v", err) + } + + authFromFile, err := NewAppRoleAuth(allowedRoleID, &SecretID{FromFile: tmpfile.Name()}) + if err != nil { + t.Fatalf("error initializing AppRoleAuth with secret ID file: %v", err) + } + + loginRespFromFile, err := client.Auth().Login(context.TODO(), authFromFile) + if err != nil { + t.Fatalf("error logging in with secret ID from file: %v", err) + } + if loginRespFromFile.Auth == nil || loginRespFromFile.Auth.ClientToken == "" { + 
t.Fatalf("no authentication info returned by login") + } + + authFromEnv, err := NewAppRoleAuth(allowedRoleID, &SecretID{FromEnv: secretIDEnvVar}) + if err != nil { + t.Fatalf("error initializing AppRoleAuth with secret ID env var: %v", err) + } + + loginRespFromEnv, err := client.Auth().Login(context.TODO(), authFromEnv) + if err != nil { + t.Fatalf("error logging in with secret ID from env var: %v", err) + } + if loginRespFromEnv.Auth == nil || loginRespFromEnv.Auth.ClientToken == "" { + t.Fatalf("no authentication info returned by login with secret ID from env var") + } + + authFromStr, err := NewAppRoleAuth(allowedRoleID, &SecretID{FromString: allowedSecretID}) + if err != nil { + t.Fatalf("error initializing AppRoleAuth with secret ID string: %v", err) + } + + loginRespFromStr, err := client.Auth().Login(context.TODO(), authFromStr) + if err != nil { + t.Fatalf("error logging in with string: %v", err) + } + if loginRespFromStr.Auth == nil || loginRespFromStr.Auth.ClientToken == "" { + t.Fatalf("no authentication info returned by login with secret ID from string") + } +} diff --git a/api/auth/approle/go.mod b/api/auth/approle/go.mod new file mode 100644 index 0000000..a1d1f47 --- /dev/null +++ b/api/auth/approle/go.mod @@ -0,0 +1,5 @@ +module github.com/hashicorp/vault/api/auth/approle + +go 1.16 + +require github.com/hashicorp/vault/api v1.9.2 diff --git a/api/auth/approle/go.sum b/api/auth/approle/go.sum new file mode 100644 index 0000000..dfd66b2 --- /dev/null +++ b/api/auth/approle/go.sum @@ -0,0 +1,117 @@ +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= +github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= +github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= +github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog 
v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= +github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= +github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/vault/api v1.9.2 h1:YjkZLJ7K3inKgMZ0wzCU9OHqc+UqMQyXsPXnf3Cl2as= +github.com/hashicorp/vault/api v1.9.2/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/api/auth/aws/aws.go b/api/auth/aws/aws.go new file mode 100644 index 0000000..f2aa9be --- /dev/null +++ b/api/auth/aws/aws.go @@ -0,0 +1,287 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package aws + +import ( + "context" + "encoding/base64" + "fmt" + "os" + "strings" + + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-secure-stdlib/awsutil" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/api" +) + +type AWSAuth struct { + // If not provided with the WithRole login option, the Vault server will look for a role + // with the friendly name of the IAM principal if using the IAM auth type, + // or the name of the EC2 instance's AMI ID if using the EC2 auth type. + // If no matching role is found, login will fail. + roleName string + mountPath string + // Can be "iam" or "ec2". Defaults to "iam". + authType string + // Can be "pkcs7", "identity", or "rsa2048". Defaults to "pkcs7". 
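+	// Only used by the "ec2" auth type: selects which instance-identity
+	// signature is fetched from the EC2 metadata service during Login.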
+	signatureType          string
+	region                 string
+	iamServerIDHeaderValue string
+	creds                  *credentials.Credentials
+	nonce                  string
+}
+
+var _ api.AuthMethod = (*AWSAuth)(nil)
+
+type LoginOption func(a *AWSAuth) error
+
+const (
+	iamType              = "iam"
+	ec2Type              = "ec2"
+	pkcs7Type            = "pkcs7"
+	identityType         = "identity"
+	rsa2048Type          = "rsa2048"
+	defaultMountPath     = "aws"
+	defaultAuthType      = iamType
+	defaultRegion        = "us-east-1"
+	defaultSignatureType = pkcs7Type
+)
+
+// NewAWSAuth initializes a new AWS auth method interface to be
+// passed as a parameter to the client.Auth().Login method.
+//
+// Supported options: WithRole, WithMountPath, WithIAMAuth, WithEC2Auth,
+// WithPKCS7Signature, WithIdentitySignature, WithIAMServerIDHeader, WithNonce, WithRegion
+func NewAWSAuth(opts ...LoginOption) (*AWSAuth, error) {
+	a := &AWSAuth{
+		mountPath:     defaultMountPath,
+		authType:      defaultAuthType,
+		region:        defaultRegion,
+		signatureType: defaultSignatureType,
+	}
+
+	// Loop through each option
+	for _, opt := range opts {
+		// Call the option giving the instantiated
+		// *AWSAuth as the argument
+		err := opt(a)
+		if err != nil {
+			return nil, fmt.Errorf("error with login option: %w", err)
+		}
+	}
+
+	// return the modified auth struct instance
+	return a, nil
+}
+
+// Login sets up the required request body for the AWS auth method's /login
+// endpoint, and performs a write to it. This method defaults to the "iam"
+// auth type unless NewAWSAuth is called with WithEC2Auth().
+//
+// The Vault client will set its credentials to the values of the
+// AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and AWS_SESSION_TOKEN environment
+// variables. To specify a path to a credentials file on disk instead, set
+// the environment variable AWS_SHARED_CREDENTIALS_FILE.
+func (a *AWSAuth) Login(ctx context.Context, client *api.Client) (*api.Secret, error) {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	loginData := make(map[string]interface{})
+	switch a.authType {
+	case ec2Type:
+		sess, err := session.NewSession()
+		if err != nil {
+			return nil, fmt.Errorf("error creating session to probe EC2 metadata: %w", err)
+		}
+		metadataSvc := ec2metadata.New(sess)
+		if !metadataSvc.Available() {
+			return nil, fmt.Errorf("metadata service not available")
+		}
+
+		if a.signatureType == pkcs7Type {
+			// fetch PKCS #7 signature
+			resp, err := metadataSvc.GetDynamicData("/instance-identity/pkcs7")
+			if err != nil {
+				return nil, fmt.Errorf("unable to get PKCS 7 data from metadata service: %w", err)
+			}
+			pkcs7 := strings.TrimSpace(resp)
+			loginData["pkcs7"] = pkcs7
+		} else if a.signatureType == identityType {
+			// fetch signature from identity document
+			doc, err := metadataSvc.GetDynamicData("/instance-identity/document")
+			if err != nil {
+				return nil, fmt.Errorf("error requesting instance identity doc: %w", err)
+			}
+			loginData["identity"] = base64.StdEncoding.EncodeToString([]byte(doc))
+
+			signature, err := metadataSvc.GetDynamicData("/instance-identity/signature")
+			if err != nil {
+				return nil, fmt.Errorf("error requesting signature: %w", err)
+			}
+			loginData["signature"] = signature
+		} else if a.signatureType == rsa2048Type {
+			// fetch RSA 2048 signature, which is also a PKCS#7 signature
+			resp, err := metadataSvc.GetDynamicData("/instance-identity/rsa2048")
+			if err != nil {
+				return nil, fmt.Errorf("unable to get PKCS 7 data from metadata service: %w", err)
+			}
+			pkcs7 := strings.TrimSpace(resp)
+			loginData["pkcs7"] = pkcs7
+		} else {
+			return nil, fmt.Errorf("unknown signature type: %s", a.signatureType)
+		}
+
+		// Add the reauthentication value, if we have one
+		if a.nonce == "" {
+			uid, err := uuid.GenerateUUID()
+			if err != nil {
+				return nil, fmt.Errorf("error generating uuid for reauthentication value: %w", err)
+			}
+			a.nonce = uid
+		}
+		loginData["nonce"] = a.nonce
+	case iamType:
+		logger := hclog.Default()
+		if a.creds == nil {
+			credsConfig := awsutil.CredentialsConfig{
+				AccessKey:    os.Getenv("AWS_ACCESS_KEY_ID"),
+				SecretKey:    os.Getenv("AWS_SECRET_ACCESS_KEY"),
+				SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
+				Logger:       logger,
+			}
+
+			// the env vars above will take precedence if they are set, as
+			// they will be added to the ChainProvider stack first
+			var hasCredsFile bool
+			credsFilePath := os.Getenv("AWS_SHARED_CREDENTIALS_FILE")
+			if credsFilePath != "" {
+				hasCredsFile = true
+				credsConfig.Filename = credsFilePath
+			}
+
+			creds, err := credsConfig.GenerateCredentialChain(awsutil.WithSharedCredentials(hasCredsFile))
+			if err != nil {
+				return nil, err
+			}
+			if creds == nil {
+				return nil, fmt.Errorf("could not compile valid credential providers from static config, environment, shared, or instance metadata")
+			}
+
+			_, err = creds.Get()
+			if err != nil {
+				return nil, fmt.Errorf("failed to retrieve credentials from credential chain: %w", err)
+			}
+
+			a.creds = creds
+		}
+
+		data, err := awsutil.GenerateLoginData(a.creds, a.iamServerIDHeaderValue, a.region, logger)
+		if err != nil {
+			return nil, fmt.Errorf("unable to generate login data for AWS auth endpoint: %w", err)
+		}
+		loginData = data
+	}
+
+	// Add role if we have one. If not, Vault will infer the role name based
+	// on the IAM friendly name (iam auth type) or EC2 instance's
+	// AMI ID (ec2 auth type).
+	if a.roleName != "" {
+		loginData["role"] = a.roleName
+	}
+
+	if a.iamServerIDHeaderValue != "" {
+		client.AddHeader("iam_server_id_header_value", a.iamServerIDHeaderValue)
+	}
+
+	path := fmt.Sprintf("auth/%s/login", a.mountPath)
+	resp, err := client.Logical().WriteWithContext(ctx, path, loginData)
+	if err != nil {
+		return nil, fmt.Errorf("unable to log in with AWS auth: %w", err)
+	}
+
+	return resp, nil
+}
+
+func WithRole(roleName string) LoginOption {
+	return func(a *AWSAuth) error {
+		a.roleName = roleName
+		return nil
+	}
+}
+
+func WithMountPath(mountPath string) LoginOption {
+	return func(a *AWSAuth) error {
+		a.mountPath = mountPath
+		return nil
+	}
+}
+
+func WithEC2Auth() LoginOption {
+	return func(a *AWSAuth) error {
+		a.authType = ec2Type
+		return nil
+	}
+}
+
+func WithIAMAuth() LoginOption {
+	return func(a *AWSAuth) error {
+		a.authType = iamType
+		return nil
+	}
+}
+
+// WithIdentitySignature will have the client send the cryptographic identity
+// document signature to verify EC2 auth logins. Only used by the EC2 auth type.
+// If this option is not provided, the client will default to using the PKCS #7
+// signature. The signature type used should match the type of the public AWS cert
+// Vault has been configured with to verify EC2 instance identity.
+// https://www.vaultproject.io/api/auth/aws#create-certificate-configuration
+func WithIdentitySignature() LoginOption {
+	return func(a *AWSAuth) error {
+		a.signatureType = identityType
+		return nil
+	}
+}
+
+// WithPKCS7Signature will explicitly tell the client to send the PKCS #7
+// signature to verify EC2 auth logins. Only used by the EC2 auth type.
+// PKCS #7 is the default, but this method is provided for additional clarity.
+// The signature type used should match the type of the public AWS cert Vault
+// has been configured with to verify EC2 instance identity.
+// https://www.vaultproject.io/api/auth/aws#create-certificate-configuration +func WithPKCS7Signature() LoginOption { + return func(a *AWSAuth) error { + a.signatureType = pkcs7Type + return nil + } +} + +func WithIAMServerIDHeader(headerValue string) LoginOption { + return func(a *AWSAuth) error { + a.iamServerIDHeaderValue = headerValue + return nil + } +} + +// WithNonce can be used to specify a named nonce for the ec2 auth login +// method. If not provided, an automatically-generated uuid will be used +// instead. +func WithNonce(nonce string) LoginOption { + return func(a *AWSAuth) error { + a.nonce = nonce + return nil + } +} + +func WithRegion(region string) LoginOption { + return func(a *AWSAuth) error { + a.region = region + return nil + } +} diff --git a/api/auth/aws/go.mod b/api/auth/aws/go.mod new file mode 100644 index 0000000..86261e7 --- /dev/null +++ b/api/auth/aws/go.mod @@ -0,0 +1,11 @@ +module github.com/hashicorp/vault/api/auth/aws + +go 1.16 + +require ( + github.com/aws/aws-sdk-go v1.30.27 + github.com/hashicorp/go-hclog v0.16.2 + github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6 + github.com/hashicorp/go-uuid v1.0.2 + github.com/hashicorp/vault/api v1.9.2 +) diff --git a/api/auth/aws/go.sum b/api/auth/aws/go.sum new file mode 100644 index 0000000..4d30b11 --- /dev/null +++ b/api/auth/aws/go.sum @@ -0,0 +1,142 @@ +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aws/aws-sdk-go v1.30.27 h1:9gPjZWVDSoQrBO2AvqrWObS6KAZByfEJxQoCYo4ZfK0= +github.com/aws/aws-sdk-go v1.30.27/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= +github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= +github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= +github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 
h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= +github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= +github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6 h1:W9WN8p6moV1fjKLkeqEgkAMu5rauy9QeYDAmIaPuuiA= +github.com/hashicorp/go-secure-stdlib/awsutil v0.1.6/go.mod h1:MpCPSPGLDILGb4JMm94/mMi3YysIqsXzGCzkEZjcjXg= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/vault/api v1.9.2 h1:YjkZLJ7K3inKgMZ0wzCU9OHqc+UqMQyXsPXnf3Cl2as= +github.com/hashicorp/vault/api v1.9.2/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8= +github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.3/go.mod 
h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net 
v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 
v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/api/auth/azure/azure.go b/api/auth/azure/azure.go
new file mode 100644
index 0000000..b682195
--- /dev/null
+++ b/api/auth/azure/azure.go
@@ -0,0 +1,246 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package azure
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"time"
+
+	"github.com/hashicorp/vault/api"
+)
+
+type AzureAuth struct {
+	roleName  string
+	mountPath string
+	resource  string
+}
+
+var _ api.AuthMethod = (*AzureAuth)(nil)
+
+type LoginOption func(a *AzureAuth) error
+
+type responseJSON struct {
+	AccessToken  string `json:"access_token"`
+	RefreshToken string `json:"refresh_token"`
+	ExpiresIn    string `json:"expires_in"`
+	ExpiresOn    string `json:"expires_on"`
+	NotBefore    string `json:"not_before"`
+	Resource     string `json:"resource"`
+	TokenType    string `json:"token_type"`
+}
+
+type errorJSON struct {
+	Error            string `json:"error"`
+	ErrorDescription string `json:"error_description"`
+}
+
+type metadataJSON struct {
+	Compute computeJSON `json:"compute"`
+}
+
+type computeJSON struct {
+	VMName            string `json:"name"`
+	VMScaleSetName    string `json:"vmScaleSetName"`
+	SubscriptionID    string `json:"subscriptionId"`
+	ResourceGroupName string `json:"resourceGroupName"`
+}
+
+const (
+	defaultMountPath     = "azure"
+	defaultResourceURL   = "https://management.azure.com/"
+	metadataEndpoint     = "http://169.254.169.254"
+	metadataAPIVersion   = "2021-05-01"
+	apiVersionQueryParam = "api-version"
+	resourceQueryParam   = "resource"
+	clientTimeout        = 10 * time.Second
+)
+
+// NewAzureAuth initializes a new Azure auth method interface to be
+// passed as a parameter to the client.Auth().Login method.
+//
+// Supported options: WithMountPath, WithResource
+func NewAzureAuth(roleName string, opts ...LoginOption) (*AzureAuth, error) {
+	if roleName == "" {
+		return nil, fmt.Errorf("no role name provided for login")
+	}
+
+	a := &AzureAuth{
+		roleName:  roleName,
+		mountPath: defaultMountPath,
+		resource:  defaultResourceURL,
+	}
+
+	// Loop through each option
+	for _, opt := range opts {
+		// Call the option giving the instantiated
+		// *AzureAuth as the argument
+		err := opt(a)
+		if err != nil {
+			return nil, fmt.Errorf("error with login option: %w", err)
+		}
+	}
+
+	// return the modified auth struct instance
+	return a, nil
+}
+
+// Login sets up the required request body for the Azure auth method's /login
+// endpoint, and performs a write to it.
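+//
+// A minimal usage sketch (illustrative only; it assumes an initialized
+// api.Client named client and an example role name):
+//
+//	azureAuth, err := NewAzureAuth("my-role")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	secret, err := client.Auth().Login(context.Background(), azureAuth)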
+func (a *AzureAuth) Login(ctx context.Context, client *api.Client) (*api.Secret, error) {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	jwtResp, err := a.getJWT()
+	if err != nil {
+		return nil, fmt.Errorf("unable to get access token: %w", err)
+	}
+
+	metadataRespJSON, err := getMetadata()
+	if err != nil {
+		return nil, fmt.Errorf("unable to get instance metadata: %w", err)
+	}
+
+	loginData := map[string]interface{}{
+		"role":                a.roleName,
+		"jwt":                 jwtResp,
+		"vm_name":             metadataRespJSON.Compute.VMName,
+		"vmss_name":           metadataRespJSON.Compute.VMScaleSetName,
+		"subscription_id":     metadataRespJSON.Compute.SubscriptionID,
+		"resource_group_name": metadataRespJSON.Compute.ResourceGroupName,
+	}
+
+	path := fmt.Sprintf("auth/%s/login", a.mountPath)
+	resp, err := client.Logical().WriteWithContext(ctx, path, loginData)
+	if err != nil {
+		return nil, fmt.Errorf("unable to log in with Azure auth: %w", err)
+	}
+
+	return resp, nil
+}
+
+// WithMountPath overrides the default mount path of the Azure auth method.
+func WithMountPath(mountPath string) LoginOption {
+	return func(a *AzureAuth) error {
+		a.mountPath = mountPath
+		return nil
+	}
+}
+
+// WithResource allows you to specify a different resource URL to use as the aud value
+// on the JWT token than the default of Azure Public Cloud's ARM URL.
+// This should match the resource URI that an administrator configured your
+// Vault server to use.
+//
+// See https://github.com/Azure/go-autorest/blob/master/autorest/azure/environments.go
+// for a list of valid environments.
+func WithResource(url string) LoginOption {
+	return func(a *AzureAuth) error {
+		a.resource = url
+		return nil
+	}
+}
+
+// getJWT retrieves an access token from Managed Identities for Azure Resources.
+//
+// Learn more here: https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token
+func (a *AzureAuth) getJWT() (string, error) {
+	identityEndpoint, err := url.Parse(fmt.Sprintf("%s/metadata/identity/oauth2/token", metadataEndpoint))
+	if err != nil {
+		return "", fmt.Errorf("error creating metadata URL: %w", err)
+	}
+
+	identityParameters := identityEndpoint.Query()
+	identityParameters.Add(apiVersionQueryParam, metadataAPIVersion)
+	identityParameters.Add(resourceQueryParam, a.resource)
+	identityEndpoint.RawQuery = identityParameters.Encode()
+
+	req, err := http.NewRequest(http.MethodGet, identityEndpoint.String(), nil)
+	if err != nil {
+		return "", fmt.Errorf("error creating HTTP request: %w", err)
+	}
+	req.Header.Add("Metadata", "true")
+
+	client := &http.Client{
+		Timeout: clientTimeout,
+	}
+	resp, err := client.Do(req)
+	if err != nil {
+		return "", fmt.Errorf("error calling Azure token endpoint: %w", err)
+	}
+	defer resp.Body.Close()
+
+	responseBytes, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return "", fmt.Errorf("error reading response body from Azure token endpoint: %w", err)
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		var errResp errorJSON
+		err = json.Unmarshal(responseBytes, &errResp)
+		if err != nil {
+			return "", fmt.Errorf("received error message but was unable to unmarshal its contents")
+		}
+		return "", fmt.Errorf("%s error from Azure token endpoint: %s", errResp.Error, errResp.ErrorDescription)
+	}
+
+	var r responseJSON
+	err = json.Unmarshal(responseBytes, &r)
+	if err != nil {
+		return "", fmt.Errorf("error unmarshaling response from Azure token endpoint: %w", err)
+	}
+
+	return r.AccessToken, nil
+}
+
+func getMetadata() (metadataJSON, error) {
+	metadataEndpoint, err := url.Parse(fmt.Sprintf("%s/metadata/instance", metadataEndpoint))
+	if err != nil {
+		return metadataJSON{}, err
+	}
+
+	metadataParameters := metadataEndpoint.Query()
+	metadataParameters.Add(apiVersionQueryParam, metadataAPIVersion)
+	metadataEndpoint.RawQuery = metadataParameters.Encode()
+
+	req, err := http.NewRequest(http.MethodGet, metadataEndpoint.String(), nil)
+	if err != nil {
+		return metadataJSON{}, fmt.Errorf("error creating HTTP Request for metadata endpoint: %w", err)
+	}
+	req.Header.Add("Metadata", "true")
+
+	client := &http.Client{
+		Timeout: clientTimeout,
+	}
+	resp, err := client.Do(req)
+	if err != nil {
+		return metadataJSON{}, fmt.Errorf("error calling metadata endpoint: %w", err)
+	}
+	defer resp.Body.Close()
+
+	responseBytes, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return metadataJSON{}, fmt.Errorf("error reading response body from metadata endpoint: %w", err)
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		var errResp errorJSON
+		err = json.Unmarshal(responseBytes, &errResp)
+		if err != nil {
+			return metadataJSON{}, fmt.Errorf("received error message but was unable to unmarshal its contents")
+		}
+		return metadataJSON{}, fmt.Errorf("%s error from metadata endpoint: %s", errResp.Error, errResp.ErrorDescription)
+	}
+
+	var r metadataJSON
+	err = json.Unmarshal(responseBytes, &r)
+	if err != nil {
+		return metadataJSON{}, fmt.Errorf("error unmarshaling the response from metadata endpoint: %w", err)
+	}
+
+	return r, nil
+}
diff --git a/api/auth/azure/go.mod b/api/auth/azure/go.mod
new file mode 100644
index 0000000..3a192f3
--- /dev/null
+++ b/api/auth/azure/go.mod
@@ -0,0 +1,5 @@
+module github.com/hashicorp/vault/api/auth/azure
+
+go 1.16
+
+require github.com/hashicorp/vault/api v1.9.2
diff --git a/api/auth/azure/go.sum b/api/auth/azure/go.sum
new file mode 100644
index 0000000..dfd66b2
--- /dev/null
+++ b/api/auth/azure/go.sum
@@ -0,0 +1,117 @@
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= +github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= +github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= +github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= +github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= +github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/vault/api v1.9.2 h1:YjkZLJ7K3inKgMZ0wzCU9OHqc+UqMQyXsPXnf3Cl2as= +github.com/hashicorp/vault/api v1.9.2/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v1.4.1/go.mod 
h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/api/auth/gcp/gcp.go b/api/auth/gcp/gcp.go
new file mode 100644
index 0000000..2d6ef84
--- /dev/null
+++ b/api/auth/gcp/gcp.go
@@ -0,0 +1,191 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package gcp
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"time"
+
+	"cloud.google.com/go/compute/metadata"
+	credentials "cloud.google.com/go/iam/credentials/apiv1"
+	"github.com/hashicorp/vault/api"
+	credentialspb "google.golang.org/genproto/googleapis/iam/credentials/v1"
+)
+
+type GCPAuth struct {
+	roleName            string
+	mountPath           string
+	authType            string
+	serviceAccountEmail string
+}
+
+var _ api.AuthMethod = (*GCPAuth)(nil)
+
+type LoginOption func(a *GCPAuth) error
+
+const (
+	iamType             = "iam"
+	gceType             = "gce"
+	defaultMountPath    = "gcp"
+	defaultAuthType     = gceType
+	identityMetadataURL = "http://metadata/computeMetadata/v1/instance/service-accounts/default/identity"
+)
+
+// NewGCPAuth initializes a new GCP auth method interface to be
+// passed as a parameter to the client.Auth().Login method.
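+//
+// A minimal IAM-flavored usage sketch (illustrative only; it assumes an
+// initialized api.Client named client, plus example role and service
+// account names):
+//
+//	gcpAuth, err := NewGCPAuth(
+//		"my-role",
+//		WithIAMAuth("my-sa@my-project.iam.gserviceaccount.com"),
+//	)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	secret, err := client.Auth().Login(context.Background(), gcpAuth)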
+//
+// Supported options: WithMountPath, WithIAMAuth, WithGCEAuth
+func NewGCPAuth(roleName string, opts ...LoginOption) (*GCPAuth, error) {
+	if roleName == "" {
+		return nil, fmt.Errorf("no role name provided for login")
+	}
+
+	a := &GCPAuth{
+		mountPath: defaultMountPath,
+		authType:  defaultAuthType,
+		roleName:  roleName,
+	}
+
+	// Loop through each option
+	for _, opt := range opts {
+		// Call the option giving the instantiated
+		// *GCPAuth as the argument
+		err := opt(a)
+		if err != nil {
+			return nil, fmt.Errorf("error with login option: %w", err)
+		}
+	}
+
+	// return the modified auth struct instance
+	return a, nil
+}
+
+// Login sets up the required request body for the GCP auth method's /login
+// endpoint, and performs a write to it. This method defaults to the "gce"
+// auth type unless NewGCPAuth is called with WithIAMAuth().
+func (a *GCPAuth) Login(ctx context.Context, client *api.Client) (*api.Secret, error) {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	loginData := map[string]interface{}{
+		"role": a.roleName,
+	}
+	switch a.authType {
+	case gceType:
+		jwt, err := a.getJWTFromMetadataService(client.Address())
+		if err != nil {
+			return nil, fmt.Errorf("unable to retrieve JWT from GCE metadata service: %w", err)
+		}
+		loginData["jwt"] = jwt
+	case iamType:
+		jwtResp, err := a.signJWT()
+		if err != nil {
+			return nil, fmt.Errorf("unable to sign JWT for authenticating to GCP: %w", err)
+		}
+		loginData["jwt"] = jwtResp.SignedJwt
+	}
+
+	path := fmt.Sprintf("auth/%s/login", a.mountPath)
+	resp, err := client.Logical().WriteWithContext(ctx, path, loginData)
+	if err != nil {
+		return nil, fmt.Errorf("unable to log in with GCP auth: %w", err)
+	}
+
+	return resp, nil
+}
+
+// WithMountPath overrides the default mount path of the GCP auth method.
+func WithMountPath(mountPath string) LoginOption {
+	return func(a *GCPAuth) error {
+		a.mountPath = mountPath
+		return nil
+	}
+}
+
+// WithIAMAuth selects the "iam" auth type and sets the service account
+// email whose signing capability will be used.
+func WithIAMAuth(serviceAccountEmail string) LoginOption {
+	return func(a *GCPAuth) error {
+		a.serviceAccountEmail = serviceAccountEmail
+		a.authType = iamType
+		return nil
+	}
+}
+
+// WithGCEAuth selects the "gce" auth type (the default).
+func WithGCEAuth() LoginOption {
+	return func(a *GCPAuth) error {
+		a.authType = gceType
+		return nil
+	}
+}
+
+// signJWT generates a signed JWT token from GCP IAM.
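+//
+// The claim set signed here is what the iam login flow sends to Vault as
+// its jwt. With the example names used in the NewGCPAuth sketch above, the
+// payload built below is, in essence:
+//
+//	{
+//		"aud": "vault/my-role",
+//		"sub": "my-sa@my-project.iam.gserviceaccount.com",
+//		"exp": <now + 10 minutes, as a Unix timestamp>
+//	}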
+func (a *GCPAuth) signJWT() (*credentialspb.SignJwtResponse, error) {
+	ctx := context.Background()
+	// can pass option.WithCredentialsFile("path/to/creds.json") as second
+	// param if GOOGLE_APPLICATION_CREDENTIALS env var not set
+	iamClient, err := credentials.NewIamCredentialsClient(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("unable to initialize IAM credentials client: %w", err)
+	}
+	defer iamClient.Close()
+
+	resourceName := fmt.Sprintf("projects/-/serviceAccounts/%s", a.serviceAccountEmail)
+	jwtPayload := map[string]interface{}{
+		"aud": fmt.Sprintf("vault/%s", a.roleName),
+		"sub": a.serviceAccountEmail,
+		"exp": time.Now().Add(time.Minute * 10).Unix(),
+	}
+
+	payloadBytes, err := json.Marshal(jwtPayload)
+	if err != nil {
+		return nil, fmt.Errorf("unable to marshal jwt payload to json: %w", err)
+	}
+
+	signJWTReq := &credentialspb.SignJwtRequest{
+		Name:    resourceName,
+		Payload: string(payloadBytes),
+	}
+
+	jwtResp, err := iamClient.SignJwt(ctx, signJWTReq)
+	if err != nil {
+		return nil, fmt.Errorf("unable to sign JWT: %w", err)
+	}
+
+	return jwtResp, nil
+}
+
+func (a *GCPAuth) getJWTFromMetadataService(vaultAddress string) (string, error) {
+	if !metadata.OnGCE() {
+		return "", fmt.Errorf("GCE metadata service not available")
+	}
+
+	// build request to metadata server
+	c := &http.Client{}
+	req, err := http.NewRequest(http.MethodGet, identityMetadataURL, nil)
+	if err != nil {
+		return "", fmt.Errorf("error creating http request: %w", err)
+	}
+
+	req.Header.Add("Metadata-Flavor", "Google")
+	q := url.Values{}
+	q.Add("audience", fmt.Sprintf("%s/vault/%s", vaultAddress, a.roleName))
+	q.Add("format", "full")
+	req.URL.RawQuery = q.Encode()
+	resp, err := c.Do(req)
+	if err != nil {
+		return "", fmt.Errorf("error making request to metadata service: %w", err)
+	}
+	defer resp.Body.Close()
+
+	// get jwt from response
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return "", fmt.Errorf("error reading response from metadata service: %w", err)
+	}
+	jwt := string(body)
+
+	return jwt, nil
+}
diff --git a/api/auth/gcp/go.mod b/api/auth/gcp/go.mod
new file mode 100644
index 0000000..7509753
--- /dev/null
+++ b/api/auth/gcp/go.mod
@@ -0,0 +1,10 @@
+module github.com/hashicorp/vault/api/auth/gcp
+
+go 1.16
+
+require (
+	cloud.google.com/go v0.97.0
+	github.com/hashicorp/vault/api v1.9.2
+	google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0
+	google.golang.org/grpc v1.41.0 // indirect
+)
diff --git a/api/auth/gcp/go.sum b/api/auth/gcp/go.sum
new file mode 100644
index 0000000..35da60a
--- /dev/null
+++ b/api/auth/gcp/go.sum
@@ -0,0 +1,641 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod 
h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= +github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= 
+github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= +github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= +github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod 
h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0 h1:6DWmvNpomjL1+3liNSZbVns3zsYzzCjm6pRBO1tLeso= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= +github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= +github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/vault/api v1.9.2 h1:YjkZLJ7K3inKgMZ0wzCU9OHqc+UqMQyXsPXnf3Cl2as= +github.com/hashicorp/vault/api v1.9.2/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jstemmer/go-junit-report 
v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= 
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools 
v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools 
v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod 
h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0 h1:4t9zuDlHLcIx0ZEhmXEeFVCRsiOgpgn2QOH9N0MNjPI= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod 
h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod 
h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0 h1:5Tbluzus3QxoAJx4IefGt1W0HQZW4nuMrVk684jI74Q= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.41.0 
h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/api/auth/kubernetes/go.mod b/api/auth/kubernetes/go.mod new file mode 100644 index 0000000..1076282 --- /dev/null +++ b/api/auth/kubernetes/go.mod @@ -0,0 +1,5 @@ +module github.com/hashicorp/vault/api/auth/kubernetes + +go 1.16 + +require 
github.com/hashicorp/vault/api v1.9.2 diff --git a/api/auth/kubernetes/go.sum b/api/auth/kubernetes/go.sum new file mode 100644 index 0000000..dfd66b2 --- /dev/null +++ b/api/auth/kubernetes/go.sum @@ -0,0 +1,117 @@ +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= +github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= +github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= +github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= +github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= +github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 
h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/vault/api v1.9.2 h1:YjkZLJ7K3inKgMZ0wzCU9OHqc+UqMQyXsPXnf3Cl2as= +github.com/hashicorp/vault/api v1.9.2/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod 
h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/api/auth/kubernetes/kubernetes.go b/api/auth/kubernetes/kubernetes.go new file mode 100644 index 0000000..f0e38c1 --- /dev/null +++ b/api/auth/kubernetes/kubernetes.go @@ -0,0 +1,136 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package kubernetes + +import ( + "context" + "fmt" + "os" + + "github.com/hashicorp/vault/api" +) + +type KubernetesAuth struct { + roleName string + mountPath string + serviceAccountToken string +} + +var _ api.AuthMethod = (*KubernetesAuth)(nil) + +type LoginOption func(a *KubernetesAuth) error + +const ( + defaultMountPath = "kubernetes" + defaultServiceAccountTokenPath = "/var/run/secrets/kubernetes.io/serviceaccount/token" +) + +// NewKubernetesAuth creates a KubernetesAuth struct which can be passed to +// the client.Auth().Login method to authenticate to Vault. The roleName +// parameter should be the name of the role in Vault that was created with +// this app's Kubernetes service account bound to it. +// +// The Kubernetes service account token JWT is retrieved from +// /var/run/secrets/kubernetes.io/serviceaccount/token by default. To change this +// path, pass the WithServiceAccountTokenPath option. To instead pass the +// JWT directly as a string, or to read the value from an environment +// variable, use WithServiceAccountToken and WithServiceAccountTokenEnv respectively. 
+// +// Supported options: WithMountPath, WithServiceAccountTokenPath, WithServiceAccountTokenEnv, WithServiceAccountToken +func NewKubernetesAuth(roleName string, opts ...LoginOption) (*KubernetesAuth, error) { + if roleName == "" { + return nil, fmt.Errorf("no role name was provided") + } + + a := &KubernetesAuth{ + roleName: roleName, + mountPath: defaultMountPath, + } + + // Loop through each option + for _, opt := range opts { + // Call the option giving the instantiated + // *KubernetesAuth as the argument + err := opt(a) + if err != nil { + return nil, fmt.Errorf("error with login option: %w", err) + } + } + + if a.serviceAccountToken == "" { + token, err := readTokenFromFile(defaultServiceAccountTokenPath) + if err != nil { + return nil, fmt.Errorf("error reading service account token from default location: %w", err) + } + a.serviceAccountToken = token + } + + // return the modified auth struct instance + return a, nil +} + +func (a *KubernetesAuth) Login(ctx context.Context, client *api.Client) (*api.Secret, error) { + if ctx == nil { + ctx = context.Background() + } + + loginData := map[string]interface{}{ + "jwt": a.serviceAccountToken, + "role": a.roleName, + } + + path := fmt.Sprintf("auth/%s/login", a.mountPath) + resp, err := client.Logical().WriteWithContext(ctx, path, loginData) + if err != nil { + return nil, fmt.Errorf("unable to log in with Kubernetes auth: %w", err) + } + return resp, nil +} + +func WithMountPath(mountPath string) LoginOption { + return func(a *KubernetesAuth) error { + a.mountPath = mountPath + return nil + } +} + +// WithServiceAccountTokenPath allows you to specify a different path to +// where your application's Kubernetes service account token is mounted, +// instead of the default of /var/run/secrets/kubernetes.io/serviceaccount/token +func WithServiceAccountTokenPath(pathToToken string) LoginOption { + return func(a *KubernetesAuth) error { + token, err := readTokenFromFile(pathToToken) + if err != nil { + return fmt.Errorf("unable to read service account token from file: %w", err) + } + a.serviceAccountToken = token + return nil + } +} + +func WithServiceAccountToken(jwt string) LoginOption { + return func(a *KubernetesAuth) error { + a.serviceAccountToken = jwt + return nil + } +} + +func WithServiceAccountTokenEnv(envVar string) LoginOption { + return func(a *KubernetesAuth) error { + token := os.Getenv(envVar) + if token == "" { + return fmt.Errorf("service account token was specified with an environment variable with an empty value") + } + a.serviceAccountToken = token + return nil + } +} + +func readTokenFromFile(filepath string) (string, error) { + jwt, err := os.ReadFile(filepath) + if err != nil { + return "", fmt.Errorf("unable to read file containing service account token: %w", err) + } + return string(jwt), nil +} diff --git a/api/auth/ldap/go.mod b/api/auth/ldap/go.mod new file mode 100644 index 0000000..4bfa972 --- /dev/null +++ b/api/auth/ldap/go.mod @@ -0,0 +1,5 @@ +module github.com/hashicorp/vault/api/auth/ldap + +go 1.16 + +require github.com/hashicorp/vault/api v1.9.2 diff --git a/api/auth/ldap/go.sum b/api/auth/ldap/go.sum new file mode 100644 index 0000000..dfd66b2 --- /dev/null +++ b/api/auth/ldap/go.sum @@ -0,0 +1,117 @@ +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= 
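A minimal usage sketch for the kubernetes.go auth method above (illustrative only, not part of the commit; the role name "dev-role" is a hypothetical placeholder, and api.DefaultConfig() is assumed to pick up the Vault address from VAULT_ADDR):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
	kubernetes "github.com/hashicorp/vault/api/auth/kubernetes"
)

func main() {
	// DefaultConfig reads VAULT_ADDR and related environment variables.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatalf("unable to initialize Vault client: %v", err)
	}

	// "dev-role" is a hypothetical role; the service account token is read
	// from the default in-cluster path unless a With... option overrides it.
	k8sAuth, err := kubernetes.NewKubernetesAuth("dev-role")
	if err != nil {
		log.Fatalf("unable to initialize Kubernetes auth method: %v", err)
	}

	// client.Auth().Login invokes the method's Login, as the doc comment
	// on NewKubernetesAuth describes.
	authInfo, err := client.Auth().Login(context.Background(), k8sAuth)
	if err != nil {
		log.Fatalf("unable to log in with Kubernetes auth: %v", err)
	}
	if authInfo == nil || authInfo.Auth == nil {
		log.Fatal("no auth info was returned after login")
	}
	fmt.Println("logged in; token accessor:", authInfo.Auth.Accessor)
}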
+github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= +github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= +github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= +github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= +github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/vault/api v1.9.2 
h1:YjkZLJ7K3inKgMZ0wzCU9OHqc+UqMQyXsPXnf3Cl2as= +github.com/hashicorp/vault/api v1.9.2/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 
v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/api/auth/ldap/ldap.go b/api/auth/ldap/ldap.go new file mode 100644 index 0000000..fdf1a38 --- /dev/null +++ b/api/auth/ldap/ldap.go @@ -0,0 +1,169 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ldap + +import ( + "context" + "fmt" + "io" + "os" + "strings" + + "github.com/hashicorp/vault/api" +) + +type LDAPAuth struct { + mountPath string + username string + password string + passwordFile string + passwordEnv string +} + +type Password struct { + // Path on the file system where the LDAP password can be found. + FromFile string + // The name of the environment variable containing the LDAP password + FromEnv string + // The password as a plaintext string value. + FromString string +} + +var _ api.AuthMethod = (*LDAPAuth)(nil) + +type LoginOption func(a *LDAPAuth) error + +const ( + defaultMountPath = "ldap" +) + +// NewLDAPAuth initializes a new LDAP auth method interface to be +// passed as a parameter to the client.Auth().Login method. +// +// Supported options: WithMountPath +func NewLDAPAuth(username string, password *Password, opts ...LoginOption) (*LDAPAuth, error) { + if username == "" { + return nil, fmt.Errorf("no user name provided for login") + } + + if password == nil { + return nil, fmt.Errorf("no password provided for login") + } + + err := password.validate() + if err != nil { + return nil, fmt.Errorf("invalid password: %w", err) + } + + a := &LDAPAuth{ + mountPath: defaultMountPath, + username: username, + } + + if password.FromFile != "" { + a.passwordFile = password.FromFile + } + + if password.FromEnv != "" { + a.passwordEnv = password.FromEnv + } + + if password.FromString != "" { + a.password = password.FromString + } + // Loop through each option + for _, opt := range opts { + // Call the option giving the instantiated + // *LDAPAuth as the argument + err := opt(a) + if err != nil { + return nil, fmt.Errorf("error with login option: %w", err) + } + } + + // return the modified auth struct instance + return a, nil +} + +func (a *LDAPAuth) Login(ctx context.Context, client *api.Client) (*api.Secret, error) { + if ctx == nil { + ctx = context.Background() + } + + loginData := make(map[string]interface{}) + + if a.passwordFile != "" { + passwordValue, err := a.readPasswordFromFile() + if err != nil { + return nil, fmt.Errorf("error reading password: %w", err) + } + loginData["password"] = passwordValue + } else if a.passwordEnv != "" { + passwordValue := os.Getenv(a.passwordEnv) + if passwordValue == "" { + return nil, fmt.Errorf("password was specified with an environment variable with an empty value") + } + loginData["password"] = passwordValue + } else { + loginData["password"] = a.password + } + + path := fmt.Sprintf("auth/%s/login/%s", a.mountPath, a.username) + resp, err := client.Logical().WriteWithContext(ctx, path, loginData) + if err != nil { + return nil, fmt.Errorf("unable to log in with LDAP auth: %w", err) + } + + return resp, nil +} + +func WithMountPath(mountPath string) LoginOption { + return func(a *LDAPAuth) error { + a.mountPath = mountPath + return nil + } +} + +func (a *LDAPAuth) readPasswordFromFile() (string, error) { + passwordFile, err := os.Open(a.passwordFile) + if err != nil { + return "", fmt.Errorf("unable to open file containing password: %w", err) + } + defer passwordFile.Close() + + limitedReader := 
io.LimitReader(passwordFile, 1000) + passwordBytes, err := io.ReadAll(limitedReader) + if err != nil { + return "", fmt.Errorf("unable to read password: %w", err) + } + + passwordValue := strings.TrimSuffix(string(passwordBytes), "\n") + + return passwordValue, nil +} + +func (password *Password) validate() error { + if password.FromFile == "" && password.FromEnv == "" && password.FromString == "" { + return fmt.Errorf("password for LDAP auth must be provided with a source file, environment variable, or plaintext string") + } + + if password.FromFile != "" { + if password.FromEnv != "" || password.FromString != "" { + return fmt.Errorf("only one source for the password should be specified") + } + } + + if password.FromEnv != "" { + if password.FromFile != "" || password.FromString != "" { + return fmt.Errorf("only one source for the password should be specified") + } + } + + if password.FromString != "" { + if password.FromFile != "" || password.FromEnv != "" { + return fmt.Errorf("only one source for the password should be specified") + } + } + return nil +} diff --git a/api/auth/ldap/ldap_test.go b/api/auth/ldap/ldap_test.go new file mode 100644 index 0000000..abdccb0 --- /dev/null +++ b/api/auth/ldap/ldap_test.go @@ -0,0 +1,159 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ldap + +import ( + "context" + "encoding/json" + "fmt" + "net" + "net/http" + "os" + "strings" + "testing" + + "github.com/hashicorp/vault/api" +) + +// testHTTPServer creates a test HTTP server that handles requests until +// the listener returned is closed. +func testHTTPServer( + t *testing.T, handler http.Handler, +) (*api.Config, net.Listener) { + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + server := &http.Server{Handler: handler} + go server.Serve(ln) + + config := api.DefaultConfig() + config.Address = fmt.Sprintf("http://%s", ln.Addr()) + + return config, ln +} + +func init() { + os.Setenv("VAULT_TOKEN", "") +} + +func TestLogin(t *testing.T) { + passwordEnvVar := "LDAP_PASSWORD" + allowedPassword := "6hrtL!*bro!ywbQbvDwW" + + content := []byte(allowedPassword) + tmpfile, err := os.CreateTemp("./", "file-containing-password") + if err != nil { + t.Fatalf("error creating temp file: %v", err) + } + defer os.Remove(tmpfile.Name()) // clean up + err = os.Setenv(passwordEnvVar, allowedPassword) + if err != nil { + t.Fatalf("error writing password to env var: %v", err) + } + + if _, err := tmpfile.Write(content); err != nil { + t.Fatalf("error writing to temp file: %v", err) + } + if err := tmpfile.Close(); err != nil { + t.Fatalf("error closing temp file: %v", err) + } + + // a response to return if the correct values were passed to login + authSecret := &api.Secret{ + Auth: &api.SecretAuth{ + ClientToken: "a-client-token", + }, + } + + authBytes, err := json.Marshal(authSecret) + if err != nil { + t.Fatalf("error marshaling json: %v", err) + } + + handler := func(w http.ResponseWriter, req *http.Request) { + payload := make(map[string]interface{}) + err := json.NewDecoder(req.Body).Decode(&payload) + if err != nil { + t.Fatalf("error decoding json: %v", err) + } + if payload["password"] == allowedPassword { + w.Write(authBytes) + } + } + + config, ln := testHTTPServer(t, http.HandlerFunc(handler)) + defer ln.Close() + + config.Address = strings.ReplaceAll(config.Address, "127.0.0.1", "localhost") + client, err := api.NewClient(config) + if err != nil { + t.Fatalf("error initializing Vault client: %v", err) + } + + // 
Password fromFile test + authFromFile, err := NewLDAPAuth("my-ldap-username", &Password{FromFile: tmpfile.Name()}) + if err != nil { + t.Fatalf("error initializing LDAPAuth with password file: %v", err) + } + + loginRespFromFile, err := client.Auth().Login(context.TODO(), authFromFile) + if err != nil { + t.Fatalf("error logging in with password from file: %v", err) + } + + if loginRespFromFile.Auth == nil || loginRespFromFile.Auth.ClientToken == "" { + t.Fatalf("no authentication info returned by login") + } + + // Password fromEnv Test + authFromEnv, err := NewLDAPAuth("my-ldap-username", &Password{FromEnv: passwordEnvVar}) + if err != nil { + t.Fatalf("error initializing LDAPAuth with password env var: %v", err) + } + + loginRespFromEnv, err := client.Auth().Login(context.TODO(), authFromEnv) + if err != nil { + t.Fatalf("error logging in with password from env var: %v", err) + } + + if loginRespFromEnv.Auth == nil || loginRespFromEnv.Auth.ClientToken == "" { + t.Fatalf("no authentication info returned by login with password from env var") + } + + // Password fromStr test + authFromStr, err := NewLDAPAuth("my-ldap-username", &Password{FromString: allowedPassword}) + if err != nil { + t.Fatalf("error initializing LDAPAuth with password string: %v", err) + } + + loginRespFromStr, err := client.Auth().Login(context.TODO(), authFromStr) + if err != nil { + t.Fatalf("error logging in with string: %v", err) + } + + if loginRespFromStr.Auth == nil || loginRespFromStr.Auth.ClientToken == "" { + t.Fatalf("no authentication info returned by login with password from string") + } + + // Empty User Test + _, err = NewLDAPAuth("", &Password{FromString: allowedPassword}) + if err.Error() != "no user name provided for login" { + t.Fatalf("Auth object created for empty username: %v", err) + } + + // Empty Password Test + _, err = NewLDAPAuth("my-ldap-username", nil) + if err.Error() != "no password provided for login" { + t.Fatalf("Auth object created when passing a nil Password struct: %v", err) + } + + // Auth with Custom MountPath + ldapMount := WithMountPath("customMount") + _, err = NewLDAPAuth("my-ldap-username", &Password{FromString: allowedPassword}, ldapMount) + if err != nil { + t.Fatalf("error initializing LDAPAuth with custom mountpath: %v", err) + } +} diff --git a/api/auth/userpass/go.mod b/api/auth/userpass/go.mod new file mode 100644 index 0000000..194bc6c --- /dev/null +++ b/api/auth/userpass/go.mod @@ -0,0 +1,5 @@ +module github.com/hashicorp/vault/api/auth/userpass + +go 1.16 + +require github.com/hashicorp/vault/api v1.9.2 diff --git a/api/auth/userpass/go.sum b/api/auth/userpass/go.sum new file mode 100644 index 0000000..dfd66b2 --- /dev/null +++ b/api/auth/userpass/go.sum @@ -0,0 +1,117 @@ +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= +github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod 
h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= +github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= +github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= +github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= +github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/vault/api v1.9.2 h1:YjkZLJ7K3inKgMZ0wzCU9OHqc+UqMQyXsPXnf3Cl2as= +github.com/hashicorp/vault/api v1.9.2/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= +github.com/mattn/go-colorable v0.1.6/go.mod 
h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/api/auth/userpass/userpass.go b/api/auth/userpass/userpass.go new file mode 100644 index 0000000..3e89429 --- /dev/null +++ b/api/auth/userpass/userpass.go @@ -0,0 +1,173 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package userpass + +import ( + "context" + "fmt" + "io" + "os" + "strings" + + "github.com/hashicorp/vault/api" +) + +type UserpassAuth struct { + mountPath string + username string + password string + passwordFile string + passwordEnv string +} + +type Password struct { + // Path on the file system where the password corresponding to this + // application's Vault role can be found. + FromFile string + // The name of the environment variable containing the password + // that corresponds to this application's Vault role. + FromEnv string + // The password as a plaintext string value. + FromString string +} + +var _ api.AuthMethod = (*UserpassAuth)(nil) + +type LoginOption func(a *UserpassAuth) error + +const ( + defaultMountPath = "userpass" +) + +// NewUserpassAuth initializes a new Userpass auth method interface to be +// passed as a parameter to the client.Auth().Login method. +// +// Supported options: WithMountPath +func NewUserpassAuth(username string, password *Password, opts ...LoginOption) (*UserpassAuth, error) { + if username == "" { + return nil, fmt.Errorf("no user name provided for login") + } + + if password == nil { + return nil, fmt.Errorf("no password provided for login") + } + + err := password.validate() + if err != nil { + return nil, fmt.Errorf("invalid password: %w", err) + } + + a := &UserpassAuth{ + mountPath: defaultMountPath, + username: username, + } + + // password will be read in at login time if it comes from a file or environment variable, in case the underlying value changes + if password.FromFile != "" { + a.passwordFile = password.FromFile + } + + if password.FromEnv != "" { + a.passwordEnv = password.FromEnv + } + + if password.FromString != "" { + a.password = password.FromString + } + + // Loop through each option + for _, opt := range opts { + // Call the option giving the instantiated + // *UserpassAuth as the argument + err := opt(a) + if err != nil { + return nil, fmt.Errorf("error with login option: %w", err) + } + } + + // return the modified auth struct instance + return a, nil +} + +func (a *UserpassAuth) Login(ctx context.Context, client *api.Client) (*api.Secret, error) { + if ctx == nil { + ctx = context.Background() + } + + loginData := make(map[string]interface{}) + + if a.passwordFile != "" { + passwordValue, err := a.readPasswordFromFile() + if err != nil { + return nil, fmt.Errorf("error reading password: %w", err) + } + loginData["password"] = passwordValue + } else if a.passwordEnv != "" { + passwordValue := os.Getenv(a.passwordEnv) + if passwordValue == "" { + return nil, fmt.Errorf("password was specified with an environment variable with an empty value") + } + loginData["password"] = passwordValue + } else { + loginData["password"] = a.password + } + + path := fmt.Sprintf("auth/%s/login/%s", a.mountPath, a.username) + resp, err := client.Logical().WriteWithContext(ctx, path, loginData) + if err != nil { + return nil, fmt.Errorf("unable to log in with userpass auth: %w", err) + } + + return resp, nil +} + +func WithMountPath(mountPath string) LoginOption { + return func(a *UserpassAuth) error { + a.mountPath = mountPath + return nil + } +} + +func (a *UserpassAuth) readPasswordFromFile() (string, error) { + passwordFile, err := os.Open(a.passwordFile) + if err != nil { + return "", fmt.Errorf("unable to open file containing password: %w", err) + } + defer passwordFile.Close() + + limitedReader := io.LimitReader(passwordFile, 1000) + passwordBytes, err := io.ReadAll(limitedReader) + if err != 
nil { + return "", fmt.Errorf("unable to read password: %w", err) + } + + passwordValue := strings.TrimSuffix(string(passwordBytes), "\n") + + return passwordValue, nil +} + +func (password *Password) validate() error { + if password.FromFile == "" && password.FromEnv == "" && password.FromString == "" { + return fmt.Errorf("password for Userpass auth must be provided with a source file, environment variable, or plaintext string") + } + + if password.FromFile != "" { + if password.FromEnv != "" || password.FromString != "" { + return fmt.Errorf("only one source for the password should be specified") + } + } + + if password.FromEnv != "" { + if password.FromFile != "" || password.FromString != "" { + return fmt.Errorf("only one source for the password should be specified") + } + } + + if password.FromString != "" { + if password.FromFile != "" || password.FromEnv != "" { + return fmt.Errorf("only one source for the password should be specified") + } + } + return nil +} diff --git a/api/auth/userpass/userpass_test.go b/api/auth/userpass/userpass_test.go new file mode 100644 index 0000000..4fe68d8 --- /dev/null +++ b/api/auth/userpass/userpass_test.go @@ -0,0 +1,134 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package userpass + +import ( + "context" + "encoding/json" + "fmt" + "net" + "net/http" + "os" + "strings" + "testing" + + "github.com/hashicorp/vault/api" +) + +// testHTTPServer creates a test HTTP server that handles requests until +// the listener returned is closed. +func testHTTPServer( + t *testing.T, handler http.Handler, +) (*api.Config, net.Listener) { + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + server := &http.Server{Handler: handler} + go server.Serve(ln) + + config := api.DefaultConfig() + config.Address = fmt.Sprintf("http://%s", ln.Addr()) + + return config, ln +} + +func init() { + os.Setenv("VAULT_TOKEN", "") +} + +func TestLogin(t *testing.T) { + passwordEnvVar := "USERPASS_PASSWORD" + allowedPassword := "my-password" + + content := []byte(allowedPassword) + tmpfile, err := os.CreateTemp("", "file-containing-password") + if err != nil { + t.Fatalf("error creating temp file: %v", err) + } + defer os.Remove(tmpfile.Name()) // clean up + err = os.Setenv(passwordEnvVar, allowedPassword) + if err != nil { + t.Fatalf("error writing password to env var: %v", err) + } + + if _, err := tmpfile.Write(content); err != nil { + t.Fatalf("error writing to temp file: %v", err) + } + if err := tmpfile.Close(); err != nil { + t.Fatalf("error closing temp file: %v", err) + } + + // a response to return if the correct values were passed to login + authSecret := &api.Secret{ + Auth: &api.SecretAuth{ + ClientToken: "a-client-token", + }, + } + + authBytes, err := json.Marshal(authSecret) + if err != nil { + t.Fatalf("error marshaling json: %v", err) + } + + handler := func(w http.ResponseWriter, req *http.Request) { + payload := make(map[string]interface{}) + err := json.NewDecoder(req.Body).Decode(&payload) + if err != nil { + t.Fatalf("error decoding json: %v", err) + } + if payload["password"] == allowedPassword { + w.Write(authBytes) + } + } + + config, ln := testHTTPServer(t, http.HandlerFunc(handler)) + defer ln.Close() + + config.Address = strings.ReplaceAll(config.Address, "127.0.0.1", "localhost") + client, err := api.NewClient(config) + if err != nil { + t.Fatalf("error initializing Vault client: %v", err) + } + + authFromFile, err := NewUserpassAuth("my-role-id", &Password{FromFile: 
tmpfile.Name()}) + if err != nil { + t.Fatalf("error initializing UserpassAuth with password file: %v", err) + } + + loginRespFromFile, err := client.Auth().Login(context.TODO(), authFromFile) + if err != nil { + t.Fatalf("error logging in with password from file: %v", err) + } + if loginRespFromFile.Auth == nil || loginRespFromFile.Auth.ClientToken == "" { + t.Fatalf("no authentication info returned by login") + } + + authFromEnv, err := NewUserpassAuth("my-role-id", &Password{FromEnv: passwordEnvVar}) + if err != nil { + t.Fatalf("error initializing UserpassAuth with password env var: %v", err) + } + + loginRespFromEnv, err := client.Auth().Login(context.TODO(), authFromEnv) + if err != nil { + t.Fatalf("error logging in with password from env var: %v", err) + } + if loginRespFromEnv.Auth == nil || loginRespFromEnv.Auth.ClientToken == "" { + t.Fatalf("no authentication info returned by login with password from env var") + } + + authFromStr, err := NewUserpassAuth("my-role-id", &Password{FromString: allowedPassword}) + if err != nil { + t.Fatalf("error initializing UserpassAuth with password string: %v", err) + } + + loginRespFromStr, err := client.Auth().Login(context.TODO(), authFromStr) + if err != nil { + t.Fatalf("error logging in with string: %v", err) + } + if loginRespFromStr.Auth == nil || loginRespFromStr.Auth.ClientToken == "" { + t.Fatalf("no authentication info returned by login with password from string") + } +}
diff --git a/api/auth_test.go b/api/auth_test.go new file mode 100644 index 0000000..ca69630 --- /dev/null +++ b/api/auth_test.go @@ -0,0 +1,131 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "testing" +) + +type mockAuthMethod struct { + mockedSecret *Secret + mockedError error +} + +func (m *mockAuthMethod) Login(_ context.Context, _ *Client) (*Secret, error) { + return m.mockedSecret, m.mockedError +} + +func TestAuth_Login(t *testing.T) { + a := &Auth{ + c: &Client{}, + } + + m := mockAuthMethod{ + mockedSecret: &Secret{ + Auth: &SecretAuth{ + ClientToken: "a-client-token", + }, + }, + mockedError: nil, + } + + t.Run("Login should set token on success", func(t *testing.T) { + if a.c.Token() != "" { + t.Errorf("client token was %v expected to be unset", a.c.Token()) + } + + _, err := a.Login(context.Background(), &m) + if err != nil { + t.Errorf("Login() error = %v", err) + return + } + + if a.c.Token() != m.mockedSecret.Auth.ClientToken { + t.Errorf("client token was %v expected %v", a.c.Token(), m.mockedSecret.Auth.ClientToken) + return + } + }) +}
+ +func TestAuth_MFALoginSinglePhase(t *testing.T) { + t.Run("MFALogin() should succeed if credentials are passed in", func(t *testing.T) { + a := &Auth{ + c: &Client{}, + } + + m := mockAuthMethod{ + mockedSecret: &Secret{ + Auth: &SecretAuth{ + ClientToken: "a-client-token", + }, + }, + mockedError: nil, + } + + _, err := a.MFALogin(context.Background(), &m, "testMethod:testPasscode") + if err != nil { + t.Errorf("MFALogin() error %v", err) + return + } + if a.c.Token() != m.mockedSecret.Auth.ClientToken { + t.Errorf("client token was %v expected %v", a.c.Token(), m.mockedSecret.Auth.ClientToken) + return + } + }) +} + +func TestAuth_MFALoginTwoPhase(t *testing.T) { + tests := []struct { + name string + a *Auth + m *mockAuthMethod + creds *string + wantErr bool + }{ + { + name: "return MFARequirements", + a: &Auth{ + c: &Client{}, + }, + m: &mockAuthMethod{ + mockedSecret: &Secret{ + Auth: &SecretAuth{ + MFARequirement: &MFARequirement{ + MFARequestID:
"a-req-id", + MFAConstraints: nil, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "error if no MFARequirements", + a: &Auth{ + c: &Client{}, + }, + m: &mockAuthMethod{ + mockedSecret: &Secret{ + Auth: &SecretAuth{}, + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + secret, err := tt.a.MFALogin(context.Background(), tt.m) + if (err != nil) != tt.wantErr { + t.Errorf("MFALogin() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if secret.Auth.MFARequirement != tt.m.mockedSecret.Auth.MFARequirement { + t.Errorf("MFALogin() returned %v, expected %v", secret.Auth.MFARequirement, tt.m.mockedSecret.Auth.MFARequirement) + return + } + }) + } +} diff --git a/api/auth_token.go b/api/auth_token.go new file mode 100644 index 0000000..1980be0 --- /dev/null +++ b/api/auth_token.go @@ -0,0 +1,377 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "net/http" +) + +// TokenAuth is used to perform token backend operations on Vault +type TokenAuth struct { + c *Client +} + +// Token is used to return the client for token-backend API calls +func (a *Auth) Token() *TokenAuth { + return &TokenAuth{c: a.c} +} + +func (c *TokenAuth) Create(opts *TokenCreateRequest) (*Secret, error) { + return c.CreateWithContext(context.Background(), opts) +} + +func (c *TokenAuth) CreateWithContext(ctx context.Context, opts *TokenCreateRequest) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/auth/token/create") + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) CreateOrphan(opts *TokenCreateRequest) (*Secret, error) { + return c.CreateOrphanWithContext(context.Background(), opts) +} + +func (c *TokenAuth) CreateOrphanWithContext(ctx context.Context, opts *TokenCreateRequest) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/auth/token/create-orphan") + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) CreateWithRole(opts *TokenCreateRequest, roleName string) (*Secret, error) { + return c.CreateWithRoleWithContext(context.Background(), opts, roleName) +} + +func (c *TokenAuth) CreateWithRoleWithContext(ctx context.Context, opts *TokenCreateRequest, roleName string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/auth/token/create/"+roleName) + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) Lookup(token string) (*Secret, error) { + return c.LookupWithContext(context.Background(), token) +} + +func (c *TokenAuth) LookupWithContext(ctx context.Context, token string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/auth/token/lookup") + if err := 
r.SetJSONBody(map[string]interface{}{ + "token": token, + }); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) LookupAccessor(accessor string) (*Secret, error) { + return c.LookupAccessorWithContext(context.Background(), accessor) +} + +func (c *TokenAuth) LookupAccessorWithContext(ctx context.Context, accessor string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/auth/token/lookup-accessor") + if err := r.SetJSONBody(map[string]interface{}{ + "accessor": accessor, + }); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) LookupSelf() (*Secret, error) { + return c.LookupSelfWithContext(context.Background()) +} + +func (c *TokenAuth) LookupSelfWithContext(ctx context.Context) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/auth/token/lookup-self") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) RenewAccessor(accessor string, increment int) (*Secret, error) { + return c.RenewAccessorWithContext(context.Background(), accessor, increment) +} + +func (c *TokenAuth) RenewAccessorWithContext(ctx context.Context, accessor string, increment int) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/auth/token/renew-accessor") + if err := r.SetJSONBody(map[string]interface{}{ + "accessor": accessor, + "increment": increment, + }); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) Renew(token string, increment int) (*Secret, error) { + return c.RenewWithContext(context.Background(), token, increment) +} + +func (c *TokenAuth) RenewWithContext(ctx context.Context, token string, increment int) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/auth/token/renew") + if err := r.SetJSONBody(map[string]interface{}{ + "token": token, + "increment": increment, + }); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) RenewSelf(increment int) (*Secret, error) { + return c.RenewSelfWithContext(context.Background(), increment) +} + +func (c *TokenAuth) RenewSelfWithContext(ctx context.Context, increment int) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/auth/token/renew-self") + + body := map[string]interface{}{"increment": increment} + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +// RenewTokenAsSelf wraps RenewTokenAsSelfWithContext using 
context.Background. +func (c *TokenAuth) RenewTokenAsSelf(token string, increment int) (*Secret, error) { + return c.RenewTokenAsSelfWithContext(context.Background(), token, increment) +} + +// RenewTokenAsSelfWithContext behaves like renew-self, but authenticates using a provided +// token instead of the token attached to the client. +func (c *TokenAuth) RenewTokenAsSelfWithContext(ctx context.Context, token string, increment int) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/auth/token/renew-self") + r.ClientToken = token + + body := map[string]interface{}{"increment": increment} + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +// RevokeAccessor wraps RevokeAccessorWithContext using context.Background. +func (c *TokenAuth) RevokeAccessor(accessor string) error { + return c.RevokeAccessorWithContext(context.Background(), accessor) +} + +// RevokeAccessorWithContext revokes a token associated with the given accessor +// along with all the child tokens. +func (c *TokenAuth) RevokeAccessorWithContext(ctx context.Context, accessor string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/auth/token/revoke-accessor") + if err := r.SetJSONBody(map[string]interface{}{ + "accessor": accessor, + }); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +// RevokeOrphan wraps RevokeOrphanWithContext using context.Background. +func (c *TokenAuth) RevokeOrphan(token string) error { + return c.RevokeOrphanWithContext(context.Background(), token) +} + +// RevokeOrphanWithContext revokes a token without revoking the tree underneath it (so +// child tokens are orphaned rather than revoked) +func (c *TokenAuth) RevokeOrphanWithContext(ctx context.Context, token string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/auth/token/revoke-orphan") + if err := r.SetJSONBody(map[string]interface{}{ + "token": token, + }); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +// RevokeSelf wraps RevokeSelfWithContext using context.Background. +func (c *TokenAuth) RevokeSelf(token string) error { + return c.RevokeSelfWithContext(context.Background(), token) +} + +// RevokeSelfWithContext revokes the token making the call. The `token` parameter is kept +// for backwards compatibility but is ignored; only the client's set token has +// an effect. +func (c *TokenAuth) RevokeSelfWithContext(ctx context.Context, token string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/auth/token/revoke-self") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +// RevokeTree wraps RevokeTreeWithContext using context.Background. 
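+//
+// A typical call, sketched against an already-authenticated client (the token
+// value here is a placeholder):
+//
+//	if err := client.Auth().Token().RevokeTree("hvs.EXAMPLE"); err != nil {
+//		log.Fatal(err)
+//	}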
+func (c *TokenAuth) RevokeTree(token string) error { + return c.RevokeTreeWithContext(context.Background(), token) +} + +// RevokeTreeWithContext is the "normal" revoke operation that revokes the given token and +// the entire tree underneath -- all of its child tokens, their child tokens, +// etc. +func (c *TokenAuth) RevokeTreeWithContext(ctx context.Context, token string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/auth/token/revoke") + if err := r.SetJSONBody(map[string]interface{}{ + "token": token, + }); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +}
+ +// TokenCreateRequest is the options structure for creating a token. +type TokenCreateRequest struct { + ID string `json:"id,omitempty"` + Policies []string `json:"policies,omitempty"` + Metadata map[string]string `json:"meta,omitempty"` + Lease string `json:"lease,omitempty"` + TTL string `json:"ttl,omitempty"` + ExplicitMaxTTL string `json:"explicit_max_ttl,omitempty"` + Period string `json:"period,omitempty"` + NoParent bool `json:"no_parent,omitempty"` + NoDefaultPolicy bool `json:"no_default_policy,omitempty"` + DisplayName string `json:"display_name"` + NumUses int `json:"num_uses"` + Renewable *bool `json:"renewable,omitempty"` + Type string `json:"type"` + EntityAlias string `json:"entity_alias"` +}
diff --git a/api/client.go b/api/client.go new file mode 100644 index 0000000..1e72019 --- /dev/null +++ b/api/client.go @@ -0,0 +1,1829 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "crypto/hmac" + "crypto/sha256" + "crypto/tls" + "encoding/base64" + "encoding/hex" + "fmt" + "net" + "net/http" + "net/url" + "os" + "path" + "strconv" + "strings" + "sync" + "time" + "unicode" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-retryablehttp" + "github.com/hashicorp/go-rootcerts" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-secure-stdlib/strutil" + "golang.org/x/net/http2" + "golang.org/x/time/rate" +)
+ +const ( + EnvVaultAddress = "VAULT_ADDR" + EnvVaultAgentAddr = "VAULT_AGENT_ADDR" + EnvVaultCACert = "VAULT_CACERT" + EnvVaultCACertBytes = "VAULT_CACERT_BYTES" + EnvVaultCAPath = "VAULT_CAPATH" + EnvVaultClientCert = "VAULT_CLIENT_CERT" + EnvVaultClientKey = "VAULT_CLIENT_KEY" + EnvVaultClientTimeout = "VAULT_CLIENT_TIMEOUT" + EnvVaultSRVLookup = "VAULT_SRV_LOOKUP" + EnvVaultSkipVerify = "VAULT_SKIP_VERIFY" + EnvVaultNamespace = "VAULT_NAMESPACE" + EnvVaultTLSServerName = "VAULT_TLS_SERVER_NAME" + EnvVaultWrapTTL = "VAULT_WRAP_TTL" + EnvVaultMaxRetries = "VAULT_MAX_RETRIES" + EnvVaultToken = "VAULT_TOKEN" + EnvVaultMFA = "VAULT_MFA" + EnvRateLimit = "VAULT_RATE_LIMIT" + EnvHTTPProxy = "VAULT_HTTP_PROXY" + EnvVaultProxyAddr = "VAULT_PROXY_ADDR" + EnvVaultDisableRedirects = "VAULT_DISABLE_REDIRECTS" + HeaderIndex = "X-Vault-Index" + HeaderForward = "X-Vault-Forward" + HeaderInconsistent = "X-Vault-Inconsistent" + + // NamespaceHeaderName is the header set to specify which namespace the + // request is intended for. + NamespaceHeaderName = "X-Vault-Namespace" + + // AuthHeaderName is the name of the header containing the token. + AuthHeaderName = "X-Vault-Token" + + // RequestHeaderName is the name of the header used by the Agent for + // SSRF protection.
+ RequestHeaderName = "X-Vault-Request"
+
+ TLSErrorString = "This error usually means that the server is running with TLS disabled\n" +
+ "but the client is configured to use TLS. Please either enable TLS\n" +
+ "on the server or run the client with -address set to an address\n" +
+ "that uses the http protocol:\n\n" +
+ "    vault <command> -address http://<address>\n\n" +
+ "You can also set the VAULT_ADDR environment variable:\n\n\n" +
+ "    VAULT_ADDR=http://<address> vault <command>\n\n" +
+ "where <address> is replaced by the actual address to the server."
+) + +// Deprecated values +const ( + EnvVaultAgentAddress = "VAULT_AGENT_ADDR" + EnvVaultInsecure = "VAULT_SKIP_VERIFY" +)
+ +// WrappingLookupFunc is a function that, given an HTTP verb and a path, +// returns an optional string duration to be used for response wrapping (e.g. +// "15s", or simply "15"). The path will not begin with "/v1/" or "v1/" or "/", +// however, end-of-path forward slashes are not trimmed, so must match your +// called path precisely. Response wrapping will only be used when the return +// value is not the empty string. +type WrappingLookupFunc func(operation, path string) string
+ +// Config is used to configure the creation of the client. +type Config struct { + modifyLock sync.RWMutex + + // Address is the address of the Vault server. This should be a complete + // URL such as "http://vault.example.com". If you need a custom SSL + // cert or want to enable insecure mode, you need to specify a custom + // HttpClient. + Address string + + // AgentAddress is the address of the local Vault agent. This should be a + // complete URL such as "http://vault.example.com". + AgentAddress string + + // HttpClient is the HTTP client to use. Vault sets sane defaults for the + // http.Client and its associated http.Transport created in DefaultConfig. + // If you must modify Vault's defaults, it is suggested that you start with + // that client and modify as needed rather than start with an empty client + // (or http.DefaultClient). + HttpClient *http.Client + + // MinRetryWait controls the minimum time to wait before retrying when a 5xx + // error occurs. Defaults to 1000 milliseconds. + MinRetryWait time.Duration + + // MaxRetryWait controls the maximum time to wait before retrying when a 5xx + // error occurs. Defaults to 1500 milliseconds. + MaxRetryWait time.Duration + + // MaxRetries controls the maximum number of times to retry when a 5xx + // error occurs. Set to 0 to disable retrying. Defaults to 2 (for a total + // of three tries). + MaxRetries int + + // Timeout, given a non-negative value, will apply the request timeout + // to each request function unless an earlier deadline is passed to the + // request function through context.Context. Note that this timeout is + // not applicable to Logical().ReadRaw* (raw response) functions. + // Defaults to 60 seconds. + Timeout time.Duration + + // If there is an error when creating the configuration, this will be the + // error. + Error error + + // The Backoff function to use; a default is used if not provided + Backoff retryablehttp.Backoff + + // The CheckRetry function to use; a default is used if not provided + CheckRetry retryablehttp.CheckRetry + + // Logger is the leveled logger to provide to the retryable HTTP client. + Logger retryablehttp.LeveledLogger + + // Limiter is the rate limiter used by the client. + // If this pointer is nil, then there will be no limit set. + // In contrast, if this pointer is set, even to an empty struct, + // then that limiter will be used. Note that an empty Limiter + // is equivalent to blocking all events. + Limiter *rate.Limiter + + // OutputCurlString causes the actual request to return an error of type + // *OutputStringError. Type asserting the error message will allow + // fetching a cURL-compatible string for the operation. + // + // Note: It is not thread-safe to set this and make concurrent requests + // with the same client. Cloning a client will not clone this value.
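+ //
+ // A sketch of the intended flow (the read path is a placeholder; the
+ // OutputStringError type and its CurlString method live elsewhere in this
+ // package and are assumed here):
+ //
+ //	config := DefaultConfig()
+ //	config.OutputCurlString = true
+ //	client, _ := NewClient(config)
+ //	_, err := client.Logical().Read("secret/data/app")
+ //	if curlErr, ok := err.(*OutputStringError); ok {
+ //		curl, _ := curlErr.CurlString()
+ //		fmt.Println(curl)
+ //	}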
+ OutputCurlString bool + + // OutputPolicy causes the actual request to return an error of type + // *OutputPolicyError. Type asserting the error message will display + // an example of the required policy HCL needed for the operation. + // + // Note: It is not thread-safe to set this and make concurrent requests + // with the same client. Cloning a client will not clone this value. + OutputPolicy bool + + // curlCACert, curlCAPath, curlClientCert and curlClientKey are used to keep + // track of the name of the TLS certs and keys when OutputCurlString is set. + // Cloning a client will also not clone those values. + curlCACert, curlCAPath string + curlClientCert, curlClientKey string + + // SRVLookup enables the client to lookup the host through DNS SRV lookup + SRVLookup bool + + // CloneHeaders ensures that the source client's headers are copied to + // its clone. + CloneHeaders bool + + // CloneToken from parent. + CloneToken bool + + // ReadYourWrites ensures isolated read-after-write semantics by + // providing discovered cluster replication states in each request. + // The shared state is automatically propagated to all Client clones. + // + // Note: Careful consideration should be made prior to enabling this setting + // since there will be a performance penalty paid upon each request. + // This feature requires Enterprise server-side. + ReadYourWrites bool + + // DisableRedirects when set to true, will prevent the client from + // automatically following a (single) redirect response to its initial + // request. This behavior may be desirable if using Vault CLI on the server + // side. + // + // Note: Disabling redirect following behavior could cause issues with + // commands such as 'vault operator raft snapshot' as this redirects to the + // primary node. + DisableRedirects bool + clientTLSConfig *tls.Config +} + +// TLSConfig contains the parameters needed to configure TLS on the HTTP client +// used to communicate with Vault. +type TLSConfig struct { + // CACert is the path to a PEM-encoded CA cert file to use to verify the + // Vault server SSL certificate. It takes precedence over CACertBytes + // and CAPath. + CACert string + + // CACertBytes is a PEM-encoded certificate or bundle. It takes precedence + // over CAPath. + CACertBytes []byte + + // CAPath is the path to a directory of PEM-encoded CA cert files to verify + // the Vault server SSL certificate. + CAPath string + + // ClientCert is the path to the certificate for Vault communication + ClientCert string + + // ClientKey is the path to the private key for Vault communication + ClientKey string + + // TLSServerName, if set, is used to set the SNI host when connecting via + // TLS. + TLSServerName string + + // Insecure enables or disables SSL verification + Insecure bool +} + +// DefaultConfig returns a default configuration for the client. It is +// safe to modify the return value of this function. +// +// The default Address is https://127.0.0.1:8200, but this can be overridden by +// setting the `VAULT_ADDR` environment variable. +// +// If an error is encountered, the Error field on the returned *Config will be populated with the specific error. 
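+//
+// A minimal sketch of the intended call pattern; Error is checked first
+// because it is how this constructor reports setup failure:
+//
+//	config := DefaultConfig()
+//	if config.Error != nil {
+//		// handle the configuration error before building a client
+//	}
+//	client, err := NewClient(config)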
+func DefaultConfig() *Config { + config := &Config{ + Address: "https://127.0.0.1:8200", + HttpClient: cleanhttp.DefaultPooledClient(), + Timeout: time.Second * 60, + MinRetryWait: time.Millisecond * 1000, + MaxRetryWait: time.Millisecond * 1500, + MaxRetries: 2, + Backoff: retryablehttp.LinearJitterBackoff, + } + + transport := config.HttpClient.Transport.(*http.Transport) + transport.TLSHandshakeTimeout = 10 * time.Second + transport.TLSClientConfig = &tls.Config{ + MinVersion: tls.VersionTLS12, + } + if err := http2.ConfigureTransport(transport); err != nil { + config.Error = err + return config + } + + if err := config.ReadEnvironment(); err != nil { + config.Error = err + return config + } + + // Ensure redirects are not automatically followed + // Note that this is sane for the API client as it has its own + // redirect handling logic (and thus also for command/meta), + // but in e.g. http_test actual redirect handling is necessary + config.HttpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error { + // Returning this value causes the Go net library to not close the + // response body and to nil out the error. Otherwise retry clients may + // try three times on every redirect because it sees an error from this + // function (to prevent redirects) passing through to it. + return http.ErrUseLastResponse + } + + return config +} + +// configureTLS is a lock free version of ConfigureTLS that can be used in +// ReadEnvironment where the lock is already hold +func (c *Config) configureTLS(t *TLSConfig) error { + if c.HttpClient == nil { + c.HttpClient = DefaultConfig().HttpClient + } + clientTLSConfig := c.HttpClient.Transport.(*http.Transport).TLSClientConfig + + var clientCert tls.Certificate + foundClientCert := false + + switch { + case t.ClientCert != "" && t.ClientKey != "": + var err error + clientCert, err = tls.LoadX509KeyPair(t.ClientCert, t.ClientKey) + if err != nil { + return err + } + foundClientCert = true + c.curlClientCert = t.ClientCert + c.curlClientKey = t.ClientKey + case t.ClientCert != "" || t.ClientKey != "": + return fmt.Errorf("both client cert and client key must be provided") + } + + if t.CACert != "" || len(t.CACertBytes) != 0 || t.CAPath != "" { + c.curlCACert = t.CACert + c.curlCAPath = t.CAPath + rootConfig := &rootcerts.Config{ + CAFile: t.CACert, + CACertificate: t.CACertBytes, + CAPath: t.CAPath, + } + if err := rootcerts.ConfigureTLS(clientTLSConfig, rootConfig); err != nil { + return err + } + } + + if t.Insecure { + clientTLSConfig.InsecureSkipVerify = true + } + + if foundClientCert { + // We use this function to ignore the server's preferential list of + // CAs, otherwise any CA used for the cert auth backend must be in the + // server's CA pool + clientTLSConfig.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) { + return &clientCert, nil + } + } + + if t.TLSServerName != "" { + clientTLSConfig.ServerName = t.TLSServerName + } + c.clientTLSConfig = clientTLSConfig + + return nil +} + +func (c *Config) TLSConfig() *tls.Config { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + return c.clientTLSConfig.Clone() +} + +// ConfigureTLS takes a set of TLS configurations and applies those to the +// HTTP client. +func (c *Config) ConfigureTLS(t *TLSConfig) error { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + return c.configureTLS(t) +} + +// ReadEnvironment reads configuration information from the environment. If +// there is an error, no configuration value is updated. 
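+//
+// DefaultConfig already calls ReadEnvironment, so calling it directly is
+// mainly useful to pick up variables set after construction (sketch):
+//
+//	os.Setenv("VAULT_ADDR", "https://vault.internal:8200") // hypothetical address
+//	if err := cfg.ReadEnvironment(); err != nil {
+//		// per the contract above, no configuration value was updated
+//	}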
+func (c *Config) ReadEnvironment() error { + var envAddress string + var envAgentAddress string + var envCACert string + var envCACertBytes []byte + var envCAPath string + var envClientCert string + var envClientKey string + var envClientTimeout time.Duration + var envInsecure bool + var envTLSServerName string + var envMaxRetries *uint64 + var envSRVLookup bool + var limit *rate.Limiter + var envVaultProxy string + var envVaultDisableRedirects bool + + // Parse the environment variables + if v := os.Getenv(EnvVaultAddress); v != "" { + envAddress = v + } + if v := os.Getenv(EnvVaultAgentAddr); v != "" { + envAgentAddress = v + } + if v := os.Getenv(EnvVaultMaxRetries); v != "" { + maxRetries, err := strconv.ParseUint(v, 10, 32) + if err != nil { + return err + } + envMaxRetries = &maxRetries + } + if v := os.Getenv(EnvVaultCACert); v != "" { + envCACert = v + } + if v := os.Getenv(EnvVaultCACertBytes); v != "" { + envCACertBytes = []byte(v) + } + if v := os.Getenv(EnvVaultCAPath); v != "" { + envCAPath = v + } + if v := os.Getenv(EnvVaultClientCert); v != "" { + envClientCert = v + } + if v := os.Getenv(EnvVaultClientKey); v != "" { + envClientKey = v + } + if v := os.Getenv(EnvRateLimit); v != "" { + rateLimit, burstLimit, err := parseRateLimit(v) + if err != nil { + return err + } + limit = rate.NewLimiter(rate.Limit(rateLimit), burstLimit) + } + if t := os.Getenv(EnvVaultClientTimeout); t != "" { + clientTimeout, err := parseutil.ParseDurationSecond(t) + if err != nil { + return fmt.Errorf("could not parse %q", EnvVaultClientTimeout) + } + envClientTimeout = clientTimeout + } + if v := os.Getenv(EnvVaultSkipVerify); v != "" { + var err error + envInsecure, err = strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("could not parse %s", EnvVaultSkipVerify) + } + } + if v := os.Getenv(EnvVaultSRVLookup); v != "" { + var err error + envSRVLookup, err = strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("could not parse %s", EnvVaultSRVLookup) + } + } + + if v := os.Getenv(EnvVaultTLSServerName); v != "" { + envTLSServerName = v + } + + if v := os.Getenv(EnvHTTPProxy); v != "" { + envVaultProxy = v + } + + // VAULT_PROXY_ADDR supersedes VAULT_HTTP_PROXY + if v := os.Getenv(EnvVaultProxyAddr); v != "" { + envVaultProxy = v + } + + if v := os.Getenv(EnvVaultDisableRedirects); v != "" { + var err error + envVaultDisableRedirects, err = strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("could not parse %s", EnvVaultDisableRedirects) + } + + c.DisableRedirects = envVaultDisableRedirects + } + + // Configure the HTTP clients TLS configuration. 
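+	// The TLSConfig below is assembled entirely from the environment values
+	// parsed above (VAULT_CACERT, VAULT_CACERT_BYTES, VAULT_CAPATH,
+	// VAULT_CLIENT_CERT, VAULT_CLIENT_KEY, VAULT_TLS_SERVER_NAME,
+	// VAULT_SKIP_VERIFY); unset variables stay zero-valued, which
+	// configureTLS treats as "not provided".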
+ t := &TLSConfig{ + CACert: envCACert, + CACertBytes: envCACertBytes, + CAPath: envCAPath, + ClientCert: envClientCert, + ClientKey: envClientKey, + TLSServerName: envTLSServerName, + Insecure: envInsecure, + } + + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + c.SRVLookup = envSRVLookup + c.Limiter = limit + + if err := c.configureTLS(t); err != nil { + return err + } + + if envAddress != "" { + c.Address = envAddress + } + + if envAgentAddress != "" { + c.AgentAddress = envAgentAddress + } + + if envMaxRetries != nil { + c.MaxRetries = int(*envMaxRetries) + } + + if envClientTimeout != 0 { + c.Timeout = envClientTimeout + } + + if envVaultProxy != "" { + u, err := url.Parse(envVaultProxy) + if err != nil { + return err + } + + transport := c.HttpClient.Transport.(*http.Transport) + transport.Proxy = http.ProxyURL(u) + } + + return nil +} + +// ParseAddress transforms the provided address into a url.URL and handles +// the case of Unix domain sockets by setting the DialContext in the +// configuration's HttpClient.Transport. This function must be called with +// c.modifyLock held for write access. +func (c *Config) ParseAddress(address string) (*url.URL, error) { + u, err := url.Parse(address) + if err != nil { + return nil, err + } + + c.Address = address + + if strings.HasPrefix(address, "unix://") { + // When the address begins with unix://, always change the transport's + // DialContext (to match previous behaviour) + socket := strings.TrimPrefix(address, "unix://") + + if transport, ok := c.HttpClient.Transport.(*http.Transport); ok { + transport.DialContext = func(context.Context, string, string) (net.Conn, error) { + return net.Dial("unix", socket) + } + + // Since the address points to a unix domain socket, the scheme in the + // *URL would be set to `unix`. The *URL in the client is expected to + // be pointing to the protocol used in the application layer and not to + // the transport layer. Hence, setting the fields accordingly. + u.Scheme = "http" + u.Host = "localhost" + u.Path = "" + } else { + return nil, fmt.Errorf("attempting to specify unix:// address with non-transport transport") + } + } else if strings.HasPrefix(c.Address, "unix://") { + // When the address being set does not begin with unix:// but the previous + // address in the Config did, change the transport's DialContext back to + // use the default configuration that cleanhttp uses. + + if transport, ok := c.HttpClient.Transport.(*http.Transport); ok { + transport.DialContext = cleanhttp.DefaultPooledTransport().DialContext + } + } + + return u, nil +} + +func parseRateLimit(val string) (rate float64, burst int, err error) { + _, err = fmt.Sscanf(val, "%f:%d", &rate, &burst) + if err != nil { + rate, err = strconv.ParseFloat(val, 64) + if err != nil { + err = fmt.Errorf("%v was provided but incorrectly formatted", EnvRateLimit) + } + burst = int(rate) + } + + return rate, burst, err +} + +// Client is the client to the Vault API. Create a client with NewClient. +type Client struct { + modifyLock sync.RWMutex + addr *url.URL + config *Config + token string + headers http.Header + wrappingLookupFunc WrappingLookupFunc + mfaCreds []string + policyOverride bool + requestCallbacks []RequestCallback + responseCallbacks []ResponseCallback + replicationStateStore *replicationStateStore +} + +// NewClient returns a new client for the given configuration. +// +// If the configuration is nil, Vault will use configuration from +// DefaultConfig(), which is the recommended starting configuration. 
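+//
+// Both common construction forms, as a sketch:
+//
+//	client, err := NewClient(nil)            // uses DefaultConfig()
+//	client, err = NewClient(DefaultConfig()) // equivalent; the config can be
+//	                                         // adjusted before this call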
+// +// If the environment variable `VAULT_TOKEN` is present, the token will be +// automatically added to the client. Otherwise, you must manually call +// `SetToken()`. +func NewClient(c *Config) (*Client, error) { + def := DefaultConfig() + if def == nil { + return nil, fmt.Errorf("could not create/read default configuration") + } + if def.Error != nil { + return nil, errwrap.Wrapf("error encountered setting up default configuration: {{err}}", def.Error) + } + + if c == nil { + c = def + } + + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + if c.MinRetryWait == 0 { + c.MinRetryWait = def.MinRetryWait + } + + if c.MaxRetryWait == 0 { + c.MaxRetryWait = def.MaxRetryWait + } + + if c.HttpClient == nil { + c.HttpClient = def.HttpClient + } + if c.HttpClient.Transport == nil { + c.HttpClient.Transport = def.HttpClient.Transport + } + + address := c.Address + if c.AgentAddress != "" { + address = c.AgentAddress + } + + u, err := c.ParseAddress(address) + if err != nil { + return nil, err + } + + client := &Client{ + addr: u, + config: c, + headers: make(http.Header), + } + + if c.ReadYourWrites { + client.replicationStateStore = &replicationStateStore{} + } + + // Add the VaultRequest SSRF protection header + client.headers[RequestHeaderName] = []string{"true"} + + if token := os.Getenv(EnvVaultToken); token != "" { + client.token = token + } + + if namespace := os.Getenv(EnvVaultNamespace); namespace != "" { + client.setNamespace(namespace) + } + + return client, nil +} + +func (c *Client) CloneConfig() *Config { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + + newConfig := DefaultConfig() + newConfig.Address = c.config.Address + newConfig.AgentAddress = c.config.AgentAddress + newConfig.MinRetryWait = c.config.MinRetryWait + newConfig.MaxRetryWait = c.config.MaxRetryWait + newConfig.MaxRetries = c.config.MaxRetries + newConfig.Timeout = c.config.Timeout + newConfig.Backoff = c.config.Backoff + newConfig.CheckRetry = c.config.CheckRetry + newConfig.Logger = c.config.Logger + newConfig.Limiter = c.config.Limiter + newConfig.SRVLookup = c.config.SRVLookup + newConfig.CloneHeaders = c.config.CloneHeaders + newConfig.CloneToken = c.config.CloneToken + newConfig.ReadYourWrites = c.config.ReadYourWrites + newConfig.clientTLSConfig = c.config.clientTLSConfig + + // we specifically want a _copy_ of the client here, not a pointer to the original one + newClient := *c.config.HttpClient + newConfig.HttpClient = &newClient + + return newConfig +} + +// SetAddress sets the address of Vault in the client. The format of address should be +// "://:". Setting this on a client will override the +// value of VAULT_ADDR environment variable. +func (c *Client) SetAddress(addr string) error { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + parsedAddr, err := c.config.ParseAddress(addr) + if err != nil { + return errwrap.Wrapf("failed to set address: {{err}}", err) + } + + c.addr = parsedAddr + return nil +} + +// Address returns the Vault URL the client is configured to connect to +func (c *Client) Address() string { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + + return c.addr.String() +} + +func (c *Client) SetCheckRedirect(f func(*http.Request, []*http.Request) error) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.HttpClient.CheckRedirect = f +} + +// SetLimiter will set the rate limiter for this client. +// This method is thread-safe. 
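+// For example (sketch), c.SetLimiter(10, 20) allows an average of 10
+// requests per second with bursts of up to 20.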
+// rateLimit and burst are specified according to https://godoc.org/golang.org/x/time/rate#NewLimiter +func (c *Client) SetLimiter(rateLimit float64, burst int) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.Limiter = rate.NewLimiter(rate.Limit(rateLimit), burst) +} + +func (c *Client) Limiter() *rate.Limiter { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.Limiter +} + +// SetMinRetryWait sets the minimum time to wait before retrying in the case of certain errors. +func (c *Client) SetMinRetryWait(retryWait time.Duration) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.MinRetryWait = retryWait +} + +func (c *Client) MinRetryWait() time.Duration { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.MinRetryWait +} + +// SetMaxRetryWait sets the maximum time to wait before retrying in the case of certain errors. +func (c *Client) SetMaxRetryWait(retryWait time.Duration) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.MaxRetryWait = retryWait +} + +func (c *Client) MaxRetryWait() time.Duration { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.MaxRetryWait +} + +// SetMaxRetries sets the number of retries that will be used in the case of certain errors +func (c *Client) SetMaxRetries(retries int) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.MaxRetries = retries +} + +func (c *Client) SetMaxIdleConnections(idle int) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.HttpClient.Transport.(*http.Transport).MaxIdleConns = idle +} + +func (c *Client) MaxIdleConnections() int { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + return c.config.HttpClient.Transport.(*http.Transport).MaxIdleConns +} + +func (c *Client) SetDisableKeepAlives(disable bool) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.HttpClient.Transport.(*http.Transport).DisableKeepAlives = disable +} + +func (c *Client) DisableKeepAlives() bool { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.HttpClient.Transport.(*http.Transport).DisableKeepAlives +} + +func (c *Client) MaxRetries() int { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.MaxRetries +} + +func (c *Client) SetSRVLookup(srv bool) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.SRVLookup = srv +} + +func (c *Client) SRVLookup() bool { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.SRVLookup +} + +// SetCheckRetry sets the CheckRetry function to be 
used for future requests. +func (c *Client) SetCheckRetry(checkRetry retryablehttp.CheckRetry) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.CheckRetry = checkRetry +} + +func (c *Client) CheckRetry() retryablehttp.CheckRetry { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.CheckRetry +} + +// SetClientTimeout sets the client request timeout +func (c *Client) SetClientTimeout(timeout time.Duration) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.Timeout = timeout +} + +func (c *Client) ClientTimeout() time.Duration { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.Timeout +} + +func (c *Client) OutputCurlString() bool { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.OutputCurlString +} + +func (c *Client) SetOutputCurlString(curl bool) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.OutputCurlString = curl +} + +func (c *Client) OutputPolicy() bool { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.OutputPolicy +} + +func (c *Client) SetOutputPolicy(isSet bool) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.OutputPolicy = isSet +} + +// CurrentWrappingLookupFunc sets a lookup function that returns desired wrap TTLs +// for a given operation and path. +func (c *Client) CurrentWrappingLookupFunc() WrappingLookupFunc { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + return c.wrappingLookupFunc +} + +// SetWrappingLookupFunc sets a lookup function that returns desired wrap TTLs +// for a given operation and path. +func (c *Client) SetWrappingLookupFunc(lookupFunc WrappingLookupFunc) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.wrappingLookupFunc = lookupFunc +} + +// SetMFACreds sets the MFA credentials supplied either via the environment +// variable or via the command line. +func (c *Client) SetMFACreds(creds []string) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.mfaCreds = creds +} + +// SetNamespace sets the namespace supplied either via the environment +// variable or via the command line. +func (c *Client) SetNamespace(namespace string) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.setNamespace(namespace) +} + +func (c *Client) setNamespace(namespace string) { + if c.headers == nil { + c.headers = make(http.Header) + } + + c.headers.Set(NamespaceHeaderName, namespace) +} + +// ClearNamespace removes the namespace header if set. +func (c *Client) ClearNamespace() { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + if c.headers != nil { + c.headers.Del(NamespaceHeaderName) + } +} + +// Namespace returns the namespace currently set in this client. It will +// return an empty string if there is no namespace set. 
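+//
+// A short sketch combining the namespace helpers in this file:
+//
+//	client.SetNamespace("team-a")        // subsequent requests carry X-Vault-Namespace
+//	ns := client.Namespace()             // "team-a"
+//	c2 := client.WithNamespace("team-b") // shallow copy scoped to another namespace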
+func (c *Client) Namespace() string { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + if c.headers == nil { + return "" + } + return c.headers.Get(NamespaceHeaderName) +} + +// WithNamespace makes a shallow copy of Client, modifies it to use +// the given namespace, and returns it. Passing an empty string will +// temporarily unset the namespace. +func (c *Client) WithNamespace(namespace string) *Client { + c2 := *c + c2.modifyLock = sync.RWMutex{} + c2.headers = c.Headers() + if namespace == "" { + c2.ClearNamespace() + } else { + c2.SetNamespace(namespace) + } + return &c2 +} + +// Token returns the access token being used by this client. It will +// return the empty string if there is no token set. +func (c *Client) Token() string { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + return c.token +} + +// SetToken sets the token directly. This won't perform any auth +// verification, it simply sets the token properly for future requests. +func (c *Client) SetToken(v string) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.token = v +} + +// ClearToken deletes the token if it is set or does nothing otherwise. +func (c *Client) ClearToken() { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.token = "" +} + +// Headers gets the current set of headers used for requests. This returns a +// copy; to modify it call AddHeader or SetHeaders. +func (c *Client) Headers() http.Header { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + + if c.headers == nil { + return nil + } + + ret := make(http.Header) + for k, v := range c.headers { + for _, val := range v { + ret[k] = append(ret[k], val) + } + } + + return ret +} + +// AddHeader allows a single header key/value pair to be added +// in a race-safe fashion. +func (c *Client) AddHeader(key, value string) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.headers.Add(key, value) +} + +// SetHeaders clears all previous headers and uses only the given +// ones going forward. +func (c *Client) SetHeaders(headers http.Header) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.headers = headers +} + +// SetBackoff sets the backoff function to be used for future requests. +func (c *Client) SetBackoff(backoff retryablehttp.Backoff) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.Backoff = backoff +} + +func (c *Client) SetLogger(logger retryablehttp.LeveledLogger) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.Logger = logger +} + +// SetCloneHeaders to allow headers to be copied whenever the client is cloned. +func (c *Client) SetCloneHeaders(cloneHeaders bool) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.CloneHeaders = cloneHeaders +} + +// CloneHeaders gets the configured CloneHeaders value. +func (c *Client) CloneHeaders() bool { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.CloneHeaders +} + +// SetCloneToken from parent +func (c *Client) SetCloneToken(cloneToken bool) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.CloneToken = cloneToken +} + +// CloneToken gets the configured CloneToken value. 
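+//
+// Sketch of the clone-with-token flow these accessors support:
+//
+//	parent.SetCloneToken(true)
+//	child, err := parent.Clone() // child starts with parent's token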
+func (c *Client) CloneToken() bool { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.CloneToken +} + +// SetReadYourWrites to prevent reading stale cluster replication state. +func (c *Client) SetReadYourWrites(preventStaleReads bool) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + if preventStaleReads { + if c.replicationStateStore == nil { + c.replicationStateStore = &replicationStateStore{} + } + } else { + c.replicationStateStore = nil + } + + c.config.ReadYourWrites = preventStaleReads +} + +// ReadYourWrites gets the configured value of ReadYourWrites +func (c *Client) ReadYourWrites() bool { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.ReadYourWrites +} + +// Clone creates a new client with the same configuration. Note that the same +// underlying http.Client is used; modifying the client from more than one +// goroutine at once may not be safe, so modify the client as needed and then +// clone. The headers are cloned based on the CloneHeaders property of the +// source config +// +// Also, only the client's config is currently copied; this means items not in +// the api.Config struct, such as policy override and wrapping function +// behavior, must currently then be set as desired on the new client. +func (c *Client) Clone() (*Client, error) { + return c.clone(c.config.CloneHeaders) +} + +// CloneWithHeaders creates a new client similar to Clone, with the difference +// being that the headers are always cloned +func (c *Client) CloneWithHeaders() (*Client, error) { + return c.clone(true) +} + +// clone creates a new client, with the headers being cloned based on the +// passed in cloneheaders boolean +func (c *Client) clone(cloneHeaders bool) (*Client, error) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + + config := c.config + config.modifyLock.RLock() + defer config.modifyLock.RUnlock() + + newConfig := &Config{ + Address: config.Address, + HttpClient: config.HttpClient, + MinRetryWait: config.MinRetryWait, + MaxRetryWait: config.MaxRetryWait, + MaxRetries: config.MaxRetries, + Timeout: config.Timeout, + Backoff: config.Backoff, + CheckRetry: config.CheckRetry, + Logger: config.Logger, + Limiter: config.Limiter, + AgentAddress: config.AgentAddress, + SRVLookup: config.SRVLookup, + CloneHeaders: config.CloneHeaders, + CloneToken: config.CloneToken, + ReadYourWrites: config.ReadYourWrites, + } + client, err := NewClient(newConfig) + if err != nil { + return nil, err + } + + if cloneHeaders { + client.SetHeaders(c.Headers().Clone()) + } + + if config.CloneToken { + client.SetToken(c.token) + } + + client.replicationStateStore = c.replicationStateStore + + return client, nil +} + +// SetPolicyOverride sets whether requests should be sent with the policy +// override flag to request overriding soft-mandatory Sentinel policies (both +// RGPs and EGPs) +func (c *Client) SetPolicyOverride(override bool) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.policyOverride = override +} + +// NewRequest creates a new raw request object to query the Vault server +// configured for this client. This is an advanced method and generally +// doesn't need to be called externally. 
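+//
+// A sketch for callers that do need a raw request (the Logical API is the
+// preferred entry point for normal use):
+//
+//	req := client.NewRequest(http.MethodGet, "/v1/sys/health")
+//	resp, err := client.RawRequestWithContext(context.Background(), req)
+//	if err == nil {
+//		defer resp.Body.Close()
+//	}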
+func (c *Client) NewRequest(method, requestPath string) *Request { + c.modifyLock.RLock() + addr := c.addr + token := c.token + mfaCreds := c.mfaCreds + wrappingLookupFunc := c.wrappingLookupFunc + policyOverride := c.policyOverride + c.modifyLock.RUnlock() + + host := addr.Host + // if SRV records exist (see https://tools.ietf.org/html/draft-andrews-http-srv-02), lookup the SRV + // record and take the highest match; this is not designed for high-availability, just discovery + // Internet Draft specifies that the SRV record is ignored if a port is given + if addr.Port() == "" && c.config.SRVLookup { + _, addrs, err := net.LookupSRV("http", "tcp", addr.Hostname()) + if err == nil && len(addrs) > 0 { + host = fmt.Sprintf("%s:%d", addrs[0].Target, addrs[0].Port) + } + } + + req := &Request{ + Method: method, + URL: &url.URL{ + User: addr.User, + Scheme: addr.Scheme, + Host: host, + Path: path.Join(addr.Path, requestPath), + }, + Host: addr.Host, + ClientToken: token, + Params: make(map[string][]string), + } + + var lookupPath string + switch { + case strings.HasPrefix(requestPath, "/v1/"): + lookupPath = strings.TrimPrefix(requestPath, "/v1/") + case strings.HasPrefix(requestPath, "v1/"): + lookupPath = strings.TrimPrefix(requestPath, "v1/") + default: + lookupPath = requestPath + } + + req.MFAHeaderVals = mfaCreds + + if wrappingLookupFunc != nil { + req.WrapTTL = wrappingLookupFunc(method, lookupPath) + } else { + req.WrapTTL = DefaultWrappingLookupFunc(method, lookupPath) + } + + req.Headers = c.Headers() + req.PolicyOverride = policyOverride + + return req +} + +// RawRequest performs the raw request given. This request may be against +// a Vault server not configured with this client. This is an advanced operation +// that generally won't need to be called externally. +// +// Deprecated: RawRequest exists for historical compatibility and should not be +// used directly. Use client.Logical().ReadRaw(...) or higher level methods +// instead. +func (c *Client) RawRequest(r *Request) (*Response, error) { + return c.RawRequestWithContext(context.Background(), r) +} + +// RawRequestWithContext performs the raw request given. This request may be against +// a Vault server not configured with this client. This is an advanced operation +// that generally won't need to be called externally. +// +// Deprecated: RawRequestWithContext exists for historical compatibility and +// should not be used directly. Use client.Logical().ReadRawWithContext(...) +// or higher level methods instead. +func (c *Client) RawRequestWithContext(ctx context.Context, r *Request) (*Response, error) { + // Note: we purposefully do not call cancel manually. The reason is + // when canceled, the request.Body will EOF when reading due to the way + // it streams data in. Cancel will still be run when the timeout is + // hit, so this doesn't really harm anything. 
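+	// Apply the client-level Timeout (if configured) to the context; an
+	// earlier deadline already present on ctx still wins. See
+	// withConfiguredTimeout below.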
+ ctx, _ = c.withConfiguredTimeout(ctx) + return c.rawRequestWithContext(ctx, r) +} + +func (c *Client) rawRequestWithContext(ctx context.Context, r *Request) (*Response, error) { + c.modifyLock.RLock() + token := c.token + + c.config.modifyLock.RLock() + limiter := c.config.Limiter + minRetryWait := c.config.MinRetryWait + maxRetryWait := c.config.MaxRetryWait + maxRetries := c.config.MaxRetries + checkRetry := c.config.CheckRetry + backoff := c.config.Backoff + httpClient := c.config.HttpClient + ns := c.headers.Get(NamespaceHeaderName) + outputCurlString := c.config.OutputCurlString + outputPolicy := c.config.OutputPolicy + logger := c.config.Logger + disableRedirects := c.config.DisableRedirects + c.config.modifyLock.RUnlock() + + c.modifyLock.RUnlock() + + // ensure that the most current namespace setting is used at the time of the call + // e.g. calls using (*Client).WithNamespace + switch ns { + case "": + r.Headers.Del(NamespaceHeaderName) + default: + r.Headers.Set(NamespaceHeaderName, ns) + } + + for _, cb := range c.requestCallbacks { + cb(r) + } + + if c.config.ReadYourWrites { + c.replicationStateStore.requireState(r) + } + + if limiter != nil { + limiter.Wait(ctx) + } + + // check the token before potentially erroring from the API + if err := validateToken(token); err != nil { + return nil, err + } + + redirectCount := 0 +START: + req, err := r.toRetryableHTTP() + if err != nil { + return nil, err + } + if req == nil { + return nil, fmt.Errorf("nil request created") + } + + if outputCurlString { + LastOutputStringError = &OutputStringError{ + Request: req, + TLSSkipVerify: c.config.HttpClient.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify, + ClientCert: c.config.curlClientCert, + ClientKey: c.config.curlClientKey, + ClientCACert: c.config.curlCACert, + ClientCAPath: c.config.curlCAPath, + } + return nil, LastOutputStringError + } + + if outputPolicy { + LastOutputPolicyError = &OutputPolicyError{ + method: req.Method, + path: strings.TrimPrefix(req.URL.Path, "/v1"), + params: req.URL.Query(), + } + return nil, LastOutputPolicyError + } + + req.Request = req.Request.WithContext(ctx) + + if backoff == nil { + backoff = retryablehttp.LinearJitterBackoff + } + + if checkRetry == nil { + checkRetry = DefaultRetryPolicy + } + + client := &retryablehttp.Client{ + HTTPClient: httpClient, + RetryWaitMin: minRetryWait, + RetryWaitMax: maxRetryWait, + RetryMax: maxRetries, + Backoff: backoff, + CheckRetry: checkRetry, + Logger: logger, + ErrorHandler: retryablehttp.PassthroughErrorHandler, + } + + var result *Response + resp, err := client.Do(req) + if resp != nil { + result = &Response{Response: resp} + } + if err != nil { + if strings.Contains(err.Error(), "tls: oversized") { + err = errwrap.Wrapf("{{err}}\n\n"+TLSErrorString, err) + } + return result, err + } + + // Check for a redirect, only allowing for a single redirect (if redirects aren't disabled) + if (resp.StatusCode == 301 || resp.StatusCode == 302 || resp.StatusCode == 307) && redirectCount == 0 && !disableRedirects { + // Parse the updated location + respLoc, err := resp.Location() + if err != nil { + return result, err + } + + // Ensure a protocol downgrade doesn't happen + if req.URL.Scheme == "https" && respLoc.Scheme != "https" { + return result, fmt.Errorf("redirect would cause protocol downgrade") + } + + // Update the request + r.URL = respLoc + + // Reset the request body if any + if err := r.ResetJSONBody(); err != nil { + return result, err + } + + // Retry the request + redirectCount++ + goto 
START + } + + if result != nil { + for _, cb := range c.responseCallbacks { + cb(result) + } + + if c.config.ReadYourWrites { + c.replicationStateStore.recordState(result) + } + } + if err := result.Error(); err != nil { + return result, err + } + + return result, nil +} + +// httpRequestWithContext avoids the use of the go-retryable library found in RawRequestWithContext and is +// useful when making calls where a net/http client is desirable. A single redirect (status code 301, 302, +// or 307) will be followed but all retry and timeout logic is the responsibility of the caller as is +// closing the Response body. +func (c *Client) httpRequestWithContext(ctx context.Context, r *Request) (*Response, error) { + req, err := http.NewRequestWithContext(ctx, r.Method, r.URL.RequestURI(), r.Body) + if err != nil { + return nil, err + } + + c.modifyLock.RLock() + token := c.token + + c.config.modifyLock.RLock() + limiter := c.config.Limiter + httpClient := c.config.HttpClient + outputCurlString := c.config.OutputCurlString + outputPolicy := c.config.OutputPolicy + disableRedirects := c.config.DisableRedirects + + // add headers + if c.headers != nil { + for header, vals := range c.headers { + for _, val := range vals { + req.Header.Add(header, val) + } + } + // explicitly set the namespace header to current client + if ns := c.headers.Get(NamespaceHeaderName); ns != "" { + r.Headers.Set(NamespaceHeaderName, ns) + } + } + + c.config.modifyLock.RUnlock() + c.modifyLock.RUnlock() + + // OutputCurlString and OutputPolicy logic rely on the request type to be retryable.Request + if outputCurlString { + return nil, fmt.Errorf("output-curl-string is not implemented for this request") + } + if outputPolicy { + return nil, fmt.Errorf("output-policy is not implemented for this request") + } + + req.URL.User = r.URL.User + req.URL.Scheme = r.URL.Scheme + req.URL.Host = r.URL.Host + req.Host = r.URL.Host + + if len(r.ClientToken) != 0 { + req.Header.Set(AuthHeaderName, r.ClientToken) + } + + if len(r.WrapTTL) != 0 { + req.Header.Set("X-Vault-Wrap-TTL", r.WrapTTL) + } + + if len(r.MFAHeaderVals) != 0 { + for _, mfaHeaderVal := range r.MFAHeaderVals { + req.Header.Add("X-Vault-MFA", mfaHeaderVal) + } + } + + if r.PolicyOverride { + req.Header.Set("X-Vault-Policy-Override", "true") + } + + if limiter != nil { + limiter.Wait(ctx) + } + + // check the token before potentially erroring from the API + if err := validateToken(token); err != nil { + return nil, err + } + + var result *Response + + resp, err := httpClient.Do(req) + + if resp != nil { + result = &Response{Response: resp} + } + + if err != nil { + if strings.Contains(err.Error(), "tls: oversized") { + err = errwrap.Wrapf("{{err}}\n\n"+TLSErrorString, err) + } + return result, err + } + + // Check for a redirect, only allowing for a single redirect, if redirects aren't disabled + if (resp.StatusCode == 301 || resp.StatusCode == 302 || resp.StatusCode == 307) && !disableRedirects { + // Parse the updated location + respLoc, err := resp.Location() + if err != nil { + return result, fmt.Errorf("redirect failed: %s", err) + } + + // Ensure a protocol downgrade doesn't happen + if req.URL.Scheme == "https" && respLoc.Scheme != "https" { + return result, fmt.Errorf("redirect would cause protocol downgrade") + } + + // Update the request + req.URL = respLoc + + // Reset the request body if any + if err := r.ResetJSONBody(); err != nil { + return result, fmt.Errorf("redirect failed: %s", err) + } + + // Retry the request + resp, err = httpClient.Do(req) + if err 
!= nil { + return result, fmt.Errorf("redirect failed: %s", err) + } + } + + if err := result.Error(); err != nil { + return nil, err + } + + return result, nil +} + +type ( + RequestCallback func(*Request) + ResponseCallback func(*Response) +) + +// WithRequestCallbacks makes a shallow clone of Client, modifies it to use +// the given callbacks, and returns it. Each of the callbacks will be invoked +// on every outgoing request. A client may be used to issue requests +// concurrently; any locking needed by callbacks invoked concurrently is the +// callback's responsibility. +func (c *Client) WithRequestCallbacks(callbacks ...RequestCallback) *Client { + c2 := *c + c2.modifyLock = sync.RWMutex{} + c2.requestCallbacks = callbacks + return &c2 +} + +// WithResponseCallbacks makes a shallow clone of Client, modifies it to use +// the given callbacks, and returns it. Each of the callbacks will be invoked +// on every received response. A client may be used to issue requests +// concurrently; any locking needed by callbacks invoked concurrently is the +// callback's responsibility. +func (c *Client) WithResponseCallbacks(callbacks ...ResponseCallback) *Client { + c2 := *c + c2.modifyLock = sync.RWMutex{} + c2.responseCallbacks = callbacks + return &c2 +} + +// withConfiguredTimeout wraps the context with a timeout from the client configuration. +func (c *Client) withConfiguredTimeout(ctx context.Context) (context.Context, context.CancelFunc) { + timeout := c.ClientTimeout() + + if timeout > 0 { + return context.WithTimeout(ctx, timeout) + } + + return ctx, func() {} +} + +// RecordState returns a response callback that will record the state returned +// by Vault in a response header. +func RecordState(state *string) ResponseCallback { + return func(resp *Response) { + *state = resp.Header.Get(HeaderIndex) + } +} + +// RequireState returns a request callback that will add a request header to +// specify the state we require of Vault. This state was obtained from a +// response header seen previous, probably captured with RecordState. +func RequireState(states ...string) RequestCallback { + return func(req *Request) { + for _, s := range states { + req.Headers.Add(HeaderIndex, s) + } + } +} + +// compareReplicationStates returns 1 if s1 is newer or identical, -1 if s1 is older, and 0 +// if neither s1 or s2 is strictly greater. An error is returned if s1 or s2 +// are invalid or from different clusters. +func compareReplicationStates(s1, s2 string) (int, error) { + w1, err := ParseReplicationState(s1, nil) + if err != nil { + return 0, err + } + w2, err := ParseReplicationState(s2, nil) + if err != nil { + return 0, err + } + + if w1.ClusterID != w2.ClusterID { + return 0, fmt.Errorf("can't compare replication states with different ClusterIDs") + } + + switch { + case w1.LocalIndex >= w2.LocalIndex && w1.ReplicatedIndex >= w2.ReplicatedIndex: + return 1, nil + // We've already handled the case where both are equal above, so really we're + // asking here if one or both are lesser. + case w1.LocalIndex <= w2.LocalIndex && w1.ReplicatedIndex <= w2.ReplicatedIndex: + return -1, nil + } + + return 0, nil +} + +// MergeReplicationStates returns a merged array of replication states by iterating +// through all states in `old`. 
An iterated state is merged to the result before `new` +// based on the result of compareReplicationStates +func MergeReplicationStates(old []string, new string) []string { + if len(old) == 0 || len(old) > 2 { + return []string{new} + } + + var ret []string + for _, o := range old { + c, err := compareReplicationStates(o, new) + if err != nil { + return []string{new} + } + switch c { + case 1: + ret = append(ret, o) + case -1: + ret = append(ret, new) + case 0: + ret = append(ret, o, new) + } + } + return strutil.RemoveDuplicates(ret, false) +} + +type WALState struct { + ClusterID string + LocalIndex uint64 + ReplicatedIndex uint64 +} + +func ParseReplicationState(raw string, hmacKey []byte) (*WALState, error) { + cooked, err := base64.StdEncoding.DecodeString(raw) + if err != nil { + return nil, err + } + s := string(cooked) + + lastIndex := strings.LastIndexByte(s, ':') + if lastIndex == -1 { + return nil, fmt.Errorf("invalid full state header format") + } + state, stateHMACRaw := s[:lastIndex], s[lastIndex+1:] + stateHMAC, err := hex.DecodeString(stateHMACRaw) + if err != nil { + return nil, fmt.Errorf("invalid state header HMAC: %v, %w", stateHMACRaw, err) + } + + if len(hmacKey) != 0 { + hm := hmac.New(sha256.New, hmacKey) + hm.Write([]byte(state)) + if !hmac.Equal(hm.Sum(nil), stateHMAC) { + return nil, fmt.Errorf("invalid state header HMAC (mismatch)") + } + } + + pieces := strings.Split(state, ":") + if len(pieces) != 4 || pieces[0] != "v1" || pieces[1] == "" { + return nil, fmt.Errorf("invalid state header format") + } + localIndex, err := strconv.ParseUint(pieces[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid local index in state header: %w", err) + } + replicatedIndex, err := strconv.ParseUint(pieces[3], 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid replicated index in state header: %w", err) + } + + return &WALState{ + ClusterID: pieces[1], + LocalIndex: localIndex, + ReplicatedIndex: replicatedIndex, + }, nil +} + +// ForwardInconsistent returns a request callback that will add a request +// header which says: if the state required isn't present on the node receiving +// this request, forward it to the active node. This should be used in +// conjunction with RequireState. +func ForwardInconsistent() RequestCallback { + return func(req *Request) { + req.Headers.Set(HeaderInconsistent, "forward-active-node") + } +} + +// ForwardAlways returns a request callback which adds a header telling any +// performance standbys handling the request to forward it to the active node. +// This feature must be enabled in Vault's configuration. +func ForwardAlways() RequestCallback { + return func(req *Request) { + req.Headers.Set(HeaderForward, "active-node") + } +} + +// DefaultRetryPolicy is the default retry policy used by new Client objects. +// It is the same as retryablehttp.DefaultRetryPolicy except that it also retries +// 412 requests, which are returned by Vault when a X-Vault-Index header isn't +// satisfied. +func DefaultRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) { + retry, err := retryablehttp.DefaultRetryPolicy(ctx, resp, err) + if err != nil || retry { + return retry, err + } + if resp != nil && resp.StatusCode == 412 { + return true, nil + } + return false, nil +} + +// replicationStateStore is used to track cluster replication states +// in order to ensure proper read-after-write semantics for a Client. 
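+//
+// Flow sketch: with ReadYourWrites enabled, recordState captures the
+// X-Vault-Index header from each response and requireState replays the
+// merged states on every subsequent request:
+//
+//	cfg := DefaultConfig()
+//	cfg.ReadYourWrites = true // Enterprise server-side feature, per Config docs
+//	client, _ := NewClient(cfg)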
+type replicationStateStore struct { + m sync.RWMutex + store []string +} + +// recordState updates the store's replication states with the merger of all +// states. +func (w *replicationStateStore) recordState(resp *Response) { + w.m.Lock() + defer w.m.Unlock() + newState := resp.Header.Get(HeaderIndex) + if newState != "" { + w.store = MergeReplicationStates(w.store, newState) + } +} + +// requireState updates the Request with the store's current replication states. +func (w *replicationStateStore) requireState(req *Request) { + w.m.RLock() + defer w.m.RUnlock() + for _, s := range w.store { + req.Headers.Add(HeaderIndex, s) + } +} + +// states currently stored. +func (w *replicationStateStore) states() []string { + w.m.RLock() + defer w.m.RUnlock() + c := make([]string, len(w.store)) + copy(c, w.store) + return c +} + +// validateToken will check for non-printable characters to prevent a call that will fail at the api +func validateToken(t string) error { + idx := strings.IndexFunc(t, func(c rune) bool { + return !unicode.IsPrint(c) + }) + if idx != -1 { + return fmt.Errorf("configured Vault token contains non-printable characters and cannot be used") + } + return nil +} diff --git a/api/client_test.go b/api/client_test.go new file mode 100644 index 0000000..a23c0c1 --- /dev/null +++ b/api/client_test.go @@ -0,0 +1,1438 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "bytes" + "context" + "crypto/x509" + "encoding/base64" + "fmt" + "io" + "net/http" + "net/url" + "os" + "reflect" + "sort" + "strings" + "sync" + "testing" + "time" + + "github.com/go-test/deep" + "github.com/hashicorp/go-hclog" +) + +func init() { + // Ensure our special envvars are not present + os.Setenv("VAULT_ADDR", "") + os.Setenv("VAULT_TOKEN", "") +} + +func TestDefaultConfig_envvar(t *testing.T) { + os.Setenv("VAULT_ADDR", "https://vault.mycompany.com") + defer os.Setenv("VAULT_ADDR", "") + + config := DefaultConfig() + if config.Address != "https://vault.mycompany.com" { + t.Fatalf("bad: %s", config.Address) + } + + os.Setenv("VAULT_TOKEN", "testing") + defer os.Setenv("VAULT_TOKEN", "") + + client, err := NewClient(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + if token := client.Token(); token != "testing" { + t.Fatalf("bad: %s", token) + } +} + +func TestClientDefaultHttpClient(t *testing.T) { + _, err := NewClient(&Config{ + HttpClient: http.DefaultClient, + }) + if err != nil { + t.Fatal(err) + } +} + +func TestClientNilConfig(t *testing.T) { + client, err := NewClient(nil) + if err != nil { + t.Fatal(err) + } + if client == nil { + t.Fatal("expected a non-nil client") + } +} + +func TestClientDefaultHttpClient_unixSocket(t *testing.T) { + os.Setenv("VAULT_AGENT_ADDR", "unix:///var/run/vault.sock") + defer os.Setenv("VAULT_AGENT_ADDR", "") + + client, err := NewClient(nil) + if err != nil { + t.Fatal(err) + } + if client == nil { + t.Fatal("expected a non-nil client") + } + if client.addr.Scheme != "http" { + t.Fatalf("bad: %s", client.addr.Scheme) + } + if client.addr.Host != "/var/run/vault.sock" { + t.Fatalf("bad: %s", client.addr.Host) + } +} + +func TestClientSetAddress(t *testing.T) { + client, err := NewClient(nil) + if err != nil { + t.Fatal(err) + } + // Start with TCP address using HTTP + if err := client.SetAddress("http://172.168.2.1:8300"); err != nil { + t.Fatal(err) + } + if client.addr.Host != "172.168.2.1:8300" { + t.Fatalf("bad: expected: '172.168.2.1:8300' actual: %q", client.addr.Host) + } + // Test switching to Unix 
Socket address from TCP address + if err := client.SetAddress("unix:///var/run/vault.sock"); err != nil { + t.Fatal(err) + } + if client.addr.Scheme != "http" { + t.Fatalf("bad: expected: 'http' actual: %q", client.addr.Scheme) + } + if client.addr.Host != "/var/run/vault.sock" { + t.Fatalf("bad: expected: '/var/run/vault.sock' actual: %q", client.addr.Host) + } + if client.addr.Path != "" { + t.Fatalf("bad: expected '' actual: %q", client.addr.Path) + } + if client.config.HttpClient.Transport.(*http.Transport).DialContext == nil { + t.Fatal("bad: expected DialContext to not be nil") + } + // Test switching to TCP address from Unix Socket address + if err := client.SetAddress("http://172.168.2.1:8300"); err != nil { + t.Fatal(err) + } + if client.addr.Host != "172.168.2.1:8300" { + t.Fatalf("bad: expected: '172.168.2.1:8300' actual: %q", client.addr.Host) + } + if client.addr.Scheme != "http" { + t.Fatalf("bad: expected: 'http' actual: %q", client.addr.Scheme) + } +} + +func TestClientToken(t *testing.T) { + tokenValue := "foo" + handler := func(w http.ResponseWriter, req *http.Request) {} + + config, ln := testHTTPServer(t, http.HandlerFunc(handler)) + defer ln.Close() + + client, err := NewClient(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + client.SetToken(tokenValue) + + // Verify the token is set + if v := client.Token(); v != tokenValue { + t.Fatalf("bad: %s", v) + } + + client.ClearToken() + + if v := client.Token(); v != "" { + t.Fatalf("bad: %s", v) + } +} + +func TestClientHostHeader(t *testing.T) { + handler := func(w http.ResponseWriter, req *http.Request) { + w.Write([]byte(req.Host)) + } + config, ln := testHTTPServer(t, http.HandlerFunc(handler)) + defer ln.Close() + + config.Address = strings.ReplaceAll(config.Address, "127.0.0.1", "localhost") + client, err := NewClient(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Set the token manually + client.SetToken("foo") + + resp, err := client.RawRequest(client.NewRequest(http.MethodPut, "/")) + if err != nil { + t.Fatal(err) + } + + // Copy the response + var buf bytes.Buffer + io.Copy(&buf, resp.Body) + + // Verify we got the response from the primary + if buf.String() != strings.ReplaceAll(config.Address, "http://", "") { + t.Fatalf("Bad address: %s", buf.String()) + } +} + +func TestClientBadToken(t *testing.T) { + handler := func(w http.ResponseWriter, req *http.Request) {} + + config, ln := testHTTPServer(t, http.HandlerFunc(handler)) + defer ln.Close() + + client, err := NewClient(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + client.SetToken("foo") + _, err = client.RawRequest(client.NewRequest(http.MethodPut, "/")) + if err != nil { + t.Fatal(err) + } + + client.SetToken("foo\u007f") + _, err = client.RawRequest(client.NewRequest(http.MethodPut, "/")) + if err == nil || !strings.Contains(err.Error(), "printable") { + t.Fatalf("expected error due to bad token") + } +} + +func TestClientDisableRedirects(t *testing.T) { + tests := map[string]struct { + statusCode int + expectedNumReqs int + disableRedirects bool + }{ + "Disabled redirects: Moved permanently": {statusCode: 301, expectedNumReqs: 1, disableRedirects: true}, + "Disabled redirects: Found": {statusCode: 302, expectedNumReqs: 1, disableRedirects: true}, + "Disabled redirects: Temporary Redirect": {statusCode: 307, expectedNumReqs: 1, disableRedirects: true}, + "Enable redirects: Moved permanently": {statusCode: 301, expectedNumReqs: 2, disableRedirects: false}, + } + + for name, tc := range tests { + test := tc + name 
:= name + t.Run(name, func(t *testing.T) { + t.Parallel() + numReqs := 0 + var config *Config + + respFunc := func(w http.ResponseWriter, req *http.Request) { + // Track how many requests the server has handled + numReqs++ + // Send back the relevant status code and generate a location + w.Header().Set("Location", fmt.Sprintf(config.Address+"/reqs/%v", numReqs)) + w.WriteHeader(test.statusCode) + } + + config, ln := testHTTPServer(t, http.HandlerFunc(respFunc)) + config.DisableRedirects = test.disableRedirects + defer ln.Close() + + client, err := NewClient(config) + if err != nil { + t.Fatalf("%s: error %v", name, err) + } + + req := client.NewRequest("GET", "/") + resp, err := client.rawRequestWithContext(context.Background(), req) + if err != nil { + t.Fatalf("%s: error %v", name, err) + } + + if numReqs != test.expectedNumReqs { + t.Fatalf("%s: expected %v request(s) but got %v", name, test.expectedNumReqs, numReqs) + } + + if resp.StatusCode != test.statusCode { + t.Fatalf("%s: expected status code %v got %v", name, test.statusCode, resp.StatusCode) + } + + location, err := resp.Location() + if err != nil { + t.Fatalf("%s error %v", name, err) + } + if req.URL.String() == location.String() { + t.Fatalf("%s: expected request URL %v to be different from redirect URL %v", name, req.URL, resp.Request.URL) + } + }) + } +} + +func TestClientRedirect(t *testing.T) { + primary := func(w http.ResponseWriter, req *http.Request) { + w.Write([]byte("test")) + } + config, ln := testHTTPServer(t, http.HandlerFunc(primary)) + defer ln.Close() + + standby := func(w http.ResponseWriter, req *http.Request) { + w.Header().Set("Location", config.Address) + w.WriteHeader(307) + } + config2, ln2 := testHTTPServer(t, http.HandlerFunc(standby)) + defer ln2.Close() + + client, err := NewClient(config2) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Set the token manually + client.SetToken("foo") + + // Do a raw "/" request + resp, err := client.RawRequest(client.NewRequest(http.MethodPut, "/")) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Copy the response + var buf bytes.Buffer + io.Copy(&buf, resp.Body) + + // Verify we got the response from the primary + if buf.String() != "test" { + t.Fatalf("Bad: %s", buf.String()) + } +} + +func TestDefaulRetryPolicy(t *testing.T) { + cases := map[string]struct { + resp *http.Response + err error + expect bool + expectErr error + }{ + "retry on error": { + err: fmt.Errorf("error"), + expect: true, + }, + "don't retry connection failures": { + err: &url.Error{ + Err: x509.UnknownAuthorityError{}, + }, + }, + "don't retry on 200": { + resp: &http.Response{ + StatusCode: http.StatusOK, + }, + }, + "don't retry on 4xx": { + resp: &http.Response{ + StatusCode: http.StatusBadRequest, + }, + }, + "don't retry on 501": { + resp: &http.Response{ + StatusCode: http.StatusNotImplemented, + }, + }, + "retry on 500": { + resp: &http.Response{ + StatusCode: http.StatusInternalServerError, + }, + expect: true, + }, + "retry on 5xx": { + resp: &http.Response{ + StatusCode: http.StatusGatewayTimeout, + }, + expect: true, + }, + } + + for name, test := range cases { + t.Run(name, func(t *testing.T) { + retry, err := DefaultRetryPolicy(context.Background(), test.resp, test.err) + if retry != test.expect { + t.Fatalf("expected to retry request: '%t', but actual result was: '%t'", test.expect, retry) + } + if err != test.expectErr { + t.Fatalf("expected error from retry policy: %q, but actual result was: %q", err, test.expectErr) + } + }) + } +} + +func 
TestClientEnvSettings(t *testing.T) { + cwd, _ := os.Getwd() + + caCertBytes, err := os.ReadFile(cwd + "/test-fixtures/keys/cert.pem") + if err != nil { + t.Fatalf("error reading %q cert file: %v", cwd+"/test-fixtures/keys/cert.pem", err) + } + + oldCACert := os.Getenv(EnvVaultCACert) + oldCACertBytes := os.Getenv(EnvVaultCACertBytes) + oldCAPath := os.Getenv(EnvVaultCAPath) + oldClientCert := os.Getenv(EnvVaultClientCert) + oldClientKey := os.Getenv(EnvVaultClientKey) + oldSkipVerify := os.Getenv(EnvVaultSkipVerify) + oldMaxRetries := os.Getenv(EnvVaultMaxRetries) + oldDisableRedirects := os.Getenv(EnvVaultDisableRedirects) + + os.Setenv(EnvVaultCACert, cwd+"/test-fixtures/keys/cert.pem") + os.Setenv(EnvVaultCACertBytes, string(caCertBytes)) + os.Setenv(EnvVaultCAPath, cwd+"/test-fixtures/keys") + os.Setenv(EnvVaultClientCert, cwd+"/test-fixtures/keys/cert.pem") + os.Setenv(EnvVaultClientKey, cwd+"/test-fixtures/keys/key.pem") + os.Setenv(EnvVaultSkipVerify, "true") + os.Setenv(EnvVaultMaxRetries, "5") + os.Setenv(EnvVaultDisableRedirects, "true") + + defer func() { + os.Setenv(EnvVaultCACert, oldCACert) + os.Setenv(EnvVaultCACertBytes, oldCACertBytes) + os.Setenv(EnvVaultCAPath, oldCAPath) + os.Setenv(EnvVaultClientCert, oldClientCert) + os.Setenv(EnvVaultClientKey, oldClientKey) + os.Setenv(EnvVaultSkipVerify, oldSkipVerify) + os.Setenv(EnvVaultMaxRetries, oldMaxRetries) + os.Setenv(EnvVaultDisableRedirects, oldDisableRedirects) + }() + + config := DefaultConfig() + if err := config.ReadEnvironment(); err != nil { + t.Fatalf("error reading environment: %v", err) + } + + tlsConfig := config.HttpClient.Transport.(*http.Transport).TLSClientConfig + if len(tlsConfig.RootCAs.Subjects()) == 0 { + t.Fatalf("bad: expected a cert pool with at least one subject") + } + if tlsConfig.GetClientCertificate == nil { + t.Fatalf("bad: expected client tls config to have a certificate getter") + } + if tlsConfig.InsecureSkipVerify != true { + t.Fatalf("bad: %v", tlsConfig.InsecureSkipVerify) + } + if config.DisableRedirects != true { + t.Fatalf("bad: expected disable redirects to be true: %v", config.DisableRedirects) + } +} + +func TestClientDeprecatedEnvSettings(t *testing.T) { + oldInsecure := os.Getenv(EnvVaultInsecure) + os.Setenv(EnvVaultInsecure, "true") + defer os.Setenv(EnvVaultInsecure, oldInsecure) + + config := DefaultConfig() + if err := config.ReadEnvironment(); err != nil { + t.Fatalf("error reading environment: %v", err) + } + + tlsConfig := config.HttpClient.Transport.(*http.Transport).TLSClientConfig + if tlsConfig.InsecureSkipVerify != true { + t.Fatalf("bad: %v", tlsConfig.InsecureSkipVerify) + } +} + +func TestClientEnvNamespace(t *testing.T) { + var seenNamespace string + handler := func(w http.ResponseWriter, req *http.Request) { + seenNamespace = req.Header.Get(NamespaceHeaderName) + } + config, ln := testHTTPServer(t, http.HandlerFunc(handler)) + defer ln.Close() + + oldVaultNamespace := os.Getenv(EnvVaultNamespace) + defer os.Setenv(EnvVaultNamespace, oldVaultNamespace) + os.Setenv(EnvVaultNamespace, "test") + + client, err := NewClient(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + _, err = client.RawRequest(client.NewRequest(http.MethodGet, "/")) + if err != nil { + t.Fatalf("err: %s", err) + } + + if seenNamespace != "test" { + t.Fatalf("Bad: %s", seenNamespace) + } +} + +func TestParsingRateAndBurst(t *testing.T) { + var ( + correctFormat = "400:400" + observedRate, observedBurst, err = parseRateLimit(correctFormat) + expectedRate, expectedBurst = float64(400), 
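// The environment variables exercised above are folded into a Config by
// ReadEnvironment; a minimal sketch of the caller side:
//
//	cfg := DefaultConfig()
//	// Honors VAULT_CACERT, VAULT_CAPATH, VAULT_CLIENT_CERT, VAULT_SKIP_VERIFY,
//	// VAULT_MAX_RETRIES, VAULT_DISABLE_REDIRECTS, and friends.
//	if err := cfg.ReadEnvironment(); err != nil {
//		// handle error
//	}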
400 + ) + if err != nil { + t.Error(err) + } + if expectedRate != observedRate { + t.Errorf("Expected rate %v but found %v", expectedRate, observedRate) + } + if expectedBurst != observedBurst { + t.Errorf("Expected burst %v but found %v", expectedBurst, observedBurst) + } +} + +func TestParsingRateOnly(t *testing.T) { + var ( + correctFormat = "400" + observedRate, observedBurst, err = parseRateLimit(correctFormat) + expectedRate, expectedBurst = float64(400), 400 + ) + if err != nil { + t.Error(err) + } + if expectedRate != observedRate { + t.Errorf("Expected rate %v but found %v", expectedRate, observedRate) + } + if expectedBurst != observedBurst { + t.Errorf("Expected burst %v but found %v", expectedBurst, observedBurst) + } +} + +func TestParsingErrorCase(t *testing.T) { + incorrectFormat := "foobar" + _, _, err := parseRateLimit(incorrectFormat) + if err == nil { + t.Error("Expected error, found no error") + } +} + +func TestClientTimeoutSetting(t *testing.T) { + oldClientTimeout := os.Getenv(EnvVaultClientTimeout) + os.Setenv(EnvVaultClientTimeout, "10") + defer os.Setenv(EnvVaultClientTimeout, oldClientTimeout) + config := DefaultConfig() + config.ReadEnvironment() + _, err := NewClient(config) + if err != nil { + t.Fatal(err) + } +} + +type roundTripperFunc func(*http.Request) (*http.Response, error) + +func (rt roundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { + return rt(r) +} + +func TestClientNonTransportRoundTripper(t *testing.T) { + client := &http.Client{ + Transport: roundTripperFunc(http.DefaultTransport.RoundTrip), + } + + _, err := NewClient(&Config{ + HttpClient: client, + }) + if err != nil { + t.Fatal(err) + } +} + +func TestClientNonTransportRoundTripperUnixAddress(t *testing.T) { + client := &http.Client{ + Transport: roundTripperFunc(http.DefaultTransport.RoundTrip), + } + + _, err := NewClient(&Config{ + HttpClient: client, + Address: "unix:///var/run/vault.sock", + }) + if err == nil { + t.Fatal("bad: expected error got nil") + } +} + +func TestClone(t *testing.T) { + type fields struct{} + tests := []struct { + name string + config *Config + headers *http.Header + token string + }{ + { + name: "default", + config: DefaultConfig(), + }, + { + name: "cloneHeaders", + config: &Config{ + CloneHeaders: true, + }, + headers: &http.Header{ + "X-foo": []string{"bar"}, + "X-baz": []string{"qux"}, + }, + }, + { + name: "preventStaleReads", + config: &Config{ + ReadYourWrites: true, + }, + }, + { + name: "cloneToken", + config: &Config{ + CloneToken: true, + }, + token: "cloneToken", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parent, err := NewClient(tt.config) + if err != nil { + t.Fatalf("NewClient failed: %v", err) + } + + // Set all of the things that we provide setter methods for, which modify config values + err = parent.SetAddress("http://example.com:8080") + if err != nil { + t.Fatalf("SetAddress failed: %v", err) + } + + clientTimeout := time.Until(time.Now().AddDate(0, 0, 1)) + parent.SetClientTimeout(clientTimeout) + + checkRetry := func(ctx context.Context, resp *http.Response, err error) (bool, error) { + return true, nil + } + parent.SetCheckRetry(checkRetry) + + parent.SetLogger(hclog.NewNullLogger()) + + parent.SetLimiter(5.0, 10) + parent.SetMaxRetries(5) + parent.SetOutputCurlString(true) + parent.SetOutputPolicy(true) + parent.SetSRVLookup(true) + + if tt.headers != nil { + parent.SetHeaders(*tt.headers) + } + + if tt.token != "" { + parent.SetToken(tt.token) + } + + clone, err := parent.Clone() 
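// parseRateLimit, exercised above in TestParsingRateAndBurst and
// TestParsingRateOnly, accepts "rate:burst" (e.g. "400:400") or a bare rate
// ("400", with burst defaulting to the rate). The programmatic equivalent of
// "400:400" on a client is (a sketch):
//
//	client.SetLimiter(400.0, 400) // 400 operations/sec with a burst of 400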
+ if err != nil { + t.Fatalf("Clone failed: %v", err) + } + + if parent.Address() != clone.Address() { + t.Fatalf("addresses don't match: %v vs %v", parent.Address(), clone.Address()) + } + if parent.ClientTimeout() != clone.ClientTimeout() { + t.Fatalf("timeouts don't match: %v vs %v", parent.ClientTimeout(), clone.ClientTimeout()) + } + if parent.CheckRetry() != nil && clone.CheckRetry() == nil { + t.Fatal("checkRetry functions don't match. clone is nil.") + } + if (parent.Limiter() != nil && clone.Limiter() == nil) || (parent.Limiter() == nil && clone.Limiter() != nil) { + t.Fatalf("limiters don't match: %v vs %v", parent.Limiter(), clone.Limiter()) + } + if parent.Limiter().Limit() != clone.Limiter().Limit() { + t.Fatalf("limiter limits don't match: %v vs %v", parent.Limiter().Limit(), clone.Limiter().Limit()) + } + if parent.Limiter().Burst() != clone.Limiter().Burst() { + t.Fatalf("limiter bursts don't match: %v vs %v", parent.Limiter().Burst(), clone.Limiter().Burst()) + } + if parent.MaxRetries() != clone.MaxRetries() { + t.Fatalf("maxRetries don't match: %v vs %v", parent.MaxRetries(), clone.MaxRetries()) + } + if parent.OutputCurlString() == clone.OutputCurlString() { + t.Fatalf("outputCurlString was copied over when it shouldn't have been: %v and %v", parent.OutputCurlString(), clone.OutputCurlString()) + } + if parent.SRVLookup() != clone.SRVLookup() { + t.Fatalf("SRVLookup doesn't match: %v vs %v", parent.SRVLookup(), clone.SRVLookup()) + } + if tt.config.CloneHeaders { + if !reflect.DeepEqual(parent.Headers(), clone.Headers()) { + t.Fatalf("Headers() don't match: %v vs %v", parent.Headers(), clone.Headers()) + } + if parent.config.CloneHeaders != clone.config.CloneHeaders { + t.Fatalf("config.CloneHeaders doesn't match: %v vs %v", parent.config.CloneHeaders, clone.config.CloneHeaders) + } + if tt.headers != nil { + if !reflect.DeepEqual(*tt.headers, clone.Headers()) { + t.Fatalf("expected headers %v, actual %v", *tt.headers, clone.Headers()) + } + } + } + if tt.config.ReadYourWrites && parent.replicationStateStore == nil { + t.Fatalf("replicationStateStore is nil") + } + if tt.config.CloneToken { + if tt.token == "" { + t.Fatalf("test requires a non-empty token") + } + if parent.config.CloneToken != clone.config.CloneToken { + t.Fatalf("config.CloneToken doesn't match: %v vs %v", parent.config.CloneToken, clone.config.CloneToken) + } + if parent.token != clone.token { + t.Fatalf("tokens do not match: %v vs %v", parent.token, clone.token) + } + } else { + // assumes `VAULT_TOKEN` is unset or has an empty value. 
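// The assertions above pin down Clone's contract: config-level settings are
// copied, while headers and the token only carry over when CloneHeaders and
// CloneToken are set (a sketch; cfg and the clients are illustrative):
//
//	cfg := DefaultConfig()
//	cfg.CloneHeaders = true
//	cfg.CloneToken = true
//	parent, _ := NewClient(cfg)
//	child, _ := parent.Clone() // inherits parent's headers and token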
+ expected := "" + if clone.token != expected { + t.Fatalf("expected clone's token %q, actual %q", expected, clone.token) + } + } + if !reflect.DeepEqual(parent.replicationStateStore, clone.replicationStateStore) { + t.Fatalf("expected replicationStateStore %v, actual %v", parent.replicationStateStore, + clone.replicationStateStore) + } + }) + } +} + +func TestSetHeadersRaceSafe(t *testing.T) { + client, err1 := NewClient(nil) + if err1 != nil { + t.Fatalf("NewClient failed: %v", err1) + } + + start := make(chan interface{}) + done := make(chan interface{}) + + testPairs := map[string]string{ + "soda": "rootbeer", + "veggie": "carrots", + "fruit": "apples", + "color": "red", + "protein": "egg", + } + + for key, value := range testPairs { + tmpKey := key + tmpValue := value + go func() { + <-start + // This test fails if you replace client.AddHeader(tmpKey, tmpValue) here with: + // headerCopy := client.Headers() + // headerCopy.Add(tmpKey, tmpValue) + // client.SetHeaders(headerCopy) + client.AddHeader(tmpKey, tmpValue) + done <- true + }() + } + + // Start everyone at once. + close(start) + + // Wait until everyone is done. + for i := 0; i < len(testPairs); i++ { + <-done + } + + // Check that all the test pairs are in the resulting + // headers. + resultingHeaders := client.Headers() + for key, value := range testPairs { + if resultingHeaders.Get(key) != value { + t.Fatal("expected " + value + " for " + key) + } + } +} + +func TestMergeReplicationStates(t *testing.T) { + type testCase struct { + name string + old []string + new string + expected []string + } + + testCases := []testCase{ + { + name: "empty-old", + old: nil, + new: "v1:cid:1:0:", + expected: []string{"v1:cid:1:0:"}, + }, + { + name: "old-smaller", + old: []string{"v1:cid:1:0:"}, + new: "v1:cid:2:0:", + expected: []string{"v1:cid:2:0:"}, + }, + { + name: "old-bigger", + old: []string{"v1:cid:2:0:"}, + new: "v1:cid:1:0:", + expected: []string{"v1:cid:2:0:"}, + }, + { + name: "mixed-single", + old: []string{"v1:cid:1:0:"}, + new: "v1:cid:0:1:", + expected: []string{"v1:cid:0:1:", "v1:cid:1:0:"}, + }, + { + name: "mixed-single-alt", + old: []string{"v1:cid:0:1:"}, + new: "v1:cid:1:0:", + expected: []string{"v1:cid:0:1:", "v1:cid:1:0:"}, + }, + { + name: "mixed-double", + old: []string{"v1:cid:0:1:", "v1:cid:1:0:"}, + new: "v1:cid:2:0:", + expected: []string{"v1:cid:0:1:", "v1:cid:2:0:"}, + }, + { + name: "newer-both", + old: []string{"v1:cid:0:1:", "v1:cid:1:0:"}, + new: "v1:cid:2:1:", + expected: []string{"v1:cid:2:1:"}, + }, + } + + b64enc := func(ss []string) []string { + var ret []string + for _, s := range ss { + ret = append(ret, base64.StdEncoding.EncodeToString([]byte(s))) + } + return ret + } + b64dec := func(ss []string) []string { + var ret []string + for _, s := range ss { + d, err := base64.StdEncoding.DecodeString(s) + if err != nil { + t.Fatal(err) + } + ret = append(ret, string(d)) + } + return ret + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + out := b64dec(MergeReplicationStates(b64enc(tc.old), base64.StdEncoding.EncodeToString([]byte(tc.new)))) + if diff := deep.Equal(out, tc.expected); len(diff) != 0 { + t.Errorf("got=%v, expected=%v, diff=%v", out, tc.expected, diff) + } + }) + } +} + +func TestReplicationStateStore_recordState(t *testing.T) { + b64enc := func(s string) string { + return base64.StdEncoding.EncodeToString([]byte(s)) + } + + tests := []struct { + name string + expected []string + resp []*Response + }{ + { + name: "single", + resp: []*Response{ + { +
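// MergeReplicationStates, exercised above, merges base64-encoded states of
// the shape "v1:<cluster-id>:<n>:<m>:". A state supersedes another only when
// it is at least as new in both counters; otherwise both are kept, as the
// "mixed-*" cases show. For illustration:
//
//	enc := func(s string) string { return base64.StdEncoding.EncodeToString([]byte(s)) }
//	merged := MergeReplicationStates([]string{enc("v1:cid:1:0:")}, enc("v1:cid:0:1:"))
//	// merged now holds both states, since neither dominates the other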
Response: &http.Response{ + Header: map[string][]string{ + HeaderIndex: { + b64enc("v1:cid:1:0:"), + }, + }, + }, + }, + }, + expected: []string{ + b64enc("v1:cid:1:0:"), + }, + }, + { + name: "empty", + resp: []*Response{ + { + Response: &http.Response{ + Header: map[string][]string{}, + }, + }, + }, + expected: nil, + }, + { + name: "multiple", + resp: []*Response{ + { + Response: &http.Response{ + Header: map[string][]string{ + HeaderIndex: { + b64enc("v1:cid:0:1:"), + }, + }, + }, + }, + { + Response: &http.Response{ + Header: map[string][]string{ + HeaderIndex: { + b64enc("v1:cid:1:0:"), + }, + }, + }, + }, + }, + expected: []string{ + b64enc("v1:cid:0:1:"), + b64enc("v1:cid:1:0:"), + }, + }, + { + name: "duplicates", + resp: []*Response{ + { + Response: &http.Response{ + Header: map[string][]string{ + HeaderIndex: { + b64enc("v1:cid:1:0:"), + }, + }, + }, + }, + { + Response: &http.Response{ + Header: map[string][]string{ + HeaderIndex: { + b64enc("v1:cid:1:0:"), + }, + }, + }, + }, + }, + expected: []string{ + b64enc("v1:cid:1:0:"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + w := &replicationStateStore{} + + var wg sync.WaitGroup + for _, r := range tt.resp { + wg.Add(1) + go func(r *Response) { + defer wg.Done() + w.recordState(r) + }(r) + } + wg.Wait() + + if !reflect.DeepEqual(tt.expected, w.store) { + t.Errorf("recordState(): expected states %v, actual %v", tt.expected, w.store) + } + }) + } +} + +func TestReplicationStateStore_requireState(t *testing.T) { + tests := []struct { + name string + states []string + req []*Request + expected []string + }{ + { + name: "empty", + states: []string{}, + req: []*Request{ + { + Headers: make(http.Header), + }, + }, + expected: nil, + }, + { + name: "basic", + states: []string{ + "v1:cid:0:1:", + "v1:cid:1:0:", + }, + req: []*Request{ + { + Headers: make(http.Header), + }, + }, + expected: []string{ + "v1:cid:0:1:", + "v1:cid:1:0:", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + store := &replicationStateStore{ + store: tt.states, + } + + var wg sync.WaitGroup + for _, r := range tt.req { + wg.Add(1) + go func(r *Request) { + defer wg.Done() + store.requireState(r) + }(r) + } + + wg.Wait() + + var actual []string + for _, r := range tt.req { + if values := r.Headers.Values(HeaderIndex); len(values) > 0 { + actual = append(actual, values...) 
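// recordState and requireState together implement read-your-writes: states
// seen in response HeaderIndex headers are merged into the store and then
// replayed on the headers of later requests. Callers opt in via the config
// (a sketch):
//
//	cfg := DefaultConfig()
//	cfg.ReadYourWrites = true
//	client, _ := NewClient(cfg)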
+ } + } + sort.Strings(actual) + if !reflect.DeepEqual(tt.expected, actual) { + t.Errorf("requireState(): expected states %v, actual %v", tt.expected, actual) + } + }) + } +} + +func TestClient_ReadYourWrites(t *testing.T) { + b64enc := func(s string) string { + return base64.StdEncoding.EncodeToString([]byte(s)) + } + + handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.Header().Set(HeaderIndex, strings.TrimLeft(req.URL.Path, "/")) + }) + + tests := []struct { + name string + handler http.Handler + wantStates []string + values [][]string + clone bool + }{ + { + name: "multiple_duplicates", + clone: false, + handler: handler, + wantStates: []string{ + b64enc("v1:cid:0:4:"), + }, + values: [][]string{ + { + b64enc("v1:cid:0:4:"), + b64enc("v1:cid:0:2:"), + }, + { + b64enc("v1:cid:0:4:"), + b64enc("v1:cid:0:2:"), + }, + }, + }, + { + name: "basic_clone", + clone: true, + handler: handler, + wantStates: []string{ + b64enc("v1:cid:0:4:"), + }, + values: [][]string{ + { + b64enc("v1:cid:0:4:"), + }, + { + b64enc("v1:cid:0:3:"), + }, + }, + }, + { + name: "multiple_clone", + clone: true, + handler: handler, + wantStates: []string{ + b64enc("v1:cid:0:4:"), + }, + values: [][]string{ + { + b64enc("v1:cid:0:4:"), + b64enc("v1:cid:0:2:"), + }, + { + b64enc("v1:cid:0:3:"), + b64enc("v1:cid:0:1:"), + }, + }, + }, + { + name: "multiple_duplicates_clone", + clone: true, + handler: handler, + wantStates: []string{ + b64enc("v1:cid:0:4:"), + }, + values: [][]string{ + { + b64enc("v1:cid:0:4:"), + b64enc("v1:cid:0:2:"), + }, + { + b64enc("v1:cid:0:4:"), + b64enc("v1:cid:0:2:"), + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + testRequest := func(client *Client, val string) { + req := client.NewRequest(http.MethodGet, "/"+val) + req.Headers.Set(HeaderIndex, val) + resp, err := client.RawRequestWithContext(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + // validate that the server provided a valid header value in its response + actual := resp.Header.Get(HeaderIndex) + if actual != val { + t.Errorf("expected header value %v, actual %v", val, actual) + } + } + + config, ln := testHTTPServer(t, handler) + defer ln.Close() + + config.ReadYourWrites = true + config.Address = fmt.Sprintf("http://%s", ln.Addr()) + parent, err := NewClient(config) + if err != nil { + t.Fatal(err) + } + + var wg sync.WaitGroup + for i := 0; i < len(tt.values); i++ { + var c *Client + if tt.clone { + c, err = parent.Clone() + if err != nil { + t.Fatal(err) + } + } else { + c = parent + } + + for _, val := range tt.values[i] { + wg.Add(1) + go func(val string) { + defer wg.Done() + testRequest(c, val) + }(val) + } + } + + wg.Wait() + + if !reflect.DeepEqual(tt.wantStates, parent.replicationStateStore.states()) { + t.Errorf("expected states %v, actual %v", tt.wantStates, parent.replicationStateStore.states()) + } + }) + } +} + +func TestClient_SetReadYourWrites(t *testing.T) { + tests := []struct { + name string + config *Config + calls []bool + }{ + { + name: "false", + config: &Config{}, + calls: []bool{false}, + }, + { + name: "true", + config: &Config{}, + calls: []bool{true}, + }, + { + name: "multi-false", + config: &Config{}, + calls: []bool{false, false}, + }, + { + name: "multi-true", + config: &Config{}, + calls: []bool{true, true}, + }, + { + name: "multi-mix", + config: &Config{}, + calls: []bool{false, true, false, true}, + }, + } + + assertSetReadYourWrites := func(t *testing.T, c *Client, v bool, s *replicationStateStore) { +
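// Note from the clone cases above: a clone shares its parent's replication
// state store, so states recorded through either client are visible to both
// (a sketch):
//
//	parent, _ := NewClient(cfg) // cfg.ReadYourWrites assumed true
//	child, _ := parent.Clone()  // shares parent's state store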
t.Helper() + c.SetReadYourWrites(v) + if c.config.ReadYourWrites != v { + t.Fatalf("expected config.ReadYourWrites %#v, actual %#v", v, c.config.ReadYourWrites) + } + if !reflect.DeepEqual(s, c.replicationStateStore) { + t.Fatalf("expected replicationStateStore %#v, actual %#v", s, c.replicationStateStore) + } + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &Client{ + config: tt.config, + } + for i, v := range tt.calls { + var expectStateStore *replicationStateStore + if v { + if c.replicationStateStore == nil { + c.replicationStateStore = &replicationStateStore{ + store: []string{}, + } + } + c.replicationStateStore.store = append(c.replicationStateStore.store, + fmt.Sprintf("%s-%d", tt.name, i)) + expectStateStore = c.replicationStateStore + } + assertSetReadYourWrites(t, c, v, expectStateStore) + } + }) + } +} + +func TestClient_SetCloneToken(t *testing.T) { + tests := []struct { + name string + calls []bool + }{ + { + name: "false", + calls: []bool{false}, + }, + { + name: "true", + calls: []bool{true}, + }, + { + name: "multi", + calls: []bool{true, false, true}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &Client{ + config: &Config{}, + } + + var expected bool + for _, v := range tt.calls { + actual := c.CloneToken() + if expected != actual { + t.Fatalf("expected %v, actual %v", expected, actual) + } + + expected = v + c.SetCloneToken(expected) + actual = c.CloneToken() + if actual != expected { + t.Fatalf("SetCloneToken(): expected %v, actual %v", expected, actual) + } + } + }) + } +} + +func TestClientWithNamespace(t *testing.T) { + var ns string + handler := func(w http.ResponseWriter, req *http.Request) { + ns = req.Header.Get(NamespaceHeaderName) + } + config, ln := testHTTPServer(t, http.HandlerFunc(handler)) + defer ln.Close() + + // set up a client with a namespace + client, err := NewClient(config) + if err != nil { + t.Fatalf("err: %s", err) + } + ogNS := "test" + client.SetNamespace(ogNS) + _, err = client.rawRequestWithContext( + context.Background(), + client.NewRequest(http.MethodGet, "/")) + if err != nil { + t.Fatalf("err: %s", err) + } + if ns != ogNS { + t.Fatalf("Expected namespace: %q, got %q", ogNS, ns) + } + + // make a call with a temporary namespace + newNS := "new-namespace" + _, err = client.WithNamespace(newNS).rawRequestWithContext( + context.Background(), + client.NewRequest(http.MethodGet, "/")) + if err != nil { + t.Fatalf("err: %s", err) + } + if ns != newNS { + t.Fatalf("Expected new namespace: %q, got %q", newNS, ns) + } + // ensure client has not been modified + _, err = client.rawRequestWithContext( + context.Background(), + client.NewRequest(http.MethodGet, "/")) + if err != nil { + t.Fatalf("err: %s", err) + } + if ns != ogNS { + t.Fatalf("Expected original namespace: %q, got %q", ogNS, ns) + } + + // make call with empty ns + _, err = client.WithNamespace("").rawRequestWithContext( + context.Background(), + client.NewRequest(http.MethodGet, "/")) + if err != nil { + t.Fatalf("err: %s", err) + } + if ns != "" { + t.Fatalf("Expected no namespace, got %q", ns) + } + + // ensure client has not been modified + if client.Namespace() != ogNS { + t.Fatalf("Expected original namespace: %q, got %q", ogNS, client.Namespace()) + } +} + +func TestVaultProxy(t *testing.T) { + const NoProxy string = "NO_PROXY" + + tests := map[string]struct { + name string + vaultHttpProxy string + vaultProxyAddr string + noProxy string + requestUrl string + expectedResolvedProxyUrl string + }{ +
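// WithNamespace, covered above, returns a copy of the client scoped to the
// given namespace for one-off calls, leaving the receiver untouched
// (a sketch; namespace names and the path are illustrative):
//
//	client.SetNamespace("team-a")
//	secret, err := client.WithNamespace("team-b").Logical().Read("secret/data/app")
//	// client itself still uses "team-a"; WithNamespace("") drops the namespace.
//
// The proxy cases that follow pin down a related precedence rule:
// VAULT_PROXY_ADDR wins over VAULT_HTTP_PROXY when both are set.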
"VAULT_HTTP_PROXY used when NO_PROXY env var doesn't include request host": { + vaultHttpProxy: "https://hashicorp.com", + vaultProxyAddr: "", + noProxy: "terraform.io", + requestUrl: "https://vaultproject.io", + }, + "VAULT_HTTP_PROXY used when NO_PROXY env var includes request host": { + vaultHttpProxy: "https://hashicorp.com", + vaultProxyAddr: "", + noProxy: "terraform.io,vaultproject.io", + requestUrl: "https://vaultproject.io", + }, + "VAULT_PROXY_ADDR used when NO_PROXY env var doesn't include request host": { + vaultHttpProxy: "", + vaultProxyAddr: "https://hashicorp.com", + noProxy: "terraform.io", + requestUrl: "https://vaultproject.io", + }, + "VAULT_PROXY_ADDR used when NO_PROXY env var includes request host": { + vaultHttpProxy: "", + vaultProxyAddr: "https://hashicorp.com", + noProxy: "terraform.io,vaultproject.io", + requestUrl: "https://vaultproject.io", + }, + "VAULT_PROXY_ADDR used when VAULT_HTTP_PROXY env var also supplied": { + vaultHttpProxy: "https://hashicorp.com", + vaultProxyAddr: "https://terraform.io", + noProxy: "", + requestUrl: "https://vaultproject.io", + expectedResolvedProxyUrl: "https://terraform.io", + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + if tc.vaultHttpProxy != "" { + oldVaultHttpProxy := os.Getenv(EnvHTTPProxy) + os.Setenv(EnvHTTPProxy, tc.vaultHttpProxy) + defer os.Setenv(EnvHTTPProxy, oldVaultHttpProxy) + } + + if tc.vaultProxyAddr != "" { + oldVaultProxyAddr := os.Getenv(EnvVaultProxyAddr) + os.Setenv(EnvVaultProxyAddr, tc.vaultProxyAddr) + defer os.Setenv(EnvVaultProxyAddr, oldVaultProxyAddr) + } + + if tc.noProxy != "" { + oldNoProxy := os.Getenv(NoProxy) + os.Setenv(NoProxy, tc.noProxy) + defer os.Setenv(NoProxy, oldNoProxy) + } + + c := DefaultConfig() + if c.Error != nil { + t.Fatalf("Expected no error reading config, found error %v", c.Error) + } + + r, _ := http.NewRequest("GET", tc.requestUrl, nil) + proxyUrl, err := c.HttpClient.Transport.(*http.Transport).Proxy(r) + if err != nil { + t.Fatalf("Expected no error resolving proxy, found error %v", err) + } + if proxyUrl == nil || proxyUrl.String() == "" { + t.Fatalf("Expected proxy to be resolved but no proxy returned") + } + if tc.expectedResolvedProxyUrl != "" && proxyUrl.String() != tc.expectedResolvedProxyUrl { + t.Fatalf("Expected resolved proxy URL to be %v but was %v", tc.expectedResolvedProxyUrl, proxyUrl.String()) + } + }) + } +} + +func TestParseAddressWithUnixSocket(t *testing.T) { + address := "unix:///var/run/vault.sock" + config := DefaultConfig() + + u, err := config.ParseAddress(address) + if err != nil { + t.Fatal("Error not expected") + } + if u.Scheme != "http" { + t.Fatal("Scheme not changed to http") + } + if u.Host != "/var/run/vault.sock" { + t.Fatal("Host not changed to socket name") + } + if u.Path != "" { + t.Fatal("Path expected to be blank") + } + if config.HttpClient.Transport.(*http.Transport).DialContext == nil { + t.Fatal("DialContext function not set in config.HttpClient.Transport") + } +} diff --git a/api/go.mod b/api/go.mod new file mode 100644 index 0000000..20fb461 --- /dev/null +++ b/api/go.mod @@ -0,0 +1,39 @@ +module github.com/hashicorp/vault/api + +// The Go version directive for the api package should normally only be updated when +// code in the api package requires a newer Go version to build. It should not +// automatically track the Go version used to build Vault itself. Many projects import +// the api module and we don't want to impose a newer version on them any more than we +// have to. 
+go 1.19 + +require ( + github.com/cenkalti/backoff/v3 v3.0.0 + github.com/go-jose/go-jose/v3 v3.0.0 + github.com/go-test/deep v1.0.2 + github.com/hashicorp/errwrap v1.1.0 + github.com/hashicorp/go-cleanhttp v0.5.2 + github.com/hashicorp/go-hclog v0.16.2 + github.com/hashicorp/go-multierror v1.1.1 + github.com/hashicorp/go-retryablehttp v0.6.6 + github.com/hashicorp/go-rootcerts v1.0.2 + github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 + github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 + github.com/hashicorp/hcl v1.0.0 + github.com/mitchellh/mapstructure v1.5.0 + golang.org/x/net v0.7.0 + golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 +) + +require ( + github.com/fatih/color v1.7.0 // indirect + github.com/google/go-cmp v0.5.7 // indirect + github.com/hashicorp/go-sockaddr v1.0.2 // indirect + github.com/mattn/go-colorable v0.1.6 // indirect + github.com/mattn/go-isatty v0.0.12 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/ryanuber/go-glob v1.0.0 // indirect + golang.org/x/crypto v0.6.0 // indirect + golang.org/x/sys v0.5.0 // indirect + golang.org/x/text v0.7.0 // indirect +) diff --git a/api/go.sum b/api/go.sum new file mode 100644 index 0000000..e8f5f18 --- /dev/null +++ b/api/go.sum @@ -0,0 +1,93 @@ +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= +github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= +github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= +github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= +github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-multierror 
v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= +github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= 
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/api/help.go b/api/help.go new file mode 100644 index 0000000..c119f6c --- /dev/null +++ b/api/help.go @@ -0,0 +1,40 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "fmt" + "net/http" +) + +// Help wraps HelpWithContext using context.Background. +func (c *Client) Help(path string) (*Help, error) { + return c.HelpWithContext(context.Background(), path) +} + +// HelpWithContext reads the help information for the given path. 
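// A typical call, for illustration (the path is an example):
//
//	help, err := client.Help("sys/mounts")
//	if err != nil {
//		// handle error
//	}
//	fmt.Println(help.Help)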
+func (c *Client) HelpWithContext(ctx context.Context, path string) (*Help, error) { + ctx, cancelFunc := c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.NewRequest(http.MethodGet, fmt.Sprintf("/v1/%s", path)) + r.Params.Add("help", "1") + + resp, err := c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result Help + err = resp.DecodeJSON(&result) + return &result, err +} + +type Help struct { + Help string `json:"help"` + SeeAlso []string `json:"see_also"` + OpenAPI map[string]interface{} `json:"openapi"` +} diff --git a/api/kv.go b/api/kv.go new file mode 100644 index 0000000..20862fb --- /dev/null +++ b/api/kv.go @@ -0,0 +1,59 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import "errors" + +// ErrSecretNotFound is returned by KVv1 and KVv2 wrappers to indicate that the +// secret is missing at the given location. +var ErrSecretNotFound = errors.New("secret not found") + +// A KVSecret is a key-value secret returned by Vault's KV secrets engine, +// and is the most basic type of secret stored in Vault. +// +// Data contains the key-value pairs of the secret itself, +// while Metadata contains a subset of metadata describing +// this particular version of the secret. +// The Metadata field for a KV v1 secret will always be nil, as +// metadata is only supported starting in KV v2. +// +// The Raw field can be inspected for information about the lease, +// and passed to a LifetimeWatcher object for periodic renewal. +type KVSecret struct { + Data map[string]interface{} + VersionMetadata *KVVersionMetadata + CustomMetadata map[string]interface{} + Raw *Secret +} + +// KVv1 is used to return a client for reads and writes against +// a KV v1 secrets engine in Vault. +// +// The mount path is the location where the target KV secrets engine resides +// in Vault. +// +// While v1 is not necessarily deprecated, Vault development servers tend to +// use v2 as the version of the KV secrets engine, as this is what's mounted +// by default when a server is started in -dev mode. See the kvv2 struct. +// +// Learn more about the KV secrets engine here: +// https://www.vaultproject.io/docs/secrets/kv +func (c *Client) KVv1(mountPath string) *KVv1 { + return &KVv1{c: c, mountPath: mountPath} +} + +// KVv2 is used to return a client for reads and writes against +// a KV v2 secrets engine in Vault. +// +// The mount path is the location where the target KV secrets engine resides +// in Vault. +// +// Vault development servers tend to have "secret" as the mount path, +// as these are the default settings when a server is started in -dev mode. +// +// Learn more about the KV secrets engine here: +// https://www.vaultproject.io/docs/secrets/kv +func (c *Client) KVv2(mountPath string) *KVv2 { + return &KVv2{c: c, mountPath: mountPath} +} diff --git a/api/kv_test.go b/api/kv_test.go new file mode 100644 index 0000000..36d769f --- /dev/null +++ b/api/kv_test.go @@ -0,0 +1,390 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "reflect" + "testing" + "time" +) + +func TestExtractVersionMetadata(t *testing.T) { + t.Parallel() + + inputCreatedTimeStr := "2022-05-06T23:02:04.865025Z" + inputDeletionTimeStr := "2022-06-17T01:15:03.279013Z" + expectedCreatedTimeParsed, err := time.Parse(time.RFC3339, inputCreatedTimeStr) + if err != nil { + t.Fatalf("unable to parse expected created time: %v", err) + } + expectedDeletionTimeParsed, err := time.Parse(time.RFC3339, inputDeletionTimeStr) + if err != nil { + t.Fatalf("unable to parse expected created time: %v", err) + } + + testCases := []struct { + name string + input *Secret + expected *KVVersionMetadata + }{ + { + name: "a secret", + input: &Secret{ + Data: map[string]interface{}{ + "data": map[string]interface{}{ + "password": "Hashi123", + }, + "metadata": map[string]interface{}{ + "version": 10, + "created_time": inputCreatedTimeStr, + "deletion_time": "", + "destroyed": false, + "custom_metadata": nil, + }, + }, + }, + expected: &KVVersionMetadata{ + Version: 10, + CreatedTime: expectedCreatedTimeParsed, + DeletionTime: time.Time{}, + Destroyed: false, + }, + }, + { + name: "a secret that has been deleted", + input: &Secret{ + Data: map[string]interface{}{ + "data": map[string]interface{}{ + "password": "Hashi123", + }, + "metadata": map[string]interface{}{ + "version": 10, + "created_time": inputCreatedTimeStr, + "deletion_time": inputDeletionTimeStr, + "destroyed": false, + "custom_metadata": nil, + }, + }, + }, + expected: &KVVersionMetadata{ + Version: 10, + CreatedTime: expectedCreatedTimeParsed, + DeletionTime: expectedDeletionTimeParsed, + Destroyed: false, + }, + }, + { + name: "a response from a Write operation", + input: &Secret{ + Data: map[string]interface{}{ + "version": 10, + "created_time": inputCreatedTimeStr, + "deletion_time": "", + "destroyed": false, + "custom_metadata": nil, + }, + }, + expected: &KVVersionMetadata{ + Version: 10, + CreatedTime: expectedCreatedTimeParsed, + DeletionTime: time.Time{}, + Destroyed: false, + }, + }, + } + + for _, tc := range testCases { + versionMetadata, err := extractVersionMetadata(tc.input) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(versionMetadata, tc.expected) { + t.Fatalf("%s: got\n%#v\nexpected\n%#v\n", tc.name, versionMetadata, tc.expected) + } + } +} + +func TestExtractDataAndVersionMetadata(t *testing.T) { + t.Parallel() + + inputCreatedTimeStr := "2022-05-06T23:02:04.865025Z" + inputDeletionTimeStr := "2022-06-17T01:15:03.279013Z" + expectedCreatedTimeParsed, err := time.Parse(time.RFC3339, inputCreatedTimeStr) + if err != nil { + t.Fatalf("unable to parse expected created time: %v", err) + } + expectedDeletionTimeParsed, err := time.Parse(time.RFC3339, inputDeletionTimeStr) + if err != nil { + t.Fatalf("unable to parse expected created time: %v", err) + } + + readResp := &Secret{ + Data: map[string]interface{}{ + "data": map[string]interface{}{ + "password": "Hashi123", + }, + "metadata": map[string]interface{}{ + "version": 10, + "created_time": inputCreatedTimeStr, + "deletion_time": "", + "destroyed": false, + "custom_metadata": nil, + }, + }, + } + + readRespDeleted := &Secret{ + Data: map[string]interface{}{ + "data": nil, + "metadata": map[string]interface{}{ + "version": 10, + "created_time": inputCreatedTimeStr, + "deletion_time": inputDeletionTimeStr, + "destroyed": false, + "custom_metadata": nil, + }, + }, + } + + testCases := []struct { + name string + input *Secret + expected *KVSecret + }{ 
+ { + name: "a response from a Read operation", + input: readResp, + expected: &KVSecret{ + Data: map[string]interface{}{ + "password": "Hashi123", + }, + VersionMetadata: &KVVersionMetadata{ + Version: 10, + CreatedTime: expectedCreatedTimeParsed, + DeletionTime: time.Time{}, + Destroyed: false, + }, + // it's tempting to test some Secrets with custom_metadata but + // we can't in this test because it isn't until we call the + // extractCustomMetadata function that the custom metadata + // gets added onto the struct. See TestExtractCustomMetadata. + CustomMetadata: nil, + Raw: readResp, + }, + }, + { + name: "a secret that has been deleted and thus has nil data", + input: readRespDeleted, + expected: &KVSecret{ + Data: nil, + VersionMetadata: &KVVersionMetadata{ + Version: 10, + CreatedTime: expectedCreatedTimeParsed, + DeletionTime: expectedDeletionTimeParsed, + Destroyed: false, + }, + CustomMetadata: nil, + Raw: readRespDeleted, + }, + }, + } + + for _, tc := range testCases { + dvm, err := extractDataAndVersionMetadata(tc.input) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(dvm, tc.expected) { + t.Fatalf("%s: got\n%#v\nexpected\n%#v\n", tc.name, dvm, tc.expected) + } + } +} + +func TestExtractFullMetadata(t *testing.T) { + inputCreatedTimeStr := "2022-05-20T00:51:49.419794Z" + expectedCreatedTimeParsed, err := time.Parse(time.RFC3339, inputCreatedTimeStr) + if err != nil { + t.Fatalf("unable to parse expected created time: %v", err) + } + + inputUpdatedTimeStr := "2022-05-20T20:23:43.284488Z" + expectedUpdatedTimeParsed, err := time.Parse(time.RFC3339, inputUpdatedTimeStr) + if err != nil { + t.Fatalf("unable to parse expected updated time: %v", err) + } + + inputDeletedTimeStr := "2022-05-21T00:05:49.521697Z" + expectedDeletedTimeParsed, err := time.Parse(time.RFC3339, inputDeletedTimeStr) + if err != nil { + t.Fatalf("unable to parse expected deletion time: %v", err) + } + + metadataResp := &Secret{ + Data: map[string]interface{}{ + "cas_required": true, + "created_time": inputCreatedTimeStr, + "current_version": 2, + "custom_metadata": map[string]interface{}{ + "org": "eng", + }, + "delete_version_after": "200s", + "max_versions": 3, + "oldest_version": 1, + "updated_time": inputUpdatedTimeStr, + "versions": map[string]interface{}{ + "2": map[string]interface{}{ + "created_time": inputUpdatedTimeStr, + "deletion_time": "", + "destroyed": false, + }, + "1": map[string]interface{}{ + "created_time": inputCreatedTimeStr, + "deletion_time": inputDeletedTimeStr, + "destroyed": false, + }, + }, + }, + } + + testCases := []struct { + name string + input *Secret + expected *KVMetadata + }{ + { + name: "a metadata response", + input: metadataResp, + expected: &KVMetadata{ + CASRequired: true, + CreatedTime: expectedCreatedTimeParsed, + CurrentVersion: 2, + CustomMetadata: map[string]interface{}{ + "org": "eng", + }, + DeleteVersionAfter: time.Duration(200 * time.Second), + MaxVersions: 3, + OldestVersion: 1, + UpdatedTime: expectedUpdatedTimeParsed, + Versions: map[string]KVVersionMetadata{ + "2": { + Version: 2, + CreatedTime: expectedUpdatedTimeParsed, + DeletionTime: time.Time{}, + }, + "1": { + Version: 1, + CreatedTime: expectedCreatedTimeParsed, + DeletionTime: expectedDeletedTimeParsed, + }, + }, + }, + }, + } + + for _, tc := range testCases { + md, err := extractFullMetadata(tc.input) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !reflect.DeepEqual(md, tc.expected) { + t.Fatalf("%s: got\n%#v\nexpected\n%#v\n", tc.name, md, tc.expected) + } + } 
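// One detail worth noting from the fixture above: the server reports
// delete_version_after as a duration string ("200s" here), which is parsed
// into a time.Duration on KVMetadata. Caller-side (a sketch; kv and ctx are
// illustrative):
//
//	md, _ := kv.GetMetadata(ctx, "my-secret")
//	fmt.Println(md.DeleteVersionAfter) // 3m20s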
+} + +func TestExtractCustomMetadata(t *testing.T) { + testCases := []struct { + name string + inputAPIResp *Secret + expected map[string]interface{} + }{ + { + name: "a read response with some custom metadata", + inputAPIResp: &Secret{ + Data: map[string]interface{}{ + "metadata": map[string]interface{}{ + "custom_metadata": map[string]interface{}{"org": "eng"}, + }, + }, + }, + expected: map[string]interface{}{"org": "eng"}, + }, + { + name: "a write response with some (pre-existing) custom metadata", + inputAPIResp: &Secret{ + Data: map[string]interface{}{ + "custom_metadata": map[string]interface{}{"org": "eng"}, + }, + }, + expected: map[string]interface{}{"org": "eng"}, + }, + { + name: "a read response with no custom metadata from a pre-1.9 Vault server", + inputAPIResp: &Secret{ + Data: map[string]interface{}{ + "metadata": map[string]interface{}{}, + }, + }, + expected: map[string]interface{}(nil), + }, + { + name: "a write response with no custom metadata from a pre-1.9 Vault server", + inputAPIResp: &Secret{ + Data: map[string]interface{}{}, + }, + expected: map[string]interface{}(nil), + }, + { + name: "a read response with no custom metadata from a post-1.9 Vault server", + inputAPIResp: &Secret{ + Data: map[string]interface{}{ + "metadata": map[string]interface{}{ + "custom_metadata": nil, + }, + }, + }, + expected: map[string]interface{}(nil), + }, + { + name: "a write response with no custom metadata from a post-1.9 Vault server", + inputAPIResp: &Secret{ + Data: map[string]interface{}{ + "custom_metadata": nil, + }, + }, + expected: map[string]interface{}(nil), + }, + { + name: "a read response where custom metadata was deleted", + inputAPIResp: &Secret{ + Data: map[string]interface{}{ + "metadata": map[string]interface{}{ + "custom_metadata": map[string]interface{}{}, + }, + }, + }, + expected: map[string]interface{}{}, + }, + { + name: "a write response where custom metadata was deleted", + inputAPIResp: &Secret{ + Data: map[string]interface{}{ + "custom_metadata": map[string]interface{}{}, + }, + }, + expected: map[string]interface{}{}, + }, + } + + for _, tc := range testCases { + cm := extractCustomMetadata(tc.inputAPIResp) + + if !reflect.DeepEqual(cm, tc.expected) { + t.Fatalf("%s: got\n%#v\nexpected\n%#v\n", tc.name, cm, tc.expected) + } + } +} diff --git a/api/kv_v1.go b/api/kv_v1.go new file mode 100644 index 0000000..a914e03 --- /dev/null +++ b/api/kv_v1.go @@ -0,0 +1,60 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "fmt" +) + +type KVv1 struct { + c *Client + mountPath string +} + +// Get returns a secret from the KV v1 secrets engine. +func (kv *KVv1) Get(ctx context.Context, secretPath string) (*KVSecret, error) { + pathToRead := fmt.Sprintf("%s/%s", kv.mountPath, secretPath) + + secret, err := kv.c.Logical().ReadWithContext(ctx, pathToRead) + if err != nil { + return nil, fmt.Errorf("error encountered while reading secret at %s: %w", pathToRead, err) + } + if secret == nil { + return nil, fmt.Errorf("%w: at %s", ErrSecretNotFound, pathToRead) + } + + return &KVSecret{ + Data: secret.Data, + VersionMetadata: nil, + Raw: secret, + }, nil +} + +// Put inserts a key-value secret (e.g. {"password": "Hashi123"}) into the +// KV v1 secrets engine. +// +// If the secret already exists, it will be overwritten. 
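// A KV v1 round trip, for illustration (mount path, secret path, ctx, and
// the client are assumed to exist):
//
//	kv := client.KVv1("kv")
//	if err := kv.Put(ctx, "my-secret", map[string]interface{}{"password": "Hashi123"}); err != nil {
//		// handle error
//	}
//	secret, err := kv.Get(ctx, "my-secret") // secret.Data holds the key-value pairs
//	if err := kv.Delete(ctx, "my-secret"); err != nil {
//		// handle error
//	}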
+func (kv *KVv1) Put(ctx context.Context, secretPath string, data map[string]interface{}) error { + pathToWriteTo := fmt.Sprintf("%s/%s", kv.mountPath, secretPath) + + _, err := kv.c.Logical().WriteWithContext(ctx, pathToWriteTo, data) + if err != nil { + return fmt.Errorf("error writing secret to %s: %w", pathToWriteTo, err) + } + + return nil +} + +// Delete deletes a secret from the KV v1 secrets engine. +func (kv *KVv1) Delete(ctx context.Context, secretPath string) error { + pathToDelete := fmt.Sprintf("%s/%s", kv.mountPath, secretPath) + + _, err := kv.c.Logical().DeleteWithContext(ctx, pathToDelete) + if err != nil { + return fmt.Errorf("error deleting secret at %s: %w", pathToDelete, err) + } + + return nil +} diff --git a/api/kv_v2.go b/api/kv_v2.go new file mode 100644 index 0000000..72c29ea --- /dev/null +++ b/api/kv_v2.go @@ -0,0 +1,781 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "errors" + "fmt" + "net/http" + "sort" + "strconv" + "time" + + "github.com/mitchellh/mapstructure" +) + +type KVv2 struct { + c *Client + mountPath string +} + +// KVMetadata is the full metadata for a given KV v2 secret. +type KVMetadata struct { + CASRequired bool `mapstructure:"cas_required"` + CreatedTime time.Time `mapstructure:"created_time"` + CurrentVersion int `mapstructure:"current_version"` + CustomMetadata map[string]interface{} `mapstructure:"custom_metadata"` + DeleteVersionAfter time.Duration `mapstructure:"delete_version_after"` + MaxVersions int `mapstructure:"max_versions"` + OldestVersion int `mapstructure:"oldest_version"` + UpdatedTime time.Time `mapstructure:"updated_time"` + // Keys are stringified ints, e.g. "3". To get a sorted slice of version metadata, use GetVersionsAsList. + Versions map[string]KVVersionMetadata `mapstructure:"versions"` + Raw *Secret +} + +// KVMetadataPutInput is the subset of metadata that can be replaced for a +// KV v2 secret using the PutMetadata method. +// +// All fields should be explicitly provided, as any fields left unset in the +// struct will be reset to their zero value. +type KVMetadataPutInput struct { + CASRequired bool + CustomMetadata map[string]interface{} + DeleteVersionAfter time.Duration + MaxVersions int +} + +// KVMetadataPatchInput is the subset of metadata that can be manually modified for +// a KV v2 secret using the PatchMetadata method. +// +// The struct's fields are all pointers. A pointer to a field's zero +// value (e.g. false for *bool) implies that field should be reset to its +// zero value after update, whereas a field left as a nil pointer +// (e.g. nil for *bool) implies the field should remain unchanged. +// +// Since maps are already pointers, use an empty map to remove all +// custom metadata. +type KVMetadataPatchInput struct { + CASRequired *bool + CustomMetadata map[string]interface{} + DeleteVersionAfter *time.Duration + MaxVersions *int +} + +// KVVersionMetadata is a subset of metadata for a given version of a KV v2 secret. 
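// The two metadata input types above differ in update semantics; in sketch
// form (field values, kv, and ctx are illustrative):
//
//	// PutMetadata: any field left unset is written back as its zero value.
//	_ = kv.PutMetadata(ctx, "my-secret", KVMetadataPutInput{MaxVersions: 5})
//
//	// PatchMetadata: nil pointers leave the corresponding fields untouched.
//	maxVersions := 5
//	_ = kv.PatchMetadata(ctx, "my-secret", KVMetadataPatchInput{MaxVersions: &maxVersions})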
+type KVVersionMetadata struct { + Version int `mapstructure:"version"` + CreatedTime time.Time `mapstructure:"created_time"` + DeletionTime time.Time `mapstructure:"deletion_time"` + Destroyed bool `mapstructure:"destroyed"` +} + +// Currently supported options: WithOption, WithCheckAndSet, WithMergeMethod +type KVOption func() (key string, value interface{}) + +const ( + KVOptionCheckAndSet = "cas" + KVOptionMethod = "method" + KVMergeMethodPatch = "patch" + KVMergeMethodReadWrite = "rw" +) + +// WithOption can optionally be passed to provide generic options for a +// KV request. Valid keys and values depend on the type of request. +func WithOption(key string, value interface{}) KVOption { + return func() (string, interface{}) { + return key, value + } +} + +// WithCheckAndSet can optionally be passed to perform a check-and-set +// operation on a KV request. If not set, the write will be allowed. +// If cas is set to 0, a write will only be allowed if the key doesn't exist. +// If set to non-zero, the write will only be allowed if the key’s current +// version matches the version specified in the cas parameter. +func WithCheckAndSet(cas int) KVOption { + return WithOption(KVOptionCheckAndSet, cas) +} + +// WithMergeMethod can optionally be passed to dictate which type of +// patch to perform in a Patch request. If set to "patch", then an HTTP PATCH +// request will be issued. If set to "rw", then a read will be performed, +// then a local update, followed by a remote update. Defaults to "patch". +func WithMergeMethod(method string) KVOption { + return WithOption(KVOptionMethod, method) +} + +// Get returns the latest version of a secret from the KV v2 secrets engine. +// +// If the latest version has been deleted, an error will not be thrown, but +// the Data field on the returned secret will be nil, and the Metadata field +// will contain the deletion time. +func (kv *KVv2) Get(ctx context.Context, secretPath string) (*KVSecret, error) { + pathToRead := fmt.Sprintf("%s/data/%s", kv.mountPath, secretPath) + + secret, err := kv.c.Logical().ReadWithContext(ctx, pathToRead) + if err != nil { + return nil, fmt.Errorf("error encountered while reading secret at %s: %w", pathToRead, err) + } + if secret == nil { + return nil, fmt.Errorf("%w: at %s", ErrSecretNotFound, pathToRead) + } + + kvSecret, err := extractDataAndVersionMetadata(secret) + if err != nil { + return nil, fmt.Errorf("error parsing secret at %s: %w", pathToRead, err) + } + + kvSecret.CustomMetadata = extractCustomMetadata(secret) + + return kvSecret, nil +} + +// GetVersion returns the data and metadata for a specific version of the +// given secret. +// +// If that version has been deleted, the Data field on the +// returned secret will be nil, and the Metadata field will contain the deletion time. +// +// GetVersionsAsList can provide a list of available versions sorted by +// version number, while the response from GetMetadata contains them as a map.
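// Check-and-set from the options above, for illustration (kv, ctx, and data
// are assumed):
//
//	// Succeeds only if the secret does not exist yet.
//	_, err := kv.Put(ctx, "my-secret", data, WithCheckAndSet(0))
//	// Succeeds only if the current version is 3.
//	_, err = kv.Put(ctx, "my-secret", data, WithCheckAndSet(3))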
+func (kv *KVv2) GetVersion(ctx context.Context, secretPath string, version int) (*KVSecret, error) { + pathToRead := fmt.Sprintf("%s/data/%s", kv.mountPath, secretPath) + + queryParams := map[string][]string{"version": {strconv.Itoa(version)}} + secret, err := kv.c.Logical().ReadWithDataWithContext(ctx, pathToRead, queryParams) + if err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("%w: for version %d at %s", ErrSecretNotFound, version, pathToRead) + } + + kvSecret, err := extractDataAndVersionMetadata(secret) + if err != nil { + return nil, fmt.Errorf("error parsing secret at %s: %w", pathToRead, err) + } + + kvSecret.CustomMetadata = extractCustomMetadata(secret) + + return kvSecret, nil +} + +// GetVersionsAsList returns a subset of the metadata for each version of the secret, sorted by version number. +func (kv *KVv2) GetVersionsAsList(ctx context.Context, secretPath string) ([]KVVersionMetadata, error) { + pathToRead := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath) + + secret, err := kv.c.Logical().ReadWithContext(ctx, pathToRead) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, fmt.Errorf("%w: no metadata at %s", ErrSecretNotFound, pathToRead) + } + + md, err := extractFullMetadata(secret) + if err != nil { + return nil, fmt.Errorf("unable to extract metadata from secret to determine versions: %w", err) + } + + versionsList := make([]KVVersionMetadata, 0, len(md.Versions)) + for _, versionMetadata := range md.Versions { + versionsList = append(versionsList, versionMetadata) + } + + sort.Slice(versionsList, func(i, j int) bool { return versionsList[i].Version < versionsList[j].Version }) + return versionsList, nil +} + +// GetMetadata returns the full metadata for a given secret, including a map of +// its existing versions and their respective creation/deletion times, etc. +func (kv *KVv2) GetMetadata(ctx context.Context, secretPath string) (*KVMetadata, error) { + pathToRead := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath) + + secret, err := kv.c.Logical().ReadWithContext(ctx, pathToRead) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, fmt.Errorf("%w: no metadata at %s", ErrSecretNotFound, pathToRead) + } + + md, err := extractFullMetadata(secret) + if err != nil { + return nil, fmt.Errorf("unable to extract metadata from secret: %w", err) + } + + return md, nil +} + +// Put inserts a key-value secret (e.g. {"password": "Hashi123"}) +// into the KV v2 secrets engine. +// +// If the secret already exists, a new version will be created +// and the previous version can be accessed with the GetVersion method. +// GetMetadata can provide a list of available versions. +func (kv *KVv2) Put(ctx context.Context, secretPath string, data map[string]interface{}, opts ...KVOption) (*KVSecret, error) { + pathToWriteTo := fmt.Sprintf("%s/data/%s", kv.mountPath, secretPath) + + wrappedData := map[string]interface{}{ + "data": data, + } + + // Add options such as check-and-set, etc. + // We leave this as an optional arg so that most users + // can just pass plain key-value secret data without + // having to remember to put the extra layer "data" in there. 
+    options := make(map[string]interface{})
+    for _, opt := range opts {
+        k, v := opt()
+        options[k] = v
+    }
+    if len(opts) > 0 {
+        wrappedData["options"] = options
+    }
+
+    secret, err := kv.c.Logical().WriteWithContext(ctx, pathToWriteTo, wrappedData)
+    if err != nil {
+        return nil, fmt.Errorf("error writing secret to %s: %w", pathToWriteTo, err)
+    }
+    if secret == nil {
+        return nil, fmt.Errorf("%w: after writing to %s", ErrSecretNotFound, pathToWriteTo)
+    }
+
+    metadata, err := extractVersionMetadata(secret)
+    if err != nil {
+        return nil, fmt.Errorf("secret was written successfully, but unable to view version metadata from response: %w", err)
+    }
+
+    kvSecret := &KVSecret{
+        Data:            nil, // secret.Data in this case is the metadata
+        VersionMetadata: metadata,
+        Raw:             secret,
+    }
+
+    kvSecret.CustomMetadata = extractCustomMetadata(secret)
+
+    return kvSecret, nil
+}
+
+// PutMetadata can be used to fully replace the settable metadata fields for a
+// given KV v2 secret. Each field will replace the corresponding value on the Vault server.
+// Any fields left as nil will reset the field on the Vault server back to its zero value.
+//
+// To only partially replace the values of these metadata fields, use PatchMetadata.
+//
+// This method can also be used to create a new secret with just metadata and no secret data yet.
+func (kv *KVv2) PutMetadata(ctx context.Context, secretPath string, metadata KVMetadataPutInput) error {
+    pathToWriteTo := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath)
+
+    const (
+        casRequiredKey        = "cas_required"
+        deleteVersionAfterKey = "delete_version_after"
+        maxVersionsKey        = "max_versions"
+        customMetadataKey     = "custom_metadata"
+    )
+
+    // convert values to a map we can pass to Logical
+    metadataMap := make(map[string]interface{})
+    metadataMap[maxVersionsKey] = metadata.MaxVersions
+    metadataMap[deleteVersionAfterKey] = metadata.DeleteVersionAfter.String()
+    metadataMap[casRequiredKey] = metadata.CASRequired
+    metadataMap[customMetadataKey] = metadata.CustomMetadata
+
+    _, err := kv.c.Logical().WriteWithContext(ctx, pathToWriteTo, metadataMap)
+    if err != nil {
+        return fmt.Errorf("error writing secret metadata to %s: %w", pathToWriteTo, err)
+    }
+
+    return nil
+}
+
+// Patch additively updates the most recent version of a key-value secret,
+// differentiating it from Put which will fully overwrite the previous data.
+// Only the key-value pairs that are new or changing need to be provided.
+//
+// The WithMergeMethod KVOption function can optionally be passed to dictate which
+// kind of patch to perform, as older Vault server versions (pre-1.9.0) may
+// only be able to use the old "rw" (read-then-write) style of partial update,
+// whereas newer Vault servers can use the default value of "patch" if the
+// client token's policy has the "patch" capability.
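The check-and-set option pairs naturally with Put above. A hedged sketch of a create-only write followed by a guarded update (the mount and path are hypothetical):

```go
package main

import (
	"context"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	kv := client.KVv2("secret") // hypothetical mount path
	ctx := context.Background()

	// cas=0: succeed only if nothing exists at the path yet.
	created, err := kv.Put(ctx, "my-app/config",
		map[string]interface{}{"password": "Hashi123"},
		vault.WithCheckAndSet(0))
	if err != nil {
		log.Fatal(err)
	}

	// A guarded update must name the version it expects to replace; a
	// concurrent writer bumping the version makes this call fail.
	_, err = kv.Put(ctx, "my-app/config",
		map[string]interface{}{"password": "Hashi456"},
		vault.WithCheckAndSet(created.VersionMetadata.Version))
	if err != nil {
		log.Fatal(err)
	}
}
```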
+func (kv *KVv2) Patch(ctx context.Context, secretPath string, newData map[string]interface{}, opts ...KVOption) (*KVSecret, error) { + // determine patch method + var patchMethod string + var ok bool + for _, opt := range opts { + k, v := opt() + if k == "method" { + patchMethod, ok = v.(string) + if !ok { + return nil, fmt.Errorf("unsupported type provided for option value; value for patch method should be string \"rw\" or \"patch\"") + } + } + } + + // Determine which kind of patch to use, + // the newer HTTP Patch style or the older read-then-write style + var kvs *KVSecret + var err error + switch patchMethod { + case "rw": + kvs, err = readThenWrite(ctx, kv.c, kv.mountPath, secretPath, newData) + case "patch": + kvs, err = mergePatch(ctx, kv.c, kv.mountPath, secretPath, newData, opts...) + case "": + kvs, err = mergePatch(ctx, kv.c, kv.mountPath, secretPath, newData, opts...) + default: + return nil, fmt.Errorf("unsupported patch method provided; value for patch method should be string \"rw\" or \"patch\"") + } + if err != nil { + return nil, fmt.Errorf("unable to perform patch: %w", err) + } + if kvs == nil { + return nil, fmt.Errorf("no secret was written to %s", secretPath) + } + + return kvs, nil +} + +// PatchMetadata can be used to replace just a subset of a secret's +// metadata fields at a time, as opposed to PutMetadata which is used to +// completely replace all fields on the previous metadata. +func (kv *KVv2) PatchMetadata(ctx context.Context, secretPath string, metadata KVMetadataPatchInput) error { + pathToWriteTo := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath) + + md, err := toMetadataMap(metadata) + if err != nil { + return fmt.Errorf("unable to create map for JSON merge patch request: %w", err) + } + + _, err = kv.c.Logical().JSONMergePatch(ctx, pathToWriteTo, md) + if err != nil { + return fmt.Errorf("error patching metadata at %s: %w", pathToWriteTo, err) + } + + return nil +} + +// Delete deletes the most recent version of a secret from the KV v2 +// secrets engine. To delete an older version, use DeleteVersions. +func (kv *KVv2) Delete(ctx context.Context, secretPath string) error { + pathToDelete := fmt.Sprintf("%s/data/%s", kv.mountPath, secretPath) + + _, err := kv.c.Logical().DeleteWithContext(ctx, pathToDelete) + if err != nil { + return fmt.Errorf("error deleting secret at %s: %w", pathToDelete, err) + } + + return nil +} + +// DeleteVersions deletes the specified versions of a secret from the KV v2 +// secrets engine. To delete the latest version of a secret, just use Delete. +func (kv *KVv2) DeleteVersions(ctx context.Context, secretPath string, versions []int) error { + // verb and path are different when trying to delete past versions + pathToDelete := fmt.Sprintf("%s/delete/%s", kv.mountPath, secretPath) + + if len(versions) == 0 { + return nil + } + + var versionsToDelete []string + for _, version := range versions { + versionsToDelete = append(versionsToDelete, strconv.Itoa(version)) + } + versionsMap := map[string]interface{}{ + "versions": versionsToDelete, + } + _, err := kv.c.Logical().WriteWithContext(ctx, pathToDelete, versionsMap) + if err != nil { + return fmt.Errorf("error deleting secret at %s: %w", pathToDelete, err) + } + + return nil +} + +// DeleteMetadata deletes all versions and metadata of the secret at the +// given path. 
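A sketch of the two patch styles dispatched above (mount and path hypothetical; the server must already hold a secret at the path):

```go
package main

import (
	"context"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	kv := client.KVv2("secret") // hypothetical mount path
	ctx := context.Background()

	// Default behavior: HTTP merge patch; only the changed keys are sent.
	if _, err := kv.Patch(ctx, "my-app/config",
		map[string]interface{}{"ttl": "30m"}); err != nil {
		log.Fatal(err)
	}

	// Forcing the read-then-write style, e.g. for pre-1.9 servers or
	// tokens without the "patch" capability.
	if _, err := kv.Patch(ctx, "my-app/config",
		map[string]interface{}{"ttl": "1h"},
		vault.WithMergeMethod(vault.KVMergeMethodReadWrite)); err != nil {
		log.Fatal(err)
	}
}
```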
+func (kv *KVv2) DeleteMetadata(ctx context.Context, secretPath string) error {
+    pathToDelete := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath)
+
+    _, err := kv.c.Logical().DeleteWithContext(ctx, pathToDelete)
+    if err != nil {
+        return fmt.Errorf("error deleting secret metadata at %s: %w", pathToDelete, err)
+    }
+
+    return nil
+}
+
+// Undelete undeletes the given versions of a secret, restoring the data
+// so that it can be fetched again with Get requests.
+//
+// A list of existing versions can be retrieved using the GetVersionsAsList method.
+func (kv *KVv2) Undelete(ctx context.Context, secretPath string, versions []int) error {
+    pathToUndelete := fmt.Sprintf("%s/undelete/%s", kv.mountPath, secretPath)
+
+    data := map[string]interface{}{
+        "versions": versions,
+    }
+
+    _, err := kv.c.Logical().WriteWithContext(ctx, pathToUndelete, data)
+    if err != nil {
+        return fmt.Errorf("error undeleting secret versions at %s: %w", pathToUndelete, err)
+    }
+
+    return nil
+}
+
+// Destroy permanently removes the specified secret versions' data
+// from the Vault server. If no secret exists at the given path, no
+// action will be taken.
+//
+// A list of existing versions can be retrieved using the GetVersionsAsList method.
+func (kv *KVv2) Destroy(ctx context.Context, secretPath string, versions []int) error {
+    pathToDestroy := fmt.Sprintf("%s/destroy/%s", kv.mountPath, secretPath)
+
+    data := map[string]interface{}{
+        "versions": versions,
+    }
+
+    _, err := kv.c.Logical().WriteWithContext(ctx, pathToDestroy, data)
+    if err != nil {
+        return fmt.Errorf("error destroying secret versions at %s: %w", pathToDestroy, err)
+    }
+
+    return nil
+}
+
+// Rollback can be used to roll a secret back to a previous
+// non-deleted/non-destroyed version. That previous version becomes the
+// next/newest version for the path.
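The version-lifecycle helpers (DeleteVersions, Undelete, Destroy) might be combined like this (version numbers, mount, and path are illustrative):

```go
package main

import (
	"context"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	kv := client.KVv2("secret") // hypothetical mount path
	ctx := context.Background()

	// Soft-delete version 2: its data becomes unreadable but recoverable.
	if err := kv.DeleteVersions(ctx, "my-app/config", []int{2}); err != nil {
		log.Fatal(err)
	}

	// Undelete restores it.
	if err := kv.Undelete(ctx, "my-app/config", []int{2}); err != nil {
		log.Fatal(err)
	}

	// Destroy removes version 1's data permanently; metadata remains.
	if err := kv.Destroy(ctx, "my-app/config", []int{1}); err != nil {
		log.Fatal(err)
	}
}
```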
+func (kv *KVv2) Rollback(ctx context.Context, secretPath string, toVersion int) (*KVSecret, error) {
+    // First, do a read to get the current version for check-and-set
+    latest, err := kv.Get(ctx, secretPath)
+    if err != nil {
+        return nil, fmt.Errorf("unable to get latest version of secret: %w", err)
+    }
+
+    // Make sure a value already exists
+    if latest == nil {
+        return nil, fmt.Errorf("no secret was found at %s", secretPath)
+    }
+
+    // Verify metadata found
+    if latest.VersionMetadata == nil {
+        return nil, fmt.Errorf("no metadata found; rollback can only be used on existing data")
+    }
+
+    // Now read the version we want to roll back to
+    rollbackVersion, err := kv.GetVersion(ctx, secretPath, toVersion)
+    if err != nil {
+        return nil, fmt.Errorf("unable to get previous version %d of secret: %w", toVersion, err)
+    }
+
+    err = validateRollbackVersion(rollbackVersion)
+    if err != nil {
+        return nil, fmt.Errorf("invalid rollback version %d: %w", toVersion, err)
+    }
+
+    casVersion := latest.VersionMetadata.Version
+    kvs, err := kv.Put(ctx, secretPath, rollbackVersion.Data, WithCheckAndSet(casVersion))
+    if err != nil {
+        return nil, fmt.Errorf("unable to roll back to previous secret version: %w", err)
+    }
+
+    return kvs, nil
+}
+
+func extractCustomMetadata(secret *Secret) map[string]interface{} {
+    // Logical Writes return the metadata directly, Reads return it nested inside the "metadata" key
+    customMetadataInterface, ok := secret.Data["custom_metadata"]
+    if !ok {
+        metadataInterface := secret.Data["metadata"]
+        metadataMap, ok := metadataInterface.(map[string]interface{})
+        if !ok {
+            return nil
+        }
+        customMetadataInterface = metadataMap["custom_metadata"]
+    }
+
+    cm, ok := customMetadataInterface.(map[string]interface{})
+    if !ok {
+        return nil
+    }
+
+    return cm
+}
+
+func extractDataAndVersionMetadata(secret *Secret) (*KVSecret, error) {
+    // A nil map is a valid value for data: secret.Data will be nil when this
+    // version of the secret has been deleted, but the metadata is still
+    // available.
+    var data map[string]interface{}
+    if secret.Data != nil {
+        dataInterface, ok := secret.Data["data"]
+        if !ok {
+            return nil, fmt.Errorf("missing expected 'data' element")
+        }
+
+        if dataInterface != nil {
+            data, ok = dataInterface.(map[string]interface{})
+            if !ok {
+                return nil, fmt.Errorf("unexpected type for 'data' element: %T (%#v)", dataInterface, dataInterface)
+            }
+        }
+    }
+
+    metadata, err := extractVersionMetadata(secret)
+    if err != nil {
+        return nil, fmt.Errorf("unable to get version metadata: %w", err)
+    }
+
+    return &KVSecret{
+        Data:            data,
+        VersionMetadata: metadata,
+        Raw:             secret,
+    }, nil
+}
+
+func extractVersionMetadata(secret *Secret) (*KVVersionMetadata, error) {
+    var metadata *KVVersionMetadata
+
+    if secret.Data == nil {
+        return nil, nil
+    }
+
+    // Logical Writes return the metadata directly, Reads return it nested inside the "metadata" key
+    var metadataMap map[string]interface{}
+    metadataInterface, ok := secret.Data["metadata"]
+    if ok {
+        metadataMap, ok = metadataInterface.(map[string]interface{})
+        if !ok {
+            return nil, fmt.Errorf("unexpected type for 'metadata' element: %T (%#v)", metadataInterface, metadataInterface)
+        }
+    } else {
+        metadataMap = secret.Data
+    }
+
+    // deletion_time usually comes in as an empty string which can't be
+    // processed as time.RFC3339, so we reset it to a convertible value
+    if metadataMap["deletion_time"] == "" {
+        metadataMap["deletion_time"] = time.Time{}
+    }
+
+    d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
+        DecodeHook: mapstructure.StringToTimeHookFunc(time.RFC3339),
+        Result:     &metadata,
+    })
+    if err != nil {
+        return nil, fmt.Errorf("error setting up decoder for API response: %w", err)
+    }
+
+    err = d.Decode(metadataMap)
+    if err != nil {
+        return nil, fmt.Errorf("error decoding metadata from API response into VersionMetadata: %w", err)
+    }
+
+    return metadata, nil
+}
+
+func extractFullMetadata(secret *Secret) (*KVMetadata, error) {
+    var metadata *KVMetadata
+
+    if secret.Data == nil {
+        return nil, nil
+    }
+
+    if versions, ok := secret.Data["versions"]; ok {
+        versionsMap := versions.(map[string]interface{})
+        if len(versionsMap) > 0 {
+            for version, metadata := range versionsMap {
+                metadataMap := metadata.(map[string]interface{})
+                // deletion_time usually comes in as an empty string which can't be
+                // processed as time.RFC3339, so we reset it to a convertible value
+                if metadataMap["deletion_time"] == "" {
+                    metadataMap["deletion_time"] = time.Time{}
+                }
+                versionInt, err := strconv.Atoi(version)
+                if err != nil {
+                    return nil, fmt.Errorf("error converting version %s to integer: %w", version, err)
+                }
+                metadataMap["version"] = versionInt
+                versionsMap[version] = metadataMap // save the updated copy of the metadata map
+            }
+        }
+        secret.Data["versions"] = versionsMap // save the updated copy of the versions map
+    }
+
+    d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
+        DecodeHook: mapstructure.ComposeDecodeHookFunc(
+            mapstructure.StringToTimeHookFunc(time.RFC3339),
+            mapstructure.StringToTimeDurationHookFunc(),
+        ),
+        Result: &metadata,
+    })
+    if err != nil {
+        return nil, fmt.Errorf("error setting up decoder for API response: %w", err)
+    }
+
+    err = d.Decode(secret.Data)
+    if err != nil {
+        return nil, fmt.Errorf("error decoding metadata from API response into KVMetadata: %w", err)
+    }
+
+    return metadata, nil
+}
+
+func validateRollbackVersion(rollbackVersion *KVSecret) error {
+    // Make sure a value already exists
+    if rollbackVersion == nil || rollbackVersion.Data == nil {
+        return
fmt.Errorf("no secret found") + } + + // Verify metadata found + if rollbackVersion.VersionMetadata == nil { + return fmt.Errorf("no version metadata found; rollback only works on existing data") + } + + // Verify it hasn't been deleted + if !rollbackVersion.VersionMetadata.DeletionTime.IsZero() { + return fmt.Errorf("cannot roll back to a version that has been deleted") + } + + if rollbackVersion.VersionMetadata.Destroyed { + return fmt.Errorf("cannot roll back to a version that has been destroyed") + } + + // Verify old data found + if rollbackVersion.Data == nil { + return fmt.Errorf("no data found; rollback only works on existing data") + } + + return nil +} + +func mergePatch(ctx context.Context, client *Client, mountPath string, secretPath string, newData map[string]interface{}, opts ...KVOption) (*KVSecret, error) { + pathToMergePatch := fmt.Sprintf("%s/data/%s", mountPath, secretPath) + + // take any other additional options provided + // and pass them along to the patch request + wrappedData := map[string]interface{}{ + "data": newData, + } + options := make(map[string]interface{}) + for _, opt := range opts { + k, v := opt() + options[k] = v + } + if len(opts) > 0 { + wrappedData["options"] = options + } + + secret, err := client.Logical().JSONMergePatch(ctx, pathToMergePatch, wrappedData) + if err != nil { + var re *ResponseError + + if errors.As(err, &re) { + switch re.StatusCode { + // 403 + case http.StatusForbidden: + return nil, fmt.Errorf("received 403 from Vault server; please ensure that token's policy has \"patch\" capability: %w", err) + + // 404 + case http.StatusNotFound: + return nil, fmt.Errorf("%w: performing merge patch to %s", ErrSecretNotFound, pathToMergePatch) + + // 405 + case http.StatusMethodNotAllowed: + // If it's a 405, that probably means the server is running a pre-1.9 + // Vault version that doesn't support the HTTP PATCH method. + // Fall back to the old way of doing it. + return readThenWrite(ctx, client, mountPath, secretPath, newData) + } + } + + return nil, fmt.Errorf("error performing merge patch to %s: %w", pathToMergePatch, err) + } + + metadata, err := extractVersionMetadata(secret) + if err != nil { + return nil, fmt.Errorf("secret was written successfully, but unable to view version metadata from response: %w", err) + } + + kvSecret := &KVSecret{ + Data: nil, // secret.Data in this case is the metadata + VersionMetadata: metadata, + Raw: secret, + } + + kvSecret.CustomMetadata = extractCustomMetadata(secret) + + return kvSecret, nil +} + +func readThenWrite(ctx context.Context, client *Client, mountPath string, secretPath string, newData map[string]interface{}) (*KVSecret, error) { + // First, read the secret. 
+ existingVersion, err := client.KVv2(mountPath).Get(ctx, secretPath) + if err != nil { + return nil, fmt.Errorf("error reading secret as part of read-then-write patch operation: %w", err) + } + + // Make sure the secret already exists + if existingVersion == nil || existingVersion.Data == nil { + return nil, fmt.Errorf("%w: at %s as part of read-then-write patch operation", ErrSecretNotFound, secretPath) + } + + // Verify existing secret has metadata + if existingVersion.VersionMetadata == nil { + return nil, fmt.Errorf("no metadata found at %s; patch can only be used on existing data", secretPath) + } + + // Copy new data over with existing data + combinedData := existingVersion.Data + for k, v := range newData { + combinedData[k] = v + } + + updatedSecret, err := client.KVv2(mountPath).Put(ctx, secretPath, combinedData, WithCheckAndSet(existingVersion.VersionMetadata.Version)) + if err != nil { + return nil, fmt.Errorf("error writing secret to %s: %w", secretPath, err) + } + + return updatedSecret, nil +} + +func toMetadataMap(patchInput KVMetadataPatchInput) (map[string]interface{}, error) { + metadataMap := make(map[string]interface{}) + + const ( + casRequiredKey = "cas_required" + deleteVersionAfterKey = "delete_version_after" + maxVersionsKey = "max_versions" + customMetadataKey = "custom_metadata" + ) + + // The KVMetadataPatchInput struct is designed to have pointer fields so that + // the user can easily express the difference between explicitly setting a + // field back to its zero value (e.g. false), as opposed to just having + // the field remain unchanged (e.g. nil). This way, they only need to pass + // the fields they want to change. + if patchInput.MaxVersions != nil { + metadataMap[maxVersionsKey] = *(patchInput.MaxVersions) + } + if patchInput.CASRequired != nil { + metadataMap[casRequiredKey] = *(patchInput.CASRequired) + } + if patchInput.CustomMetadata != nil { + if len(patchInput.CustomMetadata) == 0 { // empty non-nil map means delete all the keys + metadataMap[customMetadataKey] = nil + } else { + metadataMap[customMetadataKey] = patchInput.CustomMetadata + } + } + if patchInput.DeleteVersionAfter != nil { + metadataMap[deleteVersionAfterKey] = patchInput.DeleteVersionAfter.String() + } + + return metadataMap, nil +} diff --git a/api/lifetime_watcher.go b/api/lifetime_watcher.go new file mode 100644 index 0000000..5c060e5 --- /dev/null +++ b/api/lifetime_watcher.go @@ -0,0 +1,420 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "errors" + "math/rand" + "sync" + "time" + + "github.com/cenkalti/backoff/v3" +) + +var ( + ErrLifetimeWatcherMissingInput = errors.New("missing input") + ErrLifetimeWatcherMissingSecret = errors.New("missing secret") + ErrLifetimeWatcherNotRenewable = errors.New("secret is not renewable") + ErrLifetimeWatcherNoSecretData = errors.New("returned empty secret data") + + // Deprecated; kept for compatibility + ErrRenewerMissingInput = errors.New("missing input to renewer") + ErrRenewerMissingSecret = errors.New("missing secret to renew") + ErrRenewerNotRenewable = errors.New("secret is not renewable") + ErrRenewerNoSecretData = errors.New("returned empty secret data") + + // DefaultLifetimeWatcherRenewBuffer is the default size of the buffer for renew + // messages on the channel. 
+ DefaultLifetimeWatcherRenewBuffer = 5 + // Deprecated: kept for backwards compatibility + DefaultRenewerRenewBuffer = 5 +) + +type RenewBehavior uint + +const ( + // RenewBehaviorIgnoreErrors means we will attempt to keep renewing until + // we hit the lifetime threshold. It also ignores errors stemming from + // passing a non-renewable lease in. In practice, this means you simply + // reauthenticate/refetch credentials when the watcher exits. This is the + // default. + RenewBehaviorIgnoreErrors RenewBehavior = iota + + // RenewBehaviorRenewDisabled turns off renewal attempts entirely. This + // allows you to simply watch lifetime and have the watcher return at a + // reasonable threshold without actually making Vault calls. + RenewBehaviorRenewDisabled + + // RenewBehaviorErrorOnErrors is the "legacy" behavior which always exits + // on some kind of error + RenewBehaviorErrorOnErrors +) + +// LifetimeWatcher is a process for watching lifetime of a secret. +// +// watcher, err := client.NewLifetimeWatcher(&LifetimeWatcherInput{ +// Secret: mySecret, +// }) +// go watcher.Start() +// defer watcher.Stop() +// +// for { +// select { +// case err := <-watcher.DoneCh(): +// if err != nil { +// log.Fatal(err) +// } +// +// // Renewal is now over +// case renewal := <-watcher.RenewCh(): +// log.Printf("Successfully renewed: %#v", renewal) +// } +// } +// +// `DoneCh` will return if renewal fails, or if the remaining lease duration is +// under a built-in threshold and either renewing is not extending it or +// renewing is disabled. In both cases, the caller should attempt a re-read of +// the secret. Clients should check the return value of the channel to see if +// renewal was successful. +type LifetimeWatcher struct { + l sync.Mutex + + client *Client + secret *Secret + grace time.Duration + random *rand.Rand + increment int + doneCh chan error + renewCh chan *RenewOutput + renewBehavior RenewBehavior + + stopped bool + stopCh chan struct{} + + errLifetimeWatcherNotRenewable error + errLifetimeWatcherNoSecretData error +} + +// LifetimeWatcherInput is used as input to the renew function. +type LifetimeWatcherInput struct { + // Secret is the secret to renew + Secret *Secret + + // DEPRECATED: this does not do anything. + Grace time.Duration + + // Rand is the randomizer to use for underlying randomization. If not + // provided, one will be generated and seeded automatically. If provided, it + // is assumed to have already been seeded. + Rand *rand.Rand + + // RenewBuffer is the size of the buffered channel where renew messages are + // dispatched. + RenewBuffer int + + // The new TTL, in seconds, that should be set on the lease. The TTL set + // here may or may not be honored by the vault server, based on Vault + // configuration or any associated max TTL values. If specified, the + // minimum of this value and the remaining lease duration will be used + // for grace period calculations. + Increment int + + // RenewBehavior controls what happens when a renewal errors or the + // passed-in secret is not renewable. + RenewBehavior RenewBehavior +} + +// RenewOutput is the metadata returned to the client (if it's listening) to +// renew messages. +type RenewOutput struct { + // RenewedAt is the timestamp when the renewal took place (UTC). + RenewedAt time.Time + + // Secret is the underlying renewal data. It's the same struct as all data + // that is returned from Vault, but since this is renewal data, it will not + // usually include the secret itself. 
+    Secret *Secret
+}
+
+// NewLifetimeWatcher creates a new lifetime watcher from the given input.
+func (c *Client) NewLifetimeWatcher(i *LifetimeWatcherInput) (*LifetimeWatcher, error) {
+    if i == nil {
+        return nil, ErrLifetimeWatcherMissingInput
+    }
+
+    secret := i.Secret
+    if secret == nil {
+        return nil, ErrLifetimeWatcherMissingSecret
+    }
+
+    random := i.Rand
+    if random == nil {
+        // NOTE:
+        // Rather than a cryptographically secure random number generator (RNG),
+        // the default behavior uses the math/rand package. The random number is
+        // used to introduce a slight jitter when calculating the grace period
+        // for a monitored secret. This is intended to stagger renewal
+        // requests to the Vault server, but in a semi-predictable way, so there
+        // is no need to use a cryptographically secure RNG.
+        random = rand.New(rand.NewSource(int64(time.Now().Nanosecond())))
+    }
+
+    renewBuffer := i.RenewBuffer
+    if renewBuffer == 0 {
+        renewBuffer = DefaultLifetimeWatcherRenewBuffer
+    }
+
+    return &LifetimeWatcher{
+        client:        c,
+        secret:        secret,
+        increment:     i.Increment,
+        random:        random,
+        doneCh:        make(chan error, 1),
+        renewCh:       make(chan *RenewOutput, renewBuffer),
+        renewBehavior: i.RenewBehavior,
+
+        stopped: false,
+        stopCh:  make(chan struct{}),
+
+        errLifetimeWatcherNotRenewable: ErrLifetimeWatcherNotRenewable,
+        errLifetimeWatcherNoSecretData: ErrLifetimeWatcherNoSecretData,
+    }, nil
+}
+
+// Deprecated: exists only for backwards compatibility. Calls
+// NewLifetimeWatcher, and sets compatibility flags.
+func (c *Client) NewRenewer(i *LifetimeWatcherInput) (*LifetimeWatcher, error) {
+    if i == nil {
+        return nil, ErrRenewerMissingInput
+    }
+
+    secret := i.Secret
+    if secret == nil {
+        return nil, ErrRenewerMissingSecret
+    }
+
+    renewer, err := c.NewLifetimeWatcher(i)
+    if err != nil {
+        return nil, err
+    }
+
+    renewer.renewBehavior = RenewBehaviorErrorOnErrors
+    renewer.errLifetimeWatcherNotRenewable = ErrRenewerNotRenewable
+    renewer.errLifetimeWatcherNoSecretData = ErrRenewerNoSecretData
+    return renewer, err
+}
+
+// DoneCh returns the channel on which the watcher publishes when renewal stops.
+// If renewal stopped because of an error, that error is sent on the channel;
+// otherwise nil is sent.
+func (r *LifetimeWatcher) DoneCh() <-chan error {
+    return r.doneCh
+}
+
+// RenewCh is a channel that receives a message when a successful renewal takes
+// place and includes metadata about the renewal.
+func (r *LifetimeWatcher) RenewCh() <-chan *RenewOutput {
+    return r.renewCh
+}
+
+// Stop stops the renewer.
+func (r *LifetimeWatcher) Stop() {
+    r.l.Lock()
+    defer r.l.Unlock()
+
+    if !r.stopped {
+        close(r.stopCh)
+        r.stopped = true
+    }
+}
+
+// Start starts a background process for watching the lifetime of this secret.
+// If renewal is enabled, when the secret has auth data, this attempts to renew
+// the auth (token); when the secret has a lease, this attempts to renew the
+// lease.
+func (r *LifetimeWatcher) Start() {
+    r.doneCh <- r.doRenew()
+}
+
+// Renew is for compatibility with the legacy api.Renewer. Calling Renew
+// simply chains to Start.
+func (r *LifetimeWatcher) Renew() {
+    r.Start()
+}
+
+type renewFunc func(string, int) (*Secret, error)
+
+// doRenew is a helper for renewing authentication.
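A sketch of the full watcher loop using the API above (the "database/creds/readonly" path is hypothetical; any leased secret works):

```go
package main

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical dynamic secret with a lease, e.g. database credentials.
	secret, err := client.Logical().Read("database/creds/readonly")
	if err != nil {
		log.Fatal(err)
	}

	watcher, err := client.NewLifetimeWatcher(&vault.LifetimeWatcherInput{
		Secret: secret,
	})
	if err != nil {
		log.Fatal(err)
	}
	go watcher.Start()
	defer watcher.Stop()

	for {
		select {
		case err := <-watcher.DoneCh():
			// Renewal stopped: either an error occurred or the lease is
			// near expiry; re-read the secret in either case.
			if err != nil {
				log.Printf("renewal error: %v", err)
			}
			return
		case renewal := <-watcher.RenewCh():
			log.Printf("renewed at %s", renewal.RenewedAt)
		}
	}
}
```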
+func (r *LifetimeWatcher) doRenew() error { + defaultInitialRetryInterval := 10 * time.Second + switch { + case r.secret.Auth != nil: + return r.doRenewWithOptions(true, !r.secret.Auth.Renewable, + r.secret.Auth.LeaseDuration, r.secret.Auth.ClientToken, + r.client.Auth().Token().RenewTokenAsSelf, defaultInitialRetryInterval) + default: + return r.doRenewWithOptions(false, !r.secret.Renewable, + r.secret.LeaseDuration, r.secret.LeaseID, + r.client.Sys().Renew, defaultInitialRetryInterval) + } +} + +func (r *LifetimeWatcher) doRenewWithOptions(tokenMode bool, nonRenewable bool, initLeaseDuration int, credString string, + renew renewFunc, initialRetryInterval time.Duration, +) error { + if credString == "" || + (nonRenewable && r.renewBehavior == RenewBehaviorErrorOnErrors) { + return r.errLifetimeWatcherNotRenewable + } + + initialTime := time.Now() + priorDuration := time.Duration(initLeaseDuration) * time.Second + r.calculateGrace(priorDuration, time.Duration(r.increment)*time.Second) + var errorBackoff backoff.BackOff + + for { + // Check if we are stopped. + select { + case <-r.stopCh: + return nil + default: + } + + var remainingLeaseDuration time.Duration + fallbackLeaseDuration := initialTime.Add(priorDuration).Sub(time.Now()) + var renewal *Secret + var err error + + switch { + case nonRenewable || r.renewBehavior == RenewBehaviorRenewDisabled: + // Can't or won't renew, just keep the same expiration so we exit + // when it's reauthentication time + remainingLeaseDuration = fallbackLeaseDuration + + default: + // Renew the token + renewal, err = renew(credString, r.increment) + if err != nil || renewal == nil || (tokenMode && renewal.Auth == nil) { + if r.renewBehavior == RenewBehaviorErrorOnErrors { + if err != nil { + return err + } + if renewal == nil || (tokenMode && renewal.Auth == nil) { + return r.errLifetimeWatcherNoSecretData + } + } + + // Calculate remaining duration until initial token lease expires + remainingLeaseDuration = initialTime.Add(time.Duration(initLeaseDuration) * time.Second).Sub(time.Now()) + if errorBackoff == nil { + errorBackoff = &backoff.ExponentialBackOff{ + MaxElapsedTime: remainingLeaseDuration, + RandomizationFactor: backoff.DefaultRandomizationFactor, + InitialInterval: initialRetryInterval, + MaxInterval: 5 * time.Minute, + Multiplier: 2, + Clock: backoff.SystemClock, + } + errorBackoff.Reset() + } + break + } + errorBackoff = nil + + // Push a message that a renewal took place. + select { + case r.renewCh <- &RenewOutput{time.Now().UTC(), renewal}: + default: + } + + // Possibly error if we are not renewable + if ((tokenMode && !renewal.Auth.Renewable) || (!tokenMode && !renewal.Renewable)) && + r.renewBehavior == RenewBehaviorErrorOnErrors { + return r.errLifetimeWatcherNotRenewable + } + + // Reset initial time + initialTime = time.Now() + + // Grab the lease duration + initLeaseDuration = renewal.LeaseDuration + if tokenMode { + initLeaseDuration = renewal.Auth.LeaseDuration + } + + remainingLeaseDuration = time.Duration(initLeaseDuration) * time.Second + } + + var sleepDuration time.Duration + + if errorBackoff == nil { + sleepDuration = r.calculateSleepDuration(remainingLeaseDuration, priorDuration) + } else if errorBackoff.NextBackOff() == backoff.Stop { + return err + } + + // remainingLeaseDuration becomes the priorDuration for the next loop + priorDuration = remainingLeaseDuration + + // If we are within grace, return now; or, if the amount of time we + // would sleep would land us in the grace period. 
This helps with short
+    // tokens; for example, you don't want a current lease duration of 4
+    // seconds, a grace period of 3 seconds, and end up sleeping for more
+    // than three of those seconds and having a very small budget of time
+    // to renew.
+            if remainingLeaseDuration <= r.grace || remainingLeaseDuration-sleepDuration <= r.grace {
+                return nil
+            }
+
+            timer := time.NewTimer(sleepDuration)
+            select {
+            case <-r.stopCh:
+                timer.Stop()
+                return nil
+            case <-timer.C:
+                continue
+            }
+        }
+    }
+
+// calculateSleepDuration calculates the amount of time the LifetimeWatcher should sleep
+// before re-entering its loop.
+func (r *LifetimeWatcher) calculateSleepDuration(remainingLeaseDuration, priorDuration time.Duration) time.Duration {
+    // We keep evaluating a new grace period so long as the lease is
+    // extending. Once it stops extending, we've hit the max and need to
+    // rely on the grace duration.
+    if remainingLeaseDuration > priorDuration {
+        r.calculateGrace(remainingLeaseDuration, time.Duration(r.increment)*time.Second)
+    }
+
+    // The sleep duration is set to 2/3 of the current lease duration plus
+    // 1/3 of the current grace period, which adds jitter.
+    return time.Duration(float64(remainingLeaseDuration.Nanoseconds())*2/3 + float64(r.grace.Nanoseconds())/3)
+}
+
+// calculateGrace calculates the grace period based on the minimum of the
+// remaining lease duration and the token increment value; it also adds some
+// jitter to not have clients be in sync.
+func (r *LifetimeWatcher) calculateGrace(leaseDuration, increment time.Duration) {
+    minDuration := leaseDuration
+    if minDuration > increment && increment > 0 {
+        minDuration = increment
+    }
+
+    if minDuration <= 0 {
+        r.grace = 0
+        return
+    }
+
+    leaseNanos := float64(minDuration.Nanoseconds())
+    jitterMax := 0.1 * leaseNanos
+
+    // For a given lease duration, we want to allow 80-90% of that to elapse,
+    // so the remaining amount is the grace period
+    r.grace = time.Duration(jitterMax) + time.Duration(uint64(r.random.Int63())%uint64(jitterMax))
+}
+
+type (
+    Renewer      = LifetimeWatcher
+    RenewerInput = LifetimeWatcherInput
+)
diff --git a/api/logical.go b/api/logical.go
new file mode 100644
index 0000000..927dd16
--- /dev/null
+++ b/api/logical.go
@@ -0,0 +1,403 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+import (
+    "bytes"
+    "context"
+    "encoding/json"
+    "fmt"
+    "io"
+    "net/http"
+    "net/url"
+    "os"
+    "strings"
+
+    "github.com/hashicorp/errwrap"
+)
+
+const (
+    wrappedResponseLocation = "cubbyhole/response"
+)
+
+var (
+    // The default TTL that will be used with `sys/wrapping/wrap`; this can
+    // be changed
+    DefaultWrappingTTL = "5m"
+
+    // The default function used if no other function is set. It honors the env
+    // var to set the wrap TTL. The default wrap TTL will apply when writing
+    // to `sys/wrapping/wrap` when the env var is not set.
+    DefaultWrappingLookupFunc = func(operation, path string) string {
+        if os.Getenv(EnvVaultWrapTTL) != "" {
+            return os.Getenv(EnvVaultWrapTTL)
+        }
+
+        if (operation == http.MethodPut || operation == http.MethodPost) && path == "sys/wrapping/wrap" {
+            return DefaultWrappingTTL
+        }
+
+        return ""
+    }
+)
+
+// Logical is used to perform logical backend operations on Vault.
+type Logical struct {
+    c *Client
+}
+
+// Logical is used to return the client for logical-backend API calls.
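A sketch of overriding the wrapping behavior per path with the client's SetWrappingLookupFunc, falling back to DefaultWrappingLookupFunc above (the AppRole role path is hypothetical):

```go
package main

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Wrap responses for one hypothetical path; everything else keeps the
	// default behavior (returning "" means "do not wrap").
	client.SetWrappingLookupFunc(func(operation, path string) string {
		if path == "auth/approle/role/my-role/secret-id" {
			return "15m"
		}
		return vault.DefaultWrappingLookupFunc(operation, path)
	})

	secret, err := client.Logical().Write("auth/approle/role/my-role/secret-id", nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("wrapping token: %s", secret.WrapInfo.Token)
}
```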
+func (c *Client) Logical() *Logical {
+    return &Logical{c: c}
+}
+
+func (c *Logical) Read(path string) (*Secret, error) {
+    return c.ReadWithDataWithContext(context.Background(), path, nil)
+}
+
+func (c *Logical) ReadWithContext(ctx context.Context, path string) (*Secret, error) {
+    return c.ReadWithDataWithContext(ctx, path, nil)
+}
+
+func (c *Logical) ReadWithData(path string, data map[string][]string) (*Secret, error) {
+    return c.ReadWithDataWithContext(context.Background(), path, data)
+}
+
+func (c *Logical) ReadWithDataWithContext(ctx context.Context, path string, data map[string][]string) (*Secret, error) {
+    ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+    defer cancelFunc()
+
+    resp, err := c.readRawWithDataWithContext(ctx, path, data)
+    return c.ParseRawResponseAndCloseBody(resp, err)
+}
+
+// ReadRaw attempts to read the value stored at the given Vault path
+// (without '/v1/' prefix) and returns a raw *http.Response.
+//
+// Note: the raw-response functions do not respect the client-configured
+// request timeout; if a timeout is desired, please use ReadRawWithContext
+// instead and set the timeout through context.WithTimeout or context.WithDeadline.
+func (c *Logical) ReadRaw(path string) (*Response, error) {
+    return c.ReadRawWithDataWithContext(context.Background(), path, nil)
+}
+
+// ReadRawWithContext attempts to read the value stored at the given Vault path
+// (without '/v1/' prefix) and returns a raw *http.Response.
+//
+// Note: the raw-response functions do not respect the client-configured
+// request timeout; if a timeout is desired, please set it through
+// context.WithTimeout or context.WithDeadline.
+func (c *Logical) ReadRawWithContext(ctx context.Context, path string) (*Response, error) {
+    return c.ReadRawWithDataWithContext(ctx, path, nil)
+}
+
+// ReadRawWithData attempts to read the value stored at the given Vault
+// path (without '/v1/' prefix) and returns a raw *http.Response. The 'data' map
+// is added as query parameters to the request.
+//
+// Note: the raw-response functions do not respect the client-configured
+// request timeout; if a timeout is desired, please use
+// ReadRawWithDataWithContext instead and set the timeout through
+// context.WithTimeout or context.WithDeadline.
+func (c *Logical) ReadRawWithData(path string, data map[string][]string) (*Response, error) {
+    return c.ReadRawWithDataWithContext(context.Background(), path, data)
+}
+
+// ReadRawWithDataWithContext attempts to read the value stored at the given
+// Vault path (without '/v1/' prefix) and returns a raw *http.Response. The 'data'
+// map is added as query parameters to the request.
+//
+// Note: the raw-response functions do not respect the client-configured
+// request timeout; if a timeout is desired, please set it through
+// context.WithTimeout or context.WithDeadline.
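Because the raw variants bypass the client-configured timeout, a caller bounds them through the context, along these lines (a sketch; the path is illustrative):

```go
package main

import (
	"context"
	"io"
	"log"
	"time"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Bound the raw call explicitly, since the client timeout is ignored.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	resp, err := client.Logical().ReadRawWithContext(ctx, "sys/health")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("raw response: %s", body)
}
```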
+func (c *Logical) ReadRawWithDataWithContext(ctx context.Context, path string, data map[string][]string) (*Response, error) { + return c.readRawWithDataWithContext(ctx, path, data) +} + +func (c *Logical) ParseRawResponseAndCloseBody(resp *Response, err error) (*Secret, error) { + if resp != nil { + defer resp.Body.Close() + } + if resp != nil && resp.StatusCode == 404 { + secret, parseErr := ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, parseErr + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, nil + } + return nil, nil + } + if err != nil { + return nil, err + } + + return ParseSecret(resp.Body) +} + +func (c *Logical) readRawWithDataWithContext(ctx context.Context, path string, data map[string][]string) (*Response, error) { + r := c.c.NewRequest(http.MethodGet, "/v1/"+path) + + var values url.Values + for k, v := range data { + if values == nil { + values = make(url.Values) + } + for _, val := range v { + values.Add(k, val) + } + } + + if values != nil { + r.Params = values + } + + return c.c.RawRequestWithContext(ctx, r) +} + +func (c *Logical) List(path string) (*Secret, error) { + return c.ListWithContext(context.Background(), path) +} + +func (c *Logical) ListWithContext(ctx context.Context, path string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest("LIST", "/v1/"+path) + // Set this for broader compatibility, but we use LIST above to be able to + // handle the wrapping lookup function + r.Method = http.MethodGet + r.Params.Set("list", "true") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + } + if resp != nil && resp.StatusCode == 404 { + secret, parseErr := ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, parseErr + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, nil + } + return nil, nil + } + if err != nil { + return nil, err + } + + return ParseSecret(resp.Body) +} + +func (c *Logical) Write(path string, data map[string]interface{}) (*Secret, error) { + return c.WriteWithContext(context.Background(), path, data) +} + +func (c *Logical) WriteWithContext(ctx context.Context, path string, data map[string]interface{}) (*Secret, error) { + r := c.c.NewRequest(http.MethodPut, "/v1/"+path) + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + return c.write(ctx, path, r) +} + +func (c *Logical) JSONMergePatch(ctx context.Context, path string, data map[string]interface{}) (*Secret, error) { + r := c.c.NewRequest(http.MethodPatch, "/v1/"+path) + r.Headers.Set("Content-Type", "application/merge-patch+json") + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + return c.write(ctx, path, r) +} + +func (c *Logical) WriteBytes(path string, data []byte) (*Secret, error) { + return c.WriteBytesWithContext(context.Background(), path, data) +} + +func (c *Logical) WriteBytesWithContext(ctx context.Context, path string, data []byte) (*Secret, error) { + r := c.c.NewRequest(http.MethodPut, "/v1/"+path) + r.BodyBytes = data + + return c.write(ctx, path, r) +} + +func (c *Logical) write(ctx context.Context, path string, request *Request) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + resp, err := c.c.rawRequestWithContext(ctx, request) + if resp != nil { + defer resp.Body.Close() 
+ } + if resp != nil && resp.StatusCode == 404 { + secret, parseErr := ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, parseErr + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, err + } + } + if err != nil { + return nil, err + } + + return ParseSecret(resp.Body) +} + +func (c *Logical) Delete(path string) (*Secret, error) { + return c.DeleteWithContext(context.Background(), path) +} + +func (c *Logical) DeleteWithContext(ctx context.Context, path string) (*Secret, error) { + return c.DeleteWithDataWithContext(ctx, path, nil) +} + +func (c *Logical) DeleteWithData(path string, data map[string][]string) (*Secret, error) { + return c.DeleteWithDataWithContext(context.Background(), path, data) +} + +func (c *Logical) DeleteWithDataWithContext(ctx context.Context, path string, data map[string][]string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, "/v1/"+path) + + var values url.Values + for k, v := range data { + if values == nil { + values = make(url.Values) + } + for _, val := range v { + values.Add(k, val) + } + } + + if values != nil { + r.Params = values + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + } + if resp != nil && resp.StatusCode == 404 { + secret, parseErr := ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, parseErr + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, err + } + } + if err != nil { + return nil, err + } + + return ParseSecret(resp.Body) +} + +func (c *Logical) Unwrap(wrappingToken string) (*Secret, error) { + return c.UnwrapWithContext(context.Background(), wrappingToken) +} + +func (c *Logical) UnwrapWithContext(ctx context.Context, wrappingToken string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + var data map[string]interface{} + wt := strings.TrimSpace(wrappingToken) + if wrappingToken != "" { + if c.c.Token() == "" { + c.c.SetToken(wt) + } else if wrappingToken != c.c.Token() { + data = map[string]interface{}{ + "token": wt, + } + } + } + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/wrapping/unwrap") + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + } + if resp == nil || resp.StatusCode != 404 { + if err != nil { + return nil, err + } + if resp == nil { + return nil, nil + } + return ParseSecret(resp.Body) + } + + // In the 404 case this may actually be a wrapped 404 error + secret, parseErr := ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, parseErr + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, nil + } + + // Otherwise this might be an old-style wrapping token so attempt the old + // method + if wrappingToken != "" { + origToken := c.c.Token() + defer c.c.SetToken(origToken) + c.c.SetToken(wrappingToken) + } + + secret, err = c.ReadWithContext(ctx, wrappedResponseLocation) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("error reading %q: {{err}}", wrappedResponseLocation), err) + } + if secret == nil { + return nil, fmt.Errorf("no value found at %q", wrappedResponseLocation) + } + if secret.Data == nil { + return 
nil, fmt.Errorf("\"data\" not found in wrapping response")
+    }
+    if _, ok := secret.Data["response"]; !ok {
+        return nil, fmt.Errorf("\"response\" not found in wrapping response \"data\" map")
+    }
+
+    wrappedSecret := new(Secret)
+    buf := bytes.NewBufferString(secret.Data["response"].(string))
+    dec := json.NewDecoder(buf)
+    dec.UseNumber()
+    if err := dec.Decode(wrappedSecret); err != nil {
+        return nil, errwrap.Wrapf("error unmarshalling wrapped secret: {{err}}", err)
+    }
+
+    return wrappedSecret, nil
+}
diff --git a/api/output_policy.go b/api/output_policy.go
new file mode 100644
index 0000000..c3ec522
--- /dev/null
+++ b/api/output_policy.go
@@ -0,0 +1,99 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+import (
+    "fmt"
+    "net/http"
+    "net/url"
+    "strconv"
+    "strings"
+)
+
+const (
+    ErrOutputPolicyRequest = "output a policy, please"
+)
+
+var LastOutputPolicyError *OutputPolicyError
+
+type OutputPolicyError struct {
+    method         string
+    path           string
+    params         url.Values
+    finalHCLString string
+}
+
+func (d *OutputPolicyError) Error() string {
+    if d.finalHCLString == "" {
+        p, err := d.buildSamplePolicy()
+        if err != nil {
+            return err.Error()
+        }
+        d.finalHCLString = p
+    }
+
+    return ErrOutputPolicyRequest
+}
+
+func (d *OutputPolicyError) HCLString() (string, error) {
+    if d.finalHCLString == "" {
+        p, err := d.buildSamplePolicy()
+        if err != nil {
+            return "", err
+        }
+        d.finalHCLString = p
+    }
+    return d.finalHCLString, nil
+}
+
+// Builds a sample policy document from the request
+func (d *OutputPolicyError) buildSamplePolicy() (string, error) {
+    operation := d.method
+    // List is often passed as a URL query parameter instead of as an
+    // http.Method, so check the params and switch to the "list"
+    // capability when it is set.
+    if d.params.Has("list") {
+        isList, err := strconv.ParseBool(d.params.Get("list"))
+        if err != nil {
+            return "", fmt.Errorf("the value of the list url param is not a bool: %v", err)
+        }
+
+        if isList {
+            operation = "LIST"
+        }
+    }
+
+    var capabilities []string
+    switch operation {
+    case http.MethodGet, "":
+        capabilities = append(capabilities, "read")
+    case http.MethodPost, http.MethodPut:
+        capabilities = append(capabilities, "create")
+        capabilities = append(capabilities, "update")
+    case http.MethodPatch:
+        capabilities = append(capabilities, "patch")
+    case http.MethodDelete:
+        capabilities = append(capabilities, "delete")
+    case "LIST":
+        capabilities = append(capabilities, "list")
+    }
+
+    // determine whether to add sudo capability
+    if IsSudoPath(d.path) {
+        capabilities = append(capabilities, "sudo")
+    }
+
+    return formatOutputPolicy(d.path, capabilities), nil
+}
+
+func formatOutputPolicy(path string, capabilities []string) string {
+    // the OpenAPI response has a / in front of each path,
+    // but policies need the path without that leading slash
+    path = strings.TrimLeft(path, "/")
+
+    capStr := strings.Join(capabilities, `", "`)
+    return fmt.Sprintf(
+        `path "%s" {
+  capabilities = ["%s"]
+}`, path, capStr)
+}
diff --git a/api/output_policy_test.go b/api/output_policy_test.go
new file mode 100644
index 0000000..2092e2b
--- /dev/null
+++ b/api/output_policy_test.go
@@ -0,0 +1,83 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "net/http" + "net/url" + "testing" +) + +func TestBuildSamplePolicy(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + req *OutputPolicyError + expected string + err error + }{ + { + "happy path", + &OutputPolicyError{ + method: http.MethodGet, + path: "/something", + }, + formatOutputPolicy("/something", []string{"read"}), + nil, + }, + { // test included to clear up some confusion around the sanitize comment + "demonstrate that this function does not format fully", + &OutputPolicyError{ + method: http.MethodGet, + path: "http://vault.test/v1/something", + }, + formatOutputPolicy("http://vault.test/v1/something", []string{"read"}), + nil, + }, + { // test that list is properly returned + "list over read returned", + &OutputPolicyError{ + method: http.MethodGet, + path: "/something", + params: url.Values{ + "list": []string{"true"}, + }, + }, + formatOutputPolicy("/something", []string{"list"}), + nil, + }, + { + "valid protected path", + &OutputPolicyError{ + method: http.MethodGet, + path: "/sys/config/ui/headers/", + }, + formatOutputPolicy("/sys/config/ui/headers/", []string{"read", "sudo"}), + nil, + }, + { // ensure that a formatted path that trims the trailing slash as the code does still works for recognizing a sudo path + "valid protected path no trailing /", + &OutputPolicyError{ + method: http.MethodGet, + path: "/sys/config/ui/headers", + }, + formatOutputPolicy("/sys/config/ui/headers", []string{"read", "sudo"}), + nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result, err := tc.req.buildSamplePolicy() + if tc.err != err { + t.Fatalf("expected for the error to be %v instead got %v\n", tc.err, err) + } + + if tc.expected != result { + t.Fatalf("expected for the policy string to be %v instead got %v\n", tc.expected, result) + } + }) + } +} diff --git a/api/output_string.go b/api/output_string.go new file mode 100644 index 0000000..d777771 --- /dev/null +++ b/api/output_string.go @@ -0,0 +1,98 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "fmt" + "net/http" + "strings" + + retryablehttp "github.com/hashicorp/go-retryablehttp" +) + +const ( + ErrOutputStringRequest = "output a string, please" +) + +var LastOutputStringError *OutputStringError + +type OutputStringError struct { + *retryablehttp.Request + TLSSkipVerify bool + ClientCACert, ClientCAPath string + ClientCert, ClientKey string + finalCurlString string +} + +func (d *OutputStringError) Error() string { + if d.finalCurlString == "" { + cs, err := d.buildCurlString() + if err != nil { + return err.Error() + } + d.finalCurlString = cs + } + + return ErrOutputStringRequest +} + +func (d *OutputStringError) CurlString() (string, error) { + if d.finalCurlString == "" { + cs, err := d.buildCurlString() + if err != nil { + return "", err + } + d.finalCurlString = cs + } + return d.finalCurlString, nil +} + +func (d *OutputStringError) buildCurlString() (string, error) { + body, err := d.Request.BodyBytes() + if err != nil { + return "", err + } + + // Build cURL string + finalCurlString := "curl " + if d.TLSSkipVerify { + finalCurlString += "--insecure " + } + if d.Request.Method != http.MethodGet { + finalCurlString = fmt.Sprintf("%s-X %s ", finalCurlString, d.Request.Method) + } + if d.ClientCACert != "" { + clientCACert := strings.ReplaceAll(d.ClientCACert, "'", "'\"'\"'") + finalCurlString = fmt.Sprintf("%s--cacert '%s' ", finalCurlString, clientCACert) + } + if d.ClientCAPath != "" { + clientCAPath := strings.ReplaceAll(d.ClientCAPath, "'", "'\"'\"'") + finalCurlString = fmt.Sprintf("%s--capath '%s' ", finalCurlString, clientCAPath) + } + if d.ClientCert != "" { + clientCert := strings.ReplaceAll(d.ClientCert, "'", "'\"'\"'") + finalCurlString = fmt.Sprintf("%s--cert '%s' ", finalCurlString, clientCert) + } + if d.ClientKey != "" { + clientKey := strings.ReplaceAll(d.ClientKey, "'", "'\"'\"'") + finalCurlString = fmt.Sprintf("%s--key '%s' ", finalCurlString, clientKey) + } + for k, v := range d.Request.Header { + for _, h := range v { + if strings.ToLower(k) == "x-vault-token" { + h = `$(vault print token)` + } + finalCurlString = fmt.Sprintf("%s-H \"%s: %s\" ", finalCurlString, k, h) + } + } + + if len(body) > 0 { + // We need to escape single quotes since that's what we're using to + // quote the body + escapedBody := strings.ReplaceAll(string(body), "'", "'\"'\"'") + finalCurlString = fmt.Sprintf("%s-d '%s' ", finalCurlString, escapedBody) + } + + return fmt.Sprintf("%s%s", finalCurlString, d.Request.URL.String()), nil +} diff --git a/api/plugin_helpers.go b/api/plugin_helpers.go new file mode 100644 index 0000000..32755c3 --- /dev/null +++ b/api/plugin_helpers.go @@ -0,0 +1,275 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "errors" + "flag" + "net/url" + "os" + "regexp" + + "github.com/go-jose/go-jose/v3/jwt" + + "github.com/hashicorp/errwrap" +) + +const ( + // PluginAutoMTLSEnv is used to ensure AutoMTLS is used. This will override + // setting a TLSProviderFunc for a plugin. + PluginAutoMTLSEnv = "VAULT_PLUGIN_AUTOMTLS_ENABLED" + + // PluginMetadataModeEnv is an ENV name used to disable TLS communication + // to bootstrap mounting plugins. + PluginMetadataModeEnv = "VAULT_PLUGIN_METADATA_MODE" + + // PluginUnwrapTokenEnv is the ENV name used to pass unwrap tokens to the + // plugin. 
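For context, the OutputStringError machinery above is what backs the CLI's curl-output behavior. A hedged sketch of how client code might retrieve the generated cURL command (SetOutputCurlString is the client-side toggle; the read path is illustrative):

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// With this toggle set, requests are not sent; they fail with an
	// OutputStringError carrying the equivalent cURL invocation.
	client.SetOutputCurlString(true)

	_, err = client.Logical().Read("secret/data/my-app/config")
	if err != nil && vault.LastOutputStringError != nil {
		curl, strErr := vault.LastOutputStringError.CurlString()
		if strErr != nil {
			log.Fatal(strErr)
		}
		fmt.Println(curl)
	}
}
```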
+ PluginUnwrapTokenEnv = "VAULT_UNWRAP_TOKEN" +) + +// sudoPaths is a map containing the paths that require a token's policy +// to have the "sudo" capability. The keys are the paths as strings, in +// the same format as they are returned by the OpenAPI spec. The values +// are the regular expressions that can be used to test whether a given +// path matches that path or not (useful specifically for the paths that +// contain templated fields.) +var sudoPaths = map[string]*regexp.Regexp{ + "/auth/token/accessors": regexp.MustCompile(`^/auth/token/accessors/?$`), + "/pki/root": regexp.MustCompile(`^/pki/root$`), + "/pki/root/sign-self-issued": regexp.MustCompile(`^/pki/root/sign-self-issued$`), + "/sys/audit": regexp.MustCompile(`^/sys/audit$`), + "/sys/audit/{path}": regexp.MustCompile(`^/sys/audit/.+$`), + "/sys/auth/{path}": regexp.MustCompile(`^/sys/auth/.+$`), + "/sys/auth/{path}/tune": regexp.MustCompile(`^/sys/auth/.+/tune$`), + "/sys/config/auditing/request-headers": regexp.MustCompile(`^/sys/config/auditing/request-headers$`), + "/sys/config/auditing/request-headers/{header}": regexp.MustCompile(`^/sys/config/auditing/request-headers/.+$`), + "/sys/config/cors": regexp.MustCompile(`^/sys/config/cors$`), + "/sys/config/ui/headers": regexp.MustCompile(`^/sys/config/ui/headers/?$`), + "/sys/config/ui/headers/{header}": regexp.MustCompile(`^/sys/config/ui/headers/.+$`), + "/sys/leases": regexp.MustCompile(`^/sys/leases$`), + "/sys/leases/lookup/": regexp.MustCompile(`^/sys/leases/lookup/?$`), + "/sys/leases/lookup/{prefix}": regexp.MustCompile(`^/sys/leases/lookup/.+$`), + "/sys/leases/revoke-force/{prefix}": regexp.MustCompile(`^/sys/leases/revoke-force/.+$`), + "/sys/leases/revoke-prefix/{prefix}": regexp.MustCompile(`^/sys/leases/revoke-prefix/.+$`), + "/sys/plugins/catalog/{name}": regexp.MustCompile(`^/sys/plugins/catalog/[^/]+$`), + "/sys/plugins/catalog/{type}": regexp.MustCompile(`^/sys/plugins/catalog/[\w-]+$`), + "/sys/plugins/catalog/{type}/{name}": regexp.MustCompile(`^/sys/plugins/catalog/[\w-]+/[^/]+$`), + "/sys/raw": regexp.MustCompile(`^/sys/raw$`), + "/sys/raw/{path}": regexp.MustCompile(`^/sys/raw/.+$`), + "/sys/remount": regexp.MustCompile(`^/sys/remount$`), + "/sys/revoke-force/{prefix}": regexp.MustCompile(`^/sys/revoke-force/.+$`), + "/sys/revoke-prefix/{prefix}": regexp.MustCompile(`^/sys/revoke-prefix/.+$`), + "/sys/rotate": regexp.MustCompile(`^/sys/rotate$`), + "/sys/internal/inspect/router/{tag}": regexp.MustCompile(`^/sys/internal/inspect/router/.+$`), + + // enterprise-only paths + "/sys/replication/dr/primary/secondary-token": regexp.MustCompile(`^/sys/replication/dr/primary/secondary-token$`), + "/sys/replication/performance/primary/secondary-token": regexp.MustCompile(`^/sys/replication/performance/primary/secondary-token$`), + "/sys/replication/primary/secondary-token": regexp.MustCompile(`^/sys/replication/primary/secondary-token$`), + "/sys/replication/reindex": regexp.MustCompile(`^/sys/replication/reindex$`), + "/sys/storage/raft/snapshot-auto/config/": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/?$`), + "/sys/storage/raft/snapshot-auto/config/{name}": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/[^/]+$`), +} + +// PluginAPIClientMeta is a helper that plugins can use to configure TLS connections +// back to Vault. +type PluginAPIClientMeta struct { + // These are set by the command line flags. 
+ flagCACert string + flagCAPath string + flagClientCert string + flagClientKey string + flagServerName string + flagInsecure bool +} + +// FlagSet returns the flag set for configuring the TLS connection +func (f *PluginAPIClientMeta) FlagSet() *flag.FlagSet { + fs := flag.NewFlagSet("vault plugin settings", flag.ContinueOnError) + + fs.StringVar(&f.flagCACert, "ca-cert", "", "") + fs.StringVar(&f.flagCAPath, "ca-path", "", "") + fs.StringVar(&f.flagClientCert, "client-cert", "", "") + fs.StringVar(&f.flagClientKey, "client-key", "", "") + fs.StringVar(&f.flagServerName, "tls-server-name", "", "") + fs.BoolVar(&f.flagInsecure, "tls-skip-verify", false, "") + + return fs +} + +// GetTLSConfig will return a TLSConfig based off the values from the flags +func (f *PluginAPIClientMeta) GetTLSConfig() *TLSConfig { + // If we need custom TLS configuration, then set it + if f.flagCACert != "" || f.flagCAPath != "" || f.flagClientCert != "" || f.flagClientKey != "" || f.flagInsecure || f.flagServerName != "" { + t := &TLSConfig{ + CACert: f.flagCACert, + CAPath: f.flagCAPath, + ClientCert: f.flagClientCert, + ClientKey: f.flagClientKey, + TLSServerName: f.flagServerName, + Insecure: f.flagInsecure, + } + + return t + } + + return nil +} + +// VaultPluginTLSProvider wraps VaultPluginTLSProviderContext using context.Background. +func VaultPluginTLSProvider(apiTLSConfig *TLSConfig) func() (*tls.Config, error) { + return VaultPluginTLSProviderContext(context.Background(), apiTLSConfig) +} + +// VaultPluginTLSProviderContext is run inside a plugin and retrieves the response +// wrapped TLS certificate from vault. It returns a configured TLS Config. +func VaultPluginTLSProviderContext(ctx context.Context, apiTLSConfig *TLSConfig) func() (*tls.Config, error) { + if os.Getenv(PluginAutoMTLSEnv) == "true" || os.Getenv(PluginMetadataModeEnv) == "true" { + return nil + } + + return func() (*tls.Config, error) { + unwrapToken := os.Getenv(PluginUnwrapTokenEnv) + + parsedJWT, err := jwt.ParseSigned(unwrapToken) + if err != nil { + return nil, errwrap.Wrapf("error parsing wrapping token: {{err}}", err) + } + + allClaims := make(map[string]interface{}) + if err = parsedJWT.UnsafeClaimsWithoutVerification(&allClaims); err != nil { + return nil, errwrap.Wrapf("error parsing claims from wrapping token: {{err}}", err) + } + + addrClaimRaw, ok := allClaims["addr"] + if !ok { + return nil, errors.New("could not validate addr claim") + } + vaultAddr, ok := addrClaimRaw.(string) + if !ok { + return nil, errors.New("could not parse addr claim") + } + if vaultAddr == "" { + return nil, errors.New(`no vault api_addr found`) + } + + // Sanity check the value + if _, err := url.Parse(vaultAddr); err != nil { + return nil, errwrap.Wrapf("error parsing the vault api_addr: {{err}}", err) + } + + // Unwrap the token + clientConf := DefaultConfig() + clientConf.Address = vaultAddr + if apiTLSConfig != nil { + err := clientConf.ConfigureTLS(apiTLSConfig) + if err != nil { + return nil, errwrap.Wrapf("error configuring api client {{err}}", err) + } + } + client, err := NewClient(clientConf) + if err != nil { + return nil, errwrap.Wrapf("error during api client creation: {{err}}", err) + } + + // Reset token value to make sure nothing has been set by default + client.ClearToken() + + secret, err := client.Logical().UnwrapWithContext(ctx, unwrapToken) + if err != nil { + return nil, errwrap.Wrapf("error during token unwrap request: {{err}}", err) + } + if secret == nil { + return nil, errors.New("error during token unwrap request: 
secret is nil") + } + + // Retrieve and parse the server's certificate + serverCertBytesRaw, ok := secret.Data["ServerCert"].(string) + if !ok { + return nil, errors.New("error unmarshalling certificate") + } + + serverCertBytes, err := base64.StdEncoding.DecodeString(serverCertBytesRaw) + if err != nil { + return nil, errwrap.Wrapf("error parsing certificate: {{err}}", err) + } + + serverCert, err := x509.ParseCertificate(serverCertBytes) + if err != nil { + return nil, errwrap.Wrapf("error parsing certificate: {{err}}", err) + } + + // Retrieve and parse the server's private key + serverKeyB64, ok := secret.Data["ServerKey"].(string) + if !ok { + return nil, errors.New("error unmarshalling certificate") + } + + serverKeyRaw, err := base64.StdEncoding.DecodeString(serverKeyB64) + if err != nil { + return nil, errwrap.Wrapf("error parsing certificate: {{err}}", err) + } + + serverKey, err := x509.ParseECPrivateKey(serverKeyRaw) + if err != nil { + return nil, errwrap.Wrapf("error parsing certificate: {{err}}", err) + } + + // Add CA cert to the cert pool + caCertPool := x509.NewCertPool() + caCertPool.AddCert(serverCert) + + // Build a certificate object out of the server's cert and private key. + cert := tls.Certificate{ + Certificate: [][]byte{serverCertBytes}, + PrivateKey: serverKey, + Leaf: serverCert, + } + + // Setup TLS config + tlsConfig := &tls.Config{ + ClientCAs: caCertPool, + RootCAs: caCertPool, + ClientAuth: tls.RequireAndVerifyClientCert, + // TLS 1.2 minimum + MinVersion: tls.VersionTLS12, + Certificates: []tls.Certificate{cert}, + ServerName: serverCert.Subject.CommonName, + } + + return tlsConfig, nil + } +} + +func SudoPaths() map[string]*regexp.Regexp { + return sudoPaths +} + +// Determine whether the given path requires the sudo capability. +// Note that this uses hardcoded static path information, so will return incorrect results for paths in namespaces, +// or for secret engines mounted at non-default paths. +func IsSudoPath(path string) bool { + // Return early if the path is any of the non-templated sudo paths. + if _, ok := sudoPaths[path]; ok { + return true + } + + // Some sudo paths have templated fields in them. + // (e.g. /sys/revoke-prefix/{prefix}) + // The values in the sudoPaths map are actually regular expressions, + // so we can check if our path matches against them. + for _, sudoPathRegexp := range sudoPaths { + match := sudoPathRegexp.MatchString(path) + if match { + return true + } + } + + return false +} diff --git a/api/plugin_helpers_test.go b/api/plugin_helpers_test.go new file mode 100644 index 0000000..2e97d44 --- /dev/null +++ b/api/plugin_helpers_test.go @@ -0,0 +1,78 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package api + +import "testing" + +func TestIsSudoPath(t *testing.T) { + t.Parallel() + + testCases := []struct { + path string + expected bool + }{ + // Testing: Not a real endpoint + { + "/not/in/sudo/paths/list", + false, + }, + // Testing: sys/raw/{path} + { + "/sys/raw/single-node-path", + true, + }, + { + "/sys/raw/multiple/nodes/path", + true, + }, + { + "/sys/raw/WEIRD(but_still_valid!)p4Th?🗿笑", + true, + }, + // Testing: sys/auth/{path}/tune + { + "/sys/auth/path/in/middle/tune", + true, + }, + // Testing: sys/plugins/catalog/{type} and sys/plugins/catalog/{name} (regexes overlap) + { + "/sys/plugins/catalog/some-type", + true, + }, + // Testing: Not a real endpoint + { + "/sys/plugins/catalog/some/type/or/name/with/slashes", + false, + }, + // Testing: sys/plugins/catalog/{type}/{name} + { + "/sys/plugins/catalog/some-type/some-name", + true, + }, + // Testing: Not a real endpoint + { + "/sys/plugins/catalog/some-type/some/name/with/slashes", + false, + }, + // Testing: auth/token/accessors (an example of a sudo path that only accepts list operations) + // It is matched as sudo without the trailing slash... + { + "/auth/token/accessors", + true, + }, + // ...and also with it. + // (Although at the time of writing, the only caller of IsSudoPath always removes trailing slashes.) + { + "/auth/token/accessors/", + true, + }, + } + + for _, tc := range testCases { + result := IsSudoPath(tc.path) + if result != tc.expected { + t.Fatalf("expected api.IsSudoPath to return %v for path %s but it returned %v", tc.expected, tc.path, result) + } + } +} diff --git a/api/plugin_types.go b/api/plugin_types.go new file mode 100644 index 0000000..4c759a2 --- /dev/null +++ b/api/plugin_types.go @@ -0,0 +1,66 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +// NOTE: this file was copied from +// https://github.com/hashicorp/vault/blob/main/sdk/helper/consts/plugin_types.go +// Any changes made should be made to both files at the same time. + +import "fmt" + +var PluginTypes = []PluginType{ + PluginTypeUnknown, + PluginTypeCredential, + PluginTypeDatabase, + PluginTypeSecrets, +} + +type PluginType uint32 + +// This is a list of PluginTypes used by Vault. +// If we need to add any in the future, it would +// be best to add them to the _end_ of the list below +// because they resolve to incrementing numbers, +// which may be saved in state somewhere. Thus if +// the name for one of those numbers changed because +// a value were added to the middle, that could cause +// the wrong plugin types to be read from storage +// for a given underlying number. 
Example of the problem +// here: https://play.golang.org/p/YAaPw5ww3er +const ( + PluginTypeUnknown PluginType = iota + PluginTypeCredential + PluginTypeDatabase + PluginTypeSecrets +) + +func (p PluginType) String() string { + switch p { + case PluginTypeUnknown: + return "unknown" + case PluginTypeCredential: + return "auth" + case PluginTypeDatabase: + return "database" + case PluginTypeSecrets: + return "secret" + default: + return "unsupported" + } +} + +func ParsePluginType(pluginType string) (PluginType, error) { + switch pluginType { + case "unknown": + return PluginTypeUnknown, nil + case "auth": + return PluginTypeCredential, nil + case "database": + return PluginTypeDatabase, nil + case "secret": + return PluginTypeSecrets, nil + default: + return PluginTypeUnknown, fmt.Errorf("%q is not a supported plugin type", pluginType) + } +} diff --git a/api/renewer_test.go b/api/renewer_test.go new file mode 100644 index 0000000..7ba16e6 --- /dev/null +++ b/api/renewer_test.go @@ -0,0 +1,285 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "errors" + "fmt" + "math/rand" + "reflect" + "testing" + "testing/quick" + "time" + + "github.com/go-test/deep" +) + +func TestRenewer_NewRenewer(t *testing.T) { + t.Parallel() + + client, err := NewClient(DefaultConfig()) + if err != nil { + t.Fatal(err) + } + + cases := []struct { + name string + i *RenewerInput + e *Renewer + err bool + }{ + { + name: "nil", + i: nil, + e: nil, + err: true, + }, + { + name: "missing_secret", + i: &RenewerInput{ + Secret: nil, + }, + e: nil, + err: true, + }, + { + name: "default_grace", + i: &RenewerInput{ + Secret: &Secret{}, + }, + e: &Renewer{ + secret: &Secret{}, + }, + err: false, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + v, err := client.NewRenewer(tc.i) + if (err != nil) != tc.err { + t.Fatal(err) + } + + if v == nil { + return + } + + // Zero-out channels because reflect + v.client = nil + v.random = nil + v.doneCh = nil + v.renewCh = nil + v.stopCh = nil + + if diff := deep.Equal(tc.e, v); diff != nil { + t.Error(diff) + } + }) + } +} + +func TestLifetimeWatcher(t *testing.T) { + t.Parallel() + + client, err := NewClient(DefaultConfig()) + if err != nil { + t.Fatal(err) + } + + // Note that doRenewWithOptions starts its loop with an initial renewal. + // This has a big impact on the particulars of the following cases. 
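+	// In practice this means each case below sees at least one call to its
+	// renew func immediately, before any sleep, so error-handling behavior
+	// is exercised even when the computed sleep duration would be long.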
+ + renewedSecret := &Secret{} + var caseOneErrorCount int + var caseManyErrorsCount int + cases := []struct { + maxTestTime time.Duration + name string + leaseDurationSeconds int + incrementSeconds int + renew renewFunc + expectError error + expectRenewal bool + }{ + { + maxTestTime: time.Second, + name: "no_error", + leaseDurationSeconds: 60, + incrementSeconds: 60, + renew: func(_ string, _ int) (*Secret, error) { + return renewedSecret, nil + }, + expectError: nil, + expectRenewal: true, + }, + { + maxTestTime: time.Second, + name: "short_increment_duration", + leaseDurationSeconds: 60, + incrementSeconds: 10, + renew: func(_ string, _ int) (*Secret, error) { + return renewedSecret, nil + }, + expectError: nil, + expectRenewal: true, + }, + { + maxTestTime: 5 * time.Second, + name: "one_error", + leaseDurationSeconds: 15, + incrementSeconds: 15, + renew: func(_ string, _ int) (*Secret, error) { + if caseOneErrorCount == 0 { + caseOneErrorCount++ + return nil, fmt.Errorf("renew failure") + } + return renewedSecret, nil + }, + expectError: nil, + expectRenewal: true, + }, + { + maxTestTime: 15 * time.Second, + name: "many_errors", + leaseDurationSeconds: 15, + incrementSeconds: 15, + renew: func(_ string, _ int) (*Secret, error) { + if caseManyErrorsCount == 3 { + return renewedSecret, nil + } + caseManyErrorsCount++ + return nil, fmt.Errorf("renew failure") + }, + expectError: nil, + expectRenewal: true, + }, + { + maxTestTime: 15 * time.Second, + name: "only_errors", + leaseDurationSeconds: 15, + incrementSeconds: 15, + renew: func(_ string, _ int) (*Secret, error) { + return nil, fmt.Errorf("renew failure") + }, + expectError: nil, + expectRenewal: false, + }, + { + maxTestTime: 15 * time.Second, + name: "negative_lease_duration", + leaseDurationSeconds: -15, + incrementSeconds: 15, + renew: func(_ string, _ int) (*Secret, error) { + return renewedSecret, nil + }, + expectError: nil, + expectRenewal: true, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + v, err := client.NewLifetimeWatcher(&LifetimeWatcherInput{ + Secret: &Secret{ + LeaseDuration: tc.leaseDurationSeconds, + }, + Increment: tc.incrementSeconds, + }) + if err != nil { + t.Fatal(err) + } + + doneCh := make(chan error, 1) + go func() { + doneCh <- v.doRenewWithOptions(false, false, + tc.leaseDurationSeconds, "myleaseID", tc.renew, time.Second) + }() + defer v.Stop() + + receivedRenewal := false + receivedDone := false + ChannelLoop: + for { + select { + case <-time.After(tc.maxTestTime): + t.Fatalf("renewal didn't happen") + case r := <-v.RenewCh(): + if !tc.expectRenewal { + t.Fatal("expected no renewals") + } + if r.Secret != renewedSecret { + t.Fatalf("expected secret %v, got %v", renewedSecret, r.Secret) + } + receivedRenewal = true + if !receivedDone { + continue ChannelLoop + } + break ChannelLoop + case err := <-doneCh: + receivedDone = true + if tc.expectError != nil && !errors.Is(err, tc.expectError) { + t.Fatalf("expected error %q, got: %v", tc.expectError, err) + } + if tc.expectError == nil && err != nil { + t.Fatalf("expected no error, got: %v", err) + } + if tc.expectRenewal && !receivedRenewal { + // We might have received the stop before the renew call on the channel. 
+ continue ChannelLoop + } + break ChannelLoop + } + } + + if tc.expectRenewal && !receivedRenewal { + t.Fatalf("expected at least one renewal, got none.") + } + }) + } +} + +// TestCalcSleepPeriod uses property based testing to evaluate the calculateSleepDuration +// function of LifeTimeWatchers, but also incidentally tests "calculateGrace". +// This is on account of "calculateSleepDuration" performing the "calculateGrace" +// function in particular instances. +// Both of these functions support the vital functionality of the LifeTimeWatcher +// and therefore should be tested rigorously. +func TestCalcSleepPeriod(t *testing.T) { + c := quick.Config{ + MaxCount: 10000, + Values: func(values []reflect.Value, r *rand.Rand) { + leaseDuration := r.Int63() + priorDuration := r.Int63n(leaseDuration) + remainingLeaseDuration := r.Int63n(priorDuration) + increment := r.Int63n(remainingLeaseDuration) + + values[0] = reflect.ValueOf(r) + values[1] = reflect.ValueOf(time.Duration(leaseDuration)) + values[2] = reflect.ValueOf(time.Duration(priorDuration)) + values[3] = reflect.ValueOf(time.Duration(remainingLeaseDuration)) + values[4] = reflect.ValueOf(time.Duration(increment)) + }, + } + + // tests that "calculateSleepDuration" will always return a value less than + // the remaining lease duration given a random leaseDuration, priorDuration, remainingLeaseDuration, and increment. + // Inputs are generated so that: + // leaseDuration > priorDuration > remainingLeaseDuration + // and remainingLeaseDuration > increment + if err := quick.Check(func(r *rand.Rand, leaseDuration, priorDuration, remainingLeaseDuration, increment time.Duration) bool { + lw := LifetimeWatcher{ + grace: 0, + increment: int(increment.Seconds()), + random: r, + } + + lw.calculateGrace(remainingLeaseDuration, increment) + + // ensure that we sleep for less than the remaining lease. + return lw.calculateSleepDuration(remainingLeaseDuration, priorDuration) < remainingLeaseDuration + }, &c); err != nil { + t.Error(err) + } +} diff --git a/api/request.go b/api/request.go new file mode 100644 index 0000000..ecf7837 --- /dev/null +++ b/api/request.go @@ -0,0 +1,149 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/url" + + retryablehttp "github.com/hashicorp/go-retryablehttp" +) + +// Request is a raw request configuration structure used to initiate +// API requests to the Vault server. +type Request struct { + Method string + URL *url.URL + Host string + Params url.Values + Headers http.Header + ClientToken string + MFAHeaderVals []string + WrapTTL string + Obj interface{} + + // When possible, use BodyBytes as it is more efficient due to how the + // retry logic works + BodyBytes []byte + + // Fallback + Body io.Reader + BodySize int64 + + // Whether to request overriding soft-mandatory Sentinel policies (RGPs and + // EGPs). If set, the override flag will take effect for all policies + // evaluated during the request. + PolicyOverride bool +} + +// SetJSONBody is used to set a request body that is a JSON-encoded value. 
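+// A minimal usage sketch (path and payload are illustrative):
+//
+//	r := client.NewRequest(http.MethodPut, "/v1/secret/data/foo")
+//	if err := r.SetJSONBody(map[string]interface{}{"value": "bar"}); err != nil {
+//		return err
+//	}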
+func (r *Request) SetJSONBody(val interface{}) error { + buf, err := json.Marshal(val) + if err != nil { + return err + } + + r.Obj = val + r.BodyBytes = buf + return nil +} + +// ResetJSONBody is used to reset the body for a redirect +func (r *Request) ResetJSONBody() error { + if r.BodyBytes == nil { + return nil + } + return r.SetJSONBody(r.Obj) +} + +// DEPRECATED: ToHTTP turns this request into a valid *http.Request for use +// with the net/http package. +func (r *Request) ToHTTP() (*http.Request, error) { + req, err := r.toRetryableHTTP() + if err != nil { + return nil, err + } + + switch { + case r.BodyBytes == nil && r.Body == nil: + // No body + + case r.BodyBytes != nil: + req.Request.Body = ioutil.NopCloser(bytes.NewReader(r.BodyBytes)) + + default: + if c, ok := r.Body.(io.ReadCloser); ok { + req.Request.Body = c + } else { + req.Request.Body = ioutil.NopCloser(r.Body) + } + } + + return req.Request, nil +} + +func (r *Request) toRetryableHTTP() (*retryablehttp.Request, error) { + // Encode the query parameters + r.URL.RawQuery = r.Params.Encode() + + // Create the HTTP request, defaulting to retryable + var req *retryablehttp.Request + + var err error + var body interface{} + + switch { + case r.BodyBytes == nil && r.Body == nil: + // No body + + case r.BodyBytes != nil: + // Use bytes, it's more efficient + body = r.BodyBytes + + default: + body = r.Body + } + + req, err = retryablehttp.NewRequest(r.Method, r.URL.RequestURI(), body) + if err != nil { + return nil, err + } + + req.URL.User = r.URL.User + req.URL.Scheme = r.URL.Scheme + req.URL.Host = r.URL.Host + req.Host = r.Host + + if r.Headers != nil { + for header, vals := range r.Headers { + for _, val := range vals { + req.Header.Add(header, val) + } + } + } + + if len(r.ClientToken) != 0 { + req.Header.Set(AuthHeaderName, r.ClientToken) + } + + if len(r.WrapTTL) != 0 { + req.Header.Set("X-Vault-Wrap-TTL", r.WrapTTL) + } + + if len(r.MFAHeaderVals) != 0 { + for _, mfaHeaderVal := range r.MFAHeaderVals { + req.Header.Add("X-Vault-MFA", mfaHeaderVal) + } + } + + if r.PolicyOverride { + req.Header.Set("X-Vault-Policy-Override", "true") + } + + return req, nil +} diff --git a/api/request_test.go b/api/request_test.go new file mode 100644 index 0000000..ac21b80 --- /dev/null +++ b/api/request_test.go @@ -0,0 +1,44 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "strings" + "testing" +) + +func TestRequestSetJSONBody(t *testing.T) { + var r Request + raw := map[string]interface{}{"foo": "bar"} + if err := r.SetJSONBody(raw); err != nil { + t.Fatalf("err: %s", err) + } + + expected := `{"foo":"bar"}` + actual := strings.TrimSpace(string(r.BodyBytes)) + if actual != expected { + t.Fatalf("bad: %s", actual) + } +} + +func TestRequestResetJSONBody(t *testing.T) { + var r Request + raw := map[string]interface{}{"foo": "bar"} + if err := r.SetJSONBody(raw); err != nil { + t.Fatalf("err: %s", err) + } + + if err := r.ResetJSONBody(); err != nil { + t.Fatalf("err: %s", err) + } + + buf := make([]byte, len(r.BodyBytes)) + copy(buf, r.BodyBytes) + + expected := `{"foo":"bar"}` + actual := strings.TrimSpace(string(buf)) + if actual != expected { + t.Fatalf("bad: actual %s, expected %s", actual, expected) + } +} diff --git a/api/response.go b/api/response.go new file mode 100644 index 0000000..2842c12 --- /dev/null +++ b/api/response.go @@ -0,0 +1,138 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" +) + +// Response is a raw response that wraps an HTTP response. +type Response struct { + *http.Response +} + +// DecodeJSON will decode the response body to a JSON structure. This +// will consume the response body, but will not close it. Close must +// still be called. +func (r *Response) DecodeJSON(out interface{}) error { + dec := json.NewDecoder(r.Body) + dec.UseNumber() + return dec.Decode(out) +} + +// Error returns an error response if there is one. If there is an error, +// this will fully consume the response body, but will not close it. The +// body must still be closed manually. +func (r *Response) Error() error { + // 200 to 399 are okay status codes. 429 is the code for health status of + // standby nodes, otherwise, 429 is treated as quota limit reached. + if (r.StatusCode >= 200 && r.StatusCode < 400) || (r.StatusCode == 429 && r.Request.URL.Path == "/v1/sys/health") { + return nil + } + + // We have an error. Let's copy the body into our own buffer first, + // so that if we can't decode JSON, we can at least copy it raw. + bodyBuf := &bytes.Buffer{} + if _, err := io.Copy(bodyBuf, r.Body); err != nil { + return err + } + + r.Body.Close() + r.Body = ioutil.NopCloser(bodyBuf) + ns := r.Header.Get(NamespaceHeaderName) + + // Build up the error object + respErr := &ResponseError{ + HTTPMethod: r.Request.Method, + URL: r.Request.URL.String(), + StatusCode: r.StatusCode, + NamespacePath: ns, + } + + // Decode the error response if we can. Note that we wrap the bodyBuf + // in a bytes.Reader here so that the JSON decoder doesn't move the + // read pointer for the original buffer. + var resp ErrorResponse + dec := json.NewDecoder(bytes.NewReader(bodyBuf.Bytes())) + dec.UseNumber() + if err := dec.Decode(&resp); err != nil { + // Store the fact that we couldn't decode the errors + respErr.RawError = true + respErr.Errors = []string{bodyBuf.String()} + } else { + // Store the decoded errors + respErr.Errors = resp.Errors + } + + return respErr +} + +// ErrorResponse is the raw structure of errors when they're returned by the +// HTTP API. +type ErrorResponse struct { + Errors []string +} + +// ResponseError is the error returned when Vault responds with an error or +// non-success HTTP status code. If a request to Vault fails because of a +// network error a different error message will be returned. ResponseError gives +// access to the underlying errors and status code. +type ResponseError struct { + // HTTPMethod is the HTTP method for the request (PUT, GET, etc). + HTTPMethod string + + // URL is the URL of the request. + URL string + + // StatusCode is the HTTP status code. + StatusCode int + + // RawError marks that the underlying error messages returned by Vault were + // not parsable. The Errors slice will contain the raw response body as the + // first and only error string if this value is set to true. + RawError bool + + // Errors are the underlying errors returned by Vault. + Errors []string + + // Namespace path to be reported to the client if it is set to anything other + // than root + NamespacePath string +} + +// Error returns a human-readable error string for the response error. 
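+// Callers that need structured access can type-assert rather than parse
+// the formatted string; a sketch:
+//
+//	if respErr, ok := err.(*ResponseError); ok && respErr.StatusCode == 404 {
+//		// treat "not found" specially
+//	}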
+func (r *ResponseError) Error() string { + errString := "Errors" + if r.RawError { + errString = "Raw Message" + } + + var ns string + if r.NamespacePath != "" && r.NamespacePath != "root/" { + ns = "Namespace: " + r.NamespacePath + "\n" + } + + var errBody bytes.Buffer + errBody.WriteString(fmt.Sprintf( + "Error making API request.\n\n"+ + ns+ + "URL: %s %s\n"+ + "Code: %d. %s:\n\n", + r.HTTPMethod, r.URL, r.StatusCode, errString)) + + if r.RawError && len(r.Errors) == 1 { + errBody.WriteString(r.Errors[0]) + } else { + for _, err := range r.Errors { + errBody.WriteString(fmt.Sprintf("* %s", err)) + } + } + + return errBody.String() +} diff --git a/api/secret.go b/api/secret.go new file mode 100644 index 0000000..3d15f7a --- /dev/null +++ b/api/secret.go @@ -0,0 +1,385 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "reflect" + "strings" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-secure-stdlib/parseutil" +) + +// Secret is the structure returned for every secret within Vault. +type Secret struct { + // The request ID that generated this response + RequestID string `json:"request_id"` + + LeaseID string `json:"lease_id"` + LeaseDuration int `json:"lease_duration"` + Renewable bool `json:"renewable"` + + // Data is the actual contents of the secret. The format of the data + // is arbitrary and up to the secret backend. + Data map[string]interface{} `json:"data"` + + // Warnings contains any warnings related to the operation. These + // are not issues that caused the command to fail, but that the + // client should be aware of. + Warnings []string `json:"warnings"` + + // Auth, if non-nil, means that there was authentication information + // attached to this response. + Auth *SecretAuth `json:"auth,omitempty"` + + // WrapInfo, if non-nil, means that the initial response was wrapped in the + // cubbyhole of the given token (which has a TTL of the given number of + // seconds) + WrapInfo *SecretWrapInfo `json:"wrap_info,omitempty"` +} + +// TokenID returns the standardized token ID (token) for the given secret. +func (s *Secret) TokenID() (string, error) { + if s == nil { + return "", nil + } + + if s.Auth != nil && len(s.Auth.ClientToken) > 0 { + return s.Auth.ClientToken, nil + } + + if s.Data == nil || s.Data["id"] == nil { + return "", nil + } + + id, ok := s.Data["id"].(string) + if !ok { + return "", fmt.Errorf("token found but in the wrong format") + } + + return id, nil +} + +// TokenAccessor returns the standardized token accessor for the given secret. +// If the secret is nil or does not contain an accessor, this returns the empty +// string. +func (s *Secret) TokenAccessor() (string, error) { + if s == nil { + return "", nil + } + + if s.Auth != nil && len(s.Auth.Accessor) > 0 { + return s.Auth.Accessor, nil + } + + if s.Data == nil || s.Data["accessor"] == nil { + return "", nil + } + + accessor, ok := s.Data["accessor"].(string) + if !ok { + return "", fmt.Errorf("token found but in the wrong format") + } + + return accessor, nil +} + +// TokenRemainingUses returns the standardized remaining uses for the given +// secret. If the secret is nil or does not contain the "num_uses", this +// returns -1. On error, this will return -1 and a non-nil error. 
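+// Usage sketch (illustrative):
+//
+//	uses, err := secret.TokenRemainingUses()
+//	if err != nil || uses < 0 {
+//		// remaining uses are unknown or absent from this secret
+//	}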
+func (s *Secret) TokenRemainingUses() (int, error) { + if s == nil || s.Data == nil || s.Data["num_uses"] == nil { + return -1, nil + } + + return parseutil.SafeParseInt(s.Data["num_uses"]) +} + +// TokenPolicies returns the standardized list of policies for the given secret. +// If the secret is nil or does not contain any policies, this returns nil. It +// also populates the secret's Auth info with identity/token policy info. +func (s *Secret) TokenPolicies() ([]string, error) { + if s == nil { + return nil, nil + } + + if s.Auth != nil && len(s.Auth.Policies) > 0 { + return s.Auth.Policies, nil + } + + if s.Data == nil || s.Data["policies"] == nil { + return nil, nil + } + + var tokenPolicies []string + + // Token policies + { + _, ok := s.Data["policies"] + if !ok { + goto TOKEN_DONE + } + + sList, ok := s.Data["policies"].([]string) + if ok { + tokenPolicies = sList + goto TOKEN_DONE + } + + list, ok := s.Data["policies"].([]interface{}) + if !ok { + return nil, fmt.Errorf("unable to convert token policies to expected format") + } + for _, v := range list { + p, ok := v.(string) + if !ok { + return nil, fmt.Errorf("unable to convert policy %v to string", v) + } + tokenPolicies = append(tokenPolicies, p) + } + } + +TOKEN_DONE: + var identityPolicies []string + + // Identity policies + { + v, ok := s.Data["identity_policies"] + if !ok || v == nil { + goto DONE + } + + sList, ok := s.Data["identity_policies"].([]string) + if ok { + identityPolicies = sList + goto DONE + } + + list, ok := s.Data["identity_policies"].([]interface{}) + if !ok { + return nil, fmt.Errorf("unable to convert identity policies to expected format") + } + for _, v := range list { + p, ok := v.(string) + if !ok { + return nil, fmt.Errorf("unable to convert policy %v to string", v) + } + identityPolicies = append(identityPolicies, p) + } + } + +DONE: + + if s.Auth == nil { + s.Auth = &SecretAuth{} + } + + policies := append(tokenPolicies, identityPolicies...) + + s.Auth.TokenPolicies = tokenPolicies + s.Auth.IdentityPolicies = identityPolicies + s.Auth.Policies = policies + + return policies, nil +} + +// TokenMetadata returns the map of metadata associated with this token, if any +// exists. If the secret is nil or does not contain the "metadata" key, this +// returns nil. +func (s *Secret) TokenMetadata() (map[string]string, error) { + if s == nil { + return nil, nil + } + + if s.Auth != nil && len(s.Auth.Metadata) > 0 { + return s.Auth.Metadata, nil + } + + if s.Data == nil || (s.Data["metadata"] == nil && s.Data["meta"] == nil) { + return nil, nil + } + + data, ok := s.Data["metadata"].(map[string]interface{}) + if !ok { + data, ok = s.Data["meta"].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("unable to convert metadata field to expected format") + } + } + + metadata := make(map[string]string, len(data)) + for k, v := range data { + typed, ok := v.(string) + if !ok { + return nil, fmt.Errorf("unable to convert metadata value %v to string", v) + } + metadata[k] = typed + } + + return metadata, nil +} + +// TokenIsRenewable returns the standardized token renewability for the given +// secret. If the secret is nil or does not contain the "renewable" key, this +// returns false. 
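+// Usage sketch (illustrative):
+//
+//	renewable, err := secret.TokenIsRenewable()
+//	if err == nil && renewable {
+//		// e.g. hand the secret to a LifetimeWatcher for renewal
+//	}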
+func (s *Secret) TokenIsRenewable() (bool, error) { + if s == nil { + return false, nil + } + + if s.Auth != nil && s.Auth.Renewable { + return s.Auth.Renewable, nil + } + + if s.Data == nil || s.Data["renewable"] == nil { + return false, nil + } + + renewable, err := parseutil.ParseBool(s.Data["renewable"]) + if err != nil { + return false, errwrap.Wrapf("could not convert renewable value to a boolean: {{err}}", err) + } + + return renewable, nil +} + +// TokenTTL returns the standardized remaining token TTL for the given secret. +// If the secret is nil or does not contain a TTL, this returns 0. +func (s *Secret) TokenTTL() (time.Duration, error) { + if s == nil { + return 0, nil + } + + if s.Auth != nil && s.Auth.LeaseDuration > 0 { + return time.Duration(s.Auth.LeaseDuration) * time.Second, nil + } + + if s.Data == nil || s.Data["ttl"] == nil { + return 0, nil + } + + ttl, err := parseutil.ParseDurationSecond(s.Data["ttl"]) + if err != nil { + return 0, err + } + + return ttl, nil +} + +// SecretWrapInfo contains wrapping information if we have it. If what is +// contained is an authentication token, the accessor for the token will be +// available in WrappedAccessor. +type SecretWrapInfo struct { + Token string `json:"token"` + Accessor string `json:"accessor"` + TTL int `json:"ttl"` + CreationTime time.Time `json:"creation_time"` + CreationPath string `json:"creation_path"` + WrappedAccessor string `json:"wrapped_accessor"` +} + +type MFAMethodID struct { + Type string `json:"type,omitempty"` + ID string `json:"id,omitempty"` + UsesPasscode bool `json:"uses_passcode,omitempty"` + Name string `json:"name,omitempty"` +} + +type MFAConstraintAny struct { + Any []*MFAMethodID `json:"any,omitempty"` +} + +type MFARequirement struct { + MFARequestID string `json:"mfa_request_id,omitempty"` + MFAConstraints map[string]*MFAConstraintAny `json:"mfa_constraints,omitempty"` +} + +// SecretAuth is the structure containing auth information if we have it. +type SecretAuth struct { + ClientToken string `json:"client_token"` + Accessor string `json:"accessor"` + Policies []string `json:"policies"` + TokenPolicies []string `json:"token_policies"` + IdentityPolicies []string `json:"identity_policies"` + Metadata map[string]string `json:"metadata"` + Orphan bool `json:"orphan"` + EntityID string `json:"entity_id"` + + LeaseDuration int `json:"lease_duration"` + Renewable bool `json:"renewable"` + + MFARequirement *MFARequirement `json:"mfa_requirement"` +} + +// ParseSecret is used to parse a secret value from JSON from an io.Reader. +func ParseSecret(r io.Reader) (*Secret, error) { + // First read the data into a buffer. Not super efficient but we want to + // know if we actually have a body or not. + var buf bytes.Buffer + + // io.Reader is treated like a stream and cannot be read + // multiple times. 
Duplicating this stream using TeeReader + // to use this data in case there is no top-level data from + // api response + var teebuf bytes.Buffer + tee := io.TeeReader(r, &teebuf) + + _, err := buf.ReadFrom(tee) + if err != nil { + return nil, err + } + if buf.Len() == 0 { + return nil, nil + } + + // First decode the JSON into a map[string]interface{} + var secret Secret + dec := json.NewDecoder(&buf) + dec.UseNumber() + if err := dec.Decode(&secret); err != nil { + return nil, err + } + + // If the secret is null, add raw data to secret data if present + if reflect.DeepEqual(secret, Secret{}) { + data := make(map[string]interface{}) + dec := json.NewDecoder(&teebuf) + dec.UseNumber() + if err := dec.Decode(&data); err != nil { + return nil, err + } + errRaw, errPresent := data["errors"] + + // if only errors are present in the resp.Body return nil + // to return value not found as it does not have any raw data + if len(data) == 1 && errPresent { + return nil, nil + } + + // if errors are present along with raw data return the error + if errPresent { + var errStrArray []string + errBytes, err := json.Marshal(errRaw) + if err != nil { + return nil, err + } + if err := json.Unmarshal(errBytes, &errStrArray); err != nil { + return nil, err + } + return nil, fmt.Errorf(strings.Join(errStrArray, " ")) + } + + // if any raw data is present in resp.Body, add it to secret + if len(data) > 0 { + secret.Data = data + } + } + + return &secret, nil +} diff --git a/api/secret_test.go b/api/secret_test.go new file mode 100644 index 0000000..d089a24 --- /dev/null +++ b/api/secret_test.go @@ -0,0 +1,208 @@ +package api + +import ( + "testing" +) + +func TestTokenPolicies(t *testing.T) { + var s *Secret + + // Verify some of the short-circuit paths in the function + if policies, err := s.TokenPolicies(); policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } + + s = &Secret{} + + if policies, err := s.TokenPolicies(); policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } + + s.Auth = &SecretAuth{} + + if policies, err := s.TokenPolicies(); policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } + + s.Auth.Policies = []string{} + + if policies, err := s.TokenPolicies(); policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } + + s.Auth.Policies = []string{"test"} + + if policies, err := s.TokenPolicies(); policies == nil { + t.Error("policies was nil") + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } + + s.Auth = nil + s.Data = make(map[string]interface{}) + + if policies, err := s.TokenPolicies(); policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } + + // Verify that s.Data["policies"] are properly processed + { + policyList := make([]string, 0) + s.Data["policies"] = policyList + + if policies, err := s.TokenPolicies(); len(policies) != len(policyList) { + t.Errorf("expecting policies length %d, got %d", len(policyList), len(policies)) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } else if s.Auth == nil { + t.Error("Auth field is still nil") + } + + policyList = append(policyList, "policy1", "policy2") + s.Data["policies"] 
= policyList + + if policies, err := s.TokenPolicies(); len(policyList) != 2 { + t.Errorf("expecting policies length %d, got %d", len(policyList), len(policies)) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } else if s.Auth == nil { + t.Error("Auth field is still nil") + } + } + + // Do it again but with an interface{} slice + { + s.Auth = nil + policyList := make([]interface{}, 0) + s.Data["policies"] = policyList + + if policies, err := s.TokenPolicies(); len(policies) != len(policyList) { + t.Errorf("expecting policies length %d, got %d", len(policyList), len(policies)) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } else if s.Auth == nil { + t.Error("Auth field is still nil") + } + + policyItems := make([]interface{}, 2) + policyItems[0] = "policy1" + policyItems[1] = "policy2" + + policyList = append(policyList, policyItems...) + s.Data["policies"] = policyList + + if policies, err := s.TokenPolicies(); len(policies) != 2 { + t.Errorf("expecting policies length %d, got %d", len(policyList), len(policies)) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } else if s.Auth == nil { + t.Error("Auth field is still nil") + } + + s.Auth = nil + s.Data["policies"] = 7.0 + + if policies, err := s.TokenPolicies(); err == nil { + t.Error("err was nil") + } else if policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } + + s.Auth = nil + s.Data["policies"] = []int{2, 3, 5, 8, 13} + + if policies, err := s.TokenPolicies(); err == nil { + t.Error("err was nil") + } else if policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } + } + + s.Auth = nil + s.Data["policies"] = nil + + if policies, err := s.TokenPolicies(); err != nil { + t.Errorf("err was not nil, got %v", err) + } else if policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } + + // Verify that logic that merges s.Data["policies"] and s.Data["identity_policies"] works + { + policyList := []string{"policy1", "policy2", "policy3"} + s.Data["policies"] = policyList[:1] + s.Data["identity_policies"] = "not_a_slice" + s.Auth = nil + + if policies, err := s.TokenPolicies(); err == nil { + t.Error("err was nil") + } else if policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } + + s.Data["identity_policies"] = policyList[1:] + + if policies, err := s.TokenPolicies(); len(policyList) != len(policies) { + t.Errorf("expecting policies length %d, got %d", len(policyList), len(policies)) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } else if s.Auth == nil { + t.Error("Auth field is still nil") + } + } + + // Do it again but with an interface{} slice + { + policyList := []interface{}{"policy1", "policy2", "policy3"} + s.Data["policies"] = policyList[:1] + s.Data["identity_policies"] = "not_a_slice" + s.Auth = nil + + if policies, err := s.TokenPolicies(); err == nil { + t.Error("err was nil") + } else if policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } + + s.Data["identity_policies"] = policyList[1:] + + if policies, err := s.TokenPolicies(); len(policyList) != len(policies) { + t.Errorf("expecting policies length %d, got %d", len(policyList), len(policies)) + } else if err != nil { + t.Errorf("err was not nil, got %v", err) + } else if s.Auth == nil { + t.Error("Auth field is still nil") + } + + s.Auth = nil + s.Data["identity_policies"] = []int{2, 3, 5, 8, 13} + + if policies, err := s.TokenPolicies(); err == nil { + t.Error("err was nil") + } else 
if policies != nil { + t.Errorf("policies was not nil, got %v", policies) + } + } + + s.Auth = nil + s.Data["policies"] = []string{"policy1"} + s.Data["identity_policies"] = nil + + if policies, err := s.TokenPolicies(); err != nil { + t.Errorf("err was not nil, got %v", err) + } else if len(policies) != 1 { + t.Errorf("expecting policies length %d, got %d", 1, len(policies)) + } else if s.Auth == nil { + t.Error("Auth field is still nil") + } +} diff --git a/api/ssh.go b/api/ssh.go new file mode 100644 index 0000000..28510ee --- /dev/null +++ b/api/ssh.go @@ -0,0 +1,78 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "fmt" + "net/http" +) + +// SSH is used to return a client to invoke operations on SSH backend. +type SSH struct { + c *Client + MountPoint string +} + +// SSH returns the client for logical-backend API calls. +func (c *Client) SSH() *SSH { + return c.SSHWithMountPoint(SSHHelperDefaultMountPoint) +} + +// SSHWithMountPoint returns the client with specific SSH mount point. +func (c *Client) SSHWithMountPoint(mountPoint string) *SSH { + return &SSH{ + c: c, + MountPoint: mountPoint, + } +} + +// Credential wraps CredentialWithContext using context.Background. +func (c *SSH) Credential(role string, data map[string]interface{}) (*Secret, error) { + return c.CredentialWithContext(context.Background(), role, data) +} + +// CredentialWithContext invokes the SSH backend API to create a credential to establish an SSH session. +func (c *SSH) CredentialWithContext(ctx context.Context, role string, data map[string]interface{}) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, fmt.Sprintf("/v1/%s/creds/%s", c.MountPoint, role)) + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +// SignKey wraps SignKeyWithContext using context.Background. +func (c *SSH) SignKey(role string, data map[string]interface{}) (*Secret, error) { + return c.SignKeyWithContext(context.Background(), role, data) +} + +// SignKeyWithContext signs the given public key and returns a signed public key to pass +// along with the SSH request. +func (c *SSH) SignKeyWithContext(ctx context.Context, role string, data map[string]interface{}) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, fmt.Sprintf("/v1/%s/sign/%s", c.MountPoint, role)) + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} diff --git a/api/ssh_agent.go b/api/ssh_agent.go new file mode 100644 index 0000000..e615037 --- /dev/null +++ b/api/ssh_agent.go @@ -0,0 +1,276 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net/http" + "os" + + "github.com/hashicorp/errwrap" + cleanhttp "github.com/hashicorp/go-cleanhttp" + multierror "github.com/hashicorp/go-multierror" + rootcerts "github.com/hashicorp/go-rootcerts" + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" + "github.com/mitchellh/mapstructure" +) + +const ( + // SSHHelperDefaultMountPoint is the default path at which SSH backend will be + // mounted in the Vault server. + SSHHelperDefaultMountPoint = "ssh" + + // VerifyEchoRequest is the echo request message sent as OTP by the helper. + VerifyEchoRequest = "verify-echo-request" + + // VerifyEchoResponse is the echo response message sent as a response to OTP + // matching echo request. + VerifyEchoResponse = "verify-echo-response" +) + +// SSHHelper is a structure representing a vault-ssh-helper which can talk to vault server +// in order to verify the OTP entered by the user. It contains the path at which +// SSH backend is mounted at the server. +type SSHHelper struct { + c *Client + MountPoint string +} + +// SSHVerifyResponse is a structure representing the fields in Vault server's +// response. +type SSHVerifyResponse struct { + // Usually empty. If the request OTP is echo request message, this will + // be set to the corresponding echo response message. + Message string `json:"message" mapstructure:"message"` + + // Username associated with the OTP + Username string `json:"username" mapstructure:"username"` + + // IP associated with the OTP + IP string `json:"ip" mapstructure:"ip"` + + // Name of the role against which the OTP was issued + RoleName string `json:"role_name" mapstructure:"role_name"` +} + +// SSHHelperConfig is a structure which represents the entries from the vault-ssh-helper's configuration file. +type SSHHelperConfig struct { + VaultAddr string `hcl:"vault_addr"` + SSHMountPoint string `hcl:"ssh_mount_point"` + Namespace string `hcl:"namespace"` + CACert string `hcl:"ca_cert"` + CAPath string `hcl:"ca_path"` + AllowedCidrList string `hcl:"allowed_cidr_list"` + AllowedRoles string `hcl:"allowed_roles"` + TLSSkipVerify bool `hcl:"tls_skip_verify"` + TLSServerName string `hcl:"tls_server_name"` +} + +// SetTLSParameters sets the TLS parameters for this SSH agent. +func (c *SSHHelperConfig) SetTLSParameters(clientConfig *Config, certPool *x509.CertPool) { + tlsConfig := &tls.Config{ + InsecureSkipVerify: c.TLSSkipVerify, + MinVersion: tls.VersionTLS12, + RootCAs: certPool, + ServerName: c.TLSServerName, + } + + transport := cleanhttp.DefaultTransport() + transport.TLSClientConfig = tlsConfig + clientConfig.HttpClient.Transport = transport +} + +// Returns true if any of the following conditions are true: +// - CA cert is configured +// - CA path is configured +// - configured to skip certificate verification +// - TLS server name is configured +func (c *SSHHelperConfig) shouldSetTLSParameters() bool { + return c.CACert != "" || c.CAPath != "" || c.TLSServerName != "" || c.TLSSkipVerify +} + +// NewClient returns a new client for the configuration. This client will be used by the +// vault-ssh-helper to communicate with Vault server and verify the OTP entered by user. +// If the configuration supplies Vault SSL certificates, then the client will +// have TLS configured in its transport. +func (c *SSHHelperConfig) NewClient() (*Client, error) { + // Creating a default client configuration for communicating with vault server. 
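+	// An end-to-end sketch of how a helper typically uses this method (the
+	// config path is illustrative):
+	//
+	//	cfg, err := api.LoadSSHHelperConfig("/etc/vault-ssh-helper.d/config.hcl")
+	//	client, err := cfg.NewClient()
+	//	resp, err := client.SSHHelperWithMountPoint(cfg.SSHMountPoint).Verify(otp)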
+ clientConfig := DefaultConfig() + + // Pointing the client to the actual address of vault server. + clientConfig.Address = c.VaultAddr + + // Check if certificates are provided via config file. + if c.shouldSetTLSParameters() { + rootConfig := &rootcerts.Config{ + CAFile: c.CACert, + CAPath: c.CAPath, + } + certPool, err := rootcerts.LoadCACerts(rootConfig) + if err != nil { + return nil, err + } + // Enable TLS on the HTTP client information + c.SetTLSParameters(clientConfig, certPool) + } + + // Creating the client object for the given configuration + client, err := NewClient(clientConfig) + if err != nil { + return nil, err + } + + // Configure namespace + if c.Namespace != "" { + client.SetNamespace(c.Namespace) + } + + return client, nil +} + +// LoadSSHHelperConfig loads ssh-helper's configuration from the file and populates the corresponding +// in-memory structure. +// +// Vault address is a required parameter. +// Mount point defaults to "ssh". +func LoadSSHHelperConfig(path string) (*SSHHelperConfig, error) { + contents, err := ioutil.ReadFile(path) + if err != nil && !os.IsNotExist(err) { + return nil, multierror.Prefix(err, "ssh_helper:") + } + return ParseSSHHelperConfig(string(contents)) +} + +// ParseSSHHelperConfig parses the given contents as a string for the SSHHelper +// configuration. +func ParseSSHHelperConfig(contents string) (*SSHHelperConfig, error) { + root, err := hcl.Parse(string(contents)) + if err != nil { + return nil, errwrap.Wrapf("error parsing config: {{err}}", err) + } + + list, ok := root.Node.(*ast.ObjectList) + if !ok { + return nil, fmt.Errorf("error parsing config: file doesn't contain a root object") + } + + valid := []string{ + "vault_addr", + "ssh_mount_point", + "namespace", + "ca_cert", + "ca_path", + "allowed_cidr_list", + "allowed_roles", + "tls_skip_verify", + "tls_server_name", + } + if err := CheckHCLKeys(list, valid); err != nil { + return nil, multierror.Prefix(err, "ssh_helper:") + } + + var c SSHHelperConfig + c.SSHMountPoint = SSHHelperDefaultMountPoint + if err := hcl.DecodeObject(&c, list); err != nil { + return nil, multierror.Prefix(err, "ssh_helper:") + } + + if c.VaultAddr == "" { + return nil, fmt.Errorf(`missing config "vault_addr"`) + } + return &c, nil +} + +func CheckHCLKeys(node ast.Node, valid []string) error { + var list *ast.ObjectList + switch n := node.(type) { + case *ast.ObjectList: + list = n + case *ast.ObjectType: + list = n.List + default: + return fmt.Errorf("cannot check HCL keys of type %T", n) + } + + validMap := make(map[string]struct{}, len(valid)) + for _, v := range valid { + validMap[v] = struct{}{} + } + + var result error + for _, item := range list.Items { + key := item.Keys[0].Token.Value().(string) + if _, ok := validMap[key]; !ok { + result = multierror.Append(result, fmt.Errorf("invalid key %q on line %d", key, item.Assign.Line)) + } + } + + return result +} + +// SSHHelper creates an SSHHelper object which can talk to Vault server with SSH backend +// mounted at default path ("ssh"). +func (c *Client) SSHHelper() *SSHHelper { + return c.SSHHelperWithMountPoint(SSHHelperDefaultMountPoint) +} + +// SSHHelperWithMountPoint creates an SSHHelper object which can talk to Vault server with SSH backend +// mounted at a specific mount point. +func (c *Client) SSHHelperWithMountPoint(mountPoint string) *SSHHelper { + return &SSHHelper{ + c: c, + MountPoint: mountPoint, + } +} + +// Verify verifies if the key provided by user is present in Vault server. 
The response +// will contain the IP address and username associated with the OTP. In case the +// OTP matches the echo request message, instead of searching an entry for the OTP, +// an echo response message is returned. This feature is used by ssh-helper to verify if +// its configured correctly. +func (c *SSHHelper) Verify(otp string) (*SSHVerifyResponse, error) { + return c.VerifyWithContext(context.Background(), otp) +} + +// VerifyWithContext the same as Verify but with a custom context. +func (c *SSHHelper) VerifyWithContext(ctx context.Context, otp string) (*SSHVerifyResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + data := map[string]interface{}{ + "otp": otp, + } + verifyPath := fmt.Sprintf("/v1/%s/verify", c.MountPoint) + r := c.c.NewRequest(http.MethodPut, verifyPath) + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + + if secret.Data == nil { + return nil, nil + } + + var verifyResp SSHVerifyResponse + err = mapstructure.Decode(secret.Data, &verifyResp) + if err != nil { + return nil, err + } + return &verifyResp, nil +} diff --git a/api/ssh_agent_test.go b/api/ssh_agent_test.go new file mode 100644 index 0000000..38117e4 --- /dev/null +++ b/api/ssh_agent_test.go @@ -0,0 +1,112 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "fmt" + "net/http" + "strings" + "testing" +) + +func TestSSH_CreateTLSClient(t *testing.T) { + // load the default configuration + config, err := LoadSSHHelperConfig("./test-fixtures/agent_config.hcl") + if err != nil { + panic(fmt.Sprintf("error loading agent's config file: %s", err)) + } + + client, err := config.NewClient() + if err != nil { + panic(fmt.Sprintf("error creating the client: %s", err)) + } + + // Provide a certificate and enforce setting of transport + config.CACert = "./test-fixtures/vault.crt" + + client, err = config.NewClient() + if err != nil { + panic(fmt.Sprintf("error creating the client: %s", err)) + } + if client.config.HttpClient.Transport == nil { + panic(fmt.Sprintf("error creating client with TLS transport")) + } +} + +func TestSSH_CreateTLSClient_tlsServerName(t *testing.T) { + // Ensure that the HTTP client is associated with the configured TLS server name. + tlsServerName := "tls.server.name" + + config, err := ParseSSHHelperConfig(fmt.Sprintf(` +vault_addr = "1.2.3.4" +tls_server_name = "%s" +`, tlsServerName)) + if err != nil { + panic(fmt.Sprintf("error loading config: %s", err)) + } + + client, err := config.NewClient() + if err != nil { + panic(fmt.Sprintf("error creating the client: %s", err)) + } + + actualTLSServerName := client.config.HttpClient.Transport.(*http.Transport).TLSClientConfig.ServerName + if actualTLSServerName != tlsServerName { + panic(fmt.Sprintf("incorrect TLS server name. 
expected: %s actual: %s", tlsServerName, actualTLSServerName)) + } +} + +func TestParseSSHHelperConfig(t *testing.T) { + config, err := ParseSSHHelperConfig(` + vault_addr = "1.2.3.4" +`) + if err != nil { + t.Fatal(err) + } + + if config.SSHMountPoint != SSHHelperDefaultMountPoint { + t.Errorf("expected %q to be %q", config.SSHMountPoint, SSHHelperDefaultMountPoint) + } +} + +func TestParseSSHHelperConfig_missingVaultAddr(t *testing.T) { + _, err := ParseSSHHelperConfig("") + if err == nil { + t.Fatal("expected error") + } + + if !strings.Contains(err.Error(), `missing config "vault_addr"`) { + t.Errorf("bad error: %s", err) + } +} + +func TestParseSSHHelperConfig_badKeys(t *testing.T) { + _, err := ParseSSHHelperConfig(` +vault_addr = "1.2.3.4" +nope = "bad" +`) + if err == nil { + t.Fatal("expected error") + } + + if !strings.Contains(err.Error(), `ssh_helper: invalid key "nope" on line 3`) { + t.Errorf("bad error: %s", err) + } +} + +func TestParseSSHHelperConfig_tlsServerName(t *testing.T) { + tlsServerName := "tls.server.name" + + config, err := ParseSSHHelperConfig(fmt.Sprintf(` +vault_addr = "1.2.3.4" +tls_server_name = "%s" +`, tlsServerName)) + if err != nil { + t.Fatal(err) + } + + if config.TLSServerName != tlsServerName { + t.Errorf("incorrect TLS server name. expected: %s actual: %s", tlsServerName, config.TLSServerName) + } +} diff --git a/api/sys.go b/api/sys.go new file mode 100644 index 0000000..81ebb3a --- /dev/null +++ b/api/sys.go @@ -0,0 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +// Sys is used to perform system-related operations on Vault. +type Sys struct { + c *Client +} + +// Sys is used to return the client for sys-related API calls. +func (c *Client) Sys() *Sys { + return &Sys{c: c} +} diff --git a/api/sys_audit.go b/api/sys_audit.go new file mode 100644 index 0000000..2244087 --- /dev/null +++ b/api/sys_audit.go @@ -0,0 +1,159 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) AuditHash(path string, input string) (string, error) { + return c.AuditHashWithContext(context.Background(), path, input) +} + +func (c *Sys) AuditHashWithContext(ctx context.Context, path string, input string) (string, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]interface{}{ + "input": input, + } + + r := c.c.NewRequest(http.MethodPut, fmt.Sprintf("/v1/sys/audit-hash/%s", path)) + if err := r.SetJSONBody(body); err != nil { + return "", err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return "", err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return "", err + } + if secret == nil || secret.Data == nil { + return "", errors.New("data from server response is empty") + } + + hash, ok := secret.Data["hash"] + if !ok { + return "", errors.New("hash not found in response data") + } + hashStr, ok := hash.(string) + if !ok { + return "", errors.New("could not parse hash in response data") + } + + return hashStr, nil +} + +func (c *Sys) ListAudit() (map[string]*Audit, error) { + return c.ListAuditWithContext(context.Background()) +} + +func (c *Sys) ListAuditWithContext(ctx context.Context) (map[string]*Audit, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/audit") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + mounts := map[string]*Audit{} + err = mapstructure.Decode(secret.Data, &mounts) + if err != nil { + return nil, err + } + + return mounts, nil +} + +// DEPRECATED: Use EnableAuditWithOptions instead +func (c *Sys) EnableAudit( + path string, auditType string, desc string, opts map[string]string, +) error { + return c.EnableAuditWithOptions(path, &EnableAuditOptions{ + Type: auditType, + Description: desc, + Options: opts, + }) +} + +func (c *Sys) EnableAuditWithOptions(path string, options *EnableAuditOptions) error { + return c.EnableAuditWithOptionsWithContext(context.Background(), path, options) +} + +func (c *Sys) EnableAuditWithOptionsWithContext(ctx context.Context, path string, options *EnableAuditOptions) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, fmt.Sprintf("/v1/sys/audit/%s", path)) + if err := r.SetJSONBody(options); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (c *Sys) DisableAudit(path string) error { + return c.DisableAuditWithContext(context.Background(), path) +} + +func (c *Sys) DisableAuditWithContext(ctx context.Context, path string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, fmt.Sprintf("/v1/sys/audit/%s", path)) + + resp, err := c.c.rawRequestWithContext(ctx, r) + + if err == nil { + defer resp.Body.Close() + } + return err +} + +// Structures for the requests/response are all down here. 
They aren't +// individually documented because the map almost directly to the raw HTTP API +// documentation. Please refer to that documentation for more details. + +type EnableAuditOptions struct { + Type string `json:"type" mapstructure:"type"` + Description string `json:"description" mapstructure:"description"` + Options map[string]string `json:"options" mapstructure:"options"` + Local bool `json:"local" mapstructure:"local"` +} + +type Audit struct { + Type string `json:"type" mapstructure:"type"` + Description string `json:"description" mapstructure:"description"` + Options map[string]string `json:"options" mapstructure:"options"` + Local bool `json:"local" mapstructure:"local"` + Path string `json:"path" mapstructure:"path"` +} diff --git a/api/sys_auth.go b/api/sys_auth.go new file mode 100644 index 0000000..e814412 --- /dev/null +++ b/api/sys_auth.go @@ -0,0 +1,101 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) ListAuth() (map[string]*AuthMount, error) { + return c.ListAuthWithContext(context.Background()) +} + +func (c *Sys) ListAuthWithContext(ctx context.Context) (map[string]*AuthMount, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/auth") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + mounts := map[string]*AuthMount{} + err = mapstructure.Decode(secret.Data, &mounts) + if err != nil { + return nil, err + } + + return mounts, nil +} + +// DEPRECATED: Use EnableAuthWithOptions instead +func (c *Sys) EnableAuth(path, authType, desc string) error { + return c.EnableAuthWithOptions(path, &EnableAuthOptions{ + Type: authType, + Description: desc, + }) +} + +func (c *Sys) EnableAuthWithOptions(path string, options *EnableAuthOptions) error { + return c.EnableAuthWithOptionsWithContext(context.Background(), path, options) +} + +func (c *Sys) EnableAuthWithOptionsWithContext(ctx context.Context, path string, options *EnableAuthOptions) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, fmt.Sprintf("/v1/sys/auth/%s", path)) + if err := r.SetJSONBody(options); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (c *Sys) DisableAuth(path string) error { + return c.DisableAuthWithContext(context.Background(), path) +} + +func (c *Sys) DisableAuthWithContext(ctx context.Context, path string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, fmt.Sprintf("/v1/sys/auth/%s", path)) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +// Rather than duplicate, we can use modern Go's type aliasing +type ( + EnableAuthOptions = MountInput + AuthConfigInput = MountConfigInput + AuthMount = MountOutput + AuthConfigOutput = MountConfigOutput +) diff --git a/api/sys_capabilities.go b/api/sys_capabilities.go new file mode 100644 index 0000000..6310d42 --- /dev/null +++ 
b/api/sys_capabilities.go @@ -0,0 +1,80 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) CapabilitiesSelf(path string) ([]string, error) { + return c.CapabilitiesSelfWithContext(context.Background(), path) +} + +func (c *Sys) CapabilitiesSelfWithContext(ctx context.Context, path string) ([]string, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + return c.CapabilitiesWithContext(ctx, c.c.Token(), path) +} + +func (c *Sys) Capabilities(token, path string) ([]string, error) { + return c.CapabilitiesWithContext(context.Background(), token, path) +} + +func (c *Sys) CapabilitiesWithContext(ctx context.Context, token, path string) ([]string, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]string{ + "token": token, + "path": path, + } + + reqPath := "/v1/sys/capabilities" + if token == c.c.Token() { + reqPath = fmt.Sprintf("%s-self", reqPath) + } + + r := c.c.NewRequest(http.MethodPost, reqPath) + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var res []string + err = mapstructure.Decode(secret.Data[path], &res) + if err != nil { + return nil, err + } + + if len(res) == 0 { + _, ok := secret.Data["capabilities"] + if ok { + err = mapstructure.Decode(secret.Data["capabilities"], &res) + if err != nil { + return nil, err + } + } + } + + return res, nil +} diff --git a/api/sys_config_cors.go b/api/sys_config_cors.go new file mode 100644 index 0000000..e80aa9d --- /dev/null +++ b/api/sys_config_cors.go @@ -0,0 +1,94 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "errors" + "net/http" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) CORSStatus() (*CORSResponse, error) { + return c.CORSStatusWithContext(context.Background()) +} + +func (c *Sys) CORSStatusWithContext(ctx context.Context) (*CORSResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/config/cors") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result CORSResponse + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +func (c *Sys) ConfigureCORS(req *CORSRequest) error { + return c.ConfigureCORSWithContext(context.Background(), req) +} + +func (c *Sys) ConfigureCORSWithContext(ctx context.Context, req *CORSRequest) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/config/cors") + if err := r.SetJSONBody(req); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) DisableCORS() error { + return c.DisableCORSWithContext(context.Background()) +} + +func (c *Sys) DisableCORSWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, "/v1/sys/config/cors") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +type CORSRequest struct { + AllowedOrigins []string `json:"allowed_origins" mapstructure:"allowed_origins"` + AllowedHeaders []string `json:"allowed_headers" mapstructure:"allowed_headers"` + Enabled bool `json:"enabled" mapstructure:"enabled"` +} + +type CORSResponse struct { + AllowedOrigins []string `json:"allowed_origins" mapstructure:"allowed_origins"` + AllowedHeaders []string `json:"allowed_headers" mapstructure:"allowed_headers"` + Enabled bool `json:"enabled" mapstructure:"enabled"` +} diff --git a/api/sys_generate_root.go b/api/sys_generate_root.go new file mode 100644 index 0000000..da4ad2f --- /dev/null +++ b/api/sys_generate_root.go @@ -0,0 +1,198 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "net/http" +) + +func (c *Sys) GenerateRootStatus() (*GenerateRootStatusResponse, error) { + return c.GenerateRootStatusWithContext(context.Background()) +} + +func (c *Sys) GenerateDROperationTokenStatus() (*GenerateRootStatusResponse, error) { + return c.GenerateDROperationTokenStatusWithContext(context.Background()) +} + +func (c *Sys) GenerateRecoveryOperationTokenStatus() (*GenerateRootStatusResponse, error) { + return c.GenerateRecoveryOperationTokenStatusWithContext(context.Background()) +} + +func (c *Sys) GenerateRootStatusWithContext(ctx context.Context) (*GenerateRootStatusResponse, error) { + return c.generateRootStatusCommonWithContext(ctx, "/v1/sys/generate-root/attempt") +} + +func (c *Sys) GenerateDROperationTokenStatusWithContext(ctx context.Context) (*GenerateRootStatusResponse, error) { + return c.generateRootStatusCommonWithContext(ctx, "/v1/sys/replication/dr/secondary/generate-operation-token/attempt") +} + +func (c *Sys) GenerateRecoveryOperationTokenStatusWithContext(ctx context.Context) (*GenerateRootStatusResponse, error) { + return c.generateRootStatusCommonWithContext(ctx, "/v1/sys/generate-recovery-token/attempt") +} + +func (c *Sys) generateRootStatusCommonWithContext(ctx context.Context, path string) (*GenerateRootStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, path) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result GenerateRootStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) GenerateRootInit(otp, pgpKey string) (*GenerateRootStatusResponse, error) { + return c.GenerateRootInitWithContext(context.Background(), otp, pgpKey) +} + +func (c *Sys) GenerateDROperationTokenInit(otp, pgpKey string) (*GenerateRootStatusResponse, error) { + return c.GenerateDROperationTokenInitWithContext(context.Background(), otp, pgpKey) +} + +func (c *Sys) GenerateRecoveryOperationTokenInit(otp, pgpKey string) (*GenerateRootStatusResponse, error) { + return c.GenerateRecoveryOperationTokenInitWithContext(context.Background(), otp, pgpKey) +} + +func (c *Sys) GenerateRootInitWithContext(ctx context.Context, otp, pgpKey string) (*GenerateRootStatusResponse, error) { + return c.generateRootInitCommonWithContext(ctx, "/v1/sys/generate-root/attempt", otp, pgpKey) +} + +func (c *Sys) GenerateDROperationTokenInitWithContext(ctx context.Context, otp, pgpKey string) (*GenerateRootStatusResponse, error) { + return c.generateRootInitCommonWithContext(ctx, "/v1/sys/replication/dr/secondary/generate-operation-token/attempt", otp, pgpKey) +} + +func (c *Sys) GenerateRecoveryOperationTokenInitWithContext(ctx context.Context, otp, pgpKey string) (*GenerateRootStatusResponse, error) { + return c.generateRootInitCommonWithContext(ctx, "/v1/sys/generate-recovery-token/attempt", otp, pgpKey) +} + +func (c *Sys) generateRootInitCommonWithContext(ctx context.Context, path, otp, pgpKey string) (*GenerateRootStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]interface{}{ + "otp": otp, + "pgp_key": pgpKey, + } + + r := c.c.NewRequest(http.MethodPut, path) + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result 
GenerateRootStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) GenerateRootCancel() error { + return c.GenerateRootCancelWithContext(context.Background()) +} + +func (c *Sys) GenerateDROperationTokenCancel() error { + return c.GenerateDROperationTokenCancelWithContext(context.Background()) +} + +func (c *Sys) GenerateRecoveryOperationTokenCancel() error { + return c.GenerateRecoveryOperationTokenCancelWithContext(context.Background()) +} + +func (c *Sys) GenerateRootCancelWithContext(ctx context.Context) error { + return c.generateRootCancelCommonWithContext(ctx, "/v1/sys/generate-root/attempt") +} + +func (c *Sys) GenerateDROperationTokenCancelWithContext(ctx context.Context) error { + return c.generateRootCancelCommonWithContext(ctx, "/v1/sys/replication/dr/secondary/generate-operation-token/attempt") +} + +func (c *Sys) GenerateRecoveryOperationTokenCancelWithContext(ctx context.Context) error { + return c.generateRootCancelCommonWithContext(ctx, "/v1/sys/generate-recovery-token/attempt") +} + +func (c *Sys) generateRootCancelCommonWithContext(ctx context.Context, path string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, path) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) GenerateRootUpdate(shard, nonce string) (*GenerateRootStatusResponse, error) { + return c.GenerateRootUpdateWithContext(context.Background(), shard, nonce) +} + +func (c *Sys) GenerateDROperationTokenUpdate(shard, nonce string) (*GenerateRootStatusResponse, error) { + return c.GenerateDROperationTokenUpdateWithContext(context.Background(), shard, nonce) +} + +func (c *Sys) GenerateRecoveryOperationTokenUpdate(shard, nonce string) (*GenerateRootStatusResponse, error) { + return c.GenerateRecoveryOperationTokenUpdateWithContext(context.Background(), shard, nonce) +} + +func (c *Sys) GenerateRootUpdateWithContext(ctx context.Context, shard, nonce string) (*GenerateRootStatusResponse, error) { + return c.generateRootUpdateCommonWithContext(ctx, "/v1/sys/generate-root/update", shard, nonce) +} + +func (c *Sys) GenerateDROperationTokenUpdateWithContext(ctx context.Context, shard, nonce string) (*GenerateRootStatusResponse, error) { + return c.generateRootUpdateCommonWithContext(ctx, "/v1/sys/replication/dr/secondary/generate-operation-token/update", shard, nonce) +} + +func (c *Sys) GenerateRecoveryOperationTokenUpdateWithContext(ctx context.Context, shard, nonce string) (*GenerateRootStatusResponse, error) { + return c.generateRootUpdateCommonWithContext(ctx, "/v1/sys/generate-recovery-token/update", shard, nonce) +} + +func (c *Sys) generateRootUpdateCommonWithContext(ctx context.Context, path, shard, nonce string) (*GenerateRootStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest(http.MethodPut, path) + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result GenerateRootStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type GenerateRootStatusResponse struct { + Nonce string `json:"nonce"` + Started bool `json:"started"` + Progress int `json:"progress"` + Required int `json:"required"` + Complete bool 
`json:"complete"`
+ EncodedToken string `json:"encoded_token"`
+ EncodedRootToken string `json:"encoded_root_token"`
+ PGPFingerprint string `json:"pgp_fingerprint"`
+ OTP string `json:"otp"`
+ OTPLength int `json:"otp_length"`
+}
diff --git a/api/sys_hastatus.go b/api/sys_hastatus.go
new file mode 100644
index 0000000..2b2aa7c
--- /dev/null
+++ b/api/sys_hastatus.go
@@ -0,0 +1,46 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+import (
+ "context"
+ "net/http"
+ "time"
+)
+
+func (c *Sys) HAStatus() (*HAStatusResponse, error) {
+ return c.HAStatusWithContext(context.Background())
+}
+
+func (c *Sys) HAStatusWithContext(ctx context.Context) (*HAStatusResponse, error) {
+ ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+ defer cancelFunc()
+
+ r := c.c.NewRequest(http.MethodGet, "/v1/sys/ha-status")
+
+ resp, err := c.c.rawRequestWithContext(ctx, r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result HAStatusResponse
+ err = resp.DecodeJSON(&result)
+ return &result, err
+}
+
+type HAStatusResponse struct {
+ Nodes []HANode
+}
+
+type HANode struct {
+ Hostname string `json:"hostname"`
+ APIAddress string `json:"api_address"`
+ ClusterAddress string `json:"cluster_address"`
+ ActiveNode bool `json:"active_node"`
+ LastEcho *time.Time `json:"last_echo"`
+ Version string `json:"version"`
+ UpgradeVersion string `json:"upgrade_version,omitempty"`
+ RedundancyZone string `json:"redundancy_zone,omitempty"`
+}
diff --git a/api/sys_health.go b/api/sys_health.go
new file mode 100644
index 0000000..13fd8d4
--- /dev/null
+++ b/api/sys_health.go
@@ -0,0 +1,52 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+import (
+ "context"
+ "net/http"
+)
+
+func (c *Sys) Health() (*HealthResponse, error) {
+ return c.HealthWithContext(context.Background())
+}
+
+func (c *Sys) HealthWithContext(ctx context.Context) (*HealthResponse, error) {
+ ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+ defer cancelFunc()
+
+ r := c.c.NewRequest(http.MethodGet, "/v1/sys/health")
+ // If the code is 400 or above it will automatically turn into an error,
+ // but the sys/health API defaults to returning 5xx when sealed or
+ // uninitialized, so we force this code to be something else so we parse correctly
+ r.Params.Add("uninitcode", "299")
+ r.Params.Add("sealedcode", "299")
+ r.Params.Add("standbycode", "299")
+ r.Params.Add("drsecondarycode", "299")
+ r.Params.Add("performancestandbycode", "299")
+
+ resp, err := c.c.rawRequestWithContext(ctx, r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result HealthResponse
+ err = resp.DecodeJSON(&result)
+ return &result, err
+}
+
+type HealthResponse struct {
+ Initialized bool `json:"initialized"`
+ Sealed bool `json:"sealed"`
+ Standby bool `json:"standby"`
+ PerformanceStandby bool `json:"performance_standby"`
+ ReplicationPerformanceMode string `json:"replication_performance_mode"`
+ ReplicationDRMode string `json:"replication_dr_mode"`
+ ServerTimeUTC int64 `json:"server_time_utc"`
+ Version string `json:"version"`
+ ClusterName string `json:"cluster_name,omitempty"`
+ ClusterID string `json:"cluster_id,omitempty"`
+ LastWAL uint64 `json:"last_wal,omitempty"`
+}
diff --git a/api/sys_init.go b/api/sys_init.go
new file mode 100644
index 0000000..13fa948
--- /dev/null
+++ b/api/sys_init.go
@@ -0,0 +1,77 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "net/http" +) + +func (c *Sys) InitStatus() (bool, error) { + return c.InitStatusWithContext(context.Background()) +} + +func (c *Sys) InitStatusWithContext(ctx context.Context) (bool, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/init") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return false, err + } + defer resp.Body.Close() + + var result InitStatusResponse + err = resp.DecodeJSON(&result) + return result.Initialized, err +} + +func (c *Sys) Init(opts *InitRequest) (*InitResponse, error) { + return c.InitWithContext(context.Background(), opts) +} + +func (c *Sys) InitWithContext(ctx context.Context, opts *InitRequest) (*InitResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/init") + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result InitResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type InitRequest struct { + SecretShares int `json:"secret_shares"` + SecretThreshold int `json:"secret_threshold"` + StoredShares int `json:"stored_shares"` + PGPKeys []string `json:"pgp_keys"` + RecoveryShares int `json:"recovery_shares"` + RecoveryThreshold int `json:"recovery_threshold"` + RecoveryPGPKeys []string `json:"recovery_pgp_keys"` + RootTokenPGPKey string `json:"root_token_pgp_key"` +} + +type InitStatusResponse struct { + Initialized bool +} + +type InitResponse struct { + Keys []string `json:"keys"` + KeysB64 []string `json:"keys_base64"` + RecoveryKeys []string `json:"recovery_keys"` + RecoveryKeysB64 []string `json:"recovery_keys_base64"` + RootToken string `json:"root_token"` +} diff --git a/api/sys_leader.go b/api/sys_leader.go new file mode 100644 index 0000000..868914d --- /dev/null +++ b/api/sys_leader.go @@ -0,0 +1,44 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "net/http" + "time" +) + +func (c *Sys) Leader() (*LeaderResponse, error) { + return c.LeaderWithContext(context.Background()) +} + +func (c *Sys) LeaderWithContext(ctx context.Context) (*LeaderResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/leader") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result LeaderResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type LeaderResponse struct { + HAEnabled bool `json:"ha_enabled"` + IsSelf bool `json:"is_self"` + ActiveTime time.Time `json:"active_time"` + LeaderAddress string `json:"leader_address"` + LeaderClusterAddress string `json:"leader_cluster_address"` + PerfStandby bool `json:"performance_standby"` + PerfStandbyLastRemoteWAL uint64 `json:"performance_standby_last_remote_wal"` + LastWAL uint64 `json:"last_wal"` + RaftCommittedIndex uint64 `json:"raft_committed_index,omitempty"` + RaftAppliedIndex uint64 `json:"raft_applied_index,omitempty"` +} diff --git a/api/sys_leases.go b/api/sys_leases.go new file mode 100644 index 0000000..c46f07e --- /dev/null +++ b/api/sys_leases.go @@ -0,0 +1,166 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "errors" + "net/http" +) + +func (c *Sys) Renew(id string, increment int) (*Secret, error) { + return c.RenewWithContext(context.Background(), id, increment) +} + +func (c *Sys) RenewWithContext(ctx context.Context, id string, increment int) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/leases/renew") + + body := map[string]interface{}{ + "increment": increment, + "lease_id": id, + } + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *Sys) Lookup(id string) (*Secret, error) { + return c.LookupWithContext(context.Background(), id) +} + +func (c *Sys) LookupWithContext(ctx context.Context, id string) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/leases/lookup") + + body := map[string]interface{}{ + "lease_id": id, + } + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *Sys) Revoke(id string) error { + return c.RevokeWithContext(context.Background(), id) +} + +func (c *Sys) RevokeWithContext(ctx context.Context, id string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/leases/revoke") + body := map[string]interface{}{ + "lease_id": id, + } + if err := r.SetJSONBody(body); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RevokePrefix(id string) error { + return c.RevokePrefixWithContext(context.Background(), id) +} + +func (c *Sys) RevokePrefixWithContext(ctx context.Context, id string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/leases/revoke-prefix/"+id) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RevokeForce(id string) error { + return c.RevokeForceWithContext(context.Background(), id) +} + +func (c *Sys) RevokeForceWithContext(ctx context.Context, id string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/leases/revoke-force/"+id) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RevokeWithOptions(opts *RevokeOptions) error { + return c.RevokeWithOptionsWithContext(context.Background(), opts) +} + +func (c *Sys) RevokeWithOptionsWithContext(ctx context.Context, opts *RevokeOptions) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + if opts == nil { + return errors.New("nil options provided") + } + + // Construct path + path := "/v1/sys/leases/revoke/" + switch { + case opts.Force: + path = "/v1/sys/leases/revoke-force/" + case opts.Prefix: + path = "/v1/sys/leases/revoke-prefix/" + } + path += opts.LeaseID + + r := c.c.NewRequest(http.MethodPut, path) + if !opts.Force { + body := map[string]interface{}{ + "sync": 
opts.Sync,
+ }
+ if err := r.SetJSONBody(body); err != nil {
+ return err
+ }
+ }
+
+ resp, err := c.c.rawRequestWithContext(ctx, r)
+ if err == nil {
+ defer resp.Body.Close()
+ }
+ return err
+}
+
+type RevokeOptions struct {
+ LeaseID string
+ Force bool
+ Prefix bool
+ Sync bool
+}
diff --git a/api/sys_mfa.go b/api/sys_mfa.go
new file mode 100644
index 0000000..2be6695
--- /dev/null
+++ b/api/sys_mfa.go
@@ -0,0 +1,48 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+)
+
+func (c *Sys) MFAValidate(requestID string, payload map[string]interface{}) (*Secret, error) {
+ return c.MFAValidateWithContext(context.Background(), requestID, payload)
+}
+
+func (c *Sys) MFAValidateWithContext(ctx context.Context, requestID string, payload map[string]interface{}) (*Secret, error) {
+ ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+ defer cancelFunc()
+
+ body := map[string]interface{}{
+ "mfa_request_id": requestID,
+ "mfa_payload": payload,
+ }
+
+ r := c.c.NewRequest(http.MethodPost, "/v1/sys/mfa/validate")
+ if err := r.SetJSONBody(body); err != nil {
+ return nil, fmt.Errorf("failed to set request body: %w", err)
+ }
+
+ resp, err := c.c.rawRequestWithContext(ctx, r)
+ if resp != nil {
+ defer resp.Body.Close()
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ secret, err := ParseSecret(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse secret from response: %w", err)
+ }
+
+ if secret == nil {
+ return nil, fmt.Errorf("data from server response is empty")
+ }
+
+ return secret, nil
+}
diff --git a/api/sys_monitor.go b/api/sys_monitor.go
new file mode 100644
index 0000000..15a8a13
--- /dev/null
+++ b/api/sys_monitor.go
@@ -0,0 +1,74 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package api
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "net/http"
+)
+
+// Monitor returns a channel that outputs strings containing the log messages
+// coming from the server.
+func (c *Sys) Monitor(ctx context.Context, logLevel string, logFormat string) (chan string, error) {
+ r := c.c.NewRequest(http.MethodGet, "/v1/sys/monitor")
+
+ if logLevel == "" {
+ r.Params.Add("log_level", "info")
+ } else {
+ r.Params.Add("log_level", logLevel)
+ }
+
+ if logFormat == "" {
+ r.Params.Add("log_format", "standard")
+ } else {
+ r.Params.Add("log_format", logFormat)
+ }
+
+ resp, err := c.c.RawRequestWithContext(ctx, r)
+ if err != nil {
+ return nil, err
+ }
+
+ logCh := make(chan string, 64)
+
+ go func() {
+ scanner := bufio.NewScanner(resp.Body)
+ droppedCount := 0
+
+ defer close(logCh)
+ defer resp.Body.Close()
+
+ for {
+ if ctx.Err() != nil {
+ return
+ }
+
+ if !scanner.Scan() {
+ return
+ }
+
+ logMessage := scanner.Text()
+
+ if droppedCount > 0 {
+ select {
+ case logCh <- fmt.Sprintf("Monitor dropped %d logs during monitor request\n", droppedCount):
+ droppedCount = 0
+ default:
+ droppedCount++
+ continue
+ }
+ }
+
+ select {
+ case logCh <- logMessage:
+ default:
+ droppedCount++
+ }
+ }
+ }()
+
+ return logCh, nil
+}
diff --git a/api/sys_mounts.go b/api/sys_mounts.go
new file mode 100644
index 0000000..a6c2a0f
--- /dev/null
+++ b/api/sys_mounts.go
@@ -0,0 +1,337 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) ListMounts() (map[string]*MountOutput, error) { + return c.ListMountsWithContext(context.Background()) +} + +func (c *Sys) ListMountsWithContext(ctx context.Context) (map[string]*MountOutput, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/mounts") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + mounts := map[string]*MountOutput{} + err = mapstructure.Decode(secret.Data, &mounts) + if err != nil { + return nil, err + } + + return mounts, nil +} + +func (c *Sys) Mount(path string, mountInfo *MountInput) error { + return c.MountWithContext(context.Background(), path, mountInfo) +} + +func (c *Sys) MountWithContext(ctx context.Context, path string, mountInfo *MountInput) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, fmt.Sprintf("/v1/sys/mounts/%s", path)) + if err := r.SetJSONBody(mountInfo); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (c *Sys) Unmount(path string) error { + return c.UnmountWithContext(context.Background(), path) +} + +func (c *Sys) UnmountWithContext(ctx context.Context, path string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, fmt.Sprintf("/v1/sys/mounts/%s", path)) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +// Remount wraps RemountWithContext using context.Background. +func (c *Sys) Remount(from, to string) error { + return c.RemountWithContext(context.Background(), from, to) +} + +// RemountWithContext kicks off a remount operation, polls the status endpoint using +// the migration ID till either success or failure state is observed +func (c *Sys) RemountWithContext(ctx context.Context, from, to string) error { + remountResp, err := c.StartRemountWithContext(ctx, from, to) + if err != nil { + return err + } + + for { + remountStatusResp, err := c.RemountStatusWithContext(ctx, remountResp.MigrationID) + if err != nil { + return err + } + if remountStatusResp.MigrationInfo.MigrationStatus == "success" { + return nil + } + if remountStatusResp.MigrationInfo.MigrationStatus == "failure" { + return fmt.Errorf("Failure! Error encountered moving mount %s to %s, with migration ID %s", from, to, remountResp.MigrationID) + } + time.Sleep(1 * time.Second) + } +} + +// StartRemount wraps StartRemountWithContext using context.Background. 
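+//
+// A minimal usage sketch (editor's illustration, not part of the upstream
+// source; assumes an already-configured *Client named client, and the mount
+// paths are placeholder values):
+//
+//	resp, err := client.Sys().StartRemount("secret/", "kv/")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("migration ID:", resp.MigrationID)
+//	// Poll RemountStatus with resp.MigrationID until the status is
+//	// "success" or "failure", or call Remount to have the polling done
+//	// for you.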
+func (c *Sys) StartRemount(from, to string) (*MountMigrationOutput, error) { + return c.StartRemountWithContext(context.Background(), from, to) +} + +// StartRemountWithContext kicks off a mount migration and returns a response with the migration ID +func (c *Sys) StartRemountWithContext(ctx context.Context, from, to string) (*MountMigrationOutput, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]interface{}{ + "from": from, + "to": to, + } + + r := c.c.NewRequest(http.MethodPost, "/v1/sys/remount") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result MountMigrationOutput + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +// RemountStatus wraps RemountStatusWithContext using context.Background. +func (c *Sys) RemountStatus(migrationID string) (*MountMigrationStatusOutput, error) { + return c.RemountStatusWithContext(context.Background(), migrationID) +} + +// RemountStatusWithContext checks the status of a mount migration operation with the provided ID +func (c *Sys) RemountStatusWithContext(ctx context.Context, migrationID string) (*MountMigrationStatusOutput, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, fmt.Sprintf("/v1/sys/remount/status/%s", migrationID)) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result MountMigrationStatusOutput + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +func (c *Sys) TuneMount(path string, config MountConfigInput) error { + return c.TuneMountWithContext(context.Background(), path, config) +} + +func (c *Sys) TuneMountWithContext(ctx context.Context, path string, config MountConfigInput) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, fmt.Sprintf("/v1/sys/mounts/%s/tune", path)) + if err := r.SetJSONBody(config); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) MountConfig(path string) (*MountConfigOutput, error) { + return c.MountConfigWithContext(context.Background(), path) +} + +func (c *Sys) MountConfigWithContext(ctx context.Context, path string) (*MountConfigOutput, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, fmt.Sprintf("/v1/sys/mounts/%s/tune", path)) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result MountConfigOutput + err = mapstructure.Decode(secret.Data, 
&result) + if err != nil { + return nil, err + } + + return &result, err +} + +type MountInput struct { + Type string `json:"type"` + Description string `json:"description"` + Config MountConfigInput `json:"config"` + Local bool `json:"local"` + SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"` + ExternalEntropyAccess bool `json:"external_entropy_access" mapstructure:"external_entropy_access"` + Options map[string]string `json:"options"` + + // Deprecated: Newer server responses should be returning this information in the + // Type field (json: "type") instead. + PluginName string `json:"plugin_name,omitempty"` +} + +type MountConfigInput struct { + Options map[string]string `json:"options" mapstructure:"options"` + DefaultLeaseTTL string `json:"default_lease_ttl" mapstructure:"default_lease_ttl"` + Description *string `json:"description,omitempty" mapstructure:"description"` + MaxLeaseTTL string `json:"max_lease_ttl" mapstructure:"max_lease_ttl"` + ForceNoCache bool `json:"force_no_cache" mapstructure:"force_no_cache"` + AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"` + AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"` + ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"` + PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"` + AllowedResponseHeaders []string `json:"allowed_response_headers,omitempty" mapstructure:"allowed_response_headers"` + TokenType string `json:"token_type,omitempty" mapstructure:"token_type"` + AllowedManagedKeys []string `json:"allowed_managed_keys,omitempty" mapstructure:"allowed_managed_keys"` + PluginVersion string `json:"plugin_version,omitempty"` + UserLockoutConfig *UserLockoutConfigInput `json:"user_lockout_config,omitempty"` + // Deprecated: This field will always be blank for newer server responses. 
+ PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"` +} + +type MountOutput struct { + UUID string `json:"uuid"` + Type string `json:"type"` + Description string `json:"description"` + Accessor string `json:"accessor"` + Config MountConfigOutput `json:"config"` + Options map[string]string `json:"options"` + Local bool `json:"local"` + SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"` + ExternalEntropyAccess bool `json:"external_entropy_access" mapstructure:"external_entropy_access"` + PluginVersion string `json:"plugin_version" mapstructure:"plugin_version"` + RunningVersion string `json:"running_plugin_version" mapstructure:"running_plugin_version"` + RunningSha256 string `json:"running_sha256" mapstructure:"running_sha256"` + DeprecationStatus string `json:"deprecation_status" mapstructure:"deprecation_status"` +} + +type MountConfigOutput struct { + DefaultLeaseTTL int `json:"default_lease_ttl" mapstructure:"default_lease_ttl"` + MaxLeaseTTL int `json:"max_lease_ttl" mapstructure:"max_lease_ttl"` + ForceNoCache bool `json:"force_no_cache" mapstructure:"force_no_cache"` + AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"` + AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"` + ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"` + PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"` + AllowedResponseHeaders []string `json:"allowed_response_headers,omitempty" mapstructure:"allowed_response_headers"` + TokenType string `json:"token_type,omitempty" mapstructure:"token_type"` + AllowedManagedKeys []string `json:"allowed_managed_keys,omitempty" mapstructure:"allowed_managed_keys"` + UserLockoutConfig *UserLockoutConfigOutput `json:"user_lockout_config,omitempty"` + // Deprecated: This field will always be blank for newer server responses. 
+ PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"` +} + +type UserLockoutConfigInput struct { + LockoutThreshold string `json:"lockout_threshold,omitempty" structs:"lockout_threshold" mapstructure:"lockout_threshold"` + LockoutDuration string `json:"lockout_duration,omitempty" structs:"lockout_duration" mapstructure:"lockout_duration"` + LockoutCounterResetDuration string `json:"lockout_counter_reset_duration,omitempty" structs:"lockout_counter_reset_duration" mapstructure:"lockout_counter_reset_duration"` + DisableLockout *bool `json:"lockout_disable,omitempty" structs:"lockout_disable" mapstructure:"lockout_disable"` +} + +type UserLockoutConfigOutput struct { + LockoutThreshold uint `json:"lockout_threshold,omitempty" structs:"lockout_threshold" mapstructure:"lockout_threshold"` + LockoutDuration int `json:"lockout_duration,omitempty" structs:"lockout_duration" mapstructure:"lockout_duration"` + LockoutCounterReset int `json:"lockout_counter_reset,omitempty" structs:"lockout_counter_reset" mapstructure:"lockout_counter_reset"` + DisableLockout *bool `json:"disable_lockout,omitempty" structs:"disable_lockout" mapstructure:"disable_lockout"` +} + +type MountMigrationOutput struct { + MigrationID string `mapstructure:"migration_id"` +} + +type MountMigrationStatusOutput struct { + MigrationID string `mapstructure:"migration_id"` + MigrationInfo *MountMigrationStatusInfo `mapstructure:"migration_info"` +} + +type MountMigrationStatusInfo struct { + SourceMount string `mapstructure:"source_mount"` + TargetMount string `mapstructure:"target_mount"` + MigrationStatus string `mapstructure:"status"` +} diff --git a/api/sys_mounts_test.go b/api/sys_mounts_test.go new file mode 100644 index 0000000..a810c62 --- /dev/null +++ b/api/sys_mounts_test.go @@ -0,0 +1,152 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "net/http" + "net/http/httptest" + "testing" +) + +func TestListMounts(t *testing.T) { + mockVaultServer := httptest.NewServer(http.HandlerFunc(mockVaultMountsHandler)) + defer mockVaultServer.Close() + + cfg := DefaultConfig() + cfg.Address = mockVaultServer.URL + client, err := NewClient(cfg) + if err != nil { + t.Fatal(err) + } + + resp, err := client.Sys().ListMounts() + if err != nil { + t.Fatal(err) + } + + expectedMounts := map[string]struct { + Type string + Version string + }{ + "cubbyhole/": {Type: "cubbyhole", Version: "v1.0.0"}, + "identity/": {Type: "identity", Version: ""}, + "secret/": {Type: "kv", Version: ""}, + "sys/": {Type: "system", Version: ""}, + } + + for path, mount := range resp { + expected, ok := expectedMounts[path] + if !ok { + t.Errorf("Unexpected mount: %s: %+v", path, mount) + continue + } + if expected.Type != mount.Type || expected.Version != mount.PluginVersion { + t.Errorf("Mount did not match: %s -> expected %+v but got %+v", path, expected, mount) + } + } + + for path, expected := range expectedMounts { + mount, ok := resp[path] + if !ok { + t.Errorf("Expected mount not found mount: %s: %+v", path, expected) + continue + } + if expected.Type != mount.Type || expected.Version != mount.PluginVersion { + t.Errorf("Mount did not match: %s -> expected %+v but got %+v", path, expected, mount) + } + } +} + +func mockVaultMountsHandler(w http.ResponseWriter, _ *http.Request) { + _, _ = w.Write([]byte(listMountsResponse)) +} + +const listMountsResponse = `{ + "request_id": "3cd881e9-ea50-2e06-90b2-5641667485fa", + "lease_id": "", + "lease_duration": 0, + "renewable": false, + "data": { + "cubbyhole/": { + "accessor": "cubbyhole_2e3fc28d", + "config": { + "default_lease_ttl": 0, + "force_no_cache": false, + "max_lease_ttl": 0 + }, + "description": "per-token private secret storage", + "external_entropy_access": false, + "local": true, + "options": null, + "plugin_version": "v1.0.0", + "running_sha256": "", + "running_plugin_version": "", + "seal_wrap": false, + "type": "cubbyhole", + "uuid": "575063dc-5ef8-4487-c842-22c494c19a6f" + }, + "identity/": { + "accessor": "identity_6e01c327", + "config": { + "default_lease_ttl": 0, + "force_no_cache": false, + "max_lease_ttl": 0, + "passthrough_request_headers": [ + "Authorization" + ] + }, + "description": "identity store", + "external_entropy_access": false, + "local": false, + "options": null, + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": "", + "seal_wrap": false, + "type": "identity", + "uuid": "187d7eba-3471-554b-c2d9-1479612c8046" + }, + "secret/": { + "accessor": "kv_3e2f282f", + "config": { + "default_lease_ttl": 0, + "force_no_cache": false, + "max_lease_ttl": 0 + }, + "description": "key/value secret storage", + "external_entropy_access": false, + "local": false, + "options": { + "version": "2" + }, + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": "", + "seal_wrap": false, + "type": "kv", + "uuid": "13375e0f-876e-7e96-0a3e-076f37b6b69d" + }, + "sys/": { + "accessor": "system_93503264", + "config": { + "default_lease_ttl": 0, + "force_no_cache": false, + "max_lease_ttl": 0, + "passthrough_request_headers": [ + "Accept" + ] + }, + "description": "system endpoints used for control, policy and debugging", + "external_entropy_access": false, + "local": false, + "options": null, + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": "", + "seal_wrap": true, + "type": 
"system", + "uuid": "1373242d-cc4d-c023-410b-7f336e7ba0a8" + } + } +}` diff --git a/api/sys_plugins.go b/api/sys_plugins.go new file mode 100644 index 0000000..2ee024d --- /dev/null +++ b/api/sys_plugins.go @@ -0,0 +1,382 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/mitchellh/mapstructure" +) + +// ListPluginsInput is used as input to the ListPlugins function. +type ListPluginsInput struct { + // Type of the plugin. Required. + Type PluginType `json:"type"` +} + +// ListPluginsResponse is the response from the ListPlugins call. +type ListPluginsResponse struct { + // PluginsByType is the list of plugins by type. + PluginsByType map[PluginType][]string `json:"types"` + + Details []PluginDetails `json:"details,omitempty"` + + // Names is the list of names of the plugins. + // + // Deprecated: Newer server responses should be returning PluginsByType (json: + // "types") instead. + Names []string `json:"names"` +} + +type PluginDetails struct { + Type string `json:"type"` + Name string `json:"name"` + Version string `json:"version,omitempty"` + Builtin bool `json:"builtin"` + DeprecationStatus string `json:"deprecation_status,omitempty" mapstructure:"deprecation_status"` +} + +// ListPlugins wraps ListPluginsWithContext using context.Background. +func (c *Sys) ListPlugins(i *ListPluginsInput) (*ListPluginsResponse, error) { + return c.ListPluginsWithContext(context.Background(), i) +} + +// ListPluginsWithContext lists all plugins in the catalog and returns their names as a +// list of strings. +func (c *Sys) ListPluginsWithContext(ctx context.Context, i *ListPluginsInput) (*ListPluginsResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + resp, err := c.c.rawRequestWithContext(ctx, c.c.NewRequest(http.MethodGet, "/v1/sys/plugins/catalog")) + if err != nil && resp == nil { + return nil, err + } + if resp == nil { + return nil, nil + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + result := &ListPluginsResponse{ + PluginsByType: make(map[PluginType][]string), + } + switch i.Type { + case PluginTypeUnknown: + for _, pluginType := range PluginTypes { + pluginsRaw, ok := secret.Data[pluginType.String()] + if !ok { + continue + } + + pluginsIfc, ok := pluginsRaw.([]interface{}) + if !ok { + return nil, fmt.Errorf("unable to parse plugins for %q type", pluginType.String()) + } + + plugins := make([]string, 0, len(pluginsIfc)) + for _, nameIfc := range pluginsIfc { + name, ok := nameIfc.(string) + if !ok { + continue + } + plugins = append(plugins, name) + } + result.PluginsByType[pluginType] = plugins + } + default: + pluginsRaw, ok := secret.Data[i.Type.String()] + if !ok { + return nil, fmt.Errorf("no %s entry in returned data", i.Type.String()) + } + + var respKeys []string + if err := mapstructure.Decode(pluginsRaw, &respKeys); err != nil { + return nil, err + } + result.PluginsByType[i.Type] = respKeys + } + + if detailed, ok := secret.Data["detailed"]; ok { + var details []PluginDetails + if err := mapstructure.Decode(detailed, &details); err != nil { + return nil, err + } + + switch i.Type { + case PluginTypeUnknown: + result.Details = details + default: + // Filter for just the queried type. 
+ for _, entry := range details { + if entry.Type == i.Type.String() { + result.Details = append(result.Details, entry) + } + } + } + } + + return result, nil +} + +// GetPluginInput is used as input to the GetPlugin function. +type GetPluginInput struct { + Name string `json:"-"` + + // Type of the plugin. Required. + Type PluginType `json:"type"` + Version string `json:"version"` +} + +// GetPluginResponse is the response from the GetPlugin call. +type GetPluginResponse struct { + Args []string `json:"args"` + Builtin bool `json:"builtin"` + Command string `json:"command"` + Name string `json:"name"` + SHA256 string `json:"sha256"` + DeprecationStatus string `json:"deprecation_status,omitempty"` + Version string `json:"version,omitempty"` +} + +// GetPlugin wraps GetPluginWithContext using context.Background. +func (c *Sys) GetPlugin(i *GetPluginInput) (*GetPluginResponse, error) { + return c.GetPluginWithContext(context.Background(), i) +} + +// GetPluginWithContext retrieves information about the plugin. +func (c *Sys) GetPluginWithContext(ctx context.Context, i *GetPluginInput) (*GetPluginResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + path := catalogPathByType(i.Type, i.Name) + req := c.c.NewRequest(http.MethodGet, path) + if i.Version != "" { + req.Params.Set("version", i.Version) + } + + resp, err := c.c.rawRequestWithContext(ctx, req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result struct { + Data *GetPluginResponse + } + err = resp.DecodeJSON(&result) + if err != nil { + return nil, err + } + return result.Data, err +} + +// RegisterPluginInput is used as input to the RegisterPlugin function. +type RegisterPluginInput struct { + // Name is the name of the plugin. Required. + Name string `json:"-"` + + // Type of the plugin. Required. + Type PluginType `json:"type"` + + // Args is the list of args to spawn the process with. + Args []string `json:"args,omitempty"` + + // Command is the command to run. + Command string `json:"command,omitempty"` + + // SHA256 is the shasum of the plugin. + SHA256 string `json:"sha256,omitempty"` + + // Version is the optional version of the plugin being registered + Version string `json:"version,omitempty"` +} + +// RegisterPlugin wraps RegisterPluginWithContext using context.Background. +func (c *Sys) RegisterPlugin(i *RegisterPluginInput) error { + return c.RegisterPluginWithContext(context.Background(), i) +} + +// RegisterPluginWithContext registers the plugin with the given information. +func (c *Sys) RegisterPluginWithContext(ctx context.Context, i *RegisterPluginInput) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + path := catalogPathByType(i.Type, i.Name) + req := c.c.NewRequest(http.MethodPut, path) + + if err := req.SetJSONBody(i); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, req) + if err == nil { + defer resp.Body.Close() + } + return err +} + +// DeregisterPluginInput is used as input to the DeregisterPlugin function. +type DeregisterPluginInput struct { + // Name is the name of the plugin. Required. + Name string `json:"-"` + + // Type of the plugin. Required. + Type PluginType `json:"type"` + + // Version of the plugin. Optional. + Version string `json:"version,omitempty"` +} + +// DeregisterPlugin wraps DeregisterPluginWithContext using context.Background. 
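+//
+// A usage sketch (editor's illustration, not part of the upstream source;
+// the plugin name, type, and version below are placeholder values, and
+// client is an already-configured *Client):
+//
+//	err := client.Sys().DeregisterPlugin(&DeregisterPluginInput{
+//		Name:    "my-secrets-plugin",
+//		Type:    PluginTypeSecrets,
+//		Version: "v1.0.0",
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}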
+func (c *Sys) DeregisterPlugin(i *DeregisterPluginInput) error { + return c.DeregisterPluginWithContext(context.Background(), i) +} + +// DeregisterPluginWithContext removes the plugin with the given name from the plugin +// catalog. +func (c *Sys) DeregisterPluginWithContext(ctx context.Context, i *DeregisterPluginInput) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + path := catalogPathByType(i.Type, i.Name) + req := c.c.NewRequest(http.MethodDelete, path) + req.Params.Set("version", i.Version) + resp, err := c.c.rawRequestWithContext(ctx, req) + if err == nil { + defer resp.Body.Close() + } + return err +} + +// ReloadPluginInput is used as input to the ReloadPlugin function. +type ReloadPluginInput struct { + // Plugin is the name of the plugin to reload, as registered in the plugin catalog + Plugin string `json:"plugin"` + + // Mounts is the array of string mount paths of the plugin backends to reload + Mounts []string `json:"mounts"` + + // Scope is the scope of the plugin reload + Scope string `json:"scope"` +} + +// ReloadPlugin wraps ReloadPluginWithContext using context.Background. +func (c *Sys) ReloadPlugin(i *ReloadPluginInput) (string, error) { + return c.ReloadPluginWithContext(context.Background(), i) +} + +// ReloadPluginWithContext reloads mounted plugin backends, possibly returning +// reloadId for a cluster scoped reload +func (c *Sys) ReloadPluginWithContext(ctx context.Context, i *ReloadPluginInput) (string, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + path := "/v1/sys/plugins/reload/backend" + req := c.c.NewRequest(http.MethodPut, path) + + if err := req.SetJSONBody(i); err != nil { + return "", err + } + + resp, err := c.c.rawRequestWithContext(ctx, req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if i.Scope == "global" { + // Get the reload id + secret, parseErr := ParseSecret(resp.Body) + if parseErr != nil { + return "", parseErr + } + if _, ok := secret.Data["reload_id"]; ok { + return secret.Data["reload_id"].(string), nil + } + } + return "", err +} + +// ReloadStatus is the status of an individual node's plugin reload +type ReloadStatus struct { + Timestamp time.Time `json:"timestamp" mapstructure:"timestamp"` + Error string `json:"error" mapstructure:"error"` +} + +// ReloadStatusResponse is the combined response of all known completed plugin reloads +type ReloadStatusResponse struct { + ReloadID string `mapstructure:"reload_id"` + Results map[string]*ReloadStatus `mapstructure:"results"` +} + +// ReloadPluginStatusInput is used as input to the ReloadStatusPlugin function. +type ReloadPluginStatusInput struct { + // ReloadID is the ID of the reload operation + ReloadID string `json:"reload_id"` +} + +// ReloadPluginStatus wraps ReloadPluginStatusWithContext using context.Background. 
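+//
+// A usage sketch (editor's illustration, not part of the upstream source;
+// reloadID would come from a prior ReloadPlugin call with Scope "global",
+// and client is an already-configured *Client):
+//
+//	status, err := client.Sys().ReloadPluginStatus(&ReloadPluginStatusInput{
+//		ReloadID: reloadID,
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for node, rs := range status.Results {
+//		fmt.Println(node, rs.Timestamp, rs.Error)
+//	}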
+func (c *Sys) ReloadPluginStatus(reloadStatusInput *ReloadPluginStatusInput) (*ReloadStatusResponse, error) {
+ return c.ReloadPluginStatusWithContext(context.Background(), reloadStatusInput)
+}
+
+// ReloadPluginStatusWithContext retrieves the status of a reload operation
+func (c *Sys) ReloadPluginStatusWithContext(ctx context.Context, reloadStatusInput *ReloadPluginStatusInput) (*ReloadStatusResponse, error) {
+ ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+ defer cancelFunc()
+
+ path := "/v1/sys/plugins/reload/backend/status"
+ req := c.c.NewRequest(http.MethodGet, path)
+ req.Params.Add("reload_id", reloadStatusInput.ReloadID)
+
+ resp, err := c.c.rawRequestWithContext(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp != nil {
+ secret, parseErr := ParseSecret(resp.Body)
+ if parseErr != nil {
+ return nil, parseErr
+ }
+
+ var r ReloadStatusResponse
+ d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
+ DecodeHook: mapstructure.StringToTimeHookFunc(time.RFC3339),
+ Result: &r,
+ })
+ if err != nil {
+ return nil, err
+ }
+ err = d.Decode(secret.Data)
+ if err != nil {
+ return nil, err
+ }
+ return &r, nil
+ }
+ return nil, nil
+}
+
+// catalogPathByType is a helper to construct the proper API path by plugin type
+func catalogPathByType(pluginType PluginType, name string) string {
+ path := fmt.Sprintf("/v1/sys/plugins/catalog/%s/%s", pluginType, name)
+
+ // Backwards compat, if type is not provided then use old path
+ if pluginType == PluginTypeUnknown {
+ path = fmt.Sprintf("/v1/sys/plugins/catalog/%s", name)
+ }
+
+ return path
+}
diff --git a/api/sys_plugins_test.go b/api/sys_plugins_test.go
new file mode 100644
index 0000000..3673181
--- /dev/null
+++ b/api/sys_plugins_test.go
@@ -0,0 +1,324 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "net/http" + "net/http/httptest" + "reflect" + "testing" + + "github.com/hashicorp/go-secure-stdlib/strutil" +) + +func TestRegisterPlugin(t *testing.T) { + mockVaultServer := httptest.NewServer(http.HandlerFunc(mockVaultHandlerRegister)) + defer mockVaultServer.Close() + + cfg := DefaultConfig() + cfg.Address = mockVaultServer.URL + client, err := NewClient(cfg) + if err != nil { + t.Fatal(err) + } + + err = client.Sys().RegisterPluginWithContext(context.Background(), &RegisterPluginInput{ + Version: "v1.0.0", + }) + if err != nil { + t.Fatal(err) + } +} + +func TestListPlugins(t *testing.T) { + mockVaultServer := httptest.NewServer(http.HandlerFunc(mockVaultHandlerList)) + defer mockVaultServer.Close() + + cfg := DefaultConfig() + cfg.Address = mockVaultServer.URL + client, err := NewClient(cfg) + if err != nil { + t.Fatal(err) + } + + for name, tc := range map[string]struct { + input ListPluginsInput + expectedPlugins map[PluginType][]string + }{ + "no type specified": { + input: ListPluginsInput{}, + expectedPlugins: map[PluginType][]string{ + PluginTypeCredential: {"alicloud"}, + PluginTypeDatabase: {"cassandra-database-plugin"}, + PluginTypeSecrets: {"ad", "alicloud"}, + }, + }, + "only auth plugins": { + input: ListPluginsInput{Type: PluginTypeCredential}, + expectedPlugins: map[PluginType][]string{ + PluginTypeCredential: {"alicloud"}, + }, + }, + "only database plugins": { + input: ListPluginsInput{Type: PluginTypeDatabase}, + expectedPlugins: map[PluginType][]string{ + PluginTypeDatabase: {"cassandra-database-plugin"}, + }, + }, + "only secret plugins": { + input: ListPluginsInput{Type: PluginTypeSecrets}, + expectedPlugins: map[PluginType][]string{ + PluginTypeSecrets: {"ad", "alicloud"}, + }, + }, + } { + t.Run(name, func(t *testing.T) { + resp, err := client.Sys().ListPluginsWithContext(context.Background(), &tc.input) + if err != nil { + t.Fatal(err) + } + + for pluginType, expected := range tc.expectedPlugins { + actualPlugins := resp.PluginsByType[pluginType] + if len(expected) != len(actualPlugins) { + t.Fatal("Wrong number of plugins", expected, actualPlugins) + } + for i := range actualPlugins { + if expected[i] != actualPlugins[i] { + t.Fatalf("Expected %q but got %q", expected[i], actualPlugins[i]) + } + } + + for _, expectedPlugin := range expected { + found := false + for _, plugin := range resp.Details { + if plugin.Type == pluginType.String() && plugin.Name == expectedPlugin { + found = true + break + } + } + if !found { + t.Errorf("Expected to find %s plugin %s but not found in details: %#v", pluginType.String(), expectedPlugin, resp.Details) + } + } + } + + for _, actual := range resp.Details { + pluginType, err := ParsePluginType(actual.Type) + if err != nil { + t.Fatal(err) + } + if !strutil.StrListContains(tc.expectedPlugins[pluginType], actual.Name) { + t.Errorf("Did not expect to find %s in details", actual.Name) + } + } + }) + } +} + +func TestGetPlugin(t *testing.T) { + for name, tc := range map[string]struct { + version string + body string + expected GetPluginResponse + }{ + "builtin": { + body: getResponse, + expected: GetPluginResponse{ + Args: nil, + Builtin: true, + Command: "", + Name: "azure", + SHA256: "", + DeprecationStatus: "supported", + Version: "v0.14.0+builtin", + }, + }, + "external": { + version: "v1.0.0", + body: getResponseExternal, + expected: GetPluginResponse{ + Args: []string{}, + Builtin: false, + Command: "azure-plugin", + Name: "azure", + SHA256: 
"8ba442dba253803685b05e35ad29dcdebc48dec16774614aa7a4ebe53c1e90e1", + DeprecationStatus: "", + Version: "v1.0.0", + }, + }, + "old server": { + body: getResponseOldServerVersion, + expected: GetPluginResponse{ + Args: nil, + Builtin: true, + Command: "", + Name: "azure", + SHA256: "", + DeprecationStatus: "", + Version: "", + }, + }, + } { + t.Run(name, func(t *testing.T) { + mockVaultServer := httptest.NewServer(http.HandlerFunc(mockVaultHandlerInfo(tc.body))) + defer mockVaultServer.Close() + + cfg := DefaultConfig() + cfg.Address = mockVaultServer.URL + client, err := NewClient(cfg) + if err != nil { + t.Fatal(err) + } + + input := GetPluginInput{ + Name: "azure", + Type: PluginTypeSecrets, + } + if tc.version != "" { + input.Version = tc.version + } + + info, err := client.Sys().GetPluginWithContext(context.Background(), &input) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(tc.expected, *info) { + t.Errorf("expected: %#v\ngot: %#v", tc.expected, info) + } + }) + } +} + +func mockVaultHandlerInfo(body string) func(w http.ResponseWriter, _ *http.Request) { + return func(w http.ResponseWriter, _ *http.Request) { + _, _ = w.Write([]byte(body)) + } +} + +const getResponse = `{ + "request_id": "e93d3f93-8e4f-8443-a803-f1c97c495241", + "lease_id": "", + "renewable": false, + "lease_duration": 0, + "data": { + "args": null, + "builtin": true, + "command": "", + "deprecation_status": "supported", + "name": "azure", + "sha256": "", + "version": "v0.14.0+builtin" + }, + "wrap_info": null, + "warnings": null, + "auth": null +}` + +const getResponseExternal = `{ + "request_id": "e93d3f93-8e4f-8443-a803-f1c97c495241", + "lease_id": "", + "renewable": false, + "lease_duration": 0, + "data": { + "args": [], + "builtin": false, + "command": "azure-plugin", + "name": "azure", + "sha256": "8ba442dba253803685b05e35ad29dcdebc48dec16774614aa7a4ebe53c1e90e1", + "version": "v1.0.0" + }, + "wrap_info": null, + "warnings": null, + "auth": null +}` + +const getResponseOldServerVersion = `{ + "request_id": "e93d3f93-8e4f-8443-a803-f1c97c495241", + "lease_id": "", + "renewable": false, + "lease_duration": 0, + "data": { + "args": null, + "builtin": true, + "command": "", + "name": "azure", + "sha256": "" + }, + "wrap_info": null, + "warnings": null, + "auth": null +}` + +func mockVaultHandlerList(w http.ResponseWriter, _ *http.Request) { + _, _ = w.Write([]byte(listUntypedResponse)) +} + +const listUntypedResponse = `{ + "request_id": "82601a91-cd7a-718f-feca-f573449cc1bb", + "lease_id": "", + "renewable": false, + "lease_duration": 0, + "data": { + "auth": [ + "alicloud" + ], + "database": [ + "cassandra-database-plugin" + ], + "secret": [ + "ad", + "alicloud" + ], + "some_other_unexpected_key": [ + { + "objectKey": "objectValue" + }, + { + "arbitraryData": 7 + } + ], + "detailed": [ + { + "type": "auth", + "name": "alicloud", + "version": "v0.13.0+builtin", + "builtin": true, + "deprecation_status": "supported" + }, + { + "type": "database", + "name": "cassandra-database-plugin", + "version": "v1.13.0+builtin.vault", + "builtin": true, + "deprecation_status": "supported" + }, + { + "type": "secret", + "name": "ad", + "version": "v0.14.0+builtin", + "builtin": true, + "deprecation_status": "supported" + }, + { + "type": "secret", + "name": "alicloud", + "version": "v0.13.0+builtin", + "builtin": true, + "deprecation_status": "supported" + } + ] + }, + "wrap_info": null, + "warnings": null, + "auth": null +}` + +func mockVaultHandlerRegister(w http.ResponseWriter, _ *http.Request) { + _, _ = 
w.Write([]byte(registerResponse)) +} + +const registerResponse = `{}` diff --git a/api/sys_policy.go b/api/sys_policy.go new file mode 100644 index 0000000..9ddffe4 --- /dev/null +++ b/api/sys_policy.go @@ -0,0 +1,137 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) ListPolicies() ([]string, error) { + return c.ListPoliciesWithContext(context.Background()) +} + +func (c *Sys) ListPoliciesWithContext(ctx context.Context) ([]string, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest("LIST", "/v1/sys/policies/acl") + // Set this for broader compatibility, but we use LIST above to be able to + // handle the wrapping lookup function + r.Method = http.MethodGet + r.Params.Set("list", "true") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result []string + err = mapstructure.Decode(secret.Data["keys"], &result) + if err != nil { + return nil, err + } + + return result, err +} + +func (c *Sys) GetPolicy(name string) (string, error) { + return c.GetPolicyWithContext(context.Background(), name) +} + +func (c *Sys) GetPolicyWithContext(ctx context.Context, name string) (string, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, fmt.Sprintf("/v1/sys/policies/acl/%s", name)) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + if resp.StatusCode == 404 { + return "", nil + } + } + if err != nil { + return "", err + } + + secret, err := ParseSecret(resp.Body) + if err != nil { + return "", err + } + if secret == nil || secret.Data == nil { + return "", errors.New("data from server response is empty") + } + + if policyRaw, ok := secret.Data["policy"]; ok { + return policyRaw.(string), nil + } + + return "", fmt.Errorf("no policy found in response") +} + +func (c *Sys) PutPolicy(name, rules string) error { + return c.PutPolicyWithContext(context.Background(), name, rules) +} + +func (c *Sys) PutPolicyWithContext(ctx context.Context, name, rules string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]string{ + "policy": rules, + } + + r := c.c.NewRequest(http.MethodPut, fmt.Sprintf("/v1/sys/policies/acl/%s", name)) + if err := r.SetJSONBody(body); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (c *Sys) DeletePolicy(name string) error { + return c.DeletePolicyWithContext(context.Background(), name) +} + +func (c *Sys) DeletePolicyWithContext(ctx context.Context, name string) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, fmt.Sprintf("/v1/sys/policies/acl/%s", name)) + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +type getPoliciesResp struct { + Rules string `json:"rules"` +} + +type listPoliciesResp struct { + Policies []string `json:"policies"` +} diff --git a/api/sys_raft.go b/api/sys_raft.go new file 
mode 100644 index 0000000..4b9487c --- /dev/null +++ b/api/sys_raft.go @@ -0,0 +1,402 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "archive/tar" + "compress/gzip" + "context" + "encoding/json" + "errors" + "io" + "io/ioutil" + "net/http" + "sync" + "time" + + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/mitchellh/mapstructure" +) + +var ErrIncompleteSnapshot = errors.New("incomplete snapshot, unable to read SHA256SUMS.sealed file") + +// RaftJoinResponse represents the response of the raft join API +type RaftJoinResponse struct { + Joined bool `json:"joined"` +} + +// RaftJoinRequest represents the parameters consumed by the raft join API +type RaftJoinRequest struct { + AutoJoin string `json:"auto_join"` + AutoJoinScheme string `json:"auto_join_scheme"` + AutoJoinPort uint `json:"auto_join_port"` + LeaderAPIAddr string `json:"leader_api_addr"` + LeaderCACert string `json:"leader_ca_cert"` + LeaderClientCert string `json:"leader_client_cert"` + LeaderClientKey string `json:"leader_client_key"` + Retry bool `json:"retry"` + NonVoter bool `json:"non_voter"` +} + +// AutopilotConfig is used for querying/setting the Autopilot configuration. +type AutopilotConfig struct { + CleanupDeadServers bool `json:"cleanup_dead_servers" mapstructure:"cleanup_dead_servers"` + LastContactThreshold time.Duration `json:"last_contact_threshold" mapstructure:"-"` + DeadServerLastContactThreshold time.Duration `json:"dead_server_last_contact_threshold" mapstructure:"-"` + MaxTrailingLogs uint64 `json:"max_trailing_logs" mapstructure:"max_trailing_logs"` + MinQuorum uint `json:"min_quorum" mapstructure:"min_quorum"` + ServerStabilizationTime time.Duration `json:"server_stabilization_time" mapstructure:"-"` + DisableUpgradeMigration bool `json:"disable_upgrade_migration" mapstructure:"disable_upgrade_migration"` +} + +// MarshalJSON makes the autopilot config fields JSON compatible +func (ac *AutopilotConfig) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "cleanup_dead_servers": ac.CleanupDeadServers, + "last_contact_threshold": ac.LastContactThreshold.String(), + "dead_server_last_contact_threshold": ac.DeadServerLastContactThreshold.String(), + "max_trailing_logs": ac.MaxTrailingLogs, + "min_quorum": ac.MinQuorum, + "server_stabilization_time": ac.ServerStabilizationTime.String(), + "disable_upgrade_migration": ac.DisableUpgradeMigration, + }) +} + +// UnmarshalJSON parses the autopilot config JSON blob +func (ac *AutopilotConfig) UnmarshalJSON(b []byte) error { + var data interface{} + err := json.Unmarshal(b, &data) + if err != nil { + return err + } + + conf := data.(map[string]interface{}) + if err = mapstructure.WeakDecode(conf, ac); err != nil { + return err + } + if ac.LastContactThreshold, err = parseutil.ParseDurationSecond(conf["last_contact_threshold"]); err != nil { + return err + } + if ac.DeadServerLastContactThreshold, err = parseutil.ParseDurationSecond(conf["dead_server_last_contact_threshold"]); err != nil { + return err + } + if ac.ServerStabilizationTime, err = parseutil.ParseDurationSecond(conf["server_stabilization_time"]); err != nil { + return err + } + return nil +} + +// AutopilotState represents the response of the raft autopilot state API +type AutopilotState struct { + Healthy bool `mapstructure:"healthy"` + FailureTolerance int `mapstructure:"failure_tolerance"` + Servers map[string]*AutopilotServer `mapstructure:"servers"` + Leader string `mapstructure:"leader"` + Voters 
[]string `mapstructure:"voters"`
+	NonVoters []string `mapstructure:"non_voters"`
+	RedundancyZones map[string]AutopilotZone `mapstructure:"redundancy_zones,omitempty"`
+	Upgrade *AutopilotUpgrade `mapstructure:"upgrade_info,omitempty"`
+	OptimisticFailureTolerance int `mapstructure:"optimistic_failure_tolerance,omitempty"`
+}
+
+// AutopilotServer represents the server blocks in the response of the raft
+// autopilot state API.
+type AutopilotServer struct {
+	ID string `mapstructure:"id"`
+	Name string `mapstructure:"name"`
+	Address string `mapstructure:"address"`
+	NodeStatus string `mapstructure:"node_status"`
+	LastContact string `mapstructure:"last_contact"`
+	LastTerm uint64 `mapstructure:"last_term"`
+	LastIndex uint64 `mapstructure:"last_index"`
+	Healthy bool `mapstructure:"healthy"`
+	StableSince string `mapstructure:"stable_since"`
+	Status string `mapstructure:"status"`
+	Version string `mapstructure:"version"`
+	UpgradeVersion string `mapstructure:"upgrade_version,omitempty"`
+	RedundancyZone string `mapstructure:"redundancy_zone,omitempty"`
+	NodeType string `mapstructure:"node_type,omitempty"`
+}
+
+type AutopilotZone struct {
+	Servers []string `mapstructure:"servers,omitempty"`
+	Voters []string `mapstructure:"voters,omitempty"`
+	FailureTolerance int `mapstructure:"failure_tolerance,omitempty"`
+}
+
+type AutopilotUpgrade struct {
+	Status string `mapstructure:"status"`
+	TargetVersion string `mapstructure:"target_version,omitempty"`
+	TargetVersionVoters []string `mapstructure:"target_version_voters,omitempty"`
+	TargetVersionNonVoters []string `mapstructure:"target_version_non_voters,omitempty"`
+	TargetVersionReadReplicas []string `mapstructure:"target_version_read_replicas,omitempty"`
+	OtherVersionVoters []string `mapstructure:"other_version_voters,omitempty"`
+	OtherVersionNonVoters []string `mapstructure:"other_version_non_voters,omitempty"`
+	OtherVersionReadReplicas []string `mapstructure:"other_version_read_replicas,omitempty"`
+	RedundancyZones map[string]AutopilotZoneUpgradeVersions `mapstructure:"redundancy_zones,omitempty"`
+}
+
+type AutopilotZoneUpgradeVersions struct {
+	TargetVersionVoters []string `mapstructure:"target_version_voters,omitempty"`
+	TargetVersionNonVoters []string `mapstructure:"target_version_non_voters,omitempty"`
+	OtherVersionVoters []string `mapstructure:"other_version_voters,omitempty"`
+	OtherVersionNonVoters []string `mapstructure:"other_version_non_voters,omitempty"`
+}
+
+// RaftJoin wraps RaftJoinWithContext using context.Background.
+func (c *Sys) RaftJoin(opts *RaftJoinRequest) (*RaftJoinResponse, error) {
+	return c.RaftJoinWithContext(context.Background(), opts)
+}
+
+// RaftJoinWithContext adds the node from which this call is invoked to the raft
+// cluster represented by the leader address in the parameter.
+func (c *Sys) RaftJoinWithContext(ctx context.Context, opts *RaftJoinRequest) (*RaftJoinResponse, error) {
+	ctx, cancelFunc := c.c.withConfiguredTimeout(ctx)
+	defer cancelFunc()
+
+	r := c.c.NewRequest(http.MethodPost, "/v1/sys/storage/raft/join")
+
+	if err := r.SetJSONBody(opts); err != nil {
+		return nil, err
+	}
+
+	resp, err := c.c.rawRequestWithContext(ctx, r)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var result RaftJoinResponse
+	err = resp.DecodeJSON(&result)
+	return &result, err
+}
+
+// RaftSnapshot wraps RaftSnapshotWithContext using context.Background.
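+//
+// A minimal usage sketch, assuming a configured *api.Client named client; the
+// file name is illustrative. A snapshot that fails the SHA256SUMS.sealed
+// verification below surfaces ErrIncompleteSnapshot:
+//
+//	f, err := os.Create("vault-raft.snap")
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()
+//	if err := client.Sys().RaftSnapshot(f); err != nil {
+//		// err may be ErrIncompleteSnapshot if SHA256SUMS.sealed was missing or empty.
+//		return err
+//	}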
+func (c *Sys) RaftSnapshot(snapWriter io.Writer) error {
+	return c.RaftSnapshotWithContext(context.Background(), snapWriter)
+}
+
+// RaftSnapshotWithContext invokes the API that takes the snapshot of the raft cluster and
+// writes it to the supplied io.Writer.
+func (c *Sys) RaftSnapshotWithContext(ctx context.Context, snapWriter io.Writer) error {
+	r := c.c.NewRequest(http.MethodGet, "/v1/sys/storage/raft/snapshot")
+	r.URL.RawQuery = r.Params.Encode()
+
+	resp, err := c.c.httpRequestWithContext(ctx, r)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	// Make sure that the last file in the archive, SHA256SUMS.sealed, is present
+	// and non-empty. This is to catch cases where the snapshot failed midstream,
+	// e.g. due to a problem with the seal that prevented encryption of that file.
+	var wg sync.WaitGroup
+	wg.Add(1)
+	var verified bool
+
+	rPipe, wPipe := io.Pipe()
+	dup := io.TeeReader(resp.Body, wPipe)
+	go func() {
+		defer func() {
+			io.Copy(ioutil.Discard, rPipe)
+			rPipe.Close()
+			wg.Done()
+		}()
+
+		uncompressed, err := gzip.NewReader(rPipe)
+		if err != nil {
+			return
+		}
+
+		t := tar.NewReader(uncompressed)
+		var h *tar.Header
+		for {
+			h, err = t.Next()
+			if err != nil {
+				return
+			}
+			if h.Name != "SHA256SUMS.sealed" {
+				continue
+			}
+			var b []byte
+			b, err = ioutil.ReadAll(t)
+			if err != nil || len(b) == 0 {
+				return
+			}
+			verified = true
+			return
+		}
+	}()
+
+	// Copy bytes from dup to snapWriter. This will have a side effect that
+	// everything read from dup will be written to wPipe.
+	_, err = io.Copy(snapWriter, dup)
+	wPipe.Close()
+	if err != nil {
+		rPipe.CloseWithError(err)
+		return err
+	}
+	wg.Wait()
+
+	if !verified {
+		return ErrIncompleteSnapshot
+	}
+	return nil
+}
+
+// RaftSnapshotRestore wraps RaftSnapshotRestoreWithContext using context.Background.
+func (c *Sys) RaftSnapshotRestore(snapReader io.Reader, force bool) error {
+	return c.RaftSnapshotRestoreWithContext(context.Background(), snapReader, force)
+}
+
+// RaftSnapshotRestoreWithContext reads the snapshot from the io.Reader and installs that
+// snapshot, returning the cluster to the state defined by it.
+func (c *Sys) RaftSnapshotRestoreWithContext(ctx context.Context, snapReader io.Reader, force bool) error {
+	path := "/v1/sys/storage/raft/snapshot"
+	if force {
+		path = "/v1/sys/storage/raft/snapshot-force"
+	}
+
+	r := c.c.NewRequest(http.MethodPost, path)
+	r.Body = snapReader
+
+	resp, err := c.c.httpRequestWithContext(ctx, r)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return nil
+}
+
+// RaftAutopilotState wraps RaftAutopilotStateWithContext using context.Background.
+func (c *Sys) RaftAutopilotState() (*AutopilotState, error) {
+	return c.RaftAutopilotStateWithContext(context.Background())
+}
+
+// RaftAutopilotStateWithDRToken wraps RaftAutopilotStateWithContext using the given DR operation token.
+func (c *Sys) RaftAutopilotStateWithDRToken(drToken string) (*AutopilotState, error) {
+	return c.RaftAutopilotStateWithContext(context.WithValue(context.Background(), "dr-token", drToken))
+}
+
+// RaftAutopilotStateWithContext returns the state of the raft cluster as seen by autopilot.
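+//
+// A minimal sketch of inspecting the result, assuming a configured client;
+// note the method returns nil, nil when the server responds with a 404:
+//
+//	state, err := client.Sys().RaftAutopilotState()
+//	if err != nil {
+//		return err
+//	}
+//	if state != nil && !state.Healthy {
+//		log.Printf("autopilot unhealthy, failure tolerance %d", state.FailureTolerance)
+//	}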
+func (c *Sys) RaftAutopilotStateWithContext(ctx context.Context) (*AutopilotState, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + if ctx.Value("dr-token") != nil { + c.c.SetToken(ctx.Value("dr-token").(string)) + } + r := c.c.NewRequest(http.MethodGet, "/v1/sys/storage/raft/autopilot/state") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + if resp.StatusCode == 404 { + return nil, nil + } + } + if err != nil { + return nil, err + } + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result AutopilotState + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +// RaftAutopilotConfiguration wraps RaftAutopilotConfigurationWithContext using context.Background. +func (c *Sys) RaftAutopilotConfiguration() (*AutopilotConfig, error) { + return c.RaftAutopilotConfigurationWithContext(context.Background()) +} + +// RaftAutopilotConfigurationWithDRToken wraps RaftAutopilotConfigurationWithContext using the given token. +func (c *Sys) RaftAutopilotConfigurationWithDRToken(drToken string) (*AutopilotConfig, error) { + return c.RaftAutopilotConfigurationWithContext(context.WithValue(context.Background(), "dr-token", drToken)) +} + +// RaftAutopilotConfigurationWithContext fetches the autopilot config. +func (c *Sys) RaftAutopilotConfigurationWithContext(ctx context.Context) (*AutopilotConfig, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + if ctx.Value("dr-token") != nil { + c.c.SetToken(ctx.Value("dr-token").(string)) + } + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/storage/raft/autopilot/configuration") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + if resp.StatusCode == 404 { + return nil, nil + } + } + if err != nil { + return nil, err + } + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil { + return nil, errors.New("data from server response is empty") + } + + var result AutopilotConfig + if err = mapstructure.Decode(secret.Data, &result); err != nil { + return nil, err + } + if result.LastContactThreshold, err = parseutil.ParseDurationSecond(secret.Data["last_contact_threshold"]); err != nil { + return nil, err + } + if result.DeadServerLastContactThreshold, err = parseutil.ParseDurationSecond(secret.Data["dead_server_last_contact_threshold"]); err != nil { + return nil, err + } + if result.ServerStabilizationTime, err = parseutil.ParseDurationSecond(secret.Data["server_stabilization_time"]); err != nil { + return nil, err + } + + return &result, err +} + +// PutRaftAutopilotConfiguration wraps PutRaftAutopilotConfigurationWithContext using context.Background. 
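+//
+// A minimal sketch, assuming a configured client; the values are purely
+// illustrative, not recommendations. The time.Duration fields are serialized
+// as strings by AutopilotConfig.MarshalJSON above:
+//
+//	err := client.Sys().PutRaftAutopilotConfiguration(&AutopilotConfig{
+//		CleanupDeadServers:             true,
+//		LastContactThreshold:           10 * time.Second,
+//		DeadServerLastContactThreshold: 24 * time.Hour,
+//		MaxTrailingLogs:                1000,
+//		MinQuorum:                      3,
+//		ServerStabilizationTime:        10 * time.Second,
+//	})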
+func (c *Sys) PutRaftAutopilotConfiguration(opts *AutopilotConfig) error { + return c.PutRaftAutopilotConfigurationWithContext(context.Background(), opts) +} + +// PutRaftAutopilotConfigurationWithContext allows modifying the raft autopilot configuration +func (c *Sys) PutRaftAutopilotConfigurationWithContext(ctx context.Context, opts *AutopilotConfig) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/sys/storage/raft/autopilot/configuration") + + if err := r.SetJSONBody(opts); err != nil { + return err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} diff --git a/api/sys_rekey.go b/api/sys_rekey.go new file mode 100644 index 0000000..5732017 --- /dev/null +++ b/api/sys_rekey.go @@ -0,0 +1,482 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "errors" + "net/http" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) RekeyStatus() (*RekeyStatusResponse, error) { + return c.RekeyStatusWithContext(context.Background()) +} + +func (c *Sys) RekeyStatusWithContext(ctx context.Context) (*RekeyStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/rekey/init") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyStatus() (*RekeyStatusResponse, error) { + return c.RekeyRecoveryKeyStatusWithContext(context.Background()) +} + +func (c *Sys) RekeyRecoveryKeyStatusWithContext(ctx context.Context) (*RekeyStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/rekey-recovery-key/init") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyVerificationStatus() (*RekeyVerificationStatusResponse, error) { + return c.RekeyVerificationStatusWithContext(context.Background()) +} + +func (c *Sys) RekeyVerificationStatusWithContext(ctx context.Context) (*RekeyVerificationStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/rekey/verify") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyVerificationStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyVerificationStatus() (*RekeyVerificationStatusResponse, error) { + return c.RekeyRecoveryKeyVerificationStatusWithContext(context.Background()) +} + +func (c *Sys) RekeyRecoveryKeyVerificationStatusWithContext(ctx context.Context) (*RekeyVerificationStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/rekey-recovery-key/verify") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyVerificationStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) 
RekeyInit(config *RekeyInitRequest) (*RekeyStatusResponse, error) { + return c.RekeyInitWithContext(context.Background(), config) +} + +func (c *Sys) RekeyInitWithContext(ctx context.Context, config *RekeyInitRequest) (*RekeyStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/rekey/init") + if err := r.SetJSONBody(config); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyInit(config *RekeyInitRequest) (*RekeyStatusResponse, error) { + return c.RekeyRecoveryKeyInitWithContext(context.Background(), config) +} + +func (c *Sys) RekeyRecoveryKeyInitWithContext(ctx context.Context, config *RekeyInitRequest) (*RekeyStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/rekey-recovery-key/init") + if err := r.SetJSONBody(config); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyCancel() error { + return c.RekeyCancelWithContext(context.Background()) +} + +func (c *Sys) RekeyCancelWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, "/v1/sys/rekey/init") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RekeyRecoveryKeyCancel() error { + return c.RekeyRecoveryKeyCancelWithContext(context.Background()) +} + +func (c *Sys) RekeyRecoveryKeyCancelWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, "/v1/sys/rekey-recovery-key/init") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RekeyVerificationCancel() error { + return c.RekeyVerificationCancelWithContext(context.Background()) +} + +func (c *Sys) RekeyVerificationCancelWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, "/v1/sys/rekey/verify") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RekeyRecoveryKeyVerificationCancel() error { + return c.RekeyRecoveryKeyVerificationCancelWithContext(context.Background()) +} + +func (c *Sys) RekeyRecoveryKeyVerificationCancelWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, "/v1/sys/rekey-recovery-key/verify") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RekeyUpdate(shard, nonce string) (*RekeyUpdateResponse, error) { + return c.RekeyUpdateWithContext(context.Background(), shard, nonce) +} + +func (c *Sys) RekeyUpdateWithContext(ctx context.Context, shard, nonce string) (*RekeyUpdateResponse, error) { + ctx, cancelFunc := 
c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/rekey/update") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyUpdateResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyUpdate(shard, nonce string) (*RekeyUpdateResponse, error) { + return c.RekeyRecoveryKeyUpdateWithContext(context.Background(), shard, nonce) +} + +func (c *Sys) RekeyRecoveryKeyUpdateWithContext(ctx context.Context, shard, nonce string) (*RekeyUpdateResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/rekey-recovery-key/update") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyUpdateResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRetrieveBackup() (*RekeyRetrieveResponse, error) { + return c.RekeyRetrieveBackupWithContext(context.Background()) +} + +func (c *Sys) RekeyRetrieveBackupWithContext(ctx context.Context) (*RekeyRetrieveResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/rekey/backup") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result RekeyRetrieveResponse + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +func (c *Sys) RekeyRetrieveRecoveryBackup() (*RekeyRetrieveResponse, error) { + return c.RekeyRetrieveRecoveryBackupWithContext(context.Background()) +} + +func (c *Sys) RekeyRetrieveRecoveryBackupWithContext(ctx context.Context) (*RekeyRetrieveResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/rekey/recovery-key-backup") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result RekeyRetrieveResponse + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +func (c *Sys) RekeyDeleteBackup() error { + return c.RekeyDeleteBackupWithContext(context.Background()) +} + +func (c *Sys) RekeyDeleteBackupWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, "/v1/sys/rekey/backup") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + + return err +} + +func (c *Sys) RekeyDeleteRecoveryBackup() error { + return 
c.RekeyDeleteRecoveryBackupWithContext(context.Background()) +} + +func (c *Sys) RekeyDeleteRecoveryBackupWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodDelete, "/v1/sys/rekey/recovery-key-backup") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + + return err +} + +func (c *Sys) RekeyVerificationUpdate(shard, nonce string) (*RekeyVerificationUpdateResponse, error) { + return c.RekeyVerificationUpdateWithContext(context.Background(), shard, nonce) +} + +func (c *Sys) RekeyVerificationUpdateWithContext(ctx context.Context, shard, nonce string) (*RekeyVerificationUpdateResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/rekey/verify") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyVerificationUpdateResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyVerificationUpdate(shard, nonce string) (*RekeyVerificationUpdateResponse, error) { + return c.RekeyRecoveryKeyVerificationUpdateWithContext(context.Background(), shard, nonce) +} + +func (c *Sys) RekeyRecoveryKeyVerificationUpdateWithContext(ctx context.Context, shard, nonce string) (*RekeyVerificationUpdateResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/rekey-recovery-key/verify") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyVerificationUpdateResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type RekeyInitRequest struct { + SecretShares int `json:"secret_shares"` + SecretThreshold int `json:"secret_threshold"` + StoredShares int `json:"stored_shares"` + PGPKeys []string `json:"pgp_keys"` + Backup bool + RequireVerification bool `json:"require_verification"` +} + +type RekeyStatusResponse struct { + Nonce string `json:"nonce"` + Started bool `json:"started"` + T int `json:"t"` + N int `json:"n"` + Progress int `json:"progress"` + Required int `json:"required"` + PGPFingerprints []string `json:"pgp_fingerprints"` + Backup bool `json:"backup"` + VerificationRequired bool `json:"verification_required"` + VerificationNonce string `json:"verification_nonce"` +} + +type RekeyUpdateResponse struct { + Nonce string `json:"nonce"` + Complete bool `json:"complete"` + Keys []string `json:"keys"` + KeysB64 []string `json:"keys_base64"` + PGPFingerprints []string `json:"pgp_fingerprints"` + Backup bool `json:"backup"` + VerificationRequired bool `json:"verification_required"` + VerificationNonce string `json:"verification_nonce,omitempty"` +} + +type RekeyRetrieveResponse struct { + Nonce string `json:"nonce" mapstructure:"nonce"` + Keys map[string][]string `json:"keys" mapstructure:"keys"` + KeysB64 map[string][]string `json:"keys_base64" mapstructure:"keys_base64"` +} + +type RekeyVerificationStatusResponse struct { + Nonce string `json:"nonce"` + Started bool `json:"started"` + T int `json:"t"` + N int 
`json:"n"` + Progress int `json:"progress"` +} + +type RekeyVerificationUpdateResponse struct { + Nonce string `json:"nonce"` + Complete bool `json:"complete"` +} diff --git a/api/sys_rotate.go b/api/sys_rotate.go new file mode 100644 index 0000000..295d989 --- /dev/null +++ b/api/sys_rotate.go @@ -0,0 +1,105 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "time" +) + +func (c *Sys) Rotate() error { + return c.RotateWithContext(context.Background()) +} + +func (c *Sys) RotateWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPost, "/v1/sys/rotate") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) KeyStatus() (*KeyStatus, error) { + return c.KeyStatusWithContext(context.Background()) +} + +func (c *Sys) KeyStatusWithContext(ctx context.Context) (*KeyStatus, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodGet, "/v1/sys/key-status") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result KeyStatus + + termRaw, ok := secret.Data["term"] + if !ok { + return nil, errors.New("term not found in response") + } + term, ok := termRaw.(json.Number) + if !ok { + return nil, errors.New("could not convert term to a number") + } + term64, err := term.Int64() + if err != nil { + return nil, err + } + result.Term = int(term64) + + installTimeRaw, ok := secret.Data["install_time"] + if !ok { + return nil, errors.New("install_time not found in response") + } + installTimeStr, ok := installTimeRaw.(string) + if !ok { + return nil, errors.New("could not convert install_time to a string") + } + installTime, err := time.Parse(time.RFC3339Nano, installTimeStr) + if err != nil { + return nil, err + } + result.InstallTime = installTime + + encryptionsRaw, ok := secret.Data["encryptions"] + if ok { + encryptions, ok := encryptionsRaw.(json.Number) + if !ok { + return nil, errors.New("could not convert encryptions to a number") + } + encryptions64, err := encryptions.Int64() + if err != nil { + return nil, err + } + result.Encryptions = int(encryptions64) + } + + return &result, err +} + +type KeyStatus struct { + Term int `json:"term"` + InstallTime time.Time `json:"install_time"` + Encryptions int `json:"encryptions"` +} diff --git a/api/sys_seal.go b/api/sys_seal.go new file mode 100644 index 0000000..7a9c562 --- /dev/null +++ b/api/sys_seal.go @@ -0,0 +1,122 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "net/http" +) + +func (c *Sys) SealStatus() (*SealStatusResponse, error) { + return c.SealStatusWithContext(context.Background()) +} + +func (c *Sys) SealStatusWithContext(ctx context.Context) (*SealStatusResponse, error) { + r := c.c.NewRequest(http.MethodGet, "/v1/sys/seal-status") + return sealStatusRequestWithContext(ctx, c, r) +} + +func (c *Sys) Seal() error { + return c.SealWithContext(context.Background()) +} + +func (c *Sys) SealWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/seal") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (c *Sys) ResetUnsealProcess() (*SealStatusResponse, error) { + return c.ResetUnsealProcessWithContext(context.Background()) +} + +func (c *Sys) ResetUnsealProcessWithContext(ctx context.Context) (*SealStatusResponse, error) { + body := map[string]interface{}{"reset": true} + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/unseal") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + return sealStatusRequestWithContext(ctx, c, r) +} + +func (c *Sys) Unseal(shard string) (*SealStatusResponse, error) { + return c.UnsealWithContext(context.Background(), shard) +} + +func (c *Sys) UnsealWithContext(ctx context.Context, shard string) (*SealStatusResponse, error) { + body := map[string]interface{}{"key": shard} + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/unseal") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + return sealStatusRequestWithContext(ctx, c, r) +} + +func (c *Sys) UnsealWithOptions(opts *UnsealOpts) (*SealStatusResponse, error) { + return c.UnsealWithOptionsWithContext(context.Background(), opts) +} + +func (c *Sys) UnsealWithOptionsWithContext(ctx context.Context, opts *UnsealOpts) (*SealStatusResponse, error) { + r := c.c.NewRequest(http.MethodPut, "/v1/sys/unseal") + + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + return sealStatusRequestWithContext(ctx, c, r) +} + +func sealStatusRequestWithContext(ctx context.Context, c *Sys, r *Request) (*SealStatusResponse, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + resp, err := c.c.rawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result SealStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type SealStatusResponse struct { + Type string `json:"type"` + Initialized bool `json:"initialized"` + Sealed bool `json:"sealed"` + T int `json:"t"` + N int `json:"n"` + Progress int `json:"progress"` + Nonce string `json:"nonce"` + Version string `json:"version"` + BuildDate string `json:"build_date"` + Migration bool `json:"migration"` + ClusterName string `json:"cluster_name,omitempty"` + ClusterID string `json:"cluster_id,omitempty"` + RecoverySeal bool `json:"recovery_seal"` + StorageType string `json:"storage_type,omitempty"` + HCPLinkStatus string `json:"hcp_link_status,omitempty"` + HCPLinkResourceID string `json:"hcp_link_resource_ID,omitempty"` + Warnings []string `json:"warnings,omitempty"` +} + +type UnsealOpts struct { + Key string `json:"key"` + Reset bool `json:"reset"` + Migrate bool `json:"migrate"` +} diff --git a/api/sys_stepdown.go b/api/sys_stepdown.go new file mode 100644 index 0000000..c55ed1e --- /dev/null +++ 
b/api/sys_stepdown.go @@ -0,0 +1,26 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package api + +import ( + "context" + "net/http" +) + +func (c *Sys) StepDown() error { + return c.StepDownWithContext(context.Background()) +} + +func (c *Sys) StepDownWithContext(ctx context.Context) error { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + r := c.c.NewRequest(http.MethodPut, "/v1/sys/step-down") + + resp, err := c.c.rawRequestWithContext(ctx, r) + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + return err +} diff --git a/api/test-fixtures/agent_config.hcl b/api/test-fixtures/agent_config.hcl new file mode 100644 index 0000000..38d8026 --- /dev/null +++ b/api/test-fixtures/agent_config.hcl @@ -0,0 +1,5 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +vault_addr="http://127.0.0.1:8200" +ssh_mount_point="ssh" diff --git a/api/test-fixtures/keys/cert.pem b/api/test-fixtures/keys/cert.pem new file mode 100644 index 0000000..67ef67d --- /dev/null +++ b/api/test-fixtures/keys/cert.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDtTCCAp2gAwIBAgIUf+jhKTFBnqSs34II0WS1L4QsbbAwDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzQxWhcNMjUw +MTA1MTAyODExWjAbMRkwFwYDVQQDExBjZXJ0LmV4YW1wbGUuY29tMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxS +TRAVnygAftetT8puHflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGn +SgMld6ZWRhNheZhA6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmi +YYMiIWplidMmMO5NTRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5 +donyqtnaHuIJGuUdy54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVG +B+5+AAGF5iuHC3N2DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABo4H1 +MIHyMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUm++e +HpyM3p708bgZJuRYEdX1o+UwHwYDVR0jBBgwFoAUncSzT/6HMexyuiU9/7EgHu+o +k5swOwYIKwYBBQUHAQEELzAtMCsGCCsGAQUFBzAChh9odHRwOi8vMTI3LjAuMC4x +OjgyMDAvdjEvcGtpL2NhMCEGA1UdEQQaMBiCEGNlcnQuZXhhbXBsZS5jb22HBH8A +AAEwMQYDVR0fBCowKDAmoCSgIoYgaHR0cDovLzEyNy4wLjAuMTo4MjAwL3YxL3Br +aS9jcmwwDQYJKoZIhvcNAQELBQADggEBABsuvmPSNjjKTVN6itWzdQy+SgMIrwfs +X1Yb9Lefkkwmp9ovKFNQxa4DucuCuzXcQrbKwWTfHGgR8ct4rf30xCRoA7dbQWq4 +aYqNKFWrRaBRAaaYZ/O1ApRTOrXqRx9Eqr0H1BXLsoAq+mWassL8sf6siae+CpwA +KqBko5G0dNXq5T4i2LQbmoQSVetIrCJEeMrU+idkuqfV2h1BQKgSEhFDABjFdTCN +QDAHsEHsi2M4/jRW9fqEuhHSDfl2n7tkFUI8wTHUUCl7gXwweJ4qtaSXIwKXYzNj +xqKHA8Purc1Yfybz4iE1JCROi9fInKlzr5xABq8nb9Qc/J9DIQM+Xmk= +-----END CERTIFICATE----- \ No newline at end of file diff --git a/api/test-fixtures/keys/key.pem b/api/test-fixtures/keys/key.pem new file mode 100644 index 0000000..add9820 --- /dev/null +++ b/api/test-fixtures/keys/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxSTRAVnygAftetT8pu +HflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGnSgMld6ZWRhNheZhA +6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmiYYMiIWplidMmMO5N +TRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5donyqtnaHuIJGuUd +y54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVGB+5+AAGF5iuHC3N2 +DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABAoIBAHR7fFV0eAGaopsX +9OD0TUGlsephBXb43g0GYHfJ/1Ew18w9oaxszJEqkl+PB4W3xZ3yG3e8ZomxDOhF +RreF2WgG5xOfhDogMwu6NodbArfgnAvoC6JnW3qha8HMP4F500RFVyCRcd6A3Frd +rFtaZn/UyCsBAN8/zkwPeYHayo7xX6d9kzgRl9HluEX5PXI5+3uiBDUiM085gkLI +5Cmadh9fMdjfhDXI4x2JYmILpp/9Nlc/krB15s5n1MPNtn3yL0TI0tWp0WlwDCV7 +oUm1SfIM0F1fXGFyFDcqwoIr6JCQgXk6XtTg31YhH1xgUIclUVdtHqmAwAbLdIhQ +GAiHn2kCgYEAwD4pZ8HfpiOG/EHNoWsMATc/5yC7O8F9WbvcHZQIymLY4v/7HKZb 
+VyOR6UQ5/O2cztSGIuKSF6+OK1C34lOyCuTSOTFrjlgEYtLIXjdGLfFdtOO8GRQR +akVXdwuzNAjTBaH5eXbG+NKcjmCvZL48dQVlfDTVulzFGbcsVTHIMQUCgYEA7IQI +FVsKnY3KqpyGqXq92LMcsT3XgW6X1BIIV+YhJ5AFUFkFrjrbXs94/8XyLfi0xBQy +efK+8g5sMs7koF8LyZEcAXWZJQduaKB71hoLlRaU4VQkL/dl2B6VFmAII/CsRCYh +r9RmDN2PF/mp98Ih9dpC1VqcCDRGoTYsd7jLalMCgYAMgH5k1wDaZxkSMp1S0AlZ +0uP+/evvOOgT+9mWutfPgZolOQx1koQCKLgGeX9j6Xf3I28NubpSfAI84uTyfQrp +FnRtb79U5Hh0jMynA+U2e6niZ6UF5H41cQj9Hu+qhKBkj2IP+h96cwfnYnZFkPGR +kqZE65KyqfHPeFATwkcImQKBgCdrfhlpGiTWXCABhKQ8s+WpPLAB2ahV8XJEKyXT +UlVQuMIChGLcpnFv7P/cUxf8asx/fUY8Aj0/0CLLvulHziQjTmKj4gl86pb/oIQ3 +xRRtNhU0O+/OsSfLORgIm3K6C0w0esregL/GMbJSR1TnA1gBr7/1oSnw5JC8Ab9W +injHAoGAJT1MGAiQrhlt9GCGe6Ajw4omdbY0wS9NXefnFhf7EwL0es52ezZ28zpU +2LXqSFbtann5CHgpSLxiMYPDIf+er4xgg9Bz34tz1if1rDfP2Qrxdrpr4jDnrGT3 +gYC2qCpvVD9RRUMKFfnJTfl5gMQdBW/LINkHtJ82snAeLl3gjQ4= +-----END RSA PRIVATE KEY----- diff --git a/api/test-fixtures/keys/pkioutput b/api/test-fixtures/keys/pkioutput new file mode 100644 index 0000000..526ff03 --- /dev/null +++ b/api/test-fixtures/keys/pkioutput @@ -0,0 +1,74 @@ +Key Value +lease_id pki/issue/example-dot-com/d8214077-9976-8c68-9c07-6610da30aea4 +lease_duration 279359999 +lease_renewable false +certificate -----BEGIN CERTIFICATE----- +MIIDtTCCAp2gAwIBAgIUf+jhKTFBnqSs34II0WS1L4QsbbAwDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzQxWhcNMjUw +MTA1MTAyODExWjAbMRkwFwYDVQQDExBjZXJ0LmV4YW1wbGUuY29tMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxS +TRAVnygAftetT8puHflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGn +SgMld6ZWRhNheZhA6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmi +YYMiIWplidMmMO5NTRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5 +donyqtnaHuIJGuUdy54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVG +B+5+AAGF5iuHC3N2DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABo4H1 +MIHyMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUm++e +HpyM3p708bgZJuRYEdX1o+UwHwYDVR0jBBgwFoAUncSzT/6HMexyuiU9/7EgHu+o +k5swOwYIKwYBBQUHAQEELzAtMCsGCCsGAQUFBzAChh9odHRwOi8vMTI3LjAuMC4x +OjgyMDAvdjEvcGtpL2NhMCEGA1UdEQQaMBiCEGNlcnQuZXhhbXBsZS5jb22HBH8A +AAEwMQYDVR0fBCowKDAmoCSgIoYgaHR0cDovLzEyNy4wLjAuMTo4MjAwL3YxL3Br +aS9jcmwwDQYJKoZIhvcNAQELBQADggEBABsuvmPSNjjKTVN6itWzdQy+SgMIrwfs +X1Yb9Lefkkwmp9ovKFNQxa4DucuCuzXcQrbKwWTfHGgR8ct4rf30xCRoA7dbQWq4 +aYqNKFWrRaBRAaaYZ/O1ApRTOrXqRx9Eqr0H1BXLsoAq+mWassL8sf6siae+CpwA +KqBko5G0dNXq5T4i2LQbmoQSVetIrCJEeMrU+idkuqfV2h1BQKgSEhFDABjFdTCN +QDAHsEHsi2M4/jRW9fqEuhHSDfl2n7tkFUI8wTHUUCl7gXwweJ4qtaSXIwKXYzNj +xqKHA8Purc1Yfybz4iE1JCROi9fInKlzr5xABq8nb9Qc/J9DIQM+Xmk= +-----END CERTIFICATE----- +issuing_ca -----BEGIN CERTIFICATE----- +MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw +MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7 +Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0 +z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x +AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb +6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH +SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G +A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx +7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc +BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA +wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2 +U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa 
+cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N +ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ +t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk +zehNe5dFTjFpylg1o6b8Ow== +-----END CERTIFICATE----- +private_key -----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxSTRAVnygAftetT8pu +HflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGnSgMld6ZWRhNheZhA +6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmiYYMiIWplidMmMO5N +TRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5donyqtnaHuIJGuUd +y54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVGB+5+AAGF5iuHC3N2 +DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABAoIBAHR7fFV0eAGaopsX +9OD0TUGlsephBXb43g0GYHfJ/1Ew18w9oaxszJEqkl+PB4W3xZ3yG3e8ZomxDOhF +RreF2WgG5xOfhDogMwu6NodbArfgnAvoC6JnW3qha8HMP4F500RFVyCRcd6A3Frd +rFtaZn/UyCsBAN8/zkwPeYHayo7xX6d9kzgRl9HluEX5PXI5+3uiBDUiM085gkLI +5Cmadh9fMdjfhDXI4x2JYmILpp/9Nlc/krB15s5n1MPNtn3yL0TI0tWp0WlwDCV7 +oUm1SfIM0F1fXGFyFDcqwoIr6JCQgXk6XtTg31YhH1xgUIclUVdtHqmAwAbLdIhQ +GAiHn2kCgYEAwD4pZ8HfpiOG/EHNoWsMATc/5yC7O8F9WbvcHZQIymLY4v/7HKZb +VyOR6UQ5/O2cztSGIuKSF6+OK1C34lOyCuTSOTFrjlgEYtLIXjdGLfFdtOO8GRQR +akVXdwuzNAjTBaH5eXbG+NKcjmCvZL48dQVlfDTVulzFGbcsVTHIMQUCgYEA7IQI +FVsKnY3KqpyGqXq92LMcsT3XgW6X1BIIV+YhJ5AFUFkFrjrbXs94/8XyLfi0xBQy +efK+8g5sMs7koF8LyZEcAXWZJQduaKB71hoLlRaU4VQkL/dl2B6VFmAII/CsRCYh +r9RmDN2PF/mp98Ih9dpC1VqcCDRGoTYsd7jLalMCgYAMgH5k1wDaZxkSMp1S0AlZ +0uP+/evvOOgT+9mWutfPgZolOQx1koQCKLgGeX9j6Xf3I28NubpSfAI84uTyfQrp +FnRtb79U5Hh0jMynA+U2e6niZ6UF5H41cQj9Hu+qhKBkj2IP+h96cwfnYnZFkPGR +kqZE65KyqfHPeFATwkcImQKBgCdrfhlpGiTWXCABhKQ8s+WpPLAB2ahV8XJEKyXT +UlVQuMIChGLcpnFv7P/cUxf8asx/fUY8Aj0/0CLLvulHziQjTmKj4gl86pb/oIQ3 +xRRtNhU0O+/OsSfLORgIm3K6C0w0esregL/GMbJSR1TnA1gBr7/1oSnw5JC8Ab9W +injHAoGAJT1MGAiQrhlt9GCGe6Ajw4omdbY0wS9NXefnFhf7EwL0es52ezZ28zpU +2LXqSFbtann5CHgpSLxiMYPDIf+er4xgg9Bz34tz1if1rDfP2Qrxdrpr4jDnrGT3 +gYC2qCpvVD9RRUMKFfnJTfl5gMQdBW/LINkHtJ82snAeLl3gjQ4= +-----END RSA PRIVATE KEY----- +private_key_type rsa diff --git a/api/test-fixtures/root/pkioutput b/api/test-fixtures/root/pkioutput new file mode 100644 index 0000000..312ae18 --- /dev/null +++ b/api/test-fixtures/root/pkioutput @@ -0,0 +1,74 @@ +Key Value +lease_id pki/root/generate/exported/7bf99d76-dd3e-2c5b-04ce-5253062ad586 +lease_duration 315359999 +lease_renewable false +certificate -----BEGIN CERTIFICATE----- +MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw +MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7 +Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0 +z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x +AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb +6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH +SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G +A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx +7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc +BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA +wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2 +U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa +cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N +ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ +t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk +zehNe5dFTjFpylg1o6b8Ow== +-----END CERTIFICATE----- +expiration 
1.772072879e+09 +issuing_ca -----BEGIN CERTIFICATE----- +MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw +MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7 +Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0 +z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x +AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb +6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH +SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G +A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx +7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc +BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA +wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2 +U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa +cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N +ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ +t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk +zehNe5dFTjFpylg1o6b8Ow== +-----END CERTIFICATE----- +private_key -----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEA7FMy+FO4hFzZiHFmmY/B6A/zpyCep9PWZfGLUDtDuprHbg2p +t/TQjegMxC0AmWPZEHeG9FIJvT+WQXuLUG5v5MbG4zs21mqnYXwWAbTPaZ35bpp3 +BbbquXFtAqOZG8yfrob1g9OSgmY+bG3ZNxxv35qmbBbuWyUvmPwfjzEAclMxv48w +/2Rs4dXkBuvc9PiNdQ9Sv+ZYG8GIqIcbRd38cSaXI4Q94BOHEr4jm1vqb54H7twv +0Gy9CnLXfn7ZtEDFSmG8WLk2GvIna+UC+gnxSqKCw0rcTby63rQUCgdJZF8VQUVi +18+BMNLXI4pT/P9cxTaCJC/YeuV5a3SaniOoFQIDAQABAoIBAQCoGZJC84JnnIgb +ttZNWuWKBXbCJcDVDikOQJ9hBZbqsFg1X0CfGmQS3MHf9Ubc1Ro8zVjQh15oIEfn +8lIpdzTeXcpxLdiW8ix3ekVJF20F6pnXY8ZP6UnTeOwamXY6QPZAtb0D9UXcvY+f +nw+IVRD6082XS0Rmzu+peYWVXDy+FDN+HJRANBcdJZz8gOmNBIe0qDWx1b85d/s8 +2Kk1Wwdss1IwAGeSddTSwzBNaaHdItZaMZOqPW1gRyBfVSkcUQIE6zn2RKw2b70t +grkIvyRcTdfmiKbqkkJ+eR+ITOUt0cBZSH4cDjlQA+r7hulvoBpQBRj068Toxkcc +bTagHaPBAoGBAPWPGVkHqhTbJ/DjmqDIStxby2M1fhhHt4xUGHinhUYjQjGOtDQ9 +0mfaB7HObudRiSLydRAVGAHGyNJdQcTeFxeQbovwGiYKfZSA1IGpea7dTxPpGEdN +ksA0pzSp9MfKzX/MdLuAkEtO58aAg5YzsgX9hDNxo4MhH/gremZhEGZlAoGBAPZf +lqdYvAL0fjHGJ1FUEalhzGCGE9PH2iOqsxqLCXK7bDbzYSjvuiHkhYJHAOgVdiW1 +lB34UHHYAqZ1VVoFqJ05gax6DE2+r7K5VV3FUCaC0Zm3pavxchU9R/TKP82xRrBj +AFWwdgDTxUyvQEmgPR9sqorftO71Iz2tiwyTpIfxAoGBAIhEMLzHFAse0rtKkrRG +ccR27BbRyHeQ1Lp6sFnEHKEfT8xQdI/I/snCpCJ3e/PBu2g5Q9z416mktiyGs8ib +thTNgYsGYnxZtfaCx2pssanoBcn2wBJRae5fSapf5gY49HDG9MBYR7qCvvvYtSzU +4yWP2ZzyotpRt3vwJKxLkN5BAoGAORHpZvhiDNkvxj3da7Rqpu7VleJZA2y+9hYb +iOF+HcqWhaAY+I+XcTRrTMM/zYLzLEcEeXDEyao86uwxCjpXVZw1kotvAC9UqbTO +tnr3VwRkoxPsV4kFYTAh0+1pnC8dbcxxDmhi3Uww3tOVs7hfkEDuvF6XnebA9A+Y +LyCgMzECgYEA6cCU8QODOivIKWFRXucvWckgE6MYDBaAwe6qcLsd1Q/gpE2e3yQc +4RB3bcyiPROLzMLlXFxf1vSNJQdIaVfrRv+zJeGIiivLPU8+Eq4Lrb+tl1LepcOX +OzQeADTSCn5VidOfjDkIst9UXjMlrFfV9/oJEw5Eiqa6lkNPCGDhfA8= +-----END RSA PRIVATE KEY----- +private_key_type rsa +serial_number 6f:98:9d:f8:67:1a:31:e3:27:60:1b:f7:32:f7:53:19:68:a0:c8:9d diff --git a/api/test-fixtures/root/root.crl b/api/test-fixtures/root/root.crl new file mode 100644 index 0000000..a80c9e4 --- /dev/null +++ b/api/test-fixtures/root/root.crl @@ -0,0 +1,12 @@ +-----BEGIN X509 CRL----- +MIIBrjCBlzANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbRcN +MTYwMjI5MDIyOTE3WhcNMjUwMTA1MTAyOTE3WjArMCkCFG+YnfhnGjHjJ2Ab9zL3 +UxlooMidFxExNjAyMjgyMTI5MTctMDUwMKAjMCEwHwYDVR0jBBgwFoAUncSzT/6H +MexyuiU9/7EgHu+ok5swDQYJKoZIhvcNAQELBQADggEBAG9YDXpNe4LJroKZmVCn 
+HqMhW8eyzyaPak2nPPGCVUnc6vt8rlBYQU+xlBizD6xatZQDMPgrT8sBl9W3ysXk +RUlliHsT/SHddMz5dAZsBPRMJ7pYWLTx8jI4w2WRfbSyI4bY/6qTRNkEBUv+Fk8J +xvwB89+EM0ENcVMhv9ghsUA8h7kOg673HKwRstLDAzxS/uLmEzFjj8SV2m5DbV2Y +UUCKRSV20/kxJMIC9x2KikZhwOSyv1UE1otD+RQvbfAoZPUDmvp2FR/E0NGjBBOg +1TtCPRrl63cjqU3s8KQ4uah9Vj+Cwcu9n/yIKKtNQq4NKHvagv8GlUsoJ4BdAxCw +IA0= +-----END X509 CRL----- diff --git a/api/test-fixtures/root/rootcacert.pem b/api/test-fixtures/root/rootcacert.pem new file mode 100644 index 0000000..dcb307a --- /dev/null +++ b/api/test-fixtures/root/rootcacert.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw +MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7 +Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0 +z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x +AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb +6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH +SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G +A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx +7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc +BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA +wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2 +U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa +cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N +ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ +t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk +zehNe5dFTjFpylg1o6b8Ow== +-----END CERTIFICATE----- diff --git a/api/test-fixtures/root/rootcakey.pem b/api/test-fixtures/root/rootcakey.pem new file mode 100644 index 0000000..e950da5 --- /dev/null +++ b/api/test-fixtures/root/rootcakey.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEA7FMy+FO4hFzZiHFmmY/B6A/zpyCep9PWZfGLUDtDuprHbg2p +t/TQjegMxC0AmWPZEHeG9FIJvT+WQXuLUG5v5MbG4zs21mqnYXwWAbTPaZ35bpp3 +BbbquXFtAqOZG8yfrob1g9OSgmY+bG3ZNxxv35qmbBbuWyUvmPwfjzEAclMxv48w +/2Rs4dXkBuvc9PiNdQ9Sv+ZYG8GIqIcbRd38cSaXI4Q94BOHEr4jm1vqb54H7twv +0Gy9CnLXfn7ZtEDFSmG8WLk2GvIna+UC+gnxSqKCw0rcTby63rQUCgdJZF8VQUVi +18+BMNLXI4pT/P9cxTaCJC/YeuV5a3SaniOoFQIDAQABAoIBAQCoGZJC84JnnIgb +ttZNWuWKBXbCJcDVDikOQJ9hBZbqsFg1X0CfGmQS3MHf9Ubc1Ro8zVjQh15oIEfn +8lIpdzTeXcpxLdiW8ix3ekVJF20F6pnXY8ZP6UnTeOwamXY6QPZAtb0D9UXcvY+f +nw+IVRD6082XS0Rmzu+peYWVXDy+FDN+HJRANBcdJZz8gOmNBIe0qDWx1b85d/s8 +2Kk1Wwdss1IwAGeSddTSwzBNaaHdItZaMZOqPW1gRyBfVSkcUQIE6zn2RKw2b70t +grkIvyRcTdfmiKbqkkJ+eR+ITOUt0cBZSH4cDjlQA+r7hulvoBpQBRj068Toxkcc +bTagHaPBAoGBAPWPGVkHqhTbJ/DjmqDIStxby2M1fhhHt4xUGHinhUYjQjGOtDQ9 +0mfaB7HObudRiSLydRAVGAHGyNJdQcTeFxeQbovwGiYKfZSA1IGpea7dTxPpGEdN +ksA0pzSp9MfKzX/MdLuAkEtO58aAg5YzsgX9hDNxo4MhH/gremZhEGZlAoGBAPZf +lqdYvAL0fjHGJ1FUEalhzGCGE9PH2iOqsxqLCXK7bDbzYSjvuiHkhYJHAOgVdiW1 +lB34UHHYAqZ1VVoFqJ05gax6DE2+r7K5VV3FUCaC0Zm3pavxchU9R/TKP82xRrBj +AFWwdgDTxUyvQEmgPR9sqorftO71Iz2tiwyTpIfxAoGBAIhEMLzHFAse0rtKkrRG +ccR27BbRyHeQ1Lp6sFnEHKEfT8xQdI/I/snCpCJ3e/PBu2g5Q9z416mktiyGs8ib +thTNgYsGYnxZtfaCx2pssanoBcn2wBJRae5fSapf5gY49HDG9MBYR7qCvvvYtSzU +4yWP2ZzyotpRt3vwJKxLkN5BAoGAORHpZvhiDNkvxj3da7Rqpu7VleJZA2y+9hYb +iOF+HcqWhaAY+I+XcTRrTMM/zYLzLEcEeXDEyao86uwxCjpXVZw1kotvAC9UqbTO +tnr3VwRkoxPsV4kFYTAh0+1pnC8dbcxxDmhi3Uww3tOVs7hfkEDuvF6XnebA9A+Y +LyCgMzECgYEA6cCU8QODOivIKWFRXucvWckgE6MYDBaAwe6qcLsd1Q/gpE2e3yQc 
+4RB3bcyiPROLzMLlXFxf1vSNJQdIaVfrRv+zJeGIiivLPU8+Eq4Lrb+tl1LepcOX +OzQeADTSCn5VidOfjDkIst9UXjMlrFfV9/oJEw5Eiqa6lkNPCGDhfA8= +-----END RSA PRIVATE KEY----- diff --git a/api/test-fixtures/vault.crt b/api/test-fixtures/vault.crt new file mode 100644 index 0000000..3e34cf1 --- /dev/null +++ b/api/test-fixtures/vault.crt @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEEjCCAvqgAwIBAgIJAM7PFmA6Y+KeMA0GCSqGSIb3DQEBCwUAMIGWMQswCQYD +VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFDASBgNVBAcMC1N0b255IEJyb29r +MRIwEAYDVQQKDAlIYXNoaUNvcnAxDjAMBgNVBAsMBVZhdWx0MRUwEwYDVQQDDAxW +aXNoYWwgTmF5YWsxIzAhBgkqhkiG9w0BCQEWFHZpc2hhbEBoYXNoaWNvcnAuY29t +MB4XDTE1MDgwNzE5MTk1OFoXDTE1MDkwNjE5MTk1OFowgZYxCzAJBgNVBAYTAlVT +MREwDwYDVQQIDAhOZXcgWW9yazEUMBIGA1UEBwwLU3RvbnkgQnJvb2sxEjAQBgNV +BAoMCUhhc2hpQ29ycDEOMAwGA1UECwwFVmF1bHQxFTATBgNVBAMMDFZpc2hhbCBO +YXlhazEjMCEGCSqGSIb3DQEJARYUdmlzaGFsQGhhc2hpY29ycC5jb20wggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCcGlPKIrsq5sDJAUB7mtLjnjbcfR0b +dX1sDHUaTdT+2YBq0JvtoLZOmKw1iVwsMBhaLeXwnKP/O/n67sE8zvZPsuU3REw1 +NTjPof8IbepkENWNxR68KoSB2Vn5r4KiO3ux+KbkXssrZB62+k9khj0e7qIiwyZP +y5+RQPOL2ESmX5DznX+90vH4mzAEF654PbXFI/qOBZcWvWZJ37i+lHkeyCqcB+sm +5o5+zd1ua8jVlN0eLjyqa7FDvIuXPAFEX+r5DVQgIvS2++YaFRqTFCIxRXdDQXdw +1xDMCuG1w4PGVWf3TtlpHeGSIU07DdrCgXsvIRYfW++aZ2pvXwJYCr8hAgMBAAGj +YTBfMA8GA1UdEQQIMAaHBKwYFugwHQYDVR0OBBYEFPl+AkButpRfbblZE9Jb3xNj +AyhkMB8GA1UdIwQYMBaAFPl+AkButpRfbblZE9Jb3xNjAyhkMAwGA1UdEwQFMAMB +Af8wDQYJKoZIhvcNAQELBQADggEBADdIyyBJ3BVghW1shhxYsqQgg/gj2TagpO1P +ulGNzS0aCfB4tzMD4MGWm7cTlL6QW9W6r9OuWKCd1ADherIX9j0gtVWgIMtWGx+i +NbHrYin1xHr4rkB7/f6veCiJ3CtzBC9P/rEI6keyfOn1BfQBsOxfo3oGe/HDlSzD +lpu0GlQECjTXD7dd4jrD0T/wdRQI0BmxcYjn9cZLgoJHtLHZwaS16TGVmKs4iRAW +V9Aw5hLK4jJ59IID830/ly+Ndfc//QGgdE5PM44OrvVFO3Q8+zs7pwr1ql7uQWew +MSuDfbL7EcEGajD/o085sj2u4xVUfkVBW+3TQvs4/pHYOxlhPjI= +-----END CERTIFICATE----- diff --git a/audit/audit.go b/audit/audit.go new file mode 100644 index 0000000..35a3d38 --- /dev/null +++ b/audit/audit.go @@ -0,0 +1,62 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package audit + +import ( + "context" + + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" +) + +// Backend interface must be implemented for an audit +// mechanism to be made available. Audit backends can be enabled to +// sink information to different backends such as logs, file, databases, +// or other external services. +type Backend interface { + // LogRequest is used to synchronously log a request. This is done after the + // request is authorized but before the request is executed. The arguments + // MUST not be modified in anyway. They should be deep copied if this is + // a possibility. + LogRequest(context.Context, *logical.LogInput) error + + // LogResponse is used to synchronously log a response. This is done after + // the request is processed but before the response is sent. The arguments + // MUST not be modified in anyway. They should be deep copied if this is + // a possibility. + LogResponse(context.Context, *logical.LogInput) error + + // LogTestMessage is used to check an audit backend before adding it + // permanently. It should attempt to synchronously log the given test + // message, WITHOUT using the normal Salt (which would require a storage + // operation on creation, which is currently disallowed.) 
+ LogTestMessage(context.Context, *logical.LogInput, map[string]string) error + + // GetHash is used to return the given data with the backend's hash, + // so that a caller can determine if a value in the audit log matches + // an expected plaintext value + GetHash(context.Context, string) (string, error) + + // Reload is called on SIGHUP for supporting backends. + Reload(context.Context) error + + // Invalidate is called for path invalidation + Invalidate(context.Context) +} + +// BackendConfig contains configuration parameters used in the factory func to +// instantiate audit backends +type BackendConfig struct { + // The view to store the salt + SaltView logical.Storage + + // The salt config that should be used for any secret obfuscation + SaltConfig *salt.Config + + // Config is the opaque user configuration provided when mounting + Config map[string]string +} + +// Factory is the factory function to create an audit backend. +type Factory func(context.Context, *BackendConfig) (Backend, error) diff --git a/audit/format.go b/audit/format.go new file mode 100644 index 0000000..d595f2f --- /dev/null +++ b/audit/format.go @@ -0,0 +1,590 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package audit + +import ( + "context" + "crypto/tls" + "fmt" + "io" + "strings" + "time" + + "github.com/go-jose/go-jose/v3/jwt" + + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" +) + +type AuditFormatWriter interface { + // WriteRequest writes the request entry to the writer or returns an error. + WriteRequest(io.Writer, *AuditRequestEntry) error + // WriteResponse writes the response entry to the writer or returns an error. + WriteResponse(io.Writer, *AuditResponseEntry) error + // Salt returns a non-nil salt or an error. 
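+	// Implementations typically just delegate to a stored salt function; as a
+	// sketch, this mirrors the JSON and JSONx writers elsewhere in this package:
+	//
+	//	func (f *JSONFormatWriter) Salt(ctx context.Context) (*salt.Salt, error) {
+	//		return f.SaltFunc(ctx)
+	//	}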
+ Salt(context.Context) (*salt.Salt, error) +} + +// AuditFormatter implements the Formatter interface, and allows the underlying +// marshaller to be swapped out +type AuditFormatter struct { + AuditFormatWriter +} + +var _ Formatter = (*AuditFormatter)(nil) + +func (f *AuditFormatter) FormatRequest(ctx context.Context, w io.Writer, config FormatterConfig, in *logical.LogInput) error { + if in == nil || in.Request == nil { + return fmt.Errorf("request to request-audit a nil request") + } + + if w == nil { + return fmt.Errorf("writer for audit request is nil") + } + + if f.AuditFormatWriter == nil { + return fmt.Errorf("no format writer specified") + } + + salt, err := f.Salt(ctx) + if err != nil { + return fmt.Errorf("error fetching salt: %w", err) + } + + // Set these to the input values at first + auth := in.Auth + req := in.Request + var connState *tls.ConnectionState + if auth == nil { + auth = new(logical.Auth) + } + + if in.Request.Connection != nil && in.Request.Connection.ConnState != nil { + connState = in.Request.Connection.ConnState + } + + if !config.Raw { + auth, err = HashAuth(salt, auth, config.HMACAccessor) + if err != nil { + return err + } + + req, err = HashRequest(salt, req, config.HMACAccessor, in.NonHMACReqDataKeys) + if err != nil { + return err + } + } + + var errString string + if in.OuterErr != nil { + errString = in.OuterErr.Error() + } + + ns, err := namespace.FromContext(ctx) + if err != nil { + return err + } + + reqType := in.Type + if reqType == "" { + reqType = "request" + } + reqEntry := &AuditRequestEntry{ + Type: reqType, + Error: errString, + ForwardedFrom: req.ForwardedFrom, + Auth: &AuditAuth{ + ClientToken: auth.ClientToken, + Accessor: auth.Accessor, + DisplayName: auth.DisplayName, + Policies: auth.Policies, + TokenPolicies: auth.TokenPolicies, + IdentityPolicies: auth.IdentityPolicies, + ExternalNamespacePolicies: auth.ExternalNamespacePolicies, + NoDefaultPolicy: auth.NoDefaultPolicy, + Metadata: auth.Metadata, + EntityID: auth.EntityID, + RemainingUses: req.ClientTokenRemainingUses, + TokenType: auth.TokenType.String(), + TokenTTL: int64(auth.TTL.Seconds()), + }, + + Request: &AuditRequest{ + ID: req.ID, + ClientID: req.ClientID, + ClientToken: req.ClientToken, + ClientTokenAccessor: req.ClientTokenAccessor, + Operation: req.Operation, + MountPoint: req.MountPoint, + MountType: req.MountType, + MountAccessor: req.MountAccessor, + MountRunningVersion: req.MountRunningVersion(), + MountRunningSha256: req.MountRunningSha256(), + MountIsExternalPlugin: req.MountIsExternalPlugin(), + MountClass: req.MountClass(), + Namespace: &AuditNamespace{ + ID: ns.ID, + Path: ns.Path, + }, + Path: req.Path, + Data: req.Data, + PolicyOverride: req.PolicyOverride, + RemoteAddr: getRemoteAddr(req), + RemotePort: getRemotePort(req), + ReplicationCluster: req.ReplicationCluster, + Headers: req.Headers, + ClientCertificateSerialNumber: getClientCertificateSerialNumber(connState), + }, + } + + if !auth.IssueTime.IsZero() { + reqEntry.Auth.TokenIssueTime = auth.IssueTime.Format(time.RFC3339) + } + + if auth.PolicyResults != nil { + reqEntry.Auth.PolicyResults = &AuditPolicyResults{ + Allowed: auth.PolicyResults.Allowed, + } + + for _, p := range auth.PolicyResults.GrantingPolicies { + reqEntry.Auth.PolicyResults.GrantingPolicies = append(reqEntry.Auth.PolicyResults.GrantingPolicies, PolicyInfo{ + Name: p.Name, + NamespaceId: p.NamespaceId, + NamespacePath: p.NamespacePath, + Type: p.Type, + }) + } + } + + if req.WrapInfo != nil { + reqEntry.Request.WrapTTL = 
int(req.WrapInfo.TTL / time.Second) + } + + if !config.OmitTime { + reqEntry.Time = time.Now().UTC().Format(time.RFC3339Nano) + } + + return f.AuditFormatWriter.WriteRequest(w, reqEntry) +} + +func (f *AuditFormatter) FormatResponse(ctx context.Context, w io.Writer, config FormatterConfig, in *logical.LogInput) error { + if in == nil || in.Request == nil { + return fmt.Errorf("request to response-audit a nil request") + } + + if w == nil { + return fmt.Errorf("writer for audit request is nil") + } + + if f.AuditFormatWriter == nil { + return fmt.Errorf("no format writer specified") + } + + salt, err := f.Salt(ctx) + if err != nil { + return fmt.Errorf("error fetching salt: %w", err) + } + + // Set these to the input values at first + auth, req, resp := in.Auth, in.Request, in.Response + if auth == nil { + auth = new(logical.Auth) + } + if resp == nil { + resp = new(logical.Response) + } + var connState *tls.ConnectionState + + if in.Request.Connection != nil && in.Request.Connection.ConnState != nil { + connState = in.Request.Connection.ConnState + } + + elideListResponseData := config.ElideListResponses && req.Operation == logical.ListOperation + + var respData map[string]interface{} + if config.Raw { + // In the non-raw case, elision of list response data occurs inside HashResponse, to avoid redundant deep + // copies and hashing of data only to elide it later. In the raw case, we need to do it here. + if elideListResponseData && resp.Data != nil { + // Copy the data map before making changes, but we only need to go one level deep in this case + respData = make(map[string]interface{}, len(resp.Data)) + for k, v := range resp.Data { + respData[k] = v + } + + doElideListResponseData(respData) + } else { + respData = resp.Data + } + } else { + auth, err = HashAuth(salt, auth, config.HMACAccessor) + if err != nil { + return err + } + + req, err = HashRequest(salt, req, config.HMACAccessor, in.NonHMACReqDataKeys) + if err != nil { + return err + } + + resp, err = HashResponse(salt, resp, config.HMACAccessor, in.NonHMACRespDataKeys, elideListResponseData) + if err != nil { + return err + } + + respData = resp.Data + } + + var errString string + if in.OuterErr != nil { + errString = in.OuterErr.Error() + } + + ns, err := namespace.FromContext(ctx) + if err != nil { + return err + } + + var respAuth *AuditAuth + if resp.Auth != nil { + respAuth = &AuditAuth{ + ClientToken: resp.Auth.ClientToken, + Accessor: resp.Auth.Accessor, + DisplayName: resp.Auth.DisplayName, + Policies: resp.Auth.Policies, + TokenPolicies: resp.Auth.TokenPolicies, + IdentityPolicies: resp.Auth.IdentityPolicies, + ExternalNamespacePolicies: resp.Auth.ExternalNamespacePolicies, + NoDefaultPolicy: resp.Auth.NoDefaultPolicy, + Metadata: resp.Auth.Metadata, + NumUses: resp.Auth.NumUses, + EntityID: resp.Auth.EntityID, + TokenType: resp.Auth.TokenType.String(), + TokenTTL: int64(resp.Auth.TTL.Seconds()), + } + if !resp.Auth.IssueTime.IsZero() { + respAuth.TokenIssueTime = resp.Auth.IssueTime.Format(time.RFC3339) + } + } + + var respSecret *AuditSecret + if resp.Secret != nil { + respSecret = &AuditSecret{ + LeaseID: resp.Secret.LeaseID, + } + } + + var respWrapInfo *AuditResponseWrapInfo + if resp.WrapInfo != nil { + token := resp.WrapInfo.Token + if jwtToken := parseVaultTokenFromJWT(token); jwtToken != nil { + token = *jwtToken + } + respWrapInfo = &AuditResponseWrapInfo{ + TTL: int(resp.WrapInfo.TTL / time.Second), + Token: token, + Accessor: resp.WrapInfo.Accessor, + CreationTime: 
resp.WrapInfo.CreationTime.UTC().Format(time.RFC3339Nano), + CreationPath: resp.WrapInfo.CreationPath, + WrappedAccessor: resp.WrapInfo.WrappedAccessor, + } + } + + respType := in.Type + if respType == "" { + respType = "response" + } + respEntry := &AuditResponseEntry{ + Type: respType, + Error: errString, + Forwarded: req.ForwardedFrom != "", + Auth: &AuditAuth{ + ClientToken: auth.ClientToken, + Accessor: auth.Accessor, + DisplayName: auth.DisplayName, + Policies: auth.Policies, + TokenPolicies: auth.TokenPolicies, + IdentityPolicies: auth.IdentityPolicies, + ExternalNamespacePolicies: auth.ExternalNamespacePolicies, + NoDefaultPolicy: auth.NoDefaultPolicy, + Metadata: auth.Metadata, + RemainingUses: req.ClientTokenRemainingUses, + EntityID: auth.EntityID, + EntityCreated: auth.EntityCreated, + TokenType: auth.TokenType.String(), + TokenTTL: int64(auth.TTL.Seconds()), + }, + + Request: &AuditRequest{ + ID: req.ID, + ClientToken: req.ClientToken, + ClientTokenAccessor: req.ClientTokenAccessor, + ClientID: req.ClientID, + Operation: req.Operation, + MountPoint: req.MountPoint, + MountType: req.MountType, + MountAccessor: req.MountAccessor, + MountRunningVersion: req.MountRunningVersion(), + MountRunningSha256: req.MountRunningSha256(), + MountIsExternalPlugin: req.MountIsExternalPlugin(), + MountClass: req.MountClass(), + Namespace: &AuditNamespace{ + ID: ns.ID, + Path: ns.Path, + }, + Path: req.Path, + Data: req.Data, + PolicyOverride: req.PolicyOverride, + RemoteAddr: getRemoteAddr(req), + RemotePort: getRemotePort(req), + ClientCertificateSerialNumber: getClientCertificateSerialNumber(connState), + ReplicationCluster: req.ReplicationCluster, + Headers: req.Headers, + }, + + Response: &AuditResponse{ + MountPoint: req.MountPoint, + MountType: req.MountType, + MountAccessor: req.MountAccessor, + MountRunningVersion: req.MountRunningVersion(), + MountRunningSha256: req.MountRunningSha256(), + MountIsExternalPlugin: req.MountIsExternalPlugin(), + MountClass: req.MountClass(), + Auth: respAuth, + Secret: respSecret, + Data: respData, + Warnings: resp.Warnings, + Redirect: resp.Redirect, + WrapInfo: respWrapInfo, + Headers: resp.Headers, + }, + } + + if auth.PolicyResults != nil { + respEntry.Auth.PolicyResults = &AuditPolicyResults{ + Allowed: auth.PolicyResults.Allowed, + } + + for _, p := range auth.PolicyResults.GrantingPolicies { + respEntry.Auth.PolicyResults.GrantingPolicies = append(respEntry.Auth.PolicyResults.GrantingPolicies, PolicyInfo{ + Name: p.Name, + NamespaceId: p.NamespaceId, + NamespacePath: p.NamespacePath, + Type: p.Type, + }) + } + } + + if !auth.IssueTime.IsZero() { + respEntry.Auth.TokenIssueTime = auth.IssueTime.Format(time.RFC3339) + } + if req.WrapInfo != nil { + respEntry.Request.WrapTTL = int(req.WrapInfo.TTL / time.Second) + } + + if !config.OmitTime { + respEntry.Time = time.Now().UTC().Format(time.RFC3339Nano) + } + + return f.AuditFormatWriter.WriteResponse(w, respEntry) +} + +// AuditRequestEntry is the structure of a request audit log entry in Audit. +type AuditRequestEntry struct { + Time string `json:"time,omitempty"` + Type string `json:"type,omitempty"` + Auth *AuditAuth `json:"auth,omitempty"` + Request *AuditRequest `json:"request,omitempty"` + Error string `json:"error,omitempty"` + ForwardedFrom string `json:"forwarded_from,omitempty"` // Populated in Enterprise when a request is forwarded +} + +// AuditResponseEntry is the structure of a response audit log entry in Audit. 
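+//
+// As a rough illustration (hypothetical, heavily truncated values; empty
+// fields are dropped by the omitempty tags), an entry marshals to JSON along
+// these lines:
+//
+//	{
+//	  "time": "2023-01-01T00:00:00Z",
+//	  "type": "response",
+//	  "auth": {"client_token": "hmac-sha256:...", "token_type": "service"},
+//	  "request": {"operation": "update", "path": "secret/foo"},
+//	  "response": {"mount_type": "kv"}
+//	}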
+type AuditResponseEntry struct { + Time string `json:"time,omitempty"` + Type string `json:"type,omitempty"` + Auth *AuditAuth `json:"auth,omitempty"` + Request *AuditRequest `json:"request,omitempty"` + Response *AuditResponse `json:"response,omitempty"` + Error string `json:"error,omitempty"` + Forwarded bool `json:"forwarded,omitempty"` +} + +type AuditRequest struct { + ID string `json:"id,omitempty"` + ClientID string `json:"client_id,omitempty"` + ReplicationCluster string `json:"replication_cluster,omitempty"` + Operation logical.Operation `json:"operation,omitempty"` + MountPoint string `json:"mount_point,omitempty"` + MountType string `json:"mount_type,omitempty"` + MountAccessor string `json:"mount_accessor,omitempty"` + MountRunningVersion string `json:"mount_running_version,omitempty"` + MountRunningSha256 string `json:"mount_running_sha256,omitempty"` + MountClass string `json:"mount_class,omitempty"` + MountIsExternalPlugin bool `json:"mount_is_external_plugin,omitempty"` + ClientToken string `json:"client_token,omitempty"` + ClientTokenAccessor string `json:"client_token_accessor,omitempty"` + Namespace *AuditNamespace `json:"namespace,omitempty"` + Path string `json:"path,omitempty"` + Data map[string]interface{} `json:"data,omitempty"` + PolicyOverride bool `json:"policy_override,omitempty"` + RemoteAddr string `json:"remote_address,omitempty"` + RemotePort int `json:"remote_port,omitempty"` + WrapTTL int `json:"wrap_ttl,omitempty"` + Headers map[string][]string `json:"headers,omitempty"` + ClientCertificateSerialNumber string `json:"client_certificate_serial_number,omitempty"` +} + +type AuditResponse struct { + Auth *AuditAuth `json:"auth,omitempty"` + MountPoint string `json:"mount_point,omitempty"` + MountType string `json:"mount_type,omitempty"` + MountAccessor string `json:"mount_accessor,omitempty"` + MountRunningVersion string `json:"mount_running_plugin_version,omitempty"` + MountRunningSha256 string `json:"mount_running_sha256,omitempty"` + MountClass string `json:"mount_class,omitempty"` + MountIsExternalPlugin bool `json:"mount_is_external_plugin,omitempty"` + Secret *AuditSecret `json:"secret,omitempty"` + Data map[string]interface{} `json:"data,omitempty"` + Warnings []string `json:"warnings,omitempty"` + Redirect string `json:"redirect,omitempty"` + WrapInfo *AuditResponseWrapInfo `json:"wrap_info,omitempty"` + Headers map[string][]string `json:"headers,omitempty"` +} + +type AuditAuth struct { + ClientToken string `json:"client_token,omitempty"` + Accessor string `json:"accessor,omitempty"` + DisplayName string `json:"display_name,omitempty"` + Policies []string `json:"policies,omitempty"` + TokenPolicies []string `json:"token_policies,omitempty"` + IdentityPolicies []string `json:"identity_policies,omitempty"` + ExternalNamespacePolicies map[string][]string `json:"external_namespace_policies,omitempty"` + NoDefaultPolicy bool `json:"no_default_policy,omitempty"` + PolicyResults *AuditPolicyResults `json:"policy_results,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` + NumUses int `json:"num_uses,omitempty"` + RemainingUses int `json:"remaining_uses,omitempty"` + EntityID string `json:"entity_id,omitempty"` + EntityCreated bool `json:"entity_created,omitempty"` + TokenType string `json:"token_type,omitempty"` + TokenTTL int64 `json:"token_ttl,omitempty"` + TokenIssueTime string `json:"token_issue_time,omitempty"` +} + +type AuditPolicyResults struct { + Allowed bool `json:"allowed"` + GrantingPolicies []PolicyInfo 
`json:"granting_policies,omitempty"` +} + +type PolicyInfo struct { + Name string `json:"name,omitempty"` + NamespaceId string `json:"namespace_id,omitempty"` + NamespacePath string `json:"namespace_path,omitempty"` + Type string `json:"type"` +} + +type AuditSecret struct { + LeaseID string `json:"lease_id,omitempty"` +} + +type AuditResponseWrapInfo struct { + TTL int `json:"ttl,omitempty"` + Token string `json:"token,omitempty"` + Accessor string `json:"accessor,omitempty"` + CreationTime string `json:"creation_time,omitempty"` + CreationPath string `json:"creation_path,omitempty"` + WrappedAccessor string `json:"wrapped_accessor,omitempty"` +} + +type AuditNamespace struct { + ID string `json:"id,omitempty"` + Path string `json:"path,omitempty"` +} + +// getRemoteAddr safely gets the remote address avoiding a nil pointer +func getRemoteAddr(req *logical.Request) string { + if req != nil && req.Connection != nil { + return req.Connection.RemoteAddr + } + return "" +} + +// getRemotePort safely gets the remote port avoiding a nil pointer +func getRemotePort(req *logical.Request) int { + if req != nil && req.Connection != nil { + return req.Connection.RemotePort + } + return 0 +} + +func getClientCertificateSerialNumber(connState *tls.ConnectionState) string { + if connState == nil || len(connState.VerifiedChains) == 0 || len(connState.VerifiedChains[0]) == 0 { + return "" + } + + return connState.VerifiedChains[0][0].SerialNumber.String() +} + +// parseVaultTokenFromJWT returns a string iff the token was a JWT and we could +// extract the original token ID from inside +func parseVaultTokenFromJWT(token string) *string { + if strings.Count(token, ".") != 2 { + return nil + } + + parsedJWT, err := jwt.ParseSigned(token) + if err != nil { + return nil + } + + var claims jwt.Claims + if err = parsedJWT.UnsafeClaimsWithoutVerification(&claims); err != nil { + return nil + } + + return &claims.ID +} + +// NewTemporaryFormatter creates a formatter not backed by a persistent salt +func NewTemporaryFormatter(format, prefix string) *AuditFormatter { + temporarySalt := func(ctx context.Context) (*salt.Salt, error) { + return salt.NewNonpersistentSalt(), nil + } + ret := &AuditFormatter{} + + switch format { + case "jsonx": + ret.AuditFormatWriter = &JSONxFormatWriter{ + Prefix: prefix, + SaltFunc: temporarySalt, + } + default: + ret.AuditFormatWriter = &JSONFormatWriter{ + Prefix: prefix, + SaltFunc: temporarySalt, + } + } + return ret +} + +// doElideListResponseData performs the actual elision of list operation response data, once surrounding code has +// determined it should apply to a particular request. The data map that is passed in must be a copy that is safe to +// modify in place, but need not be a full recursive deep copy, as only top-level keys are changed. +// +// See the documentation of the controlling option in FormatterConfig for more information on the purpose. +func doElideListResponseData(data map[string]interface{}) { + for k, v := range data { + if k == "keys" { + if vSlice, ok := v.([]string); ok { + data[k] = len(vSlice) + } + } else if k == "key_info" { + if vMap, ok := v.(map[string]interface{}); ok { + data[k] = len(vMap) + } + } + } +} diff --git a/audit/format_json.go b/audit/format_json.go new file mode 100644 index 0000000..74f4138 --- /dev/null +++ b/audit/format_json.go @@ -0,0 +1,56 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package audit + +import ( + "context" + "encoding/json" + "fmt" + "io" + + "github.com/hashicorp/vault/sdk/helper/salt" +) + +// JSONFormatWriter is an AuditFormatWriter implementation that structures data into +// a JSON format. +type JSONFormatWriter struct { + Prefix string + SaltFunc func(context.Context) (*salt.Salt, error) +} + +func (f *JSONFormatWriter) WriteRequest(w io.Writer, req *AuditRequestEntry) error { + if req == nil { + return fmt.Errorf("request entry was nil, cannot encode") + } + + if len(f.Prefix) > 0 { + _, err := w.Write([]byte(f.Prefix)) + if err != nil { + return err + } + } + + enc := json.NewEncoder(w) + return enc.Encode(req) +} + +func (f *JSONFormatWriter) WriteResponse(w io.Writer, resp *AuditResponseEntry) error { + if resp == nil { + return fmt.Errorf("response entry was nil, cannot encode") + } + + if len(f.Prefix) > 0 { + _, err := w.Write([]byte(f.Prefix)) + if err != nil { + return err + } + } + + enc := json.NewEncoder(w) + return enc.Encode(resp) +} + +func (f *JSONFormatWriter) Salt(ctx context.Context) (*salt.Salt, error) { + return f.SaltFunc(ctx) +} diff --git a/audit/format_json_test.go b/audit/format_json_test.go new file mode 100644 index 0000000..fa31cde --- /dev/null +++ b/audit/format_json_test.go @@ -0,0 +1,157 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package audit + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "strings" + "testing" + "time" + + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" +) + +func TestFormatJSON_formatRequest(t *testing.T) { + salter, err := salt.NewSalt(context.Background(), nil, nil) + if err != nil { + t.Fatal(err) + } + saltFunc := func(context.Context) (*salt.Salt, error) { + return salter, nil + } + + expectedResultStr := fmt.Sprintf(testFormatJSONReqBasicStrFmt, salter.GetIdentifiedHMAC("foo")) + + issueTime, _ := time.Parse(time.RFC3339, "2020-05-28T13:40:18-05:00") + cases := map[string]struct { + Auth *logical.Auth + Req *logical.Request + Err error + Prefix string + ExpectedStr string + }{ + "auth, request": { + &logical.Auth{ + ClientToken: "foo", + Accessor: "bar", + DisplayName: "testtoken", + EntityID: "foobarentity", + NoDefaultPolicy: true, + Policies: []string{"root"}, + TokenType: logical.TokenTypeService, + LeaseOptions: logical.LeaseOptions{ + TTL: time.Hour * 4, + IssueTime: issueTime, + }, + }, + &logical.Request{ + Operation: logical.UpdateOperation, + Path: "/foo", + Connection: &logical.Connection{ + RemoteAddr: "127.0.0.1", + }, + WrapInfo: &logical.RequestWrapInfo{ + TTL: 60 * time.Second, + }, + Headers: map[string][]string{ + "foo": {"bar"}, + }, + }, + errors.New("this is an error"), + "", + expectedResultStr, + }, + "auth, request with prefix": { + &logical.Auth{ + ClientToken: "foo", + Accessor: "bar", + EntityID: "foobarentity", + DisplayName: "testtoken", + NoDefaultPolicy: true, + Policies: []string{"root"}, + TokenType: logical.TokenTypeService, + LeaseOptions: logical.LeaseOptions{ + TTL: time.Hour * 4, + IssueTime: issueTime, + }, + }, + &logical.Request{ + Operation: logical.UpdateOperation, + Path: "/foo", + Connection: &logical.Connection{ + RemoteAddr: "127.0.0.1", + }, + WrapInfo: &logical.RequestWrapInfo{ + TTL: 60 * time.Second, + }, + Headers: map[string][]string{ + "foo": {"bar"}, + }, + }, + errors.New("this is an error"), + "@cee: 
", + expectedResultStr, + }, + } + + for name, tc := range cases { + var buf bytes.Buffer + formatter := AuditFormatter{ + AuditFormatWriter: &JSONFormatWriter{ + Prefix: tc.Prefix, + SaltFunc: saltFunc, + }, + } + config := FormatterConfig{ + HMACAccessor: false, + } + in := &logical.LogInput{ + Auth: tc.Auth, + Request: tc.Req, + OuterErr: tc.Err, + } + if err := formatter.FormatRequest(namespace.RootContext(nil), &buf, config, in); err != nil { + t.Fatalf("bad: %s\nerr: %s", name, err) + } + + if !strings.HasPrefix(buf.String(), tc.Prefix) { + t.Fatalf("no prefix: %s \n log: %s\nprefix: %s", name, expectedResultStr, tc.Prefix) + } + + expectedjson := new(AuditRequestEntry) + + if err := jsonutil.DecodeJSON([]byte(expectedResultStr), &expectedjson); err != nil { + t.Fatalf("bad json: %s", err) + } + expectedjson.Request.Namespace = &AuditNamespace{ID: "root"} + + actualjson := new(AuditRequestEntry) + if err := jsonutil.DecodeJSON([]byte(buf.String())[len(tc.Prefix):], &actualjson); err != nil { + t.Fatalf("bad json: %s", err) + } + + expectedjson.Time = actualjson.Time + + expectedBytes, err := json.Marshal(expectedjson) + if err != nil { + t.Fatalf("unable to marshal json: %s", err) + } + + if !strings.HasSuffix(strings.TrimSpace(buf.String()), string(expectedBytes)) { + t.Fatalf( + "bad: %s\nResult:\n\n%q\n\nExpected:\n\n%q", + name, buf.String(), string(expectedBytes)) + } + } +} + +const testFormatJSONReqBasicStrFmt = `{"time":"2015-08-05T13:45:46Z","type":"request","auth":{"client_token":"%s","accessor":"bar","display_name":"testtoken","policies":["root"],"no_default_policy":true,"metadata":null,"entity_id":"foobarentity","token_type":"service", "token_ttl": 14400, "token_issue_time": "2020-05-28T13:40:18-05:00"},"request":{"operation":"update","path":"/foo","data":null,"wrap_ttl":60,"remote_address":"127.0.0.1","headers":{"foo":["bar"]}},"error":"this is an error"} +` diff --git a/audit/format_jsonx.go b/audit/format_jsonx.go new file mode 100644 index 0000000..20352a2 --- /dev/null +++ b/audit/format_jsonx.go @@ -0,0 +1,77 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package audit + +import ( + "context" + "encoding/json" + "fmt" + "io" + + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/jefferai/jsonx" +) + +// JSONxFormatWriter is an AuditFormatWriter implementation that structures data into +// a XML format. 
+type JSONxFormatWriter struct { + Prefix string + SaltFunc func(context.Context) (*salt.Salt, error) +} + +func (f *JSONxFormatWriter) WriteRequest(w io.Writer, req *AuditRequestEntry) error { + if req == nil { + return fmt.Errorf("request entry was nil, cannot encode") + } + + if len(f.Prefix) > 0 { + _, err := w.Write([]byte(f.Prefix)) + if err != nil { + return err + } + } + + jsonBytes, err := json.Marshal(req) + if err != nil { + return err + } + + xmlBytes, err := jsonx.EncodeJSONBytes(jsonBytes) + if err != nil { + return err + } + + _, err = w.Write(xmlBytes) + return err +} + +func (f *JSONxFormatWriter) WriteResponse(w io.Writer, resp *AuditResponseEntry) error { + if resp == nil { + return fmt.Errorf("response entry was nil, cannot encode") + } + + if len(f.Prefix) > 0 { + _, err := w.Write([]byte(f.Prefix)) + if err != nil { + return err + } + } + + jsonBytes, err := json.Marshal(resp) + if err != nil { + return err + } + + xmlBytes, err := jsonx.EncodeJSONBytes(jsonBytes) + if err != nil { + return err + } + + _, err = w.Write(xmlBytes) + return err +} + +func (f *JSONxFormatWriter) Salt(ctx context.Context) (*salt.Salt, error) { + return f.SaltFunc(ctx) +} diff --git a/audit/format_jsonx_test.go b/audit/format_jsonx_test.go new file mode 100644 index 0000000..fb60461 --- /dev/null +++ b/audit/format_jsonx_test.go @@ -0,0 +1,147 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package audit + +import ( + "bytes" + "context" + "errors" + "fmt" + "strings" + "testing" + "time" + + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" +) + +func TestFormatJSONx_formatRequest(t *testing.T) { + salter, err := salt.NewSalt(context.Background(), nil, nil) + if err != nil { + t.Fatal(err) + } + saltFunc := func(context.Context) (*salt.Salt, error) { + return salter, nil + } + + fooSalted := salter.GetIdentifiedHMAC("foo") + issueTime, _ := time.Parse(time.RFC3339, "2020-05-28T13:40:18-05:00") + + cases := map[string]struct { + Auth *logical.Auth + Req *logical.Request + Err error + Prefix string + Result string + ExpectedStr string + }{ + "auth, request": { + &logical.Auth{ + ClientToken: "foo", + Accessor: "bar", + DisplayName: "testtoken", + EntityID: "foobarentity", + NoDefaultPolicy: true, + Policies: []string{"root"}, + TokenType: logical.TokenTypeService, + LeaseOptions: logical.LeaseOptions{ + TTL: time.Hour * 4, + IssueTime: issueTime, + }, + }, + &logical.Request{ + ID: "request", + ClientToken: "foo", + ClientTokenAccessor: "bar", + Operation: logical.UpdateOperation, + Path: "/foo", + Connection: &logical.Connection{ + RemoteAddr: "127.0.0.1", + }, + WrapInfo: &logical.RequestWrapInfo{ + TTL: 60 * time.Second, + }, + Headers: map[string][]string{ + "foo": {"bar"}, + }, + PolicyOverride: true, + }, + errors.New("this is an error"), + "", + "", + fmt.Sprintf(`bar%stesttokenfoobarentitytrueroot2020-05-28T13:40:18-05:0014400servicethis is an error%sbarbarrequestrootupdate/footrue127.0.0.160request`, + fooSalted, fooSalted), + }, + "auth, request with prefix": { + &logical.Auth{ + ClientToken: "foo", + Accessor: "bar", + DisplayName: "testtoken", + NoDefaultPolicy: true, + EntityID: "foobarentity", + Policies: []string{"root"}, + TokenType: logical.TokenTypeService, + LeaseOptions: logical.LeaseOptions{ + TTL: time.Hour * 4, + IssueTime: issueTime, + }, + }, + &logical.Request{ + ID: "request", + ClientToken: "foo", + ClientTokenAccessor: "bar", + Operation: 
logical.UpdateOperation, + Path: "/foo", + Connection: &logical.Connection{ + RemoteAddr: "127.0.0.1", + }, + WrapInfo: &logical.RequestWrapInfo{ + TTL: 60 * time.Second, + }, + Headers: map[string][]string{ + "foo": {"bar"}, + }, + PolicyOverride: true, + }, + errors.New("this is an error"), + "", + "@cee: ", + fmt.Sprintf(`bar%stesttokenfoobarentitytrueroot2020-05-28T13:40:18-05:0014400servicethis is an error%sbarbarrequestrootupdate/footrue127.0.0.160request`, + fooSalted, fooSalted), + }, + } + + for name, tc := range cases { + var buf bytes.Buffer + formatter := AuditFormatter{ + AuditFormatWriter: &JSONxFormatWriter{ + Prefix: tc.Prefix, + SaltFunc: saltFunc, + }, + } + config := FormatterConfig{ + OmitTime: true, + HMACAccessor: false, + } + in := &logical.LogInput{ + Auth: tc.Auth, + Request: tc.Req, + OuterErr: tc.Err, + } + if err := formatter.FormatRequest(namespace.RootContext(nil), &buf, config, in); err != nil { + t.Fatalf("bad: %s\nerr: %s", name, err) + } + + if !strings.HasPrefix(buf.String(), tc.Prefix) { + t.Fatalf("no prefix: %s \n log: %s\nprefix: %s", name, tc.Result, tc.Prefix) + } + + if !strings.HasSuffix(strings.TrimSpace(buf.String()), string(tc.ExpectedStr)) { + t.Fatalf( + "bad: %s\nResult:\n\n%q\n\nExpected:\n\n%q", + name, strings.TrimSpace(buf.String()), string(tc.ExpectedStr)) + } + } +} diff --git a/audit/format_test.go b/audit/format_test.go new file mode 100644 index 0000000..5395d91 --- /dev/null +++ b/audit/format_test.go @@ -0,0 +1,224 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package audit + +import ( + "context" + "io" + "testing" + + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/copystructure" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type testingFormatWriter struct { + salt *salt.Salt + lastRequest *AuditRequestEntry + lastResponse *AuditResponseEntry +} + +func (fw *testingFormatWriter) WriteRequest(_ io.Writer, entry *AuditRequestEntry) error { + fw.lastRequest = entry + return nil +} + +func (fw *testingFormatWriter) WriteResponse(_ io.Writer, entry *AuditResponseEntry) error { + fw.lastResponse = entry + return nil +} + +func (fw *testingFormatWriter) Salt(ctx context.Context) (*salt.Salt, error) { + if fw.salt != nil { + return fw.salt, nil + } + var err error + fw.salt, err = salt.NewSalt(ctx, nil, nil) + if err != nil { + return nil, err + } + return fw.salt, nil +} + +// hashExpectedValueForComparison replicates enough of the audit HMAC process on a piece of expected data in a test, +// so that we can use assert.Equal to compare the expected and output values. 
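+// Concretely, it deep-copies the input and runs hashMap over the copy with the
+// same salter.GetIdentifiedHMAC function the formatter under test uses, e.g.:
+//
+//	expected := fw.hashExpectedValueForComparison(map[string]interface{}{"keys": 3})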
+func (fw *testingFormatWriter) hashExpectedValueForComparison(input map[string]interface{}) map[string]interface{} { + // Copy input before modifying, since we may re-use the same data in another test + copied, err := copystructure.Copy(input) + if err != nil { + panic(err) + } + copiedAsMap := copied.(map[string]interface{}) + + salter, err := fw.Salt(context.Background()) + if err != nil { + panic(err) + } + + err = hashMap(salter.GetIdentifiedHMAC, copiedAsMap, nil) + if err != nil { + panic(err) + } + + return copiedAsMap +} + +func TestFormatRequestErrors(t *testing.T) { + config := FormatterConfig{} + formatter := AuditFormatter{ + AuditFormatWriter: &testingFormatWriter{}, + } + + if err := formatter.FormatRequest(context.Background(), io.Discard, config, &logical.LogInput{}); err == nil { + t.Fatal("expected error due to nil request") + } + + in := &logical.LogInput{ + Request: &logical.Request{}, + } + if err := formatter.FormatRequest(context.Background(), nil, config, in); err == nil { + t.Fatal("expected error due to nil writer") + } +} + +func TestFormatResponseErrors(t *testing.T) { + config := FormatterConfig{} + formatter := AuditFormatter{ + AuditFormatWriter: &testingFormatWriter{}, + } + + if err := formatter.FormatResponse(context.Background(), io.Discard, config, &logical.LogInput{}); err == nil { + t.Fatal("expected error due to nil request") + } + + in := &logical.LogInput{ + Request: &logical.Request{}, + } + if err := formatter.FormatResponse(context.Background(), nil, config, in); err == nil { + t.Fatal("expected error due to nil writer") + } +} + +func TestElideListResponses(t *testing.T) { + tfw := testingFormatWriter{} + formatter := AuditFormatter{&tfw} + ctx := namespace.RootContext(context.Background()) + + type test struct { + name string + inputData map[string]interface{} + expectedData map[string]interface{} + } + + tests := []test{ + { + "nil data", + nil, + nil, + }, + { + "Normal list (keys only)", + map[string]interface{}{ + "keys": []string{"foo", "bar", "baz"}, + }, + map[string]interface{}{ + "keys": 3, + }, + }, + { + "Enhanced list (has key_info)", + map[string]interface{}{ + "keys": []string{"foo", "bar", "baz", "quux"}, + "key_info": map[string]interface{}{ + "foo": "alpha", + "bar": "beta", + "baz": "gamma", + "quux": "delta", + }, + }, + map[string]interface{}{ + "keys": 4, + "key_info": 4, + }, + }, + { + "Unconventional other values in a list response are not touched", + map[string]interface{}{ + "keys": []string{"foo", "bar"}, + "something_else": "baz", + }, + map[string]interface{}{ + "keys": 2, + "something_else": "baz", + }, + }, + { + "Conventional values in a list response are not elided if their data types are unconventional", + map[string]interface{}{ + "keys": map[string]interface{}{ + "You wouldn't expect keys to be a map": nil, + }, + "key_info": []string{ + "You wouldn't expect key_info to be a slice", + }, + }, + map[string]interface{}{ + "keys": map[string]interface{}{ + "You wouldn't expect keys to be a map": nil, + }, + "key_info": []string{ + "You wouldn't expect key_info to be a slice", + }, + }, + }, + } + oneInterestingTestCase := tests[2] + + formatResponse := func( + t *testing.T, + config FormatterConfig, + operation logical.Operation, + inputData map[string]interface{}, + ) { + err := formatter.FormatResponse(ctx, io.Discard, config, &logical.LogInput{ + Request: &logical.Request{Operation: operation}, + Response: &logical.Response{Data: inputData}, + }) + require.Nil(t, err) + } + + t.Run("Default case", func(t 
*testing.T) { + config := FormatterConfig{ElideListResponses: true} + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + formatResponse(t, config, logical.ListOperation, tc.inputData) + assert.Equal(t, tfw.hashExpectedValueForComparison(tc.expectedData), tfw.lastResponse.Response.Data) + }) + } + }) + + t.Run("When Operation is not list, eliding does not happen", func(t *testing.T) { + config := FormatterConfig{ElideListResponses: true} + tc := oneInterestingTestCase + formatResponse(t, config, logical.ReadOperation, tc.inputData) + assert.Equal(t, tfw.hashExpectedValueForComparison(tc.inputData), tfw.lastResponse.Response.Data) + }) + + t.Run("When ElideListResponses is false, eliding does not happen", func(t *testing.T) { + config := FormatterConfig{ElideListResponses: false} + tc := oneInterestingTestCase + formatResponse(t, config, logical.ListOperation, tc.inputData) + assert.Equal(t, tfw.hashExpectedValueForComparison(tc.inputData), tfw.lastResponse.Response.Data) + }) + + t.Run("When Raw is true, eliding still happens", func(t *testing.T) { + config := FormatterConfig{ElideListResponses: true, Raw: true} + tc := oneInterestingTestCase + formatResponse(t, config, logical.ListOperation, tc.inputData) + assert.Equal(t, tc.expectedData, tfw.lastResponse.Response.Data) + }) +} diff --git a/audit/formatter.go b/audit/formatter.go new file mode 100644 index 0000000..98c393c --- /dev/null +++ b/audit/formatter.go @@ -0,0 +1,51 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package audit + +import ( + "context" + "io" + + "github.com/hashicorp/vault/sdk/logical" +) + +// Formatter is an interface that is responsible for formatting a +// request/response into some format. Formatters write their output +// to an io.Writer. +// +// It is recommended that you pass data through Hash prior to formatting it. +type Formatter interface { + FormatRequest(context.Context, io.Writer, FormatterConfig, *logical.LogInput) error + FormatResponse(context.Context, io.Writer, FormatterConfig, *logical.LogInput) error +} + +type FormatterConfig struct { + Raw bool + HMACAccessor bool + + // Vault lacks pagination in its APIs. As a result, certain list operations can return **very** large responses. + // The user's chosen audit sinks may experience difficulty consuming audit records that swell to tens of megabytes + // of JSON. The responses of list operations are typically not very interesting, as they are mostly lists of keys, + // or, even when they include a "key_info" field, are not returning confidential information. They become even less + // interesting once HMAC-ed by the audit system. + // + // Some example Vault "list" operations that are prone to becoming very large in an active Vault installation are: + // auth/token/accessors/ + // identity/entity/id/ + // identity/entity-alias/id/ + // pki/certs/ + // + // This option exists to provide such users with the option to have response data elided from audit logs, only when + // the operation type is "list". For added safety, the elision only applies to the "keys" and "key_info" fields + // within the response data - these are conventionally the only fields present in a list response - see + // logical.ListResponse, and logical.ListResponseWithInfo. However, other fields are technically possible if a + // plugin author writes unusual code, and these will be preserved in the audit log even with this option enabled. 
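+	//
+	// As a sketch of the effect (hypothetical data), a list response of
+	//
+	//	{"keys": ["a", "b", "c"], "key_info": {"a": "x", "b": "y", "c": "z"}}
+	//
+	// would be audited as
+	//
+	//	{"keys": 3, "key_info": 3}
+	//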
+	// The elision replaces the values of the "keys" and "key_info" fields with an integer count of the number of
+	// entries. This allows even the elided audit logs to still be useful for answering questions like
+	// "Was any data returned?" or "How many records were listed?".
+	ElideListResponses bool
+
+	// This should only ever be used in a testing context
+	OmitTime bool
+} diff --git a/audit/hashstructure.go b/audit/hashstructure.go new file mode 100644 index 0000000..cd4f808 --- /dev/null +++ b/audit/hashstructure.go @@ -0,0 +1,375 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package audit
+
+import (
+	"encoding/json"
+	"errors"
+	"reflect"
+	"time"
+
+	"github.com/hashicorp/go-secure-stdlib/strutil"
+	"github.com/hashicorp/vault/sdk/helper/salt"
+	"github.com/hashicorp/vault/sdk/helper/wrapping"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/mitchellh/copystructure"
+	"github.com/mitchellh/reflectwalk"
+)
+
+// HashString hashes the given opaque string and returns it
+func HashString(salter *salt.Salt, data string) string {
+	return salter.GetIdentifiedHMAC(data)
+}
+
+// HashAuth returns a hashed copy of the logical.Auth input.
+func HashAuth(salter *salt.Salt, in *logical.Auth, HMACAccessor bool) (*logical.Auth, error) {
+	if in == nil {
+		return nil, nil
+	}
+
+	fn := salter.GetIdentifiedHMAC
+	auth := *in
+
+	if auth.ClientToken != "" {
+		auth.ClientToken = fn(auth.ClientToken)
+	}
+	if HMACAccessor && auth.Accessor != "" {
+		auth.Accessor = fn(auth.Accessor)
+	}
+	return &auth, nil
+}
+
+// HashRequest returns a hashed copy of the logical.Request input.
+func HashRequest(salter *salt.Salt, in *logical.Request, HMACAccessor bool, nonHMACDataKeys []string) (*logical.Request, error) {
+	if in == nil {
+		return nil, nil
+	}
+
+	fn := salter.GetIdentifiedHMAC
+	req := *in
+
+	if req.Auth != nil {
+		cp, err := copystructure.Copy(req.Auth)
+		if err != nil {
+			return nil, err
+		}
+
+		req.Auth, err = HashAuth(salter, cp.(*logical.Auth), HMACAccessor)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if req.ClientToken != "" {
+		req.ClientToken = fn(req.ClientToken)
+	}
+	if HMACAccessor && req.ClientTokenAccessor != "" {
+		req.ClientTokenAccessor = fn(req.ClientTokenAccessor)
+	}
+
+	if req.Data != nil {
+		copy, err := copystructure.Copy(req.Data)
+		if err != nil {
+			return nil, err
+		}
+
+		err = hashMap(fn, copy.(map[string]interface{}), nonHMACDataKeys)
+		if err != nil {
+			return nil, err
+		}
+		req.Data = copy.(map[string]interface{})
+	}
+
+	return &req, nil
+}
+
+func hashMap(fn func(string) string, data map[string]interface{}, nonHMACDataKeys []string) error {
+	for k, v := range data {
+		if o, ok := v.(logical.OptMarshaler); ok {
+			marshaled, err := o.MarshalJSONWithOptions(&logical.MarshalOptions{
+				ValueHasher: fn,
+			})
+			if err != nil {
+				return err
+			}
+			data[k] = json.RawMessage(marshaled)
+		}
+	}
+
+	return HashStructure(data, fn, nonHMACDataKeys)
+}
+
+// HashResponse returns a hashed copy of the logical.Response input.
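+//
+// A usage sketch (salter and resp assumed to already exist):
+//
+//	hashed, err := HashResponse(salter, resp, true, nil, false) // HMAC accessors; no exempt keys; no list elision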
+func HashResponse( + salter *salt.Salt, + in *logical.Response, + HMACAccessor bool, + nonHMACDataKeys []string, + elideListResponseData bool, +) (*logical.Response, error) { + if in == nil { + return nil, nil + } + + fn := salter.GetIdentifiedHMAC + resp := *in + + if resp.Auth != nil { + cp, err := copystructure.Copy(resp.Auth) + if err != nil { + return nil, err + } + + resp.Auth, err = HashAuth(salter, cp.(*logical.Auth), HMACAccessor) + if err != nil { + return nil, err + } + } + + if resp.Data != nil { + copy, err := copystructure.Copy(resp.Data) + if err != nil { + return nil, err + } + + mapCopy := copy.(map[string]interface{}) + if b, ok := mapCopy[logical.HTTPRawBody].([]byte); ok { + mapCopy[logical.HTTPRawBody] = string(b) + } + + // Processing list response data elision takes place at this point in the code for performance reasons: + // - take advantage of the deep copy of resp.Data that was going to be done anyway for hashing + // - but elide data before potentially spending time hashing it + if elideListResponseData { + doElideListResponseData(mapCopy) + } + + err = hashMap(fn, mapCopy, nonHMACDataKeys) + if err != nil { + return nil, err + } + resp.Data = mapCopy + } + + if resp.WrapInfo != nil { + var err error + resp.WrapInfo, err = HashWrapInfo(salter, resp.WrapInfo, HMACAccessor) + if err != nil { + return nil, err + } + } + + return &resp, nil +} + +// HashWrapInfo returns a hashed copy of the wrapping.ResponseWrapInfo input. +func HashWrapInfo(salter *salt.Salt, in *wrapping.ResponseWrapInfo, HMACAccessor bool) (*wrapping.ResponseWrapInfo, error) { + if in == nil { + return nil, nil + } + + fn := salter.GetIdentifiedHMAC + wrapinfo := *in + + wrapinfo.Token = fn(wrapinfo.Token) + + if HMACAccessor { + wrapinfo.Accessor = fn(wrapinfo.Accessor) + + if wrapinfo.WrappedAccessor != "" { + wrapinfo.WrappedAccessor = fn(wrapinfo.WrappedAccessor) + } + } + + return &wrapinfo, nil +} + +// HashStructure takes an interface and hashes all the values within +// the structure. Only _values_ are hashed: keys of objects are not. +// +// For the HashCallback, see the built-in HashCallbacks below. +func HashStructure(s interface{}, cb HashCallback, ignoredKeys []string) error { + walker := &hashWalker{Callback: cb, IgnoredKeys: ignoredKeys} + return reflectwalk.Walk(s, walker) +} + +// HashCallback is the callback called for HashStructure to hash +// a value. +type HashCallback func(string) string + +// hashWalker implements interfaces for the reflectwalk package +// (github.com/mitchellh/reflectwalk) that can be used to automatically +// replace primitives with a hashed value. +type hashWalker struct { + // Callback is the function to call with the primitive that is + // to be hashed. If there is an error, walking will be halted + // immediately and the error returned. + Callback HashCallback + // IgnoreKeys are the keys that wont have the HashCallback applied + IgnoredKeys []string + // MapElem appends the key itself (not the reflect.Value) to key. + // The last element in key is the most recently entered map key. + // Since Exit pops the last element of key, only nesting to another + // structure increases the size of this slice. + key []string + lastValue reflect.Value + // Enter appends to loc and exit pops loc. The last element of loc is thus + // the current location. + loc []reflectwalk.Location + // Map and Slice append to cs, Exit pops the last element off cs. + // The last element in cs is the most recently entered map or slice. 
+	cs []reflect.Value
+	// MapElem and SliceElem append to csKey. The last element in csKey is the
+	// most recently entered map key or slice index. Since Exit pops the last
+	// element of csKey, only nesting to another structure increases the size of
+	// this slice.
+	csKey []reflect.Value
+}
+
+// hashTimeType stores a pre-computed reflect.Type for a time.Time so
+// we can quickly compare in hashWalker.Struct. We create an empty/invalid
+// time.Time{} so we don't need to incur any additional startup cost vs.
+// Now() or Unix().
+var hashTimeType = reflect.TypeOf(time.Time{})
+
+func (w *hashWalker) Enter(loc reflectwalk.Location) error {
+	w.loc = append(w.loc, loc)
+	return nil
+}
+
+func (w *hashWalker) Exit(loc reflectwalk.Location) error {
+	w.loc = w.loc[:len(w.loc)-1]
+
+	switch loc {
+	case reflectwalk.Map:
+		w.cs = w.cs[:len(w.cs)-1]
+	case reflectwalk.MapValue:
+		w.key = w.key[:len(w.key)-1]
+		w.csKey = w.csKey[:len(w.csKey)-1]
+	case reflectwalk.Slice:
+		w.cs = w.cs[:len(w.cs)-1]
+	case reflectwalk.SliceElem:
+		w.csKey = w.csKey[:len(w.csKey)-1]
+	}
+
+	return nil
+}
+
+func (w *hashWalker) Map(m reflect.Value) error {
+	w.cs = append(w.cs, m)
+	return nil
+}
+
+func (w *hashWalker) MapElem(m, k, v reflect.Value) error {
+	w.csKey = append(w.csKey, k)
+	w.key = append(w.key, k.String())
+	w.lastValue = v
+	return nil
+}
+
+func (w *hashWalker) Slice(s reflect.Value) error {
+	w.cs = append(w.cs, s)
+	return nil
+}
+
+func (w *hashWalker) SliceElem(i int, elem reflect.Value) error {
+	w.csKey = append(w.csKey, reflect.ValueOf(i))
+	return nil
+}
+
+func (w *hashWalker) Struct(v reflect.Value) error {
+	// We are looking for time values. If it isn't one, ignore it.
+	if v.Type() != hashTimeType {
+		return nil
+	}
+
+	if len(w.loc) < 3 {
+		// The last element of w.loc is reflectwalk.Struct, by definition.
+		// If len(w.loc) < 3, that means hashWalker.Walk was given a struct
+		// value and this is the very first step in the walk, and we don't
+		// currently support structs as inputs.
+		return errors.New("structs as direct inputs not supported")
+	}
+
+	// Second to last element of w.loc is location that contains this struct.
+	switch w.loc[len(w.loc)-2] {
+	case reflectwalk.MapValue:
+		// Create a string value of the time. IMPORTANT: this must never change
+		// across Vault versions or the hash value of equivalent time.Time will
+		// change.
+		strVal := v.Interface().(time.Time).Format(time.RFC3339Nano)
+
+		// Set the map value to the string instead of the time.Time object
+		m := w.cs[len(w.cs)-1]
+		mk := w.csKey[len(w.cs)-1]
+		m.SetMapIndex(mk, reflect.ValueOf(strVal))
+	case reflectwalk.SliceElem:
+		// Create a string value of the time. IMPORTANT: this must never change
+		// across Vault versions or the hash value of equivalent time.Time will
+		// change.
+		strVal := v.Interface().(time.Time).Format(time.RFC3339Nano)
+
+		// Set the slice element to the string instead of the time.Time object
+		s := w.cs[len(w.cs)-1]
+		si := int(w.csKey[len(w.cs)-1].Int())
+		s.Slice(si, si+1).Index(0).Set(reflect.ValueOf(strVal))
+	}
+
+	// Skip this entry so that we don't walk the struct.
+	return reflectwalk.SkipEntry
+}
+
+func (w *hashWalker) StructField(reflect.StructField, reflect.Value) error {
+	return nil
+}
+
+// Primitive calls Callback to transform strings in-place, except for map keys.
+// Strings hiding within interfaces are also transformed.
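+// For example, a Callback of strings.ToUpper would rewrite
+// map[string]interface{}{"k": "v"} to {"k": "V"}, leaving the key untouched.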
+func (w *hashWalker) Primitive(v reflect.Value) error { + if w.Callback == nil { + return nil + } + + // We don't touch map keys + if w.loc[len(w.loc)-1] == reflectwalk.MapKey { + return nil + } + + setV := v + + // We only care about strings + if v.Kind() == reflect.Interface { + v = v.Elem() + } + if v.Kind() != reflect.String { + return nil + } + + // See if the current key is part of the ignored keys + currentKey := w.key[len(w.key)-1] + if strutil.StrListContains(w.IgnoredKeys, currentKey) { + return nil + } + + replaceVal := w.Callback(v.String()) + + resultVal := reflect.ValueOf(replaceVal) + switch w.loc[len(w.loc)-1] { + case reflectwalk.MapValue: + // If we're in a map, then the only way to set a map value is + // to set it directly. + m := w.cs[len(w.cs)-1] + mk := w.csKey[len(w.cs)-1] + m.SetMapIndex(mk, resultVal) + case reflectwalk.SliceElem: + s := w.cs[len(w.cs)-1] + si := int(w.csKey[len(w.cs)-1].Int()) + s.Slice(si, si+1).Index(0).Set(resultVal) + default: + // Otherwise, we should be addressable + setV.Set(resultVal) + } + + return nil +} diff --git a/audit/hashstructure_test.go b/audit/hashstructure_test.go new file mode 100644 index 0000000..c65931f --- /dev/null +++ b/audit/hashstructure_test.go @@ -0,0 +1,400 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package audit + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "reflect" + "testing" + "time" + + "github.com/go-test/deep" + + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/helper/wrapping" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/copystructure" +) + +func TestCopy_auth(t *testing.T) { + // Make a non-pointer one so that it can't be modified directly + expected := logical.Auth{ + LeaseOptions: logical.LeaseOptions{ + TTL: 1 * time.Hour, + }, + + ClientToken: "foo", + } + auth := expected + + // Copy it + dup, err := copystructure.Copy(&auth) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Check equality + auth2 := dup.(*logical.Auth) + if !reflect.DeepEqual(*auth2, expected) { + t.Fatalf("bad:\n\n%#v\n\n%#v", *auth2, expected) + } +} + +func TestCopy_request(t *testing.T) { + // Make a non-pointer one so that it can't be modified directly + expected := logical.Request{ + Data: map[string]interface{}{ + "foo": "bar", + }, + WrapInfo: &logical.RequestWrapInfo{ + TTL: 60 * time.Second, + }, + } + arg := expected + + // Copy it + dup, err := copystructure.Copy(&arg) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Check equality + arg2 := dup.(*logical.Request) + if !reflect.DeepEqual(*arg2, expected) { + t.Fatalf("bad:\n\n%#v\n\n%#v", *arg2, expected) + } +} + +func TestCopy_response(t *testing.T) { + // Make a non-pointer one so that it can't be modified directly + expected := logical.Response{ + Data: map[string]interface{}{ + "foo": "bar", + }, + WrapInfo: &wrapping.ResponseWrapInfo{ + TTL: 60, + Token: "foo", + CreationTime: time.Now(), + WrappedAccessor: "abcd1234", + }, + } + arg := expected + + // Copy it + dup, err := copystructure.Copy(&arg) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Check equality + arg2 := dup.(*logical.Response) + if !reflect.DeepEqual(*arg2, expected) { + t.Fatalf("bad:\n\n%#v\n\n%#v", *arg2, expected) + } +} + +func TestHashString(t *testing.T) { + inmemStorage := &logical.InmemStorage{} + inmemStorage.Put(context.Background(), &logical.StorageEntry{ + Key: "salt", + Value: []byte("foo"), + }) + 
localSalt, err := salt.NewSalt(context.Background(), inmemStorage, &salt.Config{ + HMAC: sha256.New, + HMACType: "hmac-sha256", + }) + if err != nil { + t.Fatalf("Error instantiating salt: %s", err) + } + out := HashString(localSalt, "foo") + if out != "hmac-sha256:08ba357e274f528065766c770a639abf6809b39ccfd37c2a3157c7f51954da0a" { + t.Fatalf("err: HashString output did not match expected") + } +} + +func TestHashAuth(t *testing.T) { + cases := []struct { + Input *logical.Auth + Output *logical.Auth + HMACAccessor bool + }{ + { + &logical.Auth{ClientToken: "foo"}, + &logical.Auth{ClientToken: "hmac-sha256:08ba357e274f528065766c770a639abf6809b39ccfd37c2a3157c7f51954da0a"}, + false, + }, + { + &logical.Auth{ + LeaseOptions: logical.LeaseOptions{ + TTL: 1 * time.Hour, + }, + + ClientToken: "foo", + }, + &logical.Auth{ + LeaseOptions: logical.LeaseOptions{ + TTL: 1 * time.Hour, + }, + + ClientToken: "hmac-sha256:08ba357e274f528065766c770a639abf6809b39ccfd37c2a3157c7f51954da0a", + }, + false, + }, + } + + inmemStorage := &logical.InmemStorage{} + inmemStorage.Put(context.Background(), &logical.StorageEntry{ + Key: "salt", + Value: []byte("foo"), + }) + localSalt, err := salt.NewSalt(context.Background(), inmemStorage, &salt.Config{ + HMAC: sha256.New, + HMACType: "hmac-sha256", + }) + if err != nil { + t.Fatalf("Error instantiating salt: %s", err) + } + for _, tc := range cases { + input := fmt.Sprintf("%#v", tc.Input) + out, err := HashAuth(localSalt, tc.Input, tc.HMACAccessor) + if err != nil { + t.Fatalf("err: %s\n\n%s", err, input) + } + if !reflect.DeepEqual(out, tc.Output) { + t.Fatalf("bad:\nInput:\n%s\nOutput:\n%#v\nExpected output:\n%#v", input, out, tc.Output) + } + } +} + +type testOptMarshaler struct { + S string + I int +} + +func (o *testOptMarshaler) MarshalJSONWithOptions(options *logical.MarshalOptions) ([]byte, error) { + return json.Marshal(&testOptMarshaler{S: options.ValueHasher(o.S), I: o.I}) +} + +var _ logical.OptMarshaler = &testOptMarshaler{} + +func TestHashRequest(t *testing.T) { + cases := []struct { + Input *logical.Request + Output *logical.Request + NonHMACDataKeys []string + HMACAccessor bool + }{ + { + &logical.Request{ + Data: map[string]interface{}{ + "foo": "bar", + "baz": "foobar", + "private_key_type": certutil.PrivateKeyType("rsa"), + "om": &testOptMarshaler{S: "bar", I: 1}, + }, + }, + &logical.Request{ + Data: map[string]interface{}{ + "foo": "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317", + "baz": "foobar", + "private_key_type": "hmac-sha256:995230dca56fffd310ff591aa404aab52b2abb41703c787cfa829eceb4595bf1", + "om": json.RawMessage(`{"S":"hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317","I":1}`), + }, + }, + []string{"baz"}, + false, + }, + } + + inmemStorage := &logical.InmemStorage{} + inmemStorage.Put(context.Background(), &logical.StorageEntry{ + Key: "salt", + Value: []byte("foo"), + }) + localSalt, err := salt.NewSalt(context.Background(), inmemStorage, &salt.Config{ + HMAC: sha256.New, + HMACType: "hmac-sha256", + }) + if err != nil { + t.Fatalf("Error instantiating salt: %s", err) + } + for _, tc := range cases { + input := fmt.Sprintf("%#v", tc.Input) + out, err := HashRequest(localSalt, tc.Input, tc.HMACAccessor, tc.NonHMACDataKeys) + if err != nil { + t.Fatalf("err: %s\n\n%s", err, input) + } + if diff := deep.Equal(out, tc.Output); len(diff) > 0 { + t.Fatalf("bad:\nInput:\n%s\nDiff:\n%#v", input, diff) + } + } +} + +func TestHashResponse(t *testing.T) { + now := time.Now() + + 
cases := []struct { + Input *logical.Response + Output *logical.Response + NonHMACDataKeys []string + HMACAccessor bool + }{ + { + &logical.Response{ + Data: map[string]interface{}{ + "foo": "bar", + "baz": "foobar", + // Responses can contain time values, so test that with + // a known fixed value. + "bar": now, + "om": &testOptMarshaler{S: "bar", I: 1}, + }, + WrapInfo: &wrapping.ResponseWrapInfo{ + TTL: 60, + Token: "bar", + Accessor: "flimflam", + CreationTime: now, + WrappedAccessor: "bar", + }, + }, + &logical.Response{ + Data: map[string]interface{}{ + "foo": "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317", + "baz": "foobar", + "bar": now.Format(time.RFC3339Nano), + "om": json.RawMessage(`{"S":"hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317","I":1}`), + }, + WrapInfo: &wrapping.ResponseWrapInfo{ + TTL: 60, + Token: "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317", + Accessor: "hmac-sha256:7c9c6fe666d0af73b3ebcfbfabe6885015558213208e6635ba104047b22f6390", + CreationTime: now, + WrappedAccessor: "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317", + }, + }, + []string{"baz"}, + true, + }, + } + + inmemStorage := &logical.InmemStorage{} + inmemStorage.Put(context.Background(), &logical.StorageEntry{ + Key: "salt", + Value: []byte("foo"), + }) + localSalt, err := salt.NewSalt(context.Background(), inmemStorage, &salt.Config{ + HMAC: sha256.New, + HMACType: "hmac-sha256", + }) + if err != nil { + t.Fatalf("Error instantiating salt: %s", err) + } + for _, tc := range cases { + input := fmt.Sprintf("%#v", tc.Input) + out, err := HashResponse(localSalt, tc.Input, tc.HMACAccessor, tc.NonHMACDataKeys, false) + if err != nil { + t.Fatalf("err: %s\n\n%s", err, input) + } + if diff := deep.Equal(out, tc.Output); len(diff) > 0 { + t.Fatalf("bad:\nInput:\n%s\nDiff:\n%#v", input, diff) + } + } +} + +func TestHashWalker(t *testing.T) { + replaceText := "foo" + + cases := []struct { + Input map[string]interface{} + Output map[string]interface{} + }{ + { + map[string]interface{}{ + "hello": "foo", + }, + map[string]interface{}{ + "hello": replaceText, + }, + }, + + { + map[string]interface{}{ + "hello": []interface{}{"world"}, + }, + map[string]interface{}{ + "hello": []interface{}{replaceText}, + }, + }, + } + + for _, tc := range cases { + err := HashStructure(tc.Input, func(string) string { + return replaceText + }, nil) + if err != nil { + t.Fatalf("err: %s\n\n%#v", err, tc.Input) + } + if !reflect.DeepEqual(tc.Input, tc.Output) { + t.Fatalf("bad:\n\n%#v\n\n%#v", tc.Input, tc.Output) + } + } +} + +func TestHashWalker_TimeStructs(t *testing.T) { + replaceText := "bar" + + now := time.Now() + cases := []struct { + Input map[string]interface{} + Output map[string]interface{} + }{ + // Should not touch map keys of type time.Time. + { + map[string]interface{}{ + "hello": map[time.Time]struct{}{ + now: {}, + }, + }, + map[string]interface{}{ + "hello": map[time.Time]struct{}{ + now: {}, + }, + }, + }, + // Should handle map values of type time.Time. + { + map[string]interface{}{ + "hello": now, + }, + map[string]interface{}{ + "hello": now.Format(time.RFC3339Nano), + }, + }, + // Should handle slice values of type time.Time. 
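+		// (Non-time elements in the slice still pass through the hash
+		// callback, hence "foobar" and "foo2bar" below.)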
+ { + map[string]interface{}{ + "hello": []interface{}{"foo", now, "foo2"}, + }, + map[string]interface{}{ + "hello": []interface{}{"foobar", now.Format(time.RFC3339Nano), "foo2bar"}, + }, + }, + } + + for _, tc := range cases { + err := HashStructure(tc.Input, func(s string) string { + return s + replaceText + }, nil) + if err != nil { + t.Fatalf("err: %v\n\n%#v", err, tc.Input) + } + if !reflect.DeepEqual(tc.Input, tc.Output) { + t.Fatalf("bad:\n\n%#v\n\n%#v", tc.Input, tc.Output) + } + } +} diff --git a/builtin/audit/file/backend.go b/builtin/audit/file/backend.go new file mode 100644 index 0000000..2c3ef3f --- /dev/null +++ b/builtin/audit/file/backend.go @@ -0,0 +1,362 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package file + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "sync/atomic" + + "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" +) + +func Factory(ctx context.Context, conf *audit.BackendConfig) (audit.Backend, error) { + if conf.SaltConfig == nil { + return nil, fmt.Errorf("nil salt config") + } + if conf.SaltView == nil { + return nil, fmt.Errorf("nil salt view") + } + + path, ok := conf.Config["file_path"] + if !ok { + path, ok = conf.Config["path"] + if !ok { + return nil, fmt.Errorf("file_path is required") + } + } + + // normalize path if configured for stdout + if strings.EqualFold(path, "stdout") { + path = "stdout" + } + if strings.EqualFold(path, "discard") { + path = "discard" + } + + format, ok := conf.Config["format"] + if !ok { + format = "json" + } + switch format { + case "json", "jsonx": + default: + return nil, fmt.Errorf("unknown format type %q", format) + } + + // Check if hashing of accessor is disabled + hmacAccessor := true + if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok { + value, err := strconv.ParseBool(hmacAccessorRaw) + if err != nil { + return nil, err + } + hmacAccessor = value + } + + // Check if raw logging is enabled + logRaw := false + if raw, ok := conf.Config["log_raw"]; ok { + b, err := strconv.ParseBool(raw) + if err != nil { + return nil, err + } + logRaw = b + } + + elideListResponses := false + if elideListResponsesRaw, ok := conf.Config["elide_list_responses"]; ok { + value, err := strconv.ParseBool(elideListResponsesRaw) + if err != nil { + return nil, err + } + elideListResponses = value + } + + // Check if mode is provided + mode := os.FileMode(0o600) + if modeRaw, ok := conf.Config["mode"]; ok { + m, err := strconv.ParseUint(modeRaw, 8, 32) + if err != nil { + return nil, err + } + switch m { + case 0: + // if mode is 0000, then do not modify file mode + if path != "stdout" && path != "discard" { + fileInfo, err := os.Stat(path) + if err != nil { + return nil, err + } + mode = fileInfo.Mode() + } + default: + mode = os.FileMode(m) + + } + + } + + b := &Backend{ + path: path, + mode: mode, + saltConfig: conf.SaltConfig, + saltView: conf.SaltView, + salt: new(atomic.Value), + formatConfig: audit.FormatterConfig{ + Raw: logRaw, + HMACAccessor: hmacAccessor, + ElideListResponses: elideListResponses, + }, + } + + // Ensure we are working with the right type by explicitly storing a nil of + // the right type + b.salt.Store((*salt.Salt)(nil)) + + switch format { + case "json": + b.formatter.AuditFormatWriter = &audit.JSONFormatWriter{ + Prefix: conf.Config["prefix"], + SaltFunc: b.Salt, + } + case "jsonx": + b.formatter.AuditFormatWriter = 
&audit.JSONxFormatWriter{ + Prefix: conf.Config["prefix"], + SaltFunc: b.Salt, + } + } + + switch path { + case "stdout", "discard": + // no need to test opening file if outputting to stdout or discarding + default: + // Ensure that the file can be successfully opened for writing; + // otherwise it will be too late to catch later without problems + // (ref: https://github.com/hashicorp/vault/issues/550) + if err := b.open(); err != nil { + return nil, fmt.Errorf("sanity check failed; unable to open %q for writing: %w", path, err) + } + } + + return b, nil +} + +// Backend is the audit backend for the file-based audit store. +// +// NOTE: This audit backend is currently very simple: it appends to a file. +// It doesn't do anything more at the moment to assist with rotation +// or reset the write cursor, this should be done in the future. +type Backend struct { + path string + + formatter audit.AuditFormatter + formatConfig audit.FormatterConfig + + fileLock sync.RWMutex + f *os.File + mode os.FileMode + + saltMutex sync.RWMutex + salt *atomic.Value + saltConfig *salt.Config + saltView logical.Storage +} + +var _ audit.Backend = (*Backend)(nil) + +func (b *Backend) Salt(ctx context.Context) (*salt.Salt, error) { + s := b.salt.Load().(*salt.Salt) + if s != nil { + return s, nil + } + + b.saltMutex.Lock() + defer b.saltMutex.Unlock() + + s = b.salt.Load().(*salt.Salt) + if s != nil { + return s, nil + } + + newSalt, err := salt.NewSalt(ctx, b.saltView, b.saltConfig) + if err != nil { + b.salt.Store((*salt.Salt)(nil)) + return nil, err + } + + b.salt.Store(newSalt) + return newSalt, nil +} + +func (b *Backend) GetHash(ctx context.Context, data string) (string, error) { + salt, err := b.Salt(ctx) + if err != nil { + return "", err + } + + return audit.HashString(salt, data), nil +} + +func (b *Backend) LogRequest(ctx context.Context, in *logical.LogInput) error { + var writer io.Writer + switch b.path { + case "stdout": + writer = os.Stdout + case "discard": + return nil + } + + buf := bytes.NewBuffer(make([]byte, 0, 2000)) + err := b.formatter.FormatRequest(ctx, buf, b.formatConfig, in) + if err != nil { + return err + } + + return b.log(ctx, buf, writer) +} + +func (b *Backend) log(ctx context.Context, buf *bytes.Buffer, writer io.Writer) error { + reader := bytes.NewReader(buf.Bytes()) + + b.fileLock.Lock() + + if writer == nil { + if err := b.open(); err != nil { + b.fileLock.Unlock() + return err + } + writer = b.f + } + + if _, err := reader.WriteTo(writer); err == nil { + b.fileLock.Unlock() + return nil + } else if b.path == "stdout" { + b.fileLock.Unlock() + return err + } + + // If writing to stdout there's no real reason to think anything would have + // changed so return above. Otherwise, opportunistically try to re-open the + // FD, once per call. 
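+	// In sketch form, the recovery path below is:
+	//
+	//	b.f.Close(); b.f = nil        // drop the possibly-stale handle
+	//	b.open()                      // reopen, recreating the file if needed
+	//	reader.Seek(0, io.SeekStart)  // rewind the buffered entry
+	//	reader.WriteTo(writer)        // retry the same payload exactly once
+	//
+	// Only a single retry is attempted per call; a second failure is
+	// returned to the caller rather than looping.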
+ b.f.Close() + b.f = nil + + if err := b.open(); err != nil { + b.fileLock.Unlock() + return err + } + + reader.Seek(0, io.SeekStart) + _, err := reader.WriteTo(writer) + b.fileLock.Unlock() + return err +} + +func (b *Backend) LogResponse(ctx context.Context, in *logical.LogInput) error { + var writer io.Writer + switch b.path { + case "stdout": + writer = os.Stdout + case "discard": + return nil + } + + buf := bytes.NewBuffer(make([]byte, 0, 6000)) + err := b.formatter.FormatResponse(ctx, buf, b.formatConfig, in) + if err != nil { + return err + } + + return b.log(ctx, buf, writer) +} + +func (b *Backend) LogTestMessage(ctx context.Context, in *logical.LogInput, config map[string]string) error { + var writer io.Writer + switch b.path { + case "stdout": + writer = os.Stdout + case "discard": + return nil + } + + var buf bytes.Buffer + temporaryFormatter := audit.NewTemporaryFormatter(config["format"], config["prefix"]) + if err := temporaryFormatter.FormatRequest(ctx, &buf, b.formatConfig, in); err != nil { + return err + } + + return b.log(ctx, &buf, writer) +} + +// The file lock must be held before calling this +func (b *Backend) open() error { + if b.f != nil { + return nil + } + if err := os.MkdirAll(filepath.Dir(b.path), b.mode); err != nil { + return err + } + + var err error + b.f, err = os.OpenFile(b.path, os.O_APPEND|os.O_WRONLY|os.O_CREATE, b.mode) + if err != nil { + return err + } + + // Change the file mode in case the log file already existed. We special + // case /dev/null since we can't chmod it and bypass if the mode is zero + switch b.path { + case "/dev/null": + default: + if b.mode != 0 { + err = os.Chmod(b.path, b.mode) + if err != nil { + return err + } + } + } + + return nil +} + +func (b *Backend) Reload(_ context.Context) error { + switch b.path { + case "stdout", "discard": + return nil + } + + b.fileLock.Lock() + defer b.fileLock.Unlock() + + if b.f == nil { + return b.open() + } + + err := b.f.Close() + // Set to nil here so that even if we error out, on the next access open() + // will be tried + b.f = nil + if err != nil { + return err + } + + return b.open() +} + +func (b *Backend) Invalidate(_ context.Context) { + b.saltMutex.Lock() + defer b.saltMutex.Unlock() + b.salt.Store((*salt.Salt)(nil)) +} diff --git a/builtin/audit/file/backend_test.go b/builtin/audit/file/backend_test.go new file mode 100644 index 0000000..ad082ac --- /dev/null +++ b/builtin/audit/file/backend_test.go @@ -0,0 +1,186 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package file + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "testing" + "time" + + "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" +) + +func TestAuditFile_fileModeNew(t *testing.T) { + modeStr := "0777" + mode, err := strconv.ParseUint(modeStr, 8, 32) + if err != nil { + t.Fatal(err) + } + + path, err := ioutil.TempDir("", "vault-test_audit_file-file_mode_new") + if err != nil { + t.Fatal(err) + } + + defer os.RemoveAll(path) + + file := filepath.Join(path, "auditTest.txt") + + config := map[string]string{ + "path": file, + "mode": modeStr, + } + + _, err = Factory(context.Background(), &audit.BackendConfig{ + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + Config: config, + }) + if err != nil { + t.Fatal(err) + } + + info, err := os.Stat(file) + if err != nil { + t.Fatalf("Cannot retrieve file mode from `Stat`") + } + if info.Mode() != os.FileMode(mode) { + t.Fatalf("File mode does not match.") + } +} + +func TestAuditFile_fileModeExisting(t *testing.T) { + f, err := ioutil.TempFile("", "test") + if err != nil { + t.Fatalf("Failure to create test file.") + } + defer os.Remove(f.Name()) + + err = os.Chmod(f.Name(), 0o777) + if err != nil { + t.Fatalf("Failure to chmod temp file for testing.") + } + + err = f.Close() + if err != nil { + t.Fatalf("Failure to close temp file for test.") + } + + config := map[string]string{ + "path": f.Name(), + } + + _, err = Factory(context.Background(), &audit.BackendConfig{ + Config: config, + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + }) + if err != nil { + t.Fatal(err) + } + + info, err := os.Stat(f.Name()) + if err != nil { + t.Fatalf("cannot retrieve file mode from `Stat`") + } + if info.Mode() != os.FileMode(0o600) { + t.Fatalf("File mode does not match.") + } +} + +func TestAuditFile_fileMode0000(t *testing.T) { + f, err := ioutil.TempFile("", "test") + if err != nil { + t.Fatalf("Failure to create test file. The error is %v", err) + } + defer os.Remove(f.Name()) + + err = os.Chmod(f.Name(), 0o777) + if err != nil { + t.Fatalf("Failure to chmod temp file for testing. The error is %v", err) + } + + err = f.Close() + if err != nil { + t.Fatalf("Failure to close temp file for test. The error is %v", err) + } + + config := map[string]string{ + "path": f.Name(), + "mode": "0000", + } + + _, err = Factory(context.Background(), &audit.BackendConfig{ + Config: config, + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + }) + if err != nil { + t.Fatal(err) + } + + info, err := os.Stat(f.Name()) + if err != nil { + t.Fatalf("cannot retrieve file mode from `Stat`. 
The error is %v", err) + } + if info.Mode() != os.FileMode(0o777) { + t.Fatalf("File mode does not match.") + } +} + +func BenchmarkAuditFile_request(b *testing.B) { + config := map[string]string{ + "path": "/dev/null", + } + sink, err := Factory(context.Background(), &audit.BackendConfig{ + Config: config, + SaltConfig: &salt.Config{}, + SaltView: &logical.InmemStorage{}, + }) + if err != nil { + b.Fatal(err) + } + + in := &logical.LogInput{ + Auth: &logical.Auth{ + ClientToken: "foo", + Accessor: "bar", + EntityID: "foobarentity", + DisplayName: "testtoken", + NoDefaultPolicy: true, + Policies: []string{"root"}, + TokenType: logical.TokenTypeService, + }, + Request: &logical.Request{ + Operation: logical.UpdateOperation, + Path: "/foo", + Connection: &logical.Connection{ + RemoteAddr: "127.0.0.1", + }, + WrapInfo: &logical.RequestWrapInfo{ + TTL: 60 * time.Second, + }, + Headers: map[string][]string{ + "foo": {"bar"}, + }, + }, + } + + ctx := namespace.RootContext(nil) + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + if err := sink.LogRequest(ctx, in); err != nil { + panic(err) + } + } + }) +} diff --git a/builtin/audit/socket/backend.go b/builtin/audit/socket/backend.go new file mode 100644 index 0000000..4c649e0 --- /dev/null +++ b/builtin/audit/socket/backend.go @@ -0,0 +1,290 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package socket + +import ( + "bytes" + "context" + "fmt" + "net" + "strconv" + "sync" + "time" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" +) + +func Factory(ctx context.Context, conf *audit.BackendConfig) (audit.Backend, error) { + if conf.SaltConfig == nil { + return nil, fmt.Errorf("nil salt config") + } + if conf.SaltView == nil { + return nil, fmt.Errorf("nil salt view") + } + + address, ok := conf.Config["address"] + if !ok { + return nil, fmt.Errorf("address is required") + } + + socketType, ok := conf.Config["socket_type"] + if !ok { + socketType = "tcp" + } + + writeDeadline, ok := conf.Config["write_timeout"] + if !ok { + writeDeadline = "2s" + } + writeDuration, err := parseutil.ParseDurationSecond(writeDeadline) + if err != nil { + return nil, err + } + + format, ok := conf.Config["format"] + if !ok { + format = "json" + } + switch format { + case "json", "jsonx": + default: + return nil, fmt.Errorf("unknown format type %q", format) + } + + // Check if hashing of accessor is disabled + hmacAccessor := true + if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok { + value, err := strconv.ParseBool(hmacAccessorRaw) + if err != nil { + return nil, err + } + hmacAccessor = value + } + + // Check if raw logging is enabled + logRaw := false + if raw, ok := conf.Config["log_raw"]; ok { + b, err := strconv.ParseBool(raw) + if err != nil { + return nil, err + } + logRaw = b + } + + elideListResponses := false + if elideListResponsesRaw, ok := conf.Config["elide_list_responses"]; ok { + value, err := strconv.ParseBool(elideListResponsesRaw) + if err != nil { + return nil, err + } + elideListResponses = value + } + + b := &Backend{ + saltConfig: conf.SaltConfig, + saltView: conf.SaltView, + formatConfig: audit.FormatterConfig{ + Raw: logRaw, + HMACAccessor: hmacAccessor, + ElideListResponses: elideListResponses, + }, + + writeDuration: writeDuration, + address: address, + socketType: socketType, + } + + switch format { + case 
"json": + b.formatter.AuditFormatWriter = &audit.JSONFormatWriter{ + Prefix: conf.Config["prefix"], + SaltFunc: b.Salt, + } + case "jsonx": + b.formatter.AuditFormatWriter = &audit.JSONxFormatWriter{ + Prefix: conf.Config["prefix"], + SaltFunc: b.Salt, + } + } + + return b, nil +} + +// Backend is the audit backend for the socket audit transport. +type Backend struct { + connection net.Conn + + formatter audit.AuditFormatter + formatConfig audit.FormatterConfig + + writeDuration time.Duration + address string + socketType string + + sync.Mutex + + saltMutex sync.RWMutex + salt *salt.Salt + saltConfig *salt.Config + saltView logical.Storage +} + +var _ audit.Backend = (*Backend)(nil) + +func (b *Backend) GetHash(ctx context.Context, data string) (string, error) { + salt, err := b.Salt(ctx) + if err != nil { + return "", err + } + return audit.HashString(salt, data), nil +} + +func (b *Backend) LogRequest(ctx context.Context, in *logical.LogInput) error { + var buf bytes.Buffer + if err := b.formatter.FormatRequest(ctx, &buf, b.formatConfig, in); err != nil { + return err + } + + b.Lock() + defer b.Unlock() + + err := b.write(ctx, buf.Bytes()) + if err != nil { + rErr := b.reconnect(ctx) + if rErr != nil { + err = multierror.Append(err, rErr) + } else { + // Try once more after reconnecting + err = b.write(ctx, buf.Bytes()) + } + } + + return err +} + +func (b *Backend) LogResponse(ctx context.Context, in *logical.LogInput) error { + var buf bytes.Buffer + if err := b.formatter.FormatResponse(ctx, &buf, b.formatConfig, in); err != nil { + return err + } + + b.Lock() + defer b.Unlock() + + err := b.write(ctx, buf.Bytes()) + if err != nil { + rErr := b.reconnect(ctx) + if rErr != nil { + err = multierror.Append(err, rErr) + } else { + // Try once more after reconnecting + err = b.write(ctx, buf.Bytes()) + } + } + + return err +} + +func (b *Backend) LogTestMessage(ctx context.Context, in *logical.LogInput, config map[string]string) error { + var buf bytes.Buffer + temporaryFormatter := audit.NewTemporaryFormatter(config["format"], config["prefix"]) + if err := temporaryFormatter.FormatRequest(ctx, &buf, b.formatConfig, in); err != nil { + return err + } + + b.Lock() + defer b.Unlock() + + err := b.write(ctx, buf.Bytes()) + if err != nil { + rErr := b.reconnect(ctx) + if rErr != nil { + err = multierror.Append(err, rErr) + } else { + // Try once more after reconnecting + err = b.write(ctx, buf.Bytes()) + } + } + + return err +} + +func (b *Backend) write(ctx context.Context, buf []byte) error { + if b.connection == nil { + if err := b.reconnect(ctx); err != nil { + return err + } + } + + err := b.connection.SetWriteDeadline(time.Now().Add(b.writeDuration)) + if err != nil { + return err + } + + _, err = b.connection.Write(buf) + if err != nil { + return err + } + + return nil +} + +func (b *Backend) reconnect(ctx context.Context) error { + if b.connection != nil { + b.connection.Close() + b.connection = nil + } + + timeoutContext, cancel := context.WithTimeout(ctx, b.writeDuration) + defer cancel() + + dialer := net.Dialer{} + conn, err := dialer.DialContext(timeoutContext, b.socketType, b.address) + if err != nil { + return err + } + + b.connection = conn + + return nil +} + +func (b *Backend) Reload(ctx context.Context) error { + b.Lock() + defer b.Unlock() + + err := b.reconnect(ctx) + + return err +} + +func (b *Backend) Salt(ctx context.Context) (*salt.Salt, error) { + b.saltMutex.RLock() + if b.salt != nil { + defer b.saltMutex.RUnlock() + return b.salt, nil + } + b.saltMutex.RUnlock() + 
b.saltMutex.Lock() + defer b.saltMutex.Unlock() + if b.salt != nil { + return b.salt, nil + } + salt, err := salt.NewSalt(ctx, b.saltView, b.saltConfig) + if err != nil { + return nil, err + } + b.salt = salt + return salt, nil +} + +func (b *Backend) Invalidate(_ context.Context) { + b.saltMutex.Lock() + defer b.saltMutex.Unlock() + b.salt = nil +} diff --git a/builtin/audit/syslog/backend.go b/builtin/audit/syslog/backend.go new file mode 100644 index 0000000..2da92fe --- /dev/null +++ b/builtin/audit/syslog/backend.go @@ -0,0 +1,196 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package syslog + +import ( + "bytes" + "context" + "fmt" + "strconv" + "sync" + + gsyslog "github.com/hashicorp/go-syslog" + "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" +) + +func Factory(ctx context.Context, conf *audit.BackendConfig) (audit.Backend, error) { + if conf.SaltConfig == nil { + return nil, fmt.Errorf("nil salt config") + } + if conf.SaltView == nil { + return nil, fmt.Errorf("nil salt view") + } + + // Get facility or default to AUTH + facility, ok := conf.Config["facility"] + if !ok { + facility = "AUTH" + } + + // Get tag or default to 'vault' + tag, ok := conf.Config["tag"] + if !ok { + tag = "vault" + } + + format, ok := conf.Config["format"] + if !ok { + format = "json" + } + switch format { + case "json", "jsonx": + default: + return nil, fmt.Errorf("unknown format type %q", format) + } + + // Check if hashing of accessor is disabled + hmacAccessor := true + if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok { + value, err := strconv.ParseBool(hmacAccessorRaw) + if err != nil { + return nil, err + } + hmacAccessor = value + } + + // Check if raw logging is enabled + logRaw := false + if raw, ok := conf.Config["log_raw"]; ok { + b, err := strconv.ParseBool(raw) + if err != nil { + return nil, err + } + logRaw = b + } + + elideListResponses := false + if elideListResponsesRaw, ok := conf.Config["elide_list_responses"]; ok { + value, err := strconv.ParseBool(elideListResponsesRaw) + if err != nil { + return nil, err + } + elideListResponses = value + } + + // Get the logger + logger, err := gsyslog.NewLogger(gsyslog.LOG_INFO, facility, tag) + if err != nil { + return nil, err + } + + b := &Backend{ + logger: logger, + saltConfig: conf.SaltConfig, + saltView: conf.SaltView, + formatConfig: audit.FormatterConfig{ + Raw: logRaw, + HMACAccessor: hmacAccessor, + ElideListResponses: elideListResponses, + }, + } + + switch format { + case "json": + b.formatter.AuditFormatWriter = &audit.JSONFormatWriter{ + Prefix: conf.Config["prefix"], + SaltFunc: b.Salt, + } + case "jsonx": + b.formatter.AuditFormatWriter = &audit.JSONxFormatWriter{ + Prefix: conf.Config["prefix"], + SaltFunc: b.Salt, + } + } + + return b, nil +} + +// Backend is the audit backend for the syslog-based audit store. 
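+// Unlike the file backend it keeps no file-handle state: each formatted
+// entry is handed directly to the gsyslog logger, which is why Reload is a
+// no-op. The write path, in sketch form (mirroring LogRequest below):
+//
+//	var buf bytes.Buffer
+//	b.formatter.FormatRequest(ctx, &buf, b.formatConfig, in)
+//	b.logger.Write(buf.Bytes())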
+type Backend struct { + logger gsyslog.Syslogger + + formatter audit.AuditFormatter + formatConfig audit.FormatterConfig + + saltMutex sync.RWMutex + salt *salt.Salt + saltConfig *salt.Config + saltView logical.Storage +} + +var _ audit.Backend = (*Backend)(nil) + +func (b *Backend) GetHash(ctx context.Context, data string) (string, error) { + salt, err := b.Salt(ctx) + if err != nil { + return "", err + } + return audit.HashString(salt, data), nil +} + +func (b *Backend) LogRequest(ctx context.Context, in *logical.LogInput) error { + var buf bytes.Buffer + if err := b.formatter.FormatRequest(ctx, &buf, b.formatConfig, in); err != nil { + return err + } + + // Write out to syslog + _, err := b.logger.Write(buf.Bytes()) + return err +} + +func (b *Backend) LogResponse(ctx context.Context, in *logical.LogInput) error { + var buf bytes.Buffer + if err := b.formatter.FormatResponse(ctx, &buf, b.formatConfig, in); err != nil { + return err + } + + // Write out to syslog + _, err := b.logger.Write(buf.Bytes()) + return err +} + +func (b *Backend) LogTestMessage(ctx context.Context, in *logical.LogInput, config map[string]string) error { + var buf bytes.Buffer + temporaryFormatter := audit.NewTemporaryFormatter(config["format"], config["prefix"]) + if err := temporaryFormatter.FormatRequest(ctx, &buf, b.formatConfig, in); err != nil { + return err + } + + // Send to syslog + _, err := b.logger.Write(buf.Bytes()) + return err +} + +func (b *Backend) Reload(_ context.Context) error { + return nil +} + +func (b *Backend) Salt(ctx context.Context) (*salt.Salt, error) { + b.saltMutex.RLock() + if b.salt != nil { + defer b.saltMutex.RUnlock() + return b.salt, nil + } + b.saltMutex.RUnlock() + b.saltMutex.Lock() + defer b.saltMutex.Unlock() + if b.salt != nil { + return b.salt, nil + } + salt, err := salt.NewSalt(ctx, b.saltView, b.saltConfig) + if err != nil { + return nil, err + } + b.salt = salt + return salt, nil +} + +func (b *Backend) Invalidate(_ context.Context) { + b.saltMutex.Lock() + defer b.saltMutex.Unlock() + b.salt = nil +} diff --git a/builtin/credential/approle/backend.go b/builtin/credential/approle/backend.go new file mode 100644 index 0000000..4165fbb --- /dev/null +++ b/builtin/credential/approle/backend.go @@ -0,0 +1,181 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package approle + +import ( + "context" + "sync" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/locksutil" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + operationPrefixAppRole = "app-role" + secretIDPrefix = "secret_id/" + secretIDLocalPrefix = "secret_id_local/" + secretIDAccessorPrefix = "accessor/" + secretIDAccessorLocalPrefix = "accessor_local/" +) + +// ReportedVersion is used to report a specific version to Vault. +var ReportedVersion = "" + +type backend struct { + *framework.Backend + + // The salt value to be used by the information to be accessed only + // by this backend. + salt *salt.Salt + saltMutex sync.RWMutex + + // The view to use when creating the salt + view logical.Storage + + // Guard to clean-up the expired SecretID entries + tidySecretIDCASGuard *uint32 + + // Locks to make changes to role entries. These will be initialized to a + // predefined number of locks when the backend is created, and will be + // indexed based on salted role names. 
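+	// NOTE: locksutil.CreateLocks returns a fixed pool of locks and callers
+	// pick one by hashing the key (a sharded-lock pattern), so unrelated
+	// names rarely contend on the same mutex.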
+	roleLocks []*locksutil.LockEntry
+
+	// Locks to make changes to the storage entries of RoleIDs generated. These
+	// will be initialized to a predefined number of locks when the backend is
+	// created, and will be indexed based on the salted RoleIDs.
+	roleIDLocks []*locksutil.LockEntry
+
+	// Locks to make changes to the storage entries of SecretIDs generated.
+	// These will be initialized to a predefined number of locks when the
+	// backend is created, and will be indexed based on the HMAC-ed SecretIDs.
+	secretIDLocks []*locksutil.LockEntry
+
+	// Locks to make changes to the storage entries of SecretIDAccessors
+	// generated. These will be initialized to a predefined number of locks
+	// when the backend is created, and will be indexed based on the
+	// SecretIDAccessors themselves.
+	secretIDAccessorLocks []*locksutil.LockEntry
+
+	// secretIDListingLock is a dedicated lock for listing SecretIDAccessors
+	// for all the SecretIDs issued against an approle
+	secretIDListingLock sync.RWMutex
+}
+
+func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
+	b, err := Backend(conf)
+	if err != nil {
+		return nil, err
+	}
+	if err := b.Setup(ctx, conf); err != nil {
+		return nil, err
+	}
+	return b, nil
+}
+
+func Backend(conf *logical.BackendConfig) (*backend, error) {
+	// Create a backend object
+	b := &backend{
+		view: conf.StorageView,
+
+		// Create locks to modify the registered roles
+		roleLocks: locksutil.CreateLocks(),
+
+		// Create locks to modify the generated RoleIDs
+		roleIDLocks: locksutil.CreateLocks(),
+
+		// Create locks to modify the generated SecretIDs
+		secretIDLocks: locksutil.CreateLocks(),
+
+		// Create locks to modify the generated SecretIDAccessors
+		secretIDAccessorLocks: locksutil.CreateLocks(),
+
+		tidySecretIDCASGuard: new(uint32),
+	}
+
+	// Attach the paths and secrets that are to be handled by the backend
+	b.Backend = &framework.Backend{
+		// Register a periodic function that deletes the expired SecretID entries
+		PeriodicFunc: b.periodicFunc,
+		Help:         backendHelp,
+		AuthRenew:    b.pathLoginRenew,
+		PathsSpecial: &logical.Paths{
+			Unauthenticated: []string{
+				"login",
+			},
+			LocalStorage: []string{
+				secretIDLocalPrefix,
+				secretIDAccessorLocalPrefix,
+			},
+		},
+		Paths: framework.PathAppend(
+			rolePaths(b),
+			[]*framework.Path{
+				pathLogin(b),
+				pathTidySecretID(b),
+			},
+		),
+		Invalidate:     b.invalidate,
+		BackendType:    logical.TypeCredential,
+		RunningVersion: ReportedVersion,
+	}
+	return b, nil
+}
+
+func (b *backend) Salt(ctx context.Context) (*salt.Salt, error) {
+	b.saltMutex.RLock()
+	if b.salt != nil {
+		defer b.saltMutex.RUnlock()
+		return b.salt, nil
+	}
+	b.saltMutex.RUnlock()
+	b.saltMutex.Lock()
+	defer b.saltMutex.Unlock()
+	if b.salt != nil {
+		return b.salt, nil
+	}
+	salt, err := salt.NewSalt(ctx, b.view, &salt.Config{
+		HashFunc: salt.SHA256Hash,
+		Location: salt.DefaultLocation,
+	})
+	if err != nil {
+		return nil, err
+	}
+	b.salt = salt
+	return salt, nil
+}
+
+func (b *backend) invalidate(_ context.Context, key string) {
+	switch key {
+	case salt.DefaultLocation:
+		b.saltMutex.Lock()
+		defer b.saltMutex.Unlock()
+		b.salt = nil
+	}
+}
+
+// periodicFunc of the backend will be invoked once a minute by the RollbackManager.
+// The AppRole backend uses this function to delete expired SecretID entries.
+// This could mean that a SecretID may live in the backend for up to one minute after its
+// expiration.
The deletion of SecretIDs is not security-sensitive and it is okay
+// to delay the removal of SecretIDs by a minute.
+func (b *backend) periodicFunc(ctx context.Context, req *logical.Request) error {
+	// Initiate clean-up of expired SecretID entries
+	if b.System().LocalMount() || !b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary|consts.ReplicationPerformanceStandby) {
+		b.tidySecretID(ctx, req)
+	}
+	return nil
+}
+
+const backendHelp = `
+Any registered Role can authenticate itself with Vault. The credentials
+required depend on the constraints that are set on the Role. One common
+required credential is the 'role_id', which is a unique identifier of the Role.
+It can be retrieved from the 'role/<role_name>/role-id' endpoint.
+
+The default constraint configuration is 'bind_secret_id', which requires
+the credential 'secret_id' to be presented during login. Refer to the
+documentation for other types of constraints.`
diff --git a/builtin/credential/approle/backend_test.go b/builtin/credential/approle/backend_test.go
new file mode 100644
index 0000000..683249e
--- /dev/null
+++ b/builtin/credential/approle/backend_test.go
@@ -0,0 +1,430 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package approle
+
+import (
+	"context"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) {
+	t.Helper()
+	config := logical.TestBackendConfig()
+	config.StorageView = &logical.InmemStorage{}
+
+	b, err := Backend(config)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if b == nil {
+		t.Fatalf("failed to create backend")
+	}
+	err = b.Backend.Setup(context.Background(), config)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return b, config.StorageView
+}
+
+func TestAppRole_RoleServiceToBatchNumUses(t *testing.T) {
+	b, s := createBackendWithStorage(t)
+
+	requestFunc := func(operation logical.Operation, data map[string]interface{}) {
+		resp, err := b.HandleRequest(context.Background(), &logical.Request{
+			Path:      "role/testrole",
+			Operation: operation,
+			Storage:   s,
+			Data:      data,
+		})
+		if err != nil || (resp != nil && resp.IsError()) {
+			t.Fatalf("bad: err: %#v\nresp: %#v", err, resp)
+		}
+	}
+
+	data := map[string]interface{}{
+		"bind_secret_id":     true,
+		"secret_id_num_uses": 0,
+		"secret_id_ttl":      "10m",
+		"token_policies":     "policy",
+		"token_ttl":          "5m",
+		"token_max_ttl":      "10m",
+		"token_num_uses":     2,
+		"token_type":         "default",
+	}
+	requestFunc(logical.CreateOperation, data)
+
+	data["token_num_uses"] = 0
+	data["token_type"] = "batch"
+	requestFunc(logical.UpdateOperation, data)
+
+	resp, err := b.HandleRequest(context.Background(), &logical.Request{
+		Path:      "role/testrole/role-id",
+		Operation: logical.ReadOperation,
+		Storage:   s,
+	})
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("bad: resp: %#v\nerr: %v", resp, err)
+	}
+	roleID := resp.Data["role_id"]
+
+	resp, err = b.HandleRequest(context.Background(), &logical.Request{
+		Path:      "role/testrole/secret-id",
+		Operation: logical.UpdateOperation,
+		Storage:   s,
+	})
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("bad: resp: %#v\nerr: %v", resp, err)
+	}
+	secretID := resp.Data["secret_id"]
+
+	resp, err = b.HandleRequest(context.Background(), &logical.Request{
+		Path:      "login",
+		Operation: logical.UpdateOperation,
+		Data: map[string]interface{}{
+			"role_id":   roleID,
+			"secret_id": secretID,
+		},
+		Storage: s,
+	})
+	if err != nil || (resp != nil &&
resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } + require.NotNil(t, resp.Auth) +} + +func TestAppRole_RoleNameCaseSensitivity(t *testing.T) { + testFunc := func(t *testing.T, roleName string) { + var resp *logical.Response + var err error + b, s := createBackendWithStorage(t) + + // Create the role + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "role/" + roleName, + Operation: logical.CreateOperation, + Storage: s, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr:%v", resp, err) + } + + // Get the role-id + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "role/" + roleName + "/role-id", + Operation: logical.ReadOperation, + Storage: s, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } + roleID := resp.Data["role_id"] + + // Create a secret-id + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "role/" + roleName + "/secret-id", + Operation: logical.UpdateOperation, + Storage: s, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } + secretID := resp.Data["secret_id"] + secretIDAccessor := resp.Data["secret_id_accessor"] + + // Ensure login works + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "login", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "role_id": roleID, + "secret_id": secretID, + }, + Storage: s, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } + if resp.Auth == nil { + t.Fatalf("failed to perform login") + } + + // Destroy secret ID accessor + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "role/" + roleName + "/secret-id-accessor/destroy", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "secret_id_accessor": secretIDAccessor, + }, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } + + // Login again using the accessor's corresponding secret ID should fail + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "login", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "role_id": roleID, + "secret_id": secretID, + }, + Storage: s, + }) + if err != nil && err != logical.ErrInvalidCredentials { + t.Fatal(err) + } + if resp == nil || !resp.IsError() { + t.Fatalf("expected error due to invalid secret ID") + } + + // Generate another secret ID + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "role/" + roleName + "/secret-id", + Operation: logical.UpdateOperation, + Storage: s, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } + secretID = resp.Data["secret_id"] + secretIDAccessor = resp.Data["secret_id_accessor"] + + // Ensure login works + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "login", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "role_id": roleID, + "secret_id": secretID, + }, + Storage: s, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } + if resp.Auth == nil { + t.Fatalf("failed to perform login") + } + + // Destroy the secret ID + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "role/" + 
roleName + "/secret-id/destroy", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "secret_id": secretID, + }, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } + + // Login again using the same secret ID should fail + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "login", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "role_id": roleID, + "secret_id": secretID, + }, + Storage: s, + }) + if err != nil && err != logical.ErrInvalidCredentials { + t.Fatal(err) + } + if resp == nil || !resp.IsError() { + t.Fatalf("expected error due to invalid secret ID") + } + + // Generate another secret ID + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "role/" + roleName + "/secret-id", + Operation: logical.UpdateOperation, + Storage: s, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } + secretID = resp.Data["secret_id"] + secretIDAccessor = resp.Data["secret_id_accessor"] + + // Ensure login works + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "login", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "role_id": roleID, + "secret_id": secretID, + }, + Storage: s, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } + if resp.Auth == nil { + t.Fatalf("failed to perform login") + } + + // Destroy the secret ID using lower cased role name + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "role/" + strings.ToLower(roleName) + "/secret-id/destroy", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "secret_id": secretID, + }, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } + + // Login again using the same secret ID should fail + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "login", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "role_id": roleID, + "secret_id": secretID, + }, + Storage: s, + }) + if err != nil && err != logical.ErrInvalidCredentials { + t.Fatal(err) + } + if resp == nil || !resp.IsError() { + t.Fatalf("expected error due to invalid secret ID") + } + + // Generate another secret ID + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "role/" + roleName + "/secret-id", + Operation: logical.UpdateOperation, + Storage: s, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } + secretID = resp.Data["secret_id"] + secretIDAccessor = resp.Data["secret_id_accessor"] + + // Ensure login works + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "login", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "role_id": roleID, + "secret_id": secretID, + }, + Storage: s, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } + if resp.Auth == nil { + t.Fatalf("failed to perform login") + } + + // Destroy the secret ID using upper cased role name + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "role/" + strings.ToUpper(roleName) + "/secret-id/destroy", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "secret_id": secretID, + }, + }) + 
if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } + + // Login again using the same secret ID should fail + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "login", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "role_id": roleID, + "secret_id": secretID, + }, + Storage: s, + }) + if err != nil && err != logical.ErrInvalidCredentials { + t.Fatal(err) + } + if resp == nil || !resp.IsError() { + t.Fatalf("expected error due to invalid secret ID") + } + + // Generate another secret ID + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "role/" + roleName + "/secret-id", + Operation: logical.UpdateOperation, + Storage: s, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } + secretID = resp.Data["secret_id"] + secretIDAccessor = resp.Data["secret_id_accessor"] + + // Ensure login works + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "login", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "role_id": roleID, + "secret_id": secretID, + }, + Storage: s, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } + if resp.Auth == nil { + t.Fatalf("failed to perform login") + } + + // Destroy the secret ID using mixed case name + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "role/saMpleRolEnaMe/secret-id/destroy", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "secret_id": secretID, + }, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } + + // Login again using the same secret ID should fail + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "login", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "role_id": roleID, + "secret_id": secretID, + }, + Storage: s, + }) + if err != nil && err != logical.ErrInvalidCredentials { + t.Fatal(err) + } + if resp == nil || !resp.IsError() { + t.Fatalf("expected error due to invalid secret ID") + } + } + + // Lower case role name + testFunc(t, "samplerolename") + // Upper case role name + testFunc(t, "SAMPLEROLENAME") + // Mixed case role name + testFunc(t, "SampleRoleName") +} diff --git a/builtin/credential/approle/cmd/approle/main.go b/builtin/credential/approle/cmd/approle/main.go new file mode 100644 index 0000000..9000ea9 --- /dev/null +++ b/builtin/credential/approle/cmd/approle/main.go @@ -0,0 +1,34 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "os" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/credential/approle" + "github.com/hashicorp/vault/sdk/plugin" +) + +func main() { + apiClientMeta := &api.PluginAPIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args[1:]) + + tlsConfig := apiClientMeta.GetTLSConfig() + tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) + + if err := plugin.ServeMultiplex(&plugin.ServeOpts{ + BackendFactoryFunc: approle.Factory, + // set the TLSProviderFunc so that the plugin maintains backwards + // compatibility with Vault versions that don’t support plugin AutoMTLS + TLSProviderFunc: tlsProviderFunc, + }); err != nil { + logger := hclog.New(&hclog.LoggerOptions{}) + + logger.Error("plugin shutting down", "error", err) + os.Exit(1) + } +} diff --git a/builtin/credential/approle/path_login.go b/builtin/credential/approle/path_login.go new file mode 100644 index 0000000..1e21e79 --- /dev/null +++ b/builtin/credential/approle/path_login.go @@ -0,0 +1,426 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package approle + +import ( + "context" + "fmt" + "net/http" + "strings" + "time" + + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/cidrutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathLogin(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "login$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationVerb: "login", + }, + Fields: map[string]*framework.FieldSchema{ + "role_id": { + Type: framework.TypeString, + Description: "Unique identifier of the Role. 
Required to be supplied when the 'bind_secret_id' constraint is set.",
+			},
+			"secret_id": {
+				Type:        framework.TypeString,
+				Default:     "",
+				Description: "SecretID belonging to the AppRole",
+			},
+		},
+		Operations: map[logical.Operation]framework.OperationHandler{
+			logical.UpdateOperation: &framework.PathOperation{
+				Callback: b.pathLoginUpdate,
+				Responses: map[int][]framework.Response{
+					http.StatusOK: {{
+						Description: http.StatusText(http.StatusOK),
+					}},
+				},
+			},
+			logical.AliasLookaheadOperation: &framework.PathOperation{
+				Callback: b.pathLoginUpdateAliasLookahead,
+				Responses: map[int][]framework.Response{
+					http.StatusOK: {{
+						Description: http.StatusText(http.StatusOK),
+					}},
+				},
+			},
+			logical.ResolveRoleOperation: &framework.PathOperation{
+				Callback: b.pathLoginResolveRole,
+				Responses: map[int][]framework.Response{
+					http.StatusOK: {{
+						Description: http.StatusText(http.StatusOK),
+						Fields: map[string]*framework.FieldSchema{
+							"role": {
+								Type:     framework.TypeString,
+								Required: true,
+							},
+						},
+					}},
+				},
+			},
+		},
+		HelpSynopsis:    pathLoginHelpSys,
+		HelpDescription: pathLoginHelpDesc,
+	}
+}
+
+func (b *backend) pathLoginUpdateAliasLookahead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	roleID := strings.TrimSpace(data.Get("role_id").(string))
+	if roleID == "" {
+		return nil, fmt.Errorf("missing role_id")
+	}
+
+	return &logical.Response{
+		Auth: &logical.Auth{
+			Alias: &logical.Alias{
+				Name: roleID,
+			},
+		},
+	}, nil
+}
+
+func (b *backend) pathLoginResolveRole(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	// RoleID must be supplied during every login
+	roleID := strings.TrimSpace(data.Get("role_id").(string))
+	if roleID == "" {
+		return logical.ErrorResponse("missing role_id"), nil
+	}
+
+	// Look for the storage entry that maps the roleID to the role
+	roleIDIndex, err := b.roleIDEntry(ctx, req.Storage, roleID)
+	if err != nil {
+		return nil, err
+	}
+	if roleIDIndex == nil {
+		return logical.ErrorResponse("invalid role or secret ID"), nil
+	}
+
+	roleName := roleIDIndex.Name
+
+	roleLock := b.roleLock(roleName)
+	roleLock.RLock()
+
+	role, err := b.roleEntry(ctx, req.Storage, roleName)
+	roleLock.RUnlock()
+	if err != nil {
+		return nil, err
+	}
+	if role == nil {
+		return logical.ErrorResponse("invalid role or secret ID"), nil
+	}
+
+	return logical.ResolveRoleResponse(roleName)
+}
+
+// Returns the Auth object indicating the authentication and authorization information
+// if the credentials provided are validated by the backend.
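+//
+// In outline, the flow below is: resolve role_id to a role name via the
+// roleID index, load the role under its sharded read lock, and, when
+// 'bind_secret_id' is set, look up the HMAC-ed secret ID entry and enforce
+// its use count and CIDR bounds before building the token, roughly:
+//
+//	auth := &logical.Auth{Metadata: metadata, Alias: &logical.Alias{Name: role.RoleID}}
+//	role.PopulateTokenAuth(auth)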
+func (b *backend) pathLoginUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // RoleID must be supplied during every login + roleID := strings.TrimSpace(data.Get("role_id").(string)) + if roleID == "" { + return logical.ErrorResponse("missing role_id"), nil + } + + // Look for the storage entry that maps the roleID to role + roleIDIndex, err := b.roleIDEntry(ctx, req.Storage, roleID) + if err != nil { + return nil, err + } + if roleIDIndex == nil { + return logical.ErrorResponse("invalid role or secret ID"), nil + } + + roleName := roleIDIndex.Name + + roleLock := b.roleLock(roleName) + roleLock.RLock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + roleLock.RUnlock() + if err != nil { + return nil, err + } + if role == nil { + return logical.ErrorResponse("invalid role or secret ID"), nil + } + + metadata := make(map[string]string) + var entry *secretIDStorageEntry + if role.BindSecretID { + secretID := strings.TrimSpace(data.Get("secret_id").(string)) + if secretID == "" { + return logical.ErrorResponse("missing secret_id"), nil + } + + secretIDHMAC, err := createHMAC(role.HMACKey, secretID) + if err != nil { + return nil, fmt.Errorf("failed to create HMAC of secret_id: %w", err) + } + + roleNameHMAC, err := createHMAC(role.HMACKey, role.name) + if err != nil { + return nil, fmt.Errorf("failed to create HMAC of role_name: %w", err) + } + + entryIndex := fmt.Sprintf("%s%s/%s", role.SecretIDPrefix, roleNameHMAC, secretIDHMAC) + + secretIDLock := b.secretIDLock(secretIDHMAC) + secretIDLock.RLock() + + unlockFunc := secretIDLock.RUnlock + defer func() { + unlockFunc() + }() + + entry, err = b.nonLockedSecretIDStorageEntry(ctx, req.Storage, role.SecretIDPrefix, roleNameHMAC, secretIDHMAC) + if err != nil { + return nil, err + } + if entry == nil { + return logical.ErrorResponse("invalid role or secret ID"), logical.ErrInvalidCredentials + } + + // If a secret ID entry does not have a corresponding accessor + // entry, revoke the secret ID immediately + accessorEntry, err := b.secretIDAccessorEntry(ctx, req.Storage, entry.SecretIDAccessor, role.SecretIDPrefix) + if err != nil { + return nil, fmt.Errorf("failed to read secret ID accessor entry: %w", err) + } + if accessorEntry == nil { + // Switch the locks and recheck the conditions + secretIDLock.RUnlock() + secretIDLock.Lock() + unlockFunc = secretIDLock.Unlock + + entry, err = b.nonLockedSecretIDStorageEntry(ctx, req.Storage, role.SecretIDPrefix, roleNameHMAC, secretIDHMAC) + if err != nil { + return nil, err + } + if entry == nil { + return logical.ErrorResponse("invalid role or secret ID"), nil + } + + accessorEntry, err := b.secretIDAccessorEntry(ctx, req.Storage, entry.SecretIDAccessor, role.SecretIDPrefix) + if err != nil { + return nil, fmt.Errorf("failed to read secret ID accessor entry: %w", err) + } + + if accessorEntry == nil { + if err := req.Storage.Delete(ctx, entryIndex); err != nil { + return nil, fmt.Errorf("error deleting secret ID %q from storage: %w", secretIDHMAC, err) + } + } + return logical.ErrorResponse("invalid role or secret ID"), nil + } + + switch { + case entry.SecretIDNumUses == 0: + // + // SecretIDNumUses will be zero only if the usage limit was not set at all, + // in which case, the SecretID will remain to be valid as long as it is not + // expired. 
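+			// NOTE: in this unlimited-uses branch nothing is written back to
+			// storage, so the read lock acquired earlier remains sufficient;
+			// the default branch below, by contrast, must switch to a write
+			// lock before decrementing the use count.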
+			//
+
+			// Ensure that the CIDRs on the secret ID are still a subset of the
+			// role's CIDRs
+			err = verifyCIDRRoleSecretIDSubset(entry.CIDRList, role.SecretIDBoundCIDRs)
+			if err != nil {
+				return nil, err
+			}
+
+			// If CIDR restrictions are present on the secret ID, check that the
+			// source IP complies with them
+			if len(entry.CIDRList) != 0 {
+				if req.Connection == nil || req.Connection.RemoteAddr == "" {
+					return nil, fmt.Errorf("failed to get connection information")
+				}
+
+				belongs, err := cidrutil.IPBelongsToCIDRBlocksSlice(req.Connection.RemoteAddr, entry.CIDRList)
+				if err != nil {
+					return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+				}
+
+				if !belongs {
+					return logical.ErrorResponse(fmt.Errorf(
+						"source address %q unauthorized through CIDR restrictions on the secret ID",
+						req.Connection.RemoteAddr,
+					).Error()), nil
+				}
+			}
+		default:
+			//
+			// If the SecretIDNumUses is non-zero, its use count needs to be updated
+			// in the storage. Switch the lock from a `read` to a `write` and update
+			// the storage entry.
+			//
+
+			secretIDLock.RUnlock()
+			secretIDLock.Lock()
+			unlockFunc = secretIDLock.Unlock
+
+			// Lock switching may change the data. Refresh the contents.
+			entry, err = b.nonLockedSecretIDStorageEntry(ctx, req.Storage, role.SecretIDPrefix, roleNameHMAC, secretIDHMAC)
+			if err != nil {
+				return nil, err
+			}
+			if entry == nil {
+				return logical.ErrorResponse(fmt.Sprintf("invalid secret_id %q", secretID)), nil
+			}
+
+			// If there is a single use left, delete the SecretID entry from
+			// the storage but do not fail the validation request. Subsequent
+			// requests to use the same SecretID will fail.
+			if entry.SecretIDNumUses == 1 {
+				// Delete the secret ID's accessor first
+				err = b.deleteSecretIDAccessorEntry(ctx, req.Storage, entry.SecretIDAccessor, role.SecretIDPrefix)
+				if err != nil {
+					return nil, err
+				}
+				err = req.Storage.Delete(ctx, entryIndex)
+				if err != nil {
+					return nil, fmt.Errorf("failed to delete secret ID: %w", err)
+				}
+			} else {
+				// If the use count is greater than one, decrement it and update the last updated time.
+				entry.SecretIDNumUses -= 1
+				entry.LastUpdatedTime = time.Now()
+
+				sEntry, err := logical.StorageEntryJSON(entryIndex, &entry)
+				if err != nil {
+					return nil, err
+				}
+
+				err = req.Storage.Put(ctx, sEntry)
+				if err != nil {
+					return nil, err
+				}
+			}
+
+			// Ensure that the CIDRs on the secret ID are still a subset of the
+			// role's CIDRs
+			err = verifyCIDRRoleSecretIDSubset(entry.CIDRList, role.SecretIDBoundCIDRs)
+			if err != nil {
+				return nil, err
+			}
+
+			// If CIDR restrictions are present on the secret ID, check that the
+			// source IP complies with them
+			if len(entry.CIDRList) != 0 {
+				if req.Connection == nil || req.Connection.RemoteAddr == "" {
+					return nil, fmt.Errorf("failed to get connection information")
+				}
+
+				belongs, err := cidrutil.IPBelongsToCIDRBlocksSlice(req.Connection.RemoteAddr, entry.CIDRList)
+				if err != nil || !belongs {
+					return logical.ErrorResponse(
+						fmt.Errorf(
+							"source address %q unauthorized by CIDR restrictions on the secret ID: %w",
+							req.Connection.RemoteAddr,
+							err,
+						).Error()), nil
+				}
+			}
+		}
+
+		metadata = entry.Metadata
+	}
+
+	if len(role.SecretIDBoundCIDRs) != 0 {
+		if req.Connection == nil || req.Connection.RemoteAddr == "" {
+			return nil, fmt.Errorf("failed to get connection information")
+		}
+		belongs, err := cidrutil.IPBelongsToCIDRBlocksSlice(req.Connection.RemoteAddr, role.SecretIDBoundCIDRs)
+		if err != nil || !belongs {
+			return logical.ErrorResponse(
+				fmt.Errorf(
+					"source address %q unauthorized by CIDR restrictions on the role: %w",
+					req.Connection.RemoteAddr,
+					err,
+				).Error()), nil
+		}
+	}
+
+	// Parse the CIDRs we should be binding the token to.
+	tokenBoundCIDRs := role.TokenBoundCIDRs
+	if entry != nil && len(entry.TokenBoundCIDRs) > 0 {
+		tokenBoundCIDRs, err = parseutil.ParseAddrs(entry.TokenBoundCIDRs)
+		if err != nil {
+			return logical.ErrorResponse(err.Error()), nil
+		}
+	}
+
+	// If metadata was set to nil while processing the secret ID binding,
+	// ensure that it is initialized again to avoid a panic.
+	if metadata == nil {
+		metadata = make(map[string]string)
+	}
+
+	// Always include the role name, for later filtering
+	metadata["role_name"] = role.name
+
+	auth := &logical.Auth{
+		InternalData: map[string]interface{}{
+			"role_name": role.name,
+		},
+		Metadata: metadata,
+		Alias: &logical.Alias{
+			Name:     role.RoleID,
+			Metadata: metadata,
+		},
+	}
+	role.PopulateTokenAuth(auth)
+
+	// Allow for overridden token bound CIDRs
+	auth.BoundCIDRs = tokenBoundCIDRs
+
+	return &logical.Response{
+		Auth: auth,
+	}, nil
+}
+
+// Invoked when the token issued by this backend is attempting a renewal.
+func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	roleName := req.Auth.InternalData["role_name"].(string)
+	if roleName == "" {
+		return nil, fmt.Errorf("failed to fetch role_name during renewal")
+	}
+
+	lock := b.roleLock(roleName)
+	lock.RLock()
+	defer lock.RUnlock()
+
+	// Ensure that the Role still exists.
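+	// NOTE: renewal re-reads the role so that changes to the role's
+	// TokenTTL/TokenMaxTTL/TokenPeriod made after login take effect on the
+	// renewed token; if the role has been deleted, renewal fails outright.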
+	role, err := b.roleEntry(ctx, req.Storage, roleName)
+	if err != nil {
+		return nil, fmt.Errorf("failed to validate role %q during renewal: %w", roleName, err)
+	}
+	if role == nil {
+		return nil, fmt.Errorf("role %q does not exist during renewal", roleName)
+	}
+
+	resp := &logical.Response{Auth: req.Auth}
+	resp.Auth.TTL = role.TokenTTL
+	resp.Auth.MaxTTL = role.TokenMaxTTL
+	resp.Auth.Period = role.TokenPeriod
+	return resp, nil
+}
+
+const pathLoginHelpSys = "Issue a token based on the credentials supplied"
+
+const pathLoginHelpDesc = `
+While the credential 'role_id' is required at all times, the other
+credentials required depend on the properties of the AppRole to which
+the 'role_id' belongs. The 'bind_secret_id' constraint (enabled by
+default) on the AppRole requires the 'secret_id' credential to be
+presented.
+
+'role_id' is fetched using the 'role/<role_name>/role-id' endpoint
+and 'secret_id' is fetched using the 'role/<role_name>/secret-id'
+endpoint.`
diff --git a/builtin/credential/approle/path_login_test.go b/builtin/credential/approle/path_login_test.go
new file mode 100644
index 0000000..efed467
--- /dev/null
+++ b/builtin/credential/approle/path_login_test.go
@@ -0,0 +1,360 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package approle
+
+import (
+	"context"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+func TestAppRole_BoundCIDRLogin(t *testing.T) {
+	var resp *logical.Response
+	var err error
+	b, s := createBackendWithStorage(t)
+
+	// Create a role with secret ID binding disabled and only bound cidr list
+	// enabled
+	resp = b.requestNoErr(t, &logical.Request{
+		Path:      "role/testrole",
+		Operation: logical.CreateOperation,
+		Data: map[string]interface{}{
+			"bind_secret_id":    false,
+			"bound_cidr_list":   []string{"127.0.0.1/8"},
+			"token_bound_cidrs": []string{"10.0.0.0/8"},
+		},
+		Storage: s,
+	})
+
+	// Read the role ID
+	resp = b.requestNoErr(t, &logical.Request{
+		Path:      "role/testrole/role-id",
+		Operation: logical.ReadOperation,
+		Storage:   s,
+	})
+
+	roleID := resp.Data["role_id"]
+
+	// Fill in the connection information and login with just the role ID
+	resp = b.requestNoErr(t, &logical.Request{
+		Path:      "login",
+		Operation: logical.UpdateOperation,
+		Data: map[string]interface{}{
+			"role_id": roleID,
+		},
+		Storage:    s,
+		Connection: &logical.Connection{RemoteAddr: "127.0.0.1"},
+	})
+
+	if resp.Auth == nil {
+		t.Fatal("expected login to succeed")
+	}
+	if len(resp.Auth.BoundCIDRs) != 1 {
+		t.Fatal("bad token bound cidrs")
+	}
+	if resp.Auth.BoundCIDRs[0].String() != "10.0.0.0/8" {
+		t.Fatalf("bad: %s", resp.Auth.BoundCIDRs[0].String())
+	}
+
+	// Override with a secret-id value, verify it doesn't pass
+	resp = b.requestNoErr(t, &logical.Request{
+		Path:      "role/testrole",
+		Operation: logical.UpdateOperation,
+		Data: map[string]interface{}{
+			"bind_secret_id": true,
+		},
+		Storage: s,
+	})
+
+	roleSecretIDReq := &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "role/testrole/secret-id",
+		Storage:   s,
+		Data: map[string]interface{}{
+			"token_bound_cidrs": []string{"11.0.0.0/24"},
+		},
+	}
+	resp, err = b.HandleRequest(context.Background(), roleSecretIDReq)
+	if err == nil {
+		t.Fatal("expected error due to mismatching subnet relationship")
+	}
+
+	roleSecretIDReq.Data["token_bound_cidrs"] = "10.0.0.0/24"
+	resp = b.requestNoErr(t, roleSecretIDReq)
+
+	secretID := resp.Data["secret_id"]
+
+	resp = b.requestNoErr(t, &logical.Request{
+		Path:      "login",
+		Operation: logical.UpdateOperation,
+		Data:
map[string]interface{}{ + "role_id": roleID, + "secret_id": secretID, + }, + Storage: s, + Connection: &logical.Connection{RemoteAddr: "127.0.0.1"}, + }) + + if resp.Auth == nil { + t.Fatal("expected login to succeed") + } + if len(resp.Auth.BoundCIDRs) != 1 { + t.Fatal("bad token bound cidrs") + } + if resp.Auth.BoundCIDRs[0].String() != "10.0.0.0/24" { + t.Fatalf("bad: %s", resp.Auth.BoundCIDRs[0].String()) + } +} + +func TestAppRole_RoleLogin(t *testing.T) { + var resp *logical.Response + var err error + b, storage := createBackendWithStorage(t) + + createRole(t, b, storage, "role1", "a,b,c") + roleRoleIDReq := &logical.Request{ + Operation: logical.ReadOperation, + Path: "role/role1/role-id", + Storage: storage, + } + resp = b.requestNoErr(t, roleRoleIDReq) + + roleID := resp.Data["role_id"] + + roleSecretIDReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "role/role1/secret-id", + Storage: storage, + } + resp = b.requestNoErr(t, roleSecretIDReq) + + secretID := resp.Data["secret_id"] + + loginData := map[string]interface{}{ + "role_id": roleID, + "secret_id": secretID, + } + loginReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "login", + Storage: storage, + Data: loginData, + Connection: &logical.Connection{ + RemoteAddr: "127.0.0.1", + }, + } + loginResp, err := b.HandleRequest(context.Background(), loginReq) + if err != nil || (loginResp != nil && loginResp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, loginResp) + } + + if loginResp.Auth == nil { + t.Fatalf("expected a non-nil auth object in the response") + } + + if loginResp.Auth.Metadata == nil { + t.Fatalf("expected a non-nil metadata object in the response") + } + + if val := loginResp.Auth.Metadata["role_name"]; val != "role1" { + t.Fatalf("expected metadata.role_name to equal 'role1', got: %v", val) + } + + if loginResp.Auth.Alias.Metadata == nil { + t.Fatalf("expected a non-nil alias metadata object in the response") + } + + if val := loginResp.Auth.Alias.Metadata["role_name"]; val != "role1" { + t.Fatalf("expected metadata.alias.role_name to equal 'role1', got: %v", val) + } + + // Test renewal + renewReq := generateRenewRequest(storage, loginResp.Auth) + + resp, err = b.HandleRequest(context.Background(), renewReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + if resp.Auth.TTL != 400*time.Second { + t.Fatalf("expected period value from response to be 400s, got: %s", resp.Auth.TTL) + } + + /// + // Test renewal with period + /// + + // Create role + period := 600 * time.Second + roleData := map[string]interface{}{ + "policies": "a,b,c", + "period": period.String(), + } + roleReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/" + "role-period", + Storage: storage, + Data: roleData, + } + resp = b.requestNoErr(t, roleReq) + + roleRoleIDReq = &logical.Request{ + Operation: logical.ReadOperation, + Path: "role/role-period/role-id", + Storage: storage, + } + resp = b.requestNoErr(t, roleRoleIDReq) + + roleID = resp.Data["role_id"] + + roleSecretIDReq = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "role/role-period/secret-id", + Storage: storage, + } + resp = b.requestNoErr(t, roleSecretIDReq) + + secretID = resp.Data["secret_id"] + + loginData["role_id"] = roleID + loginData["secret_id"] = secretID + + loginResp, err = b.HandleRequest(context.Background(), loginReq) + if err != nil || (loginResp != nil && loginResp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, loginResp) + } + + if 
loginResp.Auth == nil { + t.Fatalf("expected a non-nil auth object in the response") + } + + renewReq = generateRenewRequest(storage, loginResp.Auth) + + resp, err = b.HandleRequest(context.Background(), renewReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + if resp.Auth.Period != period { + t.Fatalf("expected period value of %d in the response, got: %s", period, resp.Auth.Period) + } + + // Test input validation with secret_id that exceeds max length + loginData["secret_id"] = strings.Repeat("a", maxHmacInputLength+1) + + loginReq = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "login", + Storage: storage, + Data: loginData, + Connection: &logical.Connection{ + RemoteAddr: "127.0.0.1", + }, + } + + loginResp, err = b.HandleRequest(context.Background(), loginReq) + + expectedErr := "failed to create HMAC of secret_id" + if loginResp != nil || err == nil || !strings.Contains(err.Error(), expectedErr) { + t.Fatalf("expected login test to fail with error %q, resp: %#v, err: %v", expectedErr, loginResp, err) + } +} + +func generateRenewRequest(s logical.Storage, auth *logical.Auth) *logical.Request { + renewReq := &logical.Request{ + Operation: logical.RenewOperation, + Storage: s, + Auth: &logical.Auth{}, + } + renewReq.Auth.InternalData = auth.InternalData + renewReq.Auth.Metadata = auth.Metadata + renewReq.Auth.LeaseOptions = auth.LeaseOptions + renewReq.Auth.Policies = auth.Policies + renewReq.Auth.Period = auth.Period + + return renewReq +} + +func TestAppRole_RoleResolve(t *testing.T) { + b, storage := createBackendWithStorage(t) + + role := "role1" + createRole(t, b, storage, role, "a,b,c") + roleRoleIDReq := &logical.Request{ + Operation: logical.ReadOperation, + Path: "role/role1/role-id", + Storage: storage, + } + resp := b.requestNoErr(t, roleRoleIDReq) + + roleID := resp.Data["role_id"] + + roleSecretIDReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "role/role1/secret-id", + Storage: storage, + } + resp = b.requestNoErr(t, roleSecretIDReq) + + secretID := resp.Data["secret_id"] + + loginData := map[string]interface{}{ + "role_id": roleID, + "secret_id": secretID, + } + loginReq := &logical.Request{ + Operation: logical.ResolveRoleOperation, + Path: "login", + Storage: storage, + Data: loginData, + Connection: &logical.Connection{ + RemoteAddr: "127.0.0.1", + }, + } + + resp = b.requestNoErr(t, loginReq) + + if resp.Data["role"] != role { + t.Fatalf("Role was not as expected. Expected %s, received %s", role, resp.Data["role"]) + } +} + +func TestAppRole_RoleDoesNotExist(t *testing.T) { + var resp *logical.Response + var err error + b, storage := createBackendWithStorage(t) + + roleID := "roleDoesNotExist" + + loginData := map[string]interface{}{ + "role_id": roleID, + "secret_id": "secret", + } + loginReq := &logical.Request{ + Operation: logical.ResolveRoleOperation, + Path: "login", + Storage: storage, + Data: loginData, + Connection: &logical.Connection{ + RemoteAddr: "127.0.0.1", + }, + } + + resp, err = b.HandleRequest(context.Background(), loginReq) + if resp == nil && !resp.IsError() { + t.Fatalf("Response was not an error: err:%v resp:%#v", err, resp) + } + + errString, ok := resp.Data["error"].(string) + if !ok { + t.Fatal("Error not part of response.") + } + + if !strings.Contains(errString, "invalid role or secret ID") { + t.Fatalf("Error was not due to invalid role ID. 
Error: %s", errString) + } +} diff --git a/builtin/credential/approle/path_role.go b/builtin/credential/approle/path_role.go new file mode 100644 index 0000000..112d2e0 --- /dev/null +++ b/builtin/credential/approle/path_role.go @@ -0,0 +1,3353 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package approle + +import ( + "context" + "errors" + "fmt" + "net/http" + "strings" + "time" + + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-secure-stdlib/strutil" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/helper/parseip" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/cidrutil" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/locksutil" + "github.com/hashicorp/vault/sdk/helper/policyutil" + "github.com/hashicorp/vault/sdk/helper/tokenutil" + "github.com/hashicorp/vault/sdk/logical" +) + +// roleStorageEntry stores all the options that are set on an role +type roleStorageEntry struct { + tokenutil.TokenParams + + // Name of the role. This field is not persisted on disk. After the role is + // read out of disk, the sanitized version of name is set in this field for + // subsequent use of role name elsewhere. + name string + + // UUID that uniquely represents this role. This serves as a credential + // to perform login using this role. + RoleID string `json:"role_id" mapstructure:"role_id"` + + // UUID that serves as the HMAC key for the hashing the 'secret_id's + // of the role + HMACKey string `json:"hmac_key" mapstructure:"hmac_key"` + + // Policies that are to be required by the token to access this role. Deprecated. + Policies []string `json:"policies" mapstructure:"policies"` + + // Number of times the SecretID generated against this role can be + // used to perform login operation + SecretIDNumUses int `json:"secret_id_num_uses" mapstructure:"secret_id_num_uses"` + + // Duration (less than the backend mount's max TTL) after which a + // SecretID generated against the role will expire + SecretIDTTL time.Duration `json:"secret_id_ttl" mapstructure:"secret_id_ttl"` + + // A constraint, if set, requires 'secret_id' credential to be presented during login + BindSecretID bool `json:"bind_secret_id" mapstructure:"bind_secret_id"` + + // Deprecated: A constraint, if set, specifies the CIDR blocks from which logins should be allowed, + // please use SecretIDBoundCIDRs instead. + BoundCIDRListOld string `json:"bound_cidr_list,omitempty"` + + // Deprecated: A constraint, if set, specifies the CIDR blocks from which logins should be allowed, + // please use SecretIDBoundCIDRs instead. + BoundCIDRList []string `json:"bound_cidr_list_list" mapstructure:"bound_cidr_list"` + + // A constraint, if set, specifies the CIDR blocks from which logins should be allowed + SecretIDBoundCIDRs []string `json:"secret_id_bound_cidrs" mapstructure:"secret_id_bound_cidrs"` + + // Period, if set, indicates that the token generated using this role + // should never expire. The token should be renewed within the duration + // specified by this value. The renewal duration will be fixed if the value + // is not modified on the role. If the `Period` in the role is modified, a + // token will pick up the new value during its next renewal. Deprecated. + Period time.Duration `json:"period" mapstructure:"period"` + + // LowerCaseRoleName enforces the lower casing of role names for all the + // roles that get created since this field was introduced. 
+ LowerCaseRoleName bool `json:"lower_case_role_name" mapstructure:"lower_case_role_name"` + + // SecretIDPrefix is the storage prefix for persisting secret IDs. This + // differs based on whether the secret IDs are cluster local or not. + SecretIDPrefix string `json:"secret_id_prefix" mapstructure:"secret_id_prefix"` +} + +// roleIDStorageEntry represents the reverse mapping from RoleID to Role +type roleIDStorageEntry struct { + Name string `json:"name" mapstructure:"name"` +} + +// rolePaths creates all the paths that are used to register and manage an role. +// +// Paths returned: +// role/ - For listing all the registered roles +// role/ - For registering an role +// role//policies - For updating the param +// role//secret-id-num-uses - For updating the param +// role//secret-id-ttl - For updating the param +// role//token-ttl - For updating the param +// role//token-max-ttl - For updating the param +// role//token-num-uses - For updating the param +// role//bind-secret-id - For updating the param +// role//bound-cidr-list - For updating the param +// role//period - For updating the param +// role//role-id - For fetching the role_id of an role +// role//secret-id - For issuing a secret_id against an role, also to list the secret_id_accessors +// role//custom-secret-id - For assigning a custom SecretID against an role +// role//secret-id/lookup - For reading the properties of a secret_id +// role//secret-id/destroy - For deleting a secret_id +// role//secret-id-accessor/lookup - For reading secret_id using accessor +// role//secret-id-accessor/destroy - For deleting secret_id using accessor +func rolePaths(b *backend) []*framework.Path { + defTokenFields := tokenutil.TokenFields() + + responseOK := map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + }}, + } + responseNoContent := map[int][]framework.Response{ + http.StatusNoContent: {{ + Description: "No Content", + }}, + } + + p := &framework.Path{ + Pattern: "role/" + framework.GenericNameRegex("role_name"), + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "role", + }, + Fields: map[string]*framework.FieldSchema{ + "role_name": { + Type: framework.TypeString, + Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + }, + "bind_secret_id": { + Type: framework.TypeBool, + Default: true, + Description: "Impose secret_id to be presented when logging in using this role. Defaults to 'true'.", + }, + + "bound_cidr_list": { + Type: framework.TypeCommaStringSlice, + Description: `Use "secret_id_bound_cidrs" instead.`, + Deprecated: true, + }, + + "secret_id_bound_cidrs": { + Type: framework.TypeCommaStringSlice, + Description: `Comma separated string or list of CIDR blocks. If set, specifies the blocks of +IP addresses which can perform the login operation.`, + }, + + "policies": { + Type: framework.TypeCommaStringSlice, + Description: tokenutil.DeprecationText("token_policies"), + Deprecated: true, + }, + + "secret_id_num_uses": { + Type: framework.TypeInt, + Description: `Number of times a SecretID can access the role, after which the SecretID +will expire. Defaults to 0 meaning that the the secret_id is of unlimited use.`, + }, + + "secret_id_ttl": { + Type: framework.TypeDurationSecond, + Description: `Duration in seconds after which the issued SecretID should expire. 
Defaults +to 0, meaning no expiration.`, + }, + + "period": { + Type: framework.TypeDurationSecond, + Description: tokenutil.DeprecationText("token_period"), + Deprecated: true, + }, + + "role_id": { + Type: framework.TypeString, + Description: "Identifier of the role. Defaults to a UUID.", + }, + + "local_secret_ids": { + Type: framework.TypeBool, + Description: `If set, the secret IDs generated using this role will be cluster local. This +can only be set during role creation and once set, it can't be reset later.`, + }, + }, + ExistenceCheck: b.pathRoleExistenceCheck, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.CreateOperation: &framework.PathOperation{ + Callback: b.pathRoleCreateUpdate, + Responses: responseOK, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathRoleCreateUpdate, + Responses: responseOK, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathRoleRead, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "bind_secret_id": { + Type: framework.TypeBool, + Required: true, + Description: "Impose secret ID to be presented when logging in using this role.", + }, + "secret_id_bound_cidrs": { + Type: framework.TypeCommaStringSlice, + Required: true, + Description: "Comma separated string or list of CIDR blocks. If set, specifies the blocks of IP addresses which can perform the login operation.", + }, + "secret_id_num_uses": { + Type: framework.TypeInt, + Required: true, + Description: "Number of times a secret ID can access the role, after which the secret ID will expire.", + }, + "secret_id_ttl": { + Type: framework.TypeDurationSecond, + Required: true, + Description: "Duration in seconds after which the issued secret ID expires.", + }, + "local_secret_ids": { + Type: framework.TypeBool, + Required: true, + Description: "If true, the secret identifiers generated using this role will be cluster local. This can only be set during role creation and once set, it can't be reset later", + }, + "token_bound_cidrs": { + Type: framework.TypeCommaStringSlice, + Required: true, + Description: `Comma separated string or JSON list of CIDR blocks. If set, specifies the blocks of IP addresses which are allowed to use the generated token.`, + }, + "token_explicit_max_ttl": { + Type: framework.TypeDurationSecond, + Required: true, + Description: "If set, tokens created via this role carry an explicit maximum TTL. 
During renewal, the current maximum TTL values of the role and the mount are not checked for changes, and any updates to these values will have no effect on the token being renewed.", + }, + "token_max_ttl": { + Type: framework.TypeDurationSecond, + Required: true, + Description: "The maximum lifetime of the generated token", + }, + "token_no_default_policy": { + Type: framework.TypeBool, + Required: true, + Description: "If true, the 'default' policy will not automatically be added to generated tokens", + }, + "token_period": { + Type: framework.TypeDurationSecond, + Required: true, + Description: "If set, tokens created via this role will have no max lifetime; instead, their renewal period will be fixed to this value.", + }, + "token_policies": { + Type: framework.TypeCommaStringSlice, + Required: true, + Description: "Comma-separated list of policies", + }, + "token_type": { + Type: framework.TypeString, + Required: true, + Default: "default-service", + Description: "The type of token to generate, service or batch", + }, + "token_ttl": { + Type: framework.TypeDurationSecond, + Required: true, + Description: "The initial ttl of the token to generate", + }, + "token_num_uses": { + Type: framework.TypeInt, + Required: true, + Description: "The maximum number of times a token may be used, a value of zero means unlimited", + }, + "period": { + Type: framework.TypeDurationSecond, + Required: false, + Description: tokenutil.DeprecationText("token_period"), + Deprecated: true, + }, + "policies": { + Type: framework.TypeCommaStringSlice, + Required: false, + Description: tokenutil.DeprecationText("token_policies"), + Deprecated: true, + }, + }, + }}, + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathRoleDelete, + Responses: responseNoContent, + }, + }, + HelpSynopsis: strings.TrimSpace(roleHelp["role"][0]), + HelpDescription: strings.TrimSpace(roleHelp["role"][1]), + } + + tokenutil.AddTokenFields(p.Fields) + + return []*framework.Path{ + p, + { + Pattern: "role/?", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "roles", + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ListOperation: &framework.PathOperation{ + Callback: b.pathRoleList, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "keys": { + Type: framework.TypeStringSlice, + Required: true, + }, + }, + }}, + }, + }, + }, + HelpSynopsis: strings.TrimSpace(roleHelp["role-list"][0]), + HelpDescription: strings.TrimSpace(roleHelp["role-list"][1]), + }, + { + Pattern: "role/" + framework.GenericNameRegex("role_name") + "/local-secret-ids$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "local-secret-ids", + }, + Fields: map[string]*framework.FieldSchema{ + "role_name": { + Type: framework.TypeString, + Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathRoleLocalSecretIDsRead, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "local_secret_ids": { + Type: framework.TypeBool, + Required: true, + Description: "If true, the secret identifiers generated using this role will be cluster local. 
This can only be set during role creation and once set, it can't be reset later", + }, + }, + }}, + }, + }, + }, + HelpSynopsis: strings.TrimSpace(roleHelp["role-local-secret-ids"][0]), + HelpDescription: strings.TrimSpace(roleHelp["role-local-secret-ids"][1]), + }, + { + Pattern: "role/" + framework.GenericNameRegex("role_name") + "/policies$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "policies", + }, + Fields: map[string]*framework.FieldSchema{ + "role_name": { + Type: framework.TypeString, + Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + }, + "policies": { + Type: framework.TypeCommaStringSlice, + Description: tokenutil.DeprecationText("token_policies"), + Deprecated: true, + }, + "token_policies": { + Type: framework.TypeCommaStringSlice, + Description: defTokenFields["token_policies"].Description, + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathRolePoliciesUpdate, + Responses: responseNoContent, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathRolePoliciesRead, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "policies": { + Type: framework.TypeCommaStringSlice, + Required: false, + Description: tokenutil.DeprecationText("token_policies"), + Deprecated: true, + }, + "token_policies": { + Type: framework.TypeCommaStringSlice, + Required: true, + Description: defTokenFields["token_policies"].Description, + }, + }, + }}, + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathRolePoliciesDelete, + Responses: responseNoContent, + }, + }, + HelpSynopsis: strings.TrimSpace(roleHelp["role-policies"][0]), + HelpDescription: strings.TrimSpace(roleHelp["role-policies"][1]), + }, + { + Pattern: "role/" + framework.GenericNameRegex("role_name") + "/bound-cidr-list$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "bound-cidr-list", + }, + Fields: map[string]*framework.FieldSchema{ + "role_name": { + Type: framework.TypeString, + Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + }, + "bound_cidr_list": { + Type: framework.TypeCommaStringSlice, + Description: `Deprecated: Please use "secret_id_bound_cidrs" instead. Comma separated string or list +of CIDR blocks. If set, specifies the blocks of IP addresses which can perform the login operation.`, + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathRoleBoundCIDRUpdate, + Responses: responseNoContent, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathRoleBoundCIDRListRead, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "bound_cidr_list": { + Type: framework.TypeCommaStringSlice, + Required: true, + Description: `Deprecated: Please use "secret_id_bound_cidrs" instead. Comma separated string or list of CIDR blocks. 
If set, specifies the blocks of IP addresses which can perform the login operation.`, + Deprecated: true, + }, + }, + }}, + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathRoleBoundCIDRListDelete, + Responses: responseNoContent, + }, + }, + HelpSynopsis: strings.TrimSpace(roleHelp["role-bound-cidr-list"][0]), + HelpDescription: strings.TrimSpace(roleHelp["role-bound-cidr-list"][1]), + }, + { + Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-bound-cidrs$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "secret-id-bound-cidrs", + }, + Fields: map[string]*framework.FieldSchema{ + "role_name": { + Type: framework.TypeString, + Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + }, + "secret_id_bound_cidrs": { + Type: framework.TypeCommaStringSlice, + Description: `Comma separated string or list of CIDR blocks. If set, specifies the blocks of +IP addresses which can perform the login operation.`, + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathRoleSecretIDBoundCIDRUpdate, + Responses: responseNoContent, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathRoleSecretIDBoundCIDRRead, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "secret_id_bound_cidrs": { + Type: framework.TypeCommaStringSlice, + Required: true, + Description: `Comma separated string or list of CIDR blocks. If set, specifies the blocks of IP addresses which can perform the login operation.`, + }, + }, + }}, + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathRoleSecretIDBoundCIDRDelete, + Responses: responseNoContent, + }, + }, + HelpSynopsis: strings.TrimSpace(roleHelp["secret-id-bound-cidrs"][0]), + HelpDescription: strings.TrimSpace(roleHelp["secret-id-bound-cidrs"][1]), + }, + { + Pattern: "role/" + framework.GenericNameRegex("role_name") + "/token-bound-cidrs$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "token-bound-cidrs", + }, + Fields: map[string]*framework.FieldSchema{ + "role_name": { + Type: framework.TypeString, + Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + }, + "token_bound_cidrs": { + Type: framework.TypeCommaStringSlice, + Description: defTokenFields["token_bound_cidrs"].Description, + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathRoleTokenBoundCIDRUpdate, + Responses: responseNoContent, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathRoleTokenBoundCIDRRead, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "token_bound_cidrs": { + Type: framework.TypeCommaStringSlice, + Required: true, + Description: `Comma separated string or list of CIDR blocks. If set, specifies the blocks of IP addresses which can use the returned token. 
Should be a subset of the token CIDR blocks listed on the role, if any.`, + }, + }, + }}, + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathRoleTokenBoundCIDRDelete, + Responses: responseNoContent, + }, + }, + HelpSynopsis: strings.TrimSpace(roleHelp["token-bound-cidrs"][0]), + HelpDescription: strings.TrimSpace(roleHelp["token-bound-cidrs"][1]), + }, + { + Pattern: "role/" + framework.GenericNameRegex("role_name") + "/bind-secret-id$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "bind-secret-id", + }, + Fields: map[string]*framework.FieldSchema{ + "role_name": { + Type: framework.TypeString, + Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + }, + "bind_secret_id": { + Type: framework.TypeBool, + Default: true, + Description: "Impose secret_id to be presented when logging in using this role.", + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathRoleBindSecretIDUpdate, + Responses: responseNoContent, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathRoleBindSecretIDRead, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "bind_secret_id": { + Type: framework.TypeBool, + Required: true, + Description: "Impose secret_id to be presented when logging in using this role. Defaults to 'true'.", + }, + }, + }}, + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathRoleBindSecretIDDelete, + Responses: responseNoContent, + }, + }, + HelpSynopsis: strings.TrimSpace(roleHelp["role-bind-secret-id"][0]), + HelpDescription: strings.TrimSpace(roleHelp["role-bind-secret-id"][1]), + }, + { + Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-num-uses$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "secret-id-num-uses", + }, + Fields: map[string]*framework.FieldSchema{ + "role_name": { + Type: framework.TypeString, + Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + }, + "secret_id_num_uses": { + Type: framework.TypeInt, + Description: "Number of times a SecretID can access the role, after which the SecretID will expire.", + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathRoleSecretIDNumUsesUpdate, + Responses: responseNoContent, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathRoleSecretIDNumUsesRead, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "secret_id_num_uses": { + Type: framework.TypeInt, + Required: true, + Description: "Number of times a secret ID can access the role, after which the SecretID will expire. 
Defaults to 0 meaning that the secret ID is of unlimited use.", + }, + }, + }}, + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathRoleSecretIDNumUsesDelete, + Responses: responseNoContent, + }, + }, + HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-num-uses"][0]), + HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-num-uses"][1]), + }, + { + Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-ttl$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "secret-id-ttl", + }, + Fields: map[string]*framework.FieldSchema{ + "role_name": { + Type: framework.TypeString, + Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + }, + "secret_id_ttl": { + Type: framework.TypeDurationSecond, + Description: `Duration in seconds after which the issued SecretID should expire. Defaults +to 0, meaning no expiration.`, + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathRoleSecretIDTTLUpdate, + Responses: responseNoContent, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathRoleSecretIDTTLRead, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "secret_id_ttl": { + Type: framework.TypeDurationSecond, + Required: true, + Description: "Duration in seconds after which the issued secret ID should expire. Defaults to 0, meaning no expiration.", + }, + }, + }}, + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathRoleSecretIDTTLDelete, + Responses: responseNoContent, + }, + }, + HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-ttl"][0]), + HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-ttl"][1]), + }, + { + Pattern: "role/" + framework.GenericNameRegex("role_name") + "/period$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "period", + }, + Fields: map[string]*framework.FieldSchema{ + "role_name": { + Type: framework.TypeString, + Description: fmt.Sprintf("Name of the role. 
Must be less than %d bytes.", maxHmacInputLength), + }, + "period": { + Type: framework.TypeDurationSecond, + Description: tokenutil.DeprecationText("token_period"), + Deprecated: true, + }, + "token_period": { + Type: framework.TypeDurationSecond, + Description: defTokenFields["token_period"].Description, + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathRolePeriodUpdate, + Responses: responseNoContent, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathRolePeriodRead, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "period": { + Type: framework.TypeDurationSecond, + Required: false, + Description: tokenutil.DeprecationText("token_period"), + Deprecated: true, + }, + "token_period": { + Type: framework.TypeDurationSecond, + Required: true, + Description: defTokenFields["token_period"].Description, + }, + }, + }}, + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathRolePeriodDelete, + Responses: responseNoContent, + }, + }, + HelpSynopsis: strings.TrimSpace(roleHelp["role-period"][0]), + HelpDescription: strings.TrimSpace(roleHelp["role-period"][1]), + }, + { + Pattern: "role/" + framework.GenericNameRegex("role_name") + "/token-num-uses$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "token-num-uses", + }, + Fields: map[string]*framework.FieldSchema{ + "role_name": { + Type: framework.TypeString, + Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + }, + "token_num_uses": { + Type: framework.TypeInt, + Description: defTokenFields["token_num_uses"].Description, + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathRoleTokenNumUsesUpdate, + Responses: responseNoContent, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathRoleTokenNumUsesRead, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "token_num_uses": { + Type: framework.TypeInt, + Required: true, + Description: defTokenFields["token_num_uses"].Description, + }, + }, + }}, + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathRoleTokenNumUsesDelete, + Responses: responseNoContent, + }, + }, + HelpSynopsis: strings.TrimSpace(roleHelp["role-token-num-uses"][0]), + HelpDescription: strings.TrimSpace(roleHelp["role-token-num-uses"][1]), + }, + { + Pattern: "role/" + framework.GenericNameRegex("role_name") + "/token-ttl$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "token-ttl", + }, + Fields: map[string]*framework.FieldSchema{ + "role_name": { + Type: framework.TypeString, + Description: fmt.Sprintf("Name of the role. 
Must be less than %d bytes.", maxHmacInputLength), + }, + "token_ttl": { + Type: framework.TypeDurationSecond, + Description: defTokenFields["token_ttl"].Description, + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathRoleTokenTTLUpdate, + Responses: responseNoContent, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathRoleTokenTTLRead, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "token_ttl": { + Type: framework.TypeDurationSecond, + Required: true, + Description: defTokenFields["token_ttl"].Description, + }, + }, + }}, + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathRoleTokenTTLDelete, + Responses: responseNoContent, + }, + }, + HelpSynopsis: strings.TrimSpace(roleHelp["role-token-ttl"][0]), + HelpDescription: strings.TrimSpace(roleHelp["role-token-ttl"][1]), + }, + { + Pattern: "role/" + framework.GenericNameRegex("role_name") + "/token-max-ttl$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "token-max-ttl", + }, + Fields: map[string]*framework.FieldSchema{ + "role_name": { + Type: framework.TypeString, + Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + }, + "token_max_ttl": { + Type: framework.TypeDurationSecond, + Description: defTokenFields["token_max_ttl"].Description, + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathRoleTokenMaxTTLUpdate, + Responses: responseNoContent, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathRoleTokenMaxTTLRead, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "token_max_ttl": { + Type: framework.TypeDurationSecond, + Required: true, + Description: defTokenFields["token_max_ttl"].Description, + }, + }, + }}, + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathRoleTokenMaxTTLDelete, + Responses: responseNoContent, + }, + }, + HelpSynopsis: strings.TrimSpace(roleHelp["role-token-max-ttl"][0]), + HelpDescription: strings.TrimSpace(roleHelp["role-token-max-ttl"][1]), + }, + { + Pattern: "role/" + framework.GenericNameRegex("role_name") + "/role-id$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "role-id", + }, + Fields: map[string]*framework.FieldSchema{ + "role_name": { + Type: framework.TypeString, + Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + }, + "role_id": { + Type: framework.TypeString, + Description: "Identifier of the role. Defaults to a UUID.", + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathRoleRoleIDRead, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "role_id": { + Type: framework.TypeString, + Required: false, + Description: "Identifier of the role. 
Defaults to a UUID.", + }, + }, + }}, + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathRoleRoleIDUpdate, + Responses: responseNoContent, + }, + }, + HelpSynopsis: strings.TrimSpace(roleHelp["role-id"][0]), + HelpDescription: strings.TrimSpace(roleHelp["role-id"][1]), + }, + { + Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "secret-id", + }, + Fields: map[string]*framework.FieldSchema{ + "role_name": { + Type: framework.TypeString, + Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + }, + "metadata": { + Type: framework.TypeString, + Description: `Metadata to be tied to the SecretID. This should be a JSON +formatted string containing the metadata in key value pairs.`, + }, + "cidr_list": { + Type: framework.TypeCommaStringSlice, + Description: `Comma separated string or list of CIDR blocks enforcing secret IDs to be used from +specific set of IP addresses. If 'bound_cidr_list' is set on the role, then the +list of CIDR blocks listed here should be a subset of the CIDR blocks listed on +the role.`, + }, + "token_bound_cidrs": { + Type: framework.TypeCommaStringSlice, + Description: defTokenFields["token_bound_cidrs"].Description, + }, + "num_uses": { + Type: framework.TypeInt, + Description: `Number of times this SecretID can be used, after which the SecretID expires. +Overrides secret_id_num_uses role option when supplied. May not be higher than role's secret_id_num_uses.`, + }, + "ttl": { + Type: framework.TypeDurationSecond, + Description: `Duration in seconds after which this SecretID expires. +Overrides secret_id_ttl role option when supplied. 
May not be longer than role's secret_id_ttl.`, + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathRoleSecretIDUpdate, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "secret_id": { + Type: framework.TypeString, + Required: true, + Description: "Secret ID attached to the role.", + }, + "secret_id_accessor": { + Type: framework.TypeString, + Required: true, + Description: "Accessor of the secret ID", + }, + "secret_id_ttl": { + Type: framework.TypeDurationSecond, + Required: true, + Description: "Duration in seconds after which the issued secret ID expires.", + }, + "secret_id_num_uses": { + Type: framework.TypeInt, + Required: true, + Description: "Number of times a secret ID can access the role, after which the secret ID will expire.", + }, + }, + }}, + }, + }, + logical.ListOperation: &framework.PathOperation{ + Callback: b.pathRoleSecretIDList, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "secret-ids", + }, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "keys": { + Required: true, + Type: framework.TypeStringSlice, + }, + }, + }}, + }, + }, + }, + HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id"][0]), + HelpDescription: strings.TrimSpace(roleHelp["role-secret-id"][1]), + }, + { + Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id/lookup/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "secret-id", + OperationVerb: "look-up", + }, + Fields: map[string]*framework.FieldSchema{ + "role_name": { + Type: framework.TypeString, + Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + }, + "secret_id": { + Type: framework.TypeString, + Description: "SecretID attached to the role.", + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathRoleSecretIDLookupUpdate, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "secret_id_accessor": { + Type: framework.TypeString, + Required: true, + Description: "Accessor of the secret ID", + }, + "secret_id_ttl": { + Type: framework.TypeDurationSecond, + Required: true, + Description: "Duration in seconds after which the issued secret ID expires.", + }, + "secret_id_num_uses": { + Type: framework.TypeInt, + Required: true, + Description: "Number of times a secret ID can access the role, after which the secret ID will expire.", + }, + "creation_time": { + Type: framework.TypeTime, + Required: true, + }, + "expiration_time": { + Type: framework.TypeTime, + Required: true, + }, + "last_updated_time": { + Type: framework.TypeTime, + Required: true, + }, + "metadata": { + Type: framework.TypeKVPairs, + Required: true, + }, + "cidr_list": { + Type: framework.TypeCommaStringSlice, + Required: true, + Description: "List of CIDR blocks enforcing secret IDs to be used from specific set of IP addresses. If 'bound_cidr_list' is set on the role, then the list of CIDR blocks listed here should be a subset of the CIDR blocks listed on the role.", + }, + "token_bound_cidrs": { + Type: framework.TypeCommaStringSlice, + Required: true, + Description: "List of CIDR blocks. 
If set, specifies the blocks of IP addresses which can use the returned token. Should be a subset of the token CIDR blocks listed on the role, if any.", + }, + }, + }}, + }, + }, + }, + HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-lookup"][0]), + HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-lookup"][1]), + }, + { + Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id/destroy/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationVerb: "destroy", + }, + Fields: map[string]*framework.FieldSchema{ + "role_name": { + Type: framework.TypeString, + Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + }, + "secret_id": { + Type: framework.TypeString, + Description: "SecretID attached to the role.", + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathRoleSecretIDDestroyUpdateDelete, + Responses: responseNoContent, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "secret-id", + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathRoleSecretIDDestroyUpdateDelete, + Responses: responseNoContent, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "secret-id2", + }, + }, + }, + HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-destroy"][0]), + HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-destroy"][1]), + }, + { + Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-accessor/lookup/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "secret-id-by-accessor", + OperationVerb: "look-up", + }, + Fields: map[string]*framework.FieldSchema{ + "role_name": { + Type: framework.TypeString, + Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + }, + "secret_id_accessor": { + Type: framework.TypeString, + Description: "Accessor of the SecretID", + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathRoleSecretIDAccessorLookupUpdate, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "secret_id_accessor": { + Type: framework.TypeString, + Required: true, + Description: "Accessor of the secret ID", + }, + "secret_id_ttl": { + Type: framework.TypeDurationSecond, + Required: true, + Description: "Duration in seconds after which the issued secret ID expires.", + }, + "secret_id_num_uses": { + Type: framework.TypeInt, + Required: true, + Description: "Number of times a secret ID can access the role, after which the secret ID will expire.", + }, + "creation_time": { + Type: framework.TypeTime, + Required: true, + }, + "expiration_time": { + Type: framework.TypeTime, + Required: true, + }, + "last_updated_time": { + Type: framework.TypeTime, + Required: true, + }, + "metadata": { + Type: framework.TypeKVPairs, + Required: true, + }, + "cidr_list": { + Type: framework.TypeCommaStringSlice, + Required: true, + Description: "List of CIDR blocks enforcing secret IDs to be used from specific set of IP addresses. 
If 'bound_cidr_list' is set on the role, then the list of CIDR blocks listed here should be a subset of the CIDR blocks listed on the role.", + }, + "token_bound_cidrs": { + Type: framework.TypeCommaStringSlice, + Required: true, + Description: "List of CIDR blocks. If set, specifies the blocks of IP addresses which can use the returned token. Should be a subset of the token CIDR blocks listed on the role, if any.", + }, + }, + }}, + }, + }, + }, + HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-accessor"][0]), + HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-accessor"][1]), + }, + { + Pattern: "role/" + framework.GenericNameRegex("role_name") + "/secret-id-accessor/destroy/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationVerb: "destroy", + }, + Fields: map[string]*framework.FieldSchema{ + "role_name": { + Type: framework.TypeString, + Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + }, + "secret_id_accessor": { + Type: framework.TypeString, + Description: "Accessor of the SecretID", + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathRoleSecretIDAccessorDestroyUpdateDelete, + Responses: responseNoContent, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "secret-id-by-accessor", + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathRoleSecretIDAccessorDestroyUpdateDelete, + Responses: responseNoContent, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "secret-id-by-accessor2", + }, + }, + }, + HelpSynopsis: strings.TrimSpace(roleHelp["role-secret-id-accessor"][0]), + HelpDescription: strings.TrimSpace(roleHelp["role-secret-id-accessor"][1]), + }, + { + Pattern: "role/" + framework.GenericNameRegex("role_name") + "/custom-secret-id$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "custom-secret-id", + }, + Fields: map[string]*framework.FieldSchema{ + "role_name": { + Type: framework.TypeString, + Description: fmt.Sprintf("Name of the role. Must be less than %d bytes.", maxHmacInputLength), + }, + "secret_id": { + Type: framework.TypeString, + Description: "SecretID to be attached to the role.", + }, + "metadata": { + Type: framework.TypeString, + Description: `Metadata to be tied to the SecretID. This should be a JSON +formatted string containing metadata in key value pairs.`, + }, + "cidr_list": { + Type: framework.TypeCommaStringSlice, + Description: `Comma separated string or list of CIDR blocks enforcing secret IDs to be used from +specific set of IP addresses. If 'bound_cidr_list' is set on the role, then the +list of CIDR blocks listed here should be a subset of the CIDR blocks listed on +the role.`, + }, + "token_bound_cidrs": { + Type: framework.TypeCommaStringSlice, + Description: `Comma separated string or list of CIDR blocks. If set, specifies the blocks of +IP addresses which can use the returned token. Should be a subset of the token CIDR blocks listed on the role, if any.`, + }, + "num_uses": { + Type: framework.TypeInt, + Description: `Number of times this SecretID can be used, after which the SecretID expires. +Overrides secret_id_num_uses role option when supplied. May not be higher than role's secret_id_num_uses.`, + }, + "ttl": { + Type: framework.TypeDurationSecond, + Description: `Duration in seconds after which this SecretID expires. 
+Overrides secret_id_ttl role option when supplied. May not be longer than role's secret_id_ttl.`, + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathRoleCustomSecretIDUpdate, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "secret_id": { + Type: framework.TypeString, + Required: true, + Description: "Secret ID attached to the role.", + }, + "secret_id_accessor": { + Type: framework.TypeString, + Required: true, + Description: "Accessor of the secret ID", + }, + "secret_id_ttl": { + Type: framework.TypeDurationSecond, + Required: true, + Description: "Duration in seconds after which the issued secret ID expires.", + }, + "secret_id_num_uses": { + Type: framework.TypeInt, + Required: true, + Description: "Number of times a secret ID can access the role, after which the secret ID will expire.", + }, + }, + }}, + }, + }, + }, + HelpSynopsis: strings.TrimSpace(roleHelp["role-custom-secret-id"][0]), + HelpDescription: strings.TrimSpace(roleHelp["role-custom-secret-id"][1]), + }, + } +} + +// pathRoleExistenceCheck returns whether the role with the given name exists or not. +func (b *backend) pathRoleExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return false, fmt.Errorf("missing role_name") + } + + lock := b.roleLock(roleName) + lock.RLock() + defer lock.RUnlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return false, err + } + + return role != nil, nil +} + +// pathRoleList is used to list all the Roles registered with the backend. +func (b *backend) pathRoleList(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roles, err := req.Storage.List(ctx, "role/") + if err != nil { + return nil, err + } + return logical.ListResponse(roles), nil +} + +// pathRoleSecretIDList is used to list all the 'secret_id_accessor's issued against the role. +func (b *backend) pathRoleSecretIDList(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.RLock() + defer lock.RUnlock() + + // Get the role entry + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return logical.ErrorResponse(fmt.Sprintf("role %q does not exist", roleName)), nil + } + + // Guard the list operation with an outer lock + b.secretIDListingLock.RLock() + defer b.secretIDListingLock.RUnlock() + + roleNameHMAC, err := createHMAC(role.HMACKey, role.name) + if err != nil { + return nil, fmt.Errorf("failed to create HMAC of role_name: %w", err) + } + + // Listing works one level at a time. Get the first level of data + // which could then be used to get the actual SecretID storage entries. + secretIDHMACs, err := req.Storage.List(ctx, fmt.Sprintf("%s%s/", role.SecretIDPrefix, roleNameHMAC)) + if err != nil { + return nil, err + } + + var listItems []string + for _, secretIDHMAC := range secretIDHMACs { + // For sanity + if secretIDHMAC == "" { + continue + } + + // Prepare the full index of the SecretIDs. 
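An aside on the keyed-hash storage layout used in the list handler above: secret IDs are never persisted in plaintext, so storage keys are derived by HMACing the role name and each secret ID with the role's per-role `HMACKey` (see `roleStorageEntry` earlier in this hunk). The in-package `createHMAC` helper is not shown here; a stand-alone sketch of the same idea, assuming HMAC-SHA256 with hex encoding for illustration:

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// createHMACSketch mirrors the shape of this package's createHMAC helper:
// keying the hash with the role's HMACKey means storage entries for
// different roles cannot be correlated. The exact digest and encoding
// here are assumptions for illustration.
func createHMACSketch(key, value string) string {
	mac := hmac.New(sha256.New, []byte(key))
	mac.Write([]byte(value))
	return hex.EncodeToString(mac.Sum(nil))
}

func main() {
	fmt.Println(createHMACSketch("per-role-hmac-key", "role1"))
}
```

Because only `secretIDHMACs` exist under the role's prefix, the handler can enumerate accessors without ever recovering the secret IDs themselves, which is also why the secret ID locks are indexed by HMAC rather than by the raw value.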
+ entryIndex := fmt.Sprintf("%s%s/%s", role.SecretIDPrefix, roleNameHMAC, secretIDHMAC) + + // SecretID locks are not indexed by SecretIDs itself. + // This is because SecretIDs are not stored in plaintext + // form anywhere in the backend, and hence accessing its + // corresponding lock many times using SecretIDs is not + // possible. Also, indexing it everywhere using secretIDHMACs + // makes listing operation easier. + secretIDLock := b.secretIDLock(secretIDHMAC) + + secretIDLock.RLock() + + result := secretIDStorageEntry{} + if entry, err := req.Storage.Get(ctx, entryIndex); err != nil { + secretIDLock.RUnlock() + return nil, err + } else if entry == nil { + secretIDLock.RUnlock() + return nil, fmt.Errorf("storage entry for SecretID is present but no content found at the index") + } else if err := entry.DecodeJSON(&result); err != nil { + secretIDLock.RUnlock() + return nil, err + } + listItems = append(listItems, result.SecretIDAccessor) + secretIDLock.RUnlock() + } + + return logical.ListResponse(listItems), nil +} + +// validateRoleConstraints checks if the role has at least one constraint +// enabled. +func validateRoleConstraints(role *roleStorageEntry) error { + if role == nil { + return fmt.Errorf("nil role") + } + + // At least one constraint should be enabled on the role + switch { + case role.BindSecretID: + case len(role.BoundCIDRList) != 0: + case len(role.SecretIDBoundCIDRs) != 0: + case len(role.TokenBoundCIDRs) != 0: + default: + return fmt.Errorf("at least one constraint should be enabled on the role") + } + + return nil +} + +// setRoleEntry persists the role and creates an index from roleID to role +// name. +func (b *backend) setRoleEntry(ctx context.Context, s logical.Storage, roleName string, role *roleStorageEntry, previousRoleID string) error { + if roleName == "" { + return fmt.Errorf("missing role name") + } + + if role == nil { + return fmt.Errorf("nil role") + } + + // Check if role constraints are properly set + if err := validateRoleConstraints(role); err != nil { + return err + } + + // Create a storage entry for the role + entry, err := logical.StorageEntryJSON("role/"+strings.ToLower(roleName), role) + if err != nil { + return err + } + if entry == nil { + return fmt.Errorf("failed to create storage entry for role %q", roleName) + } + + // Check if the index from the role_id to role already exists + roleIDIndex, err := b.roleIDEntry(ctx, s, role.RoleID) + if err != nil { + return fmt.Errorf("failed to read role_id index: %w", err) + } + + // If the entry exists, make sure that it belongs to the current role + if roleIDIndex != nil && roleIDIndex.Name != roleName { + return fmt.Errorf("role_id already in use") + } + + // When role_id is getting updated, delete the old index before + // a new one is created + if previousRoleID != "" && previousRoleID != role.RoleID { + if err = b.roleIDEntryDelete(ctx, s, previousRoleID); err != nil { + return fmt.Errorf("failed to delete previous role ID index: %w", err) + } + } + + // Save the role entry only after all the validations + if err = s.Put(ctx, entry); err != nil { + return err + } + + // If previousRoleID is still intact, don't create another one + if previousRoleID != "" && previousRoleID == role.RoleID { + return nil + } + + // Create a storage entry for reverse mapping of RoleID to role. + // Note that secondary index is created when the roleLock is held. 
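The `validateRoleConstraints` helper shown just above leans on a bare `switch` as an any-of check: the first case that evaluates true falls out of the switch (Go cases do not fall through), and only when nothing matches does `default` return the error. The same idiom in isolation, with a trimmed stand-in type used purely for illustration:

```go
package main

import (
	"errors"
	"fmt"
)

// roleConstraints is a hypothetical, trimmed-down stand-in for the
// constraint-bearing fields of roleStorageEntry.
type roleConstraints struct {
	BindSecretID       bool
	SecretIDBoundCIDRs []string
	TokenBoundCIDRs    []string
}

// anyConstraintEnabled reproduces the switch-as-any-of idiom used by
// validateRoleConstraints: empty case bodies simply exit the switch.
func anyConstraintEnabled(r roleConstraints) error {
	switch {
	case r.BindSecretID:
	case len(r.SecretIDBoundCIDRs) != 0:
	case len(r.TokenBoundCIDRs) != 0:
	default:
		return errors.New("at least one constraint should be enabled on the role")
	}
	return nil
}

func main() {
	fmt.Println(anyConstraintEnabled(roleConstraints{}))                   // error
	fmt.Println(anyConstraintEnabled(roleConstraints{BindSecretID: true})) // <nil>
}
```

This guard runs before `setRoleEntry` persists anything, so a role can never be saved in a state where a bare `role_id` alone would be sufficient to log in.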
+ return b.setRoleIDEntry(ctx, s, role.RoleID, &roleIDStorageEntry{
+ Name: roleName,
+ })
+}
+
+// roleEntry reads the role from storage
+func (b *backend) roleEntry(ctx context.Context, s logical.Storage, roleName string) (*roleStorageEntry, error) {
+ if roleName == "" {
+ return nil, fmt.Errorf("missing role_name")
+ }
+
+ var role roleStorageEntry
+
+ if entry, err := s.Get(ctx, "role/"+strings.ToLower(roleName)); err != nil {
+ return nil, err
+ } else if entry == nil {
+ return nil, nil
+ } else if err := entry.DecodeJSON(&role); err != nil {
+ return nil, err
+ }
+
+ needsUpgrade := false
+
+ if role.BoundCIDRListOld != "" {
+ role.SecretIDBoundCIDRs = strutil.ParseDedupAndSortStrings(role.BoundCIDRListOld, ",")
+ role.BoundCIDRListOld = ""
+ needsUpgrade = true
+ }
+
+ if len(role.BoundCIDRList) != 0 {
+ role.SecretIDBoundCIDRs = role.BoundCIDRList
+ role.BoundCIDRList = nil
+ needsUpgrade = true
+ }
+
+ if role.SecretIDPrefix == "" {
+ role.SecretIDPrefix = secretIDPrefix
+ needsUpgrade = true
+ }
+
+ for i, cidr := range role.SecretIDBoundCIDRs {
+ role.SecretIDBoundCIDRs[i] = parseip.TrimLeadingZeroesCIDR(cidr)
+ }
+
+ if role.TokenPeriod == 0 && role.Period > 0 {
+ role.TokenPeriod = role.Period
+ }
+
+ if len(role.TokenPolicies) == 0 && len(role.Policies) > 0 {
+ role.TokenPolicies = role.Policies
+ }
+
+ if needsUpgrade && (b.System().LocalMount() || !b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary|consts.ReplicationPerformanceStandby)) {
+ entry, err := logical.StorageEntryJSON("role/"+strings.ToLower(roleName), &role)
+ if err != nil {
+ return nil, err
+ }
+ if err := s.Put(ctx, entry); err != nil {
+ // Only perform upgrades on the replication primary
+ if !strings.Contains(err.Error(), logical.ErrReadOnly.Error()) {
+ return nil, err
+ }
+ }
+ }
+
+ role.name = roleName
+ if role.LowerCaseRoleName {
+ role.name = strings.ToLower(roleName)
+ }
+
+ return &role, nil
+}
+
+// pathRoleCreateUpdate registers a new role with the backend or updates the options
+// of an existing role
+func (b *backend) pathRoleCreateUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ if len(roleName) > maxHmacInputLength {
+ return logical.ErrorResponse(fmt.Sprintf("role_name is longer than maximum of %d bytes", maxHmacInputLength)), nil
+ }
+
+ lock := b.roleLock(roleName)
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Check if the role already exists
+ role, err := b.roleEntry(ctx, req.Storage, roleName)
+ if err != nil {
+ return nil, err
+ }
+
+ // Create a new entry object if this is a CreateOperation
+ switch {
+ case role == nil && req.Operation == logical.CreateOperation:
+ hmacKey, err := uuid.GenerateUUID()
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate HMAC key: %w", err)
+ }
+ role = &roleStorageEntry{
+ name: strings.ToLower(roleName),
+ HMACKey: hmacKey,
+ LowerCaseRoleName: true,
+ }
+ case role == nil:
+ return logical.ErrorResponse(fmt.Sprintf("role name %q doesn't exist", roleName)), logical.ErrUnsupportedPath
+ }
+
+ var resp *logical.Response
+
+ // Handle a backwards-compat case
+ if tokenTypeRaw, ok := data.Raw["token_type"]; ok {
+ switch tokenTypeRaw.(string) {
+ case "default-service":
+ data.Raw["token_type"] = "service"
+ resp = &logical.Response{}
+ resp.AddWarning("default-service has no useful meaning; adjusting to service")
+ case
"default-batch": + data.Raw["token_type"] = "batch" + resp = &logical.Response{} + resp.AddWarning("default-batch has no useful meaning; adjusting to batch") + } + } + + if err := role.ParseTokenFields(req, data); err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + + localSecretIDsRaw, ok := data.GetOk("local_secret_ids") + if ok { + switch { + case req.Operation == logical.CreateOperation: + localSecretIDs := localSecretIDsRaw.(bool) + if localSecretIDs { + role.SecretIDPrefix = secretIDLocalPrefix + } + default: + return logical.ErrorResponse("local_secret_ids can only be modified during role creation"), nil + } + } + + previousRoleID := role.RoleID + if roleIDRaw, ok := data.GetOk("role_id"); ok { + role.RoleID = roleIDRaw.(string) + } else if req.Operation == logical.CreateOperation { + roleID, err := uuid.GenerateUUID() + if err != nil { + return nil, fmt.Errorf("failed to generate role_id: %w", err) + } + role.RoleID = roleID + } + if role.RoleID == "" { + return logical.ErrorResponse("invalid role_id supplied, or failed to generate a role_id"), nil + } + + if bindSecretIDRaw, ok := data.GetOk("bind_secret_id"); ok { + role.BindSecretID = bindSecretIDRaw.(bool) + } else if req.Operation == logical.CreateOperation { + role.BindSecretID = data.Get("bind_secret_id").(bool) + } + + if boundCIDRListRaw, ok := data.GetFirst("secret_id_bound_cidrs", "bound_cidr_list"); ok { + role.SecretIDBoundCIDRs = boundCIDRListRaw.([]string) + } + + if len(role.SecretIDBoundCIDRs) != 0 { + valid, err := cidrutil.ValidateCIDRListSlice(role.SecretIDBoundCIDRs) + if err != nil { + return nil, fmt.Errorf("failed to validate CIDR blocks: %w", err) + } + if !valid { + return logical.ErrorResponse("invalid CIDR blocks"), nil + } + } + + if secretIDNumUsesRaw, ok := data.GetOk("secret_id_num_uses"); ok { + role.SecretIDNumUses = secretIDNumUsesRaw.(int) + } else if req.Operation == logical.CreateOperation { + role.SecretIDNumUses = data.Get("secret_id_num_uses").(int) + } + if role.SecretIDNumUses < 0 { + return logical.ErrorResponse("secret_id_num_uses cannot be negative"), nil + } + + if secretIDTTLRaw, ok := data.GetOk("secret_id_ttl"); ok { + role.SecretIDTTL = time.Second * time.Duration(secretIDTTLRaw.(int)) + } else if req.Operation == logical.CreateOperation { + role.SecretIDTTL = time.Second * time.Duration(data.Get("secret_id_ttl").(int)) + } + + // handle upgrade cases + { + if err := tokenutil.UpgradeValue(data, "policies", "token_policies", &role.Policies, &role.TokenPolicies); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + if err := tokenutil.UpgradeValue(data, "period", "token_period", &role.Period, &role.TokenPeriod); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + } + + if role.Period > b.System().MaxLeaseTTL() { + return logical.ErrorResponse(fmt.Sprintf("period of %q is greater than the backend's maximum lease TTL of %q", role.Period.String(), b.System().MaxLeaseTTL().String())), nil + } + + if role.TokenMaxTTL > b.System().MaxLeaseTTL() { + if resp == nil { + resp = &logical.Response{} + } + resp.AddWarning("token_max_ttl is greater than the backend mount's maximum TTL value; issued tokens' max TTL value will be truncated") + } + + // Store the entry. 
+ return resp, b.setRoleEntry(ctx, req.Storage, role.name, role, previousRoleID) +} + +// pathRoleRead grabs a read lock and reads the options set on the role from the storage +func (b *backend) pathRoleRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.RLock() + lockRelease := lock.RUnlock + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + lockRelease() + return nil, err + } + + if role == nil { + lockRelease() + return nil, nil + } + + respData := map[string]interface{}{ + "bind_secret_id": role.BindSecretID, + "secret_id_bound_cidrs": role.SecretIDBoundCIDRs, + "secret_id_num_uses": role.SecretIDNumUses, + "secret_id_ttl": role.SecretIDTTL / time.Second, + "local_secret_ids": false, + } + role.PopulateTokenData(respData) + + if role.SecretIDPrefix == secretIDLocalPrefix { + respData["local_secret_ids"] = true + } + + // Backwards compat data + if role.Period != 0 { + respData["period"] = role.Period / time.Second + } + if len(role.Policies) > 0 { + respData["policies"] = role.Policies + } + + resp := &logical.Response{ + Data: respData, + } + + if err := validateRoleConstraints(role); err != nil { + resp.AddWarning("Role does not have any constraints set on it. Updates to this role will require a constraint to be set") + } + + // For sanity, verify that the index still exists. If the index is missing, + // add one and return a warning so it can be reported. + roleIDIndex, err := b.roleIDEntry(ctx, req.Storage, role.RoleID) + if err != nil { + lockRelease() + return nil, err + } + + if roleIDIndex == nil { + // Switch to a write lock + lock.RUnlock() + lock.Lock() + lockRelease = lock.Unlock + + // Check again if the index is missing + roleIDIndex, err = b.roleIDEntry(ctx, req.Storage, role.RoleID) + if err != nil { + lockRelease() + return nil, err + } + + if roleIDIndex == nil { + // Create a new index + err = b.setRoleIDEntry(ctx, req.Storage, role.RoleID, &roleIDStorageEntry{ + Name: role.name, + }) + if err != nil { + lockRelease() + return nil, fmt.Errorf("failed to create secondary index for role_id %q: %w", role.RoleID, err) + } + resp.AddWarning("Role identifier was missing an index back to role name. A new index has been added. Please report this observation.") + } + } + + lockRelease() + + return resp, nil +} + +// pathRoleDelete removes the role from the storage +func (b *backend) pathRoleDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.Lock() + defer lock.Unlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + // Just before the role is deleted, remove all the SecretIDs issued as part of the role. 
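+ // The ordering below is deliberate: SecretIDs are flushed first, then
+ // the role_id index, and the role entry itself last, so that a failure
+ // partway through cannot leave usable credentials pointing at a role
+ // that has already been removed.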
+ if err = b.flushRoleSecrets(ctx, req.Storage, role.name, role.HMACKey, role.SecretIDPrefix); err != nil { + return nil, fmt.Errorf("failed to invalidate the secrets belonging to role %q: %w", role.name, err) + } + + // Delete the reverse mapping from RoleID to the role + if err = b.roleIDEntryDelete(ctx, req.Storage, role.RoleID); err != nil { + return nil, fmt.Errorf("failed to delete the mapping from RoleID to role %q: %w", role.name, err) + } + + // After deleting the SecretIDs and the RoleID, delete the role itself + if err = req.Storage.Delete(ctx, "role/"+strings.ToLower(role.name)); err != nil { + return nil, err + } + + return nil, nil +} + +// Returns the properties of the SecretID +func (b *backend) pathRoleSecretIDLookupUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + secretID := data.Get("secret_id").(string) + if secretID == "" { + return logical.ErrorResponse("missing secret_id"), nil + } + + lock := b.roleLock(roleName) + lock.RLock() + defer lock.RUnlock() + + // Fetch the role + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, fmt.Errorf("role %q does not exist", roleName) + } + + // Create the HMAC of the secret ID using the per-role HMAC key + secretIDHMAC, err := createHMAC(role.HMACKey, secretID) + if err != nil { + return nil, fmt.Errorf("failed to create HMAC of secret_id: %w", err) + } + + // Create the HMAC of the roleName using the per-role HMAC key + roleNameHMAC, err := createHMAC(role.HMACKey, role.name) + if err != nil { + return nil, fmt.Errorf("failed to create HMAC of role_name: %w", err) + } + + // Create the index at which the secret_id would've been stored + entryIndex := fmt.Sprintf("%s%s/%s", role.SecretIDPrefix, roleNameHMAC, secretIDHMAC) + + secretLock := b.secretIDLock(secretIDHMAC) + secretLock.Lock() + defer secretLock.Unlock() + + secretIDEntry, err := b.nonLockedSecretIDStorageEntry(ctx, req.Storage, role.SecretIDPrefix, roleNameHMAC, secretIDHMAC) + if err != nil { + return nil, err + } + if secretIDEntry == nil { + return nil, nil + } + + // If a secret ID entry does not have a corresponding accessor + // entry, revoke the secret ID immediately + accessorEntry, err := b.secretIDAccessorEntry(ctx, req.Storage, secretIDEntry.SecretIDAccessor, role.SecretIDPrefix) + if err != nil { + return nil, fmt.Errorf("failed to read secret ID accessor entry: %w", err) + } + if accessorEntry == nil { + if err := req.Storage.Delete(ctx, entryIndex); err != nil { + return nil, fmt.Errorf("error deleting secret ID %q from storage: %w", secretIDHMAC, err) + } + return logical.ErrorResponse("invalid secret id"), nil + } + + return &logical.Response{ + Data: secretIDEntry.ToResponseData(), + }, nil +} + +func (entry *secretIDStorageEntry) ToResponseData() map[string]interface{} { + ret := map[string]interface{}{ + "secret_id_accessor": entry.SecretIDAccessor, + "secret_id_num_uses": entry.SecretIDNumUses, + "secret_id_ttl": entry.SecretIDTTL / time.Second, + "creation_time": entry.CreationTime, + "expiration_time": entry.ExpirationTime, + "last_updated_time": entry.LastUpdatedTime, + "metadata": entry.Metadata, + "cidr_list": entry.CIDRList, + "token_bound_cidrs": entry.TokenBoundCIDRs, + } + if len(entry.TokenBoundCIDRs) == 0 { + ret["token_bound_cidrs"] = []string{} + } + return ret +} + +func (b 
*backend) pathRoleSecretIDDestroyUpdateDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + secretID := data.Get("secret_id").(string) + if secretID == "" { + return logical.ErrorResponse("missing secret_id"), nil + } + + roleLock := b.roleLock(roleName) + roleLock.RLock() + defer roleLock.RUnlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, fmt.Errorf("role %q does not exist", roleName) + } + + secretIDHMAC, err := createHMAC(role.HMACKey, secretID) + if err != nil { + return nil, fmt.Errorf("failed to create HMAC of secret_id: %w", err) + } + + roleNameHMAC, err := createHMAC(role.HMACKey, role.name) + if err != nil { + return nil, fmt.Errorf("failed to create HMAC of role_name: %w", err) + } + + entryIndex := fmt.Sprintf("%s%s/%s", role.SecretIDPrefix, roleNameHMAC, secretIDHMAC) + + lock := b.secretIDLock(secretIDHMAC) + lock.Lock() + defer lock.Unlock() + + entry, err := b.nonLockedSecretIDStorageEntry(ctx, req.Storage, role.SecretIDPrefix, roleNameHMAC, secretIDHMAC) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + // Delete the accessor of the SecretID first + if err := b.deleteSecretIDAccessorEntry(ctx, req.Storage, entry.SecretIDAccessor, role.SecretIDPrefix); err != nil { + return nil, err + } + + // Delete the storage entry that corresponds to the SecretID + if err := req.Storage.Delete(ctx, entryIndex); err != nil { + return nil, fmt.Errorf("failed to delete secret_id: %w", err) + } + + return nil, nil +} + +// pathRoleSecretIDAccessorLookupUpdate returns the properties of the SecretID +// given its accessor +func (b *backend) pathRoleSecretIDAccessorLookupUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + secretIDAccessor := data.Get("secret_id_accessor").(string) + if secretIDAccessor == "" { + return logical.ErrorResponse("missing secret_id_accessor"), nil + } + + // SecretID is indexed based on HMACed roleName and HMACed SecretID. + // Get the role details to fetch the RoleID and accessor to get + // the HMACed SecretID. 
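+ //
+ // Example lookup (hypothetical accessor value; backend assumed to be
+ // mounted at "auth/approle"):
+ //   vault write auth/approle/role/my-role/secret-id-accessor/lookup \
+ //     secret_id_accessor=84896a0c-1347-aa90-a4f6-aca8b7558780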
+ + lock := b.roleLock(roleName) + lock.RLock() + defer lock.RUnlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, fmt.Errorf("role %q does not exist", roleName) + } + + accessorEntry, err := b.secretIDAccessorEntry(ctx, req.Storage, secretIDAccessor, role.SecretIDPrefix) + if err != nil { + return nil, err + } + if accessorEntry == nil { + return logical.RespondWithStatusCode( + logical.ErrorResponse("failed to find accessor entry for secret_id_accessor: %q", secretIDAccessor), + req, + http.StatusNotFound, + ) + } + + roleNameHMAC, err := createHMAC(role.HMACKey, role.name) + if err != nil { + return nil, fmt.Errorf("failed to create HMAC of role_name: %w", err) + } + + secretLock := b.secretIDLock(accessorEntry.SecretIDHMAC) + secretLock.RLock() + defer secretLock.RUnlock() + + secretIDEntry, err := b.nonLockedSecretIDStorageEntry(ctx, req.Storage, role.SecretIDPrefix, roleNameHMAC, accessorEntry.SecretIDHMAC) + if err != nil { + return nil, err + } + if secretIDEntry == nil { + return nil, nil + } + + return &logical.Response{ + Data: secretIDEntry.ToResponseData(), + }, nil +} + +func (b *backend) pathRoleSecretIDAccessorDestroyUpdateDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + secretIDAccessor := data.Get("secret_id_accessor").(string) + if secretIDAccessor == "" { + return logical.ErrorResponse("missing secret_id_accessor"), nil + } + + // SecretID is indexed based on HMACed roleName and HMACed SecretID. + // Get the role details to fetch the RoleID and accessor to get + // the HMACed SecretID. 
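+ //
+ // Example destroy (hypothetical accessor value; same mount assumption):
+ //   vault write auth/approle/role/my-role/secret-id-accessor/destroy \
+ //     secret_id_accessor=84896a0c-1347-aa90-a4f6-aca8b7558780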
+ + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, fmt.Errorf("role %q does not exist", roleName) + } + + accessorEntry, err := b.secretIDAccessorEntry(ctx, req.Storage, secretIDAccessor, role.SecretIDPrefix) + if err != nil { + return nil, err + } + if accessorEntry == nil { + return nil, fmt.Errorf("failed to find accessor entry for secret_id_accessor: %q", secretIDAccessor) + } + + roleNameHMAC, err := createHMAC(role.HMACKey, role.name) + if err != nil { + return nil, fmt.Errorf("failed to create HMAC of role_name: %w", err) + } + + lock := b.secretIDLock(accessorEntry.SecretIDHMAC) + lock.Lock() + defer lock.Unlock() + + // Verify we have a valid SecretID Storage Entry + entry, err := b.nonLockedSecretIDStorageEntry(ctx, req.Storage, role.SecretIDPrefix, roleNameHMAC, accessorEntry.SecretIDHMAC) + if err != nil { + return nil, err + } + if entry == nil { + return logical.ErrorResponse("invalid secret id accessor"), logical.ErrPermissionDenied + } + + entryIndex := fmt.Sprintf("%s%s/%s", role.SecretIDPrefix, roleNameHMAC, accessorEntry.SecretIDHMAC) + + // Delete the accessor of the SecretID first + if err := b.deleteSecretIDAccessorEntry(ctx, req.Storage, secretIDAccessor, role.SecretIDPrefix); err != nil { + return nil, err + } + + // Delete the storage entry that corresponds to the SecretID + if err := req.Storage.Delete(ctx, entryIndex); err != nil { + return nil, fmt.Errorf("failed to delete secret_id: %w", err) + } + + return nil, nil +} + +func (b *backend) pathRoleBoundCIDRUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + delete(data.Raw, "token_bound_cidrs") + delete(data.Raw, "secret_id_bound_cidrs") + return b.pathRoleBoundCIDRUpdateCommon(ctx, req, data) +} + +func (b *backend) pathRoleSecretIDBoundCIDRUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + delete(data.Raw, "bound_cidr_list") + delete(data.Raw, "token_bound_cidrs") + return b.pathRoleBoundCIDRUpdateCommon(ctx, req, data) +} + +func (b *backend) pathRoleTokenBoundCIDRUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + delete(data.Raw, "bound_cidr_list") + delete(data.Raw, "secret_id_bound_cidrs") + return b.pathRoleBoundCIDRUpdateCommon(ctx, req, data) +} + +func (b *backend) pathRoleBoundCIDRUpdateCommon(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.Lock() + defer lock.Unlock() + + // Re-read the role after grabbing the lock + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, logical.ErrUnsupportedPath + } + + if cidrsIfc, ok := data.GetFirst("secret_id_bound_cidrs", "bound_cidr_list"); ok { + cidrs := cidrsIfc.([]string) + if len(cidrs) == 0 { + return logical.ErrorResponse("missing bound_cidr_list"), nil + } + valid, err := cidrutil.ValidateCIDRListSlice(cidrs) + if err != nil { + return logical.ErrorResponse(fmt.Errorf("failed to validate CIDR blocks: %w", err).Error()), nil + } + if !valid { + return logical.ErrorResponse("failed to validate CIDR blocks"), nil + } + role.SecretIDBoundCIDRs = cidrs + + } else if cidrsIfc, ok := data.GetOk("token_bound_cidrs"); ok { + 
cidrs, err := parseutil.ParseAddrs(cidrsIfc.([]string)) + if err != nil { + return logical.ErrorResponse(fmt.Errorf("failed to parse token_bound_cidrs: %w", err).Error()), nil + } + role.TokenBoundCIDRs = cidrs + } + + return nil, b.setRoleEntry(ctx, req.Storage, role.name, role, "") +} + +func (b *backend) pathRoleSecretIDBoundCIDRRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + return b.pathRoleFieldRead(ctx, req, data, "secret_id_bound_cidrs") +} + +func (b *backend) pathRoleTokenBoundCIDRRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + return b.pathRoleFieldRead(ctx, req, data, "token_bound_cidrs") +} + +func (b *backend) pathRoleBoundCIDRListRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + return b.pathRoleFieldRead(ctx, req, data, "bound_cidr_list") +} + +func (b *backend) pathRoleFieldRead(ctx context.Context, req *logical.Request, data *framework.FieldData, fieldName string) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.Lock() + defer lock.Unlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } else { + switch fieldName { + case "secret_id_bound_cidrs": + return &logical.Response{ + Data: map[string]interface{}{ + "secret_id_bound_cidrs": role.SecretIDBoundCIDRs, + }, + }, nil + case "token_bound_cidrs": + return &logical.Response{ + Data: map[string]interface{}{ + "token_bound_cidrs": role.TokenBoundCIDRs, + }, + }, nil + case "bound_cidr_list": + resp := &logical.Response{ + Data: map[string]interface{}{ + "bound_cidr_list": role.BoundCIDRList, + }, + } + resp.AddWarning(`The "bound_cidr_list" field is deprecated and will be removed. 
Please use "secret_id_bound_cidrs" instead.`) + return resp, nil + default: + // shouldn't occur IRL + return nil, errors.New("unrecognized field provided: " + fieldName) + } + } +} + +func (b *backend) pathRoleBoundCIDRDelete(ctx context.Context, req *logical.Request, data *framework.FieldData, fieldName string) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.Lock() + defer lock.Unlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + switch fieldName { + case "bound_cidr_list": + role.BoundCIDRList = nil + case "secret_id_bound_cidrs": + role.SecretIDBoundCIDRs = nil + case "token_bound_cidrs": + role.TokenBoundCIDRs = nil + } + + return nil, b.setRoleEntry(ctx, req.Storage, roleName, role, "") +} + +func (b *backend) pathRoleBoundCIDRListDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + return b.pathRoleBoundCIDRDelete(ctx, req, data, "bound_cidr_list") +} + +func (b *backend) pathRoleSecretIDBoundCIDRDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + return b.pathRoleBoundCIDRDelete(ctx, req, data, "secret_id_bound_cidrs") +} + +func (b *backend) pathRoleTokenBoundCIDRDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + return b.pathRoleBoundCIDRDelete(ctx, req, data, "token_bound_cidrs") +} + +func (b *backend) pathRoleBindSecretIDUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.Lock() + defer lock.Unlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, logical.ErrUnsupportedPath + } + + if bindSecretIDRaw, ok := data.GetOk("bind_secret_id"); ok { + role.BindSecretID = bindSecretIDRaw.(bool) + return nil, b.setRoleEntry(ctx, req.Storage, role.name, role, "") + } else { + return logical.ErrorResponse("missing bind_secret_id"), nil + } +} + +func (b *backend) pathRoleBindSecretIDRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.RLock() + defer lock.RUnlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + "bind_secret_id": role.BindSecretID, + }, + }, nil +} + +func (b *backend) pathRoleBindSecretIDDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.Lock() + defer lock.Unlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + // Deleting a field implies setting the value to it's default value. 
+ role.BindSecretID = data.GetDefaultOrZero("bind_secret_id").(bool) + + return nil, b.setRoleEntry(ctx, req.Storage, role.name, role, "") +} + +func (b *backend) pathRoleLocalSecretIDsRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.RLock() + defer lock.RUnlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + localSecretIDs := false + if role.SecretIDPrefix == secretIDLocalPrefix { + localSecretIDs = true + } + + return &logical.Response{ + Data: map[string]interface{}{ + "local_secret_ids": localSecretIDs, + }, + }, nil +} + +func (b *backend) pathRolePoliciesUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.Lock() + defer lock.Unlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, logical.ErrUnsupportedPath + } + + policiesRaw, ok := data.GetOk("token_policies") + if !ok { + policiesRaw, ok = data.GetOk("policies") + if ok { + role.Policies = policyutil.ParsePolicies(policiesRaw) + role.TokenPolicies = role.Policies + } else { + return logical.ErrorResponse("missing token_policies"), nil + } + } else { + role.TokenPolicies = policyutil.ParsePolicies(policiesRaw) + _, ok = data.GetOk("policies") + if ok { + role.Policies = role.TokenPolicies + } else { + role.Policies = nil + } + } + + return nil, b.setRoleEntry(ctx, req.Storage, role.name, role, "") +} + +func (b *backend) pathRolePoliciesRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.RLock() + defer lock.RUnlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + p := role.TokenPolicies + if p == nil { + p = []string{} + } + d := map[string]interface{}{ + "token_policies": p, + } + + if len(role.Policies) > 0 { + d["policies"] = role.Policies + } + + return &logical.Response{ + Data: d, + }, nil +} + +func (b *backend) pathRolePoliciesDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.Lock() + defer lock.Unlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + role.TokenPolicies = nil + role.Policies = nil + + return nil, b.setRoleEntry(ctx, req.Storage, role.name, role, "") +} + +func (b *backend) pathRoleSecretIDNumUsesUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.Lock() + 
defer lock.Unlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, logical.ErrUnsupportedPath + } + + if numUsesRaw, ok := data.GetOk("secret_id_num_uses"); ok { + role.SecretIDNumUses = numUsesRaw.(int) + if role.SecretIDNumUses < 0 { + return logical.ErrorResponse("secret_id_num_uses cannot be negative"), nil + } + return nil, b.setRoleEntry(ctx, req.Storage, role.name, role, "") + } else { + return logical.ErrorResponse("missing secret_id_num_uses"), nil + } +} + +func (b *backend) pathRoleRoleIDUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.Lock() + defer lock.Unlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, logical.ErrUnsupportedPath + } + + previousRoleID := role.RoleID + role.RoleID = data.Get("role_id").(string) + if role.RoleID == "" { + return logical.ErrorResponse("missing role_id"), nil + } + + return nil, b.setRoleEntry(ctx, req.Storage, role.name, role, previousRoleID) +} + +func (b *backend) pathRoleRoleIDRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.RLock() + defer lock.RUnlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + "role_id": role.RoleID, + }, + }, nil +} + +func (b *backend) pathRoleSecretIDNumUsesRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.RLock() + defer lock.RUnlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + "secret_id_num_uses": role.SecretIDNumUses, + }, + }, nil +} + +func (b *backend) pathRoleSecretIDNumUsesDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.Lock() + defer lock.Unlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + role.SecretIDNumUses = data.GetDefaultOrZero("secret_id_num_uses").(int) + + return nil, b.setRoleEntry(ctx, req.Storage, role.name, role, "") +} + +func (b *backend) pathRoleSecretIDTTLUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.Lock() + defer lock.Unlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return 
nil, err + } + if role == nil { + return nil, logical.ErrUnsupportedPath + } + + if secretIDTTLRaw, ok := data.GetOk("secret_id_ttl"); ok { + role.SecretIDTTL = time.Second * time.Duration(secretIDTTLRaw.(int)) + return nil, b.setRoleEntry(ctx, req.Storage, role.name, role, "") + } else { + return logical.ErrorResponse("missing secret_id_ttl"), nil + } +} + +func (b *backend) pathRoleSecretIDTTLRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.RLock() + defer lock.RUnlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + "secret_id_ttl": role.SecretIDTTL / time.Second, + }, + }, nil +} + +func (b *backend) pathRoleSecretIDTTLDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.Lock() + defer lock.Unlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + role.SecretIDTTL = time.Second * time.Duration(data.GetDefaultOrZero("secret_id_ttl").(int)) + + return nil, b.setRoleEntry(ctx, req.Storage, role.name, role, "") +} + +func (b *backend) pathRolePeriodUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.Lock() + defer lock.Unlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, logical.ErrUnsupportedPath + } + + periodRaw, ok := data.GetOk("token_period") + if !ok { + periodRaw, ok = data.GetOk("period") + if ok { + role.Period = time.Second * time.Duration(periodRaw.(int)) + role.TokenPeriod = role.Period + } else { + return logical.ErrorResponse("missing period"), nil + } + } else { + role.TokenPeriod = time.Second * time.Duration(periodRaw.(int)) + _, ok = data.GetOk("period") + if ok { + role.Period = role.TokenPeriod + } else { + role.Period = 0 + } + } + + if role.TokenPeriod > b.System().MaxLeaseTTL() { + return logical.ErrorResponse(fmt.Sprintf("period of %q is greater than the backend's maximum lease TTL of %q", role.Period.String(), b.System().MaxLeaseTTL().String())), nil + } + return nil, b.setRoleEntry(ctx, req.Storage, role.name, role, "") +} + +func (b *backend) pathRolePeriodRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.RLock() + defer lock.RUnlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + d := map[string]interface{}{ + "token_period": role.TokenPeriod / time.Second, + } + + if role.Period > 0 { + d["period"] = role.Period / time.Second + } + + return &logical.Response{ + Data: 
d, + }, nil +} + +func (b *backend) pathRolePeriodDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.Lock() + defer lock.Unlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + role.TokenPeriod = 0 + role.Period = 0 + + return nil, b.setRoleEntry(ctx, req.Storage, role.name, role, "") +} + +func (b *backend) pathRoleTokenNumUsesUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.Lock() + defer lock.Unlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, logical.ErrUnsupportedPath + } + + if tokenNumUsesRaw, ok := data.GetOk("token_num_uses"); ok { + role.TokenNumUses = tokenNumUsesRaw.(int) + return nil, b.setRoleEntry(ctx, req.Storage, role.name, role, "") + } else { + return logical.ErrorResponse("missing token_num_uses"), nil + } +} + +func (b *backend) pathRoleTokenNumUsesRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.RLock() + defer lock.RUnlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + "token_num_uses": role.TokenNumUses, + }, + }, nil +} + +func (b *backend) pathRoleTokenNumUsesDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.Lock() + defer lock.Unlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + role.TokenNumUses = data.GetDefaultOrZero("token_num_uses").(int) + + return nil, b.setRoleEntry(ctx, req.Storage, role.name, role, "") +} + +func (b *backend) pathRoleTokenTTLUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.Lock() + defer lock.Unlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, logical.ErrUnsupportedPath + } + + if tokenTTLRaw, ok := data.GetOk("token_ttl"); ok { + role.TokenTTL = time.Second * time.Duration(tokenTTLRaw.(int)) + if role.TokenMaxTTL > time.Duration(0) && role.TokenTTL > role.TokenMaxTTL { + return logical.ErrorResponse("token_ttl should not be greater than token_max_ttl"), nil + } + return nil, b.setRoleEntry(ctx, req.Storage, role.name, role, "") + } else { + return logical.ErrorResponse("missing token_ttl"), nil + } +} + +func (b *backend) 
pathRoleTokenTTLRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.RLock() + defer lock.RUnlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + "token_ttl": role.TokenTTL / time.Second, + }, + }, nil +} + +func (b *backend) pathRoleTokenTTLDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.Lock() + defer lock.Unlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + role.TokenTTL = time.Second * time.Duration(data.GetDefaultOrZero("token_ttl").(int)) + + return nil, b.setRoleEntry(ctx, req.Storage, role.name, role, "") +} + +func (b *backend) pathRoleTokenMaxTTLUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.Lock() + defer lock.Unlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, logical.ErrUnsupportedPath + } + + if tokenMaxTTLRaw, ok := data.GetOk("token_max_ttl"); ok { + role.TokenMaxTTL = time.Second * time.Duration(tokenMaxTTLRaw.(int)) + if role.TokenMaxTTL > time.Duration(0) && role.TokenTTL > role.TokenMaxTTL { + return logical.ErrorResponse("token_max_ttl should be greater than or equal to token_ttl"), nil + } + return nil, b.setRoleEntry(ctx, req.Storage, role.name, role, "") + } else { + return logical.ErrorResponse("missing token_max_ttl"), nil + } +} + +func (b *backend) pathRoleTokenMaxTTLRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.RLock() + defer lock.RUnlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + "token_max_ttl": role.TokenMaxTTL / time.Second, + }, + }, nil +} + +func (b *backend) pathRoleTokenMaxTTLDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role_name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role_name"), nil + } + + lock := b.roleLock(roleName) + lock.Lock() + defer lock.Unlock() + + role, err := b.roleEntry(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + role.TokenMaxTTL = time.Second * time.Duration(data.GetDefaultOrZero("token_max_ttl").(int)) + + return nil, b.setRoleEntry(ctx, req.Storage, role.name, role, "") +} + +func (b *backend) pathRoleSecretIDUpdate(ctx context.Context, req *logical.Request, data 
*framework.FieldData) (*logical.Response, error) {
+ secretID, err := uuid.GenerateUUID()
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate secret_id: %w", err)
+ }
+ return b.handleRoleSecretIDCommon(ctx, req, data, secretID)
+}
+
+func (b *backend) pathRoleCustomSecretIDUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ return b.handleRoleSecretIDCommon(ctx, req, data, data.Get("secret_id").(string))
+}
+
+func (b *backend) handleRoleSecretIDCommon(ctx context.Context, req *logical.Request, data *framework.FieldData, secretID string) (*logical.Response, error) {
+ roleName := data.Get("role_name").(string)
+ if roleName == "" {
+ return logical.ErrorResponse("missing role_name"), nil
+ }
+
+ if secretID == "" {
+ return logical.ErrorResponse("missing secret_id"), nil
+ }
+
+ lock := b.roleLock(roleName)
+ lock.RLock()
+ defer lock.RUnlock()
+
+ role, err := b.roleEntry(ctx, req.Storage, roleName)
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return logical.ErrorResponse(fmt.Sprintf("role %q does not exist", roleName)), logical.ErrUnsupportedPath
+ }
+
+ if !role.BindSecretID {
+ return logical.ErrorResponse("bind_secret_id is not set on the role"), nil
+ }
+
+ secretIDCIDRs := data.Get("cidr_list").([]string)
+
+ // Validate the list of CIDR blocks
+ if len(secretIDCIDRs) != 0 {
+ valid, err := cidrutil.ValidateCIDRListSlice(secretIDCIDRs)
+ if err != nil {
+ return nil, fmt.Errorf("failed to validate CIDR blocks: %w", err)
+ }
+ if !valid {
+ return logical.ErrorResponse("failed to validate CIDR blocks"), nil
+ }
+ }
+ // Ensure that the CIDRs on the secret ID are a subset of the role's
+ if err := verifyCIDRRoleSecretIDSubset(secretIDCIDRs, role.SecretIDBoundCIDRs); err != nil {
+ return nil, err
+ }
+
+ secretIDTokenCIDRs := data.Get("token_bound_cidrs").([]string)
+ if len(secretIDTokenCIDRs) != 0 {
+ valid, err := cidrutil.ValidateCIDRListSlice(secretIDTokenCIDRs)
+ if err != nil {
+ return nil, fmt.Errorf("failed to validate token CIDR blocks: %w", err)
+ }
+ if !valid {
+ return logical.ErrorResponse("failed to validate token CIDR blocks"), nil
+ }
+ }
+ // Ensure that the token CIDRs on the secret ID are a subset of the role's
+ var roleCIDRs []string
+ for _, v := range role.TokenBoundCIDRs {
+ roleCIDRs = append(roleCIDRs, v.String())
+ }
+ if err := verifyCIDRRoleSecretIDSubset(secretIDTokenCIDRs, roleCIDRs); err != nil {
+ return nil, err
+ }
+
+ var numUses int
+ // Check whether num_uses was specified; otherwise fall back to the role's secret_id_num_uses
+ if numUsesRaw, ok := data.GetOk("num_uses"); ok {
+ numUses = numUsesRaw.(int)
+ if numUses < 0 {
+ return logical.ErrorResponse("num_uses cannot be negative"), nil
+ }
+
+ // If the specified num_uses is higher than the role's secret_id_num_uses, return an error rather than implicitly overriding
+ if (numUses == 0 && role.SecretIDNumUses > 0) || (role.SecretIDNumUses > 0 && numUses > role.SecretIDNumUses) {
+ return logical.ErrorResponse("num_uses cannot be higher than the role's secret_id_num_uses"), nil
+ }
+ } else {
+ numUses = role.SecretIDNumUses
+ }
+
+ var ttl time.Duration
+ // Check whether ttl was specified; otherwise fall back to the role's secret_id_ttl
+ if ttlRaw, ok := data.GetOk("ttl"); ok {
+ ttl = time.Second * time.Duration(ttlRaw.(int))
+
+ // If the specified ttl is longer than the role's secret_id_ttl, return an error rather than implicitly overriding
+ if (ttl == 0 && role.SecretIDTTL > 0)
|| (role.SecretIDTTL > 0 && ttl > role.SecretIDTTL) {
+ return logical.ErrorResponse("ttl cannot be longer than the role's secret_id_ttl"), nil
+ }
+ } else {
+ ttl = role.SecretIDTTL
+ }
+
+ secretIDStorage := &secretIDStorageEntry{
+ SecretIDNumUses: numUses,
+ SecretIDTTL: ttl,
+ Metadata: make(map[string]string),
+ CIDRList: secretIDCIDRs,
+ TokenBoundCIDRs: secretIDTokenCIDRs,
+ }
+
+ if err = strutil.ParseArbitraryKeyValues(data.Get("metadata").(string), secretIDStorage.Metadata, ","); err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("failed to parse metadata: %v", err)), nil
+ }
+
+ if secretIDStorage, err = b.registerSecretIDEntry(ctx, req.Storage, role.name, secretID, role.HMACKey, role.SecretIDPrefix, secretIDStorage); err != nil {
+ return nil, fmt.Errorf("failed to store secret_id: %w", err)
+ }
+
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ "secret_id": secretID,
+ "secret_id_accessor": secretIDStorage.SecretIDAccessor,
+ "secret_id_ttl": int64(b.deriveSecretIDTTL(secretIDStorage.SecretIDTTL).Seconds()),
+ "secret_id_num_uses": secretIDStorage.SecretIDNumUses,
+ },
+ }
+
+ return resp, nil
+}
+
+func (b *backend) roleIDLock(roleID string) *locksutil.LockEntry {
+ return locksutil.LockForKey(b.roleIDLocks, roleID)
+}
+
+func (b *backend) roleLock(roleName string) *locksutil.LockEntry {
+ return locksutil.LockForKey(b.roleLocks, strings.ToLower(roleName))
+}
+
+// setRoleIDEntry creates a storage entry that maps RoleID to Role
+func (b *backend) setRoleIDEntry(ctx context.Context, s logical.Storage, roleID string, roleIDEntry *roleIDStorageEntry) error {
+ lock := b.roleIDLock(roleID)
+ lock.Lock()
+ defer lock.Unlock()
+
+ salt, err := b.Salt(ctx)
+ if err != nil {
+ return err
+ }
+ entryIndex := "role_id/" + salt.SaltID(roleID)
+
+ entry, err := logical.StorageEntryJSON(entryIndex, roleIDEntry)
+ if err != nil {
+ return err
+ }
+ if err = s.Put(ctx, entry); err != nil {
+ return err
+ }
+ return nil
+}
+
+// roleIDEntry is used to read the storage entry that maps RoleID to Role
+func (b *backend) roleIDEntry(ctx context.Context, s logical.Storage, roleID string) (*roleIDStorageEntry, error) {
+ if roleID == "" {
+ return nil, fmt.Errorf("missing role id")
+ }
+
+ lock := b.roleIDLock(roleID)
+ lock.RLock()
+ defer lock.RUnlock()
+
+ var result roleIDStorageEntry
+
+ salt, err := b.Salt(ctx)
+ if err != nil {
+ return nil, err
+ }
+ entryIndex := "role_id/" + salt.SaltID(roleID)
+
+ if entry, err := s.Get(ctx, entryIndex); err != nil {
+ return nil, err
+ } else if entry == nil {
+ return nil, nil
+ } else if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+// roleIDEntryDelete is used to remove the secondary index that maps the
+// RoleID to the Role itself.
+func (b *backend) roleIDEntryDelete(ctx context.Context, s logical.Storage, roleID string) error {
+ if roleID == "" {
+ return fmt.Errorf("missing role id")
+ }
+
+ lock := b.roleIDLock(roleID)
+ lock.Lock()
+ defer lock.Unlock()
+
+ salt, err := b.Salt(ctx)
+ if err != nil {
+ return err
+ }
+ entryIndex := "role_id/" + salt.SaltID(roleID)
+
+ return s.Delete(ctx, entryIndex)
+}
+
+var roleHelp = map[string][2]string{
+ "role-list": {
+ "Lists all the roles registered with the backend.",
+ "The list will contain the names of the roles.",
+ },
+ "role": {
+ "Register a role with the backend.",
+ `A role can represent a service, a machine or anything that can be identified.
+The set of policies on the role defines access to the role, meaning that any
+Vault token with a policy set that is a superset of the policies on the
+role registered here will have access to the role. If a SecretID needs
+to be generated against only this specific role, it can be done via the
+'role/<role_name>/secret-id' and 'role/<role_name>/custom-secret-id' endpoints.
+The properties of the SecretID created against the role and the properties
+of the token issued with the SecretID generated against the role can be
+configured using the fields of this endpoint.`,
+ },
+ "role-bind-secret-id": {
+ "Require secret_id to be presented during login using this role.",
+ `By setting this to 'true', during login the field 'secret_id' becomes a mandatory argument.
+The value of 'secret_id' can be retrieved using the 'role/<role_name>/secret-id' endpoint.`,
+ },
+ "role-bound-cidr-list": {
+ `Deprecated: Comma-separated list of CIDR blocks, if set, specifies blocks of IP
+addresses which can perform the login operation`,
+ `During login, the IP address of the client will be checked to see if it
+belongs to the CIDR blocks specified. If CIDR blocks were set and if the
+IP is not encompassed by it, login fails`,
+ },
+ "secret-id-bound-cidrs": {
+ `Comma-separated list of CIDR blocks, if set, specifies blocks of IP
+addresses which can perform the login operation`,
+ `During login, the IP address of the client will be checked to see if it
+belongs to the CIDR blocks specified. If CIDR blocks were set and if the
+IP is not encompassed by it, login fails`,
+ },
+ "token-bound-cidrs": {
+ `Comma-separated string or list of CIDR blocks. If set, specifies the blocks of
+IP addresses which can use the returned token.`,
+ `During use of the returned token, the IP address of the client will be checked to see if it
+belongs to the CIDR blocks specified. If CIDR blocks were set and if the
+IP is not encompassed by it, token use fails`,
+ },
+ "role-policies": {
+ "Policies of the role.",
+ `A comma-delimited set of Vault policies that defines access to the role.
+All the Vault tokens with policies that encompass the policy set
+defined on the role can access the role.`,
+ },
+ "role-secret-id-num-uses": {
+ "Use limit of the SecretID generated against the role.",
+ `If a SecretID is generated/assigned against a role using the
+'role/<role_name>/secret-id' or 'role/<role_name>/custom-secret-id' endpoint,
+then the number of times this SecretID can be used is defined by this option.
+However, this option may be overridden by the request's 'num_uses' field.`,
+ },
+ "role-secret-id-ttl": {
+ "Duration in seconds of the SecretID generated against the role.",
+ `If a SecretID is generated/assigned against a role using the
+'role/<role_name>/secret-id' or 'role/<role_name>/custom-secret-id' endpoint,
+then the lifetime of this SecretID is defined by this option.
+However, this option may be overridden by the request's 'ttl' field.`,
+ },
+ "role-secret-id-lookup": {
+ "Read the properties of an issued secret_id",
+ `This endpoint is used to read the properties of a secret_id associated with a
+role.`,
+ },
+ "role-secret-id-destroy": {
+ "Invalidate an issued secret_id",
+ `This endpoint is used to delete the properties of a secret_id associated with a
+role.`,
+ },
+ "role-secret-id-accessor-lookup": {
+ "Read an issued secret_id, using its accessor",
+ `This is particularly useful to look up the non-expiring 'secret_id's.
+The list operation on the 'role/<role_name>/secret-id' endpoint will return
+the 'secret_id_accessor's. This endpoint can be used to read the properties
+of the secret.
If the 'secret_id_num_uses' field in the response is 0, it
+represents a non-expiring 'secret_id'.`,
+ },
+ "role-secret-id-accessor-destroy": {
+ "Delete an issued secret_id, using its accessor",
+ `This is particularly useful to clean up the non-expiring 'secret_id's.
+The list operation on the 'role/<role_name>/secret-id' endpoint will return
+the 'secret_id_accessor's. This endpoint can be used to read the properties
+of the secret. If the 'secret_id_num_uses' field in the response is 0, it
+represents a non-expiring 'secret_id'.`,
+ },
+ "role-token-num-uses": {
+ "Number of times issued tokens can be used",
+ `By default, this will be set to zero, indicating that the issued
+tokens can be used any number of times.`,
+ },
+ "role-token-ttl": {
+ `Duration in seconds, the lifetime of the token issued by using the SecretID that
+is generated against this role, before which the token needs to be renewed.`,
+ `If SecretIDs are generated against the role, using the 'role/<role_name>/secret-id' or the
+'role/<role_name>/custom-secret-id' endpoints, and if those SecretIDs are used
+to perform the login operation, then the value of 'token-ttl' defines the
+lifetime of the token issued, before which the token needs to be renewed.`,
+ },
+ "role-token-max-ttl": {
+ `Duration in seconds, the maximum lifetime of the tokens issued by using
+the SecretIDs that were generated against this role, after which the
+tokens are not allowed to be renewed.`,
+ `If SecretIDs are generated against the role using the 'role/<role_name>/secret-id'
+or the 'role/<role_name>/custom-secret-id' endpoints, and if those SecretIDs
+are used to perform the login operation, then the value of 'token-max-ttl'
+defines the maximum lifetime of the tokens issued, after which the tokens
+cannot be renewed. A reauthentication is required after this duration.
+This value will be capped by the backend mount's maximum TTL value.`,
+ },
+ "role-id": {
+ "Returns the 'role_id' of the role.",
+ `If login is performed from a role, then its 'role_id' should be presented
+as a credential during the login. This 'role_id' can be retrieved using
+this endpoint.`,
+ },
+ "role-secret-id": {
+ "Generate a SecretID against this role.",
+ `The SecretID generated using this endpoint will be scoped to access
+just this role and nothing else. The properties of this SecretID will be
+based on the options set on the role. It will expire after a period
+defined by the 'ttl' field or 'secret_id_ttl' option on the role,
+and/or the backend mount's maximum TTL value.`,
+ },
+ "role-custom-secret-id": {
+ "Assign a SecretID of choice against the role.",
+ `This option is not recommended unless there is a specific need
+to do so. This will assign a client-supplied SecretID to be used to access
+the role. This SecretID will behave similarly to the SecretIDs generated by
+the backend. The properties of this SecretID will be based on the options
+set on the role. It will expire after a period defined by the 'ttl' field
+or 'secret_id_ttl' option on the role, and/or the backend mount's maximum TTL value.`,
+ },
+ "role-period": {
+ "Updates the value of 'period' on the role",
+ `If set, indicates that the token generated using this role
+should never expire. The token should be renewed within the
+duration specified by this value. The renewal duration will
+be fixed. If the Period in the role is modified, the token
+will pick up the new value during its next renewal.`,
+ },
+ "role-local-secret-ids": {
+ "Enables cluster local secret IDs",
+ `If set, the secret IDs generated using this role will be cluster local.
diff --git a/builtin/credential/approle/path_role_test.go b/builtin/credential/approle/path_role_test.go
new file mode 100644
index 0000000..a5ea9d1
--- /dev/null
+++ b/builtin/credential/approle/path_role_test.go
@@ -0,0 +1,2134 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package approle
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/go-test/deep"
+	"github.com/hashicorp/go-sockaddr"
+	"github.com/hashicorp/vault/sdk/helper/policyutil"
+	"github.com/hashicorp/vault/sdk/helper/testhelpers/schema"
+	"github.com/hashicorp/vault/sdk/helper/tokenutil"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/mitchellh/mapstructure"
+)
+
+func (b *backend) requestNoErr(t *testing.T, req *logical.Request) *logical.Response {
+	t.Helper()
+	resp, err := b.HandleRequest(context.Background(), req)
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("err:%v resp:%#v", err, resp)
+	}
+	schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route(req.Path), req.Operation), resp, true)
+	return resp
+}
+
+func TestAppRole_LocalSecretIDsRead(t *testing.T) {
+	b, storage := createBackendWithStorage(t)
+
+	roleData := map[string]interface{}{
+		"local_secret_ids": true,
+		"bind_secret_id":   true,
+	}
+
+	b.requestNoErr(t, &logical.Request{
+		Operation: logical.CreateOperation,
+		Path:      "role/testrole",
+		Storage:   storage,
+		Data:      roleData,
+	})
+
+	resp := b.requestNoErr(t, &logical.Request{
+		Operation: logical.ReadOperation,
+		Storage:   storage,
+		Path:      "role/testrole/local-secret-ids",
+	})
+
+	if !resp.Data["local_secret_ids"].(bool) {
+		t.Fatalf("expected local_secret_ids to be returned")
+	}
+}
+
+func TestAppRole_LocalNonLocalSecretIDs(t *testing.T) {
+	b, storage := createBackendWithStorage(t)
+
+	// Create a role with local_secret_ids set
+	resp := b.requestNoErr(t, &logical.Request{
+		Path:      "role/testrole1",
+		Operation: logical.CreateOperation,
+		Storage:   storage,
+		Data: map[string]interface{}{
+			"policies":         []string{"default", "role1policy"},
+			"bind_secret_id":   true,
+			"local_secret_ids": true,
+		},
+	})
+
+	// Create another role without setting local_secret_ids
+	resp = b.requestNoErr(t, &logical.Request{
+		Path:      "role/testrole2",
+		Operation: logical.CreateOperation,
+		Storage:   storage,
+		Data: map[string]interface{}{
+			"policies":       []string{"default", "role1policy"},
+			"bind_secret_id": true,
+		},
+	})
+
+	count := 10
+	// Create secret IDs on testrole1
+	for i := 0; i < count; i++ {
+		resp = b.requestNoErr(t, &logical.Request{
+			Path:      "role/testrole1/secret-id",
+			Operation: logical.UpdateOperation,
+			Storage:   storage,
+		})
+	}
+
+	// Check the number of secret IDs generated
+	resp = b.requestNoErr(t, &logical.Request{
+		Path:      "role/testrole1/secret-id",
+		Operation: logical.ListOperation,
+		Storage:   storage,
+	})
+
+	if len(resp.Data["keys"].([]string)) != count {
+		t.Fatalf("failed to list secret IDs")
+	}
+
+	// Create secret IDs on testrole2
+	for i := 0; i < count; i++ {
+		resp = b.requestNoErr(t, &logical.Request{
+			Path:      "role/testrole2/secret-id",
+			Operation: logical.UpdateOperation,
+			Storage:   storage,
+		})
+	}
+
+	resp = b.requestNoErr(t, &logical.Request{
+		Path:      "role/testrole2/secret-id",
+		Operation: logical.ListOperation,
+		Storage:   storage,
+	})
+
+	if len(resp.Data["keys"].([]string)) != count {
+		t.Fatalf("failed to list secret IDs")
+	}
+}
+
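+// TestAppRole_UpgradeSecretIDPrefix verifies that reading a role entry
+// persisted without a SecretIDPrefix upgrades it in place, and that the
+// role read response includes local_secret_ids.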
+func TestAppRole_UpgradeSecretIDPrefix(t *testing.T) {
+	var resp *logical.Response
+	var err error
+
+	b, storage := createBackendWithStorage(t)
+
+	// Create a role entry directly in storage without SecretIDPrefix
+	err = b.setRoleEntry(context.Background(), storage, "testrole", &roleStorageEntry{
+		RoleID:           "testroleid",
+		HMACKey:          "testhmackey",
+		Policies:         []string{"default"},
+		BindSecretID:     true,
+		BoundCIDRListOld: "127.0.0.1/18,192.178.1.2/24",
+	}, "")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Reading the role entry should upgrade it to contain SecretIDPrefix
+	role, err := b.roleEntry(context.Background(), storage, "testrole")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if role.SecretIDPrefix == "" {
+		t.Fatalf("expected SecretIDPrefix to be set")
+	}
+
+	// Ensure that the API response contains local_secret_ids
+	resp = b.requestNoErr(t, &logical.Request{
+		Path:      "role/testrole",
+		Operation: logical.ReadOperation,
+		Storage:   storage,
+	})
+
+	_, ok := resp.Data["local_secret_ids"]
+	if !ok {
+		t.Fatalf("expected local_secret_ids to be present in the response")
+	}
+}
+
+func TestAppRole_LocalSecretIDImmutability(t *testing.T) {
+	var resp *logical.Response
+	var err error
+
+	b, storage := createBackendWithStorage(t)
+
+	roleData := map[string]interface{}{
+		"policies":         []string{"default"},
+		"bind_secret_id":   true,
+		"bound_cidr_list":  []string{"127.0.0.1/18", "192.178.1.2/24"},
+		"local_secret_ids": true,
+	}
+
+	// Create a role with local_secret_ids set
+	resp = b.requestNoErr(t, &logical.Request{
+		Path:      "role/testrole",
+		Operation: logical.CreateOperation,
+		Storage:   storage,
+		Data:      roleData,
+	})
+
+	// An attempt to modify local_secret_ids should fail
+	resp, err = b.HandleRequest(context.Background(), &logical.Request{
+		Path:      "role/testrole",
+		Operation: logical.UpdateOperation,
+		Storage:   storage,
+		Data:      roleData,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp == nil || !resp.IsError() {
+		t.Fatalf("expected an error since local_secret_ids can't be overwritten")
+	}
+}
+
+func TestAppRole_UpgradeBoundCIDRList(t *testing.T) {
+	var resp *logical.Response
+	var err error
+
+	b, storage := createBackendWithStorage(t)
+
+	roleData := map[string]interface{}{
+		"policies":        []string{"default"},
+		"bind_secret_id":  true,
+		"bound_cidr_list": []string{"127.0.0.1/18", "192.178.1.2/24"},
+	}
+
+	// Create a role with bound_cidr_list set
+	resp = b.requestNoErr(t, &logical.Request{
+		Path:      "role/testrole",
+		Operation: logical.CreateOperation,
+		Storage:   storage,
+		Data:      roleData,
+	})
+
+	// Read the role and check that the bound_cidr_list is set properly
+	resp = b.requestNoErr(t, &logical.Request{
+		Path:      "role/testrole",
+		Operation: logical.ReadOperation,
+		Storage:   storage,
+	})
+
+	expected := []string{"127.0.0.1/18", "192.178.1.2/24"}
+	actual := resp.Data["secret_id_bound_cidrs"].([]string)
+
+	if !reflect.DeepEqual(expected, actual) {
+		t.Fatalf("bad: secret_id_bound_cidrs; expected: %#v\nactual: %#v\n", expected, actual)
+	}
+
+	// Modify the storage entry of the role to hold the old-style, string-typed bound_cidr_list
+	role := &roleStorageEntry{
+		RoleID:           "testroleid",
+		HMACKey:          "testhmackey",
+		Policies:         []string{"default"},
+		BindSecretID:     true,
+		BoundCIDRListOld: "127.0.0.1/18,192.178.1.2/24",
+		SecretIDPrefix:   secretIDPrefix,
+	}
+	err = b.setRoleEntry(context.Background(), storage, "testrole", role, "")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Read the role. The upgrade code should have migrated the old type to the new type
+	resp = b.requestNoErr(t, &logical.Request{
+		Path:      "role/testrole",
+		Operation: logical.ReadOperation,
+		Storage:   storage,
+	})
+
+	// Refresh 'actual' from the latest read before comparing
+	actual = resp.Data["secret_id_bound_cidrs"].([]string)
+	if !reflect.DeepEqual(expected, actual) {
+		t.Fatalf("bad: bound_cidr_list; expected: %#v\nactual: %#v\n", expected, actual)
+	}
+
+	// Create a secret-id by supplying a subset of the role's CIDR blocks with the new type
+	resp = b.requestNoErr(t, &logical.Request{
+		Path:      "role/testrole/secret-id",
+		Operation: logical.UpdateOperation,
+		Storage:   storage,
+		Data: map[string]interface{}{
+			"cidr_list": []string{"127.0.0.1/24"},
+		},
+	})
+
+	if resp.Data["secret_id"].(string) == "" {
+		t.Fatalf("failed to generate secret-id")
+	}
+
+	// Check that the backwards compatibility for the string type is not broken
+	resp = b.requestNoErr(t, &logical.Request{
+		Path:      "role/testrole/secret-id",
+		Operation: logical.UpdateOperation,
+		Storage:   storage,
+		Data: map[string]interface{}{
+			"cidr_list": "127.0.0.1/24",
+		},
+	})
+
+	if resp.Data["secret_id"].(string) == "" {
+		t.Fatalf("failed to generate secret-id")
+	}
+}
+
+func TestAppRole_RoleNameLowerCasing(t *testing.T) {
+	var resp *logical.Response
+	var err error
+	var roleID, secretID string
+
+	b, storage := createBackendWithStorage(t)
+
+	// Save a role without LowerCaseRoleName set
+	role := &roleStorageEntry{
+		RoleID:         "testroleid",
+		HMACKey:        "testhmackey",
+		Policies:       []string{"default"},
+		BindSecretID:   true,
+		SecretIDPrefix: secretIDPrefix,
+	}
+	err = b.setRoleEntry(context.Background(), storage, "testRoleName", role, "")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	secretIDReq := &logical.Request{
+		Path:      "role/testRoleName/secret-id",
+		Operation: logical.UpdateOperation,
+		Storage:   storage,
+	}
+	resp = b.requestNoErr(t, secretIDReq)
+
+	secretID = resp.Data["secret_id"].(string)
+	roleID = "testroleid"
+
+	// Regular login flow. This should succeed.
+	resp = b.requestNoErr(t, &logical.Request{
+		Path:      "login",
+		Operation: logical.UpdateOperation,
+		Storage:   storage,
+		Data: map[string]interface{}{
+			"role_id":   roleID,
+			"secret_id": secretID,
+		},
+	})
+
+	// Lower-case the role name when generating the secret id
+	secretIDReq.Path = "role/testrolename/secret-id"
+	resp = b.requestNoErr(t, secretIDReq)
+
+	secretID = resp.Data["secret_id"].(string)
+
+	// Login should fail
+	resp, err = b.HandleRequest(context.Background(), &logical.Request{
+		Path:      "login",
+		Operation: logical.UpdateOperation,
+		Storage:   storage,
+		Data: map[string]interface{}{
+			"role_id":   roleID,
+			"secret_id": secretID,
+		},
+	})
+	if err != nil && err != logical.ErrInvalidCredentials {
+		t.Fatal(err)
+	}
+	if resp == nil || !resp.IsError() {
+		t.Fatalf("expected an error")
+	}
+
+	// Delete the role and create it again. This time don't directly persist
+	// it, but route the request to the creation handler so that it sets the
+	// LowerCaseRoleName to true.
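+	// From here on, the role name should be matched case-insensitively by
+	// the secret-id, role-id, lookup and list handlers exercised below.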
+ resp = b.requestNoErr(t, &logical.Request{ + Path: "role/testRoleName", + Operation: logical.DeleteOperation, + Storage: storage, + }) + + roleReq := &logical.Request{ + Path: "role/testRoleName", + Operation: logical.CreateOperation, + Storage: storage, + Data: map[string]interface{}{ + "bind_secret_id": true, + }, + } + resp = b.requestNoErr(t, roleReq) + + // Create secret id with lower cased role name + resp = b.requestNoErr(t, &logical.Request{ + Path: "role/testrolename/secret-id", + Operation: logical.UpdateOperation, + Storage: storage, + }) + + secretID = resp.Data["secret_id"].(string) + + resp = b.requestNoErr(t, &logical.Request{ + Path: "role/testrolename/role-id", + Operation: logical.ReadOperation, + Storage: storage, + }) + + roleID = resp.Data["role_id"].(string) + + // Login should pass + resp = b.requestNoErr(t, &logical.Request{ + Path: "login", + Operation: logical.UpdateOperation, + Storage: storage, + Data: map[string]interface{}{ + "role_id": roleID, + "secret_id": secretID, + }, + }) + + // Lookup of secret ID should work in case-insensitive manner + resp = b.requestNoErr(t, &logical.Request{ + Path: "role/testrolename/secret-id/lookup", + Operation: logical.UpdateOperation, + Storage: storage, + Data: map[string]interface{}{ + "secret_id": secretID, + }, + }) + + if resp == nil { + t.Fatalf("failed to lookup secret IDs") + } + + // Listing of secret IDs should work in case-insensitive manner + resp = b.requestNoErr(t, &logical.Request{ + Path: "role/testrolename/secret-id", + Operation: logical.ListOperation, + Storage: storage, + }) + + if len(resp.Data["keys"].([]string)) != 1 { + t.Fatalf("failed to list secret IDs") + } +} + +func TestAppRole_RoleReadSetIndex(t *testing.T) { + var resp *logical.Response + var err error + + b, storage := createBackendWithStorage(t) + + roleReq := &logical.Request{ + Path: "role/testrole", + Operation: logical.CreateOperation, + Storage: storage, + Data: map[string]interface{}{ + "bind_secret_id": true, + }, + } + + // Create a role + resp = b.requestNoErr(t, roleReq) + + roleIDReq := &logical.Request{ + Path: "role/testrole/role-id", + Operation: logical.ReadOperation, + Storage: storage, + } + + // Get the role ID + resp = b.requestNoErr(t, roleIDReq) + + roleID := resp.Data["role_id"].(string) + + // Delete the role ID index + err = b.roleIDEntryDelete(context.Background(), storage, roleID) + if err != nil { + t.Fatal(err) + } + + // Read the role again. 
This should add the index and return a warning + roleReq.Operation = logical.ReadOperation + resp = b.requestNoErr(t, roleReq) + + // Check if the warning is being returned + if !strings.Contains(resp.Warnings[0], "Role identifier was missing an index back to role name.") { + t.Fatalf("bad: expected a warning in the response") + } + + roleIDIndex, err := b.roleIDEntry(context.Background(), storage, roleID) + if err != nil { + t.Fatal(err) + } + + // Check if the index has been successfully created + if roleIDIndex == nil || roleIDIndex.Name != "testrole" { + t.Fatalf("bad: expected role to have an index") + } + + roleReq.Operation = logical.UpdateOperation + roleReq.Data = map[string]interface{}{ + "bind_secret_id": true, + "policies": "default", + } + + // Check if updating and reading of roles work and that there are no lock + // contentions dangling due to previous operation + resp = b.requestNoErr(t, roleReq) + + roleReq.Operation = logical.ReadOperation + resp = b.requestNoErr(t, roleReq) +} + +func TestAppRole_CIDRSubset(t *testing.T) { + var resp *logical.Response + var err error + + b, storage := createBackendWithStorage(t) + + roleData := map[string]interface{}{ + "role_id": "role-id-123", + "policies": "a,b", + "bound_cidr_list": "127.0.0.1/24", + } + + roleReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/testrole1", + Storage: storage, + Data: roleData, + } + + resp = b.requestNoErr(t, roleReq) + + secretIDData := map[string]interface{}{ + "cidr_list": "127.0.0.1/16", + } + secretIDReq := &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "role/testrole1/secret-id", + Data: secretIDData, + } + + resp, err = b.HandleRequest(context.Background(), secretIDReq) + if resp != nil { + t.Fatalf("resp:%#v", resp) + } + if err == nil { + t.Fatal("expected an error") + } + + roleData["bound_cidr_list"] = "192.168.27.29/16,172.245.30.40/24,10.20.30.40/30" + roleReq.Operation = logical.UpdateOperation + resp = b.requestNoErr(t, roleReq) + + secretIDData["cidr_list"] = "192.168.27.29/20,172.245.30.40/25,10.20.30.40/32" + resp = b.requestNoErr(t, secretIDReq) +} + +func TestAppRole_TokenBoundCIDRSubset32Mask(t *testing.T) { + var resp *logical.Response + var err error + + b, storage := createBackendWithStorage(t) + + roleData := map[string]interface{}{ + "role_id": "role-id-123", + "policies": "a,b", + "token_bound_cidrs": "127.0.0.1/32", + } + + roleReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/testrole1", + Storage: storage, + Data: roleData, + } + + resp = b.requestNoErr(t, roleReq) + + secretIDData := map[string]interface{}{ + "token_bound_cidrs": "127.0.0.1/32", + } + secretIDReq := &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "role/testrole1/secret-id", + Data: secretIDData, + } + + resp = b.requestNoErr(t, secretIDReq) + + secretIDData = map[string]interface{}{ + "token_bound_cidrs": "127.0.0.1/24", + } + secretIDReq = &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "role/testrole1/secret-id", + Data: secretIDData, + } + + resp, err = b.HandleRequest(context.Background(), secretIDReq) + if resp != nil { + t.Fatalf("resp:%#v", resp) + } + + if err == nil { + t.Fatal("expected an error") + } +} + +func TestAppRole_RoleConstraints(t *testing.T) { + var resp *logical.Response + var err error + b, storage := createBackendWithStorage(t) + + roleData := map[string]interface{}{ + "role_id": "role-id-123", + "policies": "a,b", + } 
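+	// The updates below toggle bind_secret_id and bound_cidr_list one at a
+	// time; removing the last remaining constraint must fail.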
+ + roleReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/testrole1", + Storage: storage, + Data: roleData, + } + + // Set bind_secret_id, which is enabled by default + resp = b.requestNoErr(t, roleReq) + + // Set bound_cidr_list alone by explicitly disabling bind_secret_id + roleReq.Operation = logical.UpdateOperation + roleData["bind_secret_id"] = false + roleData["bound_cidr_list"] = "0.0.0.0/0" + resp = b.requestNoErr(t, roleReq) + + // Remove both constraints + roleReq.Operation = logical.UpdateOperation + roleData["bound_cidr_list"] = "" + roleData["bind_secret_id"] = false + resp, err = b.HandleRequest(context.Background(), roleReq) + if resp != nil && resp.IsError() { + t.Fatalf("err:%v, resp:%#v", err, resp) + } + if err == nil { + t.Fatalf("expected an error") + } +} + +func TestAppRole_RoleIDUpdate(t *testing.T) { + var resp *logical.Response + b, storage := createBackendWithStorage(t) + + roleData := map[string]interface{}{ + "role_id": "role-id-123", + "policies": "a,b", + "secret_id_num_uses": 10, + "secret_id_ttl": 300, + "token_ttl": 400, + "token_max_ttl": 500, + } + roleReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/testrole1", + Storage: storage, + Data: roleData, + } + resp = b.requestNoErr(t, roleReq) + + roleIDUpdateReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "role/testrole1/role-id", + Storage: storage, + Data: map[string]interface{}{ + "role_id": "customroleid", + }, + } + resp = b.requestNoErr(t, roleIDUpdateReq) + + secretIDReq := &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "role/testrole1/secret-id", + } + resp = b.requestNoErr(t, secretIDReq) + + secretID := resp.Data["secret_id"].(string) + + loginData := map[string]interface{}{ + "role_id": "customroleid", + "secret_id": secretID, + } + loginReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "login", + Storage: storage, + Data: loginData, + Connection: &logical.Connection{ + RemoteAddr: "127.0.0.1", + }, + } + resp = b.requestNoErr(t, loginReq) + + if resp.Auth == nil { + t.Fatalf("expected a non-nil auth object in the response") + } +} + +func TestAppRole_RoleIDUniqueness(t *testing.T) { + var resp *logical.Response + var err error + b, storage := createBackendWithStorage(t) + + roleData := map[string]interface{}{ + "role_id": "role-id-123", + "policies": "a,b", + "secret_id_num_uses": 10, + "secret_id_ttl": 300, + "token_ttl": 400, + "token_max_ttl": 500, + } + roleReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/testrole1", + Storage: storage, + Data: roleData, + } + + resp = b.requestNoErr(t, roleReq) + + roleReq.Path = "role/testrole2" + resp, err = b.HandleRequest(context.Background(), roleReq) + if err == nil && !(resp != nil && resp.IsError()) { + t.Fatalf("expected an error: got resp:%#v", resp) + } + + roleData["role_id"] = "role-id-456" + resp = b.requestNoErr(t, roleReq) + + roleReq.Operation = logical.UpdateOperation + roleData["role_id"] = "role-id-123" + resp, err = b.HandleRequest(context.Background(), roleReq) + if err == nil && !(resp != nil && resp.IsError()) { + t.Fatalf("expected an error: got resp:%#v", resp) + } + + roleReq.Path = "role/testrole1" + roleData["role_id"] = "role-id-456" + resp, err = b.HandleRequest(context.Background(), roleReq) + if err == nil && !(resp != nil && resp.IsError()) { + t.Fatalf("expected an error: got resp:%#v", resp) + } + + roleIDData := map[string]interface{}{ + "role_id": 
"role-id-456", + } + roleIDReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "role/testrole1/role-id", + Storage: storage, + Data: roleIDData, + } + resp, err = b.HandleRequest(context.Background(), roleIDReq) + if err == nil && !(resp != nil && resp.IsError()) { + t.Fatalf("expected an error: got resp:%#v", resp) + } + + roleIDData["role_id"] = "role-id-123" + roleIDReq.Path = "role/testrole2/role-id" + resp, err = b.HandleRequest(context.Background(), roleIDReq) + if err == nil && !(resp != nil && resp.IsError()) { + t.Fatalf("expected an error: got resp:%#v", resp) + } + + roleIDData["role_id"] = "role-id-2000" + resp = b.requestNoErr(t, roleIDReq) + + roleIDData["role_id"] = "role-id-1000" + roleIDReq.Path = "role/testrole1/role-id" + resp = b.requestNoErr(t, roleIDReq) +} + +func TestAppRole_RoleDeleteSecretID(t *testing.T) { + var resp *logical.Response + b, storage := createBackendWithStorage(t) + + createRole(t, b, storage, "role1", "a,b") + secretIDReq := &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "role/role1/secret-id", + } + // Create 3 secrets on the role + resp = b.requestNoErr(t, secretIDReq) + resp = b.requestNoErr(t, secretIDReq) + resp = b.requestNoErr(t, secretIDReq) + + listReq := &logical.Request{ + Operation: logical.ListOperation, + Storage: storage, + Path: "role/role1/secret-id", + } + resp = b.requestNoErr(t, listReq) + + secretIDAccessors := resp.Data["keys"].([]string) + if len(secretIDAccessors) != 3 { + t.Fatalf("bad: len of secretIDAccessors: expected:3 actual:%d", len(secretIDAccessors)) + } + + roleReq := &logical.Request{ + Operation: logical.DeleteOperation, + Storage: storage, + Path: "role/role1", + } + resp = b.requestNoErr(t, roleReq) + + resp, err := b.HandleRequest(context.Background(), listReq) + if err != nil || resp == nil || (resp != nil && !resp.IsError()) { + t.Fatalf("expected an error. 
err:%v resp:%#v", err, resp) + } +} + +func TestAppRole_RoleSecretIDReadDelete(t *testing.T) { + var resp *logical.Response + var err error + b, storage := createBackendWithStorage(t) + + createRole(t, b, storage, "role1", "a,b") + secretIDCreateReq := &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "role/role1/secret-id", + } + resp = b.requestNoErr(t, secretIDCreateReq) + + secretID := resp.Data["secret_id"].(string) + if secretID == "" { + t.Fatal("expected non empty secret ID") + } + + secretIDReq := &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "role/role1/secret-id/lookup", + Data: map[string]interface{}{ + "secret_id": secretID, + }, + } + resp = b.requestNoErr(t, secretIDReq) + + if resp.Data == nil { + t.Fatal(err) + } + + deleteSecretIDReq := &logical.Request{ + Operation: logical.DeleteOperation, + Storage: storage, + Path: "role/role1/secret-id/destroy", + Data: map[string]interface{}{ + "secret_id": secretID, + }, + } + resp = b.requestNoErr(t, deleteSecretIDReq) + resp, err = b.HandleRequest(context.Background(), secretIDReq) + if resp != nil && resp.IsError() { + t.Fatalf("error response:%#v", resp) + } + if err != nil { + t.Fatal(err) + } +} + +func TestAppRole_RoleSecretIDAccessorReadDelete(t *testing.T) { + var resp *logical.Response + var err error + b, storage := createBackendWithStorage(t) + + createRole(t, b, storage, "role1", "a,b") + secretIDReq := &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "role/role1/secret-id", + } + resp = b.requestNoErr(t, secretIDReq) + + listReq := &logical.Request{ + Operation: logical.ListOperation, + Storage: storage, + Path: "role/role1/secret-id", + } + resp = b.requestNoErr(t, listReq) + + hmacSecretID := resp.Data["keys"].([]string)[0] + + hmacReq := &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "role/role1/secret-id-accessor/lookup", + Data: map[string]interface{}{ + "secret_id_accessor": hmacSecretID, + }, + } + resp = b.requestNoErr(t, hmacReq) + + if resp.Data == nil { + t.Fatal(err) + } + + hmacReq.Path = "role/role1/secret-id-accessor/destroy" + resp = b.requestNoErr(t, hmacReq) + + hmacReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(context.Background(), hmacReq) + if resp != nil && resp.IsError() { + t.Fatalf("err:%v resp:%#v", err, resp) + } + if err == nil { + t.Fatalf("expected an error") + } +} + +func TestAppRoleSecretIDLookup(t *testing.T) { + b, storage := createBackendWithStorage(t) + createRole(t, b, storage, "role1", "a,b") + + req := &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "role/role1/secret-id-accessor/lookup", + Data: map[string]interface{}{ + "secret_id_accessor": "invalid", + }, + } + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + expected := &logical.Response{ + Data: map[string]interface{}{ + "http_content_type": "application/json", + "http_raw_body": `{"request_id":"","lease_id":"","renewable":false,"lease_duration":0,"data":{"error":"failed to find accessor entry for secret_id_accessor: \"invalid\""},"wrap_info":null,"warnings":null,"auth":null}`, + "http_status_code": 404, + }, + } + if !reflect.DeepEqual(resp, expected) { + t.Fatalf("resp:%#v expected:%#v", resp, expected) + } +} + +func TestAppRoleRoleListSecretID(t *testing.T) { + var resp *logical.Response + b, storage := createBackendWithStorage(t) + + createRole(t, b, 
storage, "role1", "a,b") + + secretIDReq := &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "role/role1/secret-id", + } + // Create 5 'secret_id's + resp = b.requestNoErr(t, secretIDReq) + resp = b.requestNoErr(t, secretIDReq) + resp = b.requestNoErr(t, secretIDReq) + resp = b.requestNoErr(t, secretIDReq) + resp = b.requestNoErr(t, secretIDReq) + + listReq := &logical.Request{ + Operation: logical.ListOperation, + Storage: storage, + Path: "role/role1/secret-id/", + } + resp = b.requestNoErr(t, listReq) + + secrets := resp.Data["keys"].([]string) + if len(secrets) != 5 { + t.Fatalf("bad: len of secrets: expected:5 actual:%d", len(secrets)) + } +} + +func TestAppRole_RoleList(t *testing.T) { + var resp *logical.Response + b, storage := createBackendWithStorage(t) + + createRole(t, b, storage, "role1", "a,b") + createRole(t, b, storage, "role2", "c,d") + createRole(t, b, storage, "role3", "e,f") + createRole(t, b, storage, "role4", "g,h") + createRole(t, b, storage, "role5", "i,j") + + listReq := &logical.Request{ + Operation: logical.ListOperation, + Path: "role", + Storage: storage, + } + resp = b.requestNoErr(t, listReq) + + actual := resp.Data["keys"].([]string) + expected := []string{"role1", "role2", "role3", "role4", "role5"} + if !policyutil.EquivalentPolicies(actual, expected) { + t.Fatalf("bad: listed roles: expected:%s\nactual:%s", expected, actual) + } +} + +func TestAppRole_RoleSecretIDWithoutFields(t *testing.T) { + var resp *logical.Response + b, storage := createBackendWithStorage(t) + + roleData := map[string]interface{}{ + "policies": "p,q,r,s", + "secret_id_num_uses": 10, + "secret_id_ttl": 300, + "token_ttl": 400, + "token_max_ttl": 500, + } + roleReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/role1", + Storage: storage, + Data: roleData, + } + + resp = b.requestNoErr(t, roleReq) + + roleSecretIDReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "role/role1/secret-id", + Storage: storage, + } + resp = b.requestNoErr(t, roleSecretIDReq) + + if resp.Data["secret_id"].(string) == "" { + t.Fatalf("failed to generate secret_id") + } + if resp.Data["secret_id_ttl"].(int64) != int64(roleData["secret_id_ttl"].(int)) { + t.Fatalf("secret_id_ttl has not defaulted to the role's secret id ttl") + } + if resp.Data["secret_id_num_uses"].(int) != roleData["secret_id_num_uses"].(int) { + t.Fatalf("secret_id_num_uses has not defaulted to the role's secret id num_uses") + } + + roleSecretIDReq.Path = "role/role1/custom-secret-id" + roleCustomSecretIDData := map[string]interface{}{ + "secret_id": "abcd123", + } + roleSecretIDReq.Data = roleCustomSecretIDData + resp = b.requestNoErr(t, roleSecretIDReq) + + if resp.Data["secret_id"] != "abcd123" { + t.Fatalf("failed to set specific secret_id to role") + } + if resp.Data["secret_id_ttl"].(int64) != int64(roleData["secret_id_ttl"].(int)) { + t.Fatalf("secret_id_ttl has not defaulted to the role's secret id ttl") + } + if resp.Data["secret_id_num_uses"].(int) != roleData["secret_id_num_uses"].(int) { + t.Fatalf("secret_id_num_uses has not defaulted to the role's secret id num_uses") + } +} + +func TestAppRole_RoleSecretIDWithValidFields(t *testing.T) { + type testCase struct { + name string + payload map[string]interface{} + } + + var resp *logical.Response + b, storage := createBackendWithStorage(t) + + roleData := map[string]interface{}{ + "policies": "p,q,r,s", + "secret_id_num_uses": 0, + "secret_id_ttl": 0, + "token_ttl": 400, + "token_max_ttl": 500, + } + 
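+	// With secret_id_ttl=0 and secret_id_num_uses=0, the role places no cap
+	// on the per-secret-ID 'ttl' and 'num_uses' values requested below.
+	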
roleReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/role1", + Storage: storage, + Data: roleData, + } + + resp = b.requestNoErr(t, roleReq) + + testCases := []testCase{ + { + name: "finite num_uses ttl", + payload: map[string]interface{}{"secret_id": "finite", "ttl": 5, "num_uses": 5}, + }, + { + name: "infinite num_uses and ttl", + payload: map[string]interface{}{"secret_id": "infinite", "ttl": 0, "num_uses": 0}, + }, + { + name: "finite num_uses and infinite ttl", + payload: map[string]interface{}{"secret_id": "mixed1", "ttl": 0, "num_uses": 5}, + }, + { + name: "infinite num_uses and finite ttl", + payload: map[string]interface{}{"secret_id": "mixed2", "ttl": 5, "num_uses": 0}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + roleSecretIDReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "role/role1/secret-id", + Storage: storage, + } + roleCustomSecretIDData := tc.payload + roleSecretIDReq.Data = roleCustomSecretIDData + + resp = b.requestNoErr(t, roleSecretIDReq) + + if resp.Data["secret_id"].(string) == "" { + t.Fatalf("failed to generate secret_id") + } + if resp.Data["secret_id_ttl"].(int64) != int64(tc.payload["ttl"].(int)) { + t.Fatalf("secret_id_ttl has not been set by the 'ttl' field") + } + if resp.Data["secret_id_num_uses"].(int) != tc.payload["num_uses"].(int) { + t.Fatalf("secret_id_num_uses has not been set by the 'num_uses' field") + } + + roleSecretIDReq.Path = "role/role1/custom-secret-id" + roleSecretIDReq.Data = roleCustomSecretIDData + resp = b.requestNoErr(t, roleSecretIDReq) + + if resp.Data["secret_id"] != tc.payload["secret_id"] { + t.Fatalf("failed to set specific secret_id to role") + } + if resp.Data["secret_id_ttl"].(int64) != int64(tc.payload["ttl"].(int)) { + t.Fatalf("secret_id_ttl has not been set by the 'ttl' field") + } + if resp.Data["secret_id_num_uses"].(int) != tc.payload["num_uses"].(int) { + t.Fatalf("secret_id_num_uses has not been set by the 'num_uses' field") + } + }) + } +} + +func TestAppRole_ErrorsRoleSecretIDWithInvalidFields(t *testing.T) { + type testCase struct { + name string + payload map[string]interface{} + expected string + } + + type roleTestCase struct { + name string + options map[string]interface{} + cases []testCase + } + + infiniteTestCases := []testCase{ + { + name: "infinite ttl", + payload: map[string]interface{}{"secret_id": "abcd123", "num_uses": 1, "ttl": 0}, + expected: "ttl cannot be longer than the role's secret_id_ttl", + }, + { + name: "infinite num_uses", + payload: map[string]interface{}{"secret_id": "abcd123", "num_uses": 0, "ttl": 1}, + expected: "num_uses cannot be higher than the role's secret_id_num_uses", + }, + } + + negativeTestCases := []testCase{ + { + name: "negative num_uses", + payload: map[string]interface{}{"secret_id": "abcd123", "num_uses": -1, "ttl": 0}, + expected: "num_uses cannot be negative", + }, + } + + roleTestCases := []roleTestCase{ + { + name: "infinite role secret id ttl", + options: map[string]interface{}{ + "secret_id_num_uses": 1, + "secret_id_ttl": 0, + }, + cases: []testCase{ + { + name: "higher num_uses", + payload: map[string]interface{}{"secret_id": "abcd123", "ttl": 0, "num_uses": 2}, + expected: "num_uses cannot be higher than the role's secret_id_num_uses", + }, + }, + }, + { + name: "infinite role num_uses", + options: map[string]interface{}{ + "secret_id_num_uses": 0, + "secret_id_ttl": 1, + }, + cases: []testCase{ + { + name: "longer ttl", + payload: map[string]interface{}{"secret_id": 
"abcd123", "ttl": 2, "num_uses": 0}, + expected: "ttl cannot be longer than the role's secret_id_ttl", + }, + }, + }, + { + name: "finite role ttl and num_uses", + options: map[string]interface{}{ + "secret_id_num_uses": 2, + "secret_id_ttl": 2, + }, + cases: infiniteTestCases, + }, + { + name: "mixed role ttl and num_uses", + options: map[string]interface{}{ + "secret_id_num_uses": 400, + "secret_id_ttl": 500, + }, + cases: negativeTestCases, + }, + } + + var resp *logical.Response + var err error + b, storage := createBackendWithStorage(t) + + for i, rc := range roleTestCases { + roleData := map[string]interface{}{ + "policies": "p,q,r,s", + "token_ttl": 400, + "token_max_ttl": 500, + } + roleData["secret_id_num_uses"] = rc.options["secret_id_num_uses"] + roleData["secret_id_ttl"] = rc.options["secret_id_ttl"] + + roleReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: fmt.Sprintf("role/role%d", i), + Storage: storage, + Data: roleData, + } + + resp = b.requestNoErr(t, roleReq) + + for _, tc := range rc.cases { + t.Run(fmt.Sprintf("%s/%s", rc.name, tc.name), func(t *testing.T) { + roleSecretIDReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("role/role%d/secret-id", i), + Storage: storage, + } + roleSecretIDReq.Data = tc.payload + resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) + if err != nil || (resp != nil && !resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + if resp.Data["error"].(string) != tc.expected { + t.Fatalf("expected: %q, got: %q", tc.expected, resp.Data["error"].(string)) + } + + roleSecretIDReq.Path = fmt.Sprintf("role/role%d/custom-secret-id", i) + resp, err = b.HandleRequest(context.Background(), roleSecretIDReq) + if err != nil || (resp != nil && !resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + if resp.Data["error"].(string) != tc.expected { + t.Fatalf("expected: %q, got: %q", tc.expected, resp.Data["error"].(string)) + } + }) + } + } +} + +func TestAppRole_RoleCRUD(t *testing.T) { + var resp *logical.Response + var err error + b, storage := createBackendWithStorage(t) + + roleData := map[string]interface{}{ + "policies": "p,q,r,s", + "secret_id_num_uses": 10, + "secret_id_ttl": 300, + "token_ttl": 400, + "token_max_ttl": 500, + "token_num_uses": 600, + "secret_id_bound_cidrs": "127.0.0.1/32,127.0.0.1/16", + } + roleReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/role1", + Storage: storage, + Data: roleData, + } + + resp = b.requestNoErr(t, roleReq) + + roleReq.Operation = logical.ReadOperation + resp = b.requestNoErr(t, roleReq) + + expected := map[string]interface{}{ + "bind_secret_id": true, + "policies": []string{"p", "q", "r", "s"}, + "secret_id_num_uses": 10, + "secret_id_ttl": 300, + "token_ttl": 400, + "token_max_ttl": 500, + "token_num_uses": 600, + "secret_id_bound_cidrs": []string{"127.0.0.1/32", "127.0.0.1/16"}, + "token_bound_cidrs": []string{}, + "token_type": "default", + } + + var expectedStruct roleStorageEntry + err = mapstructure.Decode(expected, &expectedStruct) + if err != nil { + t.Fatal(err) + } + + var actualStruct roleStorageEntry + err = mapstructure.Decode(resp.Data, &actualStruct) + if err != nil { + t.Fatal(err) + } + + expectedStruct.RoleID = actualStruct.RoleID + if diff := deep.Equal(expectedStruct, actualStruct); diff != nil { + t.Fatal(diff) + } + + roleData = map[string]interface{}{ + "role_id": "test_role_id", + "policies": "a,b,c,d", + "secret_id_num_uses": 100, + "secret_id_ttl": 3000, + "token_ttl": 
4000, + "token_max_ttl": 5000, + } + roleReq.Data = roleData + roleReq.Operation = logical.UpdateOperation + + resp = b.requestNoErr(t, roleReq) + + roleReq.Operation = logical.ReadOperation + resp = b.requestNoErr(t, roleReq) + + expected = map[string]interface{}{ + "policies": []string{"a", "b", "c", "d"}, + "secret_id_num_uses": 100, + "secret_id_ttl": 3000, + "token_ttl": 4000, + "token_max_ttl": 5000, + } + err = mapstructure.Decode(expected, &expectedStruct) + if err != nil { + t.Fatal(err) + } + + err = mapstructure.Decode(resp.Data, &actualStruct) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(expectedStruct, actualStruct) { + t.Fatalf("bad:\nexpected:%#v\nactual:%#v\n", expectedStruct, actualStruct) + } + + // RU for role_id field + roleReq.Path = "role/role1/role-id" + roleReq.Operation = logical.ReadOperation + resp = b.requestNoErr(t, roleReq) + + if resp.Data["role_id"].(string) != "test_role_id" { + t.Fatalf("bad: role_id: expected:test_role_id actual:%s\n", resp.Data["role_id"].(string)) + } + + roleReq.Data = map[string]interface{}{"role_id": "custom_role_id"} + roleReq.Operation = logical.UpdateOperation + resp = b.requestNoErr(t, roleReq) + + roleReq.Operation = logical.ReadOperation + resp = b.requestNoErr(t, roleReq) + + if resp.Data["role_id"].(string) != "custom_role_id" { + t.Fatalf("bad: role_id: expected:custom_role_id actual:%s\n", resp.Data["role_id"].(string)) + } + + // RUD for bind_secret_id field + roleReq.Path = "role/role1/bind-secret-id" + roleReq.Operation = logical.ReadOperation + resp = b.requestNoErr(t, roleReq) + + roleReq.Data = map[string]interface{}{"bind_secret_id": false} + roleReq.Operation = logical.UpdateOperation + resp = b.requestNoErr(t, roleReq) + + roleReq.Operation = logical.ReadOperation + resp = b.requestNoErr(t, roleReq) + + if resp.Data["bind_secret_id"].(bool) { + t.Fatalf("bad: bind_secret_id: expected:false actual:%t\n", resp.Data["bind_secret_id"].(bool)) + } + roleReq.Operation = logical.DeleteOperation + resp = b.requestNoErr(t, roleReq) + + roleReq.Operation = logical.ReadOperation + resp = b.requestNoErr(t, roleReq) + + if !resp.Data["bind_secret_id"].(bool) { + t.Fatalf("expected the default value of 'true' to be set") + } + + // RUD for policies field + roleReq.Path = "role/role1/policies" + roleReq.Operation = logical.ReadOperation + resp = b.requestNoErr(t, roleReq) + + roleReq.Data = map[string]interface{}{"policies": "a1,b1,c1,d1"} + roleReq.Operation = logical.UpdateOperation + resp = b.requestNoErr(t, roleReq) + + roleReq.Operation = logical.ReadOperation + resp = b.requestNoErr(t, roleReq) + + if !reflect.DeepEqual(resp.Data["policies"].([]string), []string{"a1", "b1", "c1", "d1"}) { + t.Fatalf("bad: policies: actual:%s\n", resp.Data["policies"].([]string)) + } + if !reflect.DeepEqual(resp.Data["token_policies"].([]string), []string{"a1", "b1", "c1", "d1"}) { + t.Fatalf("bad: policies: actual:%s\n", resp.Data["policies"].([]string)) + } + roleReq.Operation = logical.DeleteOperation + resp = b.requestNoErr(t, roleReq) + + roleReq.Operation = logical.ReadOperation + resp = b.requestNoErr(t, roleReq) + + expectedPolicies := []string{} + actualPolicies := resp.Data["token_policies"].([]string) + if !policyutil.EquivalentPolicies(expectedPolicies, actualPolicies) { + t.Fatalf("bad: token_policies: expected:%s actual:%s", expectedPolicies, actualPolicies) + } + + // RUD for secret-id-num-uses field + roleReq.Path = "role/role1/secret-id-num-uses" + roleReq.Operation = logical.ReadOperation + resp = 
b.requestNoErr(t, roleReq)
+
+	roleReq.Data = map[string]interface{}{"secret_id_num_uses": 200}
+	roleReq.Operation = logical.UpdateOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	roleReq.Operation = logical.ReadOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	if resp.Data["secret_id_num_uses"].(int) != 200 {
+		t.Fatalf("bad: secret_id_num_uses: expected:200 actual:%d\n", resp.Data["secret_id_num_uses"].(int))
+	}
+	roleReq.Operation = logical.DeleteOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	roleReq.Operation = logical.ReadOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	if resp.Data["secret_id_num_uses"].(int) != 0 {
+		t.Fatalf("expected value to be reset")
+	}
+
+	// RUD for secret_id_ttl field
+	roleReq.Path = "role/role1/secret-id-ttl"
+	roleReq.Operation = logical.ReadOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	roleReq.Data = map[string]interface{}{"secret_id_ttl": 3001}
+	roleReq.Operation = logical.UpdateOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	roleReq.Operation = logical.ReadOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	if resp.Data["secret_id_ttl"].(time.Duration) != 3001 {
+		t.Fatalf("bad: secret_id_ttl: expected:3001 actual:%d\n", resp.Data["secret_id_ttl"].(time.Duration))
+	}
+	roleReq.Operation = logical.DeleteOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	roleReq.Operation = logical.ReadOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	if resp.Data["secret_id_ttl"].(time.Duration) != 0 {
+		t.Fatalf("expected value to be reset")
+	}
+
+	// RUD for token-num-uses field
+	roleReq.Path = "role/role1/token-num-uses"
+	roleReq.Operation = logical.ReadOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	if resp.Data["token_num_uses"].(int) != 600 {
+		t.Fatalf("bad: token_num_uses: expected:600 actual:%d\n", resp.Data["token_num_uses"].(int))
+	}
+
+	roleReq.Data = map[string]interface{}{"token_num_uses": 60}
+	roleReq.Operation = logical.UpdateOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	roleReq.Operation = logical.ReadOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	if resp.Data["token_num_uses"].(int) != 60 {
+		t.Fatalf("bad: token_num_uses: expected:60 actual:%d\n", resp.Data["token_num_uses"].(int))
+	}
+
+	roleReq.Operation = logical.DeleteOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	roleReq.Operation = logical.ReadOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	if resp.Data["token_num_uses"].(int) != 0 {
+		t.Fatalf("expected value to be reset")
+	}
+
+	// RUD for 'period' field
+	roleReq.Path = "role/role1/period"
+	roleReq.Operation = logical.ReadOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	roleReq.Data = map[string]interface{}{"period": 9001}
+	roleReq.Operation = logical.UpdateOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	roleReq.Operation = logical.ReadOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	if resp.Data["period"].(time.Duration) != 9001 {
+		t.Fatalf("bad: period: expected:9001 actual:%d\n", resp.Data["period"].(time.Duration))
+	}
+	roleReq.Operation = logical.DeleteOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	roleReq.Operation = logical.ReadOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	if resp.Data["token_period"].(time.Duration) != 0 {
+		t.Fatalf("expected value to be reset")
+	}
+
+	// RUD for token_ttl field
+	roleReq.Path = "role/role1/token-ttl"
+	roleReq.Operation = logical.ReadOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	roleReq.Data = map[string]interface{}{"token_ttl": 4001}
+	roleReq.Operation = logical.UpdateOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	roleReq.Operation = 
logical.ReadOperation + resp = b.requestNoErr(t, roleReq) + + if resp.Data["token_ttl"].(time.Duration) != 4001 { + t.Fatalf("bad: token_ttl: expected:4001 actual:%d\n", resp.Data["token_ttl"].(time.Duration)) + } + roleReq.Operation = logical.DeleteOperation + resp = b.requestNoErr(t, roleReq) + + roleReq.Operation = logical.ReadOperation + resp = b.requestNoErr(t, roleReq) + + if resp.Data["token_ttl"].(time.Duration) != 0 { + t.Fatalf("expected value to be reset") + } + + // RUD for token_max_ttl field + roleReq.Path = "role/role1/token-max-ttl" + roleReq.Operation = logical.ReadOperation + resp = b.requestNoErr(t, roleReq) + + roleReq.Data = map[string]interface{}{"token_max_ttl": 5001} + roleReq.Operation = logical.UpdateOperation + resp = b.requestNoErr(t, roleReq) + + roleReq.Operation = logical.ReadOperation + resp = b.requestNoErr(t, roleReq) + + if resp.Data["token_max_ttl"].(time.Duration) != 5001 { + t.Fatalf("bad: token_max_ttl: expected:5001 actual:%d\n", resp.Data["token_max_ttl"].(time.Duration)) + } + roleReq.Operation = logical.DeleteOperation + resp = b.requestNoErr(t, roleReq) + + roleReq.Operation = logical.ReadOperation + resp = b.requestNoErr(t, roleReq) + + if resp.Data["token_max_ttl"].(time.Duration) != 0 { + t.Fatalf("expected value to be reset") + } + + // Delete test for role + roleReq.Path = "role/role1" + roleReq.Operation = logical.DeleteOperation + resp = b.requestNoErr(t, roleReq) + + roleReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + if resp != nil { + t.Fatalf("expected a nil response") + } +} + +func TestAppRole_RoleWithTokenBoundCIDRsCRUD(t *testing.T) { + var resp *logical.Response + var err error + b, storage := createBackendWithStorage(t) + + roleData := map[string]interface{}{ + "policies": "p,q,r,s", + "secret_id_num_uses": 10, + "secret_id_ttl": 300, + "token_ttl": 400, + "token_max_ttl": 500, + "token_num_uses": 600, + "secret_id_bound_cidrs": "127.0.0.1/32,127.0.0.1/16", + "token_bound_cidrs": "127.0.0.1/32,127.0.0.1/16", + } + roleReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/role1", + Storage: storage, + Data: roleData, + } + + resp = b.requestNoErr(t, roleReq) + + roleReq.Operation = logical.ReadOperation + resp = b.requestNoErr(t, roleReq) + + expected := map[string]interface{}{ + "bind_secret_id": true, + "policies": []string{"p", "q", "r", "s"}, + "secret_id_num_uses": 10, + "secret_id_ttl": 300, + "token_ttl": 400, + "token_max_ttl": 500, + "token_num_uses": 600, + "token_bound_cidrs": []string{"127.0.0.1/32", "127.0.0.1/16"}, + "secret_id_bound_cidrs": []string{"127.0.0.1/32", "127.0.0.1/16"}, + "token_type": "default", + } + + var expectedStruct roleStorageEntry + err = mapstructure.Decode(expected, &expectedStruct) + if err != nil { + t.Fatal(err) + } + + var actualStruct roleStorageEntry + err = mapstructure.Decode(resp.Data, &actualStruct) + if err != nil { + t.Fatal(err) + } + + expectedStruct.RoleID = actualStruct.RoleID + if !reflect.DeepEqual(expectedStruct, actualStruct) { + t.Fatalf("bad:\nexpected:%#v\nactual:%#v\n", expectedStruct, actualStruct) + } + + roleData = map[string]interface{}{ + "role_id": "test_role_id", + "policies": "a,b,c,d", + "secret_id_num_uses": 100, + "secret_id_ttl": 3000, + "token_ttl": 4000, + "token_max_ttl": 5000, + } + roleReq.Data = roleData + roleReq.Operation = logical.UpdateOperation + + resp = b.requestNoErr(t, 
roleReq)
+
+	roleReq.Operation = logical.ReadOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	expected = map[string]interface{}{
+		"policies":           []string{"a", "b", "c", "d"},
+		"secret_id_num_uses": 100,
+		"secret_id_ttl":      3000,
+		"token_ttl":          4000,
+		"token_max_ttl":      5000,
+	}
+	err = mapstructure.Decode(expected, &expectedStruct)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = mapstructure.Decode(resp.Data, &actualStruct)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !reflect.DeepEqual(expectedStruct, actualStruct) {
+		t.Fatalf("bad:\nexpected:%#v\nactual:%#v\n", expectedStruct, actualStruct)
+	}
+
+	// RUD for secret-id-bound-cidrs field
+	roleReq.Path = "role/role1/secret-id-bound-cidrs"
+	roleReq.Operation = logical.ReadOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	if resp.Data["secret_id_bound_cidrs"].([]string)[0] != "127.0.0.1/32" ||
+		resp.Data["secret_id_bound_cidrs"].([]string)[1] != "127.0.0.1/16" {
+		t.Fatalf("bad: secret_id_bound_cidrs: expected:127.0.0.1/32,127.0.0.1/16 actual:%v\n", resp.Data["secret_id_bound_cidrs"])
+	}
+
+	roleReq.Data = map[string]interface{}{"secret_id_bound_cidrs": []string{"127.0.0.1/20"}}
+	roleReq.Operation = logical.UpdateOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	roleReq.Operation = logical.ReadOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	if resp.Data["secret_id_bound_cidrs"].([]string)[0] != "127.0.0.1/20" {
+		t.Fatalf("bad: secret_id_bound_cidrs: expected:127.0.0.1/20 actual:%s\n", resp.Data["secret_id_bound_cidrs"].([]string)[0])
+	}
+
+	roleReq.Operation = logical.DeleteOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	roleReq.Operation = logical.ReadOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	if len(resp.Data["secret_id_bound_cidrs"].([]string)) != 0 {
+		t.Fatalf("expected value to be reset")
+	}
+
+	// RUD for token-bound-cidrs field
+	roleReq.Path = "role/role1/token-bound-cidrs"
+	roleReq.Operation = logical.ReadOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	if resp.Data["token_bound_cidrs"].([]*sockaddr.SockAddrMarshaler)[0].String() != "127.0.0.1" ||
+		resp.Data["token_bound_cidrs"].([]*sockaddr.SockAddrMarshaler)[1].String() != "127.0.0.1/16" {
+		m, err := json.Marshal(resp.Data["token_bound_cidrs"].([]*sockaddr.SockAddrMarshaler))
+		if err != nil {
+			t.Fatal(err)
+		}
+		t.Fatalf("bad: token_bound_cidrs: expected:127.0.0.1/32,127.0.0.1/16 actual:%s\n", string(m))
+	}
+
+	roleReq.Data = map[string]interface{}{"token_bound_cidrs": []string{"127.0.0.1/20"}}
+	roleReq.Operation = logical.UpdateOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	roleReq.Operation = logical.ReadOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	if resp.Data["token_bound_cidrs"].([]*sockaddr.SockAddrMarshaler)[0].String() != "127.0.0.1/20" {
+		t.Fatalf("bad: token_bound_cidrs: expected:127.0.0.1/20 actual:%s\n", resp.Data["token_bound_cidrs"].([]*sockaddr.SockAddrMarshaler)[0])
+	}
+
+	roleReq.Operation = logical.DeleteOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	roleReq.Operation = logical.ReadOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	if len(resp.Data["token_bound_cidrs"].([]*sockaddr.SockAddrMarshaler)) != 0 {
+		t.Fatalf("expected value to be reset")
+	}
+
+	// Delete test for role
+	roleReq.Path = "role/role1"
+	roleReq.Operation = logical.DeleteOperation
+	resp = b.requestNoErr(t, roleReq)
+
+	roleReq.Operation = logical.ReadOperation
+	resp, err = b.HandleRequest(context.Background(), roleReq)
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("err:%v resp:%#v", err, resp)
+	}
+	if resp != nil {
+		
t.Fatalf("expected a nil response") + } +} + +func TestAppRole_RoleWithTokenTypeCRUD(t *testing.T) { + var resp *logical.Response + var err error + b, storage := createBackendWithStorage(t) + + roleData := map[string]interface{}{ + "policies": "p,q,r,s", + "secret_id_num_uses": 10, + "secret_id_ttl": 300, + "token_ttl": 400, + "token_max_ttl": 500, + "token_num_uses": 600, + "token_type": "default-service", + } + roleReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/role1", + Storage: storage, + Data: roleData, + } + + resp = b.requestNoErr(t, roleReq) + + if 0 == len(resp.Warnings) { + t.Fatalf("bad:\nexpected warning in resp:%#v\n", resp.Warnings) + } + + roleReq.Operation = logical.ReadOperation + resp = b.requestNoErr(t, roleReq) + + expected := map[string]interface{}{ + "bind_secret_id": true, + "policies": []string{"p", "q", "r", "s"}, + "secret_id_num_uses": 10, + "secret_id_ttl": 300, + "token_ttl": 400, + "token_max_ttl": 500, + "token_num_uses": 600, + "token_type": "service", + } + + var expectedStruct roleStorageEntry + err = mapstructure.Decode(expected, &expectedStruct) + if err != nil { + t.Fatal(err) + } + + var actualStruct roleStorageEntry + err = mapstructure.Decode(resp.Data, &actualStruct) + if err != nil { + t.Fatal(err) + } + + expectedStruct.RoleID = actualStruct.RoleID + if !reflect.DeepEqual(expectedStruct, actualStruct) { + t.Fatalf("bad:\nexpected:%#v\nactual:%#v\n", expectedStruct, actualStruct) + } + + roleData = map[string]interface{}{ + "role_id": "test_role_id", + "policies": "a,b,c,d", + "secret_id_num_uses": 100, + "secret_id_ttl": 3000, + "token_ttl": 4000, + "token_max_ttl": 5000, + "token_type": "default-service", + } + roleReq.Data = roleData + roleReq.Operation = logical.UpdateOperation + + resp = b.requestNoErr(t, roleReq) + + if 0 == len(resp.Warnings) { + t.Fatalf("bad:\nexpected a warning in resp:%#v\n", resp.Warnings) + } + + roleReq.Operation = logical.ReadOperation + resp = b.requestNoErr(t, roleReq) + + expected = map[string]interface{}{ + "policies": []string{"a", "b", "c", "d"}, + "secret_id_num_uses": 100, + "secret_id_ttl": 3000, + "token_ttl": 4000, + "token_max_ttl": 5000, + "token_type": "service", + } + err = mapstructure.Decode(expected, &expectedStruct) + if err != nil { + t.Fatal(err) + } + + err = mapstructure.Decode(resp.Data, &actualStruct) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(expectedStruct, actualStruct) { + t.Fatalf("bad:\nexpected:%#v\nactual:%#v\n", expectedStruct, actualStruct) + } + + // Delete test for role + roleReq.Path = "role/role1" + roleReq.Operation = logical.DeleteOperation + resp = b.requestNoErr(t, roleReq) + + roleReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + if resp != nil { + t.Fatalf("expected a nil response") + } +} + +func createRole(t *testing.T, b *backend, s logical.Storage, roleName, policies string) { + roleData := map[string]interface{}{ + "policies": policies, + "secret_id_num_uses": 10, + "secret_id_ttl": 300, + "token_ttl": 400, + "token_max_ttl": 500, + } + roleReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/" + roleName, + Storage: s, + Data: roleData, + } + _ = b.requestNoErr(t, roleReq) +} + +// TestAppRole_TokenutilUpgrade ensures that when we read values out that are +// values with upgrade logic we see the correct struct entries populated +func 
TestAppRole_TokenutilUpgrade(t *testing.T) { + tests := []struct { + name string + storageValMissing bool + storageVal string + expectedTokenType logical.TokenType + }{ + { + "token_type_missing", + true, + "", + logical.TokenTypeDefault, + }, + { + "token_type_empty", + false, + "", + logical.TokenTypeDefault, + }, + { + "token_type_service", + false, + "service", + logical.TokenTypeService, + }, + } + + s := &logical.InmemStorage{} + + config := logical.TestBackendConfig() + config.StorageView = s + + ctx := context.Background() + + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + if b == nil { + t.Fatalf("failed to create backend") + } + if err := b.Setup(ctx, config); err != nil { + t.Fatal(err) + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Construct the storage entry object based on our test case. + tokenTypeKV := "" + if !tt.storageValMissing { + tokenTypeKV = fmt.Sprintf(`, "token_type": "%s"`, tt.storageVal) + } + entryVal := fmt.Sprintf(`{"policies": ["foo"], "period": 300000000000, "token_bound_cidrs": ["127.0.0.1", "10.10.10.10/24"]%s}`, tokenTypeKV) + + // Hand craft JSON because there is overlap between fields + if err := s.Put(ctx, &logical.StorageEntry{ + Key: "role/" + tt.name, + Value: []byte(entryVal), + }); err != nil { + t.Fatal(err) + } + + resEntry, err := b.roleEntry(ctx, s, tt.name) + if err != nil { + t.Fatal(err) + } + + exp := &roleStorageEntry{ + SecretIDPrefix: "secret_id/", + Policies: []string{"foo"}, + Period: 300 * time.Second, + TokenParams: tokenutil.TokenParams{ + TokenPolicies: []string{"foo"}, + TokenPeriod: 300 * time.Second, + TokenBoundCIDRs: []*sockaddr.SockAddrMarshaler{ + {SockAddr: sockaddr.MustIPAddr("127.0.0.1")}, + {SockAddr: sockaddr.MustIPAddr("10.10.10.10/24")}, + }, + TokenType: tt.expectedTokenType, + }, + } + if diff := deep.Equal(resEntry, exp); diff != nil { + t.Fatal(diff) + } + }) + } +} + +func TestAppRole_SecretID_WithTTL(t *testing.T) { + tests := []struct { + name string + roleName string + ttl int64 + sysTTLCap bool + }{ + { + "zero ttl", + "role-zero-ttl", + 0, + false, + }, + { + "custom ttl", + "role-custom-ttl", + 60, + false, + }, + { + "system ttl capped", + "role-sys-ttl-cap", + 700000000, + true, + }, + } + + b, storage := createBackendWithStorage(t) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create role + roleData := map[string]interface{}{ + "policies": "default", + "secret_id_ttl": tt.ttl, + } + + roleReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/" + tt.roleName, + Storage: storage, + Data: roleData, + } + resp := b.requestNoErr(t, roleReq) + + // Generate secret ID + secretIDReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "role/" + tt.roleName + "/secret-id", + Storage: storage, + } + resp = b.requestNoErr(t, secretIDReq) + + // Extract the "ttl" value from the response data if it exists + ttlRaw, okTTL := resp.Data["secret_id_ttl"] + if !okTTL { + t.Fatalf("expected TTL value in response") + } + + var ( + respTTL int64 + ok bool + ) + respTTL, ok = ttlRaw.(int64) + if !ok { + t.Fatalf("expected ttl to be an integer, got: %T", ttlRaw) + } + + // Verify secret ID response for different cases + switch { + case tt.sysTTLCap: + if respTTL != int64(b.System().MaxLeaseTTL().Seconds()) { + t.Fatalf("expected TTL value to be system's max lease TTL, got: %d", respTTL) + } + default: + if respTTL != tt.ttl { + t.Fatalf("expected TTL value to be %d, got: %d", tt.ttl, respTTL) + } + } + }) + } +} 
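+
+// capSecretIDTTL is an illustrative sketch (not upstream code) of the
+// capping rule asserted by TestAppRole_SecretID_WithTTL above: a requested
+// secret_id_ttl above the mount's maximum lease TTL is capped to that
+// maximum, while zero (non-expiring) and smaller values pass through.
+func capSecretIDTTL(requested, sysMax time.Duration) time.Duration {
+	if requested > sysMax {
+		return sysMax // cap at the mount's maximum lease TTL
+	}
+	return requested // includes 0, i.e. non-expiring
+}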
+ +// TestAppRole_RoleSecretIDAccessorCrossDelete tests deleting a secret id via +// secret id accessor belonging to a different role +func TestAppRole_RoleSecretIDAccessorCrossDelete(t *testing.T) { + var resp *logical.Response + var err error + b, storage := createBackendWithStorage(t) + + // Create First Role + createRole(t, b, storage, "role1", "a,b") + _ = b.requestNoErr(t, &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "role/role1/secret-id", + }) + + // Create Second Role + createRole(t, b, storage, "role2", "a,b") + _ = b.requestNoErr(t, &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "role/role2/secret-id", + }) + + // Get role2 secretID Accessor + resp = b.requestNoErr(t, &logical.Request{ + Operation: logical.ListOperation, + Storage: storage, + Path: "role/role2/secret-id", + }) + + // Read back role2 secretID Accessor information + hmacSecretID := resp.Data["keys"].([]string)[0] + _ = b.requestNoErr(t, &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "role/role2/secret-id-accessor/lookup", + Data: map[string]interface{}{ + "secret_id_accessor": hmacSecretID, + }, + }) + + // Attempt to destroy role2 secretID accessor using role1 path + _, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "role/role1/secret-id-accessor/destroy", + Data: map[string]interface{}{ + "secret_id_accessor": hmacSecretID, + }, + }) + + if err == nil { + t.Fatalf("expected error") + } +} diff --git a/builtin/credential/approle/path_tidy_user_id.go b/builtin/credential/approle/path_tidy_user_id.go new file mode 100644 index 0000000..b6c777b --- /dev/null +++ b/builtin/credential/approle/path_tidy_user_id.go @@ -0,0 +1,287 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package approle + +import ( + "context" + "fmt" + "net/http" + "sync/atomic" + "time" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/locksutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathTidySecretID(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "tidy/secret-id$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAppRole, + OperationSuffix: "secret-id", + OperationVerb: "tidy", + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathTidySecretIDUpdate, + Responses: map[int][]framework.Response{ + http.StatusAccepted: {{ + Description: http.StatusText(http.StatusAccepted), + }}, + }, + }, + }, + + HelpSynopsis: pathTidySecretIDSyn, + HelpDescription: pathTidySecretIDDesc, + } +} + +// tidySecretID is used to delete entries in the whitelist that are expired. +func (b *backend) tidySecretID(ctx context.Context, req *logical.Request) (*logical.Response, error) { + // If we are a performance standby forward the request to the active node + if b.System().ReplicationState().HasState(consts.ReplicationPerformanceStandby) { + return nil, logical.ErrReadOnly + } + + if !atomic.CompareAndSwapUint32(b.tidySecretIDCASGuard, 0, 1) { + resp := &logical.Response{} + resp.AddWarning("Tidy operation already in progress.") + return resp, nil + } + + go b.tidySecretIDinternal(req.Storage) + + resp := &logical.Response{} + resp.AddWarning("Tidy operation successfully started. 
Any information from the operation will be printed to Vault's server logs.") + return logical.RespondWithStatusCode(resp, req, http.StatusAccepted) +} + +type tidyHelperSecretIDAccessor struct { + secretIDAccessorStorageEntry + saltedSecretIDAccessor string +} + +func (b *backend) tidySecretIDinternal(s logical.Storage) { + defer atomic.StoreUint32(b.tidySecretIDCASGuard, 0) + + logger := b.Logger().Named("tidy") + + checkCount := 0 + + defer func() { + logger.Trace("done checking entries", "num_entries", checkCount) + }() + + // Don't cancel when the original client request goes away + ctx := context.Background() + + salt, err := b.Salt(ctx) + if err != nil { + logger.Error("error tidying secret IDs", "error", err) + return + } + + tidyFunc := func(secretIDPrefixToUse, accessorIDPrefixToUse string) error { + logger.Trace("listing accessors", "prefix", accessorIDPrefixToUse) + + // List all the accessors and add them all to a map + // These hashes are the result of salting the accessor id. + accessorHashes, err := s.List(ctx, accessorIDPrefixToUse) + if err != nil { + return err + } + skipHashes := make(map[string]bool, len(accessorHashes)) + accHashesByLockID := make([][]tidyHelperSecretIDAccessor, 256) + for _, accessorHash := range accessorHashes { + var entry secretIDAccessorStorageEntry + entryIndex := accessorIDPrefixToUse + accessorHash + se, err := s.Get(ctx, entryIndex) + if err != nil { + return err + } + if se == nil { + continue + } + err = se.DecodeJSON(&entry) + if err != nil { + return err + } + + lockIdx := locksutil.LockIndexForKey(entry.SecretIDHMAC) + accHashesByLockID[lockIdx] = append(accHashesByLockID[lockIdx], tidyHelperSecretIDAccessor{ + secretIDAccessorStorageEntry: entry, + saltedSecretIDAccessor: accessorHash, + }) + } + + secretIDCleanupFunc := func(secretIDHMAC, roleNameHMAC, secretIDPrefixToUse string) error { + checkCount++ + lock := b.secretIDLock(secretIDHMAC) + lock.Lock() + defer lock.Unlock() + + entryIndex := fmt.Sprintf("%s%s%s", secretIDPrefixToUse, roleNameHMAC, secretIDHMAC) + secretIDEntry, err := s.Get(ctx, entryIndex) + if err != nil { + return fmt.Errorf("error fetching SecretID %q: %w", secretIDHMAC, err) + } + + if secretIDEntry == nil { + logger.Error("entry for secret id was nil", "secret_id_hmac", secretIDHMAC) + return nil + } + + if secretIDEntry.Value == nil || len(secretIDEntry.Value) == 0 { + return fmt.Errorf("found entry for SecretID %q but actual SecretID is empty", secretIDHMAC) + } + + var result secretIDStorageEntry + if err := secretIDEntry.DecodeJSON(&result); err != nil { + return err + } + + // If a secret ID entry does not have a corresponding accessor + // entry, revoke the secret ID immediately + accessorEntry, err := b.secretIDAccessorEntry(ctx, s, result.SecretIDAccessor, secretIDPrefixToUse) + if err != nil { + return fmt.Errorf("failed to read secret ID accessor entry: %w", err) + } + if accessorEntry == nil { + logger.Trace("found nil accessor") + if err := s.Delete(ctx, entryIndex); err != nil { + return fmt.Errorf("error deleting secret ID %q from storage: %w", secretIDHMAC, err) + } + return nil + } + + // ExpirationTime not being set indicates non-expiring SecretIDs + if !result.ExpirationTime.IsZero() && time.Now().After(result.ExpirationTime) { + logger.Trace("found expired secret ID") + // Clean up the accessor of the secret ID first + err = b.deleteSecretIDAccessorEntry(ctx, s, result.SecretIDAccessor, secretIDPrefixToUse) + if err != nil { + return fmt.Errorf("failed to delete secret ID accessor entry: %w", 
err) + } + + if err := s.Delete(ctx, entryIndex); err != nil { + return fmt.Errorf("error deleting SecretID %q from storage: %w", secretIDHMAC, err) + } + + return nil + } + + // At this point, the secret ID is not expired and is valid. Flag + // the corresponding accessor as not needing attention. + skipHashes[salt.SaltID(result.SecretIDAccessor)] = true + + return nil + } + + logger.Trace("listing role HMACs", "prefix", secretIDPrefixToUse) + + roleNameHMACs, err := s.List(ctx, secretIDPrefixToUse) + if err != nil { + return err + } + + for _, roleNameHMAC := range roleNameHMACs { + logger.Trace("listing secret ID HMACs", "role_hmac", roleNameHMAC) + secretIDHMACs, err := s.List(ctx, fmt.Sprintf("%s%s", secretIDPrefixToUse, roleNameHMAC)) + if err != nil { + return err + } + for _, secretIDHMAC := range secretIDHMACs { + err = secretIDCleanupFunc(secretIDHMAC, roleNameHMAC, secretIDPrefixToUse) + if err != nil { + return err + } + } + } + + // Accessor indexes were not getting cleaned up until 0.9.3. This is a fix + // to clean up the dangling accessor entries. + if len(accessorHashes) > len(skipHashes) { + // There is some raciness here because we're querying secretids for + // roles without having a lock while doing so. Because + // accHashesByLockID was populated previously, at worst this may + // mean that we fail to clean up something we ought to. + allSecretIDHMACs := make(map[string]struct{}) + for _, roleNameHMAC := range roleNameHMACs { + secretIDHMACs, err := s.List(ctx, secretIDPrefixToUse+roleNameHMAC) + if err != nil { + return err + } + for _, v := range secretIDHMACs { + allSecretIDHMACs[v] = struct{}{} + } + } + + tidyEntries := func(entries []tidyHelperSecretIDAccessor) error { + for _, entry := range entries { + // Don't clean up accessor index entry if secretid cleanup func + // determined that it should stay. + if _, ok := skipHashes[entry.saltedSecretIDAccessor]; ok { + continue + } + + // Don't clean up accessor index entry if referenced in role. + if _, ok := allSecretIDHMACs[entry.SecretIDHMAC]; ok { + continue + } + + if err := s.Delete(context.Background(), accessorIDPrefixToUse+entry.saltedSecretIDAccessor); err != nil { + return err + } + } + return nil + } + + for lockIdx, entries := range accHashesByLockID { + // Ideally, locking on accessors should be performed here too + // but for that, accessors are required in plaintext, which are + // not available. + // ... + // The lock is held when writing accessor/secret so if we have + // the lock we know we're not in a + // wrote-accessor-but-not-yet-secret case, which can be racy. + b.secretIDLocks[lockIdx].Lock() + err = tidyEntries(entries) + b.secretIDLocks[lockIdx].Unlock() + if err != nil { + return err + } + } + } + + return nil + } + + err = tidyFunc(secretIDPrefix, secretIDAccessorPrefix) + if err != nil { + logger.Error("error tidying global secret IDs", "error", err) + return + } + err = tidyFunc(secretIDLocalPrefix, secretIDAccessorLocalPrefix) + if err != nil { + logger.Error("error tidying local secret IDs", "error", err) + return + } +} + +// pathTidySecretIDUpdate is used to delete the expired SecretID entries +func (b *backend) pathTidySecretIDUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + return b.tidySecretID(ctx, req) +} + +const ( + pathTidySecretIDSyn = "Trigger the clean-up of expired SecretID entries." + pathTidySecretIDDesc = `SecretIDs will have expiration time attached to them. 
The periodic function +of the backend will look for expired entries and delete them. This happens once in a minute. Invoking +this endpoint will trigger the clean-up action, without waiting for the backend's periodic function.` +) diff --git a/builtin/credential/approle/path_tidy_user_id_test.go b/builtin/credential/approle/path_tidy_user_id_test.go new file mode 100644 index 0000000..c03686e --- /dev/null +++ b/builtin/credential/approle/path_tidy_user_id_test.go @@ -0,0 +1,216 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package approle + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" + "github.com/hashicorp/vault/sdk/logical" +) + +func TestAppRole_TidyDanglingAccessors_Normal(t *testing.T) { + b, storage := createBackendWithStorage(t) + + // Create a role + createRole(t, b, storage, "role1", "a,b,c") + + // Create a secret-id + roleSecretIDReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "role/role1/secret-id", + Storage: storage, + } + _ = b.requestNoErr(t, roleSecretIDReq) + + accessorHashes, err := storage.List(context.Background(), "accessor/") + if err != nil { + t.Fatal(err) + } + if len(accessorHashes) != 1 { + t.Fatalf("bad: len(accessorHashes); expect 1, got %d", len(accessorHashes)) + } + + entry1, err := logical.StorageEntryJSON( + "accessor/invalid1", + &secretIDAccessorStorageEntry{ + SecretIDHMAC: "samplesecretidhmac", + }, + ) + if err != nil { + t.Fatal(err) + } + + if err := storage.Put(context.Background(), entry1); err != nil { + t.Fatal(err) + } + + entry2, err := logical.StorageEntryJSON( + "accessor/invalid2", + &secretIDAccessorStorageEntry{ + SecretIDHMAC: "samplesecretidhmac2", + }, + ) + if err != nil { + t.Fatal(err) + } + if err := storage.Put(context.Background(), entry2); err != nil { + t.Fatal(err) + } + + accessorHashes, err = storage.List(context.Background(), "accessor/") + if err != nil { + t.Fatal(err) + } + if len(accessorHashes) != 3 { + t.Fatalf("bad: len(accessorHashes); expect 3, got %d", len(accessorHashes)) + } + + secret, err := b.tidySecretID(context.Background(), &logical.Request{ + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + schema.ValidateResponse( + t, + schema.GetResponseSchema(t, pathTidySecretID(b), logical.UpdateOperation), + secret, + true, + ) + + // It runs async so we give it a bit of time to run + time.Sleep(10 * time.Second) + + accessorHashes, err = storage.List(context.Background(), "accessor/") + if err != nil { + t.Fatal(err) + } + if len(accessorHashes) != 1 { + t.Fatalf("bad: len(accessorHashes); expect 1, got %d", len(accessorHashes)) + } +} + +func TestAppRole_TidyDanglingAccessors_RaceTest(t *testing.T) { + b, storage := createBackendWithStorage(t) + + // Create a role + createRole(t, b, storage, "role1", "a,b,c") + + // Create an initial entry + roleSecretIDReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "role/role1/secret-id", + Storage: storage, + } + _ = b.requestNoErr(t, roleSecretIDReq) + + count := 1 + + wg := &sync.WaitGroup{} + start := time.Now() + for time.Now().Sub(start) < 10*time.Second { + if time.Now().Sub(start) > 100*time.Millisecond && atomic.LoadUint32(b.tidySecretIDCASGuard) == 0 { + secret, err := b.tidySecretID(context.Background(), &logical.Request{ + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + schema.ValidateResponse( + t, + schema.GetResponseSchema(t, pathTidySecretID(b), 
logical.UpdateOperation), + secret, + true, + ) + } + wg.Add(1) + go func() { + defer wg.Done() + roleSecretIDReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "role/role1/secret-id", + Storage: storage, + } + _ = b.requestNoErr(t, roleSecretIDReq) + }() + + entry, err := logical.StorageEntryJSON( + fmt.Sprintf("accessor/invalid%d", count), + &secretIDAccessorStorageEntry{ + SecretIDHMAC: "samplesecretidhmac", + }, + ) + if err != nil { + t.Fatal(err) + } + + if err := storage.Put(context.Background(), entry); err != nil { + t.Fatal(err) + } + + count++ + time.Sleep(100 * time.Microsecond) + } + + logger := b.Logger().Named(t.Name()) + logger.Info("wrote entries", "count", count) + + wg.Wait() + // Let tidy finish + for atomic.LoadUint32(b.tidySecretIDCASGuard) != 0 { + time.Sleep(100 * time.Millisecond) + } + + logger.Info("running tidy again") + + // Run tidy again + secret, err := b.tidySecretID(context.Background(), &logical.Request{ + Storage: storage, + }) + if err != nil || len(secret.Warnings) > 0 { + t.Fatal(err, secret.Warnings) + } + schema.ValidateResponse( + t, + schema.GetResponseSchema(t, pathTidySecretID(b), logical.UpdateOperation), + secret, + true, + ) + + // Wait for tidy to start + for atomic.LoadUint32(b.tidySecretIDCASGuard) == 0 { + time.Sleep(100 * time.Millisecond) + } + + // Let tidy finish + for atomic.LoadUint32(b.tidySecretIDCASGuard) != 0 { + time.Sleep(100 * time.Millisecond) + } + + accessorHashes, err := storage.List(context.Background(), "accessor/") + if err != nil { + t.Fatal(err) + } + if len(accessorHashes) != count { + t.Fatalf("bad: len(accessorHashes); expect %d, got %d", count, len(accessorHashes)) + } + + roleHMACs, err := storage.List(context.Background(), secretIDPrefix) + if err != nil { + t.Fatal(err) + } + secretIDs, err := storage.List(context.Background(), fmt.Sprintf("%s%s", secretIDPrefix, roleHMACs[0])) + if err != nil { + t.Fatal(err) + } + if len(secretIDs) != count { + t.Fatalf("bad: len(secretIDs); expect %d, got %d", count, len(secretIDs)) + } +} diff --git a/builtin/credential/approle/validation.go b/builtin/credential/approle/validation.go new file mode 100644 index 0000000..70f2194 --- /dev/null +++ b/builtin/credential/approle/validation.go @@ -0,0 +1,445 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package approle + +import ( + "context" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "strings" + "time" + + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/helper/parseip" + "github.com/hashicorp/vault/sdk/helper/cidrutil" + "github.com/hashicorp/vault/sdk/helper/locksutil" + "github.com/hashicorp/vault/sdk/logical" +) + +// secretIDStorageEntry represents the information stored in storage +// when a SecretID is created. The structure of the SecretID storage +// entry is the same for all the types of SecretIDs generated. +type secretIDStorageEntry struct { + // Accessor for the SecretID. It is a random UUID serving as + // a secondary index for the SecretID. This uniquely identifies + // the SecretID it belongs to, and hence can be used for listing + // and deleting SecretIDs. Accessors cannot be used as valid + // SecretIDs during login. + SecretIDAccessor string `json:"secret_id_accessor" mapstructure:"secret_id_accessor"` + + // Number of times this SecretID can be used to perform the login + // operation + SecretIDNumUses int `json:"secret_id_num_uses" mapstructure:"secret_id_num_uses"` + + // Duration after which this SecretID should expire. 
This is capped by + // the backend mount's max TTL value. + SecretIDTTL time.Duration `json:"secret_id_ttl" mapstructure:"secret_id_ttl"` + + // The time when the SecretID was created + CreationTime time.Time `json:"creation_time" mapstructure:"creation_time"` + + // The time when the SecretID becomes eligible for tidy operation. + // Tidying is performed by the PeriodicFunc of the backend which is 1 + // minute apart. + ExpirationTime time.Time `json:"expiration_time" mapstructure:"expiration_time"` + + // The time representing the last time this storage entry was modified + LastUpdatedTime time.Time `json:"last_updated_time" mapstructure:"last_updated_time"` + + // Metadata that belongs to the SecretID + Metadata map[string]string `json:"metadata" mapstructure:"metadata"` + + // CIDRList is a set of CIDR blocks that impose source address + // restrictions on the usage of SecretID + CIDRList []string `json:"cidr_list" mapstructure:"cidr_list"` + + // TokenBoundCIDRs is a set of CIDR blocks that impose source address + // restrictions on the usage of the token generated by this SecretID + TokenBoundCIDRs []string `json:"token_cidr_list" mapstructure:"token_bound_cidrs"` + + // This is a deprecated field + SecretIDNumUsesDeprecated int `json:"SecretIDNumUses" mapstructure:"SecretIDNumUses"` +} + +// Represents the payload of the storage entry of the accessor that maps to a +// unique SecretID. Note that SecretIDs should never be stored in plaintext +// anywhere in the backend. SecretIDHMAC will be used as an index to fetch the +// properties of the SecretID and to delete the SecretID. +type secretIDAccessorStorageEntry struct { + // Hash of the SecretID which can be used to find the storage index at which + // properties of SecretID is stored. + SecretIDHMAC string `json:"secret_id_hmac" mapstructure:"secret_id_hmac"` +} + +// verifyCIDRRoleSecretIDSubset checks if the CIDR blocks set on the secret ID +// are a subset of CIDR blocks set on the role +func verifyCIDRRoleSecretIDSubset(secretIDCIDRs []string, roleBoundCIDRList []string) error { + if len(secretIDCIDRs) != 0 { + // If there are no CIDR blocks on the role, then the subset + // requirement would be satisfied + if len(roleBoundCIDRList) != 0 { + // Address blocks with /32 mask do not get stored with the CIDR mask + // Check if there are any /32 addresses and append CIDR mask + for i, block := range roleBoundCIDRList { + if !strings.Contains(block, "/") { + roleBoundCIDRList[i] = fmt.Sprint(block, "/32") + } + } + + subset, err := cidrutil.SubsetBlocks(roleBoundCIDRList, secretIDCIDRs) + if !subset || err != nil { + return fmt.Errorf( + "failed to verify subset relationship between CIDR blocks on the role %q and CIDR blocks on the secret ID %q: %w", + roleBoundCIDRList, + secretIDCIDRs, + err, + ) + } + } + } + + return nil +} + +const maxHmacInputLength = 4096 + +// Creates a SHA256 HMAC of the given 'value' using the given 'key' and returns +// a hex encoded string. 
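+// Values longer than maxHmacInputLength (4096 bytes) are rejected, and the
+// returned string is always 64 hex characters (SHA-256 yields 32 bytes).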
+func createHMAC(key, value string) (string, error) { + if key == "" { + return "", fmt.Errorf("invalid HMAC key") + } + + if len(value) > maxHmacInputLength { + return "", fmt.Errorf("value is longer than maximum of %d bytes", maxHmacInputLength) + } + + hm := hmac.New(sha256.New, []byte(key)) + hm.Write([]byte(value)) + return hex.EncodeToString(hm.Sum(nil)), nil +} + +func (b *backend) secretIDLock(secretIDHMAC string) *locksutil.LockEntry { + return locksutil.LockForKey(b.secretIDLocks, secretIDHMAC) +} + +func (b *backend) secretIDAccessorLock(secretIDAccessor string) *locksutil.LockEntry { + return locksutil.LockForKey(b.secretIDAccessorLocks, secretIDAccessor) +} + +func decodeSecretIDStorageEntry(entry *logical.StorageEntry) (*secretIDStorageEntry, error) { + result := secretIDStorageEntry{} + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + cleanup := func(in []string) []string { + if len(in) == 0 { + // Don't change unnecessarily, if it was empty list leave as empty list + // instead of making it nil. + return in + } + var out []string + for _, s := range in { + out = append(out, parseip.TrimLeadingZeroesCIDR(s)) + } + return out + } + + result.CIDRList = cleanup(result.CIDRList) + result.TokenBoundCIDRs = cleanup(result.TokenBoundCIDRs) + return &result, nil +} + +// nonLockedSecretIDStorageEntry fetches the secret ID properties from physical +// storage. The entry will be indexed based on the given HMACs of both role +// name and the secret ID. This method will not acquire secret ID lock to fetch +// the storage entry. Locks need to be acquired before calling this method. +func (b *backend) nonLockedSecretIDStorageEntry(ctx context.Context, s logical.Storage, roleSecretIDPrefix, roleNameHMAC, secretIDHMAC string) (*secretIDStorageEntry, error) { + if secretIDHMAC == "" { + return nil, fmt.Errorf("missing secret ID HMAC") + } + + if roleNameHMAC == "" { + return nil, fmt.Errorf("missing role name HMAC") + } + + // Prepare the storage index at which the secret ID will be stored + entryIndex := fmt.Sprintf("%s%s/%s", roleSecretIDPrefix, roleNameHMAC, secretIDHMAC) + + entry, err := s.Get(ctx, entryIndex) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + result, err := decodeSecretIDStorageEntry(entry) + if err != nil { + return nil, err + } + + // TODO: Remove this upgrade bit in future releases + persistNeeded := false + if result.SecretIDNumUsesDeprecated != 0 { + if result.SecretIDNumUses == 0 || + result.SecretIDNumUsesDeprecated < result.SecretIDNumUses { + result.SecretIDNumUses = result.SecretIDNumUsesDeprecated + persistNeeded = true + } + if result.SecretIDNumUses < result.SecretIDNumUsesDeprecated { + result.SecretIDNumUsesDeprecated = result.SecretIDNumUses + persistNeeded = true + } + } + + if persistNeeded { + if err := b.nonLockedSetSecretIDStorageEntry(ctx, s, roleSecretIDPrefix, roleNameHMAC, secretIDHMAC, result); err != nil { + return nil, fmt.Errorf("failed to upgrade role storage entry %w", err) + } + } + + return result, nil +} + +// nonLockedSetSecretIDStorageEntry creates or updates a secret ID entry at the +// physical storage. The entry will be indexed based on the given HMACs of both +// role name and the secret ID. This method will not acquire secret ID lock to +// create/update the storage entry. Locks need to be acquired before calling +// this method. 
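+// The storage index takes the form <roleSecretIDPrefix><roleNameHMAC>/<secretIDHMAC>.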
+func (b *backend) nonLockedSetSecretIDStorageEntry(ctx context.Context, s logical.Storage, roleSecretIDPrefix, roleNameHMAC, secretIDHMAC string, secretEntry *secretIDStorageEntry) error {
+	if roleSecretIDPrefix == "" {
+		return fmt.Errorf("missing secret ID prefix")
+	}
+	if secretIDHMAC == "" {
+		return fmt.Errorf("missing secret ID HMAC")
+	}
+
+	if roleNameHMAC == "" {
+		return fmt.Errorf("missing role name HMAC")
+	}
+
+	if secretEntry == nil {
+		return fmt.Errorf("nil secret entry")
+	}
+
+	entryIndex := fmt.Sprintf("%s%s/%s", roleSecretIDPrefix, roleNameHMAC, secretIDHMAC)
+
+	if entry, err := logical.StorageEntryJSON(entryIndex, secretEntry); err != nil {
+		return err
+	} else if err = s.Put(ctx, entry); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// registerSecretIDEntry creates a new storage entry for the given SecretID.
+func (b *backend) registerSecretIDEntry(ctx context.Context, s logical.Storage, roleName, secretID, hmacKey, roleSecretIDPrefix string, secretEntry *secretIDStorageEntry) (*secretIDStorageEntry, error) {
+	secretIDHMAC, err := createHMAC(hmacKey, secretID)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create HMAC of secret ID: %w", err)
+	}
+	roleNameHMAC, err := createHMAC(hmacKey, roleName)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create HMAC of role_name: %w", err)
+	}
+
+	lock := b.secretIDLock(secretIDHMAC)
+	lock.RLock()
+
+	entry, err := b.nonLockedSecretIDStorageEntry(ctx, s, roleSecretIDPrefix, roleNameHMAC, secretIDHMAC)
+	if err != nil {
+		lock.RUnlock()
+		return nil, err
+	}
+	if entry != nil {
+		lock.RUnlock()
+		return nil, fmt.Errorf("SecretID is already registered")
+	}
+
+	// If there isn't an entry for the secretID already, switch the read lock
+	// with a write lock and create an entry.
+	lock.RUnlock()
+	lock.Lock()
+	defer lock.Unlock()
+
+	// But before saving a new entry, check if the secretID entry was created during the lock switch.
+	entry, err = b.nonLockedSecretIDStorageEntry(ctx, s, roleSecretIDPrefix, roleNameHMAC, secretIDHMAC)
+	if err != nil {
+		return nil, err
+	}
+	if entry != nil {
+		return nil, fmt.Errorf("SecretID is already registered")
+	}
+
+	//
+	// Create a new entry for the SecretID
+	//
+
+	// Set the creation time for the SecretID
+	currentTime := time.Now()
+	secretEntry.CreationTime = currentTime
+	secretEntry.LastUpdatedTime = currentTime
+
+	if ttl := b.deriveSecretIDTTL(secretEntry.SecretIDTTL); ttl != time.Duration(0) {
+		secretEntry.ExpirationTime = currentTime.Add(ttl)
+	}
+
+	// Before storing the SecretID, store its accessor.
+	if err := b.createSecretIDAccessorEntry(ctx, s, secretEntry, secretIDHMAC, roleSecretIDPrefix); err != nil {
+		return nil, err
+	}
+
+	if err := b.nonLockedSetSecretIDStorageEntry(ctx, s, roleSecretIDPrefix, roleNameHMAC, secretIDHMAC, secretEntry); err != nil {
+		return nil, err
+	}
+
+	return secretEntry, nil
+}
+
+// deriveSecretIDTTL determines the secret ID TTL to use based on the system's
+// max lease TTL.
+//
+// If SecretIDTTL is negative or if it crosses the backend mount's limit,
+// return the backend's max lease TTL. Otherwise, return the provided secretIDTTL
+// value.
+func (b *backend) deriveSecretIDTTL(secretIDTTL time.Duration) time.Duration {
+	if secretIDTTL < time.Duration(0) || secretIDTTL > b.System().MaxLeaseTTL() {
+		return b.System().MaxLeaseTTL()
+	}
+
+	return secretIDTTL
+}
+
+// secretIDAccessorEntry is used to read the storage entry that maps an
+// accessor to a secret_id.
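+// The accessor is salted to form the storage index, and the read is performed
+// under the accessor's read lock; a nil result with no error means the
+// accessor is unknown (or already tidied).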
+func (b *backend) secretIDAccessorEntry(ctx context.Context, s logical.Storage, secretIDAccessor, roleSecretIDPrefix string) (*secretIDAccessorStorageEntry, error) { + if secretIDAccessor == "" { + return nil, fmt.Errorf("missing secretIDAccessor") + } + + var result secretIDAccessorStorageEntry + + // Create index entry, mapping the accessor to the token ID + salt, err := b.Salt(ctx) + if err != nil { + return nil, err + } + accessorPrefix := secretIDAccessorPrefix + if roleSecretIDPrefix == secretIDLocalPrefix { + accessorPrefix = secretIDAccessorLocalPrefix + } + entryIndex := accessorPrefix + salt.SaltID(secretIDAccessor) + + accessorLock := b.secretIDAccessorLock(secretIDAccessor) + accessorLock.RLock() + defer accessorLock.RUnlock() + + if entry, err := s.Get(ctx, entryIndex); err != nil { + return nil, err + } else if entry == nil { + return nil, nil + } else if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + return &result, nil +} + +// createSecretIDAccessorEntry creates an identifier for the SecretID. A storage index, +// mapping the accessor to the SecretID is also created. This method should +// be called when the lock for the corresponding SecretID is held. +func (b *backend) createSecretIDAccessorEntry(ctx context.Context, s logical.Storage, entry *secretIDStorageEntry, secretIDHMAC, roleSecretIDPrefix string) error { + // Create a random accessor + accessorUUID, err := uuid.GenerateUUID() + if err != nil { + return err + } + entry.SecretIDAccessor = accessorUUID + + // Create index entry, mapping the accessor to the token ID + salt, err := b.Salt(ctx) + if err != nil { + return err + } + + accessorPrefix := secretIDAccessorPrefix + if roleSecretIDPrefix == secretIDLocalPrefix { + accessorPrefix = secretIDAccessorLocalPrefix + } + entryIndex := accessorPrefix + salt.SaltID(entry.SecretIDAccessor) + + accessorLock := b.secretIDAccessorLock(accessorUUID) + accessorLock.Lock() + defer accessorLock.Unlock() + + if entry, err := logical.StorageEntryJSON(entryIndex, &secretIDAccessorStorageEntry{ + SecretIDHMAC: secretIDHMAC, + }); err != nil { + return err + } else if err = s.Put(ctx, entry); err != nil { + return fmt.Errorf("failed to persist accessor index entry: %w", err) + } + + return nil +} + +// deleteSecretIDAccessorEntry deletes the storage index mapping the accessor to a SecretID. +func (b *backend) deleteSecretIDAccessorEntry(ctx context.Context, s logical.Storage, secretIDAccessor, roleSecretIDPrefix string) error { + salt, err := b.Salt(ctx) + if err != nil { + return err + } + + accessorPrefix := secretIDAccessorPrefix + if roleSecretIDPrefix == secretIDLocalPrefix { + accessorPrefix = secretIDAccessorLocalPrefix + } + entryIndex := accessorPrefix + salt.SaltID(secretIDAccessor) + + accessorLock := b.secretIDAccessorLock(secretIDAccessor) + accessorLock.Lock() + defer accessorLock.Unlock() + + // Delete the accessor of the SecretID first + if err := s.Delete(ctx, entryIndex); err != nil { + return fmt.Errorf("failed to delete accessor storage entry: %w", err) + } + + return nil +} + +// flushRoleSecrets deletes all the SecretIDs that belong to the given +// RoleID. 
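+// Only the SecretID entries themselves are deleted here; the corresponding
+// accessor entries become dangling and are reaped by the tidy operation.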
+func (b *backend) flushRoleSecrets(ctx context.Context, s logical.Storage, roleName, hmacKey, roleSecretIDPrefix string) error { + roleNameHMAC, err := createHMAC(hmacKey, roleName) + if err != nil { + return fmt.Errorf("failed to create HMAC of role_name: %w", err) + } + + // Acquire the custom lock to perform listing of SecretIDs + b.secretIDListingLock.RLock() + defer b.secretIDListingLock.RUnlock() + + secretIDHMACs, err := s.List(ctx, fmt.Sprintf("%s%s/", roleSecretIDPrefix, roleNameHMAC)) + if err != nil { + return err + } + for _, secretIDHMAC := range secretIDHMACs { + // Acquire the lock belonging to the SecretID + lock := b.secretIDLock(secretIDHMAC) + lock.Lock() + entryIndex := fmt.Sprintf("%s%s/%s", roleSecretIDPrefix, roleNameHMAC, secretIDHMAC) + if err := s.Delete(ctx, entryIndex); err != nil { + lock.Unlock() + return fmt.Errorf("error deleting SecretID %q from storage: %w", secretIDHMAC, err) + } + lock.Unlock() + } + return nil +} diff --git a/builtin/credential/approle/validation_test.go b/builtin/credential/approle/validation_test.go new file mode 100644 index 0000000..7f7366b --- /dev/null +++ b/builtin/credential/approle/validation_test.go @@ -0,0 +1,60 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package approle + +import ( + "context" + "testing" + + "github.com/hashicorp/vault/sdk/logical" +) + +func TestAppRole_SecretIDNumUsesUpgrade(t *testing.T) { + var resp *logical.Response + var err error + + b, storage := createBackendWithStorage(t) + + roleData := map[string]interface{}{ + "secret_id_num_uses": 10, + } + + roleReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/role1", + Storage: storage, + Data: roleData, + } + + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + secretIDReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "role/role1/secret-id", + Storage: storage, + } + + resp, err = b.HandleRequest(context.Background(), secretIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + secretIDReq.Operation = logical.UpdateOperation + secretIDReq.Path = "role/role1/secret-id/lookup" + secretIDReq.Data = map[string]interface{}{ + "secret_id": resp.Data["secret_id"].(string), + } + resp, err = b.HandleRequest(context.Background(), secretIDReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + // Check if the response contains the value set for secret_id_num_uses + if resp.Data["secret_id_num_uses"] != 10 { + t.Fatal("invalid secret_id_num_uses") + } +} diff --git a/builtin/credential/aws/backend.go b/builtin/credential/aws/backend.go new file mode 100644 index 0000000..e8424f2 --- /dev/null +++ b/builtin/credential/aws/backend.go @@ -0,0 +1,437 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package awsauth + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/hashicorp/go-secure-stdlib/awsutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" + cache "github.com/patrickmn/go-cache" +) + +const ( + amzHeaderPrefix = "X-Amz-" + operationPrefixAWS = "aws" +) + +var defaultAllowedSTSRequestHeaders = []string{ + "X-Amz-Algorithm", + "X-Amz-Content-Sha256", + "X-Amz-Credential", + "X-Amz-Date", + "X-Amz-Security-Token", + "X-Amz-Signature", + "X-Amz-SignedHeaders", +} + +func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { + b, err := Backend(conf) + if err != nil { + return nil, err + } + if err := b.Setup(ctx, conf); err != nil { + return nil, err + } + return b, nil +} + +type backend struct { + *framework.Backend + + // Lock to make changes to any of the backend's configuration endpoints. + configMutex sync.RWMutex + + // Lock to make changes to role entries + roleMutex sync.Mutex + + // Lock to make changes to the deny list entries + denyListMutex sync.RWMutex + + // Guards the deny list/access list tidy functions + tidyDenyListCASGuard *uint32 + tidyAccessListCASGuard *uint32 + + // Duration after which the periodic function of the backend needs to + // tidy the deny list and access list entries. + tidyCooldownPeriod time.Duration + + // nextTidyTime holds the time at which the periodic func should initiate + // the tidy operations. This is set by the periodicFunc based on the value + // of tidyCooldownPeriod. + nextTidyTime time.Time + + // Map to hold the EC2 client objects indexed by region and STS role. + // This avoids the overhead of creating a client object for every login request. + // When the credentials are modified or deleted, all the cached client objects + // will be flushed. The empty STS role signifies the master account + EC2ClientsMap map[string]map[string]*ec2.EC2 + + // Map to hold the IAM client objects indexed by region and STS role. + // This avoids the overhead of creating a client object for every login request. + // When the credentials are modified or deleted, all the cached client objects + // will be flushed. The empty STS role signifies the master account + IAMClientsMap map[string]map[string]*iam.IAM + + // Map to associate a partition to a random region in that partition. Users of + // this don't care what region in the partition they use, but there is some client + // cache efficiency gain if we keep the mapping stable, hence caching a single copy. + partitionToRegionMap map[string]*endpoints.Region + + // Map of AWS unique IDs to the full ARN corresponding to that unique ID + // This avoids the overhead of an AWS API hit for every login request + // using the IAM auth method when bound_iam_principal_arn contains a wildcard + iamUserIdToArnCache *cache.Cache + + // AWS Account ID of the "default" AWS credentials + // This cache avoids the need to call GetCallerIdentity repeatedly to learn it + // We can't store this because, in certain pathological cases, it could change + // out from under us, such as a standby and active Vault server in different AWS + // accounts using their IAM instance profile to get their credentials. 
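+	// The cached value is cleared whenever "config/client" is invalidated
+	// (see invalidate below).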
+ defaultAWSAccountID string + + // roleCache caches role entries to avoid locking headaches + roleCache *cache.Cache + + resolveArnToUniqueIDFunc func(context.Context, logical.Storage, string) (string, error) + + // upgradeCancelFunc is used to cancel the context used in the upgrade + // function + upgradeCancelFunc context.CancelFunc + + // deprecatedTerms is used to downgrade preferred terminology (e.g. accesslist) + // to the legacy term. This allows for consolidated aliasing of the affected + // endpoints until the legacy terms are removed. + deprecatedTerms *strings.Replacer +} + +func Backend(_ *logical.BackendConfig) (*backend, error) { + b := &backend{ + // Setting the periodic func to be run once in an hour. + // If there is a real need, this can be made configurable. + tidyCooldownPeriod: time.Hour, + EC2ClientsMap: make(map[string]map[string]*ec2.EC2), + IAMClientsMap: make(map[string]map[string]*iam.IAM), + iamUserIdToArnCache: cache.New(7*24*time.Hour, 24*time.Hour), + tidyDenyListCASGuard: new(uint32), + tidyAccessListCASGuard: new(uint32), + roleCache: cache.New(cache.NoExpiration, cache.NoExpiration), + + deprecatedTerms: strings.NewReplacer( + "accesslist", "whitelist", + "access-list", "whitelist", + "denylist", "blacklist", + "deny-list", "blacklist", + ), + } + + b.resolveArnToUniqueIDFunc = b.resolveArnToRealUniqueId + + b.Backend = &framework.Backend{ + PeriodicFunc: b.periodicFunc, + AuthRenew: b.pathLoginRenew, + Help: backendHelp, + PathsSpecial: &logical.Paths{ + Unauthenticated: []string{ + "login", + }, + LocalStorage: []string{ + identityAccessListStorage, + }, + SealWrapStorage: []string{ + "config/client", + }, + }, + Paths: []*framework.Path{ + b.pathLogin(), + b.pathListRole(), + b.pathListRoles(), + b.pathRole(), + b.pathRoleTag(), + b.pathConfigClient(), + b.pathConfigCertificate(), + b.pathConfigIdentity(), + b.pathConfigRotateRoot(), + b.pathConfigSts(), + b.pathListSts(), + b.pathListCertificates(), + + // The following pairs of functions are path aliases. The first is the + // primary endpoint, and the second is version using deprecated language, + // for backwards compatibility. The functionality is identical between the two. + b.pathConfigTidyRoletagDenyList(), + b.genDeprecatedPath(b.pathConfigTidyRoletagDenyList()), + + b.pathConfigTidyIdentityAccessList(), + b.genDeprecatedPath(b.pathConfigTidyIdentityAccessList()), + + b.pathListRoletagDenyList(), + b.genDeprecatedPath(b.pathListRoletagDenyList()), + + b.pathRoletagDenyList(), + b.genDeprecatedPath(b.pathRoletagDenyList()), + + b.pathTidyRoletagDenyList(), + b.genDeprecatedPath(b.pathTidyRoletagDenyList()), + + b.pathListIdentityAccessList(), + b.genDeprecatedPath(b.pathListIdentityAccessList()), + + b.pathIdentityAccessList(), + b.genDeprecatedPath(b.pathIdentityAccessList()), + + b.pathTidyIdentityAccessList(), + b.genDeprecatedPath(b.pathTidyIdentityAccessList()), + }, + Invalidate: b.invalidate, + InitializeFunc: b.initialize, + BackendType: logical.TypeCredential, + Clean: b.cleanup, + } + + b.partitionToRegionMap = generatePartitionToRegionMap() + + return b, nil +} + +// periodicFunc performs the tasks that the backend wishes to do periodically. +// Currently this will be triggered once in a minute by the RollbackManager. +// +// The tasks being done currently by this function are to cleanup the expired +// entries of both deny list role tags and access list identities. Tidying is done +// not once in a minute, but once in an hour, controlled by 'tidyCooldownPeriod'. 
+// Tidying of deny list and access list are by default enabled. This can be +// changed using `config/tidy/roletags` and `config/tidy/identities` endpoints. +func (b *backend) periodicFunc(ctx context.Context, req *logical.Request) error { + // Run the tidy operations for the first time. Then run it when current + // time matches the nextTidyTime. + if b.nextTidyTime.IsZero() || !time.Now().Before(b.nextTidyTime) { + if b.System().LocalMount() || !b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary|consts.ReplicationPerformanceStandby) { + // safetyBuffer defaults to 180 days for roletag deny list + safetyBuffer := 15552000 + tidyBlacklistConfigEntry, err := b.lockedConfigTidyRoleTags(ctx, req.Storage) + if err != nil { + return err + } + skipBlacklistTidy := false + // check if tidying of role tags was configured + if tidyBlacklistConfigEntry != nil { + // check if periodic tidying of role tags was disabled + if tidyBlacklistConfigEntry.DisablePeriodicTidy { + skipBlacklistTidy = true + } + // overwrite the default safetyBuffer with the configured value + safetyBuffer = tidyBlacklistConfigEntry.SafetyBuffer + } + // tidy role tags if explicitly not disabled + if !skipBlacklistTidy { + b.tidyDenyListRoleTag(ctx, req, safetyBuffer) + } + } + + // We don't check for replication state for access list identities as + // these are locally stored + + safety_buffer := 259200 + tidyWhitelistConfigEntry, err := b.lockedConfigTidyIdentities(ctx, req.Storage) + if err != nil { + return err + } + skipWhitelistTidy := false + // check if tidying of identities was configured + if tidyWhitelistConfigEntry != nil { + // check if periodic tidying of identities was disabled + if tidyWhitelistConfigEntry.DisablePeriodicTidy { + skipWhitelistTidy = true + } + // overwrite the default safety_buffer with the configured value + safety_buffer = tidyWhitelistConfigEntry.SafetyBuffer + } + // tidy identities if explicitly not disabled + if !skipWhitelistTidy { + b.tidyAccessListIdentity(ctx, req, safety_buffer) + } + + // Update the time at which to run the tidy functions again. + b.nextTidyTime = time.Now().Add(b.tidyCooldownPeriod) + } + return nil +} + +func (b *backend) cleanup(ctx context.Context) { + if b.upgradeCancelFunc != nil { + b.upgradeCancelFunc() + } +} + +func (b *backend) invalidate(ctx context.Context, key string) { + switch { + case key == "config/client": + b.configMutex.Lock() + defer b.configMutex.Unlock() + b.flushCachedEC2Clients() + b.flushCachedIAMClients() + b.defaultAWSAccountID = "" + case strings.HasPrefix(key, "role"): + // TODO: We could make this better + b.roleCache.Flush() + } +} + +// Putting this here so we can inject a fake resolver into the backend for unit testing +// purposes +func (b *backend) resolveArnToRealUniqueId(ctx context.Context, s logical.Storage, arn string) (string, error) { + entity, err := parseIamArn(arn) + if err != nil { + return "", err + } + // This odd-looking code is here because IAM is an inherently global service. IAM and STS ARNs + // don't have regions in them, and there is only a single global endpoint for IAM; see + // http://docs.aws.amazon.com/general/latest/gr/rande.html#iam_region + // However, the ARNs do have a partition in them, because the GovCloud and China partitions DO + // have their own separate endpoints, and the partition is encoded in the ARN. If Amazon's Go SDK + // would allow us to pass a partition back to the IAM client, it would be much simpler. 
But it
+	// doesn't appear that's possible, so in order to properly support GovCloud and China, we do a
+	// circular dance of extracting the partition from the ARN, finding any arbitrary region in the
+	// partition, and passing that region back to the SDK, so that the SDK can figure out the
+	// proper partition from the arbitrary region we passed in to look up the endpoint.
+	// Sigh
+	region := b.partitionToRegionMap[entity.Partition]
+	if region == nil {
+		return "", fmt.Errorf("unable to resolve partition %q to a region", entity.Partition)
+	}
+	iamClient, err := b.clientIAM(ctx, s, region.ID(), entity.AccountNumber)
+	if err != nil {
+		return "", awsutil.AppendAWSError(err)
+	}
+
+	switch entity.Type {
+	case "user":
+		userInfo, err := iamClient.GetUserWithContext(ctx, &iam.GetUserInput{UserName: &entity.FriendlyName})
+		if err != nil {
+			return "", awsutil.AppendAWSError(err)
+		}
+		if userInfo == nil {
+			return "", fmt.Errorf("got nil result from GetUser")
+		}
+		return *userInfo.User.UserId, nil
+	case "role":
+		roleInfo, err := iamClient.GetRoleWithContext(ctx, &iam.GetRoleInput{RoleName: &entity.FriendlyName})
+		if err != nil {
+			return "", awsutil.AppendAWSError(err)
+		}
+		if roleInfo == nil {
+			return "", fmt.Errorf("got nil result from GetRole")
+		}
+		return *roleInfo.Role.RoleId, nil
+	case "instance-profile":
+		profileInfo, err := iamClient.GetInstanceProfileWithContext(ctx, &iam.GetInstanceProfileInput{InstanceProfileName: &entity.FriendlyName})
+		if err != nil {
+			return "", awsutil.AppendAWSError(err)
+		}
+		if profileInfo == nil {
+			return "", fmt.Errorf("got nil result from GetInstanceProfile")
+		}
+		return *profileInfo.InstanceProfile.InstanceProfileId, nil
+	default:
+		return "", fmt.Errorf("unrecognized entity type %#v", entity.Type)
+	}
+}
+
+// genDeprecatedPath will return a deprecated version of a framework.Path. The
+// path pattern and display attributes (if any) will contain deprecated terms,
+// and the path will be marked as deprecated.
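+// Backend registers each such path alongside its primary version (see the
+// Paths list above), so both spellings are served by the same handlers.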
+func (b *backend) genDeprecatedPath(path *framework.Path) *framework.Path { + pathDeprecated := *path + pathDeprecated.Pattern = b.deprecatedTerms.Replace(path.Pattern) + pathDeprecated.Deprecated = true + + if path.DisplayAttrs != nil { + deprecatedDisplayAttrs := *path.DisplayAttrs + deprecatedDisplayAttrs.OperationPrefix = b.deprecatedTerms.Replace(path.DisplayAttrs.OperationPrefix) + deprecatedDisplayAttrs.OperationVerb = b.deprecatedTerms.Replace(path.DisplayAttrs.OperationVerb) + deprecatedDisplayAttrs.OperationSuffix = b.deprecatedTerms.Replace(path.DisplayAttrs.OperationSuffix) + pathDeprecated.DisplayAttrs = &deprecatedDisplayAttrs + } + + for i, op := range path.Operations { + if op.Properties().DisplayAttrs != nil { + deprecatedDisplayAttrs := *op.Properties().DisplayAttrs + deprecatedDisplayAttrs.OperationPrefix = b.deprecatedTerms.Replace(op.Properties().DisplayAttrs.OperationPrefix) + deprecatedDisplayAttrs.OperationVerb = b.deprecatedTerms.Replace(op.Properties().DisplayAttrs.OperationVerb) + deprecatedDisplayAttrs.OperationSuffix = b.deprecatedTerms.Replace(op.Properties().DisplayAttrs.OperationSuffix) + deprecatedProperties := pathDeprecated.Operations[i].(*framework.PathOperation) + deprecatedProperties.DisplayAttrs = &deprecatedDisplayAttrs + } + } + + return &pathDeprecated +} + +// Adapted from https://docs.aws.amazon.com/sdk-for-go/api/aws/endpoints/ +// the "Enumerating Regions and Endpoint Metadata" section +func generatePartitionToRegionMap() map[string]*endpoints.Region { + partitionToRegion := make(map[string]*endpoints.Region) + + resolver := endpoints.DefaultResolver() + partitions := resolver.(endpoints.EnumPartitions).Partitions() + + for _, p := range partitions { + // For most partitions, it's fine to choose a single region randomly. + // However, there are a few exceptions: + // + // For "aws", choose "us-east-1" because it is always enabled (and + // enabled for STS) by default. + // + // For "aws-us-gov", choose "us-gov-west-1" because it is the only + // valid region for IAM operations. + // ref: https://github.com/aws/aws-sdk-go/blob/v1.34.25/aws/endpoints/defaults.go#L8176-L8194 + for _, r := range p.Regions() { + if p.ID() == "aws" && r.ID() != "us-east-1" { + continue + } + if p.ID() == "aws-us-gov" && r.ID() != "us-gov-west-1" { + continue + } + partitionToRegion[p.ID()] = &r + break + } + } + + return partitionToRegion +} + +const backendHelp = ` +The aws auth method uses either AWS IAM credentials or AWS-signed EC2 metadata +to authenticate clients, which are IAM principals or EC2 instances. + +Authentication is backed by a preconfigured role in the backend. The role +represents the authorization of resources by containing Vault's policies. +Role can be created using 'role/' endpoint. + +Authentication of IAM principals, either IAM users or roles, is done using a +specifically signed AWS API request using clients' AWS IAM credentials. IAM +principals can then be assigned to roles within Vault. This is known as the +"iam" auth method. + +Authentication of EC2 instances is done using either a signed PKCS#7 document +or a detached RSA signature of an AWS EC2 instance's identity document along +with a client-created nonce. This is known as the "ec2" auth method. + +If there is need to further restrict the capabilities of the role on the instance +that is using the role, 'role_tag' option can be enabled on the role, and a tag +can be generated using 'role//tag' endpoint. This tag represents the +subset of capabilities set on the role. 
When the 'role_tag' option is enabled on +the role, the login operation requires that a respective role tag is attached to +the EC2 instance which performs the login. +` diff --git a/builtin/credential/aws/backend_e2e_test.go b/builtin/credential/aws/backend_e2e_test.go new file mode 100644 index 0000000..e8939b9 --- /dev/null +++ b/builtin/credential/aws/backend_e2e_test.go @@ -0,0 +1,134 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package awsauth + +import ( + "context" + "testing" + "time" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +func TestBackend_E2E_Initialize(t *testing.T) { + ctx := context.Background() + + // Set up the cluster. This will trigger an Initialize(); we sleep briefly + // awaiting its completion. + cluster := setupAwsTestCluster(t, ctx) + defer cluster.Cleanup() + time.Sleep(time.Second) + core := cluster.Cores[0] + + // Fetch the aws auth's path in storage. This is a uuid that is different + // every time we run the test + authUuids, err := core.UnderlyingStorage.List(ctx, "auth/") + if err != nil { + t.Fatal(err) + } + if len(authUuids) != 1 { + t.Fatalf("expected exactly one auth path") + } + awsPath := "auth/" + authUuids[0] + + // Make sure that the upgrade happened, by fishing the 'config/version' + // entry out of storage. We can't use core.Client.Logical().Read() to do + // this, because 'config/version' hasn't been exposed as a path. + version, err := core.UnderlyingStorage.Get(ctx, awsPath+"config/version") + if err != nil { + t.Fatal(err) + } + if version == nil { + t.Fatalf("no config found") + } + + // Nuke the version, so we can pretend that Initialize() has never been run + if err := core.UnderlyingStorage.Delete(ctx, awsPath+"config/version"); err != nil { + t.Fatal(err) + } + version, err = core.UnderlyingStorage.Get(ctx, awsPath+"config/version") + if err != nil { + t.Fatal(err) + } + if version != nil { + t.Fatalf("version found") + } + + // Create a role + data := map[string]interface{}{ + "auth_type": "ec2", + "policies": "default", + "bound_subnet_id": "subnet-abcdef", + } + if _, err := core.Client.Logical().Write("auth/aws/role/test-role", data); err != nil { + t.Fatal(err) + } + role, err := core.Client.Logical().Read("auth/aws/role/test-role") + if err != nil { + t.Fatal(err) + } + if role == nil { + t.Fatalf("no role found") + } + + // There should _still_ be no config version + version, err = core.UnderlyingStorage.Get(ctx, awsPath+"config/version") + if err != nil { + t.Fatal(err) + } + if version != nil { + t.Fatalf("version found") + } + + // Seal, and then Unseal. This will once again trigger an Initialize(), + // only this time there will be a role present during the upgrade. 
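+	// The time.Sleep calls in this test give the asynchronous Initialize()
+	// a moment to finish writing config/version before we assert on it.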
+ core.Seal(t) + cluster.UnsealCores(t) + time.Sleep(time.Second) + + // Now the config version should be there again + version, err = core.UnderlyingStorage.Get(ctx, awsPath+"config/version") + if err != nil { + t.Fatal(err) + } + if version == nil { + t.Fatalf("no version found") + } +} + +func setupAwsTestCluster(t *testing.T, _ context.Context) *vault.TestCluster { + // create a cluster with the aws auth backend built-in + logger := logging.NewVaultLogger(hclog.Trace) + coreConfig := &vault.CoreConfig{ + Logger: logger, + CredentialBackends: map[string]logical.Factory{ + "aws": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.Handler, + }) + + cluster.Start() + if len(cluster.Cores) != 1 { + t.Fatalf("expected exactly one core") + } + core := cluster.Cores[0] + vault.TestWaitActive(t, core.Core) + + // load the auth plugin + if err := core.Client.Sys().EnableAuthWithOptions("aws", &api.EnableAuthOptions{ + Type: "aws", + }); err != nil { + t.Fatal(err) + } + + return cluster +} diff --git a/builtin/credential/aws/backend_test.go b/builtin/credential/aws/backend_test.go new file mode 100644 index 0000000..dea280c --- /dev/null +++ b/builtin/credential/aws/backend_test.go @@ -0,0 +1,1913 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package awsauth + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/sts" + logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/policyutil" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + testVaultHeaderValue = "VaultAcceptanceTesting" + testValidRoleName = "valid-role" + testInvalidRoleName = "invalid-role" +) + +func TestBackend_CreateParseVerifyRoleTag(t *testing.T) { + // create a backend + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // create a role entry + data := map[string]interface{}{ + "auth_type": "ec2", + "policies": "p,q,r,s", + "bound_ami_id": "abcd-123", + } + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/abcd-123", + Storage: storage, + Data: data, + }) + if resp != nil && resp.IsError() { + t.Fatalf("failed to create role") + } + if err != nil { + t.Fatal(err) + } + + // read the created role entry + roleEntry, err := b.role(context.Background(), storage, "abcd-123") + if err != nil { + t.Fatal(err) + } + + // create a nonce for the role tag + nonce, err := createRoleTagNonce() + if err != nil { + t.Fatal(err) + } + rTag1 := &roleTag{ + Version: "v1", + Role: "abcd-123", + Nonce: nonce, + Policies: []string{"p", "q", "r"}, + MaxTTL: 200000000000, // 200s + } + + // create a role tag against the role entry + val, err := createRoleTagValue(rTag1, roleEntry) + if err != nil { + t.Fatal(err) + } + if val == "" { + t.Fatalf("failed to create role tag") + } + + // parse the created role tag + rTag2, err := b.parseAndVerifyRoleTagValue(context.Background(), storage, val) + if err != nil { + t.Fatal(err) + } + + // check the values in parsed role tag + if rTag2.Version != "v1" 
|| + rTag2.Nonce != nonce || + rTag2.Role != "abcd-123" || + rTag2.MaxTTL != 200000000000 || // 200s + !policyutil.EquivalentPolicies(rTag2.Policies, []string{"p", "q", "r"}) || + len(rTag2.HMAC) == 0 { + t.Fatalf("parsed role tag is invalid") + } + + // verify the tag contents using role specific HMAC key + verified, err := verifyRoleTagValue(rTag2, roleEntry) + if err != nil { + t.Fatal(err) + } + if !verified { + t.Fatalf("failed to verify the role tag") + } + + // register a different role + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/ami-6789", + Storage: storage, + Data: data, + }) + if resp != nil && resp.IsError() { + t.Fatalf("failed to create role") + } + if err != nil { + t.Fatal(err) + } + + // get the entry of the newly created role entry + roleEntry2, err := b.role(context.Background(), storage, "ami-6789") + if err != nil { + t.Fatal(err) + } + + // try to verify the tag created with previous role's HMAC key + // with the newly registered entry's HMAC key + verified, err = verifyRoleTagValue(rTag2, roleEntry2) + if err != nil { + t.Fatal(err) + } + if verified { + t.Fatalf("verification of role tag should have failed") + } + + // modify any value in role tag and try to verify it + rTag2.Version = "v2" + verified, err = verifyRoleTagValue(rTag2, roleEntry) + if err != nil { + t.Fatal(err) + } + if verified { + t.Fatalf("verification of role tag should have failed: invalid Version") + } +} + +func TestBackend_prepareRoleTagPlaintextValue(t *testing.T) { + // create a nonce for the role tag + nonce, err := createRoleTagNonce() + if err != nil { + t.Fatal(err) + } + rTag := &roleTag{ + Version: "v1", + Nonce: nonce, + Role: "abcd-123", + } + + rTag.Version = "" + // try to create plaintext part of role tag + // without specifying version + val, err := prepareRoleTagPlaintextValue(rTag) + if err == nil { + t.Fatalf("expected error for missing version") + } + rTag.Version = "v1" + + rTag.Nonce = "" + // try to create plaintext part of role tag + // without specifying nonce + val, err = prepareRoleTagPlaintextValue(rTag) + if err == nil { + t.Fatalf("expected error for missing nonce") + } + rTag.Nonce = nonce + + rTag.Role = "" + // try to create plaintext part of role tag + // without specifying role + val, err = prepareRoleTagPlaintextValue(rTag) + if err == nil { + t.Fatalf("expected error for missing role") + } + rTag.Role = "abcd-123" + + // create the plaintext part of the tag + val, err = prepareRoleTagPlaintextValue(rTag) + if err != nil { + t.Fatal(err) + } + + // verify if it contains known fields + if !strings.Contains(val, "r=") || + !strings.Contains(val, "d=") || + !strings.Contains(val, "m=") || + !strings.HasPrefix(val, "v1") { + t.Fatalf("incorrect information in role tag plaintext value") + } + + rTag.InstanceID = "instance-123" + // create the role tag with instance_id specified + val, err = prepareRoleTagPlaintextValue(rTag) + if err != nil { + t.Fatal(err) + } + // verify it + if !strings.Contains(val, "i=") { + t.Fatalf("missing instance ID in role tag plaintext value") + } + + rTag.MaxTTL = 200000000000 + // create the role tag with max_ttl specified + val, err = prepareRoleTagPlaintextValue(rTag) + if err != nil { + t.Fatal(err) + } + // verify it + if !strings.Contains(val, "t=") { + t.Fatalf("missing max_ttl field in role tag plaintext value") + } +} + +func TestBackend_CreateRoleTagNonce(t *testing.T) { + // create a nonce for the role tag + nonce, err := createRoleTagNonce() + if 
err != nil { + t.Fatal(err) + } + if nonce == "" { + t.Fatalf("failed to create role tag nonce") + } + + // verify that the value returned is base64 encoded + nonceBytes, err := base64.StdEncoding.DecodeString(nonce) + if err != nil { + t.Fatal(err) + } + if len(nonceBytes) == 0 { + t.Fatalf("length of role tag nonce is zero") + } +} + +func TestBackend_ConfigTidyIdentities(t *testing.T) { + for _, path := range []string{"config/tidy/identity-whitelist", "config/tidy/identity-accesslist"} { + // create a backend + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // test update operation + tidyRequest := &logical.Request{ + Operation: logical.UpdateOperation, + Path: path, + Storage: storage, + } + data := map[string]interface{}{ + "safety_buffer": "60", + "disable_periodic_tidy": true, + } + tidyRequest.Data = data + _, err = b.HandleRequest(context.Background(), tidyRequest) + if err != nil { + t.Fatal(err) + } + + // test read operation + tidyRequest.Operation = logical.ReadOperation + resp, err := b.HandleRequest(context.Background(), tidyRequest) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.IsError() { + t.Fatalf("failed to read %q endpoint", path) + } + if resp.Data["safety_buffer"].(int) != 60 || !resp.Data["disable_periodic_tidy"].(bool) { + t.Fatalf("bad: expected: safety_buffer:60 disable_periodic_tidy:true actual: safety_buffer:%d disable_periodic_tidy:%t\n", resp.Data["safety_buffer"].(int), resp.Data["disable_periodic_tidy"].(bool)) + } + + // test delete operation + tidyRequest.Operation = logical.DeleteOperation + resp, err = b.HandleRequest(context.Background(), tidyRequest) + if err != nil { + t.Fatal(err) + } + if resp != nil { + t.Fatalf("failed to delete %q", path) + } + } +} + +func TestBackend_ConfigTidyRoleTags(t *testing.T) { + for _, path := range []string{"config/tidy/roletag-blacklist", "config/tidy/roletag-denylist"} { + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // test update operation + tidyRequest := &logical.Request{ + Operation: logical.UpdateOperation, + Path: path, + Storage: storage, + } + data := map[string]interface{}{ + "safety_buffer": "60", + "disable_periodic_tidy": true, + } + tidyRequest.Data = data + _, err = b.HandleRequest(context.Background(), tidyRequest) + if err != nil { + t.Fatal(err) + } + + // test read operation + tidyRequest.Operation = logical.ReadOperation + resp, err := b.HandleRequest(context.Background(), tidyRequest) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.IsError() { + t.Fatalf("failed to read %s endpoint", path) + } + if resp.Data["safety_buffer"].(int) != 60 || !resp.Data["disable_periodic_tidy"].(bool) { + t.Fatalf("bad: expected: safety_buffer:60 disable_periodic_tidy:true actual: safety_buffer:%d disable_periodic_tidy:%t\n", resp.Data["safety_buffer"].(int), resp.Data["disable_periodic_tidy"].(bool)) + } + + // test delete operation + tidyRequest.Operation = logical.DeleteOperation + resp, err = b.HandleRequest(context.Background(), tidyRequest) + if err != nil { + t.Fatal(err) + } + if resp != nil { + t.Fatalf("failed to delete %s", path) + } + } +} + +func 
TestBackend_TidyIdentities(t *testing.T) { + for _, path := range []string{"tidy/identity-whitelist", "tidy/identity-accesslist"} { + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + expiredIdentityWhitelist := &accessListIdentity{ + ExpirationTime: time.Now().Add(-1 * 24 * 365 * time.Hour), + } + entry, err := logical.StorageEntryJSON("whitelist/identity/id1", expiredIdentityWhitelist) + if err != nil { + t.Fatal(err) + } + if err := storage.Put(context.Background(), entry); err != nil { + t.Fatal(err) + } + + // test update operation + _, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: path, + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + + // let tidy finish in the background + time.Sleep(1 * time.Second) + + entry, err = storage.Get(context.Background(), "whitelist/identity/id1") + if err != nil { + t.Fatal(err) + } + if entry != nil { + t.Fatal("wl tidy did not remove expired entry") + } + } +} + +func TestBackend_TidyRoleTags(t *testing.T) { + for _, path := range []string{"tidy/roletag-blacklist", "tidy/roletag-denylist"} { + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + expiredIdentityWhitelist := &roleTagBlacklistEntry{ + ExpirationTime: time.Now().Add(-1 * 24 * 365 * time.Hour), + } + entry, err := logical.StorageEntryJSON("blacklist/roletag/id1", expiredIdentityWhitelist) + if err != nil { + t.Fatal(err) + } + if err := storage.Put(context.Background(), entry); err != nil { + t.Fatal(err) + } + + // test update operation + _, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: path, + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + + // let tidy finish in the background + time.Sleep(1 * time.Second) + + entry, err = storage.Get(context.Background(), "blacklist/roletag/id1") + if err != nil { + t.Fatal(err) + } + if entry != nil { + t.Fatal("bl tidy did not remove expired entry") + } + } +} + +func TestBackend_ConfigClient(t *testing.T) { + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + data := map[string]interface{}{ + "access_key": "AKIAJBRHKV6EVTTNXDHA", + "secret_key": "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", + } + + stepCreate := logicaltest.TestStep{ + Operation: logical.CreateOperation, + Path: "config/client", + Data: data, + } + + stepUpdate := logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "config/client", + Data: data, + } + + data3 := map[string]interface{}{ + "access_key": "", + "secret_key": "mCtSM8ZUEQ3mOFVZYPBQkf2sO6F/W7a5TVzrl3Oj", + } + stepInvalidAccessKey := logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "config/client", + Data: data3, + ErrorOk: true, + } + + data4 := map[string]interface{}{ + "access_key": "accesskey", + "secret_key": "", + } + stepInvalidSecretKey := logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: 
"config/client", + Data: data4, + ErrorOk: true, + } + + logicaltest.Test(t, logicaltest.TestCase{ + AcceptanceTest: false, + CredentialBackend: b, + Steps: []logicaltest.TestStep{ + stepCreate, + stepInvalidAccessKey, + stepInvalidSecretKey, + stepUpdate, + }, + }) + + // test existence check returning false + checkFound, exists, err := b.HandleExistenceCheck(context.Background(), &logical.Request{ + Operation: logical.CreateOperation, + Path: "config/client", + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + if !checkFound { + t.Fatal("existence check not found for path 'config/client'") + } + if exists { + t.Fatal("existence check should have returned 'false' for 'config/client'") + } + + // create an entry + configClientCreateRequest := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/client", + Data: data, + Storage: storage, + } + _, err = b.HandleRequest(context.Background(), configClientCreateRequest) + if err != nil { + t.Fatal(err) + } + + // test existence check returning true + checkFound, exists, err = b.HandleExistenceCheck(context.Background(), &logical.Request{ + Operation: logical.CreateOperation, + Path: "config/client", + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + if !checkFound { + t.Fatal("existence check not found for path 'config/client'") + } + if !exists { + t.Fatal("existence check should have returned 'true' for 'config/client'") + } + + endpointData := map[string]interface{}{ + "secret_key": "secretkey", + "access_key": "accesskey", + "endpoint": "endpointvalue", + } + + endpointReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/client", + Storage: storage, + Data: endpointData, + } + _, err = b.HandleRequest(context.Background(), endpointReq) + if err != nil { + t.Fatal(err) + } + + endpointReq.Operation = logical.ReadOperation + resp, err := b.HandleRequest(context.Background(), endpointReq) + if err != nil { + t.Fatal(err) + } + if resp == nil || + resp.IsError() { + t.Fatalf("") + } + actual := resp.Data["endpoint"].(string) + if actual != "endpointvalue" { + t.Fatalf("bad: endpoint: expected:endpointvalue actual:%s\n", actual) + } +} + +func TestBackend_pathConfigCertificate(t *testing.T) { + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + certReq := &logical.Request{ + Operation: logical.CreateOperation, + Storage: storage, + Path: "config/certificate/cert1", + } + checkFound, exists, err := b.HandleExistenceCheck(context.Background(), certReq) + if err != nil { + t.Fatal(err) + } + if !checkFound { + t.Fatal("existence check not found for path 'config/certificate/cert1'") + } + if exists { + t.Fatal("existence check should have returned 'false' for 'config/certificate/cert1'") + } + + data := map[string]interface{}{ + "type": "pkcs7", + "aws_public_cert": `LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM3VENDQXEwQ0NRQ1d1a2paNVY0YVp6QUpC +Z2NxaGtqT09BUURNRnd4Q3pBSkJnTlZCQVlUQWxWVE1Sa3cKRndZRFZRUUlFeEJYWVhOb2FXNW5k +Rzl1SUZOMFlYUmxNUkF3RGdZRFZRUUhFd2RUWldGMGRHeGxNU0F3SGdZRApWUVFLRXhkQmJXRjZi +MjRnVjJWaUlGTmxjblpwWTJWeklFeE1RekFlRncweE1qQXhNRFV4TWpVMk1USmFGdzB6Ck9EQXhN +RFV4TWpVMk1USmFNRnd4Q3pBSkJnTlZCQVlUQWxWVE1Sa3dGd1lEVlFRSUV4QlhZWE5vYVc1bmRH +OXUKSUZOMFlYUmxNUkF3RGdZRFZRUUhFd2RUWldGMGRHeGxNU0F3SGdZRFZRUUtFeGRCYldGNmIy 
+NGdWMlZpSUZObApjblpwWTJWeklFeE1RekNDQWJjd2dnRXNCZ2NxaGtqT09BUUJNSUlCSHdLQmdR +Q2prdmNTMmJiMVZRNHl0LzVlCmloNU9PNmtLL24xTHpsbHI3RDhad3RRUDhmT0VwcDVFMm5nK0Q2 +VWQxWjFnWWlwcjU4S2ozbnNzU05wSTZiWDMKVnlJUXpLN3dMY2xuZC9Zb3pxTk5tZ0l5WmVjTjdF +Z2xLOUlUSEpMUCt4OEZ0VXB0M1FieVlYSmRtVk1lZ042UApodmlZdDVKSC9uWWw0aGgzUGExSEpk +c2tnUUlWQUxWSjNFUjExK0tvNHRQNm53dkh3aDYrRVJZUkFvR0JBSTFqCmsrdGtxTVZIdUFGY3ZB +R0tvY1Rnc2pKZW02LzVxb216SnVLRG1iSk51OVF4dzNyQW90WGF1OFFlK01CY0psL1UKaGh5MUtI +VnBDR2w5ZnVlUTJzNklMMENhTy9idXljVTFDaVlRazQwS05IQ2NIZk5pWmJkbHgxRTlycFVwN2Ju +RgpsUmEydjFudE1YM2NhUlZEZGJ0UEVXbWR4U0NZc1lGRGs0bVpyT0xCQTRHRUFBS0JnRWJtZXZl +NWY4TElFL0dmCk1ObVA5Q001ZW92UU9HeDVobzhXcUQrYVRlYnMrazJ0bjkyQkJQcWVacXBXUmE1 +UC8ranJkS21sMXF4NGxsSFcKTVhyczNJZ0liNitoVUlCK1M4ZHo4L21tTzBicHI3NlJvWlZDWFlh +YjJDWmVkRnV0N3FjM1dVSDkrRVVBSDVtdwp2U2VEQ09VTVlRUjdSOUxJTll3b3VISXppcVFZTUFr +R0J5cUdTTTQ0QkFNREx3QXdMQUlVV1hCbGs0MHhUd1N3CjdIWDMyTXhYWXJ1c2U5QUNGQk5HbWRY +MlpCclZOR3JOOU4yZjZST2swazlLCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K +`, + } + + certReq.Data = data + // test create operation + resp, err := b.HandleRequest(context.Background(), certReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v, err: %v", resp, err) + } + + certReq.Data = nil + // test existence check + checkFound, exists, err = b.HandleExistenceCheck(context.Background(), certReq) + if err != nil { + t.Fatal(err) + } + if !checkFound { + t.Fatal("existence check not found for path 'config/certificate/cert1'") + } + if !exists { + t.Fatal("existence check should have returned 'true' for 'config/certificate/cert1'") + } + + certReq.Operation = logical.ReadOperation + // test read operation + resp, err = b.HandleRequest(context.Background(), certReq) + if err != nil { + t.Fatal(err) + } + expectedCert := `-----BEGIN CERTIFICATE----- +MIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw +FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD +VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xMjAxMDUxMjU2MTJaFw0z +ODAxMDUxMjU2MTJaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u +IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl +cnZpY2VzIExMQzCCAbcwggEsBgcqhkjOOAQBMIIBHwKBgQCjkvcS2bb1VQ4yt/5e +ih5OO6kK/n1Lzllr7D8ZwtQP8fOEpp5E2ng+D6Ud1Z1gYipr58Kj3nssSNpI6bX3 +VyIQzK7wLclnd/YozqNNmgIyZecN7EglK9ITHJLP+x8FtUpt3QbyYXJdmVMegN6P +hviYt5JH/nYl4hh3Pa1HJdskgQIVALVJ3ER11+Ko4tP6nwvHwh6+ERYRAoGBAI1j +k+tkqMVHuAFcvAGKocTgsjJem6/5qomzJuKDmbJNu9Qxw3rAotXau8Qe+MBcJl/U +hhy1KHVpCGl9fueQ2s6IL0CaO/buycU1CiYQk40KNHCcHfNiZbdlx1E9rpUp7bnF +lRa2v1ntMX3caRVDdbtPEWmdxSCYsYFDk4mZrOLBA4GEAAKBgEbmeve5f8LIE/Gf +MNmP9CM5eovQOGx5ho8WqD+aTebs+k2tn92BBPqeZqpWRa5P/+jrdKml1qx4llHW +MXrs3IgIb6+hUIB+S8dz8/mmO0bpr76RoZVCXYab2CZedFut7qc3WUH9+EUAH5mw +vSeDCOUMYQR7R9LINYwouHIziqQYMAkGByqGSM44BAMDLwAwLAIUWXBlk40xTwSw +7HX32MxXYruse9ACFBNGmdX2ZBrVNGrN9N2f6ROk0k9K +-----END CERTIFICATE----- +` + if resp.Data["aws_public_cert"].(string) != expectedCert { + t.Fatalf("bad: expected:%s\n got:%s\n", expectedCert, resp.Data["aws_public_cert"].(string)) + } + + certReq.Operation = logical.CreateOperation + certReq.Path = "config/certificate/cert2" + certReq.Data = data + // create another entry to test the list operation + _, err = b.HandleRequest(context.Background(), certReq) + if err != nil { + t.Fatal(err) + } + + certReq.Operation = logical.ListOperation + certReq.Path = "config/certificates" + // test list operation + resp, err = b.HandleRequest(context.Background(), certReq) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.IsError() { + 
t.Fatalf("failed to list config/certificates") + } + keys := resp.Data["keys"].([]string) + if len(keys) != 2 { + t.Fatalf("invalid keys listed: %#v\n", keys) + } + + certReq.Operation = logical.DeleteOperation + certReq.Path = "config/certificate/cert1" + _, err = b.HandleRequest(context.Background(), certReq) + if err != nil { + t.Fatal(err) + } + + certReq.Path = "config/certificate/cert2" + _, err = b.HandleRequest(context.Background(), certReq) + if err != nil { + t.Fatal(err) + } + + certReq.Operation = logical.ListOperation + certReq.Path = "config/certificates" + // test list operation + resp, err = b.HandleRequest(context.Background(), certReq) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.IsError() { + t.Fatalf("failed to list config/certificates") + } + if resp.Data["keys"] != nil { + t.Fatalf("no entries should be present") + } +} + +func TestBackend_parseAndVerifyRoleTagValue(t *testing.T) { + // create a backend + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // create a role + data := map[string]interface{}{ + "auth_type": "ec2", + "policies": "p,q,r,s", + "max_ttl": "120s", + "role_tag": "VaultRole", + "bound_ami_id": "abcd-123", + } + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/abcd-123", + Storage: storage, + Data: data, + }) + if resp != nil && resp.IsError() { + t.Fatalf("failed to create role") + } + if err != nil { + t.Fatal(err) + } + + // verify that the entry is created + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Path: "role/abcd-123", + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatalf("expected an role entry for abcd-123") + } + + // create a role tag + data2 := map[string]interface{}{ + "policies": "p,q,r,s", + } + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "role/abcd-123/tag", + Storage: storage, + Data: data2, + }) + if err != nil { + t.Fatal(err) + } + if resp.Data["tag_key"].(string) == "" || + resp.Data["tag_value"].(string) == "" { + t.Fatalf("invalid tag response: %#v\n", resp) + } + tagValue := resp.Data["tag_value"].(string) + + // parse the value and check if the verifiable values match + rTag, err := b.parseAndVerifyRoleTagValue(context.Background(), storage, tagValue) + if err != nil { + t.Fatalf("err: %s", err) + } + if rTag == nil { + t.Fatalf("failed to parse role tag") + } + if rTag.Version != "v1" || + !policyutil.EquivalentPolicies(rTag.Policies, []string{"p", "q", "r", "s"}) || + rTag.Role != "abcd-123" { + t.Fatalf("bad: parsed role tag contains incorrect values. 
Got: %#v\n", rTag) + } +} + +func TestBackend_PathRoleTag(t *testing.T) { + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + data := map[string]interface{}{ + "auth_type": "ec2", + "policies": "p,q,r,s", + "max_ttl": "120s", + "role_tag": "VaultRole", + "bound_ami_id": "abcd-123", + } + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/abcd-123", + Storage: storage, + Data: data, + }) + if resp != nil && resp.IsError() { + t.Fatalf("failed to create role") + } + if err != nil { + t.Fatal(err) + } + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Path: "role/abcd-123", + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatalf("failed to find a role entry for abcd-123") + } + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "role/abcd-123/tag", + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.Data == nil { + t.Fatalf("failed to create a tag on role: abcd-123") + } + if resp.IsError() { + t.Fatalf("failed to create a tag on role: abcd-123: %s\n", resp.Data["error"]) + } + if resp.Data["tag_value"].(string) == "" { + t.Fatalf("role tag not present in the response data: %#v\n", resp.Data) + } +} + +func TestBackend_PathBlacklistRoleTag(t *testing.T) { + for _, path := range []string{"roletag-blacklist/", "roletag-denylist/"} { + // create the backend + storage := &logical.InmemStorage{} + config := logical.TestBackendConfig() + config.StorageView = storage + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // create an role entry + data := map[string]interface{}{ + "auth_type": "ec2", + "policies": "p,q,r,s", + "role_tag": "VaultRole", + "bound_ami_id": "abcd-123", + } + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/abcd-123", + Storage: storage, + Data: data, + }) + if resp != nil && resp.IsError() { + t.Fatalf("failed to create role") + } + if err != nil { + t.Fatal(err) + } + + // create a role tag against an role registered before + data2 := map[string]interface{}{ + "policies": "p,q,r,s", + } + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "role/abcd-123/tag", + Storage: storage, + Data: data2, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.Data == nil { + t.Fatalf("failed to create a tag on role: abcd-123") + } + if resp.IsError() { + t.Fatalf("failed to create a tag on role: abcd-123: %s\n", resp.Data["error"]) + } + tag := resp.Data["tag_value"].(string) + if tag == "" { + t.Fatalf("role tag not present in the response data: %#v\n", resp.Data) + } + + // deny list that role tag + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: path + tag, + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + if resp != nil { + t.Fatalf("failed to deny list the roletag: %s\n", tag) + } + + // read the deny list entry + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + 
Operation: logical.ReadOperation, + Path: path + tag, + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.Data == nil { + t.Fatalf("failed to read the deny list role tag: %s\n", tag) + } + if resp.IsError() { + t.Fatalf("failed to read the deny list role tag: %s. Err: %s\n", tag, resp.Data["error"]) + } + + // delete the deny listed entry + _, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.DeleteOperation, + Path: path + tag, + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + + // try to read the deleted entry + tagEntry, err := b.lockedDenyLististRoleTagEntry(context.Background(), storage, tag) + if err != nil { + t.Fatal(err) + } + if tagEntry != nil { + t.Fatalf("role tag should not have been present: %s\n", tag) + } + } +} + +/* +This is an acceptance test. + + Requires the following env vars: + TEST_AWS_EC2_RSA2048 + TEST_AWS_EC2_PKCS7 + TEST_AWS_EC2_IDENTITY_DOCUMENT + TEST_AWS_EC2_IDENTITY_DOCUMENT_SIG + TEST_AWS_EC2_AMI_ID + TEST_AWS_EC2_ACCOUNT_ID + TEST_AWS_EC2_IAM_ROLE_ARN + + If this is being run on an EC2 instance, you can set the environment vars using this bash snippet: + + export TEST_AWS_EC2_RSA2048=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/rsa2048) + export TEST_AWS_EC2_PKCS7=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/pkcs7) + export TEST_AWS_EC2_IDENTITY_DOCUMENT=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | base64 -w 0) + export TEST_AWS_EC2_IDENTITY_DOCUMENT_SIG=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/signature | tr -d '\n') + export TEST_AWS_EC2_AMI_ID=$(curl -s http://169.254.169.254/latest/meta-data/ami-id) + export TEST_AWS_EC2_IAM_ROLE_ARN=$(aws iam get-role --role-name $(curl -q http://169.254.169.254/latest/meta-data/iam/security-credentials/ -S -s) --query Role.Arn --output text) + export TEST_AWS_EC2_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) + + If the test is not being run on an EC2 instance that has access to + credentials using EC2RoleProvider, on top of the above vars, the following + also need to be set: + TEST_AWS_SECRET_KEY + TEST_AWS_ACCESS_KEY +*/ +func TestBackendAcc_LoginWithInstanceIdentityDocAndAccessListIdentity(t *testing.T) { + for _, path := range []string{"identity-whitelist/", "identity-accesslist/"} { + // This test case should be run only when certain env vars are set and + // executed as an acceptance test.
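+ // (logicaltest.TestEnvVar is the VAULT_ACC environment variable, the
+ // standard gate for acceptance tests in this repository.)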
+ if os.Getenv(logicaltest.TestEnvVar) == "" { + t.Skip(fmt.Sprintf("Acceptance tests skipped unless env %q set", logicaltest.TestEnvVar)) + return + } + + rsa2048 := os.Getenv("TEST_AWS_EC2_RSA2048") + if rsa2048 == "" { + t.Skipf("env var TEST_AWS_EC2_RSA2048 not set, skipping test") + } + + pkcs7 := os.Getenv("TEST_AWS_EC2_PKCS7") + if pkcs7 == "" { + t.Skipf("env var TEST_AWS_EC2_PKCS7 not set, skipping test") + } + + identityDoc := os.Getenv("TEST_AWS_EC2_IDENTITY_DOCUMENT") + if identityDoc == "" { + t.Skipf("env var TEST_AWS_EC2_IDENTITY_DOCUMENT not set, skipping test") + } + + identityDocSig := os.Getenv("TEST_AWS_EC2_IDENTITY_DOCUMENT_SIG") + if identityDocSig == "" { + t.Skipf("env var TEST_AWS_EC2_IDENTITY_DOCUMENT_SIG not set, skipping test") + } + + amiID := os.Getenv("TEST_AWS_EC2_AMI_ID") + if amiID == "" { + t.Skipf("env var TEST_AWS_EC2_AMI_ID not set, skipping test") + } + + iamARN := os.Getenv("TEST_AWS_EC2_IAM_ROLE_ARN") + if iamARN == "" { + t.Skipf("env var TEST_AWS_EC2_IAM_ROLE_ARN not set, skipping test") + } + + accountID := os.Getenv("TEST_AWS_EC2_ACCOUNT_ID") + if accountID == "" { + t.Skipf("env var TEST_AWS_EC2_ACCOUNT_ID not set, skipping test") + } + + roleName := amiID + + // create the backend + storage := &logical.InmemStorage{} + config := logical.TestBackendConfig() + config.StorageView = storage + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + accessKey := os.Getenv("TEST_AWS_ACCESS_KEY") + secretKey := os.Getenv("TEST_AWS_SECRET_KEY") + + // In case of problems with making API calls using the credentials (2FA enabled, + // for instance), the keys need not be set if the test is running on an EC2 + // instance with permissions to get the credentials using EC2RoleProvider. + if accessKey != "" && secretKey != "" { + // get the API credentials from env vars + clientConfig := map[string]interface{}{ + "access_key": accessKey, + "secret_key": secretKey, + } + if clientConfig["access_key"] == "" || + clientConfig["secret_key"] == "" { + t.Fatalf("credentials not configured") + } + + // store the credentials + _, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "config/client", + Data: clientConfig, + }) + if err != nil { + t.Fatal(err) + } + } + + // Configure additional metadata to be returned for ec2 logins. + identity := map[string]interface{}{ + "ec2_metadata": []string{"instance_id", "region", "ami_id"}, + } + + // store the identity + _, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "config/identity", + Data: identity, + }) + if err != nil { + t.Fatal(err) + } + + loginInput := map[string]interface{}{ + "pkcs7": pkcs7, + "nonce": "vault-client-nonce", + } + + parsedIdentityDoc, err := b.parseIdentityDocument(context.Background(), storage, pkcs7) + if err != nil { + t.Fatal(err) + } + + // Perform the login operation with an AMI ID that does not match + // the bound on the role.
+ loginRequest := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "login", + Storage: storage, + Data: loginInput, + } + + // Baseline role data that should permit login + data := map[string]interface{}{ + "auth_type": "ec2", + "policies": "root", + "max_ttl": "120s", + "bound_ami_id": []string{"wrong_ami_id", amiID, "wrong_ami_id2"}, + "bound_account_id": accountID, + "bound_iam_role_arn": iamARN, + "bound_ec2_instance_id": []string{parsedIdentityDoc.InstanceID, "i-1234567"}, + } + + roleReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/" + roleName, + Storage: storage, + Data: data, + } + + updateRoleExpectLoginFail := func(roleRequest, loginRequest *logical.Request) error { + resp, err := b.HandleRequest(context.Background(), roleRequest) + if err != nil || (resp != nil && resp.IsError()) { + return fmt.Errorf("bad: failed to create role: resp:%#v\nerr:%v", resp, err) + } + resp, err = b.HandleRequest(context.Background(), loginRequest) + if err != nil || resp == nil || (resp != nil && !resp.IsError()) { + return fmt.Errorf("bad: expected login failure: resp:%#v\nerr:%v", resp, err) + } + return nil + } + + // Test a role with the wrong AMI ID + data["bound_ami_id"] = []string{"ami-1234567", "ami-7654321"} + if err := updateRoleExpectLoginFail(roleReq, loginRequest); err != nil { + t.Fatal(err) + } + + roleReq.Operation = logical.UpdateOperation + // Place the correct AMI ID in one of the values, but make the AccountID wrong + data["bound_ami_id"] = []string{"wrong_ami_id_1", amiID, "wrong_ami_id_2"} + data["bound_account_id"] = []string{"wrong-account-id", "wrong-account-id-2"} + if err := updateRoleExpectLoginFail(roleReq, loginRequest); err != nil { + t.Fatal(err) + } + + // Place the correct AccountID in one of the values, but make the IAMRoleARN wrong + data["bound_account_id"] = []string{"wrong-account-id-1", accountID, "wrong-account-id-2"} + data["bound_iam_role_arn"] = []string{"wrong_iam_role_arn", "wrong_iam_role_arn_2"} + if err := updateRoleExpectLoginFail(roleReq, loginRequest); err != nil { + t.Fatal(err) + } + + // Place correct IAM role ARN, but incorrect instance ID + data["bound_iam_role_arn"] = []string{"wrong_iam_role_arn_1", iamARN, "wrong_iam_role_arn_2"} + data["bound_ec2_instance_id"] = "i-1234567" + if err := updateRoleExpectLoginFail(roleReq, loginRequest); err != nil { + t.Fatal(err) + } + + // Place correct instance ID, but substring of the IAM role ARN + data["bound_ec2_instance_id"] = []string{parsedIdentityDoc.InstanceID, "i-1234567"} + data["bound_iam_role_arn"] = []string{"wrong_iam_role_arn", iamARN[:len(iamARN)-2], "wrong_iam_role_arn_2"} + if err := updateRoleExpectLoginFail(roleReq, loginRequest); err != nil { + t.Fatal(err) + } + + // Place a wildcard in the middle of the role ARN + // The :31 gets arn:aws:iam::123456789012:role/ + // This test relies on the role name having at least two characters + data["bound_iam_role_arn"] = []string{"wrong_iam_role_arn", fmt.Sprintf("%s*%s", iamARN[:31], iamARN[32:])} + if err := updateRoleExpectLoginFail(roleReq, loginRequest); err != nil { + t.Fatal(err) + } + + // globbed IAM role ARN + data["bound_iam_role_arn"] = []string{"wrong_iam_role_arn_1", fmt.Sprintf("%s*", iamARN[:len(iamARN)-2]), "wrong_iam_role_arn_2"} + resp, err := b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: failed to create role: resp:%#v\nerr:%v", resp, err) + } + + // Now, the login attempt should succeed + resp, err =
b.HandleRequest(context.Background(), loginRequest) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.Auth == nil || resp.IsError() { + t.Fatalf("bad: failed to login: resp:%#v\nerr:%v", resp, err) + } + + // Attempt to re-login with the identity signature + delete(loginInput, "pkcs7") + loginInput["identity"] = identityDoc + loginInput["signature"] = identityDocSig + + resp, err = b.HandleRequest(context.Background(), loginRequest) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.Auth == nil || resp.IsError() { + t.Fatalf("bad: failed to login: resp:%#v\nerr:%v", resp, err) + } + + // verify the presence of instance_id in the response object. + instanceID := resp.Auth.Metadata["instance_id"] + if instanceID == "" { + t.Fatalf("instance ID not present in the response object") + } + if instanceID != parsedIdentityDoc.InstanceID { + t.Fatalf("instance ID in response (%q) did not match instance ID from identity document (%q)", instanceID, parsedIdentityDoc.InstanceID) + } + + _, ok := resp.Auth.Metadata["nonce"] + if ok { + t.Fatalf("client nonce should not have been returned") + } + + loginInput["nonce"] = "changed-vault-client-nonce" + // try to login again with changed nonce + resp, err = b.HandleRequest(context.Background(), loginRequest) + if err != nil { + t.Fatal(err) + } + if resp == nil || !resp.IsError() { + t.Fatalf("login attempt should have failed due to client nonce mismatch") + } + + // Check if an access list identity entry is created after the login. + wlRequest := &logical.Request{ + Operation: logical.ReadOperation, + Path: path + instanceID, + Storage: storage, + } + resp, err = b.HandleRequest(context.Background(), wlRequest) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.Data == nil || resp.Data["role"] != roleName { + t.Fatalf("failed to read access list identity") + } + + // Delete the access list identity entry. + wlRequest.Operation = logical.DeleteOperation + resp, err = b.HandleRequest(context.Background(), wlRequest) + if err != nil { + t.Fatal(err) + } + if resp.IsError() { + t.Fatalf("failed to delete access list identity") + } + + // Allow a fresh login without supplying the nonce + delete(loginInput, "nonce") + + resp, err = b.HandleRequest(context.Background(), loginRequest) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.Auth == nil || resp.IsError() { + t.Fatalf("login attempt failed") + } + + _, ok = resp.Auth.Metadata["nonce"] + if !ok { + t.Fatalf("expected nonce to be returned") + } + + // Attempt to re-login with the rsa2048 signature as a pkcs7 signature + wlRequest.Operation = logical.DeleteOperation + resp, err = b.HandleRequest(context.Background(), wlRequest) + if err != nil { + t.Fatal(err) + } + if resp.IsError() { + t.Fatalf("failed to delete access list identity") + } + delete(loginInput, "identity") + delete(loginInput, "signature") + loginInput["pkcs7"] = rsa2048 + + resp, err = b.HandleRequest(context.Background(), loginRequest) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.Auth == nil || resp.IsError() { + t.Fatalf("bad: failed to login: resp:%#v\nerr:%v", resp, err) + } + + // verify the presence of instance_id in the response object. 
+ instanceID = resp.Auth.Metadata["instance_id"] + if instanceID == "" { + t.Fatalf("instance ID not present in the response object") + } + if instanceID != parsedIdentityDoc.InstanceID { + t.Fatalf("instance ID in response (%q) did not match instance ID from identity document (%q)", instanceID, parsedIdentityDoc.InstanceID) + } + } +} + +func TestBackend_pathStsConfig(t *testing.T) { + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + stsReq := &logical.Request{ + Operation: logical.CreateOperation, + Storage: storage, + Path: "config/sts/account1", + } + checkFound, exists, err := b.HandleExistenceCheck(context.Background(), stsReq) + if err != nil { + t.Fatal(err) + } + if !checkFound { + t.Fatal("existence check not found for path 'config/sts/account1'") + } + if exists { + t.Fatal("existence check should have returned 'false' for 'config/sts/account1'") + } + + data := map[string]interface{}{ + "sts_role": "arn:aws:iam:account1:role/myRole", + } + + stsReq.Data = data + // test create operation + resp, err := b.HandleRequest(context.Background(), stsReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v, err: %v", resp, err) + } + + stsReq.Data = nil + // test existence check + checkFound, exists, err = b.HandleExistenceCheck(context.Background(), stsReq) + if err != nil { + t.Fatal(err) + } + if !checkFound { + t.Fatal("existence check not found for path 'config/sts/account1'") + } + if !exists { + t.Fatal("existence check should have returned 'true' for 'config/sts/account1'") + } + + stsReq.Operation = logical.ReadOperation + // test read operation + resp, err = b.HandleRequest(context.Background(), stsReq) + if err != nil { + t.Fatal(err) + } + expectedStsRole := "arn:aws:iam:account1:role/myRole" + if resp.Data["sts_role"].(string) != expectedStsRole { + t.Fatalf("bad: expected:%s\n got:%s\n", expectedStsRole, resp.Data["sts_role"].(string)) + } + + stsReq.Operation = logical.CreateOperation + stsReq.Path = "config/sts/account2" + stsReq.Data = data + // create another entry to test the list operation + resp, err = b.HandleRequest(context.Background(), stsReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatal(err) + } + + stsReq.Operation = logical.ListOperation + stsReq.Path = "config/sts" + // test list operation + resp, err = b.HandleRequest(context.Background(), stsReq) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.IsError() { + t.Fatalf("failed to list config/sts") + } + keys := resp.Data["keys"].([]string) + if len(keys) != 2 { + t.Fatalf("invalid keys listed: %#v\n", keys) + } + + stsReq.Operation = logical.DeleteOperation + stsReq.Path = "config/sts/account1" + resp, err = b.HandleRequest(context.Background(), stsReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatal(err) + } + + stsReq.Path = "config/sts/account2" + resp, err = b.HandleRequest(context.Background(), stsReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatal(err) + } + + stsReq.Operation = logical.ListOperation + stsReq.Path = "config/sts" + // test list operation + resp, err = b.HandleRequest(context.Background(), stsReq) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.IsError() { + t.Fatalf("failed to list config/sts") + } + if resp.Data["keys"] != nil { + t.Fatalf("no entries should be present") + } +} + 
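+// Illustrative sketch (not from the upstream test suite): assuming an
+// aws-sdk-go session and STS client like the ones built in
+// TestBackendAcc_LoginWithCallerIdentity below, this is how the helper that
+// follows turns a signed sts:GetCallerIdentity request into a login payload.
+// The header value and role name used here are hypothetical placeholders.
+//
+//	awsSession, _ := session.NewSession()
+//	stsSvc := sts.New(awsSession)
+//	stsReq, _ := stsSvc.GetCallerIdentityRequest(&sts.GetCallerIdentityInput{})
+//	stsReq.HTTPRequest.Header.Add(iamServerIdHeader, "vault.example.com")
+//	if err := stsReq.Sign(); err != nil {
+//		// handle the signing failure; the payload cannot be built without it
+//	}
+//	loginData, _ := buildCallerIdentityLoginData(stsReq.HTTPRequest, "my-role")
+//	// loginData now carries iam_http_request_method plus the base64-encoded
+//	// request URL, headers, and body, ready for an update to the "login" path.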
+func buildCallerIdentityLoginData(request *http.Request, roleName string) (map[string]interface{}, error) { + headersJson, err := json.Marshal(request.Header) + if err != nil { + return nil, err + } + requestBody, err := ioutil.ReadAll(request.Body) + if err != nil { + return nil, err + } + return map[string]interface{}{ + "iam_http_request_method": request.Method, + "iam_request_url": base64.StdEncoding.EncodeToString([]byte(request.URL.String())), + "iam_request_headers": base64.StdEncoding.EncodeToString(headersJson), + "iam_request_body": base64.StdEncoding.EncodeToString(requestBody), + "request_role": roleName, + }, nil +} + +// This is an acceptance test. +// If the test is NOT being run on an AWS EC2 instance in an instance profile, +// it requires the following environment variables to be set: +// TEST_AWS_ACCESS_KEY_ID +// TEST_AWS_SECRET_ACCESS_KEY +// TEST_AWS_SECURITY_TOKEN or TEST_AWS_SESSION_TOKEN (optional, if you are using short-lived creds) +// These are intentionally NOT the "standard" variables to prevent accidentally +// using prod creds in acceptance tests +func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) { + // This test case should be run only when certain env vars are set and + // executed as an acceptance test. + if os.Getenv(logicaltest.TestEnvVar) == "" { + t.Skip(fmt.Sprintf("Acceptance tests skipped unless env %q set", logicaltest.TestEnvVar)) + return + } + + ctx := context.Background() + storage := &logical.InmemStorage{} + config := logical.TestBackendConfig() + config.StorageView = storage + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // Override the default AWS env vars (if set) with our test creds + // so that the credential provider chain will pick them up + // NOTE that I'm not bothering to override the shared config file location, + // so if creds are specified there, they will be used before IAM + // instance profile creds + // This doesn't provide perfect leakage protection (e.g., it will still + // potentially pick up credentials from the ~/.config files), but probably + // good enough rather than having to muck around in the low-level details + for _, envvar := range []string{ + "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SECURITY_TOKEN", "AWS_SESSION_TOKEN", + } { + // Skip test if any of the required env vars are missing + testEnvVar := os.Getenv("TEST_" + envvar) + if testEnvVar == "" { + t.Skipf("env var %s not set, skipping test", "TEST_"+envvar) + } + + // restore existing environment variables (in case future tests need them) + defer os.Setenv(envvar, os.Getenv(envvar)) + + os.Setenv(envvar, testEnvVar) + } + awsSession, err := session.NewSession() + if err != nil { + t.Fatalf("failed to create session: %v", err) + } + + stsService := sts.New(awsSession) + stsInputParams := &sts.GetCallerIdentityInput{} + + testIdentity, err := stsService.GetCallerIdentity(stsInputParams) + if err != nil { + t.Fatalf("Received error retrieving identity: %s", err) + } + entity, err := parseIamArn(*testIdentity.Arn) + if err != nil { + t.Fatal(err) + } + + // Test setup largely done + // At this point, we're going to: + // 1. Configure the client to require our test header value + // 2. Configure identity to use the ARN for the alias + // 3. Configure two different roles: + // a. One bound to our test user + // b. One bound to a garbage ARN + // 4.
Pass in a request that doesn't have the signed header, ensure + // we're not allowed to login + // 5. Pass in a request that has a validly signed header, but the wrong + // value, ensure it doesn't allow login + // 6. Pass in a request that has a validly signed request, ensure + // it allows us to login to our role + // 7. Pass in a request that has a validly signed request, asking for + // the other role, ensure it fails + + clientConfigData := map[string]interface{}{ + "iam_server_id_header_value": testVaultHeaderValue, + } + clientRequest := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/client", + Storage: storage, + Data: clientConfigData, + } + _, err = b.HandleRequest(ctx, clientRequest) + if err != nil { + t.Fatal(err) + } + + configIdentityData := map[string]interface{}{ + "iam_alias": identityAliasIAMFullArn, + } + configIdentityRequest := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/identity", + Storage: storage, + Data: configIdentityData, + } + resp, err := b.HandleRequest(ctx, configIdentityRequest) + if err != nil { + t.Fatal(err) + } + if resp != nil && resp.IsError() { + t.Fatalf("received error response when configuring identity: %#v", resp) + } + + // configuring the valid role we'll be able to login to + roleData := map[string]interface{}{ + "bound_iam_principal_arn": []string{entity.canonicalArn(), "arn:aws:iam::123456789012:role/FakeRoleArn1*"}, // Fake ARN MUST be wildcard terminated because we're resolving unique IDs, and the wildcard termination prevents unique ID resolution + "policies": "root", + "auth_type": iamAuthType, + } + roleRequest := &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/" + testValidRoleName, + Storage: storage, + Data: roleData, + } + resp, err = b.HandleRequest(ctx, roleRequest) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: failed to create role: resp:%#v\nerr:%v", resp, err) + } + + // configuring a valid role we won't be able to login to + roleDataEc2 := map[string]interface{}{ + "auth_type": "ec2", + "policies": "root", + "bound_ami_id": "ami-1234567", + } + roleRequestEc2 := &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/ec2only", + Storage: storage, + Data: roleDataEc2, + } + resp, err = b.HandleRequest(ctx, roleRequestEc2) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: failed to create role; resp:%#v\nerr:%v", resp, err) + } + + fakeArn := "arn:aws:iam::123456789012:role/somePath/FakeRole" + fakeArn2 := "arn:aws:iam::123456789012:role/somePath/FakeRole2" + fakeArnResolverCount := 0 + fakeArnResolver := func(ctx context.Context, s logical.Storage, arn string) (string, error) { + if strings.HasPrefix(arn, fakeArn) { + fakeArnResolverCount++ + return fmt.Sprintf("FakeUniqueIdFor%s%d", arn, fakeArnResolverCount), nil + } + return b.resolveArnToRealUniqueId(context.Background(), s, arn) + } + b.resolveArnToUniqueIDFunc = fakeArnResolver + + // now we're creating the invalid role we won't be able to login to + roleData["bound_iam_principal_arn"] = []string{fakeArn, fakeArn2} + roleRequest.Path = "role/" + testInvalidRoleName + resp, err = b.HandleRequest(context.Background(), roleRequest) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: failed to create role: resp:%#v\nerr:%v", resp, err) + } + + // now, create the request without the signed header + stsRequestNoHeader, _ := stsService.GetCallerIdentityRequest(stsInputParams) + stsRequestNoHeader.Sign() + loginData, err :=
buildCallerIdentityLoginData(stsRequestNoHeader.HTTPRequest, testValidRoleName) + if err != nil { + t.Fatal(err) + } + loginRequest := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "login", + Storage: storage, + Data: loginData, + } + resp, err = b.HandleRequest(ctx, loginRequest) + if err != nil || resp == nil || !resp.IsError() { + t.Errorf("bad: expected failed login due to missing header: resp:%#v\nerr:%v", resp, err) + } + + // create the request with the invalid header value + + // Not reusing stsRequestNoHeader because the process of signing the request + // and reading the body modifies the underlying request, so it's just cleaner + // to get new requests. + stsRequestInvalidHeader, _ := stsService.GetCallerIdentityRequest(stsInputParams) + stsRequestInvalidHeader.HTTPRequest.Header.Add(iamServerIdHeader, "InvalidValue") + stsRequestInvalidHeader.Sign() + loginData, err = buildCallerIdentityLoginData(stsRequestInvalidHeader.HTTPRequest, testValidRoleName) + if err != nil { + t.Fatal(err) + } + loginRequest = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "login", + Storage: storage, + Data: loginData, + } + resp, err = b.HandleRequest(ctx, loginRequest) + if err != nil || resp == nil || !resp.IsError() { + t.Errorf("bad: expected failed login due to invalid header: resp:%#v\nerr:%v", resp, err) + } + + // Now, valid request against invalid role + stsRequestValid, _ := stsService.GetCallerIdentityRequest(stsInputParams) + stsRequestValid.HTTPRequest.Header.Add(iamServerIdHeader, testVaultHeaderValue) + stsRequestValid.Sign() + loginData, err = buildCallerIdentityLoginData(stsRequestValid.HTTPRequest, testInvalidRoleName) + if err != nil { + t.Fatal(err) + } + loginRequest = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "login", + Storage: storage, + Data: loginData, + } + resp, err = b.HandleRequest(ctx, loginRequest) + if err != nil || resp == nil || !resp.IsError() { + t.Errorf("bad: expected failed login due to invalid role: resp:%#v\nerr:%v", resp, err) + } + + loginData["role"] = "ec2only" + resp, err = b.HandleRequest(ctx, loginRequest) + if err != nil || resp == nil || !resp.IsError() { + t.Errorf("bad: expected failed login due to bad auth type: resp:%#v\nerr:%v", resp, err) + } + + // finally, the happy path test :) + + loginData["role"] = testValidRoleName + resp, err = b.HandleRequest(ctx, loginRequest) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.Auth == nil || resp.IsError() { + t.Fatalf("bad: expected valid login: resp:%#v", resp) + } + if resp.Auth.Alias == nil { + t.Fatalf("bad: nil auth Alias") + } + if resp.Auth.Alias.Name != *testIdentity.Arn { + t.Fatalf("bad: expected identity alias of %q, got %q instead", *testIdentity.Arn, resp.Auth.Alias.Name) + } + + renewReq := generateRenewRequest(storage, resp.Auth) + // dump a fake ARN into the metadata to ensure that we ONLY look + // at the unique ID that has been generated + renewReq.Auth.Metadata["canonical_arn"] = "fake_arn" + emptyLoginFd := &framework.FieldData{ + Raw: map[string]interface{}{}, + Schema: b.pathLogin().Fields, + } + // ensure we can renew + resp, err = b.pathLoginRenew(ctx, renewReq, emptyLoginFd) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("got nil response from renew") + } + if resp.IsError() { + t.Fatalf("got error when renewing: %#v", *resp) + } + + // Now, fake out the unique ID resolver to ensure we fail login if the unique ID + // changes from under us + b.resolveArnToUniqueIDFunc = 
resolveArnToFakeUniqueId + // First, we need to update the role to force Vault to use our fake resolver to + // pick up the fake user ID + roleData["bound_iam_principal_arn"] = entity.canonicalArn() + roleRequest.Path = "role/" + testValidRoleName + resp, err = b.HandleRequest(ctx, roleRequest) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: failed to recreate role: resp:%#v\nerr:%v", resp, err) + } + resp, err = b.HandleRequest(ctx, loginRequest) + if err != nil || resp == nil || !resp.IsError() { + t.Errorf("bad: expected failed login due to changed AWS role ID: resp: %#v\nerr:%v", resp, err) + } + + // and ensure a renew no longer works + resp, err = b.pathLoginRenew(ctx, renewReq, emptyLoginFd) + if err == nil || (resp != nil && !resp.IsError()) { + t.Errorf("bad: expected failed renew due to changed AWS role ID: resp: %#v", resp) + } + // Undo the fake resolver... + b.resolveArnToUniqueIDFunc = b.resolveArnToRealUniqueId + + // Now test that wildcard matching works + wildcardRoleName := "valid_wildcard" + wildcardEntity := *entity + wildcardEntity.FriendlyName = "*" + roleData["bound_iam_principal_arn"] = []string{wildcardEntity.canonicalArn(), "arn:aws:iam::123456789012:role/DoesNotExist/Vault_Fake_Role*"} + roleRequest.Path = "role/" + wildcardRoleName + resp, err = b.HandleRequest(ctx, roleRequest) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: failed to create wildcard role: resp:%#v\nerr:%v", resp, err) + } + + loginData["role"] = wildcardRoleName + resp, err = b.HandleRequest(ctx, loginRequest) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.Auth == nil || resp.IsError() { + t.Fatalf("bad: expected valid login: resp:%#v", resp) + } + // and ensure we can renew + renewReq = generateRenewRequest(storage, resp.Auth) + resp, err = b.pathLoginRenew(ctx, renewReq, emptyLoginFd) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("got nil response from renew") + } + if resp.IsError() { + t.Fatalf("got error when renewing: %#v", *resp) + } + // ensure the cache is populated + + clientUserIDRaw, ok := resp.Auth.InternalData["client_user_id"] + if !ok { + t.Errorf("client_user_id not found in response") + } + clientUserID, ok := clientUserIDRaw.(string) + if !ok { + t.Errorf("client_user_id is not a string: %#v", clientUserIDRaw) + } + + cachedArn := b.getCachedUserId(clientUserID) + if cachedArn == "" { + t.Errorf("got empty ARN back from user ID cache; expected full arn") + } + + // Test for renewal with period + period := 600 * time.Second + roleData["period"] = period.String() + roleRequest.Path = "role/" + testValidRoleName + resp, err = b.HandleRequest(ctx, roleRequest) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: failed to update role: resp:%#v\nerr:%v", resp, err) + } + + loginData["role"] = testValidRoleName + resp, err = b.HandleRequest(ctx, loginRequest) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.Auth == nil || resp.IsError() { + t.Fatalf("bad: expected valid login: resp:%#v", resp) + } + + renewReq = generateRenewRequest(storage, resp.Auth) + resp, err = b.pathLoginRenew(context.Background(), renewReq, emptyLoginFd) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("got nil response from renew") + } + if resp.IsError() { + t.Fatalf("got error when renewing: %#v", *resp) + } + + if resp.Auth.Period != period { + t.Fatalf("expected a period value of %s in the response, got: %s", period, resp.Auth.Period) + } +} + +func
generateRenewRequest(s logical.Storage, auth *logical.Auth) *logical.Request { + renewReq := &logical.Request{ + Storage: s, + Auth: &logical.Auth{}, + } + renewReq.Auth.InternalData = auth.InternalData + renewReq.Auth.Metadata = auth.Metadata + renewReq.Auth.LeaseOptions = auth.LeaseOptions + renewReq.Auth.Policies = auth.Policies + renewReq.Auth.Period = auth.Period + + return renewReq +} + +func TestGeneratePartitionToRegionMap(t *testing.T) { + m := generatePartitionToRegionMap() + if m["aws"].ID() != "us-east-1" { + t.Fatal("expected us-east-1 but received " + m["aws"].ID()) + } + if m["aws-us-gov"].ID() != "us-gov-west-1" { + t.Fatal("expected us-gov-west-1 but received " + m["aws-us-gov"].ID()) + } +} diff --git a/builtin/credential/aws/certificates.go b/builtin/credential/aws/certificates.go new file mode 100644 index 0000000..4b97a95 --- /dev/null +++ b/builtin/credential/aws/certificates.go @@ -0,0 +1,886 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package awsauth + +import ( + "crypto/x509" + "strings" +) + +var defaultCertificates []*x509.Certificate = nil + +func init() { + allCerts := pkcs7RawCerts + signatureRawCerts + rsa2048RawCerts + splitCerts := strings.Split(allCerts, "-----BEGIN CERTIFICATE-----") + // parse all hard-coded certs + for _, cert := range splitCerts { + if len(strings.TrimSpace(cert)) == 0 { + continue + } + cert = "-----BEGIN CERTIFICATE-----\n" + cert + decodedCert, err := decodePEMAndParseCertificate(cert) + if err != nil { + panic(err) + } + defaultCertificates = append(defaultCertificates, decodedCert) + } +} + +// These certificates are for verifying PKCS#7 DSA signatures. +// Copied from: +// +// curl https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/verify-pkcs7.html | pcregrep -M -o -e '(?s)-----BEGIN CERTIFICATE-----[^>]*-----END CERTIFICATE-----' +// +// Last updated: 2022-05-31 +const pkcs7RawCerts = `-----BEGIN CERTIFICATE----- +MIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw +FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD +VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xMjAxMDUxMjU2MTJaFw0z +ODAxMDUxMjU2MTJaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u +IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl +cnZpY2VzIExMQzCCAbcwggEsBgcqhkjOOAQBMIIBHwKBgQCjkvcS2bb1VQ4yt/5e +ih5OO6kK/n1Lzllr7D8ZwtQP8fOEpp5E2ng+D6Ud1Z1gYipr58Kj3nssSNpI6bX3 +VyIQzK7wLclnd/YozqNNmgIyZecN7EglK9ITHJLP+x8FtUpt3QbyYXJdmVMegN6P +hviYt5JH/nYl4hh3Pa1HJdskgQIVALVJ3ER11+Ko4tP6nwvHwh6+ERYRAoGBAI1j +k+tkqMVHuAFcvAGKocTgsjJem6/5qomzJuKDmbJNu9Qxw3rAotXau8Qe+MBcJl/U +hhy1KHVpCGl9fueQ2s6IL0CaO/buycU1CiYQk40KNHCcHfNiZbdlx1E9rpUp7bnF +lRa2v1ntMX3caRVDdbtPEWmdxSCYsYFDk4mZrOLBA4GEAAKBgEbmeve5f8LIE/Gf +MNmP9CM5eovQOGx5ho8WqD+aTebs+k2tn92BBPqeZqpWRa5P/+jrdKml1qx4llHW +MXrs3IgIb6+hUIB+S8dz8/mmO0bpr76RoZVCXYab2CZedFut7qc3WUH9+EUAH5mw +vSeDCOUMYQR7R9LINYwouHIziqQYMAkGByqGSM44BAMDLwAwLAIUWXBlk40xTwSw +7HX32MxXYruse9ACFBNGmdX2ZBrVNGrN9N2f6ROk0k9K +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIC7zCCAq4CCQCO7MJe5Y3VLjAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw +FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD +VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xOTAyMDMwMjIxMjFaFw00 +NTAyMDMwMjIxMjFaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u +IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl +cnZpY2VzIExMQzCCAbgwggEsBgcqhkjOOAQBMIIBHwKBgQDvQ9RzVvf4MAwGbqfX +blCvCoVb9957OkLGn/04CowHXJ+vTBR7eyIa6AoXltsQXBOmrJswToFKKxT4gbuw 
+jK7s9QQX4CmTRWcEgO2RXtZSVjOhsUQMh+yf7Ht4OVL97LWnNfGsX2cwjcRWHYgI +7lvnuBNBzLQHdSEwMNq0Bk76PwIVAMan6XIEEPnwr4e6u/RNnWBGKd9FAoGBAOCG +eSNmxpW4QFu4pIlAykm6EnTZKKHT87gdXkAkfoC5fAfOxxhnE2HezZHp9Ap2tMV5 +8bWNvoPHvoKCQqwfm+OUBlAxC/3vqoVkKL2mG1KgUH9+hrtpMTkwO3RREnKe7I5O +x9qDimJpOihrL4I0dYvy9xUOoz+DzFAW8+ylWVYpA4GFAAKBgQDbnBAKSxWr9QHY +6Dt+EFdGz6lAZLedeBKpaP53Z1DTO34J0C55YbJTwBTFGqPtOLxnUVDlGiD6GbmC +80f3jvogPR1mSmGsydbNbZnbUEVWrRhe+y5zJ3g9qs/DWmDW0deEFvkhWVnLJkFJ +9pdOu/ibRPH1lE2nz6pK7GbOQtLyHTAJBgcqhkjOOAQDAzAAMC0CFQCoJlwGtJQC +cLoM4p/jtVFOj26xbgIUUS4pDKyHaG/eaygLTtFpFJqzWHc= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIC7jCCAq4CCQCVWIgSmP8RhTAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw +FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD +VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xOTAyMDUxMzA2MjFaFw00 +NTAyMDUxMzA2MjFaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u +IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl +cnZpY2VzIExMQzCCAbgwggEsBgcqhkjOOAQBMIIBHwKBgQDcwojQfgWdV1QliO0B +8n6cLZ38VE7ZmrjZ9OQV//Gst6S1h7euhC23YppKXi1zovefSDwFU54zi3/oJ++q +PHlP1WGL8IZ34BUgRTtG4TVolvp0smjkMvyRu5hIdKtzjV93Ccx15gVgyk+o1IEG +fZ2Kbw/Dd8JfoPS7KaSCmJKxXQIVAIZbIaDFRGa2qcMkW2HWASyNDl7bAoGBANtz +IdhfMq+l2I5iofY2oj3HI21Kj3LtZrWEg3W+/4rVhL3lTm0Nne1rl9yGujrjQwy5 +Zp9V4A/w9w2O10Lx4K6hj34Eefy/aQnZwNdNhv/FQP7Az0fju+Yl6L13OOHQrL0z +Q+9cF7zEosekEnBQx3v6psNknKgD3Shgx+GO/LpCA4GFAAKBgQCVS7m77nuNAlZ8 +wvUqcooxXMPkxJFl54NxAsAul9KP9KN4svm0O3Zrb7t2FOtXRM8zU3TqMpryq1o5 +mpMPsZDg6RXo9BF7Hn0DoZ6PJTamkFA6md+NyTJWJKvXC7iJ8fGDBJqTciUHuCKr +12AztQ8bFWsrTgTzPE3p6U5ckcgV1TAJBgcqhkjOOAQDAy8AMCwCFB2NZGWm5EDl +86ayV3c1PEDukgQIAhQow38rQkN/VwHVeSW9DqEshXHjuQ== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIC7DCCAqwCCQCncbCtQbjuyzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw +FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD +VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xOTA2MDQxMjQ4MDVaFw00 +NTA2MDQxMjQ4MDVaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u +IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl +cnZpY2VzIExMQzCCAbYwggErBgcqhkjOOAQBMIIBHgKBgQC12Nr1gMrHcFSZ7S/A +pQBSCMHWmn2qeoQTMVWqe50fnTd0zGFxDdIjKxUK58/8zjWG5uR4TXRzmZpGpmXB +bSufAR6BGqud2LnT/HIWGJAsnX2uOtSyNfCoJigqwhea5w+CqZ6I7iBDdnB4TtTw +qO6TlnExHFVj8LMkylZgiaE1CQIVAIhdobse4K0QnbAhCL6R2euQzloXAoGAV/21 +WUuMz/79Ga0JvQcz1FNy1sT0pU9rU4TenqLQIt5iccn/7EIfNtvVO5TZKulIKq7J +gXZr0x/KIT8zsNweetLOaGehPIYRMPX0vunMMR7hN7qA7W17WZv/76adywIsnDKq +ekfe15jinaX8MsKUdyDK7Y+ifCG4PVhoM4+W2XwDgYQAAoGAIxOKbVgwLxbn6Pi2 +6hBOihFv16jKxAQI0hHzXJLV0Vyv9QwnqjJJRfOCy3dB0zicLXiIxeIdYfvqJr+u +hlN8rGxEZYYJjEUKMGvsc0DW85jonXz0bNfcP0aaKH0lKKVjL+OZi5n2kn9wgdo5 +F3CVnMl8BUra8A1Tr2yrrE6TVZ4wCQYHKoZIzjgEAwMvADAsAhQfa7MCJZ+/TEY5 +AUr0J4wm8VzjoAIUSYZVu2NdRJ/ERPmDfhW5EsjHlCA= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIC7TCCAqwCCQCMElHPdwG37jAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw +FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD +VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xOTA0MjkyMDM1MjJaFw00 +NTA0MjkyMDM1MjJaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u +IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl +cnZpY2VzIExMQzCCAbYwggErBgcqhkjOOAQBMIIBHgKBgQDAkoL4YfdMI/MrQ0oL +NPfeEk94eiCQA5xNOnU7+2eVQtEqjFbDADFENh1p3sh9Q9OoheLFH8qpSfNDWn/0 +ktCS909ApTY6Esx1ExjGSeQq/U+SC2JSuuTT4WFMKJ63a/czMtFkEPPnVIjJJJmT +HJSKSsVUgpdDIRvJXuyB0zdB+wIVALQ3OLaVGdlPMNfS1nD/Yyn+32wnAoGAPBQ3 +7XHg5NLOS4326eFRUT+4ornQFjJjP6dp3pOBEzpImNmZTtkCNNUKE4Go9hv5T4lh +R0pODvWv0CBupMAZVBP9ObplXPCyEIZtuDqVa7ukPOUpQNgQhLLAqkigTyXVOSmt 
+ECBj9tu5WNP/x3iTZTHJ+g0rhIqpgh012UwJpKADgYQAAoGAV1OEQPYQUg5/M3xf +6vE7jKTxxyFWEyjKfJK7PZCzOIGrE/swgACy4PYQW+AwcUweSlK/Hx2OaZVUKzWo +wDUbeu65DcRdw2rSwCbBTU342sitFo/iGCV/Gjf+BaiAJtxniZze7J1ob8vOBeLv +uaMQmgOYeZ5e0fl04GtqPl+lhcQwCQYHKoZIzjgEAwMwADAtAhQdoeWLrkm0K49+ +AeBK+j6m2h9SKQIVAIBNhS2a8cQVABDCQXVXrc0tOmO8 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIC8DCCArCgAwIBAgIGAXbVDEikMAkGByqGSM44BAMwXDELMAkGA1UEBhMCVVMx +GTAXBgNVBAgMEFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcMB1NlYXR0bGUxIDAe +BgNVBAoMF0FtYXpvbiBXZWIgU2VydmljZXMgTExDMB4XDTIxMDEwNjAwMTUyMFoX +DTQ3MDEwNjAwMTUyMFowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgMEFdhc2hpbmd0 +b24gU3RhdGUxEDAOBgNVBAcMB1NlYXR0bGUxIDAeBgNVBAoMF0FtYXpvbiBXZWIg +U2VydmljZXMgTExDMIIBuDCCASwGByqGSM44BAEwggEfAoGBAP1/U4EddRIpUt9K +nC7s5Of2EbdSPO9EAMMeP4C2USZpRV1AIlH7WT2NWPq/xfW6MPbLm1Vs14E7gB00 +b/JmYLdrmVClpJ+f6AR7ECLCT7up1/63xhv4O1fnxqimFQ8E+4P208UewwI1VBNa +FpEy9nXzrith1yrv8iIDGZ3RSAHHAhUAl2BQjxUjC8yykrmCouuEC/BYHPUCgYEA +9+GghdabPd7LvKtcNrhXuXmUr7v6OuqC+VdMCz0HgmdRWVeOutRZT+ZxBxCBgLRJ +FnEj6EwoFhO3zwkyjMim4TwWeotUfI0o4KOuHiuzpnWRbqN/C/ohNWLx+2J6ASQ7 +zKTxvqhRkImog9/hWuWfBpKLZl6Ae1UlZAFMO/7PSSoDgYUAAoGBAPjuiEx05N3J +Q6cVwntJie67D8OuNo4jGRn+crEtL7YO0jSVB9zGE1ga+UgRPIaYETL293S8rTJT +VgXAqdpBwfaHC6NUzre8U8iJ8FMNnlP9Gw1oUIlgQBjORyynVJexoB31TDZM+/52 +g9O/bpq1QqNyKbeIgyBBlc1dAtr1QLnsMAkGByqGSM44BAMDLwAwLAIUK8E6RDIR +twK+9qnaTOBhvO/njuQCFFocyT1OxK+UDR888oNsdgtif2Sf +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDNjCCAh4CCQD3yZ1w1AVkTzANBgkqhkiG9w0BAQsFADBcMQswCQYDVQQGEwJV +UzEZMBcGA1UECBMQV2FzaGluZ3RvbiBTdGF0ZTEQMA4GA1UEBxMHU2VhdHRsZTEg +MB4GA1UEChMXQW1hem9uIFdlYiBTZXJ2aWNlcyBMTEMwIBcNMTUwNTEzMDk1OTE1 +WhgPMjE5NDEwMTYwOTU5MTVaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNo +aW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24g +V2ViIFNlcnZpY2VzIExMQzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AMWk9vyppSmDU3AxZ2Cy2bvKeK3F1UqNpMuyeriizi+NTsZ8tQqtNloaQcqhto/l +gsw9+QSnEJeYWnmivJWOBdn9CyDpN7cpHVmeGgNJL2fvImWyWe2f2Kq/BL9l7N7C +P2ZT52/sH9orlck1n2zO8xPi7MItgPHQwu3OxsGQsAdWucdxjHGtdchulpo1uJ31 +jsTAPKZ3p1/sxPXBBAgBMatPHhRBqhwHO/Twm4J3GmTLWN7oVDds4W3bPKQfnw3r +vtBj/SM4/IgQ3xJslFcl90TZbQbgxIi88R/gWTbs7GsyT2PzstU30yLdJhKfdZKz +/aIzraHvoDTWFaOdy0+OOaECAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAdSzN2+0E +V1BfR3DPWJHWRf1b7zl+1X/ZseW2hYE5r6YxrLv+1VPf/L5I6kB7GEtqhZUqteY7 +zAceoLrVu/7OynRyfQetJVGichaaxLNM3lcr6kcxOowb+WQQ84cwrB3keykH4gRX +KHB2rlWSxta+2panSEO1JX2q5jhcFP90rDOtZjlpYv57N/Z9iQ+dvQPJnChdq3BK +5pZlnIDnVVxqRike7BFy8tKyPj7HzoPEF5mh9Kfnn1YoSVu+61lMVv/qRjnyKfS9 +c96nE98sYFj0ZVBzXw8Sq4Gh8FiVmFHbQp1peGC19idOUqxPxWsasWxQXO0azYsP +9RyWLHKxH1dMuA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw +FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD +VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xMjAxMDUxMjU2MTJaFw0z +ODAxMDUxMjU2MTJaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u +IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl +cnZpY2VzIExMQzCCAbcwggEsBgcqhkjOOAQBMIIBHwKBgQCjkvcS2bb1VQ4yt/5e +ih5OO6kK/n1Lzllr7D8ZwtQP8fOEpp5E2ng+D6Ud1Z1gYipr58Kj3nssSNpI6bX3 +VyIQzK7wLclnd/YozqNNmgIyZecN7EglK9ITHJLP+x8FtUpt3QbyYXJdmVMegN6P +hviYt5JH/nYl4hh3Pa1HJdskgQIVALVJ3ER11+Ko4tP6nwvHwh6+ERYRAoGBAI1j +k+tkqMVHuAFcvAGKocTgsjJem6/5qomzJuKDmbJNu9Qxw3rAotXau8Qe+MBcJl/U +hhy1KHVpCGl9fueQ2s6IL0CaO/buycU1CiYQk40KNHCcHfNiZbdlx1E9rpUp7bnF +lRa2v1ntMX3caRVDdbtPEWmdxSCYsYFDk4mZrOLBA4GEAAKBgEbmeve5f8LIE/Gf +MNmP9CM5eovQOGx5ho8WqD+aTebs+k2tn92BBPqeZqpWRa5P/+jrdKml1qx4llHW 
+MXrs3IgIb6+hUIB+S8dz8/mmO0bpr76RoZVCXYab2CZedFut7qc3WUH9+EUAH5mw
+vSeDCOUMYQR7R9LINYwouHIziqQYMAkGByqGSM44BAMDLwAwLAIUWXBlk40xTwSw
+7HX32MxXYruse9ACFBNGmdX2ZBrVNGrN9N2f6ROk0k9K
+-----END CERTIFICATE-----
+`
+
+// These certificates are for verifying base64-encoded identity document RSA signatures.
+// Copied from:
+// curl https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/verify-signature.html | pcregrep -M -o -e '(?s)-----BEGIN CERTIFICATE-----[^>]*-----END CERTIFICATE-----'
+// Last updated: 2022-05-31
+const signatureRawCerts = `-----BEGIN CERTIFICATE-----
+MIIDIjCCAougAwIBAgIJAKnL4UEDMN/FMA0GCSqGSIb3DQEBBQUAMGoxCzAJBgNV
+BAYTAlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdTZWF0dGxlMRgw
+FgYDVQQKEw9BbWF6b24uY29tIEluYy4xGjAYBgNVBAMTEWVjMi5hbWF6b25hd3Mu
+Y29tMB4XDTE0MDYwNTE0MjgwMloXDTI0MDYwNTE0MjgwMlowajELMAkGA1UEBhMC
+VVMxEzARBgNVBAgTCldhc2hpbmd0b24xEDAOBgNVBAcTB1NlYXR0bGUxGDAWBgNV
+BAoTD0FtYXpvbi5jb20gSW5jLjEaMBgGA1UEAxMRZWMyLmFtYXpvbmF3cy5jb20w
+gZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAIe9GN//SRK2knbjySG0ho3yqQM3
+e2TDhWO8D2e8+XZqck754gFSo99AbT2RmXClambI7xsYHZFapbELC4H91ycihvrD
+jbST1ZjkLQgga0NE1q43eS68ZeTDccScXQSNivSlzJZS8HJZjgqzBlXjZftjtdJL
+XeE4hwvo0sD4f3j9AgMBAAGjgc8wgcwwHQYDVR0OBBYEFCXWzAgVyrbwnFncFFIs
+77VBdlE4MIGcBgNVHSMEgZQwgZGAFCXWzAgVyrbwnFncFFIs77VBdlE4oW6kbDBq
+MQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHU2Vh
+dHRsZTEYMBYGA1UEChMPQW1hem9uLmNvbSBJbmMuMRowGAYDVQQDExFlYzIuYW1h
+em9uYXdzLmNvbYIJAKnL4UEDMN/FMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEF
+BQADgYEAFYcz1OgEhQBXIwIdsgCOS8vEtiJYF+j9uO6jz7VOmJqO+pRlAbRlvY8T
+C1haGgSI/A1uZUKs/Zfnph0oEI0/hu1IIJ/SKBDtN5lvmZ/IzbOPIJWirlsllQIQ
+7zvWbGd9c9+Rm3p04oTvhup99la7kZqevJK0QRdD/6NpCKsqP/0=
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIICSzCCAbQCCQDtQvkVxRvK9TANBgkqhkiG9w0BAQsFADBqMQswCQYDVQQGEwJV
+UzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHU2VhdHRsZTEYMBYGA1UE
+ChMPQW1hem9uLmNvbSBJbmMuMRowGAYDVQQDExFlYzIuYW1hem9uYXdzLmNvbTAe
+Fw0xOTAyMDMwMzAwMDZaFw0yOTAyMDIwMzAwMDZaMGoxCzAJBgNVBAYTAlVTMRMw
+EQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdTZWF0dGxlMRgwFgYDVQQKEw9B
+bWF6b24uY29tIEluYy4xGjAYBgNVBAMTEWVjMi5hbWF6b25hd3MuY29tMIGfMA0G
+CSqGSIb3DQEBAQUAA4GNADCBiQKBgQC1kkHXYTfc7gY5Q55JJhjTieHAgacaQkiR
+Pity9QPDE3b+NXDh4UdP1xdIw73JcIIG3sG9RhWiXVCHh6KkuCTqJfPUknIKk8vs
+M3RXflUpBe8Pf+P92pxqPMCz1Fr2NehS3JhhpkCZVGxxwLC5gaG0Lr4rFORubjYY
+Rh84dK98VwIDAQABMA0GCSqGSIb3DQEBCwUAA4GBAA6xV9f0HMqXjPHuGILDyaNN
+dKcvplNFwDTydVg32MNubAGnecoEBtUPtxBsLoVYXCOb+b5/ZMDubPF9tU/vSXuo
+TpYM5Bq57gJzDRaBOntQbX9bgHiUxw6XZWaTS/6xjRJDT5p3S1E0mPI3lP/eJv4o
+Ezk5zb3eIf10/sqt4756
+-----END CERTIFICATE-----
+-----BEGIN CERTIFICATE-----
+MIIDPDCCAqWgAwIBAgIJAMl6uIV/zqJFMA0GCSqGSIb3DQEBCwUAMHIxCzAJBgNV
+BAYTAlVTMRMwEQYDVQQIDApXYXNoaW5ndG9uMRAwDgYDVQQHDAdTZWF0dGxlMSAw
+HgYDVQQKDBdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzEaMBgGA1UEAwwRZWMyLmFt
+YXpvbmF3cy5jb20wIBcNMTkwNDI2MTQzMjQ3WhgPMjE5ODA5MjkxNDMyNDdaMHIx
+CzAJBgNVBAYTAlVTMRMwEQYDVQQIDApXYXNoaW5ndG9uMRAwDgYDVQQHDAdTZWF0
+dGxlMSAwHgYDVQQKDBdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzEaMBgGA1UEAwwR
+ZWMyLmFtYXpvbmF3cy5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALVN
+CDTZEnIeoX1SEYqq6k1BV0ZlpY5y3KnoOreCAE589TwS4MX5+8Fzd6AmACmugeBP
+Qk7Hm6b2+g/d4tWycyxLaQlcq81DB1GmXehRkZRgGeRge1ePWd1TUA0I8P/QBT7S
+gUePm/kANSFU+P7s7u1NNl+vynyi0wUUrw7/wIZTAgMBAAGjgdcwgdQwHQYDVR0O
+BBYEFILtMd+T4YgH1cgc+hVsVOV+480FMIGkBgNVHSMEgZwwgZmAFILtMd+T4YgH
+1cgc+hVsVOV+480FoXakdDByMQswCQYDVQQGEwJVUzETMBEGA1UECAwKV2FzaGlu
+Z3RvbjEQMA4GA1UEBwwHU2VhdHRsZTEgMB4GA1UECgwXQW1hem9uIFdlYiBTZXJ2
+aWNlcyBMTEMxGjAYBgNVBAMMEWVjMi5hbWF6b25hd3MuY29tggkAyXq4hX/OokUw
+DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOBgQBhkNTBIFgWFd+ZhC/LhRUY +4OjEiykmbEp6hlzQ79T0Tfbn5A4NYDI2icBP0+hmf6qSnIhwJF6typyd1yPK5Fqt +NTpxxcXmUKquX+pHmIkK1LKDO8rNE84jqxrxRsfDi6by82fjVYf2pgjJW8R1FAw+ +mL5WQRFexbfB5aXhcMo0AA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICNjCCAZ+gAwIBAgIJAKumfZiRrNvHMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xOTExMjcw +NzE0MDVaGA8yMTk5MDUwMjA3MTQwNVowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT +EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft +YXpvbiBXZWIgU2VydmljZXMgTExDMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB +gQDFd571nUzVtke3rPyRkYfvs3jh0C0EMzzG72boyUNjnfw1+m0TeFraTLKb9T6F +7TuB/ZEN+vmlYqr2+5Va8U8qLbPF0bRH+FdaKjhgWZdYXxGzQzU3ioy5W5ZM1VyB +7iUsxEAlxsybC3ziPYaHI42UiTkQNahmoroNeqVyHNnBpQIDAQABMA0GCSqGSIb3 +DQEBCwUAA4GBAAJLylWyElEgOpW4B1XPyRVD4pAds8Guw2+krgqkY0HxLCdjosuH +RytGDGN+q75aAoXzW5a7SGpxLxk6Hfv0xp3RjDHsoeP0i1d8MD3hAC5ezxS4oukK +s5gbPOnokhKTMPXbTdRn5ZifCbWlx+bYN/mTYKvxho7b5SVg2o1La9aK +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICNjCCAZ+gAwIBAgIJAOZ3GEIaDcugMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xOTEwMjQx +NTE5MDlaGA8yMTk5MDMyOTE1MTkwOVowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT +EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft +YXpvbiBXZWIgU2VydmljZXMgTExDMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB +gQCjiPgW3vsXRj4JoA16WQDyoPc/eh3QBARaApJEc4nPIGoUolpAXcjFhWplo2O+ +ivgfCsc4AU9OpYdAPha3spLey/bhHPRi1JZHRNqScKP0hzsCNmKhfnZTIEQCFvsp +DRp4zr91/WS06/flJFBYJ6JHhp0KwM81XQG59lV6kkoW7QIDAQABMA0GCSqGSIb3 +DQEBCwUAA4GBAGLLrY3P+HH6C57dYgtJkuGZGT2+rMkk2n81/abzTJvsqRqGRrWv +XRKRXlKdM/dfiuYGokDGxiC0Mg6TYy6wvsR2qRhtXW1OtZkiHWcQCnOttz+8vpew +wx8JGMvowtuKB1iMsbwyRqZkFYLcvH+Opfb/Aayi20/ChQLdI6M2R5VU +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICMzCCAZygAwIBAgIGAXbVDG2yMA0GCSqGSIb3DQEBBQUAMFwxCzAJBgNVBAYT +AlVTMRkwFwYDVQQIDBBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHDAdTZWF0dGxl +MSAwHgYDVQQKDBdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0yMTAxMDYwMDE1 +MzBaGA8yMjAwMDEwNjAwMTUzMFowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgMEFdh +c2hpbmd0b24gU3RhdGUxEDAOBgNVBAcMB1NlYXR0bGUxIDAeBgNVBAoMF0FtYXpv +biBXZWIgU2VydmljZXMgTExDMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCn +CS/Vbt0gQ1ebWcur2hSO7PnJifE4OPxQ7RgSAlc4/spJp1sDP+ZrS0LO1ZJfKhXf +1R9S3AUwLnsc7b+IuVXdY5LK9RKqu64nyXP5dx170zoL8loEyCSuRR2fs+04i2Qs +WBVP+KFNAn7P5L1EHRjkgTO8kjNKviwRV+OkP9ab5wIDAQABMA0GCSqGSIb3DQEB +BQUAA4GBAI4WUy6+DKh0JDSzQEZNyBgNlSoSuC2owtMxCwGB6nBfzzfcekWvs6eo +fLTSGovrReX7MtVgrcJBZjmPIentw5dWUs+87w/g9lNwUnUt0ZHYyh2tuBG6hVJu +UEwDJ/z3wDd6wQviLOTF3MITawt9P8siR1hXqLJNxpjRQFZrgHqi +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICSzCCAbQCCQCQu97teKRD4zANBgkqhkiG9w0BAQUFADBqMQswCQYDVQQGEwJV +UzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHU2VhdHRsZTEYMBYGA1UE +ChMPQW1hem9uLmNvbSBJbmMuMRowGAYDVQQDExFlYzIuYW1hem9uYXdzLmNvbTAe +Fw0xMzA4MjExMzIyNDNaFw0yMzA4MjExMzIyNDNaMGoxCzAJBgNVBAYTAlVTMRMw +EQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQHEwdTZWF0dGxlMRgwFgYDVQQKEw9B +bWF6b24uY29tIEluYy4xGjAYBgNVBAMTEWVjMi5hbWF6b25hd3MuY29tMIGfMA0G +CSqGSIb3DQEBAQUAA4GNADCBiQKBgQC6GFQ2WoBl1xZYH85INUMaTc4D30QXM6f+ +YmWZyJD9fC7Z0UlaZIKoQATqCO58KNCre+jECELYIX56Uq0lb8LRLP8tijrQ9Sp3 +qJcXiH66kH0eQ44a5YdewcFOy+CSAYDUIaB6XhTQJ2r7bd4A2vw3ybbxTOWONKdO +WtgIe3M3iwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAHzQC5XZVeuD9GTJTsbO5AyH +ZQvki/jfARNrD9dgBRYZzLC/NOkWG6M9wlrmks9RtdNxc53nLxKq4I2Dd73gI0yQ 
+wYu9YYwmM/LMgmPlI33Rg2Ohwq4DVgT3hO170PL6Fsgiq3dMvctSImJvjWktBQaT +bcAgaZLHGIpXPrWSA2d+ +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDCzCCAnSgAwIBAgIJAIe9Hnq82O7UMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0yMTA3MTQx +NDI3NTdaFw0yNDA3MTMxNDI3NTdaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBX +YXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6 +b24gV2ViIFNlcnZpY2VzIExMQzCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA +qaIcGFFTx/SO1W5G91jHvyQdGP25n1Y91aXCuOOWAUTvSvNGpXrI4AXNrQF+CmIO +C4beBASnHCx082jYudWBBl9Wiza0psYc9flrczSzVLMmN8w/c78F/95NfiQdnUQP +pvgqcMeJo82cgHkLR7XoFWgMrZJqrcUK0gnsQcb6kakCAwEAAaOB1DCB0TALBgNV +HQ8EBAMCB4AwHQYDVR0OBBYEFNWV53gWJz72F5B1ZVY4O/dfFYBPMIGOBgNVHSME +gYYwgYOAFNWV53gWJz72F5B1ZVY4O/dfFYBPoWCkXjBcMQswCQYDVQQGEwJVUzEZ +MBcGA1UECBMQV2FzaGluZ3RvbiBTdGF0ZTEQMA4GA1UEBxMHU2VhdHRsZTEgMB4G +A1UEChMXQW1hem9uIFdlYiBTZXJ2aWNlcyBMTEOCCQCHvR56vNju1DASBgNVHRMB +Af8ECDAGAQH/AgEAMA0GCSqGSIb3DQEBCwUAA4GBACrKjWj460GUPZCGm3/z0dIz +M2BPuH769wcOsqfFZcMKEysSFK91tVtUb1soFwH4/Lb/T0PqNrvtEwD1Nva5k0h2 +xZhNNRmDuhOhW1K9wCcnHGRBwY5t4lYL6hNV6hcrqYwGMjTjcAjBG2yMgznSNFle +Rwi/S3BFXISixNx9cILu +-----END CERTIFICATE-----` + +// These certificates are for verifying RSA 2048 signatures. +// Copied from: +// curl https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/verify-rsa2048.html | pcregrep -M -o -e '(?s)-----BEGIN CERTIFICATE-----[^>]*-----END CERTIFICATE-----' +// Last updated: 2022-05-31 +const rsa2048RawCerts = `-----BEGIN CERTIFICATE----- +MIIEEjCCAvqgAwIBAgIJALFpzEAVWaQZMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNTA4MTQw +ODU5MTJaGA8yMTk1MDExNzA4NTkxMlowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT +EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft +YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAjS2vqZu9mEOhOq+0bRpAbCUiapbZMFNQqRg7kTlr7Cf+gDqXKpHPjsng +SfNz+JHQd8WPI+pmNs+q0Z2aTe23klmf2U52KH9/j1k8RlIbap/yFibFTSedmegX +E5r447GbJRsHUmuIIfZTZ/oRlpuIIO5/Vz7SOj22tdkdY2ADp7caZkNxhSP915fk +2jJMTBUOzyXUS2rBU/ulNHbTTeePjcEkvzVYPahD30TeQ+/A+uWUu89bHSQOJR8h +Um4cFApzZgN3aD5j2LrSMu2pctkQwf9CaWyVznqrsGYjYOY66LuFzSCXwqSnFBfv +fFBAFsjCgY24G2DoMyYkF3MyZlu+rwIDAQABo4HUMIHRMAsGA1UdDwQEAwIHgDAd +BgNVHQ4EFgQUrynSPp4uqSECwy+PiO4qyJ8TWSkwgY4GA1UdIwSBhjCBg4AUrynS +Pp4uqSECwy+PiO4qyJ8TWSmhYKReMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBX +YXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6 +b24gV2ViIFNlcnZpY2VzIExMQ4IJALFpzEAVWaQZMBIGA1UdEwEB/wQIMAYBAf8C +AQAwDQYJKoZIhvcNAQELBQADggEBADW/s8lXijwdP6NkEoH1m9XLrvK4YTqkNfR6 +er/uRRgTx2QjFcMNrx+g87gAml11z+D0crAZ5LbEhDMs+JtZYR3ty0HkDk6SJM85 +haoJNAFF7EQ/zCp1EJRIkLLsC7bcDL/Eriv1swt78/BB4RnC9W9kSp/sxd5svJMg +N9a6FAplpNRsWAnbP8JBlAP93oJzblX2LQXgykTghMkQO7NaY5hg/H5o4dMPclTK +lYGqlFUCH6A2vdrxmpKDLmTn5//5pujdD2MN0df6sZWtxwZ0osljV4rDjm9Q3VpA +NWIsDEcp3GUB4proOR+C7PNkY+VGODitBOw09qBGosCBstwyEqY= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEEjCCAvqgAwIBAgIJAM07oeX4xevdMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNjA2MTAx +MjU4MThaGA8yMTk1MTExNDEyNTgxOFowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT +EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft +YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA6v6kGMnRmFDLxBEqXzP4npnL65OO0kmQ7w8YXQygSdmNIoScGSU5wfh9 
+mZdcvCxCdxgALFsFqPvH8fqiE9ttI0fEfuZvHOs8wUsIdKr0Zz0MjSx3cik4tKET +ch0EKfMnzKOgDBavraCDeX1rUDU0Rg7HFqNAOry3uqDmnqtk00XC9GenS3z/7ebJ +fIBEPAam5oYMVFpX6M6St77WdNE8wEU8SuerQughiMVx9kMB07imeVHBiELbMQ0N +lwSWRL/61fA02keGSTfSp/0m3u+lesf2VwVFhqIJs+JbsEscPxOkIRlzy8mGd/JV +ONb/DQpTedzUKLgXbw7KtO3HTG9iXQIDAQABo4HUMIHRMAsGA1UdDwQEAwIHgDAd +BgNVHQ4EFgQU2CTGYE5fTjx7gQXzdZSGPEWAJY4wgY4GA1UdIwSBhjCBg4AU2CTG +YE5fTjx7gQXzdZSGPEWAJY6hYKReMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBX +YXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6 +b24gV2ViIFNlcnZpY2VzIExMQ4IJAM07oeX4xevdMBIGA1UdEwEB/wQIMAYBAf8C +AQAwDQYJKoZIhvcNAQELBQADggEBANdqkIpVypr2PveqUsAKke1wKCOSuw1UmH9k +xX1/VRoHbrI/UznrXtPQOPMmHA2LKSTedwsJuorUn3cFH6qNs8ixBDrl8pZwfKOY +IBJcTFBbI1xBEFkZoO3wczzo5+8vPQ60RVqAaYb+iCa1HFJpccC3Ovajfa4GRdNb +n6FYnluIcDbmpcQePoVQwX7W3oOYLB1QLN7fE6H1j4TBIsFdO3OuKzmaifQlwLYt +DVxVCNDabpOr6Uozd5ASm4ihPPoEoKo7Ilp0fOT6fZ41U2xWA4+HF/89UoygZSo7 +K+cQ90xGxJ+gmlYbLFR5rbJOLfjrgDAb2ogbFy8LzHo2ZtSe60M= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEEjCCAvqgAwIBAgIJALZL3lrQCSTMMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNTA4MTQw +OTAxMzJaGA8yMTk1MDExNzA5MDEzMlowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT +EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft +YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA02Y59qtAA0a6uzo7nEQcnJ26OKF+LRPwZfixBH+EbEN/Fx0gYy1jpjCP +s5+VRNg6/WbfqAsV6X2VSjUKN59ZMnMY9ALA/Ipz0n00Huxj38EBZmX/NdNqKm7C +qWu1q5kmIvYjKGiadfboU8wLwLcHo8ywvfgI6FiGGsEO9VMC56E/hL6Cohko11LW +dizyvRcvg/IidazVkJQCN/4zC9PUOVyKdhW33jXy8BTg/QH927QuNk+ZzD7HH//y +tIYxDhR6TIZsSnRjz3bOcEHxt1nsidc65mY0ejQty4hy7ioSiapw316mdbtE+RTN +fcH9FPIFKQNBpiqfAW5Ebp3Lal3/+wIDAQABo4HUMIHRMAsGA1UdDwQEAwIHgDAd +BgNVHQ4EFgQU7coQx8Qnd75qA9XotSWT3IhvJmowgY4GA1UdIwSBhjCBg4AU7coQ +x8Qnd75qA9XotSWT3IhvJmqhYKReMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBX +YXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6 +b24gV2ViIFNlcnZpY2VzIExMQ4IJALZL3lrQCSTMMBIGA1UdEwEB/wQIMAYBAf8C +AQAwDQYJKoZIhvcNAQELBQADggEBAFZ1e2MnzRaXCaLwEC1pW/f0oRG8nHrlPZ9W +OYZEWbh+QanRgaikBNDtVTwARQcZm3z+HWSkaIx3cyb6vM0DSkZuiwzm1LJ9rDPc +aBm03SEt5v8mcc7sXWvgFjCnUpzosmky6JheCD4O1Cf8k0olZ93FQnTrbg62OK0h +83mGCDeVKU3hLH97FYoUq+3N/IliWFDhvibAYYKFJydZLhIdlCiiB99AM6Sg53rm +oukS3csyUxZyTU2hQfdjyo1nqW9yhvFAKjnnggiwxNKTTPZzstKW8+cnYwiiTwJN +QpVoZdt0SfbuNnmwRUMi+QbuccXweav29QeQ3ADqjgB0CZdSRKk= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEEjCCAvqgAwIBAgIJANNPkIpcyEtIMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNTEwMjkw +OTAzMDdaGA8yMTk1MDQwMzA5MDMwN1owXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT +EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft +YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEApHQGvHvq3SVCzDrC7575BW7GWLzcj8CLqYcL3YY7Jffupz7OjcftO57Z +4fo5Pj0CaS8DtPzh8+8vdwUSMbiJ6cDd3ooio3MnCq6DwzmsY+pY7CiI3UVG7KcH +4TriDqr1Iii7nB5MiPJ8wTeAqX89T3SYaf6Vo+4GCb3LCDGvnkZ9TrGcz2CHkJsj +AIGwgopFpwhIjVYm7obmuIxSIUv+oNH0wXgDL029Zd98SnIYQd/njiqkzE+lvXgk +4h4Tu17xZIKBgFcTtWPky+POGu81DYFqiWVEyR2JKKm2/iR1dL1YsT39kbNg47xY +aR129sS4nB5Vw3TRQA2jL0ToTIxzhQIDAQABo4HUMIHRMAsGA1UdDwQEAwIHgDAd +BgNVHQ4EFgQUgepyiONs8j+q67dmcWu+mKKDa+gwgY4GA1UdIwSBhjCBg4AUgepy +iONs8j+q67dmcWu+mKKDa+ihYKReMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBX +YXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6 
+b24gV2ViIFNlcnZpY2VzIExMQ4IJANNPkIpcyEtIMBIGA1UdEwEB/wQIMAYBAf8C +AQAwDQYJKoZIhvcNAQELBQADggEBAGLFWyutf1u0xcAc+kmnMPqtc/Q6b79VIX0E +tNoKMI2KR8lcV8ZElXDb0NC6v8UeLpe1WBKjaWQtEjL1ifKg9hdY9RJj4RXIDSK7 +33qCQ8juF4vep2U5TTBd6hfWxt1Izi88xudjixmbpUU4YKr8UPbmixldYR+BEx0u +B1KJi9l1lxvuc/Igy/xeHOAZEjAXzVvHp8Bne33VVwMiMxWECZCiJxE4I7+Y6fqJ +pLLSFFJKbNaFyXlDiJ3kXyePEZSc1xiWeyRB2ZbTi5eu7vMG4i3AYWuFVLthaBgu +lPfHafJpj/JDcqt2vKUKfur5edQ6j1CGdxqqjawhOTEqcN8m7us= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDOzCCAiOgAwIBAgIJAJNKhJhaJOuMMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNjA3Mjkx +MTM3MTdaGA8yMTk2MDEwMjExMzcxN1owXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT +EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft +YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAhDUh6j1ACSt057nSxAcwMaGr8Ez87VA2RW2HyY8l9XoHndnxmP50Cqld ++26AJtltlqHpI1YdtnZ6OrVgVhXcVtbvte0lZ3ldEzC3PMvmISBhHs6A3SWHA9ln +InHbToLX/SWqBHLOX78HkPRaG2k0COHpRy+fG9gvz8HCiQaXCbWNFDHZev9OToNI +xhXBVzIa3AgUnGMalCYZuh5AfVRCEeALG60kxMMC8IoAN7+HG+pMdqAhJxGUcMO0 +LBvmTGGeWhi04MUZWfOkwn9JjQZuyLg6B1OD4Y6s0LB2P1MovmSJKGY4JcF8Qu3z +xxUbl7Bh9pvzFR5gJN1pjM2n3gJEPwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQAJ +UNKM+gIIHNk0G0tzv6vZBT+o/vt+tIp8lEoZwaPQh1121iw/I7ZvhMLAigx7eyvf +IxUt9/nf8pxWaeGzi98RbSmbap+uxYRynqe1p5rifTamOsguuPrhVpl12OgRWLcT +rjg/K60UMXRsmg2w/cxV45pUBcyVb5h6Op5uEVAVq+CVns13ExiQL6kk3guG4+Yq +LvP1p4DZfeC33a2Rfre2IHLsJH5D4SdWcYqBsfTpf3FQThH0l0KoacGrXtsedsxs +9aRd7OzuSEJ+mBxmzxSjSwM84Ooh78DjkdpQgv967p3d+8NiSLt3/n7MgnUy6WwB +KtDujDnB+ttEHwRRngX7 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEEjCCAvqgAwIBAgIJAMcyoxx4U0xxMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNTA4MTQw +ODU4MDJaGA8yMTk1MDExNzA4NTgwMlowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT +EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft +YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAw45IhGZVbQcy1fHBqzROhO8CsrDzxj/WP4cRbJo/2DAnimVrCCDs5O86 +FA39Zo1xsDuJHDlwMKqeXYXkJXHYbcPWc6EYYAnR+PlLG+aNSOGUzsy202S03hT0 +B20hWPCqpPp39itIRhG4id6nbNRJOzLm6evHuepMAHR4/OV7hyGOiGaV/v9zqiNA +pMCLhbh2xk0PO35HCVBuWt3HUjsgeks2eEsu9Ws6H3JXTCfiqp0TjyRWapM29OhA +cRJfJ/d/+wBTz1fkWOZ7TF+EWRIN5ITEadlDTPnF1r8kBRuDcS/lIGFwrOOHLo4C +cKoNgXkhTqDDBDu6oNBb2rS0K+sz3QIDAQABo4HUMIHRMAsGA1UdDwQEAwIHgDAd +BgNVHQ4EFgQUqBy7D847Ya/w321Dfr+rBJGsGTwwgY4GA1UdIwSBhjCBg4AUqBy7 +D847Ya/w321Dfr+rBJGsGTyhYKReMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBX +YXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6 +b24gV2ViIFNlcnZpY2VzIExMQ4IJAMcyoxx4U0xxMBIGA1UdEwEB/wQIMAYBAf8C +AQAwDQYJKoZIhvcNAQELBQADggEBACOoWSBf7b9AlcNrl4lr3QWWSc7k90/tUZal +PlT0G3Obl2x9T/ZiBsQpbUvs0lfotG0XqGVVHcIxF38EbVwbw9KJGXbGSCJSEJkW +vGCtc/jYMHXfhx67Szmftm/MTYNvnzsyQQ3v8y3Rdah+xe1NPdpFrwmfL6xe3pFF +cY33KdHA/3PNLdn9CaEsHmcmj3ctaaXLFIzZhQyyjtsrgGfTLvXeXRokktvsLDS/ +YgKedQ+jFjzVJqgr4NjfY/Wt7/8kbbdhzaqlB5pCPjLLzv0zp/XmO6k+JvOePOGh +JzGk5t1QrSju+MqNPFk3+1O7o910Vrhqw1QRB0gr1ExrviLbyfU= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEEjCCAvqgAwIBAgIJAKD+v6LeR/WrMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNTA4MTQw +OTA4MTlaGA8yMTk1MDExNzA5MDgxOVowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT +EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft 
+YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAka8FLhxs1cSJGK+Q+q/vTf8zVnDAPZ3U6oqppOW/cupCtpwMAQcky8DY +Yb62GF7+C6usniaq/9W6xPn/3o//wti0cNt6MLsiUeHqNl5H/4U/Q/fR+GA8pJ+L +npqZDG2tFi1WMvvGhGgIbScrjR4VO3TuKy+rZXMYvMRk1RXZ9gPhk6evFnviwHsE +jV5AEjxLz3duD+u/SjPp1vloxe2KuWnyC+EKInnka909sl4ZAUh+qIYfZK85DAjm +GJP4W036E9wTJQF2hZJrzsiB1MGyC1WI9veRISd30izZZL6VVXLXUtHwVHnVASrS +zZDVpzj+3yD5hRXsvFigGhY0FCVFnwIDAQABo4HUMIHRMAsGA1UdDwQEAwIHgDAd +BgNVHQ4EFgQUxC2l6pvJaRflgu3MUdN6zTuP6YcwgY4GA1UdIwSBhjCBg4AUxC2l +6pvJaRflgu3MUdN6zTuP6YehYKReMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBX +YXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6 +b24gV2ViIFNlcnZpY2VzIExMQ4IJAKD+v6LeR/WrMBIGA1UdEwEB/wQIMAYBAf8C +AQAwDQYJKoZIhvcNAQELBQADggEBAIK+DtbUPppJXFqQMv1f2Gky5/82ZwgbbfXa +HBeGSii55b3tsyC3ZW5ZlMJ7Dtnr3vUkiWbV1EUaZGOUlndUFtXUMABCb/coDndw +CAr53XTv7UwGVNe/AFO/6pQDdPxXn3xBhF0mTKPrOGdvYmjZUtQMSVb9lbMWCFfs +w+SwDLnm5NF4yZchIcTs2fdpoyZpOHDXy0xgxO1gWhKTnYbaZOxkJvEvcckxVAwJ +obF8NyJla0/pWdjhlHafEXEN8lyxyTTyOa0BGTuYOBD2cTYYynauVKY4fqHUkr3v +Z6fboaHEd4RFamShM8uvSu6eEFD+qRmvqlcodbpsSOhuGNLzhOQ= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDOzCCAiOgAwIBAgIJANBx0E2bOCEPMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNjA4MTEx +NDU2NDJaGA8yMTk2MDExNTE0NTY0MlowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT +EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft +YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEArYS3mJLGaMrh2DmiPLbqr4Z+xWXTzBWCjOwpsuHE9H6dWUUyl2Bgnu+Z +d8QvW306Yleec45M4F2RA3J4hWHtShzsMlOJVRt+YulGeTf9OCPr26QmIFfs5nD4 +fgsJQEry2MBSGA9Fxq3Cw6qkWcrOPsCR+bHOU0XykdKl0MnIbpBf0kTfciAupQEA +dEHnM2J1L2iI0NTLBgKxy5PXLH9weX20BFauNmHH9/J07OpwL20SN5f8TxcM9+pj +Lbk8h1V4KdIwVQpdWkbDL9BCGlYjyadQJxSxz1J343NzrnDM0M4h4HtVaKOS7bQo +Bqt2ruopLRCYgcuFHck/1348iAmbRQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQBG +wujwU1Otpi3iBgmhjMClgZyMMn0aQIxMigoFNqXMUNx1Mq/e/Tx+SNaOEAu0n2FF +aiYjvY0/hXOx75ewzZvM7/zJWIdLdsgewpUqOBH4DXFhbSk2TxggSPb0WRqTBxq5 +Ed7F7+7GRIeBbRzdLqmISDnfqey8ufW0ks51XcQNomDIRG5s9XZ5KHviDCar8FgL +HngBCdFI04CMagM+pwTO9XN1Ivt+NzUj208ca3oP1IwEAd5KhIhPLcihBQA5/Lpi +h1s3170z1JQ1HZbDrH1pgp+8hSI0DwwDVb3IIH8kPR/J0Qn+hvOl2HOpaUg2Ly0E +pt1RCZe+W7/dF4zsbqwK +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDOzCCAiOgAwIBAgIJALWSfgHuT/ARMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNzA1MzEx +MTE4MTZaGA8yMTk2MTEwMzExMTgxNlowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT +EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft +YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAy5V7KDqnEvF3DrSProFcgu/oL+QYD62b1U+Naq8aPuljJe127Sm9WnWA +EBdOSASkOaQ9fzjCPoG5SGgWKxYoZjsevHpmzjVv9+Ci+F57bSuMbjgUbvbRIFUB +bxQojVoXQPHgK5v433ODxkQ4sjRyUbf4YV1AFdfU7zabC698YgPVOExGhXPlTvco +8mlc631ubw2g52j0lzaozUkHPSbknTomhQIvO6kUfX0e0TDMH4jLDG2ZIrUB1L4r +OWKG4KetduFrRZyDHF6ILZu+s6ywiMicUd+2UllDFC6oas+a8D11hmO/rpWU/ieV +jj4rWAFrsebpn+Nhgy96iiVUGS2LuQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQDE +iYv6FQ6knXCg+svlcaQG9q59xUC5z8HvJZ1+SxzPKKC4PKQdKvIIfE8GxVXqlZG1 +cl5WKTFDMapnzb9RV/DTaVzWx3cMYT77vm1Hl1XGjhx611CGcENH1egI3lOTILsa ++KfopuJEQQ9TDMAIkGjhA+KieU/U5Ctv9fdej6d0GC6OEuwKkTNzPWue6UMq8d4H +2xqJboWsE1t4nybEosvZfQJcZ8jyIYcYBnsG13vCLM+ixjuU5MVVQNMY/gBJzqJB +V+U0QiGiuT5cYgY/QihxdHt99zwGaE0ZBC7213NKrlNuLSrqhDI2NLu8NsExqOFy +OmY0v/xVmQUQl26jJXaM +-----END CERTIFICATE----- +-----BEGIN 
CERTIFICATE----- +MIIEEjCCAvqgAwIBAgIJAOrmqHuaUt0vMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNTEwMjkw +OTA2MTlaGA8yMTk1MDQwMzA5MDYxOVowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT +EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft +YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAjE7nVu+aHLtzp9FYV25Qs1mvJ1JXD7J0iQ1Gs/RirW9a5ZECCtc4ssnf +zQHq2JRVr0GRchvDrbm1HaP/avtFQR/Thvfltwu9AROVT22dUOTvERdkNzveoFCy +hf52Rqf0DMrLXG8ZmQPPXPDFAv+sVMWCDftcChxRYZ6mP9O+TpgYNT1krD5PdvJU +7HcXrkNHDYqbsg8A+Mu2hzl0QkvUET83Csg1ibeK54HP9w+FsD6F5W+6ZSHGJ88l +FI+qYKs7xsjJQYgXWfEt6bbckWs1kZIaIOyMzYdPF6ClYzEec/UhIe/uJyUUNfpT +VIsI5OltBbcPF4c7Y20jOIwwI2SgOQIDAQABo4HUMIHRMAsGA1UdDwQEAwIHgDAd +BgNVHQ4EFgQUF2DgPUZivKQR/Zl8mB/MxIkjZDUwgY4GA1UdIwSBhjCBg4AUF2Dg +PUZivKQR/Zl8mB/MxIkjZDWhYKReMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBX +YXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6 +b24gV2ViIFNlcnZpY2VzIExMQ4IJAOrmqHuaUt0vMBIGA1UdEwEB/wQIMAYBAf8C +AQAwDQYJKoZIhvcNAQELBQADggEBAGm6+57W5brzJ3+T8/XsIdLTuiBSe5ALgSqI +qnO5usUKAeQsa+kZIJPyEri5i8LEodh46DAF1RlXTMYgXXxl0YggX88XPmPtok17 +l4hib/D9/lu4IaFIyLzYNSzsETYWKWoGVe7ZFz60MTRTwY2u8YgJ5dec7gQgPSGj +avB0vTIgoW41G58sfw5b+wjXCsh0nROon79RcQFFhGnvup0MZ+JbljyhZUYFzCli +31jPZiKzqWa87xh2DbAyvj2KZrZtTe2LQ48Z4G8wWytJzxEeZdREe4NoETf+Mu5G +4CqoaPR05KWkdNUdGNwXewydb3+agdCgfTs+uAjeXKNdSpbhMYg= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDOzCCAiOgAwIBAgIJAO/+DgYF78KwMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xOTA0Mjky +MDM1MjJaGA8yMTk4MTAwMjIwMzUyMlowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT +EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft +YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAv1ZLV+Z/P6INq+R1qLkzETBg7sFGKPiwHekbpuB6lrRxKHhj8V9vaReM +lnv1Ur5LAPpMPYDsuJ4WoUbPYAqVqyMAo7ikJHCCM1cXgZJefgN6z9bpS+uA3YVh +V/0ipHh/X2hc2S9wvxKWiSHu6Aq9GVpqL035tJQD+NJuqFd+nXrtcw4yGtmvA6wl +5Bjn8WdsP3xOTKjrByYY1BhXpP/f1ohU9jE9dstsRXLa+XTgTPWcWdCS2oRTWPGR +c5Aeh47nnDsyQfP9gLxHeYeQItV/BD9kU/2Hn6mnRg/B9/TYH8qzlRTzLapXp4/5 +iNwusrTNexGl8BgvAPrfhjDpdgYuTwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQB7 +5ya11K/hKgvaRTvZwVV8GlVZt0CGPtNvOi4AR/UN6TMm51BzUB5nurB4z0R2MoYO +Uts9sLGvSFALJ4otoB77hyNpH3drttU1CVVwal/yK/RQLSon/IoUkaGEbqalu+mH +nYad5IG4tEbmepX456XXcO58MKmnczNbPyw3FRzUZQtI/sf94qBwJ1Xo6XbzPKMy +xjL57LHIZCssD+XPifXay69OFlsCIgLim11HgPkRIHEOXLSf3dsW9r+4CjoZqB/Z +jj/P4TLCxbYCLkvglwaMjgEWF40Img0fhx7yT2X92MiSrs3oncv/IqfdVTiN8OXq +jgnq1bf+EZEZKvb6UCQV +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDOzCCAiOgAwIBAgIJALc/uRxg++EnMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xODA0MTAx +NDAwMTFaGA8yMTk3MDkxMzE0MDAxMVowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT +EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft +YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAzwCGJEJIxqtr2PD2a1mA6LhRzKhTBa1AZsg3eYfpETXIVlrpojMfvVoN +qHvGshWLgrGTT6os/3gsaADheSaJKavxwX3X6tJA8fvEGqr3a1C1MffH9hBWbQqC +LbfUTAbkwis4GdTUwOwPjT1Cm3u9R/VzilCNwkj7iQ65AFAI8Enmsw3UGldEsop4 +yChKB3KW3WI0FTh0+gD0YtjrqqYJxpGOYBpJp5vwdd3fZ4t1vidmDMs7liv4f9Bx +p0oSmUobU4GUlFhBchK1DukICVQdnOVzdMonYm7s+HtpFbVHR8yf6QoixBKGdSal +mBf7+y0ixjCn0pnC0VLVooGo4mi17QIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQDG 
+4ONZiixgk2sjJctwbyD5WKLTH6+mxYcDw+3y/F0fWz561YORhP2FNnPOmEkf0Sl/ +Jqk4svzJbCbQeMzRoyaya/46d7UioXMHRZam5IaGBhOdQbi97R4VsQjwQj0RmQsq +yDueDyuKTwWLK9KnvI+ZA6e6bRkdNGflK4N8GGKQ+fBhPwVELkbT9f16OJkezeeN +S+F/gDADGJgmPXfjogICb4Kvshq0H5Lm/xZlDULF2g/cYhyNY6EOI/eS5m1I7R8p +D/m6WoyZdpInxJfxW616OMkxQMRVsruLTNGtby3u1g6ScjmpFtvAMhYejBSdzKG4 +FEyxIdEjoeO1jhTsck3R +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDOzCCAiOgAwIBAgIJANZkFlQR2rKqMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xOTAyMDUx +MzA2MjBaGA8yMTk4MDcxMTEzMDYyMFowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT +EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft +YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAy4Vnit2eBpEjKgOKBmyupJzJAiT4fr74tuGJNwwa+Is2vH12jMZn9Il1 +UpvvEUYTIboIgISpf6SJ5LmV5rCv4jT4a1Wm0kjfNbiIlkUi8SxZrPypcw24m6ke +BVuxQZrZDs+xDUYIZifTmdgD50u5YE+TLg+YmXKnVgxBU6WZjbuK2INohi71aPBw +2zWUR7Gr/ggIpf635JLU3KIBLNEmrkXCVSnDFlsK4eeCrB7+UNak+4BwgpuykSGG +Op9+2vsuNqFeU1l9daQeG9roHR+4rIWSPa0opmMxv5nctgypOrE6zKXx2dNXQldd +VULv+WH7s6Vm4+yBeG8ctPYH5GOo+QIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQBs +ZcViiZdFdpcXESZP/KmZNDxB/kktlIEIhsQ+MNn29jayE5oLmtGjHj5dtA3XNKlr +f6PVygVTKbtQLQqunRT83e8+7iCZMKI5ev7pITUQVvTUwI+Fc01JkYZxRFlVBuFA +WGZO+98kxCS4n6tTwVt+nSuJr9BJRVC17apfHBgSS8c5OWna0VU/Cc9ka4eAfQR4 +7pYSDU3wSRE01cs30q34lXZ629IyFirSJ5TTOIc0osNL7vwMQYj8HOn4OBYqxKy8 +ZJyvfXsIPh0Na76PaBIs6ZlqAOflLrjGzxBPiwRM/XrGmF8ze4KzoUqJEnK13O6A +KHKgfiigQZ1+gv5FlyXH +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDOzCCAiOgAwIBAgIJAIFI+O5A6/ZIMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xOTA2MDQx +MjQ4MDRaGA8yMTk4MTEwNzEyNDgwNFowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT +EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft +YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAy7/WHBBHOrk+20aumT07g8rxrSM0UXgki3eYgKauPCG4Xx//vwQbuZwI +oeVmR9nqnhfij2wOcQdbLandh0EGtbxerete3IoXzd1KXJb11PVmzrzyu5SPBPuP +iCeV4qdjjkXo2YWM6t9YQ911hcG96YSp89TBXFYUh3KLxfqAdTVhuC0NRGhXpyii +j/czo9njofHhqhTr7UEyPun8NVS2QWctLQ86N5zWR3Q0GRoVqqMrJs0cowHTrVw2 +9Qr7QBjjBOVbyYmtYxm/DtiKprYV/e6bCAVok015X1sZDd3oCOQNoGlv5XbHJe2o +JFD8GRRy2rkWO/lNwVFDcwec6zC3QwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQCE +goqzjpCpmMgCpszFHwvRaSMbspKtK7wNImUjrSBOfBJsfFulyg1Zgn2nDCK7kQhx +jMJmNIvXbps3yMqQ2cHUkKcKf5t+WldfeT4Vk1Rz6HSA8sd0kgVcIesIaoy2aaXU +VEB/oQziRGyKdN1d4TGYVZXG44CkrzSDvlbmfiTq5tL+kAieznVF3bzHgPZW6hKP +EXC3G/IXrXicFEe6YyE1Rakl62VncYSXiGe/i2XvsiNH3Qlmnx5XS7W0SCN0oAxW +EH9twibauv82DVg1WOkQu8EwFw8hFde9X0Rkiu0qVcuU8lJgFEvPWMDFU5sGB6ZM +gkEKTzMvlZpPbBhg99Jl +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEEjCCAvqgAwIBAgIJAL2bOgb+dq9rMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNTEwMjkw +OTAwNTdaGA8yMTk1MDQwMzA5MDA1N1owXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT +EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft +YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAmRcyLWraysQS8yDC1b5Abs3TUaJabjqWu7d5gHik5Icd6dKl8EYpQSeS +vz6pLhkgO4xBbCRGlgE8LS/OijcZ5HwdrxBiKbicR1YvIPaIyEQQvF5sX6UWkGYw +Ma5IRGj4YbRmJkBybw+AAV9Icb5LJNOMWPi34OWM+2tMh+8L234v/JA6ogpdPuDr +sM6YFHMZ0NWo58MQ0FnEj2D7H58Ti//vFPl0TaaPWaAIRF85zBiJtKcFJ6vPidqK +f2/SDuAvZmyHC8ZBHg1moX9bR5FsU3QazfbW+c+JzAQWHj2AaQrGSCITxCMlS9sJ 
+l51DeoZBjnx8cnRe+HCaC4YoRBiqIQIDAQABo4HUMIHRMAsGA1UdDwQEAwIHgDAd +BgNVHQ4EFgQU/wHIo+r5U31VIsPoWoRVsNXGxowwgY4GA1UdIwSBhjCBg4AU/wHI +o+r5U31VIsPoWoRVsNXGxoyhYKReMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBX +YXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6 +b24gV2ViIFNlcnZpY2VzIExMQ4IJAL2bOgb+dq9rMBIGA1UdEwEB/wQIMAYBAf8C +AQAwDQYJKoZIhvcNAQELBQADggEBACobLvj8IxlQyORTz/9q7/VJL509/p4HAeve +92riHp6+Moi0/dSEYPeFTgdWB9W3YCNc34Ss9TJq2D7t/zLGGlbI4wYXU6VJjL0S +hCjWeIyBXUZOZKFCb0DSJeUElsTRSXSFuVrZ9EAwjLvHni3BaC9Ve34iP71ifr75 +8Tpk6PEj0+JwiijFH8E4GhcV5chB0/iooU6ioQqJrMwFYnwo1cVZJD5v6D0mu9bS +TMIJLJKv4QQQqPsNdjiB7G9bfkB6trP8fUVYLHLsVlIy5lGx+tgwFEYkG1N8IOO/ +2LCawwaWm8FYAFd3IZl04RImNs/IMG7VmH1bf4swHOBHgCN1uYo= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEEjCCAvqgAwIBAgIJAL9KIB7Fgvg/MA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNTA4MTQw +OTAwMjVaGA8yMTk1MDExNzA5MDAyNVowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT +EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft +YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAz0djWUcmRW85C5CiCKPFiTIvj6y2OuopFxNE5d3Wtab10bm06vnXVKXu +tz3AndG+Dg0zIL0gMlU+QmrSR0PH2PfV9iejfLak9iwdm1WbwRrCEAj5VxPe0Q+I +KeznOtxzqQ5Wo5NLE9bA61sziUAFNVsTFUzphEwRohcekYyd3bBC4v/RuAjCXHVx +40z6AIksnAOGN2VABMlTeMNvPItKOCIeRLlllSqXX1gbtL1gxSW40JWdF3WPB68E +e+/1U3F7OEr7XqmNODOL6yh92QqZ8fHjG+afOL9Y2Hc4g+P1nk4w4iohQOPABqzb +MPjK7B2Rze0f9OEc51GBQu13kxkWWQIDAQABo4HUMIHRMAsGA1UdDwQEAwIHgDAd +BgNVHQ4EFgQU5DS5IFdU/QwYbikgtWvkU3fDwRgwgY4GA1UdIwSBhjCBg4AU5DS5 +IFdU/QwYbikgtWvkU3fDwRihYKReMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBX +YXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6 +b24gV2ViIFNlcnZpY2VzIExMQ4IJAL9KIB7Fgvg/MBIGA1UdEwEB/wQIMAYBAf8C +AQAwDQYJKoZIhvcNAQELBQADggEBAG/N7ua8IE9IMyno0n5T57erBvLTOQ79fIJN +Mf+mKRM7qRRsdg/eumFft0rLOKo54pJ+Kim2cngCWNhkzctRHBV567AJNt4+ZDG5 +hDgV0IxWO1+eaLE4qzqWP/9VrO+p3reuumgFZLVpvVpwXBBeBFUf2drUR14aWfI2 +L/6VGINXYs7uP8v/2VBS7r6XZRnPBUy/R4hv5efYXnjwA9gq8+a3stC2ur8m5ySl +faKSwE4H320yAyaZWH4gpwUdbUlYgPHtm/ohRtiWPrN7KEG5Wq/REzMIjZCnxOfS +6KR6PNjlhxBsImQhmBvz6j5PLQxOxBZIpDoiK278e/1Wqm9LrBc= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDOzCCAiOgAwIBAgIJANuCgCcHtOJhMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNTA5MTQx +NTU3NDRaGA8yMTk1MDIxNzE1NTc0NFowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT +EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft +YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA66iNv6pJPmGM20W8HbVYJSlKcAg2vUGx8xeAbzZIQdpGfkabVcUHGB6m +Gy59VXDMDlrJckDDk6dxUOhmcX9z785TtVZURq1fua9QosdbTzX4kAgHGdp4xQEs +mO6QZqg5qKjBP6xr3+PshfQ1rB8Bmwg0gXEm22CC7o77+7N7Mu2sWzWbiUR7vil4 +9FjWS8XmMNwFTlShp4l1TDTevDWW/uYmC30RThM9S4QPvTZ0rAS18hHVam8BCTxa +LHaVCH/Yy52rsz0hM/FlghnSnK105ZKj+b+KIp3adBL8OMCjgc/Pxi0+j3HQLdYE +32+FaXWU84D2iP2gDT28evnstzuYTQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQC1 +mA4q+12pxy7By6g3nBk1s34PmWikNRJBwOqhF8ucGRv8aiNhRRye9lokcXomwo8r +KHbbqvtK85l0xUZp/Cx4sm4aTgcMvfJP29jGLclDzeqADIvkWEJ4+xncxSYVlS9x ++78TvF/+8h9U2LnSl64PXaKdxHy2IsHIVRN4GtoaP2Xhpa1S0M328Jykq/571nfN +1WRD1c/fQf1edgzRjhQ4whcAhv7WRRF+qTbfQJ/vDxy8lkiOsvU9XzUaZ0fZSfXX +wXxZamQbONvFcxVHY/0PSiM8nQoUmkkBQuKleDwRWvkoJKYKyr3jvXK7HIWtMrO4 +jmXe0aMy3thyK6g5sJVg +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDOzCCAiOgAwIBAgIJAMn1yPk22ditMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV 
+BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNzA3MTkx +MTEyNThaGA8yMTk2MTIyMjExMTI1OFowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT +EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft +YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEArznEYef8IjhrJoazI0QGZkmlmHm/4rEbyQbMNifxjsDE8YWtHNwaM91z +zmyK6Sk/tKlWxcnl3g31iq305ziyFPEewe5Qbwf1iz2cMsvfNBcTh/E6u+mBPH3J +gvGanqUJt6c4IbipdEouIjjnynyVWd4D6erLl/ENijeR1OxVpaqSW5SBK7jms49E +pw3wtbchEl3qsE42Ip4IYmWxqjgaxB7vps91n4kfyzAjUmklcqTfMfPCkzmJCRgp +Vh1C79vRQhmriVKD6BXwfZ8tG3a7mijeDn7kTsQzgO07Z2SAE63PIO48JK8HcObH +tXORUQ/XF1jzi/SIaUJZT7kq3kWl8wIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQBj +ThtO9dLvU2QmKuXAhxXjsIdlQgGG3ZGh/Vke4If1ymgLx95v2Vj9Moxk+gJuUSRL +BzFte3TT6b3jPolbECgmAorjj8NxjC17N8QAAI1d0S0gI8kqkG7V8iRyPIFekv+M +pcai1+cIv5IV5qAz8QOMGYfGdYkcoBjsgiyvMJu/2N2UbZJNGWvcEGkdjGJUYYOO +NaspCAFm+6HA/K7BD9zXB1IKsprLgqhiIUgEaW3UFEbThJT+z8UfHG9fQjzzfN/J +nT6vuY/0RRu1xAZPyh2gr5okN/s6rnmh2zmBHU1n8cbCc64MVfXe2g3EZ9Glq/9n +izPrI09hMypJDP04ugQc +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDOzCCAiOgAwIBAgIJAPRYyD8TtmC0MA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNjAzMDcx +MDQ1MDFaGA8yMTk1MDgxMTEwNDUwMVowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT +EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft +YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA0LSS5I/eCT2PM0+qusorBx67QL26BIWQHd/yF6ARtHBb/1DdFLRqE5Dj +07Xw7eENC+T79mOxOAbeWg91KaODOzw6i9I/2/HpK0+NDEdD6sPKDA1d45jRra+v +CqAjI+nV9Vw91wv7HjMk3RcjWGziM8/hw+3YNIutt7aQzZRwIWlBpcqx3/AFd8Eu +2UsRMSHgkGUW6UzUF+h/U8218XfrauKNGmNKDYUhtmyBrHT+k6J0hQ4pN7fe6h+Z +w9RVHm24BGhlLxLHLmsOIxvbrF277uX9Dxu1HfKfu5D2kimTY7xSZDNLR2dt+kNY +/+iWdIeEFpPT0PLSILt52wP6stF+3QIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQBI +E6w+WWC2gCfoJO6c9HMyGLMFEpqZmz1n5IcQt1h9iyO7Vkm1wkJiZsMhXpk73zXf +TPxuXEacTX3SOEa07OIMCFwkusO5f6leOyFTynHCzBgZ3U0UkRVZA3WcpbNB6Dwy +h7ysVlqyT9WZd7EOYm5j5oue2G2xdei+6etgn5UjyWm6liZGrcOF6WPTdmzqa6WG +ApEqanpkQd/HM+hUYex/ZS6zEhd4CCDLgYkIjlrFbFb3pJ1OVLztIfSN5J4Oolpu +JVCfIq5u1NkpzL7ys/Ub8eYipbzI6P+yxXiUSuF0v9b98ymczMYjrSQXIf1e8In3 +OP2CclCHoZ8XDQcvvKAh +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDOzCCAiOgAwIBAgIJAMoxixvs3YssMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xODA3MjAw +ODQ0NDRaGA8yMTk3MTIyMzA4NDQ0NFowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT +EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft +YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA4T1PNsOg0FDrGlWePoHeOSmOJTA3HCRy5LSbYD33GFU2eBrOIxoU/+SM +rInKu3GghAMfH7WxPW3etIAZiyTDDU5RLcUq2Qwdr/ZpXAWpYocNc/CEmBFtfbxF +z4uwBIN3/drM0RSbe/wP9EcgmNUGQMMZWeAji8sMtwpOblNWAP9BniUG0Flcz6Dp +uPovwDTLdAYT3TyhzlohKL3f6O48TR5yTaV+3Ran2SGRhyJjfh3FRpP4VC+z5LnT +WPQHN74Kdq35UgrUxNhJraMGCzznolUuoR/tFMwR93401GsM9fVA7SW3jjCGF81z +PSzjy+ArKyQqIpLW1YGWDFk3sf08FQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQDK +2/+C3nPMgtyOFX/I3Cyk+Pui44IgOwCsIdNGwuJysdqp5VIfnjegEu2zIMWJSKGO +lMZoQXjffkVZZ97J7RNDW06oB7kj3WVE8a7U4WEOfnO/CbMUf/x99CckNDwpjgW+ +K8V8SzAsQDvYZs2KaE+18GFfLVF1TGUYK2rPSZMHyX+v/TIlc/qUceBycrIQ/kke +jDFsihUMLqgmOV2hXKUpIsmiWMGrFQV4AeV0iXP8L/ZhcepLf1t5SbsGdUA3AUY1 +3If8s81uTheiQjwY5t9nM0SY/1Th/tL3+RaEI79VNEVfG1FQ8mgqCK0ar4m0oZJl +tmmEJM7xeURdpBBx36Di +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- 
+MIIEEjCCAvqgAwIBAgIJAJVMGw5SHkcvMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNTEwMjkw +ODU3MTlaGA8yMTk1MDQwMzA4NTcxOVowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT +EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft +YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAlaSSLfBl7OgmikjLReHuNhVuvM20dCsVzptUyRbut+KmIEEc24wd/xVy +2RMIrydGedkW4tUjkUyOyfET5OAyT43jTzDPHZTkRSVkYjBdcYbe9o/0Q4P7IVS3 +XlvwrUu0qo9nSID0mxMnOoF1l8KAqnn10tQ0W+lNSTkasW7QVzcb+3okPEVhPAOq +MnlY3vkMQGI8zX4iOKbEcSVIzf6wuIffXMGHVC/JjwihJ2USQ8fq6oy686g54P4w +ROg415kLYcodjqThmGJPNUpAZ7MOc5Z4pymFuCHgNAZNvjhZDA842Ojecqm62zcm +Tzh/pNMNeGCRYq2EQX0aQtYOIj7bOQIDAQABo4HUMIHRMAsGA1UdDwQEAwIHgDAd +BgNVHQ4EFgQU6SSB+3qALorPMVNjToM1Bj3oJMswgY4GA1UdIwSBhjCBg4AU6SSB ++3qALorPMVNjToM1Bj3oJMuhYKReMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBX +YXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6 +b24gV2ViIFNlcnZpY2VzIExMQ4IJAJVMGw5SHkcvMBIGA1UdEwEB/wQIMAYBAf8C +AQAwDQYJKoZIhvcNAQELBQADggEBAF/0dWqkIEZKg5rca8o0P0VS+tolJJE/FRZO +atHOeaQbWzyac6NEwjYeeV2kY63skJ+QPuYbSuIBLM8p/uTRIvYM4LZYImLGUvoO +IdtJ8mAzq8CZ3ipdMs1hRqF5GRp8lg4w2QpX+PfhnW47iIOBiqSAUkIr3Y3BDaDn +EjeXF6qS4iPIvBaQQ0cvdddNh/pE33/ceghbkZNTYkrwMyBkQlRTTVKXFN7pCRUV ++L9FuQ9y8mP0BYZa5e1sdkwebydU+eqVzsil98ntkhpjvRkaJ5+Drs8TjGaJWlRw +5WuOr8unKj7YxdL1bv7//RtVYVVi296ldoRUYv4SCvJF11z0OdQ= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEEjCCAvqgAwIBAgIJAMtdyRcH51j9MA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0yMjA0MDgx +MjM5MTZaGA8yMjAxMDkxMjEyMzkxNlowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT +EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft +YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAvUsKCxoH6KXRYJLeYTWAQfaBQeCwhJaR56mfUeFHJE4g8aFjWkiN4uc1 +TvOyYNnIZKTHWmzmulmdinWNbwP0GiROHb/i7ro0HhvnptyycGt8ag8affiIbx5X +7ohdwSN2KJ6G0IKflIx7f2NEI0oAMM/9k+T1eVF+MVWzpZoiDp8frLNkqp8+RAgz +ScZsbRfwv3u/if5xJAvdg2nCkIWDMSHEVPoz0lJo7v0ZuDtWWsL1LHnL5ozvsKEk ++ZJyEi23r+U1hIT1NTBdp4yoigNQexedtwCSr7q36oOdDwvZpqYlkLi3uxZ4ta+a +01pzOSTwMLgQZSbKWQrpMvsIAPrxoQIDAQABo4HUMIHRMAsGA1UdDwQEAwIHgDAd +BgNVHQ4EFgQU1GgnGdNpbnL3lLF30Jomg7Ji9hYwgY4GA1UdIwSBhjCBg4AU1Ggn +GdNpbnL3lLF30Jomg7Ji9hahYKReMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBX +YXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6 +b24gV2ViIFNlcnZpY2VzIExMQ4IJAMtdyRcH51j9MBIGA1UdEwEB/wQIMAYBAf8C +AQAwDQYJKoZIhvcNAQELBQADggEBACVl00qQlatBKVeiWMrhpczsJroxDxlZTOba +6wTMZk7c3akb6XMOSZFbGaifkebPZqTHEhDlrClM2j9AIlYcCx6YCrTf4cuhn2mD +gcJN33143eOWSaeRY3ee4j+V9ne98y3kO2wLz95VrRgclPFR8po2iWGzGhwUi+FG +q8dXeCH3N0DZgQsSgQWwmdNQXZZej6RHLU/8In5trHKLY0ppnLBjn/UZQbeTyW5q +RJB3GaveXjfgFUWj2qOcDuRGaikdS+dYaLsi5z9cA3FolHzWxx9MOs8io8vKqQzV +XUrLTNWwuhZy88cOlqGPxnoRbw7TmifwPw/cunNrsjUUOgs6ZTk= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDOzCCAiOgAwIBAgIJAPu4ssY3BlzcMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNTEyMDMy +MTI5MzJaGA8yMTk1MDUwODIxMjkzMlowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT +EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft +YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAsOiGi4A6+YTLzCdIyP8b8SCT2M/6PGKwzKJ5XbSBoL3gsnSWiFYqPg9c +uJPNbiy9wSA9vlyfWMd90qvTfiNrT6vewP813QdJ3EENZOx4ERcf/Wd22tV72kxD 
+yw1Q3I1OMH4bOItGQAxU5OtXCjBZEEUZooOkU8RoUQOU2Pql4NTiUpzWacNutAn5 +HHS7MDc4lUlsJqbN+5QW6fFrcNG/0Mrib3JbwdFUNhrQ5j+Yq5h78HarnUivnX/3 +Ap+oPbentv1qd7wvPJu556LZuhfqI0TohiIT1Ah+yUdN5osoaMxTHKKtf/CsSJ1F +w3qXqFJQA0VWsqjFyHXFI32I/GOupwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQCn +Um00QHvUsJSN6KATbghowLynHn3wZSQsuS8E0COpcFJFxP2SV0NYkERbXu0n/Vhi +yq5F8v4/bRA2/xpedLWmvFs7QWlomuXhSnYFkd33Z5gnXPb9vRkLwiMSw4uXls35 +qQraczUJ9EXDhrv7VmngIk9H3YsxYrlDGEqh/oz4Ze4ULOgnfkauanHikk+BUEsg +/jsTD+7e+niEzJPihHdsvKFDlud5pakEzyxovHwNJ1GS2I//yxrJFIL91mehjqEk +RLPdNse7N6UvSnuXcOokwu6l6kfzigGkJBxkcq4gre3szZFdCQcUioj7Z4xtuTL8 +YMqfiDtN5cbD8R8ojw9Y +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDOzCCAiOgAwIBAgIJAOtrM5XLDSjCMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNTA4MTQx +MDAxNDJaGA8yMTk1MDExNzEwMDE0MlowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT +EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft +YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAvVBz+WQNdPiM9S+aUULOQEriTmNDUrjLWLr7SfaOJScBzis5D5ju0jh1 ++qJdkbuGKtFX5OTWTm8pWhInX+hIOoS3exC4BaANoa1A3o6quoG+Rsv72qQf8LLH +sgEi6+LMlCN9TwnRKOToEabmDKorss4zFl7VSsbQJwcBSfOcIwbdRRaW9Ab6uJHu +79L+mBR3Ea+G7vSDrVIA8goAPkae6jY9WGw9KxsOrcvNdQoEkqRVtHo4bs9fMRHU +Etphj2gh4ObXlFN92VtvzD6QBs3CcoFWgyWGvzg+dNG5VCbsiiuRdmii3kcijZ3H +Nv1wCcZoEAqH72etVhsuvNRC/xAP8wIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQA8 +ezx5LRjzUU9EYWYhyYIEShFlP1qDHs7F4L46/5lc4pL8FPoQm5CZuAF31DJhYi/b +fcV7i3n++/ymQbCLC6kAg8DUB7NrcROll5ag8d/JXGzcTCnlDXLXx1905fPNa+jI +0q5quTmdmiSi0taeaKZmyUdhrB+a7ohWdSdlokEIOtbH1P+g5yll3bI2leYE6Tm8 +LKbyfK/532xJPqO9abx4Ddn89ZEC6vvWVNDgTsxERg992Wi+/xoSw3XxkgAryIv1 +zQ4dQ6irFmXwCWJqc6kHg/M5W+z60S/94+wGTXmp+19U6Rkq5jVMLh16XJXrXwHe +4KcgIS/aQGVgjM6wivVA +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDOzCCAiOgAwIBAgIJANCOF0Q6ohnuMA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xNTA5MTAx +OTQyNDdaGA8yMTk1MDIxMzE5NDI0N1owXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT +EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft +YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAzIcGTzNqie3f1olrrqcfzGfbymSM2QfbTzDIOG6xXXeFrCDAmOq0wUhi +3fRCuoeHlKOWAPu76B9os71+zgF22dIDEVkpqHCjBrGzDQZXXUwOzhm+PmBUI8Z1 +qvbVD4ZYhjCujWWzrsX6Z4yEK7PEFjtf4M4W8euw0RmiNwjy+knIFa+VxK6aQv94 +lW98URFP2fD84xedHp6ozZlr3+RZSIFZsOiyxYsgiwTbesRMI0Y7LnkKGCIHQ/XJ +OwSISWaCddbu59BZeADnyhl4f+pWaSQpQQ1DpXvZAVBYvCH97J1oAxLfH8xcwgSQ +/se3wtn095VBt5b7qTVjOvy6vKZazwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQA/ +S8+a9csfASkdtQUOLsBynAbsBCH9Gykq2m8JS7YE4TGvqlpnWehz78rFTzQwmz4D +fwq8byPkl6DjdF9utqZ0JUo/Fxelxom0h6oievtBlSkmZJNbgc2WYm1zi6ptViup +Y+4S2+vWZyg/X1PXD7wyRWuETmykk73uEyeWFBYKCHWsO9sI+62O4Vf8Jkuj/cie +1NSJX8fkervfLrZSHBYhxLbL+actVEo00tiyZz8GnhgWx5faCY38D/k4Y/j5Vz99 +7lUX/+fWHT3+lTL8ZZK7fOQWh6NQpI0wTP9KtWqfOUwMIbgFQPoxkP00TWRmdmPz +WOwTObEf9ouTnjG9OZ20 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDOzCCAiOgAwIBAgIJALPB6hxFhay8MA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0 +dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAgFw0xODA0MTAx +MjMyNDlaGA8yMTk3MDkxMzEyMzI0OVowXDELMAkGA1UEBhMCVVMxGTAXBgNVBAgT +EFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0Ft +YXpvbiBXZWIgU2VydmljZXMgTExDMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEAva9xsI9237KYb/SPWmeCVzi7giKNron8hoRDwlwwMC9+uHPd53UxzKLb 
+pTgtJWAPkZVxEdl2Gdhwr3SULoKcKmkqE6ltVFrVuPT33La1UufguT9k8ZDDuO9C +hQNHUdSVEuVrK3bLjaSsMOS7Uxmnn7lYT990IReowvnBNBsBlcabfQTBV04xfUG0 +/m0XUiUFjOxDBqbNzkEIblW7vK7ydSJtFMSljga54UAVXibQt9EAIF7B8k9l2iLa +mu9yEjyQy+ZQICTuAvPUEWe6va2CHVY9gYQLA31/zU0VBKZPTNExjaqK4j8bKs1/ +7dOV1so39sIGBz21cUBec1o+yCS5SwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQBt +hO2W/Lm+Nk0qsXW6mqQFsAou0cASc/vtGNCyBfoFNX6aKXsVCHxq2aq2TUKWENs+ +mKmYu1lZVhBOmLshyllh3RRoL3Ohp3jCwXytkWQ7ElcGjDzNGc0FArzB8xFyQNdK +MNvXDi/ErzgrHGSpcvmGHiOhMf3UzChMWbIr6udoDlMbSIO7+8F+jUJkh4Xl1lKb +YeN5fsLZp7T/6YvbFSPpmbn1YoE2vKtuGKxObRrhU3h4JHdp1Zel1pZ6lh5iM0ec +SD11SximGIYCjfZpRqI3q50mbxCd7ckULz+UUPwLrfOds4VrVVSj+x0ZdY19Plv2 +9shw5ez6Cn7E3IfzqNHO +-----END CERTIFICATE-----` diff --git a/builtin/credential/aws/cli.go b/builtin/credential/aws/cli.go new file mode 100644 index 0000000..a169557 --- /dev/null +++ b/builtin/credential/aws/cli.go @@ -0,0 +1,138 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package awsauth + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-secure-stdlib/awsutil" + "github.com/hashicorp/vault/api" +) + +type CLIHandler struct{} + +func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) { + mount, ok := m["mount"] + if !ok { + mount = "aws" + } + + role, ok := m["role"] + if !ok { + role = "" + } + + headerValue, ok := m["header_value"] + if !ok { + headerValue = "" + } + + logVal, ok := m["log_level"] + if !ok { + logVal = "info" + } + level := hclog.LevelFromString(logVal) + if level == hclog.NoLevel { + return nil, fmt.Errorf("failed to parse 'log_level' value: %q", logVal) + } + hlogger := hclog.Default() + hlogger.SetLevel(level) + + creds, err := awsutil.RetrieveCreds(m["aws_access_key_id"], m["aws_secret_access_key"], m["aws_security_token"], hlogger) + if err != nil { + return nil, err + } + + region := m["region"] + switch region { + case "": + // The CLI has always defaulted to "us-east-1" if a region is not provided. + region = awsutil.DefaultRegion + case "auto": + // Beginning in 1.10 we also accept the "auto" value, which uses the region detection logic in + // awsutil.GetRegion() to determine the region. That behavior is triggered when region = "". + region = "" + } + + loginData, err := awsutil.GenerateLoginData(creds, headerValue, region, hlogger) + if err != nil { + return nil, err + } + if loginData == nil { + return nil, fmt.Errorf("got nil response from GenerateLoginData") + } + loginData["role"] = role + path := fmt.Sprintf("auth/%s/login", mount) + secret, err := c.Logical().Write(path, loginData) + if err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("empty response from credential provider") + } + + return secret, nil +} + +func (h *CLIHandler) Help() string { + help := ` +Usage: vault login -method=aws [CONFIG K=V...] + + The AWS auth method allows users to authenticate with AWS IAM + credentials. The AWS IAM credentials, and optionally the AWS region, may be + specified in a number of ways, listed in order of precedence below: + + 1. Explicitly via the command line (not recommended) + + 2. Via the standard AWS environment variables (AWS_ACCESS_KEY, etc.) + + 3. Via the ~/.aws/credentials file + + 4. Via EC2 instance profile + + Authenticate using locally stored credentials: + + $ vault login -method=aws + + Authenticate by passing keys: + + $ vault login -method=aws aws_access_key_id=... aws_secret_access_key=... 
+
+Configuration:
+
+  aws_access_key_id=<string>
+      Explicit AWS access key ID
+
+  aws_secret_access_key=<string>
+      Explicit AWS secret access key
+
+  aws_security_token=<string>
+      Explicit AWS security token for temporary credentials
+
+  header_value=<string>
+      Value for the x-vault-aws-iam-server-id header in requests
+
+  mount=<string>
+      Path where the AWS credential method is mounted. This is usually provided
+      via the -path flag in the "vault login" command, but it can be specified
+      here as well. If specified here, it takes precedence over the value for
+      -path. The default value is "aws".
+
+  region=<string>
+      Explicit AWS region to reach out to for authentication request signing. A value
+      of "auto" enables auto-detection of region based on the precedence described above.
+      Defaults to "us-east-1" if not specified.
+
+  role=<string>
+      Name of the role to request a token against
+
+  log_level=<string>
+      Set logging level during AWS credential acquisition. Valid levels are
+      trace, debug, info, warn, error. Defaults to info.
+`
+
+	return strings.TrimSpace(help)
+}
diff --git a/builtin/credential/aws/client.go b/builtin/credential/aws/client.go
new file mode 100644
index 0000000..314c97e
--- /dev/null
+++ b/builtin/credential/aws/client.go
@@ -0,0 +1,306 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package awsauth
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/ec2"
+	"github.com/aws/aws-sdk-go/service/iam"
+	"github.com/aws/aws-sdk-go/service/sts"
+	cleanhttp "github.com/hashicorp/go-cleanhttp"
+	"github.com/hashicorp/go-secure-stdlib/awsutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// getRawClientConfig creates an aws-sdk-go config, which is used to create clients
+// that can interact with the AWS API. This builds credentials in the following
+// order of preference:
+//
+// * Static credentials from 'config/client'
+// * Environment variables
+// * Instance metadata role
+func (b *backend) getRawClientConfig(ctx context.Context, s logical.Storage, region, clientType string) (*aws.Config, error) {
+	credsConfig := &awsutil.CredentialsConfig{
+		Region: region,
+		Logger: b.Logger(),
+	}
+
+	// Read the configured secret key and access key
+	config, err := b.nonLockedClientConfigEntry(ctx, s)
+	if err != nil {
+		return nil, err
+	}
+
+	endpoint := aws.String("")
+	var maxRetries int = aws.UseServiceDefaultRetries
+	if config != nil {
+		// Override the defaults with configured values.
+		switch {
+		case clientType == "ec2" && config.Endpoint != "":
+			endpoint = aws.String(config.Endpoint)
+		case clientType == "iam" && config.IAMEndpoint != "":
+			endpoint = aws.String(config.IAMEndpoint)
+		case clientType == "sts":
+			if config.STSEndpoint != "" {
+				endpoint = aws.String(config.STSEndpoint)
+			}
+			if config.STSRegion != "" {
+				region = config.STSRegion
+			}
+		}
+
+		credsConfig.AccessKey = config.AccessKey
+		credsConfig.SecretKey = config.SecretKey
+		maxRetries = config.MaxRetries
+	}
+
+	credsConfig.HTTPClient = cleanhttp.DefaultClient()
+
+	creds, err := credsConfig.GenerateCredentialChain()
+	if err != nil {
+		return nil, err
+	}
+	if creds == nil {
+		return nil, fmt.Errorf("could not compile valid credential providers from static config, environment, shared, or instance metadata")
+	}
+
+	// Create a config that can be used to make the API calls.
+	return &aws.Config{
+		Credentials: creds,
+		Region:      aws.String(region),
+		HTTPClient:  cleanhttp.DefaultClient(),
+		Endpoint:    endpoint,
+		MaxRetries:  aws.Int(maxRetries),
+	}, nil
+}
+
+// getClientConfig returns an aws-sdk-go config, with optionally assumed credentials.
+// It uses getRawClientConfig to obtain config for the runtime environment, and if
+// stsRole is a non-empty string, it will use AssumeRole to obtain a set of assumed
+// credentials. The credentials will expire after 15 minutes but will auto-refresh.
+func (b *backend) getClientConfig(ctx context.Context, s logical.Storage, region, stsRole, accountID, clientType string) (*aws.Config, error) {
+	config, err := b.getRawClientConfig(ctx, s, region, clientType)
+	if err != nil {
+		return nil, err
+	}
+	if config == nil {
+		return nil, fmt.Errorf("could not compile valid credentials through the default provider chain")
+	}
+
+	stsConfig, err := b.getRawClientConfig(ctx, s, region, "sts")
+	if err != nil {
+		return nil, err
+	}
+	if stsConfig == nil {
+		return nil, fmt.Errorf("could not configure STS client")
+	}
+	if stsRole != "" {
+		sess, err := session.NewSession(stsConfig)
+		if err != nil {
+			return nil, err
+		}
+		assumedCredentials := stscreds.NewCredentials(sess, stsRole)
+		// Test that we actually have permissions to assume the role
+		if _, err = assumedCredentials.Get(); err != nil {
+			return nil, err
+		}
+		config.Credentials = assumedCredentials
+	} else {
+		if b.defaultAWSAccountID == "" {
+			sess, err := session.NewSession(stsConfig)
+			if err != nil {
+				return nil, err
+			}
+			client := sts.New(sess)
+			if client == nil {
+				return nil, fmt.Errorf("could not obtain sts client")
+			}
+			inputParams := &sts.GetCallerIdentityInput{}
+			identity, err := client.GetCallerIdentityWithContext(ctx, inputParams)
+			if err != nil {
+				return nil, fmt.Errorf("unable to fetch current caller: %w", err)
+			}
+			if identity == nil {
+				return nil, fmt.Errorf("got nil result from GetCallerIdentity")
+			}
+			b.defaultAWSAccountID = *identity.Account
+		}
+		if b.defaultAWSAccountID != accountID {
+			return nil, fmt.Errorf("unable to fetch client for account ID %q -- default client is for account %q", accountID, b.defaultAWSAccountID)
+		}
+	}
+
+	return config, nil
+}
+
+// flushCachedEC2Clients deletes all the cached ec2 client objects from the backend.
+// If the client credentials configuration is deleted or updated in the backend, all
+// the cached EC2 client objects will be flushed. Config mutex lock should be
+// acquired for write operation before calling this method.
+func (b *backend) flushCachedEC2Clients() {
+	// deleting items in map during iteration is safe
+	for region := range b.EC2ClientsMap {
+		delete(b.EC2ClientsMap, region)
+	}
+}
+
+// flushCachedIAMClients deletes all the cached iam client objects from the
+// backend. If the client credentials configuration is deleted or updated in
+// the backend, all the cached IAM client objects will be flushed. Config mutex
+// lock should be acquired for write operation before calling this method.
+func (b *backend) flushCachedIAMClients() { + // deleting items in map during iteration is safe + for region := range b.IAMClientsMap { + delete(b.IAMClientsMap, region) + } +} + +// Gets an entry out of the user ID cache +func (b *backend) getCachedUserId(userId string) string { + if userId == "" { + return "" + } + if entry, ok := b.iamUserIdToArnCache.Get(userId); ok { + b.iamUserIdToArnCache.SetDefault(userId, entry) + return entry.(string) + } + return "" +} + +// Sets an entry in the user ID cache +func (b *backend) setCachedUserId(userId, arn string) { + if userId != "" { + b.iamUserIdToArnCache.SetDefault(userId, arn) + } +} + +func (b *backend) stsRoleForAccount(ctx context.Context, s logical.Storage, accountID string) (string, error) { + // Check if an STS configuration exists for the AWS account + sts, err := b.lockedAwsStsEntry(ctx, s, accountID) + if err != nil { + return "", fmt.Errorf("error fetching STS config for account ID %q: %w", accountID, err) + } + // An empty STS role signifies the master account + if sts != nil { + return sts.StsRole, nil + } + return "", nil +} + +// clientEC2 creates a client to interact with AWS EC2 API +func (b *backend) clientEC2(ctx context.Context, s logical.Storage, region, accountID string) (*ec2.EC2, error) { + stsRole, err := b.stsRoleForAccount(ctx, s, accountID) + if err != nil { + return nil, err + } + b.configMutex.RLock() + if b.EC2ClientsMap[region] != nil && b.EC2ClientsMap[region][stsRole] != nil { + defer b.configMutex.RUnlock() + // If the client object was already created, return it + return b.EC2ClientsMap[region][stsRole], nil + } + + // Release the read lock and acquire the write lock + b.configMutex.RUnlock() + b.configMutex.Lock() + defer b.configMutex.Unlock() + + // If the client gets created while switching the locks, return it + if b.EC2ClientsMap[region] != nil && b.EC2ClientsMap[region][stsRole] != nil { + return b.EC2ClientsMap[region][stsRole], nil + } + + // Create an AWS config object using a chain of providers + var awsConfig *aws.Config + awsConfig, err = b.getClientConfig(ctx, s, region, stsRole, accountID, "ec2") + + if err != nil { + return nil, err + } + + if awsConfig == nil { + return nil, fmt.Errorf("could not retrieve valid assumed credentials") + } + + // Create a new EC2 client object, cache it and return the same + sess, err := session.NewSession(awsConfig) + if err != nil { + return nil, err + } + client := ec2.New(sess) + if client == nil { + return nil, fmt.Errorf("could not obtain ec2 client") + } + if _, ok := b.EC2ClientsMap[region]; !ok { + b.EC2ClientsMap[region] = map[string]*ec2.EC2{stsRole: client} + } else { + b.EC2ClientsMap[region][stsRole] = client + } + + return b.EC2ClientsMap[region][stsRole], nil +} + +// clientIAM creates a client to interact with AWS IAM API +func (b *backend) clientIAM(ctx context.Context, s logical.Storage, region, accountID string) (*iam.IAM, error) { + stsRole, err := b.stsRoleForAccount(ctx, s, accountID) + if err != nil { + return nil, err + } + if stsRole == "" { + b.Logger().Debug(fmt.Sprintf("no stsRole found for %s", accountID)) + } else { + b.Logger().Debug(fmt.Sprintf("found stsRole %s for account %s", stsRole, accountID)) + } + b.configMutex.RLock() + if b.IAMClientsMap[region] != nil && b.IAMClientsMap[region][stsRole] != nil { + defer b.configMutex.RUnlock() + // If the client object was already created, return it + b.Logger().Debug(fmt.Sprintf("returning cached client for region %s and stsRole %s", region, stsRole)) + return 
b.IAMClientsMap[region][stsRole], nil + } + b.Logger().Debug(fmt.Sprintf("no cached client for region %s and stsRole %s", region, stsRole)) + + // Release the read lock and acquire the write lock + b.configMutex.RUnlock() + b.configMutex.Lock() + defer b.configMutex.Unlock() + + // If the client gets created while switching the locks, return it + if b.IAMClientsMap[region] != nil && b.IAMClientsMap[region][stsRole] != nil { + return b.IAMClientsMap[region][stsRole], nil + } + + // Create an AWS config object using a chain of providers + var awsConfig *aws.Config + awsConfig, err = b.getClientConfig(ctx, s, region, stsRole, accountID, "iam") + + if err != nil { + return nil, err + } + + if awsConfig == nil { + return nil, fmt.Errorf("could not retrieve valid assumed credentials") + } + + // Create a new IAM client object, cache it and return the same + sess, err := session.NewSession(awsConfig) + if err != nil { + return nil, err + } + client := iam.New(sess) + if client == nil { + return nil, fmt.Errorf("could not obtain iam client") + } + if _, ok := b.IAMClientsMap[region]; !ok { + b.IAMClientsMap[region] = map[string]*iam.IAM{stsRole: client} + } else { + b.IAMClientsMap[region][stsRole] = client + } + return b.IAMClientsMap[region][stsRole], nil +} diff --git a/builtin/credential/aws/cmd/aws/main.go b/builtin/credential/aws/cmd/aws/main.go new file mode 100644 index 0000000..c7fce3e --- /dev/null +++ b/builtin/credential/aws/cmd/aws/main.go @@ -0,0 +1,34 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "os" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + awsauth "github.com/hashicorp/vault/builtin/credential/aws" + "github.com/hashicorp/vault/sdk/plugin" +) + +func main() { + apiClientMeta := &api.PluginAPIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args[1:]) + + tlsConfig := apiClientMeta.GetTLSConfig() + tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) + + if err := plugin.ServeMultiplex(&plugin.ServeOpts{ + BackendFactoryFunc: awsauth.Factory, + // set the TLSProviderFunc so that the plugin maintains backwards + // compatibility with Vault versions that don’t support plugin AutoMTLS + TLSProviderFunc: tlsProviderFunc, + }); err != nil { + logger := hclog.New(&hclog.LoggerOptions{}) + + logger.Error("plugin shutting down", "error", err) + os.Exit(1) + } +} diff --git a/builtin/credential/aws/path_config_certificate.go b/builtin/credential/aws/path_config_certificate.go new file mode 100644 index 0000000..36dfe3c --- /dev/null +++ b/builtin/credential/aws/path_config_certificate.go @@ -0,0 +1,418 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package awsauth + +import ( + "context" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "fmt" + "strings" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +// pathListCertificates creates a path that enables listing of all +// the AWS public certificates registered with Vault. 
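+// +// Illustrative sketch only (the "auth/aws" mount path and the API client setup are assumptions, not part of this backend): the registered certificates can be listed with the official Go API client: +// +// client, err := api.NewClient(api.DefaultConfig()) +// if err != nil { +// return err +// } +// secret, err := client.Logical().List("auth/aws/config/certificates")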
+func (b *backend) pathListCertificates() *framework.Path { + return &framework.Path{ + Pattern: "config/certificates/?", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "certificate-configurations", + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ListOperation: &framework.PathOperation{ + Callback: b.pathCertificatesList, + }, + }, + + HelpSynopsis: pathListCertificatesHelpSyn, + HelpDescription: pathListCertificatesHelpDesc, + } +} + +func (b *backend) pathConfigCertificate() *framework.Path { + return &framework.Path{ + Pattern: "config/certificate/" + framework.GenericNameRegex("cert_name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + }, + + Fields: map[string]*framework.FieldSchema{ + "cert_name": { + Type: framework.TypeString, + Description: "Name of the certificate.", + }, + "aws_public_cert": { + Type: framework.TypeString, + Description: "Base64 encoded AWS Public cert required to verify PKCS7 signature of the EC2 instance metadata.", + }, + "type": { + Type: framework.TypeString, + Default: "pkcs7", + Description: ` +Takes the value of either "pkcs7" or "identity", indicating the type of +document which can be verified using the given certificate. The reason is that +the PKCS#7 document will have a DSA digest and the identity signature will have +an RSA signature, and accordingly the public certificates to verify those also +vary. Defaults to "pkcs7".`, + }, + }, + + ExistenceCheck: b.pathConfigCertificateExistenceCheck, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.CreateOperation: &framework.PathOperation{ + Callback: b.pathConfigCertificateCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "certificate", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigCertificateCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "certificate", + }, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigCertificateRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "certificate-configuration", + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathConfigCertificateDelete, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "certificate-configuration", + }, + }, + }, + + HelpSynopsis: pathConfigCertificateSyn, + HelpDescription: pathConfigCertificateDesc, + } +} + +// Establishes dichotomy of request operation between CreateOperation and UpdateOperation. +// Returning 'true' forces an UpdateOperation, CreateOperation otherwise. 
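+// +// Simplified sketch of the dispatch the SDK framework performs for write requests (not the actual SDK code): +// +// exists, err := b.pathConfigCertificateExistenceCheck(ctx, req, data) +// if err == nil && exists { +// // routed to the UpdateOperation handler +// } else if err == nil { +// // routed to the CreateOperation handler +// }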
+func (b *backend) pathConfigCertificateExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) { + certName := data.Get("cert_name").(string) + if certName == "" { + return false, fmt.Errorf("missing cert_name") + } + + entry, err := b.lockedAWSPublicCertificateEntry(ctx, req.Storage, certName) + if err != nil { + return false, err + } + return entry != nil, nil +} + +// pathCertificatesList is used to list all the AWS public certificates registered with Vault +func (b *backend) pathCertificatesList(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + b.configMutex.RLock() + defer b.configMutex.RUnlock() + + certs, err := req.Storage.List(ctx, "config/certificate/") + if err != nil { + return nil, err + } + return logical.ListResponse(certs), nil +} + +// Decodes the PEM encoded certificate and parses it into an x509 cert +func decodePEMAndParseCertificate(certificate string) (*x509.Certificate, error) { + // Decode the PEM block and error out if a block is not detected in the first attempt + decodedPublicCert, rest := pem.Decode([]byte(certificate)) + if decodedPublicCert == nil { + return nil, fmt.Errorf("invalid certificate; failed to decode PEM block") + } + if len(rest) != 0 { + return nil, fmt.Errorf("invalid certificate; should be one PEM block only") + } + + // Check if the certificate can be parsed + publicCert, err := x509.ParseCertificate(decodedPublicCert.Bytes) + if err != nil { + return nil, err + } + if publicCert == nil { + return nil, fmt.Errorf("invalid certificate; failed to parse certificate") + } + return publicCert, nil +} + +// awsPublicCertificates returns a slice of all the parsed AWS public +// certificates, which are used to verify either the identity, RSA 2048 +// or the PKCS7 signatures of the instance identity documents. This method will +// append the certificates registered using `config/certificate/` +// endpoint, along with the default certificates in the backend. +func (b *backend) awsPublicCertificates(ctx context.Context, s logical.Storage, isPkcs bool) ([]*x509.Certificate, error) { + // Lock at beginning and use internal method so that we are consistent as + // we iterate through + b.configMutex.RLock() + defer b.configMutex.RUnlock() + + certs := make([]*x509.Certificate, len(defaultCertificates)) + copy(certs, defaultCertificates) + + // Get the list of all the registered certificates + registeredCerts, err := s.List(ctx, "config/certificate/") + if err != nil { + return nil, err + } + + // Iterate through each certificate, parse and append it to a slice + for _, cert := range registeredCerts { + certEntry, err := b.nonLockedAWSPublicCertificateEntry(ctx, s, cert) + if err != nil { + return nil, err + } + if certEntry == nil { + return nil, fmt.Errorf("certificate storage has a nil entry under the name: %q", cert) + } + // Append relevant certificates only + if (isPkcs && certEntry.Type == "pkcs7") || + (!isPkcs && certEntry.Type == "identity") { + decodedCert, err := decodePEMAndParseCertificate(certEntry.AWSPublicCert) + if err != nil { + return nil, err + } + certs = append(certs, decodedCert) + } + } + + return certs, nil +} + +// lockedSetAWSPublicCertificateEntry is used to store the AWS public key in +// the storage. This method acquires the lock before creating or updating a storage +// entry.
+func (b *backend) lockedSetAWSPublicCertificateEntry(ctx context.Context, s logical.Storage, certName string, certEntry *awsPublicCert) error { + if certName == "" { + return fmt.Errorf("missing certificate name") + } + + if certEntry == nil { + return fmt.Errorf("nil AWS public key certificate") + } + + b.configMutex.Lock() + defer b.configMutex.Unlock() + + return b.nonLockedSetAWSPublicCertificateEntry(ctx, s, certName, certEntry) +} + +// nonLockedSetAWSPublicCertificateEntry is used to store the AWS public key in +// the storage. This method does not acquire lock before reading the storage. +// If locking is desired, use lockedSetAWSPublicCertificateEntry instead. +func (b *backend) nonLockedSetAWSPublicCertificateEntry(ctx context.Context, s logical.Storage, certName string, certEntry *awsPublicCert) error { + if certName == "" { + return fmt.Errorf("missing certificate name") + } + + if certEntry == nil { + return fmt.Errorf("nil AWS public key certificate") + } + + entry, err := logical.StorageEntryJSON("config/certificate/"+certName, certEntry) + if err != nil { + return err + } + if entry == nil { + return fmt.Errorf("failed to create storage entry for AWS public key certificate") + } + + return s.Put(ctx, entry) +} + +// lockedAWSPublicCertificateEntry is used to get the configured AWS Public Key +// that is used to verify the PKCS#7 signature of the instance identity +// document. +func (b *backend) lockedAWSPublicCertificateEntry(ctx context.Context, s logical.Storage, certName string) (*awsPublicCert, error) { + b.configMutex.RLock() + defer b.configMutex.RUnlock() + + return b.nonLockedAWSPublicCertificateEntry(ctx, s, certName) +} + +// nonLockedAWSPublicCertificateEntry reads the certificate information from +// the storage. This method does not acquire lock before reading the storage. +// If locking is desired, use lockedAWSPublicCertificateEntry instead. +func (b *backend) nonLockedAWSPublicCertificateEntry(ctx context.Context, s logical.Storage, certName string) (*awsPublicCert, error) { + entry, err := s.Get(ctx, "config/certificate/"+certName) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + var certEntry awsPublicCert + if err := entry.DecodeJSON(&certEntry); err != nil { + return nil, err + } + + // Handle upgrade for certificate type + persistNeeded := false + if certEntry.Type == "" { + certEntry.Type = "pkcs7" + persistNeeded = true + } + + if persistNeeded { + if err := b.nonLockedSetAWSPublicCertificateEntry(ctx, s, certName, &certEntry); err != nil { + return nil, err + } + } + + return &certEntry, nil +} + +// pathConfigCertificateDelete is used to delete the previously configured AWS +// Public Key that is used to verify the PKCS#7 signature of the instance +// identity document. +func (b *backend) pathConfigCertificateDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + b.configMutex.Lock() + defer b.configMutex.Unlock() + + certName := data.Get("cert_name").(string) + if certName == "" { + return logical.ErrorResponse("missing cert_name"), nil + } + + return nil, req.Storage.Delete(ctx, "config/certificate/"+certName) +} + +// pathConfigCertificateRead is used to view the configured AWS Public Key that +// is used to verify the PKCS#7 signature of the instance identity document. 
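+// +// Illustrative sketch (the "auth/aws" mount path and the certificate name "my-cert" are placeholders, and the API client setup is assumed): reading a registered certificate back with the Go API client: +// +// secret, err := client.Logical().Read("auth/aws/config/certificate/my-cert") +// if err == nil && secret != nil { +// fmt.Println(secret.Data["type"]) // "pkcs7" or "identity" +// }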
+func (b *backend) pathConfigCertificateRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + certName := data.Get("cert_name").(string) + if certName == "" { + return logical.ErrorResponse("missing cert_name"), nil + } + + certificateEntry, err := b.lockedAWSPublicCertificateEntry(ctx, req.Storage, certName) + if err != nil { + return nil, err + } + if certificateEntry == nil { + return nil, nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + "aws_public_cert": certificateEntry.AWSPublicCert, + "type": certificateEntry.Type, + }, + }, nil +} + +// pathConfigCertificateCreateUpdate is used to register an AWS Public Key that +// is used to verify the PKCS#7 signature of the instance identity document. +func (b *backend) pathConfigCertificateCreateUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + certName := data.Get("cert_name").(string) + if certName == "" { + return logical.ErrorResponse("missing certificate name"), nil + } + + b.configMutex.Lock() + defer b.configMutex.Unlock() + + // Check if there is already a certificate entry registered + certEntry, err := b.nonLockedAWSPublicCertificateEntry(ctx, req.Storage, certName) + if err != nil { + return nil, err + } + if certEntry == nil { + certEntry = &awsPublicCert{} + } + + // Check if type information is provided + certTypeRaw, ok := data.GetOk("type") + if ok { + certEntry.Type = strings.ToLower(certTypeRaw.(string)) + } else if req.Operation == logical.CreateOperation { + certEntry.Type = data.Get("type").(string) + } + + switch certEntry.Type { + case "pkcs7": + case "identity": + default: + return logical.ErrorResponse(fmt.Sprintf("invalid certificate type %q", certEntry.Type)), nil + } + + // Check if the value is provided by the client + certStrData, ok := data.GetOk("aws_public_cert") + if ok { + if certBytes, err := base64.StdEncoding.DecodeString(certStrData.(string)); err == nil { + certEntry.AWSPublicCert = string(certBytes) + } else { + certEntry.AWSPublicCert = certStrData.(string) + } + } else { + // aws_public_cert should be supplied for both create and update operations. + // If it is not provided, throw an error. + return logical.ErrorResponse("missing aws_public_cert"), nil + } + + // If explicitly set to empty string, error out + if certEntry.AWSPublicCert == "" { + return logical.ErrorResponse("invalid aws_public_cert"), nil + } + + // Verify the certificate by decoding it and parsing it + publicCert, err := decodePEMAndParseCertificate(certEntry.AWSPublicCert) + if err != nil { + return nil, err + } + if publicCert == nil { + return logical.ErrorResponse("invalid certificate; failed to decode and parse certificate"), nil + } + + // If none of the checks fail, save the provided certificate + if err := b.nonLockedSetAWSPublicCertificateEntry(ctx, req.Storage, certName, certEntry); err != nil { + return nil, err + } + + return nil, nil +} + +// Struct awsPublicCert holds the AWS Public Key that is used to verify the PKCS#7 signature +// of the instance identity document. +type awsPublicCert struct { + AWSPublicCert string `json:"aws_public_cert"` + Type string `json:"type"` +} + +const pathConfigCertificateSyn = ` +Adds the AWS Public Key that is used to verify the PKCS#7 signature of the identity document. +` + +const pathConfigCertificateDesc = ` +AWS Public Key which is used to verify the PKCS#7 signature of the identity document, +varies by region. 
The public key(s) can be found in AWS EC2 instance metadata documentation. +The default key that is used to verify the signature is the one that is applicable for the +following regions: US East (N. Virginia), US West (Oregon), US West (N. California), +EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore), +Asia Pacific (Sydney), and South America (Sao Paulo). + +If an instance belongs to a region other than those listed above, the public key(s) for the +corresponding regions should be registered using this endpoint. PKCS#7 is verified +using a collection of certificates containing the default certificate and all the +certificates that are registered using this endpoint. +` + +const pathListCertificatesHelpSyn = ` +Lists all the AWS public certificates that are registered with the backend. +` + +const pathListCertificatesHelpDesc = ` +Certificates will be listed by their respective names that were used during registration. +` diff --git a/builtin/credential/aws/path_config_client.go b/builtin/credential/aws/path_config_client.go new file mode 100644 index 0000000..979fac1 --- /dev/null +++ b/builtin/credential/aws/path_config_client.go @@ -0,0 +1,395 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package awsauth + +import ( + "context" + "errors" + "net/http" + "net/textproto" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func (b *backend) pathConfigClient() *framework.Path { + return &framework.Path{ + Pattern: "config/client$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + }, + + Fields: map[string]*framework.FieldSchema{ + "access_key": { + Type: framework.TypeString, + Default: "", + Description: "AWS Access Key ID for the account used to make AWS API requests.", + }, + + "secret_key": { + Type: framework.TypeString, + Default: "", + Description: "AWS Secret Access Key for the account used to make AWS API requests.", + }, + + "endpoint": { + Type: framework.TypeString, + Default: "", + Description: "URL to override the default generated endpoint for making AWS EC2 API calls.", + }, + + "iam_endpoint": { + Type: framework.TypeString, + Default: "", + Description: "URL to override the default generated endpoint for making AWS IAM API calls.", + }, + + "sts_endpoint": { + Type: framework.TypeString, + Default: "", + Description: "URL to override the default generated endpoint for making AWS STS API calls.", + }, + + "sts_region": { + Type: framework.TypeString, + Default: "", + Description: "The region ID for the sts_endpoint, if set.", + }, + + "iam_server_id_header_value": { + Type: framework.TypeString, + Default: "", + Description: "Value to require in the X-Vault-AWS-IAM-Server-ID request header", + }, + + "allowed_sts_header_values": { + Type: framework.TypeCommaStringSlice, + Default: nil, + Description: "List of additional headers that are allowed to be in AWS STS request headers", + }, + + "max_retries": { + Type: framework.TypeInt, + Default: aws.UseServiceDefaultRetries, + Description: "Maximum number of retries for recoverable exceptions of AWS APIs", + }, + }, + + ExistenceCheck: b.pathConfigClientExistenceCheck, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.CreateOperation: &framework.PathOperation{ + Callback: b.pathConfigClientCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ +
OperationVerb: "configure", + OperationSuffix: "client", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigClientCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "client", + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathConfigClientDelete, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "client-configuration", + }, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigClientRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "client-configuration", + }, + }, + }, + + HelpSynopsis: pathConfigClientHelpSyn, + HelpDescription: pathConfigClientHelpDesc, + } +} + +// Establishes dichotomy of request operation between CreateOperation and UpdateOperation. +// Returning 'true' forces an UpdateOperation, CreateOperation otherwise. +func (b *backend) pathConfigClientExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) { + entry, err := b.lockedClientConfigEntry(ctx, req.Storage) + if err != nil { + return false, err + } + return entry != nil, nil +} + +// Fetch the client configuration required to access the AWS API, after acquiring an exclusive lock. +func (b *backend) lockedClientConfigEntry(ctx context.Context, s logical.Storage) (*clientConfig, error) { + b.configMutex.RLock() + defer b.configMutex.RUnlock() + + return b.nonLockedClientConfigEntry(ctx, s) +} + +// Fetch the client configuration required to access the AWS API. +func (b *backend) nonLockedClientConfigEntry(ctx context.Context, s logical.Storage) (*clientConfig, error) { + entry, err := s.Get(ctx, "config/client") + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result clientConfig + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + return &result, nil +} + +func (b *backend) pathConfigClientRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + clientConfig, err := b.lockedClientConfigEntry(ctx, req.Storage) + if err != nil { + return nil, err + } + + if clientConfig == nil { + return nil, nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + "access_key": clientConfig.AccessKey, + "endpoint": clientConfig.Endpoint, + "iam_endpoint": clientConfig.IAMEndpoint, + "sts_endpoint": clientConfig.STSEndpoint, + "sts_region": clientConfig.STSRegion, + "iam_server_id_header_value": clientConfig.IAMServerIdHeaderValue, + "max_retries": clientConfig.MaxRetries, + "allowed_sts_header_values": clientConfig.AllowedSTSHeaderValues, + }, + }, nil +} + +func (b *backend) pathConfigClientDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + b.configMutex.Lock() + defer b.configMutex.Unlock() + + if err := req.Storage.Delete(ctx, "config/client"); err != nil { + return nil, err + } + + // Remove all the cached EC2 client objects in the backend. + b.flushCachedEC2Clients() + + // Remove all the cached EC2 client objects in the backend. + b.flushCachedIAMClients() + + // unset the cached default AWS account ID + b.defaultAWSAccountID = "" + + return nil, nil +} + +// pathConfigClientCreateUpdate is used to register the 'aws_secret_key' and 'aws_access_key' +// that can be used to interact with AWS EC2 API. 
+func (b *backend) pathConfigClientCreateUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + b.configMutex.Lock() + defer b.configMutex.Unlock() + + configEntry, err := b.nonLockedClientConfigEntry(ctx, req.Storage) + if err != nil { + return nil, err + } + if configEntry == nil { + configEntry = &clientConfig{} + } + + // changedCreds is whether we need to flush the cached AWS clients and store in the backend + changedCreds := false + // changedOtherConfig is whether other config has changed that requires storing in the backend + // but does not require flushing the cached clients + changedOtherConfig := false + + accessKeyStr, ok := data.GetOk("access_key") + if ok { + if configEntry.AccessKey != accessKeyStr.(string) { + changedCreds = true + configEntry.AccessKey = accessKeyStr.(string) + } + } else if req.Operation == logical.CreateOperation { + // Use the default + configEntry.AccessKey = data.Get("access_key").(string) + } + + secretKeyStr, ok := data.GetOk("secret_key") + if ok { + if configEntry.SecretKey != secretKeyStr.(string) { + changedCreds = true + configEntry.SecretKey = secretKeyStr.(string) + } + } else if req.Operation == logical.CreateOperation { + configEntry.SecretKey = data.Get("secret_key").(string) + } + + endpointStr, ok := data.GetOk("endpoint") + if ok { + if configEntry.Endpoint != endpointStr.(string) { + changedCreds = true + configEntry.Endpoint = endpointStr.(string) + } + } else if req.Operation == logical.CreateOperation { + configEntry.Endpoint = data.Get("endpoint").(string) + } + + iamEndpointStr, ok := data.GetOk("iam_endpoint") + if ok { + if configEntry.IAMEndpoint != iamEndpointStr.(string) { + changedCreds = true + configEntry.IAMEndpoint = iamEndpointStr.(string) + } + } else if req.Operation == logical.CreateOperation { + configEntry.IAMEndpoint = data.Get("iam_endpoint").(string) + } + + stsEndpointStr, ok := data.GetOk("sts_endpoint") + if ok { + if configEntry.STSEndpoint != stsEndpointStr.(string) { + // We don't directly cache STS clients as they are never directly used. + // However, they are potentially indirectly used as credential providers + // for the EC2 and IAM clients, and thus we would be indirectly caching + // them there. So, if we change the STS endpoint, we should flush those + // cached clients. + changedCreds = true + configEntry.STSEndpoint = stsEndpointStr.(string) + } + } else if req.Operation == logical.CreateOperation { + configEntry.STSEndpoint = data.Get("sts_endpoint").(string) + } + + stsRegionStr, ok := data.GetOk("sts_region") + if ok { + if configEntry.STSRegion != stsRegionStr.(string) { + // Region is used when building STS clients. As such, all the comments + // regarding the sts_endpoint changing apply here as well. 
+ changedCreds = true + configEntry.STSRegion = stsRegionStr.(string) + } + } + + headerValStr, ok := data.GetOk("iam_server_id_header_value") + if ok { + if configEntry.IAMServerIdHeaderValue != headerValStr.(string) { + // NOT setting changedCreds here, since this isn't really cached + configEntry.IAMServerIdHeaderValue = headerValStr.(string) + changedOtherConfig = true + } + } else if req.Operation == logical.CreateOperation { + configEntry.IAMServerIdHeaderValue = data.Get("iam_server_id_header_value").(string) + } + + aHeadersValStr, ok := data.GetOk("allowed_sts_header_values") + if ok { + aHeadersValSl := aHeadersValStr.([]string) + for i, v := range aHeadersValSl { + aHeadersValSl[i] = textproto.CanonicalMIMEHeaderKey(v) + } + if !strutil.EquivalentSlices(configEntry.AllowedSTSHeaderValues, aHeadersValSl) { + // NOT setting changedCreds here, since this isn't really cached + configEntry.AllowedSTSHeaderValues = aHeadersValSl + changedOtherConfig = true + } + } else if req.Operation == logical.CreateOperation { + ah, ok := data.GetOk("allowed_sts_header_values") + if ok { + configEntry.AllowedSTSHeaderValues = ah.([]string) + } + } + + maxRetriesInt, ok := data.GetOk("max_retries") + if ok { + configEntry.MaxRetries = maxRetriesInt.(int) + changedOtherConfig = true + } else if req.Operation == logical.CreateOperation { + configEntry.MaxRetries = data.Get("max_retries").(int) + } + + // Since this endpoint supports both create operation and update operation, + // the error checks for access_key and secret_key not being set are not present. + // This allows calling this endpoint multiple times to provide the values. + // Hence, the readers of this endpoint should do the validation on + // the validation of keys before using them. + entry, err := b.configClientToEntry(configEntry) + if err != nil { + return nil, err + } + + if changedCreds || changedOtherConfig || req.Operation == logical.CreateOperation { + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + } + + if changedCreds { + b.flushCachedEC2Clients() + b.flushCachedIAMClients() + b.defaultAWSAccountID = "" + } + + return nil, nil +} + +// configClientToEntry allows the client config code to encapsulate its +// knowledge about where its config is stored. It also provides a way +// for other endpoints to update the config properly. +func (b *backend) configClientToEntry(conf *clientConfig) (*logical.StorageEntry, error) { + entry, err := logical.StorageEntryJSON("config/client", conf) + if err != nil { + return nil, err + } + return entry, nil +} + +// Struct to hold 'aws_access_key' and 'aws_secret_key' that are required to +// interact with the AWS EC2 API. 
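+// +// A sketch of how the struct round-trips through storage, mirroring what configClientToEntry above produces (field values are placeholders; s and ctx are as in the handlers above): +// +// entry, err := logical.StorageEntryJSON("config/client", &clientConfig{ +// AccessKey: "AKIA...", +// SecretKey: "...", +// }) +// if err == nil { +// err = s.Put(ctx, entry) // stored under the key "config/client" +// }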
+type clientConfig struct { + AccessKey string `json:"access_key"` + SecretKey string `json:"secret_key"` + Endpoint string `json:"endpoint"` + IAMEndpoint string `json:"iam_endpoint"` + STSEndpoint string `json:"sts_endpoint"` + STSRegion string `json:"sts_region"` + IAMServerIdHeaderValue string `json:"iam_server_id_header_value"` + AllowedSTSHeaderValues []string `json:"allowed_sts_header_values"` + MaxRetries int `json:"max_retries"` +} + +func (c *clientConfig) validateAllowedSTSHeaderValues(headers http.Header) error { + for k := range headers { + h := textproto.CanonicalMIMEHeaderKey(k) + if strings.HasPrefix(h, amzHeaderPrefix) && + !strutil.StrListContains(defaultAllowedSTSRequestHeaders, h) && + !strutil.StrListContains(c.AllowedSTSHeaderValues, h) { + return errors.New("invalid request header: " + k) + } + } + return nil +} + +const pathConfigClientHelpSyn = ` +Configure AWS IAM credentials that are used to query instance and role details from the AWS API. +` + +const pathConfigClientHelpDesc = ` +The aws-ec2 auth method makes AWS API queries to retrieve information +regarding EC2 instances that perform login operations. The 'aws_secret_key' and +'aws_access_key' parameters configured here should map to an AWS IAM user that +has permission to make the following API queries: + +* ec2:DescribeInstances +* iam:GetInstanceProfile (if IAM Role binding is used) +` diff --git a/builtin/credential/aws/path_config_client_test.go b/builtin/credential/aws/path_config_client_test.go new file mode 100644 index 0000000..4c807d1 --- /dev/null +++ b/builtin/credential/aws/path_config_client_test.go @@ -0,0 +1,132 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package awsauth + +import ( + "context" + "testing" + + "github.com/hashicorp/vault/sdk/logical" +) + +func TestBackend_pathConfigClient(t *testing.T) { + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // make sure we start with empty roles, which gives us confidence that the read later + // actually is the two roles we created + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Path: "config/client", + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + // at this point, resp == nil is valid as no client config exists + // if resp != nil, then resp.Data must have EndPoint and IAMServerIdHeaderValue as nil + if resp != nil { + if resp.IsError() { + t.Fatalf("failed to read client config entry") + } else if resp.Data["endpoint"] != nil || resp.Data["iam_server_id_header_value"] != nil { + t.Fatalf("returned endpoint or iam_server_id_header_value non-nil") + } + } + + data := map[string]interface{}{ + "sts_endpoint": "https://my-custom-sts-endpoint.example.com", + "sts_region": "us-east-2", + "iam_server_id_header_value": "vault_server_identification_314159", + } + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.CreateOperation, + Path: "config/client", + Data: data, + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + if resp != nil && resp.IsError() { + t.Fatal("failed to create the client config entry") + } + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Path: "config/client", + Storage: storage, + }) + if err != nil { + 
t.Fatal(err) + } + if resp == nil || resp.IsError() { + t.Fatal("failed to read the client config entry") + } + if resp.Data["iam_server_id_header_value"] != data["iam_server_id_header_value"] { + t.Fatalf("expected iam_server_id_header_value: '%#v'; returned iam_server_id_header_value: '%#v'", + data["iam_server_id_header_value"], resp.Data["iam_server_id_header_value"]) + } + if resp.Data["sts_endpoint"] != data["sts_endpoint"] { + t.Fatalf("expected sts_endpoint: '%#v'; returned sts_endpoint: '%#v'", + data["sts_endpoint"], resp.Data["sts_endpoint"]) + } + if resp.Data["sts_region"] != data["sts_region"] { + t.Fatalf("expected sts_region: '%#v'; returned sts_region: '%#v'", + data["sts_region"], resp.Data["sts_region"]) + } + + data = map[string]interface{}{ + "sts_endpoint": "https://my-custom-sts-endpoint2.example.com", + "sts_region": "us-west-1", + "iam_server_id_header_value": "vault_server_identification_2718281", + } + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/client", + Data: data, + Storage: storage, + }) + + if err != nil { + t.Fatal(err) + } + if resp != nil && resp.IsError() { + t.Fatal("failed to update the client config entry") + } + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Path: "config/client", + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.IsError() { + t.Fatal("failed to read the client config entry") + } + if resp.Data["iam_server_id_header_value"] != data["iam_server_id_header_value"] { + t.Fatalf("expected iam_server_id_header_value: '%#v'; returned iam_server_id_header_value: '%#v'", + data["iam_server_id_header_value"], resp.Data["iam_server_id_header_value"]) + } + if resp.Data["sts_endpoint"] != data["sts_endpoint"] { + t.Fatalf("expected sts_endpoint: '%#v'; returned sts_endpoint: '%#v'", + data["sts_endpoint"], resp.Data["sts_endpoint"]) + } + if resp.Data["sts_region"] != data["sts_region"] { + t.Fatalf("expected sts_region: '%#v'; returned sts_region: '%#v'", + data["sts_region"], resp.Data["sts_region"]) + } +} diff --git a/builtin/credential/aws/path_config_identity.go b/builtin/credential/aws/path_config_identity.go new file mode 100644 index 0000000..2512c9d --- /dev/null +++ b/builtin/credential/aws/path_config_identity.go @@ -0,0 +1,221 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package awsauth + +import ( + "context" + "fmt" + + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/authmetadata" + "github.com/hashicorp/vault/sdk/logical" +) + +var ( + // iamAuthMetadataFields is a list of the default auth metadata + // added to tokens during login. The default alias type used + // by this back-end is the role ID. Subsequently, the default + // fields included are expected to have a low rate of change + // when the role ID is in use. + iamAuthMetadataFields = &authmetadata.Fields{ + FieldName: "iam_metadata", + Default: []string{ + "account_id", + "auth_type", + }, + AvailableToAdd: []string{ + "canonical_arn", + "client_arn", + "client_user_id", + "inferred_aws_region", + "inferred_entity_id", + "inferred_entity_type", + }, + } + + // ec2AuthMetadataFields is a list of the default auth metadata + // added to tokens during login. The default alias type used + // by this back-end is the role ID. 
Subsequently, the default + // fields included are expected to have a low rate of change + // when the role ID is in use. + ec2AuthMetadataFields = &authmetadata.Fields{ + FieldName: "ec2_metadata", + Default: []string{ + "account_id", + "auth_type", + }, + AvailableToAdd: []string{ + "ami_id", + "instance_id", + "region", + }, + } +) + +func (b *backend) pathConfigIdentity() *framework.Path { + return &framework.Path{ + Pattern: "config/identity$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + }, + + Fields: map[string]*framework.FieldSchema{ + "iam_alias": { + Type: framework.TypeString, + Default: identityAliasIAMUniqueID, + Description: fmt.Sprintf("Configure how the AWS auth method generates entity aliases when using IAM auth. Valid values are %q, %q, and %q. Defaults to %q.", identityAliasRoleID, identityAliasIAMUniqueID, identityAliasIAMFullArn, identityAliasRoleID), + }, + iamAuthMetadataFields.FieldName: authmetadata.FieldSchema(iamAuthMetadataFields), + "ec2_alias": { + Type: framework.TypeString, + Default: identityAliasEC2InstanceID, + Description: fmt.Sprintf("Configure how the AWS auth method generates entity alias when using EC2 auth. Valid values are %q, %q, and %q. Defaults to %q.", identityAliasRoleID, identityAliasEC2InstanceID, identityAliasEC2ImageID, identityAliasRoleID), + }, + ec2AuthMetadataFields.FieldName: authmetadata.FieldSchema(ec2AuthMetadataFields), + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: pathConfigIdentityRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "identity-integration-configuration", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: pathConfigIdentityUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "identity-integration", + }, + }, + }, + + HelpSynopsis: pathConfigIdentityHelpSyn, + HelpDescription: pathConfigIdentityHelpDesc, + } +} + +func identityConfigEntry(ctx context.Context, s logical.Storage) (*identityConfig, error) { + entryRaw, err := s.Get(ctx, "config/identity") + if err != nil { + return nil, err + } + + entry := &identityConfig{ + IAMAuthMetadataHandler: authmetadata.NewHandler(iamAuthMetadataFields), + EC2AuthMetadataHandler: authmetadata.NewHandler(ec2AuthMetadataFields), + } + if entryRaw != nil { + if err := entryRaw.DecodeJSON(entry); err != nil { + return nil, err + } + } + + if entry.IAMAlias == "" { + entry.IAMAlias = identityAliasRoleID + } + + if entry.EC2Alias == "" { + entry.EC2Alias = identityAliasRoleID + } + + return entry, nil +} + +func pathConfigIdentityRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + config, err := identityConfigEntry(ctx, req.Storage) + if err != nil { + return nil, err + } + + return &logical.Response{ + Data: map[string]interface{}{ + "iam_alias": config.IAMAlias, + iamAuthMetadataFields.FieldName: config.IAMAuthMetadataHandler.AuthMetadata(), + "ec2_alias": config.EC2Alias, + ec2AuthMetadataFields.FieldName: config.EC2AuthMetadataHandler.AuthMetadata(), + }, + }, nil +} + +func pathConfigIdentityUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + config, err := identityConfigEntry(ctx, req.Storage) + if err != nil { + return nil, err + } + + iamAliasRaw, ok := data.GetOk("iam_alias") + if ok { + iamAlias := iamAliasRaw.(string) + allowedIAMAliasValues := 
[]string{identityAliasRoleID, identityAliasIAMUniqueID, identityAliasIAMFullArn} + if !strutil.StrListContains(allowedIAMAliasValues, iamAlias) { + return logical.ErrorResponse(fmt.Sprintf("iam_alias of %q not in set of allowed values: %v", iamAlias, allowedIAMAliasValues)), nil + } + config.IAMAlias = iamAlias + } + + ec2AliasRaw, ok := data.GetOk("ec2_alias") + if ok { + ec2Alias := ec2AliasRaw.(string) + allowedEC2AliasValues := []string{identityAliasRoleID, identityAliasEC2InstanceID, identityAliasEC2ImageID} + if !strutil.StrListContains(allowedEC2AliasValues, ec2Alias) { + return logical.ErrorResponse(fmt.Sprintf("ec2_alias of %q not in set of allowed values: %v", ec2Alias, allowedEC2AliasValues)), nil + } + config.EC2Alias = ec2Alias + } + if err := config.IAMAuthMetadataHandler.ParseAuthMetadata(data); err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + if err := config.EC2AuthMetadataHandler.ParseAuthMetadata(data); err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + + entry, err := logical.StorageEntryJSON("config/identity", config) + if err != nil { + return nil, err + } + + err = req.Storage.Put(ctx, entry) + if err != nil { + return nil, err + } + + return nil, nil +} + +type identityConfig struct { + IAMAlias string `json:"iam_alias"` + IAMAuthMetadataHandler *authmetadata.Handler `json:"iam_auth_metadata_handler"` + EC2Alias string `json:"ec2_alias"` + EC2AuthMetadataHandler *authmetadata.Handler `json:"ec2_auth_metadata_handler"` +} + +const ( + identityAliasIAMUniqueID = "unique_id" + identityAliasIAMFullArn = "full_arn" + identityAliasEC2InstanceID = "instance_id" + identityAliasEC2ImageID = "image_id" + identityAliasRoleID = "role_id" +) + +const pathConfigIdentityHelpSyn = ` +Configure the way the AWS auth method interacts with the identity store +` + +const pathConfigIdentityHelpDesc = ` +The AWS auth backend defaults to aliasing an IAM principal's unique ID to the +identity store. This path allows users to change how Vault configures the +mapping to Identity aliases for more flexibility. + +You can set the iam_alias parameter to one of the following values: + +* 'unique_id': This retains Vault's default behavior +* 'full_arn': This maps the full authenticated ARN to the identity alias, e.g., + "arn:aws:sts::<account_id>:assumed-role/<role_name>/<role_session_name>" + This is useful where you have an identity provider that sets role_session_name + to a known value of a person, such as a username or email address, and allows + you to map those roles back to entries in your identity store. +` diff --git a/builtin/credential/aws/path_config_identity_test.go b/builtin/credential/aws/path_config_identity_test.go new file mode 100644 index 0000000..8a7db09 --- /dev/null +++ b/builtin/credential/aws/path_config_identity_test.go @@ -0,0 +1,167 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package awsauth + +import ( + "context" + "testing" + + "github.com/hashicorp/vault/sdk/logical" +) + +func TestBackend_pathConfigIdentity(t *testing.T) { + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // Check if default values are returned before setting the configuration + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Path: "config/identity", + Storage: storage, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + if resp.Data["iam_alias"] == nil || resp.Data["iam_alias"] != identityAliasRoleID { + t.Fatalf("bad: iam_alias; expected: %q, actual: %q", identityAliasRoleID, resp.Data["iam_alias"]) + } + if resp.Data["ec2_alias"] == nil || resp.Data["ec2_alias"] != identityAliasRoleID { + t.Fatalf("bad: ec2_alias; expected: %q, actual: %q", identityAliasRoleID, resp.Data["ec2_alias"]) + } + + // Invalid value for iam_alias + data := map[string]interface{}{ + "iam_alias": "invalid", + } + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/identity", + Data: data, + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatalf("nil response from invalid config/identity request") + } + if !resp.IsError() { + t.Fatalf("received non-error response from invalid config/identity request: %#v", resp) + } + + // Valid value for iam_alias but invalid value for ec2_alias + data["iam_alias"] = identityAliasIAMFullArn + data["ec2_alias"] = "invalid" + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/identity", + Data: data, + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatalf("nil response from invalid config/identity request") + } + if !resp.IsError() { + t.Fatalf("received non-error response from invalid config/identity request: %#v", resp) + } + + // Valid value for both iam_alias and ec2_alias + data["ec2_alias"] = identityAliasEC2ImageID + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/identity", + Data: data, + Storage: storage, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + + // Check if both values are stored properly + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Path: "config/identity", + Storage: storage, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + if resp.Data["iam_alias"] != identityAliasIAMFullArn { + t.Fatalf("bad: expected response with iam_alias value of %q; got %#v", identityAliasIAMFullArn, resp.Data["iam_alias"]) + } + if resp.Data["ec2_alias"] != identityAliasEC2ImageID { + t.Fatalf("bad: expected response with ec2_alias value of %q; got %#v", identityAliasEC2ImageID, resp.Data["ec2_alias"]) + } + + // Modify one field and ensure that the other one is unchanged + data["ec2_alias"] = identityAliasEC2InstanceID + delete(data, "iam_alias") + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: 
logical.UpdateOperation, + Path: "config/identity", + Data: data, + Storage: storage, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Path: "config/identity", + Storage: storage, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + if resp.Data["iam_alias"] != identityAliasIAMFullArn { + t.Fatalf("bad: expected response with iam_alias value of %q; got %#v", identityAliasIAMFullArn, resp.Data["iam_alias"]) + } + if resp.Data["ec2_alias"] != identityAliasEC2InstanceID { + t.Fatalf("bad: expected response with ec2_alias value of %q; got %#v", identityAliasEC2InstanceID, resp.Data["ec2_alias"]) + } + + // Update both iam_alias and ec2_alias + data["iam_alias"] = identityAliasIAMUniqueID + data["ec2_alias"] = identityAliasEC2InstanceID + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/identity", + Data: data, + Storage: storage, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + + // Check if updates were stored properly + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Path: "config/identity", + Storage: storage, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + if resp.Data["iam_alias"] != identityAliasIAMUniqueID { + t.Fatalf("bad: expected response with iam_alias value of %q; got %#v", identityAliasIAMUniqueID, resp.Data["iam_alias"]) + } + if resp.Data["ec2_alias"] != identityAliasEC2InstanceID { + t.Fatalf("bad: expected response with ec2_alias value of %q; got %#v", identityAliasEC2InstanceID, resp.Data["ec2_alias"]) + } +} diff --git a/builtin/credential/aws/path_config_rotate_root.go b/builtin/credential/aws/path_config_rotate_root.go new file mode 100644 index 0000000..141b7e2 --- /dev/null +++ b/builtin/credential/aws/path_config_rotate_root.go @@ -0,0 +1,218 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package awsauth + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/aws/aws-sdk-go/service/iam/iamiface" + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-secure-stdlib/awsutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func (b *backend) pathConfigRotateRoot() *framework.Path { + return &framework.Path{ + Pattern: "config/rotate-root", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationVerb: "rotate", + OperationSuffix: "root-credentials", + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigRotateRootUpdate, + }, + }, + + HelpSynopsis: pathConfigRotateRootHelpSyn, + HelpDescription: pathConfigRotateRootHelpDesc, + } +} + +func (b *backend) pathConfigRotateRootUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // First get the AWS key and secret and validate that we _can_ rotate them.
+ // We take the write lock here because we will mutate the stored config and must prevent anything else from reading or changing it while we rotate it. + b.configMutex.Lock() + defer b.configMutex.Unlock() + + clientConf, err := b.nonLockedClientConfigEntry(ctx, req.Storage) + if err != nil { + return nil, err + } + if clientConf == nil { + return logical.ErrorResponse(`can't update client config because it's unset`), nil + } + if clientConf.AccessKey == "" { + return logical.ErrorResponse("can't update access_key because it's unset"), nil + } + if clientConf.SecretKey == "" { + return logical.ErrorResponse("can't update secret_key because it's unset"), nil + } + + // Getting our client through the b.clientIAM method requires values retrieved through + // the user providing an ARN, which we don't have here, so let's just directly + // make what we need. + staticCreds := &credentials.StaticProvider{ + Value: credentials.Value{ + AccessKeyID: clientConf.AccessKey, + SecretAccessKey: clientConf.SecretKey, + }, + } + // By default, leave the iamEndpoint nil to tell AWS it's unset. However, if it is + // configured, populate the pointer. + var iamEndpoint *string + if clientConf.IAMEndpoint != "" { + iamEndpoint = aws.String(clientConf.IAMEndpoint) + } + + // Attempt to retrieve the region, error out if no region is provided. + region, err := awsutil.GetRegion("") + if err != nil { + return nil, fmt.Errorf("error retrieving region: %w", err) + } + + awsConfig := &aws.Config{ + Credentials: credentials.NewCredentials(staticCreds), + Endpoint: iamEndpoint, + + // Generally speaking, GetRegion will use the Vault server's region. However, if this + // needs to be overridden, an easy way would be to set the AWS_DEFAULT_REGION on the Vault server + // to the desired region. If that's still insufficient for someone's use case, in the future we + // could add the ability to specify the region either on the client config or as part of the + // inbound rotation call. + Region: aws.String(region), + + // Prevents races. + HTTPClient: cleanhttp.DefaultClient(), + } + sess, err := session.NewSession(awsConfig) + if err != nil { + return nil, err + } + iamClient := getIAMClient(sess) + + // Get the current user's name since it's required to create an access key. + // Empty input means get the current user. + var getUserInput iam.GetUserInput + getUserRes, err := iamClient.GetUserWithContext(ctx, &getUserInput) + if err != nil { + return nil, fmt.Errorf("error calling GetUser: %w", err) + } + if getUserRes == nil { + return nil, fmt.Errorf("nil response from GetUser") + } + if getUserRes.User == nil { + return nil, fmt.Errorf("nil user returned from GetUser") + } + if getUserRes.User.UserName == nil { + return nil, fmt.Errorf("nil UserName returned from GetUser") + } + + // Create the new access key and secret. + createAccessKeyInput := iam.CreateAccessKeyInput{ + UserName: getUserRes.User.UserName, + } + createAccessKeyRes, err := iamClient.CreateAccessKeyWithContext(ctx, &createAccessKeyInput) + if err != nil { + return nil, fmt.Errorf("error calling CreateAccessKey: %w", err) + } + if createAccessKeyRes.AccessKey == nil { + return nil, fmt.Errorf("nil response from CreateAccessKey") + } + if createAccessKeyRes.AccessKey.AccessKeyId == nil || createAccessKeyRes.AccessKey.SecretAccessKey == nil { + return nil, fmt.Errorf("nil AccessKeyId or SecretAccessKey returned from CreateAccessKey") + } + + // We're about to attempt to store the newly created key and secret, but just in case we can't, + // let's clean up after ourselves.
+ storedNewConf := false + var errs error + defer func() { + if storedNewConf { + return + } + // Attempt to delete the access key and secret we created but couldn't store and use. + deleteAccessKeyInput := iam.DeleteAccessKeyInput{ + AccessKeyId: createAccessKeyRes.AccessKey.AccessKeyId, + UserName: getUserRes.User.UserName, + } + if _, err := iamClient.DeleteAccessKeyWithContext(ctx, &deleteAccessKeyInput); err != nil { + // Include this error in the errs returned by this method. + errs = multierror.Append(errs, fmt.Errorf("error deleting newly created but unstored access key ID %s: %s", *createAccessKeyRes.AccessKey.AccessKeyId, err)) + } + }() + + oldAccessKey := clientConf.AccessKey + clientConf.AccessKey = *createAccessKeyRes.AccessKey.AccessKeyId + clientConf.SecretKey = *createAccessKeyRes.AccessKey.SecretAccessKey + + // Now get ready to update storage, doing everything beforehand so we can minimize how long + // we need to hold onto the lock. + newEntry, err := b.configClientToEntry(clientConf) + if err != nil { + errs = multierror.Append(errs, fmt.Errorf("error generating new client config JSON: %w", err)) + return nil, errs + } + + // Someday we may want to allow the user to send a number of seconds to wait here + // before deleting the previous access key to allow work to complete. That would allow + // AWS, which is eventually consistent, to finish populating the new key in all places. + if err := req.Storage.Put(ctx, newEntry); err != nil { + errs = multierror.Append(errs, fmt.Errorf("error saving new client config: %w", err)) + return nil, errs + } + storedNewConf = true + + // Previous cached clients need to be cleared because they may have been made using + // the soon-to-be-obsolete credentials. + b.IAMClientsMap = make(map[string]map[string]*iam.IAM) + b.EC2ClientsMap = make(map[string]map[string]*ec2.EC2) + + // Now to clean up the old key. + deleteAccessKeyInput := iam.DeleteAccessKeyInput{ + AccessKeyId: aws.String(oldAccessKey), + UserName: getUserRes.User.UserName, + } + if _, err = iamClient.DeleteAccessKeyWithContext(ctx, &deleteAccessKeyInput); err != nil { + errs = multierror.Append(errs, fmt.Errorf("error deleting old access key ID %s: %w", oldAccessKey, err)) + return nil, errs + } + return &logical.Response{ + Data: map[string]interface{}{ + "access_key": clientConf.AccessKey, + }, + }, nil +} + +// getIAMClient allows us to change how an IAM client is created +// during testing. The AWS SDK doesn't easily lend itself to testing +// using a Go httptest server because if you inject a test URL into +// the config, the client strips important information about which +// endpoint it's hitting. Per +// https://aws.amazon.com/blogs/developer/mocking-out-then-aws-sdk-for-go-for-unit-testing/, +// this is the recommended approach. +var getIAMClient = func(sess *session.Session) iamiface.IAMAPI { + return iam.New(sess) +} + +const pathConfigRotateRootHelpSyn = ` +Request to rotate the AWS credentials used by Vault +` + +const pathConfigRotateRootHelpDesc = ` +This path attempts to rotate the AWS credentials used by Vault for this mount. +It is only valid if Vault has been configured to use AWS IAM credentials via the +config/client endpoint. +` diff --git a/builtin/credential/aws/path_config_rotate_root_test.go b/builtin/credential/aws/path_config_rotate_root_test.go new file mode 100644 index 0000000..3fe5b29 --- /dev/null +++ b/builtin/credential/aws/path_config_rotate_root_test.go @@ -0,0 +1,98 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package awsauth + +import ( + "context" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/aws/aws-sdk-go/service/iam/iamiface" + "github.com/hashicorp/go-secure-stdlib/awsutil" + "github.com/hashicorp/vault/sdk/logical" +) + +type mockIAMClient awsutil.MockIAM + +func (m *mockIAMClient) GetUserWithContext(_ aws.Context, input *iam.GetUserInput, _ ...request.Option) (*iam.GetUserOutput, error) { + return (*awsutil.MockIAM)(m).GetUser(input) +} + +func (m *mockIAMClient) CreateAccessKeyWithContext(_ aws.Context, input *iam.CreateAccessKeyInput, _ ...request.Option) (*iam.CreateAccessKeyOutput, error) { + return (*awsutil.MockIAM)(m).CreateAccessKey(input) +} + +func (m *mockIAMClient) DeleteAccessKeyWithContext(_ aws.Context, input *iam.DeleteAccessKeyInput, _ ...request.Option) (*iam.DeleteAccessKeyOutput, error) { + return (*awsutil.MockIAM)(m).DeleteAccessKey(input) +} + +func TestPathConfigRotateRoot(t *testing.T) { + getIAMClient = func(sess *session.Session) iamiface.IAMAPI { + return &mockIAMClient{ + CreateAccessKeyOutput: &iam.CreateAccessKeyOutput{ + AccessKey: &iam.AccessKey{ + AccessKeyId: aws.String("fizz2"), + SecretAccessKey: aws.String("buzz2"), + }, + }, + GetUserOutput: &iam.GetUserOutput{ + User: &iam.User{ + UserName: aws.String("ellen"), + }, + }, + } + } + + ctx := context.Background() + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + + clientConf := &clientConfig{ + AccessKey: "fizz1", + SecretKey: "buzz1", + } + entry, err := logical.StorageEntryJSON("config/client", clientConf) + if err != nil { + t.Fatal(err) + } + if err := storage.Put(ctx, entry); err != nil { + t.Fatal(err) + } + + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/rotate-root", + Storage: storage, + } + resp, err := b.HandleRequest(ctx, req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr:%v", resp, err) + } + if resp == nil { + t.Fatal("expected a non-nil response") + } + if resp.Data == nil { + t.Fatal("expected resp.Data") + } + if resp.Data["access_key"].(string) != "fizz2" { + t.Fatalf("expected new access key fizz2 but received %s", resp.Data["access_key"]) + } + newClientConf, err := b.nonLockedClientConfigEntry(ctx, req.Storage) + if err != nil { + t.Fatal(err) + } + if resp.Data["access_key"].(string) != newClientConf.AccessKey { + t.Fatalf("expected new access key fizz2 to be saved to storage but received %s", newClientConf.AccessKey) + } +} diff --git a/builtin/credential/aws/path_config_sts.go b/builtin/credential/aws/path_config_sts.go new file mode 100644 index 0000000..50d986d --- /dev/null +++ b/builtin/credential/aws/path_config_sts.go @@ -0,0 +1,274 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package awsauth + +import ( + "context" + "fmt" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +// awsStsEntry is used to store details of an STS role for assumption +type awsStsEntry struct { + StsRole string `json:"sts_role"` +} + +func (b *backend) pathListSts() *framework.Path { + return &framework.Path{ + Pattern: "config/sts/?", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "sts-role-relationships", + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ListOperation: &framework.PathOperation{ + Callback: b.pathStsList, + }, + }, + + HelpSynopsis: pathListStsHelpSyn, + HelpDescription: pathListStsHelpDesc, + } +} + +func (b *backend) pathConfigSts() *framework.Path { + return &framework.Path{ + Pattern: "config/sts/" + framework.GenericNameRegex("account_id"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "sts-role", + }, + + Fields: map[string]*framework.FieldSchema{ + "account_id": { + Type: framework.TypeString, + Description: `AWS account ID to be associated with STS role. If set, +Vault will use assumed credentials to verify any login attempts from EC2 +instances in this account.`, + }, + "sts_role": { + Type: framework.TypeString, + Description: `AWS ARN for STS role to be assumed when interacting with the account specified. +The Vault server must have permissions to assume this role.`, + }, + }, + + ExistenceCheck: b.pathConfigStsExistenceCheck, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.CreateOperation: &framework.PathOperation{ + Callback: b.pathConfigStsCreateUpdate, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigStsCreateUpdate, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigStsRead, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathConfigStsDelete, + }, + }, + + HelpSynopsis: pathConfigStsSyn, + HelpDescription: pathConfigStsDesc, + } +} + +// Establishes dichotomy of request operation between CreateOperation and UpdateOperation. +// Returning 'true' forces an UpdateOperation, CreateOperation otherwise. +func (b *backend) pathConfigStsExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) { + accountID := data.Get("account_id").(string) + if accountID == "" { + return false, fmt.Errorf("missing account_id") + } + + entry, err := b.lockedAwsStsEntry(ctx, req.Storage, accountID) + if err != nil { + return false, err + } + + return entry != nil, nil +} + +// pathStsList is used to list all the AWS STS role configurations +func (b *backend) pathStsList(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + b.configMutex.RLock() + defer b.configMutex.RUnlock() + sts, err := req.Storage.List(ctx, "config/sts/") + if err != nil { + return nil, err + } + return logical.ListResponse(sts), nil +} + +// nonLockedSetAwsStsEntry creates or updates an STS role association with the given accountID +// This method does not acquire the write lock before creating or updating. 
If locking is +// desired, use lockedSetAwsStsEntry instead +func (b *backend) nonLockedSetAwsStsEntry(ctx context.Context, s logical.Storage, accountID string, stsEntry *awsStsEntry) error { + if accountID == "" { + return fmt.Errorf("missing AWS account ID") + } + + if stsEntry == nil { + return fmt.Errorf("missing AWS STS Role ARN") + } + + entry, err := logical.StorageEntryJSON("config/sts/"+accountID, stsEntry) + if err != nil { + return err + } + + if entry == nil { + return fmt.Errorf("failed to create storage entry for AWS STS configuration") + } + + return s.Put(ctx, entry) +} + +// lockedSetAwsStsEntry creates or updates an STS role association with the given accountID +// This method acquires the write lock before creating or updating the STS entry. +func (b *backend) lockedSetAwsStsEntry(ctx context.Context, s logical.Storage, accountID string, stsEntry *awsStsEntry) error { + if accountID == "" { + return fmt.Errorf("missing AWS account ID") + } + + if stsEntry == nil { + return fmt.Errorf("missing sts entry") + } + + b.configMutex.Lock() + defer b.configMutex.Unlock() + + return b.nonLockedSetAwsStsEntry(ctx, s, accountID, stsEntry) +} + +// nonLockedAwsStsEntry returns the STS role associated with the given accountID. +// This method does not acquire the read lock before returning information. If locking is +// desired, use lockedAwsStsEntry instead +func (b *backend) nonLockedAwsStsEntry(ctx context.Context, s logical.Storage, accountID string) (*awsStsEntry, error) { + entry, err := s.Get(ctx, "config/sts/"+accountID) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + var stsEntry awsStsEntry + if err := entry.DecodeJSON(&stsEntry); err != nil { + return nil, err + } + + return &stsEntry, nil +} + +// lockedAwsStsEntry returns the STS role associated with the given accountID. +// This method acquires the read lock before returning the association. 
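+//
+// For illustration, operators manage the entries read by this helper through
+// the config/sts/<account_id> endpoints defined above. Assuming the auth
+// method is mounted at the default path "auth/aws" (the account ID and role
+// ARN below are example values), a sketch of the CLI usage is:
+//
+//	vault write auth/aws/config/sts/123456789012 \
+//	    sts_role="arn:aws:iam::123456789012:role/VaultStsRole"
+//	vault read auth/aws/config/sts/123456789012
+//	vault list auth/aws/config/sts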
+func (b *backend) lockedAwsStsEntry(ctx context.Context, s logical.Storage, accountID string) (*awsStsEntry, error) { + b.configMutex.RLock() + defer b.configMutex.RUnlock() + + return b.nonLockedAwsStsEntry(ctx, s, accountID) +} + +// pathConfigStsRead is used to return information about an STS role/AWS accountID association +func (b *backend) pathConfigStsRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + accountID := data.Get("account_id").(string) + if accountID == "" { + return logical.ErrorResponse("missing account id"), nil + } + + stsEntry, err := b.lockedAwsStsEntry(ctx, req.Storage, accountID) + if err != nil { + return nil, err + } + if stsEntry == nil { + return nil, nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + "sts_role": stsEntry.StsRole, + }, + }, nil +} + +// pathConfigStsCreateUpdate is used to associate an STS role with a given AWS accountID +func (b *backend) pathConfigStsCreateUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + accountID := data.Get("account_id").(string) + if accountID == "" { + return logical.ErrorResponse("missing AWS account ID"), nil + } + + b.configMutex.Lock() + defer b.configMutex.Unlock() + + // Check if an STS role is already registered + stsEntry, err := b.nonLockedAwsStsEntry(ctx, req.Storage, accountID) + if err != nil { + return nil, err + } + if stsEntry == nil { + stsEntry = &awsStsEntry{} + } + + // Check that an STS role has actually been provided + stsRole, ok := data.GetOk("sts_role") + if ok { + stsEntry.StsRole = stsRole.(string) + } else if req.Operation == logical.CreateOperation { + return logical.ErrorResponse("missing sts role"), nil + } + + if stsEntry.StsRole == "" { + return logical.ErrorResponse("sts role cannot be empty"), nil + } + + // save the provided STS role + if err := b.nonLockedSetAwsStsEntry(ctx, req.Storage, accountID, stsEntry); err != nil { + return nil, err + } + + return nil, nil +} + +// pathConfigStsDelete is used to delete a previously configured STS configuration +func (b *backend) pathConfigStsDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + b.configMutex.Lock() + defer b.configMutex.Unlock() + + accountID := data.Get("account_id").(string) + if accountID == "" { + return logical.ErrorResponse("missing account id"), nil + } + + return nil, req.Storage.Delete(ctx, "config/sts/"+accountID) +} + +const pathConfigStsSyn = ` +Specify STS roles to be assumed for certain AWS accounts. +` + +const pathConfigStsDesc = ` +Allows the explicit association of STS roles to satellite AWS accounts (i.e. those +which are not the account in which the Vault server is running.) Login attempts from +EC2 instances running in these accounts will be verified using credentials obtained +by assumption of these STS roles. + +The environment in which the Vault server resides must have access to assume the +given STS roles. +` + +const pathListStsHelpSyn = ` +List all the AWS account/STS role relationships registered with Vault. +` + +const pathListStsHelpDesc = ` +AWS accounts will be listed by account ID, along with their respective role names. 
+` diff --git a/builtin/credential/aws/path_config_tidy_identity_accesslist.go b/builtin/credential/aws/path_config_tidy_identity_accesslist.go new file mode 100644 index 0000000..686b026 --- /dev/null +++ b/builtin/credential/aws/path_config_tidy_identity_accesslist.go @@ -0,0 +1,183 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package awsauth + +import ( + "context" + "fmt" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + identityAccessListConfigStorage = "config/tidy/identity-whitelist" +) + +func (b *backend) pathConfigTidyIdentityAccessList() *framework.Path { + return &framework.Path{ + Pattern: fmt.Sprintf("%s$", "config/tidy/identity-accesslist"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + }, + + Fields: map[string]*framework.FieldSchema{ + "safety_buffer": { + Type: framework.TypeDurationSecond, + Default: 259200, // 72h + Description: `The amount of extra time that must have passed beyond the identity's +expiration, before it is removed from the backend storage.`, + }, + "disable_periodic_tidy": { + Type: framework.TypeBool, + Default: false, + Description: "If set to 'true', disables the periodic tidying of the 'identity-accesslist/' entries.", + }, + }, + + ExistenceCheck: b.pathConfigTidyIdentityAccessListExistenceCheck, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.CreateOperation: &framework.PathOperation{ + Callback: b.pathConfigTidyIdentityAccessListCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "identity-access-list-tidy-operation", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigTidyIdentityAccessListCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "identity-access-list-tidy-operation", + }, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigTidyIdentityAccessListRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "identity-access-list-tidy-settings", + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathConfigTidyIdentityAccessListDelete, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "identity-access-list-tidy-settings", + }, + }, + }, + + HelpSynopsis: pathConfigTidyIdentityAccessListHelpSyn, + HelpDescription: pathConfigTidyIdentityAccessListHelpDesc, + } +} + +func (b *backend) pathConfigTidyIdentityAccessListExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) { + entry, err := b.lockedConfigTidyIdentities(ctx, req.Storage) + if err != nil { + return false, err + } + return entry != nil, nil +} + +func (b *backend) lockedConfigTidyIdentities(ctx context.Context, s logical.Storage) (*tidyWhitelistIdentityConfig, error) { + b.configMutex.RLock() + defer b.configMutex.RUnlock() + + return b.nonLockedConfigTidyIdentities(ctx, s) +} + +func (b *backend) nonLockedConfigTidyIdentities(ctx context.Context, s logical.Storage) (*tidyWhitelistIdentityConfig, error) { + entry, err := s.Get(ctx, identityAccessListConfigStorage) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result tidyWhitelistIdentityConfig + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + return &result, nil +} + +func (b *backend) pathConfigTidyIdentityAccessListCreateUpdate(ctx 
context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	b.configMutex.Lock()
+	defer b.configMutex.Unlock()
+
+	configEntry, err := b.nonLockedConfigTidyIdentities(ctx, req.Storage)
+	if err != nil {
+		return nil, err
+	}
+	if configEntry == nil {
+		configEntry = &tidyWhitelistIdentityConfig{}
+	}
+
+	safetyBufferInt, ok := data.GetOk("safety_buffer")
+	if ok {
+		configEntry.SafetyBuffer = safetyBufferInt.(int)
+	} else if req.Operation == logical.CreateOperation {
+		configEntry.SafetyBuffer = data.Get("safety_buffer").(int)
+	}
+
+	disablePeriodicTidyBool, ok := data.GetOk("disable_periodic_tidy")
+	if ok {
+		configEntry.DisablePeriodicTidy = disablePeriodicTidyBool.(bool)
+	} else if req.Operation == logical.CreateOperation {
+		configEntry.DisablePeriodicTidy = data.Get("disable_periodic_tidy").(bool)
+	}
+
+	entry, err := logical.StorageEntryJSON(identityAccessListConfigStorage, configEntry)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := req.Storage.Put(ctx, entry); err != nil {
+		return nil, err
+	}
+
+	return nil, nil
+}
+
+func (b *backend) pathConfigTidyIdentityAccessListRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	clientConfig, err := b.lockedConfigTidyIdentities(ctx, req.Storage)
+	if err != nil {
+		return nil, err
+	}
+	if clientConfig == nil {
+		return nil, nil
+	}
+
+	return &logical.Response{
+		Data: map[string]interface{}{
+			"safety_buffer":         clientConfig.SafetyBuffer,
+			"disable_periodic_tidy": clientConfig.DisablePeriodicTidy,
+		},
+	}, nil
+}
+
+func (b *backend) pathConfigTidyIdentityAccessListDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	b.configMutex.Lock()
+	defer b.configMutex.Unlock()
+
+	return nil, req.Storage.Delete(ctx, identityAccessListConfigStorage)
+}
+
+type tidyWhitelistIdentityConfig struct {
+	SafetyBuffer        int  `json:"safety_buffer"`
+	DisablePeriodicTidy bool `json:"disable_periodic_tidy"`
+}
+
+const pathConfigTidyIdentityAccessListHelpSyn = `
+Configures the periodic tidying operation of the access list identity entries.
+`
+
+const pathConfigTidyIdentityAccessListHelpDesc = `
+By default, expired entries in the access list are periodically removed. The tidy
+operation looks for expired items in the list and purges them, but only after a
+safety buffer duration (72h by default) has elapsed beyond each entry's
+expiration time.
+`
diff --git a/builtin/credential/aws/path_config_tidy_roletag_denylist.go b/builtin/credential/aws/path_config_tidy_roletag_denylist.go
new file mode 100644
index 0000000..fa82b77
--- /dev/null
+++ b/builtin/credential/aws/path_config_tidy_roletag_denylist.go
@@ -0,0 +1,183 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package awsauth + +import ( + "context" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + roletagDenyListConfigStorage = "config/tidy/roletag-blacklist" +) + +func (b *backend) pathConfigTidyRoletagDenyList() *framework.Path { + return &framework.Path{ + Pattern: "config/tidy/roletag-denylist$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + }, + + Fields: map[string]*framework.FieldSchema{ + "safety_buffer": { + Type: framework.TypeDurationSecond, + Default: 15552000, // 180d + Description: `The amount of extra time that must have passed beyond the roletag +expiration, before it is removed from the backend storage. +Defaults to 4320h (180 days).`, + }, + + "disable_periodic_tidy": { + Type: framework.TypeBool, + Default: false, + Description: "If set to 'true', disables the periodic tidying of deny listed entries.", + }, + }, + + ExistenceCheck: b.pathConfigTidyRoletagDenyListExistenceCheck, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.CreateOperation: &framework.PathOperation{ + Callback: b.pathConfigTidyRoletagDenyListCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "role-tag-deny-list-tidy-operation", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigTidyRoletagDenyListCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "role-tag-deny-list-tidy-operation", + }, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigTidyRoletagDenyListRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "role-tag-deny-list-tidy-settings", + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathConfigTidyRoletagDenyListDelete, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "role-tag-deny-list-tidy-settings", + }, + }, + }, + + HelpSynopsis: pathConfigTidyRoletagDenyListHelpSyn, + HelpDescription: pathConfigTidyRoletagDenyListHelpDesc, + } +} + +func (b *backend) pathConfigTidyRoletagDenyListExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) { + entry, err := b.lockedConfigTidyRoleTags(ctx, req.Storage) + if err != nil { + return false, err + } + return entry != nil, nil +} + +func (b *backend) lockedConfigTidyRoleTags(ctx context.Context, s logical.Storage) (*tidyDenyListRoleTagConfig, error) { + b.configMutex.RLock() + defer b.configMutex.RUnlock() + + return b.nonLockedConfigTidyRoleTags(ctx, s) +} + +func (b *backend) nonLockedConfigTidyRoleTags(ctx context.Context, s logical.Storage) (*tidyDenyListRoleTagConfig, error) { + entry, err := s.Get(ctx, roletagDenyListConfigStorage) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result tidyDenyListRoleTagConfig + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + return &result, nil +} + +func (b *backend) pathConfigTidyRoletagDenyListCreateUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + b.configMutex.Lock() + defer b.configMutex.Unlock() + + configEntry, err := b.nonLockedConfigTidyRoleTags(ctx, req.Storage) + if err != nil { + return nil, err + } + if configEntry == nil { + configEntry = &tidyDenyListRoleTagConfig{} + } + safetyBufferInt, ok := data.GetOk("safety_buffer") + if ok 
{
+		configEntry.SafetyBuffer = safetyBufferInt.(int)
+	} else if req.Operation == logical.CreateOperation {
+		configEntry.SafetyBuffer = data.Get("safety_buffer").(int)
+	}
+	disablePeriodicTidyBool, ok := data.GetOk("disable_periodic_tidy")
+	if ok {
+		configEntry.DisablePeriodicTidy = disablePeriodicTidyBool.(bool)
+	} else if req.Operation == logical.CreateOperation {
+		configEntry.DisablePeriodicTidy = data.Get("disable_periodic_tidy").(bool)
+	}
+
+	entry, err := logical.StorageEntryJSON(roletagDenyListConfigStorage, configEntry)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := req.Storage.Put(ctx, entry); err != nil {
+		return nil, err
+	}
+
+	return nil, nil
+}
+
+func (b *backend) pathConfigTidyRoletagDenyListRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	clientConfig, err := b.lockedConfigTidyRoleTags(ctx, req.Storage)
+	if err != nil {
+		return nil, err
+	}
+	if clientConfig == nil {
+		return nil, nil
+	}
+
+	return &logical.Response{
+		Data: map[string]interface{}{
+			"safety_buffer":         clientConfig.SafetyBuffer,
+			"disable_periodic_tidy": clientConfig.DisablePeriodicTidy,
+		},
+	}, nil
+}
+
+func (b *backend) pathConfigTidyRoletagDenyListDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	b.configMutex.Lock()
+	defer b.configMutex.Unlock()
+
+	return nil, req.Storage.Delete(ctx, roletagDenyListConfigStorage)
+}
+
+type tidyDenyListRoleTagConfig struct {
+	SafetyBuffer        int  `json:"safety_buffer"`
+	DisablePeriodicTidy bool `json:"disable_periodic_tidy"`
+}
+
+const pathConfigTidyRoletagDenyListHelpSyn = `
+Configures the periodic tidying operation of the deny listed role tag entries.
+`
+
+const pathConfigTidyRoletagDenyListHelpDesc = `
+By default, expired entries in the deny list are periodically removed. The tidy
+operation looks for expired items in the list and purges them, but only after a
+safety buffer duration (4320h, i.e. 180 days, by default) has elapsed beyond
+each entry's expiration time.
+`
diff --git a/builtin/credential/aws/path_identity_accesslist.go b/builtin/credential/aws/path_identity_accesslist.go
new file mode 100644
index 0000000..77ec574
--- /dev/null
+++ b/builtin/credential/aws/path_identity_accesslist.go
@@ -0,0 +1,181 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package awsauth
+
+import (
+	"context"
+	"time"
+
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+const identityAccessListStorage = "whitelist/identity/"
+
+func (b *backend) pathIdentityAccessList() *framework.Path {
+	return &framework.Path{
+		Pattern: "identity-accesslist/" + framework.GenericNameRegex("instance_id"),
+
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixAWS,
+			OperationSuffix: "identity-access-list",
+		},
+
+		Fields: map[string]*framework.FieldSchema{
+			"instance_id": {
+				Type: framework.TypeString,
+				Description: `EC2 instance ID.
A successful login operation from an EC2 instance +gets cached in this accesslist, keyed off of instance ID.`, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathIdentityAccesslistRead, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathIdentityAccesslistDelete, + }, + }, + + HelpSynopsis: pathIdentityAccessListSyn, + HelpDescription: pathIdentityAccessListDesc, + } +} + +func (b *backend) pathListIdentityAccessList() *framework.Path { + return &framework.Path{ + Pattern: "identity-accesslist/?", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "identity-access-list", + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ListOperation: &framework.PathOperation{ + Callback: b.pathAccessListIdentitiesList, + }, + }, + + HelpSynopsis: pathListIdentityAccessListHelpSyn, + HelpDescription: pathListIdentityAccessListHelpDesc, + } +} + +// pathAccessListIdentitiesList is used to list all the instance IDs that are present +// in the identity access list. This will list both valid and expired entries. +func (b *backend) pathAccessListIdentitiesList(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + identities, err := req.Storage.List(ctx, identityAccessListStorage) + if err != nil { + return nil, err + } + return logical.ListResponse(identities), nil +} + +// Fetch an item from the access list given an instance ID. +func accessListIdentityEntry(ctx context.Context, s logical.Storage, instanceID string) (*accessListIdentity, error) { + entry, err := s.Get(ctx, identityAccessListStorage+instanceID) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result accessListIdentity + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + return &result, nil +} + +// Stores an instance ID and the information required to validate further login/renewal attempts from +// the same instance ID. +func setAccessListIdentityEntry(ctx context.Context, s logical.Storage, instanceID string, identity *accessListIdentity) error { + entry, err := logical.StorageEntryJSON(identityAccessListStorage+instanceID, identity) + if err != nil { + return err + } + + if err := s.Put(ctx, entry); err != nil { + return err + } + return nil +} + +// pathIdentityAccesslistDelete is used to delete an entry from the identity access list given an instance ID. +func (b *backend) pathIdentityAccesslistDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + instanceID := data.Get("instance_id").(string) + if instanceID == "" { + return logical.ErrorResponse("missing instance_id"), nil + } + + return nil, req.Storage.Delete(ctx, identityAccessListStorage+instanceID) +} + +// pathIdentityAccesslistRead is used to view an entry in the identity access list given an instance ID. 
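+//
+// For illustration, a successful read returns the stored fields for the
+// instance; the values below are made-up examples:
+//
+//	{
+//	  "role": "dev-ec2",
+//	  "client_nonce": "5defbf9e-...",
+//	  "creation_time": "2024-01-02T15:04:05.999999999Z",
+//	  "disallow_reauthentication": false,
+//	  "pending_time": "2024-01-02T15:00:00Z",
+//	  "expiration_time": "2024-01-05T15:04:05.999999999Z",
+//	  "last_updated_time": "2024-01-02T15:04:05.999999999Z"
+//	}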
+func (b *backend) pathIdentityAccesslistRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	instanceID := data.Get("instance_id").(string)
+	if instanceID == "" {
+		return logical.ErrorResponse("missing instance_id"), nil
+	}
+
+	entry, err := accessListIdentityEntry(ctx, req.Storage, instanceID)
+	if err != nil {
+		return nil, err
+	}
+	if entry == nil {
+		return nil, nil
+	}
+
+	return &logical.Response{
+		Data: map[string]interface{}{
+			"role":                      entry.Role,
+			"client_nonce":              entry.ClientNonce,
+			"creation_time":             entry.CreationTime.Format(time.RFC3339Nano),
+			"disallow_reauthentication": entry.DisallowReauthentication,
+			"pending_time":              entry.PendingTime,
+			"expiration_time":           entry.ExpirationTime.Format(time.RFC3339Nano),
+			"last_updated_time":         entry.LastUpdatedTime.Format(time.RFC3339Nano),
+		},
+	}, nil
+}
+
+// Struct to represent each item in the identity access list.
+type accessListIdentity struct {
+	Role                     string    `json:"role"`
+	ClientNonce              string    `json:"client_nonce"`
+	CreationTime             time.Time `json:"creation_time"`
+	DisallowReauthentication bool      `json:"disallow_reauthentication"`
+	PendingTime              string    `json:"pending_time"`
+	ExpirationTime           time.Time `json:"expiration_time"`
+	LastUpdatedTime          time.Time `json:"last_updated_time"`
+}
+
+const pathIdentityAccessListSyn = `
+Read or delete entries in the identity access list.
+`
+
+const pathIdentityAccessListDesc = `
+Each login from an EC2 instance creates/updates an entry in the identity access list.
+
+Entries in this list can be viewed or deleted using this endpoint.
+
+By default, a periodic task looks for expired entries in the access list and
+deletes them, running once an hour. This behavior can be configured using the
+'config/tidy/identities' endpoint, and the tidy action can also be triggered
+on demand via the API, using the 'tidy/identities' endpoint.
+`
+
+const pathListIdentityAccessListHelpSyn = `
+Lists the items present in the identity access list.
+`
+
+const pathListIdentityAccessListHelpDesc = `
+The entries in the identity access list are keyed off of EC2 instance IDs.
+This endpoint lists all entries present in the identity access list, both
+expired and unexpired. Use the 'tidy/identities' endpoint to clean up the
+access list.
+`
diff --git a/builtin/credential/aws/path_login.go b/builtin/credential/aws/path_login.go
new file mode 100644
index 0000000..1e23500
--- /dev/null
+++ b/builtin/credential/aws/path_login.go
@@ -0,0 +1,1915 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package awsauth + +import ( + "context" + "crypto/subtle" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "encoding/xml" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + awsClient "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/hashicorp/errwrap" + cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-retryablehttp" + "github.com/hashicorp/go-secure-stdlib/awsutil" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-secure-stdlib/strutil" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/builtin/credential/aws/pkcs7" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/cidrutil" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + reauthenticationDisabledNonce = "reauthentication-disabled-nonce" + iamAuthType = "iam" + ec2AuthType = "ec2" + ec2EntityType = "ec2_instance" + + // Retry configuration + retryWaitMin = 500 * time.Millisecond + retryWaitMax = 30 * time.Second +) + +var ( + errRequestBodyNotValid = errors.New("iam request body is invalid") + errInvalidGetCallerIdentityResponse = errors.New("body of GetCallerIdentity is invalid") +) + +func (b *backend) pathLogin() *framework.Path { + return &framework.Path{ + Pattern: "login$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationVerb: "login", + }, + Fields: map[string]*framework.FieldSchema{ + "role": { + Type: framework.TypeString, + Description: `Name of the role against which the login is being attempted. +If 'role' is not specified, then the login endpoint looks for a role +bearing the name of the AMI ID of the EC2 instance that is trying to login. +If a matching role is not found, login fails.`, + }, + + "pkcs7": { + Type: framework.TypeString, + Description: `PKCS7 signature of the identity document when using an auth_type +of ec2.`, + }, + + "nonce": { + Type: framework.TypeString, + Description: `The nonce to be used for subsequent login requests when +auth_type is ec2. If this parameter is not specified at +all and if reauthentication is allowed, then the backend will generate a random +nonce, attaches it to the instance's identity access list entry and returns the +nonce back as part of auth metadata. This value should be used with further +login requests, to establish client authenticity. Clients can choose to set a +custom nonce if preferred, in which case, it is recommended that clients provide +a strong nonce. If a nonce is provided but with an empty value, it indicates +intent to disable reauthentication. Note that, when 'disallow_reauthentication' +option is enabled on either the role or the role tag, the 'nonce' holds no +significance.`, + }, + + "iam_http_request_method": { + Type: framework.TypeString, + Description: `HTTP method to use for the AWS request when auth_type is +iam. This must match what has been signed in the +presigned request. Currently, POST is the only supported value`, + }, + + "iam_request_url": { + Type: framework.TypeString, + Description: `Base64-encoded full URL against which to make the AWS request +when using iam auth_type.`, + }, + + "iam_request_body": { + Type: framework.TypeString, + Description: `Base64-encoded request body when auth_type is iam. 
+This must match the request body included in the signature.`, + }, + "iam_request_headers": { + Type: framework.TypeHeader, + Description: `Key/value pairs of headers for use in the +sts:GetCallerIdentity HTTP requests headers when auth_type is iam. Can be either +a Base64-encoded, JSON-serialized string, or a JSON object of key/value pairs. +This must at a minimum include the headers over which AWS has included a signature.`, + }, + "identity": { + Type: framework.TypeString, + Description: `Base64 encoded EC2 instance identity document. This needs to be supplied along +with the 'signature' parameter. If using 'curl' for fetching the identity +document, consider using the option '-w 0' while piping the output to 'base64' +binary.`, + }, + "signature": { + Type: framework.TypeString, + Description: `Base64 encoded SHA256 RSA signature of the instance identity document. This +needs to be supplied along with 'identity' parameter.`, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathLoginUpdate, + }, + logical.AliasLookaheadOperation: &framework.PathOperation{ + Callback: b.pathLoginUpdate, + }, + logical.ResolveRoleOperation: &framework.PathOperation{ + Callback: b.pathLoginResolveRole, + }, + }, + + HelpSynopsis: pathLoginSyn, + HelpDescription: pathLoginDesc, + } +} + +func (b *backend) pathLoginResolveRole(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + anyEc2, allEc2 := hasValuesForEc2Auth(data) + anyIam, allIam := hasValuesForIamAuth(data) + switch { + case anyEc2 && anyIam: + return logical.ErrorResponse("supplied auth values for both ec2 and iam auth types"), nil + case anyEc2 && !allEc2: + return logical.ErrorResponse("supplied some of the auth values for the ec2 auth type but not all"), nil + case anyEc2: + return b.pathLoginResolveRoleEc2(ctx, req, data) + case anyIam && !allIam: + return logical.ErrorResponse("supplied some of the auth values for the iam auth type but not all"), nil + case anyIam: + return b.pathLoginResolveRoleIam(ctx, req, data) + default: + return logical.ErrorResponse("didn't supply required authentication values"), nil + } +} + +func (b *backend) pathLoginEc2GetRoleNameAndIdentityDoc(ctx context.Context, req *logical.Request, data *framework.FieldData) (string, *identityDocument, *logical.Response, error) { + identityDocB64 := data.Get("identity").(string) + var identityDocBytes []byte + var err error + if identityDocB64 != "" { + identityDocBytes, err = base64.StdEncoding.DecodeString(identityDocB64) + if err != nil || len(identityDocBytes) == 0 { + return "", nil, logical.ErrorResponse("failed to base64 decode the instance identity document"), nil + } + } + + signatureB64 := data.Get("signature").(string) + var signatureBytes []byte + if signatureB64 != "" { + signatureBytes, err = base64.StdEncoding.DecodeString(signatureB64) + if err != nil { + return "", nil, logical.ErrorResponse("failed to base64 decode the SHA256 RSA signature of the instance identity document"), nil + } + } + + pkcs7B64 := data.Get("pkcs7").(string) + + // Either the pkcs7 signature of the instance identity document, or + // the identity document itself along with its SHA256 RSA signature + // needs to be provided. 
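+	// For illustration, an EC2 client typically fetches the PKCS#7 signature
+	// from the instance metadata service and logs in with it; the mount path
+	// and role name below are assumptions:
+	//
+	//	pkcs7=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/pkcs7 | tr -d '\n')
+	//	vault write auth/aws/login role=dev-ec2 pkcs7="$pkcs7"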
+ if pkcs7B64 == "" && (len(identityDocBytes) == 0 && len(signatureBytes) == 0) { + return "", nil, logical.ErrorResponse("either pkcs7 or a tuple containing the instance identity document and its SHA256 RSA signature needs to be provided"), nil + } else if pkcs7B64 != "" && (len(identityDocBytes) != 0 && len(signatureBytes) != 0) { + return "", nil, logical.ErrorResponse("both pkcs7 and a tuple containing the instance identity document and its SHA256 RSA signature is supplied; provide only one"), nil + } + + // Verify the signature of the identity document and unmarshal it + var identityDocParsed *identityDocument + if pkcs7B64 != "" { + identityDocParsed, err = b.parseIdentityDocument(ctx, req.Storage, pkcs7B64) + if err != nil { + return "", nil, nil, err + } + if identityDocParsed == nil { + return "", nil, logical.ErrorResponse("failed to verify the instance identity document using pkcs7"), nil + } + } else { + identityDocParsed, err = b.verifyInstanceIdentitySignature(ctx, req.Storage, identityDocBytes, signatureBytes) + if err != nil { + return "", nil, nil, err + } + if identityDocParsed == nil { + return "", nil, logical.ErrorResponse("failed to verify the instance identity document using the SHA256 RSA digest"), nil + } + } + + roleName := data.Get("role").(string) + + // If roleName is not supplied, a role in the name of the instance's AMI ID will be looked for + if roleName == "" { + roleName = identityDocParsed.AmiID + } + + // Get the entry for the role used by the instance + // Note that we don't return the roleEntry, but use it to determine if the role exists + // roleEntry does not contain the role name, so it is not appropriate to return + roleEntry, err := b.role(ctx, req.Storage, roleName) + if err != nil { + return "", nil, nil, err + } + if roleEntry == nil { + return "", nil, logical.ErrorResponse(fmt.Sprintf("entry for role %q not found", roleName)), nil + } + return roleName, identityDocParsed, nil, nil +} + +func (b *backend) pathLoginResolveRoleEc2(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + role, _, resp, err := b.pathLoginEc2GetRoleNameAndIdentityDoc(ctx, req, data) + if resp != nil || err != nil { + return resp, err + } + return logical.ResolveRoleResponse(role) +} + +func (b *backend) pathLoginIamGetRoleNameCallerIdAndEntity(ctx context.Context, req *logical.Request, data *framework.FieldData) (string, *GetCallerIdentityResult, *iamEntity, *logical.Response, error) { + method := data.Get("iam_http_request_method").(string) + if method == "" { + return "", nil, nil, logical.ErrorResponse("missing iam_http_request_method"), nil + } + + // In the future, might consider supporting GET + if method != "POST" { + return "", nil, nil, logical.ErrorResponse("invalid iam_http_request_method; currently only 'POST' is supported"), nil + } + + rawUrlB64 := data.Get("iam_request_url").(string) + if rawUrlB64 == "" { + return "", nil, nil, logical.ErrorResponse("missing iam_request_url"), nil + } + rawUrl, err := base64.StdEncoding.DecodeString(rawUrlB64) + if err != nil { + return "", nil, nil, logical.ErrorResponse("failed to base64 decode iam_request_url"), nil + } + parsedUrl, err := url.Parse(string(rawUrl)) + if err != nil { + return "", nil, nil, logical.ErrorResponse("error parsing iam_request_url"), nil + } + if parsedUrl.RawQuery != "" { + // Should be no query parameters + return "", nil, nil, logical.ErrorResponse(logical.ErrInvalidRequest.Error()), nil + } + // TODO: There are two potentially valid cases 
we're not yet supporting that would + // necessitate this check being changed. First, if we support GET requests. + // Second if we support presigned POST requests + bodyB64 := data.Get("iam_request_body").(string) + if bodyB64 == "" { + return "", nil, nil, logical.ErrorResponse("missing iam_request_body"), nil + } + bodyRaw, err := base64.StdEncoding.DecodeString(bodyB64) + if err != nil { + return "", nil, nil, logical.ErrorResponse("failed to base64 decode iam_request_body"), nil + } + body := string(bodyRaw) + if err = validateLoginIamRequestBody(body); err != nil { + return "", nil, nil, logical.ErrorResponse(err.Error()), nil + } + + headers := data.Get("iam_request_headers").(http.Header) + if len(headers) == 0 { + return "", nil, nil, logical.ErrorResponse("missing iam_request_headers"), nil + } + + config, err := b.lockedClientConfigEntry(ctx, req.Storage) + if err != nil { + return "", nil, nil, logical.ErrorResponse("error getting configuration"), nil + } + + endpoint := "https://sts.amazonaws.com" + + maxRetries := awsClient.DefaultRetryerMaxNumRetries + if config != nil { + if config.IAMServerIdHeaderValue != "" { + err = validateVaultHeaderValue(headers, parsedUrl, config.IAMServerIdHeaderValue) + if err != nil { + return "", nil, nil, logical.ErrorResponse(fmt.Sprintf("error validating %s header: %v", iamServerIdHeader, err)), nil + } + } + if err = config.validateAllowedSTSHeaderValues(headers); err != nil { + return "", nil, nil, logical.ErrorResponse(err.Error()), nil + } + if config.STSEndpoint != "" { + endpoint = config.STSEndpoint + } + if config.MaxRetries >= 0 { + maxRetries = config.MaxRetries + } + } + + callerID, err := submitCallerIdentityRequest(ctx, maxRetries, method, endpoint, parsedUrl, body, headers) + if err != nil { + return "", nil, nil, logical.ErrorResponse(fmt.Sprintf("error making upstream request: %v", err)), nil + } + + entity, err := parseIamArn(callerID.Arn) + if err != nil { + return "", nil, nil, logical.ErrorResponse(fmt.Sprintf("error parsing arn %q: %v", callerID.Arn, err)), nil + } + + roleName := data.Get("role").(string) + if roleName == "" { + roleName = entity.FriendlyName + } + return roleName, callerID, entity, nil, nil +} + +func (b *backend) pathLoginResolveRoleIam(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + role, _, _, resp, err := b.pathLoginIamGetRoleNameCallerIdAndEntity(ctx, req, data) + if resp != nil || err != nil { + return resp, err + } + return logical.ResolveRoleResponse(role) +} + +// instanceIamRoleARN fetches the IAM role ARN associated with the given +// instance profile name +func (b *backend) instanceIamRoleARN(ctx context.Context, iamClient *iam.IAM, instanceProfileName string) (string, error) { + if iamClient == nil { + return "", fmt.Errorf("nil iamClient") + } + if instanceProfileName == "" { + return "", fmt.Errorf("missing instance profile name") + } + + profile, err := iamClient.GetInstanceProfileWithContext(ctx, &iam.GetInstanceProfileInput{ + InstanceProfileName: aws.String(instanceProfileName), + }) + if err != nil { + return "", awsutil.AppendAWSError(err) + } + if profile == nil { + return "", fmt.Errorf("nil output while getting instance profile details") + } + + if profile.InstanceProfile == nil { + return "", fmt.Errorf("nil instance profile in the output of instance profile details") + } + + if profile.InstanceProfile.Roles == nil || len(profile.InstanceProfile.Roles) != 1 { + return "", fmt.Errorf("invalid roles in the output of instance 
profile details") + } + + if profile.InstanceProfile.Roles[0].Arn == nil { + return "", fmt.Errorf("nil role ARN in the output of instance profile details") + } + + return *profile.InstanceProfile.Roles[0].Arn, nil +} + +// validateInstance queries the status of the EC2 instance using AWS EC2 API +// and checks if the instance is running and is healthy +func (b *backend) validateInstance(ctx context.Context, s logical.Storage, instanceID, region, accountID string) (*ec2.Instance, error) { + // Create an EC2 client to pull the instance information + ec2Client, err := b.clientEC2(ctx, s, region, accountID) + if err != nil { + return nil, err + } + + status, err := ec2Client.DescribeInstancesWithContext(ctx, &ec2.DescribeInstancesInput{ + InstanceIds: []*string{ + aws.String(instanceID), + }, + }) + if err != nil { + errW := fmt.Errorf("error fetching description for instance ID %q: %w", instanceID, err) + return nil, errwrap.Wrap(errW, awsutil.CheckAWSError(err)) + } + if status == nil { + return nil, fmt.Errorf("nil output from describe instances") + } + if len(status.Reservations) == 0 { + return nil, fmt.Errorf("no reservations found in instance description") + } + if len(status.Reservations[0].Instances) == 0 { + return nil, fmt.Errorf("no instance details found in reservations") + } + if *status.Reservations[0].Instances[0].InstanceId != instanceID { + return nil, fmt.Errorf("expected instance ID not matching the instance ID in the instance description") + } + if status.Reservations[0].Instances[0].State == nil { + return nil, fmt.Errorf("instance state in instance description is nil") + } + if *status.Reservations[0].Instances[0].State.Name != "running" { + return nil, fmt.Errorf("instance is not in 'running' state") + } + return status.Reservations[0].Instances[0], nil +} + +// validateMetadata matches the given client nonce and pending time with the +// one cached in the identity access list during the previous login. But, if +// reauthentication is disabled, login attempt is failed immediately. +func validateMetadata(clientNonce, pendingTime string, storedIdentity *accessListIdentity, roleEntry *awsRoleEntry) error { + // For sanity + if !storedIdentity.DisallowReauthentication && storedIdentity.ClientNonce == "" { + return fmt.Errorf("client nonce missing in stored identity") + } + + // If reauthentication is disabled or if the nonce supplied matches a + // predefined nonce which indicates reauthentication to be disabled, + // authentication will not succeed. + if storedIdentity.DisallowReauthentication || + subtle.ConstantTimeCompare([]byte(reauthenticationDisabledNonce), []byte(clientNonce)) == 1 { + return fmt.Errorf("reauthentication is disabled") + } + + givenPendingTime, err := time.Parse(time.RFC3339, pendingTime) + if err != nil { + return err + } + + storedPendingTime, err := time.Parse(time.RFC3339, storedIdentity.PendingTime) + if err != nil { + return err + } + + // When the presented client nonce does not match the cached entry, it + // is either that a rogue client is trying to login or that a valid + // client suffered a migration. The migration is detected via + // pendingTime in the instance metadata, which sadly is only updated + // when an instance is stopped and started but *not* when the instance + // is rebooted. If reboot survivability is needed, either + // instrumentation to delete the instance ID from the access list is + // necessary, or the client must durably store the nonce. 
+ // + // If the `allow_instance_migration` property of the registered role is + // enabled, then the client nonce mismatch is ignored, as long as the + // pending time in the presented instance identity document is newer + // than the cached pending time. The new pendingTime is stored and used + // for future checks. + // + // This is a weak criterion and hence the `allow_instance_migration` + // option should be used with caution. + if subtle.ConstantTimeCompare([]byte(clientNonce), []byte(storedIdentity.ClientNonce)) != 1 { + if !roleEntry.AllowInstanceMigration { + return fmt.Errorf("client nonce mismatch") + } + if roleEntry.AllowInstanceMigration && !givenPendingTime.After(storedPendingTime) { + return fmt.Errorf("client nonce mismatch and instance meta-data incorrect") + } + } + + // Ensure that the 'pendingTime' on the given identity document is not + // before the 'pendingTime' that was used for previous login. This + // disallows old metadata documents from being used to perform login. + if givenPendingTime.Before(storedPendingTime) { + return fmt.Errorf("instance meta-data is older than the one used for previous login") + } + return nil +} + +// Verifies the integrity of the instance identity document using its SHA256 +// RSA signature. After verification, returns the unmarshaled instance identity +// document. +func (b *backend) verifyInstanceIdentitySignature(ctx context.Context, s logical.Storage, identityBytes, signatureBytes []byte) (*identityDocument, error) { + if len(identityBytes) == 0 { + return nil, fmt.Errorf("missing instance identity document") + } + + if len(signatureBytes) == 0 { + return nil, fmt.Errorf("missing SHA256 RSA signature of the instance identity document") + } + + // Get the public certificates that are used to verify the signature. + // This returns a slice of certificates containing the default + // certificate and all the registered certificates via + // 'config/certificate/' endpoint, for verifying the RSA + // digest. + publicCerts, err := b.awsPublicCertificates(ctx, s, false) + if err != nil { + return nil, err + } + if publicCerts == nil || len(publicCerts) == 0 { + return nil, fmt.Errorf("certificates to verify the signature are not found") + } + + // Check if any of the certs registered at the backend can verify the + // signature + for _, cert := range publicCerts { + err := cert.CheckSignature(x509.SHA256WithRSA, identityBytes, signatureBytes) + if err == nil { + var identityDoc identityDocument + if decErr := jsonutil.DecodeJSON(identityBytes, &identityDoc); decErr != nil { + return nil, decErr + } + return &identityDoc, nil + } + } + + return nil, fmt.Errorf("instance identity verification using SHA256 RSA signature is unsuccessful") +} + +// Verifies the correctness of the authenticated attributes present in the PKCS#7 +// signature. After verification, extracts the instance identity document from the +// signature, parses it and returns it. 
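+//
+// Note that the instance metadata service returns this signature as raw
+// base64 without PEM armor, which is why the method wraps it in PKCS#7 PEM
+// headers before decoding; roughly:
+//
+//	block, _ := pem.Decode([]byte("-----BEGIN PKCS7-----\n" + pkcs7B64 + "\n-----END PKCS7-----"))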
+func (b *backend) parseIdentityDocument(ctx context.Context, s logical.Storage, pkcs7B64 string) (*identityDocument, error) { + // Insert the header and footer for the signature to be able to pem decode it + pkcs7B64 = fmt.Sprintf("-----BEGIN PKCS7-----\n%s\n-----END PKCS7-----", pkcs7B64) + + // Decode the PEM encoded signature + pkcs7BER, pkcs7Rest := pem.Decode([]byte(pkcs7B64)) + if len(pkcs7Rest) != 0 { + return nil, fmt.Errorf("failed to decode the PEM encoded PKCS#7 signature") + } + + // Parse the signature from asn1 format into a struct + pkcs7Data, err := pkcs7.Parse(pkcs7BER.Bytes) + if err != nil { + return nil, fmt.Errorf("failed to parse the BER encoded PKCS#7 signature: %w", err) + } + + // Get the public certificates that are used to verify the signature. + // This returns a slice of certificates containing the default certificate + // and all the registered certificates via 'config/certificate/' endpoint + publicCerts, err := b.awsPublicCertificates(ctx, s, true) + if err != nil { + return nil, err + } + if publicCerts == nil || len(publicCerts) == 0 { + return nil, fmt.Errorf("certificates to verify the signature are not found") + } + + // Before calling Verify() on the PKCS#7 struct, set the certificates to be used + // to verify the contents in the signer information. + pkcs7Data.Certificates = publicCerts + + // Verify extracts the authenticated attributes in the PKCS#7 signature, and verifies + // the authenticity of the content using 'dsa.PublicKey' embedded in the public certificate. + if err := pkcs7Data.Verify(); err != nil { + return nil, fmt.Errorf("failed to verify the signature: %w", err) + } + + // Check if the signature has content inside of it + if len(pkcs7Data.Content) == 0 { + return nil, fmt.Errorf("instance identity document could not be found in the signature") + } + + var identityDoc identityDocument + if err := jsonutil.DecodeJSON(pkcs7Data.Content, &identityDoc); err != nil { + return nil, err + } + + return &identityDoc, nil +} + +func (b *backend) pathLoginUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + anyEc2, allEc2 := hasValuesForEc2Auth(data) + anyIam, allIam := hasValuesForIamAuth(data) + switch { + case anyEc2 && anyIam: + return logical.ErrorResponse("supplied auth values for both ec2 and iam auth types"), nil + case anyEc2 && !allEc2: + return logical.ErrorResponse("supplied some of the auth values for the ec2 auth type but not all"), nil + case anyEc2: + return b.pathLoginUpdateEc2(ctx, req, data) + case anyIam && !allIam: + return logical.ErrorResponse("supplied some of the auth values for the iam auth type but not all"), nil + case anyIam: + return b.pathLoginUpdateIam(ctx, req, data) + default: + return logical.ErrorResponse("didn't supply required authentication values"), nil + } +} + +// Returns whether the EC2 instance meets the requirements of the particular +// AWS role entry. 
+// The first error return value is whether there's some sort of validation +// error that means the instance doesn't meet the role requirements +// The second error return value indicates whether there's an error in even +// trying to validate those requirements +func (b *backend) verifyInstanceMeetsRoleRequirements(ctx context.Context, + s logical.Storage, instance *ec2.Instance, roleEntry *awsRoleEntry, roleName string, identityDoc *identityDocument) (error, error, +) { + switch { + case instance == nil: + return nil, fmt.Errorf("nil instance") + case roleEntry == nil: + return nil, fmt.Errorf("nil roleEntry") + case identityDoc == nil: + return nil, fmt.Errorf("nil identityDoc") + } + + // Verify that the instance ID matches one of the ones set by the role + if len(roleEntry.BoundEc2InstanceIDs) > 0 && !strutil.StrListContains(roleEntry.BoundEc2InstanceIDs, *instance.InstanceId) { + return fmt.Errorf("instance ID %q does not belong to the role %q", *instance.InstanceId, roleName), nil + } + + // Verify that the AccountID of the instance trying to login matches the + // AccountID specified as a constraint on role + if len(roleEntry.BoundAccountIDs) > 0 && !strutil.StrListContains(roleEntry.BoundAccountIDs, identityDoc.AccountID) { + return fmt.Errorf("account ID %q does not belong to role %q", identityDoc.AccountID, roleName), nil + } + + // Verify that the AMI ID of the instance trying to login matches the + // AMI ID specified as a constraint on the role. + // + // Here, we're making a tradeoff and pulling the AMI ID out of the EC2 + // API rather than the signed instance identity doc. They *should* match. + // This means we require an EC2 API call to retrieve the AMI ID, but we're + // already calling the API to validate the Instance ID anyway, so it shouldn't + // matter. The benefit is that we have the exact same code whether auth_type + // is ec2 or iam. 
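+	// Each of the bound checks below follows the same shape: skip when the
+	// bound is unset on the role, treat a missing instance attribute as an
+	// internal error, and treat a non-matching value as a validation failure.
+	// A minimal sketch of that pattern (checkBound is a hypothetical helper,
+	// not part of this backend):
+	//
+	//	func checkBound(bound []string, actual *string, what, roleName string) (validationErr, internalErr error) {
+	//		if len(bound) == 0 {
+	//			return nil, nil // no constraint configured on the role
+	//		}
+	//		if actual == nil {
+	//			return nil, fmt.Errorf("%s in the instance description is nil", what)
+	//		}
+	//		if !strutil.StrListContains(bound, *actual) {
+	//			return fmt.Errorf("%s %q does not satisfy the constraint on role %q", what, *actual, roleName), nil
+	//		}
+	//		return nil, nil
+	//	}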
+ if len(roleEntry.BoundAmiIDs) > 0 { + if instance.ImageId == nil { + return nil, fmt.Errorf("AMI ID in the instance description is nil") + } + if !strutil.StrListContains(roleEntry.BoundAmiIDs, *instance.ImageId) { + return fmt.Errorf("AMI ID %q does not belong to role %q", *instance.ImageId, roleName), nil + } + } + + // Validate the SubnetID if corresponding bound was set on the role + if len(roleEntry.BoundSubnetIDs) > 0 { + if instance.SubnetId == nil { + return nil, fmt.Errorf("subnet ID in the instance description is nil") + } + if !strutil.StrListContains(roleEntry.BoundSubnetIDs, *instance.SubnetId) { + return fmt.Errorf("subnet ID %q does not satisfy the constraint on role %q", *instance.SubnetId, roleName), nil + } + } + + // Validate the VpcID if corresponding bound was set on the role + if len(roleEntry.BoundVpcIDs) > 0 { + if instance.VpcId == nil { + return nil, fmt.Errorf("VPC ID in the instance description is nil") + } + if !strutil.StrListContains(roleEntry.BoundVpcIDs, *instance.VpcId) { + return fmt.Errorf("VPC ID %q does not satisfy the constraint on role %q", *instance.VpcId, roleName), nil + } + } + + // Check if the IAM instance profile ARN of the instance trying to + // login, matches the IAM instance profile ARN specified as a constraint + // on the role + if len(roleEntry.BoundIamInstanceProfileARNs) > 0 { + if instance.IamInstanceProfile == nil { + return nil, fmt.Errorf("IAM instance profile in the instance description is nil") + } + if instance.IamInstanceProfile.Arn == nil { + return nil, fmt.Errorf("IAM instance profile ARN in the instance description is nil") + } + iamInstanceProfileARN := *instance.IamInstanceProfile.Arn + matchesInstanceProfile := false + // NOTE: Can't use strutil.StrListContainsGlob. A * is a perfectly valid character in the "path" component + // of an ARN. See, e.g., https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateInstanceProfile.html : + // The path allows strings "containing any ASCII character from the ! (\u0021) thru the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased letters." + // So, e.g., arn:aws:iam::123456789012:instance-profile/Some*Path/MyProfileName is a perfectly valid instance + // profile ARN, and it wouldn't be correct to expand the * in the middle as a wildcard. + // If a user wants to match an IAM instance profile arn beginning with arn:aws:iam::123456789012:instance-profile/foo* + // then bound_iam_instance_profile_arn would need to be arn:aws:iam::123456789012:instance-profile/foo** + // Wanting to exactly match an ARN that has a * at the end is not a valid use case. The * is only valid in the + // path; it's not valid in the name. That means no valid ARN can ever end with a *. For example, + // arn:aws:iam::123456789012:instance-profile/Foo* is NOT valid as an instance profile ARN, so no valid instance + // profile ARN could ever equal that value. 
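+		// Concretely: a bound of "arn:aws:iam::123456789012:instance-profile/foo*"
+		// has its trailing '*' stripped and accepts any instance profile ARN
+		// beginning with "arn:aws:iam::123456789012:instance-profile/foo",
+		// while a bound without a trailing '*' must match exactly.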
+ for _, boundInstanceProfileARN := range roleEntry.BoundIamInstanceProfileARNs { + switch { + case strings.HasSuffix(boundInstanceProfileARN, "*") && strings.HasPrefix(iamInstanceProfileARN, boundInstanceProfileARN[:len(boundInstanceProfileARN)-1]): + matchesInstanceProfile = true + break + case iamInstanceProfileARN == boundInstanceProfileARN: + matchesInstanceProfile = true + break + } + } + if !matchesInstanceProfile { + return fmt.Errorf("IAM instance profile ARN %q does not satisfy the constraint role %q", iamInstanceProfileARN, roleName), nil + } + } + + // Check if the IAM role ARN of the instance trying to login, matches + // the IAM role ARN specified as a constraint on the role. + if len(roleEntry.BoundIamRoleARNs) > 0 { + if instance.IamInstanceProfile == nil { + return nil, fmt.Errorf("IAM instance profile in the instance description is nil") + } + if instance.IamInstanceProfile.Arn == nil { + return nil, fmt.Errorf("IAM instance profile ARN in the instance description is nil") + } + + // Fetch the instance profile ARN from the instance description + iamInstanceProfileARN := *instance.IamInstanceProfile.Arn + + if iamInstanceProfileARN == "" { + return nil, fmt.Errorf("IAM instance profile ARN in the instance description is empty") + } + + // Extract out the instance profile name from the instance + // profile ARN + iamInstanceProfileEntity, err := parseIamArn(iamInstanceProfileARN) + if err != nil { + return nil, fmt.Errorf("failed to parse IAM instance profile ARN %q: %w", iamInstanceProfileARN, err) + } + + // Use instance profile ARN to fetch the associated role ARN + iamClient, err := b.clientIAM(ctx, s, identityDoc.Region, identityDoc.AccountID) + if err != nil { + return nil, fmt.Errorf("could not fetch IAM client: %w", err) + } else if iamClient == nil { + return nil, fmt.Errorf("received a nil iamClient") + } + iamRoleARN, err := b.instanceIamRoleARN(ctx, iamClient, iamInstanceProfileEntity.FriendlyName) + if err != nil { + return nil, fmt.Errorf("IAM role ARN could not be fetched: %w", err) + } + if iamRoleARN == "" { + return nil, fmt.Errorf("IAM role ARN could not be fetched") + } + + matchesInstanceRoleARN := false + for _, boundIamRoleARN := range roleEntry.BoundIamRoleARNs { + switch { + // as with boundInstanceProfileARN, can't use strutil.StrListContainsGlob because * can validly exist in the middle of an ARN + case strings.HasSuffix(boundIamRoleARN, "*") && strings.HasPrefix(iamRoleARN, boundIamRoleARN[:len(boundIamRoleARN)-1]): + matchesInstanceRoleARN = true + break + case iamRoleARN == boundIamRoleARN: + matchesInstanceRoleARN = true + break + } + } + if !matchesInstanceRoleARN { + return fmt.Errorf("IAM role ARN %q does not satisfy the constraint role %q", iamRoleARN, roleName), nil + } + } + + return nil, nil +} + +// pathLoginUpdateEc2 is used to create a Vault token by the EC2 instances +// by providing the pkcs7 signature of the instance identity document +// and a client created nonce. Client nonce is optional if 'disallow_reauthentication' +// option is enabled on the registered role. 
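+//
+// For illustration (the mount path and role name are assumptions): a first
+// login that supplies no nonce has the backend generate one and return it in
+// the auth metadata, and subsequent logins from the same instance must
+// present that same nonce:
+//
+//	vault write auth/aws/login role=dev-ec2 pkcs7=@pkcs7.b64
+//	# => auth metadata includes a generated "nonce"
+//	vault write auth/aws/login role=dev-ec2 pkcs7=@pkcs7.b64 nonce="<returned nonce>"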
+func (b *backend) pathLoginUpdateEc2(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	roleName, identityDocParsed, errResp, err := b.pathLoginEc2GetRoleNameAndIdentityDoc(ctx, req, data)
+	if errResp != nil || err != nil {
+		return errResp, err
+	}
+
+	// Get the entry for the role used by the instance
+	roleEntry, err := b.role(ctx, req.Storage, roleName)
+	if err != nil {
+		return nil, err
+	}
+	if roleEntry == nil {
+		return logical.ErrorResponse(fmt.Sprintf("entry for role %q not found", roleName)), nil
+	}
+
+	// Check for a CIDR match.
+	if len(roleEntry.TokenBoundCIDRs) > 0 {
+		if req.Connection == nil {
+			b.Logger().Warn("token bound CIDRs found but no connection information available for validation")
+			return nil, logical.ErrPermissionDenied
+		}
+		if !cidrutil.RemoteAddrIsOk(req.Connection.RemoteAddr, roleEntry.TokenBoundCIDRs) {
+			return nil, logical.ErrPermissionDenied
+		}
+	}
+
+	if roleEntry.AuthType != ec2AuthType {
+		return logical.ErrorResponse(fmt.Sprintf("auth method ec2 not allowed for role %s", roleName)), nil
+	}
+
+	identityConfigEntry, err := identityConfigEntry(ctx, req.Storage)
+	if err != nil {
+		return nil, err
+	}
+
+	identityAlias := ""
+
+	switch identityConfigEntry.EC2Alias {
+	case identityAliasRoleID:
+		identityAlias = roleEntry.RoleID
+	case identityAliasEC2InstanceID:
+		identityAlias = identityDocParsed.InstanceID
+	case identityAliasEC2ImageID:
+		identityAlias = identityDocParsed.AmiID
+	}
+
+	// If we're just doing an alias lookahead for MFA, return the alias info
+	if req.Operation == logical.AliasLookaheadOperation {
+		return &logical.Response{
+			Auth: &logical.Auth{
+				Alias: &logical.Alias{
+					Name: identityAlias,
+				},
+			},
+		}, nil
+	}
+
+	// Validate the instance ID by making a call to the AWS EC2 DescribeInstances API
+	// and fetching the instance description. Validation succeeds only if the
+	// instance is in the 'running' state.
+	instance, err := b.validateInstance(ctx, req.Storage, identityDocParsed.InstanceID, identityDocParsed.Region, identityDocParsed.AccountID)
+	if err != nil {
+		return logical.ErrorResponse(fmt.Sprintf("failed to verify instance ID: %v", err)), nil
+	}
+
+	// Verify that the `Region` of the instance trying to login matches the
+	// `Region` specified as a constraint on the role
+	if len(roleEntry.BoundRegions) > 0 && !strutil.StrListContains(roleEntry.BoundRegions, identityDocParsed.Region) {
+		return logical.ErrorResponse(fmt.Sprintf("Region %q does not satisfy the constraint on role %q", identityDocParsed.Region, roleName)), nil
+	}
+
+	validationError, err := b.verifyInstanceMeetsRoleRequirements(ctx, req.Storage, instance, roleEntry, roleName, identityDocParsed)
+	if err != nil {
+		return nil, err
+	}
+	if validationError != nil {
+		return logical.ErrorResponse(fmt.Sprintf("Error validating instance: %v", validationError)), nil
+	}
+
+	// Get the entry from the identity access list, if there is one
+	storedIdentity, err := accessListIdentityEntry(ctx, req.Storage, identityDocParsed.InstanceID)
+	if err != nil {
+		return nil, err
+	}
+
+	// The disallowReauthentication value cached on the stored identity access
+	// list entry is determined by more than just the role entry. If the client
+	// explicitly sets the nonce to be empty, it implies intent to disable
+	// reauthentication. Also, the role tag can override a 'false' value with
+	// 'true' (but not the other way around).
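+	// Sketch of the resulting behavior (illustrative, not exhaustive):
+	//
+	//	role entry false, client nonce ""      -> disallowed (client opt-out)
+	//	role entry false, role tag sets true   -> disallowed (tag overrides)
+	//	role entry true,  role tag sets false  -> still disallowed (no downgrade)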
+
+	// Read the value from the role entry
+	disallowReauthentication := roleEntry.DisallowReauthentication
+
+	clientNonce := ""
+
+	// Check if the nonce is supplied by the client
+	clientNonceRaw, clientNonceSupplied := data.GetOk("nonce")
+	if clientNonceSupplied {
+		clientNonce = clientNonceRaw.(string)
+
+		// A nonce explicitly set to empty implies intent to disable
+		// reauthentication by the client. Set a predefined nonce which
+		// indicates that reauthentication is disabled.
+		if clientNonce == "" {
+			clientNonce = reauthenticationDisabledNonce
+
+			// Ensure that the intent lands in the access list
+			disallowReauthentication = true
+		}
+	}
+
+	// This is NOT a first login attempt from the client
+	if storedIdentity != nil {
+		// Check if the client nonce matches the cached nonce and if the pending time
+		// of the identity document is not before the pending time of the document
+		// with which the previous login was made. If 'allow_instance_migration' is
+		// enabled on the registered role, the client nonce requirement is relaxed.
+		if err = validateMetadata(clientNonce, identityDocParsed.PendingTime, storedIdentity, roleEntry); err != nil {
+			return logical.ErrorResponse(err.Error()), nil
+		}
+
+		// Don't let subsequent login attempts bypass the initial
+		// intent of disabling reauthentication, even if the properties
+		// of the role get updated. For example: the role has the value set
+		// to 'false', a role-tag login sets the value to 'true', then the
+		// role gets updated to not use a role-tag, and a login attempt
+		// is made with the role's value set to 'false'. Removing the entry
+		// from the identity access list should be the only way to be
+		// able to login from the instance again.
+		disallowReauthentication = disallowReauthentication || storedIdentity.DisallowReauthentication
+	}
+
+	// If we reach this point without erroring and the client nonce was
+	// not supplied, a first-time login is implied and the client intends
+	// the nonce to be generated by the backend. Create a random
+	// nonce to be associated with the instance ID.
+	if !clientNonceSupplied {
+		if clientNonce, err = uuid.GenerateUUID(); err != nil {
+			return nil, fmt.Errorf("failed to generate random nonce")
+		}
+	}
+
+	// Load the current values for max TTL and policies from the role entry,
+	// before checking for an overriding max TTL in the role tag. The shortest
+	// max TTL is used to cap the token TTL; the longest max TTL is used to
+	// make the access list entry live as long as possible, as it protects
+	// against replay attacks.
+	shortestMaxTTL := b.System().MaxLeaseTTL()
+	longestMaxTTL := b.System().MaxLeaseTTL()
+	if roleEntry.TokenMaxTTL > time.Duration(0) && roleEntry.TokenMaxTTL < shortestMaxTTL {
+		shortestMaxTTL = roleEntry.TokenMaxTTL
+	}
+	if roleEntry.TokenMaxTTL > longestMaxTTL {
+		longestMaxTTL = roleEntry.TokenMaxTTL
+	}
+
+	policies := roleEntry.TokenPolicies
+	rTagMaxTTL := time.Duration(0)
+	var roleTagResp *roleTagLoginResponse
+	if roleEntry.RoleTag != "" {
+		roleTagResp, err = b.handleRoleTagLogin(ctx, req.Storage, roleName, roleEntry, instance)
+		if err != nil {
+			return nil, err
+		}
+		if roleTagResp == nil {
+			return logical.ErrorResponse("failed to fetch and verify the role tag"), nil
+		}
+	}
+
+	if roleTagResp != nil {
+		// Role tag is enabled on the role.
+
+		// Overwrite the policies with the ones returned from processing the role tag.
+		// If there are no policies on the role tag, the policies on the role are inherited.
+		// If policies are set on the role tag, they have by this point been
+		// verified to be a subset of the policies on the role. So, apply only those.
+		if len(roleTagResp.Policies) != 0 {
+			policies = roleTagResp.Policies
+		}
+
+		// If roleEntry had disallowReauthentication set to 'true', do not reset it
+		// to 'false' based on the role tag not having it set. But, if the role tag
+		// had it set, be sure to override the value.
+		if !disallowReauthentication {
+			disallowReauthentication = roleTagResp.DisallowReauthentication
+		}
+
+		// Cache the role tag's max_ttl value
+		rTagMaxTTL = roleTagResp.MaxTTL
+
+		// Scope the shortestMaxTTL to the value set on the role tag
+		if roleTagResp.MaxTTL > time.Duration(0) && roleTagResp.MaxTTL < shortestMaxTTL {
+			shortestMaxTTL = roleTagResp.MaxTTL
+		}
+		if roleTagResp.MaxTTL > longestMaxTTL {
+			longestMaxTTL = roleTagResp.MaxTTL
+		}
+	}
+
+	// Save the login attempt in the identity access list
+	currentTime := time.Now()
+	if storedIdentity == nil {
+		// Role, ClientNonce and CreationTime of the identity entry,
+		// once set, should never change.
+		storedIdentity = &accessListIdentity{
+			Role:         roleName,
+			ClientNonce:  clientNonce,
+			CreationTime: currentTime,
+		}
+	}
+
+	// DisallowReauthentication, PendingTime, LastUpdatedTime and
+	// ExpirationTime may change.
+	storedIdentity.LastUpdatedTime = currentTime
+	storedIdentity.ExpirationTime = currentTime.Add(longestMaxTTL)
+	storedIdentity.PendingTime = identityDocParsed.PendingTime
+	storedIdentity.DisallowReauthentication = disallowReauthentication
+
+	// Don't cache the nonce if DisallowReauthentication is set
+	if storedIdentity.DisallowReauthentication {
+		storedIdentity.ClientNonce = ""
+	}
+
+	// Limit the nonce to a reasonable length
+	if len(clientNonce) > 128 && !storedIdentity.DisallowReauthentication {
+		return logical.ErrorResponse("client nonce exceeding the limit of 128 characters"), nil
+	}
+
+	if err = setAccessListIdentityEntry(ctx, req.Storage, identityDocParsed.InstanceID, storedIdentity); err != nil {
+		return nil, err
+	}
+
+	auth := &logical.Auth{
+		Metadata: map[string]string{
+			"role_tag_max_ttl": rTagMaxTTL.String(),
+			"role":             roleName,
+		},
+		Alias: &logical.Alias{
+			Name: identityAlias,
+		},
+		InternalData: map[string]interface{}{
+			"instance_id": identityDocParsed.InstanceID,
+			"region":      identityDocParsed.Region,
+			"account_id":  identityDocParsed.AccountID,
+		},
+	}
+	roleEntry.PopulateTokenAuth(auth)
+	if err := identityConfigEntry.EC2AuthMetadataHandler.PopulateDesiredMetadata(auth, map[string]string{
+		"instance_id": identityDocParsed.InstanceID,
+		"region":      identityDocParsed.Region,
+		"account_id":  identityDocParsed.AccountID,
+		"ami_id":      identityDocParsed.AmiID,
+		"auth_type":   ec2AuthType,
+	}); err != nil {
+		b.Logger().Warn("unable to set alias metadata", "err", err)
+	}
+
+	resp := &logical.Response{
+		Auth: auth,
+	}
+	resp.Auth.Policies = policies
+	resp.Auth.LeaseOptions.MaxTTL = shortestMaxTTL
+
+	// Return the nonce only if reauthentication is allowed and the nonce
+	// was not supplied by the user.
+	if !disallowReauthentication && !clientNonceSupplied {
+		// Echo the client nonce back. If the nonce param was not supplied
+		// to the endpoint at all (setting it to an empty string does not
+		// qualify here), callers should extract the nonce from
+		// this field for reauthentication requests.
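+		// For example (illustrative), a client that omitted the nonce on its
+		// first login would replay the generated value on later logins:
+		//
+		//	vault write auth/aws/login role=dev-role-ec2 pkcs7=<pkcs7> nonce=<auth.metadata.nonce>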
+		resp.Auth.Metadata["nonce"] = clientNonce
+	}
+
+	return resp, nil
+}
+
+// handleRoleTagLogin fetches the role tag of the instance and verifies that
+// it is correct. The policies for the login request are then derived from the
+// role tag, provided certain criteria are satisfied.
+func (b *backend) handleRoleTagLogin(ctx context.Context, s logical.Storage, roleName string, roleEntry *awsRoleEntry, instance *ec2.Instance) (*roleTagLoginResponse, error) {
+	if roleEntry == nil {
+		return nil, fmt.Errorf("nil role entry")
+	}
+	if instance == nil {
+		return nil, fmt.Errorf("nil instance")
+	}
+
+	// Input validation on instance is not performed here, as it is assumed
+	// to have been done in the validateInstance method.
+	tags := instance.Tags
+	if len(tags) == 0 {
+		return nil, fmt.Errorf("missing tag with key %q on the instance", roleEntry.RoleTag)
+	}
+
+	// Iterate through the tags attached to the instance and look for
+	// a tag with its 'key' matching the expected role tag value.
+	rTagValue := ""
+	for _, tagItem := range tags {
+		if tagItem.Key != nil && *tagItem.Key == roleEntry.RoleTag {
+			rTagValue = *tagItem.Value
+			break
+		}
+	}
+
+	// If 'role_tag' is enabled on the role, and a corresponding tag is not
+	// found to be attached to the instance, fail.
+	if rTagValue == "" {
+		return nil, fmt.Errorf("missing tag with key %q on the instance", roleEntry.RoleTag)
+	}
+
+	// Parse the role tag into a struct, extract the plaintext part of it and verify its HMAC
+	rTag, err := b.parseAndVerifyRoleTagValue(ctx, s, rTagValue)
+	if err != nil {
+		return nil, err
+	}
+
+	// Check if the role name with which this login is being made is the same
+	// as the role name embedded in the tag.
+	if rTag.Role != roleName {
+		return nil, fmt.Errorf("role on the tag does not match the role supplied")
+	}
+
+	// If instance_id was set on the role tag, check if the same instance is attempting to login
+	if rTag.InstanceID != "" && rTag.InstanceID != *instance.InstanceId {
+		return nil, fmt.Errorf("role tag is being used by an unauthorized instance")
+	}
+
+	// Check if the role tag is deny listed
+	denyListEntry, err := b.lockedDenyLististRoleTagEntry(ctx, s, rTagValue)
+	if err != nil {
+		return nil, err
+	}
+	if denyListEntry != nil {
+		return nil, fmt.Errorf("role tag is deny listed")
+	}
+
+	// Ensure that the policies on the role tag are a subset of the policies on the role
+	if !strutil.StrListSubset(roleEntry.TokenPolicies, rTag.Policies) {
+		return nil, fmt.Errorf("policies on the role tag must be a subset of policies on the role")
+	}
+
+	return &roleTagLoginResponse{
+		Policies:                 rTag.Policies,
+		MaxTTL:                   rTag.MaxTTL,
+		DisallowReauthentication: rTag.DisallowReauthentication,
+	}, nil
+}
+
+// pathLoginRenew is used to renew an authenticated token
+func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	authType, ok := req.Auth.Metadata["auth_type"]
+	if !ok {
+		// backwards compatibility for clients that have leases from before we added auth_type
+		authType = ec2AuthType
+	}
+
+	if authType == ec2AuthType {
+		return b.pathLoginRenewEc2(ctx, req, data)
+	} else if authType == iamAuthType {
+		return b.pathLoginRenewIam(ctx, req, data)
+	} else {
+		return nil, fmt.Errorf("unrecognized auth_type: %q", authType)
+	}
+}
+
+func (b *backend) pathLoginRenewIam(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	canonicalArn, err := getMetadataValue(req.Auth, "canonical_arn")
"canonical_arn") + if err != nil { + return nil, err + } + + roleName := "" + roleNameIfc, ok := req.Auth.InternalData["role_name"] + if ok { + roleName = roleNameIfc.(string) + } + if roleName == "" { + return nil, fmt.Errorf("error retrieving role_name during renewal") + } + roleEntry, err := b.role(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if roleEntry == nil { + return nil, fmt.Errorf("role entry not found") + } + + // we don't really care what the inferred entity type was when the role was initially created. We + // care about what the role currently requires. However, the metadata's inferred_entity_id is only + // set when inferencing is turned on at initial login time. So, if inferencing is turned on, any + // existing roles will NOT be able to renew tokens. + // This might change later, but authenticating the actual inferred entity ID is NOT done if there + // is no inferencing requested in the role. The reason is that authenticating the inferred entity + // ID requires additional AWS IAM permissions that might not be present (e.g., + // ec2:DescribeInstances) as well as additional inferencing configuration (the inferred region). + // So, for now, if you want to turn on inferencing, all clients must re-authenticate and cannot + // renew existing tokens. + if roleEntry.InferredEntityType != "" { + if roleEntry.InferredEntityType == ec2EntityType { + instanceID, err := getMetadataValue(req.Auth, "inferred_entity_id") + if err != nil { + return nil, err + } + instanceRegion, err := getMetadataValue(req.Auth, "inferred_aws_region") + if err != nil { + return nil, err + } + accountID, err := getMetadataValue(req.Auth, "account_id") + if err != nil { + b.Logger().Debug("account_id not present during iam renewal attempt, continuing to attempt validation") + } + if _, err := b.validateInstance(ctx, req.Storage, instanceID, instanceRegion, accountID); err != nil { + return nil, fmt.Errorf("failed to verify instance ID %q: %w", instanceID, err) + } + } else { + return nil, fmt.Errorf("unrecognized entity_type in metadata: %q", roleEntry.InferredEntityType) + } + } + + // Note that the error messages below can leak a little bit of information about the role information + // For example, if on renew, the client gets the "error parsing ARN..." error message, the client + // will know that it's a wildcard bind (but not the actual bind), even if the client can't actually + // read the role directly to know what the bind is. It's a relatively small amount of leakage, in + // some fairly corner cases, and in the most likely error case (role has been changed to a new ARN), + // the error message is identical. 
+	if len(roleEntry.BoundIamPrincipalARNs) > 0 {
+		// We might not get here if all bindings were on the inferred entity,
+		// which we've already validated above.
+		// As with logins, there are three ways to pass this check:
+		// 1: clientUserId is in roleEntry.BoundIamPrincipalIDs (entries in roleEntry.BoundIamPrincipalIDs
+		//    imply that roleEntry.ResolveAWSUniqueIDs is true)
+		// 2: roleEntry.ResolveAWSUniqueIDs is false and canonical_arn is in roleEntry.BoundIamPrincipalARNs
+		// 3: the full ARN matches one of the wildcard globs in roleEntry.BoundIamPrincipalARNs
+		clientUserId, err := getMetadataValue(req.Auth, "client_user_id")
+		switch {
+		case err == nil && strutil.StrListContains(roleEntry.BoundIamPrincipalIDs, clientUserId): // check 1 passed
+		case !roleEntry.ResolveAWSUniqueIDs && strutil.StrListContains(roleEntry.BoundIamPrincipalARNs, canonicalArn): // check 2 passed
+		default:
+			// Check 3 is a bit more complex, so we do it last. Only try to
+			// look up full ARNs if there's a wildcard ARN in BoundIamPrincipalARNs.
+			if !hasWildcardBind(roleEntry.BoundIamPrincipalARNs) {
+				return nil, fmt.Errorf("role %q no longer bound to ARN %q", roleName, canonicalArn)
+			}
+
+			fullArn := b.getCachedUserId(clientUserId)
+			if fullArn == "" {
+				entity, err := parseIamArn(canonicalArn)
+				if err != nil {
+					return nil, fmt.Errorf(
+						"error parsing ARN %q when updating login for role %q: %w",
+						canonicalArn,
+						roleName,
+						err,
+					)
+				}
+				fullArn, err = b.fullArn(ctx, entity, req.Storage)
+				if err != nil {
+					return nil, fmt.Errorf(
+						"error looking up full ARN of entity %v when updating login for role %q: %w",
+						entity,
+						roleName,
+						err,
+					)
+				}
+				if fullArn == "" {
+					return nil, fmt.Errorf("got empty string back when looking up full ARN of entity %v when updating login for role %q", entity, roleName)
+				}
+				if clientUserId != "" {
+					b.setCachedUserId(clientUserId, fullArn)
+				}
+			}
+			matchedWildcardBind := false
+			for _, principalARN := range roleEntry.BoundIamPrincipalARNs {
+				if strings.HasSuffix(principalARN, "*") && strutil.GlobbedStringsMatch(principalARN, fullArn) {
+					matchedWildcardBind = true
+					break
+				}
+			}
+			if !matchedWildcardBind {
+				return nil, fmt.Errorf("role %q no longer bound to ARN %q", roleName, canonicalArn)
+			}
+		}
+	}
+
+	resp := &logical.Response{Auth: req.Auth}
+	resp.Auth.TTL = roleEntry.TokenTTL
+	resp.Auth.MaxTTL = roleEntry.TokenMaxTTL
+	resp.Auth.Period = roleEntry.TokenPeriod
+	return resp, nil
+}
+
+func (b *backend) pathLoginRenewEc2(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) {
+	instanceID, err := getMetadataValue(req.Auth, "instance_id")
+	if err != nil {
+		return nil, err
+	}
+	region, err := getMetadataValue(req.Auth, "region")
+	if err != nil {
+		return nil, err
+	}
+	accountID, err := getMetadataValue(req.Auth, "account_id")
+	if err != nil {
+		b.Logger().Debug("account_id not present during ec2 renewal attempt, continuing to attempt validation")
+	}
+
+	// Cross check that the instance is still in 'running' state
+	if _, err := b.validateInstance(ctx, req.Storage, instanceID, region, accountID); err != nil {
+		return nil, fmt.Errorf("failed to verify instance ID %q: %w", instanceID, err)
+	}
+
+	storedIdentity, err := accessListIdentityEntry(ctx, req.Storage, instanceID)
+	if err != nil {
+		return nil, err
+	}
+	if storedIdentity == nil {
+		return nil, fmt.Errorf("failed to verify the access list identity entry for instance ID: %q", instanceID)
+	}
+
+	// Ensure that the role entry is not deleted
+	roleEntry, err := b.role(ctx, req.Storage, storedIdentity.Role)
+	if err != nil {
+		return nil, err
+	}
+	if roleEntry == nil {
+		return nil, fmt.Errorf("role entry not found")
+	}
+
+	// If the login was made using the role tag, then the tag's max_ttl is
+	// cached in the auth metadata during login and used here to cap the
+	// max_ttl of the renewal.
+	rTagMaxTTL, err := parseutil.ParseDurationSecond(req.Auth.Metadata["role_tag_max_ttl"])
+	if err != nil {
+		return nil, err
+	}
+
+	// Re-evaluate the maxTTL bounds
+	shortestMaxTTL := b.System().MaxLeaseTTL()
+	longestMaxTTL := b.System().MaxLeaseTTL()
+	if roleEntry.TokenMaxTTL > time.Duration(0) && roleEntry.TokenMaxTTL < shortestMaxTTL {
+		shortestMaxTTL = roleEntry.TokenMaxTTL
+	}
+	if roleEntry.TokenMaxTTL > longestMaxTTL {
+		longestMaxTTL = roleEntry.TokenMaxTTL
+	}
+	if rTagMaxTTL > time.Duration(0) && rTagMaxTTL < shortestMaxTTL {
+		shortestMaxTTL = rTagMaxTTL
+	}
+	if rTagMaxTTL > longestMaxTTL {
+		longestMaxTTL = rTagMaxTTL
+	}
+
+	// Only LastUpdatedTime and ExpirationTime change; all other fields remain the same
+	currentTime := time.Now()
+	storedIdentity.LastUpdatedTime = currentTime
+	storedIdentity.ExpirationTime = currentTime.Add(longestMaxTTL)
+
+	// Updating the expiration time is required for the tidy operation on the
+	// access list identity storage items
+	if err = setAccessListIdentityEntry(ctx, req.Storage, instanceID, storedIdentity); err != nil {
+		return nil, err
+	}
+
+	resp := &logical.Response{Auth: req.Auth}
+	resp.Auth.TTL = roleEntry.TokenTTL
+	resp.Auth.MaxTTL = shortestMaxTTL
+	resp.Auth.Period = roleEntry.TokenPeriod
+	return resp, nil
+}
+
+func (b *backend) pathLoginUpdateIam(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	roleName, callerID, entity, errResp, err := b.pathLoginIamGetRoleNameCallerIdAndEntity(ctx, req, data)
+	if errResp != nil || err != nil {
+		return errResp, err
+	}
+
+	roleEntry, err := b.role(ctx, req.Storage, roleName)
+	if err != nil {
+		return nil, err
+	}
+	if roleEntry == nil {
+		return logical.ErrorResponse(fmt.Sprintf("entry for role %s not found", roleName)), nil
+	}
+
+	// Check for a CIDR match.
+	if len(roleEntry.TokenBoundCIDRs) > 0 {
+		if req.Connection == nil {
+			b.Logger().Warn("token bound CIDRs found but no connection information available for validation")
+			return nil, logical.ErrPermissionDenied
+		}
+		if !cidrutil.RemoteAddrIsOk(req.Connection.RemoteAddr, roleEntry.TokenBoundCIDRs) {
+			return nil, logical.ErrPermissionDenied
+		}
+	}
+
+	if roleEntry.AuthType != iamAuthType {
+		return logical.ErrorResponse(fmt.Sprintf("auth method iam not allowed for role %s", roleName)), nil
+	}
+
+	identityConfigEntry, err := identityConfigEntry(ctx, req.Storage)
+	if err != nil {
+		return nil, err
+	}
+
+	// This could either be a "userID:SessionID" (in the case of an assumed
+	// role) or just a "userID" (in the case of an IAM user).
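+	// e.g. (illustrative values) an assumed-role UserId of
+	// "AROAEXAMPLEID:my-session" yields callerUniqueId "AROAEXAMPLEID", while
+	// an IAM user's UserId is used as-is.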
+	callerUniqueId := strings.Split(callerID.UserId, ":")[0]
+	identityAlias := ""
+	switch identityConfigEntry.IAMAlias {
+	case identityAliasRoleID:
+		identityAlias = roleEntry.RoleID
+	case identityAliasIAMUniqueID:
+		identityAlias = callerUniqueId
+	case identityAliasIAMFullArn:
+		identityAlias = callerID.Arn
+	}
+
+	// If we're just doing an alias lookahead for MFA, return the alias info
+	if req.Operation == logical.AliasLookaheadOperation {
+		return &logical.Response{
+			Auth: &logical.Auth{
+				Alias: &logical.Alias{
+					Name: identityAlias,
+				},
+			},
+		}, nil
+	}
+
+	// The role creation should ensure that either we're inferring this is an EC2 instance
+	// or that we're binding an ARN
+	if len(roleEntry.BoundIamPrincipalARNs) > 0 {
+		// As with renews, there are three ways to pass this check:
+		// 1: callerUniqueId is in roleEntry.BoundIamPrincipalIDs (entries in roleEntry.BoundIamPrincipalIDs
+		//    imply that roleEntry.ResolveAWSUniqueIDs is true)
+		// 2: roleEntry.ResolveAWSUniqueIDs is false and entity.canonicalArn() is in roleEntry.BoundIamPrincipalARNs
+		// 3: the full ARN matches one of the wildcard globs in roleEntry.BoundIamPrincipalARNs
+		// We need to be able to handle pathological configurations such as roleEntry.BoundIamPrincipalARNs looking something like:
+		// arn:aws:iam::123456789012:{user/UserName,user/path/*,role/RoleName,role/path/*}
+		switch {
+		case strutil.StrListContains(roleEntry.BoundIamPrincipalIDs, callerUniqueId): // check 1 passed
+		case !roleEntry.ResolveAWSUniqueIDs && strutil.StrListContains(roleEntry.BoundIamPrincipalARNs, entity.canonicalArn()): // check 2 passed
+		default:
+			// Evaluate check 3 -- only try to look up full ARNs if there's a
+			// wildcard ARN in BoundIamPrincipalARNs.
+			if !hasWildcardBind(roleEntry.BoundIamPrincipalARNs) {
+				return logical.ErrorResponse("IAM Principal %q does not belong to the role %q", callerID.Arn, roleName), nil
+			}
+
+			fullArn := b.getCachedUserId(callerUniqueId)
+			if fullArn == "" {
+				fullArn, err = b.fullArn(ctx, entity, req.Storage)
+				if err != nil {
+					return logical.ErrorResponse("error looking up full ARN of entity %v when attempting login for role %q: %v", entity, roleName, err), nil
+				}
+				if fullArn == "" {
+					return logical.ErrorResponse("got empty string back when looking up full ARN of entity %v when attempting login for role %q", entity, roleName), nil
+				}
+				b.setCachedUserId(callerUniqueId, fullArn)
+			}
+			matchedWildcardBind := false
+			for _, principalARN := range roleEntry.BoundIamPrincipalARNs {
+				if strings.HasSuffix(principalARN, "*") && strutil.GlobbedStringsMatch(principalARN, fullArn) {
+					matchedWildcardBind = true
+					break
+				}
+			}
+			if !matchedWildcardBind {
+				return logical.ErrorResponse("IAM Principal %q does not belong to the role %q", callerID.Arn, roleName), nil
+			}
+		}
+	}
+
+	inferredEntityType := ""
+	inferredEntityID := ""
+	if roleEntry.InferredEntityType == ec2EntityType {
+		instance, err := b.validateInstance(ctx, req.Storage, entity.SessionInfo, roleEntry.InferredAWSRegion, callerID.Account)
+		if err != nil {
+			return logical.ErrorResponse("failed to verify %s as a valid EC2 instance in region %s: %s", entity.SessionInfo, roleEntry.InferredAWSRegion, err), nil
+		}
+
+		// Build a fake identity doc to pass metadata about the instance to verifyInstanceMeetsRoleRequirements
+		identityDoc := &identityDocument{
+			Tags:       nil, // Don't really need the tags, so not doing the work of converting them from Instance.Tags to identityDocument.Tags
+			InstanceID: *instance.InstanceId,
+			AmiID:      *instance.ImageId,
+			AccountID: 
callerID.Account, + Region: roleEntry.InferredAWSRegion, + PendingTime: instance.LaunchTime.Format(time.RFC3339), + } + + validationError, err := b.verifyInstanceMeetsRoleRequirements(ctx, req.Storage, instance, roleEntry, roleName, identityDoc) + if err != nil { + return nil, err + } + if validationError != nil { + return logical.ErrorResponse(fmt.Sprintf("error validating instance: %s", validationError)), nil + } + + inferredEntityType = ec2EntityType + inferredEntityID = entity.SessionInfo + } + + auth := &logical.Auth{ + Metadata: map[string]string{ + "role_id": roleEntry.RoleID, + }, + InternalData: map[string]interface{}{ + "role_name": roleName, + "role_id": roleEntry.RoleID, + "canonical_arn": entity.canonicalArn(), + "client_user_id": callerUniqueId, + "inferred_entity_id": inferredEntityID, + "inferred_aws_region": roleEntry.InferredAWSRegion, + "account_id": entity.AccountNumber, + }, + DisplayName: entity.FriendlyName, + Alias: &logical.Alias{ + Name: identityAlias, + }, + } + + if entity.Type == "assumed-role" { + auth.DisplayName = strings.Join([]string{entity.FriendlyName, entity.SessionInfo}, "/") + } + + roleEntry.PopulateTokenAuth(auth) + if err := identityConfigEntry.IAMAuthMetadataHandler.PopulateDesiredMetadata(auth, map[string]string{ + "client_arn": callerID.Arn, + "canonical_arn": entity.canonicalArn(), + "client_user_id": callerUniqueId, + "auth_type": iamAuthType, + "inferred_entity_type": inferredEntityType, + "inferred_entity_id": inferredEntityID, + "inferred_aws_region": roleEntry.InferredAWSRegion, + "account_id": entity.AccountNumber, + }); err != nil { + b.Logger().Warn(fmt.Sprintf("unable to set alias metadata due to %s", err)) + } + + return &logical.Response{ + Auth: auth, + }, nil +} + +func hasWildcardBind(boundIamPrincipalARNs []string) bool { + for _, principalARN := range boundIamPrincipalARNs { + if strings.HasSuffix(principalARN, "*") { + return true + } + } + return false +} + +// Validate that the iam_request_body passed is valid for the STS request +func validateLoginIamRequestBody(body string) error { + qs, err := url.ParseQuery(body) + if err != nil { + return err + } + for k, v := range qs { + switch k { + case "Action": + if len(v) != 1 || v[0] != "GetCallerIdentity" { + return errRequestBodyNotValid + } + case "Version": + // Will assume for now that future versions don't change + // the semantics + default: + // Not expecting any other values + return errRequestBodyNotValid + } + } + return nil +} + +// These two methods (hasValuesFor*) return two bools +// The first is a hasAll, that is, does the request have all the values +// necessary for this auth method +// The second is a hasAny, that is, does the request have any of the fields +// exclusive to this auth method +func hasValuesForEc2Auth(data *framework.FieldData) (bool, bool) { + _, hasPkcs7 := data.GetOk("pkcs7") + _, hasIdentity := data.GetOk("identity") + _, hasSignature := data.GetOk("signature") + return (hasPkcs7 || (hasIdentity && hasSignature)), (hasPkcs7 || hasIdentity || hasSignature) +} + +func hasValuesForIamAuth(data *framework.FieldData) (bool, bool) { + _, hasRequestMethod := data.GetOk("iam_http_request_method") + _, hasRequestURL := data.GetOk("iam_request_url") + _, hasRequestBody := data.GetOk("iam_request_body") + _, hasRequestHeaders := data.GetOk("iam_request_headers") + return (hasRequestMethod && hasRequestURL && hasRequestBody && hasRequestHeaders), + (hasRequestMethod || hasRequestURL || hasRequestBody || hasRequestHeaders) +} + +func parseIamArn(iamArn 
string) (*iamEntity, error) {
+	// iamArn should look like one of the following:
+	// 1. arn:aws:iam::<account_id>:<entity_type>/<UserName>
+	// 2. arn:aws:sts::<account_id>:assumed-role/<RoleName>/<RoleSessionName>
+	// if we get something like 2, then we want to transform that back to what
+	// most people would expect, which is arn:aws:iam::<account_id>:role/<RoleName>
+	var entity iamEntity
+	fullParts := strings.Split(iamArn, ":")
+	if len(fullParts) != 6 {
+		return nil, fmt.Errorf("unrecognized arn: contains %d colon-separated parts, expected 6", len(fullParts))
+	}
+	if fullParts[0] != "arn" {
+		return nil, fmt.Errorf("unrecognized arn: does not begin with \"arn:\"")
+	}
+	// normally aws, but could be aws-cn or aws-us-gov
+	entity.Partition = fullParts[1]
+	if fullParts[2] != "iam" && fullParts[2] != "sts" {
+		return nil, fmt.Errorf("unrecognized service: %v, not one of iam or sts", fullParts[2])
+	}
+	// fullParts[3] is the region, which doesn't matter for AWS IAM entities
+	entity.AccountNumber = fullParts[4]
+	// fullParts[5] would now be something like user/<UserName> or
+	// assumed-role/<RoleName>/<RoleSessionName>
+	parts := strings.Split(fullParts[5], "/")
+	if len(parts) < 2 {
+		return nil, fmt.Errorf("unrecognized arn: %q contains fewer than 2 slash-separated parts", fullParts[5])
+	}
+	entity.Type = parts[0]
+	entity.Path = strings.Join(parts[1:len(parts)-1], "/")
+	entity.FriendlyName = parts[len(parts)-1]
+	// now, entity.FriendlyName should either be <UserName> or <RoleName>
+	switch entity.Type {
+	case "assumed-role":
+		// Check for three parts for assumed role ARNs
+		if len(parts) < 3 {
+			return nil, fmt.Errorf("unrecognized arn: %q contains fewer than 3 slash-separated parts", fullParts[5])
+		}
+		// Assumed roles don't have paths and have a slightly different format;
+		// parts[2] is the <RoleSessionName>
+		entity.Path = ""
+		entity.FriendlyName = parts[1]
+		entity.SessionInfo = parts[2]
+	case "user":
+	case "role":
+	case "instance-profile":
+	default:
+		return &iamEntity{}, fmt.Errorf("unrecognized principal type: %q", entity.Type)
+	}
+	return &entity, nil
+}
+
+func validateVaultHeaderValue(headers http.Header, _ *url.URL, requiredHeaderValue string) error {
+	providedValue := ""
+	for k, v := range headers {
+		if strings.EqualFold(iamServerIdHeader, k) {
+			providedValue = strings.Join(v, ",")
+			break
+		}
+	}
+	if providedValue == "" {
+		return fmt.Errorf("missing header %q", iamServerIdHeader)
+	}
+
+	// NOT doing a constant time compare here since the value is NOT intended to be secret
+	if providedValue != requiredHeaderValue {
+		return fmt.Errorf("expected %q but got %q", requiredHeaderValue, providedValue)
+	}
+
+	if authzHeaders, ok := headers["Authorization"]; ok {
+		// authzHeader looks like:
+		// AWS4-HMAC-SHA256 Credential=AKI..., SignedHeaders=host;x-amz-date;x-vault-aws-iam-server-id, Signature=...
+		// We need to extract the SignedHeaders
+		re := regexp.MustCompile(".*SignedHeaders=([^,]+)")
+		authzHeader := strings.Join(authzHeaders, ",")
+		matches := re.FindSubmatch([]byte(authzHeader))
+		if len(matches) < 1 {
+			return fmt.Errorf("vault header wasn't signed")
+		}
+		if len(matches) > 2 {
+			return fmt.Errorf("found multiple SignedHeaders components")
+		}
+		signedHeaders := string(matches[1])
+		return ensureHeaderIsSigned(signedHeaders, iamServerIdHeader)
+	}
+	// TODO: If we support GET requests, then we need to parse the X-Amz-SignedHeaders
+	// argument out of the query string and search in there for the header value
+	return fmt.Errorf("missing Authorization header")
+}
+
+func buildHttpRequest(method, endpoint string, parsedUrl *url.URL, body string, headers http.Header) *http.Request {
+	// This is all a bit complicated because the AWS signature algorithm requires that
+	// the Host header be included in the signed headers. See
+	// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+	// The use cases we want to support, in order of increasing complexity, are:
+	// 1. All defaults (client assumes sts.amazonaws.com and server has no override)
+	// 2. Alternate STS regions: client wants to go to a specific region, in which case
+	//    Vault must be configured with that endpoint as well. The client's signed request
+	//    will include a signature over what the client expects the Host header to be,
+	//    so we cannot change that and must match.
+	// 3. Alternate STS regions with a proxy that is transparent to Vault's clients.
+	//    In this case, Vault is aware of the proxy, as the proxy is configured as the
+	//    endpoint, but the clients should NOT be aware of the proxy (because STS will
+	//    not be aware of the proxy)
+	// It's also annoying because:
+	// 1. The AWS Sigv4 algorithm requires the Host header to be defined
+	// 2. Some of the official SDKs (at least botocore and aws-sdk-go) don't actually
+	//    include an explicit Host header in the HTTP requests they generate, relying on
+	//    the underlying HTTP library to do that for them.
+	// 3. To get a validly signed request, the SDKs check if a Host header has been set
+	//    and, if not, add an inferred host header (based on the URI) to the internal
+	//    data structure used for calculating the signature, but never actually expose
+	//    that to clients. So then they just "hope" that the underlying library actually
+	//    adds the right Host header which was included in the signature calculation.
+	// We could either require all Vault clients to explicitly add the Host header
+	// in the encoded request, or we could implicitly infer it from the URI.
+	// We choose to support both -- allow you to explicitly set a Host header, but if not,
+	// infer one from the URI.
+	// HOWEVER, we have to preserve the request URI portion of the client's
+	// URL because the GetCallerIdentity Action can be encoded in either the body
+	// or the URL. So, we need to rebuild the URL sent to the http library to have the
+	// custom, Vault-specified endpoint with the client-side request parameters.
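+	// Sketch of case 3 above (assumed values): the configured endpoint is a
+	// proxy such as "https://sts-proxy.internal:8443", while parsedUrl came
+	// from the client's "https://sts.us-east-1.amazonaws.com/". The request is
+	// sent to the proxy, but request.Host below is forced back to
+	// "sts.us-east-1.amazonaws.com" so the signature over the Host header
+	// still validates.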
+	targetUrl := fmt.Sprintf("%s/%s", endpoint, parsedUrl.RequestURI())
+	request, err := http.NewRequest(method, targetUrl, strings.NewReader(body))
+	if err != nil {
+		return nil
+	}
+	request.Host = parsedUrl.Host
+	for k, vals := range headers {
+		for _, val := range vals {
+			request.Header.Add(k, val)
+		}
+	}
+	return request
+}
+
+func ensureHeaderIsSigned(signedHeaders, headerToSign string) error {
+	// Not doing a constant time compare here, the values aren't secret
+	for _, header := range strings.Split(signedHeaders, ";") {
+		if header == strings.ToLower(headerToSign) {
+			return nil
+		}
+	}
+	return fmt.Errorf("vault header wasn't signed")
+}
+
+func parseGetCallerIdentityResponse(response string) (GetCallerIdentityResponse, error) {
+	result := GetCallerIdentityResponse{}
+	response = strings.TrimSpace(response)
+	if !strings.HasPrefix(response, "<GetCallerIdentityResponse") {
+		return result, fmt.Errorf("body of GetCallerIdentity is invalid")
+	}
+	decoder := xml.NewDecoder(strings.NewReader(response))
+	err := decoder.Decode(&result)
+	return result, err
+}
+
+func TestBackend_pathLogin_getCallerIdentityResponse(t *testing.T) {
+	responseFromUser := `<GetCallerIdentityResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
+  <GetCallerIdentityResult>
+    <Arn>arn:aws:iam::123456789012:user/MyUserName</Arn>
+    <UserId>ASOMETHINGSOMETHINGSOMETHING</UserId>
+    <Account>123456789012</Account>
+  </GetCallerIdentityResult>
+  <ResponseMetadata>
+    <RequestId>7f4fc40c-853a-11e6-8848-8d035d01eb87</RequestId>
+  </ResponseMetadata>
+</GetCallerIdentityResponse>`
+	expectedUserArn := "arn:aws:iam::123456789012:user/MyUserName"
+
+	responseFromAssumedRole := `<GetCallerIdentityResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
+  <GetCallerIdentityResult>
+    <Arn>arn:aws:sts::123456789012:assumed-role/RoleName/RoleSessionName</Arn>
+    <UserId>ASOMETHINGSOMETHINGELSE:RoleSessionName</UserId>
+    <Account>123456789012</Account>
+  </GetCallerIdentityResult>
+  <ResponseMetadata>
+    <RequestId>7f4fc40c-853a-11e6-8848-8d035d01eb87</RequestId>
+  </ResponseMetadata>
+</GetCallerIdentityResponse>`
+	expectedRoleArn := "arn:aws:sts::123456789012:assumed-role/RoleName/RoleSessionName"
+
+	parsedUserResponse, err := parseGetCallerIdentityResponse(responseFromUser)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if parsedArn := parsedUserResponse.GetCallerIdentityResult[0].Arn; parsedArn != expectedUserArn {
+		t.Errorf("expected to parse arn %#v, got %#v", expectedUserArn, parsedArn)
+	}
+
+	parsedRoleResponse, err := parseGetCallerIdentityResponse(responseFromAssumedRole)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if parsedArn := parsedRoleResponse.GetCallerIdentityResult[0].Arn; parsedArn != expectedRoleArn {
+		t.Errorf("expected to parse arn %#v; got %#v", expectedRoleArn, parsedArn)
+	}
+
+	_, err = parseGetCallerIdentityResponse("SomeRandomGibberish")
+	if err == nil {
+		t.Errorf("expected to NOT parse random gibberish, but didn't get an error")
+	}
+}
+
+func TestBackend_pathLogin_parseIamArn(t *testing.T) {
+	testParser := func(inputArn, expectedCanonicalArn string, expectedEntity iamEntity) {
+		entity, err := parseIamArn(inputArn)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if expectedCanonicalArn != "" && entity.canonicalArn() != expectedCanonicalArn {
+			t.Fatalf("expected to canonicalize ARN %q into %q but got %q instead", inputArn, expectedCanonicalArn, entity.canonicalArn())
+		}
+		if *entity != expectedEntity {
+			t.Fatalf("expected to get iamEntity %#v from input ARN %q but instead got %#v", expectedEntity, inputArn, *entity)
+		}
+	}
+
+	testParser("arn:aws:iam::123456789012:user/UserPath/MyUserName",
+		"arn:aws:iam::123456789012:user/MyUserName",
+		iamEntity{Partition: "aws", AccountNumber: "123456789012", Type: "user", Path: "UserPath", FriendlyName: "MyUserName"},
+	)
+	canonicalRoleArn := "arn:aws:iam::123456789012:role/RoleName"
+	testParser("arn:aws:sts::123456789012:assumed-role/RoleName/RoleSessionName",
+		canonicalRoleArn,
+		iamEntity{Partition: "aws", AccountNumber: "123456789012", Type: "assumed-role", FriendlyName: "RoleName", SessionInfo: "RoleSessionName"},
+	)
+	testParser("arn:aws:iam::123456789012:role/RolePath/RoleName",
+		canonicalRoleArn,
+		iamEntity{Partition: "aws", AccountNumber: "123456789012", Type: "role", Path: "RolePath", FriendlyName: "RoleName"},
+	)
testParser("arn:aws:iam::123456789012:instance-profile/profilePath/InstanceProfileName", + "", + iamEntity{Partition: "aws", AccountNumber: "123456789012", Type: "instance-profile", Path: "profilePath", FriendlyName: "InstanceProfileName"}, + ) + + // Test that it properly handles pathological inputs... + _, err := parseIamArn("") + if err == nil { + t.Error("expected error from empty input string") + } + + _, err = parseIamArn("arn:aws:iam::123456789012:role") + if err == nil { + t.Error("expected error from malformed ARN without a role name") + } + + _, err = parseIamArn("arn:aws:iam") + if err == nil { + t.Error("expected error from incomplete ARN (arn:aws:iam)") + } + + _, err = parseIamArn("arn:aws:iam::1234556789012:/") + if err == nil { + t.Error("expected error from empty principal type and no principal name (arn:aws:iam::1234556789012:/)") + } + _, err = parseIamArn("arn:aws:sts::1234556789012:assumed-role/role") + if err == nil { + t.Error("expected error from malformed assumed role ARN") + } +} + +func TestBackend_validateVaultHeaderValue(t *testing.T) { + const canaryHeaderValue = "Vault-Server" + requestURL, err := url.Parse("https://sts.amazonaws.com/") + if err != nil { + t.Fatalf("error parsing test URL: %v", err) + } + postHeadersMissing := http.Header{ + "Host": []string{"Foo"}, + "Authorization": []string{"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request, SignedHeaders=content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"}, + } + postHeadersInvalid := http.Header{ + "Host": []string{"Foo"}, + iamServerIdHeader: []string{"InvalidValue"}, + "Authorization": []string{"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request, SignedHeaders=content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"}, + } + postHeadersUnsigned := http.Header{ + "Host": []string{"Foo"}, + iamServerIdHeader: []string{canaryHeaderValue}, + "Authorization": []string{"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request, SignedHeaders=content-type;host;x-amz-date, Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"}, + } + postHeadersValid := http.Header{ + "Host": []string{"Foo"}, + iamServerIdHeader: []string{canaryHeaderValue}, + "Authorization": []string{"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request, SignedHeaders=content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"}, + } + + postHeadersSplit := http.Header{ + "Host": []string{"Foo"}, + iamServerIdHeader: []string{canaryHeaderValue}, + "Authorization": []string{"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request", "SignedHeaders=content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"}, + } + + err = validateVaultHeaderValue(postHeadersMissing, requestURL, canaryHeaderValue) + if err == nil { + t.Error("validated POST request with missing Vault header") + } + + err = validateVaultHeaderValue(postHeadersInvalid, requestURL, canaryHeaderValue) + if err == nil { + t.Error("validated POST request with invalid Vault header value") + } + + err = validateVaultHeaderValue(postHeadersUnsigned, requestURL, canaryHeaderValue) + if err == nil { + t.Error("validated POST request with unsigned Vault header") + } 
+ + err = validateVaultHeaderValue(postHeadersValid, requestURL, canaryHeaderValue) + if err != nil { + t.Errorf("did NOT validate valid POST request: %v", err) + } + + err = validateVaultHeaderValue(postHeadersSplit, requestURL, canaryHeaderValue) + if err != nil { + t.Errorf("did NOT validate valid POST request with split Authorization header: %v", err) + } +} + +// TestBackend_pathLogin_IAMHeaders tests login with iam_request_headers, +// supporting both base64 encoded string and JSON headers +func TestBackend_pathLogin_IAMHeaders(t *testing.T) { + storage := &logical.InmemStorage{} + config := logical.TestBackendConfig() + config.StorageView = storage + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // sets up a test server to stand in for STS service + ts := setupIAMTestServer() + defer ts.Close() + + clientConfigData := map[string]interface{}{ + "iam_server_id_header_value": testVaultHeaderValue, + "sts_endpoint": ts.URL, + } + clientRequest := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/client", + Storage: storage, + Data: clientConfigData, + } + _, err = b.HandleRequest(context.Background(), clientRequest) + if err != nil { + t.Fatal(err) + } + + // Configure identity. + _, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/identity", + Storage: storage, + Data: map[string]interface{}{ + "iam_alias": "role_id", + "iam_metadata": []string{ + "account_id", + "auth_type", + "canonical_arn", + "client_arn", + "client_user_id", + "inferred_aws_region", + "inferred_entity_id", + "inferred_entity_type", + }, + "ec2_alias": "role_id", + "ec2_metadata": []string{ + "account_id", + "ami_id", + "instance_id", + "region", + }, + }, + }) + if err != nil { + t.Fatal(err) + } + + // create a role entry + roleEntry := &awsRoleEntry{ + RoleID: "foo", + Version: currentRoleStorageVersion, + AuthType: iamAuthType, + } + + if err := b.setRole(context.Background(), storage, testValidRoleName, roleEntry); err != nil { + t.Fatalf("failed to set entry: %s", err) + } + + // create a baseline loginData map structure, including iam_request_headers + // already base64encoded. This is the "Default" loginData used for all tests. 
+ // Each sub test can override the map's iam_request_headers entry + loginData, err := defaultLoginData() + if err != nil { + t.Fatal(err) + } + + expectedAuthMetadata := map[string]string{ + "account_id": "123456789012", + "auth_type": "iam", + "canonical_arn": "arn:aws:iam::123456789012:user/valid-role", + "client_arn": "arn:aws:iam::123456789012:user/valid-role", + "client_user_id": "ASOMETHINGSOMETHINGSOMETHING", + } + + // expected errors for certain tests + missingHeaderErr := errors.New("error validating X-Vault-AWS-IAM-Server-ID header: missing header \"X-Vault-AWS-IAM-Server-ID\"") + parsingErr := errors.New("error making upstream request: error parsing STS response") + + testCases := []struct { + Name string + Header interface{} + ExpectErr error + }{ + { + Name: "Default", + }, + { + Name: "Map-complete", + Header: map[string]interface{}{ + "Content-Length": "43", + "Content-Type": "application/x-www-form-urlencoded; charset=utf-8", + "User-Agent": "aws-sdk-go/1.14.24 (go1.11; darwin; amd64)", + "X-Amz-Date": "20180910T203328Z", + "X-Vault-Aws-Iam-Server-Id": "VaultAcceptanceTesting", + "Authorization": "AWS4-HMAC-SHA256 Credential=AKIAJPQ466AIIQW4LPSQ/20180910/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=cdef5819b2e97f1ff0f3e898fd2621aa03af00a4ec3e019122c20e5482534bf4", + }, + }, + { + Name: "Map-incomplete", + Header: map[string]interface{}{ + "Content-Length": "43", + "Content-Type": "application/x-www-form-urlencoded; charset=utf-8", + "User-Agent": "aws-sdk-go/1.14.24 (go1.11; darwin; amd64)", + "X-Amz-Date": "20180910T203328Z", + "Authorization": "AWS4-HMAC-SHA256 Credential=AKIAJPQ466AIIQW4LPSQ/20180910/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=cdef5819b2e97f1ff0f3e898fd2621aa03af00a4ec3e019122c20e5482534bf4", + }, + ExpectErr: missingHeaderErr, + }, + { + Name: "Map-illegal-header", + Header: map[string]interface{}{ + "Content-Length": "43", + "Content-Type": "application/x-www-form-urlencoded; charset=utf-8", + "User-Agent": "aws-sdk-go/1.14.24 (go1.11; darwin; amd64)", + "X-Amz-Date": "20180910T203328Z", + "Authorization": "AWS4-HMAC-SHA256 Credential=AKIAJPQ466AIIQW4LPSQ/20180910/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=cdef5819b2e97f1ff0f3e898fd2621aa03af00a4ec3e019122c20e5482534bf4", + "X-Vault-Aws-Iam-Server-Id": "VaultAcceptanceTesting", + "X-Amz-Mallory-Header": "", + }, + ExpectErr: errors.New("invalid request header: X-Amz-Mallory-Header"), + }, + { + Name: "JSON-complete", + Header: `{ + "Content-Length":"43", + "Content-Type":"application/x-www-form-urlencoded; charset=utf-8", + "User-Agent":"aws-sdk-go/1.14.24 (go1.11; darwin; amd64)", + "X-Amz-Date":"20180910T203328Z", + "X-Vault-Aws-Iam-Server-Id": "VaultAcceptanceTesting", + "Authorization":"AWS4-HMAC-SHA256 Credential=AKIAJPQ466AIIQW4LPSQ/20180910/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=cdef5819b2e97f1ff0f3e898fd2621aa03af00a4ec3e019122c20e5482534bf4" + }`, + }, + { + Name: "JSON-incomplete", + Header: `{ + "Content-Length":"43", + "Content-Type":"application/x-www-form-urlencoded; charset=utf-8", + "User-Agent":"aws-sdk-go/1.14.24 (go1.11; darwin; amd64)", + "X-Amz-Date":"20180910T203328Z", + "X-Vault-Aws-Iam-Server-Id": "VaultAcceptanceTesting", + "Authorization":"AWS4-HMAC-SHA256 
Credential=AKIAJPQ466AIIQW4LPSQ/20180910/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-vault-aws-iam-server-id"
+			}`,
+			ExpectErr: parsingErr,
+		},
+		{
+			Name:   "Base64-complete",
+			Header: base64Complete(),
+		},
+		{
+			Name:      "Base64-incomplete-missing-header",
+			Header:    base64MissingVaultID(),
+			ExpectErr: missingHeaderErr,
+		},
+		{
+			Name:      "Base64-incomplete-missing-auth-sig",
+			Header:    base64MissingAuthField(),
+			ExpectErr: parsingErr,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.Name, func(t *testing.T) {
+			if tc.Header != nil {
+				loginData["iam_request_headers"] = tc.Header
+			}
+
+			loginRequest := &logical.Request{
+				Operation:  logical.UpdateOperation,
+				Path:       "login",
+				Storage:    storage,
+				Data:       loginData,
+				Connection: &logical.Connection{},
+			}
+
+			resp, err := b.HandleRequest(context.Background(), loginRequest)
+			if err != nil || resp == nil || resp.IsError() {
+				if tc.ExpectErr != nil && tc.ExpectErr.Error() == resp.Error().Error() {
+					return
+				}
+				t.Errorf("unexpected failed login:\nresp: %#v\n\nerr: %v", resp, err)
+			}
+
+			if !reflect.DeepEqual(expectedAuthMetadata, resp.Auth.Alias.Metadata) {
+				t.Errorf("expected metadata (%#v) to match (%#v)", expectedAuthMetadata, resp.Auth.Alias.Metadata)
+			}
+		})
+	}
+}
+
+// TestBackend_pathLogin_IAMRoleResolution tests role resolution for an IAM login
+func TestBackend_pathLogin_IAMRoleResolution(t *testing.T) {
+	storage := &logical.InmemStorage{}
+	config := logical.TestBackendConfig()
+	config.StorageView = storage
+	b, err := Backend(config)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = b.Setup(context.Background(), config)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// sets up a test server to stand in for the STS service
+	ts := setupIAMTestServer()
+	defer ts.Close()
+
+	clientConfigData := map[string]interface{}{
+		"iam_server_id_header_value": testVaultHeaderValue,
+		"sts_endpoint":               ts.URL,
+	}
+	clientRequest := &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "config/client",
+		Storage:   storage,
+		Data:      clientConfigData,
+	}
+	_, err = b.HandleRequest(context.Background(), clientRequest)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Configure identity.
+	_, err = b.HandleRequest(context.Background(), &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "config/identity",
+		Storage:   storage,
+		Data: map[string]interface{}{
+			"iam_alias": "role_id",
+			"iam_metadata": []string{
+				"account_id",
+				"auth_type",
+				"canonical_arn",
+				"client_arn",
+				"client_user_id",
+				"inferred_aws_region",
+				"inferred_entity_id",
+				"inferred_entity_type",
+			},
+			"ec2_alias": "role_id",
+			"ec2_metadata": []string{
+				"account_id",
+				"ami_id",
+				"instance_id",
+				"region",
+			},
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// create a role entry
+	roleEntry := &awsRoleEntry{
+		RoleID:   "foo",
+		Version:  currentRoleStorageVersion,
+		AuthType: iamAuthType,
+	}
+
+	if err := b.setRole(context.Background(), storage, testValidRoleName, roleEntry); err != nil {
+		t.Fatalf("failed to set entry: %s", err)
+	}
+
+	// create a baseline loginData map structure, including iam_request_headers
+	// already base64-encoded. This is the "Default" loginData used for all tests.
+ // Each sub test can override the map's iam_request_headers entry + loginData, err := defaultLoginData() + if err != nil { + t.Fatal(err) + } + + loginRequest := &logical.Request{ + Operation: logical.ResolveRoleOperation, + Path: "login", + Storage: storage, + Data: loginData, + Connection: &logical.Connection{}, + } + + resp, err := b.HandleRequest(context.Background(), loginRequest) + if err != nil || resp == nil || resp.IsError() { + t.Errorf("unexpected failed role resolution:\nresp: %#v\n\nerr: %v", resp, err) + } + if resp.Data["role"] != testValidRoleName { + t.Fatalf("Role was not as expected. Expected %s, received %s", testValidRoleName, resp.Data["role"]) + } +} + +func TestBackend_defaultAliasMetadata(t *testing.T) { + storage := &logical.InmemStorage{} + config := logical.TestBackendConfig() + config.StorageView = storage + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // sets up a test server to stand in for STS service + ts := setupIAMTestServer() + defer ts.Close() + + clientConfigData := map[string]interface{}{ + "iam_server_id_header_value": testVaultHeaderValue, + "sts_endpoint": ts.URL, + } + clientRequest := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/client", + Storage: storage, + Data: clientConfigData, + } + _, err = b.HandleRequest(context.Background(), clientRequest) + if err != nil { + t.Fatal(err) + } + + // Configure identity. + _, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/identity", + Storage: storage, + Data: map[string]interface{}{ + "iam_alias": "role_id", + "ec2_alias": "role_id", + }, + }) + if err != nil { + t.Fatal(err) + } + + // create a role entry + roleEntry := &awsRoleEntry{ + RoleID: "foo", + Version: currentRoleStorageVersion, + AuthType: iamAuthType, + } + + if err := b.setRole(context.Background(), storage, testValidRoleName, roleEntry); err != nil { + t.Fatalf("failed to set entry: %s", err) + } + + // create a baseline loginData map structure, including iam_request_headers + // already base64encoded. This is the "Default" loginData used for all tests. 
+
+	// Each sub test can override the map's iam_request_headers entry
+	loginData, err := defaultLoginData()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expectedAliasMetadata := map[string]string{
+		"account_id": "123456789012",
+		"auth_type":  "iam",
+	}
+
+	testCases := []struct {
+		Name      string
+		Header    interface{}
+		ExpectErr error
+	}{
+		{
+			Name: "Default",
+		},
+		{
+			Name: "Map-complete",
+			Header: map[string]interface{}{
+				"Content-Length":            "43",
+				"Content-Type":              "application/x-www-form-urlencoded; charset=utf-8",
+				"User-Agent":                "aws-sdk-go/1.14.24 (go1.11; darwin; amd64)",
+				"X-Amz-Date":                "20180910T203328Z",
+				"X-Vault-Aws-Iam-Server-Id": "VaultAcceptanceTesting",
+				"Authorization":             "AWS4-HMAC-SHA256 Credential=AKIAJPQ466AIIQW4LPSQ/20180910/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=cdef5819b2e97f1ff0f3e898fd2621aa03af00a4ec3e019122c20e5482534bf4",
+			},
+		},
+		{
+			Name: "JSON-complete",
+			Header: `{
+				"Content-Length":"43",
+				"Content-Type":"application/x-www-form-urlencoded; charset=utf-8",
+				"User-Agent":"aws-sdk-go/1.14.24 (go1.11; darwin; amd64)",
+				"X-Amz-Date":"20180910T203328Z",
+				"X-Vault-Aws-Iam-Server-Id": "VaultAcceptanceTesting",
+				"Authorization":"AWS4-HMAC-SHA256 Credential=AKIAJPQ466AIIQW4LPSQ/20180910/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=cdef5819b2e97f1ff0f3e898fd2621aa03af00a4ec3e019122c20e5482534bf4"
+			}`,
+		},
+		{
+			Name:   "Base64-complete",
+			Header: base64Complete(),
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.Name, func(t *testing.T) {
+			if tc.Header != nil {
+				loginData["iam_request_headers"] = tc.Header
+			}
+
+			loginRequest := &logical.Request{
+				Operation:  logical.UpdateOperation,
+				Path:       "login",
+				Storage:    storage,
+				Data:       loginData,
+				Connection: &logical.Connection{},
+			}
+
+			resp, err := b.HandleRequest(context.Background(), loginRequest)
+			if err != nil || resp == nil || resp.IsError() {
+				if tc.ExpectErr != nil && tc.ExpectErr.Error() == resp.Error().Error() {
+					return
+				}
+				t.Errorf("unexpected failed login:\nresp: %#v\n\nerr: %v", resp, err)
+			}
+
+			if !reflect.DeepEqual(expectedAliasMetadata, resp.Auth.Alias.Metadata) {
+				t.Errorf("expected metadata (%#v) to match (%#v)", expectedAliasMetadata, resp.Auth.Alias.Metadata)
+			}
+		})
+	}
+}
+
+func defaultLoginData() (map[string]interface{}, error) {
+	awsSession, err := session.NewSession()
+	if err != nil {
+		return nil, fmt.Errorf("failed to create session: %s", err)
+	}
+
+	stsService := sts.New(awsSession)
+	stsInputParams := &sts.GetCallerIdentityInput{}
+	stsRequestValid, _ := stsService.GetCallerIdentityRequest(stsInputParams)
+	stsRequestValid.HTTPRequest.Header.Add(iamServerIdHeader, testVaultHeaderValue)
+	stsRequestValid.HTTPRequest.Header.Add("Authorization", fmt.Sprintf("%s,%s,%s",
+		"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request",
+		"SignedHeaders=content-type;host;x-amz-date;x-vault-aws-iam-server-id",
+		"Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"))
+	stsRequestValid.Sign()
+
+	return buildCallerIdentityLoginData(stsRequestValid.HTTPRequest, testValidRoleName)
+}
+
+// setupIAMTestServer configures an httptest server to intercept and respond to
+// the IAM login path's invocation of submitCallerIdentityRequest (which does
+// not use the AWS SDK); it receives the mocked response responseFromUser
+// containing user information matching the role.
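+// Usage (as in the tests above, illustrative): point the client config's
+// sts_endpoint at ts.URL so login requests hit this handler instead of AWS.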
+func setupIAMTestServer() *httptest.Server {
+	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		responseString := `<GetCallerIdentityResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
+  <GetCallerIdentityResult>
+    <Arn>arn:aws:iam::123456789012:user/valid-role</Arn>
+    <UserId>ASOMETHINGSOMETHINGSOMETHING</UserId>
+    <Account>123456789012</Account>
+  </GetCallerIdentityResult>
+  <ResponseMetadata>
+    <RequestId>7f4fc40c-853a-11e6-8848-8d035d01eb87</RequestId>
+  </ResponseMetadata>
+</GetCallerIdentityResponse>
+`
+
+		auth := r.Header.Get("Authorization")
+		parts := strings.Split(auth, ",")
+		for i, s := range parts {
+			s = strings.TrimSpace(s)
+			key := strings.Split(s, "=")
+			parts[i] = key[0]
+		}
+
+		// verify the "Authorization" header contains all the expected parts
+		expectedAuthParts := []string{"AWS4-HMAC-SHA256 Credential", "SignedHeaders", "Signature"}
+		var matchingCount int
+		for _, v := range parts {
+			for _, z := range expectedAuthParts {
+				if z == v {
+					matchingCount++
+				}
+			}
+		}
+		if matchingCount != len(expectedAuthParts) {
+			responseString = "missing auth parts"
+		}
+		w.Header().Add("Content-Type", "text/xml")
+		fmt.Fprintln(w, responseString)
+	}))
+}
+
+// base64Complete returns a base64 encoded auth header as expected
+func base64Complete() string {
+	min := `{"Authorization":["AWS4-HMAC-SHA256 Credential=AKIAJPQ466AIIQW4LPSQ/20180907/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=97086b0531854844099fc52733fa2c88a2bfb54b2689600c6e249358a8353b52"],"Content-Length":["43"],"Content-Type":["application/x-www-form-urlencoded; charset=utf-8"],"User-Agent":["aws-sdk-go/1.14.24 (go1.11; darwin; amd64)"],"X-Amz-Date":["20180907T222145Z"],"X-Vault-Aws-Iam-Server-Id":["VaultAcceptanceTesting"]}`
+	return min
+}
+
+// base64MissingVaultID returns a base64 encoded auth header that omits the
+// Vault ID header
+func base64MissingVaultID() string {
+	min := `{"Authorization":["AWS4-HMAC-SHA256 Credential=AKIAJPQ466AIIQW4LPSQ/20180907/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=97086b0531854844099fc52733fa2c88a2bfb54b2689600c6e249358a8353b52"],"Content-Length":["43"],"Content-Type":["application/x-www-form-urlencoded; charset=utf-8"],"User-Agent":["aws-sdk-go/1.14.24 (go1.11; darwin; amd64)"],"X-Amz-Date":["20180907T222145Z"]}`
+	return min
+}
+
+// base64MissingAuthField returns a base64 encoded Auth header that omits the
+// "Signature" part
+func base64MissingAuthField() string {
+	min := `{"Authorization":["AWS4-HMAC-SHA256 Credential=AKIAJPQ466AIIQW4LPSQ/20180907/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-vault-aws-iam-server-id"],"Content-Length":["43"],"Content-Type":["application/x-www-form-urlencoded; charset=utf-8"],"User-Agent":["aws-sdk-go/1.14.24 (go1.11; darwin; amd64)"],"X-Amz-Date":["20180907T222145Z"],"X-Vault-Aws-Iam-Server-Id":["VaultAcceptanceTesting"]}`
+	return min
+}
diff --git a/builtin/credential/aws/path_role.go b/builtin/credential/aws/path_role.go
new file mode 100644
index 0000000..1c9ecf2
--- /dev/null
+++ b/builtin/credential/aws/path_role.go
@@ -0,0 +1,1086 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package awsauth
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	uuid "github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/helper/consts"
+	"github.com/hashicorp/vault/sdk/helper/tokenutil"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/mitchellh/copystructure"
+)
+
+var currentRoleStorageVersion = 3
+
+func (b *backend) pathRole() *framework.Path {
+	p := &framework.Path{
+		Pattern: "role/" + framework.GenericNameRegex("role"),
+
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixAWS,
+			OperationSuffix: "auth-role",
+		},
+
+		Fields: map[string]*framework.FieldSchema{
+			"role": {
+				Type:        framework.TypeString,
+				Description: "Name of the role.",
+			},
+			"auth_type": {
+				Type: framework.TypeString,
+				Description: `The auth_type permitted to authenticate to this role. Must be one of
+iam or ec2 and cannot be changed after role creation.`,
+			},
+			"bound_ami_id": {
+				Type: framework.TypeCommaStringSlice,
+				Description: `If set, defines a constraint on the EC2 instances that they must be
+using one of the AMI IDs specified by this parameter. This is only applicable
+when auth_type is ec2 or inferred_entity_type is ec2_instance.`,
+			},
+			"bound_account_id": {
+				Type: framework.TypeCommaStringSlice,
+				Description: `If set, defines a constraint on the EC2 instances that the account ID
+in their identity document must match one of the IDs specified by this parameter.
+This is only applicable when auth_type is ec2 or inferred_entity_type is
+ec2_instance.`,
+			},
+			"bound_iam_principal_arn": {
+				Type: framework.TypeCommaStringSlice,
+				Description: `ARN of the IAM principals to bind to this role. Only applicable when
+auth_type is iam.`,
+			},
+			"bound_region": {
+				Type: framework.TypeCommaStringSlice,
+				Description: `If set, defines a constraint on the EC2 instances that the region in
+their identity document matches one of the regions specified by this parameter. This is only
+applicable when auth_type is ec2.`,
+			},
+			"bound_iam_role_arn": {
+				Type: framework.TypeCommaStringSlice,
+				Description: `If set, defines a constraint on the authenticating EC2 instance
+that it must match one of the IAM role ARNs specified by this parameter.
+The value is prefix-matched (as though it were a glob ending in
+'*'). The configured IAM user or EC2 instance role must be allowed
+to execute the 'iam:GetInstanceProfile' action if this is specified. This is
+only applicable when auth_type is ec2 or inferred_entity_type is
+ec2_instance.`,
+			},
+			"bound_iam_instance_profile_arn": {
+				Type: framework.TypeCommaStringSlice,
+				Description: `If set, defines a constraint on the EC2 instances to be associated
+with an IAM instance profile ARN which has a prefix that matches
+one of the values specified by this parameter. The value is prefix-matched
+(as though it were a glob ending in '*'). This is only applicable when
+auth_type is ec2 or inferred_entity_type is ec2_instance.`,
+			},
+			"bound_ec2_instance_id": {
+				Type: framework.TypeCommaStringSlice,
+				Description: `If set, defines a constraint on the EC2 instances to have one of the
+given instance IDs. Can be a list or comma-separated string of EC2 instance
+IDs. This is only applicable when auth_type is ec2 or inferred_entity_type is
+ec2_instance.`,
+				DisplayAttrs: &framework.DisplayAttributes{
+					Description: "If set, defines a constraint on the EC2 instances to have one of the given instance IDs. A list of EC2 instance IDs. This is only applicable when auth_type is ec2 or inferred_entity_type is ec2_instance.",
+				},
+			},
+			"resolve_aws_unique_ids": {
+				Type:    framework.TypeBool,
+				Default: true,
+				Description: `If set, resolve all AWS IAM ARNs into AWS's internal unique IDs.
+When an IAM entity (e.g., user, role, or instance profile) is deleted, then all references
+to it within the role will be invalidated, which prevents a new IAM entity from being created
+with the same name and matching the role's IAM binds. Once set, this cannot be unset.`,
+			},
+			"inferred_entity_type": {
+				Type: framework.TypeString,
+				Description: `When auth_type is iam, the
+AWS entity type to infer from the authenticated principal. The only supported
+value is ec2_instance, which will extract the EC2 instance ID from the
+authenticated role and apply the following restrictions specific to EC2
+instances: bound_ami_id, bound_account_id, bound_iam_role_arn,
+bound_iam_instance_profile_arn, bound_vpc_id, bound_subnet_id. The configured
+EC2 client must be able to find the inferred instance ID in the results, and the
+instance must be running. If unable to determine the EC2 instance ID or unable
+to find the EC2 instance ID among running instances, then authentication will
+fail.`,
+			},
+			"inferred_aws_region": {
+				Type: framework.TypeString,
+				Description: `When auth_type is iam and
+inferred_entity_type is set, the region to assume the inferred entity exists in.`,
+			},
+			"bound_vpc_id": {
+				Type: framework.TypeCommaStringSlice,
+				Description: `
+If set, defines a constraint on the EC2 instance to be associated with a VPC
+ID that matches one of the values specified by this parameter. This is only
+applicable when auth_type is ec2 or inferred_entity_type is ec2_instance.`,
+			},
+			"bound_subnet_id": {
+				Type: framework.TypeCommaStringSlice,
+				Description: `
+If set, defines a constraint on the EC2 instance to be associated with the
+subnet ID that matches one of the values specified by this parameter. This is
+only applicable when auth_type is ec2 or inferred_entity_type is
+ec2_instance.`,
+			},
+			"role_tag": {
+				Type:    framework.TypeString,
+				Default: "",
+				Description: `If set, enables the role tags for this role. The value set for this
+field should be the 'key' of the tag on the EC2 instance. The 'value'
+of the tag should be generated using the 'role/<role>/tag' endpoint.
+Defaults to an empty string, meaning that role tags are disabled. This
+is only allowed if auth_type is ec2.`,
+			},
+			"period": {
+				Type:        framework.TypeDurationSecond,
+				Description: tokenutil.DeprecationText("token_period"),
+				Deprecated:  true,
+			},
+			"ttl": {
+				Type:        framework.TypeDurationSecond,
+				Description: tokenutil.DeprecationText("token_ttl"),
+				Deprecated:  true,
+			},
+			"max_ttl": {
+				Type:        framework.TypeDurationSecond,
+				Description: tokenutil.DeprecationText("token_max_ttl"),
+				Deprecated:  true,
+			},
+			"policies": {
+				Type:        framework.TypeCommaStringSlice,
+				Description: tokenutil.DeprecationText("token_policies"),
+				Deprecated:  true,
+			},
+			"allow_instance_migration": {
+				Type:    framework.TypeBool,
+				Default: false,
+				Description: `If set, allows migration of the underlying instance where the client
+resides. This keys off of pendingTime in the metadata document, so
+essentially, this disables the client nonce check whenever the
+instance is migrated to a new host and pendingTime is newer than the
+previously-remembered time. Use with caution. This is only checked when
+auth_type is ec2.`,
+			},
+			"disallow_reauthentication": {
+				Type:    framework.TypeBool,
+				Default: false,
+				Description: `If set, only allows a single token to be granted per
+instance ID. In order to perform a fresh login, the entry in the access list
+for the instance ID needs to be cleared using the
+'auth/aws-ec2/identity-accesslist/<instance_id>' endpoint. This is only
+applicable when auth_type is ec2.`,
+			},
+		},
+
+		ExistenceCheck: b.pathRoleExistenceCheck,
+
+		Operations: map[logical.Operation]framework.OperationHandler{
+			logical.CreateOperation: &framework.PathOperation{
+				Callback: b.pathRoleCreateUpdate,
+			},
+			logical.UpdateOperation: &framework.PathOperation{
+				Callback: b.pathRoleCreateUpdate,
+			},
+			logical.ReadOperation: &framework.PathOperation{
+				Callback: b.pathRoleRead,
+			},
+			logical.DeleteOperation: &framework.PathOperation{
+				Callback: b.pathRoleDelete,
+			},
+		},
+
+		HelpSynopsis:    pathRoleSyn,
+		HelpDescription: pathRoleDesc,
+	}
+
+	tokenutil.AddTokenFields(p.Fields)
+	return p
+}
+
+func (b *backend) pathListRole() *framework.Path {
+	return &framework.Path{
+		Pattern: "role/?",
+
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixAWS,
+			OperationSuffix: "auth-roles",
+		},
+
+		Operations: map[logical.Operation]framework.OperationHandler{
+			logical.ListOperation: &framework.PathOperation{
+				Callback: b.pathRoleList,
+			},
+		},
+
+		HelpSynopsis:    pathListRolesHelpSyn,
+		HelpDescription: pathListRolesHelpDesc,
+	}
+}
+
+func (b *backend) pathListRoles() *framework.Path {
+	return &framework.Path{
+		Pattern: "roles/?",
+
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixAWS,
+			OperationSuffix: "auth-roles2",
+		},
+
+		Operations: map[logical.Operation]framework.OperationHandler{
+			logical.ListOperation: &framework.PathOperation{
+				Callback: b.pathRoleList,
+			},
+		},
+
+		HelpSynopsis:    pathListRolesHelpSyn,
+		HelpDescription: pathListRolesHelpDesc,
+	}
+}
+
+// Establishes dichotomy of request operation between CreateOperation and UpdateOperation.
+// Returning 'true' forces an UpdateOperation, CreateOperation otherwise.
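+//
+// For example (editor's sketch of the framework's dispatch, not upstream
+// code):
+//
+//	exists, err := b.pathRoleExistenceCheck(ctx, req, data)
+//	// exists == true  -> the write is routed as logical.UpdateOperation
+//	// exists == false -> the write is routed as logical.CreateOperation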
+func (b *backend) pathRoleExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) {
+	entry, err := b.role(ctx, req.Storage, strings.ToLower(data.Get("role").(string)))
+	if err != nil {
+		return false, err
+	}
+	return entry != nil, nil
+}
+
+// role fetches the role entry from cache, or loads from disk if necessary
+func (b *backend) role(ctx context.Context, s logical.Storage, roleName string) (*awsRoleEntry, error) {
+	if roleName == "" {
+		return nil, fmt.Errorf("missing role name")
+	}
+
+	roleEntryRaw, found := b.roleCache.Get(roleName)
+	if found && roleEntryRaw != nil {
+		roleEntry, ok := roleEntryRaw.(*awsRoleEntry)
+		if !ok {
+			return nil, errors.New("could not convert role entry internally")
+		}
+		if roleEntry == nil {
+			return nil, errors.New("converted role entry is nil")
+		}
+		// Cache hit: return the cached entry.
+		return roleEntry, nil
+	}
+
+	// Not found, or was nil
+	b.roleMutex.Lock()
+	defer b.roleMutex.Unlock()
+
+	return b.roleInternal(ctx, s, roleName)
+}
+
+// roleInternal does not perform locking, and rechecks the cache, going to disk if necessary
+func (b *backend) roleInternal(ctx context.Context, s logical.Storage, roleName string) (*awsRoleEntry, error) {
+	// Check cache again now that we have the lock
+	roleEntryRaw, found := b.roleCache.Get(roleName)
+	if found && roleEntryRaw != nil {
+		roleEntry, ok := roleEntryRaw.(*awsRoleEntry)
+		if !ok {
+			return nil, errors.New("could not convert role entry internally")
+		}
+		if roleEntry == nil {
+			return nil, errors.New("converted role entry is nil")
+		}
+		// Cache hit: return the cached entry.
+		return roleEntry, nil
+	}
+
+	// Fetch from storage
+	entry, err := s.Get(ctx, "role/"+strings.ToLower(roleName))
+	if err != nil {
+		return nil, err
+	}
+	if entry == nil {
+		return nil, nil
+	}
+
+	result := new(awsRoleEntry)
+	if err := entry.DecodeJSON(result); err != nil {
+		return nil, err
+	}
+
+	needUpgrade, err := b.upgradeRole(ctx, s, result)
+	if err != nil {
+		return nil, fmt.Errorf("error upgrading roleEntry: %w", err)
+	}
+	if needUpgrade && (b.System().LocalMount() || !b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary|consts.ReplicationPerformanceStandby)) {
+		if err = b.setRole(ctx, s, roleName, result); err != nil {
+			return nil, fmt.Errorf("error saving upgraded roleEntry: %w", err)
+		}
+	}
+
+	b.roleCache.SetDefault(roleName, result)
+
+	return result, nil
+}
+
+// setRole creates or updates a role in the storage. The caller must hold
+// the write lock.
+func (b *backend) setRole(ctx context.Context, s logical.Storage, roleName string,
+	roleEntry *awsRoleEntry,
+) error {
+	if roleName == "" {
+		return fmt.Errorf("missing role name")
+	}
+
+	if roleEntry == nil {
+		return fmt.Errorf("nil role entry")
+	}
+
+	entry, err := logical.StorageEntryJSON("role/"+strings.ToLower(roleName), roleEntry)
+	if err != nil {
+		return err
+	}
+
+	if err := s.Put(ctx, entry); err != nil {
+		return err
+	}
+
+	b.roleCache.SetDefault(roleName, roleEntry)
+
+	return nil
+}
+
+// initialize is used to initialize the AWS roles
+func (b *backend) initialize(ctx context.Context, req *logical.InitializationRequest) error {
+	// on standbys and DR secondaries we do not want to run any kind of upgrade logic
+	if b.System().ReplicationState().HasState(consts.ReplicationPerformanceStandby | consts.ReplicationDRSecondary) {
+		return nil
+	}
+
+	// Initialize only if we are either:
+	//   (1) A local mount.
+	//   (2) _NOT_ a replicated performance secondary.
+	if b.System().LocalMount() || !b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary) {
+
+		s := req.Storage
+
+		logger := b.Logger().Named("initialize")
+		logger.Debug("starting initialization")
+
+		var upgradeCtx context.Context
+		upgradeCtx, b.upgradeCancelFunc = context.WithCancel(context.Background())
+
+		go func() {
+			// The vault will become unsealed while this goroutine is running,
+			// so we could see some role requests block until the lock is
+			// released. However we'd rather see those requests block (and
+			// potentially start timing out) than allow a non-upgraded role to
+			// be fetched.
+			b.roleMutex.Lock()
+			defer b.roleMutex.Unlock()
+
+			upgraded, err := b.upgrade(upgradeCtx, s)
+			if err != nil {
+				logger.Error("error running initialization", "error", err)
+				return
+			}
+			if upgraded {
+				logger.Info("an upgrade was performed during initialization")
+			}
+		}()
+
+	}
+
+	return nil
+}
+
+// awsVersion stores info about the latest aws version that we have
+// upgraded to.
+type awsVersion struct {
+	Version int `json:"version"`
+}
+
+// currentAwsVersion stores the latest version that we have upgraded to.
+// Note that this is tracked independently from currentRoleStorageVersion.
+const currentAwsVersion = 1
+
+// upgrade does an upgrade, if necessary
+func (b *backend) upgrade(ctx context.Context, s logical.Storage) (bool, error) {
+	entry, err := s.Get(ctx, "config/version")
+	if err != nil {
+		return false, err
+	}
+	var version awsVersion
+	if entry != nil {
+		err = entry.DecodeJSON(&version)
+		if err != nil {
+			return false, err
+		}
+	}
+
+	upgraded := version.Version < currentAwsVersion
+	switch version.Version {
+	case 0:
+		// Read all the role names.
+		roleNames, err := s.List(ctx, "role/")
+		if err != nil {
+			return false, err
+		}
+
+		// Upgrade the roles as necessary.
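+		// (Editor's annotation, not upstream code) Loading each role via
+		// roleInternal is what performs the work here: it invokes upgradeRole
+		// and, on mounts that are allowed to write, persists any upgraded
+		// entry back to storage. After a successful pass the version record
+		// is saved below, so the stored JSON at "config/version" becomes:
+		//
+		//	{"version": 1}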
+ for _, roleName := range roleNames { + // make sure the context hasn't been canceled + if ctx.Err() != nil { + return false, ctx.Err() + } + _, err := b.roleInternal(ctx, s, roleName) + if err != nil { + return false, err + } + } + fallthrough + + case currentAwsVersion: + version.Version = currentAwsVersion + + default: + return false, fmt.Errorf("unrecognized role version: %d", version.Version) + } + + // save the current version + if upgraded { + entry, err = logical.StorageEntryJSON("config/version", &version) + if err != nil { + return false, err + } + err = s.Put(ctx, entry) + if err != nil { + return false, err + } + } + + return upgraded, nil +} + +// If needed, updates the role entry and returns a bool indicating if it was updated +// (and thus needs to be persisted) +func (b *backend) upgradeRole(ctx context.Context, s logical.Storage, roleEntry *awsRoleEntry) (bool, error) { + if roleEntry == nil { + return false, fmt.Errorf("received nil roleEntry") + } + upgraded := roleEntry.Version < currentRoleStorageVersion + switch roleEntry.Version { + case 0: + // Check if the value held by role ARN field is actually an instance profile ARN + if roleEntry.BoundIamRoleARN != "" && strings.Contains(roleEntry.BoundIamRoleARN, ":instance-profile/") { + // If yes, move it to the correct field + roleEntry.BoundIamInstanceProfileARN = roleEntry.BoundIamRoleARN + + // Reset the old field + roleEntry.BoundIamRoleARN = "" + } + + // Check if there was no pre-existing AuthType set (from older versions) + if roleEntry.AuthType == "" { + // then default to the original behavior of ec2 + roleEntry.AuthType = ec2AuthType + } + + // Check if we need to resolve the unique ID on the role + if roleEntry.AuthType == iamAuthType && + roleEntry.ResolveAWSUniqueIDs && + roleEntry.BoundIamPrincipalARN != "" && + roleEntry.BoundIamPrincipalID == "" && + !strings.HasSuffix(roleEntry.BoundIamPrincipalARN, "*") { + principalId, err := b.resolveArnToUniqueIDFunc(ctx, s, roleEntry.BoundIamPrincipalARN) + if err != nil { + return false, err + } + roleEntry.BoundIamPrincipalID = principalId + // Not setting roleEntry.BoundIamPrincipalARN to "" here so that clients can see the original + // ARN that the role was bound to + } + + // Check if we need to convert individual string values to lists + if roleEntry.BoundAmiID != "" { + roleEntry.BoundAmiIDs = []string{roleEntry.BoundAmiID} + roleEntry.BoundAmiID = "" + } + if roleEntry.BoundAccountID != "" { + roleEntry.BoundAccountIDs = []string{roleEntry.BoundAccountID} + roleEntry.BoundAccountID = "" + } + if roleEntry.BoundIamPrincipalARN != "" { + roleEntry.BoundIamPrincipalARNs = []string{roleEntry.BoundIamPrincipalARN} + roleEntry.BoundIamPrincipalARN = "" + } + if roleEntry.BoundIamPrincipalID != "" { + roleEntry.BoundIamPrincipalIDs = []string{roleEntry.BoundIamPrincipalID} + roleEntry.BoundIamPrincipalID = "" + } + if roleEntry.BoundIamRoleARN != "" { + roleEntry.BoundIamRoleARNs = []string{roleEntry.BoundIamRoleARN} + roleEntry.BoundIamRoleARN = "" + } + if roleEntry.BoundIamInstanceProfileARN != "" { + roleEntry.BoundIamInstanceProfileARNs = []string{roleEntry.BoundIamInstanceProfileARN} + roleEntry.BoundIamInstanceProfileARN = "" + } + if roleEntry.BoundRegion != "" { + roleEntry.BoundRegions = []string{roleEntry.BoundRegion} + roleEntry.BoundRegion = "" + } + if roleEntry.BoundSubnetID != "" { + roleEntry.BoundSubnetIDs = []string{roleEntry.BoundSubnetID} + roleEntry.BoundSubnetID = "" + } + if roleEntry.BoundVpcID != "" { + roleEntry.BoundVpcIDs = 
[]string{roleEntry.BoundVpcID} + roleEntry.BoundVpcID = "" + } + fallthrough + + case 1: + // Make BoundIamRoleARNs and BoundIamInstanceProfileARNs explicitly prefix-matched + for i, arn := range roleEntry.BoundIamRoleARNs { + roleEntry.BoundIamRoleARNs[i] = fmt.Sprintf("%s*", arn) + } + for i, arn := range roleEntry.BoundIamInstanceProfileARNs { + roleEntry.BoundIamInstanceProfileARNs[i] = fmt.Sprintf("%s*", arn) + } + fallthrough + + case 2: + roleID, err := uuid.GenerateUUID() + if err != nil { + return false, err + } + roleEntry.RoleID = roleID + fallthrough + + case currentRoleStorageVersion: + roleEntry.Version = currentRoleStorageVersion + + default: + return false, fmt.Errorf("unrecognized role version: %q", roleEntry.Version) + } + + // Add tokenutil upgrades. These don't need to be persisted, they're fine + // being upgraded each time until changed. + if roleEntry.TokenTTL == 0 && roleEntry.TTL > 0 { + roleEntry.TokenTTL = roleEntry.TTL + } + if roleEntry.TokenMaxTTL == 0 && roleEntry.MaxTTL > 0 { + roleEntry.TokenMaxTTL = roleEntry.MaxTTL + } + if roleEntry.TokenPeriod == 0 && roleEntry.Period > 0 { + roleEntry.TokenPeriod = roleEntry.Period + } + if len(roleEntry.TokenPolicies) == 0 && len(roleEntry.Policies) > 0 { + roleEntry.TokenPolicies = roleEntry.Policies + } + + return upgraded, nil +} + +// pathRoleDelete is used to delete the information registered for a given AMI ID. +func (b *backend) pathRoleDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("role").(string) + if roleName == "" { + return logical.ErrorResponse("missing role"), nil + } + + b.roleMutex.Lock() + defer b.roleMutex.Unlock() + + err := req.Storage.Delete(ctx, "role/"+strings.ToLower(roleName)) + if err != nil { + return nil, fmt.Errorf("error deleting role: %w", err) + } + + b.roleCache.Delete(roleName) + + return nil, nil +} + +// pathRoleList is used to list all the AMI IDs registered with Vault. +func (b *backend) pathRoleList(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roles, err := req.Storage.List(ctx, "role/") + if err != nil { + return nil, err + } + + return logical.ListResponse(roles), nil +} + +// pathRoleRead is used to view the information registered for a given AMI ID. +func (b *backend) pathRoleRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleEntry, err := b.role(ctx, req.Storage, strings.ToLower(data.Get("role").(string))) + if err != nil { + return nil, err + } + if roleEntry == nil { + return nil, nil + } + + return &logical.Response{ + Data: roleEntry.ToResponseData(), + }, nil +} + +// pathRoleCreateUpdate is used to associate Vault policies to a given AMI ID. +func (b *backend) pathRoleCreateUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := strings.ToLower(data.Get("role").(string)) + if roleName == "" { + return logical.ErrorResponse("missing role"), nil + } + + b.roleMutex.Lock() + defer b.roleMutex.Unlock() + + // We use the internal one here to ensure that we have fresh data and + // nobody else is concurrently modifying. This will also call the upgrade + // path on existing role entries. 
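+	// (Editor's note) A deep copy is taken below before mutating an existing
+	// entry; a hypothetical in-place update such as
+	//
+	//	cached, _ := b.roleCache.Get(roleName)
+	//	cached.(*awsRoleEntry).BoundAmiIDs = newIDs // mutates the shared cache entry
+	//
+	// would become visible to concurrent readers even if this request later
+	// failed, which is exactly what the copystructure.Copy call avoids.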
+ roleEntry, err := b.roleInternal(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if roleEntry == nil { + roleID, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + roleEntry = &awsRoleEntry{ + RoleID: roleID, + Version: currentRoleStorageVersion, + } + } else { + // We want to always use a copy so we aren't modifying items in the + // version in the cache while other users may be looking it up (or if + // we fail somewhere) + cp, err := copystructure.Copy(roleEntry) + if err != nil { + return nil, err + } + roleEntry = cp.(*awsRoleEntry) + } + + // Fetch and set the bound parameters. There can't be default values + // for these. + if boundAmiIDRaw, ok := data.GetOk("bound_ami_id"); ok { + roleEntry.BoundAmiIDs = boundAmiIDRaw.([]string) + } + + if boundAccountIDRaw, ok := data.GetOk("bound_account_id"); ok { + roleEntry.BoundAccountIDs = boundAccountIDRaw.([]string) + } + + if boundRegionRaw, ok := data.GetOk("bound_region"); ok { + roleEntry.BoundRegions = boundRegionRaw.([]string) + } + + if boundVpcIDRaw, ok := data.GetOk("bound_vpc_id"); ok { + roleEntry.BoundVpcIDs = boundVpcIDRaw.([]string) + } + + if boundSubnetIDRaw, ok := data.GetOk("bound_subnet_id"); ok { + roleEntry.BoundSubnetIDs = boundSubnetIDRaw.([]string) + } + + if resolveAWSUniqueIDsRaw, ok := data.GetOk("resolve_aws_unique_ids"); ok { + switch { + case req.Operation == logical.CreateOperation: + roleEntry.ResolveAWSUniqueIDs = resolveAWSUniqueIDsRaw.(bool) + case roleEntry.ResolveAWSUniqueIDs && !resolveAWSUniqueIDsRaw.(bool): + return logical.ErrorResponse("changing resolve_aws_unique_ids from true to false is not allowed"), nil + default: + roleEntry.ResolveAWSUniqueIDs = resolveAWSUniqueIDsRaw.(bool) + } + } else if req.Operation == logical.CreateOperation { + roleEntry.ResolveAWSUniqueIDs = data.Get("resolve_aws_unique_ids").(bool) + } + + if boundIamRoleARNRaw, ok := data.GetOk("bound_iam_role_arn"); ok { + roleEntry.BoundIamRoleARNs = boundIamRoleARNRaw.([]string) + } + + if boundIamInstanceProfileARNRaw, ok := data.GetOk("bound_iam_instance_profile_arn"); ok { + roleEntry.BoundIamInstanceProfileARNs = boundIamInstanceProfileARNRaw.([]string) + } + + if boundEc2InstanceIDRaw, ok := data.GetOk("bound_ec2_instance_id"); ok { + roleEntry.BoundEc2InstanceIDs = boundEc2InstanceIDRaw.([]string) + } + + if boundIamPrincipalARNRaw, ok := data.GetOk("bound_iam_principal_arn"); ok { + principalARNs := boundIamPrincipalARNRaw.([]string) + roleEntry.BoundIamPrincipalARNs = principalARNs + roleEntry.BoundIamPrincipalIDs = []string{} + } + if roleEntry.ResolveAWSUniqueIDs && len(roleEntry.BoundIamPrincipalIDs) == 0 { + // we might be turning on resolution on this role, so ensure we update the IDs + for _, principalARN := range roleEntry.BoundIamPrincipalARNs { + if !strings.HasSuffix(principalARN, "*") { + principalID, err := b.resolveArnToUniqueIDFunc(ctx, req.Storage, principalARN) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("unable to resolve ARN %#v to internal ID: %s", principalARN, err.Error())), nil + } + roleEntry.BoundIamPrincipalIDs = append(roleEntry.BoundIamPrincipalIDs, principalID) + } + } + } + + if inferRoleTypeRaw, ok := data.GetOk("inferred_entity_type"); ok { + roleEntry.InferredEntityType = inferRoleTypeRaw.(string) + } + + if inferredAWSRegionRaw, ok := data.GetOk("inferred_aws_region"); ok { + roleEntry.InferredAWSRegion = inferredAWSRegionRaw.(string) + } + + // auth_type is a special case as it's immutable and can't be changed once a role is 
created + if authTypeRaw, ok := data.GetOk("auth_type"); ok { + // roleEntry.AuthType should only be "" when it's a new role; existing roles without an + // auth_type should have already been upgraded to have one before we get here + if roleEntry.AuthType == "" { + switch authTypeRaw.(string) { + case ec2AuthType, iamAuthType: + roleEntry.AuthType = authTypeRaw.(string) + default: + return logical.ErrorResponse(fmt.Sprintf("unrecognized auth_type: %v", authTypeRaw.(string))), nil + } + } else if authTypeRaw.(string) != roleEntry.AuthType { + return logical.ErrorResponse("changing auth_type on a role is not allowed"), nil + } + } else if req.Operation == logical.CreateOperation { + switch req.MountType { + // maintain backwards compatibility for old aws-ec2 auth types + case "aws-ec2": + roleEntry.AuthType = ec2AuthType + // but default to iamAuth for new mounts going forward + case "aws": + roleEntry.AuthType = iamAuthType + default: + roleEntry.AuthType = iamAuthType + } + } + + allowEc2Binds := roleEntry.AuthType == ec2AuthType + + if roleEntry.InferredEntityType != "" { + switch { + case roleEntry.AuthType != iamAuthType: + return logical.ErrorResponse("specified inferred_entity_type but didn't allow iam auth_type"), nil + case roleEntry.InferredEntityType != ec2EntityType: + return logical.ErrorResponse(fmt.Sprintf("specified invalid inferred_entity_type: %s", roleEntry.InferredEntityType)), nil + case roleEntry.InferredAWSRegion == "": + return logical.ErrorResponse("specified inferred_entity_type but not inferred_aws_region"), nil + } + allowEc2Binds = true + } else if roleEntry.InferredAWSRegion != "" { + return logical.ErrorResponse("specified inferred_aws_region but not inferred_entity_type"), nil + } + + numBinds := 0 + + if len(roleEntry.BoundAccountIDs) > 0 { + if !allowEc2Binds { + return logical.ErrorResponse(fmt.Sprintf("specified bound_account_id but not specifying ec2 auth_type or inferring %s", ec2EntityType)), nil + } + numBinds++ + } + + if len(roleEntry.BoundRegions) > 0 { + if roleEntry.AuthType != ec2AuthType { + return logical.ErrorResponse("specified bound_region but not specifying ec2 auth_type"), nil + } + numBinds++ + } + + if len(roleEntry.BoundAmiIDs) > 0 { + if !allowEc2Binds { + return logical.ErrorResponse(fmt.Sprintf("specified bound_ami_id but not specifying ec2 auth_type or inferring %s", ec2EntityType)), nil + } + numBinds++ + } + + if len(roleEntry.BoundIamInstanceProfileARNs) > 0 { + if !allowEc2Binds { + return logical.ErrorResponse(fmt.Sprintf("specified bound_iam_instance_profile_arn but not specifying ec2 auth_type or inferring %s", ec2EntityType)), nil + } + numBinds++ + } + + if len(roleEntry.BoundEc2InstanceIDs) > 0 { + if !allowEc2Binds { + return logical.ErrorResponse(fmt.Sprintf("specified bound_ec2_instance_id but not specifying ec2 auth_type or inferring %s", ec2EntityType)), nil + } + numBinds++ + } + + if len(roleEntry.BoundIamRoleARNs) > 0 { + if !allowEc2Binds { + return logical.ErrorResponse(fmt.Sprintf("specified bound_iam_role_arn but not specifying ec2 auth_type or inferring %s", ec2EntityType)), nil + } + numBinds++ + } + + if len(roleEntry.BoundIamPrincipalARNs) > 0 { + if roleEntry.AuthType != iamAuthType { + return logical.ErrorResponse("specified bound_iam_principal_arn but not specifying iam auth_type"), nil + } + numBinds++ + } + + if len(roleEntry.BoundVpcIDs) > 0 { + if !allowEc2Binds { + return logical.ErrorResponse(fmt.Sprintf("specified bound_vpc_id but not specifying ec2 auth_type or inferring %s", ec2EntityType)), nil 
+ } + numBinds++ + } + + if len(roleEntry.BoundSubnetIDs) > 0 { + if !allowEc2Binds { + return logical.ErrorResponse(fmt.Sprintf("specified bound_subnet_id but not specifying ec2 auth_type or inferring %s", ec2EntityType)), nil + } + numBinds++ + } + + if numBinds == 0 { + return logical.ErrorResponse("at least one bound parameter should be specified on the role"), nil + } + + disallowReauthenticationBool, ok := data.GetOk("disallow_reauthentication") + if ok { + if roleEntry.AuthType != ec2AuthType { + return logical.ErrorResponse("specified disallow_reauthentication when not using ec2 auth type"), nil + } + roleEntry.DisallowReauthentication = disallowReauthenticationBool.(bool) + } else if req.Operation == logical.CreateOperation && roleEntry.AuthType == ec2AuthType { + roleEntry.DisallowReauthentication = data.Get("disallow_reauthentication").(bool) + } + + allowInstanceMigrationBool, ok := data.GetOk("allow_instance_migration") + if ok { + if roleEntry.AuthType != ec2AuthType { + return logical.ErrorResponse("specified allow_instance_migration when not using ec2 auth type"), nil + } + roleEntry.AllowInstanceMigration = allowInstanceMigrationBool.(bool) + } else if req.Operation == logical.CreateOperation && roleEntry.AuthType == ec2AuthType { + roleEntry.AllowInstanceMigration = data.Get("allow_instance_migration").(bool) + } + + if roleEntry.AllowInstanceMigration && roleEntry.DisallowReauthentication { + return logical.ErrorResponse("cannot specify both disallow_reauthentication=true and allow_instance_migration=true"), nil + } + + var resp logical.Response + + if err := roleEntry.ParseTokenFields(req, data); err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + + // Handle upgrade cases + { + if err := tokenutil.UpgradeValue(data, "policies", "token_policies", &roleEntry.Policies, &roleEntry.TokenPolicies); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + if err := tokenutil.UpgradeValue(data, "ttl", "token_ttl", &roleEntry.TTL, &roleEntry.TokenTTL); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + // Special case here for old lease value + _, ok := data.GetOk("token_ttl") + if !ok { + _, ok = data.GetOk("ttl") + if !ok { + ttlRaw, ok := data.GetOk("lease") + if ok { + roleEntry.TTL = time.Duration(ttlRaw.(int)) * time.Second + roleEntry.TokenTTL = roleEntry.TTL + } + } + } + + if err := tokenutil.UpgradeValue(data, "max_ttl", "token_max_ttl", &roleEntry.MaxTTL, &roleEntry.TokenMaxTTL); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + if err := tokenutil.UpgradeValue(data, "period", "token_period", &roleEntry.Period, &roleEntry.TokenPeriod); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + } + + systemMaxTTL := b.System().MaxLeaseTTL() + if roleEntry.TokenMaxTTL > systemMaxTTL { + resp.AddWarning(fmt.Sprintf("Given max ttl of %d seconds greater than current mount/system default of %d seconds; max ttl will be capped at login time", roleEntry.TokenMaxTTL/time.Second, systemMaxTTL/time.Second)) + } + if roleEntry.TokenMaxTTL != 0 && roleEntry.TokenMaxTTL < roleEntry.TokenTTL { + return logical.ErrorResponse("ttl should be shorter than max ttl"), nil + } + if roleEntry.TokenPeriod > b.System().MaxLeaseTTL() { + return logical.ErrorResponse(fmt.Sprintf("period of %q is greater than the backend's maximum lease TTL of %q", roleEntry.TokenPeriod.String(), b.System().MaxLeaseTTL().String())), nil + } + + roleTagStr, ok := data.GetOk("role_tag") + if ok { + if 
roleEntry.AuthType != ec2AuthType {
+			return logical.ErrorResponse("tried to enable role_tag when not using ec2 auth method"), nil
+		}
+		roleEntry.RoleTag = roleTagStr.(string)
+		// There is a limit of 127 characters on the tag key for AWS EC2 instances.
+		// Complying with that requirement, do not allow the value of 'key' to be more than that.
+		if len(roleEntry.RoleTag) > 127 {
+			return logical.ErrorResponse("length of role tag exceeds the EC2 key limit of 127 characters"), nil
+		}
+	} else if req.Operation == logical.CreateOperation && roleEntry.AuthType == ec2AuthType {
+		roleEntry.RoleTag = data.Get("role_tag").(string)
+	}
+
+	if roleEntry.HMACKey == "" {
+		roleEntry.HMACKey, err = uuid.GenerateUUID()
+		if err != nil {
+			return nil, fmt.Errorf("failed to generate role HMAC key: %w", err)
+		}
+	}
+
+	if err := b.setRole(ctx, req.Storage, roleName, roleEntry); err != nil {
+		return nil, err
+	}
+
+	if len(resp.Warnings) == 0 {
+		return nil, nil
+	}
+
+	return &resp, nil
+}
+
+// Struct to hold the information associated with a Vault role
+type awsRoleEntry struct {
+	tokenutil.TokenParams
+
+	RoleID                      string   `json:"role_id"`
+	AuthType                    string   `json:"auth_type"`
+	BoundAmiIDs                 []string `json:"bound_ami_id_list"`
+	BoundAccountIDs             []string `json:"bound_account_id_list"`
+	BoundEc2InstanceIDs         []string `json:"bound_ec2_instance_id_list"`
+	BoundIamPrincipalARNs       []string `json:"bound_iam_principal_arn_list"`
+	BoundIamPrincipalIDs        []string `json:"bound_iam_principal_id_list"`
+	BoundIamRoleARNs            []string `json:"bound_iam_role_arn_list"`
+	BoundIamInstanceProfileARNs []string `json:"bound_iam_instance_profile_arn_list"`
+	BoundRegions                []string `json:"bound_region_list"`
+	BoundSubnetIDs              []string `json:"bound_subnet_id_list"`
+	BoundVpcIDs                 []string `json:"bound_vpc_id_list"`
+	InferredEntityType          string   `json:"inferred_entity_type"`
+	InferredAWSRegion           string   `json:"inferred_aws_region"`
+	ResolveAWSUniqueIDs         bool     `json:"resolve_aws_unique_ids"`
+	RoleTag                     string   `json:"role_tag"`
+	AllowInstanceMigration      bool     `json:"allow_instance_migration"`
+	DisallowReauthentication    bool     `json:"disallow_reauthentication"`
+	HMACKey                     string   `json:"hmac_key"`
+	Version                     int      `json:"version"`
+
+	// Deprecated: These are superseded by TokenUtil
+	TTL      time.Duration `json:"ttl"`
+	MaxTTL   time.Duration `json:"max_ttl"`
+	Period   time.Duration `json:"period"`
+	Policies []string      `json:"policies"`
+
+	// DEPRECATED -- these are the old fields before we supported lists and exist for backwards compatibility
+	BoundAmiID                 string `json:"bound_ami_id,omitempty" `
+	BoundAccountID             string `json:"bound_account_id,omitempty"`
+	BoundIamPrincipalARN       string `json:"bound_iam_principal_arn,omitempty"`
+	BoundIamPrincipalID        string `json:"bound_iam_principal_id,omitempty"`
+	BoundIamRoleARN            string `json:"bound_iam_role_arn,omitempty"`
+	BoundIamInstanceProfileARN string `json:"bound_iam_instance_profile_arn,omitempty"`
+	BoundRegion                string `json:"bound_region,omitempty"`
+	BoundSubnetID              string `json:"bound_subnet_id,omitempty"`
+	BoundVpcID                 string `json:"bound_vpc_id,omitempty"`
+}
+
+func (r *awsRoleEntry) ToResponseData() map[string]interface{} {
+	responseData := map[string]interface{}{
+		"auth_type":                      r.AuthType,
+		"bound_ami_id":                   r.BoundAmiIDs,
+		"bound_account_id":               r.BoundAccountIDs,
+		"bound_ec2_instance_id":          r.BoundEc2InstanceIDs,
+		"bound_iam_principal_arn":        r.BoundIamPrincipalARNs,
+		"bound_iam_principal_id":         r.BoundIamPrincipalIDs,
+		"bound_iam_role_arn":             r.BoundIamRoleARNs,
+		"bound_iam_instance_profile_arn": r.BoundIamInstanceProfileARNs,
+		"bound_region":                   r.BoundRegions,
+		"bound_subnet_id":                r.BoundSubnetIDs,
+		"bound_vpc_id":                   r.BoundVpcIDs,
+		"inferred_entity_type":           r.InferredEntityType,
+		"inferred_aws_region":            r.InferredAWSRegion,
+		"resolve_aws_unique_ids":         r.ResolveAWSUniqueIDs,
+		"role_id":                        r.RoleID,
+		"role_tag":                       r.RoleTag,
+		"allow_instance_migration":       r.AllowInstanceMigration,
+		"disallow_reauthentication":      r.DisallowReauthentication,
+	}
+
+	r.PopulateTokenData(responseData)
+	if r.TTL > 0 {
+		responseData["ttl"] = int64(r.TTL.Seconds())
+	}
+	if r.MaxTTL > 0 {
+		responseData["max_ttl"] = int64(r.MaxTTL.Seconds())
+	}
+	if r.Period > 0 {
+		responseData["period"] = int64(r.Period.Seconds())
+	}
+	if len(r.Policies) > 0 {
+		responseData["policies"] = responseData["token_policies"]
+	}
+
+	convertNilToEmptySlice := func(data map[string]interface{}, field string) {
+		if data[field] == nil || len(data[field].([]string)) == 0 {
+			data[field] = []string{}
+		}
+	}
+	convertNilToEmptySlice(responseData, "bound_ami_id")
+	convertNilToEmptySlice(responseData, "bound_account_id")
+	convertNilToEmptySlice(responseData, "bound_iam_principal_arn")
+	convertNilToEmptySlice(responseData, "bound_iam_principal_id")
+	convertNilToEmptySlice(responseData, "bound_iam_role_arn")
+	convertNilToEmptySlice(responseData, "bound_iam_instance_profile_arn")
+	convertNilToEmptySlice(responseData, "bound_region")
+	convertNilToEmptySlice(responseData, "bound_subnet_id")
+	convertNilToEmptySlice(responseData, "bound_vpc_id")
+
+	return responseData
+}
+
+const pathRoleSyn = `
+Create a role and associate policies to it.
+`
+
+const pathRoleDesc = `
+A precondition for login is that a role should be created in the backend.
+The login endpoint takes in the role name against which the client
+should be validated. After authenticating the client, the authorization
+to access Vault's resources is determined by the policies that are
+associated with the role through this endpoint.
+
+When an EC2 instance requires only a subset of policies on the role, then
+the 'role_tag' option on the role can be enabled to create a role tag via the
+endpoint 'role/<role>/tag'. This tag then needs to be applied on the
+instance before it attempts a login. The policies on the tag should be a
+subset of policies that are associated with the role. In order to enable
+login using tags, the 'role_tag' option should be set while creating a role.
+This only applies when authenticating EC2 instances.
+
+Also, a 'max_ttl' can be configured in this endpoint that determines the maximum
+duration for which a login can be renewed. Note that the 'max_ttl' has an upper
+limit of the 'max_ttl' value on the backend's mount.
+`
+
+const pathListRolesHelpSyn = `
+Lists all the roles that are registered with Vault.
+`
+
+const pathListRolesHelpDesc = `
+Roles will be listed by their respective role names.
+`
diff --git a/builtin/credential/aws/path_role_tag.go b/builtin/credential/aws/path_role_tag.go
new file mode 100644
index 0000000..93322f6
--- /dev/null
+++ b/builtin/credential/aws/path_role_tag.go
@@ -0,0 +1,447 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package awsauth
+
+import (
+	"context"
+	"crypto/hmac"
+	"crypto/sha256"
+	"crypto/subtle"
+	"encoding/base64"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/go-secure-stdlib/parseutil"
+	"github.com/hashicorp/go-secure-stdlib/strutil"
+	uuid "github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/helper/policyutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+const roleTagVersion = "v1"
+
+func (b *backend) pathRoleTag() *framework.Path {
+	return &framework.Path{
+		Pattern: "role/" + framework.GenericNameRegex("role") + "/tag$",
+
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixAWS,
+			OperationSuffix: "role-tag",
+		},
+
+		Fields: map[string]*framework.FieldSchema{
+			"role": {
+				Type:        framework.TypeString,
+				Description: "Name of the role.",
+			},
+
+			"instance_id": {
+				Type: framework.TypeString,
+				Description: `Instance ID that this tag is intended for.
+If set, the created tag can only be used by the instance with the given ID.`,
+			},
+
+			"policies": {
+				Type:        framework.TypeCommaStringSlice,
+				Description: "Policies to be associated with the tag. If set, must be a subset of the role's policies. If set, but set to an empty value, only the 'default' policy will be given to issued tokens.",
+			},
+
+			"max_ttl": {
+				Type:        framework.TypeDurationSecond,
+				Default:     0,
+				Description: "If set, specifies the maximum allowed token lifetime.",
+			},
+
+			"allow_instance_migration": {
+				Type:        framework.TypeBool,
+				Default:     false,
+				Description: "If set, allows migration of the underlying instance where the client resides. This keys off of pendingTime in the metadata document, so essentially, this disables the client nonce check whenever the instance is migrated to a new host and pendingTime is newer than the previously-remembered time. Use with caution.",
+			},
+
+			"disallow_reauthentication": {
+				Type:        framework.TypeBool,
+				Default:     false,
+				Description: "If set, only allows a single token to be granted per instance ID. In order to perform a fresh login, the entry in the access list for the instance ID needs to be cleared using the 'auth/aws-ec2/identity-accesslist/<instance_id>' endpoint.",
+			},
+		},
+
+		Operations: map[logical.Operation]framework.OperationHandler{
+			logical.UpdateOperation: &framework.PathOperation{
+				Callback: b.pathRoleTagUpdate,
+			},
+		},
+
+		HelpSynopsis:    pathRoleTagSyn,
+		HelpDescription: pathRoleTagDesc,
+	}
+}
+
+// pathRoleTagUpdate is used to create an EC2 instance tag which will
+// identify the Vault resources that the instance will be authorized for.
+func (b *backend) pathRoleTagUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	roleName := strings.ToLower(data.Get("role").(string))
+	if roleName == "" {
+		return logical.ErrorResponse("missing role"), nil
+	}
+
+	// Fetch the role entry
+	roleEntry, err := b.role(ctx, req.Storage, roleName)
+	if err != nil {
+		return nil, err
+	}
+	if roleEntry == nil {
+		return logical.ErrorResponse(fmt.Sprintf("entry not found for role %s", roleName)), nil
+	}
+
+	// If RoleTag is empty, disallow creation of tag.
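+	// (Editor's note) Tag creation is opt-in per role; a hypothetical setup
+	// with the Vault CLI would first enable it on the role, e.g.:
+	//
+	//	vault write auth/aws-ec2/role/dev-role auth_type=ec2 \
+	//	    bound_ami_id=ami-abcd123 role_tag=VaultRole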
+	if roleEntry.RoleTag == "" {
+		return logical.ErrorResponse("tag creation is not enabled for this role"), nil
+	}
+
+	// There should be an HMAC key present in the role entry
+	if roleEntry.HMACKey == "" {
+		// Not being able to find the HMACKey is an internal error
+		return nil, fmt.Errorf("failed to find the HMAC key")
+	}
+
+	resp := &logical.Response{}
+
+	// Instance ID is an optional field.
+	instanceID := strings.ToLower(data.Get("instance_id").(string))
+
+	// If the policies field was not supplied, the tag should inherit all the
+	// policies on the role. But if it was provided and explicitly set to empty,
+	// only the 'default' policy should be inherited. So, by leaving the policies
+	// var unset when the field is not supplied, we ensure that the tag inherits
+	// all the policies on the role.
+	var policies []string
+	policiesRaw, ok := data.GetOk("policies")
+	if ok {
+		policies = policyutil.ParsePolicies(policiesRaw)
+	}
+	if !strutil.StrListSubset(roleEntry.TokenPolicies, policies) {
+		resp.AddWarning("Policies on the tag are not a subset of the policies set on the role. Login will not be allowed with this tag unless the role policies are updated.")
+	}
+
+	// This is an optional field.
+	disallowReauthentication := data.Get("disallow_reauthentication").(bool)
+
+	// This is an optional field.
+	allowInstanceMigration := data.Get("allow_instance_migration").(bool)
+	if allowInstanceMigration && !roleEntry.AllowInstanceMigration {
+		resp.AddWarning("Role does not allow instance migration. Login will not be allowed with this tag unless the role value is updated.")
+	}
+
+	if disallowReauthentication && allowInstanceMigration {
+		return logical.ErrorResponse("cannot set both disallow_reauthentication and allow_instance_migration"), nil
+	}
+
+	// max_ttl for the role tag should be less than the max_ttl set on the role.
+	maxTTL := time.Duration(data.Get("max_ttl").(int)) * time.Second
+
+	// max_ttl on the tag should not be greater than the system view's max_ttl value.
+	if maxTTL > b.System().MaxLeaseTTL() {
+		resp.AddWarning(fmt.Sprintf("Given max TTL of %d is greater than the mount maximum of %d seconds, and will be capped at login time.", maxTTL/time.Second, b.System().MaxLeaseTTL()/time.Second))
+	}
+	// If max_ttl is set for the role, check the bounds for tag's max_ttl value using that.
+	if roleEntry.TokenMaxTTL != time.Duration(0) && maxTTL > roleEntry.TokenMaxTTL {
+		resp.AddWarning(fmt.Sprintf("Given max TTL of %d is greater than the role maximum of %d seconds, and will be capped at login time.", maxTTL/time.Second, roleEntry.TokenMaxTTL/time.Second))
+	}
+
+	if maxTTL < time.Duration(0) {
+		return logical.ErrorResponse("max_ttl cannot be negative"), nil
+	}
+
+	// Create a random nonce.
+	nonce, err := createRoleTagNonce()
+	if err != nil {
+		return nil, err
+	}
+
+	// Create a role tag out of all the information provided.
+	rTagValue, err := createRoleTagValue(&roleTag{
+		Version:                  roleTagVersion,
+		Role:                     roleName,
+		Nonce:                    nonce,
+		Policies:                 policies,
+		MaxTTL:                   maxTTL,
+		InstanceID:               instanceID,
+		DisallowReauthentication: disallowReauthentication,
+		AllowInstanceMigration:   allowInstanceMigration,
+	}, roleEntry)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return the key to be used for the tag and the value to be used for that tag key.
+	// This key value pair should be set on the EC2 instance.
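+	// (Editor's sketch, hypothetical values) The caller would then attach the
+	// returned pair to the instance, for example with the AWS CLI:
+	//
+	//	aws ec2 create-tags --resources i-0123456789abcdef0 \
+	//	    --tags Key=VaultRole,Value="v1:8Kpyy2wCT4I=:r=dev-role:d=false:m=false:<hmac>"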
+ resp.Data = map[string]interface{}{ + "tag_key": roleEntry.RoleTag, + "tag_value": rTagValue, + } + + return resp, nil +} + +// createRoleTagValue prepares the plaintext version of the role tag, +// and appends a HMAC of the plaintext value to it, before returning. +func createRoleTagValue(rTag *roleTag, roleEntry *awsRoleEntry) (string, error) { + if rTag == nil { + return "", fmt.Errorf("nil role tag") + } + + if roleEntry == nil { + return "", fmt.Errorf("nil role entry") + } + + // Attach version, nonce, policies and maxTTL to the role tag value. + rTagPlaintext, err := prepareRoleTagPlaintextValue(rTag) + if err != nil { + return "", err + } + + // Attach HMAC to tag's plaintext and return. + return appendHMAC(rTagPlaintext, roleEntry) +} + +// Takes in the plaintext part of the role tag, creates a HMAC of it and returns +// a role tag value containing both the plaintext part and the HMAC part. +func appendHMAC(rTagPlaintext string, roleEntry *awsRoleEntry) (string, error) { + if rTagPlaintext == "" { + return "", fmt.Errorf("empty role tag plaintext string") + } + + if roleEntry == nil { + return "", fmt.Errorf("nil role entry") + } + + // Create the HMAC of the value + hmacB64, err := createRoleTagHMACBase64(roleEntry.HMACKey, rTagPlaintext) + if err != nil { + return "", err + } + + // attach the HMAC to the value + rTagValue := fmt.Sprintf("%s:%s", rTagPlaintext, hmacB64) + + // This limit of 255 is enforced on the EC2 instance. Hence complying to that here. + if len(rTagValue) > 255 { + return "", fmt.Errorf("role tag 'value' exceeding the limit of 255 characters") + } + + return rTagValue, nil +} + +// verifyRoleTagValue rebuilds the role tag's plaintext part, computes the HMAC +// from it using the role specific HMAC key and compares it with the received HMAC. +func verifyRoleTagValue(rTag *roleTag, roleEntry *awsRoleEntry) (bool, error) { + if rTag == nil { + return false, fmt.Errorf("nil role tag") + } + + if roleEntry == nil { + return false, fmt.Errorf("nil role entry") + } + + // Fetch the plaintext part of role tag + rTagPlaintext, err := prepareRoleTagPlaintextValue(rTag) + if err != nil { + return false, err + } + + // Compute the HMAC of the plaintext + hmacB64, err := createRoleTagHMACBase64(roleEntry.HMACKey, rTagPlaintext) + if err != nil { + return false, err + } + + return subtle.ConstantTimeCompare([]byte(rTag.HMAC), []byte(hmacB64)) == 1, nil +} + +// prepareRoleTagPlaintextValue builds the role tag value without the HMAC in it. +func prepareRoleTagPlaintextValue(rTag *roleTag) (string, error) { + if rTag == nil { + return "", fmt.Errorf("nil role tag") + } + if rTag.Version == "" { + return "", fmt.Errorf("missing version") + } + if rTag.Nonce == "" { + return "", fmt.Errorf("missing nonce") + } + if rTag.Role == "" { + return "", fmt.Errorf("missing role") + } + + // Attach Version, Nonce, Role, DisallowReauthentication and AllowInstanceMigration + // fields to the role tag. + value := fmt.Sprintf("%s:%s:r=%s:d=%s:m=%s", rTag.Version, rTag.Nonce, rTag.Role, strconv.FormatBool(rTag.DisallowReauthentication), strconv.FormatBool(rTag.AllowInstanceMigration)) + + // Attach the policies only if they are specified. + if len(rTag.Policies) != 0 { + value = fmt.Sprintf("%s:p=%s", value, strings.Join(rTag.Policies, ",")) + } + + // Attach instance_id if set. + if rTag.InstanceID != "" { + value = fmt.Sprintf("%s:i=%s", value, rTag.InstanceID) + } + + // Attach max_ttl if it is provided. 
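+	// (Editor's illustration, hypothetical values) A fully populated
+	// plaintext value, before the HMAC is appended, looks like:
+	//
+	//	v1:8Kpyy2wCT4I=:r=dev-role:d=false:m=false:p=p,q:i=i-abc123:t=3600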
+ if int(rTag.MaxTTL.Seconds()) > 0 { + value = fmt.Sprintf("%s:t=%d", value, int(rTag.MaxTTL.Seconds())) + } + + return value, nil +} + +// Parses the tag from string form into a struct form. This method +// also verifies the correctness of the parsed role tag. +func (b *backend) parseAndVerifyRoleTagValue(ctx context.Context, s logical.Storage, tag string) (*roleTag, error) { + tagItems := strings.Split(tag, ":") + + // Tag must contain version, nonce, policies and HMAC + if len(tagItems) < 4 { + return nil, fmt.Errorf("invalid tag") + } + + rTag := &roleTag{} + + // Cache the HMAC value. The last item in the collection. + rTag.HMAC = tagItems[len(tagItems)-1] + + // Remove the HMAC from the list. + tagItems = tagItems[:len(tagItems)-1] + + // Version will be the first element. + rTag.Version = tagItems[0] + if rTag.Version != roleTagVersion { + return nil, fmt.Errorf("invalid role tag version") + } + + // Nonce will be the second element. + rTag.Nonce = tagItems[1] + + // Delete the version and nonce from the list. + tagItems = tagItems[2:] + + for _, tagItem := range tagItems { + var err error + switch { + case strings.HasPrefix(tagItem, "i="): + rTag.InstanceID = strings.TrimPrefix(tagItem, "i=") + case strings.HasPrefix(tagItem, "r="): + rTag.Role = strings.TrimPrefix(tagItem, "r=") + case strings.HasPrefix(tagItem, "p="): + rTag.Policies = strings.Split(strings.TrimPrefix(tagItem, "p="), ",") + case strings.HasPrefix(tagItem, "d="): + rTag.DisallowReauthentication, err = strconv.ParseBool(strings.TrimPrefix(tagItem, "d=")) + if err != nil { + return nil, err + } + case strings.HasPrefix(tagItem, "m="): + rTag.AllowInstanceMigration, err = strconv.ParseBool(strings.TrimPrefix(tagItem, "m=")) + if err != nil { + return nil, err + } + case strings.HasPrefix(tagItem, "t="): + rTag.MaxTTL, err = parseutil.ParseDurationSecond(fmt.Sprintf("%ss", strings.TrimPrefix(tagItem, "t="))) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unrecognized item %q in tag", tagItem) + } + } + + if rTag.Role == "" { + return nil, fmt.Errorf("missing role name") + } + + roleEntry, err := b.role(ctx, s, rTag.Role) + if err != nil { + return nil, err + } + if roleEntry == nil { + return nil, fmt.Errorf("entry not found for %q", rTag.Role) + } + + // Create a HMAC of the plaintext value of role tag and compare it with the given value. + verified, err := verifyRoleTagValue(rTag, roleEntry) + if err != nil { + return nil, err + } + if !verified { + return nil, fmt.Errorf("role tag signature verification failed") + } + + return rTag, nil +} + +// Creates base64 encoded HMAC using a per-role key. +func createRoleTagHMACBase64(key, value string) (string, error) { + if key == "" { + return "", fmt.Errorf("invalid HMAC key") + } + hm := hmac.New(sha256.New, []byte(key)) + hm.Write([]byte(value)) + + // base64 encode the hmac bytes. + return base64.StdEncoding.EncodeToString(hm.Sum(nil)), nil +} + +// Creates a base64 encoded random nonce. +func createRoleTagNonce() (string, error) { + if uuidBytes, err := uuid.GenerateRandomBytes(8); err != nil { + return "", err + } else { + return base64.StdEncoding.EncodeToString(uuidBytes), nil + } +} + +// Struct roleTag represents a role tag in a struct form. 
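+//
+// The wire form produced by prepareRoleTagPlaintextValue and consumed by
+// parseAndVerifyRoleTagValue maps onto these fields roughly as follows
+// (editor's summary):
+//
+//	v1       -> Version
+//	<nonce>  -> Nonce
+//	r=<val>  -> Role
+//	d=<val>  -> DisallowReauthentication
+//	m=<val>  -> AllowInstanceMigration
+//	p=<val>  -> Policies (comma-separated)
+//	i=<val>  -> InstanceID
+//	t=<val>  -> MaxTTL (seconds)
+//	<hmac>   -> HMAC (always the last component)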
+type roleTag struct {
+	Version                  string        `json:"version"`
+	InstanceID               string        `json:"instance_id"`
+	Nonce                    string        `json:"nonce"`
+	Policies                 []string      `json:"policies"`
+	MaxTTL                   time.Duration `json:"max_ttl"`
+	Role                     string        `json:"role"`
+	HMAC                     string        `json:"hmac"`
+	DisallowReauthentication bool          `json:"disallow_reauthentication"`
+	AllowInstanceMigration   bool          `json:"allow_instance_migration"`
+}
+
+func (rTag1 *roleTag) Equal(rTag2 *roleTag) bool {
+	return rTag1 != nil &&
+		rTag2 != nil &&
+		rTag1.Version == rTag2.Version &&
+		rTag1.Nonce == rTag2.Nonce &&
+		policyutil.EquivalentPolicies(rTag1.Policies, rTag2.Policies) &&
+		rTag1.MaxTTL == rTag2.MaxTTL &&
+		rTag1.Role == rTag2.Role &&
+		rTag1.HMAC == rTag2.HMAC &&
+		rTag1.InstanceID == rTag2.InstanceID &&
+		rTag1.DisallowReauthentication == rTag2.DisallowReauthentication &&
+		rTag1.AllowInstanceMigration == rTag2.AllowInstanceMigration
+}
+
+const pathRoleTagSyn = `
+Create a tag on a role in order to be able to further restrict the capabilities of a role.
+`
+
+const pathRoleTagDesc = `
+If there is a need to apply only a subset of a role's capabilities to any specific
+instance, create a role tag using this endpoint and attach the tag on the instance
+before performing login.
+
+To be able to create a role tag, the 'role_tag' option on the role should be
+enabled via the endpoint 'role/<role>'. Also, the policies to be associated
+with the tag should be a subset of the policies associated with the registered role.
+
+This endpoint will return both the 'key' and the 'value' of the tag to be set
+on the EC2 instance.
+`
diff --git a/builtin/credential/aws/path_role_test.go b/builtin/credential/aws/path_role_test.go
new file mode 100644
index 0000000..3a63d4c
--- /dev/null
+++ b/builtin/credential/aws/path_role_test.go
@@ -0,0 +1,1081 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package awsauth + +import ( + "context" + "os" + "reflect" + "strings" + "testing" + + "github.com/go-test/deep" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-secure-stdlib/awsutil" + "github.com/hashicorp/go-secure-stdlib/strutil" + vlttesting "github.com/hashicorp/vault/helper/testhelpers/logical" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/helper/policyutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func TestBackend_pathRoleEc2(t *testing.T) { + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + data := map[string]interface{}{ + "auth_type": "ec2", + "policies": "p,q,r,s", + "max_ttl": "2h", + "bound_ami_id": "ami-abcd123", + } + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/ami-abcd123", + Data: data, + Storage: storage, + }) + if resp != nil && resp.IsError() { + t.Fatalf("failed to create role") + } + if err != nil { + t.Fatal(err) + } + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Path: "role/ami-abcd123", + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.IsError() { + t.Fatal("failed to read the role entry") + } + if !policyutil.EquivalentPolicies(strings.Split(data["policies"].(string), ","), resp.Data["policies"].([]string)) { + t.Fatalf("bad: policies: expected: %#v\ngot: %#v\n", data, resp.Data) + } + + data["allow_instance_migration"] = true + data["disallow_reauthentication"] = true + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "role/ami-abcd123", + Data: data, + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil || !resp.IsError() { + t.Fatalf("expected failure to create role with both allow_instance_migration true and disallow_reauthentication true") + } + data["disallow_reauthentication"] = false + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "role/ami-abcd123", + Data: data, + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + if resp != nil && resp.IsError() { + t.Fatalf("failure to update role: %v", resp.Data["error"]) + } + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Path: "role/ami-abcd123", + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + if !resp.Data["allow_instance_migration"].(bool) { + t.Fatal("bad: expected allow_instance_migration:true got:false\n") + } + + if resp.Data["disallow_reauthentication"].(bool) { + t.Fatal("bad: expected disallow_reauthentication: false got:true\n") + } + + // add another entry, to test listing of role entries + data["bound_ami_id"] = "ami-abcd456" + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/ami-abcd456", + Data: data, + Storage: storage, + }) + if resp != nil && resp.IsError() { + t.Fatalf("failed to create role: %s", resp.Data["error"]) + } + if err != nil { + t.Fatal(err) + } + + data["bound_iam_principal_arn"] = "" + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + 
Path: "role/ami-abcd456", + Data: data, + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + if resp != nil && resp.IsError() { + t.Fatalf("failed to update role with empty bound_iam_principal_arn: %s", resp.Data["error"]) + } + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ListOperation, + Path: "roles", + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.Data == nil || resp.IsError() { + t.Fatalf("failed to list the role entries") + } + keys := resp.Data["keys"].([]string) + if len(keys) != 2 { + t.Fatalf("bad: keys: %#v\n", keys) + } + + _, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.DeleteOperation, + Path: "role/ami-abcd123", + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Path: "role/ami-abcd123", + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + if resp != nil { + t.Fatalf("bad: response: expected:nil actual:%#v\n", resp) + } +} + +func Test_enableIamIDResolution(t *testing.T) { + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + roleName := "upgradable_role" + + b.resolveArnToUniqueIDFunc = resolveArnToFakeUniqueId + + boundIamRoleARNs := []string{"arn:aws:iam::123456789012:role/MyRole", "arn:aws:iam::123456789012:role/path/*"} + data := map[string]interface{}{ + "auth_type": iamAuthType, + "policies": "p,q", + "bound_iam_principal_arn": boundIamRoleARNs, + "resolve_aws_unique_ids": false, + } + + submitRequest := func(roleName string, op logical.Operation) (*logical.Response, error) { + return b.HandleRequest(context.Background(), &logical.Request{ + Operation: op, + Path: "role/" + roleName, + Data: data, + Storage: storage, + }) + } + + resp, err := submitRequest(roleName, logical.CreateOperation) + if err != nil { + t.Fatal(err) + } + if resp != nil && resp.IsError() { + t.Fatalf("failed to create role: %#v", resp) + } + + resp, err = submitRequest(roleName, logical.ReadOperation) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.IsError() { + t.Fatalf("failed to read role: resp:%#v,\nerr:%#v", resp, err) + } + if resp.Data["bound_iam_principal_id"] != nil && len(resp.Data["bound_iam_principal_id"].([]string)) > 0 { + t.Fatalf("expected to get no unique ID in role, but got %q", resp.Data["bound_iam_principal_id"]) + } + + data = map[string]interface{}{ + "resolve_aws_unique_ids": true, + } + resp, err = submitRequest(roleName, logical.UpdateOperation) + if err != nil { + t.Fatal(err) + } + if resp != nil && resp.IsError() { + t.Fatalf("unable to upgrade role to resolve internal IDs: resp:%#v", resp) + } + + resp, err = submitRequest(roleName, logical.ReadOperation) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.IsError() { + t.Fatalf("failed to read role: resp:%#v,\nerr:%#v", resp, err) + } + principalIDs := resp.Data["bound_iam_principal_id"].([]string) + if len(principalIDs) != 1 || principalIDs[0] != "FakeUniqueId1" { + t.Fatalf("bad: expected upgrade of role resolve principal ID to %q, but got %q instead", "FakeUniqueId1", resp.Data["bound_iam_principal_id"]) + } + returnedARNs := resp.Data["bound_iam_principal_arn"].([]string) + if !strutil.EquivalentSlices(returnedARNs, 
boundIamRoleARNs) { + t.Fatalf("bad: expected to return bound_iam_principal_arn of %q, but got %q instead", boundIamRoleARNs, returnedARNs) + } +} + +func TestBackend_pathIam(t *testing.T) { + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // make sure we start with empty roles, which gives us confidence that the read later + // actually is the two roles we created + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ListOperation, + Path: "roles", + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.Data == nil || resp.IsError() { + t.Fatalf("failed to list role entries") + } + if resp.Data["keys"] != nil { + t.Fatalf("Received roles when expected none") + } + + data := map[string]interface{}{ + "auth_type": iamAuthType, + "policies": "p,q,r,s", + "max_ttl": "2h", + "bound_iam_principal_arn": "n:aws:iam::123456789012:user/MyUserName", + "resolve_aws_unique_ids": false, + } + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/MyRoleName", + Data: data, + Storage: storage, + }) + + if err != nil { + t.Fatal(err) + } + if resp != nil && resp.IsError() { + t.Fatalf("failed to create the role entry; resp: %#v", resp) + } + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Path: "role/MyRoleName", + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.IsError() { + t.Fatal("failed to read the role entry") + } + if !policyutil.EquivalentPolicies(strings.Split(data["policies"].(string), ","), resp.Data["policies"].([]string)) { + t.Fatalf("bad: policies: expected %#v\ngot: %#v\n", data, resp.Data) + } + + data["inferred_entity_type"] = "invalid" + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/ShouldNeverExist", + Data: data, + Storage: storage, + }) + if resp == nil || !resp.IsError() { + t.Fatalf("Created role with invalid inferred_entity_type") + } + if err != nil { + t.Fatal(err) + } + + data["inferred_entity_type"] = ec2EntityType + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/ShouldNeverExist", + Data: data, + Storage: storage, + }) + if resp == nil || !resp.IsError() { + t.Fatalf("Created role without necessary inferred_aws_region") + } + if err != nil { + t.Fatal(err) + } + + delete(data, "bound_iam_principal_arn") + data["inferred_aws_region"] = "us-east-1" + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/ShouldNeverExist", + Data: data, + Storage: storage, + }) + if resp == nil || !resp.IsError() { + t.Fatalf("Created role without anything bound") + } + if err != nil { + t.Fatal(err) + } + + // generate a second role, ensure we're able to list both + data["bound_ami_id"] = "ami-abcd123" + secondRole := &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/MyOtherRoleName", + Data: data, + Storage: storage, + } + resp, err = b.HandleRequest(context.Background(), secondRole) + if err != nil { + t.Fatal(err) + } + if resp != nil && resp.IsError() { + t.Fatalf("failed to create additional role: %v", *secondRole) + } + + 
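+ // With both role entries written, listing "roles" should now return exactly
+ // the two names created above.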
resp, err = b.HandleRequest(context.Background(), &logical.Request{
+ Operation: logical.ListOperation,
+ Path: "roles",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil || resp.Data == nil || resp.IsError() {
+ t.Fatalf("failed to list role entries")
+ }
+ keys := resp.Data["keys"].([]string)
+ if len(keys) != 2 {
+ t.Fatalf("bad: keys %#v\n", keys)
+ }
+
+ resp, err = b.HandleRequest(context.Background(), &logical.Request{
+ Operation: logical.DeleteOperation,
+ Path: "role/MyOtherRoleName",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err = b.HandleRequest(context.Background(), &logical.Request{
+ Operation: logical.ReadOperation,
+ Path: "role/MyOtherRoleName",
+ Storage: storage,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp != nil {
+ t.Fatalf("bad: response: expected: nil actual:%#v\n", resp)
+ }
+}
+
+func TestBackend_pathRoleMixedTypes(t *testing.T) {
+ config := logical.TestBackendConfig()
+ storage := &logical.InmemStorage{}
+ config.StorageView = storage
+
+ b, err := Backend(config)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = b.Setup(context.Background(), config)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data := map[string]interface{}{
+ "policies": "p,q,r,s",
+ "bound_ami_id": "ami-abc1234",
+ "auth_type": "ec2,invalid",
+ }
+
+ submitRequest := func(roleName string, op logical.Operation) (*logical.Response, error) {
+ return b.HandleRequest(context.Background(), &logical.Request{
+ Operation: op,
+ Path: "role/" + roleName,
+ Data: data,
+ Storage: storage,
+ })
+ }
+
+ resp, err := submitRequest("shouldNeverExist", logical.CreateOperation)
+ if resp == nil || !resp.IsError() {
+ t.Fatalf("created role with invalid auth_type; resp: %#v", resp)
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data["auth_type"] = "ec2,,iam"
+ resp, err = submitRequest("shouldNeverExist", logical.CreateOperation)
+ if resp == nil || !resp.IsError() {
+ t.Fatalf("created role mixed auth types")
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data["auth_type"] = ec2AuthType
+ resp, err = submitRequest("ec2_to_iam", logical.CreateOperation)
+ if resp != nil && resp.IsError() {
+ t.Fatalf("failed to create valid role; resp: %#v", resp)
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data["auth_type"] = iamAuthType
+ delete(data, "bound_ami_id")
+ boundIamPrincipalARNs := []string{"arn:aws:iam::123456789012:role/MyRole", "arn:aws:iam::123456789012:role/path/*"}
+ data["bound_iam_principal_arn"] = boundIamPrincipalARNs
+ resp, err = submitRequest("ec2_to_iam", logical.UpdateOperation)
+ if resp == nil || !resp.IsError() {
+ t.Fatalf("changed auth type on the role")
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ data["inferred_entity_type"] = ec2EntityType
+ data["inferred_aws_region"] = "us-east-1"
+ data["resolve_aws_unique_ids"] = false
+ resp, err = submitRequest("multipleTypesInferred", logical.CreateOperation)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp.IsError() {
+ t.Fatalf("didn't allow creation of roles with only inferred bindings")
+ }
+
+ b.resolveArnToUniqueIDFunc = resolveArnToFakeUniqueId
+ data["resolve_aws_unique_ids"] = true
+ resp, err = submitRequest("withInternalIdResolution", logical.CreateOperation)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp.IsError() {
+ t.Fatalf("didn't allow creation of role resolving unique IDs")
+ }
+ resp, err = submitRequest("withInternalIdResolution", logical.ReadOperation)
+ if err != nil {
+ t.Fatal(err)
+ }
+ principalIDs := resp.Data["bound_iam_principal_id"].([]string)
+ if len(principalIDs)
!= 1 || principalIDs[0] != "FakeUniqueId1" { + t.Fatalf("expected fake unique ID of FakeUniqueId1, got %q", resp.Data["bound_iam_principal_id"]) + } + returnedARNs := resp.Data["bound_iam_principal_arn"].([]string) + if !strutil.EquivalentSlices(returnedARNs, boundIamPrincipalARNs) { + t.Fatalf("bad: expected to return bound_iam_principal_arn of %q, but got %q instead", boundIamPrincipalARNs, returnedARNs) + } + data["resolve_aws_unique_ids"] = false + resp, err = submitRequest("withInternalIdResolution", logical.UpdateOperation) + if err != nil { + t.Fatal(err) + } + if !resp.IsError() { + t.Fatalf("allowed changing resolve_aws_unique_ids from true to false") + } +} + +func TestAwsEc2_RoleCrud(t *testing.T) { + var err error + var resp *logical.Response + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + role1Data := map[string]interface{}{ + "auth_type": "ec2", + "bound_vpc_id": "testvpcid", + "allow_instance_migration": true, + "policies": "testpolicy1,testpolicy2", + } + roleReq := &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "role/role1", + Data: role1Data, + } + + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v, err: %v", resp, err) + } + + roleData := map[string]interface{}{ + "auth_type": "ec2", + "bound_ami_id": "testamiid", + "bound_account_id": "testaccountid", + "bound_region": "testregion", + "bound_iam_role_arn": "arn:aws:iam::123456789012:role/MyRole", + "bound_iam_instance_profile_arn": "arn:aws:iam::123456789012:instance-profile/MyInstancePro*", + "bound_subnet_id": "testsubnetid", + "bound_vpc_id": "testvpcid", + "bound_ec2_instance_id": "i-12345678901234567,i-76543210987654321", + "role_tag": "testtag", + "resolve_aws_unique_ids": false, + "allow_instance_migration": true, + "ttl": "10m", + "max_ttl": "20m", + "policies": "testpolicy1,testpolicy2", + "disallow_reauthentication": false, + "hmac_key": "testhmackey", + "period": "1m", + } + + roleReq.Path = "role/testrole" + roleReq.Data = roleData + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v, err: %v", resp, err) + } + + roleReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v, err: %v", resp, err) + } + + expected := map[string]interface{}{ + "auth_type": ec2AuthType, + "bound_ami_id": []string{"testamiid"}, + "bound_account_id": []string{"testaccountid"}, + "bound_region": []string{"testregion"}, + "bound_ec2_instance_id": []string{"i-12345678901234567", "i-76543210987654321"}, + "bound_iam_principal_arn": []string{}, + "bound_iam_principal_id": []string{}, + "bound_iam_role_arn": []string{"arn:aws:iam::123456789012:role/MyRole"}, + "bound_iam_instance_profile_arn": []string{"arn:aws:iam::123456789012:instance-profile/MyInstancePro*"}, + "bound_subnet_id": []string{"testsubnetid"}, + "bound_vpc_id": []string{"testvpcid"}, + "inferred_entity_type": "", + "inferred_aws_region": "", + "resolve_aws_unique_ids": false, + "role_tag": "testtag", + "allow_instance_migration": true, + "ttl": int64(600), + "token_ttl": int64(600), + "max_ttl": int64(1200), + "token_max_ttl": int64(1200), + 
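+ // Note: the legacy ttl/max_ttl/period/policies inputs are mirrored into
+ // their token_* equivalents in the read response, so both forms are expected here.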
"token_explicit_max_ttl": int64(0), + "policies": []string{"testpolicy1", "testpolicy2"}, + "token_policies": []string{"testpolicy1", "testpolicy2"}, + "disallow_reauthentication": false, + "period": int64(60), + "token_period": int64(60), + "token_bound_cidrs": []string{}, + "token_no_default_policy": false, + "token_num_uses": 0, + "token_type": "default", + } + + if resp.Data["role_id"] == nil { + t.Fatal("role_id not found in repsonse") + } + expected["role_id"] = resp.Data["role_id"] + if diff := deep.Equal(expected, resp.Data); diff != nil { + t.Fatal(diff) + } + + roleData["bound_vpc_id"] = "newvpcid" + roleReq.Operation = logical.UpdateOperation + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v, err: %v", resp, err) + } + + roleReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v, err: %v", resp, err) + } + expected["bound_vpc_id"] = []string{"newvpcid"} + if !reflect.DeepEqual(expected, resp.Data) { + t.Fatalf("bad: role data: expected: %#v\n actual: %#v", expected, resp.Data) + } + + // Create a new backend so we have a new cache (thus populating from disk). + // Then test reading (reading from disk + lock), writing, reading, + // deleting, reading. + b, err = Backend(config) + if err != nil { + t.Fatal(err) + } + + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // Read again, make sure things are what we expect + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v, err: %v", resp, err) + } + if !reflect.DeepEqual(expected, resp.Data) { + t.Fatalf("bad: role data: expected: %#v\n actual: %#v", expected, resp.Data) + } + + roleReq.Operation = logical.UpdateOperation + roleData["bound_ami_id"] = "testamiid2" + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v, err: %v", resp, err) + } + + roleReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v, err: %v", resp, err) + } + + expected["bound_ami_id"] = []string{"testamiid2"} + if diff := deep.Equal(expected, resp.Data); diff != nil { + t.Fatal(diff) + } + + // Delete which should remove from disk and also cache + roleReq.Operation = logical.DeleteOperation + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v, err: %v", resp, err) + } + if resp != nil { + t.Fatalf("failed to delete role entry") + } + + // Verify it was deleted, e.g. 
it isn't found in the role cache + roleReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v, err: %v", resp, err) + } + if resp != nil { + t.Fatal("expected nil") + } +} + +func TestAwsEc2_RoleDurationSeconds(t *testing.T) { + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + roleData := map[string]interface{}{ + "auth_type": "ec2", + "bound_iam_instance_profile_arn": "arn:aws:iam::123456789012:instance-profile/test-profile-name", + "resolve_aws_unique_ids": false, + "ttl": "10s", + "max_ttl": "20s", + "period": "30s", + } + + roleReq := &logical.Request{ + Operation: logical.CreateOperation, + Storage: storage, + Path: "role/testrole", + Data: roleData, + } + + resp, err := b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v, err: %v", resp, err) + } + + roleReq.Operation = logical.ReadOperation + + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v, err: %v", resp, err) + } + + if resp.Data["ttl"].(int64) != 10 { + t.Fatalf("bad: ttl; expected: 10, actual: %d", resp.Data["ttl"]) + } + if resp.Data["max_ttl"].(int64) != 20 { + t.Fatalf("bad: max_ttl; expected: 20, actual: %d", resp.Data["max_ttl"]) + } + if resp.Data["period"].(int64) != 30 { + t.Fatalf("bad: period; expected: 30, actual: %d", resp.Data["period"]) + } +} + +func TestRoleEntryUpgradeV(t *testing.T) { + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + roleEntryToUpgrade := &awsRoleEntry{ + BoundIamRoleARNs: []string{"arn:aws:iam::123456789012:role/my_role_prefix"}, + BoundIamInstanceProfileARNs: []string{"arn:aws:iam::123456789012:instance-profile/my_profile-prefix"}, + Version: 1, + } + expected := &awsRoleEntry{ + BoundIamRoleARNs: []string{"arn:aws:iam::123456789012:role/my_role_prefix*"}, + BoundIamInstanceProfileARNs: []string{"arn:aws:iam::123456789012:instance-profile/my_profile-prefix*"}, + Version: currentRoleStorageVersion, + } + + upgraded, err := b.upgradeRole(context.Background(), storage, roleEntryToUpgrade) + if err != nil { + t.Fatalf("error upgrading role entry: %#v", err) + } + if !upgraded { + t.Fatalf("expected to upgrade role entry %#v but got no upgrade", roleEntryToUpgrade) + } + if roleEntryToUpgrade.RoleID == "" { + t.Fatal("expected role ID to be populated") + } + expected.RoleID = roleEntryToUpgrade.RoleID + if diff := deep.Equal(*roleEntryToUpgrade, *expected); diff != nil { + t.Fatal(diff) + } +} + +func TestRoleInitialize(t *testing.T) { + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + err = b.Setup(ctx, config) + if err != nil { + t.Fatal(err) + } + + // create some role entries, some of which will need to be upgraded + type testData struct { + name string + entry *awsRoleEntry + } + + before := []testData{ + { + name: "role1", + entry: &awsRoleEntry{ + 
BoundIamRoleARNs: []string{"arn:aws:iam::000000000001:role/my_role_prefix"}, + BoundIamInstanceProfileARNs: []string{"arn:aws:iam::000000000001:instance-profile/my_profile-prefix"}, + Version: 1, + }, + }, + { + name: "role2", + entry: &awsRoleEntry{ + BoundIamRoleARNs: []string{"arn:aws:iam::000000000002:role/my_role_prefix"}, + BoundIamInstanceProfileARNs: []string{"arn:aws:iam::000000000002:instance-profile/my_profile-prefix"}, + Version: 2, + }, + }, + { + name: "role3", + entry: &awsRoleEntry{ + BoundIamRoleARNs: []string{"arn:aws:iam::000000000003:role/my_role_prefix"}, + BoundIamInstanceProfileARNs: []string{"arn:aws:iam::000000000003:instance-profile/my_profile-prefix"}, + Version: currentRoleStorageVersion, + }, + }, + } + + // put the entries in storage + for _, role := range before { + err = b.setRole(ctx, storage, role.name, role.entry) + if err != nil { + t.Fatal(err) + } + } + + // upgrade all the entries + upgraded, err := b.upgrade(ctx, storage) + if err != nil { + t.Fatal(err) + } + if !upgraded { + t.Fatalf("expected upgrade") + } + + // read the entries from storage + after := make([]testData, 0) + names, err := storage.List(ctx, "role/") + if err != nil { + t.Fatal(err) + } + for _, name := range names { + entry, err := b.role(ctx, storage, name) + if err != nil { + t.Fatal(err) + } + after = append(after, testData{name: name, entry: entry}) + } + + // make sure each entry is at the current version + expected := []testData{ + { + name: "role1", + entry: &awsRoleEntry{ + BoundIamRoleARNs: []string{"arn:aws:iam::000000000001:role/my_role_prefix"}, + BoundIamInstanceProfileARNs: []string{"arn:aws:iam::000000000001:instance-profile/my_profile-prefix"}, + Version: currentRoleStorageVersion, + }, + }, + { + name: "role2", + entry: &awsRoleEntry{ + BoundIamRoleARNs: []string{"arn:aws:iam::000000000002:role/my_role_prefix"}, + BoundIamInstanceProfileARNs: []string{"arn:aws:iam::000000000002:instance-profile/my_profile-prefix"}, + Version: currentRoleStorageVersion, + }, + }, + { + name: "role3", + entry: &awsRoleEntry{ + BoundIamRoleARNs: []string{"arn:aws:iam::000000000003:role/my_role_prefix"}, + BoundIamInstanceProfileARNs: []string{"arn:aws:iam::000000000003:instance-profile/my_profile-prefix"}, + Version: currentRoleStorageVersion, + }, + }, + } + if diff := deep.Equal(expected, after); diff != nil { + t.Fatal(diff) + } + + // run it again -- nothing will happen + upgraded, err = b.upgrade(ctx, storage) + if err != nil { + t.Fatal(err) + } + if upgraded { + t.Fatalf("expected no upgrade") + } + + // make sure saved role version is correct + entry, err := storage.Get(ctx, "config/version") + if err != nil { + t.Fatal(err) + } + var version awsVersion + err = entry.DecodeJSON(&version) + if err != nil { + t.Fatal(err) + } + if version.Version != currentAwsVersion { + t.Fatalf("expected version %d, got %d", currentAwsVersion, version.Version) + } + + // stomp on the saved version + version.Version = 0 + e2, err := logical.StorageEntryJSON("config/version", version) + if err != nil { + t.Fatal(err) + } + err = storage.Put(ctx, e2) + if err != nil { + t.Fatal(err) + } + + // run it again -- now an upgrade will happen + upgraded, err = b.upgrade(ctx, storage) + if err != nil { + t.Fatal(err) + } + if !upgraded { + t.Fatalf("expected upgrade") + } +} + +func TestAwsVersion(t *testing.T) { + before := awsVersion{ + Version: 42, + } + + entry, err := logical.StorageEntryJSON("config/version", &before) + if err != nil { + t.Fatal(err) + } + + var after awsVersion + err = 
entry.DecodeJSON(&after) + if err != nil { + t.Fatal(err) + } + + if diff := deep.Equal(before, after); diff != nil { + t.Fatal(diff) + } +} + +// This test was used to reproduce https://github.com/hashicorp/vault/issues/7418 +// and verify its fix. +// Please run it at least 3 times to ensure that passing tests are due to actually +// passing, rather than the region being randomly chosen tying to the one in the +// test through luck. +func TestRoleResolutionWithSTSEndpointConfigured(t *testing.T) { + if enabled := os.Getenv(vlttesting.TestEnvVar); enabled == "" { + t.Skip() + } + + /* ARN of an AWS role that Vault can query during testing. + This role should exist in your current AWS account and your credentials + should have iam:GetRole permissions to query it. + */ + assumableRoleArn := os.Getenv("AWS_ASSUMABLE_ROLE_ARN") + if assumableRoleArn == "" { + t.Skip("skipping because AWS_ASSUMABLE_ROLE_ARN is unset") + } + + // Ensure aws credentials are available locally for testing. + logger := logging.NewVaultLogger(hclog.Debug) + credsConfig := &awsutil.CredentialsConfig{Logger: logger} + credsChain, err := credsConfig.GenerateCredentialChain() + if err != nil { + t.Fatal(err) + } + _, err = credsChain.Get() + if err != nil { + t.SkipNow() + } + + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // configure the client with an sts endpoint that should be used in creating the role + data := map[string]interface{}{ + "sts_endpoint": "https://sts.eu-west-1.amazonaws.com", + // Note - if you comment this out, you can reproduce the error shown + // in the linked GH issue above. This essentially reproduces the problem + // we had when we didn't have an sts_region field. + "sts_region": "eu-west-1", + } + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.CreateOperation, + Path: "config/client", + Data: data, + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + if resp != nil && resp.IsError() { + t.Fatalf("failed to create the role entry; resp: %#v", resp) + } + + data = map[string]interface{}{ + "auth_type": iamAuthType, + "bound_iam_principal_arn": assumableRoleArn, + "resolve_aws_unique_ids": true, + } + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.CreateOperation, + Path: "role/MyRoleName", + Data: data, + Storage: storage, + }) + if err != nil { + t.Fatal(err) + } + if resp != nil && resp.IsError() { + t.Fatalf("failed to create the role entry; resp: %#v", resp) + } +} + +func resolveArnToFakeUniqueId(_ context.Context, _ logical.Storage, _ string) (string, error) { + return "FakeUniqueId1", nil +} diff --git a/builtin/credential/aws/path_roletag_denylist.go b/builtin/credential/aws/path_roletag_denylist.go new file mode 100644 index 0000000..8200436 --- /dev/null +++ b/builtin/credential/aws/path_roletag_denylist.go @@ -0,0 +1,274 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package awsauth
+
+import (
+ "context"
+ "encoding/base64"
+ "time"
+
+ "github.com/hashicorp/vault/sdk/framework"
+ "github.com/hashicorp/vault/sdk/logical"
+)
+
+func (b *backend) pathRoletagDenyList() *framework.Path {
+ return &framework.Path{
+ Pattern: "roletag-denylist/(?P<role_tag>.*)",
+
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationPrefix: operationPrefixAWS,
+ OperationSuffix: "role-tag-deny-list",
+ },
+
+ Fields: map[string]*framework.FieldSchema{
+ "role_tag": {
+ Type: framework.TypeString,
+ Description: `Role tag to be deny listed. The tag can be supplied as-is. In order
+to avoid any encoding problems, it can be base64 encoded.`,
+ },
+ },
+
+ Operations: map[logical.Operation]framework.OperationHandler{
+ logical.UpdateOperation: &framework.PathOperation{
+ Callback: b.pathRoletagDenyListUpdate,
+ },
+ logical.ReadOperation: &framework.PathOperation{
+ Callback: b.pathRoletagDenyListRead,
+ },
+ logical.DeleteOperation: &framework.PathOperation{
+ Callback: b.pathRoletagDenyListDelete,
+ },
+ },
+
+ HelpSynopsis: pathRoletagBlacklistSyn,
+ HelpDescription: pathRoletagBlacklistDesc,
+ }
+}
+
+// Path to list all the deny listed tags.
+func (b *backend) pathListRoletagDenyList() *framework.Path {
+ return &framework.Path{
+ Pattern: "roletag-denylist/?",
+
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationPrefix: operationPrefixAWS,
+ OperationSuffix: "role-tag-deny-lists",
+ },
+
+ Operations: map[logical.Operation]framework.OperationHandler{
+ logical.ListOperation: &framework.PathOperation{
+ Callback: b.pathRoletagDenyListsList,
+ },
+ },
+
+ HelpSynopsis: pathListRoletagDenyListHelpSyn,
+ HelpDescription: pathListRoletagDenyListHelpDesc,
+ }
+}
+
+// Lists all the deny listed role tags.
+func (b *backend) pathRoletagDenyListsList(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ b.denyListMutex.RLock()
+ defer b.denyListMutex.RUnlock()
+
+ tags, err := req.Storage.List(ctx, denyListRoletagStorage)
+ if err != nil {
+ return nil, err
+ }
+
+ // Tags are base64 encoded before indexing to avoid problems
+ // with the path separators being present in the tag.
+ // Reverse it before returning the list response.
+ for i, keyB64 := range tags {
+ if key, err := base64.StdEncoding.DecodeString(keyB64); err != nil {
+ return nil, err
+ } else {
+ // Overwrite the result with the decoded string.
+ tags[i] = string(key)
+ }
+ }
+ return logical.ListResponse(tags), nil
+}
+
+// Fetch an entry from the role tag deny list for a given tag.
+// This method takes a role tag in its original form and not a base64 encoded form.
+func (b *backend) lockedDenyLististRoleTagEntry(ctx context.Context, s logical.Storage, tag string) (*roleTagBlacklistEntry, error) {
+ b.denyListMutex.RLock()
+ defer b.denyListMutex.RUnlock()
+
+ return b.nonLockedDenyListRoleTagEntry(ctx, s, tag)
+}
+
+func (b *backend) nonLockedDenyListRoleTagEntry(ctx context.Context, s logical.Storage, tag string) (*roleTagBlacklistEntry, error) {
+ entry, err := s.Get(ctx, denyListRoletagStorage+base64.StdEncoding.EncodeToString([]byte(tag)))
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result roleTagBlacklistEntry
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+ return &result, nil
+}
+
+// Deletes an entry from the role tag deny list for a given tag.
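+// The storage key is the base64 encoding of the raw tag, mirroring how entries
+// are written by pathRoletagDenyListUpdate.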
+func (b *backend) pathRoletagDenyListDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + b.denyListMutex.Lock() + defer b.denyListMutex.Unlock() + + tag := data.Get("role_tag").(string) + if tag == "" { + return logical.ErrorResponse("missing role_tag"), nil + } + + return nil, req.Storage.Delete(ctx, denyListRoletagStorage+base64.StdEncoding.EncodeToString([]byte(tag))) +} + +// If the given role tag is deny listed, returns the details of the deny list entry. +// Returns 'nil' otherwise. +func (b *backend) pathRoletagDenyListRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + tag := data.Get("role_tag").(string) + if tag == "" { + return logical.ErrorResponse("missing role_tag"), nil + } + + entry, err := b.lockedDenyLististRoleTagEntry(ctx, req.Storage, tag) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + "creation_time": entry.CreationTime.Format(time.RFC3339Nano), + "expiration_time": entry.ExpirationTime.Format(time.RFC3339Nano), + }, + }, nil +} + +// pathRoletagDenyListUpdate is used to deny list a given role tag. +// Before a role tag is added to the deny list, the correctness of the plaintext part +// in the role tag is verified using the associated HMAC. +func (b *backend) pathRoletagDenyListUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // The role_tag value provided, optionally can be base64 encoded. + tagInput := data.Get("role_tag").(string) + if tagInput == "" { + return logical.ErrorResponse("missing role_tag"), nil + } + + tag := "" + + // Try to base64 decode the value. + tagBytes, err := base64.StdEncoding.DecodeString(tagInput) + if err != nil { + // If the decoding failed, use the value as-is. + tag = tagInput + } else { + // If the decoding succeeded, use the decoded value. + tag = string(tagBytes) + } + + // Parse and verify the role tag from string form to a struct form and verify it. + rTag, err := b.parseAndVerifyRoleTagValue(ctx, req.Storage, tag) + if err != nil { + return nil, err + } + if rTag == nil { + return logical.ErrorResponse("failed to verify the role tag and parse it"), nil + } + + // Get the entry for the role mentioned in the role tag. + roleEntry, err := b.role(ctx, req.Storage, rTag.Role) + if err != nil { + return nil, err + } + if roleEntry == nil { + return logical.ErrorResponse("role entry not found"), nil + } + + b.denyListMutex.Lock() + defer b.denyListMutex.Unlock() + + // Check if the role tag is already deny listed. If yes, update it. + blEntry, err := b.nonLockedDenyListRoleTagEntry(ctx, req.Storage, tag) + if err != nil { + return nil, err + } + if blEntry == nil { + blEntry = &roleTagBlacklistEntry{} + } + + currentTime := time.Now() + + // Check if this is a creation of deny list entry. + if blEntry.CreationTime.IsZero() { + // Set the creation time for the deny list entry. + // This should not be updated after setting it once. + // If deny list operation is invoked more than once, only update the expiration time. + blEntry.CreationTime = currentTime + } + + // Decide the expiration time based on the max_ttl values. Since this is + // restricting access, use the greatest duration, not the least. 
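+ // In effect: expiration = now + max(tag max_ttl, role token max_ttl,
+ // mount max lease TTL), computed by the chained comparisons below.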
+ maxDur := rTag.MaxTTL + if roleEntry.TokenMaxTTL > maxDur { + maxDur = roleEntry.TokenMaxTTL + } + if b.System().MaxLeaseTTL() > maxDur { + maxDur = b.System().MaxLeaseTTL() + } + + blEntry.ExpirationTime = currentTime.Add(maxDur) + + entry, err := logical.StorageEntryJSON(denyListRoletagStorage+base64.StdEncoding.EncodeToString([]byte(tag)), blEntry) + if err != nil { + return nil, err + } + + // Store the deny list entry. + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + + return nil, nil +} + +type roleTagBlacklistEntry struct { + CreationTime time.Time `json:"creation_time"` + ExpirationTime time.Time `json:"expiration_time"` +} + +const pathRoletagBlacklistSyn = ` +Blacklist a previously created role tag. +` + +const pathRoletagBlacklistDesc = ` +Add a role tag to the deny list so that it cannot be used by any EC2 instance to perform further +logins. This can be used if the role tag is suspected or believed to be possessed by +an unintended party. + +By default, a cron task will periodically look for expired entries in the deny list +and deletes them. The duration to periodically run this, is one hour by default. +However, this can be configured using the 'config/tidy/roletags' endpoint. This tidy +action can be triggered via the API as well, using the 'tidy/roletags' endpoint. + +Also note that delete operation is supported on this endpoint to remove specific +entries from the deny list. +` + +const pathListRoletagDenyListHelpSyn = ` +Lists the deny list role tags. +` + +const pathListRoletagDenyListHelpDesc = ` +Lists all the entries present in the deny list. This will show both the valid +entries and the expired entries in the deny list. Use 'tidy/roletags' endpoint +to clean-up the deny list of role tags based on expiration time. +` diff --git a/builtin/credential/aws/path_tidy_identity_accesslist.go b/builtin/credential/aws/path_tidy_identity_accesslist.go new file mode 100644 index 0000000..3b907c4 --- /dev/null +++ b/builtin/credential/aws/path_tidy_identity_accesslist.go @@ -0,0 +1,136 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package awsauth + +import ( + "context" + "fmt" + "net/http" + "sync/atomic" + "time" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" +) + +func (b *backend) pathTidyIdentityAccessList() *framework.Path { + return &framework.Path{ + Pattern: "tidy/identity-accesslist$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "identity-access-list", + OperationVerb: "tidy", + }, + + Fields: map[string]*framework.FieldSchema{ + "safety_buffer": { + Type: framework.TypeDurationSecond, + Default: 259200, + Description: `The amount of extra time that must have passed beyond the identity's +expiration, before it is removed from the backend storage.`, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathTidyIdentityAccessListUpdate, + }, + }, + + HelpSynopsis: pathTidyIdentityAccessListSyn, + HelpDescription: pathTidyIdentityAccessListDesc, + } +} + +// tidyAccessListIdentity is used to delete entries in the access list that are expired. 
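+// The actual tidy work runs asynchronously in a goroutine; the CAS guard below
+// ensures only one tidy operation is in flight at a time, and the handler
+// returns 202 Accepted immediately.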
+func (b *backend) tidyAccessListIdentity(ctx context.Context, req *logical.Request, safetyBuffer int) (*logical.Response, error) { + // If we are a performance standby forward the request to the active node + if b.System().ReplicationState().HasState(consts.ReplicationPerformanceStandby) { + return nil, logical.ErrReadOnly + } + + if !atomic.CompareAndSwapUint32(b.tidyAccessListCASGuard, 0, 1) { + resp := &logical.Response{} + resp.AddWarning("Tidy operation already in progress.") + return resp, nil + } + + s := req.Storage + + go func() { + defer atomic.StoreUint32(b.tidyAccessListCASGuard, 0) + + // Don't cancel when the original client request goes away + ctx = context.Background() + + logger := b.Logger().Named("wltidy") + + bufferDuration := time.Duration(safetyBuffer) * time.Second + + doTidy := func() error { + identities, err := s.List(ctx, identityAccessListStorage) + if err != nil { + return err + } + + for _, instanceID := range identities { + identityEntry, err := s.Get(ctx, identityAccessListStorage+instanceID) + if err != nil { + return fmt.Errorf("error fetching identity of instanceID %q: %w", instanceID, err) + } + + if identityEntry == nil { + return fmt.Errorf("identity entry for instanceID %q is nil", instanceID) + } + + if identityEntry.Value == nil || len(identityEntry.Value) == 0 { + return fmt.Errorf("found identity entry for instanceID %q but actual identity is empty", instanceID) + } + + var result accessListIdentity + if err := identityEntry.DecodeJSON(&result); err != nil { + return err + } + + if time.Now().After(result.ExpirationTime.Add(bufferDuration)) { + if err := s.Delete(ctx, identityAccessListStorage+instanceID); err != nil { + return fmt.Errorf("error deleting identity of instanceID %q from storage: %w", instanceID, err) + } + } + } + + return nil + } + + if err := doTidy(); err != nil { + logger.Error("error running access list tidy", "error", err) + return + } + }() + + resp := &logical.Response{} + resp.AddWarning("Tidy operation successfully started. Any information from the operation will be printed to Vault's server logs.") + return logical.RespondWithStatusCode(resp, req, http.StatusAccepted) +} + +// pathTidyIdentityAccessListUpdate is used to delete entries in the access list that are expired. +func (b *backend) pathTidyIdentityAccessListUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + return b.tidyAccessListIdentity(ctx, req, data.Get("safety_buffer").(int)) +} + +const pathTidyIdentityAccessListSyn = ` +Clean-up the access list instance identity entries. +` + +const pathTidyIdentityAccessListDesc = ` +When an instance identity is in the access list, the expiration time of the access list +entry is set based on the maximum 'max_ttl' value set on: the role, the role tag +and the backend's mount. + +When this endpoint is invoked, all the entries that are expired will be deleted. +A 'safety_buffer' (duration in seconds) can be provided, to ensure deletion of +only those entries that are expired before 'safety_buffer' seconds. +` diff --git a/builtin/credential/aws/path_tidy_roletag_denylist.go b/builtin/credential/aws/path_tidy_roletag_denylist.go new file mode 100644 index 0000000..ddd1f79 --- /dev/null +++ b/builtin/credential/aws/path_tidy_roletag_denylist.go @@ -0,0 +1,140 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package awsauth + +import ( + "context" + "fmt" + "net/http" + "sync/atomic" + "time" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + denyListRoletagStorage = "blacklist/roletag/" +) + +func (b *backend) pathTidyRoletagDenyList() *framework.Path { + return &framework.Path{ + Pattern: "tidy/roletag-denylist$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "role-tag-deny-list", + OperationVerb: "tidy", + }, + + Fields: map[string]*framework.FieldSchema{ + "safety_buffer": { + Type: framework.TypeDurationSecond, + Default: 259200, // 72h + Description: `The amount of extra time that must have passed beyond the roletag +expiration, before it is removed from the backend storage.`, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathTidyRoletagDenylistUpdate, + }, + }, + + HelpSynopsis: pathTidyRoletagDenylistSyn, + HelpDescription: pathTidyRoletagDenylistDesc, + } +} + +// tidyDenyListRoleTag is used to clean-up the entries in the role tag deny list. +func (b *backend) tidyDenyListRoleTag(ctx context.Context, req *logical.Request, safetyBuffer int) (*logical.Response, error) { + // If we are a performance standby forward the request to the active node + if b.System().ReplicationState().HasState(consts.ReplicationPerformanceStandby) { + return nil, logical.ErrReadOnly + } + + if !atomic.CompareAndSwapUint32(b.tidyDenyListCASGuard, 0, 1) { + resp := &logical.Response{} + resp.AddWarning("Tidy operation already in progress.") + return resp, nil + } + + s := req.Storage + + go func() { + defer atomic.StoreUint32(b.tidyDenyListCASGuard, 0) + + // Don't cancel when the original client request goes away + ctx = context.Background() + + logger := b.Logger().Named("bltidy") + + bufferDuration := time.Duration(safetyBuffer) * time.Second + + doTidy := func() error { + tags, err := s.List(ctx, denyListRoletagStorage) + if err != nil { + return err + } + + for _, tag := range tags { + tagEntry, err := s.Get(ctx, denyListRoletagStorage+tag) + if err != nil { + return fmt.Errorf("error fetching tag %q: %w", tag, err) + } + + if tagEntry == nil { + return fmt.Errorf("tag entry for tag %q is nil", tag) + } + + if tagEntry.Value == nil || len(tagEntry.Value) == 0 { + return fmt.Errorf("found entry for tag %q but actual tag is empty", tag) + } + + var result roleTagBlacklistEntry + if err := tagEntry.DecodeJSON(&result); err != nil { + return err + } + + if time.Now().After(result.ExpirationTime.Add(bufferDuration)) { + if err := s.Delete(ctx, denyListRoletagStorage+tag); err != nil { + return fmt.Errorf("error deleting tag %q from storage: %w", tag, err) + } + } + } + + return nil + } + + if err := doTidy(); err != nil { + logger.Error("error running deny list tidy", "error", err) + return + } + }() + + resp := &logical.Response{} + resp.AddWarning("Tidy operation successfully started. Any information from the operation will be printed to Vault's server logs.") + return logical.RespondWithStatusCode(resp, req, http.StatusAccepted) +} + +// pathTidyRoletagDenylistUpdate is used to clean-up the entries in the role tag deny list. 
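+// For illustration, a hypothetical invocation (assuming the method is mounted
+// at the default auth/aws path):
+//
+//	vault write auth/aws/tidy/roletag-denylist safety_buffer=72h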
+func (b *backend) pathTidyRoletagDenylistUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + return b.tidyDenyListRoleTag(ctx, req, data.Get("safety_buffer").(int)) +} + +const pathTidyRoletagDenylistSyn = ` +Clean-up the deny list role tag entries. +` + +const pathTidyRoletagDenylistDesc = ` +When a role tag is deny listed, the expiration time of the deny list entry is +set based on the maximum 'max_ttl' value set on: the role, the role tag and the +backend's mount. + +When this endpoint is invoked, all the entries that are expired will be deleted. +A 'safety_buffer' (duration in seconds) can be provided, to ensure deletion of +only those entries that are expired before 'safety_buffer' seconds. +` diff --git a/builtin/credential/aws/pkcs7/LICENSE b/builtin/credential/aws/pkcs7/LICENSE new file mode 100644 index 0000000..f2fc6ef --- /dev/null +++ b/builtin/credential/aws/pkcs7/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Andrew Smith + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/builtin/credential/aws/pkcs7/README.md b/builtin/credential/aws/pkcs7/README.md new file mode 100644 index 0000000..1831a15 --- /dev/null +++ b/builtin/credential/aws/pkcs7/README.md @@ -0,0 +1,5 @@ +# PKCS7 + +This code is used to verify PKCS7 signatures for the EC2 auth method. The code +was forked from [mozilla-services/pkcs7](https://github.com/mozilla-services/pkcs7) +and modified for Vault. 
\ No newline at end of file diff --git a/builtin/credential/aws/pkcs7/ber.go b/builtin/credential/aws/pkcs7/ber.go new file mode 100644 index 0000000..0b18a6c --- /dev/null +++ b/builtin/credential/aws/pkcs7/ber.go @@ -0,0 +1,271 @@ +package pkcs7 + +import ( + "bytes" + "errors" +) + +var encodeIndent = 0 + +type asn1Object interface { + EncodeTo(writer *bytes.Buffer) error +} + +type asn1Structured struct { + tagBytes []byte + content []asn1Object +} + +func (s asn1Structured) EncodeTo(out *bytes.Buffer) error { + // fmt.Printf("%s--> tag: % X\n", strings.Repeat("| ", encodeIndent), s.tagBytes) + encodeIndent++ + inner := new(bytes.Buffer) + for _, obj := range s.content { + err := obj.EncodeTo(inner) + if err != nil { + return err + } + } + encodeIndent-- + out.Write(s.tagBytes) + encodeLength(out, inner.Len()) + out.Write(inner.Bytes()) + return nil +} + +type asn1Primitive struct { + tagBytes []byte + length int + content []byte +} + +func (p asn1Primitive) EncodeTo(out *bytes.Buffer) error { + _, err := out.Write(p.tagBytes) + if err != nil { + return err + } + if err = encodeLength(out, p.length); err != nil { + return err + } + // fmt.Printf("%s--> tag: % X length: %d\n", strings.Repeat("| ", encodeIndent), p.tagBytes, p.length) + // fmt.Printf("%s--> content length: %d\n", strings.Repeat("| ", encodeIndent), len(p.content)) + out.Write(p.content) + + return nil +} + +func ber2der(ber []byte) ([]byte, error) { + if len(ber) == 0 { + return nil, errors.New("ber2der: input ber is empty") + } + // fmt.Printf("--> ber2der: Transcoding %d bytes\n", len(ber)) + out := new(bytes.Buffer) + + obj, _, err := readObject(ber, 0) + if err != nil { + return nil, err + } + obj.EncodeTo(out) + + // if offset < len(ber) { + // return nil, fmt.Errorf("ber2der: Content longer than expected. Got %d, expected %d", offset, len(ber)) + //} + + return out.Bytes(), nil +} + +// encodes lengths that are longer than 127 into string of bytes +func marshalLongLength(out *bytes.Buffer, i int) (err error) { + n := lengthLength(i) + + for ; n > 0; n-- { + err = out.WriteByte(byte(i >> uint((n-1)*8))) + if err != nil { + return + } + } + + return nil +} + +// computes the byte length of an encoded length value +func lengthLength(i int) (numBytes int) { + numBytes = 1 + for i > 255 { + numBytes++ + i >>= 8 + } + return +} + +// encodes the length in DER format +// If the length fits in 7 bits, the value is encoded directly. +// +// Otherwise, the number of bytes to encode the length is first determined. +// This number is likely to be 4 or less for a 32bit length. This number is +// added to 0x80. 
The length is encoded in big endian encoding follow after +// +// Examples: +// +// length | byte 1 | bytes n +// 0 | 0x00 | - +// 120 | 0x78 | - +// 200 | 0x81 | 0xC8 +// 500 | 0x82 | 0x01 0xF4 +func encodeLength(out *bytes.Buffer, length int) (err error) { + if length >= 128 { + l := lengthLength(length) + err = out.WriteByte(0x80 | byte(l)) + if err != nil { + return + } + err = marshalLongLength(out, length) + if err != nil { + return + } + } else { + err = out.WriteByte(byte(length)) + if err != nil { + return + } + } + return +} + +func readObject(ber []byte, offset int) (asn1Object, int, error) { + berLen := len(ber) + if offset >= berLen { + return nil, 0, errors.New("ber2der: offset is after end of ber data") + } + tagStart := offset + b := ber[offset] + offset++ + if offset >= berLen { + return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") + } + tag := b & 0x1F // last 5 bits + if tag == 0x1F { + tag = 0 + for ber[offset] >= 0x80 { + tag = tag*128 + ber[offset] - 0x80 + offset++ + if offset > berLen { + return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") + } + } + // jvehent 20170227: this doesn't appear to be used anywhere... + // tag = tag*128 + ber[offset] - 0x80 + offset++ + if offset > berLen { + return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") + } + } + tagEnd := offset + + kind := b & 0x20 + if kind == 0 { + debugprint("--> Primitive\n") + } else { + debugprint("--> Constructed\n") + } + // read length + var length int + l := ber[offset] + offset++ + if offset > berLen { + return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") + } + indefinite := false + if l > 0x80 { + numberOfBytes := (int)(l & 0x7F) + if numberOfBytes > 4 { // int is only guaranteed to be 32bit + return nil, 0, errors.New("ber2der: BER tag length too long") + } + if numberOfBytes == 4 && (int)(ber[offset]) > 0x7F { + return nil, 0, errors.New("ber2der: BER tag length is negative") + } + if (int)(ber[offset]) == 0x0 { + return nil, 0, errors.New("ber2der: BER tag length has leading zero") + } + debugprint("--> (compute length) indicator byte: %x\n", l) + debugprint("--> (compute length) length bytes: % X\n", ber[offset:offset+numberOfBytes]) + for i := 0; i < numberOfBytes; i++ { + length = length*256 + (int)(ber[offset]) + offset++ + if offset > berLen { + return nil, 0, errors.New("ber2der: cannot move offset forward, end of ber data reached") + } + } + } else if l == 0x80 { + indefinite = true + } else { + length = (int)(l) + } + if length < 0 { + return nil, 0, errors.New("ber2der: invalid negative value found in BER tag length") + } + // fmt.Printf("--> length : %d\n", length) + contentEnd := offset + length + if contentEnd > len(ber) { + return nil, 0, errors.New("ber2der: BER tag length is more than available data") + } + debugprint("--> content start : %d\n", offset) + debugprint("--> content end : %d\n", contentEnd) + debugprint("--> content : % X\n", ber[offset:contentEnd]) + var obj asn1Object + if indefinite && kind == 0 { + return nil, 0, errors.New("ber2der: Indefinite form tag must have constructed encoding") + } + if kind == 0 { + obj = asn1Primitive{ + tagBytes: ber[tagStart:tagEnd], + length: length, + content: ber[offset:contentEnd], + } + } else { + var subObjects []asn1Object + for (offset < contentEnd) || indefinite { + var subObj asn1Object + var err error + subObj, offset, err = readObject(ber, offset) + if err != nil { + return 
nil, 0, err + } + subObjects = append(subObjects, subObj) + + if indefinite { + terminated, err := isIndefiniteTermination(ber, offset) + if err != nil { + return nil, 0, err + } + + if terminated { + break + } + } + } + obj = asn1Structured{ + tagBytes: ber[tagStart:tagEnd], + content: subObjects, + } + } + + // Apply indefinite form length with 0x0000 terminator. + if indefinite { + contentEnd = offset + 2 + } + + return obj, contentEnd, nil +} + +func isIndefiniteTermination(ber []byte, offset int) (bool, error) { + if len(ber)-offset < 2 { + return false, errors.New("ber2der: Invalid BER format") + } + + return bytes.Index(ber[offset:], []byte{0x0, 0x0}) == 0, nil +} + +func debugprint(format string, a ...interface{}) { + // fmt.Printf(format, a) +} diff --git a/builtin/credential/aws/pkcs7/ber_test.go b/builtin/credential/aws/pkcs7/ber_test.go new file mode 100644 index 0000000..169c78a --- /dev/null +++ b/builtin/credential/aws/pkcs7/ber_test.go @@ -0,0 +1,150 @@ +package pkcs7 + +import ( + "bytes" + "encoding/asn1" + "encoding/pem" + "fmt" + "strings" + "testing" +) + +func TestBer2Der(t *testing.T) { + // indefinite length fixture + ber := []byte{0x30, 0x80, 0x02, 0x01, 0x01, 0x00, 0x00} + expected := []byte{0x30, 0x03, 0x02, 0x01, 0x01} + der, err := ber2der(ber) + if err != nil { + t.Fatalf("ber2der failed with error: %v", err) + } + if !bytes.Equal(der, expected) { + t.Errorf("ber2der result did not match.\n\tExpected: % X\n\tActual: % X", expected, der) + } + + if der2, err := ber2der(der); err != nil { + t.Errorf("ber2der on DER bytes failed with error: %v", err) + } else { + if !bytes.Equal(der, der2) { + t.Error("ber2der is not idempotent") + } + } + var thing struct { + Number int + } + rest, err := asn1.Unmarshal(der, &thing) + if err != nil { + t.Errorf("Cannot parse resulting DER because: %v", err) + } else if len(rest) > 0 { + t.Errorf("Resulting DER has trailing data: % X", rest) + } +} + +func TestBer2Der_Negatives(t *testing.T) { + fixtures := []struct { + Input []byte + ErrorContains string + }{ + {[]byte{0x30, 0x85}, "tag length too long"}, + {[]byte{0x30, 0x84, 0x80, 0x0, 0x0, 0x0}, "length is negative"}, + {[]byte{0x30, 0x82, 0x0, 0x1}, "length has leading zero"}, + {[]byte{0x30, 0x80, 0x1, 0x2, 0x1, 0x2}, "Invalid BER format"}, + {[]byte{0x30, 0x80, 0x1, 0x2}, "BER tag length is more than available data"}, + {[]byte{0x30, 0x03, 0x01, 0x02}, "length is more than available data"}, + {[]byte{0x30}, "end of ber data reached"}, + } + + for _, fixture := range fixtures { + _, err := ber2der(fixture.Input) + if err == nil { + t.Errorf("No error thrown. 
Expected: %s", fixture.ErrorContains) + } + if !strings.Contains(err.Error(), fixture.ErrorContains) { + t.Errorf("Unexpected error thrown.\n\tExpected: /%s/\n\tActual: %s", fixture.ErrorContains, err.Error()) + } + } +} + +func TestVerifyIndefiniteLengthBer(t *testing.T) { + decoded := mustDecodePEM([]byte(testPKCS7)) + + _, err := ber2der(decoded) + if err != nil { + t.Errorf("cannot parse indefinite length ber: %v", err) + } +} + +func mustDecodePEM(data []byte) []byte { + var block *pem.Block + block, rest := pem.Decode(data) + if len(rest) != 0 { + panic(fmt.Errorf("unexpected remaining PEM block during decode")) + } + return block.Bytes +} + +const testPKCS7 = ` +-----BEGIN PKCS7----- +MIAGCSqGSIb3DQEHAqCAMIACAQExDzANBglghkgBZQMEAgEFADCABgkqhkiG9w0B +BwGggCSABIIDfXsiQWdlbnRBY3Rpb25PdmVycmlkZXMiOnsiQWdlbnRPdmVycmlk +ZXMiOnsiRmlsZUV4aXN0c0JlaGF2aW9yIjoiT1ZFUldSSVRFIn19LCJBcHBsaWNh +dGlvbklkIjoiZTA0NDIzZTQtN2E2Ny00ZjljLWIyOTEtOTllNjNjMWMyMTU4Iiwi +QXBwbGljYXRpb25OYW1lIjoibWthbmlhLXhyZF9zYW0uY2R3c19lY2hvc2VydmVy +IiwiRGVwbG95bWVudENyZWF0b3IiOiJ1c2VyIiwiRGVwbG95bWVudEdyb3VwSWQi +OiJmYWI5MjEwZi1mNmM3LTQyODUtYWEyZC03Mzc2MGQ4ODE3NmEiLCJEZXBsb3lt +ZW50R3JvdXBOYW1lIjoibWthbmlhLXhyZF9zYW0uY2R3c19lY2hvc2VydmVyX2Rn +IiwiRGVwbG95bWVudElkIjoiZC1UREUxVTNXREEiLCJEZXBsb3ltZW50VHlwZSI6 +IklOX1BMQUNFIiwiR2l0SHViQWNjZXNzVG9rZW4iOm51bGwsIkluc3RhbmNlR3Jv +dXBJZCI6ImZhYjkyMTBmLWY2YzctNDI4NS1hYTJkLTczNzYwZDg4MTc2YSIsIlJl +dmlzaW9uIjp7IkFwcFNwZWNDb250ZW50IjpudWxsLCJDb2RlQ29tbWl0UmV2aXNp +b24iOm51bGwsIkdpdEh1YlJldmlzaW9uIjpudWxsLCJHaXRSZXZpc2lvbiI6bnVs +bCwiUmV2aXNpb25UeXBlIjoiUzMiLCJTM1JldmlzaW9uIjp7IkJ1Y2tldCI6Im1r +YW5pYS1jZHdzLWRlcGxveS1idWNrZXQiLCJCdW5kbGVUeXBlIjoiemlwIiwiRVRh +ZyI6bnVsbCwiS2V5IjoieHJkOjpzYW0uY2R3czo6ZWNob3NlcnZlcjo6MTo6Lnpp +cCIsIlZlcnNpb24iOm51bGx9fSwiUzNSZXZpc2lvbiI6eyJCdWNrZXQiOiJta2Fu +aWEtY2R3cy1kZXBsb3ktYnVja2V0IiwiQnVuZGxlVHlwZSI6InppcCIsIkVUYWci +Om51bGwsIktleSI6InhyZDo6c2FtLmNkd3M6OmVjaG9zZXJ2ZXI6OjE6Oi56aXAi +LCJWZXJzaW9uIjpudWxsfSwiVGFyZ2V0UmV2aXNpb24iOm51bGx9AAAAAAAAoIAw +ggWbMIIEg6ADAgECAhAGrjFMK45t2jcNHtjY1DjEMA0GCSqGSIb3DQEBCwUAMEYx +CzAJBgNVBAYTAlVTMQ8wDQYDVQQKEwZBbWF6b24xFTATBgNVBAsTDFNlcnZlciBD +QSAxQjEPMA0GA1UEAxMGQW1hem9uMB4XDTIwMTExMjAwMDAwMFoXDTIxMTAxNTIz +NTk1OVowNDEyMDAGA1UEAxMpY29kZWRlcGxveS1zaWduZXItdXMtZWFzdC0yLmFt +YXpvbmF3cy5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDit4f+ +I4BSv4rBV/8bJ+f4KqBwTCt9iJeau/r9liQfMgj/C1M2E+aa++u8BtY/LQstB44v +v6KqcaiOyWpkD9OsUty9qb4eNTPF2Y4jpNsi/Hfw0phsd9gLun2foppILmL4lZIG +lBhTeEwv6qV4KbyXOG9abHOX32+jVFtM1rbzHNFvz90ysfZp16TBAi7IRKEZeXvd +MvlJJMAJtAoblxiDIS3A1csY1G4XHYET8xIoCop3mqEZEtAxUUP2epdXXdhD2U0G +7alSRS54o91QW1Dp3A13lu1A1nds9CkWlPkDTpKSUG/qN5y5+6dCCGaydgL5krMs +R79bCrR1sEKm5hi1AgMBAAGjggKVMIICkTAfBgNVHSMEGDAWgBRZpGYGUqB7lZI8 +o5QHJ5Z0W/k90DAdBgNVHQ4EFgQUPF5qTbnTDYhmp7tGmmL/jTmLoHMwNAYDVR0R +BC0wK4IpY29kZWRlcGxveS1zaWduZXItdXMtZWFzdC0yLmFtYXpvbmF3cy5jb20w +DgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjA7 +BgNVHR8ENDAyMDCgLqAshipodHRwOi8vY3JsLnNjYTFiLmFtYXpvbnRydXN0LmNv +bS9zY2ExYi5jcmwwIAYDVR0gBBkwFzALBglghkgBhv1sAQIwCAYGZ4EMAQIBMHUG +CCsGAQUFBwEBBGkwZzAtBggrBgEFBQcwAYYhaHR0cDovL29jc3Auc2NhMWIuYW1h +em9udHJ1c3QuY29tMDYGCCsGAQUFBzAChipodHRwOi8vY3J0LnNjYTFiLmFtYXpv +bnRydXN0LmNvbS9zY2ExYi5jcnQwDAYDVR0TAQH/BAIwADCCAQQGCisGAQQB1nkC +BAIEgfUEgfIA8AB2APZclC/RdzAiFFQYCDCUVo7jTRMZM7/fDC8gC8xO8WTjAAAB +dboejIcAAAQDAEcwRQIgeqoKXbST17TCEzM1BMWx/jjyVQVBIN3LG17U4OaV364C +IQDPUSJZhJm7uqGea6+VwqeDe/vGuGSuJzkDwTIOeIXPaAB2AFzcQ5L+5qtFRLFe +mtRW5hA3+9X6R9yhc5SyXub2xw7KAAABdboejNQAAAQDAEcwRQIgEKIAwwhjUcq2 
+iwzBAagdy+fTiKnBY1Yjf6wOeRpwXfMCIQC8wM3nxiWrGgIpdzzgDvFhZZTV3N81 +JWcYAu+srIVOhTANBgkqhkiG9w0BAQsFAAOCAQEAer9kml53XFy4ZSVzCbdsIFYP +Ohu7LDf5iffHBVZFnGOEVOmiPYYkNwi9R6EHIYaAs7G7GGLCp/6tdc+G4eF1j6wB +IkmXZcxMTxk/87R+S+36yDLg1GBZvqttLfexj0TRVAfVLJc7FjLXAW2+wi7YyNe8 +X17lWBwHxa1r5KgweJshGzYVUsgMTSx0aJ+93ZnqplBp9x+9DSQNqqNlBgxFANxs +ux+dfpduyLd8VLqtlECGC07tYE4mBaAjMiNjCZRWMp8ya/Z6J/bJZ27IDGA4dXzm +l9NNnlbuUDAenAByUqE+0b78J6EmmdAVf+N8siriMg02FdP3lAXJLE8tDeZp8AAA +MYICIDCCAhwCAQEwWjBGMQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRUw +EwYDVQQLEwxTZXJ2ZXIgQ0EgMUIxDzANBgNVBAMTBkFtYXpvbgIQBq4xTCuObdo3 +DR7Y2NQ4xDANBglghkgBZQMEAgEFAKCBmDAYBgkqhkiG9w0BCQMxCwYJKoZIhvcN +AQcBMBwGCSqGSIb3DQEJBTEPFw0yMTA2MjQxOTU1MzFaMC0GCSqGSIb3DQEJNDEg +MB4wDQYJYIZIAWUDBAIBBQChDQYJKoZIhvcNAQELBQAwLwYJKoZIhvcNAQkEMSIE +IP7gMuT2H0/AhgPgj3Eo0NWCIdQOBjJO18coNKIaOnJYMA0GCSqGSIb3DQEBCwUA +BIIBAJX+e87q0YvRon9/ENTvE0FoYMzYblID2Reek6L217ZlZ6pUuRsc4ghhJ5Yh +WZeOCaLwi4mrnQ5/+DGKkJ4a/w5sqFTwtJIGIIAuDCn/uDm8kIDUVkbeznSOLoPA +67cxiqgIdqZ5pqUoid2YsDj20owrGDG4wUF6ZvhM9g/5va3CAhxqvTE2HwjhHTfz +Cgl8Nlvalz7YxXEf2clFEiEVa1fVaGMl9pCyedAmTfd6hoivcpAsopvXfVaaaR2y +iuZidpUfFhSk+Ls7TU/kB74ckfUGj5q/5HcKJgb/S+FYUV7eu0ewzTyW1uRl/d0U +Tb7e7EjgDGJsjOTMdTrMfv8ho8kAAAAAAAA= +-----END PKCS7----- +` diff --git a/builtin/credential/aws/pkcs7/decrypt.go b/builtin/credential/aws/pkcs7/decrypt.go new file mode 100644 index 0000000..acedb1e --- /dev/null +++ b/builtin/credential/aws/pkcs7/decrypt.go @@ -0,0 +1,176 @@ +package pkcs7 + +import ( + "bytes" + "crypto" + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/asn1" + "errors" + "fmt" +) + +// ErrUnsupportedAlgorithm tells you when our quick dev assumptions have failed +var ErrUnsupportedAlgorithm = errors.New("pkcs7: cannot decrypt data: only RSA, DES, DES-EDE3, AES-256-CBC and AES-128-GCM supported") + +// ErrNotEncryptedContent is returned when attempting to Decrypt data that is not encrypted data +var ErrNotEncryptedContent = errors.New("pkcs7: content data is a decryptable data type") + +// Decrypt decrypts encrypted content info for recipient cert and private key +func (p7 *PKCS7) Decrypt(cert *x509.Certificate, pkey crypto.PrivateKey) ([]byte, error) { + data, ok := p7.raw.(envelopedData) + if !ok { + return nil, ErrNotEncryptedContent + } + recipient := selectRecipientForCertificate(data.RecipientInfos, cert) + if recipient.EncryptedKey == nil { + return nil, errors.New("pkcs7: no enveloped recipient for provided certificate") + } + switch pkey := pkey.(type) { + case *rsa.PrivateKey: + var contentKey []byte + contentKey, err := rsa.DecryptPKCS1v15(rand.Reader, pkey, recipient.EncryptedKey) + if err != nil { + return nil, err + } + return data.EncryptedContentInfo.decrypt(contentKey) + } + return nil, ErrUnsupportedAlgorithm +} + +// DecryptUsingPSK decrypts encrypted data using caller provided +// pre-shared secret +func (p7 *PKCS7) DecryptUsingPSK(key []byte) ([]byte, error) { + data, ok := p7.raw.(encryptedData) + if !ok { + return nil, ErrNotEncryptedContent + } + return data.EncryptedContentInfo.decrypt(key) +} + +func (eci encryptedContentInfo) decrypt(key []byte) ([]byte, error) { + alg := eci.ContentEncryptionAlgorithm.Algorithm + if !alg.Equal(OIDEncryptionAlgorithmDESCBC) && + !alg.Equal(OIDEncryptionAlgorithmDESEDE3CBC) && + !alg.Equal(OIDEncryptionAlgorithmAES256CBC) && + !alg.Equal(OIDEncryptionAlgorithmAES128CBC) && + !alg.Equal(OIDEncryptionAlgorithmAES128GCM) && + !alg.Equal(OIDEncryptionAlgorithmAES256GCM) { + return nil, 
ErrUnsupportedAlgorithm
+	}
+
+	// EncryptedContent can either be constructed of multiple OCTET STRINGs
+	// or _be_ a tagged OCTET STRING
+	var cyphertext []byte
+	if eci.EncryptedContent.IsCompound {
+		// Complex case to concat all of the children OCTET STRINGs
+		var buf bytes.Buffer
+		cypherbytes := eci.EncryptedContent.Bytes
+		for {
+			var part []byte
+			cypherbytes, _ = asn1.Unmarshal(cypherbytes, &part)
+			buf.Write(part)
+			if cypherbytes == nil {
+				break
+			}
+		}
+		cyphertext = buf.Bytes()
+	} else {
+		// Simple case, the bytes _are_ the cyphertext
+		cyphertext = eci.EncryptedContent.Bytes
+	}
+
+	var block cipher.Block
+	var err error
+
+	switch {
+	case alg.Equal(OIDEncryptionAlgorithmDESCBC):
+		block, err = des.NewCipher(key)
+	case alg.Equal(OIDEncryptionAlgorithmDESEDE3CBC):
+		block, err = des.NewTripleDESCipher(key)
+	case alg.Equal(OIDEncryptionAlgorithmAES256CBC), alg.Equal(OIDEncryptionAlgorithmAES256GCM):
+		fallthrough
+	case alg.Equal(OIDEncryptionAlgorithmAES128GCM), alg.Equal(OIDEncryptionAlgorithmAES128CBC):
+		block, err = aes.NewCipher(key)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	if alg.Equal(OIDEncryptionAlgorithmAES128GCM) || alg.Equal(OIDEncryptionAlgorithmAES256GCM) {
+		params := aesGCMParameters{}
+		paramBytes := eci.ContentEncryptionAlgorithm.Parameters.Bytes
+
+		_, err := asn1.Unmarshal(paramBytes, &params)
+		if err != nil {
+			return nil, err
+		}
+
+		gcm, err := cipher.NewGCM(block)
+		if err != nil {
+			return nil, err
+		}
+
+		if len(params.Nonce) != gcm.NonceSize() {
+			return nil, errors.New("pkcs7: encryption algorithm parameters are incorrect")
+		}
+		if params.ICVLen != gcm.Overhead() {
+			return nil, errors.New("pkcs7: encryption algorithm parameters are incorrect")
+		}
+
+		plaintext, err := gcm.Open(nil, params.Nonce, cyphertext, nil)
+		if err != nil {
+			return nil, err
+		}
+
+		return plaintext, nil
+	}
+
+	iv := eci.ContentEncryptionAlgorithm.Parameters.Bytes
+	if len(iv) != block.BlockSize() {
+		return nil, errors.New("pkcs7: encryption algorithm parameters are malformed")
+	}
+	mode := cipher.NewCBCDecrypter(block, iv)
+	plaintext := make([]byte, len(cyphertext))
+	mode.CryptBlocks(plaintext, cyphertext)
+	if plaintext, err = unpad(plaintext, mode.BlockSize()); err != nil {
+		return nil, err
+	}
+	return plaintext, nil
+}
+
+func unpad(data []byte, blocklen int) ([]byte, error) {
+	if blocklen < 1 {
+		return nil, fmt.Errorf("invalid blocklen %d", blocklen)
+	}
+	if len(data)%blocklen != 0 || len(data) == 0 {
+		return nil, fmt.Errorf("invalid data len %d", len(data))
+	}
+
+	// the last byte is the length of padding
+	padlen := int(data[len(data)-1])
+	// guard against out-of-range padding lengths before slicing
+	if padlen == 0 || padlen > len(data) {
+		return nil, errors.New("invalid padding")
+	}
+
+	// check padding integrity, all bytes should be the same
+	pad := data[len(data)-padlen:]
+	for _, padbyte := range pad {
+		if padbyte != byte(padlen) {
+			return nil, errors.New("invalid padding")
+		}
+	}
+
+	return data[:len(data)-padlen], nil
+}
+
+func selectRecipientForCertificate(recipients []recipientInfo, cert *x509.Certificate) recipientInfo {
+	for _, recp := range recipients {
+		if isCertMatchForIssuerAndSerial(cert, recp.IssuerAndSerialNumber) {
+			return recp
+		}
+	}
+	return recipientInfo{}
+}
diff --git a/builtin/credential/aws/pkcs7/decrypt_test.go b/builtin/credential/aws/pkcs7/decrypt_test.go
new file mode 100644
index 0000000..f8d1592
--- /dev/null
+++ b/builtin/credential/aws/pkcs7/decrypt_test.go
@@ -0,0 +1,61 @@
+package pkcs7
+
+import (
+	"bytes"
+	"testing"
+)
+
+func TestDecrypt(t *testing.T) {
+	fixture := UnmarshalTestFixture(EncryptedTestFixture)
+	p7, err :=
Parse(fixture.Input) + if err != nil { + t.Fatal(err) + } + content, err := p7.Decrypt(fixture.Certificate, fixture.PrivateKey) + if err != nil { + t.Errorf("Cannot Decrypt with error: %v", err) + } + expected := []byte("This is a test") + if !bytes.Equal(content, expected) { + t.Errorf("Decrypted result does not match.\n\tExpected:%s\n\tActual:%s", expected, content) + } +} + +// echo -n "This is a test" > test.txt +// openssl cms -encrypt -in test.txt cert.pem +var EncryptedTestFixture = ` +-----BEGIN PKCS7----- +MIIBGgYJKoZIhvcNAQcDoIIBCzCCAQcCAQAxgcwwgckCAQAwMjApMRAwDgYDVQQK +EwdBY21lIENvMRUwEwYDVQQDEwxFZGRhcmQgU3RhcmsCBQDL+CvWMA0GCSqGSIb3 +DQEBAQUABIGAyFz7bfI2noUs4FpmYfztm1pVjGyB00p9x0H3gGHEYNXdqlq8VG8d +iq36poWtEkatnwsOlURWZYECSi0g5IAL0U9sj82EN0xssZNaK0S5FTGnB3DPvYgt +HJvcKq7YvNLKMh4oqd17C6GB4oXyEBDj0vZnL7SUoCAOAWELPeC8CTUwMwYJKoZI +hvcNAQcBMBQGCCqGSIb3DQMHBAhEowTkot3a7oAQFD//J/IhFnk+JbkH7HZQFA== +-----END PKCS7----- +-----BEGIN CERTIFICATE----- +MIIB1jCCAUGgAwIBAgIFAMv4K9YwCwYJKoZIhvcNAQELMCkxEDAOBgNVBAoTB0Fj +bWUgQ28xFTATBgNVBAMTDEVkZGFyZCBTdGFyazAeFw0xNTA1MDYwMzU2NDBaFw0x +NjA1MDYwMzU2NDBaMCUxEDAOBgNVBAoTB0FjbWUgQ28xETAPBgNVBAMTCEpvbiBT +bm93MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDK6NU0R0eiCYVquU4RcjKc +LzGfx0aa1lMr2TnLQUSeLFZHFxsyyMXXuMPig3HK4A7SGFHupO+/1H/sL4xpH5zg +8+Zg2r8xnnney7abxcuv0uATWSIeKlNnb1ZO1BAxFnESc3GtyOCr2dUwZHX5mRVP ++Zxp2ni5qHNraf3wE2VPIQIDAQABoxIwEDAOBgNVHQ8BAf8EBAMCAKAwCwYJKoZI +hvcNAQELA4GBAIr2F7wsqmEU/J/kLyrCgEVXgaV/sKZq4pPNnzS0tBYk8fkV3V18 +sBJyHKRLL/wFZASvzDcVGCplXyMdAOCyfd8jO3F9Ac/xdlz10RrHJT75hNu3a7/n +9KNwKhfN4A1CQv2x372oGjRhCW5bHNCWx4PIVeNzCyq/KZhyY9sxHE6f +-----END CERTIFICATE----- +-----BEGIN PRIVATE KEY----- +MIICXgIBAAKBgQDK6NU0R0eiCYVquU4RcjKcLzGfx0aa1lMr2TnLQUSeLFZHFxsy +yMXXuMPig3HK4A7SGFHupO+/1H/sL4xpH5zg8+Zg2r8xnnney7abxcuv0uATWSIe +KlNnb1ZO1BAxFnESc3GtyOCr2dUwZHX5mRVP+Zxp2ni5qHNraf3wE2VPIQIDAQAB +AoGBALyvnSt7KUquDen7nXQtvJBudnf9KFPt//OjkdHHxNZNpoF/JCSqfQeoYkeu +MdAVYNLQGMiRifzZz4dDhA9xfUAuy7lcGQcMCxEQ1dwwuFaYkawbS0Tvy2PFlq2d +H5/HeDXU4EDJ3BZg0eYj2Bnkt1sJI35UKQSxblQ0MY2q0uFBAkEA5MMOogkgUx1C +67S1tFqMUSM8D0mZB0O5vOJZC5Gtt2Urju6vywge2ArExWRXlM2qGl8afFy2SgSv +Xk5eybcEiQJBAOMRwwbEoW5NYHuFFbSJyWll4n71CYuWuQOCzehDPyTb80WFZGLV +i91kFIjeERyq88eDE5xVB3ZuRiXqaShO/9kCQQCKOEkpInaDgZSjskZvuJ47kByD +6CYsO4GIXQMMeHML8ncFH7bb6AYq5ybJVb2NTU7QLFJmfeYuhvIm+xdOreRxAkEA +o5FC5Jg2FUfFzZSDmyZ6IONUsdF/i78KDV5nRv1R+hI6/oRlWNCtTNBv/lvBBd6b +dseUE9QoaQZsn5lpILEvmQJAZ0B+Or1rAYjnbjnUhdVZoy9kC4Zov+4UH3N/BtSy +KJRWUR0wTWfZBPZ5hAYZjTBEAFULaYCXlQKsODSp0M1aQA== +-----END PRIVATE KEY-----` diff --git a/builtin/credential/aws/pkcs7/encrypt.go b/builtin/credential/aws/pkcs7/encrypt.go new file mode 100644 index 0000000..90da67e --- /dev/null +++ b/builtin/credential/aws/pkcs7/encrypt.go @@ -0,0 +1,399 @@ +package pkcs7 + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "fmt" +) + +type envelopedData struct { + Version int + RecipientInfos []recipientInfo `asn1:"set"` + EncryptedContentInfo encryptedContentInfo +} + +type encryptedData struct { + Version int + EncryptedContentInfo encryptedContentInfo +} + +type recipientInfo struct { + Version int + IssuerAndSerialNumber issuerAndSerial + KeyEncryptionAlgorithm pkix.AlgorithmIdentifier + EncryptedKey []byte +} + +type encryptedContentInfo struct { + ContentType asn1.ObjectIdentifier + ContentEncryptionAlgorithm pkix.AlgorithmIdentifier + EncryptedContent asn1.RawValue `asn1:"tag:0,optional"` +} + +const ( + // 
EncryptionAlgorithmDESCBC is the DES CBC encryption algorithm + EncryptionAlgorithmDESCBC = iota + + // EncryptionAlgorithmAES128CBC is the AES 128 bits with CBC encryption algorithm + // Avoid this algorithm unless required for interoperability; use AES GCM instead. + EncryptionAlgorithmAES128CBC + + // EncryptionAlgorithmAES256CBC is the AES 256 bits with CBC encryption algorithm + // Avoid this algorithm unless required for interoperability; use AES GCM instead. + EncryptionAlgorithmAES256CBC + + // EncryptionAlgorithmAES128GCM is the AES 128 bits with GCM encryption algorithm + EncryptionAlgorithmAES128GCM + + // EncryptionAlgorithmAES256GCM is the AES 256 bits with GCM encryption algorithm + EncryptionAlgorithmAES256GCM +) + +// ContentEncryptionAlgorithm determines the algorithm used to encrypt the +// plaintext message. Change the value of this variable to change which +// algorithm is used in the Encrypt() function. +var ContentEncryptionAlgorithm = EncryptionAlgorithmDESCBC + +// ErrUnsupportedEncryptionAlgorithm is returned when attempting to encrypt +// content with an unsupported algorithm. +var ErrUnsupportedEncryptionAlgorithm = errors.New("pkcs7: cannot encrypt content: only DES-CBC, AES-CBC, and AES-GCM supported") + +// ErrPSKNotProvided is returned when attempting to encrypt +// using a PSK without actually providing the PSK. +var ErrPSKNotProvided = errors.New("pkcs7: cannot encrypt content: PSK not provided") + +const nonceSize = 12 + +type aesGCMParameters struct { + Nonce []byte `asn1:"tag:4"` + ICVLen int +} + +func encryptAESGCM(content []byte, key []byte) ([]byte, *encryptedContentInfo, error) { + var keyLen int + var algID asn1.ObjectIdentifier + switch ContentEncryptionAlgorithm { + case EncryptionAlgorithmAES128GCM: + keyLen = 16 + algID = OIDEncryptionAlgorithmAES128GCM + case EncryptionAlgorithmAES256GCM: + keyLen = 32 + algID = OIDEncryptionAlgorithmAES256GCM + default: + return nil, nil, fmt.Errorf("invalid ContentEncryptionAlgorithm in encryptAESGCM: %d", ContentEncryptionAlgorithm) + } + if key == nil { + // Create AES key + key = make([]byte, keyLen) + + _, err := rand.Read(key) + if err != nil { + return nil, nil, err + } + } + + // Create nonce + nonce := make([]byte, nonceSize) + + _, err := rand.Read(nonce) + if err != nil { + return nil, nil, err + } + + // Encrypt content + block, err := aes.NewCipher(key) + if err != nil { + return nil, nil, err + } + + gcm, err := cipher.NewGCM(block) + if err != nil { + return nil, nil, err + } + + ciphertext := gcm.Seal(nil, nonce, content, nil) + + // Prepare ASN.1 Encrypted Content Info + paramSeq := aesGCMParameters{ + Nonce: nonce, + ICVLen: gcm.Overhead(), + } + + paramBytes, err := asn1.Marshal(paramSeq) + if err != nil { + return nil, nil, err + } + + eci := encryptedContentInfo{ + ContentType: OIDData, + ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{ + Algorithm: algID, + Parameters: asn1.RawValue{ + Tag: asn1.TagSequence, + Bytes: paramBytes, + }, + }, + EncryptedContent: marshalEncryptedContent(ciphertext), + } + + return key, &eci, nil +} + +func encryptDESCBC(content []byte, key []byte) ([]byte, *encryptedContentInfo, error) { + if key == nil { + // Create DES key + key = make([]byte, 8) + + _, err := rand.Read(key) + if err != nil { + return nil, nil, err + } + } + + // Create CBC IV + iv := make([]byte, des.BlockSize) + _, err := rand.Read(iv) + if err != nil { + return nil, nil, err + } + + // Encrypt padded content + block, err := des.NewCipher(key) + if err != nil { + return nil, nil, err 
+ } + mode := cipher.NewCBCEncrypter(block, iv) + plaintext, err := pad(content, mode.BlockSize()) + if err != nil { + return nil, nil, err + } + cyphertext := make([]byte, len(plaintext)) + mode.CryptBlocks(cyphertext, plaintext) + + // Prepare ASN.1 Encrypted Content Info + eci := encryptedContentInfo{ + ContentType: OIDData, + ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{ + Algorithm: OIDEncryptionAlgorithmDESCBC, + Parameters: asn1.RawValue{Tag: 4, Bytes: iv}, + }, + EncryptedContent: marshalEncryptedContent(cyphertext), + } + + return key, &eci, nil +} + +func encryptAESCBC(content []byte, key []byte) ([]byte, *encryptedContentInfo, error) { + var keyLen int + var algID asn1.ObjectIdentifier + switch ContentEncryptionAlgorithm { + case EncryptionAlgorithmAES128CBC: + keyLen = 16 + algID = OIDEncryptionAlgorithmAES128CBC + case EncryptionAlgorithmAES256CBC: + keyLen = 32 + algID = OIDEncryptionAlgorithmAES256CBC + default: + return nil, nil, fmt.Errorf("invalid ContentEncryptionAlgorithm in encryptAESCBC: %d", ContentEncryptionAlgorithm) + } + + if key == nil { + // Create AES key + key = make([]byte, keyLen) + + _, err := rand.Read(key) + if err != nil { + return nil, nil, err + } + } + + // Create CBC IV + iv := make([]byte, aes.BlockSize) + _, err := rand.Read(iv) + if err != nil { + return nil, nil, err + } + + // Encrypt padded content + block, err := aes.NewCipher(key) + if err != nil { + return nil, nil, err + } + mode := cipher.NewCBCEncrypter(block, iv) + plaintext, err := pad(content, mode.BlockSize()) + if err != nil { + return nil, nil, err + } + cyphertext := make([]byte, len(plaintext)) + mode.CryptBlocks(cyphertext, plaintext) + + // Prepare ASN.1 Encrypted Content Info + eci := encryptedContentInfo{ + ContentType: OIDData, + ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{ + Algorithm: algID, + Parameters: asn1.RawValue{Tag: 4, Bytes: iv}, + }, + EncryptedContent: marshalEncryptedContent(cyphertext), + } + + return key, &eci, nil +} + +// Encrypt creates and returns an envelope data PKCS7 structure with encrypted +// recipient keys for each recipient public key. +// +// The algorithm used to perform encryption is determined by the current value +// of the global ContentEncryptionAlgorithm package variable. By default, the +// value is EncryptionAlgorithmDESCBC. To use a different algorithm, change the +// value before calling Encrypt(). 
For example: +// +// ContentEncryptionAlgorithm = EncryptionAlgorithmAES128GCM +// +// TODO(fullsailor): Add support for encrypting content with other algorithms +func Encrypt(content []byte, recipients []*x509.Certificate) ([]byte, error) { + var eci *encryptedContentInfo + var key []byte + var err error + + // Apply chosen symmetric encryption method + switch ContentEncryptionAlgorithm { + case EncryptionAlgorithmDESCBC: + key, eci, err = encryptDESCBC(content, nil) + case EncryptionAlgorithmAES128CBC: + fallthrough + case EncryptionAlgorithmAES256CBC: + key, eci, err = encryptAESCBC(content, nil) + case EncryptionAlgorithmAES128GCM: + fallthrough + case EncryptionAlgorithmAES256GCM: + key, eci, err = encryptAESGCM(content, nil) + + default: + return nil, ErrUnsupportedEncryptionAlgorithm + } + + if err != nil { + return nil, err + } + + // Prepare each recipient's encrypted cipher key + recipientInfos := make([]recipientInfo, len(recipients)) + for i, recipient := range recipients { + encrypted, err := encryptKey(key, recipient) + if err != nil { + return nil, err + } + ias, err := cert2issuerAndSerial(recipient) + if err != nil { + return nil, err + } + info := recipientInfo{ + Version: 0, + IssuerAndSerialNumber: ias, + KeyEncryptionAlgorithm: pkix.AlgorithmIdentifier{ + Algorithm: OIDEncryptionAlgorithmRSA, + }, + EncryptedKey: encrypted, + } + recipientInfos[i] = info + } + + // Prepare envelope content + envelope := envelopedData{ + EncryptedContentInfo: *eci, + Version: 0, + RecipientInfos: recipientInfos, + } + innerContent, err := asn1.Marshal(envelope) + if err != nil { + return nil, err + } + + // Prepare outer payload structure + wrapper := contentInfo{ + ContentType: OIDEnvelopedData, + Content: asn1.RawValue{Class: 2, Tag: 0, IsCompound: true, Bytes: innerContent}, + } + + return asn1.Marshal(wrapper) +} + +// EncryptUsingPSK creates and returns an encrypted data PKCS7 structure, +// encrypted using caller provided pre-shared secret. 
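+//
+// Editorial usage sketch (not part of the upstream import): a PSK round trip
+// through this package's API. The AES-128-GCM mode and the 16-byte key
+// literal are illustrative assumptions, not requirements.
+//
+//	ContentEncryptionAlgorithm = EncryptionAlgorithmAES128GCM
+//	key := []byte("0123456789abcdef") // 16 bytes => AES-128
+//	ct, err := EncryptUsingPSK([]byte("hello"), key)
+//	if err != nil {
+//		// handle err
+//	}
+//	p7, err := Parse(ct)
+//	if err != nil {
+//		// handle err
+//	}
+//	pt, err := p7.DecryptUsingPSK(key) // pt == []byte("hello")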
+func EncryptUsingPSK(content []byte, key []byte) ([]byte, error) {
+	var eci *encryptedContentInfo
+	var err error
+
+	if key == nil {
+		return nil, ErrPSKNotProvided
+	}
+
+	// Apply chosen symmetric encryption method
+	switch ContentEncryptionAlgorithm {
+	case EncryptionAlgorithmDESCBC:
+		_, eci, err = encryptDESCBC(content, key)
+
+	case EncryptionAlgorithmAES128GCM:
+		fallthrough
+	case EncryptionAlgorithmAES256GCM:
+		_, eci, err = encryptAESGCM(content, key)
+
+	default:
+		return nil, ErrUnsupportedEncryptionAlgorithm
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	// Prepare encrypted-data content
+	ed := encryptedData{
+		Version:              0,
+		EncryptedContentInfo: *eci,
+	}
+	innerContent, err := asn1.Marshal(ed)
+	if err != nil {
+		return nil, err
+	}
+
+	// Prepare outer payload structure
+	wrapper := contentInfo{
+		ContentType: OIDEncryptedData,
+		Content:     asn1.RawValue{Class: 2, Tag: 0, IsCompound: true, Bytes: innerContent},
+	}
+
+	return asn1.Marshal(wrapper)
+}
+
+func marshalEncryptedContent(content []byte) asn1.RawValue {
+	asn1Content, _ := asn1.Marshal(content)
+	return asn1.RawValue{Tag: 0, Class: 2, Bytes: asn1Content, IsCompound: true}
+}
+
+func encryptKey(key []byte, recipient *x509.Certificate) ([]byte, error) {
+	// Use a comma-ok assertion so a non-RSA public key returns an error
+	// instead of panicking on a failed type assertion.
+	if pub, ok := recipient.PublicKey.(*rsa.PublicKey); ok {
+		return rsa.EncryptPKCS1v15(rand.Reader, pub, key)
+	}
+	return nil, ErrUnsupportedAlgorithm
+}
+
+func pad(data []byte, blocklen int) ([]byte, error) {
+	if blocklen < 1 {
+		return nil, fmt.Errorf("invalid blocklen %d", blocklen)
+	}
+	padlen := blocklen - (len(data) % blocklen)
+	if padlen == 0 {
+		padlen = blocklen
+	}
+	pad := bytes.Repeat([]byte{byte(padlen)}, padlen)
+	return append(data, pad...), nil
+}
diff --git a/builtin/credential/aws/pkcs7/encrypt_test.go b/builtin/credential/aws/pkcs7/encrypt_test.go
new file mode 100644
index 0000000..7f1bead
--- /dev/null
+++ b/builtin/credential/aws/pkcs7/encrypt_test.go
@@ -0,0 +1,101 @@
+package pkcs7
+
+import (
+	"bytes"
+	"crypto/x509"
+	"testing"
+)
+
+func TestEncrypt(t *testing.T) {
+	modes := []int{
+		EncryptionAlgorithmDESCBC,
+		EncryptionAlgorithmAES128CBC,
+		EncryptionAlgorithmAES256CBC,
+		EncryptionAlgorithmAES128GCM,
+		EncryptionAlgorithmAES256GCM,
+	}
+	sigalgs := []x509.SignatureAlgorithm{
+		x509.SHA256WithRSA,
+		x509.SHA512WithRSA,
+	}
+	for _, mode := range modes {
+		for _, sigalg := range sigalgs {
+			ContentEncryptionAlgorithm = mode
+
+			plaintext := []byte("Hello Secret World!")
+			cert, err := createTestCertificate(sigalg)
+			if err != nil {
+				t.Fatal(err)
+			}
+			encrypted, err := Encrypt(plaintext, []*x509.Certificate{cert.Certificate})
+			if err != nil {
+				t.Fatal(err)
+			}
+			p7, err := Parse(encrypted)
+			if err != nil {
+				t.Fatalf("cannot Parse encrypted result: %s", err)
+			}
+			result, err := p7.Decrypt(cert.Certificate, *cert.PrivateKey)
+			if err != nil {
+				t.Fatalf("cannot Decrypt encrypted result: %s", err)
+			}
+			if !bytes.Equal(plaintext, result) {
+				t.Errorf("encrypted data does not match plaintext:\n\tExpected: %s\n\tActual: %s", plaintext, result)
+			}
+		}
+	}
+}
+
+func TestEncryptUsingPSK(t *testing.T) {
+	modes := []int{
+		EncryptionAlgorithmDESCBC,
+		EncryptionAlgorithmAES128GCM,
+	}
+
+	for _, mode := range modes {
+		ContentEncryptionAlgorithm = mode
+		plaintext := []byte("Hello Secret World!")
+		var key []byte
+
+		switch mode {
+		case EncryptionAlgorithmDESCBC:
+			key = []byte("64BitKey")
+		case EncryptionAlgorithmAES128GCM:
+			key = []byte("128BitKey4AESGCM")
+		}
+		ciphertext, err := EncryptUsingPSK(plaintext, key)
+		if
err != nil { + t.Fatal(err) + } + + p7, _ := Parse(ciphertext) + result, err := p7.DecryptUsingPSK(key) + if err != nil { + t.Fatalf("cannot Decrypt encrypted result: %s", err) + } + if !bytes.Equal(plaintext, result) { + t.Errorf("encrypted data does not match plaintext:\n\tExpected: %s\n\tActual: %s", plaintext, result) + } + } +} + +func TestPad(t *testing.T) { + tests := []struct { + Original []byte + Expected []byte + BlockSize int + }{ + {[]byte{0x1, 0x2, 0x3, 0x10}, []byte{0x1, 0x2, 0x3, 0x10, 0x4, 0x4, 0x4, 0x4}, 8}, + {[]byte{0x1, 0x2, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0}, []byte{0x1, 0x2, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8}, 8}, + } + for _, test := range tests { + padded, err := pad(test.Original, test.BlockSize) + if err != nil { + t.Errorf("pad encountered error: %s", err) + continue + } + if !bytes.Equal(test.Expected, padded) { + t.Errorf("pad results mismatch:\n\tExpected: %X\n\tActual: %X", test.Expected, padded) + } + } +} diff --git a/builtin/credential/aws/pkcs7/pkcs7.go b/builtin/credential/aws/pkcs7/pkcs7.go new file mode 100644 index 0000000..eecff9b --- /dev/null +++ b/builtin/credential/aws/pkcs7/pkcs7.go @@ -0,0 +1,290 @@ +// Package pkcs7 implements parsing and generation of some PKCS#7 structures. +package pkcs7 + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "fmt" + "sort" + + _ "crypto/sha1" // for crypto.SHA1 +) + +// PKCS7 Represents a PKCS7 structure +type PKCS7 struct { + Content []byte + Certificates []*x509.Certificate + CRLs []pkix.CertificateList + Signers []signerInfo + raw interface{} +} + +type contentInfo struct { + ContentType asn1.ObjectIdentifier + Content asn1.RawValue `asn1:"explicit,optional,tag:0"` +} + +// ErrUnsupportedContentType is returned when a PKCS7 content is not supported. 
+// Currently only Data (1.2.840.113549.1.7.1), Signed Data (1.2.840.113549.1.7.2), +// and Enveloped Data are supported (1.2.840.113549.1.7.3) +var ErrUnsupportedContentType = errors.New("pkcs7: cannot parse data: unimplemented content type") + +type unsignedData []byte + +var ( + // Signed Data OIDs + OIDData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 1} + OIDSignedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 2} + OIDEnvelopedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 3} + OIDEncryptedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 6} + OIDAttributeContentType = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 3} + OIDAttributeMessageDigest = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 4} + OIDAttributeSigningTime = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 5} + + // Digest Algorithms + OIDDigestAlgorithmSHA1 = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 26} + OIDDigestAlgorithmSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 1} + OIDDigestAlgorithmSHA384 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 2} + OIDDigestAlgorithmSHA512 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 3} + + OIDDigestAlgorithmDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1} + OIDDigestAlgorithmDSASHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} + + OIDDigestAlgorithmECDSASHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1} + OIDDigestAlgorithmECDSASHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2} + OIDDigestAlgorithmECDSASHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3} + OIDDigestAlgorithmECDSASHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4} + + // Signature Algorithms + OIDEncryptionAlgorithmRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1} + OIDEncryptionAlgorithmRSASHA1 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} + OIDEncryptionAlgorithmRSASHA256 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} + OIDEncryptionAlgorithmRSASHA384 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} + OIDEncryptionAlgorithmRSASHA512 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} + + OIDEncryptionAlgorithmECDSAP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7} + OIDEncryptionAlgorithmECDSAP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34} + OIDEncryptionAlgorithmECDSAP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35} + + // Encryption Algorithms + OIDEncryptionAlgorithmDESCBC = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 7} + OIDEncryptionAlgorithmDESEDE3CBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7} + OIDEncryptionAlgorithmAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42} + OIDEncryptionAlgorithmAES128GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 6} + OIDEncryptionAlgorithmAES128CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 2} + OIDEncryptionAlgorithmAES256GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 46} +) + +func getHashForOID(oid asn1.ObjectIdentifier) (crypto.Hash, error) { + switch { + case oid.Equal(OIDDigestAlgorithmSHA1), oid.Equal(OIDDigestAlgorithmECDSASHA1), + oid.Equal(OIDDigestAlgorithmDSA), oid.Equal(OIDDigestAlgorithmDSASHA1), + oid.Equal(OIDEncryptionAlgorithmRSA): + return crypto.SHA1, nil + case oid.Equal(OIDDigestAlgorithmSHA256), oid.Equal(OIDDigestAlgorithmECDSASHA256): + return crypto.SHA256, nil + case oid.Equal(OIDDigestAlgorithmSHA384), oid.Equal(OIDDigestAlgorithmECDSASHA384): + return crypto.SHA384, nil + case oid.Equal(OIDDigestAlgorithmSHA512), oid.Equal(OIDDigestAlgorithmECDSASHA512): + return crypto.SHA512, nil + } + return crypto.Hash(0), 
ErrUnsupportedAlgorithm +} + +// getDigestOIDForSignatureAlgorithm takes an x509.SignatureAlgorithm +// and returns the corresponding OID digest algorithm +func getDigestOIDForSignatureAlgorithm(digestAlg x509.SignatureAlgorithm) (asn1.ObjectIdentifier, error) { + switch digestAlg { + case x509.SHA1WithRSA, x509.ECDSAWithSHA1: + return OIDDigestAlgorithmSHA1, nil + case x509.SHA256WithRSA, x509.ECDSAWithSHA256: + return OIDDigestAlgorithmSHA256, nil + case x509.SHA384WithRSA, x509.ECDSAWithSHA384: + return OIDDigestAlgorithmSHA384, nil + case x509.SHA512WithRSA, x509.ECDSAWithSHA512: + return OIDDigestAlgorithmSHA512, nil + } + return nil, fmt.Errorf("pkcs7: cannot convert hash to oid, unknown hash algorithm") +} + +// getOIDForEncryptionAlgorithm takes the private key type of the signer and +// the OID of a digest algorithm to return the appropriate signerInfo.DigestEncryptionAlgorithm +func getOIDForEncryptionAlgorithm(pkey crypto.PrivateKey, OIDDigestAlg asn1.ObjectIdentifier) (asn1.ObjectIdentifier, error) { + switch pkey.(type) { + case *rsa.PrivateKey: + switch { + default: + return OIDEncryptionAlgorithmRSA, nil + case OIDDigestAlg.Equal(OIDEncryptionAlgorithmRSA): + return OIDEncryptionAlgorithmRSA, nil + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA1): + return OIDEncryptionAlgorithmRSASHA1, nil + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA256): + return OIDEncryptionAlgorithmRSASHA256, nil + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA384): + return OIDEncryptionAlgorithmRSASHA384, nil + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA512): + return OIDEncryptionAlgorithmRSASHA512, nil + } + case *ecdsa.PrivateKey: + switch { + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA1): + return OIDDigestAlgorithmECDSASHA1, nil + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA256): + return OIDDigestAlgorithmECDSASHA256, nil + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA384): + return OIDDigestAlgorithmECDSASHA384, nil + case OIDDigestAlg.Equal(OIDDigestAlgorithmSHA512): + return OIDDigestAlgorithmECDSASHA512, nil + } + case *dsa.PrivateKey: + return OIDDigestAlgorithmDSA, nil + } + return nil, fmt.Errorf("pkcs7: cannot convert encryption algorithm to oid, unknown private key type %T", pkey) +} + +// Parse decodes a DER encoded PKCS7 package +func Parse(data []byte) (p7 *PKCS7, err error) { + if len(data) == 0 { + return nil, errors.New("pkcs7: input data is empty") + } + var info contentInfo + der, err := ber2der(data) + if err != nil { + return nil, err + } + rest, err := asn1.Unmarshal(der, &info) + if len(rest) > 0 { + err = asn1.SyntaxError{Msg: "trailing data"} + return + } + if err != nil { + return + } + + // fmt.Printf("--> Content Type: %s", info.ContentType) + switch { + case info.ContentType.Equal(OIDSignedData): + return parseSignedData(info.Content.Bytes) + case info.ContentType.Equal(OIDEnvelopedData): + return parseEnvelopedData(info.Content.Bytes) + case info.ContentType.Equal(OIDEncryptedData): + return parseEncryptedData(info.Content.Bytes) + } + return nil, ErrUnsupportedContentType +} + +func parseEnvelopedData(data []byte) (*PKCS7, error) { + var ed envelopedData + if _, err := asn1.Unmarshal(data, &ed); err != nil { + return nil, err + } + return &PKCS7{ + raw: ed, + }, nil +} + +func parseEncryptedData(data []byte) (*PKCS7, error) { + var ed encryptedData + if _, err := asn1.Unmarshal(data, &ed); err != nil { + return nil, err + } + return &PKCS7{ + raw: ed, + }, nil +} + +func (raw rawCertificates) Parse() ([]*x509.Certificate, error) { + if len(raw.Raw) == 0 { + 
return nil, nil + } + + var val asn1.RawValue + if _, err := asn1.Unmarshal(raw.Raw, &val); err != nil { + return nil, err + } + + return x509.ParseCertificates(val.Bytes) +} + +func isCertMatchForIssuerAndSerial(cert *x509.Certificate, ias issuerAndSerial) bool { + return cert.SerialNumber.Cmp(ias.SerialNumber) == 0 && bytes.Equal(cert.RawIssuer, ias.IssuerName.FullBytes) +} + +// Attribute represents a key value pair attribute. Value must be marshalable byte +// `encoding/asn1` +type Attribute struct { + Type asn1.ObjectIdentifier + Value interface{} +} + +type attributes struct { + types []asn1.ObjectIdentifier + values []interface{} +} + +// Add adds the attribute, maintaining insertion order +func (attrs *attributes) Add(attrType asn1.ObjectIdentifier, value interface{}) { + attrs.types = append(attrs.types, attrType) + attrs.values = append(attrs.values, value) +} + +type sortableAttribute struct { + SortKey []byte + Attribute attribute +} + +type attributeSet []sortableAttribute + +func (sa attributeSet) Len() int { + return len(sa) +} + +func (sa attributeSet) Less(i, j int) bool { + return bytes.Compare(sa[i].SortKey, sa[j].SortKey) < 0 +} + +func (sa attributeSet) Swap(i, j int) { + sa[i], sa[j] = sa[j], sa[i] +} + +func (sa attributeSet) Attributes() []attribute { + attrs := make([]attribute, len(sa)) + for i, attr := range sa { + attrs[i] = attr.Attribute + } + return attrs +} + +func (attrs *attributes) ForMarshalling() ([]attribute, error) { + sortables := make(attributeSet, len(attrs.types)) + for i := range sortables { + attrType := attrs.types[i] + attrValue := attrs.values[i] + asn1Value, err := asn1.Marshal(attrValue) + if err != nil { + return nil, err + } + attr := attribute{ + Type: attrType, + Value: asn1.RawValue{Tag: 17, IsCompound: true, Bytes: asn1Value}, // 17 == SET tag + } + encoded, err := asn1.Marshal(attr) + if err != nil { + return nil, err + } + sortables[i] = sortableAttribute{ + SortKey: encoded, + Attribute: attr, + } + } + sort.Sort(sortables) + return sortables.Attributes(), nil +} diff --git a/builtin/credential/aws/pkcs7/pkcs7_test.go b/builtin/credential/aws/pkcs7/pkcs7_test.go new file mode 100644 index 0000000..7753c17 --- /dev/null +++ b/builtin/credential/aws/pkcs7/pkcs7_test.go @@ -0,0 +1,283 @@ +package pkcs7 + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "log" + "math/big" + "os" + "time" +) + +var test1024Key, test2048Key, test3072Key, test4096Key *rsa.PrivateKey + +func init() { + test1024Key = &rsa.PrivateKey{ + PublicKey: rsa.PublicKey{ + N: fromBase10("123024078101403810516614073341068864574068590522569345017786163424062310013967742924377390210586226651760719671658568413826602264886073432535341149584680111145880576802262550990305759285883150470245429547886689754596541046564560506544976611114898883158121012232676781340602508151730773214407220733898059285561"), + E: 65537, + }, + D: fromBase10("118892427340746627750435157989073921703209000249285930635312944544706203626114423392257295670807166199489096863209592887347935991101581502404113203993092422730000157893515953622392722273095289787303943046491132467130346663160540744582438810535626328230098940583296878135092036661410664695896115177534496784545"), + Primes: []*big.Int{ + fromBase10("12172745919282672373981903347443034348576729562395784527365032103134165674508405592530417723266847908118361582847315228810176708212888860333051929276459099"), + 
fromBase10("10106518193772789699356660087736308350857919389391620140340519320928952625438936098550728858345355053201610649202713962702543058578827268756755006576249339"), + }, + } + test1024Key.Precompute() + test2048Key = &rsa.PrivateKey{ + PublicKey: rsa.PublicKey{ + N: fromBase10("14314132931241006650998084889274020608918049032671858325988396851334124245188214251956198731333464217832226406088020736932173064754214329009979944037640912127943488972644697423190955557435910767690712778463524983667852819010259499695177313115447116110358524558307947613422897787329221478860907963827160223559690523660574329011927531289655711860504630573766609239332569210831325633840174683944553667352219670930408593321661375473885147973879086994006440025257225431977751512374815915392249179976902953721486040787792801849818254465486633791826766873076617116727073077821584676715609985777563958286637185868165868520557"), + E: 3, + }, + D: fromBase10("9542755287494004433998723259516013739278699355114572217325597900889416163458809501304132487555642811888150937392013824621448709836142886006653296025093941418628992648429798282127303704957273845127141852309016655778568546006839666463451542076964744073572349705538631742281931858219480985907271975884773482372966847639853897890615456605598071088189838676728836833012254065983259638538107719766738032720239892094196108713378822882383694456030043492571063441943847195939549773271694647657549658603365629458610273821292232646334717612674519997533901052790334279661754176490593041941863932308687197618671528035670452762731"), + Primes: []*big.Int{ + fromBase10("130903255182996722426771613606077755295583329135067340152947172868415809027537376306193179624298874215608270802054347609836776473930072411958753044562214537013874103802006369634761074377213995983876788718033850153719421695468704276694983032644416930879093914927146648402139231293035971427838068945045019075433"), + fromBase10("109348945610485453577574767652527472924289229538286649661240938988020367005475727988253438647560958573506159449538793540472829815903949343191091817779240101054552748665267574271163617694640513549693841337820602726596756351006149518830932261246698766355347898158548465400674856021497190430791824869615170301029"), + }, + } + test2048Key.Precompute() + test3072Key = &rsa.PrivateKey{ + PublicKey: rsa.PublicKey{ + N: fromBase10("4799422180968749215324244710281712119910779465109490663934897082847293004098645365195947978124390029272750644394844443980065532911010718425428791498896288210928474905407341584968381379157418577471272697781778686372450913810019702928839200328075568223462554606149618941566459398862673532997592879359280754226882565483298027678735544377401276021471356093819491755877827249763065753555051973844057308627201762456191918852016986546071426986328720794061622370410645440235373576002278045257207695462423797272017386006110722769072206022723167102083033531426777518054025826800254337147514768377949097720074878744769255210076910190151785807232805749219196645305822228090875616900385866236956058984170647782567907618713309775105943700661530312800231153745705977436176908325539234432407050398510090070342851489496464612052853185583222422124535243967989533830816012180864309784486694786581956050902756173889941244024888811572094961378021"), + E: 65537, + }, + D: 
fromBase10("4068124900056380177006532461065648259352178312499768312132802353620854992915205894105621345694615110794369150964768050224096623567443679436821868510233726084582567244003894477723706516831312989564775159596496449435830457803384416702014837685962523313266832032687145914871879794104404800823188153886925022171560391765913739346955738372354826804228989767120353182641396181570533678315099748218734875742705419933837638038793286534641711407564379950728858267828581787483317040753987167237461567332386718574803231955771633274184646232632371006762852623964054645811527580417392163873708539175349637050049959954373319861427407953413018816604365474462455009323937599275324390953644555294418021286807661559165324810415569396577697316798600308544755741549699523972971375304826663847015905713096287495342701286542193782001358775773848824496321550110946106870685499577993864871847542645561943034990484973293461948058147956373115641615329"), + Primes: []*big.Int{ + fromBase10("2378529069722721185825622840841310902793949682948530343491428052737890236476884657507685118578733560141370511507721598189068683665232991988491561624429938984370132428230072355214627085652359350722926394699707232921674771664421591347888367477300909202851476404132163673865768760147403525700174918450753162242834161458300343282159799476695001920226357456953682236859505243928716782707623075239350380352265954107362618991716602898266999700316937680986690964564264877"), + fromBase10("2017811025336026464312837780072272578817919741496395062543647660689775637351085991504709917848745137013798005682591633910555599626950744674459976829106750083386168859581016361317479081273480343110649405858059581933773354781034946787147300862495438979895430001323443224335618577322449133208754541656374335100929456885995320929464029817626916719434010943205170760536768893924932021302887114400922813817969176636993508191950649313115712159241971065134077636674146073"), + }, + } + test3072Key.Precompute() + test4096Key = &rsa.PrivateKey{ + PublicKey: rsa.PublicKey{ + N: fromBase10("633335480064287130853997429184971616419051348693342219741748040433588285601270210251206421401040394238592139790962887290698043839174341843721930134010306454716566698330215646704263665452264344664385995704186692432827662862845900348526672531755932642433662686500295989783595767573119607065791980381547677840410600100715146047382485989885183858757974681241303484641390718944520330953604501686666386926996348457928415093305041429178744778762826377713889019740060910363468343855830206640274442887621960581569183233822878661711798998132931623726434336448716605363514220760343097572198620479297583609779817750646169845195672483600293522186340560792255595411601450766002877850696008003794520089358819042318331840490155176019070646738739580486357084733208876620846449161909966690602374519398451042362690200166144326179405976024265116931974936425064291406950542193873313447617169603706868220189295654943247311295475722243471700112334609817776430552541319671117235957754556272646031356496763094955985615723596562217985372503002989591679252640940571608314743271809251568670314461039035793703429977801961867815257832671786542212589906513979094156334941265621017752516999186481477500481433634914622735206243841674973785078408289183000133399026553"), + E: 65537, + }, + D: 
fromBase10("439373650557744155078930178606343279553665694488479749802070836418412881168612407941793966086633543867614175621952769177088930851151267623886678906158545451731745754402575409204816390946376103491325109185445659065122640946673660760274557781540431107937331701243915001777636528502669576801704352961341634812275635811512806966908648671988644114352046582195051714797831307925775689566757438907578527366568747104508496278929566712224252103563340770696548181508180254674236716995730292431858611476396845443056967589437890065663497768422598977743046882539288481002449571403783500529740184608873520856954837631427724158592309018382711485601884461168736465751756282510065053161144027097169985941910909130083273691945578478173708396726266170473745329617793866669307716920992380350270584929908460462802627239204245339385636926433446418108504614031393494119344916828744888432279343816084433424594432427362258172264834429525166677273382617457205387388293888430391895615438030066428745187333897518037597413369705720436392869403948934993623418405908467147848576977008003556716087129242155836114780890054057743164411952731290520995017097151300091841286806603044227906213832083363876549637037625314539090155417589796428888619937329669464810549362433"), + Primes: []*big.Int{ + fromBase10("25745433817240673759910623230144796182285844101796353869339294232644316274580053211056707671663014355388701931204078502829809738396303142990312095225333440050808647355535878394534263839500592870406002873182360027755750148248672968563366185348499498613479490545488025779331426515670185366021612402246813511722553210128074701620113404560399242413747318161403908617342170447610792422053460359960010544593668037305465806912471260799852789913123044326555978680190904164976511331681163576833618899773550873682147782263100803907156362439021929408298804955194748640633152519828940133338948391986823456836070708197320166146761"), + fromBase10("24599914864909676687852658457515103765368967514652318497893275892114442089314173678877914038802355565271545910572804267918959612739009937926962653912943833939518967731764560204997062096919833970670512726396663920955497151415639902788974842698619579886297871162402643104696160155894685518587660015182381685605752989716946154299190561137541792784125356553411300817844325739404126956793095254412123887617931225840421856505925283322918693259047428656823141903489964287619982295891439430302405252447010728112098326033634688757933930065610737780413018498561434074501822951716586796047404555397992425143397497639322075233073"), + }, + } + test4096Key.Precompute() +} + +func fromBase10(base10 string) *big.Int { + i, ok := new(big.Int).SetString(base10, 10) + if !ok { + panic("bad number: " + base10) + } + return i +} + +type certKeyPair struct { + Certificate *x509.Certificate + PrivateKey *crypto.PrivateKey +} + +func createTestCertificate(sigAlg x509.SignatureAlgorithm) (certKeyPair, error) { + signer, err := createTestCertificateByIssuer("Eddard Stark", nil, sigAlg, true) + if err != nil { + return certKeyPair{}, err + } + pair, err := createTestCertificateByIssuer("Jon Snow", signer, sigAlg, false) + if err != nil { + return certKeyPair{}, err + } + return *pair, nil +} + +func createTestCertificateByIssuer(name string, issuer *certKeyPair, sigAlg x509.SignatureAlgorithm, isCA bool) (*certKeyPair, error) { + var ( + err error + priv crypto.PrivateKey + derCert []byte + issuerCert *x509.Certificate + issuerKey crypto.PrivateKey + ) + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 32) + serialNumber, err := 
rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return nil, err + } + + template := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + CommonName: name, + Organization: []string{"Acme Co"}, + }, + NotBefore: time.Now().Add(-1 * time.Second), + NotAfter: time.Now().AddDate(1, 0, 0), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageEmailProtection}, + } + if issuer != nil { + issuerCert = issuer.Certificate + issuerKey = *issuer.PrivateKey + } + switch sigAlg { + case x509.SHA256WithRSA: + priv = test2048Key + switch issuerKey.(type) { + case *rsa.PrivateKey: + template.SignatureAlgorithm = x509.SHA256WithRSA + case *ecdsa.PrivateKey: + template.SignatureAlgorithm = x509.ECDSAWithSHA256 + case *dsa.PrivateKey: + template.SignatureAlgorithm = x509.DSAWithSHA256 + } + case x509.SHA384WithRSA: + priv = test3072Key + switch issuerKey.(type) { + case *rsa.PrivateKey: + template.SignatureAlgorithm = x509.SHA384WithRSA + case *ecdsa.PrivateKey: + template.SignatureAlgorithm = x509.ECDSAWithSHA384 + case *dsa.PrivateKey: + template.SignatureAlgorithm = x509.DSAWithSHA256 + } + case x509.SHA512WithRSA: + priv = test4096Key + switch issuerKey.(type) { + case *rsa.PrivateKey: + template.SignatureAlgorithm = x509.SHA512WithRSA + case *ecdsa.PrivateKey: + template.SignatureAlgorithm = x509.ECDSAWithSHA512 + case *dsa.PrivateKey: + template.SignatureAlgorithm = x509.DSAWithSHA256 + } + case x509.ECDSAWithSHA256: + priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return nil, err + } + switch issuerKey.(type) { + case *rsa.PrivateKey: + template.SignatureAlgorithm = x509.SHA256WithRSA + case *ecdsa.PrivateKey: + template.SignatureAlgorithm = x509.ECDSAWithSHA256 + case *dsa.PrivateKey: + template.SignatureAlgorithm = x509.DSAWithSHA256 + } + case x509.ECDSAWithSHA384: + priv, err = ecdsa.GenerateKey(elliptic.P384(), rand.Reader) + if err != nil { + return nil, err + } + switch issuerKey.(type) { + case *rsa.PrivateKey: + template.SignatureAlgorithm = x509.SHA384WithRSA + case *ecdsa.PrivateKey: + template.SignatureAlgorithm = x509.ECDSAWithSHA384 + case *dsa.PrivateKey: + template.SignatureAlgorithm = x509.DSAWithSHA256 + } + case x509.ECDSAWithSHA512: + priv, err = ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + return nil, err + } + switch issuerKey.(type) { + case *rsa.PrivateKey: + template.SignatureAlgorithm = x509.SHA512WithRSA + case *ecdsa.PrivateKey: + template.SignatureAlgorithm = x509.ECDSAWithSHA512 + case *dsa.PrivateKey: + template.SignatureAlgorithm = x509.DSAWithSHA256 + } + } + if isCA { + template.IsCA = true + template.KeyUsage |= x509.KeyUsageCertSign + template.BasicConstraintsValid = true + } + if issuer == nil { + // no issuer given,make this a self-signed root cert + issuerCert = &template + issuerKey = priv + } + + log.Println("creating cert", name, "issued by", issuerCert.Subject.CommonName, "with sigalg", sigAlg) + switch priv.(type) { + case *rsa.PrivateKey: + switch issuerKey.(type) { + case *rsa.PrivateKey: + derCert, err = x509.CreateCertificate(rand.Reader, &template, issuerCert, priv.(*rsa.PrivateKey).Public(), issuerKey.(*rsa.PrivateKey)) + case *ecdsa.PrivateKey: + derCert, err = x509.CreateCertificate(rand.Reader, &template, issuerCert, priv.(*rsa.PrivateKey).Public(), issuerKey.(*ecdsa.PrivateKey)) + case *dsa.PrivateKey: + derCert, err = x509.CreateCertificate(rand.Reader, &template, issuerCert, 
priv.(*rsa.PrivateKey).Public(), issuerKey.(*dsa.PrivateKey))
+		}
+	case *ecdsa.PrivateKey:
+		switch issuerKey.(type) {
+		case *rsa.PrivateKey:
+			derCert, err = x509.CreateCertificate(rand.Reader, &template, issuerCert, priv.(*ecdsa.PrivateKey).Public(), issuerKey.(*rsa.PrivateKey))
+		case *ecdsa.PrivateKey:
+			derCert, err = x509.CreateCertificate(rand.Reader, &template, issuerCert, priv.(*ecdsa.PrivateKey).Public(), issuerKey.(*ecdsa.PrivateKey))
+		case *dsa.PrivateKey:
+			derCert, err = x509.CreateCertificate(rand.Reader, &template, issuerCert, priv.(*ecdsa.PrivateKey).Public(), issuerKey.(*dsa.PrivateKey))
+		}
+	case *dsa.PrivateKey:
+		// use the extracted DSA public key below; asserting priv to
+		// *dsa.PublicKey would panic, since priv holds a *dsa.PrivateKey
+		pub := &priv.(*dsa.PrivateKey).PublicKey
+		switch issuerKey := issuerKey.(type) {
+		case *rsa.PrivateKey:
+			derCert, err = x509.CreateCertificate(rand.Reader, &template, issuerCert, pub, issuerKey)
+		case *ecdsa.PrivateKey:
+			derCert, err = x509.CreateCertificate(rand.Reader, &template, issuerCert, pub, issuerKey)
+		case *dsa.PrivateKey:
+			derCert, err = x509.CreateCertificate(rand.Reader, &template, issuerCert, pub, issuerKey)
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	if len(derCert) == 0 {
+		return nil, fmt.Errorf("no certificate created, probably due to wrong keys. types were %T and %T", priv, issuerKey)
+	}
+	cert, err := x509.ParseCertificate(derCert)
+	if err != nil {
+		return nil, err
+	}
+	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})
+	return &certKeyPair{
+		Certificate: cert,
+		PrivateKey:  &priv,
+	}, nil
+}
+
+type TestFixture struct {
+	Input       []byte
+	Certificate *x509.Certificate
+	PrivateKey  *rsa.PrivateKey
+}
+
+func UnmarshalTestFixture(testPEMBlock string) TestFixture {
+	var result TestFixture
+	var derBlock *pem.Block
+	pemBlock := []byte(testPEMBlock)
+	for {
+		derBlock, pemBlock = pem.Decode(pemBlock)
+		if derBlock == nil {
+			break
+		}
+		switch derBlock.Type {
+		case "PKCS7":
+			result.Input = derBlock.Bytes
+		case "CERTIFICATE":
+			result.Certificate, _ = x509.ParseCertificate(derBlock.Bytes)
+		case "PRIVATE KEY":
+			result.PrivateKey, _ = x509.ParsePKCS1PrivateKey(derBlock.Bytes)
+		}
+	}
+
+	return result
+}
diff --git a/builtin/credential/aws/pkcs7/sign.go b/builtin/credential/aws/pkcs7/sign.go
new file mode 100644
index 0000000..72b9938
--- /dev/null
+++ b/builtin/credential/aws/pkcs7/sign.go
@@ -0,0 +1,435 @@
+package pkcs7
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/dsa"
+	"crypto/rand"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"errors"
+	"fmt"
+	"math/big"
+	"time"
+
+	"github.com/hashicorp/vault/internal"
+)
+
+func init() {
+	internal.PatchSha1()
+}
+
+// SignedData is an opaque data structure for creating signed data payloads
+type SignedData struct {
+	sd                  signedData
+	certs               []*x509.Certificate
+	data, messageDigest []byte
+	digestOid           asn1.ObjectIdentifier
+	encryptionOid       asn1.ObjectIdentifier
+}
+
+// NewSignedData takes data and initializes a PKCS7 SignedData struct that is
+// ready to be signed via AddSigner. The digest algorithm is set to SHA-256 by default
+// and can be changed by calling SetDigestAlgorithm.
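+//
+// Editorial usage sketch (not part of the upstream import): a minimal
+// sign-and-serialize flow. cert (*x509.Certificate) and key
+// (crypto.PrivateKey) are assumed to be a matching end-entity pair.
+//
+//	sd, err := NewSignedData([]byte("payload"))
+//	if err != nil {
+//		// handle err
+//	}
+//	if err := sd.AddSigner(cert, key, SignerInfoConfig{}); err != nil {
+//		// handle err
+//	}
+//	der, err := sd.Finish() // DER-encoded SignedData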
+func NewSignedData(data []byte) (*SignedData, error) {
+	content, err := asn1.Marshal(data)
+	if err != nil {
+		return nil, err
+	}
+	ci := contentInfo{
+		ContentType: OIDData,
+		Content:     asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true},
+	}
+	sd := signedData{
+		ContentInfo: ci,
+		Version:     1,
+	}
+	return &SignedData{sd: sd, data: data, digestOid: OIDDigestAlgorithmSHA256}, nil
+}
+
+// SignerInfoConfig are optional values to include when adding a signer
+type SignerInfoConfig struct {
+	ExtraSignedAttributes   []Attribute
+	ExtraUnsignedAttributes []Attribute
+}
+
+type signedData struct {
+	Version                    int                        `asn1:"default:1"`
+	DigestAlgorithmIdentifiers []pkix.AlgorithmIdentifier `asn1:"set"`
+	ContentInfo                contentInfo
+	Certificates               rawCertificates        `asn1:"optional,tag:0"`
+	CRLs                       []pkix.CertificateList `asn1:"optional,tag:1"`
+	SignerInfos                []signerInfo           `asn1:"set"`
+}
+
+type signerInfo struct {
+	Version                   int `asn1:"default:1"`
+	IssuerAndSerialNumber     issuerAndSerial
+	DigestAlgorithm           pkix.AlgorithmIdentifier
+	AuthenticatedAttributes   []attribute `asn1:"optional,omitempty,tag:0"`
+	DigestEncryptionAlgorithm pkix.AlgorithmIdentifier
+	EncryptedDigest           []byte
+	UnauthenticatedAttributes []attribute `asn1:"optional,omitempty,tag:1"`
+}
+
+type attribute struct {
+	Type  asn1.ObjectIdentifier
+	Value asn1.RawValue `asn1:"set"`
+}
+
+func marshalAttributes(attrs []attribute) ([]byte, error) {
+	encodedAttributes, err := asn1.Marshal(struct {
+		A []attribute `asn1:"set"`
+	}{A: attrs})
+	if err != nil {
+		return nil, err
+	}
+
+	// Remove the leading sequence octets
+	var raw asn1.RawValue
+	asn1.Unmarshal(encodedAttributes, &raw)
+	return raw.Bytes, nil
+}
+
+type rawCertificates struct {
+	Raw asn1.RawContent
+}
+
+type issuerAndSerial struct {
+	IssuerName   asn1.RawValue
+	SerialNumber *big.Int
+}
+
+// SetDigestAlgorithm sets the digest algorithm to be used in the signing process.
+//
+// This should be called before adding signers
+func (sd *SignedData) SetDigestAlgorithm(d asn1.ObjectIdentifier) {
+	sd.digestOid = d
+}
+
+// SetEncryptionAlgorithm sets the encryption algorithm to be used in the signing process.
+//
+// This should be called before adding signers
+func (sd *SignedData) SetEncryptionAlgorithm(d asn1.ObjectIdentifier) {
+	sd.encryptionOid = d
+}
+
+// AddSigner is a wrapper around AddSignerChain() that adds a signer without any parent.
+func (sd *SignedData) AddSigner(ee *x509.Certificate, pkey crypto.PrivateKey, config SignerInfoConfig) error {
+	var parents []*x509.Certificate
+	return sd.AddSignerChain(ee, pkey, parents, config)
+}
+
+// AddSignerChain signs attributes about the content and adds certificates
+// and signer infos to the Signed Data. The certificate and private key
+// of the end-entity signer are used to issue the signature, and any
+// parent of that end-entity that needs to be added to the list of
+// certificates can be specified in the parents slice.
+//
+// The signature algorithm used to hash the data is the one of the end-entity
+// certificate.
+func (sd *SignedData) AddSignerChain(ee *x509.Certificate, pkey crypto.PrivateKey, parents []*x509.Certificate, config SignerInfoConfig) error {
+	// Following RFC 2315, 9.2 SignerInfo type, the distinguished name of
+	// the issuer of the end-entity signer is stored in the issuerAndSerialNumber
+	// section of the SignedData.SignerInfo, alongside the serial number of
+	// the end-entity.
+ var ias issuerAndSerial + ias.SerialNumber = ee.SerialNumber + if len(parents) == 0 { + // no parent, the issuer is the end-entity cert itself + ias.IssuerName = asn1.RawValue{FullBytes: ee.RawIssuer} + } else { + err := verifyPartialChain(ee, parents) + if err != nil { + return err + } + // the first parent is the issuer + ias.IssuerName = asn1.RawValue{FullBytes: parents[0].RawSubject} + } + sd.sd.DigestAlgorithmIdentifiers = append(sd.sd.DigestAlgorithmIdentifiers, + pkix.AlgorithmIdentifier{Algorithm: sd.digestOid}, + ) + hash, err := getHashForOID(sd.digestOid) + if err != nil { + return err + } + h := hash.New() + h.Write(sd.data) + sd.messageDigest = h.Sum(nil) + encryptionOid, err := getOIDForEncryptionAlgorithm(pkey, sd.digestOid) + if err != nil { + return err + } + attrs := &attributes{} + attrs.Add(OIDAttributeContentType, sd.sd.ContentInfo.ContentType) + attrs.Add(OIDAttributeMessageDigest, sd.messageDigest) + attrs.Add(OIDAttributeSigningTime, time.Now().UTC()) + for _, attr := range config.ExtraSignedAttributes { + attrs.Add(attr.Type, attr.Value) + } + finalAttrs, err := attrs.ForMarshalling() + if err != nil { + return err + } + unsignedAttrs := &attributes{} + for _, attr := range config.ExtraUnsignedAttributes { + unsignedAttrs.Add(attr.Type, attr.Value) + } + finalUnsignedAttrs, err := unsignedAttrs.ForMarshalling() + if err != nil { + return err + } + // create signature of signed attributes + signature, err := signAttributes(finalAttrs, pkey, hash) + if err != nil { + return err + } + signer := signerInfo{ + AuthenticatedAttributes: finalAttrs, + UnauthenticatedAttributes: finalUnsignedAttrs, + DigestAlgorithm: pkix.AlgorithmIdentifier{Algorithm: sd.digestOid}, + DigestEncryptionAlgorithm: pkix.AlgorithmIdentifier{Algorithm: encryptionOid}, + IssuerAndSerialNumber: ias, + EncryptedDigest: signature, + Version: 1, + } + sd.certs = append(sd.certs, ee) + if len(parents) > 0 { + sd.certs = append(sd.certs, parents...) + } + sd.sd.SignerInfos = append(sd.sd.SignerInfos, signer) + return nil +} + +// SignWithoutAttr issues a signature on the content of the pkcs7 SignedData. +// Unlike AddSigner/AddSignerChain, it calculates the digest on the data alone +// and does not include any signed attributes like timestamp and so on. +// +// This function is needed to sign old Android APKs, something you probably +// shouldn't do unless you're maintaining backward compatibility for old +// applications. 
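+//
+// Editorial usage sketch (not part of the upstream import): the same flow as
+// AddSigner, minus the authenticated attributes; cert and key are assumed to
+// be a matching end-entity pair.
+//
+//	sd, err := NewSignedData(content)
+//	if err != nil {
+//		// handle err
+//	}
+//	if err := sd.SignWithoutAttr(cert, key, SignerInfoConfig{}); err != nil {
+//		// handle err
+//	}
+//	der, err := sd.Finish()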
+func (sd *SignedData) SignWithoutAttr(ee *x509.Certificate, pkey crypto.PrivateKey, config SignerInfoConfig) error {
+	var signature []byte
+	sd.sd.DigestAlgorithmIdentifiers = append(sd.sd.DigestAlgorithmIdentifiers, pkix.AlgorithmIdentifier{Algorithm: sd.digestOid})
+	hash, err := getHashForOID(sd.digestOid)
+	if err != nil {
+		return err
+	}
+	h := hash.New()
+	h.Write(sd.data)
+	sd.messageDigest = h.Sum(nil)
+	switch pkey := pkey.(type) {
+	case *dsa.PrivateKey:
+		// dsa doesn't implement crypto.Signer so we make a special case
+		// https://github.com/golang/go/issues/27889
+		r, s, err := dsa.Sign(rand.Reader, pkey, sd.messageDigest)
+		if err != nil {
+			return err
+		}
+		signature, err = asn1.Marshal(dsaSignature{r, s})
+		if err != nil {
+			return err
+		}
+	default:
+		key, ok := pkey.(crypto.Signer)
+		if !ok {
+			return errors.New("pkcs7: private key does not implement crypto.Signer")
+		}
+		signature, err = key.Sign(rand.Reader, sd.messageDigest, hash)
+		if err != nil {
+			return err
+		}
+	}
+	var ias issuerAndSerial
+	ias.SerialNumber = ee.SerialNumber
+	// no parent, the issuer is the end-entity cert itself
+	ias.IssuerName = asn1.RawValue{FullBytes: ee.RawIssuer}
+	if sd.encryptionOid == nil {
+		// if the encryption algorithm wasn't set by SetEncryptionAlgorithm,
+		// infer it from the digest algorithm
+		sd.encryptionOid, err = getOIDForEncryptionAlgorithm(pkey, sd.digestOid)
+	}
+	if err != nil {
+		return err
+	}
+	signer := signerInfo{
+		DigestAlgorithm:           pkix.AlgorithmIdentifier{Algorithm: sd.digestOid},
+		DigestEncryptionAlgorithm: pkix.AlgorithmIdentifier{Algorithm: sd.encryptionOid},
+		IssuerAndSerialNumber:     ias,
+		EncryptedDigest:           signature,
+		Version:                   1,
+	}
+	// add the signer's certificate and info to the Signed Data
+	sd.certs = append(sd.certs, ee)
+	sd.sd.SignerInfos = append(sd.sd.SignerInfos, signer)
+	return nil
+}
+
+func (si *signerInfo) SetUnauthenticatedAttributes(extraUnsignedAttrs []Attribute) error {
+	unsignedAttrs := &attributes{}
+	for _, attr := range extraUnsignedAttrs {
+		unsignedAttrs.Add(attr.Type, attr.Value)
+	}
+	finalUnsignedAttrs, err := unsignedAttrs.ForMarshalling()
+	if err != nil {
+		return err
+	}
+
+	si.UnauthenticatedAttributes = finalUnsignedAttrs
+
+	return nil
+}
+
+// AddCertificate adds the certificate to the payload. Useful for parent certificates
+func (sd *SignedData) AddCertificate(cert *x509.Certificate) {
+	sd.certs = append(sd.certs, cert)
+}
+
+// Detach removes content from the signed data struct to make it a detached signature.
+// This must be called right before Finish()
+func (sd *SignedData) Detach() {
+	sd.sd.ContentInfo = contentInfo{ContentType: OIDData}
+}
+
+// GetSignedData returns the private Signed Data
+func (sd *SignedData) GetSignedData() *signedData {
+	return &sd.sd
+}
+
+// Finish marshals the content and its signers
+func (sd *SignedData) Finish() ([]byte, error) {
+	sd.sd.Certificates = marshalCertificates(sd.certs)
+	inner, err := asn1.Marshal(sd.sd)
+	if err != nil {
+		return nil, err
+	}
+	outer := contentInfo{
+		ContentType: OIDSignedData,
+		Content:     asn1.RawValue{Class: 2, Tag: 0, Bytes: inner, IsCompound: true},
+	}
+	return asn1.Marshal(outer)
+}
+
+// RemoveAuthenticatedAttributes removes authenticated attributes from signedData
+// similar to OpenSSL's PKCS7_NOATTR or -noattr flags
+func (sd *SignedData) RemoveAuthenticatedAttributes() {
+	for i := range sd.sd.SignerInfos {
+		sd.sd.SignerInfos[i].AuthenticatedAttributes = nil
+	}
+}
+
+// RemoveUnauthenticatedAttributes removes unauthenticated attributes from signedData
+func (sd *SignedData) RemoveUnauthenticatedAttributes() {
+	for i := range sd.sd.SignerInfos {
+		sd.sd.SignerInfos[i].UnauthenticatedAttributes = nil
+	}
+}
+
+// verifyPartialChain checks that a given cert is issued by the first parent in the list,
+// then continues down the path. It doesn't require the last parent to be a root CA,
+// or to be trusted in any truststore. It simply verifies that the chain provided, albeit
+// partial, makes sense.
+func verifyPartialChain(cert *x509.Certificate, parents []*x509.Certificate) error {
+	if len(parents) == 0 {
+		return fmt.Errorf("pkcs7: zero parents provided to verify the signature of certificate %q", cert.Subject.CommonName)
+	}
+	err := cert.CheckSignatureFrom(parents[0])
+	if err != nil {
+		return fmt.Errorf("pkcs7: certificate signature from parent is invalid: %v", err)
+	}
+	if len(parents) == 1 {
+		// there is no more parent to check, return
+		return nil
+	}
+	return verifyPartialChain(parents[0], parents[1:])
+}
+
+func cert2issuerAndSerial(cert *x509.Certificate) (issuerAndSerial, error) {
+	var ias issuerAndSerial
+	// The issuer RDNSequence has to match exactly the sequence in the certificate
+	// We cannot use cert.Issuer.ToRDNSequence() here since it mangles the sequence
+	ias.IssuerName = asn1.RawValue{FullBytes: cert.RawIssuer}
+	ias.SerialNumber = cert.SerialNumber
+
+	return ias, nil
+}
+
+// signs the DER encoded form of the attributes with the private key
+func signAttributes(attrs []attribute, pkey crypto.PrivateKey, digestAlg crypto.Hash) ([]byte, error) {
+	attrBytes, err := marshalAttributes(attrs)
+	if err != nil {
+		return nil, err
+	}
+	h := digestAlg.New()
+	h.Write(attrBytes)
+	hash := h.Sum(nil)
+
+	// dsa doesn't implement crypto.Signer so we make a special case
+	// https://github.com/golang/go/issues/27889
+	switch pkey := pkey.(type) {
+	case *dsa.PrivateKey:
+		r, s, err := dsa.Sign(rand.Reader, pkey, hash)
+		if err != nil {
+			return nil, err
+		}
+		return asn1.Marshal(dsaSignature{r, s})
+	}
+
+	key, ok := pkey.(crypto.Signer)
+	if !ok {
+		return nil, errors.New("pkcs7: private key does not implement crypto.Signer")
+	}
+	return key.Sign(rand.Reader, hash, digestAlg)
+}
+
+type dsaSignature struct {
+	R, S *big.Int
+}
+
+// concats and wraps the certificates in the RawValue structure
+func marshalCertificates(certs []*x509.Certificate) rawCertificates {
+	var buf bytes.Buffer
+	for _, cert := range certs {
+		buf.Write(cert.Raw)
+	}
+	rawCerts, _ := marshalCertificateBytes(buf.Bytes())
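+	// Note: the bare concatenation above works because DER certificates are
+	// self-delimiting; each one opens with its own SEQUENCE header, so a
+	// parser can split the buffer back into individual certificates without
+	// any separators.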
+	return rawCerts
+}
+
+// Even though the tag & length are stripped out during marshalling of the
+// RawContent, we have to encode them into the RawContent. If they're missing,
+// then `asn1.Marshal()` will strip out the certificate wrapper instead.
+func marshalCertificateBytes(certs []byte) (rawCertificates, error) {
+	val := asn1.RawValue{Bytes: certs, Class: 2, Tag: 0, IsCompound: true}
+	b, err := asn1.Marshal(val)
+	if err != nil {
+		return rawCertificates{}, err
+	}
+	return rawCertificates{Raw: b}, nil
+}
+
+// DegenerateCertificate creates a signed data structure containing only the
+// provided certificate or certificate chain.
+func DegenerateCertificate(cert []byte) ([]byte, error) {
+	rawCert, err := marshalCertificateBytes(cert)
+	if err != nil {
+		return nil, err
+	}
+	emptyContent := contentInfo{ContentType: OIDData}
+	sd := signedData{
+		Version:      1,
+		ContentInfo:  emptyContent,
+		Certificates: rawCert,
+		CRLs:         []pkix.CertificateList{},
+	}
+	content, err := asn1.Marshal(sd)
+	if err != nil {
+		return nil, err
+	}
+	signedContent := contentInfo{
+		ContentType: OIDSignedData,
+		Content:     asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true},
+	}
+	return asn1.Marshal(signedContent)
+}
diff --git a/builtin/credential/aws/pkcs7/sign_test.go b/builtin/credential/aws/pkcs7/sign_test.go
new file mode 100644
index 0000000..641cb04
--- /dev/null
+++ b/builtin/credential/aws/pkcs7/sign_test.go
@@ -0,0 +1,269 @@
+package pkcs7
+
+import (
+	"bytes"
+	"crypto/dsa"
+	"crypto/x509"
+	"encoding/asn1"
+	"encoding/pem"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"math/big"
+	"os"
+	"os/exec"
+	"testing"
+)
+
+func TestSign(t *testing.T) {
+	content := []byte("Hello World")
+	sigalgs := []x509.SignatureAlgorithm{
+		x509.SHA256WithRSA,
+		x509.SHA512WithRSA,
+		x509.ECDSAWithSHA256,
+		x509.ECDSAWithSHA384,
+		x509.ECDSAWithSHA512,
+	}
+	for _, sigalgroot := range sigalgs {
+		rootCert, err := createTestCertificateByIssuer("PKCS7 Test Root CA", nil, sigalgroot, true)
+		if err != nil {
+			t.Fatalf("test %s: cannot generate root cert: %s", sigalgroot, err)
+		}
+		truststore := x509.NewCertPool()
+		truststore.AddCert(rootCert.Certificate)
+		for _, sigalginter := range sigalgs {
+			interCert, err := createTestCertificateByIssuer("PKCS7 Test Intermediate Cert", rootCert, sigalginter, true)
+			if err != nil {
+				t.Fatalf("test %s/%s: cannot generate intermediate cert: %s", sigalgroot, sigalginter, err)
+			}
+			var parents []*x509.Certificate
+			parents = append(parents, interCert.Certificate)
+			for _, sigalgsigner := range sigalgs {
+				signerCert, err := createTestCertificateByIssuer("PKCS7 Test Signer Cert", interCert, sigalgsigner, false)
+				if err != nil {
+					t.Fatalf("test %s/%s/%s: cannot generate signer cert: %s", sigalgroot, sigalginter, sigalgsigner, err)
+				}
+				for _, testDetach := range []bool{false, true} {
+					log.Printf("test %s/%s/%s detached %t\n", sigalgroot, sigalginter, sigalgsigner, testDetach)
+					toBeSigned, err := NewSignedData(content)
+					if err != nil {
+						t.Fatalf("test %s/%s/%s: cannot initialize signed data: %s", sigalgroot, sigalginter, sigalgsigner, err)
+					}
+
+					// Set the digest to match the end entity cert
+					signerDigest, _ := getDigestOIDForSignatureAlgorithm(signerCert.Certificate.SignatureAlgorithm)
+					toBeSigned.SetDigestAlgorithm(signerDigest)
+
+					if err := toBeSigned.AddSignerChain(signerCert.Certificate, *signerCert.PrivateKey, parents, SignerInfoConfig{}); err != nil {
+						t.Fatalf("test %s/%s/%s: cannot add signer: %s", sigalgroot, sigalginter, sigalgsigner, err)
+					}
+					if testDetach {
+						
toBeSigned.Detach() + } + signed, err := toBeSigned.Finish() + if err != nil { + t.Fatalf("test %s/%s/%s: cannot finish signing data: %s", sigalgroot, sigalginter, sigalgsigner, err) + } + pem.Encode(os.Stdout, &pem.Block{Type: "PKCS7", Bytes: signed}) + p7, err := Parse(signed) + if err != nil { + t.Fatalf("test %s/%s/%s: cannot parse signed data: %s", sigalgroot, sigalginter, sigalgsigner, err) + } + if testDetach { + p7.Content = content + } + if !bytes.Equal(content, p7.Content) { + t.Errorf("test %s/%s/%s: content was not found in the parsed data:\n\tExpected: %s\n\tActual: %s", sigalgroot, sigalginter, sigalgsigner, content, p7.Content) + } + if err := p7.VerifyWithChain(truststore); err != nil { + t.Errorf("test %s/%s/%s: cannot verify signed data: %s", sigalgroot, sigalginter, sigalgsigner, err) + } + if !signerDigest.Equal(p7.Signers[0].DigestAlgorithm.Algorithm) { + t.Errorf("test %s/%s/%s: expected digest algorithm %q but got %q", + sigalgroot, sigalginter, sigalgsigner, signerDigest, p7.Signers[0].DigestAlgorithm.Algorithm) + } + } + } + } + } +} + +func TestDSASignAndVerifyWithOpenSSL(t *testing.T) { + content := []byte("Hello World") + // write the content to a temp file + tmpContentFile, err := ioutil.TempFile("", "TestDSASignAndVerifyWithOpenSSL_content") + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(tmpContentFile.Name(), content, 0o755) + + block, _ := pem.Decode(dsaPublicCert) + if block == nil { + t.Fatal("failed to parse certificate PEM") + } + signerCert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + t.Fatal("failed to parse certificate: " + err.Error()) + } + + // write the signer cert to a temp file + tmpSignerCertFile, err := ioutil.TempFile("", "TestDSASignAndVerifyWithOpenSSL_signer") + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(tmpSignerCertFile.Name(), dsaPublicCert, 0o755) + + priv := dsa.PrivateKey{ + PublicKey: dsa.PublicKey{ + Parameters: dsa.Parameters{ + P: fromHex("fd7f53811d75122952df4a9c2eece4e7f611b7523cef4400c31e3f80b6512669455d402251fb593d8d58fabfc5f5ba30f6cb9b556cd7813b801d346ff26660b76b9950a5a49f9fe8047b1022c24fbba9d7feb7c61bf83b57e7c6a8a6150f04fb83f6d3c51ec3023554135a169132f675f3ae2b61d72aeff22203199dd14801c7"), + Q: fromHex("9760508F15230BCCB292B982A2EB840BF0581CF5"), + G: fromHex("F7E1A085D69B3DDECBBCAB5C36B857B97994AFBBFA3AEA82F9574C0B3D0782675159578EBAD4594FE67107108180B449167123E84C281613B7CF09328CC8A6E13C167A8B547C8D28E0A3AE1E2BB3A675916EA37F0BFA213562F1FB627A01243BCCA4F1BEA8519089A883DFE15AE59F06928B665E807B552564014C3BFECF492A"), + }, + }, + X: fromHex("7D6E1A3DD4019FD809669D8AB8DA73807CEF7EC1"), + } + toBeSigned, err := NewSignedData(content) + if err != nil { + t.Fatalf("test case: cannot initialize signed data: %s", err) + } + // openssl DSA only supports SHA1 for our 1024-bit DSA key, since that is all the standard officially supports + toBeSigned.digestOid = OIDDigestAlgorithmSHA1 + if err := toBeSigned.SignWithoutAttr(signerCert, &priv, SignerInfoConfig{}); err != nil { + t.Fatalf("Cannot add signer: %s", err) + } + toBeSigned.Detach() + signed, err := toBeSigned.Finish() + if err != nil { + t.Fatalf("test case: cannot finish signing data: %s", err) + } + + // write the signature to a temp file + tmpSignatureFile, err := ioutil.TempFile("", "TestDSASignAndVerifyWithOpenSSL_signature") + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(tmpSignatureFile.Name(), pem.EncodeToMemory(&pem.Block{Type: "PKCS7", Bytes: signed}), 0o755) + + // call openssl to verify the signature on the content using 
the root
+	opensslCMD := exec.Command("openssl", "smime", "-verify", "-noverify",
+		"-in", tmpSignatureFile.Name(), "-inform", "PEM",
+		"-content", tmpContentFile.Name())
+	out, err := opensslCMD.CombinedOutput()
+	if err != nil {
+		t.Errorf("Command: %s", opensslCMD.Args)
+		t.Fatalf("test case: openssl command failed with %s: %s", err, out)
+	}
+	os.Remove(tmpSignatureFile.Name())  // clean up
+	os.Remove(tmpContentFile.Name())    // clean up
+	os.Remove(tmpSignerCertFile.Name()) // clean up
+}
+
+func ExampleSignedData() {
+	// generate a signing cert or load a key pair
+	cert, err := createTestCertificate(x509.SHA256WithRSA)
+	if err != nil {
+		fmt.Printf("Cannot create test certificates: %s", err)
+	}
+
+	// Initialize a SignedData struct with content to be signed
+	signedData, err := NewSignedData([]byte("Example data to be signed"))
+	if err != nil {
+		fmt.Printf("Cannot initialize signed data: %s", err)
+	}
+
+	// Add the signing cert and private key
+	if err := signedData.AddSigner(cert.Certificate, cert.PrivateKey, SignerInfoConfig{}); err != nil {
+		fmt.Printf("Cannot add signer: %s", err)
+	}
+
+	// Call Detach() if you want to remove content from the signature
+	// and generate an S/MIME detached signature
+	signedData.Detach()
+
+	// Finish() to obtain the signature bytes
+	detachedSignature, err := signedData.Finish()
+	if err != nil {
+		fmt.Printf("Cannot finish signing data: %s", err)
+	}
+	pem.Encode(os.Stdout, &pem.Block{Type: "PKCS7", Bytes: detachedSignature})
+}
+
+func TestUnmarshalSignedAttribute(t *testing.T) {
+	cert, err := createTestCertificate(x509.SHA512WithRSA)
+	if err != nil {
+		t.Fatal(err)
+	}
+	content := []byte("Hello World")
+	toBeSigned, err := NewSignedData(content)
+	if err != nil {
+		t.Fatalf("Cannot initialize signed data: %s", err)
+	}
+	oidTest := asn1.ObjectIdentifier{2, 3, 4, 5, 6, 7}
+	testValue := "TestValue"
+	if err := toBeSigned.AddSigner(cert.Certificate, *cert.PrivateKey, SignerInfoConfig{
+		ExtraSignedAttributes: []Attribute{{Type: oidTest, Value: testValue}},
+	}); err != nil {
+		t.Fatalf("Cannot add signer: %s", err)
+	}
+	signed, err := toBeSigned.Finish()
+	if err != nil {
+		t.Fatalf("Cannot finish signing data: %s", err)
+	}
+	p7, err := Parse(signed)
+	if err != nil {
+		t.Fatalf("Cannot parse signed data: %v", err)
+	}
+	var actual string
+	err = p7.UnmarshalSignedAttribute(oidTest, &actual)
+	if err != nil {
+		t.Fatalf("Cannot unmarshal test value: %s", err)
+	}
+	if testValue != actual {
+		t.Errorf("Attribute does not match test value\n\tExpected: %s\n\tActual: %s", testValue, actual)
+	}
+}
+
+func TestDegenerateCertificate(t *testing.T) {
+	cert, err := createTestCertificate(x509.SHA256WithRSA)
+	if err != nil {
+		t.Fatal(err)
+	}
+	deg, err := DegenerateCertificate(cert.Certificate.Raw)
+	if err != nil {
+		t.Fatal(err)
+	}
+	testOpenSSLParse(t, deg)
+	pem.Encode(os.Stdout, &pem.Block{Type: "PKCS7", Bytes: deg})
+}
+
+// writes the cert to a temporary file and tests that openssl can read it.
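+// The equivalent manual check is roughly (a sketch; the test below only
+// inspects the exit code):
+//
+//	openssl pkcs7 -inform der -in cert.p7b -print_certs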
+func testOpenSSLParse(t *testing.T, certBytes []byte) {
+	tmpCertFile, err := ioutil.TempFile("", "testCertificate")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.Remove(tmpCertFile.Name()) // clean up
+
+	if _, err := tmpCertFile.Write(certBytes); err != nil {
+		t.Fatal(err)
+	}
+
+	opensslCMD := exec.Command("openssl", "pkcs7", "-inform", "der", "-in", tmpCertFile.Name())
+	_, err = opensslCMD.Output()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := tmpCertFile.Close(); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func fromHex(s string) *big.Int {
+	result, ok := new(big.Int).SetString(s, 16)
+	if !ok {
+		panic(s)
+	}
+	return result
+}
diff --git a/builtin/credential/aws/pkcs7/verify.go b/builtin/credential/aws/pkcs7/verify.go
new file mode 100644
index 0000000..002e77f
--- /dev/null
+++ b/builtin/credential/aws/pkcs7/verify.go
@@ -0,0 +1,400 @@
+package pkcs7
+
+import (
+	"crypto"
+	"crypto/dsa"
+	"crypto/subtle"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"errors"
+	"fmt"
+	"time"
+)
+
+// Verify is a wrapper around VerifyWithChain() that initializes an empty
+// trust store, effectively disabling certificate verification when validating
+// a signature.
+func (p7 *PKCS7) Verify() (err error) {
+	return p7.VerifyWithChain(nil)
+}
+
+// VerifyWithChain checks the signatures of a PKCS7 object.
+//
+// If truststore is not nil, it also verifies the chain of trust of
+// the end-entity signer cert to one of the roots in the truststore.
+// When the PKCS7 object includes the signing-time authenticated
+// attribute, the chain is verified at that time; otherwise it is
+// verified at the current UTC time.
+func (p7 *PKCS7) VerifyWithChain(truststore *x509.CertPool) (err error) {
+	if len(p7.Signers) == 0 {
+		return errors.New("pkcs7: Message has no signers")
+	}
+	for _, signer := range p7.Signers {
+		if err := verifySignature(p7, signer, truststore); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// VerifyWithChainAtTime checks the signatures of a PKCS7 object.
+//
+// If truststore is not nil, it also verifies the chain of trust of
+// the end-entity signer cert to a root in the truststore at
+// currentTime. It does not use the signing time authenticated
+// attribute.
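+//
+// A minimal sketch (roots is assumed to be an *x509.CertPool populated
+// elsewhere):
+//
+//	p7, _ := Parse(der)
+//	err := p7.VerifyWithChainAtTime(roots, time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC))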
+func (p7 *PKCS7) VerifyWithChainAtTime(truststore *x509.CertPool, currentTime time.Time) (err error) {
+	if len(p7.Signers) == 0 {
+		return errors.New("pkcs7: Message has no signers")
+	}
+	for _, signer := range p7.Signers {
+		if err := verifySignatureAtTime(p7, signer, truststore, currentTime); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func verifySignatureAtTime(p7 *PKCS7, signer signerInfo, truststore *x509.CertPool, currentTime time.Time) (err error) {
+	signedData := p7.Content
+	ee := getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber)
+	if ee == nil {
+		return errors.New("pkcs7: No certificate for signer")
+	}
+	if len(signer.AuthenticatedAttributes) > 0 {
+		// TODO(fullsailor): First check the content type match
+		var (
+			digest      []byte
+			signingTime time.Time
+		)
+		err := unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeMessageDigest, &digest)
+		if err != nil {
+			return err
+		}
+		hash, err := getHashForOID(signer.DigestAlgorithm.Algorithm)
+		if err != nil {
+			return err
+		}
+		h := hash.New()
+		h.Write(p7.Content)
+		computed := h.Sum(nil)
+		if subtle.ConstantTimeCompare(digest, computed) != 1 {
+			return &MessageDigestMismatchError{
+				ExpectedDigest: digest,
+				ActualDigest:   computed,
+			}
+		}
+		signedData, err = marshalAttributes(signer.AuthenticatedAttributes)
+		if err != nil {
+			return err
+		}
+		err = unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeSigningTime, &signingTime)
+		if err == nil {
+			// signing time found, performing validity check
+			if signingTime.After(ee.NotAfter) || signingTime.Before(ee.NotBefore) {
+				return fmt.Errorf("pkcs7: signing time %q is outside of certificate validity %q to %q",
+					signingTime.Format(time.RFC3339),
+					ee.NotBefore.Format(time.RFC3339),
+					ee.NotAfter.Format(time.RFC3339))
+			}
+		}
+	}
+	if truststore != nil {
+		_, err = verifyCertChain(ee, p7.Certificates, truststore, currentTime)
+		if err != nil {
+			return err
+		}
+	}
+	sigalg, err := getSignatureAlgorithm(signer.DigestEncryptionAlgorithm, signer.DigestAlgorithm)
+	if err != nil {
+		return err
+	}
+	switch sigalg {
+	case x509.DSAWithSHA1, x509.DSAWithSHA256:
+		return dsaCheckSignature(sigalg, signedData, signer.EncryptedDigest, ee.PublicKey)
+	default:
+		return ee.CheckSignature(sigalg, signedData, signer.EncryptedDigest)
+	}
+}
+
+// dsaCheckSignature verifies the DSA signature on a PKCS7 document. DSA support was
+// removed from Go's crypto/x509 support prior to Go 1.16. This allows
+// verifying legacy signatures until affected applications can be migrated off
+// of DSA.
+func dsaCheckSignature(algo x509.SignatureAlgorithm, signed, signature []byte, publicKey crypto.PublicKey) error {
+	dsaKey, ok := publicKey.(*dsa.PublicKey)
+	if !ok {
+		return ErrUnsupportedAlgorithm
+	}
+
+	var hashType crypto.Hash
+	switch algo {
+	case x509.DSAWithSHA1:
+		hashType = crypto.SHA1
+	case x509.DSAWithSHA256:
+		hashType = crypto.SHA256
+	default:
+		return ErrUnsupportedAlgorithm
+	}
+	h := hashType.New()
+	h.Write(signed)
+	signed = h.Sum(nil)
+
+	dsaSig := new(dsaSignature)
+	if rest, err := asn1.Unmarshal(signature, dsaSig); err != nil {
+		return err
+	} else if len(rest) != 0 {
+		return errors.New("x509: trailing data after DSA signature")
+	}
+	if dsaSig.R.Sign() <= 0 || dsaSig.S.Sign() <= 0 {
+		return errors.New("x509: DSA signature contained zero or negative values")
+	}
+	// According to FIPS 186-3, section 4.6, the hash must be truncated if it is longer
+	// than the key length, but crypto/dsa doesn't do it automatically.
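+	// For example, with a 1024-bit DSA key Q is 160 bits, so maxHashLen is
+	// 20 bytes and a 32-byte SHA-256 digest is truncated to its left-most
+	// 20 bytes before verification.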
+	if maxHashLen := dsaKey.Q.BitLen() / 8; maxHashLen < len(signed) {
+		signed = signed[:maxHashLen]
+	}
+	if !dsa.Verify(dsaKey, signed, dsaSig.R, dsaSig.S) {
+		return errors.New("x509: DSA verification failure")
+	}
+	return nil
+}
+
+func verifySignature(p7 *PKCS7, signer signerInfo, truststore *x509.CertPool) (err error) {
+	signedData := p7.Content
+	ee := getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber)
+	if ee == nil {
+		return errors.New("pkcs7: No certificate for signer")
+	}
+	signingTime := time.Now().UTC()
+	if len(signer.AuthenticatedAttributes) > 0 {
+		// TODO(fullsailor): First check the content type match
+		var digest []byte
+		err := unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeMessageDigest, &digest)
+		if err != nil {
+			return err
+		}
+		hash, err := getHashForOID(signer.DigestAlgorithm.Algorithm)
+		if err != nil {
+			return err
+		}
+		h := hash.New()
+		h.Write(p7.Content)
+		computed := h.Sum(nil)
+		if subtle.ConstantTimeCompare(digest, computed) != 1 {
+			return &MessageDigestMismatchError{
+				ExpectedDigest: digest,
+				ActualDigest:   computed,
+			}
+		}
+		signedData, err = marshalAttributes(signer.AuthenticatedAttributes)
+		if err != nil {
+			return err
+		}
+		err = unmarshalAttribute(signer.AuthenticatedAttributes, OIDAttributeSigningTime, &signingTime)
+		if err == nil {
+			// signing time found, performing validity check
+			if signingTime.After(ee.NotAfter) || signingTime.Before(ee.NotBefore) {
+				return fmt.Errorf("pkcs7: signing time %q is outside of certificate validity %q to %q",
+					signingTime.Format(time.RFC3339),
+					ee.NotBefore.Format(time.RFC3339),
+					ee.NotAfter.Format(time.RFC3339))
+			}
+		}
+	}
+	if truststore != nil {
+		_, err = verifyCertChain(ee, p7.Certificates, truststore, signingTime)
+		if err != nil {
+			return err
+		}
+	}
+	sigalg, err := getSignatureAlgorithm(signer.DigestEncryptionAlgorithm, signer.DigestAlgorithm)
+	if err != nil {
+		return err
+	}
+
+	switch sigalg {
+	case x509.DSAWithSHA1, x509.DSAWithSHA256:
+		return dsaCheckSignature(sigalg, signedData, signer.EncryptedDigest, ee.PublicKey)
+	default:
+		return ee.CheckSignature(sigalg, signedData, signer.EncryptedDigest)
+	}
+}
+
+// GetOnlySigner returns an x509.Certificate for the first signer of the signed
+// data payload. If there are more or fewer than one signer, nil is returned
+func (p7 *PKCS7) GetOnlySigner() *x509.Certificate {
+	if len(p7.Signers) != 1 {
+		return nil
+	}
+	signer := p7.Signers[0]
+	return getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber)
+}
+
+// UnmarshalSignedAttribute decodes a single attribute from the signer info
+func (p7 *PKCS7) UnmarshalSignedAttribute(attributeType asn1.ObjectIdentifier, out interface{}) error {
+	sd, ok := p7.raw.(signedData)
+	if !ok {
+		return errors.New("pkcs7: payload is not signedData content")
+	}
+	if len(sd.SignerInfos) < 1 {
+		return errors.New("pkcs7: payload has no signers")
+	}
+	attributes := sd.SignerInfos[0].AuthenticatedAttributes
+	return unmarshalAttribute(attributes, attributeType, out)
+}
+
+func parseSignedData(data []byte) (*PKCS7, error) {
+	var sd signedData
+	if _, err := asn1.Unmarshal(data, &sd); err != nil {
+		return nil, err
+	}
+	certs, err := sd.Certificates.Parse()
+	if err != nil {
+		return nil, err
+	}
+	// fmt.Printf("--> Signed Data Version %d\n", sd.Version)
+
+	var compound asn1.RawValue
+	var content unsignedData
+
+	// The Content.Bytes may be empty on PKI responses.
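+	// (Certs-only "degenerate" SignedData structures, such as those built by
+	// DegenerateCertificate, likewise carry no content here.)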
+	if len(sd.ContentInfo.Content.Bytes) > 0 {
+		if _, err := asn1.Unmarshal(sd.ContentInfo.Content.Bytes, &compound); err != nil {
+			return nil, err
+		}
+	}
+	// Compound octet string
+	if compound.IsCompound {
+		if compound.Tag == 4 {
+			if _, err = asn1.Unmarshal(compound.Bytes, &content); err != nil {
+				return nil, err
+			}
+		} else {
+			content = compound.Bytes
+		}
+	} else {
+		// assuming this is tag 04
+		content = compound.Bytes
+	}
+	return &PKCS7{
+		Content:      content,
+		Certificates: certs,
+		CRLs:         sd.CRLs,
+		Signers:      sd.SignerInfos,
+		raw:          sd,
+	}, nil
+}
+
+// verifyCertChain takes an end-entity cert, a list of potential intermediates and a
+// truststore, and builds all potential chains between the EE and a trusted root.
+//
+// When verifying chains that may have expired, currentTime can be set to a past date
+// to allow the verification to pass. If unset, currentTime is set to the current UTC time.
+func verifyCertChain(ee *x509.Certificate, certs []*x509.Certificate, truststore *x509.CertPool, currentTime time.Time) (chains [][]*x509.Certificate, err error) {
+	intermediates := x509.NewCertPool()
+	for _, intermediate := range certs {
+		intermediates.AddCert(intermediate)
+	}
+	verifyOptions := x509.VerifyOptions{
+		Roots:         truststore,
+		Intermediates: intermediates,
+		KeyUsages:     []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
+		CurrentTime:   currentTime,
+	}
+	chains, err = ee.Verify(verifyOptions)
+	if err != nil {
+		return chains, fmt.Errorf("pkcs7: failed to verify certificate chain: %v", err)
+	}
+	return
+}
+
+// MessageDigestMismatchError is returned when the signer data digest does not
+// match the computed digest for the contained content
+type MessageDigestMismatchError struct {
+	ExpectedDigest []byte
+	ActualDigest   []byte
+}
+
+func (err *MessageDigestMismatchError) Error() string {
+	return fmt.Sprintf("pkcs7: Message digest mismatch\n\tExpected: %X\n\tActual  : %X", err.ExpectedDigest, err.ActualDigest)
+}
+
+func getSignatureAlgorithm(digestEncryption, digest pkix.AlgorithmIdentifier) (x509.SignatureAlgorithm, error) {
+	switch {
+	case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA1):
+		return x509.ECDSAWithSHA1, nil
+	case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA256):
+		return x509.ECDSAWithSHA256, nil
+	case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA384):
+		return x509.ECDSAWithSHA384, nil
+	case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmECDSASHA512):
+		return x509.ECDSAWithSHA512, nil
+	case digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSA),
+		digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA1),
+		digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA256),
+		digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA384),
+		digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmRSASHA512):
+		switch {
+		case digest.Algorithm.Equal(OIDDigestAlgorithmSHA1):
+			return x509.SHA1WithRSA, nil
+		case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256):
+			return x509.SHA256WithRSA, nil
+		case digest.Algorithm.Equal(OIDDigestAlgorithmSHA384):
+			return x509.SHA384WithRSA, nil
+		case digest.Algorithm.Equal(OIDDigestAlgorithmSHA512):
+			return x509.SHA512WithRSA, nil
+		default:
+			return -1, fmt.Errorf("pkcs7: unsupported digest %q for encryption algorithm %q",
+				digest.Algorithm.String(), digestEncryption.Algorithm.String())
+		}
+	case digestEncryption.Algorithm.Equal(OIDDigestAlgorithmDSA),
+		digestEncryption.Algorithm.Equal(OIDDigestAlgorithmDSASHA1):
+		switch {
+		case 
digest.Algorithm.Equal(OIDDigestAlgorithmSHA1): + return x509.DSAWithSHA1, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256): + return x509.DSAWithSHA256, nil + default: + return -1, fmt.Errorf("pkcs7: unsupported digest %q for encryption algorithm %q", + digest.Algorithm.String(), digestEncryption.Algorithm.String()) + } + case digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmECDSAP256), + digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmECDSAP384), + digestEncryption.Algorithm.Equal(OIDEncryptionAlgorithmECDSAP521): + switch { + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA1): + return x509.ECDSAWithSHA1, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA256): + return x509.ECDSAWithSHA256, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA384): + return x509.ECDSAWithSHA384, nil + case digest.Algorithm.Equal(OIDDigestAlgorithmSHA512): + return x509.ECDSAWithSHA512, nil + default: + return -1, fmt.Errorf("pkcs7: unsupported digest %q for encryption algorithm %q", + digest.Algorithm.String(), digestEncryption.Algorithm.String()) + } + default: + return -1, fmt.Errorf("pkcs7: unsupported algorithm %q", + digestEncryption.Algorithm.String()) + } +} + +func getCertFromCertsByIssuerAndSerial(certs []*x509.Certificate, ias issuerAndSerial) *x509.Certificate { + for _, cert := range certs { + if isCertMatchForIssuerAndSerial(cert, ias) { + return cert + } + } + return nil +} + +func unmarshalAttribute(attrs []attribute, attributeType asn1.ObjectIdentifier, out interface{}) error { + for _, attr := range attrs { + if attr.Type.Equal(attributeType) { + _, err := asn1.Unmarshal(attr.Value.Bytes, out) + return err + } + } + return errors.New("pkcs7: attribute type not in attributes") +} diff --git a/builtin/credential/aws/pkcs7/verify_dsa_test.go b/builtin/credential/aws/pkcs7/verify_dsa_test.go new file mode 100644 index 0000000..857ea4d --- /dev/null +++ b/builtin/credential/aws/pkcs7/verify_dsa_test.go @@ -0,0 +1,181 @@ +//go:build go1.11 || go1.12 || go1.13 || go1.14 || go1.15 + +package pkcs7 + +import ( + "crypto/x509" + "encoding/pem" + "io/ioutil" + "os" + "os/exec" + "testing" +) + +func TestVerifyEC2(t *testing.T) { + fixture := UnmarshalDSATestFixture(EC2IdentityDocumentFixture) + p7, err := Parse(fixture.Input) + if err != nil { + t.Errorf("Parse encountered unexpected error: %v", err) + } + p7.Certificates = []*x509.Certificate{fixture.Certificate} + if err := p7.Verify(); err != nil { + t.Errorf("Verify failed with error: %v", err) + } +} + +var EC2IdentityDocumentFixture = ` +-----BEGIN PKCS7----- +MIAGCSqGSIb3DQEHAqCAMIACAQExCzAJBgUrDgMCGgUAMIAGCSqGSIb3DQEHAaCA +JIAEggGmewogICJwcml2YXRlSXAiIDogIjE3Mi4zMC4wLjI1MiIsCiAgImRldnBh +eVByb2R1Y3RDb2RlcyIgOiBudWxsLAogICJhdmFpbGFiaWxpdHlab25lIiA6ICJ1 +cy1lYXN0LTFhIiwKICAidmVyc2lvbiIgOiAiMjAxMC0wOC0zMSIsCiAgImluc3Rh +bmNlSWQiIDogImktZjc5ZmU1NmMiLAogICJiaWxsaW5nUHJvZHVjdHMiIDogbnVs +bCwKICAiaW5zdGFuY2VUeXBlIiA6ICJ0Mi5taWNybyIsCiAgImFjY291bnRJZCIg +OiAiMTIxNjU5MDE0MzM0IiwKICAiaW1hZ2VJZCIgOiAiYW1pLWZjZTNjNjk2IiwK +ICAicGVuZGluZ1RpbWUiIDogIjIwMTYtMDQtMDhUMDM6MDE6MzhaIiwKICAiYXJj +aGl0ZWN0dXJlIiA6ICJ4ODZfNjQiLAogICJrZXJuZWxJZCIgOiBudWxsLAogICJy +YW1kaXNrSWQiIDogbnVsbCwKICAicmVnaW9uIiA6ICJ1cy1lYXN0LTEiCn0AAAAA +AAAxggEYMIIBFAIBATBpMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5n +dG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2Vi +IFNlcnZpY2VzIExMQwIJAJa6SNnlXhpnMAkGBSsOAwIaBQCgXTAYBgkqhkiG9w0B +CQMxCwYJKoZIhvcNAQcBMBwGCSqGSIb3DQEJBTEPFw0xNjA0MDgwMzAxNDRaMCMG 
+CSqGSIb3DQEJBDEWBBTuUc28eBXmImAautC+wOjqcFCBVjAJBgcqhkjOOAQDBC8w +LQIVAKA54NxGHWWCz5InboDmY/GHs33nAhQ6O/ZI86NwjA9Vz3RNMUJrUPU5tAAA +AAAAAA== +-----END PKCS7----- +-----BEGIN CERTIFICATE----- +MIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw +FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD +VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xMjAxMDUxMjU2MTJaFw0z +ODAxMDUxMjU2MTJaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u +IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl +cnZpY2VzIExMQzCCAbcwggEsBgcqhkjOOAQBMIIBHwKBgQCjkvcS2bb1VQ4yt/5e +ih5OO6kK/n1Lzllr7D8ZwtQP8fOEpp5E2ng+D6Ud1Z1gYipr58Kj3nssSNpI6bX3 +VyIQzK7wLclnd/YozqNNmgIyZecN7EglK9ITHJLP+x8FtUpt3QbyYXJdmVMegN6P +hviYt5JH/nYl4hh3Pa1HJdskgQIVALVJ3ER11+Ko4tP6nwvHwh6+ERYRAoGBAI1j +k+tkqMVHuAFcvAGKocTgsjJem6/5qomzJuKDmbJNu9Qxw3rAotXau8Qe+MBcJl/U +hhy1KHVpCGl9fueQ2s6IL0CaO/buycU1CiYQk40KNHCcHfNiZbdlx1E9rpUp7bnF +lRa2v1ntMX3caRVDdbtPEWmdxSCYsYFDk4mZrOLBA4GEAAKBgEbmeve5f8LIE/Gf +MNmP9CM5eovQOGx5ho8WqD+aTebs+k2tn92BBPqeZqpWRa5P/+jrdKml1qx4llHW +MXrs3IgIb6+hUIB+S8dz8/mmO0bpr76RoZVCXYab2CZedFut7qc3WUH9+EUAH5mw +vSeDCOUMYQR7R9LINYwouHIziqQYMAkGByqGSM44BAMDLwAwLAIUWXBlk40xTwSw +7HX32MxXYruse9ACFBNGmdX2ZBrVNGrN9N2f6ROk0k9K +-----END CERTIFICATE-----` + +func TestDSASignWithOpenSSLAndVerify(t *testing.T) { + content := []byte(` +A ship in port is safe, +but that's not what ships are built for. +-- Grace Hopper`) + // write the content to a temp file + tmpContentFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_content") + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(tmpContentFile.Name(), content, 0o755) + + // write the signer cert to a temp file + tmpSignerCertFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_signer") + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(tmpSignerCertFile.Name(), dsaPublicCert, 0o755) + + // write the signer key to a temp file + tmpSignerKeyFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_key") + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(tmpSignerKeyFile.Name(), dsaPrivateKey, 0o755) + + tmpSignedFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_signature") + if err != nil { + t.Fatal(err) + } + // call openssl to sign the content + opensslCMD := exec.Command("openssl", "smime", "-sign", "-nodetach", "-md", "sha1", + "-in", tmpContentFile.Name(), "-out", tmpSignedFile.Name(), + "-signer", tmpSignerCertFile.Name(), "-inkey", tmpSignerKeyFile.Name(), + "-certfile", tmpSignerCertFile.Name(), "-outform", "PEM") + out, err := opensslCMD.CombinedOutput() + if err != nil { + t.Fatalf("openssl command failed with %s: %s", err, out) + } + + // verify the signed content + pemSignature, err := ioutil.ReadFile(tmpSignedFile.Name()) + if err != nil { + t.Fatal(err) + } + t.Logf("%s\n", pemSignature) + derBlock, _ := pem.Decode(pemSignature) + if derBlock == nil { + t.Fatalf("failed to read DER block from signature PEM %s", tmpSignedFile.Name()) + } + p7, err := Parse(derBlock.Bytes) + if err != nil { + t.Fatalf("Parse encountered unexpected error: %v", err) + } + if err := p7.Verify(); err != nil { + t.Fatalf("Verify failed with error: %v", err) + } + os.Remove(tmpSignerCertFile.Name()) // clean up + os.Remove(tmpSignerKeyFile.Name()) // clean up + os.Remove(tmpContentFile.Name()) // clean up +} + +var dsaPrivateKey = []byte(`-----BEGIN PRIVATE KEY----- +MIIBSwIBADCCASwGByqGSM44BAEwggEfAoGBAP1/U4EddRIpUt9KnC7s5Of2EbdS +PO9EAMMeP4C2USZpRV1AIlH7WT2NWPq/xfW6MPbLm1Vs14E7gB00b/JmYLdrmVCl 
+pJ+f6AR7ECLCT7up1/63xhv4O1fnxqimFQ8E+4P208UewwI1VBNaFpEy9nXzrith +1yrv8iIDGZ3RSAHHAhUAl2BQjxUjC8yykrmCouuEC/BYHPUCgYEA9+GghdabPd7L +vKtcNrhXuXmUr7v6OuqC+VdMCz0HgmdRWVeOutRZT+ZxBxCBgLRJFnEj6EwoFhO3 +zwkyjMim4TwWeotUfI0o4KOuHiuzpnWRbqN/C/ohNWLx+2J6ASQ7zKTxvqhRkImo +g9/hWuWfBpKLZl6Ae1UlZAFMO/7PSSoEFgIUfW4aPdQBn9gJZp2KuNpzgHzvfsE= +-----END PRIVATE KEY-----`) + +var dsaPublicCert = []byte(`-----BEGIN CERTIFICATE----- +MIIDOjCCAvWgAwIBAgIEPCY/UDANBglghkgBZQMEAwIFADBsMRAwDgYDVQQGEwdV +bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD +VQQKEwdVbmtub3duMRAwDgYDVQQLEwdVbmtub3duMRAwDgYDVQQDEwdVbmtub3du +MB4XDTE4MTAyMjEzNDMwN1oXDTQ2MDMwOTEzNDMwN1owbDEQMA4GA1UEBhMHVW5r +bm93bjEQMA4GA1UECBMHVW5rbm93bjEQMA4GA1UEBxMHVW5rbm93bjEQMA4GA1UE +ChMHVW5rbm93bjEQMA4GA1UECxMHVW5rbm93bjEQMA4GA1UEAxMHVW5rbm93bjCC +AbgwggEsBgcqhkjOOAQBMIIBHwKBgQD9f1OBHXUSKVLfSpwu7OTn9hG3UjzvRADD +Hj+AtlEmaUVdQCJR+1k9jVj6v8X1ujD2y5tVbNeBO4AdNG/yZmC3a5lQpaSfn+gE +exAiwk+7qdf+t8Yb+DtX58aophUPBPuD9tPFHsMCNVQTWhaRMvZ1864rYdcq7/Ii +Axmd0UgBxwIVAJdgUI8VIwvMspK5gqLrhAvwWBz1AoGBAPfhoIXWmz3ey7yrXDa4 +V7l5lK+7+jrqgvlXTAs9B4JnUVlXjrrUWU/mcQcQgYC0SRZxI+hMKBYTt88JMozI +puE8FnqLVHyNKOCjrh4rs6Z1kW6jfwv6ITVi8ftiegEkO8yk8b6oUZCJqIPf4Vrl +nwaSi2ZegHtVJWQBTDv+z0kqA4GFAAKBgQDCriMPbEVBoRK4SOUeFwg7+VRf4TTp +rcOQC9IVVoCjXzuWEGrp3ZI7YWJSpFnSch4lk29RH8O0HpI/NOzKnOBtnKr782pt +1k/bJVMH9EaLd6MKnAVjrCDMYBB0MhebZ8QHY2elZZCWoqDYAcIDOsEx+m4NLErT +ypPnjS5M0jm1PKMhMB8wHQYDVR0OBBYEFC0Yt5XdM0Kc95IX8NQ8XRssGPx7MA0G +CWCGSAFlAwQDAgUAAzAAMC0CFQCIgQtrZZ9hdZG1ROhR5hc8nYEmbgIUAIlgC688 +qzy/7yePTlhlpj+ahMM= +-----END CERTIFICATE-----`) + +type DSATestFixture struct { + Input []byte + Certificate *x509.Certificate +} + +func UnmarshalDSATestFixture(testPEMBlock string) DSATestFixture { + var result DSATestFixture + var derBlock *pem.Block + pemBlock := []byte(testPEMBlock) + for { + derBlock, pemBlock = pem.Decode(pemBlock) + if derBlock == nil { + break + } + switch derBlock.Type { + case "PKCS7": + result.Input = derBlock.Bytes + case "CERTIFICATE": + result.Certificate, _ = x509.ParseCertificate(derBlock.Bytes) + } + } + + return result +} diff --git a/builtin/credential/cert/backend.go b/builtin/credential/cert/backend.go new file mode 100644 index 0000000..61dd889 --- /dev/null +++ b/builtin/credential/cert/backend.go @@ -0,0 +1,173 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package cert
+
+import (
+	"context"
+	"crypto/x509"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/helper/ocsp"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+const operationPrefixCert = "cert"
+
+func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
+	b := Backend()
+	if err := b.Setup(ctx, conf); err != nil {
+		return nil, err
+	}
+	return b, nil
+}
+
+func Backend() *backend {
+	var b backend
+	b.Backend = &framework.Backend{
+		Help: backendHelp,
+		PathsSpecial: &logical.Paths{
+			Unauthenticated: []string{
+				"login",
+			},
+		},
+		Paths: []*framework.Path{
+			pathConfig(&b),
+			pathLogin(&b),
+			pathListCerts(&b),
+			pathCerts(&b),
+			pathListCRLs(&b),
+			pathCRLs(&b),
+		},
+		AuthRenew:      b.loginPathWrapper(b.pathLoginRenew),
+		Invalidate:     b.invalidate,
+		BackendType:    logical.TypeCredential,
+		InitializeFunc: b.initialize,
+		PeriodicFunc:   b.updateCRLs,
+	}
+
+	b.crlUpdateMutex = &sync.RWMutex{}
+	return &b
+}
+
+type backend struct {
+	*framework.Backend
+	MapCertId *framework.PathMap
+
+	crls            map[string]CRLInfo
+	crlUpdateMutex  *sync.RWMutex
+	ocspClientMutex sync.RWMutex
+	ocspClient      *ocsp.Client
+	configUpdated   atomic.Bool
+}
+
+func (b *backend) initialize(ctx context.Context, req *logical.InitializationRequest) error {
+	bConf, err := b.Config(ctx, req.Storage)
+	if err != nil {
+		b.Logger().Error(fmt.Sprintf("failed to load backend configuration: %v", err))
+		return err
+	}
+
+	if bConf != nil {
+		b.updatedConfig(bConf)
+	}
+
+	if err := b.lockThenpopulateCRLs(ctx, req.Storage); err != nil {
+		b.Logger().Error(fmt.Sprintf("failed to populate CRLs: %v", err))
+		return err
+	}
+
+	return nil
+}
+
+func (b *backend) invalidate(_ context.Context, key string) {
+	switch {
+	case strings.HasPrefix(key, "crls/"):
+		b.crlUpdateMutex.Lock()
+		defer b.crlUpdateMutex.Unlock()
+		b.crls = nil
+	case key == "config":
+		b.configUpdated.Store(true)
+	}
+}
+
+func (b *backend) initOCSPClient(cacheSize int) {
+	b.ocspClient = ocsp.New(func() hclog.Logger {
+		return b.Logger()
+	}, cacheSize)
+}
+
+func (b *backend) updatedConfig(config *config) {
+	b.ocspClientMutex.Lock()
+	defer b.ocspClientMutex.Unlock()
+	b.initOCSPClient(config.OcspCacheSize)
+	b.configUpdated.Store(false)
+}
+
+func (b *backend) fetchCRL(ctx context.Context, storage logical.Storage, name string, crl *CRLInfo) error {
+	response, err := http.Get(crl.CDP.Url)
+	if err != nil {
+		return err
+	}
+	// always close the response body to avoid leaking the underlying connection
+	defer response.Body.Close()
+	if response.StatusCode == http.StatusOK {
+		body, err := io.ReadAll(response.Body)
+		if err != nil {
+			return err
+		}
+		certList, err := x509.ParseCRL(body)
+		if err != nil {
+			return err
+		}
+		crl.CDP.ValidUntil = certList.TBSCertList.NextUpdate
+		return b.setCRL(ctx, storage, certList, name, crl.CDP)
+	}
+	return fmt.Errorf("unexpected response code %d fetching CRL from %s", response.StatusCode, crl.CDP.Url)
+}
+
+func (b *backend) updateCRLs(ctx context.Context, req *logical.Request) error {
+	b.crlUpdateMutex.Lock()
+	defer b.crlUpdateMutex.Unlock()
+	var errs *multierror.Error
+	for name, crl := range b.crls {
+		if crl.CDP != nil && time.Now().After(crl.CDP.ValidUntil) {
+			if err := b.fetchCRL(ctx, req.Storage, name, &crl); err != nil {
+				errs = multierror.Append(errs, err)
+			}
+		}
+	}
+	return errs.ErrorOrNil()
+}
+
+func (b *backend) storeConfig(ctx context.Context, storage 
logical.Storage, config *config) error { + entry, err := logical.StorageEntryJSON("config", config) + if err != nil { + return err + } + + if err := storage.Put(ctx, entry); err != nil { + return err + } + b.updatedConfig(config) + return nil +} + +const backendHelp = ` +The "cert" credential provider allows authentication using +TLS client certificates. A client connects to Vault and uses +the "login" endpoint to generate a client token. + +Trusted certificates are configured using the "certs/" endpoint +by a user with root access. A certificate authority can be trusted, +which permits all keys signed by it. Alternatively, self-signed +certificates can be trusted avoiding the need for a CA. +` diff --git a/builtin/credential/cert/backend_test.go b/builtin/credential/cert/backend_test.go new file mode 100644 index 0000000..47d7ae0 --- /dev/null +++ b/builtin/credential/cert/backend_test.go @@ -0,0 +1,2342 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cert + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "io" + "io/ioutil" + "math/big" + mathrand "math/rand" + "net" + "net/http" + "net/url" + "os" + "path/filepath" + "reflect" + "testing" + "time" + + "github.com/go-test/deep" + "github.com/hashicorp/go-sockaddr" + + "golang.org/x/net/http2" + + cleanhttp "github.com/hashicorp/go-cleanhttp" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + vaulthttp "github.com/hashicorp/vault/http" + + rootcerts "github.com/hashicorp/go-rootcerts" + "github.com/hashicorp/vault/builtin/logical/pki" + logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/tokenutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + "github.com/mitchellh/mapstructure" +) + +const ( + serverCertPath = "test-fixtures/cacert.pem" + serverKeyPath = "test-fixtures/cakey.pem" + serverCAPath = serverCertPath + + testRootCACertPath1 = "test-fixtures/testcacert1.pem" + testRootCAKeyPath1 = "test-fixtures/testcakey1.pem" + testCertPath1 = "test-fixtures/testissuedcert4.pem" + testKeyPath1 = "test-fixtures/testissuedkey4.pem" + testIssuedCertCRL = "test-fixtures/issuedcertcrl" + + testRootCACertPath2 = "test-fixtures/testcacert2.pem" + testRootCAKeyPath2 = "test-fixtures/testcakey2.pem" + testRootCertCRL = "test-fixtures/cacert2crl" +) + +func generateTestCertAndConnState(t *testing.T, template *x509.Certificate) (string, tls.ConnectionState, error) { + t.Helper() + tempDir, err := ioutil.TempDir("", "vault-cert-auth-test-") + if err != nil { + t.Fatal(err) + } + t.Logf("test %s, temp dir %s", t.Name(), tempDir) + caCertTemplate := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "localhost", + }, + DNSNames: []string{"localhost"}, + IPAddresses: []net.IP{net.ParseIP("127.0.0.1")}, + KeyUsage: x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign), + SerialNumber: big.NewInt(mathrand.Int63()), + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + BasicConstraintsValid: true, + IsCA: true, + } + caKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + caBytes, err := x509.CreateCertificate(rand.Reader, caCertTemplate, caCertTemplate, caKey.Public(), caKey) + if err != nil { + 
t.Fatal(err)
+	}
+	caCert, err := x509.ParseCertificate(caBytes)
+	if err != nil {
+		t.Fatal(err)
+	}
+	caCertPEMBlock := &pem.Block{
+		Type:  "CERTIFICATE",
+		Bytes: caBytes,
+	}
+	err = ioutil.WriteFile(filepath.Join(tempDir, "ca_cert.pem"), pem.EncodeToMemory(caCertPEMBlock), 0o755)
+	if err != nil {
+		t.Fatal(err)
+	}
+	marshaledCAKey, err := x509.MarshalECPrivateKey(caKey)
+	if err != nil {
+		t.Fatal(err)
+	}
+	caKeyPEMBlock := &pem.Block{
+		Type:  "EC PRIVATE KEY",
+		Bytes: marshaledCAKey,
+	}
+	err = ioutil.WriteFile(filepath.Join(tempDir, "ca_key.pem"), pem.EncodeToMemory(caKeyPEMBlock), 0o755)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	if err != nil {
+		t.Fatal(err)
+	}
+	certBytes, err := x509.CreateCertificate(rand.Reader, template, caCert, key.Public(), caKey)
+	if err != nil {
+		t.Fatal(err)
+	}
+	certPEMBlock := &pem.Block{
+		Type:  "CERTIFICATE",
+		Bytes: certBytes,
+	}
+	err = ioutil.WriteFile(filepath.Join(tempDir, "cert.pem"), pem.EncodeToMemory(certPEMBlock), 0o755)
+	if err != nil {
+		t.Fatal(err)
+	}
+	marshaledKey, err := x509.MarshalECPrivateKey(key)
+	if err != nil {
+		t.Fatal(err)
+	}
+	keyPEMBlock := &pem.Block{
+		Type:  "EC PRIVATE KEY",
+		Bytes: marshaledKey,
+	}
+	err = ioutil.WriteFile(filepath.Join(tempDir, "key.pem"), pem.EncodeToMemory(keyPEMBlock), 0o755)
+	if err != nil {
+		t.Fatal(err)
+	}
+	connInfo, err := testConnState(filepath.Join(tempDir, "cert.pem"), filepath.Join(tempDir, "key.pem"), filepath.Join(tempDir, "ca_cert.pem"))
+	return tempDir, connInfo, err
+}
+
+// Unlike testConnState, this method does not use the same 'tls.Config' objects for
+// both dialing and listening. Instead, it runs the server without specifying its CA.
+// The client, however, presents the server's CA cert in order to trust the server.
+// The client can present a cert and key which are completely independent of the
+// server's CA. The connection state returned will contain the certificate presented
+// by the client.
+func connectionState(serverCAPath, serverCertPath, serverKeyPath, clientCertPath, clientKeyPath string) (tls.ConnectionState, error) {
+	serverKeyPair, err := tls.LoadX509KeyPair(serverCertPath, serverKeyPath)
+	if err != nil {
+		return tls.ConnectionState{}, err
+	}
+	// Prepare the listener configuration with server's key pair
+	listenConf := &tls.Config{
+		Certificates: []tls.Certificate{serverKeyPair},
+		ClientAuth:   tls.RequestClientCert,
+	}
+
+	clientKeyPair, err := tls.LoadX509KeyPair(clientCertPath, clientKeyPath)
+	if err != nil {
+		return tls.ConnectionState{}, err
+	}
+	// Load the CA cert required by the client to authenticate the server.
+	rootConfig := &rootcerts.Config{
+		CAFile: serverCAPath,
+	}
+	serverCAs, err := rootcerts.LoadCACerts(rootConfig)
+	if err != nil {
+		return tls.ConnectionState{}, err
+	}
+	// Prepare the dial configuration that the client uses to establish the connection.
+	dialConf := &tls.Config{
+		Certificates: []tls.Certificate{clientKeyPair},
+		RootCAs:      serverCAs,
+	}
+
+	// Start the server.
+	list, err := tls.Listen("tcp", "127.0.0.1:0", listenConf)
+	if err != nil {
+		return tls.ConnectionState{}, err
+	}
+	defer list.Close()
+
+	// Accept connections.
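+	// The server goroutine below reads a short "ping" from the client purely
+	// to force the TLS handshake to complete before the connection state is
+	// captured and handed back on the connState channel.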
+ serverErrors := make(chan error, 1) + connState := make(chan tls.ConnectionState) + go func() { + defer close(connState) + serverConn, err := list.Accept() + if err != nil { + serverErrors <- err + close(serverErrors) + return + } + defer serverConn.Close() + + // Read the ping + buf := make([]byte, 4) + _, err = serverConn.Read(buf) + if (err != nil) && (err != io.EOF) { + serverErrors <- err + close(serverErrors) + return + } + close(serverErrors) + connState <- serverConn.(*tls.Conn).ConnectionState() + }() + + // Establish a connection from the client side and write a few bytes. + clientErrors := make(chan error, 1) + go func() { + addr := list.Addr().String() + conn, err := tls.Dial("tcp", addr, dialConf) + if err != nil { + clientErrors <- err + close(clientErrors) + return + } + defer conn.Close() + + // Write ping + _, err = conn.Write([]byte("ping")) + if err != nil { + clientErrors <- err + } + close(clientErrors) + }() + + for err = range clientErrors { + if err != nil { + return tls.ConnectionState{}, fmt.Errorf("error in client goroutine:%v", err) + } + } + + for err = range serverErrors { + if err != nil { + return tls.ConnectionState{}, fmt.Errorf("error in server goroutine:%v", err) + } + } + // Grab the current state + return <-connState, nil +} + +func TestBackend_PermittedDNSDomainsIntermediateCA(t *testing.T) { + // Enable PKI secret engine and Cert auth method + coreConfig := &vault.CoreConfig{ + DisableMlock: true, + DisableCache: true, + Logger: log.NewNullLogger(), + CredentialBackends: map[string]logical.Factory{ + "cert": Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "pki": pki.Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + cores := cluster.Cores + vault.TestWaitActive(t, cores[0].Core) + client := cores[0].Client + + var err error + + // Mount /pki as a root CA + err = client.Sys().Mount("pki", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "32h", + }, + }) + if err != nil { + t.Fatal(err) + } + + // Set the cluster's certificate as the root CA in /pki + pemBundleRootCA := string(cluster.CACertPEM) + string(cluster.CAKeyPEM) + _, err = client.Logical().Write("pki/config/ca", map[string]interface{}{ + "pem_bundle": pemBundleRootCA, + }) + if err != nil { + t.Fatal(err) + } + + // Mount /pki2 to operate as an intermediate CA + err = client.Sys().Mount("pki2", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "32h", + }, + }) + if err != nil { + t.Fatal(err) + } + + // Create a CSR for the intermediate CA + secret, err := client.Logical().Write("pki2/intermediate/generate/internal", nil) + if err != nil { + t.Fatal(err) + } + intermediateCSR := secret.Data["csr"].(string) + + // Sign the intermediate CSR using /pki + secret, err = client.Logical().Write("pki/root/sign-intermediate", map[string]interface{}{ + "permitted_dns_domains": ".myvault.com", + "csr": intermediateCSR, + }) + if err != nil { + t.Fatal(err) + } + intermediateCertPEM := secret.Data["certificate"].(string) + + // Configure the intermediate cert as the CA in /pki2 + _, err = client.Logical().Write("pki2/intermediate/set-signed", map[string]interface{}{ + "certificate": intermediateCertPEM, + }) + if err != nil { + t.Fatal(err) + } + + // Create a role on the intermediate CA mount + _, err = 
client.Logical().Write("pki2/roles/myvault-dot-com", map[string]interface{}{ + "allowed_domains": "myvault.com", + "allow_subdomains": "true", + "max_ttl": "5m", + }) + if err != nil { + t.Fatal(err) + } + + // Issue a leaf cert using the intermediate CA + secret, err = client.Logical().Write("pki2/issue/myvault-dot-com", map[string]interface{}{ + "common_name": "cert.myvault.com", + "format": "pem", + "ip_sans": "127.0.0.1", + }) + if err != nil { + t.Fatal(err) + } + leafCertPEM := secret.Data["certificate"].(string) + leafCertKeyPEM := secret.Data["private_key"].(string) + + // Enable the cert auth method + err = client.Sys().EnableAuthWithOptions("cert", &api.EnableAuthOptions{ + Type: "cert", + }) + if err != nil { + t.Fatal(err) + } + + // Set the intermediate CA cert as a trusted certificate in the backend + _, err = client.Logical().Write("auth/cert/certs/myvault-dot-com", map[string]interface{}{ + "display_name": "myvault.com", + "policies": "default", + "certificate": intermediateCertPEM, + }) + if err != nil { + t.Fatal(err) + } + + // Create temporary files for CA cert, client cert and client cert key. + // This is used to configure TLS in the api client. + caCertFile, err := ioutil.TempFile("", "caCert") + if err != nil { + t.Fatal(err) + } + defer os.Remove(caCertFile.Name()) + if _, err := caCertFile.Write([]byte(cluster.CACertPEM)); err != nil { + t.Fatal(err) + } + if err := caCertFile.Close(); err != nil { + t.Fatal(err) + } + + leafCertFile, err := ioutil.TempFile("", "leafCert") + if err != nil { + t.Fatal(err) + } + defer os.Remove(leafCertFile.Name()) + if _, err := leafCertFile.Write([]byte(leafCertPEM)); err != nil { + t.Fatal(err) + } + if err := leafCertFile.Close(); err != nil { + t.Fatal(err) + } + + leafCertKeyFile, err := ioutil.TempFile("", "leafCertKey") + if err != nil { + t.Fatal(err) + } + defer os.Remove(leafCertKeyFile.Name()) + if _, err := leafCertKeyFile.Write([]byte(leafCertKeyPEM)); err != nil { + t.Fatal(err) + } + if err := leafCertKeyFile.Close(); err != nil { + t.Fatal(err) + } + + // This function is a copy-pasta from the NewTestCluster, with the + // modification to reconfigure the TLS on the api client with the leaf + // certificate generated above. 
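+	// Outside of tests, the same client-certificate setup reduces to the TLS
+	// configuration step alone, e.g. (a sketch, with certificate file paths
+	// assumed to exist):
+	//
+	//	cfg := api.DefaultConfig()
+	//	cfg.ConfigureTLS(&api.TLSConfig{
+	//		CACert:     "ca.pem",
+	//		ClientCert: "leaf.pem",
+	//		ClientKey:  "leaf-key.pem",
+	//	})
+	//	client, err := api.NewClient(cfg)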
+ getAPIClient := func(port int, tlsConfig *tls.Config) *api.Client { + transport := cleanhttp.DefaultPooledTransport() + transport.TLSClientConfig = tlsConfig.Clone() + if err := http2.ConfigureTransport(transport); err != nil { + t.Fatal(err) + } + client := &http.Client{ + Transport: transport, + CheckRedirect: func(*http.Request, []*http.Request) error { + // This can of course be overridden per-test by using its own client + return fmt.Errorf("redirects not allowed in these tests") + }, + } + config := api.DefaultConfig() + if config.Error != nil { + t.Fatal(config.Error) + } + config.Address = fmt.Sprintf("https://127.0.0.1:%d", port) + config.HttpClient = client + + // Set the above issued certificates as the client certificates + config.ConfigureTLS(&api.TLSConfig{ + CACert: caCertFile.Name(), + ClientCert: leafCertFile.Name(), + ClientKey: leafCertKeyFile.Name(), + }) + + apiClient, err := api.NewClient(config) + if err != nil { + t.Fatal(err) + } + return apiClient + } + + // Create a new api client with the desired TLS configuration + newClient := getAPIClient(cores[0].Listeners[0].Address.Port, cores[0].TLSConfig()) + + secret, err = newClient.Logical().Write("auth/cert/login", map[string]interface{}{ + "name": "myvault-dot-com", + }) + if err != nil { + t.Fatal(err) + } + if secret.Auth == nil || secret.Auth.ClientToken == "" { + t.Fatalf("expected a successful authentication") + } + + // testing pathLoginRenew for cert auth + oldAccessor := secret.Auth.Accessor + newClient.SetToken(client.Token()) + secret, err = newClient.Logical().Write("auth/token/renew-accessor", map[string]interface{}{ + "accessor": secret.Auth.Accessor, + "increment": 3600, + }) + if err != nil { + t.Fatal(err) + } + + if secret.Auth == nil || secret.Auth.ClientToken != "" || secret.Auth.LeaseDuration != 3600 || secret.Auth.Accessor != oldAccessor { + t.Fatalf("unexpected accessor renewal") + } +} + +func TestBackend_MetadataBasedACLPolicy(t *testing.T) { + // Start cluster with cert auth method enabled + coreConfig := &vault.CoreConfig{ + DisableMlock: true, + DisableCache: true, + Logger: log.NewNullLogger(), + CredentialBackends: map[string]logical.Factory{ + "cert": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + cores := cluster.Cores + vault.TestWaitActive(t, cores[0].Core) + client := cores[0].Client + + var err error + + // Enable the cert auth method + err = client.Sys().EnableAuthWithOptions("cert", &api.EnableAuthOptions{ + Type: "cert", + }) + if err != nil { + t.Fatal(err) + } + + // Enable metadata in aliases + _, err = client.Logical().Write("auth/cert/config", map[string]interface{}{ + "enable_identity_alias_metadata": true, + }) + if err != nil { + t.Fatal(err) + } + + // Retrieve its accessor id + auths, err := client.Sys().ListAuth() + if err != nil { + t.Fatal(err) + } + + var accessor string + + for _, auth := range auths { + if auth.Type == "cert" { + accessor = auth.Accessor + } + } + + if accessor == "" { + t.Fatal("failed to find cert auth accessor") + } + + // Write ACL policy + err = client.Sys().PutPolicy("metadata-based", fmt.Sprintf(` +path "kv/cn/{{identity.entity.aliases.%s.metadata.common_name}}" { + capabilities = ["read"] +} +path "kv/ext/{{identity.entity.aliases.%s.metadata.2-1-1-1}}" { + capabilities = ["read"] +} +`, accessor, accessor)) + if err != nil { + t.Fatalf("err: %v", err) + } + + ca, err := 
ioutil.ReadFile("test-fixtures/root/rootcacert.pem") + if err != nil { + t.Fatalf("err: %v", err) + } + + // Set the trusted certificate in the backend + _, err = client.Logical().Write("auth/cert/certs/test", map[string]interface{}{ + "display_name": "test", + "policies": "metadata-based", + "certificate": string(ca), + "allowed_metadata_extensions": "2.1.1.1,1.2.3.45", + }) + if err != nil { + t.Fatal(err) + } + + // This function is a copy-paste from the NewTestCluster, with the + // modification to reconfigure the TLS on the api client with a + // specific client certificate. + getAPIClient := func(port int, tlsConfig *tls.Config) *api.Client { + transport := cleanhttp.DefaultPooledTransport() + transport.TLSClientConfig = tlsConfig.Clone() + if err := http2.ConfigureTransport(transport); err != nil { + t.Fatal(err) + } + client := &http.Client{ + Transport: transport, + CheckRedirect: func(*http.Request, []*http.Request) error { + // This can of course be overridden per-test by using its own client + return fmt.Errorf("redirects not allowed in these tests") + }, + } + config := api.DefaultConfig() + if config.Error != nil { + t.Fatal(config.Error) + } + config.Address = fmt.Sprintf("https://127.0.0.1:%d", port) + config.HttpClient = client + + // Set the client certificates + config.ConfigureTLS(&api.TLSConfig{ + CACertBytes: cluster.CACertPEM, + ClientCert: "test-fixtures/root/rootcawextcert.pem", + ClientKey: "test-fixtures/root/rootcawextkey.pem", + }) + + apiClient, err := api.NewClient(config) + if err != nil { + t.Fatal(err) + } + return apiClient + } + + // Create a new api client with the desired TLS configuration + newClient := getAPIClient(cores[0].Listeners[0].Address.Port, cores[0].TLSConfig()) + + var secret *api.Secret + + secret, err = newClient.Logical().Write("auth/cert/login", map[string]interface{}{ + "name": "test", + }) + if err != nil { + t.Fatal(err) + } + if secret.Auth == nil || secret.Auth.ClientToken == "" { + t.Fatalf("expected a successful authentication") + } + + // Check paths guarded by ACL policy + newClient.SetToken(secret.Auth.ClientToken) + + _, err = newClient.Logical().Read("kv/cn/example.com") + if err != nil { + t.Fatal(err) + } + + _, err = newClient.Logical().Read("kv/cn/not.example.com") + if err == nil { + t.Fatal("expected access denied") + } + + _, err = newClient.Logical().Read("kv/ext/A UTF8String Extension") + if err != nil { + t.Fatal(err) + } + + _, err = newClient.Logical().Read("kv/ext/bar") + if err == nil { + t.Fatal("expected access denied") + } +} + +func TestBackend_NonCAExpiry(t *testing.T) { + var resp *logical.Response + var err error + + // Create a self-signed certificate and issue a leaf certificate using the + // CA cert + template := &x509.Certificate{ + SerialNumber: big.NewInt(1234), + Subject: pkix.Name{ + CommonName: "localhost", + Organization: []string{"hashicorp"}, + OrganizationalUnit: []string{"vault"}, + }, + BasicConstraintsValid: true, + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(50 * time.Second), + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, + KeyUsage: x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign), + } + + // Set IP SAN + parsedIP := net.ParseIP("127.0.0.1") + if parsedIP == nil { + t.Fatalf("failed to create parsed IP") + } + template.IPAddresses = []net.IP{parsedIP} + + // Private key for CA cert + caPrivateKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatal(err) + } + + // Marshalling to be 
able to create PEM file + caPrivateKeyBytes := x509.MarshalPKCS1PrivateKey(caPrivateKey) + + caPublicKey := &caPrivateKey.PublicKey + + template.IsCA = true + + caCertBytes, err := x509.CreateCertificate(rand.Reader, template, template, caPublicKey, caPrivateKey) + if err != nil { + t.Fatal(err) + } + + caCert, err := x509.ParseCertificate(caCertBytes) + if err != nil { + t.Fatal(err) + } + + parsedCaBundle := &certutil.ParsedCertBundle{ + Certificate: caCert, + CertificateBytes: caCertBytes, + PrivateKeyBytes: caPrivateKeyBytes, + PrivateKeyType: certutil.RSAPrivateKey, + } + + caCertBundle, err := parsedCaBundle.ToCertBundle() + if err != nil { + t.Fatal(err) + } + + caCertFile, err := ioutil.TempFile("", "caCert") + if err != nil { + t.Fatal(err) + } + + defer os.Remove(caCertFile.Name()) + + if _, err := caCertFile.Write([]byte(caCertBundle.Certificate)); err != nil { + t.Fatal(err) + } + if err := caCertFile.Close(); err != nil { + t.Fatal(err) + } + + caKeyFile, err := ioutil.TempFile("", "caKey") + if err != nil { + t.Fatal(err) + } + + defer os.Remove(caKeyFile.Name()) + + if _, err := caKeyFile.Write([]byte(caCertBundle.PrivateKey)); err != nil { + t.Fatal(err) + } + if err := caKeyFile.Close(); err != nil { + t.Fatal(err) + } + + // Prepare template for non-CA cert + + template.IsCA = false + template.SerialNumber = big.NewInt(5678) + + template.KeyUsage = x509.KeyUsage(x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign) + issuedPrivateKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatal(err) + } + + issuedPrivateKeyBytes := x509.MarshalPKCS1PrivateKey(issuedPrivateKey) + + issuedPublicKey := &issuedPrivateKey.PublicKey + + // Keep a short certificate lifetime so logins can be tested both when + // cert is valid and when it gets expired + template.NotBefore = time.Now().Add(-2 * time.Second) + template.NotAfter = time.Now().Add(3 * time.Second) + + issuedCertBytes, err := x509.CreateCertificate(rand.Reader, template, caCert, issuedPublicKey, caPrivateKey) + if err != nil { + t.Fatal(err) + } + + issuedCert, err := x509.ParseCertificate(issuedCertBytes) + if err != nil { + t.Fatal(err) + } + + parsedIssuedBundle := &certutil.ParsedCertBundle{ + Certificate: issuedCert, + CertificateBytes: issuedCertBytes, + PrivateKeyBytes: issuedPrivateKeyBytes, + PrivateKeyType: certutil.RSAPrivateKey, + } + + issuedCertBundle, err := parsedIssuedBundle.ToCertBundle() + if err != nil { + t.Fatal(err) + } + + issuedCertFile, err := ioutil.TempFile("", "issuedCert") + if err != nil { + t.Fatal(err) + } + + defer os.Remove(issuedCertFile.Name()) + + if _, err := issuedCertFile.Write([]byte(issuedCertBundle.Certificate)); err != nil { + t.Fatal(err) + } + if err := issuedCertFile.Close(); err != nil { + t.Fatal(err) + } + + issuedKeyFile, err := ioutil.TempFile("", "issuedKey") + if err != nil { + t.Fatal(err) + } + + defer os.Remove(issuedKeyFile.Name()) + + if _, err := issuedKeyFile.Write([]byte(issuedCertBundle.PrivateKey)); err != nil { + t.Fatal(err) + } + if err := issuedKeyFile.Close(); err != nil { + t.Fatal(err) + } + + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // Register the Non-CA certificate of the client key pair + certData := map[string]interface{}{ + "certificate": issuedCertBundle.Certificate, + "policies": "abc", + "display_name": "cert1", + "ttl": 10000, + } + certReq := &logical.Request{ + Operation: 
logical.UpdateOperation, + Path: "certs/cert1", + Storage: storage, + Data: certData, + } + + resp, err = b.HandleRequest(context.Background(), certReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + // Create connection state using the certificates generated + connState, err := connectionState(caCertFile.Name(), caCertFile.Name(), caKeyFile.Name(), issuedCertFile.Name(), issuedKeyFile.Name()) + if err != nil { + t.Fatalf("error testing connection state:%v", err) + } + + loginReq := &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "login", + Connection: &logical.Connection{ + ConnState: &connState, + }, + } + + // Login when the certificate is still valid. Login should succeed. + resp, err = b.HandleRequest(context.Background(), loginReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + // Wait until the certificate expires + time.Sleep(5 * time.Second) + + // Login attempt after certificate expiry should fail + resp, err = b.HandleRequest(context.Background(), loginReq) + if err == nil { + t.Fatalf("expected error due to expired certificate") + } +} + +func TestBackend_RegisteredNonCA_CRL(t *testing.T) { + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + nonCACert, err := ioutil.ReadFile(testCertPath1) + if err != nil { + t.Fatal(err) + } + + // Register the Non-CA certificate of the client key pair + certData := map[string]interface{}{ + "certificate": nonCACert, + "policies": "abc", + "display_name": "cert1", + "ttl": 10000, + } + certReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "certs/cert1", + Storage: storage, + Data: certData, + } + + resp, err := b.HandleRequest(context.Background(), certReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + // Connection state is presenting the client Non-CA cert and its key. + // This is exactly what is registered at the backend. + connState, err := connectionState(serverCAPath, serverCertPath, serverKeyPath, testCertPath1, testKeyPath1) + if err != nil { + t.Fatalf("error testing connection state:%v", err) + } + loginReq := &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "login", + Connection: &logical.Connection{ + ConnState: &connState, + }, + } + // Login should succeed. + resp, err = b.HandleRequest(context.Background(), loginReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + // Register a CRL containing the issued client certificate used above. + issuedCRL, err := ioutil.ReadFile(testIssuedCertCRL) + if err != nil { + t.Fatal(err) + } + crlData := map[string]interface{}{ + "crl": issuedCRL, + } + crlReq := &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "crls/issuedcrl", + Data: crlData, + } + resp, err = b.HandleRequest(context.Background(), crlReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + // Ensure the CRL shows up on a list. 
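+ // (The backend stores CRLs under the crls/ prefix; the entry should be listed under the name it was registered with, "issuedcrl".)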
+ listReq := &logical.Request{ + Operation: logical.ListOperation, + Storage: storage, + Path: "crls", + Data: map[string]interface{}{}, + } + resp, err = b.HandleRequest(context.Background(), listReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + if len(resp.Data) != 1 || len(resp.Data["keys"].([]string)) != 1 || resp.Data["keys"].([]string)[0] != "issuedcrl" { + t.Fatalf("bad listing: resp:%v", resp) + } + + // Attempt login with the same connection state but with the CRL registered + resp, err = b.HandleRequest(context.Background(), loginReq) + if err != nil { + t.Fatal(err) + } + if resp == nil || !resp.IsError() { + t.Fatalf("expected failure due to revoked certificate") + } +} + +func TestBackend_CRLs(t *testing.T) { + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + clientCA1, err := ioutil.ReadFile(testRootCACertPath1) + if err != nil { + t.Fatal(err) + } + // Register the CA certificate of the client key pair + certData := map[string]interface{}{ + "certificate": clientCA1, + "policies": "abc", + "display_name": "cert1", + "ttl": 10000, + } + + certReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "certs/cert1", + Storage: storage, + Data: certData, + } + + resp, err := b.HandleRequest(context.Background(), certReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + // Connection state is presenting the client CA cert and its key. + // This is exactly what is registered at the backend. + connState, err := connectionState(serverCAPath, serverCertPath, serverKeyPath, testRootCACertPath1, testRootCAKeyPath1) + if err != nil { + t.Fatalf("error testing connection state:%v", err) + } + loginReq := &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "login", + Connection: &logical.Connection{ + ConnState: &connState, + }, + } + resp, err = b.HandleRequest(context.Background(), loginReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + // Now, without changing the registered client CA cert, present from + // the client side, a cert issued using the registered CA. + connState, err = connectionState(serverCAPath, serverCertPath, serverKeyPath, testCertPath1, testKeyPath1) + if err != nil { + t.Fatalf("error testing connection state: %v", err) + } + loginReq.Connection.ConnState = &connState + + // Attempt login with the updated connection + resp, err = b.HandleRequest(context.Background(), loginReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + // Register a CRL containing the issued client certificate used above. + issuedCRL, err := ioutil.ReadFile(testIssuedCertCRL) + if err != nil { + t.Fatal(err) + } + crlData := map[string]interface{}{ + "crl": issuedCRL, + } + + crlReq := &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "crls/issuedcrl", + Data: crlData, + } + resp, err = b.HandleRequest(context.Background(), crlReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + // Attempt login with the revoked certificate. 
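+ // The client certificate's serial now appears in a registered CRL, so the backend should reject this login even though the trusted CA entry itself is unchanged.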
+ resp, err = b.HandleRequest(context.Background(), loginReq) + if err != nil { + t.Fatal(err) + } + if resp == nil || !resp.IsError() { + t.Fatalf("expected failure due to revoked certificate") + } + + // Register a different client CA certificate. + clientCA2, err := ioutil.ReadFile(testRootCACertPath2) + if err != nil { + t.Fatal(err) + } + certData["certificate"] = clientCA2 + resp, err = b.HandleRequest(context.Background(), certReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + // Test login using a different client CA cert pair. + connState, err = connectionState(serverCAPath, serverCertPath, serverKeyPath, testRootCACertPath2, testRootCAKeyPath2) + if err != nil { + t.Fatalf("error testing connection state: %v", err) + } + loginReq.Connection.ConnState = &connState + + // Attempt login with the updated connection + resp, err = b.HandleRequest(context.Background(), loginReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + // Register a CRL containing the root CA certificate used above. + rootCRL, err := ioutil.ReadFile(testRootCertCRL) + if err != nil { + t.Fatal(err) + } + crlData["crl"] = rootCRL + resp, err = b.HandleRequest(context.Background(), crlReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + // Attempt login with the same connection state but with the CRL registered + resp, err = b.HandleRequest(context.Background(), loginReq) + if err != nil { + t.Fatal(err) + } + if resp == nil || !resp.IsError() { + t.Fatalf("expected failure due to revoked certificate") + } +} + +func testFactory(t *testing.T) logical.Backend { + storage := &logical.InmemStorage{} + b, err := Factory(context.Background(), &logical.BackendConfig{ + System: &logical.StaticSystemView{ + DefaultLeaseTTLVal: 1000 * time.Second, + MaxLeaseTTLVal: 1800 * time.Second, + }, + StorageView: storage, + }) + if err != nil { + t.Fatalf("error: %s", err) + } + if err := b.Initialize(context.Background(), &logical.InitializationRequest{ + Storage: storage, + }); err != nil { + t.Fatalf("error: %s", err) + } + return b +} + +// Test the certificates being registered to the backend +func TestBackend_CertWrites(t *testing.T) { + // CA cert + ca1, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem") + if err != nil { + t.Fatalf("err: %v", err) + } + // Non CA Cert + ca2, err := ioutil.ReadFile("test-fixtures/keys/cert.pem") + if err != nil { + t.Fatalf("err: %v", err) + } + // Non CA cert without TLS web client authentication + ca3, err := ioutil.ReadFile("test-fixtures/noclientauthcert.pem") + if err != nil { + t.Fatalf("err: %v", err) + } + + tc := logicaltest.TestCase{ + CredentialBackend: testFactory(t), + Steps: []logicaltest.TestStep{ + testAccStepCert(t, "aaa", ca1, "foo", allowed{}, false), + testAccStepCert(t, "bbb", ca2, "foo", allowed{}, false), + testAccStepCert(t, "ccc", ca3, "foo", allowed{}, true), + }, + } + tc.Steps = append(tc.Steps, testAccStepListCerts(t, []string{"aaa", "bbb"})...) 
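+ // The appended list steps expect exactly "aaa" and "bbb": the "ccc" write should fail because its certificate lacks TLS web client authentication usage.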
+ logicaltest.Test(t, tc) +} + +// Test a client trusted by a CA +func TestBackend_basic_CA(t *testing.T) { + connState, err := testConnState("test-fixtures/keys/cert.pem", + "test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem") + if err != nil { + t.Fatalf("error testing connection state: %v", err) + } + ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem") + if err != nil { + t.Fatalf("err: %v", err) + } + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: testFactory(t), + Steps: []logicaltest.TestStep{ + testAccStepCert(t, "web", ca, "foo", allowed{}, false), + testAccStepLogin(t, connState), + testAccStepCertLease(t, "web", ca, "foo"), + testAccStepCertTTL(t, "web", ca, "foo"), + testAccStepLogin(t, connState), + testAccStepCertMaxTTL(t, "web", ca, "foo"), + testAccStepLogin(t, connState), + testAccStepCertNoLease(t, "web", ca, "foo"), + testAccStepLoginDefaultLease(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{names: "*.example.com"}, false), + testAccStepLogin(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{names: "*.invalid.com"}, false), + testAccStepLoginInvalid(t, connState), + }, + }) +} + +// Test CRL behavior +func TestBackend_Basic_CRLs(t *testing.T) { + connState, err := testConnState("test-fixtures/keys/cert.pem", + "test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem") + if err != nil { + t.Fatalf("error testing connection state: %v", err) + } + ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem") + if err != nil { + t.Fatalf("err: %v", err) + } + crl, err := ioutil.ReadFile("test-fixtures/root/root.crl") + if err != nil { + t.Fatalf("err: %v", err) + } + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: testFactory(t), + Steps: []logicaltest.TestStep{ + testAccStepCertNoLease(t, "web", ca, "foo"), + testAccStepLoginDefaultLease(t, connState), + testAccStepAddCRL(t, crl, connState), + testAccStepReadCRL(t, connState), + testAccStepLoginInvalid(t, connState), + testAccStepDeleteCRL(t, connState), + testAccStepLoginDefaultLease(t, connState), + }, + }) +} + +// Test a self-signed client (root CA) that is trusted +func TestBackend_basic_singleCert(t *testing.T) { + connState, err := testConnState("test-fixtures/root/rootcacert.pem", + "test-fixtures/root/rootcakey.pem", "test-fixtures/root/rootcacert.pem") + if err != nil { + t.Fatalf("error testing connection state: %v", err) + } + ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem") + if err != nil { + t.Fatalf("err: %v", err) + } + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: testFactory(t), + Steps: []logicaltest.TestStep{ + testAccStepCert(t, "web", ca, "foo", allowed{}, false), + testAccStepLogin(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{names: "example.com"}, false), + testAccStepLogin(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{names: "invalid"}, false), + testAccStepLoginInvalid(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{ext: "1.2.3.4:invalid"}, false), + testAccStepLoginInvalid(t, connState), + }, + }) +} + +func TestBackend_common_name_singleCert(t *testing.T) { + connState, err := testConnState("test-fixtures/root/rootcacert.pem", + "test-fixtures/root/rootcakey.pem", "test-fixtures/root/rootcacert.pem") + if err != nil { + t.Fatalf("error testing connection state: %v", err) + } + ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem") + if err != nil { + t.Fatalf("err: %v", err) + } + logicaltest.Test(t, 
logicaltest.TestCase{ + CredentialBackend: testFactory(t), + Steps: []logicaltest.TestStep{ + testAccStepCert(t, "web", ca, "foo", allowed{}, false), + testAccStepLogin(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{common_names: "example.com"}, false), + testAccStepLogin(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{common_names: "invalid"}, false), + testAccStepLoginInvalid(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{ext: "1.2.3.4:invalid"}, false), + testAccStepLoginInvalid(t, connState), + }, + }) +} + +// Test a self-signed client with custom ext (root CA) that is trusted +func TestBackend_ext_singleCert(t *testing.T) { + connState, err := testConnState( + "test-fixtures/root/rootcawextcert.pem", + "test-fixtures/root/rootcawextkey.pem", + "test-fixtures/root/rootcacert.pem", + ) + if err != nil { + t.Fatalf("error testing connection state: %v", err) + } + ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem") + if err != nil { + t.Fatalf("err: %v", err) + } + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: testFactory(t), + Steps: []logicaltest.TestStep{ + testAccStepCert(t, "web", ca, "foo", allowed{ext: "2.1.1.1:A UTF8String Extension"}, false), + testAccStepLogin(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{ext: "2.1.1.1:*,2.1.1.2:A UTF8*"}, false), + testAccStepLogin(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{ext: "1.2.3.45:*"}, false), + testAccStepLoginInvalid(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{ext: "2.1.1.1:The Wrong Value"}, false), + testAccStepLoginInvalid(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{ext: "2.1.1.1:*,2.1.1.2:The Wrong Value"}, false), + testAccStepLoginInvalid(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{ext: "2.1.1.1:"}, false), + testAccStepLoginInvalid(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{ext: "2.1.1.1:,2.1.1.2:*"}, false), + testAccStepLoginInvalid(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{names: "example.com", ext: "2.1.1.1:A UTF8String Extension"}, false), + testAccStepLogin(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{names: "example.com", ext: "2.1.1.1:*,2.1.1.2:A UTF8*"}, false), + testAccStepLogin(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{names: "example.com", ext: "1.2.3.45:*"}, false), + testAccStepLoginInvalid(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{names: "example.com", ext: "2.1.1.1:The Wrong Value"}, false), + testAccStepLoginInvalid(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{names: "example.com", ext: "2.1.1.1:*,2.1.1.2:The Wrong Value"}, false), + testAccStepLoginInvalid(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{names: "invalid", ext: "2.1.1.1:A UTF8String Extension"}, false), + testAccStepLoginInvalid(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{names: "invalid", ext: "2.1.1.1:*,2.1.1.2:A UTF8*"}, false), + testAccStepLoginInvalid(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{names: "invalid", ext: "1.2.3.45:*"}, false), + testAccStepLoginInvalid(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{names: "invalid", ext: "2.1.1.1:The Wrong Value"}, false), + testAccStepLoginInvalid(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{names: "invalid", ext: "2.1.1.1:*,2.1.1.2:The Wrong Value"}, false), + testAccStepLoginInvalid(t, connState), + testAccStepReadConfig(t, 
config{EnableIdentityAliasMetadata: false}, connState), + testAccStepCert(t, "web", ca, "foo", allowed{metadata_ext: "2.1.1.1,1.2.3.45"}, false), + testAccStepLoginWithMetadata(t, connState, "web", map[string]string{"2-1-1-1": "A UTF8String Extension"}, false), + testAccStepCert(t, "web", ca, "foo", allowed{metadata_ext: "1.2.3.45"}, false), + testAccStepLoginWithMetadata(t, connState, "web", map[string]string{}, false), + testAccStepSetConfig(t, config{EnableIdentityAliasMetadata: true}, connState), + testAccStepReadConfig(t, config{EnableIdentityAliasMetadata: true}, connState), + testAccStepCert(t, "web", ca, "foo", allowed{metadata_ext: "2.1.1.1,1.2.3.45"}, false), + testAccStepLoginWithMetadata(t, connState, "web", map[string]string{"2-1-1-1": "A UTF8String Extension"}, true), + testAccStepCert(t, "web", ca, "foo", allowed{metadata_ext: "1.2.3.45"}, false), + testAccStepLoginWithMetadata(t, connState, "web", map[string]string{}, true), + }, + }) +} + +// Test a self-signed client with DNS alt names (root CA) that is trusted +func TestBackend_dns_singleCert(t *testing.T) { + certTemplate := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "example.com", + }, + DNSNames: []string{"example.com"}, + IPAddresses: []net.IP{net.ParseIP("127.0.0.1")}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement, + SerialNumber: big.NewInt(mathrand.Int63()), + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + } + + tempDir, connState, err := generateTestCertAndConnState(t, certTemplate) + if tempDir != "" { + defer os.RemoveAll(tempDir) + } + if err != nil { + t.Fatalf("error testing connection state: %v", err) + } + ca, err := ioutil.ReadFile(filepath.Join(tempDir, "ca_cert.pem")) + if err != nil { + t.Fatalf("err: %v", err) + } + + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: testFactory(t), + Steps: []logicaltest.TestStep{ + testAccStepCert(t, "web", ca, "foo", allowed{dns: "example.com"}, false), + testAccStepLogin(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{dns: "*ample.com"}, false), + testAccStepLogin(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{dns: "notincert.com"}, false), + testAccStepLoginInvalid(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{dns: "abc"}, false), + testAccStepLoginInvalid(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{dns: "*.example.com"}, false), + testAccStepLoginInvalid(t, connState), + }, + }) +} + +// Test a self-signed client with email alt names (root CA) that is trusted +func TestBackend_email_singleCert(t *testing.T) { + certTemplate := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "example.com", + }, + EmailAddresses: []string{"valid@example.com"}, + IPAddresses: []net.IP{net.ParseIP("127.0.0.1")}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement, + SerialNumber: big.NewInt(mathrand.Int63()), + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + } + + tempDir, connState, err := generateTestCertAndConnState(t, certTemplate) + if tempDir != "" { + defer os.RemoveAll(tempDir) + } + if err != nil { + t.Fatalf("error testing connection state: %v", err) + } + ca, err := 
ioutil.ReadFile(filepath.Join(tempDir, "ca_cert.pem")) + if err != nil { + t.Fatalf("err: %v", err) + } + + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: testFactory(t), + Steps: []logicaltest.TestStep{ + testAccStepCert(t, "web", ca, "foo", allowed{emails: "valid@example.com"}, false), + testAccStepLogin(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{emails: "*@example.com"}, false), + testAccStepLogin(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{emails: "invalid@notincert.com"}, false), + testAccStepLoginInvalid(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{emails: "abc"}, false), + testAccStepLoginInvalid(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{emails: "*.example.com"}, false), + testAccStepLoginInvalid(t, connState), + }, + }) +} + +// Test a self-signed client with OU (root CA) that is trusted +func TestBackend_organizationalUnit_singleCert(t *testing.T) { + connState, err := testConnState( + "test-fixtures/root/rootcawoucert.pem", + "test-fixtures/root/rootcawoukey.pem", + "test-fixtures/root/rootcawoucert.pem", + ) + if err != nil { + t.Fatalf("error testing connection state: %v", err) + } + ca, err := ioutil.ReadFile("test-fixtures/root/rootcawoucert.pem") + if err != nil { + t.Fatalf("err: %v", err) + } + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: testFactory(t), + Steps: []logicaltest.TestStep{ + testAccStepCert(t, "web", ca, "foo", allowed{organizational_units: "engineering"}, false), + testAccStepLogin(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{organizational_units: "eng*"}, false), + testAccStepLogin(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{organizational_units: "engineering,finance"}, false), + testAccStepLogin(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{organizational_units: "foo"}, false), + testAccStepLoginInvalid(t, connState), + }, + }) +} + +// Test a self-signed client with URI alt names (root CA) that is trusted +func TestBackend_uri_singleCert(t *testing.T) { + u, err := url.Parse("spiffe://example.com/host") + if err != nil { + t.Fatal(err) + } + certTemplate := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "example.com", + }, + DNSNames: []string{"example.com"}, + IPAddresses: []net.IP{net.ParseIP("127.0.0.1")}, + URIs: []*url.URL{u}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement, + SerialNumber: big.NewInt(mathrand.Int63()), + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + } + + tempDir, connState, err := generateTestCertAndConnState(t, certTemplate) + if tempDir != "" { + defer os.RemoveAll(tempDir) + } + if err != nil { + t.Fatalf("error testing connection state: %v", err) + } + ca, err := ioutil.ReadFile(filepath.Join(tempDir, "ca_cert.pem")) + if err != nil { + t.Fatalf("err: %v", err) + } + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: testFactory(t), + Steps: []logicaltest.TestStep{ + testAccStepCert(t, "web", ca, "foo", allowed{uris: "spiffe://example.com/*"}, false), + testAccStepLogin(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{uris: "spiffe://example.com/host"}, false), + testAccStepLogin(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{uris: "spiffe://example.com/invalid"}, false), + testAccStepLoginInvalid(t, connState), + 
testAccStepCert(t, "web", ca, "foo", allowed{uris: "abc"}, false), + testAccStepLoginInvalid(t, connState), + testAccStepCert(t, "web", ca, "foo", allowed{uris: "http://www.google.com"}, false), + testAccStepLoginInvalid(t, connState), + }, + }) +} + +// Test against a collection of matching and non-matching rules +func TestBackend_mixed_constraints(t *testing.T) { + connState, err := testConnState("test-fixtures/keys/cert.pem", + "test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem") + if err != nil { + t.Fatalf("error testing connection state: %v", err) + } + ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem") + if err != nil { + t.Fatalf("err: %v", err) + } + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: testFactory(t), + Steps: []logicaltest.TestStep{ + testAccStepCert(t, "1unconstrained", ca, "foo", allowed{}, false), + testAccStepCert(t, "2matching", ca, "foo", allowed{names: "*.example.com,whatever"}, false), + testAccStepCert(t, "3invalid", ca, "foo", allowed{names: "invalid"}, false), + testAccStepLogin(t, connState), + // Assumes CertEntries are processed in alphabetical order (due to store.List), so we only match 2matching if 1unconstrained doesn't match + testAccStepLoginWithName(t, connState, "2matching"), + testAccStepLoginWithNameInvalid(t, connState, "3invalid"), + }, + }) +} + +// Test an untrusted client +func TestBackend_untrusted(t *testing.T) { + connState, err := testConnState("test-fixtures/keys/cert.pem", + "test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem") + if err != nil { + t.Fatalf("error testing connection state: %v", err) + } + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: testFactory(t), + Steps: []logicaltest.TestStep{ + testAccStepLoginInvalid(t, connState), + }, + }) +} + +func TestBackend_validCIDR(t *testing.T) { + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + connState, err := testConnState("test-fixtures/keys/cert.pem", + "test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem") + if err != nil { + t.Fatalf("error testing connection state: %v", err) + } + ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem") + if err != nil { + t.Fatalf("err: %v", err) + } + + name := "web" + boundCIDRs := []string{"127.0.0.1", "128.252.0.0/16"} + + addCertReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "certs/" + name, + Data: map[string]interface{}{ + "certificate": string(ca), + "policies": "foo", + "display_name": name, + "allowed_names": "", + "required_extensions": "", + "lease": 1000, + "bound_cidrs": boundCIDRs, + }, + Storage: storage, + Connection: &logical.Connection{ConnState: &connState}, + } + + _, err = b.HandleRequest(context.Background(), addCertReq) + if err != nil { + t.Fatal(err) + } + + readCertReq := &logical.Request{ + Operation: logical.ReadOperation, + Path: "certs/" + name, + Storage: storage, + Connection: &logical.Connection{ConnState: &connState}, + } + + readResult, err := b.HandleRequest(context.Background(), readCertReq) + if err != nil { + t.Fatal(err) + } + cidrsResult := readResult.Data["bound_cidrs"].([]*sockaddr.SockAddrMarshaler) + + if cidrsResult[0].String() != boundCIDRs[0] || + cidrsResult[1].String() != boundCIDRs[1] { + t.Fatalf("bound_cidrs couldn't be set correctly, EXPECTED: %v, ACTUAL: %v", boundCIDRs, cidrsResult) + } + + loginReq := &logical.Request{ + 
Operation: logical.UpdateOperation, + Path: "login", + Unauthenticated: true, + Data: map[string]interface{}{ + "name": name, + }, + Storage: storage, + Connection: &logical.Connection{ConnState: &connState}, + } + + // override the remote address with an IPV4 that is authorized + loginReq.Connection.RemoteAddr = "127.0.0.1/32" + + _, err = b.HandleRequest(context.Background(), loginReq) + if err != nil { + t.Fatal(err.Error()) + } +} + +func TestBackend_invalidCIDR(t *testing.T) { + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + connState, err := testConnState("test-fixtures/keys/cert.pem", + "test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem") + if err != nil { + t.Fatalf("error testing connection state: %v", err) + } + ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem") + if err != nil { + t.Fatalf("err: %v", err) + } + + name := "web" + + addCertReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "certs/" + name, + Data: map[string]interface{}{ + "certificate": string(ca), + "policies": "foo", + "display_name": name, + "allowed_names": "", + "required_extensions": "", + "lease": 1000, + "bound_cidrs": []string{"127.0.0.1/32", "128.252.0.0/16"}, + }, + Storage: storage, + Connection: &logical.Connection{ConnState: &connState}, + } + + _, err = b.HandleRequest(context.Background(), addCertReq) + if err != nil { + t.Fatal(err) + } + + loginReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "login", + Unauthenticated: true, + Data: map[string]interface{}{ + "name": name, + }, + Storage: storage, + Connection: &logical.Connection{ConnState: &connState}, + } + + // override the remote address with an IPV4 that isn't authorized + loginReq.Connection.RemoteAddr = "127.0.0.1/8" + + _, err = b.HandleRequest(context.Background(), loginReq) + if err == nil { + t.Fatal("expected \"ERROR: permission denied\"") + } +} + +func testAccStepAddCRL(t *testing.T, crl []byte, connState tls.ConnectionState) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "crls/test", + ConnState: &connState, + Data: map[string]interface{}{ + "crl": crl, + }, + } +} + +func testAccStepReadCRL(t *testing.T, connState tls.ConnectionState) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "crls/test", + ConnState: &connState, + Check: func(resp *logical.Response) error { + crlInfo := CRLInfo{} + err := mapstructure.Decode(resp.Data, &crlInfo) + if err != nil { + t.Fatalf("err: %v", err) + } + if len(crlInfo.Serials) != 1 { + t.Fatalf("bad: expected CRL with length 1, got %d", len(crlInfo.Serials)) + } + if _, ok := crlInfo.Serials["637101449987587619778072672905061040630001617053"]; !ok { + t.Fatalf("bad: expected serial number not found in CRL") + } + return nil + }, + } +} + +func testAccStepDeleteCRL(t *testing.T, connState tls.ConnectionState) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.DeleteOperation, + Path: "crls/test", + ConnState: &connState, + } +} + +func testAccStepSetConfig(t *testing.T, conf config, connState tls.ConnectionState) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "config", + ConnState: &connState, + Data: map[string]interface{}{ + "enable_identity_alias_metadata": conf.EnableIdentityAliasMetadata, + }, + } 
+} + +func testAccStepReadConfig(t *testing.T, conf config, connState tls.ConnectionState) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "config", + ConnState: &connState, + Check: func(resp *logical.Response) error { + value, ok := resp.Data["enable_identity_alias_metadata"] + if !ok { + t.Fatalf("enable_identity_alias_metadata not found in response") + } + + b, ok := value.(bool) + if !ok { + t.Fatalf("bad: expected enable_identity_alias_metadata to be a bool") + } + + if b != conf.EnableIdentityAliasMetadata { + t.Fatalf("bad: expected enable_identity_alias_metadata to be %t, got %t", conf.EnableIdentityAliasMetadata, b) + } + + return nil + }, + } +} + +func testAccStepLogin(t *testing.T, connState tls.ConnectionState) logicaltest.TestStep { + return testAccStepLoginWithName(t, connState, "") +} + +func testAccStepLoginWithName(t *testing.T, connState tls.ConnectionState, certName string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "login", + Unauthenticated: true, + ConnState: &connState, + Check: func(resp *logical.Response) error { + if resp.Auth.TTL != 1000*time.Second { + t.Fatalf("bad lease length: %#v", resp.Auth) + } + + if certName != "" && resp.Auth.DisplayName != ("mnt-"+certName) { + t.Fatalf("matched the wrong cert: %#v", resp.Auth.DisplayName) + } + + fn := logicaltest.TestCheckAuth([]string{"default", "foo"}) + return fn(resp) + }, + Data: map[string]interface{}{ + "name": certName, + }, + } +} + +func testAccStepLoginDefaultLease(t *testing.T, connState tls.ConnectionState) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "login", + Unauthenticated: true, + ConnState: &connState, + Check: func(resp *logical.Response) error { + if resp.Auth.TTL != 1000*time.Second { + t.Fatalf("bad lease length: %#v", resp.Auth) + } + + fn := logicaltest.TestCheckAuth([]string{"default", "foo"}) + return fn(resp) + }, + } +} + +func testAccStepLoginWithMetadata(t *testing.T, connState tls.ConnectionState, certName string, metadata map[string]string, expectAliasMetadata bool) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "login", + Unauthenticated: true, + ConnState: &connState, + Check: func(resp *logical.Response) error { + // Check for fixed metadata too + metadata["cert_name"] = certName + metadata["common_name"] = connState.PeerCertificates[0].Subject.CommonName + metadata["serial_number"] = connState.PeerCertificates[0].SerialNumber.String() + metadata["subject_key_id"] = certutil.GetHexFormatted(connState.PeerCertificates[0].SubjectKeyId, ":") + metadata["authority_key_id"] = certutil.GetHexFormatted(connState.PeerCertificates[0].AuthorityKeyId, ":") + + for key, expected := range metadata { + value, ok := resp.Auth.Metadata[key] + if !ok { + t.Fatalf("missing metadata key: %s", key) + } + + if value != expected { + t.Fatalf("expected metadata key %s to equal %s, but got: %s", key, expected, value) + } + + if expectAliasMetadata { + value, ok = resp.Auth.Alias.Metadata[key] + if !ok { + t.Fatalf("missing alias metadata key: %s", key) + } + + if value != expected { + t.Fatalf("expected metadata key %s to equal %s, but got: %s", key, expected, value) + } + } else { + if len(resp.Auth.Alias.Metadata) > 0 { + t.Fatal("found alias metadata keys, but should not have any") + } + } + } + + fn := logicaltest.TestCheckAuth([]string{"default", "foo"}) + return fn(resp) + }, + Data: 
map[string]interface{}{ + "metadata": metadata, + }, + } +} + +func testAccStepLoginInvalid(t *testing.T, connState tls.ConnectionState) logicaltest.TestStep { + return testAccStepLoginWithNameInvalid(t, connState, "") +} + +func testAccStepLoginWithNameInvalid(t *testing.T, connState tls.ConnectionState, certName string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "login", + Unauthenticated: true, + ConnState: &connState, + Check: func(resp *logical.Response) error { + if resp.Auth != nil { + return fmt.Errorf("should not be authorized: %#v", resp) + } + return nil + }, + Data: map[string]interface{}{ + "name": certName, + }, + ErrorOk: true, + } +} + +func testAccStepListCerts( + t *testing.T, certs []string, +) []logicaltest.TestStep { + return []logicaltest.TestStep{ + { + Operation: logical.ListOperation, + Path: "certs", + Check: func(resp *logical.Response) error { + if resp == nil { + return fmt.Errorf("nil response") + } + if resp.Data == nil { + return fmt.Errorf("nil data") + } + if resp.Data["keys"] == interface{}(nil) { + return fmt.Errorf("nil keys") + } + keys := resp.Data["keys"].([]string) + if !reflect.DeepEqual(keys, certs) { + return fmt.Errorf("mismatch: keys is %#v, certs is %#v", keys, certs) + } + return nil + }, + }, { + Operation: logical.ListOperation, + Path: "certs/", + Check: func(resp *logical.Response) error { + if resp == nil { + return fmt.Errorf("nil response") + } + if resp.Data == nil { + return fmt.Errorf("nil data") + } + if resp.Data["keys"] == interface{}(nil) { + return fmt.Errorf("nil keys") + } + keys := resp.Data["keys"].([]string) + if !reflect.DeepEqual(keys, certs) { + return fmt.Errorf("mismatch: keys is %#v, certs is %#v", keys, certs) + } + + return nil + }, + }, + } +} + +type allowed struct { + names string // allowed names in the certificate, looks at common name, dns, email [deprecated] + common_names string // allowed common names in the certificate + dns string // allowed dns names in the SAN extension of the certificate + emails string // allowed email addresses in the SAN extension of the certificate + uris string // allowed URIs in the SAN extension of the certificate + organizational_units string // allowed OUs in the certificate + ext string // required extensions in the certificate + metadata_ext string // allowed metadata extensions to add to identity alias +} + +func testAccStepCert(t *testing.T, name string, cert []byte, policies string, testData allowed, expectError bool) logicaltest.TestStep { + return testAccStepCertWithExtraParams(t, name, cert, policies, testData, expectError, nil) +} + +func testAccStepCertWithExtraParams(t *testing.T, name string, cert []byte, policies string, testData allowed, expectError bool, extraParams map[string]interface{}) logicaltest.TestStep { + data := map[string]interface{}{ + "certificate": string(cert), + "policies": policies, + "display_name": name, + "allowed_names": testData.names, + "allowed_common_names": testData.common_names, + "allowed_dns_sans": testData.dns, + "allowed_email_sans": testData.emails, + "allowed_uri_sans": testData.uris, + "allowed_organizational_units": testData.organizational_units, + "required_extensions": testData.ext, + "allowed_metadata_extensions": testData.metadata_ext, + "lease": 1000, + } + for k, v := range extraParams { + data[k] = v + } + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "certs/" + name, + ErrorOk: expectError, + Data: data, + Check: func(resp *logical.Response) 
error { + if resp == nil && expectError { + return fmt.Errorf("expected error but received nil") + } + return nil + }, + } +} + +func testAccStepReadCertPolicy(t *testing.T, name string, expectError bool, expected map[string]interface{}) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "certs/" + name, + ErrorOk: expectError, + Data: nil, + Check: func(resp *logical.Response) error { + if (resp == nil || len(resp.Data) == 0) && expectError { + return fmt.Errorf("expected error but received nil") + } + for key, expectedValue := range expected { + actualValue := resp.Data[key] + if expectedValue != actualValue { + return fmt.Errorf("Expected to get [%v]=[%v] but read [%v]=[%v] from server for certs/%v: %v", key, expectedValue, key, actualValue, name, resp) + } + } + return nil + }, + } +} + +func testAccStepCertLease( + t *testing.T, name string, cert []byte, policies string, +) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "certs/" + name, + Data: map[string]interface{}{ + "certificate": string(cert), + "policies": policies, + "display_name": name, + "lease": 1000, + }, + } +} + +func testAccStepCertTTL( + t *testing.T, name string, cert []byte, policies string, +) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "certs/" + name, + Data: map[string]interface{}{ + "certificate": string(cert), + "policies": policies, + "display_name": name, + "ttl": "1000s", + }, + } +} + +func testAccStepCertMaxTTL( + t *testing.T, name string, cert []byte, policies string, +) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "certs/" + name, + Data: map[string]interface{}{ + "certificate": string(cert), + "policies": policies, + "display_name": name, + "ttl": "1000s", + "max_ttl": "1200s", + }, + } +} + +func testAccStepCertNoLease( + t *testing.T, name string, cert []byte, policies string, +) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "certs/" + name, + Data: map[string]interface{}{ + "certificate": string(cert), + "policies": policies, + "display_name": name, + }, + } +} + +func testConnState(certPath, keyPath, rootCertPath string) (tls.ConnectionState, error) { + cert, err := tls.LoadX509KeyPair(certPath, keyPath) + if err != nil { + return tls.ConnectionState{}, err + } + rootConfig := &rootcerts.Config{ + CAFile: rootCertPath, + } + rootCAs, err := rootcerts.LoadCACerts(rootConfig) + if err != nil { + return tls.ConnectionState{}, err + } + listenConf := &tls.Config{ + Certificates: []tls.Certificate{cert}, + ClientAuth: tls.RequestClientCert, + InsecureSkipVerify: false, + RootCAs: rootCAs, + } + dialConf := listenConf.Clone() + // start a server + list, err := tls.Listen("tcp", "127.0.0.1:0", listenConf) + if err != nil { + return tls.ConnectionState{}, err + } + defer list.Close() + + // Accept connections. + serverErrors := make(chan error, 1) + connState := make(chan tls.ConnectionState) + go func() { + defer close(connState) + serverConn, err := list.Accept() + serverErrors <- err + if err != nil { + close(serverErrors) + return + } + defer serverConn.Close() + + // Read the ping + buf := make([]byte, 4) + _, err = serverConn.Read(buf) + if (err != nil) && (err != io.EOF) { + serverErrors <- err + close(serverErrors) + return + } else { + // EOF is a reasonable error condition, so swallow it. 
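+ // Report a nil error so the test body can proceed; the connection state is sent on its own channel once the handshake has completed.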
+ serverErrors <- nil + } + close(serverErrors) + connState <- serverConn.(*tls.Conn).ConnectionState() + }() + + // Establish a connection from the client side and write a few bytes. + clientErrors := make(chan error, 1) + go func() { + addr := list.Addr().String() + conn, err := tls.Dial("tcp", addr, dialConf) + clientErrors <- err + if err != nil { + close(clientErrors) + return + } + defer conn.Close() + + // Write ping + _, err = conn.Write([]byte("ping")) + clientErrors <- err + close(clientErrors) + }() + + for err = range clientErrors { + if err != nil { + return tls.ConnectionState{}, fmt.Errorf("error in client goroutine:%v", err) + } + } + + for err = range serverErrors { + if err != nil { + return tls.ConnectionState{}, fmt.Errorf("error in server goroutine:%v", err) + } + } + // Grab the current state + return <-connState, nil +} + +func Test_Renew(t *testing.T) { + storage := &logical.InmemStorage{} + + lb, err := Factory(context.Background(), &logical.BackendConfig{ + System: &logical.StaticSystemView{ + DefaultLeaseTTLVal: 300 * time.Second, + MaxLeaseTTLVal: 1800 * time.Second, + }, + StorageView: storage, + }) + if err != nil { + t.Fatalf("error: %s", err) + } + + b := lb.(*backend) + connState, err := testConnState("test-fixtures/keys/cert.pem", + "test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem") + if err != nil { + t.Fatalf("error testing connection state: %v", err) + } + ca, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem") + if err != nil { + t.Fatal(err) + } + + req := &logical.Request{ + Connection: &logical.Connection{ + ConnState: &connState, + }, + Storage: storage, + Auth: &logical.Auth{}, + } + + fd := &framework.FieldData{ + Raw: map[string]interface{}{ + "name": "test", + "certificate": ca, + "policies": "foo,bar", + }, + Schema: pathCerts(b).Fields, + } + + resp, err := b.pathCertWrite(context.Background(), req, fd) + if err != nil { + t.Fatal(err) + } + + empty_login_fd := &framework.FieldData{ + Raw: map[string]interface{}{}, + Schema: pathLogin(b).Fields, + } + resp, err = b.pathLogin(context.Background(), req, empty_login_fd) + if err != nil { + t.Fatal(err) + } + if resp.IsError() { + t.Fatalf("got error: %#v", *resp) + } + req.Auth.InternalData = resp.Auth.InternalData + req.Auth.Metadata = resp.Auth.Metadata + req.Auth.LeaseOptions = resp.Auth.LeaseOptions + req.Auth.Policies = resp.Auth.Policies + req.Auth.TokenPolicies = req.Auth.Policies + req.Auth.Period = resp.Auth.Period + + // Normal renewal + resp, err = b.pathLoginRenew(context.Background(), req, empty_login_fd) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("got nil response from renew") + } + if resp.IsError() { + t.Fatalf("got error: %#v", *resp) + } + + // Change the policies -- this should fail + fd.Raw["policies"] = "zip,zap" + resp, err = b.pathCertWrite(context.Background(), req, fd) + if err != nil { + t.Fatal(err) + } + + resp, err = b.pathLoginRenew(context.Background(), req, empty_login_fd) + if err == nil { + t.Fatal("expected error") + } + + // Put the policies back, this should be okay + fd.Raw["policies"] = "bar,foo" + resp, err = b.pathCertWrite(context.Background(), req, fd) + if err != nil { + t.Fatal(err) + } + + resp, err = b.pathLoginRenew(context.Background(), req, empty_login_fd) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("got nil response from renew") + } + if resp.IsError() { + t.Fatalf("got error: %#v", *resp) + } + + // Add period value to cert entry + period := 350 * time.Second + 
fd.Raw["period"] = period.String() + resp, err = b.pathCertWrite(context.Background(), req, fd) + if err != nil { + t.Fatal(err) + } + + resp, err = b.pathLoginRenew(context.Background(), req, empty_login_fd) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("got nil response from renew") + } + if resp.IsError() { + t.Fatalf("got error: %#v", *resp) + } + + if resp.Auth.Period != period { + t.Fatalf("expected a period value of %s in the response, got: %s", period, resp.Auth.Period) + } + + // Delete CA, make sure we can't renew + resp, err = b.pathCertDelete(context.Background(), req, fd) + if err != nil { + t.Fatal(err) + } + + resp, err = b.pathLoginRenew(context.Background(), req, empty_login_fd) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("got nil response from renew") + } + if !resp.IsError() { + t.Fatal("expected error") + } +} + +func TestBackend_CertUpgrade(t *testing.T) { + s := &logical.InmemStorage{} + + config := logical.TestBackendConfig() + config.StorageView = s + + ctx := context.Background() + + b := Backend() + if b == nil { + t.Fatalf("failed to create backend") + } + if err := b.Setup(ctx, config); err != nil { + t.Fatal(err) + } + + foo := &CertEntry{ + Policies: []string{"foo"}, + Period: time.Second, + TTL: time.Second, + MaxTTL: time.Second, + BoundCIDRs: []*sockaddr.SockAddrMarshaler{{SockAddr: sockaddr.MustIPAddr("127.0.0.1")}}, + } + + entry, err := logical.StorageEntryJSON("cert/foo", foo) + if err != nil { + t.Fatal(err) + } + err = s.Put(ctx, entry) + if err != nil { + t.Fatal(err) + } + + certEntry, err := b.Cert(ctx, s, "foo") + if err != nil { + t.Fatal(err) + } + + exp := &CertEntry{ + Policies: []string{"foo"}, + Period: time.Second, + TTL: time.Second, + MaxTTL: time.Second, + BoundCIDRs: []*sockaddr.SockAddrMarshaler{{SockAddr: sockaddr.MustIPAddr("127.0.0.1")}}, + TokenParams: tokenutil.TokenParams{ + TokenPolicies: []string{"foo"}, + TokenPeriod: time.Second, + TokenTTL: time.Second, + TokenMaxTTL: time.Second, + TokenBoundCIDRs: []*sockaddr.SockAddrMarshaler{{SockAddr: sockaddr.MustIPAddr("127.0.0.1")}}, + }, + } + if diff := deep.Equal(certEntry, exp); diff != nil { + t.Fatal(diff) + } +} diff --git a/builtin/credential/cert/cli.go b/builtin/credential/cert/cli.go new file mode 100644 index 0000000..3ba1e71 --- /dev/null +++ b/builtin/credential/cert/cli.go @@ -0,0 +1,64 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cert + +import ( + "fmt" + "strings" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/mapstructure" +) + +type CLIHandler struct{} + +func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) { + var data struct { + Mount string `mapstructure:"mount"` + Name string `mapstructure:"name"` + } + if err := mapstructure.WeakDecode(m, &data); err != nil { + return nil, err + } + + if data.Mount == "" { + data.Mount = "cert" + } + + options := map[string]interface{}{ + "name": data.Name, + } + path := fmt.Sprintf("auth/%s/login", data.Mount) + secret, err := c.Logical().Write(path, options) + if err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("empty response from credential provider") + } + + return secret, nil +} + +func (h *CLIHandler) Help() string { + help := ` +Usage: vault login -method=cert [CONFIG K=V...] + + The certificate auth method allows users to authenticate with a + client certificate passed with the request. 
The -client-cert and -client-key + flags are included with the "vault login" command, NOT as configuration to the + auth method. + + Authenticate using a local client certificate: + + $ vault login -method=cert -client-cert=cert.pem -client-key=key.pem + +Configuration: + + name= + Certificate role to authenticate against. +` + + return strings.TrimSpace(help) +} diff --git a/builtin/credential/cert/cmd/cert/main.go b/builtin/credential/cert/cmd/cert/main.go new file mode 100644 index 0000000..5b80a54 --- /dev/null +++ b/builtin/credential/cert/cmd/cert/main.go @@ -0,0 +1,34 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "os" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/credential/cert" + "github.com/hashicorp/vault/sdk/plugin" +) + +func main() { + apiClientMeta := &api.PluginAPIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args[1:]) + + tlsConfig := apiClientMeta.GetTLSConfig() + tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) + + if err := plugin.ServeMultiplex(&plugin.ServeOpts{ + BackendFactoryFunc: cert.Factory, + // set the TLSProviderFunc so that the plugin maintains backwards + // compatibility with Vault versions that don’t support plugin AutoMTLS + TLSProviderFunc: tlsProviderFunc, + }); err != nil { + logger := hclog.New(&hclog.LoggerOptions{}) + + logger.Error("plugin shutting down", "error", err) + os.Exit(1) + } +} diff --git a/builtin/credential/cert/path_certs.go b/builtin/credential/cert/path_certs.go new file mode 100644 index 0000000..03a3e55 --- /dev/null +++ b/builtin/credential/cert/path_certs.go @@ -0,0 +1,532 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cert + +import ( + "context" + "crypto/x509" + "fmt" + "strings" + "time" + + "github.com/hashicorp/go-sockaddr" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/tokenutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathListCerts(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "certs/?", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixCert, + OperationSuffix: "certificates", + Navigation: true, + ItemType: "Certificate", + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: b.pathCertList, + }, + + HelpSynopsis: pathCertHelpSyn, + HelpDescription: pathCertHelpDesc, + } +} + +func pathCerts(b *backend) *framework.Path { + p := &framework.Path{ + Pattern: "certs/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixCert, + OperationSuffix: "certificate", + Action: "Create", + ItemType: "Certificate", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "The name of the certificate", + }, + + "certificate": { + Type: framework.TypeString, + Description: `The public certificate that should be trusted. 
+Must be x509 PEM encoded.`, + DisplayAttrs: &framework.DisplayAttributes{ + EditType: "file", + }, + }, + "ocsp_enabled": { + Type: framework.TypeBool, + Description: `Whether to attempt OCSP verification of certificates at login`, + }, + "ocsp_ca_certificates": { + Type: framework.TypeString, + Description: `Any additional CA certificates needed to communicate with OCSP servers`, + DisplayAttrs: &framework.DisplayAttributes{ + EditType: "file", + }, + }, + "ocsp_servers_override": { + Type: framework.TypeCommaStringSlice, + Description: `A comma-separated list of OCSP server addresses. If unset, the OCSP server is determined +from the AuthorityInformationAccess extension on the certificate being inspected.`, + DisplayAttrs: &framework.DisplayAttributes{ + Description: "A list of OCSP server addresses. If unset, the OCSP server is determined from the AuthorityInformationAccess extension on the certificate being inspected.", + }, + }, + "ocsp_fail_open": { + Type: framework.TypeBool, + Default: false, + Description: "If set to true, if an OCSP revocation cannot be made successfully, login will proceed rather than failing. If false, failing to get an OCSP status fails the request.", + }, + "ocsp_query_all_servers": { + Type: framework.TypeBool, + Default: false, + Description: "If set to true, rather than accepting the first successful OCSP response, query all servers and consider the certificate valid only if all servers agree.", + }, + "allowed_names": { + Type: framework.TypeCommaStringSlice, + Description: `A comma-separated list of names. +At least one must exist in either the Common Name or SANs. Supports globbing. +This parameter is deprecated, please use allowed_common_names, allowed_dns_sans, +allowed_email_sans, allowed_uri_sans.`, + DisplayAttrs: &framework.DisplayAttributes{ + Group: "Constraints", + Description: "A list of names. At least one must exist in either the Common Name or SANs. Supports globbing. This parameter is deprecated, please use allowed_common_names, allowed_dns_sans, allowed_email_sans, allowed_uri_sans.", + }, + }, + + "allowed_common_names": { + Type: framework.TypeCommaStringSlice, + Description: `A comma-separated list of names. +At least one must exist in the Common Name. Supports globbing.`, + DisplayAttrs: &framework.DisplayAttributes{ + Group: "Constraints", + Description: "A list of names. At least one must exist in the Common Name. Supports globbing.", + }, + }, + + "allowed_dns_sans": { + Type: framework.TypeCommaStringSlice, + Description: `A comma-separated list of DNS names. +At least one must exist in the SANs. Supports globbing.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Allowed DNS SANs", + Group: "Constraints", + Description: "A list of DNS names. At least one must exist in the SANs. Supports globbing.", + }, + }, + + "allowed_email_sans": { + Type: framework.TypeCommaStringSlice, + Description: `A comma-separated list of Email Addresses. +At least one must exist in the SANs. Supports globbing.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Allowed Email SANs", + Group: "Constraints", + Description: "A list of Email Addresses. At least one must exist in the SANs. Supports globbing.", + }, + }, + + "allowed_uri_sans": { + Type: framework.TypeCommaStringSlice, + Description: `A comma-separated list of URIs. +At least one must exist in the SANs. Supports globbing.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Allowed URI SANs", + Group: "Constraints", + Description: "A list of URIs. 
At least one must exist in the SANs. Supports globbing.",
+			},
+		},
+
+		"allowed_organizational_units": {
+			Type: framework.TypeCommaStringSlice,
+			Description: `A comma-separated list of Organizational Unit names.
+At least one must exist in the OU field.`,
+			DisplayAttrs: &framework.DisplayAttributes{
+				Group:       "Constraints",
+				Description: "A list of Organizational Unit names. At least one must exist in the OU field.",
+			},
+		},
+
+		"required_extensions": {
+			Type: framework.TypeCommaStringSlice,
+			Description: `A comma-separated string or array of extensions
+formatted as "oid:value". Expects the extension value to be some type of ASN1 encoded string.
+All values must match. Supports globbing on "value".`,
+			DisplayAttrs: &framework.DisplayAttributes{
+				Description: "A list of extensions formatted as 'oid:value'. Expects the extension value to be some type of ASN1 encoded string. All values must match. Supports globbing on 'value'.",
+			},
+		},
+
+		"allowed_metadata_extensions": {
+			Type: framework.TypeCommaStringSlice,
+			Description: `A comma-separated string or array of oid extensions.
+Upon successful authentication, these extensions will be added as metadata if they are present
+in the certificate. The metadata key will be the string consisting of the oid numbers
+separated by a dash (-) instead of a dot (.) to allow usage in ACL templates.`,
+			DisplayAttrs: &framework.DisplayAttributes{
+				Description: "A list of OID extensions. Upon successful authentication, these extensions will be added as metadata if they are present in the certificate. The metadata key will be the string consisting of the OID numbers separated by a dash (-) instead of a dot (.) to allow usage in ACL templates.",
+			},
+		},
+
+		"display_name": {
+			Type: framework.TypeString,
+			Description: `The display name to use for clients using this
+certificate.`,
+		},
+
+		"policies": {
+			Type:        framework.TypeCommaStringSlice,
+			Description: tokenutil.DeprecationText("token_policies"),
+			Deprecated:  true,
+		},
+
+		"lease": {
+			Type:        framework.TypeInt,
+			Description: tokenutil.DeprecationText("token_ttl"),
+			Deprecated:  true,
+		},
+
+		"ttl": {
+			Type:        framework.TypeDurationSecond,
+			Description: tokenutil.DeprecationText("token_ttl"),
+			Deprecated:  true,
+		},
+
+		"max_ttl": {
+			Type:        framework.TypeDurationSecond,
+			Description: tokenutil.DeprecationText("token_max_ttl"),
+			Deprecated:  true,
+		},
+
+		"period": {
+			Type:        framework.TypeDurationSecond,
+			Description: tokenutil.DeprecationText("token_period"),
+			Deprecated:  true,
+		},
+
+		"bound_cidrs": {
+			Type:        framework.TypeCommaStringSlice,
+			Description: tokenutil.DeprecationText("token_bound_cidrs"),
+			Deprecated:  true,
+		},
+	},
+
+	Callbacks: map[logical.Operation]framework.OperationFunc{
+		logical.DeleteOperation: b.pathCertDelete,
+		logical.ReadOperation:   b.pathCertRead,
+		logical.UpdateOperation: b.pathCertWrite,
+	},
+
+	HelpSynopsis:    pathCertHelpSyn,
+	HelpDescription: pathCertHelpDesc,
+	}
+
+	tokenutil.AddTokenFields(p.Fields)
+	return p
+}
+
+func (b *backend) Cert(ctx context.Context, s logical.Storage, n string) (*CertEntry, error) {
+	entry, err := s.Get(ctx, "cert/"+strings.ToLower(n))
+	if err != nil {
+		return nil, err
+	}
+	if entry == nil {
+		return nil, nil
+	}
+
+	var result CertEntry
+	if err := entry.DecodeJSON(&result); err != nil {
+		return nil, err
+	}
+
+	if result.TokenTTL == 0 && result.TTL > 0 {
+		result.TokenTTL = result.TTL
+	}
+	if result.TokenMaxTTL == 0 && result.MaxTTL > 0 {
+		result.TokenMaxTTL = result.MaxTTL
+	}
+	if result.TokenPeriod
== 0 && result.Period > 0 { + result.TokenPeriod = result.Period + } + if len(result.TokenPolicies) == 0 && len(result.Policies) > 0 { + result.TokenPolicies = result.Policies + } + if len(result.TokenBoundCIDRs) == 0 && len(result.BoundCIDRs) > 0 { + result.TokenBoundCIDRs = result.BoundCIDRs + } + + return &result, nil +} + +func (b *backend) pathCertDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + err := req.Storage.Delete(ctx, "cert/"+strings.ToLower(d.Get("name").(string))) + if err != nil { + return nil, err + } + return nil, nil +} + +func (b *backend) pathCertList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + certs, err := req.Storage.List(ctx, "cert/") + if err != nil { + return nil, err + } + return logical.ListResponse(certs), nil +} + +func (b *backend) pathCertRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + cert, err := b.Cert(ctx, req.Storage, strings.ToLower(d.Get("name").(string))) + if err != nil { + return nil, err + } + if cert == nil { + return nil, nil + } + + data := map[string]interface{}{ + "certificate": cert.Certificate, + "display_name": cert.DisplayName, + "allowed_names": cert.AllowedNames, + "allowed_common_names": cert.AllowedCommonNames, + "allowed_dns_sans": cert.AllowedDNSSANs, + "allowed_email_sans": cert.AllowedEmailSANs, + "allowed_uri_sans": cert.AllowedURISANs, + "allowed_organizational_units": cert.AllowedOrganizationalUnits, + "required_extensions": cert.RequiredExtensions, + "allowed_metadata_extensions": cert.AllowedMetadataExtensions, + "ocsp_ca_certificates": cert.OcspCaCertificates, + "ocsp_enabled": cert.OcspEnabled, + "ocsp_servers_override": cert.OcspServersOverride, + "ocsp_fail_open": cert.OcspFailOpen, + "ocsp_query_all_servers": cert.OcspQueryAllServers, + } + cert.PopulateTokenData(data) + + if cert.TTL > 0 { + data["ttl"] = int64(cert.TTL.Seconds()) + } + if cert.MaxTTL > 0 { + data["max_ttl"] = int64(cert.MaxTTL.Seconds()) + } + if cert.Period > 0 { + data["period"] = int64(cert.Period.Seconds()) + } + if len(cert.Policies) > 0 { + data["policies"] = data["token_policies"] + } + if len(cert.BoundCIDRs) > 0 { + data["bound_cidrs"] = data["token_bound_cidrs"] + } + + return &logical.Response{ + Data: data, + }, nil +} + +func (b *backend) pathCertWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := strings.ToLower(d.Get("name").(string)) + + cert, err := b.Cert(ctx, req.Storage, name) + if err != nil { + return nil, err + } + + if cert == nil { + cert = &CertEntry{ + Name: name, + } + } + + // Get non tokenutil fields + if certificateRaw, ok := d.GetOk("certificate"); ok { + cert.Certificate = certificateRaw.(string) + } + if ocspCertificatesRaw, ok := d.GetOk("ocsp_ca_certificates"); ok { + cert.OcspCaCertificates = ocspCertificatesRaw.(string) + } + if ocspEnabledRaw, ok := d.GetOk("ocsp_enabled"); ok { + cert.OcspEnabled = ocspEnabledRaw.(bool) + } + if ocspServerOverrides, ok := d.GetOk("ocsp_servers_override"); ok { + cert.OcspServersOverride = ocspServerOverrides.([]string) + } + if ocspFailOpen, ok := d.GetOk("ocsp_fail_open"); ok { + cert.OcspFailOpen = ocspFailOpen.(bool) + } + if ocspQueryAll, ok := d.GetOk("ocsp_query_all_servers"); ok { + cert.OcspQueryAllServers = ocspQueryAll.(bool) + } + if displayNameRaw, ok := d.GetOk("display_name"); ok { + cert.DisplayName = displayNameRaw.(string) + } + if 
allowedNamesRaw, ok := d.GetOk("allowed_names"); ok { + cert.AllowedNames = allowedNamesRaw.([]string) + } + if allowedCommonNamesRaw, ok := d.GetOk("allowed_common_names"); ok { + cert.AllowedCommonNames = allowedCommonNamesRaw.([]string) + } + if allowedDNSSANsRaw, ok := d.GetOk("allowed_dns_sans"); ok { + cert.AllowedDNSSANs = allowedDNSSANsRaw.([]string) + } + if allowedEmailSANsRaw, ok := d.GetOk("allowed_email_sans"); ok { + cert.AllowedEmailSANs = allowedEmailSANsRaw.([]string) + } + if allowedURISANsRaw, ok := d.GetOk("allowed_uri_sans"); ok { + cert.AllowedURISANs = allowedURISANsRaw.([]string) + } + if allowedOrganizationalUnitsRaw, ok := d.GetOk("allowed_organizational_units"); ok { + cert.AllowedOrganizationalUnits = allowedOrganizationalUnitsRaw.([]string) + } + if requiredExtensionsRaw, ok := d.GetOk("required_extensions"); ok { + cert.RequiredExtensions = requiredExtensionsRaw.([]string) + } + if allowedMetadataExtensionsRaw, ok := d.GetOk("allowed_metadata_extensions"); ok { + cert.AllowedMetadataExtensions = allowedMetadataExtensionsRaw.([]string) + } + + // Get tokenutil fields + if err := cert.ParseTokenFields(req, d); err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + + // Handle upgrade cases + { + if err := tokenutil.UpgradeValue(d, "policies", "token_policies", &cert.Policies, &cert.TokenPolicies); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + if err := tokenutil.UpgradeValue(d, "ttl", "token_ttl", &cert.TTL, &cert.TokenTTL); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + // Special case here for old lease value + _, ok := d.GetOk("token_ttl") + if !ok { + _, ok = d.GetOk("ttl") + if !ok { + ttlRaw, ok := d.GetOk("lease") + if ok { + cert.TTL = time.Duration(ttlRaw.(int)) * time.Second + cert.TokenTTL = cert.TTL + } + } + } + + if err := tokenutil.UpgradeValue(d, "max_ttl", "token_max_ttl", &cert.MaxTTL, &cert.TokenMaxTTL); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + if err := tokenutil.UpgradeValue(d, "period", "token_period", &cert.Period, &cert.TokenPeriod); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + if err := tokenutil.UpgradeValue(d, "bound_cidrs", "token_bound_cidrs", &cert.BoundCIDRs, &cert.TokenBoundCIDRs); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + } + + var resp logical.Response + + systemDefaultTTL := b.System().DefaultLeaseTTL() + if cert.TokenTTL > systemDefaultTTL { + resp.AddWarning(fmt.Sprintf("Given ttl of %d seconds is greater than current mount/system default of %d seconds", cert.TokenTTL/time.Second, systemDefaultTTL/time.Second)) + } + systemMaxTTL := b.System().MaxLeaseTTL() + if cert.TokenMaxTTL > systemMaxTTL { + resp.AddWarning(fmt.Sprintf("Given max_ttl of %d seconds is greater than current mount/system default of %d seconds", cert.TokenMaxTTL/time.Second, systemMaxTTL/time.Second)) + } + if cert.TokenMaxTTL != 0 && cert.TokenTTL > cert.TokenMaxTTL { + return logical.ErrorResponse("ttl should be shorter than max_ttl"), nil + } + if cert.TokenPeriod > systemMaxTTL { + resp.AddWarning(fmt.Sprintf("Given period of %d seconds is greater than the backend's maximum TTL of %d seconds", cert.TokenPeriod/time.Second, systemMaxTTL/time.Second)) + } + + // Default the display name to the certificate name if not given + if cert.DisplayName == "" { + cert.DisplayName = name + } + + parsed := parsePEM([]byte(cert.Certificate)) + if len(parsed) == 0 { + return logical.ErrorResponse("failed to 
parse certificate"), nil
+	}
+
+	// If the certificate is not a CA cert, then ensure that x509.ExtKeyUsageClientAuth is set
+	if !parsed[0].IsCA && parsed[0].ExtKeyUsage != nil {
+		var clientAuth bool
+		for _, usage := range parsed[0].ExtKeyUsage {
+			if usage == x509.ExtKeyUsageClientAuth || usage == x509.ExtKeyUsageAny {
+				clientAuth = true
+				break
+			}
+		}
+		if !clientAuth {
+			return logical.ErrorResponse("non-CA certificates should have TLS client authentication set as an extended key usage"), nil
+		}
+	}
+
+	// Store it
+	entry, err := logical.StorageEntryJSON("cert/"+name, cert)
+	if err != nil {
+		return nil, err
+	}
+	if err := req.Storage.Put(ctx, entry); err != nil {
+		return nil, err
+	}
+
+	if len(resp.Warnings) == 0 {
+		return nil, nil
+	}
+
+	return &resp, nil
+}
+
+type CertEntry struct {
+	tokenutil.TokenParams
+
+	Name        string
+	Certificate string
+	DisplayName string
+	Policies    []string
+	TTL         time.Duration
+	MaxTTL      time.Duration
+	Period      time.Duration
+
+	AllowedNames               []string
+	AllowedCommonNames         []string
+	AllowedDNSSANs             []string
+	AllowedEmailSANs           []string
+	AllowedURISANs             []string
+	AllowedOrganizationalUnits []string
+	RequiredExtensions         []string
+	AllowedMetadataExtensions  []string
+	BoundCIDRs                 []*sockaddr.SockAddrMarshaler
+
+	OcspCaCertificates  string
+	OcspEnabled         bool
+	OcspServersOverride []string
+	OcspFailOpen        bool
+	OcspQueryAllServers bool
+}
+
+const pathCertHelpSyn = `
+Manage trusted certificates used for authentication.
+`
+
+const pathCertHelpDesc = `
+This endpoint allows you to create, read, update, and delete trusted certificates
+that are allowed to authenticate.
+
+Deleting a certificate will not revoke auth for prior authenticated connections.
+To do this, do a revoke on "login". If you don't need to revoke login immediately,
+then the next renew will cause the lease to expire.
+
+`
diff --git a/builtin/credential/cert/path_config.go b/builtin/credential/cert/path_config.go
new file mode 100644
index 0000000..6f1f290
--- /dev/null
+++ b/builtin/credential/cert/path_config.go
@@ -0,0 +1,122 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package cert
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+const maxCacheSize = 100000
+
+func pathConfig(b *backend) *framework.Path {
+	return &framework.Path{
+		Pattern: "config",
+
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixCert,
+		},
+
+		Fields: map[string]*framework.FieldSchema{
+			"disable_binding": {
+				Type:        framework.TypeBool,
+				Default:     false,
+				Description: `If set, during renewal, skips the matching of presented client identity with the client identity used during login. Defaults to false.`,
+			},
+			"enable_identity_alias_metadata": {
+				Type:        framework.TypeBool,
+				Default:     false,
+				Description: `If set, metadata of the certificate including the metadata corresponding to allowed_metadata_extensions will be stored in the alias.
Defaults to false.`, + }, + "ocsp_cache_size": { + Type: framework.TypeInt, + Default: 100, + Description: `The size of the in memory OCSP response cache, shared by all configured certs`, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + }, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "configuration", + }, + }, + }, + } +} + +func (b *backend) pathConfigWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + config, err := b.Config(ctx, req.Storage) + if err != nil { + return nil, err + } + + if disableBindingRaw, ok := data.GetOk("disable_binding"); ok { + config.DisableBinding = disableBindingRaw.(bool) + } + if enableIdentityAliasMetadataRaw, ok := data.GetOk("enable_identity_alias_metadata"); ok { + config.EnableIdentityAliasMetadata = enableIdentityAliasMetadataRaw.(bool) + } + if cacheSizeRaw, ok := data.GetOk("ocsp_cache_size"); ok { + cacheSize := cacheSizeRaw.(int) + if cacheSize < 2 || cacheSize > maxCacheSize { + return logical.ErrorResponse("invalid cache size, must be >= 2 and <= %d", maxCacheSize), nil + } + config.OcspCacheSize = cacheSize + } + if err := b.storeConfig(ctx, req.Storage, config); err != nil { + return nil, err + } + return nil, nil +} + +func (b *backend) pathConfigRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + cfg, err := b.Config(ctx, req.Storage) + if err != nil { + return nil, err + } + + data := map[string]interface{}{ + "disable_binding": cfg.DisableBinding, + "enable_identity_alias_metadata": cfg.EnableIdentityAliasMetadata, + "ocsp_cache_size": cfg.OcspCacheSize, + } + + return &logical.Response{ + Data: data, + }, nil +} + +// Config returns the configuration for this backend. +func (b *backend) Config(ctx context.Context, s logical.Storage) (*config, error) { + entry, err := s.Get(ctx, "config") + if err != nil { + return nil, err + } + + // Returning a default configuration if an entry is not found + var result config + if entry != nil { + if err := entry.DecodeJSON(&result); err != nil { + return nil, fmt.Errorf("error reading configuration: %w", err) + } + } + return &result, nil +} + +type config struct { + DisableBinding bool `json:"disable_binding"` + EnableIdentityAliasMetadata bool `json:"enable_identity_alias_metadata"` + OcspCacheSize int `json:"ocsp_cache_size"` +} diff --git a/builtin/credential/cert/path_crls.go b/builtin/credential/cert/path_crls.go new file mode 100644 index 0000000..9dd7107 --- /dev/null +++ b/builtin/credential/cert/path_crls.go @@ -0,0 +1,348 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package cert + +import ( + "context" + "crypto/x509" + "crypto/x509/pkix" + "fmt" + "math/big" + url2 "net/url" + "strings" + "time" + + "github.com/fatih/structs" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathListCRLs(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "crls/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixCert, + OperationSuffix: "crls", + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ListOperation: &framework.PathOperation{ + Callback: b.pathCRLsList, + }, + }, + HelpSynopsis: pathCRLsHelpSyn, + HelpDescription: pathCRLsHelpDesc, + } +} + +func (b *backend) pathCRLsList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + entries, err := req.Storage.List(ctx, "crls/") + if err != nil { + return nil, fmt.Errorf("failed to list CRLs: %w", err) + } + + return logical.ListResponse(entries), nil +} + +func pathCRLs(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "crls/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixCert, + OperationSuffix: "crl", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "The name of the certificate", + }, + + "crl": { + Type: framework.TypeString, + Description: `The public CRL that should be trusted to attest to certificates' validity statuses. +May be DER or PEM encoded. Note: the expiration time +is ignored; if the CRL is no longer valid, delete it +using the same name as specified here.`, + }, + "url": { + Type: framework.TypeString, + Description: `The URL of a CRL distribution point. 
Only one of 'crl' or 'url' parameters should be specified.`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.DeleteOperation: b.pathCRLDelete, + logical.ReadOperation: b.pathCRLRead, + logical.UpdateOperation: b.pathCRLWrite, + }, + + HelpSynopsis: pathCRLsHelpSyn, + HelpDescription: pathCRLsHelpDesc, + } +} + +func (b *backend) populateCrlsIfNil(ctx context.Context, storage logical.Storage) error { + b.crlUpdateMutex.RLock() + if b.crls == nil { + b.crlUpdateMutex.RUnlock() + return b.lockThenpopulateCRLs(ctx, storage) + } + b.crlUpdateMutex.RUnlock() + return nil +} + +func (b *backend) lockThenpopulateCRLs(ctx context.Context, storage logical.Storage) error { + b.crlUpdateMutex.Lock() + defer b.crlUpdateMutex.Unlock() + return b.populateCRLs(ctx, storage) +} + +func (b *backend) populateCRLs(ctx context.Context, storage logical.Storage) error { + if b.crls != nil { + return nil + } + + b.crls = map[string]CRLInfo{} + + keys, err := storage.List(ctx, "crls/") + if err != nil { + return fmt.Errorf("error listing CRLs: %w", err) + } + if keys == nil || len(keys) == 0 { + return nil + } + + for _, key := range keys { + entry, err := storage.Get(ctx, "crls/"+key) + if err != nil { + b.crls = nil + return fmt.Errorf("error loading CRL %q: %w", key, err) + } + if entry == nil { + continue + } + var crlInfo CRLInfo + err = entry.DecodeJSON(&crlInfo) + if err != nil { + b.crls = nil + return fmt.Errorf("error decoding CRL %q: %w", key, err) + } + b.crls[key] = crlInfo + } + + return nil +} + +func (b *backend) findSerialInCRLs(serial *big.Int) map[string]RevokedSerialInfo { + b.crlUpdateMutex.RLock() + defer b.crlUpdateMutex.RUnlock() + ret := map[string]RevokedSerialInfo{} + for key, crl := range b.crls { + if crl.Serials == nil { + continue + } + if info, ok := crl.Serials[serial.String()]; ok { + ret[key] = info + } + } + return ret +} + +func parseSerialString(input string) (*big.Int, error) { + ret := &big.Int{} + + switch { + case strings.Count(input, ":") > 0: + serialBytes := certutil.ParseHexFormatted(input, ":") + if serialBytes == nil { + return nil, fmt.Errorf("error parsing serial %q", input) + } + ret.SetBytes(serialBytes) + case strings.Count(input, "-") > 0: + serialBytes := certutil.ParseHexFormatted(input, "-") + if serialBytes == nil { + return nil, fmt.Errorf("error parsing serial %q", input) + } + ret.SetBytes(serialBytes) + default: + var success bool + ret, success = ret.SetString(input, 0) + if !success { + return nil, fmt.Errorf("error parsing serial %q", input) + } + } + + return ret, nil +} + +func (b *backend) pathCRLDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := strings.ToLower(d.Get("name").(string)) + if name == "" { + return logical.ErrorResponse(`"name" parameter cannot be empty`), nil + } + + if err := b.lockThenpopulateCRLs(ctx, req.Storage); err != nil { + return nil, err + } + + b.crlUpdateMutex.Lock() + defer b.crlUpdateMutex.Unlock() + + _, ok := b.crls[name] + if !ok { + return logical.ErrorResponse(fmt.Sprintf( + "no such CRL %s", name, + )), nil + } + + if err := req.Storage.Delete(ctx, "crls/"+name); err != nil { + return logical.ErrorResponse(fmt.Sprintf( + "error deleting crl %s: %v", name, err), + ), nil + } + + delete(b.crls, name) + + return nil, nil +} + +func (b *backend) pathCRLRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := strings.ToLower(d.Get("name").(string)) + if name == "" { + 
return logical.ErrorResponse(`"name" parameter must be set`), nil + } + + if err := b.lockThenpopulateCRLs(ctx, req.Storage); err != nil { + return nil, err + } + + b.crlUpdateMutex.RLock() + defer b.crlUpdateMutex.RUnlock() + + var retData map[string]interface{} + + crl, ok := b.crls[name] + if !ok { + return logical.ErrorResponse(fmt.Sprintf( + "no such CRL %s", name, + )), nil + } + + retData = structs.New(&crl).Map() + + return &logical.Response{ + Data: retData, + }, nil +} + +func (b *backend) pathCRLWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := strings.ToLower(d.Get("name").(string)) + if name == "" { + return logical.ErrorResponse(`"name" parameter cannot be empty`), nil + } + if crlRaw, ok := d.GetOk("crl"); ok { + crl := crlRaw.(string) + certList, err := x509.ParseCRL([]byte(crl)) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("failed to parse CRL: %v", err)), nil + } + if certList == nil { + return logical.ErrorResponse("parsed CRL is nil"), nil + } + + b.crlUpdateMutex.Lock() + defer b.crlUpdateMutex.Unlock() + err = b.setCRL(ctx, req.Storage, certList, name, nil) + if err != nil { + return nil, err + } + } else if urlRaw, ok := d.GetOk("url"); ok { + url := urlRaw.(string) + if url == "" { + return logical.ErrorResponse("empty CRL url"), nil + } + _, err := url2.Parse(url) + if err != nil { + return logical.ErrorResponse("invalid CRL url: %v", err), nil + } + + b.crlUpdateMutex.Lock() + defer b.crlUpdateMutex.Unlock() + + cdpInfo := &CDPInfo{ + Url: url, + } + err = b.fetchCRL(ctx, req.Storage, name, &CRLInfo{ + CDP: cdpInfo, + }) + if err != nil { + return nil, err + } + } else { + return logical.ErrorResponse("one of 'crl' or 'url' must be provided"), nil + } + + return nil, nil +} + +func (b *backend) setCRL(ctx context.Context, storage logical.Storage, certList *pkix.CertificateList, name string, cdp *CDPInfo) error { + if err := b.populateCRLs(ctx, storage); err != nil { + return err + } + + crlInfo := CRLInfo{ + CDP: cdp, + Serials: map[string]RevokedSerialInfo{}, + } + + if certList != nil { + for _, revokedCert := range certList.TBSCertList.RevokedCertificates { + crlInfo.Serials[revokedCert.SerialNumber.String()] = RevokedSerialInfo{} + } + } + + entry, err := logical.StorageEntryJSON("crls/"+name, crlInfo) + if err != nil { + return err + } + if err = storage.Put(ctx, entry); err != nil { + return err + } + + b.crls[name] = crlInfo + return err +} + +type CDPInfo struct { + Url string `json:"url" structs:"url" mapstructure:"url"` + ValidUntil time.Time `json:"valid_until" structs:"valid_until" mapstructure:"valid_until"` +} + +type CRLInfo struct { + CDP *CDPInfo `json:"cdp" structs:"cdp" mapstructure:"cdp"` + Serials map[string]RevokedSerialInfo `json:"serials" structs:"serials" mapstructure:"serials"` +} + +type RevokedSerialInfo struct{} + +const pathCRLsHelpSyn = ` +Manage Certificate Revocation Lists checked during authentication. +` + +const pathCRLsHelpDesc = ` +This endpoint allows you to list, create, read, update, and delete the Certificate +Revocation Lists checked during authentication, and/or CRL Distribution Point +URLs. + +When any CRLs are in effect, any login will check the trust chains sent by a +client against the submitted or retrieved CRLs. Any chain containing a serial number revoked +by one or more of the CRLs causes that chain to be marked as invalid for the +authentication attempt. 
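+For example (an illustrative chain): if a client presents "leaf -> intermediate
+-> root" and the intermediate's serial number appears in any configured CRL,
+that entire chain is rejected.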
Conversely, *any* valid chain -- that is, a chain +in which none of the serials are revoked by any CRL -- allows authentication. +This allows authentication to succeed when interim parts of one chain have been +revoked; for instance, if a certificate is signed by two intermediate CAs due to +one of them expiring. +` diff --git a/builtin/credential/cert/path_crls_test.go b/builtin/credential/cert/path_crls_test.go new file mode 100644 index 0000000..24211f5 --- /dev/null +++ b/builtin/credential/cert/path_crls_test.go @@ -0,0 +1,207 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cert + +import ( + "context" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "fmt" + "io/ioutil" + "math/big" + "net/http" + "net/http/httptest" + "net/url" + "sync" + "testing" + "time" + + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/require" +) + +func TestCRLFetch(t *testing.T) { + storage := &logical.InmemStorage{} + + lb, err := Factory(context.Background(), &logical.BackendConfig{ + System: &logical.StaticSystemView{ + DefaultLeaseTTLVal: 300 * time.Second, + MaxLeaseTTLVal: 1800 * time.Second, + }, + StorageView: storage, + }) + + require.NoError(t, err) + b := lb.(*backend) + closeChan := make(chan bool) + go func() { + t := time.NewTicker(50 * time.Millisecond) + for { + select { + case <-t.C: + b.PeriodicFunc(context.Background(), &logical.Request{Storage: storage}) + case <-closeChan: + break + } + } + }() + defer close(closeChan) + + if err != nil { + t.Fatalf("error: %s", err) + } + connState, err := testConnState("test-fixtures/keys/cert.pem", + "test-fixtures/keys/key.pem", "test-fixtures/root/rootcacert.pem") + require.NoError(t, err) + caPEM, err := ioutil.ReadFile("test-fixtures/root/rootcacert.pem") + require.NoError(t, err) + caKeyPEM, err := ioutil.ReadFile("test-fixtures/keys/key.pem") + require.NoError(t, err) + certPEM, err := ioutil.ReadFile("test-fixtures/keys/cert.pem") + + caBundle, err := certutil.ParsePEMBundle(string(caPEM)) + require.NoError(t, err) + bundle, err := certutil.ParsePEMBundle(string(certPEM) + "\n" + string(caKeyPEM)) + require.NoError(t, err) + // Entry with one cert first + + revocationListTemplate := &x509.RevocationList{ + RevokedCertificates: []pkix.RevokedCertificate{ + { + SerialNumber: big.NewInt(1), + RevocationTime: time.Now(), + }, + }, + Number: big.NewInt(1), + ThisUpdate: time.Now(), + NextUpdate: time.Now().Add(50 * time.Millisecond), + SignatureAlgorithm: x509.SHA1WithRSA, + } + + var crlBytesLock sync.Mutex + crlBytes, err := x509.CreateRevocationList(rand.Reader, revocationListTemplate, caBundle.Certificate, bundle.PrivateKey) + require.NoError(t, err) + + var serverURL *url.URL + crlServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Host == serverURL.Host { + crlBytesLock.Lock() + w.Write(crlBytes) + crlBytesLock.Unlock() + } else { + w.WriteHeader(http.StatusNotFound) + } + })) + serverURL, _ = url.Parse(crlServer.URL) + + req := &logical.Request{ + Connection: &logical.Connection{ + ConnState: &connState, + }, + Storage: storage, + Auth: &logical.Auth{}, + } + + fd := &framework.FieldData{ + Raw: map[string]interface{}{ + "name": "test", + "certificate": string(caPEM), + "policies": "foo,bar", + }, + Schema: pathCerts(b).Fields, + } + + resp, err := 
b.pathCertWrite(context.Background(), req, fd) + if err != nil { + t.Fatal(err) + } + + empty_login_fd := &framework.FieldData{ + Raw: map[string]interface{}{}, + Schema: pathLogin(b).Fields, + } + resp, err = b.pathLogin(context.Background(), req, empty_login_fd) + if err != nil { + t.Fatal(err) + } + if resp.IsError() { + t.Fatalf("got error: %#v", *resp) + } + + // Set a bad CRL + fd = &framework.FieldData{ + Raw: map[string]interface{}{ + "name": "testcrl", + "url": "http://wrongserver.com", + }, + Schema: pathCRLs(b).Fields, + } + resp, err = b.pathCRLWrite(context.Background(), req, fd) + if err == nil { + t.Fatal(err) + } + if resp.IsError() { + t.Fatalf("got error: %#v", *resp) + } + + // Set good CRL + fd = &framework.FieldData{ + Raw: map[string]interface{}{ + "name": "testcrl", + "url": crlServer.URL, + }, + Schema: pathCRLs(b).Fields, + } + resp, err = b.pathCRLWrite(context.Background(), req, fd) + if err != nil { + t.Fatal(err) + } + if resp.IsError() { + t.Fatalf("got error: %#v", *resp) + } + + b.crlUpdateMutex.Lock() + if len(b.crls["testcrl"].Serials) != 1 { + t.Fatalf("wrong number of certs in CRL got %d, expected 1", len(b.crls["testcrl"].Serials)) + } + b.crlUpdateMutex.Unlock() + + // Add a cert to the CRL, then wait to see if it gets automatically picked up + revocationListTemplate.RevokedCertificates = []pkix.RevokedCertificate{ + { + SerialNumber: big.NewInt(1), + RevocationTime: revocationListTemplate.RevokedCertificates[0].RevocationTime, + }, + { + SerialNumber: big.NewInt(2), + RevocationTime: time.Now(), + }, + } + revocationListTemplate.ThisUpdate = time.Now() + revocationListTemplate.NextUpdate = time.Now().Add(1 * time.Minute) + revocationListTemplate.Number = big.NewInt(2) + + crlBytesLock.Lock() + crlBytes, err = x509.CreateRevocationList(rand.Reader, revocationListTemplate, caBundle.Certificate, bundle.PrivateKey) + crlBytesLock.Unlock() + require.NoError(t, err) + + // Give ourselves a little extra room on slower CI systems to ensure we + // can fetch the new CRL. + corehelpers.RetryUntil(t, 2*time.Second, func() error { + b.crlUpdateMutex.Lock() + defer b.crlUpdateMutex.Unlock() + + serialCount := len(b.crls["testcrl"].Serials) + if serialCount != 2 { + return fmt.Errorf("CRL refresh did not occur serial count %d", serialCount) + } + return nil + }) +} diff --git a/builtin/credential/cert/path_login.go b/builtin/credential/cert/path_login.go new file mode 100644 index 0000000..d59c5b4 --- /dev/null +++ b/builtin/credential/cert/path_login.go @@ -0,0 +1,719 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package cert + +import ( + "bytes" + "context" + "crypto/tls" + "crypto/x509" + "encoding/asn1" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "strings" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/cidrutil" + "github.com/hashicorp/vault/sdk/helper/ocsp" + "github.com/hashicorp/vault/sdk/helper/policyutil" + "github.com/hashicorp/vault/sdk/logical" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-multierror" + glob "github.com/ryanuber/go-glob" +) + +// ParsedCert is a certificate that has been configured as trusted +type ParsedCert struct { + Entry *CertEntry + Certificates []*x509.Certificate +} + +func pathLogin(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "login", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixCert, + OperationVerb: "login", + }, + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "The name of the certificate role to authenticate against.", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.loginPathWrapper(b.pathLogin), + logical.AliasLookaheadOperation: b.pathLoginAliasLookahead, + logical.ResolveRoleOperation: b.loginPathWrapper(b.pathLoginResolveRole), + }, + } +} + +func (b *backend) loginPathWrapper(wrappedOp func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error)) framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Make sure that the CRLs have been loaded before processing a login request, + // they might have been nil'd by an invalidate func call. 
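+		// As an illustrative sketch (names hypothetical), every login-style
+		// handler is registered through this wrapper, so the effective call
+		// order is:
+		//
+		//	op := b.loginPathWrapper(b.pathLogin)
+		//	resp, err := op(ctx, req, data) // CRLs repopulated before pathLogin runs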
+ if err := b.populateCrlsIfNil(ctx, req.Storage); err != nil { + return nil, err + } + return wrappedOp(ctx, req, data) + } +} + +func (b *backend) pathLoginResolveRole(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + var matched *ParsedCert + + if verifyResp, resp, err := b.verifyCredentials(ctx, req, data); err != nil { + return nil, err + } else if resp != nil { + return resp, nil + } else { + matched = verifyResp + } + + if matched == nil { + return logical.ErrorResponse("no certificate was matched by this request"), nil + } + + return logical.ResolveRoleResponse(matched.Entry.Name) +} + +func (b *backend) pathLoginAliasLookahead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + if req.Connection == nil || req.Connection.ConnState == nil { + return nil, fmt.Errorf("tls connection not found") + } + clientCerts := req.Connection.ConnState.PeerCertificates + if len(clientCerts) == 0 { + return nil, fmt.Errorf("no client certificate found") + } + + return &logical.Response{ + Auth: &logical.Auth{ + Alias: &logical.Alias{ + Name: clientCerts[0].Subject.CommonName, + }, + }, + }, nil +} + +func (b *backend) pathLogin(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + config, err := b.Config(ctx, req.Storage) + if err != nil { + return nil, err + } + if b.configUpdated.Load() { + b.updatedConfig(config) + } + + var matched *ParsedCert + if verifyResp, resp, err := b.verifyCredentials(ctx, req, data); err != nil { + return nil, err + } else if resp != nil { + return resp, nil + } else { + matched = verifyResp + } + + if matched == nil { + return nil, nil + } + + if len(matched.Entry.TokenBoundCIDRs) > 0 { + if req.Connection == nil { + b.Logger().Warn("token bound CIDRs found but no connection information available for validation") + return nil, logical.ErrPermissionDenied + } + if !cidrutil.RemoteAddrIsOk(req.Connection.RemoteAddr, matched.Entry.TokenBoundCIDRs) { + return nil, logical.ErrPermissionDenied + } + } + + clientCerts := req.Connection.ConnState.PeerCertificates + if len(clientCerts) == 0 { + return logical.ErrorResponse("no client certificate found"), nil + } + skid := base64.StdEncoding.EncodeToString(clientCerts[0].SubjectKeyId) + akid := base64.StdEncoding.EncodeToString(clientCerts[0].AuthorityKeyId) + + metadata := map[string]string{ + "cert_name": matched.Entry.Name, + "common_name": clientCerts[0].Subject.CommonName, + "serial_number": clientCerts[0].SerialNumber.String(), + "subject_key_id": certutil.GetHexFormatted(clientCerts[0].SubjectKeyId, ":"), + "authority_key_id": certutil.GetHexFormatted(clientCerts[0].AuthorityKeyId, ":"), + } + + // Add metadata from allowed_metadata_extensions when present, + // with sanitized oids (dash-separated instead of dot-separated) as keys. 
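+	// Illustrative example (OID and value hypothetical): with
+	// allowed_metadata_extensions=["1.2.3.4"], a certificate carrying that
+	// extension yields
+	//
+	//	metadata["1-2-3-4"] = "<decoded extension value>"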
+ for k, v := range b.certificateExtensionsMetadata(clientCerts[0], matched) { + metadata[k] = v + } + + auth := &logical.Auth{ + InternalData: map[string]interface{}{ + "subject_key_id": skid, + "authority_key_id": akid, + }, + DisplayName: matched.Entry.DisplayName, + Metadata: metadata, + Alias: &logical.Alias{ + Name: clientCerts[0].Subject.CommonName, + }, + } + + if config.EnableIdentityAliasMetadata { + auth.Alias.Metadata = metadata + } + + matched.Entry.PopulateTokenAuth(auth) + + return &logical.Response{ + Auth: auth, + }, nil +} + +func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + config, err := b.Config(ctx, req.Storage) + if err != nil { + return nil, err + } + if b.configUpdated.Load() { + b.updatedConfig(config) + } + + if !config.DisableBinding { + var matched *ParsedCert + if verifyResp, resp, err := b.verifyCredentials(ctx, req, d); err != nil { + return nil, err + } else if resp != nil { + return resp, nil + } else { + matched = verifyResp + } + + if matched == nil { + return nil, nil + } + + clientCerts := req.Connection.ConnState.PeerCertificates + if len(clientCerts) == 0 { + return logical.ErrorResponse("no client certificate found"), nil + } + skid := base64.StdEncoding.EncodeToString(clientCerts[0].SubjectKeyId) + akid := base64.StdEncoding.EncodeToString(clientCerts[0].AuthorityKeyId) + + // Certificate should not only match a registered certificate policy. + // Also, the identity of the certificate presented should match the identity of the certificate used during login + if req.Auth.InternalData["subject_key_id"] != skid && req.Auth.InternalData["authority_key_id"] != akid { + return nil, fmt.Errorf("client identity during renewal not matching client identity used during login") + } + + } + // Get the cert and use its TTL + cert, err := b.Cert(ctx, req.Storage, req.Auth.Metadata["cert_name"]) + if err != nil { + return nil, err + } + if cert == nil { + // User no longer exists, do not renew + return nil, nil + } + + if !policyutil.EquivalentPolicies(cert.TokenPolicies, req.Auth.TokenPolicies) { + return nil, fmt.Errorf("policies have changed, not renewing") + } + + resp := &logical.Response{Auth: req.Auth} + resp.Auth.TTL = cert.TokenTTL + resp.Auth.MaxTTL = cert.TokenMaxTTL + resp.Auth.Period = cert.TokenPeriod + return resp, nil +} + +func (b *backend) verifyCredentials(ctx context.Context, req *logical.Request, d *framework.FieldData) (*ParsedCert, *logical.Response, error) { + // Get the connection state + if req.Connection == nil || req.Connection.ConnState == nil { + return nil, logical.ErrorResponse("tls connection required"), nil + } + connState := req.Connection.ConnState + + if connState.PeerCertificates == nil || len(connState.PeerCertificates) == 0 { + return nil, logical.ErrorResponse("client certificate must be supplied"), nil + } + clientCert := connState.PeerCertificates[0] + + // Allow constraining the login request to a single CertEntry + var certName string + if req.Auth != nil { // It's a renewal, use the saved certName + certName = req.Auth.Metadata["cert_name"] + } else if d != nil { // d is nil if handleAuthRenew call the authRenew + certName = d.Get("name").(string) + } + + // Load the trusted certificates and other details + roots, trusted, trustedNonCAs, verifyConf := b.loadTrustedCerts(ctx, req.Storage, certName) + + // Get the list of full chains matching the connection and validates the + // certificate itself + trustedChains, err := 
validateConnState(roots, connState)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var extraCas []*x509.Certificate
+	for _, t := range trusted {
+		extraCas = append(extraCas, t.Certificates...)
+	}
+
+	// If trustedNonCAs is not empty it means that client had registered a non-CA cert
+	// with the backend.
+	var retErr error
+	if len(trustedNonCAs) != 0 {
+		for _, trustedNonCA := range trustedNonCAs {
+			tCert := trustedNonCA.Certificates[0]
+			// Check for client cert being explicitly listed in the config (and matching other constraints)
+			if tCert.SerialNumber.Cmp(clientCert.SerialNumber) == 0 &&
+				bytes.Equal(tCert.AuthorityKeyId, clientCert.AuthorityKeyId) {
+				matches, err := b.matchesConstraints(ctx, clientCert, trustedNonCA.Certificates, trustedNonCA, verifyConf)
+
+				// matchesConstraints returns an error when OCSP verification fails,
+				// but some other path might still give us success. Add to the
+				// retErr multierror, but avoid duplicates. This way, if we reach a
+				// failure later, we can give additional context.
+				//
+				// XXX: If matchesConstraints is updated to generate additional,
+				// immediately fatal errors, we likely need to extend it to return
+				// another boolean (fatality) or other detection scheme.
+				if err != nil && (retErr == nil || !errwrap.Contains(retErr, err.Error())) {
+					retErr = multierror.Append(retErr, err)
+				}
+
+				if matches {
+					return trustedNonCA, nil, nil
+				}
+			}
+		}
+	}
+
+	// If no trusted chain was found, client is not authenticated
+	// This check happens after checking for a matching configured non-CA cert
+	if len(trustedChains) == 0 {
+		if retErr != nil {
+			return nil, logical.ErrorResponse(fmt.Sprintf("invalid certificate or no client certificate supplied; additionally got errors during verification: %v", retErr)), nil
+		}
+		return nil, logical.ErrorResponse("invalid certificate or no client certificate supplied"), nil
+	}
+
+	// Search for a ParsedCert that intersects with the validated chains and any additional constraints
+	for _, trust := range trusted { // For each ParsedCert in the config
+		for _, tCert := range trust.Certificates { // For each certificate in the entry
+			for _, chain := range trustedChains { // For each root chain that we matched
+				for _, cCert := range chain { // For each cert in the matched chain
+					if tCert.Equal(cCert) { // ParsedCert intersects with matched chain
+						match, err := b.matchesConstraints(ctx, clientCert, chain, trust, verifyConf) // validate client cert + matched chain against the config
+
+						// See note above.
+						if err != nil && (retErr == nil || !errwrap.Contains(retErr, err.Error())) {
+							retErr = multierror.Append(retErr, err)
+						}
+
+						// Return the first matching entry (for backwards
+						// compatibility, we continue to just pick the first
+						// one if we have multiple matches).
+						//
+						// Here, we return directly: this means that any
+						// future OCSP errors would be ignored; in the future,
+						// if these become fatal, we could revisit this
+						// choice and choose the first match after evaluating
+						// all possible candidates.
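+						// Illustrative consequence (entry names hypothetical):
+						// if entries "web" and "ops" both trust this chain,
+						// whichever one the storage listing produced first is
+						// returned, and an OCSP error recorded for a later
+						// candidate is never surfaced.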
+ if match && err == nil { + return trust, nil, nil + } + } + } + } + } + } + + if retErr != nil { + return nil, logical.ErrorResponse(fmt.Sprintf("no chain matching all constraints could be found for this login certificate; additionally got errors during verification: %v", retErr)), nil + } + + return nil, logical.ErrorResponse("no chain matching all constraints could be found for this login certificate"), nil +} + +func (b *backend) matchesConstraints(ctx context.Context, clientCert *x509.Certificate, trustedChain []*x509.Certificate, + config *ParsedCert, conf *ocsp.VerifyConfig, +) (bool, error) { + soFar := !b.checkForChainInCRLs(trustedChain) && + b.matchesNames(clientCert, config) && + b.matchesCommonName(clientCert, config) && + b.matchesDNSSANs(clientCert, config) && + b.matchesEmailSANs(clientCert, config) && + b.matchesURISANs(clientCert, config) && + b.matchesOrganizationalUnits(clientCert, config) && + b.matchesCertificateExtensions(clientCert, config) + if config.Entry.OcspEnabled { + ocspGood, err := b.checkForCertInOCSP(ctx, clientCert, trustedChain, conf) + if err != nil { + return false, err + } + soFar = soFar && ocspGood + } + return soFar, nil +} + +// matchesNames verifies that the certificate matches at least one configured +// allowed name +func (b *backend) matchesNames(clientCert *x509.Certificate, config *ParsedCert) bool { + // Default behavior (no names) is to allow all names + if len(config.Entry.AllowedNames) == 0 { + return true + } + // At least one pattern must match at least one name if any patterns are specified + for _, allowedName := range config.Entry.AllowedNames { + if glob.Glob(allowedName, clientCert.Subject.CommonName) { + return true + } + + for _, name := range clientCert.DNSNames { + if glob.Glob(allowedName, name) { + return true + } + } + + for _, name := range clientCert.EmailAddresses { + if glob.Glob(allowedName, name) { + return true + } + } + + } + return false +} + +// matchesCommonName verifies that the certificate matches at least one configured +// allowed common name +func (b *backend) matchesCommonName(clientCert *x509.Certificate, config *ParsedCert) bool { + // Default behavior (no names) is to allow all names + if len(config.Entry.AllowedCommonNames) == 0 { + return true + } + // At least one pattern must match at least one name if any patterns are specified + for _, allowedCommonName := range config.Entry.AllowedCommonNames { + if glob.Glob(allowedCommonName, clientCert.Subject.CommonName) { + return true + } + } + + return false +} + +// matchesDNSSANs verifies that the certificate matches at least one configured +// allowed dns entry in the subject alternate name extension +func (b *backend) matchesDNSSANs(clientCert *x509.Certificate, config *ParsedCert) bool { + // Default behavior (no names) is to allow all names + if len(config.Entry.AllowedDNSSANs) == 0 { + return true + } + // At least one pattern must match at least one name if any patterns are specified + for _, allowedDNS := range config.Entry.AllowedDNSSANs { + for _, name := range clientCert.DNSNames { + if glob.Glob(allowedDNS, name) { + return true + } + } + } + + return false +} + +// matchesEmailSANs verifies that the certificate matches at least one configured +// allowed email in the subject alternate name extension +func (b *backend) matchesEmailSANs(clientCert *x509.Certificate, config *ParsedCert) bool { + // Default behavior (no names) is to allow all names + if len(config.Entry.AllowedEmailSANs) == 0 { + return true + } + // At least one pattern must 
match at least one name if any patterns are specified
+	for _, allowedEmail := range config.Entry.AllowedEmailSANs {
+		for _, email := range clientCert.EmailAddresses {
+			if glob.Glob(allowedEmail, email) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// matchesURISANs verifies that the certificate matches at least one configured
+// allowed uri in the subject alternate name extension
+func (b *backend) matchesURISANs(clientCert *x509.Certificate, config *ParsedCert) bool {
+	// Default behavior (no names) is to allow all names
+	if len(config.Entry.AllowedURISANs) == 0 {
+		return true
+	}
+	// At least one pattern must match at least one name if any patterns are specified
+	for _, allowedURI := range config.Entry.AllowedURISANs {
+		for _, name := range clientCert.URIs {
+			if glob.Glob(allowedURI, name.String()) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// matchesOrganizationalUnits verifies that the certificate matches at least one configured allowed OU
+func (b *backend) matchesOrganizationalUnits(clientCert *x509.Certificate, config *ParsedCert) bool {
+	// Default behavior (no OUs) is to allow all OUs
+	if len(config.Entry.AllowedOrganizationalUnits) == 0 {
+		return true
+	}
+
+	// At least one pattern must match at least one name if any patterns are specified
+	for _, allowedOrganizationalUnits := range config.Entry.AllowedOrganizationalUnits {
+		for _, ou := range clientCert.Subject.OrganizationalUnit {
+			if glob.Glob(allowedOrganizationalUnits, ou) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// matchesCertificateExtensions verifies that the certificate matches configured
+// required extensions
+func (b *backend) matchesCertificateExtensions(clientCert *x509.Certificate, config *ParsedCert) bool {
+	// If no required extensions, nothing to check here
+	if len(config.Entry.RequiredExtensions) == 0 {
+		return true
+	}
+	// Fail fast if we have required extensions but no extensions on the cert
+	if len(clientCert.Extensions) == 0 {
+		return false
+	}
+
+	// Build Client Extensions Map for Constraint Matching
+	// x509 Writes Extensions in ASN1 with a bitstring tag, which results in the field
+	// including its ASN.1 type tag bytes. For the sake of simplicity, assume string type
+	// and drop the tag bytes. And get the number of bytes from the tag.
+	clientExtMap := make(map[string]string, len(clientCert.Extensions))
+	for _, ext := range clientCert.Extensions {
+		var parsedValue string
+		asn1.Unmarshal(ext.Value, &parsedValue)
+		clientExtMap[ext.Id.String()] = parsedValue
+	}
+	// If any of the required extensions don't match, the constraint fails
+	for _, requiredExt := range config.Entry.RequiredExtensions {
+		reqExt := strings.SplitN(requiredExt, ":", 2)
+		clientExtValue, clientExtValueOk := clientExtMap[reqExt[0]]
+		if !clientExtValueOk || !glob.Glob(reqExt[1], clientExtValue) {
+			return false
+		}
+	}
+	return true
+}
+
+// certificateExtensionsMetadata returns the metadata from configured
+// metadata extensions
+func (b *backend) certificateExtensionsMetadata(clientCert *x509.Certificate, config *ParsedCert) map[string]string {
+	// If no metadata extensions are configured, return an empty map
+	if len(config.Entry.AllowedMetadataExtensions) == 0 {
+		return map[string]string{}
+	}
+
+	// Build a map with the accepted oid strings as keys, and the metadata keys as values.
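+	// For example (hypothetical OID): allowedOidMap["1.2.3.4"] = "1-2-3-4".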
+	allowedOidMap := make(map[string]string, len(config.Entry.AllowedMetadataExtensions))
+	for _, oidString := range config.Entry.AllowedMetadataExtensions {
+		// Avoid dots in metadata keys and put dashes instead,
+		// to allow use in policy templates.
+		allowedOidMap[oidString] = strings.ReplaceAll(oidString, ".", "-")
+	}
+
+	// Collect the metadata from accepted certificate extensions.
+	metadata := make(map[string]string, len(config.Entry.AllowedMetadataExtensions))
+	for _, ext := range clientCert.Extensions {
+		if metadataKey, ok := allowedOidMap[ext.Id.String()]; ok {
+			// x509 Writes Extensions in ASN1 with a bitstring tag, which results in the field
+			// including its ASN.1 type tag bytes. For the sake of simplicity, assume string type
+			// and drop the tag bytes. And get the number of bytes from the tag.
+			var parsedValue string
+			asn1.Unmarshal(ext.Value, &parsedValue)
+			metadata[metadataKey] = parsedValue
+		}
+	}
+
+	return metadata
+}
+
+// loadTrustedCerts is used to load all the trusted certificates from the backend
+func (b *backend) loadTrustedCerts(ctx context.Context, storage logical.Storage, certName string) (pool *x509.CertPool, trusted []*ParsedCert, trustedNonCAs []*ParsedCert, conf *ocsp.VerifyConfig) {
+	pool = x509.NewCertPool()
+	trusted = make([]*ParsedCert, 0)
+	trustedNonCAs = make([]*ParsedCert, 0)
+
+	var names []string
+	if certName != "" {
+		names = append(names, certName)
+	} else {
+		var err error
+		names, err = storage.List(ctx, "cert/")
+		if err != nil {
+			b.Logger().Error("failed to list trusted certs", "error", err)
+			return
+		}
+	}
+
+	conf = &ocsp.VerifyConfig{}
+	for _, name := range names {
+		entry, err := b.Cert(ctx, storage, strings.TrimPrefix(name, "cert/"))
+		if err != nil {
+			b.Logger().Error("failed to load trusted cert", "name", name, "error", err)
+			continue
+		}
+		if entry == nil {
+			// This could happen when the certName was provided and the cert doesn't exist,
+			// or just if between the LIST and the GET the cert was deleted.
+			continue
+		}
+
+		parsed := parsePEM([]byte(entry.Certificate))
+		if len(parsed) == 0 {
+			b.Logger().Error("failed to parse certificate", "name", name)
+			continue
+		}
+		parsed = append(parsed, parsePEM([]byte(entry.OcspCaCertificates))...)
+
+		if !parsed[0].IsCA {
+			trustedNonCAs = append(trustedNonCAs, &ParsedCert{
+				Entry:        entry,
+				Certificates: parsed,
+			})
+		} else {
+			for _, p := range parsed {
+				pool.AddCert(p)
+			}
+
+			// Create a ParsedCert entry
+			trusted = append(trusted, &ParsedCert{
+				Entry:        entry,
+				Certificates: parsed,
+			})
+		}
+		if entry.OcspEnabled {
+			conf.OcspEnabled = true
+			conf.OcspServersOverride = append(conf.OcspServersOverride, entry.OcspServersOverride...)
+			if entry.OcspFailOpen {
+				conf.OcspFailureMode = ocsp.FailOpenTrue
+			} else {
+				conf.OcspFailureMode = ocsp.FailOpenFalse
+			}
+			conf.QueryAllServers = conf.QueryAllServers || entry.OcspQueryAllServers
+		}
+	}
+	return
+}
+
+func (b *backend) checkForCertInOCSP(ctx context.Context, clientCert *x509.Certificate, chain []*x509.Certificate, conf *ocsp.VerifyConfig) (bool, error) {
+	if !conf.OcspEnabled || len(chain) < 2 {
+		return true, nil
+	}
+	b.ocspClientMutex.RLock()
+	defer b.ocspClientMutex.RUnlock()
+	err := b.ocspClient.VerifyLeafCertificate(ctx, clientCert, chain[1], conf)
+	if err != nil {
+		// We want to preserve error messages when they have additional,
+		// potentially useful information. Just having a revoked cert
+		// isn't additionally useful.
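+		// Sketch of the two outcomes: a plain "... has been revoked" error
+		// becomes (false, nil), while any other verification error is
+		// returned as (false, err) so the caller can add context.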
+		if !strings.Contains(err.Error(), "has been revoked") {
+			return false, err
+		}
+		return false, nil
+	}
+	return true, nil
+}
+
+func (b *backend) checkForChainInCRLs(chain []*x509.Certificate) bool {
+	badChain := false
+	for _, cert := range chain {
+		badCRLs := b.findSerialInCRLs(cert.SerialNumber)
+		if len(badCRLs) != 0 {
+			badChain = true
+			break
+		}
+	}
+	return badChain
+}
+
+func (b *backend) checkForValidChain(chains [][]*x509.Certificate) bool {
+	for _, chain := range chains {
+		if !b.checkForChainInCRLs(chain) {
+			return true
+		}
+	}
+	return false
+}
+
+// parsePEM parses PEM encoded x509 certificates
+func parsePEM(raw []byte) (certs []*x509.Certificate) {
+	for len(raw) > 0 {
+		var block *pem.Block
+		block, raw = pem.Decode(raw)
+		if block == nil {
+			break
+		}
+		if (block.Type != "CERTIFICATE" && block.Type != "TRUSTED CERTIFICATE") || len(block.Headers) != 0 {
+			continue
+		}
+
+		cert, err := x509.ParseCertificate(block.Bytes)
+		if err != nil {
+			continue
+		}
+		certs = append(certs, cert)
+	}
+	return
+}
+
+// validateConnState is used to validate that the TLS client is authorized
+// by a trusted certificate. Most of this logic is lifted from the client
+// verification logic here: http://golang.org/src/crypto/tls/handshake_server.go
+// The trusted chains are returned.
+func validateConnState(roots *x509.CertPool, cs *tls.ConnectionState) ([][]*x509.Certificate, error) {
+	certs := cs.PeerCertificates
+	if len(certs) == 0 {
+		return nil, nil
+	}
+
+	opts := x509.VerifyOptions{
+		Roots:         roots,
+		Intermediates: x509.NewCertPool(),
+		KeyUsages:     []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
+	}
+
+	if len(certs) > 1 {
+		for _, cert := range certs[1:] {
+			opts.Intermediates.AddCert(cert)
+		}
+	}
+
+	chains, err := certs[0].Verify(opts)
+	if err != nil {
+		if _, ok := err.(x509.UnknownAuthorityError); ok {
+			return nil, nil
+		}
+		return nil, errors.New("failed to verify client's certificate: " + err.Error())
+	}
+
+	return chains, nil
+}
diff --git a/builtin/credential/cert/path_login_test.go b/builtin/credential/cert/path_login_test.go
new file mode 100644
index 0000000..d86bd31
--- /dev/null
+++ b/builtin/credential/cert/path_login_test.go
@@ -0,0 +1,362 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package cert + +import ( + "context" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "fmt" + "io/ioutil" + "math/big" + mathrand "math/rand" + "net" + "net/http" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/hashicorp/vault/sdk/helper/certutil" + + "golang.org/x/crypto/ocsp" + + logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" + + "github.com/hashicorp/vault/sdk/logical" +) + +var ocspPort int + +var source InMemorySource + +type testLogger struct{} + +func (t *testLogger) Log(args ...any) { + fmt.Printf("%v", args) +} + +func TestMain(m *testing.M) { + source = make(InMemorySource) + + listener, err := net.Listen("tcp", ":0") + if err != nil { + return + } + + ocspPort = listener.Addr().(*net.TCPAddr).Port + srv := &http.Server{ + Addr: "localhost:0", + Handler: NewResponder(&testLogger{}, source, nil), + } + go func() { + srv.Serve(listener) + }() + defer srv.Shutdown(context.Background()) + m.Run() +} + +func TestCert_RoleResolve(t *testing.T) { + certTemplate := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "example.com", + }, + DNSNames: []string{"example.com"}, + IPAddresses: []net.IP{net.ParseIP("127.0.0.1")}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement, + SerialNumber: big.NewInt(mathrand.Int63()), + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + } + + tempDir, connState, err := generateTestCertAndConnState(t, certTemplate) + if tempDir != "" { + defer os.RemoveAll(tempDir) + } + if err != nil { + t.Fatalf("error testing connection state: %v", err) + } + ca, err := ioutil.ReadFile(filepath.Join(tempDir, "ca_cert.pem")) + if err != nil { + t.Fatalf("err: %v", err) + } + + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: testFactory(t), + Steps: []logicaltest.TestStep{ + testAccStepCert(t, "web", ca, "foo", allowed{dns: "example.com"}, false), + testAccStepLoginWithName(t, connState, "web"), + testAccStepResolveRoleWithName(t, connState, "web"), + }, + }) +} + +func testAccStepResolveRoleWithName(t *testing.T, connState tls.ConnectionState, certName string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ResolveRoleOperation, + Path: "login", + Unauthenticated: true, + ConnState: &connState, + Check: func(resp *logical.Response) error { + if resp.Data["role"] != certName { + t.Fatalf("Role was not as expected. 
Expected %s, received %s", certName, resp.Data["role"])
+ }
+ return nil
+ },
+ Data: map[string]interface{}{
+ "name": certName,
+ },
+ }
+}
+
+func TestCert_RoleResolveWithoutProvidingCertName(t *testing.T) {
+ certTemplate := &x509.Certificate{
+ Subject: pkix.Name{
+ CommonName: "example.com",
+ },
+ DNSNames: []string{"example.com"},
+ IPAddresses: []net.IP{net.ParseIP("127.0.0.1")},
+ ExtKeyUsage: []x509.ExtKeyUsage{
+ x509.ExtKeyUsageServerAuth,
+ x509.ExtKeyUsageClientAuth,
+ },
+ KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement,
+ SerialNumber: big.NewInt(mathrand.Int63()),
+ NotBefore: time.Now().Add(-30 * time.Second),
+ NotAfter: time.Now().Add(262980 * time.Hour),
+ }
+
+ tempDir, connState, err := generateTestCertAndConnState(t, certTemplate)
+ if tempDir != "" {
+ defer os.RemoveAll(tempDir)
+ }
+ if err != nil {
+ t.Fatalf("error testing connection state: %v", err)
+ }
+ ca, err := ioutil.ReadFile(filepath.Join(tempDir, "ca_cert.pem"))
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ logicaltest.Test(t, logicaltest.TestCase{
+ CredentialBackend: testFactory(t),
+ Steps: []logicaltest.TestStep{
+ testAccStepCert(t, "web", ca, "foo", allowed{dns: "example.com"}, false),
+ testAccStepLoginWithName(t, connState, "web"),
+ testAccStepResolveRoleWithEmptyDataMap(t, connState, "web"),
+ },
+ })
+}
+
+func testAccStepResolveRoleWithEmptyDataMap(t *testing.T, connState tls.ConnectionState, certName string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ResolveRoleOperation,
+ Path: "login",
+ Unauthenticated: true,
+ ConnState: &connState,
+ Check: func(resp *logical.Response) error {
+ if resp.Data["role"] != certName {
+ t.Fatalf("Role was not as expected. Expected %s, received %s", certName, resp.Data["role"])
+ }
+ return nil
+ },
+ Data: map[string]interface{}{},
+ }
+}
+
+func testAccStepResolveRoleExpectRoleResolutionToFail(t *testing.T, connState tls.ConnectionState, certName string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ResolveRoleOperation,
+ Path: "login",
+ Unauthenticated: true,
+ ConnState: &connState,
+ ErrorOk: true,
+ Check: func(resp *logical.Response) error {
+ if resp == nil || !resp.IsError() {
+ t.Fatalf("Response was not an error: resp:%#v", resp)
+ }
+
+ errString, ok := resp.Data["error"].(string)
+ if !ok {
+ t.Fatal("Error not part of response.")
+ }
+
+ if !strings.Contains(errString, "invalid certificate") {
+ t.Fatalf("Error was not due to invalid role name. Error: %s", errString)
+ }
+ return nil
+ },
+ Data: map[string]interface{}{
+ "name": certName,
+ },
+ }
+}
+
+func testAccStepResolveRoleOCSPFail(t *testing.T, connState tls.ConnectionState, certName string) logicaltest.TestStep {
+ return logicaltest.TestStep{
+ Operation: logical.ResolveRoleOperation,
+ Path: "login",
+ Unauthenticated: true,
+ ConnState: &connState,
+ ErrorOk: true,
+ Check: func(resp *logical.Response) error {
+ if resp == nil || !resp.IsError() {
+ t.Fatalf("Response was not an error: resp:%#v", resp)
+ }
+
+ errString, ok := resp.Data["error"].(string)
+ if !ok {
+ t.Fatal("Error not part of response.")
+ }
+
+ if !strings.Contains(errString, "no chain matching") {
+ t.Fatalf("Error was not due to OCSP failure. 
Error: %s", errString) + } + return nil + }, + Data: map[string]interface{}{ + "name": certName, + }, + } +} + +func TestCert_RoleResolve_RoleDoesNotExist(t *testing.T) { + certTemplate := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "example.com", + }, + DNSNames: []string{"example.com"}, + IPAddresses: []net.IP{net.ParseIP("127.0.0.1")}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement, + SerialNumber: big.NewInt(mathrand.Int63()), + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + } + + tempDir, connState, err := generateTestCertAndConnState(t, certTemplate) + if tempDir != "" { + defer os.RemoveAll(tempDir) + } + if err != nil { + t.Fatalf("error testing connection state: %v", err) + } + ca, err := ioutil.ReadFile(filepath.Join(tempDir, "ca_cert.pem")) + if err != nil { + t.Fatalf("err: %v", err) + } + + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: testFactory(t), + Steps: []logicaltest.TestStep{ + testAccStepCert(t, "web", ca, "foo", allowed{dns: "example.com"}, false), + testAccStepLoginWithName(t, connState, "web"), + testAccStepResolveRoleExpectRoleResolutionToFail(t, connState, "notweb"), + }, + }) +} + +func TestCert_RoleResolveOCSP(t *testing.T) { + cases := []struct { + name string + failOpen bool + certStatus int + errExpected bool + }{ + {"failFalseGoodCert", false, ocsp.Good, false}, + {"failFalseRevokedCert", false, ocsp.Revoked, true}, + {"failFalseUnknownCert", false, ocsp.Unknown, true}, + {"failTrueGoodCert", true, ocsp.Good, false}, + {"failTrueRevokedCert", true, ocsp.Revoked, true}, + {"failTrueUnknownCert", true, ocsp.Unknown, false}, + } + certTemplate := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "example.com", + }, + DNSNames: []string{"example.com"}, + IPAddresses: []net.IP{net.ParseIP("127.0.0.1")}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement, + SerialNumber: big.NewInt(mathrand.Int63()), + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + OCSPServer: []string{fmt.Sprintf("http://localhost:%d", ocspPort)}, + } + tempDir, connState, err := generateTestCertAndConnState(t, certTemplate) + if tempDir != "" { + defer os.RemoveAll(tempDir) + } + if err != nil { + t.Fatalf("error testing connection state: %v", err) + } + ca, err := ioutil.ReadFile(filepath.Join(tempDir, "ca_cert.pem")) + if err != nil { + t.Fatalf("err: %v", err) + } + + issuer := parsePEM(ca) + pkf, err := ioutil.ReadFile(filepath.Join(tempDir, "ca_key.pem")) + if err != nil { + t.Fatalf("err: %v", err) + } + pk, err := certutil.ParsePEMBundle(string(pkf)) + if err != nil { + t.Fatalf("err: %v", err) + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + resp, err := ocsp.CreateResponse(issuer[0], issuer[0], ocsp.Response{ + Status: c.certStatus, + SerialNumber: certTemplate.SerialNumber, + ProducedAt: time.Now(), + ThisUpdate: time.Now(), + NextUpdate: time.Now().Add(time.Hour), + }, pk.PrivateKey) + if err != nil { + t.Fatal(err) + } + source[certTemplate.SerialNumber.String()] = resp + + b := testFactory(t) + b.(*backend).ocspClient.ClearCache() + var resolveStep logicaltest.TestStep + var loginStep logicaltest.TestStep + if c.errExpected { + 
loginStep = testAccStepLoginWithNameInvalid(t, connState, "web") + resolveStep = testAccStepResolveRoleOCSPFail(t, connState, "web") + } else { + loginStep = testAccStepLoginWithName(t, connState, "web") + resolveStep = testAccStepResolveRoleWithName(t, connState, "web") + } + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCertWithExtraParams(t, "web", ca, "foo", allowed{dns: "example.com"}, false, + map[string]interface{}{"ocsp_enabled": true, "ocsp_fail_open": c.failOpen}), + testAccStepReadCertPolicy(t, "web", false, map[string]interface{}{"ocsp_enabled": true, "ocsp_fail_open": c.failOpen}), + loginStep, + resolveStep, + }, + }) + }) + } +} + +func serialFromBigInt(serial *big.Int) string { + return strings.TrimSpace(certutil.GetHexFormatted(serial.Bytes(), ":")) +} diff --git a/builtin/credential/cert/test-fixtures/cacert.pem b/builtin/credential/cert/test-fixtures/cacert.pem new file mode 100644 index 0000000..9d9a385 --- /dev/null +++ b/builtin/credential/cert/test-fixtures/cacert.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDPjCCAiagAwIBAgIUXiEDuecwua9+j1XHLnconxQ/JBcwDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLbXl2YXVsdC5jb20wIBcNMTYwNTAyMTYwMzU4WhgPMjA2 +NjA0MjAxNjA0MjhaMBYxFDASBgNVBAMTC215dmF1bHQuY29tMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwWPjnTqnkc6acah+wWLmdTK0oCrf2687XVhx +VP3IN897TYzkaBQ2Dn1UM2VEL71sE3OZSVm0UWs5n7UqRuDp6mvkvrT2q5zgh/bV +zg9ZL1AI5H7dY2Rsor95I849ymFpXZooMgNtIQLxIeleBwzTnVSkFl8RqKM7NkjZ +wvBafQEjSsYk9050Bu0GMLgFJYRo1LozJLbwIs5ykG5F5PWTMfRvLCgLBzixPb75 +unIJ29nL0yB7zzUdkM8CG1EX8NkjGLEnpRnPa7+RMf8bd10v84cr0JFCUQmoabks +sqVyA825/1we2r5Y8blyXZVIr2lcPyGocLDxz1qT1MqxrNQIywIDAQABo4GBMH8w +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFBTo2I+W +3Wb2MBe3OWuj5qCbafavMB8GA1UdIwQYMBaAFBTo2I+W3Wb2MBe3OWuj5qCbafav +MBwGA1UdEQQVMBOCC215dmF1bHQuY29thwR/AAABMA0GCSqGSIb3DQEBCwUAA4IB +AQAyjJzDMzf28yMgiu//2R6LD3+zuLHlfX8+p5JB7WDBT7CgSm89gzMRtD2DvqZQ +6iLbZv/x7Td8bdLsOKf3LDCkZyOygJ0Sr9+6YZdc9heWO8tsO/SbcLhj9/vK8YyV +5fJo+vECW8I5zQLeTKfPqJtTU0zFspv0WYCB96Hsbhd1hTfHmVgjBoxi0YuduAa8 +3EHuYPfTYkO3M4QJCoQ+3S6LXSTDqppd1KGAy7QhRU6shd29EpSVxhgqZ+CIOpZu +3RgPOgPqfqcOD/v/SRPqhRf+P5O5Dc/N4ZXTZtfJbaY0qE+smpeQUskVQ2TrSqha +UYpNk7+toZW3Gioo0lBD3gH2 +-----END CERTIFICATE----- \ No newline at end of file diff --git a/builtin/credential/cert/test-fixtures/cacert2crl b/builtin/credential/cert/test-fixtures/cacert2crl new file mode 100644 index 0000000..82db7a3 --- /dev/null +++ b/builtin/credential/cert/test-fixtures/cacert2crl @@ -0,0 +1,12 @@ +-----BEGIN X509 CRL----- +MIIBrjCBlzANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDEwtteXZhdWx0LmNvbRcN +MTYwNTAyMTYxNDMzWhcNMTYwNTA1MTYxNDMzWjArMCkCFCXxxcbS0ATpI2PYrx8d +ACLEQ3B9FxExNjA1MDIxMjE0MzMtMDQwMKAjMCEwHwYDVR0jBBgwFoAUwsRNYCw4 +U2won66rMKEJm8inFfgwDQYJKoZIhvcNAQELBQADggEBAD/VvoRK4eaEDzG7Z95b +fHL5ubJGkyvkp8ruNu+rfQp8NLgFVvY6a93Hz7WLOhACkKIWJ63+/4vCfDi5uU0B +HW2FICHdlSQ+6DdGJ6MrgujALlyT+69iF+fPiJ/M1j/N7Am8XPYYcfNdSK6CHtfg +gHNB7E+ubBA7lIw7ucIkoiJjXrSWSXTs9/GzLUImiXJAKQ+JzPYryIsGKXKAwgHh +HB56BnJ2vOs7+6UxQ6fjKTMxYdNgoZ34MhkkxNNhylrEndO6XUvUvC1f/1p1wlzy +xTq2MrMfJHJyu08rkrD+kwMPH2uoVwKyDhXdRBP0QrvQwOsvNEhW8LTKwLWkK17b +fEI= +-----END X509 CRL----- diff --git a/builtin/credential/cert/test-fixtures/cakey.pem b/builtin/credential/cert/test-fixtures/cakey.pem new file mode 100644 index 0000000..ecba475 --- /dev/null +++ b/builtin/credential/cert/test-fixtures/cakey.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAwWPjnTqnkc6acah+wWLmdTK0oCrf2687XVhxVP3IN897TYzk 
+aBQ2Dn1UM2VEL71sE3OZSVm0UWs5n7UqRuDp6mvkvrT2q5zgh/bVzg9ZL1AI5H7d +Y2Rsor95I849ymFpXZooMgNtIQLxIeleBwzTnVSkFl8RqKM7NkjZwvBafQEjSsYk +9050Bu0GMLgFJYRo1LozJLbwIs5ykG5F5PWTMfRvLCgLBzixPb75unIJ29nL0yB7 +zzUdkM8CG1EX8NkjGLEnpRnPa7+RMf8bd10v84cr0JFCUQmoabkssqVyA825/1we +2r5Y8blyXZVIr2lcPyGocLDxz1qT1MqxrNQIywIDAQABAoIBAD1pBd9ov8t6Surq +sY2hZUM0Hc16r+ln5LcInbx6djjaxvHiWql+OYgyXimP764lPYuTuspjFPKB1SOU ++N7XDxCkwFeayXXHdDlYtZ4gm5Z9mMVOT+j++8xWdxZaqJ56fmX9zOPM2LuR3paB +L52Xgh9EwHJmMApYAzaCvbu8bU+iHeNTW80xabxQrp9VCu/A1BXUX06jK4T+wmjZ +kDA82uQp3dCOF1tv/10HgwqkJj6/1jjM0XUzUZR6iV85S6jrA7wD7gDDeqNO8YHN +08YMRgTKk4pbA7AqoC5xbL3gbSjsjyw48KRq0FkdkjsgV0PJZRMUU9fv9puDa23K +WRPa8LECgYEAyeth5bVH8FXnVXIAAFU6W0WdgCK3VakhjItLw0eoxshuTwbVq64w +CNOB8y1pfP83WiJjX3qRG43NDW07X69J57YKtCCb6KICVUPmecgYZPkmegD1HBQZ +5+Aak+5pIUQuycQ0t65yHGu4Jsju05gEFgdzydFjNANgiPxRzZxzAkkCgYEA9S+y +ZR063oCQDg/GhMLCx19nCJyU44Figh1YCD6kTrsSTECuRpQ5B1F9a+LeZT2wnYxv ++qMvvV+lfVY73f5WZ567u2jSDIsCH34p4g7sE25lKwo+Lhik6EtOehJFs2ZUemaT +Ym7EjqWlC1whrG7P4MnTGzPOVNAGAxsGPtT58nMCgYAs/R8A2VU//UPfy9ioOlUY +RPiEtjd3BIoPEHI+/lZihAHf5bvx1oupS8bmcbXRPeQNVyAhA+QU6ZFIbpAOD7Y9 +xFe6LpHOUVqHuOs/MxAMX17tTA1QxkHHYi1JzJLr8I8kMW01h86w+mc7bQWZa4Nt +jReFXfvmeOInY2CumS8e0QKBgC23ow/vj1aFqla04lNG7YK3a0LTz39MVM3mItAG +viRgBV1qghRu9uNCcpx3RPijtBbsZMTbQL+S4gyo06jlD79qfZ7IQMJN+SteHvkj +xykoYHzSAB4gQj9+KzffyFdXMVFRZxHnjYb7o/amSzEXyHMlrtNXqZVu5HAXzeZR +V/m5AoGAAStS43Q7qSJSMfMBITKMdKlqCObnifD77WeR2WHGrpkq26300ggsDpMS +UTmnAAo77lSMmDsdoNn2XZmdeTu1CPoQnoZSE5CqPd5GeHA/hhegVCdeYxSXZJoH +Lhiac+AhCEog/MS1GmVsjynD7eDGVFcsJ6SWuam7doKfrpPqPnE= +-----END RSA PRIVATE KEY----- \ No newline at end of file diff --git a/builtin/credential/cert/test-fixtures/generate.txt b/builtin/credential/cert/test-fixtures/generate.txt new file mode 100644 index 0000000..5b888ee --- /dev/null +++ b/builtin/credential/cert/test-fixtures/generate.txt @@ -0,0 +1,67 @@ +vault mount pki +vault mount-tune -max-lease-ttl=438000h pki +vault write pki/root/generate/exported common_name=myvault.com ttl=438000h ip_sans=127.0.0.1 +vi cacert.pem +vi cakey.pem + +vaultcert.hcl +backend "inmem" { +} +disable_mlock = true +default_lease_ttl = "700h" +max_lease_ttl = "768h" +listener "tcp" { + address = "127.0.0.1:8200" + tls_cert_file = "./cacert.pem" + tls_key_file = "./cakey.pem" +} +======================================== +vault mount pki +vault mount-tune -max-lease-ttl=438000h pki +vault write pki/root/generate/exported common_name=myvault.com ttl=438000h max_ttl=438000h ip_sans=127.0.0.1 +vi testcacert1.pem +vi testcakey1.pem +vi testcaserial1 + +vault write pki/config/urls issuing_certificates="http://127.0.0.1:8200/v1/pki/ca" crl_distribution_points="http://127.0.0.1:8200/v1/pki/crl" +vault write pki/roles/myvault-dot-com allowed_domains=myvault.com allow_subdomains=true ttl=437999h max_ttl=438000h allow_ip_sans=true + +vault write pki/issue/myvault-dot-com common_name=cert.myvault.com format=pem ip_sans=127.0.0.1 +vi testissuedserial1 + +vault write pki/issue/myvault-dot-com common_name=cert.myvault.com format=pem ip_sans=127.0.0.1 +vi testissuedcert2.pem +vi testissuedkey2.pem +vi testissuedserial2 + +vault write pki/issue/myvault-dot-com common_name=cert.myvault.com format=pem ip_sans=127.0.0.1 +vi testissuedserial3 + +vault write pki/issue/myvault-dot-com common_name=cert.myvault.com format=pem ip_sans=127.0.0.1 +vi testissuedcert4.pem +vi testissuedkey4.pem +vi testissuedserial4 + +vault write pki/issue/myvault-dot-com common_name=cert.myvault.com format=pem ip_sans=127.0.0.1 +vi testissuedserial5 + +vault 
write pki/revoke serial_number=$(cat testissuedserial2) +vault write pki/revoke serial_number=$(cat testissuedserial4) +curl -XGET "http://127.0.0.1:8200/v1/pki/crl/pem" -H "x-vault-token:123" > issuedcertcrl +openssl crl -in issuedcertcrl -noout -text + +======================================== +export VAULT_ADDR='http://127.0.0.1:8200' +vault mount pki +vault mount-tune -max-lease-ttl=438000h pki +vault write pki/root/generate/exported common_name=myvault.com ttl=438000h ip_sans=127.0.0.1 +vi testcacert2.pem +vi testcakey2.pem +vi testcaserial2 +vi testcacert2leaseid + +vault write pki/config/urls issuing_certificates="http://127.0.0.1:8200/v1/pki/ca" crl_distribution_points="http://127.0.0.1:8200/v1/pki/crl" +vault revoke $(cat testcacert2leaseid) + +curl -XGET "http://127.0.0.1:8200/v1/pki/crl/pem" -H "x-vault-token:123" > cacert2crl +openssl crl -in cacert2crl -noout -text diff --git a/builtin/credential/cert/test-fixtures/issuedcertcrl b/builtin/credential/cert/test-fixtures/issuedcertcrl new file mode 100644 index 0000000..45e9a98 --- /dev/null +++ b/builtin/credential/cert/test-fixtures/issuedcertcrl @@ -0,0 +1,12 @@ +-----BEGIN X509 CRL----- +MIIB2TCBwjANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDEwtteXZhdWx0LmNvbRcN +MTYwNTAyMTYxMTA4WhcNMTYwNTA1MTYxMTA4WjBWMCkCFAS6oenLRllQ1MRYcSV+ +5ukv2563FxExNjA1MDIxMjExMDgtMDQwMDApAhQaQdPJfbIwE3q4nyYp60lVnZaE +5hcRMTYwNTAyMTIxMTA1LTA0MDCgIzAhMB8GA1UdIwQYMBaAFOuKvPiUG06iHkRX +AOeMiUdBfHFyMA0GCSqGSIb3DQEBCwUAA4IBAQBD2jkeOAmkDdYkAXbmjLGdHaQI +WMS/M+wtFnHVIDVQEmUmj/KPsrkshTZv2UgCHIxBha6y+kXUMQFMg6FwriDTB170 +WyJVDVhGg2WjiQjnzrzEI+iOmcpx60sPPXE63J/Zxo4QS5M62RTXRq3909HQTFI5 +f3xf0pog8mOrv5uQxO1SACP6YFtdDE2dGOVwoIPuNMTY5vijnj8I9dAw8VrbdoBX +m/Ky56kT+BpmVWHKwQd1nEcP/RHSKbZwwJzJG0BoGM8cvzjITtBmpEF+OZcea81x +p9XJkpfFeiVIgzxks3zTeuQjLF8u+MDcdGt0ztHEbkswjxuk1cCovZe2GFr4 +-----END X509 CRL----- diff --git a/builtin/credential/cert/test-fixtures/keys/cert.pem b/builtin/credential/cert/test-fixtures/keys/cert.pem new file mode 100644 index 0000000..942d266 --- /dev/null +++ b/builtin/credential/cert/test-fixtures/keys/cert.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDtTCCAp2gAwIBAgIUf+jhKTFBnqSs34II0WS1L4QsbbAwDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzQxWhcNMjUw +MTA1MTAyODExWjAbMRkwFwYDVQQDExBjZXJ0LmV4YW1wbGUuY29tMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxS +TRAVnygAftetT8puHflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGn +SgMld6ZWRhNheZhA6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmi +YYMiIWplidMmMO5NTRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5 +donyqtnaHuIJGuUdy54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVG +B+5+AAGF5iuHC3N2DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABo4H1 +MIHyMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUm++e +HpyM3p708bgZJuRYEdX1o+UwHwYDVR0jBBgwFoAUncSzT/6HMexyuiU9/7EgHu+o +k5swOwYIKwYBBQUHAQEELzAtMCsGCCsGAQUFBzAChh9odHRwOi8vMTI3LjAuMC4x +OjgyMDAvdjEvcGtpL2NhMCEGA1UdEQQaMBiCEGNlcnQuZXhhbXBsZS5jb22HBH8A +AAEwMQYDVR0fBCowKDAmoCSgIoYgaHR0cDovLzEyNy4wLjAuMTo4MjAwL3YxL3Br +aS9jcmwwDQYJKoZIhvcNAQELBQADggEBABsuvmPSNjjKTVN6itWzdQy+SgMIrwfs +X1Yb9Lefkkwmp9ovKFNQxa4DucuCuzXcQrbKwWTfHGgR8ct4rf30xCRoA7dbQWq4 +aYqNKFWrRaBRAaaYZ/O1ApRTOrXqRx9Eqr0H1BXLsoAq+mWassL8sf6siae+CpwA +KqBko5G0dNXq5T4i2LQbmoQSVetIrCJEeMrU+idkuqfV2h1BQKgSEhFDABjFdTCN +QDAHsEHsi2M4/jRW9fqEuhHSDfl2n7tkFUI8wTHUUCl7gXwweJ4qtaSXIwKXYzNj +xqKHA8Purc1Yfybz4iE1JCROi9fInKlzr5xABq8nb9Qc/J9DIQM+Xmk= +-----END CERTIFICATE----- diff --git a/builtin/credential/cert/test-fixtures/keys/key.pem 
b/builtin/credential/cert/test-fixtures/keys/key.pem new file mode 100644 index 0000000..add9820 --- /dev/null +++ b/builtin/credential/cert/test-fixtures/keys/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxSTRAVnygAftetT8pu +HflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGnSgMld6ZWRhNheZhA +6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmiYYMiIWplidMmMO5N +TRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5donyqtnaHuIJGuUd +y54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVGB+5+AAGF5iuHC3N2 +DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABAoIBAHR7fFV0eAGaopsX +9OD0TUGlsephBXb43g0GYHfJ/1Ew18w9oaxszJEqkl+PB4W3xZ3yG3e8ZomxDOhF +RreF2WgG5xOfhDogMwu6NodbArfgnAvoC6JnW3qha8HMP4F500RFVyCRcd6A3Frd +rFtaZn/UyCsBAN8/zkwPeYHayo7xX6d9kzgRl9HluEX5PXI5+3uiBDUiM085gkLI +5Cmadh9fMdjfhDXI4x2JYmILpp/9Nlc/krB15s5n1MPNtn3yL0TI0tWp0WlwDCV7 +oUm1SfIM0F1fXGFyFDcqwoIr6JCQgXk6XtTg31YhH1xgUIclUVdtHqmAwAbLdIhQ +GAiHn2kCgYEAwD4pZ8HfpiOG/EHNoWsMATc/5yC7O8F9WbvcHZQIymLY4v/7HKZb +VyOR6UQ5/O2cztSGIuKSF6+OK1C34lOyCuTSOTFrjlgEYtLIXjdGLfFdtOO8GRQR +akVXdwuzNAjTBaH5eXbG+NKcjmCvZL48dQVlfDTVulzFGbcsVTHIMQUCgYEA7IQI +FVsKnY3KqpyGqXq92LMcsT3XgW6X1BIIV+YhJ5AFUFkFrjrbXs94/8XyLfi0xBQy +efK+8g5sMs7koF8LyZEcAXWZJQduaKB71hoLlRaU4VQkL/dl2B6VFmAII/CsRCYh +r9RmDN2PF/mp98Ih9dpC1VqcCDRGoTYsd7jLalMCgYAMgH5k1wDaZxkSMp1S0AlZ +0uP+/evvOOgT+9mWutfPgZolOQx1koQCKLgGeX9j6Xf3I28NubpSfAI84uTyfQrp +FnRtb79U5Hh0jMynA+U2e6niZ6UF5H41cQj9Hu+qhKBkj2IP+h96cwfnYnZFkPGR +kqZE65KyqfHPeFATwkcImQKBgCdrfhlpGiTWXCABhKQ8s+WpPLAB2ahV8XJEKyXT +UlVQuMIChGLcpnFv7P/cUxf8asx/fUY8Aj0/0CLLvulHziQjTmKj4gl86pb/oIQ3 +xRRtNhU0O+/OsSfLORgIm3K6C0w0esregL/GMbJSR1TnA1gBr7/1oSnw5JC8Ab9W +injHAoGAJT1MGAiQrhlt9GCGe6Ajw4omdbY0wS9NXefnFhf7EwL0es52ezZ28zpU +2LXqSFbtann5CHgpSLxiMYPDIf+er4xgg9Bz34tz1if1rDfP2Qrxdrpr4jDnrGT3 +gYC2qCpvVD9RRUMKFfnJTfl5gMQdBW/LINkHtJ82snAeLl3gjQ4= +-----END RSA PRIVATE KEY----- diff --git a/builtin/credential/cert/test-fixtures/keys/pkioutput b/builtin/credential/cert/test-fixtures/keys/pkioutput new file mode 100644 index 0000000..526ff03 --- /dev/null +++ b/builtin/credential/cert/test-fixtures/keys/pkioutput @@ -0,0 +1,74 @@ +Key Value +lease_id pki/issue/example-dot-com/d8214077-9976-8c68-9c07-6610da30aea4 +lease_duration 279359999 +lease_renewable false +certificate -----BEGIN CERTIFICATE----- +MIIDtTCCAp2gAwIBAgIUf+jhKTFBnqSs34II0WS1L4QsbbAwDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzQxWhcNMjUw +MTA1MTAyODExWjAbMRkwFwYDVQQDExBjZXJ0LmV4YW1wbGUuY29tMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxS +TRAVnygAftetT8puHflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGn +SgMld6ZWRhNheZhA6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmi +YYMiIWplidMmMO5NTRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5 +donyqtnaHuIJGuUdy54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVG +B+5+AAGF5iuHC3N2DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABo4H1 +MIHyMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUm++e +HpyM3p708bgZJuRYEdX1o+UwHwYDVR0jBBgwFoAUncSzT/6HMexyuiU9/7EgHu+o +k5swOwYIKwYBBQUHAQEELzAtMCsGCCsGAQUFBzAChh9odHRwOi8vMTI3LjAuMC4x +OjgyMDAvdjEvcGtpL2NhMCEGA1UdEQQaMBiCEGNlcnQuZXhhbXBsZS5jb22HBH8A +AAEwMQYDVR0fBCowKDAmoCSgIoYgaHR0cDovLzEyNy4wLjAuMTo4MjAwL3YxL3Br +aS9jcmwwDQYJKoZIhvcNAQELBQADggEBABsuvmPSNjjKTVN6itWzdQy+SgMIrwfs +X1Yb9Lefkkwmp9ovKFNQxa4DucuCuzXcQrbKwWTfHGgR8ct4rf30xCRoA7dbQWq4 +aYqNKFWrRaBRAaaYZ/O1ApRTOrXqRx9Eqr0H1BXLsoAq+mWassL8sf6siae+CpwA +KqBko5G0dNXq5T4i2LQbmoQSVetIrCJEeMrU+idkuqfV2h1BQKgSEhFDABjFdTCN 
+QDAHsEHsi2M4/jRW9fqEuhHSDfl2n7tkFUI8wTHUUCl7gXwweJ4qtaSXIwKXYzNj +xqKHA8Purc1Yfybz4iE1JCROi9fInKlzr5xABq8nb9Qc/J9DIQM+Xmk= +-----END CERTIFICATE----- +issuing_ca -----BEGIN CERTIFICATE----- +MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw +MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7 +Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0 +z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x +AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb +6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH +SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G +A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx +7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc +BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA +wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2 +U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa +cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N +ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ +t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk +zehNe5dFTjFpylg1o6b8Ow== +-----END CERTIFICATE----- +private_key -----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxSTRAVnygAftetT8pu +HflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGnSgMld6ZWRhNheZhA +6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmiYYMiIWplidMmMO5N +TRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5donyqtnaHuIJGuUd +y54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVGB+5+AAGF5iuHC3N2 +DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABAoIBAHR7fFV0eAGaopsX +9OD0TUGlsephBXb43g0GYHfJ/1Ew18w9oaxszJEqkl+PB4W3xZ3yG3e8ZomxDOhF +RreF2WgG5xOfhDogMwu6NodbArfgnAvoC6JnW3qha8HMP4F500RFVyCRcd6A3Frd +rFtaZn/UyCsBAN8/zkwPeYHayo7xX6d9kzgRl9HluEX5PXI5+3uiBDUiM085gkLI +5Cmadh9fMdjfhDXI4x2JYmILpp/9Nlc/krB15s5n1MPNtn3yL0TI0tWp0WlwDCV7 +oUm1SfIM0F1fXGFyFDcqwoIr6JCQgXk6XtTg31YhH1xgUIclUVdtHqmAwAbLdIhQ +GAiHn2kCgYEAwD4pZ8HfpiOG/EHNoWsMATc/5yC7O8F9WbvcHZQIymLY4v/7HKZb +VyOR6UQ5/O2cztSGIuKSF6+OK1C34lOyCuTSOTFrjlgEYtLIXjdGLfFdtOO8GRQR +akVXdwuzNAjTBaH5eXbG+NKcjmCvZL48dQVlfDTVulzFGbcsVTHIMQUCgYEA7IQI +FVsKnY3KqpyGqXq92LMcsT3XgW6X1BIIV+YhJ5AFUFkFrjrbXs94/8XyLfi0xBQy +efK+8g5sMs7koF8LyZEcAXWZJQduaKB71hoLlRaU4VQkL/dl2B6VFmAII/CsRCYh +r9RmDN2PF/mp98Ih9dpC1VqcCDRGoTYsd7jLalMCgYAMgH5k1wDaZxkSMp1S0AlZ +0uP+/evvOOgT+9mWutfPgZolOQx1koQCKLgGeX9j6Xf3I28NubpSfAI84uTyfQrp +FnRtb79U5Hh0jMynA+U2e6niZ6UF5H41cQj9Hu+qhKBkj2IP+h96cwfnYnZFkPGR +kqZE65KyqfHPeFATwkcImQKBgCdrfhlpGiTWXCABhKQ8s+WpPLAB2ahV8XJEKyXT +UlVQuMIChGLcpnFv7P/cUxf8asx/fUY8Aj0/0CLLvulHziQjTmKj4gl86pb/oIQ3 +xRRtNhU0O+/OsSfLORgIm3K6C0w0esregL/GMbJSR1TnA1gBr7/1oSnw5JC8Ab9W +injHAoGAJT1MGAiQrhlt9GCGe6Ajw4omdbY0wS9NXefnFhf7EwL0es52ezZ28zpU +2LXqSFbtann5CHgpSLxiMYPDIf+er4xgg9Bz34tz1if1rDfP2Qrxdrpr4jDnrGT3 +gYC2qCpvVD9RRUMKFfnJTfl5gMQdBW/LINkHtJ82snAeLl3gjQ4= +-----END RSA PRIVATE KEY----- +private_key_type rsa diff --git a/builtin/credential/cert/test-fixtures/noclientauthcert.pem b/builtin/credential/cert/test-fixtures/noclientauthcert.pem new file mode 100644 index 0000000..3948f22 --- /dev/null +++ b/builtin/credential/cert/test-fixtures/noclientauthcert.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDGTCCAgGgAwIBAgIBBDANBgkqhkiG9w0BAQUFADBxMQowCAYDVQQDFAEqMQsw +CQYDVQQIEwJHQTELMAkGA1UEBhMCVVMxJTAjBgkqhkiG9w0BCQEWFnZpc2hhbG5h 
+eWFrdkBnbWFpbC5jb20xEjAQBgNVBAoTCUhhc2hpQ29ycDEOMAwGA1UECxMFVmF1 +bHQwHhcNMTYwMjI5MjE0NjE2WhcNMjEwMjI3MjE0NjE2WjBxMQowCAYDVQQDFAEq +MQswCQYDVQQIEwJHQTELMAkGA1UEBhMCVVMxJTAjBgkqhkiG9w0BCQEWFnZpc2hh +bG5heWFrdkBnbWFpbC5jb20xEjAQBgNVBAoTCUhhc2hpQ29ycDEOMAwGA1UECxMF +VmF1bHQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMfRkLfIGHt1r2jjnV0N +LqRCu3oB+J1dqpM03vQt3qzIiqtuQuIA2ba7TJm2HwU3W3+rtfFcS+hkBR/LZM+u +cBPB+9b9+7i08vHjgy2P3QH/Ebxa8j1v7JtRMT2qyxWK8NlT/+wZSH82Cr812aS/ +zNT56FbBo2UAtzpqeC4eiv6NAgMBAAGjQDA+MAkGA1UdEwQCMAAwCwYDVR0PBAQD +AgXgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1UdEQQIMAaHBH8AAAEwDQYJKoZI +hvcNAQEFBQADggEBAG2mUwsZ6+R8qqyNjzMk7mgpsRZv9TEl6c1IiQdyjaCOPaYH +vtZpLX20um36cxrLuOUtZLllG/VJEhRZW5mXWxuOk4QunWMBXQioCDJG1ktcZAcQ +QqYv9Dzy2G9lZHjLztEac37T75RXW7OEeQREgwP11c8sQYiS9jf+7ITYL7nXjoKq +gEuH0h86BOH2O/BxgMelt9O0YCkvkLLHnE27xuNelRRZcBLSuE1GxdUi32MDJ+ff +25GUNM0zzOEaJAFE/USUBEdQqN1gvJidNXkAiMtIK7T8omQZONRaD2ZnSW8y2krh +eUg+rKis9RinqFlahLPfI5BlyQsNMEnsD07Q85E= +-----END CERTIFICATE----- diff --git a/builtin/credential/cert/test-fixtures/root/pkioutput b/builtin/credential/cert/test-fixtures/root/pkioutput new file mode 100644 index 0000000..312ae18 --- /dev/null +++ b/builtin/credential/cert/test-fixtures/root/pkioutput @@ -0,0 +1,74 @@ +Key Value +lease_id pki/root/generate/exported/7bf99d76-dd3e-2c5b-04ce-5253062ad586 +lease_duration 315359999 +lease_renewable false +certificate -----BEGIN CERTIFICATE----- +MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw +MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7 +Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0 +z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x +AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb +6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH +SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G +A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx +7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc +BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA +wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2 +U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa +cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N +ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ +t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk +zehNe5dFTjFpylg1o6b8Ow== +-----END CERTIFICATE----- +expiration 1.772072879e+09 +issuing_ca -----BEGIN CERTIFICATE----- +MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw +MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7 +Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0 +z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x +AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb +6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH +SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G +A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx +7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc +BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA +wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2 
+U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa +cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N +ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ +t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk +zehNe5dFTjFpylg1o6b8Ow== +-----END CERTIFICATE----- +private_key -----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEA7FMy+FO4hFzZiHFmmY/B6A/zpyCep9PWZfGLUDtDuprHbg2p +t/TQjegMxC0AmWPZEHeG9FIJvT+WQXuLUG5v5MbG4zs21mqnYXwWAbTPaZ35bpp3 +BbbquXFtAqOZG8yfrob1g9OSgmY+bG3ZNxxv35qmbBbuWyUvmPwfjzEAclMxv48w +/2Rs4dXkBuvc9PiNdQ9Sv+ZYG8GIqIcbRd38cSaXI4Q94BOHEr4jm1vqb54H7twv +0Gy9CnLXfn7ZtEDFSmG8WLk2GvIna+UC+gnxSqKCw0rcTby63rQUCgdJZF8VQUVi +18+BMNLXI4pT/P9cxTaCJC/YeuV5a3SaniOoFQIDAQABAoIBAQCoGZJC84JnnIgb +ttZNWuWKBXbCJcDVDikOQJ9hBZbqsFg1X0CfGmQS3MHf9Ubc1Ro8zVjQh15oIEfn +8lIpdzTeXcpxLdiW8ix3ekVJF20F6pnXY8ZP6UnTeOwamXY6QPZAtb0D9UXcvY+f +nw+IVRD6082XS0Rmzu+peYWVXDy+FDN+HJRANBcdJZz8gOmNBIe0qDWx1b85d/s8 +2Kk1Wwdss1IwAGeSddTSwzBNaaHdItZaMZOqPW1gRyBfVSkcUQIE6zn2RKw2b70t +grkIvyRcTdfmiKbqkkJ+eR+ITOUt0cBZSH4cDjlQA+r7hulvoBpQBRj068Toxkcc +bTagHaPBAoGBAPWPGVkHqhTbJ/DjmqDIStxby2M1fhhHt4xUGHinhUYjQjGOtDQ9 +0mfaB7HObudRiSLydRAVGAHGyNJdQcTeFxeQbovwGiYKfZSA1IGpea7dTxPpGEdN +ksA0pzSp9MfKzX/MdLuAkEtO58aAg5YzsgX9hDNxo4MhH/gremZhEGZlAoGBAPZf +lqdYvAL0fjHGJ1FUEalhzGCGE9PH2iOqsxqLCXK7bDbzYSjvuiHkhYJHAOgVdiW1 +lB34UHHYAqZ1VVoFqJ05gax6DE2+r7K5VV3FUCaC0Zm3pavxchU9R/TKP82xRrBj +AFWwdgDTxUyvQEmgPR9sqorftO71Iz2tiwyTpIfxAoGBAIhEMLzHFAse0rtKkrRG +ccR27BbRyHeQ1Lp6sFnEHKEfT8xQdI/I/snCpCJ3e/PBu2g5Q9z416mktiyGs8ib +thTNgYsGYnxZtfaCx2pssanoBcn2wBJRae5fSapf5gY49HDG9MBYR7qCvvvYtSzU +4yWP2ZzyotpRt3vwJKxLkN5BAoGAORHpZvhiDNkvxj3da7Rqpu7VleJZA2y+9hYb +iOF+HcqWhaAY+I+XcTRrTMM/zYLzLEcEeXDEyao86uwxCjpXVZw1kotvAC9UqbTO +tnr3VwRkoxPsV4kFYTAh0+1pnC8dbcxxDmhi3Uww3tOVs7hfkEDuvF6XnebA9A+Y +LyCgMzECgYEA6cCU8QODOivIKWFRXucvWckgE6MYDBaAwe6qcLsd1Q/gpE2e3yQc +4RB3bcyiPROLzMLlXFxf1vSNJQdIaVfrRv+zJeGIiivLPU8+Eq4Lrb+tl1LepcOX +OzQeADTSCn5VidOfjDkIst9UXjMlrFfV9/oJEw5Eiqa6lkNPCGDhfA8= +-----END RSA PRIVATE KEY----- +private_key_type rsa +serial_number 6f:98:9d:f8:67:1a:31:e3:27:60:1b:f7:32:f7:53:19:68:a0:c8:9d diff --git a/builtin/credential/cert/test-fixtures/root/root.crl b/builtin/credential/cert/test-fixtures/root/root.crl new file mode 100644 index 0000000..a80c9e4 --- /dev/null +++ b/builtin/credential/cert/test-fixtures/root/root.crl @@ -0,0 +1,12 @@ +-----BEGIN X509 CRL----- +MIIBrjCBlzANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbRcN +MTYwMjI5MDIyOTE3WhcNMjUwMTA1MTAyOTE3WjArMCkCFG+YnfhnGjHjJ2Ab9zL3 +UxlooMidFxExNjAyMjgyMTI5MTctMDUwMKAjMCEwHwYDVR0jBBgwFoAUncSzT/6H +MexyuiU9/7EgHu+ok5swDQYJKoZIhvcNAQELBQADggEBAG9YDXpNe4LJroKZmVCn +HqMhW8eyzyaPak2nPPGCVUnc6vt8rlBYQU+xlBizD6xatZQDMPgrT8sBl9W3ysXk +RUlliHsT/SHddMz5dAZsBPRMJ7pYWLTx8jI4w2WRfbSyI4bY/6qTRNkEBUv+Fk8J +xvwB89+EM0ENcVMhv9ghsUA8h7kOg673HKwRstLDAzxS/uLmEzFjj8SV2m5DbV2Y +UUCKRSV20/kxJMIC9x2KikZhwOSyv1UE1otD+RQvbfAoZPUDmvp2FR/E0NGjBBOg +1TtCPRrl63cjqU3s8KQ4uah9Vj+Cwcu9n/yIKKtNQq4NKHvagv8GlUsoJ4BdAxCw +IA0= +-----END X509 CRL----- diff --git a/builtin/credential/cert/test-fixtures/root/rootcacert.pem b/builtin/credential/cert/test-fixtures/root/rootcacert.pem new file mode 100644 index 0000000..dcb307a --- /dev/null +++ b/builtin/credential/cert/test-fixtures/root/rootcacert.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw +MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN 
+AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7 +Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0 +z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x +AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb +6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH +SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G +A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx +7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc +BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA +wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2 +U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa +cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N +ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ +t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk +zehNe5dFTjFpylg1o6b8Ow== +-----END CERTIFICATE----- diff --git a/builtin/credential/cert/test-fixtures/root/rootcacert.srl b/builtin/credential/cert/test-fixtures/root/rootcacert.srl new file mode 100644 index 0000000..1c85d63 --- /dev/null +++ b/builtin/credential/cert/test-fixtures/root/rootcacert.srl @@ -0,0 +1 @@ +92223EAFBBEE17AF diff --git a/builtin/credential/cert/test-fixtures/root/rootcakey.pem b/builtin/credential/cert/test-fixtures/root/rootcakey.pem new file mode 100644 index 0000000..e950da5 --- /dev/null +++ b/builtin/credential/cert/test-fixtures/root/rootcakey.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEA7FMy+FO4hFzZiHFmmY/B6A/zpyCep9PWZfGLUDtDuprHbg2p +t/TQjegMxC0AmWPZEHeG9FIJvT+WQXuLUG5v5MbG4zs21mqnYXwWAbTPaZ35bpp3 +BbbquXFtAqOZG8yfrob1g9OSgmY+bG3ZNxxv35qmbBbuWyUvmPwfjzEAclMxv48w +/2Rs4dXkBuvc9PiNdQ9Sv+ZYG8GIqIcbRd38cSaXI4Q94BOHEr4jm1vqb54H7twv +0Gy9CnLXfn7ZtEDFSmG8WLk2GvIna+UC+gnxSqKCw0rcTby63rQUCgdJZF8VQUVi +18+BMNLXI4pT/P9cxTaCJC/YeuV5a3SaniOoFQIDAQABAoIBAQCoGZJC84JnnIgb +ttZNWuWKBXbCJcDVDikOQJ9hBZbqsFg1X0CfGmQS3MHf9Ubc1Ro8zVjQh15oIEfn +8lIpdzTeXcpxLdiW8ix3ekVJF20F6pnXY8ZP6UnTeOwamXY6QPZAtb0D9UXcvY+f +nw+IVRD6082XS0Rmzu+peYWVXDy+FDN+HJRANBcdJZz8gOmNBIe0qDWx1b85d/s8 +2Kk1Wwdss1IwAGeSddTSwzBNaaHdItZaMZOqPW1gRyBfVSkcUQIE6zn2RKw2b70t +grkIvyRcTdfmiKbqkkJ+eR+ITOUt0cBZSH4cDjlQA+r7hulvoBpQBRj068Toxkcc +bTagHaPBAoGBAPWPGVkHqhTbJ/DjmqDIStxby2M1fhhHt4xUGHinhUYjQjGOtDQ9 +0mfaB7HObudRiSLydRAVGAHGyNJdQcTeFxeQbovwGiYKfZSA1IGpea7dTxPpGEdN +ksA0pzSp9MfKzX/MdLuAkEtO58aAg5YzsgX9hDNxo4MhH/gremZhEGZlAoGBAPZf +lqdYvAL0fjHGJ1FUEalhzGCGE9PH2iOqsxqLCXK7bDbzYSjvuiHkhYJHAOgVdiW1 +lB34UHHYAqZ1VVoFqJ05gax6DE2+r7K5VV3FUCaC0Zm3pavxchU9R/TKP82xRrBj +AFWwdgDTxUyvQEmgPR9sqorftO71Iz2tiwyTpIfxAoGBAIhEMLzHFAse0rtKkrRG +ccR27BbRyHeQ1Lp6sFnEHKEfT8xQdI/I/snCpCJ3e/PBu2g5Q9z416mktiyGs8ib +thTNgYsGYnxZtfaCx2pssanoBcn2wBJRae5fSapf5gY49HDG9MBYR7qCvvvYtSzU +4yWP2ZzyotpRt3vwJKxLkN5BAoGAORHpZvhiDNkvxj3da7Rqpu7VleJZA2y+9hYb +iOF+HcqWhaAY+I+XcTRrTMM/zYLzLEcEeXDEyao86uwxCjpXVZw1kotvAC9UqbTO +tnr3VwRkoxPsV4kFYTAh0+1pnC8dbcxxDmhi3Uww3tOVs7hfkEDuvF6XnebA9A+Y +LyCgMzECgYEA6cCU8QODOivIKWFRXucvWckgE6MYDBaAwe6qcLsd1Q/gpE2e3yQc +4RB3bcyiPROLzMLlXFxf1vSNJQdIaVfrRv+zJeGIiivLPU8+Eq4Lrb+tl1LepcOX +OzQeADTSCn5VidOfjDkIst9UXjMlrFfV9/oJEw5Eiqa6lkNPCGDhfA8= +-----END RSA PRIVATE KEY----- diff --git a/builtin/credential/cert/test-fixtures/root/rootcawext.cnf b/builtin/credential/cert/test-fixtures/root/rootcawext.cnf new file mode 100644 index 0000000..77e8258 --- /dev/null +++ b/builtin/credential/cert/test-fixtures/root/rootcawext.cnf @@ -0,0 +1,16 @@ +[ req ] +default_bits = 2048 +encrypt_key = no 
+prompt = no +default_md = sha256 +req_extensions = req_v3 +distinguished_name = dn + +[ dn ] +CN = example.com + +[ req_v3 ] +2.1.1.1=ASN1:UTF8String:A UTF8String Extension +2.1.1.2=ASN1:UTF8:A UTF8 Extension +2.1.1.3=ASN1:IA5:An IA5 Extension +2.1.1.4=ASN1:VISIBLE:A Visible Extension diff --git a/builtin/credential/cert/test-fixtures/root/rootcawext.csr b/builtin/credential/cert/test-fixtures/root/rootcawext.csr new file mode 100644 index 0000000..55e22ee --- /dev/null +++ b/builtin/credential/cert/test-fixtures/root/rootcawext.csr @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIDAzCCAesCAQAwFjEUMBIGA1UEAwwLZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQDM2PrLyK/wVQIcnK362ZylDrIVMjFQzps/0AxM +ke+8MNPMArBlSAhnZus6qb0nN0nJrDLkHQgYqnSvK9N7VUv/xFblEcOLBlciLhyN +Wkm92+q/M/xOvUVmnYkN3XgTI5QNxF7ZWDFHmwCNV27RraQZou0hG7yvyoILLMQE +3MnMCNM1nZ9JIuBMcRsZLGqQ1XNaQljboRVIUjimzkcfYyTruhLosTIbwForp78J +MzHHqVjtLJXPqUnRMS7KhGMj1f2mIswQzCv6F2PWEzNBbP4Gb67znKikKDs0RgyL +RyfizFNFJSC58XntK8jwHK1D8W3UepFf4K8xNFnhPoKWtWfJAgMBAAGggacwgaQG +CSqGSIb3DQEJDjGBljCBkzAcBgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATAf +BgNRAQEEGAwWQSBVVEY4U3RyaW5nIEV4dGVuc2lvbjAZBgNRAQIEEgwQQSBVVEY4 +IEV4dGVuc2lvbjAZBgNRAQMEEhYQQW4gSUE1IEV4dGVuc2lvbjAcBgNRAQQEFRoT +QSBWaXNpYmxlIEV4dGVuc2lvbjANBgkqhkiG9w0BAQsFAAOCAQEAtYjewBcqAXxk +tDY0lpZid6ZvfngdDlDZX0vrs3zNppKNe5Sl+jsoDOexqTA7HQA/y1ru117sAEeB +yiqMeZ7oPk8b3w+BZUpab7p2qPMhZypKl93y/jGXGscc3jRbUBnym9S91PSq6wUd +f2aigSqFc9+ywFVdx5PnnZUfcrUQ2a+AweYEkGOzXX2Ga+Ige8grDMCzRgCoP5cW +kM5ghwZp5wYIBGrKBU9iDcBlmnNhYaGWf+dD00JtVDPNn2bJnCsJHIO0nklZgnrS +fli8VQ1nYPkONdkiRYLt6//6at1iNDoDgsVCChtlVkLpxFIKcDFUHlffZsc1kMFI +HTX579k8hA== +-----END CERTIFICATE REQUEST----- diff --git a/builtin/credential/cert/test-fixtures/root/rootcawextcert.pem b/builtin/credential/cert/test-fixtures/root/rootcawextcert.pem new file mode 100644 index 0000000..2c85917 --- /dev/null +++ b/builtin/credential/cert/test-fixtures/root/rootcawextcert.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDRjCCAi6gAwIBAgIJAJIiPq+77hejMA0GCSqGSIb3DQEBCwUAMBYxFDASBgNV +BAMTC2V4YW1wbGUuY29tMB4XDTE3MTEyOTE5MTgwM1oXDTI3MTEyNzE5MTgwM1ow +FjEUMBIGA1UEAwwLZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDM2PrLyK/wVQIcnK362ZylDrIVMjFQzps/0AxMke+8MNPMArBlSAhn +Zus6qb0nN0nJrDLkHQgYqnSvK9N7VUv/xFblEcOLBlciLhyNWkm92+q/M/xOvUVm +nYkN3XgTI5QNxF7ZWDFHmwCNV27RraQZou0hG7yvyoILLMQE3MnMCNM1nZ9JIuBM +cRsZLGqQ1XNaQljboRVIUjimzkcfYyTruhLosTIbwForp78JMzHHqVjtLJXPqUnR +MS7KhGMj1f2mIswQzCv6F2PWEzNBbP4Gb67znKikKDs0RgyLRyfizFNFJSC58Xnt +K8jwHK1D8W3UepFf4K8xNFnhPoKWtWfJAgMBAAGjgZYwgZMwHAYDVR0RBBUwE4IL +ZXhhbXBsZS5jb22HBH8AAAEwHwYDUQEBBBgMFkEgVVRGOFN0cmluZyBFeHRlbnNp +b24wGQYDUQECBBIMEEEgVVRGOCBFeHRlbnNpb24wGQYDUQEDBBIWEEFuIElBNSBF +eHRlbnNpb24wHAYDUQEEBBUaE0EgVmlzaWJsZSBFeHRlbnNpb24wDQYJKoZIhvcN +AQELBQADggEBAGU/iA6saupEaGn/veVNCknFGDL7pst5D6eX/y9atXlBOdJe7ZJJ +XQRkeHJldA0khVpzH7Ryfi+/25WDuNz+XTZqmb4ppeV8g9amtqBwxziQ9UUwYrza +eDBqdXBaYp/iHUEHoceX4F44xuo80BIqwF0lD9TFNUFoILnF26ajhKX0xkGaiKTH +6SbjBfHoQVMzOHokVRWregmgNycV+MAI9Ne9XkIZvdOYeNlcS9drZeJI3szkiaxB +WWaWaAr5UU2Z0yUCZnAIDMRcIiUbSEjIDz504sSuCzTctMOxWZu0r/0UrXRzwZZi +HAaKm3MUmBh733ChP4rTB58nr5DEr5rJ9P8= +-----END CERTIFICATE----- diff --git a/builtin/credential/cert/test-fixtures/root/rootcawextkey.pem b/builtin/credential/cert/test-fixtures/root/rootcawextkey.pem new file mode 100644 index 0000000..3f8d8eb --- /dev/null +++ b/builtin/credential/cert/test-fixtures/root/rootcawextkey.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- 
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDM2PrLyK/wVQIc +nK362ZylDrIVMjFQzps/0AxMke+8MNPMArBlSAhnZus6qb0nN0nJrDLkHQgYqnSv +K9N7VUv/xFblEcOLBlciLhyNWkm92+q/M/xOvUVmnYkN3XgTI5QNxF7ZWDFHmwCN +V27RraQZou0hG7yvyoILLMQE3MnMCNM1nZ9JIuBMcRsZLGqQ1XNaQljboRVIUjim +zkcfYyTruhLosTIbwForp78JMzHHqVjtLJXPqUnRMS7KhGMj1f2mIswQzCv6F2PW +EzNBbP4Gb67znKikKDs0RgyLRyfizFNFJSC58XntK8jwHK1D8W3UepFf4K8xNFnh +PoKWtWfJAgMBAAECggEAW7hLkzMok9N8PpNo0wjcuor58cOnkSbxHIFrAF3XmcvD +CXWqxa6bFLFgYcPejdCTmVkg8EKPfXvVAxn8dxyaCss+nRJ3G6ibGxLKdgAXRItT +cIk2T4svp+KhmzOur+MeR4vFbEuwxP8CIEclt3yoHVJ2Gnzw30UtNRO2MPcq48/C +ZODGeBqUif1EGjDAvlqu5kl/pcDBJ3ctIZdVUMYYW4R9JtzKsmwhX7CRCBm8k5hG +2uzn8AKwpuVtfWcnX59UUmHGJ8mjETuNLARRAwWBWhl8f7wckmi+PKERJGEM2QE5 +/Voy0p22zmQ3waS8LgiI7YHCAEFqjVWNziVGdR36gQKBgQDxkpfkEsfa5PieIaaF +iQOO0rrjEJ9MBOQqmTDeclmDPNkM9qvCF/dqpJfOtliYFxd7JJ3OR2wKrBb5vGHt +qIB51Rnm9aDTM4OUEhnhvbPlERD0W+yWYXWRvqyHz0GYwEFGQ83h95GC/qfTosqy +LEzYLDafiPeNP+DG/HYRljAxUwKBgQDZFOWHEcZkSFPLNZiksHqs90OR2zIFxZcx +SrbkjqXjRjehWEAwgpvQ/quSBxrE2E8xXgVm90G1JpWzxjUfKKQRM6solQeEpnwY +kCy2Ozij/TtbLNRlU65UQ+nMto8KTSIyJbxxdOZxYdtJAJQp1FJO1a1WC11z4+zh +lnLV1O5S8wKBgQCDf/QU4DBQtNGtas315Oa96XJ4RkUgoYz+r1NN09tsOERC7UgE +KP2y3JQSn2pMqE1M6FrKvlBO4uzC10xLja0aJOmrssvwDBu1D8FtA9IYgJjFHAEG +v1i7lJrgdu7TUtx1flVli1l3gF4lM3m5UaonBrJZV7rB9iLKzwUKf8IOJwKBgFt/ +QktPA6brEV56Za8sr1hOFA3bLNdf9B0Tl8j4ExWbWAFKeCu6MUDCxsAS/IZxgdeW +AILovqpC7CBM78EFWTni5EaDohqYLYAQ7LeWeIYuSyFf4Nogjj74LQha/iliX4Jx +g17y3dp2W34Gn2yOEG8oAxpcSfR54jMnPZnBWP5fAoGBAMNAd3oa/xq9A5v719ik +naD7PdrjBdhnPk4egzMDv54y6pCFlvFbEiBduBWTmiVa7dSzhYtmEbri2WrgARlu +vkfTnVH9E8Hnm4HTbNn+ebxrofq1AOAvdApSoslsOP1NT9J6zB89RzChJyzjbIQR +Gevrutb4uO9qpB1jDVoMmGde +-----END PRIVATE KEY----- diff --git a/builtin/credential/cert/test-fixtures/root/rootcawou.cnf b/builtin/credential/cert/test-fixtures/root/rootcawou.cnf new file mode 100644 index 0000000..be11c33 --- /dev/null +++ b/builtin/credential/cert/test-fixtures/root/rootcawou.cnf @@ -0,0 +1,18 @@ +[ req ] +default_bits = 2048 +encrypt_key = no +prompt = no +default_md = sha256 +distinguished_name = dn +req_extensions = req_v3 + +[ req_v3 ] +subjectAltName = @alt_names + +[ dn ] +CN = example.com +OU = engineering + +[ alt_names ] +IP.1 = 127.0.0.1 +email = valid@example.com diff --git a/builtin/credential/cert/test-fixtures/root/rootcawou.csr b/builtin/credential/cert/test-fixtures/root/rootcawou.csr new file mode 100644 index 0000000..d72579b --- /dev/null +++ b/builtin/credential/cert/test-fixtures/root/rootcawou.csr @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIICpjCCAY4CAQAwLDEUMBIGA1UEAwwLZXhhbXBsZS5jb20xFDASBgNVBAsMC2Vu +Z2luZWVyaW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsI4ZJWfQ +mI/3qfacas7O260Iii06oTP4GoQ5QpAYvcfWKKnkXagd0fBl+hfpnrK6ojYY71Jt +cMstVdff2Wc5D3bnQ8Hikb1TMhdAAtZDUW4QbeWAXJ4mkDq1ARRcbTvK121bmDQp +1efepohe0mDxNCruGSHpqfayC6LOkk7XZ73VAOcPPV5OOpY8el7quUdfvElxn0vH +KBVlFRBBW2fbY5EAHDMkmBjWr0ofpwb+vhSuQlOZgsbd20mjDwSYIbywG0tAEOoj +pLI0pOQV5msdfbqmKYE6ZmUeL/Q/pZjYh5uxFUZ4aMD/STDaeq7GdYQYcm17WL+N +ceal9+gKceJSiQIDAQABoDUwMwYJKoZIhvcNAQkOMSYwJDAiBgNVHREEGzAZhwR/ +AAABgRF2YWxpZEBleGFtcGxlLmNvbTANBgkqhkiG9w0BAQsFAAOCAQEAf1tnXgX1 +/1p2MAxHhcil5/lsOMgHWU5dRL6KjK2cepuBpfzlCbxFtvnsj9WHx46f9Q/xbqy+ +1A2TJIBUWxK+Eji//WJxbDsi7fmV5VQlpG7+sEa7yin3KobfMd84nDIYP8wLF1Fq +HhRf7ZjIDh3zTgBosvIIjGEyABrouGYm4Nl409I09MftGXK/5TLJkgm6sxcJCAHG +BMm8IFaI0VN5QFIHKvJ/1oQLpLV+gvtR6jAM/99LXc0SXmFn0Jcy/mE/hxJXJigW +dDOblgjliJo0rWwHK4gfsgpMbHjJiG70g0XHtTpBW+i/NyuPnc8RYzBIJv+4sks+ +hWSmn6/IL46qTg== +-----END CERTIFICATE REQUEST----- diff --git 
a/builtin/credential/cert/test-fixtures/root/rootcawoucert.pem b/builtin/credential/cert/test-fixtures/root/rootcawoucert.pem new file mode 100644 index 0000000..fe0f227 --- /dev/null +++ b/builtin/credential/cert/test-fixtures/root/rootcawoucert.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDATCCAemgAwIBAgIJAMAMmdiZi5G/MA0GCSqGSIb3DQEBCwUAMCwxFDASBgNV +BAMMC2V4YW1wbGUuY29tMRQwEgYDVQQLDAtlbmdpbmVlcmluZzAeFw0xODA5MDEx +NDM0NTVaFw0yODA4MjkxNDM0NTVaMCwxFDASBgNVBAMMC2V4YW1wbGUuY29tMRQw +EgYDVQQLDAtlbmdpbmVlcmluZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBALCOGSVn0JiP96n2nGrOztutCIotOqEz+BqEOUKQGL3H1iip5F2oHdHwZfoX +6Z6yuqI2GO9SbXDLLVXX39lnOQ9250PB4pG9UzIXQALWQ1FuEG3lgFyeJpA6tQEU +XG07ytdtW5g0KdXn3qaIXtJg8TQq7hkh6an2sguizpJO12e91QDnDz1eTjqWPHpe +6rlHX7xJcZ9LxygVZRUQQVtn22ORABwzJJgY1q9KH6cG/r4UrkJTmYLG3dtJow8E +mCG8sBtLQBDqI6SyNKTkFeZrHX26pimBOmZlHi/0P6WY2IebsRVGeGjA/0kw2nqu +xnWEGHJte1i/jXHmpffoCnHiUokCAwEAAaMmMCQwIgYDVR0RBBswGYcEfwAAAYER +dmFsaWRAZXhhbXBsZS5jb20wDQYJKoZIhvcNAQELBQADggEBAHATSjW20P7+6en0 +Oq/n/R/i+aCgzcxIWSgf3dhOyxGfBW6svSg8ZtBQFEZZHqIRSXZX89zz25+mvwqi +kGRJKKzD/KDd2v9C5+H3DSuu9CqClVtpjF2XLvRHnuclBIrwvyijRcqa2GCTA9YZ +sOfVVGQYobDbtRCgTwWkEpU9RrZWWoD8HAYMkxFc1Cs/vJconeAaQDPEIZx9wnAN +4r/F5143rn5dyhbYehz1/gykL3K0v7s4U5NhaSACE2AiQ+63vhAEd5xt9WPKAAGY +zEyK4b/qPO88mxLr3A/rdzzt1UYAwT38kXA7aV82AH1J8EaCr7tLnXzyLXiEsI4E +BOrHBgU= +-----END CERTIFICATE----- diff --git a/builtin/credential/cert/test-fixtures/root/rootcawoukey.pem b/builtin/credential/cert/test-fixtures/root/rootcawoukey.pem new file mode 100644 index 0000000..1663172 --- /dev/null +++ b/builtin/credential/cert/test-fixtures/root/rootcawoukey.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCwjhklZ9CYj/ep +9pxqzs7brQiKLTqhM/gahDlCkBi9x9YoqeRdqB3R8GX6F+mesrqiNhjvUm1wyy1V +19/ZZzkPdudDweKRvVMyF0AC1kNRbhBt5YBcniaQOrUBFFxtO8rXbVuYNCnV596m +iF7SYPE0Ku4ZIemp9rILos6STtdnvdUA5w89Xk46ljx6Xuq5R1+8SXGfS8coFWUV +EEFbZ9tjkQAcMySYGNavSh+nBv6+FK5CU5mCxt3bSaMPBJghvLAbS0AQ6iOksjSk +5BXmax19uqYpgTpmZR4v9D+lmNiHm7EVRnhowP9JMNp6rsZ1hBhybXtYv41x5qX3 +6Apx4lKJAgMBAAECggEAF1Jd7fv9qPlzfKcP2GgDGS+NLjt1QDAOOOp4aduA+Si5 +mFuAyAJaFg5MWjHocUcosh61Qn+/5yNflLRUZHJnLizFtcSZuiipIbfCg91rvQjt +8KZdQ168t1aZ7E+VOfSpAbX3YG6bjB754UOoSt/1XK/DDdzV8dadhD34TYlOmOxZ +MMnIRERqa+IBSn90TONWPyY3ELSpaiCkz1YZpp6g9RnTACZKLwzBMSunNO5qbEfH +TWlk5o14DZ3zRu5gLT5wy3SGfzm2M+qi8afQq1MT2I6opXj4KU3c64agjNUBYTq7 +S2YWmw6yrqPzxcg0hOz9H6djCx2oen/UxM2z4uoE1QKBgQDlHIFQcVTWEmxhy5yp +uV7Ya5ubx6rW4FnCgh5lJ+wWuSa5TkMuBr30peJn0G6y0I0J1El4o3iwLD/jxwHb +BIJTB1z5fBo3K7lhpZLuRFSWe9Mcd/Aj2pFcy5TqaIV9x8bgVAMVOoZAq9muiEog +zIWVWrVF6FDuFgRMRegNDej6pwKBgQDFRpNQMscPpH+x6xeS0E8boZKnHyuJUZQZ +kfEmnHQuTYmmHS4kXSnJhjODa53YddknTrPrHOvddDDYAaulyyYitPemubYQzBog +MyIgaeFSw/eHrcr/8g4QTohRFcI71xnKRmHvQZb8UflFJkqsqil6WZ6FJiC+STcn +Qdnhol9fTwKBgQCZtGDw1cdjgqKhjVcB6nG94ZtYjECJvaOaQW8g0AKsT/SxttaN +B0ri2XMl0IijgBROttO/knQCRP1r03PkOocwKq1uVprDzpqk7s6++KqC9nlwDOrX +Muf4iD/UbuC3vJIop1QWJtgwhNoaJCcPEAbCZ0Nbrfq1b6Hchb2jHGTj2wKBgHJo +DpDJEeaBeMi+1SoAgpA8sKcZDY+SbvgxShAhVcNwli5u586Q9OX5XTCPHbhmB+yi +2Pa2DBefBaCPv3LkEJa6KpFXTD4Lj+8ymE0B+nmcSpY19O9f+kX8tVOI8d7wTPWg +wbUWbbCg/ZXbshzWhj19cdA4H28bWM/8gZY4K2VDAoGBAMYsNhKdu9ON/7vaLijh +kai2tQLObYqDV6OAzdYm1gopmTTLcxQ6jP6aQlyw1ie51ms/hFozmNkGkaQGD8pp +751Lv3prQz/lDaZeQfKANNN1tpz/QqUOu2di9secMmodxXkwcLzcEKjWPDTuPhcO +VODU1hC5oj8yGFInoDLL2B0K +-----END PRIVATE KEY----- diff --git a/builtin/credential/cert/test-fixtures/testcacert1.pem b/builtin/credential/cert/test-fixtures/testcacert1.pem new file mode 100644 index 
0000000..ab8bf9e --- /dev/null +++ b/builtin/credential/cert/test-fixtures/testcacert1.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDPjCCAiagAwIBAgIUfIKsF2VPT7sdFcKOHJH2Ii6K4MwwDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLbXl2YXVsdC5jb20wIBcNMTYwNTAyMTYwNTQyWhgPMjA2 +NjA0MjAxNjA2MTJaMBYxFDASBgNVBAMTC215dmF1bHQuY29tMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuOimEXawD2qBoLCFP3Skq5zi1XzzcMAJlfdS +xz9hfymuJb+cN8rB91HOdU9wQCwVKnkUtGWxUnMp0tT0uAZj5NzhNfyinf0JGAbP +67HDzVZhGBHlHTjPX0638yaiUx90cTnucX0N20SgCYct29dMSgcPl+W78D3Jw3xE +JsHQPYS9ASe2eONxG09F/qNw7w/RO5/6WYoV2EmdarMMxq52pPe2chtNMQdSyOUb +cCcIZyk4QVFZ1ZLl6jTnUPb+JoCx1uMxXvMek4NF/5IL0Wr9dw2gKXKVKoHDr6SY +WrCONRw61A5Zwx1V+kn73YX3USRlkufQv/ih6/xThYDAXDC9cwIDAQABo4GBMH8w +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFOuKvPiU +G06iHkRXAOeMiUdBfHFyMB8GA1UdIwQYMBaAFOuKvPiUG06iHkRXAOeMiUdBfHFy +MBwGA1UdEQQVMBOCC215dmF1bHQuY29thwR/AAABMA0GCSqGSIb3DQEBCwUAA4IB +AQBcN/UdAMzc7UjRdnIpZvO+5keBGhL/vjltnGM1dMWYHa60Y5oh7UIXF+P1RdNW +n7g80lOyvkSR15/r1rDkqOK8/4oruXU31EcwGhDOC4hU6yMUy4ltV/nBoodHBXNh +MfKiXeOstH1vdI6G0P6W93Bcww6RyV1KH6sT2dbETCw+iq2VN9CrruGIWzd67UT/ +spe/kYttr3UYVV3O9kqgffVVgVXg/JoRZ3J7Hy2UEXfh9UtWNanDlRuXaZgE9s/d +CpA30CHpNXvKeyNeW2ktv+2nAbSpvNW+e6MecBCTBIoDSkgU8ShbrzmDKVwNN66Q +5gn6KxUPBKHEtNzs5DgGM7nq +-----END CERTIFICATE----- \ No newline at end of file diff --git a/builtin/credential/cert/test-fixtures/testcacert2.pem b/builtin/credential/cert/test-fixtures/testcacert2.pem new file mode 100644 index 0000000..a8fe6c4 --- /dev/null +++ b/builtin/credential/cert/test-fixtures/testcacert2.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDPjCCAiagAwIBAgIUJfHFxtLQBOkjY9ivHx0AIsRDcH0wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLbXl2YXVsdC5jb20wIBcNMTYwNTAyMTYxMjI5WhgPMjA2 +NjA0MjAxNjEyNTlaMBYxFDASBgNVBAMTC215dmF1bHQuY29tMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqj8ANjAGrg5BgUb3owGwUHlMYDxljMdwroA/ +Bv76ESjomj1zCyVtoJxlDZ8m9VcKQldk5ashFNuY+Ms9FrJ1YsePvsfStNe37C26 +2uldDToh5rm7K8uwp/bQiErwM9QZMCVYCPEH8QgETPg9qWnikDFLMqcLBNbIiXVL +alxEYgA1Qt6+ayMvoS35288hFdZj6a0pCF0+zMHORZxloPhkXWnZLp5lWBiunSJG +0kVz56TjF+oY0L74iW4y3x2805biisGvFqgpZJW8/hLw/kDthNylNTzEqBktsctQ +BXpSMcwG3woJ0uZ8cH/HA/m0VDeIA77UisXnlLiQDpdB7U7QPwIDAQABo4GBMH8w +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMLETWAs +OFNsKJ+uqzChCZvIpxX4MB8GA1UdIwQYMBaAFMLETWAsOFNsKJ+uqzChCZvIpxX4 +MBwGA1UdEQQVMBOCC215dmF1bHQuY29thwR/AAABMA0GCSqGSIb3DQEBCwUAA4IB +AQCRlFb6bZDrq3NkoZF9evls7cT41V3XCdykMA4K9YRgDroZ5psanSvYEnSrk9cU +Y7sVYW7b8qSRWkLZrHCAwc2V0/i5F5j4q9yVnWaTZ+kOVCFYCI8yUS7ixRQdTLNN +os/r9dcRSzzTEqoQThAzn571yRcbJHzTjda3gCJ5F4utYUBU2F9WK+ukW9nqfepa +ju5vEEGDuL2+RyApzL0nGzMUkCdBcK82QBksTlElPnbICbJZWUUMTZWPaZ7WGDDa +Pj+pWMXiDQmzIuzgXUCNtQL6lEv4tQwGYRHjjPmhgJP4sr6Cyrj4G0iljrqM+z/3 +gLyJOlNU8c5x02/C1nFDDa14 +-----END CERTIFICATE----- \ No newline at end of file diff --git a/builtin/credential/cert/test-fixtures/testcakey1.pem b/builtin/credential/cert/test-fixtures/testcakey1.pem new file mode 100644 index 0000000..05211ba --- /dev/null +++ b/builtin/credential/cert/test-fixtures/testcakey1.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAuOimEXawD2qBoLCFP3Skq5zi1XzzcMAJlfdSxz9hfymuJb+c +N8rB91HOdU9wQCwVKnkUtGWxUnMp0tT0uAZj5NzhNfyinf0JGAbP67HDzVZhGBHl +HTjPX0638yaiUx90cTnucX0N20SgCYct29dMSgcPl+W78D3Jw3xEJsHQPYS9ASe2 +eONxG09F/qNw7w/RO5/6WYoV2EmdarMMxq52pPe2chtNMQdSyOUbcCcIZyk4QVFZ +1ZLl6jTnUPb+JoCx1uMxXvMek4NF/5IL0Wr9dw2gKXKVKoHDr6SYWrCONRw61A5Z +wx1V+kn73YX3USRlkufQv/ih6/xThYDAXDC9cwIDAQABAoIBAG3bCo7ljMQb6tel 
+CAUjL5Ilqz5a9ebOsONABRYLOclq4ePbatxawdJF7/sSLwZxKkIJnZtvr2Hkubxg +eOO8KC0YbVS9u39Rjc2QfobxHfsojpbWSuCJl+pvwinbkiUAUxXR7S/PtCPJKat/ +fGdYCiMQ/tqnynh4vR4+/d5o12c0KuuQ22/MdEf3GOadUamRXS1ET9iJWqla1pJW +TmzrlkGAEnR5PPO2RMxbnZCYmj3dArxWAnB57W+bWYla0DstkDKtwg2j2ikNZpXB +nkZJJpxR76IYD1GxfwftqAKxujKcyfqB0dIKCJ0UmfOkauNWjexroNLwaAOC3Nud +XIxppAECgYEA1wJ9EH6A6CrSjdzUocF9LtQy1LCDHbdiQFHxM5/zZqIxraJZ8Gzh +Q0d8JeOjwPdG4zL9pHcWS7+x64Wmfn0+Qfh6/47Vy3v90PIL0AeZYshrVZyJ/s6X +YkgFK80KEuWtacqIZ1K2UJyCw81u/ynIl2doRsIbgkbNeN0opjmqVTMCgYEA3CkW +2fETWK1LvmgKFjG1TjOotVRIOUfy4iN0kznPm6DK2PgTF5DX5RfktlmA8i8WPmB7 +YFOEdAWHf+RtoM/URa7EAGZncCWe6uggAcWqznTS619BJ63OmncpSWov5Byg90gJ +48qIMY4wDjE85ypz1bmBc2Iph974dtWeDtB7dsECgYAyKZh4EquMfwEkq9LH8lZ8 +aHF7gbr1YeWAUB3QB49H8KtacTg+iYh8o97pEBUSXh6hvzHB/y6qeYzPAB16AUpX +Jdu8Z9ylXsY2y2HKJRu6GjxAewcO9bAH8/mQ4INrKT6uIdx1Dq0OXZV8jR9KVLtB +55RCfeLhIBesDR0Auw9sVQKBgB0xTZhkgP43LF35Ca1btgDClNJGdLUztx8JOIH1 +HnQyY/NVIaL0T8xO2MLdJ131pGts+68QI/YGbaslrOuv4yPCQrcS3RBfzKy1Ttkt +TrLFhtoy7T7HqyeMOWtEq0kCCs3/PWB5EIoRoomfOcYlOOrUCDg2ge9EP4nyVVz9 +hAGBAoGBAJXw/ufevxpBJJMSyULmVWYr34GwLC1OhSE6AVVt9JkIYnc5L4xBKTHP +QNKKJLmFmMsEqfxHUNWmpiHkm2E0p37Zehui3kywo+A4ybHPTua70ZWQfZhKxLUr +PvJa8JmwiCM7kO8zjOv+edY1mMWrbjAZH1YUbfcTHmST7S8vp0F3 +-----END RSA PRIVATE KEY----- \ No newline at end of file diff --git a/builtin/credential/cert/test-fixtures/testcakey2.pem b/builtin/credential/cert/test-fixtures/testcakey2.pem new file mode 100644 index 0000000..c2e3763 --- /dev/null +++ b/builtin/credential/cert/test-fixtures/testcakey2.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAqj8ANjAGrg5BgUb3owGwUHlMYDxljMdwroA/Bv76ESjomj1z +CyVtoJxlDZ8m9VcKQldk5ashFNuY+Ms9FrJ1YsePvsfStNe37C262uldDToh5rm7 +K8uwp/bQiErwM9QZMCVYCPEH8QgETPg9qWnikDFLMqcLBNbIiXVLalxEYgA1Qt6+ +ayMvoS35288hFdZj6a0pCF0+zMHORZxloPhkXWnZLp5lWBiunSJG0kVz56TjF+oY +0L74iW4y3x2805biisGvFqgpZJW8/hLw/kDthNylNTzEqBktsctQBXpSMcwG3woJ +0uZ8cH/HA/m0VDeIA77UisXnlLiQDpdB7U7QPwIDAQABAoIBADivQ2XHdeHsUzk1 +JOz8efVBfgGo+nL2UPl5MAMnUKH4CgKZJT3311mb2TXA4RrdQUg3ixvBcAFe4L8u +BIgTIWyjX6Q5KloWXWHhFA8hll76FSGag8ygRJCYaHSI5xOKslxKgtZvUqKZdb0f +BoDrBYnXL9+MqOmSjjDegh7G2+n49n774Z2VVR47TZTBB5LCWDWj4AtEcalgwlvw +d5yL/GU/RfCkXCjCeie1pInp3eCMUI9jlvbe/vyaoFq2RiaJw1LSlJLXZBMYzaij +XkgMtRsr5bf0Tg2z3SPiaa9QZogfVLqHWAt6RHZf9Keidtiho+Ad6/dzJu+jKDys +Z6cthOECgYEAxMUCIYKO74BtPRN2r7KxbSjHzFsasxbfwkSg4Qefd4UoZJX2ShlL +cClnef3WdkKxtShJhqEPaKTYTrfgM+iz/a9+3lAFnS4EZawSf3YgXXslVTory0Da +yPQZKxX6XsupaLl4s13ehw/D0qfdxWVYaiFad3ePEE4ytmSkMMHLHo8CgYEA3X4a +jMWVbVv1W1lj+LFcg7AhU7lHgla+p7NI4gHw9V783noafnW7/8pNF80kshYo4u0g +aJRwaU/Inr5uw14eAyEjB4X7N8AE5wGmcxxS2uluGG6r3oyQSJBqktGnLwyTfcfC +XrfsGJza2BRGF4Mn8SFb7WtCl3f1qu0hTF+mC1ECgYB4oA1eXZsiV6if+H6Z1wHN +2WIidPc5MpyZi1jUmse3jXnlr8j8Q+VrLPayYlpGxTwLwlbQoYvAqs2v9CkNqWot +6pfr0UKfyMYJTiNI4DGXHRcV2ENgprF436tOLnr+AfwopwrHapQwWAnD6gSaLja1 +WR0Mf87EQCv2hFvjR+otIQKBgQCLyvJQ1MeZzQdPT1zkcnSUfM6b+/1hCwSr7WDb +nCQLiZcJh4E/PWmZaII9unEloQzPJKBmwQEtxng1kLVxwu4oRXrJXcuPhTbS4dy/ +HCpDFj8xVnBNNuQ9mEBbR80/ya0xHqnThDuT0TPiWvFeF55W9xoA/8h4tvKrnZx9 +ioTO8QKBgCMqRa5pHb+vCniTWUTz9JZRnRsdq7fRSsJHngMe5gOR4HylyAmmqKrd +kEXfkdu9TH2jxSWcZbHUPVwKfOUqQUZMz0pml0DIs1kedUDFanTZ8Rgg5SGUHBW0 +5bNCq64tKMmw6GiicaAGqd04OPo85WD9h8mPhM1Jdv/UmTV+HFAr +-----END RSA PRIVATE KEY----- \ No newline at end of file diff --git a/builtin/credential/cert/test-fixtures/testissuedcert4.pem b/builtin/credential/cert/test-fixtures/testissuedcert4.pem new file mode 100644 index 0000000..5bffd67 --- /dev/null +++ b/builtin/credential/cert/test-fixtures/testissuedcert4.pem @@ -0,0 +1,22 
@@ +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIUBLqh6ctGWVDUxFhxJX7m6S/bnrcwDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLbXl2YXVsdC5jb20wIBcNMTYwNTAyMTYwOTI2WhgPMjA2 +NjA0MjAxNTA5NTZaMBsxGTAXBgNVBAMTEGNlcnQubXl2YXVsdC5jb20wggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDY3gPB29kkdbu0mPO6J0efagQhSiXB +9OyDuLf5sMk6CVDWVWal5hISkyBmw/lXgF7qC2XFKivpJOrcGQd5Ep9otBqyJLzI +b0IWdXuPIrVnXDwcdWr86ybX2iC42zKWfbXgjzGijeAVpl0UJLKBj+fk5q6NvkRL +5FUL6TRV7Krn9mrmnrV9J5IqV15pTd9W2aVJ6IqWvIPCACtZKulqWn4707uy2X2W +1Stq/5qnp1pDshiGk1VPyxCwQ6yw3iEcgecbYo3vQfhWcv7Q8LpSIM9ZYpXu6OmF ++czqRZS9gERl+wipmmrN1MdYVrTuQem21C/PNZ4jo4XUk1SFx6JrcA+lAgMBAAGj +gfUwgfIwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBSe +Cl9WV3BjGCwmS/KrDSLRjfwyqjAfBgNVHSMEGDAWgBTrirz4lBtOoh5EVwDnjIlH +QXxxcjA7BggrBgEFBQcBAQQvMC0wKwYIKwYBBQUHMAKGH2h0dHA6Ly8xMjcuMC4w +LjE6ODIwMC92MS9wa2kvY2EwIQYDVR0RBBowGIIQY2VydC5teXZhdWx0LmNvbYcE +fwAAATAxBgNVHR8EKjAoMCagJKAihiBodHRwOi8vMTI3LjAuMC4xOjgyMDAvdjEv +cGtpL2NybDANBgkqhkiG9w0BAQsFAAOCAQEAWGholPN8buDYwKbUiDavbzjsxUIX +lU4MxEqOHw7CD3qIYIauPboLvB9EldBQwhgOOy607Yvdg3rtyYwyBFwPhHo/hK3Z +6mn4hc6TF2V+AUdHBvGzp2dbYLeo8noVoWbQ/lBulggwlIHNNF6+a3kALqsqk1Ch +f/hzsjFnDhAlNcYFgG8TgfE2lE/FckvejPqBffo7Q3I+wVAw0buqiz5QL81NOT+D +Y2S9LLKLRaCsWo9wRU1Az4Rhd7vK5SEMh16jJ82GyEODWPvuxOTI1MnzfnbWyLYe +TTp6YBjGMVf1I6NEcWNur7U17uIOiQjMZ9krNvoMJ1A/cxCoZ98QHgcIPg== +-----END CERTIFICATE----- \ No newline at end of file diff --git a/builtin/credential/cert/test-fixtures/testissuedkey4.pem b/builtin/credential/cert/test-fixtures/testissuedkey4.pem new file mode 100644 index 0000000..58e7f8d --- /dev/null +++ b/builtin/credential/cert/test-fixtures/testissuedkey4.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA2N4DwdvZJHW7tJjzuidHn2oEIUolwfTsg7i3+bDJOglQ1lVm +peYSEpMgZsP5V4Be6gtlxSor6STq3BkHeRKfaLQasiS8yG9CFnV7jyK1Z1w8HHVq +/Osm19oguNsyln214I8xoo3gFaZdFCSygY/n5Oaujb5ES+RVC+k0Veyq5/Zq5p61 +fSeSKldeaU3fVtmlSeiKlryDwgArWSrpalp+O9O7stl9ltUrav+ap6daQ7IYhpNV +T8sQsEOssN4hHIHnG2KN70H4VnL+0PC6UiDPWWKV7ujphfnM6kWUvYBEZfsIqZpq +zdTHWFa07kHpttQvzzWeI6OF1JNUhceia3APpQIDAQABAoIBAQCH3vEzr+3nreug +RoPNCXcSJXXY9X+aeT0FeeGqClzIg7Wl03OwVOjVwl/2gqnhbIgK0oE8eiNwurR6 +mSPZcxV0oAJpwiKU4T/imlCDaReGXn86xUX2l82KRxthNdQH/VLKEmzij0jpx4Vh +bWx5SBPdkbmjDKX1dmTiRYWIn/KjyNPvNvmtwdi8Qluhf4eJcNEUr2BtblnGOmfL +FdSu+brPJozpoQ1QdDnbAQRgqnh7Shl0tT85whQi0uquqIj1gEOGVjmBvDDnL3GV +WOENTKqsmIIoEzdZrql1pfmYTk7WNaD92bfpN128j8BF7RmAV4/DphH0pvK05y9m +tmRhyHGxAoGBAOV2BBocsm6xup575VqmFN+EnIOiTn+haOvfdnVsyQHnth63fOQx +PNtMpTPR1OMKGpJ13e2bV0IgcYRsRkScVkUtoa/17VIgqZXffnJJ0A/HT67uKBq3 +8o7RrtyK5N20otw0lZHyqOPhyCdpSsurDhNON1kPVJVYY4N1RiIxfut/AoGBAPHz +HfsJ5ZkyELE9N/r4fce04lprxWH+mQGK0/PfjS9caXPhj/r5ZkVMvzWesF3mmnY8 +goE5S35TuTvV1+6rKGizwlCFAQlyXJiFpOryNWpLwCmDDSzLcm+sToAlML3tMgWU +jM3dWHx3C93c3ft4rSWJaUYI9JbHsMzDW6Yh+GbbAoGBANIbKwxh5Hx5XwEJP2yu +kIROYCYkMy6otHLujgBdmPyWl+suZjxoXWoMl2SIqR8vPD+Jj6mmyNJy9J6lqf3f +DRuQ+fEuBZ1i7QWfvJ+XuN0JyovJ5Iz6jC58D1pAD+p2IX3y5FXcVQs8zVJRFjzB +p0TEJOf2oqORaKWRd6ONoMKvAoGALKu6aVMWdQZtVov6/fdLIcgf0pn7Q3CCR2qe +X3Ry2L+zKJYIw0mwvDLDSt8VqQCenB3n6nvtmFFU7ds5lvM67rnhsoQcAOaAehiS +rl4xxoJd5Ewx7odRhZTGmZpEOYzFo4odxRSM9c30/u18fqV1Mm0AZtHYds4/sk6P +aUj0V+kCgYBMpGrJk8RSez5g0XZ35HfpI4ENoWbiwB59FIpWsLl2LADEh29eC455 +t9Muq7MprBVBHQo11TMLLFxDIjkuMho/gcKgpYXCt0LfiNm8EZehvLJUXH+3WqUx +we6ywrbFCs6LaxaOCtTiLsN+GbZCatITL0UJaeBmTAbiw0KQjUuZPQ== +-----END RSA PRIVATE KEY----- \ No newline at end of file diff --git a/builtin/credential/cert/test_responder.go b/builtin/credential/cert/test_responder.go new file mode 100644 index 0000000..d68ebe0 --- 
/dev/null
+++ b/builtin/credential/cert/test_responder.go
@@ -0,0 +1,304 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// Package ocsp implements an OCSP responder based on a generic storage backend.
+// It provides a couple of sample implementations.
+// Because OCSP responders handle high query volumes, we have to be careful
+// about how much logging we do. Error-level logs are reserved for problems
+// internal to the server that can be fixed by an administrator. Any type of
+// incorrect input from a user should be logged at Info or below. For things
+// that are logged on every request, Debug is the appropriate level.
+//
+// From https://github.com/cloudflare/cfssl/blob/master/ocsp/responder.go
+
+package cert
+
+import (
+	"crypto"
+	"crypto/sha256"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"time"
+
+	"golang.org/x/crypto/ocsp"
+)
+
+var (
+	malformedRequestErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x01}
+	internalErrorErrorResponse    = []byte{0x30, 0x03, 0x0A, 0x01, 0x02}
+	tryLaterErrorResponse         = []byte{0x30, 0x03, 0x0A, 0x01, 0x03}
+	sigRequiredErrorResponse      = []byte{0x30, 0x03, 0x0A, 0x01, 0x05}
+	unauthorizedErrorResponse     = []byte{0x30, 0x03, 0x0A, 0x01, 0x06}
+
+	// ErrNotFound indicates the requested OCSP response was not found. It is used to
+	// indicate that the responder should reply with unauthorizedErrorResponse.
+	ErrNotFound = errors.New("Request OCSP Response not found")
+)
+
+// Source represents the logical source of OCSP responses, i.e.,
+// the logic that actually chooses a response based on a request. In
+// order to create an actual responder, wrap one of these in a Responder
+// object and pass it to http.Handle. By default the Responder will set
+// the headers Cache-Control to "max-age=(response.NextUpdate-now), public, no-transform, must-revalidate",
+// Last-Modified to response.ThisUpdate, Expires to response.NextUpdate,
+// ETag to the SHA256 hash of the response, and Content-Type to
+// application/ocsp-response. If you want to override these headers,
+// or set extra headers, your source should return a http.Header
+// with the headers you wish to set. If you don't want to set any
+// extra headers you may return nil instead.
+type Source interface {
+	Response(*ocsp.Request) ([]byte, http.Header, error)
+}
+
+// An InMemorySource is a map from serialNumber -> der(response)
+type InMemorySource map[string][]byte
+
+// Response looks up an OCSP response to provide for a given request.
+// InMemorySource looks up a response purely based on serial number,
+// without regard to what issuer the request is asking for.
+func (src InMemorySource) Response(request *ocsp.Request) ([]byte, http.Header, error) {
+	response, present := src[request.SerialNumber.String()]
+	if !present {
+		return nil, nil, ErrNotFound
+	}
+	return response, nil, nil
+}
+
+// Stats is a basic interface that allows users to record information
+// about returned responses
+type Stats interface {
+	ResponseStatus(ocsp.ResponseStatus)
+}
+
+type logger interface {
+	Log(args ...any)
+}
+
+// A Responder object provides the HTTP logic to expose a
+// Source of OCSP responses.
+type Responder struct {
+	log    logger
+	Source Source
+	stats  Stats
+}
+
+// NewResponder instantiates a Responder with the given Source.
+func NewResponder(t logger, source Source, stats Stats) *Responder {
+	return &Responder{
+		Source: source,
+		stats:  stats,
+		log:    t,
+	}
+}
+
+func overrideHeaders(response http.ResponseWriter, headers http.Header) {
+	for k, v := range headers {
+		if len(v) == 1 {
+			response.Header().Set(k, v[0])
+		} else if len(v) > 1 {
+			response.Header().Del(k)
+			for _, e := range v {
+				response.Header().Add(k, e)
+			}
+		}
+	}
+}
+
+// hashToString contains mappings for the only hash functions
+// x/crypto/ocsp supports
+var hashToString = map[crypto.Hash]string{
+	crypto.SHA1:   "SHA1",
+	crypto.SHA256: "SHA256",
+	crypto.SHA384: "SHA384",
+	crypto.SHA512: "SHA512",
+}
+
+// A Responder can process both GET and POST requests. The mapping
+// from an OCSP request to an OCSP response is done by the Source;
+// the Responder simply decodes the request, and passes back whatever
+// response is provided by the source.
+// Note: The caller must use http.StripPrefix to strip any path components
+// (including '/') on GET requests.
+// Do not use this responder in conjunction with http.NewServeMux, because the
+// default handler will try to canonicalize path components by changing any
+// strings of repeated '/' into a single '/', which will break the base64
+// encoding.
+func (rs *Responder) ServeHTTP(response http.ResponseWriter, request *http.Request) {
+	// By default we set a 'max-age=0, no-cache' Cache-Control header; this
+	// is only returned to the client if a valid authorized OCSP response
+	// is not found or an error is returned. If a response is found, the header
+	// will be altered to contain the proper max-age and modifiers.
+	response.Header().Add("Cache-Control", "max-age=0, no-cache")
+	// Read the request body
+	var requestBody []byte
+	var err error
+	switch request.Method {
+	case "GET":
+		base64Request, err := url.QueryUnescape(request.URL.Path)
+		if err != nil {
+			rs.log.Log("Error decoding URL:", request.URL.Path)
+			response.WriteHeader(http.StatusBadRequest)
+			return
+		}
+		// url.QueryUnescape not only unescapes %2B escaping, but it additionally
+		// turns the resulting '+' into a space, which makes base64 decoding fail.
+		// So we go back afterwards and turn ' ' back into '+'. This means we
+		// accept some malformed input that includes ' ' or %20, but that's fine.
+		base64RequestBytes := []byte(base64Request)
+		for i := range base64RequestBytes {
+			if base64RequestBytes[i] == ' ' {
+				base64RequestBytes[i] = '+'
+			}
+		}
+		// In certain situations a UA may construct a request that has a double
+		// slash between the host name and the base64 request body due to naively
+		// constructing the request URL. In that case strip the leading slash
+		// so that we can still decode the request.
+		if len(base64RequestBytes) > 0 && base64RequestBytes[0] == '/' {
+			base64RequestBytes = base64RequestBytes[1:]
+		}
+		requestBody, err = base64.StdEncoding.DecodeString(string(base64RequestBytes))
+		if err != nil {
+			rs.log.Log("Error decoding base64 from URL", string(base64RequestBytes))
+			response.WriteHeader(http.StatusBadRequest)
+			return
+		}
+	case "POST":
+		requestBody, err = ioutil.ReadAll(request.Body)
+		if err != nil {
+			rs.log.Log("Problem reading body of POST", err)
+			response.WriteHeader(http.StatusBadRequest)
+			return
+		}
+	default:
+		response.WriteHeader(http.StatusMethodNotAllowed)
+		return
+	}
+	b64Body := base64.StdEncoding.EncodeToString(requestBody)
+	rs.log.Log("Received OCSP request", b64Body)
+
+	// All responses after this point will be OCSP.
+	// We could check for the content type of the request, but that
+	// seems unnecessarily restrictive.
+	response.Header().Add("Content-Type", "application/ocsp-response")
+
+	// Parse the request body as an OCSP request
+	// XXX: This fails if the request contains the nonce extension.
+	// We don't intend to support nonces anyway, but maybe we
+	// should return unauthorizedRequest instead of malformed.
+	ocspRequest, err := ocsp.ParseRequest(requestBody)
+	if err != nil {
+		rs.log.Log("Error decoding request body", b64Body)
+		response.WriteHeader(http.StatusBadRequest)
+		response.Write(malformedRequestErrorResponse)
+		if rs.stats != nil {
+			rs.stats.ResponseStatus(ocsp.Malformed)
+		}
+		return
+	}
+
+	// Look up OCSP response from source
+	ocspResponse, headers, err := rs.Source.Response(ocspRequest)
+	if err != nil {
+		if err == ErrNotFound {
+			rs.log.Log("No response found for request: serial %x, request body %s",
+				ocspRequest.SerialNumber, b64Body)
+			response.Write(unauthorizedErrorResponse)
+			if rs.stats != nil {
+				rs.stats.ResponseStatus(ocsp.Unauthorized)
+			}
+			return
+		}
+		rs.log.Log("Error retrieving response for request: serial %x, request body %s, error",
+			ocspRequest.SerialNumber, b64Body, err)
+		response.WriteHeader(http.StatusInternalServerError)
+		response.Write(internalErrorErrorResponse)
+		if rs.stats != nil {
+			rs.stats.ResponseStatus(ocsp.InternalError)
+		}
+		return
+	}
+
+	parsedResponse, err := ocsp.ParseResponse(ocspResponse, nil)
+	if err != nil {
+		rs.log.Log("Error parsing response for serial %x",
+			ocspRequest.SerialNumber, err)
+		response.Write(internalErrorErrorResponse)
+		if rs.stats != nil {
+			rs.stats.ResponseStatus(ocsp.InternalError)
+		}
+		return
+	}
+
+	// Write OCSP response to response
+	response.Header().Add("Last-Modified", parsedResponse.ThisUpdate.Format(time.RFC1123))
+	response.Header().Add("Expires", parsedResponse.NextUpdate.Format(time.RFC1123))
+	now := time.Now()
+	maxAge := 0
+	if now.Before(parsedResponse.NextUpdate) {
+		maxAge = int(parsedResponse.NextUpdate.Sub(now) / time.Second)
+	} else {
+		// TODO(#530): we want max-age=0 but this is technically an authorized OCSP response
+		// (despite being stale) and RFC 5019 forbids attaching no-cache
+		maxAge = 0
+	}
+	response.Header().Set(
+		"Cache-Control",
+		fmt.Sprintf(
+			"max-age=%d, public, no-transform, must-revalidate",
+			maxAge,
+		),
+	)
+	responseHash := sha256.Sum256(ocspResponse)
+	response.Header().Add("ETag", fmt.Sprintf("\"%X\"", responseHash))
+
+	if headers != nil {
+		overrideHeaders(response, headers)
+	}
+
+	// RFC 7232 says that a 304 response must contain the above
+	// headers if they would also be sent for a 200 for the same
+	// request, so we have to wait until here to do this
+	if etag := request.Header.Get("If-None-Match"); etag != "" {
+		if etag == fmt.Sprintf("\"%X\"", responseHash) {
+			response.WriteHeader(http.StatusNotModified)
+			return
+		}
+	}
+	response.WriteHeader(http.StatusOK)
+	response.Write(ocspResponse)
+	if rs.stats != nil {
+		rs.stats.ResponseStatus(ocsp.Success)
+	}
+}
+
+/*
+Copyright (c) 2014 CloudFlare Inc.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+ +Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ diff --git a/builtin/credential/github/backend.go b/builtin/credential/github/backend.go new file mode 100644 index 0000000..f8bbcc4 --- /dev/null +++ b/builtin/credential/github/backend.go @@ -0,0 +1,129 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package github + +import ( + "context" + "net/url" + + "github.com/google/go-github/github" + cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" + "golang.org/x/oauth2" +) + +const operationPrefixGithub = "github" + +func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { + b := Backend() + if err := b.Setup(ctx, conf); err != nil { + return nil, err + } + return b, nil +} + +func Backend() *backend { + var b backend + b.TeamMap = &framework.PolicyMap{ + PathMap: framework.PathMap{ + Name: "teams", + }, + DefaultKey: "default", + } + + teamMapPaths := b.TeamMap.Paths() + + teamMapPaths[0].DisplayAttrs = &framework.DisplayAttributes{ + OperationPrefix: operationPrefixGithub, + OperationSuffix: "teams", + } + teamMapPaths[1].DisplayAttrs = &framework.DisplayAttributes{ + OperationPrefix: operationPrefixGithub, + OperationSuffix: "team-mapping", + } + + b.UserMap = &framework.PolicyMap{ + PathMap: framework.PathMap{ + Name: "users", + }, + DefaultKey: "default", + } + + userMapPaths := b.UserMap.Paths() + + userMapPaths[0].DisplayAttrs = &framework.DisplayAttributes{ + OperationPrefix: operationPrefixGithub, + OperationSuffix: "users", + } + userMapPaths[1].DisplayAttrs = &framework.DisplayAttributes{ + OperationPrefix: operationPrefixGithub, + OperationSuffix: "user-mapping", + } + + allPaths := append(teamMapPaths, userMapPaths...) + b.Backend = &framework.Backend{ + Help: backendHelp, + + PathsSpecial: &logical.Paths{ + Unauthenticated: []string{ + "login", + }, + }, + + Paths: append([]*framework.Path{pathConfig(&b), pathLogin(&b)}, allPaths...), + AuthRenew: b.pathLoginRenew, + BackendType: logical.TypeCredential, + } + + return &b +} + +type backend struct { + *framework.Backend + + TeamMap *framework.PolicyMap + + UserMap *framework.PolicyMap +} + +// Client returns the GitHub client to communicate to GitHub via the +// configured settings. 
+func (b *backend) Client(token string) (*github.Client, error) { + tc := cleanhttp.DefaultClient() + if token != "" { + ctx := context.WithValue(context.Background(), oauth2.HTTPClient, tc) + tc = oauth2.NewClient(ctx, &tokenSource{Value: token}) + } + + client := github.NewClient(tc) + emptyUrl, err := url.Parse("") + if err != nil { + return nil, err + } + client.UploadURL = emptyUrl + + return client, nil +} + +// tokenSource is an oauth2.TokenSource implementation. +type tokenSource struct { + Value string +} + +func (t *tokenSource) Token() (*oauth2.Token, error) { + return &oauth2.Token{AccessToken: t.Value}, nil +} + +const backendHelp = ` +The GitHub credential provider allows authentication via GitHub. + +Users provide a personal access token to log in, and the credential +provider verifies they're part of the correct organization and then +maps the user to a set of Vault policies according to the teams they're +part of. + +After enabling the credential provider, use the "config" route to +configure it. +` diff --git a/builtin/credential/github/backend_test.go b/builtin/credential/github/backend_test.go new file mode 100644 index 0000000..6ea08ee --- /dev/null +++ b/builtin/credential/github/backend_test.go @@ -0,0 +1,213 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package github + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + "testing" + "time" + + logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" + "github.com/hashicorp/vault/sdk/logical" +) + +func TestBackend_Config(t *testing.T) { + defaultLeaseTTLVal := time.Hour * 24 + maxLeaseTTLVal := time.Hour * 24 * 2 + b, err := Factory(context.Background(), &logical.BackendConfig{ + Logger: nil, + System: &logical.StaticSystemView{ + DefaultLeaseTTLVal: defaultLeaseTTLVal, + MaxLeaseTTLVal: maxLeaseTTLVal, + }, + }) + if err != nil { + t.Fatalf("Unable to create backend: %s", err) + } + + login_data := map[string]interface{}{ + // This token has to be replaced with a working token for the test to work. + "token": os.Getenv("GITHUB_TOKEN"), + } + config_data1 := map[string]interface{}{ + "organization": os.Getenv("GITHUB_ORG"), + "ttl": "", + "max_ttl": "", + } + expectedTTL1 := 24 * time.Hour + config_data2 := map[string]interface{}{ + "organization": os.Getenv("GITHUB_ORG"), + "ttl": "1h", + "max_ttl": "2h", + } + expectedTTL2 := time.Hour + config_data3 := map[string]interface{}{ + "organization": os.Getenv("GITHUB_ORG"), + "ttl": "50h", + "max_ttl": "50h", + } + expectedTTL3 := 48 * time.Hour + + logicaltest.Test(t, logicaltest.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + CredentialBackend: b, + Steps: []logicaltest.TestStep{ + testConfigWrite(t, config_data1), + testLoginWrite(t, login_data, expectedTTL1, false), + testConfigWrite(t, config_data2), + testLoginWrite(t, login_data, expectedTTL2, false), + testConfigWrite(t, config_data3), + testLoginWrite(t, login_data, expectedTTL3, true), + }, + }) +} + +func testLoginWrite(t *testing.T, d map[string]interface{}, expectedTTL time.Duration, expectFail bool) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "login", + ErrorOk: true, + Data: d, + Check: func(resp *logical.Response) error { + if resp == nil { + return errors.New("expected a response but got nil") + } + if resp.IsError() && expectFail { + return nil + } + actualTTL := resp.Auth.LeaseOptions.TTL + if actualTTL != expectedTTL { + return fmt.Errorf("TTL mismatched. 
Expected: %d Actual: %d", expectedTTL, resp.Auth.LeaseOptions.TTL) + } + return nil + }, + } +} + +func testConfigWrite(t *testing.T, d map[string]interface{}) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "config", + Data: d, + } +} + +func TestBackend_basic(t *testing.T) { + defaultLeaseTTLVal := time.Hour * 24 + maxLeaseTTLVal := time.Hour * 24 * 32 + b, err := Factory(context.Background(), &logical.BackendConfig{ + Logger: nil, + System: &logical.StaticSystemView{ + DefaultLeaseTTLVal: defaultLeaseTTLVal, + MaxLeaseTTLVal: maxLeaseTTLVal, + }, + }) + if err != nil { + t.Fatalf("Unable to create backend: %s", err) + } + + logicaltest.Test(t, logicaltest.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + CredentialBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfig(t, false), + testAccMap(t, "default", "fakepol"), + testAccMap(t, "oWnErs", "fakepol"), + testAccLogin(t, []string{"default", "abc", "fakepol"}), + testAccStepConfig(t, true), + testAccMap(t, "default", "fakepol"), + testAccMap(t, "oWnErs", "fakepol"), + testAccLogin(t, []string{"default", "abc", "fakepol"}), + testAccStepConfigWithBaseURL(t), + testAccMap(t, "default", "fakepol"), + testAccMap(t, "oWnErs", "fakepol"), + testAccLogin(t, []string{"default", "abc", "fakepol"}), + testAccMap(t, "default", "fakepol"), + testAccStepConfig(t, true), + mapUserToPolicy(t, os.Getenv("GITHUB_USER"), "userpolicy"), + testAccLogin(t, []string{"default", "abc", "fakepol", "userpolicy"}), + }, + }) +} + +func testAccPreCheck(t *testing.T) { + if v := os.Getenv("GITHUB_TOKEN"); v == "" { + t.Skip("GITHUB_TOKEN must be set for acceptance tests") + } + + if v := os.Getenv("GITHUB_USER"); v == "" { + t.Skip("GITHUB_USER must be set for acceptance tests") + } + + if v := os.Getenv("GITHUB_ORG"); v == "" { + t.Skip("GITHUB_ORG must be set for acceptance tests") + } + + if v := os.Getenv("GITHUB_BASEURL"); v == "" { + t.Skip("GITHUB_BASEURL must be set for acceptance tests (use 'https://api.github.com' if you don't know what you're doing)") + } +} + +func testAccStepConfig(t *testing.T, upper bool) logicaltest.TestStep { + ts := logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "config", + Data: map[string]interface{}{ + "organization": os.Getenv("GITHUB_ORG"), + "token_policies": []string{"abc"}, + }, + } + if upper { + ts.Data["organization"] = strings.ToUpper(os.Getenv("GITHUB_ORG")) + } + return ts +} + +func testAccStepConfigWithBaseURL(t *testing.T) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "config", + Data: map[string]interface{}{ + "organization": os.Getenv("GITHUB_ORG"), + "base_url": os.Getenv("GITHUB_BASEURL"), + }, + } +} + +func testAccMap(t *testing.T, k string, v string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "map/teams/" + k, + Data: map[string]interface{}{ + "value": v, + }, + } +} + +func mapUserToPolicy(t *testing.T, k string, v string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "map/users/" + k, + Data: map[string]interface{}{ + "value": v, + }, + } +} + +func testAccLogin(t *testing.T, policies []string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "login", + Data: map[string]interface{}{ + "token": os.Getenv("GITHUB_TOKEN"), + }, + Unauthenticated: true, + + Check: logicaltest.TestCheckAuth(policies), + } +} 
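The doc comments on the OCSP test responder added in test_responder.go above spell out its intended wiring: wrap a Source in a Responder, mount it behind http.StripPrefix so GET requests decode correctly, and avoid http.NewServeMux, which collapses the repeated slashes that base64-encoded GET paths can contain. Below is a minimal sketch of that wiring, assuming a hypothetical same-package test file; the TestResponderWiring name and the empty source are placeholders, not fixtures from this diff.

```go
package cert

import (
	"net/http"
	"net/http/httptest"
	"testing"
)

// testingLogger adapts *testing.T to the unexported logger interface
// that NewResponder expects.
type testingLogger struct{ t *testing.T }

func (l testingLogger) Log(args ...any) { l.t.Log(args...) }

func TestResponderWiring(t *testing.T) {
	// An empty source: every lookup returns ErrNotFound, so the responder
	// answers with the DER-encoded unauthorizedErrorResponse.
	src := InMemorySource{}

	responder := NewResponder(testingLogger{t}, src, nil)

	// http.StripPrefix is required so GET requests arrive as a bare
	// base64-encoded OCSP request in the URL path.
	srv := httptest.NewServer(http.StripPrefix("/ocsp/", responder))
	defer srv.Close()

	// A real test would now POST a DER-encoded OCSP request to
	// srv.URL+"/ocsp/" (or GET the base64 form) and parse the reply
	// with golang.org/x/crypto/ocsp.
}
```

A populated InMemorySource would map each certificate serial number (as a decimal string) to a pre-built DER-encoded OCSP response, which is how the cert credential backend's own tests can stub out a CA's responder.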
diff --git a/builtin/credential/github/cli.go b/builtin/credential/github/cli.go new file mode 100644 index 0000000..d40f1b5 --- /dev/null +++ b/builtin/credential/github/cli.go @@ -0,0 +1,97 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package github + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/hashicorp/go-secure-stdlib/password" + "github.com/hashicorp/vault/api" +) + +type CLIHandler struct { + // for tests + testStdout io.Writer +} + +func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) { + mount, ok := m["mount"] + if !ok { + mount = "github" + } + + // Extract or prompt for token + token := m["token"] + if token == "" { + token = os.Getenv("VAULT_AUTH_GITHUB_TOKEN") + } + if token == "" { + // Override the output + stdout := h.testStdout + if stdout == nil { + stdout = os.Stderr + } + + var err error + fmt.Fprintf(stdout, "GitHub Personal Access Token (will be hidden): ") + token, err = password.Read(os.Stdin) + fmt.Fprintf(stdout, "\n") + if err != nil { + if err == password.ErrInterrupted { + return nil, fmt.Errorf("user interrupted") + } + + return nil, fmt.Errorf("An error occurred attempting to "+ + "ask for a token. The raw error message is shown below, but usually "+ + "this is because you attempted to pipe a value into the command or "+ + "you are executing outside of a terminal (tty). If you want to pipe "+ + "the value, pass \"-\" as the argument to read from stdin. The raw "+ + "error was: %w", err) + } + } + + path := fmt.Sprintf("auth/%s/login", mount) + secret, err := c.Logical().Write(path, map[string]interface{}{ + "token": strings.TrimSpace(token), + }) + if err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("empty response from credential provider") + } + + return secret, nil +} + +func (h *CLIHandler) Help() string { + help := ` +Usage: vault login -method=github [CONFIG K=V...] + + The GitHub auth method allows users to authenticate using a GitHub + personal access token. Users can generate a personal access token from the + settings page on their GitHub account. + + Authenticate using a GitHub token: + + $ vault login -method=github token=abcd1234 + +Configuration: + + mount= + Path where the GitHub credential method is mounted. This is usually + provided via the -path flag in the "vault login" command, but it can be + specified here as well. If specified here, it takes precedence over the + value for -path. The default value is "github". + + token= + GitHub personal access token to use for authentication. If not provided, + Vault will prompt for the value. +` + + return strings.TrimSpace(help) +} diff --git a/builtin/credential/github/cmd/github/main.go b/builtin/credential/github/cmd/github/main.go new file mode 100644 index 0000000..499469a --- /dev/null +++ b/builtin/credential/github/cmd/github/main.go @@ -0,0 +1,34 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "os" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/credential/github" + "github.com/hashicorp/vault/sdk/plugin" +) + +func main() { + apiClientMeta := &api.PluginAPIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args[1:]) + + tlsConfig := apiClientMeta.GetTLSConfig() + tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) + + if err := plugin.ServeMultiplex(&plugin.ServeOpts{ + BackendFactoryFunc: github.Factory, + // set the TLSProviderFunc so that the plugin maintains backwards + // compatibility with Vault versions that don’t support plugin AutoMTLS + TLSProviderFunc: tlsProviderFunc, + }); err != nil { + logger := hclog.New(&hclog.LoggerOptions{}) + + logger.Error("plugin shutting down", "error", err) + os.Exit(1) + } +} diff --git a/builtin/credential/github/path_config.go b/builtin/credential/github/path_config.go new file mode 100644 index 0000000..83238f3 --- /dev/null +++ b/builtin/credential/github/path_config.go @@ -0,0 +1,247 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package github + +import ( + "context" + "fmt" + "net/url" + "os" + "strings" + "time" + + "github.com/google/go-github/github" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/tokenutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathConfig(b *backend) *framework.Path { + p := &framework.Path{ + Pattern: "config", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixGithub, + }, + + Fields: map[string]*framework.FieldSchema{ + "organization": { + Type: framework.TypeString, + Description: "The organization users must be part of", + Required: true, + }, + "organization_id": { + Type: framework.TypeInt64, + Description: "The ID of the organization users must be part of", + }, + "base_url": { + Type: framework.TypeString, + Description: `The API endpoint to use. Useful if you +are running GitHub Enterprise or an +API-compatible authentication server.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Base URL", + Group: "GitHub Options", + }, + }, + "ttl": { + Type: framework.TypeDurationSecond, + Description: tokenutil.DeprecationText("token_ttl"), + Deprecated: true, + }, + "max_ttl": { + Type: framework.TypeDurationSecond, + Description: tokenutil.DeprecationText("token_max_ttl"), + Deprecated: true, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixGithub, + OperationVerb: "configure", + }, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "configuration", + }, + }, + }, + } + + tokenutil.AddTokenFields(p.Fields) + p.Fields["token_policies"].Description += ". This will apply to all tokens generated by this auth method, in addition to any policies configured for specific users/groups." 
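+	// tokenutil.AddTokenFields (called above) merges the shared token_* fields
+	// (token_ttl, token_max_ttl, token_policies, token_bound_cidrs, and so on)
+	// into this path's schema; pathConfigWrite below then upgrades the
+	// deprecated ttl/max_ttl inputs into their token_* equivalents.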
+ return p +} + +func (b *backend) pathConfigWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + var resp logical.Response + c, err := b.Config(ctx, req.Storage) + if err != nil { + return nil, err + } + if c == nil { + c = &config{} + } + + if organizationRaw, ok := data.GetOk("organization"); ok { + c.Organization = organizationRaw.(string) + } + if c.Organization == "" { + return logical.ErrorResponse("organization is a required parameter"), nil + } + + if organizationRaw, ok := data.GetOk("organization_id"); ok { + c.OrganizationID = organizationRaw.(int64) + } + + var parsedURL *url.URL + if baseURLRaw, ok := data.GetOk("base_url"); ok { + baseURL := baseURLRaw.(string) + if !strings.HasSuffix(baseURL, "/") { + baseURL += "/" + } + parsedURL, err = url.Parse(baseURL) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("error parsing given base_url: %s", err)), nil + } + c.BaseURL = baseURL + } + + if c.OrganizationID == 0 { + githubToken := os.Getenv("VAULT_AUTH_CONFIG_GITHUB_TOKEN") + client, err := b.Client(githubToken) + if err != nil { + return nil, err + } + // ensure our client has the BaseURL if it was provided + if parsedURL != nil { + client.BaseURL = parsedURL + } + + // we want to set the Org ID in the config so we can use that to verify + // the credentials on login + err = c.setOrganizationID(ctx, client) + if err != nil { + errorMsg := fmt.Errorf("unable to fetch the organization_id, you must manually set it in the config: %s", err) + b.Logger().Error(errorMsg.Error()) + return nil, errorMsg + } + } + + if err := c.ParseTokenFields(req, data); err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + + // Handle upgrade cases + { + if err := tokenutil.UpgradeValue(data, "ttl", "token_ttl", &c.TTL, &c.TokenTTL); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + if err := tokenutil.UpgradeValue(data, "max_ttl", "token_max_ttl", &c.MaxTTL, &c.TokenMaxTTL); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + } + + entry, err := logical.StorageEntryJSON("config", c) + if err != nil { + return nil, err + } + + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + + if len(resp.Warnings) == 0 { + return nil, nil + } + + return &resp, nil +} + +func (b *backend) pathConfigRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + config, err := b.Config(ctx, req.Storage) + if err != nil { + return nil, err + } + if config == nil { + return nil, nil + } + + d := map[string]interface{}{ + "organization_id": config.OrganizationID, + "organization": config.Organization, + "base_url": config.BaseURL, + } + config.PopulateTokenData(d) + + if config.TTL > 0 { + d["ttl"] = int64(config.TTL.Seconds()) + } + if config.MaxTTL > 0 { + d["max_ttl"] = int64(config.MaxTTL.Seconds()) + } + + return &logical.Response{ + Data: d, + }, nil +} + +// Config returns the configuration for this backend. 
+func (b *backend) Config(ctx context.Context, s logical.Storage) (*config, error) { + entry, err := s.Get(ctx, "config") + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result config + if entry != nil { + if err := entry.DecodeJSON(&result); err != nil { + return nil, fmt.Errorf("error reading configuration: %w", err) + } + } + + if result.TokenTTL == 0 && result.TTL > 0 { + result.TokenTTL = result.TTL + } + if result.TokenMaxTTL == 0 && result.MaxTTL > 0 { + result.TokenMaxTTL = result.MaxTTL + } + + return &result, nil +} + +type config struct { + tokenutil.TokenParams + + OrganizationID int64 `json:"organization_id" structs:"organization_id" mapstructure:"organization_id"` + Organization string `json:"organization" structs:"organization" mapstructure:"organization"` + BaseURL string `json:"base_url" structs:"base_url" mapstructure:"base_url"` + TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl"` + MaxTTL time.Duration `json:"max_ttl" structs:"max_ttl" mapstructure:"max_ttl"` +} + +func (c *config) setOrganizationID(ctx context.Context, client *github.Client) error { + org, _, err := client.Organizations.Get(ctx, c.Organization) + if err != nil { + return err + } + + orgID := org.GetID() + if orgID == 0 { + return fmt.Errorf("organization_id not found for %s", c.Organization) + } + + c.OrganizationID = orgID + + return nil +} diff --git a/builtin/credential/github/path_config_test.go b/builtin/credential/github/path_config_test.go new file mode 100644 index 0000000..2f592b2 --- /dev/null +++ b/builtin/credential/github/path_config_test.go @@ -0,0 +1,255 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package github + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/http/httptest" + "os" + "strings" + "testing" + + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/assert" +) + +func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) { + t.Helper() + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + + b := Backend() + if b == nil { + t.Fatalf("failed to create backend") + } + err := b.Backend.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + return b, config.StorageView +} + +// setupTestServer configures httptest server to intercept and respond to the +// request to base_url +func setupTestServer(t *testing.T) *httptest.Server { + t.Helper() + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var resp string + if strings.Contains(r.URL.String(), "/user/orgs") { + resp = string(listOrgResponse) + } else if strings.Contains(r.URL.String(), "/user/teams") { + resp = string(listUserTeamsResponse) + } else if strings.Contains(r.URL.String(), "/user") { + resp = getUserResponse + } else if strings.Contains(r.URL.String(), "/orgs/") { + resp = getOrgResponse + } + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintln(w, resp) + })) +} + +// TestGitHub_WriteReadConfig tests that we can successfully read and write +// the github auth config +func TestGitHub_WriteReadConfig(t *testing.T) { + b, s := createBackendWithStorage(t) + + // use a test server to return our mock GH org info + ts := setupTestServer(t) + defer ts.Close() + + // Write the config + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Path: "config", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "organization": "foo-org", + "base_url": 
ts.URL, // base_url will call the test server + }, + Storage: s, + }) + assert.NoError(t, err) + assert.Nil(t, resp) + assert.NoError(t, resp.Error()) + + // Read the config + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "config", + Operation: logical.ReadOperation, + Storage: s, + }) + assert.NoError(t, err) + assert.NoError(t, resp.Error()) + + // the ID should be set, we grab it from the GET /orgs API + assert.Equal(t, int64(12345), resp.Data["organization_id"]) + assert.Equal(t, "foo-org", resp.Data["organization"]) +} + +// TestGitHub_WriteReadConfig_OrgID tests that we can successfully read and +// write the github auth config with an organization_id param +func TestGitHub_WriteReadConfig_OrgID(t *testing.T) { + b, s := createBackendWithStorage(t) + + // Write the config and pass in organization_id + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Path: "config", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "organization": "foo-org", + "organization_id": 98765, + }, + Storage: s, + }) + assert.NoError(t, err) + assert.Nil(t, resp) + assert.NoError(t, resp.Error()) + + // Read the config + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "config", + Operation: logical.ReadOperation, + Storage: s, + }) + assert.NoError(t, err) + assert.NoError(t, resp.Error()) + + // the ID should be set to what was written in the config + assert.Equal(t, int64(98765), resp.Data["organization_id"]) + assert.Equal(t, "foo-org", resp.Data["organization"]) +} + +// TestGitHub_WriteReadConfig_Token tests that we can successfully read and +// write the github auth config with a token environment variable +func TestGitHub_WriteReadConfig_Token(t *testing.T) { + b, s := createBackendWithStorage(t) + // use a test server to return our mock GH org info + ts := setupTestServer(t) + defer ts.Close() + + err := os.Setenv("VAULT_AUTH_CONFIG_GITHUB_TOKEN", "foobar") + assert.NoError(t, err) + + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Path: "config", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "organization": "foo-org", + "base_url": ts.URL, // base_url will call the test server + }, + Storage: s, + }) + assert.NoError(t, err) + assert.Nil(t, resp) + assert.NoError(t, resp.Error()) + + // Read the config + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "config", + Operation: logical.ReadOperation, + Storage: s, + }) + assert.NoError(t, err) + assert.NoError(t, resp.Error()) + + // the token should not be returned in the read config response. 
+ assert.Nil(t, resp.Data["token"]) +} + +// TestGitHub_ErrorNoOrgID tests that an error is returned when we cannot fetch +// the org ID for the given org name +func TestGitHub_ErrorNoOrgID(t *testing.T) { + b, s := createBackendWithStorage(t) + // use a test server to return our mock GH org info + ts := func() *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + resp := `{ "id": 0 }` + fmt.Fprintln(w, resp) + })) + } + + defer ts().Close() + + // Write the config + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Path: "config", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "organization": "foo-org", + "base_url": ts().URL, // base_url will call the test server + }, + Storage: s, + }) + assert.Error(t, err) + assert.Nil(t, resp) + assert.Equal(t, errors.New( + "unable to fetch the organization_id, you must manually set it in the config: organization_id not found for foo-org", + ), err) +} + +// TestGitHub_WriteConfig_ErrorNoOrg tests that an error is returned when the +// required "organization" parameter is not provided +func TestGitHub_WriteConfig_ErrorNoOrg(t *testing.T) { + b, s := createBackendWithStorage(t) + + // Write the config + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Path: "config", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{}, + Storage: s, + }) + + assert.NoError(t, err) + assert.Error(t, resp.Error()) + assert.Equal(t, errors.New("organization is a required parameter"), resp.Error()) +} + +// https://docs.github.com/en/rest/reference/users#get-the-authenticated-user +// Note: many of the fields have been omitted +var getUserResponse = ` +{ + "login": "user-foo", + "id": 6789, + "description": "A great user. The very best user.", + "name": "foo name", + "company": "foo-company", + "type": "User" +} +` + +// https://docs.github.com/en/rest/reference/orgs#get-an-organization +// Note: many of the fields have been omitted, we only care about 'login' and 'id' +var getOrgResponse = ` +{ + "login": "foo-org", + "id": 12345, + "description": "A great org. The very best org.", + "name": "foo-display-name", + "company": "foo-company", + "type": "Organization" +} +` + +// https://docs.github.com/en/rest/reference/orgs#list-organizations-for-the-authenticated-user +var listOrgResponse = []byte(fmt.Sprintf(`[%v]`, getOrgResponse)) + +// https://docs.github.com/en/rest/reference/teams#list-teams-for-the-authenticated-user +// Note: many of the fields have been omitted +var listUserTeamsResponse = []byte(fmt.Sprintf(`[ +{ + "id": 1, + "node_id": "MDQ6VGVhbTE=", + "name": "Foo team", + "slug": "foo-team", + "description": "A great team. The very best team.", + "permission": "admin", + "organization": %v + } +]`, getOrgResponse)) diff --git a/builtin/credential/github/path_login.go b/builtin/credential/github/path_login.go new file mode 100644 index 0000000..4ee94e5 --- /dev/null +++ b/builtin/credential/github/path_login.go @@ -0,0 +1,312 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package github + +import ( + "context" + "errors" + "fmt" + "net/url" + + "github.com/google/go-github/github" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/cidrutil" + "github.com/hashicorp/vault/sdk/helper/policyutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathLogin(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "login", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixGithub, + OperationVerb: "login", + }, + + Fields: map[string]*framework.FieldSchema{ + "token": { + Type: framework.TypeString, + Description: "GitHub personal API token", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathLogin, + logical.AliasLookaheadOperation: b.pathLoginAliasLookahead, + }, + } +} + +func (b *backend) pathLoginAliasLookahead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + token := data.Get("token").(string) + + verifyResp, err := b.verifyCredentials(ctx, req, token) + if err != nil { + return nil, err + } + + return &logical.Response{ + Warnings: verifyResp.Warnings, + Auth: &logical.Auth{ + Alias: &logical.Alias{ + Name: *verifyResp.User.Login, + }, + }, + }, nil +} + +func (b *backend) pathLogin(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + token := data.Get("token").(string) + + verifyResp, err := b.verifyCredentials(ctx, req, token) + if err != nil { + return nil, err + } + + auth := &logical.Auth{ + InternalData: map[string]interface{}{ + "token": token, + }, + Metadata: map[string]string{ + "username": *verifyResp.User.Login, + "org": *verifyResp.Org.Login, + }, + DisplayName: *verifyResp.User.Login, + Alias: &logical.Alias{ + Name: *verifyResp.User.Login, + }, + } + verifyResp.Config.PopulateTokenAuth(auth) + + // Add in configured policies from user/group mapping + if len(verifyResp.Policies) > 0 { + auth.Policies = append(auth.Policies, verifyResp.Policies...) 
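+		// (PopulateTokenAuth above already seeded auth.Policies with the
+		// configured token_policies; the values appended here come from the
+		// user and team maps resolved in verifyCredentials.)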
+ } + + resp := &logical.Response{ + Warnings: verifyResp.Warnings, + Auth: auth, + } + + for _, teamName := range verifyResp.TeamNames { + if teamName == "" { + continue + } + resp.Auth.GroupAliases = append(resp.Auth.GroupAliases, &logical.Alias{ + Name: teamName, + }) + } + + return resp, nil +} + +func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + if req.Auth == nil { + return nil, fmt.Errorf("request auth was nil") + } + + tokenRaw, ok := req.Auth.InternalData["token"] + if !ok { + return nil, fmt.Errorf("token created in previous version of Vault cannot be validated properly at renewal time") + } + token := tokenRaw.(string) + + verifyResp, err := b.verifyCredentials(ctx, req, token) + if err != nil { + return nil, err + } + + if !policyutil.EquivalentPolicies(verifyResp.Policies, req.Auth.TokenPolicies) { + return nil, fmt.Errorf("policies do not match") + } + + resp := &logical.Response{Auth: req.Auth} + resp.Auth.Period = verifyResp.Config.TokenPeriod + resp.Auth.TTL = verifyResp.Config.TokenTTL + resp.Auth.MaxTTL = verifyResp.Config.TokenMaxTTL + resp.Warnings = verifyResp.Warnings + + // Remove old aliases + resp.Auth.GroupAliases = nil + + for _, teamName := range verifyResp.TeamNames { + resp.Auth.GroupAliases = append(resp.Auth.GroupAliases, &logical.Alias{ + Name: teamName, + }) + } + + return resp, nil +} + +func (b *backend) verifyCredentials(ctx context.Context, req *logical.Request, token string) (*verifyCredentialsResp, error) { + var warnings []string + config, err := b.Config(ctx, req.Storage) + if err != nil { + return nil, err + } + if config == nil { + return nil, errors.New("configuration has not been set") + } + + // Check for a CIDR match. + if len(config.TokenBoundCIDRs) > 0 { + if req.Connection == nil { + b.Logger().Error("token bound CIDRs found but no connection information available for validation") + return nil, logical.ErrPermissionDenied + } + if !cidrutil.RemoteAddrIsOk(req.Connection.RemoteAddr, config.TokenBoundCIDRs) { + return nil, logical.ErrPermissionDenied + } + } + + client, err := b.Client(token) + if err != nil { + return nil, err + } + + if config.BaseURL != "" { + parsedURL, err := url.Parse(config.BaseURL) + if err != nil { + return nil, fmt.Errorf("successfully parsed base_url when set but failing to parse now: %w", err) + } + client.BaseURL = parsedURL + } + + if config.OrganizationID == 0 { + // Previously we did not verify using the Org ID. So if the Org ID is + // not set, we will trust-on-first-use and set it now. + err = config.setOrganizationID(ctx, client) + if err != nil { + b.Logger().Error("failed to set the organization_id on login", "error", err) + return nil, err + } + entry, err := logical.StorageEntryJSON("config", config) + if err != nil { + return nil, err + } + + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + + b.Logger().Info("set ID on a trust-on-first-use basis", "organization_id", config.OrganizationID) + } + + // Get the user + user, _, err := client.Users.Get(ctx, "") + if err != nil { + return nil, err + } + + // Verify that the user is part of the organization + var org *github.Organization + + orgOpt := &github.ListOptions{ + PerPage: 100, + } + + var allOrgs []*github.Organization + for { + orgs, resp, err := client.Organizations.List(ctx, "", orgOpt) + if err != nil { + return nil, err + } + allOrgs = append(allOrgs, orgs...) 
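+		// go-github paginates list calls; resp.NextPage is zero once the final
+		// page has been fetched, so keep requesting pages until then.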
+ if resp.NextPage == 0 { + break + } + orgOpt.Page = resp.NextPage + } + + orgLoginName := "" + for _, o := range allOrgs { + if o.GetID() == config.OrganizationID { + org = o + orgLoginName = *o.Login + break + } + } + if org == nil { + return nil, errors.New("user is not part of required org") + } + + if orgLoginName != config.Organization { + warningMsg := fmt.Sprintf( + "the organization name has changed to %q. It is recommended to verify and update the organization name in the config: %s=%d", + orgLoginName, + "organization_id", + config.OrganizationID, + ) + b.Logger().Warn(warningMsg) + warnings = append(warnings, warningMsg) + } + + // Get the teams that this user is part of to determine the policies + var teamNames []string + + teamOpt := &github.ListOptions{ + PerPage: 100, + } + + var allTeams []*github.Team + for { + teams, resp, err := client.Teams.ListUserTeams(ctx, teamOpt) + if err != nil { + return nil, err + } + allTeams = append(allTeams, teams...) + if resp.NextPage == 0 { + break + } + teamOpt.Page = resp.NextPage + } + + for _, t := range allTeams { + // We only care about teams that are part of the organization we use + if *t.Organization.ID != *org.ID { + continue + } + + // Append the names so we can get the policies + teamNames = append(teamNames, *t.Name) + if *t.Name != *t.Slug { + teamNames = append(teamNames, *t.Slug) + } + } + + groupPoliciesList, err := b.TeamMap.Policies(ctx, req.Storage, teamNames...) + if err != nil { + return nil, err + } + + userPoliciesList, err := b.UserMap.Policies(ctx, req.Storage, []string{*user.Login}...) + if err != nil { + return nil, err + } + + verifyResp := &verifyCredentialsResp{ + User: user, + Org: org, + Policies: append(groupPoliciesList, userPoliciesList...), + TeamNames: teamNames, + Config: config, + Warnings: warnings, + } + + return verifyResp, nil +} + +type verifyCredentialsResp struct { + User *github.User + Org *github.Organization + Policies []string + TeamNames []string + + // Warnings to send back to the caller + Warnings []string + + // This is just a cache to send back to the caller + Config *config +} diff --git a/builtin/credential/github/path_login_test.go b/builtin/credential/github/path_login_test.go new file mode 100644 index 0000000..282e3fa --- /dev/null +++ b/builtin/credential/github/path_login_test.go @@ -0,0 +1,189 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package github + +import ( + "context" + "errors" + "testing" + + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/assert" +) + +// TestGitHub_Login tests that we can successfully login with the given config +func TestGitHub_Login(t *testing.T) { + b, s := createBackendWithStorage(t) + + // use a test server to return our mock GH org info + ts := setupTestServer(t) + defer ts.Close() + + // Write the config + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Path: "config", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "organization": "foo-org", + "base_url": ts.URL, // base_url will call the test server + }, + Storage: s, + }) + assert.NoError(t, err) + assert.NoError(t, resp.Error()) + + // Read the config + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "config", + Operation: logical.ReadOperation, + Storage: s, + }) + assert.NoError(t, err) + assert.NoError(t, resp.Error()) + + // attempt a login + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "login", + Operation: logical.UpdateOperation, + Storage: s, + }) + + expectedMetaData := map[string]string{ + "org": "foo-org", + "username": "user-foo", + } + assert.Equal(t, expectedMetaData, resp.Auth.Metadata) + assert.NoError(t, err) + assert.NoError(t, resp.Error()) +} + +// TestGitHub_Login_OrgInvalid tests that we cannot login with an ID other than +// what is set in the config +func TestGitHub_Login_OrgInvalid(t *testing.T) { + b, s := createBackendWithStorage(t) + ctx := namespace.RootContext(nil) + + // use a test server to return our mock GH org info + ts := setupTestServer(t) + defer ts.Close() + + // write and store config + config := config{ + Organization: "foo-org", + OrganizationID: 9999, + BaseURL: ts.URL + "/", // base_url will call the test server + } + entry, err := logical.StorageEntryJSON("config", config) + if err != nil { + t.Fatalf("failed creating storage entry") + } + if err := s.Put(ctx, entry); err != nil { + t.Fatalf("writing to in mem storage failed") + } + + // attempt a login + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Path: "login", + Operation: logical.UpdateOperation, + Storage: s, + }) + + assert.Nil(t, resp) + assert.Error(t, err) + assert.Equal(t, errors.New("user is not part of required org"), err) +} + +// TestGitHub_Login_OrgNameChanged tests that we can successfully login with the +// given config and emit a warning when the organization name has changed +func TestGitHub_Login_OrgNameChanged(t *testing.T) { + b, s := createBackendWithStorage(t) + ctx := namespace.RootContext(nil) + + // use a test server to return our mock GH org info + ts := setupTestServer(t) + defer ts.Close() + + // write and store config + // the name does not match what the API will return but the ID does + config := config{ + Organization: "old-name", + OrganizationID: 12345, + BaseURL: ts.URL + "/", // base_url will call the test server + } + entry, err := logical.StorageEntryJSON("config", config) + if err != nil { + t.Fatalf("failed creating storage entry") + } + if err := s.Put(ctx, entry); err != nil { + t.Fatalf("writing to in mem storage failed") + } + + // attempt a login + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Path: "login", + Operation: logical.UpdateOperation, + Storage: s, + }) + + assert.NoError(t, err) + assert.Nil(t, resp.Error()) + 
assert.Equal( + t, + []string{"the organization name has changed to \"foo-org\". It is recommended to verify and update the organization name in the config: organization_id=12345"}, + resp.Warnings, + ) +} + +// TestGitHub_Login_NoOrgID tests that we can successfully login with the given +// config when no organization ID is present and write the fetched ID to the +// config +func TestGitHub_Login_NoOrgID(t *testing.T) { + b, s := createBackendWithStorage(t) + ctx := namespace.RootContext(nil) + + // use a test server to return our mock GH org info + ts := setupTestServer(t) + defer ts.Close() + + // write and store config without Org ID + config := config{ + Organization: "foo-org", + BaseURL: ts.URL + "/", // base_url will call the test server + } + entry, err := logical.StorageEntryJSON("config", config) + if err != nil { + t.Fatalf("failed creating storage entry") + } + if err := s.Put(ctx, entry); err != nil { + t.Fatalf("writing to in mem storage failed") + } + + // attempt a login + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Path: "login", + Operation: logical.UpdateOperation, + Storage: s, + }) + + expectedMetaData := map[string]string{ + "org": "foo-org", + "username": "user-foo", + } + assert.Equal(t, expectedMetaData, resp.Auth.Metadata) + assert.NoError(t, err) + assert.NoError(t, resp.Error()) + + // Read the config + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "config", + Operation: logical.ReadOperation, + Storage: s, + }) + assert.NoError(t, err) + assert.NoError(t, resp.Error()) + + // the ID should be set, we grab it from the GET /orgs API + assert.Equal(t, int64(12345), resp.Data["organization_id"]) +} diff --git a/builtin/credential/ldap/backend.go b/builtin/credential/ldap/backend.go new file mode 100644 index 0000000..ec4a0f2 --- /dev/null +++ b/builtin/credential/ldap/backend.go @@ -0,0 +1,228 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package ldap + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/go-secure-stdlib/strutil" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/ldaputil" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + operationPrefixLDAP = "ldap" + errUserBindFailed = "ldap operation failed: failed to bind as user" +) + +func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { + b := Backend() + if err := b.Setup(ctx, conf); err != nil { + return nil, err + } + return b, nil +} + +func Backend() *backend { + var b backend + b.Backend = &framework.Backend{ + Help: backendHelp, + + PathsSpecial: &logical.Paths{ + Unauthenticated: []string{ + "login/*", + }, + + SealWrapStorage: []string{ + "config", + }, + }, + + Paths: []*framework.Path{ + pathConfig(&b), + pathGroups(&b), + pathGroupsList(&b), + pathUsers(&b), + pathUsersList(&b), + pathLogin(&b), + }, + + AuthRenew: b.pathLoginRenew, + BackendType: logical.TypeCredential, + } + + return &b +} + +type backend struct { + *framework.Backend +} + +func (b *backend) Login(ctx context.Context, req *logical.Request, username string, password string, usernameAsAlias bool) (string, []string, *logical.Response, []string, error) { + cfg, err := b.Config(ctx, req) + if err != nil { + return "", nil, nil, nil, err + } + if cfg == nil { + return "", nil, logical.ErrorResponse("ldap backend not configured"), nil, nil + } + + if cfg.DenyNullBind && len(password) == 0 { + return "", nil, logical.ErrorResponse("password cannot be of zero length when passwordless binds are being denied"), nil, nil + } + + ldapClient := ldaputil.Client{ + Logger: b.Logger(), + LDAP: ldaputil.NewLDAP(), + } + + c, err := ldapClient.DialLDAP(cfg.ConfigEntry) + if err != nil { + return "", nil, logical.ErrorResponse(err.Error()), nil, nil + } + if c == nil { + return "", nil, logical.ErrorResponse("invalid connection returned from LDAP dial"), nil, nil + } + + // Clean connection + defer c.Close() + + userBindDN, err := ldapClient.GetUserBindDN(cfg.ConfigEntry, c, username) + if err != nil { + if b.Logger().IsDebug() { + b.Logger().Debug("error getting user bind DN", "error", err) + } + return "", nil, logical.ErrorResponse(errUserBindFailed), nil, logical.ErrInvalidCredentials + } + + if b.Logger().IsDebug() { + b.Logger().Debug("user binddn fetched", "username", username, "binddn", userBindDN) + } + + // Try to bind as the login user. This is where the actual authentication takes place. + if len(password) > 0 { + err = c.Bind(userBindDN, password) + } else { + err = c.UnauthenticatedBind(userBindDN) + } + if err != nil { + if b.Logger().IsDebug() { + b.Logger().Debug("ldap bind failed", "error", err) + } + return "", nil, logical.ErrorResponse(errUserBindFailed), nil, logical.ErrInvalidCredentials + } + + // We re-bind to the BindDN if it's defined because we assume + // the BindDN should be the one to search, not the user logging in. 
+	if cfg.BindDN != "" && cfg.BindPassword != "" {
+		if err := c.Bind(cfg.BindDN, cfg.BindPassword); err != nil {
+			if b.Logger().IsDebug() {
+				b.Logger().Debug("error while attempting to re-bind with the BindDN User", "error", err)
+			}
+			return "", nil, logical.ErrorResponse("ldap operation failed: failed to re-bind with the BindDN user"), nil, logical.ErrInvalidCredentials
+		}
+		if b.Logger().IsDebug() {
+			b.Logger().Debug("re-bound to original binddn")
+		}
+	}
+
+	userDN, err := ldapClient.GetUserDN(cfg.ConfigEntry, c, userBindDN, username)
+	if err != nil {
+		return "", nil, logical.ErrorResponse(err.Error()), nil, nil
+	}
+
+	if cfg.AnonymousGroupSearch {
+		c, err = ldapClient.DialLDAP(cfg.ConfigEntry)
+		if err != nil {
+			return "", nil, logical.ErrorResponse("ldap operation failed: failed to connect to LDAP server"), nil, nil
+		}
+		defer c.Close() // Defer closing of this connection as the deferral above closes the other defined connection
+	}
+
+	ldapGroups, err := ldapClient.GetLdapGroups(cfg.ConfigEntry, c, userDN, username)
+	if err != nil {
+		return "", nil, logical.ErrorResponse(err.Error()), nil, nil
+	}
+	if b.Logger().IsDebug() {
+		b.Logger().Debug("groups fetched from server", "num_server_groups", len(ldapGroups), "server_groups", ldapGroups)
+	}
+
+	ldapResponse := &logical.Response{
+		Data: map[string]interface{}{},
+	}
+	if len(ldapGroups) == 0 {
+		errString := fmt.Sprintf(
+			"no LDAP groups found in groupDN %q; only policies from locally-defined groups available",
+			cfg.GroupDN)
+		ldapResponse.AddWarning(errString)
+	}
+
+	var allGroups []string
+	canonicalUsername := username
+	cs := *cfg.CaseSensitiveNames
+	if !cs {
+		canonicalUsername = strings.ToLower(username)
+	}
+	// Import the custom added groups from ldap backend
+	user, err := b.User(ctx, req.Storage, canonicalUsername)
+	if err == nil && user != nil && user.Groups != nil {
+		if b.Logger().IsDebug() {
+			b.Logger().Debug("adding local groups", "num_local_groups", len(user.Groups), "local_groups", user.Groups)
+		}
+		allGroups = append(allGroups, user.Groups...)
+	}
+	// Merge local and LDAP groups
+	allGroups = append(allGroups, ldapGroups...)
+
+	canonicalGroups := allGroups
+	// If not case sensitive, lowercase all
+	if !cs {
+		canonicalGroups = make([]string, len(allGroups))
+		for i, v := range allGroups {
+			canonicalGroups[i] = strings.ToLower(v)
+		}
+	}
+
+	// Retrieve policies
+	var policies []string
+	for _, groupName := range canonicalGroups {
+		group, err := b.Group(ctx, req.Storage, groupName)
+		if err == nil && group != nil {
+			policies = append(policies, group.Policies...)
+		}
+	}
+	if user != nil && user.Policies != nil {
+		policies = append(policies, user.Policies...)
+	}
+	// Policies from each group may overlap
+	policies = strutil.RemoveDuplicates(policies, true)
+
+	if usernameAsAlias {
+		return username, policies, ldapResponse, allGroups, nil
+	}
+
+	entityAliasAttribute, err := ldapClient.GetUserAliasAttributeValue(cfg.ConfigEntry, c, username)
+	if err != nil {
+		return "", nil, logical.ErrorResponse(err.Error()), nil, nil
+	}
+	if entityAliasAttribute == "" {
+		return "", nil, logical.ErrorResponse("missing entity alias attribute value"), nil, nil
+	}
+
+	return entityAliasAttribute, policies, ldapResponse, allGroups, nil
+}
+
+const backendHelp = `
+The "ldap" credential provider allows authentication by querying
+an LDAP server, checking username and password, and associating groups
+with a set of policies.
+ +Configuration of the server is done through the "config" and "groups" +endpoints by a user with root access. Authentication is then done +by supplying the two fields for "login". +` diff --git a/builtin/credential/ldap/backend_test.go b/builtin/credential/ldap/backend_test.go new file mode 100644 index 0000000..beda248 --- /dev/null +++ b/builtin/credential/ldap/backend_test.go @@ -0,0 +1,1277 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ldap + +import ( + "context" + "fmt" + "reflect" + "sort" + "testing" + "time" + + goldap "github.com/go-ldap/ldap/v3" + "github.com/go-test/deep" + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/helper/testhelpers/ldap" + logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" + "github.com/hashicorp/vault/sdk/helper/ldaputil" + "github.com/hashicorp/vault/sdk/helper/policyutil" + "github.com/hashicorp/vault/sdk/helper/tokenutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/mapstructure" +) + +func createBackendWithStorage(t *testing.T) (*backend, logical.Storage) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + + b := Backend() + if b == nil { + t.Fatalf("failed to create backend") + } + + err := b.Backend.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + return b, config.StorageView +} + +func TestLdapAuthBackend_Listing(t *testing.T) { + b, storage := createBackendWithStorage(t) + + // Create group "testgroup" + resp, err := b.HandleRequest(namespace.RootContext(nil), &logical.Request{ + Path: "groups/testgroup", + Operation: logical.UpdateOperation, + Storage: storage, + Data: map[string]interface{}{ + "policies": []string{"default"}, + }, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } + + // Create group "nested/testgroup" + resp, err = b.HandleRequest(namespace.RootContext(nil), &logical.Request{ + Path: "groups/nested/testgroup", + Operation: logical.UpdateOperation, + Storage: storage, + Data: map[string]interface{}{ + "policies": []string{"default"}, + }, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } + + // Create user "testuser" + resp, err = b.HandleRequest(namespace.RootContext(nil), &logical.Request{ + Path: "users/testuser", + Operation: logical.UpdateOperation, + Storage: storage, + Data: map[string]interface{}{ + "policies": []string{"default"}, + "groups": "testgroup,nested/testgroup", + }, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } + + // Create user "nested/testuser" + resp, err = b.HandleRequest(namespace.RootContext(nil), &logical.Request{ + Path: "users/nested/testuser", + Operation: logical.UpdateOperation, + Storage: storage, + Data: map[string]interface{}{ + "policies": []string{"default"}, + "groups": "testgroup,nested/testgroup", + }, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } + + // List users + resp, err = b.HandleRequest(namespace.RootContext(nil), &logical.Request{ + Path: "users/", + Operation: logical.ListOperation, + Storage: storage, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } + expected := []string{"testuser", "nested/testuser"} + if 
!reflect.DeepEqual(expected, resp.Data["keys"].([]string)) { + t.Fatalf("bad: listed users; expected: %#v actual: %#v", expected, resp.Data["keys"].([]string)) + } + + // List groups + resp, err = b.HandleRequest(namespace.RootContext(nil), &logical.Request{ + Path: "groups/", + Operation: logical.ListOperation, + Storage: storage, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v", resp, err) + } + expected = []string{"testgroup", "nested/testgroup"} + if !reflect.DeepEqual(expected, resp.Data["keys"].([]string)) { + t.Fatalf("bad: listed groups; expected: %#v actual: %#v", expected, resp.Data["keys"].([]string)) + } +} + +func TestLdapAuthBackend_CaseSensitivity(t *testing.T) { + var resp *logical.Response + var err error + b, storage := createBackendWithStorage(t) + + ctx := context.Background() + + testVals := func(caseSensitive bool) { + // Clear storage + userList, err := storage.List(ctx, "user/") + if err != nil { + t.Fatal(err) + } + for _, user := range userList { + err = storage.Delete(ctx, "user/"+user) + if err != nil { + t.Fatal(err) + } + } + groupList, err := storage.List(ctx, "group/") + if err != nil { + t.Fatal(err) + } + for _, group := range groupList { + err = storage.Delete(ctx, "group/"+group) + if err != nil { + t.Fatal(err) + } + } + + configReq := &logical.Request{ + Path: "config", + Operation: logical.ReadOperation, + Storage: storage, + } + resp, err = b.HandleRequest(ctx, configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + if resp == nil { + t.Fatal("nil response") + } + if resp.Data["case_sensitive_names"].(bool) != caseSensitive { + t.Fatalf("expected case sensitivity %t, got %t", caseSensitive, resp.Data["case_sensitive_names"].(bool)) + } + + groupReq := &logical.Request{ + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "policies": "grouppolicy", + }, + Path: "groups/EngineerS", + Storage: storage, + } + resp, err = b.HandleRequest(ctx, groupReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + keys, err := storage.List(ctx, "group/") + if err != nil { + t.Fatal(err) + } + switch caseSensitive { + case true: + if keys[0] != "EngineerS" { + t.Fatalf("bad: %s", keys[0]) + } + default: + if keys[0] != "engineers" { + t.Fatalf("bad: %s", keys[0]) + } + } + + userReq := &logical.Request{ + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "groups": "EngineerS", + "policies": "userpolicy", + }, + Path: "users/hermeS conRad", + Storage: storage, + } + resp, err = b.HandleRequest(ctx, userReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + keys, err = storage.List(ctx, "user/") + if err != nil { + t.Fatal(err) + } + switch caseSensitive { + case true: + if keys[0] != "hermeS conRad" { + t.Fatalf("bad: %s", keys[0]) + } + default: + if keys[0] != "hermes conrad" { + t.Fatalf("bad: %s", keys[0]) + } + } + + if caseSensitive { + // The online test server is actually case sensitive so we need to + // write again so it works + userReq = &logical.Request{ + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "groups": "EngineerS", + "policies": "userpolicy", + }, + Path: "users/Hermes Conrad", + Storage: storage, + Connection: &logical.Connection{}, + } + resp, err = b.HandleRequest(ctx, userReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + } + + 
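// Log in with the canonical mixed-case name; the policies checked below
+		// must resolve through both the user and group mappings regardless of
+		// the case sensitivity mode under test.
+		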
loginReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "login/Hermes Conrad", + Data: map[string]interface{}{ + "password": "hermes", + }, + Storage: storage, + Connection: &logical.Connection{}, + } + resp, err = b.HandleRequest(ctx, loginReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + expected := []string{"grouppolicy", "userpolicy"} + if !reflect.DeepEqual(expected, resp.Auth.Policies) { + t.Fatalf("bad: policies: expected: %q, actual: %q", expected, resp.Auth.Policies) + } + } + + cleanup, cfg := ldap.PrepareTestContainer(t, "latest") + defer cleanup() + configReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config", + Data: map[string]interface{}{ + "url": cfg.Url, + "userattr": cfg.UserAttr, + "userdn": cfg.UserDN, + "groupdn": cfg.GroupDN, + "groupattr": cfg.GroupAttr, + "binddn": cfg.BindDN, + "bindpass": cfg.BindPassword, + }, + Storage: storage, + } + resp, err = b.HandleRequest(ctx, configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + testVals(false) + + // Check that if the value is nil, on read it is case sensitive + configEntry, err := b.Config(ctx, configReq) + if err != nil { + t.Fatal(err) + } + configEntry.CaseSensitiveNames = nil + entry, err := logical.StorageEntryJSON("config", configEntry) + if err != nil { + t.Fatal(err) + } + err = configReq.Storage.Put(ctx, entry) + if err != nil { + t.Fatal(err) + } + + testVals(true) +} + +func TestLdapAuthBackend_UserPolicies(t *testing.T) { + var resp *logical.Response + var err error + b, storage := createBackendWithStorage(t) + + cleanup, cfg := ldap.PrepareTestContainer(t, "latest") + defer cleanup() + configReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config", + Data: map[string]interface{}{ + "url": cfg.Url, + "userattr": cfg.UserAttr, + "userdn": cfg.UserDN, + "groupdn": cfg.GroupDN, + "groupattr": cfg.GroupAttr, + "binddn": cfg.BindDN, + "bindpassword": cfg.BindPassword, + }, + Storage: storage, + } + resp, err = b.HandleRequest(context.Background(), configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + groupReq := &logical.Request{ + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "policies": "grouppolicy", + }, + Path: "groups/engineers", + Storage: storage, + Connection: &logical.Connection{}, + } + resp, err = b.HandleRequest(context.Background(), groupReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + userReq := &logical.Request{ + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "groups": "engineers", + "policies": "userpolicy", + }, + Path: "users/hermes conrad", + Storage: storage, + Connection: &logical.Connection{}, + } + + resp, err = b.HandleRequest(context.Background(), userReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + loginReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "login/hermes conrad", + Data: map[string]interface{}{ + "password": "hermes", + }, + Storage: storage, + Connection: &logical.Connection{}, + } + + resp, err = b.HandleRequest(context.Background(), loginReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + expected := []string{"grouppolicy", "userpolicy"} + if !reflect.DeepEqual(expected, resp.Auth.Policies) { + 
t.Fatalf("bad: policies: expected: %q, actual: %q", expected, resp.Auth.Policies) + } +} + +/* +* Acceptance test for LDAP Auth Method +* +* The tests here rely on a docker LDAP server: +* [https://github.com/rroemhild/docker-test-openldap] +* +* ...as well as existence of a person object, `cn=Hermes Conrad,dc=example,dc=com`, +* which is a member of a group, `cn=admin_staff,ou=people,dc=example,dc=com` +* + - Querying the server from the command line: + - $ docker run --privileged -d -p 389:389 --name ldap --rm rroemhild/test-openldap + - $ ldapsearch -x -H ldap://localhost -b dc=planetexpress,dc=com -s sub uid=hermes + - $ ldapsearch -x -H ldap://localhost -b dc=planetexpress,dc=com -s sub \ + 'member=cn=Hermes Conrad,ou=people,dc=planetexpress,dc=com' +*/ +func factory(t *testing.T) logical.Backend { + defaultLeaseTTLVal := time.Hour * 24 + maxLeaseTTLVal := time.Hour * 24 * 32 + b, err := Factory(context.Background(), &logical.BackendConfig{ + Logger: hclog.New(&hclog.LoggerOptions{ + Name: "FactoryLogger", + Level: hclog.Debug, + }), + System: &logical.StaticSystemView{ + DefaultLeaseTTLVal: defaultLeaseTTLVal, + MaxLeaseTTLVal: maxLeaseTTLVal, + }, + }) + if err != nil { + t.Fatalf("Unable to create backend: %s", err) + } + return b +} + +func TestBackend_basic(t *testing.T) { + b := factory(t) + cleanup, cfg := ldap.PrepareTestContainer(t, "latest") + defer cleanup() + + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfigUrl(t, cfg), + // Map Admin_staff group (from LDAP server) with foo policy + testAccStepGroup(t, "admin_staff", "foo"), + + // Map engineers group (local) with bar policy + testAccStepGroup(t, "engineers", "bar"), + + // Map hermes conrad user with local engineers group + testAccStepUser(t, "hermes conrad", "engineers"), + + // Authenticate + testAccStepLogin(t, "hermes conrad", "hermes"), + + // Verify both groups mappings can be listed back + testAccStepGroupList(t, []string{"engineers", "admin_staff"}), + + // Verify user mapping can be listed back + testAccStepUserList(t, []string{"hermes conrad"}), + }, + }) +} + +func TestBackend_basic_noPolicies(t *testing.T) { + b := factory(t) + cleanup, cfg := ldap.PrepareTestContainer(t, "latest") + defer cleanup() + + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfigUrl(t, cfg), + // Create LDAP user + testAccStepUser(t, "hermes conrad", ""), + // Authenticate + testAccStepLoginNoAttachedPolicies(t, "hermes conrad", "hermes"), + testAccStepUserList(t, []string{"hermes conrad"}), + }, + }) +} + +func TestBackend_basic_group_noPolicies(t *testing.T) { + b := factory(t) + cleanup, cfg := ldap.PrepareTestContainer(t, "latest") + defer cleanup() + + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfigUrl(t, cfg), + // Create engineers group with no policies + testAccStepGroup(t, "engineers", ""), + // Map hermes conrad user with local engineers group + testAccStepUser(t, "hermes conrad", "engineers"), + // Authenticate + testAccStepLoginNoAttachedPolicies(t, "hermes conrad", "hermes"), + // Verify group mapping can be listed back + testAccStepGroupList(t, []string{"engineers"}), + }, + }) +} + +func TestBackend_basic_authbind(t *testing.T) { + b := factory(t) + cleanup, cfg := ldap.PrepareTestContainer(t, "latest") + defer cleanup() + + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: b, + Steps: 
+			testAccStepConfigUrlWithAuthBind(t, cfg),
+			testAccStepGroup(t, "admin_staff", "foo"),
+			testAccStepGroup(t, "engineers", "bar"),
+			testAccStepUser(t, "hermes conrad", "engineers"),
+			testAccStepLogin(t, "hermes conrad", "hermes"),
+		},
+	})
+}
+
+func TestBackend_basic_authbind_userfilter(t *testing.T) {
+	b := factory(t)
+	cleanup, cfg := ldap.PrepareTestContainer(t, "latest")
+	defer cleanup()
+
+	// A userfilter that does not use userattr should result in a warning in the response
+	cfg.UserFilter = "((mail={{.Username}}))"
+	logicaltest.Test(t, logicaltest.TestCase{
+		CredentialBackend: b,
+		Steps: []logicaltest.TestStep{
+			testAccStepConfigUrlWarningCheck(t, cfg, logical.UpdateOperation, []string{userFilterWarning}),
+			testAccStepConfigUrlWarningCheck(t, cfg, logical.ReadOperation, []string{userFilterWarning}),
+		},
+	})
+
+	// If both upndomain and userfilter are set, ensure that a warning is still
+	// returned if userattr is not considered
+	cfg.UPNDomain = "planetexpress.com"
+
+	logicaltest.Test(t, logicaltest.TestCase{
+		CredentialBackend: b,
+		Steps: []logicaltest.TestStep{
+			testAccStepConfigUrlWarningCheck(t, cfg, logical.UpdateOperation, []string{userFilterWarning}),
+			testAccStepConfigUrlWarningCheck(t, cfg, logical.ReadOperation, []string{userFilterWarning}),
+		},
+	})
+
+	cfg.UPNDomain = ""
+
+	// Add a liberal user filter, allowing login with either cn or email
+	cfg.UserFilter = "(|({{.UserAttr}}={{.Username}})(mail={{.Username}}))"
+
+	logicaltest.Test(t, logicaltest.TestCase{
+		CredentialBackend: b,
+		Steps: []logicaltest.TestStep{
+			testAccStepConfigUrl(t, cfg),
+			// Create engineers group with no policies
+			testAccStepGroup(t, "engineers", ""),
+			// Map hermes conrad user with local engineers group
+			testAccStepUser(t, "hermes conrad", "engineers"),
+			// Authenticate with cn attribute
+			testAccStepLoginNoAttachedPolicies(t, "hermes conrad", "hermes"),
+			// Authenticate with mail attribute
+			testAccStepLoginNoAttachedPolicies(t, "hermes@planetexpress.com", "hermes"),
+		},
+	})
+
+	// A filter that resolves to the same DN should yield the same entity_id
+	entity_id := ""
+
+	logicaltest.Test(t, logicaltest.TestCase{
+		CredentialBackend: b,
+		Steps: []logicaltest.TestStep{
+			testAccStepConfigUrl(t, cfg),
+			// Create engineers group with no policies
+			testAccStepGroup(t, "engineers", ""),
+			// Map hermes conrad user with local engineers group
+			testAccStepUser(t, "hermes conrad", "engineers"),
+			// Authenticate with cn attribute
+			testAccStepLoginReturnsSameEntity(t, "hermes conrad", "hermes", &entity_id),
+			// Authenticate with mail attribute
+			testAccStepLoginReturnsSameEntity(t, "hermes@planetexpress.com", "hermes", &entity_id),
+		},
+	})
+
+	// A missing entity alias attribute means access is denied
+	cfg.UserAttr = "inexistent"
+	cfg.UserFilter = "(|({{.UserAttr}}={{.Username}})(mail={{.Username}}))"
+
+	logicaltest.Test(t, logicaltest.TestCase{
+		CredentialBackend: b,
+		Steps: []logicaltest.TestStep{
+			testAccStepConfigUrl(t, cfg),
+			// Authenticating with the mail attribute will find the DN, but the missing attribute means access is denied
+			testAccStepLoginFailure(t, "hermes@planetexpress.com", "hermes"),
+		},
+	})
+	cfg.UserAttr = "cn"
+
+	// UPNDomain has precedence over userfilter, for backward compatibility
+	cfg.UPNDomain = "planetexpress.com"
+
+	addUPNAttributeToLDAPSchemaAndUser(t, cfg, "cn=Hubert J. Farnsworth,ou=people,dc=planetexpress,dc=com", "professor@planetexpress.com")
Farnsworth,ou=people,dc=planetexpress,dc=com", "professor@planetexpress.com") + + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfigUrlWithAuthBind(t, cfg), + testAccStepLoginNoAttachedPolicies(t, "professor", "professor"), + }, + }) + + cfg.UPNDomain = "" + + // Add a strict user filter, rejecting login of bureaucrats + cfg.UserFilter = "(&({{.UserAttr}}={{.Username}})(!(employeeType=Bureaucrat)))" + + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfigUrl(t, cfg), + // Authenticate with cn attribute + testAccStepLoginFailure(t, "hermes conrad", "hermes"), + }, + }) + + // Login fails when multiple user match search filter (using an incorrect filter on purporse) + cfg.UserFilter = "(objectClass=*)" + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: b, + Steps: []logicaltest.TestStep{ + // testAccStepConfigUrl(t, cfg), + testAccStepConfigUrlWithAuthBind(t, cfg), + // Authenticate with cn attribute + testAccStepLoginFailure(t, "hermes conrad", "hermes"), + }, + }) + + // If UserAttr returns multiple attributes that can be used as alias then + // we return an error... + cfg.UserAttr = "employeeType" + cfg.UserFilter = "(cn={{.Username}})" + cfg.UsernameAsAlias = false + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfigUrl(t, cfg), + testAccStepLoginFailure(t, "hermes conrad", "hermes"), + }, + }) + + // ...unless username_as_alias has been set in which case we don't care + // about the alias returned by the LDAP server and always use the username + cfg.UsernameAsAlias = true + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfigUrl(t, cfg), + testAccStepLoginNoAttachedPolicies(t, "hermes conrad", "hermes"), + }, + }) +} + +func TestBackend_basic_authbind_metadata_name(t *testing.T) { + b := factory(t) + cleanup, cfg := ldap.PrepareTestContainer(t, "latest") + defer cleanup() + + cfg.UserAttr = "cn" + cfg.UPNDomain = "planetexpress.com" + + addUPNAttributeToLDAPSchemaAndUser(t, cfg, "cn=Hubert J. 
+
+	logicaltest.Test(t, logicaltest.TestCase{
+		CredentialBackend: b,
+		Steps: []logicaltest.TestStep{
+			testAccStepConfigUrlWithAuthBind(t, cfg),
+			testAccStepLoginAliasMetadataName(t, "professor", "professor"),
+		},
+	})
+}
+
+func addUPNAttributeToLDAPSchemaAndUser(t *testing.T, cfg *ldaputil.ConfigEntry, testUserDN string, testUserUPN string) {
+	// Setup connection
+	client := &ldaputil.Client{
+		Logger: hclog.New(&hclog.LoggerOptions{
+			Name:  "LDAPAuthTest",
+			Level: hclog.Debug,
+		}),
+		LDAP: ldaputil.NewLDAP(),
+	}
+	conn, err := client.DialLDAP(cfg)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer conn.Close()
+	if err := conn.Bind("cn=admin,cn=config", cfg.BindPassword); err != nil {
+		t.Fatal(err)
+	}
+
+	// Add the userPrincipalName attribute type
+	userPrincipalNameTypeReq := goldap.NewModifyRequest("cn={0}core,cn=schema,cn=config", nil)
+	userPrincipalNameTypeReq.Add("olcAttributetypes", []string{"( 2.25.247072656268950430024439664556757516066 NAME ( 'userPrincipalName' ) SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 EQUALITY caseIgnoreMatch SINGLE-VALUE )"})
+	if err := conn.Modify(userPrincipalNameTypeReq); err != nil {
+		t.Fatal(err)
+	}
+
+	// Add a new object class
+	userPrincipalNameObjClassReq := goldap.NewModifyRequest("cn={0}core,cn=schema,cn=config", nil)
+	userPrincipalNameObjClassReq.Add("olcObjectClasses", []string{"( 1.2.840.113556.6.2.6 NAME 'PrincipalNameClass' AUXILIARY MAY ( userPrincipalName ) )"})
+	if err := conn.Modify(userPrincipalNameObjClassReq); err != nil {
+		t.Fatal(err)
+	}
+
+	// Re-authenticate with the binddn user
+	if err := conn.Bind(cfg.BindDN, cfg.BindPassword); err != nil {
+		t.Fatal(err)
+	}
+
+	// Modify the professor user and add the userPrincipalName attribute
+	modifyUserReq := goldap.NewModifyRequest(testUserDN, nil)
+	modifyUserReq.Add("objectClass", []string{"PrincipalNameClass"})
+	modifyUserReq.Add("userPrincipalName", []string{testUserUPN})
+	if err := conn.Modify(modifyUserReq); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestBackend_basic_discover(t *testing.T) {
+	b := factory(t)
+	cleanup, cfg := ldap.PrepareTestContainer(t, "latest")
+	defer cleanup()
+
+	logicaltest.Test(t, logicaltest.TestCase{
+		CredentialBackend: b,
+		Steps: []logicaltest.TestStep{
+			testAccStepConfigUrlWithDiscover(t, cfg),
+			testAccStepGroup(t, "admin_staff", "foo"),
+			testAccStepGroup(t, "engineers", "bar"),
+			testAccStepUser(t, "hermes conrad", "engineers"),
+			testAccStepLogin(t, "hermes conrad", "hermes"),
+		},
+	})
+}
+
+func TestBackend_basic_nogroupdn(t *testing.T) {
+	b := factory(t)
+	cleanup, cfg := ldap.PrepareTestContainer(t, "latest")
+	defer cleanup()
+
+	logicaltest.Test(t, logicaltest.TestCase{
+		CredentialBackend: b,
+		Steps: []logicaltest.TestStep{
+			testAccStepConfigUrlNoGroupDN(t, cfg),
+			testAccStepGroup(t, "admin_staff", "foo"),
+			testAccStepGroup(t, "engineers", "bar"),
+			testAccStepUser(t, "hermes conrad", "engineers"),
+			testAccStepLoginNoGroupDN(t, "hermes conrad", "hermes"),
+		},
+	})
+}
+
+func TestBackend_groupCrud(t *testing.T) {
+	b := factory(t)
+
+	logicaltest.Test(t, logicaltest.TestCase{
+		CredentialBackend: b,
+		Steps: []logicaltest.TestStep{
+			testAccStepGroup(t, "g1", "foo"),
+			testAccStepReadGroup(t, "g1", "foo"),
+			testAccStepDeleteGroup(t, "g1"),
+			testAccStepReadGroup(t, "g1", ""),
+		},
+	})
+}
+
+/*
+ * Test backend configuration defaults are successfully read.
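+ * An empty update to "config" must leave every default in place; the read
+ * that follows verifies the documented defaults for groupfilter, groupattr,
+ * userattr, userfilter and deny_null_bind.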
+ */ +func TestBackend_configDefaultsAfterUpdate(t *testing.T) { + b := factory(t) + + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: b, + Steps: []logicaltest.TestStep{ + { + Operation: logical.UpdateOperation, + Path: "config", + Data: map[string]interface{}{}, + }, + { + Operation: logical.ReadOperation, + Path: "config", + Check: func(resp *logical.Response) error { + if resp == nil { + return fmt.Errorf("bad: %#v", resp) + } + + // Test well-known defaults + cfg := resp.Data + defaultGroupFilter := "(|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))" + if cfg["groupfilter"] != defaultGroupFilter { + t.Errorf("Default mismatch: groupfilter. Expected: %q, received :%q", defaultGroupFilter, cfg["groupfilter"]) + } + + defaultGroupAttr := "cn" + if cfg["groupattr"] != defaultGroupAttr { + t.Errorf("Default mismatch: groupattr. Expected: %q, received :%q", defaultGroupAttr, cfg["groupattr"]) + } + + defaultUserAttr := "cn" + if cfg["userattr"] != defaultUserAttr { + t.Errorf("Default mismatch: userattr. Expected: %q, received :%q", defaultUserAttr, cfg["userattr"]) + } + + defaultUserFilter := "({{.UserAttr}}={{.Username}})" + if cfg["userfilter"] != defaultUserFilter { + t.Errorf("Default mismatch: userfilter. Expected: %q, received :%q", defaultUserFilter, cfg["userfilter"]) + } + + defaultDenyNullBind := true + if cfg["deny_null_bind"] != defaultDenyNullBind { + t.Errorf("Default mismatch: deny_null_bind. Expected: '%t', received :%q", defaultDenyNullBind, cfg["deny_null_bind"]) + } + + return nil + }, + }, + }, + }) +} + +func testAccStepConfigUrl(t *testing.T, cfg *ldaputil.ConfigEntry) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "config", + Data: map[string]interface{}{ + "url": cfg.Url, + "userattr": cfg.UserAttr, + "userdn": cfg.UserDN, + "userfilter": cfg.UserFilter, + "groupdn": cfg.GroupDN, + "groupattr": cfg.GroupAttr, + "binddn": cfg.BindDN, + "bindpass": cfg.BindPassword, + "case_sensitive_names": true, + "token_policies": "abc,xyz", + "request_timeout": cfg.RequestTimeout, + "connection_timeout": cfg.ConnectionTimeout, + "username_as_alias": cfg.UsernameAsAlias, + }, + } +} + +func testAccStepConfigUrlWithAuthBind(t *testing.T, cfg *ldaputil.ConfigEntry) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "config", + Data: map[string]interface{}{ + // In this test we also exercise multiple URL support + "url": "foobar://ldap.example.com," + cfg.Url, + "userattr": cfg.UserAttr, + "userdn": cfg.UserDN, + "groupdn": cfg.GroupDN, + "groupattr": cfg.GroupAttr, + "binddn": cfg.BindDN, + "bindpass": cfg.BindPassword, + "upndomain": cfg.UPNDomain, + "case_sensitive_names": true, + "token_policies": "abc,xyz", + "request_timeout": cfg.RequestTimeout, + "connection_timeout": cfg.ConnectionTimeout, + }, + } +} + +func testAccStepConfigUrlWithDiscover(t *testing.T, cfg *ldaputil.ConfigEntry) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "config", + Data: map[string]interface{}{ + "url": cfg.Url, + "userattr": cfg.UserAttr, + "userdn": cfg.UserDN, + "groupdn": cfg.GroupDN, + "groupattr": cfg.GroupAttr, + "binddn": cfg.BindDN, + "bindpass": cfg.BindPassword, + "discoverdn": true, + "case_sensitive_names": true, + "token_policies": "abc,xyz", + "request_timeout": cfg.RequestTimeout, + "connection_timeout": cfg.ConnectionTimeout, + }, + } +} + +func testAccStepConfigUrlNoGroupDN(t *testing.T, cfg 
*ldaputil.ConfigEntry) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "config", + Data: map[string]interface{}{ + "url": cfg.Url, + "userattr": cfg.UserAttr, + "userdn": cfg.UserDN, + "binddn": cfg.BindDN, + "bindpass": cfg.BindPassword, + "discoverdn": true, + "case_sensitive_names": true, + "request_timeout": cfg.RequestTimeout, + "connection_timeout": cfg.ConnectionTimeout, + }, + } +} + +func testAccStepConfigUrlWarningCheck(t *testing.T, cfg *ldaputil.ConfigEntry, operation logical.Operation, warnings []string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: operation, + Path: "config", + Data: map[string]interface{}{ + "url": cfg.Url, + "userattr": cfg.UserAttr, + "userdn": cfg.UserDN, + "userfilter": cfg.UserFilter, + "groupdn": cfg.GroupDN, + "groupattr": cfg.GroupAttr, + "binddn": cfg.BindDN, + "bindpass": cfg.BindPassword, + "case_sensitive_names": true, + "token_policies": "abc,xyz", + "request_timeout": cfg.RequestTimeout, + "connection_timeout": cfg.ConnectionTimeout, + }, + Check: func(response *logical.Response) error { + if len(response.Warnings) == 0 { + return fmt.Errorf("expected warnings, got none") + } + + if !strutil.StrListSubset(response.Warnings, warnings) { + return fmt.Errorf("expected response to contain the following warnings:\n%s\ngot:\n%s", warnings, response.Warnings) + } + return nil + }, + } +} + +func testAccStepGroup(t *testing.T, group string, policies string) logicaltest.TestStep { + t.Logf("[testAccStepGroup] - Registering group %s, policy %s", group, policies) + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "groups/" + group, + Data: map[string]interface{}{ + "policies": policies, + }, + } +} + +func testAccStepReadGroup(t *testing.T, group string, policies string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "groups/" + group, + Check: func(resp *logical.Response) error { + if resp == nil { + if policies == "" { + return nil + } + return fmt.Errorf("bad: %#v", resp) + } + + var d struct { + Policies []string `mapstructure:"policies"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + + if !reflect.DeepEqual(d.Policies, policyutil.ParsePolicies(policies)) { + return fmt.Errorf("bad: %#v", resp) + } + + return nil + }, + } +} + +func testAccStepDeleteGroup(t *testing.T, group string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.DeleteOperation, + Path: "groups/" + group, + } +} + +func TestBackend_userCrud(t *testing.T) { + b := Backend() + + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepUser(t, "g1", "bar"), + testAccStepReadUser(t, "g1", "bar"), + testAccStepDeleteUser(t, "g1"), + testAccStepReadUser(t, "g1", ""), + }, + }) +} + +func testAccStepUser(t *testing.T, user string, groups string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "users/" + user, + Data: map[string]interface{}{ + "groups": groups, + }, + } +} + +func testAccStepReadUser(t *testing.T, user string, groups string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "users/" + user, + Check: func(resp *logical.Response) error { + if resp == nil { + if groups == "" { + return nil + } + return fmt.Errorf("bad: %#v", resp) + } + + var d struct { + Groups string `mapstructure:"groups"` + } + if err := 
mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + + if d.Groups != groups { + return fmt.Errorf("bad: %#v", resp) + } + + return nil + }, + } +} + +func testAccStepDeleteUser(t *testing.T, user string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.DeleteOperation, + Path: "users/" + user, + } +} + +func testAccStepLogin(t *testing.T, user string, pass string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "login/" + user, + Data: map[string]interface{}{ + "password": pass, + }, + Unauthenticated: true, + + // Verifies user hermes conrad maps to groups via local group (engineers) as well as remote group (Scientists) + Check: logicaltest.TestCheckAuth([]string{"abc", "bar", "default", "foo", "xyz"}), + } +} + +func testAccStepLoginReturnsSameEntity(t *testing.T, user string, pass string, entity_id *string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "login/" + user, + Data: map[string]interface{}{ + "password": pass, + }, + Unauthenticated: true, + + // Verifies user hermes conrad maps to groups via local group (engineers) as well as remote group (Scientists) + Check: logicaltest.TestCheckAuthEntityId(entity_id), + } +} + +func testAccStepLoginNoAttachedPolicies(t *testing.T, user string, pass string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "login/" + user, + Data: map[string]interface{}{ + "password": pass, + }, + Unauthenticated: true, + + // Verifies user hermes conrad maps to groups via local group (engineers) as well as remote group (Scientists) + Check: logicaltest.TestCheckAuth([]string{"abc", "default", "xyz"}), + } +} + +func testAccStepLoginAliasMetadataName(t *testing.T, user string, pass string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "login/" + user, + Data: map[string]interface{}{ + "password": pass, + }, + Unauthenticated: true, + + Check: logicaltest.TestCheckAuthEntityAliasMetadataName("name", user), + } +} + +func testAccStepLoginFailure(t *testing.T, user string, pass string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "login/" + user, + Data: map[string]interface{}{ + "password": pass, + }, + Unauthenticated: true, + + ErrorOk: true, + } +} + +func testAccStepLoginNoGroupDN(t *testing.T, user string, pass string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "login/" + user, + Data: map[string]interface{}{ + "password": pass, + }, + Unauthenticated: true, + + // Verifies a search without defined GroupDN returns a warning rather than failing + Check: func(resp *logical.Response) error { + if len(resp.Warnings) != 1 { + return fmt.Errorf("expected a warning due to no group dn, got: %#v", resp.Warnings) + } + + return logicaltest.TestCheckAuth([]string{"bar", "default"})(resp) + }, + } +} + +func testAccStepGroupList(t *testing.T, groups []string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ListOperation, + Path: "groups", + Check: func(resp *logical.Response) error { + if resp.IsError() { + return fmt.Errorf("got error response: %#v", *resp) + } + + expected := make([]string, len(groups)) + copy(expected, groups) + sort.Strings(expected) + + sortedResponse := make([]string, len(resp.Data["keys"].([]string))) + copy(sortedResponse, resp.Data["keys"].([]string)) + 
sort.Strings(sortedResponse) + + if !reflect.DeepEqual(expected, sortedResponse) { + return fmt.Errorf("expected:\n%#v\ngot:\n%#v\n", expected, sortedResponse) + } + return nil + }, + } +} + +func testAccStepUserList(t *testing.T, users []string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ListOperation, + Path: "users", + Check: func(resp *logical.Response) error { + if resp.IsError() { + return fmt.Errorf("got error response: %#v", *resp) + } + + expected := make([]string, len(users)) + copy(expected, users) + sort.Strings(expected) + + sortedResponse := make([]string, len(resp.Data["keys"].([]string))) + copy(sortedResponse, resp.Data["keys"].([]string)) + sort.Strings(sortedResponse) + + if !reflect.DeepEqual(expected, sortedResponse) { + return fmt.Errorf("expected:\n%#v\ngot:\n%#v\n", expected, sortedResponse) + } + return nil + }, + } +} + +func TestLdapAuthBackend_ConfigUpgrade(t *testing.T) { + var resp *logical.Response + var err error + b, storage := createBackendWithStorage(t) + + ctx := context.Background() + + cleanup, cfg := ldap.PrepareTestContainer(t, "latest") + defer cleanup() + configReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config", + Data: map[string]interface{}{ + "url": cfg.Url, + "userattr": cfg.UserAttr, + "userdn": cfg.UserDN, + "userfilter": cfg.UserFilter, + "groupdn": cfg.GroupDN, + "groupattr": cfg.GroupAttr, + "binddn": cfg.BindDN, + "bindpass": cfg.BindPassword, + "token_period": "5m", + "token_explicit_max_ttl": "24h", + "request_timeout": cfg.RequestTimeout, + "max_page_size": cfg.MaximumPageSize, + "connection_timeout": cfg.ConnectionTimeout, + }, + Storage: storage, + Connection: &logical.Connection{}, + } + resp, err = b.HandleRequest(ctx, configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + fd, err := b.getConfigFieldData() + if err != nil { + t.Fatal(err) + } + defParams, err := ldaputil.NewConfigEntry(nil, fd) + if err != nil { + t.Fatal(err) + } + falseBool := new(bool) + *falseBool = false + + exp := &ldapConfigEntry{ + TokenParams: tokenutil.TokenParams{ + TokenPeriod: 5 * time.Minute, + TokenExplicitMaxTTL: 24 * time.Hour, + }, + ConfigEntry: &ldaputil.ConfigEntry{ + Url: cfg.Url, + UserAttr: cfg.UserAttr, + UserFilter: cfg.UserFilter, + UserDN: cfg.UserDN, + GroupDN: cfg.GroupDN, + GroupAttr: cfg.GroupAttr, + BindDN: cfg.BindDN, + BindPassword: cfg.BindPassword, + GroupFilter: defParams.GroupFilter, + DenyNullBind: defParams.DenyNullBind, + TLSMinVersion: defParams.TLSMinVersion, + TLSMaxVersion: defParams.TLSMaxVersion, + CaseSensitiveNames: falseBool, + UsePre111GroupCNBehavior: new(bool), + RequestTimeout: cfg.RequestTimeout, + ConnectionTimeout: cfg.ConnectionTimeout, + UsernameAsAlias: false, + DerefAliases: "never", + MaximumPageSize: 1000, + }, + } + + configEntry, err := b.Config(ctx, configReq) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(exp, configEntry); diff != nil { + t.Fatal(diff) + } + + // Store just the config entry portion, for upgrade testing + entry, err := logical.StorageEntryJSON("config", configEntry.ConfigEntry) + if err != nil { + t.Fatal(err) + } + err = configReq.Storage.Put(ctx, entry) + if err != nil { + t.Fatal(err) + } + + configEntry, err = b.Config(ctx, configReq) + if err != nil { + t.Fatal(err) + } + // We won't have token params anymore so nil those out + exp.TokenParams = tokenutil.TokenParams{} + if diff := deep.Equal(exp, configEntry); diff != nil { + t.Fatal(diff) + } 
+} diff --git a/builtin/credential/ldap/cli.go b/builtin/credential/ldap/cli.go new file mode 100644 index 0000000..7726034 --- /dev/null +++ b/builtin/credential/ldap/cli.go @@ -0,0 +1,102 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ldap + +import ( + "fmt" + "os" + "strings" + + pwd "github.com/hashicorp/go-secure-stdlib/password" + "github.com/hashicorp/vault/api" +) + +type CLIHandler struct{} + +func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) { + mount, ok := m["mount"] + if !ok { + mount = "ldap" + } + + username, ok := m["username"] + if !ok { + username = usernameFromEnv() + if username == "" { + return nil, fmt.Errorf("'username' not supplied and neither 'LOGNAME' nor 'USER' env vars set") + } + } + password, ok := m["password"] + if !ok { + password = passwordFromEnv() + if password == "" { + fmt.Fprintf(os.Stderr, "Password (will be hidden): ") + var err error + password, err = pwd.Read(os.Stdin) + fmt.Fprintf(os.Stderr, "\n") + if err != nil { + return nil, err + } + } + } + + data := map[string]interface{}{ + "password": password, + } + + path := fmt.Sprintf("auth/%s/login/%s", mount, username) + secret, err := c.Logical().Write(path, data) + if err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("empty response from credential provider") + } + + return secret, nil +} + +func (h *CLIHandler) Help() string { + help := ` +Usage: vault login -method=ldap [CONFIG K=V...] + + The LDAP auth method allows users to authenticate using LDAP or + Active Directory. + + Authenticate as "sally": + + $ vault login -method=ldap username=sally + Password (will be hidden): + + Authenticate as "bob": + + $ vault login -method=ldap username=bob password=password + +Configuration: + + password= + LDAP password to use for authentication. If not provided, it will use + the VAULT_LDAP_PASSWORD environment variable. If this is not set, the + CLI will prompt for this on stdin. + + username= + LDAP username to use for authentication. +` + + return strings.TrimSpace(help) +} + +func usernameFromEnv() string { + if logname := os.Getenv("LOGNAME"); logname != "" { + return logname + } + if user := os.Getenv("USER"); user != "" { + return user + } + return "" +} + +func passwordFromEnv() string { + return os.Getenv("VAULT_LDAP_PASSWORD") +} diff --git a/builtin/credential/ldap/cmd/ldap/main.go b/builtin/credential/ldap/cmd/ldap/main.go new file mode 100644 index 0000000..2dcb802 --- /dev/null +++ b/builtin/credential/ldap/cmd/ldap/main.go @@ -0,0 +1,34 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "os" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/credential/ldap" + "github.com/hashicorp/vault/sdk/plugin" +) + +func main() { + apiClientMeta := &api.PluginAPIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args[1:]) + + tlsConfig := apiClientMeta.GetTLSConfig() + tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) + + if err := plugin.ServeMultiplex(&plugin.ServeOpts{ + BackendFactoryFunc: ldap.Factory, + // set the TLSProviderFunc so that the plugin maintains backwards + // compatibility with Vault versions that don’t support plugin AutoMTLS + TLSProviderFunc: tlsProviderFunc, + }); err != nil { + logger := hclog.New(&hclog.LoggerOptions{}) + + logger.Error("plugin shutting down", "error", err) + os.Exit(1) + } +} diff --git a/builtin/credential/ldap/path_config.go b/builtin/credential/ldap/path_config.go new file mode 100644 index 0000000..a06c666 --- /dev/null +++ b/builtin/credential/ldap/path_config.go @@ -0,0 +1,271 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ldap + +import ( + "context" + "strings" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/ldaputil" + "github.com/hashicorp/vault/sdk/helper/tokenutil" + "github.com/hashicorp/vault/sdk/logical" +) + +const userFilterWarning = "userfilter configured does not consider userattr and may result in colliding entity aliases on logins" + +func pathConfig(b *backend) *framework.Path { + p := &framework.Path{ + Pattern: `config`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixLDAP, + Action: "Configure", + }, + + Fields: ldaputil.ConfigFields(), + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "auth-configuration", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure-auth", + }, + }, + }, + + HelpSynopsis: pathConfigHelpSyn, + HelpDescription: pathConfigHelpDesc, + } + + tokenutil.AddTokenFields(p.Fields) + p.Fields["token_policies"].Description += ". This will apply to all tokens generated by this auth method, in addition to any configured for specific users/groups." + return p +} + +/* + * Construct ConfigEntry struct using stored configuration. + */ +func (b *backend) Config(ctx context.Context, req *logical.Request) (*ldapConfigEntry, error) { + storedConfig, err := req.Storage.Get(ctx, "config") + if err != nil { + return nil, err + } + + if storedConfig == nil { + // Create a new ConfigEntry, filling in defaults where appropriate + fd, err := b.getConfigFieldData() + if err != nil { + return nil, err + } + + result, err := ldaputil.NewConfigEntry(nil, fd) + if err != nil { + return nil, err + } + + // No user overrides, return default configuration + result.CaseSensitiveNames = new(bool) + *result.CaseSensitiveNames = false + + result.UsePre111GroupCNBehavior = new(bool) + *result.UsePre111GroupCNBehavior = false + + return &ldapConfigEntry{ConfigEntry: result}, nil + } + + // Deserialize stored configuration. + // Fields not specified in storedConfig will retain their defaults. 
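+	// Nil upgrade-flag booleans indicate an entry written by an older version;
+	// they are backfilled below and persisted where replication state allows.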
+	result := new(ldapConfigEntry)
+	result.ConfigEntry = new(ldaputil.ConfigEntry)
+	if err := storedConfig.DecodeJSON(result); err != nil {
+		return nil, err
+	}
+
+	var persistNeeded bool
+	if result.CaseSensitiveNames == nil {
+		// Upgrade from before switching to case-insensitive
+		result.CaseSensitiveNames = new(bool)
+		*result.CaseSensitiveNames = true
+		persistNeeded = true
+	}
+
+	if result.UsePre111GroupCNBehavior == nil {
+		result.UsePre111GroupCNBehavior = new(bool)
+		*result.UsePre111GroupCNBehavior = true
+		persistNeeded = true
+	}
+
+	if persistNeeded && (b.System().LocalMount() || !b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary|consts.ReplicationPerformanceStandby)) {
+		entry, err := logical.StorageEntryJSON("config", result)
+		if err != nil {
+			return nil, err
+		}
+		if err := req.Storage.Put(ctx, entry); err != nil {
+			return nil, err
+		}
+	}
+
+	return result, nil
+}
+
+func (b *backend) pathConfigRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	cfg, err := b.Config(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	if cfg == nil {
+		return nil, nil
+	}
+
+	data := cfg.PasswordlessMap()
+	cfg.PopulateTokenData(data)
+
+	resp := &logical.Response{
+		Data: data,
+	}
+
+	if warnings := b.checkConfigUserFilter(cfg); len(warnings) > 0 {
+		resp.Warnings = warnings
+	}
+
+	return resp, nil
+}
+
+// checkConfigUserFilter performs a best-effort check of the config's userfilter.
+// It checks whether the templated or literal userattr value is present and,
+// if not, returns a warning.
+func (b *backend) checkConfigUserFilter(cfg *ldapConfigEntry) []string {
+	if cfg == nil || cfg.UserFilter == "" {
+		return nil
+	}
+
+	var warnings []string
+
+	switch {
+	case strings.Contains(cfg.UserFilter, "{{.UserAttr}}"):
+		// Case where the templated userattr value is provided
+	case strings.Contains(cfg.UserFilter, cfg.UserAttr):
+		// Case where the literal userattr value is provided
+	default:
+		b.Logger().Debug(userFilterWarning, "userfilter", cfg.UserFilter, "userattr", cfg.UserAttr)
+		warnings = append(warnings, userFilterWarning)
+	}
+
+	return warnings
+}
+
+func (b *backend) pathConfigWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	cfg, err := b.Config(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	if cfg == nil {
+		return nil, nil
+	}
+
+	// Build a ConfigEntry struct out of the supplied FieldData
+	cfg.ConfigEntry, err = ldaputil.NewConfigEntry(cfg.ConfigEntry, d)
+	if err != nil {
+		return logical.ErrorResponse(err.Error()), nil
+	}
+
+	// On write, if not specified, use false. We do this here so the upgrade
+	// logic works, since it calls the same NewConfigEntry function
+	if cfg.CaseSensitiveNames == nil {
+		cfg.CaseSensitiveNames = new(bool)
+		*cfg.CaseSensitiveNames = false
+	}
+
+	if cfg.UsePre111GroupCNBehavior == nil {
+		cfg.UsePre111GroupCNBehavior = new(bool)
+		*cfg.UsePre111GroupCNBehavior = false
+	}
+
+	if err := cfg.ParseTokenFields(req, d); err != nil {
+		return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+	}
+
+	entry, err := logical.StorageEntryJSON("config", cfg)
+	if err != nil {
+		return nil, err
+	}
+	if err := req.Storage.Put(ctx, entry); err != nil {
+		return nil, err
+	}
+
+	if warnings := b.checkConfigUserFilter(cfg); len(warnings) > 0 {
+		return &logical.Response{
+			Warnings: warnings,
+		}, nil
+	}
+
+	return nil, nil
+}
+
+/*
+ * Returns FieldData describing our ConfigEntry struct schema
+ */
+func (b *backend) getConfigFieldData() (*framework.FieldData, error) {
+	configPath := b.Route("config")
+
+	if configPath == nil {
+		return nil, logical.ErrUnsupportedPath
+	}
+
+	raw := make(map[string]interface{}, len(configPath.Fields))
+
+	fd := framework.FieldData{
+		Raw:    raw,
+		Schema: configPath.Fields,
+	}
+
+	return &fd, nil
+}
+
+type ldapConfigEntry struct {
+	tokenutil.TokenParams
+	*ldaputil.ConfigEntry
+}
+
+const pathConfigHelpSyn = `
+Configure the LDAP server to connect to, along with its options.
+`
+
+const pathConfigHelpDesc = `
+This endpoint allows you to configure the LDAP server to connect to and its
+configuration options.
+
+The LDAP URL can use either the "ldap://" or "ldaps://" scheme. In the former
+case, an unencrypted connection will be made with a default port of 389, unless
+the "starttls" parameter is set to true, in which case TLS will be used. In the
+latter case, an SSL connection will be established with a default port of 636.
+
+## A NOTE ON ESCAPING
+
+It is up to the administrator to provide properly escaped DNs. This includes
+the user DN, bind DN for search, and so on.
+
+The only DN escaping performed by this backend is on usernames given at login
+time when they are inserted into the final bind DN, and uses escaping rules
+defined in RFC 4514.
+
+Additionally, Active Directory has escaping rules that differ slightly from the
+RFC; in particular it requires escaping of '#' regardless of position in the DN
+(the RFC only requires it to be escaped when it is the first character), and
+'=', which the RFC indicates can be escaped with a backslash but does not
+include in its set of required escapes. If you are using Active Directory and
+these appear in your usernames, please ensure that they are escaped, in
+addition to being properly escaped in your configured DNs.
+
+For reference, see https://www.ietf.org/rfc/rfc4514.txt and
+http://social.technet.microsoft.com/wiki/contents/articles/5312.active-directory-characters-to-escape.aspx
+` diff --git a/builtin/credential/ldap/path_groups.go b/builtin/credential/ldap/path_groups.go new file mode 100644 index 0000000..08ac00d --- /dev/null +++ b/builtin/credential/ldap/path_groups.go @@ -0,0 +1,182 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package ldap + +import ( + "context" + "strings" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/policyutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathGroupsList(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "groups/?$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixLDAP, + OperationSuffix: "groups", + Navigation: true, + ItemType: "Group", + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: b.pathGroupList, + }, + + HelpSynopsis: pathGroupHelpSyn, + HelpDescription: pathGroupHelpDesc, + } +} + +func pathGroups(b *backend) *framework.Path { + return &framework.Path{ + Pattern: `groups/(?P.+)`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixLDAP, + OperationSuffix: "group", + Action: "Create", + ItemType: "Group", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the LDAP group.", + }, + + "policies": { + Type: framework.TypeCommaStringSlice, + Description: "Comma-separated list of policies associated to the group.", + DisplayAttrs: &framework.DisplayAttributes{ + Description: "A list of policies associated to the group.", + }, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.DeleteOperation: b.pathGroupDelete, + logical.ReadOperation: b.pathGroupRead, + logical.UpdateOperation: b.pathGroupWrite, + }, + + HelpSynopsis: pathGroupHelpSyn, + HelpDescription: pathGroupHelpDesc, + } +} + +func (b *backend) Group(ctx context.Context, s logical.Storage, n string) (*GroupEntry, error) { + entry, err := s.Get(ctx, "group/"+n) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result GroupEntry + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + return &result, nil +} + +func (b *backend) pathGroupDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + err := req.Storage.Delete(ctx, "group/"+d.Get("name").(string)) + if err != nil { + return nil, err + } + + return nil, nil +} + +func (b *backend) pathGroupRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + groupname := d.Get("name").(string) + + cfg, err := b.Config(ctx, req) + if err != nil { + return nil, err + } + if cfg == nil { + return logical.ErrorResponse("ldap backend not configured"), nil + } + if !*cfg.CaseSensitiveNames { + groupname = strings.ToLower(groupname) + } + + group, err := b.Group(ctx, req.Storage, groupname) + if err != nil { + return nil, err + } + if group == nil { + return nil, nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + "policies": group.Policies, + }, + }, nil +} + +func (b *backend) pathGroupWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + groupname := d.Get("name").(string) + + cfg, err := b.Config(ctx, req) + if err != nil { + return nil, err + } + if cfg == nil { + return logical.ErrorResponse("ldap backend not configured"), nil + } + if !*cfg.CaseSensitiveNames { + groupname = strings.ToLower(groupname) + } + + // Store it + entry, err := logical.StorageEntryJSON("group/"+groupname, &GroupEntry{ + Policies: policyutil.ParsePolicies(d.Get("policies")), + }) + if err != nil { + return nil, err + } + if err := req.Storage.Put(ctx, entry); err != nil { + 
return nil, err + } + + return nil, nil +} + +func (b *backend) pathGroupList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + keys, err := logical.CollectKeysWithPrefix(ctx, req.Storage, "group/") + if err != nil { + return nil, err + } + for i := range keys { + keys[i] = strings.TrimPrefix(keys[i], "group/") + } + return logical.ListResponse(keys), nil +} + +type GroupEntry struct { + Policies []string +} + +const pathGroupHelpSyn = ` +Manage additional groups for users allowed to authenticate. +` + +const pathGroupHelpDesc = ` +This endpoint allows you to create, read, update, and delete configuration +for LDAP groups that are allowed to authenticate, and associate policies to +them. + +Deleting a group will not revoke auth for prior authenticated users in that +group. To do this, do a revoke on "login/" for +the usernames you want revoked. +` diff --git a/builtin/credential/ldap/path_login.go b/builtin/credential/ldap/path_login.go new file mode 100644 index 0000000..782b32e --- /dev/null +++ b/builtin/credential/ldap/path_login.go @@ -0,0 +1,176 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ldap + +import ( + "context" + "fmt" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/cidrutil" + "github.com/hashicorp/vault/sdk/helper/policyutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathLogin(b *backend) *framework.Path { + return &framework.Path{ + Pattern: `login/(?P.+)`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixLDAP, + OperationVerb: "login", + }, + + Fields: map[string]*framework.FieldSchema{ + "username": { + Type: framework.TypeString, + Description: "DN (distinguished name) to be used for login.", + }, + + "password": { + Type: framework.TypeString, + Description: "Password for this user.", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathLogin, + logical.AliasLookaheadOperation: b.pathLoginAliasLookahead, + }, + + HelpSynopsis: pathLoginSyn, + HelpDescription: pathLoginDesc, + } +} + +func (b *backend) pathLoginAliasLookahead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + username := d.Get("username").(string) + if username == "" { + return nil, fmt.Errorf("missing username") + } + + return &logical.Response{ + Auth: &logical.Auth{ + Alias: &logical.Alias{ + Name: username, + }, + }, + }, nil +} + +func (b *backend) pathLogin(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + cfg, err := b.Config(ctx, req) + if err != nil { + return nil, err + } + if cfg == nil { + return logical.ErrorResponse("auth method not configured"), nil + } + + // Check for a CIDR match. 
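+	// If token_bound_cidrs is set on the config, reject the login attempt
+	// unless the request's remote address falls inside one of the configured
+	// CIDR blocks.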
+ if len(cfg.TokenBoundCIDRs) > 0 { + if req.Connection == nil { + b.Logger().Warn("token bound CIDRs found but no connection information available for validation") + return nil, logical.ErrPermissionDenied + } + if !cidrutil.RemoteAddrIsOk(req.Connection.RemoteAddr, cfg.TokenBoundCIDRs) { + return nil, logical.ErrPermissionDenied + } + } + + username := d.Get("username").(string) + password := d.Get("password").(string) + + effectiveUsername, policies, resp, groupNames, err := b.Login(ctx, req, username, password, cfg.UsernameAsAlias) + if err != nil || (resp != nil && resp.IsError()) { + return resp, err + } + + auth := &logical.Auth{ + Metadata: map[string]string{ + "username": username, + }, + InternalData: map[string]interface{}{ + "password": password, + }, + DisplayName: username, + Alias: &logical.Alias{ + Name: effectiveUsername, + Metadata: map[string]string{ + "name": username, + }, + }, + } + + cfg.PopulateTokenAuth(auth) + + // Add in configured policies from mappings + if len(policies) > 0 { + auth.Policies = append(auth.Policies, policies...) + } + + resp.Auth = auth + + for _, groupName := range groupNames { + if groupName == "" { + continue + } + resp.Auth.GroupAliases = append(resp.Auth.GroupAliases, &logical.Alias{ + Name: groupName, + }) + } + return resp, nil +} + +func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + cfg, err := b.Config(ctx, req) + if err != nil { + return nil, err + } + if cfg == nil { + return logical.ErrorResponse("auth method not configured"), nil + } + + username := req.Auth.Metadata["username"] + password := req.Auth.InternalData["password"].(string) + + _, loginPolicies, resp, groupNames, err := b.Login(ctx, req, username, password, cfg.UsernameAsAlias) + if err != nil || (resp != nil && resp.IsError()) { + return resp, err + } + + finalPolicies := cfg.TokenPolicies + if len(loginPolicies) > 0 { + finalPolicies = append(finalPolicies, loginPolicies...) + } + + if !policyutil.EquivalentPolicies(finalPolicies, req.Auth.TokenPolicies) { + return nil, fmt.Errorf("policies have changed, not renewing") + } + + resp.Auth = req.Auth + resp.Auth.Period = cfg.TokenPeriod + resp.Auth.TTL = cfg.TokenTTL + resp.Auth.MaxTTL = cfg.TokenMaxTTL + + // Remove old aliases + resp.Auth.GroupAliases = nil + + for _, groupName := range groupNames { + resp.Auth.GroupAliases = append(resp.Auth.GroupAliases, &logical.Alias{ + Name: groupName, + }) + } + + return resp, nil +} + +const pathLoginSyn = ` +Log in with a username and password. +` + +const pathLoginDesc = ` +This endpoint authenticates using a username and password. Please be sure to +read the note on escaping from the path-help for the 'config' endpoint. +` diff --git a/builtin/credential/ldap/path_users.go b/builtin/credential/ldap/path_users.go new file mode 100644 index 0000000..1ce252d --- /dev/null +++ b/builtin/credential/ldap/path_users.go @@ -0,0 +1,201 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package ldap
+
+import (
+	"context"
+	"strings"
+
+	"github.com/hashicorp/go-secure-stdlib/strutil"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/helper/policyutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+func pathUsersList(b *backend) *framework.Path {
+	return &framework.Path{
+		Pattern: "users/?$",
+
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixLDAP,
+			OperationSuffix: "users",
+			Navigation:      true,
+			ItemType:        "User",
+		},
+
+		Callbacks: map[logical.Operation]framework.OperationFunc{
+			logical.ListOperation: b.pathUserList,
+		},
+
+		HelpSynopsis:    pathUserHelpSyn,
+		HelpDescription: pathUserHelpDesc,
+	}
+}
+
+func pathUsers(b *backend) *framework.Path {
+	return &framework.Path{
+		Pattern: `users/(?P<name>.+)`,
+
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixLDAP,
+			OperationSuffix: "user",
+			Action:          "Create",
+			ItemType:        "User",
+		},
+
+		Fields: map[string]*framework.FieldSchema{
+			"name": {
+				Type:        framework.TypeString,
+				Description: "Name of the LDAP user.",
+			},
+
+			"groups": {
+				Type:        framework.TypeCommaStringSlice,
+				Description: "Comma-separated list of additional groups associated with the user.",
+				DisplayAttrs: &framework.DisplayAttributes{
+					Description: "A list of additional groups associated with the user.",
+				},
+			},
+
+			"policies": {
+				Type:        framework.TypeCommaStringSlice,
+				Description: "Comma-separated list of policies associated with the user.",
+				DisplayAttrs: &framework.DisplayAttributes{
+					Description: "A list of policies associated with the user.",
+				},
+			},
+		},
+
+		Callbacks: map[logical.Operation]framework.OperationFunc{
+			logical.DeleteOperation: b.pathUserDelete,
+			logical.ReadOperation:   b.pathUserRead,
+			logical.UpdateOperation: b.pathUserWrite,
+		},
+
+		HelpSynopsis:    pathUserHelpSyn,
+		HelpDescription: pathUserHelpDesc,
+	}
+}
+
+func (b *backend) User(ctx context.Context, s logical.Storage, n string) (*UserEntry, error) {
+	entry, err := s.Get(ctx, "user/"+n)
+	if err != nil {
+		return nil, err
+	}
+	if entry == nil {
+		return nil, nil
+	}
+
+	var result UserEntry
+	if err := entry.DecodeJSON(&result); err != nil {
+		return nil, err
+	}
+
+	return &result, nil
+}
+
+func (b *backend) pathUserDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	err := req.Storage.Delete(ctx, "user/"+d.Get("name").(string))
+	if err != nil {
+		return nil, err
+	}
+
+	return nil, nil
+}
+
+func (b *backend) pathUserRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	username := d.Get("name").(string)
+
+	cfg, err := b.Config(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	if cfg == nil {
+		return logical.ErrorResponse("ldap backend not configured"), nil
+	}
+	if !*cfg.CaseSensitiveNames {
+		username = strings.ToLower(username)
+	}
+
+	user, err := b.User(ctx, req.Storage, username)
+	if err != nil {
+		return nil, err
+	}
+	if user == nil {
+		return nil, nil
+	}
+
+	return &logical.Response{
+		Data: map[string]interface{}{
+			"groups":   strings.Join(user.Groups, ","),
+			"policies": user.Policies,
+		},
+	}, nil
+}
+
+func (b *backend) pathUserWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	lowercaseGroups := false
+	username := d.Get("name").(string)
+
+	cfg, err := b.Config(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	if cfg == nil {
+		return logical.ErrorResponse("ldap backend not configured"), nil
+	}
+	if !*cfg.CaseSensitiveNames {
+		username = strings.ToLower(username)
+		lowercaseGroups = true
+	}
+
+	groups := strutil.RemoveDuplicates(d.Get("groups").([]string), lowercaseGroups)
+	policies := policyutil.ParsePolicies(d.Get("policies"))
+	for i, g := range groups {
+		groups[i] = strings.TrimSpace(g)
+	}
+
+	// Store it
+	entry, err := logical.StorageEntryJSON("user/"+username, &UserEntry{
+		Groups:   groups,
+		Policies: policies,
+	})
+	if err != nil {
+		return nil, err
+	}
+	if err := req.Storage.Put(ctx, entry); err != nil {
+		return nil, err
+	}
+
+	return nil, nil
+}
+
+func (b *backend) pathUserList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	keys, err := logical.CollectKeysWithPrefix(ctx, req.Storage, "user/")
+	if err != nil {
+		return nil, err
+	}
+	for i := range keys {
+		keys[i] = strings.TrimPrefix(keys[i], "user/")
+	}
+	return logical.ListResponse(keys), nil
+}
+
+type UserEntry struct {
+	Groups   []string
+	Policies []string
+}
+
+const pathUserHelpSyn = `
+Manage users allowed to authenticate.
+`
+
+const pathUserHelpDesc = `
+This endpoint allows you to create, read, update, and delete configuration
+for LDAP users that are allowed to authenticate, in particular associating
+additional groups to them.
+
+Deleting a user will not revoke their auth. To do this, do a revoke on "login/<username>" for
+the usernames you want revoked.
+`
diff --git a/builtin/credential/okta/backend.go b/builtin/credential/okta/backend.go
new file mode 100644
index 0000000..04dba96
--- /dev/null
+++ b/builtin/credential/okta/backend.go
@@ -0,0 +1,406 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package okta
+
+import (
+	"context"
+	"fmt"
+	"net/textproto"
+	"time"
+
+	"github.com/hashicorp/go-secure-stdlib/strutil"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/helper/cidrutil"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/okta/okta-sdk-golang/v2/okta"
+	"github.com/patrickmn/go-cache"
+)
+
+const (
+	operationPrefixOkta = "okta"
+	mfaPushMethod       = "push"
+	mfaTOTPMethod       = "token:software:totp"
+)
+
+func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
+	b := Backend()
+	if err := b.Setup(ctx, conf); err != nil {
+		return nil, err
+	}
+	return b, nil
+}
+
+func Backend() *backend {
+	var b backend
+	b.Backend = &framework.Backend{
+		Help: backendHelp,
+
+		PathsSpecial: &logical.Paths{
+			Unauthenticated: []string{
+				"login/*",
+				"verify/*",
+			},
+			SealWrapStorage: []string{
+				"config",
+			},
+		},
+
+		Paths: []*framework.Path{
+			pathConfig(&b),
+			pathUsers(&b),
+			pathGroups(&b),
+			pathUsersList(&b),
+			pathGroupsList(&b),
+			pathLogin(&b),
+			pathVerify(&b),
+		},
+
+		AuthRenew:   b.pathLoginRenew,
+		BackendType: logical.TypeCredential,
+	}
+	b.verifyCache = cache.New(5*time.Minute, time.Minute)
+
+	return &b
+}
+
+type backend struct {
+	*framework.Backend
+	verifyCache *cache.Cache
+}
+
+func (b *backend) Login(ctx context.Context, req *logical.Request, username, password, totp, nonce, preferredProvider string) ([]string, *logical.Response, []string, error) {
+	cfg, err := b.Config(ctx, req.Storage)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+	if cfg == nil {
+		return nil, logical.ErrorResponse("Okta auth method not configured"), nil, nil
+	}
+
+	// Check for a CIDR match.
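+	// Same token_bound_cidrs enforcement as in the other credential
+	// backends: with bound CIDRs configured, a request that carries no
+	// connection information, or that originates outside the allowed
+	// blocks, is denied outright.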
+ if len(cfg.TokenBoundCIDRs) > 0 { + if req.Connection == nil { + b.Logger().Warn("token bound CIDRs found but no connection information available for validation") + return nil, nil, nil, logical.ErrPermissionDenied + } + if !cidrutil.RemoteAddrIsOk(req.Connection.RemoteAddr, cfg.TokenBoundCIDRs) { + return nil, nil, nil, logical.ErrPermissionDenied + } + } + + shim, err := cfg.OktaClient(ctx) + if err != nil { + return nil, nil, nil, err + } + + type mfaFactor struct { + Id string `json:"id"` + Type string `json:"factorType"` + Provider string `json:"provider"` + Embedded struct { + Challenge struct { + CorrectAnswer *int `json:"correctAnswer"` + } `json:"challenge"` + } `json:"_embedded"` + } + + type embeddedResult struct { + User okta.User `json:"user"` + Factors []mfaFactor `json:"factors"` + Factor *mfaFactor `json:"factor"` + } + + type authResult struct { + Embedded embeddedResult `json:"_embedded"` + Status string `json:"status"` + FactorResult string `json:"factorResult"` + StateToken string `json:"stateToken"` + } + + authReq, err := shim.NewRequest("POST", "authn", map[string]interface{}{ + "username": username, + "password": password, + }) + if err != nil { + return nil, nil, nil, err + } + + var result authResult + rsp, err := shim.Do(authReq, &result) + if err != nil { + if oe, ok := err.(*okta.Error); ok { + return nil, logical.ErrorResponse("Okta auth failed: %v (code=%v)", err, oe.ErrorCode), nil, nil + } + return nil, logical.ErrorResponse(fmt.Sprintf("Okta auth failed: %v", err)), nil, nil + } + if rsp == nil { + return nil, logical.ErrorResponse("okta auth method unexpected failure"), nil, nil + } + + oktaResponse := &logical.Response{ + Data: map[string]interface{}{}, + } + + // More about Okta's Auth transaction state here: + // https://developer.okta.com/docs/api/resources/authn#transaction-state + + // If lockout failures are not configured to be hidden, the status needs to + // be inspected for LOCKED_OUT status. Otherwise, it is handled above by an + // error returned during the authentication request. + switch result.Status { + case "LOCKED_OUT": + if b.Logger().IsDebug() { + b.Logger().Debug("user is locked out", "user", username) + } + return nil, logical.ErrorResponse("okta authentication failed"), nil, nil + + case "PASSWORD_EXPIRED": + if b.Logger().IsDebug() { + b.Logger().Debug("password is expired", "user", username) + } + return nil, logical.ErrorResponse("okta authentication failed"), nil, nil + + case "PASSWORD_WARN": + oktaResponse.AddWarning("Your Okta password is in warning state and needs to be changed soon.") + + case "MFA_ENROLL", "MFA_ENROLL_ACTIVATE": + if !cfg.BypassOktaMFA { + if b.Logger().IsDebug() { + b.Logger().Debug("user must enroll or complete mfa enrollment", "user", username) + } + return nil, logical.ErrorResponse("okta authentication failed: you must complete MFA enrollment to continue"), nil, nil + } + + case "MFA_REQUIRED": + // Per Okta documentation: Users are challenged for MFA (MFA_REQUIRED) + // before the Status of PASSWORD_EXPIRED is exposed (if they have an + // active factor enrollment). This bypass removes visibility + // into the authenticating user's password expiry, but still ensures the + // credentials are valid and the user is not locked out. 
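+	// (When bypass_okta_mfa is enabled, the branch below simply rewrites
+	// the transaction status to SUCCESS and no factor is ever verified.)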
+ // + // API reference: https://developer.okta.com/docs/reference/api/authn/#verify-factor + if cfg.BypassOktaMFA { + result.Status = "SUCCESS" + break + } + + var selectedFactor, totpFactor, pushFactor *mfaFactor + + // Scan for available factors + for _, v := range result.Embedded.Factors { + v := v // create a new copy since we'll be taking the address later + + if preferredProvider != "" && preferredProvider != v.Provider { + continue + } + + if !strutil.StrListContains(b.getSupportedProviders(), v.Provider) { + continue + } + + switch v.Type { + case mfaTOTPMethod: + totpFactor = &v + case mfaPushMethod: + pushFactor = &v + } + } + + // Okta push and totp, and Google totp are currently supported. + // If a totp passcode is provided during login and is supported, + // that will be the preferred method. + switch { + case totpFactor != nil && totp != "": + selectedFactor = totpFactor + case pushFactor != nil && pushFactor.Provider == oktaProvider: + selectedFactor = pushFactor + case totpFactor != nil && totp == "": + return nil, logical.ErrorResponse("'totp' passcode parameter is required to perform MFA"), nil, nil + default: + return nil, logical.ErrorResponse("Okta Verify Push or TOTP or Google TOTP factor is required in order to perform MFA"), nil, nil + } + + requestPath := fmt.Sprintf("authn/factors/%s/verify", selectedFactor.Id) + + payload := map[string]interface{}{ + "stateToken": result.StateToken, + } + if selectedFactor.Type == mfaTOTPMethod { + payload["passCode"] = totp + } + + verifyReq, err := shim.NewRequest("POST", requestPath, payload) + if err != nil { + return nil, nil, nil, err + } + if len(req.Headers["X-Forwarded-For"]) > 0 { + verifyReq.Header.Set("X-Forwarded-For", req.Headers[textproto.CanonicalMIMEHeaderKey("X-Forwarded-For")][0]) + } + + rsp, err := shim.Do(verifyReq, &result) + if err != nil { + return nil, logical.ErrorResponse(fmt.Sprintf("Okta auth failed: %v", err)), nil, nil + } + if rsp == nil { + return nil, logical.ErrorResponse("okta auth backend unexpected failure"), nil, nil + } + for result.Status == "MFA_CHALLENGE" { + switch result.FactorResult { + case "WAITING": + verifyReq, err := shim.NewRequest("POST", requestPath, payload) + if err != nil { + return nil, logical.ErrorResponse(fmt.Sprintf("okta auth failed creating verify request: %v", err)), nil, nil + } + rsp, err := shim.Do(verifyReq, &result) + + // Store number challenge if found + numberChallenge := result.Embedded.Factor.Embedded.Challenge.CorrectAnswer + if numberChallenge != nil { + if nonce == "" { + return nil, logical.ErrorResponse("nonce must be provided during login request when presented with number challenge"), nil, nil + } + + b.verifyCache.SetDefault(nonce, *numberChallenge) + } + + if err != nil { + return nil, logical.ErrorResponse(fmt.Sprintf("Okta auth failed checking loop: %v", err)), nil, nil + } + if rsp == nil { + return nil, logical.ErrorResponse("okta auth backend unexpected failure"), nil, nil + } + + timer := time.NewTimer(1 * time.Second) + select { + case <-timer.C: + // Continue + case <-ctx.Done(): + timer.Stop() + return nil, logical.ErrorResponse("exiting pending mfa challenge"), nil, nil + } + case "REJECTED": + return nil, logical.ErrorResponse("multi-factor authentication denied"), nil, nil + case "TIMEOUT": + return nil, logical.ErrorResponse("failed to complete multi-factor authentication"), nil, nil + case "SUCCESS": + // Allowed + default: + if b.Logger().IsDebug() { + b.Logger().Debug("unhandled result status", "status", result.Status, 
"factorstatus", result.FactorResult) + } + return nil, logical.ErrorResponse("okta authentication failed"), nil, nil + } + } + + case "SUCCESS": + // Do nothing here + + default: + if b.Logger().IsDebug() { + b.Logger().Debug("unhandled result status", "status", result.Status) + } + return nil, logical.ErrorResponse("okta authentication failed"), nil, nil + } + + // Verify result status again in case a switch case above modifies result + switch { + case result.Status == "SUCCESS", + result.Status == "PASSWORD_WARN", + result.Status == "MFA_REQUIRED" && cfg.BypassOktaMFA, + result.Status == "MFA_ENROLL" && cfg.BypassOktaMFA, + result.Status == "MFA_ENROLL_ACTIVATE" && cfg.BypassOktaMFA: + // Allowed + default: + if b.Logger().IsDebug() { + b.Logger().Debug("authentication returned a non-success status", "status", result.Status) + } + return nil, logical.ErrorResponse("okta authentication failed"), nil, nil + } + + var allGroups []string + // Only query the Okta API for group membership if we have a token + client, oktactx := shim.Client() + if client != nil { + oktaGroups, err := b.getOktaGroups(oktactx, client, &result.Embedded.User) + if err != nil { + return nil, logical.ErrorResponse(fmt.Sprintf("okta failure retrieving groups: %v", err)), nil, nil + } + if len(oktaGroups) == 0 { + errString := fmt.Sprintf( + "no Okta groups found; only policies from locally-defined groups available") + oktaResponse.AddWarning(errString) + } + allGroups = append(allGroups, oktaGroups...) + } + + // Import the custom added groups from okta backend + user, err := b.User(ctx, req.Storage, username) + if err != nil { + if b.Logger().IsDebug() { + b.Logger().Debug("error looking up user", "error", err) + } + } + if err == nil && user != nil && user.Groups != nil { + if b.Logger().IsDebug() { + b.Logger().Debug("adding local groups", "num_local_groups", len(user.Groups), "local_groups", user.Groups) + } + allGroups = append(allGroups, user.Groups...) + } + + // Retrieve policies + var policies []string + for _, groupName := range allGroups { + entry, _, err := b.Group(ctx, req.Storage, groupName) + if err != nil { + if b.Logger().IsDebug() { + b.Logger().Debug("error looking up group policies", "error", err) + } + } + if err == nil && entry != nil && entry.Policies != nil { + policies = append(policies, entry.Policies...) + } + } + + // Merge local Policies into Okta Policies + if user != nil && user.Policies != nil { + policies = append(policies, user.Policies...) + } + + return policies, oktaResponse, allGroups, nil +} + +func (b *backend) getOktaGroups(ctx context.Context, client *okta.Client, user *okta.User) ([]string, error) { + groups, resp, err := client.User.ListUserGroups(ctx, user.Id) + if err != nil { + return nil, err + } + oktaGroups := make([]string, 0, len(groups)) + for _, group := range groups { + oktaGroups = append(oktaGroups, group.Profile.Name) + } + for resp.HasNextPage() { + var nextGroups []*okta.Group + resp, err = resp.Next(ctx, &nextGroups) + if err != nil { + return nil, err + } + for _, group := range nextGroups { + oktaGroups = append(oktaGroups, group.Profile.Name) + } + } + if b.Logger().IsDebug() { + b.Logger().Debug("Groups fetched from Okta", "num_groups", len(oktaGroups), "groups", fmt.Sprintf("%#v", oktaGroups)) + } + return oktaGroups, nil +} + +const backendHelp = ` +The Okta credential provider allows authentication querying, +checking username and password, and associating policies. If an api token is +configured groups are pulled down from Okta. 
+
+Configuration of the connection is done through the "config", "users", and
+"groups" endpoints by a user with root access. Authentication is then done
+by supplying the two fields for "login".
+`
diff --git a/builtin/credential/okta/backend_test.go b/builtin/credential/okta/backend_test.go
new file mode 100644
index 0000000..85642e8
--- /dev/null
+++ b/builtin/credential/okta/backend_test.go
@@ -0,0 +1,300 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package okta
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"strings"
+	"testing"
+	"time"
+
+	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/helper/testhelpers"
+	logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical"
+	"github.com/hashicorp/vault/sdk/helper/logging"
+	"github.com/hashicorp/vault/sdk/helper/policyutil"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/okta/okta-sdk-golang/v2/okta"
+	"github.com/okta/okta-sdk-golang/v2/okta/query"
+	"github.com/stretchr/testify/require"
+)
+
+// To run this test, set the following env variables:
+// VAULT_ACC=1
+// OKTA_ORG=dev-219337
+// OKTA_API_TOKEN=
+// OKTA_USERNAME=test3@example.com
+// OKTA_PASSWORD=
+//
+// You will need to install the Okta client app on your mobile device and
+// setup MFA in order to use the Okta web UI. This test does not exercise
+// MFA however (which is an enterprise feature), and therefore the test
+// user in OKTA_USERNAME should not be configured with it. Currently
+// test3@example.com is not a member of testgroup, which is the group with
+// the profile that requires MFA.
+func TestBackend_Config(t *testing.T) {
+	if os.Getenv("VAULT_ACC") == "" {
+		t.SkipNow()
+	}
+
+	// Ensure each cred is populated.
+	credNames := []string{
+		"OKTA_USERNAME",
+		"OKTA_PASSWORD",
+		"OKTA_API_TOKEN",
+	}
+	testhelpers.SkipUnlessEnvVarsSet(t, credNames)
+
+	defaultLeaseTTLVal := time.Hour * 12
+	maxLeaseTTLVal := time.Hour * 24
+	b, err := Factory(context.Background(), &logical.BackendConfig{
+		Logger: logging.NewVaultLogger(log.Trace),
+		System: &logical.StaticSystemView{
+			DefaultLeaseTTLVal: defaultLeaseTTLVal,
+			MaxLeaseTTLVal:     maxLeaseTTLVal,
+		},
+	})
+	if err != nil {
+		t.Fatalf("Unable to create backend: %s", err)
+	}
+
+	username := os.Getenv("OKTA_USERNAME")
+	password := os.Getenv("OKTA_PASSWORD")
+	token := os.Getenv("OKTA_API_TOKEN")
+	groupIDs := createOktaGroups(t, username, token, os.Getenv("OKTA_ORG"))
+	defer deleteOktaGroups(t, token, os.Getenv("OKTA_ORG"), groupIDs)
+
+	configData := map[string]interface{}{
+		"org_name": os.Getenv("OKTA_ORG"),
+		"base_url": "oktapreview.com",
+	}
+
+	updatedDuration := time.Hour * 1
+	configDataToken := map[string]interface{}{
+		"api_token": token,
+		"token_ttl": "1h",
+	}
+
+	logicaltest.Test(t, logicaltest.TestCase{
+		AcceptanceTest:    true,
+		PreCheck:          func() { testAccPreCheck(t) },
+		CredentialBackend: b,
+		Steps: []logicaltest.TestStep{
+			// 1. Create the initial config.
+			testConfigCreate(t, configData),
+			// 2. Login with bad password, expect failure (E0000004=okta auth failure).
+			testLoginWrite(t, username, "wrong", "E0000004", 0, nil),
+			// 3. Make our user belong to two groups and have one user-specific policy.
+			testAccUserGroups(t, username, "local_grouP,lOcal_group2", []string{"user_policy"}),
+			// 4. Create the group local_group, assign it a single policy.
+			testAccGroups(t, "local_groUp", "loCal_group_policy"),
+			// 5. Login with good password, expect user to have their user-specific
+			// policy and the policy of the one valid group they belong to.
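+			//    Note that the group was registered above with mixed-case
+			//    names; group lookup is case-insensitive, so its policy is
+			//    still expected here.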
+ testLoginWrite(t, username, password, "", defaultLeaseTTLVal, []string{"local_group_policy", "user_policy"}), + // 6. Create the group everyone, assign it two policies. This is a + // magic group name in okta that always exists and which every + // user automatically belongs to. + testAccGroups(t, "everyoNe", "everyone_grouP_policy,eveRy_group_policy2"), + // 7. Login as before, expect same result + testLoginWrite(t, username, password, "", defaultLeaseTTLVal, []string{"local_group_policy", "user_policy"}), + // 8. Add API token so we can lookup groups + testConfigUpdate(t, configDataToken), + testConfigRead(t, token, configData), + // 10. Login should now lookup okta groups; since all okta users are + // in the "everyone" group, that should be returned; since we + // defined policies attached to the everyone group, we should now + // see those policies attached to returned vault token. + testLoginWrite(t, username, password, "", updatedDuration, []string{"everyone_group_policy", "every_group_policy2", "local_group_policy", "user_policy"}), + testAccGroups(t, "locAl_group2", "testgroup_group_policy"), + testLoginWrite(t, username, password, "", updatedDuration, []string{"everyone_group_policy", "every_group_policy2", "local_group_policy", "testgroup_group_policy", "user_policy"}), + }, + }) +} + +func createOktaGroups(t *testing.T, username string, token string, org string) []string { + orgURL := "https://" + org + "." + previewBaseURL + ctx, client, err := okta.NewClient(context.Background(), okta.WithOrgUrl(orgURL), okta.WithToken(token)) + require.Nil(t, err) + + users, _, err := client.User.ListUsers(ctx, &query.Params{ + Q: username, + }) + require.Nil(t, err) + require.Len(t, users, 1) + userID := users[0].Id + var groupIDs []string + + // Verify that login's call to list the groups of the user logging in will page + // through multiple result sets; note here + // https://developer.okta.com/docs/reference/api/groups/#list-groups-with-defaults + // that "If you don't specify a value for limit and don't specify a query, + // only 200 results are returned for most orgs." + for i := 0; i < 201; i++ { + name := fmt.Sprintf("TestGroup%d", i) + groups, _, err := client.Group.ListGroups(ctx, &query.Params{ + Q: name, + }) + require.Nil(t, err) + + var groupID string + if len(groups) == 0 { + group, _, err := client.Group.CreateGroup(ctx, okta.Group{ + Profile: &okta.GroupProfile{ + Name: fmt.Sprintf("TestGroup%d", i), + }, + }) + require.Nil(t, err) + groupID = group.Id + } else { + groupID = groups[0].Id + } + groupIDs = append(groupIDs, groupID) + + _, err = client.Group.AddUserToGroup(ctx, groupID, userID) + require.Nil(t, err) + } + return groupIDs +} + +func deleteOktaGroups(t *testing.T, token string, org string, groupIDs []string) { + orgURL := "https://" + org + "." 
+ previewBaseURL
+	ctx, client, err := okta.NewClient(context.Background(), okta.WithOrgUrl(orgURL), okta.WithToken(token))
+	require.Nil(t, err)
+
+	for _, groupID := range groupIDs {
+		_, err := client.Group.DeleteGroup(ctx, groupID)
+		require.Nil(t, err)
+	}
+}
+
+func testLoginWrite(t *testing.T, username, password, reason string, expectedTTL time.Duration, policies []string) logicaltest.TestStep {
+	return logicaltest.TestStep{
+		Operation: logical.UpdateOperation,
+		Path:      "login/" + username,
+		ErrorOk:   true,
+		Data: map[string]interface{}{
+			"password": password,
+		},
+		Check: func(resp *logical.Response) error {
+			if resp.IsError() {
+				if reason == "" || !strings.Contains(resp.Error().Error(), reason) {
+					return resp.Error()
+				}
+			} else if reason != "" {
+				return fmt.Errorf("expected error containing %q, got no error", reason)
+			}
+
+			if resp.Auth != nil {
+				if !policyutil.EquivalentPolicies(resp.Auth.Policies, policies) {
+					return fmt.Errorf("policy mismatch expected %v but got %v", policies, resp.Auth.Policies)
+				}
+
+				actualTTL := resp.Auth.LeaseOptions.TTL
+				if actualTTL != expectedTTL {
+					return fmt.Errorf("TTL mismatch expected %v but got %v", expectedTTL, actualTTL)
+				}
+			}
+
+			return nil
+		},
+	}
+}
+
+func testConfigCreate(t *testing.T, d map[string]interface{}) logicaltest.TestStep {
+	return logicaltest.TestStep{
+		Operation: logical.CreateOperation,
+		Path:      "config",
+		Data:      d,
+	}
+}
+
+func testConfigUpdate(t *testing.T, d map[string]interface{}) logicaltest.TestStep {
+	return logicaltest.TestStep{
+		Operation: logical.UpdateOperation,
+		Path:      "config",
+		Data:      d,
+	}
+}
+
+func testConfigRead(t *testing.T, token string, d map[string]interface{}) logicaltest.TestStep {
+	return logicaltest.TestStep{
+		Operation: logical.ReadOperation,
+		Path:      "config",
+		Check: func(resp *logical.Response) error {
+			if resp.IsError() {
+				return resp.Error()
+			}
+
+			if resp.Data["org_name"] != d["org_name"] {
+				return fmt.Errorf("org mismatch expected %s but got %s", d["org_name"], resp.Data["org_name"])
+			}
+
+			if resp.Data["base_url"] != d["base_url"] {
+				return fmt.Errorf("base_url mismatch expected %s but got %s", d["base_url"], resp.Data["base_url"])
+			}
+
+			for _, value := range resp.Data {
+				if value == token {
+					return fmt.Errorf("token should not be returned on a read request")
+				}
+			}
+
+			return nil
+		},
+	}
+}
+
+func testAccPreCheck(t *testing.T) {
+	if v := os.Getenv("OKTA_USERNAME"); v == "" {
+		t.Fatal("OKTA_USERNAME must be set for acceptance tests")
+	}
+
+	if v := os.Getenv("OKTA_PASSWORD"); v == "" {
+		t.Fatal("OKTA_PASSWORD must be set for acceptance tests")
+	}
+
+	if v := os.Getenv("OKTA_ORG"); v == "" {
+		t.Fatal("OKTA_ORG must be set for acceptance tests")
+	}
+
+	if v := os.Getenv("OKTA_API_TOKEN"); v == "" {
+		t.Fatal("OKTA_API_TOKEN must be set for acceptance tests")
+	}
+}
+
+func testAccUserGroups(t *testing.T, user string, groups interface{}, policies interface{}) logicaltest.TestStep {
+	return logicaltest.TestStep{
+		Operation: logical.UpdateOperation,
+		Path:      "users/" + user,
+		Data: map[string]interface{}{
+			"groups":   groups,
+			"policies": policies,
+		},
+	}
+}
+
+func testAccGroups(t *testing.T, group string, policies interface{}) logicaltest.TestStep {
+	t.Logf("[testAccGroups] - Registering group %s, policy %s", group, policies)
+	return logicaltest.TestStep{
+		Operation: logical.UpdateOperation,
+		Path:      "groups/" + group,
+		Data: map[string]interface{}{
+			"policies": policies,
+		},
+	}
+}
+
+func testAccLogin(t *testing.T, user, password string, keys []string) logicaltest.TestStep {
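+	// Helper step: logs in unauthenticated as the given user and delegates
+	// the check of the returned auth (expected policy keys) to
+	// logicaltest.TestCheckAuth.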
+	return logicaltest.TestStep{
+		Operation: logical.UpdateOperation,
+		Path:      "login/" + user,
+		Data: map[string]interface{}{
+			"password": password,
+		},
+		Unauthenticated: true,
+
+		Check: logicaltest.TestCheckAuth(keys),
+	}
+}
diff --git a/builtin/credential/okta/cli.go b/builtin/credential/okta/cli.go
new file mode 100644
index 0000000..df25296
--- /dev/null
+++ b/builtin/credential/okta/cli.go
@@ -0,0 +1,122 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package okta
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/go-secure-stdlib/base62"
+	pwd "github.com/hashicorp/go-secure-stdlib/password"
+	"github.com/hashicorp/vault/api"
+)
+
+// CLIHandler struct
+type CLIHandler struct{}
+
+// Auth cli method
+func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) {
+	mount, ok := m["mount"]
+	if !ok {
+		mount = "okta"
+	}
+
+	username, ok := m["username"]
+	if !ok {
+		return nil, fmt.Errorf("'username' var must be set")
+	}
+	password, ok := m["password"]
+	if !ok {
+		fmt.Fprintf(os.Stderr, "Password (will be hidden): ")
+		var err error
+		password, err = pwd.Read(os.Stdin)
+		fmt.Fprintf(os.Stderr, "\n")
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	data := map[string]interface{}{
+		"password": password,
+	}
+
+	// Okta or Google totp code
+	if totp, ok := m["totp"]; ok {
+		data["totp"] = totp
+	}
+
+	// provider is an optional parameter
+	if provider, ok := m["provider"]; ok {
+		data["provider"] = provider
+	}
+
+	nonce := base62.MustRandom(20)
+	data["nonce"] = nonce
+
+	// Create a done channel to signal termination of the login so that we can
+	// clean up the goroutine
+	doneCh := make(chan struct{})
+	defer close(doneCh)
+
+	go func() {
+		for {
+			timer := time.NewTimer(time.Second)
+			select {
+			case <-doneCh:
+				timer.Stop()
+				return
+			case <-timer.C:
+			}
+
+			resp, _ := c.Logical().Read(fmt.Sprintf("auth/%s/verify/%s", mount, nonce))
+			if resp != nil {
+				fmt.Fprintf(os.Stderr, "In Okta Verify, tap the number %q\n", resp.Data["correct_answer"].(json.Number))
+				return
+			}
+		}
+	}()
+
+	path := fmt.Sprintf("auth/%s/login/%s", mount, username)
+	secret, err := c.Logical().Write(path, data)
+	if err != nil {
+		return nil, err
+	}
+	if secret == nil {
+		return nil, fmt.Errorf("empty response from credential provider")
+	}
+
+	return secret, nil
+}
+
+// Help method for okta cli
+func (h *CLIHandler) Help() string {
+	help := `
+Usage: vault login -method=okta [CONFIG K=V...]
+
+  The Okta auth method allows users to authenticate using Okta.
+
+  Authenticate as "sally":
+
+      $ vault login -method=okta username=sally
+      Password (will be hidden):
+
+  Authenticate as "bob":
+
+      $ vault login -method=okta username=bob password=password
+
+Configuration:
+
+  password=<string>
+      Okta password to use for authentication. If not provided, the CLI will
+      prompt for this on stdin.
+
+  username=<string>
+      Okta username to use for authentication.
+`
+
+	return strings.TrimSpace(help)
+}
diff --git a/builtin/credential/okta/cmd/okta/main.go b/builtin/credential/okta/cmd/okta/main.go
new file mode 100644
index 0000000..e28b34a
--- /dev/null
+++ b/builtin/credential/okta/cmd/okta/main.go
@@ -0,0 +1,34 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "os" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/credential/okta" + "github.com/hashicorp/vault/sdk/plugin" +) + +func main() { + apiClientMeta := &api.PluginAPIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args[1:]) + + tlsConfig := apiClientMeta.GetTLSConfig() + tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) + + if err := plugin.ServeMultiplex(&plugin.ServeOpts{ + BackendFactoryFunc: okta.Factory, + // set the TLSProviderFunc so that the plugin maintains backwards + // compatibility with Vault versions that don’t support plugin AutoMTLS + TLSProviderFunc: tlsProviderFunc, + }); err != nil { + logger := hclog.New(&hclog.LoggerOptions{}) + + logger.Error("plugin shutting down", "error", err) + os.Exit(1) + } +} diff --git a/builtin/credential/okta/path_config.go b/builtin/credential/okta/path_config.go new file mode 100644 index 0000000..045d4fd --- /dev/null +++ b/builtin/credential/okta/path_config.go @@ -0,0 +1,381 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package okta + +import ( + "context" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + oktaold "github.com/chrismalek/oktasdk-go/okta" + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/tokenutil" + "github.com/hashicorp/vault/sdk/logical" + oktanew "github.com/okta/okta-sdk-golang/v2/okta" +) + +const ( + defaultBaseURL = "okta.com" + previewBaseURL = "oktapreview.com" +) + +func pathConfig(b *backend) *framework.Path { + p := &framework.Path{ + Pattern: `config`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixOkta, + Action: "Configure", + }, + + Fields: map[string]*framework.FieldSchema{ + "organization": { + Type: framework.TypeString, + Description: "Use org_name instead.", + Deprecated: true, + }, + "org_name": { + Type: framework.TypeString, + Description: "Name of the organization to be used in the Okta API.", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Organization Name", + }, + }, + "token": { + Type: framework.TypeString, + Description: "Use api_token instead.", + Deprecated: true, + }, + "api_token": { + Type: framework.TypeString, + Description: "Okta API key.", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "API Token", + }, + }, + "base_url": { + Type: framework.TypeString, + Description: `The base domain to use for the Okta API. When not specified in the configuration, "okta.com" is used.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Base URL", + }, + }, + "production": { + Type: framework.TypeBool, + Description: `Use base_url instead.`, + Deprecated: true, + }, + "ttl": { + Type: framework.TypeDurationSecond, + Description: tokenutil.DeprecationText("token_ttl"), + Deprecated: true, + }, + "max_ttl": { + Type: framework.TypeDurationSecond, + Description: tokenutil.DeprecationText("token_max_ttl"), + Deprecated: true, + }, + "bypass_okta_mfa": { + Type: framework.TypeBool, + Description: `When set true, requests by Okta for a MFA check will be bypassed. 
This also disallows certain status checks on the account, such as whether the password is expired.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Bypass Okta MFA", + }, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "configuration", + }, + }, + logical.CreateOperation: &framework.PathOperation{ + Callback: b.pathConfigWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + }, + }, + }, + + ExistenceCheck: b.pathConfigExistenceCheck, + + HelpSynopsis: pathConfigHelp, + } + + tokenutil.AddTokenFields(p.Fields) + p.Fields["token_policies"].Description += ". This will apply to all tokens generated by this auth method, in addition to any configured for specific users/groups." + return p +} + +// Config returns the configuration for this backend. +func (b *backend) Config(ctx context.Context, s logical.Storage) (*ConfigEntry, error) { + entry, err := s.Get(ctx, "config") + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result ConfigEntry + if entry != nil { + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + } + + if result.TokenTTL == 0 && result.TTL > 0 { + result.TokenTTL = result.TTL + } + if result.TokenMaxTTL == 0 && result.MaxTTL > 0 { + result.TokenMaxTTL = result.MaxTTL + } + + return &result, nil +} + +func (b *backend) pathConfigRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + cfg, err := b.Config(ctx, req.Storage) + if err != nil { + return nil, err + } + if cfg == nil { + return nil, nil + } + + data := map[string]interface{}{ + "organization": cfg.Org, + "org_name": cfg.Org, + "bypass_okta_mfa": cfg.BypassOktaMFA, + } + cfg.PopulateTokenData(data) + + if cfg.BaseURL != "" { + data["base_url"] = cfg.BaseURL + } + if cfg.Production != nil { + data["production"] = *cfg.Production + } + if cfg.TTL > 0 { + data["ttl"] = int64(cfg.TTL.Seconds()) + } + if cfg.MaxTTL > 0 { + data["max_ttl"] = int64(cfg.MaxTTL.Seconds()) + } + + resp := &logical.Response{ + Data: data, + } + + if cfg.BypassOktaMFA { + resp.AddWarning("Okta MFA bypass is configured. In addition to ignoring Okta MFA requests, certain other account statuses will not be seen, such as PASSWORD_EXPIRED. 
Authentication will succeed in these cases.") + } + + return resp, nil +} + +func (b *backend) pathConfigWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + cfg, err := b.Config(ctx, req.Storage) + if err != nil { + return nil, err + } + + // Due to the existence check, entry will only be nil if it's a create + // operation, so just create a new one + if cfg == nil { + cfg = &ConfigEntry{} + } + + org, ok := d.GetOk("org_name") + if ok { + cfg.Org = org.(string) + } + if cfg.Org == "" { + org, ok = d.GetOk("organization") + if ok { + cfg.Org = org.(string) + } + } + if cfg.Org == "" && req.Operation == logical.CreateOperation { + return logical.ErrorResponse("org_name is missing"), nil + } + + token, ok := d.GetOk("api_token") + if ok { + cfg.Token = token.(string) + } else if token, ok = d.GetOk("token"); ok { + cfg.Token = token.(string) + } + + baseURLRaw, ok := d.GetOk("base_url") + if ok { + baseURL := baseURLRaw.(string) + _, err = url.Parse(fmt.Sprintf("https://%s,%s", cfg.Org, baseURL)) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("Error parsing given base_url: %s", err)), nil + } + cfg.BaseURL = baseURL + } + + // We only care about the production flag when base_url is not set. It is + // for compatibility reasons. + if cfg.BaseURL == "" { + productionRaw, ok := d.GetOk("production") + if ok { + production := productionRaw.(bool) + cfg.Production = &production + } + } else { + // clear out old production flag if base_url is set + cfg.Production = nil + } + + bypass, ok := d.GetOk("bypass_okta_mfa") + if ok { + cfg.BypassOktaMFA = bypass.(bool) + } + + if err := cfg.ParseTokenFields(req, d); err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + + // Handle upgrade cases + { + if err := tokenutil.UpgradeValue(d, "ttl", "token_ttl", &cfg.TTL, &cfg.TokenTTL); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + if err := tokenutil.UpgradeValue(d, "max_ttl", "token_max_ttl", &cfg.MaxTTL, &cfg.TokenMaxTTL); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + } + + jsonCfg, err := logical.StorageEntryJSON("config", cfg) + if err != nil { + return nil, err + } + if err := req.Storage.Put(ctx, jsonCfg); err != nil { + return nil, err + } + + var resp *logical.Response + if cfg.BypassOktaMFA { + resp = new(logical.Response) + resp.AddWarning("Okta MFA bypass is configured. In addition to ignoring Okta MFA requests, certain other account statuses will not be seen, such as PASSWORD_EXPIRED. 
Authentication will succeed in these cases.")
+	}
+
+	return resp, nil
+}
+
+func (b *backend) pathConfigExistenceCheck(ctx context.Context, req *logical.Request, d *framework.FieldData) (bool, error) {
+	cfg, err := b.Config(ctx, req.Storage)
+	if err != nil {
+		return false, err
+	}
+
+	return cfg != nil, nil
+}
+
+type oktaShim interface {
+	Client() (*oktanew.Client, context.Context)
+	NewRequest(method string, url string, body interface{}) (*http.Request, error)
+	Do(req *http.Request, v interface{}) (interface{}, error)
+}
+
+type oktaShimNew struct {
+	client *oktanew.Client
+	ctx    context.Context
+}
+
+func (new *oktaShimNew) Client() (*oktanew.Client, context.Context) {
+	return new.client, new.ctx
+}
+
+func (new *oktaShimNew) NewRequest(method string, url string, body interface{}) (*http.Request, error) {
+	if !strings.HasPrefix(url, "/") {
+		url = "/api/v1/" + url
+	}
+	return new.client.GetRequestExecutor().NewRequest(method, url, body)
+}
+
+func (new *oktaShimNew) Do(req *http.Request, v interface{}) (interface{}, error) {
+	return new.client.GetRequestExecutor().Do(new.ctx, req, v)
+}
+
+type oktaShimOld struct {
+	client *oktaold.Client
+}
+
+func (new *oktaShimOld) Client() (*oktanew.Client, context.Context) {
+	return nil, nil
+}
+
+func (new *oktaShimOld) NewRequest(method string, url string, body interface{}) (*http.Request, error) {
+	return new.client.NewRequest(method, url, body)
+}
+
+func (new *oktaShimOld) Do(req *http.Request, v interface{}) (interface{}, error) {
+	return new.client.Do(req, v)
+}
+
+// OktaClient creates a basic okta client connection
+func (c *ConfigEntry) OktaClient(ctx context.Context) (oktaShim, error) {
+	baseURL := defaultBaseURL
+	if c.Production != nil {
+		if !*c.Production {
+			baseURL = previewBaseURL
+		}
+	}
+	if c.BaseURL != "" {
+		baseURL = c.BaseURL
+	}
+
+	if c.Token != "" {
+		ctx, client, err := oktanew.NewClient(ctx,
+			oktanew.WithOrgUrl("https://"+c.Org+"."+baseURL),
+			oktanew.WithToken(c.Token))
+		if err != nil {
+			return nil, err
+		}
+		return &oktaShimNew{client, ctx}, nil
+	}
+	client, err := oktaold.NewClientWithDomain(cleanhttp.DefaultClient(), c.Org, baseURL, "")
+	if err != nil {
+		return nil, err
+	}
+	return &oktaShimOld{client}, nil
+}
+
+// ConfigEntry for Okta
+type ConfigEntry struct {
+	tokenutil.TokenParams
+
+	Org           string        `json:"organization"`
+	Token         string        `json:"token"`
+	BaseURL       string        `json:"base_url"`
+	Production    *bool         `json:"is_production,omitempty"`
+	TTL           time.Duration `json:"ttl"`
+	MaxTTL        time.Duration `json:"max_ttl"`
+	BypassOktaMFA bool          `json:"bypass_okta_mfa"`
+}
+
+const pathConfigHelp = `
+This endpoint allows you to configure the Okta auth method and its
+configuration options.
+
+The Okta organization is the part at the front of your Okta URL, e.g.
+https://ORG.okta.com
+`
diff --git a/builtin/credential/okta/path_groups.go b/builtin/credential/okta/path_groups.go
new file mode 100644
index 0000000..753c1ca
--- /dev/null
+++ b/builtin/credential/okta/path_groups.go
@@ -0,0 +1,219 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package okta
+
+import (
+	"context"
+	"strings"
+
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/helper/policyutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+func pathGroupsList(b *backend) *framework.Path {
+	return &framework.Path{
+		Pattern: "groups/?$",
+
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixOkta,
+			OperationSuffix: "groups",
+			Navigation:      true,
+			ItemType:        "Group",
+		},
+
+		Callbacks: map[logical.Operation]framework.OperationFunc{
+			logical.ListOperation: b.pathGroupList,
+		},
+
+		HelpSynopsis:    pathGroupHelpSyn,
+		HelpDescription: pathGroupHelpDesc,
+	}
+}
+
+func pathGroups(b *backend) *framework.Path {
+	return &framework.Path{
+		Pattern: `groups/(?P<name>.+)`,
+
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixOkta,
+			OperationSuffix: "group",
+			Action:          "Create",
+			ItemType:        "Group",
+		},
+
+		Fields: map[string]*framework.FieldSchema{
+			"name": {
+				Type:        framework.TypeString,
+				Description: "Name of the Okta group.",
+			},
+
+			"policies": {
+				Type:        framework.TypeCommaStringSlice,
+				Description: "Comma-separated list of policies associated with the group.",
+				DisplayAttrs: &framework.DisplayAttributes{
+					Description: "A list of policies associated with the group.",
+				},
+			},
+		},
+
+		Callbacks: map[logical.Operation]framework.OperationFunc{
+			logical.DeleteOperation: b.pathGroupDelete,
+			logical.ReadOperation:   b.pathGroupRead,
+			logical.UpdateOperation: b.pathGroupWrite,
+		},
+
+		HelpSynopsis:    pathGroupHelpSyn,
+		HelpDescription: pathGroupHelpDesc,
+	}
+}
+
+// We look up groups in a case-insensitive manner since Okta is case-preserving
+// but case-insensitive for comparisons
+func (b *backend) Group(ctx context.Context, s logical.Storage, n string) (*GroupEntry, string, error) {
+	canonicalName := n
+	entry, err := s.Get(ctx, "group/"+n)
+	if err != nil {
+		return nil, "", err
+	}
+	if entry == nil {
+		entries, err := groupList(ctx, s)
+		if err != nil {
+			return nil, "", err
+		}
+
+		for _, groupName := range entries {
+			if strings.EqualFold(groupName, n) {
+				entry, err = s.Get(ctx, "group/"+groupName)
+				if err != nil {
+					return nil, "", err
+				}
+				canonicalName = groupName
+				break
+			}
+		}
+	}
+	if entry == nil {
+		return nil, "", nil
+	}
+
+	var result GroupEntry
+	if err := entry.DecodeJSON(&result); err != nil {
+		return nil, "", err
+	}
+
+	return &result, canonicalName, nil
+}
+
+func (b *backend) pathGroupDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	name := d.Get("name").(string)
+	if len(name) == 0 {
+		return logical.ErrorResponse("'name' must be supplied"), nil
+	}
+
+	entry, canonicalName, err := b.Group(ctx, req.Storage, name)
+	if err != nil {
+		return nil, err
+	}
+	if entry != nil {
+		err := req.Storage.Delete(ctx, "group/"+canonicalName)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return nil, nil
+}
+
+func (b *backend) pathGroupRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	name := d.Get("name").(string)
+	if len(name) == 0 {
+		return logical.ErrorResponse("'name' must be supplied"), nil
+	}
+
+	group, _, err := b.Group(ctx, req.Storage, name)
+	if err != nil {
+		return nil, err
+	}
+	if group == nil {
+		return nil, nil
+	}
+
+	return &logical.Response{
+		Data: map[string]interface{}{
+			"policies": group.Policies,
+		},
+	}, nil
+}
+
+func (b *backend) pathGroupWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	name := d.Get("name").(string)
+	if len(name) == 0 {
+		return logical.ErrorResponse("'name' must be supplied"), nil
+	}
+
+	// Check for an existing group, possibly lowercased, so that we keep using
+	// existing user-set values
+	_, canonicalName, err := b.Group(ctx, req.Storage, name)
+	if err != nil {
+		return nil, err
+	}
+	if canonicalName != "" {
+		name = canonicalName
+	} else {
+		name = strings.ToLower(name)
+	}
+
+	entry, err := logical.StorageEntryJSON("group/"+name, &GroupEntry{
+		Policies: policyutil.ParsePolicies(d.Get("policies")),
+	})
+	if err != nil {
+		return nil, err
+	}
+	if err := req.Storage.Put(ctx, entry); err != nil {
+		return nil, err
+	}
+
+	return nil, nil
+}
+
+func (b *backend) pathGroupList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	groups, err := groupList(ctx, req.Storage)
+	if err != nil {
+		return nil, err
+	}
+
+	return logical.ListResponse(groups), nil
+}
+
+func groupList(ctx context.Context, s logical.Storage) ([]string, error) {
+	keys, err := logical.CollectKeysWithPrefix(ctx, s, "group/")
+	if err != nil {
+		return nil, err
+	}
+
+	for i := range keys {
+		keys[i] = strings.TrimPrefix(keys[i], "group/")
+	}
+
+	return keys, nil
+}
+
+type GroupEntry struct {
+	Policies []string
+}
+
+const pathGroupHelpSyn = `
+Manage groups allowed to authenticate.
+`
+
+const pathGroupHelpDesc = `
+This endpoint allows you to create, read, update, and delete configuration
+for Okta groups that are allowed to authenticate, and associate policies to
+them.
+
+Deleting a group will not revoke auth for prior authenticated users in that
+group. To do this, do a revoke on "login/<username>" for
+the usernames you want revoked.
+`
diff --git a/builtin/credential/okta/path_groups_test.go b/builtin/credential/okta/path_groups_test.go
new file mode 100644
index 0000000..8e4ba8c
--- /dev/null
+++ b/builtin/credential/okta/path_groups_test.go
@@ -0,0 +1,111 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package okta + +import ( + "context" + "strings" + "testing" + "time" + + "github.com/go-test/deep" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" +) + +func TestGroupsList(t *testing.T) { + b, storage := getBackend(t) + + groups := []string{ + "%20\\", + "foo", + "zfoo", + "🙂", + "foo/nested", + "foo/even/more/nested", + } + + for _, group := range groups { + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "groups/" + group, + Storage: storage, + Data: map[string]interface{}{ + "policies": []string{group + "_a", group + "_b"}, + }, + } + + resp, err := b.HandleRequest(context.Background(), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + } + + for _, group := range groups { + for _, upper := range []bool{false, true} { + groupPath := group + if upper { + groupPath = strings.ToUpper(group) + } + req := &logical.Request{ + Operation: logical.ReadOperation, + Path: "groups/" + groupPath, + Storage: storage, + } + + resp, err := b.HandleRequest(context.Background(), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + if resp == nil { + t.Fatal("unexpected nil response") + } + + expected := []string{group + "_a", group + "_b"} + + if diff := deep.Equal(resp.Data["policies"].([]string), expected); diff != nil { + t.Fatal(diff) + } + } + } + + req := &logical.Request{ + Operation: logical.ListOperation, + Path: "groups", + Storage: storage, + } + + resp, err := b.HandleRequest(context.Background(), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + if diff := deep.Equal(resp.Data["keys"].([]string), groups); diff != nil { + t.Fatal(diff) + } +} + +func getBackend(t *testing.T) (logical.Backend, logical.Storage) { + defaultLeaseTTLVal := time.Hour * 12 + maxLeaseTTLVal := time.Hour * 24 + + config := &logical.BackendConfig{ + Logger: logging.NewVaultLogger(log.Trace), + + System: &logical.StaticSystemView{ + DefaultLeaseTTLVal: defaultLeaseTTLVal, + MaxLeaseTTLVal: maxLeaseTTLVal, + }, + StorageView: &logical.InmemStorage{}, + } + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatalf("unable to create backend: %v", err) + } + + return b, config.StorageView +} diff --git a/builtin/credential/okta/path_login.go b/builtin/credential/okta/path_login.go new file mode 100644 index 0000000..1f2cb09 --- /dev/null +++ b/builtin/credential/okta/path_login.go @@ -0,0 +1,255 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package okta
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/go-errors/errors"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/helper/policyutil"
+	"github.com/hashicorp/vault/sdk/helper/strutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+const (
+	googleProvider = "GOOGLE"
+	oktaProvider   = "OKTA"
+)
+
+func pathLogin(b *backend) *framework.Path {
+	return &framework.Path{
+		Pattern: `login/(?P<username>.+)`,
+
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixOkta,
+			OperationVerb:   "login",
+		},
+
+		Fields: map[string]*framework.FieldSchema{
+			"username": {
+				Type:        framework.TypeString,
+				Description: "Username to be used for login.",
+			},
+
+			"password": {
+				Type:        framework.TypeString,
+				Description: "Password for this user.",
+			},
+			"totp": {
+				Type:        framework.TypeString,
+				Description: "TOTP passcode.",
+			},
+			"nonce": {
+				Type: framework.TypeString,
+				Description: `Nonce provided if performing login that requires
+number verification challenge. Logins through the vault login CLI command will
+automatically generate a nonce.`,
+			},
+			"provider": {
+				Type:        framework.TypeString,
+				Description: "Preferred factor provider.",
+			},
+		},
+
+		Callbacks: map[logical.Operation]framework.OperationFunc{
+			logical.UpdateOperation:         b.pathLogin,
+			logical.AliasLookaheadOperation: b.pathLoginAliasLookahead,
+		},
+
+		HelpSynopsis:    pathLoginSyn,
+		HelpDescription: pathLoginDesc,
+	}
+}
+
+func (b *backend) getSupportedProviders() []string {
+	return []string{googleProvider, oktaProvider}
+}
+
+func (b *backend) pathLoginAliasLookahead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	username := d.Get("username").(string)
+	if username == "" {
+		return nil, fmt.Errorf("missing username")
+	}
+
+	return &logical.Response{
+		Auth: &logical.Auth{
+			Alias: &logical.Alias{
+				Name: username,
+			},
+		},
+	}, nil
+}
+
+func (b *backend) pathLogin(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	username := d.Get("username").(string)
+	password := d.Get("password").(string)
+	totp := d.Get("totp").(string)
+	nonce := d.Get("nonce").(string)
+	preferredProvider := strings.ToUpper(d.Get("provider").(string))
+	if preferredProvider != "" && !strutil.StrListContains(b.getSupportedProviders(), preferredProvider) {
+		return logical.ErrorResponse(fmt.Sprintf("provider %s is not among the supported ones %v", preferredProvider, b.getSupportedProviders())), nil
+	}
+
+	defer b.verifyCache.Delete(nonce)
+
+	policies, resp, groupNames, err := b.Login(ctx, req, username, password, totp, nonce, preferredProvider)
+	// Handle an internal error
+	if err != nil {
+		return nil, err
+	}
+	if resp != nil {
+		// Handle a logical error
+		if resp.IsError() {
+			return resp, nil
+		}
+	} else {
+		resp = &logical.Response{}
+	}
+
+	cfg, err := b.getConfig(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+
+	auth := &logical.Auth{
+		Metadata: map[string]string{
+			"username": username,
+			"policies": strings.Join(policies, ","),
+		},
+		InternalData: map[string]interface{}{
+			"password": password,
+		},
+		DisplayName: username,
+		Alias: &logical.Alias{
+			Name: username,
+		},
+	}
+	cfg.PopulateTokenAuth(auth)
+
+	// Add in configured policies from mappings
+	if len(policies) > 0 {
+		auth.Policies = append(auth.Policies, policies...)
+	}
+
+	resp.Auth = auth
+
+	for _, groupName := range groupNames {
+		if groupName == "" {
+			continue
+		}
+		resp.Auth.GroupAliases = append(resp.Auth.GroupAliases, &logical.Alias{
+			Name: groupName,
+		})
+	}
+
+	return resp, nil
+}
+
+func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	username := req.Auth.Metadata["username"]
+	password := req.Auth.InternalData["password"].(string)
+
+	var nonce string
+	if d != nil {
+		nonce = d.Get("nonce").(string)
+	}
+
+	cfg, err := b.getConfig(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+
+	// No TOTP entry is possible on renew. If push MFA is enabled it will still be triggered, however.
+	// Sending "" as the totp will prompt the push action if it is configured.
+	loginPolicies, resp, groupNames, err := b.Login(ctx, req, username, password, "", nonce, "")
+	if err != nil || (resp != nil && resp.IsError()) {
+		return resp, err
+	}
+
+	finalPolicies := cfg.TokenPolicies
+	if len(loginPolicies) > 0 {
+		finalPolicies = append(finalPolicies, loginPolicies...)
+	}
+	if !policyutil.EquivalentPolicies(finalPolicies, req.Auth.TokenPolicies) {
+		return nil, fmt.Errorf("policies have changed, not renewing")
+	}
+
+	resp.Auth = req.Auth
+	resp.Auth.Period = cfg.TokenPeriod
+	resp.Auth.TTL = cfg.TokenTTL
+	resp.Auth.MaxTTL = cfg.TokenMaxTTL
+
+	// Remove old aliases
+	resp.Auth.GroupAliases = nil
+
+	for _, groupName := range groupNames {
+		resp.Auth.GroupAliases = append(resp.Auth.GroupAliases, &logical.Alias{
+			Name: groupName,
+		})
+	}
+
+	return resp, nil
+}
+
+func pathVerify(b *backend) *framework.Path {
+	return &framework.Path{
+		Pattern: `verify/(?P<nonce>.+)`,
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixOkta,
+			OperationVerb:   "verify",
+		},
+		Fields: map[string]*framework.FieldSchema{
+			"nonce": {
+				Type: framework.TypeString,
+				Description: `Nonce provided during a login request to
+retrieve the number verification challenge for the matching request.`,
+			},
+		},
+		Operations: map[logical.Operation]framework.OperationHandler{
+			logical.ReadOperation: &framework.PathOperation{
+				Callback: b.pathVerify,
+			},
+		},
+	}
+}
+
+func (b *backend) pathVerify(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	nonce := d.Get("nonce").(string)
+
+	correctRaw, ok := b.verifyCache.Get(nonce)
+	if !ok {
+		return nil, nil
+	}
+
+	resp := &logical.Response{
+		Data: map[string]interface{}{
+			"correct_answer": correctRaw.(int),
+		},
+	}
+
+	return resp, nil
+}
+
+func (b *backend) getConfig(ctx context.Context, req *logical.Request) (*ConfigEntry, error) {
+	cfg, err := b.Config(ctx, req.Storage)
+	if err != nil {
+		return nil, err
+	}
+	if cfg == nil {
+		return nil, errors.New("Okta backend not configured")
+	}
+
+	return cfg, nil
+}
+
+const pathLoginSyn = `
+Log in with a username and password.
+`
+
+const pathLoginDesc = `
+This endpoint authenticates using a username and password.
+`
diff --git a/builtin/credential/okta/path_users.go b/builtin/credential/okta/path_users.go
new file mode 100644
index 0000000..3c38e85
--- /dev/null
+++ b/builtin/credential/okta/path_users.go
@@ -0,0 +1,173 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package okta
+
+import (
+	"context"
+
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+func pathUsersList(b *backend) *framework.Path {
+	return &framework.Path{
+		Pattern: "users/?$",
+
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixOkta,
+			OperationSuffix: "users",
+			Navigation:      true,
+			ItemType:        "User",
+		},
+
+		Callbacks: map[logical.Operation]framework.OperationFunc{
+			logical.ListOperation: b.pathUserList,
+		},
+
+		HelpSynopsis:    pathUserHelpSyn,
+		HelpDescription: pathUserHelpDesc,
+	}
+}
+
+func pathUsers(b *backend) *framework.Path {
+	return &framework.Path{
+		Pattern: `users/(?P<name>.+)`,
+
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixOkta,
+			OperationSuffix: "user",
+			Action:          "Create",
+			ItemType:        "User",
+		},
+
+		Fields: map[string]*framework.FieldSchema{
+			"name": {
+				Type:        framework.TypeString,
+				Description: "Name of the user.",
+			},
+
+			"groups": {
+				Type:        framework.TypeCommaStringSlice,
+				Description: "List of groups associated with the user.",
+			},
+
+			"policies": {
+				Type:        framework.TypeCommaStringSlice,
+				Description: "List of policies associated with the user.",
+			},
+		},
+
+		Callbacks: map[logical.Operation]framework.OperationFunc{
+			logical.DeleteOperation: b.pathUserDelete,
+			logical.ReadOperation:   b.pathUserRead,
+			logical.UpdateOperation: b.pathUserWrite,
+		},
+
+		HelpSynopsis:    pathUserHelpSyn,
+		HelpDescription: pathUserHelpDesc,
+	}
+}
+
+func (b *backend) User(ctx context.Context, s logical.Storage, n string) (*UserEntry, error) {
+	entry, err := s.Get(ctx, "user/"+n)
+	if err != nil {
+		return nil, err
+	}
+	if entry == nil {
+		return nil, nil
+	}
+
+	var result UserEntry
+	if err := entry.DecodeJSON(&result); err != nil {
+		return nil, err
+	}
+
+	return &result, nil
+}
+
+func (b *backend) pathUserDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	name := d.Get("name").(string)
+	if len(name) == 0 {
+		return logical.ErrorResponse("Error empty name"), nil
+	}
+
+	err := req.Storage.Delete(ctx, "user/"+name)
+	if err != nil {
+		return nil, err
+	}
+
+	return nil, nil
+}
+
+func (b *backend) pathUserRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	name := d.Get("name").(string)
+	if len(name) == 0 {
+		return logical.ErrorResponse("Error empty name"), nil
+	}
+
+	user, err := b.User(ctx, req.Storage, name)
+	if err != nil {
+		return nil, err
+	}
+	if user == nil {
+		return nil, nil
+	}
+
+	return &logical.Response{
+		Data: map[string]interface{}{
+			"groups":   user.Groups,
+			"policies": user.Policies,
+		},
+	}, nil
+}
+
+func (b *backend) pathUserWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	name := d.Get("name").(string)
+	if len(name) == 0 {
+		return logical.ErrorResponse("Error empty name"), nil
+	}
+
+	groups := d.Get("groups").([]string)
+	policies := d.Get("policies").([]string)
+
+	// Store it
+	entry, err := logical.StorageEntryJSON("user/"+name, &UserEntry{
+		Groups:   groups,
+		Policies: policies,
+	})
+	if err != nil {
+		return nil, err
+	}
+	if err := req.Storage.Put(ctx, entry); err != nil {
+		return nil, err
+	}
+
+	return nil, nil
+}
+
+func (b *backend) pathUserList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	users, err := req.Storage.List(ctx, "user/")
+	if err != nil {
+		return nil, err
+	}
+	return logical.ListResponse(users), nil
+}
+
+type UserEntry struct {
+	Groups   []string
+	Policies []string
+}
+
+const pathUserHelpSyn = `
+Manage additional groups for users allowed to authenticate.
+`
+
+const pathUserHelpDesc = `
+This endpoint allows you to create, read, update, and delete configuration
+for Okta users that are allowed to authenticate, in particular associating
+additional groups to them.
+
+Deleting a user will not revoke their auth. To do this, do a revoke on "login/<username>" for
+the usernames you want revoked.
+`
diff --git a/builtin/credential/radius/backend.go b/builtin/credential/radius/backend.go
new file mode 100644
index 0000000..3ec37a6
--- /dev/null
+++ b/builtin/credential/radius/backend.go
@@ -0,0 +1,69 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package radius
+
+import (
+	"context"
+
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+const operationPrefixRadius = "radius"
+
+func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
+	b := Backend()
+	if err := b.Setup(ctx, conf); err != nil {
+		return nil, err
+	}
+	return b, nil
+}
+
+func Backend() *backend {
+	var b backend
+	b.Backend = &framework.Backend{
+		Help: backendHelp,
+
+		PathsSpecial: &logical.Paths{
+			Unauthenticated: []string{
+				"login",
+				"login/*",
+			},
+
+			SealWrapStorage: []string{
+				"config",
+			},
+		},
+
+		Paths: []*framework.Path{
+			pathConfig(&b),
+			pathUsers(&b),
+			pathUsersList(&b),
+			pathLogin(&b),
+		},
+
+		AuthRenew:   b.pathLoginRenew,
+		BackendType: logical.TypeCredential,
+	}
+
+	return &b
+}
+
+type backend struct {
+	*framework.Backend
+}
+
+const backendHelp = `
+The "radius" credential provider allows authentication against
+a RADIUS server, checking the supplied username and password and
+associating users with a set of policies.
+
+Configuration of the server is done through the "config" and "users"
+endpoints by a user with appropriate access mandated by policy.
+Authentication is then done by supplying the two fields for "login".
+
+The backend can optionally grant a set of policies to any user that
+successfully authenticates against the RADIUS server, without that
+user being explicitly mapped in Vault.
+`
diff --git a/builtin/credential/radius/backend_test.go b/builtin/credential/radius/backend_test.go
new file mode 100644
index 0000000..c88faf0
--- /dev/null
+++ b/builtin/credential/radius/backend_test.go
@@ -0,0 +1,361 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package radius
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"reflect"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+
+	logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical"
+	"github.com/hashicorp/vault/sdk/helper/docker"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+const (
+	testSysTTL    = time.Hour * 10
+	testSysMaxTTL = time.Hour * 20
+
+	envRadiusRadiusHost = "RADIUS_HOST"
+	envRadiusPort       = "RADIUS_PORT"
+	envRadiusSecret     = "RADIUS_SECRET"
+	envRadiusUsername   = "RADIUS_USERNAME"
+	envRadiusUserPass   = "RADIUS_USERPASS"
+)
+
+func prepareRadiusTestContainer(t *testing.T) (func(), string, int) {
+	if os.Getenv(envRadiusRadiusHost) != "" {
+		port, _ := strconv.Atoi(os.Getenv(envRadiusPort))
+		return func() {}, os.Getenv(envRadiusRadiusHost), port
+	}
+
+	// Now allow any client to connect to this radiusd instance by writing our
+	// own clients.conf file.
+	//
+	// This is necessary because we lack control over the container's network
+	// IPs.
We might be running in Circle CI (with variable IPs per new + // network) or in Podman (which uses an entirely different set of default + // ranges than Docker). + // + // See also: https://freeradius.org/radiusd/man/clients.conf.html + ctx := context.Background() + clientsConfig := ` +client 0.0.0.0/1 { + ipaddr = 0.0.0.0/1 + secret = testing123 + shortname = all-clients-first +} + +client 128.0.0.0/1 { + ipaddr = 128.0.0.0/1 + secret = testing123 + shortname = all-clients-second +} +` + + containerfile := ` +FROM docker.mirror.hashicorp.services/jumanjiman/radiusd:latest + +COPY clients.conf /etc/raddb/clients.conf +` + + bCtx := docker.NewBuildContext() + bCtx["clients.conf"] = docker.PathContentsFromBytes([]byte(clientsConfig)) + + imageName := "vault_radiusd_any_client" + imageTag := "latest" + + runner, err := docker.NewServiceRunner(docker.RunOptions{ + ImageRepo: imageName, + ImageTag: imageTag, + ContainerName: "radiusd", + Cmd: []string{"-f", "-l", "stdout", "-X"}, + Ports: []string{"1812/udp"}, + LogConsumer: func(s string) { + if t.Failed() { + t.Logf("container logs: %s", s) + } + }, + }) + if err != nil { + t.Fatalf("Could not provision docker service runner: %s", err) + } + + output, err := runner.BuildImage(ctx, containerfile, bCtx, + docker.BuildRemove(true), docker.BuildForceRemove(true), + docker.BuildPullParent(true), + docker.BuildTags([]string{imageName + ":" + imageTag})) + if err != nil { + t.Fatalf("Could not build new image: %v", err) + } + + t.Logf("Image build output: %v", string(output)) + + svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { + time.Sleep(2 * time.Second) + return docker.NewServiceHostPort(host, port), nil + }) + if err != nil { + t.Fatalf("Could not start docker radiusd: %s", err) + } + + pieces := strings.Split(svc.Config.Address(), ":") + port, _ := strconv.Atoi(pieces[1]) + return svc.Cleanup, pieces[0], port +} + +func TestBackend_Config(t *testing.T) { + b, err := Factory(context.Background(), &logical.BackendConfig{ + Logger: nil, + System: &logical.StaticSystemView{ + DefaultLeaseTTLVal: testSysTTL, + MaxLeaseTTLVal: testSysMaxTTL, + }, + }) + if err != nil { + t.Fatalf("Unable to create backend: %s", err) + } + + configDataBasic := map[string]interface{}{ + "host": "test.radius.hostname.com", + "secret": "test-secret", + } + + configDataMissingRequired := map[string]interface{}{ + "host": "test.radius.hostname.com", + } + + configDataEmptyPort := map[string]interface{}{ + "host": "test.radius.hostname.com", + "port": "", + "secret": "test-secret", + } + + configDataInvalidPort := map[string]interface{}{ + "host": "test.radius.hostname.com", + "port": "notnumeric", + "secret": "test-secret", + } + + configDataInvalidBool := map[string]interface{}{ + "host": "test.radius.hostname.com", + "secret": "test-secret", + "unregistered_user_policies": "test", + } + + logicaltest.Test(t, logicaltest.TestCase{ + AcceptanceTest: false, + // PreCheck: func() { testAccPreCheck(t) }, + CredentialBackend: b, + Steps: []logicaltest.TestStep{ + testConfigWrite(t, configDataBasic, false), + testConfigWrite(t, configDataMissingRequired, true), + testConfigWrite(t, configDataEmptyPort, true), + testConfigWrite(t, configDataInvalidPort, true), + testConfigWrite(t, configDataInvalidBool, true), + }, + }) +} + +func TestBackend_users(t *testing.T) { + b, err := Factory(context.Background(), &logical.BackendConfig{ + Logger: nil, + System: &logical.StaticSystemView{ + 
DefaultLeaseTTLVal: testSysTTL, + MaxLeaseTTLVal: testSysMaxTTL, + }, + }) + if err != nil { + t.Fatalf("Unable to create backend: %s", err) + } + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: b, + Steps: []logicaltest.TestStep{ + testStepUpdateUser(t, "web", "foo"), + testStepUpdateUser(t, "web2", "foo"), + testStepUpdateUser(t, "web3", "foo"), + testStepUserList(t, []string{"web", "web2", "web3"}), + }, + }) +} + +func TestBackend_acceptance(t *testing.T) { + b, err := Factory(context.Background(), &logical.BackendConfig{ + Logger: nil, + System: &logical.StaticSystemView{ + DefaultLeaseTTLVal: testSysTTL, + MaxLeaseTTLVal: testSysMaxTTL, + }, + }) + if err != nil { + t.Fatalf("Unable to create backend: %s", err) + } + + cleanup, host, port := prepareRadiusTestContainer(t) + defer cleanup() + + // These defaults are specific to the jumanjiman/radiusd docker image + username := os.Getenv(envRadiusUsername) + if username == "" { + username = "test" + } + + password := os.Getenv(envRadiusUserPass) + if password == "" { + password = "test" + } + + secret := os.Getenv(envRadiusSecret) + if len(secret) == 0 { + secret = "testing123" + } + + configDataAcceptanceAllowUnreg := map[string]interface{}{ + "host": host, + "port": strconv.Itoa(port), + "secret": secret, + "unregistered_user_policies": "policy1,policy2", + } + if configDataAcceptanceAllowUnreg["port"] == "" { + configDataAcceptanceAllowUnreg["port"] = "1812" + } + + configDataAcceptanceNoAllowUnreg := map[string]interface{}{ + "host": host, + "port": strconv.Itoa(port), + "secret": secret, + "unregistered_user_policies": "", + } + if configDataAcceptanceNoAllowUnreg["port"] == "" { + configDataAcceptanceNoAllowUnreg["port"] = "1812" + } + + dataRealpassword := map[string]interface{}{ + "password": password, + } + + dataWrongpassword := map[string]interface{}{ + "password": "wrongpassword", + } + + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: b, + PreCheck: testAccPreCheck(t, host, port), + Steps: []logicaltest.TestStep{ + // Login with valid but unknown user will fail because unregistered_user_policies is empty + testConfigWrite(t, configDataAcceptanceNoAllowUnreg, false), + testAccUserLogin(t, username, dataRealpassword, true), + // Once the user is registered auth will succeed + testStepUpdateUser(t, username, ""), + testAccUserLoginPolicy(t, username, dataRealpassword, []string{"default"}, false), + + testStepUpdateUser(t, username, "foopolicy"), + testAccUserLoginPolicy(t, username, dataRealpassword, []string{"default", "foopolicy"}, false), + testAccStepDeleteUser(t, username), + + // When unregistered_user_policies is specified, an unknown user will be granted access and granted the listed policies + testConfigWrite(t, configDataAcceptanceAllowUnreg, false), + testAccUserLoginPolicy(t, username, dataRealpassword, []string{"default", "policy1", "policy2"}, false), + + // More tests + testAccUserLogin(t, "nonexistinguser", dataRealpassword, true), + testAccUserLogin(t, username, dataWrongpassword, true), + testStepUpdateUser(t, username, "foopolicy"), + testAccUserLoginPolicy(t, username, dataRealpassword, []string{"default", "foopolicy"}, false), + testStepUpdateUser(t, username, "foopolicy, secondpolicy"), + testAccUserLoginPolicy(t, username, dataRealpassword, []string{"default", "foopolicy", "secondpolicy"}, false), + testAccUserLoginPolicy(t, username, dataRealpassword, []string{"default", "foopolicy", "secondpolicy", "thirdpolicy"}, true), + }, + }) +} + +func testAccPreCheck(t *testing.T, 
host string, port int) func() { + return func() { + if host == "" { + t.Fatal("Host must be set for acceptance tests") + } + + if port == 0 { + t.Fatal("Port must be non-zero for acceptance tests") + } + } +} + +func testConfigWrite(t *testing.T, d map[string]interface{}, expectError bool) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "config", + Data: d, + ErrorOk: expectError, + } +} + +func testAccStepDeleteUser(t *testing.T, n string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.DeleteOperation, + Path: "users/" + n, + } +} + +func testStepUserList(t *testing.T, users []string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ListOperation, + Path: "users", + Check: func(resp *logical.Response) error { + if resp.IsError() { + return fmt.Errorf("got error response: %#v", *resp) + } + + if !reflect.DeepEqual(users, resp.Data["keys"].([]string)) { + return fmt.Errorf("expected:\n%#v\ngot:\n%#v\n", users, resp.Data["keys"]) + } + return nil + }, + } +} + +func testStepUpdateUser( + t *testing.T, name string, policies string, +) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "users/" + name, + Data: map[string]interface{}{ + "policies": policies, + }, + } +} + +func testAccUserLogin(t *testing.T, user string, data map[string]interface{}, expectError bool) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "login/" + user, + Data: data, + ErrorOk: expectError, + Unauthenticated: true, + } +} + +func testAccUserLoginPolicy(t *testing.T, user string, data map[string]interface{}, policies []string, expectError bool) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "login/" + user, + Data: data, + ErrorOk: expectError, + Unauthenticated: true, + // Check: logicaltest.TestCheckAuth(policies), + Check: func(resp *logical.Response) error { + res := logicaltest.TestCheckAuth(policies)(resp) + if res != nil && expectError { + return nil + } + return res + }, + } +} diff --git a/builtin/credential/radius/cmd/radius/main.go b/builtin/credential/radius/cmd/radius/main.go new file mode 100644 index 0000000..b3045a3 --- /dev/null +++ b/builtin/credential/radius/cmd/radius/main.go @@ -0,0 +1,34 @@ +// Copyright (c) HashiCorp, Inc. 
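Because prepareRadiusTestContainer short-circuits when RADIUS_HOST is set, the acceptance test above can be pointed at an already-running radiusd instead of provisioning Docker. A sketch, with placeholder values that must match the target server:

package radius

import "os"

// Sketch only: all values below are placeholders for a reachable radiusd.
func init() {
	os.Setenv("RADIUS_HOST", "10.0.0.5") // any non-empty host skips container provisioning
	os.Setenv("RADIUS_PORT", "1812")
	os.Setenv("RADIUS_SECRET", "testing123") // must match the server's shared secret
	os.Setenv("RADIUS_USERNAME", "test")
	os.Setenv("RADIUS_USERPASS", "test")
}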
+// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "os" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/credential/radius" + "github.com/hashicorp/vault/sdk/plugin" +) + +func main() { + apiClientMeta := &api.PluginAPIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args[1:]) + + tlsConfig := apiClientMeta.GetTLSConfig() + tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) + + if err := plugin.ServeMultiplex(&plugin.ServeOpts{ + BackendFactoryFunc: radius.Factory, + // set the TLSProviderFunc so that the plugin maintains backwards + // compatibility with Vault versions that don’t support plugin AutoMTLS + TLSProviderFunc: tlsProviderFunc, + }); err != nil { + logger := hclog.New(&hclog.LoggerOptions{}) + + logger.Error("plugin shutting down", "error", err) + os.Exit(1) + } +} diff --git a/builtin/credential/radius/path_config.go b/builtin/credential/radius/path_config.go new file mode 100644 index 0000000..6bdc296 --- /dev/null +++ b/builtin/credential/radius/path_config.go @@ -0,0 +1,294 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package radius + +import ( + "context" + "strings" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/tokenutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathConfig(b *backend) *framework.Path { + p := &framework.Path{ + Pattern: "config", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixRadius, + Action: "Configure", + }, + + Fields: map[string]*framework.FieldSchema{ + "host": { + Type: framework.TypeString, + Description: "RADIUS server host", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Host", + }, + }, + "port": { + Type: framework.TypeInt, + Default: 1812, + Description: "RADIUS server port (default: 1812)", + DisplayAttrs: &framework.DisplayAttributes{ + Value: 1812, + }, + }, + "secret": { + Type: framework.TypeString, + Description: "Secret shared with the RADIUS server", + }, + "unregistered_user_policies": { + Type: framework.TypeString, + Default: "", + Description: "Comma-separated list of policies to grant upon successful RADIUS authentication of an unregistered user (default: empty)", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Policies for unregistered users", + Description: "List of policies to grant upon successful RADIUS authentication of an unregistered user (default: empty)", + }, + }, + "dial_timeout": { + Type: framework.TypeDurationSecond, + Default: 10, + Description: "Number of seconds before connect times out (default: 10)", + DisplayAttrs: &framework.DisplayAttributes{ + Value: 10, + }, + }, + "read_timeout": { + Type: framework.TypeDurationSecond, + Default: 10, + Description: "Number of seconds before response times out (default: 10)", + DisplayAttrs: &framework.DisplayAttributes{ + Value: 10, + }, + }, + "nas_port": { + Type: framework.TypeInt, + Default: 10, + Description: "RADIUS NAS port field (default: 10)", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "NAS Port", + Value: 10, + }, + }, + "nas_identifier": { + Type: framework.TypeString, + Default: "", + Description: "RADIUS NAS Identifier field (optional)", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "NAS Identifier", + }, + }, + }, + + ExistenceCheck: b.configExistenceCheck, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigRead, 
+ DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "configuration", + }, + }, + logical.CreateOperation: &framework.PathOperation{ + Callback: b.pathConfigCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigCreateUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + }, + }, + }, + + HelpSynopsis: pathConfigHelpSyn, + HelpDescription: pathConfigHelpDesc, + } + + tokenutil.AddTokenFields(p.Fields) + p.Fields["token_policies"].Description += ". This will apply to all tokens generated by this auth method, in addition to any configured for specific users." + return p +} + +// Establishes dichotomy of request operation between CreateOperation and UpdateOperation. +// Returning 'true' forces an UpdateOperation, CreateOperation otherwise. +func (b *backend) configExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) { + entry, err := b.Config(ctx, req) + if err != nil { + return false, err + } + return entry != nil, nil +} + +/* + * Construct ConfigEntry struct using stored configuration. + */ +func (b *backend) Config(ctx context.Context, req *logical.Request) (*ConfigEntry, error) { + storedConfig, err := req.Storage.Get(ctx, "config") + if err != nil { + return nil, err + } + + if storedConfig == nil { + return nil, nil + } + + var result ConfigEntry + + if err := storedConfig.DecodeJSON(&result); err != nil { + return nil, err + } + + return &result, nil +} + +func (b *backend) pathConfigRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + cfg, err := b.Config(ctx, req) + if err != nil { + return nil, err + } + if cfg == nil { + return nil, nil + } + + data := map[string]interface{}{ + "host": cfg.Host, + "port": cfg.Port, + "unregistered_user_policies": cfg.UnregisteredUserPolicies, + "dial_timeout": cfg.DialTimeout, + "read_timeout": cfg.ReadTimeout, + "nas_port": cfg.NasPort, + "nas_identifier": cfg.NasIdentifier, + } + cfg.PopulateTokenData(data) + + return &logical.Response{ + Data: data, + }, nil +} + +func (b *backend) pathConfigCreateUpdate(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + // Build a ConfigEntry struct out of the supplied FieldData + cfg, err := b.Config(ctx, req) + if err != nil { + return nil, err + } + if cfg == nil { + cfg = &ConfigEntry{} + } + + if err := cfg.ParseTokenFields(req, d); err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + + host, ok := d.GetOk("host") + if ok { + cfg.Host = strings.ToLower(host.(string)) + } else if req.Operation == logical.CreateOperation { + cfg.Host = strings.ToLower(d.Get("host").(string)) + } + if cfg.Host == "" { + return logical.ErrorResponse("config parameter `host` cannot be empty"), nil + } + + port, ok := d.GetOk("port") + if ok { + cfg.Port = port.(int) + } else if req.Operation == logical.CreateOperation { + cfg.Port = d.Get("port").(int) + } + + secret, ok := d.GetOk("secret") + if ok { + cfg.Secret = secret.(string) + } else if req.Operation == logical.CreateOperation { + cfg.Secret = d.Get("secret").(string) + } + if cfg.Secret == "" { + return logical.ErrorResponse("config parameter `secret` cannot be empty"), nil + } + + policies := make([]string, 0) + unregisteredUserPoliciesRaw, ok := d.GetOk("unregistered_user_policies") + if ok { + unregisteredUserPoliciesStr := 
unregisteredUserPoliciesRaw.(string) + if strings.TrimSpace(unregisteredUserPoliciesStr) != "" { + policies = strings.Split(unregisteredUserPoliciesStr, ",") + for _, policy := range policies { + if policy == "root" { + return logical.ErrorResponse("root policy cannot be granted by an auth method"), nil + } + } + } + cfg.UnregisteredUserPolicies = policies + } else if req.Operation == logical.CreateOperation { + cfg.UnregisteredUserPolicies = policies + } + + dialTimeout, ok := d.GetOk("dial_timeout") + if ok { + cfg.DialTimeout = dialTimeout.(int) + } else if req.Operation == logical.CreateOperation { + cfg.DialTimeout = d.Get("dial_timeout").(int) + } + + readTimeout, ok := d.GetOk("read_timeout") + if ok { + cfg.ReadTimeout = readTimeout.(int) + } else if req.Operation == logical.CreateOperation { + cfg.ReadTimeout = d.Get("read_timeout").(int) + } + + nasPort, ok := d.GetOk("nas_port") + if ok { + cfg.NasPort = nasPort.(int) + } else if req.Operation == logical.CreateOperation { + cfg.NasPort = d.Get("nas_port").(int) + } + + nasIdentifier, ok := d.GetOk("nas_identifier") + if ok { + cfg.NasIdentifier = nasIdentifier.(string) + } else if req.Operation == logical.CreateOperation { + cfg.NasIdentifier = d.Get("nas_identifier").(string) + } + + entry, err := logical.StorageEntryJSON("config", cfg) + if err != nil { + return nil, err + } + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + + return nil, nil +} + +type ConfigEntry struct { + tokenutil.TokenParams + + Host string `json:"host" structs:"host" mapstructure:"host"` + Port int `json:"port" structs:"port" mapstructure:"port"` + Secret string `json:"secret" structs:"secret" mapstructure:"secret"` + UnregisteredUserPolicies []string `json:"unregistered_user_policies" structs:"unregistered_user_policies" mapstructure:"unregistered_user_policies"` + DialTimeout int `json:"dial_timeout" structs:"dial_timeout" mapstructure:"dial_timeout"` + ReadTimeout int `json:"read_timeout" structs:"read_timeout" mapstructure:"read_timeout"` + NasPort int `json:"nas_port" structs:"nas_port" mapstructure:"nas_port"` + NasIdentifier string `json:"nas_identifier" structs:"nas_identifier" mapstructure:"nas_identifier"` +} + +const pathConfigHelpSyn = ` +Configure the RADIUS server to connect to, along with its options. +` + +const pathConfigHelpDesc = ` +This endpoint allows you to configure the RADIUS server to connect to and its +configuration options. +` diff --git a/builtin/credential/radius/path_login.go b/builtin/credential/radius/path_login.go new file mode 100644 index 0000000..6feaf1b --- /dev/null +++ b/builtin/credential/radius/path_login.go @@ -0,0 +1,233 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package radius + +import ( + "context" + "fmt" + "net" + "strconv" + "strings" + "time" + + "layeh.com/radius" + . "layeh.com/radius/rfc2865" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/cidrutil" + "github.com/hashicorp/vault/sdk/helper/policyutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathLogin(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "login" + framework.OptionalParamRegex("urlusername"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixRadius, + OperationVerb: "login", + OperationSuffix: "|with-username", + }, + + Fields: map[string]*framework.FieldSchema{ + "urlusername": { + Type: framework.TypeString, + Description: "Username to be used for login. 
(URL parameter)", + }, + + "username": { + Type: framework.TypeString, + Description: "Username to be used for login. (POST request body)", + }, + + "password": { + Type: framework.TypeString, + Description: "Password for this user.", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathLogin, + logical.AliasLookaheadOperation: b.pathLoginAliasLookahead, + }, + + HelpSynopsis: pathLoginSyn, + HelpDescription: pathLoginDesc, + } +} + +func (b *backend) pathLoginAliasLookahead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + username := d.Get("username").(string) + if username == "" { + return nil, fmt.Errorf("missing username") + } + + return &logical.Response{ + Auth: &logical.Auth{ + Alias: &logical.Alias{ + Name: username, + }, + }, + }, nil +} + +func (b *backend) pathLogin(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + cfg, err := b.Config(ctx, req) + if err != nil { + return nil, err + } + if cfg == nil { + return logical.ErrorResponse("radius backend not configured"), nil + } + + // Check for a CIDR match. + if len(cfg.TokenBoundCIDRs) > 0 { + if req.Connection == nil { + b.Logger().Warn("token bound CIDRs found but no connection information available for validation") + return nil, logical.ErrPermissionDenied + } + if !cidrutil.RemoteAddrIsOk(req.Connection.RemoteAddr, cfg.TokenBoundCIDRs) { + return nil, logical.ErrPermissionDenied + } + } + + username := d.Get("username").(string) + password := d.Get("password").(string) + + if username == "" { + username = d.Get("urlusername").(string) + if username == "" { + return logical.ErrorResponse("username cannot be empty"), nil + } + } + + if password == "" { + return logical.ErrorResponse("password cannot be empty"), nil + } + + policies, resp, err := b.RadiusLogin(ctx, req, username, password) + // Handle an internal error + if err != nil { + return nil, err + } + if resp != nil { + // Handle a logical error + if resp.IsError() { + return resp, nil + } + } + + auth := &logical.Auth{ + Metadata: map[string]string{ + "username": username, + "policies": strings.Join(policies, ","), + }, + InternalData: map[string]interface{}{ + "password": password, + }, + DisplayName: username, + Alias: &logical.Alias{ + Name: username, + }, + } + cfg.PopulateTokenAuth(auth) + + resp.Auth = auth + if policies != nil { + resp.Auth.Policies = append(resp.Auth.Policies, policies...) + } + + return resp, nil +} + +func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + cfg, err := b.Config(ctx, req) + if err != nil { + return nil, err + } + if cfg == nil { + return logical.ErrorResponse("radius backend not configured"), nil + } + + username := req.Auth.Metadata["username"] + password := req.Auth.InternalData["password"].(string) + + var resp *logical.Response + var loginPolicies []string + + loginPolicies, resp, err = b.RadiusLogin(ctx, req, username, password) + if err != nil || (resp != nil && resp.IsError()) { + return resp, err + } + finalPolicies := cfg.TokenPolicies + if loginPolicies != nil { + finalPolicies = append(finalPolicies, loginPolicies...) 
+ } + + if !policyutil.EquivalentPolicies(finalPolicies, req.Auth.TokenPolicies) { + return nil, fmt.Errorf("policies have changed, not renewing") + } + + req.Auth.Period = cfg.TokenPeriod + req.Auth.TTL = cfg.TokenTTL + req.Auth.MaxTTL = cfg.TokenMaxTTL + return &logical.Response{Auth: req.Auth}, nil +} + +func (b *backend) RadiusLogin(ctx context.Context, req *logical.Request, username string, password string) ([]string, *logical.Response, error) { + cfg, err := b.Config(ctx, req) + if err != nil { + return nil, nil, err + } + if cfg == nil || cfg.Host == "" || cfg.Secret == "" { + return nil, logical.ErrorResponse("radius backend not configured"), nil + } + + hostport := net.JoinHostPort(cfg.Host, strconv.Itoa(cfg.Port)) + + packet := radius.New(radius.CodeAccessRequest, []byte(cfg.Secret)) + UserName_SetString(packet, username) + UserPassword_SetString(packet, password) + if cfg.NasIdentifier != "" { + NASIdentifier_AddString(packet, cfg.NasIdentifier) + } + packet.Add(5, radius.NewInteger(uint32(cfg.NasPort))) + + client := radius.Client{ + Dialer: net.Dialer{ + Timeout: time.Duration(cfg.DialTimeout) * time.Second, + }, + } + clientCtx, cancelFunc := context.WithTimeout(ctx, time.Duration(cfg.ReadTimeout)*time.Second) + received, err := client.Exchange(clientCtx, packet, hostport) + cancelFunc() + if err != nil { + return nil, logical.ErrorResponse(err.Error()), nil + } + if received.Code != radius.CodeAccessAccept { + return nil, logical.ErrorResponse("access denied by the authentication server"), nil + } + + policies := cfg.UnregisteredUserPolicies + + // Retrieve user entry from storage + user, err := b.user(ctx, req.Storage, username) + if err != nil { + return nil, logical.ErrorResponse("could not retrieve user entry from storage"), err + } + if user != nil { + policies = user.Policies + } + + return policies, &logical.Response{}, nil +} + +const pathLoginSyn = ` +Log in with a username and password. +` + +const pathLoginDesc = ` +This endpoint authenticates using a username and password. Please be sure to +read the note on escaping from the path-help for the 'config' endpoint. +` diff --git a/builtin/credential/radius/path_users.go b/builtin/credential/radius/path_users.go new file mode 100644 index 0000000..63ac5bb --- /dev/null +++ b/builtin/credential/radius/path_users.go @@ -0,0 +1,177 @@ +// Copyright (c) HashiCorp, Inc. 
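The Access-Request exchange performed inside RadiusLogin above can be reproduced as a standalone program, which is handy when debugging server-side clients.conf problems. Server address, shared secret, and credentials are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"layeh.com/radius"
	"layeh.com/radius/rfc2865"
)

func main() {
	packet := radius.New(radius.CodeAccessRequest, []byte("testing123"))
	rfc2865.UserName_SetString(packet, "test")
	rfc2865.UserPassword_SetString(packet, "test")

	response, err := radius.Exchange(context.Background(), packet, "10.0.0.5:1812")
	if err != nil {
		log.Fatal(err)
	}
	// The backend treats only CodeAccessAccept as a successful login.
	fmt.Println("response code:", response.Code)
}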
+// SPDX-License-Identifier: MPL-2.0
+
+package radius
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/helper/policyutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+func pathUsersList(b *backend) *framework.Path {
+	return &framework.Path{
+		Pattern: "users/?$",
+
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixRadius,
+			OperationSuffix: "users",
+			Navigation:      true,
+			ItemType:        "User",
+		},
+
+		Callbacks: map[logical.Operation]framework.OperationFunc{
+			logical.ListOperation: b.pathUserList,
+		},
+
+		HelpSynopsis:    pathUserHelpSyn,
+		HelpDescription: pathUserHelpDesc,
+	}
+}
+
+func pathUsers(b *backend) *framework.Path {
+	return &framework.Path{
+		Pattern: `users/(?P<name>.+)`,
+
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixRadius,
+			OperationSuffix: "user",
+			Action:          "Create",
+			ItemType:        "User",
+		},
+
+		Fields: map[string]*framework.FieldSchema{
+			"name": {
+				Type:        framework.TypeString,
+				Description: "Name of the RADIUS user.",
+			},
+
+			"policies": {
+				Type:        framework.TypeCommaStringSlice,
+				Description: "Comma-separated list of policies associated to the user.",
+				DisplayAttrs: &framework.DisplayAttributes{
+					Description: "A list of policies associated to the user.",
+				},
+			},
+		},
+
+		Callbacks: map[logical.Operation]framework.OperationFunc{
+			logical.DeleteOperation: b.pathUserDelete,
+			logical.ReadOperation:   b.pathUserRead,
+			logical.UpdateOperation: b.pathUserWrite,
+			logical.CreateOperation: b.pathUserWrite,
+		},
+
+		ExistenceCheck: b.userExistenceCheck,
+
+		HelpSynopsis:    pathUserHelpSyn,
+		HelpDescription: pathUserHelpDesc,
+	}
+}
+
+func (b *backend) userExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) {
+	userEntry, err := b.user(ctx, req.Storage, data.Get("name").(string))
+	if err != nil {
+		return false, err
+	}
+
+	return userEntry != nil, nil
+}
+
+func (b *backend) user(ctx context.Context, s logical.Storage, username string) (*UserEntry, error) {
+	if username == "" {
+		return nil, fmt.Errorf("missing username")
+	}
+
+	entry, err := s.Get(ctx, "user/"+strings.ToLower(username))
+	if err != nil {
+		return nil, err
+	}
+	if entry == nil {
+		return nil, nil
+	}
+
+	var result UserEntry
+	if err := entry.DecodeJSON(&result); err != nil {
+		return nil, err
+	}
+
+	return &result, nil
+}
+
+func (b *backend) pathUserDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	err := req.Storage.Delete(ctx, "user/"+d.Get("name").(string))
+	if err != nil {
+		return nil, err
+	}
+
+	return nil, nil
+}
+
+func (b *backend) pathUserRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	user, err := b.user(ctx, req.Storage, d.Get("name").(string))
+	if err != nil {
+		return nil, err
+	}
+	if user == nil {
+		return nil, nil
+	}
+
+	return &logical.Response{
+		Data: map[string]interface{}{
+			"policies": user.Policies,
+		},
+	}, nil
+}
+
+func (b *backend) pathUserWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	policies := policyutil.ParsePolicies(d.Get("policies"))
+	for _, policy := range policies {
+		if policy == "root" {
+			return logical.ErrorResponse("root policy cannot be granted by an auth method"), nil
+		}
+	}
+
+	// Store it
+	entry, err := logical.StorageEntryJSON("user/"+d.Get("name").(string), &UserEntry{
+		Policies: policies,
+	})
+	if err != nil {
+		return nil, err
+	}
+	if err := req.Storage.Put(ctx, entry); err != nil {
+		return nil, err
+	}
+
+	return nil, nil
+}
+
+func (b *backend) pathUserList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	users, err := req.Storage.List(ctx, "user/")
+	if err != nil {
+		return nil, err
+	}
+	return logical.ListResponse(users), nil
+}
+
+type UserEntry struct {
+	Policies []string
+}
+
+const pathUserHelpSyn = `
+Manage users allowed to authenticate.
+`
+
+const pathUserHelpDesc = `
+This endpoint allows you to create, read, update, and delete configuration
+for RADIUS users that are allowed to authenticate, and associate policies to
+them.
+
+Deleting a user will not revoke auth for prior authenticated users.
+To do this, do a revoke token by path on "auth/radius/login/<username>"
+for the usernames you want revoked.
+`
diff --git a/builtin/credential/token/cli.go b/builtin/credential/token/cli.go
new file mode 100644
index 0000000..853d6ea
--- /dev/null
+++ b/builtin/credential/token/cli.go
@@ -0,0 +1,171 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package token
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/hashicorp/go-secure-stdlib/password"
+	"github.com/hashicorp/vault/api"
+)
+
+type CLIHandler struct {
+	// for tests
+	testStdin  io.Reader
+	testStdout io.Writer
+}
+
+func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) {
+	// Parse "lookup" first - we want to return an early error if the user
+	// supplied an invalid value here before we prompt them for a token. It would
+	// be annoying to type your token and then be told you supplied an invalid
+	// value that we could have known in advance.
+	lookup := true
+	if x, ok := m["lookup"]; ok {
+		parsed, err := strconv.ParseBool(x)
+		if err != nil {
+			return nil, fmt.Errorf("Failed to parse \"lookup\" as boolean: %w", err)
+		}
+		lookup = parsed
+	}
+
+	// Parse the token.
+	token, ok := m["token"]
+	if !ok {
+		// Override the output
+		stdout := h.testStdout
+		if stdout == nil {
+			stdout = os.Stderr
+		}
+
+		// No arguments given, read the token from user input
+		fmt.Fprintf(stdout, "Token (will be hidden): ")
+		var err error
+		token, err = password.Read(os.Stdin)
+		fmt.Fprintf(stdout, "\n")
+
+		if err != nil {
+			if err == password.ErrInterrupted {
+				return nil, fmt.Errorf("user interrupted")
+			}
+
+			return nil, fmt.Errorf("An error occurred attempting to "+
+				"ask for a token. The raw error message is shown below, but usually "+
+				"this is because you attempted to pipe a value into the command or "+
+				"you are executing outside of a terminal (tty). If you want to pipe "+
+				"the value, pass \"-\" as the argument to read from stdin. The raw "+
+				"error was: %w", err)
+		}
+	}
+
+	// Remove any whitespace, etc.
+	token = strings.TrimSpace(token)
+
+	if token == "" {
+		return nil, fmt.Errorf(
+			"a token must be passed to auth, please view the help for more " +
+				"information")
+	}
+
+	// If the user declined verification, return now. Note that we will not have
+	// a lot of information about the token.
+	if !lookup {
+		return &api.Secret{
+			Auth: &api.SecretAuth{
+				ClientToken: token,
+			},
+		}, nil
+	}
+
+	// If we got this far, we want to look up the token and pull its
+	// list of policies and metadata.
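+	// Setting the wrapping lookup function to return "" below disables
+	// response wrapping for the lookup-self call, so the token data comes
+	// back directly rather than inside a wrapped response.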
+	c.SetToken(token)
+	c.SetWrappingLookupFunc(func(string, string) string { return "" })
+
+	secret, err := c.Auth().Token().LookupSelf()
+	if err != nil {
+		return nil, fmt.Errorf("error looking up token: %w", err)
+	}
+	if secret == nil {
+		return nil, fmt.Errorf("empty response from lookup-self")
+	}
+
+	// Return an auth struct that "looks" like the response from an auth method.
+	// lookup and lookup-self return their data in data, not auth. We try to
+	// mirror that data here.
+	id, err := secret.TokenID()
+	if err != nil {
+		return nil, fmt.Errorf("error accessing token ID: %w", err)
+	}
+	accessor, err := secret.TokenAccessor()
+	if err != nil {
+		return nil, fmt.Errorf("error accessing token accessor: %w", err)
+	}
+	// This populates secret.Auth
+	_, err = secret.TokenPolicies()
+	if err != nil {
+		return nil, fmt.Errorf("error accessing token policies: %w", err)
+	}
+	metadata, err := secret.TokenMetadata()
+	if err != nil {
+		return nil, fmt.Errorf("error accessing token metadata: %w", err)
+	}
+	dur, err := secret.TokenTTL()
+	if err != nil {
+		return nil, fmt.Errorf("error converting token TTL: %w", err)
+	}
+	renewable, err := secret.TokenIsRenewable()
+	if err != nil {
+		return nil, fmt.Errorf("error checking if token is renewable: %w", err)
+	}
+	return &api.Secret{
+		Auth: &api.SecretAuth{
+			ClientToken:      id,
+			Accessor:         accessor,
+			Policies:         secret.Auth.Policies,
+			TokenPolicies:    secret.Auth.TokenPolicies,
+			IdentityPolicies: secret.Auth.IdentityPolicies,
+			Metadata:         metadata,
+
+			LeaseDuration: int(dur.Seconds()),
+			Renewable:     renewable,
+		},
+	}, nil
+}
+
+func (h *CLIHandler) Help() string {
+	help := `
+Usage: vault login TOKEN [CONFIG K=V...]
+
+  The token auth method allows logging in directly with a token. This
+  can be a token from the "token-create" command or API. There are no
+  configuration options for this auth method.
+
+  Authenticate using a token:
+
+      $ vault login 96ddf4bc-d217-f3ba-f9bd-017055595017
+
+  Authenticate but do not lookup information about the token:
+
+      $ vault login token=96ddf4bc-d217-f3ba-f9bd-017055595017 lookup=false
+
+  This token usually comes from a different source such as the API or via the
+  built-in "vault token create" command.
+
+Configuration:
+
+  token=<string>
+      The token to use for authentication. This is usually provided directly
+      via the "vault login" command.
+
+  lookup=<bool>
+      Perform a lookup of the token's metadata and policies.
+`
+
+	return strings.TrimSpace(help)
+}
diff --git a/builtin/credential/userpass/backend.go b/builtin/credential/userpass/backend.go
new file mode 100644
index 0000000..428e8b2
--- /dev/null
+++ b/builtin/credential/userpass/backend.go
@@ -0,0 +1,61 @@
+// Copyright (c) HashiCorp, Inc.
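A sketch of driving this handler programmatically, as "vault login" does; the token value is a placeholder, and lookup=false skips the lookup-self round trip:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
	credtoken "github.com/hashicorp/vault/builtin/credential/token"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	h := &credtoken.CLIHandler{}
	secret, err := h.Auth(client, map[string]string{
		"token":  "hvs.placeholder", // hypothetical token value
		"lookup": "false",           // skip lookup-self; only ClientToken is populated
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("client token:", secret.Auth.ClientToken)
}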
+// SPDX-License-Identifier: MPL-2.0 + +package userpass + +import ( + "context" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +const operationPrefixUserpass = "userpass" + +func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { + b := Backend() + if err := b.Setup(ctx, conf); err != nil { + return nil, err + } + return b, nil +} + +func Backend() *backend { + var b backend + b.Backend = &framework.Backend{ + Help: backendHelp, + + PathsSpecial: &logical.Paths{ + Unauthenticated: []string{ + "login/*", + }, + }, + + Paths: []*framework.Path{ + pathUsers(&b), + pathUsersList(&b), + pathUserPolicies(&b), + pathUserPassword(&b), + pathLogin(&b), + }, + + AuthRenew: b.pathLoginRenew, + BackendType: logical.TypeCredential, + } + + return &b +} + +type backend struct { + *framework.Backend +} + +const backendHelp = ` +The "userpass" credential provider allows authentication using +a combination of a username and password. No additional factors +are supported. + +The username/password combination is configured using the "users/" +endpoints by a user with root access. Authentication is then done +by supplying the two fields for "login". +` diff --git a/builtin/credential/userpass/backend_test.go b/builtin/credential/userpass/backend_test.go new file mode 100644 index 0000000..3df8cfa --- /dev/null +++ b/builtin/credential/userpass/backend_test.go @@ -0,0 +1,419 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package userpass + +import ( + "context" + "crypto/tls" + "fmt" + "reflect" + "testing" + "time" + + "github.com/go-test/deep" + sockaddr "github.com/hashicorp/go-sockaddr" + logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" + "github.com/hashicorp/vault/sdk/helper/policyutil" + "github.com/hashicorp/vault/sdk/helper/tokenutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/mapstructure" +) + +const ( + testSysTTL = time.Hour * 10 + testSysMaxTTL = time.Hour * 20 +) + +func TestBackend_CRUD(t *testing.T) { + var resp *logical.Response + var err error + + storage := &logical.InmemStorage{} + + config := logical.TestBackendConfig() + config.StorageView = storage + + ctx := context.Background() + + b, err := Factory(ctx, config) + if err != nil { + t.Fatal(err) + } + if b == nil { + t.Fatalf("failed to create backend") + } + + localhostSockAddr, err := sockaddr.NewSockAddr("127.0.0.1") + if err != nil { + t.Fatal(err) + } + + // Use new token_ forms + resp, err = b.HandleRequest(ctx, &logical.Request{ + Path: "users/testuser", + Operation: logical.CreateOperation, + Storage: storage, + Data: map[string]interface{}{ + "password": "testpassword", + "token_ttl": 5, + "token_max_ttl": 10, + "token_policies": []string{"foo"}, + "token_bound_cidrs": []string{"127.0.0.1"}, + }, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v\n", resp, err) + } + + resp, err = b.HandleRequest(ctx, &logical.Request{ + Path: "users/testuser", + Operation: logical.ReadOperation, + Storage: storage, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v\n", resp, err) + } + if resp.Data["token_ttl"].(int64) != 5 && resp.Data["token_max_ttl"].(int64) != 10 { + t.Fatalf("bad: token_ttl and token_max_ttl are not set correctly") + } + if diff := deep.Equal(resp.Data["token_policies"], []string{"foo"}); diff != nil { + t.Fatal(diff) + } + if diff := deep.Equal(resp.Data["token_bound_cidrs"], 
[]*sockaddr.SockAddrMarshaler{{localhostSockAddr}}); diff != nil { + t.Fatal(diff) + } + + localhostSockAddr, err = sockaddr.NewSockAddr("127.0.1.1") + if err != nil { + t.Fatal(err) + } + + // Use the old forms and verify that they zero out the new ones and then + // the new ones read with the expected value + resp, err = b.HandleRequest(ctx, &logical.Request{ + Path: "users/testuser", + Operation: logical.UpdateOperation, + Storage: storage, + Data: map[string]interface{}{ + "ttl": "5m", + "max_ttl": "10m", + "policies": []string{"bar"}, + "bound_cidrs": []string{"127.0.1.1"}, + }, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v\n", resp, err) + } + + resp, err = b.HandleRequest(ctx, &logical.Request{ + Path: "users/testuser", + Operation: logical.ReadOperation, + Storage: storage, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr: %v\n", resp, err) + } + if resp.Data["ttl"].(int64) != 300 && resp.Data["max_ttl"].(int64) != 600 { + t.Fatalf("bad: ttl and max_ttl are not set correctly") + } + if resp.Data["token_ttl"].(int64) != 300 && resp.Data["token_max_ttl"].(int64) != 600 { + t.Fatalf("bad: token_ttl and token_max_ttl are not set correctly") + } + if diff := deep.Equal(resp.Data["policies"], []string{"bar"}); diff != nil { + t.Fatal(diff) + } + if diff := deep.Equal(resp.Data["token_policies"], []string{"bar"}); diff != nil { + t.Fatal(diff) + } + if diff := deep.Equal(resp.Data["bound_cidrs"], []*sockaddr.SockAddrMarshaler{{localhostSockAddr}}); diff != nil { + t.Fatal(diff) + } + if diff := deep.Equal(resp.Data["token_bound_cidrs"], []*sockaddr.SockAddrMarshaler{{localhostSockAddr}}); diff != nil { + t.Fatal(diff) + } +} + +func TestBackend_basic(t *testing.T) { + b, err := Factory(context.Background(), &logical.BackendConfig{ + Logger: nil, + System: &logical.StaticSystemView{ + DefaultLeaseTTLVal: testSysTTL, + MaxLeaseTTLVal: testSysMaxTTL, + }, + }) + if err != nil { + t.Fatalf("Unable to create backend: %s", err) + } + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepUser(t, "web", "password", "foo"), + testAccStepUser(t, "web2", "password", "foo"), + testAccStepUser(t, "web3", "password", "foo"), + testAccStepList(t, []string{"web", "web2", "web3"}), + testAccStepLogin(t, "web", "password", []string{"default", "foo"}), + }, + }) +} + +func TestBackend_userCrud(t *testing.T) { + b, err := Factory(context.Background(), &logical.BackendConfig{ + Logger: nil, + System: &logical.StaticSystemView{ + DefaultLeaseTTLVal: testSysTTL, + MaxLeaseTTLVal: testSysMaxTTL, + }, + }) + if err != nil { + t.Fatalf("Unable to create backend: %s", err) + } + + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepUser(t, "web", "password", "foo"), + testAccStepReadUser(t, "web", "foo"), + testAccStepDeleteUser(t, "web"), + testAccStepReadUser(t, "web", ""), + }, + }) +} + +func TestBackend_userCreateOperation(t *testing.T) { + b, err := Factory(context.Background(), &logical.BackendConfig{ + Logger: nil, + System: &logical.StaticSystemView{ + DefaultLeaseTTLVal: testSysTTL, + MaxLeaseTTLVal: testSysMaxTTL, + }, + }) + if err != nil { + t.Fatalf("Unable to create backend: %s", err) + } + + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: b, + Steps: []logicaltest.TestStep{ + testUserCreateOperation(t, "web", "password", "foo"), + testAccStepLogin(t, "web", "password", 
[]string{"default", "foo"}), + }, + }) +} + +func TestBackend_passwordUpdate(t *testing.T) { + b, err := Factory(context.Background(), &logical.BackendConfig{ + Logger: nil, + System: &logical.StaticSystemView{ + DefaultLeaseTTLVal: testSysTTL, + MaxLeaseTTLVal: testSysMaxTTL, + }, + }) + if err != nil { + t.Fatalf("Unable to create backend: %s", err) + } + + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepUser(t, "web", "password", "foo"), + testAccStepReadUser(t, "web", "foo"), + testAccStepLogin(t, "web", "password", []string{"default", "foo"}), + testUpdatePassword(t, "web", "newpassword"), + testAccStepLogin(t, "web", "newpassword", []string{"default", "foo"}), + }, + }) +} + +func TestBackend_policiesUpdate(t *testing.T) { + b, err := Factory(context.Background(), &logical.BackendConfig{ + Logger: nil, + System: &logical.StaticSystemView{ + DefaultLeaseTTLVal: testSysTTL, + MaxLeaseTTLVal: testSysMaxTTL, + }, + }) + if err != nil { + t.Fatalf("Unable to create backend: %s", err) + } + + logicaltest.Test(t, logicaltest.TestCase{ + CredentialBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepUser(t, "web", "password", "foo"), + testAccStepReadUser(t, "web", "foo"), + testAccStepLogin(t, "web", "password", []string{"default", "foo"}), + testUpdatePolicies(t, "web", "foo,bar"), + testAccStepReadUser(t, "web", "bar,foo"), + testAccStepLogin(t, "web", "password", []string{"bar", "default", "foo"}), + }, + }) +} + +func testUpdatePassword(t *testing.T, user, password string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "users/" + user + "/password", + Data: map[string]interface{}{ + "password": password, + }, + } +} + +func testUpdatePolicies(t *testing.T, user, policies string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "users/" + user + "/policies", + Data: map[string]interface{}{ + "policies": policies, + }, + } +} + +func testAccStepList(t *testing.T, users []string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ListOperation, + Path: "users", + Check: func(resp *logical.Response) error { + if resp.IsError() { + return fmt.Errorf("got error response: %#v", *resp) + } + + exp := []string{"web", "web2", "web3"} + if !reflect.DeepEqual(exp, resp.Data["keys"].([]string)) { + return fmt.Errorf("expected:\n%#v\ngot:\n%#v\n", exp, resp.Data["keys"]) + } + return nil + }, + } +} + +func testAccStepLogin(t *testing.T, user string, pass string, policies []string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "login/" + user, + Data: map[string]interface{}{ + "password": pass, + }, + Unauthenticated: true, + + Check: logicaltest.TestCheckAuth(policies), + ConnState: &tls.ConnectionState{}, + } +} + +func testUserCreateOperation( + t *testing.T, name string, password string, policies string, +) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.CreateOperation, + Path: "users/" + name, + Data: map[string]interface{}{ + "password": password, + "policies": policies, + }, + } +} + +func testAccStepUser( + t *testing.T, name string, password string, policies string, +) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "users/" + name, + Data: map[string]interface{}{ + "password": password, + "policies": policies, + }, + } +} + +func testAccStepDeleteUser(t *testing.T, 
n string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.DeleteOperation, + Path: "users/" + n, + } +} + +func testAccStepReadUser(t *testing.T, name string, policies string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "users/" + name, + Check: func(resp *logical.Response) error { + if resp == nil { + if policies == "" { + return nil + } + + return fmt.Errorf("bad: %#v", resp) + } + + var d struct { + Policies []string `mapstructure:"policies"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + + if !reflect.DeepEqual(d.Policies, policyutil.ParsePolicies(policies)) { + return fmt.Errorf("bad: %#v", resp) + } + + return nil + }, + } +} + +func TestBackend_UserUpgrade(t *testing.T) { + s := &logical.InmemStorage{} + + config := logical.TestBackendConfig() + config.StorageView = s + + ctx := context.Background() + + b := Backend() + if b == nil { + t.Fatalf("failed to create backend") + } + if err := b.Setup(ctx, config); err != nil { + t.Fatal(err) + } + + foo := &UserEntry{ + Policies: []string{"foo"}, + TTL: time.Second, + MaxTTL: time.Second, + BoundCIDRs: []*sockaddr.SockAddrMarshaler{{SockAddr: sockaddr.MustIPAddr("127.0.0.1")}}, + } + + entry, err := logical.StorageEntryJSON("user/foo", foo) + if err != nil { + t.Fatal(err) + } + err = s.Put(ctx, entry) + if err != nil { + t.Fatal(err) + } + + userEntry, err := b.user(ctx, s, "foo") + if err != nil { + t.Fatal(err) + } + + exp := &UserEntry{ + Policies: []string{"foo"}, + TTL: time.Second, + MaxTTL: time.Second, + BoundCIDRs: []*sockaddr.SockAddrMarshaler{{SockAddr: sockaddr.MustIPAddr("127.0.0.1")}}, + TokenParams: tokenutil.TokenParams{ + TokenPolicies: []string{"foo"}, + TokenTTL: time.Second, + TokenMaxTTL: time.Second, + TokenBoundCIDRs: []*sockaddr.SockAddrMarshaler{{SockAddr: sockaddr.MustIPAddr("127.0.0.1")}}, + }, + } + if diff := deep.Equal(userEntry, exp); diff != nil { + t.Fatal(diff) + } +} diff --git a/builtin/credential/userpass/cli.go b/builtin/credential/userpass/cli.go new file mode 100644 index 0000000..e100ae9 --- /dev/null +++ b/builtin/credential/userpass/cli.go @@ -0,0 +1,89 @@ +// Copyright (c) HashiCorp, Inc. 
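Condensed from the harness-driven tests above, a minimal create-then-login round trip against the backend using HandleRequest directly; the user name and password are illustrative:

package userpass

import (
	"context"
	"testing"

	"github.com/hashicorp/vault/sdk/logical"
)

// Sketch: end-to-end user create and login without the logicaltest harness.
func TestLoginRoundTripSketch(t *testing.T) {
	ctx := context.Background()
	config := logical.TestBackendConfig()
	config.StorageView = &logical.InmemStorage{}

	b, err := Factory(ctx, config)
	if err != nil {
		t.Fatal(err)
	}

	if _, err := b.HandleRequest(ctx, &logical.Request{
		Path:      "users/demo",
		Operation: logical.CreateOperation,
		Storage:   config.StorageView,
		Data:      map[string]interface{}{"password": "demo-password"},
	}); err != nil {
		t.Fatal(err)
	}

	resp, err := b.HandleRequest(ctx, &logical.Request{
		Path:      "login/demo",
		Operation: logical.UpdateOperation,
		Storage:   config.StorageView,
		Data:      map[string]interface{}{"password": "demo-password"},
	})
	if err != nil || resp == nil || resp.Auth == nil {
		t.Fatalf("login failed: resp=%#v err=%v", resp, err)
	}
}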
+// SPDX-License-Identifier: MPL-2.0
+
+package userpass
+
+import (
+	"fmt"
+	"os"
+	"strings"
+
+	pwd "github.com/hashicorp/go-secure-stdlib/password"
+	"github.com/hashicorp/vault/api"
+	"github.com/mitchellh/mapstructure"
+)
+
+type CLIHandler struct {
+	DefaultMount string
+}
+
+func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) {
+	var data struct {
+		Username string `mapstructure:"username"`
+		Password string `mapstructure:"password"`
+		Mount    string `mapstructure:"mount"`
+	}
+	if err := mapstructure.WeakDecode(m, &data); err != nil {
+		return nil, err
+	}
+
+	if data.Username == "" {
+		return nil, fmt.Errorf("'username' must be specified")
+	}
+	if data.Password == "" {
+		fmt.Fprintf(os.Stderr, "Password (will be hidden): ")
+		password, err := pwd.Read(os.Stdin)
+		fmt.Fprintf(os.Stderr, "\n")
+		if err != nil {
+			return nil, err
+		}
+		data.Password = password
+	}
+	if data.Mount == "" {
+		data.Mount = h.DefaultMount
+	}
+
+	options := map[string]interface{}{
+		"password": data.Password,
+	}
+
+	path := fmt.Sprintf("auth/%s/login/%s", data.Mount, data.Username)
+	secret, err := c.Logical().Write(path, options)
+	if err != nil {
+		return nil, err
+	}
+	if secret == nil {
+		return nil, fmt.Errorf("empty response from credential provider")
+	}
+
+	return secret, nil
+}
+
+func (h *CLIHandler) Help() string {
+	help := `
+Usage: vault login -method=userpass [CONFIG K=V...]
+
+  The userpass auth method allows users to authenticate using Vault's
+  internal user database.
+
+  Authenticate as "sally":
+
+      $ vault login -method=userpass username=sally
+      Password (will be hidden):
+
+  Authenticate as "bob":
+
+      $ vault login -method=userpass username=bob password=password
+
+Configuration:
+
+  password=<string>
+      Password to use for authentication. If not provided, the CLI will prompt
+      for this on stdin.
+
+  username=<string>
+      Username to use for authentication.
+`
+
+	return strings.TrimSpace(help)
+}
diff --git a/builtin/credential/userpass/cmd/userpass/main.go b/builtin/credential/userpass/cmd/userpass/main.go
new file mode 100644
index 0000000..4747a56
--- /dev/null
+++ b/builtin/credential/userpass/cmd/userpass/main.go
@@ -0,0 +1,33 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package main
+
+import (
+	"os"
+
+	hclog "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/builtin/credential/userpass"
+	"github.com/hashicorp/vault/sdk/plugin"
+)
+
+func main() {
+	apiClientMeta := &api.PluginAPIClientMeta{}
+	flags := apiClientMeta.FlagSet()
+	flags.Parse(os.Args[1:])
+	tlsConfig := apiClientMeta.GetTLSConfig()
+	tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig)
+
+	if err := plugin.ServeMultiplex(&plugin.ServeOpts{
+		BackendFactoryFunc: userpass.Factory,
+		// set the TLSProviderFunc so that the plugin maintains backwards
+		// compatibility with Vault versions that don’t support plugin AutoMTLS
+		TLSProviderFunc: tlsProviderFunc,
+	}); err != nil {
+		logger := hclog.New(&hclog.LoggerOptions{})
+
+		logger.Error("plugin shutting down", "error", err)
+		os.Exit(1)
+	}
+}
diff --git a/builtin/credential/userpass/path_login.go b/builtin/credential/userpass/path_login.go
new file mode 100644
index 0000000..0565864
--- /dev/null
+++ b/builtin/credential/userpass/path_login.go
@@ -0,0 +1,182 @@
+// Copyright (c) HashiCorp, Inc.
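The CLIHandler above boils down to a single write against the login path; issued directly with the API client it looks like this (mount name and credentials are placeholders):

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	secret, err := client.Logical().Write(
		"auth/userpass/login/sally", // auth/<mount>/login/<username>
		map[string]interface{}{"password": "placeholder"},
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token:", secret.Auth.ClientToken)
}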
+// SPDX-License-Identifier: MPL-2.0 + +package userpass + +import ( + "context" + "crypto/subtle" + "fmt" + "strings" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/cidrutil" + "github.com/hashicorp/vault/sdk/helper/policyutil" + "github.com/hashicorp/vault/sdk/logical" + "golang.org/x/crypto/bcrypt" +) + +func pathLogin(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "login/" + framework.GenericNameRegex("username"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixUserpass, + OperationVerb: "login", + }, + + Fields: map[string]*framework.FieldSchema{ + "username": { + Type: framework.TypeString, + Description: "Username of the user.", + }, + + "password": { + Type: framework.TypeString, + Description: "Password for this user.", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathLogin, + logical.AliasLookaheadOperation: b.pathLoginAliasLookahead, + }, + + HelpSynopsis: pathLoginSyn, + HelpDescription: pathLoginDesc, + } +} + +func (b *backend) pathLoginAliasLookahead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + username := d.Get("username").(string) + if username == "" { + return nil, fmt.Errorf("missing username") + } + + return &logical.Response{ + Auth: &logical.Auth{ + Alias: &logical.Alias{ + Name: username, + }, + }, + }, nil +} + +func (b *backend) pathLogin(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + username := strings.ToLower(d.Get("username").(string)) + + password := d.Get("password").(string) + if password == "" { + return nil, fmt.Errorf("missing password") + } + + // Get the user and validate auth + user, userError := b.user(ctx, req.Storage, username) + + var userPassword []byte + var legacyPassword bool + // If there was an error or it's nil, we fake a password for the bcrypt + // check so as not to have a timing leak. Specifics of the underlying + // storage still leaks a bit but generally much more in the noise compared + // to bcrypt. + if user != nil && userError == nil { + if user.PasswordHash == nil { + userPassword = []byte(user.Password) + legacyPassword = true + } else { + userPassword = user.PasswordHash + } + } else { + // This is still acceptable as bcrypt will still make sure it takes + // a long time, it's just nicer to be random if possible + userPassword = []byte("dummy") + } + + // Check for a password match. Check for a hash collision for Vault 0.2+, + // but handle the older legacy passwords with a constant time comparison. 
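+	// Note: bcrypt.CompareHashAndPassword re-derives the hash at the cost
+	// stored in the hash itself, so its runtime is dominated by the bcrypt
+	// work factor rather than by how much of the candidate password matches;
+	// the legacy branch relies on subtle.ConstantTimeCompare for the same
+	// reason.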
+	passwordBytes := []byte(password)
+	switch {
+	case !legacyPassword:
+		if err := bcrypt.CompareHashAndPassword(userPassword, passwordBytes); err != nil {
+			// Failed-login information is only tracked in storage for existing
+			// users (an optimization), so skip ErrInvalidCredentials for unknown users
+			if user == nil || userError != nil {
+				return logical.ErrorResponse("invalid username or password"), nil
+			}
+			return logical.ErrorResponse("invalid username or password"), logical.ErrInvalidCredentials
+		}
+	default:
+		if subtle.ConstantTimeCompare(userPassword, passwordBytes) != 1 {
+			// Failed-login information is only tracked in storage for existing
+			// users (an optimization), so skip ErrInvalidCredentials for unknown users
+			if user == nil || userError != nil {
+				return logical.ErrorResponse("invalid username or password"), nil
+			}
+			return logical.ErrorResponse("invalid username or password"), logical.ErrInvalidCredentials
+		}
+	}
+
+	if userError != nil {
+		return nil, userError
+	}
+	if user == nil {
+		return logical.ErrorResponse("invalid username or password"), nil
+	}
+
+	// Check for a CIDR match.
+	if len(user.TokenBoundCIDRs) > 0 {
+		if req.Connection == nil {
+			b.Logger().Warn("token bound CIDRs found but no connection information available for validation")
+			return nil, logical.ErrPermissionDenied
+		}
+		if !cidrutil.RemoteAddrIsOk(req.Connection.RemoteAddr, user.TokenBoundCIDRs) {
+			return nil, logical.ErrPermissionDenied
+		}
+	}
+
+	auth := &logical.Auth{
+		Metadata: map[string]string{
+			"username": username,
+		},
+		DisplayName: username,
+		Alias: &logical.Alias{
+			Name: username,
+		},
+	}
+	user.PopulateTokenAuth(auth)
+
+	return &logical.Response{
+		Auth: auth,
+	}, nil
+}
+
+func (b *backend) pathLoginRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	// Get the user
+	user, err := b.user(ctx, req.Storage, req.Auth.Metadata["username"])
+	if err != nil {
+		return nil, err
+	}
+	if user == nil {
+		// User no longer exists, do not renew
+		return nil, nil
+	}
+
+	if !policyutil.EquivalentPolicies(user.TokenPolicies, req.Auth.TokenPolicies) {
+		return nil, fmt.Errorf("policies have changed, not renewing")
+	}
+
+	resp := &logical.Response{Auth: req.Auth}
+	resp.Auth.Period = user.TokenPeriod
+	resp.Auth.TTL = user.TokenTTL
+	resp.Auth.MaxTTL = user.TokenMaxTTL
+	return resp, nil
+}
+
+const pathLoginSyn = `
+Log in with a username and password.
+`
+
+const pathLoginDesc = `
+This endpoint authenticates using a username and password.
+`
diff --git a/builtin/credential/userpass/path_user_password.go b/builtin/credential/userpass/path_user_password.go
new file mode 100644
index 0000000..63b52ca
--- /dev/null
+++ b/builtin/credential/userpass/path_user_password.go
@@ -0,0 +1,89 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package userpass
+
+import (
+	"context"
+	"fmt"
+
+	"golang.org/x/crypto/bcrypt"
+
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+func pathUserPassword(b *backend) *framework.Path {
+	return &framework.Path{
+		Pattern: "users/" + framework.GenericNameRegex("username") + "/password$",
+
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixUserpass,
+			OperationVerb:   "reset",
+			OperationSuffix: "password",
+		},
+
+		Fields: map[string]*framework.FieldSchema{
+			"username": {
+				Type:        framework.TypeString,
+				Description: "Username for this user.",
+			},
+
+			"password": {
+				Type:        framework.TypeString,
+				Description: "Password for this user.",
+			},
+		},
+
+		Callbacks: map[logical.Operation]framework.OperationFunc{
+			logical.UpdateOperation: b.pathUserPasswordUpdate,
+		},
+
+		HelpSynopsis:    pathUserPasswordHelpSyn,
+		HelpDescription: pathUserPasswordHelpDesc,
+	}
+}
+
+func (b *backend) pathUserPasswordUpdate(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	username := d.Get("username").(string)
+
+	userEntry, err := b.user(ctx, req.Storage, username)
+	if err != nil {
+		return nil, err
+	}
+	if userEntry == nil {
+		return nil, fmt.Errorf("username does not exist")
+	}
+
+	userErr, intErr := b.updateUserPassword(req, d, userEntry)
+	if intErr != nil {
+		return nil, intErr
+	}
+	if userErr != nil {
+		return logical.ErrorResponse(userErr.Error()), logical.ErrInvalidRequest
+	}
+
+	return nil, b.setUser(ctx, req.Storage, username, userEntry)
+}
+
+func (b *backend) updateUserPassword(req *logical.Request, d *framework.FieldData, userEntry *UserEntry) (error, error) {
+	password := d.Get("password").(string)
+	if password == "" {
+		return fmt.Errorf("missing password"), nil
+	}
+	// Generate a hash of the password
+	hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
+	if err != nil {
+		return nil, err
+	}
+	userEntry.PasswordHash = hash
+	return nil, nil
+}
+
+const pathUserPasswordHelpSyn = `
+Reset user's password.
+`
+
+const pathUserPasswordHelpDesc = `
+This endpoint allows resetting the user's password.
+`
diff --git a/builtin/credential/userpass/path_user_policies.go b/builtin/credential/userpass/path_user_policies.go
new file mode 100644
index 0000000..8c5a917
--- /dev/null
+++ b/builtin/credential/userpass/path_user_policies.go
@@ -0,0 +1,91 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package userpass + +import ( + "context" + "fmt" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/policyutil" + "github.com/hashicorp/vault/sdk/helper/tokenutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathUserPolicies(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "users/" + framework.GenericNameRegex("username") + "/policies$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixUserpass, + OperationVerb: "update", + OperationSuffix: "policies", + }, + + Fields: map[string]*framework.FieldSchema{ + "username": { + Type: framework.TypeString, + Description: "Username for this user.", + }, + "policies": { + Type: framework.TypeCommaStringSlice, + Description: tokenutil.DeprecationText("token_policies"), + Deprecated: true, + }, + "token_policies": { + Type: framework.TypeCommaStringSlice, + Description: "Comma-separated list of policies", + DisplayAttrs: &framework.DisplayAttributes{ + Description: "A list of policies that will apply to the generated token for this user.", + }, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathUserPoliciesUpdate, + }, + + HelpSynopsis: pathUserPoliciesHelpSyn, + HelpDescription: pathUserPoliciesHelpDesc, + } +} + +func (b *backend) pathUserPoliciesUpdate(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + username := d.Get("username").(string) + + userEntry, err := b.user(ctx, req.Storage, username) + if err != nil { + return nil, err + } + if userEntry == nil { + return nil, fmt.Errorf("username does not exist") + } + + policiesRaw, ok := d.GetOk("token_policies") + if !ok { + policiesRaw, ok = d.GetOk("policies") + if ok { + userEntry.Policies = policyutil.ParsePolicies(policiesRaw) + userEntry.TokenPolicies = userEntry.Policies + } + } else { + userEntry.TokenPolicies = policyutil.ParsePolicies(policiesRaw) + _, ok = d.GetOk("policies") + if ok { + userEntry.Policies = userEntry.TokenPolicies + } else { + userEntry.Policies = nil + } + } + + return nil, b.setUser(ctx, req.Storage, username, userEntry) +} + +const pathUserPoliciesHelpSyn = ` +Update the policies associated with the username. +` + +const pathUserPoliciesHelpDesc = ` +This endpoint allows updating the policies associated with the username. +` diff --git a/builtin/credential/userpass/path_users.go b/builtin/credential/userpass/path_users.go new file mode 100644 index 0000000..221fc2c --- /dev/null +++ b/builtin/credential/userpass/path_users.go @@ -0,0 +1,294 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package userpass + +import ( + "context" + "fmt" + "strings" + "time" + + sockaddr "github.com/hashicorp/go-sockaddr" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/tokenutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathUsersList(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "users/?", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixUserpass, + OperationSuffix: "users", + Navigation: true, + ItemType: "User", + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: b.pathUserList, + }, + + HelpSynopsis: pathUserHelpSyn, + HelpDescription: pathUserHelpDesc, + } +} + +func pathUsers(b *backend) *framework.Path { + p := &framework.Path{ + Pattern: "users/" + framework.GenericNameRegex("username"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixUserpass, + OperationSuffix: "user", + Action: "Create", + ItemType: "User", + }, + + Fields: map[string]*framework.FieldSchema{ + "username": { + Type: framework.TypeString, + Description: "Username for this user.", + }, + + "password": { + Type: framework.TypeString, + Description: "Password for this user.", + DisplayAttrs: &framework.DisplayAttributes{ + Sensitive: true, + }, + }, + + "policies": { + Type: framework.TypeCommaStringSlice, + Description: tokenutil.DeprecationText("token_policies"), + Deprecated: true, + }, + + "ttl": { + Type: framework.TypeDurationSecond, + Description: tokenutil.DeprecationText("token_ttl"), + Deprecated: true, + }, + + "max_ttl": { + Type: framework.TypeDurationSecond, + Description: tokenutil.DeprecationText("token_max_ttl"), + Deprecated: true, + }, + + "bound_cidrs": { + Type: framework.TypeCommaStringSlice, + Description: tokenutil.DeprecationText("token_bound_cidrs"), + Deprecated: true, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.DeleteOperation: b.pathUserDelete, + logical.ReadOperation: b.pathUserRead, + logical.UpdateOperation: b.pathUserWrite, + logical.CreateOperation: b.pathUserWrite, + }, + + ExistenceCheck: b.userExistenceCheck, + + HelpSynopsis: pathUserHelpSyn, + HelpDescription: pathUserHelpDesc, + } + + tokenutil.AddTokenFields(p.Fields) + return p +} + +func (b *backend) userExistenceCheck(ctx context.Context, req *logical.Request, d *framework.FieldData) (bool, error) { + userEntry, err := b.user(ctx, req.Storage, d.Get("username").(string)) + if err != nil { + return false, err + } + + return userEntry != nil, nil +} + +func (b *backend) user(ctx context.Context, s logical.Storage, username string) (*UserEntry, error) { + if username == "" { + return nil, fmt.Errorf("missing username") + } + + entry, err := s.Get(ctx, "user/"+strings.ToLower(username)) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result UserEntry + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + if result.TokenTTL == 0 && result.TTL > 0 { + result.TokenTTL = result.TTL + } + if result.TokenMaxTTL == 0 && result.MaxTTL > 0 { + result.TokenMaxTTL = result.MaxTTL + } + if len(result.TokenPolicies) == 0 && len(result.Policies) > 0 { + result.TokenPolicies = result.Policies + } + if len(result.TokenBoundCIDRs) == 0 && len(result.BoundCIDRs) > 0 { + result.TokenBoundCIDRs = result.BoundCIDRs + } + + return &result, nil +} + +func (b *backend) setUser(ctx context.Context, s logical.Storage, username string, 
userEntry *UserEntry) error { + entry, err := logical.StorageEntryJSON("user/"+username, userEntry) + if err != nil { + return err + } + + return s.Put(ctx, entry) +} + +func (b *backend) pathUserList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + users, err := req.Storage.List(ctx, "user/") + if err != nil { + return nil, err + } + return logical.ListResponse(users), nil +} + +func (b *backend) pathUserDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + err := req.Storage.Delete(ctx, "user/"+strings.ToLower(d.Get("username").(string))) + if err != nil { + return nil, err + } + + return nil, nil +} + +func (b *backend) pathUserRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + user, err := b.user(ctx, req.Storage, strings.ToLower(d.Get("username").(string))) + if err != nil { + return nil, err + } + if user == nil { + return nil, nil + } + + data := map[string]interface{}{} + user.PopulateTokenData(data) + + // Add backwards compat data + if user.TTL > 0 { + data["ttl"] = int64(user.TTL.Seconds()) + } + if user.MaxTTL > 0 { + data["max_ttl"] = int64(user.MaxTTL.Seconds()) + } + if len(user.Policies) > 0 { + data["policies"] = data["token_policies"] + } + if len(user.BoundCIDRs) > 0 { + data["bound_cidrs"] = user.BoundCIDRs + } + + return &logical.Response{ + Data: data, + }, nil +} + +func (b *backend) userCreateUpdate(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + username := strings.ToLower(d.Get("username").(string)) + userEntry, err := b.user(ctx, req.Storage, username) + if err != nil { + return nil, err + } + // Due to existence check, user will only be nil if it's a create operation + if userEntry == nil { + userEntry = &UserEntry{} + } + + if err := userEntry.ParseTokenFields(req, d); err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + + if _, ok := d.GetOk("password"); ok { + userErr, intErr := b.updateUserPassword(req, d, userEntry) + if intErr != nil { + return nil, intErr + } + if userErr != nil { + return logical.ErrorResponse(userErr.Error()), logical.ErrInvalidRequest + } + } + + // handle upgrade cases + { + if err := tokenutil.UpgradeValue(d, "policies", "token_policies", &userEntry.Policies, &userEntry.TokenPolicies); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + if err := tokenutil.UpgradeValue(d, "ttl", "token_ttl", &userEntry.TTL, &userEntry.TokenTTL); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + if err := tokenutil.UpgradeValue(d, "max_ttl", "token_max_ttl", &userEntry.MaxTTL, &userEntry.TokenMaxTTL); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + if err := tokenutil.UpgradeValue(d, "bound_cidrs", "token_bound_cidrs", &userEntry.BoundCIDRs, &userEntry.TokenBoundCIDRs); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + } + + return nil, b.setUser(ctx, req.Storage, username, userEntry) +} + +func (b *backend) pathUserWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + password := d.Get("password").(string) + if req.Operation == logical.CreateOperation && password == "" { + return logical.ErrorResponse("missing password"), logical.ErrInvalidRequest + } + return b.userCreateUpdate(ctx, req, d) +} + +type UserEntry struct { + tokenutil.TokenParams + + // Password is deprecated in Vault 0.2 in 
favor of
+	// PasswordHash, but is retained for backwards compatibility.
+	Password string
+
+	// PasswordHash is a bcrypt hash of the password. This is
+	// used instead of the actual password in Vault 0.2+.
+	PasswordHash []byte
+
+	Policies []string
+
+	// Duration after which the user will be revoked unless renewed
+	TTL time.Duration
+
+	// Maximum duration for which the user can be valid
+	MaxTTL time.Duration
+
+	BoundCIDRs []*sockaddr.SockAddrMarshaler
+}
+
+const pathUserHelpSyn = `
+Manage users allowed to authenticate.
+`
+
+const pathUserHelpDesc = `
+This endpoint allows you to create, read, update, and delete users
+that are allowed to authenticate.
+
+Deleting a user will not revoke auth for prior authenticated users
+with that name. To do this, do a revoke on "login/<username>" for
+the username you want revoked. If you don't need to revoke login immediately,
+then the next renew will cause the lease to expire.
+`
diff --git a/builtin/credential/userpass/stepwise_test.go b/builtin/credential/userpass/stepwise_test.go
new file mode 100644
index 0000000..ab797ed
--- /dev/null
+++ b/builtin/credential/userpass/stepwise_test.go
@@ -0,0 +1,85 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package userpass
+
+import (
+	"fmt"
+	"reflect"
+	"testing"
+
+	stepwise "github.com/hashicorp/vault-testing-stepwise"
+	dockerEnvironment "github.com/hashicorp/vault-testing-stepwise/environments/docker"
+	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/sdk/helper/policyutil"
+	"github.com/mitchellh/mapstructure"
+)
+
+func TestAccBackend_stepwise_UserCrud(t *testing.T) {
+	customPluginName := "my-userpass"
+	envOptions := &stepwise.MountOptions{
+		RegistryName:    customPluginName,
+		PluginType:      api.PluginTypeCredential,
+		PluginName:      "userpass",
+		MountPathPrefix: customPluginName,
+	}
+	stepwise.Run(t, stepwise.Case{
+		Environment: dockerEnvironment.NewEnvironment(customPluginName, envOptions),
+		Steps: []stepwise.Step{
+			testAccStepwiseUser(t, "web", "password", "foo"),
+			testAccStepwiseReadUser(t, "web", "foo"),
+			testAccStepwiseDeleteUser(t, "web"),
+			testAccStepwiseReadUser(t, "web", ""),
+		},
+	})
+}
+
+func testAccStepwiseUser(
+	t *testing.T, name string, password string, policies string,
+) stepwise.Step {
+	return stepwise.Step{
+		Operation: stepwise.UpdateOperation,
+		Path:      "users/" + name,
+		Data: map[string]interface{}{
+			"password": password,
+			"policies": policies,
+		},
+	}
+}
+
+func testAccStepwiseDeleteUser(t *testing.T, name string) stepwise.Step {
+	return stepwise.Step{
+		Operation: stepwise.DeleteOperation,
+		Path:      "users/" + name,
+	}
+}
+
+func testAccStepwiseReadUser(t *testing.T, name string, policies string) stepwise.Step {
+	return stepwise.Step{
+		Operation: stepwise.ReadOperation,
+		Path:      "users/" + name,
+		Assert: func(resp *api.Secret, err error) error {
+			if resp == nil {
+				if policies == "" {
+					return nil
+				}
+
+				return fmt.Errorf("unexpected nil response")
+			}
+
+			var d struct {
+				Policies []string `mapstructure:"policies"`
+			}
+			if err := mapstructure.Decode(resp.Data, &d); err != nil {
+				return err
+			}
+
+			expectedPolicies := policyutil.ParsePolicies(policies)
+			if !reflect.DeepEqual(d.Policies, expectedPolicies) {
+				return fmt.Errorf("actual policies: %#v\nexpected policies: %#v", d.Policies, expectedPolicies)
+			}
+
+			return nil
+		},
+	}
+}
diff --git a/builtin/logical/aws/backend.go b/builtin/logical/aws/backend.go
new file mode 100644
index 0000000..d93c394
--- /dev/null
+++ b/builtin/logical/aws/backend.go
@@ -0,0 +1,183
@@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package aws + +import ( + "context" + "strings" + "sync" + "time" + + "github.com/aws/aws-sdk-go/service/iam/iamiface" + "github.com/aws/aws-sdk-go/service/sts/stsiface" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/queue" +) + +const ( + rootConfigPath = "config/root" + minAwsUserRollbackAge = 5 * time.Minute + operationPrefixAWS = "aws" + operationPrefixAWSASD = "aws-config" +) + +func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { + b := Backend(conf) + if err := b.Setup(ctx, conf); err != nil { + return nil, err + } + return b, nil +} + +func Backend(conf *logical.BackendConfig) *backend { + var b backend + b.credRotationQueue = queue.New() + b.Backend = &framework.Backend{ + Help: strings.TrimSpace(backendHelp), + + PathsSpecial: &logical.Paths{ + LocalStorage: []string{ + framework.WALPrefix, + }, + SealWrapStorage: []string{ + rootConfigPath, + pathStaticCreds + "/", + }, + }, + + Paths: []*framework.Path{ + pathConfigRoot(&b), + pathConfigRotateRoot(&b), + pathConfigLease(&b), + pathRoles(&b), + pathListRoles(&b), + pathStaticRoles(&b), + pathStaticCredentials(&b), + pathUser(&b), + }, + + Secrets: []*framework.Secret{ + secretAccessKeys(&b), + }, + + Invalidate: b.invalidate, + WALRollback: b.walRollback, + WALRollbackMinAge: minAwsUserRollbackAge, + PeriodicFunc: func(ctx context.Context, req *logical.Request) error { + repState := conf.System.ReplicationState() + if (conf.System.LocalMount() || + !repState.HasState(consts.ReplicationPerformanceSecondary)) && + !repState.HasState(consts.ReplicationDRSecondary) && + !repState.HasState(consts.ReplicationPerformanceStandby) { + return b.rotateExpiredStaticCreds(ctx, req) + } + return nil + }, + BackendType: logical.TypeLogical, + } + + return &b +} + +type backend struct { + *framework.Backend + + // Mutex to protect access to reading and writing policies + roleMutex sync.RWMutex + + // Mutex to protect access to iam/sts clients and client configs + clientMutex sync.RWMutex + + // iamClient and stsClient hold configured iam and sts clients for reuse, and + // to enable mocking with AWS iface for tests + iamClient iamiface.IAMAPI + stsClient stsiface.STSAPI + + // the age of a static role's credential is tracked by a priority queue and handled + // by the PeriodicFunc + credRotationQueue *queue.PriorityQueue +} + +const backendHelp = ` +The AWS backend dynamically generates AWS access keys for a set of +IAM policies. The AWS access keys have a configurable lease set and +are automatically revoked at the end of the lease. + +After mounting this backend, credentials to generate IAM keys must +be configured with the "root" path and policies must be written using +the "roles/" endpoints before any access keys can be generated. +` + +func (b *backend) invalidate(ctx context.Context, key string) { + switch { + case key == rootConfigPath: + b.clearClients() + } +} + +// clearClients clears the backend's IAM and STS clients +func (b *backend) clearClients() { + b.clientMutex.Lock() + defer b.clientMutex.Unlock() + b.iamClient = nil + b.stsClient = nil +} + +// clientIAM returns the configured IAM client. 
If nil, it constructs a new one
+// and returns it, caching it on the backend for reuse
+func (b *backend) clientIAM(ctx context.Context, s logical.Storage) (iamiface.IAMAPI, error) {
+	b.clientMutex.RLock()
+	if b.iamClient != nil {
+		b.clientMutex.RUnlock()
+		return b.iamClient, nil
+	}
+
+	// Upgrade the lock for writing
+	b.clientMutex.RUnlock()
+	b.clientMutex.Lock()
+	defer b.clientMutex.Unlock()
+
+	// check client again, in the event that a client was being created while we
+	// waited for Lock()
+	if b.iamClient != nil {
+		return b.iamClient, nil
+	}
+
+	iamClient, err := nonCachedClientIAM(ctx, s, b.Logger())
+	if err != nil {
+		return nil, err
+	}
+	b.iamClient = iamClient
+
+	return b.iamClient, nil
+}
+
+func (b *backend) clientSTS(ctx context.Context, s logical.Storage) (stsiface.STSAPI, error) {
+	b.clientMutex.RLock()
+	if b.stsClient != nil {
+		b.clientMutex.RUnlock()
+		return b.stsClient, nil
+	}
+
+	// Upgrade the lock for writing
+	b.clientMutex.RUnlock()
+	b.clientMutex.Lock()
+	defer b.clientMutex.Unlock()
+
+	// check client again, in the event that a client was being created while we
+	// waited for Lock()
+	if b.stsClient != nil {
+		return b.stsClient, nil
+	}
+
+	stsClient, err := nonCachedClientSTS(ctx, s, b.Logger())
+	if err != nil {
+		return nil, err
+	}
+	b.stsClient = stsClient
+
+	return b.stsClient, nil
+}
diff --git a/builtin/logical/aws/backend_test.go b/builtin/logical/aws/backend_test.go
new file mode 100644
index 0000000..260bcc6
--- /dev/null
+++ b/builtin/logical/aws/backend_test.go
@@ -0,0 +1,1559 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package aws
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"net/http"
+	"os"
+	"reflect"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/dynamodb"
+	"github.com/aws/aws-sdk-go/service/ec2"
+	"github.com/aws/aws-sdk-go/service/iam"
+	"github.com/aws/aws-sdk-go/service/iam/iamiface"
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/aws/aws-sdk-go/service/sts"
+	cleanhttp "github.com/hashicorp/go-cleanhttp"
+	"github.com/hashicorp/vault/helper/testhelpers"
+	logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/mitchellh/mapstructure"
+)
+
+var initSetup sync.Once
+
+type mockIAMClient struct {
+	iamiface.IAMAPI
+}
+
+func (m *mockIAMClient) CreateUserWithContext(_ aws.Context, input *iam.CreateUserInput, _ ...request.Option) (*iam.CreateUserOutput, error) {
+	return nil, awserr.New("Throttling", "", nil)
+}
+
+func getBackend(t *testing.T) logical.Backend {
+	be, _ := Factory(context.Background(), logical.TestBackendConfig())
+	return be
+}
+
+func TestAcceptanceBackend_basic(t *testing.T) {
+	t.Parallel()
+	logicaltest.Test(t, logicaltest.TestCase{
+		AcceptanceTest: true,
+		PreCheck:       func() { testAccPreCheck(t) },
+		LogicalBackend: getBackend(t),
+		Steps: []logicaltest.TestStep{
+			testAccStepConfig(t),
+			testAccStepWritePolicy(t, "test", testDynamoPolicy),
+			testAccStepRead(t, "creds", "test", []credentialTestFunc{listDynamoTablesTest}),
+		},
+	})
+}
+
+func TestAcceptanceBackend_IamUserWithPermissionsBoundary(t *testing.T) {
+	t.Parallel()
+	roleData := map[string]interface{}{
+		"credential_type":          iamUserCred,
+		"policy_arns":              adminAccessPolicyArn,
+		"permissions_boundary_arn": iamPolicyArn,
+	}
+	logicaltest.Test(t, logicaltest.TestCase{
+		AcceptanceTest: true,
+		PreCheck:       func() { testAccPreCheck(t) },
+		LogicalBackend: getBackend(t),
+		Steps: []logicaltest.TestStep{
+			testAccStepConfig(t),
+			testAccStepWriteRole(t, "test", roleData),
+			testAccStepRead(t, "creds", "test", []credentialTestFunc{listIamUsersTest, describeAzsTestUnauthorized}),
+		},
+	})
+}
+
+func TestAcceptanceBackend_basicSTS(t *testing.T) {
+	t.Parallel()
+	awsAccountID, err := getAccountID()
+	if err != nil {
+		t.Logf("Unable to retrieve user via sts:GetCallerIdentity: %#v", err)
+		t.Skip("Could not determine AWS account ID from sts:GetCallerIdentity for acceptance tests, skipping")
+	}
+	roleName := generateUniqueRoleName(t.Name())
+	userName := generateUniqueUserName(t.Name())
+	accessKey := &awsAccessKey{}
+	logicaltest.Test(t, logicaltest.TestCase{
+		AcceptanceTest: true,
+		PreCheck: func() {
+			testAccPreCheck(t)
+			createUser(t, userName, accessKey)
+			createRole(t, roleName, awsAccountID, []string{ec2PolicyArn})
+			// Sleep for a while because AWS is eventually consistent;
+			// both createUser and createRole depend on this
+			log.Println("[WARN] Sleeping for 10 seconds waiting for AWS...")
+			time.Sleep(10 * time.Second)
+		},
+		LogicalBackend: getBackend(t),
+		Steps: []logicaltest.TestStep{
+			testAccStepConfigWithCreds(t, accessKey),
+			testAccStepRotateRoot(accessKey),
+			testAccStepWritePolicy(t, "test", testDynamoPolicy),
+			testAccStepRead(t, "sts", "test", []credentialTestFunc{listDynamoTablesTest}),
+			testAccStepWriteArnPolicyRef(t, "test", ec2PolicyArn),
+			testAccStepReadSTSWithArnPolicy(t, "test"),
+			testAccStepWriteArnRoleRef(t, "test2", roleName, awsAccountID),
+			testAccStepRead(t, "sts", "test2", []credentialTestFunc{describeInstancesTest}),
+		},
+		Teardown: func() error {
+			if err := deleteTestRole(roleName); err != nil {
+				return err
+			}
+			return deleteTestUser(accessKey, userName)
+		},
+	})
+}
+
+func TestBackend_policyCrud(t *testing.T) {
+	t.Parallel()
+	compacted, err := compactJSON(testDynamoPolicy)
+	if err != nil {
+		t.Fatalf("bad: %s", err)
+	}
+
+	logicaltest.Test(t, logicaltest.TestCase{
+		AcceptanceTest: false,
+		LogicalBackend: getBackend(t),
+		Steps: []logicaltest.TestStep{
+			testAccStepConfig(t),
+			testAccStepWritePolicy(t, "test", testDynamoPolicy),
+			testAccStepReadPolicy(t, "test", compacted),
+			testAccStepDeletePolicy(t, "test"),
+			testAccStepReadPolicy(t, "test", ""),
+		},
+	})
+}
+
+func TestBackend_throttled(t *testing.T) {
+	t.Parallel()
+	config := logical.TestBackendConfig()
+	config.StorageView = &logical.InmemStorage{}
+
+	b := Backend(config)
+	if err := b.Setup(context.Background(), config); err != nil {
+		t.Fatal(err)
+	}
+
+	connData := map[string]interface{}{
+		"credential_type": "iam_user",
+	}
+
+	confReq := &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "roles/something",
+		Storage:   config.StorageView,
+		Data:      connData,
+	}
+
+	resp, err := b.HandleRequest(context.Background(), confReq)
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("failed to write configuration: resp:%#v err:%s", resp, err)
+	}
+
+	// Mock the IAM API call to return a throttled response to the CreateUser API
+	// call
+	b.iamClient = &mockIAMClient{}
+
+	credReq := &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "creds/something",
+		Storage:   config.StorageView,
+	}
+
+	credResp, err := b.HandleRequest(context.Background(), credReq)
+	if err == nil {
+		t.Fatalf("failed to trigger
expected throttling error condition: resp:%#v", credResp) + } + rErr := credResp.Error() + expected := "Error creating IAM user: Throttling: " + if rErr.Error() != expected { + t.Fatalf("error message did not match, expected (%s), got (%s)", expected, rErr.Error()) + } + + // verify the error we got back is returned with a http.StatusBadGateway + code, err := logical.RespondErrorCommon(credReq, credResp, err) + if err == nil { + t.Fatal("expected error after running req/resp/err through RespondErrorCommon, got nil") + } + if code != http.StatusBadGateway { + t.Fatalf("expected HTTP status 'bad gateway', got: (%d)", code) + } +} + +func testAccPreCheck(t *testing.T) { + if !hasAWSCredentials() { + t.Skip("Skipping because AWS credentials could not be resolved. See https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials for information on how to set up AWS credentials.") + } + + initSetup.Do(func() { + if v := os.Getenv("AWS_DEFAULT_REGION"); v == "" { + log.Println("[INFO] Test: Using us-west-2 as test region") + os.Setenv("AWS_DEFAULT_REGION", "us-west-2") + } + }) +} + +func hasAWSCredentials() bool { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + cfg, err := config.LoadDefaultConfig(ctx) + if err != nil { + return false + } + + creds, err := cfg.Credentials.Retrieve(ctx) + if err != nil { + return false + } + + return creds.HasKeys() +} + +func getAccountID() (string, error) { + awsConfig := &aws.Config{ + Region: aws.String("us-east-1"), + HTTPClient: cleanhttp.DefaultClient(), + } + sess, err := session.NewSession(awsConfig) + if err != nil { + return "", err + } + svc := sts.New(sess) + + params := &sts.GetCallerIdentityInput{} + res, err := svc.GetCallerIdentity(params) + if err != nil { + return "", err + } + if res == nil { + return "", fmt.Errorf("got nil response from GetCallerIdentity") + } + + return *res.Account, nil +} + +func createRole(t *testing.T, roleName, awsAccountID string, policyARNs []string) { + const testRoleAssumePolicy = `{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect":"Allow", + "Principal": { + "AWS": "arn:aws:iam::%s:root" + }, + "Action": [ + "sts:AssumeRole", + "sts:SetSourceIdentity" + ] + } + ] +} +` + awsConfig := &aws.Config{ + Region: aws.String("us-east-1"), + HTTPClient: cleanhttp.DefaultClient(), + } + sess, err := session.NewSession(awsConfig) + if err != nil { + t.Fatal(err) + } + svc := iam.New(sess) + trustPolicy := fmt.Sprintf(testRoleAssumePolicy, awsAccountID) + + params := &iam.CreateRoleInput{ + AssumeRolePolicyDocument: aws.String(trustPolicy), + RoleName: aws.String(roleName), + Path: aws.String("/"), + } + + log.Printf("[INFO] AWS CreateRole: %s", roleName) + if _, err := svc.CreateRole(params); err != nil { + t.Fatalf("AWS CreateRole failed: %v", err) + } + + for _, policyARN := range policyARNs { + attachment := &iam.AttachRolePolicyInput{ + PolicyArn: aws.String(policyARN), + RoleName: aws.String(roleName), // Required + } + _, err = svc.AttachRolePolicy(attachment) + + if err != nil { + t.Fatalf("AWS AttachRolePolicy failed: %v", err) + } + } +} + +func createUser(t *testing.T, userName string, accessKey *awsAccessKey) { + // The sequence of user creation actions is carefully chosen to minimize + // impact of stolen IAM user credentials + // 1. Create user, without any permissions or credentials. At this point, + // nobody cares if creds compromised because this user can do nothing. + // 2. Attach the timebomb policy. 
This grants no access but puts a time limit + // on validity of compromised credentials. If this fails, nobody cares + // because the user has no permissions to do anything anyway + // 3. Attach the AdminAccess policy. The IAM user still has no credentials to + // do anything + // 4. Generate API creds to get an actual access key and secret key + timebombPolicyTemplate := `{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Deny", + "Action": "*", + "Resource": "*", + "Condition": { + "DateGreaterThan": { + "aws:CurrentTime": "%s" + } + } + } + ] + } + ` + validity := time.Duration(2 * time.Hour) + expiry := time.Now().Add(validity) + timebombPolicy := fmt.Sprintf(timebombPolicyTemplate, expiry.Format(time.RFC3339)) + awsConfig := &aws.Config{ + Region: aws.String("us-east-1"), + HTTPClient: cleanhttp.DefaultClient(), + } + sess, err := session.NewSession(awsConfig) + if err != nil { + t.Fatal(err) + } + svc := iam.New(sess) + createUserInput := &iam.CreateUserInput{ + UserName: aws.String(userName), + } + log.Printf("[INFO] AWS CreateUser: %s", userName) + if _, err := svc.CreateUser(createUserInput); err != nil { + t.Fatalf("AWS CreateUser failed: %v", err) + } + + putPolicyInput := &iam.PutUserPolicyInput{ + PolicyDocument: aws.String(timebombPolicy), + PolicyName: aws.String("SelfDestructionTimebomb"), + UserName: aws.String(userName), + } + _, err = svc.PutUserPolicy(putPolicyInput) + if err != nil { + t.Fatalf("AWS PutUserPolicy failed: %v", err) + } + + attachUserPolicyInput := &iam.AttachUserPolicyInput{ + PolicyArn: aws.String("arn:aws:iam::aws:policy/AdministratorAccess"), + UserName: aws.String(userName), + } + _, err = svc.AttachUserPolicy(attachUserPolicyInput) + if err != nil { + t.Fatalf("AWS AttachUserPolicy failed, %v", err) + } + + createAccessKeyInput := &iam.CreateAccessKeyInput{ + UserName: aws.String(userName), + } + createAccessKeyOutput, err := svc.CreateAccessKey(createAccessKeyInput) + if err != nil { + t.Fatalf("AWS CreateAccessKey failed: %v", err) + } + if createAccessKeyOutput == nil { + t.Fatalf("AWS CreateAccessKey returned nil") + } + genAccessKey := createAccessKeyOutput.AccessKey + + accessKey.AccessKeyID = *genAccessKey.AccessKeyId + accessKey.SecretAccessKey = *genAccessKey.SecretAccessKey +} + +// Create an IAM Group and add an inline policy and managed policies if specified +func createGroup(t *testing.T, groupName string, inlinePolicy string, managedPolicies []string) { + awsConfig := &aws.Config{ + Region: aws.String("us-east-1"), + HTTPClient: cleanhttp.DefaultClient(), + } + sess, err := session.NewSession(awsConfig) + if err != nil { + t.Fatal(err) + } + svc := iam.New(sess) + createGroupInput := &iam.CreateGroupInput{ + GroupName: aws.String(groupName), + } + log.Printf("[INFO] AWS CreateGroup: %s", groupName) + if _, err := svc.CreateGroup(createGroupInput); err != nil { + t.Fatalf("AWS CreateGroup failed: %v", err) + } + + if len(inlinePolicy) > 0 { + putPolicyInput := &iam.PutGroupPolicyInput{ + PolicyDocument: aws.String(inlinePolicy), + PolicyName: aws.String("InlinePolicy"), + GroupName: aws.String(groupName), + } + _, err = svc.PutGroupPolicy(putPolicyInput) + if err != nil { + t.Fatalf("AWS PutGroupPolicy failed: %v", err) + } + } + + for _, mp := range managedPolicies { + attachGroupPolicyInput := &iam.AttachGroupPolicyInput{ + PolicyArn: aws.String(mp), + GroupName: aws.String(groupName), + } + _, err = svc.AttachGroupPolicy(attachGroupPolicyInput) + if err != nil { + t.Fatalf("AWS AttachGroupPolicy failed, %v", err) + } + } 
+} + +func deleteTestRole(roleName string) error { + awsConfig := &aws.Config{ + Region: aws.String("us-east-1"), + HTTPClient: cleanhttp.DefaultClient(), + } + sess, err := session.NewSession(awsConfig) + if err != nil { + return err + } + svc := iam.New(sess) + listAttachmentsInput := &iam.ListAttachedRolePoliciesInput{ + RoleName: aws.String(roleName), + } + detacher := func(result *iam.ListAttachedRolePoliciesOutput, lastPage bool) bool { + for _, policy := range result.AttachedPolicies { + detachInput := &iam.DetachRolePolicyInput{ + PolicyArn: policy.PolicyArn, + RoleName: aws.String(roleName), // Required + } + _, err := svc.DetachRolePolicy(detachInput) + if err != nil { + log.Printf("[WARN] AWS DetachRolePolicy failed for policy %s: %v", *policy.PolicyArn, err) + } + } + return true + } + if err := svc.ListAttachedRolePoliciesPages(listAttachmentsInput, detacher); err != nil { + log.Printf("[WARN] AWS DetachRolePolicy failed: %v", err) + } + + params := &iam.DeleteRoleInput{ + RoleName: aws.String(roleName), + } + + log.Printf("[INFO] AWS DeleteRole: %s", roleName) + _, err = svc.DeleteRole(params) + + if err != nil { + log.Printf("[WARN] AWS DeleteRole failed: %v", err) + return err + } + return nil +} + +func deleteTestUser(accessKey *awsAccessKey, userName string) error { + awsConfig := &aws.Config{ + Region: aws.String("us-east-1"), + HTTPClient: cleanhttp.DefaultClient(), + } + sess, err := session.NewSession(awsConfig) + if err != nil { + return err + } + svc := iam.New(sess) + userDetachment := &iam.DetachUserPolicyInput{ + PolicyArn: aws.String("arn:aws:iam::aws:policy/AdministratorAccess"), + UserName: aws.String(userName), + } + if _, err := svc.DetachUserPolicy(userDetachment); err != nil { + log.Printf("[WARN] AWS DetachUserPolicy failed: %v", err) + return err + } + + deleteAccessKeyInput := &iam.DeleteAccessKeyInput{ + AccessKeyId: aws.String(accessKey.AccessKeyID), + UserName: aws.String(userName), + } + _, err = svc.DeleteAccessKey(deleteAccessKeyInput) + if err != nil { + log.Printf("[WARN] AWS DeleteAccessKey failed: %v", err) + return err + } + + deleteTestUserPolicyInput := &iam.DeleteUserPolicyInput{ + PolicyName: aws.String("SelfDestructionTimebomb"), + UserName: aws.String(userName), + } + _, err = svc.DeleteUserPolicy(deleteTestUserPolicyInput) + if err != nil { + log.Printf("[WARN] AWS DeleteUserPolicy failed: %v", err) + return err + } + deleteTestUserInput := &iam.DeleteUserInput{ + UserName: aws.String(userName), + } + log.Printf("[INFO] AWS DeleteUser: %s", userName) + _, err = svc.DeleteUser(deleteTestUserInput) + if err != nil { + log.Printf("[WARN] AWS DeleteUser failed: %v", err) + return err + } + + return nil +} + +func deleteTestGroup(groupName string) error { + awsConfig := &aws.Config{ + Region: aws.String("us-east-1"), + HTTPClient: cleanhttp.DefaultClient(), + } + sess, err := session.NewSession(awsConfig) + if err != nil { + return err + } + svc := iam.New(sess) + + // Detach any managed group policies + getGroupsInput := &iam.ListAttachedGroupPoliciesInput{ + GroupName: aws.String(groupName), + } + getGroupsOutput, err := svc.ListAttachedGroupPolicies(getGroupsInput) + if err != nil { + log.Printf("[WARN] AWS ListAttachedGroupPolicies failed: %v", err) + return err + } + for _, g := range getGroupsOutput.AttachedPolicies { + detachGroupInput := &iam.DetachGroupPolicyInput{ + GroupName: aws.String(groupName), + PolicyArn: g.PolicyArn, + } + if _, err := svc.DetachGroupPolicy(detachGroupInput); err != nil { + log.Printf("[WARN] AWS 
DetachGroupPolicy failed: %v", err) + return err + } + } + + // Remove any inline policies + listGroupPoliciesInput := &iam.ListGroupPoliciesInput{ + GroupName: aws.String(groupName), + } + listGroupPoliciesOutput, err := svc.ListGroupPolicies(listGroupPoliciesInput) + if err != nil { + log.Printf("[WARN] AWS ListGroupPolicies failed: %v", err) + return err + } + for _, g := range listGroupPoliciesOutput.PolicyNames { + deleteGroupPolicyInput := &iam.DeleteGroupPolicyInput{ + GroupName: aws.String(groupName), + PolicyName: g, + } + if _, err := svc.DeleteGroupPolicy(deleteGroupPolicyInput); err != nil { + log.Printf("[WARN] AWS DeleteGroupPolicy failed: %v", err) + return err + } + } + + // Delete the group + deleteTestGroupInput := &iam.DeleteGroupInput{ + GroupName: aws.String(groupName), + } + log.Printf("[INFO] AWS DeleteGroup: %s", groupName) + _, err = svc.DeleteGroup(deleteTestGroupInput) + if err != nil { + log.Printf("[WARN] AWS DeleteGroup failed: %v", err) + return err + } + + return nil +} + +func testAccStepConfig(t *testing.T) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "config/root", + Data: map[string]interface{}{ + "region": os.Getenv("AWS_DEFAULT_REGION"), + }, + } +} + +func testAccStepConfigWithCreds(t *testing.T, accessKey *awsAccessKey) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "config/root", + Data: map[string]interface{}{ + "region": os.Getenv("AWS_DEFAULT_REGION"), + }, + PreFlight: func(req *logical.Request) error { + // Values in Data above get eagerly evaluated due to the testing framework. + // In particular, they get evaluated before accessKey gets set by CreateUser + // and thus would fail. By moving to a closure in a PreFlight, we ensure that + // the creds get evaluated lazily after they've been properly set + req.Data["access_key"] = accessKey.AccessKeyID + req.Data["secret_key"] = accessKey.SecretAccessKey + return nil + }, + } +} + +func testAccStepRotateRoot(oldAccessKey *awsAccessKey) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "config/rotate-root", + Check: func(resp *logical.Response) error { + if resp == nil { + return fmt.Errorf("received nil response from config/rotate-root") + } + newAccessKeyID := resp.Data["access_key"].(string) + if newAccessKeyID == oldAccessKey.AccessKeyID { + return fmt.Errorf("rotate-root didn't rotate access key") + } + awsConfig := &aws.Config{ + Region: aws.String("us-east-1"), + HTTPClient: cleanhttp.DefaultClient(), + Credentials: credentials.NewStaticCredentials(oldAccessKey.AccessKeyID, oldAccessKey.SecretAccessKey, ""), + } + // sigh.... 
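+			// Point the shared key struct at the new access key ID so that the
+			// test's Teardown deletes the post-rotation key; the old credentials
+			// captured in awsConfig above are used below to verify that the
+			// rotated-out key stops working.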
+			oldAccessKey.AccessKeyID = newAccessKeyID
+			log.Println("[WARN] Sleeping for 10 seconds waiting for AWS...")
+			time.Sleep(10 * time.Second)
+			sess, err := session.NewSession(awsConfig)
+			if err != nil {
+				return err
+			}
+			svc := sts.New(sess)
+			params := &sts.GetCallerIdentityInput{}
+			_, err = svc.GetCallerIdentity(params)
+			if err == nil {
+				return fmt.Errorf("bad: old credentials succeeded after rotate")
+			}
+			if aerr, ok := err.(awserr.Error); ok {
+				if aerr.Code() != "InvalidClientTokenId" {
+					return fmt.Errorf("unknown error returned from AWS: %#v", aerr)
+				}
+				return nil
+			}
+			return err
+		},
+	}
+}
+
+func testAccStepRead(t *testing.T, path, name string, credentialTests []credentialTestFunc) logicaltest.TestStep {
+	return logicaltest.TestStep{
+		Operation: logical.ReadOperation,
+		Path:      path + "/" + name,
+		Check: func(resp *logical.Response) error {
+			var d struct {
+				AccessKey string `mapstructure:"access_key"`
+				SecretKey string `mapstructure:"secret_key"`
+				STSToken  string `mapstructure:"security_token"`
+			}
+			if err := mapstructure.Decode(resp.Data, &d); err != nil {
+				return err
+			}
+			log.Printf("[WARN] Generated credentials: %v", d)
+			for _, test := range credentialTests {
+				err := test(d.AccessKey, d.SecretKey, d.STSToken)
+				if err != nil {
+					return err
+				}
+			}
+			return nil
+		},
+	}
+}
+
+func testAccStepReadSTSResponse(name string, maximumTTL time.Duration) logicaltest.TestStep {
+	return logicaltest.TestStep{
+		Operation: logical.ReadOperation,
+		Path:      "creds/" + name,
+		Check: func(resp *logical.Response) error {
+			if resp.Secret == nil {
+				return fmt.Errorf("bad: nil Secret returned")
+			}
+			ttl := resp.Secret.TTL
+			if ttl > maximumTTL {
+				return fmt.Errorf("bad: ttl of %d greater than maximum of %d", ttl/time.Second, maximumTTL/time.Second)
+			}
+			return nil
+		},
+	}
+}
+
+func describeInstancesTest(accessKey, secretKey, token string) error {
+	creds := credentials.NewStaticCredentials(accessKey, secretKey, token)
+	awsConfig := &aws.Config{
+		Credentials: creds,
+		Region:      aws.String("us-east-1"),
+		HTTPClient:  cleanhttp.DefaultClient(),
+	}
+	sess, err := session.NewSession(awsConfig)
+	if err != nil {
+		return err
+	}
+	client := ec2.New(sess)
+	log.Printf("[WARN] Verifying that the generated credentials work with ec2:DescribeInstances...")
+	return retryUntilSuccess(func() error {
+		_, err := client.DescribeInstances(&ec2.DescribeInstancesInput{})
+		return err
+	})
+}
+
+func describeAzsTestUnauthorized(accessKey, secretKey, token string) error {
+	creds := credentials.NewStaticCredentials(accessKey, secretKey, token)
+	awsConfig := &aws.Config{
+		Credentials: creds,
+		Region:      aws.String("us-east-1"),
+		HTTPClient:  cleanhttp.DefaultClient(),
+	}
+	sess, err := session.NewSession(awsConfig)
+	if err != nil {
+		return err
+	}
+	client := ec2.New(sess)
+	log.Printf("[WARN] Verifying that the generated credentials don't work with ec2:DescribeAvailabilityZones...")
+	return retryUntilSuccess(func() error {
+		_, err := client.DescribeAvailabilityZones(&ec2.DescribeAvailabilityZonesInput{})
+		// Need to make sure AWS authenticates the generated credentials but does not authorize the operation
+		if err == nil {
+			return fmt.Errorf("operation succeeded when expected failure")
+		}
+		if aerr, ok := err.(awserr.Error); ok {
+			if aerr.Code() == "UnauthorizedOperation" {
+				return nil
+			}
+		}
+		return err
+	})
+}
+
+func assertCreatedIAMUser(accessKey, secretKey, token string) error {
+	creds := credentials.NewStaticCredentials(accessKey, secretKey, token)
+	awsConfig :=
&aws.Config{ + Credentials: creds, + Region: aws.String("us-east-1"), + HTTPClient: cleanhttp.DefaultClient(), + } + sess, err := session.NewSession(awsConfig) + if err != nil { + return err + } + client := iam.New(sess) + log.Printf("[WARN] Checking if IAM User is created properly...") + userOutput, err := client.GetUser(&iam.GetUserInput{}) + if err != nil { + return err + } + + if *userOutput.User.Path != "/path/" { + return fmt.Errorf("bad: got: %#v\nexpected: %#v", userOutput.User.Path, "/path/") + } + + return nil +} + +func listIamUsersTest(accessKey, secretKey, token string) error { + creds := credentials.NewStaticCredentials(accessKey, secretKey, token) + awsConfig := &aws.Config{ + Credentials: creds, + Region: aws.String("us-east-1"), + HTTPClient: cleanhttp.DefaultClient(), + } + sess, err := session.NewSession(awsConfig) + if err != nil { + return err + } + client := iam.New(sess) + log.Printf("[WARN] Verifying that the generated credentials work with iam:ListUsers...") + return retryUntilSuccess(func() error { + _, err := client.ListUsers(&iam.ListUsersInput{}) + return err + }) +} + +func listDynamoTablesTest(accessKey, secretKey, token string) error { + creds := credentials.NewStaticCredentials(accessKey, secretKey, token) + awsConfig := &aws.Config{ + Credentials: creds, + Region: aws.String("us-east-1"), + HTTPClient: cleanhttp.DefaultClient(), + } + sess, err := session.NewSession(awsConfig) + if err != nil { + return err + } + client := dynamodb.New(sess) + log.Printf("[WARN] Verifying that the generated credentials work with dynamodb:ListTables...") + return retryUntilSuccess(func() error { + _, err := client.ListTables(&dynamodb.ListTablesInput{}) + return err + }) +} + +func listS3BucketsTest(accessKey, secretKey, token string) error { + creds := credentials.NewStaticCredentials(accessKey, secretKey, token) + awsConfig := &aws.Config{ + Credentials: creds, + Region: aws.String("us-east-1"), + HTTPClient: cleanhttp.DefaultClient(), + } + sess, err := session.NewSession(awsConfig) + if err != nil { + return err + } + client := s3.New(sess) + log.Printf("[WARN] Verifying that the generated credentials work with s3:ListBuckets...") + return retryUntilSuccess(func() error { + _, err := client.ListBuckets(&s3.ListBucketsInput{}) + return err + }) +} + +func retryUntilSuccess(op func() error) error { + retryCount := 0 + success := false + var err error + for !success && retryCount < 10 { + err = op() + if err == nil { + return nil + } + time.Sleep(time.Second) + retryCount++ + } + return err +} + +func testAccStepReadSTSWithArnPolicy(t *testing.T, name string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "sts/" + name, + ErrorOk: true, + Check: func(resp *logical.Response) error { + if resp.Data["error"] != + "attempted to retrieve iam_user credentials through the sts path; this is not allowed for legacy roles" { + t.Fatalf("bad: %v", resp) + } + return nil + }, + } +} + +func testAccStepWritePolicy(t *testing.T, name string, policy string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "roles/" + name, + Data: map[string]interface{}{ + "policy": policy, + }, + } +} + +func testAccStepDeletePolicy(t *testing.T, n string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.DeleteOperation, + Path: "roles/" + n, + } +} + +func testAccStepReadPolicy(t *testing.T, name string, value string) logicaltest.TestStep { + return logicaltest.TestStep{ + 
Operation: logical.ReadOperation, + Path: "roles/" + name, + Check: func(resp *logical.Response) error { + if resp == nil { + if value == "" { + return nil + } + + return fmt.Errorf("bad: %#v", resp) + } + + expected := map[string]interface{}{ + "policy_arns": []string(nil), + "role_arns": []string(nil), + "policy_document": value, + "credential_type": strings.Join([]string{iamUserCred, federationTokenCred}, ","), + "default_sts_ttl": int64(0), + "max_sts_ttl": int64(0), + "user_path": "", + "permissions_boundary_arn": "", + "iam_groups": []string(nil), + "iam_tags": map[string]string(nil), + } + if !reflect.DeepEqual(resp.Data, expected) { + return fmt.Errorf("bad: got: %#v\nexpected: %#v", resp.Data, expected) + } + return nil + }, + } +} + +const testDynamoPolicy = `{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Stmt1426528957000", + "Effect": "Allow", + "Action": [ + "dynamodb:List*" + ], + "Resource": [ + "*" + ] + } + ] +} +` + +const testS3Policy = `{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:Get*", + "s3:List*" + ], + "Resource": "*" + } + ] +}` + +const ( + adminAccessPolicyArn = "arn:aws:iam::aws:policy/AdministratorAccess" + ec2PolicyArn = "arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess" + iamPolicyArn = "arn:aws:iam::aws:policy/IAMReadOnlyAccess" + dynamoPolicyArn = "arn:aws:iam::aws:policy/AmazonDynamoDBReadOnlyAccess" +) + +func testAccStepWriteRole(t *testing.T, name string, data map[string]interface{}) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "roles/" + name, + Data: data, + } +} + +func testAccStepReadRole(t *testing.T, name string, expected map[string]interface{}) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "roles/" + name, + Check: func(resp *logical.Response) error { + if resp == nil { + if expected == nil { + return nil + } + return fmt.Errorf("bad: nil response") + } + if !reflect.DeepEqual(resp.Data, expected) { + return fmt.Errorf("bad: got %#v\nexpected: %#v", resp.Data, expected) + } + return nil + }, + } +} + +func testAccStepWriteArnPolicyRef(t *testing.T, name string, arn string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "roles/" + name, + Data: map[string]interface{}{ + "arn": ec2PolicyArn, + }, + } +} + +func TestAcceptanceBackend_basicPolicyArnRef(t *testing.T) { + t.Parallel() + logicaltest.Test(t, logicaltest.TestCase{ + AcceptanceTest: true, + PreCheck: func() { testAccPreCheck(t) }, + LogicalBackend: getBackend(t), + Steps: []logicaltest.TestStep{ + testAccStepConfig(t), + testAccStepWriteArnPolicyRef(t, "test", ec2PolicyArn), + testAccStepRead(t, "creds", "test", []credentialTestFunc{describeInstancesTest}), + }, + }) +} + +func TestAcceptanceBackend_iamUserManagedInlinePoliciesGroups(t *testing.T) { + t.Parallel() + compacted, err := compactJSON(testDynamoPolicy) + if err != nil { + t.Fatalf("bad: %#v", err) + } + groupName := generateUniqueGroupName(t.Name()) + roleData := map[string]interface{}{ + "policy_document": testDynamoPolicy, + "policy_arns": []string{ec2PolicyArn, iamPolicyArn}, + "iam_groups": []string{groupName}, + "credential_type": iamUserCred, + "user_path": "/path/", + } + expectedRoleData := map[string]interface{}{ + "policy_document": compacted, + "policy_arns": []string{ec2PolicyArn, iamPolicyArn}, + "credential_type": iamUserCred, + "role_arns": []string(nil), + "default_sts_ttl": int64(0), + 
"max_sts_ttl": int64(0), + "user_path": "/path/", + "permissions_boundary_arn": "", + "iam_groups": []string{groupName}, + "iam_tags": map[string]string(nil), + } + + logicaltest.Test(t, logicaltest.TestCase{ + AcceptanceTest: true, + PreCheck: func() { + testAccPreCheck(t) + createGroup(t, groupName, testS3Policy, []string{}) + }, + LogicalBackend: getBackend(t), + Steps: []logicaltest.TestStep{ + testAccStepConfig(t), + testAccStepWriteRole(t, "test", roleData), + testAccStepReadRole(t, "test", expectedRoleData), + testAccStepRead(t, "creds", "test", []credentialTestFunc{describeInstancesTest, listIamUsersTest, listDynamoTablesTest, assertCreatedIAMUser, listS3BucketsTest}), + testAccStepRead(t, "sts", "test", []credentialTestFunc{describeInstancesTest, listIamUsersTest, listDynamoTablesTest, listS3BucketsTest}), + }, + Teardown: func() error { + return deleteTestGroup(groupName) + }, + }) +} + +// Similar to TestBackend_iamUserManagedInlinePoliciesGroups() but managing +// policies only with groups +func TestAcceptanceBackend_iamUserGroups(t *testing.T) { + t.Parallel() + group1Name := generateUniqueGroupName(t.Name()) + group2Name := generateUniqueGroupName(t.Name()) + roleData := map[string]interface{}{ + "iam_groups": []string{group1Name, group2Name}, + "credential_type": iamUserCred, + "user_path": "/path/", + } + expectedRoleData := map[string]interface{}{ + "policy_document": "", + "policy_arns": []string(nil), + "credential_type": iamUserCred, + "role_arns": []string(nil), + "default_sts_ttl": int64(0), + "max_sts_ttl": int64(0), + "user_path": "/path/", + "permissions_boundary_arn": "", + "iam_groups": []string{group1Name, group2Name}, + "iam_tags": map[string]string(nil), + } + + logicaltest.Test(t, logicaltest.TestCase{ + AcceptanceTest: true, + PreCheck: func() { + testAccPreCheck(t) + createGroup(t, group1Name, testS3Policy, []string{ec2PolicyArn, iamPolicyArn}) + createGroup(t, group2Name, testDynamoPolicy, []string{}) + }, + LogicalBackend: getBackend(t), + Steps: []logicaltest.TestStep{ + testAccStepConfig(t), + testAccStepWriteRole(t, "test", roleData), + testAccStepReadRole(t, "test", expectedRoleData), + testAccStepRead(t, "creds", "test", []credentialTestFunc{describeInstancesTest, listIamUsersTest, listDynamoTablesTest, assertCreatedIAMUser, listS3BucketsTest}), + testAccStepRead(t, "sts", "test", []credentialTestFunc{describeInstancesTest, listIamUsersTest, listDynamoTablesTest, listS3BucketsTest}), + }, + Teardown: func() error { + if err := deleteTestGroup(group1Name); err != nil { + return err + } + return deleteTestGroup(group2Name) + }, + }) +} + +func TestAcceptanceBackend_AssumedRoleWithPolicyDoc(t *testing.T) { + t.Parallel() + roleName := generateUniqueRoleName(t.Name()) + // This looks a bit curious. The policy document and the role document act + // as a logical intersection of policies. The role allows ec2:Describe* + // (among other permissions). This policy allows everything BUT + // ec2:DescribeAvailabilityZones. 
+func TestAcceptanceBackend_AssumedRoleWithPolicyDoc(t *testing.T) {
+	t.Parallel()
+	roleName := generateUniqueRoleName(t.Name())
+	// This looks a bit curious. The policy document and the role document act
+	// as a logical intersection of policies. The role allows ec2:Describe*
+	// (among other permissions). This policy allows everything BUT
+	// ec2:DescribeAvailabilityZones. Thus, the logical intersection of the two
+	// is all ec2:Describe* EXCEPT ec2:DescribeAvailabilityZones, and so the
+	// describeAZs call should fail
+	allowAllButDescribeAzs := `
+{
+	"Version": "2012-10-17",
+	"Statement": [{
+		"Effect": "Allow",
+		"NotAction": "ec2:DescribeAvailabilityZones",
+		"Resource": "*"
+	}]
+}
+`
+	awsAccountID, err := getAccountID()
+	if err != nil {
+		t.Logf("Unable to retrieve user via sts:GetCallerIdentity: %#v", err)
+		t.Skip("Could not determine AWS account ID from sts:GetCallerIdentity for acceptance tests, skipping")
+	}
+	roleData := map[string]interface{}{
+		"policy_document": allowAllButDescribeAzs,
+		"role_arns":       []string{fmt.Sprintf("arn:aws:iam::%s:role/%s", awsAccountID, roleName)},
+		"credential_type": assumedRoleCred,
+	}
+	logicaltest.Test(t, logicaltest.TestCase{
+		AcceptanceTest: true,
+		PreCheck: func() {
+			testAccPreCheck(t)
+			createRole(t, roleName, awsAccountID, []string{ec2PolicyArn})
+			// Sleep briefly because AWS is eventually consistent
+			log.Println("[WARN] Sleeping for 10 seconds waiting for AWS...")
+			time.Sleep(10 * time.Second)
+		},
+		LogicalBackend: getBackend(t),
+		Steps: []logicaltest.TestStep{
+			testAccStepConfig(t),
+			testAccStepWriteRole(t, "test", roleData),
+			testAccStepRead(t, "sts", "test", []credentialTestFunc{describeInstancesTest, describeAzsTestUnauthorized}),
+			testAccStepRead(t, "creds", "test", []credentialTestFunc{describeInstancesTest, describeAzsTestUnauthorized}),
+		},
+		Teardown: func() error {
+			return deleteTestRole(roleName)
+		},
+	})
+}
+
+func TestAcceptanceBackend_AssumedRoleWithPolicyARN(t *testing.T) {
+	t.Parallel()
+	roleName := generateUniqueRoleName(t.Name())
+
+	awsAccountID, err := getAccountID()
+	if err != nil {
+		t.Logf("Unable to retrieve user via sts:GetCallerIdentity: %#v", err)
+		t.Skip("Could not determine AWS account ID from sts:GetCallerIdentity for acceptance tests, skipping")
+	}
+	roleData := map[string]interface{}{
+		"policy_arns":     iamPolicyArn,
+		"role_arns":       []string{fmt.Sprintf("arn:aws:iam::%s:role/%s", awsAccountID, roleName)},
+		"credential_type": assumedRoleCred,
+	}
+	logicaltest.Test(t, logicaltest.TestCase{
+		AcceptanceTest: true,
+		PreCheck: func() {
+			testAccPreCheck(t)
+			createRole(t, roleName, awsAccountID, []string{ec2PolicyArn, iamPolicyArn})
+			log.Printf("[WARN] Sleeping for 10 seconds waiting for AWS...")
+			time.Sleep(10 * time.Second)
+		},
+		LogicalBackend: getBackend(t),
+		Steps: []logicaltest.TestStep{
+			testAccStepConfig(t),
+			testAccStepWriteRole(t, "test", roleData),
+			testAccStepRead(t, "sts", "test", []credentialTestFunc{listIamUsersTest, describeAzsTestUnauthorized}),
+			testAccStepRead(t, "creds", "test", []credentialTestFunc{listIamUsersTest, describeAzsTestUnauthorized}),
+		},
+		Teardown: func() error {
+			return deleteTestRole(roleName)
+		},
+	})
+}
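[Editor's note: the creds/ and sts/ reads performed by testAccStepRead above correspond to ordinary secret reads over the API. A minimal sketch with the Go client, assuming the mount lives at aws/ and a role named "test" already exists.]

    package main

    import (
        "fmt"
        "log"

        "github.com/hashicorp/vault/api"
    )

    func main() {
        client, err := api.NewClient(api.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }

        secret, err := client.Logical().Read("aws/creds/test")
        if err != nil {
            log.Fatal(err)
        }

        // The backend returns an access key pair; STS-based credential types
        // also include a security_token in the response data.
        fmt.Println(secret.Data["access_key"], secret.Data["secret_key"])
    }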
+func TestAcceptanceBackend_AssumedRoleWithGroups(t *testing.T) {
+	t.Parallel()
+	roleName := generateUniqueRoleName(t.Name())
+	groupName := generateUniqueGroupName(t.Name())
+	// This looks a bit curious. The policy document and the role document act
+	// as a logical intersection of policies. The role allows ec2:Describe*
+	// (among other permissions). This policy allows everything BUT
+	// ec2:DescribeAvailabilityZones. Thus, the logical intersection of the two
+	// is all ec2:Describe* EXCEPT ec2:DescribeAvailabilityZones, and so the
+	// describeAZs call should fail
+	allowAllButDescribeAzs := `{
+	"Version": "2012-10-17",
+	"Statement": [
+		{
+			"Effect": "Allow",
+			"NotAction": "ec2:DescribeAvailabilityZones",
+			"Resource": "*"
+		}
+	]
+}`
+	awsAccountID, err := getAccountID()
+	if err != nil {
+		t.Logf("Unable to retrieve user via sts:GetCallerIdentity: %#v", err)
+		t.Skip("Could not determine AWS account ID from sts:GetCallerIdentity for acceptance tests, skipping")
+	}
+
+	roleData := map[string]interface{}{
+		"iam_groups":      []string{groupName},
+		"role_arns":       []string{fmt.Sprintf("arn:aws:iam::%s:role/%s", awsAccountID, roleName)},
+		"credential_type": assumedRoleCred,
+	}
+	logicaltest.Test(t, logicaltest.TestCase{
+		AcceptanceTest: true,
+		PreCheck: func() {
+			testAccPreCheck(t)
+			createRole(t, roleName, awsAccountID, []string{ec2PolicyArn})
+			createGroup(t, groupName, allowAllButDescribeAzs, []string{})
+			// Sleep briefly because AWS is eventually consistent
+			log.Println("[WARN] Sleeping for 10 seconds waiting for AWS...")
+			time.Sleep(10 * time.Second)
+		},
+		LogicalBackend: getBackend(t),
+		Steps: []logicaltest.TestStep{
+			testAccStepConfig(t),
+			testAccStepWriteRole(t, "test", roleData),
+			testAccStepRead(t, "sts", "test", []credentialTestFunc{describeInstancesTest, describeAzsTestUnauthorized}),
+			testAccStepRead(t, "creds", "test", []credentialTestFunc{describeInstancesTest, describeAzsTestUnauthorized}),
+		},
+		Teardown: func() error {
+			if err := deleteTestGroup(groupName); err != nil {
+				return err
+			}
+			return deleteTestRole(roleName)
+		},
+	})
+}
+
+func TestAcceptanceBackend_FederationTokenWithPolicyARN(t *testing.T) {
+	t.Parallel()
+	userName := generateUniqueUserName(t.Name())
+	accessKey := &awsAccessKey{}
+
+	roleData := map[string]interface{}{
+		"policy_arns":     dynamoPolicyArn,
+		"credential_type": federationTokenCred,
+	}
+	logicaltest.Test(t, logicaltest.TestCase{
+		AcceptanceTest: true,
+		PreCheck: func() {
+			testAccPreCheck(t)
+			createUser(t, userName, accessKey)
+			// Sleep briefly because AWS is eventually consistent
+			log.Println("[WARN] Sleeping for 10 seconds waiting for AWS...")
+			time.Sleep(10 * time.Second)
+		},
+		LogicalBackend: getBackend(t),
+		Steps: []logicaltest.TestStep{
+			testAccStepConfigWithCreds(t, accessKey),
+			testAccStepWriteRole(t, "test", roleData),
+			testAccStepRead(t, "sts", "test", []credentialTestFunc{listDynamoTablesTest, describeAzsTestUnauthorized}),
+			testAccStepRead(t, "creds", "test", []credentialTestFunc{listDynamoTablesTest, describeAzsTestUnauthorized}),
+		},
+		Teardown: func() error {
+			return deleteTestUser(accessKey, userName)
+		},
+	})
+}
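[Editor's note: the single-statement policy used in the next test works because the backend normalizes the Statement field while combining policies; see StatementEntries.UnmarshalJSON in iam_policies.go later in this diff. A self-contained sketch of that normalization, mirroring the behavior as I read it:]

    package main

    import (
        "encoding/json"
        "fmt"
        "log"
    )

    // normalizeStatements wraps a Statement that is a single JSON object into a
    // one-element slice, so single-statement and list-statement policies can be
    // handled uniformly.
    func normalizeStatements(raw json.RawMessage) ([]interface{}, error) {
        var data interface{}
        if err := json.Unmarshal(raw, &data); err != nil {
            return nil, err
        }
        if list, ok := data.([]interface{}); ok {
            return list, nil
        }
        return []interface{}{data}, nil
    }

    func main() {
        single := json.RawMessage(`{"Effect":"Allow","Action":"s3:Get*","Resource":"*"}`)
        list := json.RawMessage(`[{"Effect":"Allow","Action":"s3:Get*","Resource":"*"}]`)

        for _, raw := range []json.RawMessage{single, list} {
            stmts, err := normalizeStatements(raw)
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(len(stmts)) // 1 in both cases
        }
    }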
+func TestAcceptanceBackend_FederationTokenWithGroups(t *testing.T) {
+	t.Parallel()
+	userName := generateUniqueUserName(t.Name())
+	groupName := generateUniqueGroupName(t.Name())
+	accessKey := &awsAccessKey{}
+
+	// IAM policy where Statement is a single element, not a list
+	iamSingleStatementPolicy := `{
+		"Version": "2012-10-17",
+		"Statement": {
+			"Effect": "Allow",
+			"Action": [
+				"s3:Get*",
+				"s3:List*"
+			],
+			"Resource": "*"
+		}
+	}`
+
+	roleData := map[string]interface{}{
+		"iam_groups":      []string{groupName},
+		"policy_document": iamSingleStatementPolicy,
+		"credential_type": federationTokenCred,
+	}
+	logicaltest.Test(t, logicaltest.TestCase{
+		AcceptanceTest: true,
+		PreCheck: func() {
+			testAccPreCheck(t)
+			createUser(t, userName, accessKey)
+			createGroup(t, groupName, "", []string{dynamoPolicyArn})
+			// Sleep briefly because AWS is eventually consistent
+			log.Println("[WARN] Sleeping for 10 seconds waiting for AWS...")
+			time.Sleep(10 * time.Second)
+		},
+		LogicalBackend: getBackend(t),
+		Steps: []logicaltest.TestStep{
+			testAccStepConfigWithCreds(t, accessKey),
+			testAccStepWriteRole(t, "test", roleData),
+			testAccStepRead(t, "sts", "test", []credentialTestFunc{listDynamoTablesTest, describeAzsTestUnauthorized, listS3BucketsTest}),
+			testAccStepRead(t, "creds", "test", []credentialTestFunc{listDynamoTablesTest, describeAzsTestUnauthorized, listS3BucketsTest}),
+		},
+		Teardown: func() error {
+			if err := deleteTestGroup(groupName); err != nil {
+				return err
+			}
+			return deleteTestUser(accessKey, userName)
+		},
+	})
+}
+
+func TestAcceptanceBackend_RoleDefaultSTSTTL(t *testing.T) {
+	t.Parallel()
+	roleName := generateUniqueRoleName(t.Name())
+	minAwsAssumeRoleDuration := 900
+	awsAccountID, err := getAccountID()
+	if err != nil {
+		t.Logf("Unable to retrieve user via sts:GetCallerIdentity: %#v", err)
+		t.Skip("Could not determine AWS account ID from sts:GetCallerIdentity for acceptance tests, skipping")
+	}
+	roleData := map[string]interface{}{
+		"role_arns":       []string{fmt.Sprintf("arn:aws:iam::%s:role/%s", awsAccountID, roleName)},
+		"credential_type": assumedRoleCred,
+		"default_sts_ttl": minAwsAssumeRoleDuration,
+		"max_sts_ttl":     minAwsAssumeRoleDuration,
+	}
+	logicaltest.Test(t, logicaltest.TestCase{
+		AcceptanceTest: true,
+		PreCheck: func() {
+			testAccPreCheck(t)
+			createRole(t, roleName, awsAccountID, []string{ec2PolicyArn})
+			log.Println("[WARN] Sleeping for 10 seconds waiting for AWS...")
+			time.Sleep(10 * time.Second)
+		},
+		LogicalBackend: getBackend(t),
+		Steps: []logicaltest.TestStep{
+			testAccStepConfig(t),
+			testAccStepWriteRole(t, "test", roleData),
+			testAccStepReadSTSResponse("test", time.Duration(minAwsAssumeRoleDuration)*time.Second), // allow a little slack
+		},
+		Teardown: func() error {
+			return deleteTestRole(roleName)
+		},
+	})
+}
+
+func TestBackend_policyArnCrud(t *testing.T) {
+	t.Parallel()
+	logicaltest.Test(t, logicaltest.TestCase{
+		AcceptanceTest: false,
+		LogicalBackend: getBackend(t),
+		Steps: []logicaltest.TestStep{
+			testAccStepConfig(t),
+			testAccStepWriteArnPolicyRef(t, "test", ec2PolicyArn),
+			testAccStepReadArnPolicy(t, "test", ec2PolicyArn),
+			testAccStepDeletePolicy(t, "test"),
+			testAccStepReadArnPolicy(t, "test", ""),
+		},
+	})
+}
+
+func testAccStepReadArnPolicy(t *testing.T, name string, value string) logicaltest.TestStep {
+	return logicaltest.TestStep{
+		Operation: logical.ReadOperation,
+		Path:      "roles/" + name,
+		Check: func(resp *logical.Response) error {
+			if resp == nil {
+				if value == "" {
+					return nil
+				}
+
+				return fmt.Errorf("bad: %#v", resp)
+			}
+
+			expected := map[string]interface{}{
+				"policy_arns":              []string{value},
+				"role_arns":                []string(nil),
+				"policy_document":          "",
+				"credential_type":          iamUserCred,
+				"default_sts_ttl":          int64(0),
+				"max_sts_ttl":              int64(0),
+				"user_path":                "",
+				"permissions_boundary_arn": "",
+				"iam_groups":               []string(nil),
+				"iam_tags":                 map[string]string(nil),
+			}
+			if !reflect.DeepEqual(resp.Data, expected) {
+				return fmt.Errorf("bad: got: %#v\nexpected: %#v", resp.Data, expected)
+			}
+
+			return nil
+		},
+	}
+}
+
+func testAccStepWriteArnRoleRef(t *testing.T, vaultRoleName, awsRoleName, awsAccountID string) logicaltest.TestStep {
+	return logicaltest.TestStep{
+		Operation: logical.UpdateOperation,
+		Path:      "roles/" + vaultRoleName,
+		Data: map[string]interface{}{
+			"arn": 
fmt.Sprintf("arn:aws:iam::%s:role/%s", awsAccountID, awsRoleName), + }, + } +} + +func TestBackend_iamGroupsCrud(t *testing.T) { + t.Parallel() + logicaltest.Test(t, logicaltest.TestCase{ + AcceptanceTest: false, + LogicalBackend: getBackend(t), + Steps: []logicaltest.TestStep{ + testAccStepConfig(t), + testAccStepWriteIamGroups(t, "test", []string{"group1", "group2"}), + testAccStepReadIamGroups(t, "test", []string{"group1", "group2"}), + testAccStepDeletePolicy(t, "test"), + testAccStepReadIamGroups(t, "test", []string{}), + }, + }) +} + +func testAccStepWriteIamGroups(t *testing.T, name string, groups []string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "roles/" + name, + Data: map[string]interface{}{ + "credential_type": iamUserCred, + "iam_groups": groups, + }, + } +} + +func testAccStepReadIamGroups(t *testing.T, name string, groups []string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "roles/" + name, + Check: func(resp *logical.Response) error { + if resp == nil { + if len(groups) == 0 { + return nil + } + + return fmt.Errorf("bad: %#v", resp) + } + + expected := map[string]interface{}{ + "policy_arns": []string(nil), + "role_arns": []string(nil), + "policy_document": "", + "credential_type": iamUserCred, + "default_sts_ttl": int64(0), + "max_sts_ttl": int64(0), + "user_path": "", + "permissions_boundary_arn": "", + "iam_groups": groups, + "iam_tags": map[string]string(nil), + } + if !reflect.DeepEqual(resp.Data, expected) { + return fmt.Errorf("bad: got: %#v\nexpected: %#v", resp.Data, expected) + } + + return nil + }, + } +} + +func TestBackend_iamTagsCrud(t *testing.T) { + logicaltest.Test(t, logicaltest.TestCase{ + AcceptanceTest: false, + LogicalBackend: getBackend(t), + Steps: []logicaltest.TestStep{ + testAccStepConfig(t), + testAccStepWriteIamTags(t, "test", map[string]string{"key1": "value1", "key2": "value2"}), + testAccStepReadIamTags(t, "test", map[string]string{"key1": "value1", "key2": "value2"}), + testAccStepDeletePolicy(t, "test"), + testAccStepReadIamTags(t, "test", map[string]string{}), + }, + }) +} + +func testAccStepWriteIamTags(t *testing.T, name string, tags map[string]string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "roles/" + name, + Data: map[string]interface{}{ + "credential_type": iamUserCred, + "iam_tags": tags, + }, + } +} + +func testAccStepReadIamTags(t *testing.T, name string, tags map[string]string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "roles/" + name, + Check: func(resp *logical.Response) error { + if resp == nil { + if len(tags) == 0 { + return nil + } + + return fmt.Errorf("vault response not received") + } + + expected := map[string]interface{}{ + "policy_arns": []string(nil), + "role_arns": []string(nil), + "policy_document": "", + "credential_type": iamUserCred, + "default_sts_ttl": int64(0), + "max_sts_ttl": int64(0), + "user_path": "", + "permissions_boundary_arn": "", + "iam_groups": []string(nil), + "iam_tags": tags, + } + if !reflect.DeepEqual(resp.Data, expected) { + return fmt.Errorf("bad: got: %#v\nexpected: %#v", resp.Data, expected) + } + + return nil + }, + } +} + +func generateUniqueRoleName(prefix string) string { + return generateUniqueName(prefix, 64) +} + +func generateUniqueUserName(prefix string) string { + return generateUniqueName(prefix, 64) +} + +func generateUniqueGroupName(prefix string) 
string {
+	return generateUniqueName(prefix, 128)
+}
+
+func generateUniqueName(prefix string, maxLength int) string {
+	name := testhelpers.RandomWithPrefix(prefix)
+	if len(name) > maxLength {
+		return name[:maxLength]
+	}
+	return name
+}
+
+type awsAccessKey struct {
+	AccessKeyID     string
+	SecretAccessKey string
+}
+
+type credentialTestFunc func(string, string, string) error
diff --git a/builtin/logical/aws/client.go b/builtin/logical/aws/client.go
new file mode 100644
index 0000000..71d24f3
--- /dev/null
+++ b/builtin/logical/aws/client.go
@@ -0,0 +1,107 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package aws
+
+import (
+	"context"
+	"fmt"
+	"os"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/iam"
+	"github.com/aws/aws-sdk-go/service/sts"
+	cleanhttp "github.com/hashicorp/go-cleanhttp"
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/go-secure-stdlib/awsutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// NOTE: The caller is required to ensure that b.clientMutex is at least read locked
+func getRootConfig(ctx context.Context, s logical.Storage, clientType string, logger hclog.Logger) (*aws.Config, error) {
+	credsConfig := &awsutil.CredentialsConfig{}
+	var endpoint string
+	var maxRetries int = aws.UseServiceDefaultRetries
+
+	entry, err := s.Get(ctx, "config/root")
+	if err != nil {
+		return nil, err
+	}
+	if entry != nil {
+		var config rootConfig
+		if err := entry.DecodeJSON(&config); err != nil {
+			return nil, fmt.Errorf("error reading root configuration: %w", err)
+		}
+
+		credsConfig.AccessKey = config.AccessKey
+		credsConfig.SecretKey = config.SecretKey
+		credsConfig.Region = config.Region
+		maxRetries = config.MaxRetries
+		switch {
+		case clientType == "iam" && config.IAMEndpoint != "":
+			endpoint = config.IAMEndpoint
+		case clientType == "sts" && config.STSEndpoint != "":
+			endpoint = config.STSEndpoint
+		}
+	}
+
+	if credsConfig.Region == "" {
+		credsConfig.Region = os.Getenv("AWS_REGION")
+		if credsConfig.Region == "" {
+			credsConfig.Region = os.Getenv("AWS_DEFAULT_REGION")
+			if credsConfig.Region == "" {
+				credsConfig.Region = "us-east-1"
+			}
+		}
+	}
+
+	credsConfig.HTTPClient = cleanhttp.DefaultClient()
+
+	credsConfig.Logger = logger
+
+	creds, err := credsConfig.GenerateCredentialChain()
+	if err != nil {
+		return nil, err
+	}
+
+	return &aws.Config{
+		Credentials: creds,
+		Region:      aws.String(credsConfig.Region),
+		Endpoint:    &endpoint,
+		HTTPClient:  cleanhttp.DefaultClient(),
+		MaxRetries:  aws.Int(maxRetries),
+	}, nil
+}
+
+func nonCachedClientIAM(ctx context.Context, s logical.Storage, logger hclog.Logger) (*iam.IAM, error) {
+	awsConfig, err := getRootConfig(ctx, s, "iam", logger)
+	if err != nil {
+		return nil, err
+	}
+	sess, err := session.NewSession(awsConfig)
+	if err != nil {
+		return nil, err
+	}
+	client := iam.New(sess)
+	if client == nil {
+		return nil, fmt.Errorf("could not obtain iam client")
+	}
+	return client, nil
+}
+
+func nonCachedClientSTS(ctx context.Context, s logical.Storage, logger hclog.Logger) (*sts.STS, error) {
+	awsConfig, err := getRootConfig(ctx, s, "sts", logger)
+	if err != nil {
+		return nil, err
+	}
+	sess, err := session.NewSession(awsConfig)
+	if err != nil {
+		return nil, err
+	}
+	client := sts.New(sess)
+	if client == nil {
+		return nil, fmt.Errorf("could not obtain sts client")
+	}
+	return client, nil
+}
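[Editor's note: when no region is stored in config/root, getRootConfig above falls back to AWS_REGION, then AWS_DEFAULT_REGION, then us-east-1. The chain in isolation, as a self-contained sketch:]

    package main

    import (
        "fmt"
        "os"
    )

    // resolveRegion mirrors getRootConfig's fallback chain: explicit config,
    // then AWS_REGION, then AWS_DEFAULT_REGION, then the us-east-1 default.
    func resolveRegion(configured string) string {
        if configured != "" {
            return configured
        }
        if r := os.Getenv("AWS_REGION"); r != "" {
            return r
        }
        if r := os.Getenv("AWS_DEFAULT_REGION"); r != "" {
            return r
        }
        return "us-east-1"
    }

    func main() {
        fmt.Println(resolveRegion("")) // "us-east-1" when neither env var is set
    }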
diff --git a/builtin/logical/aws/cmd/aws/main.go b/builtin/logical/aws/cmd/aws/main.go
new file mode 100644
index 0000000..28de1eb
--- /dev/null
+++ b/builtin/logical/aws/cmd/aws/main.go
@@ -0,0 +1,34 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package main
+
+import (
+	"os"
+
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/builtin/logical/aws"
+	"github.com/hashicorp/vault/sdk/plugin"
+)
+
+func main() {
+	apiClientMeta := &api.PluginAPIClientMeta{}
+	flags := apiClientMeta.FlagSet()
+	flags.Parse(os.Args[1:])
+
+	tlsConfig := apiClientMeta.GetTLSConfig()
+	tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig)
+
+	if err := plugin.ServeMultiplex(&plugin.ServeOpts{
+		BackendFactoryFunc: aws.Factory,
+		// set the TLSProviderFunc so that the plugin maintains backwards
+		// compatibility with Vault versions that don't support plugin AutoMTLS
+		TLSProviderFunc: tlsProviderFunc,
+	}); err != nil {
+		logger := hclog.New(&hclog.LoggerOptions{})
+
+		logger.Error("plugin shutting down", "error", err)
+		os.Exit(1)
+	}
+}
diff --git a/builtin/logical/aws/iam_policies.go b/builtin/logical/aws/iam_policies.go
new file mode 100644
index 0000000..002a738
--- /dev/null
+++ b/builtin/logical/aws/iam_policies.go
@@ -0,0 +1,144 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package aws
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/iam"
+	"github.com/aws/aws-sdk-go/service/iam/iamiface"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// PolicyDocument represents an IAM policy document
+type PolicyDocument struct {
+	Version    string           `json:"Version"`
+	Statements StatementEntries `json:"Statement"`
+}
+
+// StatementEntries is a slice of statements that make up a PolicyDocument
+type StatementEntries []interface{}
+
+// UnmarshalJSON is defined here for StatementEntries because the Statement
+// portion of an IAM policy can be either a list or a single element. When it
+// is a single element, this wraps it in a []interface{} so that it is easy to
+// combine with other policy statements:
+// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_statement.html
+func (se *StatementEntries) UnmarshalJSON(b []byte) error {
+	var out StatementEntries
+
+	var data interface{}
+	if err := json.Unmarshal(b, &data); err != nil {
+		return err
+	}
+
+	switch t := data.(type) {
+	case []interface{}:
+		out = t
+	case interface{}:
+		out = []interface{}{t}
+	default:
+		return fmt.Errorf("unsupported data type %T for StatementEntries", t)
+	}
+	*se = out
+	return nil
+}
+
+// getGroupPolicies takes a list of IAM Group names and returns a list of their
+// inline policy documents, and a list of the attached managed policy ARNs
+func (b *backend) getGroupPolicies(ctx context.Context, s logical.Storage, iamGroups []string) ([]string, []string, error) {
+	var groupPolicies []string
+	var groupPolicyARNs []string
+	var err error
+	var agp *iam.ListAttachedGroupPoliciesOutput
+	var inlinePolicies *iam.ListGroupPoliciesOutput
+	var inlinePolicyDoc *iam.GetGroupPolicyOutput
+	var iamClient iamiface.IAMAPI
+
+	// Return early if there are no groups, to avoid creating an IAM client
+	// needlessly
+	if len(iamGroups) == 0 {
+		return nil, nil, nil
+	}
+
+	iamClient, err = b.clientIAM(ctx, s)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	for _, g := range iamGroups {
+		// Collect managed policy ARNs from the IAM Group
+		agp, err = 
iamClient.ListAttachedGroupPoliciesWithContext(ctx, &iam.ListAttachedGroupPoliciesInput{ + GroupName: aws.String(g), + }) + if err != nil { + return nil, nil, err + } + for _, p := range agp.AttachedPolicies { + groupPolicyARNs = append(groupPolicyARNs, *p.PolicyArn) + } + + // Collect inline policy names from the IAM Group + inlinePolicies, err = iamClient.ListGroupPoliciesWithContext(ctx, &iam.ListGroupPoliciesInput{ + GroupName: aws.String(g), + }) + if err != nil { + return nil, nil, err + } + for _, iP := range inlinePolicies.PolicyNames { + inlinePolicyDoc, err = iamClient.GetGroupPolicyWithContext(ctx, &iam.GetGroupPolicyInput{ + GroupName: &g, + PolicyName: iP, + }) + if err != nil { + return nil, nil, err + } + if inlinePolicyDoc != nil && inlinePolicyDoc.PolicyDocument != nil { + var policyStr string + if policyStr, err = url.QueryUnescape(*inlinePolicyDoc.PolicyDocument); err != nil { + return nil, nil, err + } + groupPolicies = append(groupPolicies, policyStr) + } + } + } + return groupPolicies, groupPolicyARNs, nil +} + +// combinePolicyDocuments takes policy strings as input, and combines them into +// a single policy document string +func combinePolicyDocuments(policies ...string) (string, error) { + var policy string + var err error + var policyBytes []byte + newPolicy := PolicyDocument{ + // 2012-10-17 is the current version of the AWS policy language: + // https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_version.html + Version: "2012-10-17", + } + newPolicy.Statements = make(StatementEntries, 0, len(policies)) + + for _, p := range policies { + if len(p) == 0 { + continue + } + var tmpDoc PolicyDocument + err = json.Unmarshal([]byte(p), &tmpDoc) + if err != nil { + return "", err + } + newPolicy.Statements = append(newPolicy.Statements, tmpDoc.Statements...) + } + + policyBytes, err = json.Marshal(&newPolicy) + if err != nil { + return "", err + } + policy = string(policyBytes) + return policy, nil +} diff --git a/builtin/logical/aws/iam_policies_test.go b/builtin/logical/aws/iam_policies_test.go new file mode 100644 index 0000000..7f8f96a --- /dev/null +++ b/builtin/logical/aws/iam_policies_test.go @@ -0,0 +1,264 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package aws + +import ( + "context" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/aws/aws-sdk-go/service/iam/iamiface" + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/assert" +) + +const ec2DescribePolicy = `{"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "Action": ["ec2:DescribeInstances"], "Resource": "*"}]}` + +// ec2AllPolicy also uses a string instead of a list for the Action +const ec2AllPolicy = `{"Version": "2012-10-17","Statement": [{"Effect": "Allow", "Action": "ec2:*", "Resource": "*"}]}` + +// ec2SingleStatement is an example of the Statement portion containing a single statement that's not a list +const ec2SingleStatement = `{"Version": "2012-10-17", "Statement": {"Effect": "Allow", "Action": ["ec2:DescribeInstances"], "Resource": "*"}}` + +type mockGroupIAMClient struct { + iamiface.IAMAPI + ListAttachedGroupPoliciesResp iam.ListAttachedGroupPoliciesOutput + ListGroupPoliciesResp iam.ListGroupPoliciesOutput + GetGroupPolicyResp iam.GetGroupPolicyOutput +} + +func (m mockGroupIAMClient) ListAttachedGroupPoliciesWithContext(_ aws.Context, in *iam.ListAttachedGroupPoliciesInput, _ ...request.Option) (*iam.ListAttachedGroupPoliciesOutput, error) { + return &m.ListAttachedGroupPoliciesResp, nil +} + +func (m mockGroupIAMClient) ListGroupPoliciesWithContext(_ aws.Context, in *iam.ListGroupPoliciesInput, _ ...request.Option) (*iam.ListGroupPoliciesOutput, error) { + return &m.ListGroupPoliciesResp, nil +} + +func (m mockGroupIAMClient) GetGroupPolicyWithContext(_ aws.Context, in *iam.GetGroupPolicyInput, _ ...request.Option) (*iam.GetGroupPolicyOutput, error) { + return &m.GetGroupPolicyResp, nil +} + +func Test_getGroupPolicies(t *testing.T) { + t.Parallel() + testCases := []struct { + description string + listAGPResp iam.ListAttachedGroupPoliciesOutput + listGPResp iam.ListGroupPoliciesOutput + getGPResp iam.GetGroupPolicyOutput + iamGroupArg []string + wantGroupPolicies []string + wantGroupPolicyARNs []string + wantErr bool + }{ + { + description: "All IAM calls respond with data", + listAGPResp: iam.ListAttachedGroupPoliciesOutput{ + AttachedPolicies: []*iam.AttachedPolicy{ + { + PolicyArn: aws.String("abcdefghijklmnopqrst"), + PolicyName: aws.String("test policy"), + }, + }, + }, + listGPResp: iam.ListGroupPoliciesOutput{ + PolicyNames: []*string{ + aws.String("inline policy"), + }, + }, + getGPResp: iam.GetGroupPolicyOutput{ + GroupName: aws.String("inline policy"), + PolicyDocument: aws.String(ec2DescribePolicy), + PolicyName: aws.String("ec2 describe"), + }, + iamGroupArg: []string{"testgroup1"}, + wantGroupPolicies: []string{ec2DescribePolicy}, + wantGroupPolicyARNs: []string{"abcdefghijklmnopqrst"}, + wantErr: false, + }, + { + description: "No managed policies", + listAGPResp: iam.ListAttachedGroupPoliciesOutput{}, + listGPResp: iam.ListGroupPoliciesOutput{ + PolicyNames: []*string{ + aws.String("inline policy"), + }, + }, + getGPResp: iam.GetGroupPolicyOutput{ + GroupName: aws.String("inline policy"), + PolicyDocument: aws.String(ec2DescribePolicy), + PolicyName: aws.String("ec2 describe"), + }, + iamGroupArg: []string{"testgroup1", "testgroup2"}, + wantGroupPolicies: []string{ec2DescribePolicy, ec2DescribePolicy}, + wantGroupPolicyARNs: []string(nil), + wantErr: false, + }, + { + description: "No inline policies", + listAGPResp: iam.ListAttachedGroupPoliciesOutput{ + AttachedPolicies: 
[]*iam.AttachedPolicy{ + { + PolicyArn: aws.String("abcdefghijklmnopqrst"), + PolicyName: aws.String("test policy"), + }, + }, + }, + listGPResp: iam.ListGroupPoliciesOutput{}, + getGPResp: iam.GetGroupPolicyOutput{}, + iamGroupArg: []string{"testgroup1"}, + wantGroupPolicies: []string(nil), + wantGroupPolicyARNs: []string{"abcdefghijklmnopqrst"}, + wantErr: false, + }, + { + description: "No policies", + listAGPResp: iam.ListAttachedGroupPoliciesOutput{}, + listGPResp: iam.ListGroupPoliciesOutput{}, + getGPResp: iam.GetGroupPolicyOutput{}, + iamGroupArg: []string{"testgroup1"}, + wantGroupPolicies: []string(nil), + wantGroupPolicyARNs: []string(nil), + wantErr: false, + }, + { + description: "empty iam_groups arg", + listAGPResp: iam.ListAttachedGroupPoliciesOutput{}, + listGPResp: iam.ListGroupPoliciesOutput{}, + getGPResp: iam.GetGroupPolicyOutput{}, + iamGroupArg: []string{}, + wantGroupPolicies: []string(nil), + wantGroupPolicyARNs: []string(nil), + wantErr: false, + }, + } + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + // configure backend and iam client + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + + b := Backend(config) + if err := b.Setup(context.Background(), config); err != nil { + t.Fatal(err) + } + b.iamClient = &mockGroupIAMClient{ + ListAttachedGroupPoliciesResp: tc.listAGPResp, + ListGroupPoliciesResp: tc.listGPResp, + GetGroupPolicyResp: tc.getGPResp, + } + + // run the test and compare results + groupPolicies, groupPolicyARNs, err := b.getGroupPolicies(context.TODO(), config.StorageView, tc.iamGroupArg) + assert.Equal(t, tc.wantGroupPolicies, groupPolicies) + assert.Equal(t, tc.wantGroupPolicyARNs, groupPolicyARNs) + assert.Equal(t, tc.wantErr, err != nil) + }) + } +} + +func Test_combinePolicyDocuments(t *testing.T) { + t.Parallel() + testCases := []struct { + description string + input []string + expectedOutput string + expectedErr bool + }{ + { + description: "one policy", + input: []string{ + ec2AllPolicy, + }, + expectedOutput: `{"Version":"2012-10-17","Statement":[{"Action":"ec2:*","Effect":"Allow","Resource":"*"}]}`, + expectedErr: false, + }, + { + description: "two policies", + input: []string{ + ec2AllPolicy, + ec2DescribePolicy, + }, + expectedOutput: `{"Version": "2012-10-17", "Statement":[ + {"Action": "ec2:*", "Effect": "Allow", "Resource": "*"}, + {"Action": ["ec2:DescribeInstances"], "Effect": "Allow", "Resource": "*"}]}`, + expectedErr: false, + }, + { + description: "two policies, one with empty statement", + input: []string{ + ec2AllPolicy, + `{"Version": "2012-10-17", "Statement": []}`, + }, + expectedOutput: `{"Version": "2012-10-17", "Statement": [{"Action": "ec2:*", "Effect": "Allow", "Resource": "*"}]}`, + expectedErr: false, + }, + { + description: "malformed json", + input: []string{ + `"Version": "2012-10-17","Statement": [{"Effect": "Allow", "Action": "ec2:*", "Resource": "*"}]}`, + `{"Version": "2012-10-17", "Statement": []}`, + }, + expectedOutput: ``, + expectedErr: true, + }, + { + description: "not action", + input: []string{ + `{"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "NotAction": "ec2:DescribeAvailabilityZones", "Resource": "*"}]}`, + }, + expectedOutput: `{"Version": "2012-10-17","Statement":[{"Effect": "Allow","NotAction": "ec2:DescribeAvailabilityZones", "Resource": "*"}]}`, + expectedErr: false, + }, + { + description: "one blank policy", + input: []string{ + "", + `{"Version": "2012-10-17", "Statement": []}`, + }, + expectedOutput: 
`{"Version": "2012-10-17", "Statement": []}`, + expectedErr: false, + }, + { + description: "when statement is not a list", + input: []string{ + ec2SingleStatement, + }, + expectedOutput: `{"Version": "2012-10-17", "Statement": [{"Action": ["ec2:DescribeInstances"], "Effect": "Allow", "Resource": "*"}]}`, + expectedErr: false, + }, + { + description: "statement is malformed json", + input: []string{ + `{"Version": "2012-10-17", "Statement": {true}`, + }, + expectedOutput: "", + expectedErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + policyOut, err := combinePolicyDocuments(tc.input...) + if (err != nil) != tc.expectedErr { + t.Fatalf("got unexpected error: %s", err) + } + if (err != nil) != tc.expectedErr { + t.Fatalf("got unexpected error: %s", err) + } + // remove whitespace + if tc.expectedOutput != "" { + tc.expectedOutput, err = compactJSON(tc.expectedOutput) + if err != nil { + t.Fatalf("error compacting JSON: %s", err) + } + } + if policyOut != tc.expectedOutput { + t.Fatalf("did not receive expected output: want %s, got %s", tc.expectedOutput, policyOut) + } + }) + } +} diff --git a/builtin/logical/aws/path_config_lease.go b/builtin/logical/aws/path_config_lease.go new file mode 100644 index 0000000..09e878a --- /dev/null +++ b/builtin/logical/aws/path_config_lease.go @@ -0,0 +1,146 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package aws + +import ( + "context" + "fmt" + "time" + + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathConfigLease(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "config/lease", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + }, + + Fields: map[string]*framework.FieldSchema{ + "lease": { + Type: framework.TypeString, + Description: "Default lease for roles.", + }, + + "lease_max": { + Type: framework.TypeString, + Description: "Maximum time a credential is valid for.", + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathLeaseRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "lease-configuration", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathLeaseWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "lease", + }, + }, + }, + + HelpSynopsis: pathConfigLeaseHelpSyn, + HelpDescription: pathConfigLeaseHelpDesc, + } +} + +// Lease returns the lease information +func (b *backend) Lease(ctx context.Context, s logical.Storage) (*configLease, error) { + entry, err := s.Get(ctx, "config/lease") + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result configLease + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + return &result, nil +} + +func (b *backend) pathLeaseWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + leaseRaw := d.Get("lease").(string) + leaseMaxRaw := d.Get("lease_max").(string) + + if len(leaseRaw) == 0 { + return logical.ErrorResponse("'lease' is a required parameter"), nil + } + if len(leaseMaxRaw) == 0 { + return logical.ErrorResponse("'lease_max' is a required parameter"), nil + } + + lease, err := parseutil.ParseDurationSecond(leaseRaw) + if err != nil { + return 
+func (b *backend) pathLeaseWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	leaseRaw := d.Get("lease").(string)
+	leaseMaxRaw := d.Get("lease_max").(string)
+
+	if len(leaseRaw) == 0 {
+		return logical.ErrorResponse("'lease' is a required parameter"), nil
+	}
+	if len(leaseMaxRaw) == 0 {
+		return logical.ErrorResponse("'lease_max' is a required parameter"), nil
+	}
+
+	lease, err := parseutil.ParseDurationSecond(leaseRaw)
+	if err != nil {
+		return logical.ErrorResponse(fmt.Sprintf(
+			"Invalid lease: %s", err)), nil
+	}
+	leaseMax, err := parseutil.ParseDurationSecond(leaseMaxRaw)
+	if err != nil {
+		return logical.ErrorResponse(fmt.Sprintf(
+			"Invalid lease_max: %s", err)), nil
+	}
+
+	// Store it
+	entry, err := logical.StorageEntryJSON("config/lease", &configLease{
+		Lease:    lease,
+		LeaseMax: leaseMax,
+	})
+	if err != nil {
+		return nil, err
+	}
+	if err := req.Storage.Put(ctx, entry); err != nil {
+		return nil, err
+	}
+
+	return nil, nil
+}
+
+func (b *backend) pathLeaseRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	lease, err := b.Lease(ctx, req.Storage)
+	if err != nil {
+		return nil, err
+	}
+	if lease == nil {
+		return nil, nil
+	}
+
+	return &logical.Response{
+		Data: map[string]interface{}{
+			"lease":     lease.Lease.String(),
+			"lease_max": lease.LeaseMax.String(),
+		},
+	}, nil
+}
+
+type configLease struct {
+	Lease    time.Duration
+	LeaseMax time.Duration
+}
+
+const pathConfigLeaseHelpSyn = `
+Configure the default lease information for generated credentials.
+`
+
+const pathConfigLeaseHelpDesc = `
+This configures the default lease information used for credentials
+generated by this backend. The lease specifies how long a credential
+is valid, as well as the maximum total duration for a set of
+credentials.
+
+Leases are specified as duration strings: an integer followed by a
+unit, such as "1h". The largest supported unit is hours.
+`
diff --git a/builtin/logical/aws/path_config_root.go b/builtin/logical/aws/path_config_root.go
new file mode 100644
index 0000000..bd6c09e
--- /dev/null
+++ b/builtin/logical/aws/path_config_root.go
@@ -0,0 +1,168 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package aws
+
+import (
+	"context"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// A single default template that supports both credential types (IAM/STS),
+// which are capped at differing length limits (64 chars and 32 chars
+// respectively)
+const defaultUserNameTemplate = `{{ if (eq .Type "STS") }}{{ printf "vault-%s-%s" (unix_time) (random 20) | truncate 32 }}{{ else }}{{ printf "vault-%s-%s-%s" (printf "%s-%s" (.DisplayName) (.PolicyName) | truncate 42) (unix_time) (random 20) | truncate 64 }}{{ end }}`
+
+func pathConfigRoot(b *backend) *framework.Path {
+	return &framework.Path{
+		Pattern: "config/root",
+
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixAWS,
+		},
+
+		Fields: map[string]*framework.FieldSchema{
+			"access_key": {
+				Type:        framework.TypeString,
+				Description: "Access key with permission to create new keys.",
+			},
+
+			"secret_key": {
+				Type:        framework.TypeString,
+				Description: "Secret key with permission to create new keys.",
+			},
+
+			"region": {
+				Type:        framework.TypeString,
+				Description: "Region for API calls.",
+			},
+			"iam_endpoint": {
+				Type:        framework.TypeString,
+				Description: "URL of a custom endpoint to use for the IAM API.",
+			},
+			"sts_endpoint": {
+				Type:        framework.TypeString,
+				Description: "URL of a custom endpoint to use for the STS API.",
+			},
+			"max_retries": {
+				Type:        framework.TypeInt,
+				Default:     aws.UseServiceDefaultRetries,
+				Description: "Maximum number of retries for recoverable exceptions of AWS APIs",
+			},
+			"username_template": {
+				Type:        framework.TypeString,
+				Description: "Template to generate custom IAM usernames",
+			},
+		},
+
+		Operations: map[logical.Operation]framework.OperationHandler{
+			logical.ReadOperation: &framework.PathOperation{
+				Callback: b.pathConfigRootRead,
+				DisplayAttrs: &framework.DisplayAttributes{
+					OperationSuffix: "root-iam-credentials-configuration",
+				},
+			},
+			logical.UpdateOperation: &framework.PathOperation{
+				Callback: b.pathConfigRootWrite,
+				DisplayAttrs: &framework.DisplayAttributes{
+					OperationVerb:   "configure",
+					OperationSuffix: "root-iam-credentials",
+				},
+			},
+		},
+
+		HelpSynopsis:    pathConfigRootHelpSyn,
+		HelpDescription: pathConfigRootHelpDesc,
+	}
+}
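[Editor's note: a sketch of configuring the root credentials through this endpoint with the Go client; the mount path aws/ and all credential values below are placeholders. As the read handler that follows shows, secret_key is write-only and is omitted from subsequent reads of config/root.]

    package main

    import (
        "log"

        "github.com/hashicorp/vault/api"
    )

    func main() {
        client, err := api.NewClient(api.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }

        if _, err := client.Logical().Write("aws/config/root", map[string]interface{}{
            "access_key":  "AKIA...",    // placeholder
            "secret_key":  "REPLACE_ME", // placeholder
            "region":      "us-east-1",
            "max_retries": 3,
        }); err != nil {
            log.Fatal(err)
        }
    }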
+func (b *backend) pathConfigRootRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	b.clientMutex.RLock()
+	defer b.clientMutex.RUnlock()
+
+	entry, err := req.Storage.Get(ctx, "config/root")
+	if err != nil {
+		return nil, err
+	}
+	if entry == nil {
+		return nil, nil
+	}
+
+	var config rootConfig
+
+	if err := entry.DecodeJSON(&config); err != nil {
+		return nil, err
+	}
+
+	configData := map[string]interface{}{
+		"access_key":        config.AccessKey,
+		"region":            config.Region,
+		"iam_endpoint":      config.IAMEndpoint,
+		"sts_endpoint":      config.STSEndpoint,
+		"max_retries":       config.MaxRetries,
+		"username_template": config.UsernameTemplate,
+	}
+	return &logical.Response{
+		Data: configData,
+	}, nil
+}
+
+func (b *backend) pathConfigRootWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	region := data.Get("region").(string)
+	iamEndpoint := data.Get("iam_endpoint").(string)
+	stsEndpoint := data.Get("sts_endpoint").(string)
+	maxRetries := data.Get("max_retries").(int)
+	usernameTemplate := data.Get("username_template").(string)
+	if usernameTemplate == "" {
+		usernameTemplate = defaultUserNameTemplate
+	}
+
+	b.clientMutex.Lock()
+	defer b.clientMutex.Unlock()
+
+	entry, err := logical.StorageEntryJSON("config/root", rootConfig{
+		AccessKey:        data.Get("access_key").(string),
+		SecretKey:        data.Get("secret_key").(string),
+		IAMEndpoint:      iamEndpoint,
+		STSEndpoint:      stsEndpoint,
+		Region:           region,
+		MaxRetries:       maxRetries,
+		UsernameTemplate: usernameTemplate,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	if err := req.Storage.Put(ctx, entry); err != nil {
+		return nil, err
+	}
+
+	// clear possible cached IAM / STS clients after successfully updating
+	// config/root
+	b.iamClient = nil
+	b.stsClient = nil
+
+	return nil, nil
+}
+
+type rootConfig struct {
+	AccessKey        string `json:"access_key"`
+	SecretKey        string `json:"secret_key"`
+	IAMEndpoint      string `json:"iam_endpoint"`
+	STSEndpoint      string `json:"sts_endpoint"`
+	Region           string `json:"region"`
+	MaxRetries       int    `json:"max_retries"`
+	UsernameTemplate string `json:"username_template"`
+}
+
+const pathConfigRootHelpSyn = `
+Configure the root credentials that are used to manage IAM.
+`
+
+const pathConfigRootHelpDesc = `
+Before doing anything, the AWS backend needs credentials that are able
+to manage IAM policies, users, access keys, etc. This endpoint is used
+to configure those credentials. They don't necessarily need to be root
+keys as long as they have permission to manage IAM.
+`
diff --git a/builtin/logical/aws/path_config_root_test.go b/builtin/logical/aws/path_config_root_test.go
new file mode 100644
index 0000000..a007064
--- /dev/null
+++ b/builtin/logical/aws/path_config_root_test.go
@@ -0,0 +1,58 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package aws + +import ( + "context" + "reflect" + "testing" + + "github.com/hashicorp/vault/sdk/logical" +) + +func TestBackend_PathConfigRoot(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + + b := Backend(config) + if err := b.Setup(context.Background(), config); err != nil { + t.Fatal(err) + } + + configData := map[string]interface{}{ + "access_key": "AKIAEXAMPLE", + "secret_key": "RandomData", + "region": "us-west-2", + "iam_endpoint": "https://iam.amazonaws.com", + "sts_endpoint": "https://sts.us-west-2.amazonaws.com", + "max_retries": 10, + "username_template": defaultUserNameTemplate, + } + + configReq := &logical.Request{ + Operation: logical.UpdateOperation, + Storage: config.StorageView, + Path: "config/root", + Data: configData, + } + + resp, err := b.HandleRequest(context.Background(), configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: config writing failed: resp:%#v\n err: %v", resp, err) + } + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Storage: config.StorageView, + Path: "config/root", + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: config reading failed: resp:%#v\n err: %v", resp, err) + } + + delete(configData, "secret_key") + if !reflect.DeepEqual(resp.Data, configData) { + t.Errorf("bad: expected to read config root as %#v, got %#v instead", configData, resp.Data) + } +} diff --git a/builtin/logical/aws/path_config_rotate_root.go b/builtin/logical/aws/path_config_rotate_root.go new file mode 100644 index 0000000..0434d22 --- /dev/null +++ b/builtin/logical/aws/path_config_rotate_root.go @@ -0,0 +1,137 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package aws + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathConfigRotateRoot(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "config/rotate-root", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "root-iam-credentials", + OperationVerb: "rotate", + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigRotateRootUpdate, + ForwardPerformanceStandby: true, + ForwardPerformanceSecondary: true, + }, + }, + + HelpSynopsis: pathConfigRotateRootHelpSyn, + HelpDescription: pathConfigRotateRootHelpDesc, + } +} + +func (b *backend) pathConfigRotateRootUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // have to get the client config first because that takes out a read lock + client, err := b.clientIAM(ctx, req.Storage) + if err != nil { + return nil, err + } + if client == nil { + return nil, fmt.Errorf("nil IAM client") + } + + b.clientMutex.Lock() + defer b.clientMutex.Unlock() + + rawRootConfig, err := req.Storage.Get(ctx, "config/root") + if err != nil { + return nil, err + } + if rawRootConfig == nil { + return nil, fmt.Errorf("no configuration found for config/root") + } + var config rootConfig + if err := rawRootConfig.DecodeJSON(&config); err != nil { + return nil, fmt.Errorf("error reading root configuration: %w", err) + } + + if config.AccessKey == "" || config.SecretKey == "" { + return logical.ErrorResponse("Cannot call config/rotate-root when either access_key or secret_key is empty"), nil + } + + var getUserInput iam.GetUserInput // empty input means get current user + getUserRes, err := client.GetUserWithContext(ctx, &getUserInput) + if err != nil { + return nil, fmt.Errorf("error calling GetUser: %w", err) + } + if getUserRes == nil { + return nil, fmt.Errorf("nil response from GetUser") + } + if getUserRes.User == nil { + return nil, fmt.Errorf("nil user returned from GetUser") + } + if getUserRes.User.UserName == nil { + return nil, fmt.Errorf("nil UserName returned from GetUser") + } + + createAccessKeyInput := iam.CreateAccessKeyInput{ + UserName: getUserRes.User.UserName, + } + createAccessKeyRes, err := client.CreateAccessKeyWithContext(ctx, &createAccessKeyInput) + if err != nil { + return nil, fmt.Errorf("error calling CreateAccessKey: %w", err) + } + if createAccessKeyRes.AccessKey == nil { + return nil, fmt.Errorf("nil response from CreateAccessKey") + } + if createAccessKeyRes.AccessKey.AccessKeyId == nil || createAccessKeyRes.AccessKey.SecretAccessKey == nil { + return nil, fmt.Errorf("nil AccessKeyId or SecretAccessKey returned from CreateAccessKey") + } + + oldAccessKey := config.AccessKey + + config.AccessKey = *createAccessKeyRes.AccessKey.AccessKeyId + config.SecretKey = *createAccessKeyRes.AccessKey.SecretAccessKey + + newEntry, err := logical.StorageEntryJSON("config/root", config) + if err != nil { + return nil, fmt.Errorf("error generating new config/root JSON: %w", err) + } + if err := req.Storage.Put(ctx, newEntry); err != nil { + return nil, fmt.Errorf("error saving new config/root: %w", err) + } + + b.iamClient = nil + b.stsClient = nil + + deleteAccessKeyInput := iam.DeleteAccessKeyInput{ + AccessKeyId: aws.String(oldAccessKey), + 
UserName: getUserRes.User.UserName, + } + _, err = client.DeleteAccessKeyWithContext(ctx, &deleteAccessKeyInput) + if err != nil { + return nil, fmt.Errorf("error deleting old access key: %w", err) + } + + return &logical.Response{ + Data: map[string]interface{}{ + "access_key": config.AccessKey, + }, + }, nil +} + +const pathConfigRotateRootHelpSyn = ` +Request to rotate the AWS credentials used by Vault +` + +const pathConfigRotateRootHelpDesc = ` +This path attempts to rotate the AWS credentials used by Vault for this mount. +It is only valid if Vault has been configured to use AWS IAM credentials via the +config/root endpoint. +` diff --git a/builtin/logical/aws/path_roles.go b/builtin/logical/aws/path_roles.go new file mode 100644 index 0000000..67545e6 --- /dev/null +++ b/builtin/logical/aws/path_roles.go @@ -0,0 +1,633 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package aws + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "regexp" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/arn" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" +) + +var userPathRegex = regexp.MustCompile(`^\/([\x21-\x7F]{0,510}\/)?$`) + +func pathListRoles(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "roles/?$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "roles", + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: b.pathRoleList, + }, + + HelpSynopsis: pathListRolesHelpSyn, + HelpDescription: pathListRolesHelpDesc, + } +} + +func pathRoles(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "roles/" + framework.GenericNameWithAtRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationSuffix: "role", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the role", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Role Name", + }, + }, + + "credential_type": { + Type: framework.TypeString, + Description: fmt.Sprintf("Type of credential to retrieve. Must be one of %s, %s, or %s", assumedRoleCred, iamUserCred, federationTokenCred), + }, + + "role_arns": { + Type: framework.TypeCommaStringSlice, + Description: "ARNs of AWS roles allowed to be assumed. Only valid when credential_type is " + assumedRoleCred, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Role ARNs", + }, + }, + + "policy_arns": { + Type: framework.TypeCommaStringSlice, + Description: fmt.Sprintf(`ARNs of AWS policies. Behavior varies by credential_type. When credential_type is +%s, then it will attach the specified policies to the generated IAM user. +When credential_type is %s or %s, the policies will be passed as the +PolicyArns parameter, acting as a filter on permissions available.`, iamUserCred, assumedRoleCred, federationTokenCred), + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Policy ARNs", + }, + }, + + "policy_document": { + Type: framework.TypeString, + Description: `JSON-encoded IAM policy document. Behavior varies by credential_type. When credential_type is +iam_user, then it will attach the contents of the policy_document to the IAM +user generated. 
When credential_type is assumed_role or federation_token, this +will be passed in as the Policy parameter to the AssumeRole or +GetFederationToken API call, acting as a filter on permissions available.`, + }, + + "iam_groups": { + Type: framework.TypeCommaStringSlice, + Description: `Names of IAM groups that generated IAM users will be added to. For a credential +type of assumed_role or federation_token, the policies sent to the +corresponding AWS call (sts:AssumeRole or sts:GetFederation) will be the +policies from each group in iam_groups combined with the policy_document +and policy_arns parameters.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "IAM Groups", + Value: "group1,group2", + }, + }, + + "iam_tags": { + Type: framework.TypeKVPairs, + Description: `IAM tags to be set for any users created by this role. These must be presented +as Key-Value pairs. This can be represented as a map or a list of equal sign +delimited key pairs.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "IAM Tags", + Value: "[key1=value1, key2=value2]", + }, + }, + + "default_sts_ttl": { + Type: framework.TypeDurationSecond, + Description: fmt.Sprintf("Default TTL for %s and %s credential types when no TTL is explicitly requested with the credentials", assumedRoleCred, federationTokenCred), + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Default STS TTL", + }, + }, + + "max_sts_ttl": { + Type: framework.TypeDurationSecond, + Description: fmt.Sprintf("Max allowed TTL for %s and %s credential types", assumedRoleCred, federationTokenCred), + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Max STS TTL", + }, + }, + + "permissions_boundary_arn": { + Type: framework.TypeString, + Description: "ARN of an IAM policy to attach as a permissions boundary on IAM user credentials; only valid when credential_type is" + iamUserCred, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Permissions Boundary ARN", + }, + }, + + "arn": { + Type: framework.TypeString, + Description: `Use role_arns or policy_arns instead.`, + Deprecated: true, + }, + + "policy": { + Type: framework.TypeString, + Description: "Use policy_document instead.", + Deprecated: true, + }, + + "user_path": { + Type: framework.TypeString, + Description: "Path for IAM User. 
Only valid when credential_type is " + iamUserCred, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "User Path", + Value: "/", + }, + Default: "/", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.DeleteOperation: b.pathRolesDelete, + logical.ReadOperation: b.pathRolesRead, + logical.UpdateOperation: b.pathRolesWrite, + }, + + HelpSynopsis: pathRolesHelpSyn, + HelpDescription: pathRolesHelpDesc, + } +} + +func (b *backend) pathRoleList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + b.roleMutex.RLock() + defer b.roleMutex.RUnlock() + entries, err := req.Storage.List(ctx, "role/") + if err != nil { + return nil, err + } + legacyEntries, err := req.Storage.List(ctx, "policy/") + if err != nil { + return nil, err + } + + return logical.ListResponse(append(entries, legacyEntries...)), nil +} + +func (b *backend) pathRolesDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + for _, prefix := range []string{"policy/", "role/"} { + err := req.Storage.Delete(ctx, prefix+d.Get("name").(string)) + if err != nil { + return nil, err + } + } + + return nil, nil +} + +func (b *backend) pathRolesRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + entry, err := b.roleRead(ctx, req.Storage, d.Get("name").(string), true) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + return &logical.Response{ + Data: entry.toResponseData(), + }, nil +} + +func legacyRoleData(d *framework.FieldData) (string, error) { + policy := d.Get("policy").(string) + arn := d.Get("arn").(string) + + switch { + case policy == "" && arn == "": + return "", nil + case policy != "" && arn != "": + return "", errors.New("only one of policy or arn should be provided") + case policy != "": + return policy, nil + default: + return arn, nil + } +} + +func (b *backend) pathRolesWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + var resp logical.Response + + roleName := d.Get("name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role name"), nil + } + + b.roleMutex.Lock() + defer b.roleMutex.Unlock() + roleEntry, err := b.roleRead(ctx, req.Storage, roleName, false) + if err != nil { + return nil, err + } + if roleEntry == nil { + roleEntry = &awsRoleEntry{} + } else if roleEntry.InvalidData != "" { + resp.AddWarning(fmt.Sprintf("Invalid data of %q cleared out of role", roleEntry.InvalidData)) + roleEntry.InvalidData = "" + } + + legacyRole, err := legacyRoleData(d) + if err != nil { + return nil, err + } + + if credentialTypeRaw, ok := d.GetOk("credential_type"); ok { + if legacyRole != "" { + return logical.ErrorResponse("cannot supply deprecated role or policy parameters with an explicit credential_type"), nil + } + roleEntry.CredentialTypes = []string{credentialTypeRaw.(string)} + } + + if roleArnsRaw, ok := d.GetOk("role_arns"); ok { + if legacyRole != "" { + return logical.ErrorResponse("cannot supply deprecated role or policy parameters with role_arns"), nil + } + roleEntry.RoleArns = roleArnsRaw.([]string) + } + + if policyArnsRaw, ok := d.GetOk("policy_arns"); ok { + if legacyRole != "" { + return logical.ErrorResponse("cannot supply deprecated role or policy parameters with policy_arns"), nil + } + roleEntry.PolicyArns = policyArnsRaw.([]string) + } + + if policyDocumentRaw, ok := d.GetOk("policy_document"); ok { + if legacyRole != "" { 
+ return logical.ErrorResponse("cannot supply deprecated role or policy parameters with policy_document"), nil + } + compacted := policyDocumentRaw.(string) + if len(compacted) > 0 { + compacted, err = compactJSON(policyDocumentRaw.(string)) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("cannot parse policy document: %q", policyDocumentRaw.(string))), nil + } + } + roleEntry.PolicyDocument = compacted + } + + if defaultSTSTTLRaw, ok := d.GetOk("default_sts_ttl"); ok { + if legacyRole != "" { + return logical.ErrorResponse("cannot supply deprecated role or policy parameters with default_sts_ttl"), nil + } + roleEntry.DefaultSTSTTL = time.Duration(defaultSTSTTLRaw.(int)) * time.Second + } + + if maxSTSTTLRaw, ok := d.GetOk("max_sts_ttl"); ok { + if legacyRole != "" { + return logical.ErrorResponse("cannot supply deprecated role or policy parameters with max_sts_ttl"), nil + } + roleEntry.MaxSTSTTL = time.Duration(maxSTSTTLRaw.(int)) * time.Second + } + + if userPathRaw, ok := d.GetOk("user_path"); ok { + if legacyRole != "" { + return logical.ErrorResponse("cannot supply deprecated role or policy parameters with user_path"), nil + } + + roleEntry.UserPath = userPathRaw.(string) + } + + if permissionsBoundaryARNRaw, ok := d.GetOk("permissions_boundary_arn"); ok { + if legacyRole != "" { + return logical.ErrorResponse("cannot supply deprecated role or policy parameters with permissions_boundary_arn"), nil + } + roleEntry.PermissionsBoundaryARN = permissionsBoundaryARNRaw.(string) + } + + if iamGroups, ok := d.GetOk("iam_groups"); ok { + roleEntry.IAMGroups = iamGroups.([]string) + } + + if iamTags, ok := d.GetOk("iam_tags"); ok { + roleEntry.IAMTags = iamTags.(map[string]string) + } + + if legacyRole != "" { + roleEntry = upgradeLegacyPolicyEntry(legacyRole) + if roleEntry.InvalidData != "" { + return logical.ErrorResponse(fmt.Sprintf("unable to parse supplied data: %q", roleEntry.InvalidData)), nil + } + resp.AddWarning("Detected use of legacy role or policy parameter. 
Please upgrade to use the new parameters.") + } else { + roleEntry.ProhibitFlexibleCredPath = false + } + + err = roleEntry.validate() + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("error(s) validating supplied role data: %q", err)), nil + } + + err = setAwsRole(ctx, req.Storage, roleName, roleEntry) + if err != nil { + return nil, err + } + + if len(resp.Warnings) == 0 { + return nil, nil + } + + return &resp, nil +} + +func (b *backend) roleRead(ctx context.Context, s logical.Storage, roleName string, shouldLock bool) (*awsRoleEntry, error) { + if roleName == "" { + return nil, fmt.Errorf("missing role name") + } + if shouldLock { + b.roleMutex.RLock() + } + entry, err := s.Get(ctx, "role/"+roleName) + if shouldLock { + b.roleMutex.RUnlock() + } + if err != nil { + return nil, err + } + var roleEntry awsRoleEntry + if entry != nil { + if err := entry.DecodeJSON(&roleEntry); err != nil { + return nil, err + } + return &roleEntry, nil + } + + if shouldLock { + b.roleMutex.Lock() + defer b.roleMutex.Unlock() + } + entry, err = s.Get(ctx, "role/"+roleName) + if err != nil { + return nil, err + } + + if entry != nil { + if err := entry.DecodeJSON(&roleEntry); err != nil { + return nil, err + } + return &roleEntry, nil + } + + legacyEntry, err := s.Get(ctx, "policy/"+roleName) + if err != nil { + return nil, err + } + if legacyEntry == nil { + return nil, nil + } + + newRoleEntry := upgradeLegacyPolicyEntry(string(legacyEntry.Value)) + if b.System().LocalMount() || !b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary|consts.ReplicationPerformanceStandby) { + err = setAwsRole(ctx, s, roleName, newRoleEntry) + if err != nil { + return nil, err + } + // This can leave legacy data around in the policy/ path if it fails for some reason, + // but should be pretty rare for this to fail but prior writes to succeed, so not worrying + // about cleaning it up in case of error + err = s.Delete(ctx, "policy/"+roleName) + if err != nil { + return nil, err + } + } + return newRoleEntry, nil +} + +func upgradeLegacyPolicyEntry(entry string) *awsRoleEntry { + var newRoleEntry *awsRoleEntry + if strings.HasPrefix(entry, "arn:") { + parsedArn, err := arn.Parse(entry) + if err != nil { + newRoleEntry = &awsRoleEntry{ + InvalidData: entry, + Version: 1, + } + return newRoleEntry + } + resourceParts := strings.Split(parsedArn.Resource, "/") + resourceType := resourceParts[0] + switch resourceType { + case "role": + newRoleEntry = &awsRoleEntry{ + CredentialTypes: []string{assumedRoleCred}, + RoleArns: []string{entry}, + ProhibitFlexibleCredPath: true, + Version: 1, + } + case "policy": + newRoleEntry = &awsRoleEntry{ + CredentialTypes: []string{iamUserCred}, + PolicyArns: []string{entry}, + ProhibitFlexibleCredPath: true, + Version: 1, + } + default: + newRoleEntry = &awsRoleEntry{ + InvalidData: entry, + Version: 1, + } + } + } else { + compacted, err := compactJSON(entry) + if err != nil { + newRoleEntry = &awsRoleEntry{ + InvalidData: entry, + Version: 1, + } + } else { + // unfortunately, this is ambiguous between the cred types, so allow both + newRoleEntry = &awsRoleEntry{ + CredentialTypes: []string{iamUserCred, federationTokenCred}, + PolicyDocument: compacted, + ProhibitFlexibleCredPath: true, + Version: 1, + } + } + } + + return newRoleEntry +} + +func validateAWSManagedPolicy(policyARN string) error { + parsedARN, err := arn.Parse(policyARN) + if err != nil { + return err + } + if parsedARN.Service != "iam" { + return fmt.Errorf("expected a service of iam but got 
%s", parsedARN.Service)
+	}
+	if !strings.HasPrefix(parsedARN.Resource, "policy/") {
+		return fmt.Errorf("expected a resource type of policy but got %s", parsedARN.Resource)
+	}
+	return nil
+}
+
+func setAwsRole(ctx context.Context, s logical.Storage, roleName string, roleEntry *awsRoleEntry) error {
+	if roleName == "" {
+		return fmt.Errorf("empty role name")
+	}
+	if roleEntry == nil {
+		return fmt.Errorf("nil roleEntry")
+	}
+	entry, err := logical.StorageEntryJSON("role/"+roleName, roleEntry)
+	if err != nil {
+		return err
+	}
+	if entry == nil {
+		return fmt.Errorf("nil result when writing to storage")
+	}
+	if err := s.Put(ctx, entry); err != nil {
+		return err
+	}
+	return nil
+}
+
+type awsRoleEntry struct {
+	CredentialTypes          []string          `json:"credential_types"`                      // Entries must all be in the set of ("iam_user", "assumed_role", "federation_token")
+	PolicyArns               []string          `json:"policy_arns"`                           // ARNs of managed policies to attach to an IAM user
+	RoleArns                 []string          `json:"role_arns"`                             // ARNs of roles to assume for AssumedRole credentials
+	PolicyDocument           string            `json:"policy_document"`                       // JSON-serialized inline policy to attach to IAM users and/or to specify as the Policy parameter in AssumeRole calls
+	IAMGroups                []string          `json:"iam_groups"`                            // Names of IAM groups that generated IAM users will be added to
+	IAMTags                  map[string]string `json:"iam_tags"`                              // IAM tags that will be added to the generated IAM users
+	InvalidData              string            `json:"invalid_data,omitempty"`                // Invalid role data. Exists to support converting the legacy role data into the new format
+	ProhibitFlexibleCredPath bool              `json:"prohibit_flexible_cred_path,omitempty"` // Disallow accessing STS credentials via the creds path and vice versa
+	Version                  int               `json:"version"`                               // Version number of the role format
+	DefaultSTSTTL            time.Duration     `json:"default_sts_ttl"`                       // Default TTL for STS credentials
+	MaxSTSTTL                time.Duration     `json:"max_sts_ttl"`                           // Max allowed TTL for STS credentials
+	UserPath                 string            `json:"user_path"`                             // The path for the IAM user when using "iam_user" credential type
+	PermissionsBoundaryARN   string            `json:"permissions_boundary_arn"`              // ARN of an IAM policy to attach as a permissions boundary
+}
+
+func (r *awsRoleEntry) toResponseData() map[string]interface{} {
+	respData := map[string]interface{}{
+		"credential_type":          strings.Join(r.CredentialTypes, ","),
+		"policy_arns":              r.PolicyArns,
+		"role_arns":                r.RoleArns,
+		"policy_document":          r.PolicyDocument,
+		"iam_groups":               r.IAMGroups,
+		"iam_tags":                 r.IAMTags,
+		"default_sts_ttl":          int64(r.DefaultSTSTTL.Seconds()),
+		"max_sts_ttl":              int64(r.MaxSTSTTL.Seconds()),
+		"user_path":                r.UserPath,
+		"permissions_boundary_arn": r.PermissionsBoundaryARN,
+	}
+
+	if r.InvalidData != "" {
+		respData["invalid_data"] = r.InvalidData
+	}
+	return respData
+}
+
+func (r *awsRoleEntry) validate() error {
+	var errors *multierror.Error
+
+	if len(r.CredentialTypes) == 0 {
+		errors = multierror.Append(errors, fmt.Errorf("did not supply credential_type"))
+	}
+
+	allowedCredentialTypes := []string{iamUserCred, assumedRoleCred, federationTokenCred}
+	for _, credType := range r.CredentialTypes {
+		if !strutil.StrListContains(allowedCredentialTypes, credType) {
+			errors = multierror.Append(errors, fmt.Errorf("unrecognized credential type: %s", credType))
+		}
+	}
+
+	if r.DefaultSTSTTL != 0 && !strutil.StrListContains(r.CredentialTypes, assumedRoleCred) && !strutil.StrListContains(r.CredentialTypes, federationTokenCred) {
+		errors = multierror.Append(errors, fmt.Errorf("default_sts_ttl parameter only valid for %s and %s credential types", assumedRoleCred, federationTokenCred))
+	}
+
+	if r.MaxSTSTTL != 0 && !strutil.StrListContains(r.CredentialTypes, assumedRoleCred) && !strutil.StrListContains(r.CredentialTypes, federationTokenCred) {
+		errors = multierror.Append(errors, fmt.Errorf("max_sts_ttl parameter only valid for %s and %s credential types", assumedRoleCred, federationTokenCred))
+	}
+
+	if r.MaxSTSTTL > 0 &&
+		r.DefaultSTSTTL > 0 &&
+		r.DefaultSTSTTL > r.MaxSTSTTL {
+		errors = multierror.Append(errors, fmt.Errorf(`"default_sts_ttl" value must be less than or equal to "max_sts_ttl" value`))
+	}
+
+	if r.UserPath != "" {
+		if !strutil.StrListContains(r.CredentialTypes, iamUserCred) {
+			errors = multierror.Append(errors, fmt.Errorf("user_path parameter only valid for %s credential type", iamUserCred))
+		}
+		if !userPathRegex.MatchString(r.UserPath) {
+			errors = multierror.Append(errors, fmt.Errorf("the specified value for user_path is invalid: it must match the %q regexp", userPathRegex.String()))
+		}
+	}
+
+	if r.PermissionsBoundaryARN != "" {
+		if !strutil.StrListContains(r.CredentialTypes, iamUserCred) {
+			errors = multierror.Append(errors, fmt.Errorf("cannot supply permissions_boundary_arn when credential_type isn't %s", iamUserCred))
+		}
+		if err := validateAWSManagedPolicy(r.PermissionsBoundaryARN); err != nil {
+			errors = multierror.Append(errors, fmt.Errorf("invalid permissions_boundary_arn parameter: %v", err))
+		}
+	}
+
+	if len(r.RoleArns) > 0 && !strutil.StrListContains(r.CredentialTypes, assumedRoleCred) {
+		errors = multierror.Append(errors, fmt.Errorf("cannot supply role_arns when credential_type isn't %s", assumedRoleCred))
+	}
+
+	return errors.ErrorOrNil()
+}
+
+func compactJSON(input string) (string, error) {
+	var compacted bytes.Buffer
+	err := json.Compact(&compacted, []byte(input))
+	return compacted.String(), err
+}
+
+const (
+	assumedRoleCred     = "assumed_role"
+	iamUserCred         = "iam_user"
+	federationTokenCred = "federation_token"
+)
+
+const pathListRolesHelpSyn = `List the existing roles in this backend`
+
+const pathListRolesHelpDesc = `Roles will be listed by the role name.`
+
+const pathRolesHelpSyn = `
+Read, write and reference IAM policies that access keys can be made for.
+`
+
+const pathRolesHelpDesc = `
+This path allows you to read and write roles that are used to
+create access keys. These roles are associated with IAM policies that
+map directly to the route to read the access keys. For example, if the
+backend is mounted at "aws" and you create a role at "aws/roles/deploy"
+then a user could request access credentials at "aws/creds/deploy".
+
+You can either supply an inline user policy (via the policy argument), or
+provide a reference to an existing AWS policy by supplying the full ARN
+reference (via the arn argument). Inline user policies written are normal
+IAM policies. Vault will not attempt to parse these except to validate
+that they're basic JSON. No validation is performed on ARN references.
+
+To validate the keys, attempt to read an access key after writing the policy.
+`
diff --git a/builtin/logical/aws/path_roles_test.go b/builtin/logical/aws/path_roles_test.go
new file mode 100644
index 0000000..c5bf167
--- /dev/null
+++ b/builtin/logical/aws/path_roles_test.go
@@ -0,0 +1,451 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package aws + +import ( + "context" + "reflect" + "strconv" + "strings" + "testing" + + "github.com/hashicorp/vault/sdk/logical" +) + +const adminAccessPolicyARN = "arn:aws:iam::aws:policy/AdministratorAccess" + +func TestBackend_PathListRoles(t *testing.T) { + var resp *logical.Response + var err error + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + + b := Backend(config) + if err := b.Setup(context.Background(), config); err != nil { + t.Fatal(err) + } + + roleData := map[string]interface{}{ + "role_arns": []string{"arn:aws:iam::123456789012:role/path/RoleName"}, + "credential_type": assumedRoleCred, + "default_sts_ttl": 3600, + "max_sts_ttl": 3600, + } + + roleReq := &logical.Request{ + Operation: logical.UpdateOperation, + Storage: config.StorageView, + Data: roleData, + } + + for i := 1; i <= 10; i++ { + roleReq.Path = "roles/testrole" + strconv.Itoa(i) + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: role creation failed. resp:%#v\n err:%v", resp, err) + } + } + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ListOperation, + Path: "roles", + Storage: config.StorageView, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: listing roles failed. resp:%#v\n err:%v", resp, err) + } + + if len(resp.Data["keys"].([]string)) != 10 { + t.Fatalf("failed to list all 10 roles") + } + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ListOperation, + Path: "roles/", + Storage: config.StorageView, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: listing roles failed. 
resp:%#v\n err:%v", resp, err) + } + + if len(resp.Data["keys"].([]string)) != 10 { + t.Fatalf("failed to list all 10 roles") + } +} + +func TestUpgradeLegacyPolicyEntry(t *testing.T) { + var input string + var expected awsRoleEntry + var output *awsRoleEntry + + input = "arn:aws:iam::123456789012:role/path/RoleName" + expected = awsRoleEntry{ + CredentialTypes: []string{assumedRoleCred}, + RoleArns: []string{input}, + ProhibitFlexibleCredPath: true, + Version: 1, + } + output = upgradeLegacyPolicyEntry(input) + if output.InvalidData != "" { + t.Fatalf("bad: error processing upgrade of %q: got invalid data of %v", input, output.InvalidData) + } + if !reflect.DeepEqual(*output, expected) { + t.Fatalf("bad: expected %#v; received %#v", expected, *output) + } + + input = "arn:aws:iam::123456789012:policy/MyPolicy" + expected = awsRoleEntry{ + CredentialTypes: []string{iamUserCred}, + PolicyArns: []string{input}, + ProhibitFlexibleCredPath: true, + Version: 1, + } + output = upgradeLegacyPolicyEntry(input) + if output.InvalidData != "" { + t.Fatalf("bad: error processing upgrade of %q: got invalid data of %v", input, output.InvalidData) + } + if !reflect.DeepEqual(*output, expected) { + t.Fatalf("bad: expected %#v; received %#v", expected, *output) + } + + input = "arn:aws:iam::aws:policy/AWSManagedPolicy" + expected.PolicyArns = []string{input} + output = upgradeLegacyPolicyEntry(input) + if output.InvalidData != "" { + t.Fatalf("bad: error processing upgrade of %q: got invalid data of %v", input, output.InvalidData) + } + if !reflect.DeepEqual(*output, expected) { + t.Fatalf("bad: expected %#v; received %#v", expected, *output) + } + + input = ` +{ + "Version": "2012-10-07", + "Statement": [ + { + "Effect": "Allow", + "Action": "ec2:Describe*", + "Resource": "*" + } + ] +}` + compacted, err := compactJSON(input) + if err != nil { + t.Fatalf("error parsing JSON: %v", err) + } + expected = awsRoleEntry{ + CredentialTypes: []string{iamUserCred, federationTokenCred}, + PolicyDocument: compacted, + ProhibitFlexibleCredPath: true, + Version: 1, + } + output = upgradeLegacyPolicyEntry(input) + if output.InvalidData != "" { + t.Fatalf("bad: error processing upgrade of %q: got invalid data of %v", input, output.InvalidData) + } + if !reflect.DeepEqual(*output, expected) { + t.Fatalf("bad: expected %#v; received %#v", expected, *output) + } + + // Due to lack of prior input validation, this could exist in the storage, and we need + // to be able to read it out in some fashion, so have to handle this in a poor fashion + input = "arn:gobbledygook" + expected = awsRoleEntry{ + InvalidData: input, + Version: 1, + } + output = upgradeLegacyPolicyEntry(input) + if !reflect.DeepEqual(*output, expected) { + t.Fatalf("bad: expected %#v; received %#v", expected, *output) + } +} + +func TestUserPathValidity(t *testing.T) { + testCases := []struct { + description string + userPath string + isValid bool + }{ + { + description: "Default", + userPath: "/", + isValid: true, + }, + { + description: "Empty", + userPath: "", + isValid: false, + }, + { + description: "Valid", + userPath: "/path/", + isValid: true, + }, + { + description: "Missing leading slash", + userPath: "path/", + isValid: false, + }, + { + description: "Missing trailing slash", + userPath: "/path", + isValid: false, + }, + { + description: "Invalid character", + userPath: "/šiauliai/", + isValid: false, + }, + { + description: "Max length", + userPath: "/" + strings.Repeat("a", 510) + "/", + isValid: true, + }, + { + description: "Too long", + 
userPath: "/" + strings.Repeat("a", 511) + "/", + isValid: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + if tc.isValid != userPathRegex.MatchString(tc.userPath) { + t.Fatalf("bad: expected %s", strconv.FormatBool(tc.isValid)) + } + }) + } +} + +func TestRoleCRUDWithPermissionsBoundary(t *testing.T) { + roleName := "test_perm_boundary" + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + + b := Backend(config) + if err := b.Setup(context.Background(), config); err != nil { + t.Fatal(err) + } + + permissionsBoundaryARN := "arn:aws:iam::aws:policy/EC2FullAccess" + + roleData := map[string]interface{}{ + "credential_type": iamUserCred, + "policy_arns": []string{adminAccessPolicyARN}, + "permissions_boundary_arn": permissionsBoundaryARN, + } + request := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/" + roleName, + Storage: config.StorageView, + Data: roleData, + } + resp, err := b.HandleRequest(context.Background(), request) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: role creation failed. resp:%#v\nerr:%v", resp, err) + } + + request = &logical.Request{ + Operation: logical.ReadOperation, + Path: "roles/" + roleName, + Storage: config.StorageView, + } + resp, err = b.HandleRequest(context.Background(), request) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: reading role failed. resp:%#v\nerr:%v", resp, err) + } + if resp.Data["credential_type"] != iamUserCred { + t.Errorf("bad: expected credential_type of %s, got %s instead", iamUserCred, resp.Data["credential_type"]) + } + if resp.Data["permissions_boundary_arn"] != permissionsBoundaryARN { + t.Errorf("bad: expected permissions_boundary_arn of %s, got %s instead", permissionsBoundaryARN, resp.Data["permissions_boundary_arn"]) + } +} + +func TestRoleWithPermissionsBoundaryValidation(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + + b := Backend(config) + if err := b.Setup(context.Background(), config); err != nil { + t.Fatal(err) + } + + roleData := map[string]interface{}{ + "credential_type": assumedRoleCred, // only iamUserCred supported with permissions_boundary_arn + "role_arns": []string{"arn:aws:iam::123456789012:role/VaultRole"}, + "permissions_boundary_arn": "arn:aws:iam::aws:policy/FooBar", + } + request := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/test_perm_boundary", + Storage: config.StorageView, + Data: roleData, + } + resp, err := b.HandleRequest(context.Background(), request) + if err == nil && (resp == nil || !resp.IsError()) { + t.Fatalf("bad: expected role creation to fail due to bad credential_type, but it didn't. resp:%#v\nerr:%v", resp, err) + } + + roleData = map[string]interface{}{ + "credential_type": iamUserCred, + "policy_arns": []string{adminAccessPolicyARN}, + "permissions_boundary_arn": "arn:aws:notiam::aws:policy/FooBar", + } + request.Data = roleData + resp, err = b.HandleRequest(context.Background(), request) + if err == nil && (resp == nil || !resp.IsError()) { + t.Fatalf("bad: expected role creation to fail due to malformed permissions_boundary_arn, but it didn't. 
resp:%#v\nerr:%v", resp, err) + } +} + +func TestValidateAWSManagedPolicy(t *testing.T) { + expectErr := func(arn string) { + err := validateAWSManagedPolicy(arn) + if err == nil { + t.Errorf("bad: expected arn of %s to return an error but it didn't", arn) + } + } + + expectErr("not_an_arn") + expectErr("notarn:aws:iam::aws:policy/FooBar") + expectErr("arn:aws:notiam::aws:policy/FooBar") + expectErr("arn:aws:iam::aws:notpolicy/FooBar") + expectErr("arn:aws:iam::aws:policynot/FooBar") + + arn := "arn:aws:iam::aws:policy/FooBar" + err := validateAWSManagedPolicy(arn) + if err != nil { + t.Errorf("bad: expected arn of %s to not return an error but it did: %#v", arn, err) + } +} + +func TestRoleEntryValidationCredTypes(t *testing.T) { + roleEntry := awsRoleEntry{ + CredentialTypes: []string{}, + PolicyArns: []string{adminAccessPolicyARN}, + } + if roleEntry.validate() == nil { + t.Errorf("bad: invalid roleEntry with no CredentialTypes %#v passed validation", roleEntry) + } + roleEntry.CredentialTypes = []string{"invalid_type"} + if roleEntry.validate() == nil { + t.Errorf("bad: invalid roleEntry with invalid CredentialTypes %#v passed validation", roleEntry) + } + roleEntry.CredentialTypes = []string{iamUserCred, "invalid_type"} + if roleEntry.validate() == nil { + t.Errorf("bad: invalid roleEntry with invalid CredentialTypes %#v passed validation", roleEntry) + } +} + +func TestRoleEntryValidationIamUserCred(t *testing.T) { + allowAllPolicyDocument := `{"Version": "2012-10-17", "Statement": [{"Sid": "AllowAll", "Effect": "Allow", "Action": "*", "Resource": "*"}]}` + roleEntry := awsRoleEntry{ + CredentialTypes: []string{iamUserCred}, + PolicyArns: []string{adminAccessPolicyARN}, + PermissionsBoundaryARN: adminAccessPolicyARN, + } + err := roleEntry.validate() + if err != nil { + t.Errorf("bad: valid roleEntry %#v failed validation: %v", roleEntry, err) + } + roleEntry.PolicyDocument = allowAllPolicyDocument + err = roleEntry.validate() + if err != nil { + t.Errorf("bad: valid roleEntry %#v failed validation: %v", roleEntry, err) + } + roleEntry.PolicyArns = []string{} + err = roleEntry.validate() + if err != nil { + t.Errorf("bad: valid roleEntry %#v failed validation: %v", roleEntry, err) + } + + roleEntry = awsRoleEntry{ + CredentialTypes: []string{iamUserCred}, + RoleArns: []string{"arn:aws:iam::123456789012:role/SomeRole"}, + } + if roleEntry.validate() == nil { + t.Errorf("bad: invalid roleEntry with invalid RoleArns parameter %#v passed validation", roleEntry) + } + + roleEntry = awsRoleEntry{ + CredentialTypes: []string{iamUserCred}, + PolicyArns: []string{adminAccessPolicyARN}, + DefaultSTSTTL: 1, + } + if roleEntry.validate() == nil { + t.Errorf("bad: invalid roleEntry with unrecognized DefaultSTSTTL %#v passed validation", roleEntry) + } + roleEntry.DefaultSTSTTL = 0 + roleEntry.MaxSTSTTL = 1 + if roleEntry.validate() == nil { + t.Errorf("bad: invalid roleEntry with unrecognized MaxSTSTTL %#v passed validation", roleEntry) + } +} + +func TestRoleEntryValidationAssumedRoleCred(t *testing.T) { + allowAllPolicyDocument := `{"Version": "2012-10-17", "Statement": [{"Sid": "AllowAll", "Effect": "Allow", "Action": "*", "Resource": "*"}]}` + roleEntry := awsRoleEntry{ + CredentialTypes: []string{assumedRoleCred}, + RoleArns: []string{"arn:aws:iam::123456789012:role/SomeRole"}, + PolicyArns: []string{adminAccessPolicyARN}, + PolicyDocument: allowAllPolicyDocument, + DefaultSTSTTL: 2, + MaxSTSTTL: 3, + } + if err := roleEntry.validate(); err != nil { + t.Errorf("bad: valid roleEntry %#v 
failed validation: %v", roleEntry, err) + } + + roleEntry.MaxSTSTTL = 1 + if roleEntry.validate() == nil { + t.Errorf("bad: invalid roleEntry with MaxSTSTTL < DefaultSTSTTL %#v passed validation", roleEntry) + } + roleEntry.MaxSTSTTL = 0 + roleEntry.UserPath = "/foobar/" + if roleEntry.validate() == nil { + t.Errorf("bad: invalid roleEntry with unrecognized UserPath %#v passed validation", roleEntry) + } + roleEntry.UserPath = "" + roleEntry.PermissionsBoundaryARN = adminAccessPolicyARN + if roleEntry.validate() == nil { + t.Errorf("bad: invalid roleEntry with unrecognized PermissionsBoundary %#v passed validation", roleEntry) + } +} + +func TestRoleEntryValidationFederationTokenCred(t *testing.T) { + allowAllPolicyDocument := `{"Version": "2012-10-17", "Statement": [{"Sid": "AllowAll", "Effect": "Allow", "Action": "*", "Resource": "*"}]}` + roleEntry := awsRoleEntry{ + CredentialTypes: []string{federationTokenCred}, + PolicyDocument: allowAllPolicyDocument, + PolicyArns: []string{adminAccessPolicyARN}, + DefaultSTSTTL: 2, + MaxSTSTTL: 3, + } + if err := roleEntry.validate(); err != nil { + t.Errorf("bad: valid roleEntry %#v failed validation: %v", roleEntry, err) + } + + roleEntry.RoleArns = []string{"arn:aws:iam::123456789012:role/SomeRole"} + if roleEntry.validate() == nil { + t.Errorf("bad: invalid roleEntry with unrecognized RoleArns %#v passed validation", roleEntry) + } + roleEntry.RoleArns = []string{} + roleEntry.UserPath = "/foobar/" + if roleEntry.validate() == nil { + t.Errorf("bad: invalid roleEntry with unrecognized UserPath %#v passed validation", roleEntry) + } + + roleEntry.UserPath = "" + roleEntry.MaxSTSTTL = 1 + if roleEntry.validate() == nil { + t.Errorf("bad: invalid roleEntry with MaxSTSTTL < DefaultSTSTTL %#v passed validation", roleEntry) + } + roleEntry.MaxSTSTTL = 0 + roleEntry.PermissionsBoundaryARN = adminAccessPolicyARN + if roleEntry.validate() == nil { + t.Errorf("bad: invalid roleEntry with unrecognized PermissionsBoundary %#v passed validation", roleEntry) + } +} diff --git a/builtin/logical/aws/path_static_creds.go b/builtin/logical/aws/path_static_creds.go new file mode 100644 index 0000000..119f5d0 --- /dev/null +++ b/builtin/logical/aws/path_static_creds.go @@ -0,0 +1,99 @@ +package aws + +import ( + "context" + "fmt" + "net/http" + + "github.com/fatih/structs" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + pathStaticCreds = "static-creds" + + paramAccessKeyID = "access_key" + paramSecretsAccessKey = "secret_key" +) + +type awsCredentials struct { + AccessKeyID string `json:"access_key" structs:"access_key" mapstructure:"access_key"` + SecretAccessKey string `json:"secret_key" structs:"secret_key" mapstructure:"secret_key"` +} + +func pathStaticCredentials(b *backend) *framework.Path { + return &framework.Path{ + Pattern: fmt.Sprintf("%s/%s", pathStaticCreds, framework.GenericNameWithAtRegex(paramRoleName)), + Fields: map[string]*framework.FieldSchema{ + paramRoleName: { + Type: framework.TypeString, + Description: descRoleName, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathStaticCredsRead, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: http.StatusText(http.StatusOK), + Fields: map[string]*framework.FieldSchema{ + paramAccessKeyID: { + Type: framework.TypeString, + Description: descAccessKeyID, + }, + paramSecretsAccessKey: { + Type: framework.TypeString, + 
Description: descSecretAccessKey,
+						},
+					},
+				}},
+			},
+		},
+	},
+
+		HelpSynopsis:    pathStaticCredsHelpSyn,
+		HelpDescription: pathStaticCredsHelpDesc,
+	}
+}
+
+func (b *backend) pathStaticCredsRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	roleName, ok := data.GetOk(paramRoleName)
+	if !ok {
+		return nil, fmt.Errorf("missing %q parameter", paramRoleName)
+	}
+
+	entry, err := req.Storage.Get(ctx, formatCredsStoragePath(roleName.(string)))
+	if err != nil {
+		return nil, fmt.Errorf("failed to read credentials for role %q: %w", roleName, err)
+	}
+	if entry == nil {
+		return nil, nil
+	}
+
+	var credentials awsCredentials
+	if err := entry.DecodeJSON(&credentials); err != nil {
+		return nil, fmt.Errorf("failed to decode credentials: %w", err)
+	}
+
+	return &logical.Response{
+		Data: structs.New(credentials).Map(),
+	}, nil
+}
+
+func formatCredsStoragePath(roleName string) string {
+	return fmt.Sprintf("%s/%s", pathStaticCreds, roleName)
+}
+
+const pathStaticCredsHelpSyn = `Retrieve static credentials from the named role.`
+
+const pathStaticCredsHelpDesc = `
+This path reads AWS credentials for a certain static role. The keys are rotated
+periodically according to their configuration, and will return the same access
+key pair until they are rotated.`
+
+const (
+	descAccessKeyID     = "The access key ID of the AWS credential"
+	descSecretAccessKey = "The secret access key of the AWS credential"
+)
diff --git a/builtin/logical/aws/path_static_creds_test.go b/builtin/logical/aws/path_static_creds_test.go
new file mode 100644
index 0000000..c478e3f
--- /dev/null
+++ b/builtin/logical/aws/path_static_creds_test.go
@@ -0,0 +1,92 @@
+package aws
+
+import (
+	"context"
+	"reflect"
+	"testing"
+
+	"github.com/fatih/structs"
+
+	"github.com/hashicorp/vault/sdk/framework"
+
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// TestStaticCredsRead verifies that we can correctly read a cred that exists, and correctly _not read_
+// a cred that does not exist.
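+// As an illustrative sketch of the expected success shape (restating the
+// "foo"/"bar" fixture below, not adding assertions): the response data carries
+// the struct tags of awsCredentials, i.e.
+//
+//	resp.Data["access_key"] == "foo"
+//	resp.Data["secret_key"] == "bar"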
+func TestStaticCredsRead(t *testing.T) { + // setup + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + bgCTX := context.Background() // for brevity later + + // insert a cred to get + creds := &awsCredentials{ + AccessKeyID: "foo", + SecretAccessKey: "bar", + } + entry, err := logical.StorageEntryJSON(formatCredsStoragePath("test"), creds) + if err != nil { + t.Fatal(err) + } + err = config.StorageView.Put(bgCTX, entry) + if err != nil { + t.Fatal(err) + } + + // cases + cases := []struct { + name string + roleName string + expectedError error + expectedResponse *logical.Response + }{ + { + name: "get existing creds", + roleName: "test", + expectedResponse: &logical.Response{ + Data: structs.New(creds).Map(), + }, + }, + { + name: "get non-existent creds", + roleName: "this-doesnt-exist", + // returns nil, nil + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + b := Backend(config) + + req := &logical.Request{ + Storage: config.StorageView, + Data: map[string]interface{}{ + "name": c.roleName, + }, + } + resp, err := b.pathStaticCredsRead(bgCTX, req, staticCredsFieldData(req.Data)) + + if err != c.expectedError { + t.Fatalf("got error %q, but expected %q", err, c.expectedError) + } + if !reflect.DeepEqual(resp, c.expectedResponse) { + t.Fatalf("got response %v, but expected %v", resp, c.expectedResponse) + } + }) + } +} + +func staticCredsFieldData(data map[string]interface{}) *framework.FieldData { + schema := map[string]*framework.FieldSchema{ + paramRoleName: { + Type: framework.TypeString, + Description: descRoleName, + }, + } + + return &framework.FieldData{ + Raw: data, + Schema: schema, + } +} diff --git a/builtin/logical/aws/path_static_roles.go b/builtin/logical/aws/path_static_roles.go new file mode 100644 index 0000000..b0aa3b0 --- /dev/null +++ b/builtin/logical/aws/path_static_roles.go @@ -0,0 +1,331 @@ +package aws + +import ( + "context" + "errors" + "fmt" + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/fatih/structs" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/queue" +) + +const ( + pathStaticRole = "static-roles" + + paramRoleName = "name" + paramUsername = "username" + paramRotationPeriod = "rotation_period" +) + +type staticRoleEntry struct { + Name string `json:"name" structs:"name" mapstructure:"name"` + ID string `json:"id" structs:"id" mapstructure:"id"` + Username string `json:"username" structs:"username" mapstructure:"username"` + RotationPeriod time.Duration `json:"rotation_period" structs:"rotation_period" mapstructure:"rotation_period"` +} + +func pathStaticRoles(b *backend) *framework.Path { + roleResponse := map[int][]framework.Response{ + http.StatusOK: {{ + Description: http.StatusText(http.StatusOK), + Fields: map[string]*framework.FieldSchema{ + paramRoleName: { + Type: framework.TypeString, + Description: descRoleName, + }, + paramUsername: { + Type: framework.TypeString, + Description: descUsername, + }, + paramRotationPeriod: { + Type: framework.TypeDurationSecond, + Description: descRotationPeriod, + }, + }, + }}, + } + + return &framework.Path{ + Pattern: fmt.Sprintf("%s/%s", pathStaticRole, framework.GenericNameWithAtRegex(paramRoleName)), + Fields: map[string]*framework.FieldSchema{ + paramRoleName: { + Type: framework.TypeString, + Description: descRoleName, + }, + paramUsername: { + Type: framework.TypeString, + Description: descUsername, + 
}, + paramRotationPeriod: { + Type: framework.TypeDurationSecond, + Description: descRotationPeriod, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathStaticRolesRead, + Responses: roleResponse, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathStaticRolesWrite, + ForwardPerformanceSecondary: true, + ForwardPerformanceStandby: true, + Responses: roleResponse, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathStaticRolesDelete, + ForwardPerformanceSecondary: true, + ForwardPerformanceStandby: true, + Responses: map[int][]framework.Response{ + http.StatusNoContent: {{ + Description: http.StatusText(http.StatusNoContent), + }}, + }, + }, + }, + + HelpSynopsis: pathStaticRolesHelpSyn, + HelpDescription: pathStaticRolesHelpDesc, + } +} + +func (b *backend) pathStaticRolesRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName, ok := data.GetOk(paramRoleName) + if !ok { + return nil, fmt.Errorf("missing %q parameter", paramRoleName) + } + + b.roleMutex.RLock() + defer b.roleMutex.RUnlock() + + entry, err := req.Storage.Get(ctx, formatRoleStoragePath(roleName.(string))) + if err != nil { + return nil, fmt.Errorf("failed to read configuration for static role %q: %w", roleName, err) + } + if entry == nil { + return nil, nil + } + + var config staticRoleEntry + if err := entry.DecodeJSON(&config); err != nil { + return nil, fmt.Errorf("failed to decode configuration for static role %q: %w", roleName, err) + } + + return &logical.Response{ + Data: formatResponse(config), + }, nil +} + +func (b *backend) pathStaticRolesWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Create & validate config from request parameters + config := staticRoleEntry{} + isCreate := req.Operation == logical.CreateOperation + + if rawRoleName, ok := data.GetOk(paramRoleName); ok { + config.Name = rawRoleName.(string) + + if err := b.validateRoleName(config.Name); err != nil { + return nil, err + } + } else { + return logical.ErrorResponse("missing %q parameter", paramRoleName), nil + } + + // retrieve old role value + entry, err := req.Storage.Get(ctx, formatRoleStoragePath(config.Name)) + if err != nil { + return nil, fmt.Errorf("couldn't check storage for pre-existing role: %w", err) + } + + if entry != nil { + err = entry.DecodeJSON(&config) + if err != nil { + return nil, fmt.Errorf("couldn't convert existing role into config struct: %w", err) + } + } else { + // if we couldn't find an entry, this is a create event + isCreate = true + } + + // other params are optional if we're not Creating + + if rawUsername, ok := data.GetOk(paramUsername); ok { + config.Username = rawUsername.(string) + + if err := b.validateIAMUserExists(ctx, req.Storage, &config, isCreate); err != nil { + return nil, err + } + } else if isCreate { + return logical.ErrorResponse("missing %q parameter", paramUsername), nil + } + + if rawRotationPeriod, ok := data.GetOk(paramRotationPeriod); ok { + config.RotationPeriod = time.Duration(rawRotationPeriod.(int)) * time.Second + + if err := b.validateRotationPeriod(config.RotationPeriod); err != nil { + return nil, err + } + } else if isCreate { + return logical.ErrorResponse("missing %q parameter", paramRotationPeriod), nil + } + + b.roleMutex.Lock() + defer b.roleMutex.Unlock() + + // Upsert role config + newRole, err := 
logical.StorageEntryJSON(formatRoleStoragePath(config.Name), config)
+	if err != nil {
+		return nil, fmt.Errorf("failed to marshal object to JSON: %w", err)
+	}
+	err = req.Storage.Put(ctx, newRole)
+	if err != nil {
+		return nil, fmt.Errorf("failed to save object in storage: %w", err)
+	}
+
+	// Bootstrap initial set of keys if they did not exist before. AWS Secret Access Keys can only be obtained on creation,
+	// so we need to bootstrap new roles with a new initial set of keys to be able to serve valid credentials to Vault clients.
+	existingCreds, err := req.Storage.Get(ctx, formatCredsStoragePath(config.Name))
+	if err != nil {
+		return nil, fmt.Errorf("unable to verify if credentials already exist for role %q: %w", config.Name, err)
+	}
+	if existingCreds == nil {
+		err := b.createCredential(ctx, req.Storage, config, false)
+		if err != nil {
+			return nil, fmt.Errorf("failed to create new credentials for role %q: %w", config.Name, err)
+		}
+
+		err = b.credRotationQueue.Push(&queue.Item{
+			Key:      config.Name,
+			Value:    config,
+			Priority: time.Now().Add(config.RotationPeriod).Unix(),
+		})
+		if err != nil {
+			return nil, fmt.Errorf("failed to add item into the rotation queue for role %q: %w", config.Name, err)
+		}
+	}
+
+	return &logical.Response{
+		Data: formatResponse(config),
+	}, nil
+}
+
+func (b *backend) pathStaticRolesDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	roleName, ok := data.GetOk(paramRoleName)
+	if !ok {
+		return nil, fmt.Errorf("missing %q parameter", paramRoleName)
+	}
+
+	b.roleMutex.Lock()
+	defer b.roleMutex.Unlock()
+
+	entry, err := req.Storage.Get(ctx, formatRoleStoragePath(roleName.(string)))
+	if err != nil {
+		return nil, fmt.Errorf("couldn't locate role in storage due to error: %w", err)
+	}
+	// no entry in storage, but no error either, congrats, it's deleted!
+	if entry == nil {
+		return nil, nil
+	}
+	var cfg staticRoleEntry
+	err = entry.DecodeJSON(&cfg)
+	if err != nil {
+		return nil, fmt.Errorf("couldn't convert storage entry to role config")
+	}
+
+	err = b.deleteCredential(ctx, req.Storage, cfg, false)
+	if err != nil {
+		return nil, fmt.Errorf("failed to clean credentials while deleting role %q: %w", roleName.(string), err)
+	}
+
+	// delete from the queue
+	_, err = b.credRotationQueue.PopByKey(cfg.Name)
+	if err != nil {
+		return nil, fmt.Errorf("couldn't delete key from queue: %w", err)
+	}
+
+	return nil, req.Storage.Delete(ctx, formatRoleStoragePath(roleName.(string)))
+}
+
+func (b *backend) validateRoleName(name string) error {
+	if name == "" {
+		return errors.New("empty role name attribute given")
+	}
+	return nil
+}
+
+// validateIAMUserExists checks the user information we have for the role against the information on AWS. On a create, it uses the username
+// to retrieve the user information and _sets_ the userID. On update, it validates the userID and username.
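+// In rough pseudocode (a sketch of the flow implemented below, not a
+// separate API):
+//
+//	out := iam.GetUser(entry.Username) // must succeed and echo the username
+//	if update: require out.UserId == entry.ID
+//	else:      entry.ID = out.UserId   // captured once at create time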
+func (b *backend) validateIAMUserExists(ctx context.Context, storage logical.Storage, entry *staticRoleEntry, isCreate bool) error {
+	c, err := b.clientIAM(ctx, storage)
+	if err != nil {
+		return fmt.Errorf("unable to validate username %q: %w", entry.Username, err)
+	}
+
+	// we don't really care about the content of the result, just that it's not an error
+	out, err := c.GetUser(&iam.GetUserInput{
+		UserName: aws.String(entry.Username),
+	})
+	if err != nil || out.User == nil {
+		return fmt.Errorf("unable to validate username %q: %w", entry.Username, err)
+	}
+	if *out.User.UserName != entry.Username {
+		return fmt.Errorf("AWS GetUser returned a username, but it didn't match: %q was requested, but %q was returned", entry.Username, *out.User.UserName)
+	}
+
+	if !isCreate && *out.User.UserId != entry.ID {
+		return fmt.Errorf("AWS GetUser returned a user, but the ID did not match: %q was requested, but %q was returned", entry.ID, *out.User.UserId)
+	} else {
+		// if this is an insert, store the userID. This is the immutable part of an IAM user, but it's not exactly user-friendly.
+		// So, we allow users to specify usernames, but on updates we'll use the ID as a verification cross-check.
+		entry.ID = *out.User.UserId
+	}
+
+	return nil
+}
+
+const (
+	minAllowableRotationPeriod = 1 * time.Minute
+)
+
+func (b *backend) validateRotationPeriod(period time.Duration) error {
+	if period < minAllowableRotationPeriod {
+		return fmt.Errorf("role rotation period out of range: must be at least %.2f seconds", minAllowableRotationPeriod.Seconds())
+	}
+	return nil
+}
+
+func formatResponse(cfg staticRoleEntry) map[string]interface{} {
+	response := structs.New(cfg).Map()
+	response[paramRotationPeriod] = int64(cfg.RotationPeriod.Seconds())
+
+	return response
+}
+
+func formatRoleStoragePath(roleName string) string {
+	return fmt.Sprintf("%s/%s", pathStaticRole, roleName)
+}
+
+const pathStaticRolesHelpSyn = `
+Manage static roles for AWS.
+`
+
+const pathStaticRolesHelpDesc = `
+This path lets you manage static roles (users) for the AWS secret backend.
+A static role is associated with a single IAM user, and manages the access
+keys based on a rotation period, automatically rotating the credential. If
+the IAM user has multiple access keys, the oldest key will be rotated.
+`
+
+const (
+	descRoleName       = "The name of this role."
+	descUsername       = "The IAM user to adopt as a static role."
+	descRotationPeriod = `Period by which to rotate the backing credential of the adopted user.
+This can be a Go duration (e.g., '1m', '24h'), or an integer number of seconds.`
+)
diff --git a/builtin/logical/aws/path_static_roles_test.go b/builtin/logical/aws/path_static_roles_test.go
new file mode 100644
index 0000000..205b42c
--- /dev/null
+++ b/builtin/logical/aws/path_static_roles_test.go
@@ -0,0 +1,490 @@
+package aws
+
+import (
+	"context"
+	"errors"
+	"testing"
+	"time"
+
+	"github.com/hashicorp/vault/sdk/queue"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/iam"
+	"github.com/hashicorp/go-secure-stdlib/awsutil"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// TestStaticRolesValidation verifies that valid requests pass validation and that invalid requests fail validation.
+// This includes the user already existing in IAM, and the rotation period being sufficiently long.
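+// For example, the "bad rotation period" case below submits a rotation_period
+// of "45s" and expects an error, since validateRotationPeriod enforces the
+// one-minute minimum (minAllowableRotationPeriod).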
+func TestStaticRolesValidation(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + bgCTX := context.Background() // for brevity + + cases := []struct { + name string + opts []awsutil.MockIAMOption + requestData map[string]interface{} + isError bool + }{ + { + name: "all good", + opts: []awsutil.MockIAMOption{ + awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("jane-doe"), UserId: aws.String("unique-id")}}), + awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{ + AccessKey: &iam.AccessKey{ + AccessKeyId: aws.String("abcdefghijklmnopqrstuvwxyz"), + SecretAccessKey: aws.String("zyxwvutsrqponmlkjihgfedcba"), + UserName: aws.String("jane-doe"), + }, + }), + awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{ + AccessKeyMetadata: []*iam.AccessKeyMetadata{}, + IsTruncated: aws.Bool(false), + }), + }, + requestData: map[string]interface{}{ + "name": "test", + "username": "jane-doe", + "rotation_period": "1d", + }, + }, + { + name: "bad user", + opts: []awsutil.MockIAMOption{ + awsutil.WithGetUserError(errors.New("oh no")), + }, + requestData: map[string]interface{}{ + "name": "test", + "username": "jane-doe", + "rotation_period": "24h", + }, + isError: true, + }, + { + name: "user mismatch", + opts: []awsutil.MockIAMOption{ + awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("ms-impostor"), UserId: aws.String("fake-id")}}), + }, + requestData: map[string]interface{}{ + "name": "test", + "username": "jane-doe", + "rotation_period": "1d2h", + }, + isError: true, + }, + { + name: "bad rotation period", + opts: []awsutil.MockIAMOption{ + awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("jane-doe"), UserId: aws.String("unique-id")}}), + }, + requestData: map[string]interface{}{ + "name": "test", + "username": "jane-doe", + "rotation_period": "45s", + }, + isError: true, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + b := Backend(config) + miam, err := awsutil.NewMockIAM(c.opts...)(nil) + if err != nil { + t.Fatal(err) + } + b.iamClient = miam + if err := b.Setup(bgCTX, config); err != nil { + t.Fatal(err) + } + req := &logical.Request{ + Operation: logical.UpdateOperation, + Storage: config.StorageView, + Data: c.requestData, + Path: "static-roles/test", + } + _, err = b.pathStaticRolesWrite(bgCTX, req, staticRoleFieldData(req.Data)) + if c.isError && err == nil { + t.Fatal("expected an error but didn't get one") + } else if !c.isError && err != nil { + t.Fatalf("got an unexpected error: %s", err) + } + }) + } +} + +// TestStaticRolesWrite validates that we can write a new entry for a new static role, and that we correctly +// do not write if the request is invalid in some way. 
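+// Update semantics worth noting: fields omitted from an update request keep
+// their stored values, so the "update existing user" case below expects the
+// pre-seeded username "john-doe" with only the rotation period changed.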
+func TestStaticRolesWrite(t *testing.T) { + bgCTX := context.Background() + + cases := []struct { + name string + opts []awsutil.MockIAMOption + data map[string]interface{} + expectedError bool + findUser bool + isUpdate bool + }{ + { + name: "happy path", + opts: []awsutil.MockIAMOption{ + awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("jane-doe"), UserId: aws.String("unique-id")}}), + awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{ + AccessKeyMetadata: []*iam.AccessKeyMetadata{}, + IsTruncated: aws.Bool(false), + }), + awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{ + AccessKey: &iam.AccessKey{ + AccessKeyId: aws.String("abcdefghijklmnopqrstuvwxyz"), + SecretAccessKey: aws.String("zyxwvutsrqponmlkjihgfedcba"), + UserName: aws.String("jane-doe"), + }, + }), + }, + data: map[string]interface{}{ + "name": "test", + "username": "jane-doe", + "rotation_period": "1d", + }, + // writes role, writes cred + findUser: true, + }, + { + name: "no aws user", + opts: []awsutil.MockIAMOption{ + awsutil.WithGetUserError(errors.New("no such user, etc etc")), + }, + data: map[string]interface{}{ + "name": "test", + "username": "a-nony-mous", + "rotation_period": "15s", + }, + expectedError: true, + }, + { + name: "update existing user", + opts: []awsutil.MockIAMOption{ + awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("john-doe"), UserId: aws.String("unique-id")}}), + awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{ + AccessKeyMetadata: []*iam.AccessKeyMetadata{}, + IsTruncated: aws.Bool(false), + }), + awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{ + AccessKey: &iam.AccessKey{ + AccessKeyId: aws.String("abcdefghijklmnopqrstuvwxyz"), + SecretAccessKey: aws.String("zyxwvutsrqponmlkjihgfedcba"), + UserName: aws.String("john-doe"), + }, + }), + }, + data: map[string]interface{}{ + "name": "johnny", + "rotation_period": "19m", + }, + findUser: true, + isUpdate: true, + }, + } + + // if a user exists (user doesn't exist is tested in validation) + // we'll check how many keys the user has - if it's two, we delete one. 
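+	// Every case below pre-seeds storage with the "johnny" role so that update
+	// requests have an existing entry to merge into; create cases use a
+	// different role name and are unaffected by the seed data.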
+ + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + + miam, err := awsutil.NewMockIAM( + c.opts..., + )(nil) + if err != nil { + t.Fatal(err) + } + + b := Backend(config) + b.iamClient = miam + if err := b.Setup(bgCTX, config); err != nil { + t.Fatal(err) + } + + // put a role in storage for update tests + staticRole := staticRoleEntry{ + Name: "johnny", + Username: "john-doe", + ID: "unique-id", + RotationPeriod: 24 * time.Hour, + } + entry, err := logical.StorageEntryJSON(formatRoleStoragePath(staticRole.Name), staticRole) + if err != nil { + t.Fatal(err) + } + err = config.StorageView.Put(bgCTX, entry) + if err != nil { + t.Fatal(err) + } + + req := &logical.Request{ + Operation: logical.UpdateOperation, + Storage: config.StorageView, + Data: c.data, + Path: "static-roles/" + c.data["name"].(string), + } + + r, err := b.pathStaticRolesWrite(bgCTX, req, staticRoleFieldData(req.Data)) + if c.expectedError && err == nil { + t.Fatal(err) + } else if c.expectedError { + return // save us some if statements + } + + if err != nil { + t.Fatalf("got an error back unexpectedly: %s", err) + } + + if c.findUser && r == nil { + t.Fatal("response was nil, but it shouldn't have been") + } + + role, err := config.StorageView.Get(bgCTX, req.Path) + if c.findUser && (err != nil || role == nil) { + t.Fatalf("couldn't find the role we should have stored: %s", err) + } + var actualData staticRoleEntry + err = role.DecodeJSON(&actualData) + if err != nil { + t.Fatalf("couldn't convert storage data to role entry: %s", err) + } + + // construct expected data + var expectedData staticRoleEntry + fieldData := staticRoleFieldData(c.data) + if c.isUpdate { + // data is johnny + c.data + expectedData = staticRole + } + + if u, ok := fieldData.GetOk("username"); ok { + expectedData.Username = u.(string) + } + if r, ok := fieldData.GetOk("rotation_period"); ok { + expectedData.RotationPeriod = time.Duration(r.(int)) * time.Second + } + if n, ok := fieldData.GetOk("name"); ok { + expectedData.Name = n.(string) + } + + // validate fields + if eu, au := expectedData.Username, actualData.Username; eu != au { + t.Fatalf("mismatched username, expected %q but got %q", eu, au) + } + if er, ar := expectedData.RotationPeriod, actualData.RotationPeriod; er != ar { + t.Fatalf("mismatched rotation period, expected %q but got %q", er, ar) + } + if en, an := expectedData.Name, actualData.Name; en != an { + t.Fatalf("mismatched role name, expected %q, but got %q", en, an) + } + }) + } +} + +// TestStaticRoleRead validates that we can read a configured role and correctly do not read anything if we +// request something that doesn't exist. 
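+// As with static creds, reading a missing role is expected to return
+// (nil, nil) rather than an error; the "toast" case below relies on that.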
+func TestStaticRoleRead(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + bgCTX := context.Background() + + // test cases are run against an inmem storage holding a role called "test" attached to an IAM user called "jane-doe" + cases := []struct { + name string + roleName string + found bool + }{ + { + name: "role name exists", + roleName: "test", + found: true, + }, + { + name: "role name not found", + roleName: "toast", + found: false, // implied, but set for clarity + }, + } + + staticRole := staticRoleEntry{ + Name: "test", + Username: "jane-doe", + RotationPeriod: 24 * time.Hour, + } + entry, err := logical.StorageEntryJSON(formatRoleStoragePath(staticRole.Name), staticRole) + if err != nil { + t.Fatal(err) + } + err = config.StorageView.Put(bgCTX, entry) + if err != nil { + t.Fatal(err) + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + req := &logical.Request{ + Operation: logical.ReadOperation, + Storage: config.StorageView, + Data: map[string]interface{}{ + "name": c.roleName, + }, + Path: formatRoleStoragePath(c.roleName), + } + + b := Backend(config) + + r, err := b.pathStaticRolesRead(bgCTX, req, staticRoleFieldData(req.Data)) + if err != nil { + t.Fatal(err) + } + if c.found { + if r == nil { + t.Fatal("response was nil, but it shouldn't have been") + } + } else { + if r != nil { + t.Fatal("response should have been nil on a non-existent role") + } + } + }) + } +} + +// TestStaticRoleDelete validates that we correctly remove a role on a delete request, and that we correctly do not +// remove anything if a role does not exist with that name. +func TestStaticRoleDelete(t *testing.T) { + bgCTX := context.Background() + + // test cases are run against an inmem storage holding a role called "test" attached to an IAM user called "jane-doe" + cases := []struct { + name string + role string + found bool + }{ + { + name: "role found", + role: "test", + found: true, + }, + { + name: "role not found", + role: "tossed", + found: false, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + + // fake an IAM + var iamfunc awsutil.IAMAPIFunc + if !c.found { + iamfunc = awsutil.NewMockIAM(awsutil.WithDeleteAccessKeyError(errors.New("shouldn't have called delete"))) + } else { + iamfunc = awsutil.NewMockIAM() + } + miam, err := iamfunc(nil) + if err != nil { + t.Fatalf("couldn't initialize mockiam: %s", err) + } + + b := Backend(config) + b.iamClient = miam + + // put in storage + staticRole := staticRoleEntry{ + Name: "test", + Username: "jane-doe", + RotationPeriod: 24 * time.Hour, + } + entry, err := logical.StorageEntryJSON(formatRoleStoragePath(staticRole.Name), staticRole) + if err != nil { + t.Fatal(err) + } + err = config.StorageView.Put(bgCTX, entry) + if err != nil { + t.Fatal(err) + } + + l, err := config.StorageView.List(bgCTX, "") + if err != nil || len(l) != 1 { + t.Fatalf("couldn't add an entry to storage during test setup: %s", err) + } + + // put in queue + err = b.credRotationQueue.Push(&queue.Item{ + Key: staticRole.Name, + Value: staticRole, + Priority: time.Now().Add(90 * time.Hour).Unix(), + }) + if err != nil { + t.Fatalf("couldn't add items to pq") + } + + req := &logical.Request{ + Operation: logical.ReadOperation, + Storage: config.StorageView, + Data: map[string]interface{}{ + "name": c.role, + }, + Path: formatRoleStoragePath(c.role), + } + + r, err := b.pathStaticRolesDelete(bgCTX, 
req, staticRoleFieldData(req.Data)) + if err != nil { + t.Fatal(err) + } + if r != nil { + t.Fatal("response wasn't nil, but it should have been") + } + + l, err = config.StorageView.List(bgCTX, "") + if err != nil { + t.Fatal(err) + } + if c.found && len(l) != 0 { + t.Fatal("size of role storage is non zero after delete") + } else if !c.found && len(l) != 1 { + t.Fatal("size of role storage changed after what should have been no deletion") + } + + if c.found && b.credRotationQueue.Len() != 0 { + t.Fatal("size of queue is non-zero after delete") + } else if !c.found && b.credRotationQueue.Len() != 1 { + t.Fatal("size of queue changed after what should have been no deletion") + } + }) + } +} + +func staticRoleFieldData(data map[string]interface{}) *framework.FieldData { + schema := map[string]*framework.FieldSchema{ + paramRoleName: { + Type: framework.TypeString, + Description: descRoleName, + }, + paramUsername: { + Type: framework.TypeString, + Description: descUsername, + }, + paramRotationPeriod: { + Type: framework.TypeDurationSecond, + Description: descRotationPeriod, + }, + } + + return &framework.FieldData{ + Raw: data, + Schema: schema, + } +} diff --git a/builtin/logical/aws/path_user.go b/builtin/logical/aws/path_user.go new file mode 100644 index 0000000..f368365 --- /dev/null +++ b/builtin/logical/aws/path_user.go @@ -0,0 +1,314 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package aws + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/mapstructure" +) + +func pathUser(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "(creds|sts)/" + framework.GenericNameWithAtRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixAWS, + OperationVerb: "generate", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the role", + }, + "role_arn": { + Type: framework.TypeString, + Description: "ARN of role to assume when credential_type is " + assumedRoleCred, + }, + "ttl": { + Type: framework.TypeDurationSecond, + Description: "Lifetime of the returned credentials in seconds", + Default: 3600, + }, + "role_session_name": { + Type: framework.TypeString, + Description: "Session name to use when assuming role. 
Max chars: 64", + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathCredsRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "credentials|sts-credentials", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathCredsRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "credentials-with-parameters|sts-credentials-with-parameters", + }, + }, + }, + + HelpSynopsis: pathUserHelpSyn, + HelpDescription: pathUserHelpDesc, + } +} + +func (b *backend) pathCredsRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + roleName := d.Get("name").(string) + + // Read the policy + role, err := b.roleRead(ctx, req.Storage, roleName, true) + if err != nil { + return nil, fmt.Errorf("error retrieving role: %w", err) + } + if role == nil { + return logical.ErrorResponse(fmt.Sprintf( + "Role %q not found", roleName)), nil + } + + var ttl int64 + ttlRaw, ok := d.GetOk("ttl") + switch { + case ok: + ttl = int64(ttlRaw.(int)) + case role.DefaultSTSTTL > 0: + ttl = int64(role.DefaultSTSTTL.Seconds()) + default: + ttl = int64(d.Get("ttl").(int)) + } + + var maxTTL int64 + if role.MaxSTSTTL > 0 { + maxTTL = int64(role.MaxSTSTTL.Seconds()) + } else { + maxTTL = int64(b.System().MaxLeaseTTL().Seconds()) + } + + if ttl > maxTTL { + ttl = maxTTL + } + + roleArn := d.Get("role_arn").(string) + roleSessionName := d.Get("role_session_name").(string) + + var credentialType string + switch { + case len(role.CredentialTypes) == 1: + credentialType = role.CredentialTypes[0] + // There is only one way for the CredentialTypes to contain more than one entry, and that's an upgrade path + // where it contains iamUserCred and federationTokenCred + // This ambiguity can be resolved based on req.Path, so resolve it assuming CredentialTypes only has those values + case len(role.CredentialTypes) > 1: + if strings.HasPrefix(req.Path, "creds") { + credentialType = iamUserCred + } else { + credentialType = federationTokenCred + } + // sanity check on the assumption above + if !strutil.StrListContains(role.CredentialTypes, credentialType) { + return logical.ErrorResponse(fmt.Sprintf("requested credential type %q not in allowed credential types %#v", credentialType, role.CredentialTypes)), nil + } + } + + // creds requested through the sts path shouldn't be allowed to get iamUserCred type creds + // when the role is created from legacy data because they might have more privileges in AWS. + // See https://github.com/hashicorp/vault/issues/4229#issuecomment-380316788 for details. 
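+	// Concretely: a legacy role upgraded from an inline policy document is
+	// ambiguous between iam_user and federation_token, so for such roles
+	// "creds/<role>" may only yield iam_user credentials and "sts/<role>"
+	// may only yield STS credentials.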
+ if role.ProhibitFlexibleCredPath { + if credentialType == iamUserCred && strings.HasPrefix(req.Path, "sts") { + return logical.ErrorResponse(fmt.Sprintf("attempted to retrieve %s credentials through the sts path; this is not allowed for legacy roles", iamUserCred)), nil + } + if credentialType != iamUserCred && strings.HasPrefix(req.Path, "creds") { + return logical.ErrorResponse(fmt.Sprintf("attempted to retrieve %s credentials through the creds path; this is not allowed for legacy roles", credentialType)), nil + } + } + + switch credentialType { + case iamUserCred: + return b.secretAccessKeysCreate(ctx, req.Storage, req.DisplayName, roleName, role) + case assumedRoleCred: + switch { + case roleArn == "": + if len(role.RoleArns) != 1 { + return logical.ErrorResponse("did not supply a role_arn parameter and unable to determine one"), nil + } + roleArn = role.RoleArns[0] + case !strutil.StrListContains(role.RoleArns, roleArn): + return logical.ErrorResponse(fmt.Sprintf("role_arn %q not in allowed role arns for Vault role %q", roleArn, roleName)), nil + } + return b.assumeRole(ctx, req.Storage, req.DisplayName, roleName, roleArn, role.PolicyDocument, role.PolicyArns, role.IAMGroups, ttl, roleSessionName) + case federationTokenCred: + return b.getFederationToken(ctx, req.Storage, req.DisplayName, roleName, role.PolicyDocument, role.PolicyArns, role.IAMGroups, ttl) + default: + return logical.ErrorResponse(fmt.Sprintf("unknown credential_type: %q", credentialType)), nil + } +} + +func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _kind string, data interface{}) error { + var entry walUser + if err := mapstructure.Decode(data, &entry); err != nil { + return err + } + username := entry.UserName + + // Get the client + client, err := b.clientIAM(ctx, req.Storage) + if err != nil { + return err + } + + // Get information about this user + groupsResp, err := client.ListGroupsForUserWithContext(ctx, &iam.ListGroupsForUserInput{ + UserName: aws.String(username), + MaxItems: aws.Int64(1000), + }) + if err != nil { + // This isn't guaranteed to be perfect; for example, an IAM user + // might have gotten put into the WAL but then the IAM user creation + // failed (e.g., Vault didn't have permissions) and then the WAL + // deletion failed as well. Then, if Vault doesn't have access to + // call iam:ListGroupsForUser, AWS will return an access denied error + // and the WAL will never get cleaned up. But this is better than + // just having Vault "forget" about a user it actually created. + // + // BEWARE a potential race condition -- where this is called + // immediately after a user is created. AWS eventual consistency + // might say the user doesn't exist when the user does in fact + // exist, and this could cause Vault to forget about the user. + // This won't happen if the user creation fails (because the WAL + // minimum age is 5 minutes, and AWS eventual consistency is, in + // practice, never that long), but it could happen if a lease holder + // asks immediately after getting a user to revoke the lease, causing + // Vault to leak the secret, which would be a Very Bad Thing to allow. + // So we make sure that, if there's an associated lease, it must be at + // least 5 minutes old as well. 
+ if aerr, ok := err.(awserr.Error); ok { + acceptMissingIamUsers := false + if req.Secret == nil || time.Since(req.Secret.IssueTime) > time.Duration(minAwsUserRollbackAge) { + // WAL rollback + acceptMissingIamUsers = true + } + if aerr.Code() == iam.ErrCodeNoSuchEntityException && acceptMissingIamUsers { + return nil + } + } + return err + } + groups := groupsResp.Groups + + // Inline (user) policies + policiesResp, err := client.ListUserPoliciesWithContext(ctx, &iam.ListUserPoliciesInput{ + UserName: aws.String(username), + MaxItems: aws.Int64(1000), + }) + if err != nil { + return err + } + policies := policiesResp.PolicyNames + + // Attached managed policies + manPoliciesResp, err := client.ListAttachedUserPoliciesWithContext(ctx, &iam.ListAttachedUserPoliciesInput{ + UserName: aws.String(username), + MaxItems: aws.Int64(1000), + }) + if err != nil { + return err + } + manPolicies := manPoliciesResp.AttachedPolicies + + keysResp, err := client.ListAccessKeysWithContext(ctx, &iam.ListAccessKeysInput{ + UserName: aws.String(username), + MaxItems: aws.Int64(1000), + }) + if err != nil { + return err + } + keys := keysResp.AccessKeyMetadata + + // Revoke all keys + for _, k := range keys { + _, err = client.DeleteAccessKeyWithContext(ctx, &iam.DeleteAccessKeyInput{ + AccessKeyId: k.AccessKeyId, + UserName: aws.String(username), + }) + if err != nil { + return err + } + } + + // Detach managed policies + for _, p := range manPolicies { + _, err = client.DetachUserPolicyWithContext(ctx, &iam.DetachUserPolicyInput{ + UserName: aws.String(username), + PolicyArn: p.PolicyArn, + }) + if err != nil { + return err + } + } + + // Delete any inline (user) policies + for _, p := range policies { + _, err = client.DeleteUserPolicyWithContext(ctx, &iam.DeleteUserPolicyInput{ + UserName: aws.String(username), + PolicyName: p, + }) + if err != nil { + return err + } + } + + // Remove the user from all their groups + for _, g := range groups { + _, err = client.RemoveUserFromGroupWithContext(ctx, &iam.RemoveUserFromGroupInput{ + GroupName: g.GroupName, + UserName: aws.String(username), + }) + if err != nil { + return err + } + } + + // Delete the user + _, err = client.DeleteUserWithContext(ctx, &iam.DeleteUserInput{ + UserName: aws.String(username), + }) + if err != nil { + return err + } + + return nil +} + +type walUser struct { + UserName string +} + +const pathUserHelpSyn = ` +Generate AWS credentials from a specific Vault role. +` + +const pathUserHelpDesc = ` +This path will generate new, never before used AWS credentials for +accessing AWS. The IAM policy used to back this key pair will be +the "name" parameter. For example, if this backend is mounted at "aws", +then "aws/creds/deploy" would generate access keys for the "deploy" role. + +The access keys will have a lease associated with them. The access keys +can be revoked by using the lease ID when using the iam_user credential type. +When using AWS STS credential types (assumed_role or federation_token), +revoking the lease does not revoke the access keys. +` diff --git a/builtin/logical/aws/rollback.go b/builtin/logical/aws/rollback.go new file mode 100644 index 0000000..847ecd1 --- /dev/null +++ b/builtin/logical/aws/rollback.go @@ -0,0 +1,30 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package aws
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/hashicorp/vault/sdk/framework"
+ "github.com/hashicorp/vault/sdk/helper/consts"
+ "github.com/hashicorp/vault/sdk/logical"
+)
+
+func (b *backend) walRollback(ctx context.Context, req *logical.Request, kind string, data interface{}) error {
+ walRollbackMap := map[string]framework.WALRollbackFunc{
+ "user": b.pathUserRollback,
+ }
+
+ if !b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary|consts.ReplicationPerformanceStandby) {
+ return nil
+ }
+
+ f, ok := walRollbackMap[kind]
+ if !ok {
+ return fmt.Errorf("unknown type to rollback")
+ }
+
+ return f(ctx, req, kind, data)
+}
diff --git a/builtin/logical/aws/rotation.go b/builtin/logical/aws/rotation.go
new file mode 100644
index 0000000..4461437
--- /dev/null
+++ b/builtin/logical/aws/rotation.go
@@ -0,0 +1,188 @@
+package aws
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/service/iam"
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/vault/sdk/logical"
+ "github.com/hashicorp/vault/sdk/queue"
+)
+
+// rotateExpiredStaticCreds will pop expired credentials (credentials whose priority
+// represents a time before the present), rotate the associated credential, and push
+// them back onto the queue with the new priority.
+func (b *backend) rotateExpiredStaticCreds(ctx context.Context, req *logical.Request) error {
+ var errs *multierror.Error
+
+ for {
+ keepGoing, err := b.rotateCredential(ctx, req.Storage)
+ if err != nil {
+ errs = multierror.Append(errs, err)
+ }
+ if !keepGoing {
+ if errs.ErrorOrNil() != nil {
+ return fmt.Errorf("error(s) occurred while rotating expired static credentials: %w", errs)
+ } else {
+ return nil
+ }
+ }
+ }
+}
+
+// rotateCredential pops an element from the priority queue and, if it is expired,
+// rotates it and re-pushes it. If a cred was rotated, it returns true, otherwise false.
+func (b *backend) rotateCredential(ctx context.Context, storage logical.Storage) (rotated bool, err error) {
+ // If the queue is empty, or the first item does not need a rotation
+ // (priority is the next rotation timestamp), there is nothing to do
+ item, err := b.credRotationQueue.Pop()
+ if err != nil {
+ // the queue is just empty, which is fine.
+ if err == queue.ErrEmpty {
+ return false, nil
+ }
+ // item is nil on any other Pop error, so don't reference it here
+ return false, fmt.Errorf("failed to pop from queue: %w", err)
+ }
+ if item.Priority > time.Now().Unix() {
+ // no rotation required
+ // push the item back into priority queue
+ err = b.credRotationQueue.Push(item)
+ if err != nil {
+ return false, fmt.Errorf("failed to add item into the rotation queue for role %q: %w", item.Key, err)
+ }
+ return false, nil
+ }
+
+ cfg := item.Value.(staticRoleEntry)
+
+ err = b.createCredential(ctx, storage, cfg, true)
+ if err != nil {
+ return false, err
+ }
+
+ // set new priority and re-queue
+ item.Priority = time.Now().Add(cfg.RotationPeriod).Unix()
+ err = b.credRotationQueue.Push(item)
+ if err != nil {
+ return false, fmt.Errorf("failed to add item into the rotation queue for role %q: %w", cfg.Name, err)
+ }
+
+ return true, nil
+}
+
+// createCredential will create a new IAM credential, deleting the oldest one if necessary.
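+//
+// A typical call, as made by rotateCredential above (existing usage, shown
+// here for illustration):
+//
+//	err := b.createCredential(ctx, storage, cfg, true)
+//
+// where shouldLockStorage=true asks the storage write inside to take
+// b.roleMutex itself.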
+func (b *backend) createCredential(ctx context.Context, storage logical.Storage, cfg staticRoleEntry, shouldLockStorage bool) error {
+ iamClient, err := b.clientIAM(ctx, storage)
+ if err != nil {
+ return fmt.Errorf("unable to get the AWS IAM client: %w", err)
+ }
+
+ // IAM users can have at most 2 sets of keys at a time.
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html)
+ // Ideally we would get this value through an API check, but I'm not sure one exists.
+ const maxAllowedKeys = 2
+
+ err = b.validateIAMUserExists(ctx, storage, &cfg, false)
+ if err != nil {
+ return fmt.Errorf("iam user didn't exist, or username/userid didn't match: %w", err)
+ }
+
+ accessKeys, err := iamClient.ListAccessKeys(&iam.ListAccessKeysInput{
+ UserName: aws.String(cfg.Username),
+ })
+ if err != nil {
+ return fmt.Errorf("unable to list existing access keys for IAM user %q: %w", cfg.Username, err)
+ }
+
+ // If we have the maximum number of keys, we have to delete one to make another (so we can get the credentials).
+ // We'll delete the oldest one.
+ //
+ // Since this check relies on a pre-coded maximum, it's a bit fragile. If the number goes up, we risk deleting
+ // a key when we didn't need to. If this number goes down, we'll start throwing errors because we think we're
+ // allowed to create a key and aren't. In either case, adjusting the constant should be sufficient to fix things.
+ if len(accessKeys.AccessKeyMetadata) >= maxAllowedKeys {
+ oldestKey := accessKeys.AccessKeyMetadata[0]
+
+ for i := 1; i < len(accessKeys.AccessKeyMetadata); i++ {
+ if accessKeys.AccessKeyMetadata[i].CreateDate.Before(*oldestKey.CreateDate) {
+ oldestKey = accessKeys.AccessKeyMetadata[i]
+ }
+ }
+
+ _, err := iamClient.DeleteAccessKey(&iam.DeleteAccessKeyInput{
+ AccessKeyId: oldestKey.AccessKeyId,
+ UserName: oldestKey.UserName,
+ })
+ if err != nil {
+ return fmt.Errorf("unable to delete the oldest access key for user %q: %w", cfg.Username, err)
+ }
+ }
+
+ // Create a new set of keys
+ out, err := iamClient.CreateAccessKey(&iam.CreateAccessKeyInput{
+ UserName: aws.String(cfg.Username),
+ })
+ if err != nil {
+ return fmt.Errorf("unable to create new access keys for user %q: %w", cfg.Username, err)
+ }
+
+ // Persist new keys
+ entry, err := logical.StorageEntryJSON(formatCredsStoragePath(cfg.Name), &awsCredentials{
+ AccessKeyID: *out.AccessKey.AccessKeyId,
+ SecretAccessKey: *out.AccessKey.SecretAccessKey,
+ })
+ if err != nil {
+ return fmt.Errorf("failed to marshal object to JSON: %w", err)
+ }
+ if shouldLockStorage {
+ b.roleMutex.Lock()
+ defer b.roleMutex.Unlock()
+ }
+ err = storage.Put(ctx, entry)
+ if err != nil {
+ return fmt.Errorf("failed to save object in storage: %w", err)
+ }
+
+ return nil
+}
+
+// deleteCredential will remove the credential associated with the role from storage.
+func (b *backend) deleteCredential(ctx context.Context, storage logical.Storage, cfg staticRoleEntry, shouldLockStorage bool) error {
+ // synchronize storage access if we didn't in the caller.
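+ // (Callers that already hold b.roleMutex are expected to pass
+ // shouldLockStorage=false; a sync.Mutex is not reentrant, so locking it
+ // again here would deadlock.)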
+ if shouldLockStorage { + b.roleMutex.Lock() + defer b.roleMutex.Unlock() + } + + key, err := storage.Get(ctx, formatCredsStoragePath(cfg.Name)) + if err != nil { + return fmt.Errorf("couldn't find key in storage: %w", err) + } + // no entry, so i guess we deleted it already + if key == nil { + return nil + } + var creds awsCredentials + err = key.DecodeJSON(&creds) + if err != nil { + return fmt.Errorf("couldn't decode storage entry to a valid credential: %w", err) + } + + err = storage.Delete(ctx, formatCredsStoragePath(cfg.Name)) + if err != nil { + return fmt.Errorf("couldn't delete from storage: %w", err) + } + + // because we have the information, this is the one we created, so it's safe for us to delete. + _, err = b.iamClient.DeleteAccessKey(&iam.DeleteAccessKeyInput{ + AccessKeyId: aws.String(creds.AccessKeyID), + UserName: aws.String(cfg.Username), + }) + if err != nil { + return fmt.Errorf("couldn't delete from IAM: %w", err) + } + + return nil +} diff --git a/builtin/logical/aws/rotation_test.go b/builtin/logical/aws/rotation_test.go new file mode 100644 index 0000000..8f672ef --- /dev/null +++ b/builtin/logical/aws/rotation_test.go @@ -0,0 +1,348 @@ +package aws + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/aws/aws-sdk-go/service/iam/iamiface" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/hashicorp/go-secure-stdlib/awsutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/queue" +) + +// TestRotation verifies that the rotation code and priority queue correctly selects and rotates credentials +// for static secrets. +func TestRotation(t *testing.T) { + bgCTX := context.Background() + + type credToInsert struct { + config staticRoleEntry // role configuration from a normal createRole request + age time.Duration // how old the cred should be - if this is longer than the config.RotationPeriod, + // the cred is 'pre-expired' + + changed bool // whether we expect the cred to change - this is technically redundant to a comparison between + // rotationPeriod and age. 
+ }
+
+ // due to a limitation with the mockIAM implementation, any cred you want to rotate must have
+ // username jane-doe and userid unique-id, since we can only pre-can one exact response to GetUser
+ cases := []struct {
+ name string
+ creds []credToInsert
+ }{
+ {
+ name: "refresh one",
+ creds: []credToInsert{
+ {
+ config: staticRoleEntry{
+ Name: "test",
+ Username: "jane-doe",
+ ID: "unique-id",
+ RotationPeriod: 2 * time.Second,
+ },
+ age: 5 * time.Second,
+ changed: true,
+ },
+ },
+ },
+ {
+ name: "refresh none",
+ creds: []credToInsert{
+ {
+ config: staticRoleEntry{
+ Name: "test",
+ Username: "jane-doe",
+ ID: "unique-id",
+ RotationPeriod: 1 * time.Minute,
+ },
+ age: 5 * time.Second,
+ changed: false,
+ },
+ },
+ },
+ {
+ name: "refresh one of two",
+ creds: []credToInsert{
+ {
+ config: staticRoleEntry{
+ Name: "toast",
+ Username: "john-doe",
+ ID: "other-id",
+ RotationPeriod: 1 * time.Minute,
+ },
+ age: 5 * time.Second,
+ changed: false,
+ },
+ {
+ config: staticRoleEntry{
+ Name: "test",
+ Username: "jane-doe",
+ ID: "unique-id",
+ RotationPeriod: 1 * time.Second,
+ },
+ age: 5 * time.Second,
+ changed: true,
+ },
+ },
+ },
+ {
+ name: "no creds to rotate",
+ creds: []credToInsert{},
+ },
+ }
+
+ ak := "long-access-key-id"
+ oldSecret := "abcdefghijklmnopqrstuvwxyz"
+ newSecret := "zyxwvutsrqponmlkjihgfedcba"
+
+ for _, c := range cases {
+ t.Run(c.name, func(t *testing.T) {
+ config := logical.TestBackendConfig()
+ config.StorageView = &logical.InmemStorage{}
+
+ b := Backend(config)
+
+ // insert all our creds
+ for i, cred := range c.creds {
+
+ // all the creds will be the same for every user, but that's okay
+ // since what we care about is whether they changed on a single-user basis.
+ miam, err := awsutil.NewMockIAM(
+ // blank list for existing user
+ awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{
+ AccessKeyMetadata: []*iam.AccessKeyMetadata{
+ {},
+ },
+ }),
+ // initial key to store
+ awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{
+ AccessKey: &iam.AccessKey{
+ AccessKeyId: aws.String(ak),
+ SecretAccessKey: aws.String(oldSecret),
+ },
+ }),
+ awsutil.WithGetUserOutput(&iam.GetUserOutput{
+ User: &iam.User{
+ UserId: aws.String(cred.config.ID),
+ UserName: aws.String(cred.config.Username),
+ },
+ }),
+ )(nil)
+ if err != nil {
+ t.Fatalf("couldn't initialize mock IAM handler: %s", err)
+ }
+ b.iamClient = miam
+
+ err = b.createCredential(bgCTX, config.StorageView, cred.config, true)
+ if err != nil {
+ t.Fatalf("couldn't insert credential %d: %s", i, err)
+ }
+
+ item := &queue.Item{
+ Key: cred.config.Name,
+ Value: cred.config,
+ Priority: time.Now().Add(-1 * cred.age).Add(cred.config.RotationPeriod).Unix(),
+ }
+ err = b.credRotationQueue.Push(item)
+ if err != nil {
+ t.Fatalf("couldn't push item onto queue: %s", err)
+ }
+ }
+
+ // update aws responses, same argument for why it's okay every cred will be the same
+ miam, err := awsutil.NewMockIAM(
+ // old key
+ awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{
+ AccessKeyMetadata: []*iam.AccessKeyMetadata{
+ {
+ AccessKeyId: aws.String(ak),
+ },
+ },
+ }),
+ // new key
+ awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{
+ AccessKey: &iam.AccessKey{
+ AccessKeyId: aws.String(ak),
+ SecretAccessKey: aws.String(newSecret),
+ },
+ }),
+ awsutil.WithGetUserOutput(&iam.GetUserOutput{
+ User: &iam.User{
+ UserId: aws.String("unique-id"),
+ UserName: aws.String("jane-doe"),
+ },
+ }),
+ )(nil)
+ if err != nil {
+ t.Fatalf("couldn't initialize mock IAM 
handler: %s", err) + } + b.iamClient = miam + + req := &logical.Request{ + Storage: config.StorageView, + } + err = b.rotateExpiredStaticCreds(bgCTX, req) + if err != nil { + t.Fatalf("got an error rotating credentials: %s", err) + } + + // check our credentials + for i, cred := range c.creds { + entry, err := config.StorageView.Get(bgCTX, formatCredsStoragePath(cred.config.Name)) + if err != nil { + t.Fatalf("got an error retrieving credentials %d", i) + } + var out awsCredentials + err = entry.DecodeJSON(&out) + if err != nil { + t.Fatalf("could not unmarshal storage view entry for cred %d to an aws credential: %s", i, err) + } + + if cred.changed && out.SecretAccessKey != newSecret { + t.Fatalf("expected the key for cred %d to have changed, but it hasn't", i) + } else if !cred.changed && out.SecretAccessKey != oldSecret { + t.Fatalf("expected the key for cred %d to have stayed the same, but it changed", i) + } + } + }) + } +} + +type fakeIAM struct { + iamiface.IAMAPI + delReqs []*iam.DeleteAccessKeyInput +} + +func (f *fakeIAM) DeleteAccessKey(r *iam.DeleteAccessKeyInput) (*iam.DeleteAccessKeyOutput, error) { + f.delReqs = append(f.delReqs, r) + return f.IAMAPI.DeleteAccessKey(r) +} + +// TestCreateCredential verifies that credential creation firstly only deletes credentials if it needs to (i.e., two +// or more credentials on IAM), and secondly correctly deletes the oldest one. +func TestCreateCredential(t *testing.T) { + cases := []struct { + name string + username string + id string + deletedKey string + opts []awsutil.MockIAMOption + }{ + { + name: "zero keys", + username: "jane-doe", + id: "unique-id", + opts: []awsutil.MockIAMOption{ + awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{ + AccessKeyMetadata: []*iam.AccessKeyMetadata{}, + }), + // delete should _not_ be called + awsutil.WithDeleteAccessKeyError(errors.New("should not have been called")), + awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{ + AccessKey: &iam.AccessKey{ + AccessKeyId: aws.String("key"), + SecretAccessKey: aws.String("itsasecret"), + }, + }), + awsutil.WithGetUserOutput(&iam.GetUserOutput{ + User: &iam.User{ + UserId: aws.String("unique-id"), + UserName: aws.String("jane-doe"), + }, + }), + }, + }, + { + name: "one key", + username: "jane-doe", + id: "unique-id", + opts: []awsutil.MockIAMOption{ + awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{ + AccessKeyMetadata: []*iam.AccessKeyMetadata{ + {AccessKeyId: aws.String("foo"), CreateDate: aws.Time(time.Now())}, + }, + }), + // delete should _not_ be called + awsutil.WithDeleteAccessKeyError(errors.New("should not have been called")), + awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{ + AccessKey: &iam.AccessKey{ + AccessKeyId: aws.String("key"), + SecretAccessKey: aws.String("itsasecret"), + }, + }), + awsutil.WithGetUserOutput(&iam.GetUserOutput{ + User: &iam.User{ + UserId: aws.String("unique-id"), + UserName: aws.String("jane-doe"), + }, + }), + }, + }, + { + name: "two keys", + username: "jane-doe", + id: "unique-id", + deletedKey: "foo", + opts: []awsutil.MockIAMOption{ + awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{ + AccessKeyMetadata: []*iam.AccessKeyMetadata{ + {AccessKeyId: aws.String("foo"), CreateDate: aws.Time(time.Time{})}, + {AccessKeyId: aws.String("bar"), CreateDate: aws.Time(time.Now())}, + }, + }), + awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{ + AccessKey: &iam.AccessKey{ + AccessKeyId: aws.String("key"), + SecretAccessKey: aws.String("itsasecret"), + }, + }), 
+ awsutil.WithGetUserOutput(&iam.GetUserOutput{ + User: &iam.User{ + UserId: aws.String("unique-id"), + UserName: aws.String("jane-doe"), + }, + }), + }, + }, + } + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + miam, err := awsutil.NewMockIAM( + c.opts..., + )(nil) + if err != nil { + t.Fatal(err) + } + fiam := &fakeIAM{ + IAMAPI: miam, + } + + b := Backend(config) + b.iamClient = fiam + + err = b.createCredential(context.Background(), config.StorageView, staticRoleEntry{Username: c.username, ID: c.id}, true) + if err != nil { + t.Fatalf("got an error we didn't expect: %q", err) + } + + if c.deletedKey != "" { + if len(fiam.delReqs) != 1 { + t.Fatalf("called the wrong number of deletes (called %d deletes)", len(fiam.delReqs)) + } + actualKey := *fiam.delReqs[0].AccessKeyId + if c.deletedKey != actualKey { + t.Fatalf("we deleted the wrong key: %q instead of %q", actualKey, c.deletedKey) + } + } + }) + } +} diff --git a/builtin/logical/aws/secret_access_keys.go b/builtin/logical/aws/secret_access_keys.go new file mode 100644 index 0000000..2f1ac44 --- /dev/null +++ b/builtin/logical/aws/secret_access_keys.go @@ -0,0 +1,526 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package aws + +import ( + "context" + "fmt" + "regexp" + "time" + + "github.com/hashicorp/go-secure-stdlib/awsutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/template" + "github.com/hashicorp/vault/sdk/logical" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/aws/aws-sdk-go/service/sts" + "github.com/hashicorp/errwrap" +) + +const ( + secretAccessKeyType = "access_keys" + storageKey = "config/root" +) + +func secretAccessKeys(b *backend) *framework.Secret { + return &framework.Secret{ + Type: secretAccessKeyType, + Fields: map[string]*framework.FieldSchema{ + "access_key": { + Type: framework.TypeString, + Description: "Access Key", + }, + + "secret_key": { + Type: framework.TypeString, + Description: "Secret Key", + }, + "security_token": { + Type: framework.TypeString, + Description: "Security Token", + }, + }, + + Renew: b.secretAccessKeysRenew, + Revoke: b.secretAccessKeysRevoke, + } +} + +func genUsername(displayName, policyName, userType, usernameTemplate string) (ret string, err error) { + switch userType { + case "iam_user", "assume_role": + // IAM users are capped at 64 chars + up, err := template.NewTemplate(template.Template(usernameTemplate)) + if err != nil { + return "", fmt.Errorf("unable to initialize username template: %w", err) + } + + um := UsernameMetadata{ + Type: "IAM", + DisplayName: normalizeDisplayName(displayName), + PolicyName: normalizeDisplayName(policyName), + } + + ret, err = up.Generate(um) + if err != nil { + return "", fmt.Errorf("failed to generate username: %w", err) + } + // To prevent a custom template from exceeding IAM length limits + if len(ret) > 64 { + return "", fmt.Errorf("the username generated by the template exceeds the IAM username length limits of 64 chars") + } + case "sts": + up, err := template.NewTemplate(template.Template(usernameTemplate)) + if err != nil { + return "", fmt.Errorf("unable to initialize username template: %w", err) + } + + um := UsernameMetadata{ + Type: "STS", + } + ret, err = up.Generate(um) + if err != nil { + return "", fmt.Errorf("failed to generate username: %w", err) + } + // To prevent a custom template from exceeding STS 
length limits + if len(ret) > 32 { + return "", fmt.Errorf("the username generated by the template exceeds the STS username length limits of 32 chars") + } + } + return +} + +func (b *backend) getFederationToken(ctx context.Context, s logical.Storage, + displayName, policyName, policy string, policyARNs []string, + iamGroups []string, lifeTimeInSeconds int64) (*logical.Response, error, +) { + groupPolicies, groupPolicyARNs, err := b.getGroupPolicies(ctx, s, iamGroups) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + if groupPolicies != nil { + groupPolicies = append(groupPolicies, policy) + policy, err = combinePolicyDocuments(groupPolicies...) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + } + if len(groupPolicyARNs) > 0 { + policyARNs = append(policyARNs, groupPolicyARNs...) + } + + stsClient, err := b.clientSTS(ctx, s) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + config, err := readConfig(ctx, s) + if err != nil { + return nil, fmt.Errorf("unable to read configuration: %w", err) + } + + // Set as defaultUsernameTemplate if not provided + usernameTemplate := config.UsernameTemplate + if usernameTemplate == "" { + usernameTemplate = defaultUserNameTemplate + } + + username, usernameError := genUsername(displayName, policyName, "sts", usernameTemplate) + // Send a 400 to Framework.OperationFunc Handler + if usernameError != nil { + return nil, usernameError + } + + getTokenInput := &sts.GetFederationTokenInput{ + Name: aws.String(username), + DurationSeconds: &lifeTimeInSeconds, + } + if len(policy) > 0 { + getTokenInput.Policy = aws.String(policy) + } + if len(policyARNs) > 0 { + getTokenInput.PolicyArns = convertPolicyARNs(policyARNs) + } + + // If neither a policy document nor policy ARNs are specified, then GetFederationToken will + // return credentials equivalent to that of the Vault server itself. We probably don't want + // that by default; the behavior can be explicitly opted in to by associating the Vault role + // with a policy ARN or document that allows the appropriate permissions. + if policy == "" && len(policyARNs) == 0 { + return logical.ErrorResponse("must specify at least one of policy_arns or policy_document with %s credential_type", federationTokenCred), nil + } + + tokenResp, err := stsClient.GetFederationTokenWithContext(ctx, getTokenInput) + if err != nil { + return logical.ErrorResponse("Error generating STS keys: %s", err), awsutil.CheckAWSError(err) + } + + // While STS credentials cannot be revoked/renewed, we will still create a lease since users are + // relying on a non-zero `lease_duration` in order to manage their lease lifecycles manually. 
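+ // (Vault cannot actually revoke or renew these STS credentials; their
+ // lifetime is enforced by AWS through the DurationSeconds set above, which
+ // is also why the secret below is marked non-renewable.)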
+ // + ttl := tokenResp.Credentials.Expiration.Sub(time.Now()) + resp := b.Secret(secretAccessKeyType).Response(map[string]interface{}{ + "access_key": *tokenResp.Credentials.AccessKeyId, + "secret_key": *tokenResp.Credentials.SecretAccessKey, + "security_token": *tokenResp.Credentials.SessionToken, + "ttl": uint64(ttl.Seconds()), + }, map[string]interface{}{ + "username": username, + "policy": policy, + "is_sts": true, + }) + + // Set the secret TTL to appropriately match the expiration of the token + resp.Secret.TTL = ttl + + // STS are purposefully short-lived and aren't renewable + resp.Secret.Renewable = false + + return resp, nil +} + +func (b *backend) assumeRole(ctx context.Context, s logical.Storage, + displayName, roleName, roleArn, policy string, policyARNs []string, + iamGroups []string, lifeTimeInSeconds int64, roleSessionName string) (*logical.Response, error, +) { + // grab any IAM group policies associated with the vault role, both inline + // and managed + groupPolicies, groupPolicyARNs, err := b.getGroupPolicies(ctx, s, iamGroups) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + if len(groupPolicies) > 0 { + groupPolicies = append(groupPolicies, policy) + policy, err = combinePolicyDocuments(groupPolicies...) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + } + if len(groupPolicyARNs) > 0 { + policyARNs = append(policyARNs, groupPolicyARNs...) + } + + stsClient, err := b.clientSTS(ctx, s) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + config, err := readConfig(ctx, s) + if err != nil { + return nil, fmt.Errorf("unable to read configuration: %w", err) + } + + // Set as defaultUsernameTemplate if not provided + usernameTemplate := config.UsernameTemplate + if usernameTemplate == "" { + usernameTemplate = defaultUserNameTemplate + } + + var roleSessionNameError error + if roleSessionName == "" { + roleSessionName, roleSessionNameError = genUsername(displayName, roleName, "assume_role", usernameTemplate) + // Send a 400 to Framework.OperationFunc Handler + if roleSessionNameError != nil { + return nil, roleSessionNameError + } + } else { + roleSessionName = normalizeDisplayName(roleSessionName) + } + + assumeRoleInput := &sts.AssumeRoleInput{ + RoleSessionName: aws.String(roleSessionName), + RoleArn: aws.String(roleArn), + DurationSeconds: &lifeTimeInSeconds, + } + if policy != "" { + assumeRoleInput.SetPolicy(policy) + } + if len(policyARNs) > 0 { + assumeRoleInput.SetPolicyArns(convertPolicyARNs(policyARNs)) + } + tokenResp, err := stsClient.AssumeRoleWithContext(ctx, assumeRoleInput) + if err != nil { + return logical.ErrorResponse("Error assuming role: %s", err), awsutil.CheckAWSError(err) + } + + // While STS credentials cannot be revoked/renewed, we will still create a lease since users are + // relying on a non-zero `lease_duration` in order to manage their lease lifecycles manually. 
+ //
+ ttl := tokenResp.Credentials.Expiration.Sub(time.Now())
+ resp := b.Secret(secretAccessKeyType).Response(map[string]interface{}{
+ "access_key": *tokenResp.Credentials.AccessKeyId,
+ "secret_key": *tokenResp.Credentials.SecretAccessKey,
+ "security_token": *tokenResp.Credentials.SessionToken,
+ "arn": *tokenResp.AssumedRoleUser.Arn,
+ "ttl": uint64(ttl.Seconds()),
+ }, map[string]interface{}{
+ "username": roleSessionName,
+ "policy": roleArn,
+ "is_sts": true,
+ })
+
+ // Set the secret TTL to appropriately match the expiration of the token
+ resp.Secret.TTL = ttl
+
+ // STS are purposefully short-lived and aren't renewable
+ resp.Secret.Renewable = false
+
+ return resp, nil
+}
+
+func readConfig(ctx context.Context, storage logical.Storage) (rootConfig, error) {
+ entry, err := storage.Get(ctx, storageKey)
+ if err != nil {
+ return rootConfig{}, err
+ }
+ if entry == nil {
+ return rootConfig{}, nil
+ }
+
+ var connConfig rootConfig
+ if err := entry.DecodeJSON(&connConfig); err != nil {
+ return rootConfig{}, err
+ }
+ return connConfig, nil
+}
+
+func (b *backend) secretAccessKeysCreate(
+ ctx context.Context,
+ s logical.Storage,
+ displayName, policyName string,
+ role *awsRoleEntry,
+) (*logical.Response, error) {
+ iamClient, err := b.clientIAM(ctx, s)
+ if err != nil {
+ return logical.ErrorResponse(err.Error()), nil
+ }
+
+ config, err := readConfig(ctx, s)
+ if err != nil {
+ return nil, fmt.Errorf("unable to read configuration: %w", err)
+ }
+
+ // Set as defaultUsernameTemplate if not provided
+ usernameTemplate := config.UsernameTemplate
+ if usernameTemplate == "" {
+ usernameTemplate = defaultUserNameTemplate
+ }
+
+ username, usernameError := genUsername(displayName, policyName, "iam_user", usernameTemplate)
+ // Send a 400 to Framework.OperationFunc Handler
+ if usernameError != nil {
+ return nil, usernameError
+ }
+
+ // Write to the WAL that this user will be created. We do this before
+ // the user is created because if we switched the order, the WAL put
+ // could fail, which would put us in an awkward position: we have a user
+ // we need to roll back but can't put the WAL entry to do the rollback.
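+ //
+ // Sketch of the sequence implemented below: put WAL entry -> create IAM
+ // user -> attach policies/keys -> delete WAL entry on success. If the
+ // user creation fails, the WAL entry is deleted on the spot; if even that
+ // deletion fails, the periodic WAL rollback (pathUserRollback in
+ // path_user.go) will eventually clean the user up.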
+ walID, err := framework.PutWAL(ctx, s, "user", &walUser{ + UserName: username, + }) + if err != nil { + return nil, fmt.Errorf("error writing WAL entry: %w", err) + } + + userPath := role.UserPath + if userPath == "" { + userPath = "/" + } + + createUserRequest := &iam.CreateUserInput{ + UserName: aws.String(username), + Path: aws.String(userPath), + } + if role.PermissionsBoundaryARN != "" { + createUserRequest.PermissionsBoundary = aws.String(role.PermissionsBoundaryARN) + } + + // Create the user + _, err = iamClient.CreateUserWithContext(ctx, createUserRequest) + if err != nil { + if walErr := framework.DeleteWAL(ctx, s, walID); walErr != nil { + iamErr := fmt.Errorf("error creating IAM user: %w", err) + return nil, errwrap.Wrap(fmt.Errorf("failed to delete WAL entry: %w", walErr), iamErr) + } + return logical.ErrorResponse("Error creating IAM user: %s", err), awsutil.CheckAWSError(err) + } + + for _, arn := range role.PolicyArns { + // Attach existing policy against user + _, err = iamClient.AttachUserPolicyWithContext(ctx, &iam.AttachUserPolicyInput{ + UserName: aws.String(username), + PolicyArn: aws.String(arn), + }) + if err != nil { + return logical.ErrorResponse("Error attaching user policy: %s", err), awsutil.CheckAWSError(err) + } + + } + if role.PolicyDocument != "" { + // Add new inline user policy against user + _, err = iamClient.PutUserPolicyWithContext(ctx, &iam.PutUserPolicyInput{ + UserName: aws.String(username), + PolicyName: aws.String(policyName), + PolicyDocument: aws.String(role.PolicyDocument), + }) + if err != nil { + return logical.ErrorResponse("Error putting user policy: %s", err), awsutil.CheckAWSError(err) + } + } + + for _, group := range role.IAMGroups { + // Add user to IAM groups + _, err = iamClient.AddUserToGroupWithContext(ctx, &iam.AddUserToGroupInput{ + UserName: aws.String(username), + GroupName: aws.String(group), + }) + if err != nil { + return logical.ErrorResponse("Error adding user to group: %s", err), awsutil.CheckAWSError(err) + } + } + + var tags []*iam.Tag + for key, value := range role.IAMTags { + // This assignment needs to be done in order to create unique addresses for + // these variables. Without doing so, all the tags will be copies of the last + // tag listed in the role. + k, v := key, value + tags = append(tags, &iam.Tag{Key: &k, Value: &v}) + } + + if len(tags) > 0 { + _, err = iamClient.TagUserWithContext(ctx, &iam.TagUserInput{ + Tags: tags, + UserName: &username, + }) + + if err != nil { + return logical.ErrorResponse("Error adding tags to user: %s", err), awsutil.CheckAWSError(err) + } + } + + // Create the keys + keyResp, err := iamClient.CreateAccessKeyWithContext(ctx, &iam.CreateAccessKeyInput{ + UserName: aws.String(username), + }) + if err != nil { + return logical.ErrorResponse("Error creating access keys: %s", err), awsutil.CheckAWSError(err) + } + + // Remove the WAL entry, we succeeded! If we fail, we don't return + // the secret because it'll get rolled back anyways, so we have to return + // an error here. + if err := framework.DeleteWAL(ctx, s, walID); err != nil { + return nil, fmt.Errorf("failed to commit WAL entry: %w", err) + } + + // Return the info! 
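+ // The returned data matches this secret type's field schema; for an
+ // iam_user credential it looks roughly like (illustrative values only):
+ //
+ //	access_key:     "AKIA..."
+ //	secret_key:     "wJalr..."
+ //	security_token: nil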
+ resp := b.Secret(secretAccessKeyType).Response(map[string]interface{}{ + "access_key": *keyResp.AccessKey.AccessKeyId, + "secret_key": *keyResp.AccessKey.SecretAccessKey, + "security_token": nil, + }, map[string]interface{}{ + "username": username, + "policy": role, + "is_sts": false, + }) + + lease, err := b.Lease(ctx, s) + if err != nil || lease == nil { + lease = &configLease{} + } + + resp.Secret.TTL = lease.Lease + resp.Secret.MaxTTL = lease.LeaseMax + + return resp, nil +} + +func (b *backend) secretAccessKeysRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + // STS already has a lifetime, and we don't support renewing it + isSTSRaw, ok := req.Secret.InternalData["is_sts"] + if ok { + isSTS, ok := isSTSRaw.(bool) + if ok { + if isSTS { + return nil, nil + } + } + } + + lease, err := b.Lease(ctx, req.Storage) + if err != nil { + return nil, err + } + if lease == nil { + lease = &configLease{} + } + + resp := &logical.Response{Secret: req.Secret} + resp.Secret.TTL = lease.Lease + resp.Secret.MaxTTL = lease.LeaseMax + return resp, nil +} + +func (b *backend) secretAccessKeysRevoke(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + // STS cleans up after itself so we can skip this if is_sts internal data + // element set to true. If is_sts is not set, assumes old version + // and defaults to the IAM approach. + isSTSRaw, ok := req.Secret.InternalData["is_sts"] + if ok { + isSTS, ok := isSTSRaw.(bool) + if ok { + if isSTS { + return nil, nil + } + } else { + return nil, fmt.Errorf("secret has is_sts but value could not be understood") + } + } + + // Get the username from the internal data + usernameRaw, ok := req.Secret.InternalData["username"] + if !ok { + return nil, fmt.Errorf("secret is missing username internal data") + } + username, ok := usernameRaw.(string) + if !ok { + return nil, fmt.Errorf("secret is missing username internal data") + } + + // Use the user rollback mechanism to delete this user + err := b.pathUserRollback(ctx, req, "user", map[string]interface{}{ + "username": username, + }) + if err != nil { + return nil, err + } + + return nil, nil +} + +func normalizeDisplayName(displayName string) string { + re := regexp.MustCompile("[^a-zA-Z0-9+=,.@_-]") + return re.ReplaceAllString(displayName, "_") +} + +func convertPolicyARNs(policyARNs []string) []*sts.PolicyDescriptorType { + size := len(policyARNs) + retval := make([]*sts.PolicyDescriptorType, size, size) + for i, arn := range policyARNs { + retval[i] = &sts.PolicyDescriptorType{ + Arn: aws.String(arn), + } + } + return retval +} + +type UsernameMetadata struct { + Type string + DisplayName string + PolicyName string +} diff --git a/builtin/logical/aws/secret_access_keys_test.go b/builtin/logical/aws/secret_access_keys_test.go new file mode 100644 index 0000000..890bb57 --- /dev/null +++ b/builtin/logical/aws/secret_access_keys_test.go @@ -0,0 +1,205 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package aws + +import ( + "context" + "strings" + "testing" + + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/require" +) + +func TestNormalizeDisplayName_NormRequired(t *testing.T) { + invalidNames := map[string]string{ + "^#$test name\nshould be normalized)(*": "___test_name_should_be_normalized___", + "^#$test name1 should be normalized)(*": "___test_name1_should_be_normalized___", + "^#$test name should be normalized)(*": "___test_name__should_be_normalized___", + "^#$test name__should be normalized)(*": "___test_name__should_be_normalized___", + } + + for k, v := range invalidNames { + normalizedName := normalizeDisplayName(k) + if normalizedName != v { + t.Fatalf( + "normalizeDisplayName does not normalize AWS name correctly: %s should resolve to %s", + k, + normalizedName) + } + } +} + +func TestNormalizeDisplayName_NormNotRequired(t *testing.T) { + validNames := []string{ + "test_name_should_normalize_to_itself@example.com", + "test1_name_should_normalize_to_itself@example.com", + "UPPERlower0123456789-_,.@example.com", + } + + for _, n := range validNames { + normalizedName := normalizeDisplayName(n) + if normalizedName != n { + t.Fatalf( + "normalizeDisplayName erroneously normalizes valid names: expected %s but normalized to %s", + n, + normalizedName) + } + } +} + +func TestGenUsername(t *testing.T) { + type testCase struct { + name string + policy string + userType string + UsernameTemplate string + expectedError string + expectedRegex string + expectedLength int + } + + tests := map[string]testCase{ + "Truncated to 64. No warnings expected": { + name: "name1", + policy: "policy1", + userType: "iam_user", + UsernameTemplate: defaultUserNameTemplate, + expectedError: "", + expectedRegex: `^vault-name1-policy1-[0-9]+-[a-zA-Z0-9]+`, + expectedLength: 64, + }, + "Truncated to 32. No warnings expected": { + name: "name1", + policy: "policy1", + userType: "sts", + UsernameTemplate: defaultUserNameTemplate, + expectedError: "", + expectedRegex: `^vault-[0-9]+-[a-zA-Z0-9]+`, + expectedLength: 32, + }, + "Too long. Error expected — IAM": { + name: "this---is---a---very---long---name", + policy: "long------policy------name", + userType: "assume_role", + UsernameTemplate: `{{ if (eq .Type "IAM") }}{{ printf "%s-%s-%s-%s" (.DisplayName) (.PolicyName) (unix_time) (random 20) }}{{ end }}`, + expectedError: "the username generated by the template exceeds the IAM username length limits of 64 chars", + expectedRegex: "", + expectedLength: 64, + }, + "Too long. 
Error expected — STS": { + name: "this---is---a---very---long---name", + policy: "long------policy------name", + userType: "sts", + UsernameTemplate: `{{ if (eq .Type "STS") }}{{ printf "%s-%s-%s-%s" (.DisplayName) (.PolicyName) (unix_time) (random 20) }}{{ end }}`, + expectedError: "the username generated by the template exceeds the STS username length limits of 32 chars", + expectedRegex: "", + expectedLength: 32, + }, + } + + for testDescription, testCase := range tests { + t.Run(testDescription, func(t *testing.T) { + testUsername, err := genUsername(testCase.name, testCase.policy, testCase.userType, testCase.UsernameTemplate) + if err != nil && !strings.Contains(err.Error(), testCase.expectedError) { + t.Fatalf("expected an error %s; instead received %s", testCase.expectedError, err) + } + + if err == nil { + require.Regexp(t, testCase.expectedRegex, testUsername) + + if len(testUsername) > testCase.expectedLength { + t.Fatalf("expected username to be of length %d, got %d", testCase.expectedLength, len(testUsername)) + } + } + }) + } +} + +func TestReadConfig_DefaultTemplate(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b := Backend(config) + if err := b.Setup(context.Background(), config); err != nil { + t.Fatal(err) + } + + testTemplate := "" + configData := map[string]interface{}{ + "connection_uri": "test_uri", + "username": "guest", + "password": "guest", + "username_template": testTemplate, + } + configReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/root", + Storage: config.StorageView, + Data: configData, + } + resp, err := b.HandleRequest(context.Background(), configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr:%s", resp, err) + } + if resp != nil { + t.Fatal("expected a nil response") + } + + configResult, err := readConfig(context.Background(), config.StorageView) + if err != nil { + t.Fatalf("expected err to be nil; got %s", err) + } + + // No template provided, config set to defaultUsernameTemplate + if configResult.UsernameTemplate != defaultUserNameTemplate { + t.Fatalf( + "expected template %s; got %s", + defaultUserNameTemplate, + configResult.UsernameTemplate, + ) + } +} + +func TestReadConfig_CustomTemplate(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b := Backend(config) + if err := b.Setup(context.Background(), config); err != nil { + t.Fatal(err) + } + + testTemplate := "`foo-{{ .DisplayName }}`" + configData := map[string]interface{}{ + "connection_uri": "test_uri", + "username": "guest", + "password": "guest", + "username_template": testTemplate, + } + configReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/root", + Storage: config.StorageView, + Data: configData, + } + resp, err := b.HandleRequest(context.Background(), configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr:%s", resp, err) + } + if resp != nil { + t.Fatal("expected a nil response") + } + + configResult, err := readConfig(context.Background(), config.StorageView) + if err != nil { + t.Fatalf("expected err to be nil; got %s", err) + } + + if configResult.UsernameTemplate != testTemplate { + t.Fatalf( + "expected template %s; got %s", + testTemplate, + configResult.UsernameTemplate, + ) + } +} diff --git a/builtin/logical/aws/stepwise_test.go b/builtin/logical/aws/stepwise_test.go new file mode 100644 index 0000000..b6f1ffe --- /dev/null 
+++ b/builtin/logical/aws/stepwise_test.go @@ -0,0 +1,105 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package aws + +import ( + "os" + "sync" + "testing" + + stepwise "github.com/hashicorp/vault-testing-stepwise" + dockerEnvironment "github.com/hashicorp/vault-testing-stepwise/environments/docker" + "github.com/hashicorp/vault/api" + "github.com/mitchellh/mapstructure" +) + +var stepwiseSetup sync.Once + +func TestAccBackend_Stepwise_basic(t *testing.T) { + t.Parallel() + envOptions := &stepwise.MountOptions{ + RegistryName: "aws-sec", + PluginType: api.PluginTypeSecrets, + PluginName: "aws", + MountPathPrefix: "aws-sec", + } + roleName := "vault-stepwise-role" + stepwise.Run(t, stepwise.Case{ + Precheck: func() { testAccStepwisePreCheck(t) }, + Environment: dockerEnvironment.NewEnvironment("aws", envOptions), + Steps: []stepwise.Step{ + testAccStepwiseConfig(t), + testAccStepwiseWritePolicy(t, roleName, testDynamoPolicy), + testAccStepwiseRead(t, "creds", roleName, []credentialTestFunc{listDynamoTablesTest}), + }, + }) +} + +func testAccStepwiseConfig(t *testing.T) stepwise.Step { + return stepwise.Step{ + Operation: stepwise.UpdateOperation, + Path: "config/root", + Data: map[string]interface{}{ + "region": os.Getenv("AWS_DEFAULT_REGION"), + "access_key": os.Getenv("TEST_AWS_ACCESS_KEY"), + "secret_key": os.Getenv("TEST_AWS_SECRET_KEY"), + }, + } +} + +func testAccStepwiseWritePolicy(t *testing.T, name string, policy string) stepwise.Step { + return stepwise.Step{ + Operation: stepwise.UpdateOperation, + Path: "roles/" + name, + Data: map[string]interface{}{ + "policy_document": policy, + "credential_type": "iam_user", + }, + } +} + +func testAccStepwiseRead(t *testing.T, path, name string, credentialTests []credentialTestFunc) stepwise.Step { + return stepwise.Step{ + Operation: stepwise.ReadOperation, + Path: path + "/" + name, + Assert: func(resp *api.Secret, err error) error { + if err != nil { + return err + } + var d struct { + AccessKey string `mapstructure:"access_key"` + SecretKey string `mapstructure:"secret_key"` + STSToken string `mapstructure:"security_token"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + t.Logf("[WARN] Generated credentials: %v", d) + for _, testFunc := range credentialTests { + err := testFunc(d.AccessKey, d.SecretKey, d.STSToken) + if err != nil { + return err + } + } + return nil + }, + } +} + +func testAccStepwisePreCheck(t *testing.T) { + stepwiseSetup.Do(func() { + if v := os.Getenv("AWS_DEFAULT_REGION"); v == "" { + t.Logf("[INFO] Test: Using us-west-2 as test region") + os.Setenv("AWS_DEFAULT_REGION", "us-west-2") + } + + // Ensure test variables are set + if v := os.Getenv("TEST_AWS_ACCESS_KEY"); v == "" { + t.Skip("TEST_AWS_ACCESS_KEY not set") + } + if v := os.Getenv("TEST_AWS_SECRET_KEY"); v == "" { + t.Skip("TEST_AWS_SECRET_KEY not set") + } + }) +} diff --git a/builtin/logical/consul/backend.go b/builtin/logical/consul/backend.go new file mode 100644 index 0000000..5e42a51 --- /dev/null +++ b/builtin/logical/consul/backend.go @@ -0,0 +1,54 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package consul + +import ( + "context" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +const operationPrefixConsul = "consul" + +// ReportedVersion is used to report a specific version to Vault. 
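+// It is normally left empty and, by assumption about the build setup (not
+// shown in this diff), injected at build time, e.g.:
+//
+//	go build -ldflags \
+//	  "-X github.com/hashicorp/vault/builtin/logical/consul.ReportedVersion=v1.14.8"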
+var ReportedVersion = "" + +func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { + b := Backend() + if err := b.Setup(ctx, conf); err != nil { + return nil, err + } + return b, nil +} + +func Backend() *backend { + var b backend + b.Backend = &framework.Backend{ + PathsSpecial: &logical.Paths{ + SealWrapStorage: []string{ + "config/access", + }, + }, + + Paths: []*framework.Path{ + pathConfigAccess(&b), + pathListRoles(&b), + pathRoles(&b), + pathToken(&b), + }, + + Secrets: []*framework.Secret{ + secretToken(&b), + }, + BackendType: logical.TypeLogical, + RunningVersion: ReportedVersion, + } + + return &b +} + +type backend struct { + *framework.Backend +} diff --git a/builtin/logical/consul/backend_test.go b/builtin/logical/consul/backend_test.go new file mode 100644 index 0000000..68c3472 --- /dev/null +++ b/builtin/logical/consul/backend_test.go @@ -0,0 +1,1613 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package consul + +import ( + "context" + "encoding/base64" + "fmt" + "log" + "os" + "reflect" + "strings" + "testing" + "time" + + consulapi "github.com/hashicorp/consul/api" + "github.com/hashicorp/vault/helper/testhelpers/consul" + logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/mapstructure" +) + +func TestBackend_Config_Access(t *testing.T) { + t.Run("config_access", func(t *testing.T) { + t.Parallel() + t.Run("pre-1.4.0", func(t *testing.T) { + t.Parallel() + testBackendConfigAccess(t, "1.3.1", true) + }) + t.Run("post-1.4.0", func(t *testing.T) { + t.Parallel() + testBackendConfigAccess(t, "", true) + }) + t.Run("pre-1.4.0 automatic-bootstrap", func(t *testing.T) { + t.Parallel() + testBackendConfigAccess(t, "1.3.1", false) + }) + t.Run("post-1.4.0 automatic-bootstrap", func(t *testing.T) { + t.Parallel() + testBackendConfigAccess(t, "", false) + }) + }) +} + +func testBackendConfigAccess(t *testing.T, version string, autoBootstrap bool) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + cleanup, consulConfig := consul.PrepareTestContainer(t, version, false, autoBootstrap) + defer cleanup() + + connData := map[string]interface{}{ + "address": consulConfig.Address(), + } + if autoBootstrap || strings.HasPrefix(version, "1.3") { + connData["token"] = consulConfig.Token + } + + confReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/access", + Storage: config.StorageView, + Data: connData, + } + + resp, err := b.HandleRequest(context.Background(), confReq) + if err != nil || (resp != nil && resp.IsError()) || resp != nil { + t.Fatalf("failed to write configuration: resp:%#v err:%s", resp, err) + } + + confReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(context.Background(), confReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("failed to write configuration: resp:%#v err:%s", resp, err) + } + + expected := map[string]interface{}{ + "address": connData["address"].(string), + "scheme": "http", + } + if !reflect.DeepEqual(expected, resp.Data) { + t.Fatalf("bad: expected:%#v\nactual:%#v\n", expected, resp.Data) + } + if resp.Data["token"] != nil { + t.Fatalf("token should not be set in the response") + } +} + +func TestBackend_Renew_Revoke(t *testing.T) { + t.Run("renew_revoke", func(t *testing.T) { + t.Parallel() + t.Run("pre-1.4.0", 
func(t *testing.T) { + t.Parallel() + testBackendRenewRevoke(t, "1.3.1") + }) + t.Run("post-1.4.0", func(t *testing.T) { + t.Parallel() + t.Run("legacy", func(t *testing.T) { + t.Parallel() + testBackendRenewRevoke(t, "1.4.4") + }) + + t.Run("param-policies", func(t *testing.T) { + t.Parallel() + testBackendRenewRevoke14(t, "", "policies") + }) + t.Run("param-consul_policies", func(t *testing.T) { + t.Parallel() + testBackendRenewRevoke14(t, "", "consul_policies") + }) + t.Run("both-params", func(t *testing.T) { + t.Parallel() + testBackendRenewRevoke14(t, "", "both") + }) + }) + }) +} + +func testBackendRenewRevoke(t *testing.T, version string) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + cleanup, consulConfig := consul.PrepareTestContainer(t, version, false, true) + defer cleanup() + + connData := map[string]interface{}{ + "address": consulConfig.Address(), + "token": consulConfig.Token, + } + + req := &logical.Request{ + Storage: config.StorageView, + Operation: logical.UpdateOperation, + Path: "config/access", + Data: connData, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + req.Path = "roles/test" + req.Data = map[string]interface{}{ + "policy": base64.StdEncoding.EncodeToString([]byte(testPolicy)), + "lease": "6h", + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + req.Operation = logical.ReadOperation + req.Path = "creds/test" + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp nil") + } + if resp.IsError() { + t.Fatalf("resp is error: %v", resp.Error()) + } + + generatedSecret := resp.Secret + generatedSecret.TTL = 6 * time.Hour + + var d struct { + Token string `mapstructure:"token"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + t.Fatal(err) + } + + // Build a client and verify that the credentials work + consulapiConfig := consulapi.DefaultConfig() + consulapiConfig.Address = connData["address"].(string) + consulapiConfig.Token = d.Token + client, err := consulapi.NewClient(consulapiConfig) + if err != nil { + t.Fatal(err) + } + + _, err = client.KV().Put(&consulapi.KVPair{ + Key: "foo", + Value: []byte("bar"), + }, nil) + if err != nil { + t.Fatal(err) + } + + req.Operation = logical.RenewOperation + req.Secret = generatedSecret + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("got nil response from renew") + } + + req.Operation = logical.RevokeOperation + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + _, err = client.KV().Put(&consulapi.KVPair{ + Key: "foo", + Value: []byte("bar"), + }, nil) + if err == nil { + t.Fatal("err: expected error") + } +} + +func testBackendRenewRevoke14(t *testing.T, version string, policiesParam string) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + cleanup, consulConfig := consul.PrepareTestContainer(t, version, false, true) + defer cleanup() + + connData := map[string]interface{}{ + "address": consulConfig.Address(), + "token": consulConfig.Token, + } + + req := &logical.Request{ + Storage: config.StorageView, + Operation: logical.UpdateOperation, + Path: "config/access", 
+ Data: connData, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + req.Path = "roles/test" + req.Data = map[string]interface{}{ + "lease": "6h", + } + if policiesParam == "both" { + req.Data["policies"] = []string{"wrong-name"} + req.Data["consul_policies"] = []string{"test"} + } else { + req.Data[policiesParam] = []string{"test"} + } + + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + read := &logical.Request{ + Storage: config.StorageView, + Operation: logical.ReadOperation, + Path: "roles/test", + Data: connData, + } + roleResp, err := b.HandleRequest(context.Background(), read) + + expectExtract := roleResp.Data["consul_policies"] + respExtract := roleResp.Data[policiesParam] + if respExtract != nil { + if expectExtract.([]string)[0] != respExtract.([]string)[0] { + t.Errorf("mismatch: response consul_policies '%s' does not match '[test]'", roleResp.Data["consul_policies"]) + } + } + + req.Operation = logical.ReadOperation + req.Path = "creds/test" + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp nil") + } + if resp.IsError() { + t.Fatalf("resp is error: %v", resp.Error()) + } + + generatedSecret := resp.Secret + generatedSecret.TTL = 6 * time.Hour + + var d struct { + Token string `mapstructure:"token"` + Accessor string `mapstructure:"accessor"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + t.Fatal(err) + } + + // Build a client and verify that the credentials work + consulapiConfig := consulapi.DefaultNonPooledConfig() + consulapiConfig.Address = connData["address"].(string) + consulapiConfig.Token = d.Token + client, err := consulapi.NewClient(consulapiConfig) + if err != nil { + t.Fatal(err) + } + + _, err = client.Catalog(), nil + if err != nil { + t.Fatal(err) + } + + req.Operation = logical.RenewOperation + req.Secret = generatedSecret + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("got nil response from renew") + } + + req.Operation = logical.RevokeOperation + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + // Build a management client and verify that the token does not exist anymore + consulmgmtConfig := consulapi.DefaultNonPooledConfig() + consulmgmtConfig.Address = connData["address"].(string) + consulmgmtConfig.Token = connData["token"].(string) + mgmtclient, err := consulapi.NewClient(consulmgmtConfig) + if err != nil { + t.Fatal(err) + } + q := &consulapi.QueryOptions{ + Datacenter: "DC1", + } + + _, _, err = mgmtclient.ACL().TokenRead(d.Accessor, q) + if err == nil { + t.Fatal("err: expected error") + } +} + +func TestBackend_LocalToken(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + cleanup, consulConfig := consul.PrepareTestContainer(t, "", false, true) + defer cleanup() + + connData := map[string]interface{}{ + "address": consulConfig.Address(), + "token": consulConfig.Token, + } + + req := &logical.Request{ + Storage: config.StorageView, + Operation: logical.UpdateOperation, + Path: "config/access", + Data: connData, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + req.Path = "roles/test" + req.Data = map[string]interface{}{ + "consul_policies": 
[]string{"test"}, + "ttl": "6h", + "local": false, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + req.Path = "roles/test_local" + req.Data = map[string]interface{}{ + "consul_policies": []string{"test"}, + "ttl": "6h", + "local": true, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + req.Operation = logical.ReadOperation + req.Path = "creds/test" + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp nil") + } + if resp.IsError() { + t.Fatalf("resp is error: %v", resp.Error()) + } + + var d struct { + Token string `mapstructure:"token"` + Accessor string `mapstructure:"accessor"` + Local bool `mapstructure:"local"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + t.Fatal(err) + } + + if d.Local { + t.Fatalf("requested global token, got local one") + } + + // Build a client and verify that the credentials work + consulapiConfig := consulapi.DefaultNonPooledConfig() + consulapiConfig.Address = connData["address"].(string) + consulapiConfig.Token = d.Token + client, err := consulapi.NewClient(consulapiConfig) + if err != nil { + t.Fatal(err) + } + + _, err = client.Catalog(), nil + if err != nil { + t.Fatal(err) + } + + req.Operation = logical.ReadOperation + req.Path = "creds/test_local" + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp nil") + } + if resp.IsError() { + t.Fatalf("resp is error: %v", resp.Error()) + } + + if err := mapstructure.Decode(resp.Data, &d); err != nil { + t.Fatal(err) + } + + if !d.Local { + t.Fatalf("requested local token, got global one") + } + + // Build a client and verify that the credentials work + consulapiConfig = consulapi.DefaultNonPooledConfig() + consulapiConfig.Address = connData["address"].(string) + consulapiConfig.Token = d.Token + client, err = consulapi.NewClient(consulapiConfig) + if err != nil { + t.Fatal(err) + } + + _, err = client.Catalog(), nil + if err != nil { + t.Fatal(err) + } +} + +func TestBackend_Management(t *testing.T) { + t.Run("management", func(t *testing.T) { + t.Parallel() + t.Run("pre-1.4.0", func(t *testing.T) { + t.Parallel() + testBackendManagement(t, "1.3.1") + }) + t.Run("post-1.4.0", func(t *testing.T) { + t.Parallel() + testBackendManagement(t, "1.4.4") + }) + + testBackendManagement(t, "1.10.8") + }) +} + +func testBackendManagement(t *testing.T, version string) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + cleanup, consulConfig := consul.PrepareTestContainer(t, version, false, true) + defer cleanup() + + connData := map[string]interface{}{ + "address": consulConfig.Address(), + "token": consulConfig.Token, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfig(t, connData), + testAccStepWriteManagementPolicy(t, "test", ""), + testAccStepReadManagementToken(t, "test", connData), + }, + }) +} + +func TestBackend_Basic(t *testing.T) { + t.Run("basic", func(t *testing.T) { + t.Parallel() + t.Run("pre-1.4.0", func(t *testing.T) { + t.Parallel() + testBackendBasic(t, "1.3.1") + }) + t.Run("post-1.4.0", func(t *testing.T) { + t.Parallel() + t.Run("legacy", func(t *testing.T) { + t.Parallel() + testBackendBasic(t, "1.4.4") + }) + + testBackendBasic(t, 
"1.10.8") + }) + }) +} + +func testBackendBasic(t *testing.T, version string) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + cleanup, consulConfig := consul.PrepareTestContainer(t, version, false, true) + defer cleanup() + + connData := map[string]interface{}{ + "address": consulConfig.Address(), + "token": consulConfig.Token, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfig(t, connData), + testAccStepWritePolicy(t, "test", testPolicy, ""), + testAccStepReadToken(t, "test", connData), + }, + }) +} + +func TestBackend_crud(t *testing.T) { + b, _ := Factory(context.Background(), logical.TestBackendConfig()) + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepWritePolicy(t, "test", testPolicy, ""), + testAccStepWritePolicy(t, "test2", testPolicy, ""), + testAccStepWritePolicy(t, "test3", testPolicy, ""), + testAccStepReadPolicy(t, "test", testPolicy, 0), + testAccStepListPolicy(t, []string{"test", "test2", "test3"}), + testAccStepDeletePolicy(t, "test"), + }, + }) +} + +func TestBackend_role_lease(t *testing.T) { + b, _ := Factory(context.Background(), logical.TestBackendConfig()) + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepWritePolicy(t, "test", testPolicy, "6h"), + testAccStepReadPolicy(t, "test", testPolicy, 6*time.Hour), + testAccStepDeletePolicy(t, "test"), + }, + }) +} + +func testAccStepConfig(t *testing.T, config map[string]interface{}) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "config/access", + Data: config, + } +} + +func testAccStepReadToken(t *testing.T, name string, conf map[string]interface{}) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "creds/" + name, + Check: func(resp *logical.Response) error { + var d struct { + Token string `mapstructure:"token"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + log.Printf("[WARN] Generated token: %s", d.Token) + + // Build a client and verify that the credentials work + config := consulapi.DefaultConfig() + config.Address = conf["address"].(string) + config.Token = d.Token + client, err := consulapi.NewClient(config) + if err != nil { + return err + } + + log.Printf("[WARN] Verifying that the generated token works...") + _, err = client.KV().Put(&consulapi.KVPair{ + Key: "foo", + Value: []byte("bar"), + }, nil) + if err != nil { + return err + } + + return nil + }, + } +} + +func testAccStepReadManagementToken(t *testing.T, name string, conf map[string]interface{}) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "creds/" + name, + Check: func(resp *logical.Response) error { + var d struct { + Token string `mapstructure:"token"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + log.Printf("[WARN] Generated token: %s", d.Token) + + // Build a client and verify that the credentials work + config := consulapi.DefaultConfig() + config.Address = conf["address"].(string) + config.Token = d.Token + client, err := consulapi.NewClient(config) + if err != nil { + return err + } + + log.Printf("[WARN] Verifying that the generated token works...") + _, _, err = 
client.ACL().Create(&consulapi.ACLEntry{ + Type: "management", + Name: "test2", + }, nil) + if err != nil { + return err + } + + return nil + }, + } +} + +func testAccStepWritePolicy(t *testing.T, name string, policy string, lease string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "roles/" + name, + Data: map[string]interface{}{ + "policy": base64.StdEncoding.EncodeToString([]byte(policy)), + "lease": lease, + }, + } +} + +func testAccStepWriteManagementPolicy(t *testing.T, name string, lease string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "roles/" + name, + Data: map[string]interface{}{ + "token_type": "management", + "lease": lease, + }, + } +} + +func testAccStepReadPolicy(t *testing.T, name string, policy string, lease time.Duration) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "roles/" + name, + Check: func(resp *logical.Response) error { + policyRaw := resp.Data["policy"].(string) + out, err := base64.StdEncoding.DecodeString(policyRaw) + if err != nil { + return err + } + if string(out) != policy { + return fmt.Errorf("mismatch: %s %s", out, policy) + } + + l := resp.Data["lease"].(int64) + if lease != time.Second*time.Duration(l) { + return fmt.Errorf("mismatch: %v %v", l, lease) + } + return nil + }, + } +} + +func testAccStepListPolicy(t *testing.T, names []string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ListOperation, + Path: "roles/", + Check: func(resp *logical.Response) error { + respKeys := resp.Data["keys"].([]string) + if !reflect.DeepEqual(respKeys, names) { + return fmt.Errorf("mismatch: %#v %#v", respKeys, names) + } + return nil + }, + } +} + +func testAccStepDeletePolicy(t *testing.T, name string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.DeleteOperation, + Path: "roles/" + name, + } +} + +func TestBackend_Roles(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + cleanup, consulConfig := consul.PrepareTestContainer(t, "", false, true) + defer cleanup() + + connData := map[string]interface{}{ + "address": consulConfig.Address(), + "token": consulConfig.Token, + } + + req := &logical.Request{ + Storage: config.StorageView, + Operation: logical.UpdateOperation, + Path: "config/access", + Data: connData, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + // Create the consul_roles role + req.Path = "roles/test-consul-roles" + req.Data = map[string]interface{}{ + "consul_roles": []string{"role-test"}, + "lease": "6h", + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + req.Operation = logical.ReadOperation + req.Path = "creds/test-consul-roles" + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp nil") + } + if resp.IsError() { + t.Fatalf("resp is error: %v", resp.Error()) + } + + generatedSecret := resp.Secret + generatedSecret.TTL = 6 * time.Hour + + var d struct { + Token string `mapstructure:"token"` + Accessor string `mapstructure:"accessor"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + t.Fatal(err) + } + + // Build a client and verify that the credentials work + consulapiConfig := 
consulapi.DefaultNonPooledConfig()
+	consulapiConfig.Address = connData["address"].(string)
+	consulapiConfig.Token = d.Token
+	client, err := consulapi.NewClient(consulapiConfig)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Catalog().Datacenters()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	req.Operation = logical.RenewOperation
+	req.Secret = generatedSecret
+	resp, err = b.HandleRequest(context.Background(), req)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp == nil {
+		t.Fatal("got nil response from renew")
+	}
+
+	req.Operation = logical.RevokeOperation
+	_, err = b.HandleRequest(context.Background(), req)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Build a management client and verify that the token does not exist anymore
+	consulmgmtConfig := consulapi.DefaultNonPooledConfig()
+	consulmgmtConfig.Address = connData["address"].(string)
+	consulmgmtConfig.Token = connData["token"].(string)
+	mgmtclient, err := consulapi.NewClient(consulmgmtConfig)
+	if err != nil {
+		t.Fatal(err)
+	}
+	q := &consulapi.QueryOptions{
+		Datacenter: "DC1",
+	}
+
+	_, _, err = mgmtclient.ACL().TokenRead(d.Accessor, q)
+	if err == nil {
+		t.Fatal("err: expected error")
+	}
+}
+
+func TestBackend_Enterprise_Diff_Namespace_Revocation(t *testing.T) {
+	if _, hasLicense := os.LookupEnv("CONSUL_LICENSE"); !hasLicense {
+		t.Skip("Skipping: No enterprise license found")
+	}
+
+	testBackendEntDiffNamespaceRevocation(t)
+}
+
+func TestBackend_Enterprise_Diff_Partition_Revocation(t *testing.T) {
+	if _, hasLicense := os.LookupEnv("CONSUL_LICENSE"); !hasLicense {
+		t.Skip("Skipping: No enterprise license found")
+	}
+
+	testBackendEntDiffPartitionRevocation(t)
+}
+
+func TestBackend_Enterprise_Namespace(t *testing.T) {
+	if _, hasLicense := os.LookupEnv("CONSUL_LICENSE"); !hasLicense {
+		t.Skip("Skipping: No enterprise license found")
+	}
+
+	testBackendEntNamespace(t)
+}
+
+func TestBackend_Enterprise_Partition(t *testing.T) {
+	if _, hasLicense := os.LookupEnv("CONSUL_LICENSE"); !hasLicense {
+		t.Skip("Skipping: No enterprise license found")
+	}
+
+	testBackendEntPartition(t)
+}
+
+func testBackendEntDiffNamespaceRevocation(t *testing.T) {
+	config := logical.TestBackendConfig()
+	config.StorageView = &logical.InmemStorage{}
+	b, err := Factory(context.Background(), config)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	cleanup, consulConfig := consul.PrepareTestContainer(t, "", true, true)
+	defer cleanup()
+
+	// Perform additional Consul configuration
+	consulapiConfig := consulapi.DefaultNonPooledConfig()
+	consulapiConfig.Address = consulConfig.Address()
+	consulapiConfig.Token = consulConfig.Token
+	client, err := consulapi.NewClient(consulapiConfig)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Create Policy in default namespace to manage ACLs in a different
+	// namespace
+	nsPol := &consulapi.ACLPolicy{
+		Name:        "diff-ns-test",
+		Description: "policy to test management of ACLs in one ns from another",
+		Rules: `namespace "ns1" {
+			acl="write"
+		}
+		`,
+	}
+	pol, _, err := client.ACL().PolicyCreate(nsPol, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Create new Token in default namespace with new ACL
+	cToken, _, err := client.ACL().TokenCreate(
+		&consulapi.ACLToken{
+			Policies: []*consulapi.ACLLink{{ID: pol.ID}},
+		}, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Write backend config
+	connData := map[string]interface{}{
+		"address": consulConfig.Address(),
+		"token":   cToken.SecretID,
+	}
+
+	req := &logical.Request{
+		Storage:   config.StorageView,
+		Operation: logical.UpdateOperation,
+		Path:      "config/access",
+ Data: connData, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + // Create the role in namespace "ns1" + req.Path = "roles/test-ns" + req.Data = map[string]interface{}{ + "consul_policies": []string{"ns-test"}, + "lease": "6h", + "consul_namespace": "ns1", + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + // Get Token + req.Operation = logical.ReadOperation + req.Path = "creds/test-ns" + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp nil") + } + if resp.IsError() { + t.Fatalf("resp is error: %v", resp.Error()) + } + + generatedSecret := resp.Secret + generatedSecret.TTL = 6 * time.Hour + + // Verify Secret + var d struct { + Token string `mapstructure:"token"` + Accessor string `mapstructure:"accessor"` + ConsulNamespace string `mapstructure:"consul_namespace"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + t.Fatal(err) + } + + if d.ConsulNamespace != "ns1" { + t.Fatalf("Failed to access namespace") + } + + // Revoke the credential + req.Operation = logical.RevokeOperation + req.Secret = generatedSecret + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("Revocation failed: %v", err) + } + + // Build a management client and verify that the token does not exist anymore + consulmgmtConfig := consulapi.DefaultNonPooledConfig() + consulmgmtConfig.Address = connData["address"].(string) + consulmgmtConfig.Token = connData["token"].(string) + mgmtclient, err := consulapi.NewClient(consulmgmtConfig) + if err != nil { + t.Fatal(err) + } + q := &consulapi.QueryOptions{ + Datacenter: "DC1", + Namespace: "ns1", + } + + _, _, err = mgmtclient.ACL().TokenRead(d.Accessor, q) + if err == nil { + t.Fatal("err: expected error") + } +} + +func testBackendEntDiffPartitionRevocation(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + cleanup, consulConfig := consul.PrepareTestContainer(t, "", true, true) + defer cleanup() + + // Perform additional Consul configuration + consulapiConfig := consulapi.DefaultNonPooledConfig() + consulapiConfig.Address = consulConfig.Address() + consulapiConfig.Token = consulConfig.Token + client, err := consulapi.NewClient(consulapiConfig) + if err != nil { + t.Fatal(err) + } + + // Create Policy in default partition to manage ACLs in a different + // partition + partPol := &consulapi.ACLPolicy{ + Name: "diff-part-test", + Description: "policy to test management of ACLs in one part from another", + Rules: `partition "part1" { + acl="write" + } + `, + } + pol, _, err := client.ACL().PolicyCreate(partPol, nil) + if err != nil { + t.Fatal(err) + } + + // Create new Token in default partition with new ACL + cToken, _, err := client.ACL().TokenCreate( + &consulapi.ACLToken{ + Policies: []*consulapi.ACLLink{{ID: pol.ID}}, + }, nil) + if err != nil { + t.Fatal(err) + } + + // Write backend config + connData := map[string]interface{}{ + "address": consulConfig.Address(), + "token": cToken.SecretID, + } + + req := &logical.Request{ + Storage: config.StorageView, + Operation: logical.UpdateOperation, + Path: "config/access", + Data: connData, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + // Create the role in partition "part1" + req.Path = "roles/test-part" + req.Data = 
map[string]interface{}{ + "consul_policies": []string{"part-test"}, + "lease": "6h", + "partition": "part1", + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + // Get Token + req.Operation = logical.ReadOperation + req.Path = "creds/test-part" + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp nil") + } + if resp.IsError() { + t.Fatalf("resp is error: %v", resp.Error()) + } + + generatedSecret := resp.Secret + generatedSecret.TTL = 6 * time.Hour + + // Verify Secret + var d struct { + Token string `mapstructure:"token"` + Accessor string `mapstructure:"accessor"` + Partition string `mapstructure:"partition"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + t.Fatal(err) + } + + if d.Partition != "part1" { + t.Fatalf("Failed to access partition") + } + + // Revoke the credential + req.Operation = logical.RevokeOperation + req.Secret = generatedSecret + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("Revocation failed: %v", err) + } + + // Build a management client and verify that the token does not exist anymore + consulmgmtConfig := consulapi.DefaultNonPooledConfig() + consulmgmtConfig.Address = connData["address"].(string) + consulmgmtConfig.Token = connData["token"].(string) + mgmtclient, err := consulapi.NewClient(consulmgmtConfig) + if err != nil { + t.Fatal(err) + } + q := &consulapi.QueryOptions{ + Datacenter: "DC1", + Partition: "part1", + } + + _, _, err = mgmtclient.ACL().TokenRead(d.Accessor, q) + if err == nil { + t.Fatal("err: expected error") + } +} + +func testBackendEntNamespace(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + cleanup, consulConfig := consul.PrepareTestContainer(t, "", true, true) + defer cleanup() + + connData := map[string]interface{}{ + "address": consulConfig.Address(), + "token": consulConfig.Token, + } + + req := &logical.Request{ + Storage: config.StorageView, + Operation: logical.UpdateOperation, + Path: "config/access", + Data: connData, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + // Create the role in namespace "ns1" + req.Path = "roles/test-ns" + req.Data = map[string]interface{}{ + "consul_policies": []string{"ns-test"}, + "lease": "6h", + "consul_namespace": "ns1", + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + req.Operation = logical.ReadOperation + req.Path = "creds/test-ns" + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp nil") + } + if resp.IsError() { + t.Fatalf("resp is error: %v", resp.Error()) + } + + generatedSecret := resp.Secret + generatedSecret.TTL = 6 * time.Hour + + var d struct { + Token string `mapstructure:"token"` + Accessor string `mapstructure:"accessor"` + ConsulNamespace string `mapstructure:"consul_namespace"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + t.Fatal(err) + } + + if d.ConsulNamespace != "ns1" { + t.Fatalf("Failed to access namespace") + } + + // Build a client and verify that the credentials work + consulapiConfig := consulapi.DefaultNonPooledConfig() + consulapiConfig.Address = connData["address"].(string) + consulapiConfig.Token = d.Token + client, err := 
consulapi.NewClient(consulapiConfig)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Catalog().Datacenters()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	req.Operation = logical.RenewOperation
+	req.Secret = generatedSecret
+	resp, err = b.HandleRequest(context.Background(), req)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp == nil {
+		t.Fatal("got nil response from renew")
+	}
+
+	req.Operation = logical.RevokeOperation
+	_, err = b.HandleRequest(context.Background(), req)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Build a management client and verify that the token does not exist anymore
+	consulmgmtConfig := consulapi.DefaultNonPooledConfig()
+	consulmgmtConfig.Address = connData["address"].(string)
+	consulmgmtConfig.Token = connData["token"].(string)
+	mgmtclient, err := consulapi.NewClient(consulmgmtConfig)
+	if err != nil {
+		t.Fatal(err)
+	}
+	q := &consulapi.QueryOptions{
+		Datacenter: "DC1",
+		Namespace:  "ns1",
+	}
+
+	_, _, err = mgmtclient.ACL().TokenRead(d.Accessor, q)
+	if err == nil {
+		t.Fatal("err: expected error")
+	}
+}
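+
+// For illustration: the namespace scoping exercised above can also be checked
+// directly against Consul. Reading the accessor inside "ns1" succeeds while
+// the same read against the default namespace misses, because a token lives
+// in exactly one namespace. This helper is a sketch that is not called by the
+// tests; the pre-provisioned "ns1" namespace is an assumption.
+func exampleNamespaceScopedRead(client *consulapi.Client, accessor string) error {
+	// Pin the read to the namespace the token was minted in.
+	q := &consulapi.QueryOptions{Namespace: "ns1"}
+	if _, _, err := client.ACL().TokenRead(accessor, q); err != nil {
+		return fmt.Errorf("token not visible in ns1: %w", err)
+	}
+	// A nil QueryOptions targets the "default" namespace and should miss.
+	if _, _, err := client.ACL().TokenRead(accessor, nil); err == nil {
+		return fmt.Errorf("token unexpectedly visible in the default namespace")
+	}
+	return nil
+}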
+func testBackendEntPartition(t *testing.T) {
+	config := logical.TestBackendConfig()
+	config.StorageView = &logical.InmemStorage{}
+	b, err := Factory(context.Background(), config)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	cleanup, consulConfig := consul.PrepareTestContainer(t, "", true, true)
+	defer cleanup()
+
+	connData := map[string]interface{}{
+		"address": consulConfig.Address(),
+		"token":   consulConfig.Token,
+	}
+
+	req := &logical.Request{
+		Storage:   config.StorageView,
+		Operation: logical.UpdateOperation,
+		Path:      "config/access",
+		Data:      connData,
+	}
+	_, err = b.HandleRequest(context.Background(), req)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Create the role in partition "part1"
+	req.Path = "roles/test-part"
+	req.Data = map[string]interface{}{
+		"consul_policies": []string{"part-test"},
+		"lease":           "6h",
+		"partition":       "part1",
+	}
+	_, err = b.HandleRequest(context.Background(), req)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	req.Operation = logical.ReadOperation
+	req.Path = "creds/test-part"
+	resp, err := b.HandleRequest(context.Background(), req)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp == nil {
+		t.Fatal("resp nil")
+	}
+	if resp.IsError() {
+		t.Fatalf("resp is error: %v", resp.Error())
+	}
+
+	generatedSecret := resp.Secret
+	generatedSecret.TTL = 6 * time.Hour
+
+	var d struct {
+		Token     string `mapstructure:"token"`
+		Accessor  string `mapstructure:"accessor"`
+		Partition string `mapstructure:"partition"`
+	}
+	if err := mapstructure.Decode(resp.Data, &d); err != nil {
+		t.Fatal(err)
+	}
+
+	if d.Partition != "part1" {
+		t.Fatalf("Failed to access partition")
+	}
+
+	// Build a client and verify that the credentials work
+	consulapiConfig := consulapi.DefaultNonPooledConfig()
+	consulapiConfig.Address = connData["address"].(string)
+	consulapiConfig.Token = d.Token
+	client, err := consulapi.NewClient(consulapiConfig)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Catalog().Datacenters()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	req.Operation = logical.RenewOperation
+	req.Secret = generatedSecret
+	resp, err = b.HandleRequest(context.Background(), req)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp == nil {
+		t.Fatal("got nil response from renew")
+	}
+
+	req.Operation = logical.RevokeOperation
+	_, err = b.HandleRequest(context.Background(), req)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Build a management client and verify that the token does not exist
+	// anymore, reading it back in the partition it was created in
+	consulmgmtConfig := consulapi.DefaultNonPooledConfig()
+	consulmgmtConfig.Address = connData["address"].(string)
+	consulmgmtConfig.Token = connData["token"].(string)
+	mgmtclient, err := consulapi.NewClient(consulmgmtConfig)
+	if err != nil {
+		t.Fatal(err)
+	}
+	q := &consulapi.QueryOptions{
+		Datacenter: "DC1",
+		Partition:  "part1",
+	}
+
+	_, _, err = mgmtclient.ACL().TokenRead(d.Accessor, q)
+	if err == nil {
+		t.Fatal("err: expected error")
+	}
+}
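+
+// For illustration: a role may combine every credential source that the
+// matrix test below exercises. The concrete names are hypothetical.
+var exampleCombinedRole = map[string]interface{}{
+	"consul_policies":    []string{"test"},         // named ACL policies
+	"consul_roles":       []string{"role-test"},    // named ACL roles
+	"service_identities": []string{"service1:dc1"}, // service identity, optionally "name:dc1,dc2"
+	"node_identities":    []string{"node1:dc1"},    // node identity, "name:datacenter"
+	"lease":              "6h",
+}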
+func TestBackendRenewRevokeRolesAndIdentities(t *testing.T) {
+	config := logical.TestBackendConfig()
+	config.StorageView = &logical.InmemStorage{}
+	b, err := Factory(context.Background(), config)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	cleanup, consulConfig := consul.PrepareTestContainer(t, "", false, true)
+	defer cleanup()
+
+	connData := map[string]interface{}{
+		"address": consulConfig.Address(),
+		"token":   consulConfig.Token,
+	}
+
+	req := &logical.Request{
+		Storage:   config.StorageView,
+		Operation: logical.UpdateOperation,
+		Path:      "config/access",
+		Data:      connData,
+	}
+	resp, err := b.HandleRequest(context.Background(), req)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	cases := map[string]struct {
+		RoleName string
+		RoleData map[string]interface{}
+	}{
+		"just role": {
+			"r",
+			map[string]interface{}{
+				"consul_roles": []string{"role-test"},
+				"lease":        "6h",
+			},
+		},
+		"role and policies": {
+			"rp",
+			map[string]interface{}{
+				"consul_policies": []string{"test"},
+				"consul_roles":    []string{"role-test"},
+				"lease":           "6h",
+			},
+		},
+		"service identity": {
+			"si",
+			map[string]interface{}{
+				"service_identities": "service1",
+				"lease":              "6h",
+			},
+		},
+		"service identity and policies": {
+			"sip",
+			map[string]interface{}{
+				"consul_policies":    []string{"test"},
+				"service_identities": "service1",
+				"lease":              "6h",
+			},
+		},
+		"service identity and role": {
+			"sir",
+			map[string]interface{}{
+				"consul_roles":       []string{"role-test"},
+				"service_identities": "service1",
+				"lease":              "6h",
+			},
+		},
+		"service identity and role and policies": {
+			"sirp",
+			map[string]interface{}{
+				"consul_policies":    []string{"test"},
+				"consul_roles":       []string{"role-test"},
+				"service_identities": "service1",
+				"lease":              "6h",
+			},
+		},
+		"node identity": {
+			"ni",
+			map[string]interface{}{
+				"node_identities": []string{"node1:dc1"},
+				"lease":           "6h",
+			},
+		},
+		"node identity and policies": {
+			"nip",
+			map[string]interface{}{
+				"consul_policies": []string{"test"},
+				"node_identities": []string{"node1:dc1"},
+				"lease":           "6h",
+			},
+		},
+		"node identity and role": {
+			"nir",
+			map[string]interface{}{
+				"consul_roles":    []string{"role-test"},
+				"node_identities": []string{"node1:dc1"},
+				"lease":           "6h",
+			},
+		},
+		"node identity and role and policies": {
+			"nirp",
+			map[string]interface{}{
+				"consul_policies": []string{"test"},
+				"consul_roles":    []string{"role-test"},
+				"node_identities": []string{"node1:dc1"},
+				"lease":           "6h",
+			},
+		},
+		"node identity and service identity": {
+			"nisi",
+			map[string]interface{}{
+				"service_identities": "service1",
+				"node_identities":    []string{"node1:dc1"},
+				"lease":              "6h",
+			},
+		},
+		"node identity and service identity and policies": {
+			"nisip",
+			map[string]interface{}{
+				"consul_policies":    []string{"test"},
+				"service_identities": "service1",
+				"node_identities":    []string{"node1:dc1"},
+				"lease":              "6h",
+			},
+		},
+		"node identity and service identity and role": {
+			"nisir",
+			map[string]interface{}{
+				"consul_roles":       []string{"role-test"},
+				"service_identities": "service1",
+				"node_identities":    []string{"node1:dc1"},
+				"lease":              "6h",
+			},
+		},
+		"node identity and service identity and role and policies": {
+			"nisirp",
+			map[string]interface{}{
+				"consul_policies":    []string{"test"},
+				"consul_roles":       []string{"role-test"},
+				"service_identities": "service1",
+				"node_identities":    []string{"node1:dc1"},
+				"lease":              "6h",
+			},
+		},
+	}
+
+	for description, tc := range cases {
+		t.Logf("Testing: %s", description)
+
+		req.Operation = logical.UpdateOperation
+		req.Path = fmt.Sprintf("roles/%s", tc.RoleName)
+		req.Data = tc.RoleData
+		resp, err = b.HandleRequest(context.Background(), req)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		req.Operation = logical.ReadOperation
+		req.Path = fmt.Sprintf("creds/%s", tc.RoleName)
+		resp, err = b.HandleRequest(context.Background(), req)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if resp == nil {
+			t.Fatal("resp nil")
+		}
+		if resp.IsError() {
+			t.Fatalf("resp is error: %v", resp.Error())
+		}
+
+		generatedSecret := resp.Secret
+		generatedSecret.TTL = 6 * time.Hour
+
+		var d struct {
+			Token    string `mapstructure:"token"`
+			Accessor string `mapstructure:"accessor"`
+		}
+		if err := mapstructure.Decode(resp.Data, &d); err != nil {
+			t.Fatal(err)
+		}
+
+		// Build a client and verify that the credentials work
+		consulapiConfig := consulapi.DefaultNonPooledConfig()
+		consulapiConfig.Address = connData["address"].(string)
+		consulapiConfig.Token = d.Token
+		client, err := consulapi.NewClient(consulapiConfig)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		_, err = client.Catalog().Datacenters()
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		req.Operation = logical.RenewOperation
+		req.Secret = generatedSecret
+		resp, err = b.HandleRequest(context.Background(), req)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if resp == nil {
+			t.Fatal("got nil response from renew")
+		}
+
+		req.Operation = logical.RevokeOperation
+		resp, err = b.HandleRequest(context.Background(), req)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		// Build a management client and verify that the token does not exist anymore
+		consulmgmtConfig := consulapi.DefaultNonPooledConfig()
+		consulmgmtConfig.Address = connData["address"].(string)
+		consulmgmtConfig.Token = connData["token"].(string)
+		mgmtclient, err := consulapi.NewClient(consulmgmtConfig)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		q := &consulapi.QueryOptions{
+			Datacenter: "DC1",
+		}
+
+		_, _, err = mgmtclient.ACL().TokenRead(d.Accessor, q)
+		if err == nil {
+			t.Fatal("err: expected error")
+		}
+	}
+}
+
+const testPolicy = `
+key "" {
+	policy = "write"
+}`
diff --git a/builtin/logical/consul/client.go b/builtin/logical/consul/client.go
new file mode 100644
index 0000000..1e30c66
--- /dev/null
+++ b/builtin/logical/consul/client.go
@@ -0,0 +1,29 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package consul
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/hashicorp/consul/api"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+func (b *backend) client(ctx context.Context, s logical.Storage) (*api.Client, error, error) {
+	conf, userErr, intErr := b.readConfigAccess(ctx, s)
+	if intErr != nil {
+		return nil, nil, intErr
+	}
+	if userErr != nil {
+		return nil, userErr, nil
+	}
+	if conf == nil {
+		return nil, nil, fmt.Errorf("no error received but no configuration found")
+	}
+
+	consulConf := conf.NewConfig()
+	client, err := api.NewClient(consulConf)
+	return client, nil, err
+}
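+
+// For illustration: how a handler is expected to consume the three-value
+// return above. Internal errors propagate to Vault as real failures, while
+// user errors become error responses the caller can act on. This mirrors the
+// pattern used by the path handlers in this package; the function itself is
+// a sketch and is not wired into the backend.
+func (b *backend) exampleClientErrorHandling(ctx context.Context, s logical.Storage) (*logical.Response, error) {
+	c, userErr, intErr := b.client(ctx, s)
+	if intErr != nil {
+		return nil, intErr // storage or decoding failure
+	}
+	if userErr != nil {
+		return logical.ErrorResponse(userErr.Error()), nil // misconfiguration
+	}
+	_ = c // a real handler would use the Consul client here
+	return nil, nil
+}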
diff --git a/builtin/logical/consul/cmd/consul/main.go b/builtin/logical/consul/cmd/consul/main.go
new file mode 100644
index 0000000..f42a535
--- /dev/null
+++ b/builtin/logical/consul/cmd/consul/main.go
@@ -0,0 +1,34 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package main
+
+import (
+	"os"
+
+	hclog "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/builtin/logical/consul"
+	"github.com/hashicorp/vault/sdk/plugin"
+)
+
+func main() {
+	apiClientMeta := &api.PluginAPIClientMeta{}
+	flags := apiClientMeta.FlagSet()
+	if err := flags.Parse(os.Args[1:]); err != nil {
+		os.Exit(1)
+	}
+
+	tlsConfig := apiClientMeta.GetTLSConfig()
+	tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig)
+
+	if err := plugin.ServeMultiplex(&plugin.ServeOpts{
+		BackendFactoryFunc: consul.Factory,
+		// set the TLSProviderFunc so that the plugin maintains backwards
+		// compatibility with Vault versions that don’t support plugin AutoMTLS
+		TLSProviderFunc: tlsProviderFunc,
+	}); err != nil {
+		logger := hclog.New(&hclog.LoggerOptions{})
+
+		logger.Error("plugin shutting down", "error", err)
+		os.Exit(1)
+	}
+}
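+
+// For illustration: a hypothetical non-multiplexed entry point using the
+// older plugin.Serve API, which hosts a single backend instance per process.
+// It assumes an extra "crypto/tls" import; only the serve call differs from
+// main() above.
+func serveWithoutMultiplexing(tlsProviderFunc func() (*tls.Config, error)) error {
+	return plugin.Serve(&plugin.ServeOpts{
+		BackendFactoryFunc: consul.Factory,
+		TLSProviderFunc:    tlsProviderFunc,
+	})
+}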
+ Default: "http", + }, + + "token": { + Type: framework.TypeString, + Description: "Token for API calls", + }, + + "ca_cert": { + Type: framework.TypeString, + Description: `CA certificate to use when verifying Consul server certificate, +must be x509 PEM encoded.`, + }, + + "client_cert": { + Type: framework.TypeString, + Description: `Client certificate used for Consul's TLS communication, +must be x509 PEM encoded and if this is set you need to also set client_key.`, + }, + + "client_key": { + Type: framework.TypeString, + Description: `Client key used for Consul's TLS communication, +must be x509 PEM encoded and if this is set you need to also set client_cert.`, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigAccessRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "access-configuration", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigAccessWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "access", + }, + }, + }, + } +} + +func (b *backend) readConfigAccess(ctx context.Context, storage logical.Storage) (*accessConfig, error, error) { + entry, err := storage.Get(ctx, "config/access") + if err != nil { + return nil, nil, err + } + if entry == nil { + return nil, fmt.Errorf("access credentials for the backend itself haven't been configured; please configure them at the '/config/access' endpoint"), nil + } + + conf := &accessConfig{} + if err := entry.DecodeJSON(conf); err != nil { + return nil, nil, fmt.Errorf("error reading consul access configuration: %w", err) + } + + return conf, nil, nil +} + +func (b *backend) pathConfigAccessRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + conf, userErr, intErr := b.readConfigAccess(ctx, req.Storage) + if intErr != nil { + return nil, intErr + } + if userErr != nil { + return logical.ErrorResponse(userErr.Error()), nil + } + if conf == nil { + return nil, fmt.Errorf("no user error reported but consul access configuration not found") + } + + return &logical.Response{ + Data: map[string]interface{}{ + "address": conf.Address, + "scheme": conf.Scheme, + }, + }, nil +} + +func (b *backend) pathConfigAccessWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + config := accessConfig{ + Address: data.Get("address").(string), + Scheme: data.Get("scheme").(string), + Token: data.Get("token").(string), + CACert: data.Get("ca_cert").(string), + ClientCert: data.Get("client_cert").(string), + ClientKey: data.Get("client_key").(string), + } + + // If a token has not been given by the user, we try to boostrap the ACL + // support + if config.Token == "" { + consulConf := config.NewConfig() + client, err := api.NewClient(consulConf) + if err != nil { + return nil, err + } + token, _, err := client.ACL().Bootstrap() + if err != nil { + return logical.ErrorResponse("Token not provided and failed to bootstrap ACLs: %s", err), nil + } + config.Token = token.SecretID + } + + entry, err := logical.StorageEntryJSON("config/access", config) + if err != nil { + return nil, err + } + + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + + return nil, nil +} + +type accessConfig struct { + Address string `json:"address"` + Scheme string `json:"scheme"` + Token string `json:"token"` + CACert string `json:"ca_cert"` + ClientCert 
string `json:"client_cert"` + ClientKey string `json:"client_key"` +} + +func (conf *accessConfig) NewConfig() *api.Config { + consulConf := api.DefaultNonPooledConfig() + consulConf.Address = conf.Address + consulConf.Scheme = conf.Scheme + consulConf.Token = conf.Token + consulConf.TLSConfig.CAPem = []byte(conf.CACert) + consulConf.TLSConfig.CertPEM = []byte(conf.ClientCert) + consulConf.TLSConfig.KeyPEM = []byte(conf.ClientKey) + + return consulConf +} diff --git a/builtin/logical/consul/path_roles.go b/builtin/logical/consul/path_roles.go new file mode 100644 index 0000000..3e8c059 --- /dev/null +++ b/builtin/logical/consul/path_roles.go @@ -0,0 +1,294 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package consul + +import ( + "context" + "encoding/base64" + "fmt" + "time" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathListRoles(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "roles/?$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixConsul, + OperationSuffix: "roles", + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: b.pathRoleList, + }, + } +} + +func pathRoles(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "roles/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixConsul, + OperationSuffix: "role", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the role.", + }, + + // The "policy" and "token_type" parameters were deprecated in Consul back in version 1.4. + // They have been removed from Consul as of version 1.11. Consider removing them here in the future. + "policy": { + Type: framework.TypeString, + Description: `Policy document, base64 encoded. Required +for 'client' tokens. Required for Consul pre-1.4.`, + Deprecated: true, + }, + + "token_type": { + Type: framework.TypeString, + Default: "client", + Description: `Which type of token to create: 'client' or 'management'. If +a 'management' token, the "policy", "policies", and "consul_roles" parameters are not +required. Defaults to 'client'.`, + Deprecated: true, + }, + + "policies": { + Type: framework.TypeCommaStringSlice, + Description: `Use "consul_policies" instead.`, + Deprecated: true, + }, + + "consul_policies": { + Type: framework.TypeCommaStringSlice, + Description: `List of policies to attach to the token. Either "consul_policies" +or "consul_roles" are required for Consul 1.5 and above, or just "consul_policies" if +using Consul 1.4.`, + }, + + "consul_roles": { + Type: framework.TypeCommaStringSlice, + Description: `List of Consul roles to attach to the token. Either "policies" +or "consul_roles" are required for Consul 1.5 and above.`, + }, + + "local": { + Type: framework.TypeBool, + Description: `Indicates that the token should not be replicated globally +and instead be local to the current datacenter. 
Available in Consul 1.4 and above.`, + }, + + "ttl": { + Type: framework.TypeDurationSecond, + Description: "TTL for the Consul token created from the role.", + }, + + "max_ttl": { + Type: framework.TypeDurationSecond, + Description: "Max TTL for the Consul token created from the role.", + }, + + "lease": { + Type: framework.TypeDurationSecond, + Description: `Use "ttl" instead.`, + Deprecated: true, + }, + + "consul_namespace": { + Type: framework.TypeString, + Description: `Indicates which namespace that the token will be +created within. Defaults to 'default'. Available in Consul 1.7 and above.`, + }, + + "partition": { + Type: framework.TypeString, + Description: `Indicates which admin partition that the token +will be created within. Defaults to 'default'. Available in Consul 1.11 and above.`, + }, + + "service_identities": { + Type: framework.TypeStringSlice, + Description: `List of Service Identities to attach to the +token, separated by semicolons. Available in Consul 1.5 or above.`, + }, + + "node_identities": { + Type: framework.TypeStringSlice, + Description: `List of Node Identities to attach to the +token. Available in Consul 1.8.1 or above.`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathRolesRead, + logical.UpdateOperation: b.pathRolesWrite, + logical.DeleteOperation: b.pathRolesDelete, + }, + } +} + +func (b *backend) pathRoleList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + entries, err := req.Storage.List(ctx, "policy/") + if err != nil { + return nil, err + } + + return logical.ListResponse(entries), nil +} + +func (b *backend) pathRolesRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + + entry, err := req.Storage.Get(ctx, "policy/"+name) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var roleConfigData roleConfig + if err := entry.DecodeJSON(&roleConfigData); err != nil { + return nil, err + } + + if roleConfigData.TokenType == "" { + roleConfigData.TokenType = "client" + } + + // Generate the response + resp := &logical.Response{ + Data: map[string]interface{}{ + "lease": int64(roleConfigData.TTL.Seconds()), + "ttl": int64(roleConfigData.TTL.Seconds()), + "max_ttl": int64(roleConfigData.MaxTTL.Seconds()), + "token_type": roleConfigData.TokenType, + "local": roleConfigData.Local, + "consul_namespace": roleConfigData.ConsulNamespace, + "partition": roleConfigData.Partition, + }, + } + if roleConfigData.Policy != "" { + resp.Data["policy"] = base64.StdEncoding.EncodeToString([]byte(roleConfigData.Policy)) + } + if len(roleConfigData.Policies) > 0 { + resp.Data["consul_policies"] = roleConfigData.Policies + } + if len(roleConfigData.ConsulRoles) > 0 { + resp.Data["consul_roles"] = roleConfigData.ConsulRoles + } + if len(roleConfigData.ServiceIdentities) > 0 { + resp.Data["service_identities"] = roleConfigData.ServiceIdentities + } + if len(roleConfigData.NodeIdentities) > 0 { + resp.Data["node_identities"] = roleConfigData.NodeIdentities + } + + return resp, nil +} + +func (b *backend) pathRolesWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + tokenType := d.Get("token_type").(string) + policy := d.Get("policy").(string) + consulPolicies := d.Get("consul_policies").([]string) + policies := d.Get("policies").([]string) + roles := d.Get("consul_roles").([]string) + serviceIdentities := 
d.Get("service_identities").([]string) + nodeIdentities := d.Get("node_identities").([]string) + + switch tokenType { + case "client": + if policy == "" && len(policies) == 0 && len(consulPolicies) == 0 && + len(roles) == 0 && len(serviceIdentities) == 0 && len(nodeIdentities) == 0 { + return logical.ErrorResponse( + "Use either a policy document, a list of policies or roles, or a set of service or node identities, depending on your Consul version"), nil + } + case "management": + default: + return logical.ErrorResponse("token_type must be \"client\" or \"management\""), nil + } + + if len(consulPolicies) == 0 { + consulPolicies = policies + } + + policyRaw, err := base64.StdEncoding.DecodeString(policy) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf( + "Error decoding policy base64: %s", err)), nil + } + + var ttl time.Duration + ttlRaw, ok := d.GetOk("ttl") + if ok { + ttl = time.Second * time.Duration(ttlRaw.(int)) + } else { + leaseParamRaw, ok := d.GetOk("lease") + if ok { + ttl = time.Second * time.Duration(leaseParamRaw.(int)) + } + } + + var maxTTL time.Duration + maxTTLRaw, ok := d.GetOk("max_ttl") + if ok { + maxTTL = time.Second * time.Duration(maxTTLRaw.(int)) + } + + name := d.Get("name").(string) + local := d.Get("local").(bool) + namespace := d.Get("consul_namespace").(string) + partition := d.Get("partition").(string) + entry, err := logical.StorageEntryJSON("policy/"+name, roleConfig{ + Policy: string(policyRaw), + Policies: consulPolicies, + ConsulRoles: roles, + ServiceIdentities: serviceIdentities, + NodeIdentities: nodeIdentities, + TokenType: tokenType, + TTL: ttl, + MaxTTL: maxTTL, + Local: local, + ConsulNamespace: namespace, + Partition: partition, + }) + if err != nil { + return nil, err + } + + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + + return nil, nil +} + +func (b *backend) pathRolesDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + if err := req.Storage.Delete(ctx, "policy/"+name); err != nil { + return nil, err + } + return nil, nil +} + +type roleConfig struct { + Policy string `json:"policy"` + Policies []string `json:"policies"` + ConsulRoles []string `json:"consul_roles"` + ServiceIdentities []string `json:"service_identities"` + NodeIdentities []string `json:"node_identities"` + TTL time.Duration `json:"lease"` + MaxTTL time.Duration `json:"max_ttl"` + TokenType string `json:"token_type"` + Local bool `json:"local"` + ConsulNamespace string `json:"consul_namespace"` + Partition string `json:"partition"` +} diff --git a/builtin/logical/consul/path_token.go b/builtin/logical/consul/path_token.go new file mode 100644 index 0000000..9c8dbd1 --- /dev/null +++ b/builtin/logical/consul/path_token.go @@ -0,0 +1,182 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package consul + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + tokenPolicyType = "token" +) + +func pathToken(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "creds/" + framework.GenericNameRegex("role"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixConsul, + OperationVerb: "generate", + OperationSuffix: "credentials", + }, + + Fields: map[string]*framework.FieldSchema{ + "role": { + Type: framework.TypeString, + Description: "Name of the role.", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathTokenRead, + }, + } +} + +func (b *backend) pathTokenRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + role := d.Get("role").(string) + entry, err := req.Storage.Get(ctx, "policy/"+role) + if err != nil { + return nil, fmt.Errorf("error retrieving role: %w", err) + } + if entry == nil { + return logical.ErrorResponse(fmt.Sprintf("role %q not found", role)), nil + } + + var roleConfigData roleConfig + if err := entry.DecodeJSON(&roleConfigData); err != nil { + return nil, err + } + + if roleConfigData.TokenType == "" { + roleConfigData.TokenType = "client" + } + + // Get the consul client + c, userErr, intErr := b.client(ctx, req.Storage) + if intErr != nil { + return nil, intErr + } + if userErr != nil { + return logical.ErrorResponse(userErr.Error()), nil + } + + // Generate a name for the token + tokenName := fmt.Sprintf("Vault %s %s %d", role, req.DisplayName, time.Now().UnixNano()) + + writeOpts := &api.WriteOptions{} + writeOpts = writeOpts.WithContext(ctx) + + // Create an ACLEntry for Consul pre 1.4 + if (roleConfigData.Policy != "" && roleConfigData.TokenType == "client") || + (roleConfigData.Policy == "" && roleConfigData.TokenType == "management") { + token, _, err := c.ACL().Create(&api.ACLEntry{ + Name: tokenName, + Type: roleConfigData.TokenType, + Rules: roleConfigData.Policy, + }, writeOpts) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + // Use the helper to create the secret + s := b.Secret(SecretTokenType).Response(map[string]interface{}{ + "token": token, + }, map[string]interface{}{ + "token": token, + "role": role, + }) + s.Secret.TTL = roleConfigData.TTL + s.Secret.MaxTTL = roleConfigData.MaxTTL + return s, nil + } + + // Create an ACLToken for Consul 1.4 and above + policyLinks := []*api.ACLTokenPolicyLink{} + for _, policyName := range roleConfigData.Policies { + policyLinks = append(policyLinks, &api.ACLTokenPolicyLink{ + Name: policyName, + }) + } + + roleLinks := []*api.ACLTokenRoleLink{} + for _, roleName := range roleConfigData.ConsulRoles { + roleLinks = append(roleLinks, &api.ACLTokenRoleLink{ + Name: roleName, + }) + } + + aclServiceIdentities := parseServiceIdentities(roleConfigData.ServiceIdentities) + aclNodeIdentities := parseNodeIdentities(roleConfigData.NodeIdentities) + + token, _, err := c.ACL().TokenCreate(&api.ACLToken{ + Description: tokenName, + Policies: policyLinks, + Roles: roleLinks, + ServiceIdentities: aclServiceIdentities, + NodeIdentities: aclNodeIdentities, + Local: roleConfigData.Local, + Namespace: roleConfigData.ConsulNamespace, + Partition: roleConfigData.Partition, + }, writeOpts) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + // Use the helper 
to create the secret
+	s := b.Secret(SecretTokenType).Response(map[string]interface{}{
+		"token":            token.SecretID,
+		"accessor":         token.AccessorID,
+		"local":            token.Local,
+		"consul_namespace": token.Namespace,
+		"partition":        token.Partition,
+	}, map[string]interface{}{
+		"token":   token.AccessorID,
+		"role":    role,
+		"version": tokenPolicyType,
+	})
+	s.Secret.TTL = roleConfigData.TTL
+	s.Secret.MaxTTL = roleConfigData.MaxTTL
+
+	return s, nil
+}
+
+// parseServiceIdentities turns "name" or "name:dc1,dc2" strings into Consul
+// ACL service identities.
+func parseServiceIdentities(data []string) []*api.ACLServiceIdentity {
+	aclServiceIdentities := []*api.ACLServiceIdentity{}
+
+	for _, serviceIdentity := range data {
+		entry := &api.ACLServiceIdentity{}
+		// Split only on the first colon so the datacenter list stays intact.
+		components := strings.SplitN(serviceIdentity, ":", 2)
+		entry.ServiceName = components[0]
+		if len(components) == 2 {
+			entry.Datacenters = strings.Split(components[1], ",")
+		}
+		aclServiceIdentities = append(aclServiceIdentities, entry)
+	}
+
+	return aclServiceIdentities
+}
+
+// parseNodeIdentities turns "name" or "name:datacenter" strings into Consul
+// ACL node identities.
+func parseNodeIdentities(data []string) []*api.ACLNodeIdentity {
+	aclNodeIdentities := []*api.ACLNodeIdentity{}
+
+	for _, nodeIdentity := range data {
+		entry := &api.ACLNodeIdentity{}
+		components := strings.Split(nodeIdentity, ":")
+		entry.NodeName = components[0]
+		if len(components) > 1 {
+			entry.Datacenter = components[1]
+		}
+		aclNodeIdentities = append(aclNodeIdentities, entry)
+	}
+
+	return aclNodeIdentities
+}
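+
+// For illustration: how the parsers above decompose identity strings. The
+// identity names are hypothetical and this helper is not called by the
+// backend.
+func exampleParseIdentities() {
+	svc := parseServiceIdentities([]string{"web:dc1,dc2", "api"})
+	fmt.Println(svc[0].ServiceName, svc[0].Datacenters) // web [dc1 dc2]
+	fmt.Println(svc[1].ServiceName, svc[1].Datacenters) // api []
+
+	nodes := parseNodeIdentities([]string{"node1:dc1"})
+	fmt.Println(nodes[0].NodeName, nodes[0].Datacenter) // node1 dc1
+}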
diff --git a/builtin/logical/consul/path_token_test.go b/builtin/logical/consul/path_token_test.go
new file mode 100644
index 0000000..77e7f29
--- /dev/null
+++ b/builtin/logical/consul/path_token_test.go
@@ -0,0 +1,107 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package consul
+
+import (
+	"reflect"
+	"testing"
+
+	"github.com/hashicorp/consul/api"
+)
+
+func TestToken_parseServiceIdentities(t *testing.T) {
+	tests := []struct {
+		name string
+		args []string
+		want []*api.ACLServiceIdentity
+	}{
+		{
+			name: "No datacenters",
+			args: []string{"myservice-1"},
+			want: []*api.ACLServiceIdentity{{ServiceName: "myservice-1", Datacenters: nil}},
+		},
+		{
+			name: "One datacenter",
+			args: []string{"myservice-1:dc1"},
+			want: []*api.ACLServiceIdentity{{ServiceName: "myservice-1", Datacenters: []string{"dc1"}}},
+		},
+		{
+			name: "Multiple datacenters",
+			args: []string{"myservice-1:dc1,dc2,dc3"},
+			want: []*api.ACLServiceIdentity{{ServiceName: "myservice-1", Datacenters: []string{"dc1", "dc2", "dc3"}}},
+		},
+		{
+			name: "Missing service name with datacenter",
+			args: []string{":dc1"},
+			want: []*api.ACLServiceIdentity{{ServiceName: "", Datacenters: []string{"dc1"}}},
+		},
+		{
+			name: "Missing service name and missing datacenter",
+			args: []string{""},
+			want: []*api.ACLServiceIdentity{{ServiceName: "", Datacenters: nil}},
+		},
+		{
+			name: "Multiple service identities",
+			args: []string{"myservice-1:dc1", "myservice-2:dc1", "myservice-3:dc1,dc2"},
+			want: []*api.ACLServiceIdentity{
+				{ServiceName: "myservice-1", Datacenters: []string{"dc1"}},
+				{ServiceName: "myservice-2", Datacenters: []string{"dc1"}},
+				{ServiceName: "myservice-3", Datacenters: []string{"dc1", "dc2"}},
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got := parseServiceIdentities(tt.args)
+			if !reflect.DeepEqual(got, tt.want) {
+				// got and want always have the same length here: the parser
+				// yields exactly one identity per input string.
+				for i := range got {
+					t.Errorf("parseServiceIdentities()[%d] = {%s:%v}, want {%s:%v}",
+						i, got[i].ServiceName, got[i].Datacenters, tt.want[i].ServiceName, tt.want[i].Datacenters)
+				}
+			}
+		})
+	}
+}
+
+func TestToken_parseNodeIdentities(t *testing.T) {
+	tests := []struct {
+		name string
+		args []string
+		want []*api.ACLNodeIdentity
+	}{
+		{
+			name: "No datacenter",
+			args: []string{"server-1"},
+			want: []*api.ACLNodeIdentity{{NodeName: "server-1", Datacenter: ""}},
+		},
+		{
+			name: "One datacenter",
+			args: []string{"server-1:dc1"},
+			want: []*api.ACLNodeIdentity{{NodeName: "server-1", Datacenter: "dc1"}},
+		},
+		{
+			name: "Missing node name with datacenter",
+			args: []string{":dc1"},
+			want: []*api.ACLNodeIdentity{{NodeName: "", Datacenter: "dc1"}},
+		},
+		{
+			name: "Missing node name and missing datacenter",
+			args: []string{""},
+			want: []*api.ACLNodeIdentity{{NodeName: "", Datacenter: ""}},
+		},
+		{
+			name: "Multiple node identities",
+			args: []string{"server-1:dc1", "server-2:dc1", "server-3:dc1"},
+			want: []*api.ACLNodeIdentity{
+				{NodeName: "server-1", Datacenter: "dc1"},
+				{NodeName: "server-2", Datacenter: "dc1"},
+				{NodeName: "server-3", Datacenter: "dc1"},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got := parseNodeIdentities(tt.args)
+			if !reflect.DeepEqual(got, tt.want) {
+				for i := range got {
+					t.Errorf("parseNodeIdentities()[%d] = {%s:%s}, want {%s:%s}",
+						i, got[i].NodeName, got[i].Datacenter, tt.want[i].NodeName, tt.want[i].Datacenter)
+				}
+			}
+		})
+	}
+}
diff --git a/builtin/logical/consul/secret_token.go b/builtin/logical/consul/secret_token.go
new file mode 100644
index 0000000..8c56e0a
--- /dev/null
+++ b/builtin/logical/consul/secret_token.go
@@ -0,0 +1,123 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package consul
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/hashicorp/consul/api"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+const (
+	SecretTokenType = "token"
+)
+
+func secretToken(b *backend) *framework.Secret {
+	return &framework.Secret{
+		Type: SecretTokenType,
+		Fields: map[string]*framework.FieldSchema{
+			"token": {
+				Type:        framework.TypeString,
+				Description: "Request token",
+			},
+		},
+
+		Renew:  b.secretTokenRenew,
+		Revoke: b.secretTokenRevoke,
+	}
+}
+
+func (b *backend) secretTokenRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	resp := &logical.Response{Secret: req.Secret}
+	roleRaw, ok := req.Secret.InternalData["role"]
+	if !ok {
+		return resp, nil
+	}
+
+	role, ok := roleRaw.(string)
+	if !ok {
+		return resp, nil
+	}
+
+	entry, err := req.Storage.Get(ctx, "policy/"+role)
+	if err != nil {
+		return nil, fmt.Errorf("error retrieving role: %w", err)
+	}
+	if entry == nil {
+		return logical.ErrorResponse(fmt.Sprintf("issuing role %q not found", role)), nil
+	}
+
+	var result roleConfig
+	if err := entry.DecodeJSON(&result); err != nil {
+		return nil, err
+	}
+	resp.Secret.TTL = result.TTL
+	resp.Secret.MaxTTL = result.MaxTTL
+	return resp, nil
+}
+
+func (b *backend) secretTokenRevoke(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	c, userErr, intErr := b.client(ctx, req.Storage)
+	if intErr != nil {
+		return nil, intErr
+	}
+	if userErr != nil {
+		// Returning logical.ErrorResponse from a revocation function is
+		// risky: Vault only inspects the returned error, so an error response
+		// paired with a nil error would be treated as a successful revocation.
+		return nil, userErr
+	}
+
+	tokenRaw, ok := req.Secret.InternalData["token"]
+	if !ok {
+		// We return nil here because this is a pre-0.5.3 problem and there is
+		// nothing we can do about it. We already can't revoke the lease
+		// properly if it has been renewed and this is documented pre-0.5.3
+		// behavior with a security bulletin about it.
+ return nil, nil + } + + var version string + versionRaw, ok := req.Secret.InternalData["version"] + if ok { + version = versionRaw.(string) + } + + // Extract Consul Namespace and Partition info from secret + var revokeWriteOptions *api.WriteOptions + var namespace, partition string + + namespaceRaw, ok := req.Data["consul_namespace"] + if ok { + namespace = namespaceRaw.(string) + } + partitionRaw, ok := req.Data["partition"] + if ok { + partition = partitionRaw.(string) + } + + revokeWriteOptions = &api.WriteOptions{ + Namespace: namespace, + Partition: partition, + } + + switch version { + case "": + // Pre 1.4 tokens + _, err := c.ACL().Destroy(tokenRaw.(string), nil) + if err != nil { + return nil, err + } + case tokenPolicyType: + _, err := c.ACL().TokenDelete(tokenRaw.(string), revokeWriteOptions) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("Invalid version string in data: %s", version) + } + + return nil, nil +} diff --git a/builtin/logical/database/backend.go b/builtin/logical/database/backend.go new file mode 100644 index 0000000..94091e2 --- /dev/null +++ b/builtin/logical/database/backend.go @@ -0,0 +1,438 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package database + +import ( + "context" + "fmt" + "net/rpc" + "strings" + "sync" + "time" + + "github.com/armon/go-metrics" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/helper/metricsutil" + "github.com/hashicorp/vault/internalshared/configutil" + v4 "github.com/hashicorp/vault/sdk/database/dbplugin" + v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/hashicorp/vault/sdk/database/helper/dbutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/locksutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/queue" +) + +const ( + operationPrefixDatabase = "database" + databaseConfigPath = "config/" + databaseRolePath = "role/" + databaseStaticRolePath = "static-role/" + minRootCredRollbackAge = 1 * time.Minute +) + +type dbPluginInstance struct { + sync.RWMutex + database databaseVersionWrapper + + id string + name string + closed bool +} + +func (dbi *dbPluginInstance) Close() error { + dbi.Lock() + defer dbi.Unlock() + + if dbi.closed { + return nil + } + dbi.closed = true + + return dbi.database.Close() +} + +func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { + b := Backend(conf) + if err := b.Setup(ctx, conf); err != nil { + return nil, err + } + + b.credRotationQueue = queue.New() + // Load queue and kickoff new periodic ticker + go b.initQueue(b.queueCtx, conf, conf.System.ReplicationState()) + + // collect metrics on number of plugin instances + var err error + b.gaugeCollectionProcess, err = metricsutil.NewGaugeCollectionProcess( + []string{"secrets", "database", "backend", "pluginInstances", "count"}, + []metricsutil.Label{}, + b.collectPluginInstanceGaugeValues, + metrics.Default(), + configutil.UsageGaugeDefaultPeriod, // TODO: add config settings for these, or add plumbing to the main config settings + configutil.MaximumGaugeCardinalityDefault, + b.logger) + if err != nil { + return nil, err + } + go b.gaugeCollectionProcess.Run() + return b, nil +} + +func Backend(conf *logical.BackendConfig) *databaseBackend { + var b databaseBackend + b.Backend = &framework.Backend{ + Help: strings.TrimSpace(backendHelp), + + PathsSpecial: 
&logical.Paths{ + LocalStorage: []string{ + framework.WALPrefix, + }, + SealWrapStorage: []string{ + "config/*", + "static-role/*", + }, + }, + Paths: framework.PathAppend( + []*framework.Path{ + pathListPluginConnection(&b), + pathConfigurePluginConnection(&b), + pathResetConnection(&b), + }, + pathListRoles(&b), + pathRoles(&b), + pathCredsCreate(&b), + pathRotateRootCredentials(&b), + ), + + Secrets: []*framework.Secret{ + secretCreds(&b), + }, + Clean: b.clean, + Invalidate: b.invalidate, + WALRollback: b.walRollback, + WALRollbackMinAge: minRootCredRollbackAge, + BackendType: logical.TypeLogical, + } + + b.logger = conf.Logger + b.connections = make(map[string]*dbPluginInstance) + b.queueCtx, b.cancelQueueCtx = context.WithCancel(context.Background()) + b.roleLocks = locksutil.CreateLocks() + return &b +} + +func (b *databaseBackend) collectPluginInstanceGaugeValues(context.Context) ([]metricsutil.GaugeLabelValues, error) { + // copy the map so we can release the lock + connMapCopy := func() map[string]*dbPluginInstance { + b.connLock.RLock() + defer b.connLock.RUnlock() + mapCopy := map[string]*dbPluginInstance{} + for k, v := range b.connections { + mapCopy[k] = v + } + return mapCopy + }() + counts := map[string]int{} + for _, v := range connMapCopy { + dbType, err := v.database.Type() + if err != nil { + // there's a chance this will already be closed since we don't hold the lock + continue + } + if _, ok := counts[dbType]; !ok { + counts[dbType] = 0 + } + counts[dbType] += 1 + } + var gauges []metricsutil.GaugeLabelValues + for k, v := range counts { + gauges = append(gauges, metricsutil.GaugeLabelValues{Labels: []metricsutil.Label{{Name: "dbType", Value: k}}, Value: float32(v)}) + } + return gauges, nil +} + +type databaseBackend struct { + // connLock is used to synchronize access to the connections map + connLock sync.RWMutex + // connections holds configured database connections by config name + connections map[string]*dbPluginInstance + logger log.Logger + + *framework.Backend + // credRotationQueue is an in-memory priority queue used to track Static Roles + // that require periodic rotation. Backends will have a PriorityQueue + // initialized on setup, but only backends that are mounted by a primary + // server or mounted as a local mount will perform the rotations. + credRotationQueue *queue.PriorityQueue + // queueCtx is the context for the priority queue + queueCtx context.Context + // cancelQueueCtx is used to terminate the background ticker + cancelQueueCtx context.CancelFunc + + // roleLocks is used to lock modifications to roles in the queue, to ensure + // concurrent requests are not modifying the same role and possibly causing + // issues with the priority queue. 
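+	// locksutil shards these locks by role name, so requests against
+	// different roles rarely contend on the same lock.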
+ roleLocks []*locksutil.LockEntry + + // the running gauge collection process + gaugeCollectionProcess *metricsutil.GaugeCollectionProcess + gaugeCollectionProcessStop sync.Once +} + +func (b *databaseBackend) connGet(name string) *dbPluginInstance { + b.connLock.RLock() + defer b.connLock.RUnlock() + return b.connections[name] +} + +func (b *databaseBackend) connPop(name string) *dbPluginInstance { + b.connLock.Lock() + defer b.connLock.Unlock() + dbi, ok := b.connections[name] + if ok { + delete(b.connections, name) + } + return dbi +} + +func (b *databaseBackend) connPopIfEqual(name, id string) *dbPluginInstance { + b.connLock.Lock() + defer b.connLock.Unlock() + dbi, ok := b.connections[name] + if ok && dbi.id == id { + delete(b.connections, name) + return dbi + } + return nil +} + +func (b *databaseBackend) connPut(name string, newDbi *dbPluginInstance) *dbPluginInstance { + b.connLock.Lock() + defer b.connLock.Unlock() + dbi := b.connections[name] + b.connections[name] = newDbi + return dbi +} + +func (b *databaseBackend) connClear() map[string]*dbPluginInstance { + b.connLock.Lock() + defer b.connLock.Unlock() + old := b.connections + b.connections = make(map[string]*dbPluginInstance) + return old +} + +func (b *databaseBackend) DatabaseConfig(ctx context.Context, s logical.Storage, name string) (*DatabaseConfig, error) { + entry, err := s.Get(ctx, fmt.Sprintf("config/%s", name)) + if err != nil { + return nil, fmt.Errorf("failed to read connection configuration: %w", err) + } + if entry == nil { + return nil, fmt.Errorf("failed to find entry for connection with name: %q", name) + } + + var config DatabaseConfig + if err := entry.DecodeJSON(&config); err != nil { + return nil, err + } + + return &config, nil +} + +type upgradeStatements struct { + // This json tag has a typo in it, the new version does not. This + // necessitates this upgrade logic. + CreationStatements string `json:"creation_statments"` + RevocationStatements string `json:"revocation_statements"` + RollbackStatements string `json:"rollback_statements"` + RenewStatements string `json:"renew_statements"` +} + +type upgradeCheck struct { + // This json tag has a typo in it, the new version does not. This + // necessitates this upgrade logic. 
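+	// The misspelled "statments" key below is deliberate: it must match the
+	// legacy key already persisted in storage for the upgrade to decode it.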
+ Statements *upgradeStatements `json:"statments,omitempty"` +} + +func (b *databaseBackend) Role(ctx context.Context, s logical.Storage, roleName string) (*roleEntry, error) { + return b.roleAtPath(ctx, s, roleName, databaseRolePath) +} + +func (b *databaseBackend) StaticRole(ctx context.Context, s logical.Storage, roleName string) (*roleEntry, error) { + return b.roleAtPath(ctx, s, roleName, databaseStaticRolePath) +} + +func (b *databaseBackend) roleAtPath(ctx context.Context, s logical.Storage, roleName string, pathPrefix string) (*roleEntry, error) { + entry, err := s.Get(ctx, pathPrefix+roleName) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var upgradeCh upgradeCheck + if err := entry.DecodeJSON(&upgradeCh); err != nil { + return nil, err + } + + var result roleEntry + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + switch { + case upgradeCh.Statements != nil: + var stmts v4.Statements + if upgradeCh.Statements.CreationStatements != "" { + stmts.Creation = []string{upgradeCh.Statements.CreationStatements} + } + if upgradeCh.Statements.RevocationStatements != "" { + stmts.Revocation = []string{upgradeCh.Statements.RevocationStatements} + } + if upgradeCh.Statements.RollbackStatements != "" { + stmts.Rollback = []string{upgradeCh.Statements.RollbackStatements} + } + if upgradeCh.Statements.RenewStatements != "" { + stmts.Renewal = []string{upgradeCh.Statements.RenewStatements} + } + result.Statements = stmts + } + + result.Statements.Revocation = strutil.RemoveEmpty(result.Statements.Revocation) + + // For backwards compatibility, copy the values back into the string form + // of the fields + result.Statements = dbutil.StatementCompatibilityHelper(result.Statements) + + return &result, nil +} + +func (b *databaseBackend) invalidate(ctx context.Context, key string) { + switch { + case strings.HasPrefix(key, databaseConfigPath): + name := strings.TrimPrefix(key, databaseConfigPath) + b.ClearConnection(name) + } +} + +func (b *databaseBackend) GetConnection(ctx context.Context, s logical.Storage, name string) (*dbPluginInstance, error) { + config, err := b.DatabaseConfig(ctx, s, name) + if err != nil { + return nil, err + } + + return b.GetConnectionWithConfig(ctx, name, config) +} + +func (b *databaseBackend) GetConnectionWithConfig(ctx context.Context, name string, config *DatabaseConfig) (*dbPluginInstance, error) { + dbi := b.connGet(name) + if dbi != nil { + return dbi, nil + } + + id, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + + dbw, err := newDatabaseWrapper(ctx, config.PluginName, config.PluginVersion, b.System(), b.logger) + if err != nil { + return nil, fmt.Errorf("unable to create database instance: %w", err) + } + + initReq := v5.InitializeRequest{ + Config: config.ConnectionDetails, + VerifyConnection: true, + } + _, err = dbw.Initialize(ctx, initReq) + if err != nil { + dbw.Close() + return nil, err + } + + dbi = &dbPluginInstance{ + database: dbw, + id: id, + name: name, + } + oldConn := b.connPut(name, dbi) + if oldConn != nil { + err := oldConn.Close() + if err != nil { + b.Logger().Warn("Error closing database connection", "error", err) + } + } + return dbi, nil +} + +// ClearConnection closes the database connection and +// removes it from the b.connections map. 
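+// Close errors are ignored because the plugin process is torn down
+// regardless, so this always returns nil.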
+func (b *databaseBackend) ClearConnection(name string) error { + db := b.connPop(name) + if db != nil { + // Ignore error here since the database client is always killed + db.Close() + } + return nil +} + +// ClearConnectionId closes the database connection with a specific id and +// removes it from the b.connections map. +func (b *databaseBackend) ClearConnectionId(name, id string) error { + db := b.connPopIfEqual(name, id) + if db != nil { + // Ignore error here since the database client is always killed + db.Close() + } + return nil +} + +func (b *databaseBackend) CloseIfShutdown(db *dbPluginInstance, err error) { + // Plugin has shutdown, close it so next call can reconnect. + switch err { + case rpc.ErrShutdown, v4.ErrPluginShutdown, v5.ErrPluginShutdown: + // Put this in a goroutine so that requests can run with the read or write lock + // and simply defer the unlock. Since we are attaching the instance and matching + // the id in the connection map, we can safely do this. + go func() { + db.Close() + + // Delete the connection if it is still active. + b.connPopIfEqual(db.name, db.id) + }() + } +} + +// clean closes all connections from all database types +// and cancels any rotation queue loading operation. +func (b *databaseBackend) clean(_ context.Context) { + // kill the queue and terminate the background ticker + if b.cancelQueueCtx != nil { + b.cancelQueueCtx() + } + + connections := b.connClear() + for _, db := range connections { + go db.Close() + } + b.gaugeCollectionProcessStop.Do(func() { + if b.gaugeCollectionProcess != nil { + b.gaugeCollectionProcess.Stop() + } + b.gaugeCollectionProcess = nil + }) +} + +const backendHelp = ` +The database backend supports using many different databases +as secret backends, including but not limited to: +cassandra, mssql, mysql, postgres + +After mounting this backend, configure it using the endpoints within +the "database/config/" path. +` diff --git a/builtin/logical/database/backend_test.go b/builtin/logical/database/backend_test.go new file mode 100644 index 0000000..574bcd0 --- /dev/null +++ b/builtin/logical/database/backend_test.go @@ -0,0 +1,1621 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package database + +import ( + "context" + "database/sql" + "fmt" + "log" + "net/url" + "os" + "reflect" + "strings" + "testing" + "time" + + "github.com/go-test/deep" + "github.com/hashicorp/go-hclog" + mongodbatlas "github.com/hashicorp/vault-plugin-database-mongodbatlas" + "github.com/hashicorp/vault/helper/builtinplugins" + "github.com/hashicorp/vault/helper/namespace" + postgreshelper "github.com/hashicorp/vault/helper/testhelpers/postgresql" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/plugins/database/mongodb" + "github.com/hashicorp/vault/plugins/database/postgresql" + v4 "github.com/hashicorp/vault/sdk/database/dbplugin" + v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/hashicorp/vault/sdk/database/helper/dbutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + _ "github.com/jackc/pgx/v4" + "github.com/mitchellh/mapstructure" +) + +func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "database": Factory, + }, + BuiltinRegistry: builtinplugins.Registry, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + cores := cluster.Cores + vault.TestWaitActive(t, cores[0].Core) + + os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile) + + sys := vault.TestDynamicSystemView(cores[0].Core, nil) + vault.TestAddTestPlugin(t, cores[0].Core, "postgresql-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_Postgres", []string{}, "") + vault.TestAddTestPlugin(t, cores[0].Core, "postgresql-database-plugin-muxed", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_PostgresMultiplexed", []string{}, "") + vault.TestAddTestPlugin(t, cores[0].Core, "mongodb-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_Mongo", []string{}, "") + vault.TestAddTestPlugin(t, cores[0].Core, "mongodb-database-plugin-muxed", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MongoMultiplexed", []string{}, "") + vault.TestAddTestPlugin(t, cores[0].Core, "mongodbatlas-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MongoAtlas", []string{}, "") + vault.TestAddTestPlugin(t, cores[0].Core, "mongodbatlas-database-plugin-muxed", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MongoAtlasMultiplexed", []string{}, "") + + return cluster, sys +} + +func TestBackend_PluginMain_Postgres(t *testing.T) { + if os.Getenv(pluginutil.PluginVaultVersionEnv) == "" { + return + } + + dbType, err := postgresql.New() + if err != nil { + t.Fatalf("Failed to initialize postgres: %s", err) + } + + v5.Serve(dbType.(v5.Database)) +} + +func TestBackend_PluginMain_PostgresMultiplexed(t *testing.T) { + if os.Getenv(pluginutil.PluginVaultVersionEnv) == "" { + return + } + + v5.ServeMultiplex(postgresql.New) +} + +func TestBackend_PluginMain_Mongo(t *testing.T) { + if os.Getenv(pluginutil.PluginVaultVersionEnv) == "" { + return + } + + dbType, err := mongodb.New() + if err != nil { + t.Fatalf("Failed to initialize mongodb: %s", err) + } + + v5.Serve(dbType.(v5.Database)) +} + +func TestBackend_PluginMain_MongoMultiplexed(t *testing.T) { + if os.Getenv(pluginutil.PluginVaultVersionEnv) == "" { + return + } + + 
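+	// ServeMultiplex runs this test binary as a multiplexed v5 plugin,
+	// letting a single plugin process serve multiple database connections.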
v5.ServeMultiplex(mongodb.New) +} + +func TestBackend_PluginMain_MongoAtlas(t *testing.T) { + if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" { + return + } + + dbType, err := mongodbatlas.New() + if err != nil { + t.Fatalf("Failed to initialize mongodbatlas: %s", err) + } + + v5.Serve(dbType.(v5.Database)) +} + +func TestBackend_PluginMain_MongoAtlasMultiplexed(t *testing.T) { + if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" { + return + } + + v5.ServeMultiplex(mongodbatlas.New) +} + +func TestBackend_RoleUpgrade(t *testing.T) { + storage := &logical.InmemStorage{} + backend := &databaseBackend{} + + roleExpected := &roleEntry{ + Statements: v4.Statements{ + CreationStatements: "test", + Creation: []string{"test"}, + }, + } + + entry, err := logical.StorageEntryJSON("role/test", &roleEntry{ + Statements: v4.Statements{ + CreationStatements: "test", + }, + }) + if err != nil { + t.Fatal(err) + } + if err := storage.Put(context.Background(), entry); err != nil { + t.Fatal(err) + } + + role, err := backend.Role(context.Background(), storage, "test") + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(role, roleExpected) { + t.Fatalf("bad role %#v, %#v", role, roleExpected) + } + + // Upgrade case + badJSON := `{"statments":{"creation_statments":"test","revocation_statements":"","rollback_statements":"","renew_statements":""}}` + entry = &logical.StorageEntry{ + Key: "role/test", + Value: []byte(badJSON), + } + if err := storage.Put(context.Background(), entry); err != nil { + t.Fatal(err) + } + + role, err = backend.Role(context.Background(), storage, "test") + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(role, roleExpected) { + t.Fatalf("bad role %#v, %#v", role, roleExpected) + } +} + +func TestBackend_config_connection(t *testing.T) { + var resp *logical.Response + var err error + + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + lb, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + b, ok := lb.(*databaseBackend) + if !ok { + t.Fatal("could not convert to database backend") + } + defer b.Cleanup(context.Background()) + + // Test creation + { + configData := map[string]interface{}{ + "connection_url": "sample_connection_url", + "someotherdata": "testing", + "plugin_name": "postgresql-database-plugin", + "verify_connection": false, + "allowed_roles": []string{"*"}, + "name": "plugin-test", + } + + configReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: configData, + } + + exists, err := b.connectionExistenceCheck()(context.Background(), configReq, &framework.FieldData{ + Raw: configData, + Schema: pathConfigurePluginConnection(b).Fields, + }) + if err != nil { + t.Fatal(err) + } + if exists { + t.Fatal("expected not exists") + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v\n", err, resp) + } + + expected := map[string]interface{}{ + "plugin_name": "postgresql-database-plugin", + "connection_details": map[string]interface{}{ + "connection_url": "sample_connection_url", + "someotherdata": "testing", + }, + "allowed_roles": []string{"*"}, + "root_credentials_rotate_statements": []string{}, + "password_policy": "", + "plugin_version": "", + } + configReq.Operation = logical.ReadOperation + resp, err = 
b.HandleRequest(namespace.RootContext(nil), configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + delete(resp.Data["connection_details"].(map[string]interface{}), "name") + if !reflect.DeepEqual(expected, resp.Data) { + t.Fatalf("bad: expected:%#v\nactual:%#v\n", expected, resp.Data) + } + } + + // Test existence check and an update to a single connection detail parameter + { + configData := map[string]interface{}{ + "connection_url": "sample_convection_url", + "verify_connection": false, + "name": "plugin-test", + } + + configReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: configData, + } + + exists, err := b.connectionExistenceCheck()(context.Background(), configReq, &framework.FieldData{ + Raw: configData, + Schema: pathConfigurePluginConnection(b).Fields, + }) + if err != nil { + t.Fatal(err) + } + if !exists { + t.Fatal("expected exists") + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v\n", err, resp) + } + + expected := map[string]interface{}{ + "plugin_name": "postgresql-database-plugin", + "connection_details": map[string]interface{}{ + "connection_url": "sample_convection_url", + "someotherdata": "testing", + }, + "allowed_roles": []string{"*"}, + "root_credentials_rotate_statements": []string{}, + "password_policy": "", + "plugin_version": "", + } + configReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(namespace.RootContext(nil), configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + delete(resp.Data["connection_details"].(map[string]interface{}), "name") + if !reflect.DeepEqual(expected, resp.Data) { + t.Fatalf("bad: expected:%#v\nactual:%#v\n", expected, resp.Data) + } + } + + // Test an update to a non-details value + { + configData := map[string]interface{}{ + "verify_connection": false, + "allowed_roles": []string{"flu", "barre"}, + "name": "plugin-test", + } + + configReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: configData, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v\n", err, resp) + } + + expected := map[string]interface{}{ + "plugin_name": "postgresql-database-plugin", + "connection_details": map[string]interface{}{ + "connection_url": "sample_convection_url", + "someotherdata": "testing", + }, + "allowed_roles": []string{"flu", "barre"}, + "root_credentials_rotate_statements": []string{}, + "password_policy": "", + "plugin_version": "", + } + configReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(namespace.RootContext(nil), configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + delete(resp.Data["connection_details"].(map[string]interface{}), "name") + if !reflect.DeepEqual(expected, resp.Data) { + t.Fatalf("bad: expected:%#v\nactual:%#v\n", expected, resp.Data) + } + } + + req := &logical.Request{ + Operation: logical.ListOperation, + Storage: config.StorageView, + Path: "config/", + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil { + t.Fatal(err) + } + keys := resp.Data["keys"].([]string) + key := keys[0] + if key != "plugin-test" { + 
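+		// The list should contain only the single connection configured above.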
t.Fatalf("bad key: %q", key) + } +} + +func TestBackend_BadConnectionString(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + defer b.Cleanup(context.Background()) + + cleanup, _ := postgreshelper.PrepareTestContainer(t, "13.4-buster") + defer cleanup() + + respCheck := func(req *logical.Request) { + t.Helper() + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil { + t.Fatalf("err: %v", err) + } + if resp == nil || !resp.IsError() { + t.Fatalf("expected error, resp:%#v", resp) + } + err = resp.Error() + if strings.Contains(err.Error(), "localhost") { + t.Fatalf("error should not contain connection info") + } + } + + // Configure a connection + data := map[string]interface{}{ + "connection_url": "postgresql://:pw@[localhost", + "plugin_name": "postgresql-database-plugin", + "allowed_roles": []string{"plugin-role-test"}, + } + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + respCheck(req) + + time.Sleep(1 * time.Second) +} + +func TestBackend_basic(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + defer b.Cleanup(context.Background()) + + cleanup, connURL := postgreshelper.PrepareTestContainer(t, "13.4-buster") + defer cleanup() + + // Configure a connection + data := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "allowed_roles": []string{"plugin-role-test"}, + } + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Create a role + data = map[string]interface{}{ + "db_name": "plugin-test", + "creation_statements": testRole, + "max_ttl": "10m", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + // Get creds + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + credsResp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (credsResp != nil && credsResp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, credsResp) + } + // Update the role with no max ttl + data = map[string]interface{}{ + "db_name": "plugin-test", + "creation_statements": testRole, + "default_ttl": "5m", + "max_ttl": 0, + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + // Get creds + 
data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + credsResp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (credsResp != nil && credsResp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, credsResp) + } + // Test for #3812 + if credsResp.Secret.TTL != 5*time.Minute { + t.Fatalf("unexpected TTL of %d", credsResp.Secret.TTL) + } + // Update the role with a max ttl + data = map[string]interface{}{ + "db_name": "plugin-test", + "creation_statements": testRole, + "default_ttl": "5m", + "max_ttl": "10m", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Get creds and revoke when the role stays in existence + { + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + credsResp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (credsResp != nil && credsResp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, credsResp) + } + // Test for #3812 + if credsResp.Secret.TTL != 5*time.Minute { + t.Fatalf("unexpected TTL of %d", credsResp.Secret.TTL) + } + if !testCredsExist(t, credsResp, connURL) { + t.Fatalf("Creds should exist") + } + + // Revoke creds + resp, err = b.HandleRequest(namespace.RootContext(nil), &logical.Request{ + Operation: logical.RevokeOperation, + Storage: config.StorageView, + Secret: &logical.Secret{ + InternalData: map[string]interface{}{ + "secret_type": "creds", + "username": credsResp.Data["username"], + "role": "plugin-role-test", + }, + }, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + if testCredsExist(t, credsResp, connURL) { + t.Fatalf("Creds should not exist") + } + } + + // Get creds and revoke using embedded revocation data + { + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + credsResp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (credsResp != nil && credsResp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, credsResp) + } + if !testCredsExist(t, credsResp, connURL) { + t.Fatalf("Creds should exist") + } + + // Delete role, forcing us to rely on embedded data + req = &logical.Request{ + Operation: logical.DeleteOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Revoke creds + resp, err = b.HandleRequest(namespace.RootContext(nil), &logical.Request{ + Operation: logical.RevokeOperation, + Storage: config.StorageView, + Secret: &logical.Secret{ + InternalData: map[string]interface{}{ + "secret_type": "creds", + "username": credsResp.Data["username"], + "role": "plugin-role-test", + "db_name": "plugin-test", + "revocation_statements": nil, + }, + }, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + if testCredsExist(t, credsResp, 
connURL) { + t.Fatalf("Creds should not exist") + } + } +} + +func TestBackend_connectionCrud(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + defer b.Cleanup(context.Background()) + + cleanup, connURL := postgreshelper.PrepareTestContainer(t, "13.4-buster") + defer cleanup() + + // Configure a connection + data := map[string]interface{}{ + "connection_url": "test", + "plugin_name": "postgresql-database-plugin", + "verify_connection": false, + } + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Create a role + data = map[string]interface{}{ + "db_name": "plugin-test", + "creation_statements": testRole, + "revocation_statements": defaultRevocationSQL, + "default_ttl": "5m", + "max_ttl": "10m", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Update the connection + data = map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "allowed_roles": []string{"plugin-role-test"}, + "username": "postgres", + "password": "secret", + "private_key": "PRIVATE_KEY", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + if len(resp.Warnings) == 0 { + t.Fatalf("expected warning about password in url %s, resp:%#v\n", connURL, resp) + } + + req.Operation = logical.ReadOperation + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + returnedConnectionDetails := resp.Data["connection_details"].(map[string]interface{}) + if strings.Contains(returnedConnectionDetails["connection_url"].(string), "secret") { + t.Fatal("password should not be found in the connection url") + } + // Covered by the filled out `expected` value below, but be explicit about this requirement. 
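+	// Raw secret material must never appear in a config read response.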
+ if _, exists := returnedConnectionDetails["password"]; exists { + t.Fatal("password should NOT be found in the returned config") + } + if _, exists := returnedConnectionDetails["private_key"]; exists { + t.Fatal("private_key should NOT be found in the returned config") + } + + // Replace connection url with templated version + req.Operation = logical.UpdateOperation + connURL = strings.ReplaceAll(connURL, "postgres:secret", "{{username}}:{{password}}") + data["connection_url"] = connURL + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Read connection + expected := map[string]interface{}{ + "plugin_name": "postgresql-database-plugin", + "connection_details": map[string]interface{}{ + "username": "postgres", + "connection_url": connURL, + }, + "allowed_roles": []string{"plugin-role-test"}, + "root_credentials_rotate_statements": []string(nil), + "password_policy": "", + "plugin_version": "", + } + req.Operation = logical.ReadOperation + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + delete(resp.Data["connection_details"].(map[string]interface{}), "name") + if diff := deep.Equal(resp.Data, expected); diff != nil { + t.Fatal(diff) + } + + // Reset Connection + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "reset/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Get creds + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + credsResp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (credsResp != nil && credsResp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, credsResp) + } + + credCheckURL := dbutil.QueryHelper(connURL, map[string]string{ + "username": "postgres", + "password": "secret", + }) + if !testCredsExist(t, credsResp, credCheckURL) { + t.Fatalf("Creds should exist") + } + + // Delete Connection + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.DeleteOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Read connection + req.Operation = logical.ReadOperation + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Should be empty + if resp != nil { + t.Fatal("Expected response to be nil") + } +} + +func TestBackend_roleCrud(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + lb, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + b, ok := lb.(*databaseBackend) + if !ok { + t.Fatal("could not convert to db backend") + } + defer b.Cleanup(context.Background()) + + cleanup, connURL := postgreshelper.PrepareTestContainer(t, "13.4-buster") + defer 
cleanup() + + // Configure a connection + data := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + } + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Test role creation + { + data = map[string]interface{}{ + "db_name": "plugin-test", + "creation_statements": testRole, + "revocation_statements": defaultRevocationSQL, + "default_ttl": "5m", + "max_ttl": "10m", + } + req = &logical.Request{ + Operation: logical.CreateOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Read the role + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + expected := v4.Statements{ + Creation: []string{strings.TrimSpace(testRole)}, + Revocation: []string{strings.TrimSpace(defaultRevocationSQL)}, + Rollback: []string{}, + Renewal: []string{}, + } + + actual := v4.Statements{ + Creation: resp.Data["creation_statements"].([]string), + Revocation: resp.Data["revocation_statements"].([]string), + Rollback: resp.Data["rollback_statements"].([]string), + Renewal: resp.Data["renew_statements"].([]string), + } + + if diff := deep.Equal(expected, actual); diff != nil { + t.Fatal(diff) + } + + if diff := deep.Equal(resp.Data["db_name"], "plugin-test"); diff != nil { + t.Fatal(diff) + } + if diff := deep.Equal(resp.Data["default_ttl"], float64(300)); diff != nil { + t.Fatal(diff) + } + if diff := deep.Equal(resp.Data["max_ttl"], float64(600)); diff != nil { + t.Fatal(diff) + } + } + + // Test role modification of TTL + { + data = map[string]interface{}{ + "name": "plugin-role-test", + "max_ttl": "7m", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v\n", err, resp) + } + + // Read the role + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + expected := v4.Statements{ + Creation: []string{strings.TrimSpace(testRole)}, + Revocation: []string{strings.TrimSpace(defaultRevocationSQL)}, + Rollback: []string{}, + Renewal: []string{}, + } + + actual := v4.Statements{ + Creation: resp.Data["creation_statements"].([]string), + Revocation: resp.Data["revocation_statements"].([]string), + Rollback: resp.Data["rollback_statements"].([]string), + Renewal: resp.Data["renew_statements"].([]string), + } + + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("Statements did not match, expected %#v, got 
%#v", expected, actual) + } + + if diff := deep.Equal(resp.Data["db_name"], "plugin-test"); diff != nil { + t.Fatal(diff) + } + if diff := deep.Equal(resp.Data["default_ttl"], float64(300)); diff != nil { + t.Fatal(diff) + } + if diff := deep.Equal(resp.Data["max_ttl"], float64(420)); diff != nil { + t.Fatal(diff) + } + + } + + // Test role modification of statements + { + data = map[string]interface{}{ + "name": "plugin-role-test", + "creation_statements": []string{testRole, testRole}, + "revocation_statements": []string{defaultRevocationSQL, defaultRevocationSQL}, + "rollback_statements": testRole, + "renew_statements": defaultRevocationSQL, + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(context.Background(), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v\n", err, resp) + } + + // Read the role + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(context.Background(), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + expected := v4.Statements{ + Creation: []string{strings.TrimSpace(testRole), strings.TrimSpace(testRole)}, + Rollback: []string{strings.TrimSpace(testRole)}, + Revocation: []string{strings.TrimSpace(defaultRevocationSQL), strings.TrimSpace(defaultRevocationSQL)}, + Renewal: []string{strings.TrimSpace(defaultRevocationSQL)}, + } + + actual := v4.Statements{ + Creation: resp.Data["creation_statements"].([]string), + Revocation: resp.Data["revocation_statements"].([]string), + Rollback: resp.Data["rollback_statements"].([]string), + Renewal: resp.Data["renew_statements"].([]string), + } + + if diff := deep.Equal(expected, actual); diff != nil { + t.Fatal(diff) + } + + if diff := deep.Equal(resp.Data["db_name"], "plugin-test"); diff != nil { + t.Fatal(diff) + } + if diff := deep.Equal(resp.Data["default_ttl"], float64(300)); diff != nil { + t.Fatal(diff) + } + if diff := deep.Equal(resp.Data["max_ttl"], float64(420)); diff != nil { + t.Fatal(diff) + } + } + + // Delete the role + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.DeleteOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Read the role + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Should be empty + if resp != nil { + t.Fatal("Expected response to be nil") + } +} + +func TestBackend_allowedRoles(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + defer b.Cleanup(context.Background()) + + cleanup, connURL := postgreshelper.PrepareTestContainer(t, "13.4-buster") + defer cleanup() + + 
// Configure a connection + data := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + } + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Create a denied and an allowed role + data = map[string]interface{}{ + "db_name": "plugin-test", + "creation_statements": testRole, + "default_ttl": "5m", + "max_ttl": "10m", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/denied", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + data = map[string]interface{}{ + "db_name": "plugin-test", + "creation_statements": testRole, + "default_ttl": "5m", + "max_ttl": "10m", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/allowed", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Get creds from denied role, should fail + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/denied", + Storage: config.StorageView, + Data: data, + } + credsResp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err == nil { + t.Fatal("expected error because role is denied") + } + + // update connection with glob allowed roles connection + data = map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "allowed_roles": "allow*", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Get creds, should work. + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/allowed", + Storage: config.StorageView, + Data: data, + } + credsResp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (credsResp != nil && credsResp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, credsResp) + } + + if !testCredsExist(t, credsResp, connURL) { + t.Fatalf("Creds should exist") + } + + // update connection with * allowed roles connection + data = map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "allowed_roles": "*", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Get creds, should work. 
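+	// The glob "allow*" set above matches the "allowed" role name.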
+ data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/allowed", + Storage: config.StorageView, + Data: data, + } + credsResp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (credsResp != nil && credsResp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, credsResp) + } + + if !testCredsExist(t, credsResp, connURL) { + t.Fatalf("Creds should exist") + } + + // update connection with allowed roles + data = map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "allowed_roles": "allow, allowed", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Get creds from denied role, should fail + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/denied", + Storage: config.StorageView, + Data: data, + } + credsResp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err == nil { + t.Fatal("expected error because role is denied") + } + + // Get creds from allowed role, should work. + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/allowed", + Storage: config.StorageView, + Data: data, + } + credsResp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (credsResp != nil && credsResp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, credsResp) + } + + if !testCredsExist(t, credsResp, connURL) { + t.Fatalf("Creds should exist") + } +} + +func TestBackend_RotateRootCredentials(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + defer b.Cleanup(context.Background()) + + cleanup, connURL := postgreshelper.PrepareTestContainer(t, "13.4-buster") + defer cleanup() + + connURL = strings.ReplaceAll(connURL, "postgres:secret", "{{username}}:{{password}}") + + // Configure a connection + data := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "allowed_roles": []string{"plugin-role-test"}, + "username": "postgres", + "password": "secret", + } + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Create a role + data = map[string]interface{}{ + "db_name": "plugin-test", + "creation_statements": testRole, + "max_ttl": "10m", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + // Get creds + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + credsResp, err := 
b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (credsResp != nil && credsResp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, credsResp) + } + + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "rotate-root/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (credsResp != nil && credsResp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, credsResp) + } + + dbConfig, err := b.(*databaseBackend).DatabaseConfig(context.Background(), config.StorageView, "plugin-test") + if err != nil { + t.Fatalf("err: %#v", err) + } + if dbConfig.ConnectionDetails["password"].(string) == "secret" { + t.Fatal("root credentials not rotated") + } + + // Get creds to make sure it still works + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + credsResp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (credsResp != nil && credsResp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, credsResp) + } +} + +func TestBackend_ConnectionURL_redacted(t *testing.T) { + cluster, sys := getCluster(t) + t.Cleanup(cluster.Cleanup) + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + defer b.Cleanup(context.Background()) + + tests := []struct { + name string + password string + }{ + { + name: "basic", + password: "secret", + }, + { + name: "encoded", + password: "yourStrong(!)Password", + }, + } + + respCheck := func(req *logical.Request) *logical.Response { + t.Helper() + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil { + t.Fatalf("err: %v", err) + } + if resp == nil { + t.Fatalf("expected a response, resp: %#v", resp) + } + + if resp.Error() != nil { + t.Fatalf("unexpected error in response, err: %#v", resp.Error()) + } + + return resp + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cleanup, u := postgreshelper.PrepareTestContainerWithPassword(t, "13.4-buster", tt.password) + t.Cleanup(cleanup) + + p, err := url.Parse(u) + if err != nil { + t.Fatal(err) + } + + actualPassword, _ := p.User.Password() + if tt.password != actualPassword { + t.Fatalf("expected computed URL password %#v, actual %#v", tt.password, actualPassword) + } + + // Configure a connection + data := map[string]interface{}{ + "connection_url": u, + "plugin_name": "postgresql-database-plugin", + "allowed_roles": []string{"plugin-role-test"}, + } + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("config/%s", tt.name), + Storage: config.StorageView, + Data: data, + } + respCheck(req) + + // read config + readReq := &logical.Request{ + Operation: logical.ReadOperation, + Path: req.Path, + Storage: config.StorageView, + } + resp := respCheck(readReq) + + var connDetails map[string]interface{} + if v, ok := resp.Data["connection_details"]; ok { + connDetails = v.(map[string]interface{}) + } + + if connDetails == nil { + t.Fatalf("response data missing connection_details, resp: %#v", resp) + } + + actual := connDetails["connection_url"].(string) + expected := p.Redacted() + if expected != actual { + t.Fatalf("expected redacted URL %q, actual %q", expected, actual) + } + + if tt.password != "" { + // extra test to 
ensure that URL.Redacted() is working as expected. + p, err = url.Parse(actual) + if err != nil { + t.Fatal(err) + } + if pp, _ := p.User.Password(); pp == tt.password { + t.Fatalf("password was not redacted by URL.Redacted()") + } + } + }) + } +} + +type hangingPlugin struct{} + +func (h hangingPlugin) Initialize(_ context.Context, req v5.InitializeRequest) (v5.InitializeResponse, error) { + return v5.InitializeResponse{ + Config: req.Config, + }, nil +} + +func (h hangingPlugin) NewUser(_ context.Context, _ v5.NewUserRequest) (v5.NewUserResponse, error) { + return v5.NewUserResponse{}, nil +} + +func (h hangingPlugin) UpdateUser(_ context.Context, _ v5.UpdateUserRequest) (v5.UpdateUserResponse, error) { + return v5.UpdateUserResponse{}, nil +} + +func (h hangingPlugin) DeleteUser(_ context.Context, _ v5.DeleteUserRequest) (v5.DeleteUserResponse, error) { + return v5.DeleteUserResponse{}, nil +} + +func (h hangingPlugin) Type() (string, error) { + return "hanging", nil +} + +func (h hangingPlugin) Close() error { + time.Sleep(1000 * time.Second) + return nil +} + +var _ v5.Database = (*hangingPlugin)(nil) + +func TestBackend_PluginMain_Hanging(t *testing.T) { + if os.Getenv(pluginutil.PluginVaultVersionEnv) == "" { + return + } + v5.Serve(&hangingPlugin{}) +} + +func TestBackend_AsyncClose(t *testing.T) { + // Test that having a plugin that takes a LONG time to close will not cause the cleanup function to take + // longer than 750ms. + cluster, sys := getCluster(t) + vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "hanging-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_Hanging", []string{}, "") + t.Cleanup(cluster.Cleanup) + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // Configure a connection + data := map[string]interface{}{ + "connection_url": "doesn't matter", + "plugin_name": "hanging-plugin", + "allowed_roles": []string{"plugin-role-test"}, + } + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/hang", + Storage: config.StorageView, + Data: data, + } + _, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil { + t.Fatalf("err: %v", err) + } + timeout := time.NewTimer(750 * time.Millisecond) + done := make(chan bool) + go func() { + b.Cleanup(context.Background()) + // check that clean can be called twice safely + b.Cleanup(context.Background()) + done <- true + }() + select { + case <-timeout.C: + t.Error("Hanging plugin caused Close() to take longer than 750ms") + case <-done: + } +} + +func TestNewDatabaseWrapper_IgnoresBuiltinVersion(t *testing.T) { + cluster, sys := getCluster(t) + t.Cleanup(cluster.Cleanup) + _, err := newDatabaseWrapper(context.Background(), "hana-database-plugin", "v1.0.0+builtin", sys, hclog.Default()) + if err != nil { + t.Fatal(err) + } +} + +func testCredsExist(t *testing.T, resp *logical.Response, connURL string) bool { + t.Helper() + var d struct { + Username string `mapstructure:"username"` + Password string `mapstructure:"password"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + t.Fatal(err) + } + log.Printf("[TRACE] Generated credentials: %v", d) + + db, err := sql.Open("pgx", connURL+"&timezone=utc") + if err != nil { + t.Fatal(err) + } + + returnedRows := func() int { + stmt, err := db.Prepare("SELECT DISTINCT schemaname FROM pg_tables WHERE has_table_privilege($1, 'information_schema.role_column_grants', 
'select');") + if err != nil { + return -1 + } + defer stmt.Close() + + rows, err := stmt.Query(d.Username) + if err != nil { + return -1 + } + defer rows.Close() + + i := 0 + for rows.Next() { + i++ + } + return i + } + + return returnedRows() == 2 +} + +const testRole = ` +CREATE ROLE "{{name}}" WITH + LOGIN + PASSWORD '{{password}}' + VALID UNTIL '{{expiration}}'; +GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}"; +` + +const defaultRevocationSQL = ` +REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM {{name}}; +REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM {{name}}; +REVOKE USAGE ON SCHEMA public FROM {{name}}; + +DROP ROLE IF EXISTS {{name}}; +` diff --git a/builtin/logical/database/credentials.go b/builtin/logical/database/credentials.go new file mode 100644 index 0000000..c43c264 --- /dev/null +++ b/builtin/logical/database/credentials.go @@ -0,0 +1,391 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package database + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "io" + "strings" + "time" + + "github.com/hashicorp/vault/helper/random" + "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/template" + "github.com/mitchellh/mapstructure" +) + +// passwordGenerator generates password credentials. +// A zero value passwordGenerator is usable. +type passwordGenerator struct { + // PasswordPolicy is the named password policy used to generate passwords. + // If empty (default), a random string of 20 characters will be generated. + PasswordPolicy string `mapstructure:"password_policy,omitempty"` +} + +// newPasswordGenerator returns a new passwordGenerator using the given config. +// Default values will be set on the returned passwordGenerator if not provided +// in the config. +func newPasswordGenerator(config map[string]interface{}) (passwordGenerator, error) { + var pg passwordGenerator + if err := mapstructure.WeakDecode(config, &pg); err != nil { + return pg, err + } + + return pg, nil +} + +// Generate generates a password credential using the configured password policy. +// Returns the generated password or an error. +func (pg passwordGenerator) generate(ctx context.Context, b *databaseBackend, wrapper databaseVersionWrapper) (string, error) { + if !wrapper.isV5() && !wrapper.isV4() { + return "", fmt.Errorf("no underlying database specified") + } + + // The database plugin generates the password if its interface is v4 + if wrapper.isV4() { + password, err := wrapper.v4.GenerateCredentials(ctx) + if err != nil { + return "", err + } + return password, nil + } + + if pg.PasswordPolicy == "" { + return random.DefaultStringGenerator.Generate(ctx, b.GetRandomReader()) + } + return b.System().GeneratePasswordFromPolicy(ctx, pg.PasswordPolicy) +} + +// configMap returns the configuration of the passwordGenerator +// as a map from string to string. +func (pg passwordGenerator) configMap() (map[string]interface{}, error) { + config := make(map[string]interface{}) + if err := mapstructure.WeakDecode(pg, &config); err != nil { + return nil, err + } + return config, nil +} + +// rsaKeyGenerator generates RSA key pair credentials. +// A zero value rsaKeyGenerator is usable. +type rsaKeyGenerator struct { + // Format is the output format of the generated private key. 
+ // Options include: 'pkcs8' (default) + Format string `mapstructure:"format,omitempty"` + + // KeyBits is the bit size of the RSA key to generate. + // Options include: 2048 (default), 3072, and 4096 + KeyBits int `mapstructure:"key_bits,omitempty"` +} + +// newRSAKeyGenerator returns a new rsaKeyGenerator using the given config. +// Default values will be set on the returned rsaKeyGenerator if not provided +// in the given config. +func newRSAKeyGenerator(config map[string]interface{}) (rsaKeyGenerator, error) { + var kg rsaKeyGenerator + if err := mapstructure.WeakDecode(config, &kg); err != nil { + return kg, err + } + + switch strings.ToLower(kg.Format) { + case "": + kg.Format = "pkcs8" + case "pkcs8": + default: + return kg, fmt.Errorf("invalid format: %v", kg.Format) + } + + switch kg.KeyBits { + case 0: + kg.KeyBits = 2048 + case 2048, 3072, 4096: + default: + return kg, fmt.Errorf("invalid key_bits: %v", kg.KeyBits) + } + + return kg, nil +} + +// Generate generates an RSA key pair. Returns a PEM-encoded, PKIX marshaled +// public key and a PEM-encoded private key marshaled into the configuration +// format (in that order) or an error. +func (kg *rsaKeyGenerator) generate(r io.Reader) ([]byte, []byte, error) { + reader := rand.Reader + if r != nil { + reader = r + } + + var keyBits int + switch kg.KeyBits { + case 0: + keyBits = 2048 + case 2048, 3072, 4096: + keyBits = kg.KeyBits + default: + return nil, nil, fmt.Errorf("invalid key_bits: %v", kg.KeyBits) + } + + key, err := rsa.GenerateKey(reader, keyBits) + if err != nil { + return nil, nil, err + } + + public, err := x509.MarshalPKIXPublicKey(key.Public()) + if err != nil { + return nil, nil, err + } + + var private []byte + switch strings.ToLower(kg.Format) { + case "", "pkcs8": + private, err = x509.MarshalPKCS8PrivateKey(key) + if err != nil { + return nil, nil, err + } + default: + return nil, nil, fmt.Errorf("invalid format: %v", kg.Format) + } + + publicBlock := &pem.Block{ + Type: "PUBLIC KEY", + Bytes: public, + } + privateBlock := &pem.Block{ + Type: "PRIVATE KEY", + Bytes: private, + } + + return pem.EncodeToMemory(publicBlock), pem.EncodeToMemory(privateBlock), nil +} + +// configMap returns the configuration of the rsaKeyGenerator +// as a map from string to string. +func (kg rsaKeyGenerator) configMap() (map[string]interface{}, error) { + config := make(map[string]interface{}) + if err := mapstructure.WeakDecode(kg, &config); err != nil { + return nil, err + } + return config, nil +} + +type ClientCertificateGenerator struct { + // CommonNameTemplate is username template to be used for the client certificate common name. + CommonNameTemplate string `mapstructure:"common_name_template,omitempty"` + + // CAPrivateKey is the PEM-encoded private key for the given ca_cert. + CAPrivateKey string `mapstructure:"ca_private_key,omitempty"` + + // CACert is the PEM-encoded CA certificate. + CACert string `mapstructure:"ca_cert,omitempty"` + + // KeyType specifies the desired key type. + // Options include: 'rsa', 'ed25519', 'ec'. + KeyType string `mapstructure:"key_type,omitempty"` + + // KeyBits is the number of bits to use for the generated keys. + // Options include: with key_type=rsa, 2048 (default), 3072, 4096; + // With key_type=ec, allowed values are: 224, 256 (default), 384, 521; + // Ignored with key_type=ed25519. + KeyBits int `mapstructure:"key_bits,omitempty"` + + // SignatureBits is the number of bits to use in the signature algorithm. + // Options include: 256 (default), 384, 512. 
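+	// A zero value is replaced with the 256-bit default by
+	// newClientCertificateGenerator.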
+ SignatureBits int `mapstructure:"signature_bits,omitempty"` + + parsedCABundle *certutil.ParsedCertBundle + cnProducer template.StringTemplate +} + +// newClientCertificateGenerator returns a new ClientCertificateGenerator +// using the given config. Default values will be set on the returned +// ClientCertificateGenerator if not provided in the config. +func newClientCertificateGenerator(config map[string]interface{}) (ClientCertificateGenerator, error) { + var cg ClientCertificateGenerator + if err := mapstructure.WeakDecode(config, &cg); err != nil { + return cg, err + } + + switch cg.KeyType { + case "rsa": + switch cg.KeyBits { + case 0: + cg.KeyBits = 2048 + case 2048, 3072, 4096: + default: + return cg, fmt.Errorf("invalid key_bits") + } + case "ec": + switch cg.KeyBits { + case 0: + cg.KeyBits = 256 + case 224, 256, 384, 521: + default: + return cg, fmt.Errorf("invalid key_bits") + } + case "ed25519": + // key_bits ignored + default: + return cg, fmt.Errorf("invalid key_type") + } + + switch cg.SignatureBits { + case 0: + cg.SignatureBits = 256 + case 256, 384, 512: + default: + return cg, fmt.Errorf("invalid signature_bits") + } + + if cg.CommonNameTemplate == "" { + return cg, fmt.Errorf("missing required common_name_template") + } + + // Validate the common name template + t, err := template.NewTemplate(template.Template(cg.CommonNameTemplate)) + if err != nil { + return cg, fmt.Errorf("failed to create template: %w", err) + } + + _, err = t.Generate(dbplugin.UsernameMetadata{}) + if err != nil { + return cg, fmt.Errorf("invalid common_name_template: %w", err) + } + cg.cnProducer = t + + if cg.CACert == "" { + return cg, fmt.Errorf("missing required ca_cert") + } + if cg.CAPrivateKey == "" { + return cg, fmt.Errorf("missing required ca_private_key") + } + parsedBundle, err := certutil.ParsePEMBundle(strings.Join([]string{cg.CACert, cg.CAPrivateKey}, "\n")) + if err != nil { + return cg, err + } + if parsedBundle.PrivateKey == nil { + return cg, fmt.Errorf("private key not found in the PEM bundle") + } + if parsedBundle.PrivateKeyType == certutil.UnknownPrivateKey { + return cg, fmt.Errorf("unknown private key found in the PEM bundle") + } + if parsedBundle.Certificate == nil { + return cg, fmt.Errorf("certificate not found in the PEM bundle") + } + if !parsedBundle.Certificate.IsCA { + return cg, fmt.Errorf("the given certificate is not marked for CA use") + } + if !parsedBundle.Certificate.BasicConstraintsValid { + return cg, fmt.Errorf("the given certificate does not meet basic constraints for CA use") + } + + certBundle, err := parsedBundle.ToCertBundle() + if err != nil { + return cg, fmt.Errorf("error converting raw values into cert bundle: %w", err) + } + + parsedCABundle, err := certBundle.ToParsedCertBundle() + if err != nil { + return cg, fmt.Errorf("failed to parse cert bundle: %w", err) + } + cg.parsedCABundle = parsedCABundle + + return cg, nil +} + +func (cg *ClientCertificateGenerator) generate(r io.Reader, expiration time.Time, userMeta dbplugin.UsernameMetadata) (*certutil.CertBundle, string, error) { + commonName, err := cg.cnProducer.Generate(userMeta) + if err != nil { + return nil, "", err + } + + // Set defaults + keyBits := cg.KeyBits + signatureBits := cg.SignatureBits + switch cg.KeyType { + case "rsa": + if keyBits == 0 { + keyBits = 2048 + } + if signatureBits == 0 { + signatureBits = 256 + } + case "ec": + if keyBits == 0 { + keyBits = 256 + } + if signatureBits == 0 { + if keyBits == 224 { + signatureBits = 256 + } else { + signatureBits = keyBits + 
} + } + case "ed25519": + // key_bits ignored + if signatureBits == 0 { + signatureBits = 256 + } + } + + subject := pkix.Name{ + CommonName: commonName, + // Additional subject DN options intentionally omitted for now + } + + creation := &certutil.CreationBundle{ + Params: &certutil.CreationParameters{ + Subject: subject, + KeyType: cg.KeyType, + KeyBits: cg.KeyBits, + SignatureBits: cg.SignatureBits, + NotAfter: expiration, + KeyUsage: x509.KeyUsageDigitalSignature, + ExtKeyUsage: certutil.ClientAuthExtKeyUsage, + BasicConstraintsValidForNonCA: false, + NotBeforeDuration: 30 * time.Second, + URLs: &certutil.URLEntries{ + IssuingCertificates: []string{}, + CRLDistributionPoints: []string{}, + OCSPServers: []string{}, + }, + }, + SigningBundle: &certutil.CAInfoBundle{ + ParsedCertBundle: *cg.parsedCABundle, + URLs: &certutil.URLEntries{ + IssuingCertificates: []string{}, + CRLDistributionPoints: []string{}, + OCSPServers: []string{}, + }, + }, + } + + parsedClientBundle, err := certutil.CreateCertificateWithRandomSource(creation, r) + if err != nil { + return nil, "", fmt.Errorf("unable to generate client certificate: %w", err) + } + + cb, err := parsedClientBundle.ToCertBundle() + if err != nil { + return nil, "", fmt.Errorf("error converting raw cert bundle to cert bundle: %w", err) + } + + return cb, subject.String(), nil +} + +// configMap returns the configuration of the ClientCertificateGenerator +// as a map from string to string. +func (cg ClientCertificateGenerator) configMap() (map[string]interface{}, error) { + config := make(map[string]interface{}) + if err := mapstructure.WeakDecode(cg, &config); err != nil { + return nil, err + } + return config, nil +} diff --git a/builtin/logical/database/credentials_test.go b/builtin/logical/database/credentials_test.go new file mode 100644 index 0000000..5e113e3 --- /dev/null +++ b/builtin/logical/database/credentials_test.go @@ -0,0 +1,792 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package database + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "testing" + + "github.com/hashicorp/vault/sdk/helper/base62" + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +// Test_newClientCertificateGenerator tests the ClientCertificateGenerator struct based on the config +func Test_newClientCertificateGenerator(t *testing.T) { + type args struct { + config map[string]interface{} + } + tests := []struct { + name string + args args + want ClientCertificateGenerator + wantErr bool + }{ + { + name: "newClientCertificateGenerator with nil config", + args: args{ + config: nil, + }, + want: ClientCertificateGenerator{ + CommonNameTemplate: "", + CAPrivateKey: "", + CACert: "", + KeyType: "", + KeyBits: 0, + SignatureBits: 0, + }, + }, + { + name: "newClientCertificateGenerator with zero value key_type", + args: args{ + config: map[string]interface{}{ + "key_type": "", + }, + }, + want: ClientCertificateGenerator{ + KeyType: "", + }, + }, + { + name: "newClientCertificateGenerator with rsa value key_type", + args: args{ + config: map[string]interface{}{ + "key_type": "rsa", + }, + }, + want: ClientCertificateGenerator{ + KeyType: "rsa", + KeyBits: 2048, + SignatureBits: 256, + }, + }, + { + name: "newClientCertificateGenerator with ec value key_type", + args: args{ + config: map[string]interface{}{ + "key_type": "ec", + }, + }, + want: ClientCertificateGenerator{ + KeyType: "ec", + KeyBits: 256, + SignatureBits: 256, + }, + }, + { + name: "newClientCertificateGenerator with ed25519 value key_type", + args: args{ + config: map[string]interface{}{ + "key_type": "ed25519", + }, + }, + want: ClientCertificateGenerator{ + KeyType: "ed25519", + SignatureBits: 256, + }, + }, + { + name: "newClientCertificateGenerator with invalid key_type", + args: args{ + config: map[string]interface{}{ + "key_type": "ece", + }, + }, + wantErr: true, + }, + { + name: "newClientCertificateGenerator with zero value key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "0", + }, + }, + want: ClientCertificateGenerator{ + KeyBits: 0, + }, + }, + { + name: "newClientCertificateGenerator with 2048 value key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "2048", + }, + }, + want: ClientCertificateGenerator{ + KeyBits: 2048, + }, + }, + { + name: "newClientCertificateGenerator with 3072 value key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "3072", + }, + }, + want: ClientCertificateGenerator{ + KeyBits: 3072, + }, + }, + { + name: "newClientCertificateGenerator with 4096 value key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "4096", + }, + }, + want: ClientCertificateGenerator{ + KeyBits: 4096, + }, + }, + { + name: "newClientCertificateGenerator with 224 value key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "224", + }, + }, + want: ClientCertificateGenerator{ + KeyBits: 224, + }, + }, + { + name: "newClientCertificateGenerator with 256 value key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "256", + }, + }, + want: ClientCertificateGenerator{ + KeyBits: 256, + }, + }, + { + name: "newClientCertificateGenerator with 384 value key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "384", + }, + }, + want: ClientCertificateGenerator{ + KeyBits: 384, + }, + }, + { + name: "newClientCertificateGenerator with 521 value 
key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "521", + }, + }, + want: ClientCertificateGenerator{ + KeyBits: 521, + }, + }, + { + name: "newClientCertificateGenerator with invalid key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "4097", + }, + }, + wantErr: true, + }, + { + name: "newClientCertificateGenerator with zero value signature_bits", + args: args{ + config: map[string]interface{}{ + "signature_bits": "0", + }, + }, + want: ClientCertificateGenerator{ + SignatureBits: 0, + }, + }, + { + name: "newClientCertificateGenerator with 256 value signature_bits", + args: args{ + config: map[string]interface{}{ + "signature_bits": "256", + }, + }, + want: ClientCertificateGenerator{ + SignatureBits: 256, + }, + }, + { + name: "newClientCertificateGenerator with 384 value signature_bits", + args: args{ + config: map[string]interface{}{ + "signature_bits": "384", + }, + }, + want: ClientCertificateGenerator{ + SignatureBits: 384, + }, + }, + { + name: "newClientCertificateGenerator with 512 value signature_bits", + args: args{ + config: map[string]interface{}{ + "signature_bits": "512", + }, + }, + want: ClientCertificateGenerator{ + SignatureBits: 512, + }, + }, + { + name: "newClientCertificateGenerator with invalid signature_bits", + args: args{ + config: map[string]interface{}{ + "signature_bits": "612", + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := newClientCertificateGenerator(tt.args.config) + if tt.wantErr { + assert.Error(t, err) + return + } + assert.Equal(t, tt.want, got) + }) + } +} + +func Test_newPasswordGenerator(t *testing.T) { + type args struct { + config map[string]interface{} + } + tests := []struct { + name string + args args + want passwordGenerator + wantErr bool + }{ + { + name: "newPasswordGenerator with nil config", + args: args{ + config: nil, + }, + want: passwordGenerator{ + PasswordPolicy: "", + }, + }, + { + name: "newPasswordGenerator without password_policy", + args: args{ + config: map[string]interface{}{}, + }, + want: passwordGenerator{ + PasswordPolicy: "", + }, + }, + { + name: "newPasswordGenerator with password_policy", + args: args{ + config: map[string]interface{}{ + "password_policy": "test-policy", + }, + }, + want: passwordGenerator{ + PasswordPolicy: "test-policy", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := newPasswordGenerator(tt.args.config) + if tt.wantErr { + assert.Error(t, err) + return + } + assert.Equal(t, tt.want, got) + }) + } +} + +func Test_newRSAKeyGenerator(t *testing.T) { + type args struct { + config map[string]interface{} + } + tests := []struct { + name string + args args + want rsaKeyGenerator + wantErr bool + }{ + { + name: "newRSAKeyGenerator with nil config", + args: args{ + config: nil, + }, + want: rsaKeyGenerator{ + Format: "pkcs8", + KeyBits: 2048, + }, + }, + { + name: "newRSAKeyGenerator with empty config", + args: args{ + config: map[string]interface{}{}, + }, + want: rsaKeyGenerator{ + Format: "pkcs8", + KeyBits: 2048, + }, + }, + { + name: "newRSAKeyGenerator with zero value format", + args: args{ + config: map[string]interface{}{ + "format": "", + }, + }, + want: rsaKeyGenerator{ + Format: "pkcs8", + KeyBits: 2048, + }, + }, + { + name: "newRSAKeyGenerator with zero value key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "0", + }, + }, + want: rsaKeyGenerator{ + Format: "pkcs8", + KeyBits: 2048, + }, + }, + { + name: 
"newRSAKeyGenerator with format", + args: args{ + config: map[string]interface{}{ + "format": "pkcs8", + }, + }, + want: rsaKeyGenerator{ + Format: "pkcs8", + KeyBits: 2048, + }, + }, + { + name: "newRSAKeyGenerator with format case insensitive", + args: args{ + config: map[string]interface{}{ + "format": "PKCS8", + }, + }, + want: rsaKeyGenerator{ + Format: "PKCS8", + KeyBits: 2048, + }, + }, + { + name: "newRSAKeyGenerator with 3072 key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "3072", + }, + }, + want: rsaKeyGenerator{ + Format: "pkcs8", + KeyBits: 3072, + }, + }, + { + name: "newRSAKeyGenerator with 4096 key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "4096", + }, + }, + want: rsaKeyGenerator{ + Format: "pkcs8", + KeyBits: 4096, + }, + }, + { + name: "newRSAKeyGenerator with invalid key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "4097", + }, + }, + wantErr: true, + }, + { + name: "newRSAKeyGenerator with invalid format", + args: args{ + config: map[string]interface{}{ + "format": "pkcs1", + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := newRSAKeyGenerator(tt.args.config) + if tt.wantErr { + assert.Error(t, err) + return + } + assert.Equal(t, tt.want, got) + }) + } +} + +func Test_passwordGenerator_generate(t *testing.T) { + config := logical.TestBackendConfig() + b := Backend(config) + b.Setup(context.Background(), config) + + type args struct { + config map[string]interface{} + mock func() interface{} + passGen logical.PasswordGenerator + } + tests := []struct { + name string + args args + wantRegexp string + wantErr bool + }{ + { + name: "wrapper missing v4 and v5 interface", + args: args{ + mock: func() interface{} { + return nil + }, + }, + wantErr: true, + }, + { + name: "v4: generate password using GenerateCredentials", + args: args{ + mock: func() interface{} { + v4Mock := new(mockLegacyDatabase) + v4Mock.On("GenerateCredentials", mock.Anything). + Return("v4-generated-password", nil). 
+ Times(1) + return v4Mock + }, + }, + wantRegexp: "^v4-generated-password$", + }, + { + name: "v5: generate password without policy", + args: args{ + mock: func() interface{} { + return new(mockNewDatabase) + }, + }, + wantRegexp: "^[a-zA-Z0-9-]{20}$", + }, + { + name: "v5: generate password with non-existing policy", + args: args{ + config: map[string]interface{}{ + "password_policy": "not-created", + }, + mock: func() interface{} { + return new(mockNewDatabase) + }, + }, + wantErr: true, + }, + { + name: "v5: generate password with existing policy", + args: args{ + config: map[string]interface{}{ + "password_policy": "test-policy", + }, + mock: func() interface{} { + return new(mockNewDatabase) + }, + passGen: func() (string, error) { + return base62.Random(30) + }, + }, + wantRegexp: "^[a-zA-Z0-9]{30}$", + }, + { + name: "v5: generate password with existing policy static", + args: args{ + config: map[string]interface{}{ + "password_policy": "test-policy", + }, + mock: func() interface{} { + return new(mockNewDatabase) + }, + passGen: func() (string, error) { + return "policy-generated-password", nil + }, + }, + wantRegexp: "^policy-generated-password$", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Set up the version wrapper with a mock database implementation + wrapper := databaseVersionWrapper{} + switch m := tt.args.mock().(type) { + case *mockNewDatabase: + wrapper.v5 = m + case *mockLegacyDatabase: + wrapper.v4 = m + } + + // Set the password policy for the test case + config.System.(*logical.StaticSystemView).SetPasswordPolicy( + "test-policy", tt.args.passGen) + + // Generate the password + pg, err := newPasswordGenerator(tt.args.config) + got, err := pg.generate(context.Background(), b, wrapper) + if tt.wantErr { + assert.Error(t, err) + return + } + assert.Regexp(t, tt.wantRegexp, got) + + // Assert all expected calls took place on the mock + if m, ok := wrapper.v5.(*mockNewDatabase); ok { + m.AssertExpectations(t) + } + if m, ok := wrapper.v4.(*mockLegacyDatabase); ok { + m.AssertExpectations(t) + } + }) + } +} + +func Test_passwordGenerator_configMap(t *testing.T) { + type args struct { + config map[string]interface{} + } + tests := []struct { + name string + args args + want map[string]interface{} + }{ + { + name: "nil config results in empty map", + args: args{ + config: nil, + }, + want: map[string]interface{}{}, + }, + { + name: "empty config results in empty map", + args: args{ + config: map[string]interface{}{}, + }, + want: map[string]interface{}{}, + }, + { + name: "input config is equal to output config", + args: args{ + config: map[string]interface{}{ + "password_policy": "test-policy", + }, + }, + want: map[string]interface{}{ + "password_policy": "test-policy", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + pg, err := newPasswordGenerator(tt.args.config) + assert.NoError(t, err) + cm, err := pg.configMap() + assert.NoError(t, err) + assert.Equal(t, tt.want, cm) + }) + } +} + +func Test_rsaKeyGenerator_generate(t *testing.T) { + type args struct { + config map[string]interface{} + } + tests := []struct { + name string + args args + }{ + { + name: "generate RSA key with nil default config", + args: args{ + config: nil, + }, + }, + { + name: "generate RSA key with empty default config", + args: args{ + config: map[string]interface{}{}, + }, + }, + { + name: "generate RSA key with 2048 key_bits and format", + args: args{ + config: map[string]interface{}{ + "key_bits": "2048", + "format": "pkcs8", 
+ }, + }, + }, + { + name: "generate RSA key with 2048 key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "2048", + }, + }, + }, + { + name: "generate RSA key with 3072 key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "3072", + }, + }, + }, + { + name: "generate RSA key with 4096 key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": "4096", + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Generate the RSA key pair + kg, err := newRSAKeyGenerator(tt.args.config) + public, private, err := kg.generate(rand.Reader) + assert.NoError(t, err) + assert.NotEmpty(t, public) + assert.NotEmpty(t, private) + + // Decode the public and private key PEMs + pubBlock, pubRest := pem.Decode(public) + privBlock, privRest := pem.Decode(private) + assert.NotNil(t, pubBlock) + assert.Empty(t, pubRest) + assert.Equal(t, "PUBLIC KEY", pubBlock.Type) + assert.NotNil(t, privBlock) + assert.Empty(t, privRest) + assert.Equal(t, "PRIVATE KEY", privBlock.Type) + + // Assert that we can parse the public key PEM block + pub, err := x509.ParsePKIXPublicKey(pubBlock.Bytes) + assert.NoError(t, err) + assert.NotNil(t, pub) + assert.IsType(t, &rsa.PublicKey{}, pub) + + // Assert that we can parse the private key PEM block in + // the configured format + switch kg.Format { + case "pkcs8": + priv, err := x509.ParsePKCS8PrivateKey(privBlock.Bytes) + assert.NoError(t, err) + assert.NotNil(t, priv) + assert.IsType(t, &rsa.PrivateKey{}, priv) + default: + t.Fatal("unknown format") + } + }) + } +} + +func Test_rsaKeyGenerator_configMap(t *testing.T) { + type args struct { + config map[string]interface{} + } + tests := []struct { + name string + args args + want map[string]interface{} + }{ + { + name: "nil config results in defaults", + args: args{ + config: nil, + }, + want: map[string]interface{}{ + "format": "pkcs8", + "key_bits": 2048, + }, + }, + { + name: "empty config results in defaults", + args: args{ + config: map[string]interface{}{}, + }, + want: map[string]interface{}{ + "format": "pkcs8", + "key_bits": 2048, + }, + }, + { + name: "config with format", + args: args{ + config: map[string]interface{}{ + "format": "pkcs8", + }, + }, + want: map[string]interface{}{ + "format": "pkcs8", + "key_bits": 2048, + }, + }, + { + name: "config with key_bits", + args: args{ + config: map[string]interface{}{ + "key_bits": 4096, + }, + }, + want: map[string]interface{}{ + "format": "pkcs8", + "key_bits": 4096, + }, + }, + { + name: "config with format and key_bits", + args: args{ + config: map[string]interface{}{ + "format": "pkcs8", + "key_bits": 3072, + }, + }, + want: map[string]interface{}{ + "format": "pkcs8", + "key_bits": 3072, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + kg, err := newRSAKeyGenerator(tt.args.config) + assert.NoError(t, err) + cm, err := kg.configMap() + assert.NoError(t, err) + assert.Equal(t, tt.want, cm) + }) + } +} diff --git a/builtin/logical/database/dbplugin/plugin_test.go b/builtin/logical/database/dbplugin/plugin_test.go new file mode 100644 index 0000000..2b5f7a9 --- /dev/null +++ b/builtin/logical/database/dbplugin/plugin_test.go @@ -0,0 +1,278 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package dbplugin_test + +import ( + "context" + "errors" + "os" + "testing" + "time" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/namespace" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/database/dbplugin" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +type mockPlugin struct { + users map[string][]string +} + +var _ dbplugin.Database = &mockPlugin{} + +func (m *mockPlugin) Type() (string, error) { return "mock", nil } +func (m *mockPlugin) CreateUser(_ context.Context, statements dbplugin.Statements, usernameConf dbplugin.UsernameConfig, expiration time.Time) (username string, password string, err error) { + err = errors.New("err") + if usernameConf.DisplayName == "" || expiration.IsZero() { + return "", "", err + } + + if _, ok := m.users[usernameConf.DisplayName]; ok { + return "", "", err + } + + m.users[usernameConf.DisplayName] = []string{password} + + return usernameConf.DisplayName, "test", nil +} + +func (m *mockPlugin) RenewUser(_ context.Context, statements dbplugin.Statements, username string, expiration time.Time) error { + err := errors.New("err") + if username == "" || expiration.IsZero() { + return err + } + + if _, ok := m.users[username]; !ok { + return err + } + + return nil +} + +func (m *mockPlugin) RevokeUser(_ context.Context, statements dbplugin.Statements, username string) error { + err := errors.New("err") + if username == "" { + return err + } + + if _, ok := m.users[username]; !ok { + return err + } + + delete(m.users, username) + return nil +} + +func (m *mockPlugin) RotateRootCredentials(_ context.Context, statements []string) (map[string]interface{}, error) { + return nil, nil +} + +func (m *mockPlugin) Init(_ context.Context, conf map[string]interface{}, _ bool) (map[string]interface{}, error) { + err := errors.New("err") + if len(conf) != 1 { + return nil, err + } + + return conf, nil +} + +func (m *mockPlugin) Initialize(_ context.Context, conf map[string]interface{}, _ bool) error { + err := errors.New("err") + if len(conf) != 1 { + return err + } + + return nil +} + +func (m *mockPlugin) Close() error { + m.users = nil + return nil +} + +func (m *mockPlugin) GenerateCredentials(ctx context.Context) (password string, err error) { + return password, err +} + +func (m *mockPlugin) SetCredentials(ctx context.Context, statements dbplugin.Statements, staticConfig dbplugin.StaticUserConfig) (username string, password string, err error) { + return username, password, err +} + +func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) { + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + cores := cluster.Cores + + sys := vault.TestDynamicSystemView(cores[0].Core, nil) + vault.TestAddTestPlugin(t, cores[0].Core, "test-plugin", consts.PluginTypeDatabase, "", "TestPlugin_GRPC_Main", []string{}, "") + + return cluster, sys +} + +// This is not an actual test case, it's a helper function that will be executed +// by the go-plugin client via an exec call. 
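+// Roughly: getCluster registers this same test binary in the plugin catalog
+// with TestPlugin_GRPC_Main as its entry point, go-plugin re-executes the
+// binary with the plugin environment variables set, the guard below falls
+// through, and dbplugin.Serve blocks, serving mockPlugin over the plugin
+// protocol until the parent test tears the cluster down.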
+func TestPlugin_GRPC_Main(t *testing.T) { + if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" && os.Getenv(pluginutil.PluginMetadataModeEnv) != "true" { + return + } + + plugin := &mockPlugin{ + users: make(map[string][]string), + } + + args := []string{"--tls-skip-verify=true"} + + apiClientMeta := &api.PluginAPIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(args) + + dbplugin.Serve(plugin, api.VaultPluginTLSProvider(apiClientMeta.GetTLSConfig())) +} + +func TestPlugin_Init(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + dbRaw, err := dbplugin.PluginFactoryVersion(namespace.RootContext(nil), "test-plugin", "", sys, log.NewNullLogger()) + if err != nil { + t.Fatalf("err: %s", err) + } + + connectionDetails := map[string]interface{}{ + "test": 1, + } + + _, err = dbRaw.Init(context.Background(), connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = dbRaw.Close() + if err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestPlugin_CreateUser(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + db, err := dbplugin.PluginFactoryVersion(namespace.RootContext(nil), "test-plugin", "", sys, log.NewNullLogger()) + if err != nil { + t.Fatalf("err: %s", err) + } + defer db.Close() + + connectionDetails := map[string]interface{}{ + "test": 1, + } + + _, err = db.Init(context.Background(), connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + usernameConf := dbplugin.UsernameConfig{ + DisplayName: "test", + RoleName: "test", + } + + us, pw, err := db.CreateUser(context.Background(), dbplugin.Statements{}, usernameConf, time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } + if us != "test" || pw != "test" { + t.Fatal("expected username and password to be 'test'") + } + + // try and save the same user again to verify it saved the first time, this + // should return an error + _, _, err = db.CreateUser(context.Background(), dbplugin.Statements{}, usernameConf, time.Now().Add(time.Minute)) + if err == nil { + t.Fatal("expected an error, user wasn't created correctly") + } +} + +func TestPlugin_RenewUser(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + db, err := dbplugin.PluginFactoryVersion(namespace.RootContext(nil), "test-plugin", "", sys, log.NewNullLogger()) + if err != nil { + t.Fatalf("err: %s", err) + } + defer db.Close() + + connectionDetails := map[string]interface{}{ + "test": 1, + } + _, err = db.Init(context.Background(), connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + usernameConf := dbplugin.UsernameConfig{ + DisplayName: "test", + RoleName: "test", + } + + us, _, err := db.CreateUser(context.Background(), dbplugin.Statements{}, usernameConf, time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = db.RenewUser(context.Background(), dbplugin.Statements{}, us, time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestPlugin_RevokeUser(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + db, err := dbplugin.PluginFactoryVersion(namespace.RootContext(nil), "test-plugin", "", sys, log.NewNullLogger()) + if err != nil { + t.Fatalf("err: %s", err) + } + defer db.Close() + + connectionDetails := map[string]interface{}{ + "test": 1, + } + _, err = db.Init(context.Background(), connectionDetails, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + usernameConf := dbplugin.UsernameConfig{ + 
DisplayName: "test", + RoleName: "test", + } + + us, _, err := db.CreateUser(context.Background(), dbplugin.Statements{}, usernameConf, time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Test default revoke statements + err = db.RevokeUser(context.Background(), dbplugin.Statements{}, us) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Try adding the same username back so we can verify it was removed + _, _, err = db.CreateUser(context.Background(), dbplugin.Statements{}, usernameConf, time.Now().Add(time.Minute)) + if err != nil { + t.Fatalf("err: %s", err) + } +} diff --git a/builtin/logical/database/mocks_test.go b/builtin/logical/database/mocks_test.go new file mode 100644 index 0000000..afb1bbc --- /dev/null +++ b/builtin/logical/database/mocks_test.go @@ -0,0 +1,104 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package database + +import ( + "context" + "time" + + v4 "github.com/hashicorp/vault/sdk/database/dbplugin" + v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/stretchr/testify/mock" +) + +var _ v5.Database = &mockNewDatabase{} + +type mockNewDatabase struct { + mock.Mock +} + +func (m *mockNewDatabase) Initialize(ctx context.Context, req v5.InitializeRequest) (v5.InitializeResponse, error) { + args := m.Called(ctx, req) + return args.Get(0).(v5.InitializeResponse), args.Error(1) +} + +func (m *mockNewDatabase) NewUser(ctx context.Context, req v5.NewUserRequest) (v5.NewUserResponse, error) { + args := m.Called(ctx, req) + return args.Get(0).(v5.NewUserResponse), args.Error(1) +} + +func (m *mockNewDatabase) UpdateUser(ctx context.Context, req v5.UpdateUserRequest) (v5.UpdateUserResponse, error) { + args := m.Called(ctx, req) + return args.Get(0).(v5.UpdateUserResponse), args.Error(1) +} + +func (m *mockNewDatabase) DeleteUser(ctx context.Context, req v5.DeleteUserRequest) (v5.DeleteUserResponse, error) { + args := m.Called(ctx, req) + return args.Get(0).(v5.DeleteUserResponse), args.Error(1) +} + +func (m *mockNewDatabase) Type() (string, error) { + args := m.Called() + return args.String(0), args.Error(1) +} + +func (m *mockNewDatabase) Close() error { + args := m.Called() + return args.Error(0) +} + +var _ v4.Database = &mockLegacyDatabase{} + +type mockLegacyDatabase struct { + mock.Mock +} + +func (m *mockLegacyDatabase) CreateUser(ctx context.Context, statements v4.Statements, usernameConfig v4.UsernameConfig, expiration time.Time) (username string, password string, err error) { + args := m.Called(ctx, statements, usernameConfig, expiration) + return args.String(0), args.String(1), args.Error(2) +} + +func (m *mockLegacyDatabase) RenewUser(ctx context.Context, statements v4.Statements, username string, expiration time.Time) error { + args := m.Called(ctx, statements, username, expiration) + return args.Error(0) +} + +func (m *mockLegacyDatabase) RevokeUser(ctx context.Context, statements v4.Statements, username string) error { + args := m.Called(ctx, statements, username) + return args.Error(0) +} + +func (m *mockLegacyDatabase) RotateRootCredentials(ctx context.Context, statements []string) (config map[string]interface{}, err error) { + args := m.Called(ctx, statements) + return args.Get(0).(map[string]interface{}), args.Error(1) +} + +func (m *mockLegacyDatabase) GenerateCredentials(ctx context.Context) (string, error) { + args := m.Called(ctx) + return args.String(0), args.Error(1) +} + +func (m *mockLegacyDatabase) SetCredentials(ctx context.Context, statements v4.Statements, 
staticConfig v4.StaticUserConfig) (username string, password string, err error) { + args := m.Called(ctx, statements, staticConfig) + return args.String(0), args.String(1), args.Error(2) +} + +func (m *mockLegacyDatabase) Init(ctx context.Context, config map[string]interface{}, verifyConnection bool) (saveConfig map[string]interface{}, err error) { + args := m.Called(ctx, config, verifyConnection) + return args.Get(0).(map[string]interface{}), args.Error(1) +} + +func (m *mockLegacyDatabase) Type() (string, error) { + args := m.Called() + return args.String(0), args.Error(1) +} + +func (m *mockLegacyDatabase) Close() error { + args := m.Called() + return args.Error(0) +} + +func (m *mockLegacyDatabase) Initialize(ctx context.Context, config map[string]interface{}, verifyConnection bool) (err error) { + panic("Initialize should not be called") +} diff --git a/builtin/logical/database/mockv4.go b/builtin/logical/database/mockv4.go new file mode 100644 index 0000000..a85f307 --- /dev/null +++ b/builtin/logical/database/mockv4.go @@ -0,0 +1,121 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package database + +import ( + "context" + "fmt" + "time" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + v4 "github.com/hashicorp/vault/sdk/database/dbplugin" +) + +const mockV4Type = "mockv4" + +// MockDatabaseV4 is an implementation of Database interface +type MockDatabaseV4 struct { + config map[string]interface{} +} + +var _ v4.Database = &MockDatabaseV4{} + +// New returns a new in-memory instance +func NewV4() (interface{}, error) { + return MockDatabaseV4{}, nil +} + +// RunV4 instantiates a MongoDB object, and runs the RPC server for the plugin +func RunV4(apiTLSConfig *api.TLSConfig) error { + dbType, err := NewV4() + if err != nil { + return err + } + + v4.Serve(dbType.(v4.Database), api.VaultPluginTLSProvider(apiTLSConfig)) + + return nil +} + +func (m MockDatabaseV4) Init(ctx context.Context, config map[string]interface{}, verifyConnection bool) (saveConfig map[string]interface{}, err error) { + log.Default().Info("Init called", + "config", config, + "verifyConnection", verifyConnection) + + return config, nil +} + +func (m MockDatabaseV4) Initialize(ctx context.Context, config map[string]interface{}, verifyConnection bool) (err error) { + _, err = m.Init(ctx, config, verifyConnection) + return err +} + +func (m MockDatabaseV4) CreateUser(ctx context.Context, statements v4.Statements, usernameConfig v4.UsernameConfig, expiration time.Time) (username string, password string, err error) { + log.Default().Info("CreateUser called", + "statements", statements, + "usernameConfig", usernameConfig, + "expiration", expiration) + + now := time.Now() + user := fmt.Sprintf("mockv4_user_%s", now.Format(time.RFC3339)) + pass, err := m.GenerateCredentials(ctx) + if err != nil { + return "", "", fmt.Errorf("failed to generate credentials: %w", err) + } + return user, pass, nil +} + +func (m MockDatabaseV4) RenewUser(ctx context.Context, statements v4.Statements, username string, expiration time.Time) error { + log.Default().Info("RenewUser called", + "statements", statements, + "username", username, + "expiration", expiration) + + return nil +} + +func (m MockDatabaseV4) RevokeUser(ctx context.Context, statements v4.Statements, username string) error { + log.Default().Info("RevokeUser called", + "statements", statements, + "username", username) + + return nil +} + +func (m MockDatabaseV4) RotateRootCredentials(ctx context.Context, statements []string) 
(config map[string]interface{}, err error) {
+	log.Default().Info("RotateRootCredentials called",
+		"statements", statements)
+
+	newPassword, err := m.GenerateCredentials(ctx)
+	if err != nil {
+		return config, fmt.Errorf("failed to generate credentials: %w", err)
+	}
+	// config is a named return value and starts out nil, so build the
+	// rotated config here rather than writing into a nil map.
+	config = map[string]interface{}{"password": newPassword}
+
+	return config, nil
+}
+
+func (m MockDatabaseV4) SetCredentials(ctx context.Context, statements v4.Statements, staticConfig v4.StaticUserConfig) (username string, password string, err error) {
+	log.Default().Info("SetCredentials called",
+		"statements", statements,
+		"staticConfig", staticConfig)
+	return "", "", nil
+}
+
+func (m MockDatabaseV4) GenerateCredentials(ctx context.Context) (password string, err error) {
+	now := time.Now()
+	pass := fmt.Sprintf("mockv4_password_%s", now.Format(time.RFC3339))
+	return pass, nil
+}
+
+func (m MockDatabaseV4) Type() (string, error) {
+	log.Default().Info("Type called")
+	return mockV4Type, nil
+}
+
+func (m MockDatabaseV4) Close() error {
+	log.Default().Info("Close called")
+	return nil
+}
diff --git a/builtin/logical/database/mockv5.go b/builtin/logical/database/mockv5.go
new file mode 100644
index 0000000..fecccfe
--- /dev/null
+++ b/builtin/logical/database/mockv5.go
@@ -0,0 +1,94 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package database
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	log "github.com/hashicorp/go-hclog"
+	v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
+)
+
+const mockV5Type = "mockv5"
+
+// MockDatabaseV5 is an implementation of the v5 Database interface
+type MockDatabaseV5 struct {
+	config map[string]interface{}
+}
+
+var _ v5.Database = &MockDatabaseV5{}
+
+// New returns a new in-memory instance
+func New() (interface{}, error) {
+	db := MockDatabaseV5{}
+	return db, nil
+}
+
+// RunV5 instantiates a MockDatabaseV5 object and runs the RPC server for the plugin
+func RunV5() error {
+	dbType, err := New()
+	if err != nil {
+		return err
+	}
+
+	v5.Serve(dbType.(v5.Database))
+
+	return nil
+}
+
+// RunV6Multiplexed instantiates MockDatabaseV5 objects and runs a multiplexed RPC server for the plugin
+func RunV6Multiplexed() error {
+	v5.ServeMultiplex(New)
+
+	return nil
+}
+
+func (m MockDatabaseV5) Initialize(ctx context.Context, req v5.InitializeRequest) (v5.InitializeResponse, error) {
+	log.Default().Info("Initialize called",
+		"req", req)
+
+	config := req.Config
+	config["from-plugin"] = "this value is from the plugin itself"
+
+	resp := v5.InitializeResponse{
+		Config: req.Config,
+	}
+	return resp, nil
+}
+
+func (m MockDatabaseV5) NewUser(ctx context.Context, req v5.NewUserRequest) (v5.NewUserResponse, error) {
+	log.Default().Info("NewUser called",
+		"req", req)
+
+	now := time.Now()
+	user := fmt.Sprintf("mockv5_user_%s", now.Format(time.RFC3339))
+	resp := v5.NewUserResponse{
+		Username: user,
+	}
+	return resp, nil
+}
+
+func (m MockDatabaseV5) UpdateUser(ctx context.Context, req v5.UpdateUserRequest) (v5.UpdateUserResponse, error) {
+	log.Default().Info("UpdateUser called",
+		"req", req)
+	return v5.UpdateUserResponse{}, nil
+}
+
+func (m MockDatabaseV5) DeleteUser(ctx context.Context, req v5.DeleteUserRequest) (v5.DeleteUserResponse, error) {
+	log.Default().Info("DeleteUser called",
+		"req", req)
+	return v5.DeleteUserResponse{}, nil
+}
+
+func (m MockDatabaseV5) Type() (string, error) {
+	log.Default().Info("Type called")
+	return mockV5Type, nil
+}
+
+func (m MockDatabaseV5) Close() error {
+	log.Default().Info("Close called")
+	return nil
+}
diff --git
a/builtin/logical/database/path_config_connection.go b/builtin/logical/database/path_config_connection.go new file mode 100644 index 0000000..b869fac --- /dev/null +++ b/builtin/logical/database/path_config_connection.go @@ -0,0 +1,552 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package database + +import ( + "context" + "errors" + "fmt" + "net/url" + "sort" + + "github.com/fatih/structs" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/go-version" + + "github.com/hashicorp/vault/helper/versions" + v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/hashicorp/vault/sdk/logical" +) + +var ( + respErrEmptyPluginName = "empty plugin name" + respErrEmptyName = "empty name attribute given" +) + +// DatabaseConfig is used by the Factory function to configure a Database +// object. +type DatabaseConfig struct { + PluginName string `json:"plugin_name" structs:"plugin_name" mapstructure:"plugin_name"` + PluginVersion string `json:"plugin_version" structs:"plugin_version" mapstructure:"plugin_version"` + // ConnectionDetails stores the database specific connection settings needed + // by each database type. + ConnectionDetails map[string]interface{} `json:"connection_details" structs:"connection_details" mapstructure:"connection_details"` + AllowedRoles []string `json:"allowed_roles" structs:"allowed_roles" mapstructure:"allowed_roles"` + + RootCredentialsRotateStatements []string `json:"root_credentials_rotate_statements" structs:"root_credentials_rotate_statements" mapstructure:"root_credentials_rotate_statements"` + + PasswordPolicy string `json:"password_policy" structs:"password_policy" mapstructure:"password_policy"` +} + +func (c *DatabaseConfig) SupportsCredentialType(credentialType v5.CredentialType) bool { + credTypes, ok := c.ConnectionDetails[v5.SupportedCredentialTypesKey].([]interface{}) + if !ok { + // Default to supporting CredentialTypePassword for database plugins that + // don't specify supported credential types in the initialization response + return credentialType == v5.CredentialTypePassword + } + + for _, ct := range credTypes { + if ct == credentialType.String() { + return true + } + } + return false +} + +// pathResetConnection configures a path to reset a plugin. +func pathResetConnection(b *databaseBackend) *framework.Path { + return &framework.Path{ + Pattern: fmt.Sprintf("reset/%s", framework.GenericNameRegex("name")), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + OperationVerb: "reset", + OperationSuffix: "connection", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of this database connection", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathConnectionReset(), + }, + + HelpSynopsis: pathResetConnectionHelpSyn, + HelpDescription: pathResetConnectionHelpDesc, + } +} + +// pathConnectionReset resets a plugin by closing the existing instance and +// creating a new one. 
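+//
+// For example, with the backend mounted at database/ (the mount path is
+// illustrative, not fixed), an operator can force a reset with:
+//
+//	$ vault write -f database/reset/my-connection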
+func (b *databaseBackend) pathConnectionReset() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + if name == "" { + return logical.ErrorResponse(respErrEmptyName), nil + } + + // Close plugin and delete the entry in the connections cache. + if err := b.ClearConnection(name); err != nil { + return nil, err + } + + // Execute plugin again, we don't need the object so throw away. + if _, err := b.GetConnection(ctx, req.Storage, name); err != nil { + return nil, err + } + + return nil, nil + } +} + +// pathConfigurePluginConnection returns a configured framework.Path setup to +// operate on plugins. +func pathConfigurePluginConnection(b *databaseBackend) *framework.Path { + return &framework.Path{ + Pattern: fmt.Sprintf("config/%s", framework.GenericNameRegex("name")), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of this database connection", + }, + + "plugin_name": { + Type: framework.TypeString, + Description: `The name of a builtin or previously registered + plugin known to vault. This endpoint will create an instance of + that plugin type.`, + }, + + "plugin_version": { + Type: framework.TypeString, + Description: `The version of the plugin to use.`, + }, + + "verify_connection": { + Type: framework.TypeBool, + Default: true, + Description: `If true, the connection details are verified by + actually connecting to the database. Defaults to true.`, + }, + + "allowed_roles": { + Type: framework.TypeCommaStringSlice, + Description: `Comma separated string or array of the role names + allowed to get creds from this database connection. If empty no + roles are allowed. If "*" all roles are allowed.`, + }, + + "root_rotation_statements": { + Type: framework.TypeStringSlice, + Description: `Specifies the database statements to be executed + to rotate the root user's credentials. 
See the plugin's API + page for more information on support and formatting for this + parameter.`, + }, + "password_policy": { + Type: framework.TypeString, + Description: `Password policy to use when generating passwords.`, + }, + }, + + ExistenceCheck: b.connectionExistenceCheck(), + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.CreateOperation: &framework.PathOperation{ + Callback: b.connectionWriteHandler(), + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "connection", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.connectionWriteHandler(), + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "connection", + }, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.connectionReadHandler(), + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "read", + OperationSuffix: "connection-configuration", + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.connectionDeleteHandler(), + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "delete", + OperationSuffix: "connection-configuration", + }, + }, + }, + + HelpSynopsis: pathConfigConnectionHelpSyn, + HelpDescription: pathConfigConnectionHelpDesc, + } +} + +func (b *databaseBackend) connectionExistenceCheck() framework.ExistenceFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) { + name := data.Get("name").(string) + if name == "" { + return false, errors.New(`missing "name" parameter`) + } + + entry, err := req.Storage.Get(ctx, fmt.Sprintf("config/%s", name)) + if err != nil { + return false, fmt.Errorf("failed to read connection configuration: %w", err) + } + + return entry != nil, nil + } +} + +func pathListPluginConnection(b *databaseBackend) *framework.Path { + return &framework.Path{ + Pattern: fmt.Sprintf("config/?$"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + OperationSuffix: "connections", + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: b.connectionListHandler(), + }, + + HelpSynopsis: pathConfigConnectionHelpSyn, + HelpDescription: pathConfigConnectionHelpDesc, + } +} + +func (b *databaseBackend) connectionListHandler() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + entries, err := req.Storage.List(ctx, "config/") + if err != nil { + return nil, err + } + + return logical.ListResponse(entries), nil + } +} + +// connectionReadHandler reads out the connection configuration +func (b *databaseBackend) connectionReadHandler() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + if name == "" { + return logical.ErrorResponse(respErrEmptyName), nil + } + + entry, err := req.Storage.Get(ctx, fmt.Sprintf("config/%s", name)) + if err != nil { + return nil, fmt.Errorf("failed to read connection configuration: %w", err) + } + if entry == nil { + return nil, nil + } + + var config DatabaseConfig + if err := entry.DecodeJSON(&config); err != nil { + return nil, err + } + + // Ensure that we only ever include a redacted valid URL in the response. 
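+		// For example, "postgresql://admin:secret@localhost:5432/mydb" is
+		// returned as "postgresql://admin:xxxxx@localhost:5432/mydb";
+		// url.URL.Redacted replaces any password with the literal "xxxxx".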
+ if connURLRaw, ok := config.ConnectionDetails["connection_url"]; ok { + if p, err := url.Parse(connURLRaw.(string)); err == nil { + config.ConnectionDetails["connection_url"] = p.Redacted() + } + } + + if versions.IsBuiltinVersion(config.PluginVersion) { + // This gets treated as though it's empty when mounting, and will get + // overwritten to be empty when the config is next written. See #18051. + config.PluginVersion = "" + } + + delete(config.ConnectionDetails, "password") + delete(config.ConnectionDetails, "private_key") + + return &logical.Response{ + Data: structs.New(config).Map(), + }, nil + } +} + +// connectionDeleteHandler deletes the connection configuration +func (b *databaseBackend) connectionDeleteHandler() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + if name == "" { + return logical.ErrorResponse(respErrEmptyName), nil + } + + err := req.Storage.Delete(ctx, fmt.Sprintf("config/%s", name)) + if err != nil { + return nil, fmt.Errorf("failed to delete connection configuration: %w", err) + } + + if err := b.ClearConnection(name); err != nil { + return nil, err + } + + return nil, nil + } +} + +// connectionWriteHandler returns a handler function for creating and updating +// both builtin and plugin database types. +func (b *databaseBackend) connectionWriteHandler() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + verifyConnection := data.Get("verify_connection").(bool) + + name := data.Get("name").(string) + if name == "" { + return logical.ErrorResponse(respErrEmptyName), nil + } + + // Baseline + config := &DatabaseConfig{} + + entry, err := req.Storage.Get(ctx, fmt.Sprintf("config/%s", name)) + if err != nil { + return nil, fmt.Errorf("failed to read connection configuration: %w", err) + } + if entry != nil { + if err := entry.DecodeJSON(config); err != nil { + return nil, err + } + } + + if pluginNameRaw, ok := data.GetOk("plugin_name"); ok { + config.PluginName = pluginNameRaw.(string) + } else if req.Operation == logical.CreateOperation { + config.PluginName = data.Get("plugin_name").(string) + } + if config.PluginName == "" { + return logical.ErrorResponse(respErrEmptyPluginName), nil + } + + if pluginVersionRaw, ok := data.GetOk("plugin_version"); ok { + config.PluginVersion = pluginVersionRaw.(string) + } + + var builtinShadowed bool + if unversionedPlugin, err := b.System().LookupPlugin(ctx, config.PluginName, consts.PluginTypeDatabase); err == nil && !unversionedPlugin.Builtin { + builtinShadowed = true + } + switch { + case config.PluginVersion != "": + semanticVersion, err := version.NewVersion(config.PluginVersion) + if err != nil { + return logical.ErrorResponse("version %q is not a valid semantic version: %s", config.PluginVersion, err), nil + } + + // Canonicalize the version. + config.PluginVersion = "v" + semanticVersion.String() + + if config.PluginVersion == versions.GetBuiltinVersion(consts.PluginTypeDatabase, config.PluginName) { + if builtinShadowed { + return logical.ErrorResponse("database plugin %q, version %s not found, as it is"+ + " overridden by an unversioned plugin of the same name. Omit `plugin_version` to use the unversioned plugin", config.PluginName, config.PluginVersion), nil + } + + config.PluginVersion = "" + } + case builtinShadowed: + // We'll select the unversioned plugin that's been registered. 
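+			// Leaving config.PluginVersion empty is what selects it: an empty
+			// version resolves to the unversioned plugin when the database
+			// wrapper is created below.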
+ case req.Operation == logical.CreateOperation: + // No version provided and no unversioned plugin of that name available. + // Pin to the current latest version if any versioned plugins are registered. + plugins, err := b.System().ListVersionedPlugins(ctx, consts.PluginTypeDatabase) + if err != nil { + return nil, err + } + + var versionedCandidates []pluginutil.VersionedPlugin + for _, plugin := range plugins { + if !plugin.Builtin && plugin.Name == config.PluginName && plugin.Version != "" { + versionedCandidates = append(versionedCandidates, plugin) + } + } + + if len(versionedCandidates) != 0 { + // Sort in reverse order. + sort.SliceStable(versionedCandidates, func(i, j int) bool { + return versionedCandidates[i].SemanticVersion.GreaterThan(versionedCandidates[j].SemanticVersion) + }) + + config.PluginVersion = "v" + versionedCandidates[0].SemanticVersion.String() + b.logger.Debug(fmt.Sprintf("pinning %q database plugin version %q from candidates %v", config.PluginName, config.PluginVersion, versionedCandidates)) + } + } + + if allowedRolesRaw, ok := data.GetOk("allowed_roles"); ok { + config.AllowedRoles = allowedRolesRaw.([]string) + } else if req.Operation == logical.CreateOperation { + config.AllowedRoles = data.Get("allowed_roles").([]string) + } + + if rootRotationStatementsRaw, ok := data.GetOk("root_rotation_statements"); ok { + config.RootCredentialsRotateStatements = rootRotationStatementsRaw.([]string) + } else if req.Operation == logical.CreateOperation { + config.RootCredentialsRotateStatements = data.Get("root_rotation_statements").([]string) + } + + if passwordPolicyRaw, ok := data.GetOk("password_policy"); ok { + config.PasswordPolicy = passwordPolicyRaw.(string) + } + + // Remove these entries from the data before we store it keyed under + // ConnectionDetails. + delete(data.Raw, "name") + delete(data.Raw, "plugin_name") + delete(data.Raw, "plugin_version") + delete(data.Raw, "allowed_roles") + delete(data.Raw, "verify_connection") + delete(data.Raw, "root_rotation_statements") + delete(data.Raw, "password_policy") + + id, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + + // If this is an update, take any new values, overwrite what was there + // before, and pass that in as the "new" set of values to the plugin, + // then save what results + if req.Operation == logical.CreateOperation { + config.ConnectionDetails = data.Raw + } else { + if config.ConnectionDetails == nil { + config.ConnectionDetails = make(map[string]interface{}) + } + for k, v := range data.Raw { + config.ConnectionDetails[k] = v + } + } + + // Create a database plugin and initialize it. 
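+		// Editorial note: Initialize is where verify_connection takes effect.
+		// When it is true, the plugin is expected to confirm it can actually
+		// reach the database before the config is accepted, and the (possibly
+		// normalized) config it returns is what gets persisted below.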
+		dbw, err := newDatabaseWrapper(ctx, config.PluginName, config.PluginVersion, b.System(), b.logger)
+		if err != nil {
+			return logical.ErrorResponse("error creating database object: %s", err), nil
+		}
+
+		initReq := v5.InitializeRequest{
+			Config:           config.ConnectionDetails,
+			VerifyConnection: verifyConnection,
+		}
+		initResp, err := dbw.Initialize(ctx, initReq)
+		if err != nil {
+			dbw.Close()
+			return logical.ErrorResponse("error creating database object: %s", err), nil
+		}
+		config.ConnectionDetails = initResp.Config
+
+		b.Logger().Debug("created database object", "name", name, "plugin_name", config.PluginName)
+
+		// Close and remove the old connection
+		oldConn := b.connPut(name, &dbPluginInstance{
+			database: dbw,
+			name:     name,
+			id:       id,
+		})
+		if oldConn != nil {
+			oldConn.Close()
+		}
+
+		// 1.12.0 and 1.12.1 stored builtin plugins in storage, but 1.12.2 reverted
+		// that, so clean up any pre-existing stored builtin versions on write.
+		if versions.IsBuiltinVersion(config.PluginVersion) {
+			config.PluginVersion = ""
+		}
+		err = storeConfig(ctx, req.Storage, name, config)
+		if err != nil {
+			return nil, err
+		}
+
+		resp := &logical.Response{}
+
+		// This is a simple check for passwords in the connection_url parameter. If one exists,
+		// warn the user to use a templated URL string instead
+		if connURLRaw, ok := config.ConnectionDetails["connection_url"]; ok {
+			if connURL, err := url.Parse(connURLRaw.(string)); err == nil {
+				if _, ok := connURL.User.Password(); ok {
+					resp.AddWarning("Password found in connection_url, use a templated url to enable root rotation and prevent read access to password information.")
+				}
+			}
+		}
+
+		// If using a legacy DB plugin with the `password_policy` field set, send a warning
+		// to the user indicating the `password_policy` will not be used
+		if dbw.isV4() && config.PasswordPolicy != "" {
+			resp.AddWarning(fmt.Sprintf("%s does not support password policies - upgrade to the latest version of "+
+				"Vault (or the sdk if using a custom plugin) to gain password policy support", config.PluginName))
+		}
+
+		if len(resp.Warnings) == 0 {
+			return nil, nil
+		}
+		return resp, nil
+	}
+}
+
+func storeConfig(ctx context.Context, storage logical.Storage, name string, config *DatabaseConfig) error {
+	entry, err := logical.StorageEntryJSON(fmt.Sprintf("config/%s", name), config)
+	if err != nil {
+		return fmt.Errorf("unable to marshal object to JSON: %w", err)
+	}
+
+	err = storage.Put(ctx, entry)
+	if err != nil {
+		return fmt.Errorf("failed to save object: %w", err)
+	}
+	return nil
+}
+
+const pathConfigConnectionHelpSyn = `
+Configure connection details to a database plugin.
+`
+
+const pathConfigConnectionHelpDesc = `
+This path configures the connection details used to connect to a particular
+database. This path runs the provided plugin name and passes the configured
+connection details to the plugin. See the documentation for the plugin specified
+for a full list of accepted connection details.
+
+In addition to the database-specific connection details, this endpoint also
+accepts:
+
+	* "plugin_name" (required) - The name of a builtin or previously registered
+	   plugin known to Vault. This endpoint will create an instance of that
+	   plugin type.
+
+	* "verify_connection" (default: true) - A boolean value denoting if the plugin should verify
+	   it is able to connect to the database using the provided connection
+	   details.
+`
+
+const pathResetConnectionHelpSyn = `
+Resets a database plugin.
+` + +const pathResetConnectionHelpDesc = ` +This path resets the database connection by closing the existing database plugin +instance and running a new one. +` diff --git a/builtin/logical/database/path_config_connection_test.go b/builtin/logical/database/path_config_connection_test.go new file mode 100644 index 0000000..8cf0606 --- /dev/null +++ b/builtin/logical/database/path_config_connection_test.go @@ -0,0 +1,169 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package database + +import ( + "context" + "strings" + "testing" + + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/helper/versions" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" +) + +func TestWriteConfig_PluginVersionInStorage(t *testing.T) { + cluster, sys := getCluster(t) + t.Cleanup(cluster.Cleanup) + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + defer b.Cleanup(context.Background()) + + const hdb = "hana-database-plugin" + hdbBuiltin := versions.GetBuiltinVersion(consts.PluginTypeDatabase, hdb) + + // Configure a connection + writePluginVersion := func() { + t.Helper() + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: map[string]interface{}{ + "connection_url": "test", + "plugin_name": hdb, + "plugin_version": hdbBuiltin, + "verify_connection": false, + }, + } + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + } + writePluginVersion() + + getPluginVersionFromAPI := func() string { + t.Helper() + req := &logical.Request{ + Operation: logical.ReadOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + } + + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + return resp.Data["plugin_version"].(string) + } + pluginVersion := getPluginVersionFromAPI() + if pluginVersion != "" { + t.Fatalf("expected plugin_version empty but got %s", pluginVersion) + } + + // Directly store config to get the builtin plugin version into storage, + // simulating a write that happened before upgrading to 1.12.2+ + err = storeConfig(context.Background(), config.StorageView, "plugin-test", &DatabaseConfig{ + PluginName: hdb, + PluginVersion: hdbBuiltin, + }) + if err != nil { + t.Fatal(err) + } + + // Now replay the read request, and we still shouldn't get the builtin version back. + pluginVersion = getPluginVersionFromAPI() + if pluginVersion != "" { + t.Fatalf("expected plugin_version empty but got %s", pluginVersion) + } + + // Check the underlying data, which should still have the version in storage. 
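+	// Editorial note: the API read above masks the builtin version, but the raw
+	// entry written via storeConfig still carries it until the next write; the
+	// helper below reads storage directly to demonstrate the difference.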
+	getPluginVersionFromStorage := func() string {
+		t.Helper()
+		entry, err := config.StorageView.Get(context.Background(), "config/plugin-test")
+		if err != nil {
+			t.Fatal(err)
+		}
+		if entry == nil {
+			t.Fatal("expected a storage entry for config/plugin-test, got nil")
+		}
+
+		var config DatabaseConfig
+		if err := entry.DecodeJSON(&config); err != nil {
+			t.Fatal(err)
+		}
+		return config.PluginVersion
+	}
+
+	storagePluginVersion := getPluginVersionFromStorage()
+	if storagePluginVersion != hdbBuiltin {
+		t.Fatalf("Expected %s, got: %s", hdbBuiltin, storagePluginVersion)
+	}
+
+	// Trigger a write to storage, which should clean up plugin version in the storage entry.
+	writePluginVersion()
+
+	storagePluginVersion = getPluginVersionFromStorage()
+	if storagePluginVersion != "" {
+		t.Fatalf("Expected empty, got: %s", storagePluginVersion)
+	}
+
+	// Finally, confirm API requests still return empty plugin version too
+	pluginVersion = getPluginVersionFromAPI()
+	if pluginVersion != "" {
+		t.Fatalf("expected plugin_version empty but got %s", pluginVersion)
+	}
+}
+
+func TestWriteConfig_HelpfulErrorMessageWhenBuiltinOverridden(t *testing.T) {
+	cluster, sys := getCluster(t)
+	t.Cleanup(cluster.Cleanup)
+
+	config := logical.TestBackendConfig()
+	config.StorageView = &logical.InmemStorage{}
+	config.System = sys
+
+	b, err := Factory(context.Background(), config)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer b.Cleanup(context.Background())
+
+	const pg = "postgresql-database-plugin"
+	pgBuiltin := versions.GetBuiltinVersion(consts.PluginTypeDatabase, pg)
+
+	// Configure a connection
+	data := map[string]interface{}{
+		"connection_url":    "test",
+		"plugin_name":       pg,
+		"plugin_version":    pgBuiltin,
+		"verify_connection": false,
+	}
+	req := &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "config/plugin-test",
+		Storage:   config.StorageView,
+		Data:      data,
+	}
+	resp, err := b.HandleRequest(namespace.RootContext(nil), req)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp == nil || !resp.IsError() {
+		t.Fatalf("resp:%#v", resp)
+	}
+	if !strings.Contains(resp.Error().Error(), "overridden by an unversioned plugin") {
+		t.Fatalf("expected overridden error but got: %s", resp.Error())
+	}
+}
diff --git a/builtin/logical/database/path_creds_create.go b/builtin/logical/database/path_creds_create.go
new file mode 100644
index 0000000..a0fe6a3
--- /dev/null
+++ b/builtin/logical/database/path_creds_create.go
@@ -0,0 +1,288 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package database + +import ( + "context" + "fmt" + "time" + + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathCredsCreate(b *databaseBackend) []*framework.Path { + return []*framework.Path{ + { + Pattern: "creds/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + OperationVerb: "generate", + OperationSuffix: "credentials", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the role.", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathCredsCreateRead(), + }, + + HelpSynopsis: pathCredsCreateReadHelpSyn, + HelpDescription: pathCredsCreateReadHelpDesc, + }, + { + Pattern: "static-creds/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + OperationVerb: "read", + OperationSuffix: "static-role-credentials", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the static role.", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathStaticCredsRead(), + }, + + HelpSynopsis: pathStaticCredsReadHelpSyn, + HelpDescription: pathStaticCredsReadHelpDesc, + }, + } +} + +func (b *databaseBackend) pathCredsCreateRead() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + + // Get the role + role, err := b.Role(ctx, req.Storage, name) + if err != nil { + return nil, err + } + if role == nil { + return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", name)), nil + } + + dbConfig, err := b.DatabaseConfig(ctx, req.Storage, role.DBName) + if err != nil { + return nil, err + } + + // If role name isn't in the database's allowed roles, send back a + // permission denied. 
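+		// For example (illustrative, not part of the upstream change): with
+		// allowed_roles = ["readonly-*"], a role named "readonly-reporting"
+		// passes the glob check below while "admin" is rejected; a single "*"
+		// entry allows every role.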
+ if !strutil.StrListContains(dbConfig.AllowedRoles, "*") && !strutil.StrListContainsGlob(dbConfig.AllowedRoles, name) { + return nil, fmt.Errorf("%q is not an allowed role", name) + } + + // If the plugin doesn't support the credential type, return an error + if !dbConfig.SupportsCredentialType(role.CredentialType) { + return logical.ErrorResponse("unsupported credential_type: %q", + role.CredentialType.String()), nil + } + + // Get the Database object + dbi, err := b.GetConnection(ctx, req.Storage, role.DBName) + if err != nil { + return nil, err + } + + dbi.RLock() + defer dbi.RUnlock() + + ttl, _, err := framework.CalculateTTL(b.System(), 0, role.DefaultTTL, 0, role.MaxTTL, 0, time.Time{}) + if err != nil { + return nil, err + } + expiration := time.Now().Add(ttl) + // Adding a small buffer since the TTL will be calculated again after this call + // to ensure the database credential does not expire before the lease + expiration = expiration.Add(5 * time.Second) + + newUserReq := v5.NewUserRequest{ + UsernameConfig: v5.UsernameMetadata{ + DisplayName: req.DisplayName, + RoleName: name, + }, + Statements: v5.Statements{ + Commands: role.Statements.Creation, + }, + RollbackStatements: v5.Statements{ + Commands: role.Statements.Rollback, + }, + Expiration: expiration, + } + + respData := make(map[string]interface{}) + + // Generate the credential based on the role's credential type + switch role.CredentialType { + case v5.CredentialTypePassword: + generator, err := newPasswordGenerator(role.CredentialConfig) + if err != nil { + return nil, fmt.Errorf("failed to construct credential generator: %s", err) + } + + // Fall back to database config-level password policy if not set on role + if generator.PasswordPolicy == "" { + generator.PasswordPolicy = dbConfig.PasswordPolicy + } + + // Generate the password + password, err := generator.generate(ctx, b, dbi.database) + if err != nil { + b.CloseIfShutdown(dbi, err) + return nil, fmt.Errorf("failed to generate password: %s", err) + } + + // Set input credential + newUserReq.CredentialType = v5.CredentialTypePassword + newUserReq.Password = password + + case v5.CredentialTypeRSAPrivateKey: + generator, err := newRSAKeyGenerator(role.CredentialConfig) + if err != nil { + return nil, fmt.Errorf("failed to construct credential generator: %s", err) + } + + // Generate the RSA key pair + public, private, err := generator.generate(b.GetRandomReader()) + if err != nil { + return nil, fmt.Errorf("failed to generate RSA key pair: %s", err) + } + + // Set input credential + newUserReq.CredentialType = v5.CredentialTypeRSAPrivateKey + newUserReq.PublicKey = public + + // Set output credential + respData["rsa_private_key"] = string(private) + case v5.CredentialTypeClientCertificate: + generator, err := newClientCertificateGenerator(role.CredentialConfig) + if err != nil { + return nil, fmt.Errorf("failed to construct credential generator: %s", err) + } + + // Generate the client certificate + cb, subject, err := generator.generate(b.GetRandomReader(), expiration, + newUserReq.UsernameConfig) + if err != nil { + return nil, fmt.Errorf("failed to generate client certificate: %w", err) + } + + // Set input credential + newUserReq.CredentialType = dbplugin.CredentialTypeClientCertificate + newUserReq.Subject = subject + + // Set output credential + respData["client_certificate"] = cb.Certificate + respData["private_key"] = cb.PrivateKey + respData["private_key_type"] = cb.PrivateKeyType + } + + // Overwriting the password in the event this is a legacy database + 
// plugin and the provided password is ignored + newUserResp, password, err := dbi.database.NewUser(ctx, newUserReq) + if err != nil { + b.CloseIfShutdown(dbi, err) + return nil, err + } + respData["username"] = newUserResp.Username + + // Database plugins using the v4 interface generate and return the password. + // Set the password response to what is returned by the NewUser request. + if role.CredentialType == v5.CredentialTypePassword { + respData["password"] = password + } + + internal := map[string]interface{}{ + "username": newUserResp.Username, + "role": name, + "db_name": role.DBName, + "revocation_statements": role.Statements.Revocation, + } + resp := b.Secret(SecretCredsType).Response(respData, internal) + resp.Secret.TTL = role.DefaultTTL + resp.Secret.MaxTTL = role.MaxTTL + return resp, nil + } +} + +func (b *databaseBackend) pathStaticCredsRead() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + + role, err := b.StaticRole(ctx, req.Storage, name) + if err != nil { + return nil, err + } + if role == nil { + return logical.ErrorResponse("unknown role: %s", name), nil + } + + dbConfig, err := b.DatabaseConfig(ctx, req.Storage, role.DBName) + if err != nil { + return nil, err + } + + // If role name isn't in the database's allowed roles, send back a + // permission denied. + if !strutil.StrListContains(dbConfig.AllowedRoles, "*") && !strutil.StrListContainsGlob(dbConfig.AllowedRoles, name) { + return nil, fmt.Errorf("%q is not an allowed role", name) + } + + respData := map[string]interface{}{ + "username": role.StaticAccount.Username, + "ttl": role.StaticAccount.CredentialTTL().Seconds(), + "rotation_period": role.StaticAccount.RotationPeriod.Seconds(), + "last_vault_rotation": role.StaticAccount.LastVaultRotation, + } + + switch role.CredentialType { + case v5.CredentialTypePassword: + respData["password"] = role.StaticAccount.Password + case v5.CredentialTypeRSAPrivateKey: + respData["rsa_private_key"] = string(role.StaticAccount.PrivateKey) + } + + return &logical.Response{ + Data: respData, + }, nil + } +} + +const pathCredsCreateReadHelpSyn = ` +Request database credentials for a certain role. +` + +const pathCredsCreateReadHelpDesc = ` +This path reads database credentials for a certain role. The +database credentials will be generated on demand and will be automatically +revoked when the lease is up. +` + +const pathStaticCredsReadHelpSyn = ` +Request database credentials for a certain static role. These credentials are +rotated periodically. +` + +const pathStaticCredsReadHelpDesc = ` +This path reads database credentials for a certain static role. The database +credentials are rotated periodically according to their configuration, and will +return the same password until they are rotated. +` diff --git a/builtin/logical/database/path_roles.go b/builtin/logical/database/path_roles.go new file mode 100644 index 0000000..ba4aa7f --- /dev/null +++ b/builtin/logical/database/path_roles.go @@ -0,0 +1,857 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package database
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/go-secure-stdlib/strutil"
+	v4 "github.com/hashicorp/vault/sdk/database/dbplugin"
+	v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/helper/locksutil"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/hashicorp/vault/sdk/queue"
+)
+
+func pathListRoles(b *databaseBackend) []*framework.Path {
+	return []*framework.Path{
+		{
+			Pattern: "roles/?$",
+
+			DisplayAttrs: &framework.DisplayAttributes{
+				OperationPrefix: operationPrefixDatabase,
+				OperationVerb:   "list",
+				OperationSuffix: "roles",
+			},
+
+			Callbacks: map[logical.Operation]framework.OperationFunc{
+				logical.ListOperation: b.pathRoleList,
+			},
+
+			HelpSynopsis:    pathRoleHelpSyn,
+			HelpDescription: pathRoleHelpDesc,
+		},
+		{
+			Pattern: "static-roles/?$",
+
+			DisplayAttrs: &framework.DisplayAttributes{
+				OperationPrefix: operationPrefixDatabase,
+				OperationVerb:   "list",
+				OperationSuffix: "static-roles",
+			},
+
+			Callbacks: map[logical.Operation]framework.OperationFunc{
+				logical.ListOperation: b.pathRoleList,
+			},
+
+			HelpSynopsis:    pathStaticRoleHelpSyn,
+			HelpDescription: pathStaticRoleHelpDesc,
+		},
+	}
+}
+
+func pathRoles(b *databaseBackend) []*framework.Path {
+	return []*framework.Path{
+		{
+			Pattern: "roles/" + framework.GenericNameRegex("name"),
+			DisplayAttrs: &framework.DisplayAttributes{
+				OperationPrefix: operationPrefixDatabase,
+				OperationSuffix: "role",
+			},
+			Fields:         fieldsForType(databaseRolePath),
+			ExistenceCheck: b.pathRoleExistenceCheck,
+			Callbacks: map[logical.Operation]framework.OperationFunc{
+				logical.ReadOperation:   b.pathRoleRead,
+				logical.CreateOperation: b.pathRoleCreateUpdate,
+				logical.UpdateOperation: b.pathRoleCreateUpdate,
+				logical.DeleteOperation: b.pathRoleDelete,
+			},
+
+			HelpSynopsis:    pathRoleHelpSyn,
+			HelpDescription: pathRoleHelpDesc,
+		},
+
+		{
+			Pattern: "static-roles/" + framework.GenericNameRegex("name"),
+			DisplayAttrs: &framework.DisplayAttributes{
+				OperationPrefix: operationPrefixDatabase,
+				OperationSuffix: "static-role",
+			},
+			Fields:         fieldsForType(databaseStaticRolePath),
+			ExistenceCheck: b.pathStaticRoleExistenceCheck,
+			Callbacks: map[logical.Operation]framework.OperationFunc{
+				logical.ReadOperation:   b.pathStaticRoleRead,
+				logical.CreateOperation: b.pathStaticRoleCreateUpdate,
+				logical.UpdateOperation: b.pathStaticRoleCreateUpdate,
+				logical.DeleteOperation: b.pathStaticRoleDelete,
+			},
+
+			HelpSynopsis:    pathStaticRoleHelpSyn,
+			HelpDescription: pathStaticRoleHelpDesc,
+		},
+	}
+}
+
+// fieldsForType returns a map of string/FieldSchema items for the given role
+// type. The purpose is to keep the shared fields between dynamic and static
+// roles consistent, and allow for each type to override or provide their own
+// specific fields.
+func fieldsForType(roleType string) map[string]*framework.FieldSchema {
+	fields := map[string]*framework.FieldSchema{
+		"name": {
+			Type:        framework.TypeString,
+			Description: "Name of the role.",
+		},
+		"db_name": {
+			Type:        framework.TypeString,
+			Description: "Name of the database this role acts on.",
+		},
+		"credential_type": {
+			Type: framework.TypeString,
+			Description: "The type of credential to manage. Options include: " +
+				"'password', 'rsa_private_key', 'client_certificate'. 
Defaults to 'password'.",
+			Default: "password",
+		},
+		"credential_config": {
+			Type:        framework.TypeKVPairs,
+			Description: "The configuration for the given credential_type.",
+		},
+	}
+
+	// Get the fields that are specific to the type of role, and add them to the
+	// common fields
+	var typeFields map[string]*framework.FieldSchema
+	switch roleType {
+	case databaseStaticRolePath:
+		typeFields = staticFields()
+	default:
+		typeFields = dynamicFields()
+	}
+
+	for k, v := range typeFields {
+		fields[k] = v
+	}
+
+	return fields
+}
+
+// dynamicFields returns a map of key and field schema items that are specific
+// only to dynamic roles
+func dynamicFields() map[string]*framework.FieldSchema {
+	fields := map[string]*framework.FieldSchema{
+		"default_ttl": {
+			Type:        framework.TypeDurationSecond,
+			Description: "Default TTL for the role.",
+		},
+		"max_ttl": {
+			Type:        framework.TypeDurationSecond,
+			Description: "Maximum time a credential is valid for.",
+		},
+		"creation_statements": {
+			Type: framework.TypeStringSlice,
+			Description: `Specifies the database statements executed to
+	create and configure a user. See the plugin's API page for more
+	information on support and formatting for this parameter.`,
+		},
+		"revocation_statements": {
+			Type: framework.TypeStringSlice,
+			Description: `Specifies the database statements to be executed
+	to revoke a user. See the plugin's API page for more information
+	on support and formatting for this parameter.`,
+		},
+		"renew_statements": {
+			Type: framework.TypeStringSlice,
+			Description: `Specifies the database statements to be executed
+	to renew a user. Not every plugin type will support this
+	functionality. See the plugin's API page for more information on
+	support and formatting for this parameter.`,
+		},
+		"rollback_statements": {
+			Type: framework.TypeStringSlice,
+			Description: `Specifies the database statements to be executed to
+	roll back a create operation in the event of an error. Not every plugin
+	type will support this functionality. See the plugin's API page for
+	more information on support and formatting for this parameter.`,
+		},
+	}
+	return fields
+}
+
+// staticFields returns a map of key and field schema items that are specific
+// only to static roles
+func staticFields() map[string]*framework.FieldSchema {
+	fields := map[string]*framework.FieldSchema{
+		"username": {
+			Type: framework.TypeString,
+			Description: `Name of the static user account for Vault to manage.
+	Requires "rotation_period" to be specified.`,
+		},
+		"rotation_period": {
+			Type: framework.TypeDurationSecond,
+			Description: `Period for automatic
+	credential rotation of the given username. Not valid unless used with
+	"username".`,
+		},
+		"rotation_statements": {
+			Type: framework.TypeStringSlice,
+			Description: `Specifies the database statements to be executed to
+	rotate the account's credentials. Not every plugin type will support
+	this functionality. 
See the plugin's API page for more information on + support and formatting for this parameter.`, + }, + } + return fields +} + +func (b *databaseBackend) pathRoleExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) { + role, err := b.Role(ctx, req.Storage, data.Get("name").(string)) + if err != nil { + return false, err + } + return role != nil, nil +} + +func (b *databaseBackend) pathStaticRoleExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) { + role, err := b.StaticRole(ctx, req.Storage, data.Get("name").(string)) + if err != nil { + return false, err + } + return role != nil, nil +} + +func (b *databaseBackend) pathRoleDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + err := req.Storage.Delete(ctx, databaseRolePath+data.Get("name").(string)) + if err != nil { + return nil, err + } + + return nil, nil +} + +func (b *databaseBackend) pathStaticRoleDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + + // Grab the exclusive lock + lock := locksutil.LockForKey(b.roleLocks, name) + lock.Lock() + defer lock.Unlock() + + // Remove the item from the queue + _, _ = b.popFromRotationQueueByKey(name) + + err := req.Storage.Delete(ctx, databaseStaticRolePath+name) + if err != nil { + return nil, err + } + + walIDs, err := framework.ListWAL(ctx, req.Storage) + if err != nil { + return nil, err + } + var merr *multierror.Error + for _, walID := range walIDs { + wal, err := b.findStaticWAL(ctx, req.Storage, walID) + if err != nil { + merr = multierror.Append(merr, err) + continue + } + if wal != nil && name == wal.RoleName { + b.Logger().Debug("deleting WAL for deleted role", "WAL ID", walID, "role", name) + err = framework.DeleteWAL(ctx, req.Storage, walID) + if err != nil { + b.Logger().Debug("failed to delete WAL for deleted role", "WAL ID", walID, "error", err) + merr = multierror.Append(merr, err) + } + } + } + + return nil, merr.ErrorOrNil() +} + +func (b *databaseBackend) pathStaticRoleRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + role, err := b.StaticRole(ctx, req.Storage, d.Get("name").(string)) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + data := map[string]interface{}{ + "db_name": role.DBName, + "rotation_statements": role.Statements.Rotation, + "credential_type": role.CredentialType.String(), + } + + // guard against nil StaticAccount; shouldn't happen but we'll be safe + if role.StaticAccount != nil { + data["username"] = role.StaticAccount.Username + data["rotation_statements"] = role.Statements.Rotation + data["rotation_period"] = role.StaticAccount.RotationPeriod.Seconds() + if !role.StaticAccount.LastVaultRotation.IsZero() { + data["last_vault_rotation"] = role.StaticAccount.LastVaultRotation + } + } + + if len(role.CredentialConfig) > 0 { + data["credential_config"] = role.CredentialConfig + } + if len(role.Statements.Rotation) == 0 { + data["rotation_statements"] = []string{} + } + + return &logical.Response{ + Data: data, + }, nil +} + +func (b *databaseBackend) pathRoleRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + role, err := b.Role(ctx, req.Storage, d.Get("name").(string)) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + data := 
map[string]interface{}{ + "db_name": role.DBName, + "creation_statements": role.Statements.Creation, + "revocation_statements": role.Statements.Revocation, + "rollback_statements": role.Statements.Rollback, + "renew_statements": role.Statements.Renewal, + "default_ttl": role.DefaultTTL.Seconds(), + "max_ttl": role.MaxTTL.Seconds(), + "credential_type": role.CredentialType.String(), + } + if len(role.CredentialConfig) > 0 { + data["credential_config"] = role.CredentialConfig + } + if len(role.Statements.Creation) == 0 { + data["creation_statements"] = []string{} + } + if len(role.Statements.Revocation) == 0 { + data["revocation_statements"] = []string{} + } + if len(role.Statements.Rollback) == 0 { + data["rollback_statements"] = []string{} + } + if len(role.Statements.Renewal) == 0 { + data["renew_statements"] = []string{} + } + + return &logical.Response{ + Data: data, + }, nil +} + +func (b *databaseBackend) pathRoleList(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + path := databaseRolePath + if strings.HasPrefix(req.Path, "static-roles") { + path = databaseStaticRolePath + } + entries, err := req.Storage.List(ctx, path) + if err != nil { + return nil, err + } + + return logical.ListResponse(entries), nil +} + +func (b *databaseBackend) pathRoleCreateUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + if name == "" { + return logical.ErrorResponse("empty role name attribute given"), nil + } + + exists, err := b.pathStaticRoleExistenceCheck(ctx, req, data) + if err != nil { + return nil, err + } + if exists { + return logical.ErrorResponse("Role and Static Role names must be unique"), nil + } + + role, err := b.Role(ctx, req.Storage, name) + if err != nil { + return nil, err + } + if role == nil { + role = &roleEntry{} + } + + createOperation := (req.Operation == logical.CreateOperation) + + // DB Attributes + { + if dbNameRaw, ok := data.GetOk("db_name"); ok { + role.DBName = dbNameRaw.(string) + } else if createOperation { + role.DBName = data.Get("db_name").(string) + } + if role.DBName == "" { + return logical.ErrorResponse("database name is required"), nil + } + + if credentialTypeRaw, ok := data.GetOk("credential_type"); ok { + credentialType := credentialTypeRaw.(string) + if err := role.setCredentialType(credentialType); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + } + + var credentialConfig map[string]string + if raw, ok := data.GetOk("credential_config"); ok { + credentialConfig = raw.(map[string]string) + } else if req.Operation == logical.CreateOperation { + credentialConfig = data.Get("credential_config").(map[string]string) + } + if err := role.setCredentialConfig(credentialConfig); err != nil { + return logical.ErrorResponse("credential_config validation failed: %s", err), nil + } + } + + // Statements + { + if creationStmtsRaw, ok := data.GetOk("creation_statements"); ok { + role.Statements.Creation = creationStmtsRaw.([]string) + } else if createOperation { + role.Statements.Creation = data.Get("creation_statements").([]string) + } + + if revocationStmtsRaw, ok := data.GetOk("revocation_statements"); ok { + role.Statements.Revocation = revocationStmtsRaw.([]string) + } else if createOperation { + role.Statements.Revocation = data.Get("revocation_statements").([]string) + } + + if rollbackStmtsRaw, ok := data.GetOk("rollback_statements"); ok { + role.Statements.Rollback = rollbackStmtsRaw.([]string) + } 
else if createOperation {
+			role.Statements.Rollback = data.Get("rollback_statements").([]string)
+		}
+
+		if renewStmtsRaw, ok := data.GetOk("renew_statements"); ok {
+			role.Statements.Renewal = renewStmtsRaw.([]string)
+		} else if createOperation {
+			role.Statements.Renewal = data.Get("renew_statements").([]string)
+		}
+
+		// Do not persist deprecated statements that are populated on role read
+		role.Statements.CreationStatements = ""
+		role.Statements.RevocationStatements = ""
+		role.Statements.RenewStatements = ""
+		role.Statements.RollbackStatements = ""
+	}
+
+	role.Statements.Revocation = strutil.RemoveEmpty(role.Statements.Revocation)
+
+	// TTLs
+	{
+		if defaultTTLRaw, ok := data.GetOk("default_ttl"); ok {
+			role.DefaultTTL = time.Duration(defaultTTLRaw.(int)) * time.Second
+		} else if createOperation {
+			role.DefaultTTL = time.Duration(data.Get("default_ttl").(int)) * time.Second
+		}
+		if maxTTLRaw, ok := data.GetOk("max_ttl"); ok {
+			role.MaxTTL = time.Duration(maxTTLRaw.(int)) * time.Second
+		} else if createOperation {
+			role.MaxTTL = time.Duration(data.Get("max_ttl").(int)) * time.Second
+		}
+	}
+
+	// Store it
+	entry, err := logical.StorageEntryJSON(databaseRolePath+name, role)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := req.Storage.Put(ctx, entry); err != nil {
+		return nil, err
+	}
+
+	return nil, nil
+}
+
+func (b *databaseBackend) pathStaticRoleCreateUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	name := data.Get("name").(string)
+	if name == "" {
+		return logical.ErrorResponse("empty role name attribute given"), nil
+	}
+
+	// Grab the exclusive lock, as we may need to pop and re-push the queue item
+	// for this role
+	lock := locksutil.LockForKey(b.roleLocks, name)
+	lock.Lock()
+	defer lock.Unlock()
+
+	exists, err := b.pathRoleExistenceCheck(ctx, req, data)
+	if err != nil {
+		return nil, err
+	}
+	if exists {
+		return logical.ErrorResponse("Role and Static Role names must be unique"), nil
+	}
+
+	role, err := b.StaticRole(ctx, req.Storage, data.Get("name").(string))
+	if err != nil {
+		return nil, err
+	}
+
+	// createRole is a boolean to indicate if this is a new role creation. This
+	// can be used later by database plugins that distinguish between creating and
+	// updating roles, and may use separate statements depending on the context.
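+	// Editorial note: createRole also flips to true below when no stored role
+	// exists, so an Update request against an unknown name is treated as a
+	// create and must still supply the create-only required fields such as
+	// username and rotation_period.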
+	createRole := (req.Operation == logical.CreateOperation)
+	if role == nil {
+		role = &roleEntry{
+			StaticAccount: &staticAccount{},
+		}
+		createRole = true
+	}
+
+	// DB Attributes
+	if dbNameRaw, ok := data.GetOk("db_name"); ok {
+		role.DBName = dbNameRaw.(string)
+	} else if createRole {
+		role.DBName = data.Get("db_name").(string)
+	}
+
+	if role.DBName == "" {
+		return logical.ErrorResponse("database name is a required field"), nil
+	}
+
+	username := data.Get("username").(string)
+	if username == "" && createRole {
+		return logical.ErrorResponse("username is a required field to create a static account"), nil
+	}
+
+	if role.StaticAccount.Username != "" && role.StaticAccount.Username != username {
+		return logical.ErrorResponse("cannot update static account username"), nil
+	}
+	role.StaticAccount.Username = username
+
+	// If it's a Create operation, both username and rotation_period must be included
+	rotationPeriodSecondsRaw, ok := data.GetOk("rotation_period")
+	if !ok && createRole {
+		return logical.ErrorResponse("rotation_period is required to create static accounts"), nil
+	}
+	if ok {
+		rotationPeriodSeconds := rotationPeriodSecondsRaw.(int)
+		if rotationPeriodSeconds < defaultQueueTickSeconds {
+			// If rotation frequency is specified, and this is an update, the value
+			// must be at least that of the queue tick interval (5 seconds at
+			// time of writing), otherwise we won't be able to rotate in time
+			return logical.ErrorResponse(fmt.Sprintf("rotation_period must be %d seconds or more", defaultQueueTickSeconds)), nil
+		}
+		role.StaticAccount.RotationPeriod = time.Duration(rotationPeriodSeconds) * time.Second
+	}
+
+	if rotationStmtsRaw, ok := data.GetOk("rotation_statements"); ok {
+		role.Statements.Rotation = rotationStmtsRaw.([]string)
+	} else if req.Operation == logical.CreateOperation {
+		role.Statements.Rotation = data.Get("rotation_statements").([]string)
+	}
+
+	if credentialTypeRaw, ok := data.GetOk("credential_type"); ok {
+		credentialType := credentialTypeRaw.(string)
+		if err := role.setCredentialType(credentialType); err != nil {
+			return logical.ErrorResponse(err.Error()), nil
+		}
+	}
+
+	var credentialConfig map[string]string
+	if raw, ok := data.GetOk("credential_config"); ok {
+		credentialConfig = raw.(map[string]string)
+	} else if req.Operation == logical.CreateOperation {
+		credentialConfig = data.Get("credential_config").(map[string]string)
+	}
+	if err := role.setCredentialConfig(credentialConfig); err != nil {
+		return logical.ErrorResponse("credential_config validation failed: %s", err), nil
+	}
+
+	// lvr represents the role's LastVaultRotation
+	lvr := role.StaticAccount.LastVaultRotation
+
+	// Only call setStaticAccount if we're creating the role for the
+	// first time
+	var item *queue.Item
+	switch req.Operation {
+	case logical.CreateOperation:
+		// setStaticAccount calls Storage.Put and saves the role to storage
+		resp, err := b.setStaticAccount(ctx, req.Storage, &setStaticAccountInput{
+			RoleName: name,
+			Role:     role,
+		})
+		if err != nil {
+			if resp != nil && resp.WALID != "" {
+				b.Logger().Debug("deleting WAL for failed role creation", "WAL ID", resp.WALID, "role", name)
+				walDeleteErr := framework.DeleteWAL(ctx, req.Storage, resp.WALID)
+				if walDeleteErr != nil {
+					b.Logger().Debug("failed to delete WAL for failed role creation", "WAL ID", resp.WALID, "error", walDeleteErr)
+					var merr *multierror.Error
+					merr = multierror.Append(merr, err)
+					merr = multierror.Append(merr, fmt.Errorf("failed to clean up WAL from failed role creation: %w",
walDeleteErr)) + err = merr.ErrorOrNil() + } + } + + return nil, err + } + // guard against RotationTime not being set or zero-value + lvr = resp.RotationTime + item = &queue.Item{ + Key: name, + } + case logical.UpdateOperation: + // store updated Role + entry, err := logical.StorageEntryJSON(databaseStaticRolePath+name, role) + if err != nil { + return nil, err + } + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + item, err = b.popFromRotationQueueByKey(name) + if err != nil { + return nil, err + } + } + + item.Priority = lvr.Add(role.StaticAccount.RotationPeriod).Unix() + + // Add their rotation to the queue + if err := b.pushItem(item); err != nil { + return nil, err + } + + return nil, nil +} + +type roleEntry struct { + DBName string `json:"db_name"` + Statements v4.Statements `json:"statements"` + DefaultTTL time.Duration `json:"default_ttl"` + MaxTTL time.Duration `json:"max_ttl"` + CredentialType v5.CredentialType `json:"credential_type"` + CredentialConfig map[string]interface{} `json:"credential_config"` + StaticAccount *staticAccount `json:"static_account" mapstructure:"static_account"` +} + +// setCredentialType sets the credential type for the role given its string form. +// Returns an error if the given credential type string is unknown. +func (r *roleEntry) setCredentialType(credentialType string) error { + switch credentialType { + case v5.CredentialTypePassword.String(): + r.CredentialType = v5.CredentialTypePassword + case v5.CredentialTypeRSAPrivateKey.String(): + r.CredentialType = v5.CredentialTypeRSAPrivateKey + case v5.CredentialTypeClientCertificate.String(): + r.CredentialType = v5.CredentialTypeClientCertificate + default: + return fmt.Errorf("invalid credential_type %q", credentialType) + } + + return nil +} + +// setCredentialConfig validates and sets the credential configuration +// for the role using the role's credential type. It will also populate +// all default values. Returns an error if the configuration is invalid. +func (r *roleEntry) setCredentialConfig(config map[string]string) error { + c := make(map[string]interface{}) + for k, v := range config { + c[k] = v + } + + switch r.CredentialType { + case v5.CredentialTypePassword: + generator, err := newPasswordGenerator(c) + if err != nil { + return err + } + cm, err := generator.configMap() + if err != nil { + return err + } + if len(cm) > 0 { + r.CredentialConfig = cm + } + case v5.CredentialTypeRSAPrivateKey: + generator, err := newRSAKeyGenerator(c) + if err != nil { + return err + } + cm, err := generator.configMap() + if err != nil { + return err + } + if len(cm) > 0 { + r.CredentialConfig = cm + } + case v5.CredentialTypeClientCertificate: + generator, err := newClientCertificateGenerator(c) + if err != nil { + return err + } + cm, err := generator.configMap() + if err != nil { + return err + } + if len(cm) > 0 { + r.CredentialConfig = cm + } + } + + return nil +} + +type staticAccount struct { + // Username to create or assume management for static accounts + Username string `json:"username"` + + // Password is the current password credential for static accounts. As an input, + // this is used/required when trying to assume management of an existing static + // account. Returned on credential request if the role's credential type is + // CredentialTypePassword. + Password string `json:"password"` + + // PrivateKey is the current private key credential for static accounts. 
As an input,
+	// this is used/required when trying to assume management of an existing static
+	// account. Returned on credential request if the role's credential type is
+	// CredentialTypeRSAPrivateKey.
+	PrivateKey []byte `json:"private_key"`
+
+	// LastVaultRotation represents the last time Vault rotated the password
+	LastVaultRotation time.Time `json:"last_vault_rotation"`
+
+	// RotationPeriod is the duration between each rotation, effectively a
+	// "time to live". This value is compared to the LastVaultRotation to
+	// determine if a password needs to be rotated
+	RotationPeriod time.Duration `json:"rotation_period"`
+
+	// RevokeUserOnDelete is a boolean flag to indicate if Vault should revoke
+	// the database user when the role is deleted
+	RevokeUserOnDelete bool `json:"revoke_user_on_delete"`
+}
+
+// NextRotationTime calculates the next rotation by adding the Rotation Period
+// to the last known Vault rotation
+func (s *staticAccount) NextRotationTime() time.Time {
+	return s.LastVaultRotation.Add(s.RotationPeriod)
+}
+
+// CredentialTTL calculates the approximate time remaining until the credential is
+// no longer valid. This is approximate because the periodic rotation is only
+// checked approximately every 5 seconds, and each rotation can take a small
+// amount of time to process. This can result in a negative TTL time while the
+// rotation function processes the Static Role and performs the rotation. If the
+// TTL is negative, zero is returned. Users should not trust passwords with a
+// zero TTL, as they are likely in the process of being rotated and will quickly
+// be invalidated.
+func (s *staticAccount) CredentialTTL() time.Duration {
+	next := s.NextRotationTime()
+	ttl := next.Sub(time.Now()).Round(time.Second)
+	if ttl < 0 {
+		ttl = time.Duration(0)
+	}
+	return ttl
+}
+
+const pathRoleHelpSyn = `
+Manage the roles that can be created with this backend.
+`
+
+const pathStaticRoleHelpSyn = `
+Manage the static roles that can be created with this backend.
+`
+
+const pathRoleHelpDesc = `
+This path lets you manage the roles that can be created with this backend.
+
+The "db_name" parameter is required and configures the name of the database
+connection to use.
+
+The "creation_statements" parameter customizes the string used to create the
+credentials. This can be a sequence of SQL queries, or other statement formats
+for a particular database type. Some substitution will be done to the statement
+strings for certain keys. The names of the variables must be surrounded by "{{"
+and "}}" to be replaced.
+
+	* "name" - The random username generated for the DB user.
+
+	* "password" - The random password generated for the DB user.
+
+	* "expiration" - The timestamp when this user will expire.
+
+Example of suitable creation_statements for a postgresql database plugin:
+
+	CREATE ROLE "{{name}}" WITH
+	  LOGIN
+	  PASSWORD '{{password}}'
+	  VALID UNTIL '{{expiration}}';
+	GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";
+
+The "revocation_statements" parameter customizes the statement string used to
+revoke a user. Example of suitable revocation_statements for a postgresql
+database plugin:
+
+	REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM {{name}};
+	REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM {{name}};
+	REVOKE USAGE ON SCHEMA public FROM {{name}};
+	DROP ROLE IF EXISTS {{name}};
+
+The "renew_statements" parameter customizes the statement string used to renew a
+user. 
+The "rollback_statements" parameter customizes the statement string used to
+roll back a change if needed.
+`
+
+const pathStaticRoleHelpDesc = `
+This path lets you manage the static roles that can be created with this
+backend. Static Roles are associated with a single database user, and manage the
+credential based on a rotation period, automatically rotating the credential.
+
+The "db_name" parameter is required and configures the name of the database
+connection to use.
+
+The "creation_statements" parameter customizes the string used to create the
+credentials. This can be a sequence of SQL queries, or other statement formats
+for a particular database type. Some substitution will be done to the statement
+strings for certain keys. The names of the variables must be surrounded by "{{"
+and "}}" to be replaced.
+
+	* "name" - The random username generated for the DB user.
+
+	* "password" - The random password generated for the DB user. Populated if the
+	static role's credential_type is 'password'.
+
+	* "public_key" - The public key generated for the DB user. Populated if the
+	static role's credential_type is 'rsa_private_key'.
+
+Example of suitable creation_statements for a postgresql database plugin:
+
+	CREATE ROLE "{{name}}" WITH
+	  LOGIN
+	  PASSWORD '{{password}}';
+	GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";
+
+The "revocation_statements" parameter customizes the statement string used to
+revoke a user. Example of suitable revocation_statements for a postgresql
+database plugin:
+
+	REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM {{name}};
+	REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM {{name}};
+	REVOKE USAGE ON SCHEMA public FROM {{name}};
+	DROP ROLE IF EXISTS {{name}};
+
+The "renew_statements" parameter customizes the statement string used to renew a
+user.
+The "rollback_statements" parameter customizes the statement string used to
+roll back a change if needed.
+`
diff --git a/builtin/logical/database/path_roles_test.go b/builtin/logical/database/path_roles_test.go
new file mode 100644
index 0000000..dc2edde
--- /dev/null
+++ b/builtin/logical/database/path_roles_test.go
@@ -0,0 +1,863 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package database + +import ( + "context" + "encoding/json" + "errors" + "strings" + "testing" + "time" + + "github.com/go-test/deep" + "github.com/hashicorp/vault/helper/namespace" + postgreshelper "github.com/hashicorp/vault/helper/testhelpers/postgresql" + v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestBackend_Roles_CredentialTypes(t *testing.T) { + config := logical.TestBackendConfig() + config.System = logical.TestSystemView() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + type args struct { + credentialType v5.CredentialType + credentialConfig map[string]string + } + tests := []struct { + name string + args args + wantErr bool + expectedResp map[string]interface{} + }{ + { + name: "role with invalid credential type", + args: args{ + credentialType: v5.CredentialType(10), + }, + wantErr: true, + }, + { + name: "role with invalid credential type and valid credential config", + args: args{ + credentialType: v5.CredentialType(7), + credentialConfig: map[string]string{ + "password_policy": "test-policy", + }, + }, + wantErr: true, + }, + { + name: "role with password credential type", + args: args{ + credentialType: v5.CredentialTypePassword, + }, + expectedResp: map[string]interface{}{ + "credential_type": v5.CredentialTypePassword.String(), + "credential_config": nil, + }, + }, + { + name: "role with password credential type and configuration", + args: args{ + credentialType: v5.CredentialTypePassword, + credentialConfig: map[string]string{ + "password_policy": "test-policy", + }, + }, + expectedResp: map[string]interface{}{ + "credential_type": v5.CredentialTypePassword.String(), + "credential_config": map[string]interface{}{ + "password_policy": "test-policy", + }, + }, + }, + { + name: "role with rsa_private_key credential type and default configuration", + args: args{ + credentialType: v5.CredentialTypeRSAPrivateKey, + }, + expectedResp: map[string]interface{}{ + "credential_type": v5.CredentialTypeRSAPrivateKey.String(), + "credential_config": map[string]interface{}{ + "key_bits": json.Number("2048"), + "format": "pkcs8", + }, + }, + }, + { + name: "role with rsa_private_key credential type and 2048 bit configuration", + args: args{ + credentialType: v5.CredentialTypeRSAPrivateKey, + credentialConfig: map[string]string{ + "key_bits": "2048", + }, + }, + expectedResp: map[string]interface{}{ + "credential_type": v5.CredentialTypeRSAPrivateKey.String(), + "credential_config": map[string]interface{}{ + "key_bits": json.Number("2048"), + "format": "pkcs8", + }, + }, + }, + { + name: "role with rsa_private_key credential type and 3072 bit configuration", + args: args{ + credentialType: v5.CredentialTypeRSAPrivateKey, + credentialConfig: map[string]string{ + "key_bits": "3072", + }, + }, + expectedResp: map[string]interface{}{ + "credential_type": v5.CredentialTypeRSAPrivateKey.String(), + "credential_config": map[string]interface{}{ + "key_bits": json.Number("3072"), + "format": "pkcs8", + }, + }, + }, + { + name: "role with rsa_private_key credential type and 4096 bit configuration", + args: args{ + credentialType: v5.CredentialTypeRSAPrivateKey, + credentialConfig: map[string]string{ + "key_bits": "4096", + }, + }, + expectedResp: map[string]interface{}{ + "credential_type": v5.CredentialTypeRSAPrivateKey.String(), 
+ "credential_config": map[string]interface{}{ + "key_bits": json.Number("4096"), + "format": "pkcs8", + }, + }, + }, + { + name: "role with rsa_private_key credential type invalid key_bits configuration", + args: args{ + credentialType: v5.CredentialTypeRSAPrivateKey, + credentialConfig: map[string]string{ + "key_bits": "256", + }, + }, + wantErr: true, + }, + { + name: "role with rsa_private_key credential type invalid format configuration", + args: args{ + credentialType: v5.CredentialTypeRSAPrivateKey, + credentialConfig: map[string]string{ + "format": "pkcs1", + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + req := &logical.Request{ + Operation: logical.CreateOperation, + Path: "roles/test", + Storage: config.StorageView, + Data: map[string]interface{}{ + "db_name": "test-database", + "creation_statements": "CREATE USER {{name}}", + "credential_type": tt.args.credentialType.String(), + "credential_config": tt.args.credentialConfig, + }, + } + + // Create the role + resp, err := b.HandleRequest(context.Background(), req) + if tt.wantErr { + assert.True(t, resp.IsError(), "expected error") + return + } + assert.False(t, resp.IsError()) + assert.Nil(t, err) + + // Read the role + req.Operation = logical.ReadOperation + resp, err = b.HandleRequest(context.Background(), req) + assert.False(t, resp.IsError()) + assert.Nil(t, err) + for k, v := range tt.expectedResp { + assert.Equal(t, v, resp.Data[k]) + } + + // Delete the role + req.Operation = logical.DeleteOperation + resp, err = b.HandleRequest(context.Background(), req) + assert.False(t, resp.IsError()) + assert.Nil(t, err) + }) + } +} + +func TestBackend_StaticRole_Config(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + lb, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + b, ok := lb.(*databaseBackend) + if !ok { + t.Fatal("could not convert to db backend") + } + defer b.Cleanup(context.Background()) + + cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + defer cleanup() + + // create the database user + createTestPGUser(t, connURL, dbUser, "password", testRoleStaticCreate) + + // Configure a connection + data := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "verify_connection": false, + "allowed_roles": []string{"plugin-role-test"}, + "name": "plugin-test", + } + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Test static role creation scenarios. 
Uses a map, so there is no guaranteed
+	// ordering; each case cleans up by deleting the role
+	testCases := map[string]struct {
+		account  map[string]interface{}
+		path     string
+		expected map[string]interface{}
+		err      error
+	}{
+		"basic": {
+			account: map[string]interface{}{
+				"username":        dbUser,
+				"rotation_period": "5400s",
+			},
+			path: "plugin-role-test",
+			expected: map[string]interface{}{
+				"username":        dbUser,
+				"rotation_period": float64(5400),
+			},
+		},
+		"missing rotation period": {
+			account: map[string]interface{}{
+				"username": dbUser,
+			},
+			path: "plugin-role-test",
+			err:  errors.New("rotation_period is required to create static accounts"),
+		},
+		"disallowed role config": {
+			account: map[string]interface{}{
+				"username":        dbUser,
+				"rotation_period": "5400s",
+			},
+			path: "disallowed-role",
+			err:  errors.New("\"disallowed-role\" is not an allowed role"),
+		},
+	}
+
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			data := map[string]interface{}{
+				"name":                "plugin-role-test",
+				"db_name":             "plugin-test",
+				"rotation_statements": testRoleStaticUpdate,
+			}
+
+			for k, v := range tc.account {
+				data[k] = v
+			}
+
+			path := "static-roles/" + tc.path
+
+			req := &logical.Request{
+				Operation: logical.CreateOperation,
+				Path:      path,
+				Storage:   config.StorageView,
+				Data:      data,
+			}
+
+			resp, err = b.HandleRequest(namespace.RootContext(nil), req)
+			if err != nil || (resp != nil && resp.IsError()) {
+				if tc.err == nil {
+					t.Fatalf("err:%s resp:%#v\n", err, resp)
+				}
+				if err != nil && tc.err.Error() == err.Error() {
+					// errors match
+					return
+				}
+				if err == nil && tc.err.Error() == resp.Error().Error() {
+					// errors match
+					return
+				}
+				t.Fatalf("expected err message: (%s), got (%s), response error: (%s)", tc.err, err, resp.Error())
+			}
+
+			if tc.err != nil {
+				if err == nil || (resp == nil || !resp.IsError()) {
+					t.Fatal("expected error, got none")
+				}
+			}
+
+			// Read the role
+			data = map[string]interface{}{}
+			req = &logical.Request{
+				Operation: logical.ReadOperation,
+				Path:      "static-roles/plugin-role-test",
+				Storage:   config.StorageView,
+				Data:      data,
+			}
+			resp, err = b.HandleRequest(namespace.RootContext(nil), req)
+			if err != nil || (resp != nil && resp.IsError()) {
+				t.Fatalf("err:%s resp:%#v\n", err, resp)
+			}
+
+			expected := tc.expected
+			actual := make(map[string]interface{})
+			dataKeys := []string{"username", "password", "last_vault_rotation", "rotation_period"}
+			for _, key := range dataKeys {
+				if v, ok := resp.Data[key]; ok {
+					actual[key] = v
+				}
+			}
+
+			if len(tc.expected) > 0 {
+				// verify a password is returned, but we don't care what its value is
+				if actual["password"] == "" {
+					t.Fatalf("expected result to contain password, but none found")
+				}
+				if v, ok := actual["last_vault_rotation"].(time.Time); !ok {
+					t.Fatalf("expected last_vault_rotation to be set to time.Time type, got: %#v", v)
+				}
+
+				// delete these values before the comparison, since we can't know them in
+				// advance
+				delete(actual, "password")
+				delete(actual, "last_vault_rotation")
+				if diff := deep.Equal(expected, actual); diff != nil {
+					t.Fatal(diff)
+				}
+			}
+
+			if len(tc.expected) == 0 && resp.Data["static_account"] != nil {
+				t.Fatalf("got unexpected static_account info: %#v", actual)
+			}
+
+			if diff := deep.Equal(resp.Data["db_name"], "plugin-test"); diff != nil {
+				t.Fatal(diff)
+			}
+
+			// Delete role for next run
+			req = &logical.Request{
+				Operation: logical.DeleteOperation,
+				Path:      "static-roles/plugin-role-test",
+				Storage:   config.StorageView,
+			}
+			resp, err = 
b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + }) + } +} + +func TestBackend_StaticRole_Updates(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + lb, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + b, ok := lb.(*databaseBackend) + if !ok { + t.Fatal("could not convert to db backend") + } + defer b.Cleanup(context.Background()) + + cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + defer cleanup() + + // create the database user + createTestPGUser(t, connURL, dbUser, "password", testRoleStaticCreate) + + // Configure a connection + data := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "verify_connection": false, + "allowed_roles": []string{"*"}, + "name": "plugin-test", + } + + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + data = map[string]interface{}{ + "name": "plugin-role-test-updates", + "db_name": "plugin-test", + "rotation_statements": testRoleStaticUpdate, + "username": dbUser, + "rotation_period": "5400s", + } + + req = &logical.Request{ + Operation: logical.CreateOperation, + Path: "static-roles/plugin-role-test-updates", + Storage: config.StorageView, + Data: data, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Read the role + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-roles/plugin-role-test-updates", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + rotation := resp.Data["rotation_period"].(float64) + + // capture the password to verify it doesn't change + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-creds/plugin-role-test-updates", + Storage: config.StorageView, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + username := resp.Data["username"].(string) + password := resp.Data["password"].(string) + if username == "" || password == "" { + t.Fatalf("expected both username/password, got (%s), (%s)", username, password) + } + + // update rotation_period + updateData := map[string]interface{}{ + "name": "plugin-role-test-updates", + "db_name": "plugin-test", + "username": dbUser, + "rotation_period": "6400s", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "static-roles/plugin-role-test-updates", + Storage: config.StorageView, + Data: updateData, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // re-read the role + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: 
"static-roles/plugin-role-test-updates", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + newRotation := resp.Data["rotation_period"].(float64) + if newRotation == rotation { + t.Fatalf("expected change in rotation, but got old value: %#v", newRotation) + } + + // re-capture the password to ensure it did not change + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-creds/plugin-role-test-updates", + Storage: config.StorageView, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + if username != resp.Data["username"].(string) { + t.Fatalf("usernames dont match!: (%s) / (%s)", username, resp.Data["username"].(string)) + } + if password != resp.Data["password"].(string) { + t.Fatalf("passwords dont match!: (%s) / (%s)", password, resp.Data["password"].(string)) + } + + // verify that rotation_period is only required when creating + updateData = map[string]interface{}{ + "name": "plugin-role-test-updates", + "db_name": "plugin-test", + "username": dbUser, + "rotation_statements": testRoleStaticUpdateRotation, + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "static-roles/plugin-role-test-updates", + Storage: config.StorageView, + Data: updateData, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // verify updating static username returns an error + updateData = map[string]interface{}{ + "name": "plugin-role-test-updates", + "db_name": "plugin-test", + "username": "statictestmodified", + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "static-roles/plugin-role-test-updates", + Storage: config.StorageView, + Data: updateData, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || !resp.IsError() { + t.Fatal("expected error on updating name") + } + err = resp.Error() + if err.Error() != "cannot update static account username" { + t.Fatalf("expected error on updating name, got: %s", err) + } +} + +func TestBackend_StaticRole_Role_name_check(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + lb, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + b, ok := lb.(*databaseBackend) + if !ok { + t.Fatal("could not convert to db backend") + } + defer b.Cleanup(context.Background()) + + cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + defer cleanup() + + // create the database user + createTestPGUser(t, connURL, dbUser, "password", testRoleStaticCreate) + + // Configure a connection + data := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "verify_connection": false, + "allowed_roles": []string{"*"}, + "name": "plugin-test", + } + + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // non-static role + 
data = map[string]interface{}{ + "name": "plugin-role-test", + "db_name": "plugin-test", + "creation_statements": testRoleStaticCreate, + "rotation_statements": testRoleStaticUpdate, + "revocation_statements": defaultRevocationSQL, + "default_ttl": "5m", + "max_ttl": "10m", + } + + req = &logical.Request{ + Operation: logical.CreateOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // create a static role with the same name, and expect failure + // static role + data = map[string]interface{}{ + "name": "plugin-role-test", + "db_name": "plugin-test", + "creation_statements": testRoleStaticCreate, + "rotation_statements": testRoleStaticUpdate, + "revocation_statements": defaultRevocationSQL, + } + + req = &logical.Request{ + Operation: logical.CreateOperation, + Path: "static-roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil { + t.Fatal(err) + } + if resp == nil || !resp.IsError() { + t.Fatalf("expected error, got none") + } + + // repeat, with a static role first + data = map[string]interface{}{ + "name": "plugin-role-test-2", + "db_name": "plugin-test", + "rotation_statements": testRoleStaticUpdate, + "username": dbUser, + "rotation_period": "1h", + } + + req = &logical.Request{ + Operation: logical.CreateOperation, + Path: "static-roles/plugin-role-test-2", + Storage: config.StorageView, + Data: data, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // create a non-static role with the same name, and expect failure + data = map[string]interface{}{ + "name": "plugin-role-test-2", + "db_name": "plugin-test", + "creation_statements": testRoleStaticCreate, + "revocation_statements": defaultRevocationSQL, + "default_ttl": "5m", + "max_ttl": "10m", + } + + req = &logical.Request{ + Operation: logical.CreateOperation, + Path: "roles/plugin-role-test-2", + Storage: config.StorageView, + Data: data, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil { + t.Fatal(err) + } + if resp == nil || !resp.IsError() { + t.Fatalf("expected error, got none") + } +} + +func TestWALsStillTrackedAfterUpdate(t *testing.T) { + ctx := context.Background() + b, storage, mockDB := getBackend(t) + defer b.Cleanup(ctx) + configureDBMount(t, storage) + + createRole(t, b, storage, mockDB, "hashicorp") + + generateWALFromFailedRotation(t, b, storage, mockDB, "hashicorp") + requireWALs(t, storage, 1) + + resp, err := b.HandleRequest(ctx, &logical.Request{ + Operation: logical.UpdateOperation, + Path: "static-roles/hashicorp", + Storage: storage, + Data: map[string]interface{}{ + "username": "hashicorp", + "rotation_period": "600s", + }, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatal(resp, err) + } + walIDs := requireWALs(t, storage, 1) + + // Now when we trigger a manual rotate, it should use the WAL's new password + // which will tell us that the in-memory structure still kept track of the + // WAL in addition to it still being in storage. 
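+	// Load the WAL entry first so its pre-generated password can be compared
+	// with the role's password after the manual rotation below.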
+ wal, err := b.findStaticWAL(ctx, storage, walIDs[0]) + if err != nil { + t.Fatal(err) + } + rotateRole(t, b, storage, mockDB, "hashicorp") + role, err := b.StaticRole(ctx, storage, "hashicorp") + if err != nil { + t.Fatal(err) + } + if role.StaticAccount.Password != wal.NewPassword { + t.Fatal() + } + requireWALs(t, storage, 0) +} + +func TestWALsDeletedOnRoleCreationFailed(t *testing.T) { + ctx := context.Background() + b, storage, mockDB := getBackend(t) + defer b.Cleanup(ctx) + configureDBMount(t, storage) + + for i := 0; i < 3; i++ { + mockDB.On("UpdateUser", mock.Anything, mock.Anything). + Return(v5.UpdateUserResponse{}, errors.New("forced error")). + Once() + resp, err := b.HandleRequest(ctx, &logical.Request{ + Operation: logical.CreateOperation, + Path: "static-roles/hashicorp", + Storage: storage, + Data: map[string]interface{}{ + "username": "hashicorp", + "db_name": "mockv5", + "rotation_period": "5s", + }, + }) + if err == nil { + t.Fatal("expected error from DB") + } + if !strings.Contains(err.Error(), "forced error") { + t.Fatal("expected forced error message", resp, err) + } + } + + requireWALs(t, storage, 0) +} + +func TestWALsDeletedOnRoleDeletion(t *testing.T) { + ctx := context.Background() + b, storage, mockDB := getBackend(t) + defer b.Cleanup(ctx) + configureDBMount(t, storage) + + // Create the roles + roleNames := []string{"hashicorp", "2"} + for _, roleName := range roleNames { + createRole(t, b, storage, mockDB, roleName) + } + + // Fail to rotate the roles + for _, roleName := range roleNames { + generateWALFromFailedRotation(t, b, storage, mockDB, roleName) + } + + // Should have 2 WALs hanging around + requireWALs(t, storage, 2) + + // Delete one of the static roles + resp, err := b.HandleRequest(ctx, &logical.Request{ + Operation: logical.DeleteOperation, + Path: "static-roles/hashicorp", + Storage: storage, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatal(resp, err) + } + + // 1 WAL should be cleared by the delete + requireWALs(t, storage, 1) +} + +func createRole(t *testing.T, b *databaseBackend, storage logical.Storage, mockDB *mockNewDatabase, roleName string) { + t.Helper() + mockDB.On("UpdateUser", mock.Anything, mock.Anything). + Return(v5.UpdateUserResponse{}, nil). + Once() + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.CreateOperation, + Path: "static-roles/" + roleName, + Storage: storage, + Data: map[string]interface{}{ + "username": roleName, + "db_name": "mockv5", + "rotation_period": "86400s", + }, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatal(resp, err) + } +} + +const testRoleStaticCreate = ` +CREATE ROLE "{{name}}" WITH + LOGIN + PASSWORD '{{password}}'; +` + +const testRoleStaticUpdate = ` +ALTER USER "{{name}}" WITH PASSWORD '{{password}}'; +` + +const testRoleStaticUpdateRotation = ` +ALTER USER "{{name}}" WITH PASSWORD '{{password}}';GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}"; +` diff --git a/builtin/logical/database/path_rotate_credentials.go b/builtin/logical/database/path_rotate_credentials.go new file mode 100644 index 0000000..49081a3 --- /dev/null +++ b/builtin/logical/database/path_rotate_credentials.go @@ -0,0 +1,261 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package database + +import ( + "context" + "fmt" + "time" + + "github.com/hashicorp/vault/helper/versions" + v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/queue" +) + +func pathRotateRootCredentials(b *databaseBackend) []*framework.Path { + return []*framework.Path{ + { + Pattern: "rotate-root/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + OperationVerb: "rotate", + OperationSuffix: "root-credentials", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of this database connection", + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathRotateRootCredentialsUpdate(), + ForwardPerformanceSecondary: true, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathRotateCredentialsUpdateHelpSyn, + HelpDescription: pathRotateCredentialsUpdateHelpDesc, + }, + { + Pattern: "rotate-role/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixDatabase, + OperationVerb: "rotate", + OperationSuffix: "static-role-credentials", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the static role", + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathRotateRoleCredentialsUpdate(), + ForwardPerformanceStandby: true, + ForwardPerformanceSecondary: true, + }, + }, + + HelpSynopsis: pathRotateRoleCredentialsUpdateHelpSyn, + HelpDescription: pathRotateRoleCredentialsUpdateHelpDesc, + }, + } +} + +func (b *databaseBackend) pathRotateRootCredentialsUpdate() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + if name == "" { + return logical.ErrorResponse(respErrEmptyName), nil + } + + config, err := b.DatabaseConfig(ctx, req.Storage, name) + if err != nil { + return nil, err + } + + rootUsername, ok := config.ConnectionDetails["username"].(string) + if !ok || rootUsername == "" { + return nil, fmt.Errorf("unable to rotate root credentials: no username in configuration") + } + + rootPassword, ok := config.ConnectionDetails["password"].(string) + if !ok || rootPassword == "" { + return nil, fmt.Errorf("unable to rotate root credentials: no password in configuration") + } + + dbi, err := b.GetConnection(ctx, req.Storage, name) + if err != nil { + return nil, err + } + + // Take the write lock on the instance + dbi.Lock() + defer func() { + dbi.Unlock() + // Even on error, still remove the connection + b.ClearConnectionId(name, dbi.id) + }() + defer func() { + // Close the plugin + dbi.closed = true + if err := dbi.database.Close(); err != nil { + b.Logger().Error("error closing the database plugin connection", "err", err) + } + }() + + generator, err := newPasswordGenerator(nil) + if err != nil { + return nil, fmt.Errorf("failed to construct credential generator: %s", err) + } + generator.PasswordPolicy = config.PasswordPolicy + + // Generate new credentials + oldPassword := config.ConnectionDetails["password"].(string) + newPassword, err := 
generator.generate(ctx, b, dbi.database)
+		if err != nil {
+			b.CloseIfShutdown(dbi, err)
+			return nil, fmt.Errorf("failed to generate password: %s", err)
+		}
+		config.ConnectionDetails["password"] = newPassword
+
+		// Write a WAL entry
+		walID, err := framework.PutWAL(ctx, req.Storage, rotateRootWALKey, &rotateRootCredentialsWAL{
+			ConnectionName: name,
+			UserName:       rootUsername,
+			OldPassword:    oldPassword,
+			NewPassword:    newPassword,
+		})
+		if err != nil {
+			return nil, err
+		}
+
+		updateReq := v5.UpdateUserRequest{
+			Username:       rootUsername,
+			CredentialType: v5.CredentialTypePassword,
+			Password: &v5.ChangePassword{
+				NewPassword: newPassword,
+				Statements: v5.Statements{
+					Commands: config.RootCredentialsRotateStatements,
+				},
+			},
+		}
+		newConfigDetails, err := dbi.database.UpdateUser(ctx, updateReq, true)
+		if err != nil {
+			return nil, fmt.Errorf("failed to update user: %w", err)
+		}
+		if newConfigDetails != nil {
+			config.ConnectionDetails = newConfigDetails
+		}
+
+		// 1.12.0 and 1.12.1 stored builtin plugins in storage, but 1.12.2 reverted
+		// that, so clean up any pre-existing stored builtin versions on write.
+		if versions.IsBuiltinVersion(config.PluginVersion) {
+			config.PluginVersion = ""
+		}
+		err = storeConfig(ctx, req.Storage, name, config)
+		if err != nil {
+			return nil, err
+		}
+
+		err = framework.DeleteWAL(ctx, req.Storage, walID)
+		if err != nil {
+			b.Logger().Warn("unable to delete WAL", "error", err, "WAL ID", walID)
+		}
+		return nil, nil
+	}
+}
+
+func (b *databaseBackend) pathRotateRoleCredentialsUpdate() framework.OperationFunc {
+	return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+		name := data.Get("name").(string)
+		if name == "" {
+			return logical.ErrorResponse("empty role name attribute given"), nil
+		}
+
+		role, err := b.StaticRole(ctx, req.Storage, name)
+		if err != nil {
+			return nil, err
+		}
+		if role == nil {
+			return logical.ErrorResponse("no static role found for role name"), nil
+		}
+
+		// In create/update of static accounts, we only care if the operation
+		// err'd, and this call does not return credentials
+		item, err := b.popFromRotationQueueByKey(name)
+		if err != nil {
+			item = &queue.Item{
+				Key: name,
+			}
+		}
+
+		input := &setStaticAccountInput{
+			RoleName: name,
+			Role:     role,
+		}
+		if walID, ok := item.Value.(string); ok {
+			input.WALID = walID
+		}
+		resp, err := b.setStaticAccount(ctx, req.Storage, input)
+		// if err is not nil, we need to attempt to update the priority and place
+		// this item back on the queue. The err should still be returned at the end
+		// of this method.
+		if err != nil {
+			b.logger.Warn("unable to rotate credentials in rotate-role", "error", err)
+			// Update the priority to re-try this rotation and re-add the item to
+			// the queue
+			item.Priority = time.Now().Add(10 * time.Second).Unix()
+
+			// Preserve the WALID if it was returned
+			if resp != nil && resp.WALID != "" {
+				item.Value = resp.WALID
+			}
+		} else {
+			item.Priority = resp.RotationTime.Add(role.StaticAccount.RotationPeriod).Unix()
+			// Clear any stored WAL ID as we must have successfully deleted our WAL to get here.
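+			// An item's Value holds a WAL ID only while a rotation is in
+			// flight, so emptying it marks the rotation as fully committed.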
+ item.Value = "" + } + + // Add their rotation to the queue + if err := b.pushItem(item); err != nil { + return nil, err + } + + if err != nil { + return nil, fmt.Errorf("unable to finish rotating credentials; retries will "+ + "continue in the background but it is also safe to retry manually: %w", err) + } + + // return any err from the setStaticAccount call + return nil, nil + } +} + +const pathRotateCredentialsUpdateHelpSyn = ` +Request to rotate the root credentials for a certain database connection. +` + +const pathRotateCredentialsUpdateHelpDesc = ` +This path attempts to rotate the root credentials for the given database. +` + +const pathRotateRoleCredentialsUpdateHelpSyn = ` +Request to rotate the credentials for a static user account. +` + +const pathRotateRoleCredentialsUpdateHelpDesc = ` +This path attempts to rotate the credentials for the given static user account. +` diff --git a/builtin/logical/database/rollback.go b/builtin/logical/database/rollback.go new file mode 100644 index 0000000..22ce616 --- /dev/null +++ b/builtin/logical/database/rollback.go @@ -0,0 +1,116 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package database + +import ( + "context" + "errors" + + "github.com/hashicorp/vault/sdk/database/dbplugin" + v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/mapstructure" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// WAL storage key used for the rollback of root database credentials +const rotateRootWALKey = "rotateRootWALKey" + +// WAL entry used for the rollback of root database credentials +type rotateRootCredentialsWAL struct { + ConnectionName string + UserName string + NewPassword string + OldPassword string +} + +// walRollback handles WAL entries that result from partial failures +// to rotate the root credentials of a database. It is responsible +// for rolling back root database credentials when doing so would +// reconcile the credentials with Vault storage. +func (b *databaseBackend) walRollback(ctx context.Context, req *logical.Request, kind string, data interface{}) error { + if kind != rotateRootWALKey { + return errors.New("unknown type to rollback") + } + + // Decode the WAL data + var entry rotateRootCredentialsWAL + if err := mapstructure.Decode(data, &entry); err != nil { + return err + } + + // Get the current database configuration from storage + config, err := b.DatabaseConfig(ctx, req.Storage, entry.ConnectionName) + if err != nil { + return err + } + + // The password in storage doesn't match the new password + // in the WAL entry. This means there was a partial failure + // to update either the database or storage. + if config.ConnectionDetails["password"] != entry.NewPassword { + // Clear any cached connection to inform the rollback decision + err := b.ClearConnection(entry.ConnectionName) + if err != nil { + return err + } + + // Attempt to get a connection with the current configuration. + // If successful, the WAL entry can be deleted. This means + // the root credentials are the same according to the database + // and storage. + _, err = b.GetConnection(ctx, req.Storage, entry.ConnectionName) + if err == nil { + return nil + } + + return b.rollbackDatabaseCredentials(ctx, config, entry) + } + + // The password in storage matches the new password in + // the WAL entry, so there is nothing to roll back. 
This + // means the new password was successfully updated in the + // database and storage, but the WAL wasn't deleted. + return nil +} + +// rollbackDatabaseCredentials rolls back root database credentials for +// the connection associated with the passed WAL entry. It will create +// a connection to the database using the WAL entry new password in +// order to alter the password to be the WAL entry old password. +func (b *databaseBackend) rollbackDatabaseCredentials(ctx context.Context, config *DatabaseConfig, entry rotateRootCredentialsWAL) error { + // Attempt to get a connection with the WAL entry new password. + config.ConnectionDetails["password"] = entry.NewPassword + dbi, err := b.GetConnectionWithConfig(ctx, entry.ConnectionName, config) + if err != nil { + return err + } + + // Ensure the connection used to roll back the database password is not cached + defer func() { + if err := b.ClearConnection(entry.ConnectionName); err != nil { + b.Logger().Error("error closing database plugin connection", "err", err) + } + }() + + updateReq := v5.UpdateUserRequest{ + Username: entry.UserName, + CredentialType: v5.CredentialTypePassword, + Password: &v5.ChangePassword{ + NewPassword: entry.OldPassword, + Statements: v5.Statements{ + Commands: config.RootCredentialsRotateStatements, + }, + }, + } + + // It actually is the root user here, but we only want to use SetCredentials since + // RotateRootCredentials doesn't give any control over what password is used + _, err = dbi.database.UpdateUser(ctx, updateReq, false) + if status.Code(err) == codes.Unimplemented || err == dbplugin.ErrPluginStaticUnsupported { + return nil + } + return err +} diff --git a/builtin/logical/database/rollback_test.go b/builtin/logical/database/rollback_test.go new file mode 100644 index 0000000..8f36fe2 --- /dev/null +++ b/builtin/logical/database/rollback_test.go @@ -0,0 +1,425 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package database + +import ( + "context" + "strings" + "testing" + "time" + + "github.com/hashicorp/vault/helper/namespace" + postgreshelper "github.com/hashicorp/vault/helper/testhelpers/postgresql" + v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + databaseUser = "postgres" + defaultPassword = "secret" +) + +// Tests that the WAL rollback function rolls back the database password. 
+// The database password should be rolled back when: +// - A WAL entry exists +// - Password has been altered on the database +// - Password has not been updated in storage +func TestBackend_RotateRootCredentials_WAL_rollback(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + lb, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + dbBackend, ok := lb.(*databaseBackend) + if !ok { + t.Fatal("could not convert to db backend") + } + defer lb.Cleanup(context.Background()) + + cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + defer cleanup() + + connURL = strings.ReplaceAll(connURL, "postgres:secret", "{{username}}:{{password}}") + + // Configure a connection to the database + data := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "allowed_roles": []string{"plugin-role-test"}, + "username": databaseUser, + "password": defaultPassword, + } + resp, err := lb.HandleRequest(namespace.RootContext(nil), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Create a role + data = map[string]interface{}{ + "db_name": "plugin-test", + "creation_statements": testRole, + "max_ttl": "10m", + } + resp, err = lb.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Read credentials to verify this initially works + credReq := &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/plugin-role-test", + Storage: config.StorageView, + Data: make(map[string]interface{}), + } + credResp, err := lb.HandleRequest(context.Background(), credReq) + if err != nil || (credResp != nil && credResp.IsError()) { + t.Fatalf("err:%s resp:%v\n", err, credResp) + } + + // Get a connection to the database plugin + dbi, err := dbBackend.GetConnection(context.Background(), + config.StorageView, "plugin-test") + if err != nil { + t.Fatal(err) + } + + // Alter the database password so it no longer matches what is in storage + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + updateReq := v5.UpdateUserRequest{ + Username: databaseUser, + Password: &v5.ChangePassword{ + NewPassword: "newSecret", + }, + } + _, err = dbi.database.UpdateUser(ctx, updateReq, false) + if err != nil { + t.Fatal(err) + } + + // Clear the plugin connection to verify we're no longer able to connect + err = dbBackend.ClearConnection("plugin-test") + if err != nil { + t.Fatal(err) + } + + // Reading credentials should no longer work + credResp, err = lb.HandleRequest(namespace.RootContext(nil), credReq) + if err == nil { + t.Fatalf("expected authentication to fail when reading credentials") + } + + // Put a WAL entry that will be used for rolling back the database password + walEntry := &rotateRootCredentialsWAL{ + ConnectionName: "plugin-test", + UserName: databaseUser, + OldPassword: defaultPassword, + NewPassword: "newSecret", + } + _, err = framework.PutWAL(context.Background(), config.StorageView, rotateRootWALKey, walEntry) + if err != nil { + t.Fatal(err) + 
} + assertWALCount(t, config.StorageView, 1, rotateRootWALKey) + + // Trigger an immediate RollbackOperation so that the WAL rollback + // function can use the WAL entry to roll back the database password + _, err = lb.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.RollbackOperation, + Path: "", + Storage: config.StorageView, + Data: map[string]interface{}{ + "immediate": true, + }, + }) + if err != nil { + t.Fatal(err) + } + assertWALCount(t, config.StorageView, 0, rotateRootWALKey) + + // Reading credentials should work again after the database + // password has been rolled back. + credResp, err = lb.HandleRequest(namespace.RootContext(nil), credReq) + if err != nil || (credResp != nil && credResp.IsError()) { + t.Fatalf("err:%s resp:%v\n", err, credResp) + } +} + +// Tests that the WAL rollback function does not roll back the database password. +// The database password should not be rolled back when: +// - A WAL entry exists +// - Password has not been altered on the database +// - Password has not been updated in storage +func TestBackend_RotateRootCredentials_WAL_no_rollback_1(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + lb, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + defer lb.Cleanup(context.Background()) + + cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + defer cleanup() + + connURL = strings.ReplaceAll(connURL, "postgres:secret", "{{username}}:{{password}}") + + // Configure a connection to the database + data := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "allowed_roles": []string{"plugin-role-test"}, + "username": databaseUser, + "password": defaultPassword, + } + resp, err := lb.HandleRequest(namespace.RootContext(nil), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Create a role + data = map[string]interface{}{ + "db_name": "plugin-test", + "creation_statements": testRole, + "max_ttl": "10m", + } + resp, err = lb.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Read credentials to verify this initially works + credReq := &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/plugin-role-test", + Storage: config.StorageView, + Data: make(map[string]interface{}), + } + credResp, err := lb.HandleRequest(context.Background(), credReq) + if err != nil || (credResp != nil && credResp.IsError()) { + t.Fatalf("err:%s resp:%v\n", err, credResp) + } + + // Put a WAL entry + walEntry := &rotateRootCredentialsWAL{ + ConnectionName: "plugin-test", + UserName: databaseUser, + OldPassword: defaultPassword, + NewPassword: "newSecret", + } + _, err = framework.PutWAL(context.Background(), config.StorageView, rotateRootWALKey, walEntry) + if err != nil { + t.Fatal(err) + } + assertWALCount(t, config.StorageView, 1, rotateRootWALKey) + + // Trigger an immediate RollbackOperation + _, err = lb.HandleRequest(context.Background(), &logical.Request{ + Operation: 
logical.RollbackOperation, + Path: "", + Storage: config.StorageView, + Data: map[string]interface{}{ + "immediate": true, + }, + }) + if err != nil { + t.Fatal(err) + } + assertWALCount(t, config.StorageView, 0, rotateRootWALKey) + + // Reading credentials should work + credResp, err = lb.HandleRequest(namespace.RootContext(nil), credReq) + if err != nil || (credResp != nil && credResp.IsError()) { + t.Fatalf("err:%s resp:%v\n", err, credResp) + } +} + +// Tests that the WAL rollback function does not roll back the database password. +// The database password should not be rolled back when: +// - A WAL entry exists +// - Password has been altered on the database +// - Password has been updated in storage +func TestBackend_RotateRootCredentials_WAL_no_rollback_2(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + lb, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + dbBackend, ok := lb.(*databaseBackend) + if !ok { + t.Fatal("could not convert to db backend") + } + defer lb.Cleanup(context.Background()) + + cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + defer cleanup() + + connURL = strings.ReplaceAll(connURL, "postgres:secret", "{{username}}:{{password}}") + + // Configure a connection to the database + data := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "allowed_roles": []string{"plugin-role-test"}, + "username": databaseUser, + "password": defaultPassword, + } + resp, err := lb.HandleRequest(namespace.RootContext(nil), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Create a role + data = map[string]interface{}{ + "db_name": "plugin-test", + "creation_statements": testRole, + "max_ttl": "10m", + } + resp, err = lb.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Read credentials to verify this initially works + credReq := &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/plugin-role-test", + Storage: config.StorageView, + Data: make(map[string]interface{}), + } + credResp, err := lb.HandleRequest(context.Background(), credReq) + if err != nil || (credResp != nil && credResp.IsError()) { + t.Fatalf("err:%s resp:%v\n", err, credResp) + } + + // Get a connection to the database plugin + dbi, err := dbBackend.GetConnection(context.Background(), config.StorageView, "plugin-test") + if err != nil { + t.Fatal(err) + } + + // Alter the database password + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + updateReq := v5.UpdateUserRequest{ + Username: databaseUser, + Password: &v5.ChangePassword{ + NewPassword: "newSecret", + }, + } + _, err = dbi.database.UpdateUser(ctx, updateReq, false) + if err != nil { + t.Fatal(err) + } + + // Update storage with the new password + dbConfig, err := dbBackend.DatabaseConfig(context.Background(), config.StorageView, + "plugin-test") + if err != nil { + t.Fatal(err) + } + dbConfig.ConnectionDetails["password"] = "newSecret" + entry, err 
:= logical.StorageEntryJSON("config/plugin-test", dbConfig) + if err != nil { + t.Fatal(err) + } + err = config.StorageView.Put(context.Background(), entry) + if err != nil { + t.Fatal(err) + } + + // Clear the plugin connection to verify we can connect to the database + err = dbBackend.ClearConnection("plugin-test") + if err != nil { + t.Fatal(err) + } + + // Reading credentials should work + credResp, err = lb.HandleRequest(namespace.RootContext(nil), credReq) + if err != nil || (credResp != nil && credResp.IsError()) { + t.Fatalf("err:%s resp:%v\n", err, credResp) + } + + // Put a WAL entry + walEntry := &rotateRootCredentialsWAL{ + ConnectionName: "plugin-test", + UserName: databaseUser, + OldPassword: defaultPassword, + NewPassword: "newSecret", + } + _, err = framework.PutWAL(context.Background(), config.StorageView, rotateRootWALKey, walEntry) + if err != nil { + t.Fatal(err) + } + assertWALCount(t, config.StorageView, 1, rotateRootWALKey) + + // Trigger an immediate RollbackOperation + _, err = lb.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.RollbackOperation, + Path: "", + Storage: config.StorageView, + Data: map[string]interface{}{ + "immediate": true, + }, + }) + if err != nil { + t.Fatal(err) + } + assertWALCount(t, config.StorageView, 0, rotateRootWALKey) + + // Reading credentials should work + credResp, err = lb.HandleRequest(namespace.RootContext(nil), credReq) + if err != nil || (credResp != nil && credResp.IsError()) { + t.Fatalf("err:%s resp:%v\n", err, credResp) + } +} diff --git a/builtin/logical/database/rotation.go b/builtin/logical/database/rotation.go new file mode 100644 index 0000000..b7e84d4 --- /dev/null +++ b/builtin/logical/database/rotation.go @@ -0,0 +1,690 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package database + +import ( + "context" + "errors" + "fmt" + "strconv" + "time" + + "github.com/hashicorp/go-secure-stdlib/strutil" + v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/locksutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/queue" +) + +const ( + // Default interval to check the queue for items needing rotation + defaultQueueTickSeconds = 5 + + // Config key to set an alternate interval + queueTickIntervalKey = "rotation_queue_tick_interval" + + // WAL storage key used for static account rotations + staticWALKey = "staticRotationKey" +) + +// populateQueue loads the priority queue with existing static accounts. This +// occurs at initialization, after any WAL entries of failed or interrupted +// rotations have been processed. It lists the roles from storage and searches +// for any that have an associated static account, then adds them to the +// priority queue for rotations. 
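+// Queue priority is the Unix time of a role's next scheduled rotation, so the
+// earliest-due role is always popped first; a role with a pending WAL is
+// enqueued at the current time so its rotation is retried immediately.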
+func (b *databaseBackend) populateQueue(ctx context.Context, s logical.Storage) {
+	log := b.Logger()
+	log.Info("populating role rotation queue")
+
+	// Build map of role name / wal entries
+	walMap, err := b.loadStaticWALs(ctx, s)
+	if err != nil {
+		log.Warn("unable to load rotation WALs", "error", err)
+	}
+
+	roles, err := s.List(ctx, databaseStaticRolePath)
+	if err != nil {
+		log.Warn("unable to list roles for enqueueing", "error", err)
+		return
+	}
+
+	for _, roleName := range roles {
+		select {
+		case <-ctx.Done():
+			log.Info("rotation queue restore cancelled")
+			return
+		default:
+		}
+
+		role, err := b.StaticRole(ctx, s, roleName)
+		if err != nil {
+			log.Warn("unable to read static role", "error", err, "role", roleName)
+			continue
+		}
+
+		item := queue.Item{
+			Key:      roleName,
+			Priority: role.StaticAccount.NextRotationTime().Unix(),
+		}
+
+		// Check if role name is in map
+		walEntry := walMap[roleName]
+		if walEntry != nil {
+			// Check the WAL entry's last Vault rotation time
+			if walEntry.LastVaultRotation.IsZero() {
+				// A WAL's last Vault rotation can only ever be 0 for a role that
+				// was never successfully created. So we know this WAL couldn't
+				// have been created for this role we just retrieved from storage.
+				// i.e. it must be a hangover from a previous attempt at creating
+				// a role with the same name
+				log.Debug("deleting WAL with zero last rotation time", "WAL ID", walEntry.walID, "created", walEntry.walCreatedAt)
+				if err := framework.DeleteWAL(ctx, s, walEntry.walID); err != nil {
+					log.Warn("unable to delete zero-time WAL", "error", err, "WAL ID", walEntry.walID)
+				}
+			} else if walEntry.LastVaultRotation.Before(role.StaticAccount.LastVaultRotation) {
+				// The WAL's last Vault rotation record is older than the role's data, so
+				// delete and move on
+				log.Debug("deleting outdated WAL", "WAL ID", walEntry.walID, "created", walEntry.walCreatedAt)
+				if err := framework.DeleteWAL(ctx, s, walEntry.walID); err != nil {
+					log.Warn("unable to delete WAL", "error", err, "WAL ID", walEntry.walID)
+				}
+			} else {
+				log.Info("found WAL for role", "role", item.Key, "WAL ID", walEntry.walID)
+				item.Value = walEntry.walID
+				item.Priority = time.Now().Unix()
+			}
+		}
+
+		if err := b.pushItem(&item); err != nil {
+			log.Warn("unable to enqueue item", "error", err, "role", roleName)
+		}
+	}
+}
+
+// runTicker kicks off a periodic ticker that invokes the automatic credential
+// rotation method at a determined interval. The default interval is 5 seconds.
+func (b *databaseBackend) runTicker(ctx context.Context, queueTickInterval time.Duration, s logical.Storage) {
+	b.logger.Info("starting periodic ticker")
+	tick := time.NewTicker(queueTickInterval)
+	defer tick.Stop()
+	for {
+		select {
+		case <-tick.C:
+			b.rotateCredentials(ctx, s)
+
+		case <-ctx.Done():
+			b.logger.Info("stopping periodic ticker")
+			return
+		}
+	}
+}
+
+// setCredentialsWAL is used to store information in a WAL that can retry a
+// credential setting or rotation in the event of partial failure.
+type setCredentialsWAL struct {
+	CredentialType v5.CredentialType `json:"credential_type"`
+	NewPassword    string            `json:"new_password"`
+	NewPublicKey   []byte            `json:"new_public_key"`
+	NewPrivateKey  []byte            `json:"new_private_key"`
+	RoleName       string            `json:"role_name"`
+	Username       string            `json:"username"`
+
+	LastVaultRotation time.Time `json:"last_vault_rotation"`
+
+	// Private fields which will not be included in json.Marshal/Unmarshal.
+	walID        string
+	walCreatedAt int64 // Unix time at which the WAL was created. 
+}
+
+// credentialIsSet returns true if the credential associated with the
+// CredentialType field is properly set. See field comments for a
+// mapping of CredentialType values to respective credential fields.
+func (w *setCredentialsWAL) credentialIsSet() bool {
+	if w == nil {
+		return false
+	}
+
+	switch w.CredentialType {
+	case v5.CredentialTypePassword:
+		return w.NewPassword != ""
+	case v5.CredentialTypeRSAPrivateKey:
+		return len(w.NewPublicKey) > 0
+	default:
+		return false
+	}
+}
+
+// rotateCredentials sets a new password for a static account. This method is
+// invoked in the runTicker method, which is in its own go-routine, and invoked
+// periodically (approximately every 5 seconds).
+//
+// This method loops through the priority queue, popping the highest priority
+// item until it encounters the first item that does not yet need rotation,
+// based on the current time.
+func (b *databaseBackend) rotateCredentials(ctx context.Context, s logical.Storage) {
+	for b.rotateCredential(ctx, s) {
+	}
+}
+
+func (b *databaseBackend) rotateCredential(ctx context.Context, s logical.Storage) bool {
+	// Quit rotating credentials if shutdown has started
+	select {
+	case <-ctx.Done():
+		return false
+	default:
+	}
+	item, err := b.popFromRotationQueue()
+	if err != nil {
+		if err != queue.ErrEmpty {
+			b.logger.Error("error popping item from queue", "err", err)
+		}
+		return false
+	}
+
+	// Guard against possible nil item
+	if item == nil {
+		return false
+	}
+
+	roleName := item.Key
+	logger := b.Logger().With("role", roleName)
+
+	// Grab the exclusive lock for this Role, to make sure we don't incur any
+	// writes during the rotation process
+	lock := locksutil.LockForKey(b.roleLocks, roleName)
+	lock.Lock()
+	defer lock.Unlock()
+
+	// Validate the role still exists
+	role, err := b.StaticRole(ctx, s, roleName)
+	if err != nil {
+		logger.Error("unable to load role", "error", err)
+
+		item.Priority = time.Now().Add(10 * time.Second).Unix()
+		if err := b.pushItem(item); err != nil {
+			logger.Error("unable to push item on to queue", "error", err)
+		}
+		return true
+	}
+	if role == nil {
+		logger.Warn("role not found", "error", err)
+		return true
+	}
+
+	logger = logger.With("database", role.DBName)
+
+	// If "now" is less than the Item priority, then this item does not need to
+	// be rotated
+	if time.Now().Unix() < item.Priority {
+		if err := b.pushItem(item); err != nil {
+			logger.Error("unable to push item on to queue", "error", err)
+		}
+		// Break out of the for loop
+		return false
+	}
+
+	input := &setStaticAccountInput{
+		RoleName: roleName,
+		Role:     role,
+	}
+
+	// If there is a WAL entry related to this Role, the corresponding WAL ID
+	// should be stored in the Item's Value field.
+	if walID, ok := item.Value.(string); ok {
+		input.WALID = walID
+	}
+
+	resp, err := b.setStaticAccount(ctx, s, input)
+	if err != nil {
+		logger.Error("unable to rotate credentials in periodic function", "error", err)
+
+		// Increment the priority enough so that the next call to this method
+		// likely will not attempt to rotate it, as a back-off of sorts
+		item.Priority = time.Now().Add(10 * time.Second).Unix()
+
+		// Preserve the WALID if it was returned
+		if resp != nil && resp.WALID != "" {
+			item.Value = resp.WALID
+		}
+
+		if err := b.pushItem(item); err != nil {
+			logger.Error("unable to push item on to queue", "error", err)
+		}
+		// Go to next item
+		return true
+	}
+	// Clear any stored WAL ID as we must have successfully deleted our WAL to get here. 
+ item.Value = "" + + lvr := resp.RotationTime + if lvr.IsZero() { + lvr = time.Now() + } + + // Update priority and push updated Item to the queue + nextRotation := lvr.Add(role.StaticAccount.RotationPeriod) + item.Priority = nextRotation.Unix() + if err := b.pushItem(item); err != nil { + logger.Warn("unable to push item on to queue", "error", err) + } + return true +} + +// findStaticWAL loads a WAL entry by ID. If found, only return the WAL if it +// is of type staticWALKey, otherwise return nil +func (b *databaseBackend) findStaticWAL(ctx context.Context, s logical.Storage, id string) (*setCredentialsWAL, error) { + wal, err := framework.GetWAL(ctx, s, id) + if err != nil { + return nil, err + } + + if wal == nil || wal.Kind != staticWALKey { + return nil, nil + } + + data := wal.Data.(map[string]interface{}) + walEntry := setCredentialsWAL{ + walID: id, + walCreatedAt: wal.CreatedAt, + NewPassword: data["new_password"].(string), + RoleName: data["role_name"].(string), + Username: data["username"].(string), + } + lvr, err := time.Parse(time.RFC3339, data["last_vault_rotation"].(string)) + if err != nil { + return nil, err + } + walEntry.LastVaultRotation = lvr + + return &walEntry, nil +} + +type setStaticAccountInput struct { + RoleName string + Role *roleEntry + WALID string +} + +type setStaticAccountOutput struct { + RotationTime time.Time + // Optional return field, in the event WAL was created and not destroyed + // during the operation + WALID string +} + +// setStaticAccount sets the credential for a static account associated with a +// Role. This method does many things: +// - verifies role exists and is in the allowed roles list +// - loads an existing WAL entry if WALID input is given, otherwise creates a +// new WAL entry +// - gets a database connection +// - accepts an input credential, otherwise generates a new one for +// the role's credential type +// - sets new credential for the static account +// - uses WAL for ensuring new credentials are not lost if storage to Vault fails, +// resulting in a partial failure. +// +// This method does not perform any operations on the priority queue. Those +// tasks must be handled outside of this method. +func (b *databaseBackend) setStaticAccount(ctx context.Context, s logical.Storage, input *setStaticAccountInput) (*setStaticAccountOutput, error) { + if input == nil || input.Role == nil || input.RoleName == "" { + return nil, errors.New("input was empty when attempting to set credentials for static account") + } + // Re-use WAL ID if present, otherwise PUT a new WAL + output := &setStaticAccountOutput{WALID: input.WALID} + + dbConfig, err := b.DatabaseConfig(ctx, s, input.Role.DBName) + if err != nil { + return output, err + } + if dbConfig == nil { + return output, errors.New("the config is currently unset") + } + + // If role name isn't in the database's allowed roles, send back a + // permission denied. 
+ if !strutil.StrListContains(dbConfig.AllowedRoles, "*") && !strutil.StrListContainsGlob(dbConfig.AllowedRoles, input.RoleName) { + return output, fmt.Errorf("%q is not an allowed role", input.RoleName) + } + + // If the plugin doesn't support the credential type, return an error + if !dbConfig.SupportsCredentialType(input.Role.CredentialType) { + return output, fmt.Errorf("unsupported credential_type: %q", + input.Role.CredentialType.String()) + } + + // Get the Database object + dbi, err := b.GetConnection(ctx, s, input.Role.DBName) + if err != nil { + return output, err + } + + dbi.RLock() + defer dbi.RUnlock() + + updateReq := v5.UpdateUserRequest{ + Username: input.Role.StaticAccount.Username, + } + statements := v5.Statements{ + Commands: input.Role.Statements.Rotation, + } + + // Use credential from input if available. This happens if we're restoring from + // a WAL item or processing the rotation queue with an item that has a WAL + // associated with it + if output.WALID != "" { + wal, err := b.findStaticWAL(ctx, s, output.WALID) + if err != nil { + return output, fmt.Errorf("error retrieving WAL entry: %w", err) + } + switch { + case wal == nil: + b.Logger().Error("expected role to have WAL, but WAL not found in storage", "role", input.RoleName, "WAL ID", output.WALID) + + // Generate a new WAL entry and credential + output.WALID = "" + case !wal.credentialIsSet(): + b.Logger().Error("expected WAL to have a new credential set, but empty", "role", input.RoleName, "WAL ID", output.WALID) + if err := framework.DeleteWAL(ctx, s, output.WALID); err != nil { + b.Logger().Warn("failed to delete WAL with no new credential", "error", err, "WAL ID", output.WALID) + } + + // Generate a new WAL entry and credential + output.WALID = "" + case wal.CredentialType == v5.CredentialTypePassword: + // Roll forward by using the credential in the existing WAL entry + updateReq.CredentialType = v5.CredentialTypePassword + updateReq.Password = &v5.ChangePassword{ + NewPassword: wal.NewPassword, + Statements: statements, + } + input.Role.StaticAccount.Password = wal.NewPassword + case wal.CredentialType == v5.CredentialTypeRSAPrivateKey: + // Roll forward by using the credential in the existing WAL entry + updateReq.CredentialType = v5.CredentialTypeRSAPrivateKey + updateReq.PublicKey = &v5.ChangePublicKey{ + NewPublicKey: wal.NewPublicKey, + Statements: statements, + } + input.Role.StaticAccount.PrivateKey = wal.NewPrivateKey + } + } + + // Generate a new credential + if output.WALID == "" { + walEntry := &setCredentialsWAL{ + RoleName: input.RoleName, + Username: input.Role.StaticAccount.Username, + LastVaultRotation: input.Role.StaticAccount.LastVaultRotation, + } + + switch input.Role.CredentialType { + case v5.CredentialTypePassword: + generator, err := newPasswordGenerator(input.Role.CredentialConfig) + if err != nil { + return output, fmt.Errorf("failed to construct credential generator: %s", err) + } + + // Fall back to database config-level password policy if not set on role + if generator.PasswordPolicy == "" { + generator.PasswordPolicy = dbConfig.PasswordPolicy + } + + // Generate the password + newPassword, err := generator.generate(ctx, b, dbi.database) + if err != nil { + b.CloseIfShutdown(dbi, err) + return output, fmt.Errorf("failed to generate password: %s", err) + } + + // Set new credential in WAL entry and update user request + walEntry.NewPassword = newPassword + updateReq.CredentialType = v5.CredentialTypePassword + updateReq.Password = &v5.ChangePassword{ + NewPassword: 
newPassword,
+				Statements:  statements,
+			}
+
+			// Set new credential in static account
+			input.Role.StaticAccount.Password = newPassword
+		case v5.CredentialTypeRSAPrivateKey:
+			generator, err := newRSAKeyGenerator(input.Role.CredentialConfig)
+			if err != nil {
+				return output, fmt.Errorf("failed to construct credential generator: %s", err)
+			}
+
+			// Generate the RSA key pair
+			public, private, err := generator.generate(b.GetRandomReader())
+			if err != nil {
+				return output, fmt.Errorf("failed to generate RSA key pair: %s", err)
+			}
+
+			// Set new credential in WAL entry and update user request
+			walEntry.NewPublicKey = public
+			updateReq.CredentialType = v5.CredentialTypeRSAPrivateKey
+			updateReq.PublicKey = &v5.ChangePublicKey{
+				NewPublicKey: public,
+				Statements:   statements,
+			}
+
+			// Set new credential in static account
+			input.Role.StaticAccount.PrivateKey = private
+		}
+
+		output.WALID, err = framework.PutWAL(ctx, s, staticWALKey, walEntry)
+		if err != nil {
+			return output, fmt.Errorf("error writing WAL entry: %w", err)
+		}
+		b.Logger().Debug("writing WAL", "role", input.RoleName, "WAL ID", output.WALID)
+	}
+
+	_, err = dbi.database.UpdateUser(ctx, updateReq, false)
+	if err != nil {
+		b.CloseIfShutdown(dbi, err)
+		return output, fmt.Errorf("error setting credentials: %w", err)
+	}
+
+	// Store updated role information
+	// lvr is the known LastVaultRotation
+	lvr := time.Now()
+	input.Role.StaticAccount.LastVaultRotation = lvr
+	output.RotationTime = lvr
+
+	entry, err := logical.StorageEntryJSON(databaseStaticRolePath+input.RoleName, input.Role)
+	if err != nil {
+		return output, err
+	}
+	if err := s.Put(ctx, entry); err != nil {
+		return output, err
+	}
+
+	// Cleanup WAL after successfully rotating and pushing new item on to queue
+	if err := framework.DeleteWAL(ctx, s, output.WALID); err != nil {
+		b.Logger().Warn("error deleting WAL", "WAL ID", output.WALID, "error", err)
+		return output, err
+	}
+	b.Logger().Debug("deleted WAL", "WAL ID", output.WALID)
+
+	// The WAL has been deleted, return new setStaticAccountOutput without it
+	return &setStaticAccountOutput{RotationTime: lvr}, nil
+}
+
+// initQueue performs the necessary checks and initializations needed to perform
+// automatic credential rotation for roles associated with static accounts. This
+// method verifies if a queue is needed (primary server or local mount), and if
+// so initializes the queue and launches a go-routine to periodically invoke a
+// method to perform the rotations.
+//
+// initQueue is invoked by the Factory method in a go-routine. The Factory does
+// not wait for success or failure of its tasks before continuing. This is to
+// avoid blocking the mount process while loading and evaluating existing roles,
+// etc.
+func (b *databaseBackend) initQueue(ctx context.Context, conf *logical.BackendConfig, replicationState consts.ReplicationState) {
+	// Verify this mount is on the primary server, or is a local mount. If not, do
+	// not create a queue or launch a ticker. Both processing the WAL list and
+	// populating the queue are done sequentially and before launching a
+	// go-routine to run the periodic ticker.
+	if (conf.System.LocalMount() || !replicationState.HasState(consts.ReplicationPerformanceSecondary)) &&
+		!replicationState.HasState(consts.ReplicationDRSecondary) &&
+		!replicationState.HasState(consts.ReplicationPerformanceStandby) {
+		b.Logger().Info("initializing database rotation queue")
+
+		// Poll for a PutWAL call that does not return a "read-only storage" error. 
+ // This ensures the startup phases of loading WAL entries from any possible + // failed rotations can complete without error when deleting from storage. + READONLY_LOOP: + for { + select { + case <-ctx.Done(): + b.Logger().Info("queue initialization canceled") + return + default: + } + + walID, err := framework.PutWAL(ctx, conf.StorageView, staticWALKey, &setCredentialsWAL{RoleName: "vault-readonlytest"}) + if walID != "" && err == nil { + defer framework.DeleteWAL(ctx, conf.StorageView, walID) + } + switch { + case err == nil: + break READONLY_LOOP + case err.Error() == logical.ErrSetupReadOnly.Error(): + time.Sleep(10 * time.Millisecond) + default: + b.Logger().Error("deleting nil key resulted in error", "error", err) + return + } + } + + // Load roles and populate queue with static accounts + b.populateQueue(ctx, conf.StorageView) + + // Launch ticker + queueTickerInterval := defaultQueueTickSeconds * time.Second + if strVal, ok := conf.Config[queueTickIntervalKey]; ok { + newVal, err := strconv.Atoi(strVal) + if err == nil { + queueTickerInterval = time.Duration(newVal) * time.Second + } else { + b.Logger().Error("bad value for %q option: %q", queueTickIntervalKey, strVal) + } + } + go b.runTicker(ctx, queueTickerInterval, conf.StorageView) + } +} + +// loadStaticWALs reads WAL entries and returns a map of roles and their +// setCredentialsWAL, if found. +func (b *databaseBackend) loadStaticWALs(ctx context.Context, s logical.Storage) (map[string]*setCredentialsWAL, error) { + keys, err := framework.ListWAL(ctx, s) + if err != nil { + return nil, err + } + if len(keys) == 0 { + b.Logger().Debug("no WAL entries found") + return nil, nil + } + + walMap := make(map[string]*setCredentialsWAL) + // Loop through WAL keys and process any rotation ones + for _, walID := range keys { + walEntry, err := b.findStaticWAL(ctx, s, walID) + if err != nil { + b.Logger().Error("error loading static WAL", "id", walID, "error", err) + continue + } + if walEntry == nil { + continue + } + + // Verify the static role still exists + roleName := walEntry.RoleName + role, err := b.StaticRole(ctx, s, roleName) + if err != nil { + b.Logger().Warn("unable to read static role", "error", err, "role", roleName) + continue + } + if role == nil || role.StaticAccount == nil { + b.Logger().Debug("deleting WAL with nil role or static account", "WAL ID", walEntry.walID) + if err := framework.DeleteWAL(ctx, s, walEntry.walID); err != nil { + b.Logger().Warn("unable to delete WAL", "error", err, "WAL ID", walEntry.walID) + } + continue + } + + if existingWALEntry, exists := walMap[walEntry.RoleName]; exists { + b.Logger().Debug("multiple WALs detected for role", "role", walEntry.RoleName, + "loaded WAL ID", existingWALEntry.walID, "created at", existingWALEntry.walCreatedAt, "last vault rotation", existingWALEntry.LastVaultRotation, + "candidate WAL ID", walEntry.walID, "created at", walEntry.walCreatedAt, "last vault rotation", walEntry.LastVaultRotation) + + if walEntry.walCreatedAt > existingWALEntry.walCreatedAt { + // If the existing WAL is older, delete it from storage and fall + // through to inserting our current WAL into the map. + b.Logger().Debug("deleting stale loaded WAL", "WAL ID", existingWALEntry.walID) + err = framework.DeleteWAL(ctx, s, existingWALEntry.walID) + if err != nil { + b.Logger().Warn("unable to delete loaded WAL", "error", err, "WAL ID", existingWALEntry.walID) + } + } else { + // If we already have a more recent WAL entry in the map, delete + // this one and continue onto the next WAL. 
+ b.Logger().Debug("deleting stale candidate WAL", "WAL ID", walEntry.walID) + err = framework.DeleteWAL(ctx, s, walID) + if err != nil { + b.Logger().Warn("unable to delete candidate WAL", "error", err, "WAL ID", walEntry.walID) + } + continue + } + } + + b.Logger().Debug("loaded WAL", "WAL ID", walID) + walMap[walEntry.RoleName] = walEntry + } + return walMap, nil +} + +// pushItem wraps the internal queue's Push call, to make sure a queue is +// actually available. This is needed because both runTicker and initQueue +// operate in go-routines, and could be accessing the queue concurrently +func (b *databaseBackend) pushItem(item *queue.Item) error { + select { + case <-b.queueCtx.Done(): + default: + return b.credRotationQueue.Push(item) + } + b.Logger().Warn("no queue found during push item") + return nil +} + +// popFromRotationQueue wraps the internal queue's Pop call, to make sure a queue is +// actually available. This is needed because both runTicker and initQueue +// operate in go-routines, and could be accessing the queue concurrently +func (b *databaseBackend) popFromRotationQueue() (*queue.Item, error) { + select { + case <-b.queueCtx.Done(): + default: + return b.credRotationQueue.Pop() + } + return nil, queue.ErrEmpty +} + +// popFromRotationQueueByKey wraps the internal queue's PopByKey call, to make sure a queue is +// actually available. This is needed because both runTicker and initQueue +// operate in go-routines, and could be accessing the queue concurrently +func (b *databaseBackend) popFromRotationQueueByKey(name string) (*queue.Item, error) { + select { + case <-b.queueCtx.Done(): + default: + item, err := b.credRotationQueue.PopByKey(name) + if err != nil { + return nil, err + } + if item != nil { + return item, nil + } + } + return nil, queue.ErrEmpty +} diff --git a/builtin/logical/database/rotation_test.go b/builtin/logical/database/rotation_test.go new file mode 100644 index 0000000..e0cb96d --- /dev/null +++ b/builtin/logical/database/rotation_test.go @@ -0,0 +1,1450 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package database + +import ( + "context" + "database/sql" + "errors" + "fmt" + "log" + "os" + "strings" + "testing" + "time" + + "github.com/Sectorbob/mlab-ns2/gae/ns/digest" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/helper/testhelpers/mongodb" + postgreshelper "github.com/hashicorp/vault/helper/testhelpers/postgresql" + v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/dbtxn" + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/queue" + _ "github.com/jackc/pgx/v4/stdlib" + "github.com/stretchr/testify/mock" + mongodbatlasapi "go.mongodb.org/atlas/mongodbatlas" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +const ( + dbUser = "vaultstatictest" + dbUserDefaultPassword = "password" +) + +func TestBackend_StaticRole_Rotate_basic(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + lb, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + b, ok := lb.(*databaseBackend) + if !ok { + t.Fatal("could not convert to db backend") + } + defer b.Cleanup(context.Background()) + + cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + defer cleanup() + + // create the database user + createTestPGUser(t, connURL, dbUser, dbUserDefaultPassword, testRoleStaticCreate) + + verifyPgConn(t, dbUser, dbUserDefaultPassword, connURL) + + // Configure a connection + data := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "verify_connection": false, + "allowed_roles": []string{"*"}, + "name": "plugin-test", + } + + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + data = map[string]interface{}{ + "name": "plugin-role-test", + "db_name": "plugin-test", + "rotation_statements": testRoleStaticUpdate, + "username": dbUser, + "rotation_period": "5400s", + } + + req = &logical.Request{ + Operation: logical.CreateOperation, + Path: "static-roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Read the creds + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-creds/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + username := resp.Data["username"].(string) + password := resp.Data["password"].(string) + if username == "" || password == "" { + t.Fatalf("empty username (%s) or password (%s)", username, password) + } + + // Verify username/password + verifyPgConn(t, dbUser, password, connURL) + + // Re-read the creds, verifying they aren't changing on read + data = 
map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-creds/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + if username != resp.Data["username"].(string) || password != resp.Data["password"].(string) { + t.Fatal("expected re-read username/password to match, but didn't") + } + + // Trigger rotation + data = map[string]interface{}{"name": "plugin-role-test"} + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "rotate-role/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + if resp != nil { + t.Fatalf("Expected empty response from rotate-role: (%#v)", resp) + } + + // Re-Read the creds + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-creds/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + newPassword := resp.Data["password"].(string) + if password == newPassword { + t.Fatalf("expected passwords to differ, got (%s)", newPassword) + } + + // Verify new username/password + verifyPgConn(t, username, newPassword, connURL) +} + +// Sanity check to make sure we don't allow an attempt of rotating credentials +// for non-static accounts, which doesn't make sense anyway, but doesn't hurt to +// verify we return an error +func TestBackend_StaticRole_Rotate_NonStaticError(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + lb, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + b, ok := lb.(*databaseBackend) + if !ok { + t.Fatal("could not convert to db backend") + } + defer b.Cleanup(context.Background()) + + cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + defer cleanup() + + // create the database user + createTestPGUser(t, connURL, dbUser, dbUserDefaultPassword, testRoleStaticCreate) + + // Configure a connection + data := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "verify_connection": false, + "allowed_roles": []string{"*"}, + "name": "plugin-test", + } + + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + data = map[string]interface{}{ + "name": "plugin-role-test", + "db_name": "plugin-test", + "creation_statements": testRoleStaticCreate, + "rotation_statements": testRoleStaticUpdate, + "revocation_statements": defaultRevocationSQL, + } + + req = &logical.Request{ + Operation: logical.CreateOperation, + Path: "roles/plugin-role-test", + Storage: config.StorageView, + Data: data, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", 
err, resp)
+	}
+
+	// Read the creds
+	data = map[string]interface{}{}
+	req = &logical.Request{
+		Operation: logical.ReadOperation,
+		Path:      "creds/plugin-role-test",
+		Storage:   config.StorageView,
+		Data:      data,
+	}
+	resp, err = b.HandleRequest(namespace.RootContext(nil), req)
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("err:%s resp:%#v\n", err, resp)
+	}
+
+	username := resp.Data["username"].(string)
+	password := resp.Data["password"].(string)
+	if username == "" || password == "" {
+		t.Fatalf("empty username (%s) or password (%s)", username, password)
+	}
+
+	// Verify username/password
+	verifyPgConn(t, dbUser, dbUserDefaultPassword, connURL)
+	// Trigger rotation
+	data = map[string]interface{}{"name": "plugin-role-test"}
+	req = &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "rotate-role/plugin-role-test",
+		Storage:   config.StorageView,
+		Data:      data,
+	}
+	// expect resp to be an error
+	resp, _ = b.HandleRequest(namespace.RootContext(nil), req)
+	if !resp.IsError() {
+		t.Fatalf("expected error rotating non-static role")
+	}
+
+	if resp.Error().Error() != "no static role found for role name" {
+		t.Fatalf("wrong error message: %s", resp.Error())
+	}
+}
+
+func TestBackend_StaticRole_Revoke_user(t *testing.T) {
+	cluster, sys := getCluster(t)
+	defer cluster.Cleanup()
+
+	config := logical.TestBackendConfig()
+	config.StorageView = &logical.InmemStorage{}
+	config.System = sys
+
+	lb, err := Factory(context.Background(), config)
+	if err != nil {
+		t.Fatal(err)
+	}
+	b, ok := lb.(*databaseBackend)
+	if !ok {
+		t.Fatal("could not convert to db backend")
+	}
+	defer b.Cleanup(context.Background())
+
+	cleanup, connURL := postgreshelper.PrepareTestContainer(t, "")
+	defer cleanup()
+
+	// create the database user
+	createTestPGUser(t, connURL, dbUser, dbUserDefaultPassword, testRoleStaticCreate)
+
+	// Configure a connection
+	data := map[string]interface{}{
+		"connection_url":    connURL,
+		"plugin_name":       "postgresql-database-plugin",
+		"verify_connection": false,
+		"allowed_roles":     []string{"*"},
+		"name":              "plugin-test",
+	}
+
+	req := &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "config/plugin-test",
+		Storage:   config.StorageView,
+		Data:      data,
+	}
+	resp, err := b.HandleRequest(namespace.RootContext(nil), req)
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("err:%s resp:%#v\n", err, resp)
+	}
+
+	testCases := map[string]struct {
+		revoke          *bool
+		expectVerifyErr bool
+	}{
+		// Default case: user does not specify, Vault leaves the database user
+		// untouched, and the final connection check passes because the user still
+		// exists
+		"unset": {},
+		// Revoke on delete. The final connection check should fail because the
+		// user no longer exists
+		"revoke": {
+			revoke:          newBoolPtr(true),
+			expectVerifyErr: true,
+		},
+		// Revoke false, final connection check should still pass
+		"persist": {
+			revoke: newBoolPtr(false),
+		},
+	}
+	for k, tc := range testCases {
+		t.Run(k, func(t *testing.T) {
+			data = map[string]interface{}{
+				"name":                "plugin-role-test",
+				"db_name":             "plugin-test",
+				"rotation_statements": testRoleStaticUpdate,
+				"username":            dbUser,
+				"rotation_period":     "5400s",
+			}
+			if tc.revoke != nil {
+				data["revoke_user_on_delete"] = *tc.revoke
+			}
+
+			req = &logical.Request{
+				Operation: logical.CreateOperation,
+				Path:      "static-roles/plugin-role-test",
+				Storage:   config.StorageView,
+				Data:      data,
+			}
+
+			resp, err = b.HandleRequest(namespace.RootContext(nil), req)
+			if err != nil || (resp != nil && resp.IsError()) {
+				t.Fatalf("err:%s resp:%#v\n", err, resp)
+			}
+
+			// Read the creds
+			data = map[string]interface{}{}
+			req = &logical.Request{
+				Operation: logical.ReadOperation,
+				Path:      "static-creds/plugin-role-test",
+				Storage:   config.StorageView,
+				Data:      data,
+			}
+
+			resp, err = b.HandleRequest(namespace.RootContext(nil), req)
+			if err != nil || (resp != nil && resp.IsError()) {
+				t.Fatalf("err:%s resp:%#v\n", err, resp)
+			}
+
+			username := resp.Data["username"].(string)
+			password := resp.Data["password"].(string)
+			if username == "" || password == "" {
+				t.Fatalf("empty username (%s) or password (%s)", username, password)
+			}
+
+			// Verify username/password
+			verifyPgConn(t, username, password, connURL)
+
+			// delete the role, expect the default where the user is not destroyed
+			req = &logical.Request{
+				Operation: logical.DeleteOperation,
+				Path:      "static-roles/plugin-role-test",
+				Storage:   config.StorageView,
+			}
+
+			resp, err = b.HandleRequest(namespace.RootContext(nil), req)
+			if err != nil || (resp != nil && resp.IsError()) {
+				t.Fatalf("err:%s resp:%#v\n", err, resp)
+			}
+
+			// Verify new username/password still work
+			verifyPgConn(t, username, password, connURL)
+		})
+	}
+}
+
+func createTestPGUser(t *testing.T, connURL string, username, password, query string) {
+	t.Helper()
+	log.Printf("[TRACE] Creating test user")
+
+	db, err := sql.Open("pgx", connURL)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer db.Close()
+
+	// Start a transaction
+	ctx := context.Background()
+	tx, err := db.BeginTx(ctx, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer func() {
+		_ = tx.Rollback()
+	}()
+
+	m := map[string]string{
+		"name":     username,
+		"password": password,
+	}
+	if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil {
+		t.Fatal(err)
+	}
+	// Commit the transaction
+	if err := tx.Commit(); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func verifyPgConn(t *testing.T, username, password, connURL string) {
+	t.Helper()
+	cURL := strings.Replace(connURL, "postgres:secret", username+":"+password, 1)
+	db, err := sql.Open("pgx", cURL)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer db.Close()
+	if err := db.Ping(); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// WAL testing
+//
+// First scenario, WAL contains a role name that does not exist.
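+// The backend is expected to discard the orphaned WAL entry while populating
+// the rotation queue at startup, leaving both the queue and WAL storage empty.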
+func TestBackend_Static_QueueWAL_discard_role_not_found(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + ctx := context.Background() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + _, err := framework.PutWAL(ctx, config.StorageView, staticWALKey, &setCredentialsWAL{ + RoleName: "doesnotexist", + }) + if err != nil { + t.Fatalf("error with PutWAL: %s", err) + } + + assertWALCount(t, config.StorageView, 1, staticWALKey) + + b, err := Factory(ctx, config) + if err != nil { + t.Fatal(err) + } + defer b.Cleanup(ctx) + + time.Sleep(5 * time.Second) + bd := b.(*databaseBackend) + if bd.credRotationQueue == nil { + t.Fatal("database backend had no credential rotation queue") + } + + // Verify empty queue + if bd.credRotationQueue.Len() != 0 { + t.Fatalf("expected zero queue items, got: %d", bd.credRotationQueue.Len()) + } + + assertWALCount(t, config.StorageView, 0, staticWALKey) +} + +// Second scenario, WAL contains a role name that does exist, but the role's +// LastVaultRotation is greater than the WAL has +func TestBackend_Static_QueueWAL_discard_role_newer_rotation_date(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + ctx := context.Background() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + + roleName := "test-discard-by-date" + lb, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + b, ok := lb.(*databaseBackend) + if !ok { + t.Fatal("could not convert to db backend") + } + + cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + defer cleanup() + + // create the database user + createTestPGUser(t, connURL, dbUser, dbUserDefaultPassword, testRoleStaticCreate) + + // Configure a connection + data := map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + "verify_connection": false, + "allowed_roles": []string{"*"}, + "name": "plugin-test", + } + + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/plugin-test", + Storage: config.StorageView, + Data: data, + } + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Save Now() to make sure rotation time is after this, as well as the WAL + // time + roleTime := time.Now() + + // Create role + data = map[string]interface{}{ + "name": roleName, + "db_name": "plugin-test", + "rotation_statements": testRoleStaticUpdate, + "username": dbUser, + // Low value here, to make sure the backend rotates this password at least + // once before we compare it to the WAL + "rotation_period": "10s", + } + + req = &logical.Request{ + Operation: logical.CreateOperation, + Path: "static-roles/" + roleName, + Storage: config.StorageView, + Data: data, + } + + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + // Allow the first rotation to occur, setting LastVaultRotation + time.Sleep(time.Second * 12) + + // Cleanup the backend, then create a WAL for the role with a + // LastVaultRotation of 1 hour ago, so that when we recreate the backend the + // WAL will be read but discarded + b.Cleanup(ctx) + b = nil + time.Sleep(time.Second * 3) + + // Make a fake WAL entry with an older time + oldRotationTime := roleTime.Add(time.Hour * -1) + 
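	// The WAL below is deliberately older than the role's current
+	// LastVaultRotation, so populateQueue should discard it on load rather
+	// than roll the password forward.
+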
walPassword := "somejunkpassword" + _, err = framework.PutWAL(ctx, config.StorageView, staticWALKey, &setCredentialsWAL{ + RoleName: roleName, + NewPassword: walPassword, + LastVaultRotation: oldRotationTime, + Username: dbUser, + }) + if err != nil { + t.Fatalf("error with PutWAL: %s", err) + } + + assertWALCount(t, config.StorageView, 1, staticWALKey) + + // Reload backend + lb, err = Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + b, ok = lb.(*databaseBackend) + if !ok { + t.Fatal("could not convert to db backend") + } + defer b.Cleanup(ctx) + + // Allow enough time for populateQueue to work after boot + time.Sleep(time.Second * 12) + + // PopulateQueue should have processed the entry + assertWALCount(t, config.StorageView, 0, staticWALKey) + + // Read the role + data = map[string]interface{}{} + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-roles/" + roleName, + Storage: config.StorageView, + Data: data, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + lastVaultRotation := resp.Data["last_vault_rotation"].(time.Time) + if !lastVaultRotation.After(oldRotationTime) { + t.Fatal("last vault rotation time not greater than WAL time") + } + + if !lastVaultRotation.After(roleTime) { + t.Fatal("last vault rotation time not greater than role creation time") + } + + // Grab password to verify it didn't change + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "static-creds/" + roleName, + Storage: config.StorageView, + } + resp, err = b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%s resp:%#v\n", err, resp) + } + + password := resp.Data["password"].(string) + if password == walPassword { + t.Fatalf("expected password to not be changed by WAL, but was") + } +} + +// Helper to assert the number of WAL entries is what we expect +func assertWALCount(t *testing.T, s logical.Storage, expected int, key string) { + t.Helper() + + var count int + ctx := context.Background() + keys, err := framework.ListWAL(ctx, s) + if err != nil { + t.Fatal("error listing WALs") + } + + // Loop through WAL keys and process any rotation ones + for _, k := range keys { + walEntry, _ := framework.GetWAL(ctx, s, k) + if walEntry == nil { + continue + } + + if walEntry.Kind != key { + continue + } + count++ + } + if expected != count { + t.Fatalf("WAL count mismatch, expected (%d), got (%d)", expected, count) + } +} + +// +// End WAL testing +// + +type userCreator func(t *testing.T, username, password string) + +func TestBackend_StaticRole_Rotations_PostgreSQL(t *testing.T) { + cleanup, connURL := postgreshelper.PrepareTestContainer(t, "13.4-buster") + defer cleanup() + uc := userCreator(func(t *testing.T, username, password string) { + createTestPGUser(t, connURL, username, password, testRoleStaticCreate) + }) + testBackend_StaticRole_Rotations(t, uc, map[string]interface{}{ + "connection_url": connURL, + "plugin_name": "postgresql-database-plugin", + }) +} + +func TestBackend_StaticRole_Rotations_MongoDB(t *testing.T) { + cleanup, connURL := mongodb.PrepareTestContainerWithDatabase(t, "5.0.10", "vaulttestdb") + defer cleanup() + + uc := userCreator(func(t *testing.T, username, password string) { + testCreateDBUser(t, connURL, "vaulttestdb", username, password) + }) + testBackend_StaticRole_Rotations(t, uc, map[string]interface{}{ + "connection_url": 
connURL, + "plugin_name": "mongodb-database-plugin", + }) +} + +func TestBackend_StaticRole_Rotations_MongoDBAtlas(t *testing.T) { + // To get the project ID, connect to cloud.mongodb.com, go to the vault-test project and + // look at Project Settings. + projID := os.Getenv("VAULT_MONGODBATLAS_PROJECT_ID") + // For the private and public key, go to Organization Access Manager on cloud.mongodb.com, + // choose Create API Key, then create one using the defaults. Then go back to the vault-test + // project and add the API key to it, with permissions "Project Owner". + privKey := os.Getenv("VAULT_MONGODBATLAS_PRIVATE_KEY") + pubKey := os.Getenv("VAULT_MONGODBATLAS_PUBLIC_KEY") + if projID == "" { + t.Logf("Skipping MongoDB Atlas test because VAULT_MONGODBATLAS_PROJECT_ID not set") + t.SkipNow() + } + + transport := digest.NewTransport(pubKey, privKey) + cl, err := transport.Client() + if err != nil { + t.Fatal(err) + } + + api, err := mongodbatlasapi.New(cl) + if err != nil { + t.Fatal(err) + } + + uc := userCreator(func(t *testing.T, username, password string) { + // Delete the user in case it's still there from an earlier run, ignore + // errors in case it's not. + _, _ = api.DatabaseUsers.Delete(context.Background(), "admin", projID, username) + + req := &mongodbatlasapi.DatabaseUser{ + Username: username, + Password: password, + DatabaseName: "admin", + Roles: []mongodbatlasapi.Role{{RoleName: "atlasAdmin", DatabaseName: "admin"}}, + } + _, _, err := api.DatabaseUsers.Create(context.Background(), projID, req) + if err != nil { + t.Fatal(err) + } + }) + testBackend_StaticRole_Rotations(t, uc, map[string]interface{}{ + "plugin_name": "mongodbatlas-database-plugin", + "project_id": projID, + "private_key": privKey, + "public_key": pubKey, + }) +} + +func testBackend_StaticRole_Rotations(t *testing.T, createUser userCreator, opts map[string]interface{}) { + // We need to set this value for the plugin to run, but it doesn't matter what we set it to. + oldToken := os.Getenv(pluginutil.PluginUnwrapTokenEnv) + os.Setenv(pluginutil.PluginUnwrapTokenEnv, "...") + defer func() { + if oldToken != "" { + os.Setenv(pluginutil.PluginUnwrapTokenEnv, oldToken) + } else { + os.Unsetenv(pluginutil.PluginUnwrapTokenEnv) + } + }() + + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + // Change background task interval to 1s to give more margin + // for it to successfully run during the sleeps below. 
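+	// (initQueue parses this option with strconv.Atoi and interprets the
+	// value as seconds.)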
+	config.Config[queueTickIntervalKey] = "1"
+
+	// Rotation ticker starts running in Factory call
+	b, err := Factory(context.Background(), config)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer b.Cleanup(context.Background())
+
+	// allow initQueue to finish
+	bd := b.(*databaseBackend)
+	if bd.credRotationQueue == nil {
+		t.Fatal("database backend had no credential rotation queue")
+	}
+
+	// Configure a connection
+	data := map[string]interface{}{
+		"verify_connection": false,
+		"allowed_roles":     []string{"*"},
+	}
+	for k, v := range opts {
+		data[k] = v
+	}
+
+	req := &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "config/plugin-test",
+		Storage:   config.StorageView,
+		Data:      data,
+	}
+	resp, err := b.HandleRequest(namespace.RootContext(nil), req)
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("err:%s resp:%#v\n", err, resp)
+	}
+
+	testCases := []string{"10", "20", "100"}
+	// Create database users ahead of time
+	for _, tc := range testCases {
+		createUser(t, "statictest"+tc, "test")
+	}
+
+	// create three static roles with different rotation periods
+	for _, tc := range testCases {
+		roleName := "plugin-static-role-" + tc
+		data = map[string]interface{}{
+			"name":            roleName,
+			"db_name":         "plugin-test",
+			"username":        "statictest" + tc,
+			"rotation_period": tc,
+		}
+
+		req = &logical.Request{
+			Operation: logical.CreateOperation,
+			Path:      "static-roles/" + roleName,
+			Storage:   config.StorageView,
+			Data:      data,
+		}
+
+		resp, err = b.HandleRequest(namespace.RootContext(nil), req)
+		if err != nil || (resp != nil && resp.IsError()) {
+			t.Fatalf("err:%s resp:%#v\n", err, resp)
+		}
+	}
+
+	// verify the queue has 3 items in it
+	if bd.credRotationQueue.Len() != 3 {
+		t.Fatalf("expected 3 items in the rotation queue, got: (%d)", bd.credRotationQueue.Len())
+	}
+
+	// List the roles
+	data = map[string]interface{}{}
+	req = &logical.Request{
+		Operation: logical.ListOperation,
+		Path:      "static-roles/",
+		Storage:   config.StorageView,
+		Data:      data,
+	}
+	resp, err = b.HandleRequest(namespace.RootContext(nil), req)
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("err:%s resp:%#v\n", err, resp)
+	}
+
+	keys := resp.Data["keys"].([]string)
+	if len(keys) != 3 {
+		t.Fatalf("expected 3 roles, got: (%d)", len(keys))
+	}
+
+	// capture initial passwords, before the periodic function is triggered
+	pws := make(map[string][]string)
+	pws = capturePasswords(t, b, config, testCases, pws)
+
+	// sleep to make sure the periodic func has time to actually run
+	time.Sleep(15 * time.Second)
+	pws = capturePasswords(t, b, config, testCases, pws)
+
+	// sleep more, this should allow both sr10 and sr20 to rotate
+	time.Sleep(10 * time.Second)
+	pws = capturePasswords(t, b, config, testCases, pws)
+
+	// verify all passwords are as they should be
+	pass := true
+	for k, v := range pws {
+		if len(v) < 3 {
+			t.Fatalf("expected to find 3 passwords for (%s), only found (%d)", k, len(v))
+		}
+		switch {
+		case k == "plugin-static-role-10":
+			// expect all passwords to be different
+			if v[0] == v[1] || v[1] == v[2] || v[0] == v[2] {
+				pass = false
+			}
+		case k == "plugin-static-role-20":
+			// expect the first two to be equal, but different from the third
+			if v[0] != v[1] || v[0] == v[2] {
+				pass = false
+			}
+		case k == "plugin-static-role-100":
+			// expect all passwords to be equal
+			if v[0] != v[1] || v[1] != v[2] {
+				pass = false
+			}
+		default:
+			t.Fatalf("unexpected password key: %v", k)
+		}
+	}
+	if !pass {
+		t.Fatalf("password rotations did not match expected: %#v", pws)
+	}
+}
+
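+// testCreateDBUser provisions a MongoDB user directly through the driver so
+// the static-role rotation tests have a pre-existing account to take over.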
+func testCreateDBUser(t testing.TB, connURL, db, username, password string) {
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+	client, err := mongo.Connect(ctx, options.Client().ApplyURI(connURL))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	createUserCmd := &createUserCommand{
+		Username: username,
+		Password: password,
+		Roles:    []interface{}{},
+	}
+	result := client.Database(db).RunCommand(ctx, createUserCmd, nil)
+	if result.Err() != nil {
+		t.Fatal(result.Err())
+	}
+}
+
+type createUserCommand struct {
+	Username string        `bson:"createUser"`
+	Password string        `bson:"pwd"`
+	Roles    []interface{} `bson:"roles"`
+}
+
+// Demonstrates a bug fix for the credential rotation not releasing locks
+func TestBackend_StaticRole_LockRegression(t *testing.T) {
+	cluster, sys := getCluster(t)
+	defer cluster.Cleanup()
+
+	config := logical.TestBackendConfig()
+	config.StorageView = &logical.InmemStorage{}
+	config.System = sys
+
+	lb, err := Factory(context.Background(), config)
+	if err != nil {
+		t.Fatal(err)
+	}
+	b, ok := lb.(*databaseBackend)
+	if !ok {
+		t.Fatal("could not convert to db backend")
+	}
+	defer b.Cleanup(context.Background())
+
+	cleanup, connURL := postgreshelper.PrepareTestContainer(t, "")
+	defer cleanup()
+
+	// Configure a connection
+	data := map[string]interface{}{
+		"connection_url":    connURL,
+		"plugin_name":       "postgresql-database-plugin",
+		"verify_connection": false,
+		"allowed_roles":     []string{"*"},
+		"name":              "plugin-test",
+	}
+
+	req := &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "config/plugin-test",
+		Storage:   config.StorageView,
+		Data:      data,
+	}
+	resp, err := b.HandleRequest(namespace.RootContext(nil), req)
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("err:%s resp:%#v\n", err, resp)
+	}
+
+	createTestPGUser(t, connURL, dbUser, dbUserDefaultPassword, testRoleStaticCreate)
+	data = map[string]interface{}{
+		"name":                "plugin-role-test",
+		"db_name":             "plugin-test",
+		"rotation_statements": testRoleStaticUpdate,
+		"username":            dbUser,
+		"rotation_period":     "7s",
+	}
+	req = &logical.Request{
+		Operation: logical.CreateOperation,
+		Path:      "static-roles/plugin-role-test",
+		Storage:   config.StorageView,
+		Data:      data,
+	}
+
+	resp, err = b.HandleRequest(namespace.RootContext(nil), req)
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("err:%s resp:%#v\n", err, resp)
+	}
+	for i := 0; i < 25; i++ {
+		req = &logical.Request{
+			Operation: logical.UpdateOperation,
+			Path:      "static-roles/plugin-role-test",
+			Storage:   config.StorageView,
+			Data:      data,
+		}
+
+		resp, err = b.HandleRequest(namespace.RootContext(nil), req)
+		if err != nil || (resp != nil && resp.IsError()) {
+			t.Fatalf("err:%s resp:%#v\n", err, resp)
+		}
+
+		// sleeping is needed to trigger the deadlock, otherwise things are
+		// processed too quickly to trigger the rotation lock on so few roles
+		time.Sleep(500 * time.Millisecond)
+	}
+}
+
+func TestBackend_StaticRole_Rotate_Invalid_Role(t *testing.T) {
+	cluster, sys := getCluster(t)
+	defer cluster.Cleanup()
+
+	config := logical.TestBackendConfig()
+	config.StorageView = &logical.InmemStorage{}
+	config.System = sys
+
+	lb, err := Factory(context.Background(), config)
+	if err != nil {
+		t.Fatal(err)
+	}
+	b, ok := lb.(*databaseBackend)
+	if !ok {
+		t.Fatal("could not convert to db backend")
+	}
+	defer b.Cleanup(context.Background())
+
+	cleanup, connURL := postgreshelper.PrepareTestContainer(t, "")
+	defer cleanup()
+
+	// create the database user
+	createTestPGUser(t, connURL, dbUser, dbUserDefaultPassword, testRoleStaticCreate)
+
+	verifyPgConn(t, dbUser, dbUserDefaultPassword, connURL)
+
+	// Configure a connection
+	data := map[string]interface{}{
+		"connection_url":    connURL,
+		"plugin_name":       "postgresql-database-plugin",
+		"verify_connection": false,
+		"allowed_roles":     []string{"*"},
+		"name":              "plugin-test",
+	}
+
+	req := &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "config/plugin-test",
+		Storage:   config.StorageView,
+		Data:      data,
+	}
+	resp, err := b.HandleRequest(namespace.RootContext(nil), req)
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("err:%s resp:%#v\n", err, resp)
+	}
+
+	data = map[string]interface{}{
+		"name":                "plugin-role-test",
+		"db_name":             "plugin-test",
+		"rotation_statements": testRoleStaticUpdate,
+		"username":            dbUser,
+		"rotation_period":     "5400s",
+	}
+
+	req = &logical.Request{
+		Operation: logical.CreateOperation,
+		Path:      "static-roles/plugin-role-test",
+		Storage:   config.StorageView,
+		Data:      data,
+	}
+
+	resp, err = b.HandleRequest(namespace.RootContext(nil), req)
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("err:%s resp:%#v\n", err, resp)
+	}
+
+	// Manually pop the key to emulate a queue that is missing the role's entry
+	b.credRotationQueue.PopByKey("plugin-role-test")
+
+	// Make sure queue is empty
+	if b.credRotationQueue.Len() != 0 {
+		t.Fatalf("expected queue length to be 0 but is %d", b.credRotationQueue.Len())
+	}
+
+	// Trigger rotation
+	data = map[string]interface{}{"name": "plugin-role-test"}
+	req = &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "rotate-role/plugin-role-test",
+		Storage:   config.StorageView,
+		Data:      data,
+	}
+	resp, err = b.HandleRequest(namespace.RootContext(nil), req)
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("err:%s resp:%#v\n", err, resp)
+	}
+
+	// Check if key is in queue
+	if b.credRotationQueue.Len() != 1 {
+		t.Fatalf("expected queue length to be 1 but is %d", b.credRotationQueue.Len())
+	}
+}
+
+func TestRollsPasswordForwardsUsingWAL(t *testing.T) {
+	ctx := context.Background()
+	b, storage, mockDB := getBackend(t)
+	defer b.Cleanup(ctx)
+	configureDBMount(t, storage)
+	createRole(t, b, storage, mockDB, "hashicorp")
+
+	role, err := b.StaticRole(ctx, storage, "hashicorp")
+	if err != nil {
+		t.Fatal(err)
+	}
+	oldPassword := role.StaticAccount.Password
+
+	generateWALFromFailedRotation(t, b, storage, mockDB, "hashicorp")
+
+	walIDs := requireWALs(t, storage, 1)
+	wal, err := b.findStaticWAL(ctx, storage, walIDs[0])
+	if err != nil {
+		t.Fatal(err)
+	}
+	role, err = b.StaticRole(ctx, storage, "hashicorp")
+	if err != nil {
+		t.Fatal(err)
+	}
+	// The role's password should still be the old password, since the failed
+	// rotation only recorded the new credential in the WAL
+	if role.StaticAccount.Password != oldPassword {
+		t.Fatal(role.StaticAccount.Password, oldPassword)
+	}
+
+	rotateRole(t, b, storage, mockDB, "hashicorp")
+
+	role, err = b.StaticRole(ctx, storage, "hashicorp")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if role.StaticAccount.Password != wal.NewPassword {
+		t.Fatal("role password", role.StaticAccount.Password, "WAL new password", wal.NewPassword)
+	}
+	// WAL should be cleared by the successful rotate
+	requireWALs(t, storage, 0)
+}
+
+func TestStoredWALsCorrectlyProcessed(t *testing.T) {
+	const walNewPassword = "new-password-from-wal"
+	for _, tc := range []struct {
+		name         string
+		shouldRotate bool
+		wal          *setCredentialsWAL
+	}{
+		{
+			"WAL is kept and used for roll forward",
+			true,
+			&setCredentialsWAL{
+				RoleName: "hashicorp",
+				Username: "hashicorp",
+				NewPassword:
walNewPassword, + LastVaultRotation: time.Now().Add(time.Hour), + }, + }, + { + "zero-time WAL is discarded on load", + false, + &setCredentialsWAL{ + RoleName: "hashicorp", + Username: "hashicorp", + NewPassword: walNewPassword, + LastVaultRotation: time.Time{}, + }, + }, + { + "empty-password WAL is kept but a new password is generated", + true, + &setCredentialsWAL{ + RoleName: "hashicorp", + Username: "hashicorp", + NewPassword: "", + LastVaultRotation: time.Now().Add(time.Hour), + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + ctx := context.Background() + config := logical.TestBackendConfig() + storage := &logical.InmemStorage{} + config.StorageView = storage + b := Backend(config) + defer b.Cleanup(ctx) + mockDB := setupMockDB(b) + if err := b.Setup(ctx, config); err != nil { + t.Fatal(err) + } + b.credRotationQueue = queue.New() + configureDBMount(t, config.StorageView) + createRole(t, b, config.StorageView, mockDB, "hashicorp") + role, err := b.StaticRole(ctx, config.StorageView, "hashicorp") + if err != nil { + t.Fatal(err) + } + initialPassword := role.StaticAccount.Password + + // Set up a WAL for our test case + framework.PutWAL(ctx, config.StorageView, staticWALKey, tc.wal) + requireWALs(t, config.StorageView, 1) + // Reset the rotation queue to simulate startup memory state + b.credRotationQueue = queue.New() + + // Now finish the startup process by populating the queue, which should discard the WAL + b.initQueue(ctx, config, consts.ReplicationUnknown) + + if tc.shouldRotate { + requireWALs(t, storage, 1) + } else { + requireWALs(t, storage, 0) + } + + // Run one tick + mockDB.On("UpdateUser", mock.Anything, mock.Anything). + Return(v5.UpdateUserResponse{}, nil). + Once() + b.rotateCredentials(ctx, storage) + requireWALs(t, storage, 0) + + role, err = b.StaticRole(ctx, storage, "hashicorp") + if err != nil { + t.Fatal(err) + } + item, err := b.popFromRotationQueueByKey("hashicorp") + if err != nil { + t.Fatal(err) + } + + if tc.shouldRotate { + if tc.wal.NewPassword != "" { + // Should use WAL's new_password field + if role.StaticAccount.Password != walNewPassword { + t.Fatal() + } + } else { + // Should rotate but ignore WAL's new_password field + if role.StaticAccount.Password == initialPassword { + t.Fatal() + } + if role.StaticAccount.Password == walNewPassword { + t.Fatal() + } + } + } else { + // Ensure the role was not promoted for early rotation + if item.Priority < time.Now().Add(time.Hour).Unix() { + t.Fatal("priority should be for about a week away, but was", item.Priority) + } + if role.StaticAccount.Password != initialPassword { + t.Fatal("password should not have been rotated yet") + } + } + }) + } +} + +func TestDeletesOlderWALsOnLoad(t *testing.T) { + ctx := context.Background() + b, storage, mockDB := getBackend(t) + defer b.Cleanup(ctx) + configureDBMount(t, storage) + createRole(t, b, storage, mockDB, "hashicorp") + + // Create 4 WALs, with a clear winner for most recent. 
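+	// loadStaticWALs should keep only the newest entry and delete the other three.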
+ wal := &setCredentialsWAL{ + RoleName: "hashicorp", + Username: "hashicorp", + NewPassword: "some-new-password", + LastVaultRotation: time.Now(), + } + for i := 0; i < 3; i++ { + _, err := framework.PutWAL(ctx, storage, staticWALKey, wal) + if err != nil { + t.Fatal(err) + } + } + time.Sleep(2 * time.Second) + // We expect this WAL to have the latest createdAt timestamp + walID, err := framework.PutWAL(ctx, storage, staticWALKey, wal) + if err != nil { + t.Fatal(err) + } + requireWALs(t, storage, 4) + + walMap, err := b.loadStaticWALs(ctx, storage) + if err != nil { + t.Fatal(err) + } + if len(walMap) != 1 || walMap["hashicorp"] == nil || walMap["hashicorp"].walID != walID { + t.Fatal() + } + requireWALs(t, storage, 1) +} + +func generateWALFromFailedRotation(t *testing.T, b *databaseBackend, storage logical.Storage, mockDB *mockNewDatabase, roleName string) { + t.Helper() + mockDB.On("UpdateUser", mock.Anything, mock.Anything). + Return(v5.UpdateUserResponse{}, errors.New("forced error")). + Once() + _, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "rotate-role/" + roleName, + Storage: storage, + }) + if err == nil { + t.Fatal("expected error") + } +} + +func rotateRole(t *testing.T, b *databaseBackend, storage logical.Storage, mockDB *mockNewDatabase, roleName string) { + t.Helper() + mockDB.On("UpdateUser", mock.Anything, mock.Anything). + Return(v5.UpdateUserResponse{}, nil). + Once() + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "rotate-role/" + roleName, + Storage: storage, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatal(resp, err) + } +} + +// returns a slice of the WAL IDs in storage +func requireWALs(t *testing.T, storage logical.Storage, expectedCount int) []string { + t.Helper() + wals, err := storage.List(context.Background(), "wal/") + if err != nil { + t.Fatal(err) + } + if len(wals) != expectedCount { + t.Fatal("expected WALs", expectedCount, "got", len(wals)) + } + + return wals +} + +func getBackend(t *testing.T) (*databaseBackend, logical.Storage, *mockNewDatabase) { + t.Helper() + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + // Create and init the backend ourselves instead of using a Factory because + // the factory function kicks off threads that cause racy tests. + b := Backend(config) + if err := b.Setup(context.Background(), config); err != nil { + t.Fatal(err) + } + b.credRotationQueue = queue.New() + b.populateQueue(context.Background(), config.StorageView) + + mockDB := setupMockDB(b) + + return b, config.StorageView, mockDB +} + +func setupMockDB(b *databaseBackend) *mockNewDatabase { + mockDB := &mockNewDatabase{} + mockDB.On("Initialize", mock.Anything, mock.Anything).Return(v5.InitializeResponse{}, nil) + mockDB.On("Close").Return(nil) + mockDB.On("Type").Return("mock", nil) + dbw := databaseVersionWrapper{ + v5: mockDB, + } + + dbi := &dbPluginInstance{ + database: dbw, + id: "foo-id", + name: "mockV5", + } + b.connections["mockv5"] = dbi + + return mockDB +} + +// configureDBMount puts config directly into storage to avoid the DB engine's +// plugin init code paths, allowing us to use a manually populated mock DB object. 
+func configureDBMount(t *testing.T, storage logical.Storage) {
+	t.Helper()
+	entry, err := logical.StorageEntryJSON("config/mockv5", &DatabaseConfig{
+		AllowedRoles: []string{"*"},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = storage.Put(context.Background(), entry)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+// capturePasswords captures the current passwords at the time of calling, and
+// returns a map of role names to passwords, building on the input map
+func capturePasswords(t *testing.T, b logical.Backend, config *logical.BackendConfig, testCases []string, pws map[string][]string) map[string][]string {
+	current := make(map[string][]string)
+	for _, tc := range testCases {
+		// Read the role
+		roleName := "plugin-static-role-" + tc
+		req := &logical.Request{
+			Operation: logical.ReadOperation,
+			Path:      "static-creds/" + roleName,
+			Storage:   config.StorageView,
+		}
+		resp, err := b.HandleRequest(namespace.RootContext(nil), req)
+		if err != nil || (resp != nil && resp.IsError()) {
+			t.Fatalf("err:%s resp:%#v\n", err, resp)
+		}
+
+		username := resp.Data["username"].(string)
+		password := resp.Data["password"].(string)
+		if username == "" || password == "" {
+			t.Fatalf("expected both username/password for (%s), got (%s), (%s)", roleName, username, password)
+		}
+		current[roleName] = append(current[roleName], password)
+	}
+
+	for k, v := range current {
+		pws[k] = append(pws[k], v...)
+	}
+
+	return pws
+}
+
+func newBoolPtr(b bool) *bool {
+	v := b
+	return &v
+}
diff --git a/builtin/logical/database/secret_creds.go b/builtin/logical/database/secret_creds.go
new file mode 100644
index 0000000..fefa452
--- /dev/null
+++ b/builtin/logical/database/secret_creds.go
@@ -0,0 +1,167 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package database
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	v4 "github.com/hashicorp/vault/sdk/database/dbplugin"
+	v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+const SecretCredsType = "creds"
+
+func secretCreds(b *databaseBackend) *framework.Secret {
+	return &framework.Secret{
+		Type:   SecretCredsType,
+		Fields: map[string]*framework.FieldSchema{},
+
+		Renew:  b.secretCredsRenew(),
+		Revoke: b.secretCredsRevoke(),
+	}
+}
+
+func (b *databaseBackend) secretCredsRenew() framework.OperationFunc {
+	return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+		// Get the username from the internal data
+		usernameRaw, ok := req.Secret.InternalData["username"]
+		if !ok {
+			return nil, fmt.Errorf("secret is missing username internal data")
+		}
+		username, ok := usernameRaw.(string)
+		if !ok {
+			return nil, fmt.Errorf("username internal data is not a string")
+		}
+
+		roleNameRaw, ok := req.Secret.InternalData["role"]
+		if !ok {
+			return nil, fmt.Errorf("could not find role with name: %q", req.Secret.InternalData["role"])
+		}
+
+		role, err := b.Role(ctx, req.Storage, roleNameRaw.(string))
+		if err != nil {
+			return nil, err
+		}
+		if role == nil {
+			return nil, fmt.Errorf("error during renew: could not find role with name %q", req.Secret.InternalData["role"])
+		}
+
+		// Get the Database object
+		dbi, err := b.GetConnection(ctx, req.Storage, role.DBName)
+		if err != nil {
+			return nil, err
+		}
+
+		dbi.RLock()
+		defer dbi.RUnlock()
+
+		// Make sure we increase the VALID UNTIL endpoint for this user.
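+		// The TTL is recalculated from the role's defaults and the requested
+		// increment; on success the user's expiration is pushed out to roughly
+		// now+TTL (plus the small buffer added below).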
+		ttl, _, err := framework.CalculateTTL(b.System(), req.Secret.Increment, role.DefaultTTL, 0, role.MaxTTL, 0, req.Secret.IssueTime)
+		if err != nil {
+			return nil, err
+		}
+		if ttl > 0 {
+			expireTime := time.Now().Add(ttl)
+			// Adding a small buffer since the TTL will be calculated again after this call
+			// to ensure the database credential does not expire before the lease
+			expireTime = expireTime.Add(5 * time.Second)
+
+			updateReq := v5.UpdateUserRequest{
+				Username: username,
+				Expiration: &v5.ChangeExpiration{
+					NewExpiration: expireTime,
+					Statements: v5.Statements{
+						Commands: role.Statements.Renewal,
+					},
+				},
+			}
+			_, err := dbi.database.UpdateUser(ctx, updateReq, false)
+			if err != nil {
+				b.CloseIfShutdown(dbi, err)
+				return nil, err
+			}
+		}
+		resp := &logical.Response{Secret: req.Secret}
+		resp.Secret.TTL = role.DefaultTTL
+		resp.Secret.MaxTTL = role.MaxTTL
+		return resp, nil
+	}
+}
+
+func (b *databaseBackend) secretCredsRevoke() framework.OperationFunc {
+	return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+		// Get the username from the internal data
+		usernameRaw, ok := req.Secret.InternalData["username"]
+		if !ok {
+			return nil, fmt.Errorf("secret is missing username internal data")
+		}
+		username, ok := usernameRaw.(string)
+		if !ok {
+			return nil, fmt.Errorf("username internal data is not a string")
+		}
+
+		var resp *logical.Response
+
+		roleNameRaw, ok := req.Secret.InternalData["role"]
+		if !ok {
+			return nil, fmt.Errorf("no role name was provided")
+		}
+
+		var dbName string
+		var statements v4.Statements
+
+		role, err := b.Role(ctx, req.Storage, roleNameRaw.(string))
+		if err != nil {
+			return nil, err
+		}
+		if role != nil {
+			dbName = role.DBName
+			statements = role.Statements
+		} else {
+			dbNameRaw, ok := req.Secret.InternalData["db_name"]
+			if !ok {
+				return nil, fmt.Errorf("error during revoke: could not find role with name %q or embedded revocation db name data", req.Secret.InternalData["role"])
+			}
+			dbName = dbNameRaw.(string)
+
+			statementsRaw, ok := req.Secret.InternalData["revocation_statements"]
+			if !ok {
+				return nil, fmt.Errorf("error during revoke: could not find role with name %q or embedded revocation statement data", req.Secret.InternalData["role"])
+			}
+
+			// If we don't actually have any statements, because none were
+			// set in the role, we'll end up with an empty one and the
+			// default for the db type will be attempted
+			if statementsRaw != nil {
+				statementsSlice, ok := statementsRaw.([]interface{})
+				if !ok {
+					return nil, fmt.Errorf("error during revoke: could not find role with name %q and embedded revocation data could not be read", req.Secret.InternalData["role"])
+				}
+				for _, v := range statementsSlice {
+					statements.Revocation = append(statements.Revocation, v.(string))
+				}
+			}
+		}
+
+		// Get our connection
+		dbi, err := b.GetConnection(ctx, req.Storage, dbName)
+		if err != nil {
+			return nil, err
+		}
+
+		dbi.RLock()
+		defer dbi.RUnlock()
+
+		deleteReq := v5.DeleteUserRequest{
+			Username: username,
+			Statements: v5.Statements{
+				Commands: statements.Revocation,
+			},
+		}
+		_, err = dbi.database.DeleteUser(ctx, deleteReq)
+		if err != nil {
+			b.CloseIfShutdown(dbi, err)
+			return nil, err
+		}
+		return resp, nil
+	}
+}
diff --git a/builtin/logical/database/version_wrapper.go b/builtin/logical/database/version_wrapper.go
new file mode 100644
index 0000000..daab179
--- /dev/null
+++ b/builtin/logical/database/version_wrapper.go
@@ -0,0 +1,265 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package database + +import ( + "context" + "fmt" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/vault/helper/versions" + v4 "github.com/hashicorp/vault/sdk/database/dbplugin" + v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/hashicorp/vault/sdk/logical" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type databaseVersionWrapper struct { + v4 v4.Database + v5 v5.Database +} + +var _ logical.PluginVersioner = databaseVersionWrapper{} + +// newDatabaseWrapper figures out which version of the database the pluginName is referring to and returns a wrapper object +// that can be used to make operations on the underlying database plugin. If a builtin pluginVersion is provided, it will +// be ignored. +func newDatabaseWrapper(ctx context.Context, pluginName string, pluginVersion string, sys pluginutil.LookRunnerUtil, logger log.Logger) (dbw databaseVersionWrapper, err error) { + // 1.12.0 and 1.12.1 stored plugin version in the config, but that stored + // builtin version may disappear from the plugin catalog when Vault is + // upgraded, so always reference builtin plugins by an empty version. + if versions.IsBuiltinVersion(pluginVersion) { + pluginVersion = "" + } + newDB, err := v5.PluginFactoryVersion(ctx, pluginName, pluginVersion, sys, logger) + if err == nil { + dbw = databaseVersionWrapper{ + v5: newDB, + } + return dbw, nil + } + + merr := &multierror.Error{} + merr = multierror.Append(merr, err) + + legacyDB, err := v4.PluginFactoryVersion(ctx, pluginName, pluginVersion, sys, logger) + if err == nil { + dbw = databaseVersionWrapper{ + v4: legacyDB, + } + return dbw, nil + } + merr = multierror.Append(merr, err) + + return dbw, fmt.Errorf("invalid database version: %s", merr) +} + +// Initialize the underlying database. This is analogous to a constructor on the database plugin object. +// Errors if the wrapper does not contain an underlying database. +func (d databaseVersionWrapper) Initialize(ctx context.Context, req v5.InitializeRequest) (v5.InitializeResponse, error) { + if !d.isV5() && !d.isV4() { + return v5.InitializeResponse{}, fmt.Errorf("no underlying database specified") + } + + // v5 Database + if d.isV5() { + return d.v5.Initialize(ctx, req) + } + + // v4 Database + saveConfig, err := d.v4.Init(ctx, req.Config, req.VerifyConnection) + if err != nil { + return v5.InitializeResponse{}, err + } + resp := v5.InitializeResponse{ + Config: saveConfig, + } + return resp, nil +} + +// NewUser in the database. This is different from the v5 Database in that it returns a password as well. +// This is done because the v4 Database is expected to generate a password and return it. The NewUserResponse +// does not have a way of returning the password so this function signature needs to be different. +// The password returned here should be considered the source of truth, not the provided password. +// Errors if the wrapper does not contain an underlying database. 
+func (d databaseVersionWrapper) NewUser(ctx context.Context, req v5.NewUserRequest) (resp v5.NewUserResponse, password string, err error) { + if !d.isV5() && !d.isV4() { + return v5.NewUserResponse{}, "", fmt.Errorf("no underlying database specified") + } + + // v5 Database + if d.isV5() { + resp, err = d.v5.NewUser(ctx, req) + return resp, req.Password, err + } + + // v4 Database + stmts := v4.Statements{ + Creation: req.Statements.Commands, + Rollback: req.RollbackStatements.Commands, + } + usernameConfig := v4.UsernameConfig{ + DisplayName: req.UsernameConfig.DisplayName, + RoleName: req.UsernameConfig.RoleName, + } + username, password, err := d.v4.CreateUser(ctx, stmts, usernameConfig, req.Expiration) + if err != nil { + return resp, "", err + } + + resp = v5.NewUserResponse{ + Username: username, + } + return resp, password, nil +} + +// UpdateUser in the underlying database. This is used to update any information currently supported +// in the UpdateUserRequest such as password credentials or user TTL. +// Errors if the wrapper does not contain an underlying database. +func (d databaseVersionWrapper) UpdateUser(ctx context.Context, req v5.UpdateUserRequest, isRootUser bool) (saveConfig map[string]interface{}, err error) { + if !d.isV5() && !d.isV4() { + return nil, fmt.Errorf("no underlying database specified") + } + + // v5 Database + if d.isV5() { + _, err := d.v5.UpdateUser(ctx, req) + return nil, err + } + + // v4 Database + if req.Password == nil && req.Expiration == nil { + return nil, fmt.Errorf("missing change to be sent to the database") + } + if req.Password != nil && req.Expiration != nil { + // We could support this, but it would require handling partial + // errors which I'm punting on since we don't need it for now + return nil, fmt.Errorf("cannot specify both password and expiration change at the same time") + } + + // Change password + if req.Password != nil { + return d.changePasswordLegacy(ctx, req.Username, req.Password, isRootUser) + } + + // Change expiration date + if req.Expiration != nil { + stmts := v4.Statements{ + Renewal: req.Expiration.Statements.Commands, + } + err := d.v4.RenewUser(ctx, stmts, req.Username, req.Expiration.NewExpiration) + return nil, err + } + return nil, nil +} + +// changePasswordLegacy attempts to use SetCredentials to change the password for the user with the password provided +// in ChangePassword. If that user is the root user and SetCredentials is unimplemented, it will fall back to using +// RotateRootCredentials. If not the root user, this will not use RotateRootCredentials. 
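+//
+// A rough usage sketch (identifiers such as pw are illustrative):
+//
+//	saveConfig, err := d.changePasswordLegacy(ctx, "root-user", &v5.ChangePassword{NewPassword: pw}, true)
+//	// saveConfig is non-nil only when the RotateRootCredentials fallback ran and
+//	// returned an updated connection configuration that the caller must persist.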
+func (d databaseVersionWrapper) changePasswordLegacy(ctx context.Context, username string, passwordChange *v5.ChangePassword, isRootUser bool) (saveConfig map[string]interface{}, err error) { + err = d.changeUserPasswordLegacy(ctx, username, passwordChange) + + // If changing the root user's password but SetCredentials is unimplemented, fall back to RotateRootCredentials + if isRootUser && (err == v4.ErrPluginStaticUnsupported || status.Code(err) == codes.Unimplemented) { + saveConfig, err = d.changeRootUserPasswordLegacy(ctx, passwordChange) + if err != nil { + return nil, err + } + return saveConfig, nil + } + if err != nil { + return nil, err + } + return nil, nil +} + +func (d databaseVersionWrapper) changeUserPasswordLegacy(ctx context.Context, username string, passwordChange *v5.ChangePassword) (err error) { + stmts := v4.Statements{ + Rotation: passwordChange.Statements.Commands, + } + staticConfig := v4.StaticUserConfig{ + Username: username, + Password: passwordChange.NewPassword, + } + _, _, err = d.v4.SetCredentials(ctx, stmts, staticConfig) + return err +} + +func (d databaseVersionWrapper) changeRootUserPasswordLegacy(ctx context.Context, passwordChange *v5.ChangePassword) (saveConfig map[string]interface{}, err error) { + return d.v4.RotateRootCredentials(ctx, passwordChange.Statements.Commands) +} + +// DeleteUser in the underlying database. Errors if the wrapper does not contain an underlying database. +func (d databaseVersionWrapper) DeleteUser(ctx context.Context, req v5.DeleteUserRequest) (v5.DeleteUserResponse, error) { + if !d.isV5() && !d.isV4() { + return v5.DeleteUserResponse{}, fmt.Errorf("no underlying database specified") + } + + // v5 Database + if d.isV5() { + return d.v5.DeleteUser(ctx, req) + } + + // v4 Database + stmts := v4.Statements{ + Revocation: req.Statements.Commands, + } + err := d.v4.RevokeUser(ctx, stmts, req.Username) + return v5.DeleteUserResponse{}, err +} + +// Type of the underlying database. Errors if the wrapper does not contain an underlying database. +func (d databaseVersionWrapper) Type() (string, error) { + if !d.isV5() && !d.isV4() { + return "", fmt.Errorf("no underlying database specified") + } + + // v5 Database + if d.isV5() { + return d.v5.Type() + } + + // v4 Database + return d.v4.Type() +} + +// Close the underlying database. Errors if the wrapper does not contain an underlying database. +func (d databaseVersionWrapper) Close() error { + if !d.isV5() && !d.isV4() { + return fmt.Errorf("no underlying database specified") + } + // v5 Database + if d.isV5() { + return d.v5.Close() + } + + // v4 Database + return d.v4.Close() +} + +func (d databaseVersionWrapper) PluginVersion() logical.PluginVersion { + // v5 Database + if d.isV5() { + if versioner, ok := d.v5.(logical.PluginVersioner); ok { + return versioner.PluginVersion() + } + } + + // v4 Database + if versioner, ok := d.v4.(logical.PluginVersioner); ok { + return versioner.PluginVersion() + } + return logical.EmptyPluginVersion +} + +func (d databaseVersionWrapper) isV5() bool { + return d.v5 != nil +} + +func (d databaseVersionWrapper) isV4() bool { + return d.v4 != nil +} diff --git a/builtin/logical/database/version_wrapper_test.go b/builtin/logical/database/version_wrapper_test.go new file mode 100644 index 0000000..95a5f7b --- /dev/null +++ b/builtin/logical/database/version_wrapper_test.go @@ -0,0 +1,853 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package database + +import ( + "context" + "fmt" + "reflect" + "testing" + "time" + + v4 "github.com/hashicorp/vault/sdk/database/dbplugin" + v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/mock" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestInitDatabase_missingDB(t *testing.T) { + dbw := databaseVersionWrapper{} + + req := v5.InitializeRequest{} + resp, err := dbw.Initialize(context.Background(), req) + if err == nil { + t.Fatalf("err expected, got nil") + } + + expectedResp := v5.InitializeResponse{} + if !reflect.DeepEqual(resp, expectedResp) { + t.Fatalf("Actual resp: %#v\nExpected resp: %#v", resp, expectedResp) + } +} + +func TestInitDatabase_newDB(t *testing.T) { + type testCase struct { + req v5.InitializeRequest + + newInitResp v5.InitializeResponse + newInitErr error + newInitCalls int + + expectedResp v5.InitializeResponse + expectErr bool + } + + tests := map[string]testCase{ + "success": { + req: v5.InitializeRequest{ + Config: map[string]interface{}{ + "foo": "bar", + }, + VerifyConnection: true, + }, + newInitResp: v5.InitializeResponse{ + Config: map[string]interface{}{ + "foo": "bar", + }, + }, + newInitCalls: 1, + expectedResp: v5.InitializeResponse{ + Config: map[string]interface{}{ + "foo": "bar", + }, + }, + expectErr: false, + }, + "error": { + req: v5.InitializeRequest{ + Config: map[string]interface{}{ + "foo": "bar", + }, + VerifyConnection: true, + }, + newInitResp: v5.InitializeResponse{}, + newInitErr: fmt.Errorf("test error"), + newInitCalls: 1, + expectedResp: v5.InitializeResponse{}, + expectErr: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + newDB := new(mockNewDatabase) + newDB.On("Initialize", mock.Anything, mock.Anything). + Return(test.newInitResp, test.newInitErr) + defer newDB.AssertNumberOfCalls(t, "Initialize", test.newInitCalls) + + dbw := databaseVersionWrapper{ + v5: newDB, + } + + resp, err := dbw.Initialize(context.Background(), test.req) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + if !reflect.DeepEqual(resp, test.expectedResp) { + t.Fatalf("Actual resp: %#v\nExpected resp: %#v", resp, test.expectedResp) + } + }) + } +} + +func TestInitDatabase_legacyDB(t *testing.T) { + type testCase struct { + req v5.InitializeRequest + + initConfig map[string]interface{} + initErr error + initCalls int + + expectedResp v5.InitializeResponse + expectErr bool + } + + tests := map[string]testCase{ + "success": { + req: v5.InitializeRequest{ + Config: map[string]interface{}{ + "foo": "bar", + }, + VerifyConnection: true, + }, + initConfig: map[string]interface{}{ + "foo": "bar", + }, + initCalls: 1, + expectedResp: v5.InitializeResponse{ + Config: map[string]interface{}{ + "foo": "bar", + }, + }, + expectErr: false, + }, + "error": { + req: v5.InitializeRequest{ + Config: map[string]interface{}{ + "foo": "bar", + }, + VerifyConnection: true, + }, + initErr: fmt.Errorf("test error"), + initCalls: 1, + expectedResp: v5.InitializeResponse{}, + expectErr: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + legacyDB := new(mockLegacyDatabase) + legacyDB.On("Init", mock.Anything, mock.Anything, mock.Anything). 
+ Return(test.initConfig, test.initErr) + defer legacyDB.AssertNumberOfCalls(t, "Init", test.initCalls) + + dbw := databaseVersionWrapper{ + v4: legacyDB, + } + + resp, err := dbw.Initialize(context.Background(), test.req) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + if !reflect.DeepEqual(resp, test.expectedResp) { + t.Fatalf("Actual resp: %#v\nExpected resp: %#v", resp, test.expectedResp) + } + }) + } +} + +func TestNewUser_missingDB(t *testing.T) { + dbw := databaseVersionWrapper{} + + req := v5.NewUserRequest{} + resp, pass, err := dbw.NewUser(context.Background(), req) + if err == nil { + t.Fatalf("err expected, got nil") + } + + expectedResp := v5.NewUserResponse{} + if !reflect.DeepEqual(resp, expectedResp) { + t.Fatalf("Actual resp: %#v\nExpected resp: %#v", resp, expectedResp) + } + + if pass != "" { + t.Fatalf("Password should be empty but was: %s", pass) + } +} + +func TestNewUser_newDB(t *testing.T) { + type testCase struct { + req v5.NewUserRequest + + newUserResp v5.NewUserResponse + newUserErr error + newUserCalls int + + expectedResp v5.NewUserResponse + expectErr bool + } + + tests := map[string]testCase{ + "success": { + req: v5.NewUserRequest{ + Password: "new_password", + }, + + newUserResp: v5.NewUserResponse{ + Username: "newuser", + }, + newUserCalls: 1, + + expectedResp: v5.NewUserResponse{ + Username: "newuser", + }, + expectErr: false, + }, + "error": { + req: v5.NewUserRequest{ + Password: "new_password", + }, + + newUserErr: fmt.Errorf("test error"), + newUserCalls: 1, + + expectedResp: v5.NewUserResponse{}, + expectErr: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + newDB := new(mockNewDatabase) + newDB.On("NewUser", mock.Anything, mock.Anything). + Return(test.newUserResp, test.newUserErr) + defer newDB.AssertNumberOfCalls(t, "NewUser", test.newUserCalls) + + dbw := databaseVersionWrapper{ + v5: newDB, + } + + resp, password, err := dbw.NewUser(context.Background(), test.req) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + if !reflect.DeepEqual(resp, test.expectedResp) { + t.Fatalf("Actual resp: %#v\nExpected resp: %#v", resp, test.expectedResp) + } + + if password != test.req.Password { + t.Fatalf("Actual password: %s Expected password: %s", password, test.req.Password) + } + }) + } +} + +func TestNewUser_legacyDB(t *testing.T) { + type testCase struct { + req v5.NewUserRequest + + createUserUsername string + createUserPassword string + createUserErr error + createUserCalls int + + expectedResp v5.NewUserResponse + expectedPassword string + expectErr bool + } + + tests := map[string]testCase{ + "success": { + req: v5.NewUserRequest{ + Password: "new_password", + }, + + createUserUsername: "newuser", + createUserPassword: "securepassword", + createUserCalls: 1, + + expectedResp: v5.NewUserResponse{ + Username: "newuser", + }, + expectedPassword: "securepassword", + expectErr: false, + }, + "error": { + req: v5.NewUserRequest{ + Password: "new_password", + }, + + createUserErr: fmt.Errorf("test error"), + createUserCalls: 1, + + expectedResp: v5.NewUserResponse{}, + expectErr: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + legacyDB := new(mockLegacyDatabase) + legacyDB.On("CreateUser", mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
+ Return(test.createUserUsername, test.createUserPassword, test.createUserErr) + defer legacyDB.AssertNumberOfCalls(t, "CreateUser", test.createUserCalls) + + dbw := databaseVersionWrapper{ + v4: legacyDB, + } + + resp, password, err := dbw.NewUser(context.Background(), test.req) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + if !reflect.DeepEqual(resp, test.expectedResp) { + t.Fatalf("Actual resp: %#v\nExpected resp: %#v", resp, test.expectedResp) + } + + if password != test.expectedPassword { + t.Fatalf("Actual password: %s Expected password: %s", password, test.req.Password) + } + }) + } +} + +func TestUpdateUser_missingDB(t *testing.T) { + dbw := databaseVersionWrapper{} + + req := v5.UpdateUserRequest{} + resp, err := dbw.UpdateUser(context.Background(), req, false) + if err == nil { + t.Fatalf("err expected, got nil") + } + + expectedConfig := map[string]interface{}(nil) + if !reflect.DeepEqual(resp, expectedConfig) { + t.Fatalf("Actual config: %#v\nExpected config: %#v", resp, expectedConfig) + } +} + +func TestUpdateUser_newDB(t *testing.T) { + type testCase struct { + req v5.UpdateUserRequest + + updateUserErr error + updateUserCalls int + + expectedResp v5.UpdateUserResponse + expectErr bool + } + + tests := map[string]testCase{ + "success": { + req: v5.UpdateUserRequest{ + Username: "existing_user", + }, + updateUserCalls: 1, + expectErr: false, + }, + "error": { + req: v5.UpdateUserRequest{ + Username: "existing_user", + }, + updateUserErr: fmt.Errorf("test error"), + updateUserCalls: 1, + expectErr: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + newDB := new(mockNewDatabase) + newDB.On("UpdateUser", mock.Anything, mock.Anything). 
+				Return(v5.UpdateUserResponse{}, test.updateUserErr)
+			defer newDB.AssertNumberOfCalls(t, "UpdateUser", test.updateUserCalls)
+
+			dbw := databaseVersionWrapper{
+				v5: newDB,
+			}
+
+			_, err := dbw.UpdateUser(context.Background(), test.req, false)
+			if test.expectErr && err == nil {
+				t.Fatalf("err expected, got nil")
+			}
+			if !test.expectErr && err != nil {
+				t.Fatalf("no error expected, got: %s", err)
+			}
+		})
+	}
+}
+
+func TestUpdateUser_legacyDB(t *testing.T) {
+	type testCase struct {
+		req        v5.UpdateUserRequest
+		isRootUser bool
+
+		setCredentialsErr   error
+		setCredentialsCalls int
+
+		rotateRootConfig map[string]interface{}
+		rotateRootErr    error
+		rotateRootCalls  int
+
+		renewUserErr   error
+		renewUserCalls int
+
+		expectedConfig map[string]interface{}
+		expectErr      bool
+	}
+
+	tests := map[string]testCase{
+		"missing changes": {
+			req: v5.UpdateUserRequest{
+				Username: "existing_user",
+			},
+			isRootUser: false,
+
+			setCredentialsCalls: 0,
+			rotateRootCalls:     0,
+			renewUserCalls:      0,
+
+			expectErr: true,
+		},
+		"both password and expiration changes": {
+			req: v5.UpdateUserRequest{
+				Username:   "existing_user",
+				Password:   &v5.ChangePassword{},
+				Expiration: &v5.ChangeExpiration{},
+			},
+			isRootUser: false,
+
+			setCredentialsCalls: 0,
+			rotateRootCalls:     0,
+			renewUserCalls:      0,
+
+			expectErr: true,
+		},
+		"change password - SetCredentials": {
+			req: v5.UpdateUserRequest{
+				Username: "existing_user",
+				Password: &v5.ChangePassword{
+					NewPassword: "newpassword",
+				},
+			},
+			isRootUser: false,
+
+			setCredentialsErr:   nil,
+			setCredentialsCalls: 1,
+			rotateRootCalls:     0,
+			renewUserCalls:      0,
+
+			expectedConfig: nil,
+			expectErr:      false,
+		},
+		"change password - SetCredentials failed": {
+			req: v5.UpdateUserRequest{
+				Username: "existing_user",
+				Password: &v5.ChangePassword{
+					NewPassword: "newpassword",
+				},
+			},
+			isRootUser: false,
+
+			setCredentialsErr:   fmt.Errorf("set credentials failed"),
+			setCredentialsCalls: 1,
+			rotateRootCalls:     0,
+			renewUserCalls:      0,
+
+			expectedConfig: nil,
+			expectErr:      true,
+		},
+		"change password - SetCredentials unimplemented but not a root user": {
+			req: v5.UpdateUserRequest{
+				Username: "existing_user",
+				Password: &v5.ChangePassword{
+					NewPassword: "newpassword",
+				},
+			},
+			isRootUser: false,
+
+			setCredentialsErr:   status.Error(codes.Unimplemented, "SetCredentials is not implemented"),
+			setCredentialsCalls: 1,
+
+			rotateRootCalls: 0,
+			renewUserCalls:  0,
+
+			expectedConfig: nil,
+			expectErr:      true,
+		},
+		"change password - RotateRootCredentials (gRPC Unimplemented)": {
+			req: v5.UpdateUserRequest{
+				Username: "existing_user",
+				Password: &v5.ChangePassword{
+					NewPassword: "newpassword",
+				},
+			},
+			isRootUser: true,
+
+			setCredentialsErr:   status.Error(codes.Unimplemented, "SetCredentials is not implemented"),
+			setCredentialsCalls: 1,
+
+			rotateRootConfig: map[string]interface{}{
+				"foo": "bar",
+			},
+			rotateRootCalls: 1,
+
+			renewUserCalls: 0,
+
+			expectedConfig: map[string]interface{}{
+				"foo": "bar",
+			},
+			expectErr: false,
+		},
+		"change password - RotateRootCredentials (ErrPluginStaticUnsupported)": {
+			req: v5.UpdateUserRequest{
+				Username: "existing_user",
+				Password: &v5.ChangePassword{
+					NewPassword: "newpassword",
+				},
+			},
+			isRootUser: true,
+
+			setCredentialsErr:   v4.ErrPluginStaticUnsupported,
+			setCredentialsCalls: 1,
+
+			rotateRootConfig: map[string]interface{}{
+				"foo": "bar",
+			},
+			rotateRootCalls: 1,
+
+			renewUserCalls: 0,
+
+			expectedConfig: map[string]interface{}{
+				"foo": "bar",
+			},
+			expectErr: false,
+		},
+		"change password - RotateRootCredentials failed": {
+			req: v5.UpdateUserRequest{
+				Username: "existing_user",
+				Password: &v5.ChangePassword{
+					NewPassword: "newpassword",
+				},
+			},
+			isRootUser: true,
+
+			setCredentialsErr:   status.Error(codes.Unimplemented, "SetCredentials is not implemented"),
+			setCredentialsCalls: 1,
+
+			rotateRootErr:   fmt.Errorf("rotate root failed"),
+			rotateRootCalls: 1,
+			renewUserCalls:  0,
+
+			expectedConfig: nil,
+			expectErr:      true,
+		},
+
+		"change expiration": {
+			req: v5.UpdateUserRequest{
+				Username: "existing_user",
+				Expiration: &v5.ChangeExpiration{
+					NewExpiration: time.Now(),
+				},
+			},
+			isRootUser: false,
+
+			setCredentialsCalls: 0,
+			rotateRootCalls:     0,
+
+			renewUserErr:   nil,
+			renewUserCalls: 1,
+
+			expectedConfig: nil,
+			expectErr:      false,
+		},
+		"change expiration failed": {
+			req: v5.UpdateUserRequest{
+				Username: "existing_user",
+				Expiration: &v5.ChangeExpiration{
+					NewExpiration: time.Now(),
+				},
+			},
+			isRootUser: false,
+
+			setCredentialsCalls: 0,
+			rotateRootCalls:     0,
+
+			renewUserErr:   fmt.Errorf("test error"),
+			renewUserCalls: 1,
+
+			expectedConfig: nil,
+			expectErr:      true,
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			legacyDB := new(mockLegacyDatabase)
+			legacyDB.On("SetCredentials", mock.Anything, mock.Anything, mock.Anything).
+				Return("", "", test.setCredentialsErr)
+			defer legacyDB.AssertNumberOfCalls(t, "SetCredentials", test.setCredentialsCalls)
+
+			legacyDB.On("RotateRootCredentials", mock.Anything, mock.Anything).
+				Return(test.rotateRootConfig, test.rotateRootErr)
+			defer legacyDB.AssertNumberOfCalls(t, "RotateRootCredentials", test.rotateRootCalls)
+
+			legacyDB.On("RenewUser", mock.Anything, mock.Anything, mock.Anything, mock.Anything).
+				Return(test.renewUserErr)
+			defer legacyDB.AssertNumberOfCalls(t, "RenewUser", test.renewUserCalls)
+
+			dbw := databaseVersionWrapper{
+				v4: legacyDB,
+			}
+
+			newConfig, err := dbw.UpdateUser(context.Background(), test.req, test.isRootUser)
+			if test.expectErr && err == nil {
+				t.Fatalf("err expected, got nil")
+			}
+			if !test.expectErr && err != nil {
+				t.Fatalf("no error expected, got: %s", err)
+			}
+
+			if !reflect.DeepEqual(newConfig, test.expectedConfig) {
+				t.Fatalf("Actual config: %#v\nExpected config: %#v", newConfig, test.expectedConfig)
+			}
+		})
+	}
+}
+
+func TestDeleteUser_missingDB(t *testing.T) {
+	dbw := databaseVersionWrapper{}
+
+	req := v5.DeleteUserRequest{}
+	_, err := dbw.DeleteUser(context.Background(), req)
+	if err == nil {
+		t.Fatalf("err expected, got nil")
+	}
+}
+
+func TestDeleteUser_newDB(t *testing.T) {
+	type testCase struct {
+		req v5.DeleteUserRequest
+
+		deleteUserErr   error
+		deleteUserCalls int
+
+		expectErr bool
+	}
+
+	tests := map[string]testCase{
+		"success": {
+			req: v5.DeleteUserRequest{
+				Username: "existing_user",
+			},
+
+			deleteUserErr:   nil,
+			deleteUserCalls: 1,
+
+			expectErr: false,
+		},
+		"error": {
+			req: v5.DeleteUserRequest{
+				Username: "existing_user",
+			},
+
+			deleteUserErr:   fmt.Errorf("test error"),
+			deleteUserCalls: 1,
+
+			expectErr: true,
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			newDB := new(mockNewDatabase)
+			newDB.On("DeleteUser", mock.Anything, mock.Anything).
+ Return(v5.DeleteUserResponse{}, test.deleteUserErr) + defer newDB.AssertNumberOfCalls(t, "DeleteUser", test.deleteUserCalls) + + dbw := databaseVersionWrapper{ + v5: newDB, + } + + _, err := dbw.DeleteUser(context.Background(), test.req) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + }) + } +} + +func TestDeleteUser_legacyDB(t *testing.T) { + type testCase struct { + req v5.DeleteUserRequest + + revokeUserErr error + revokeUserCalls int + + expectErr bool + } + + tests := map[string]testCase{ + "success": { + req: v5.DeleteUserRequest{ + Username: "existing_user", + }, + + revokeUserErr: nil, + revokeUserCalls: 1, + + expectErr: false, + }, + "error": { + req: v5.DeleteUserRequest{ + Username: "existing_user", + }, + + revokeUserErr: fmt.Errorf("test error"), + revokeUserCalls: 1, + + expectErr: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + legacyDB := new(mockLegacyDatabase) + legacyDB.On("RevokeUser", mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(test.revokeUserErr) + defer legacyDB.AssertNumberOfCalls(t, "RevokeUser", test.revokeUserCalls) + + dbw := databaseVersionWrapper{ + v4: legacyDB, + } + + _, err := dbw.DeleteUser(context.Background(), test.req) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + }) + } +} + +type badValue struct{} + +func (badValue) MarshalJSON() ([]byte, error) { + return nil, fmt.Errorf("this value cannot be marshalled to JSON") +} + +var _ logical.Storage = fakeStorage{} + +type fakeStorage struct { + putErr error +} + +func (f fakeStorage) Put(ctx context.Context, entry *logical.StorageEntry) error { + return f.putErr +} + +func (f fakeStorage) List(ctx context.Context, s string) ([]string, error) { + panic("list not implemented") +} + +func (f fakeStorage) Get(ctx context.Context, s string) (*logical.StorageEntry, error) { + panic("get not implemented") +} + +func (f fakeStorage) Delete(ctx context.Context, s string) error { + panic("delete not implemented") +} + +func TestStoreConfig(t *testing.T) { + type testCase struct { + config *DatabaseConfig + putErr error + expectErr bool + } + + tests := map[string]testCase{ + "bad config": { + config: &DatabaseConfig{ + PluginName: "testplugin", + ConnectionDetails: map[string]interface{}{ + "bad value": badValue{}, + }, + }, + putErr: nil, + expectErr: true, + }, + "storage error": { + config: &DatabaseConfig{ + PluginName: "testplugin", + ConnectionDetails: map[string]interface{}{ + "foo": "bar", + }, + }, + putErr: fmt.Errorf("failed to store config"), + expectErr: true, + }, + "happy path": { + config: &DatabaseConfig{ + PluginName: "testplugin", + ConnectionDetails: map[string]interface{}{ + "foo": "bar", + }, + }, + putErr: nil, + expectErr: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + storage := fakeStorage{ + putErr: test.putErr, + } + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + err := storeConfig(ctx, storage, "testconfig", test.config) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + }) + } +} diff --git a/builtin/logical/database/versioning_large_test.go 
b/builtin/logical/database/versioning_large_test.go
new file mode 100644
index 0000000..b39ddb7
--- /dev/null
+++ b/builtin/logical/database/versioning_large_test.go
@@ -0,0 +1,532 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package database
+
+// This file contains all "large"/expensive tests. These run requests against a running backend.
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"regexp"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/sdk/helper/consts"
+	"github.com/hashicorp/vault/sdk/helper/pluginutil"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/hashicorp/vault/vault"
+)
+
+func TestPlugin_lifecycle(t *testing.T) {
+	cluster, sys := getCluster(t)
+	defer cluster.Cleanup()
+
+	vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v4-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV4", []string{}, "")
+	vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v5-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV5", []string{}, "")
+	vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v6-database-plugin-muxed", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV6Multiplexed", []string{}, "")
+
+	config := logical.TestBackendConfig()
+	config.StorageView = &logical.InmemStorage{}
+	config.System = sys
+	lb, err := Factory(context.Background(), config)
+	if err != nil {
+		t.Fatal(err)
+	}
+	b, ok := lb.(*databaseBackend)
+	if !ok {
+		t.Fatal("could not convert to database backend")
+	}
+	defer b.Cleanup(context.Background())
+
+	type testCase struct {
+		dbName                string
+		dbType                string
+		configData            map[string]interface{}
+		assertDynamicUsername stringAssertion
+		assertDynamicPassword stringAssertion
+	}
+
+	tests := map[string]testCase{
+		"v4": {
+			dbName: "mockv4",
+			dbType: "mock-v4-database-plugin",
+			configData: map[string]interface{}{
+				"name":              "mockv4",
+				"plugin_name":       "mock-v4-database-plugin",
+				"connection_url":    "sample_connection_url",
+				"verify_connection": true,
+				"allowed_roles":     []string{"*"},
+				"username":          "mockv4-user",
+				"password":          "mysecurepassword",
+			},
+			assertDynamicUsername: assertStringPrefix("mockv4_user_"),
+			assertDynamicPassword: assertStringPrefix("mockv4_"),
+		},
+		"v5": {
+			dbName: "mockv5",
+			dbType: "mock-v5-database-plugin",
+			configData: map[string]interface{}{
+				"connection_url":    "sample_connection_url",
+				"plugin_name":       "mock-v5-database-plugin",
+				"verify_connection": true,
+				"allowed_roles":     []string{"*"},
+				"name":              "mockv5",
+				"username":          "mockv5-user",
+				"password":          "mysecurepassword",
+			},
+			assertDynamicUsername: assertStringPrefix("mockv5_user_"),
+			assertDynamicPassword: assertStringRegex("^[a-zA-Z0-9-]{20}"),
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			var cleanupReqs []*logical.Request
+			defer func() {
+				// Do not defer cleanup directly so that we can populate the
+				// slice before the function gets executed.
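+				// A plain `defer cleanup(t, b, cleanupReqs)` would evaluate
+				// cleanupReqs immediately, while it is still empty, because
+				// deferred call arguments are evaluated at the defer
+				// statement; the closure reads the slice only when it runs.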
+ cleanup(t, b, cleanupReqs) + }() + + // ///////////////////////////////////////////////////////////////// + // Configure + req := &logical.Request{ + Operation: logical.CreateOperation, + Path: fmt.Sprintf("config/%s", test.dbName), + Storage: config.StorageView, + Data: test.configData, + } + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + resp, err := b.HandleRequest(ctx, req) + assertErrIsNil(t, err) + assertRespHasNoErr(t, resp) + assertNoRespData(t, resp) + + cleanupReqs = append(cleanupReqs, &logical.Request{ + Operation: logical.DeleteOperation, + Path: fmt.Sprintf("config/%s", test.dbName), + Storage: config.StorageView, + }) + + // ///////////////////////////////////////////////////////////////// + // Rotate root credentials + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("rotate-root/%s", test.dbName), + Storage: config.StorageView, + } + ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + resp, err = b.HandleRequest(ctx, req) + assertErrIsNil(t, err) + assertRespHasNoErr(t, resp) + assertNoRespData(t, resp) + + // ///////////////////////////////////////////////////////////////// + // Dynamic credentials + + // Create role + dynamicRoleName := "dynamic-role" + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("roles/%s", dynamicRoleName), + Storage: config.StorageView, + Data: map[string]interface{}{ + "db_name": test.dbName, + "default_ttl": "5s", + "max_ttl": "1m", + }, + } + ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + resp, err = b.HandleRequest(ctx, req) + assertErrIsNil(t, err) + assertRespHasNoErr(t, resp) + assertNoRespData(t, resp) + + cleanupReqs = append(cleanupReqs, &logical.Request{ + Operation: logical.DeleteOperation, + Path: fmt.Sprintf("roles/%s", dynamicRoleName), + Storage: config.StorageView, + }) + + // Generate credentials + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: fmt.Sprintf("creds/%s", dynamicRoleName), + Storage: config.StorageView, + } + ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + resp, err = b.HandleRequest(ctx, req) + assertErrIsNil(t, err) + assertRespHasNoErr(t, resp) + assertRespHasData(t, resp) + + // TODO: Figure out how to make a call to the cluster that gives back a lease ID + // And also rotates the secret out after its TTL + + // ///////////////////////////////////////////////////////////////// + // Static credentials + + // Create static role + staticRoleName := "static-role" + req = &logical.Request{ + Operation: logical.CreateOperation, + Path: fmt.Sprintf("static-roles/%s", staticRoleName), + Storage: config.StorageView, + Data: map[string]interface{}{ + "db_name": test.dbName, + "username": "static-username", + "rotation_period": "5", + }, + } + ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + resp, err = b.HandleRequest(ctx, req) + assertErrIsNil(t, err) + assertRespHasNoErr(t, resp) + assertNoRespData(t, resp) + + cleanupReqs = append(cleanupReqs, &logical.Request{ + Operation: logical.DeleteOperation, + Path: fmt.Sprintf("static-roles/%s", staticRoleName), + Storage: config.StorageView, + }) + + // Get credentials + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: fmt.Sprintf("static-creds/%s", staticRoleName), + Storage: config.StorageView, + } + ctx, cancel = context.WithTimeout(context.Background(), 
5*time.Second) + defer cancel() + + resp, err = b.HandleRequest(ctx, req) + assertErrIsNil(t, err) + assertRespHasNoErr(t, resp) + assertRespHasData(t, resp) + }) + } +} + +func TestPlugin_VersionSelection(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + for _, version := range []string{"v11.0.0", "v11.0.1-rc1", "v2.0.0"} { + vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v5-database-plugin", consts.PluginTypeDatabase, version, "TestBackend_PluginMain_MockV5", []string{}, "") + } + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + lb, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + b, ok := lb.(*databaseBackend) + if !ok { + t.Fatal("could not convert to database backend") + } + defer b.Cleanup(context.Background()) + + test := func(t *testing.T, selectVersion, expectedVersion string) func(t *testing.T) { + return func(t *testing.T) { + req := &logical.Request{ + Operation: logical.CreateOperation, + Path: "config/db", + Storage: config.StorageView, + Data: map[string]interface{}{ + "connection_url": "sample_connection_url", + "plugin_name": "mock-v5-database-plugin", + "plugin_version": selectVersion, + "verify_connection": true, + "allowed_roles": []string{"*"}, + "name": "mockv5", + "username": "mockv5-user", + "password": "mysecurepassword", + }, + } + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + resp, err := b.HandleRequest(ctx, req) + assertErrIsNil(t, err) + assertRespHasNoErr(t, resp) + assertNoRespData(t, resp) + + defer func() { + _, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.DeleteOperation, + Path: "config/db", + Storage: config.StorageView, + }) + if err != nil { + t.Fatal(err) + } + }() + + req = &logical.Request{ + Operation: logical.ReadOperation, + Path: "config/db", + Storage: config.StorageView, + } + ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + resp, err = b.HandleRequest(ctx, req) + assertErrIsNil(t, err) + assertRespHasNoErr(t, resp) + if resp.Data["plugin_version"].(string) != expectedVersion { + t.Fatalf("Expected version %q but got %q", expectedVersion, resp.Data["plugin_version"].(string)) + } + } + } + + for name, tc := range map[string]struct { + selectVersion string + expectedVersion string + }{ + "no version specified, selects latest in the absence of unversioned plugins": { + selectVersion: "", + expectedVersion: "v11.0.1-rc1", + }, + "specific version selected": { + selectVersion: "11.0.0", + expectedVersion: "v11.0.0", + }, + } { + t.Run(name, test(t, tc.selectVersion, tc.expectedVersion)) + } + + // Register a newer version of the plugin, and ensure that's the new default version selected. + vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v5-database-plugin", consts.PluginTypeDatabase, "v11.0.1", "TestBackend_PluginMain_MockV5", []string{}, "") + t.Run("no version specified, new latest version selected", test(t, "", "v11.0.1")) + + // Register an unversioned plugin and ensure that is now selected when no version is specified. 
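+	// Per the expectations below, an unversioned registration takes
+	// precedence over the latest versioned one whenever no plugin_version is
+	// supplied, which is why the empty string is expected rather than
+	// v11.0.1.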
+ vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v5-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV5", []string{}, "") + for name, tc := range map[string]struct { + selectVersion string + expectedVersion string + }{ + "no version specified, selects unversioned": { + selectVersion: "", + expectedVersion: "", + }, + "specific version selected": { + selectVersion: "v2.0.0", + expectedVersion: "v2.0.0", + }, + } { + t.Run(name, test(t, tc.selectVersion, tc.expectedVersion)) + } +} + +func TestPlugin_VersionMustBeExplicitlyUpgraded(t *testing.T) { + cluster, sys := getCluster(t) + defer cluster.Cleanup() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + config.System = sys + lb, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + b, ok := lb.(*databaseBackend) + if !ok { + t.Fatal("could not convert to database backend") + } + defer b.Cleanup(context.Background()) + + configData := func(extraData ...string) map[string]interface{} { + data := map[string]interface{}{ + "connection_url": "sample_connection_url", + "plugin_name": "mysql-database-plugin", + "verify_connection": false, + "allowed_roles": []string{"*"}, + "username": "mockv5-user", + "password": "mysecurepassword", + } + if len(extraData)%2 != 0 { + t.Fatal("Expected an even number of args in extraData") + } + for i := 0; i < len(extraData); i += 2 { + data[extraData[i]] = extraData[i+1] + } + return data + } + + readVersion := func() string { + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Path: "config/db", + Storage: config.StorageView, + }) + assertErrIsNil(t, err) + assertRespHasNoErr(t, resp) + return resp.Data["plugin_version"].(string) + } + + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.CreateOperation, + Path: "config/db", + Storage: config.StorageView, + Data: configData(), + }) + assertErrIsNil(t, err) + assertRespHasNoErr(t, resp) + assertNoRespData(t, resp) + + version := readVersion() + expectedVersion := "" + if version != expectedVersion { + t.Fatalf("Expected version %q but got %q", expectedVersion, version) + } + + // Register versioned plugin, and check that a new write to existing config doesn't upgrade the plugin implicitly. + vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mysql-database-plugin", consts.PluginTypeDatabase, "v1.0.0", "TestBackend_PluginMain_MockV5", []string{}, "") + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/db", + Storage: config.StorageView, + Data: configData(), + }) + assertErrIsNil(t, err) + assertRespHasNoErr(t, resp) + assertNoRespData(t, resp) + + version = readVersion() + if version != expectedVersion { + t.Fatalf("Expected version %q but got %q", expectedVersion, version) + } + + // Now explicitly upgrade. 
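+	// The version is written without the leading "v"; the read below expects
+	// the normalized "v1.0.0", pinning the config to the versioned plugin.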
+ resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/db", + Storage: config.StorageView, + Data: configData("plugin_version", "1.0.0"), + }) + assertErrIsNil(t, err) + assertRespHasNoErr(t, resp) + assertNoRespData(t, resp) + + version = readVersion() + expectedVersion = "v1.0.0" + if version != expectedVersion { + t.Fatalf("Expected version %q but got %q", expectedVersion, version) + } +} + +func cleanup(t *testing.T, b *databaseBackend, reqs []*logical.Request) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Go in stack order so it works similar to defer + for i := len(reqs) - 1; i >= 0; i-- { + req := reqs[i] + resp, err := b.HandleRequest(ctx, req) + if err != nil { + t.Fatalf("Error cleaning up: %s", err) + } + if resp != nil && resp.IsError() { + t.Fatalf("Error cleaning up: %s", resp.Error()) + } + } +} + +func TestBackend_PluginMain_MockV4(t *testing.T) { + if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" { + return + } + + caPEM := os.Getenv(pluginutil.PluginCACertPEMEnv) + if caPEM == "" { + t.Fatal("CA cert not passed in") + } + + args := []string{"--ca-cert=" + caPEM} + + apiClientMeta := &api.PluginAPIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(args) + + RunV4(apiClientMeta.GetTLSConfig()) +} + +func TestBackend_PluginMain_MockV5(t *testing.T) { + if os.Getenv(pluginutil.PluginVaultVersionEnv) == "" { + return + } + + RunV5() +} + +func TestBackend_PluginMain_MockV6Multiplexed(t *testing.T) { + if os.Getenv(pluginutil.PluginVaultVersionEnv) == "" { + return + } + + RunV6Multiplexed() +} + +func assertNoRespData(t *testing.T, resp *logical.Response) { + t.Helper() + if resp != nil && len(resp.Data) > 0 { + t.Fatalf("Response had data when none was expected: %#v", resp.Data) + } +} + +func assertRespHasData(t *testing.T, resp *logical.Response) { + t.Helper() + if resp == nil || len(resp.Data) == 0 { + t.Fatalf("Response didn't have any data when some was expected") + } +} + +type stringAssertion func(t *testing.T, str string) + +func assertStringPrefix(expectedPrefix string) stringAssertion { + return func(t *testing.T, str string) { + t.Helper() + if !strings.HasPrefix(str, expectedPrefix) { + t.Fatalf("Missing prefix %q: Actual: %q", expectedPrefix, str) + } + } +} + +func assertStringRegex(expectedRegex string) stringAssertion { + re := regexp.MustCompile(expectedRegex) + return func(t *testing.T, str string) { + if !re.MatchString(str) { + t.Fatalf("Actual: %q did not match regexp %q", str, expectedRegex) + } + } +} + +func assertRespHasNoErr(t *testing.T, resp *logical.Response) { + t.Helper() + if resp != nil && resp.IsError() { + t.Fatalf("response is error: %#v\n", resp) + } +} + +func assertErrIsNil(t *testing.T, err error) { + t.Helper() + if err != nil { + t.Fatalf("No error expected, got: %s", err) + } +} diff --git a/builtin/logical/nomad/backend.go b/builtin/logical/nomad/backend.go new file mode 100644 index 0000000..0becdae --- /dev/null +++ b/builtin/logical/nomad/backend.go @@ -0,0 +1,85 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package nomad + +import ( + "context" + + "github.com/hashicorp/nomad/api" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +const operationPrefixNomad = "nomad" + +// Factory returns a Nomad backend that satisfies the logical.Backend interface +func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { + b := Backend() + if err := b.Setup(ctx, conf); err != nil { + return nil, err + } + return b, nil +} + +// Backend returns the configured Nomad backend +func Backend() *backend { + var b backend + b.Backend = &framework.Backend{ + PathsSpecial: &logical.Paths{ + SealWrapStorage: []string{ + "config/access", + }, + }, + + Paths: []*framework.Path{ + pathConfigAccess(&b), + pathConfigLease(&b), + pathListRoles(&b), + pathRoles(&b), + pathCredsCreate(&b), + }, + + Secrets: []*framework.Secret{ + secretToken(&b), + }, + BackendType: logical.TypeLogical, + } + + return &b +} + +type backend struct { + *framework.Backend +} + +func clientFromConfig(conf *accessConfig) (*api.Client, error) { + nomadConf := api.DefaultConfig() + if conf != nil { + if conf.Address != "" { + nomadConf.Address = conf.Address + } + if conf.Token != "" { + nomadConf.SecretID = conf.Token + } + if conf.CACert != "" { + nomadConf.TLSConfig.CACertPEM = []byte(conf.CACert) + } + if conf.ClientCert != "" { + nomadConf.TLSConfig.ClientCertPEM = []byte(conf.ClientCert) + } + if conf.ClientKey != "" { + nomadConf.TLSConfig.ClientKeyPEM = []byte(conf.ClientKey) + } + } + return api.NewClient(nomadConf) +} + +func (b *backend) client(ctx context.Context, s logical.Storage) (*api.Client, error) { + conf, err := b.readConfigAccess(ctx, s) + if err != nil { + return nil, err + } + + return clientFromConfig(conf) +} diff --git a/builtin/logical/nomad/backend_test.go b/builtin/logical/nomad/backend_test.go new file mode 100644 index 0000000..dc57583 --- /dev/null +++ b/builtin/logical/nomad/backend_test.go @@ -0,0 +1,708 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package nomad
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"reflect"
+	"strings"
+	"testing"
+	"time"
+
+	nomadapi "github.com/hashicorp/nomad/api"
+	"github.com/hashicorp/vault/helper/testhelpers"
+	"github.com/hashicorp/vault/sdk/helper/docker"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/mitchellh/mapstructure"
+)
+
+type Config struct {
+	docker.ServiceURL
+	Token string
+}
+
+func (c *Config) APIConfig() *nomadapi.Config {
+	apiConfig := nomadapi.DefaultConfig()
+	apiConfig.Address = c.URL().String()
+	apiConfig.SecretID = c.Token
+	return apiConfig
+}
+
+func (c *Config) Client() (*nomadapi.Client, error) {
+	apiConfig := c.APIConfig()
+
+	return nomadapi.NewClient(apiConfig)
+}
+
+func prepareTestContainer(t *testing.T, bootstrap bool) (func(), *Config) {
+	if retAddress := os.Getenv("NOMAD_ADDR"); retAddress != "" {
+		s, err := docker.NewServiceURLParse(retAddress)
+		if err != nil {
+			t.Fatal(err)
+		}
+		return func() {}, &Config{*s, os.Getenv("NOMAD_TOKEN")}
+	}
+
+	runner, err := docker.NewServiceRunner(docker.RunOptions{
+		ImageRepo:     "docker.mirror.hashicorp.services/multani/nomad",
+		ImageTag:      "1.1.6",
+		ContainerName: "nomad",
+		Ports:         []string{"4646/tcp"},
+		Cmd:           []string{"agent", "-dev"},
+		Env:           []string{`NOMAD_LOCAL_CONFIG=bind_addr = "0.0.0.0" acl { enabled = true }`},
+	})
+	if err != nil {
+		t.Fatalf("Could not start docker Nomad: %s", err)
+	}
+
+	var nomadToken string
+	svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) {
+		var err error
+		nomadapiConfig := nomadapi.DefaultConfig()
+		nomadapiConfig.Address = fmt.Sprintf("http://%s:%d/", host, port)
+		nomad, err := nomadapi.NewClient(nomadapiConfig)
+		if err != nil {
+			return nil, err
+		}
+
+		_, err = nomad.Status().Leader()
+		if err != nil {
+			t.Logf("[DEBUG] Nomad is not ready yet: %s", err)
+			return nil, err
+		}
+
+		if bootstrap {
+			aclbootstrap, _, err := nomad.ACLTokens().Bootstrap(nil)
+			if err != nil {
+				return nil, err
+			}
+			nomadToken = aclbootstrap.SecretID
+			t.Logf("[WARN] Generated Master token: %s", nomadToken)
+		}
+
+		nomadAuthConfig := nomadapi.DefaultConfig()
+		nomadAuthConfig.Address = nomad.Address()
+
+		if bootstrap {
+			nomadAuthConfig.SecretID = nomadToken
+
+			nomadAuth, err := nomadapi.NewClient(nomadAuthConfig)
+			if err != nil {
+				return nil, err
+			}
+
+			err = preparePolicies(nomadAuth)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		u, _ := docker.NewServiceURLParse(nomadapiConfig.Address)
+		return &Config{
+			ServiceURL: *u,
+			Token:      nomadToken,
+		}, nil
+	})
+	if err != nil {
+		t.Fatalf("Could not start docker Nomad: %s", err)
+	}
+
+	return svc.Cleanup, svc.Config.(*Config)
+}
+
+func preparePolicies(nomadClient *nomadapi.Client) error {
+	policy := &nomadapi.ACLPolicy{
+		Name:        "test",
+		Description: "test",
+		Rules: `namespace "default" {
+        policy = "read"
+      }
+      `,
+	}
+	anonPolicy := &nomadapi.ACLPolicy{
+		Name:        "anonymous",
+		Description: "Deny all access for anonymous requests",
+		Rules: `namespace "default" {
+            policy = "deny"
+        }
+        agent {
+            policy = "deny"
+        }
+        node {
+            policy = "deny"
+        }
+        `,
+	}
+
+	_, err := nomadClient.ACLPolicies().Upsert(policy, nil)
+	if err != nil {
+		return err
+	}
+
+	_, err = nomadClient.ACLPolicies().Upsert(anonPolicy, nil)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func TestBackend_config_Bootstrap(t *testing.T) {
+	config := logical.TestBackendConfig()
+	config.StorageView = &logical.InmemStorage{}
+	b, err := Factory(context.Background(), config)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	cleanup, svccfg := prepareTestContainer(t, false)
+	defer cleanup()
+
+	connData := map[string]interface{}{
+		"address": svccfg.URL().String(),
+		"token":   "",
+	}
+
+	confReq := &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "config/access",
+		Storage:   config.StorageView,
+		Data:      connData,
+	}
+
+	resp, err := b.HandleRequest(context.Background(), confReq)
+	if err != nil || (resp != nil && resp.IsError()) || resp != nil {
+		t.Fatalf("failed to write configuration: resp:%#v err:%s", resp, err)
+	}
+
+	confReq.Operation = logical.ReadOperation
+	resp, err = b.HandleRequest(context.Background(), confReq)
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("failed to write configuration: resp:%#v err:%s", resp, err)
+	}
+
+	expected := map[string]interface{}{
+		"address":               connData["address"].(string),
+		"max_token_name_length": 0,
+		"ca_cert":               "",
+		"client_cert":           "",
+	}
+	if !reflect.DeepEqual(expected, resp.Data) {
+		t.Fatalf("bad: expected:%#v\nactual:%#v\n", expected, resp.Data)
+	}
+
+	nomadClient, err := svccfg.Client()
+	if err != nil {
+		t.Fatalf("failed to construct nomad client, %v", err)
+	}
+
+	token, _, err := nomadClient.ACLTokens().Bootstrap(nil)
+	if err == nil {
+		t.Fatalf("expected acl system to be bootstrapped already, but was able to get the bootstrap token : %v", token)
+	}
+	// NOTE: fragile test, but it's the only way, AFAIK, to check that nomad is
+	// bootstrapped
+	if !strings.Contains(err.Error(), "bootstrap already done") {
+		t.Fatalf("expected acl system to be bootstrapped already: err: %v", err)
+	}
+}
+
+func TestBackend_config_access(t *testing.T) {
+	config := logical.TestBackendConfig()
+	config.StorageView = &logical.InmemStorage{}
+	b, err := Factory(context.Background(), config)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	cleanup, svccfg := prepareTestContainer(t, true)
+	defer cleanup()
+
+	connData := map[string]interface{}{
+		"address": svccfg.URL().String(),
+		"token":   svccfg.Token,
+	}
+
+	confReq := &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "config/access",
+		Storage:   config.StorageView,
+		Data:      connData,
+	}
+
+	resp, err := b.HandleRequest(context.Background(), confReq)
+	if err != nil || (resp != nil && resp.IsError()) || resp != nil {
+		t.Fatalf("failed to write configuration: resp:%#v err:%s", resp, err)
+	}
+
+	confReq.Operation = logical.ReadOperation
+	resp, err = b.HandleRequest(context.Background(), confReq)
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("failed to write configuration: resp:%#v err:%s", resp, err)
+	}
+
+	expected := map[string]interface{}{
+		"address":               connData["address"].(string),
+		"max_token_name_length": 0,
+		"ca_cert":               "",
+		"client_cert":           "",
+	}
+	if !reflect.DeepEqual(expected, resp.Data) {
+		t.Fatalf("bad: expected:%#v\nactual:%#v\n", expected, resp.Data)
+	}
+	if resp.Data["token"] != nil {
+		t.Fatalf("token should not be set in the response")
+	}
+}
+
+func TestBackend_config_access_with_certs(t *testing.T) {
+	config := logical.TestBackendConfig()
+	config.StorageView = &logical.InmemStorage{}
+	b, err := Factory(context.Background(), config)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	cleanup, svccfg := prepareTestContainer(t, true)
+	defer cleanup()
+
+	connData := map[string]interface{}{
+		"address":     svccfg.URL().String(),
+		"token":       svccfg.Token,
+		"ca_cert":     caCert,
+		"client_cert": clientCert,
+		"client_key":  clientKey,
+	}
+
+	confReq := &logical.Request{
+		
Operation: logical.UpdateOperation, + Path: "config/access", + Storage: config.StorageView, + Data: connData, + } + + resp, err := b.HandleRequest(context.Background(), confReq) + if err != nil || (resp != nil && resp.IsError()) || resp != nil { + t.Fatalf("failed to write configuration: resp:%#v err:%s", resp, err) + } + + confReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(context.Background(), confReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("failed to write configuration: resp:%#v err:%s", resp, err) + } + + expected := map[string]interface{}{ + "address": connData["address"].(string), + "max_token_name_length": 0, + "ca_cert": caCert, + "client_cert": clientCert, + } + if !reflect.DeepEqual(expected, resp.Data) { + t.Fatalf("bad: expected:%#v\nactual:%#v\n", expected, resp.Data) + } + if resp.Data["token"] != nil { + t.Fatalf("token should not be set in the response") + } +} + +func TestBackend_renew_revoke(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + cleanup, svccfg := prepareTestContainer(t, true) + defer cleanup() + + connData := map[string]interface{}{ + "address": svccfg.URL().String(), + "token": svccfg.Token, + } + + req := &logical.Request{ + Storage: config.StorageView, + Operation: logical.UpdateOperation, + Path: "config/access", + Data: connData, + } + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + req.Path = "role/test" + req.Data = map[string]interface{}{ + "policies": []string{"policy"}, + "lease": "6h", + } + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + req.Operation = logical.ReadOperation + req.Path = "creds/test" + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp nil") + } + if resp.IsError() { + t.Fatalf("resp is error: %v", resp.Error()) + } + + generatedSecret := resp.Secret + generatedSecret.TTL = 6 * time.Hour + + var d struct { + Token string `mapstructure:"secret_id"` + Accessor string `mapstructure:"accessor_id"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + t.Fatal(err) + } + t.Logf("[WARN] Generated token: %s with accessor %s", d.Token, d.Accessor) + + // Build a client and verify that the credentials work + nomadapiConfig := nomadapi.DefaultConfig() + nomadapiConfig.Address = connData["address"].(string) + nomadapiConfig.SecretID = d.Token + client, err := nomadapi.NewClient(nomadapiConfig) + if err != nil { + t.Fatal(err) + } + + t.Log("[WARN] Verifying that the generated token works...") + _, err = client.Agent().Members, nil + if err != nil { + t.Fatal(err) + } + + req.Operation = logical.RenewOperation + req.Secret = generatedSecret + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("got nil response from renew") + } + + req.Operation = logical.RevokeOperation + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + // Build a management client and verify that the token does not exist anymore + nomadmgmtConfig := nomadapi.DefaultConfig() + nomadmgmtConfig.Address = connData["address"].(string) + nomadmgmtConfig.SecretID = connData["token"].(string) + mgmtclient, err := nomadapi.NewClient(nomadmgmtConfig) + if err != nil { + t.Fatal(err) + } + + q := 
&nomadapi.QueryOptions{ + Namespace: "default", + } + + t.Log("[WARN] Verifying that the generated token does not exist...") + _, _, err = mgmtclient.ACLTokens().Info(d.Accessor, q) + if err == nil { + t.Fatal("err: expected error") + } +} + +func TestBackend_CredsCreateEnvVar(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + cleanup, svccfg := prepareTestContainer(t, true) + defer cleanup() + + req := logical.TestRequest(t, logical.UpdateOperation, "role/test") + req.Data = map[string]interface{}{ + "policies": []string{"policy"}, + "lease": "6h", + } + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + os.Setenv("NOMAD_TOKEN", svccfg.Token) + defer os.Unsetenv("NOMAD_TOKEN") + os.Setenv("NOMAD_ADDR", svccfg.URL().String()) + defer os.Unsetenv("NOMAD_ADDR") + + req.Operation = logical.ReadOperation + req.Path = "creds/test" + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp nil") + } + if resp.IsError() { + t.Fatalf("resp is error: %v", resp.Error()) + } +} + +func TestBackend_max_token_name_length(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + cleanup, svccfg := prepareTestContainer(t, true) + defer cleanup() + + testCases := []struct { + title string + roleName string + tokenLength int + }{ + { + title: "Default", + }, + { + title: "ConfigOverride", + tokenLength: 64, + }, + { + title: "ConfigOverride-LongName", + roleName: "testlongerrolenametoexceed64charsdddddddddddddddddddddddd", + tokenLength: 64, + }, + { + title: "Notrim", + roleName: "testlongersubrolenametoexceed64charsdddddddddddddddddddddddd", + }, + } + + for _, tc := range testCases { + t.Run(tc.title, func(t *testing.T) { + // setup config/access + connData := map[string]interface{}{ + "address": svccfg.URL().String(), + "token": svccfg.Token, + "max_token_name_length": tc.tokenLength, + } + expected := map[string]interface{}{ + "address": svccfg.URL().String(), + "max_token_name_length": tc.tokenLength, + "ca_cert": "", + "client_cert": "", + } + + expectedMaxTokenNameLength := maxTokenNameLength + if tc.tokenLength != 0 { + expectedMaxTokenNameLength = tc.tokenLength + } + + confReq := logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/access", + Storage: config.StorageView, + Data: connData, + } + + resp, err := b.HandleRequest(context.Background(), &confReq) + if err != nil || (resp != nil && resp.IsError()) || resp != nil { + t.Fatalf("failed to write configuration: resp:%#v err:%s", resp, err) + } + confReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(context.Background(), &confReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("failed to write configuration: resp:%#v err:%s", resp, err) + } + + // verify token length is returned in the config/access query + if !reflect.DeepEqual(expected, resp.Data) { + t.Fatalf("bad: expected:%#v\nactual:%#v\n", expected, resp.Data) + } + // verify token is not returned + if resp.Data["token"] != nil { + t.Fatalf("token should not be set in the response") + } + + // create a role to create nomad credentials with + // Seeds random with current timestamp + + if tc.roleName == "" { + tc.roleName = "test" + } + 
roleTokenName := testhelpers.RandomWithPrefix(tc.roleName) + + confReq.Path = "role/" + roleTokenName + confReq.Operation = logical.UpdateOperation + confReq.Data = map[string]interface{}{ + "policies": []string{"policy"}, + "lease": "6h", + } + resp, err = b.HandleRequest(context.Background(), &confReq) + if err != nil { + t.Fatal(err) + } + + confReq.Operation = logical.ReadOperation + confReq.Path = "creds/" + roleTokenName + resp, err = b.HandleRequest(context.Background(), &confReq) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("resp nil") + } + if resp.IsError() { + t.Fatalf("resp is error: %v", resp.Error()) + } + + // extract the secret, so we can query nomad directly + generatedSecret := resp.Secret + generatedSecret.TTL = 6 * time.Hour + + var d struct { + Token string `mapstructure:"secret_id"` + Accessor string `mapstructure:"accessor_id"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + t.Fatal(err) + } + + // Build a client and verify that the credentials work + nomadapiConfig := nomadapi.DefaultConfig() + nomadapiConfig.Address = connData["address"].(string) + nomadapiConfig.SecretID = d.Token + client, err := nomadapi.NewClient(nomadapiConfig) + if err != nil { + t.Fatal(err) + } + + // default query options for Nomad queries ... not sure if needed + qOpts := &nomadapi.QueryOptions{ + Namespace: "default", + } + + // connect to Nomad and verify the token name does not exceed the + // max_token_name_length + token, _, err := client.ACLTokens().Self(qOpts) + if err != nil { + t.Fatal(err) + } + + if len(token.Name) > expectedMaxTokenNameLength { + t.Fatalf("token name exceeds max length (%d): %s (%d)", expectedMaxTokenNameLength, token.Name, len(token.Name)) + } + }) + } +} + +const caCert = `-----BEGIN CERTIFICATE----- +MIIF7zCCA9egAwIBAgIINVVQic4bju8wDQYJKoZIhvcNAQELBQAwaDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC1Vuc3BlY2lmaWVkMR8wHQYDVQQLDBZjYS0zODQzMDY2 +NDA5ODI5MjQwNTU5MSIwIAYDVQQDDBl4cHMxNS5sb2NhbC5jaXBoZXJib3kuY29t +MB4XDTIyMDYwMjIxMTgxN1oXDTIzMDcwNTIxMTgxN1owaDELMAkGA1UEBhMCVVMx +FDASBgNVBAoMC1Vuc3BlY2lmaWVkMR8wHQYDVQQLDBZjYS0zODQzMDY2NDA5ODI5 +MjQwNTU5MSIwIAYDVQQDDBl4cHMxNS5sb2NhbC5jaXBoZXJib3kuY29tMIICIjAN +BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA35VilgfqMUKhword7wORXRFyPbpz +8uqO7eRaylMnkAkbk5eoQB/iYfXjJ6ZBs5mJGQVz5ZNvh9EzZsk1J6wqYgbwVKUx +fh4kvW6sXtDirtb4ZQAK7OTLEoapUQGnGcvm+aEYfvC1sTBl4fbex7yyN5FYMJTM +TAUumhdq2pwujaj2xkN9DwZa89Tk7tbj9HE9DTRji7bnciEtrmTAOIOfOrT/1l3x +YW1BwYXpQ0TamJ58pC/iNgEp5FAxKt9d3RggesMA7pvG/f8fNgsa/Tku/PeEXNPA ++Yx4CcAipujmqpBKiKwJ6TOzp80m2zrZ7Da4Av5vVS5GsNJxhFYD1h8hU1ptK9BS +2CaTwBpV421C9BfEmtSAksGDIWYujfiHb6XNaQrt8Hu85GBuPUudVn0lpoXLn2xD +rGK8WEK2gWZ4eez3ZDLbpLui6c1m7AVlMtj374s+LHcD7JIxY475Na7pXmEWReqM +RUyCEq1spOOn70fOdhphhmpY6DoklOTOriPawCLNmkPWRnhrIwqyP1gse9YMqQ2n +LhWUkv/08m/0pb4e5ijVhsZNzv+1PXPWCk968nzt0BMDgJT+0ZiXsaU7FILXuo7Y +Ijgrj7dpXWx2MBdMGPFQdveog7Pa80Yb7r4ERW0DL78TxYC6m/S1p14PHwZpDZzQ +LrPrBcpI5XzI7osCAwEAAaOBnDCBmTAOBgNVHQ8BAf8EBAMCAqQwDAYDVR0TBAUw +AwEB/zA0BgNVHR4ELTAroCkwG4IZeHBzMTUubG9jYWwuY2lwaGVyYm95LmNvbTAK +hwh/AAAB/wAAADAkBgNVHREEHTAbghl4cHMxNS5sb2NhbC5jaXBoZXJib3kuY29t +MB0GA1UdDgQWBBR3bHgDp5RpzerMKRkaGDFN/ZeImjANBgkqhkiG9w0BAQsFAAOC +AgEArkuDYYWYHYxIoTeZkQz5L1y0H27ZWPJx5jBBuktPonDLQxBGAwkl6NZbJGLU +v+usII+eyjPKIgjhCiTXJAmeIngwWoN3PHOLMIPe9axuNt6qVoP4dQtzfpPR3buK +CWj9i3H0ixK73klk7QWZiBUDinYfEMSNRpU3G7NsqmqCXD4s5gB+8y9c7+zIiJyN +IaJBWpzI4eQBi/4cBhtM7Xa+CMB/8whhWYR6H+GXGZdNcP5f7bwneMstWKceTadk +IEzFucJHDySpEkIA2A9t33pV54FmEp+JVwvxAH4FABCnjPmhg0j1IonWV5pySWpG 
+hhEZpnRRH1XfpTA5i6dlyUA5DJjL8X1lYrgOK+LaoR52mQh5JBsMoVHFzN50DiMA +RTsbq4Qzozf23hU1BqW4NOzPTukgSGEcbT/DhXKPPPLL8JD0rPelJPq76X3TJjgZ +C9uMnZaDnxjppDXp5oBIXqC05FDxJ5sSODNOpKGyuzOU2qQLMau33yYOgaSAttBk +r29+LNFJ+0QzMuPjYXPznpxbsI+lrlZ3F2tDGGs8+JVceC1YX+cBEsEOiqNGTIip +/DY3b9gu5oiTwhcFyQW8+WFsirRS/g5t+M40WLKVPdK09z96krFXQMkL6a7LHLY1 +n9ivwj+sTG1XmJYXp8naLg4wdzIUf2fJxaFNI5Yq4elZ8sY= +-----END CERTIFICATE-----` + +const clientCert = `-----BEGIN CERTIFICATE----- +MIIEsDCCApigAwIBAgIIRY1JBRIynFYwDQYJKoZIhvcNAQELBQAwaDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC1Vuc3BlY2lmaWVkMR8wHQYDVQQLDBZjYS0zODQzMDY2 +NDA5ODI5MjQwNTU5MSIwIAYDVQQDDBl4cHMxNS5sb2NhbC5jaXBoZXJib3kuY29t +MB4XDTIyMDYwMjIxMTgxOFoXDTIzMDcwNTIxMTgxOFowRzELMAkGA1UEBhMCVVMx +FDASBgNVBAoMC1Vuc3BlY2lmaWVkMSIwIAYDVQQDDBl4cHMxNS5sb2NhbC5jaXBo +ZXJib3kuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAs+XYhsW2 +vTwN7gY3xMxgbNN8d3aoeqCswOp05BBf0Vgv3febahm422ubXXd5Mg2UGiU7sJVe +4tUpDeupVVRX5Qr/hpiXgEyfRDAAAJKqrl65KSS62TCbT/eJZ0ah25HV1evI4uM2 +0kl5QWhtQjDyaVlTS38YFqXXQvpOuU5DG6UbKnpMcpsCPTyUKEJvJ95ZLcz0HJ8I +kIHrnX0Lt0pOhkllj5Nk4cXhU8CFk8IGNz7SVAycrUsffAUMNNEbrIOIfOTPHR1c +q3X9hO4/5pt80uIDMFwwumoA7nQR0AhlKkw9SskCIzJhKwKwssQY7fmovNG0fOEd +/+vSHK7OsYW+gwIDAQABo38wfTAOBgNVHQ8BAf8EBAMCBaAwEwYDVR0lBAwwCgYI +KwYBBQUHAwIwCQYDVR0TBAIwADAqBgNVHREEIzAhghl4cHMxNS5sb2NhbC5jaXBo +ZXJib3kuY29thwR/AAABMB8GA1UdIwQYMBaAFHdseAOnlGnN6swpGRoYMU39l4ia +MA0GCSqGSIb3DQEBCwUAA4ICAQBUSP4ZJglCCrYkM5Le7McdvfkM5uYv1aQn0sM4 +gbyDEWO0fnv50vLpD3y4ckgHLoD52pAZ0hN8a7rwAUae21GA6DvEchSH5x/yvJiS +7FBlq39sAafe03ZlzDErNYJRkLcnPAqG74lJ1SSsMcs9gCPHM8R7HtNnhAga06L7 +K8/G43dsGZCmEb+xcX2B9McCt8jBG6TJPTGafb3BJ0JTmR/tHdoLFIiNwI+qzd2U +lMnGlkIApULX8tmIMsWO0rjdiFkPWGcmfn9ChC0iDpQOAcKSDBcZlWrDNpzKk0mK +l0TbE6cxcmCUUpiwaXFrbkwVWQw4W0c4b3sWFtWifFbiR1qZ/OT2Y2sHbkbxwvPl +PjjXMDBAdRRwtNcTP1E55I5zvwzzBxUpxOob0miorhTJrZR9So0rgv7Roce4ED6M +WETYa/mGhe+Q7gBQygIVoryfQLgGBsHC+7V4RDvYTazwZkz9nLQxHLI/TAZU5ofM +WqdoUkMd68rxTTEUoMfGbftxjKA0raxGcO7/PjLR3O743EwCqeqYJ7OKWgGRLnui +kIKNUJlZ9umURUFzL++Bx4Pr95jWXb2WYqYYQxhDz0oR5q5smnFm5+/1/MLDMvDU +TrgBK6pey4QF33B/I55H1+7tGdv85Q57Z8UrNi/IQxR2sFlsOTeCwStpBQ56sdZk +Wi4+cQ== +-----END CERTIFICATE-----` + +const clientKey = `-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCz5diGxba9PA3u +BjfEzGBs03x3dqh6oKzA6nTkEF/RWC/d95tqGbjba5tdd3kyDZQaJTuwlV7i1SkN +66lVVFflCv+GmJeATJ9EMAAAkqquXrkpJLrZMJtP94lnRqHbkdXV68ji4zbSSXlB +aG1CMPJpWVNLfxgWpddC+k65TkMbpRsqekxymwI9PJQoQm8n3lktzPQcnwiQgeud +fQu3Sk6GSWWPk2ThxeFTwIWTwgY3PtJUDJytSx98BQw00Rusg4h85M8dHVyrdf2E +7j/mm3zS4gMwXDC6agDudBHQCGUqTD1KyQIjMmErArCyxBjt+ai80bR84R3/69Ic +rs6xhb6DAgMBAAECggEAPBcja2kxcCZWNNKo4DiwYMmHwtPE1SlEazAlmWSKzP+b +BZbGt/sdj1VzURYuSnTUqqMTPBm41yYCj57PMix5K42v6sKfoIB3lqw94/MZxiLn +0IFvVErzJhP2NqQWPqSI++rFcFwbHMTkFuAN1tVIs73dn9M1NaNxsvKvRyCIM/wz +5YQSDyTkdW4jQM2RvUFOoqwmeyAlQoBRMgQ4bHfLHxmPEjFgw1MAmmG8bJdkupin +MVzhZyKj4Fh80Xa2MU4KokijjG41hmYbg/sjNHaHJFDA92Rwq13dhWytrauJDxa/ +3yj8pHWc23Y3hXvRAf/cibDVzXmmLj49W1i06KuUCQKBgQDj5yF/DJV0IOkhfbol ++f5AGH4ZrEXA/JwA5SxHU+aKhUuPEqK/LeUWqiy3szFjOz2JOnCC0LMN42nsmMyK +sdQEKHp2SPd2wCxsAKZAuxrEi6yBt1mEPFFU5yzvZbdMqYChKJjm9fbRHtuc63s8 +PyVw67Ii9o4ij+PxfTobIs18xwKBgQDKE59w3uUDt2uoqNC8x4m5onL2p2vtcTHC +CxU57mu1+9CRM8N2BEp2VI5JaXjqt6W4u9ISrmOqmsPgTwosAquKpA/nu3bVvR9g +WlN9dh2Xgza0/AFaA9CB++ier8RJq5xFlcasMUmgkhYt3zgKNgRDfjfREWM0yamm +P++hAYRcZQKBgHEuYQk6k6J3ka/rQ54GmEj2oPFZB88+5K7hIWtO9IhIiGzGYYK2 +ZTYrT0fvuxA/5GCZYDTnNnUoQnuYqsQaamOiQqcpt5QG/kiozegJw9JmV0aYauFs +HyweHsfJaQ2uhE4E3mKdNnVGcORuYeZaqdp5gx8v+QibEyXj/g5p60kTAoGBALKp 
+TMOHXmW9yqKwtvThWoRU+13WQlcJSFvuXpL8mCCrBgkLAhqaypb6RV7ksLKdMhk1 +fhNkOdxBv0LXvv+QUMhgK2vP084/yrjuw3hecOVfboPvduZ2DuiNp2p9rocQAjeH +p8LgRN+Bqbhe7fYhMf3WX1UqEVM/pQ3G43+vjq39AoGAOyD2/hFSIx6BMddUNTHG +BEsMUc/DHYslZebbF1zAWnkKdTt+URhtHAFB2tYRDgkZfwW+wr/w12dJTIkX965o +HO7tI4FgpU9b0i8FTuwYkBfjwp2j0Xd2/VBR8Qpd17qKl3I6NXDsf3ykjGZAvldH +Tll+qwEZpXSRa5OWWTpGV8I= +-----END PRIVATE KEY-----` diff --git a/builtin/logical/nomad/cmd/nomad/main.go b/builtin/logical/nomad/cmd/nomad/main.go new file mode 100644 index 0000000..10f45aa --- /dev/null +++ b/builtin/logical/nomad/cmd/nomad/main.go @@ -0,0 +1,34 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "os" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/nomad" + "github.com/hashicorp/vault/sdk/plugin" +) + +func main() { + apiClientMeta := &api.PluginAPIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args[1:]) + + tlsConfig := apiClientMeta.GetTLSConfig() + tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) + + if err := plugin.ServeMultiplex(&plugin.ServeOpts{ + BackendFactoryFunc: nomad.Factory, + // set the TLSProviderFunc so that the plugin maintains backwards + // compatibility with Vault versions that don’t support plugin AutoMTLS + TLSProviderFunc: tlsProviderFunc, + }); err != nil { + logger := hclog.New(&hclog.LoggerOptions{}) + + logger.Error("plugin shutting down", "error", err) + os.Exit(1) + } +} diff --git a/builtin/logical/nomad/path_config_access.go b/builtin/logical/nomad/path_config_access.go new file mode 100644 index 0000000..cbb2140 --- /dev/null +++ b/builtin/logical/nomad/path_config_access.go @@ -0,0 +1,205 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package nomad + +import ( + "context" + "fmt" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +const configAccessKey = "config/access" + +func pathConfigAccess(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "config/access", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixNomad, + }, + + Fields: map[string]*framework.FieldSchema{ + "address": { + Type: framework.TypeString, + Description: "Nomad server address", + }, + + "token": { + Type: framework.TypeString, + Description: "Token for API calls", + }, + + "max_token_name_length": { + Type: framework.TypeInt, + Description: "Max length for name of generated Nomad tokens", + }, + "ca_cert": { + Type: framework.TypeString, + Description: `CA certificate to use when verifying Nomad server certificate, +must be x509 PEM encoded.`, + }, + "client_cert": { + Type: framework.TypeString, + Description: `Client certificate used for Nomad's TLS communication, +must be x509 PEM encoded and if this is set you need to also set client_key.`, + }, + "client_key": { + Type: framework.TypeString, + Description: `Client key used for Nomad's TLS communication, +must be x509 PEM encoded and if this is set you need to also set client_cert.`, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigAccessRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "read", + OperationSuffix: "access-configuration", + }, + }, + logical.CreateOperation: &framework.PathOperation{ + Callback: b.pathConfigAccessWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "access", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigAccessWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "access", + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathConfigAccessDelete, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "delete", + OperationSuffix: "access-configuration", + }, + }, + }, + + ExistenceCheck: b.configExistenceCheck, + } +} + +func (b *backend) configExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) { + entry, err := b.readConfigAccess(ctx, req.Storage) + if err != nil { + return false, err + } + + return entry != nil, nil +} + +func (b *backend) readConfigAccess(ctx context.Context, storage logical.Storage) (*accessConfig, error) { + entry, err := storage.Get(ctx, configAccessKey) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + conf := &accessConfig{} + if err := entry.DecodeJSON(conf); err != nil { + return nil, fmt.Errorf("error reading nomad access configuration: %w", err) + } + + return conf, nil +} + +func (b *backend) pathConfigAccessRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + conf, err := b.readConfigAccess(ctx, req.Storage) + if err != nil { + return nil, err + } + if conf == nil { + return nil, nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + "address": conf.Address, + "max_token_name_length": conf.MaxTokenNameLength, + "ca_cert": conf.CACert, + "client_cert": conf.ClientCert, + }, + }, nil +} + +func (b *backend) pathConfigAccessWrite(ctx context.Context, req 
*logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	conf, err := b.readConfigAccess(ctx, req.Storage)
+	if err != nil {
+		return nil, err
+	}
+	if conf == nil {
+		conf = &accessConfig{}
+	}
+
+	address, ok := data.GetOk("address")
+	if ok {
+		conf.Address = address.(string)
+	}
+	token, ok := data.GetOk("token")
+	if ok {
+		conf.Token = token.(string)
+	}
+	caCert, ok := data.GetOk("ca_cert")
+	if ok {
+		conf.CACert = caCert.(string)
+	}
+	clientCert, ok := data.GetOk("client_cert")
+	if ok {
+		conf.ClientCert = clientCert.(string)
+	}
+	clientKey, ok := data.GetOk("client_key")
+	if ok {
+		conf.ClientKey = clientKey.(string)
+	}
+
+	if conf.Token == "" {
+		client, err := clientFromConfig(conf)
+		if err != nil {
+			return logical.ErrorResponse("Token not provided and failed to construct client"), err
+		}
+		token, _, err := client.ACLTokens().Bootstrap(nil)
+		if err != nil {
+			return logical.ErrorResponse("Token not provided and failed to bootstrap ACLs"), err
+		}
+		conf.Token = token.SecretID
+	}
+
+	conf.MaxTokenNameLength = data.Get("max_token_name_length").(int)
+
+	entry, err := logical.StorageEntryJSON(configAccessKey, conf)
+	if err != nil {
+		return nil, err
+	}
+	if err := req.Storage.Put(ctx, entry); err != nil {
+		return nil, err
+	}
+
+	return nil, nil
+}
+
+func (b *backend) pathConfigAccessDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	if err := req.Storage.Delete(ctx, configAccessKey); err != nil {
+		return nil, err
+	}
+	return nil, nil
+}
+
+type accessConfig struct {
+	Address            string `json:"address"`
+	Token              string `json:"token"`
+	MaxTokenNameLength int    `json:"max_token_name_length"`
+	CACert             string `json:"ca_cert"`
+	ClientCert         string `json:"client_cert"`
+	ClientKey          string `json:"client_key"`
+}
diff --git a/builtin/logical/nomad/path_config_lease.go b/builtin/logical/nomad/path_config_lease.go
new file mode 100644
index 0000000..05c83ff
--- /dev/null
+++ b/builtin/logical/nomad/path_config_lease.go
@@ -0,0 +1,136 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package nomad + +import ( + "context" + "time" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +const leaseConfigKey = "config/lease" + +func pathConfigLease(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "config/lease", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixNomad, + }, + + Fields: map[string]*framework.FieldSchema{ + "ttl": { + Type: framework.TypeDurationSecond, + Description: "Duration before which the issued token needs renewal", + }, + "max_ttl": { + Type: framework.TypeDurationSecond, + Description: `Duration after which the issued token should not be allowed to be renewed`, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathLeaseRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "read", + OperationSuffix: "lease-configuration", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathLeaseUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "lease", + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathLeaseDelete, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "delete", + OperationSuffix: "lease-configuration", + }, + }, + }, + + HelpSynopsis: pathConfigLeaseHelpSyn, + HelpDescription: pathConfigLeaseHelpDesc, + } +} + +// Sets the lease configuration parameters +func (b *backend) pathLeaseUpdate(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + entry, err := logical.StorageEntryJSON("config/lease", &configLease{ + TTL: time.Second * time.Duration(d.Get("ttl").(int)), + MaxTTL: time.Second * time.Duration(d.Get("max_ttl").(int)), + }) + if err != nil { + return nil, err + } + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + + return nil, nil +} + +func (b *backend) pathLeaseDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + if err := req.Storage.Delete(ctx, leaseConfigKey); err != nil { + return nil, err + } + + return nil, nil +} + +// Returns the lease configuration parameters +func (b *backend) pathLeaseRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + lease, err := b.LeaseConfig(ctx, req.Storage) + if err != nil { + return nil, err + } + if lease == nil { + return nil, nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + "ttl": int64(lease.TTL.Seconds()), + "max_ttl": int64(lease.MaxTTL.Seconds()), + }, + }, nil +} + +// Lease returns the lease information +func (b *backend) LeaseConfig(ctx context.Context, s logical.Storage) (*configLease, error) { + entry, err := s.Get(ctx, leaseConfigKey) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result configLease + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + return &result, nil +} + +// Lease configuration information for the secrets issued by this backend +type configLease struct { + TTL time.Duration `json:"ttl" mapstructure:"ttl"` + MaxTTL time.Duration `json:"max_ttl" mapstructure:"max_ttl"` +} + +var pathConfigLeaseHelpSyn = "Configure the lease parameters for generated tokens" + +var pathConfigLeaseHelpDesc = ` +Sets the ttl and max_ttl values for the secrets to be issued by this 
backend.
+Both ttl and max_ttl take an integer number of seconds as input, or a
+duration string like "1h".
+`
diff --git a/builtin/logical/nomad/path_creds_create.go b/builtin/logical/nomad/path_creds_create.go
new file mode 100644
index 0000000..29f84d1
--- /dev/null
+++ b/builtin/logical/nomad/path_creds_create.go
@@ -0,0 +1,107 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package nomad
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/hashicorp/nomad/api"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// maxTokenNameLength is the maximum length for the name of a Nomad access
+// token
+const maxTokenNameLength = 256
+
+func pathCredsCreate(b *backend) *framework.Path {
+	return &framework.Path{
+		Pattern: "creds/" + framework.GenericNameRegex("name"),
+
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixNomad,
+			OperationVerb:   "generate",
+			OperationSuffix: "credentials",
+		},
+
+		Fields: map[string]*framework.FieldSchema{
+			"name": {
+				Type:        framework.TypeString,
+				Description: "Name of the role",
+			},
+		},
+
+		Callbacks: map[logical.Operation]framework.OperationFunc{
+			logical.ReadOperation: b.pathTokenRead,
+		},
+	}
+}
+
+func (b *backend) pathTokenRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	name := d.Get("name").(string)
+	conf, _ := b.readConfigAccess(ctx, req.Storage)
+	// establish a default
+	tokenNameLength := maxTokenNameLength
+	if conf != nil && conf.MaxTokenNameLength > 0 {
+		tokenNameLength = conf.MaxTokenNameLength
+	}
+
+	role, err := b.Role(ctx, req.Storage, name)
+	if err != nil {
+		return nil, fmt.Errorf("error retrieving role: %w", err)
+	}
+	if role == nil {
+		return logical.ErrorResponse(fmt.Sprintf("role %q not found", name)), nil
+	}
+
+	// Determine if we have a lease configuration
+	leaseConfig, err := b.LeaseConfig(ctx, req.Storage)
+	if err != nil {
+		return nil, err
+	}
+	if leaseConfig == nil {
+		leaseConfig = &configLease{}
+	}
+
+	// Get the nomad client
+	c, err := b.client(ctx, req.Storage)
+	if err != nil {
+		return nil, err
+	}
+
+	// Generate a name for the token
+	tokenName := fmt.Sprintf("vault-%s-%s-%d", name, req.DisplayName, time.Now().UnixNano())
+
+	// Note: if the given role name is sufficiently long, the UnixNano() portion
+	// of the pseudo-randomized token name is the part that gets trimmed off,
+	// weakening its randomness.
+	if len(tokenName) > tokenNameLength {
+		tokenName = tokenName[:tokenNameLength]
+	}
+
+	// Create it
+	token, _, err := c.ACLTokens().Create(&api.ACLToken{
+		Name:     tokenName,
+		Type:     role.TokenType,
+		Policies: role.Policies,
+		Global:   role.Global,
+	}, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Use the helper to create the secret
+	resp := b.Secret(SecretTokenType).Response(map[string]interface{}{
+		"secret_id":   token.SecretID,
+		"accessor_id": token.AccessorID,
+	}, map[string]interface{}{
+		"accessor_id": token.AccessorID,
+	})
+	resp.Secret.TTL = leaseConfig.TTL
+	resp.Secret.MaxTTL = leaseConfig.MaxTTL
+
+	return resp, nil
+}
diff --git a/builtin/logical/nomad/path_roles.go b/builtin/logical/nomad/path_roles.go
new file mode 100644
index 0000000..e3cebef
--- /dev/null
+++ b/builtin/logical/nomad/path_roles.go
@@ -0,0 +1,200 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package nomad + +import ( + "context" + "errors" + "fmt" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathListRoles(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "role/?$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixNomad, + OperationSuffix: "roles", + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: b.pathRoleList, + }, + } +} + +func pathRoles(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "role/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixNomad, + OperationSuffix: "role", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the role", + }, + + "policies": { + Type: framework.TypeCommaStringSlice, + Description: "Comma-separated string or list of policies as previously created in Nomad. Required for 'client' token.", + }, + + "global": { + Type: framework.TypeBool, + Description: "Boolean value describing if the token should be global or not. Defaults to false.", + }, + + "type": { + Type: framework.TypeString, + Default: "client", + Description: `Which type of token to create: 'client' +or 'management'. If a 'management' token, +the "policies" parameter is not required. +Defaults to 'client'.`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathRolesRead, + logical.CreateOperation: b.pathRolesWrite, + logical.UpdateOperation: b.pathRolesWrite, + logical.DeleteOperation: b.pathRolesDelete, + }, + + ExistenceCheck: b.rolesExistenceCheck, + } +} + +// Establishes dichotomy of request operation between CreateOperation and UpdateOperation. +// Returning 'true' forces an UpdateOperation, CreateOperation otherwise. 
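+// The SDK framework invokes this check when a request could be either a
+// create or an update, and uses the result to select which handler runs.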
+func (b *backend) rolesExistenceCheck(ctx context.Context, req *logical.Request, d *framework.FieldData) (bool, error) { + name := d.Get("name").(string) + entry, err := b.Role(ctx, req.Storage, name) + if err != nil { + return false, err + } + return entry != nil, nil +} + +func (b *backend) Role(ctx context.Context, storage logical.Storage, name string) (*roleConfig, error) { + if name == "" { + return nil, errors.New("invalid role name") + } + + entry, err := storage.Get(ctx, "role/"+name) + if err != nil { + return nil, fmt.Errorf("error retrieving role: %w", err) + } + if entry == nil { + return nil, nil + } + + var result roleConfig + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + return &result, nil +} + +func (b *backend) pathRoleList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + entries, err := req.Storage.List(ctx, "role/") + if err != nil { + return nil, err + } + + return logical.ListResponse(entries), nil +} + +func (b *backend) pathRolesRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + + role, err := b.Role(ctx, req.Storage, name) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + // Generate the response + resp := &logical.Response{ + Data: map[string]interface{}{ + "type": role.TokenType, + "global": role.Global, + "policies": role.Policies, + }, + } + return resp, nil +} + +func (b *backend) pathRolesWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + + role, err := b.Role(ctx, req.Storage, name) + if err != nil { + return nil, err + } + if role == nil { + role = new(roleConfig) + } + + policies, ok := d.GetOk("policies") + if ok { + role.Policies = policies.([]string) + } + + role.TokenType = d.Get("type").(string) + switch role.TokenType { + case "client": + if len(role.Policies) == 0 { + return logical.ErrorResponse( + "policies cannot be empty when using client tokens"), nil + } + case "management": + if len(role.Policies) != 0 { + return logical.ErrorResponse( + "policies should be empty when using management tokens"), nil + } + default: + return logical.ErrorResponse( + `type must be "client" or "management"`), nil + } + + global, ok := d.GetOk("global") + if ok { + role.Global = global.(bool) + } + + entry, err := logical.StorageEntryJSON("role/"+name, role) + if err != nil { + return nil, err + } + + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + + return nil, nil +} + +func (b *backend) pathRolesDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + if err := req.Storage.Delete(ctx, "role/"+name); err != nil { + return nil, err + } + return nil, nil +} + +type roleConfig struct { + Policies []string `json:"policies"` + TokenType string `json:"type"` + Global bool `json:"global"` +} diff --git a/builtin/logical/nomad/secret_token.go b/builtin/logical/nomad/secret_token.go new file mode 100644 index 0000000..3c6b920 --- /dev/null +++ b/builtin/logical/nomad/secret_token.go @@ -0,0 +1,72 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package nomad + +import ( + "context" + "errors" + "fmt" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + SecretTokenType = "token" +) + +func secretToken(b *backend) *framework.Secret { + return &framework.Secret{ + Type: SecretTokenType, + Fields: map[string]*framework.FieldSchema{ + "token": { + Type: framework.TypeString, + Description: "Request token", + }, + }, + + Renew: b.secretTokenRenew, + Revoke: b.secretTokenRevoke, + } +} + +func (b *backend) secretTokenRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + lease, err := b.LeaseConfig(ctx, req.Storage) + if err != nil { + return nil, err + } + if lease == nil { + lease = &configLease{} + } + resp := &logical.Response{Secret: req.Secret} + resp.Secret.TTL = lease.TTL + resp.Secret.MaxTTL = lease.MaxTTL + return resp, nil +} + +func (b *backend) secretTokenRevoke(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + c, err := b.client(ctx, req.Storage) + if err != nil { + return nil, err + } + + if c == nil { + return nil, fmt.Errorf("error getting Nomad client") + } + + accessorIDRaw, ok := req.Secret.InternalData["accessor_id"] + if !ok { + return nil, fmt.Errorf("accessor_id is missing on the lease") + } + accessorID, ok := accessorIDRaw.(string) + if !ok { + return nil, errors.New("unable to convert accessor_id") + } + _, err = c.ACLTokens().Delete(accessorID, nil) + if err != nil { + return nil, err + } + + return nil, nil +} diff --git a/builtin/logical/pki/acme_authorizations.go b/builtin/logical/pki/acme_authorizations.go new file mode 100644 index 0000000..82d439d --- /dev/null +++ b/builtin/logical/pki/acme_authorizations.go @@ -0,0 +1,187 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "fmt" + "time" +) + +type ACMEIdentifierType string + +const ( + ACMEDNSIdentifier ACMEIdentifierType = "dns" + ACMEIPIdentifier ACMEIdentifierType = "ip" +) + +type ACMEIdentifier struct { + Type ACMEIdentifierType `json:"type"` + Value string `json:"value"` + OriginalValue string `json:"original_value"` + IsWildcard bool `json:"is_wildcard"` +} + +func (ai *ACMEIdentifier) MaybeParseWildcard() (bool, string, error) { + if ai.Type != ACMEDNSIdentifier || !isWildcardDomain(ai.Value) { + return false, ai.Value, nil + } + + // Here on out, technically it is a wildcard. + ai.IsWildcard = true + + wildcardLabel, reducedName, err := validateWildcardDomain(ai.Value) + if err != nil { + return true, "", err + } + + if wildcardLabel != "*" { + // Per RFC 8555 Section. 7.1.3. Order Objects: + // + // > Any identifier of type "dns" in a newOrder request MAY have a + // > wildcard domain name as its value. A wildcard domain name consists + // > of a single asterisk character followed by a single full stop + // > character ("*.") followed by a domain name as defined for use in the + // > Subject Alternate Name Extension by [RFC5280]. + return true, "", fmt.Errorf("wildcard must be entire left-most label") + } + + if reducedName == "" { + return true, "", fmt.Errorf("wildcard must not be entire domain name; need at least two domain labels") + } + + // Parsing was indeed successful, so update our reduced name. 
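+	// ai.OriginalValue still holds the as-submitted form, so NetworkMarshal
+	// can return the full wildcard name to clients when asked to.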
+ ai.Value = reducedName + + return true, reducedName, nil +} + +func (ai *ACMEIdentifier) NetworkMarshal(useOriginalValue bool) map[string]interface{} { + value := ai.OriginalValue + if !useOriginalValue { + value = ai.Value + } + return map[string]interface{}{ + "type": ai.Type, + "value": value, + } +} + +type ACMEAuthorizationStatusType string + +const ( + ACMEAuthorizationPending ACMEAuthorizationStatusType = "pending" + ACMEAuthorizationValid ACMEAuthorizationStatusType = "valid" + ACMEAuthorizationInvalid ACMEAuthorizationStatusType = "invalid" + ACMEAuthorizationDeactivated ACMEAuthorizationStatusType = "deactivated" + ACMEAuthorizationExpired ACMEAuthorizationStatusType = "expired" + ACMEAuthorizationRevoked ACMEAuthorizationStatusType = "revoked" +) + +type ACMEOrderStatusType string + +const ( + ACMEOrderPending ACMEOrderStatusType = "pending" + ACMEOrderProcessing ACMEOrderStatusType = "processing" + ACMEOrderValid ACMEOrderStatusType = "valid" + ACMEOrderInvalid ACMEOrderStatusType = "invalid" + ACMEOrderReady ACMEOrderStatusType = "ready" +) + +type ACMEChallengeType string + +const ( + ACMEHTTPChallenge ACMEChallengeType = "http-01" + ACMEDNSChallenge ACMEChallengeType = "dns-01" + ACMEALPNChallenge ACMEChallengeType = "tls-alpn-01" +) + +type ACMEChallengeStatusType string + +const ( + ACMEChallengePending ACMEChallengeStatusType = "pending" + ACMEChallengeProcessing ACMEChallengeStatusType = "processing" + ACMEChallengeValid ACMEChallengeStatusType = "valid" + ACMEChallengeInvalid ACMEChallengeStatusType = "invalid" +) + +type ACMEChallenge struct { + Type ACMEChallengeType `json:"type"` + Status ACMEChallengeStatusType `json:"status"` + Validated string `json:"validated,optional"` + Error map[string]interface{} `json:"error,optional"` + ChallengeFields map[string]interface{} `json:"challenge_fields"` +} + +func (ac *ACMEChallenge) NetworkMarshal(acmeCtx *acmeContext, authId string) map[string]interface{} { + resp := map[string]interface{}{ + "type": ac.Type, + "url": buildChallengeUrl(acmeCtx, authId, string(ac.Type)), + "status": ac.Status, + } + + if ac.Validated != "" { + resp["validated"] = ac.Validated + } + + if len(ac.Error) > 0 { + resp["error"] = ac.Error + } + + for field, value := range ac.ChallengeFields { + resp[field] = value + } + + return resp +} + +func buildChallengeUrl(acmeCtx *acmeContext, authId, challengeType string) string { + return acmeCtx.baseUrl.JoinPath("/challenge/", authId, challengeType).String() +} + +type ACMEAuthorization struct { + Id string `json:"id"` + AccountId string `json:"account_id"` + + Identifier *ACMEIdentifier `json:"identifier"` + Status ACMEAuthorizationStatusType `json:"status"` + + // Per RFC 8555 Section 7.1.4. Authorization Objects: + // + // > This field is REQUIRED for objects with "valid" in the "status" + // > field. 
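+	//
+	// The timestamp is serialized in RFC 3339 form; GetExpires below parses it.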
+ Expires string `json:"expires,optional"` + + Challenges []*ACMEChallenge `json:"challenges"` + Wildcard bool `json:"wildcard"` +} + +func (aa *ACMEAuthorization) GetExpires() (time.Time, error) { + if aa.Expires == "" { + return time.Time{}, nil + } + + return time.Parse(time.RFC3339, aa.Expires) +} + +func (aa *ACMEAuthorization) NetworkMarshal(acmeCtx *acmeContext) map[string]interface{} { + resp := map[string]interface{}{ + "identifier": aa.Identifier.NetworkMarshal( /* use value, not original value */ false), + "status": aa.Status, + "wildcard": aa.Wildcard, + } + + if aa.Expires != "" { + resp["expires"] = aa.Expires + } + + if len(aa.Challenges) > 0 { + challenges := []map[string]interface{}{} + for _, challenge := range aa.Challenges { + challenges = append(challenges, challenge.NetworkMarshal(acmeCtx, aa.Id)) + } + resp["challenges"] = challenges + } + + return resp +} diff --git a/builtin/logical/pki/acme_billing.go b/builtin/logical/pki/acme_billing.go new file mode 100644 index 0000000..642e0f4 --- /dev/null +++ b/builtin/logical/pki/acme_billing.go @@ -0,0 +1,25 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "context" + "fmt" + + "github.com/hashicorp/vault/sdk/logical" +) + +func (b *backend) doTrackBilling(ctx context.Context, identifiers []*ACMEIdentifier) error { + billingView, ok := b.System().(logical.ACMEBillingSystemView) + if !ok { + return fmt.Errorf("failed to perform cast to ACME billing system view interface") + } + + var realized []string + for _, identifier := range identifiers { + realized = append(realized, fmt.Sprintf("%s/%s", identifier.Type, identifier.OriginalValue)) + } + + return billingView.CreateActivityCountEventForIdentifiers(ctx, realized) +} diff --git a/builtin/logical/pki/acme_billing_test.go b/builtin/logical/pki/acme_billing_test.go new file mode 100644 index 0000000..72a5818 --- /dev/null +++ b/builtin/logical/pki/acme_billing_test.go @@ -0,0 +1,322 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/json" + "strings" + "testing" + "time" + + "golang.org/x/crypto/acme" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/pki/dnstest" + "github.com/hashicorp/vault/helper/constants" + "github.com/hashicorp/vault/helper/timeutil" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/vault/activity" + + "github.com/stretchr/testify/require" +) + +// TestACMEBilling is a basic test that will validate client counts created via ACME workflows. +func TestACMEBilling(t *testing.T) { + t.Parallel() + timeutil.SkipAtEndOfMonth(t) + + cluster, client, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + + dns := dnstest.SetupResolver(t, "dadgarcorp.com") + defer dns.Cleanup() + + // Enable additional mounts. + setupAcmeBackendOnClusterAtPath(t, cluster, client, "pki2") + setupAcmeBackendOnClusterAtPath(t, cluster, client, "ns1/pki") + setupAcmeBackendOnClusterAtPath(t, cluster, client, "ns2/pki") + + // Enable custom DNS resolver for testing. + for _, mount := range []string{"pki", "pki2", "ns1/pki", "ns2/pki"} { + _, err := client.Logical().Write(mount+"/config/acme", map[string]interface{}{ + "dns_resolver": dns.GetLocalAddr(), + }) + require.NoError(t, err, "failed to set local dns resolver address for testing on mount: "+mount) + } + + // Enable client counting. 
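+	// Without this, the activity log would not record the ACME requests made
+	// below and the count assertions in this test could not pass.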
+	_, err := client.Logical().Write("/sys/internal/counters/config", map[string]interface{}{
+		"enabled": "enable",
+	})
+	require.NoError(t, err, "failed to enable client counting")
+
+	// Setup ACME clients. We refresh account keys each time for consistency.
+	acmeClientPKI := getAcmeClientForCluster(t, cluster, "/v1/pki/acme/", nil)
+	acmeClientPKI2 := getAcmeClientForCluster(t, cluster, "/v1/pki2/acme/", nil)
+	acmeClientPKINS1 := getAcmeClientForCluster(t, cluster, "/v1/ns1/pki/acme/", nil)
+	acmeClientPKINS2 := getAcmeClientForCluster(t, cluster, "/v1/ns2/pki/acme/", nil)
+
+	// Get our initial count.
+	expectedCount := validateClientCount(t, client, "", -1, "initial fetch")
+
+	// Unique identifier: should increase by one.
+	doACMEForDomainWithDNS(t, dns, acmeClientPKI, []string{"dadgarcorp.com"})
+	expectedCount = validateClientCount(t, client, "pki", expectedCount+1, "new certificate")
+
+	// Different identifier; should increase by one.
+	doACMEForDomainWithDNS(t, dns, acmeClientPKI, []string{"example.dadgarcorp.com"})
+	expectedCount = validateClientCount(t, client, "pki", expectedCount+1, "new certificate")
+
+	// The same identifiers used together form a new, unique set; increase by one.
+	doACMEForDomainWithDNS(t, dns, acmeClientPKI, []string{"example.dadgarcorp.com", "dadgarcorp.com"})
+	expectedCount = validateClientCount(t, client, "pki", expectedCount+1, "new certificate")
+
+	// The same identifiers in a different order are not unique; keep the same.
+	doACMEForDomainWithDNS(t, dns, acmeClientPKI, []string{"dadgarcorp.com", "example.dadgarcorp.com"})
+	expectedCount = validateClientCount(t, client, "pki", expectedCount, "different order; same identifiers")
+
+	// Using a different mount shouldn't affect counts.
+	doACMEForDomainWithDNS(t, dns, acmeClientPKI2, []string{"dadgarcorp.com"})
+	expectedCount = validateClientCount(t, client, "", expectedCount, "different mount; same identifiers")
+
+	// But using a different identifier should.
+	doACMEForDomainWithDNS(t, dns, acmeClientPKI2, []string{"pki2.dadgarcorp.com"})
+	expectedCount = validateClientCount(t, client, "pki2", expectedCount+1, "different mount with different identifiers")
+
+	// A new identifier in a unique namespace will affect results.
+	doACMEForDomainWithDNS(t, dns, acmeClientPKINS1, []string{"unique.dadgarcorp.com"})
+	expectedCount = validateClientCount(t, client, "ns1/pki", expectedCount+1, "unique identifier in a namespace")
+
+	// But the existing identifier in a different namespace will not.
+	doACMEForDomainWithDNS(t, dns, acmeClientPKINS2, []string{"unique.dadgarcorp.com"})
+	expectedCount = validateClientCount(t, client, "", expectedCount, "existing identifier in a namespace")
+	doACMEForDomainWithDNS(t, dns, acmeClientPKI2, []string{"unique.dadgarcorp.com"})
+	expectedCount = validateClientCount(t, client, "", expectedCount, "existing identifier outside of a namespace")
+
+	// Creating a unique identifier in a namespace with a mount with the
+	// same name as another namespace should increase counts as well.
+	doACMEForDomainWithDNS(t, dns, acmeClientPKINS2, []string{"very-unique.dadgarcorp.com"})
+	expectedCount = validateClientCount(t, client, "ns2/pki", expectedCount+1, "unique identifier in a different namespace")
+
+	// Check the current fragment
+	fragment := cluster.Cores[0].Core.ResetActivityLog()[0]
+	if fragment == nil {
+		t.Fatal("no fragment created")
+	}
+	validateAcmeClientTypes(t, fragment, expectedCount)
+}
+
+func validateAcmeClientTypes(t *testing.T, fragment *activity.LogFragment, expectedCount int64) {
+	t.Helper()
+	if int64(len(fragment.Clients)) != expectedCount {
+		t.Fatalf("bad number of entities, expected %v: got %v, entities are: %v", expectedCount, len(fragment.Clients), fragment.Clients)
+	}
+
+	for _, ac := range fragment.Clients {
+		if ac.ClientType != vault.ACMEActivityType {
+			t.Fatalf("Couldn't find expected '%v' client_type in %v", vault.ACMEActivityType, fragment.Clients)
+		}
+	}
+}
+
+func validateClientCount(t *testing.T, client *api.Client, mount string, expected int64, message string) int64 {
+	resp, err := client.Logical().Read("/sys/internal/counters/activity/monthly")
+	require.NoError(t, err, "failed to fetch client count values")
+	t.Logf("got client count numbers: %v", resp)
+
+	require.NotNil(t, resp)
+	require.NotNil(t, resp.Data)
+	require.Contains(t, resp.Data, "non_entity_clients")
+	require.Contains(t, resp.Data, "months")
+
+	rawCount := resp.Data["non_entity_clients"].(json.Number)
+	count, err := rawCount.Int64()
+	require.NoError(t, err, "failed to parse number as int64: "+rawCount.String())
+
+	if expected != -1 {
+		require.Equal(t, expected, count, "value of client counts did not match expectations: "+message)
+	}
+
+	if mount == "" {
+		return count
+	}
+
+	months := resp.Data["months"].([]interface{})
+	if len(months) > 1 {
+		t.Fatalf("running across a month boundary despite using SkipAtEndOfMonth(...); rerun test from start fully in the next month instead")
+	}
+
+	require.Equal(t, 1, len(months), "expected only a single month when running this test")
+
+	monthlyInfo := months[0].(map[string]interface{})
+
+	// Validate this month's aggregate counts match the overall value.
+	require.Contains(t, monthlyInfo, "counts", "expected monthly info to contain a count key")
+	monthlyCounts := monthlyInfo["counts"].(map[string]interface{})
+	require.Contains(t, monthlyCounts, "non_entity_clients", "expected month[0].counts to contain a non_entity_clients key")
+	monthlyCountNonEntityRaw := monthlyCounts["non_entity_clients"].(json.Number)
+	monthlyCountNonEntity, err := monthlyCountNonEntityRaw.Int64()
+	require.NoError(t, err, "failed to parse number as int64: "+monthlyCountNonEntityRaw.String())
+	require.Equal(t, count, monthlyCountNonEntity, "expected equal values for non entity client counts")
+
+	// Validate this mount's namespace is included in the namespaces list,
+	// if this is enterprise. Otherwise, if it's OSS or we don't have a
+	// namespace, we default to the root namespace.
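+	// For example, mount "ns1/pki" yields mountNamespace "ns1/" and mountPath
+	// "pki/", while a bare "pki" mount yields mountNamespace "" and mountPath
+	// "pki/".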
+ mountNamespace := "" + mountPath := mount + "/" + if constants.IsEnterprise && strings.Contains(mount, "/") { + pieces := strings.Split(mount, "/") + require.Equal(t, 2, len(pieces), "we do not support nested namespaces in this test") + mountNamespace = pieces[0] + "/" + mountPath = pieces[1] + "/" + } + + require.Contains(t, monthlyInfo, "namespaces", "expected monthly info to contain a namespaces key") + monthlyNamespaces := monthlyInfo["namespaces"].([]interface{}) + foundNamespace := false + for index, namespaceRaw := range monthlyNamespaces { + namespace := namespaceRaw.(map[string]interface{}) + require.Contains(t, namespace, "namespace_path", "expected monthly.namespaces[%v] to contain a namespace_path key", index) + namespacePath := namespace["namespace_path"].(string) + + if namespacePath != mountNamespace { + t.Logf("skipping non-matching namespace %v: %v != %v / %v", index, namespacePath, mountNamespace, namespace) + continue + } + + foundNamespace = true + + // This namespace must have a non-empty aggregate non-entity count. + require.Contains(t, namespace, "counts", "expected monthly.namespaces[%v] to contain a counts key", index) + namespaceCounts := namespace["counts"].(map[string]interface{}) + require.Contains(t, namespaceCounts, "non_entity_clients", "expected namespace counts to contain a non_entity_clients key") + namespaceCountNonEntityRaw := namespaceCounts["non_entity_clients"].(json.Number) + namespaceCountNonEntity, err := namespaceCountNonEntityRaw.Int64() + require.NoError(t, err, "failed to parse number as int64: "+namespaceCountNonEntityRaw.String()) + require.Greater(t, namespaceCountNonEntity, int64(0), "expected at least one non-entity client count value in the namespace") + + require.Contains(t, namespace, "mounts", "expected monthly.namespaces[%v] to contain a mounts key", index) + namespaceMounts := namespace["mounts"].([]interface{}) + foundMount := false + for mountIndex, mountRaw := range namespaceMounts { + mountInfo := mountRaw.(map[string]interface{}) + require.Contains(t, mountInfo, "mount_path", "expected monthly.namespaces[%v].mounts[%v] to contain a mount_path key", index, mountIndex) + mountInfoPath := mountInfo["mount_path"].(string) + if mountPath != mountInfoPath { + t.Logf("skipping non-matching mount path %v in namespace %v: %v != %v / %v of %v", mountIndex, index, mountPath, mountInfoPath, mountInfo, namespace) + continue + } + + foundMount = true + + // This mount must also have a non-empty non-entity client count. 
+ require.Contains(t, mountInfo, "counts", "expected monthly.namespaces[%v].mounts[%v] to contain a counts key", index, mountIndex) + mountCounts := mountInfo["counts"].(map[string]interface{}) + require.Contains(t, mountCounts, "non_entity_clients", "expected mount counts to contain a non_entity_clients key") + mountCountNonEntityRaw := mountCounts["non_entity_clients"].(json.Number) + mountCountNonEntity, err := mountCountNonEntityRaw.Int64() + require.NoError(t, err, "failed to parse number as int64: "+mountCountNonEntityRaw.String()) + require.Greater(t, mountCountNonEntity, int64(0), "expected at least one non-entity client count value in the mount") + } + + require.True(t, foundMount, "expected to find the mount "+mountPath+" in the list of mounts for namespace, but did not") + } + + require.True(t, foundNamespace, "expected to find the namespace "+mountNamespace+" in the list of namespaces, but did not") + + return count +} + +func doACMEForDomainWithDNS(t *testing.T, dns *dnstest.TestServer, acmeClient *acme.Client, domains []string) *x509.Certificate { + cr := &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: domains[0]}, + DNSNames: domains, + } + + return doACMEForCSRWithDNS(t, dns, acmeClient, domains, cr) +} + +func doACMEForCSRWithDNS(t *testing.T, dns *dnstest.TestServer, acmeClient *acme.Client, domains []string, cr *x509.CertificateRequest) *x509.Certificate { + accountKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed to generate account key") + acmeClient.Key = accountKey + + testCtx, cancelFunc := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancelFunc() + + // Register the client. + _, err = acmeClient.Register(testCtx, &acme.Account{Contact: []string{"mailto:ipsans@dadgarcorp.com"}}, func(tosURL string) bool { return true }) + require.NoError(t, err, "failed registering account") + + // Create the Order + var orderIdentifiers []acme.AuthzID + for _, domain := range domains { + orderIdentifiers = append(orderIdentifiers, acme.AuthzID{Type: "dns", Value: domain}) + } + order, err := acmeClient.AuthorizeOrder(testCtx, orderIdentifiers) + require.NoError(t, err, "failed creating ACME order") + + // Fetch its authorizations. + var auths []*acme.Authorization + for _, authUrl := range order.AuthzURLs { + authorization, err := acmeClient.GetAuthorization(testCtx, authUrl) + require.NoError(t, err, "failed to lookup authorization at url: %s", authUrl) + auths = append(auths, authorization) + } + + // For each dns-01 challenge, place the record in the associated DNS resolver. + var challengesToAccept []*acme.Challenge + for _, auth := range auths { + for _, challenge := range auth.Challenges { + if challenge.Status != acme.StatusPending { + t.Logf("ignoring challenge not in status pending: %v", challenge) + continue + } + + if challenge.Type == "dns-01" { + challengeBody, err := acmeClient.DNS01ChallengeRecord(challenge.Token) + require.NoError(t, err, "failed generating challenge response") + + dns.AddRecord("_acme-challenge."+auth.Identifier.Value, "TXT", challengeBody) + defer dns.RemoveRecord("_acme-challenge."+auth.Identifier.Value, "TXT", challengeBody) + + require.NoError(t, err, "failed setting DNS record") + + challengesToAccept = append(challengesToAccept, challenge) + } + } + } + + dns.PushConfig() + require.GreaterOrEqual(t, len(challengesToAccept), 1, "Need at least one challenge, got none") + + // Tell the ACME server, that they can now validate those challenges. 
+	for _, challenge := range challengesToAccept {
+		_, err = acmeClient.Accept(testCtx, challenge)
+		require.NoError(t, err, "failed to accept challenge: %v", challenge)
+	}
+
+	// Wait for the order/challenges to be validated.
+	_, err = acmeClient.WaitOrder(testCtx, order.URI)
+	require.NoError(t, err, "failed waiting for order to be ready")
+
+	// Create/sign the CSR and ask the ACME server to sign it, returning the final certificate
+	csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	require.NoError(t, err, "failed generating csr key")
+	csr, err := x509.CreateCertificateRequest(rand.Reader, cr, csrKey)
+	require.NoError(t, err, "failed generating csr")
+
+	certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, false)
+	require.NoError(t, err, "failed to get a certificate back from ACME")
+
+	acmeCert, err := x509.ParseCertificate(certs[0])
+	require.NoError(t, err, "failed parsing acme cert bytes")
+
+	return acmeCert
+}
diff --git a/builtin/logical/pki/acme_challenge_engine.go b/builtin/logical/pki/acme_challenge_engine.go
new file mode 100644
index 0000000..7dae884
--- /dev/null
+++ b/builtin/logical/pki/acme_challenge_engine.go
@@ -0,0 +1,563 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package pki
+
+import (
+	"container/list"
+	"context"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/hashicorp/vault/sdk/helper/consts"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+var MaxChallengeTimeout = 1 * time.Minute
+
+const MaxRetryAttempts = 5
+
+const ChallengeAttemptFailedMsg = "this may occur if the validation target was misconfigured: check that challenge responses are available at the required locations and retry."
+
+type ChallengeValidation struct {
+	// Account KID that this validation attempt is recorded under.
+	Account string `json:"account"`
+
+	// The authorization ID that this validation attempt is for.
+	Authorization string            `json:"authorization"`
+	ChallengeType ACMEChallengeType `json:"challenge_type"`
+
+	// The token of this challenge and the JWS thumbprint of the account
+	// we're validating against.
+	Token      string `json:"token"`
+	Thumbprint string `json:"thumbprint"`
+
+	Initiated       time.Time `json:"initiated"`
+	FirstValidation time.Time `json:"first_validation,omitempty"`
+	RetryCount      int       `json:"retry_count,omitempty"`
+	LastRetry       time.Time `json:"last_retry,omitempty"`
+	RetryAfter      time.Time `json:"retry_after,omitempty"`
+}
+
+type ChallengeQueueEntry struct {
+	Identifier string
+	RetryAfter time.Time
+	NumRetries int // Track if we are spinning on a corrupted challenge
+}
+
+type ACMEChallengeEngine struct {
+	NumWorkers int
+
+	ValidationLock sync.Mutex
+	NewValidation  chan string
+	Closing        chan struct{}
+	Validations    *list.List
+}
+
+func NewACMEChallengeEngine() *ACMEChallengeEngine {
+	ace := &ACMEChallengeEngine{}
+	ace.NewValidation = make(chan string, 1)
+	ace.Closing = make(chan struct{}, 1)
+	ace.Validations = list.New()
+	ace.NumWorkers = 5
+
+	return ace
+}
+
+func (ace *ACMEChallengeEngine) LoadFromStorage(b *backend, sc *storageContext) error {
+	items, err := sc.Storage.List(sc.Context, acmeValidationPrefix)
+	if err != nil {
+		return fmt.Errorf("failed loading list of validations from disk: %w", err)
+	}
+
+	ace.ValidationLock.Lock()
+	defer ace.ValidationLock.Unlock()
+
+	// Add them to our queue of validations to work through later.
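+	// A single buffered send on NewValidation below is enough to wake the
+	// engine; _run keeps draining the queue once it has been signalled.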
+	foundExistingValidations := false
+	for _, item := range items {
+		ace.Validations.PushBack(&ChallengeQueueEntry{
+			Identifier: item,
+		})
+		foundExistingValidations = true
+	}
+
+	if foundExistingValidations {
+		ace.NewValidation <- "existing"
+	}
+
+	return nil
+}
+
+func (ace *ACMEChallengeEngine) Run(b *backend, state *acmeState, sc *storageContext) {
+	// We load the existing ACME challenges within the Run thread to avoid
+	// delaying the PKI mount initialization
+	b.Logger().Debug("Loading existing challenge validations on disk")
+	err := ace.LoadFromStorage(b, sc)
+	if err != nil {
+		b.Logger().Error("failed loading existing ACME challenge validations", "err", err)
+	}
+
+	for {
+		// err == nil on shutdown.
+		b.Logger().Debug("Starting ACME challenge validation engine")
+		err := ace._run(b, state)
+		if err != nil {
+			b.Logger().Error("Got unexpected error from ACME challenge validation engine", "err", err)
+			time.Sleep(1 * time.Second)
+			continue
+		}
+		break
+	}
+}
+
+func (ace *ACMEChallengeEngine) _run(b *backend, state *acmeState) error {
+	// This runner uses a background context for storage operations: we don't
+	// want to tie it to an inbound request and we don't want to set a time
+	// limit, so create a fresh background context.
+	runnerSC := b.makeStorageContext(context.Background(), b.storage)
+
+	// We want at most a certain number of workers operating to verify
+	// challenges.
+	var finishedWorkersChannels []chan bool
+	for {
+		// Wait until we've got more work to do.
+		select {
+		case <-ace.Closing:
+			b.Logger().Debug("shutting down ACME challenge validation engine")
+			return nil
+		case <-ace.NewValidation:
+		}
+
+		// First try to reap any finished workers. Read from their channels
+		// and if not finished yet, add to a fresh slice.
+		var newFinishedWorkersChannels []chan bool
+		for _, channel := range finishedWorkersChannels {
+			select {
+			case <-channel:
+			default:
+				// This channel had not been written to, indicating that the
+				// worker had not yet finished.
+				newFinishedWorkersChannels = append(newFinishedWorkersChannels, channel)
+			}
+		}
+		finishedWorkersChannels = newFinishedWorkersChannels
+
+		// If we have space to take on another work item, do so.
+		firstIdentifier := ""
+		startedWork := false
+		now := time.Now()
+		for len(finishedWorkersChannels) < ace.NumWorkers {
+			var task *ChallengeQueueEntry
+
+			// Find our next work item. We do all of these operations
+			// while holding the queue lock, hence some repeated checks
+			// afterwards. Out of this, we get a candidate task, using
+			// element == nil as a sentinel for breaking our parent
+			// loop.
+			ace.ValidationLock.Lock()
+			element := ace.Validations.Front()
+			if element != nil {
+				ace.Validations.Remove(element)
+				task = element.Value.(*ChallengeQueueEntry)
+				if !task.RetryAfter.IsZero() && now.Before(task.RetryAfter) {
+					// We cannot work on this element yet; move it to
+					// the back of the queue. This allows us to potentially
+					// select the next item in the next iteration.
+					ace.Validations.PushBack(task)
+				}
+
+				if firstIdentifier != "" && task.Identifier == firstIdentifier {
+					// We found and rejected this element before; exit the
+					// loop by "claiming" we didn't find any work.
+					element = nil
+				} else if firstIdentifier == "" {
+					firstIdentifier = task.Identifier
+				}
+			}
+			ace.ValidationLock.Unlock()
+			if element == nil {
+				// There was no more work to do to fill up the queue; exit
+				// this loop.
+				break
+			}
+			if now.Before(task.RetryAfter) {
+				// Here, while we found an element, we didn't want to
+				// completely exit the loop (perhaps it was our first time
+				// finding a work order), so retry without modifying
+				// firstIdentifier.
+				continue
+			}
+
+			config, err := state.getConfigWithUpdate(runnerSC)
+			if err != nil {
+				return fmt.Errorf("failed fetching ACME configuration: %w", err)
+			}
+
+			// Since this work item was valid, we won't expect to see it in
+			// the validation queue again until it is executed. Here, we
+			// want to avoid infinite looping above (if we removed the one
+			// valid item and the remainder are all not immediately
+			// actionable). At the worst, we'll spend a little more time
+			// looping through the queue until we hit a repeat.
+			firstIdentifier = ""
+
+			// If we are no longer the active node, break out
+			if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary | consts.ReplicationPerformanceStandby) {
+				break
+			}
+
+			// Here, we got a piece of work that is ready to check; create a
+			// channel and a new goroutine and run it. Note that this still
+			// could have a RetryAfter date we're not aware of (e.g., after a
+			// cluster restart, since we do not read the stored entries here).
+			channel := make(chan bool, 1)
+			go ace.VerifyChallenge(runnerSC, task.Identifier, task.NumRetries, channel, config)
+			finishedWorkersChannels = append(finishedWorkersChannels, channel)
+			startedWork = true
+		}
+
+		// If we have no more capacity for work, we should pause a little to
+		// let the system catch up. Additionally, if we only had
+		// non-actionable work items, we should pause until some time has
+		// elapsed: not too much that we potentially starve any new incoming
+		// items from validation, but not too short that we cause a busy loop.
+		if len(finishedWorkersChannels) == ace.NumWorkers || !startedWork {
+			time.Sleep(100 * time.Millisecond)
+		}
+
+		// Lastly, if we have more work to do, re-trigger ourselves.
+		ace.ValidationLock.Lock()
+		if ace.Validations.Front() != nil {
+			select {
+			case ace.NewValidation <- "retry":
+			default:
+			}
+		}
+		ace.ValidationLock.Unlock()
+	}
+
+	return fmt.Errorf("unexpectedly exited from ACMEChallengeEngine._run()")
+}
+
+func (ace *ACMEChallengeEngine) AcceptChallenge(sc *storageContext, account string, authz *ACMEAuthorization, challenge *ACMEChallenge, thumbprint string) error {
+	name := authz.Id + "-" + string(challenge.Type)
+	path := acmeValidationPrefix + name
+
+	entry, err := sc.Storage.Get(sc.Context, path)
+	if err == nil && entry != nil {
+		// Challenge already in the queue; exit without re-adding it.
+		return nil
+	}
+
+	if authz.Status != ACMEAuthorizationPending {
+		return fmt.Errorf("%w: cannot accept already validated authorization %v (%v)", ErrMalformed, authz.Id, authz.Status)
+	}
+
+	for _, otherChallenge := range authz.Challenges {
+		// We assume an authorization never contains multiple challenges of
+		// the same type, and we limit processing to a single challenge at a
+		// time, to avoid races in which one challenge fails while another
+		// passes.
+		if otherChallenge.Type != challenge.Type && otherChallenge.Status != ACMEChallengePending {
+			return fmt.Errorf("%w: only a single challenge within an authorization can be accepted (%v) in status %v", ErrMalformed, otherChallenge.Type, otherChallenge.Status)
+		}
+
+		// The requested challenge can ping us to wake us up, so allow pending and currently processing statuses
+		if otherChallenge.Status != ACMEChallengePending && otherChallenge.Status != ACMEChallengeProcessing {
+			return fmt.Errorf("%w: challenge is in invalid state (%v) in authorization %v", ErrMalformed, challenge.Status, authz.Id)
+		}
+	}
+
+	token := challenge.ChallengeFields["token"].(string)
+
+	cv := &ChallengeValidation{
+		Account:       account,
+		Authorization: authz.Id,
+		ChallengeType: challenge.Type,
+		Token:         token,
+		Thumbprint:    thumbprint,
+		Initiated:     time.Now(),
+	}
+
+	json, err := logical.StorageEntryJSON(path, &cv)
+	if err != nil {
+		return fmt.Errorf("error creating challenge validation queue entry: %w", err)
+	}
+
+	if err := sc.Storage.Put(sc.Context, json); err != nil {
+		return fmt.Errorf("error writing challenge validation entry: %w", err)
+	}
+
+	if challenge.Status == ACMEChallengePending {
+		challenge.Status = ACMEChallengeProcessing
+
+		authzPath := getAuthorizationPath(account, authz.Id)
+		if err := saveAuthorizationAtPath(sc, authzPath, authz); err != nil {
+			return fmt.Errorf("error saving updated authorization %v: %w", authz.Id, err)
+		}
+	}
+
+	ace.ValidationLock.Lock()
+	defer ace.ValidationLock.Unlock()
+	ace.Validations.PushBack(&ChallengeQueueEntry{
+		Identifier: name,
+	})
+
+	select {
+	case ace.NewValidation <- name:
+	default:
+	}
+
+	return nil
+}
+
+func (ace *ACMEChallengeEngine) VerifyChallenge(runnerSc *storageContext, id string, validationQueueRetries int, finished chan bool, config *acmeConfigEntry) {
+	sc, cancel := runnerSc.WithFreshTimeout(MaxChallengeTimeout)
+	defer cancel()
+	runnerSc.Backend.Logger().Debug("Starting verification of challenge", "id", id)
+
+	if retry, retryAfter, err := ace._verifyChallenge(sc, id, config); err != nil {
+		// Because verification of this challenge failed, we need to retry
+		// it in the future. Log the error and re-add the item to the queue
+		// to try again later.
+		sc.Backend.Logger().Error(fmt.Sprintf("ACME validation failed for %v: %v", id, err))
+
+		if retry {
+			validationQueueRetries++
+
+			// The retry logic within _verifyChallenge depends on being able to
+			// read and decode the ACME challenge entries. If we hit such
+			// failures we would retry forever, so we add a secondary check
+			// here for a retry count that has grown beyond the normal number
+			// of attempts we would allow.
+			if validationQueueRetries > MaxRetryAttempts*2 {
+				sc.Backend.Logger().Warn("reached max error attempts within challenge queue, giving up", "id", id)
+				_, _, err = ace._verifyChallengeCleanup(sc, nil, id)
+				if err != nil {
+					sc.Backend.Logger().Warn("failed cleaning up challenge entry", "err", err)
+				}
+				finished <- true
+				return
+			}
+
+			ace.ValidationLock.Lock()
+			defer ace.ValidationLock.Unlock()
+			ace.Validations.PushBack(&ChallengeQueueEntry{
+				Identifier: id,
+				RetryAfter: retryAfter,
+				NumRetries: validationQueueRetries,
+			})
+
+			// Let the validator know there's a pending challenge.
+			select {
+			case ace.NewValidation <- id:
+			default:
+			}
+		}
+
+		// We're the only producer on this channel and it has a buffer size
+		// of one element, so it is safe to directly write here.
+ finished <- true + return + } + + // We're the only producer on this channel and it has a buffer size of one + // element, so it is safe to directly write here. + finished <- false +} + +func (ace *ACMEChallengeEngine) _verifyChallenge(sc *storageContext, id string, config *acmeConfigEntry) (bool, time.Time, error) { + now := time.Now() + backoffTime := now.Add(1 * time.Second) + path := acmeValidationPrefix + id + challengeEntry, err := sc.Storage.Get(sc.Context, path) + if err != nil { + return true, backoffTime, fmt.Errorf("error loading challenge %v: %w", id, err) + } + + if challengeEntry == nil { + // Something must've successfully cleaned up our storage entry from + // under us. Assume we don't need to rerun, else the client will + // trigger us to re-run. + return ace._verifyChallengeCleanup(sc, nil, id) + } + + var cv *ChallengeValidation + if err := challengeEntry.DecodeJSON(&cv); err != nil { + return true, backoffTime, fmt.Errorf("error decoding challenge %v: %w", id, err) + } + + if now.Before(cv.RetryAfter) { + return true, cv.RetryAfter, fmt.Errorf("retrying challenge %v too soon", id) + } + + authzPath := getAuthorizationPath(cv.Account, cv.Authorization) + authz, err := loadAuthorizationAtPath(sc, authzPath) + if err != nil { + return true, backoffTime, fmt.Errorf("error loading authorization %v/%v for challenge %v: %w", cv.Account, cv.Authorization, id, err) + } + + if authz.Status != ACMEAuthorizationPending { + // Something must've finished up this challenge for us. Assume we + // don't need to rerun and exit instead. + err = nil + return ace._verifyChallengeCleanup(sc, err, id) + } + + var challenge *ACMEChallenge + for _, authzChallenge := range authz.Challenges { + if authzChallenge.Type == cv.ChallengeType { + challenge = authzChallenge + break + } + } + + if challenge == nil { + err = fmt.Errorf("no challenge of type %v in authorization %v/%v for challenge %v", cv.ChallengeType, cv.Account, cv.Authorization, id) + return ace._verifyChallengeCleanup(sc, err, id) + } + + if challenge.Status != ACMEChallengePending && challenge.Status != ACMEChallengeProcessing { + err = fmt.Errorf("challenge is in invalid state %v in authorization %v/%v for challenge %v", challenge.Status, cv.Account, cv.Authorization, id) + return ace._verifyChallengeCleanup(sc, err, id) + } + + var valid bool + switch challenge.Type { + case ACMEHTTPChallenge: + if authz.Identifier.Type != ACMEDNSIdentifier && authz.Identifier.Type != ACMEIPIdentifier { + err = fmt.Errorf("unsupported identifier type for authorization %v/%v in challenge %v: %v", cv.Account, cv.Authorization, id, authz.Identifier.Type) + return ace._verifyChallengeCleanup(sc, err, id) + } + + if authz.Wildcard { + err = fmt.Errorf("unable to validate wildcard authorization %v/%v in challenge %v via http-01 challenge", cv.Account, cv.Authorization, id) + return ace._verifyChallengeCleanup(sc, err, id) + } + + valid, err = ValidateHTTP01Challenge(authz.Identifier.Value, cv.Token, cv.Thumbprint, config) + if err != nil { + err = fmt.Errorf("%w: error validating http-01 challenge %v: %v; %v", ErrIncorrectResponse, id, err, ChallengeAttemptFailedMsg) + return ace._verifyChallengeRetry(sc, cv, authzPath, authz, challenge, err, id) + } + case ACMEDNSChallenge: + if authz.Identifier.Type != ACMEDNSIdentifier { + err = fmt.Errorf("unsupported identifier type for authorization %v/%v in challenge %v: %v", cv.Account, cv.Authorization, id, authz.Identifier.Type) + return ace._verifyChallengeCleanup(sc, err, id) + } + + valid, err = 
ValidateDNS01Challenge(authz.Identifier.Value, cv.Token, cv.Thumbprint, config) + if err != nil { + err = fmt.Errorf("%w: error validating dns-01 challenge %v: %v; %v", ErrIncorrectResponse, id, err, ChallengeAttemptFailedMsg) + return ace._verifyChallengeRetry(sc, cv, authzPath, authz, challenge, err, id) + } + case ACMEALPNChallenge: + if authz.Identifier.Type != ACMEDNSIdentifier { + err = fmt.Errorf("unsupported identifier type for authorization %v/%v in challenge %v: %v", cv.Account, cv.Authorization, id, authz.Identifier.Type) + return ace._verifyChallengeCleanup(sc, err, id) + } + + if authz.Wildcard { + err = fmt.Errorf("unable to validate wildcard authorization %v/%v in challenge %v via tls-alpn-01 challenge", cv.Account, cv.Authorization, id) + return ace._verifyChallengeCleanup(sc, err, id) + } + + valid, err = ValidateTLSALPN01Challenge(authz.Identifier.Value, cv.Token, cv.Thumbprint, config) + if err != nil { + err = fmt.Errorf("%w: error validating tls-alpn-01 challenge %v: %s", ErrIncorrectResponse, id, err.Error()) + return ace._verifyChallengeRetry(sc, cv, authzPath, authz, challenge, err, id) + } + default: + err = fmt.Errorf("unsupported ACME challenge type %v for challenge %v", cv.ChallengeType, id) + return ace._verifyChallengeCleanup(sc, err, id) + } + + if !valid { + err = fmt.Errorf("%w: challenge failed with no additional information", ErrIncorrectResponse) + return ace._verifyChallengeRetry(sc, cv, authzPath, authz, challenge, err, id) + } + + // If we got here, the challenge verification was successful. Update + // the authorization appropriately. + expires := now.Add(15 * 24 * time.Hour) + challenge.Status = ACMEChallengeValid + challenge.Validated = now.Format(time.RFC3339) + challenge.Error = nil + authz.Status = ACMEAuthorizationValid + authz.Expires = expires.Format(time.RFC3339) + + if err := saveAuthorizationAtPath(sc, authzPath, authz); err != nil { + err = fmt.Errorf("error saving updated (validated) authorization %v/%v for challenge %v: %w", cv.Account, cv.Authorization, id, err) + return ace._verifyChallengeRetry(sc, cv, authzPath, authz, challenge, err, id) + } + + return ace._verifyChallengeCleanup(sc, nil, id) +} + +func (ace *ACMEChallengeEngine) _verifyChallengeRetry(sc *storageContext, cv *ChallengeValidation, authzPath string, auth *ACMEAuthorization, challenge *ACMEChallenge, verificationErr error, id string) (bool, time.Time, error) { + now := time.Now() + path := acmeValidationPrefix + id + + if err := updateChallengeStatus(sc, cv, authzPath, auth, challenge, verificationErr); err != nil { + return true, now, err + } + + if cv.RetryCount > MaxRetryAttempts { + err := fmt.Errorf("reached max error attempts for challenge %v: %w", id, verificationErr) + return ace._verifyChallengeCleanup(sc, err, id) + } + + if cv.FirstValidation.IsZero() { + cv.FirstValidation = now + } + cv.RetryCount += 1 + cv.LastRetry = now + cv.RetryAfter = now.Add(time.Duration(cv.RetryCount*5) * time.Second) + + json, jsonErr := logical.StorageEntryJSON(path, cv) + if jsonErr != nil { + return true, now, fmt.Errorf("error persisting updated challenge validation queue entry (error prior to retry, if any: %v): %w", verificationErr, jsonErr) + } + + if putErr := sc.Storage.Put(sc.Context, json); putErr != nil { + return true, now, fmt.Errorf("error writing updated challenge validation entry (error prior to retry, if any: %v): %w", verificationErr, putErr) + } + + if verificationErr != nil { + verificationErr = fmt.Errorf("retrying validation: %w", verificationErr) + } + + 
return true, cv.RetryAfter, verificationErr +} + +func updateChallengeStatus(sc *storageContext, cv *ChallengeValidation, authzPath string, auth *ACMEAuthorization, challenge *ACMEChallenge, verificationErr error) error { + if verificationErr != nil { + challengeError := TranslateErrorToErrorResponse(verificationErr) + challenge.Error = challengeError.MarshalForStorage() + } + + if cv.RetryCount > MaxRetryAttempts { + challenge.Status = ACMEChallengeInvalid + auth.Status = ACMEAuthorizationInvalid + } + + if err := saveAuthorizationAtPath(sc, authzPath, auth); err != nil { + return fmt.Errorf("error persisting authorization/challenge update: %w", err) + } + return nil +} + +func (ace *ACMEChallengeEngine) _verifyChallengeCleanup(sc *storageContext, err error, id string) (bool, time.Time, error) { + now := time.Now() + + // Remove our ChallengeValidation entry only. + if deleteErr := sc.Storage.Delete(sc.Context, acmeValidationPrefix+id); deleteErr != nil { + return true, now.Add(1 * time.Second), fmt.Errorf("error deleting challenge %v (error prior to cleanup, if any: %v): %w", id, err, deleteErr) + } + + if err != nil { + err = fmt.Errorf("removing challenge validation attempt and not retrying %v; previous error: %w", id, err) + } + + return false, now, err +} diff --git a/builtin/logical/pki/acme_challenges.go b/builtin/logical/pki/acme_challenges.go new file mode 100644 index 0000000..54441e0 --- /dev/null +++ b/builtin/logical/pki/acme_challenges.go @@ -0,0 +1,502 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "bytes" + "context" + "crypto/sha256" + "crypto/subtle" + "crypto/tls" + "crypto/x509" + "encoding/asn1" + "encoding/base64" + "fmt" + "io" + "net" + "net/http" + "strings" + "time" +) + +const ( + DNSChallengePrefix = "_acme-challenge." + ALPNProtocol = "acme-tls/1" +) + +// While this should be a constant, there's no way to do a low-level test of +// ValidateTLSALPN01Challenge without spinning up a complicated Docker +// instance to build a custom responder. Because we already have a local +// toolchain, it is far easier to drive this through Go tests with a custom +// (high) port, rather than requiring permission to bind to port 443 (root-run +// tests are even worse). +var ALPNPort = "443" + +// OID of the acmeIdentifier X.509 Certificate Extension. +var OIDACMEIdentifier = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 31} + +// ValidateKeyAuthorization validates that the given keyAuthz from a challenge +// matches our expectation, returning (true, nil) if so, or (false, err) if +// not. +func ValidateKeyAuthorization(keyAuthz string, token string, thumbprint string) (bool, error) { + parts := strings.Split(keyAuthz, ".") + if len(parts) != 2 { + return false, fmt.Errorf("invalid authorization: got %v parts, expected 2", len(parts)) + } + + tokenPart := parts[0] + thumbprintPart := parts[1] + + if token != tokenPart || thumbprint != thumbprintPart { + return false, fmt.Errorf("key authorization was invalid") + } + + return true, nil +} + +// ValidateSHA256KeyAuthorization validates that the given keyAuthz from a +// challenge matches our expectation, returning (true, nil) if so, or +// (false, err) if not. +// +// This is for use with DNS challenges, which require base64 encoding. +func ValidateSHA256KeyAuthorization(keyAuthz string, token string, thumbprint string) (bool, error) { + authzContents := token + "." 
+ thumbprint + checksum := sha256.Sum256([]byte(authzContents)) + expectedAuthz := base64.RawURLEncoding.EncodeToString(checksum[:]) + + if keyAuthz != expectedAuthz { + return false, fmt.Errorf("sha256 key authorization was invalid") + } + + return true, nil +} + +// ValidateRawSHA256KeyAuthorization validates that the given keyAuthz from a +// challenge matches our expectation, returning (true, nil) if so, or +// (false, err) if not. +// +// This is for use with TLS challenges, which require the raw hash output. +func ValidateRawSHA256KeyAuthorization(keyAuthz []byte, token string, thumbprint string) (bool, error) { + authzContents := token + "." + thumbprint + expectedAuthz := sha256.Sum256([]byte(authzContents)) + + if len(keyAuthz) != len(expectedAuthz) || subtle.ConstantTimeCompare(expectedAuthz[:], keyAuthz) != 1 { + return false, fmt.Errorf("sha256 key authorization was invalid") + } + + return true, nil +} + +func buildResolver(config *acmeConfigEntry) (*net.Resolver, error) { + if len(config.DNSResolver) == 0 { + return net.DefaultResolver, nil + } + + return &net.Resolver{ + PreferGo: true, + StrictErrors: false, + Dial: func(ctx context.Context, network, address string) (net.Conn, error) { + d := net.Dialer{ + Timeout: 10 * time.Second, + } + return d.DialContext(ctx, network, config.DNSResolver) + }, + }, nil +} + +func buildDialerConfig(config *acmeConfigEntry) (*net.Dialer, error) { + resolver, err := buildResolver(config) + if err != nil { + return nil, fmt.Errorf("failed to build resolver: %w", err) + } + + return &net.Dialer{ + Timeout: 10 * time.Second, + KeepAlive: -1 * time.Second, + Resolver: resolver, + }, nil +} + +// Validates a given ACME http-01 challenge against the specified domain, +// per RFC 8555. +// +// We attempt to be defensive here against timeouts, extra redirects, &c. +func ValidateHTTP01Challenge(domain string, token string, thumbprint string, config *acmeConfigEntry) (bool, error) { + path := "http://" + domain + "/.well-known/acme-challenge/" + token + dialer, err := buildDialerConfig(config) + if err != nil { + return false, fmt.Errorf("failed to build dialer: %w", err) + } + + transport := &http.Transport{ + // Only a single request is sent to this server as we do not do any + // batching of validation attempts. There is no need to do an HTTP + // KeepAlive as a result. + DisableKeepAlives: true, + MaxIdleConns: 1, + MaxIdleConnsPerHost: 1, + MaxConnsPerHost: 1, + IdleConnTimeout: 1 * time.Second, + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + + // We'd rather timeout and re-attempt validation later than hang + // too many validators waiting for slow hosts. + DialContext: dialer.DialContext, + ResponseHeaderTimeout: 10 * time.Second, + } + + maxRedirects := 10 + urlLength := 2000 + + client := &http.Client{ + Transport: transport, + CheckRedirect: func(req *http.Request, via []*http.Request) error { + if len(via)+1 >= maxRedirects { + return fmt.Errorf("http-01: too many redirects: %v", len(via)+1) + } + + reqUrlLen := len(req.URL.String()) + if reqUrlLen > urlLength { + return fmt.Errorf("http-01: redirect url length too long: %v", reqUrlLen) + } + + return nil + }, + } + + resp, err := client.Get(path) + if err != nil { + return false, fmt.Errorf("http-01: failed to fetch path %v: %w", path, err) + } + + // We provision a buffer which allows for a variable size challenge, some + // whitespace, and a detection gap for too long of a message. 
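+ //
+ // As a rough illustration (not normative): RFC 8555 requires tokens
+ // with at least 128 bits of entropy, i.e. >= 22 base64url characters,
+ // and a SHA-256 JWK thumbprint encodes to 43 base64url characters, so
+ // a typical key authorization is on the order of 22 + 1 + 43 = 66
+ // bytes, comfortably below the 512-byte ceiling chosen below.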
+ minExpected := len(token) + 1 + len(thumbprint)
+ maxExpected := 512
+
+ defer resp.Body.Close()
+
+ // Attempt to read the body, but don't do so infinitely.
+ body, err := io.ReadAll(io.LimitReader(resp.Body, int64(maxExpected+1)))
+ if err != nil {
+ return false, fmt.Errorf("http-01: unexpected error while reading body: %w", err)
+ }
+
+ if len(body) > maxExpected {
+ return false, fmt.Errorf("http-01: response too large: received %v > %v bytes", len(body), maxExpected)
+ }
+
+ if len(body) < minExpected {
+ return false, fmt.Errorf("http-01: response too small: received %v < %v bytes", len(body), minExpected)
+ }
+
+ // Per RFC 8555 Section 8.3. HTTP Challenge:
+ //
+ // > The server SHOULD ignore whitespace characters at the end of the body.
+ keyAuthz := string(body)
+ keyAuthz = strings.TrimSpace(keyAuthz)
+
+ // If we got here, reading succeeded with no non-EOF error. Validate the
+ // token now that the body is bounded to a reasonable length.
+ return ValidateKeyAuthorization(keyAuthz, token, thumbprint)
+}
+
+func ValidateDNS01Challenge(domain string, token string, thumbprint string, config *acmeConfigEntry) (bool, error) {
+ // Here, domain is the value from the post-wildcard-processed identifier.
+ // Per RFC 8555, no difference in validation occurs if a wildcard entry
+ // is requested or if a non-wildcard entry is requested.
+ //
+ // Note: the DNS server is operator controlled and is assumed to be less
+ // malicious. We resolve via buildResolver for two reasons:
+ //
+ // 1. To honor a resolver configured via the ACME configuration (falling
+ // back to the default resolver otherwise),
+ // 2. To use a context to set stricter timeout limits.
+ resolver, err := buildResolver(config)
+ if err != nil {
+ return false, fmt.Errorf("failed to build resolver: %w", err)
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+
+ name := DNSChallengePrefix + domain
+ results, err := resolver.LookupTXT(ctx, name)
+ if err != nil {
+ return false, fmt.Errorf("dns-01: failed to lookup TXT records for domain (%v) via resolver %v: %w", name, config.DNSResolver, err)
+ }
+
+ for _, keyAuthz := range results {
+ ok, _ := ValidateSHA256KeyAuthorization(keyAuthz, token, thumbprint)
+ if ok {
+ return true, nil
+ }
+ }
+
+ return false, fmt.Errorf("dns-01: challenge failed against %v records", len(results))
+}
+
+func ValidateTLSALPN01Challenge(domain string, token string, thumbprint string, config *acmeConfigEntry) (bool, error) {
+ // This challenge is defined in RFC 8737, Automated Certificate Management
+ // Environment (ACME) TLS Application-Layer Protocol Negotiation
+ // (ALPN) Challenge Extension.
+ //
+ // This is conceptually similar to ValidateHTTP01Challenge, but
+ // uses a TLS connection on port 443 with the specified ALPN
+ // protocol.
+
+ cfg := &tls.Config{
+ // Per RFC 8737 Section 3. TLS with Application-Layer Protocol
+ // Negotiation (TLS ALPN) Challenge, the name of the negotiated
+ // protocol is "acme-tls/1".
+ NextProtos: []string{ALPNProtocol},
+
+ // Per RFC 8737 Section 3. TLS with Application-Layer Protocol
+ // Negotiation (TLS ALPN) Challenge:
+ //
+ // > ... and an SNI extension containing only the domain name
+ // > being validated during the TLS handshake.
+ //
+ // According to the Go docs, setting this option (even though
+ // InsecureSkipVerify=true is also specified) allows us to
+ // set the SNI extension to this value.
+ ServerName: domain, + + VerifyConnection: func(connState tls.ConnectionState) error { + // We initiated a fresh connection with no session tickets; + // even if we did have a session ticket, we do not wish to + // use it. Verify that the server has not inadvertently + // reused connections between validation attempts or something. + if connState.DidResume { + return fmt.Errorf("server under test incorrectly reported that handshake was resumed when no session cache was provided; refusing to continue") + } + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > The ACME server verifies that during the TLS handshake the + // > application-layer protocol "acme-tls/1" was successfully + // > negotiated (and that the ALPN extension contained only the + // > value "acme-tls/1"). + if connState.NegotiatedProtocol != ALPNProtocol { + return fmt.Errorf("server under test negotiated unexpected ALPN protocol %v", connState.NegotiatedProtocol) + } + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > and that the certificate returned + // + // Because this certificate MUST be self-signed (per earlier + // statement in RFC 8737 Section 3), there is no point in sending + // more than one certificate, and so we will err early here if + // we got more than one. + if len(connState.PeerCertificates) > 1 { + return fmt.Errorf("server under test returned multiple (%v) certificates when we expected only one", len(connState.PeerCertificates)) + } + cert := connState.PeerCertificates[0] + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > The client prepares for validation by constructing a + // > self-signed certificate that MUST contain an acmeIdentifier + // > extension and a subjectAlternativeName extension [RFC5280]. + // + // Verify that this is a self-signed certificate that isn't signed + // by another certificate (i.e., with the same key material but + // different issuer). + // NOTE: Do not use cert.CheckSignatureFrom(cert) as we need to bypass the + // checks for the parent certificate having the IsCA basic constraint set. + err := cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature) + if err != nil { + return fmt.Errorf("server under test returned a non-self-signed certificate: %v", err) + } + + if !bytes.Equal(cert.RawSubject, cert.RawIssuer) { + return fmt.Errorf("server under test returned a non-self-signed certificate: invalid subject (%v) <-> issuer (%v) match", cert.Subject.String(), cert.Issuer.String()) + } + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > The subjectAlternativeName extension MUST contain a single + // > dNSName entry where the value is the domain name being + // > validated. + // + // TODO: this does not validate that there are not other SANs + // with unknown (to Go) OIDs. + if len(cert.DNSNames) != 1 || len(cert.EmailAddresses) > 0 || len(cert.IPAddresses) > 0 || len(cert.URIs) > 0 { + return fmt.Errorf("server under test returned a certificate with incorrect SANs") + } + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > The comparison of dNSNames MUST be case insensitive + // > [RFC4343]. Note that as ACME doesn't support Unicode + // > identifiers, all dNSNames MUST be encoded using the rules + // > of [RFC3492]. 
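+ //
+ // For example (illustrative only): a certificate presenting the
+ // dNSName "EXAMPLE.COM" satisfies the identifier "example.com";
+ // strings.EqualFold below performs the required ASCII case folding.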
+ if !strings.EqualFold(cert.DNSNames[0], domain) { + return fmt.Errorf("server under test returned a certificate with unexpected identifier: %v", cert.DNSNames[0]) + } + + // Per above, verify that the acmeIdentifier extension is present + // exactly once and has the correct value. + var foundACMEId bool + for _, ext := range cert.Extensions { + if !ext.Id.Equal(OIDACMEIdentifier) { + continue + } + + // There must be only a single ACME extension. + if foundACMEId { + return fmt.Errorf("server under test returned a certificate with multiple acmeIdentifier extensions") + } + foundACMEId = true + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > a critical acmeIdentifier extension + if !ext.Critical { + return fmt.Errorf("server under test returned a certificate with an acmeIdentifier extension marked non-Critical") + } + + var keyAuthz []byte + remainder, err := asn1.Unmarshal(ext.Value, &keyAuthz) + if err != nil { + return fmt.Errorf("server under test returned a certificate with invalid acmeIdentifier extension value: %w", err) + } + if len(remainder) > 0 { + return fmt.Errorf("server under test returned a certificate with invalid acmeIdentifier extension value with additional trailing data") + } + + ok, err := ValidateRawSHA256KeyAuthorization(keyAuthz, token, thumbprint) + if !ok || err != nil { + return fmt.Errorf("server under test returned a certificate with an invalid key authorization (%w)", err) + } + } + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > The ACME server verifies that ... the certificate returned + // > contains: ... a critical acmeIdentifier extension containing + // > the expected SHA-256 digest computed in step 1. + if !foundACMEId { + return fmt.Errorf("server under test returned a certificate without the required acmeIdentifier extension") + } + + // Remove the handled critical extension and validate that we + // have no additional critical extensions left unhandled. + var index int = -1 + for oidIndex, oid := range cert.UnhandledCriticalExtensions { + if oid.Equal(OIDACMEIdentifier) { + index = oidIndex + break + } + } + if index != -1 { + // Unlike the foundACMEId case, this is not a failure; if Go + // updates to "understand" this critical extension, we do not + // wish to fail. + cert.UnhandledCriticalExtensions = append(cert.UnhandledCriticalExtensions[0:index], cert.UnhandledCriticalExtensions[index+1:]...) + } + if len(cert.UnhandledCriticalExtensions) > 0 { + return fmt.Errorf("server under test returned a certificate with additional unknown critical extensions (%v)", cert.UnhandledCriticalExtensions) + } + + // All good! + return nil + }, + + // We never want to resume a connection; do not provide session + // cache storage. + ClientSessionCache: nil, + + // Do not trust any system trusted certificates; we're going to be + // manually validating the chain, so specifying a non-empty pool + // here could only cause additional, unnecessary work. + RootCAs: x509.NewCertPool(), + + // Do not bother validating the client's chain; we know it should be + // self-signed. This also disables hostname verification, but we do + // this verification as part of VerifyConnection(...) ourselves. + // + // Per Go docs, this option is only safe in conjunction with + // VerifyConnection which we define above. + InsecureSkipVerify: true, + + // RFC 8737 Section 4. 
acme-tls/1 Protocol Definition: + // + // > ACME servers that implement "acme-tls/1" MUST only negotiate + // > TLS 1.2 [RFC5246] or higher when connecting to clients for + // > validation. + MinVersion: tls.VersionTLS12, + + // While RFC 8737 does not place restrictions around allowed cipher + // suites, we wish to restrict ourselves to secure defaults. Specify + // the Intermediate guideline from Mozilla's TLS config generator to + // disable obviously weak ciphers. + // + // See also: https://ssl-config.mozilla.org/#server=go&version=1.14.4&config=intermediate&guideline=5.7 + CipherSuites: []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + }, + } + + // Build a dialer using our custom DNS resolver, to ensure domains get + // resolved according to configuration. + dialer, err := buildDialerConfig(config) + if err != nil { + return false, fmt.Errorf("failed to build dialer: %w", err) + } + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > 2. The ACME server resolves the domain name being validated and + // > chooses one of the IP addresses returned for validation (the + // > server MAY validate against multiple addresses if more than + // > one is returned). + // > 3. The ACME server initiates a TLS connection to the chosen IP + // > address. This connection MUST use TCP port 443. + address := fmt.Sprintf("%v:"+ALPNPort, domain) + conn, err := dialer.Dial("tcp", address) + if err != nil { + return false, fmt.Errorf("tls-alpn-01: failed to dial host: %w", err) + } + + // Initiate the connection to the remote peer. + client := tls.Client(conn, cfg) + + // We intentionally swallow this error as it isn't useful to the + // underlying protocol we perform here. Notably, per RFC 8737 + // Section 4. acme-tls/1 Protocol Definition: + // + // > Once the handshake is completed, the client MUST NOT exchange + // > any further data with the server and MUST immediately close the + // > connection. ... Because of this, an ACME server MAY choose to + // > withhold authorization if either the certificate signature is + // > invalid or the handshake doesn't fully complete. + defer client.Close() + + // We wish to put time bounds on the total time the handshake can + // stall for, so build a connection context here. + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // See note above about why we can allow Handshake to complete + // successfully. + if err := client.HandshakeContext(ctx); err != nil { + return false, fmt.Errorf("tls-alpn-01: failed to perform handshake: %w", err) + } + return true, nil +} diff --git a/builtin/logical/pki/acme_challenges_test.go b/builtin/logical/pki/acme_challenges_test.go new file mode 100644 index 0000000..ed5dfa4 --- /dev/null +++ b/builtin/logical/pki/acme_challenges_test.go @@ -0,0 +1,759 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/sha256" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/base64" + "fmt" + "math/big" + "net/http" + "net/http/httptest" + "strconv" + "strings" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/builtin/logical/pki/dnstest" + + "github.com/stretchr/testify/require" +) + +type keyAuthorizationTestCase struct { + keyAuthz string + token string + thumbprint string + shouldFail bool +} + +var keyAuthorizationTestCases = []keyAuthorizationTestCase{ + { + // Entirely empty + "", + "non-empty-token", + "non-empty-thumbprint", + true, + }, + { + // Both empty + ".", + "non-empty-token", + "non-empty-thumbprint", + true, + }, + { + // Not equal + "non-.non-", + "non-empty-token", + "non-empty-thumbprint", + true, + }, + { + // Empty thumbprint + "non-.", + "non-empty-token", + "non-empty-thumbprint", + true, + }, + { + // Empty token + ".non-", + "non-empty-token", + "non-empty-thumbprint", + true, + }, + { + // Wrong order + "non-empty-thumbprint.non-empty-token", + "non-empty-token", + "non-empty-thumbprint", + true, + }, + { + // Too many pieces + "one.two.three", + "non-empty-token", + "non-empty-thumbprint", + true, + }, + { + // Valid + "non-empty-token.non-empty-thumbprint", + "non-empty-token", + "non-empty-thumbprint", + false, + }, +} + +func TestAcmeValidateKeyAuthorization(t *testing.T) { + t.Parallel() + + for index, tc := range keyAuthorizationTestCases { + t.Run("subtest-"+strconv.Itoa(index), func(st *testing.T) { + isValid, err := ValidateKeyAuthorization(tc.keyAuthz, tc.token, tc.thumbprint) + if !isValid && err == nil { + st.Fatalf("[%d] expected failure to give reason via err (%v / %v)", index, isValid, err) + } + + expectedValid := !tc.shouldFail + if expectedValid != isValid { + st.Fatalf("[%d] got ret=%v, expected ret=%v (shouldFail=%v)", index, isValid, expectedValid, tc.shouldFail) + } + }) + } +} + +func TestAcmeValidateHTTP01Challenge(t *testing.T) { + t.Parallel() + + for index, tc := range keyAuthorizationTestCases { + validFunc := func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte(tc.keyAuthz)) + } + withPadding := func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte(" " + tc.keyAuthz + " ")) + } + withRedirect := func(w http.ResponseWriter, r *http.Request) { + if strings.Contains(r.URL.Path, "/.well-known/") { + http.Redirect(w, r, "/my-http-01-challenge-response", 301) + return + } + + w.Write([]byte(tc.keyAuthz)) + } + withSleep := func(w http.ResponseWriter, r *http.Request) { + // Long enough to ensure any excessively short timeouts are hit, + // not long enough to trigger a failure (hopefully). 
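+ //
+ // (For reference: ValidateHTTP01Challenge configures a 10s
+ // ResponseHeaderTimeout, so this 5s delay should pass, while the
+ // 30s simulateHang handler further below is expected to fail.)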
+ time.Sleep(5 * time.Second) + w.Write([]byte(tc.keyAuthz)) + } + + validHandlers := []http.HandlerFunc{ + http.HandlerFunc(validFunc), http.HandlerFunc(withPadding), + http.HandlerFunc(withRedirect), http.HandlerFunc(withSleep), + } + + for handlerIndex, handler := range validHandlers { + func() { + ts := httptest.NewServer(handler) + defer ts.Close() + + host := ts.URL[7:] + isValid, err := ValidateHTTP01Challenge(host, tc.token, tc.thumbprint, &acmeConfigEntry{}) + if !isValid && err == nil { + t.Fatalf("[tc=%d/handler=%d] expected failure to give reason via err (%v / %v)", index, handlerIndex, isValid, err) + } + + expectedValid := !tc.shouldFail + if expectedValid != isValid { + t.Fatalf("[tc=%d/handler=%d] got ret=%v (err=%v), expected ret=%v (shouldFail=%v)", index, handlerIndex, isValid, err, expectedValid, tc.shouldFail) + } + }() + } + } + + // Negative test cases for various HTTP-specific scenarios. + redirectLoop := func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, "/my-http-01-challenge-response", 301) + } + publicRedirect := func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, "http://hashicorp.com/", 301) + } + noData := func(w http.ResponseWriter, r *http.Request) {} + noContent := func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNoContent) + } + notFound := func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + } + simulateHang := func(w http.ResponseWriter, r *http.Request) { + time.Sleep(30 * time.Second) + w.Write([]byte("my-token.my-thumbprint")) + } + tooLarge := func(w http.ResponseWriter, r *http.Request) { + for i := 0; i < 512; i++ { + w.Write([]byte("my-token.my-thumbprint\n")) + } + } + + validHandlers := []http.HandlerFunc{ + http.HandlerFunc(redirectLoop), http.HandlerFunc(publicRedirect), + http.HandlerFunc(noData), http.HandlerFunc(noContent), + http.HandlerFunc(notFound), http.HandlerFunc(simulateHang), + http.HandlerFunc(tooLarge), + } + for handlerIndex, handler := range validHandlers { + func() { + ts := httptest.NewServer(handler) + defer ts.Close() + + host := ts.URL[7:] + isValid, err := ValidateHTTP01Challenge(host, "my-token", "my-thumbprint", &acmeConfigEntry{}) + if isValid || err == nil { + t.Fatalf("[handler=%d] expected failure validating challenge (%v / %v)", handlerIndex, isValid, err) + } + }() + } +} + +func TestAcmeValidateDNS01Challenge(t *testing.T) { + t.Parallel() + + host := "dadgarcorp.com" + resolver := dnstest.SetupResolver(t, host) + defer resolver.Cleanup() + + t.Logf("DNS Server Address: %v", resolver.GetLocalAddr()) + + config := &acmeConfigEntry{ + DNSResolver: resolver.GetLocalAddr(), + } + + for index, tc := range keyAuthorizationTestCases { + checksum := sha256.Sum256([]byte(tc.keyAuthz)) + authz := base64.RawURLEncoding.EncodeToString(checksum[:]) + resolver.AddRecord(DNSChallengePrefix+host, "TXT", authz) + resolver.PushConfig() + + isValid, err := ValidateDNS01Challenge(host, tc.token, tc.thumbprint, config) + if !isValid && err == nil { + t.Fatalf("[tc=%d] expected failure to give reason via err (%v / %v)", index, isValid, err) + } + + expectedValid := !tc.shouldFail + if expectedValid != isValid { + t.Fatalf("[tc=%d] got ret=%v (err=%v), expected ret=%v (shouldFail=%v)", index, isValid, err, expectedValid, tc.shouldFail) + } + + resolver.RemoveAllRecords() + } +} + +func TestAcmeValidateTLSALPN01Challenge(t *testing.T) { + // This test is not parallel because we modify ALPNPort to use a custom + // non-standard port _just for 
testing purposes_.
+ host := "localhost"
+ config := &acmeConfigEntry{}
+
+ log := hclog.L()
+
+ returnedProtocols := []string{ALPNProtocol}
+ var certificates []*x509.Certificate
+ var privateKey crypto.PrivateKey
+
+ tlsCfg := &tls.Config{}
+ tlsCfg.GetConfigForClient = func(*tls.ClientHelloInfo) (*tls.Config, error) {
+ retCfg := tlsCfg.Clone()
+ retCfg.NextProtos = returnedProtocols
+ log.Info(fmt.Sprintf("[alpn-server] returned protocol: %v", returnedProtocols))
+ return retCfg, nil
+ }
+ tlsCfg.GetCertificate = func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
+ var ret tls.Certificate
+ for index, cert := range certificates {
+ ret.Certificate = append(ret.Certificate, cert.Raw)
+ if index == 0 {
+ ret.Leaf = cert
+ }
+ }
+ ret.PrivateKey = privateKey
+ log.Info(fmt.Sprintf("[alpn-server] returned certificates: %v", ret))
+ return &ret, nil
+ }
+
+ ln, err := tls.Listen("tcp", host+":0", tlsCfg)
+ require.NoError(t, err, "failed to listen with TLS config")
+
+ doOneAccept := func() {
+ log.Info("[alpn-server] starting accept...")
+ connRaw, err := ln.Accept()
+ require.NoError(t, err, "failed to accept TLS connection")
+
+ log.Info("[alpn-server] got connection...")
+ conn := tls.Server(connRaw.(*tls.Conn), tlsCfg)
+
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
+ defer func() {
+ log.Info("[alpn-server] canceling listener connection...")
+ cancel()
+ }()
+
+ log.Info("[alpn-server] starting handshake...")
+ if err := conn.HandshakeContext(ctx); err != nil {
+ log.Info("[alpn-server] got non-fatal error while handshaking connection", "err", err)
+ }
+
+ log.Info("[alpn-server] closing connection...")
+ if err := conn.Close(); err != nil {
+ log.Info("[alpn-server] got non-fatal error while closing connection", "err", err)
+ }
+ }
+
+ ALPNPort = strings.Split(ln.Addr().String(), ":")[1]
+
+ type alpnTestCase struct {
+ name string
+ certificates []*x509.Certificate
+ privateKey crypto.PrivateKey
+ protocols []string
+ token string
+ thumbprint string
+ shouldFail bool
+ }
+
+ var alpnTestCases []alpnTestCase
+ // Add all of our keyAuthorizationTestCases into alpnTestCases
+ for index, tc := range keyAuthorizationTestCases {
+ log.Info(fmt.Sprintf("using keyAuthorizationTestCase [tc=%d] as alpnTestCase [tc=%d]...", index, len(alpnTestCases)))
+ // Properly encode the authorization.
+ checksum := sha256.Sum256([]byte(tc.keyAuthz))
+ authz, err := asn1.Marshal(checksum[:])
+ require.NoError(t, err, "failed asn.1 marshalling authz")
+
+ // Build a self-signed certificate.
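+ // (Per RFC 8737, the responder certificate must be self-signed, carry
+ // exactly one dNSName SAN naming the identifier, and include a critical
+ // acmeIdentifier extension whose value is the DER-encoded SHA-256
+ // digest of the key authorization, matching the authz bytes marshalled
+ // above.)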
+ key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating private key") + tmpl := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + }, + Issuer: pkix.Name{ + CommonName: host, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + PublicKey: key.Public(), + SerialNumber: big.NewInt(1), + DNSNames: []string{host}, + ExtraExtensions: []pkix.Extension{ + { + Id: OIDACMEIdentifier, + Critical: true, + Value: authz, + }, + }, + BasicConstraintsValid: true, + IsCA: false, + } + certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) + require.NoError(t, err, "failed to create certificate") + cert, err := x509.ParseCertificate(certBytes) + require.NoError(t, err, "failed to parse newly generated certificate") + + newTc := alpnTestCase{ + name: fmt.Sprintf("keyAuthorizationTestCase[%d]", index), + certificates: []*x509.Certificate{cert}, + privateKey: key, + protocols: []string{ALPNProtocol}, + token: tc.token, + thumbprint: tc.thumbprint, + shouldFail: tc.shouldFail, + } + alpnTestCases = append(alpnTestCases, newTc) + } + + { + // Test case: Longer chain + // Build a self-signed certificate. + rootKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating root private key") + tmpl := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "Root CA", + }, + Issuer: pkix.Name{ + CommonName: "Root CA", + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + PublicKey: rootKey.Public(), + SerialNumber: big.NewInt(1), + BasicConstraintsValid: true, + IsCA: true, + } + rootCertBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, rootKey.Public(), rootKey) + require.NoError(t, err, "failed to create root certificate") + rootCert, err := x509.ParseCertificate(rootCertBytes) + require.NoError(t, err, "failed to parse newly generated root certificate") + + // Compute our authorization. 
+ checksum := sha256.Sum256([]byte("valid.valid")) + authz, err := asn1.Marshal(checksum[:]) + require.NoError(t, err, "failed to marshal authz with asn.1 ") + + // Build a leaf certificate which _could_ pass validation + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating leaf private key") + tmpl = &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + }, + Issuer: pkix.Name{ + CommonName: "Root CA", + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + PublicKey: key.Public(), + SerialNumber: big.NewInt(2), + DNSNames: []string{host}, + ExtraExtensions: []pkix.Extension{ + { + Id: OIDACMEIdentifier, + Critical: true, + Value: authz, + }, + }, + BasicConstraintsValid: true, + IsCA: false, + } + certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, rootCert, key.Public(), rootKey) + require.NoError(t, err, "failed to create leaf certificate") + cert, err := x509.ParseCertificate(certBytes) + require.NoError(t, err, "failed to parse newly generated leaf certificate") + + newTc := alpnTestCase{ + name: "longer chain with valid leaf", + certificates: []*x509.Certificate{cert, rootCert}, + privateKey: key, + protocols: []string{ALPNProtocol}, + token: "valid", + thumbprint: "valid", + shouldFail: true, + } + alpnTestCases = append(alpnTestCases, newTc) + } + + { + // Test case: cert without DNSSan + // Compute our authorization. + checksum := sha256.Sum256([]byte("valid.valid")) + authz, err := asn1.Marshal(checksum[:]) + require.NoError(t, err, "failed to marshal authz with asn.1 ") + + // Build a leaf certificate without a DNSSan + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating leaf private key") + tmpl := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + }, + Issuer: pkix.Name{ + CommonName: host, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + PublicKey: key.Public(), + SerialNumber: big.NewInt(2), + // NO DNSNames + ExtraExtensions: []pkix.Extension{ + { + Id: OIDACMEIdentifier, + Critical: true, + Value: authz, + }, + }, + BasicConstraintsValid: true, + IsCA: false, + } + certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) + require.NoError(t, err, "failed to create leaf certificate") + cert, err := x509.ParseCertificate(certBytes) + require.NoError(t, err, "failed to parse newly generated leaf certificate") + + newTc := alpnTestCase{ + name: "valid keyauthz without valid dnsname", + certificates: []*x509.Certificate{cert}, + privateKey: key, + protocols: []string{ALPNProtocol}, + token: "valid", + thumbprint: "valid", + shouldFail: true, + } + alpnTestCases = append(alpnTestCases, newTc) + } + + { + // Test case: cert without matching DNSSan + // Compute our authorization. 
+ checksum := sha256.Sum256([]byte("valid.valid")) + authz, err := asn1.Marshal(checksum[:]) + require.NoError(t, err, "failed to marshal authz with asn.1 ") + + // Build a leaf certificate which fails validation due to bad DNSName + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating leaf private key") + tmpl := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + }, + Issuer: pkix.Name{ + CommonName: host, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + PublicKey: key.Public(), + SerialNumber: big.NewInt(2), + DNSNames: []string{host + ".dadgarcorp.com" /* not matching host! */}, + ExtraExtensions: []pkix.Extension{ + { + Id: OIDACMEIdentifier, + Critical: true, + Value: authz, + }, + }, + BasicConstraintsValid: true, + IsCA: false, + } + certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) + require.NoError(t, err, "failed to create leaf certificate") + cert, err := x509.ParseCertificate(certBytes) + require.NoError(t, err, "failed to parse newly generated leaf certificate") + + newTc := alpnTestCase{ + name: "valid keyauthz without matching dnsname", + certificates: []*x509.Certificate{cert}, + privateKey: key, + protocols: []string{ALPNProtocol}, + token: "valid", + thumbprint: "valid", + shouldFail: true, + } + alpnTestCases = append(alpnTestCases, newTc) + } + + { + // Test case: cert with additional SAN + // Compute our authorization. + checksum := sha256.Sum256([]byte("valid.valid")) + authz, err := asn1.Marshal(checksum[:]) + require.NoError(t, err, "failed to marshal authz with asn.1 ") + + // Build a leaf certificate which has an invalid additional SAN + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating leaf private key") + tmpl := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + }, + Issuer: pkix.Name{ + CommonName: host, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + PublicKey: key.Public(), + SerialNumber: big.NewInt(2), + DNSNames: []string{host}, + EmailAddresses: []string{"webmaster@" + host}, /* unexpected */ + ExtraExtensions: []pkix.Extension{ + { + Id: OIDACMEIdentifier, + Critical: true, + Value: authz, + }, + }, + BasicConstraintsValid: true, + IsCA: false, + } + certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) + require.NoError(t, err, "failed to create leaf certificate") + cert, err := x509.ParseCertificate(certBytes) + require.NoError(t, err, "failed to parse newly generated leaf certificate") + + newTc := alpnTestCase{ + name: "valid keyauthz with additional email SANs", + certificates: []*x509.Certificate{cert}, + privateKey: key, + protocols: []string{ALPNProtocol}, + token: "valid", + thumbprint: "valid", + shouldFail: true, + } + alpnTestCases = append(alpnTestCases, newTc) + } + + { + // Test case: cert without CN + // Compute our authorization. 
+ checksum := sha256.Sum256([]byte("valid.valid")) + authz, err := asn1.Marshal(checksum[:]) + require.NoError(t, err, "failed to marshal authz with asn.1 ") + + // Build a leaf certificate which should pass validation + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating leaf private key") + tmpl := &x509.Certificate{ + Subject: pkix.Name{}, + Issuer: pkix.Name{}, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + PublicKey: key.Public(), + SerialNumber: big.NewInt(2), + DNSNames: []string{host}, + ExtraExtensions: []pkix.Extension{ + { + Id: OIDACMEIdentifier, + Critical: true, + Value: authz, + }, + }, + BasicConstraintsValid: true, + IsCA: false, + } + certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) + require.NoError(t, err, "failed to create leaf certificate") + cert, err := x509.ParseCertificate(certBytes) + require.NoError(t, err, "failed to parse newly generated leaf certificate") + + newTc := alpnTestCase{ + name: "valid certificate; no Subject/Issuer (missing CN)", + certificates: []*x509.Certificate{cert}, + privateKey: key, + protocols: []string{ALPNProtocol}, + token: "valid", + thumbprint: "valid", + shouldFail: false, + } + alpnTestCases = append(alpnTestCases, newTc) + } + + { + // Test case: cert without the extension + // Build a leaf certificate which should fail validation + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generating leaf private key") + tmpl := &x509.Certificate{ + Subject: pkix.Name{}, + Issuer: pkix.Name{}, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + PublicKey: key.Public(), + SerialNumber: big.NewInt(1), + DNSNames: []string{host}, + BasicConstraintsValid: true, + IsCA: true, + } + certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key) + require.NoError(t, err, "failed to create leaf certificate") + cert, err := x509.ParseCertificate(certBytes) + require.NoError(t, err, "failed to parse newly generated leaf certificate") + + newTc := alpnTestCase{ + name: "missing required acmeIdentifier extension", + certificates: []*x509.Certificate{cert}, + privateKey: key, + protocols: []string{ALPNProtocol}, + token: "valid", + thumbprint: "valid", + shouldFail: true, + } + alpnTestCases = append(alpnTestCases, newTc) + } + + { + // Test case: root without a leaf + // Build a self-signed certificate. 
+ rootKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ require.NoError(t, err, "failed generating root private key")
+ tmpl := &x509.Certificate{
+ Subject: pkix.Name{
+ CommonName: "Root CA",
+ },
+ Issuer: pkix.Name{
+ CommonName: "Root CA",
+ },
+ KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
+ ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
+ PublicKey: rootKey.Public(),
+ SerialNumber: big.NewInt(1),
+ BasicConstraintsValid: true,
+ IsCA: true,
+ }
+ rootCertBytes, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, rootKey.Public(), rootKey)
+ require.NoError(t, err, "failed to create root certificate")
+ rootCert, err := x509.ParseCertificate(rootCertBytes)
+ require.NoError(t, err, "failed to parse newly generated root certificate")
+
+ newTc := alpnTestCase{
+ name: "root without leaf",
+ certificates: []*x509.Certificate{rootCert},
+ privateKey: rootKey,
+ protocols: []string{ALPNProtocol},
+ token: "valid",
+ thumbprint: "valid",
+ shouldFail: true,
+ }
+ alpnTestCases = append(alpnTestCases, newTc)
+ }
+
+ for index, tc := range alpnTestCases {
+ log.Info(fmt.Sprintf("\n\n[tc=%d/name=%s] starting validation", index, tc.name))
+ certificates = tc.certificates
+ privateKey = tc.privateKey
+ returnedProtocols = tc.protocols
+
+ // Attempt to validate the challenge.
+ go doOneAccept()
+ isValid, err := ValidateTLSALPN01Challenge(host, tc.token, tc.thumbprint, config)
+ if !isValid && err == nil {
+ t.Fatalf("[tc=%d/name=%s] expected failure to give reason via err (%v / %v)", index, tc.name, isValid, err)
+ }
+
+ expectedValid := !tc.shouldFail
+ if expectedValid != isValid {
+ t.Fatalf("[tc=%d/name=%s] got ret=%v (err=%v), expected ret=%v (shouldFail=%v)", index, tc.name, isValid, err, expectedValid, tc.shouldFail)
+ } else if err != nil {
+ log.Info(fmt.Sprintf("[tc=%d/name=%s] got expected failure: err=%v", index, tc.name, err))
+ }
+ }
+}
+
+// TestAcmeValidateHttp01TLSRedirect verifies that we allow an http-01 challenge to redirect
+// to a TLS server without validating that its certificate chain is valid. We don't validate
+// the TLS chain because we would have accepted the response over a non-secured channel
+// anyway had the original request not redirected us.
+func TestAcmeValidateHttp01TLSRedirect(t *testing.T) {
+ t.Parallel()
+
+ for index, tc := range keyAuthorizationTestCases {
+ t.Run("subtest-"+strconv.Itoa(index), func(st *testing.T) {
+ validFunc := func(w http.ResponseWriter, r *http.Request) {
+ if strings.Contains(r.URL.Path, "/.well-known/") {
+ w.Write([]byte(tc.keyAuthz))
+ return
+ }
+ http.Error(w, "status not found", http.StatusNotFound)
+ }
+
+ tlsTs := httptest.NewTLSServer(http.HandlerFunc(validFunc))
+ defer tlsTs.Close()
+
+ // Set up an http server that will redirect to our TLS server.
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ http.Redirect(w, r, tlsTs.URL+r.URL.Path, 301)
+ }))
+ defer ts.Close()
+
+ host := ts.URL[len("http://"):]
+ isValid, err := ValidateHTTP01Challenge(host, tc.token, tc.thumbprint, &acmeConfigEntry{})
+ if !isValid && err == nil {
+ st.Fatalf("[tc=%d] expected failure to give reason via err (%v / %v)", index, isValid, err)
+ }
+
+ expectedValid := !tc.shouldFail
+ if expectedValid != isValid {
+ st.Fatalf("[tc=%d] got ret=%v (err=%v), expected ret=%v (shouldFail=%v)", index, isValid, err, expectedValid, tc.shouldFail)
+ }
+ })
+ }
+}
diff --git a/builtin/logical/pki/acme_eab_policy.go b/builtin/logical/pki/acme_eab_policy.go
new file mode 100644
index 0000000..9a96f3a
--- /dev/null
+++ b/builtin/logical/pki/acme_eab_policy.go
@@ -0,0 +1,69 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package pki
+
+import (
+ "fmt"
+ "strings"
+)
+
+type EabPolicyName string
+
+const (
+ eabPolicyNotRequired EabPolicyName = "not-required"
+ eabPolicyNewAccountRequired EabPolicyName = "new-account-required"
+ eabPolicyAlwaysRequired EabPolicyName = "always-required"
+)
+
+func getEabPolicyByString(name string) (EabPolicy, error) {
+ lcName := strings.TrimSpace(strings.ToLower(name))
+ switch lcName {
+ case string(eabPolicyNotRequired):
+ return getEabPolicyByName(eabPolicyNotRequired), nil
+ case string(eabPolicyNewAccountRequired):
+ return getEabPolicyByName(eabPolicyNewAccountRequired), nil
+ case string(eabPolicyAlwaysRequired):
+ return getEabPolicyByName(eabPolicyAlwaysRequired), nil
+ default:
+ return getEabPolicyByName(eabPolicyAlwaysRequired), fmt.Errorf("unknown eab policy name: %s", name)
+ }
+}
+
+func getEabPolicyByName(name EabPolicyName) EabPolicy {
+ return EabPolicy{Name: name}
+}
+
+type EabPolicy struct {
+ Name EabPolicyName
+}
+
+// EnforceForNewAccount enforces, for new account creations, whether we
+// require an EAB.
+func (ep EabPolicy) EnforceForNewAccount(eabData *eabType) error {
+ if (ep.Name == eabPolicyAlwaysRequired || ep.Name == eabPolicyNewAccountRequired) && eabData == nil {
+ return ErrExternalAccountRequired
+ }
+
+ return nil
+}
+
+// EnforceForExistingAccount enforces, for all operations within ACME, whether
+// the account being used requires an EAB attached to it.
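+//
+// For example (illustrative only): under the "always-required" policy, an
+// account that was registered without an EAB before the policy changed is
+// rejected here, whereas "new-account-required" keeps honoring it.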
+func (ep EabPolicy) EnforceForExistingAccount(account *acmeAccount) error {
+ if ep.Name == eabPolicyAlwaysRequired && account.Eab == nil {
+ return ErrExternalAccountRequired
+ }
+
+ return nil
+}
+
+// IsExternalAccountRequired reports whether an EAB is required for incoming
+// new accounts.
+func (ep EabPolicy) IsExternalAccountRequired() bool {
+ return ep.Name == eabPolicyAlwaysRequired || ep.Name == eabPolicyNewAccountRequired
+}
+
+// OverrideEnvDisablingPublicAcme reports whether, when ACME is enabled but
+// the OS environment variable has said to disable public ACME support, this
+// policy may override that environment variable and turn ACME support on.
+func (ep EabPolicy) OverrideEnvDisablingPublicAcme() bool {
+ return ep.Name == eabPolicyAlwaysRequired
+}
diff --git a/builtin/logical/pki/acme_errors.go b/builtin/logical/pki/acme_errors.go
new file mode 100644
index 0000000..5b73d9d
--- /dev/null
+++ b/builtin/logical/pki/acme_errors.go
@@ -0,0 +1,210 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package pki
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/vault/sdk/logical"
+)
+
+// Error prefix; see RFC 8555 Section 6.7. Errors.
+const ErrorPrefix = "urn:ietf:params:acme:error:"
+const ErrorContentType = "application/problem+json"
+
+// See RFC 8555 Section 6.7. Errors.
+var ErrAccountDoesNotExist = errors.New("The request specified an account that does not exist")
+
+var ErrAcmeDisabled = errors.New("ACME feature is disabled")
+
+var (
+ ErrAlreadyRevoked = errors.New("The request specified a certificate to be revoked that has already been revoked")
+ ErrBadCSR = errors.New("The CSR is unacceptable")
+ ErrBadNonce = errors.New("The client sent an unacceptable anti-replay nonce")
+ ErrBadPublicKey = errors.New("The JWS was signed by a public key the server does not support")
+ ErrBadRevocationReason = errors.New("The revocation reason provided is not allowed by the server")
+ ErrBadSignatureAlgorithm = errors.New("The JWS was signed with an algorithm the server does not support")
+ ErrCAA = errors.New("Certification Authority Authorization (CAA) records forbid the CA from issuing a certificate")
+ ErrCompound = errors.New("Specific error conditions are indicated in the 'subproblems' array")
+ ErrConnection = errors.New("The server could not connect to validation target")
+ ErrDNS = errors.New("There was a problem with a DNS query during identifier validation")
+ ErrExternalAccountRequired = errors.New("The request must include a value for the 'externalAccountBinding' field")
+ ErrIncorrectResponse = errors.New("Response received didn't match the challenge's requirements")
+ ErrInvalidContact = errors.New("A contact URL for an account was invalid")
+ ErrMalformed = errors.New("The request message was malformed")
+ ErrOrderNotReady = errors.New("The request attempted to finalize an order that is not ready to be finalized")
+ ErrRateLimited = errors.New("The request exceeds a rate limit")
+ ErrRejectedIdentifier = errors.New("The server will not issue certificates for the identifier")
+ ErrServerInternal = errors.New("The server experienced an internal error")
+ ErrTLS = errors.New("The server received a TLS error during validation")
+ ErrUnauthorized = errors.New("The client lacks sufficient authorization")
+ ErrUnsupportedContact = errors.New("A contact URL for an account used an unsupported protocol scheme")
+ ErrUnsupportedIdentifier = errors.New("An identifier is of an unsupported type")
+ 
ErrUserActionRequired = errors.New("Visit the 'instance' URL and take actions specified there") +) + +// Mapping of err->name; see table in RFC 8555 Section 6.7. Errors. +var errIdMappings = map[error]string{ + ErrAccountDoesNotExist: "accountDoesNotExist", + ErrAlreadyRevoked: "alreadyRevoked", + ErrBadCSR: "badCSR", + ErrBadNonce: "badNonce", + ErrBadPublicKey: "badPublicKey", + ErrBadRevocationReason: "badRevocationReason", + ErrBadSignatureAlgorithm: "badSignatureAlgorithm", + ErrCAA: "caa", + ErrCompound: "compound", + ErrConnection: "connection", + ErrDNS: "dns", + ErrExternalAccountRequired: "externalAccountRequired", + ErrIncorrectResponse: "incorrectResponse", + ErrInvalidContact: "invalidContact", + ErrMalformed: "malformed", + ErrOrderNotReady: "orderNotReady", + ErrRateLimited: "rateLimited", + ErrRejectedIdentifier: "rejectedIdentifier", + ErrServerInternal: "serverInternal", + ErrTLS: "tls", + ErrUnauthorized: "unauthorized", + ErrUnsupportedContact: "unsupportedContact", + ErrUnsupportedIdentifier: "unsupportedIdentifier", + ErrUserActionRequired: "userActionRequired", +} + +// Mapping of err->status codes; see table in RFC 8555 Section 6.7. Errors. +var errCodeMappings = map[error]int{ + ErrAccountDoesNotExist: http.StatusBadRequest, // See RFC 8555 Section 7.3.1. Finding an Account URL Given a Key. + ErrAlreadyRevoked: http.StatusBadRequest, + ErrBadCSR: http.StatusBadRequest, + ErrBadNonce: http.StatusBadRequest, + ErrBadPublicKey: http.StatusBadRequest, + ErrBadRevocationReason: http.StatusBadRequest, + ErrBadSignatureAlgorithm: http.StatusBadRequest, + ErrCAA: http.StatusForbidden, + ErrCompound: http.StatusBadRequest, + ErrConnection: http.StatusInternalServerError, + ErrDNS: http.StatusInternalServerError, + ErrExternalAccountRequired: http.StatusUnauthorized, + ErrIncorrectResponse: http.StatusBadRequest, + ErrInvalidContact: http.StatusBadRequest, + ErrMalformed: http.StatusBadRequest, + ErrOrderNotReady: http.StatusForbidden, // See RFC 8555 Section 7.4. Applying for Certificate Issuance. 
+	ErrRateLimited:             http.StatusTooManyRequests,
+	ErrRejectedIdentifier:      http.StatusBadRequest,
+	ErrServerInternal:          http.StatusInternalServerError,
+	ErrTLS:                     http.StatusInternalServerError,
+	ErrUnauthorized:            http.StatusUnauthorized,
+	ErrUnsupportedContact:      http.StatusBadRequest,
+	ErrUnsupportedIdentifier:   http.StatusBadRequest,
+	ErrUserActionRequired:      http.StatusUnauthorized,
+}
+
+type ErrorResponse struct {
+	StatusCode  int              `json:"-"`
+	Type        string           `json:"type"`
+	Detail      string           `json:"detail"`
+	Subproblems []*ErrorResponse `json:"subproblems"`
+}
+
+func (e *ErrorResponse) MarshalForStorage() map[string]interface{} {
+	subProblems := []map[string]interface{}{}
+	for _, subProblem := range e.Subproblems {
+		subProblems = append(subProblems, subProblem.MarshalForStorage())
+	}
+	return map[string]interface{}{
+		"status":      e.StatusCode,
+		"type":        e.Type,
+		"detail":      e.Detail,
+		"subproblems": subProblems,
+	}
+}
+
+func (e *ErrorResponse) Marshal() (*logical.Response, error) {
+	body, err := json.Marshal(e)
+	if err != nil {
+		return nil, fmt.Errorf("failed marshalling of error response: %w", err)
+	}
+
+	var resp logical.Response
+	resp.Data = map[string]interface{}{
+		logical.HTTPContentType: ErrorContentType,
+		logical.HTTPRawBody:     body,
+		logical.HTTPStatusCode:  e.StatusCode,
+	}
+
+	return &resp, nil
+}
+
+func FindType(given error) (err error, id string, code int, found bool) {
+	matchedError := false
+	for err, id = range errIdMappings {
+		if errors.Is(given, err) {
+			matchedError = true
+			break
+		}
+	}
+
+	// If the given error did not match one of the standard ACME errors,
+	// fall back to ErrServerInternal.
+	if !matchedError {
+		err = ErrServerInternal
+		id = errIdMappings[err]
+	}
+
+	code = errCodeMappings[err]
+	found = matchedError
+
+	return
+}
+
+func TranslateError(given error) (*logical.Response, error) {
+	if errors.Is(given, logical.ErrReadOnly) {
+		return nil, given
+	}
+
+	if errors.Is(given, ErrAcmeDisabled) {
+		return logical.RespondWithStatusCode(nil, nil, http.StatusNotFound)
+	}
+
+	body := TranslateErrorToErrorResponse(given)
+
+	return body.Marshal()
+}
+
+func TranslateErrorToErrorResponse(given error) ErrorResponse {
+	// We're multierror aware here: if we're given a list of errors, assume
+	// they're structured so the first error is the outer error and the inner
+	// subproblems are subsequent in the multierror.
+	var remaining []error
+	if unwrapped, ok := given.(*multierror.Error); ok {
+		remaining = unwrapped.Errors[1:]
+		given = unwrapped.Errors[0]
+	}
+
+	_, id, code, found := FindType(given)
+	if !found && len(remaining) > 0 {
+		// Translate multierrors into a generic error code.
+		id = errIdMappings[ErrCompound]
+		code = errCodeMappings[ErrCompound]
+	}
+
+	var body ErrorResponse
+	body.Type = ErrorPrefix + id
+	body.Detail = given.Error()
+	body.StatusCode = code
+
+	for _, subgiven := range remaining {
+		_, subid, _, _ := FindType(subgiven)
+
+		var sub ErrorResponse
+		sub.Type = ErrorPrefix + subid
+		sub.Detail = subgiven.Error()
+
+		body.Subproblems = append(body.Subproblems, &sub)
+	}
+	return body
+}
diff --git a/builtin/logical/pki/acme_jws.go b/builtin/logical/pki/acme_jws.go
new file mode 100644
index 0000000..3f6ba6d
--- /dev/null
+++ b/builtin/logical/pki/acme_jws.go
@@ -0,0 +1,278 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "bytes" + "crypto" + "encoding/base64" + "encoding/json" + "fmt" + "strings" + + "github.com/go-jose/go-jose/v3" +) + +var AllowedOuterJWSTypes = map[string]interface{}{ + "RS256": true, + "RS384": true, + "RS512": true, + "PS256": true, + "PS384": true, + "PS512": true, + "ES256": true, + "ES384": true, + "ES512": true, + "EdDSA2": true, +} + +var AllowedEabJWSTypes = map[string]interface{}{ + "HS256": true, + "HS384": true, + "HS512": true, +} + +// This wraps a JWS message structure. +type jwsCtx struct { + Algo string `json:"alg"` + Kid string `json:"kid"` + Jwk json.RawMessage `json:"jwk"` + Nonce string `json:"nonce"` + Url string `json:"url"` + Key jose.JSONWebKey `json:"-"` + Existing bool `json:"-"` +} + +func (c *jwsCtx) GetKeyThumbprint() (string, error) { + keyThumbprint, err := c.Key.Thumbprint(crypto.SHA256) + if err != nil { + return "", fmt.Errorf("failed creating thumbprint: %w", err) + } + return base64.RawURLEncoding.EncodeToString(keyThumbprint), nil +} + +func UnmarshalEabJwsJson(eabBytes []byte) (*jwsCtx, error) { + var eabJws jwsCtx + var err error + if err = json.Unmarshal(eabBytes, &eabJws); err != nil { + return nil, err + } + + if eabJws.Kid == "" { + return nil, fmt.Errorf("invalid header: got missing required field 'kid': %w", ErrMalformed) + } + + if _, present := AllowedEabJWSTypes[eabJws.Algo]; !present { + return nil, fmt.Errorf("invalid header: unexpected value for 'algo': %w", ErrMalformed) + } + + return &eabJws, nil +} + +func (c *jwsCtx) UnmarshalOuterJwsJson(a *acmeState, ac *acmeContext, jws []byte) error { + var err error + if err = json.Unmarshal(jws, c); err != nil { + return err + } + + if c.Kid != "" && len(c.Jwk) > 0 { + // See RFC 8555 Section 6.2. Request Authentication: + // + // > The "jwk" and "kid" fields are mutually exclusive. Servers MUST + // > reject requests that contain both. + return fmt.Errorf("invalid header: got both account 'kid' and 'jwk' in the same message; expected only one: %w", ErrMalformed) + } + + if c.Kid == "" && len(c.Jwk) == 0 { + // See RFC 8555 Section 6.2. Request Authentication: + // + // > Either "jwk" (JSON Web Key) or "kid" (Key ID) as specified + // > below + return fmt.Errorf("invalid header: got neither required fields of 'kid' nor 'jwk': %w", ErrMalformed) + } + + if _, present := AllowedOuterJWSTypes[c.Algo]; !present { + // See RFC 8555 Section 6.2. Request Authentication: + // + // > The JWS Protected Header MUST include the following fields: + // > + // > - "alg" (Algorithm) + // > + // > * This field MUST NOT contain "none" or a Message + // > Authentication Code (MAC) algorithm (e.g. one in which the + // > algorithm registry description mentions MAC/HMAC). + return fmt.Errorf("invalid header: unexpected value for 'algo': %w", ErrMalformed) + } + + if c.Kid != "" { + // Load KID from storage first. + kid := getKeyIdFromAccountUrl(c.Kid) + c.Jwk, err = a.LoadJWK(ac, kid) + if err != nil { + return err + } + c.Kid = kid // Use the uuid itself, not the full account url that was originally provided to us. 
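+		// At this point c.Jwk holds the key loaded from the stored account
+		// entry, so the jose.JSONWebKey unmarshaled below (and later used
+		// for signature verification) is the key we have on record, not
+		// one supplied by the client.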
+ c.Existing = true + } + + if err = c.Key.UnmarshalJSON(c.Jwk); err != nil { + return err + } + + if !c.Key.Valid() { + return fmt.Errorf("received invalid jwk: %w", ErrMalformed) + } + + if c.Kid == "" { + c.Kid = genUuid() + c.Existing = false + } + + return nil +} + +func getKeyIdFromAccountUrl(accountUrl string) string { + pieces := strings.Split(accountUrl, "/") + return pieces[len(pieces)-1] +} + +func hasValues(h jose.Header) bool { + return h.KeyID != "" || h.JSONWebKey != nil || h.Algorithm != "" || h.Nonce != "" || len(h.ExtraHeaders) > 0 +} + +func (c *jwsCtx) VerifyJWS(signature string) (map[string]interface{}, error) { + // See RFC 8555 Section 6.2. Request Authentication: + // + // > The JWS Unencoded Payload Option [RFC7797] MUST NOT be used + // + // This is validated by go-jose. + sig, err := jose.ParseSigned(signature) + if err != nil { + return nil, fmt.Errorf("error parsing signature: %s: %w", err, ErrMalformed) + } + + if len(sig.Signatures) > 1 { + // See RFC 8555 Section 6.2. Request Authentication: + // + // > The JWS MUST NOT have multiple signatures + return nil, fmt.Errorf("request had multiple signatures: %w", ErrMalformed) + } + + if hasValues(sig.Signatures[0].Unprotected) { + // See RFC 8555 Section 6.2. Request Authentication: + // + // > The JWS Unprotected Header [RFC7515] MUST NOT be used + return nil, fmt.Errorf("request had unprotected headers: %w", ErrMalformed) + } + + payload, err := sig.Verify(c.Key) + if err != nil { + return nil, err + } + + if len(payload) == 0 { + // Distinguish POST-AS-GET from POST-with-an-empty-body. + return nil, nil + } + + var m map[string]interface{} + if err := json.Unmarshal(payload, &m); err != nil { + return nil, fmt.Errorf("failed to json unmarshal 'payload': %s: %w", err, ErrMalformed) + } + + return m, nil +} + +func verifyEabPayload(acmeState *acmeState, ac *acmeContext, outer *jwsCtx, expectedPath string, payload map[string]interface{}) (*eabType, error) { + // Parse the key out. 
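+	// Per RFC 8555 Section 7.3.4, the "externalAccountBinding" field is
+	// itself a JWS: the protected header carries the EAB key identifier and
+	// a MAC-based "alg", and the payload is the account public key in JWK
+	// form, signed with the MAC key provisioned out of band.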
+	rawProtectedBase64, ok := payload["protected"]
+	if !ok {
+		return nil, fmt.Errorf("missing required field 'protected': %w", ErrMalformed)
+	}
+	jwkBase64, ok := rawProtectedBase64.(string)
+	if !ok {
+		return nil, fmt.Errorf("failed to parse eab 'protected' field: %w", ErrMalformed)
+	}
+
+	jwkBytes, err := base64.RawURLEncoding.DecodeString(jwkBase64)
+	if err != nil {
+		return nil, fmt.Errorf("failed to base64 parse eab 'protected': %s: %w", err, ErrMalformed)
+	}
+
+	eabJws, err := UnmarshalEabJwsJson(jwkBytes)
+	if err != nil {
+		return nil, fmt.Errorf("failed to json unmarshal eab 'protected': %w", err)
+	}
+
+	if len(eabJws.Url) == 0 {
+		return nil, fmt.Errorf("missing required parameter 'url' in eab 'protected': %w", ErrMalformed)
+	}
+	expectedUrl := ac.clusterUrl.JoinPath(expectedPath).String()
+	if expectedUrl != eabJws.Url {
+		return nil, fmt.Errorf("invalid value for 'url' in eab 'protected': got '%v' expected '%v': %w", eabJws.Url, expectedUrl, ErrUnauthorized)
+	}
+
+	if len(eabJws.Nonce) != 0 {
+		return nil, fmt.Errorf("nonce should not be provided in eab 'protected': %w", ErrMalformed)
+	}
+
+	rawPayloadBase64, ok := payload["payload"]
+	if !ok {
+		return nil, fmt.Errorf("missing required field eab 'payload': %w", ErrMalformed)
+	}
+	payloadBase64, ok := rawPayloadBase64.(string)
+	if !ok {
+		return nil, fmt.Errorf("failed to parse 'payload' field: %w", ErrMalformed)
+	}
+
+	rawSignatureBase64, ok := payload["signature"]
+	if !ok {
+		return nil, fmt.Errorf("missing required field 'signature': %w", ErrMalformed)
+	}
+	signatureBase64, ok := rawSignatureBase64.(string)
+	if !ok {
+		return nil, fmt.Errorf("failed to parse 'signature' field: %w", ErrMalformed)
+	}
+
+	// go-jose only seems to support compact signature encodings.
+	compactSig := fmt.Sprintf("%v.%v.%v", jwkBase64, payloadBase64, signatureBase64)
+	sig, err := jose.ParseSigned(compactSig)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing eab signature: %s: %w", err, ErrMalformed)
+	}
+
+	if len(sig.Signatures) > 1 {
+		// See RFC 8555 Section 6.2. Request Authentication:
+		//
+		// > The JWS MUST NOT have multiple signatures
+		return nil, fmt.Errorf("eab had multiple signatures: %w", ErrMalformed)
+	}
+
+	if hasValues(sig.Signatures[0].Unprotected) {
+		// See RFC 8555 Section 6.2. Request Authentication:
+		//
+		// > The JWS Unprotected Header [RFC7515] MUST NOT be used
+		return nil, fmt.Errorf("eab had unprotected headers: %w", ErrMalformed)
+	}
+
+	// Load the EAB to validate the signature against
+	eabEntry, err := acmeState.LoadEab(ac.sc, eabJws.Kid)
+	if err != nil {
+		return nil, fmt.Errorf("%w: failed to verify eab", ErrUnauthorized)
+	}
+
+	verifiedPayload, err := sig.Verify(eabEntry.PrivateBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	// Make sure the eab payload matches the outer JWK key value
+	if !bytes.Equal(outer.Jwk, verifiedPayload) {
+		return nil, fmt.Errorf("eab payload does not match outer JWK key: %w", ErrMalformed)
+	}
+
+	if eabEntry.AcmeDirectory != ac.acmeDirectory {
+		// This EAB was not created for this specific ACME directory, reject it
+		return nil, fmt.Errorf("%w: failed to verify eab", ErrUnauthorized)
+	}
+
+	return eabEntry, nil
+}
diff --git a/builtin/logical/pki/acme_state.go b/builtin/logical/pki/acme_state.go
new file mode 100644
index 0000000..8d714c0
--- /dev/null
+++ b/builtin/logical/pki/acme_state.go
@@ -0,0 +1,676 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "crypto/rand" + "encoding/base64" + "errors" + "fmt" + "io" + "net" + "path" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/go-secure-stdlib/nonceutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + // How many bytes are in a token. Per RFC 8555 Section + // 8.3. HTTP Challenge and Section 11.3 Token Entropy: + // + // > token (required, string): A random value that uniquely identifies + // > the challenge. This value MUST have at least 128 bits of entropy. + tokenBytes = 128 / 8 + + // Path Prefixes + acmePathPrefix = "acme/" + acmeAccountPrefix = acmePathPrefix + "accounts/" + acmeThumbprintPrefix = acmePathPrefix + "account-thumbprints/" + acmeValidationPrefix = acmePathPrefix + "validations/" + acmeEabPrefix = acmePathPrefix + "eab/" +) + +type acmeState struct { + nonces nonceutil.NonceService + + validator *ACMEChallengeEngine + + configDirty *atomic.Bool + _config sync.RWMutex + config acmeConfigEntry +} + +type acmeThumbprint struct { + Kid string `json:"kid"` + Thumbprint string `json:"-"` +} + +func NewACMEState() *acmeState { + state := &acmeState{ + nonces: nonceutil.NewNonceService(), + validator: NewACMEChallengeEngine(), + configDirty: new(atomic.Bool), + } + // Config hasn't been loaded yet; mark dirty. + state.configDirty.Store(true) + + return state +} + +func (a *acmeState) Initialize(b *backend, sc *storageContext) error { + // Initialize the nonce service. + if err := a.nonces.Initialize(); err != nil { + return fmt.Errorf("failed to initialize the ACME nonce service: %w", err) + } + + // Load the ACME config. + _, err := a.getConfigWithUpdate(sc) + if err != nil { + return fmt.Errorf("error initializing ACME engine: %w", err) + } + + if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary | consts.ReplicationPerformanceStandby) { + // It is assumed, that if the node does become the active node later + // the plugin is re-initialized, so this is safe. It also spares the node + // from loading the existing queue into memory for no reason. + b.Logger().Debug("Not on an active node, skipping starting ACME challenge validation engine") + return nil + } + // Kick off our ACME challenge validation engine. + go a.validator.Run(b, a, sc) + + // All good. + return nil +} + +func (a *acmeState) Shutdown(b *backend) { + // If we aren't the active node, nothing to shutdown + if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary | consts.ReplicationPerformanceStandby) { + return + } + + a.validator.Closing <- struct{}{} +} + +func (a *acmeState) markConfigDirty() { + a.configDirty.Store(true) +} + +func (a *acmeState) reloadConfigIfRequired(sc *storageContext) error { + if !a.configDirty.Load() { + return nil + } + + a._config.Lock() + defer a._config.Unlock() + + if !a.configDirty.Load() { + // Someone beat us to grabbing the above write lock and already + // updated the config. 
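+		// (Classic double-checked locking: the dirty flag is re-read under
+		// the write lock so that only one goroutine reloads from storage.)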
+		return nil
+	}
+
+	config, err := sc.getAcmeConfig()
+	if err != nil {
+		return fmt.Errorf("failed reading config: %w", err)
+	}
+
+	a.config = *config
+	a.configDirty.Store(false)
+
+	return nil
+}
+
+func (a *acmeState) getConfigWithUpdate(sc *storageContext) (*acmeConfigEntry, error) {
+	if err := a.reloadConfigIfRequired(sc); err != nil {
+		return nil, err
+	}
+
+	a._config.RLock()
+	defer a._config.RUnlock()
+
+	configCopy := a.config
+	return &configCopy, nil
+}
+
+func generateRandomBase64(srcBytes int) (string, error) {
+	data := make([]byte, srcBytes)
+	if _, err := io.ReadFull(rand.Reader, data); err != nil {
+		return "", err
+	}
+
+	return base64.RawURLEncoding.EncodeToString(data), nil
+}
+
+func (a *acmeState) GetNonce() (string, time.Time, error) {
+	return a.nonces.Get()
+}
+
+func (a *acmeState) RedeemNonce(nonce string) bool {
+	return a.nonces.Redeem(nonce)
+}
+
+func (a *acmeState) DoTidyNonces() {
+	a.nonces.Tidy()
+}
+
+type ACMEAccountStatus string
+
+func (aas ACMEAccountStatus) String() string {
+	return string(aas)
+}
+
+const (
+	AccountStatusValid       ACMEAccountStatus = "valid"
+	AccountStatusDeactivated ACMEAccountStatus = "deactivated"
+	AccountStatusRevoked     ACMEAccountStatus = "revoked"
+)
+
+type acmeAccount struct {
+	KeyId                string            `json:"-"`
+	Status               ACMEAccountStatus `json:"status"`
+	Contact              []string          `json:"contact"`
+	TermsOfServiceAgreed bool              `json:"terms-of-service-agreed"`
+	Jwk                  []byte            `json:"jwk"`
+	AcmeDirectory        string            `json:"acme-directory"`
+	AccountCreatedDate   time.Time         `json:"account-created-date"`
+	MaxCertExpiry        time.Time         `json:"account-max-cert-expiry"`
+	AccountRevokedDate   time.Time         `json:"account-revoked-date"`
+	Eab                  *eabType          `json:"eab"`
+}
+
+type acmeOrder struct {
+	OrderId                 string              `json:"-"`
+	AccountId               string              `json:"account-id"`
+	Status                  ACMEOrderStatusType `json:"status"`
+	Expires                 time.Time           `json:"expires"`
+	Identifiers             []*ACMEIdentifier   `json:"identifiers"`
+	AuthorizationIds        []string            `json:"authorization-ids"`
+	CertificateSerialNumber string              `json:"cert-serial-number"`
+	CertificateExpiry       time.Time           `json:"cert-expiry"`
+	// The actual issuer UUID that issued the certificate, blank if an order exists but no certificate was issued.
+	IssuerId issuerID `json:"issuer-id"`
+}
+
+func (o acmeOrder) getIdentifierDNSValues() []string {
+	var identifiers []string
+	for _, value := range o.Identifiers {
+		if value.Type == ACMEDNSIdentifier {
+			// Here, because of wildcard processing, we need to use the
+			// original value provided by the caller rather than the
+			// post-modification (trimmed '*.' prefix) value.
+			identifiers = append(identifiers, value.OriginalValue)
+		}
+	}
+	return identifiers
+}
+
+func (o acmeOrder) getIdentifierIPValues() []net.IP {
+	var identifiers []net.IP
+	for _, value := range o.Identifiers {
+		if value.Type == ACMEIPIdentifier {
+			identifiers = append(identifiers, net.ParseIP(value.Value))
+		}
+	}
+	return identifiers
+}
+
+func (a *acmeState) CreateAccount(ac *acmeContext, c *jwsCtx, contact []string, termsOfServiceAgreed bool, eab *eabType) (*acmeAccount, error) {
+	// Write out the thumbprint value/entry first; if we get an error mid-way through,
+	// this is easier to recover from. The new kid with the same existing public key
+	// will rewrite the thumbprint entry. This goes hand in hand with LoadAccountByKey,
+	// which will return a nil, nil value if the referenced kid in a loaded thumbprint
+	// does not exist. This effectively makes this self-healing IF the end-user
+	// re-attempts the account creation with the same public key.
+	thumbprint, err := c.GetKeyThumbprint()
+	if err != nil {
+		return nil, fmt.Errorf("failed generating thumbprint: %w", err)
+	}
+
+	thumbPrint := &acmeThumbprint{
+		Kid:        c.Kid,
+		Thumbprint: thumbprint,
+	}
+	thumbPrintEntry, err := logical.StorageEntryJSON(acmeThumbprintPrefix+thumbprint, thumbPrint)
+	if err != nil {
+		return nil, fmt.Errorf("error generating account thumbprint entry: %w", err)
+	}
+
+	if err = ac.sc.Storage.Put(ac.sc.Context, thumbPrintEntry); err != nil {
+		return nil, fmt.Errorf("error writing account thumbprint entry: %w", err)
+	}
+
+	// Now write out the main value that the thumbprint points to.
+	acct := &acmeAccount{
+		KeyId:                c.Kid,
+		Contact:              contact,
+		TermsOfServiceAgreed: termsOfServiceAgreed,
+		Jwk:                  c.Jwk,
+		Status:               AccountStatusValid,
+		AcmeDirectory:        ac.acmeDirectory,
+		AccountCreatedDate:   time.Now(),
+		Eab:                  eab,
+	}
+	json, err := logical.StorageEntryJSON(acmeAccountPrefix+c.Kid, acct)
+	if err != nil {
+		return nil, fmt.Errorf("error creating account entry: %w", err)
+	}
+
+	if err := ac.sc.Storage.Put(ac.sc.Context, json); err != nil {
+		return nil, fmt.Errorf("error writing account entry: %w", err)
+	}
+
+	return acct, nil
+}
+
+func (a *acmeState) UpdateAccount(sc *storageContext, acct *acmeAccount) error {
+	json, err := logical.StorageEntryJSON(acmeAccountPrefix+acct.KeyId, acct)
+	if err != nil {
+		return fmt.Errorf("error creating account entry: %w", err)
+	}
+
+	if err := sc.Storage.Put(sc.Context, json); err != nil {
+		return fmt.Errorf("error writing account entry: %w", err)
+	}
+
+	return nil
+}
+
+// LoadAccount will load the account object based on the passed-in keyId field value,
+// returning an error if the account does not exist.
+func (a *acmeState) LoadAccount(ac *acmeContext, keyId string) (*acmeAccount, error) {
+	entry, err := ac.sc.Storage.Get(ac.sc.Context, acmeAccountPrefix+keyId)
+	if err != nil {
+		return nil, fmt.Errorf("error loading account: %w", err)
+	}
+	if entry == nil {
+		return nil, fmt.Errorf("account not found: %w", ErrAccountDoesNotExist)
+	}
+
+	var acct acmeAccount
+	err = entry.DecodeJSON(&acct)
+	if err != nil {
+		return nil, fmt.Errorf("error decoding account: %w", err)
+	}
+
+	if acct.AcmeDirectory != ac.acmeDirectory {
+		return nil, fmt.Errorf("%w: account part of different ACME directory path", ErrMalformed)
+	}
+
+	acct.KeyId = keyId
+
+	return &acct, nil
+}
+
+// LoadAccountByKey will attempt to load the account based on a key thumbprint. If the thumbprint
+// or kid is unknown, a nil, nil will be returned.
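+// Callers rely on this during new-account processing to detect whether a key
+// is already registered (see RFC 8555 Section 7.3.1).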
+func (a *acmeState) LoadAccountByKey(ac *acmeContext, keyThumbprint string) (*acmeAccount, error) { + thumbprintEntry, err := ac.sc.Storage.Get(ac.sc.Context, acmeThumbprintPrefix+keyThumbprint) + if err != nil { + return nil, fmt.Errorf("failed loading acme thumbprintEntry for key: %w", err) + } + if thumbprintEntry == nil { + return nil, nil + } + + var thumbprint acmeThumbprint + err = thumbprintEntry.DecodeJSON(&thumbprint) + if err != nil { + return nil, fmt.Errorf("failed decoding thumbprint entry: %s: %w", keyThumbprint, err) + } + + if len(thumbprint.Kid) == 0 { + return nil, fmt.Errorf("empty kid within thumbprint entry: %s", keyThumbprint) + } + + acct, err := a.LoadAccount(ac, thumbprint.Kid) + if err != nil { + // If we fail to lookup the account that the thumbprint entry references, assume a bad + // write previously occurred in which we managed to write out the thumbprint but failed + // writing out the main account information. + if errors.Is(err, ErrAccountDoesNotExist) { + return nil, nil + } + return nil, err + } + + return acct, nil +} + +func (a *acmeState) LoadJWK(ac *acmeContext, keyId string) ([]byte, error) { + key, err := a.LoadAccount(ac, keyId) + if err != nil { + return nil, err + } + + if len(key.Jwk) == 0 { + return nil, fmt.Errorf("malformed key entry lacks JWK") + } + + return key.Jwk, nil +} + +func (a *acmeState) LoadAuthorization(ac *acmeContext, userCtx *jwsCtx, authId string) (*ACMEAuthorization, error) { + if authId == "" { + return nil, fmt.Errorf("malformed authorization identifier") + } + + authorizationPath := getAuthorizationPath(userCtx.Kid, authId) + + authz, err := loadAuthorizationAtPath(ac.sc, authorizationPath) + if err != nil { + return nil, err + } + + if userCtx.Kid != authz.AccountId { + return nil, ErrUnauthorized + } + + return authz, nil +} + +func loadAuthorizationAtPath(sc *storageContext, authorizationPath string) (*ACMEAuthorization, error) { + entry, err := sc.Storage.Get(sc.Context, authorizationPath) + if err != nil { + return nil, fmt.Errorf("error loading authorization: %w", err) + } + + if entry == nil { + return nil, fmt.Errorf("authorization does not exist: %w", ErrMalformed) + } + + var authz ACMEAuthorization + err = entry.DecodeJSON(&authz) + if err != nil { + return nil, fmt.Errorf("error decoding authorization: %w", err) + } + + return &authz, nil +} + +func (a *acmeState) SaveAuthorization(ac *acmeContext, authz *ACMEAuthorization) error { + path := getAuthorizationPath(authz.AccountId, authz.Id) + return saveAuthorizationAtPath(ac.sc, path, authz) +} + +func saveAuthorizationAtPath(sc *storageContext, path string, authz *ACMEAuthorization) error { + if authz.Id == "" { + return fmt.Errorf("invalid authorization, missing id") + } + + if authz.AccountId == "" { + return fmt.Errorf("invalid authorization, missing account id") + } + + json, err := logical.StorageEntryJSON(path, authz) + if err != nil { + return fmt.Errorf("error creating authorization entry: %w", err) + } + + if err = sc.Storage.Put(sc.Context, json); err != nil { + return fmt.Errorf("error writing authorization entry: %w", err) + } + + return nil +} + +func (a *acmeState) ParseRequestParams(ac *acmeContext, req *logical.Request, data *framework.FieldData) (*jwsCtx, map[string]interface{}, error) { + var c jwsCtx + var m map[string]interface{} + + // Parse the key out. 
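+	// ACME requests arrive as a flattened JWS JSON object with three
+	// base64url fields: "protected", "payload", and "signature" (RFC 8555
+	// Section 6.2).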
+ rawJWKBase64, ok := data.GetOk("protected") + if !ok { + return nil, nil, fmt.Errorf("missing required field 'protected': %w", ErrMalformed) + } + jwkBase64 := rawJWKBase64.(string) + + jwkBytes, err := base64.RawURLEncoding.DecodeString(jwkBase64) + if err != nil { + return nil, nil, fmt.Errorf("failed to base64 parse 'protected': %s: %w", err, ErrMalformed) + } + if err = c.UnmarshalOuterJwsJson(a, ac, jwkBytes); err != nil { + return nil, nil, fmt.Errorf("failed to json unmarshal 'protected': %w", err) + } + + // Since we already parsed the header to verify the JWS context, we + // should read and redeem the nonce here too, to avoid doing any extra + // work if it is invalid. + if !a.RedeemNonce(c.Nonce) { + return nil, nil, fmt.Errorf("invalid or reused nonce: %w", ErrBadNonce) + } + + // If the path is incorrect, reject the request. + // + // See RFC 8555 Section 6.4. Request URL Integrity: + // + // > As noted in Section 6.2, all ACME request objects carry a "url" + // > header parameter in their protected header. ... On receiving such + // > an object in an HTTP request, the server MUST compare the "url" + // > header parameter to the request URL. If the two do not match, + // > then the server MUST reject the request as unauthorized. + if len(c.Url) == 0 { + return nil, nil, fmt.Errorf("missing required parameter 'url' in 'protected': %w", ErrMalformed) + } + if ac.clusterUrl.JoinPath(req.Path).String() != c.Url { + return nil, nil, fmt.Errorf("invalid value for 'url' in 'protected': got '%v' expected '%v': %w", c.Url, ac.clusterUrl.JoinPath(req.Path).String(), ErrUnauthorized) + } + + rawPayloadBase64, ok := data.GetOk("payload") + if !ok { + return nil, nil, fmt.Errorf("missing required field 'payload': %w", ErrMalformed) + } + payloadBase64 := rawPayloadBase64.(string) + + rawSignatureBase64, ok := data.GetOk("signature") + if !ok { + return nil, nil, fmt.Errorf("missing required field 'signature': %w", ErrMalformed) + } + signatureBase64 := rawSignatureBase64.(string) + + // go-jose only seems to support compact signature encodings. 
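+	// Rejoin the three segments with '.' to rebuild the compact form
+	// (protected.payload.signature) before verification.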
+	compactSig := fmt.Sprintf("%v.%v.%v", jwkBase64, payloadBase64, signatureBase64)
+	m, err = c.VerifyJWS(compactSig)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to verify signature: %w", err)
+	}
+
+	return &c, m, nil
+}
+
+func (a *acmeState) LoadOrder(ac *acmeContext, userCtx *jwsCtx, orderId string) (*acmeOrder, error) {
+	path := getOrderPath(userCtx.Kid, orderId)
+	entry, err := ac.sc.Storage.Get(ac.sc.Context, path)
+	if err != nil {
+		return nil, fmt.Errorf("error loading order: %w", err)
+	}
+
+	if entry == nil {
+		return nil, fmt.Errorf("order does not exist: %w", ErrMalformed)
+	}
+
+	var order acmeOrder
+	err = entry.DecodeJSON(&order)
+	if err != nil {
+		return nil, fmt.Errorf("error decoding order: %w", err)
+	}
+
+	if userCtx.Kid != order.AccountId {
+		return nil, ErrUnauthorized
+	}
+
+	order.OrderId = orderId
+
+	return &order, nil
+}
+
+func (a *acmeState) SaveOrder(ac *acmeContext, order *acmeOrder) error {
+	if order.OrderId == "" {
+		return fmt.Errorf("invalid order, missing order id")
+	}
+
+	if order.AccountId == "" {
+		return fmt.Errorf("invalid order, missing account id")
+	}
+	path := getOrderPath(order.AccountId, order.OrderId)
+	json, err := logical.StorageEntryJSON(path, order)
+	if err != nil {
+		return fmt.Errorf("error serializing order entry: %w", err)
+	}
+
+	if err = ac.sc.Storage.Put(ac.sc.Context, json); err != nil {
+		return fmt.Errorf("error writing order entry: %w", err)
+	}
+
+	return nil
+}
+
+func (a *acmeState) ListOrderIds(sc *storageContext, accountId string) ([]string, error) {
+	accountOrderPrefixPath := acmeAccountPrefix + accountId + "/orders/"
+
+	rawOrderIds, err := sc.Storage.List(sc.Context, accountOrderPrefixPath)
+	if err != nil {
+		return nil, fmt.Errorf("failed listing order ids for account %s: %w", accountId, err)
+	}
+
+	orderIds := []string{}
+	for _, order := range rawOrderIds {
+		if strings.HasSuffix(order, "/") {
+			// skip any folders we might have for some reason
+			continue
+		}
+		orderIds = append(orderIds, order)
+	}
+	return orderIds, nil
+}
+
+type acmeCertEntry struct {
+	Serial  string `json:"-"`
+	Account string `json:"-"`
+	Order   string `json:"order"`
+}
+
+func (a *acmeState) TrackIssuedCert(ac *acmeContext, accountId string, serial string, orderId string) error {
+	path := getAcmeSerialToAccountTrackerPath(accountId, serial)
+	entry := acmeCertEntry{
+		Order: orderId,
+	}
+
+	json, err := logical.StorageEntryJSON(path, &entry)
+	if err != nil {
+		return fmt.Errorf("error serializing acme cert entry: %w", err)
+	}
+
+	if err = ac.sc.Storage.Put(ac.sc.Context, json); err != nil {
+		return fmt.Errorf("error writing acme cert entry: %w", err)
+	}
+
+	return nil
+}
+
+func (a *acmeState) GetIssuedCert(ac *acmeContext, accountId string, serial string) (*acmeCertEntry, error) {
+	path := acmeAccountPrefix + accountId + "/certs/" + normalizeSerial(serial)
+
+	entry, err := ac.sc.Storage.Get(ac.sc.Context, path)
+	if err != nil {
+		return nil, fmt.Errorf("error loading acme cert entry: %w", err)
+	}
+
+	if entry == nil {
+		return nil, fmt.Errorf("no certificate with this serial was issued for this account")
+	}
+
+	var cert acmeCertEntry
+	err = entry.DecodeJSON(&cert)
+	if err != nil {
+		return nil, fmt.Errorf("error decoding acme cert entry: %w", err)
+	}
+
+	cert.Serial = denormalizeSerial(serial)
+	cert.Account = accountId
+
+	return &cert, nil
+}
+
+func (a *acmeState) SaveEab(sc *storageContext, eab *eabType) error {
+	json, err := logical.StorageEntryJSON(path.Join(acmeEabPrefix, eab.KeyID), eab)
+	if err != nil {
+		return err
+	}
+	return sc.Storage.Put(sc.Context, json)
+}
+
+func (a *acmeState) LoadEab(sc *storageContext, eabKid string) (*eabType, error) {
+	rawEntry, err := sc.Storage.Get(sc.Context, path.Join(acmeEabPrefix, eabKid))
+	if err != nil {
+		return nil, err
+	}
+	if rawEntry == nil {
+		return nil, fmt.Errorf("%w: no eab found for kid %s", ErrStorageItemNotFound, eabKid)
+	}
+
+	var eab eabType
+	err = rawEntry.DecodeJSON(&eab)
+	if err != nil {
+		return nil, err
+	}
+
+	eab.KeyID = eabKid
+	return &eab, nil
+}
+
+func (a *acmeState) DeleteEab(sc *storageContext, eabKid string) (bool, error) {
+	rawEntry, err := sc.Storage.Get(sc.Context, path.Join(acmeEabPrefix, eabKid))
+	if err != nil {
+		return false, err
+	}
+	if rawEntry == nil {
+		return false, nil
+	}
+
+	err = sc.Storage.Delete(sc.Context, path.Join(acmeEabPrefix, eabKid))
+	if err != nil {
+		return false, err
+	}
+	return true, nil
+}
+
+func (a *acmeState) ListEabIds(sc *storageContext) ([]string, error) {
+	entries, err := sc.Storage.List(sc.Context, acmeEabPrefix)
+	if err != nil {
+		return nil, err
+	}
+	var ids []string
+	for _, entry := range entries {
+		if strings.HasSuffix(entry, "/") {
+			continue
+		}
+		ids = append(ids, entry)
+	}
+
+	return ids, nil
+}
+
+func getAcmeSerialToAccountTrackerPath(accountId string, serial string) string {
+	return acmeAccountPrefix + accountId + "/certs/" + normalizeSerial(serial)
+}
+
+func getAuthorizationPath(accountId string, authId string) string {
+	return acmeAccountPrefix + accountId + "/authorizations/" + authId
+}
+
+func getOrderPath(accountId string, orderId string) string {
+	return acmeAccountPrefix + accountId + "/orders/" + orderId
+}
+
+func getACMEToken() (string, error) {
+	return generateRandomBase64(tokenBytes)
+}
diff --git a/builtin/logical/pki/acme_state_test.go b/builtin/logical/pki/acme_state_test.go
new file mode 100644
index 0000000..8d4f121
--- /dev/null
+++ b/builtin/logical/pki/acme_state_test.go
@@ -0,0 +1,43 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package pki
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestAcmeNonces(t *testing.T) {
+	t.Parallel()
+
+	a := NewACMEState()
+	require.NoError(t, a.nonces.Initialize())
+
+	// Simple operation should succeed.
+	nonce, _, err := a.GetNonce()
+	require.NoError(t, err)
+	require.NotEmpty(t, nonce)
+
+	require.True(t, a.RedeemNonce(nonce))
+	require.False(t, a.RedeemNonce(nonce))
+
+	// Redeeming in opposite order should work.
+	var nonces []string
+	for i := 0; i < 100; i++ {
+		nonce, _, err = a.GetNonce()
+		require.NoError(t, err)
+		require.NotEmpty(t, nonce)
+		nonces = append(nonces, nonce)
+	}
+
+	for i := len(nonces) - 1; i >= 0; i-- {
+		nonce = nonces[i]
+		require.True(t, a.RedeemNonce(nonce))
+	}
+
+	for i := 0; i < len(nonces); i++ {
+		nonce = nonces[i]
+		require.False(t, a.RedeemNonce(nonce))
+	}
+}
diff --git a/builtin/logical/pki/acme_wrappers.go b/builtin/logical/pki/acme_wrappers.go
new file mode 100644
index 0000000..13ba1c3
--- /dev/null
+++ b/builtin/logical/pki/acme_wrappers.go
@@ -0,0 +1,470 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package pki
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+type acmeContext struct {
+	// baseUrl is the combination of the configured cluster local URL and the acmePath up to /acme/
+	baseUrl    *url.URL
+	clusterUrl *url.URL
+	sc         *storageContext
+	role       *roleEntry
+	issuer     *issuerEntry
+	// acmeDirectory is a string that can distinguish the various acme directories we have configured
+	// if something needs to remain locked into a directory path structure.
+	acmeDirectory string
+	eabPolicy     EabPolicy
+}
+
+func (c acmeContext) getAcmeState() *acmeState {
+	return c.sc.Backend.acmeState
+}
+
+type (
+	acmeOperation                func(acmeCtx *acmeContext, r *logical.Request, _ *framework.FieldData) (*logical.Response, error)
+	acmeParsedOperation          func(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}) (*logical.Response, error)
+	acmeAccountRequiredOperation func(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}, acct *acmeAccount) (*logical.Response, error)
+)
+
+// acmeErrorWrapper is the lowest level wrapper that translates errors into proper ACME error responses
+func acmeErrorWrapper(op framework.OperationFunc) framework.OperationFunc {
+	return func(ctx context.Context, r *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+		resp, err := op(ctx, r, data)
+		if err != nil {
+			return TranslateError(err)
+		}
+
+		return resp, nil
+	}
+}
+
+// acmeWrapper is a basic wrapper that all ACME handlers should leverage as their basis.
+// It creates a basic ACME context and validates that basic ACME configuration is set up
+// for operations. It pulls in acmeErrorWrapper to translate error messages for users,
+// but does not enforce any sort of ACME authentication.
+func (b *backend) acmeWrapper(op acmeOperation) framework.OperationFunc {
+	return acmeErrorWrapper(func(ctx context.Context, r *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+		sc := b.makeStorageContext(ctx, r.Storage)
+
+		config, err := sc.Backend.acmeState.getConfigWithUpdate(sc)
+		if err != nil {
+			return nil, fmt.Errorf("failed to fetch ACME configuration: %w", err)
+		}
+
+		// use string form in case someone messes up our config from raw storage.
+		eabPolicy, err := getEabPolicyByString(string(config.EabPolicyName))
+		if err != nil {
+			return nil, err
+		}
+
+		if isAcmeDisabled(sc, config, eabPolicy) {
+			return nil, ErrAcmeDisabled
+		}
+
+		if b.useLegacyBundleCaStorage() {
+			return nil, fmt.Errorf("%w: Can not perform ACME operations until migration has completed", ErrServerInternal)
+		}
+
+		acmeBaseUrl, clusterBase, err := getAcmeBaseUrl(sc, r)
+		if err != nil {
+			return nil, err
+		}
+
+		role, issuer, err := getAcmeRoleAndIssuer(sc, data, config)
+		if err != nil {
+			return nil, err
+		}
+
+		acmeDirectory, err := getAcmeDirectory(r)
+		if err != nil {
+			return nil, err
+		}
+
+		acmeCtx := &acmeContext{
+			baseUrl:       acmeBaseUrl,
+			clusterUrl:    clusterBase,
+			sc:            sc,
+			role:          role,
+			issuer:        issuer,
+			acmeDirectory: acmeDirectory,
+			eabPolicy:     eabPolicy,
+		}
+
+		return op(acmeCtx, r, data)
+	})
+}
+
+// acmeParsedWrapper is an ACME wrapper that will parse out the ACME request parameters, validate
+// that we have a proper signature, and pass to the operation a decoded map of arguments received.
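+// The replay nonce is redeemed during parsing, before the wrapped operation runs.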
+// This wrapper builds on top of acmeWrapper. Note that while this does perform signature
+// verification, it does not enforce that the account exists or is in a valid state.
+func (b *backend) acmeParsedWrapper(op acmeParsedOperation) framework.OperationFunc {
+	return b.acmeWrapper(func(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData) (*logical.Response, error) {
+		user, data, err := b.acmeState.ParseRequestParams(acmeCtx, r, fields)
+		if err != nil {
+			return nil, err
+		}
+
+		resp, err := op(acmeCtx, r, fields, user, data)
+
+		// Our response handlers might not add the necessary headers.
+		if resp != nil {
+			if resp.Headers == nil {
+				resp.Headers = map[string][]string{}
+			}
+
+			if _, ok := resp.Headers["Replay-Nonce"]; !ok {
+				nonce, _, err := b.acmeState.GetNonce()
+				if err != nil {
+					return nil, err
+				}
+
+				resp.Headers["Replay-Nonce"] = []string{nonce}
+			}
+
+			if _, ok := resp.Headers["Link"]; !ok {
+				resp.Headers["Link"] = genAcmeLinkHeader(acmeCtx)
+			} else {
+				directory := genAcmeLinkHeader(acmeCtx)[0]
+				addDirectory := true
+				for _, item := range resp.Headers["Link"] {
+					if item == directory {
+						addDirectory = false
+						break
+					}
+				}
+				if addDirectory {
+					resp.Headers["Link"] = append(resp.Headers["Link"], directory)
+				}
+			}
+
+			// ACME responses don't understand Vault's default encoding
+			// format. Rather than expecting everything to handle creating
+			// ACME-formatted responses, do the marshaling in one place.
+			if _, ok := resp.Data[logical.HTTPRawBody]; !ok {
+				ignored_values := map[string]bool{logical.HTTPContentType: true, logical.HTTPStatusCode: true}
+				fields := map[string]interface{}{}
+				body := map[string]interface{}{
+					logical.HTTPContentType: "application/json",
+					logical.HTTPStatusCode:  http.StatusOK,
+				}
+
+				for key, value := range resp.Data {
+					if _, present := ignored_values[key]; !present {
+						fields[key] = value
+					} else {
+						body[key] = value
+					}
+				}
+
+				rawBody, err := json.Marshal(fields)
+				if err != nil {
+					return nil, fmt.Errorf("error marshaling JSON body: %w", err)
+				}
+
+				body[logical.HTTPRawBody] = rawBody
+				resp.Data = body
+			}
+		}
+
+		return resp, err
+	})
+}
+
+// acmeAccountRequiredWrapper builds on top of acmeParsedWrapper, enforcing that the
+// request has a proper signature for an existing account, and that the account is
+// in a valid status. It passes to the operation a decoded form of the request
+// parameters as well as the ACME account the request is for.
+func (b *backend) acmeAccountRequiredWrapper(op acmeAccountRequiredOperation) framework.OperationFunc {
+	return b.acmeParsedWrapper(func(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, uc *jwsCtx, data map[string]interface{}) (*logical.Response, error) {
+		if !uc.Existing {
+			return nil, fmt.Errorf("cannot process request without a 'kid': %w", ErrMalformed)
+		}
+
+		account, err := requireValidAcmeAccount(acmeCtx, uc)
+		if err != nil {
+			return nil, err
+		}
+
+		return op(acmeCtx, r, fields, uc, data, account)
+	})
+}
+
+func requireValidAcmeAccount(acmeCtx *acmeContext, uc *jwsCtx) (*acmeAccount, error) {
+	account, err := acmeCtx.getAcmeState().LoadAccount(acmeCtx, uc.Kid)
+	if err != nil {
+		return nil, fmt.Errorf("error loading account: %w", err)
+	}
+
+	if err = acmeCtx.eabPolicy.EnforceForExistingAccount(account); err != nil {
+		return nil, err
+	}
+
+	if account.Status != AccountStatusValid {
+		// Treating "revoked" and "deactivated" as the same here.
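+		// Any non-"valid" account status is rejected as unauthorized,
+		// without distinguishing why the account left the valid state.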
+ return nil, fmt.Errorf("%w: account in status: %s", ErrUnauthorized, account.Status) + } + return account, nil +} + +// A helper function that will build up the various path patterns we want for ACME APIs. +func buildAcmeFrameworkPaths(b *backend, patternFunc func(b *backend, pattern string) *framework.Path, acmeApi string) []*framework.Path { + var patterns []*framework.Path + for _, baseUrl := range []string{ + "acme", + "roles/" + framework.GenericNameRegex("role") + "/acme", + "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/acme", + "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/roles/" + framework.GenericNameRegex("role") + "/acme", + } { + + if !strings.HasPrefix(acmeApi, "/") { + acmeApi = "/" + acmeApi + } + + path := patternFunc(b, baseUrl+acmeApi) + patterns = append(patterns, path) + } + + return patterns +} + +func getAcmeBaseUrl(sc *storageContext, r *logical.Request) (*url.URL, *url.URL, error) { + baseUrl, err := getBasePathFromClusterConfig(sc) + if err != nil { + return nil, nil, err + } + + directoryPrefix, err := getAcmeDirectory(r) + if err != nil { + return nil, nil, err + } + + return baseUrl.JoinPath(directoryPrefix), baseUrl, nil +} + +func getBasePathFromClusterConfig(sc *storageContext) (*url.URL, error) { + cfg, err := sc.getClusterConfig() + if err != nil { + return nil, fmt.Errorf("failed loading cluster config: %w", err) + } + + if cfg.Path == "" { + return nil, fmt.Errorf("ACME feature requires local cluster 'path' field configuration to be set") + } + + baseUrl, err := url.Parse(cfg.Path) + if err != nil { + return nil, fmt.Errorf("failed parsing URL configured in local cluster 'path' configuration: %s: %s", + cfg.Path, err.Error()) + } + return baseUrl, nil +} + +func getAcmeIssuer(sc *storageContext, issuerName string) (*issuerEntry, error) { + if issuerName == "" { + issuerName = defaultRef + } + issuerId, err := sc.resolveIssuerReference(issuerName) + if err != nil { + return nil, fmt.Errorf("%w: issuer does not exist", ErrMalformed) + } + + issuer, err := sc.fetchIssuerById(issuerId) + if err != nil { + return nil, fmt.Errorf("issuer failed to load: %w", err) + } + + if issuer.Usage.HasUsage(IssuanceUsage) && len(issuer.KeyID) > 0 { + return issuer, nil + } + + return nil, fmt.Errorf("%w: issuer missing proper issuance usage or key", ErrServerInternal) +} + +// getAcmeDirectory return the base acme directory path, without a leading '/' and including +// the trailing /acme/ folder which is the root of all our various directories +func getAcmeDirectory(r *logical.Request) (string, error) { + acmePath := r.Path + if !strings.HasPrefix(acmePath, "/") { + acmePath = "/" + acmePath + } + + lastIndex := strings.LastIndex(acmePath, "/acme/") + if lastIndex == -1 { + return "", fmt.Errorf("%w: unable to determine acme base folder path: %s", ErrServerInternal, acmePath) + } + + // Skip the leading '/' and return our base path with the /acme/ + return strings.TrimLeft(acmePath[0:lastIndex]+"/acme/", "/"), nil +} + +func getAcmeRoleAndIssuer(sc *storageContext, data *framework.FieldData, config *acmeConfigEntry) (*roleEntry, *issuerEntry, error) { + requestedIssuer := getRequestedAcmeIssuerFromPath(data) + requestedRole := getRequestedAcmeRoleFromPath(data) + issuerToLoad := requestedIssuer + + var role *roleEntry + var err error + + if len(requestedRole) == 0 { // Default Directory + policyType, err := getDefaultDirectoryPolicyType(config.DefaultDirectoryPolicy) + if err != nil { + return nil, nil, err + } + switch policyType { + case 
Forbid: + return nil, nil, fmt.Errorf("%w: default directory not allowed by ACME policy", ErrServerInternal) + case SignVerbatim: + role = buildSignVerbatimRoleWithNoData(&roleEntry{ + Issuer: requestedIssuer, + NoStore: false, + Name: requestedRole, + }) + case Role: + defaultRole, err := getDefaultDirectoryPolicyRole(config.DefaultDirectoryPolicy) + if err != nil { + return nil, nil, err + } + role, err = getAndValidateAcmeRole(sc, defaultRole) + if err != nil { + return nil, nil, err + } + } + } else { // Requested Role + role, err = getAndValidateAcmeRole(sc, requestedRole) + if err != nil { + return nil, nil, err + } + + // Check the Requested Role is Allowed + allowAnyRole := len(config.AllowedRoles) == 1 && config.AllowedRoles[0] == "*" + if !allowAnyRole { + + var foundRole bool + for _, name := range config.AllowedRoles { + if name == role.Name { + foundRole = true + break + } + } + + if !foundRole { + return nil, nil, fmt.Errorf("%w: specified role not allowed by ACME policy", ErrServerInternal) + } + } + + } + + // If we haven't loaded an issuer directly from our path and the specified (or default) + // role does specify an issuer prefer the role's issuer rather than the default issuer. + if len(role.Issuer) > 0 && len(requestedIssuer) == 0 { + issuerToLoad = role.Issuer + } + + issuer, err := getAcmeIssuer(sc, issuerToLoad) + if err != nil { + return nil, nil, err + } + + allowAnyIssuer := len(config.AllowedIssuers) == 1 && config.AllowedIssuers[0] == "*" + if !allowAnyIssuer { + var foundIssuer bool + for index, name := range config.AllowedIssuers { + candidateId, err := sc.resolveIssuerReference(name) + if err != nil { + return nil, nil, fmt.Errorf("failed to resolve reference for allowed_issuer entry %d: %w", index, err) + } + + if candidateId == issuer.ID { + foundIssuer = true + break + } + } + + if !foundIssuer { + return nil, nil, fmt.Errorf("%w: specified issuer not allowed by ACME policy", ErrServerInternal) + } + } + + // If not allowed in configuration, override ExtKeyUsage behavior to force it to only be + // ServerAuth within ACME issued certs + if !config.AllowRoleExtKeyUsage { + role.ExtKeyUsage = []string{"serverauth"} + role.ExtKeyUsageOIDs = []string{} + role.ServerFlag = true + role.ClientFlag = false + role.CodeSigningFlag = false + role.EmailProtectionFlag = false + } + + return role, issuer, nil +} + +func getAndValidateAcmeRole(sc *storageContext, requestedRole string) (*roleEntry, error) { + var err error + role, err := sc.Backend.getRole(sc.Context, sc.Storage, requestedRole) + if err != nil { + return nil, fmt.Errorf("%w: err loading role", ErrServerInternal) + } + + if role == nil { + return nil, fmt.Errorf("%w: role does not exist", ErrMalformed) + } + + if role.NoStore { + return nil, fmt.Errorf("%w: role can not be used as NoStore is set to true", ErrServerInternal) + } + + return role, nil +} + +func getRequestedAcmeRoleFromPath(data *framework.FieldData) string { + requestedRole := "" + roleNameRaw, present := data.GetOk("role") + if present { + requestedRole = roleNameRaw.(string) + } + return requestedRole +} + +func getRequestedAcmeIssuerFromPath(data *framework.FieldData) string { + requestedIssuer := "" + requestedIssuerRaw, present := data.GetOk(issuerRefParam) + if present { + requestedIssuer = requestedIssuerRaw.(string) + } + return requestedIssuer +} + +func isAcmeDisabled(sc *storageContext, config *acmeConfigEntry, policy EabPolicy) bool { + if !config.Enabled { + return true + } + + disableAcme, nonFatalErr := 
isPublicACMEDisabledByEnv() + if nonFatalErr != nil { + sc.Backend.Logger().Warn(fmt.Sprintf("could not parse env var '%s'", disableAcmeEnvVar), "error", nonFatalErr) + } + + // The OS environment if true will override any configuration option. + if disableAcme { + if policy.OverrideEnvDisablingPublicAcme() { + return false + } + return true + } + + return false +} diff --git a/builtin/logical/pki/acme_wrappers_test.go b/builtin/logical/pki/acme_wrappers_test.go new file mode 100644 index 0000000..4182066 --- /dev/null +++ b/builtin/logical/pki/acme_wrappers_test.go @@ -0,0 +1,125 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/require" +) + +// TestACMEIssuerRoleLoading validates the role and issuer loading logic within the base +// ACME wrapper is correct. +func TestACMEIssuerRoleLoading(t *testing.T) { + b, s := CreateBackendWithStorage(t) + + _, err := CBWrite(b, s, "config/cluster", map[string]interface{}{ + "path": "http://localhost:8200/v1/pki", + "aia_path": "http://localhost:8200/cdn/pki", + }) + require.NoError(t, err) + + _, err = CBWrite(b, s, "config/acme", map[string]interface{}{ + "enabled": true, + }) + require.NoError(t, err) + + _, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "myvault1.com", + "issuer_name": "issuer-1", + "key_type": "ec", + }) + require.NoError(t, err, "failed creating issuer issuer-1") + + _, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "myvault2.com", + "issuer_name": "issuer-2", + "key_type": "ec", + }) + require.NoError(t, err, "failed creating issuer issuer-2") + + _, err = CBWrite(b, s, "roles/role-bad-issuer", map[string]interface{}{ + issuerRefParam: "non-existant", + "no_store": "false", + }) + require.NoError(t, err, "failed creating role role-bad-issuer") + + _, err = CBWrite(b, s, "roles/role-no-store-enabled", map[string]interface{}{ + issuerRefParam: "issuer-2", + "no_store": "true", + }) + require.NoError(t, err, "failed creating role role-no-store-enabled") + + _, err = CBWrite(b, s, "roles/role-issuer-2", map[string]interface{}{ + issuerRefParam: "issuer-2", + "no_store": "false", + }) + require.NoError(t, err, "failed creating role role-issuer-2") + + tc := []struct { + name string + roleName string + issuerName string + expectedIssuerName string + expectErr bool + }{ + {name: "pass-default-use-default", roleName: "", issuerName: "", expectedIssuerName: "issuer-1", expectErr: false}, + {name: "pass-role-issuer-2", roleName: "role-issuer-2", issuerName: "", expectedIssuerName: "issuer-2", expectErr: false}, + {name: "pass-issuer-1-no-role", roleName: "", issuerName: "issuer-1", expectedIssuerName: "issuer-1", expectErr: false}, + {name: "fail-role-has-bad-issuer", roleName: "role-bad-issuer", issuerName: "", expectedIssuerName: "", expectErr: true}, + {name: "fail-role-no-store-enabled", roleName: "role-no-store-enabled", issuerName: "", expectedIssuerName: "", expectErr: true}, + {name: "fail-role-no-store-enabled", roleName: "role-no-store-enabled", issuerName: "", expectedIssuerName: "", expectErr: true}, + {name: "fail-role-does-not-exist", roleName: "non-existant", issuerName: "", expectedIssuerName: "", expectErr: true}, + {name: "fail-issuer-does-not-exist", roleName: "", issuerName: "non-existant", expectedIssuerName: "", expectErr: 
true}, + } + + for _, tt := range tc { + t.Run(tt.name, func(t *testing.T) { + f := b.acmeWrapper(func(acmeCtx *acmeContext, r *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + if tt.roleName != acmeCtx.role.Name { + return nil, fmt.Errorf("expected role %s but got %s", tt.roleName, acmeCtx.role.Name) + } + + if tt.expectedIssuerName != acmeCtx.issuer.Name { + return nil, fmt.Errorf("expected issuer %s but got %s", tt.expectedIssuerName, acmeCtx.issuer.Name) + } + + return nil, nil + }) + + var acmePath string + fieldRaw := map[string]interface{}{} + if tt.issuerName != "" { + fieldRaw[issuerRefParam] = tt.issuerName + acmePath = "issuer/" + tt.issuerName + "/" + } + if tt.roleName != "" { + fieldRaw["role"] = tt.roleName + acmePath = acmePath + "roles/" + tt.roleName + "/" + } + + acmePath = strings.TrimLeft(acmePath+"/acme/directory", "/") + + resp, err := f(context.Background(), &logical.Request{Path: acmePath, Storage: s}, &framework.FieldData{ + Raw: fieldRaw, + Schema: getCsrSignVerbatimSchemaFields(), + }) + require.NoError(t, err, "all errors should be re-encoded") + + if tt.expectErr { + require.NotEqual(t, 200, resp.Data[logical.HTTPStatusCode]) + require.Equal(t, ErrorContentType, resp.Data[logical.HTTPContentType]) + } else { + if resp != nil { + t.Fatalf("expected no error got %s", string(resp.Data[logical.HTTPRawBody].([]uint8))) + } + } + }) + } +} diff --git a/builtin/logical/pki/backend.go b/builtin/logical/pki/backend.go new file mode 100644 index 0000000..5d525a2 --- /dev/null +++ b/builtin/logical/pki/backend.go @@ -0,0 +1,959 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + "sync/atomic" + "time" + + atomic2 "go.uber.org/atomic" + + "github.com/armon/go-metrics" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/vault/helper/constants" + "github.com/hashicorp/vault/helper/metricsutil" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + operationPrefixPKI = "pki" + operationPrefixPKIIssuer = "pki-issuer" + operationPrefixPKIIssuers = "pki-issuers" + operationPrefixPKIRoot = "pki-root" + + noRole = 0 + roleOptional = 1 + roleRequired = 2 +) + +/* + * PKI requests are a bit special to keep up with the various failure and load issues. + * + * Any requests to write/delete shared data (such as roles, issuers, keys, and configuration) + * are always forwarded to the Primary cluster's active node to write and send the key + * material/config globally across all clusters. Reads should be handled locally, to give a + * sense of where this cluster's replication state is at. + * + * CRL/Revocation and Fetch Certificate APIs are handled by the active node within the cluster + * they originate. This means, if a request comes into a performance secondary cluster, the writes + * will be forwarded to that cluster's active node and not go all the way up to the performance primary's + * active node. + * + * If a certificate issue request has a role in which no_store is set to true, that node itself + * will issue the certificate and not forward the request to the active node, as this does not + * need to write to storage. 
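+ * (A no_store=true role never persists the issued certificate, which is why
+ * such requests are safe to serve without reaching the active node.)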
+ * + * Following the same pattern, if a managed key is involved to sign an issued certificate request + * and the local node does not have access for some reason to it, the request will be forwarded to + * the active node within the cluster only. + * + * To make sense of what goes where the following bits need to be analyzed within the codebase. + * + * 1. The backend LocalStorage paths determine what storage paths will remain within a + * cluster and not be forwarded to a performance primary + * 2. Within each path's OperationHandler definition, check to see if ForwardPerformanceStandby & + * ForwardPerformanceSecondary flags are set to short-circuit the request to a given active node + * 3. Within the managed key util class in pki, an initialization failure could cause the request + * to be forwarded to an active node if not already on it. + */ + +// Factory creates a new backend implementing the logical.Backend interface +func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { + b := Backend(conf) + if err := b.Setup(ctx, conf); err != nil { + return nil, err + } + return b, nil +} + +// Backend returns a new Backend framework struct +func Backend(conf *logical.BackendConfig) *backend { + var b backend + b.Backend = &framework.Backend{ + Help: strings.TrimSpace(backendHelp), + + PathsSpecial: &logical.Paths{ + Unauthenticated: []string{ + "cert/*", + "ca/pem", + "ca_chain", + "ca", + "crl/delta", + "crl/delta/pem", + "crl/pem", + "crl", + "issuer/+/crl/der", + "issuer/+/crl/pem", + "issuer/+/crl", + "issuer/+/crl/delta/der", + "issuer/+/crl/delta/pem", + "issuer/+/crl/delta", + "issuer/+/unified-crl/der", + "issuer/+/unified-crl/pem", + "issuer/+/unified-crl", + "issuer/+/unified-crl/delta/der", + "issuer/+/unified-crl/delta/pem", + "issuer/+/unified-crl/delta", + "issuer/+/pem", + "issuer/+/der", + "issuer/+/json", + "issuers/", // LIST operations append a '/' to the requested path + "ocsp", // OCSP POST + "ocsp/*", // OCSP GET + "unified-crl/delta", + "unified-crl/delta/pem", + "unified-crl/pem", + "unified-crl", + "unified-ocsp", // Unified OCSP POST + "unified-ocsp/*", // Unified OCSP GET + + // ACME paths are added below + }, + + LocalStorage: []string{ + revokedPath, + localDeltaWALPath, + legacyCRLPath, + clusterConfigPath, + "crls/", + "certs/", + acmePathPrefix, + }, + + Root: []string{ + "root", + "root/sign-self-issued", + }, + + SealWrapStorage: []string{ + legacyCertBundlePath, + legacyCertBundleBackupPath, + keyPrefix, + }, + + WriteForwardedStorage: []string{ + crossRevocationPath, + unifiedRevocationWritePathPrefix, + unifiedDeltaWALPath, + }, + }, + + Paths: []*framework.Path{ + pathListRoles(&b), + pathRoles(&b), + pathGenerateRoot(&b), + pathSignIntermediate(&b), + pathSignSelfIssued(&b), + pathDeleteRoot(&b), + pathGenerateIntermediate(&b), + pathSetSignedIntermediate(&b), + pathConfigCA(&b), + pathConfigCRL(&b), + pathConfigURLs(&b), + pathConfigCluster(&b), + pathSignVerbatim(&b), + pathSign(&b), + pathIssue(&b), + pathRotateCRL(&b), + pathRotateDeltaCRL(&b), + pathRevoke(&b), + pathRevokeWithKey(&b), + pathListCertsRevoked(&b), + pathTidy(&b), + pathTidyCancel(&b), + pathTidyStatus(&b), + pathConfigAutoTidy(&b), + + // Issuer APIs + pathListIssuers(&b), + pathGetIssuer(&b), + pathGetUnauthedIssuer(&b), + pathGetIssuerCRL(&b), + pathImportIssuer(&b), + pathIssuerIssue(&b), + pathIssuerSign(&b), + pathIssuerSignIntermediate(&b), + pathIssuerSignSelfIssued(&b), + pathIssuerSignVerbatim(&b), + pathIssuerGenerateRoot(&b), + 
pathRotateRoot(&b), + pathIssuerGenerateIntermediate(&b), + pathCrossSignIntermediate(&b), + pathConfigIssuers(&b), + pathReplaceRoot(&b), + pathRevokeIssuer(&b), + + // Key APIs + pathListKeys(&b), + pathKey(&b), + pathGenerateKey(&b), + pathImportKey(&b), + pathConfigKeys(&b), + + // Fetch APIs have been lowered to favor the newer issuer API endpoints + pathFetchCA(&b), + pathFetchCAChain(&b), + pathFetchCRL(&b), + pathFetchCRLViaCertPath(&b), + pathFetchValidRaw(&b), + pathFetchValid(&b), + pathFetchListCerts(&b), + + // OCSP APIs + buildPathOcspGet(&b), + buildPathOcspPost(&b), + + // CRL Signing + pathResignCrls(&b), + pathSignRevocationList(&b), + + // ACME + pathAcmeConfig(&b), + pathAcmeEabList(&b), + pathAcmeEabDelete(&b), + }, + + Secrets: []*framework.Secret{ + secretCerts(&b), + }, + + BackendType: logical.TypeLogical, + InitializeFunc: b.initialize, + Invalidate: b.invalidate, + PeriodicFunc: b.periodicFunc, + Clean: b.cleanup, + } + + // Add ACME paths to backend + var acmePaths []*framework.Path + acmePaths = append(acmePaths, pathAcmeDirectory(&b)...) + acmePaths = append(acmePaths, pathAcmeNonce(&b)...) + acmePaths = append(acmePaths, pathAcmeNewAccount(&b)...) + acmePaths = append(acmePaths, pathAcmeUpdateAccount(&b)...) + acmePaths = append(acmePaths, pathAcmeGetOrder(&b)...) + acmePaths = append(acmePaths, pathAcmeListOrders(&b)...) + acmePaths = append(acmePaths, pathAcmeNewOrder(&b)...) + acmePaths = append(acmePaths, pathAcmeFinalizeOrder(&b)...) + acmePaths = append(acmePaths, pathAcmeFetchOrderCert(&b)...) + acmePaths = append(acmePaths, pathAcmeChallenge(&b)...) + acmePaths = append(acmePaths, pathAcmeAuthorization(&b)...) + acmePaths = append(acmePaths, pathAcmeRevoke(&b)...) + acmePaths = append(acmePaths, pathAcmeNewEab(&b)...) 
// auth'd API that lives underneath the various /acme paths + + for _, acmePath := range acmePaths { + b.Backend.Paths = append(b.Backend.Paths, acmePath) + } + + // Add specific un-auth'd paths for ACME APIs + for _, acmePrefix := range []string{"", "issuer/+/", "roles/+/", "issuer/+/roles/+/"} { + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, acmePrefix+"acme/directory") + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, acmePrefix+"acme/new-nonce") + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, acmePrefix+"acme/new-account") + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, acmePrefix+"acme/new-order") + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, acmePrefix+"acme/revoke-cert") + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, acmePrefix+"acme/key-change") + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, acmePrefix+"acme/account/+") + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, acmePrefix+"acme/authorization/+") + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, acmePrefix+"acme/challenge/+/+") + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, acmePrefix+"acme/orders") + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, acmePrefix+"acme/order/+") + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, acmePrefix+"acme/order/+/finalize") + b.PathsSpecial.Unauthenticated = append(b.PathsSpecial.Unauthenticated, acmePrefix+"acme/order/+/cert") + // We specifically do NOT add acme/new-eab to this as it should be auth'd + } + + if constants.IsEnterprise { + // Unified CRL/OCSP paths are ENT only + entOnly := []*framework.Path{ + pathGetIssuerUnifiedCRL(&b), + pathListCertsRevocationQueue(&b), + pathListUnifiedRevoked(&b), + pathFetchUnifiedCRL(&b), + buildPathUnifiedOcspGet(&b), + buildPathUnifiedOcspPost(&b), + } + b.Backend.Paths = append(b.Backend.Paths, entOnly...) + } + + b.tidyCASGuard = new(uint32) + b.tidyCancelCAS = new(uint32) + b.tidyStatus = &tidyStatus{state: tidyStatusInactive} + b.storage = conf.StorageView + b.backendUUID = conf.BackendUUID + + b.pkiStorageVersion.Store(0) + + // b isn't yet initialized with SystemView state; calling b.System() will + // result in a nil pointer dereference. Instead query BackendConfig's + // copy of SystemView. + cannotRebuildCRLs := conf.System.ReplicationState().HasState(consts.ReplicationPerformanceStandby) || + conf.System.ReplicationState().HasState(consts.ReplicationDRSecondary) + b.crlBuilder = newCRLBuilder(!cannotRebuildCRLs) + + // Delay the first tidy until after we've started up. 
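+	// Seeding lastTidy with the startup time means periodicFunc will not
+	// trigger an automatic tidy until a full tidy interval has elapsed.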
+ b.lastTidy = time.Now() + + // Metrics initialization for count of certificates in storage + b.certCountEnabled = atomic2.NewBool(false) + b.publishCertCountMetrics = atomic2.NewBool(false) + b.certsCounted = atomic2.NewBool(false) + b.certCountError = "Initialize Not Yet Run, Cert Counts Unavailable" + b.certCount = &atomic.Uint32{} + b.revokedCertCount = &atomic.Uint32{} + b.possibleDoubleCountedSerials = make([]string, 0, 250) + b.possibleDoubleCountedRevokedSerials = make([]string, 0, 250) + + b.unifiedTransferStatus = newUnifiedTransferStatus() + + b.acmeState = NewACMEState() + return &b +} + +type backend struct { + *framework.Backend + + backendUUID string + storage logical.Storage + revokeStorageLock sync.RWMutex + tidyCASGuard *uint32 + tidyCancelCAS *uint32 + + tidyStatusLock sync.RWMutex + tidyStatus *tidyStatus + lastTidy time.Time + + unifiedTransferStatus *unifiedTransferStatus + + certCountEnabled *atomic2.Bool + publishCertCountMetrics *atomic2.Bool + certCount *atomic.Uint32 + revokedCertCount *atomic.Uint32 + certsCounted *atomic2.Bool + certCountError string + possibleDoubleCountedSerials []string + possibleDoubleCountedRevokedSerials []string + + pkiStorageVersion atomic.Value + crlBuilder *crlBuilder + + // Write lock around issuers and keys. + issuersLock sync.RWMutex + + // Context around ACME operations + acmeState *acmeState + acmeAccountLock sync.RWMutex // (Write) Locked on Tidy, (Read) Locked on Account Creation + // TODO: Stress test this - eg. creating an order while an account is being revoked +} + +type roleOperation func(ctx context.Context, req *logical.Request, data *framework.FieldData, role *roleEntry) (*logical.Response, error) + +const backendHelp = ` +The PKI backend dynamically generates X509 server and client certificates. + +After mounting this backend, configure the CA using the "pem_bundle" endpoint within +the "config/" path. 
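+
+Certificates are then issued against named roles (managed under the "roles/"
+path) via the "issue/" and "sign/" endpoints.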
+` + +func metricsKey(req *logical.Request, extra ...string) []string { + if req == nil || req.MountPoint == "" { + return extra + } + key := make([]string, len(extra)+1) + key[0] = req.MountPoint[:len(req.MountPoint)-1] + copy(key[1:], extra) + return key +} + +func (b *backend) metricsWrap(callType string, roleMode int, ofunc roleOperation) framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + key := metricsKey(req, callType) + var role *roleEntry + var labels []metrics.Label + var err error + + var roleName string + switch roleMode { + case roleRequired: + roleName = data.Get("role").(string) + case roleOptional: + r, ok := data.GetOk("role") + if ok { + roleName = r.(string) + } + } + if roleMode > noRole { + // Get the role + role, err = b.getRole(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil && (roleMode == roleRequired || len(roleName) > 0) { + return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", roleName)), nil + } + labels = []metrics.Label{{"role", roleName}} + } + + ns, err := namespace.FromContext(ctx) + if err == nil { + labels = append(labels, metricsutil.NamespaceLabel(ns)) + } + + start := time.Now() + defer metrics.MeasureSinceWithLabels(key, start, labels) + resp, err := ofunc(ctx, req, data, role) + + if err != nil || resp.IsError() { + metrics.IncrCounterWithLabels(append(key, "failure"), 1.0, labels) + } else { + metrics.IncrCounterWithLabels(key, 1.0, labels) + } + return resp, err + } +} + +// initialize is used to perform a possible PKI storage migration if needed +func (b *backend) initialize(ctx context.Context, _ *logical.InitializationRequest) error { + sc := b.makeStorageContext(ctx, b.storage) + if err := b.crlBuilder.reloadConfigIfRequired(sc); err != nil { + return err + } + + err := b.initializePKIIssuersStorage(ctx) + if err != nil { + return err + } + + err = b.acmeState.Initialize(b, sc) + if err != nil { + return err + } + + // Initialize also needs to populate our certificate and revoked certificate count + err = b.initializeStoredCertificateCounts(ctx) + if err != nil { + // Don't block/err initialize/startup for metrics. Context on this call can time out due to number of certificates. + b.Logger().Error("Could not initialize stored certificate counts", "error", err) + b.certCountError = err.Error() + } + + return nil +} + +func (b *backend) cleanup(_ context.Context) { + b.acmeState.Shutdown(b) +} + +func (b *backend) initializePKIIssuersStorage(ctx context.Context) error { + // Grab the lock prior to the updating of the storage lock preventing us flipping + // the storage flag midway through the request stream of other requests. + b.issuersLock.Lock() + defer b.issuersLock.Unlock() + + // Load up our current pki storage state, no matter the host type we are on. + b.updatePkiStorageVersion(ctx, false) + + // Early exit if not a primary cluster or performance secondary with a local mount. 
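+	// Concretely: DR secondaries and performance standbys never run the
+	// migration, and a shared (non-local) mount on a performance secondary
+	// defers the migration to the performance primary.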
+	if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) ||
+		(!b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) {
+		b.Logger().Debug("skipping PKI migration as we are not on primary or secondary with a local mount")
+		return nil
+	}
+
+	if err := migrateStorage(ctx, b, b.storage); err != nil {
+		b.Logger().Error("Error during migration of PKI mount: " + err.Error())
+		return err
+	}
+
+	b.updatePkiStorageVersion(ctx, false)
+
+	return nil
+}
+
+func (b *backend) useLegacyBundleCaStorage() bool {
+	// This helper function is here to choose whether or not we use the newer
+	// issuer/key storage format or the older legacy ca bundle format.
+	//
+	// This happens because we might've upgraded secondary PR clusters to
+	// newer Vault code versions. We still want to be able to service requests
+	// with the old bundle format (e.g., issuing and revoking certs), until
+	// the primary cluster's active node is upgraded to the newer Vault version
+	// and the storage is migrated to the new format.
+	version := b.pkiStorageVersion.Load()
+	return version == nil || version == 0
+}
+
+func (b *backend) updatePkiStorageVersion(ctx context.Context, grabIssuersLock bool) {
+	info, err := getMigrationInfo(ctx, b.storage)
+	if err != nil {
+		b.Logger().Error(fmt.Sprintf("Failed loading PKI migration status, staying in legacy mode: %v", err))
+		return
+	}
+
+	// If this method is called outside the initialize function, such as by an
+	// invalidate func on a performance replica cluster, we should be grabbing
+	// the issuers lock to offer a consistent view of the storage version while
+	// other events are processing things. It's unknown what might happen during
+	// a single event if one part thinks we are in legacy mode, and then later
+	// on we aren't.
+	if grabIssuersLock {
+		b.issuersLock.Lock()
+		defer b.issuersLock.Unlock()
+	}
+
+	if info.isRequired {
+		b.pkiStorageVersion.Store(0)
+	} else {
+		b.pkiStorageVersion.Store(1)
+	}
+}
+
+func (b *backend) invalidate(ctx context.Context, key string) {
+	isNotPerfPrimary := b.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) ||
+		(!b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary))
+
+	switch {
+	case strings.HasPrefix(key, legacyMigrationBundleLogKey):
+		// This lets a secondary cluster pick up that the migration has completed,
+		// reset its compatibility mode, and rebuild the CRL locally. Kick it off
+		// as a goroutine so this call is not blocked by the lock grabbing
+		// within updatePkiStorageVersion.
+		go func() {
+			b.Logger().Info("Detected a migration completed, resetting pki storage version")
+			b.updatePkiStorageVersion(ctx, true)
+			b.crlBuilder.requestRebuildIfActiveNode(b)
+		}()
+	case strings.HasPrefix(key, issuerPrefix):
+		if !b.useLegacyBundleCaStorage() {
+			// See note in updateDefaultIssuerId about why this is necessary.
+			// We do this ahead of CRL rebuilding just so we know that things
+			// are stale.
+			b.crlBuilder.invalidateCRLBuildTime()
+
+			// If an issuer has changed on the primary, we need to schedule an
+			// update of our CRL; the primary cluster will have rebuilt its own
+			// already, but the CRL is cluster-specific, so force a rebuild of ours.
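+			// (As the name suggests, the rebuild is only requested when this
+			// node is the cluster's active node.)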
+ b.crlBuilder.requestRebuildIfActiveNode(b) + } else { + b.Logger().Debug("Ignoring invalidation updates for issuer as the PKI migration has yet to complete.") + } + case key == "config/crl": + // We may need to reload our OCSP status flag + b.crlBuilder.markConfigDirty() + case key == storageAcmeConfig: + b.acmeState.markConfigDirty() + case key == storageIssuerConfig: + b.crlBuilder.invalidateCRLBuildTime() + case strings.HasPrefix(key, crossRevocationPrefix): + split := strings.Split(key, "/") + + if !strings.HasSuffix(key, "/confirmed") { + cluster := split[len(split)-2] + serial := split[len(split)-1] + b.crlBuilder.addCertForRevocationCheck(cluster, serial) + } else { + if len(split) >= 3 { + cluster := split[len(split)-3] + serial := split[len(split)-2] + // Only process confirmations on the perf primary. The + // performance secondaries cannot remove other clusters' + // entries, and so do not need to track them (only to + // ignore them). On performance primary nodes though, + // we do want to track them to remove them. + if !isNotPerfPrimary { + b.crlBuilder.addCertForRevocationRemoval(cluster, serial) + } + } + } + case strings.HasPrefix(key, unifiedRevocationReadPathPrefix): + // Three parts to this key: prefix, cluster, and serial. + split := strings.Split(key, "/") + cluster := split[len(split)-2] + serial := split[len(split)-1] + b.crlBuilder.addCertFromCrossRevocation(cluster, serial) + } +} + +func (b *backend) periodicFunc(ctx context.Context, request *logical.Request) error { + sc := b.makeStorageContext(ctx, request.Storage) + + doCRL := func() error { + // First attempt to reload the CRL configuration. + if err := b.crlBuilder.reloadConfigIfRequired(sc); err != nil { + return err + } + + // As we're (below) modifying the backing storage, we need to ensure + // we're not on a standby/secondary node. + if b.System().ReplicationState().HasState(consts.ReplicationPerformanceStandby) || + b.System().ReplicationState().HasState(consts.ReplicationDRSecondary) { + return nil + } + + // First handle any global revocation queue entries. + if err := b.crlBuilder.processRevocationQueue(sc); err != nil { + return err + } + + // Then handle any unified cross-cluster revocations. + if err := b.crlBuilder.processCrossClusterRevocations(sc); err != nil { + return err + } + + // Check if we're set to auto rebuild and a CRL is set to expire. + if err := b.crlBuilder.checkForAutoRebuild(sc); err != nil { + return err + } + + // Then attempt to rebuild the CRLs if required. + warnings, err := b.crlBuilder.rebuildIfForced(sc) + if err != nil { + return err + } + if len(warnings) > 0 { + msg := "During rebuild of complete CRL, got the following warnings:" + for index, warning := range warnings { + msg = fmt.Sprintf("%v\n %d. %v", msg, index+1, warning) + } + b.Logger().Warn(msg) + } + + // If a delta CRL was rebuilt above as part of the complete CRL rebuild, + // this will be a no-op. However, if we do need to rebuild delta CRLs, + // this would cause us to do so. + warnings, err = b.crlBuilder.rebuildDeltaCRLsIfForced(sc, false) + if err != nil { + return err + } + if len(warnings) > 0 { + msg := "During rebuild of delta CRL, got the following warnings:" + for index, warning := range warnings { + msg = fmt.Sprintf("%v\n %d. %v", msg, index+1, warning) + } + b.Logger().Warn(msg) + } + + return nil + } + + doAutoTidy := func() error { + // As we're (below) modifying the backing storage, we need to ensure + // we're not on a standby/secondary node. 
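+		// This mirrors the equivalent replication-state guard in doCRL above.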
+ if b.System().ReplicationState().HasState(consts.ReplicationPerformanceStandby) || + b.System().ReplicationState().HasState(consts.ReplicationDRSecondary) { + return nil + } + + config, err := sc.getAutoTidyConfig() + if err != nil { + return err + } + + if !config.Enabled || config.Interval <= 0*time.Second { + return nil + } + + // Check if we should run another tidy... + now := time.Now() + b.tidyStatusLock.RLock() + nextOp := b.lastTidy.Add(config.Interval) + b.tidyStatusLock.RUnlock() + if now.Before(nextOp) { + return nil + } + + // Ensure a tidy isn't already running... If it is, we'll trigger + // again when the running one finishes. + if !atomic.CompareAndSwapUint32(b.tidyCASGuard, 0, 1) { + return nil + } + + // Prevent ourselves from starting another tidy operation while + // this one is still running. This operation runs in the background + // and has a separate error reporting mechanism. + b.tidyStatusLock.Lock() + b.lastTidy = now + b.tidyStatusLock.Unlock() + + // Because the request from the parent storage will be cleared at + // some point (and potentially reused) -- due to tidy executing in + // a background goroutine -- we need to copy the storage entry off + // of the backend instead. + backendReq := &logical.Request{ + Storage: b.storage, + } + + b.startTidyOperation(backendReq, config) + return nil + } + + // First tidy any ACME nonces to free memory. + b.acmeState.DoTidyNonces() + + // Then run unified transfer. + backgroundSc := b.makeStorageContext(context.Background(), b.storage) + go runUnifiedTransfer(backgroundSc) + + // Then run the CRL rebuild and tidy operation. + crlErr := doCRL() + tidyErr := doAutoTidy() + + // Periodically re-emit gauges so that they don't disappear/go stale + tidyConfig, err := sc.getAutoTidyConfig() + if err != nil { + return err + } + b.emitCertStoreMetrics(tidyConfig) + + var errors error + if crlErr != nil { + errors = multierror.Append(errors, fmt.Errorf("Error building CRLs:\n - %w\n", crlErr)) + } + + if tidyErr != nil { + errors = multierror.Append(errors, fmt.Errorf("Error running auto-tidy:\n - %w\n", tidyErr)) + } + + if errors != nil { + return errors + } + + // Check if the CRL was invalidated due to issuer swap and update + // accordingly. + if err := b.crlBuilder.flushCRLBuildTimeInvalidation(sc); err != nil { + return err + } + + // All good! 
+	return nil
+}
+
+func (b *backend) initializeStoredCertificateCounts(ctx context.Context) error {
+	// For performance reasons, we can't lock on issuance/storage of certs until a list operation completes,
+	// but we want to limit possible miscounts / double-counts to over-counting, so we take the tidy lock which
+	// prevents (most) deletions - in particular we take a read lock (sufficient to block the write lock in
+	// tidyStatusStart while allowing tidy to still acquire a read lock to report via its endpoint).
+	b.tidyStatusLock.RLock()
+	defer b.tidyStatusLock.RUnlock()
+	sc := b.makeStorageContext(ctx, b.storage)
+	config, err := sc.getAutoTidyConfig()
+	if err != nil {
+		return err
+	}
+
+	b.certCountEnabled.Store(config.MaintainCount)
+	b.publishCertCountMetrics.Store(config.PublishMetrics)
+
+	if !config.MaintainCount {
+		b.possibleDoubleCountedRevokedSerials = nil
+		b.possibleDoubleCountedSerials = nil
+		b.certsCounted.Store(true)
+		b.certCount.Store(0)
+		b.revokedCertCount.Store(0)
+		b.certCountError = "Cert Count is Disabled: enable via Tidy Config maintain_stored_certificate_counts"
+		return nil
+	}
+
+	// Ideally these three things would be set in one transaction; since that isn't possible, set the counts
+	// to "0" first, so the count will over-count (and miss putting things in the deduplication queue) rather
+	// than under-count.
+	b.certCount.Store(0)
+	b.revokedCertCount.Store(0)
+	b.possibleDoubleCountedRevokedSerials = nil
+	b.possibleDoubleCountedSerials = nil
+	// A cert issued or revoked here will be double-counted. That's okay; this is "best effort" metrics.
+	b.certsCounted.Store(false)
+
+	entries, err := b.storage.List(ctx, "certs/")
+	if err != nil {
+		return err
+	}
+	b.certCount.Add(uint32(len(entries)))
+
+	revokedEntries, err := b.storage.List(ctx, "revoked/")
+	if err != nil {
+		return err
+	}
+	b.revokedCertCount.Add(uint32(len(revokedEntries)))
+
+	b.certsCounted.Store(true)
+	// Now that the metrics are set, we can switch from appending newly-stored certificates to the possible
+	// double-count list, and instead have them update the counter directly. We need to do this so that we are
+	// looking at a static slice of possibly double-counted serials. Note that certsCounted is computed before
+	// the storage operation, so there may be some delay here.
+
+	// Sort the listed entries first, to accommodate that delay.
+	sort.Slice(entries, func(i, j int) bool {
+		return entries[i] < entries[j]
+	})
+
+	sort.Slice(revokedEntries, func(i, j int) bool {
+		return revokedEntries[i] < revokedEntries[j]
+	})
+
+	// We assume here that these lists are now complete.
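+	// What follows is effectively a merge-join over sorted lists: walk the
+	// listed serials and the possibly-double-counted serials together, and
+	// decrement the count once for each serial that appears in both.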
+	sort.Slice(b.possibleDoubleCountedSerials, func(i, j int) bool {
+		return b.possibleDoubleCountedSerials[i] < b.possibleDoubleCountedSerials[j]
+	})
+
+	listEntriesIndex := 0
+	possibleDoubleCountIndex := 0
+	for {
+		if listEntriesIndex >= len(entries) {
+			break
+		}
+		if possibleDoubleCountIndex >= len(b.possibleDoubleCountedSerials) {
+			break
+		}
+		if entries[listEntriesIndex] == b.possibleDoubleCountedSerials[possibleDoubleCountIndex] {
+			// This represents a double-counted entry
+			b.decrementTotalCertificatesCountNoReport()
+			listEntriesIndex++
+			possibleDoubleCountIndex++
+			continue
+		}
+		if entries[listEntriesIndex] < b.possibleDoubleCountedSerials[possibleDoubleCountIndex] {
+			listEntriesIndex++
+			continue
+		}
+		if entries[listEntriesIndex] > b.possibleDoubleCountedSerials[possibleDoubleCountIndex] {
+			possibleDoubleCountIndex++
+			continue
+		}
+	}
+
+	sort.Slice(b.possibleDoubleCountedRevokedSerials, func(i, j int) bool {
+		return b.possibleDoubleCountedRevokedSerials[i] < b.possibleDoubleCountedRevokedSerials[j]
+	})
+
+	listRevokedEntriesIndex := 0
+	possibleRevokedDoubleCountIndex := 0
+	for {
+		if listRevokedEntriesIndex >= len(revokedEntries) {
+			break
+		}
+		if possibleRevokedDoubleCountIndex >= len(b.possibleDoubleCountedRevokedSerials) {
+			break
+		}
+		if revokedEntries[listRevokedEntriesIndex] == b.possibleDoubleCountedRevokedSerials[possibleRevokedDoubleCountIndex] {
+			// This represents a double-counted revoked entry
+			b.decrementTotalRevokedCertificatesCountNoReport()
+			listRevokedEntriesIndex++
+			possibleRevokedDoubleCountIndex++
+			continue
+		}
+		if revokedEntries[listRevokedEntriesIndex] < b.possibleDoubleCountedRevokedSerials[possibleRevokedDoubleCountIndex] {
+			listRevokedEntriesIndex++
+			continue
+		}
+		if revokedEntries[listRevokedEntriesIndex] > b.possibleDoubleCountedRevokedSerials[possibleRevokedDoubleCountIndex] {
+			possibleRevokedDoubleCountIndex++
+			continue
+		}
+	}
+
+	b.possibleDoubleCountedRevokedSerials = nil
+	b.possibleDoubleCountedSerials = nil
+
+	b.emitCertStoreMetrics(config)
+
+	b.certCountError = ""
+
+	return nil
+}
+
+func (b *backend) emitCertStoreMetrics(config *tidyConfig) {
+	if config.PublishMetrics {
+		certCount := b.certCount.Load()
+		b.emitTotalCertCountMetric(certCount)
+		revokedCertCount := b.revokedCertCount.Load()
+		b.emitTotalRevokedCountMetric(revokedCertCount)
+	}
+}
+
+// The "certsCounted" boolean here should be loaded from the backend certsCounted before the corresponding storage call:
+// eg.
certsCounted := b.certsCounted.Load() +func (b *backend) ifCountEnabledIncrementTotalCertificatesCount(certsCounted bool, newSerial string) { + if b.certCountEnabled.Load() { + certCount := b.certCount.Add(1) + switch { + case !certsCounted: + // This is unsafe, but a good best-attempt + if strings.HasPrefix(newSerial, "certs/") { + newSerial = newSerial[6:] + } + b.possibleDoubleCountedSerials = append(b.possibleDoubleCountedSerials, newSerial) + default: + if b.publishCertCountMetrics.Load() { + b.emitTotalCertCountMetric(certCount) + } + } + } +} + +func (b *backend) ifCountEnabledDecrementTotalCertificatesCountReport() { + if b.certCountEnabled.Load() { + certCount := b.decrementTotalCertificatesCountNoReport() + if b.publishCertCountMetrics.Load() { + b.emitTotalCertCountMetric(certCount) + } + } +} + +func (b *backend) emitTotalCertCountMetric(certCount uint32) { + metrics.SetGauge([]string{"secrets", "pki", b.backendUUID, "total_certificates_stored"}, float32(certCount)) +} + +// Called directly only by the initialize function to deduplicate the count, when we don't have a full count yet +// Does not respect whether-we-are-counting backend information. +func (b *backend) decrementTotalCertificatesCountNoReport() uint32 { + newCount := b.certCount.Add(^uint32(0)) + return newCount +} + +// The "certsCounted" boolean here should be loaded from the backend certsCounted before the corresponding storage call: +// eg. certsCounted := b.certsCounted.Load() +func (b *backend) ifCountEnabledIncrementTotalRevokedCertificatesCount(certsCounted bool, newSerial string) { + if b.certCountEnabled.Load() { + newRevokedCertCount := b.revokedCertCount.Add(1) + switch { + case !certsCounted: + // This is unsafe, but a good best-attempt + if strings.HasPrefix(newSerial, "revoked/") { // allow passing in the path (revoked/serial) OR the serial + newSerial = newSerial[8:] + } + b.possibleDoubleCountedRevokedSerials = append(b.possibleDoubleCountedRevokedSerials, newSerial) + default: + if b.publishCertCountMetrics.Load() { + b.emitTotalRevokedCountMetric(newRevokedCertCount) + } + } + } +} + +func (b *backend) ifCountEnabledDecrementTotalRevokedCertificatesCountReport() { + if b.certCountEnabled.Load() { + revokedCertCount := b.decrementTotalRevokedCertificatesCountNoReport() + if b.publishCertCountMetrics.Load() { + b.emitTotalRevokedCountMetric(revokedCertCount) + } + } +} + +func (b *backend) emitTotalRevokedCountMetric(revokedCertCount uint32) { + metrics.SetGauge([]string{"secrets", "pki", b.backendUUID, "total_revoked_certificates_stored"}, float32(revokedCertCount)) +} + +// Called directly only by the initialize function to deduplicate the count, when we don't have a full count yet +// Does not respect whether-we-are-counting backend information. +func (b *backend) decrementTotalRevokedCertificatesCountNoReport() uint32 { + newRevokedCertCount := b.revokedCertCount.Add(^uint32(0)) + return newRevokedCertCount +} diff --git a/builtin/logical/pki/backend_test.go b/builtin/logical/pki/backend_test.go new file mode 100644 index 0000000..f4e53d1 --- /dev/null +++ b/builtin/logical/pki/backend_test.go @@ -0,0 +1,7139 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "bytes" + "context" + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/pem" + "fmt" + "math" + "math/big" + mathrand "math/rand" + "net" + "net/url" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/hashicorp/vault/helper/testhelpers/teststorage" + + "github.com/hashicorp/vault/helper/testhelpers" + + "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" + + "github.com/stretchr/testify/require" + + "github.com/armon/go-metrics" + "github.com/fatih/structs" + "github.com/go-test/deep" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/api" + auth "github.com/hashicorp/vault/api/auth/userpass" + "github.com/hashicorp/vault/builtin/credential/userpass" + logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + "github.com/mitchellh/mapstructure" + "golang.org/x/net/idna" +) + +var stepCount = 0 + +// From builtin/credential/cert/test-fixtures/root/rootcacert.pem +const ( + rootCACertPEM = `-----BEGIN CERTIFICATE----- +MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw +MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7 +Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0 +z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x +AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb +6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH +SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G +A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx +7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc +BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA +wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2 +U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa +cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N +ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ +t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk +zehNe5dFTjFpylg1o6b8Ow== +-----END CERTIFICATE-----` + rootCAKeyPEM = `-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEA7FMy+FO4hFzZiHFmmY/B6A/zpyCep9PWZfGLUDtDuprHbg2p +t/TQjegMxC0AmWPZEHeG9FIJvT+WQXuLUG5v5MbG4zs21mqnYXwWAbTPaZ35bpp3 +BbbquXFtAqOZG8yfrob1g9OSgmY+bG3ZNxxv35qmbBbuWyUvmPwfjzEAclMxv48w +/2Rs4dXkBuvc9PiNdQ9Sv+ZYG8GIqIcbRd38cSaXI4Q94BOHEr4jm1vqb54H7twv +0Gy9CnLXfn7ZtEDFSmG8WLk2GvIna+UC+gnxSqKCw0rcTby63rQUCgdJZF8VQUVi +18+BMNLXI4pT/P9cxTaCJC/YeuV5a3SaniOoFQIDAQABAoIBAQCoGZJC84JnnIgb +ttZNWuWKBXbCJcDVDikOQJ9hBZbqsFg1X0CfGmQS3MHf9Ubc1Ro8zVjQh15oIEfn +8lIpdzTeXcpxLdiW8ix3ekVJF20F6pnXY8ZP6UnTeOwamXY6QPZAtb0D9UXcvY+f +nw+IVRD6082XS0Rmzu+peYWVXDy+FDN+HJRANBcdJZz8gOmNBIe0qDWx1b85d/s8 +2Kk1Wwdss1IwAGeSddTSwzBNaaHdItZaMZOqPW1gRyBfVSkcUQIE6zn2RKw2b70t +grkIvyRcTdfmiKbqkkJ+eR+ITOUt0cBZSH4cDjlQA+r7hulvoBpQBRj068Toxkcc +bTagHaPBAoGBAPWPGVkHqhTbJ/DjmqDIStxby2M1fhhHt4xUGHinhUYjQjGOtDQ9 +0mfaB7HObudRiSLydRAVGAHGyNJdQcTeFxeQbovwGiYKfZSA1IGpea7dTxPpGEdN 
+ksA0pzSp9MfKzX/MdLuAkEtO58aAg5YzsgX9hDNxo4MhH/gremZhEGZlAoGBAPZf +lqdYvAL0fjHGJ1FUEalhzGCGE9PH2iOqsxqLCXK7bDbzYSjvuiHkhYJHAOgVdiW1 +lB34UHHYAqZ1VVoFqJ05gax6DE2+r7K5VV3FUCaC0Zm3pavxchU9R/TKP82xRrBj +AFWwdgDTxUyvQEmgPR9sqorftO71Iz2tiwyTpIfxAoGBAIhEMLzHFAse0rtKkrRG +ccR27BbRyHeQ1Lp6sFnEHKEfT8xQdI/I/snCpCJ3e/PBu2g5Q9z416mktiyGs8ib +thTNgYsGYnxZtfaCx2pssanoBcn2wBJRae5fSapf5gY49HDG9MBYR7qCvvvYtSzU +4yWP2ZzyotpRt3vwJKxLkN5BAoGAORHpZvhiDNkvxj3da7Rqpu7VleJZA2y+9hYb +iOF+HcqWhaAY+I+XcTRrTMM/zYLzLEcEeXDEyao86uwxCjpXVZw1kotvAC9UqbTO +tnr3VwRkoxPsV4kFYTAh0+1pnC8dbcxxDmhi3Uww3tOVs7hfkEDuvF6XnebA9A+Y +LyCgMzECgYEA6cCU8QODOivIKWFRXucvWckgE6MYDBaAwe6qcLsd1Q/gpE2e3yQc +4RB3bcyiPROLzMLlXFxf1vSNJQdIaVfrRv+zJeGIiivLPU8+Eq4Lrb+tl1LepcOX +OzQeADTSCn5VidOfjDkIst9UXjMlrFfV9/oJEw5Eiqa6lkNPCGDhfA8= +-----END RSA PRIVATE KEY-----` +) + +func TestPKI_RequireCN(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "myvault.com", + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected ca info") + } + + // Create a role which does require CN (default) + _, err = CBWrite(b, s, "roles/example", map[string]interface{}{ + "allowed_domains": "foobar.com,zipzap.com,abc.com,xyz.com", + "allow_bare_domains": true, + "allow_subdomains": true, + "max_ttl": "2h", + }) + if err != nil { + t.Fatal(err) + } + + // Issue a cert with require_cn set to true and with common name supplied. + // It should succeed. + resp, err = CBWrite(b, s, "issue/example", map[string]interface{}{ + "common_name": "foobar.com", + }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issue/example"), logical.UpdateOperation), resp, true) + if err != nil { + t.Fatal(err) + } + + // Issue a cert with require_cn set to true and with out supplying the + // common name. It should error out. + _, err = CBWrite(b, s, "issue/example", map[string]interface{}{}) + if err == nil { + t.Fatalf("expected an error due to missing common_name") + } + + // Modify the role to make the common name optional + _, err = CBWrite(b, s, "roles/example", map[string]interface{}{ + "allowed_domains": "foobar.com,zipzap.com,abc.com,xyz.com", + "allow_bare_domains": true, + "allow_subdomains": true, + "max_ttl": "2h", + "require_cn": false, + }) + if err != nil { + t.Fatal(err) + } + + // Issue a cert with require_cn set to false and without supplying the + // common name. It should succeed. + resp, err = CBWrite(b, s, "issue/example", map[string]interface{}{}) + if err != nil { + t.Fatal(err) + } + + if resp.Data["certificate"] == "" { + t.Fatalf("expected a cert to be generated") + } + + // Issue a cert with require_cn set to false and with a common name. It + // should succeed. 
+ resp, err = CBWrite(b, s, "issue/example", map[string]interface{}{}) + if err != nil { + t.Fatal(err) + } + + if resp.Data["certificate"] == "" { + t.Fatalf("expected a cert to be generated") + } +} + +func TestPKI_DeviceCert(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "myvault.com", + "not_after": "9999-12-31T23:59:59Z", + "not_before_duration": "2h", + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected ca info") + } + var certBundle certutil.CertBundle + err = mapstructure.Decode(resp.Data, &certBundle) + if err != nil { + t.Fatal(err) + } + + parsedCertBundle, err := certBundle.ToParsedCertBundle() + if err != nil { + t.Fatal(err) + } + cert := parsedCertBundle.Certificate + notAfter := cert.NotAfter.Format(time.RFC3339) + if notAfter != "9999-12-31T23:59:59Z" { + t.Fatalf("not after from certificate: %v is not matching with input parameter: %v", cert.NotAfter, "9999-12-31T23:59:59Z") + } + if math.Abs(float64(time.Now().Add(-2*time.Hour).Unix()-cert.NotBefore.Unix())) > 10 { + t.Fatalf("root/generate/internal did not properly set validity period (notBefore): was %v vs expected %v", cert.NotBefore, time.Now().Add(-2*time.Hour)) + } + + // Create a role which does require CN (default) + _, err = CBWrite(b, s, "roles/example", map[string]interface{}{ + "allowed_domains": "foobar.com,zipzap.com,abc.com,xyz.com", + "allow_bare_domains": true, + "allow_subdomains": true, + "not_after": "9999-12-31T23:59:59Z", + }) + if err != nil { + t.Fatal(err) + } + + // Issue a cert with require_cn set to true and with common name supplied. + // It should succeed. + resp, err = CBWrite(b, s, "issue/example", map[string]interface{}{ + "common_name": "foobar.com", + }) + if err != nil { + t.Fatal(err) + } + err = mapstructure.Decode(resp.Data, &certBundle) + if err != nil { + t.Fatal(err) + } + + parsedCertBundle, err = certBundle.ToParsedCertBundle() + if err != nil { + t.Fatal(err) + } + cert = parsedCertBundle.Certificate + notAfter = cert.NotAfter.Format(time.RFC3339) + if notAfter != "9999-12-31T23:59:59Z" { + t.Fatal(fmt.Errorf("not after from certificate is not matching with input parameter")) + } +} + +func TestBackend_InvalidParameter(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + _, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "myvault.com", + "not_after": "9999-12-31T23:59:59Z", + "ttl": "25h", + }) + if err == nil { + t.Fatal(err) + } + + _, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "myvault.com", + "not_after": "9999-12-31T23:59:59", + }) + if err == nil { + t.Fatal(err) + } +} + +func TestBackend_CSRValues(t *testing.T) { + t.Parallel() + initTest.Do(setCerts) + b, _ := CreateBackendWithStorage(t) + + testCase := logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{}, + } + + intdata := map[string]interface{}{} + reqdata := map[string]interface{}{} + testCase.Steps = append(testCase.Steps, generateCSRSteps(t, ecCACert, ecCAKey, intdata, reqdata)...) 
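+	// (The steps from generateCSRSteps exercise use_csr_values and
+	// max_path_length handling on root/sign-intermediate.)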
+ + logicaltest.Test(t, testCase) +} + +func TestBackend_URLsCRUD(t *testing.T) { + t.Parallel() + initTest.Do(setCerts) + b, _ := CreateBackendWithStorage(t) + + testCase := logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{}, + } + + intdata := map[string]interface{}{} + reqdata := map[string]interface{}{} + testCase.Steps = append(testCase.Steps, generateURLSteps(t, ecCACert, ecCAKey, intdata, reqdata)...) + + logicaltest.Test(t, testCase) +} + +// Generates and tests steps that walk through the various possibilities +// of role flags to ensure that they are properly restricted +func TestBackend_Roles(t *testing.T) { + t.Parallel() + cases := []struct { + name string + key, cert *string + useCSR bool + }{ + {"RSA", &rsaCAKey, &rsaCACert, false}, + {"RSACSR", &rsaCAKey, &rsaCACert, true}, + {"EC", &ecCAKey, &ecCACert, false}, + {"ECCSR", &ecCAKey, &ecCACert, true}, + {"ED", &edCAKey, &edCACert, false}, + {"EDCSR", &edCAKey, &edCACert, true}, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + initTest.Do(setCerts) + b, _ := CreateBackendWithStorage(t) + + testCase := logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + { + Operation: logical.UpdateOperation, + Path: "config/ca", + Data: map[string]interface{}{ + "pem_bundle": *tc.key + "\n" + *tc.cert, + }, + }, + }, + } + + testCase.Steps = append(testCase.Steps, generateRoleSteps(t, tc.useCSR)...) + if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 { + for i, v := range testCase.Steps { + data := map[string]interface{}{} + var keys []string + for k := range v.Data { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + interf := v.Data[k] + switch v := interf.(type) { + case bool: + if !v { + continue + } + case int: + if v == 0 { + continue + } + case []string: + if len(v) == 0 { + continue + } + case string: + if v == "" { + continue + } + lines := strings.Split(v, "\n") + if len(lines) > 1 { + data[k] = lines[0] + " ... 
(truncated)" + continue + } + } + data[k] = interf + + } + t.Logf("Step %d:\n%s %s err=%v %+v\n\n", i+1, v.Operation, v.Path, v.ErrorOk, data) + } + } + + logicaltest.Test(t, testCase) + }) + } +} + +// Performs some validity checking on the returned bundles +func checkCertsAndPrivateKey(keyType string, key crypto.Signer, usage x509.KeyUsage, extUsage x509.ExtKeyUsage, validity time.Duration, certBundle *certutil.CertBundle) (*certutil.ParsedCertBundle, error) { + parsedCertBundle, err := certBundle.ToParsedCertBundle() + if err != nil { + return nil, fmt.Errorf("error parsing cert bundle: %s", err) + } + + if key != nil { + switch keyType { + case "rsa": + parsedCertBundle.PrivateKeyType = certutil.RSAPrivateKey + parsedCertBundle.PrivateKey = key + parsedCertBundle.PrivateKeyBytes = x509.MarshalPKCS1PrivateKey(key.(*rsa.PrivateKey)) + case "ec": + parsedCertBundle.PrivateKeyType = certutil.ECPrivateKey + parsedCertBundle.PrivateKey = key + parsedCertBundle.PrivateKeyBytes, err = x509.MarshalECPrivateKey(key.(*ecdsa.PrivateKey)) + if err != nil { + return nil, fmt.Errorf("error parsing EC key: %s", err) + } + case "ed25519": + parsedCertBundle.PrivateKeyType = certutil.Ed25519PrivateKey + parsedCertBundle.PrivateKey = key + parsedCertBundle.PrivateKeyBytes, err = x509.MarshalPKCS8PrivateKey(key.(ed25519.PrivateKey)) + if err != nil { + return nil, fmt.Errorf("error parsing Ed25519 key: %s", err) + } + } + } + + switch { + case parsedCertBundle.Certificate == nil: + return nil, fmt.Errorf("did not find a certificate in the cert bundle") + case len(parsedCertBundle.CAChain) == 0 || parsedCertBundle.CAChain[0].Certificate == nil: + return nil, fmt.Errorf("did not find a CA in the cert bundle") + case parsedCertBundle.PrivateKey == nil: + return nil, fmt.Errorf("did not find a private key in the cert bundle") + case parsedCertBundle.PrivateKeyType == certutil.UnknownPrivateKey: + return nil, fmt.Errorf("could not figure out type of private key") + } + + switch { + case parsedCertBundle.PrivateKeyType == certutil.Ed25519PrivateKey && keyType != "ed25519": + fallthrough + case parsedCertBundle.PrivateKeyType == certutil.RSAPrivateKey && keyType != "rsa": + fallthrough + case parsedCertBundle.PrivateKeyType == certutil.ECPrivateKey && keyType != "ec": + return nil, fmt.Errorf("given key type does not match type found in bundle") + } + + cert := parsedCertBundle.Certificate + + if usage != cert.KeyUsage { + return nil, fmt.Errorf("expected usage of %#v, got %#v; ext usage is %#v", usage, cert.KeyUsage, cert.ExtKeyUsage) + } + + // There should only be one ext usage type, because only one is requested + // in the tests + if len(cert.ExtKeyUsage) != 1 { + return nil, fmt.Errorf("got wrong size key usage in generated cert; expected 1, values are %#v", cert.ExtKeyUsage) + } + switch extUsage { + case x509.ExtKeyUsageEmailProtection: + if cert.ExtKeyUsage[0] != x509.ExtKeyUsageEmailProtection { + return nil, fmt.Errorf("bad extended key usage") + } + case x509.ExtKeyUsageServerAuth: + if cert.ExtKeyUsage[0] != x509.ExtKeyUsageServerAuth { + return nil, fmt.Errorf("bad extended key usage") + } + case x509.ExtKeyUsageClientAuth: + if cert.ExtKeyUsage[0] != x509.ExtKeyUsageClientAuth { + return nil, fmt.Errorf("bad extended key usage") + } + case x509.ExtKeyUsageCodeSigning: + if cert.ExtKeyUsage[0] != x509.ExtKeyUsageCodeSigning { + return nil, fmt.Errorf("bad extended key usage") + } + } + + // TODO: We incremented 20->25 due to CircleCI execution + // being slow and pausing this test. 
We might consider recording the + // actual issuance time of the cert and calculating the expected + // validity period +/- fuzz, but that'd require recording and passing + // through more information. + if math.Abs(float64(time.Now().Add(validity).Unix()-cert.NotAfter.Unix())) > 25 { + return nil, fmt.Errorf("certificate validity end: %s; expected within 25 seconds of %s", cert.NotAfter.Format(time.RFC3339), time.Now().Add(validity).Format(time.RFC3339)) + } + + return parsedCertBundle, nil +} + +func generateURLSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[string]interface{}) []logicaltest.TestStep { + expected := certutil.URLEntries{ + IssuingCertificates: []string{ + "http://example.com/ca1", + "http://example.com/ca2", + }, + CRLDistributionPoints: []string{ + "http://example.com/crl1", + "http://example.com/crl2", + }, + OCSPServers: []string{ + "http://example.com/ocsp1", + "http://example.com/ocsp2", + }, + } + csrTemplate := x509.CertificateRequest{ + Subject: pkix.Name{ + CommonName: "my@example.com", + }, + } + + priv1024, _ := rsa.GenerateKey(rand.Reader, 1024) + csr1024, _ := x509.CreateCertificateRequest(rand.Reader, &csrTemplate, priv1024) + csrPem1024 := strings.TrimSpace(string(pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE REQUEST", + Bytes: csr1024, + }))) + + priv2048, _ := rsa.GenerateKey(rand.Reader, 2048) + csr2048, _ := x509.CreateCertificateRequest(rand.Reader, &csrTemplate, priv2048) + csrPem2048 := strings.TrimSpace(string(pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE REQUEST", + Bytes: csr2048, + }))) + + ret := []logicaltest.TestStep{ + { + Operation: logical.UpdateOperation, + Path: "root/generate/exported", + Data: map[string]interface{}{ + "common_name": "Root Cert", + "ttl": "180h", + }, + Check: func(resp *logical.Response) error { + if resp.Secret != nil && resp.Secret.LeaseID != "" { + return fmt.Errorf("root returned with a lease") + } + return nil + }, + }, + + { + Operation: logical.UpdateOperation, + Path: "config/urls", + Data: map[string]interface{}{ + "issuing_certificates": strings.Join(expected.IssuingCertificates, ","), + "crl_distribution_points": strings.Join(expected.CRLDistributionPoints, ","), + "ocsp_servers": strings.Join(expected.OCSPServers, ","), + }, + }, + + { + Operation: logical.ReadOperation, + Path: "config/urls", + Check: func(resp *logical.Response) error { + if resp.Data == nil { + return fmt.Errorf("no data returned") + } + var entries certutil.URLEntries + err := mapstructure.Decode(resp.Data, &entries) + if err != nil { + return err + } + if !reflect.DeepEqual(entries, expected) { + return fmt.Errorf("expected urls\n%#v\ndoes not match provided\n%#v\n", expected, entries) + } + + return nil + }, + }, + + { + Operation: logical.UpdateOperation, + Path: "root/sign-intermediate", + Data: map[string]interface{}{ + "common_name": "intermediate.cert.com", + "csr": csrPem1024, + "format": "der", + }, + ErrorOk: true, + Check: func(resp *logical.Response) error { + if !resp.IsError() { + return fmt.Errorf("expected an error response but did not get one") + } + if !strings.Contains(resp.Data["error"].(string), "2048") { + return fmt.Errorf("received an error but not about a 1024-bit key, error was: %s", resp.Data["error"].(string)) + } + + return nil + }, + }, + + { + Operation: logical.UpdateOperation, + Path: "root/sign-intermediate", + Data: map[string]interface{}{ + "common_name": "intermediate.cert.com", + "csr": csrPem2048, + "signature_bits": 512, + "format": "der", + "not_before_duration": "2h", 
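+				// With an RSA CA, signature_bits=512 is expected to yield a
+				// SHA512WithRSA signature (asserted in the Check func below).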
+ // Let's Encrypt -- R3 SKID + "skid": "14:2E:B3:17:B7:58:56:CB:AE:50:09:40:E6:1F:AF:9D:8B:14:C2:C6", + }, + Check: func(resp *logical.Response) error { + certString := resp.Data["certificate"].(string) + if certString == "" { + return fmt.Errorf("no certificate returned") + } + if resp.Secret != nil && resp.Secret.LeaseID != "" { + return fmt.Errorf("signed intermediate returned with a lease") + } + certBytes, _ := base64.StdEncoding.DecodeString(certString) + certs, err := x509.ParseCertificates(certBytes) + if err != nil { + return fmt.Errorf("returned cert cannot be parsed: %w", err) + } + if len(certs) != 1 { + return fmt.Errorf("unexpected returned length of certificates: %d", len(certs)) + } + cert := certs[0] + + skid, _ := hex.DecodeString("142EB317B75856CBAE500940E61FAF9D8B14C2C6") + + switch { + case !reflect.DeepEqual(expected.IssuingCertificates, cert.IssuingCertificateURL): + return fmt.Errorf("IssuingCertificateURL:\nexpected\n%#v\ngot\n%#v\n", expected.IssuingCertificates, cert.IssuingCertificateURL) + case !reflect.DeepEqual(expected.CRLDistributionPoints, cert.CRLDistributionPoints): + return fmt.Errorf("CRLDistributionPoints:\nexpected\n%#v\ngot\n%#v\n", expected.CRLDistributionPoints, cert.CRLDistributionPoints) + case !reflect.DeepEqual(expected.OCSPServers, cert.OCSPServer): + return fmt.Errorf("OCSPServer:\nexpected\n%#v\ngot\n%#v\n", expected.OCSPServers, cert.OCSPServer) + case !reflect.DeepEqual([]string{"intermediate.cert.com"}, cert.DNSNames): + return fmt.Errorf("DNSNames\nexpected\n%#v\ngot\n%#v\n", []string{"intermediate.cert.com"}, cert.DNSNames) + case !reflect.DeepEqual(x509.SHA512WithRSA, cert.SignatureAlgorithm): + return fmt.Errorf("Signature Algorithm:\nexpected\n%#v\ngot\n%#v\n", x509.SHA512WithRSA, cert.SignatureAlgorithm) + case !reflect.DeepEqual(skid, cert.SubjectKeyId): + return fmt.Errorf("SKID:\nexpected\n%#v\ngot\n%#v\n", skid, cert.SubjectKeyId) + } + + if math.Abs(float64(time.Now().Add(-2*time.Hour).Unix()-cert.NotBefore.Unix())) > 10 { + t.Fatalf("root/sign-intermediate did not properly set validity period (notBefore): was %v vs expected %v", cert.NotBefore, time.Now().Add(-2*time.Hour)) + } + + return nil + }, + }, + + // Same as above but exclude adding to sans + { + Operation: logical.UpdateOperation, + Path: "root/sign-intermediate", + Data: map[string]interface{}{ + "common_name": "intermediate.cert.com", + "csr": csrPem2048, + "format": "der", + "exclude_cn_from_sans": true, + }, + Check: func(resp *logical.Response) error { + certString := resp.Data["certificate"].(string) + if certString == "" { + return fmt.Errorf("no certificate returned") + } + if resp.Secret != nil && resp.Secret.LeaseID != "" { + return fmt.Errorf("signed intermediate returned with a lease") + } + certBytes, _ := base64.StdEncoding.DecodeString(certString) + certs, err := x509.ParseCertificates(certBytes) + if err != nil { + return fmt.Errorf("returned cert cannot be parsed: %w", err) + } + if len(certs) != 1 { + return fmt.Errorf("unexpected returned length of certificates: %d", len(certs)) + } + cert := certs[0] + + switch { + case !reflect.DeepEqual(expected.IssuingCertificates, cert.IssuingCertificateURL): + return fmt.Errorf("expected\n%#v\ngot\n%#v\n", expected.IssuingCertificates, cert.IssuingCertificateURL) + case !reflect.DeepEqual(expected.CRLDistributionPoints, cert.CRLDistributionPoints): + return fmt.Errorf("expected\n%#v\ngot\n%#v\n", expected.CRLDistributionPoints, cert.CRLDistributionPoints) + case !reflect.DeepEqual(expected.OCSPServers, 
cert.OCSPServer):
+					return fmt.Errorf("expected\n%#v\ngot\n%#v\n", expected.OCSPServers, cert.OCSPServer)
+				case !reflect.DeepEqual([]string(nil), cert.DNSNames):
+					return fmt.Errorf("expected\n%#v\ngot\n%#v\n", []string(nil), cert.DNSNames)
+				}
+
+				return nil
+			},
+		},
+	}
+	return ret
+}
+
+func generateCSR(t *testing.T, csrTemplate *x509.CertificateRequest, keyType string, keyBits int) (interface{}, []byte, string) {
+	t.Helper()
+
+	var priv interface{}
+	var err error
+	switch keyType {
+	case "rsa":
+		priv, err = rsa.GenerateKey(rand.Reader, keyBits)
+	case "ec":
+		switch keyBits {
+		case 224:
+			priv, err = ecdsa.GenerateKey(elliptic.P224(), rand.Reader)
+		case 256:
+			priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+		case 384:
+			priv, err = ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
+		case 521:
+			priv, err = ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
+		default:
+			t.Fatalf("Got unknown ec key bits: %v", keyBits)
+		}
+	case "ed25519":
+		_, priv, err = ed25519.GenerateKey(rand.Reader)
+	}
+
+	if err != nil {
+		t.Fatalf("Got error generating private key for CSR: %v", err)
+	}
+
+	csr, err := x509.CreateCertificateRequest(rand.Reader, csrTemplate, priv)
+	if err != nil {
+		t.Fatalf("Got error generating CSR: %v", err)
+	}
+
+	csrPem := strings.TrimSpace(string(pem.EncodeToMemory(&pem.Block{
+		Type:  "CERTIFICATE REQUEST",
+		Bytes: csr,
+	})))
+
+	return priv, csr, csrPem
+}
+
+func generateCSRSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[string]interface{}) []logicaltest.TestStep {
+	csrTemplate, csrPem := generateTestCsr(t, certutil.RSAPrivateKey, 2048)
+
+	ret := []logicaltest.TestStep{
+		{
+			Operation: logical.UpdateOperation,
+			Path:      "root/generate/exported",
+			Data: map[string]interface{}{
+				"common_name":     "Root Cert",
+				"ttl":             "180h",
+				"max_path_length": 0,
+			},
+		},
+
+		{
+			Operation: logical.UpdateOperation,
+			Path:      "root/sign-intermediate",
+			Data: map[string]interface{}{
+				"use_csr_values": true,
+				"csr":            csrPem,
+				"format":         "der",
+			},
+			ErrorOk: true,
+		},
+
+		{
+			Operation: logical.DeleteOperation,
+			Path:      "root",
+		},
+
+		{
+			Operation: logical.UpdateOperation,
+			Path:      "root/generate/exported",
+			Data: map[string]interface{}{
+				"common_name":     "Root Cert",
+				"ttl":             "180h",
+				"max_path_length": 1,
+			},
+		},
+
+		{
+			Operation: logical.UpdateOperation,
+			Path:      "root/sign-intermediate",
+			Data: map[string]interface{}{
+				"use_csr_values": true,
+				"csr":            csrPem,
+				"format":         "der",
+			},
+			Check: func(resp *logical.Response) error {
+				certString := resp.Data["certificate"].(string)
+				if certString == "" {
+					return fmt.Errorf("no certificate returned")
+				}
+				certBytes, _ := base64.StdEncoding.DecodeString(certString)
+				certs, err := x509.ParseCertificates(certBytes)
+				if err != nil {
+					return fmt.Errorf("returned cert cannot be parsed: %w", err)
+				}
+				if len(certs) != 1 {
+					return fmt.Errorf("unexpected returned length of certificates: %d", len(certs))
+				}
+				cert := certs[0]
+
+				if cert.MaxPathLen != 0 {
+					return fmt.Errorf("max path length of %d does not match the expected value of 0", cert.MaxPathLen)
+				}
+				if !cert.MaxPathLenZero {
+					return fmt.Errorf("max path length zero is not set")
+				}
+
+				// We need to set these as they are filled in with unparsed values in the final cert
+				csrTemplate.Subject.Names = cert.Subject.Names
+				csrTemplate.Subject.ExtraNames = cert.Subject.ExtraNames
+
+				switch {
+				case !reflect.DeepEqual(cert.Subject, csrTemplate.Subject):
+					return fmt.Errorf("cert subject\n%#v\ndoes not match csr subject\n%#v\n", cert.Subject,
csrTemplate.Subject) + case !reflect.DeepEqual(cert.DNSNames, csrTemplate.DNSNames): + return fmt.Errorf("cert dns names\n%#v\ndoes not match csr dns names\n%#v\n", cert.DNSNames, csrTemplate.DNSNames) + case !reflect.DeepEqual(cert.EmailAddresses, csrTemplate.EmailAddresses): + return fmt.Errorf("cert email addresses\n%#v\ndoes not match csr email addresses\n%#v\n", cert.EmailAddresses, csrTemplate.EmailAddresses) + case !reflect.DeepEqual(cert.IPAddresses, csrTemplate.IPAddresses): + return fmt.Errorf("cert ip addresses\n%#v\ndoes not match csr ip addresses\n%#v\n", cert.IPAddresses, csrTemplate.IPAddresses) + } + return nil + }, + }, + } + return ret +} + +func generateTestCsr(t *testing.T, keyType certutil.PrivateKeyType, keyBits int) (x509.CertificateRequest, string) { + t.Helper() + + csrTemplate := x509.CertificateRequest{ + Subject: pkix.Name{ + Country: []string{"MyCountry"}, + PostalCode: []string{"MyPostalCode"}, + SerialNumber: "MySerialNumber", + CommonName: "my@example.com", + }, + DNSNames: []string{ + "name1.example.com", + "name2.example.com", + "name3.example.com", + }, + EmailAddresses: []string{ + "name1@example.com", + "name2@example.com", + "name3@example.com", + }, + IPAddresses: []net.IP{ + net.ParseIP("::ff:1:2:3:4"), + net.ParseIP("::ff:5:6:7:8"), + }, + } + + _, _, csrPem := generateCSR(t, &csrTemplate, string(keyType), keyBits) + return csrTemplate, csrPem +} + +// Generates steps to test out various role permutations +func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { + roleVals := roleEntry{ + MaxTTL: 12 * time.Hour, + KeyType: "rsa", + KeyBits: 2048, + RequireCN: true, + AllowWildcardCertificates: new(bool), + } + *roleVals.AllowWildcardCertificates = true + + issueVals := certutil.IssueData{} + ret := []logicaltest.TestStep{} + + roleTestStep := logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "roles/test", + } + var issueTestStep logicaltest.TestStep + if useCSRs { + issueTestStep = logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "sign/test", + } + } else { + issueTestStep = logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "issue/test", + } + } + + generatedRSAKeys := map[int]crypto.Signer{} + generatedECKeys := map[int]crypto.Signer{} + generatedEdKeys := map[int]crypto.Signer{} + /* + // For the number of tests being run, a seed of 1 has been tested + // to hit all of the various values below. However, for normal + // testing we use a randomized time for maximum fuzziness. 
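+		// A specific run can be reproduced by setting the
+		// VAULT_PKITESTS_FIXED_SEED environment variable (parsed just below).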
+ */ + var seed int64 = 1 + fixedSeed := os.Getenv("VAULT_PKITESTS_FIXED_SEED") + if len(fixedSeed) == 0 { + seed = time.Now().UnixNano() + } else { + var err error + seed, err = strconv.ParseInt(fixedSeed, 10, 64) + if err != nil { + t.Fatalf("error parsing fixed seed of %s: %v", fixedSeed, err) + } + } + mathRand := mathrand.New(mathrand.NewSource(seed)) + // t.Logf("seed under test: %v", seed) + + // Used by tests not toggling common names to turn off the behavior of random key bit fuzziness + keybitSizeRandOff := false + + genericErrorOkCheck := func(resp *logical.Response) error { + if resp.IsError() { + return nil + } + return fmt.Errorf("expected an error, but did not seem to get one") + } + + // Adds tests with the currently configured issue/role information + addTests := func(testCheck logicaltest.TestCheckFunc) { + stepCount++ + // t.Logf("test step %d\nrole vals: %#v\n", stepCount, roleVals) + stepCount++ + // t.Logf("test step %d\nissue vals: %#v\n", stepCount, issueTestStep) + roleTestStep.Data = roleVals.ToResponseData() + roleTestStep.Data["generate_lease"] = false + ret = append(ret, roleTestStep) + issueTestStep.Data = structs.New(issueVals).Map() + switch { + case issueTestStep.ErrorOk: + issueTestStep.Check = genericErrorOkCheck + case testCheck != nil: + issueTestStep.Check = testCheck + default: + issueTestStep.Check = nil + } + ret = append(ret, issueTestStep) + } + + getCountryCheck := func(role roleEntry) logicaltest.TestCheckFunc { + var certBundle certutil.CertBundle + return func(resp *logical.Response) error { + err := mapstructure.Decode(resp.Data, &certBundle) + if err != nil { + return err + } + parsedCertBundle, err := certBundle.ToParsedCertBundle() + if err != nil { + return fmt.Errorf("error checking generated certificate: %s", err) + } + cert := parsedCertBundle.Certificate + + expected := strutil.RemoveDuplicates(role.Country, true) + if !reflect.DeepEqual(cert.Subject.Country, expected) { + return fmt.Errorf("error: returned certificate has Country of %s but %s was specified in the role", cert.Subject.Country, expected) + } + return nil + } + } + + getOuCheck := func(role roleEntry) logicaltest.TestCheckFunc { + var certBundle certutil.CertBundle + return func(resp *logical.Response) error { + err := mapstructure.Decode(resp.Data, &certBundle) + if err != nil { + return err + } + parsedCertBundle, err := certBundle.ToParsedCertBundle() + if err != nil { + return fmt.Errorf("error checking generated certificate: %s", err) + } + cert := parsedCertBundle.Certificate + + expected := strutil.RemoveDuplicatesStable(role.OU, true) + if !reflect.DeepEqual(cert.Subject.OrganizationalUnit, expected) { + return fmt.Errorf("error: returned certificate has OU of %s but %s was specified in the role", cert.Subject.OrganizationalUnit, expected) + } + return nil + } + } + + getOrganizationCheck := func(role roleEntry) logicaltest.TestCheckFunc { + var certBundle certutil.CertBundle + return func(resp *logical.Response) error { + err := mapstructure.Decode(resp.Data, &certBundle) + if err != nil { + return err + } + parsedCertBundle, err := certBundle.ToParsedCertBundle() + if err != nil { + return fmt.Errorf("error checking generated certificate: %s", err) + } + cert := parsedCertBundle.Certificate + + expected := strutil.RemoveDuplicates(role.Organization, true) + if !reflect.DeepEqual(cert.Subject.Organization, expected) { + return fmt.Errorf("error: returned certificate has Organization of %s but %s was specified in the role", cert.Subject.Organization, expected) + 
} + return nil + } + } + + getLocalityCheck := func(role roleEntry) logicaltest.TestCheckFunc { + var certBundle certutil.CertBundle + return func(resp *logical.Response) error { + err := mapstructure.Decode(resp.Data, &certBundle) + if err != nil { + return err + } + parsedCertBundle, err := certBundle.ToParsedCertBundle() + if err != nil { + return fmt.Errorf("error checking generated certificate: %s", err) + } + cert := parsedCertBundle.Certificate + + expected := strutil.RemoveDuplicates(role.Locality, true) + if !reflect.DeepEqual(cert.Subject.Locality, expected) { + return fmt.Errorf("error: returned certificate has Locality of %s but %s was specified in the role", cert.Subject.Locality, expected) + } + return nil + } + } + + getProvinceCheck := func(role roleEntry) logicaltest.TestCheckFunc { + var certBundle certutil.CertBundle + return func(resp *logical.Response) error { + err := mapstructure.Decode(resp.Data, &certBundle) + if err != nil { + return err + } + parsedCertBundle, err := certBundle.ToParsedCertBundle() + if err != nil { + return fmt.Errorf("error checking generated certificate: %s", err) + } + cert := parsedCertBundle.Certificate + + expected := strutil.RemoveDuplicates(role.Province, true) + if !reflect.DeepEqual(cert.Subject.Province, expected) { + return fmt.Errorf("error: returned certificate has Province of %s but %s was specified in the role", cert.Subject.Province, expected) + } + return nil + } + } + + getStreetAddressCheck := func(role roleEntry) logicaltest.TestCheckFunc { + var certBundle certutil.CertBundle + return func(resp *logical.Response) error { + err := mapstructure.Decode(resp.Data, &certBundle) + if err != nil { + return err + } + parsedCertBundle, err := certBundle.ToParsedCertBundle() + if err != nil { + return fmt.Errorf("error checking generated certificate: %s", err) + } + cert := parsedCertBundle.Certificate + + expected := strutil.RemoveDuplicates(role.StreetAddress, true) + if !reflect.DeepEqual(cert.Subject.StreetAddress, expected) { + return fmt.Errorf("error: returned certificate has StreetAddress of %s but %s was specified in the role", cert.Subject.StreetAddress, expected) + } + return nil + } + } + + getPostalCodeCheck := func(role roleEntry) logicaltest.TestCheckFunc { + var certBundle certutil.CertBundle + return func(resp *logical.Response) error { + err := mapstructure.Decode(resp.Data, &certBundle) + if err != nil { + return err + } + parsedCertBundle, err := certBundle.ToParsedCertBundle() + if err != nil { + return fmt.Errorf("error checking generated certificate: %s", err) + } + cert := parsedCertBundle.Certificate + + expected := strutil.RemoveDuplicates(role.PostalCode, true) + if !reflect.DeepEqual(cert.Subject.PostalCode, expected) { + return fmt.Errorf("error: returned certificate has PostalCode of %s but %s was specified in the role", cert.Subject.PostalCode, expected) + } + return nil + } + } + + getNotBeforeCheck := func(role roleEntry) logicaltest.TestCheckFunc { + var certBundle certutil.CertBundle + return func(resp *logical.Response) error { + err := mapstructure.Decode(resp.Data, &certBundle) + if err != nil { + return err + } + parsedCertBundle, err := certBundle.ToParsedCertBundle() + if err != nil { + return fmt.Errorf("error checking generated certificate: %s", err) + } + cert := parsedCertBundle.Certificate + + actualDiff := time.Since(cert.NotBefore) + certRoleDiff := (role.NotBeforeDuration - actualDiff).Truncate(time.Second) + // These times get truncated, so give a 1 second buffer on each side + if 
certRoleDiff >= -1*time.Second && certRoleDiff <= 1*time.Second { + return nil + } + return fmt.Errorf("validity period out of range diff: %v", certRoleDiff) + } + } + + // Returns a TestCheckFunc that performs various validity checks on the + // returned certificate information, mostly within checkCertsAndPrivateKey + getCnCheck := func(name string, role roleEntry, key crypto.Signer, usage x509.KeyUsage, extUsage x509.ExtKeyUsage, validity time.Duration) logicaltest.TestCheckFunc { + var certBundle certutil.CertBundle + return func(resp *logical.Response) error { + err := mapstructure.Decode(resp.Data, &certBundle) + if err != nil { + return err + } + parsedCertBundle, err := checkCertsAndPrivateKey(role.KeyType, key, usage, extUsage, validity, &certBundle) + if err != nil { + return fmt.Errorf("error checking generated certificate: %s", err) + } + cert := parsedCertBundle.Certificate + if cert.Subject.CommonName != name { + return fmt.Errorf("error: returned certificate has CN of %s but %s was requested", cert.Subject.CommonName, name) + } + if strings.Contains(cert.Subject.CommonName, "@") { + if len(cert.DNSNames) != 0 || len(cert.EmailAddresses) != 1 { + return fmt.Errorf("error: found more than one DNS SAN or not one Email SAN but only one was requested, cert.DNSNames = %#v, cert.EmailAddresses = %#v", cert.DNSNames, cert.EmailAddresses) + } + } else { + if len(cert.DNSNames) != 1 || len(cert.EmailAddresses) != 0 { + return fmt.Errorf("error: found more than one Email SAN or not one DNS SAN but only one was requested, cert.DNSNames = %#v, cert.EmailAddresses = %#v", cert.DNSNames, cert.EmailAddresses) + } + } + var retName string + if len(cert.DNSNames) > 0 { + retName = cert.DNSNames[0] + } + if len(cert.EmailAddresses) > 0 { + retName = cert.EmailAddresses[0] + } + if retName != name { + // Check IDNA + p := idna.New( + idna.StrictDomainName(true), + idna.VerifyDNSLength(true), + ) + converted, err := p.ToUnicode(retName) + if err != nil { + t.Fatal(err) + } + if converted != name { + return fmt.Errorf("error: returned certificate has a DNS SAN of %s (from idna: %s) but %s was requested", retName, converted, name) + } + } + return nil + } + } + + type csrPlan struct { + errorOk bool + roleKeyBits int + cert string + privKey crypto.Signer + } + + getCsr := func(keyType string, keyBits int, csrTemplate *x509.CertificateRequest) (*pem.Block, crypto.Signer) { + var privKey crypto.Signer + var ok bool + switch keyType { + case "rsa": + privKey, ok = generatedRSAKeys[keyBits] + if !ok { + privKey, _ = rsa.GenerateKey(rand.Reader, keyBits) + generatedRSAKeys[keyBits] = privKey + } + + case "ec": + var curve elliptic.Curve + + switch keyBits { + case 224: + curve = elliptic.P224() + case 256: + curve = elliptic.P256() + case 384: + curve = elliptic.P384() + case 521: + curve = elliptic.P521() + } + + privKey, ok = generatedECKeys[keyBits] + if !ok { + privKey, _ = ecdsa.GenerateKey(curve, rand.Reader) + generatedECKeys[keyBits] = privKey + } + + case "ed25519": + privKey, ok = generatedEdKeys[keyBits] + if !ok { + _, privKey, _ = ed25519.GenerateKey(rand.Reader) + generatedEdKeys[keyBits] = privKey + } + + default: + panic("invalid key type: " + keyType) + } + + csr, err := x509.CreateCertificateRequest(rand.Reader, csrTemplate, privKey) + if err != nil { + t.Fatalf("Error creating certificate request: %s", err) + } + block := pem.Block{ + Type: "CERTIFICATE REQUEST", + Bytes: csr, + } + return &block, privKey + } + + getRandCsr := func(keyType string, errorOk bool, csrTemplate 
*x509.CertificateRequest) csrPlan { + rsaKeyBits := []int{2048, 3072, 4096} + ecKeyBits := []int{224, 256, 384, 521} + plan := csrPlan{errorOk: errorOk} + + var testBitSize int + switch keyType { + case "rsa": + plan.roleKeyBits = rsaKeyBits[mathRand.Int()%len(rsaKeyBits)] + testBitSize = plan.roleKeyBits + + // If we don't expect an error already, randomly choose a + // key size and expect an error if it's less than the role + // setting + if !keybitSizeRandOff && !errorOk { + testBitSize = rsaKeyBits[mathRand.Int()%len(rsaKeyBits)] + } + + if testBitSize < plan.roleKeyBits { + plan.errorOk = true + } + + case "ec": + plan.roleKeyBits = ecKeyBits[mathRand.Int()%len(ecKeyBits)] + testBitSize = plan.roleKeyBits + + // If we don't expect an error already, randomly choose a + // key size and expect an error if it's less than the role + // setting + if !keybitSizeRandOff && !errorOk { + testBitSize = ecKeyBits[mathRand.Int()%len(ecKeyBits)] + } + + if testBitSize < plan.roleKeyBits { + plan.errorOk = true + } + + default: + panic("invalid key type: " + keyType) + } + if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 { + t.Logf("roleKeyBits=%d testBitSize=%d errorOk=%v", plan.roleKeyBits, testBitSize, plan.errorOk) + } + + block, privKey := getCsr(keyType, testBitSize, csrTemplate) + plan.cert = strings.TrimSpace(string(pem.EncodeToMemory(block))) + plan.privKey = privKey + return plan + } + + // Common names to test with the various role flags toggled + var commonNames struct { + Localhost bool `structs:"localhost"` + BareDomain bool `structs:"example.com"` + SecondDomain bool `structs:"foobar.com"` + SubDomain bool `structs:"foo.example.com"` + Wildcard bool `structs:"*.example.com"` + SubSubdomain bool `structs:"foo.bar.example.com"` + SubSubdomainWildcard bool `structs:"*.bar.example.com"` + GlobDomain bool `structs:"fooexample.com"` + IDN bool `structs:"daɪˈɛrɨsɨs"` + AnyHost bool `structs:"porkslap.beer"` + } + + // Adds a series of tests based on the current selection of + // allowed common names; contains some (seeded) randomness + // + // This allows for a variety of common names to be tested in various + // combinations with allowed toggles of the role + addCnTests := func() { + cnMap := structs.New(commonNames).Map() + for name, allowedInt := range cnMap { + roleVals.KeyType = "rsa" + roleVals.KeyBits = 2048 + if mathRand.Int()%3 == 1 { + roleVals.KeyType = "ec" + roleVals.KeyBits = 224 + } + + roleVals.ServerFlag = false + roleVals.ClientFlag = false + roleVals.CodeSigningFlag = false + roleVals.EmailProtectionFlag = false + + var usage []string + if mathRand.Int()%2 == 1 { + usage = append(usage, "DigitalSignature") + } + if mathRand.Int()%2 == 1 { + usage = append(usage, "ContentCoMmitment") + } + if mathRand.Int()%2 == 1 { + usage = append(usage, "KeyEncipherment") + } + if mathRand.Int()%2 == 1 { + usage = append(usage, "DataEncipherment") + } + if mathRand.Int()%2 == 1 { + usage = append(usage, "KeyAgreemEnt") + } + if mathRand.Int()%2 == 1 { + usage = append(usage, "CertSign") + } + if mathRand.Int()%2 == 1 { + usage = append(usage, "CRLSign") + } + if mathRand.Int()%2 == 1 { + usage = append(usage, "EncipherOnly") + } + if mathRand.Int()%2 == 1 { + usage = append(usage, "DecipherOnly") + } + + roleVals.KeyUsage = usage + parsedKeyUsage := parseKeyUsages(roleVals.KeyUsage) + if parsedKeyUsage == 0 && len(usage) != 0 { + panic("parsed key usages was zero") + } + + var extUsage x509.ExtKeyUsage + i := mathRand.Int() % 4 + switch { + case i == 0: + // Punt on this for now since I'm 
not clear the actual proper + // way to format these + if name != "daɪˈɛrɨsɨs" { + extUsage = x509.ExtKeyUsageEmailProtection + roleVals.EmailProtectionFlag = true + break + } + fallthrough + case i == 1: + extUsage = x509.ExtKeyUsageServerAuth + roleVals.ServerFlag = true + case i == 2: + extUsage = x509.ExtKeyUsageClientAuth + roleVals.ClientFlag = true + default: + extUsage = x509.ExtKeyUsageCodeSigning + roleVals.CodeSigningFlag = true + } + + allowed := allowedInt.(bool) + issueVals.CommonName = name + if roleVals.EmailProtectionFlag { + if !strings.HasPrefix(name, "*") { + issueVals.CommonName = "user@" + issueVals.CommonName + } + } + + issueTestStep.ErrorOk = !allowed + + validity := roleVals.MaxTTL + + if useCSRs { + templ := &x509.CertificateRequest{ + Subject: pkix.Name{ + CommonName: issueVals.CommonName, + }, + } + plan := getRandCsr(roleVals.KeyType, issueTestStep.ErrorOk, templ) + issueVals.CSR = plan.cert + roleVals.KeyBits = plan.roleKeyBits + issueTestStep.ErrorOk = plan.errorOk + + addTests(getCnCheck(issueVals.CommonName, roleVals, plan.privKey, x509.KeyUsage(parsedKeyUsage), extUsage, validity)) + } else { + addTests(getCnCheck(issueVals.CommonName, roleVals, nil, x509.KeyUsage(parsedKeyUsage), extUsage, validity)) + } + } + } + + funcs := []interface{}{ + addCnTests, getCnCheck, getCountryCheck, getLocalityCheck, getNotBeforeCheck, + getOrganizationCheck, getOuCheck, getPostalCodeCheck, getRandCsr, getStreetAddressCheck, + getProvinceCheck, + } + if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 { + t.Logf("funcs=%d", len(funcs)) + } + + // Common Name tests + { + // common_name not provided + issueVals.CommonName = "" + issueTestStep.ErrorOk = true + addTests(nil) + + // Nothing is allowed + addCnTests() + + roleVals.AllowLocalhost = true + commonNames.Localhost = true + addCnTests() + + roleVals.AllowedDomains = []string{"foobar.com"} + addCnTests() + + roleVals.AllowedDomains = []string{"example.com"} + roleVals.AllowSubdomains = true + commonNames.SubDomain = true + commonNames.Wildcard = true + commonNames.SubSubdomain = true + commonNames.SubSubdomainWildcard = true + addCnTests() + + roleVals.AllowedDomains = []string{"foobar.com", "example.com"} + commonNames.SecondDomain = true + roleVals.AllowBareDomains = true + commonNames.BareDomain = true + addCnTests() + + roleVals.AllowedDomains = []string{"foobar.com", "*example.com"} + roleVals.AllowGlobDomains = true + commonNames.GlobDomain = true + addCnTests() + + roleVals.AllowAnyName = true + roleVals.EnforceHostnames = true + commonNames.AnyHost = true + commonNames.IDN = true + addCnTests() + + roleVals.EnforceHostnames = false + addCnTests() + + // Ensure that we end up with acceptable key sizes since they won't be + // toggled any longer + keybitSizeRandOff = true + addCnTests() + } + // Country tests + { + roleVals.Country = []string{"foo"} + addTests(getCountryCheck(roleVals)) + + roleVals.Country = []string{"foo", "bar"} + addTests(getCountryCheck(roleVals)) + } + // OU tests + { + roleVals.OU = []string{"foo"} + addTests(getOuCheck(roleVals)) + + roleVals.OU = []string{"bar", "foo"} + addTests(getOuCheck(roleVals)) + } + // Organization tests + { + roleVals.Organization = []string{"system:masters"} + addTests(getOrganizationCheck(roleVals)) + + roleVals.Organization = []string{"foo", "bar"} + addTests(getOrganizationCheck(roleVals)) + } + // Locality tests + { + roleVals.Locality = []string{"foo"} + addTests(getLocalityCheck(roleVals)) + + roleVals.Locality = []string{"foo", "bar"} + 
addTests(getLocalityCheck(roleVals)) + } + // Province tests + { + roleVals.Province = []string{"foo"} + addTests(getProvinceCheck(roleVals)) + + roleVals.Province = []string{"foo", "bar"} + addTests(getProvinceCheck(roleVals)) + } + // StreetAddress tests + { + roleVals.StreetAddress = []string{"123 foo street"} + addTests(getStreetAddressCheck(roleVals)) + + roleVals.StreetAddress = []string{"123 foo street", "456 bar avenue"} + addTests(getStreetAddressCheck(roleVals)) + } + // PostalCode tests + { + roleVals.PostalCode = []string{"f00"} + addTests(getPostalCodeCheck(roleVals)) + + roleVals.PostalCode = []string{"f00", "b4r"} + addTests(getPostalCodeCheck(roleVals)) + } + // NotBefore tests + { + roleVals.NotBeforeDuration = 10 * time.Second + addTests(getNotBeforeCheck(roleVals)) + + roleVals.NotBeforeDuration = 30 * time.Second + addTests(getNotBeforeCheck(roleVals)) + + roleVals.NotBeforeDuration = 0 + } + + // IP SAN tests + { + getIpCheck := func(expectedIp ...net.IP) logicaltest.TestCheckFunc { + return func(resp *logical.Response) error { + var certBundle certutil.CertBundle + err := mapstructure.Decode(resp.Data, &certBundle) + if err != nil { + return err + } + parsedCertBundle, err := certBundle.ToParsedCertBundle() + if err != nil { + return fmt.Errorf("error parsing cert bundle: %s", err) + } + cert := parsedCertBundle.Certificate + var expected []net.IP + expected = append(expected, expectedIp...) + if diff := deep.Equal(cert.IPAddresses, expected); len(diff) > 0 { + return fmt.Errorf("wrong SAN IPs, diff: %v", diff) + } + return nil + } + } + addIPSANTests := func(useCSRs, useCSRSANs, allowIPSANs, errorOk bool, ipSANs string, csrIPSANs []net.IP, check logicaltest.TestCheckFunc) { + if useCSRs { + csrTemplate := &x509.CertificateRequest{ + Subject: pkix.Name{ + CommonName: issueVals.CommonName, + }, + IPAddresses: csrIPSANs, + } + block, _ := getCsr(roleVals.KeyType, roleVals.KeyBits, csrTemplate) + issueVals.CSR = strings.TrimSpace(string(pem.EncodeToMemory(block))) + } + oldRoleVals, oldIssueVals, oldIssueTestStep := roleVals, issueVals, issueTestStep + roleVals.UseCSRSANs = useCSRSANs + roleVals.AllowIPSANs = allowIPSANs + issueVals.CommonName = "someone@example.com" + issueVals.IPSANs = ipSANs + issueTestStep.ErrorOk = errorOk + addTests(check) + roleVals, issueVals, issueTestStep = oldRoleVals, oldIssueVals, oldIssueTestStep + } + roleVals.AllowAnyName = true + roleVals.EnforceHostnames = true + roleVals.AllowLocalhost = true + roleVals.UseCSRCommonName = true + commonNames.Localhost = true + + netip1, netip2 := net.IP{127, 0, 0, 1}, net.IP{170, 171, 172, 173} + textip1, textip3 := "127.0.0.1", "::1" + + // IPSANs not allowed and not provided, should not be an error. + addIPSANTests(useCSRs, false, false, false, "", nil, getIpCheck()) + + // IPSANs not allowed, valid IPSANs provided, should be an error. + addIPSANTests(useCSRs, false, false, true, textip1+","+textip3, nil, nil) + + // IPSANs allowed, bogus IPSANs provided, should be an error. + addIPSANTests(useCSRs, false, true, true, "foobar", nil, nil) + + // Given IPSANs as API argument and useCSRSANs false, CSR arg ignored. + addIPSANTests(useCSRs, false, true, false, textip1, + []net.IP{netip2}, getIpCheck(netip1)) + + if useCSRs { + // IPSANs not allowed, valid IPSANs provided via CSR, should be an error. + addIPSANTests(useCSRs, true, false, true, "", []net.IP{netip1}, nil) + + // Given IPSANs as both API and CSR arguments and useCSRSANs=true, API arg ignored. 
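+			// (Illustrative expectation, based on the helper above: textip3 ("::1")
+			// arrives via the ip_sans API field, but because use_csr_sans is true on
+			// the role, only the CSR's IPs -- 127.0.0.1 and 170.171.172.173 -- should
+			// end up in the cert, which is what getIpCheck(netip1, netip2) asserts.)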
+			addIPSANTests(useCSRs, true, true, false, textip3,
+				[]net.IP{netip1, netip2}, getIpCheck(netip1, netip2))
+		}
+	}
+
+	{
+		getOtherCheck := func(expectedOthers ...otherNameUtf8) logicaltest.TestCheckFunc {
+			return func(resp *logical.Response) error {
+				var certBundle certutil.CertBundle
+				err := mapstructure.Decode(resp.Data, &certBundle)
+				if err != nil {
+					return err
+				}
+				parsedCertBundle, err := certBundle.ToParsedCertBundle()
+				if err != nil {
+					return fmt.Errorf("error parsing cert bundle: %s", err)
+				}
+				cert := parsedCertBundle.Certificate
+				foundOthers, err := getOtherSANsFromX509Extensions(cert.Extensions)
+				if err != nil {
+					return err
+				}
+				var expected []otherNameUtf8
+				expected = append(expected, expectedOthers...)
+				if diff := deep.Equal(foundOthers, expected); len(diff) > 0 {
+					return fmt.Errorf("wrong other SANs, diff: %v", diff)
+				}
+				return nil
+			}
+		}
+
+		addOtherSANTests := func(useCSRs, useCSRSANs bool, allowedOtherSANs []string, errorOk bool, otherSANs []string, csrOtherSANs []otherNameUtf8, check logicaltest.TestCheckFunc) {
+			otherSansMap := func(os []otherNameUtf8) map[string][]string {
+				ret := make(map[string][]string)
+				for _, o := range os {
+					ret[o.oid] = append(ret[o.oid], o.value)
+				}
+				return ret
+			}
+			if useCSRs {
+				csrTemplate := &x509.CertificateRequest{
+					Subject: pkix.Name{
+						CommonName: issueVals.CommonName,
+					},
+				}
+				if err := handleOtherCSRSANs(csrTemplate, otherSansMap(csrOtherSANs)); err != nil {
+					t.Fatal(err)
+				}
+				block, _ := getCsr(roleVals.KeyType, roleVals.KeyBits, csrTemplate)
+				issueVals.CSR = strings.TrimSpace(string(pem.EncodeToMemory(block)))
+			}
+			oldRoleVals, oldIssueVals, oldIssueTestStep := roleVals, issueVals, issueTestStep
+			roleVals.UseCSRSANs = useCSRSANs
+			roleVals.AllowedOtherSANs = allowedOtherSANs
+			issueVals.CommonName = "someone@example.com"
+			issueVals.OtherSANs = strings.Join(otherSANs, ",")
+			issueTestStep.ErrorOk = errorOk
+			addTests(check)
+			roleVals, issueVals, issueTestStep = oldRoleVals, oldIssueVals, oldIssueTestStep
+		}
+		roleVals.AllowAnyName = true
+		roleVals.EnforceHostnames = true
+		roleVals.AllowLocalhost = true
+		roleVals.UseCSRCommonName = true
+		commonNames.Localhost = true
+
+		newOtherNameUtf8 := func(s string) (ret otherNameUtf8) {
+			pieces := strings.Split(s, ";")
+			if len(pieces) == 2 {
+				piecesRest := strings.Split(pieces[1], ":")
+				if len(piecesRest) == 2 {
+					switch strings.ToUpper(piecesRest[0]) {
+					case "UTF-8", "UTF8":
+						return otherNameUtf8{oid: pieces[0], value: piecesRest[1]}
+					}
+				}
+			}
+			t.Fatalf("error parsing otherName: %q", s)
+			return
+		}
+		oid1 := "1.3.6.1.4.1.311.20.2.3"
+		oth1str := oid1 + ";utf8:devops@nope.com"
+		oth1 := newOtherNameUtf8(oth1str)
+		oth2 := otherNameUtf8{oid1, "me@example.com"}
+		// allowNone, allowAll := []string{}, []string{oid1 + ";UTF-8:*"}
+		allowNone, allowAll := []string{}, []string{"*"}
+
+		// OtherSANs not allowed and not provided, should not be an error.
+		addOtherSANTests(useCSRs, false, allowNone, false, nil, nil, getOtherCheck())
+
+		// OtherSANs not allowed, valid OtherSANs provided, should be an error.
+		addOtherSANTests(useCSRs, false, allowNone, true, []string{oth1str}, nil, nil)
+
+		// OtherSANs allowed, bogus OtherSANs provided, should be an error.
+		addOtherSANTests(useCSRs, false, allowAll, true, []string{"foobar"}, nil, nil)
+
+		// Given OtherSANs as API argument and useCSRSANs false, CSR arg ignored.
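+		// (Format note, as used throughout this file: each other_sans entry is
+		// "<oid>;UTF8:<value>", e.g. "1.3.6.1.4.1.311.20.2.3;utf8:devops@nope.com";
+		// newOtherNameUtf8 above accepts both "UTF8" and "UTF-8", case-insensitively.)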
+		addOtherSANTests(useCSRs, false, allowAll, false, []string{oth1str},
+			[]otherNameUtf8{oth2}, getOtherCheck(oth1))
+
+		if useCSRs {
+			// OtherSANs not allowed, valid OtherSANs provided via CSR, should be an error.
+			addOtherSANTests(useCSRs, true, allowNone, true, nil, []otherNameUtf8{oth1}, nil)
+
+			// Given OtherSANs as both API and CSR arguments and useCSRSANs=false, CSR arg ignored.
+			addOtherSANTests(useCSRs, false, allowAll, false, []string{oth2.String()},
+				[]otherNameUtf8{oth1}, getOtherCheck(oth2))
+		}
+	}
+
+	// Lease tests
+	{
+		roleTestStep.ErrorOk = true
+		roleVals.Lease = ""
+		roleVals.MaxTTL = 0
+		addTests(nil)
+
+		roleVals.Lease = "12h"
+		roleVals.MaxTTL = 6 * time.Hour
+		addTests(nil)
+
+		roleTestStep.ErrorOk = false
+		roleVals.TTL = 0
+		roleVals.MaxTTL = 12 * time.Hour
+	}
+
+	// Listing test
+	ret = append(ret, logicaltest.TestStep{
+		Operation: logical.ListOperation,
+		Path:      "roles/",
+		Check: func(resp *logical.Response) error {
+			if resp.Data == nil {
+				return fmt.Errorf("nil data")
+			}
+
+			keysRaw, ok := resp.Data["keys"]
+			if !ok {
+				return fmt.Errorf("no keys found")
+			}
+
+			keys, ok := keysRaw.([]string)
+			if !ok {
+				return fmt.Errorf("could not convert keys to a string list")
+			}
+
+			if len(keys) != 1 {
+				return fmt.Errorf("unexpected keys length of %d", len(keys))
+			}
+
+			if keys[0] != "test" {
+				return fmt.Errorf("unexpected key value of %s", keys[0])
+			}
+
+			return nil
+		},
+	})
+
+	return ret
+}
+
+func TestRolesAltIssuer(t *testing.T) {
+	t.Parallel()
+	b, s := CreateBackendWithStorage(t)
+
+	// Create two issuers.
+	resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{
+		"common_name": "root a - example.com",
+		"issuer_name": "root-a",
+		"key_type":    "ec",
+	})
+	require.NoError(t, err)
+	require.NotNil(t, resp)
+	rootAPem := resp.Data["certificate"].(string)
+	rootACert := parseCert(t, rootAPem)
+
+	resp, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{
+		"common_name": "root b - example.com",
+		"issuer_name": "root-b",
+		"key_type":    "ec",
+	})
+	require.NoError(t, err)
+	require.NotNil(t, resp)
+	rootBPem := resp.Data["certificate"].(string)
+	rootBCert := parseCert(t, rootBPem)
+
+	// Create three roles: one with no assignment, one with explicit root-a,
+	// one with explicit root-b.
+	_, err = CBWrite(b, s, "roles/use-default", map[string]interface{}{
+		"allow_any_name":    true,
+		"enforce_hostnames": false,
+		"key_type":          "ec",
+	})
+	require.NoError(t, err)
+
+	_, err = CBWrite(b, s, "roles/use-root-a", map[string]interface{}{
+		"allow_any_name":    true,
+		"enforce_hostnames": false,
+		"key_type":          "ec",
+		"issuer_ref":        "root-a",
+	})
+	require.NoError(t, err)
+
+	_, err = CBWrite(b, s, "roles/use-root-b", map[string]interface{}{
+		"allow_any_name":    true,
+		"enforce_hostnames": false,
+		"issuer_ref":        "root-b",
+	})
+	require.NoError(t, err)
+
+	// Now issue certs against these roles.
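+	// (Sketch of the expected issuer selection, assuming stock multi-issuer PKI
+	// behavior: a role with issuer_ref set always signs with that issuer, while
+	// a role without one falls back to the mount's current default issuer --
+	// root-a at this point, later flipped to root-b via config/issuers below.)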
+ resp, err = CBWrite(b, s, "issue/use-default", map[string]interface{}{ + "common_name": "testing", + "ttl": "5s", + }) + require.NoError(t, err) + leafPem := resp.Data["certificate"].(string) + leafCert := parseCert(t, leafPem) + err = leafCert.CheckSignatureFrom(rootACert) + require.NoError(t, err, "should be signed by root-a but wasn't") + + resp, err = CBWrite(b, s, "issue/use-root-a", map[string]interface{}{ + "common_name": "testing", + "ttl": "5s", + }) + require.NoError(t, err) + leafPem = resp.Data["certificate"].(string) + leafCert = parseCert(t, leafPem) + err = leafCert.CheckSignatureFrom(rootACert) + require.NoError(t, err, "should be signed by root-a but wasn't") + + resp, err = CBWrite(b, s, "issue/use-root-b", map[string]interface{}{ + "common_name": "testing", + "ttl": "5s", + }) + require.NoError(t, err) + leafPem = resp.Data["certificate"].(string) + leafCert = parseCert(t, leafPem) + err = leafCert.CheckSignatureFrom(rootBCert) + require.NoError(t, err, "should be signed by root-b but wasn't") + + // Update the default issuer to be root B and make sure that the + // use-default role updates. + _, err = CBWrite(b, s, "config/issuers", map[string]interface{}{ + "default": "root-b", + }) + require.NoError(t, err) + + resp, err = CBWrite(b, s, "issue/use-default", map[string]interface{}{ + "common_name": "testing", + "ttl": "5s", + }) + require.NoError(t, err) + leafPem = resp.Data["certificate"].(string) + leafCert = parseCert(t, leafPem) + err = leafCert.CheckSignatureFrom(rootBCert) + require.NoError(t, err, "should be signed by root-b but wasn't") +} + +func TestBackend_PathFetchValidRaw(t *testing.T) { + t.Parallel() + b, storage := CreateBackendWithStorage(t) + + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/generate/internal", + Storage: storage, + Data: map[string]interface{}{ + "common_name": "test.com", + "ttl": "6h", + }, + MountPoint: "pki/", + }) + require.NoError(t, err) + if resp != nil && resp.IsError() { + t.Fatalf("failed to generate root, %#v", resp) + } + rootCaAsPem := resp.Data["certificate"].(string) + + // Chain should contain the root. + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Path: "ca_chain", + Storage: storage, + Data: map[string]interface{}{}, + MountPoint: "pki/", + }) + require.NoError(t, err) + if resp != nil && resp.IsError() { + t.Fatalf("failed read ca_chain, %#v", resp) + } + if strings.Count(string(resp.Data[logical.HTTPRawBody].([]byte)), rootCaAsPem) != 1 { + t.Fatalf("expected raw chain to contain the root cert") + } + + // The ca/pem should return us the actual CA... 
+	resp, err = b.HandleRequest(context.Background(), &logical.Request{
+		Operation:  logical.ReadOperation,
+		Path:       "ca/pem",
+		Storage:    storage,
+		Data:       map[string]interface{}{},
+		MountPoint: "pki/",
+	})
+	schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("ca/pem"), logical.ReadOperation), resp, true)
+	require.NoError(t, err)
+	if resp != nil && resp.IsError() {
+		t.Fatalf("failed to read ca/pem, %#v", resp)
+	}
+	// check the raw cert matches the response body
+	if !bytes.Equal(resp.Data[logical.HTTPRawBody].([]byte), []byte(rootCaAsPem)) {
+		t.Fatalf("failed to get raw cert")
+	}
+
+	_, err = b.HandleRequest(context.Background(), &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "roles/example",
+		Storage:   storage,
+		Data: map[string]interface{}{
+			"allowed_domains":  "example.com",
+			"allow_subdomains": "true",
+			"max_ttl":          "1h",
+			"no_store":         "false",
+		},
+		MountPoint: "pki/",
+	})
+	require.NoError(t, err, "error setting up pki role: %v", err)
+
+	// Now issue a short-lived certificate from our example role.
+	resp, err = b.HandleRequest(context.Background(), &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "issue/example",
+		Storage:   storage,
+		Data: map[string]interface{}{
+			"common_name": "test.example.com",
+			"ttl":         "5m",
+		},
+		MountPoint: "pki/",
+	})
+	require.NoError(t, err, "error issuing certificate: %v", err)
+	require.NotNil(t, resp, "got nil response from issuing request")
+
+	issueCrtAsPem := resp.Data["certificate"].(string)
+	issuedCrt := parseCert(t, issueCrtAsPem)
+	expectedSerial := serialFromCert(issuedCrt)
+	expectedCert := []byte(issueCrtAsPem)
+
+	// get der cert
+	resp, err = b.HandleRequest(context.Background(), &logical.Request{
+		Operation: logical.ReadOperation,
+		Path:      fmt.Sprintf("cert/%s/raw", expectedSerial),
+		Storage:   storage,
+	})
+	if resp != nil && resp.IsError() {
+		t.Fatalf("failed to get raw cert, %#v", resp)
+	}
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// check the raw cert matches the response body
+	rawBody := resp.Data[logical.HTTPRawBody].([]byte)
+	bodyAsPem := []byte(strings.TrimSpace(string(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: rawBody}))))
+	if !bytes.Equal(bodyAsPem, expectedCert) {
+		t.Fatalf("failed to get raw cert for serial number: %s", expectedSerial)
+	}
+	if resp.Data[logical.HTTPContentType] != "application/pkix-cert" {
+		t.Fatalf("failed to get raw cert content-type")
+	}
+
+	// get pem
+	resp, err = b.HandleRequest(context.Background(), &logical.Request{
+		Operation: logical.ReadOperation,
+		Path:      fmt.Sprintf("cert/%s/raw/pem", expectedSerial),
+		Storage:   storage,
+	})
+	if resp != nil && resp.IsError() {
+		t.Fatalf("failed to get raw, %#v", resp)
+	}
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// check the pem cert matches the response body
+	if !bytes.Equal(resp.Data[logical.HTTPRawBody].([]byte), expectedCert) {
+		t.Fatalf("failed to get pem cert")
+	}
+	if resp.Data[logical.HTTPContentType] != "application/pem-certificate-chain" {
+		t.Fatalf("failed to get raw cert content-type")
+	}
+}
+
+func TestBackend_PathFetchCertList(t *testing.T) {
+	t.Parallel()
+	// create the backend
+	b, storage := CreateBackendWithStorage(t)
+
+	// generate root
+	rootData := map[string]interface{}{
+		"common_name": "test.com",
+		"ttl":         "6h",
+	}
+
+	resp, err := b.HandleRequest(context.Background(), &logical.Request{
+		Operation:  logical.UpdateOperation,
+		Path:       "root/generate/internal",
+		Storage:    storage,
+		Data:       rootData,
+		MountPoint: "pki/",
+	})
+
+	if resp != nil &&
resp.IsError() { + t.Fatalf("failed to generate root, %#v", resp) + } + if err != nil { + t.Fatal(err) + } + + // config urls + urlsData := map[string]interface{}{ + "issuing_certificates": "http://127.0.0.1:8200/v1/pki/ca", + "crl_distribution_points": "http://127.0.0.1:8200/v1/pki/crl", + } + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/urls", + Storage: storage, + Data: urlsData, + MountPoint: "pki/", + }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/urls"), logical.UpdateOperation), resp, true) + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Path: "config/urls", + Storage: storage, + MountPoint: "pki/", + }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/urls"), logical.ReadOperation), resp, true) + + if resp != nil && resp.IsError() { + t.Fatalf("failed to config urls, %#v", resp) + } + if err != nil { + t.Fatal(err) + } + + // create a role entry + roleData := map[string]interface{}{ + "allowed_domains": "test.com", + "allow_subdomains": "true", + "max_ttl": "4h", + } + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/test-example", + Storage: storage, + Data: roleData, + MountPoint: "pki/", + }) + if resp != nil && resp.IsError() { + t.Fatalf("failed to create a role, %#v", resp) + } + if err != nil { + t.Fatal(err) + } + + // issue some certs + i := 1 + for i < 10 { + certData := map[string]interface{}{ + "common_name": "example.test.com", + } + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "issue/test-example", + Storage: storage, + Data: certData, + MountPoint: "pki/", + }) + if resp != nil && resp.IsError() { + t.Fatalf("failed to issue a cert, %#v", resp) + } + if err != nil { + t.Fatal(err) + } + + i = i + 1 + } + + // list certs + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ListOperation, + Path: "certs", + Storage: storage, + MountPoint: "pki/", + }) + if resp != nil && resp.IsError() { + t.Fatalf("failed to list certs, %#v", resp) + } + if err != nil { + t.Fatal(err) + } + // check that the root and 9 additional certs are all listed + if len(resp.Data["keys"].([]string)) != 10 { + t.Fatalf("failed to list all 10 certs") + } + + // list certs/ + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ListOperation, + Path: "certs/", + Storage: storage, + MountPoint: "pki/", + }) + if resp != nil && resp.IsError() { + t.Fatalf("failed to list certs, %#v", resp) + } + if err != nil { + t.Fatal(err) + } + // check that the root and 9 additional certs are all listed + if len(resp.Data["keys"].([]string)) != 10 { + t.Fatalf("failed to list all 10 certs") + } +} + +func TestBackend_SignVerbatim(t *testing.T) { + t.Parallel() + testCases := []struct { + testName string + keyType string + }{ + {testName: "RSA", keyType: "rsa"}, + {testName: "ED25519", keyType: "ed25519"}, + {testName: "EC", keyType: "ec"}, + {testName: "Any", keyType: "any"}, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + runTestSignVerbatim(t, tc.keyType) + }) + } +} + +func runTestSignVerbatim(t *testing.T, keyType string) { + // create the backend + b, storage := CreateBackendWithStorage(t) + + // generate root + rootData := map[string]interface{}{ + 
"common_name": "test.com", + "not_after": "9999-12-31T23:59:59Z", + } + + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/generate/internal", + Storage: storage, + Data: rootData, + MountPoint: "pki/", + }) + if resp != nil && resp.IsError() { + t.Fatalf("failed to generate root, %#v", *resp) + } + if err != nil { + t.Fatal(err) + } + + // create a CSR and key + key, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatal(err) + } + csrReq := &x509.CertificateRequest{ + Subject: pkix.Name{ + CommonName: "foo.bar.com", + }, + // Check that otherName extensions are not duplicated (see hashicorp/vault#16700). + // If these extensions are duplicated, sign-verbatim will fail when parsing the signed certificate on Go 1.19+ (see golang/go#50988). + // On older versions of Go this test will fail due to an explicit check for duplicate otherNames later in this test. + ExtraExtensions: []pkix.Extension{ + { + Id: oidExtensionSubjectAltName, + Critical: false, + Value: []byte{0x30, 0x26, 0xA0, 0x24, 0x06, 0x0A, 0x2B, 0x06, 0x01, 0x04, 0x01, 0x82, 0x37, 0x14, 0x02, 0x03, 0xA0, 0x16, 0x0C, 0x14, 0x75, 0x73, 0x65, 0x72, 0x6E, 0x61, 0x6D, 0x65, 0x40, 0x65, 0x78, 0x61, 0x6D, 0x70, 0x6C, 0x65, 0x2E, 0x63, 0x6F, 0x6D}, + }, + }, + } + csr, err := x509.CreateCertificateRequest(rand.Reader, csrReq, key) + if err != nil { + t.Fatal(err) + } + if len(csr) == 0 { + t.Fatal("generated csr is empty") + } + pemCSR := strings.TrimSpace(string(pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE REQUEST", + Bytes: csr, + }))) + if len(pemCSR) == 0 { + t.Fatal("pem csr is empty") + } + + signVerbatimData := map[string]interface{}{ + "csr": pemCSR, + } + if keyType == "rsa" { + signVerbatimData["signature_bits"] = 512 + } + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "sign-verbatim", + Storage: storage, + Data: signVerbatimData, + MountPoint: "pki/", + }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("sign-verbatim"), logical.UpdateOperation), resp, true) + + if resp != nil && resp.IsError() { + t.Fatalf("failed to sign-verbatim basic CSR: %#v", *resp) + } + if err != nil { + t.Fatal(err) + } + if resp.Secret != nil { + t.Fatal("secret is not nil") + } + + // create a role entry; we use this to check that sign-verbatim when used with a role is still honoring TTLs + roleData := map[string]interface{}{ + "ttl": "4h", + "max_ttl": "8h", + "key_type": keyType, + "not_before_duration": "2h", + } + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/test", + Storage: storage, + Data: roleData, + MountPoint: "pki/", + }) + if resp != nil && resp.IsError() { + t.Fatalf("failed to create a role, %#v", *resp) + } + if err != nil { + t.Fatal(err) + } + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "sign-verbatim/test", + Storage: storage, + Data: map[string]interface{}{ + "csr": pemCSR, + "ttl": "5h", + }, + MountPoint: "pki/", + }) + if resp != nil && resp.IsError() { + t.Fatalf("failed to sign-verbatim ttl'd CSR: %#v", *resp) + } + if err != nil { + t.Fatal(err) + } + if resp.Secret != nil { + t.Fatal("got a lease when we should not have") + } + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "sign-verbatim/test", + Storage: storage, + Data: 
map[string]interface{}{
+			"csr": pemCSR,
+			"ttl": "12h",
+		},
+		MountPoint: "pki/",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp != nil && resp.IsError() {
+		t.Fatal(resp.Error().Error())
+	}
+	if resp.Data == nil || resp.Data["certificate"] == nil {
+		t.Fatal("did not get expected data")
+	}
+	certString := resp.Data["certificate"].(string)
+	block, _ := pem.Decode([]byte(certString))
+	if block == nil {
+		t.Fatal("nil pem block")
+	}
+	certs, err := x509.ParseCertificates(block.Bytes)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(certs) != 1 {
+		t.Fatalf("expected a single cert, got %d", len(certs))
+	}
+	cert := certs[0]
+	if math.Abs(float64(time.Now().Add(12*time.Hour).Unix()-cert.NotAfter.Unix())) < 10 {
+		t.Fatalf("sign-verbatim did not properly cap validity period (notAfter) on signed CSR: was %v vs requested %v but should've been %v", cert.NotAfter, time.Now().Add(12*time.Hour), time.Now().Add(8*time.Hour))
+	}
+	if math.Abs(float64(time.Now().Add(-2*time.Hour).Unix()-cert.NotBefore.Unix())) > 10 {
+		t.Fatalf("sign-verbatim did not properly cap validity period (notBefore) on signed CSR: was %v vs expected %v", cert.NotBefore, time.Now().Add(-2*time.Hour))
+	}
+
+	// Now check signing a certificate using the not_after input with the Y10K value
+	resp, err = b.HandleRequest(context.Background(), &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "sign-verbatim/test",
+		Storage:   storage,
+		Data: map[string]interface{}{
+			"csr":       pemCSR,
+			"not_after": "9999-12-31T23:59:59Z",
+		},
+		MountPoint: "pki/",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp != nil && resp.IsError() {
+		t.Fatal(resp.Error().Error())
+	}
+	if resp.Data == nil || resp.Data["certificate"] == nil {
+		t.Fatal("did not get expected data")
+	}
+	certString = resp.Data["certificate"].(string)
+	block, _ = pem.Decode([]byte(certString))
+	if block == nil {
+		t.Fatal("nil pem block")
+	}
+	certs, err = x509.ParseCertificates(block.Bytes)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(certs) != 1 {
+		t.Fatalf("expected a single cert, got %d", len(certs))
+	}
+	cert = certs[0]
+
+	// Fallback check for duplicate otherName, necessary on Go versions before 1.19.
+	// We assume that there is only one SAN in the original CSR and that it is an otherName.
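+	// (The check below counts SAN *extensions* rather than SAN values: a cert
+	// exhibiting the duplication bug would carry two extensions with the
+	// subjectAltName OID (2.5.29.17), so exactly one is the invariant asserted.)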
+	san_count := 0
+	for _, ext := range cert.Extensions {
+		if ext.Id.Equal(oidExtensionSubjectAltName) {
+			san_count += 1
+		}
+	}
+	if san_count != 1 {
+		t.Fatalf("expected one SAN extension, got %d", san_count)
+	}
+
+	notAfter := cert.NotAfter.Format(time.RFC3339)
+	if notAfter != "9999-12-31T23:59:59Z" {
+		t.Fatal(fmt.Errorf("notAfter from certificate does not match the input parameter"))
+	}
+
+	// Now check that if we set generate_lease, the lease comes from the role and the TTLs match
+	roleData = map[string]interface{}{
+		"ttl":            "4h",
+		"max_ttl":        "8h",
+		"generate_lease": true,
+		"key_type":       keyType,
+	}
+	resp, err = b.HandleRequest(context.Background(), &logical.Request{
+		Operation:  logical.UpdateOperation,
+		Path:       "roles/test",
+		Storage:    storage,
+		Data:       roleData,
+		MountPoint: "pki/",
+	})
+	if resp != nil && resp.IsError() {
+		t.Fatalf("failed to create a role, %#v", *resp)
+	}
+	if err != nil {
+		t.Fatal(err)
+	}
+	resp, err = b.HandleRequest(context.Background(), &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "sign-verbatim/test",
+		Storage:   storage,
+		Data: map[string]interface{}{
+			"csr": pemCSR,
+			"ttl": "5h",
+		},
+		MountPoint: "pki/",
+	})
+	if resp != nil && resp.IsError() {
+		t.Fatalf("failed to sign-verbatim role-leased CSR: %#v", *resp)
+	}
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp.Secret == nil {
+		t.Fatalf("secret is nil, response is %#v", *resp)
+	}
+	if math.Abs(float64(resp.Secret.TTL-(5*time.Hour))) > float64(5*time.Hour) {
+		t.Fatalf("ttl not default; wanted %v, got %v", b.System().DefaultLeaseTTL(), resp.Secret.TTL)
+	}
+}
+
+func TestBackend_Root_Idempotency(t *testing.T) {
+	t.Parallel()
+	b, s := CreateBackendWithStorage(t)
+
+	// This is a change within 1.11; we are no longer idempotent across generate/internal calls.
+	resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{
+		"common_name": "myvault.com",
+	})
+	require.NoError(t, err)
+	require.NotNil(t, resp, "expected ca info")
+	keyId1 := resp.Data["key_id"]
+	issuerId1 := resp.Data["issuer_id"]
+	cert := parseCert(t, resp.Data["certificate"].(string))
+	certSkid := certutil.GetHexFormatted(cert.SubjectKeyId, ":")
+
+	// -> Validate the SKID matches between the root cert and the key
+	resp, err = CBRead(b, s, "key/"+keyId1.(keyID).String())
+	require.NoError(t, err)
+	require.NotNil(t, resp, "expected a response")
+	require.Equal(t, resp.Data["subject_key_id"], certSkid)
+
+	resp, err = CBRead(b, s, "cert/ca_chain")
+	require.NoError(t, err, "error reading ca_chain: %v", err)
+
+	r1Data := resp.Data
+
+	// Calling generate/internal should generate a new CA as well.
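+	// (Sketch of the expected bookkeeping, assuming multi-issuer behavior: the
+	// second root/generate/internal call should mint a fresh key_id/issuer_id
+	// pair while leaving the first issuer in place rather than replacing it.)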
+ resp, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "myvault.com", + }) + require.NoError(t, err) + require.NotNil(t, resp, "expected ca info") + keyId2 := resp.Data["key_id"] + issuerId2 := resp.Data["issuer_id"] + cert = parseCert(t, resp.Data["certificate"].(string)) + certSkid = certutil.GetHexFormatted(cert.SubjectKeyId, ":") + + // -> Validate the SKID matches between the root cert and the key + resp, err = CBRead(b, s, "key/"+keyId2.(keyID).String()) + require.NoError(t, err) + require.NotNil(t, resp, "expected a response") + require.Equal(t, resp.Data["subject_key_id"], certSkid) + + // Make sure that we actually generated different issuer and key values + require.NotEqual(t, keyId1, keyId2) + require.NotEqual(t, issuerId1, issuerId2) + + // Now because the issued CA's have no links, the call to ca_chain should return the same data (ca chain from default) + resp, err = CBRead(b, s, "cert/ca_chain") + require.NoError(t, err, "error reading ca_chain: %v", err) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("cert/ca_chain"), logical.ReadOperation), resp, true) + + r2Data := resp.Data + if !reflect.DeepEqual(r1Data, r2Data) { + t.Fatal("got different ca certs") + } + + // Now let's validate that the import bundle is idempotent. + pemBundleRootCA := rootCACertPEM + "\n" + rootCAKeyPEM + resp, err = CBWrite(b, s, "config/ca", map[string]interface{}{ + "pem_bundle": pemBundleRootCA, + }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/ca"), logical.UpdateOperation), resp, true) + + require.NoError(t, err) + require.NotNil(t, resp, "expected ca info") + firstMapping := resp.Data["mapping"].(map[string]string) + firstImportedKeys := resp.Data["imported_keys"].([]string) + firstImportedIssuers := resp.Data["imported_issuers"].([]string) + firstExistingKeys := resp.Data["existing_keys"].([]string) + firstExistingIssuers := resp.Data["existing_issuers"].([]string) + + require.NotContains(t, firstImportedKeys, keyId1) + require.NotContains(t, firstImportedKeys, keyId2) + require.NotContains(t, firstImportedIssuers, issuerId1) + require.NotContains(t, firstImportedIssuers, issuerId2) + require.Empty(t, firstExistingKeys) + require.Empty(t, firstExistingIssuers) + require.NotEmpty(t, firstMapping) + require.Equal(t, 1, len(firstMapping)) + + var issuerId3 string + var keyId3 string + for i, k := range firstMapping { + issuerId3 = i + keyId3 = k + } + + // Performing this again should result in no key/issuer ids being imported/generated. + resp, err = CBWrite(b, s, "config/ca", map[string]interface{}{ + "pem_bundle": pemBundleRootCA, + }) + require.NoError(t, err) + require.NotNil(t, resp, "expected ca info") + secondMapping := resp.Data["mapping"].(map[string]string) + secondImportedKeys := resp.Data["imported_keys"] + secondImportedIssuers := resp.Data["imported_issuers"] + secondExistingKeys := resp.Data["existing_keys"] + secondExistingIssuers := resp.Data["existing_issuers"] + + require.Empty(t, secondImportedKeys) + require.Empty(t, secondImportedIssuers) + require.Contains(t, secondExistingKeys, keyId3) + require.Contains(t, secondExistingIssuers, issuerId3) + require.Equal(t, 1, len(secondMapping)) + + resp, err = CBDelete(b, s, "root") + require.NoError(t, err) + require.NotNil(t, resp) + require.Equal(t, 1, len(resp.Warnings)) + + // Make sure we can delete twice... 
+	resp, err = CBDelete(b, s, "root")
+	require.NoError(t, err)
+	require.NotNil(t, resp)
+	require.Equal(t, 1, len(resp.Warnings))
+
+	_, err = CBRead(b, s, "cert/ca_chain")
+	require.Error(t, err, "expected an error fetching deleted ca_chain")
+
+	// We should be able to import the same ca bundle as before and get different key/issuer ids
+	resp, err = CBWrite(b, s, "config/ca", map[string]interface{}{
+		"pem_bundle": pemBundleRootCA,
+	})
+	require.NoError(t, err)
+	require.NotNil(t, resp, "expected ca info")
+	postDeleteImportedKeys := resp.Data["imported_keys"]
+	postDeleteImportedIssuers := resp.Data["imported_issuers"]
+
+	// Make sure that we actually generated different issuer and key values than the previous import
+	require.NotNil(t, postDeleteImportedKeys)
+	require.NotNil(t, postDeleteImportedIssuers)
+	require.NotEqual(t, postDeleteImportedKeys, firstImportedKeys)
+	require.NotEqual(t, postDeleteImportedIssuers, firstImportedIssuers)
+
+	resp, err = CBRead(b, s, "cert/ca_chain")
+	require.NoError(t, err)
+
+	caChainPostDelete := resp.Data
+	if reflect.DeepEqual(r1Data, caChainPostDelete) {
+		t.Fatal("ca certs from ca_chain were the same post-delete; they should have changed.")
+	}
+}
+
+func TestBackend_SignIntermediate_AllowedPastCAValidity(t *testing.T) {
+	t.Parallel()
+	b_root, s_root := CreateBackendWithStorage(t)
+	b_int, s_int := CreateBackendWithStorage(t)
+	var err error
+
+	// Direct issuing from root
+	_, err = CBWrite(b_root, s_root, "root/generate/internal", map[string]interface{}{
+		"ttl":         "40h",
+		"common_name": "myvault.com",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = CBWrite(b_root, s_root, "roles/test", map[string]interface{}{
+		"allow_bare_domains": true,
+		"allow_subdomains":   true,
+		"allow_any_name":     true,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := CBWrite(b_int, s_int, "intermediate/generate/internal", map[string]interface{}{
+		"common_name": "myint.com",
+	})
+	schema.ValidateResponse(t, schema.GetResponseSchema(t, b_root.Route("intermediate/generate/internal"), logical.UpdateOperation), resp, true)
+	require.Contains(t, resp.Data, "key_id")
+	intKeyId := resp.Data["key_id"].(keyID)
+	csr := resp.Data["csr"]
+
+	resp, err = CBRead(b_int, s_int, "key/"+intKeyId.String())
+	require.NoError(t, err)
+	require.NotNil(t, resp, "expected a response")
+	intSkid := resp.Data["subject_key_id"].(string)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = CBWrite(b_root, s_root, "sign/test", map[string]interface{}{
+		"common_name": "myint.com",
+		"csr":         csr,
+		"ttl":         "60h",
+	})
+	require.ErrorContains(t, err, "that is beyond the expiration of the CA certificate")
+
+	_, err = CBWrite(b_root, s_root, "sign-verbatim/test", map[string]interface{}{
+		"common_name": "myint.com",
+		"other_sans":  "1.3.6.1.4.1.311.20.2.3;utf8:caadmin@example.com",
+		"csr":         csr,
+		"ttl":         "60h",
+	})
+	require.ErrorContains(t, err, "that is beyond the expiration of the CA certificate")
+
+	resp, err = CBWrite(b_root, s_root, "root/sign-intermediate", map[string]interface{}{
+		"common_name": "myint.com",
+		"other_sans":  "1.3.6.1.4.1.311.20.2.3;utf8:caadmin@example.com",
+		"csr":         csr,
+		"ttl":         "60h",
+	})
+	if err != nil {
+		t.Fatalf("got error: %v", err)
+	}
+	if resp == nil {
+		t.Fatal("got nil response")
+	}
+	if len(resp.Warnings) == 0 {
+		t.Fatalf("expected warnings, got %#v", *resp)
+	}
+
+	cert := parseCert(t, resp.Data["certificate"].(string))
+	certSkid := certutil.GetHexFormatted(cert.SubjectKeyId, ":")
+	require.Equal(t, intSkid, certSkid)
+}
+
+func
TestBackend_ConsulSignLeafWithLegacyRole(t *testing.T) { + t.Parallel() + // create the backend + b, s := CreateBackendWithStorage(t) + + // generate root + data, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "myvault.com", + }) + require.NoError(t, err, "failed generating internal root cert") + rootCaPem := data.Data["certificate"].(string) + + // Create a signing role like Consul did with the default args prior to Vault 1.10 + _, err = CBWrite(b, s, "roles/test", map[string]interface{}{ + "allow_any_name": true, + "allowed_serial_numbers": []string{"MySerialNumber"}, + "key_type": "any", + "key_bits": "2048", + "signature_bits": "256", + }) + require.NoError(t, err, "failed creating legacy role") + + _, csrPem := generateTestCsr(t, certutil.ECPrivateKey, 256) + data, err = CBWrite(b, s, "sign/test", map[string]interface{}{ + "csr": csrPem, + }) + require.NoError(t, err, "failed signing csr") + certAsPem := data.Data["certificate"].(string) + + signedCert := parseCert(t, certAsPem) + rootCert := parseCert(t, rootCaPem) + requireSignedBy(t, signedCert, rootCert) +} + +func TestBackend_SignSelfIssued(t *testing.T) { + t.Parallel() + // create the backend + b, storage := CreateBackendWithStorage(t) + + // generate root + rootData := map[string]interface{}{ + "common_name": "test.com", + "ttl": "172800", + } + + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/generate/internal", + Storage: storage, + Data: rootData, + MountPoint: "pki/", + }) + if resp != nil && resp.IsError() { + t.Fatalf("failed to generate root, %#v", *resp) + } + if err != nil { + t.Fatal(err) + } + + key, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatal(err) + } + + template := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "foo.bar.com", + }, + SerialNumber: big.NewInt(1234), + IsCA: false, + BasicConstraintsValid: true, + } + + ss, _ := getSelfSigned(t, template, template, key) + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/sign-self-issued", + Storage: storage, + Data: map[string]interface{}{ + "certificate": ss, + }, + MountPoint: "pki/", + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("got nil response") + } + if !resp.IsError() { + t.Fatalf("expected error due to non-CA; got: %#v", *resp) + } + + // Set CA to true, but leave issuer alone + template.IsCA = true + + issuer := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "bar.foo.com", + }, + SerialNumber: big.NewInt(2345), + IsCA: true, + BasicConstraintsValid: true, + } + ss, ssCert := getSelfSigned(t, template, issuer, key) + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/sign-self-issued", + Storage: storage, + Data: map[string]interface{}{ + "certificate": ss, + }, + MountPoint: "pki/", + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("got nil response") + } + if !resp.IsError() { + t.Fatalf("expected error due to different issuer; cert info is\nIssuer\n%#v\nSubject\n%#v\n", ssCert.Issuer, ssCert.Subject) + } + + ss, _ = getSelfSigned(t, template, template, key) + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/sign-self-issued", + Storage: storage, + Data: map[string]interface{}{ + "certificate": ss, + }, + MountPoint: "pki/", + }) + 
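+	// (This third attempt submits a genuinely self-issued CA -- subject equals
+	// issuer and IsCA is true -- which is the input root/sign-self-issued is
+	// designed for, so the response validated below should be a success.)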
schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("root/sign-self-issued"), logical.UpdateOperation), resp, true) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("got nil response") + } + if resp.IsError() { + t.Fatalf("error in response: %s", resp.Error().Error()) + } + + newCertString := resp.Data["certificate"].(string) + block, _ := pem.Decode([]byte(newCertString)) + newCert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + t.Fatal(err) + } + + sc := b.makeStorageContext(context.Background(), storage) + signingBundle, err := sc.fetchCAInfo(defaultRef, ReadOnlyUsage) + if err != nil { + t.Fatal(err) + } + if reflect.DeepEqual(newCert.Subject, newCert.Issuer) { + t.Fatal("expected different subject/issuer") + } + if !reflect.DeepEqual(newCert.Issuer, signingBundle.Certificate.Subject) { + t.Fatalf("expected matching issuer/CA subject\n\nIssuer:\n%#v\nSubject:\n%#v\n", newCert.Issuer, signingBundle.Certificate.Subject) + } + if bytes.Equal(newCert.AuthorityKeyId, newCert.SubjectKeyId) { + t.Fatal("expected different authority/subject") + } + if !bytes.Equal(newCert.AuthorityKeyId, signingBundle.Certificate.SubjectKeyId) { + t.Fatal("expected authority on new cert to be same as signing subject") + } + if newCert.Subject.CommonName != "foo.bar.com" { + t.Fatalf("unexpected common name on new cert: %s", newCert.Subject.CommonName) + } +} + +// TestBackend_SignSelfIssued_DifferentTypes tests the functionality of the +// require_matching_certificate_algorithms flag. +func TestBackend_SignSelfIssued_DifferentTypes(t *testing.T) { + t.Parallel() + // create the backend + b, storage := CreateBackendWithStorage(t) + + // generate root + rootData := map[string]interface{}{ + "common_name": "test.com", + "ttl": "172800", + "key_type": "ec", + "key_bits": "521", + } + + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/generate/internal", + Storage: storage, + Data: rootData, + MountPoint: "pki/", + }) + if resp != nil && resp.IsError() { + t.Fatalf("failed to generate root, %#v", *resp) + } + if err != nil { + t.Fatal(err) + } + + key, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatal(err) + } + + template := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "foo.bar.com", + }, + SerialNumber: big.NewInt(1234), + IsCA: true, + BasicConstraintsValid: true, + } + + // Tests absent the flag + ss, _ := getSelfSigned(t, template, template, key) + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/sign-self-issued", + Storage: storage, + Data: map[string]interface{}{ + "certificate": ss, + }, + MountPoint: "pki/", + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("got nil response") + } + + // Set CA to true, but leave issuer alone + template.IsCA = true + + // Tests with flag present but false + ss, _ = getSelfSigned(t, template, template, key) + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/sign-self-issued", + Storage: storage, + Data: map[string]interface{}{ + "certificate": ss, + "require_matching_certificate_algorithms": false, + }, + MountPoint: "pki/", + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("got nil response") + } + + // Test with flag present and true + ss, _ = getSelfSigned(t, template, template, key) + _, err = b.HandleRequest(context.Background(), 
&logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/sign-self-issued", + Storage: storage, + Data: map[string]interface{}{ + "certificate": ss, + "require_matching_certificate_algorithms": true, + }, + MountPoint: "pki/", + }) + if err == nil { + t.Fatal("expected error due to mismatched algorithms") + } +} + +// This is a really tricky test because the Go stdlib asn1 package is incapable +// of doing the right thing with custom OID SANs (see comments in the package, +// it's readily admitted that it's too magic) but that means that any +// validation logic written for this test isn't being independently verified, +// as in, if cryptobytes is used to decode it to make the test work, that +// doesn't mean we're encoding and decoding correctly, only that we made the +// test pass. Instead, when run verbosely it will first perform a bunch of +// checks to verify that the OID SAN logic doesn't screw up other SANs, then +// will spit out the PEM. This can be validated independently. +// +// You want the hex dump of the octet string corresponding to the X509v3 +// Subject Alternative Name. There's a nice online utility at +// https://lapo.it/asn1js that can be used to view the structure of an +// openssl-generated other SAN at +// https://lapo.it/asn1js/#3022A020060A2B060104018237140203A0120C106465766F7073406C6F63616C686F7374 +// (openssl asn1parse can also be used with -strparse using an offset of the +// hex blob for the subject alternative names extension). +// +// The structure output from here should match that precisely (even if the OID +// itself doesn't) in the second test. +// +// The test that encodes two should have them be in separate elements in the +// top-level sequence; see +// https://lapo.it/asn1js/#3046A020060A2B060104018237140203A0120C106465766F7073406C6F63616C686F7374A022060A2B060104018237140204A0140C12322D6465766F7073406C6F63616C686F7374 for an openssl-generated example. +// +// The good news is that it's valid to simply copy and paste the PEM output from +// here into the form at that site as it will do the right thing so it's pretty +// easy to validate. +func TestBackend_OID_SANs(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + var err error + var resp *logical.Response + var certStr string + var block *pem.Block + var cert *x509.Certificate + + _, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "myvault.com", + }) + if err != nil { + t.Fatal(err) + } + + _, err = CBWrite(b, s, "roles/test", map[string]interface{}{ + "allowed_domains": []string{"foobar.com", "zipzap.com"}, + "allow_bare_domains": true, + "allow_subdomains": true, + "allow_ip_sans": true, + "allowed_other_sans": "1.3.6.1.4.1.311.20.2.3;UTF8:devops@*,1.3.6.1.4.1.311.20.2.4;utf8:d*e@foobar.com", + }) + if err != nil { + t.Fatal(err) + } + + // Get a baseline before adding OID SANs. 
In the next sections we'll verify + // that the SANs are all added even as the OID SAN inclusion forces other + // adding logic (custom rather than built-in Golang logic) + resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar.com", + "ip_sans": "1.2.3.4", + "alt_names": "foobar.com,foo.foobar.com,bar.foobar.com", + "ttl": "1h", + }) + if err != nil { + t.Fatal(err) + } + certStr = resp.Data["certificate"].(string) + block, _ = pem.Decode([]byte(certStr)) + cert, err = x509.ParseCertificate(block.Bytes) + if err != nil { + t.Fatal(err) + } + if cert.IPAddresses[0].String() != "1.2.3.4" { + t.Fatalf("unexpected IP SAN %q", cert.IPAddresses[0].String()) + } + if len(cert.DNSNames) != 3 || + cert.DNSNames[0] != "bar.foobar.com" || + cert.DNSNames[1] != "foo.foobar.com" || + cert.DNSNames[2] != "foobar.com" { + t.Fatalf("unexpected DNS SANs %v", cert.DNSNames) + } + + // First test some bad stuff that shouldn't work + _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar.com", + "ip_sans": "1.2.3.4", + "alt_names": "foo.foobar.com,bar.foobar.com", + "ttl": "1h", + // Not a valid value for the first possibility + "other_sans": "1.3.6.1.4.1.311.20.2.3;UTF8:devop@nope.com", + }) + if err == nil { + t.Fatal("expected error") + } + + _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar.com", + "ip_sans": "1.2.3.4", + "alt_names": "foo.foobar.com,bar.foobar.com", + "ttl": "1h", + // Not a valid OID for the first possibility + "other_sans": "1.3.6.1.4.1.311.20.2.5;UTF8:devops@nope.com", + }) + if err == nil { + t.Fatal("expected error") + } + + _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar.com", + "ip_sans": "1.2.3.4", + "alt_names": "foo.foobar.com,bar.foobar.com", + "ttl": "1h", + // Not a valid name for the second possibility + "other_sans": "1.3.6.1.4.1.311.20.2.4;UTF8:d34g@foobar.com", + }) + if err == nil { + t.Fatal("expected error") + } + + _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar.com", + "ip_sans": "1.2.3.4", + "alt_names": "foo.foobar.com,bar.foobar.com", + "ttl": "1h", + // Not a valid OID for the second possibility + "other_sans": "1.3.6.1.4.1.311.20.2.5;UTF8:d34e@foobar.com", + }) + if err == nil { + t.Fatal("expected error") + } + + _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar.com", + "ip_sans": "1.2.3.4", + "alt_names": "foo.foobar.com,bar.foobar.com", + "ttl": "1h", + // Not a valid type + "other_sans": "1.3.6.1.4.1.311.20.2.5;UTF2:d34e@foobar.com", + }) + if err == nil { + t.Fatal("expected error") + } + + // Valid for first possibility + resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar.com", + "ip_sans": "1.2.3.4", + "alt_names": "foo.foobar.com,bar.foobar.com", + "ttl": "1h", + "other_sans": "1.3.6.1.4.1.311.20.2.3;utf8:devops@nope.com", + }) + if err != nil { + t.Fatal(err) + } + certStr = resp.Data["certificate"].(string) + block, _ = pem.Decode([]byte(certStr)) + cert, err = x509.ParseCertificate(block.Bytes) + if err != nil { + t.Fatal(err) + } + if cert.IPAddresses[0].String() != "1.2.3.4" { + t.Fatalf("unexpected IP SAN %q", cert.IPAddresses[0].String()) + } + if len(cert.DNSNames) != 3 || + cert.DNSNames[0] != "bar.foobar.com" || + cert.DNSNames[1] != "foo.foobar.com" || + cert.DNSNames[2] != "foobar.com" { + t.Fatalf("unexpected DNS SANs %v", cert.DNSNames) + } + if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 { 
+ t.Logf("certificate 1 to check:\n%s", certStr) + } + + // Valid for second possibility + resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar.com", + "ip_sans": "1.2.3.4", + "alt_names": "foo.foobar.com,bar.foobar.com", + "ttl": "1h", + "other_sans": "1.3.6.1.4.1.311.20.2.4;UTF8:d234e@foobar.com", + }) + if err != nil { + t.Fatal(err) + } + certStr = resp.Data["certificate"].(string) + block, _ = pem.Decode([]byte(certStr)) + cert, err = x509.ParseCertificate(block.Bytes) + if err != nil { + t.Fatal(err) + } + if cert.IPAddresses[0].String() != "1.2.3.4" { + t.Fatalf("unexpected IP SAN %q", cert.IPAddresses[0].String()) + } + if len(cert.DNSNames) != 3 || + cert.DNSNames[0] != "bar.foobar.com" || + cert.DNSNames[1] != "foo.foobar.com" || + cert.DNSNames[2] != "foobar.com" { + t.Fatalf("unexpected DNS SANs %v", cert.DNSNames) + } + if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 { + t.Logf("certificate 2 to check:\n%s", certStr) + } + + // Valid for both + oid1, type1, val1 := "1.3.6.1.4.1.311.20.2.3", "utf8", "devops@nope.com" + oid2, type2, val2 := "1.3.6.1.4.1.311.20.2.4", "utf-8", "d234e@foobar.com" + otherNames := []string{ + fmt.Sprintf("%s;%s:%s", oid1, type1, val1), + fmt.Sprintf("%s;%s:%s", oid2, type2, val2), + } + resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar.com", + "ip_sans": "1.2.3.4", + "alt_names": "foo.foobar.com,bar.foobar.com", + "ttl": "1h", + "other_sans": strings.Join(otherNames, ","), + }) + if err != nil { + t.Fatal(err) + } + certStr = resp.Data["certificate"].(string) + block, _ = pem.Decode([]byte(certStr)) + cert, err = x509.ParseCertificate(block.Bytes) + if err != nil { + t.Fatal(err) + } + if cert.IPAddresses[0].String() != "1.2.3.4" { + t.Fatalf("unexpected IP SAN %q", cert.IPAddresses[0].String()) + } + if len(cert.DNSNames) != 3 || + cert.DNSNames[0] != "bar.foobar.com" || + cert.DNSNames[1] != "foo.foobar.com" || + cert.DNSNames[2] != "foobar.com" { + t.Fatalf("unexpected DNS SANs %v", cert.DNSNames) + } + expectedOtherNames := []otherNameUtf8{{oid1, val1}, {oid2, val2}} + foundOtherNames, err := getOtherSANsFromX509Extensions(cert.Extensions) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(expectedOtherNames, foundOtherNames); len(diff) != 0 { + t.Errorf("unexpected otherNames: %v", diff) + } + if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 { + t.Logf("certificate 3 to check:\n%s", certStr) + } +} + +func TestBackend_AllowedSerialNumbers(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + var err error + var resp *logical.Response + var certStr string + var block *pem.Block + var cert *x509.Certificate + + _, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "myvault.com", + }) + if err != nil { + t.Fatal(err) + } + + // First test that Serial Numbers are not allowed + _, err = CBWrite(b, s, "roles/test", map[string]interface{}{ + "allow_any_name": true, + "enforce_hostnames": false, + }) + if err != nil { + t.Fatal(err) + } + + _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar", + "ttl": "1h", + }) + if err != nil { + t.Fatal(err) + } + + _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar", + "ttl": "1h", + "serial_number": "foobar", + }) + if err == nil { + t.Fatal("expected error") + } + + // Update the role to allow serial numbers + _, err = CBWrite(b, s, "roles/test", map[string]interface{}{ + "allow_any_name": 
true, + "enforce_hostnames": false, + "allowed_serial_numbers": "f00*,b4r*", + }) + if err != nil { + t.Fatal(err) + } + + _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar", + "ttl": "1h", + // Not a valid serial number + "serial_number": "foobar", + }) + if err == nil { + t.Fatal("expected error") + } + + // Valid for first possibility + resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar", + "serial_number": "f00bar", + }) + if err != nil { + t.Fatal(err) + } + certStr = resp.Data["certificate"].(string) + block, _ = pem.Decode([]byte(certStr)) + cert, err = x509.ParseCertificate(block.Bytes) + if err != nil { + t.Fatal(err) + } + if cert.Subject.SerialNumber != "f00bar" { + t.Fatalf("unexpected Subject SerialNumber %s", cert.Subject.SerialNumber) + } + if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 { + t.Logf("certificate 1 to check:\n%s", certStr) + } + + // Valid for second possibility + resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar", + "serial_number": "b4rf00", + }) + if err != nil { + t.Fatal(err) + } + certStr = resp.Data["certificate"].(string) + block, _ = pem.Decode([]byte(certStr)) + cert, err = x509.ParseCertificate(block.Bytes) + if err != nil { + t.Fatal(err) + } + if cert.Subject.SerialNumber != "b4rf00" { + t.Fatalf("unexpected Subject SerialNumber %s", cert.Subject.SerialNumber) + } + if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 { + t.Logf("certificate 2 to check:\n%s", certStr) + } +} + +func TestBackend_URI_SANs(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + var err error + + _, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "myvault.com", + }) + if err != nil { + t.Fatal(err) + } + + _, err = CBWrite(b, s, "roles/test", map[string]interface{}{ + "allowed_domains": []string{"foobar.com", "zipzap.com"}, + "allow_bare_domains": true, + "allow_subdomains": true, + "allow_ip_sans": true, + "allowed_uri_sans": []string{"http://someuri/abc", "spiffe://host.com/*"}, + }) + if err != nil { + t.Fatal(err) + } + + // First test some bad stuff that shouldn't work + _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar.com", + "ip_sans": "1.2.3.4", + "alt_names": "foo.foobar.com,bar.foobar.com", + "ttl": "1h", + "uri_sans": "http://www.mydomain.com/zxf", + }) + if err == nil { + t.Fatal("expected error") + } + + // Test valid single entry + _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar.com", + "ip_sans": "1.2.3.4", + "alt_names": "foo.foobar.com,bar.foobar.com", + "ttl": "1h", + "uri_sans": "http://someuri/abc", + }) + if err != nil { + t.Fatal(err) + } + + // Test globbed entry + _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar.com", + "ip_sans": "1.2.3.4", + "alt_names": "foo.foobar.com,bar.foobar.com", + "ttl": "1h", + "uri_sans": "spiffe://host.com/something", + }) + if err != nil { + t.Fatal(err) + } + + // Test multiple entries + resp, err := CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar.com", + "ip_sans": "1.2.3.4", + "alt_names": "foo.foobar.com,bar.foobar.com", + "ttl": "1h", + "uri_sans": "spiffe://host.com/something,http://someuri/abc", + }) + if err != nil { + t.Fatal(err) + } + + certStr := resp.Data["certificate"].(string) + block, _ := pem.Decode([]byte(certStr)) + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil {
+ t.Fatal(err) + } + + URI0, _ := url.Parse("spiffe://host.com/something") + URI1, _ := url.Parse("http://someuri/abc") + + if len(cert.URIs) != 2 { + t.Fatalf("expected 2 valid URI SANs %v", cert.URIs) + } + + if cert.URIs[0].String() != URI0.String() || cert.URIs[1].String() != URI1.String() { + t.Fatalf( + "expected URI SANs %v to equal provided values spiffe://host.com/something, http://someuri/abc", + cert.URIs) + } +} + +func TestBackend_AllowedURISANsTemplate(t *testing.T) { + t.Parallel() + coreConfig := &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "userpass": userpass.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + + // Write test policy for userpass auth method. + err := client.Sys().PutPolicy("test", ` + path "pki/*" { + capabilities = ["update"] + }`) + if err != nil { + t.Fatal(err) + } + + // Enable userpass auth method. + if err := client.Sys().EnableAuth("userpass", "userpass", ""); err != nil { + t.Fatal(err) + } + + // Configure test role for userpass. + if _, err := client.Logical().Write("auth/userpass/users/userpassname", map[string]interface{}{ + "password": "test", + "policies": "test", + }); err != nil { + t.Fatal(err) + } + + // Login userpass for test role and keep client token. + secret, err := client.Logical().Write("auth/userpass/login/userpassname", map[string]interface{}{ + "password": "test", + }) + if err != nil || secret == nil { + t.Fatal(err) + } + userpassToken := secret.Auth.ClientToken + + // Get auth accessor for identity template. + auths, err := client.Sys().ListAuth() + if err != nil { + t.Fatal(err) + } + userpassAccessor := auths["userpass/"].Accessor + + // Mount PKI. + err = client.Sys().Mount("pki", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "60h", + }, + }) + if err != nil { + t.Fatal(err) + } + + // Generate internal CA. + _, err = client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "myvault.com", + }) + if err != nil { + t.Fatal(err) + } + + // Write role PKI. + _, err = client.Logical().Write("pki/roles/test", map[string]interface{}{ + "allowed_uri_sans": []string{ + "spiffe://domain/{{identity.entity.aliases." + userpassAccessor + ".name}}", + "spiffe://domain/{{identity.entity.aliases." + userpassAccessor + ".name}}/*", "spiffe://domain/foo", + }, + "allowed_uri_sans_template": true, + "require_cn": false, + }) + if err != nil { + t.Fatal(err) + } + + // Issue certificate with identity templating + client.SetToken(userpassToken) + _, err = client.Logical().Write("pki/issue/test", map[string]interface{}{"uri_sans": "spiffe://domain/userpassname, spiffe://domain/foo"}) + if err != nil { + t.Fatal(err) + } + + // Issue certificate with identity templating and glob + client.SetToken(userpassToken) + _, err = client.Logical().Write("pki/issue/test", map[string]interface{}{"uri_sans": "spiffe://domain/userpassname/bar"}) + if err != nil { + t.Fatal(err) + } + + // Issue certificate with non-matching identity template parameter + client.SetToken(userpassToken) + _, err = client.Logical().Write("pki/issue/test", map[string]interface{}{"uri_sans": "spiffe://domain/unknownuser"}) + if err == nil { + t.Fatal("expected error") + } + + // Set allowed_uri_sans_template to false.
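+ // With templating disabled, the {{identity.entity.aliases...}} entries in + // allowed_uri_sans are treated as literal strings rather than expanded per + // entity, so the issuance below should be rejected.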
+ _, err = client.Logical().Write("pki/roles/test", map[string]interface{}{ + "allowed_uri_sans_template": false, + }) + if err != nil { + t.Fatal(err) + } + + // Issue certificate with userpassToken. + _, err = client.Logical().Write("pki/issue/test", map[string]interface{}{"uri_sans": "spiffe://domain/users/userpassname"}) + if err == nil { + t.Fatal("expected error") + } +} + +func TestBackend_AllowedDomainsTemplate(t *testing.T) { + t.Parallel() + coreConfig := &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "userpass": userpass.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + + // Write test policy for userpass auth method. + err := client.Sys().PutPolicy("test", ` + path "pki/*" { + capabilities = ["update"] + }`) + if err != nil { + t.Fatal(err) + } + + // Enable userpass auth method. + if err := client.Sys().EnableAuth("userpass", "userpass", ""); err != nil { + t.Fatal(err) + } + + // Configure test role for userpass. + if _, err := client.Logical().Write("auth/userpass/users/userpassname", map[string]interface{}{ + "password": "test", + "policies": "test", + }); err != nil { + t.Fatal(err) + } + + // Login userpass for test role and set client token + userpassAuth, err := auth.NewUserpassAuth("userpassname", &auth.Password{FromString: "test"}) + if err != nil { + t.Fatal(err) + } + + // Get auth accessor for identity template. + auths, err := client.Sys().ListAuth() + if err != nil { + t.Fatal(err) + } + userpassAccessor := auths["userpass/"].Accessor + + // Mount PKI. + err = client.Sys().Mount("pki", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "60h", + }, + }) + if err != nil { + t.Fatal(err) + } + + // Generate internal CA. + _, err = client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "myvault.com", + }) + if err != nil { + t.Fatal(err) + } + + // Write role PKI. + _, err = client.Logical().Write("pki/roles/test", map[string]interface{}{ + "allowed_domains": []string{ + "foobar.com", "zipzap.com", "{{identity.entity.aliases." + userpassAccessor + ".name}}", + "foo.{{identity.entity.aliases." + userpassAccessor + ".name}}.example.com", + }, + "allowed_domains_template": true, + "allow_bare_domains": true, + }) + if err != nil { + t.Fatal(err) + } + + // Issue certificate with userpassToken. + secret, err := client.Auth().Login(context.TODO(), userpassAuth) + if err != nil { + t.Fatal(err) + } + if err != nil || secret == nil { + t.Fatal(err) + } + _, err = client.Logical().Write("pki/issue/test", map[string]interface{}{"common_name": "userpassname"}) + if err != nil { + t.Fatal(err) + } + + // Issue certificate for foobar.com to verify allowed_domain_template doesn't break plain domains. + _, err = client.Logical().Write("pki/issue/test", map[string]interface{}{"common_name": "foobar.com"}) + if err != nil { + t.Fatal(err) + } + + // Issue certificate for unknown userpassname. + _, err = client.Logical().Write("pki/issue/test", map[string]interface{}{"common_name": "unknownuserpassname"}) + if err == nil { + t.Fatal("expected error") + } + + // Issue certificate for foo.userpassname.domain. 
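+ // With templating enabled, the role's foo.{{identity.entity.aliases.<accessor>.name}}.example.com + // entry expands to foo.userpassname.example.com for this entity, so this + // issuance should succeed.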
+ _, err = client.Logical().Write("pki/issue/test", map[string]interface{}{"common_name": "foo.userpassname.example.com"}) + if err != nil { + t.Fatal(err) + } + + // Set allowed_domains_template to false. + _, err = client.Logical().Write("pki/roles/test", map[string]interface{}{ + "allowed_domains_template": false, + }) + if err != nil { + t.Fatal(err) + } + + // Issue certificate with userpassToken. + _, err = client.Logical().Write("pki/issue/test", map[string]interface{}{"common_name": "userpassname"}) + if err == nil { + t.Fatal("expected error") + } +} + +func TestReadWriteDeleteRoles(t *testing.T) { + t.Parallel() + ctx := context.Background() + coreConfig := &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "userpass": userpass.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + + // Mount PKI. + err := client.Sys().MountWithContext(ctx, "pki", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "60h", + }, + }) + if err != nil { + t.Fatal(err) + } + + resp, err := client.Logical().ReadWithContext(ctx, "pki/roles/test") + if err != nil { + t.Fatal(err) + } + + if resp != nil { + t.Fatalf("response should have been empty but was:\n%#v", resp) + } + + // Write role PKI. + _, err = client.Logical().WriteWithContext(ctx, "pki/roles/test", map[string]interface{}{}) + if err != nil { + t.Fatal(err) + } + + // Read the role. + resp, err = client.Logical().ReadWithContext(ctx, "pki/roles/test") + if err != nil { + t.Fatal(err) + } + + if resp.Data == nil { + t.Fatal("default data within response was nil when it should have contained data") + } + + // Validate that we have not changed any defaults unknowingly + expectedData := map[string]interface{}{ + "key_type": "rsa", + "use_csr_sans": true, + "client_flag": true, + "allowed_serial_numbers": []interface{}{}, + "generate_lease": false, + "signature_bits": json.Number("256"), + "use_pss": false, + "allowed_domains": []interface{}{}, + "allowed_uri_sans_template": false, + "enforce_hostnames": true, + "policy_identifiers": []interface{}{}, + "require_cn": true, + "allowed_domains_template": false, + "allow_token_displayname": false, + "country": []interface{}{}, + "not_after": "", + "postal_code": []interface{}{}, + "use_csr_common_name": true, + "allow_localhost": true, + "allow_subdomains": false, + "allow_wildcard_certificates": true, + "allowed_other_sans": []interface{}{}, + "allowed_uri_sans": []interface{}{}, + "basic_constraints_valid_for_non_ca": false, + "key_usage": []interface{}{"DigitalSignature", "KeyAgreement", "KeyEncipherment"}, + "not_before_duration": json.Number("30"), + "allow_glob_domains": false, + "ttl": json.Number("0"), + "ou": []interface{}{}, + "email_protection_flag": false, + "locality": []interface{}{}, + "server_flag": true, + "allow_bare_domains": false, + "allow_ip_sans": true, + "ext_key_usage_oids": []interface{}{}, + "allow_any_name": false, + "ext_key_usage": []interface{}{}, + "key_bits": json.Number("2048"), + "max_ttl": json.Number("0"), + "no_store": false, + "organization": []interface{}{}, + "province": []interface{}{}, + "street_address": []interface{}{}, + "code_signing_flag": false, + "issuer_ref": "default", + "cn_validations": []interface{}{"email", "hostname"}, +
"allowed_user_ids": []interface{}{}, + } + + if diff := deep.Equal(expectedData, resp.Data); len(diff) > 0 { + t.Fatalf("pki role default values have changed, diff: %v", diff) + } + + _, err = client.Logical().DeleteWithContext(ctx, "pki/roles/test") + if err != nil { + t.Fatal(err) + } + + resp, err = client.Logical().ReadWithContext(ctx, "pki/roles/test") + if err != nil { + t.Fatal(err) + } + + if resp != nil { + t.Fatalf("response should have been empty but was:\n%#v", resp) + } +} + +func setCerts() { + cak, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + panic(err) + } + marshaledKey, err := x509.MarshalECPrivateKey(cak) + if err != nil { + panic(err) + } + keyPEMBlock := &pem.Block{ + Type: "EC PRIVATE KEY", + Bytes: marshaledKey, + } + ecCAKey = strings.TrimSpace(string(pem.EncodeToMemory(keyPEMBlock))) + if err != nil { + panic(err) + } + subjKeyID, err := certutil.GetSubjKeyID(cak) + if err != nil { + panic(err) + } + caCertTemplate := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "root.localhost", + }, + SubjectKeyId: subjKeyID, + DNSNames: []string{"root.localhost"}, + KeyUsage: x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign), + SerialNumber: big.NewInt(mathrand.Int63()), + NotAfter: time.Now().Add(262980 * time.Hour), + BasicConstraintsValid: true, + IsCA: true, + } + caBytes, err := x509.CreateCertificate(rand.Reader, caCertTemplate, caCertTemplate, cak.Public(), cak) + if err != nil { + panic(err) + } + caCertPEMBlock := &pem.Block{ + Type: "CERTIFICATE", + Bytes: caBytes, + } + ecCACert = strings.TrimSpace(string(pem.EncodeToMemory(caCertPEMBlock))) + + rak, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + panic(err) + } + marshaledKey = x509.MarshalPKCS1PrivateKey(rak) + keyPEMBlock = &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: marshaledKey, + } + rsaCAKey = strings.TrimSpace(string(pem.EncodeToMemory(keyPEMBlock))) + if err != nil { + panic(err) + } + _, err = certutil.GetSubjKeyID(rak) + if err != nil { + panic(err) + } + caBytes, err = x509.CreateCertificate(rand.Reader, caCertTemplate, caCertTemplate, rak.Public(), rak) + if err != nil { + panic(err) + } + caCertPEMBlock = &pem.Block{ + Type: "CERTIFICATE", + Bytes: caBytes, + } + rsaCACert = strings.TrimSpace(string(pem.EncodeToMemory(caCertPEMBlock))) + + _, edk, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + panic(err) + } + marshaledKey, err = x509.MarshalPKCS8PrivateKey(edk) + if err != nil { + panic(err) + } + keyPEMBlock = &pem.Block{ + Type: "PRIVATE KEY", + Bytes: marshaledKey, + } + edCAKey = strings.TrimSpace(string(pem.EncodeToMemory(keyPEMBlock))) + if err != nil { + panic(err) + } + _, err = certutil.GetSubjKeyID(edk) + if err != nil { + panic(err) + } + caBytes, err = x509.CreateCertificate(rand.Reader, caCertTemplate, caCertTemplate, edk.Public(), edk) + if err != nil { + panic(err) + } + caCertPEMBlock = &pem.Block{ + Type: "CERTIFICATE", + Bytes: caBytes, + } + edCACert = strings.TrimSpace(string(pem.EncodeToMemory(caCertPEMBlock))) +} + +func TestBackend_RevokePlusTidy_Intermediate(t *testing.T) { + // Use a ridiculously long time to minimize the chance + // that we have to deal with more than one interval. + // InMemSink rounds down to an interval boundary rather than + // starting one at the time of initialization. + // + // This test is not parallelizable. 
+ inmemSink := metrics.NewInmemSink( + 1000000*time.Hour, + 2000000*time.Hour) + + metricsConf := metrics.DefaultConfig("") + metricsConf.EnableHostname = false + metricsConf.EnableHostnameLabel = false + metricsConf.EnableServiceLabel = false + metricsConf.EnableTypePrefix = false + + _, err := metrics.NewGlobal(metricsConf, inmemSink) + if err != nil { + t.Fatal(err) + } + + // Enable PKI secret engine + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + cores := cluster.Cores + vault.TestWaitActive(t, cores[0].Core) + client := cores[0].Client + + // Mount /pki as a root CA + err = client.Sys().Mount("pki", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "32h", + }, + }) + if err != nil { + t.Fatal(err) + } + + // Set up Metric Configuration, then restart to enable it + _, err = client.Logical().Write("pki/config/auto-tidy", map[string]interface{}{ + "maintain_stored_certificate_counts": true, + "publish_stored_certificate_count_metrics": true, + }) + _, err = client.Logical().Write("/sys/plugins/reload/backend", map[string]interface{}{ + "mounts": "pki/", + }) + + // Check the metrics initialized in order to calculate backendUUID for /pki + // BackendUUID not consistent during tests with UUID from /sys/mounts/pki + metricsSuffix := "total_certificates_stored" + backendUUID := "" + mostRecentInterval := inmemSink.Data()[len(inmemSink.Data())-1] + for _, existingGauge := range mostRecentInterval.Gauges { + if strings.HasSuffix(existingGauge.Name, metricsSuffix) { + expandedGaugeName := existingGauge.Name + backendUUID = strings.Split(expandedGaugeName, ".")[2] + break + } + } + if backendUUID == "" { + t.Fatalf("No Gauge Found ending with %s", metricsSuffix) + } + + // Set the cluster's certificate as the root CA in /pki + pemBundleRootCA := string(cluster.CACertPEM) + string(cluster.CAKeyPEM) + _, err = client.Logical().Write("pki/config/ca", map[string]interface{}{ + "pem_bundle": pemBundleRootCA, + }) + if err != nil { + t.Fatal(err) + } + + // Mount /pki2 to operate as an intermediate CA + err = client.Sys().Mount("pki2", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "32h", + }, + }) + if err != nil { + t.Fatal(err) + } + // Set up Metric Configuration, then restart to enable it + _, err = client.Logical().Write("pki2/config/auto-tidy", map[string]interface{}{ + "maintain_stored_certificate_counts": true, + "publish_stored_certificate_count_metrics": true, + }) + _, err = client.Logical().Write("/sys/plugins/reload/backend", map[string]interface{}{ + "mounts": "pki2/", + }) + + // Create a CSR for the intermediate CA + secret, err := client.Logical().Write("pki2/intermediate/generate/internal", nil) + if err != nil { + t.Fatal(err) + } + intermediateCSR := secret.Data["csr"].(string) + + // Sign the intermediate CSR using /pki + secret, err = client.Logical().Write("pki/root/sign-intermediate", map[string]interface{}{ + "permitted_dns_domains": ".myvault.com", + "csr": intermediateCSR, + "ttl": "10s", + }) + if err != nil { + t.Fatal(err) + } + intermediateCertSerial := secret.Data["serial_number"].(string) + intermediateCASerialColon := strings.ReplaceAll(strings.ToLower(intermediateCertSerial), ":", "-") + + // Get the intermediate cert after signing + 
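// (Stored certs are keyed by the hyphen-separated serial computed above; + // despite its name, intermediateCASerialColon has ":" replaced with "-".) +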
secret, err = client.Logical().Read("pki/cert/" + intermediateCASerialColon) + if err != nil { + t.Fatal(err) + } + + if secret == nil || len(secret.Data) == 0 || len(secret.Data["certificate"].(string)) == 0 { + t.Fatal("expected certificate information from read operation") + } + + // Issue a revoke on /pki + _, err = client.Logical().Write("pki/revoke", map[string]interface{}{ + "serial_number": intermediateCertSerial, + }) + if err != nil { + t.Fatal(err) + } + + // Check the cert-count metrics + expectedCertCountGaugeMetrics := map[string]float32{ + "secrets.pki." + backendUUID + ".total_revoked_certificates_stored": 1, + "secrets.pki." + backendUUID + ".total_certificates_stored": 1, + } + mostRecentInterval = inmemSink.Data()[len(inmemSink.Data())-1] + for gauge, value := range expectedCertCountGaugeMetrics { + if _, ok := mostRecentInterval.Gauges[gauge]; !ok { + t.Fatalf("Expected metrics to include a value for gauge %s", gauge) + } + if value != mostRecentInterval.Gauges[gauge].Value { + t.Fatalf("Expected value metric %s to be %f but got %f", gauge, value, mostRecentInterval.Gauges[gauge].Value) + } + } + + // Revoke adds a fixed 2s buffer, so we sleep for a bit longer to ensure + // the revocation time is past the current time. + time.Sleep(3 * time.Second) + + // Issue a tidy on /pki + _, err = client.Logical().Write("pki/tidy", map[string]interface{}{ + "tidy_cert_store": true, + "tidy_revoked_certs": true, + "safety_buffer": "1s", + }) + if err != nil { + t.Fatal(err) + } + + // Sleep a bit to make sure we're past the safety buffer + time.Sleep(2 * time.Second) + + // Get CRL and ensure the tidied cert is still in the list after the tidy + // operation since it's not past the NotAfter (ttl) value yet. + crl := getParsedCrl(t, client, "pki") + + revokedCerts := crl.TBSCertList.RevokedCertificates + if len(revokedCerts) == 0 { + t.Fatal("expected CRL to be non-empty") + } + + sn := certutil.GetHexFormatted(revokedCerts[0].SerialNumber.Bytes(), ":") + if sn != intermediateCertSerial { + t.Fatalf("expected: %v, got: %v", intermediateCertSerial, sn) + } + + // Wait for cert to expire + time.Sleep(10 * time.Second) + + // Issue a tidy on /pki + _, err = client.Logical().Write("pki/tidy", map[string]interface{}{ + "tidy_cert_store": true, + "tidy_revoked_certs": true, + "safety_buffer": "1s", + }) + if err != nil { + t.Fatal(err) + } + + // Sleep a bit to make sure we're past the safety buffer + time.Sleep(2 * time.Second) + + // Issue a tidy-status on /pki + { + tidyStatus, err := client.Logical().Read("pki/tidy-status") + if err != nil { + t.Fatal(err) + } + expectedData := map[string]interface{}{ + "safety_buffer": json.Number("1"), + "issuer_safety_buffer": json.Number("31536000"), + "revocation_queue_safety_buffer": json.Number("172800"), + "tidy_cert_store": true, + "tidy_revoked_certs": true, + "tidy_revoked_cert_issuer_associations": false, + "tidy_expired_issuers": false, + "tidy_move_legacy_ca_bundle": false, + "tidy_revocation_queue": false, + "tidy_cross_cluster_revoked_certs": false, + "pause_duration": "0s", + "state": "Finished", + "error": nil, + "time_started": nil, + "time_finished": nil, + "last_auto_tidy_finished": nil, + "message": nil, + "cert_store_deleted_count": json.Number("1"), + "revoked_cert_deleted_count": json.Number("1"), + "missing_issuer_cert_count": json.Number("0"), + "current_cert_store_count": json.Number("0"), + "current_revoked_cert_count": json.Number("0"), + "revocation_queue_deleted_count": json.Number("0"), +
"cross_revoked_cert_deleted_count": json.Number("0"), + "internal_backend_uuid": backendUUID, + "tidy_acme": false, + "acme_account_safety_buffer": json.Number("2592000"), + "acme_orders_deleted_count": json.Number("0"), + "acme_account_revoked_count": json.Number("0"), + "acme_account_deleted_count": json.Number("0"), + "total_acme_account_count": json.Number("0"), + } + // Let's copy the times from the response so that we can use deep.Equal() + timeStarted, ok := tidyStatus.Data["time_started"] + if !ok || timeStarted == "" { + t.Fatal("Expected tidy status response to include a value for time_started") + } + expectedData["time_started"] = timeStarted + timeFinished, ok := tidyStatus.Data["time_finished"] + if !ok || timeFinished == "" { + t.Fatal("Expected tidy status response to include a value for time_finished") + } + expectedData["time_finished"] = timeFinished + expectedData["last_auto_tidy_finished"] = tidyStatus.Data["last_auto_tidy_finished"] + + if diff := deep.Equal(expectedData, tidyStatus.Data); diff != nil { + t.Fatal(diff) + } + } + // Check the tidy metrics + { + // Map of gauges to expected value + expectedGauges := map[string]float32{ + "secrets.pki.tidy.cert_store_current_entry": 0, + "secrets.pki.tidy.cert_store_total_entries": 1, + "secrets.pki.tidy.revoked_cert_current_entry": 0, + "secrets.pki.tidy.revoked_cert_total_entries": 1, + "secrets.pki.tidy.start_time_epoch": 0, + "secrets.pki." + backendUUID + ".total_certificates_stored": 0, + "secrets.pki." + backendUUID + ".total_revoked_certificates_stored": 0, + "secrets.pki.tidy.cert_store_total_entries_remaining": 0, + "secrets.pki.tidy.revoked_cert_total_entries_remaining": 0, + } + // Map of counters to the sum of the metrics for that counter + expectedCounters := map[string]float64{ + "secrets.pki.tidy.cert_store_deleted_count": 1, + "secrets.pki.tidy.revoked_cert_deleted_count": 1, + "secrets.pki.tidy.success": 2, + // Note that "secrets.pki.tidy.failure" won't be in the captured metrics + } + + // If the metrics span more than one interval, skip the checks + intervals := inmemSink.Data() + if len(intervals) == 1 { + interval := inmemSink.Data()[0] + + for gauge, value := range expectedGauges { + if _, ok := interval.Gauges[gauge]; !ok { + t.Fatalf("Expected metrics to include a value for gauge %s", gauge) + } + if value != interval.Gauges[gauge].Value { + t.Fatalf("Expected value metric %s to be %f but got %f", gauge, value, interval.Gauges[gauge].Value) + } + + } + for counter, value := range expectedCounters { + if _, ok := interval.Counters[counter]; !ok { + t.Fatalf("Expected metrics to include a value for couter %s", counter) + } + if value != interval.Counters[counter].Sum { + t.Fatalf("Expected the sum of metric %s to be %f but got %f", counter, value, interval.Counters[counter].Sum) + } + } + + tidyDuration, ok := interval.Samples["secrets.pki.tidy.duration"] + if !ok { + t.Fatal("Expected metrics to include a value for sample secrets.pki.tidy.duration") + } + if tidyDuration.Count <= 0 { + t.Fatalf("Expected metrics to have count > 0 for sample secrets.pki.tidy.duration, but got %d", tidyDuration.Count) + } + } + } + + crl = getParsedCrl(t, client, "pki") + + revokedCerts = crl.TBSCertList.RevokedCertificates + if len(revokedCerts) != 0 { + t.Fatal("expected CRL to be empty") + } +} + +func TestBackend_Root_FullCAChain(t *testing.T) { + t.Parallel() + testCases := []struct { + testName string + keyType string + }{ + {testName: "RSA", keyType: "rsa"}, + {testName: "ED25519", keyType: "ed25519"}, + 
{testName: "EC", keyType: "ec"}, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + runFullCAChainTest(t, tc.keyType) + }) + } +} + +func runFullCAChainTest(t *testing.T, keyType string) { + // Generate a root CA at /pki-root + b_root, s_root := CreateBackendWithStorage(t) + + var err error + + resp, err := CBWrite(b_root, s_root, "root/generate/exported", map[string]interface{}{ + "common_name": "root myvault.com", + "key_type": keyType, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected ca info") + } + rootData := resp.Data + rootCert := rootData["certificate"].(string) + + // Validate that root's /cert/ca-chain now contains the certificate. + resp, err = CBRead(b_root, s_root, "cert/ca_chain") + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected intermediate chain information") + } + + fullChain := resp.Data["ca_chain"].(string) + requireCertInCaChainString(t, fullChain, rootCert, "expected root cert within root cert/ca_chain") + + // Make sure when we issue a leaf certificate we get the full chain back. + _, err = CBWrite(b_root, s_root, "roles/example", map[string]interface{}{ + "allowed_domains": "example.com", + "allow_subdomains": "true", + "max_ttl": "1h", + }) + require.NoError(t, err, "error setting up pki root role: %v", err) + + resp, err = CBWrite(b_root, s_root, "issue/example", map[string]interface{}{ + "common_name": "test.example.com", + "ttl": "5m", + }) + require.NoError(t, err, "error issuing certificate from pki root: %v", err) + fullChainArray := resp.Data["ca_chain"].([]string) + requireCertInCaChainArray(t, fullChainArray, rootCert, "expected root cert within root issuance pki-root/issue/example") + + // Now generate an intermediate at /pki-intermediate, signed by the root. + b_int, s_int := CreateBackendWithStorage(t) + + resp, err = CBWrite(b_int, s_int, "intermediate/generate/exported", map[string]interface{}{ + "common_name": "intermediate myvault.com", + "key_type": keyType, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected intermediate CSR info") + } + intermediateData := resp.Data + intermediateKey := intermediateData["private_key"].(string) + + resp, err = CBWrite(b_root, s_root, "root/sign-intermediate", map[string]interface{}{ + "csr": intermediateData["csr"], + "format": "pem", + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected signed intermediate info") + } + intermediateSignedData := resp.Data + intermediateCert := intermediateSignedData["certificate"].(string) + + rootCaCert := parseCert(t, rootCert) + intermediaryCaCert := parseCert(t, intermediateCert) + requireSignedBy(t, intermediaryCaCert, rootCaCert) + intermediateCaChain := intermediateSignedData["ca_chain"].([]string) + + require.Equal(t, parseCert(t, intermediateCaChain[0]), intermediaryCaCert, "intermediate signed cert should have been part of ca_chain") + require.Equal(t, parseCert(t, intermediateCaChain[1]), rootCaCert, "root cert should have been part of ca_chain") + + _, err = CBWrite(b_int, s_int, "intermediate/set-signed", map[string]interface{}{ + "certificate": intermediateCert + "\n" + rootCert + "\n", + }) + if err != nil { + t.Fatal(err) + } + + // Validate that intermediate's ca_chain field now includes the full + // chain. 
+ resp, err = CBRead(b_int, s_int, "cert/ca_chain") + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected intermediate chain information") + } + + // Verify we have a proper CRL now + crl := getParsedCrlFromBackend(t, b_int, s_int, "crl") + require.Equal(t, 0, len(crl.TBSCertList.RevokedCertificates)) + + fullChain = resp.Data["ca_chain"].(string) + requireCertInCaChainString(t, fullChain, intermediateCert, "expected full chain to contain intermediate certificate from pki-intermediate/cert/ca_chain") + requireCertInCaChainString(t, fullChain, rootCert, "expected full chain to contain root certificate from pki-intermediate/cert/ca_chain") + + // Make sure when we issue a leaf certificate we get the full chain back. + _, err = CBWrite(b_int, s_int, "roles/example", map[string]interface{}{ + "allowed_domains": "example.com", + "allow_subdomains": "true", + "max_ttl": "1h", + }) + require.NoError(t, err, "error setting up pki intermediate role: %v", err) + + resp, err = CBWrite(b_int, s_int, "issue/example", map[string]interface{}{ + "common_name": "test.example.com", + "ttl": "5m", + }) + require.NoError(t, err, "error issuing certificate from pki intermediate: %v", err) + fullChainArray = resp.Data["ca_chain"].([]string) + requireCertInCaChainArray(t, fullChainArray, intermediateCert, "expected full chain to contain intermediate certificate from pki-intermediate/issue/example") + requireCertInCaChainArray(t, fullChainArray, rootCert, "expected full chain to contain root certificate from pki-intermediate/issue/example") + + // Finally, import this signing cert chain into a new mount to ensure + // "external" CAs behave as expected. + b_ext, s_ext := CreateBackendWithStorage(t) + + _, err = CBWrite(b_ext, s_ext, "config/ca", map[string]interface{}{ + "pem_bundle": intermediateKey + "\n" + intermediateCert + "\n" + rootCert + "\n", + }) + if err != nil { + t.Fatal(err) + } + + // Validate the external chain information was loaded correctly. + resp, err = CBRead(b_ext, s_ext, "cert/ca_chain") + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected intermediate chain information") + } + + fullChain = resp.Data["ca_chain"].(string) + if strings.Count(fullChain, intermediateCert) != 1 { + t.Fatalf("expected full chain to contain intermediate certificate; got %v occurrences", strings.Count(fullChain, intermediateCert)) + } + if strings.Count(fullChain, rootCert) != 1 { + t.Fatalf("expected full chain to contain root certificate; got %v occurrences", strings.Count(fullChain, rootCert)) + } + + // Now issue a short-lived certificate from our pki-external. + _, err = CBWrite(b_ext, s_ext, "roles/example", map[string]interface{}{ + "allowed_domains": "example.com", + "allow_subdomains": "true", + "max_ttl": "1h", + }) + require.NoError(t, err, "error setting up pki role: %v", err) + + resp, err = CBWrite(b_ext, s_ext, "issue/example", map[string]interface{}{ + "common_name": "test.example.com", + "ttl": "5m", + }) + require.NoError(t, err, "error issuing certificate: %v", err) + require.NotNil(t, resp, "got nil response from issuing request") + issueCrtAsPem := resp.Data["certificate"].(string) + issuedCrt := parseCert(t, issueCrtAsPem) + + // Verify that the certificates are signed by the intermediary CA key... 
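+ // ...and not directly by the root, since the imported bundle's issuing + // certificate is the intermediate.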
+ requireSignedBy(t, issuedCrt, intermediaryCaCert) + + // Test that we can request that the root ca certificate not appear in the ca_chain field + resp, err = CBWrite(b_ext, s_ext, "issue/example", map[string]interface{}{ + "common_name": "test.example.com", + "ttl": "5m", + "remove_roots_from_chain": "true", + }) + requireSuccessNonNilResponse(t, resp, err, "error issuing certificate when removing self signed") + fullChain = strings.Join(resp.Data["ca_chain"].([]string), "\n") + if strings.Count(fullChain, intermediateCert) != 1 { + t.Fatalf("expected full chain to contain intermediate certificate; got %v occurrences", strings.Count(fullChain, intermediateCert)) + } + if strings.Count(fullChain, rootCert) != 0 { + t.Fatalf("expected full chain to NOT contain root certificate; got %v occurrences", strings.Count(fullChain, rootCert)) + } +} + +func requireCertInCaChainArray(t *testing.T, chain []string, cert string, msgAndArgs ...interface{}) { + var fullChain string + for _, caCert := range chain { + fullChain = fullChain + "\n" + caCert + } + + requireCertInCaChainString(t, fullChain, cert, msgAndArgs...) +} + +func requireCertInCaChainString(t *testing.T, chain string, cert string, msgAndArgs ...interface{}) { + count := strings.Count(chain, cert) + if count != 1 { + failMsg := fmt.Sprintf("Found %d occurrences of the cert in the provided chain", count) + require.FailNow(t, failMsg, msgAndArgs...) + } +} + +type MultiBool int + +const ( + MFalse MultiBool = iota + MTrue MultiBool = iota + MAny MultiBool = iota +) + +func (o MultiBool) ToValues() []bool { + if o == MTrue { + return []bool{true} + } + + if o == MFalse { + return []bool{false} + } + + if o == MAny { + return []bool{true, false} + } + + return []bool{} +} + +type IssuanceRegression struct { + AllowedDomains []string + AllowBareDomains MultiBool + AllowGlobDomains MultiBool + AllowSubdomains MultiBool + AllowLocalhost MultiBool + AllowWildcardCertificates MultiBool + CNValidations []string + CommonName string + Issued bool +} + +func RoleIssuanceRegressionHelper(t *testing.T, b *backend, s logical.Storage, index int, test IssuanceRegression) int { + tested := 0 + for _, AllowBareDomains := range test.AllowBareDomains.ToValues() { + for _, AllowGlobDomains := range test.AllowGlobDomains.ToValues() { + for _, AllowSubdomains := range test.AllowSubdomains.ToValues() { + for _, AllowLocalhost := range test.AllowLocalhost.ToValues() { + for _, AllowWildcardCertificates := range test.AllowWildcardCertificates.ToValues() { + role := fmt.Sprintf("issuance-regression-%d-bare-%v-glob-%v-subdomains-%v-localhost-%v-wildcard-%v", index, AllowBareDomains, AllowGlobDomains, AllowSubdomains, AllowLocalhost, AllowWildcardCertificates) + _, err := CBWrite(b, s, "roles/"+role, map[string]interface{}{ + "allowed_domains": test.AllowedDomains, + "allow_bare_domains": AllowBareDomains, + "allow_glob_domains": AllowGlobDomains, + "allow_subdomains": AllowSubdomains, + "allow_localhost": AllowLocalhost, + "allow_wildcard_certificates": AllowWildcardCertificates, + "cn_validations": test.CNValidations, + // TODO: test across this vector as well. Currently certain wildcard + // matching is broken with it enabled (such as x*x.foo). + "enforce_hostnames": false, + "key_type": "ec", + "key_bits": 256, + "no_store": true, + // With the CN Validations field, ensure we prevent CN from appearing + // in SANs.
+ }) + if err != nil { + t.Fatal(err) + } + + resp, err := CBWrite(b, s, "issue/"+role, map[string]interface{}{ + "common_name": test.CommonName, + "exclude_cn_from_sans": true, + }) + + haveErr := err != nil || resp == nil + expectErr := !test.Issued + + if haveErr != expectErr { + t.Fatalf("issuance regression test [%d] failed: haveErr: %v, expectErr: %v, err: %v, resp: %v, test case: %v, role: %v", index, haveErr, expectErr, err, resp, test, role) + } + + tested += 1 + } + } + } + } + } + + return tested +} + +func TestBackend_Roles_IssuanceRegression(t *testing.T) { + t.Parallel() + // Regression testing of role's issuance policy. + testCases := []IssuanceRegression{ + // allowed, bare, glob, subdomains, localhost, wildcards, cn, issued + + // === Globs not allowed but used === // + // Allowed contains globs, but globbing not allowed, resulting in all + // issuances failing. Note that tests against issuing a wildcard with + // a bare domain will be covered later. + /* 0 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "baz.fud.bar.foo", false}, + /* 1 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "*.fud.bar.foo", false}, + /* 2 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "fud.bar.foo", false}, + /* 3 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "*.bar.foo", false}, + /* 4 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "bar.foo", false}, + /* 5 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "*.foo", false}, + /* 6 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "foo", false}, + /* 7 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "baz.fud.bar.foo", false}, + /* 8 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "*.fud.bar.foo", false}, + /* 9 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "fud.bar.foo", false}, + /* 10 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "*.bar.foo", false}, + /* 11 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "bar.foo", false}, + /* 12 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "foo", false}, + + // === Localhost sanity === // + // Localhost forbidden, not matching allowed domains -> not issued + /* 13 */ {[]string{"*.*.foo"}, MAny, MAny, MAny, MFalse, MAny, nil, "localhost", false}, + // Localhost allowed, not matching allowed domains -> issued + /* 14 */ {[]string{"*.*.foo"}, MAny, MAny, MAny, MTrue, MAny, nil, "localhost", true}, + // Localhost allowed via allowed domains (and bare allowed), not by AllowLocalhost -> issued + /* 15 */ {[]string{"localhost"}, MTrue, MAny, MAny, MFalse, MAny, nil, "localhost", true}, + // Localhost allowed via allowed domains (and bare not allowed), not by AllowLocalhost -> not issued + /* 16 */ {[]string{"localhost"}, MFalse, MAny, MAny, MFalse, MAny, nil, "localhost", false}, + // Localhost allowed via allowed domains (but bare not allowed), and by AllowLocalhost -> issued + /* 17 */ {[]string{"localhost"}, MFalse, MAny, MAny, MTrue, MAny, nil, "localhost", true}, + + // === Bare wildcard issuance == // + // allowed_domains contains one or more wildcards and bare domains allowed, + // resulting in the cert being issued. + /* 18 */ {[]string{"*.foo"}, MTrue, MAny, MAny, MAny, MTrue, nil, "*.foo", true}, + /* 19 */ {[]string{"*.*.foo"}, MTrue, MAny, MAny, MAny, MAny, nil, "*.*.foo", false}, // Does not conform to RFC 6125 + + // === Double Leading Glob Testing === // + // Allowed contains globs, but glob allowed so certain matches work. 
+ // The value of bare and localhost does not impact these results. + /* 20 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "baz.fud.bar.foo", true}, // glob domains allow infinite subdomains + /* 21 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MTrue, nil, "*.fud.bar.foo", true}, // glob domain allows wildcard of subdomains + /* 22 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "fud.bar.foo", true}, + /* 23 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MTrue, nil, "*.bar.foo", true}, // Regression fix: Vault#13530 + /* 24 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "bar.foo", false}, + /* 25 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "*.foo", false}, + /* 26 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "foo", false}, + + // Allowed contains globs, but glob and subdomain both work, so we expect + // wildcard issuance to work as well. The value of bare and localhost does + // not impact these results. + /* 27 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "baz.fud.bar.foo", true}, + /* 28 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MTrue, nil, "*.fud.bar.foo", true}, + /* 29 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "fud.bar.foo", true}, + /* 30 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MTrue, nil, "*.bar.foo", true}, // Regression fix: Vault#13530 + /* 31 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "bar.foo", false}, + /* 32 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "*.foo", false}, + /* 33 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "foo", false}, + + // === Single Leading Glob Testing === // + // Allowed contains globs, but glob allowed so certain matches work. + // The value of bare and localhost does not impact these results. + /* 34 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "baz.fud.bar.foo", true}, // glob domains allow infinite subdomains + /* 35 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MTrue, nil, "*.fud.bar.foo", true}, // glob domain allows wildcard of subdomains + /* 36 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "fud.bar.foo", true}, // glob domains allow infinite subdomains + /* 37 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MTrue, nil, "*.bar.foo", true}, // glob domain allows wildcards of subdomains + /* 38 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "bar.foo", true}, + /* 39 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "foo", false}, + + // Allowed contains globs, but glob and subdomain both work, so we expect + // wildcard issuance to work as well. The value of bare and localhost does + // not impact these results. + /* 40 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "baz.fud.bar.foo", true}, + /* 41 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MTrue, nil, "*.fud.bar.foo", true}, + /* 42 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "fud.bar.foo", true}, + /* 43 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MTrue, nil, "*.bar.foo", true}, + /* 44 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "bar.foo", true}, + /* 45 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "foo", false}, + + // === Only base domain name === // + // Allowed contains only domain components, but subdomains not allowed. 
This + // results in most issuances failing unless we allow bare domains, in which + // case only the final issuance for "foo" will succeed. + /* 46 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, nil, "baz.fud.bar.foo", false}, + /* 47 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, nil, "*.fud.bar.foo", false}, + /* 48 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, nil, "fud.bar.foo", false}, + /* 49 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, nil, "*.bar.foo", false}, + /* 50 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, nil, "bar.foo", false}, + /* 51 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, nil, "*.foo", false}, + /* 52 */ {[]string{"foo"}, MFalse, MAny, MFalse, MAny, MAny, nil, "foo", false}, + /* 53 */ {[]string{"foo"}, MTrue, MAny, MFalse, MAny, MAny, nil, "foo", true}, + + // Allowed contains only domain components, and subdomains are now allowed. + // This results in most issuances succeeding, with the exception of the + // base foo, which is still governed by base's value. + /* 54 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MAny, nil, "baz.fud.bar.foo", true}, + /* 55 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, nil, "*.fud.bar.foo", true}, + /* 56 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MAny, nil, "fud.bar.foo", true}, + /* 57 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, nil, "*.bar.foo", true}, + /* 58 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MAny, nil, "bar.foo", true}, + /* 59 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, nil, "*.foo", true}, + /* 60 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, nil, "x*x.foo", true}, // internal wildcards should be allowed per RFC 6125/6.4.3 + /* 61 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, nil, "*x.foo", true}, // prefix wildcards should be allowed per RFC 6125/6.4.3 + /* 62 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, nil, "x*.foo", true}, // suffix wildcards should be allowed per RFC 6125/6.4.3 + /* 63 */ {[]string{"foo"}, MFalse, MAny, MTrue, MAny, MAny, nil, "foo", false}, + /* 64 */ {[]string{"foo"}, MTrue, MAny, MTrue, MAny, MAny, nil, "foo", true}, + + // === Internal Glob Matching === // + // Basic glob matching requirements + /* 65 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "xerox.foo", true}, + /* 66 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "xylophone.files.pyrex.foo", true}, // globs can match across subdomains + /* 67 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "xercex.bar.foo", false}, // x.foo isn't matched + /* 68 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "bar.foo", false}, // x*x isn't matched. + /* 69 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "*.foo", false}, // unrelated wildcard + /* 70 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "*.x*x.foo", false}, // Does not conform to RFC 6125 + /* 71 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "*.xyx.foo", false}, // Globs and Subdomains do not layer per docs. + + // Various requirements around x*x.foo wildcard matching. 
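+ // A literal x*x.foo common name is itself a wildcard, so issuance needs + // both a matching allowed_domains path (bare or glob) and + // allow_wildcard_certificates.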
+ /* 72 */ {[]string{"x*x.foo"}, MFalse, MFalse, MAny, MAny, MAny, nil, "x*x.foo", false}, // base disabled, shouldn't match wildcard + /* 73 */ {[]string{"x*x.foo"}, MFalse, MTrue, MAny, MAny, MTrue, nil, "x*x.foo", true}, // base disallowed, but globbing allowed and should match + /* 74 */ {[]string{"x*x.foo"}, MTrue, MAny, MAny, MAny, MTrue, nil, "x*x.foo", true}, // base allowed, should match wildcard + + // Basic glob matching requirements with internal dots. + /* 75 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "xerox.foo", false}, // missing dots + /* 76 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "x.ero.x.foo", true}, + /* 77 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "xylophone.files.pyrex.foo", false}, // missing dots + /* 78 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "x.ylophone.files.pyre.x.foo", true}, // globs can match across subdomains + /* 79 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "xercex.bar.foo", false}, // x.foo isn't matched + /* 80 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "bar.foo", false}, // x.*.x isn't matched. + /* 81 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "*.foo", false}, // unrelated wildcard + /* 82 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "*.x.*.x.foo", false}, // Does not conform to RFC 6125 + /* 83 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "*.x.y.x.foo", false}, // Globs and Subdomains do not layer per docs. + + // === Wildcard restriction testing === // + /* 84 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MFalse, nil, "*.fud.bar.foo", false}, // glob domain allows wildcard of subdomains + /* 85 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MFalse, nil, "*.bar.foo", false}, // glob domain allows wildcards of subdomains + /* 86 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, nil, "*.fud.bar.foo", false}, + /* 87 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, nil, "*.bar.foo", false}, + /* 88 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, nil, "*.foo", false}, + /* 89 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, nil, "x*x.foo", false}, + /* 90 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, nil, "*x.foo", false}, + /* 91 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, nil, "x*.foo", false}, + /* 92 */ {[]string{"x*x.foo"}, MTrue, MAny, MAny, MAny, MFalse, nil, "x*x.foo", false}, + /* 93 */ {[]string{"*.foo"}, MFalse, MFalse, MAny, MAny, MAny, nil, "*.foo", false}, // Bare and globs forbidden despite (potentially) allowing wildcards. 
+ /* 94 */ {[]string{"x.*.x.foo"}, MAny, MAny, MAny, MAny, MAny, nil, "x.*.x.foo", false}, // Does not conform to RFC 6125
+
+ // === CN validation allowances === //
+ /* 95 */ {[]string{"foo"}, MAny, MAny, MAny, MAny, MAny, []string{"disabled"}, "*.fud.bar.foo", true},
+ /* 96 */ {[]string{"foo"}, MAny, MAny, MAny, MAny, MAny, []string{"disabled"}, "*.fud.*.foo", true},
+ /* 97 */ {[]string{"foo"}, MAny, MAny, MAny, MAny, MAny, []string{"disabled"}, "*.bar.*.bar", true},
+ /* 98 */ {[]string{"foo"}, MAny, MAny, MAny, MAny, MAny, []string{"disabled"}, "foo@foo", true},
+ /* 99 */ {[]string{"foo"}, MAny, MAny, MAny, MAny, MAny, []string{"disabled"}, "foo@foo@foo", true},
+ /* 100 */ {[]string{"foo"}, MAny, MAny, MAny, MAny, MAny, []string{"disabled"}, "bar@bar@bar", true},
+ /* 101 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"email"}, "bar@bar@bar", false},
+ /* 102 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"email"}, "bar@bar", false},
+ /* 103 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"email"}, "bar@foo", true},
+ /* 104 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"hostname"}, "bar@foo", false},
+ /* 105 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"hostname"}, "bar@bar", false},
+ /* 106 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"hostname"}, "bar.foo", true},
+ /* 107 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"hostname"}, "bar.bar", false},
+ /* 108 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"email"}, "bar.foo", false},
+ /* 109 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"email"}, "bar.bar", false},
+ }
+
+ if len(testCases) != 110 {
+ t.Fatalf("misnumbered test case entries will make it hard to find bugs: %v", len(testCases))
+ }
+
+ b, s := CreateBackendWithStorage(t)
+
+ // We need an RSA key so that all signature sizes are valid with it.
+ resp, err := CBWrite(b, s, "root/generate/exported", map[string]interface{}{
+ "common_name": "myvault.com",
+ "ttl": "128h",
+ "key_type": "rsa",
+ "key_bits": 2048,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ if resp == nil {
+ t.Fatal("expected ca info")
+ }
+
+ tested := 0
+ for index, test := range testCases {
+ tested += RoleIssuanceRegressionHelper(t, b, s, index, test)
+ }
+
+ t.Logf("Issuance regression expanded matrix test scenarios: %d", tested)
+}
+
+type KeySizeRegression struct {
+ // Values reused for both Role and CA configuration.
+ RoleKeyType string
+ RoleKeyBits []int
+
+ // Signature bits are presently only specified on the role.
+ RoleSignatureBits []int
+ RoleUsePSS bool
+
+ // These are parallel lists; they must be of the same length.
+ TestKeyTypes []string
+ TestKeyBits []int
+
+ // All of the above key types/sizes must pass or fail together.
+ ExpectError bool
+}
+
+func (k KeySizeRegression) KeyTypeValues() []string {
+ if k.RoleKeyType == "any" {
+ return []string{"rsa", "ec", "ed25519"}
+ }
+
+ return []string{k.RoleKeyType}
+}
+
+func RoleKeySizeRegressionHelper(t *testing.T, b *backend, s logical.Storage, index int, test KeySizeRegression) int {
+ tested := 0
+
+ for _, caKeyType := range test.KeyTypeValues() {
+ for _, caKeyBits := range test.RoleKeyBits {
+ // Generate a new CA key.
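+ // Each (CA key type, key bits) combination below gets a freshly generated
+ // root, so role constraints are exercised against every issuer
+ // configuration; the root is deleted again at the bottom of this loop.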
+ resp, err := CBWrite(b, s, "root/generate/exported", map[string]interface{}{ + "common_name": "myvault.com", + "ttl": "128h", + "key_type": caKeyType, + "key_bits": caKeyBits, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected ca info") + } + + for _, roleKeyBits := range test.RoleKeyBits { + for _, roleSignatureBits := range test.RoleSignatureBits { + role := fmt.Sprintf("key-size-regression-%d-keytype-%v-keybits-%d-signature-bits-%d", index, test.RoleKeyType, roleKeyBits, roleSignatureBits) + _, err := CBWrite(b, s, "roles/"+role, map[string]interface{}{ + "key_type": test.RoleKeyType, + "key_bits": roleKeyBits, + "signature_bits": roleSignatureBits, + "use_pss": test.RoleUsePSS, + }) + if err != nil { + t.Fatal(err) + } + + for index, keyType := range test.TestKeyTypes { + keyBits := test.TestKeyBits[index] + + _, _, csrPem := generateCSR(t, &x509.CertificateRequest{ + Subject: pkix.Name{ + CommonName: "localhost", + }, + }, keyType, keyBits) + + resp, err = CBWrite(b, s, "sign/"+role, map[string]interface{}{ + "common_name": "localhost", + "csr": csrPem, + }) + + haveErr := err != nil || resp == nil + + if haveErr != test.ExpectError { + t.Fatalf("key size regression test [%d] failed: haveErr: %v, expectErr: %v, err: %v, resp: %v, test case: %v, caKeyType: %v, caKeyBits: %v, role: %v, keyType: %v, keyBits: %v", index, haveErr, test.ExpectError, err, resp, test, caKeyType, caKeyBits, role, keyType, keyBits) + } + + if resp != nil && test.RoleUsePSS && caKeyType == "rsa" { + leafCert := parseCert(t, resp.Data["certificate"].(string)) + switch leafCert.SignatureAlgorithm { + case x509.SHA256WithRSAPSS, x509.SHA384WithRSAPSS, x509.SHA512WithRSAPSS: + default: + t.Fatalf("key size regression test [%d] failed on role %v: unexpected signature algorithm; expected RSA-type CA to sign a leaf cert with PSS algorithm; got %v", index, role, leafCert.SignatureAlgorithm.String()) + } + } + + tested += 1 + } + } + } + + _, err = CBDelete(b, s, "root") + if err != nil { + t.Fatal(err) + } + } + } + + return tested +} + +func TestBackend_Roles_KeySizeRegression(t *testing.T) { + t.Parallel() + // Regression testing of role's issuance policy. + testCases := []KeySizeRegression{ + // RSA with default parameters should fail to issue smaller RSA keys + // and any size ECDSA/Ed25519 keys. + /* 0 */ {"rsa", []int{0, 2048}, []int{0, 256, 384, 512}, false, []string{"rsa", "ec", "ec", "ec", "ec", "ed25519"}, []int{1024, 224, 256, 384, 521, 0}, true}, + // But it should work to issue larger RSA keys. + /* 1 */ {"rsa", []int{0, 2048}, []int{0, 256, 384, 512}, false, []string{"rsa", "rsa"}, []int{2048, 3072}, false}, + + // EC with default parameters should fail to issue smaller EC keys + // and any size RSA/Ed25519 keys. + /* 2 */ {"ec", []int{0}, []int{0}, false, []string{"rsa", "ec", "ed25519"}, []int{2048, 224, 0}, true}, + // But it should work to issue larger EC keys. Note that we should be + // independent of signature bits as that's computed from the issuer + // type (for EC based issuers). 
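+ // (For example, a P-256 issuer presumably signs with ECDSA over SHA-256 no
+ // matter what signature_bits the role requests, since the digest is derived
+ // from the issuer's curve rather than from the role.)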
+ /* 3 */ {"ec", []int{224}, []int{0, 256, 384, 521}, false, []string{"ec", "ec", "ec", "ec"}, []int{224, 256, 384, 521}, false}, + /* 4 */ {"ec", []int{0, 256}, []int{0, 256, 384, 521}, false, []string{"ec", "ec", "ec"}, []int{256, 384, 521}, false}, + /* 5 */ {"ec", []int{384}, []int{0, 256, 384, 521}, false, []string{"ec", "ec"}, []int{384, 521}, false}, + /* 6 */ {"ec", []int{521}, []int{0, 256, 384, 512}, false, []string{"ec"}, []int{521}, false}, + + // Ed25519 should reject RSA and EC keys. + /* 7 */ {"ed25519", []int{0}, []int{0}, false, []string{"rsa", "ec", "ec"}, []int{2048, 256, 521}, true}, + // But it should work to issue Ed25519 keys. + /* 8 */ {"ed25519", []int{0}, []int{0}, false, []string{"ed25519"}, []int{0}, false}, + + // Any key type should reject insecure RSA key sizes. + /* 9 */ {"any", []int{0}, []int{0, 256, 384, 512}, false, []string{"rsa", "rsa"}, []int{512, 1024}, true}, + // But work for everything else. + /* 10 */ {"any", []int{0}, []int{0, 256, 384, 512}, false, []string{"rsa", "rsa", "ec", "ec", "ec", "ec", "ed25519"}, []int{2048, 3072, 224, 256, 384, 521, 0}, false}, + + // RSA with larger than default key size should reject smaller ones. + /* 11 */ {"rsa", []int{3072}, []int{0, 256, 384, 512}, false, []string{"rsa"}, []int{2048}, true}, + + // We should be able to sign with PSS with any CA key type. + /* 12 */ {"rsa", []int{0}, []int{0, 256, 384, 512}, true, []string{"rsa"}, []int{2048}, false}, + /* 13 */ {"ec", []int{0}, []int{0}, true, []string{"ec"}, []int{256}, false}, + /* 14 */ {"ed25519", []int{0}, []int{0}, true, []string{"ed25519"}, []int{0}, false}, + } + + if len(testCases) != 15 { + t.Fatalf("misnumbered test case entries will make it hard to find bugs: %v", len(testCases)) + } + + b, s := CreateBackendWithStorage(t) + + tested := 0 + for index, test := range testCases { + tested += RoleKeySizeRegressionHelper(t, b, s, index, test) + } + + t.Logf("Key size regression expanded matrix test scenarios: %d", tested) +} + +func TestRootWithExistingKey(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + var err error + + // Fail requests if type is existing, and we specify the key_type param + _, err = CBWrite(b, s, "root/generate/existing", map[string]interface{}{ + "common_name": "root myvault.com", + "key_type": "rsa", + }) + require.Error(t, err) + require.Contains(t, err.Error(), "key_type nor key_bits arguments can be set in this mode") + + // Fail requests if type is existing, and we specify the key_bits param + _, err = CBWrite(b, s, "root/generate/existing", map[string]interface{}{ + "common_name": "root myvault.com", + "key_bits": "2048", + }) + require.Error(t, err) + require.Contains(t, err.Error(), "key_type nor key_bits arguments can be set in this mode") + + // Fail if the specified key does not exist. + _, err = CBWrite(b, s, "issuers/generate/root/existing", map[string]interface{}{ + "common_name": "root myvault.com", + "issuer_name": "my-issuer1", + "key_ref": "my-key1", + }) + require.Error(t, err) + require.Contains(t, err.Error(), "unable to find PKI key for reference: my-key1") + + // Fail if the specified key name is default. + _, err = CBWrite(b, s, "issuers/generate/root/internal", map[string]interface{}{ + "common_name": "root myvault.com", + "issuer_name": "my-issuer1", + "key_name": "Default", + }) + require.Error(t, err) + require.Contains(t, err.Error(), "reserved keyword 'default' can not be used as key name") + + // Fail if the specified issuer name is default. 
+ _, err = CBWrite(b, s, "issuers/generate/root/internal", map[string]interface{}{
+ "common_name": "root myvault.com",
+ "issuer_name": "DEFAULT",
+ })
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "reserved keyword 'default' can not be used as issuer name")
+
+ // Create the first CA
+ resp, err := CBWrite(b, s, "issuers/generate/root/internal", map[string]interface{}{
+ "common_name": "root myvault.com",
+ "key_type": "rsa",
+ "issuer_name": "my-issuer1",
+ })
+ schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issuers/generate/root/internal"), logical.UpdateOperation), resp, true)
+ require.NoError(t, err)
+ require.NotNil(t, resp.Data["certificate"])
+ myIssuerId1 := resp.Data["issuer_id"]
+ myKeyId1 := resp.Data["key_id"]
+ require.NotEmpty(t, myIssuerId1)
+ require.NotEmpty(t, myKeyId1)
+
+ // Fetch the parsed CRL; it should be empty as we've not revoked anything
+ parsedCrl := getParsedCrlFromBackend(t, b, s, "issuer/my-issuer1/crl/der")
+ require.Equal(t, len(parsedCrl.TBSCertList.RevokedCertificates), 0, "should have no revoked certificates")
+
+ // Fail if the specified issuer name is re-used.
+ _, err = CBWrite(b, s, "issuers/generate/root/internal", map[string]interface{}{
+ "common_name": "root myvault.com",
+ "issuer_name": "my-issuer1",
+ })
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "issuer name already in use")
+
+ // Create the second CA
+ resp, err = CBWrite(b, s, "issuers/generate/root/internal", map[string]interface{}{
+ "common_name": "root myvault.com",
+ "key_type": "rsa",
+ "issuer_name": "my-issuer2",
+ "key_name": "root-key2",
+ })
+ require.NoError(t, err)
+ require.NotNil(t, resp.Data["certificate"])
+ myIssuerId2 := resp.Data["issuer_id"]
+ myKeyId2 := resp.Data["key_id"]
+ require.NotEmpty(t, myIssuerId2)
+ require.NotEmpty(t, myKeyId2)
+
+ // Fetch the parsed CRL; it should be empty as we've not revoked anything
+ parsedCrl = getParsedCrlFromBackend(t, b, s, "issuer/my-issuer2/crl/der")
+ require.Equal(t, len(parsedCrl.TBSCertList.RevokedCertificates), 0, "should have no revoked certificates")
+
+ // Fail if the specified key name is re-used.
+ _, err = CBWrite(b, s, "issuers/generate/root/internal", map[string]interface{}{
+ "common_name": "root myvault.com",
+ "issuer_name": "my-issuer3",
+ "key_name": "root-key2",
+ })
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "key name already in use")
+
+ // Create a third CA re-using key from CA 1
+ resp, err = CBWrite(b, s, "issuers/generate/root/existing", map[string]interface{}{
+ "common_name": "root myvault.com",
+ "issuer_name": "my-issuer3",
+ "key_ref": myKeyId1,
+ })
+ require.NoError(t, err)
+ require.NotNil(t, resp.Data["certificate"])
+ myIssuerId3 := resp.Data["issuer_id"]
+ myKeyId3 := resp.Data["key_id"]
+ require.NotEmpty(t, myIssuerId3)
+ require.NotEmpty(t, myKeyId3)
+
+ // Fetch the parsed CRL; it should be empty as we've not revoked anything.
+ parsedCrl = getParsedCrlFromBackend(t, b, s, "issuer/my-issuer3/crl/der")
+ require.Equal(t, len(parsedCrl.TBSCertList.RevokedCertificates), 0, "should have no revoked certificates")
+ // Signatures should be the same since this is just a reissued cert. We
+ // use signature as a proxy for "these two CRLs are equal".
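+ // (Issuers sharing the same backing key should also share their CRL in
+ // this backend, so both paths ought to serve the same signed object.)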
+ firstCrl := getParsedCrlFromBackend(t, b, s, "issuer/my-issuer1/crl/der") + require.Equal(t, parsedCrl.SignatureValue, firstCrl.SignatureValue) + + require.NotEqual(t, myIssuerId1, myIssuerId2) + require.NotEqual(t, myIssuerId1, myIssuerId3) + require.NotEqual(t, myKeyId1, myKeyId2) + require.Equal(t, myKeyId1, myKeyId3) + + resp, err = CBList(b, s, "issuers") + require.NoError(t, err) + require.Equal(t, 3, len(resp.Data["keys"].([]string))) + require.Contains(t, resp.Data["keys"], string(myIssuerId1.(issuerID))) + require.Contains(t, resp.Data["keys"], string(myIssuerId2.(issuerID))) + require.Contains(t, resp.Data["keys"], string(myIssuerId3.(issuerID))) +} + +func TestIntermediateWithExistingKey(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + var err error + + // Fail requests if type is existing, and we specify the key_type param + _, err = CBWrite(b, s, "intermediate/generate/existing", map[string]interface{}{ + "common_name": "root myvault.com", + "key_type": "rsa", + }) + require.Error(t, err) + require.Contains(t, err.Error(), "key_type nor key_bits arguments can be set in this mode") + + // Fail requests if type is existing, and we specify the key_bits param + _, err = CBWrite(b, s, "intermediate/generate/existing", map[string]interface{}{ + "common_name": "root myvault.com", + "key_bits": "2048", + }) + require.Error(t, err) + require.Contains(t, err.Error(), "key_type nor key_bits arguments can be set in this mode") + + // Fail if the specified key does not exist. + _, err = CBWrite(b, s, "issuers/generate/intermediate/existing", map[string]interface{}{ + "common_name": "root myvault.com", + "key_ref": "my-key1", + }) + require.Error(t, err) + require.Contains(t, err.Error(), "unable to find PKI key for reference: my-key1") + + // Create the first intermediate CA + resp, err := CBWrite(b, s, "issuers/generate/intermediate/internal", map[string]interface{}{ + "common_name": "root myvault.com", + "key_type": "rsa", + }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issuers/generate/intermediate/internal"), logical.UpdateOperation), resp, true) + require.NoError(t, err) + // csr1 := resp.Data["csr"] + myKeyId1 := resp.Data["key_id"] + require.NotEmpty(t, myKeyId1) + + // Create the second intermediate CA + resp, err = CBWrite(b, s, "issuers/generate/intermediate/internal", map[string]interface{}{ + "common_name": "root myvault.com", + "key_type": "rsa", + "key_name": "interkey1", + }) + require.NoError(t, err) + // csr2 := resp.Data["csr"] + myKeyId2 := resp.Data["key_id"] + require.NotEmpty(t, myKeyId2) + + // Create a third intermediate CA re-using key from intermediate CA 1 + resp, err = CBWrite(b, s, "issuers/generate/intermediate/existing", map[string]interface{}{ + "common_name": "root myvault.com", + "key_ref": myKeyId1, + }) + require.NoError(t, err) + // csr3 := resp.Data["csr"] + myKeyId3 := resp.Data["key_id"] + require.NotEmpty(t, myKeyId3) + + require.NotEqual(t, myKeyId1, myKeyId2) + require.Equal(t, myKeyId1, myKeyId3, "our new ca did not seem to reuse the key as we expected.") +} + +func TestIssuanceTTLs(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "root example.com", + "issuer_name": "root", + "ttl": "10s", + "key_type": "ec", + }) + require.NoError(t, err) + require.NotNil(t, resp) + rootCert := parseCert(t, resp.Data["certificate"].(string)) + + _, err = CBWrite(b, s, "roles/local-testing", 
map[string]interface{}{ + "allow_any_name": true, + "enforce_hostnames": false, + "key_type": "ec", + }) + require.NoError(t, err) + + _, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ + "common_name": "testing", + "ttl": "1s", + }) + require.NoError(t, err, "expected issuance to succeed due to shorter ttl than cert ttl") + + _, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ + "common_name": "testing", + }) + require.Error(t, err, "expected issuance to fail due to longer default ttl than cert ttl") + + resp, err = CBPatch(b, s, "issuer/root", map[string]interface{}{ + "leaf_not_after_behavior": "permit", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.Equal(t, resp.Data["leaf_not_after_behavior"], "permit") + + _, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ + "common_name": "testing", + }) + require.NoError(t, err, "expected issuance to succeed due to permitted longer TTL") + + resp, err = CBWrite(b, s, "issuer/root", map[string]interface{}{ + "issuer_name": "root", + "leaf_not_after_behavior": "truncate", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.Equal(t, resp.Data["leaf_not_after_behavior"], "truncate") + + _, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ + "common_name": "testing", + }) + require.NoError(t, err, "expected issuance to succeed due to truncated ttl") + + // Sleep until the parent cert expires and the clock rolls over + // to the next second. + time.Sleep(time.Until(rootCert.NotAfter) + (1500 * time.Millisecond)) + + resp, err = CBWrite(b, s, "issuer/root", map[string]interface{}{ + "issuer_name": "root", + "leaf_not_after_behavior": "err", + }) + require.NoError(t, err) + require.NotNil(t, resp) + + // Even 1s ttl should now fail. 
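+ // (The root itself has expired, so with leaf_not_after_behavior=err no
+ // requested TTL, however short, can fit a leaf inside the issuer's
+ // remaining validity window.)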
+ _, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{
+ "common_name": "testing",
+ "ttl": "1s",
+ })
+ require.Error(t, err, "expected issuance to fail since the issuer certificate has expired")
+}
+
+func TestSealWrappedStorageConfigured(t *testing.T) {
+ t.Parallel()
+ b, _ := CreateBackendWithStorage(t)
+ wrappedEntries := b.Backend.PathsSpecial.SealWrapStorage
+
+ // Make sure our legacy bundle is within the list
+ // NOTE: do not convert these test values to constants; these paths must always be within the seal wrap config
+ require.Contains(t, wrappedEntries, "config/ca_bundle", "Legacy bundle missing from seal wrap")
+ // The trailing / is important as it makes the entire folder require seal wrapping, not just config/key itself
+ require.Contains(t, wrappedEntries, "config/key/", "key prefix with trailing / missing from seal wrap.")
+}
+
+func TestBackend_ConfigCA_WithECParams(t *testing.T) {
+ t.Parallel()
+ b, s := CreateBackendWithStorage(t)
+
+ // Generated key with OpenSSL:
+ // $ openssl ecparam -out p256.key -name prime256v1 -genkey
+ //
+ // Regression test for https://github.com/hashicorp/vault/issues/16667
+ resp, err := CBWrite(b, s, "config/ca", map[string]interface{}{
+ "pem_bundle": `
+-----BEGIN EC PARAMETERS-----
+BggqhkjOPQMBBw==
+-----END EC PARAMETERS-----
+-----BEGIN EC PRIVATE KEY-----
+MHcCAQEEINzXthCZdhyV7+wIEBl/ty+ctNsUS99ykTeax6EbYZtvoAoGCCqGSM49
+AwEHoUQDQgAE57NX8bR/nDoW8yRgLswoXBQcjHrdyfuHS0gPwki6BNnfunUzryVb
+8f22/JWj6fsEF6AOADZlrswKIbR2Es9e/w==
+-----END EC PRIVATE KEY-----
+ `,
+ })
+ require.NoError(t, err)
+ require.NotNil(t, resp, "expected ca info")
+ importedKeys := resp.Data["imported_keys"].([]string)
+ importedIssuers := resp.Data["imported_issuers"].([]string)
+
+ require.Equal(t, len(importedKeys), 1)
+ require.Equal(t, len(importedIssuers), 0)
+}
+
+func TestPerIssuerAIA(t *testing.T) {
+ t.Parallel()
+ b, s := CreateBackendWithStorage(t)
+
+ // Generating a root without any AIA configuration should not include AIAs.
+ resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{
+ "common_name": "root example.com",
+ "issuer_name": "root",
+ "key_type": "ec",
+ })
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ rootCert := parseCert(t, resp.Data["certificate"].(string))
+ require.Empty(t, rootCert.OCSPServer)
+ require.Empty(t, rootCert.IssuingCertificateURL)
+ require.Empty(t, rootCert.CRLDistributionPoints)
+
+ // Set some local URLs on the issuer.
+ resp, err = CBWrite(b, s, "issuer/default", map[string]interface{}{
+ "issuing_certificates": []string{"https://google.com"},
+ })
+ schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issuer/default"), logical.UpdateOperation), resp, true)
+
+ require.NoError(t, err)
+
+ _, err = CBWrite(b, s, "roles/testing", map[string]interface{}{
+ "allow_any_name": true,
+ "ttl": "85s",
+ "key_type": "ec",
+ })
+ require.NoError(t, err)
+
+ // Issue something with this re-configured issuer.
+ resp, err = CBWrite(b, s, "issuer/default/issue/testing", map[string]interface{}{
+ "common_name": "localhost.com",
+ })
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ leafCert := parseCert(t, resp.Data["certificate"].(string))
+ require.Empty(t, leafCert.OCSPServer)
+ require.Equal(t, leafCert.IssuingCertificateURL, []string{"https://google.com"})
+ require.Empty(t, leafCert.CRLDistributionPoints)
+
+ // Set global URLs and ensure they don't appear on this issuer's leaf.
+ _, err = CBWrite(b, s, "config/urls", map[string]interface{}{ + "issuing_certificates": []string{"https://example.com/ca", "https://backup.example.com/ca"}, + "crl_distribution_points": []string{"https://example.com/crl", "https://backup.example.com/crl"}, + "ocsp_servers": []string{"https://example.com/ocsp", "https://backup.example.com/ocsp"}, + }) + require.NoError(t, err) + resp, err = CBWrite(b, s, "issuer/default/issue/testing", map[string]interface{}{ + "common_name": "localhost.com", + }) + require.NoError(t, err) + require.NotNil(t, resp) + leafCert = parseCert(t, resp.Data["certificate"].(string)) + require.Empty(t, leafCert.OCSPServer) + require.Equal(t, leafCert.IssuingCertificateURL, []string{"https://google.com"}) + require.Empty(t, leafCert.CRLDistributionPoints) + + // Now come back and remove the local modifications and ensure we get + // the defaults again. + _, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ + "issuing_certificates": []string{}, + }) + require.NoError(t, err) + resp, err = CBWrite(b, s, "issuer/default/issue/testing", map[string]interface{}{ + "common_name": "localhost.com", + }) + require.NoError(t, err) + require.NotNil(t, resp) + leafCert = parseCert(t, resp.Data["certificate"].(string)) + require.Equal(t, leafCert.IssuingCertificateURL, []string{"https://example.com/ca", "https://backup.example.com/ca"}) + require.Equal(t, leafCert.OCSPServer, []string{"https://example.com/ocsp", "https://backup.example.com/ocsp"}) + require.Equal(t, leafCert.CRLDistributionPoints, []string{"https://example.com/crl", "https://backup.example.com/crl"}) + + // Validate that we can set an issuer name and remove it. + _, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ + "issuer_name": "my-issuer", + }) + require.NoError(t, err) + _, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ + "issuer_name": "", + }) + require.NoError(t, err) +} + +func TestIssuersWithoutCRLBits(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + // Importing a root without CRL signing bits should work fine. 
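+ // (The bundle below appears to carry a KeyUsage extension with only
+ // digitalSignature and keyCertSign set, i.e. without the cRLSign bit, from
+ // which the imported issuer's computed usage should be derived.)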
+ customBundleWithoutCRLBits := ` +-----BEGIN CERTIFICATE----- +MIIDGTCCAgGgAwIBAgIBATANBgkqhkiG9w0BAQsFADATMREwDwYDVQQDDAhyb290 +LW5ldzAeFw0yMjA4MjQxMjEzNTVaFw0yMzA5MDMxMjEzNTVaMBMxETAPBgNVBAMM +CHJvb3QtbmV3MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAojTA/Mx7 +LVW/Zgn/N4BqZbaF82MrTIBFug3ob7mqycNRlWp4/PH8v37+jYn8e691HUsKjden +rDTrO06kiQKiJinAzmlLJvgcazE3aXoh7wSzVG9lFHYvljEmVj+yDbkeaqaCktup +skuNjxCoN9BLmKzZIwVCHn92ZHlhN6LI7CNaU3SDJdu7VftWF9Ugzt9FIvI+6Gcn +/WNE9FWvZ9o7035rZ+1vvTn7/tgxrj2k3XvD51Kq4tsSbqjnSf3QieXT6E6uvtUE +TbPp3xjBElgBCKmeogR1l28rs1aujqqwzZ0B/zOeF8ptaH0aZOIBsVDJR8yTwHzq +s34hNdNfKLHzOwIDAQABo3gwdjAdBgNVHQ4EFgQUF4djNmx+1+uJINhZ82pN+7jz +H8EwHwYDVR0jBBgwFoAUF4djNmx+1+uJINhZ82pN+7jzH8EwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAoQwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDQYJKoZI +hvcNAQELBQADggEBAICQovBz4KLWlLmXeZ2Vf6WfQYyGNgGyJa10XNXtWQ5dM2NU +OLAit4x1c2dz+aFocc8ZsX/ikYi/bruT2rsGWqMAGC4at3U4GuaYGO5a6XzMKIDC +nxIlbiO+Pn6Xum7fAqUri7+ZNf/Cygmc5sByi3MAAIkszeObUDZFTJL7gEOuXIMT +rKIXCINq/U+qc7m9AQ8vKhF1Ddj+dLGLzNQ5j3cKfilPs/wRaYqbMQvnmarX+5Cs +k1UL6kWSQsiP3+UWaBlcWkmD6oZ3fIG7c0aMxf7RISq1eTAM9XjH3vMxWQJlS5q3 +2weJ2LYoPe/DwX5CijR0IezapBCrin1BscJMLFQ= +-----END CERTIFICATE----- +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCiNMD8zHstVb9m +Cf83gGpltoXzYytMgEW6DehvuarJw1GVanj88fy/fv6Nifx7r3UdSwqN16esNOs7 +TqSJAqImKcDOaUsm+BxrMTdpeiHvBLNUb2UUdi+WMSZWP7INuR5qpoKS26myS42P +EKg30EuYrNkjBUIef3ZkeWE3osjsI1pTdIMl27tV+1YX1SDO30Ui8j7oZyf9Y0T0 +Va9n2jvTfmtn7W+9Ofv+2DGuPaTde8PnUqri2xJuqOdJ/dCJ5dPoTq6+1QRNs+nf +GMESWAEIqZ6iBHWXbyuzVq6OqrDNnQH/M54Xym1ofRpk4gGxUMlHzJPAfOqzfiE1 +018osfM7AgMBAAECggEAAVd6kZZaN69IZITIc1vHRYa2rlZpKS2JP7c8Vd3Z/4Fz +ZZvnJ7LgVAmUYg5WPZ2sOqBNLfKVN/oke5Q0dALgdxYl7dWQIhPjHeRFbZFtjqEV +OXZGBniamMO/HSKGWGrqFf7BM/H7AhClUwQgjnzVSz+B+LJJidM+SVys3n1xuDmC +EP+iOda+bAHqHv/7oCELQKhLmCvPc9v2fDy+180ttdo8EHuxwVnKiyR/ryKFhSyx +K1wgAPQ9jO+V+GESL90rqpX/r501REsIOOpm4orueelHTD4+dnHxvUPqJ++9aYGX +79qBNPPUhxrQI1yoHxwW0cTxW5EqkZ9bT2lSd5rjcQKBgQDNyPBpidkHPrYemQDT +RldtS6FiW/jc1It/CRbjU4A6Gi7s3Cda43pEUObKNLeXMyLQaMf4GbDPDX+eh7B8 +RkUq0Q/N0H4bn1hbxYSUdgv0j/6czpMo6rLcJHGwOTSpHGsNsxSLL7xlpgzuzqrG +FzEgjMA1aD3w8B9+/77AoSLoMQKBgQDJyYMw82+euLYRbR5Wc/SbrWfh2n1Mr2BG +pp1ZNYorXE5CL4ScdLcgH1q/b8r5XGwmhMcpeA+geAAaKmk1CGG+gPLoq20c9Q1Y +Ykq9tUVJasIkelvbb/SPxyjkJdBwylzcPP14IJBsqQM0be+yVqLJJVHSaoKhXZcl +IW2xgCpjKwKBgFpeX5U5P+F6nKebMU2WmlYY3GpBUWxIummzKCX0SV86mFjT5UR4 +mPzfOjqaI/V2M1eqbAZ74bVLjDumAs7QXReMb5BGetrOgxLqDmrT3DQt9/YMkXtq +ddlO984XkRSisjB18BOfhvBsl0lX4I7VKHHO3amWeX0RNgOjc7VMDfRBAoGAWAQH +r1BfvZHACLXZ58fISCdJCqCsysgsbGS8eW77B5LJp+DmLQBT6DUE9j+i/0Wq/ton +rRTrbAkrsj4RicpQKDJCwe4UN+9DlOu6wijRQgbJC/Q7IOoieJxcX7eGxcve2UnZ +HY7GsD7AYRwa02UquCYJHIjM1enmxZFhMW1AD+UCgYEAm4jdNz5e4QjA4AkNF+cB +ZenrAZ0q3NbTyiSsJEAtRe/c5fNFpmXo3mqgCannarREQYYDF0+jpSoTUY8XAc4q +wL7EZNzwxITLqBnnHQbdLdAvYxB43kvWTy+JRK8qY9LAMCCFeDoYwXkWV4Wkx/b0 +TgM7RZnmEjNdeaa4M52o7VY= +-----END PRIVATE KEY----- + ` + resp, err := CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ + "pem_bundle": customBundleWithoutCRLBits, + }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issuers/import/bundle"), logical.UpdateOperation), resp, true) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data) + require.NotEmpty(t, resp.Data["imported_issuers"]) + require.NotEmpty(t, resp.Data["imported_keys"]) + require.NotEmpty(t, resp.Data["mapping"]) + + // Shouldn't have crl-signing on the newly imported issuer's usage. 
+ resp, err = CBRead(b, s, "issuer/default") + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data) + require.NotEmpty(t, resp.Data["usage"]) + require.NotContains(t, resp.Data["usage"], "crl-signing") + + // Modifying to set CRL should fail. + resp, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ + "usage": "issuing-certificates,crl-signing", + }) + require.Error(t, err) + require.True(t, resp.IsError()) + + // Modifying to set issuing-certificates and ocsp-signing should succeed. + resp, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ + "usage": "issuing-certificates,ocsp-signing", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data) + require.NotEmpty(t, resp.Data["usage"]) + require.NotContains(t, resp.Data["usage"], "crl-signing") +} + +func TestBackend_IfModifiedSinceHeaders(t *testing.T) { + t.Parallel() + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + RequestResponseCallback: schema.ResponseValidatingCallback(t), + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + + // Mount PKI. + err := client.Sys().Mount("pki", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "60h", + // Required to allow the header to be passed through. + PassthroughRequestHeaders: []string{"if-modified-since"}, + AllowedResponseHeaders: []string{"Last-Modified"}, + }, + }) + require.NoError(t, err) + + // Get a time before CA generation. Subtract two seconds to ensure + // the value in the seconds field is different than the time the CA + // is actually generated at. + beforeOldCAGeneration := time.Now().Add(-2 * time.Second) + + // Generate an internal CA. This one is the default. + resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "Root X1", + "key_type": "ec", + "issuer_name": "old-root", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["certificate"]) + + // CA is generated, but give a grace window. + afterOldCAGeneration := time.Now().Add(2 * time.Second) + + // When you _save_ headers, client returns a copy. But when you go to + // reset them, it doesn't create a new copy (and instead directly + // assigns). This means we have to continually refresh our view of the + // last headers, otherwise the headers added after the last set operation + // leak into this copy... Yuck! + lastHeaders := client.Headers() + for _, path := range []string{"pki/cert/ca", "pki/cert/crl", "pki/issuer/default/json", "pki/issuer/old-root/json", "pki/issuer/old-root/crl", "pki/cert/delta-crl", "pki/issuer/old-root/crl/delta"} { + t.Logf("path: %v", path) + field := "certificate" + if strings.HasPrefix(path, "pki/issuer") && strings.Contains(path, "/crl") { + field = "crl" + } + + // Reading the CA should work, without a header. + resp, err := client.Logical().Read(path) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data[field]) + + // Ensure that the CA is returned correctly if we give it the old time. 
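+ // (An If-Modified-Since value that predates generation means the stored
+ // object is newer than the client's copy, so the backend must return the
+ // full response rather than a 304-style empty one.)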
+ client.AddHeader("If-Modified-Since", beforeOldCAGeneration.Format(time.RFC1123)) + resp, err = client.Logical().Read(path) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data[field]) + client.SetHeaders(lastHeaders) + lastHeaders = client.Headers() + + // Ensure that the CA is elided if we give it the present time (plus a + // grace window). + client.AddHeader("If-Modified-Since", afterOldCAGeneration.Format(time.RFC1123)) + t.Logf("headers: %v", client.Headers()) + resp, err = client.Logical().Read(path) + require.NoError(t, err) + require.Nil(t, resp) + client.SetHeaders(lastHeaders) + lastHeaders = client.Headers() + } + + // Wait three seconds. This ensures we have adequate grace period + // to distinguish the two cases, even with grace periods. + time.Sleep(3 * time.Second) + + // Generating a second root. This one isn't the default. + beforeNewCAGeneration := time.Now().Add(-2 * time.Second) + + // Generate an internal CA. This one is the default. + _, err = client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "Root X1", + "key_type": "ec", + "issuer_name": "new-root", + }) + require.NoError(t, err) + + // As above. + afterNewCAGeneration := time.Now().Add(2 * time.Second) + + // New root isn't the default, so it has fewer paths. + for _, path := range []string{"pki/issuer/new-root/json", "pki/issuer/new-root/crl", "pki/issuer/new-root/crl/delta"} { + t.Logf("path: %v", path) + field := "certificate" + if strings.HasPrefix(path, "pki/issuer") && strings.Contains(path, "/crl") { + field = "crl" + } + + // Reading the CA should work, without a header. + resp, err := client.Logical().Read(path) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data[field]) + + // Ensure that the CA is returned correctly if we give it the old time. + client.AddHeader("If-Modified-Since", beforeNewCAGeneration.Format(time.RFC1123)) + resp, err = client.Logical().Read(path) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data[field]) + client.SetHeaders(lastHeaders) + lastHeaders = client.Headers() + + // Ensure that the CA is elided if we give it the present time (plus a + // grace window). + client.AddHeader("If-Modified-Since", afterNewCAGeneration.Format(time.RFC1123)) + t.Logf("headers: %v", client.Headers()) + resp, err = client.Logical().Read(path) + require.NoError(t, err) + require.Nil(t, resp) + client.SetHeaders(lastHeaders) + lastHeaders = client.Headers() + } + + // Wait three seconds. This ensures we have adequate grace period + // to distinguish the two cases, even with grace periods. + time.Sleep(3 * time.Second) + + // Now swap the default issuers around. + _, err = client.Logical().Write("pki/config/issuers", map[string]interface{}{ + "default": "new-root", + }) + require.NoError(t, err) + + // Reading both with the last modified date should return new values. 
+ for _, path := range []string{"pki/cert/ca", "pki/cert/crl", "pki/issuer/default/json", "pki/issuer/old-root/json", "pki/issuer/new-root/json", "pki/issuer/old-root/crl", "pki/issuer/new-root/crl", "pki/cert/delta-crl", "pki/issuer/old-root/crl/delta", "pki/issuer/new-root/crl/delta"} {
+ t.Logf("path: %v", path)
+ field := "certificate"
+ if strings.HasPrefix(path, "pki/issuer") && strings.Contains(path, "/crl") {
+ field = "crl"
+ }
+
+ // Ensure that the CA is returned if we give it the old root's
+ // post-generation time; the default swap modified everything afterwards.
+ client.AddHeader("If-Modified-Since", afterOldCAGeneration.Format(time.RFC1123))
+ resp, err = client.Logical().Read(path)
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ require.NotNil(t, resp.Data)
+ require.NotEmpty(t, resp.Data[field])
+ client.SetHeaders(lastHeaders)
+ lastHeaders = client.Headers()
+
+ // Ensure that the CA is also returned if we give it the new root's
+ // post-generation time.
+ client.AddHeader("If-Modified-Since", afterNewCAGeneration.Format(time.RFC1123))
+ resp, err = client.Logical().Read(path)
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ require.NotNil(t, resp.Data)
+ require.NotEmpty(t, resp.Data[field])
+ client.SetHeaders(lastHeaders)
+ lastHeaders = client.Headers()
+ }
+
+ // Wait for things to settle, record the present time, and wait for the
+ // clock to definitely tick over again.
+ time.Sleep(2 * time.Second)
+ preRevocationTimestamp := time.Now()
+ time.Sleep(2 * time.Second)
+
+ // The above tests should say everything is cached.
+ for _, path := range []string{"pki/cert/ca", "pki/cert/crl", "pki/issuer/default/json", "pki/issuer/old-root/json", "pki/issuer/new-root/json", "pki/issuer/old-root/crl", "pki/issuer/new-root/crl", "pki/cert/delta-crl", "pki/issuer/old-root/crl/delta", "pki/issuer/new-root/crl/delta"} {
+ t.Logf("path: %v", path)
+
+ // Ensure that everything is elided if we give it the new
+ // (pre-revocation) time.
+ client.AddHeader("If-Modified-Since", preRevocationTimestamp.Format(time.RFC1123))
+ resp, err = client.Logical().Read(path)
+ require.NoError(t, err)
+ require.Nil(t, resp)
+ client.SetHeaders(lastHeaders)
+ lastHeaders = client.Headers()
+ }
+
+ // We could generate some leaves and verify the revocation updates the
+ // CRL. But, revoking the issuer behaves the same, so let's do that
+ // instead.
+ _, err = client.Logical().Write("pki/issuer/old-root/revoke", map[string]interface{}{})
+ require.NoError(t, err)
+
+ // CA should still be valid.
+ for _, path := range []string{"pki/cert/ca", "pki/issuer/default/json", "pki/issuer/old-root/json", "pki/issuer/new-root/json"} {
+ t.Logf("path: %v", path)
+
+ // Ensure that the CA is still elided if we give it the pre-revocation time.
+ client.AddHeader("If-Modified-Since", preRevocationTimestamp.Format(time.RFC1123)) + resp, err = client.Logical().Read(path) + require.NoError(t, err) + require.Nil(t, resp) + client.SetHeaders(lastHeaders) + lastHeaders = client.Headers() + } + + // CRL should be invalidated + for _, path := range []string{"pki/cert/crl", "pki/issuer/old-root/crl", "pki/issuer/new-root/crl", "pki/cert/delta-crl", "pki/issuer/old-root/crl/delta", "pki/issuer/new-root/crl/delta"} { + t.Logf("path: %v", path) + field := "certificate" + if strings.HasPrefix(path, "pki/issuer") && strings.Contains(path, "/crl") { + field = "crl" + } + + client.AddHeader("If-Modified-Since", preRevocationTimestamp.Format(time.RFC1123)) + resp, err = client.Logical().Read(path) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data[field]) + client.SetHeaders(lastHeaders) + lastHeaders = client.Headers() + } + + // If we send some time in the future, everything should be cached again! + futureTime := time.Now().Add(30 * time.Second) + for _, path := range []string{"pki/cert/ca", "pki/cert/crl", "pki/issuer/default/json", "pki/issuer/old-root/json", "pki/issuer/new-root/json", "pki/issuer/old-root/crl", "pki/issuer/new-root/crl", "pki/cert/delta-crl", "pki/issuer/old-root/crl/delta", "pki/issuer/new-root/crl/delta"} { + t.Logf("path: %v", path) + + // Ensure that the CA is returned correctly if we give it the new time. + client.AddHeader("If-Modified-Since", futureTime.Format(time.RFC1123)) + resp, err = client.Logical().Read(path) + require.NoError(t, err) + require.Nil(t, resp) + client.SetHeaders(lastHeaders) + lastHeaders = client.Headers() + } + + beforeThreeWaySwap := time.Now().Add(-2 * time.Second) + + // Now, do a three-way swap of names (old->tmp; new->old; tmp->new). This + // should result in all names/CRLs being invalidated. + _, err = client.Logical().JSONMergePatch(ctx, "pki/issuer/old-root", map[string]interface{}{ + "issuer_name": "tmp-root", + }) + require.NoError(t, err) + _, err = client.Logical().JSONMergePatch(ctx, "pki/issuer/new-root", map[string]interface{}{ + "issuer_name": "old-root", + }) + require.NoError(t, err) + _, err = client.Logical().JSONMergePatch(ctx, "pki/issuer/tmp-root", map[string]interface{}{ + "issuer_name": "new-root", + }) + require.NoError(t, err) + + afterThreeWaySwap := time.Now().Add(2 * time.Second) + + for _, path := range []string{"pki/cert/ca", "pki/cert/crl", "pki/issuer/default/json", "pki/issuer/old-root/json", "pki/issuer/new-root/json", "pki/issuer/old-root/crl", "pki/issuer/new-root/crl", "pki/cert/delta-crl", "pki/issuer/old-root/crl/delta", "pki/issuer/new-root/crl/delta"} { + t.Logf("path: %v", path) + field := "certificate" + if strings.HasPrefix(path, "pki/issuer") && strings.Contains(path, "/crl") { + field = "crl" + } + + // Ensure that the CA is returned if we give it the pre-update time. + client.AddHeader("If-Modified-Since", beforeThreeWaySwap.Format(time.RFC1123)) + resp, err = client.Logical().Read(path) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data[field]) + client.SetHeaders(lastHeaders) + lastHeaders = client.Headers() + + // Ensure that the CA is elided correctly if we give it the after time. 
+ client.AddHeader("If-Modified-Since", afterThreeWaySwap.Format(time.RFC1123)) + resp, err = client.Logical().Read(path) + require.NoError(t, err) + require.Nil(t, resp) + client.SetHeaders(lastHeaders) + lastHeaders = client.Headers() + } + + // Finally, rebuild the delta CRL and ensure that only that is + // invalidated. We first need to enable it though, and wait for + // all CRLs to rebuild. + _, err = client.Logical().Write("pki/config/crl", map[string]interface{}{ + "auto_rebuild": true, + "enable_delta": true, + }) + require.NoError(t, err) + time.Sleep(4 * time.Second) + beforeDeltaRotation := time.Now().Add(-2 * time.Second) + + resp, err = client.Logical().Read("pki/crl/rotate-delta") + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.Equal(t, resp.Data["success"], true) + + afterDeltaRotation := time.Now().Add(2 * time.Second) + + for _, path := range []string{"pki/cert/ca", "pki/cert/crl", "pki/issuer/default/json", "pki/issuer/old-root/json", "pki/issuer/new-root/json", "pki/issuer/old-root/crl", "pki/issuer/new-root/crl"} { + t.Logf("path: %v", path) + + for _, when := range []time.Time{beforeDeltaRotation, afterDeltaRotation} { + client.AddHeader("If-Modified-Since", when.Format(time.RFC1123)) + resp, err = client.Logical().Read(path) + require.NoError(t, err) + require.Nil(t, resp) + client.SetHeaders(lastHeaders) + lastHeaders = client.Headers() + } + } + + for _, path := range []string{"pki/cert/delta-crl", "pki/issuer/old-root/crl/delta", "pki/issuer/new-root/crl/delta"} { + t.Logf("path: %v", path) + field := "certificate" + if strings.HasPrefix(path, "pki/issuer") && strings.Contains(path, "/crl") { + field = "crl" + } + + // Ensure that the CRL is present if we give it the pre-update time. 
+ client.AddHeader("If-Modified-Since", beforeDeltaRotation.Format(time.RFC1123)) + resp, err = client.Logical().Read(path) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data[field]) + client.SetHeaders(lastHeaders) + lastHeaders = client.Headers() + + client.AddHeader("If-Modified-Since", afterDeltaRotation.Format(time.RFC1123)) + resp, err = client.Logical().Read(path) + require.NoError(t, err) + require.Nil(t, resp) + client.SetHeaders(lastHeaders) + lastHeaders = client.Headers() + } +} + +func TestBackend_InitializeCertificateCounts(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + ctx := context.Background() + + // Set up an Issuer and Role + // We need a root certificate to write/revoke certificates with + resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "myvault.com", + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected ca info") + } + + // Create a role + _, err = CBWrite(b, s, "roles/example", map[string]interface{}{ + "allowed_domains": "myvault.com", + "allow_bare_domains": true, + "allow_subdomains": true, + "max_ttl": "2h", + }) + if err != nil { + t.Fatal(err) + } + + // Put certificates A, B, C, D, E in backend + var certificates []string = []string{"a", "b", "c", "d", "e"} + serials := make([]string, 5) + for i, cn := range certificates { + resp, err = CBWrite(b, s, "issue/example", map[string]interface{}{ + "common_name": cn + ".myvault.com", + }) + if err != nil { + t.Fatal(err) + } + serials[i] = resp.Data["serial_number"].(string) + } + + // Turn on certificate counting: + CBWrite(b, s, "config/auto-tidy", map[string]interface{}{ + "maintain_stored_certificate_counts": true, + "publish_stored_certificate_count_metrics": false, + }) + // Assert initialize from clean is correct: + b.initializeStoredCertificateCounts(ctx) + + // Revoke certificates A + B + revocations := serials[0:2] + for _, key := range revocations { + resp, err = CBWrite(b, s, "revoke", map[string]interface{}{ + "serial_number": key, + }) + if err != nil { + t.Fatal(err) + } + } + + if b.certCount.Load() != 6 { + t.Fatalf("Failed to count six certificates root,A,B,C,D,E, instead counted %d certs", b.certCount.Load()) + } + if b.revokedCertCount.Load() != 2 { + t.Fatalf("Failed to count two revoked certificates A+B, instead counted %d certs", b.revokedCertCount.Load()) + } + + // Simulates listing while initialize in progress, by "restarting it" + b.certCount.Store(0) + b.revokedCertCount.Store(0) + b.certsCounted.Store(false) + + // Revoke certificates C, D + dirtyRevocations := serials[2:4] + for _, key := range dirtyRevocations { + resp, err = CBWrite(b, s, "revoke", map[string]interface{}{ + "serial_number": key, + }) + if err != nil { + t.Fatal(err) + } + } + + // Put certificates F, G in the backend + dirtyCertificates := []string{"f", "g"} + for _, cn := range dirtyCertificates { + resp, err = CBWrite(b, s, "issue/example", map[string]interface{}{ + "common_name": cn + ".myvault.com", + }) + if err != nil { + t.Fatal(err) + } + } + + // Run initialize + b.initializeStoredCertificateCounts(ctx) + + // Test certificate count + if b.certCount.Load() != 8 { + t.Fatalf("Failed to initialize count of certificates root, A,B,C,D,E,F,G counted %d certs", b.certCount.Load()) + } + + if b.revokedCertCount.Load() != 4 { + t.Fatalf("Failed to count revoked certificates A,B,C,D counted %d certs", b.revokedCertCount.Load()) + } + + return +} + +// Verify 
that our default values are consistent when creating an issuer and when we do an +// empty POST update to it. This will hopefully identify if we have different default values +// for fields across the two APIs. +func TestBackend_VerifyIssuerUpdateDefaultsMatchCreation(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "myvault.com", + }) + requireSuccessNonNilResponse(t, resp, err, "failed generating root issuer") + + resp, err = CBRead(b, s, "issuer/default") + requireSuccessNonNilResponse(t, resp, err, "failed reading default issuer") + preUpdateValues := resp.Data + + // This field gets reset during issuer update to the empty string + // (meaning Go will auto-detect the rev-sig-algo). + preUpdateValues["revocation_signature_algorithm"] = "" + + resp, err = CBWrite(b, s, "issuer/default", map[string]interface{}{}) + requireSuccessNonNilResponse(t, resp, err, "failed updating default issuer with no values") + + resp, err = CBRead(b, s, "issuer/default") + requireSuccessNonNilResponse(t, resp, err, "failed reading default issuer") + postUpdateValues := resp.Data + + require.Equal(t, preUpdateValues, postUpdateValues, + "A value was updated based on the empty update of an issuer, "+ + "most likely we have a different set of field parameters across create and update of issuers.") +} + +func TestBackend_VerifyPSSKeysIssuersFailImport(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + // PKCS8 parsing fails on this key due to rsaPSS OID + rsaOIDKey := ` +-----BEGIN PRIVATE KEY----- +MIIEugIBADALBgkqhkiG9w0BAQoEggSmMIIEogIBAAKCAQEAtN0/NPuJHLuyEdBr +tUikXoXOV741XZcNvLAIVBIqDA0ege2gXt9A15FGUI4X3u6kT16Fl6MRdtUZ/qNS +Vs15nK9A1PI/AVekMgTVFTnoCzs550CKN8iRk9Om+lwHimpyXxKkFW69v8fsXwKE +Bsz69jjT7HV9VZQ7fQhmE79brAMuwKP1fUQKdHq5OBKtQ7Cl3Gmipp0izCsVuQIE +kBHvT3UUgyaSp2n+FONpOiyuBoYUH5tVEv9sZzBqSsrYBJYF+GvfnFy9AcTdqRe2 +VX2SjjWjDF84T30OBA798gIFIPwu9R4OjWOlPeh2bo2kGeo3AITjwFZ28m7kS7kc +OtvHpwIDAQABAoIBAFQxmjbj0RQbG+3HBBzD0CBgUYnu9ZC3vKFVoMriGci6YrVB +FSKU8u5mpkDhpKMWnE6GRdItCvgyg4NSLAZUaIRT4O5ARqwtTDYsobTb2/U+gNnx +5WXKbFpQcK6jIK+ClfNEDjYb8yDPxG0GEsfHrBvqoFy25L1t37N4sWwH7HjJyZIe +Hbqx4NVDur9qgqaUwkfSeufn4ycHqFtkzKNzCUarDkST9cxE6/1AKfhl09PPuMEa +lAY2JLiEplQL5sh9cxG5FObJbutJo5EIhR2OdM0VcPf0MTD9LXKRoGR3SNlG7IlS +llJzBjlh4J1ByMX32btKMHzEvlhyrMI90E1SEGECgYEAx1yDQWe4/b1MBqCxA3d0 +20dDmUHSRQFhkd/Mzkl5dPzRkG42W3ryNbMKdeuL0ZgK9AhfaLCjcj1i+44O7dHb +qBTVwfRrer2uoQVCqqJ6z8PGxPJJxTaqh9QuJxkoQ0i43ZNPcjc2M2sWLn+lkkdE +MaGMiyrmjIQEC6tmgCtZ1VUCgYEA6D9xoT9VuAnQjDvW2tO5N2U2H/8ZyRd1pC3z +H1CzjwShhxsP4YOUaVdw59K95JL4SMxSmpRrhthlW3cRaiT/exBcXLEvz0Qu0OhW +a6155ZFjK3UaLDKlwvmtuoAsuAFqX084LO0B1oxvUJESgyPncQ36fv2lZGV7A66z +Uo+BKQsCgYB2yGBMMAjA5nDN4iCV+C7gF+3m+pjWFKSVzcqxfoWndptGeuRYTUDT +TgIFkHqWPwkHrZVrQxOflYPMbi/m8wr1crSKA5+mWi4aMpAuKvERqYxc/B+IKbIh +jAKTuSGMNWAwZP0JCGx65mso+VUleuDe0Wpz4PPM9TuT2GQSKcI0oQKBgHAHcouC +npmo+lU65DgoWzaydrpWdpy+2Tt6AsW/Su4ZIMWoMy/oJaXuzQK2cG0ay/NpxArW +v0uLhNDrDZZzBF3blYIM4nALhr205UMJqjwntnuXACoDwFvdzoShIXEdFa+l6gYZ +yYIxudxWLmTd491wDb5GIgrcvMsY8V1I5dfjAoGAM9g2LtdqgPgK33dCDtZpBm8m +y4ri9PqHxnpps9WJ1dO6MW/YbW+a7vbsmNczdJ6XNLEfy2NWho1dw3xe7ztFVDjF +cWNUzs1+/6aFsi41UX7EFn3zAFhQUPxT59hXspuWuKbRAWc5fMnxbCfI/Cr8wTLJ +E/0kiZ4swUMyI4tYSbM= +-----END PRIVATE KEY----- +` + _, err := CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ + "pem_bundle": rsaOIDKey, + }) + require.Error(t, err, "expected error importing PKCS8 rsaPSS OID key") + + _, err = CBWrite(b, s, 
"keys/import", map[string]interface{}{ + "key": rsaOIDKey, + }) + require.Error(t, err, "expected error importing PKCS8 rsaPSS OID key") + + // Importing a cert with rsaPSS OID should also fail + rsaOIDCert := ` +-----BEGIN CERTIFICATE----- +MIIDfjCCAjGgAwIBAgIBATBCBgkqhkiG9w0BAQowNaAPMA0GCWCGSAFlAwQCAQUA +oRwwGgYJKoZIhvcNAQEIMA0GCWCGSAFlAwQCAQUAogQCAgDeMBMxETAPBgNVBAMM +CHJvb3Qtb2xkMB4XDTIyMDkxNjE0MDEwM1oXDTIzMDkyNjE0MDEwM1owEzERMA8G +A1UEAwwIcm9vdC1vbGQwggEgMAsGCSqGSIb3DQEBCgOCAQ8AMIIBCgKCAQEAtN0/ +NPuJHLuyEdBrtUikXoXOV741XZcNvLAIVBIqDA0ege2gXt9A15FGUI4X3u6kT16F +l6MRdtUZ/qNSVs15nK9A1PI/AVekMgTVFTnoCzs550CKN8iRk9Om+lwHimpyXxKk +FW69v8fsXwKEBsz69jjT7HV9VZQ7fQhmE79brAMuwKP1fUQKdHq5OBKtQ7Cl3Gmi +pp0izCsVuQIEkBHvT3UUgyaSp2n+FONpOiyuBoYUH5tVEv9sZzBqSsrYBJYF+Gvf +nFy9AcTdqRe2VX2SjjWjDF84T30OBA798gIFIPwu9R4OjWOlPeh2bo2kGeo3AITj +wFZ28m7kS7kcOtvHpwIDAQABo3UwczAdBgNVHQ4EFgQUVGkTAUJ8inxIVGBlfxf4 +cDhRSnowHwYDVR0jBBgwFoAUVGkTAUJ8inxIVGBlfxf4cDhRSnowDAYDVR0TBAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAYYwEwYDVR0lBAwwCgYIKwYBBQUHAwEwQgYJKoZI +hvcNAQEKMDWgDzANBglghkgBZQMEAgEFAKEcMBoGCSqGSIb3DQEBCDANBglghkgB +ZQMEAgEFAKIEAgIA3gOCAQEAQZ3iQ3NjvS4FYJ5WG41huZI0dkvNFNan+ZYWlYHJ +MIQhbFogb/UQB0rlsuldG0+HF1RDXoYNuThfzt5hiBWYEtMBNurezvnOn4DF0hrl +Uk3sBVnvTalVXg+UVjqh9hBGB75JYJl6a5Oa2Zrq++4qGNwjd0FqgnoXzqS5UGuB +TJL8nlnXPuOIK3VHoXEy7l9GtvEzKcys0xa7g1PYpaJ5D2kpbBJmuQGmU6CDcbP+ +m0hI4QDfVfHtnBp2VMCvhj0yzowtwF4BFIhv4EXZBU10mzxVj0zyKKft9++X8auH +nebuK22ZwzbPe4NhOvAdfNDElkrrtGvTnzkDB7ezPYjelA== +-----END CERTIFICATE----- +` + _, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ + "pem_bundle": rsaOIDCert, + }) + require.Error(t, err, "expected error importing PKCS8 rsaPSS OID cert") + + _, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ + "pem_bundle": rsaOIDKey + "\n" + rsaOIDCert, + }) + require.Error(t, err, "expected error importing PKCS8 rsaPSS OID key+cert") + + _, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ + "pem_bundle": rsaOIDCert + "\n" + rsaOIDKey, + }) + require.Error(t, err, "expected error importing PKCS8 rsaPSS OID cert+key") + + // After all these errors, we should have zero issuers and keys. + resp, err := CBList(b, s, "issuers") + require.NoError(t, err) + require.Equal(t, nil, resp.Data["keys"]) + + resp, err = CBList(b, s, "keys") + require.NoError(t, err) + require.Equal(t, nil, resp.Data["keys"]) + + // If we create a new PSS root, we should be able to issue an intermediate + // under it. 
+ resp, err = CBWrite(b, s, "root/generate/exported", map[string]interface{}{
+ "use_pss": "true",
+ "common_name": "root x1 - pss",
+ "key_type": "ec",
+ })
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ require.NotNil(t, resp.Data)
+ require.NotEmpty(t, resp.Data["certificate"])
+ require.NotEmpty(t, resp.Data["private_key"])
+
+ resp, err = CBWrite(b, s, "intermediate/generate/exported", map[string]interface{}{
+ "use_pss": "true",
+ "common_name": "int x1 - pss",
+ "key_type": "ec",
+ })
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ require.NotNil(t, resp.Data)
+ require.NotEmpty(t, resp.Data["csr"])
+ require.NotEmpty(t, resp.Data["private_key"])
+
+ resp, err = CBWrite(b, s, "issuer/default/sign-intermediate", map[string]interface{}{
+ "use_pss": "true",
+ "common_name": "int x1 - pss",
+ "csr": resp.Data["csr"].(string),
+ })
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ require.NotNil(t, resp.Data)
+ require.NotEmpty(t, resp.Data["certificate"])
+
+ resp, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{
+ "pem_bundle": resp.Data["certificate"].(string),
+ })
+ require.NoError(t, err)
+
+ // Finally, if we were to take an rsaPSS OID'd CSR and use it against this
+ // mount, it would fail.
+ _, err = CBWrite(b, s, "roles/testing", map[string]interface{}{
+ "allow_any_name": true,
+ "ttl": "85s",
+ "key_type": "any",
+ })
+ require.NoError(t, err)
+
+ // Issuing a leaf from a CSR with rsaPSS OID should fail...
+ rsaOIDCSR := `-----BEGIN CERTIFICATE REQUEST-----
+MIICkTCCAUQCAQAwGTEXMBUGA1UEAwwOcmFuY2hlci5teS5vcmcwggEgMAsGCSqG
+SIb3DQEBCgOCAQ8AMIIBCgKCAQEAtzHuGEUK55lXI08yp9DXoye9yCZbkJZO+Hej
+1TWGEkbX4hzauRJeNp2+wn8xU5y8ITjWSIXEVDHeezosLCSy0Y2QT7/V45zWPUYY
+ld0oUnPiwsb9CPFlBRFnX3dO9SS5MONIrNCJGKXmLdF3lgSl8zPT6J/hWM+JBjHO
+hBzK6L8IYwmcEujrQfnOnOztzgMEBJtWG8rnI8roz1adpczTddDKGymh2QevjhlL
+X9CLeYSSQZInOMsgaDYl98Hn00K5x0CBp8ADzzXtaPSQ9nsnihN8VvZ/wHw6YbBS
+BSHa6OD+MrYnw3Sao6/YgBRNT2glIX85uro4ARW9zGB9/748dwIDAQABoAAwQgYJ
+KoZIhvcNAQEKMDWgDzANBglghkgBZQMEAgEFAKEcMBoGCSqGSIb3DQEBCDANBglg
+hkgBZQMEAgEFAKIEAgIA3gOCAQEARGAa0HiwzWCpvAdLOVc4/srEyOYFZPLbtv+Y
+ezZIaUBNaWhOvkunqpa48avmcbGlji7r6fxJ5sT28lHt7ODWcJfn1XPAnqesXErm
+EBuOIhCv6WiwVyGeTVynuHYkHyw3rIL/zU7N8+zIFV2G2M1UAv5D/eyh/74cr9Of
++nvm9jAbkHix8UwOBCFY2LLNl6bXvbIeJEdDOEtA9UmDXs8QGBg4lngyqcE2Z7rz
++5N/x4guMk2FqblbFGiCc5fLB0Gp6lFFOqhX9Q8nLJ6HteV42xGJUUtsFpppNCRm
+82dGIH2PTbXZ0k7iAAwLaPjzOv1v58Wq90o35d4iEsOfJ8v98Q==
+-----END CERTIFICATE REQUEST-----`
+
+ _, err = CBWrite(b, s, "issuer/default/sign/testing", map[string]interface{}{
+ "common_name": "example.com",
+ "csr": rsaOIDCSR,
+ })
+ require.Error(t, err)
+
+ _, err = CBWrite(b, s, "issuer/default/sign-verbatim", map[string]interface{}{
+ "common_name": "example.com",
+ "use_pss": true,
+ "csr": rsaOIDCSR,
+ })
+ require.Error(t, err)
+
+ _, err = CBWrite(b, s, "issuer/default/sign-intermediate", map[string]interface{}{
+ "common_name": "faulty x1 - pss",
+ "use_pss": true,
+ "csr": rsaOIDCSR,
+ })
+ require.Error(t, err)
+
+ // Vault has a weird API for signing self-signed certificates. Ensure
+ // that it doesn't accept rsaPSS OID'd certificates either.
+ _, err = CBWrite(b, s, "issuer/default/sign-self-issued", map[string]interface{}{
+ "use_pss": true,
+ "certificate": rsaOIDCert,
+ })
+ require.Error(t, err)
+
+ // Issuing a regular leaf should succeed.
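+ // (Here use_pss is a signing-time option: it only affects the signature
+ // algorithm chosen when the issuer holds an RSA key, while the leaf's own
+ // key keeps its standard encoding.)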
+ _, err = CBWrite(b, s, "roles/testing", map[string]interface{}{
+ "allow_any_name": true,
+ "ttl": "85s",
+ "key_type": "rsa",
+ "use_pss": "true",
+ })
+ require.NoError(t, err)
+
+ resp, err = CBWrite(b, s, "issuer/default/issue/testing", map[string]interface{}{
+ "common_name": "example.com",
+ "use_pss": "true",
+ })
+ requireSuccessNonNilResponse(t, resp, err, "failed to issue PSS leaf")
+}
+
+func TestPKI_EmptyCRLConfigUpgraded(t *testing.T) {
+ t.Parallel()
+ b, s := CreateBackendWithStorage(t)
+
+ // Write an empty CRLConfig into storage.
+ crlConfigEntry, err := logical.StorageEntryJSON("config/crl", &crlConfig{})
+ require.NoError(t, err)
+ err = s.Put(ctx, crlConfigEntry)
+ require.NoError(t, err)
+
+ resp, err := CBRead(b, s, "config/crl")
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ require.NotNil(t, resp.Data)
+ require.Equal(t, resp.Data["expiry"], defaultCrlConfig.Expiry)
+ require.Equal(t, resp.Data["disable"], defaultCrlConfig.Disable)
+ require.Equal(t, resp.Data["ocsp_disable"], defaultCrlConfig.OcspDisable)
+ require.Equal(t, resp.Data["auto_rebuild"], defaultCrlConfig.AutoRebuild)
+ require.Equal(t, resp.Data["auto_rebuild_grace_period"], defaultCrlConfig.AutoRebuildGracePeriod)
+ require.Equal(t, resp.Data["enable_delta"], defaultCrlConfig.EnableDelta)
+ require.Equal(t, resp.Data["delta_rebuild_interval"], defaultCrlConfig.DeltaRebuildInterval)
+}
+
+func TestPKI_ListRevokedCerts(t *testing.T) {
+ t.Parallel()
+ b, s := CreateBackendWithStorage(t)
+
+ // Test empty cluster
+ resp, err := CBList(b, s, "certs/revoked")
+ schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("certs/revoked"), logical.ListOperation), resp, true)
+ requireSuccessNonNilResponse(t, resp, err, "failed listing empty cluster")
+ require.Empty(t, resp.Data, "response map contained data that we did not expect")
+
+ // Set up a mount that we can revoke under (we will create 3 leaf certs, 2 of which will be revoked)
+ resp, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{
+ "common_name": "test.com",
+ "key_type": "ec",
+ })
+ requireSuccessNonNilResponse(t, resp, err, "error generating root CA")
+ requireFieldsSetInResp(t, resp, "serial_number")
+ issuerSerial := resp.Data["serial_number"]
+
+ resp, err = CBWrite(b, s, "roles/test", map[string]interface{}{
+ "allowed_domains": "test.com",
+ "allow_subdomains": "true",
+ "max_ttl": "1h",
+ })
+ requireSuccessNonNilResponse(t, resp, err, "error setting up pki role")
+
+ resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{
+ "common_name": "test1.test.com",
+ })
+ requireSuccessNonNilResponse(t, resp, err, "error issuing cert 1")
+ requireFieldsSetInResp(t, resp, "serial_number")
+ serial1 := resp.Data["serial_number"]
+
+ resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{
+ "common_name": "test2.test.com",
+ })
+ requireSuccessNonNilResponse(t, resp, err, "error issuing cert 2")
+ requireFieldsSetInResp(t, resp, "serial_number")
+ serial2 := resp.Data["serial_number"]
+
+ resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{
+ "common_name": "test3.test.com",
+ })
+ requireSuccessNonNilResponse(t, resp, err, "error issuing cert 3")
+ requireFieldsSetInResp(t, resp, "serial_number")
+ serial3 := resp.Data["serial_number"]
+
+ resp, err = CBWrite(b, s, "revoke", map[string]interface{}{"serial_number": serial1})
+ requireSuccessNonNilResponse(t, resp, err, "error revoking cert 1")
+
+ resp, err = CBWrite(b, s, "revoke", map[string]interface{}{"serial_number": serial2})
+ 
requireSuccessNonNilResponse(t, resp, err, "error revoking cert 2") + + // Test that we get back the expected revoked serial numbers. + resp, err = CBList(b, s, "certs/revoked") + requireSuccessNonNilResponse(t, resp, err, "failed listing revoked certs") + requireFieldsSetInResp(t, resp, "keys") + revokedKeys := resp.Data["keys"].([]string) + + require.Contains(t, revokedKeys, serial1) + require.Contains(t, revokedKeys, serial2) + require.Equal(t, 2, len(revokedKeys), "Expected 2 revoked entries got %d: %v", len(revokedKeys), revokedKeys) + + // Test that listing our certs returns a different response + resp, err = CBList(b, s, "certs") + requireSuccessNonNilResponse(t, resp, err, "failed listing written certs") + requireFieldsSetInResp(t, resp, "keys") + certKeys := resp.Data["keys"].([]string) + + require.Contains(t, certKeys, serial1) + require.Contains(t, certKeys, serial2) + require.Contains(t, certKeys, serial3) + require.Contains(t, certKeys, issuerSerial) + require.Equal(t, 4, len(certKeys), "Expected 4 cert entries got %d: %v", len(certKeys), certKeys) +} + +func TestPKI_TemplatedAIAs(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + // Setting templated AIAs should succeed. + resp, err := CBWrite(b, s, "config/cluster", map[string]interface{}{ + "path": "http://localhost:8200/v1/pki", + "aia_path": "http://localhost:8200/cdn/pki", + }) + require.NoError(t, err) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/cluster"), logical.UpdateOperation), resp, true) + + resp, err = CBRead(b, s, "config/cluster") + require.NoError(t, err) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/cluster"), logical.ReadOperation), resp, true) + + aiaData := map[string]interface{}{ + "crl_distribution_points": "{{cluster_path}}/issuer/{{issuer_id}}/crl/der", + "issuing_certificates": "{{cluster_aia_path}}/issuer/{{issuer_id}}/der", + "ocsp_servers": "{{cluster_path}}/ocsp", + "enable_templating": true, + } + _, err = CBWrite(b, s, "config/urls", aiaData) + require.NoError(t, err) + + // Root generation should succeed, but without AIA info. + rootData := map[string]interface{}{ + "common_name": "Long-Lived Root X1", + "issuer_name": "long-root-x1", + "key_type": "ec", + } + resp, err = CBWrite(b, s, "root/generate/internal", rootData) + require.NoError(t, err) + _, err = CBDelete(b, s, "root") + require.NoError(t, err) + + // Clearing the config and regenerating the root should still succeed. + _, err = CBWrite(b, s, "config/urls", map[string]interface{}{ + "crl_distribution_points": "{{cluster_path}}/issuer/my-root-id/crl/der", + "issuing_certificates": "{{cluster_aia_path}}/issuer/my-root-id/der", + "ocsp_servers": "{{cluster_path}}/ocsp", + "enable_templating": true, + }) + require.NoError(t, err) + resp, err = CBWrite(b, s, "root/generate/internal", rootData) + requireSuccessNonNilResponse(t, resp, err) + issuerId := string(resp.Data["issuer_id"].(issuerID)) + + // Now write the original AIA config and sign a leaf. + _, err = CBWrite(b, s, "config/urls", aiaData) + require.NoError(t, err) + _, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ + "allow_any_name": "true", + "key_type": "ec", + "ttl": "50m", + }) + require.NoError(t, err) + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "example.com", + }) + requireSuccessNonNilResponse(t, resp, err) + + // Validate the AIA info is correctly templated. 
+	cert := parseCert(t, resp.Data["certificate"].(string))
+	require.Equal(t, cert.OCSPServer, []string{"http://localhost:8200/v1/pki/ocsp"})
+	require.Equal(t, cert.IssuingCertificateURL, []string{"http://localhost:8200/cdn/pki/issuer/" + issuerId + "/der"})
+	require.Equal(t, cert.CRLDistributionPoints, []string{"http://localhost:8200/v1/pki/issuer/" + issuerId + "/crl/der"})
+
+	// Modify our issuer to set custom AIAs: these URLs are bad.
+	_, err = CBPatch(b, s, "issuer/default", map[string]interface{}{
+		"enable_aia_url_templating": "false",
+		"crl_distribution_points":   "a",
+		"issuing_certificates":      "b",
+		"ocsp_servers":              "c",
+	})
+	require.Error(t, err)
+
+	// These URLs are good.
+	_, err = CBPatch(b, s, "issuer/default", map[string]interface{}{
+		"enable_aia_url_templating": "false",
+		"crl_distribution_points":   "http://localhost/a",
+		"issuing_certificates":      "http://localhost/b",
+		"ocsp_servers":              "http://localhost/c",
+	})
+	require.NoError(t, err)
+
+	resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{
+		"common_name": "example.com",
+	})
+	requireSuccessNonNilResponse(t, resp, err)
+
+	// Validate the AIA info is correctly set (templating is disabled here).
+	cert = parseCert(t, resp.Data["certificate"].(string))
+	require.Equal(t, cert.OCSPServer, []string{"http://localhost/c"})
+	require.Equal(t, cert.IssuingCertificateURL, []string{"http://localhost/b"})
+	require.Equal(t, cert.CRLDistributionPoints, []string{"http://localhost/a"})
+
+	// These URLs are bad, but will fail at issuance time due to AIA templating.
+	resp, err = CBPatch(b, s, "issuer/default", map[string]interface{}{
+		"enable_aia_url_templating": "true",
+		"crl_distribution_points":   "a",
+		"issuing_certificates":      "b",
+		"ocsp_servers":              "c",
+	})
+	requireSuccessNonNilResponse(t, resp, err)
+	require.NotEmpty(t, resp.Warnings)
+	_, err = CBWrite(b, s, "issue/testing", map[string]interface{}{
+		"common_name": "example.com",
+	})
+	require.Error(t, err)
+}
+
+func requireSubjectUserIDAttr(t *testing.T, cert string, target string) {
+	xCert := parseCert(t, cert)
+
+	for _, attr := range xCert.Subject.Names {
+		var userID string
+		if attr.Type.Equal(certutil.SubjectPilotUserIDAttributeOID) {
+			if target == "" {
+				t.Fatalf("expected no UserID (OID: %v) subject attributes in cert:\n%v", certutil.SubjectPilotUserIDAttributeOID, cert)
+			}
+
+			switch aValue := attr.Value.(type) {
+			case string:
+				userID = aValue
+			case []byte:
+				userID = string(aValue)
+			default:
+				t.Fatalf("unknown type for UserID attribute: %v\nCert: %v", attr, cert)
+			}
+
+			if userID == target {
+				return
+			}
+		}
+	}
+
+	if target != "" {
+		t.Fatalf("failed to find UserID (OID: %v) matching %v in cert:\n%v", certutil.SubjectPilotUserIDAttributeOID, target, cert)
+	}
+}
+
+func TestUserIDsInLeafCerts(t *testing.T) {
+	t.Parallel()
+	b, s := CreateBackendWithStorage(t)
+
+	// 1. Setup root issuer.
+	resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{
+		"common_name": "Vault Root CA",
+		"key_type":    "ec",
+		"ttl":         "7200h",
+	})
+	requireSuccessNonNilResponse(t, resp, err, "failed generating root issuer")
+
+	// 2. Allow no user IDs.
+	resp, err = CBWrite(b, s, "roles/testing", map[string]interface{}{
+		"allowed_user_ids": "",
+		"key_type":         "ec",
+	})
+	requireSuccessNonNilResponse(t, resp, err, "failed setting up role")
+
+	// - Issue cert without user IDs should work.
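+	// (An empty allowed_user_ids list means the role accepts requests that omit
+	// user_ids entirely but rejects any explicitly requested UID, as the second
+	// request below demonstrates.)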
+ resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "") + + // - Issue cert with user ID should fail. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "humanoid", + }) + require.Error(t, err) + require.True(t, resp.IsError()) + + // 3. Allow any user IDs. + resp, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ + "allowed_user_ids": "*", + "key_type": "ec", + }) + requireSuccessNonNilResponse(t, resp, err, "failed setting up role") + + // - Issue cert without user IDs. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "") + + // - Issue cert with one user ID. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "humanoid", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") + + // - Issue cert with two user IDs. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "humanoid,robot", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "robot") + + // 4. Allow one specific user ID. + resp, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ + "allowed_user_ids": "humanoid", + "key_type": "ec", + }) + requireSuccessNonNilResponse(t, resp, err, "failed setting up role") + + // - Issue cert without user IDs. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "") + + // - Issue cert with approved ID. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "humanoid", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") + + // - Issue cert with non-approved user ID should fail. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "robot", + }) + require.Error(t, err) + require.True(t, resp.IsError()) + + // - Issue cert with one approved and one non-approved should also fail. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "humanoid,robot", + }) + require.Error(t, err) + require.True(t, resp.IsError()) + + // 5. Allow two specific user IDs. + resp, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ + "allowed_user_ids": "humanoid,robot", + "key_type": "ec", + }) + requireSuccessNonNilResponse(t, resp, err, "failed setting up role") + + // - Issue cert without user IDs. 
+ resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "") + + // - Issue cert with one approved ID. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "humanoid", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") + + // - Issue cert with other user ID. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "robot", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "robot") + + // - Issue cert with unknown user ID will fail. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "robot2", + }) + require.Error(t, err) + require.True(t, resp.IsError()) + + // - Issue cert with both should succeed. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "humanoid,robot", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "robot") + + // 6. Use a glob. + resp, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ + "allowed_user_ids": "human*", + "key_type": "ec", + "use_csr_sans": true, // setup for further testing. + }) + requireSuccessNonNilResponse(t, resp, err, "failed setting up role") + + // - Issue cert without user IDs. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "") + + // - Issue cert with approved ID. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "humanoid", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") + + // - Issue cert with another approved ID. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "human", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "human") + + // - Issue cert with literal glob. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "human*", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "human*") + + // - Still no robotic certs are allowed; will fail. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "robot", + }) + require.Error(t, err) + require.True(t, resp.IsError()) + + // Create a CSR and validate it works with both sign/ and sign-verbatim. 
+	csrTemplate := x509.CertificateRequest{
+		Subject: pkix.Name{
+			CommonName: "localhost",
+			ExtraNames: []pkix.AttributeTypeAndValue{
+				{
+					Type:  certutil.SubjectPilotUserIDAttributeOID,
+					Value: "humanoid",
+				},
+			},
+		},
+	}
+	_, _, csrPem := generateCSR(t, &csrTemplate, "ec", 256)
+
+	// Should work with role-based signing.
+	resp, err = CBWrite(b, s, "sign/testing", map[string]interface{}{
+		"csr": csrPem,
+	})
+	schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("sign/testing"), logical.UpdateOperation), resp, true)
+	requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert")
+	requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid")
+
+	// - Definitely will work with sign-verbatim.
+	resp, err = CBWrite(b, s, "sign-verbatim", map[string]interface{}{
+		"csr": csrPem,
+	})
+	requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert")
+	requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid")
+}
+
+// TestStandby_Operations tests proper forwarding of PKI requests from a standby node to the
+// active node within a cluster.
+func TestStandby_Operations(t *testing.T) {
+	conf, opts := teststorage.ClusterSetup(&vault.CoreConfig{
+		LogicalBackends: map[string]logical.Factory{
+			"pki": Factory,
+		},
+	}, nil, teststorage.InmemBackendSetup)
+	cluster := vault.NewTestCluster(t, conf, opts)
+	cluster.Start()
+	defer cluster.Cleanup()
+
+	testhelpers.WaitForActiveNodeAndStandbys(t, cluster)
+	standbyCores := testhelpers.DeriveStandbyCores(t, cluster)
+	require.Greater(t, len(standbyCores), 0, "Need at least one standby core.")
+	client := standbyCores[0].Client
+
+	mountPKIEndpoint(t, client, "pki")
+
+	_, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{
+		"key_type":    "ec",
+		"common_name": "root-ca.com",
+		"ttl":         "600h",
+	})
+	require.NoError(t, err, "error generating root CA: %v", err)
+
+	_, err = client.Logical().Write("pki/roles/example", map[string]interface{}{
+		"allowed_domains":  "example.com",
+		"allow_subdomains": "true",
+		"no_store":         "false", // make sure we store this cert
+		"ttl":              "5h",
+		"key_type":         "ec",
+	})
+	require.NoError(t, err, "error setting up pki role: %v", err)
+
+	resp, err := client.Logical().Write("pki/issue/example", map[string]interface{}{
+		"common_name": "test.example.com",
+	})
+	require.NoError(t, err, "error issuing certificate: %v", err)
+	require.NotNil(t, resp, "got nil response from issuing request")
+	serialOfCert := resp.Data["serial_number"].(string)
+
+	resp, err = client.Logical().Write("pki/revoke", map[string]interface{}{
+		"serial_number": serialOfCert,
+	})
+	require.NoError(t, err, "error revoking certificate: %v", err)
+	require.NotNil(t, resp, "got nil response from revoke request")
+}
+
+type pathAuthCheckerFunc func(t *testing.T, client *api.Client, path string, token string)
+
+func isPermDenied(err error) bool {
+	return err != nil && strings.Contains(err.Error(), "permission denied")
+}
+
+func isUnsupportedPathOperation(err error) bool {
+	return err != nil && (strings.Contains(err.Error(), "unsupported path") || strings.Contains(err.Error(), "unsupported operation"))
+}
+
+func isDeniedOp(err error) bool {
+	return isPermDenied(err) || isUnsupportedPathOperation(err)
+}
+
+func pathShouldBeAuthed(t *testing.T, client *api.Client, path string, token string) {
+	client.SetToken("")
+	resp, err := client.Logical().ReadWithContext(ctx, path)
+	if err == nil || !isPermDenied(err) {
+		t.Fatalf("expected failure to read %v while 
unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().ListWithContext(ctx, path) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to list %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to write %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to delete %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to patch %v while unauthed: %v / %v", path, err, resp) + } +} + +func pathShouldBeUnauthedReadList(t *testing.T, client *api.Client, path string, token string) { + // Should be able to read both with and without a token. + client.SetToken("") + resp, err := client.Logical().ReadWithContext(ctx, path) + if err != nil && isPermDenied(err) { + // Read will sometimes return permission denied, when the handler + // does not support the given operation. Retry with the token. + client.SetToken(token) + resp2, err2 := client.Logical().ReadWithContext(ctx, path) + if err2 != nil && !isUnsupportedPathOperation(err2) { + t.Fatalf("unexpected failure to read %v while unauthed: %v / %v\nWhile authed: %v / %v", path, err, resp, err2, resp2) + } + client.SetToken("") + } + resp, err = client.Logical().ListWithContext(ctx, path) + if err != nil && isPermDenied(err) { + // List will sometimes return permission denied, when the handler + // does not support the given operation. Retry with the token. + client.SetToken(token) + resp2, err2 := client.Logical().ListWithContext(ctx, path) + if err2 != nil && !isUnsupportedPathOperation(err2) { + t.Fatalf("unexpected failure to list %v while unauthed: %v / %v\nWhile authed: %v / %v", path, err, resp, err2, resp2) + } + client.SetToken("") + } + + // These should all be denied. + resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + if !strings.Contains(path, "ocsp") || !strings.Contains(err.Error(), "Code: 40") { + t.Fatalf("unexpected failure during write on read-only path %v while unauthed: %v / %v", path, err, resp) + } + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during delete on read-only path %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during patch on read-only path %v while unauthed: %v / %v", path, err, resp) + } + + // Retrying with token should allow read/list, but not modification still. + client.SetToken(token) + resp, err = client.Logical().ReadWithContext(ctx, path) + if err != nil && isPermDenied(err) { + t.Fatalf("unexpected failure to read %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().ListWithContext(ctx, path) + if err != nil && isPermDenied(err) { + t.Fatalf("unexpected failure to list %v while authed: %v / %v", path, err, resp) + } + + // Should all be denied. 
+	resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{})
+	if err == nil || !isDeniedOp(err) {
+		if !strings.Contains(path, "ocsp") || !strings.Contains(err.Error(), "Code: 40") {
+			t.Fatalf("unexpected failure during write on read-only path %v while authed: %v / %v", path, err, resp)
+		}
+	}
+	resp, err = client.Logical().DeleteWithContext(ctx, path)
+	if err == nil || !isDeniedOp(err) {
+		t.Fatalf("unexpected failure during delete on read-only path %v while authed: %v / %v", path, err, resp)
+	}
+	resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{})
+	if err == nil || !isDeniedOp(err) {
+		t.Fatalf("unexpected failure during patch on read-only path %v while authed: %v / %v", path, err, resp)
+	}
+}
+
+func pathShouldBeUnauthedWriteOnly(t *testing.T, client *api.Client, path string, token string) {
+	client.SetToken("")
+	resp, err := client.Logical().WriteWithContext(ctx, path, map[string]interface{}{})
+	if err != nil && isPermDenied(err) {
+		t.Fatalf("unexpected failure to write %v while unauthed: %v / %v", path, err, resp)
+	}
+
+	// These should all be denied. However, on OSS, we might end up with
+	// a regular 404, which looks like err == resp == nil; hence we only
+	// fail when there's a non-nil response and/or a non-nil err.
+	resp, err = client.Logical().ReadWithContext(ctx, path)
+	if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) {
+		t.Fatalf("unexpected failure during read on write-only path %v while unauthed: %v / %v", path, err, resp)
+	}
+	resp, err = client.Logical().ListWithContext(ctx, path)
+	if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) {
+		t.Fatalf("unexpected failure during list on write-only path %v while unauthed: %v / %v", path, err, resp)
+	}
+	resp, err = client.Logical().DeleteWithContext(ctx, path)
+	if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) {
+		t.Fatalf("unexpected failure during delete on write-only path %v while unauthed: %v / %v", path, err, resp)
+	}
+	resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{})
+	if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) {
+		t.Fatalf("unexpected failure during patch on write-only path %v while unauthed: %v / %v", path, err, resp)
+	}
+
+	// Retrying with token should allow writing, but nothing else.
+	client.SetToken(token)
+	resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{})
+	if err != nil && isPermDenied(err) {
+		t.Fatalf("unexpected failure to write %v while authed: %v / %v", path, err, resp)
+	}
+
+	// These should all be denied.
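+	// (Same checks as the unauthed pass above: holding a valid token must not
+	// unlock read/list on write-only paths such as ocsp.)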
+	resp, err = client.Logical().ReadWithContext(ctx, path)
+	if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) {
+		t.Fatalf("unexpected failure during read on write-only path %v while authed: %v / %v", path, err, resp)
+	}
+	resp, err = client.Logical().ListWithContext(ctx, path)
+	if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) {
+		t.Fatalf("unexpected failure during list on write-only path %v while authed: %v / %v", path, err, resp)
+	}
+	resp, err = client.Logical().DeleteWithContext(ctx, path)
+	if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) {
+		t.Fatalf("unexpected failure during delete on write-only path %v while authed: %v / %v", path, err, resp)
+	}
+	resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{})
+	if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) {
+		t.Fatalf("unexpected failure during patch on write-only path %v while authed: %v / %v", path, err, resp)
+	}
+}
+
+type pathAuthChecker int
+
+const (
+	shouldBeAuthed pathAuthChecker = iota
+	shouldBeUnauthedReadList
+	shouldBeUnauthedWriteOnly
+)
+
+var pathAuthCheckerMap = map[pathAuthChecker]pathAuthCheckerFunc{
+	shouldBeAuthed:            pathShouldBeAuthed,
+	shouldBeUnauthedReadList:  pathShouldBeUnauthedReadList,
+	shouldBeUnauthedWriteOnly: pathShouldBeUnauthedWriteOnly,
+}
+
+func TestProperAuthing(t *testing.T) {
+	t.Parallel()
+	ctx := context.Background()
+	coreConfig := &vault.CoreConfig{
+		LogicalBackends: map[string]logical.Factory{
+			"pki": Factory,
+		},
+	}
+	cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
+		HandlerFunc: vaulthttp.Handler,
+	})
+	cluster.Start()
+	defer cluster.Cleanup()
+	client := cluster.Cores[0].Client
+	token := client.Token()
+
+	// Mount PKI.
+	err := client.Sys().MountWithContext(ctx, "pki", &api.MountInput{
+		Type: "pki",
+		Config: api.MountConfigInput{
+			DefaultLeaseTTL: "16h",
+			MaxLeaseTTL:     "60h",
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Setup basic configuration.
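+	// (A root issuer, one role, and one issued leaf give the path families below,
+	// such as cert/<serial> and issuer/default, concrete values to resolve against.)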
+ _, err = client.Logical().WriteWithContext(ctx, "pki/root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "myvault.com", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().WriteWithContext(ctx, "pki/roles/test", map[string]interface{}{ + "allow_localhost": true, + }) + if err != nil { + t.Fatal(err) + } + + resp, err := client.Logical().WriteWithContext(ctx, "pki/issue/test", map[string]interface{}{ + "common_name": "localhost", + }) + if err != nil || resp == nil { + t.Fatal(err) + } + serial := resp.Data["serial_number"].(string) + eabKid := "13b80844-e60d-42d2-b7e9-152a8e834b90" + paths := map[string]pathAuthChecker{ + "ca_chain": shouldBeUnauthedReadList, + "cert/ca_chain": shouldBeUnauthedReadList, + "ca": shouldBeUnauthedReadList, + "ca/pem": shouldBeUnauthedReadList, + "cert/" + serial: shouldBeUnauthedReadList, + "cert/" + serial + "/raw": shouldBeUnauthedReadList, + "cert/" + serial + "/raw/pem": shouldBeUnauthedReadList, + "cert/crl": shouldBeUnauthedReadList, + "cert/crl/raw": shouldBeUnauthedReadList, + "cert/crl/raw/pem": shouldBeUnauthedReadList, + "cert/delta-crl": shouldBeUnauthedReadList, + "cert/delta-crl/raw": shouldBeUnauthedReadList, + "cert/delta-crl/raw/pem": shouldBeUnauthedReadList, + "cert/unified-crl": shouldBeUnauthedReadList, + "cert/unified-crl/raw": shouldBeUnauthedReadList, + "cert/unified-crl/raw/pem": shouldBeUnauthedReadList, + "cert/unified-delta-crl": shouldBeUnauthedReadList, + "cert/unified-delta-crl/raw": shouldBeUnauthedReadList, + "cert/unified-delta-crl/raw/pem": shouldBeUnauthedReadList, + "certs": shouldBeAuthed, + "certs/revoked": shouldBeAuthed, + "certs/revocation-queue": shouldBeAuthed, + "certs/revocation-queue/": shouldBeAuthed, + "certs/unified-revoked": shouldBeAuthed, + "certs/unified-revoked/": shouldBeAuthed, + "config/acme": shouldBeAuthed, + "config/auto-tidy": shouldBeAuthed, + "config/ca": shouldBeAuthed, + "config/cluster": shouldBeAuthed, + "config/crl": shouldBeAuthed, + "config/issuers": shouldBeAuthed, + "config/keys": shouldBeAuthed, + "config/urls": shouldBeAuthed, + "crl": shouldBeUnauthedReadList, + "crl/pem": shouldBeUnauthedReadList, + "crl/delta": shouldBeUnauthedReadList, + "crl/delta/pem": shouldBeUnauthedReadList, + "crl/rotate": shouldBeAuthed, + "crl/rotate-delta": shouldBeAuthed, + "intermediate/cross-sign": shouldBeAuthed, + "intermediate/generate/exported": shouldBeAuthed, + "intermediate/generate/internal": shouldBeAuthed, + "intermediate/generate/existing": shouldBeAuthed, + "intermediate/generate/kms": shouldBeAuthed, + "intermediate/set-signed": shouldBeAuthed, + "issue/test": shouldBeAuthed, + "issuer/default": shouldBeAuthed, + "issuer/default/der": shouldBeUnauthedReadList, + "issuer/default/json": shouldBeUnauthedReadList, + "issuer/default/pem": shouldBeUnauthedReadList, + "issuer/default/crl": shouldBeUnauthedReadList, + "issuer/default/crl/pem": shouldBeUnauthedReadList, + "issuer/default/crl/der": shouldBeUnauthedReadList, + "issuer/default/crl/delta": shouldBeUnauthedReadList, + "issuer/default/crl/delta/der": shouldBeUnauthedReadList, + "issuer/default/crl/delta/pem": shouldBeUnauthedReadList, + "issuer/default/unified-crl": shouldBeUnauthedReadList, + "issuer/default/unified-crl/pem": shouldBeUnauthedReadList, + "issuer/default/unified-crl/der": shouldBeUnauthedReadList, + "issuer/default/unified-crl/delta": shouldBeUnauthedReadList, + "issuer/default/unified-crl/delta/der": shouldBeUnauthedReadList, + "issuer/default/unified-crl/delta/pem": 
shouldBeUnauthedReadList, + "issuer/default/issue/test": shouldBeAuthed, + "issuer/default/resign-crls": shouldBeAuthed, + "issuer/default/revoke": shouldBeAuthed, + "issuer/default/sign-intermediate": shouldBeAuthed, + "issuer/default/sign-revocation-list": shouldBeAuthed, + "issuer/default/sign-self-issued": shouldBeAuthed, + "issuer/default/sign-verbatim": shouldBeAuthed, + "issuer/default/sign-verbatim/test": shouldBeAuthed, + "issuer/default/sign/test": shouldBeAuthed, + "issuers": shouldBeUnauthedReadList, + "issuers/generate/intermediate/exported": shouldBeAuthed, + "issuers/generate/intermediate/internal": shouldBeAuthed, + "issuers/generate/intermediate/existing": shouldBeAuthed, + "issuers/generate/intermediate/kms": shouldBeAuthed, + "issuers/generate/root/exported": shouldBeAuthed, + "issuers/generate/root/internal": shouldBeAuthed, + "issuers/generate/root/existing": shouldBeAuthed, + "issuers/generate/root/kms": shouldBeAuthed, + "issuers/import/cert": shouldBeAuthed, + "issuers/import/bundle": shouldBeAuthed, + "key/default": shouldBeAuthed, + "keys": shouldBeAuthed, + "keys/generate/internal": shouldBeAuthed, + "keys/generate/exported": shouldBeAuthed, + "keys/generate/kms": shouldBeAuthed, + "keys/import": shouldBeAuthed, + "ocsp": shouldBeUnauthedWriteOnly, + "ocsp/dGVzdAo=": shouldBeUnauthedReadList, + "revoke": shouldBeAuthed, + "revoke-with-key": shouldBeAuthed, + "roles/test": shouldBeAuthed, + "roles": shouldBeAuthed, + "root": shouldBeAuthed, + "root/generate/exported": shouldBeAuthed, + "root/generate/internal": shouldBeAuthed, + "root/generate/existing": shouldBeAuthed, + "root/generate/kms": shouldBeAuthed, + "root/replace": shouldBeAuthed, + "root/rotate/internal": shouldBeAuthed, + "root/rotate/exported": shouldBeAuthed, + "root/rotate/existing": shouldBeAuthed, + "root/rotate/kms": shouldBeAuthed, + "root/sign-intermediate": shouldBeAuthed, + "root/sign-self-issued": shouldBeAuthed, + "sign-verbatim": shouldBeAuthed, + "sign-verbatim/test": shouldBeAuthed, + "sign/test": shouldBeAuthed, + "tidy": shouldBeAuthed, + "tidy-cancel": shouldBeAuthed, + "tidy-status": shouldBeAuthed, + "unified-crl": shouldBeUnauthedReadList, + "unified-crl/pem": shouldBeUnauthedReadList, + "unified-crl/delta": shouldBeUnauthedReadList, + "unified-crl/delta/pem": shouldBeUnauthedReadList, + "unified-ocsp": shouldBeUnauthedWriteOnly, + "unified-ocsp/dGVzdAo=": shouldBeUnauthedReadList, + "eab": shouldBeAuthed, + "eab/" + eabKid: shouldBeAuthed, + } + + // Add ACME based paths to the test suite + for _, acmePrefix := range []string{"", "issuer/default/", "roles/test/", "issuer/default/roles/test/"} { + paths[acmePrefix+"acme/directory"] = shouldBeUnauthedReadList + paths[acmePrefix+"acme/new-nonce"] = shouldBeUnauthedReadList + paths[acmePrefix+"acme/new-account"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"acme/revoke-cert"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"acme/new-order"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"acme/orders"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"acme/account/hrKmDYTvicHoHGVN2-3uzZV_BPGdE0W_dNaqYTtYqeo="] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"acme/authorization/29da8c38-7a09-465e-b9a6-3d76802b1afd"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"acme/challenge/29da8c38-7a09-465e-b9a6-3d76802b1afd/http-01"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"acme/order/13b80844-e60d-42d2-b7e9-152a8e834b90"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"acme/order/13b80844-e60d-42d2-b7e9-152a8e834b90/finalize"] = 
shouldBeUnauthedWriteOnly
+		paths[acmePrefix+"acme/order/13b80844-e60d-42d2-b7e9-152a8e834b90/cert"] = shouldBeUnauthedWriteOnly
+
+		// Make sure this new-eab path is auth'd
+		paths[acmePrefix+"acme/new-eab"] = shouldBeAuthed
+	}
+
+	for path, checkerType := range paths {
+		checker := pathAuthCheckerMap[checkerType]
+		checker(t, client, "pki/"+path, token)
+	}
+
+	client.SetToken(token)
+	openAPIResp, err := client.Logical().ReadWithContext(ctx, "sys/internal/specs/openapi")
+	if err != nil {
+		t.Fatalf("failed to get openapi data: %v", err)
+	}
+
+	validatedPath := false
+	for openapi_path, raw_data := range openAPIResp.Data["paths"].(map[string]interface{}) {
+		if !strings.HasPrefix(openapi_path, "/pki/") {
+			t.Logf("Skipping path: %v", openapi_path)
+			continue
+		}
+
+		t.Logf("Validating path: %v", openapi_path)
+		validatedPath = true
+		// Substitute values in from our testing map.
+		raw_path := openapi_path[5:]
+		if strings.Contains(raw_path, "roles/") && strings.Contains(raw_path, "{name}") {
+			raw_path = strings.ReplaceAll(raw_path, "{name}", "test")
+		}
+		if strings.Contains(raw_path, "{role}") {
+			raw_path = strings.ReplaceAll(raw_path, "{role}", "test")
+		}
+		if strings.Contains(raw_path, "ocsp/") && strings.Contains(raw_path, "{req}") {
+			raw_path = strings.ReplaceAll(raw_path, "{req}", "dGVzdAo=")
+		}
+		if strings.Contains(raw_path, "{issuer_ref}") {
+			raw_path = strings.ReplaceAll(raw_path, "{issuer_ref}", "default")
+		}
+		if strings.Contains(raw_path, "{key_ref}") {
+			raw_path = strings.ReplaceAll(raw_path, "{key_ref}", "default")
+		}
+		if strings.Contains(raw_path, "{exported}") {
+			raw_path = strings.ReplaceAll(raw_path, "{exported}", "internal")
+		}
+		if strings.Contains(raw_path, "{serial}") {
+			raw_path = strings.ReplaceAll(raw_path, "{serial}", serial)
+		}
+		if strings.Contains(raw_path, "acme/account/") && strings.Contains(raw_path, "{kid}") {
+			raw_path = strings.ReplaceAll(raw_path, "{kid}", "hrKmDYTvicHoHGVN2-3uzZV_BPGdE0W_dNaqYTtYqeo=")
+		}
+		if strings.Contains(raw_path, "acme/") && strings.Contains(raw_path, "{auth_id}") {
+			raw_path = strings.ReplaceAll(raw_path, "{auth_id}", "29da8c38-7a09-465e-b9a6-3d76802b1afd")
+		}
+		if strings.Contains(raw_path, "acme/") && strings.Contains(raw_path, "{challenge_type}") {
+			raw_path = strings.ReplaceAll(raw_path, "{challenge_type}", "http-01")
+		}
+		if strings.Contains(raw_path, "acme/") && strings.Contains(raw_path, "{order_id}") {
+			raw_path = strings.ReplaceAll(raw_path, "{order_id}", "13b80844-e60d-42d2-b7e9-152a8e834b90")
+		}
+		if strings.Contains(raw_path, "eab") && strings.Contains(raw_path, "{key_id}") {
+			raw_path = strings.ReplaceAll(raw_path, "{key_id}", eabKid)
+		}
+
+		handler, present := paths[raw_path]
+		if !present {
+			t.Fatalf("OpenAPI reports PKI mount contains %v->%v but it was not tested as either authed or unauthed.", openapi_path, raw_path)
+		}
+
+		openapi_data := raw_data.(map[string]interface{})
+		hasList := false
+		rawGetData, hasGet := openapi_data["get"]
+		if hasGet {
+			getData := rawGetData.(map[string]interface{})
+			getParams, paramsPresent := getData["parameters"].(map[string]interface{})
+			if getParams != nil && paramsPresent {
+				if _, hasList = getParams["list"]; hasList {
+					// LIST is exclusive from GET on the same endpoint usually.
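+					// (Vault serves LIST over HTTP as GET with ?list=true, so a
+					// "list" query parameter on a GET entry means the operation
+					// is really a LIST rather than a plain read.)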
+ hasGet = false + } + } + } + _, hasPost := openapi_data["post"] + _, hasDelete := openapi_data["delete"] + + if handler == shouldBeUnauthedReadList { + if hasPost || hasDelete { + t.Fatalf("Unauthed read-only endpoints should not have POST/DELETE capabilities: %v->%v", openapi_path, raw_path) + } + } else if handler == shouldBeUnauthedWriteOnly { + if hasGet || hasList { + t.Fatalf("Unauthed write-only endpoints should not have GET/LIST capabilities: %v->%v", openapi_path, raw_path) + } + } + } + + if !validatedPath { + t.Fatalf("Expected to have validated at least one path.") + } +} + +func TestPatchIssuer(t *testing.T) { + t.Parallel() + + type TestCase struct { + Field string + Before interface{} + Patched interface{} + } + testCases := []TestCase{ + { + Field: "issuer_name", + Before: "root", + Patched: "root-new", + }, + { + Field: "leaf_not_after_behavior", + Before: "err", + Patched: "permit", + }, + { + Field: "usage", + Before: "crl-signing,issuing-certificates,ocsp-signing,read-only", + Patched: "issuing-certificates,read-only", + }, + { + Field: "revocation_signature_algorithm", + Before: "ECDSAWithSHA256", + Patched: "ECDSAWithSHA384", + }, + { + Field: "issuing_certificates", + Before: []string{"http://localhost/v1/pki-1/ca"}, + Patched: []string{"http://localhost/v1/pki/ca"}, + }, + { + Field: "crl_distribution_points", + Before: []string{"http://localhost/v1/pki-1/crl"}, + Patched: []string{"http://localhost/v1/pki/crl"}, + }, + { + Field: "ocsp_servers", + Before: []string{"http://localhost/v1/pki-1/ocsp"}, + Patched: []string{"http://localhost/v1/pki/ocsp"}, + }, + { + Field: "enable_aia_url_templating", + Before: false, + Patched: true, + }, + { + Field: "manual_chain", + Before: []string(nil), + Patched: []string{"self"}, + }, + } + + for index, testCase := range testCases { + t.Logf("index: %v / tc: %v", index, testCase) + + b, s := CreateBackendWithStorage(t) + + // 1. Setup root issuer. + resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "Vault Root CA", + "key_type": "ec", + "ttl": "7200h", + "issuer_name": "root", + }) + requireSuccessNonNilResponse(t, resp, err, "failed generating root issuer") + id := string(resp.Data["issuer_id"].(issuerID)) + + // 2. Enable Cluster paths + resp, err = CBWrite(b, s, "config/urls", map[string]interface{}{ + "path": "https://localhost/v1/pki", + "aia_path": "http://localhost/v1/pki", + }) + requireSuccessNonNilResponse(t, resp, err, "failed updating AIA config") + + // 3. Add AIA information + resp, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ + "issuing_certificates": "http://localhost/v1/pki-1/ca", + "crl_distribution_points": "http://localhost/v1/pki-1/crl", + "ocsp_servers": "http://localhost/v1/pki-1/ocsp", + }) + requireSuccessNonNilResponse(t, resp, err, "failed setting up issuer") + + // 4. Read the issuer before. + resp, err = CBRead(b, s, "issuer/default") + requireSuccessNonNilResponse(t, resp, err, "failed reading root issuer before") + require.Equal(t, testCase.Before, resp.Data[testCase.Field], "bad expectations") + + // 5. Perform modification. 
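+		// (CBPatch issues a PATCH with JSON merge patch semantics, so only the
+		// single field under test changes; step 6 then re-reads the issuer to
+		// confirm the change persisted.)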
+ resp, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ + testCase.Field: testCase.Patched, + }) + requireSuccessNonNilResponse(t, resp, err, "failed patching root issuer") + + if testCase.Field != "manual_chain" { + require.Equal(t, testCase.Patched, resp.Data[testCase.Field], "failed persisting value") + } else { + // self->id + require.Equal(t, []string{id}, resp.Data[testCase.Field], "failed persisting value") + } + + // 6. Ensure it stuck + resp, err = CBRead(b, s, "issuer/default") + requireSuccessNonNilResponse(t, resp, err, "failed reading root issuer after") + + if testCase.Field != "manual_chain" { + require.Equal(t, testCase.Patched, resp.Data[testCase.Field]) + } else { + // self->id + require.Equal(t, []string{id}, resp.Data[testCase.Field], "failed persisting value") + } + } +} + +func TestGenerateRootCAWithAIA(t *testing.T) { + // Generate a root CA at /pki-root + b_root, s_root := CreateBackendWithStorage(t) + + // Setup templated AIA information + _, err := CBWrite(b_root, s_root, "config/cluster", map[string]interface{}{ + "path": "https://localhost:8200", + "aia_path": "https://localhost:8200", + }) + require.NoError(t, err, "failed to write AIA settings") + + _, err = CBWrite(b_root, s_root, "config/urls", map[string]interface{}{ + "crl_distribution_points": "{{cluster_path}}/issuer/{{issuer_id}}/crl/der", + "issuing_certificates": "{{cluster_aia_path}}/issuer/{{issuer_id}}/der", + "ocsp_servers": "{{cluster_path}}/ocsp", + "enable_templating": true, + }) + require.NoError(t, err, "failed to write AIA settings") + + // Write a root issuer, this should succeed. + resp, err := CBWrite(b_root, s_root, "root/generate/exported", map[string]interface{}{ + "common_name": "root myvault.com", + "key_type": "ec", + }) + requireSuccessNonNilResponse(t, resp, err, "expected root generation to succeed") +} + +var ( + initTest sync.Once + rsaCAKey string + rsaCACert string + ecCAKey string + ecCACert string + edCAKey string + edCACert string +) diff --git a/builtin/logical/pki/ca_test.go b/builtin/logical/pki/ca_test.go new file mode 100644 index 0000000..7dbffef --- /dev/null +++ b/builtin/logical/pki/ca_test.go @@ -0,0 +1,712 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "context" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/json" + "encoding/pem" + "math/big" + mathrand "math/rand" + "strings" + "testing" + "time" + + "github.com/go-test/deep" + "github.com/hashicorp/vault/api" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +func TestBackend_CA_Steps(t *testing.T) { + t.Parallel() + var b *backend + + factory := func(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { + be, err := Factory(ctx, conf) + if err == nil { + b = be.(*backend) + } + return be, err + } + + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + client := cluster.Cores[0].Client + + // Set RSA/EC CA certificates + var rsaCAKey, rsaCACert, ecCAKey, ecCACert, edCAKey, edCACert string + { + cak, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + panic(err) + } + marshaledKey, err := x509.MarshalECPrivateKey(cak) + if err != nil { + panic(err) + } + keyPEMBlock := &pem.Block{ + Type: "EC PRIVATE KEY", + Bytes: marshaledKey, + } + ecCAKey = strings.TrimSpace(string(pem.EncodeToMemory(keyPEMBlock))) + if err != nil { + panic(err) + } + subjKeyID, err := certutil.GetSubjKeyID(cak) + if err != nil { + panic(err) + } + caCertTemplate := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "root.localhost", + }, + SubjectKeyId: subjKeyID, + DNSNames: []string{"root.localhost"}, + KeyUsage: x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign), + SerialNumber: big.NewInt(mathrand.Int63()), + NotAfter: time.Now().Add(262980 * time.Hour), + BasicConstraintsValid: true, + IsCA: true, + } + caBytes, err := x509.CreateCertificate(rand.Reader, caCertTemplate, caCertTemplate, cak.Public(), cak) + if err != nil { + panic(err) + } + caCertPEMBlock := &pem.Block{ + Type: "CERTIFICATE", + Bytes: caBytes, + } + ecCACert = strings.TrimSpace(string(pem.EncodeToMemory(caCertPEMBlock))) + + rak, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + panic(err) + } + marshaledKey = x509.MarshalPKCS1PrivateKey(rak) + keyPEMBlock = &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: marshaledKey, + } + rsaCAKey = strings.TrimSpace(string(pem.EncodeToMemory(keyPEMBlock))) + if err != nil { + panic(err) + } + _, err = certutil.GetSubjKeyID(rak) + if err != nil { + panic(err) + } + caBytes, err = x509.CreateCertificate(rand.Reader, caCertTemplate, caCertTemplate, rak.Public(), rak) + if err != nil { + panic(err) + } + caCertPEMBlock = &pem.Block{ + Type: "CERTIFICATE", + Bytes: caBytes, + } + rsaCACert = strings.TrimSpace(string(pem.EncodeToMemory(caCertPEMBlock))) + + _, edk, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + panic(err) + } + marshaledKey, err = x509.MarshalPKCS8PrivateKey(edk) + if err != nil { + panic(err) + } + keyPEMBlock = &pem.Block{ + Type: "PRIVATE KEY", + Bytes: marshaledKey, + } + edCAKey = strings.TrimSpace(string(pem.EncodeToMemory(keyPEMBlock))) + if err != nil { + panic(err) + } + _, err = certutil.GetSubjKeyID(edk) + if err != nil { + panic(err) + } + caBytes, err = x509.CreateCertificate(rand.Reader, caCertTemplate, 
caCertTemplate, edk.Public(), edk)
+		if err != nil {
+			panic(err)
+		}
+		caCertPEMBlock = &pem.Block{
+			Type:  "CERTIFICATE",
+			Bytes: caBytes,
+		}
+		edCACert = strings.TrimSpace(string(pem.EncodeToMemory(caCertPEMBlock)))
+	}
+
+	// Setup backends
+	var rsaRoot, rsaInt, ecRoot, ecInt, edRoot, edInt *backend
+	{
+		if err := client.Sys().Mount("rsaroot", &api.MountInput{
+			Type: "pki",
+			Config: api.MountConfigInput{
+				DefaultLeaseTTL: "16h",
+				MaxLeaseTTL:     "60h",
+			},
+		}); err != nil {
+			t.Fatal(err)
+		}
+		rsaRoot = b
+
+		if err := client.Sys().Mount("rsaint", &api.MountInput{
+			Type: "pki",
+			Config: api.MountConfigInput{
+				DefaultLeaseTTL: "16h",
+				MaxLeaseTTL:     "60h",
+			},
+		}); err != nil {
+			t.Fatal(err)
+		}
+		rsaInt = b
+
+		if err := client.Sys().Mount("ecroot", &api.MountInput{
+			Type: "pki",
+			Config: api.MountConfigInput{
+				DefaultLeaseTTL: "16h",
+				MaxLeaseTTL:     "60h",
+			},
+		}); err != nil {
+			t.Fatal(err)
+		}
+		ecRoot = b
+
+		if err := client.Sys().Mount("ecint", &api.MountInput{
+			Type: "pki",
+			Config: api.MountConfigInput{
+				DefaultLeaseTTL: "16h",
+				MaxLeaseTTL:     "60h",
+			},
+		}); err != nil {
+			t.Fatal(err)
+		}
+		ecInt = b
+
+		if err := client.Sys().Mount("ed25519root", &api.MountInput{
+			Type: "pki",
+			Config: api.MountConfigInput{
+				DefaultLeaseTTL: "16h",
+				MaxLeaseTTL:     "60h",
+			},
+		}); err != nil {
+			t.Fatal(err)
+		}
+		edRoot = b
+
+		if err := client.Sys().Mount("ed25519int", &api.MountInput{
+			Type: "pki",
+			Config: api.MountConfigInput{
+				DefaultLeaseTTL: "16h",
+				MaxLeaseTTL:     "60h",
+			},
+		}); err != nil {
+			t.Fatal(err)
+		}
+		edInt = b
+	}
+
+	t.Run("teststeps", func(t *testing.T) {
+		t.Run("rsa", func(t *testing.T) {
+			t.Parallel()
+			subClient, err := client.Clone()
+			if err != nil {
+				t.Fatal(err)
+			}
+			subClient.SetToken(client.Token())
+			runSteps(t, rsaRoot, rsaInt, subClient, "rsaroot/", "rsaint/", rsaCACert, rsaCAKey)
+		})
+		t.Run("ec", func(t *testing.T) {
+			t.Parallel()
+			subClient, err := client.Clone()
+			if err != nil {
+				t.Fatal(err)
+			}
+			subClient.SetToken(client.Token())
+			runSteps(t, ecRoot, ecInt, subClient, "ecroot/", "ecint/", ecCACert, ecCAKey)
+		})
+		t.Run("ed25519", func(t *testing.T) {
+			t.Parallel()
+			subClient, err := client.Clone()
+			if err != nil {
+				t.Fatal(err)
+			}
+			subClient.SetToken(client.Token())
+			runSteps(t, edRoot, edInt, subClient, "ed25519root/", "ed25519int/", edCACert, edCAKey)
+		})
+	})
+}
+
+func runSteps(t *testing.T, rootB, intB *backend, client *api.Client, rootName, intName, caCert, caKey string) {
+	// Load CA cert/key in and ensure we can fetch it back in various formats,
+	// unauthenticated
+	{
+		// Attempt import but only provide the cert; this should work.
+ { + _, err := client.Logical().Write(rootName+"config/ca", map[string]interface{}{ + "pem_bundle": caCert, + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + } + + // Same but with only the key + { + _, err := client.Logical().Write(rootName+"config/ca", map[string]interface{}{ + "pem_bundle": caKey, + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + } + + // Import entire CA bundle; this should work as well + { + _, err := client.Logical().Write(rootName+"config/ca", map[string]interface{}{ + "pem_bundle": strings.Join([]string{caKey, caCert}, "\n"), + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + } + + prevToken := client.Token() + client.SetToken("") + + // cert/ca and issuer/default/json path + for _, path := range []string{"cert/ca", "issuer/default/json"} { + resp, err := client.Logical().Read(rootName + path) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("nil response") + } + expected := caCert + if path == "issuer/default/json" { + // Preserves the new line. + expected += "\n" + _, present := resp.Data["issuer_id"] + if !present { + t.Fatalf("expected issuer/default/json to include issuer_id") + } + _, present = resp.Data["issuer_name"] + if !present { + t.Fatalf("expected issuer/default/json to include issuer_name") + } + } + if diff := deep.Equal(resp.Data["certificate"].(string), expected); diff != nil { + t.Fatal(diff) + } + } + + // ca/pem and issuer/default/pem path (raw string) + for _, path := range []string{"ca/pem", "issuer/default/pem"} { + req := &logical.Request{ + Path: path, + Operation: logical.ReadOperation, + Storage: rootB.storage, + } + resp, err := rootB.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("nil response") + } + expected := []byte(caCert) + if path == "issuer/default/pem" { + // Preserves the new line. 
+ expected = []byte(caCert + "\n") + } + if diff := deep.Equal(resp.Data["http_raw_body"].([]byte), expected); diff != nil { + t.Fatal(diff) + } + if resp.Data["http_content_type"].(string) != "application/pem-certificate-chain" { + t.Fatal("wrong content type") + } + } + + // ca and issuer/default/der (raw DER bytes) + for _, path := range []string{"ca", "issuer/default/der"} { + req := &logical.Request{ + Path: path, + Operation: logical.ReadOperation, + Storage: rootB.storage, + } + resp, err := rootB.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("nil response") + } + rawBytes := resp.Data["http_raw_body"].([]byte) + pemBytes := strings.TrimSpace(string(pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: rawBytes, + }))) + if diff := deep.Equal(pemBytes, caCert); diff != nil { + t.Fatal(diff) + } + if resp.Data["http_content_type"].(string) != "application/pkix-cert" { + t.Fatal("wrong content type") + } + } + + client.SetToken(prevToken) + } + + // Configure an expiry on the CRL and verify what comes back + { + // Set CRL config + { + _, err := client.Logical().Write(rootName+"config/crl", map[string]interface{}{ + "expiry": "16h", + }) + if err != nil { + t.Fatal(err) + } + } + + // Verify it + { + resp, err := client.Logical().Read(rootName + "config/crl") + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("nil response") + } + if resp.Data["expiry"].(string) != "16h" { + t.Fatal("expected a 16 hour expiry") + } + } + } + + // Test generating a root, an intermediate, signing it, setting signed, and + // revoking it + + // We'll need this later + var intSerialNumber string + { + // First, delete the existing CA info + { + _, err := client.Logical().Delete(rootName + "root") + if err != nil { + t.Fatal(err) + } + } + + var rootPEM, rootKey, rootPEMBundle string + // Test exported root generation + { + resp, err := client.Logical().Write(rootName+"root/generate/exported", map[string]interface{}{ + "common_name": "Root Cert", + "ttl": "180h", + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("nil response") + } + rootPEM = resp.Data["certificate"].(string) + rootKey = resp.Data["private_key"].(string) + rootPEMBundle = strings.Join([]string{rootPEM, rootKey}, "\n") + // This is really here to keep the use checker happy + if rootPEMBundle == "" { + t.Fatal("bad root pem bundle") + } + } + + var intPEM, intCSR, intKey string + // Test exported intermediate CSR generation + { + resp, err := client.Logical().Write(intName+"intermediate/generate/exported", map[string]interface{}{ + "common_name": "intermediate.cert.com", + "ttl": "180h", + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("nil response") + } + intCSR = resp.Data["csr"].(string) + intKey = resp.Data["private_key"].(string) + // This is really here to keep the use checker happy + if intCSR == "" || intKey == "" { + t.Fatal("int csr or key empty") + } + } + + // Test signing + { + resp, err := client.Logical().Write(rootName+"root/sign-intermediate", map[string]interface{}{ + "common_name": "intermediate.cert.com", + "ttl": "10s", + "csr": intCSR, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("nil response") + } + intPEM = resp.Data["certificate"].(string) + intSerialNumber = resp.Data["serial_number"].(string) + } + + // Test setting signed + { + resp, err := client.Logical().Write(intName+"intermediate/set-signed", map[string]interface{}{ + "certificate": intPEM, + }) + if err 
!= nil {
+				t.Fatal(err)
+			}
+			if resp == nil {
+				t.Fatal("nil response")
+			}
+		}
+
+		// Verify we can find it via the root
+		{
+			resp, err := client.Logical().Read(rootName + "cert/" + intSerialNumber)
+			if err != nil {
+				t.Fatal(err)
+			}
+			if resp == nil {
+				t.Fatal("nil response")
+			}
+			if resp.Data["revocation_time"].(json.Number).String() != "0" {
+				t.Fatal("expected a zero revocation time")
+			}
+		}
+
+		// Revoke the intermediate
+		{
+			resp, err := client.Logical().Write(rootName+"revoke", map[string]interface{}{
+				"serial_number": intSerialNumber,
+			})
+			if err != nil {
+				t.Fatal(err)
+			}
+			if resp == nil {
+				t.Fatal("nil response")
+			}
+		}
+	}
+
+	verifyRevocation := func(t *testing.T, serial string, shouldFind bool) {
+		t.Helper()
+		// Verify whether the cert currently shows up as revoked
+		{
+			resp, err := client.Logical().Read(rootName + "cert/" + serial)
+			if err != nil {
+				t.Fatal(err)
+			}
+			switch shouldFind {
+			case true:
+				if resp == nil {
+					t.Fatal("nil response")
+				}
+				if resp.Data["revocation_time"].(json.Number).String() == "0" {
+					t.Fatal("expected a non-zero revocation time")
+				}
+			default:
+				if resp != nil {
+					t.Fatalf("expected nil response, got %#v", *resp)
+				}
+			}
+		}
+
+		// Fetch the CRL and make sure it shows up
+		for path, derPemOrJSON := range map[string]int{
+			"crl":                    0,
+			"issuer/default/crl/der": 0,
+			"crl/pem":                1,
+			"issuer/default/crl/pem": 1,
+			"cert/crl":               2,
+			"issuer/default/crl":     3,
+		} {
+			req := &logical.Request{
+				Path:      path,
+				Operation: logical.ReadOperation,
+				Storage:   rootB.storage,
+			}
+			resp, err := rootB.HandleRequest(context.Background(), req)
+			if err != nil {
+				t.Fatal(err)
+			}
+			if resp == nil {
+				t.Fatal("nil response")
+			}
+
+			var crlBytes []byte
+			if derPemOrJSON == 2 {
+				// Old endpoint
+				crlBytes = []byte(resp.Data["certificate"].(string))
+			} else if derPemOrJSON == 3 {
+				// New endpoint
+				crlBytes = []byte(resp.Data["crl"].(string))
+			} else {
+				// DER or PEM
+				crlBytes = resp.Data["http_raw_body"].([]byte)
+			}
+
+			if derPemOrJSON >= 1 {
+				// Do for both PEM and JSON endpoints
+				pemBlock, _ := pem.Decode(crlBytes)
+				crlBytes = pemBlock.Bytes
+			}
+
+			certList, err := x509.ParseCRL(crlBytes)
+			if err != nil {
+				t.Fatal(err)
+			}
+			switch shouldFind {
+			case true:
+				revokedList := certList.TBSCertList.RevokedCertificates
+				if len(revokedList) != 1 {
+					t.Fatalf("bad length of revoked list: %d", len(revokedList))
+				}
+				revokedString := certutil.GetHexFormatted(revokedList[0].SerialNumber.Bytes(), ":")
+				if revokedString != serial {
+					t.Fatalf("bad revoked serial: %s", revokedString)
+				}
+			default:
+				revokedList := certList.TBSCertList.RevokedCertificates
+				if len(revokedList) != 0 {
+					t.Fatalf("bad length of revoked list: %d", len(revokedList))
+				}
+			}
+		}
+	}
+
+	verifyTidyStatus := func(expectedCertStoreDeleteCount int, expectedRevokedCertDeletedCount int) {
+		tidyStatus, err := client.Logical().Read(rootName + "tidy-status")
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if tidyStatus.Data["state"] != "Finished" {
+			t.Fatalf("Expected tidy operation to be finished, but tidy-status reports its state is %v", tidyStatus.Data)
+		}
+
+		var count int64
+		if count, err = tidyStatus.Data["cert_store_deleted_count"].(json.Number).Int64(); err != nil {
+			t.Fatal(err)
+		}
+		if int64(expectedCertStoreDeleteCount) != count {
+			t.Fatalf("Expected %d for cert_store_deleted_count, but got %d", expectedCertStoreDeleteCount, count)
+		}
+
+		if count, err = tidyStatus.Data["revoked_cert_deleted_count"].(json.Number).Int64(); err != nil {
+			t.Fatal(err)
+		}
+		if int64(expectedRevokedCertDeletedCount) != count {
+			t.Fatalf("Expected %d for revoked_cert_deleted_count, but got %d", expectedRevokedCertDeletedCount, count)
+		}
+	}
+
+	// Validate current state of revoked certificates
+	verifyRevocation(t, intSerialNumber, true)
+
+	// Give time for the safety buffer to pass before tidying
+	time.Sleep(10 * time.Second)
+
+	// Test tidying
+	{
+		// Run with a high safety buffer, nothing should happen
+		{
+			resp, err := client.Logical().Write(rootName+"tidy", map[string]interface{}{
+				"safety_buffer":      "3h",
+				"tidy_cert_store":    true,
+				"tidy_revoked_certs": true,
+			})
+			if err != nil {
+				t.Fatal(err)
+			}
+			if resp == nil {
+				t.Fatal("expected warnings")
+			}
+
+			// Wait a few seconds as it runs in a goroutine
+			time.Sleep(5 * time.Second)
+
+			// Check to make sure we still find the cert and see it on the CRL
+			verifyRevocation(t, intSerialNumber, true)
+
+			verifyTidyStatus(0, 0)
+		}
+
+		// Run with both values set false, nothing should happen
+		{
+			resp, err := client.Logical().Write(rootName+"tidy", map[string]interface{}{
+				"safety_buffer":      "1s",
+				"tidy_cert_store":    false,
+				"tidy_revoked_certs": false,
+			})
+			if err != nil {
+				t.Fatal(err)
+			}
+			if resp == nil {
+				t.Fatal("expected warnings")
+			}
+
+			// Wait a few seconds as it runs in a goroutine
+			time.Sleep(5 * time.Second)
+
+			// Check to make sure we still find the cert and see it on the CRL
+			verifyRevocation(t, intSerialNumber, true)
+
+			verifyTidyStatus(0, 0)
+		}
+
+		// Run with a short safety buffer and both set to true, both should be cleared
+		{
+			resp, err := client.Logical().Write(rootName+"tidy", map[string]interface{}{
+				"safety_buffer":      "1s",
+				"tidy_cert_store":    true,
+				"tidy_revoked_certs": true,
+			})
+			if err != nil {
+				t.Fatal(err)
+			}
+			if resp == nil {
+				t.Fatal("expected warnings")
+			}
+
+			// Wait a few seconds as it runs in a goroutine
+			time.Sleep(5 * time.Second)
+
+			// Check to make sure the cert is gone and no longer appears on the CRL
+			verifyRevocation(t, intSerialNumber, false)
+
+			verifyTidyStatus(1, 1)
+		}
+	}
+}
diff --git a/builtin/logical/pki/ca_util.go b/builtin/logical/pki/ca_util.go
new file mode 100644
index 0000000..85dc243
--- /dev/null
+++ b/builtin/logical/pki/ca_util.go
@@ -0,0 +1,324 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "errors" + "fmt" + "io" + "time" + + "golang.org/x/crypto/ed25519" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func getGenerationParams(sc *storageContext, data *framework.FieldData) (exported bool, format string, role *roleEntry, errorResp *logical.Response) { + exportedStr := data.Get("exported").(string) + switch exportedStr { + case "exported": + exported = true + case "internal": + case "existing": + case "kms": + default: + errorResp = logical.ErrorResponse( + `the "exported" path parameter must be "internal", "existing", "exported" or "kms"`) + return + } + + format = getFormat(data) + if format == "" { + errorResp = logical.ErrorResponse( + `the "format" path parameter must be "pem", "der", or "pem_bundle"`) + return + } + + keyType, keyBits, err := sc.getKeyTypeAndBitsForRole(data) + if err != nil { + errorResp = logical.ErrorResponse(err.Error()) + return + } + + role = &roleEntry{ + TTL: time.Duration(data.Get("ttl").(int)) * time.Second, + KeyType: keyType, + KeyBits: keyBits, + SignatureBits: data.Get("signature_bits").(int), + UsePSS: data.Get("use_pss").(bool), + AllowLocalhost: true, + AllowAnyName: true, + AllowIPSANs: true, + AllowWildcardCertificates: new(bool), + EnforceHostnames: false, + AllowedURISANs: []string{"*"}, + AllowedOtherSANs: []string{"*"}, + AllowedSerialNumbers: []string{"*"}, + AllowedUserIDs: []string{"*"}, + OU: data.Get("ou").([]string), + Organization: data.Get("organization").([]string), + Country: data.Get("country").([]string), + Locality: data.Get("locality").([]string), + Province: data.Get("province").([]string), + StreetAddress: data.Get("street_address").([]string), + PostalCode: data.Get("postal_code").([]string), + NotBeforeDuration: time.Duration(data.Get("not_before_duration").(int)) * time.Second, + CNValidations: []string{"disabled"}, + } + *role.AllowWildcardCertificates = true + + if role.KeyBits, role.SignatureBits, err = certutil.ValidateDefaultOrValueKeyTypeSignatureLength(role.KeyType, role.KeyBits, role.SignatureBits); err != nil { + errorResp = logical.ErrorResponse(err.Error()) + } + + return +} + +func generateCABundle(sc *storageContext, input *inputBundle, data *certutil.CreationBundle, randomSource io.Reader) (*certutil.ParsedCertBundle, error) { + ctx := sc.Context + b := sc.Backend + + if kmsRequested(input) { + keyId, err := getManagedKeyId(input.apiData) + if err != nil { + return nil, err + } + return generateManagedKeyCABundle(ctx, b, keyId, data, randomSource) + } + + if existingKeyRequested(input) { + keyRef, err := getKeyRefWithErr(input.apiData) + if err != nil { + return nil, err + } + + keyEntry, err := sc.getExistingKeyFromRef(keyRef) + if err != nil { + return nil, err + } + + if keyEntry.isManagedPrivateKey() { + keyId, err := keyEntry.getManagedKeyUUID() + if err != nil { + return nil, err + } + return generateManagedKeyCABundle(ctx, b, keyId, data, randomSource) + } + + return certutil.CreateCertificateWithKeyGenerator(data, randomSource, existingKeyGeneratorFromBytes(keyEntry)) + } + + return certutil.CreateCertificateWithRandomSource(data, randomSource) +} + +func generateCSRBundle(sc *storageContext, input *inputBundle, data *certutil.CreationBundle, addBasicConstraints bool, randomSource io.Reader) (*certutil.ParsedCSRBundle, error) { + ctx := sc.Context + b := sc.Backend + + if 
kmsRequested(input) { + keyId, err := getManagedKeyId(input.apiData) + if err != nil { + return nil, err + } + + return generateManagedKeyCSRBundle(ctx, b, keyId, data, addBasicConstraints, randomSource) + } + + if existingKeyRequested(input) { + keyRef, err := getKeyRefWithErr(input.apiData) + if err != nil { + return nil, err + } + + key, err := sc.getExistingKeyFromRef(keyRef) + if err != nil { + return nil, err + } + + if key.isManagedPrivateKey() { + keyId, err := key.getManagedKeyUUID() + if err != nil { + return nil, err + } + return generateManagedKeyCSRBundle(ctx, b, keyId, data, addBasicConstraints, randomSource) + } + + return certutil.CreateCSRWithKeyGenerator(data, addBasicConstraints, randomSource, existingKeyGeneratorFromBytes(key)) + } + + return certutil.CreateCSRWithRandomSource(data, addBasicConstraints, randomSource) +} + +func parseCABundle(ctx context.Context, b *backend, bundle *certutil.CertBundle) (*certutil.ParsedCertBundle, error) { + if bundle.PrivateKeyType == certutil.ManagedPrivateKey { + return parseManagedKeyCABundle(ctx, b, bundle) + } + return bundle.ToParsedCertBundle() +} + +func (sc *storageContext) getKeyTypeAndBitsForRole(data *framework.FieldData) (string, int, error) { + exportedStr := data.Get("exported").(string) + var keyType string + var keyBits int + + switch exportedStr { + case "internal": + fallthrough + case "exported": + keyType = data.Get("key_type").(string) + keyBits = data.Get("key_bits").(int) + return keyType, keyBits, nil + } + + // existing and kms types don't support providing the key_type and key_bits args. + _, okKeyType := data.Raw["key_type"] + _, okKeyBits := data.Raw["key_bits"] + + if okKeyType || okKeyBits { + return "", 0, errors.New("invalid parameter for the kms/existing path parameter, key_type nor key_bits arguments can be set in this mode") + } + + var pubKey crypto.PublicKey + if kmsRequestedFromFieldData(data) { + keyId, err := getManagedKeyId(data) + if err != nil { + return "", 0, errors.New("unable to determine managed key id: " + err.Error()) + } + + pubKeyManagedKey, err := getManagedKeyPublicKey(sc.Context, sc.Backend, keyId) + if err != nil { + return "", 0, errors.New("failed to lookup public key from managed key: " + err.Error()) + } + pubKey = pubKeyManagedKey + } + + if existingKeyRequestedFromFieldData(data) { + existingPubKey, err := sc.getExistingPublicKey(data) + if err != nil { + return "", 0, errors.New("failed to lookup public key from existing key: " + err.Error()) + } + pubKey = existingPubKey + } + + privateKeyType, keyBits, err := getKeyTypeAndBitsFromPublicKeyForRole(pubKey) + return string(privateKeyType), keyBits, err +} + +func (sc *storageContext) getExistingPublicKey(data *framework.FieldData) (crypto.PublicKey, error) { + keyRef, err := getKeyRefWithErr(data) + if err != nil { + return nil, err + } + id, err := sc.resolveKeyReference(keyRef) + if err != nil { + return nil, err + } + key, err := sc.fetchKeyById(id) + if err != nil { + return nil, err + } + return getPublicKey(sc.Context, sc.Backend, key) +} + +func getKeyTypeAndBitsFromPublicKeyForRole(pubKey crypto.PublicKey) (certutil.PrivateKeyType, int, error) { + var keyType certutil.PrivateKeyType + var keyBits int + + switch pubKey.(type) { + case *rsa.PublicKey: + keyType = certutil.RSAPrivateKey + keyBits = certutil.GetPublicKeySize(pubKey) + case *ecdsa.PublicKey: + keyType = certutil.ECPrivateKey + case *ed25519.PublicKey: + keyType = certutil.Ed25519PrivateKey + default: + return certutil.UnknownPrivateKey, 0, 
fmt.Errorf("unsupported public key: %#v", pubKey) + } + return keyType, keyBits, nil +} + +func (sc *storageContext) getExistingKeyFromRef(keyRef string) (*keyEntry, error) { + keyId, err := sc.resolveKeyReference(keyRef) + if err != nil { + return nil, err + } + return sc.fetchKeyById(keyId) +} + +func existingKeyGeneratorFromBytes(key *keyEntry) certutil.KeyGenerator { + return func(_ string, _ int, container certutil.ParsedPrivateKeyContainer, _ io.Reader) error { + signer, _, pemBytes, err := getSignerFromKeyEntryBytes(key) + if err != nil { + return err + } + + container.SetParsedPrivateKey(signer, key.PrivateKeyType, pemBytes.Bytes) + return nil + } +} + +func buildSignVerbatimRoleWithNoData(role *roleEntry) *roleEntry { + data := &framework.FieldData{ + Raw: map[string]interface{}{}, + Schema: addSignVerbatimRoleFields(map[string]*framework.FieldSchema{}), + } + return buildSignVerbatimRole(data, role) +} + +func buildSignVerbatimRole(data *framework.FieldData, role *roleEntry) *roleEntry { + entry := &roleEntry{ + AllowLocalhost: true, + AllowAnyName: true, + AllowIPSANs: true, + AllowWildcardCertificates: new(bool), + EnforceHostnames: false, + KeyType: "any", + UseCSRCommonName: true, + UseCSRSANs: true, + AllowedOtherSANs: []string{"*"}, + AllowedSerialNumbers: []string{"*"}, + AllowedURISANs: []string{"*"}, + AllowedUserIDs: []string{"*"}, + CNValidations: []string{"disabled"}, + GenerateLease: new(bool), + // If adding new fields to be read, update the field list within addSignVerbatimRoleFields + KeyUsage: data.Get("key_usage").([]string), + ExtKeyUsage: data.Get("ext_key_usage").([]string), + ExtKeyUsageOIDs: data.Get("ext_key_usage_oids").([]string), + SignatureBits: data.Get("signature_bits").(int), + UsePSS: data.Get("use_pss").(bool), + } + *entry.AllowWildcardCertificates = true + *entry.GenerateLease = false + + if role != nil { + if role.TTL > 0 { + entry.TTL = role.TTL + } + if role.MaxTTL > 0 { + entry.MaxTTL = role.MaxTTL + } + if role.GenerateLease != nil { + *entry.GenerateLease = *role.GenerateLease + } + if role.NotBeforeDuration > 0 { + entry.NotBeforeDuration = role.NotBeforeDuration + } + entry.NoStore = role.NoStore + entry.Issuer = role.Issuer + } + + if len(entry.Issuer) == 0 { + entry.Issuer = defaultRef + } + + return entry +} diff --git a/builtin/logical/pki/cert_util.go b/builtin/logical/pki/cert_util.go new file mode 100644 index 0000000..e6f49f1 --- /dev/null +++ b/builtin/logical/pki/cert_util.go @@ -0,0 +1,1792 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/base64" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" + "net" + "net/url" + "regexp" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/ryanuber/go-glob" + "golang.org/x/crypto/cryptobyte" + cbbasn1 "golang.org/x/crypto/cryptobyte/asn1" + "golang.org/x/net/idna" +) + +type inputBundle struct { + role *roleEntry + req *logical.Request + apiData *framework.FieldData +} + +var ( + // labelRegex is a single label from a valid domain name and was extracted + // from hostnameRegex below for use in leftWildLabelRegex, without any + // label separators (`.`). 
+ labelRegex = `([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])` + + // A note on hostnameRegex: although we set the StrictDomainName option + // when doing the idna conversion, this appears to only affect output, not + // input, so it will allow e.g. host^123.example.com straight through. So + // we still need to use this to check the output. + hostnameRegex = regexp.MustCompile(`^(\*\.)?(` + labelRegex + `\.)*` + labelRegex + `\.?$`) + + // Left Wildcard Label Regex is equivalent to a single domain label + // component from hostnameRegex above, but with additional wildcard + // characters added. There are four possibilities here: + // + // 1. Entire label is a wildcard, + // 2. Wildcard exists at the start, + // 3. Wildcard exists at the end, + // 4. Wildcard exists in the middle. + allWildRegex = `\*` + startWildRegex = `\*` + labelRegex + endWildRegex = labelRegex + `\*` + middleWildRegex = labelRegex + `\*` + labelRegex + leftWildLabelRegex = regexp.MustCompile(`^(` + allWildRegex + `|` + startWildRegex + `|` + endWildRegex + `|` + middleWildRegex + `)$`) + + // OIDs for X.509 certificate extensions used below. + oidExtensionSubjectAltName = []int{2, 5, 29, 17} +) + +func getFormat(data *framework.FieldData) string { + format := data.Get("format").(string) + switch format { + case "pem": + case "der": + case "pem_bundle": + default: + format = "" + } + return format +} + +// fetchCAInfo will fetch the CA info, will return an error if no ca info exists, this does NOT support +// loading using the legacyBundleShimID and should be used with care. This should be called only once +// within the request path otherwise you run the risk of a race condition with the issuer migration on perf-secondaries. +func (sc *storageContext) fetchCAInfo(issuerRef string, usage issuerUsage) (*certutil.CAInfoBundle, error) { + bundle, _, err := sc.fetchCAInfoWithIssuer(issuerRef, usage) + return bundle, err +} + +func (sc *storageContext) fetchCAInfoWithIssuer(issuerRef string, usage issuerUsage) (*certutil.CAInfoBundle, issuerID, error) { + var issuerId issuerID + + if sc.Backend.useLegacyBundleCaStorage() { + // We have not completed the migration so attempt to load the bundle from the legacy location + sc.Backend.Logger().Info("Using legacy CA bundle as PKI migration has not completed.") + issuerId = legacyBundleShimID + } else { + var err error + issuerId, err = sc.resolveIssuerReference(issuerRef) + if err != nil { + // Usually a bad label from the user or mis-configured default. + return nil, IssuerRefNotFound, errutil.UserError{Err: err.Error()} + } + } + + bundle, err := sc.fetchCAInfoByIssuerId(issuerId, usage) + if err != nil { + return nil, IssuerRefNotFound, err + } + + return bundle, issuerId, nil +} + +// fetchCAInfoByIssuerId will fetch the CA info, will return an error if no ca info exists for the given issuerId. 
+// This does support the loading using the legacyBundleShimID +func (sc *storageContext) fetchCAInfoByIssuerId(issuerId issuerID, usage issuerUsage) (*certutil.CAInfoBundle, error) { + entry, bundle, err := sc.fetchCertBundleByIssuerId(issuerId, true) + if err != nil { + switch err.(type) { + case errutil.UserError: + return nil, err + case errutil.InternalError: + return nil, err + default: + return nil, errutil.InternalError{Err: fmt.Sprintf("error fetching CA info: %v", err)} + } + } + + if err := entry.EnsureUsage(usage); err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("error while attempting to use issuer %v: %v", issuerId, err)} + } + + parsedBundle, err := parseCABundle(sc.Context, sc.Backend, bundle) + if err != nil { + return nil, errutil.InternalError{Err: err.Error()} + } + + if parsedBundle.Certificate == nil { + return nil, errutil.InternalError{Err: "stored CA information not able to be parsed"} + } + if parsedBundle.PrivateKey == nil { + return nil, errutil.UserError{Err: fmt.Sprintf("unable to fetch corresponding key for issuer %v; unable to use this issuer for signing", issuerId)} + } + + caInfo := &certutil.CAInfoBundle{ + ParsedCertBundle: *parsedBundle, + URLs: nil, + LeafNotAfterBehavior: entry.LeafNotAfterBehavior, + RevocationSigAlg: entry.RevocationSigAlg, + } + + entries, err := entry.GetAIAURLs(sc) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch AIA URL information: %v", err)} + } + caInfo.URLs = entries + + return caInfo, nil +} + +func fetchCertBySerialBigInt(sc *storageContext, prefix string, serial *big.Int) (*logical.StorageEntry, error) { + return fetchCertBySerial(sc, prefix, serialFromBigInt(serial)) +} + +// Allows fetching certificates from the backend; it handles the slightly +// separate pathing for CRL, and revoked certificates. +// +// Support for fetching CA certificates was removed, due to the new issuers +// changes. +func fetchCertBySerial(sc *storageContext, prefix, serial string) (*logical.StorageEntry, error) { + var path, legacyPath string + var err error + var certEntry *logical.StorageEntry + + hyphenSerial := normalizeSerial(serial) + colonSerial := strings.ReplaceAll(strings.ToLower(serial), "-", ":") + + switch { + // Revoked goes first as otherwise crl get hardcoded paths which fail if + // we actually want revocation info + case strings.HasPrefix(prefix, "revoked/"): + legacyPath = "revoked/" + colonSerial + path = "revoked/" + hyphenSerial + case serial == legacyCRLPath || serial == deltaCRLPath || serial == unifiedCRLPath || serial == unifiedDeltaCRLPath: + warnings, err := sc.Backend.crlBuilder.rebuildIfForced(sc) + if err != nil { + return nil, err + } + if len(warnings) > 0 { + msg := "During rebuild of CRL for cert fetch, got the following warnings:" + for index, warning := range warnings { + msg = fmt.Sprintf("%v\n %d. 
%v", msg, index+1, warning) + } + sc.Backend.Logger().Warn(msg) + } + + unified := serial == unifiedCRLPath || serial == unifiedDeltaCRLPath + path, err = sc.resolveIssuerCRLPath(defaultRef, unified) + if err != nil { + return nil, err + } + + if serial == deltaCRLPath || serial == unifiedDeltaCRLPath { + if sc.Backend.useLegacyBundleCaStorage() { + return nil, fmt.Errorf("refusing to serve delta CRL with legacy CA bundle") + } + + path += deltaCRLPathSuffix + } + default: + legacyPath = "certs/" + colonSerial + path = "certs/" + hyphenSerial + } + + certEntry, err = sc.Storage.Get(sc.Context, path) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("error fetching certificate %s: %s", serial, err)} + } + if certEntry != nil { + if certEntry.Value == nil || len(certEntry.Value) == 0 { + return nil, errutil.InternalError{Err: fmt.Sprintf("returned certificate bytes for serial %s were empty", serial)} + } + return certEntry, nil + } + + // If legacyPath is unset, it's going to be a CA or CRL; return immediately + if legacyPath == "" { + return nil, nil + } + + // Retrieve the old-style path. We disregard errors here because they + // always manifest on Windows, and thus the initial check for a revoked + // cert fails would return an error when the cert isn't revoked, preventing + // the happy path from working. + certEntry, _ = sc.Storage.Get(sc.Context, legacyPath) + if certEntry == nil { + return nil, nil + } + if certEntry.Value == nil || len(certEntry.Value) == 0 { + return nil, errutil.InternalError{Err: fmt.Sprintf("returned certificate bytes for serial %s were empty", serial)} + } + + // Update old-style paths to new-style paths + certEntry.Key = path + certsCounted := sc.Backend.certsCounted.Load() + if err = sc.Storage.Put(sc.Context, certEntry); err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("error saving certificate with serial %s to new location", serial)} + } + if err = sc.Storage.Delete(sc.Context, legacyPath); err != nil { + // If we fail here, we have an extra (copy) of a cert in storage, add to metrics: + switch { + case strings.HasPrefix(prefix, "revoked/"): + sc.Backend.ifCountEnabledIncrementTotalRevokedCertificatesCount(certsCounted, path) + default: + sc.Backend.ifCountEnabledIncrementTotalCertificatesCount(certsCounted, path) + } + return nil, errutil.InternalError{Err: fmt.Sprintf("error deleting certificate with serial %s from old location", serial)} + } + + return certEntry, nil +} + +// Given a URI SAN, verify that it is allowed. +func validateURISAN(b *backend, data *inputBundle, uri string) bool { + valid := false + for _, allowed := range data.role.AllowedURISANs { + if data.role.AllowedURISANsTemplate { + isTemplate, _ := framework.ValidateIdentityTemplate(allowed) + if isTemplate && data.req.EntityID != "" { + tmpAllowed, err := framework.PopulateIdentityTemplate(allowed, data.req.EntityID, b.System()) + if err != nil { + continue + } + allowed = tmpAllowed + } + } + validURI := glob.Glob(allowed, uri) + if validURI { + valid = true + break + } + } + return valid +} + +// Validates a given common name, ensuring it's either an email or a hostname +// after validating it according to the role parameters, or disables +// validation altogether. 
+func validateCommonName(b *backend, data *inputBundle, name string) string { + isDisabled := len(data.role.CNValidations) == 1 && data.role.CNValidations[0] == "disabled" + if isDisabled { + return "" + } + + if validateNames(b, data, []string{name}) != "" { + return name + } + + // Validations weren't disabled, but the role lacked CN Validations, so + // don't restrict types. This case is hit in certain existing tests. + if len(data.role.CNValidations) == 0 { + return "" + } + + // If there's an at in the data, ensure email type validation is allowed. + // Otherwise, ensure hostname is allowed. + if strings.Contains(name, "@") { + var allowsEmails bool + for _, validation := range data.role.CNValidations { + if validation == "email" { + allowsEmails = true + break + } + } + if !allowsEmails { + return name + } + } else { + var allowsHostnames bool + for _, validation := range data.role.CNValidations { + if validation == "hostname" { + allowsHostnames = true + break + } + } + if !allowsHostnames { + return name + } + } + + return "" +} + +func isWildcardDomain(name string) bool { + // Per RFC 6125 Section 6.4.3, and explicitly contradicting the earlier + // RFC 2818 which no modern client will validate against, there are two + // main types of wildcards, each with a single wildcard specifier (`*`, + // functionally different from the `*` used as a glob from the + // AllowGlobDomains parsing path) in the left-most label: + // + // 1. Entire label is a single wildcard character (most common and + // well-supported), + // 2. Part of the label contains a single wildcard character (e.g. per + // RFC 6125: baz*.example.net, *baz.example.net, or b*z.example.net). + // + // We permit issuance of both but not the older RFC 2818 style under + // the new AllowWildcardCertificates option. However, anything with a + // glob character is technically a wildcard, though not a valid one. + + return strings.Contains(name, "*") +} + +func validateWildcardDomain(name string) (string, string, error) { + // See note in isWildcardDomain(...) about the definition of a wildcard + // domain. + var wildcardLabel string + var reducedName string + + if strings.Count(name, "*") > 1 { + // As mentioned above, only one wildcard character is permitted + // under RFC 6125 semantics. + return wildcardLabel, reducedName, fmt.Errorf("expected only one wildcard identifier in the given domain name") + } + + // Split the Common Name into two parts: a left-most label and the + // remaining segments (if present). + splitLabels := strings.SplitN(name, ".", 2) + if len(splitLabels) != 2 { + // We've been given a single-part domain name that consists + // entirely of a wildcard. This is a little tricky to handle, + // but EnforceHostnames validates both the wildcard-containing + // label and the reduced name, but _only_ the latter if it is + // non-empty. This allows us to still validate the only label + // component matches hostname expectations still. + wildcardLabel = splitLabels[0] + reducedName = "" + } else { + // We have a (at least) two label domain name. But before we can + // update our names, we need to validate the wildcard ended up + // in the segment we expected it to. While this is (kinda) + // validated under EnforceHostnames's leftWildLabelRegex, we + // still need to validate it in the non-enforced mode. + // + // By validated assumption above, we know there's strictly one + // wildcard in this domain so we only need to check the wildcard + // label or the reduced name (as one is equivalent to the other). 
+ // Because we later assume reducedName _lacks_ wildcard segments, + // we validate that. + wildcardLabel = splitLabels[0] + reducedName = splitLabels[1] + if strings.Contains(reducedName, "*") { + return wildcardLabel, reducedName, fmt.Errorf("expected wildcard to only be present in left-most domain label") + } + } + + return wildcardLabel, reducedName, nil +} + +// Given a set of requested names for a certificate, verifies that all of them +// match the various toggles set in the role for controlling issuance. +// If one does not pass, it is returned in the string argument. +func validateNames(b *backend, data *inputBundle, names []string) string { + for _, name := range names { + // Previously, reducedName was called sanitizedName but this made + // little sense under the previous interpretation of wildcards, + // leading to two bugs in this implementation. We presently call it + // "reduced" to indicate that it is still untrusted input (potentially + // different from the bare Common Name entry we're validating), it + // might have been modified such as by the removal of wildcard labels + // or the email prefix. + reducedName := name + emailDomain := reducedName + wildcardLabel := "" + isEmail := false + isWildcard := false + + // If it has an @, assume it is an email address and separate out the + // user from the hostname portion so that we can act on the hostname. + // Note that this matches behavior from the alt_names parameter. If it + // ends up being problematic for users, I guess that could be separated + // into dns_names and email_names in the future to be explicit, but I + // don't think this is likely. + if strings.Contains(reducedName, "@") { + splitEmail := strings.Split(reducedName, "@") + if len(splitEmail) != 2 { + return name + } + reducedName = splitEmail[1] + emailDomain = splitEmail[1] + isEmail = true + } + + if isWildcardDomain(reducedName) { + // Regardless of later rejections below, this common name contains + // a wildcard character and is thus technically a wildcard name. + isWildcard = true + + // Additionally, if AllowWildcardCertificates is explicitly + // forbidden, it takes precedence over AllowAnyName, thus we should + // reject the name now. + // + // We expect the role to have been correctly migrated but guard for + // safety. + if data.role.AllowWildcardCertificates != nil && !*data.role.AllowWildcardCertificates { + return name + } + + // Check that this domain is well-formatted per RFC 6125. + var err error + wildcardLabel, reducedName, err = validateWildcardDomain(reducedName) + if err != nil { + return name + } + } + + // Email addresses using wildcard domain names do not make sense + // in a Common Name field. + if isEmail && isWildcard { + return name + } + + // AllowAnyName is checked after this because EnforceHostnames still + // applies when allowing any name. Also, we check the reduced name to + // ensure that we are not either checking a full email address or a + // wildcard prefix. + if data.role.EnforceHostnames { + if reducedName != "" { + // See note above about splitLabels having only one segment + // and setting reducedName to the empty string. + p := idna.New( + idna.StrictDomainName(true), + idna.VerifyDNSLength(true), + ) + converted, err := p.ToASCII(reducedName) + if err != nil { + return name + } + if !hostnameRegex.MatchString(converted) { + return name + } + } + + // When a wildcard is specified, we additionally need to validate + // the label with the wildcard is correctly formed. 
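That well-formedness check is exactly the leftWildLabelRegex declared near the top of this file: at most one wildcard, in the left-most label only, in one of the four RFC 6125 shapes. A runnable illustration built from the same regex fragments:

```go
package main

import (
	"fmt"
	"regexp"
)

var (
	labelRegex = `([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])`
	// all-wild | start-wild | end-wild | middle-wild, as above.
	leftWildLabelRegex = regexp.MustCompile(
		`^(\*|\*` + labelRegex + `|` + labelRegex + `\*|` + labelRegex + `\*` + labelRegex + `)$`)
)

func main() {
	for _, label := range []string{"*", "baz*", "*baz", "b*z", "b*z*", "ex^mple"} {
		fmt.Printf("%-9q allowed as left-most wildcard label: %v\n",
			label, leftWildLabelRegex.MatchString(label))
	}
}
```

Note that validateWildcardDomain rejects names containing more than one `*` before this regex is ever consulted, so the `b*z*` case is doubly excluded.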
+ if isWildcard && !leftWildLabelRegex.MatchString(wildcardLabel) { + return name + } + } + + // Self-explanatory, but validations from EnforceHostnames and + // AllowWildcardCertificates take precedence. + if data.role.AllowAnyName { + continue + } + + // The following blocks all work the same basic way: + // 1) If a role allows a certain class of base (localhost, token + // display name, role-configured domains), perform further tests + // + // 2) If there is a perfect match on either the sanitized name or it's an + // email address with a perfect match on the hostname portion, allow it + // + // 3) If subdomains are allowed, we check based on the sanitized name; + // note that if not a wildcard, will be equivalent to the email domain + // for email checks, and we already checked above for both a wildcard + // and email address being present in the same name + // 3a) First we check for a non-wildcard subdomain, as in . + // 3b) Then we check if it's a wildcard and the base domain is a match + // + // Variances are noted in-line + + if data.role.AllowLocalhost { + if reducedName == "localhost" || + reducedName == "localdomain" || + (isEmail && emailDomain == "localhost") || + (isEmail && emailDomain == "localdomain") { + continue + } + + if data.role.AllowSubdomains { + // It is possible, if unlikely, to have a subdomain of "localhost" + if strings.HasSuffix(reducedName, ".localhost") || + (isWildcard && reducedName == "localhost") { + continue + } + + // A subdomain of "localdomain" is also not entirely uncommon + if strings.HasSuffix(reducedName, ".localdomain") || + (isWildcard && reducedName == "localdomain") { + continue + } + } + } + + if data.role.AllowTokenDisplayName { + if name == data.req.DisplayName { + continue + } + + if data.role.AllowSubdomains { + if isEmail { + // If it's an email address, we need to parse the token + // display name in order to do a proper comparison of the + // subdomain + if strings.Contains(data.req.DisplayName, "@") { + splitDisplay := strings.Split(data.req.DisplayName, "@") + if len(splitDisplay) == 2 { + // Compare the sanitized name against the hostname + // portion of the email address in the broken + // display name + if strings.HasSuffix(reducedName, "."+splitDisplay[1]) { + continue + } + } + } + } + + if strings.HasSuffix(reducedName, "."+data.req.DisplayName) || + (isWildcard && reducedName == data.req.DisplayName) { + continue + } + } + } + + if len(data.role.AllowedDomains) > 0 { + valid := false + for _, currDomain := range data.role.AllowedDomains { + // If there is, say, a trailing comma, ignore it + if currDomain == "" { + continue + } + + if data.role.AllowedDomainsTemplate { + isTemplate, _ := framework.ValidateIdentityTemplate(currDomain) + if isTemplate && data.req.EntityID != "" { + tmpCurrDomain, err := framework.PopulateIdentityTemplate(currDomain, data.req.EntityID, b.System()) + if err != nil { + continue + } + + currDomain = tmpCurrDomain + } + } + + // First, allow an exact match of the base domain if that role flag + // is enabled + if data.role.AllowBareDomains && + (strings.EqualFold(name, currDomain) || + (isEmail && strings.EqualFold(emailDomain, currDomain))) { + valid = true + break + } + + if data.role.AllowSubdomains { + if strings.HasSuffix(reducedName, "."+currDomain) || + (isWildcard && strings.EqualFold(reducedName, currDomain)) { + valid = true + break + } + } + + if data.role.AllowGlobDomains && + strings.Contains(currDomain, "*") && + glob.Glob(currDomain, name) { + valid = true + break + } + } + + if valid 
{ + continue + } + } + + return name + } + + return "" +} + +// validateOtherSANs checks if the values requested are allowed. If an OID +// isn't allowed, it will be returned as the first string. If a value isn't +// allowed, it will be returned as the second string. Empty strings + error +// means everything is okay. +func validateOtherSANs(data *inputBundle, requested map[string][]string) (string, string, error) { + if len(data.role.AllowedOtherSANs) == 1 && data.role.AllowedOtherSANs[0] == "*" { + // Anything is allowed + return "", "", nil + } + + allowed, err := parseOtherSANs(data.role.AllowedOtherSANs) + if err != nil { + return "", "", fmt.Errorf("error parsing role's allowed SANs: %w", err) + } + for oid, names := range requested { + for _, name := range names { + allowedNames, ok := allowed[oid] + if !ok { + return oid, "", nil + } + + valid := false + for _, allowedName := range allowedNames { + if glob.Glob(allowedName, name) { + valid = true + break + } + } + + if !valid { + return oid, name, nil + } + } + } + + return "", "", nil +} + +func parseOtherSANs(others []string) (map[string][]string, error) { + result := map[string][]string{} + for _, other := range others { + splitOther := strings.SplitN(other, ";", 2) + if len(splitOther) != 2 { + return nil, fmt.Errorf("expected a semicolon in other SAN %q", other) + } + splitType := strings.SplitN(splitOther[1], ":", 2) + if len(splitType) != 2 { + return nil, fmt.Errorf("expected a colon in other SAN %q", other) + } + switch { + case strings.EqualFold(splitType[0], "utf8"): + case strings.EqualFold(splitType[0], "utf-8"): + default: + return nil, fmt.Errorf("only utf8 other SANs are supported; found non-supported type in other SAN %q", other) + } + result[splitOther[0]] = append(result[splitOther[0]], splitType[1]) + } + + return result, nil +} + +// Returns bool stating whether the given UserId is Valid +func validateUserId(data *inputBundle, userId string) bool { + allowedList := data.role.AllowedUserIDs + + if len(allowedList) == 0 { + // Nothing is allowed. + return false + } + + if strutil.StrListContainsCaseInsensitive(allowedList, userId) { + return true + } + + for _, rolePattern := range allowedList { + if rolePattern == "" { + continue + } + + if strings.Contains(rolePattern, "*") && glob.Glob(rolePattern, userId) { + return true + } + } + + // No matches. 
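validateUserId above combines a case-insensitive exact-list check (strutil.StrListContainsCaseInsensitive) with go-glob matching for entries containing `*`; validateSerialNumber, just below, follows the same pattern. A condensed, self-contained sketch of the shared idea:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/ryanuber/go-glob"
)

// allowed reports whether value matches any allow-list entry, treating
// entries that contain '*' as glob patterns.
func allowed(allowList []string, value string) bool {
	for _, pattern := range allowList {
		if pattern == "" {
			continue // e.g. an artifact of a trailing comma
		}
		if strings.EqualFold(pattern, value) {
			return true
		}
		if strings.Contains(pattern, "*") && glob.Glob(pattern, value) {
			return true
		}
	}
	return false
}

func main() {
	list := []string{"device-*", "ops@example.com"}
	fmt.Println(allowed(list, "device-0042"))     // true
	fmt.Println(allowed(list, "eng@example.com")) // false
}
```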
+ return false +} + +func validateSerialNumber(data *inputBundle, serialNumber string) string { + valid := false + if len(data.role.AllowedSerialNumbers) > 0 { + for _, currSerialNumber := range data.role.AllowedSerialNumbers { + if currSerialNumber == "" { + continue + } + + if (strings.Contains(currSerialNumber, "*") && + glob.Glob(currSerialNumber, serialNumber)) || + currSerialNumber == serialNumber { + valid = true + break + } + } + } + if !valid { + return serialNumber + } else { + return "" + } +} + +func generateCert(sc *storageContext, + input *inputBundle, + caSign *certutil.CAInfoBundle, + isCA bool, + randomSource io.Reader) (*certutil.ParsedCertBundle, []string, error, +) { + ctx := sc.Context + b := sc.Backend + + if input.role == nil { + return nil, nil, errutil.InternalError{Err: "no role found in data bundle"} + } + + if input.role.KeyType == "rsa" && input.role.KeyBits < 2048 { + return nil, nil, errutil.UserError{Err: "RSA keys < 2048 bits are unsafe and not supported"} + } + + data, warnings, err := generateCreationBundle(b, input, caSign, nil) + if err != nil { + return nil, nil, err + } + if data.Params == nil { + return nil, nil, errutil.InternalError{Err: "nil parameters received from parameter bundle generation"} + } + + if isCA { + data.Params.IsCA = isCA + data.Params.PermittedDNSDomains = input.apiData.Get("permitted_dns_domains").([]string) + + if data.SigningBundle == nil { + // Generating a self-signed root certificate. Since we have no + // issuer entry yet, we default to the global URLs. + entries, err := getGlobalAIAURLs(ctx, sc.Storage) + if err != nil { + return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch AIA URL information: %v", err)} + } + + uris, err := entries.toURLEntries(sc, issuerID("")) + if err != nil { + // When generating root issuers, don't err on missing issuer + // ID; there is little value in including AIA info on a root, + // as this info would point back to itself; though RFC 5280 is + // a touch vague on this point, this seems to be consensus + // from public CAs such as DigiCert Global Root G3, ISRG Root + // X1, and others. + // + // This is a UX bug if we do err here, as it requires AIA + // templating to not include issuer id (a best practice for + // child certs issued from root and intermediate mounts + // however), and setting this before root generation (or, on + // root renewal) could cause problems. + if _, nonEmptyIssuerErr := entries.toURLEntries(sc, issuerID("empty-issuer-id")); nonEmptyIssuerErr != nil { + return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse AIA URL information: %v\nUsing templated AIA URL's {{issuer_id}} field when generating root certificates is not supported.", err)} + } + + uris = &certutil.URLEntries{} + + msg := "When generating root CA, found global AIA configuration with issuer_id template unsuitable for root generation. This AIA configuration has been ignored. To include AIA on this root CA, set the global AIA configuration to not include issuer_id and instead to refer to a static issuer name." + warnings = append(warnings, msg) + } + + data.Params.URLs = uris + + if input.role.MaxPathLength == nil { + data.Params.MaxPathLength = -1 + } else { + data.Params.MaxPathLength = *input.role.MaxPathLength + } + } + } + + parsedBundle, err := generateCABundle(sc, input, data, randomSource) + if err != nil { + return nil, nil, err + } + + return parsedBundle, warnings, nil +} + +// N.B.: This is only meant to be used for generating intermediate CAs. 
+// It skips some sanity checks. +func generateIntermediateCSR(sc *storageContext, input *inputBundle, randomSource io.Reader) (*certutil.ParsedCSRBundle, []string, error) { + b := sc.Backend + + creation, warnings, err := generateCreationBundle(b, input, nil, nil) + if err != nil { + return nil, nil, err + } + if creation.Params == nil { + return nil, nil, errutil.InternalError{Err: "nil parameters received from parameter bundle generation"} + } + + addBasicConstraints := input.apiData != nil && input.apiData.Get("add_basic_constraints").(bool) + parsedBundle, err := generateCSRBundle(sc, input, creation, addBasicConstraints, randomSource) + if err != nil { + return nil, nil, err + } + + return parsedBundle, warnings, nil +} + +func signCert(b *backend, + data *inputBundle, + caSign *certutil.CAInfoBundle, + isCA bool, + useCSRValues bool) (*certutil.ParsedCertBundle, []string, error, +) { + if data.role == nil { + return nil, nil, errutil.InternalError{Err: "no role found in data bundle"} + } + + csrString := data.apiData.Get("csr").(string) + if csrString == "" { + return nil, nil, errutil.UserError{Err: "\"csr\" is empty"} + } + + pemBlock, _ := pem.Decode([]byte(csrString)) + if pemBlock == nil { + return nil, nil, errutil.UserError{Err: "csr contains no data"} + } + csr, err := x509.ParseCertificateRequest(pemBlock.Bytes) + if err != nil { + return nil, nil, errutil.UserError{Err: fmt.Sprintf("certificate request could not be parsed: %v", err)} + } + + if csr.PublicKeyAlgorithm == x509.UnknownPublicKeyAlgorithm || csr.PublicKey == nil { + return nil, nil, errutil.UserError{Err: "Refusing to sign CSR with empty PublicKey. This usually means the SubjectPublicKeyInfo field has an OID not recognized by Go, such as 1.2.840.113549.1.1.10 for rsaPSS."} + } + + // This switch validates that the CSR key type matches the role and sets + // the value in the actualKeyType/actualKeyBits values. + actualKeyType := "" + actualKeyBits := 0 + + switch data.role.KeyType { + case "rsa": + // Verify that the key matches the role type + if csr.PublicKeyAlgorithm != x509.RSA { + return nil, nil, errutil.UserError{Err: fmt.Sprintf( + "role requires keys of type %s", + data.role.KeyType)} + } + + pubKey, ok := csr.PublicKey.(*rsa.PublicKey) + if !ok { + return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} + } + + actualKeyType = "rsa" + actualKeyBits = pubKey.N.BitLen() + case "ec": + // Verify that the key matches the role type + if csr.PublicKeyAlgorithm != x509.ECDSA { + return nil, nil, errutil.UserError{Err: fmt.Sprintf( + "role requires keys of type %s", + data.role.KeyType)} + } + pubKey, ok := csr.PublicKey.(*ecdsa.PublicKey) + if !ok { + return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} + } + + actualKeyType = "ec" + actualKeyBits = pubKey.Params().BitSize + case "ed25519": + // Verify that the key matches the role type + if csr.PublicKeyAlgorithm != x509.Ed25519 { + return nil, nil, errutil.UserError{Err: fmt.Sprintf( + "role requires keys of type %s", + data.role.KeyType)} + } + + _, ok := csr.PublicKey.(ed25519.PublicKey) + if !ok { + return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} + } + + actualKeyType = "ed25519" + actualKeyBits = 0 + case "any": + // We need to compute the actual key type and key bits, to correctly + // validate minimums and SignatureBits below. 
+ switch csr.PublicKeyAlgorithm { + case x509.RSA: + pubKey, ok := csr.PublicKey.(*rsa.PublicKey) + if !ok { + return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} + } + if pubKey.N.BitLen() < 2048 { + return nil, nil, errutil.UserError{Err: "RSA keys < 2048 bits are unsafe and not supported"} + } + + actualKeyType = "rsa" + actualKeyBits = pubKey.N.BitLen() + case x509.ECDSA: + pubKey, ok := csr.PublicKey.(*ecdsa.PublicKey) + if !ok { + return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} + } + + actualKeyType = "ec" + actualKeyBits = pubKey.Params().BitSize + case x509.Ed25519: + _, ok := csr.PublicKey.(ed25519.PublicKey) + if !ok { + return nil, nil, errutil.UserError{Err: "could not parse CSR's public key"} + } + + actualKeyType = "ed25519" + actualKeyBits = 0 + default: + return nil, nil, errutil.UserError{Err: "Unknown key type in CSR: " + csr.PublicKeyAlgorithm.String()} + } + default: + return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unsupported key type value: %s", data.role.KeyType)} + } + + // Before validating key lengths, update our KeyBits/SignatureBits based + // on the actual CSR key type. + if data.role.KeyType == "any" { + // We update the value of KeyBits and SignatureBits here (from the + // role), using the specified key type. This allows us to convert + // the default value (0) for SignatureBits and KeyBits to a + // meaningful value. + // + // We ignore the role's original KeyBits value if the KeyType is any + // as legacy (pre-1.10) roles had default values that made sense only + // for RSA keys (key_bits=2048) and the older code paths ignored the role value + // set for KeyBits when KeyType was set to any. This also enforces the + // docs saying when key_type=any, we only enforce our specified minimums + // for signing operations + if data.role.KeyBits, data.role.SignatureBits, err = certutil.ValidateDefaultOrValueKeyTypeSignatureLength( + actualKeyType, 0, data.role.SignatureBits); err != nil { + return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unknown internal error updating default values: %v", err)} + } + + // We're using the KeyBits field as a minimum value below, and P-224 is safe + // and a previously allowed value. However, the above call defaults + // to P-256 as that's a saner default than P-224 (w.r.t. generation), so + // override it here to allow 224 as the smallest size we permit. + if actualKeyType == "ec" { + data.role.KeyBits = 224 + } + } + + // At this point, data.role.KeyBits and data.role.SignatureBits should both + // be non-zero, for RSA and ECDSA keys. Validate the actualKeyBits based on + // the role's values. If the KeyType was any, and KeyBits was set to 0, + // KeyBits should be updated to 2048 unless some other value was chosen + // explicitly. + // + // This validation needs to occur regardless of the role's key type, so + // that we always validate both RSA and ECDSA key sizes. 
+ if actualKeyType == "rsa" { + if actualKeyBits < data.role.KeyBits { + return nil, nil, errutil.UserError{Err: fmt.Sprintf( + "role requires a minimum of a %d-bit key, but CSR's key is %d bits", + data.role.KeyBits, actualKeyBits)} + } + + if actualKeyBits < 2048 { + return nil, nil, errutil.UserError{Err: fmt.Sprintf( + "Vault requires a minimum of a 2048-bit key, but CSR's key is %d bits", + actualKeyBits)} + } + } else if actualKeyType == "ec" { + if actualKeyBits < data.role.KeyBits { + return nil, nil, errutil.UserError{Err: fmt.Sprintf( + "role requires a minimum of a %d-bit key, but CSR's key is %d bits", + data.role.KeyBits, + actualKeyBits)} + } + } + + creation, warnings, err := generateCreationBundle(b, data, caSign, csr) + if err != nil { + return nil, nil, err + } + if creation.Params == nil { + return nil, nil, errutil.InternalError{Err: "nil parameters received from parameter bundle generation"} + } + + creation.Params.IsCA = isCA + creation.Params.UseCSRValues = useCSRValues + + if isCA { + creation.Params.PermittedDNSDomains = data.apiData.Get("permitted_dns_domains").([]string) + } else { + for _, ext := range csr.Extensions { + if ext.Id.Equal(certutil.ExtensionBasicConstraintsOID) { + warnings = append(warnings, "specified CSR contained a Basic Constraints extension that was ignored during issuance") + } + } + } + + parsedBundle, err := certutil.SignCertificate(creation) + if err != nil { + return nil, nil, err + } + + return parsedBundle, warnings, nil +} + +// otherNameRaw describes a name related to a certificate which is not in one +// of the standard name formats. RFC 5280, 4.2.1.6: +// +// OtherName ::= SEQUENCE { +// type-id OBJECT IDENTIFIER, +// value [0] EXPLICIT ANY DEFINED BY type-id } +type otherNameRaw struct { + TypeID asn1.ObjectIdentifier + Value asn1.RawValue +} + +type otherNameUtf8 struct { + oid string + value string +} + +// ExtractUTF8String returns the UTF8 string contained in the Value, or an error +// if none is present. 
+func (oraw *otherNameRaw) extractUTF8String() (*otherNameUtf8, error) { + svalue := cryptobyte.String(oraw.Value.Bytes) + var outTag cbbasn1.Tag + var val cryptobyte.String + read := svalue.ReadAnyASN1(&val, &outTag) + + if read && outTag == asn1.TagUTF8String { + return &otherNameUtf8{oid: oraw.TypeID.String(), value: string(val)}, nil + } + return nil, fmt.Errorf("no UTF-8 string found in OtherName") +} + +func (o otherNameUtf8) String() string { + return fmt.Sprintf("%s;%s:%s", o.oid, "UTF-8", o.value) +} + +func getOtherSANsFromX509Extensions(exts []pkix.Extension) ([]otherNameUtf8, error) { + var ret []otherNameUtf8 + for _, ext := range exts { + if !ext.Id.Equal(oidExtensionSubjectAltName) { + continue + } + err := forEachSAN(ext.Value, func(tag int, data []byte) error { + if tag != 0 { + return nil + } + + var other otherNameRaw + _, err := asn1.UnmarshalWithParams(data, &other, "tag:0") + if err != nil { + return fmt.Errorf("could not parse requested other SAN: %w", err) + } + val, err := other.extractUTF8String() + if err != nil { + return err + } + ret = append(ret, *val) + return nil + }) + if err != nil { + return nil, err + } + } + + return ret, nil +} + +func forEachSAN(extension []byte, callback func(tag int, data []byte) error) error { + // RFC 5280, 4.2.1.6 + + // SubjectAltName ::= GeneralNames + // + // GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName + // + // GeneralName ::= CHOICE { + // otherName [0] OtherName, + // rfc822Name [1] IA5String, + // dNSName [2] IA5String, + // x400Address [3] ORAddress, + // directoryName [4] Name, + // ediPartyName [5] EDIPartyName, + // uniformResourceIdentifier [6] IA5String, + // iPAddress [7] OCTET STRING, + // registeredID [8] OBJECT IDENTIFIER } + var seq asn1.RawValue + rest, err := asn1.Unmarshal(extension, &seq) + if err != nil { + return err + } else if len(rest) != 0 { + return fmt.Errorf("x509: trailing data after X.509 extension") + } + if !seq.IsCompound || seq.Tag != 16 || seq.Class != 0 { + return asn1.StructuralError{Msg: "bad SAN sequence"} + } + + rest = seq.Bytes + for len(rest) > 0 { + var v asn1.RawValue + rest, err = asn1.Unmarshal(rest, &v) + if err != nil { + return err + } + + if err := callback(v.Tag, v.FullBytes); err != nil { + return err + } + } + + return nil +} + +// generateCreationBundle is a shared function that reads parameters supplied +// from the various endpoints and generates a CreationParameters with the +// parameters that can be used to issue or sign +func generateCreationBundle(b *backend, data *inputBundle, caSign *certutil.CAInfoBundle, csr *x509.CertificateRequest) (*certutil.CreationBundle, []string, error) { + // Read in names -- CN, DNS and email addresses + var cn string + var ridSerialNumber string + var warnings []string + dnsNames := []string{} + emailAddresses := []string{} + { + if csr != nil && data.role.UseCSRCommonName { + cn = csr.Subject.CommonName + } + if cn == "" { + cn = data.apiData.Get("common_name").(string) + if cn == "" && data.role.RequireCN { + return nil, nil, errutil.UserError{Err: `the common_name field is required, or must be provided in a CSR with "use_csr_common_name" set to true, unless "require_cn" is set to false`} + } + } + + ridSerialNumber = data.apiData.Get("serial_number").(string) + + // only take serial number from CSR if one was not supplied via API + if ridSerialNumber == "" && csr != nil { + ridSerialNumber = csr.Subject.SerialNumber + } + + if csr != nil && data.role.UseCSRSANs { + dnsNames = csr.DNSNames + emailAddresses = 
csr.EmailAddresses + } + + if cn != "" && !data.apiData.Get("exclude_cn_from_sans").(bool) { + if strings.Contains(cn, "@") { + // Note: emails are not disallowed if the role's email protection + // flag is false, because they may well be included for + // informational purposes; it is up to the verifying party to + // ensure that email addresses in a subject alternate name can be + // used for the purpose for which they are presented + emailAddresses = append(emailAddresses, cn) + } else { + // Only add to dnsNames if it's actually a DNS name but convert + // idn first + p := idna.New( + idna.StrictDomainName(true), + idna.VerifyDNSLength(true), + ) + converted, err := p.ToASCII(cn) + if err != nil { + return nil, nil, errutil.UserError{Err: err.Error()} + } + if hostnameRegex.MatchString(converted) { + dnsNames = append(dnsNames, converted) + } + } + } + + if csr == nil || !data.role.UseCSRSANs { + cnAltRaw, ok := data.apiData.GetOk("alt_names") + if ok { + cnAlt := strutil.ParseDedupAndSortStrings(cnAltRaw.(string), ",") + for _, v := range cnAlt { + if strings.Contains(v, "@") { + emailAddresses = append(emailAddresses, v) + } else { + // Only add to dnsNames if it's actually a DNS name but + // convert idn first + p := idna.New( + idna.StrictDomainName(true), + idna.VerifyDNSLength(true), + ) + converted, err := p.ToASCII(v) + if err != nil { + return nil, nil, errutil.UserError{Err: err.Error()} + } + if hostnameRegex.MatchString(converted) { + dnsNames = append(dnsNames, converted) + } + } + } + } + } + + // Check the CN. This ensures that the CN is checked even if it's + // excluded from SANs. + if cn != "" { + badName := validateCommonName(b, data, cn) + if len(badName) != 0 { + return nil, nil, errutil.UserError{Err: fmt.Sprintf( + "common name %s not allowed by this role", badName)} + } + } + + if ridSerialNumber != "" { + badName := validateSerialNumber(data, ridSerialNumber) + if len(badName) != 0 { + return nil, nil, errutil.UserError{Err: fmt.Sprintf( + "serial_number %s not allowed by this role", badName)} + } + } + + // Check for bad email and/or DNS names + badName := validateNames(b, data, dnsNames) + if len(badName) != 0 { + return nil, nil, errutil.UserError{Err: fmt.Sprintf( + "subject alternate name %s not allowed by this role", badName)} + } + + badName = validateNames(b, data, emailAddresses) + if len(badName) != 0 { + return nil, nil, errutil.UserError{Err: fmt.Sprintf( + "email address %s not allowed by this role", badName)} + } + } + + // otherSANsInput has the same format as the other_sans HTTP param in the + // Vault PKI API: it is a list of strings of the form ;: + // where must be UTF8/UTF-8. 
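Each other_sans entry has the form `<oid>;<type>:<value>`, where `<type>` must be UTF8 (or UTF-8); parseOtherSANs, defined earlier in this file, splits on the first semicolon and then the first colon. A standalone sketch of parsing a single entry; the OID shown is the Microsoft userPrincipalName OID, used here purely as an example:

```go
package main

import (
	"fmt"
	"strings"
)

// parseOtherSAN splits one other_sans entry of the form
// <oid>;<type>:<value>; only UTF8/UTF-8 types are supported.
func parseOtherSAN(entry string) (oid, value string, err error) {
	oidAndRest := strings.SplitN(entry, ";", 2)
	if len(oidAndRest) != 2 {
		return "", "", fmt.Errorf("expected a semicolon in other SAN %q", entry)
	}
	typeAndValue := strings.SplitN(oidAndRest[1], ":", 2)
	if len(typeAndValue) != 2 {
		return "", "", fmt.Errorf("expected a colon in other SAN %q", entry)
	}
	if !strings.EqualFold(typeAndValue[0], "utf8") && !strings.EqualFold(typeAndValue[0], "utf-8") {
		return "", "", fmt.Errorf("unsupported other SAN type %q", typeAndValue[0])
	}
	return oidAndRest[0], typeAndValue[1], nil
}

func main() {
	oid, value, err := parseOtherSAN("1.3.6.1.4.1.311.20.2.3;UTF8:user@example.com")
	fmt.Println(oid, value, err) // 1.3.6.1.4.1.311.20.2.3 user@example.com <nil>
}
```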
+ var otherSANsInput []string + // otherSANs is the output of parseOtherSANs(otherSANsInput): its keys are + // the value, its values are of the form [, ] + var otherSANs map[string][]string + if sans := data.apiData.Get("other_sans").([]string); len(sans) > 0 { + otherSANsInput = sans + } + if data.role.UseCSRSANs && csr != nil && len(csr.Extensions) > 0 { + others, err := getOtherSANsFromX509Extensions(csr.Extensions) + if err != nil { + return nil, nil, errutil.UserError{Err: fmt.Errorf("could not parse requested other SAN: %w", err).Error()} + } + for _, other := range others { + otherSANsInput = append(otherSANsInput, other.String()) + } + } + if len(otherSANsInput) > 0 { + requested, err := parseOtherSANs(otherSANsInput) + if err != nil { + return nil, nil, errutil.UserError{Err: fmt.Errorf("could not parse requested other SAN: %w", err).Error()} + } + badOID, badName, err := validateOtherSANs(data, requested) + switch { + case err != nil: + return nil, nil, errutil.UserError{Err: err.Error()} + case len(badName) > 0: + return nil, nil, errutil.UserError{Err: fmt.Sprintf( + "other SAN %s not allowed for OID %s by this role", badName, badOID)} + case len(badOID) > 0: + return nil, nil, errutil.UserError{Err: fmt.Sprintf( + "other SAN OID %s not allowed by this role", badOID)} + default: + otherSANs = requested + } + } + + // Get and verify any IP SANs + ipAddresses := []net.IP{} + { + if csr != nil && data.role.UseCSRSANs { + if len(csr.IPAddresses) > 0 { + if !data.role.AllowIPSANs { + return nil, nil, errutil.UserError{Err: "IP Subject Alternative Names are not allowed in this role, but was provided some via CSR"} + } + ipAddresses = csr.IPAddresses + } + } else { + ipAlt := data.apiData.Get("ip_sans").([]string) + if len(ipAlt) > 0 { + if !data.role.AllowIPSANs { + return nil, nil, errutil.UserError{Err: fmt.Sprintf( + "IP Subject Alternative Names are not allowed in this role, but was provided %s", ipAlt)} + } + for _, v := range ipAlt { + parsedIP := net.ParseIP(v) + if parsedIP == nil { + return nil, nil, errutil.UserError{Err: fmt.Sprintf( + "the value %q is not a valid IP address", v)} + } + ipAddresses = append(ipAddresses, parsedIP) + } + } + } + } + + URIs := []*url.URL{} + { + if csr != nil && data.role.UseCSRSANs { + if len(csr.URIs) > 0 { + if len(data.role.AllowedURISANs) == 0 { + return nil, nil, errutil.UserError{ + Err: "URI Subject Alternative Names are not allowed in this role, but were provided via CSR", + } + } + + // validate uri sans + for _, uri := range csr.URIs { + valid := validateURISAN(b, data, uri.String()) + if !valid { + return nil, nil, errutil.UserError{ + Err: "URI Subject Alternative Names were provided via CSR which are not valid for this role", + } + } + + URIs = append(URIs, uri) + } + } + } else { + uriAlt := data.apiData.Get("uri_sans").([]string) + if len(uriAlt) > 0 { + if len(data.role.AllowedURISANs) == 0 { + return nil, nil, errutil.UserError{ + Err: "URI Subject Alternative Names are not allowed in this role, but were provided via the API", + } + } + + for _, uri := range uriAlt { + valid := validateURISAN(b, data, uri) + if !valid { + return nil, nil, errutil.UserError{ + Err: "URI Subject Alternative Names were provided via the API which are not valid for this role", + } + } + + parsedURI, err := url.Parse(uri) + if parsedURI == nil || err != nil { + return nil, nil, errutil.UserError{ + Err: fmt.Sprintf( + "the provided URI Subject Alternative Name %q is not a valid URI", uri), + } + } + + URIs = append(URIs, parsedURI) + } + } + } + 
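The SAN-gathering blocks above map directly onto request fields: common_name and alt_names, ip_sans, uri_sans, and other_sans. A client-side sketch of a request exercising all of them, assuming a role configured to permit each SAN type (the mount and role names are illustrative):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/api"
)

func issueWithSANs(client *api.Client) error {
	secret, err := client.Logical().Write("pki/issue/example-role", map[string]interface{}{
		"common_name": "app.example.com",
		"alt_names":   "app1.example.com,ops@example.com", // DNS and email SANs
		"ip_sans":     "10.0.0.5",
		"uri_sans":    "spiffe://example.com/app",
		"other_sans":  "1.3.6.1.4.1.311.20.2.3;UTF8:user@example.com",
	})
	if err != nil {
		return err
	}
	fmt.Println("issued serial:", secret.Data["serial_number"])
	return nil
}
```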
} + + // Most of these could also be RemoveDuplicateStable, or even + // leave duplicates in, but OU is the one most likely to be duplicated. + subject := pkix.Name{ + CommonName: cn, + SerialNumber: ridSerialNumber, + Country: strutil.RemoveDuplicatesStable(data.role.Country, false), + Organization: strutil.RemoveDuplicatesStable(data.role.Organization, false), + OrganizationalUnit: strutil.RemoveDuplicatesStable(data.role.OU, false), + Locality: strutil.RemoveDuplicatesStable(data.role.Locality, false), + Province: strutil.RemoveDuplicatesStable(data.role.Province, false), + StreetAddress: strutil.RemoveDuplicatesStable(data.role.StreetAddress, false), + PostalCode: strutil.RemoveDuplicatesStable(data.role.PostalCode, false), + } + + // Get the TTL and verify it against the max allowed + notAfter, ttlWarnings, err := getCertificateNotAfter(b, data, caSign) + if err != nil { + return nil, warnings, err + } + warnings = append(warnings, ttlWarnings...) + + // Parse SKID from the request for cross-signing. + var skid []byte + { + if rawSKIDValue, ok := data.apiData.GetOk("skid"); ok { + // Handle removing common separators to make copy/paste from tool + // output easier. Chromium uses space, OpenSSL uses colons, and at + // one point, Vault had preferred dash as a separator for hex + // strings. + var err error + skidValue := rawSKIDValue.(string) + for _, separator := range []string{":", "-", " "} { + skidValue = strings.ReplaceAll(skidValue, separator, "") + } + + skid, err = hex.DecodeString(skidValue) + if err != nil { + return nil, nil, errutil.UserError{Err: fmt.Sprintf("cannot parse requested SKID value as hex: %v", err)} + } + } + } + + // Add UserIDs into the Subject, if the request type supports it. + if _, present := data.apiData.Schema["user_ids"]; present { + rawUserIDs := data.apiData.Get("user_ids").([]string) + + // Only take UserIDs from CSR if one was not supplied via API. + if len(rawUserIDs) == 0 && csr != nil { + for _, attr := range csr.Subject.Names { + if attr.Type.Equal(certutil.SubjectPilotUserIDAttributeOID) { + switch aValue := attr.Value.(type) { + case string: + rawUserIDs = append(rawUserIDs, aValue) + case []byte: + rawUserIDs = append(rawUserIDs, string(aValue)) + default: + return nil, nil, errutil.UserError{Err: "unknown type for user_id attribute in CSR's Subject"} + } + } + } + } + + // Check for bad userIDs and add to the subject. 
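The SKID block above tolerates copy/pasted values from several tools by deleting the common separators before hex-decoding. Equivalent standalone logic:

```go
package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// normalizeSKID strips the separators used by Chromium (spaces), OpenSSL
// (colons), and older Vault output (dashes), then hex-decodes the rest.
func normalizeSKID(raw string) ([]byte, error) {
	for _, sep := range []string{":", "-", " "} {
		raw = strings.ReplaceAll(raw, sep, "")
	}
	return hex.DecodeString(raw)
}

func main() {
	skid, err := normalizeSKID("af:3e:c2:54")
	fmt.Printf("%x %v\n", skid, err) // af3ec254 <nil>
}
```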
+		if len(rawUserIDs) > 0 {
+			for _, value := range rawUserIDs {
+				if !validateUserId(data, value) {
+					return nil, nil, errutil.UserError{Err: fmt.Sprintf("user_id %v is not allowed by this role", value)}
+				}
+
+				subject.ExtraNames = append(subject.ExtraNames, pkix.AttributeTypeAndValue{
+					Type:  certutil.SubjectPilotUserIDAttributeOID,
+					Value: value,
+				})
+			}
+		}
+	}
+
+	creation := &certutil.CreationBundle{
+		Params: &certutil.CreationParameters{
+			Subject:                       subject,
+			DNSNames:                      strutil.RemoveDuplicates(dnsNames, false),
+			EmailAddresses:                strutil.RemoveDuplicates(emailAddresses, false),
+			IPAddresses:                   ipAddresses,
+			URIs:                          URIs,
+			OtherSANs:                     otherSANs,
+			KeyType:                       data.role.KeyType,
+			KeyBits:                       data.role.KeyBits,
+			SignatureBits:                 data.role.SignatureBits,
+			UsePSS:                        data.role.UsePSS,
+			NotAfter:                      notAfter,
+			KeyUsage:                      x509.KeyUsage(parseKeyUsages(data.role.KeyUsage)),
+			ExtKeyUsage:                   parseExtKeyUsages(data.role),
+			ExtKeyUsageOIDs:               data.role.ExtKeyUsageOIDs,
+			PolicyIdentifiers:             data.role.PolicyIdentifiers,
+			BasicConstraintsValidForNonCA: data.role.BasicConstraintsValidForNonCA,
+			NotBeforeDuration:             data.role.NotBeforeDuration,
+			ForceAppendCaChain:            caSign != nil,
+			SKID:                          skid,
+		},
+		SigningBundle: caSign,
+		CSR:           csr,
+	}
+
+	// Don't deal with URLs or max path length if it's self-signed, as these
+	// normally come from the signing bundle
+	if caSign == nil {
+		return creation, warnings, nil
+	}
+
+	// This will have been read in from the getGlobalAIAURLs function
+	creation.Params.URLs = caSign.URLs
+
+	// If the max path length in the role is not nil, it was specified at
+	// generation time with the max_path_length parameter; otherwise derive it
+	// from the signing certificate
+	if data.role.MaxPathLength != nil {
+		creation.Params.MaxPathLength = *data.role.MaxPathLength
+	} else {
+		switch {
+		case caSign.Certificate.MaxPathLen < 0:
+			creation.Params.MaxPathLength = -1
+		case caSign.Certificate.MaxPathLen == 0 &&
+			caSign.Certificate.MaxPathLenZero:
+			// The signing function will ensure that we do not issue a CA cert
+			creation.Params.MaxPathLength = 0
+		default:
+			// If this takes it to zero, we handle this case later if
+			// necessary
+			creation.Params.MaxPathLength = caSign.Certificate.MaxPathLen - 1
+		}
+	}
+
+	return creation, warnings, nil
+}
+
+// getCertificateNotAfter computes a certificate's NotAfter date based on the
+// mount TTL, role, signing bundle, and input API data. It returns the
+// NotAfter time, a set of warnings, or an error.
+func getCertificateNotAfter(b *backend, data *inputBundle, caSign *certutil.CAInfoBundle) (time.Time, []string, error) {
+	var warnings []string
+	var maxTTL time.Duration
+	var notAfter time.Time
+	var err error
+
+	ttl := time.Duration(data.apiData.Get("ttl").(int)) * time.Second
+	notAfterAlt := data.role.NotAfter
+	if notAfterAlt == "" {
+		notAfterAltRaw, ok := data.apiData.GetOk("not_after")
+		if ok {
+			notAfterAlt = notAfterAltRaw.(string)
+		}
+	}
+	if ttl > 0 && notAfterAlt != "" {
+		return time.Time{}, warnings, errutil.UserError{Err: "Either ttl or not_after should be provided. Both should not be provided in the same request."}
+	}
+
+	if ttl == 0 && data.role.TTL > 0 {
+		ttl = data.role.TTL
+	}
+
+	if data.role.MaxTTL > 0 {
+		maxTTL = data.role.MaxTTL
+	}
+
+	if ttl == 0 {
+		ttl = b.System().DefaultLeaseTTL()
+	}
+	if maxTTL == 0 {
+		maxTTL = b.System().MaxLeaseTTL()
+	}
+	if ttl > maxTTL {
+		warnings = append(warnings, fmt.Sprintf("TTL %q is longer than permitted maxTTL %q, so maxTTL is being used", ttl, maxTTL))
+		ttl = maxTTL
+	}
+
+	if notAfterAlt != "" {
+		notAfter, err = time.Parse(time.RFC3339, notAfterAlt)
+		if err != nil {
+			return notAfter, warnings, errutil.UserError{Err: err.Error()}
+		}
+	} else {
+		notAfter = time.Now().Add(ttl)
+	}
+	if caSign != nil && notAfter.After(caSign.Certificate.NotAfter) {
+		// If it's not self-signed, verify that the issued certificate
+		// won't be valid past the lifetime of the CA certificate, and
+		// act accordingly. This behavior depends on the issuer's
+		// LeafNotAfterBehavior setting.
+		switch caSign.LeafNotAfterBehavior {
+		case certutil.PermitNotAfterBehavior:
+			// Explicitly do nothing.
+		case certutil.TruncateNotAfterBehavior:
+			notAfter = caSign.Certificate.NotAfter
+		case certutil.ErrNotAfterBehavior:
+			fallthrough
+		default:
+			return time.Time{}, warnings, errutil.UserError{Err: fmt.Sprintf(
+				"cannot satisfy request, as TTL would result in notAfter of %s that is beyond the expiration of the CA certificate at %s", notAfter.UTC().Format(time.RFC3339Nano), caSign.Certificate.NotAfter.UTC().Format(time.RFC3339Nano))}
+		}
+	}
+	return notAfter, warnings, nil
+}
+
+func convertRespToPKCS8(resp *logical.Response) error {
+	privRaw, ok := resp.Data["private_key"]
+	if !ok {
+		return nil
+	}
+	priv, ok := privRaw.(string)
+	if !ok {
+		return fmt.Errorf("error converting response to pkcs8: could not parse original value as string")
+	}
+
+	privKeyTypeRaw, ok := resp.Data["private_key_type"]
+	if !ok {
+		return fmt.Errorf("error converting response to pkcs8: %q not found in response", "private_key_type")
+	}
+	privKeyType, ok := privKeyTypeRaw.(certutil.PrivateKeyType)
+	if !ok {
+		return fmt.Errorf("error converting response to pkcs8: could not parse original value as a private key type")
+	}
+
+	var keyData []byte
+	var pemUsed bool
+	var err error
+	var signer crypto.Signer
+
+	block, _ := pem.Decode([]byte(priv))
+	if block == nil {
+		keyData, err = base64.StdEncoding.DecodeString(priv)
+		if err != nil {
+			return fmt.Errorf("error converting response to pkcs8: error decoding original value: %w", err)
+		}
+	} else {
+		keyData = block.Bytes
+		pemUsed = true
+	}
+
+	switch privKeyType {
+	case certutil.RSAPrivateKey:
+		signer, err = x509.ParsePKCS1PrivateKey(keyData)
+	case certutil.ECPrivateKey:
+		signer, err = x509.ParseECPrivateKey(keyData)
+	case certutil.Ed25519PrivateKey:
+		k, err := x509.ParsePKCS8PrivateKey(keyData)
+		if err != nil {
+			return fmt.Errorf("error converting response to pkcs8: error parsing previous key: %w", err)
+		}
+		signer = k.(crypto.Signer)
+	default:
+		return fmt.Errorf("unknown private key type %q", privKeyType)
+	}
+	if err != nil {
+		return fmt.Errorf("error converting response to pkcs8: error parsing previous key: %w", err)
+	}
+
+	keyData, err = x509.MarshalPKCS8PrivateKey(signer)
+	if err != nil {
+		return fmt.Errorf("error converting response to pkcs8: error marshaling pkcs8 key: %w", err)
+	}
+
+	if pemUsed {
+		block.Type = "PRIVATE KEY"
+		block.Bytes = keyData
+		resp.Data["private_key"] = strings.TrimSpace(string(pem.EncodeToMemory(block)))
+	} else {
+		resp.Data["private_key"] = 
base64.StdEncoding.EncodeToString(keyData)
+	}
+
+	return nil
+}
+
+func handleOtherCSRSANs(in *x509.CertificateRequest, sans map[string][]string) error {
+	certTemplate := &x509.Certificate{
+		DNSNames:       in.DNSNames,
+		IPAddresses:    in.IPAddresses,
+		EmailAddresses: in.EmailAddresses,
+		URIs:           in.URIs,
+	}
+	if err := handleOtherSANs(certTemplate, sans); err != nil {
+		return err
+	}
+	if len(certTemplate.ExtraExtensions) > 0 {
+		in.ExtraExtensions = append(in.ExtraExtensions, certTemplate.ExtraExtensions...)
+	}
+	return nil
+}
+
+func handleOtherSANs(in *x509.Certificate, sans map[string][]string) error {
+	// If other SANs is empty, we return early, which causes normal Go stdlib
+	// parsing of the other SAN types
+	if len(sans) == 0 {
+		return nil
+	}
+
+	var rawValues []asn1.RawValue
+
+	// We need to generate an IMPLICIT sequence for compatibility with OpenSSL
+	// -- it's an open question what the default for RFC 5280 actually is, see
+	// https://github.com/openssl/openssl/issues/5091 -- so we have to use
+	// cryptobyte because using the asn1 package's marshaling always produces
+	// an EXPLICIT sequence. Note that asn1 is way too magical according to
+	// agl, and cryptobyte is modeled after the CBB/CBS bits that agl put into
+	// boringssl.
+	for oid, vals := range sans {
+		for _, val := range vals {
+			var b cryptobyte.Builder
+			oidStr, err := stringToOid(oid)
+			if err != nil {
+				return err
+			}
+			b.AddASN1ObjectIdentifier(oidStr)
+			b.AddASN1(cbbasn1.Tag(0).ContextSpecific().Constructed(), func(b *cryptobyte.Builder) {
+				b.AddASN1(cbbasn1.UTF8String, func(b *cryptobyte.Builder) {
+					b.AddBytes([]byte(val))
+				})
+			})
+			m, err := b.Bytes()
+			if err != nil {
+				return err
+			}
+			rawValues = append(rawValues, asn1.RawValue{Tag: 0, Class: 2, IsCompound: true, Bytes: m})
+		}
+	}
+
+	// Defensive check: if no values were produced above, there is nothing to
+	// add, and normal Go stdlib parsing of the other SAN types applies.
+	if len(rawValues) == 0 {
+		return nil
+	}
+
+	// Append any existing SANs, sans marshalling
+	rawValues = append(rawValues, marshalSANs(in.DNSNames, in.EmailAddresses, in.IPAddresses, in.URIs)...)
+
+	// Marshal and add to ExtraExtensions
+	ext := pkix.Extension{
+		// This is the defined OID for subjectAltName
+		Id: asn1.ObjectIdentifier(oidExtensionSubjectAltName),
+	}
+	var err error
+	ext.Value, err = asn1.Marshal(rawValues)
+	if err != nil {
+		return err
+	}
+	in.ExtraExtensions = append(in.ExtraExtensions, ext)
+
+	return nil
+}
+
+// Note: Taken from the Go source code since it's not public, and used in the
+// modified function below (which also uses these consts upstream)
+const (
+	nameTypeOther = 0
+	nameTypeEmail = 1
+	nameTypeDNS   = 2
+	nameTypeURI   = 6
+	nameTypeIP    = 7
+)
+
+// Note: Taken from the Go source code since it's not public, plus changed to
+// return the raw values rather than marshaling them.
+// marshalSANs marshals a list of addresses into the contents of an X.509
+// SubjectAlternativeName extension.
+func marshalSANs(dnsNames, emailAddresses []string, ipAddresses []net.IP, uris []*url.URL) []asn1.RawValue {
+	var rawValues []asn1.RawValue
+	for _, name := range dnsNames {
+		rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeDNS, Class: 2, Bytes: []byte(name)})
+	}
+	for _, email := range emailAddresses {
+		rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeEmail, Class: 2, Bytes: []byte(email)})
+	}
+	for _, rawIP := range ipAddresses {
+		// If possible, we always want to encode IPv4 addresses in 4 bytes.
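+		// Go's net.IP may store an IPv4 address in 16-byte IPv4-in-IPv6
+		// form; To4() yields the 4-byte form when the address is IPv4 (and
+		// nil otherwise), matching RFC 5280's iPAddress encoding of four
+		// octets for IPv4 and sixteen for IPv6.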
+		ip := rawIP.To4()
+		if ip == nil {
+			ip = rawIP
+		}
+		rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeIP, Class: 2, Bytes: ip})
+	}
+	for _, uri := range uris {
+		rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeURI, Class: 2, Bytes: []byte(uri.String())})
+	}
+	return rawValues
+}
+
+func stringToOid(in string) (asn1.ObjectIdentifier, error) {
+	split := strings.Split(in, ".")
+	ret := make(asn1.ObjectIdentifier, 0, len(split))
+	for _, v := range split {
+		i, err := strconv.Atoi(v)
+		if err != nil {
+			return nil, err
+		}
+		ret = append(ret, i)
+	}
+	return ret, nil
+}
+
+func parseCertificateFromBytes(certBytes []byte) (*x509.Certificate, error) {
+	block, extra := pem.Decode(certBytes)
+	if block == nil {
+		return nil, errors.New("unable to parse certificate: invalid PEM")
+	}
+	if len(strings.TrimSpace(string(extra))) > 0 {
+		return nil, errors.New("unable to parse certificate: trailing PEM data")
+	}
+
+	return x509.ParseCertificate(block.Bytes)
+}
diff --git a/builtin/logical/pki/cert_util_test.go b/builtin/logical/pki/cert_util_test.go
new file mode 100644
index 0000000..7fb811c
--- /dev/null
+++ b/builtin/logical/pki/cert_util_test.go
@@ -0,0 +1,237 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package pki
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"strings"
+	"testing"
+
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+func TestPki_FetchCertBySerial(t *testing.T) {
+	t.Parallel()
+	b, storage := CreateBackendWithStorage(t)
+	sc := b.makeStorageContext(ctx, storage)
+
+	cases := map[string]struct {
+		Req    *logical.Request
+		Prefix string
+		Serial string
+	}{
+		"valid cert": {
+			&logical.Request{
+				Storage: storage,
+			},
+			"certs/",
+			"00:00:00:00:00:00:00:00",
+		},
+		"revoked cert": {
+			&logical.Request{
+				Storage: storage,
+			},
+			"revoked/",
+			"11:11:11:11:11:11:11:11",
+		},
+	}
+
+	// Test for colon-based paths in storage
+	for name, tc := range cases {
+		storageKey := fmt.Sprintf("%s%s", tc.Prefix, tc.Serial)
+		err := storage.Put(context.Background(), &logical.StorageEntry{
+			Key:   storageKey,
+			Value: []byte("some data"),
+		})
+		if err != nil {
+			t.Fatalf("error writing to storage on %s colon-based storage path: %s", name, err)
+		}
+
+		certEntry, err := fetchCertBySerial(sc, tc.Prefix, tc.Serial)
+		if err != nil {
+			t.Fatalf("error on %s for colon-based storage path: %s", name, err)
+		}
+
+		// Check for non-nil on valid/revoked certs
+		if certEntry == nil {
+			t.Fatalf("nil on %s for colon-based storage path", name)
+		}
+
+		// Ensure that cert serials are converted/updated after fetch
+		expectedKey := tc.Prefix + normalizeSerial(tc.Serial)
+		se, err := storage.Get(context.Background(), expectedKey)
+		if err != nil {
+			t.Fatalf("error on %s for colon-based storage path: %s", name, err)
+		}
+		if strings.Compare(expectedKey, se.Key) != 0 {
+			t.Fatalf("expected: %s, got: %s", expectedKey, se.Key)
+		}
+	}
+
+	// Reset storage
+	storage = &logical.InmemStorage{}
+
+	// Test for hyphen-based paths in storage
+	for name, tc := range cases {
+		storageKey := tc.Prefix + normalizeSerial(tc.Serial)
+		err := storage.Put(context.Background(), &logical.StorageEntry{
+			Key:   storageKey,
+			Value: []byte("some data"),
+		})
+		if err != nil {
+			t.Fatalf("error writing to storage on %s hyphen-based storage path: %s", name, err)
+		}
+
+		certEntry, err := fetchCertBySerial(sc, tc.Prefix, tc.Serial)
+		if err != nil || certEntry == nil {
+			t.Fatalf("error on %s for hyphen-based storage path: err: %v, 
entry: %v", name, err, certEntry) + } + } +} + +// Demonstrate that multiple OUs in the name are handled in an +// order-preserving way. +func TestPki_MultipleOUs(t *testing.T) { + t.Parallel() + var b backend + fields := addCACommonFields(map[string]*framework.FieldSchema{}) + + apiData := &framework.FieldData{ + Schema: fields, + Raw: map[string]interface{}{ + "cn": "example.com", + "ttl": 3600, + }, + } + input := &inputBundle{ + apiData: apiData, + role: &roleEntry{ + MaxTTL: 3600, + OU: []string{"Z", "E", "V"}, + }, + } + cb, _, err := generateCreationBundle(&b, input, nil, nil) + if err != nil { + t.Fatalf("Error: %v", err) + } + + expected := []string{"Z", "E", "V"} + actual := cb.Params.Subject.OrganizationalUnit + + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("Expected %v, got %v", expected, actual) + } +} + +func TestPki_PermitFQDNs(t *testing.T) { + t.Parallel() + var b backend + fields := addCACommonFields(map[string]*framework.FieldSchema{}) + + cases := map[string]struct { + input *inputBundle + expectedDnsNames []string + expectedEmails []string + }{ + "base valid case": { + input: &inputBundle{ + apiData: &framework.FieldData{ + Schema: fields, + Raw: map[string]interface{}{ + "common_name": "example.com.", + "ttl": 3600, + }, + }, + role: &roleEntry{ + AllowAnyName: true, + MaxTTL: 3600, + EnforceHostnames: true, + }, + }, + expectedDnsNames: []string{"example.com."}, + expectedEmails: []string{}, + }, + "case insensitivity validation": { + input: &inputBundle{ + apiData: &framework.FieldData{ + Schema: fields, + Raw: map[string]interface{}{ + "common_name": "Example.Net", + "alt_names": "eXaMPLe.COM", + "ttl": 3600, + }, + }, + role: &roleEntry{ + AllowedDomains: []string{"example.net", "EXAMPLE.COM"}, + AllowBareDomains: true, + MaxTTL: 3600, + }, + }, + expectedDnsNames: []string{"Example.Net", "eXaMPLe.COM"}, + expectedEmails: []string{}, + }, + "case email as AllowedDomain with bare domains": { + input: &inputBundle{ + apiData: &framework.FieldData{ + Schema: fields, + Raw: map[string]interface{}{ + "common_name": "test@testemail.com", + "ttl": 3600, + }, + }, + role: &roleEntry{ + AllowedDomains: []string{"test@testemail.com"}, + AllowBareDomains: true, + MaxTTL: 3600, + }, + }, + expectedDnsNames: []string{}, + expectedEmails: []string{"test@testemail.com"}, + }, + "case email common name with bare domains": { + input: &inputBundle{ + apiData: &framework.FieldData{ + Schema: fields, + Raw: map[string]interface{}{ + "common_name": "test@testemail.com", + "ttl": 3600, + }, + }, + role: &roleEntry{ + AllowedDomains: []string{"testemail.com"}, + AllowBareDomains: true, + MaxTTL: 3600, + }, + }, + expectedDnsNames: []string{}, + expectedEmails: []string{"test@testemail.com"}, + }, + } + + for name, testCase := range cases { + name := name + testCase := testCase + t.Run(name, func(t *testing.T) { + cb, _, err := generateCreationBundle(&b, testCase.input, nil, nil) + if err != nil { + t.Fatalf("Error: %v", err) + } + + actualDnsNames := cb.Params.DNSNames + + if !reflect.DeepEqual(testCase.expectedDnsNames, actualDnsNames) { + t.Fatalf("Expected dns names %v, got %v", testCase.expectedDnsNames, actualDnsNames) + } + + actualEmails := cb.Params.EmailAddresses + + if !reflect.DeepEqual(testCase.expectedEmails, actualEmails) { + t.Fatalf("Expected email addresses %v, got %v", testCase.expectedEmails, actualEmails) + } + }) + } +} diff --git a/builtin/logical/pki/chain_test.go b/builtin/logical/pki/chain_test.go new file mode 100644 index 0000000..e76df35 --- 
/dev/null +++ b/builtin/logical/pki/chain_test.go @@ -0,0 +1,1647 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "bytes" + "context" + "crypto/x509" + "crypto/x509/pkix" + "encoding/hex" + "encoding/pem" + "fmt" + "strconv" + "strings" + "testing" + "time" + + "github.com/hashicorp/vault/sdk/logical" +) + +// For speed, all keys are ECDSA. +type CBGenerateKey struct { + Name string +} + +func (c CBGenerateKey) Run(t testing.TB, b *backend, s logical.Storage, knownKeys map[string]string, knownCerts map[string]string) { + resp, err := CBWrite(b, s, "keys/generate/exported", map[string]interface{}{ + "name": c.Name, + "algo": "ec", + "bits": 256, + }) + if err != nil { + t.Fatalf("failed to provision key (%v): %v", c.Name, err) + } + knownKeys[c.Name] = resp.Data["private"].(string) +} + +// Generate a root. +type CBGenerateRoot struct { + Key string + Existing bool + Name string + CommonName string + ErrorMessage string +} + +func (c CBGenerateRoot) Run(t testing.TB, b *backend, s logical.Storage, knownKeys map[string]string, knownCerts map[string]string) { + url := "issuers/generate/root/" + data := make(map[string]interface{}) + + if c.Existing { + url += "existing" + data["key_ref"] = c.Key + } else { + url += "exported" + data["key_type"] = "ec" + data["key_bits"] = 256 + data["key_name"] = c.Key + } + + data["issuer_name"] = c.Name + data["common_name"] = c.Name + if len(c.CommonName) > 0 { + data["common_name"] = c.CommonName + } + + resp, err := CBWrite(b, s, url, data) + if err != nil { + if len(c.ErrorMessage) > 0 { + if !strings.Contains(err.Error(), c.ErrorMessage) { + t.Fatalf("failed to generate root cert for issuer (%v): expected (%v) in error message but got %v", c.Name, c.ErrorMessage, err) + } + return + } + t.Fatalf("failed to provision issuer (%v): %v / body: %v", c.Name, err, data) + } else if len(c.ErrorMessage) > 0 { + t.Fatalf("expected to fail generation of issuer (%v) with error message containing (%v)", c.Name, c.ErrorMessage) + } + + if !c.Existing { + knownKeys[c.Key] = resp.Data["private_key"].(string) + } + + knownCerts[c.Name] = resp.Data["certificate"].(string) + + // Validate key_id matches. + url = "key/" + c.Key + resp, err = CBRead(b, s, url) + if err != nil { + t.Fatalf("failed to fetch key for name %v: %v", c.Key, err) + } + if resp == nil { + t.Fatalf("failed to fetch key for name %v: nil response", c.Key) + } + + expectedKeyId := resp.Data["key_id"] + + url = "issuer/" + c.Name + resp, err = CBRead(b, s, url) + if err != nil { + t.Fatalf("failed to fetch issuer for name %v: %v", c.Name, err) + } + if resp == nil { + t.Fatalf("failed to fetch issuer for name %v: nil response", c.Name) + } + + actualKeyId := resp.Data["key_id"] + if expectedKeyId != actualKeyId { + t.Fatalf("expected issuer %v to have key matching %v but got mismatch: %v vs %v", c.Name, c.Key, actualKeyId, expectedKeyId) + } +} + +// Generate an intermediate. Might not really be an intermediate; might be +// a cross-signed cert. 
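+// A typical step looks like (issuer and key names here are illustrative):
+//
+//	CBGenerateIntermediate{
+//		Key:    "key-int-a",
+//		Name:   "int-a",
+//		Parent: "root-a",
+//	}
+//
+// which generates a fresh EC key and CSR, has Parent sign it via
+// sign-intermediate, imports the result with set-signed, and renames the
+// imported issuer to Name.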
+type CBGenerateIntermediate struct { + Key string + Existing bool + Name string + CommonName string + SKID string + Parent string + ImportErrorMessage string +} + +func (c CBGenerateIntermediate) Run(t testing.TB, b *backend, s logical.Storage, knownKeys map[string]string, knownCerts map[string]string) { + // Build CSR + url := "issuers/generate/intermediate/" + data := make(map[string]interface{}) + + if c.Existing { + url += "existing" + data["key_ref"] = c.Key + } else { + url += "exported" + data["key_type"] = "ec" + data["key_bits"] = 256 + data["key_name"] = c.Key + } + + resp, err := CBWrite(b, s, url, data) + if err != nil { + t.Fatalf("failed to generate CSR for issuer (%v): %v / body: %v", c.Name, err, data) + } + + if !c.Existing { + knownKeys[c.Key] = resp.Data["private_key"].(string) + } + + csr := resp.Data["csr"].(string) + + // Sign CSR + url = fmt.Sprintf("issuer/%s/sign-intermediate", c.Parent) + data = make(map[string]interface{}) + data["csr"] = csr + data["common_name"] = c.Name + if len(c.CommonName) > 0 { + data["common_name"] = c.CommonName + } + if len(c.SKID) > 0 { + // Copy the SKID from an existing, already-issued cert. + otherPEM := knownCerts[c.SKID] + otherCert := ToCertificate(t, otherPEM) + + data["skid"] = hex.EncodeToString(otherCert.SubjectKeyId) + } + + resp, err = CBWrite(b, s, url, data) + if err != nil { + t.Fatalf("failed to sign CSR for issuer (%v): %v / body: %v", c.Name, err, data) + } + + knownCerts[c.Name] = strings.TrimSpace(resp.Data["certificate"].(string)) + + // Verify SKID if one was requested. + if len(c.SKID) > 0 { + otherPEM := knownCerts[c.SKID] + otherCert := ToCertificate(t, otherPEM) + ourCert := ToCertificate(t, knownCerts[c.Name]) + + if !bytes.Equal(otherCert.SubjectKeyId, ourCert.SubjectKeyId) { + t.Fatalf("Expected two certs to have equal SKIDs but differed: them: %v vs us: %v", otherCert.SubjectKeyId, ourCert.SubjectKeyId) + } + } + + // Set the signed intermediate + url = "intermediate/set-signed" + data = make(map[string]interface{}) + data["certificate"] = knownCerts[c.Name] + data["issuer_name"] = c.Name + + resp, err = CBWrite(b, s, url, data) + if err != nil { + if len(c.ImportErrorMessage) > 0 { + if !strings.Contains(err.Error(), c.ImportErrorMessage) { + t.Fatalf("failed to import signed cert for issuer (%v): expected (%v) in error message but got %v", c.Name, c.ImportErrorMessage, err) + } + return + } + + t.Fatalf("failed to import signed cert for issuer (%v): %v / body: %v", c.Name, err, data) + } else if len(c.ImportErrorMessage) > 0 { + t.Fatalf("expected to fail import (with error %v) of cert for issuer (%v) but was success: response: %v", c.ImportErrorMessage, c.Name, resp) + } + + // Update the name since set-signed doesn't actually take an issuer name + // parameter. + rawNewCerts := resp.Data["imported_issuers"].([]string) + if len(rawNewCerts) != 1 { + t.Fatalf("Expected a single new certificate during import of signed cert for %v: got %v\nresp: %v", c.Name, len(rawNewCerts), resp) + } + + newCertId := rawNewCerts[0] + _, err = CBWrite(b, s, "issuer/"+newCertId, map[string]interface{}{ + "issuer_name": c.Name, + }) + if err != nil { + t.Fatalf("failed to update name for issuer (%v/%v): %v", c.Name, newCertId, err) + } + + // Validate key_id matches. 
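+	// key/<name> and issuer/<name> each report a key_id; they must agree,
+	// showing that set-signed linked the imported cert back to the
+	// pre-existing key rather than creating a new key entry.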
+ url = "key/" + c.Key + resp, err = CBRead(b, s, url) + if err != nil { + t.Fatalf("failed to fetch key for name %v: %v", c.Key, err) + } + if resp == nil { + t.Fatalf("failed to fetch key for name %v: nil response", c.Key) + } + + expectedKeyId := resp.Data["key_id"] + + url = "issuer/" + c.Name + resp, err = CBRead(b, s, url) + if err != nil { + t.Fatalf("failed to fetch issuer for name %v: %v", c.Name, err) + } + if resp == nil { + t.Fatalf("failed to fetch issuer for name %v: nil response", c.Name) + } + + actualKeyId := resp.Data["key_id"] + if expectedKeyId != actualKeyId { + t.Fatalf("expected issuer %v to have key matching %v but got mismatch: %v vs %v", c.Name, c.Key, actualKeyId, expectedKeyId) + } +} + +// Delete an issuer; breaks chains. +type CBDeleteIssuer struct { + Issuer string +} + +func (c CBDeleteIssuer) Run(t testing.TB, b *backend, s logical.Storage, knownKeys map[string]string, knownCerts map[string]string) { + url := fmt.Sprintf("issuer/%v", c.Issuer) + _, err := CBDelete(b, s, url) + if err != nil { + t.Fatalf("failed to delete issuer (%v): %v", c.Issuer, err) + } + + delete(knownCerts, c.Issuer) +} + +// Validate the specified chain exists, by name. +type CBValidateChain struct { + Chains map[string][]string + Aliases map[string]string +} + +func (c CBValidateChain) ChainToPEMs(t testing.TB, parent string, chain []string, knownCerts map[string]string) []string { + var result []string + for entryIndex, entry := range chain { + var chainEntry string + modifiedEntry := entry + if entryIndex == 0 && entry == "self" { + modifiedEntry = parent + } + for pattern, replacement := range c.Aliases { + modifiedEntry = strings.ReplaceAll(modifiedEntry, pattern, replacement) + } + for _, issuer := range strings.Split(modifiedEntry, ",") { + cert, ok := knownCerts[issuer] + if !ok { + t.Fatalf("Unknown issuer %v in chain for %v: %v", issuer, parent, chain) + } + + chainEntry += cert + } + result = append(result, chainEntry) + } + + return result +} + +func (c CBValidateChain) FindNameForCert(t testing.TB, cert string, knownCerts map[string]string) string { + for issuer, known := range knownCerts { + if strings.TrimSpace(known) == strings.TrimSpace(cert) { + return issuer + } + } + + t.Fatalf("Unable to find cert:\n[%v]\nin known map:\n%v\n", cert, knownCerts) + return "" +} + +func (c CBValidateChain) PrettyChain(t testing.TB, chain []string, knownCerts map[string]string) []string { + var prettyChain []string + for _, cert := range chain { + prettyChain = append(prettyChain, c.FindNameForCert(t, cert, knownCerts)) + } + + return prettyChain +} + +func ToCertificate(t testing.TB, cert string) *x509.Certificate { + t.Helper() + + block, _ := pem.Decode([]byte(cert)) + if block == nil { + t.Fatalf("Unable to parse certificate: nil PEM block\n[%v]\n", cert) + } + + ret, err := x509.ParseCertificate(block.Bytes) + if err != nil { + t.Fatalf("Unable to parse certificate: %v\n[%v]\n", err, cert) + } + + return ret +} + +func ToCRL(t testing.TB, crl string, issuer *x509.Certificate) *pkix.CertificateList { + t.Helper() + + block, _ := pem.Decode([]byte(crl)) + if block == nil { + t.Fatalf("Unable to parse CRL: nil PEM block\n[%v]\n", crl) + } + + ret, err := x509.ParseCRL(block.Bytes) + if err != nil { + t.Fatalf("Unable to parse CRL: %v\n[%v]\n", err, crl) + } + + if issuer != nil { + if err := issuer.CheckCRLSignature(ret); err != nil { + t.Fatalf("Unable to check CRL signature: %v\n[%v]\n[%v]\n", err, crl, issuer) + } + } + + return ret +} + +func (c CBValidateChain) Run(t 
testing.TB, b *backend, s logical.Storage, knownKeys map[string]string, knownCerts map[string]string) {
+	for issuer, chain := range c.Chains {
+		resp, err := CBRead(b, s, "issuer/"+issuer)
+		if err != nil {
+			t.Fatalf("failed to get chain for issuer (%v): %v", issuer, err)
+		}
+
+		rawCurrentChain := resp.Data["ca_chain"].([]string)
+		var currentChain []string
+		for _, entry := range rawCurrentChain {
+			currentChain = append(currentChain, strings.TrimSpace(entry))
+		}
+
+		// Ensure the issuer cert is always first.
+		if currentChain[0] != knownCerts[issuer] {
+			pretty := c.FindNameForCert(t, currentChain[0], knownCerts)
+			t.Fatalf("expected certificate at index 0 to be self:\n[%v]\n[pretty: %v]\nis not the issuer's cert:\n[%v]\n[pretty: %v]", currentChain[0], pretty, knownCerts[issuer], issuer)
+		}
+
+		// Validate it against the expected chain.
+		expectedChain := c.ChainToPEMs(t, issuer, chain, knownCerts)
+		if len(currentChain) != len(expectedChain) {
+			prettyCurrentChain := c.PrettyChain(t, currentChain, knownCerts)
+			t.Fatalf("Lengths of chains for issuer %v mismatched: got %v vs expected %v:\n[%v]\n[pretty: %v]\n[%v]\n[pretty: %v]", issuer, len(currentChain), len(expectedChain), currentChain, prettyCurrentChain, expectedChain, chain)
+		}
+
+		for currentIndex, currentCert := range currentChain {
+			// Chains might be forked so we may not be able to strictly validate
+			// the chain against a single value. Instead, use strings.Contains
+			// to validate the current cert is in the list of allowed
+			// possibilities.
+			if !strings.Contains(expectedChain[currentIndex], currentCert) {
+				pretty := c.FindNameForCert(t, currentCert, knownCerts)
+				t.Fatalf("chain mismatch at index %v for issuer %v: got cert:\n[%v]\n[pretty: %v]\nbut expected one of\n[%v]\n[pretty: %v]\n", currentIndex, issuer, currentCert, pretty, expectedChain[currentIndex], chain[currentIndex])
+			}
+		}
+
+		// Due to alternate paths, the above doesn't ensure each cert
+		// in the chain is only used once. Validate that now.
+		for thisIndex, thisCert := range currentChain {
+			for otherIndex, otherCert := range currentChain[thisIndex+1:] {
+				if thisCert == otherCert {
+					thisPretty := c.FindNameForCert(t, thisCert, knownCerts)
+					otherPretty := c.FindNameForCert(t, otherCert, knownCerts)
+					otherIndex += thisIndex + 1
+					t.Fatalf("cert reused in chain for %v:\n[%v]\n[pretty: %v / index: %v]\n[%v]\n[pretty: %v / index: %v]\n", issuer, thisCert, thisPretty, thisIndex, otherCert, otherPretty, otherIndex)
+				}
+			}
+		}
+
+		// Finally, validate that all certs verify something that came before
+		// it. In the linear chain sense, this should strictly mean that the
+		// child comes before its parent.
+		for thisIndex, thisCertPem := range currentChain[1:] {
+			thisIndex += 1 // Absolute index.
+			parentCert := ToCertificate(t, thisCertPem)
+
+			// Iterate backwards; prefer the most recent cert to the older
+			// certs.
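+			// childCert.CheckSignatureFrom(parentCert) below verifies the
+			// signature on the earlier chain entry against this cert's
+			// public key, i.e. that this cert issued it.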
+			foundCert := false
+			for otherIndex := thisIndex - 1; otherIndex >= 0; otherIndex-- {
+				otherCertPem := currentChain[otherIndex]
+				childCert := ToCertificate(t, otherCertPem)
+
+				if err := childCert.CheckSignatureFrom(parentCert); err == nil {
+					foundCert = true
+				}
+			}
+
+			if !foundCert {
+				pretty := c.FindNameForCert(t, thisCertPem, knownCerts)
+				t.Fatalf("malformed test scenario: certificate at chain index %v when validating %v does not validate any previous certificates:\n[%v]\n[pretty: %v]\n", thisIndex, issuer, thisCertPem, pretty)
+			}
+		}
+	}
+}
+
+// Update an issuer
+type CBUpdateIssuer struct {
+	Name    string
+	CAChain []string
+	Usage   string
+}
+
+func (c CBUpdateIssuer) Run(t testing.TB, b *backend, s logical.Storage, knownKeys map[string]string, knownCerts map[string]string) {
+	url := "issuer/" + c.Name
+	data := make(map[string]interface{})
+	data["issuer_name"] = c.Name
+
+	resp, err := CBRead(b, s, url)
+	if err != nil {
+		t.Fatalf("failed to read issuer (%v): %v", c.Name, err)
+	}
+
+	if len(c.CAChain) == 1 && c.CAChain[0] == "existing" {
+		data["manual_chain"] = resp.Data["manual_chain"]
+	} else {
+		data["manual_chain"] = c.CAChain
+	}
+
+	if c.Usage == "existing" {
+		data["usage"] = resp.Data["usage"]
+	} else if len(c.Usage) == 0 {
+		data["usage"] = "read-only,issuing-certificates,crl-signing"
+	} else {
+		data["usage"] = c.Usage
+	}
+
+	_, err = CBWrite(b, s, url, data)
+	if err != nil {
+		t.Fatalf("failed to update issuer (%v): %v / body: %v", c.Name, err, data)
+	}
+}
+
+// Issue a leaf, revoke it, and then validate it appears on the CRL.
+type CBIssueLeaf struct {
+	Issuer string
+	Role   string
+}
+
+func (c CBIssueLeaf) IssueLeaf(t testing.TB, b *backend, s logical.Storage, knownKeys map[string]string, knownCerts map[string]string, errorMessage string) *logical.Response {
+	// Write a role
+	url := "roles/" + c.Role
+	data := make(map[string]interface{})
+	data["allow_localhost"] = true
+	data["ttl"] = "200s"
+	data["key_type"] = "ec"
+
+	_, err := CBWrite(b, s, url, data)
+	if err != nil {
+		t.Fatalf("failed to update role (%v): %v / body: %v", c.Role, err, data)
+	}
+
+	// Issue the certificate.
+	url = "issuer/" + c.Issuer + "/issue/" + c.Role
+	data = make(map[string]interface{})
+	data["common_name"] = "localhost"
+
+	resp, err := CBWrite(b, s, url, data)
+	if err != nil {
+		if len(errorMessage) > 0 {
+			if !strings.Contains(err.Error(), errorMessage) {
+				t.Fatalf("failed to issue cert (%v via %v): %v / body: %v\nExpected error message: %v", c.Issuer, c.Role, err, data, errorMessage)
+			}
+
+			return nil
+		}
+
+		t.Fatalf("failed to issue cert (%v via %v): %v / body: %v", c.Issuer, c.Role, err, data)
+	}
+	if resp == nil {
+		t.Fatalf("failed to issue cert (%v via %v): nil response / body: %v", c.Issuer, c.Role, data)
+	}
+
+	raw_cert := resp.Data["certificate"].(string)
+	cert := ToCertificate(t, raw_cert)
+	raw_issuer := resp.Data["issuing_ca"].(string)
+	issuer := ToCertificate(t, raw_issuer)
+
+	// Validate issuer and signatures are good.
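+	// The issuing_ca returned by the API must match the cert we recorded for
+	// this issuer byte-for-byte, and the leaf must verify against it.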
+ if strings.TrimSpace(raw_issuer) != strings.TrimSpace(knownCerts[c.Issuer]) { + t.Fatalf("signing certificate ended with wrong certificate for issuer %v:\n[%v]\n\nvs\n\n[%v]\n", c.Issuer, raw_issuer, knownCerts[c.Issuer]) + } + + if err := cert.CheckSignatureFrom(issuer); err != nil { + t.Fatalf("failed to verify signature on issued certificate from %v: %v\n[%v]\n[%v]\n", c.Issuer, err, raw_cert, raw_issuer) + } + + return resp +} + +func (c CBIssueLeaf) RevokeLeaf(t testing.TB, b *backend, s logical.Storage, knownKeys map[string]string, knownCerts map[string]string, issueResponse *logical.Response, hasCRL bool, isDefault bool) { + api_serial := issueResponse.Data["serial_number"].(string) + raw_cert := issueResponse.Data["certificate"].(string) + cert := ToCertificate(t, raw_cert) + raw_issuer := issueResponse.Data["issuing_ca"].(string) + issuer := ToCertificate(t, raw_issuer) + + // Revoke the certificate. + url := "revoke" + data := make(map[string]interface{}) + data["serial_number"] = api_serial + resp, err := CBWrite(b, s, url, data) + if err != nil { + t.Fatalf("failed to revoke issued certificate (%v) under role %v / issuer %v: %v", api_serial, c.Role, c.Issuer, err) + } + if resp == nil { + t.Fatalf("failed to revoke issued certificate (%v) under role %v / issuer %v: nil response", api_serial, c.Role, c.Issuer) + } + if _, ok := resp.Data["revocation_time"]; !ok { + t.Fatalf("failed to revoke issued certificate (%v) under role %v / issuer %v: expected response parameter revocation_time was missing from response:\n%v", api_serial, c.Role, c.Issuer, resp.Data) + } + + if !hasCRL { + // Nothing further we can test here. We could re-enable CRL building + // and check that it works, but that seems like a stretch. Other + // issuers might be functionally the same as this issuer (and thus + // this CRL will still be issued), but that requires more work to + // check and verify. + return + } + + // Verify it is on this issuer's CRL. + url = "issuer/" + c.Issuer + "/crl" + resp, err = CBRead(b, s, url) + if err != nil { + t.Fatalf("failed to fetch CRL for issuer %v: %v", c.Issuer, err) + } + if resp == nil { + t.Fatalf("failed to fetch CRL for issuer %v: nil response", c.Issuer) + } + + raw_crl := resp.Data["crl"].(string) + crl := ToCRL(t, raw_crl, issuer) + + foundCert := requireSerialNumberInCRL(nil, crl.TBSCertList, api_serial) + if !foundCert { + if !hasCRL && !isDefault { + // Update the issuer we expect to find this on. + resp, err := CBRead(b, s, "config/issuers") + if err != nil { + t.Fatalf("failed to read default issuer config: %v", err) + } + if resp == nil { + t.Fatalf("failed to read default issuer config: nil response") + } + defaultID := resp.Data["default"].(issuerID).String() + c.Issuer = defaultID + issuer = nil + } + + // Verify it is on the default issuer's CRL. + url = "issuer/" + c.Issuer + "/crl" + resp, err = CBRead(b, s, url) + if err != nil { + t.Fatalf("failed to fetch CRL for issuer %v: %v", c.Issuer, err) + } + if resp == nil { + t.Fatalf("failed to fetch CRL for issuer %v: nil response", c.Issuer) + } + + raw_crl = resp.Data["crl"].(string) + crl = ToCRL(t, raw_crl, issuer) + + foundCert = requireSerialNumberInCRL(nil, crl.TBSCertList, api_serial) + } + + if !foundCert { + // If CRL building is broken, this is useful for finding which issuer's + // CRL the revoked cert actually appears on. 
+ for issuerName := range knownCerts { + url = "issuer/" + issuerName + "/crl" + resp, err = CBRead(b, s, url) + if err != nil { + t.Fatalf("failed to fetch CRL for issuer %v: %v", issuerName, err) + } + if resp == nil { + t.Fatalf("failed to fetch CRL for issuer %v: nil response", issuerName) + } + + raw_crl := resp.Data["crl"].(string) + crl := ToCRL(t, raw_crl, nil) + + for index, revoked := range crl.TBSCertList.RevokedCertificates { + // t.Logf("[%v] revoked serial number: %v -- vs -- %v", index, revoked.SerialNumber, cert.SerialNumber) + if revoked.SerialNumber.Cmp(cert.SerialNumber) == 0 { + t.Logf("found revoked cert at index: %v for unexpected issuer: %v", index, issuerName) + break + } + } + } + + t.Fatalf("expected to find certificate with serial [%v] on issuer %v's CRL but was missing: %v revoked certs\n\nCRL:\n[%v]\n\nLeaf:\n[%v]\n\nIssuer (hasCRL: %v):\n[%v]\n", api_serial, c.Issuer, len(crl.TBSCertList.RevokedCertificates), raw_crl, raw_cert, hasCRL, raw_issuer) + } +} + +func (c CBIssueLeaf) Run(t testing.TB, b *backend, s logical.Storage, knownKeys map[string]string, knownCerts map[string]string) { + if len(c.Role) == 0 { + c.Role = "testing" + } + + resp, err := CBRead(b, s, "config/issuers") + if err != nil { + t.Fatalf("failed to read default issuer config: %v", err) + } + if resp == nil { + t.Fatalf("failed to read default issuer config: nil response") + } + defaultID := resp.Data["default"].(issuerID).String() + + resp, err = CBRead(b, s, "issuer/"+c.Issuer) + if err != nil { + t.Fatalf("failed to read issuer %v: %v", c.Issuer, err) + } + if resp == nil { + t.Fatalf("failed to read issuer %v: nil response", c.Issuer) + } + ourID := resp.Data["issuer_id"].(issuerID).String() + areDefault := ourID == defaultID + + for _, usage := range []string{"read-only", "crl-signing", "issuing-certificates", "issuing-certificates,crl-signing"} { + ui := CBUpdateIssuer{ + Name: c.Issuer, + CAChain: []string{"existing"}, + Usage: usage, + } + ui.Run(t, b, s, knownKeys, knownCerts) + + ilError := "requested usage issuing-certificates for issuer" + hasIssuing := strings.Contains(usage, "issuing-certificates") + if hasIssuing { + ilError = "" + } + + hasCRL := strings.Contains(usage, "crl-signing") + + resp := c.IssueLeaf(t, b, s, knownKeys, knownCerts, ilError) + if resp == nil && !hasIssuing { + continue + } + + c.RevokeLeaf(t, b, s, knownKeys, knownCerts, resp, hasCRL, areDefault) + } +} + +// Stable ordering +func ensureStableOrderingOfChains(t testing.TB, b *backend, s logical.Storage, knownKeys map[string]string, knownCerts map[string]string) { + // Start by fetching all chains + certChains := make(map[string][]string) + for issuer := range knownCerts { + resp, err := CBRead(b, s, "issuer/"+issuer) + if err != nil { + t.Fatalf("failed to get chain for issuer (%v): %v", issuer, err) + } + + rawCurrentChain := resp.Data["ca_chain"].([]string) + var currentChain []string + for _, entry := range rawCurrentChain { + currentChain = append(currentChain, strings.TrimSpace(entry)) + } + + certChains[issuer] = currentChain + } + + // Now, generate a bunch of arbitrary roots and validate the chain is + // consistent. 
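+	// Each iteration also times the deletion so the log line at the end can
+	// report min/avg/max chain-rebuild cost across the ten runs.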
+ var runs []time.Duration + for i := 0; i < 10; i++ { + name := "stable-order-root-" + strconv.Itoa(i) + step := CBGenerateRoot{ + Key: name, + Name: name, + } + step.Run(t, b, s, make(map[string]string), make(map[string]string)) + + before := time.Now() + _, err := CBDelete(b, s, "issuer/"+name) + if err != nil { + t.Fatalf("failed to delete temporary testing issuer %v: %v", name, err) + } + after := time.Now() + elapsed := after.Sub(before) + runs = append(runs, elapsed) + + for issuer := range knownCerts { + resp, err := CBRead(b, s, "issuer/"+issuer) + if err != nil { + t.Fatalf("failed to get chain for issuer (%v): %v", issuer, err) + } + + rawCurrentChain := resp.Data["ca_chain"].([]string) + for index, entry := range rawCurrentChain { + if strings.TrimSpace(entry) != certChains[issuer][index] { + t.Fatalf("iteration %d - chain for issuer %v differed at index %d\n%v\nvs\n%v", i, issuer, index, entry, certChains[issuer][index]) + } + } + } + } + + min := runs[0] + max := runs[0] + var avg time.Duration + for _, run := range runs { + if run < min { + min = run + } + + if run > max { + max = run + } + + avg += run + } + avg = avg / time.Duration(len(runs)) + + t.Logf("Chain building run time (deletion) - min: %v / avg: %v / max: %v - entries: %v", min, avg, max, runs) +} + +type CBTestStep interface { + Run(t testing.TB, b *backend, s logical.Storage, knownKeys map[string]string, knownCerts map[string]string) +} + +type CBTestScenario struct { + Steps []CBTestStep +} + +var chainBuildingTestCases = []CBTestScenario{ + { + // This test builds up two cliques lined by a cycle, dropping into + // a single intermediate. + Steps: []CBTestStep{ + // Create a reissued certificate using the same key. These + // should validate themselves. + CBGenerateRoot{ + Key: "key-root-old", + Name: "root-old-a", + CommonName: "root-old", + }, + CBValidateChain{ + Chains: map[string][]string{ + "root-old-a": {"self"}, + }, + }, + // After adding the second root using the same key and common + // name, there should now be two certs in each chain. + CBGenerateRoot{ + Key: "key-root-old", + Existing: true, + Name: "root-old-b", + CommonName: "root-old", + }, + CBValidateChain{ + Chains: map[string][]string{ + "root-old-a": {"self", "root-old-b"}, + "root-old-b": {"self", "root-old-a"}, + }, + }, + // After adding a third root, there are now two possibilities for + // each later chain entry. + CBGenerateRoot{ + Key: "key-root-old", + Existing: true, + Name: "root-old-c", + CommonName: "root-old", + }, + CBValidateChain{ + Chains: map[string][]string{ + "root-old-a": {"self", "root-old-bc", "root-old-bc"}, + "root-old-b": {"self", "root-old-ac", "root-old-ac"}, + "root-old-c": {"self", "root-old-ab", "root-old-ab"}, + }, + Aliases: map[string]string{ + "root-old-ac": "root-old-a,root-old-c", + "root-old-ab": "root-old-a,root-old-b", + "root-old-bc": "root-old-b,root-old-c", + }, + }, + // If we generate an unrelated issuer, it shouldn't affect either + // chain. 
+ CBGenerateRoot{ + Key: "key-root-new", + Name: "root-new-a", + CommonName: "root-new", + }, + CBValidateChain{ + Chains: map[string][]string{ + "root-old-a": {"self", "root-old-bc", "root-old-bc"}, + "root-old-b": {"self", "root-old-ac", "root-old-ac"}, + "root-old-c": {"self", "root-old-ab", "root-old-ab"}, + "root-new-a": {"self"}, + }, + Aliases: map[string]string{ + "root-old-ac": "root-old-a,root-old-c", + "root-old-ab": "root-old-a,root-old-b", + "root-old-bc": "root-old-b,root-old-c", + }, + }, + // Reissuing this new root should form another clique. + CBGenerateRoot{ + Key: "key-root-new", + Existing: true, + Name: "root-new-b", + CommonName: "root-new", + }, + CBValidateChain{ + Chains: map[string][]string{ + "root-old-a": {"self", "root-old-bc", "root-old-bc"}, + "root-old-b": {"self", "root-old-ac", "root-old-ac"}, + "root-old-c": {"self", "root-old-ab", "root-old-ab"}, + "root-new-a": {"self", "root-new-b"}, + "root-new-b": {"self", "root-new-a"}, + }, + Aliases: map[string]string{ + "root-old-ac": "root-old-a,root-old-c", + "root-old-ab": "root-old-a,root-old-b", + "root-old-bc": "root-old-b,root-old-c", + }, + }, + // Generating a cross-signed cert from old->new should result + // in all old clique certs showing up in the new root's paths. + // This does not form a cycle. + CBGenerateIntermediate{ + // In order to validate the existing root-new clique, we + // have to reuse the key and common name here for + // cross-signing. + Key: "key-root-new", + Existing: true, + Name: "cross-old-new", + CommonName: "root-new", + SKID: "root-new-a", + // Which old issuer is used here doesn't matter as they have + // the same CN and key. + Parent: "root-old-a", + }, + CBValidateChain{ + Chains: map[string][]string{ + "root-old-a": {"self", "root-old-bc", "root-old-bc"}, + "root-old-b": {"self", "root-old-ac", "root-old-ac"}, + "root-old-c": {"self", "root-old-ab", "root-old-ab"}, + "cross-old-new": {"self", "root-old-abc", "root-old-abc", "root-old-abc"}, + "root-new-a": {"self", "root-new-b", "cross-old-new", "root-old-abc", "root-old-abc", "root-old-abc"}, + "root-new-b": {"self", "root-new-a", "cross-old-new", "root-old-abc", "root-old-abc", "root-old-abc"}, + }, + Aliases: map[string]string{ + "root-old-ac": "root-old-a,root-old-c", + "root-old-ab": "root-old-a,root-old-b", + "root-old-bc": "root-old-b,root-old-c", + "root-old-abc": "root-old-a,root-old-b,root-old-c", + }, + }, + // If we create a new intermediate off of the root-new, we should + // simply add to the existing chain. + CBGenerateIntermediate{ + Key: "key-inter-a-root-new", + Name: "inter-a-root-new", + Parent: "root-new-a", + }, + CBValidateChain{ + Chains: map[string][]string{ + "root-old-a": {"self", "root-old-bc", "root-old-bc"}, + "root-old-b": {"self", "root-old-ac", "root-old-ac"}, + "root-old-c": {"self", "root-old-ab", "root-old-ab"}, + "cross-old-new": {"self", "root-old-abc", "root-old-abc", "root-old-abc"}, + "root-new-a": {"self", "root-new-b", "cross-old-new", "root-old-abc", "root-old-abc", "root-old-abc"}, + "root-new-b": {"self", "root-new-a", "cross-old-new", "root-old-abc", "root-old-abc", "root-old-abc"}, + // If we find cross-old-new first, the old clique will be ahead + // of the new clique; otherwise the new clique will appear first. 
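+					// Hence every non-self slot below uses the full-cycle
+					// alias, which expands to all six possible issuers.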
+ "inter-a-root-new": {"self", "full-cycle", "full-cycle", "full-cycle", "full-cycle", "full-cycle", "full-cycle"}, + }, + Aliases: map[string]string{ + "root-old-ac": "root-old-a,root-old-c", + "root-old-ab": "root-old-a,root-old-b", + "root-old-bc": "root-old-b,root-old-c", + "root-old-abc": "root-old-a,root-old-b,root-old-c", + "full-cycle": "root-old-a,root-old-b,root-old-c,cross-old-new,root-new-a,root-new-b", + }, + }, + // Now, if we cross-sign back from new to old, we should + // form cycle with multiple reissued cliques. This means + // all nodes will have the same chain. + CBGenerateIntermediate{ + // In order to validate the existing root-old clique, we + // have to reuse the key and common name here for + // cross-signing. + Key: "key-root-old", + Existing: true, + Name: "cross-new-old", + CommonName: "root-old", + SKID: "root-old-a", + // Which new issuer is used here doesn't matter as they have + // the same CN and key. + Parent: "root-new-a", + }, + CBValidateChain{ + Chains: map[string][]string{ + "root-old-a": {"self", "root-old-bc", "root-old-bc", "both-cross-old-new", "both-cross-old-new", "root-new-ab", "root-new-ab"}, + "root-old-b": {"self", "root-old-ac", "root-old-ac", "both-cross-old-new", "both-cross-old-new", "root-new-ab", "root-new-ab"}, + "root-old-c": {"self", "root-old-ab", "root-old-ab", "both-cross-old-new", "both-cross-old-new", "root-new-ab", "root-new-ab"}, + "cross-old-new": {"self", "cross-new-old", "both-cliques", "both-cliques", "both-cliques", "both-cliques", "both-cliques"}, + "cross-new-old": {"self", "cross-old-new", "both-cliques", "both-cliques", "both-cliques", "both-cliques", "both-cliques"}, + "root-new-a": {"self", "root-new-b", "both-cross-old-new", "both-cross-old-new", "root-old-abc", "root-old-abc", "root-old-abc"}, + "root-new-b": {"self", "root-new-a", "both-cross-old-new", "both-cross-old-new", "root-old-abc", "root-old-abc", "root-old-abc"}, + "inter-a-root-new": {"self", "full-cycle", "full-cycle", "full-cycle", "full-cycle", "full-cycle", "full-cycle", "full-cycle"}, + }, + Aliases: map[string]string{ + "root-old-ac": "root-old-a,root-old-c", + "root-old-ab": "root-old-a,root-old-b", + "root-old-bc": "root-old-b,root-old-c", + "root-old-abc": "root-old-a,root-old-b,root-old-c", + "root-new-ab": "root-new-a,root-new-b", + "both-cross-old-new": "cross-old-new,cross-new-old", + "both-cliques": "root-old-a,root-old-b,root-old-c,root-new-a,root-new-b", + "full-cycle": "root-old-a,root-old-b,root-old-c,cross-old-new,cross-new-old,root-new-a,root-new-b", + }, + }, + // Update each old root to only include itself. 
+ CBUpdateIssuer{ + Name: "root-old-a", + CAChain: []string{"root-old-a"}, + }, + CBUpdateIssuer{ + Name: "root-old-b", + CAChain: []string{"root-old-b"}, + }, + CBUpdateIssuer{ + Name: "root-old-c", + CAChain: []string{"root-old-c"}, + }, + // Step 19 + CBValidateChain{ + Chains: map[string][]string{ + "root-old-a": {"self"}, + "root-old-b": {"self"}, + "root-old-c": {"self"}, + "cross-old-new": {"self", "cross-new-old", "both-cliques", "both-cliques", "both-cliques", "both-cliques", "both-cliques"}, + "cross-new-old": {"self", "cross-old-new", "both-cliques", "both-cliques", "both-cliques", "both-cliques", "both-cliques"}, + "root-new-a": {"self", "root-new-b", "both-cross-old-new", "both-cross-old-new", "root-old-abc", "root-old-abc", "root-old-abc"}, + "root-new-b": {"self", "root-new-a", "both-cross-old-new", "both-cross-old-new", "root-old-abc", "root-old-abc", "root-old-abc"}, + "inter-a-root-new": {"self", "full-cycle", "full-cycle", "full-cycle", "full-cycle", "full-cycle", "full-cycle", "full-cycle"}, + }, + Aliases: map[string]string{ + "root-old-ac": "root-old-a,root-old-c", + "root-old-ab": "root-old-a,root-old-b", + "root-old-bc": "root-old-b,root-old-c", + "root-old-abc": "root-old-a,root-old-b,root-old-c", + "root-new-ab": "root-new-a,root-new-b", + "both-cross-old-new": "cross-old-new,cross-new-old", + "both-cliques": "root-old-a,root-old-b,root-old-c,root-new-a,root-new-b", + "full-cycle": "root-old-a,root-old-b,root-old-c,cross-old-new,cross-new-old,root-new-a,root-new-b", + }, + }, + // Reset the old roots; should get the original chains back. + CBUpdateIssuer{ + Name: "root-old-a", + }, + CBUpdateIssuer{ + Name: "root-old-b", + }, + CBUpdateIssuer{ + Name: "root-old-c", + }, + CBValidateChain{ + Chains: map[string][]string{ + "root-old-a": {"self", "root-old-bc", "root-old-bc", "both-cross-old-new", "both-cross-old-new", "root-new-ab", "root-new-ab"}, + "root-old-b": {"self", "root-old-ac", "root-old-ac", "both-cross-old-new", "both-cross-old-new", "root-new-ab", "root-new-ab"}, + "root-old-c": {"self", "root-old-ab", "root-old-ab", "both-cross-old-new", "both-cross-old-new", "root-new-ab", "root-new-ab"}, + "cross-old-new": {"self", "cross-new-old", "both-cliques", "both-cliques", "both-cliques", "both-cliques", "both-cliques"}, + "cross-new-old": {"self", "cross-old-new", "both-cliques", "both-cliques", "both-cliques", "both-cliques", "both-cliques"}, + "root-new-a": {"self", "root-new-b", "both-cross-old-new", "both-cross-old-new", "root-old-abc", "root-old-abc", "root-old-abc"}, + "root-new-b": {"self", "root-new-a", "both-cross-old-new", "both-cross-old-new", "root-old-abc", "root-old-abc", "root-old-abc"}, + "inter-a-root-new": {"self", "full-cycle", "full-cycle", "full-cycle", "full-cycle", "full-cycle", "full-cycle", "full-cycle"}, + }, + Aliases: map[string]string{ + "root-old-ac": "root-old-a,root-old-c", + "root-old-ab": "root-old-a,root-old-b", + "root-old-bc": "root-old-b,root-old-c", + "root-old-abc": "root-old-a,root-old-b,root-old-c", + "root-new-ab": "root-new-a,root-new-b", + "both-cross-old-new": "cross-old-new,cross-new-old", + "both-cliques": "root-old-a,root-old-b,root-old-c,root-new-a,root-new-b", + "full-cycle": "root-old-a,root-old-b,root-old-c,cross-old-new,cross-new-old,root-new-a,root-new-b", + }, + }, + CBIssueLeaf{Issuer: "root-old-a"}, + CBIssueLeaf{Issuer: "root-old-b"}, + CBIssueLeaf{Issuer: "root-old-c"}, + CBIssueLeaf{Issuer: "cross-old-new"}, + CBIssueLeaf{Issuer: "cross-new-old"}, + CBIssueLeaf{Issuer: "root-new-a"}, + 
CBIssueLeaf{Issuer: "root-new-b"}, + CBIssueLeaf{Issuer: "inter-a-root-new"}, + }, + }, + { + // Here we're testing our chain capacity. First we'll create a + // bunch of unique roots to form a cycle of length 10. + Steps: []CBTestStep{ + CBGenerateRoot{ + Key: "key-root-a", + Name: "root-a", + CommonName: "root-a", + }, + CBGenerateRoot{ + Key: "key-root-b", + Name: "root-b", + CommonName: "root-b", + }, + CBGenerateRoot{ + Key: "key-root-c", + Name: "root-c", + CommonName: "root-c", + }, + CBGenerateRoot{ + Key: "key-root-d", + Name: "root-d", + CommonName: "root-d", + }, + CBGenerateRoot{ + Key: "key-root-e", + Name: "root-e", + CommonName: "root-e", + }, + // They should all be disjoint to start. + CBValidateChain{ + Chains: map[string][]string{ + "root-a": {"self"}, + "root-b": {"self"}, + "root-c": {"self"}, + "root-d": {"self"}, + "root-e": {"self"}, + }, + }, + // Start the cross-signing chains. These are all linear, so there's + // no error expected; they're just long. + CBGenerateIntermediate{ + Key: "key-root-b", + Existing: true, + Name: "cross-a-b", + CommonName: "root-b", + Parent: "root-a", + }, + CBValidateChain{ + Chains: map[string][]string{ + "root-a": {"self"}, + "cross-a-b": {"self", "root-a"}, + "root-b": {"self", "cross-a-b", "root-a"}, + "root-c": {"self"}, + "root-d": {"self"}, + "root-e": {"self"}, + }, + }, + CBGenerateIntermediate{ + Key: "key-root-c", + Existing: true, + Name: "cross-b-c", + CommonName: "root-c", + Parent: "root-b", + }, + CBValidateChain{ + Chains: map[string][]string{ + "root-a": {"self"}, + "cross-a-b": {"self", "root-a"}, + "root-b": {"self", "cross-a-b", "root-a"}, + "cross-b-c": {"self", "b-or-cross", "b-chained-cross", "b-chained-cross"}, + "root-c": {"self", "cross-b-c", "b-or-cross", "b-chained-cross", "b-chained-cross"}, + "root-d": {"self"}, + "root-e": {"self"}, + }, + Aliases: map[string]string{ + "b-or-cross": "root-b,cross-a-b", + "b-chained-cross": "root-b,cross-a-b,root-a", + }, + }, + CBGenerateIntermediate{ + Key: "key-root-d", + Existing: true, + Name: "cross-c-d", + CommonName: "root-d", + Parent: "root-c", + }, + CBValidateChain{ + Chains: map[string][]string{ + "root-a": {"self"}, + "cross-a-b": {"self", "root-a"}, + "root-b": {"self", "cross-a-b", "root-a"}, + "cross-b-c": {"self", "b-or-cross", "b-chained-cross", "b-chained-cross"}, + "root-c": {"self", "cross-b-c", "b-or-cross", "b-chained-cross", "b-chained-cross"}, + "cross-c-d": {"self", "c-or-cross", "c-chained-cross", "c-chained-cross", "c-chained-cross", "c-chained-cross"}, + "root-d": {"self", "cross-c-d", "c-or-cross", "c-chained-cross", "c-chained-cross", "c-chained-cross", "c-chained-cross"}, + "root-e": {"self"}, + }, + Aliases: map[string]string{ + "b-or-cross": "root-b,cross-a-b", + "b-chained-cross": "root-b,cross-a-b,root-a", + "c-or-cross": "root-c,cross-b-c", + "c-chained-cross": "root-c,cross-b-c,root-b,cross-a-b,root-a", + }, + }, + CBGenerateIntermediate{ + Key: "key-root-e", + Existing: true, + Name: "cross-d-e", + CommonName: "root-e", + Parent: "root-d", + }, + CBValidateChain{ + Chains: map[string][]string{ + "root-a": {"self"}, + "cross-a-b": {"self", "root-a"}, + "root-b": {"self", "cross-a-b", "root-a"}, + "cross-b-c": {"self", "b-or-cross", "b-chained-cross", "b-chained-cross"}, + "root-c": {"self", "cross-b-c", "b-or-cross", "b-chained-cross", "b-chained-cross"}, + "cross-c-d": {"self", "c-or-cross", "c-chained-cross", "c-chained-cross", "c-chained-cross", "c-chained-cross"}, + "root-d": {"self", "cross-c-d", "c-or-cross", 
"c-chained-cross", "c-chained-cross", "c-chained-cross", "c-chained-cross"}, + "cross-d-e": {"self", "d-or-cross", "d-chained-cross", "d-chained-cross", "d-chained-cross", "d-chained-cross", "d-chained-cross", "d-chained-cross"}, + "root-e": {"self", "cross-d-e", "d-or-cross", "d-chained-cross", "d-chained-cross", "d-chained-cross", "d-chained-cross", "d-chained-cross", "d-chained-cross"}, + }, + Aliases: map[string]string{ + "b-or-cross": "root-b,cross-a-b", + "b-chained-cross": "root-b,cross-a-b,root-a", + "c-or-cross": "root-c,cross-b-c", + "c-chained-cross": "root-c,cross-b-c,root-b,cross-a-b,root-a", + "d-or-cross": "root-d,cross-c-d", + "d-chained-cross": "root-d,cross-c-d,root-c,cross-b-c,root-b,cross-a-b,root-a", + }, + }, + CBIssueLeaf{Issuer: "root-a"}, + CBIssueLeaf{Issuer: "cross-a-b"}, + CBIssueLeaf{Issuer: "root-b"}, + CBIssueLeaf{Issuer: "cross-b-c"}, + CBIssueLeaf{Issuer: "root-c"}, + CBIssueLeaf{Issuer: "cross-c-d"}, + CBIssueLeaf{Issuer: "root-d"}, + CBIssueLeaf{Issuer: "cross-d-e"}, + CBIssueLeaf{Issuer: "root-e"}, + // Importing the new e->a cross fails because the cycle + // it builds is too long. + CBGenerateIntermediate{ + Key: "key-root-a", + Existing: true, + Name: "cross-e-a", + CommonName: "root-a", + Parent: "root-e", + ImportErrorMessage: "exceeds max size", + }, + // Deleting any root and one of its crosses (either a->b or b->c) + // should fix this. + CBDeleteIssuer{"root-b"}, + CBDeleteIssuer{"cross-b-c"}, + // Importing the new e->a cross fails because the cycle + // it builds is too long. + CBGenerateIntermediate{ + Key: "key-root-a", + Existing: true, + Name: "cross-e-a", + CommonName: "root-a", + Parent: "root-e", + }, + CBIssueLeaf{Issuer: "root-a"}, + CBIssueLeaf{Issuer: "cross-a-b"}, + CBIssueLeaf{Issuer: "root-c"}, + CBIssueLeaf{Issuer: "cross-c-d"}, + CBIssueLeaf{Issuer: "root-d"}, + CBIssueLeaf{Issuer: "cross-d-e"}, + CBIssueLeaf{Issuer: "root-e"}, + CBIssueLeaf{Issuer: "cross-e-a"}, + }, + }, + { + // Here we're testing our clique capacity. First we'll create a + // bunch of unique roots to form a cycle of length 10. + Steps: []CBTestStep{ + CBGenerateRoot{ + Key: "key-root", + Name: "root-a", + CommonName: "root", + }, + CBGenerateRoot{ + Key: "key-root", + Existing: true, + Name: "root-b", + CommonName: "root", + }, + CBGenerateRoot{ + Key: "key-root", + Existing: true, + Name: "root-c", + CommonName: "root", + }, + CBGenerateRoot{ + Key: "key-root", + Existing: true, + Name: "root-d", + CommonName: "root", + }, + CBGenerateRoot{ + Key: "key-root", + Existing: true, + Name: "root-e", + CommonName: "root", + }, + CBGenerateRoot{ + Key: "key-root", + Existing: true, + Name: "root-f", + CommonName: "root", + }, + CBIssueLeaf{Issuer: "root-a"}, + CBIssueLeaf{Issuer: "root-b"}, + CBIssueLeaf{Issuer: "root-c"}, + CBIssueLeaf{Issuer: "root-d"}, + CBIssueLeaf{Issuer: "root-e"}, + CBIssueLeaf{Issuer: "root-f"}, + // Seventh reissuance fails. + CBGenerateRoot{ + Key: "key-root", + Existing: true, + Name: "root-g", + CommonName: "root", + ErrorMessage: "excessively reissued certificate", + }, + // Deleting one and trying again should succeed. 
+ CBDeleteIssuer{"root-a"}, + CBGenerateRoot{ + Key: "key-root", + Existing: true, + Name: "root-g", + CommonName: "root", + }, + CBIssueLeaf{Issuer: "root-b"}, + CBIssueLeaf{Issuer: "root-c"}, + CBIssueLeaf{Issuer: "root-d"}, + CBIssueLeaf{Issuer: "root-e"}, + CBIssueLeaf{Issuer: "root-f"}, + CBIssueLeaf{Issuer: "root-g"}, + }, + }, + { + // There's one more pathological case here: we have a cycle + // which validates a clique/cycle via cross-signing. We call + // the parent cycle new roots and the child cycle/clique the + // old roots. + Steps: []CBTestStep{ + // New Cycle + CBGenerateRoot{ + Key: "key-root-new-a", + Name: "root-new-a", + }, + CBGenerateRoot{ + Key: "key-root-new-b", + Name: "root-new-b", + }, + CBGenerateIntermediate{ + Key: "key-root-new-b", + Existing: true, + Name: "cross-root-new-b-sig-a", + CommonName: "root-new-b", + Parent: "root-new-a", + }, + CBGenerateIntermediate{ + Key: "key-root-new-a", + Existing: true, + Name: "cross-root-new-a-sig-b", + CommonName: "root-new-a", + Parent: "root-new-b", + }, + // Old Cycle + Clique + CBGenerateRoot{ + Key: "key-root-old-a", + Name: "root-old-a", + }, + CBGenerateRoot{ + Key: "key-root-old-a", + Existing: true, + Name: "root-old-a-reissued", + CommonName: "root-old-a", + }, + CBGenerateRoot{ + Key: "key-root-old-b", + Name: "root-old-b", + }, + CBGenerateRoot{ + Key: "key-root-old-b", + Existing: true, + Name: "root-old-b-reissued", + CommonName: "root-old-b", + }, + CBGenerateIntermediate{ + Key: "key-root-old-b", + Existing: true, + Name: "cross-root-old-b-sig-a", + CommonName: "root-old-b", + Parent: "root-old-a", + }, + CBGenerateIntermediate{ + Key: "key-root-old-a", + Existing: true, + Name: "cross-root-old-a-sig-b", + CommonName: "root-old-a", + Parent: "root-old-b", + }, + // Validate the chains are separate before linking them. 
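+			// At this point there are two disjoint components: the new cycle
+			// (two roots cross-signing each other) and the old clique+cycle
+			// (two reissued root pairs joined by a pair of crosses).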
+ CBValidateChain{ + Chains: map[string][]string{ + // New stuff + "root-new-a": {"self", "cross-root-new-a-sig-b", "root-new-b-or-cross", "root-new-b-or-cross"}, + "root-new-b": {"self", "cross-root-new-b-sig-a", "root-new-a-or-cross", "root-new-a-or-cross"}, + "cross-root-new-b-sig-a": {"self", "any-root-new", "any-root-new", "any-root-new"}, + "cross-root-new-a-sig-b": {"self", "any-root-new", "any-root-new", "any-root-new"}, + + // Old stuff + "root-old-a": {"self", "root-old-a-reissued", "cross-root-old-a-sig-b", "cross-root-old-b-sig-a", "both-root-old-b", "both-root-old-b"}, + "root-old-a-reissued": {"self", "root-old-a", "cross-root-old-a-sig-b", "cross-root-old-b-sig-a", "both-root-old-b", "both-root-old-b"}, + "root-old-b": {"self", "root-old-b-reissued", "cross-root-old-b-sig-a", "cross-root-old-a-sig-b", "both-root-old-a", "both-root-old-a"}, + "root-old-b-reissued": {"self", "root-old-b", "cross-root-old-b-sig-a", "cross-root-old-a-sig-b", "both-root-old-a", "both-root-old-a"}, + "cross-root-old-b-sig-a": {"self", "all-root-old", "all-root-old", "all-root-old", "all-root-old", "all-root-old"}, + "cross-root-old-a-sig-b": {"self", "all-root-old", "all-root-old", "all-root-old", "all-root-old", "all-root-old"}, + }, + Aliases: map[string]string{ + "root-new-a-or-cross": "root-new-a,cross-root-new-a-sig-b", + "root-new-b-or-cross": "root-new-b,cross-root-new-b-sig-a", + "both-root-new": "root-new-a,root-new-b", + "any-root-new": "root-new-a,cross-root-new-a-sig-b,root-new-b,cross-root-new-b-sig-a", + "both-root-old-a": "root-old-a,root-old-a-reissued", + "both-root-old-b": "root-old-b,root-old-b-reissued", + "all-root-old": "root-old-a,root-old-a-reissued,root-old-b,root-old-b-reissued,cross-root-old-b-sig-a,cross-root-old-a-sig-b", + }, + }, + // Finally, generate an intermediate to link new->old. We + // link root-new-a into root-old-a. + CBGenerateIntermediate{ + Key: "key-root-old-a", + Existing: true, + Name: "cross-root-old-a-sig-root-new-a", + CommonName: "root-old-a", + Parent: "root-new-a", + }, + CBValidateChain{ + Chains: map[string][]string{ + // New stuff should be unchanged. 
+ "root-new-a": {"self", "cross-root-new-a-sig-b", "root-new-b-or-cross", "root-new-b-or-cross"}, + "root-new-b": {"self", "cross-root-new-b-sig-a", "root-new-a-or-cross", "root-new-a-or-cross"}, + "cross-root-new-b-sig-a": {"self", "any-root-new", "any-root-new", "any-root-new"}, + "cross-root-new-a-sig-b": {"self", "any-root-new", "any-root-new", "any-root-new"}, + + // Old stuff + "root-old-a": {"self", "root-old-a-reissued", "cross-root-old-a-sig-b", "cross-root-old-b-sig-a", "both-root-old-b", "both-root-old-b", "cross-root-old-a-sig-root-new-a", "any-root-new", "any-root-new", "any-root-new", "any-root-new"}, + "root-old-a-reissued": {"self", "root-old-a", "cross-root-old-a-sig-b", "cross-root-old-b-sig-a", "both-root-old-b", "both-root-old-b", "cross-root-old-a-sig-root-new-a", "any-root-new", "any-root-new", "any-root-new", "any-root-new"}, + "root-old-b": {"self", "root-old-b-reissued", "cross-root-old-b-sig-a", "cross-root-old-a-sig-b", "both-root-old-a", "both-root-old-a", "cross-root-old-a-sig-root-new-a", "any-root-new", "any-root-new", "any-root-new", "any-root-new"}, + "root-old-b-reissued": {"self", "root-old-b", "cross-root-old-b-sig-a", "cross-root-old-a-sig-b", "both-root-old-a", "both-root-old-a", "cross-root-old-a-sig-root-new-a", "any-root-new", "any-root-new", "any-root-new", "any-root-new"}, + "cross-root-old-b-sig-a": {"self", "all-root-old", "all-root-old", "all-root-old", "all-root-old", "all-root-old", "cross-root-old-a-sig-root-new-a", "any-root-new", "any-root-new", "any-root-new", "any-root-new"}, + "cross-root-old-a-sig-b": {"self", "all-root-old", "all-root-old", "all-root-old", "all-root-old", "all-root-old", "cross-root-old-a-sig-root-new-a", "any-root-new", "any-root-new", "any-root-new", "any-root-new"}, + + // Link + "cross-root-old-a-sig-root-new-a": {"self", "root-new-a-or-cross", "any-root-new", "any-root-new", "any-root-new"}, + }, + Aliases: map[string]string{ + "root-new-a-or-cross": "root-new-a,cross-root-new-a-sig-b", + "root-new-b-or-cross": "root-new-b,cross-root-new-b-sig-a", + "both-root-new": "root-new-a,root-new-b", + "any-root-new": "root-new-a,cross-root-new-a-sig-b,root-new-b,cross-root-new-b-sig-a", + "both-root-old-a": "root-old-a,root-old-a-reissued", + "both-root-old-b": "root-old-b,root-old-b-reissued", + "all-root-old": "root-old-a,root-old-a-reissued,root-old-b,root-old-b-reissued,cross-root-old-b-sig-a,cross-root-old-a-sig-b", + }, + }, + CBIssueLeaf{Issuer: "root-new-a"}, + CBIssueLeaf{Issuer: "root-new-b"}, + CBIssueLeaf{Issuer: "cross-root-new-b-sig-a"}, + CBIssueLeaf{Issuer: "cross-root-new-a-sig-b"}, + CBIssueLeaf{Issuer: "root-old-a"}, + CBIssueLeaf{Issuer: "root-old-a-reissued"}, + CBIssueLeaf{Issuer: "root-old-b"}, + CBIssueLeaf{Issuer: "root-old-b-reissued"}, + CBIssueLeaf{Issuer: "cross-root-old-b-sig-a"}, + CBIssueLeaf{Issuer: "cross-root-old-a-sig-b"}, + CBIssueLeaf{Issuer: "cross-root-old-a-sig-root-new-a"}, + }, + }, + { + // Test a dual-root of trust chaining example with different + // lengths of chains. 
+ Steps: []CBTestStep{ + CBGenerateRoot{ + Key: "key-root-new", + Name: "root-new", + }, + CBGenerateIntermediate{ + Key: "key-inter-new", + Name: "inter-new", + Parent: "root-new", + }, + CBGenerateRoot{ + Key: "key-root-old", + Name: "root-old", + }, + CBGenerateIntermediate{ + Key: "key-inter-old-a", + Name: "inter-old-a", + Parent: "root-old", + }, + CBGenerateIntermediate{ + Key: "key-inter-old-b", + Name: "inter-old-b", + Parent: "inter-old-a", + }, + // Now generate a cross-signed intermediate to merge these + // two chains. + CBGenerateIntermediate{ + Key: "key-cross-old-new", + Name: "cross-old-new-signed-new", + CommonName: "cross-old-new", + Parent: "inter-new", + }, + CBGenerateIntermediate{ + Key: "key-cross-old-new", + Existing: true, + Name: "cross-old-new-signed-old", + CommonName: "cross-old-new", + Parent: "inter-old-b", + }, + CBGenerateIntermediate{ + Key: "key-leaf-inter", + Name: "leaf-inter", + Parent: "cross-old-new-signed-new", + }, + CBValidateChain{ + Chains: map[string][]string{ + "root-new": {"self"}, + "inter-new": {"self", "root-new"}, + "cross-old-new-signed-new": {"self", "inter-new", "root-new"}, + "root-old": {"self"}, + "inter-old-a": {"self", "root-old"}, + "inter-old-b": {"self", "inter-old-a", "root-old"}, + "cross-old-new-signed-old": {"self", "inter-old-b", "inter-old-a", "root-old"}, + "leaf-inter": {"self", "either-cross", "one-intermediate", "other-inter-or-root", "everything-else", "everything-else", "everything-else", "everything-else"}, + }, + Aliases: map[string]string{ + "either-cross": "cross-old-new-signed-new,cross-old-new-signed-old", + "one-intermediate": "inter-new,inter-old-b", + "other-inter-or-root": "root-new,inter-old-a", + "everything-else": "cross-old-new-signed-new,cross-old-new-signed-old,inter-new,inter-old-b,root-new,inter-old-a,root-old", + }, + }, + CBIssueLeaf{Issuer: "root-new"}, + CBIssueLeaf{Issuer: "inter-new"}, + CBIssueLeaf{Issuer: "root-old"}, + CBIssueLeaf{Issuer: "inter-old-a"}, + CBIssueLeaf{Issuer: "inter-old-b"}, + CBIssueLeaf{Issuer: "cross-old-new-signed-new"}, + CBIssueLeaf{Issuer: "cross-old-new-signed-old"}, + CBIssueLeaf{Issuer: "leaf-inter"}, + }, + }, + { + // Test just a single root. + Steps: []CBTestStep{ + CBGenerateRoot{ + Key: "key-root", + Name: "root", + }, + CBValidateChain{ + Chains: map[string][]string{ + "root": {"self"}, + }, + }, + CBIssueLeaf{Issuer: "root"}, + }, + }, + { + // Test root + intermediate. + Steps: []CBTestStep{ + CBGenerateRoot{ + Key: "key-root", + Name: "root", + }, + CBGenerateIntermediate{ + Key: "key-inter", + Name: "inter", + Parent: "root", + }, + CBValidateChain{ + Chains: map[string][]string{ + "root": {"self"}, + "inter": {"self", "root"}, + }, + }, + CBIssueLeaf{Issuer: "root"}, + CBIssueLeaf{Issuer: "inter"}, + }, + }, + { + // Test root + intermediate, twice (simulating rotation without + // chaining). 
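+ // (With no cross-signing between a and b, the two pairs stay
+ // disjoint: each chain terminates at its own root, as validated
+ // below.)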
+ Steps: []CBTestStep{ + CBGenerateRoot{ + Key: "key-root-a", + Name: "root-a", + }, + CBGenerateIntermediate{ + Key: "key-inter-a", + Name: "inter-a", + Parent: "root-a", + }, + CBGenerateRoot{ + Key: "key-root-b", + Name: "root-b", + }, + CBGenerateIntermediate{ + Key: "key-inter-b", + Name: "inter-b", + Parent: "root-b", + }, + CBValidateChain{ + Chains: map[string][]string{ + "root-a": {"self"}, + "inter-a": {"self", "root-a"}, + "root-b": {"self"}, + "inter-b": {"self", "root-b"}, + }, + }, + CBIssueLeaf{Issuer: "root-a"}, + CBIssueLeaf{Issuer: "inter-a"}, + CBIssueLeaf{Issuer: "root-b"}, + CBIssueLeaf{Issuer: "inter-b"}, + }, + }, + { + // Test root + intermediate, twice, chained a->b. + Steps: []CBTestStep{ + CBGenerateRoot{ + Key: "key-root-a", + Name: "root-a", + }, + CBGenerateIntermediate{ + Key: "key-inter-a", + Name: "inter-a", + Parent: "root-a", + }, + CBGenerateRoot{ + Key: "key-root-b", + Name: "root-b", + }, + CBGenerateIntermediate{ + Key: "key-inter-b", + Name: "inter-b", + Parent: "root-b", + }, + CBGenerateIntermediate{ + Key: "key-root-b", + Existing: true, + Name: "cross-a-b", + CommonName: "root-b", + Parent: "root-a", + }, + CBValidateChain{ + Chains: map[string][]string{ + "root-a": {"self"}, + "inter-a": {"self", "root-a"}, + "root-b": {"self", "cross-a-b", "root-a"}, + "inter-b": {"self", "root-b", "cross-a-b", "root-a"}, + "cross-a-b": {"self", "root-a"}, + }, + }, + CBIssueLeaf{Issuer: "root-a"}, + CBIssueLeaf{Issuer: "inter-a"}, + CBIssueLeaf{Issuer: "root-b"}, + CBIssueLeaf{Issuer: "inter-b"}, + CBIssueLeaf{Issuer: "cross-a-b"}, + }, + }, +} + +func Test_CAChainBuilding(t *testing.T) { + t.Parallel() + for testIndex, testCase := range chainBuildingTestCases { + b, s := CreateBackendWithStorage(t) + + knownKeys := make(map[string]string) + knownCerts := make(map[string]string) + for stepIndex, testStep := range testCase.Steps { + t.Logf("Running %v / %v", testIndex, stepIndex) + testStep.Run(t, b, s, knownKeys, knownCerts) + } + + t.Logf("Checking stable ordering of chains...") + ensureStableOrderingOfChains(t, b, s, knownKeys, knownCerts) + } +} + +func BenchmarkChainBuilding(benchies *testing.B) { + for testIndex, testCase := range chainBuildingTestCases { + name := "test-case-" + strconv.Itoa(testIndex) + benchies.Run(name, func(bench *testing.B) { + // Stop the timer as we setup the infra and certs. + bench.StopTimer() + bench.ResetTimer() + + b, s := CreateBackendWithStorage(bench) + + knownKeys := make(map[string]string) + knownCerts := make(map[string]string) + for _, testStep := range testCase.Steps { + testStep.Run(bench, b, s, knownKeys, knownCerts) + } + + // Run the benchmark. + ctx := context.Background() + sc := b.makeStorageContext(ctx, s) + bench.StartTimer() + for n := 0; n < bench.N; n++ { + sc.rebuildIssuersChains(nil) + } + }) + } +} diff --git a/builtin/logical/pki/chain_util.go b/builtin/logical/pki/chain_util.go new file mode 100644 index 0000000..e884f07 --- /dev/null +++ b/builtin/logical/pki/chain_util.go @@ -0,0 +1,1383 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "bytes" + "crypto/x509" + "fmt" + "sort" + + "github.com/hashicorp/vault/sdk/helper/errutil" +) + +func prettyIssuer(issuerIdEntryMap map[issuerID]*issuerEntry, issuer issuerID) string { + if entry, ok := issuerIdEntryMap[issuer]; ok && len(entry.Name) > 0 { + return "[id:" + string(issuer) + "/name:" + entry.Name + "]" + } + + return "[" + string(issuer) + "]" +} + +func (sc *storageContext) rebuildIssuersChains(referenceCert *issuerEntry /* optional */) error { + // This function rebuilds the CAChain field of all known issuers. This + // function should usually be invoked when a new issuer is added to the + // pool of issuers. + // + // In addition to the context and storage, we take an optional + // referenceCert parameter -- an issuer certificate that we should write + // to storage once done, but which might not be persisted yet (either due + // to new values on it or due to it not yet existing in the list). This is + // helpful when calling e.g., importIssuer(...) (from storage.go), to allow + // the newly imported issuer to have its CAChain field computed, but + // without writing and re-reading it from storage (potentially failing in + // the process if chain building failed). + // + // Our contract guarantees that, if referenceCert is provided, we'll write + // it to storage. Further, we guarantee that (given the issuers haven't + // changed), the results will be stable on multiple calls to rebuild the + // chain. + // + // Note that at no point in time do we fetch the private keys associated + // with any issuers. It is sufficient to merely look at the issuers + // themselves. + // + // To begin, we fetch all known issuers from disk. + issuers, err := sc.listIssuers() + if err != nil { + return fmt.Errorf("unable to list issuers to build chain: %w", err) + } + + // Fast path: no issuers means we can set the reference cert's value, if + // provided, to itself. + if len(issuers) == 0 { + if referenceCert == nil { + // Nothing to do; no reference cert was provided. + return nil + } + + // Otherwise, the only entry in the chain (that we know about) is the + // certificate itself. + referenceCert.CAChain = []string{referenceCert.Certificate} + return sc.writeIssuer(referenceCert) + } + + // Our provided reference cert might not be in the list of issuers. In + // that case, add it manually. + if referenceCert != nil { + missing := true + for _, issuer := range issuers { + if issuer == referenceCert.ID { + missing = false + break + } + } + + if missing { + issuers = append(issuers, referenceCert.ID) + } + } + + // Now call a stable sorting algorithm here. We want to ensure the results + // are the same across multiple calls to rebuildIssuersChains with the same + // input data. + // + // Note: while we want to ensure referenceCert is written last (because it + // is the user-facing action), we need to balance this with always having + // a stable chain order, regardless of which certificate was chosen as the + // reference cert. (E.g., for a given collection of unchanging certificates, + // if we repeatedly set+unset a manual chain, triggering rebuilds, we should + // always have the same chain after each unset). Thus, delay the write of + // the referenceCert below when persisting -- but keep the sort AFTER the + // referenceCert was added to the list, not before. 
+ // + // (Otherwise, if this is called with one existing issuer and one new + // reference cert, and the reference cert sorts before the existing + // issuer, we will sort this list and have persisted the new issuer + // first, and may fail on the subsequent write to the existing issuer. + // Alternatively, if we don't sort the issuers in this order and there's + // a parallel chain (where cert A is a child of both B and C, with + // C.ID < B.ID and C was passed in as the yet unwritten referenceCert), + // then we'll create a chain with order A -> B -> C on initial write (as + // A and B come from disk) but A -> C -> B on subsequent writes (when all + // certs come from disk). Thus the sort must be done after adding in the + // referenceCert, thus sorting it consistently, but its write must be + // singled out to occur last.) + sort.SliceStable(issuers, func(i, j int) bool { + return issuers[i] > issuers[j] + }) + + // We expect each of these maps to be the size of the number of issuers + // we have (as we're mapping from issuers to other values). + // + // The first caches the storage entry for the issuer, the second caches + // the parsed *x509.Certificate of the issuer itself, and the third and + // fourth maps that certificate back to the other issuers with that + // subject (note the keyword _other_: we'll exclude self-loops here) -- + // either via a parent or child relationship. + issuerIdEntryMap := make(map[issuerID]*issuerEntry, len(issuers)) + issuerIdCertMap := make(map[issuerID]*x509.Certificate, len(issuers)) + issuerIdParentsMap := make(map[issuerID][]issuerID, len(issuers)) + issuerIdChildrenMap := make(map[issuerID][]issuerID, len(issuers)) + + // For every known issuer, we map that subject back to the id of issuers + // containing that subject. This lets us build our issuerID -> parents + // mapping efficiently. Worst case we'll have a single linear chain where + // every entry has a distinct subject. + subjectIssuerIdsMap := make(map[string][]issuerID, len(issuers)) + + // First, read every issuer entry from storage. We'll propagate entries + // to three of the maps here: all but issuerIdParentsMap and + // issuerIdChildrenMap, which we'll do in a second pass. + for _, identifier := range issuers { + var stored *issuerEntry + + // When the reference issuer is provided and matches this identifier, + // prefer the updated reference copy instead. + if referenceCert != nil && identifier == referenceCert.ID { + stored = referenceCert + } else { + // Otherwise, fetch it from disk. + stored, err = sc.fetchIssuerById(identifier) + if err != nil { + return fmt.Errorf("unable to fetch issuer %v to build chain: %w", identifier, err) + } + } + + if stored == nil || len(stored.Certificate) == 0 { + return fmt.Errorf("bad issuer while building chain: missing certificate entry: %v", identifier) + } + + issuerIdEntryMap[identifier] = stored + cert, err := stored.GetCertificate() + if err != nil { + return fmt.Errorf("unable to parse issuer %v to certificate to build chain: %w", identifier, err) + } + + issuerIdCertMap[identifier] = cert + subjectIssuerIdsMap[string(cert.RawSubject)] = append(subjectIssuerIdsMap[string(cert.RawSubject)], identifier) + } + + // Now that we have the subj->issuer map built, we can build the parent + // and child mappings. We iterate over all issuers and build it one step + // at a time. + // + // This is worst case O(n^2) because all of the issuers could have the + // same name and be self-signed certs with different keys. 
That makes the
+ // chain building (below) fast as they've all got empty parents/children
+ // maps.
+ //
+ // Note that the order of iteration is stable. Why? We've built
+ // subjectIssuerIdsMap from the (above) sorted issuers by appending the
+ // next entry to the present list; since they're already sorted, that
+ // lookup will also be sorted. Thus, each of these iterations is also
+ // in sorted order, so the resulting map entries (of ids) are also sorted.
+ // Thus, the graph structure is in sorted order and thus the toposort
+ // below will be stable.
+ for _, child := range issuers {
+ // Fetch the certificate as we'll need it later.
+ childCert := issuerIdCertMap[child]
+
+ parentSubject := string(issuerIdCertMap[child].RawIssuer)
+ parentCerts, ok := subjectIssuerIdsMap[parentSubject]
+ if !ok {
+ // When the issuer isn't known to Vault, the lookup by the issuer
+ // will be empty. This most commonly occurs when intermediates are
+ // directly added (via intermediate/set-signed) without providing
+ // the root.
+ continue
+ }
+
+ // Now, iterate over all possible parents and assign the child/parent
+ // relationship.
+ for _, parent := range parentCerts {
+ // Skip self-references to the exact same certificate.
+ if child == parent {
+ continue
+ }
+
+ // While we could use Subject/Authority Key Identifier (SKI/AKI)
+ // as a heuristic for whether or not this relationship is valid,
+ // this is insufficient as otherwise valid CA certificates could
+ // elide this information. That means it's best to actually validate
+ // the signature (e.g., call child.CheckSignatureFrom(parent))
+ // instead.
+ parentCert := issuerIdCertMap[parent]
+ if err := childCert.CheckSignatureFrom(parentCert); err != nil {
+ // We cannot return an error here as it could be that this
+ // signature is entirely valid -- but just for a different
+ // key. Instead, skip adding the parent->child and
+ // child->parent link.
+ continue
+ }
+
+ // Otherwise, we can append it to the map, allowing us to walk the
+ // issuer->parent mapping.
+ issuerIdParentsMap[child] = append(issuerIdParentsMap[child], parent)
+
+ // Also cross-add the child relationship step at the same time.
+ issuerIdChildrenMap[parent] = append(issuerIdChildrenMap[parent], child)
+ }
+ }
+
+ // Finally, we consult RFC 8446 Section 4.4.2 for creating an algorithm for
+ // building the chain:
+ //
+ // > ... The sender's certificate MUST come in the first
+ // > CertificateEntry in the list. Each following certificate SHOULD
+ // > directly certify the one immediately preceding it. Because
+ // > certificate validation requires that trust anchors be distributed
+ // > independently, a certificate that specifies a trust anchor MAY be
+ // > omitted from the chain, provided that supported peers are known to
+ // > possess any omitted certificates.
+ // >
+ // > Note: Prior to TLS 1.3, "certificate_list" ordering required each
+ // > certificate to certify the one immediately preceding it; however,
+ // > some implementations allowed some flexibility. Servers sometimes
+ // > send both a current and deprecated intermediate for transitional
+ // > purposes, and others are simply configured incorrectly, but these
+ // > cases can nonetheless be validated properly. For maximum
+ // > compatibility, all implementations SHOULD be prepared to handle
+ // > potentially extraneous certificates and arbitrary orderings from any
+ // > TLS version, with the exception of the end-entity certificate which
+ // > MUST be first.
+ //
+ // So, we take this to mean we should build chains via DFS: each issuer is
+ // explored until an empty parent pointer (i.e., self-loop) is reached and
+ // then the most recently seen duplicate parent link is explored.
+ //
+ // However, we don't actually need to do a DFS (per issuer) here. We can
+ // simply invert the (pseudo-)directed graph, i.e., topologically sort it.
+ // Some number of certs (roots without cross-signing) lack parent issuers.
+ // These are already "done" from the PoV of chain building. We can thus
+ // iterate through the parent mapping to find entries without parents to
+ // start the sort. After processing, we can add all children and visit them
+ // if all parents have been processed.
+ //
+ // Note though, that while topological sorting is equivalent to the DFS,
+ // we have to take care to make it a pseudo-DAG. This means handling the
+ // most common 2-star (2-clique) sub-graphs of reissued certificates,
+ // manually building their chain prior to starting the topological sort.
+ //
+ // This thus runs in O(|V| + |E|) -> O(n^2) in the number of issuers.
+ processedIssuers := make(map[issuerID]bool, len(issuers))
+ toVisit := make([]issuerID, 0, len(issuers))
+
+ // Handle any explicitly constructed certificate chains. Here, we don't
+ // validate much of what the user provides; if they provide since-deleted
+ // refs, skip them; if they duplicate entries, add them multiple times.
+ // The other chain building logic will be able to deduplicate them when
+ // used as parents to other certificates.
+ for _, candidate := range issuers {
+ entry := issuerIdEntryMap[candidate]
+ if len(entry.ManualChain) == 0 {
+ continue
+ }
+
+ entry.CAChain = nil
+ for _, parentId := range entry.ManualChain {
+ parentEntry := issuerIdEntryMap[parentId]
+ if parentEntry == nil {
+ continue
+ }
+
+ entry.CAChain = append(entry.CAChain, parentEntry.Certificate)
+ }
+
+ // Mark this node as processed and add its children.
+ processedIssuers[candidate] = true
+ children, ok := issuerIdChildrenMap[candidate]
+ if !ok {
+ continue
+ }
+
+ toVisit = append(toVisit, children...)
+ }
+
+ // Setup the toVisit queue.
+ for _, candidate := range issuers {
+ parentCerts, ok := issuerIdParentsMap[candidate]
+ if ok && len(parentCerts) > 0 {
+ // Assumption: no self-loops in the parent mapping, so if there's
+ // a non-empty parent mapping it means we can skip this node as
+ // it can't be processed yet.
+ continue
+ }
+
+ // This candidate has no known parent issuers; add it to the
+ // list.
+ toVisit = append(toVisit, candidate)
+ }
+
+ // If the queue is empty (and we know we have issuers), trigger the
+ // clique/cycle detection logic so we aren't starved for nodes.
+ if len(toVisit) == 0 {
+ toVisit, err = processAnyCliqueOrCycle(issuers, processedIssuers, toVisit, issuerIdEntryMap, issuerIdCertMap, issuerIdParentsMap, issuerIdChildrenMap, subjectIssuerIdsMap)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Now actually build the CAChain entries... Use a safety mechanism to
+ // ensure we don't accidentally infinite-loop (if we introduce a bug).
+ maxVisitCount := len(issuers)*len(issuers)*len(issuers) + 100
+ for len(toVisit) > 0 && maxVisitCount >= 0 {
+ var issuer issuerID
+ issuer, toVisit = toVisit[0], toVisit[1:]
+
+ // Burn down the visit budget on every pop so the loop guard above
+ // can actually trip; any still-unprocessed issuers are reported
+ // after the loop.
+ maxVisitCount -= 1
+
+ // If (and only if) we're presently starved for next nodes to visit,
+ // attempt to resolve cliques and cycles again to fix that. This is
+ // because all-cycles cycle detection is at least as costly as
+ // traversing the entire graph a couple of times.
+ //
+ // Additionally, we do this immediately after popping a node from the
+ // queue as we wish to ensure we never become starved for nodes.
+ if len(toVisit) == 0 {
+ toVisit, err = processAnyCliqueOrCycle(issuers, processedIssuers, toVisit, issuerIdEntryMap, issuerIdCertMap, issuerIdParentsMap, issuerIdChildrenMap, subjectIssuerIdsMap)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Self-loops and cross-signing might lead to this node already being
+ // processed; skip it on the second pass.
+ if processed, ok := processedIssuers[issuer]; ok && processed {
+ continue
+ }
+
+ // Check our parent certs now; if they are all processed, we can
+ // process this node. Otherwise, we'll re-add this to the queue
+ // when the last parent is processed (and we re-add its children).
+ parentCerts, ok := issuerIdParentsMap[issuer]
+ if ok && len(parentCerts) > 0 {
+ // For each parent, validate that we've processed it.
+ mustSkip := false
+ for _, parentCert := range parentCerts {
+ if processed, ok := processedIssuers[parentCert]; !ok || !processed {
+ mustSkip = true
+ break
+ }
+ }
+
+ if mustSkip {
+ // Skip this node for now, we'll come back to it later.
+ continue
+ }
+ }
+
+ // Now we can build the chain. Start with the current cert...
+ entry := issuerIdEntryMap[issuer]
+ entry.CAChain = []string{entry.Certificate}
+
+ // ...and add all parents into it. Note that we have to tell if
+ // that parent was already visited or not.
+ if ok && len(parentCerts) > 0 {
+ // Split parents into two categories: roots and intermediates.
+ // When building a straight-line chain, we want to prefer the
+ // root (thus, ending the verification) to any cross-signed
+ // intermediates. If a root is cross-signed, we'll include its
+ // cross-signed cert in _its_ chain, thus ignoring our duplicate
+ // parent here.
+ //
+ // Why? When you step from the present node ("issuer") onto one
+ // of its parents, if you step onto a root, it is a no-op: you
+ // can still visit all of the neighbors (because any neighbors,
+ // if they exist, must be cross-signed alternative paths).
+ // However, if you directly step onto the cross-signed, now you're
+ // taken in an alternative direction (via its chain), and must
+ // revisit any roots later.
+ var roots []issuerID
+ var intermediates []issuerID
+ for _, parentCertId := range parentCerts {
+ if bytes.Equal(issuerIdCertMap[parentCertId].RawSubject, issuerIdCertMap[parentCertId].RawIssuer) {
+ roots = append(roots, parentCertId)
+ } else {
+ intermediates = append(intermediates, parentCertId)
+ }
+ }
+
+ if len(parentCerts) > 1024*1024*1024 {
+ return errutil.InternalError{Err: fmt.Sprintf("error building certificate chain, %d is too many parent certs",
+ len(parentCerts))}
+ }
+ includedParentCerts := make(map[string]bool, len(parentCerts)+1)
+ includedParentCerts[entry.Certificate] = true
+ for _, parentCert := range append(roots, intermediates...) {
+ // See discussion of the algorithm above as to why this is
+ // in the correct order. However, note that we do need to
+ // exclude duplicate certs, hence the map above.
+ //
+ // Assumption: issuerIdEntryMap and issuerIdParentsMap are well
+ // constructed.
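+ //
+ // (Illustrative walk: if this issuer's parents are a self-signed
+ // root R and a cross-signed intermediate X, the roots-first order
+ // appends R's chain and then X's, while includedParentCerts drops
+ // any certificates the two chains share.)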
+ parent := issuerIdEntryMap[parentCert]
+ for _, parentChainCert := range parent.CAChain {
+ addToChainIfNotExisting(includedParentCerts, entry, parentChainCert)
+ }
+ }
+ }
+
+ // Now, mark this node as processed and go and visit all of its
+ // children.
+ processedIssuers[issuer] = true
+
+ childrenCerts, ok := issuerIdChildrenMap[issuer]
+ if ok && len(childrenCerts) > 0 {
+ toVisit = append(toVisit, childrenCerts...)
+ }
+ }
+
+ // Assumption: no nodes left unprocessed. They should've either been
+ // reached through the parent->child addition or they should've been
+ // self-loops.
+ var msg string
+ for _, issuer := range issuers {
+ if visited, ok := processedIssuers[issuer]; !ok || !visited {
+ pretty := prettyIssuer(issuerIdEntryMap, issuer)
+ msg += fmt.Sprintf("[failed to build chain correctly: unprocessed issuer %v: ok: %v; visited: %v]\n", pretty, ok, visited)
+ }
+ }
+ if len(msg) > 0 {
+ // Use an explicit format string: msg contains issuer-derived data
+ // that must not be interpreted as format directives.
+ return fmt.Errorf("%s", msg)
+ }
+
+ // Finally, write all issuers to disk.
+ //
+ // See the note above when sorting issuers for why we delay persisting
+ // the referenceCert, if it was provided.
+ for _, issuer := range issuers {
+ entry := issuerIdEntryMap[issuer]
+
+ if referenceCert != nil && issuer == referenceCert.ID {
+ continue
+ }
+
+ err := sc.writeIssuer(entry)
+ if err != nil {
+ pretty := prettyIssuer(issuerIdEntryMap, issuer)
+ return fmt.Errorf("failed to persist issuer (%v) chain to disk: %w", pretty, err)
+ }
+ }
+ if referenceCert != nil {
+ err := sc.writeIssuer(issuerIdEntryMap[referenceCert.ID])
+ if err != nil {
+ pretty := prettyIssuer(issuerIdEntryMap, referenceCert.ID)
+ return fmt.Errorf("failed to persist issuer (%v) chain to disk: %w", pretty, err)
+ }
+ }
+
+ // Everything worked \o/
+ return nil
+}
+
+func addToChainIfNotExisting(includedParentCerts map[string]bool, entry *issuerEntry, certToAdd string) {
+ included, ok := includedParentCerts[certToAdd]
+ if ok && included {
+ return
+ }
+
+ entry.CAChain = append(entry.CAChain, certToAdd)
+ includedParentCerts[certToAdd] = true
+}
+
+func processAnyCliqueOrCycle(
+ issuers []issuerID,
+ processedIssuers map[issuerID]bool,
+ toVisit []issuerID,
+ issuerIdEntryMap map[issuerID]*issuerEntry,
+ issuerIdCertMap map[issuerID]*x509.Certificate,
+ issuerIdParentsMap map[issuerID][]issuerID,
+ issuerIdChildrenMap map[issuerID][]issuerID,
+ subjectIssuerIdsMap map[string][]issuerID,
+) ([]issuerID /* toVisit */, error) {
+ // Topological sort really only works on directed acyclic graphs (DAGs).
+ // But a pool of arbitrary (issuer) certificates is actually neither!
+ // This pool could contain both cliques and cycles. Because this could
+ // block chain construction, we need to handle these cases.
+ //
+ // Within the helper for rebuildIssuersChains, we realize that we might
+ // have certain pathological cases where cliques and cycles might _mix_.
+ // This warrants handling them outside of the topo-sort code, effectively
+ // acting as a node-collapsing technique (turning many nodes into one).
+ // In reality, we just special-case this and handle the processing of
+ // these nodes manually, fixing their CAChain value and then skipping
+ // them.
+ //
+ // Since clique detection is (in this case) cheap (at worst O(n) on the
+ // size of the graph), we favor it over the cycle detection logic. The
+ // order (in the case of mixed cliques+cycles) doesn't matter, as the
+ // discovery of the clique will lead to the cycle. We additionally find
+ // all (unprocessed) cliques first, so our cycle detection code can avoid
+ // falling into cliques.
+ //
+ // We need to be able to handle cliques adjacent to cycles. This is
+ // necessary because a cross-signed cert (with same subject and key as
+ // the clique, but different issuer) could be part of a cycle; this cycle
+ // loop forms a parent chain (that topo-sort can't resolve) -- AND the
+ // clique itself mixes with this, so resolving one or the other isn't
+ // sufficient (as the reissued clique plus the cross-signed cert
+ // effectively acts as a single node in the cycle). Oh, and there might
+ // be multiple cycles. :-)
+ //
+ // We also might just have cycles, separately from reissued cliques.
+ //
+ // The nice thing about both cliques and cycles is that, as long as you
+ // deduplicate your certs, all issuers in the collection (including the
+ // mixed collection) have the same chain entries, just in different
+ // orders (preferring the cycle and appending the remaining clique
+ // entries afterwards).
+
+ // To begin, cache all cliques that we know about.
+ allCliques, issuerIdCliqueMap, allCliqueNodes, err := findAllCliques(processedIssuers, issuerIdCertMap, subjectIssuerIdsMap, issuers)
+ if err != nil {
+ // Found a clique that is too large; exit with an error.
+ return nil, err
+ }
+
+ for _, issuer := range issuers {
+ // Skip anything that's already been processed.
+ if processed, ok := processedIssuers[issuer]; ok && processed {
+ continue
+ }
+
+ // This first branch is finding cliques. However, finding a clique is
+ // not sufficient as discussed above -- we also need to find any
+ // incident cycle as this cycle is a parent and child to the clique,
+ // which means the cycle nodes _must_ include the clique _and_ the
+ // clique must include the cycle (in the CA Chain computation).
+ // However, it's not sufficient to just do one and then the other:
+ // we need the closure of all cliques (and their incident cycles).
+ // Finally -- it isn't enough to consider this chain in isolation
+ // either. We need to consider _all_ parents and ensure they've been
+ // processed before processing this closure.
+ var cliques [][]issuerID
+ var cycles [][]issuerID
+ closure := make(map[issuerID]bool)
+
+ var cliquesToProcess []issuerID
+ cliquesToProcess = append(cliquesToProcess, issuer)
+
+ for len(cliquesToProcess) > 0 {
+ var node issuerID
+ node, cliquesToProcess = cliquesToProcess[0], cliquesToProcess[1:]
+
+ // Skip potential clique nodes which have already been processed
+ // (either by the topo-sort or by this clique-finding code).
+ if processed, ok := processedIssuers[node]; ok && processed {
+ continue
+ }
+ if nodeInClosure, ok := closure[node]; ok && nodeInClosure {
+ continue
+ }
+
+ // Check if we have a clique for this node from our computed
+ // collection of cliques.
+ cliqueId, ok := issuerIdCliqueMap[node]
+ if !ok {
+ continue
+ }
+ cliqueNodes := allCliques[cliqueId]
+
+ // Add our discovered clique. Note that we avoid duplicate cliques by
+ // the skip logic above. Additionally, we know that cliqueNodes must
+ // be unique and not duplicated with any existing nodes so we can add
+ // all nodes to closure.
+ cliques = append(cliques, cliqueNodes)
+ for _, node := range cliqueNodes {
+ closure[node] = true
+ }
+
+ // Try to expand the clique to see if there are common cycles around it.
We exclude _all_ clique nodes from the expansion path, because + // it will unnecessarily bloat the detected cycles AND we know that + // we'll find them again from the neighborhood search. + // + // Additionally, note that, detection of cycles should be independent + // of cliques: cliques form under reissuance, and cycles form via + // cross-signing chains; the latter ensures that any cliques can be + // strictly bypassed from cycles (but the chain construction later + // ensures we pull in the cliques into the cycles). + foundCycles, err := findCyclesNearClique(processedIssuers, issuerIdCertMap, issuerIdChildrenMap, allCliqueNodes) + if err != nil { + // Cycle is too large. + return toVisit, err + } + + // Assumption: each cycle in foundCycles is in canonical order (see note + // below about canonical ordering). Deduplicate these against already + // existing cycles and add them to the closure nodes. + for _, cycle := range foundCycles { + cycles = appendCycleIfNotExisting(cycles, cycle) + + // Now, for each cycle node, we need to find all adjacent cliques. + // We do this by finding each child of the cycle and adding it to + // the queue. If these nodes aren't on cliques, we'll skip them + // fairly quickly since the cliques were pre-computed. + for _, cycleNode := range cycle { + children, ok := issuerIdChildrenMap[cycleNode] + if !ok { + continue + } + + cliquesToProcess = append(cliquesToProcess, children...) + + // While we're here, add this cycle node to the closure. + closure[cycleNode] = true + } + } + } + + // Before we begin, we need to compute the _parents_ of the nodes in + // these cliques and cycles and ensure they've all been processed (if + // they're not already part of the closure). + parents, ok := computeParentsFromClosure(processedIssuers, issuerIdParentsMap, closure) + if !ok { + // At least one parent wasn't processed; skip this cliques and + // cycles group for now until they have all been processed. + continue + } + + // Ok, we've computed the closure. Now we can build CA nodes and mark + // everything as processed, growing the toVisit queue in the process. + // For every node we've found... + for node := range closure { + // Skip anything that's already been processed. + if processed, ok := processedIssuers[node]; ok && processed { + continue + } + + // Before we begin, mark this node as processed (so we can continue + // later) and add children to toVisit. + processedIssuers[node] = true + childrenCerts, ok := issuerIdChildrenMap[node] + if ok && len(childrenCerts) > 0 { + toVisit = append(toVisit, childrenCerts...) + } + + // It can either be part of a clique or a cycle. We wish to add + // the nodes of whatever grouping + foundNode := false + for _, clique := range cliques { + inClique := false + for _, cliqueNode := range clique { + if cliqueNode == node { + inClique = true + break + } + } + + if inClique { + foundNode = true + + // Compute this node's CAChain. Note order doesn't matter + // (within the clique), but we'll preserve the relative + // order of associated cycles. + entry := issuerIdEntryMap[node] + entry.CAChain = []string{entry.Certificate} + + includedParentCerts := make(map[string]bool, len(closure)+1) + includedParentCerts[entry.Certificate] = true + + // First add nodes from this clique, then all cycles, and then + // all other cliques. + addNodeCertsToEntry(issuerIdEntryMap, issuerIdChildrenMap, includedParentCerts, entry, clique) + addNodeCertsToEntry(issuerIdEntryMap, issuerIdChildrenMap, includedParentCerts, entry, cycles...) 
+ addNodeCertsToEntry(issuerIdEntryMap, issuerIdChildrenMap, includedParentCerts, entry, cliques...)
+ addParentChainsToEntry(issuerIdEntryMap, includedParentCerts, entry, parents)
+
+ break
+ }
+ }
+
+ // Otherwise, it must be part of a cycle.
+ for _, cycle := range cycles {
+ inCycle := false
+ offsetInCycle := 0
+ for index, cycleNode := range cycle {
+ if cycleNode == node {
+ inCycle = true
+ offsetInCycle = index
+ break
+ }
+ }
+
+ if inCycle {
+ foundNode = true
+
+ // Compute this node's CAChain. Note that order within cycles
+ // matters, but we'll preserve the relative order.
+ entry := issuerIdEntryMap[node]
+ entry.CAChain = []string{entry.Certificate}
+
+ includedParentCerts := make(map[string]bool, len(closure)+1)
+ includedParentCerts[entry.Certificate] = true
+
+ // First add nodes from this cycle, then all cliques, then all
+ // other cycles, and finally from parents.
+ orderedCycle := append(cycle[offsetInCycle:], cycle[0:offsetInCycle]...)
+ addNodeCertsToEntry(issuerIdEntryMap, issuerIdChildrenMap, includedParentCerts, entry, orderedCycle)
+ addNodeCertsToEntry(issuerIdEntryMap, issuerIdChildrenMap, includedParentCerts, entry, cliques...)
+ addNodeCertsToEntry(issuerIdEntryMap, issuerIdChildrenMap, includedParentCerts, entry, cycles...)
+ addParentChainsToEntry(issuerIdEntryMap, includedParentCerts, entry, parents)
+
+ break
+ }
+ }
+
+ if !foundNode {
+ // Unable to find the node; return an error. This shouldn't happen
+ // generally.
+ pretty := prettyIssuer(issuerIdEntryMap, issuer)
+ return nil, fmt.Errorf("node (%v) is in the closure (%v) but in neither cycles (%v) nor cliques (%v)", pretty, closure, cycles, cliques)
+ }
+ }
+ }
+
+ // We might also have cycles without having associated cliques. We assume
+ // that any cliques (if they existed and were relevant for the remaining
+ // cycles) were processed at this point. However, we might still have
+ // unprocessed cliques (and related cycles) _if_ an unrelated cycle is
+ // the parent to that clique+cycle group.
+ for _, issuer := range issuers {
+ // Skip this node if it is already processed.
+ if processed, ok := processedIssuers[issuer]; ok && processed {
+ continue
+ }
+
+ // Cliques should've been processed by now, if they were necessary
+ // for processable cycles, so ignore them from here to avoid
+ // bloating our search paths.
+ cycles, err := findAllCyclesWithNode(processedIssuers, issuerIdCertMap, issuerIdChildrenMap, issuer, allCliqueNodes)
+ if err != nil {
+ // Too large of a cycle.
+ return nil, err
+ }
+
+ closure := make(map[issuerID]bool)
+ for _, cycle := range cycles {
+ for _, node := range cycle {
+ closure[node] = true
+ }
+ }
+
+ // Before we begin, we need to compute the _parents_ of the nodes in
+ // these cycles and ensure they've all been processed (if they're not
+ // part of the closure).
+ parents, ok := computeParentsFromClosure(processedIssuers, issuerIdParentsMap, closure)
+ if !ok {
+ // At least one parent wasn't processed; skip this cycle
+ // group for now until they have all been processed.
+ continue
+ }
+
+ // Finally, for all detected cycles, build the CAChain for nodes in
+ // cycles. Since they all share a common parent, they must all contain
+ // each other.
+ for _, cycle := range cycles {
+ // For each node in each cycle...
+ for nodeIndex, node := range cycle {
+ // If the node is processed already, skip it.
+ if processed, ok := processedIssuers[node]; ok && processed {
+ continue
+ }
+
+ // Otherwise, build its CAChain.
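+ // (Illustrative, with hypothetical IDs: given the canonical cycle
+ // [A, B, C] and node B, the rotation below yields [B, C, A], so
+ // each member's chain starts with itself and then follows the
+ // cycle in parent order.)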
+ entry := issuerIdEntryMap[node]
+ entry.CAChain = []string{entry.Certificate}
+
+ // We have no good estimate of the chain's size here, so
+ // don't presize the map.
+ includedParentCerts := make(map[string]bool)
+ includedParentCerts[entry.Certificate] = true
+
+ // First add nodes from this cycle, then all other cycles, and
+ // finally from parents.
+ orderedCycle := append(cycle[nodeIndex:], cycle[0:nodeIndex]...)
+ addNodeCertsToEntry(issuerIdEntryMap, issuerIdChildrenMap, includedParentCerts, entry, orderedCycle)
+ addNodeCertsToEntry(issuerIdEntryMap, issuerIdChildrenMap, includedParentCerts, entry, cycles...)
+ addParentChainsToEntry(issuerIdEntryMap, includedParentCerts, entry, parents)
+
+ // Finally, mark the node as processed and add the remaining
+ // children to toVisit.
+ processedIssuers[node] = true
+ childrenCerts, ok := issuerIdChildrenMap[node]
+ if ok && len(childrenCerts) > 0 {
+ toVisit = append(toVisit, childrenCerts...)
+ }
+ }
+ }
+ }
+
+ return toVisit, nil
+}
+
+func findAllCliques(
+ processedIssuers map[issuerID]bool,
+ issuerIdCertMap map[issuerID]*x509.Certificate,
+ subjectIssuerIdsMap map[string][]issuerID,
+ issuers []issuerID,
+) ([][]issuerID, map[issuerID]int, []issuerID, error) {
+ var allCliques [][]issuerID
+ issuerIdCliqueMap := make(map[issuerID]int)
+ var allCliqueNodes []issuerID
+
+ for _, node := range issuers {
+ // Check if the node has already been visited...
+ if processed, ok := processedIssuers[node]; ok && processed {
+ // ...if so it might have had a manually constructed chain; skip
+ // it for clique detection.
+ continue
+ }
+ if _, ok := issuerIdCliqueMap[node]; ok {
+ // ...if so it must be on another clique; skip the clique finding
+ // so we don't get duplicated cliques.
+ continue
+ }
+
+ // See if this is a node on a clique and find that clique.
+ cliqueNodes, err := isOnReissuedClique(processedIssuers, issuerIdCertMap, subjectIssuerIdsMap, node)
+ if err != nil {
+ // Clique is too large.
+ return nil, nil, nil, err
+ }
+
+ // Skip nodes which really aren't a clique.
+ if len(cliqueNodes) <= 1 {
+ continue
+ }
+
+ // Add this clique and update the mapping. A given node can only be in one
+ // clique.
+ cliqueId := len(allCliques)
+ allCliques = append(allCliques, cliqueNodes)
+ allCliqueNodes = append(allCliqueNodes, cliqueNodes...)
+ for _, cliqueNode := range cliqueNodes {
+ issuerIdCliqueMap[cliqueNode] = cliqueId
+ }
+ }
+
+ return allCliques, issuerIdCliqueMap, allCliqueNodes, nil
+}
+
+func isOnReissuedClique(
+ processedIssuers map[issuerID]bool,
+ issuerIdCertMap map[issuerID]*x509.Certificate,
+ subjectIssuerIdsMap map[string][]issuerID,
+ node issuerID,
+) ([]issuerID, error) {
+ // Finding max cliques in arbitrary graphs is a nearly pathological
+ // problem, usually left to the realm of SAT solvers and NP-Complete
+ // theory.
+ //
+ // We're not dealing with arbitrary graphs though. We're dealing with
+ // a highly regular, highly structured constructed graph.
+ //
+ // Reissued cliques form in certificate chains when two conditions hold:
+ //
+ // 1. The Subject of the certificate matches the Issuer.
+ // 2. The underlying public key is the same, resulting in the signature
+ // validating for any pair of certs.
+ //
+ // This follows from the definition of a reissued certificate (same key
+ // material, subject, and issuer but with a different serial number and
+ // a different validity period).
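+ //
+ // (Concretely -- names illustrative only -- if R1 and R2 are both
+ // self-signed with Subject CN=root and share one key pair, each
+ // verifies under the other's key: a 2-clique. Any R3 reissued from
+ // the same key and subject joins automatically, giving a 3-clique,
+ // and so on.)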
+ // The structure means that the graph is highly regular: given a
+ // partial or self-clique, if any candidate node can satisfy this
+ // relation with any node of the existing clique, it must form a larger
+ // clique, satisfying this relationship with all other nodes in the
+ // existing clique.
+ //
+ // (Aside: this is not the only type of clique, but it is the only type
+ // of 3+ node clique. A 2-star is emitted from certain graphs, but we
+ // choose to handle that case in the cycle detection code rather than
+ // under this reissued clique detection code.)
+ //
+ // What does this mean for our algorithm? A simple greedy search is
+ // sufficient. If we index our certificates by subject -> issuerID
+ // (and cache its value across calls, which we've already done for
+ // building the parent/child relationship), we can find all other issuers
+ // with the same public key and subject as the existing node fairly
+ // easily.
+ //
+ // However, we should also set some reasonable bounds on clique size.
+ // Let's limit it to 6 nodes.
+ maxCliqueSize := 6
+
+ // Per assumptions of how we've built the graph, these map lookups should
+ // both exist.
+ cert := issuerIdCertMap[node]
+ subject := string(cert.RawSubject)
+ issuer := string(cert.RawIssuer)
+ candidates := subjectIssuerIdsMap[subject]
+
+ // If the given node doesn't have the same subject and issuer, it isn't
+ // a valid clique node.
+ if subject != issuer {
+ return nil, nil
+ }
+
+ // We have two choices here for validating that the two keys are the same:
+ // perform a cheap ASN.1 encoding comparison of the public keys, which
+ // _should_ be the same but may not be, or perform a more costly (but
+ // definitely correct) signature verification. We prefer cheap and call
+ // it good enough.
+ spki := cert.RawSubjectPublicKeyInfo
+
+ // We know candidates has everything satisfying _half_ of the first
+ // condition (the subject half), so validate they match the other half
+ // (the issuer half) and the second condition. For node (which is
+ // included in candidates), the condition should vacuously hold.
+ var clique []issuerID
+ for _, candidate := range candidates {
+ // Skip already processed nodes, even if they could be clique
+ // candidates. We'll treat them as any other (already processed)
+ // external parent in that scenario.
+ if processed, ok := processedIssuers[candidate]; ok && processed {
+ continue
+ }
+
+ candidateCert := issuerIdCertMap[candidate]
+ hasRightKey := bytes.Equal(candidateCert.RawSubjectPublicKeyInfo, spki)
+ hasMatchingIssuer := string(candidateCert.RawIssuer) == issuer
+
+ if hasRightKey && hasMatchingIssuer {
+ clique = append(clique, candidate)
+ }
+ }
+
+ // Clique is invalid if it contains zero or one nodes.
+ if len(clique) <= 1 {
+ return nil, nil
+ }
+
+ // Validate it is within the acceptable clique size.
+ if len(clique) > maxCliqueSize {
+ return clique, fmt.Errorf("error building issuer chains: excessively reissued certificate: %v entries", len(clique))
+ }
+
+ // Must be a valid clique.
+ return clique, nil
+}
+
+func containsIssuer(collection []issuerID, target issuerID) bool {
+ if len(collection) == 0 {
+ return false
+ }
+
+ for _, needle := range collection {
+ if needle == target {
+ return true
+ }
+ }
+
+ return false
+}
+
+func appendCycleIfNotExisting(knownCycles [][]issuerID, candidate []issuerID) [][]issuerID {
+ // There are two ways to do cycle detection: canonicalize the cycles,
+ // rewriting them to have the least (or max) element first, or just
+ // brute force the detection.
+ //
+ // Canonicalizing them is faster and easier to write (just compare
+ // canonical forms) so do that instead.
+ canonicalized := canonicalizeCycle(candidate)
+
+ found := false
+ for _, existing := range knownCycles {
+ if len(existing) != len(canonicalized) {
+ continue
+ }
+
+ equivalent := true
+ for index, node := range canonicalized {
+ if node != existing[index] {
+ equivalent = false
+ break
+ }
+ }
+
+ if equivalent {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ return append(knownCycles, canonicalized)
+ }
+
+ return knownCycles
+}
+
+func canonicalizeCycle(cycle []issuerID) []issuerID {
+ // Find the minimum value and put it at the head, keeping the relative
+ // ordering the same.
+ minIndex := 0
+ for index, entry := range cycle {
+ if entry < cycle[minIndex] {
+ minIndex = index
+ }
+ }
+
+ ret := append(cycle[minIndex:], cycle[0:minIndex]...)
+ if len(ret) != len(cycle) {
+ panic("ABORT")
+ }
+
+ return ret
+}
+
+func findCyclesNearClique(
+ processedIssuers map[issuerID]bool,
+ issuerIdCertMap map[issuerID]*x509.Certificate,
+ issuerIdChildrenMap map[issuerID][]issuerID,
+ cliqueNodes []issuerID,
+) ([][]issuerID, error) {
+ // When we have a reissued clique, we need to find all cycles next to it.
+ // Presumably, because they all have non-empty parents, they should not
+ // have been visited yet. We further know that (because we're exploring
+ // the children path), any processed check would be unnecessary as all
+ // children shouldn't have been processed yet (since their parents aren't
+ // either).
+ //
+ // So, we can explore each of the children of any one clique node and
+ // find all cycles using that node, until we come back to the starting
+ // node, excluding the clique and other cycles.
+ cliqueNode := cliqueNodes[0]
+
+ // Copy the clique nodes as excluded nodes; we'll avoid exploring cycles
+ // which have parents that have been already explored. Note that a real
+ // copy is required here: we append to this list below and must not
+ // clobber the caller's backing array.
+ excludeNodes := append([]issuerID{}, cliqueNodes...)
+ var knownCycles [][]issuerID
+
+ // We know the node has at least one child, since the clique is non-empty.
+ for _, child := range issuerIdChildrenMap[cliqueNode] {
+ // Skip children that are part of the clique.
+ if containsIssuer(excludeNodes, child) {
+ continue
+ }
+
+ // Find cycles containing this node.
+ newCycles, err := findAllCyclesWithNode(processedIssuers, issuerIdCertMap, issuerIdChildrenMap, child, excludeNodes)
+ if err != nil {
+ // Found too large of a cycle.
+ return nil, err
+ }
+
+ // Add all cycles into the known cycles list.
+ for _, cycle := range newCycles {
+ knownCycles = appendCycleIfNotExisting(knownCycles, cycle)
+ }
+
+ // Exclude only the current child. Adding everything in the cycles
+ // results might prevent discovery of other valid cycles.
+ excludeNodes = append(excludeNodes, child)
+ }
+
+ // Sort cycles from shortest->longest.
+ sort.SliceStable(knownCycles, func(i, j int) bool {
+ return len(knownCycles[i]) < len(knownCycles[j])
+ })
+
+ return knownCycles, nil
+}
+
+func findAllCyclesWithNode(
+ processedIssuers map[issuerID]bool,
+ issuerIdCertMap map[issuerID]*x509.Certificate,
+ issuerIdChildrenMap map[issuerID][]issuerID,
+ source issuerID,
+ exclude []issuerID,
+) ([][]issuerID, error) {
+ // We wish to find all cycles involving this particular node and report
+ // the corresponding paths. This is a full-graph traversal (excluding
+ // certain paths) as we're not just checking if a cycle occurred, but
+ // instead returning all cycles containing that node.
+ //
+ // Set some limit on max cycle size.
+ maxCycleSize := 8
+
+ // Whether we've visited any given node.
+ cycleVisited := make(map[issuerID]bool)
+ visitCounts := make(map[issuerID]int)
+ parentCounts := make(map[issuerID]map[issuerID]bool)
+
+ // Paths to the specified node. Some of these might be cycles.
+ pathsTo := make(map[issuerID][][]issuerID)
+
+ // Nodes to visit.
+ var visitQueue []issuerID
+
+ // Add the source node to start. In order to set up the paths to a
+ // given node, we seed pathsTo with the single path involving just
+ // this node.
+ visitQueue = append(visitQueue, source)
+ pathsTo[source] = [][]issuerID{{source}}
+
+ // Begin building paths.
+ //
+ // Loop invariant:
+ // pathsTo[x] contains valid paths to reach this node, from source.
+ for len(visitQueue) > 0 {
+ var current issuerID
+ current, visitQueue = visitQueue[0], visitQueue[1:]
+
+ // If we've already processed this node, we have a cycle. Skip this
+ // node for now; we'll build cycles later.
+ if processed, ok := cycleVisited[current]; ok && processed {
+ continue
+ }
+
+ // Mark this node as visited for next time.
+ cycleVisited[current] = true
+ if _, ok := visitCounts[current]; !ok {
+ visitCounts[current] = 0
+ }
+ visitCounts[current] += 1
+
+ // For every child of this node...
+ children, ok := issuerIdChildrenMap[current]
+ if !ok {
+ // Node has no children, nothing else we can do.
+ continue
+ }
+
+ for _, child := range children {
+ // Ensure we can visit this child; exclude processedIssuers and
+ // exclude lists.
+ if childProcessed, ok := processedIssuers[child]; ok && childProcessed {
+ continue
+ }
+
+ skipNode := false
+ for _, excluded := range exclude {
+ if excluded == child {
+ skipNode = true
+ break
+ }
+ }
+
+ if skipNode {
+ continue
+ }
+
+ // Track this parent->child relationship to know when to exit.
+ setOfParents, ok := parentCounts[child]
+ if !ok {
+ setOfParents = make(map[issuerID]bool)
+ parentCounts[child] = setOfParents
+ }
+ _, existingParent := setOfParents[current]
+ setOfParents[current] = true
+
+ // Since we know that we can visit this node, we should now build
+ // all destination paths using this node, from our current node.
+ //
+ // Since these are all starting at a single path from source,
+ // if we have any cycles back to source, we'll find them here.
+ //
+ // Only add this if it is a net-new path that doesn't repeat
+ // (either internally -- indicating an internal cycle -- or
+ // externally with an existing path).
+ addedPath := false
+ if _, ok := pathsTo[child]; !ok {
+ pathsTo[child] = make([][]issuerID, 0)
+ }
+
+ for _, path := range pathsTo[current] {
+ if child != source {
+ // We only care about source->source cycles. If this
+ // cycles, but isn't a source->source cycle, don't add
+ // this path.
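+ //
+ // (Illustrative: with source S and current path [S, X],
+ // stepping onto X again would repeat X without closing at
+ // S -- an internal loop -- so that extension is dropped.)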
+ foundSelf := false
+ for _, node := range path {
+ if child == node {
+ foundSelf = true
+ break
+ }
+ }
+ if foundSelf {
+ // Skip this path.
+ continue
+ }
+ }
+
+ if len(path) > 1024*1024*1024 {
+ return nil, errutil.InternalError{Err: fmt.Sprintf("Error updating certificate path: path of length %d is too long", len(path))}
+ }
+ // Make sure to deep copy the path.
+ newPath := make([]issuerID, 0, len(path)+1)
+ newPath = append(newPath, path...)
+ newPath = append(newPath, child)
+
+ isSamePath := false
+ for _, childPath := range pathsTo[child] {
+ if len(childPath) != len(newPath) {
+ continue
+ }
+
+ isSamePath = true
+ for index, node := range childPath {
+ if newPath[index] != node {
+ isSamePath = false
+ break
+ }
+ }
+
+ if isSamePath {
+ break
+ }
+ }
+
+ if !isSamePath {
+ pathsTo[child] = append(pathsTo[child], newPath)
+ addedPath = true
+ }
+ }
+
+ // Add this child as a candidate to visit next.
+ visitQueue = append(visitQueue, child)
+
+ // If there's a new parent or we found a new path, then we should
+ // revisit this child, to update _its_ children and see if there's
+ // another new path. Eventually the paths will stabilize and we'll
+ // end up with no new parents or paths.
+ if !existingParent || addedPath {
+ cycleVisited[child] = false
+ }
+ }
+ }
+
+ // Ok, we've now exited from our loop. Any cycles would've been detected
+ // and their paths recorded in pathsTo. Now we can iterate over these
+ // (starting at source), clean them up and validate them.
+ var cycles [][]issuerID
+ for _, cycle := range pathsTo[source] {
+ // Skip the trivial cycle.
+ if len(cycle) == 1 && cycle[0] == source {
+ continue
+ }
+
+ // Validate the cycle starts with source; anything else is a bug in
+ // the path construction above.
+ if cycle[0] != source {
+ return nil, fmt.Errorf("cycle (%v) unexpectedly starts with node %v; expected to start with %v", cycle, cycle[0], source)
+ }
+
+ // Paths that never looped back to source aren't cycles; skip them.
+ if cycle[len(cycle)-1] != source {
+ continue
+ }
+
+ truncatedCycle := cycle[0 : len(cycle)-1]
+ if len(truncatedCycle) >= maxCycleSize {
+ return nil, fmt.Errorf("cycle (%v) exceeds max size: %v > %v", cycle, len(cycle), maxCycleSize)
+ }
+
+ // Now one last thing: our cycle was built via parent->child
+ // traversal, but we want child->parent ordered cycles. So,
+ // just reverse it.
+ reversed := reversedCycle(truncatedCycle)
+ cycles = appendCycleIfNotExisting(cycles, reversed)
+ }
+
+ // Sort cycles from longest->shortest.
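+ // (A stable sort keeps equal-length cycles in a deterministic
+ // relative order across rebuilds, matching the stable-ordering
+ // guarantee made for chains elsewhere in this file.)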
+ sort.SliceStable(cycles, func(i, j int) bool { + return len(cycles[i]) > len(cycles[j]) + }) + + return cycles, nil +} + +func reversedCycle(cycle []issuerID) []issuerID { + var result []issuerID + for index := len(cycle) - 1; index >= 0; index-- { + result = append(result, cycle[index]) + } + + return result +} + +func computeParentsFromClosure( + processedIssuers map[issuerID]bool, + issuerIdParentsMap map[issuerID][]issuerID, + closure map[issuerID]bool, +) (map[issuerID]bool, bool) { + parents := make(map[issuerID]bool) + for node := range closure { + nodeParents, ok := issuerIdParentsMap[node] + if !ok { + continue + } + + for _, parent := range nodeParents { + if nodeInClosure, ok := closure[parent]; ok && nodeInClosure { + continue + } + + parents[parent] = true + if processed, ok := processedIssuers[parent]; ok && processed { + continue + } + + return nil, false + } + } + + return parents, true +} + +func addNodeCertsToEntry( + issuerIdEntryMap map[issuerID]*issuerEntry, + issuerIdChildrenMap map[issuerID][]issuerID, + includedParentCerts map[string]bool, + entry *issuerEntry, + issuersCollection ...[]issuerID, +) { + for _, collection := range issuersCollection { + // Find a starting point into this collection such that it verifies + // something in the existing collection. + offset := 0 + for index, issuer := range collection { + children, ok := issuerIdChildrenMap[issuer] + if !ok { + continue + } + + foundChild := false + for _, child := range children { + childEntry := issuerIdEntryMap[child] + if inChain, ok := includedParentCerts[childEntry.Certificate]; ok && inChain { + foundChild = true + break + } + } + + if foundChild { + offset = index + break + } + } + + // Assumption: collection is in child -> parent order. For cliques, + // this is trivially true because everyone can validate each other, + // but for cycles we have to ensure that in findAllCyclesWithNode. + // This allows us to build the chain in the correct order. + for _, issuer := range append(collection[offset:], collection[0:offset]...) { + nodeEntry := issuerIdEntryMap[issuer] + addToChainIfNotExisting(includedParentCerts, entry, nodeEntry.Certificate) + } + } +} + +func addParentChainsToEntry( + issuerIdEntryMap map[issuerID]*issuerEntry, + includedParentCerts map[string]bool, + entry *issuerEntry, + parents map[issuerID]bool, +) { + for parent := range parents { + nodeEntry := issuerIdEntryMap[parent] + for _, cert := range nodeEntry.CAChain { + addToChainIfNotExisting(includedParentCerts, entry, cert) + } + } +} diff --git a/builtin/logical/pki/cmd/pki/main.go b/builtin/logical/pki/cmd/pki/main.go new file mode 100644 index 0000000..7c804be --- /dev/null +++ b/builtin/logical/pki/cmd/pki/main.go @@ -0,0 +1,34 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package main
+
+import (
+ "os"
+
+ hclog "github.com/hashicorp/go-hclog"
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/builtin/logical/pki"
+ "github.com/hashicorp/vault/sdk/plugin"
+)
+
+func main() {
+ apiClientMeta := &api.PluginAPIClientMeta{}
+ flags := apiClientMeta.FlagSet()
+ flags.Parse(os.Args[1:])
+
+ tlsConfig := apiClientMeta.GetTLSConfig()
+ tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig)
+
+ if err := plugin.ServeMultiplex(&plugin.ServeOpts{
+ BackendFactoryFunc: pki.Factory,
+ // set the TLSProviderFunc so that the plugin maintains backwards
+ // compatibility with Vault versions that don’t support plugin AutoMTLS
+ TLSProviderFunc: tlsProviderFunc,
+ }); err != nil {
+ logger := hclog.New(&hclog.LoggerOptions{})
+
+ logger.Error("plugin shutting down", "error", err)
+ os.Exit(1)
+ }
+}
diff --git a/builtin/logical/pki/config_util.go b/builtin/logical/pki/config_util.go
new file mode 100644
index 0000000..8081455
--- /dev/null
+++ b/builtin/logical/pki/config_util.go
@@ -0,0 +1,121 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package pki
+
+import (
+ "fmt"
+ "strings"
+ "time"
+)
+
+func (sc *storageContext) isDefaultKeySet() (bool, error) {
+ config, err := sc.getKeysConfig()
+ if err != nil {
+ return false, err
+ }
+
+ return strings.TrimSpace(config.DefaultKeyId.String()) != "", nil
+}
+
+func (sc *storageContext) isDefaultIssuerSet() (bool, error) {
+ config, err := sc.getIssuersConfig()
+ if err != nil {
+ return false, err
+ }
+
+ return strings.TrimSpace(config.DefaultIssuerId.String()) != "", nil
+}
+
+func (sc *storageContext) updateDefaultKeyId(id keyID) error {
+ config, err := sc.getKeysConfig()
+ if err != nil {
+ return err
+ }
+
+ if config.DefaultKeyId != id {
+ return sc.setKeysConfig(&keyConfigEntry{
+ DefaultKeyId: id,
+ })
+ }
+
+ return nil
+}
+
+func (sc *storageContext) updateDefaultIssuerId(id issuerID) error {
+ config, err := sc.getIssuersConfig()
+ if err != nil {
+ return err
+ }
+
+ if config.DefaultIssuerId != id {
+ config.DefaultIssuerId = id
+ return sc.setIssuersConfig(config)
+ }
+
+ return nil
+}
+
+func (sc *storageContext) changeDefaultIssuerTimestamps(oldDefault issuerID, newDefault issuerID) error {
+ if newDefault == oldDefault {
+ return nil
+ }
+
+ now := time.Now().UTC()
+
+ // When the default issuer changes, we need to modify four
+ // pieces of information:
+ //
+ // 1. The old default issuer's modification time, as it no
+ // longer works for the /cert/ca path.
+ // 2. The new default issuer's modification time, as it now
+ // works for the /cert/ca path.
+ // 3. & 4. Both issuers' CRLs, as they behave the same, under
+ // the /cert/crl path!
+ for _, thisId := range []issuerID{oldDefault, newDefault} {
+ if len(thisId) == 0 {
+ continue
+ }
+
+ // 1 & 2 above.
+ issuer, err := sc.fetchIssuerById(thisId)
+ if err != nil {
+ // Due to the lack of transactions, if we deleted the default
+ // issuer (successfully), but the subsequent issuer config write
+ // (to clear the default issuer's old id) failed, we might have
+ // an inconsistent config. If we later hit this loop (and flush
+ // these timestamps again -- perhaps because the operator
+ // selected a new default), we'd have erred out here, because
+ // the since-deleted default issuer doesn't exist. In this case,
+ // skip the issuer instead of bailing.
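+ //
+ // (Aside, a concrete timeline of the inconsistency described
+ // above, with hypothetical issuers X and Y:
+ // 1. delete default issuer X -- succeeds
+ // 2. write issuer config clearing the default -- fails
+ // 3. operator later selects Y as the new default
+ // 4. this loop runs with oldDefault == X; the fetch fails
+ // Skipping X here keeps step 4 from blocking the update.)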
+ err := fmt.Errorf("unable to update issuer (%v)'s modification time: error fetching issuer: %w", thisId, err)
+ if strings.Contains(err.Error(), "does not exist") {
+ sc.Backend.Logger().Warn(err.Error())
+ continue
+ }
+
+ return err
+ }
+
+ issuer.LastModified = now
+ err = sc.writeIssuer(issuer)
+ if err != nil {
+ return fmt.Errorf("unable to update issuer (%v)'s modification time: error persisting issuer: %w", thisId, err)
+ }
+ }
+
+ // Fetch and update the internalCRLConfigEntry (3&4).
+ cfg, err := sc.getLocalCRLConfig()
+ if err != nil {
+ return fmt.Errorf("unable to update local CRL config's modification time: error fetching local CRL config: %w", err)
+ }
+
+ cfg.LastModified = now
+ cfg.DeltaLastModified = now
+ err = sc.setLocalCRLConfig(cfg)
+ if err != nil {
+ return fmt.Errorf("unable to update local CRL config's modification time: error persisting local CRL config: %w", err)
+ }
+
+ return nil
+}
diff --git a/builtin/logical/pki/crl_test.go b/builtin/logical/pki/crl_test.go
new file mode 100644
index 0000000..ebcb9a6
--- /dev/null
+++ b/builtin/logical/pki/crl_test.go
@@ -0,0 +1,1528 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package pki
+
+import (
+ "context"
+ "encoding/asn1"
+ "encoding/json"
+ "fmt"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/helper/constants"
+ vaulthttp "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/sdk/helper/testhelpers/schema"
+ "github.com/hashicorp/vault/sdk/logical"
+ "github.com/hashicorp/vault/vault"
+
+ "github.com/hashicorp/go-secure-stdlib/parseutil"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestBackend_CRL_EnableDisableRoot(t *testing.T) {
+ t.Parallel()
+ b, s := CreateBackendWithStorage(t)
+
+ resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{
+ "ttl": "40h",
+ "common_name": "myvault.com",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ caSerial := resp.Data["serial_number"].(string)
+
+ crlEnableDisableTestForBackend(t, b, s, []string{caSerial})
+}
+
+func TestBackend_CRLConfigUpdate(t *testing.T) {
+ t.Parallel()
+ b, s := CreateBackendWithStorage(t)
+
+ // Write a legacy config to storage.
+ type legacyConfig struct {
+ Expiry string `json:"expiry"`
+ Disable bool `json:"disable"`
+ }
+ oldConfig := legacyConfig{Expiry: "24h", Disable: false}
+ entry, err := logical.StorageEntryJSON("config/crl", oldConfig)
+ require.NoError(t, err, "generate storage entry object with legacy config")
+ err = s.Put(ctx, entry)
+ require.NoError(t, err, "failed writing legacy config")
+
+ // Now let's read it.
+ resp, err := CBRead(b, s, "config/crl") + requireSuccessNonNilResponse(t, resp, err) + requireFieldsSetInResp(t, resp, "disable", "expiry", "ocsp_disable", "auto_rebuild", "auto_rebuild_grace_period") + + require.Equal(t, "24h", resp.Data["expiry"]) + require.Equal(t, false, resp.Data["disable"]) + require.Equal(t, defaultCrlConfig.OcspDisable, resp.Data["ocsp_disable"]) + require.Equal(t, defaultCrlConfig.OcspExpiry, resp.Data["ocsp_expiry"]) + require.Equal(t, defaultCrlConfig.AutoRebuild, resp.Data["auto_rebuild"]) + require.Equal(t, defaultCrlConfig.AutoRebuildGracePeriod, resp.Data["auto_rebuild_grace_period"]) +} + +func TestBackend_CRLConfig(t *testing.T) { + t.Parallel() + + tests := []struct { + expiry string + disable bool + ocspDisable bool + ocspExpiry string + autoRebuild bool + autoRebuildGracePeriod string + }{ + {expiry: "24h", disable: true, ocspDisable: true, ocspExpiry: "72h", autoRebuild: false, autoRebuildGracePeriod: "36h"}, + {expiry: "16h", disable: false, ocspDisable: true, ocspExpiry: "0h", autoRebuild: true, autoRebuildGracePeriod: "1h"}, + {expiry: "8h", disable: true, ocspDisable: false, ocspExpiry: "24h", autoRebuild: false, autoRebuildGracePeriod: "24h"}, + } + for _, tc := range tests { + name := fmt.Sprintf("%s-%t-%t", tc.expiry, tc.disable, tc.ocspDisable) + t.Run(name, func(t *testing.T) { + b, s := CreateBackendWithStorage(t) + + resp, err := CBWrite(b, s, "config/crl", map[string]interface{}{ + "expiry": tc.expiry, + "disable": tc.disable, + "ocsp_disable": tc.ocspDisable, + "ocsp_expiry": tc.ocspExpiry, + "auto_rebuild": tc.autoRebuild, + "auto_rebuild_grace_period": tc.autoRebuildGracePeriod, + }) + requireSuccessNonNilResponse(t, resp, err) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/crl"), logical.UpdateOperation), resp, true) + + resp, err = CBRead(b, s, "config/crl") + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/crl"), logical.ReadOperation), resp, true) + + requireSuccessNonNilResponse(t, resp, err) + requireFieldsSetInResp(t, resp, "disable", "expiry", "ocsp_disable", "auto_rebuild", "auto_rebuild_grace_period") + + require.Equal(t, tc.expiry, resp.Data["expiry"]) + require.Equal(t, tc.disable, resp.Data["disable"]) + require.Equal(t, tc.ocspDisable, resp.Data["ocsp_disable"]) + require.Equal(t, tc.ocspExpiry, resp.Data["ocsp_expiry"]) + require.Equal(t, tc.autoRebuild, resp.Data["auto_rebuild"]) + require.Equal(t, tc.autoRebuildGracePeriod, resp.Data["auto_rebuild_grace_period"]) + }) + } + + badValueTests := []struct { + expiry string + disable string + ocspDisable string + ocspExpiry string + autoRebuild string + autoRebuildGracePeriod string + }{ + {expiry: "not a duration", disable: "true", ocspDisable: "true", ocspExpiry: "72h", autoRebuild: "true", autoRebuildGracePeriod: "1d"}, + {expiry: "16h", disable: "not a boolean", ocspDisable: "true", ocspExpiry: "72h", autoRebuild: "true", autoRebuildGracePeriod: "1d"}, + {expiry: "8h", disable: "true", ocspDisable: "not a boolean", ocspExpiry: "72h", autoRebuild: "true", autoRebuildGracePeriod: "1d"}, + {expiry: "8h", disable: "true", ocspDisable: "true", ocspExpiry: "not a duration", autoRebuild: "true", autoRebuildGracePeriod: "1d"}, + {expiry: "8h", disable: "true", ocspDisable: "true", ocspExpiry: "-1", autoRebuild: "true", autoRebuildGracePeriod: "1d"}, + {expiry: "8h", disable: "true", ocspDisable: "true", ocspExpiry: "72h", autoRebuild: "not a boolean", autoRebuildGracePeriod: "1d"}, + {expiry: "8h", disable: "true", ocspDisable: 
"true", ocspExpiry: "-1", autoRebuild: "true", autoRebuildGracePeriod: "not a duration"}, + } + for _, tc := range badValueTests { + name := fmt.Sprintf("bad-%s-%s-%s", tc.expiry, tc.disable, tc.ocspDisable) + t.Run(name, func(t *testing.T) { + b, s := CreateBackendWithStorage(t) + + _, err := CBWrite(b, s, "config/crl", map[string]interface{}{ + "expiry": tc.expiry, + "disable": tc.disable, + "ocsp_disable": tc.ocspDisable, + "ocsp_expiry": tc.ocspExpiry, + "auto_rebuild": tc.autoRebuild, + "auto_rebuild_grace_period": tc.autoRebuildGracePeriod, + }) + require.Error(t, err) + }) + } +} + +func TestBackend_CRL_AllKeyTypeSigAlgos(t *testing.T) { + t.Parallel() + + type testCase struct { + KeyType string + KeyBits int + SigBits int + UsePSS bool + SigAlgo string + } + + testCases := []testCase{ + {"rsa", 2048, 256, false, "SHA256WithRSA"}, + {"rsa", 2048, 384, false, "SHA384WithRSA"}, + {"rsa", 2048, 512, false, "SHA512WithRSA"}, + {"rsa", 2048, 256, true, "SHA256WithRSAPSS"}, + {"rsa", 2048, 384, true, "SHA384WithRSAPSS"}, + {"rsa", 2048, 512, true, "SHA512WithRSAPSS"}, + {"ec", 256, 256, false, "ECDSAWithSHA256"}, + {"ec", 384, 384, false, "ECDSAWithSHA384"}, + {"ec", 521, 521, false, "ECDSAWithSHA512"}, + {"ed25519", 0, 0, false, "Ed25519"}, + } + + for index, tc := range testCases { + t.Logf("tv %v", index) + b, s := CreateBackendWithStorage(t) + + resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "myvault.com", + "key_type": tc.KeyType, + "key_bits": tc.KeyBits, + "signature_bits": tc.SigBits, + "use_pss": tc.UsePSS, + }) + if err != nil { + t.Fatalf("tc %v: %v", index, err) + } + caSerial := resp.Data["serial_number"].(string) + + resp, err = CBRead(b, s, "issuer/default") + requireSuccessNonNilResponse(t, resp, err, "fetching issuer should return data") + require.Equal(t, tc.SigAlgo, resp.Data["revocation_signature_algorithm"]) + + crlEnableDisableTestForBackend(t, b, s, []string{caSerial}) + + crl := getParsedCrlFromBackend(t, b, s, "crl") + if strings.HasSuffix(tc.SigAlgo, "PSS") { + algo := crl.SignatureAlgorithm + pssOid := asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 10} + if !algo.Algorithm.Equal(pssOid) { + t.Fatalf("tc %v failed: expected sig-alg to be %v / got %v", index, pssOid, algo) + } + } + } +} + +func TestBackend_CRL_EnableDisableIntermediateWithRoot(t *testing.T) { + t.Parallel() + crlEnableDisableIntermediateTestForBackend(t, true) +} + +func TestBackend_CRL_EnableDisableIntermediateWithoutRoot(t *testing.T) { + t.Parallel() + crlEnableDisableIntermediateTestForBackend(t, false) +} + +func crlEnableDisableIntermediateTestForBackend(t *testing.T, withRoot bool) { + b_root, s_root := CreateBackendWithStorage(t) + + resp, err := CBWrite(b_root, s_root, "root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "myvault.com", + }) + if err != nil { + t.Fatal(err) + } + rootSerial := resp.Data["serial_number"].(string) + + b_int, s_int := CreateBackendWithStorage(t) + + resp, err = CBWrite(b_int, s_int, "intermediate/generate/internal", map[string]interface{}{ + "common_name": "intermediate myvault.com", + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected intermediate CSR info") + } + intermediateData := resp.Data + + resp, err = CBWrite(b_root, s_root, "root/sign-intermediate", map[string]interface{}{ + "ttl": "30h", + "csr": intermediateData["csr"], + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected signed intermediate info") + 
} + intermediateSignedData := resp.Data + certs := intermediateSignedData["certificate"].(string) + caSerial := intermediateSignedData["serial_number"].(string) + caSerials := []string{caSerial} + if withRoot { + intermediateAndRootCert := intermediateSignedData["ca_chain"].([]string) + certs = strings.Join(intermediateAndRootCert, "\n") + caSerials = append(caSerials, rootSerial) + } + + _, err = CBWrite(b_int, s_int, "intermediate/set-signed", map[string]interface{}{ + "certificate": certs, + }) + if err != nil { + t.Fatal(err) + } + crlEnableDisableTestForBackend(t, b_int, s_int, caSerials) +} + +func crlEnableDisableTestForBackend(t *testing.T, b *backend, s logical.Storage, caSerials []string) { + var err error + + _, err = CBWrite(b, s, "roles/test", map[string]interface{}{ + "allow_bare_domains": true, + "allow_subdomains": true, + "allowed_domains": "foobar.com", + "generate_lease": true, + }) + if err != nil { + t.Fatal(err) + } + + serials := make(map[int]string) + for i := 0; i < 6; i++ { + resp, err := CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "test.foobar.com", + }) + if err != nil { + t.Fatal(err) + } + serials[i] = resp.Data["serial_number"].(string) + } + + test := func(numRevokedExpected int, expectedSerials ...string) { + certList := getParsedCrlFromBackend(t, b, s, "crl").TBSCertList + lenList := len(certList.RevokedCertificates) + if lenList != numRevokedExpected { + t.Fatalf("expected %d revoked certificates, found %d", numRevokedExpected, lenList) + } + + for _, serialNum := range expectedSerials { + requireSerialNumberInCRL(t, certList, serialNum) + } + + if len(certList.Extensions) > 2 { + t.Fatalf("expected up to 2 extensions on main CRL but got %v", len(certList.Extensions)) + } + + // Since this test assumes a complete CRL was rebuilt, we can grab + // the delta CRL and ensure it is empty. + deltaList := getParsedCrlFromBackend(t, b, s, "crl/delta").TBSCertList + lenDeltaList := len(deltaList.RevokedCertificates) + if lenDeltaList != 0 { + t.Fatalf("expected zero revoked certificates on the delta CRL due to complete CRL rebuild, found %d", lenDeltaList) + } + + if len(deltaList.Extensions) != len(certList.Extensions)+1 { + t.Fatalf("expected one more extensions on delta CRL than main but got %v on main vs %v on delta", len(certList.Extensions), len(deltaList.Extensions)) + } + } + + revoke := func(serialIndex int) { + _, err = CBWrite(b, s, "revoke", map[string]interface{}{ + "serial_number": serials[serialIndex], + }) + if err != nil { + t.Fatal(err) + } + + for _, caSerial := range caSerials { + _, err = CBWrite(b, s, "revoke", map[string]interface{}{ + "serial_number": caSerial, + }) + if err == nil { + t.Fatal("expected error") + } + } + } + + toggle := func(disabled bool) { + _, err = CBWrite(b, s, "config/crl", map[string]interface{}{ + "disable": disabled, + }) + if err != nil { + t.Fatal(err) + } + } + + test(0) + revoke(0) + revoke(1) + test(2, serials[0], serials[1]) + toggle(true) + test(0) + revoke(2) + revoke(3) + test(0) + toggle(false) + test(4, serials[0], serials[1], serials[2], serials[3]) + revoke(4) + revoke(5) + test(6) + toggle(true) + test(0) + toggle(false) + test(6) + + // The rotate command should reset the update time of the CRL. 
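+ //
+ // (Aside: thisUpdate in a CRL is encoded with one-second
+ // granularity, so the sleep below guarantees the rotated CRL gets
+ // a distinct timestamp.)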
+ crlCreationTime1 := getParsedCrlFromBackend(t, b, s, "crl").TBSCertList.ThisUpdate
+ time.Sleep(1 * time.Second)
+ _, err = CBRead(b, s, "crl/rotate")
+ require.NoError(t, err)
+
+ crlCreationTime2 := getParsedCrlFromBackend(t, b, s, "crl").TBSCertList.ThisUpdate
+ require.NotEqual(t, crlCreationTime1, crlCreationTime2)
+}
+
+func TestBackend_Secondary_CRL_Rebuilding(t *testing.T) {
+ t.Parallel()
+ ctx := context.Background()
+ b, s := CreateBackendWithStorage(t)
+ sc := b.makeStorageContext(ctx, s)
+
+ // Write the issuer/key out to storage directly, without going through
+ // the API call, just as replication would.
+ bundle := genCertBundle(t, b, s)
+ issuer, _, err := sc.writeCaBundle(bundle, "", "")
+ require.NoError(t, err)
+
+ // Just to validate, before we call the invalidate function, make sure
+ // our CRL has not been generated and we get a nil response.
+ resp := requestCrlFromBackend(t, s, b)
+ require.Nil(t, resp.Data["http_raw_body"])
+
+ // This should force any calls from now on to rebuild our CRL, even a read.
+ b.invalidate(ctx, issuerPrefix+issuer.ID.String())
+
+ // Perform the read operation again; we should have a valid CRL now...
+ resp = requestCrlFromBackend(t, s, b)
+ crl := parseCrlPemBytes(t, resp.Data["http_raw_body"].([]byte))
+ require.Equal(t, 0, len(crl.RevokedCertificates))
+}
+
+func TestCrlRebuilder(t *testing.T) {
+ t.Parallel()
+ ctx := context.Background()
+ b, s := CreateBackendWithStorage(t)
+ sc := b.makeStorageContext(ctx, s)
+
+ // Write the issuer/key out to storage directly, without going through
+ // the API call, just as replication would.
+ bundle := genCertBundle(t, b, s)
+ _, _, err := sc.writeCaBundle(bundle, "", "")
+ require.NoError(t, err)
+
+ cb := newCRLBuilder(true /* can rebuild and write CRLs */)
+
+ // Force an initial build
+ warnings, err := cb.rebuild(sc, true)
+ require.NoError(t, err, "Failed to rebuild CRL")
+ require.Empty(t, warnings, "unexpectedly got warnings rebuilding CRL")
+
+ resp := requestCrlFromBackend(t, s, b)
+ crl1 := parseCrlPemBytes(t, resp.Data["http_raw_body"].([]byte))
+
+ // We shouldn't rebuild within this call.
+ warnings, err = cb.rebuildIfForced(sc)
+ require.NoError(t, err, "Failed to rebuild if forced CRL")
+ require.Empty(t, warnings, "unexpectedly got warnings rebuilding CRL")
+
+ resp = requestCrlFromBackend(t, s, b)
+ crl2 := parseCrlPemBytes(t, resp.Data["http_raw_body"].([]byte))
+ require.Equal(t, crl1.ThisUpdate, crl2.ThisUpdate, "According to the update field, we rebuilt the CRL")
+
+ // Make sure we have ticked over to the next second
+ for {
+ diff := time.Since(crl1.ThisUpdate)
+ if diff.Seconds() >= 1 {
+ break
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+
+ // This should rebuild the CRL
+ cb.requestRebuildIfActiveNode(b)
+ warnings, err = cb.rebuildIfForced(sc)
+ require.NoError(t, err, "Failed to rebuild if forced CRL")
+ require.Empty(t, warnings, "unexpectedly got warnings rebuilding CRL")
+ resp = requestCrlFromBackend(t, s, b)
+ schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("crl/pem"), logical.ReadOperation), resp, true)
+
+ crl3 := parseCrlPemBytes(t, resp.Data["http_raw_body"].([]byte))
+ require.True(t, crl1.ThisUpdate.Before(crl3.ThisUpdate),
+ "initial crl time: %#v not before next crl rebuild time: %#v", crl1.ThisUpdate, crl3.ThisUpdate)
+}
+
+func TestBYOC(t *testing.T) {
+ t.Parallel()
+
+ b, s := CreateBackendWithStorage(t)
+
+ // Create a root CA.
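+ //
+ // (Aside: against a live server with a mount at pki/, the same
+ // call would be, roughly:
+ //
+ // vault write pki/root/generate/internal \
+ // common_name="root example.com" issuer_name=root key_type=ec
+ // )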
+ resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "root example.com", + "issuer_name": "root", + "key_type": "ec", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data["certificate"]) + oldRoot := resp.Data["certificate"].(string) + + // Create a role for issuance. + _, err = CBWrite(b, s, "roles/local-testing", map[string]interface{}{ + "allow_any_name": true, + "enforce_hostnames": false, + "key_type": "ec", + "ttl": "75s", + "no_store": "true", + }) + require.NoError(t, err) + + // Issue a leaf cert and ensure we can revoke it. + resp, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ + "common_name": "testing", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data["certificate"]) + + _, err = CBWrite(b, s, "revoke", map[string]interface{}{ + "certificate": resp.Data["certificate"], + }) + require.NoError(t, err) + + // Issue a second leaf, but hold onto it for now. + resp, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ + "common_name": "testing2", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data["certificate"]) + notStoredCert := resp.Data["certificate"].(string) + + // Update the role to make things stored and issue another cert. + _, err = CBWrite(b, s, "roles/stored-testing", map[string]interface{}{ + "allow_any_name": true, + "enforce_hostnames": false, + "key_type": "ec", + "ttl": "75s", + "no_store": "false", + }) + require.NoError(t, err) + + // Issue a leaf cert and ensure we can revoke it. + resp, err = CBWrite(b, s, "issue/stored-testing", map[string]interface{}{ + "common_name": "testing", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data["certificate"]) + storedCert := resp.Data["certificate"].(string) + + // Delete the root and regenerate a new one. + _, err = CBDelete(b, s, "issuer/default") + require.NoError(t, err) + + resp, err = CBList(b, s, "issuers") + require.NoError(t, err) + require.Equal(t, len(resp.Data), 0) + + _, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "root2 example.com", + "issuer_name": "root2", + "key_type": "ec", + }) + require.NoError(t, err) + + // Issue a new leaf and revoke that one. + resp, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ + "common_name": "testing3", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data["certificate"]) + + _, err = CBWrite(b, s, "revoke", map[string]interface{}{ + "certificate": resp.Data["certificate"], + }) + require.NoError(t, err) + + // Now attempt to revoke the earlier leaves. The first should fail since + // we deleted its issuer, but the stored one should succeed. + _, err = CBWrite(b, s, "revoke", map[string]interface{}{ + "certificate": notStoredCert, + }) + require.Error(t, err) + + _, err = CBWrite(b, s, "revoke", map[string]interface{}{ + "certificate": storedCert, + }) + require.NoError(t, err) + + // Import the old root again and revoke the no stored leaf should work. + _, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ + "pem_bundle": oldRoot, + }) + require.NoError(t, err) + + _, err = CBWrite(b, s, "revoke", map[string]interface{}{ + "certificate": notStoredCert, + }) + require.NoError(t, err) +} + +func TestPoP(t *testing.T) { + t.Parallel() + + b, s := CreateBackendWithStorage(t) + + // Create a root CA. 
+ resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "root example.com", + "issuer_name": "root", + "key_type": "ec", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data["certificate"]) + oldRoot := resp.Data["certificate"].(string) + + // Create a role for issuance. + _, err = CBWrite(b, s, "roles/local-testing", map[string]interface{}{ + "allow_any_name": true, + "enforce_hostnames": false, + "key_type": "ec", + "ttl": "75s", + "no_store": "true", + }) + require.NoError(t, err) + + // Issue a leaf cert and ensure we can revoke it with the private key and + // an explicit certificate. + resp, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ + "common_name": "testing1", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data["certificate"]) + + resp, err = CBWrite(b, s, "revoke-with-key", map[string]interface{}{ + "certificate": resp.Data["certificate"], + "private_key": resp.Data["private_key"], + }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("revoke-with-key"), logical.UpdateOperation), resp, true) + require.NoError(t, err) + + // Issue a second leaf, but hold onto it for now. + resp, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ + "common_name": "testing2", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data["certificate"]) + notStoredCert := resp.Data["certificate"].(string) + notStoredKey := resp.Data["private_key"].(string) + + // Update the role to make things stored and issue another cert. + _, err = CBWrite(b, s, "roles/stored-testing", map[string]interface{}{ + "allow_any_name": true, + "enforce_hostnames": false, + "key_type": "ec", + "ttl": "75s", + "no_store": "false", + }) + require.NoError(t, err) + + // Issue a leaf and ensure we can revoke it via serial number and private key. + resp, err = CBWrite(b, s, "issue/stored-testing", map[string]interface{}{ + "common_name": "testing3", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data["certificate"]) + require.NotEmpty(t, resp.Data["serial_number"]) + require.NotEmpty(t, resp.Data["private_key"]) + + _, err = CBWrite(b, s, "revoke-with-key", map[string]interface{}{ + "serial_number": resp.Data["serial_number"], + "private_key": resp.Data["private_key"], + }) + require.NoError(t, err) + + // Issue a leaf cert and ensure we can revoke it after removing its root; + // hold onto it for now. + resp, err = CBWrite(b, s, "issue/stored-testing", map[string]interface{}{ + "common_name": "testing4", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data["certificate"]) + storedCert := resp.Data["certificate"].(string) + storedKey := resp.Data["private_key"].(string) + + // Delete the root and regenerate a new one. + _, err = CBDelete(b, s, "issuer/default") + require.NoError(t, err) + + resp, err = CBList(b, s, "issuers") + require.NoError(t, err) + require.Equal(t, len(resp.Data), 0) + + _, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "root2 example.com", + "issuer_name": "root2", + "key_type": "ec", + }) + require.NoError(t, err) + + // Issue a new leaf and revoke that one. 
+ resp, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ + "common_name": "testing5", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data["certificate"]) + + _, err = CBWrite(b, s, "revoke-with-key", map[string]interface{}{ + "certificate": resp.Data["certificate"], + "private_key": resp.Data["private_key"], + }) + require.NoError(t, err) + + // Now attempt to revoke the earlier leaves. The first should fail since + // we deleted its issuer, but the stored one should succeed. + _, err = CBWrite(b, s, "revoke-with-key", map[string]interface{}{ + "certificate": notStoredCert, + "private_key": notStoredKey, + }) + require.Error(t, err) + + // Incorrect combination (stored with not stored key) should fail. + _, err = CBWrite(b, s, "revoke-with-key", map[string]interface{}{ + "certificate": storedCert, + "private_key": notStoredKey, + }) + require.Error(t, err) + + // Correct combination (stored with stored) should succeed. + _, err = CBWrite(b, s, "revoke-with-key", map[string]interface{}{ + "certificate": storedCert, + "private_key": storedKey, + }) + require.NoError(t, err) + + // Import the old root again and revoke the no stored leaf should work. + _, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ + "pem_bundle": oldRoot, + }) + require.NoError(t, err) + + // Incorrect combination (not stored with stored key) should fail. + _, err = CBWrite(b, s, "revoke-with-key", map[string]interface{}{ + "certificate": notStoredCert, + "private_key": storedKey, + }) + require.Error(t, err) + + // Correct combination (not stored with not stored) should succeed. + _, err = CBWrite(b, s, "revoke-with-key", map[string]interface{}{ + "certificate": notStoredCert, + "private_key": notStoredKey, + }) + require.NoError(t, err) +} + +func TestIssuerRevocation(t *testing.T) { + t.Parallel() + + b, s := CreateBackendWithStorage(t) + + // Write a config with auto-rebuilding so that we can verify stuff doesn't + // appear on the delta CRL. + _, err := CBWrite(b, s, "config/crl", map[string]interface{}{ + "auto_rebuild": true, + "enable_delta": true, + }) + require.NoError(t, err) + + // Create a root CA. + resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "root example.com", + "issuer_name": "root", + "key_type": "ec", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data["certificate"]) + require.NotEmpty(t, resp.Data["serial_number"]) + // oldRoot := resp.Data["certificate"].(string) + oldRootSerial := resp.Data["serial_number"].(string) + + // Create a second root CA. We'll revoke this one and ensure it + // doesn't appear on the former's CRL. + resp, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "root2 example.com", + "issuer_name": "root2", + "key_type": "ec", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data["certificate"]) + require.NotEmpty(t, resp.Data["serial_number"]) + // revokedRoot := resp.Data["certificate"].(string) + revokedRootSerial := resp.Data["serial_number"].(string) + + // Shouldn't be able to revoke it by serial number. + _, err = CBWrite(b, s, "revoke", map[string]interface{}{ + "serial_number": revokedRootSerial, + }) + require.Error(t, err) + + // Revoke it. 
+ resp, err = CBWrite(b, s, "issuer/root2/revoke", map[string]interface{}{}) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issuer/root2/revoke"), logical.UpdateOperation), resp, true) + + require.NoError(t, err) + require.NotNil(t, resp) + require.NotZero(t, resp.Data["revocation_time"]) + + // Regenerate the CRLs + resp, err = CBRead(b, s, "crl/rotate") + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("crl/rotate"), logical.ReadOperation), resp, true) + + require.NoError(t, err) + + // Ensure the old cert isn't on its own CRL. + crl := getParsedCrlFromBackend(t, b, s, "issuer/root2/crl/der") + if requireSerialNumberInCRL(nil, crl.TBSCertList, revokedRootSerial) { + t.Fatalf("the serial number %v should not be on its own CRL as self-CRL appearance should not occur", revokedRootSerial) + } + + // Ensure the old cert isn't on the one's CRL. + crl = getParsedCrlFromBackend(t, b, s, "issuer/root/crl/der") + if requireSerialNumberInCRL(nil, crl.TBSCertList, revokedRootSerial) { + t.Fatalf("the serial number %v should not be on %v's CRL as they're separate roots", revokedRootSerial, oldRootSerial) + } + + // Create a role and ensure we can't use the revoked root. + _, err = CBWrite(b, s, "roles/local-testing", map[string]interface{}{ + "allow_any_name": true, + "enforce_hostnames": false, + "key_type": "ec", + "ttl": "75s", + }) + require.NoError(t, err) + + // Issue a leaf cert and ensure it fails (because the issuer is revoked). + resp, err = CBWrite(b, s, "issuer/root2/issue/local-testing", map[string]interface{}{ + "common_name": "testing", + }) + require.Error(t, err) + + // Issue an intermediate and ensure we can revoke it. + resp, err = CBWrite(b, s, "intermediate/generate/internal", map[string]interface{}{ + "common_name": "intermediate example.com", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data["csr"]) + intCsr := resp.Data["csr"].(string) + resp, err = CBWrite(b, s, "root/sign-intermediate", map[string]interface{}{ + "ttl": "30h", + "csr": intCsr, + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data["certificate"]) + require.NotEmpty(t, resp.Data["serial_number"]) + intCert := resp.Data["certificate"].(string) + intCertSerial := resp.Data["serial_number"].(string) + resp, err = CBWrite(b, s, "intermediate/set-signed", map[string]interface{}{ + "certificate": intCert, + }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("intermediate/set-signed"), logical.UpdateOperation), resp, true) + + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data["imported_issuers"]) + importedIssuers := resp.Data["imported_issuers"].([]string) + require.Equal(t, len(importedIssuers), 1) + intId := importedIssuers[0] + _, err = CBPatch(b, s, "issuer/"+intId, map[string]interface{}{ + "issuer_name": "int1", + }) + require.NoError(t, err) + + // Now issue a leaf with the intermediate. + resp, err = CBWrite(b, s, "issuer/int1/issue/local-testing", map[string]interface{}{ + "common_name": "testing", + }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issuer/int1/issue/local-testing"), logical.UpdateOperation), resp, true) + + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data["certificate"]) + require.NotEmpty(t, resp.Data["serial_number"]) + issuedSerial := resp.Data["serial_number"].(string) + + // Now revoke the intermediate. 
+ resp, err = CBWrite(b, s, "issuer/int1/revoke", map[string]interface{}{}) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotZero(t, resp.Data["revocation_time"]) + + // Update the CRLs and ensure it appears. + _, err = CBRead(b, s, "crl/rotate") + require.NoError(t, err) + crl = getParsedCrlFromBackend(t, b, s, "issuer/root/crl/der") + requireSerialNumberInCRL(t, crl.TBSCertList, intCertSerial) + crl = getParsedCrlFromBackend(t, b, s, "issuer/root/crl/delta/der") + if requireSerialNumberInCRL(nil, crl.TBSCertList, intCertSerial) { + t.Fatalf("expected intermediate serial NOT to appear on root's delta CRL, but did") + } + + // Ensure we can still revoke the issued leaf. + resp, err = CBWrite(b, s, "revoke", map[string]interface{}{ + "serial_number": issuedSerial, + }) + require.NoError(t, err) + require.NotNil(t, resp) + + // Ensure it appears on the intermediate's CRL. + _, err = CBRead(b, s, "crl/rotate") + require.NoError(t, err) + crl = getParsedCrlFromBackend(t, b, s, "issuer/int1/crl/der") + requireSerialNumberInCRL(t, crl.TBSCertList, issuedSerial) + + // Ensure we can't fetch the intermediate's cert by serial any more. + resp, err = CBRead(b, s, "cert/"+intCertSerial) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data["revocation_time"]) +} + +func TestAutoRebuild(t *testing.T) { + t.Parallel() + + // While we'd like to reduce this duration, we need to wait until + // the rollback manager timer ticks. With the new helper, we can + // modify the rollback manager timer period directly, allowing us + // to shorten the total test time significantly. + // + // We set the delta CRL time to ensure it executes prior to the + // main CRL rebuild, and the new CRL doesn't rebuild until after + // we're done. + newPeriod := 1 * time.Second + deltaPeriod := (newPeriod + 1*time.Second).String() + crlTime := (6*newPeriod + 2*time.Second).String() + gracePeriod := (3 * newPeriod).String() + delta := 2 * newPeriod + + // This test requires the periodicFunc to trigger, which requires we stand + // up a full test cluster. + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + // See notes below about usage of /sys/raw for reading cluster + // storage without barrier encryption. + EnableRaw: true, + RollbackPeriod: newPeriod, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + + // Mount PKI + err := client.Sys().Mount("pki", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "60h", + }, + }) + require.NoError(t, err) + + // Generate root. + resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "Root X1", + "key_type": "ec", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data) + require.NotEmpty(t, resp.Data["issuer_id"]) + rootIssuer := resp.Data["issuer_id"].(string) + + // Setup a testing role. + _, err = client.Logical().Write("pki/roles/local-testing", map[string]interface{}{ + "allow_any_name": true, + "enforce_hostnames": false, + "key_type": "ec", + }) + require.NoError(t, err) + + // Regression test: ensure we respond with the default values for CRL + // config when we haven't set any values yet. 
+ resp, err = client.Logical().Read("pki/config/crl") + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.Equal(t, resp.Data["expiry"], defaultCrlConfig.Expiry) + require.Equal(t, resp.Data["disable"], defaultCrlConfig.Disable) + require.Equal(t, resp.Data["ocsp_disable"], defaultCrlConfig.OcspDisable) + require.Equal(t, resp.Data["auto_rebuild"], defaultCrlConfig.AutoRebuild) + require.Equal(t, resp.Data["auto_rebuild_grace_period"], defaultCrlConfig.AutoRebuildGracePeriod) + require.Equal(t, resp.Data["enable_delta"], defaultCrlConfig.EnableDelta) + require.Equal(t, resp.Data["delta_rebuild_interval"], defaultCrlConfig.DeltaRebuildInterval) + + // Safety guard: we play with rebuild timing below. + _, err = client.Logical().Write("pki/config/crl", map[string]interface{}{ + "expiry": crlTime, + }) + require.NoError(t, err) + + // Issue a cert and revoke it. It should appear on the CRL right away. + resp, err = client.Logical().Write("pki/issue/local-testing", map[string]interface{}{ + "common_name": "example.com", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["serial_number"]) + leafSerial := resp.Data["serial_number"].(string) + + _, err = client.Logical().Write("pki/revoke", map[string]interface{}{ + "serial_number": leafSerial, + }) + require.NoError(t, err) + + defaultCrlPath := "/v1/pki/crl" + crl := getParsedCrlAtPath(t, client, defaultCrlPath).TBSCertList + lastCRLNumber := getCRLNumber(t, crl) + lastCRLExpiry := crl.NextUpdate + requireSerialNumberInCRL(t, crl, leafSerial) + + // Enable periodic rebuild of the CRL. + _, err = client.Logical().Write("pki/config/crl", map[string]interface{}{ + "expiry": crlTime, + "auto_rebuild": true, + "auto_rebuild_grace_period": gracePeriod, + "enable_delta": true, + "delta_rebuild_interval": deltaPeriod, + }) + require.NoError(t, err) + + // Wait for the CRL to update based on the configuration change we just did + // so that it doesn't grab the revocation we are going to do afterwards. + crl = waitForUpdatedCrl(t, client, defaultCrlPath, lastCRLNumber, lastCRLExpiry.Sub(time.Now())) + lastCRLNumber = getCRLNumber(t, crl) + lastCRLExpiry = crl.NextUpdate + + // Issue a cert and revoke it. + resp, err = client.Logical().Write("pki/issue/local-testing", map[string]interface{}{ + "common_name": "example.com", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["serial_number"]) + newLeafSerial := resp.Data["serial_number"].(string) + + _, err = client.Logical().Write("pki/revoke", map[string]interface{}{ + "serial_number": newLeafSerial, + }) + require.NoError(t, err) + + // Now, we want to test the issuer identification on revocation. This + // only happens as a distinct "step" when CRL building isn't done on + // each revocation. Pull the storage from the cluster (via the sys/raw + // endpoint which requires the mount UUID) and verify the revInfo contains + // a matching issuer. + pkiMount := findStorageMountUuid(t, client, "pki") + revEntryPath := "logical/" + pkiMount + "/" + revokedPath + normalizeSerial(newLeafSerial) + + // storage from cluster.Core[0] is a physical storage copy, not a logical + // storage. This difference means, if we were to do a storage.Get(...) + // on the above path, we'd read the barrier-encrypted value. This is less + // than useful for decoding, and fetching the proper storage view is a + // touch much work. 
So, assert EnableRaw above and (ab)use it here.
+ resp, err = client.Logical().Read("sys/raw/" + revEntryPath)
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ require.NotNil(t, resp.Data)
+ require.NotEmpty(t, resp.Data["value"])
+ revEntryValue := resp.Data["value"].(string)
+ var revInfo revocationInfo
+ err = json.Unmarshal([]byte(revEntryValue), &revInfo)
+ require.NoError(t, err)
+ require.Equal(t, revInfo.CertificateIssuer, issuerID(rootIssuer))
+
+ // New serial should not appear on CRL.
+ crl = getCrlCertificateList(t, client, "pki")
+ thisCRLNumber := getCRLNumber(t, crl)
+ requireSerialNumberInCRL(t, crl, leafSerial) // But the old one should.
+ now := time.Now()
+ graceInterval, _ := parseutil.ParseDurationSecond(gracePeriod)
+ expectedUpdate := lastCRLExpiry.Add(-1 * graceInterval)
+ if requireSerialNumberInCRL(nil, crl, newLeafSerial) {
+ // If we somehow lagged and we ended up needing to rebuild
+ // the CRL, we should avoid throwing an error.
+
+ if thisCRLNumber == lastCRLNumber {
+ t.Fatalf("unexpected failure: last (%v) and current (%v) leaf certificate might have the same serial number?", leafSerial, newLeafSerial)
+ }
+
+ if !now.After(expectedUpdate) {
+ t.Fatalf("expected newly generated certificate with serial %v not to appear on this CRL but it did, prematurely: %v", newLeafSerial, crl)
+ }
+
+ t.Fatalf("shouldn't be here")
+ }
+
+ // This serial should exist in the delta WAL section for the mount...
+ resp, err = client.Logical().List("sys/raw/logical/" + pkiMount + "/" + localDeltaWALPath)
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ require.NotEmpty(t, resp.Data)
+ require.NotEmpty(t, resp.Data["keys"])
+ require.Contains(t, resp.Data["keys"], normalizeSerial(newLeafSerial))
+
+ haveUpdatedDeltaCRL := false
+ interruptChan := time.After(4*newPeriod + delta)
+ for {
+ if haveUpdatedDeltaCRL {
+ break
+ }
+
+ select {
+ case <-interruptChan:
+ t.Fatalf("expected to regenerate delta CRL within a couple of periodicFunc invocations (plus %v grace period)", delta)
+ default:
+ // Check and see if there's a storage entry for the last rebuild
+ // serial. If so, validate the delta CRL contains this entry.
+ resp, err = client.Logical().List("sys/raw/logical/" + pkiMount + "/" + localDeltaWALPath)
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ require.NotEmpty(t, resp.Data)
+ require.NotEmpty(t, resp.Data["keys"])
+
+ haveRebuildMarker := false
+ for _, rawEntry := range resp.Data["keys"].([]interface{}) {
+ entry := rawEntry.(string)
+ if entry == deltaWALLastRevokedSerialName {
+ haveRebuildMarker = true
+ break
+ }
+ }
+
+ if !haveRebuildMarker {
+ time.Sleep(1 * time.Second)
+ continue
+ }
+
+ // Read the marker and see if it's correct.
+ resp, err = client.Logical().Read("sys/raw/logical/" + pkiMount + "/" + localDeltaWALLastBuildSerial)
+ require.NoError(t, err)
+ if resp == nil {
+ time.Sleep(1 * time.Second)
+ continue
+ }
+
+ require.NotNil(t, resp)
+ require.NotEmpty(t, resp.Data)
+ require.NotEmpty(t, resp.Data["value"])
+
+ // Easier than JSON decoding...
+ if !strings.Contains(resp.Data["value"].(string), newLeafSerial) {
+ time.Sleep(1 * time.Second)
+ continue
+ }
+
+ haveUpdatedDeltaCRL = true
+
+ // Ensure it has what we want.
+ deltaCrl := getParsedCrlAtPath(t, client, "/v1/pki/crl/delta").TBSCertList
+ if !requireSerialNumberInCRL(nil, deltaCrl, newLeafSerial) {
+ // Check if it is on the main CRL because it's already been regenerated.
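+ //
+ // (Aside: getCrlReferenceFromDelta presumably reads the
+ // Delta CRL Indicator extension (RFC 5280, section 5.2.4),
+ // which names the base CRL number a delta extends; that's
+ // what makes the comparison against lastCRLNumber below
+ // meaningful.)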
+ mainCRL := getParsedCrlAtPath(t, client, defaultCrlPath).TBSCertList + requireSerialNumberInCRL(t, mainCRL, newLeafSerial) + } else { + referenceCrlNum := getCrlReferenceFromDelta(t, deltaCrl) + if lastCRLNumber < referenceCrlNum { + lastCRLNumber = referenceCrlNum + } + } + } + } + + // Now, wait until we're within the grace period... Then start prompting + // for regeneration. + if expectedUpdate.After(now) { + time.Sleep(expectedUpdate.Sub(now)) + } + + crl = waitForUpdatedCrl(t, client, defaultCrlPath, lastCRLNumber, lastCRLExpiry.Sub(now)+delta) + requireSerialNumberInCRL(t, crl, leafSerial) + requireSerialNumberInCRL(t, crl, newLeafSerial) +} + +func findStorageMountUuid(t *testing.T, client *api.Client, mount string) string { + resp, err := client.Logical().Read("sys/mounts/" + mount) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["uuid"]) + pkiMount := resp.Data["uuid"].(string) + require.NotEmpty(t, pkiMount) + return pkiMount +} + +func TestTidyIssuerAssociation(t *testing.T) { + t.Parallel() + + b, s := CreateBackendWithStorage(t) + + // Create a root CA. + resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "root example.com", + "issuer_name": "root", + "key_type": "ec", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data["certificate"]) + require.NotEmpty(t, resp.Data["issuer_id"]) + rootCert := resp.Data["certificate"].(string) + rootID := resp.Data["issuer_id"].(issuerID) + + // Create a role for issuance. + _, err = CBWrite(b, s, "roles/local-testing", map[string]interface{}{ + "allow_any_name": true, + "enforce_hostnames": false, + "key_type": "ec", + "ttl": "75m", + }) + require.NoError(t, err) + + // Issue a leaf cert and ensure we can revoke it. + resp, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ + "common_name": "testing", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data["serial_number"]) + leafSerial := resp.Data["serial_number"].(string) + + _, err = CBWrite(b, s, "revoke", map[string]interface{}{ + "serial_number": leafSerial, + }) + require.NoError(t, err) + + // This leaf's revInfo entry should have an issuer associated + // with it. + entry, err := s.Get(ctx, revokedPath+normalizeSerial(leafSerial)) + require.NoError(t, err) + require.NotNil(t, entry) + require.NotNil(t, entry.Value) + + var leafInfo revocationInfo + err = entry.DecodeJSON(&leafInfo) + require.NoError(t, err) + require.Equal(t, rootID, leafInfo.CertificateIssuer) + + // Now remove the root and run tidy. + _, err = CBDelete(b, s, "issuer/default") + require.NoError(t, err) + _, err = CBWrite(b, s, "tidy", map[string]interface{}{ + "tidy_revoked_cert_issuer_associations": true, + }) + require.NoError(t, err) + + // Wait for tidy to finish. + for { + time.Sleep(125 * time.Millisecond) + + resp, err = CBRead(b, s, "tidy-status") + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["state"]) + state := resp.Data["state"].(string) + + if state == "Finished" { + break + } + if state == "Error" { + t.Fatalf("unexpected state for tidy operation: Error:\nStatus: %v", resp.Data) + } + } + + // Ensure we don't have an association on this leaf any more. 
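+ //
+ // (Aside: tidy_revoked_cert_issuer_associations walks the revoked/
+ // entries and clears CertificateIssuer wherever the referenced
+ // issuer no longer exists; with the root deleted above, this
+ // leaf's entry qualifies.)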
+ entry, err = s.Get(ctx, revokedPath+normalizeSerial(leafSerial)) + require.NoError(t, err) + require.NotNil(t, entry) + require.NotNil(t, entry.Value) + + err = entry.DecodeJSON(&leafInfo) + require.NoError(t, err) + require.Empty(t, leafInfo.CertificateIssuer) + + // Now, re-import the root and try again. + resp, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ + "pem_bundle": rootCert, + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotNil(t, resp.Data["imported_issuers"]) + importedIssuers := resp.Data["imported_issuers"].([]string) + require.Equal(t, 1, len(importedIssuers)) + newRootID := importedIssuers[0] + require.NotEmpty(t, newRootID) + + // Re-run tidy... + _, err = CBWrite(b, s, "tidy", map[string]interface{}{ + "tidy_revoked_cert_issuer_associations": true, + }) + require.NoError(t, err) + + // Wait for tidy to finish. + for { + time.Sleep(125 * time.Millisecond) + + resp, err = CBRead(b, s, "tidy-status") + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["state"]) + state := resp.Data["state"].(string) + + if state == "Finished" { + break + } + if state == "Error" { + t.Fatalf("unexpected state for tidy operation: Error:\nStatus: %v", resp.Data) + } + } + + // Finally, double-check we associated things correctly. + entry, err = s.Get(ctx, revokedPath+normalizeSerial(leafSerial)) + require.NoError(t, err) + require.NotNil(t, entry) + require.NotNil(t, entry.Value) + + err = entry.DecodeJSON(&leafInfo) + require.NoError(t, err) + require.Equal(t, newRootID, string(leafInfo.CertificateIssuer)) +} + +func requestCrlFromBackend(t *testing.T, s logical.Storage, b *backend) *logical.Response { + crlReq := &logical.Request{ + Operation: logical.ReadOperation, + Path: "crl/pem", + Storage: s, + } + resp, err := b.HandleRequest(context.Background(), crlReq) + require.NoError(t, err, "crl req failed with an error") + require.NotNil(t, resp, "crl response was nil with no error") + require.False(t, resp.IsError(), "crl error response: %v", resp) + return resp +} + +func TestCRLWarningsEmptyKeyUsage(t *testing.T) { + t.Parallel() + + b, s := CreateBackendWithStorage(t) + + // Generated using OpenSSL with a configuration lacking KeyUsage on + // the CA certificate. 
+ cert := `-----BEGIN CERTIFICATE----- +MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADATMREwDwYDVQQDDAhyb290 +LW9sZDAeFw0yMDAxMDEwMTAxMDFaFw0yMTAxMDEwMTAxMDFaMBMxETAPBgNVBAMM +CHJvb3Qtb2xkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzqhSZxAL +PwFhCIPL1jFPq6jxp1wFgo6YNSfVI13gfaGIjfErxsQUbosmlEuTeOc50zXXN3kb +SDufy5Yi1OeSkFZRdJ78zdKzsEDIVR1ukUngVsSrt05gdNMJlh8XOPbcrJo78jYG +lRgtkkFSc/wCu+ue6JqkfKrbUY/G9WK0UM8ppHm1Ux67ZGoypyEgaqqxKHBRC4Yl +D+lAs1vP4C6cavqdUMKgAPTKmMBzlbpCuYPLHSzWh9Com3WQSqCbrlo3uH5RT3V9 +5Gjuk3mMUhY1l6fRL7wG3f+4x+DS+ICQNT0o4lnMxpIsiTh0cEHUFgY7G0iHWYPj +CIN8UDhpZIpoCQIDAQABo2UwYzAdBgNVHQ4EFgQUJlHk3PN7pfC22FGxAb0rWgQt +L4cwHwYDVR0jBBgwFoAUJlHk3PN7pfC22FGxAb0rWgQtL4cwDAYDVR0TBAUwAwEB +/zATBgNVHSUEDDAKBggrBgEFBQcDATANBgkqhkiG9w0BAQsFAAOCAQEAcaU0FbXb +FfXluBrjKfOzVKz+kvQ1CVv3xe3MBkS6wvqybBjJCFChnqCPxEe57BdSbBXNU5LZ +zCR/OqYas4Csv9+msSn9BI2FSMAmfMDTsp5/6iIQJqlJx9L8a7bjzVMGX6QJm/3x +S/EgGsMETAgewQXeu4jhI6StgJ2V/4Ofe498hYw4LAiBapJmkU/nHezWodNBZJ7h +LcLOzVj0Hu5MZplGBgJFgRqBCVVkqXA0q7tORuhNzYtNdJFpv3pZIhvVFFu3HUPf +wYQPhLye5WNtosz5xKe8X0Q9qp8g6azMTk+5Qe7u1d8MYAA2AIlGuKUvPHRruOmN +NC+gQnS7AK1lCw== +-----END CERTIFICATE-----` + privKey := `-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDOqFJnEAs/AWEI +g8vWMU+rqPGnXAWCjpg1J9UjXeB9oYiN8SvGxBRuiyaUS5N45znTNdc3eRtIO5/L +liLU55KQVlF0nvzN0rOwQMhVHW6RSeBWxKu3TmB00wmWHxc49tysmjvyNgaVGC2S +QVJz/AK7657omqR8qttRj8b1YrRQzymkebVTHrtkajKnISBqqrEocFELhiUP6UCz +W8/gLpxq+p1QwqAA9MqYwHOVukK5g8sdLNaH0KibdZBKoJuuWje4flFPdX3kaO6T +eYxSFjWXp9EvvAbd/7jH4NL4gJA1PSjiWczGkiyJOHRwQdQWBjsbSIdZg+MIg3xQ +OGlkimgJAgMBAAECggEABKmCdmXDwy+eR0ll41aoc/hzPzHRxADAiU51Pf+DrYHj +6UPcF3db+KR2Adl0ocEhqlSoHs3CIk6KC9c+wOvagBwaaVWe4WvT9vF3M4he8rMm +dv6n2xJPFcOfDz5zUSssjk5KdOvoGRv7BzYnDIvOafvmUVwPwuo92Wizddy8saf4 +Xuea0Cupz1PELPKkbXcAqb+TzbAZrwdPj1Y7vTe/KGE4+aoDqCW/sFB1E0UsMGlt +/yfGwFP48b7kdkqSpcEQW5H8+WL3TfqRcolCD9To4vo2J+1Po0S/8qPNRvkNQDDX +AypHtrXFBOWHpJgXT4rKyH+ZGJchrCRDblt9s/sNQwKBgQD7NytvYET3pWemYiX+ +MB9uc6cPuMFONvlzjA9T6dbOSi/HLaeDoW027aMUZqb7QeaQCoWcUwh13dI2SZq0 +5+l9hei4JkWjoDhbWmPe7zDuQr3UMl0CSk3egz3BSHkjAhRAuUxK0QLKGB23zWxz +k8mUWYZaZRA39C6aqMt/jbJjDwKBgQDSl+eO+DjpwPzrjPSphpF4xYo4XDje9ovK +9q4KTHye7Flc3cMCX3WZBmzdt0zbqu6gWZjJH0XbWX/+SkJBGh77XWD0FeQnU7Vk +ipoeb8zTsCVxD9EytQuXti3cqBgClcCMvLKgLOJIcNYTnygojwg3t+jboQqbtV7p +VpQfAC6jZwKBgQCxJ46x1CnOmg4l/0DbqAQCV/yP0bI//fSbz0Ff459fimF3DHL9 +GHF0MtC2Kk3HEgoNud3PB58Hv43mSrGWsZSuuCgM9LBXWz1i7rNPG05eNyK26W09 +mDihmduK2hjS3zx5CDMM76gP7EHIxEyelLGqtBdS18JAMypKVo5rPPl3cQKBgQCG +ueXLImQOr4dfDntLpSqV0BLAQcekZKhEPZJURmCHr37wGXNzpixurJyjL2w9MFqf +PRKwwJAJZ3Wp8kn2qkZd23x2Szb+LeBjJQS6Kh4o44zgixTz0r1K3qLygptxs+pO +Xz4LmQte+skKHo0rfW3tb3vKXnmR6fOBZgE23//2SwKBgHck44hoE1Ex2gDEfIq1 +04OBoS1cpuc9ge4uHEmv+8uANjzwlsYf8hY1qae513MGixRBOkxcI5xX/fYPQV9F +t3Jfh8QX85JjnGntuXuraYZJMUjpwXr3QHPx0jpvAM3Au5j6qD3biC9Vrwq9Chkg +hbiiPARizZA/Tsna/9ox1qDT +-----END PRIVATE KEY-----` + resp, err := CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ + "pem_bundle": cert + "\n" + privKey, + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Warnings) + originalWarnings := resp.Warnings + + resp, err = CBRead(b, s, "crl/rotate") + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Warnings) + + // All CRL-specific warnings should've already occurred earlier on the + // import's CRL rebuild. + for _, warning := range resp.Warnings { + require.Contains(t, originalWarnings, warning) + } + + // Deleting the issuer and key should remove the warning. 
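+ //
+ // (Aside: the warnings arise because the CA above carries no
+ // KeyUsage extension, while RFC 5280 expects a CRL issuer's
+ // certificate to assert cRLSign. Deleting the issuer, or keeping
+ // only its certificate without the private key as done further
+ // below, leaves nothing that would sign a CRL with that defect.)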
+ _, err = CBDelete(b, s, "root") + require.NoError(t, err) + + resp, err = CBRead(b, s, "crl/rotate") + require.NoError(t, err) + require.NotNil(t, resp) + require.Empty(t, resp.Warnings) + + // Adding back just the cert shouldn't cause CRL rebuild warnings. + resp, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ + "pem_bundle": cert, + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotNil(t, resp.Data["mapping"]) + require.NotEmpty(t, resp.Data["mapping"]) + require.Equal(t, len(resp.Data["mapping"].(map[string]string)), 1) + for key, value := range resp.Data["mapping"].(map[string]string) { + require.NotEmpty(t, key) + require.Empty(t, value) + } + + resp, err = CBRead(b, s, "crl/rotate") + require.NoError(t, err) + require.NotNil(t, resp) + require.Empty(t, resp.Warnings) +} + +func TestCRLIssuerRemoval(t *testing.T) { + t.Parallel() + + ctx := context.Background() + b, s := CreateBackendWithStorage(t) + + if constants.IsEnterprise { + // We don't really care about the whole cross cluster replication + // stuff, but we do want to enable unified CRLs if we can, so that + // unified CRLs get built. + _, err := CBWrite(b, s, "config/crl", map[string]interface{}{ + "cross_cluster_revocation": true, + "auto_rebuild": true, + }) + require.NoError(t, err, "failed enabling unified CRLs on enterprise") + } + + // Create a single root, configure delta CRLs, and rotate CRLs to prep a + // starting state. + _, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "Root R1", + "key_type": "ec", + }) + require.NoError(t, err) + _, err = CBWrite(b, s, "config/crl", map[string]interface{}{ + "enable_delta": true, + "auto_rebuild": true, + }) + require.NoError(t, err) + _, err = CBRead(b, s, "crl/rotate") + require.NoError(t, err) + + // List items in storage under both CRL paths so we know what is there in + // the "good" state. + crlList, err := s.List(ctx, "crls/") + require.NoError(t, err) + require.Contains(t, crlList, "config") + require.Greater(t, len(crlList), 1) + + unifiedCRLList, err := s.List(ctx, "unified-crls/") + require.NoError(t, err) + require.Contains(t, unifiedCRLList, "config") + require.Greater(t, len(unifiedCRLList), 1) + + // Now, create a bunch of issuers, generate CRLs, and remove them. + var keyIDs []string + var issuerIDs []string + for i := 1; i <= 25; i++ { + resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": fmt.Sprintf("Root X%v", i), + "key_type": "ec", + }) + require.NoError(t, err) + require.NotNil(t, resp) + + key := string(resp.Data["key_id"].(keyID)) + keyIDs = append(keyIDs, key) + issuer := string(resp.Data["issuer_id"].(issuerID)) + issuerIDs = append(issuerIDs, issuer) + } + _, err = CBRead(b, s, "crl/rotate") + require.NoError(t, err) + for _, issuer := range issuerIDs { + _, err := CBDelete(b, s, "issuer/"+issuer) + require.NoError(t, err) + } + for _, key := range keyIDs { + _, err := CBDelete(b, s, "key/"+key) + require.NoError(t, err) + } + + // Finally list storage entries again to ensure they are cleaned up. 
+	afterCRLList, err := s.List(ctx, "crls/")
+	require.NoError(t, err)
+	for _, entry := range crlList {
+		require.Contains(t, afterCRLList, entry)
+	}
+	require.Equal(t, len(afterCRLList), len(crlList))
+
+	afterUnifiedCRLList, err := s.List(ctx, "unified-crls/")
+	require.NoError(t, err)
+	for _, entry := range unifiedCRLList {
+		require.Contains(t, afterUnifiedCRLList, entry)
+	}
+	require.Equal(t, len(afterUnifiedCRLList), len(unifiedCRLList))
+}
diff --git a/builtin/logical/pki/crl_util.go b/builtin/logical/pki/crl_util.go
new file mode 100644
index 0000000..d3ff0e3
--- /dev/null
+++ b/builtin/logical/pki/crl_util.go
@@ -0,0 +1,2212 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package pki
+
+import (
+	"bytes"
+	"crypto/rand"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"fmt"
+	"math/big"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/hashicorp/go-secure-stdlib/parseutil"
+	"github.com/hashicorp/vault/sdk/helper/certutil"
+	"github.com/hashicorp/vault/sdk/helper/consts"
+	"github.com/hashicorp/vault/sdk/helper/errutil"
+	"github.com/hashicorp/vault/sdk/logical"
+	atomic2 "go.uber.org/atomic"
+)
+
+const (
+	revokedPath                      = "revoked/"
+	crossRevocationPrefix            = "cross-revocation-queue/"
+	crossRevocationPath              = crossRevocationPrefix + "{{clusterId}}/"
+	deltaWALLastBuildSerialName      = "last-build-serial"
+	deltaWALLastRevokedSerialName    = "last-revoked-serial"
+	localDeltaWALPath                = "delta-wal/"
+	localDeltaWALLastBuildSerial     = localDeltaWALPath + deltaWALLastBuildSerialName
+	localDeltaWALLastRevokedSerial   = localDeltaWALPath + deltaWALLastRevokedSerialName
+	unifiedDeltaWALPrefix            = "unified-delta-wal/"
+	unifiedDeltaWALPath              = "unified-delta-wal/{{clusterId}}/"
+	unifiedDeltaWALLastBuildSerial   = unifiedDeltaWALPath + deltaWALLastBuildSerialName
+	unifiedDeltaWALLastRevokedSerial = unifiedDeltaWALPath + deltaWALLastRevokedSerialName
+)
+
+type revocationInfo struct {
+	CertificateBytes  []byte    `json:"certificate_bytes"`
+	RevocationTime    int64     `json:"revocation_time"`
+	RevocationTimeUTC time.Time `json:"revocation_time_utc"`
+	CertificateIssuer issuerID  `json:"issuer_id"`
+}
+
+type revocationRequest struct {
+	RequestedAt time.Time `json:"requested_at"`
+}
+
+type revocationConfirmed struct {
+	RevokedAt string `json:"revoked_at"`
+	Source    string `json:"source"`
+}
+
+type revocationQueueEntry struct {
+	Cluster string
+	Serial  string
+}
+
+type (
+	// Placeholder in case of migrations needing more data. Currently
+	// we use the path name to store the serial number that was revoked.
+	deltaWALInfo struct{}
+	lastWALInfo  struct {
+		// Info to write about the last WAL entry. This is the serial number
+		// of the last revoked certificate.
+		//
+		// We write this below in revokeCert(...) and read it in
+		// rebuildDeltaCRLsIfForced(...).
+		Serial string `json:"serial"`
+	}
+	lastDeltaInfo struct {
+		// Info to write about the last built delta CRL. This is the serial
+		// number of the last revoked certificate that we saw prior to delta
+		// CRL building.
+		//
+		// We write this below in buildAnyCRLs(...) and read it in
+		// rebuildDeltaCRLsIfForced(...).
+		Serial string `json:"serial"`
+	}
+)
+
+// crlBuilder is the gatekeeper controlling various read/write operations to the CRL's storage.
+// The extra complexity arises from secondary performance clusters seeing various writes to their storage
+// without the actual API calls.
During the storage invalidation process, we do not have the required state +// to actually rebuild the CRLs, so we need to schedule it in a deferred fashion. This allows either +// read or write calls to perform the operation if required, or have the flag reset upon a write operation +// +// The CRL builder also tracks the revocation configuration. +type crlBuilder struct { + _builder sync.Mutex + forceRebuild *atomic2.Bool + canRebuild bool + lastDeltaRebuildCheck time.Time + + _config sync.RWMutex + dirty *atomic2.Bool + config crlConfig + haveInitializedConfig bool + + // Whether to invalidate our LastModifiedTime due to write on the + // global issuance config. + invalidate *atomic2.Bool + + // Global revocation queue entries get accepted by the invalidate func + // and passed to the crlBuilder for processing. + haveInitializedQueue *atomic2.Bool + revQueue *revocationQueue + removalQueue *revocationQueue + crossQueue *revocationQueue +} + +const ( + _ignoreForceFlag = true + _enforceForceFlag = false +) + +func newCRLBuilder(canRebuild bool) *crlBuilder { + return &crlBuilder{ + forceRebuild: atomic2.NewBool(false), + canRebuild: canRebuild, + // Set the last delta rebuild window to now, delaying the first delta + // rebuild by the first rebuild period to give us some time on startup + // to stabilize. + lastDeltaRebuildCheck: time.Now(), + dirty: atomic2.NewBool(true), + config: defaultCrlConfig, + invalidate: atomic2.NewBool(false), + haveInitializedQueue: atomic2.NewBool(false), + revQueue: newRevocationQueue(), + removalQueue: newRevocationQueue(), + crossQueue: newRevocationQueue(), + } +} + +func (cb *crlBuilder) markConfigDirty() { + cb.dirty.Store(true) +} + +func (cb *crlBuilder) reloadConfigIfRequired(sc *storageContext) error { + if cb.dirty.Load() { + // Acquire a write lock. + cb._config.Lock() + defer cb._config.Unlock() + + if !cb.dirty.Load() { + // Someone else might've been reloading the config; no need + // to do it twice. + return nil + } + + config, err := sc.getRevocationConfig() + if err != nil { + return err + } + + previousConfig := cb.config + // Set the default config if none was returned to us. + if config != nil { + cb.config = *config + } else { + cb.config = defaultCrlConfig + } + + // Updated the config; unset dirty. + cb.dirty.Store(false) + triggerChangeNotification := true + if !cb.haveInitializedConfig { + cb.haveInitializedConfig = true + triggerChangeNotification = false // do not trigger on the initial loading of configuration. + } + + // Certain things need to be triggered on all server types when crlConfig is loaded. + if triggerChangeNotification { + cb.notifyOnConfigChange(sc, previousConfig, cb.config) + } + } + + return nil +} + +func (cb *crlBuilder) notifyOnConfigChange(sc *storageContext, priorConfig crlConfig, newConfig crlConfig) { + // If you need to hook into a CRL configuration change across different server types + // such as primary clusters as well as performance replicas, it is easier to do here than + // in two places (API layer and in invalidateFunc) + if priorConfig.UnifiedCRL != newConfig.UnifiedCRL && newConfig.UnifiedCRL { + sc.Backend.unifiedTransferStatus.forceRun() + } + + if priorConfig.UseGlobalQueue != newConfig.UseGlobalQueue && newConfig.UseGlobalQueue { + cb.haveInitializedQueue.Store(false) + } +} + +func (cb *crlBuilder) getConfigWithUpdate(sc *storageContext) (*crlConfig, error) { + // Config may mutate immediately after accessing, but will be freshly + // fetched if necessary. 
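+	//
+	// An illustrative caller sketch (hypothetical, not from this file): the
+	// returned value is a copy, so callers may read it without holding
+	// cb._config:
+	//
+	//	cfg, err := cb.getConfigWithUpdate(sc)
+	//	if err != nil {
+	//		return err
+	//	}
+	//	if cfg.Disable {
+	//		return nil // safe: mutating cfg never touches cb.config
+	//	}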
+	if err := cb.reloadConfigIfRequired(sc); err != nil {
+		return nil, err
+	}
+
+	cb._config.RLock()
+	defer cb._config.RUnlock()
+
+	configCopy := cb.config
+	return &configCopy, nil
+}
+
+func (cb *crlBuilder) checkForAutoRebuild(sc *storageContext) error {
+	cfg, err := cb.getConfigWithUpdate(sc)
+	if err != nil {
+		return err
+	}
+
+	if cfg.Disable || !cfg.AutoRebuild || cb.forceRebuild.Load() {
+		// Not enabled, not on auto-rebuilder, or we're already scheduled to
+		// rebuild, so there's no point in interrogating CRL values...
+		return nil
+	}
+
+	// Auto-Rebuild is enabled. We need to check each issuer's CRL and see
+	// if it's about to expire. If it is, we've gotta rebuild it (and, well,
+	// every other CRL, since we don't have a fine-toothed rebuilder).
+	//
+	// We store a list of all (unique) CRLs in the cluster-local CRL
+	// configuration along with their expiration dates.
+	internalCRLConfig, err := sc.getLocalCRLConfig()
+	if err != nil {
+		return fmt.Errorf("error checking for auto-rebuild status: unable to fetch cluster-local CRL configuration: %w", err)
+	}
+
+	// If there's no config, assume we've gotta rebuild it to get this
+	// information.
+	if internalCRLConfig == nil {
+		cb.forceRebuild.Store(true)
+		return nil
+	}
+
+	// If the map is empty, assume we need to upgrade and schedule a
+	// rebuild.
+	if len(internalCRLConfig.CRLExpirationMap) == 0 {
+		cb.forceRebuild.Store(true)
+		return nil
+	}
+
+	// Otherwise, check each CRL's expiration and see if it's zero or within
+	// the grace period and act accordingly.
+	now := time.Now()
+
+	period, err := parseutil.ParseDurationSecond(cfg.AutoRebuildGracePeriod)
+	if err != nil {
+		// This may occur if the duration is empty; in that case
+		// assume the default. The default should be valid and shouldn't
+		// error.
+		defaultPeriod, defaultErr := parseutil.ParseDurationSecond(defaultCrlConfig.AutoRebuildGracePeriod)
+		if defaultErr != nil {
+			return fmt.Errorf("error checking for auto-rebuild status: unable to parse duration from both config's grace period (%v) and default grace period (%v):\n- config: %v\n- default: %w\n", cfg.AutoRebuildGracePeriod, defaultCrlConfig.AutoRebuildGracePeriod, err, defaultErr)
+		}
+
+		period = defaultPeriod
+	}
+
+	for _, value := range internalCRLConfig.CRLExpirationMap {
+		if value.IsZero() || now.After(value.Add(-1*period)) {
+			cb.forceRebuild.Store(true)
+			return nil
+		}
+	}
+
+	return nil
+}
+
+// Mark the internal LastModifiedTime tracker invalid.
+func (cb *crlBuilder) invalidateCRLBuildTime() {
+	cb.invalidate.Store(true)
+}
+
+// Update the config to mark the modified CRL. See note in
+// updateDefaultIssuerId about why this is necessary.
+func (cb *crlBuilder) flushCRLBuildTimeInvalidation(sc *storageContext) error {
+	if cb.invalidate.CAS(true, false) {
+		// Flush out our invalidation.
+		cfg, err := sc.getLocalCRLConfig()
+		if err != nil {
+			cb.invalidate.Store(true)
+			return fmt.Errorf("unable to update local CRL config's modification time: error fetching: %w", err)
+		}
+
+		cfg.LastModified = time.Now().UTC()
+		cfg.DeltaLastModified = time.Now().UTC()
+		err = sc.setLocalCRLConfig(cfg)
+		if err != nil {
+			cb.invalidate.Store(true)
+			return fmt.Errorf("unable to update local CRL config's modification time: error persisting: %w", err)
+		}
+	}
+
+	return nil
+}
+
+// rebuildIfForced is to be called by readers or periodic functions that might need to trigger
+// a refresh of the CRL before the read occurs.
+func (cb *crlBuilder) rebuildIfForced(sc *storageContext) ([]string, error) {
+	if cb.forceRebuild.Load() {
+		return cb._doRebuild(sc, true, _enforceForceFlag)
+	}
+
+	return nil, nil
+}
+
+// rebuild is to be called by various write APIs that know the CRL needs to be updated and can be updated now.
+func (cb *crlBuilder) rebuild(sc *storageContext, forceNew bool) ([]string, error) {
+	return cb._doRebuild(sc, forceNew, _ignoreForceFlag)
+}
+
+// requestRebuildIfActiveNode will schedule a rebuild of the CRL from the next read or write API call, assuming we are the active node of a cluster.
+func (cb *crlBuilder) requestRebuildIfActiveNode(b *backend) {
+	// Only schedule us on active nodes, as the active node is the only node that can rebuild/write the CRL.
+	// Note 1: The CRL is cluster specific, so this does need to run on the active node of a performance secondary cluster.
+	// Note 2: This is called by the storage invalidation function, so it should not block.
+	if !cb.canRebuild {
+		b.Logger().Debug("Ignoring request to schedule a CRL rebuild, not on active node.")
+		return
+	}
+
+	b.Logger().Info("Scheduling PKI CRL rebuild.")
+	// Set the flag to true; we don't care if we aren't the ones that actually flip it.
+	cb.forceRebuild.Store(true)
+}
+
+func (cb *crlBuilder) _doRebuild(sc *storageContext, forceNew bool, ignoreForceFlag bool) ([]string, error) {
+	cb._builder.Lock()
+	defer cb._builder.Unlock()
+	// Re-read the flag now that we hold the lock, in case someone beat us to the punch between our earlier load and acquiring it.
+	forceBuildFlag := cb.forceRebuild.Load()
+	if forceBuildFlag || ignoreForceFlag {
+		// Reset our original flag back to false before we start the rebuilding. This may lead to another round of
+		// CRL building, but we want to avoid the race condition caused by clearing the flag after we completed (an
+		// update/revocation occurred attempting to set the flag, after we listed the certs but before we wrote
+		// the CRL, so we missed the update and cleared the flag).
+		cb.forceRebuild.Store(false)
+
+		// If forceRebuild was requested, that should force a complete rebuild even if forceNew did not request one.
+		myForceNew := forceBuildFlag || forceNew
+		return buildCRLs(sc, myForceNew)
+	}
+
+	return nil, nil
+}
+
+func (cb *crlBuilder) _getPresentDeltaWALForClearing(sc *storageContext, path string) ([]string, error) {
+	// Clearing of the delta WAL occurs after a new complete CRL has been built.
+	walSerials, err := sc.Storage.List(sc.Context, path)
+	if err != nil {
+		return nil, fmt.Errorf("error fetching list of delta WAL certificates to clear: %w", err)
+	}
+
+	// We _should_ remove the special WAL entries here, but we don't really
+	// want to traverse the list again (and also below in clearDeltaWAL). So
+	// trust the latter does the right thing.
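+	//
+	// (Concretely, the special entries are the "last-build-serial" and
+	// "last-revoked-serial" markers; _clearDeltaWAL below skips anything
+	// ending in those suffixes, so returning them here is harmless.)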
+	return walSerials, nil
+}
+
+func (cb *crlBuilder) getPresentLocalDeltaWALForClearing(sc *storageContext) ([]string, error) {
+	return cb._getPresentDeltaWALForClearing(sc, localDeltaWALPath)
+}
+
+func (cb *crlBuilder) getPresentUnifiedDeltaWALForClearing(sc *storageContext) ([]string, error) {
+	walClusters, err := sc.Storage.List(sc.Context, unifiedDeltaWALPrefix)
+	if err != nil {
+		return nil, fmt.Errorf("error fetching list of clusters with delta WAL entries: %w", err)
+	}
+
+	var allPaths []string
+	for index, cluster := range walClusters {
+		prefix := unifiedDeltaWALPrefix + cluster
+		clusterPaths, err := cb._getPresentDeltaWALForClearing(sc, prefix)
+		if err != nil {
+			return nil, fmt.Errorf("error fetching delta WAL entries for cluster (%v / %v): %w", index, cluster, err)
+		}
+
+		// Here, we don't want to include the unifiedDeltaWALPrefix because
+		// clearUnifiedDeltaWAL handles that for us. Instead, just include
+		// the cluster identifier.
+		for _, clusterPath := range clusterPaths {
+			allPaths = append(allPaths, cluster+clusterPath)
+		}
+	}
+
+	return allPaths, nil
+}
+
+func (cb *crlBuilder) _clearDeltaWAL(sc *storageContext, walSerials []string, path string) error {
+	// Clearing of the delta WAL occurs after a new complete CRL has been built.
+	for _, serial := range walSerials {
+		// Don't remove our special entries!
+		if strings.HasSuffix(serial, deltaWALLastBuildSerialName) || strings.HasSuffix(serial, deltaWALLastRevokedSerialName) {
+			continue
+		}
+
+		if err := sc.Storage.Delete(sc.Context, path+serial); err != nil {
+			return fmt.Errorf("error clearing delta WAL certificate: %w", err)
+		}
+	}
+
+	return nil
+}
+
+func (cb *crlBuilder) clearLocalDeltaWAL(sc *storageContext, walSerials []string) error {
+	return cb._clearDeltaWAL(sc, walSerials, localDeltaWALPath)
+}
+
+func (cb *crlBuilder) clearUnifiedDeltaWAL(sc *storageContext, walSerials []string) error {
+	return cb._clearDeltaWAL(sc, walSerials, unifiedDeltaWALPrefix)
+}
+
+func (cb *crlBuilder) rebuildDeltaCRLsIfForced(sc *storageContext, override bool) ([]string, error) {
+	// Delta CRLs use the same expiry duration as the complete CRL. Because
+	// we always rebuild the complete CRL and then the delta CRL, we can
+	// be assured that the delta CRL always expires after a complete CRL,
+	// and that rebuilding the complete CRL will trigger a fresh delta CRL
+	// build of its own.
+	//
+	// This guarantee means we can avoid checking delta CRL expiry. Thus,
+	// we only need to rebuild the delta CRL when we have new revocations,
+	// within our time window for updating it.
+	cfg, err := cb.getConfigWithUpdate(sc)
+	if err != nil {
+		return nil, err
+	}
+
+	if !cfg.EnableDelta {
+		// We explicitly do not update the last check time here, as we
+		// want to persist the last rebuild window if it hasn't been set.
+		return nil, nil
+	}
+
+	deltaRebuildDuration, err := parseutil.ParseDurationSecond(cfg.DeltaRebuildInterval)
+	if err != nil {
+		return nil, err
+	}
+
+	// Acquire CRL building locks before we get too much further.
+	cb._builder.Lock()
+	defer cb._builder.Unlock()
+
+	// Last is set up during newCRLBuilder(...), so we don't need to deal with
+	// a zero condition.
+	now := time.Now()
+	last := cb.lastDeltaRebuildCheck
+	nextRebuildCheck := last.Add(deltaRebuildDuration)
+	if !override && now.Before(nextRebuildCheck) {
+		// If we're still before the time of our next rebuild check, we can
+		// safely return here even if we have certs. We'll wait for a bit,
+		// retrigger this check, and then do the rebuild.
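+		//
+		// For example (illustrative numbers only): with a 15m rebuild
+		// interval, a revocation landing one minute after the last check
+		// is not considered until the first call at or after last+15m.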
+		return nil, nil
+	}
+
+	// Update our check time. If we bail out below (due to storage errors
+	// or whatever), we'll delay the next CRL check (hopefully allowing
+	// things to stabilize). Otherwise, we might not build a new Delta CRL
+	// until our next complete CRL build.
+	cb.lastDeltaRebuildCheck = now
+
+	rebuildLocal, err := cb._shouldRebuildLocalCRLs(sc, override)
+	if err != nil {
+		return nil, fmt.Errorf("error determining if local CRLs should be rebuilt: %w", err)
+	}
+
+	rebuildUnified, err := cb._shouldRebuildUnifiedCRLs(sc, override)
+	if err != nil {
+		return nil, fmt.Errorf("error determining if unified CRLs should be rebuilt: %w", err)
+	}
+
+	if !rebuildLocal && !rebuildUnified {
+		return nil, nil
+	}
+
+	// Finally, we must've needed to do the rebuild. Execute!
+	return cb.rebuildDeltaCRLsHoldingLock(sc, false)
+}
+
+func (cb *crlBuilder) _shouldRebuildLocalCRLs(sc *storageContext, override bool) (bool, error) {
+	// Fetch two storage entries to see if we actually need to do this
+	// rebuild, given we're within the window.
+	lastWALEntry, err := sc.Storage.Get(sc.Context, localDeltaWALLastRevokedSerial)
+	if err != nil || !override && (lastWALEntry == nil || lastWALEntry.Value == nil) {
+		// If this entry does not exist, we don't need to rebuild the
+		// delta WAL due to the expiration assumption above. There must
+		// not have been any new revocations. Since err should be nil
+		// in this case, we can safely return it.
+		return false, err
+	}
+
+	lastBuildEntry, err := sc.Storage.Get(sc.Context, localDeltaWALLastBuildSerial)
+	if err != nil {
+		return false, err
+	}
+
+	if !override && lastBuildEntry != nil && lastBuildEntry.Value != nil {
+		// If the last build entry doesn't exist, we still want to build a
+		// new delta WAL, since this could be our very first time doing so.
+		//
+		// Otherwise, here, now that we know it exists, we want to check this
+		// value against the other value. Since we previously guarded the WAL
+		// entry being non-empty, we're good to decode everything within this
+		// guard.
+		var walInfo lastWALInfo
+		if err := lastWALEntry.DecodeJSON(&walInfo); err != nil {
+			return false, err
+		}
+
+		var deltaInfo lastDeltaInfo
+		if err := lastBuildEntry.DecodeJSON(&deltaInfo); err != nil {
+			return false, err
+		}
+
+		// Here, everything decoded properly and we know that no new certs
+		// have been revoked since we built this last delta CRL. We can exit
+		// without rebuilding then.
+		if walInfo.Serial == deltaInfo.Serial {
+			return false, nil
+		}
+	}
+
+	return true, nil
+}
+
+func (cb *crlBuilder) _shouldRebuildUnifiedCRLs(sc *storageContext, override bool) (bool, error) {
+	// Unified CRL can only be built by the main cluster.
+	b := sc.Backend
+	if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) ||
+		(!b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) {
+		return false, nil
+	}
+
+	// If we're overriding whether we should build Delta CRLs, always return
+	// true, even if storage errors might've happened.
+	if override {
+		return true, nil
+	}
+
+	// Fetch two storage entries to see if we actually need to do this
+	// rebuild, given we're within the window. We need to fetch these
+	// two entries per cluster.
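+	//
+	// Per cluster, these are the "last-revoked-serial" and "last-build-serial"
+	// markers under unified-delta-wal/<cluster>/; they are compared below to
+	// decide whether new revocations have arrived since the last delta build.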
+ clusters, err := sc.Storage.List(sc.Context, unifiedDeltaWALPrefix) + if err != nil { + return false, fmt.Errorf("failed to get the list of clusters having written Delta WALs: %w", err) + } + + // If any cluster tells us to rebuild, we should rebuild. + shouldRebuild := false + for index, cluster := range clusters { + prefix := unifiedDeltaWALPrefix + cluster + clusterUnifiedLastRevokedWALEntry := prefix + deltaWALLastRevokedSerialName + clusterUnifiedLastBuiltWALEntry := prefix + deltaWALLastBuildSerialName + + lastWALEntry, err := sc.Storage.Get(sc.Context, clusterUnifiedLastRevokedWALEntry) + if err != nil { + return false, fmt.Errorf("failed fetching last revoked WAL entry for cluster (%v / %v): %w", index, cluster, err) + } + + if lastWALEntry == nil || lastWALEntry.Value == nil { + continue + } + + lastBuildEntry, err := sc.Storage.Get(sc.Context, clusterUnifiedLastBuiltWALEntry) + if err != nil { + return false, fmt.Errorf("failed fetching last built CRL WAL entry for cluster (%v / %v): %w", index, cluster, err) + } + + if lastBuildEntry == nil || lastBuildEntry.Value == nil { + // If the last build entry doesn't exist, we still want to build a + // new delta WAL, since this could be our very first time doing so. + shouldRebuild = true + break + } + + // Otherwise, here, now that we know it exists, we want to check this + // value against the other value. Since we previously guarded the WAL + // entry being non-empty, we're good to decode everything within this + // guard. + var walInfo lastWALInfo + if err := lastWALEntry.DecodeJSON(&walInfo); err != nil { + return false, fmt.Errorf("failed decoding last revoked WAL entry for cluster (%v / %v): %w", index, cluster, err) + } + + var deltaInfo lastDeltaInfo + if err := lastBuildEntry.DecodeJSON(&deltaInfo); err != nil { + return false, fmt.Errorf("failed decoding last built CRL WAL entry for cluster (%v / %v): %w", index, cluster, err) + } + + if walInfo.Serial != deltaInfo.Serial { + shouldRebuild = true + break + } + } + + // No errors occurred, so return the result. + return shouldRebuild, nil +} + +func (cb *crlBuilder) rebuildDeltaCRLs(sc *storageContext, forceNew bool) ([]string, error) { + cb._builder.Lock() + defer cb._builder.Unlock() + + return cb.rebuildDeltaCRLsHoldingLock(sc, forceNew) +} + +func (cb *crlBuilder) rebuildDeltaCRLsHoldingLock(sc *storageContext, forceNew bool) ([]string, error) { + return buildAnyCRLs(sc, forceNew, true /* building delta */) +} + +func (cb *crlBuilder) addCertForRevocationCheck(cluster, serial string) { + entry := &revocationQueueEntry{ + Cluster: cluster, + Serial: serial, + } + cb.revQueue.Add(entry) +} + +func (cb *crlBuilder) addCertForRevocationRemoval(cluster, serial string) { + entry := &revocationQueueEntry{ + Cluster: cluster, + Serial: serial, + } + cb.removalQueue.Add(entry) +} + +func (cb *crlBuilder) addCertFromCrossRevocation(cluster, serial string) { + entry := &revocationQueueEntry{ + Cluster: cluster, + Serial: serial, + } + cb.crossQueue.Add(entry) +} + +func (cb *crlBuilder) maybeGatherQueueForFirstProcess(sc *storageContext, isNotPerfPrimary bool) error { + // Assume holding lock. 
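+	//
+	// The queue layout walked below looks roughly like this (illustrative):
+	//
+	//	cross-revocation-queue/<cluster>/<serial>            -> revocationRequest
+	//	cross-revocation-queue/<cluster>/<serial>/confirmed  -> revocationConfirmed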
+	if cb.haveInitializedQueue.Load() {
+		return nil
+	}
+
+	sc.Backend.Logger().Debug("gathering existing revocations for first-time queue processing")
+
+	clusters, err := sc.Storage.List(sc.Context, crossRevocationPrefix)
+	if err != nil {
+		return fmt.Errorf("failed to list cross-cluster revocation queue participating clusters: %w", err)
+	}
+
+	sc.Backend.Logger().Debug(fmt.Sprintf("found %v clusters: %v", len(clusters), clusters))
+
+	for cIndex, cluster := range clusters {
+		cluster = cluster[0 : len(cluster)-1]
+		cPath := crossRevocationPrefix + cluster + "/"
+		serials, err := sc.Storage.List(sc.Context, cPath)
+		if err != nil {
+			return fmt.Errorf("failed to list cross-cluster revocation queue entries for cluster %v (%v): %w", cluster, cIndex, err)
+		}
+
+		sc.Backend.Logger().Debug(fmt.Sprintf("found %v serials for cluster %v: %v", len(serials), cluster, serials))
+
+		for _, serial := range serials {
+			if serial[len(serial)-1] == '/' {
+				serial = serial[0 : len(serial)-1]
+			}
+
+			ePath := cPath + serial
+			eConfirmPath := ePath + "/confirmed"
+			removalEntry, err := sc.Storage.Get(sc.Context, eConfirmPath)
+
+			entry := &revocationQueueEntry{
+				Cluster: cluster,
+				Serial:  serial,
+			}
+
+			// No removal entry yet; add to regular queue. Otherwise, slate it
+			// for removal if we're a perfPrimary.
+			if err != nil || removalEntry == nil {
+				cb.revQueue.Add(entry)
+			} else if !isNotPerfPrimary {
+				cb.removalQueue.Add(entry)
+			} // Else, this is a confirmation but we're on a perf secondary so ignore it.
+
+			// Overwrite the error; we don't really care about its contents
+			// at this step.
+			err = nil
+		}
+	}
+
+	return nil
+}
+
+func (cb *crlBuilder) processRevocationQueue(sc *storageContext) error {
+	sc.Backend.Logger().Debug("starting to process revocation requests")
+
+	isNotPerfPrimary := sc.Backend.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) ||
+		(!sc.Backend.System().LocalMount() && sc.Backend.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary))
+
+	if err := cb.maybeGatherQueueForFirstProcess(sc, isNotPerfPrimary); err != nil {
+		return fmt.Errorf("failed to gather first queue: %w", err)
+	}
+
+	revQueue := cb.revQueue.Iterate()
+	removalQueue := cb.removalQueue.Iterate()
+
+	sc.Backend.Logger().Debug(fmt.Sprintf("gathered %v revocations and %v confirmation entries", len(revQueue), len(removalQueue)))
+
+	crlConfig, err := cb.getConfigWithUpdate(sc)
+	if err != nil {
+		return err
+	}
+
+	ourClusterId, err := sc.Backend.System().ClusterID(sc.Context)
+	if err != nil {
+		return fmt.Errorf("unable to fetch clusterID to ignore local revocation entries: %w", err)
+	}
+
+	for _, req := range revQueue {
+		// Regardless of whether we're on the perf primary or a secondary
+		// cluster, we can safely ignore revocation requests originating
+		// from our own cluster, because we've already checked them once (when
+		// they were created).
+		if ourClusterId != "" && ourClusterId == req.Cluster {
+			continue
+		}
+
+		// Fetch the revocation entry to ensure it exists.
+		rPath := crossRevocationPrefix + req.Cluster + "/" + req.Serial
+		entry, err := sc.Storage.Get(sc.Context, rPath)
+		if err != nil {
+			return fmt.Errorf("failed to read cross-cluster revocation queue entry: %w", err)
+		}
+		if entry == nil {
+			// Skipping this entry; it was likely an incorrect invalidation
+			// caused by the primary cluster removing the confirmation.
+			cb.revQueue.Remove(req)
+			continue
+		}
+
+		resp, err := tryRevokeCertBySerial(sc, crlConfig, req.Serial)
+		if err == nil && resp != nil && !resp.IsError() && resp.Data != nil && resp.Data["state"].(string) == "revoked" {
+			if isNotPerfPrimary {
+				// Write a revocation queue removal entry.
+				confirmed := revocationConfirmed{
+					RevokedAt: resp.Data["revocation_time_rfc3339"].(string),
+					Source:    req.Cluster,
+				}
+				path := crossRevocationPath + req.Serial + "/confirmed"
+				confirmedEntry, err := logical.StorageEntryJSON(path, confirmed)
+				if err != nil {
+					return fmt.Errorf("failed to create storage entry for cross-cluster revocation confirmed response: %w", err)
+				}
+
+				if err := sc.Storage.Put(sc.Context, confirmedEntry); err != nil {
+					return fmt.Errorf("error persisting cross-cluster revocation confirmation: %w", err)
+				}
+			} else {
+				// Since we're the active node of the primary cluster, go ahead
+				// and just remove it.
+				path := crossRevocationPrefix + req.Cluster + "/" + req.Serial
+				if err := sc.Storage.Delete(sc.Context, path); err != nil {
+					return fmt.Errorf("failed to delete processed revocation request: %w", err)
+				}
+			}
+		} else if err != nil {
+			// Because we fake being from a lease, we get the guarantee that
+			// err == nil == resp if the cert was already revoked; this means
+			// this err should actually be fatal.
+			return err
+		}
+		cb.revQueue.Remove(req)
+	}
+
+	if isNotPerfPrimary {
+		sc.Backend.Logger().Debug("not on perf primary so ignoring any revocation confirmations")
+
+		// See note in pki/backend.go; this should be empty.
+		cb.removalQueue.RemoveAll()
+		cb.haveInitializedQueue.Store(true)
+		return nil
+	}
+
+	clusters, err := sc.Storage.List(sc.Context, crossRevocationPrefix)
+	if err != nil {
+		return err
+	}
+
+	for _, entry := range removalQueue {
+		// First remove the revocation request.
+		for cIndex, cluster := range clusters {
+			eEntry := crossRevocationPrefix + cluster + entry.Serial
+			if err := sc.Storage.Delete(sc.Context, eEntry); err != nil {
+				return fmt.Errorf("failed to delete potential cross-cluster revocation entry for cluster %v (%v) and serial %v: %w", cluster, cIndex, entry.Serial, err)
+			}
+		}
+
+		// Then remove the confirmation.
+		if err := sc.Storage.Delete(sc.Context, crossRevocationPrefix+entry.Cluster+"/"+entry.Serial+"/confirmed"); err != nil {
+			return fmt.Errorf("failed to delete cross-cluster revocation confirmation entry for cluster %v and serial %v: %w", entry.Cluster, entry.Serial, err)
+		}
+
+		cb.removalQueue.Remove(entry)
+	}
+
+	cb.haveInitializedQueue.Store(true)
+
+	return nil
+}
+
+func (cb *crlBuilder) processCrossClusterRevocations(sc *storageContext) error {
+	sc.Backend.Logger().Debug("starting to process unified revocations")
+
+	crlConfig, err := cb.getConfigWithUpdate(sc)
+	if err != nil {
+		return err
+	}
+
+	if !crlConfig.UnifiedCRL {
+		cb.crossQueue.RemoveAll()
+		return nil
+	}
+
+	crossQueue := cb.crossQueue.Iterate()
+	sc.Backend.Logger().Debug(fmt.Sprintf("gathered %v unified revocation entries", len(crossQueue)))
+
+	ourClusterId, err := sc.Backend.System().ClusterID(sc.Context)
+	if err != nil {
+		return fmt.Errorf("unable to fetch clusterID to ignore local unified revocation entries: %w", err)
+	}
+
+	for _, req := range crossQueue {
+		// Regardless of whether we're on the perf primary or a secondary
+		// cluster, we can safely ignore revocation requests originating
+		// from our own cluster, because we've already checked them once (when
+		// they were created).
+ if ourClusterId != "" && ourClusterId == req.Cluster { + continue + } + + // Fetch the revocation entry to ensure it exists and this wasn't + // a delete. + rPath := unifiedRevocationReadPathPrefix + req.Cluster + "/" + req.Serial + entry, err := sc.Storage.Get(sc.Context, rPath) + if err != nil { + return fmt.Errorf("failed to read unified revocation entry: %w", err) + } + if entry == nil { + // Skip this entry: it was likely caused by the deletion of this + // record during tidy. + cb.crossQueue.Remove(req) + continue + } + + resp, err := tryRevokeCertBySerial(sc, crlConfig, req.Serial) + if err == nil && resp != nil && !resp.IsError() && resp.Data != nil && resp.Data["state"].(string) == "revoked" { + // We could theoretically save ourselves from writing a global + // revocation entry during the above certificate revocation, as + // we don't really need it to appear on either the unified CRL + // or its delta CRL, but this would require more plumbing. + cb.crossQueue.Remove(req) + } else if err != nil { + // Because we fake being from a lease, we get the guarantee that + // err == nil == resp if the cert was already revoked; this means + // this err should actually be fatal. + return err + } + } + + return nil +} + +// Helper function to fetch a map of issuerID->parsed cert for revocation +// usage. Unlike other paths, this needs to handle the legacy bundle +// more gracefully than rejecting it outright. +func fetchIssuerMapForRevocationChecking(sc *storageContext) (map[issuerID]*x509.Certificate, error) { + var err error + var issuers []issuerID + + if !sc.Backend.useLegacyBundleCaStorage() { + issuers, err = sc.listIssuers() + if err != nil { + return nil, fmt.Errorf("could not fetch issuers list: %w", err) + } + } else { + // Hack: this isn't a real issuerID, but it works for fetchCAInfo + // since it resolves the reference. + issuers = []issuerID{legacyBundleShimID} + } + + issuerIDCertMap := make(map[issuerID]*x509.Certificate, len(issuers)) + for _, issuer := range issuers { + _, bundle, caErr := sc.fetchCertBundleByIssuerId(issuer, false) + if caErr != nil { + return nil, fmt.Errorf("error fetching CA certificate for issuer id %v: %w", issuer, caErr) + } + + if bundle == nil { + return nil, fmt.Errorf("faulty reference: %v - CA info not found", issuer) + } + + parsedBundle, err := parseCABundle(sc.Context, sc.Backend, bundle) + if err != nil { + return nil, errutil.InternalError{Err: err.Error()} + } + + if parsedBundle.Certificate == nil { + return nil, errutil.InternalError{Err: "stored CA information not able to be parsed"} + } + + issuerIDCertMap[issuer] = parsedBundle.Certificate + } + + return issuerIDCertMap, nil +} + +// Revoke a certificate from a given serial number if it is present in local +// storage. +func tryRevokeCertBySerial(sc *storageContext, config *crlConfig, serial string) (*logical.Response, error) { + // revokeCert requires us to hold these locks before calling it. 
+ sc.Backend.revokeStorageLock.Lock() + defer sc.Backend.revokeStorageLock.Unlock() + + certEntry, err := fetchCertBySerial(sc, "certs/", serial) + if err != nil { + switch err.(type) { + case errutil.UserError: + return logical.ErrorResponse(err.Error()), nil + default: + return nil, err + } + } + + if certEntry == nil { + return nil, nil + } + + cert, err := x509.ParseCertificate(certEntry.Value) + if err != nil { + return nil, fmt.Errorf("error parsing certificate: %w", err) + } + + return revokeCert(sc, config, cert) +} + +// Revokes a cert, and tries to be smart about error recovery +func revokeCert(sc *storageContext, config *crlConfig, cert *x509.Certificate) (*logical.Response, error) { + // As this backend is self-contained and this function does not hook into + // third parties to manage users or resources, if the mount is tainted, + // revocation doesn't matter anyways -- the CRL that would be written will + // be immediately blown away by the view being cleared. So we can simply + // fast path a successful exit. + if sc.Backend.System().Tainted() { + return nil, nil + } + + colonSerial := serialFromCert(cert) + hyphenSerial := normalizeSerial(colonSerial) + + // Validate that no issuers match the serial number to be revoked. We need + // to gracefully degrade to the legacy cert bundle when it is required, as + // secondary PR clusters might not have been upgraded, but still need to + // handle revoking certs. + issuerIDCertMap, err := fetchIssuerMapForRevocationChecking(sc) + if err != nil { + return nil, err + } + + // Ensure we don't revoke an issuer via this API; use /issuer/:issuer_ref/revoke + // instead. + for issuer, certificate := range issuerIDCertMap { + if colonSerial == serialFromCert(certificate) { + return logical.ErrorResponse(fmt.Sprintf("adding issuer (id: %v) to its own CRL is not allowed", issuer)), nil + } + } + + curRevInfo, err := sc.fetchRevocationInfo(colonSerial) + if err != nil { + return nil, err + } + if curRevInfo != nil { + resp := &logical.Response{ + Data: map[string]interface{}{ + "revocation_time": curRevInfo.RevocationTime, + "state": "revoked", + }, + } + if !curRevInfo.RevocationTimeUTC.IsZero() { + resp.Data["revocation_time_rfc3339"] = curRevInfo.RevocationTimeUTC.Format(time.RFC3339Nano) + } + + return resp, nil + } + + // Add a little wiggle room because leases are stored with a second + // granularity + if cert.NotAfter.Before(time.Now().Add(2 * time.Second)) { + response := &logical.Response{} + response.AddWarning(fmt.Sprintf("certificate with serial %s already expired; refusing to add to CRL", colonSerial)) + return response, nil + } + + currTime := time.Now() + revInfo := revocationInfo{ + CertificateBytes: cert.Raw, + RevocationTime: currTime.Unix(), + RevocationTimeUTC: currTime.UTC(), + } + + // We may not find an issuer with this certificate; that's fine so + // ignore the return value. + associateRevokedCertWithIsssuer(&revInfo, cert, issuerIDCertMap) + + revEntry, err := logical.StorageEntryJSON(revokedPath+hyphenSerial, revInfo) + if err != nil { + return nil, fmt.Errorf("error creating revocation entry: %w", err) + } + + certsCounted := sc.Backend.certsCounted.Load() + err = sc.Storage.Put(sc.Context, revEntry) + if err != nil { + return nil, fmt.Errorf("error saving revoked certificate to new location: %w", err) + } + sc.Backend.ifCountEnabledIncrementTotalRevokedCertificatesCount(certsCounted, revEntry.Key) + + // From here on out, the certificate has been revoked locally. 
Any other
+	// persistence issues might still err, but any other failure messages
+	// should be added as warnings to the revocation.
+	resp := &logical.Response{
+		Data: map[string]interface{}{
+			"revocation_time":         revInfo.RevocationTime,
+			"revocation_time_rfc3339": revInfo.RevocationTimeUTC.Format(time.RFC3339Nano),
+			"state":                   "revoked",
+		},
+	}
+
+	// If this flag is enabled after the fact, existing local entries will be published to
+	// the unified storage space through a periodic function.
+	failedWritingUnifiedCRL := false
+	if config.UnifiedCRL {
+		entry := &unifiedRevocationEntry{
+			SerialNumber:      colonSerial,
+			CertExpiration:    cert.NotAfter,
+			RevocationTimeUTC: revInfo.RevocationTimeUTC,
+			CertificateIssuer: revInfo.CertificateIssuer,
+		}
+
+		ignoreErr := writeUnifiedRevocationEntry(sc, entry)
+		if ignoreErr != nil {
+			// Just log the error if we fail to write across clusters; a separate background
+			// thread will reattempt it later on, as we have the local write done.
+			sc.Backend.Logger().Error("Failed to write unified revocation entry, will re-attempt later",
+				"serial_number", colonSerial, "error", ignoreErr)
+			sc.Backend.unifiedTransferStatus.forceRun()
+
+			resp.AddWarning(fmt.Sprintf("Failed to write unified revocation entry, will re-attempt later: %v", ignoreErr))
+			failedWritingUnifiedCRL = true
+		}
+	}
+
+	if !config.AutoRebuild {
+		// Note that writing the Delta WAL here isn't necessary; we've
+		// already rebuilt the full CRL so the Delta WAL will be cleared
+		// afterwards. Writing an entry only to immediately remove it
+		// isn't necessary.
+		warnings, crlErr := sc.Backend.crlBuilder.rebuild(sc, false)
+		if crlErr != nil {
+			switch crlErr.(type) {
+			case errutil.UserError:
+				return logical.ErrorResponse(fmt.Sprintf("Error during CRL building: %s", crlErr)), nil
+			default:
+				return nil, fmt.Errorf("error encountered during CRL building: %w", crlErr)
+			}
+		}
+		for index, warning := range warnings {
+			resp.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning))
+		}
+	} else if config.EnableDelta {
+		if err := writeRevocationDeltaWALs(sc, config, resp, failedWritingUnifiedCRL, hyphenSerial, colonSerial); err != nil {
+			return nil, fmt.Errorf("failed to write WAL entries for Delta CRLs: %w", err)
+		}
+	}
+
+	return resp, nil
+}
+
+func writeRevocationDeltaWALs(sc *storageContext, config *crlConfig, resp *logical.Response, failedWritingUnifiedCRL bool, hyphenSerial string, colonSerial string) error {
+	if err := writeSpecificRevocationDeltaWALs(sc, hyphenSerial, colonSerial, localDeltaWALPath); err != nil {
+		return fmt.Errorf("failed to write local delta WAL entry: %w", err)
+	}
+
+	if config.UnifiedCRL && !failedWritingUnifiedCRL {
+		// We only need to write cross-cluster unified Delta WAL entries when
+		// it is enabled; in particular, because we rebuild CRLs when enabling
+		// this flag, any revocations that happened prior to enabling unified
+		// revocation will appear on the complete CRL (+/- synchronization:
+		// in particular, if a perf replica revokes a cert prior to seeing
+		// unified revocation enabled, but after the main node has done the
+		// listing for the unified CRL rebuild, this revocation will not
+		// appear on either the main or the next delta CRL, but will need to
+		// wait for a subsequent complete CRL rebuild).
+		//
+		// Lastly, we don't attempt this if the unified CRL entry failed to
+		// write, as we need that entry before the delta WAL entry will make
+		// sense.
+		if ignoredErr := writeSpecificRevocationDeltaWALs(sc, hyphenSerial, colonSerial, unifiedDeltaWALPath); ignoredErr != nil {
+			// Just log the error if we fail to write across clusters; a separate background
+			// thread will reattempt it later on, as we have the local write done.
+			sc.Backend.Logger().Error("Failed to write cross-cluster delta WAL entry, will re-attempt later",
+				"serial_number", colonSerial, "error", ignoredErr)
+			sc.Backend.unifiedTransferStatus.forceRun()
+
+			resp.AddWarning(fmt.Sprintf("Failed to write cross-cluster delta WAL entry, will re-attempt later: %v", ignoredErr))
+		}
+	} else if failedWritingUnifiedCRL {
+		resp.AddWarning("Skipping cross-cluster delta WAL entry as cross-cluster revocation failed to write; will re-attempt later.")
+	}
+
+	return nil
+}
+
+func writeSpecificRevocationDeltaWALs(sc *storageContext, hyphenSerial string, colonSerial string, pathPrefix string) error {
+	// Previously, regardless of whether or not we've presently enabled
+	// Delta CRLs, we would always write the Delta WAL in case it is
+	// enabled in the future. We thought we could trigger another full CRL
+	// rebuild instead (to avoid inconsistent state between the CRL and
+	// missing Delta WAL entries), but writing extra (unused?) WAL entries
+	// versus an expensive full CRL rebuild was thought of as being
+	// probably a net wash.
+	//
+	// However, we've now added unified CRL building, adding cross-cluster
+	// writes to the revocation path. Because this is relatively expensive,
+	// we've opted to rebuild the complete+delta CRLs when toggling the
+	// state of delta enabled, instead of always writing delta CRL entries.
+	//
+	// Thus Delta WAL building happens **only** when Delta CRLs are enabled.
+	//
+	// We should only do this when the cert hasn't already been revoked.
+	// Otherwise, the re-revocation may appear on both an existing CRL and
+	// on a delta CRL, or a serial may be skipped from the delta CRL if
+	// there's an A->B->A revocation pattern and the delta was rebuilt
+	// after the first cert.
+	//
+	// Currently we don't store any data in the WAL entry.
+	var walInfo deltaWALInfo
+	walEntry, err := logical.StorageEntryJSON(pathPrefix+hyphenSerial, walInfo)
+	if err != nil {
+		return fmt.Errorf("unable to create delta CRL WAL entry: %w", err)
+	}
+
+	if err = sc.Storage.Put(sc.Context, walEntry); err != nil {
+		return fmt.Errorf("error saving delta CRL WAL entry: %w", err)
+	}
+
+	// In order for periodic delta rebuild to be mildly efficient, we
+	// should write the last revoked delta WAL entry so we know if we
+	// have new revocations that we should rebuild the delta WAL for.
+	lastRevSerial := lastWALInfo{Serial: colonSerial}
+	lastWALEntry, err := logical.StorageEntryJSON(pathPrefix+deltaWALLastRevokedSerialName, lastRevSerial)
+	if err != nil {
+		return fmt.Errorf("unable to create last delta CRL WAL entry: %w", err)
+	}
+	if err = sc.Storage.Put(sc.Context, lastWALEntry); err != nil {
+		return fmt.Errorf("error saving last delta CRL WAL entry: %w", err)
+	}
+
+	return nil
+}
+
+func buildCRLs(sc *storageContext, forceNew bool) ([]string, error) {
+	return buildAnyCRLs(sc, forceNew, false)
+}
+
+func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) ([]string, error) {
+	// In order to build all CRLs, we need knowledge of all issuers. Any two
+	// issuers with the same keys _and_ subject should have the same CRL since
+	// they're functionally equivalent.
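+	// (For example, a cross-signed copy of an issuer shares the key and
+	// subject of the original certificate, so both entries map onto a
+	// single CRL.)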
+	//
+	// When building CRLs, there are two types of CRLs: an "internal" CRL for
+	// just certificates issued by this issuer, and a "default" CRL, which
+	// not only contains certificates by this issuer, but also ones issued
+	// by "unknown" or past issuers. This means we need knowledge of not
+	// only all issuers (to tell whether or not to include these orphaned
+	// certs) but whether the present issuer is the configured default.
+	//
+	// If a configured default is lacking, we won't provision these
+	// certificates on any CRL.
+	//
+	// In order to know which CRL a given cert belongs on, we have to read
+	// it into memory, identify the corresponding issuer, and update its
+	// map with the revoked cert instance. If no such issuer is found, we'll
+	// place it in the default issuer's CRL.
+	//
+	// By not relying on the _cert_'s storage, we allow issuers to come and
+	// go (either by direct deletion, having their keys deleted, or by usage
+	// restrictions) -- and when they return, we'll correctly place certs
+	// on their CRLs.
+
+	// See the message in revokeCert about rebuilding CRLs: we need to
+	// gracefully handle revoking entries with the legacy cert bundle.
+	var err error
+	var issuers []issuerID
+	var wasLegacy bool
+
+	// First, fetch an updated copy of the CRL config. We'll pass this into
+	// buildCRL.
+	globalCRLConfig, err := sc.Backend.crlBuilder.getConfigWithUpdate(sc)
+	if err != nil {
+		return nil, fmt.Errorf("error building CRL: while updating config: %w", err)
+	}
+
+	if globalCRLConfig.Disable && !forceNew {
+		// We build a single long-lived (but regular validity) empty CRL in
+		// the event that we disable the CRL, but we don't keep updating it
+		// with newer, more-valid empty CRLs in the event that we later
+		// re-enable it. This is a historical behavior.
+		//
+		// So, since tidy can now associate issuers on revocation entries, we
+		// can skip the rest of this function and exit early without updating
+		// anything.
+		return nil, nil
+	}
+
+	if !sc.Backend.useLegacyBundleCaStorage() {
+		issuers, err = sc.listIssuers()
+		if err != nil {
+			return nil, fmt.Errorf("error building CRL: while listing issuers: %w", err)
+		}
+	} else {
+		// Here, we hard-code the legacy issuer entry instead of using the
+		// default ref. This is because we need to hack some of the logic
+		// below for revocation to handle the legacy bundle.
+		issuers = []issuerID{legacyBundleShimID}
+		wasLegacy = true
+
+		// Here, we avoid building a delta CRL with the legacy CRL bundle.
+		//
+		// Users should upgrade symmetrically, rather than attempting
+		// backward compatibility for new features across disparate versions.
+		if isDelta {
+			return []string{"refusing to rebuild delta CRL with legacy bundle; finish migrating to newer issuer storage layout"}, nil
+		}
+	}
+
+	issuersConfig, err := sc.getIssuersConfig()
+	if err != nil {
+		return nil, fmt.Errorf("error building CRLs: while getting the default config: %w", err)
+	}
+
+	// We map issuerID->entry for fast lookup and also issuerID->Cert for
+	// signature verification and correlation of revoked certs.
+	issuerIDEntryMap := make(map[issuerID]*issuerEntry, len(issuers))
+	issuerIDCertMap := make(map[issuerID]*x509.Certificate, len(issuers))
+
+	// We use a double map (keyID->subject->issuerID) to store whether or not this
+	// key+subject pairing has been seen before. We can then iterate over each
+	// key/subject and choose any representative issuer for that combination.
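+	//
+	// Illustrative shape (hypothetical IDs):
+	//
+	//	keySubjectIssuersMap[keyID("key-123")]["CN=Example Root"] =
+	//		[]issuerID{"issuer-a", "issuer-b"}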
+ keySubjectIssuersMap := make(map[keyID]map[string][]issuerID) + for _, issuer := range issuers { + // We don't strictly need this call, but by requesting the bundle, the + // legacy path is automatically ignored. + thisEntry, _, err := sc.fetchCertBundleByIssuerId(issuer, false) + if err != nil { + return nil, fmt.Errorf("error building CRLs: unable to fetch specified issuer (%v): %w", issuer, err) + } + + if len(thisEntry.KeyID) == 0 { + continue + } + + // n.b.: issuer usage check has been delayed. This occurred because + // we want to ensure any issuer (representative of a larger set) can + // be used to associate revocation entries and we won't bother + // rewriting that entry (causing churn) if the particular selected + // issuer lacks CRL signing capabilities. + // + // The result is that this map (and the other maps) contain all the + // issuers we know about, and only later do we check crlSigning before + // choosing our representative. + // + // The other side effect (making this not compatible with Vault 1.11 + // behavior) is that _identified_ certificates whose issuer set is + // not allowed for crlSigning will no longer appear on the default + // issuer's CRL. + issuerIDEntryMap[issuer] = thisEntry + + thisCert, err := thisEntry.GetCertificate() + if err != nil { + return nil, fmt.Errorf("error building CRLs: unable to parse issuer (%v)'s certificate: %w", issuer, err) + } + issuerIDCertMap[issuer] = thisCert + + subject := string(thisCert.RawSubject) + if _, ok := keySubjectIssuersMap[thisEntry.KeyID]; !ok { + keySubjectIssuersMap[thisEntry.KeyID] = make(map[string][]issuerID) + } + + keySubjectIssuersMap[thisEntry.KeyID][subject] = append(keySubjectIssuersMap[thisEntry.KeyID][subject], issuer) + } + + // Now we do two calls: building the cluster-local CRL, and potentially + // building the global CRL if we're on the active node of the performance + // primary. + currLocalDeltaSerials, localWarnings, err := buildAnyLocalCRLs(sc, issuersConfig, globalCRLConfig, + issuers, issuerIDEntryMap, + issuerIDCertMap, keySubjectIssuersMap, + wasLegacy, forceNew, isDelta) + if err != nil { + return nil, err + } + currUnifiedDeltaSerials, unifiedWarnings, err := buildAnyUnifiedCRLs(sc, issuersConfig, globalCRLConfig, + issuers, issuerIDEntryMap, + issuerIDCertMap, keySubjectIssuersMap, + wasLegacy, forceNew, isDelta) + if err != nil { + return nil, err + } + + var warnings []string + for _, warning := range localWarnings { + warnings = append(warnings, fmt.Sprintf("warning from local CRL rebuild: %v", warning)) + } + for _, warning := range unifiedWarnings { + warnings = append(warnings, fmt.Sprintf("warning from unified CRL rebuild: %v", warning)) + } + + // Finally, we decide if we need to rebuild the Delta CRLs again, for both + // global and local CRLs if necessary. + if !isDelta { + // After we've confirmed the primary CRLs have built OK, go ahead and + // clear the delta CRL WAL and rebuild it. 
+ if err := sc.Backend.crlBuilder.clearLocalDeltaWAL(sc, currLocalDeltaSerials); err != nil { + return nil, fmt.Errorf("error building CRLs: unable to clear Delta WAL: %w", err) + } + if err := sc.Backend.crlBuilder.clearUnifiedDeltaWAL(sc, currUnifiedDeltaSerials); err != nil { + return nil, fmt.Errorf("error building CRLs: unable to clear Delta WAL: %w", err) + } + deltaWarnings, err := sc.Backend.crlBuilder.rebuildDeltaCRLsHoldingLock(sc, forceNew) + if err != nil { + return nil, fmt.Errorf("error building CRLs: unable to rebuild empty Delta WAL: %w", err) + } + for _, warning := range deltaWarnings { + warnings = append(warnings, fmt.Sprintf("warning from delta CRL rebuild: %v", warning)) + } + } + + return warnings, nil +} + +func getLastWALSerial(sc *storageContext, path string) (string, error) { + lastWALEntry, err := sc.Storage.Get(sc.Context, path) + if err != nil { + return "", err + } + + if lastWALEntry != nil && lastWALEntry.Value != nil { + var walInfo lastWALInfo + if err := lastWALEntry.DecodeJSON(&walInfo); err != nil { + return "", err + } + + return walInfo.Serial, nil + } + + // No serial to return. + return "", nil +} + +func buildAnyLocalCRLs( + sc *storageContext, + issuersConfig *issuerConfigEntry, + globalCRLConfig *crlConfig, + issuers []issuerID, + issuerIDEntryMap map[issuerID]*issuerEntry, + issuerIDCertMap map[issuerID]*x509.Certificate, + keySubjectIssuersMap map[keyID]map[string][]issuerID, + wasLegacy bool, + forceNew bool, + isDelta bool, +) ([]string, []string, error) { + var err error + var warnings []string + + // Before we load cert entries, we want to store the last seen delta WAL + // serial number. The subsequent List will have at LEAST that certificate + // (and potentially more) in it; when we're done writing the delta CRL, + // we'll write this serial as a sentinel to see if we need to rebuild it + // in the future. + var lastDeltaSerial string + if isDelta { + lastDeltaSerial, err = getLastWALSerial(sc, localDeltaWALLastRevokedSerial) + if err != nil { + return nil, nil, err + } + } + + // We fetch a list of delta WAL entries prior to generating the complete + // CRL. This allows us to avoid a lock (to clear such storage): anything + // visible now, should also be visible on the complete CRL we're writing. + var currDeltaCerts []string + if !isDelta { + currDeltaCerts, err = sc.Backend.crlBuilder.getPresentLocalDeltaWALForClearing(sc) + if err != nil { + return nil, nil, fmt.Errorf("error building CRLs: unable to get present delta WAL entries for removal: %w", err) + } + } + + var unassignedCerts []pkix.RevokedCertificate + var revokedCertsMap map[issuerID][]pkix.RevokedCertificate + + // If the CRL is disabled do not bother reading in all the revoked certificates. + if !globalCRLConfig.Disable { + // Next, we load and parse all revoked certificates. We need to assign + // these certificates to an issuer. Some certificates will not be + // assignable (if they were issued by a since-deleted issuer), so we need + // a separate pool for those. + unassignedCerts, revokedCertsMap, err = getLocalRevokedCertEntries(sc, issuerIDCertMap, isDelta) + if err != nil { + return nil, nil, fmt.Errorf("error building CRLs: unable to get revoked certificate entries: %w", err) + } + + if !isDelta { + // Revoking an issuer forces us to rebuild our complete CRL, + // regardless of whether or not we've enabled auto rebuilding or + // delta CRLs. 
If we elide the above isDelta check, this results + // in a non-empty delta CRL, containing the serial of the + // now-revoked issuer, even though it was generated _after_ the + // complete CRL with the issuer on it. There's no reason to + // duplicate this serial number on the delta, hence the above + // guard for isDelta. + if err := augmentWithRevokedIssuers(issuerIDEntryMap, issuerIDCertMap, revokedCertsMap); err != nil { + return nil, nil, fmt.Errorf("error building CRLs: unable to parse revoked issuers: %w", err) + } + } + } + + // Fetch the cluster-local CRL mapping so we know where to write the + // CRLs. + internalCRLConfig, err := sc.getLocalCRLConfig() + if err != nil { + return nil, nil, fmt.Errorf("error building CRLs: unable to fetch cluster-local CRL configuration: %w", err) + } + + rebuildWarnings, err := buildAnyCRLsWithCerts(sc, issuersConfig, globalCRLConfig, internalCRLConfig, + issuers, issuerIDEntryMap, keySubjectIssuersMap, + unassignedCerts, revokedCertsMap, + forceNew, false /* isUnified */, isDelta) + if err != nil { + return nil, nil, fmt.Errorf("error building CRLs: %w", err) + } + if len(rebuildWarnings) > 0 { + warnings = append(warnings, rebuildWarnings...) + } + + // Finally, persist our potentially updated local CRL config. Only do this + // if we didn't have a legacy CRL bundle. + if !wasLegacy { + if err := sc.setLocalCRLConfig(internalCRLConfig); err != nil { + return nil, nil, fmt.Errorf("error building CRLs: unable to persist updated cluster-local CRL config: %w", err) + } + } + + if isDelta { + // Update our last build time here so we avoid checking for new certs + // for a while. + sc.Backend.crlBuilder.lastDeltaRebuildCheck = time.Now() + + if len(lastDeltaSerial) > 0 { + // When we have a last delta serial, write out the relevant info + // so we can skip extra CRL rebuilds. + deltaInfo := lastDeltaInfo{Serial: lastDeltaSerial} + + lastDeltaBuildEntry, err := logical.StorageEntryJSON(localDeltaWALLastBuildSerial, deltaInfo) + if err != nil { + return nil, nil, fmt.Errorf("error creating last delta CRL rebuild serial entry: %w", err) + } + + err = sc.Storage.Put(sc.Context, lastDeltaBuildEntry) + if err != nil { + return nil, nil, fmt.Errorf("error persisting last delta CRL rebuild info: %w", err) + } + } + } + + return currDeltaCerts, warnings, nil +} + +func buildAnyUnifiedCRLs( + sc *storageContext, + issuersConfig *issuerConfigEntry, + globalCRLConfig *crlConfig, + issuers []issuerID, + issuerIDEntryMap map[issuerID]*issuerEntry, + issuerIDCertMap map[issuerID]*x509.Certificate, + keySubjectIssuersMap map[keyID]map[string][]issuerID, + wasLegacy bool, + forceNew bool, + isDelta bool, +) ([]string, []string, error) { + var err error + var warnings []string + + // Unified CRL can only be built by the main cluster. + b := sc.Backend + if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) || + (!b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) { + return nil, nil, nil + } + + // Unified CRL should only be built if enabled. + if !globalCRLConfig.UnifiedCRL && !forceNew { + return nil, nil, nil + } + + // Before we load cert entries, we want to store the last seen delta WAL + // serial number. The subsequent List will have at LEAST that certificate + // (and potentially more) in it; when we're done writing the delta CRL, + // we'll write this serial as a sentinel to see if we need to rebuild it + // in the future. 
+ // + // We need to do this per-cluster. + lastDeltaSerial := map[string]string{} + if isDelta { + clusters, err := sc.Storage.List(sc.Context, unifiedDeltaWALPrefix) + if err != nil { + return nil, nil, fmt.Errorf("error listing clusters for unified delta WAL building: %w", err) + } + + for index, cluster := range clusters { + path := unifiedDeltaWALPrefix + cluster + deltaWALLastRevokedSerialName + serial, err := getLastWALSerial(sc, path) + if err != nil { + return nil, nil, fmt.Errorf("error getting last written Delta WAL serial for cluster (%v / %v): %w", index, cluster, err) + } + + lastDeltaSerial[cluster] = serial + } + } + + // We fetch a list of delta WAL entries prior to generating the complete + // CRL. This allows us to avoid a lock (to clear such storage): anything + // visible now, should also be visible on the complete CRL we're writing. + var currDeltaCerts []string + if !isDelta { + currDeltaCerts, err = sc.Backend.crlBuilder.getPresentUnifiedDeltaWALForClearing(sc) + if err != nil { + return nil, nil, fmt.Errorf("error building CRLs: unable to get present delta WAL entries for removal: %w", err) + } + } + + var unassignedCerts []pkix.RevokedCertificate + var revokedCertsMap map[issuerID][]pkix.RevokedCertificate + + // If the CRL is disabled do not bother reading in all the revoked certificates. + if !globalCRLConfig.Disable { + // Next, we load and parse all revoked certificates. We need to assign + // these certificates to an issuer. Some certificates will not be + // assignable (if they were issued by a since-deleted issuer), so we need + // a separate pool for those. + unassignedCerts, revokedCertsMap, err = getUnifiedRevokedCertEntries(sc, issuerIDCertMap, isDelta) + if err != nil { + return nil, nil, fmt.Errorf("error building CRLs: unable to get revoked certificate entries: %w", err) + } + + if !isDelta { + // Revoking an issuer forces us to rebuild our complete CRL, + // regardless of whether or not we've enabled auto rebuilding or + // delta CRLs. If we elide the above isDelta check, this results + // in a non-empty delta CRL, containing the serial of the + // now-revoked issuer, even though it was generated _after_ the + // complete CRL with the issuer on it. There's no reason to + // duplicate this serial number on the delta, hence the above + // guard for isDelta. + if err := augmentWithRevokedIssuers(issuerIDEntryMap, issuerIDCertMap, revokedCertsMap); err != nil { + return nil, nil, fmt.Errorf("error building CRLs: unable to parse revoked issuers: %w", err) + } + } + } + + // Fetch the cluster-local CRL mapping so we know where to write the + // CRLs. + internalCRLConfig, err := sc.getUnifiedCRLConfig() + if err != nil { + return nil, nil, fmt.Errorf("error building CRLs: unable to fetch cluster-local CRL configuration: %w", err) + } + + rebuildWarnings, err := buildAnyCRLsWithCerts(sc, issuersConfig, globalCRLConfig, internalCRLConfig, + issuers, issuerIDEntryMap, keySubjectIssuersMap, + unassignedCerts, revokedCertsMap, + forceNew, true /* isUnified */, isDelta) + if err != nil { + return nil, nil, fmt.Errorf("error building CRLs: %w", err) + } + if len(rebuildWarnings) > 0 { + warnings = append(warnings, rebuildWarnings...) + } + + // Finally, persist our potentially updated local CRL config. Only do this + // if we didn't have a legacy CRL bundle. 
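+	//
+	// [Editor's aside, not upstream code] The per-cluster sentinel written a
+	// few lines below pairs with the WAL's last-revoked marker: a later pass
+	// can skip a delta rebuild when the two agree. A minimal sketch of that
+	// comparison, assuming the JSON shapes of lastWALInfo and lastDeltaInfo
+	// remain compatible:
+	//
+	//	lastRevoked, _ := getLastWALSerial(sc, unifiedDeltaWALPrefix+cluster+deltaWALLastRevokedSerialName)
+	//	lastBuilt, _ := getLastWALSerial(sc, unifiedDeltaWALPrefix+cluster+deltaWALLastBuildSerialName)
+	//	needRebuild := lastRevoked != "" && lastRevoked != lastBuilt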
+ if !wasLegacy { + if err := sc.setUnifiedCRLConfig(internalCRLConfig); err != nil { + return nil, nil, fmt.Errorf("error building CRLs: unable to persist updated cluster-local CRL config: %w", err) + } + } + + if isDelta { + // Update our last build time here so we avoid checking for new certs + // for a while. + sc.Backend.crlBuilder.lastDeltaRebuildCheck = time.Now() + + // Persist all of our known last revoked serial numbers here, as the + // last seen serial during build. This will allow us to detect if any + // new revocations have occurred, forcing us to rebuild the delta CRL. + for cluster, serial := range lastDeltaSerial { + if len(serial) == 0 { + continue + } + + // Make sure to use the cluster-specific path. Since we're on the + // active node of the primary cluster, we own this entry and can + // safely write it. + path := unifiedDeltaWALPrefix + cluster + deltaWALLastBuildSerialName + deltaInfo := lastDeltaInfo{Serial: serial} + lastDeltaBuildEntry, err := logical.StorageEntryJSON(path, deltaInfo) + if err != nil { + return nil, nil, fmt.Errorf("error creating last delta CRL rebuild serial entry: %w", err) + } + + err = sc.Storage.Put(sc.Context, lastDeltaBuildEntry) + if err != nil { + return nil, nil, fmt.Errorf("error persisting last delta CRL rebuild info: %w", err) + } + } + } + + return currDeltaCerts, warnings, nil +} + +func buildAnyCRLsWithCerts( + sc *storageContext, + issuersConfig *issuerConfigEntry, + globalCRLConfig *crlConfig, + internalCRLConfig *internalCRLConfigEntry, + issuers []issuerID, + issuerIDEntryMap map[issuerID]*issuerEntry, + keySubjectIssuersMap map[keyID]map[string][]issuerID, + unassignedCerts []pkix.RevokedCertificate, + revokedCertsMap map[issuerID][]pkix.RevokedCertificate, + forceNew bool, + isUnified bool, + isDelta bool, +) ([]string, error) { + // Now we can call buildCRL once, on an arbitrary/representative issuer + // from each of these (keyID, subject) sets. + var warnings []string + for _, subjectIssuersMap := range keySubjectIssuersMap { + for _, issuersSet := range subjectIssuersMap { + if len(issuersSet) == 0 { + continue + } + + var revokedCerts []pkix.RevokedCertificate + representative := issuerID("") + var crlIdentifier crlID + var crlIdIssuer issuerID + for _, issuerId := range issuersSet { + // Skip entries which aren't enabled for CRL signing. We don't + // particularly care which issuer is ultimately chosen as the + // set representative for signing at this point, other than + // that it has crl-signing usage. + if err := issuerIDEntryMap[issuerId].EnsureUsage(CRLSigningUsage); err != nil { + continue + } + + // Prefer to use the default as the representative of this + // set, if it is a member. + // + // If it is, we'll also pull in the unassigned certs to remain + // compatible with Vault's earlier, potentially questionable + // behavior. + if issuerId == issuersConfig.DefaultIssuerId { + if len(unassignedCerts) > 0 { + revokedCerts = append(revokedCerts, unassignedCerts...) + } + + representative = issuerId + } + + // Otherwise, use any other random issuer if we've not yet + // chosen one. + if representative == issuerID("") { + representative = issuerId + } + + // Pull in the revoked certs associated with this member. + if thisRevoked, ok := revokedCertsMap[issuerId]; ok && len(thisRevoked) > 0 { + revokedCerts = append(revokedCerts, thisRevoked...) + } + + // Finally, check our crlIdentifier. 
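+				// (Editor's note: issuers in one equivalence set are expected
+				// to share a single CRL ID; the mismatch error below surfaces
+				// the case where storage has drifted from that invariant.)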
+ if thisCRLId, ok := internalCRLConfig.IssuerIDCRLMap[issuerId]; ok && len(thisCRLId) > 0 { + if len(crlIdentifier) > 0 && crlIdentifier != thisCRLId { + return nil, fmt.Errorf("error building CRLs: two issuers with same keys/subjects (%v vs %v) have different internal CRL IDs: %v vs %v", issuerId, crlIdIssuer, thisCRLId, crlIdentifier) + } + + crlIdentifier = thisCRLId + crlIdIssuer = issuerId + } + } + + if representative == "" { + // Skip this set for the time being; while we have valid + // issuers and associated keys, this occurred because we lack + // crl-signing usage on all issuers in this set. + // + // But, tell the user about this, so they can either correct + // this by reissuing the CA certificate or adding an equivalent + // version with KU bits if the CA cert lacks KU altogether. + // + // See also: https://github.com/hashicorp/vault/issues/20137 + warning := "Issuer equivalency set with associated keys lacked an issuer with CRL Signing KeyUsage; refusing to rebuild CRL for this group of issuers: " + var issuers []string + for _, issuerId := range issuersSet { + issuers = append(issuers, issuerId.String()) + } + warning += strings.Join(issuers, ",") + + // We only need this warning once. :-) + if !isUnified && !isDelta { + warnings = append(warnings, warning) + } + + continue + } + + if len(crlIdentifier) == 0 { + // Create a new random UUID for this CRL if none exists. + crlIdentifier = genCRLId() + internalCRLConfig.CRLNumberMap[crlIdentifier] = 1 + } + + // Update all issuers in this group to set the CRL Issuer + for _, issuerId := range issuersSet { + internalCRLConfig.IssuerIDCRLMap[issuerId] = crlIdentifier + } + + // We always update the CRL Number since we never want to + // duplicate numbers and missing numbers is fine. + crlNumber := internalCRLConfig.CRLNumberMap[crlIdentifier] + internalCRLConfig.CRLNumberMap[crlIdentifier] += 1 + + // CRLs (regardless of complete vs delta) are incrementally + // numbered. But delta CRLs need to know the number of the + // last complete CRL. We assume that's the previous identifier + // if no value presently exists. + lastCompleteNumber, haveLast := internalCRLConfig.LastCompleteNumberMap[crlIdentifier] + if !haveLast { + // We use the value of crlNumber for the current CRL, so + // decrement it by one to find the last one. + lastCompleteNumber = crlNumber - 1 + } + + // Update `LastModified` + if isDelta { + internalCRLConfig.DeltaLastModified = time.Now().UTC() + } else { + internalCRLConfig.LastModified = time.Now().UTC() + } + + // Lastly, build the CRL. + nextUpdate, err := buildCRL(sc, globalCRLConfig, forceNew, representative, revokedCerts, crlIdentifier, crlNumber, isUnified, isDelta, lastCompleteNumber) + if err != nil { + return nil, fmt.Errorf("error building CRLs: unable to build CRL for issuer (%v): %w", representative, err) + } + + internalCRLConfig.CRLExpirationMap[crlIdentifier] = *nextUpdate + if !isDelta { + internalCRLConfig.LastCompleteNumberMap[crlIdentifier] = crlNumber + } else if !haveLast { + // Since we're writing this config anyways, save our guess + // as to the last CRL number. + internalCRLConfig.LastCompleteNumberMap[crlIdentifier] = lastCompleteNumber + } + } + } + + // Before persisting our updated CRL config, check to see if we have + // any dangling references. If we have any issuers that don't exist, + // remove them, remembering their CRLs IDs. If we've completely removed + // all issuers pointing to that CRL number, we can remove it from the + // number map and from storage. 
+	//
+	// Note that we persist the last generated CRL for a specified issuer
+	// if it is later disabled for CRL generation. This mirrors the old
+	// root deletion behavior, but using soft issuer deletes. If there is an
+	// alternate, equivalent issuer however, we'll keep updating the shared
+	// CRL; all equivalent issuers must have their CRLs disabled.
+	for mapIssuerId := range internalCRLConfig.IssuerIDCRLMap {
+		stillHaveIssuer := false
+		for _, listedIssuerId := range issuers {
+			if mapIssuerId == listedIssuerId {
+				stillHaveIssuer = true
+				break
+			}
+		}
+
+		if !stillHaveIssuer {
+			delete(internalCRLConfig.IssuerIDCRLMap, mapIssuerId)
+		}
+	}
+	for crlId := range internalCRLConfig.CRLNumberMap {
+		stillHaveIssuerForID := false
+		for _, remainingCRL := range internalCRLConfig.IssuerIDCRLMap {
+			if remainingCRL == crlId {
+				stillHaveIssuerForID = true
+				break
+			}
+		}
+
+		if !stillHaveIssuerForID {
+			if err := sc.Storage.Delete(sc.Context, "crls/"+crlId.String()); err != nil {
+				return nil, fmt.Errorf("error building CRLs: unable to clean up deleted issuers' CRL: %w", err)
+			}
+		}
+	}
+
+	// All good :-)
+	return warnings, nil
+}
+
+func isRevInfoIssuerValid(revInfo *revocationInfo, issuerIDCertMap map[issuerID]*x509.Certificate) bool {
+	if len(revInfo.CertificateIssuer) > 0 {
+		issuerId := revInfo.CertificateIssuer
+		if _, issuerExists := issuerIDCertMap[issuerId]; issuerExists {
+			return true
+		}
+	}
+
+	return false
+}
+
+func associateRevokedCertWithIssuer(revInfo *revocationInfo, revokedCert *x509.Certificate, issuerIDCertMap map[issuerID]*x509.Certificate) bool {
+	for issuerId, issuerCert := range issuerIDCertMap {
+		if bytes.Equal(revokedCert.RawIssuer, issuerCert.RawSubject) {
+			if err := revokedCert.CheckSignatureFrom(issuerCert); err == nil {
+				// Valid mapping. Add it to the specified entry.
+				revInfo.CertificateIssuer = issuerId
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+func getLocalRevokedCertEntries(sc *storageContext, issuerIDCertMap map[issuerID]*x509.Certificate, isDelta bool) ([]pkix.RevokedCertificate, map[issuerID][]pkix.RevokedCertificate, error) {
+	var unassignedCerts []pkix.RevokedCertificate
+	revokedCertsMap := make(map[issuerID][]pkix.RevokedCertificate)
+
+	listingPath := revokedPath
+	if isDelta {
+		listingPath = localDeltaWALPath
+	}
+
+	revokedSerials, err := sc.Storage.List(sc.Context, listingPath)
+	if err != nil {
+		return nil, nil, errutil.InternalError{Err: fmt.Sprintf("error fetching list of revoked certs: %s", err)}
+	}
+
+	// Build a mapping of issuer serial -> certificate.
+	issuerSerialCertMap := make(map[string][]*x509.Certificate, len(issuerIDCertMap))
+	for _, cert := range issuerIDCertMap {
+		serialStr := serialFromCert(cert)
+		issuerSerialCertMap[serialStr] = append(issuerSerialCertMap[serialStr], cert)
+	}
+
+	for _, serial := range revokedSerials {
+		if isDelta && (serial == deltaWALLastBuildSerialName || serial == deltaWALLastRevokedSerialName) {
+			// Skip our placeholder entries...
+			continue
+		}
+
+		var revInfo revocationInfo
+		revokedEntry, err := sc.Storage.Get(sc.Context, revokedPath+serial)
+		if err != nil {
+			return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch revoked cert with serial %s: %s", serial, err)}
+		}
+
+		if revokedEntry == nil {
+			return nil, nil, errutil.InternalError{Err: fmt.Sprintf("revoked certificate entry for serial %s is nil", serial)}
+		}
+		if len(revokedEntry.Value) == 0 {
+			// TODO: In this case, remove it and continue? How likely is this
+			// to happen? Alternatively, we could skip it entirely, or
+			// implement a delete function so that there is a way to remove
+			// these entries.
+			return nil, nil, errutil.InternalError{Err: "found revoked serial but actual certificate is empty"}
+		}
+
+		err = revokedEntry.DecodeJSON(&revInfo)
+		if err != nil {
+			return nil, nil, errutil.InternalError{Err: fmt.Sprintf("error decoding revocation entry for serial %s: %s", serial, err)}
+		}
+
+		revokedCert, err := x509.ParseCertificate(revInfo.CertificateBytes)
+		if err != nil {
+			return nil, nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse stored revoked certificate with serial %s: %s", serial, err)}
+		}
+
+		// We want to skip issuer certificates' revocation entries for two
+		// reasons:
+		//
+		// 1. We canonically use augmentWithRevokedIssuers to handle this
+		//    case and this entry is just a backup. This prevents the issue
+		//    of duplicate serial numbers on the CRL from both paths.
+		// 2. We want to keep a root's serial from appearing on its own
+		//    CRL. If it is a cross-signed or re-issued variant, this is OK,
+		//    but in the case where we mark the root itself as "revoked", we
+		//    want to avoid it appearing on the CRL as that is definitely
+		//    undefined/little-supported behavior.
+		//
+		// This hash map lookup should be faster than byte comparison against
+		// each issuer proactively.
+		if candidates, present := issuerSerialCertMap[serialFromCert(revokedCert)]; present {
+			revokedCertIsIssuer := false
+			for _, candidate := range candidates {
+				if bytes.Equal(candidate.Raw, revokedCert.Raw) {
+					revokedCertIsIssuer = true
+					break
+				}
+			}
+
+			if revokedCertIsIssuer {
+				continue
+			}
+		}
+
+		// NOTE: We have to change this to UTC time because the CRL standard
+		// mandates it, but Go will happily encode the CRL without it.
+		newRevCert := pkix.RevokedCertificate{
+			SerialNumber: revokedCert.SerialNumber,
+		}
+		if !revInfo.RevocationTimeUTC.IsZero() {
+			newRevCert.RevocationTime = revInfo.RevocationTimeUTC
+		} else {
+			newRevCert.RevocationTime = time.Unix(revInfo.RevocationTime, 0).UTC()
+		}
+
+		// If we have a CertificateIssuer field on the revocation entry,
+		// prefer it to manually checking each issuer signature, assuming it
+		// appears valid. It's highly unlikely for two different issuers
+		// to have the same id (after the first was deleted).
+		if isRevInfoIssuerValid(&revInfo, issuerIDCertMap) {
+			revokedCertsMap[revInfo.CertificateIssuer] = append(revokedCertsMap[revInfo.CertificateIssuer], newRevCert)
+			continue
+		}
+
+		// Otherwise, fall through: we need to assign the revoked certificate
+		// to an issuer and update the stored entry.
+		foundParent := associateRevokedCertWithIssuer(&revInfo, revokedCert, issuerIDCertMap)
+		if !foundParent {
+			// If the parent isn't found, add it to the unassigned bucket.
+			unassignedCerts = append(unassignedCerts, newRevCert)
+		} else {
+			revokedCertsMap[revInfo.CertificateIssuer] = append(revokedCertsMap[revInfo.CertificateIssuer], newRevCert)
+
+			// When the CertificateIssuer field wasn't found on the existing
+			// entry (or was invalid), and we've found a new value for it,
+			// we should update the entry to make future CRL builds faster.
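+			// (Editor's note: the rewrite below is best-effort caching; if
+			// the Put fails, the whole CRL build aborts, so the stored
+			// CertificateIssuer never drifts from what was verified above.)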
+			revokedEntry, err = logical.StorageEntryJSON(revokedPath+serial, revInfo)
+			if err != nil {
+				return nil, nil, fmt.Errorf("error creating revocation entry for existing cert: %v: %w", serial, err)
+			}
+
+			err = sc.Storage.Put(sc.Context, revokedEntry)
+			if err != nil {
+				return nil, nil, fmt.Errorf("error updating revoked certificate at existing location: %v: %w", serial, err)
+			}
+		}
+	}
+
+	return unassignedCerts, revokedCertsMap, nil
+}
+
+func getUnifiedRevokedCertEntries(sc *storageContext, issuerIDCertMap map[issuerID]*x509.Certificate, isDelta bool) ([]pkix.RevokedCertificate, map[issuerID][]pkix.RevokedCertificate, error) {
+	// Getting unified revocation entries is a bit different from getting
+	// the local ones. In particular, the full copy of the certificate is
+	// unavailable, so we'll be able to avoid parsing the stored certificate,
+	// at the expense of potentially having incorrect issuer mappings.
+	var unassignedCerts []pkix.RevokedCertificate
+	revokedCertsMap := make(map[issuerID][]pkix.RevokedCertificate)
+
+	listingPath := unifiedRevocationReadPathPrefix
+	if isDelta {
+		listingPath = unifiedDeltaWALPrefix
+	}
+
+	// First, we find all clusters that have written certificates.
+	clusterIds, err := sc.Storage.List(sc.Context, listingPath)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to list clusters for unified CRL building: %w", err)
+	}
+
+	// We wish to prevent duplicate revocations on separate clusters from
+	// being added multiple times to the CRL. While we can't guarantee these
+	// are the same certificate, it doesn't matter: as long as they share an
+	// issuer, duplicates would imply issuance of two certs with the same
+	// serial number. That would be an intentional violation of RFC 5280 if
+	// done before importing an issuer into Vault, and is highly unlikely
+	// within Vault due to its 120-bit random serial numbers.
+	foundSerials := make(map[string]bool)
+
+	// Then for every cluster, we find its revoked certificates...
+	for _, clusterId := range clusterIds {
+		if !strings.HasSuffix(clusterId, "/") {
+			// No entries
+			continue
+		}
+
+		clusterPath := listingPath + clusterId
+		serials, err := sc.Storage.List(sc.Context, clusterPath)
+		if err != nil {
+			return nil, nil, fmt.Errorf("failed to list serials in cluster (%v) for unified CRL building: %w", clusterId, err)
+		}
+
+		// At this point, we need the storage entry. Rather than using the
+		// clusterPath and adding the serial, we need to use the true
+		// cross-cluster revocation entry (as our above listing might have
+		// used delta WAL entries without the full revocation info).
+		serialPrefix := unifiedRevocationReadPathPrefix + clusterId
+		for _, serial := range serials {
+			if isDelta && (serial == deltaWALLastBuildSerialName || serial == deltaWALLastRevokedSerialName) {
+				// Skip our placeholder entries...
+				continue
+			}
+
+			serialPath := serialPrefix + serial
+			entryRaw, err := sc.Storage.Get(sc.Context, serialPath)
+			if err != nil {
+				return nil, nil, fmt.Errorf("failed to read unified revocation entry in cluster (%v) for unified CRL building: %w", clusterId, err)
+			}
+			if entryRaw == nil {
+				// Skip empty entries. We'll eventually tidy them.
+				continue
+			}
+
+			var xRevEntry unifiedRevocationEntry
+			if err := entryRaw.DecodeJSON(&xRevEntry); err != nil {
+				return nil, nil, fmt.Errorf("failed JSON decoding of unified revocation entry at path %v: %w", serialPath, err)
+			}
+
+			// Convert to pkix.RevokedCertificate entries.
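+			// [Editor's sketch, assumed behavior] serialToBigInt presumably
+			// parses Vault's colon-separated hex serials, along the lines of:
+			//
+			//	hexStr := strings.ReplaceAll(serial, ":", "") // "0a:1b:2c" -> "0a1b2c"
+			//	bi, ok := new(big.Int).SetString(hexStr, 16)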
+			var revEntry pkix.RevokedCertificate
+			var ok bool
+			revEntry.SerialNumber, ok = serialToBigInt(serial)
+			if !ok {
+				return nil, nil, fmt.Errorf("failed to encode serial for CRL building: %v", serial)
+			}
+
+			revEntry.RevocationTime = xRevEntry.RevocationTimeUTC
+
+			if foundSerials[normalizeSerial(serial)] {
+				// Serial has already been added to the CRL.
+				continue
+			}
+			foundSerials[normalizeSerial(serial)] = true
+
+			// Finally, add it to the correct mapping.
+			_, present := issuerIDCertMap[xRevEntry.CertificateIssuer]
+			if !present {
+				unassignedCerts = append(unassignedCerts, revEntry)
+			} else {
+				revokedCertsMap[xRevEntry.CertificateIssuer] = append(revokedCertsMap[xRevEntry.CertificateIssuer], revEntry)
+			}
+		}
+	}
+
+	return unassignedCerts, revokedCertsMap, nil
+}
+
+func augmentWithRevokedIssuers(issuerIDEntryMap map[issuerID]*issuerEntry, issuerIDCertMap map[issuerID]*x509.Certificate, revokedCertsMap map[issuerID][]pkix.RevokedCertificate) error {
+	// When we set up our maps with the legacy CA bundle, we only have a
+	// single entry here. This entry is never revoked, so the outer loop
+	// will exit quickly.
+	for ourIssuerID, ourIssuer := range issuerIDEntryMap {
+		if !ourIssuer.Revoked {
+			continue
+		}
+
+		ourCert := issuerIDCertMap[ourIssuerID]
+		ourRevCert := pkix.RevokedCertificate{
+			SerialNumber:   ourCert.SerialNumber,
+			RevocationTime: ourIssuer.RevocationTimeUTC,
+		}
+
+		for otherIssuerID := range issuerIDEntryMap {
+			if otherIssuerID == ourIssuerID {
+				continue
+			}
+
+			// Find all _other_ certificates which verify this issuer,
+			// allowing us to add this revoked issuer to this issuer's
+			// CRL.
+			otherCert := issuerIDCertMap[otherIssuerID]
+			if err := ourCert.CheckSignatureFrom(otherCert); err == nil {
+				// Valid signature; add our result.
+				revokedCertsMap[otherIssuerID] = append(revokedCertsMap[otherIssuerID], ourRevCert)
+			}
+		}
+	}
+
+	return nil
+}
+
+// buildCRL builds a CRL by going through the list of revoked certificates
+// and building a new CRL with the stored revocation times and serial numbers.
+func buildCRL(sc *storageContext, crlInfo *crlConfig, forceNew bool, thisIssuerId issuerID, revoked []pkix.RevokedCertificate, identifier crlID, crlNumber int64, isUnified bool, isDelta bool, lastCompleteNumber int64) (*time.Time, error) {
+	var revokedCerts []pkix.RevokedCertificate
+
+	crlLifetime, err := parseutil.ParseDurationSecond(crlInfo.Expiry)
+	if err != nil {
+		return nil, errutil.InternalError{Err: fmt.Sprintf("error parsing CRL duration of %s", crlInfo.Expiry)}
+	}
+
+	if crlInfo.Disable {
+		if !forceNew {
+			// In the event of a disabled CRL, we'll have the next time set
+			// to the zero time as a sentinel in case we get re-enabled.
+			return &time.Time{}, nil
+		}
+
+		// NOTE: in this case, the passed argument (revoked) is not added
+		// to the revokedCerts list. This is because we want to sign an
+		// **empty** CRL (as the CRL was disabled but we've specified the
+		// forceNew option). In previous versions of Vault (1.10 series and
+		// earlier), we'd have queried the certs below, whereas we now have
+		// an assignment from a pre-queried list.
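+		// (Editor's note: the goto below skips only the
+		// `revokedCerts = revoked` assignment, so this path signs an empty
+		// list while sharing the signing and persistence code at WRITE.)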
+ goto WRITE + } + + revokedCerts = revoked + +WRITE: + signingBundle, caErr := sc.fetchCAInfoByIssuerId(thisIssuerId, CRLSigningUsage) + if caErr != nil { + switch caErr.(type) { + case errutil.UserError: + return nil, errutil.UserError{Err: fmt.Sprintf("could not fetch the CA certificate: %s", caErr)} + default: + return nil, errutil.InternalError{Err: fmt.Sprintf("error fetching CA certificate: %s", caErr)} + } + } + + now := time.Now() + nextUpdate := now.Add(crlLifetime) + + var extensions []pkix.Extension + if isDelta { + ext, err := certutil.CreateDeltaCRLIndicatorExt(lastCompleteNumber) + if err != nil { + return nil, fmt.Errorf("could not create crl delta indicator extension: %w", err) + } + extensions = []pkix.Extension{ext} + } + + revocationListTemplate := &x509.RevocationList{ + RevokedCertificates: revokedCerts, + Number: big.NewInt(crlNumber), + ThisUpdate: now, + NextUpdate: nextUpdate, + SignatureAlgorithm: signingBundle.RevocationSigAlg, + ExtraExtensions: extensions, + } + + crlBytes, err := x509.CreateRevocationList(rand.Reader, revocationListTemplate, signingBundle.Certificate, signingBundle.PrivateKey) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("error creating new CRL: %s", err)} + } + + writePath := "crls/" + identifier.String() + if thisIssuerId == legacyBundleShimID { + // Ignore the CRL ID as it won't be persisted anyways; hard-code the + // old legacy path and allow it to be updated. + writePath = legacyCRLPath + } else { + if isUnified { + writePath = unifiedCRLPathPrefix + writePath + } + + if isDelta { + // Write the delta CRL to a unique storage location. + writePath += deltaCRLPathSuffix + } + } + + err = sc.Storage.Put(sc.Context, &logical.StorageEntry{ + Key: writePath, + Value: crlBytes, + }) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("error storing CRL: %s", err)} + } + + return &nextUpdate, nil +} + +// shouldLocalPathsUseUnified assuming a legacy path for a CRL/OCSP request, does our +// configuration say we should be returning the unified response or not +func shouldLocalPathsUseUnified(cfg *crlConfig) bool { + return cfg.UnifiedCRL && cfg.UnifiedCRLOnExistingPaths +} diff --git a/builtin/logical/pki/dnstest/server.go b/builtin/logical/pki/dnstest/server.go new file mode 100644 index 0000000..4b7189c --- /dev/null +++ b/builtin/logical/pki/dnstest/server.go @@ -0,0 +1,428 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package dnstest + +import ( + "context" + "fmt" + "net" + "strings" + "sync" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/stretchr/testify/require" +) + +type TestServer struct { + t *testing.T + ctx context.Context + log hclog.Logger + + runner *docker.Runner + network string + startup *docker.Service + + lock sync.Mutex + serial int + forwarders []string + domains []string + records map[string]map[string][]string // domain -> record -> value(s). 
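+
+	// cleanup, if set, is invoked by Cleanup() before the container's own
+	// teardown hook runs. (Editor's descriptive comment.)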
+	cleanup func()
+}
+
+func SetupResolver(t *testing.T, domain string) *TestServer {
+	return SetupResolverOnNetwork(t, domain, "")
+}
+
+func SetupResolverOnNetwork(t *testing.T, domain string, network string) *TestServer {
+	var ts TestServer
+	ts.t = t
+	ts.ctx = context.Background()
+	ts.domains = []string{domain}
+	ts.records = map[string]map[string][]string{}
+	ts.network = network
+	ts.log = hclog.L()
+
+	ts.setupRunner(domain, network)
+	ts.startContainer(network)
+	ts.PushConfig()
+
+	return &ts
+}
+
+func (ts *TestServer) setupRunner(domain string, network string) {
+	var err error
+	ts.runner, err = docker.NewServiceRunner(docker.RunOptions{
+		ImageRepo:     "ubuntu/bind9",
+		ImageTag:      "latest",
+		ContainerName: "bind9-dns-" + strings.ReplaceAll(domain, ".", "-"),
+		NetworkName:   network,
+		Ports:         []string{"53/udp"},
+		// DNS container logging was disabled to reduce content within CI logs.
+		//LogConsumer: func(s string) {
+		//	ts.log.Info(s)
+		//},
+	})
+	require.NoError(ts.t, err)
+}
+
+func (ts *TestServer) startContainer(network string) {
+	connUpFunc := func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) {
+		// Perform a simple connection to this resolver, even though the
+		// default configuration doesn't do anything useful.
+		peer, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", host, port))
+		if err != nil {
+			return nil, fmt.Errorf("failed to resolve peer: %v / %v: %w", host, port, err)
+		}
+
+		conn, err := net.DialUDP("udp", nil, peer)
+		if err != nil {
+			return nil, fmt.Errorf("failed to dial peer: %v / %v / %v: %w", host, port, peer, err)
+		}
+		defer conn.Close()
+
+		_, err = conn.Write([]byte("garbage-in"))
+		if err != nil {
+			return nil, fmt.Errorf("failed to write to peer: %v / %v / %v: %w", host, port, peer, err)
+		}
+
+		// Connection worked.
+		return docker.NewServiceHostPort(host, port), nil
+	}
+
+	result, _, err := ts.runner.StartNewService(ts.ctx, true, true, connUpFunc)
+	require.NoError(ts.t, err, "failed to start dns resolver for "+ts.domains[0])
+	ts.startup = result
+
+	if ts.startup.StartResult.RealIP == "" {
+		mapping, err := ts.runner.GetNetworkAndAddresses(ts.startup.Container.ID)
+		require.NoError(ts.t, err, "failed to fetch network addresses to correct missing real IP address")
+		if len(network) == 0 {
+			require.Equal(ts.t, 1, len(mapping), "expected exactly one network address")
+			for network = range mapping {
+				// Because mapping is a map of network name->ip, we need
+				// to use the above range's assignment to get the name,
+				// as there is no other way of getting the keys of a map.
+			}
+		}
+		require.Contains(ts.t, mapping, network, "expected network to be part of the mapping")
+		ts.startup.StartResult.RealIP = mapping[network]
+	}
+
+	ts.log.Info(fmt.Sprintf("[dnsserv] Addresses of DNS resolver: local=%v / container=%v", ts.GetLocalAddr(), ts.GetRemoteAddr()))
+}
+
+func (ts *TestServer) buildNamedConf() string {
+	forwarders := "\n"
+	if len(ts.forwarders) > 0 {
+		forwarders = "\tforwarders {\n"
+		for _, forwarder := range ts.forwarders {
+			forwarders += "\t\t" + forwarder + ";\n"
+		}
+		forwarders += "\t};\n"
+	}
+
+	zones := "\n"
+	for _, domain := range ts.domains {
+		zones += fmt.Sprintf("zone \"%s\" {\n", domain)
+		zones += "\ttype primary;\n"
+		zones += fmt.Sprintf("\tfile \"%s.zone\";\n", domain)
+		zones += "\tallow-update {\n\t\tnone;\n\t};\n"
+		zones += "\tnotify no;\n"
+		zones += "};\n\n"
+	}
+
+	// Reverse lookups are not handled as they're not presently necessary.
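+	// [Editor's illustration] With one forwarder ("8.8.8.8", hypothetical)
+	// and one domain ("dadgarcorp.com", hypothetical), the assembled file
+	// looks roughly like:
+	//
+	//	options {
+	//		directory "/var/cache/bind";
+	//
+	//		dnssec-validation no;
+	//
+	//		forwarders {
+	//			8.8.8.8;
+	//		};
+	//	};
+	//
+	//	zone "dadgarcorp.com" {
+	//		type primary;
+	//		file "dadgarcorp.com.zone";
+	//		allow-update {
+	//			none;
+	//		};
+	//		notify no;
+	//	};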
+
+	cfg := `options {
+	directory "/var/cache/bind";
+
+	dnssec-validation no;
+
+` + forwarders + `
+};
+
+` + zones
+
+	return cfg
+}
+
+func (ts *TestServer) buildZoneFile(target string) string {
+	// One second TTL by default to allow quick refreshes.
+	zone := "$TTL 1;\n"
+
+	ts.serial += 1
+	zone += fmt.Sprintf("@\tIN\tSOA\tns.%v.\troot.%v.\t(\n", target, target)
+	zone += fmt.Sprintf("\t\t\t%d;\n\t\t\t1;\n\t\t\t1;\n\t\t\t2;\n\t\t\t1;\n\t\t\t)\n\n", ts.serial)
+	zone += fmt.Sprintf("@\tIN\tNS\tns%d.%v.\n", ts.serial, target)
+	zone += fmt.Sprintf("ns%d.%v.\tIN\tA\t%v\n", ts.serial, target, "127.0.0.1")
+
+	for domain, records := range ts.records {
+		if !strings.HasSuffix(domain, target) {
+			continue
+		}
+
+		for recordType, values := range records {
+			for _, value := range values {
+				zone += fmt.Sprintf("%s.\tIN\t%s\t%s\n", domain, recordType, value)
+			}
+		}
+	}
+
+	return zone
+}
+
+func (ts *TestServer) pushNamedConf() {
+	contents := docker.NewBuildContext()
+	cfgPath := "/etc/bind/named.conf.options"
+	namedCfg := ts.buildNamedConf()
+	contents[cfgPath] = docker.PathContentsFromString(namedCfg)
+	contents[cfgPath].SetOwners(0, 142) // root, bind
+
+	ts.log.Info(fmt.Sprintf("Generated bind9 config (%s):\n%v\n", cfgPath, namedCfg))
+
+	err := ts.runner.CopyTo(ts.startup.Container.ID, "/", contents)
+	require.NoError(ts.t, err, "failed pushing updated named.conf.options to container")
+}
+
+func (ts *TestServer) pushZoneFiles() {
+	contents := docker.NewBuildContext()
+
+	for _, domain := range ts.domains {
+		path := "/var/cache/bind/" + domain + ".zone"
+		zoneFile := ts.buildZoneFile(domain)
+		contents[path] = docker.PathContentsFromString(zoneFile)
+		contents[path].SetOwners(0, 142) // root, bind
+
+		ts.log.Info(fmt.Sprintf("Generated bind9 zone file for %v (%s):\n%v\n", domain, path, zoneFile))
+	}
+
+	err := ts.runner.CopyTo(ts.startup.Container.ID, "/", contents)
+	require.NoError(ts.t, err, "failed pushing updated zone files to container")
+}
+
+func (ts *TestServer) PushConfig() {
+	ts.lock.Lock()
+	defer ts.lock.Unlock()
+
+	_, _, _, err := ts.runner.RunCmdWithOutput(ts.ctx, ts.startup.Container.ID, []string{"rndc", "freeze"})
+	require.NoError(ts.t, err, "failed to freeze DNS config")
+
+	// There are two cases here:
+	//
+	// 1. We've added a new top-level domain name. Here, we want to make
+	//    sure the new zone file is pushed before we push the reference
+	//    to it.
+	// 2. We've just added a new record. Here, the order doesn't matter,
+	//    but most likely the second push will be a no-op.
+	ts.pushZoneFiles()
+	ts.pushNamedConf()
+
+	_, _, _, err = ts.runner.RunCmdWithOutput(ts.ctx, ts.startup.Container.ID, []string{"rndc", "thaw"})
+	require.NoError(ts.t, err, "failed to thaw DNS config")
+
+	// Wait until our config has taken.
+	corehelpers.RetryUntil(ts.t, 15*time.Second, func() error {
+		// bind reloads based on file mtime, so touch the files before
+		// reloading to make sure they appear updated more recently than
+		// when the last update was written. Then issue a new SIGHUP.
+		for _, domain := range ts.domains {
+			path := "/var/cache/bind/" + domain + ".zone"
+			touchCmd := []string{"touch", path}
+
+			_, _, _, err := ts.runner.RunCmdWithOutput(ts.ctx, ts.startup.Container.ID, touchCmd)
+			if err != nil {
+				return fmt.Errorf("failed to update zone mtime: %w", err)
+			}
+		}
+		ts.runner.DockerAPI.ContainerKill(ts.ctx, ts.startup.Container.ID, "SIGHUP")
+
+		// Connect to our bind resolver.
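+		// (Editor's note: the custom Dial below pins every query to the
+		// test container's mapped UDP address, bypassing the host's own
+		// resolver configuration.)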
+ resolver := &net.Resolver{ + PreferGo: true, + StrictErrors: false, + Dial: func(ctx context.Context, network, address string) (net.Conn, error) { + d := net.Dialer{ + Timeout: 10 * time.Second, + } + return d.DialContext(ctx, network, ts.GetLocalAddr()) + }, + } + + // last domain has the given serial number, which also appears in the + // NS record so we can fetch it via Go. + lastDomain := ts.domains[len(ts.domains)-1] + records, err := resolver.LookupNS(ts.ctx, lastDomain) + if err != nil { + return fmt.Errorf("failed to lookup NS record for %v: %w", lastDomain, err) + } + + if len(records) != 1 { + return fmt.Errorf("expected only 1 NS record for %v, got %v/%v", lastDomain, len(records), records) + } + + expectedNS := fmt.Sprintf("ns%d.%v.", ts.serial, lastDomain) + if records[0].Host != expectedNS { + return fmt.Errorf("expected to find NS %v, got %v indicating reload hadn't completed", expectedNS, records[0]) + } + + return nil + }) +} + +func (ts *TestServer) GetLocalAddr() string { + return ts.startup.Config.Address() +} + +func (ts *TestServer) GetRemoteAddr() string { + return fmt.Sprintf("%s:%d", ts.startup.StartResult.RealIP, 53) +} + +func (ts *TestServer) AddDomain(domain string) { + ts.lock.Lock() + defer ts.lock.Unlock() + + for _, existing := range ts.domains { + if existing == domain { + return + } + } + + ts.domains = append(ts.domains, domain) +} + +func (ts *TestServer) AddRecord(domain string, record string, value string) { + ts.lock.Lock() + defer ts.lock.Unlock() + + foundDomain := false + for _, existing := range ts.domains { + if strings.HasSuffix(domain, existing) { + foundDomain = true + break + } + } + if !foundDomain { + ts.t.Fatalf("cannot add record %v/%v :: [%v] -- no domain zone matching (%v)", record, domain, value, ts.domains) + } + + value = strings.TrimSpace(value) + if _, present := ts.records[domain]; !present { + ts.records[domain] = map[string][]string{} + } + + if values, present := ts.records[domain][record]; present { + for _, candidate := range values { + if candidate == value { + // Already present; skip adding. + return + } + } + } + + ts.records[domain][record] = append(ts.records[domain][record], value) +} + +func (ts *TestServer) RemoveRecord(domain string, record string, value string) { + ts.lock.Lock() + defer ts.lock.Unlock() + + foundDomain := false + for _, existing := range ts.domains { + if strings.HasSuffix(domain, existing) { + foundDomain = true + break + } + } + if !foundDomain { + // Not found. + return + } + + value = strings.TrimSpace(value) + if _, present := ts.records[domain]; !present { + // Not found. + return + } + + var remaining []string + if values, present := ts.records[domain][record]; present { + for _, candidate := range values { + if candidate != value { + remaining = append(remaining, candidate) + } + } + } + + ts.records[domain][record] = remaining +} + +func (ts *TestServer) RemoveRecordsOfTypeForDomain(domain string, record string) { + ts.lock.Lock() + defer ts.lock.Unlock() + + foundDomain := false + for _, existing := range ts.domains { + if strings.HasSuffix(domain, existing) { + foundDomain = true + break + } + } + if !foundDomain { + // Not found. + return + } + + if _, present := ts.records[domain]; !present { + // Not found. 
+ return + } + + delete(ts.records[domain], record) +} + +func (ts *TestServer) RemoveRecordsForDomain(domain string) { + ts.lock.Lock() + defer ts.lock.Unlock() + + foundDomain := false + for _, existing := range ts.domains { + if strings.HasSuffix(domain, existing) { + foundDomain = true + break + } + } + if !foundDomain { + // Not found. + return + } + + if _, present := ts.records[domain]; !present { + // Not found. + return + } + + ts.records[domain] = map[string][]string{} +} + +func (ts *TestServer) RemoveAllRecords() { + ts.lock.Lock() + defer ts.lock.Unlock() + + ts.records = map[string]map[string][]string{} +} + +func (ts *TestServer) Cleanup() { + if ts.cleanup != nil { + ts.cleanup() + } + if ts.startup != nil && ts.startup.Cleanup != nil { + ts.startup.Cleanup() + } +} diff --git a/builtin/logical/pki/fields.go b/builtin/logical/pki/fields.go new file mode 100644 index 0000000..b293539 --- /dev/null +++ b/builtin/logical/pki/fields.go @@ -0,0 +1,645 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "time" + + "github.com/hashicorp/vault/sdk/framework" +) + +const ( + issuerRefParam = "issuer_ref" + keyNameParam = "key_name" + keyRefParam = "key_ref" + keyIdParam = "key_id" + keyTypeParam = "key_type" + keyBitsParam = "key_bits" + skidParam = "subject_key_id" +) + +// addIssueAndSignCommonFields adds fields common to both CA and non-CA issuing +// and signing +func addIssueAndSignCommonFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { + fields["exclude_cn_from_sans"] = &framework.FieldSchema{ + Type: framework.TypeBool, + Default: false, + Description: `If true, the Common Name will not be +included in DNS or Email Subject Alternate Names. +Defaults to false (CN is included).`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Exclude Common Name from Subject Alternative Names (SANs)", + }, + } + + fields["format"] = &framework.FieldSchema{ + Type: framework.TypeString, + Default: "pem", + Description: `Format for returned data. Can be "pem", "der", +or "pem_bundle". If "pem_bundle", any private +key and issuing cert will be appended to the +certificate pem. If "der", the value will be +base64 encoded. Defaults to "pem".`, + AllowedValues: []interface{}{"pem", "der", "pem_bundle"}, + DisplayAttrs: &framework.DisplayAttributes{ + Value: "pem", + }, + } + + fields["private_key_format"] = &framework.FieldSchema{ + Type: framework.TypeString, + Default: "der", + Description: `Format for the returned private key. +Generally the default will be controlled by the "format" +parameter as either base64-encoded DER or PEM-encoded DER. +However, this can be set to "pkcs8" to have the returned +private key contain base64-encoded pkcs8 or PEM-encoded +pkcs8 instead. 
Defaults to "der".`, + AllowedValues: []interface{}{"", "der", "pem", "pkcs8"}, + DisplayAttrs: &framework.DisplayAttributes{ + Value: "der", + }, + } + + fields["ip_sans"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: `The requested IP SANs, if any, in a +comma-delimited list`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "IP Subject Alternative Names (SANs)", + }, + } + + fields["uri_sans"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: `The requested URI SANs, if any, in a +comma-delimited list.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "URI Subject Alternative Names (SANs)", + }, + } + + fields["other_sans"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: `Requested other SANs, in an array with the format +;UTF8: for each entry.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Other SANs", + }, + } + + return fields +} + +// addNonCACommonFields adds fields with help text specific to non-CA +// certificate issuing and signing +func addNonCACommonFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { + fields = addIssueAndSignCommonFields(fields) + + fields["role"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: `The desired role with configuration for this +request`, + } + + fields["common_name"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: `The requested common name; if you want more than +one, specify the alternative names in the +alt_names map. If email protection is enabled +in the role, this may be an email address.`, + } + + fields["alt_names"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: `The requested Subject Alternative Names, if any, +in a comma-delimited list. If email protection +is enabled for the role, this may contain +email addresses.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "DNS/Email Subject Alternative Names (SANs)", + }, + } + + fields["serial_number"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: `The Subject's requested serial number, if any. +See RFC 4519 Section 2.31 'serialNumber' for a description of this field. +If you want more than one, specify alternative names in the alt_names +map using OID 2.5.4.5. This has no impact on the final certificate's +Serial Number field.`, + } + + fields["ttl"] = &framework.FieldSchema{ + Type: framework.TypeDurationSecond, + Description: `The requested Time To Live for the certificate; +sets the expiration date. If not specified +the role default, backend default, or system +default TTL is used, in that order. Cannot +be larger than the role max TTL.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "TTL", + }, + } + + fields["not_after"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: `Set the not after field of the certificate with specified date value. +The value format should be given in UTC format YYYY-MM-ddTHH:MM:SSZ`, + } + + fields["remove_roots_from_chain"] = &framework.FieldSchema{ + Type: framework.TypeBool, + Default: false, + Description: `Whether or not to remove self-signed CA certificates in the output +of the ca_chain field.`, + } + + fields["user_ids"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: `The requested user_ids value to place in the subject, +if any, in a comma-delimited list. Restricted by allowed_user_ids. 
+Any values are added with OID 0.9.2342.19200300.100.1.1.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "User ID(s)", + }, + } + + fields = addIssuerRefField(fields) + + return fields +} + +// addCACommonFields adds fields with help text specific to CA +// certificate issuing and signing +func addCACommonFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { + fields = addIssueAndSignCommonFields(fields) + + fields["alt_names"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: `The requested Subject Alternative Names, if any, +in a comma-delimited list. May contain both +DNS names and email addresses.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "DNS/Email Subject Alternative Names (SANs)", + }, + } + + fields["common_name"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: `The requested common name; if you want more than +one, specify the alternative names in the alt_names +map. If not specified when signing, the common +name will be taken from the CSR; other names +must still be specified in alt_names or ip_sans.`, + } + + fields["ttl"] = &framework.FieldSchema{ + Type: framework.TypeDurationSecond, + Description: `The requested Time To Live for the certificate; +sets the expiration date. If not specified +the role default, backend default, or system +default TTL is used, in that order. Cannot +be larger than the mount max TTL. Note: +this only has an effect when generating +a CA cert or signing a CA cert, not when +generating a CSR for an intermediate CA.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "TTL", + }, + } + + fields["ou"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: `If set, OU (OrganizationalUnit) will be set to +this value.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "OU (Organizational Unit)", + }, + } + + fields["organization"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: `If set, O (Organization) will be set to +this value.`, + } + + fields["country"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: `If set, Country will be set to +this value.`, + } + + fields["locality"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: `If set, Locality will be set to +this value.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Locality/City", + }, + } + + fields["province"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: `If set, Province will be set to +this value.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Province/State", + }, + } + + fields["street_address"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: `If set, Street Address will be set to +this value.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Street Address", + }, + } + + fields["postal_code"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: `If set, Postal Code will be set to +this value.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Postal Code", + }, + } + + fields["serial_number"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: `The Subject's requested serial number, if any. +See RFC 4519 Section 2.31 'serialNumber' for a description of this field. +If you want more than one, specify alternative names in the alt_names +map using OID 2.5.4.5. 
This has no impact on the final certificate's
+Serial Number field.`,
+	}
+
+	fields["not_after"] = &framework.FieldSchema{
+		Type: framework.TypeString,
+		Description: `Set the not after field of the certificate with specified date value.
+The value format should be given in UTC format YYYY-MM-ddTHH:MM:SSZ`,
+	}
+
+	fields["not_before_duration"] = &framework.FieldSchema{
+		Type:        framework.TypeDurationSecond,
+		Default:     30,
+		Description: `The duration before now by which the certificate needs to be backdated.`,
+		DisplayAttrs: &framework.DisplayAttributes{
+			Value: 30,
+		},
+	}
+
+	return fields
+}
+
+// addCAKeyGenerationFields adds fields with help text specific to CA key
+// generation and exporting
+func addCAKeyGenerationFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema {
+	fields["exported"] = &framework.FieldSchema{
+		Type: framework.TypeString,
+		Description: `Must be "internal", "exported" or "kms". If set to
+"exported", the generated private key will be
+returned. This is your *only* chance to retrieve
+the private key!`,
+		AllowedValues: []interface{}{"internal", "exported", "kms"},
+	}
+
+	fields["managed_key_name"] = &framework.FieldSchema{
+		Type: framework.TypeString,
+		Description: `The name of the managed key to use when the exported
+type is kms. When kms type is the key type, this field or managed_key_id
+is required. Ignored for other types.`,
+	}
+
+	fields["managed_key_id"] = &framework.FieldSchema{
+		Type: framework.TypeString,
+		Description: `The id of the managed key to use when the exported
+type is kms. When kms type is the key type, this field or managed_key_name
+is required. Ignored for other types.`,
+	}
+
+	fields["key_bits"] = &framework.FieldSchema{
+		Type:    framework.TypeInt,
+		Default: 0,
+		Description: `The number of bits to use. Allowed values are
+0 (universal default); with rsa key_type: 2048 (default), 3072, or
+4096; with ec key_type: 224, 256 (default), 384, or 521; ignored with
+ed25519.`,
+		DisplayAttrs: &framework.DisplayAttributes{
+			Value: 0,
+		},
+	}
+
+	fields["signature_bits"] = &framework.FieldSchema{
+		Type:    framework.TypeInt,
+		Default: 0,
+		Description: `The number of bits to use in the signature
+algorithm; accepts 256 for SHA-2-256, 384 for SHA-2-384, and 512 for
+SHA-2-512. Defaults to 0 to automatically detect based on key length
+(SHA-2-256 for RSA keys, and matching the curve size for NIST P-Curves).`,
+		DisplayAttrs: &framework.DisplayAttributes{
+			Value: 0,
+		},
+	}
+
+	fields["use_pss"] = &framework.FieldSchema{
+		Type:    framework.TypeBool,
+		Default: false,
+		Description: `Whether or not to use PSS signatures when using a
+RSA key-type issuer. Defaults to false.`,
+	}
+
+	fields["key_type"] = &framework.FieldSchema{
+		Type:    framework.TypeString,
+		Default: "rsa",
+		Description: `The type of key to use; defaults to RSA. "rsa",
+"ec", and "ed25519" are the only valid values.`,
+		AllowedValues: []interface{}{"rsa", "ec", "ed25519"},
+		DisplayAttrs: &framework.DisplayAttributes{
+			Value: "rsa",
+		},
+	}
+
+	fields = addKeyRefNameFields(fields)
+
+	return fields
+}
+
+// addCAIssueFields adds fields common to CA issuing, e.g.
when returning
+// an actual certificate.
+func addCAIssueFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema {
+	fields["max_path_length"] = &framework.FieldSchema{
+		Type:        framework.TypeInt,
+		Default:     -1,
+		Description: "The maximum allowable path length",
+	}
+
+	fields["permitted_dns_domains"] = &framework.FieldSchema{
+		Type:        framework.TypeCommaStringSlice,
+		Description: `Domains for which this certificate is allowed to sign or issue child certificates. If set, all DNS names (subject and alt) on child certs must be exact matches or subsets of the given domains (see https://tools.ietf.org/html/rfc5280#section-4.2.1.10).`,
+		DisplayAttrs: &framework.DisplayAttributes{
+			Name: "Permitted DNS Domains",
+		},
+	}
+
+	fields = addIssuerNameField(fields)
+
+	return fields
+}
+
+func addIssuerRefNameFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema {
+	fields = addIssuerNameField(fields)
+	fields = addIssuerRefField(fields)
+	return fields
+}
+
+func addIssuerNameField(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema {
+	fields["issuer_name"] = &framework.FieldSchema{
+		Type: framework.TypeString,
+		Description: `Provide a name to the generated or existing issuer; the name
+must be unique across all issuers and not be the reserved value 'default'`,
+	}
+	return fields
+}
+
+func addIssuerRefField(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema {
+	fields[issuerRefParam] = &framework.FieldSchema{
+		Type: framework.TypeString,
+		Description: `Reference to an existing issuer; either "default"
+for the configured default issuer, an identifier or the name assigned
+to the issuer.`,
+		Default: defaultRef,
+	}
+	return fields
+}
+
+func addKeyRefNameFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema {
+	fields = addKeyNameField(fields)
+	fields = addKeyRefField(fields)
+	return fields
+}
+
+func addKeyNameField(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema {
+	fields[keyNameParam] = &framework.FieldSchema{
+		Type: framework.TypeString,
+		Description: `Provide a name to the generated or existing key; the name
+must be unique across all keys and not be the reserved value 'default'`,
+	}
+
+	return fields
+}
+
+func addKeyRefField(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema {
+	fields[keyRefParam] = &framework.FieldSchema{
+		Type: framework.TypeString,
+		Description: `Reference to an existing key; either "default"
+for the configured default key, an identifier or the name assigned
+to the key.`,
+		Default: defaultRef,
+	}
+	return fields
+}
+
+func addTidyFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema {
+	fields["tidy_cert_store"] = &framework.FieldSchema{
+		Type: framework.TypeBool,
+		Description: `Set to true to enable tidying up
+the certificate store`,
+	}
+
+	fields["tidy_revocation_list"] = &framework.FieldSchema{
+		Type:        framework.TypeBool,
+		Description: `Deprecated; synonym for 'tidy_revoked_certs'`,
+	}
+
+	fields["tidy_revoked_certs"] = &framework.FieldSchema{
+		Type: framework.TypeBool,
+		Description: `Set to true to expire all revoked
+and expired certificates, removing them both from the CRL and from storage. 
The +CRL will be rotated if this causes any values to be removed.`, + } + + fields["tidy_revoked_cert_issuer_associations"] = &framework.FieldSchema{ + Type: framework.TypeBool, + Description: `Set to true to validate issuer associations +on revocation entries. This helps increase the performance of CRL building +and OCSP responses.`, + } + + fields["tidy_expired_issuers"] = &framework.FieldSchema{ + Type: framework.TypeBool, + Description: `Set to true to automatically remove expired issuers +past the issuer_safety_buffer. No keys will be removed as part of this +operation.`, + } + + fields["tidy_move_legacy_ca_bundle"] = &framework.FieldSchema{ + Type: framework.TypeBool, + Description: `Set to true to move the legacy ca_bundle from +/config/ca_bundle to /config/ca_bundle.bak. This prevents downgrades +to pre-Vault 1.11 versions (as older PKI engines do not know about +the new multi-issuer storage layout), but improves the performance +on seal wrapped PKI mounts. This will only occur if at least +issuer_safety_buffer time has occurred after the initial storage +migration. + +This backup is saved in case of an issue in future migrations. +Operators may consider removing it via sys/raw if they desire. +The backup will be removed via a DELETE /root call, but note that +this removes ALL issuers within the mount (and is thus not desirable +in most operational scenarios).`, + } + + fields["tidy_acme"] = &framework.FieldSchema{ + Type: framework.TypeBool, + Description: `Set to true to enable tidying ACME accounts, +orders and authorizations. ACME orders are tidied (deleted) +safety_buffer after the certificate associated with them expires, +or after the order and relevant authorizations have expired if no +certificate was produced. Authorizations are tidied with the +corresponding order. + +When a valid ACME Account is at least acme_account_safety_buffer +old, and has no remaining orders associated with it, the account is +marked as revoked. After another acme_account_safety_buffer has +passed from the revocation or deactivation date, a revoked or +deactivated ACME account is deleted.`, + Default: false, + } + + fields["safety_buffer"] = &framework.FieldSchema{ + Type: framework.TypeDurationSecond, + Description: `The amount of extra time that must have passed +beyond certificate expiration before it is removed +from the backend storage and/or revocation list. +Defaults to 72 hours.`, + Default: int(defaultTidyConfig.SafetyBuffer / time.Second), // TypeDurationSecond currently requires defaults to be int + } + + fields["issuer_safety_buffer"] = &framework.FieldSchema{ + Type: framework.TypeDurationSecond, + Description: `The amount of extra time that must have passed +beyond issuer's expiration before it is removed +from the backend storage. +Defaults to 8760 hours (1 year).`, + Default: int(defaultTidyConfig.IssuerSafetyBuffer / time.Second), // TypeDurationSecond currently requires defaults to be int + } + + fields["acme_account_safety_buffer"] = &framework.FieldSchema{ + Type: framework.TypeDurationSecond, + Description: `The amount of time that must pass after creation +that an account with no orders is marked revoked, and the amount of time +after being marked revoked or deactivated.`, + Default: int(defaultTidyConfig.AcmeAccountSafetyBuffer / time.Second), // TypeDurationSecond currently requires defaults to be int + } + + fields["pause_duration"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: `The amount of time to wait between processing +certificates. 
This allows operators to change the execution profile
+of tidy to consume fewer resources by slowing down how long it
+takes to run. Note that the entire list of certificates will be
+stored in memory during the entire tidy operation, but resources to
+read/process/update existing entries will be spread out over a
+greater period of time. By default this is zero seconds.`,
+		Default: "0s",
+	}
+
+	fields["tidy_revocation_queue"] = &framework.FieldSchema{
+		Type: framework.TypeBool,
+		Description: `Set to true to remove stale revocation queue entries
+that haven't been confirmed by any active cluster. Only runs on the
+active primary node.`,
+		Default: defaultTidyConfig.RevocationQueue,
+	}
+
+	fields["revocation_queue_safety_buffer"] = &framework.FieldSchema{
+		Type: framework.TypeDurationSecond,
+		Description: `The amount of time that must pass from the
+cross-cluster revocation request being initiated to when it will be
+slated for removal. Setting this too low may remove valid revocation
+requests before the owning cluster has a chance to process them,
+especially if the cluster is offline.`,
+		Default: int(defaultTidyConfig.QueueSafetyBuffer / time.Second), // TypeDurationSecond currently requires defaults to be int
+	}
+
+	fields["tidy_cross_cluster_revoked_certs"] = &framework.FieldSchema{
+		Type: framework.TypeBool,
+		Description: `Set to true to enable tidying up
+the cross-cluster revoked certificate store. Only runs on the active
+primary node.`,
+	}
+
+	return fields
+}
+
+// getCsrSignVerbatimSchemaFields generates the entire list of schema fields
+// we need for CSR sign-verbatim; this is also leveraged by ACME internally.
+func getCsrSignVerbatimSchemaFields() map[string]*framework.FieldSchema {
+	fields := map[string]*framework.FieldSchema{}
+	fields = addNonCACommonFields(fields)
+	fields = addSignVerbatimRoleFields(fields)
+
+	fields["csr"] = &framework.FieldSchema{
+		Type:    framework.TypeString,
+		Default: "",
+		Description: `PEM-format CSR to be signed. Values will be
+taken verbatim from the CSR, except for
+basic constraints.`,
+	}
+
+	return fields
+}
+
+// addSignVerbatimRoleFields provides the fields and defaults to be used by anything that is building up the fields
+// and their corresponding default values when generating/using a sign-verbatim type role such as buildSignVerbatimRole.
+func addSignVerbatimRoleFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema {
+	fields["key_usage"] = &framework.FieldSchema{
+		Type:    framework.TypeCommaStringSlice,
+		Default: []string{"DigitalSignature", "KeyAgreement", "KeyEncipherment"},
+		Description: `A comma-separated string or list of key usages (not extended
+key usages). Valid values can be found at
+https://golang.org/pkg/crypto/x509/#KeyUsage
+-- simply drop the "KeyUsage" part of the name.
+To remove all key usages from being set, set
+this value to an empty list.`,
+	}
+
+	fields["ext_key_usage"] = &framework.FieldSchema{
+		Type:    framework.TypeCommaStringSlice,
+		Default: []string{},
+		Description: `A comma-separated string or list of extended key usages. Valid values can be found at
+https://golang.org/pkg/crypto/x509/#ExtKeyUsage
+-- simply drop the "ExtKeyUsage" part of the name. 
+To remove all key usages from being set, set +this value to an empty list.`, + } + + fields["ext_key_usage_oids"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: `A comma-separated string or list of extended key usage oids.`, + } + + fields["signature_bits"] = &framework.FieldSchema{ + Type: framework.TypeInt, + Default: 0, + Description: `The number of bits to use in the signature +algorithm; accepts 256 for SHA-2-256, 384 for SHA-2-384, and 512 for +SHA-2-512. Defaults to 0 to automatically detect based on key length +(SHA-2-256 for RSA keys, and matching the curve size for NIST P-Curves).`, + DisplayAttrs: &framework.DisplayAttributes{ + Value: 0, + }, + } + + fields["use_pss"] = &framework.FieldSchema{ + Type: framework.TypeBool, + Default: false, + Description: `Whether or not to use PSS signatures when using a +RSA key-type issuer. Defaults to false.`, + } + + return fields +} diff --git a/builtin/logical/pki/integration_test.go b/builtin/logical/pki/integration_test.go new file mode 100644 index 0000000..1c0eb25 --- /dev/null +++ b/builtin/logical/pki/integration_test.go @@ -0,0 +1,675 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "fmt" + "testing" + + "github.com/hashicorp/vault/api" + vaulthttp "github.com/hashicorp/vault/http" + vaultocsp "github.com/hashicorp/vault/sdk/helper/ocsp" + "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + + "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/require" +) + +func TestIntegration_RotateRootUsesNext(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/rotate/internal", + Storage: s, + Data: map[string]interface{}{ + "common_name": "test.com", + }, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed rotate root") + require.NotNil(t, resp, "got nil response from rotate root") + require.False(t, resp.IsError(), "got an error from rotate root: %#v", resp) + + issuerId1 := resp.Data["issuer_id"].(issuerID) + issuerName1 := resp.Data["issuer_name"] + + require.NotEmpty(t, issuerId1, "issuer id was empty on initial rotate root command") + require.Equal(t, "next", issuerName1, "expected an issuer name of next on initial rotate root command") + + // Call it again, we should get a new issuer id, but since next issuer_name is used we should get a blank value. 
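+	// (The reserved name "next" is only handed out on the first rotation; once an
+	// existing issuer already holds it, later rotations leave issuer_name blank
+	// unless the caller supplies one explicitly, as the third call below does.)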
+ resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/rotate/internal", + Storage: s, + Data: map[string]interface{}{ + "common_name": "test.com", + }, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed rotate root") + require.NotNil(t, resp, "got nil response from rotate root") + require.False(t, resp.IsError(), "got an error from rotate root: %#v", resp) + + issuerId2 := resp.Data["issuer_id"].(issuerID) + issuerName2 := resp.Data["issuer_name"] + + require.NotEmpty(t, issuerId2, "issuer id was empty on second rotate root command") + require.NotEqual(t, issuerId1, issuerId2, "should have been different issuer ids") + require.Empty(t, issuerName2, "expected a blank issuer name on the second rotate root command") + + // Call it again, making sure we can use our own name if desired. + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/rotate/internal", + Storage: s, + Data: map[string]interface{}{ + "common_name": "test.com", + "issuer_name": "next-cert", + }, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed rotate root") + require.NotNil(t, resp, "got nil response from rotate root") + require.False(t, resp.IsError(), "got an error from rotate root: %#v", resp) + + issuerId3 := resp.Data["issuer_id"].(issuerID) + issuerName3 := resp.Data["issuer_name"] + + require.NotEmpty(t, issuerId3, "issuer id was empty on third rotate root command") + require.NotEqual(t, issuerId3, issuerId1, "should have been different issuer id from initial") + require.NotEqual(t, issuerId3, issuerId2, "should have been different issuer id from second call") + require.Equal(t, "next-cert", issuerName3, "expected an issuer name that we specified on third rotate root command") +} + +func TestIntegration_ReplaceRootNormal(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + // generate roots + genTestRootCa(t, b, s) + issuerId2, _ := genTestRootCa(t, b, s) + + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/replace", + Storage: s, + Data: map[string]interface{}{ + "default": issuerId2.String(), + }, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed replacing root") + require.NotNil(t, resp, "got nil response from replacing root") + require.False(t, resp.IsError(), "got an error from replacing root: %#v", resp) + + replacedIssuer := resp.Data["default"] + require.Equal(t, issuerId2, replacedIssuer, "expected return value to match issuer we set") + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Path: "config/issuers", + Storage: s, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed replacing root") + require.NotNil(t, resp, "got nil response from replacing root") + require.False(t, resp.IsError(), "got an error from replacing root: %#v", resp) + + defaultIssuer := resp.Data["default"] + require.Equal(t, issuerId2, defaultIssuer, "expected default issuer to be updated") +} + +func TestIntegration_ReplaceRootDefaultsToNext(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + // generate roots + genTestRootCa(t, b, s) + issuerId2, _ := genTestRootCaWithIssuerName(t, b, s, "next") + + // Do not specify the default value to replace. 
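+	// (Leaving "default" unset exercises the fallback this test is named for:
+	// root/replace is expected to pick the issuer named "next", generated above.)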
+ resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/replace", + Storage: s, + Data: map[string]interface{}{}, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed replacing root") + require.NotNil(t, resp, "got nil response from replacing root") + require.False(t, resp.IsError(), "got an error from replacing root: %#v", resp) + + replacedIssuer := resp.Data["default"] + require.Equal(t, issuerId2, replacedIssuer, "expected return value to match the 'next' issuer we set") + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Path: "config/issuers", + Storage: s, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed replacing root") + require.NotNil(t, resp, "got nil response from replacing root") + require.False(t, resp.IsError(), "got an error from replacing root: %#v", resp) + + defaultIssuer := resp.Data["default"] + require.Equal(t, issuerId2, defaultIssuer, "expected default issuer to be updated") +} + +func TestIntegration_ReplaceRootBadIssuer(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + // generate roots + genTestRootCa(t, b, s) + genTestRootCa(t, b, s) + + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/replace", + Storage: s, + Data: map[string]interface{}{ + "default": "a-bad-issuer-id", + }, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed replacing root, should have been an error in the response.") + require.NotNil(t, resp, "got nil response from replacing root") + require.True(t, resp.IsError(), "did not get an error from replacing root: %#v", resp) + + // Make sure we trap replacing with default. + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/replace", + Storage: s, + Data: map[string]interface{}{ + "default": "default", + }, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed replacing root, should have been an error in the response.") + require.NotNil(t, resp, "got nil response from replacing root") + require.True(t, resp.IsError(), "did not get an error from replacing root: %#v", resp) + + // Make sure we trap replacing with blank string. 
+ resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/replace", + Storage: s, + Data: map[string]interface{}{ + "default": "", + }, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed replacing root, should have been an error in the response.") + require.NotNil(t, resp, "got nil response from replacing root") + require.True(t, resp.IsError(), "did not get an error from replacing root: %#v", resp) +} + +func TestIntegration_SetSignedWithBackwardsPemBundles(t *testing.T) { + t.Parallel() + rootBackend, rootStorage := CreateBackendWithStorage(t) + intBackend, intStorage := CreateBackendWithStorage(t) + + // generate root + resp, err := rootBackend.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "issuers/generate/root/internal", + Storage: rootStorage, + Data: map[string]interface{}{ + "common_name": "test.com", + }, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed generating root ca") + require.NotNil(t, resp, "got nil response from generating root ca") + require.False(t, resp.IsError(), "got an error from generating root ca: %#v", resp) + rootCert := resp.Data["certificate"].(string) + + schema.ValidateResponse(t, schema.GetResponseSchema(t, rootBackend.Route("issuers/generate/root/internal"), logical.UpdateOperation), resp, true) + + // generate intermediate + resp, err = intBackend.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "issuers/generate/intermediate/internal", + Storage: intStorage, + Data: map[string]interface{}{ + "common_name": "test.com", + }, + MountPoint: "pki-int/", + }) + require.NoError(t, err, "failed generating int ca") + require.NotNil(t, resp, "got nil response from generating int ca") + require.False(t, resp.IsError(), "got an error from generating int ca: %#v", resp) + intCsr := resp.Data["csr"].(string) + + // sign csr + resp, err = rootBackend.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/sign-intermediate", + Storage: rootStorage, + Data: map[string]interface{}{ + "csr": intCsr, + "format": "pem_bundle", + }, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed generating root ca") + require.NotNil(t, resp, "got nil response from generating root ca") + require.False(t, resp.IsError(), "got an error from generating root ca: %#v", resp) + + intCert := resp.Data["certificate"].(string) + + // Send in the chain backwards now and make sure we link intCert as default. 
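+	// (set-signed should accept the bundle regardless of ordering: intCert is
+	// expected to be recognized as this mount's own issuer, since its key was
+	// generated locally above, while rootCert is imported without a private key.)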
+ resp, err = intBackend.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "intermediate/set-signed", + Storage: intStorage, + Data: map[string]interface{}{ + "certificate": rootCert + "\n" + intCert + "\n", + }, + MountPoint: "pki-int/", + }) + require.NoError(t, err, "failed generating root ca") + require.NotNil(t, resp, "got nil response from generating root ca") + require.False(t, resp.IsError(), "got an error from generating root ca: %#v", resp) + + // setup role + resp, err = intBackend.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/example", + Storage: intStorage, + Data: map[string]interface{}{ + "allowed_domains": "example.com", + "allow_subdomains": "true", + "max_ttl": "1h", + }, + MountPoint: "pki-int/", + }) + require.NoError(t, err, "failed setting up role example") + require.NotNil(t, resp, "got nil response from setting up role example: %#v", resp) + + schema.ValidateResponse(t, schema.GetResponseSchema(t, intBackend.Route("roles/example"), logical.UpdateOperation), resp, true) + + // Issue cert + resp, err = intBackend.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "issue/example", + Storage: intStorage, + Data: map[string]interface{}{ + "common_name": "test.example.com", + "ttl": "5m", + }, + MountPoint: "pki-int/", + }) + require.NoError(t, err, "failed issuing a leaf cert from int ca") + require.NotNil(t, resp, "got nil response issuing a leaf cert from int ca") + require.False(t, resp.IsError(), "got an error issuing a leaf cert from int ca: %#v", resp) + + schema.ValidateResponse(t, schema.GetResponseSchema(t, intBackend.Route("issue/example"), logical.UpdateOperation), resp, true) +} + +func TestIntegration_CSRGeneration(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + testCases := []struct { + keyType string + usePss bool + keyBits int + sigBits int + expectedPublicKeyType crypto.PublicKey + expectedSignature x509.SignatureAlgorithm + }{ + {"rsa", false, 2048, 0, &rsa.PublicKey{}, x509.SHA256WithRSA}, + {"rsa", false, 2048, 384, &rsa.PublicKey{}, x509.SHA384WithRSA}, + // Add back once https://github.com/golang/go/issues/45990 is fixed. 
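+		// (That issue appears to track crypto/x509 support for creating PSS-signed
+		// requests; until it lands, the stdlib cannot generate these two cases.)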
+ // {"rsa", true, 2048, 0, &rsa.PublicKey{}, x509.SHA256WithRSAPSS}, + // {"rsa", true, 2048, 512, &rsa.PublicKey{}, x509.SHA512WithRSAPSS}, + {"ec", false, 224, 0, &ecdsa.PublicKey{}, x509.ECDSAWithSHA256}, + {"ec", false, 256, 0, &ecdsa.PublicKey{}, x509.ECDSAWithSHA256}, + {"ec", false, 384, 0, &ecdsa.PublicKey{}, x509.ECDSAWithSHA384}, + {"ec", false, 521, 0, &ecdsa.PublicKey{}, x509.ECDSAWithSHA512}, + {"ec", false, 521, 224, &ecdsa.PublicKey{}, x509.ECDSAWithSHA512}, // We ignore signature_bits for ec + {"ed25519", false, 0, 0, ed25519.PublicKey{}, x509.PureEd25519}, // We ignore both fields for ed25519 + } + for _, tc := range testCases { + keyTypeName := tc.keyType + if tc.usePss { + keyTypeName = tc.keyType + "-pss" + } + testName := fmt.Sprintf("%s-%d-%d", keyTypeName, tc.keyBits, tc.sigBits) + t.Run(testName, func(t *testing.T) { + resp, err := CBWrite(b, s, "intermediate/generate/internal", map[string]interface{}{ + "common_name": "myint.com", + "key_type": tc.keyType, + "key_bits": tc.keyBits, + "signature_bits": tc.sigBits, + "use_pss": tc.usePss, + }) + requireSuccessNonNilResponse(t, resp, err) + requireFieldsSetInResp(t, resp, "csr") + + csrString := resp.Data["csr"].(string) + pemBlock, _ := pem.Decode([]byte(csrString)) + require.NotNil(t, pemBlock, "failed to parse returned csr pem block") + csr, err := x509.ParseCertificateRequest(pemBlock.Bytes) + require.NoError(t, err, "failed parsing certificate request") + + require.Equal(t, tc.expectedSignature, csr.SignatureAlgorithm, + "Expected %s, got %s", tc.expectedSignature.String(), csr.SignatureAlgorithm.String()) + require.IsType(t, tc.expectedPublicKeyType, csr.PublicKey) + }) + } +} + +func TestIntegration_AutoIssuer(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + // Generate two roots. The first should become default under the existing + // behavior; when we update the config and generate a second, it should + // take over as default. Deleting the first and re-importing it will make + // it default again, and then disabling the option and removing and + // reimporting the second and creating a new root won't affect it again. + resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "Root X1", + "issuer_name": "root-1", + "key_type": "ec", + }) + + requireSuccessNonNilResponse(t, resp, err) + issuerIdOne := resp.Data["issuer_id"] + require.NotEmpty(t, issuerIdOne) + certOne := resp.Data["certificate"] + require.NotEmpty(t, certOne) + + resp, err = CBRead(b, s, "config/issuers") + requireSuccessNonNilResponse(t, resp, err) + require.Equal(t, issuerIdOne, resp.Data["default"]) + + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/issuers"), logical.ReadOperation), resp, true) + + // Enable the new config option. + resp, err = CBWrite(b, s, "config/issuers", map[string]interface{}{ + "default": issuerIdOne, + "default_follows_latest_issuer": true, + }) + require.NoError(t, err) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/issuers"), logical.UpdateOperation), resp, true) + + // Now generate the second root; it should become default. 
+ resp, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "Root X2", + "issuer_name": "root-2", + "key_type": "ec", + }) + requireSuccessNonNilResponse(t, resp, err) + issuerIdTwo := resp.Data["issuer_id"] + require.NotEmpty(t, issuerIdTwo) + certTwo := resp.Data["certificate"] + require.NotEmpty(t, certTwo) + + resp, err = CBRead(b, s, "config/issuers") + requireSuccessNonNilResponse(t, resp, err) + require.Equal(t, issuerIdTwo, resp.Data["default"]) + + // Deleting the first shouldn't affect the default issuer. + _, err = CBDelete(b, s, "issuer/root-1") + require.NoError(t, err) + resp, err = CBRead(b, s, "config/issuers") + requireSuccessNonNilResponse(t, resp, err) + require.Equal(t, issuerIdTwo, resp.Data["default"]) + + // But reimporting it should update it to the new issuer's value. + resp, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ + "pem_bundle": certOne, + }) + requireSuccessNonNilResponse(t, resp, err) + issuerIdOneReimported := issuerID(resp.Data["imported_issuers"].([]string)[0]) + + resp, err = CBRead(b, s, "config/issuers") + requireSuccessNonNilResponse(t, resp, err) + require.Equal(t, issuerIdOneReimported, resp.Data["default"]) + + // Now update the config to disable this option again. + _, err = CBWrite(b, s, "config/issuers", map[string]interface{}{ + "default": issuerIdOneReimported, + "default_follows_latest_issuer": false, + }) + require.NoError(t, err) + + // Generating a new root shouldn't update the default. + resp, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "Root X3", + "issuer_name": "root-3", + "key_type": "ec", + }) + requireSuccessNonNilResponse(t, resp, err) + issuerIdThree := resp.Data["issuer_id"] + require.NotEmpty(t, issuerIdThree) + + resp, err = CBRead(b, s, "config/issuers") + requireSuccessNonNilResponse(t, resp, err) + require.Equal(t, issuerIdOneReimported, resp.Data["default"]) + + // Deleting and re-importing root 2 should also not affect it. 
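+	// (default_follows_latest_issuer is disabled again at this point; a rough CLI
+	// equivalent of that toggle, sketched here rather than taken from this test,
+	// would be:
+	//   vault write pki/config/issuers default=<issuer_id> \
+	//       default_follows_latest_issuer=false)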
+ _, err = CBDelete(b, s, "issuer/root-2") + require.NoError(t, err) + resp, err = CBRead(b, s, "config/issuers") + requireSuccessNonNilResponse(t, resp, err) + require.Equal(t, issuerIdOneReimported, resp.Data["default"]) + + resp, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ + "pem_bundle": certTwo, + }) + requireSuccessNonNilResponse(t, resp, err) + require.Equal(t, 1, len(resp.Data["imported_issuers"].([]string))) + resp, err = CBRead(b, s, "config/issuers") + requireSuccessNonNilResponse(t, resp, err) + require.Equal(t, issuerIdOneReimported, resp.Data["default"]) +} + +func TestIntegrationOCSPClientWithPKI(t *testing.T) { + t.Parallel() + + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + + cluster.Start() + defer cluster.Cleanup() + cores := cluster.Cores + vault.TestWaitActive(t, cores[0].Core) + client := cores[0].Client + + err := client.Sys().Mount("pki", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "32h", + }, + }) + require.NoError(t, err) + + resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "Root R1", + "key_type": "ec", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["issuer_id"]) + rootIssuerId := resp.Data["issuer_id"].(string) + + // Set URLs pointing to the issuer. + _, err = client.Logical().Write("pki/config/cluster", map[string]interface{}{ + "path": client.Address() + "/v1/pki", + "aia_path": client.Address() + "/v1/pki", + }) + require.NoError(t, err) + + _, err = client.Logical().Write("pki/config/urls", map[string]interface{}{ + "enable_templating": true, + "crl_distribution_points": "{{cluster_aia_path}}/issuer/{{issuer_id}}/crl/der", + "issuing_certificates": "{{cluster_aia_path}}/issuer/{{issuer_id}}/der", + "ocsp_servers": "{{cluster_aia_path}}/ocsp", + }) + require.NoError(t, err) + + // Build an intermediate CA + resp, err = client.Logical().Write("pki/intermediate/generate/internal", map[string]interface{}{ + "common_name": "Int X1", + "key_type": "ec", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["csr"]) + intermediateCSR := resp.Data["csr"].(string) + + resp, err = client.Logical().Write("pki/root/sign-intermediate", map[string]interface{}{ + "csr": intermediateCSR, + "ttl": "20h", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["certificate"]) + intermediateCert := resp.Data["certificate"] + + resp, err = client.Logical().Write("pki/intermediate/set-signed", map[string]interface{}{ + "certificate": intermediateCert, + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["imported_issuers"]) + rawImportedIssuers := resp.Data["imported_issuers"].([]interface{}) + require.Equal(t, len(rawImportedIssuers), 1) + importedIssuer := rawImportedIssuers[0].(string) + require.NotEmpty(t, importedIssuer) + + // Set intermediate as default. + _, err = client.Logical().Write("pki/config/issuers", map[string]interface{}{ + "default": importedIssuer, + }) + require.NoError(t, err) + + // Setup roles for root, intermediate. 
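+	// example-root pins issuer_ref to the root issuer explicitly; example-int
+	// omits issuer_ref and therefore signs with the mount's default issuer
+	// (the intermediate imported above).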
+ _, err = client.Logical().Write("pki/roles/example-root", map[string]interface{}{ + "allowed_domains": "example.com", + "allow_subdomains": "true", + "max_ttl": "1h", + "key_type": "ec", + "issuer_ref": rootIssuerId, + }) + require.NoError(t, err) + + _, err = client.Logical().Write("pki/roles/example-int", map[string]interface{}{ + "allowed_domains": "example.com", + "allow_subdomains": "true", + "max_ttl": "1h", + "key_type": "ec", + }) + require.NoError(t, err) + + // Issue certs and validate them against OCSP. + for _, path := range []string{"pki/issue/example-int", "pki/issue/example-root"} { + t.Logf("Validating against path: %v", path) + resp, err = client.Logical().Write(path, map[string]interface{}{ + "common_name": "test.example.com", + "ttl": "5m", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["certificate"]) + require.NotEmpty(t, resp.Data["issuing_ca"]) + require.NotEmpty(t, resp.Data["serial_number"]) + + certPEM := resp.Data["certificate"].(string) + certBlock, _ := pem.Decode([]byte(certPEM)) + require.NotNil(t, certBlock) + cert, err := x509.ParseCertificate(certBlock.Bytes) + require.NoError(t, err) + require.NotNil(t, cert) + + issuerPEM := resp.Data["issuing_ca"].(string) + issuerBlock, _ := pem.Decode([]byte(issuerPEM)) + require.NotNil(t, issuerBlock) + issuer, err := x509.ParseCertificate(issuerBlock.Bytes) + require.NoError(t, err) + require.NotNil(t, issuer) + + serialNumber := resp.Data["serial_number"].(string) + + testLogger := hclog.New(hclog.DefaultOptions) + + conf := &vaultocsp.VerifyConfig{ + OcspFailureMode: vaultocsp.FailOpenFalse, + ExtraCas: []*x509.Certificate{cluster.CACert}, + } + ocspClient := vaultocsp.New(func() hclog.Logger { + return testLogger + }, 10) + + err = ocspClient.VerifyLeafCertificate(context.Background(), cert, issuer, conf) + require.NoError(t, err) + + _, err = client.Logical().Write("pki/revoke", map[string]interface{}{ + "serial_number": serialNumber, + }) + require.NoError(t, err) + + err = ocspClient.VerifyLeafCertificate(context.Background(), cert, issuer, conf) + require.Error(t, err) + } +} + +func genTestRootCa(t *testing.T, b *backend, s logical.Storage) (issuerID, keyID) { + return genTestRootCaWithIssuerName(t, b, s, "") +} + +func genTestRootCaWithIssuerName(t *testing.T, b *backend, s logical.Storage, issuerName string) (issuerID, keyID) { + data := map[string]interface{}{ + "common_name": "test.com", + } + if len(issuerName) > 0 { + data["issuer_name"] = issuerName + } + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "issuers/generate/root/internal", + Storage: s, + Data: data, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed generating root ca") + require.NotNil(t, resp, "got nil response from generating root ca") + require.False(t, resp.IsError(), "got an error from generating root ca: %#v", resp) + + issuerId := resp.Data["issuer_id"].(issuerID) + keyId := resp.Data["key_id"].(keyID) + + require.NotEmpty(t, issuerId, "returned issuer id was empty") + require.NotEmpty(t, keyId, "returned key id was empty") + + return issuerId, keyId +} diff --git a/builtin/logical/pki/key_util.go b/builtin/logical/pki/key_util.go new file mode 100644 index 0000000..5f2d19c --- /dev/null +++ b/builtin/logical/pki/key_util.go @@ -0,0 +1,96 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "context" + "crypto" + "encoding/pem" + "errors" + "fmt" + + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/errutil" +) + +func comparePublicKey(sc *storageContext, key *keyEntry, publicKey crypto.PublicKey) (bool, error) { + publicKeyForKeyEntry, err := getPublicKey(sc.Context, sc.Backend, key) + if err != nil { + return false, err + } + + return certutil.ComparePublicKeysAndType(publicKeyForKeyEntry, publicKey) +} + +func getPublicKey(ctx context.Context, b *backend, key *keyEntry) (crypto.PublicKey, error) { + if key.PrivateKeyType == certutil.ManagedPrivateKey { + keyId, err := extractManagedKeyId([]byte(key.PrivateKey)) + if err != nil { + return nil, err + } + return getManagedKeyPublicKey(ctx, b, keyId) + } + + signer, _, _, err := getSignerFromKeyEntryBytes(key) + if err != nil { + return nil, err + } + return signer.Public(), nil +} + +func getSignerFromKeyEntryBytes(key *keyEntry) (crypto.Signer, certutil.BlockType, *pem.Block, error) { + if key.PrivateKeyType == certutil.UnknownPrivateKey { + return nil, certutil.UnknownBlock, nil, errutil.InternalError{Err: fmt.Sprintf("unsupported unknown private key type for key: %s (%s)", key.ID, key.Name)} + } + + if key.PrivateKeyType == certutil.ManagedPrivateKey { + return nil, certutil.UnknownBlock, nil, errutil.InternalError{Err: fmt.Sprintf("can not get a signer from a managed key: %s (%s)", key.ID, key.Name)} + } + + bytes, blockType, blk, err := getSignerFromBytes([]byte(key.PrivateKey)) + if err != nil { + return nil, certutil.UnknownBlock, nil, errutil.InternalError{Err: fmt.Sprintf("failed parsing key entry bytes for key id: %s (%s): %s", key.ID, key.Name, err.Error())} + } + + return bytes, blockType, blk, nil +} + +func getSignerFromBytes(keyBytes []byte) (crypto.Signer, certutil.BlockType, *pem.Block, error) { + pemBlock, _ := pem.Decode(keyBytes) + if pemBlock == nil { + return nil, certutil.UnknownBlock, pemBlock, errutil.InternalError{Err: "no data found in PEM block"} + } + + signer, blk, err := certutil.ParseDERKey(pemBlock.Bytes) + if err != nil { + return nil, certutil.UnknownBlock, pemBlock, errutil.InternalError{Err: fmt.Sprintf("failed to parse PEM block: %s", err.Error())} + } + return signer, blk, pemBlock, nil +} + +func getPublicKeyFromBytes(keyBytes []byte) (crypto.PublicKey, error) { + signer, _, _, err := getSignerFromBytes(keyBytes) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("failed parsing key bytes: %s", err.Error())} + } + + return signer.Public(), nil +} + +func importKeyFromBytes(sc *storageContext, keyValue string, keyName string) (*keyEntry, bool, error) { + signer, _, _, err := getSignerFromBytes([]byte(keyValue)) + if err != nil { + return nil, false, err + } + privateKeyType := certutil.GetPrivateKeyTypeFromSigner(signer) + if privateKeyType == certutil.UnknownPrivateKey { + return nil, false, errors.New("unsupported private key type within pem bundle") + } + + key, existed, err := sc.importKey(keyValue, keyName, privateKeyType) + if err != nil { + return nil, false, err + } + return key, existed, nil +} diff --git a/builtin/logical/pki/managed_key_util.go b/builtin/logical/pki/managed_key_util.go new file mode 100644 index 0000000..42e031d --- /dev/null +++ b/builtin/logical/pki/managed_key_util.go @@ -0,0 +1,45 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +//go:build !enterprise + +package pki + +import ( + "context" + "crypto" + "errors" + "io" + + "github.com/hashicorp/vault/sdk/helper/certutil" +) + +var errEntOnly = errors.New("managed keys are supported within enterprise edition only") + +func generateManagedKeyCABundle(ctx context.Context, b *backend, keyId managedKeyId, data *certutil.CreationBundle, randomSource io.Reader) (bundle *certutil.ParsedCertBundle, err error) { + return nil, errEntOnly +} + +func generateManagedKeyCSRBundle(ctx context.Context, b *backend, keyId managedKeyId, data *certutil.CreationBundle, addBasicConstraints bool, randomSource io.Reader) (bundle *certutil.ParsedCSRBundle, err error) { + return nil, errEntOnly +} + +func getManagedKeyPublicKey(ctx context.Context, b *backend, keyId managedKeyId) (crypto.PublicKey, error) { + return nil, errEntOnly +} + +func parseManagedKeyCABundle(ctx context.Context, b *backend, bundle *certutil.CertBundle) (*certutil.ParsedCertBundle, error) { + return nil, errEntOnly +} + +func extractManagedKeyId(privateKeyBytes []byte) (UUIDKey, error) { + return "", errEntOnly +} + +func createKmsKeyBundle(ctx context.Context, b *backend, keyId managedKeyId) (certutil.KeyBundle, certutil.PrivateKeyType, error) { + return certutil.KeyBundle{}, certutil.UnknownPrivateKey, errEntOnly +} + +func getManagedKeyInfo(ctx context.Context, b *backend, keyId managedKeyId) (*managedKeyInfo, error) { + return nil, errEntOnly +} diff --git a/builtin/logical/pki/path_acme_account.go b/builtin/logical/pki/path_acme_account.go new file mode 100644 index 0000000..3782197 --- /dev/null +++ b/builtin/logical/pki/path_acme_account.go @@ -0,0 +1,475 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "fmt" + "net/http" + "path" + "strings" + "time" + + "github.com/hashicorp/go-secure-stdlib/strutil" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func uuidNameRegex(name string) string { + return fmt.Sprintf("(?P<%s>[[:alnum:]]{8}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{4}-[[:alnum:]]{12}?)", name) +} + +func pathAcmeNewAccount(b *backend) []*framework.Path { + return buildAcmeFrameworkPaths(b, patternAcmeNewAccount, "/new-account") +} + +func pathAcmeUpdateAccount(b *backend) []*framework.Path { + return buildAcmeFrameworkPaths(b, patternAcmeNewAccount, "/account/"+uuidNameRegex("kid")) +} + +func addFieldsForACMEPath(fields map[string]*framework.FieldSchema, pattern string) map[string]*framework.FieldSchema { + if strings.Contains(pattern, framework.GenericNameRegex("role")) { + fields["role"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: `The desired role for the acme request`, + Required: true, + } + } + if strings.Contains(pattern, framework.GenericNameRegex(issuerRefParam)) { + fields[issuerRefParam] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: `Reference to an existing issuer name or issuer id`, + Required: true, + } + } + + return fields +} + +func addFieldsForACMERequest(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { + fields["protected"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: "ACME request 'protected' value", + Required: false, + } + + fields["payload"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: "ACME request 'payload' value", + Required: false, + } + + fields["signature"] = &framework.FieldSchema{ + Type: 
framework.TypeString,
+		Description: "ACME request 'signature' value",
+		Required:    false,
+	}
+
+	return fields
+}
+
+func addFieldsForACMEKidRequest(fields map[string]*framework.FieldSchema, pattern string) map[string]*framework.FieldSchema {
+	if strings.Contains(pattern, uuidNameRegex("kid")) {
+		fields["kid"] = &framework.FieldSchema{
+			Type:        framework.TypeString,
+			Description: `The key identifier provided by the CA`,
+			Required:    true,
+		}
+	}
+
+	return fields
+}
+
+func patternAcmeNewAccount(b *backend, pattern string) *framework.Path {
+	fields := map[string]*framework.FieldSchema{}
+	addFieldsForACMEPath(fields, pattern)
+	addFieldsForACMERequest(fields)
+	addFieldsForACMEKidRequest(fields, pattern)
+
+	return &framework.Path{
+		Pattern: pattern,
+		Fields:  fields,
+		Operations: map[logical.Operation]framework.OperationHandler{
+			logical.UpdateOperation: &framework.PathOperation{
+				Callback:                    b.acmeParsedWrapper(b.acmeNewAccountHandler),
+				ForwardPerformanceSecondary: false,
+				ForwardPerformanceStandby:   true,
+			},
+		},
+
+		HelpSynopsis:    pathAcmeHelpSync,
+		HelpDescription: pathAcmeHelpDesc,
+	}
+}
+
+func (b *backend) acmeNewAccountHandler(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}) (*logical.Response, error) {
+	// Parameters
+	var ok bool
+	var onlyReturnExisting bool
+	var contacts []string
+	var termsOfServiceAgreed bool
+	var status string
+	var eabData map[string]interface{}
+
+	rawContact, present := data["contact"]
+	if present {
+		listContact, ok := rawContact.([]interface{})
+		if !ok {
+			return nil, fmt.Errorf("invalid type (%T) for field 'contact': %w", rawContact, ErrMalformed)
+		}
+
+		for index, singleContact := range listContact {
+			contact, ok := singleContact.(string)
+			if !ok {
+				return nil, fmt.Errorf("invalid type (%T) for field 'contact' item %d: %w", singleContact, index, ErrMalformed)
+			}
+
+			contacts = append(contacts, contact)
+		}
+	}
+
+	rawTermsOfServiceAgreed, present := data["termsOfServiceAgreed"]
+	if present {
+		termsOfServiceAgreed, ok = rawTermsOfServiceAgreed.(bool)
+		if !ok {
+			return nil, fmt.Errorf("invalid type (%T) for field 'termsOfServiceAgreed': %w", rawTermsOfServiceAgreed, ErrMalformed)
+		}
+	}
+
+	rawOnlyReturnExisting, present := data["onlyReturnExisting"]
+	if present {
+		onlyReturnExisting, ok = rawOnlyReturnExisting.(bool)
+		if !ok {
+			return nil, fmt.Errorf("invalid type (%T) for field 'onlyReturnExisting': %w", rawOnlyReturnExisting, ErrMalformed)
+		}
+	}
+
+	// Per RFC 8555 7.3.6 Account deactivation, we will handle it within our update API.
+	rawStatus, present := data["status"]
+	if present {
+		status, ok = rawStatus.(string)
+		if !ok {
+			return nil, fmt.Errorf("invalid type (%T) for field 'status': %w", rawStatus, ErrMalformed)
+		}
+	}
+
+	if eabDataRaw, ok := data["externalAccountBinding"]; ok {
+		eabData, ok = eabDataRaw.(map[string]interface{})
+		if !ok {
+			return nil, fmt.Errorf("%w: externalAccountBinding field was unparseable", ErrMalformed)
+		}
+	}
+
+	// We have two paths here: search or create.
+	if onlyReturnExisting {
+		return b.acmeAccountSearchHandler(acmeCtx, userCtx)
+	}
+
+	// Pass through the /new-account API calls to this specific handler as its requirements are different
+	// from the account update handler.
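+	// RFC 8555 uses the same JWS envelope for both operations and distinguishes
+	// them by URL and by how the request is signed: new-account requests carry
+	// the bare public JWK, while account updates are signed with the "kid"
+	// issued at registration. A typical (hypothetical) new-account payload,
+	// once the JWS has been verified, looks like:
+	//   {"termsOfServiceAgreed": true, "contact": ["mailto:admin@example.com"]}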
+ if strings.HasSuffix(r.Path, "/new-account") { + return b.acmeNewAccountCreateHandler(acmeCtx, userCtx, contacts, termsOfServiceAgreed, r, eabData) + } + + return b.acmeNewAccountUpdateHandler(acmeCtx, userCtx, contacts, status, eabData) +} + +func formatNewAccountResponse(acmeCtx *acmeContext, acct *acmeAccount, eabData map[string]interface{}) *logical.Response { + resp := formatAccountResponse(acmeCtx, acct) + + // Per RFC 8555 Section 7.1.2. Account Objects + // Including this field in a newAccount request indicates approval by + // the holder of an existing non-ACME account to bind that account to + // this ACME account + if acct.Eab != nil && len(eabData) != 0 { + resp.Data["externalAccountBinding"] = eabData + } + + return resp +} + +func formatAccountResponse(acmeCtx *acmeContext, acct *acmeAccount) *logical.Response { + location := acmeCtx.baseUrl.String() + "account/" + acct.KeyId + + resp := &logical.Response{ + Data: map[string]interface{}{ + "status": acct.Status, + "orders": location + "/orders", + }, + Headers: map[string][]string{ + "Location": {location}, + }, + } + + if len(acct.Contact) > 0 { + resp.Data["contact"] = acct.Contact + } + + return resp +} + +func (b *backend) acmeAccountSearchHandler(acmeCtx *acmeContext, userCtx *jwsCtx) (*logical.Response, error) { + thumbprint, err := userCtx.GetKeyThumbprint() + if err != nil { + return nil, fmt.Errorf("failed generating thumbprint for key: %w", err) + } + + account, err := b.acmeState.LoadAccountByKey(acmeCtx, thumbprint) + if err != nil { + return nil, fmt.Errorf("failed to load account by thumbprint: %w", err) + } + + if account != nil { + if err = acmeCtx.eabPolicy.EnforceForExistingAccount(account); err != nil { + return nil, err + } + return formatAccountResponse(acmeCtx, account), nil + } + + // Per RFC 8555 Section 7.3.1. Finding an Account URL Given a Key: + // + // > If a client sends such a request and an account does not exist, + // > then the server MUST return an error response with status code + // > 400 (Bad Request) and type "urn:ietf:params:acme:error:accountDoesNotExist". + return nil, fmt.Errorf("An account with this key does not exist: %w", ErrAccountDoesNotExist) +} + +func (b *backend) acmeNewAccountCreateHandler(acmeCtx *acmeContext, userCtx *jwsCtx, contact []string, termsOfServiceAgreed bool, r *logical.Request, eabData map[string]interface{}) (*logical.Response, error) { + if userCtx.Existing { + return nil, fmt.Errorf("cannot submit to newAccount with 'kid': %w", ErrMalformed) + } + + // If the account already exists, return the existing one. 
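+	// Per RFC 8555 Section 7.3, re-registering an already-known key is not an
+	// error: the server returns the existing account with its URL in the
+	// Location header, so client retries stay idempotent.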
+ thumbprint, err := userCtx.GetKeyThumbprint() + if err != nil { + return nil, fmt.Errorf("failed generating thumbprint for key: %w", err) + } + + accountByKey, err := b.acmeState.LoadAccountByKey(acmeCtx, thumbprint) + if err != nil { + return nil, fmt.Errorf("failed to load account by thumbprint: %w", err) + } + + if accountByKey != nil { + if err = acmeCtx.eabPolicy.EnforceForExistingAccount(accountByKey); err != nil { + return nil, err + } + return formatAccountResponse(acmeCtx, accountByKey), nil + } + + var eab *eabType + if len(eabData) != 0 { + eab, err = verifyEabPayload(b.acmeState, acmeCtx, userCtx, r.Path, eabData) + if err != nil { + return nil, err + } + } + + // Verify against our EAB policy + if err = acmeCtx.eabPolicy.EnforceForNewAccount(eab); err != nil { + return nil, err + } + + // TODO: Limit this only when ToS are required or set by the operator, since we don't have a + // ToS URL in the directory at the moment, we can not enforce this. + //if !termsOfServiceAgreed { + // return nil, fmt.Errorf("terms of service not agreed to: %w", ErrUserActionRequired) + //} + + if eab != nil { + // We delete the EAB to prevent future re-use after associating it with an account, worst + // case if we fail creating the account we simply nuked the EAB which they can create another + // and retry + wasDeleted, err := b.acmeState.DeleteEab(acmeCtx.sc, eab.KeyID) + if err != nil { + return nil, fmt.Errorf("failed to delete eab reference: %w", err) + } + + if !wasDeleted { + // Something consumed our EAB before we did bail... + return nil, fmt.Errorf("eab was already used: %w", ErrUnauthorized) + } + } + + b.acmeAccountLock.RLock() // Prevents Account Creation and Tidy Interfering + defer b.acmeAccountLock.RUnlock() + + accountByKid, err := b.acmeState.CreateAccount(acmeCtx, userCtx, contact, termsOfServiceAgreed, eab) + if err != nil { + if eab != nil { + return nil, fmt.Errorf("failed to create account: %w; the EAB key used for this request has been deleted as a result of this operation; fetch a new EAB key before retrying", err) + } + return nil, fmt.Errorf("failed to create account: %w", err) + } + + resp := formatNewAccountResponse(acmeCtx, accountByKid, eabData) + + // Per RFC 8555 Section 7.3. Account Management: + // + // > The server returns this account object in a 201 (Created) response, + // > with the account URL in a Location header field. + resp.Data[logical.HTTPStatusCode] = http.StatusCreated + return resp, nil +} + +func (b *backend) acmeNewAccountUpdateHandler(acmeCtx *acmeContext, userCtx *jwsCtx, contact []string, status string, eabData map[string]interface{}) (*logical.Response, error) { + if !userCtx.Existing { + return nil, fmt.Errorf("cannot submit to account updates without a 'kid': %w", ErrMalformed) + } + + if len(eabData) != 0 { + return nil, fmt.Errorf("%w: not allowed to update EAB data in accounts", ErrMalformed) + } + + account, err := b.acmeState.LoadAccount(acmeCtx, userCtx.Kid) + if err != nil { + return nil, fmt.Errorf("error loading account: %w", err) + } + + if err = acmeCtx.eabPolicy.EnforceForExistingAccount(account); err != nil { + return nil, err + } + + // Per RFC 8555 7.3.6 Account deactivation, if we were previously deactivated, we should return + // unauthorized. There is no way to reactivate any accounts per ACME RFC. + if account.Status != AccountStatusValid { + // Treating "revoked" and "deactivated" as the same here. 
+ return nil, ErrUnauthorized + } + + shouldUpdate := false + // Check to see if we should update, we don't really care about ordering + if !strutil.EquivalentSlices(account.Contact, contact) { + shouldUpdate = true + account.Contact = contact + } + + // Check to process account de-activation status was requested. + // 7.3.6. Account Deactivation + if string(AccountStatusDeactivated) == status { + shouldUpdate = true + // TODO: This should cancel any ongoing operations (do not revoke certs), + // perhaps we should delete this account here? + account.Status = AccountStatusDeactivated + account.AccountRevokedDate = time.Now() + } + + if shouldUpdate { + err = b.acmeState.UpdateAccount(acmeCtx.sc, account) + if err != nil { + return nil, fmt.Errorf("failed to update account: %w", err) + } + } + + resp := formatAccountResponse(acmeCtx, account) + return resp, nil +} + +func (b *backend) tidyAcmeAccountByThumbprint(as *acmeState, sc *storageContext, keyThumbprint string, certTidyBuffer, accountTidyBuffer time.Duration) error { + thumbprintEntry, err := sc.Storage.Get(sc.Context, path.Join(acmeThumbprintPrefix, keyThumbprint)) + if err != nil { + return fmt.Errorf("error retrieving thumbprint entry %v, unable to find corresponding account entry: %w", keyThumbprint, err) + } + if thumbprintEntry == nil { + return fmt.Errorf("empty thumbprint entry %v, unable to find corresponding account entry", keyThumbprint) + } + + var thumbprint acmeThumbprint + err = thumbprintEntry.DecodeJSON(&thumbprint) + if err != nil { + return fmt.Errorf("unable to decode thumbprint entry %v to find account entry: %w", keyThumbprint, err) + } + + if len(thumbprint.Kid) == 0 { + return fmt.Errorf("unable to find account entry: empty kid within thumbprint entry: %s", keyThumbprint) + } + + // Now Get the Account: + accountEntry, err := sc.Storage.Get(sc.Context, acmeAccountPrefix+thumbprint.Kid) + if err != nil { + return err + } + if accountEntry == nil { + // We delete the Thumbprint Associated with the Account, and we are done + err = sc.Storage.Delete(sc.Context, path.Join(acmeThumbprintPrefix, keyThumbprint)) + if err != nil { + return err + } + b.tidyStatusIncDeletedAcmeAccountCount() + return nil + } + + var account acmeAccount + err = accountEntry.DecodeJSON(&account) + if err != nil { + return err + } + account.KeyId = thumbprint.Kid + + // Tidy Orders On the Account + orderIds, err := as.ListOrderIds(sc, thumbprint.Kid) + if err != nil { + return err + } + allOrdersTidied := true + maxCertExpiryUpdated := false + for _, orderId := range orderIds { + wasTidied, orderExpiry, err := b.acmeTidyOrder(sc, thumbprint.Kid, getOrderPath(thumbprint.Kid, orderId), certTidyBuffer) + if err != nil { + return err + } + if !wasTidied { + allOrdersTidied = false + } + + if !orderExpiry.IsZero() && account.MaxCertExpiry.Before(orderExpiry) { + account.MaxCertExpiry = orderExpiry + maxCertExpiryUpdated = true + } + } + + now := time.Now() + if allOrdersTidied && + now.After(account.AccountCreatedDate.Add(accountTidyBuffer)) && + now.After(account.MaxCertExpiry.Add(accountTidyBuffer)) { + // Tidy this account + // If it is Revoked or Deactivated: + if (account.Status == AccountStatusRevoked || account.Status == AccountStatusDeactivated) && now.After(account.AccountRevokedDate.Add(accountTidyBuffer)) { + // We Delete the Account Associated with this Thumbprint: + err = sc.Storage.Delete(sc.Context, path.Join(acmeAccountPrefix, thumbprint.Kid)) + if err != nil { + return err + } + + // Now we delete the Thumbprint Associated with 
the Account: + err = sc.Storage.Delete(sc.Context, path.Join(acmeThumbprintPrefix, keyThumbprint)) + if err != nil { + return err + } + b.tidyStatusIncDeletedAcmeAccountCount() + } else if account.Status == AccountStatusValid { + // Revoke This Account + account.AccountRevokedDate = now + account.Status = AccountStatusRevoked + err := as.UpdateAccount(sc, &account) + if err != nil { + return err + } + b.tidyStatusIncRevAcmeAccountCount() + } + } + + // Only update the account if we modified the max cert expiry values and the account is still valid, + // to prevent us from adding back a deleted account or not re-writing the revoked account that was + // already written above. + if maxCertExpiryUpdated && account.Status == AccountStatusValid { + // Update our expiry time we previously setup. + err := as.UpdateAccount(sc, &account) + if err != nil { + return err + } + } + + return nil +} diff --git a/builtin/logical/pki/path_acme_authorizations.go b/builtin/logical/pki/path_acme_authorizations.go new file mode 100644 index 0000000..9914491 --- /dev/null +++ b/builtin/logical/pki/path_acme_authorizations.go @@ -0,0 +1,100 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "fmt" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathAcmeAuthorization(b *backend) []*framework.Path { + return buildAcmeFrameworkPaths(b, patternAcmeAuthorization, "/authorization/"+framework.MatchAllRegex("auth_id")) +} + +func addFieldsForACMEAuthorization(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { + fields["auth_id"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: "ACME authorization identifier value", + Required: true, + } + + return fields +} + +func patternAcmeAuthorization(b *backend, pattern string) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + addFieldsForACMERequest(fields) + addFieldsForACMEAuthorization(fields) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.acmeAccountRequiredWrapper(b.acmeAuthorizationHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func (b *backend) acmeAuthorizationHandler(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}, _ *acmeAccount) (*logical.Response, error) { + authId := fields.Get("auth_id").(string) + authz, err := b.acmeState.LoadAuthorization(acmeCtx, userCtx, authId) + if err != nil { + return nil, fmt.Errorf("failed to load authorization: %w", err) + } + + var status string + rawStatus, haveStatus := data["status"] + if haveStatus { + var ok bool + status, ok = rawStatus.(string) + if !ok { + return nil, fmt.Errorf("bad type (%T) for value 'status': %w", rawStatus, ErrMalformed) + } + } + + if len(data) == 0 { + return b.acmeAuthorizationFetchHandler(acmeCtx, r, fields, userCtx, data, authz) + } + + if haveStatus && status == "deactivated" { + return b.acmeAuthorizationDeactivateHandler(acmeCtx, r, fields, userCtx, data, authz) + } + + return nil, ErrMalformed +} + +func (b *backend) acmeAuthorizationFetchHandler(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data 
map[string]interface{}, authz *ACMEAuthorization) (*logical.Response, error) { + return &logical.Response{ + Data: authz.NetworkMarshal(acmeCtx), + }, nil +} + +func (b *backend) acmeAuthorizationDeactivateHandler(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}, authz *ACMEAuthorization) (*logical.Response, error) { + if authz.Status != ACMEAuthorizationPending && authz.Status != ACMEAuthorizationValid { + return nil, fmt.Errorf("unable to deactivate authorization in '%v' status: %w", authz.Status, ErrMalformed) + } + + authz.Status = ACMEAuthorizationDeactivated + for _, challenge := range authz.Challenges { + challenge.Status = ACMEChallengeInvalid + } + + if err := b.acmeState.SaveAuthorization(acmeCtx, authz); err != nil { + return nil, fmt.Errorf("error saving deactivated authorization: %w", err) + } + + return &logical.Response{ + Data: authz.NetworkMarshal(acmeCtx), + }, nil +} diff --git a/builtin/logical/pki/path_acme_challenges.go b/builtin/logical/pki/path_acme_challenges.go new file mode 100644 index 0000000..cded9d3 --- /dev/null +++ b/builtin/logical/pki/path_acme_challenges.go @@ -0,0 +1,114 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "fmt" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathAcmeChallenge(b *backend) []*framework.Path { + return buildAcmeFrameworkPaths(b, patternAcmeChallenge, + "/challenge/"+framework.MatchAllRegex("auth_id")+"/"+framework.MatchAllRegex("challenge_type")) +} + +func addFieldsForACMEChallenge(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { + fields["auth_id"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: "ACME authorization identifier value", + Required: true, + } + + fields["challenge_type"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: "ACME challenge type", + Required: true, + } + + return fields +} + +func patternAcmeChallenge(b *backend, pattern string) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + addFieldsForACMERequest(fields) + addFieldsForACMEChallenge(fields) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.acmeAccountRequiredWrapper(b.acmeChallengeHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func (b *backend) acmeChallengeHandler(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}, _ *acmeAccount) (*logical.Response, error) { + authId := fields.Get("auth_id").(string) + challengeType := fields.Get("challenge_type").(string) + + authz, err := b.acmeState.LoadAuthorization(acmeCtx, userCtx, authId) + if err != nil { + return nil, fmt.Errorf("failed to load authorization: %w", err) + } + + return b.acmeChallengeFetchHandler(acmeCtx, r, fields, userCtx, data, authz, challengeType) +} + +func (b *backend) acmeChallengeFetchHandler(acmeCtx *acmeContext, r *logical.Request, fields *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}, authz *ACMEAuthorization, challengeType string) (*logical.Response, error) { + var challenge *ACMEChallenge + for _, c := range 
authz.Challenges { + if string(c.Type) == challengeType { + challenge = c + break + } + } + + if challenge == nil { + return nil, fmt.Errorf("unknown challenge of type '%v' in authorization: %w", challengeType, ErrMalformed) + } + + // Per RFC 8555 Section 7.5.1. Responding to Challenges: + // + // > The client indicates to the server that it is ready for the challenge + // > validation by sending an empty JSON body ("{}") carried in a POST + // > request to the challenge URL (not the authorization URL). + if len(data) > 0 { + return nil, fmt.Errorf("unexpected request parameters: %w", ErrMalformed) + } + + // If data was nil, we got a POST-as-GET request, just return current challenge without an accept, + // otherwise we most likely got a "{}" payload which we should now accept the challenge. + if data != nil { + thumbprint, err := userCtx.GetKeyThumbprint() + if err != nil { + return nil, fmt.Errorf("failed to get thumbprint for key: %w", err) + } + + if err := b.acmeState.validator.AcceptChallenge(acmeCtx.sc, userCtx.Kid, authz, challenge, thumbprint); err != nil { + return nil, fmt.Errorf("error submitting challenge for validation: %w", err) + } + } + + return &logical.Response{ + Data: challenge.NetworkMarshal(acmeCtx, authz.Id), + + // Per RFC 8555 Section 7.1. Resources: + // + // > The "up" link relation is used with challenge resources to indicate + // > the authorization resource to which a challenge belongs. + Headers: map[string][]string{ + "Link": {fmt.Sprintf("<%s>;rel=\"up\"", buildAuthorizationUrl(acmeCtx, authz.Id))}, + }, + }, nil +} diff --git a/builtin/logical/pki/path_acme_directory.go b/builtin/logical/pki/path_acme_directory.go new file mode 100644 index 0000000..e556b35 --- /dev/null +++ b/builtin/logical/pki/path_acme_directory.go @@ -0,0 +1,70 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + pathAcmeHelpSync = `An endpoint implementing the standard ACME protocol` + pathAcmeHelpDesc = `This API endpoint implementing a subset of the ACME protocol + defined in RFC 8555, with its own authentication and argument syntax that + does not follow conventional Vault operations. 
An ACME client tool or library
+	should be used to interact with these endpoints.`
+)
+
+func pathAcmeDirectory(b *backend) []*framework.Path {
+	return buildAcmeFrameworkPaths(b, patternAcmeDirectory, "/directory")
+}
+
+func patternAcmeDirectory(b *backend, pattern string) *framework.Path {
+	fields := map[string]*framework.FieldSchema{}
+	addFieldsForACMEPath(fields, pattern)
+
+	return &framework.Path{
+		Pattern: pattern,
+		Fields:  fields,
+		Operations: map[logical.Operation]framework.OperationHandler{
+			logical.ReadOperation: &framework.PathOperation{
+				Callback:                    b.acmeWrapper(b.acmeDirectoryHandler),
+				ForwardPerformanceSecondary: false,
+				ForwardPerformanceStandby:   true,
+			},
+		},
+
+		HelpSynopsis:    pathAcmeHelpSync,
+		HelpDescription: pathAcmeHelpDesc,
+	}
+}
+
+func (b *backend) acmeDirectoryHandler(acmeCtx *acmeContext, r *logical.Request, _ *framework.FieldData) (*logical.Response, error) {
+	rawBody, err := json.Marshal(map[string]interface{}{
+		"newNonce":   acmeCtx.baseUrl.JoinPath("new-nonce").String(),
+		"newAccount": acmeCtx.baseUrl.JoinPath("new-account").String(),
+		"newOrder":   acmeCtx.baseUrl.JoinPath("new-order").String(),
+		"revokeCert": acmeCtx.baseUrl.JoinPath("revoke-cert").String(),
+		"keyChange":  acmeCtx.baseUrl.JoinPath("key-change").String(),
+		// This is purposefully missing newAuthz as we don't support pre-authorization
+		"meta": map[string]interface{}{
+			"externalAccountRequired": acmeCtx.eabPolicy.IsExternalAccountRequired(),
+		},
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed encoding response: %w", err)
+	}
+
+	return &logical.Response{
+		Data: map[string]interface{}{
+			logical.HTTPContentType: "application/json",
+			logical.HTTPStatusCode:  http.StatusOK,
+			logical.HTTPRawBody:     rawBody,
+		},
+	}, nil
+}
diff --git a/builtin/logical/pki/path_acme_eab.go b/builtin/logical/pki/path_acme_eab.go
new file mode 100644
index 0000000..b50077c
--- /dev/null
+++ b/builtin/logical/pki/path_acme_eab.go
@@ -0,0 +1,294 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package pki
+
+import (
+	"context"
+	"crypto/rand"
+	"encoding/base64"
+	"fmt"
+	"net/http"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+var decodedTokenPrefix = mustBase64Decode("vault-eab-0-")
+
+func mustBase64Decode(s string) []byte {
+	bytes, err := base64.RawURLEncoding.DecodeString(s)
+	if err != nil {
+		panic(fmt.Sprintf("Token prefix value: %s failed decoding: %v", s, err))
+	}
+
+	// Should be divisible by 3, otherwise our prefix will not be properly honored.
+	if len(bytes)%3 != 0 {
+		panic(fmt.Sprintf("Token prefix value: %s is not divisible by 3, will not prefix properly", s))
+	}
+	return bytes
+}
+
+/*
+ * This file, unlike the other path_acme_xxx.go files, provides Vault APIs to
+ * manage ACME External Account Bindings; it does not provide any APIs that an
+ * ACME client would use.
+ */ +func pathAcmeEabList(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "eab/?$", + Fields: map[string]*framework.FieldSchema{}, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ListOperation: &framework.PathOperation{ + Callback: b.pathAcmeListEab, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "list-eab-keys", + Description: "List all eab key identifiers yet to be used.", + }, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "keys": { + Type: framework.TypeStringSlice, + Description: `A list of unused eab keys`, + Required: true, + }, + "key_info": { + Type: framework.TypeMap, + Description: `EAB details keyed by the eab key id`, + Required: false, + }, + }, + }}, + }, + }, + }, + + HelpSynopsis: "list external account bindings to be used for ACME", + HelpDescription: `list identifiers that have been generated but yet to be used.`, + } +} + +func pathAcmeNewEab(b *backend) []*framework.Path { + return buildAcmeFrameworkPaths(b, patternAcmeNewEab, "/new-eab") +} + +func patternAcmeNewEab(b *backend, pattern string) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + + opSuffix := getAcmeOperationSuffix(pattern) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathAcmeCreateEab, + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "generate-eab-key", + OperationSuffix: opSuffix, + Description: "Generate an ACME EAB token for a directory", + }, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "id": { + Type: framework.TypeString, + Description: `The EAB key identifier`, + Required: true, + }, + "key_type": { + Type: framework.TypeString, + Description: `The EAB key type`, + Required: true, + }, + "key": { + Type: framework.TypeString, + Description: `The EAB hmac key`, + Required: true, + }, + "acme_directory": { + Type: framework.TypeString, + Description: `The ACME directory to which the key belongs`, + Required: true, + }, + "created_on": { + Type: framework.TypeTime, + Description: `An RFC3339 formatted date time when the EAB token was created`, + Required: true, + }, + }, + }}, + }, + }, + }, + + HelpSynopsis: "Generate external account bindings to be used for ACME", + HelpDescription: `Generate single use id/key pairs to be used for ACME EAB.`, + } +} + +func pathAcmeEabDelete(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "eab/" + uuidNameRegex("key_id"), + + Fields: map[string]*framework.FieldSchema{ + "key_id": { + Type: framework.TypeString, + Description: "EAB key identifier", + Required: true, + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathAcmeDeleteEab, + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "delete-eab-key", + Description: "Delete an unused EAB token", + }, + }, + }, + + HelpSynopsis: "Delete an external account binding id prior to its use 
within an ACME account",
+		HelpDescription: `Allows an operator to delete an external account binding,
+before it's bound to a new ACME account. If the identifier provided does not exist or
+was already consumed by an ACME account, a successful response is returned along with
+a warning that it did not exist.`,
+	}
+}
+
+type eabType struct {
+	KeyID         string    `json:"-"`
+	KeyType       string    `json:"key-type"`
+	PrivateBytes  []byte    `json:"private-bytes"`
+	AcmeDirectory string    `json:"acme-directory"`
+	CreatedOn     time.Time `json:"created-on"`
+}
+
+func (b *backend) pathAcmeListEab(ctx context.Context, r *logical.Request, _ *framework.FieldData) (*logical.Response, error) {
+	sc := b.makeStorageContext(ctx, r.Storage)
+
+	eabIds, err := b.acmeState.ListEabIds(sc)
+	if err != nil {
+		return nil, err
+	}
+
+	var warnings []string
+	var keyIds []string
+	keyInfos := map[string]interface{}{}
+
+	for _, eabKey := range eabIds {
+		eab, err := b.acmeState.LoadEab(sc, eabKey)
+		if err != nil {
+			warnings = append(warnings, fmt.Sprintf("failed loading eab entry %s: %v", eabKey, err))
+			continue
+		}
+
+		keyIds = append(keyIds, eab.KeyID)
+		keyInfos[eab.KeyID] = map[string]interface{}{
+			"key_type":       eab.KeyType,
+			"acme_directory": path.Join(eab.AcmeDirectory, "directory"),
+			"created_on":     eab.CreatedOn.Format(time.RFC3339),
+		}
+	}
+
+	resp := logical.ListResponseWithInfo(keyIds, keyInfos)
+	for _, warning := range warnings {
+		resp.AddWarning(warning)
+	}
+	return resp, nil
+}
+
+func (b *backend) pathAcmeCreateEab(ctx context.Context, r *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	kid := genUuid()
+	size := 32
+	bytes, err := uuid.GenerateRandomBytesWithReader(size, rand.Reader)
+	if err != nil {
+		return nil, fmt.Errorf("failed generating eab key: %w", err)
+	}
+
+	acmeDirectory, err := getAcmeDirectory(r)
+	if err != nil {
+		return nil, err
+	}
+
+	eab := &eabType{
+		KeyID:         kid,
+		KeyType:       "hs",
+		PrivateBytes:  append(decodedTokenPrefix, bytes...), // we do this to avoid generating tokens that start with -
+		AcmeDirectory: acmeDirectory,
+		CreatedOn:     time.Now(),
+	}
+
+	sc := b.makeStorageContext(ctx, r.Storage)
+	err = b.acmeState.SaveEab(sc, eab)
+	if err != nil {
+		return nil, fmt.Errorf("failed saving generated eab: %w", err)
+	}
+
+	encodedKey := base64.RawURLEncoding.EncodeToString(eab.PrivateBytes)
+
+	return &logical.Response{
+		Data: map[string]interface{}{
+			"id":             eab.KeyID,
+			"key_type":       eab.KeyType,
+			"key":            encodedKey,
+			"acme_directory": path.Join(eab.AcmeDirectory, "directory"),
+			"created_on":     eab.CreatedOn.Format(time.RFC3339),
+		},
+	}, nil
+}
+
+func (b *backend) pathAcmeDeleteEab(ctx context.Context, r *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	sc := b.makeStorageContext(ctx, r.Storage)
+	keyId := d.Get("key_id").(string)
+
+	_, err := uuid.ParseUUID(keyId)
+	if err != nil {
+		return nil, fmt.Errorf("badly formatted key_id field")
+	}
+
+	deleted, err := b.acmeState.DeleteEab(sc, keyId)
+	if err != nil {
+		return nil, fmt.Errorf("failed deleting key id: %w", err)
+	}
+
+	resp := &logical.Response{}
+	if !deleted {
+		resp.AddWarning("No key id found with id: " + keyId)
+	}
+	return resp, nil
+}
+
+// getAcmeOperationSuffix is used mainly to compute the OpenAPI spec suffix value to
+// distinguish different versions of ACME Vault APIs based on directory paths
+func getAcmeOperationSuffix(pattern string) string {
+	hasRole := strings.Contains(pattern, framework.GenericNameRegex("role"))
+	hasIssuer := strings.Contains(pattern,
framework.GenericNameRegex(issuerRefParam)) + + switch { + case hasRole && hasIssuer: + return "for-issuer-and-role" + case hasRole: + return "for-role" + case hasIssuer: + return "for-issuer" + default: + return "" + } +} diff --git a/builtin/logical/pki/path_acme_nonce.go b/builtin/logical/pki/path_acme_nonce.go new file mode 100644 index 0000000..e973039 --- /dev/null +++ b/builtin/logical/pki/path_acme_nonce.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "fmt" + "net/http" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathAcmeNonce(b *backend) []*framework.Path { + return buildAcmeFrameworkPaths(b, patternAcmeNonce, "/new-nonce") +} + +func patternAcmeNonce(b *backend, pattern string) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.HeaderOperation: &framework.PathOperation{ + Callback: b.acmeWrapper(b.acmeNonceHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.acmeWrapper(b.acmeNonceHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func (b *backend) acmeNonceHandler(ctx *acmeContext, r *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + nonce, _, err := b.acmeState.GetNonce() + if err != nil { + return nil, err + } + + // Header operations return 200, GET return 204. + httpStatus := http.StatusOK + if r.Operation == logical.ReadOperation { + httpStatus = http.StatusNoContent + } + + return &logical.Response{ + Headers: map[string][]string{ + "Cache-Control": {"no-store"}, + "Replay-Nonce": {nonce}, + "Link": genAcmeLinkHeader(ctx), + }, + Data: map[string]interface{}{ + logical.HTTPStatusCode: httpStatus, + // Get around Vault limitation of requiring a body set if the status is not http.StatusNoContent + // for our HEAD request responses. + logical.HTTPContentType: "", + }, + }, nil +} + +func genAcmeLinkHeader(ctx *acmeContext) []string { + path := fmt.Sprintf("<%s>;rel=\"index\"", ctx.baseUrl.JoinPath("directory").String()) + return []string{path} +} diff --git a/builtin/logical/pki/path_acme_order.go b/builtin/logical/pki/path_acme_order.go new file mode 100644 index 0000000..b4a6460 --- /dev/null +++ b/builtin/logical/pki/path_acme_order.go @@ -0,0 +1,1092 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "crypto/x509" + "encoding/base64" + "encoding/pem" + "fmt" + "net" + "net/http" + "sort" + "strings" + "time" + + "github.com/hashicorp/vault/sdk/helper/strutil" + + "github.com/hashicorp/vault/sdk/helper/certutil" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" + "golang.org/x/net/idna" +) + +var maxAcmeCertTTL = 90 * (24 * time.Hour) + +func pathAcmeListOrders(b *backend) []*framework.Path { + return buildAcmeFrameworkPaths(b, patternAcmeListOrders, "/orders") +} + +func pathAcmeGetOrder(b *backend) []*framework.Path { + return buildAcmeFrameworkPaths(b, patternAcmeGetOrder, "/order/"+uuidNameRegex("order_id")) +} + +func pathAcmeNewOrder(b *backend) []*framework.Path { + return buildAcmeFrameworkPaths(b, patternAcmeNewOrder, "/new-order") +} + +func pathAcmeFinalizeOrder(b *backend) []*framework.Path { + return buildAcmeFrameworkPaths(b, patternAcmeFinalizeOrder, "/order/"+uuidNameRegex("order_id")+"/finalize") +} + +func pathAcmeFetchOrderCert(b *backend) []*framework.Path { + return buildAcmeFrameworkPaths(b, patternAcmeFetchOrderCert, "/order/"+uuidNameRegex("order_id")+"/cert") +} + +func patternAcmeNewOrder(b *backend, pattern string) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + addFieldsForACMERequest(fields) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.acmeAccountRequiredWrapper(b.acmeNewOrderHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func patternAcmeListOrders(b *backend, pattern string) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + addFieldsForACMERequest(fields) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.acmeAccountRequiredWrapper(b.acmeListOrdersHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func patternAcmeGetOrder(b *backend, pattern string) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + addFieldsForACMERequest(fields) + addFieldsForACMEOrder(fields) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.acmeAccountRequiredWrapper(b.acmeGetOrderHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func patternAcmeFinalizeOrder(b *backend, pattern string) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + addFieldsForACMERequest(fields) + addFieldsForACMEOrder(fields) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: 
b.acmeAccountRequiredWrapper(b.acmeFinalizeOrderHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func patternAcmeFetchOrderCert(b *backend, pattern string) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + addFieldsForACMERequest(fields) + addFieldsForACMEOrder(fields) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.acmeAccountRequiredWrapper(b.acmeFetchCertOrderHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func addFieldsForACMEOrder(fields map[string]*framework.FieldSchema) { + fields["order_id"] = &framework.FieldSchema{ + Type: framework.TypeString, + Description: `The ACME order identifier to fetch`, + Required: true, + } +} + +func (b *backend) acmeFetchCertOrderHandler(ac *acmeContext, _ *logical.Request, fields *framework.FieldData, uc *jwsCtx, data map[string]interface{}, _ *acmeAccount) (*logical.Response, error) { + orderId := fields.Get("order_id").(string) + + order, err := b.acmeState.LoadOrder(ac, uc, orderId) + if err != nil { + return nil, err + } + + if order.Status != ACMEOrderValid { + return nil, fmt.Errorf("%w: order is status %s, needs to be in valid state", ErrOrderNotReady, order.Status) + } + + if len(order.IssuerId) == 0 || len(order.CertificateSerialNumber) == 0 { + return nil, fmt.Errorf("order is missing required fields to load certificate") + } + + certEntry, err := fetchCertBySerial(ac.sc, "certs/", order.CertificateSerialNumber) + if err != nil { + return nil, fmt.Errorf("failed reading certificate %s from storage: %w", order.CertificateSerialNumber, err) + } + if certEntry == nil || len(certEntry.Value) == 0 { + return nil, fmt.Errorf("missing certificate %s from storage", order.CertificateSerialNumber) + } + + cert, err := x509.ParseCertificate(certEntry.Value) + if err != nil { + return nil, fmt.Errorf("failed parsing certificate %s: %w", order.CertificateSerialNumber, err) + } + + issuer, err := ac.sc.fetchIssuerById(order.IssuerId) + if err != nil { + return nil, fmt.Errorf("failed loading certificate issuer %s from storage: %w", order.IssuerId, err) + } + + allPems, err := func() ([]byte, error) { + leafPEM := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: cert.Raw, + }) + + chains := []byte(issuer.Certificate) + for _, chainVal := range issuer.CAChain { + if chainVal == issuer.Certificate { + continue + } + chains = append(chains, []byte(chainVal)...) 
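+			// (Chain entries equal to the issuer's own certificate are skipped
+			// above, so the issuing certificate appears exactly once in the
+			// final leaf-first application/pem-certificate-chain bundle.)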
+ } + + return append(leafPEM, chains...), nil + }() + if err != nil { + return nil, fmt.Errorf("failed encoding certificate ca chain: %w", err) + } + + return &logical.Response{ + Data: map[string]interface{}{ + logical.HTTPContentType: "application/pem-certificate-chain", + logical.HTTPStatusCode: http.StatusOK, + logical.HTTPRawBody: allPems, + }, + }, nil +} + +func (b *backend) acmeFinalizeOrderHandler(ac *acmeContext, _ *logical.Request, fields *framework.FieldData, uc *jwsCtx, data map[string]interface{}, account *acmeAccount) (*logical.Response, error) { + orderId := fields.Get("order_id").(string) + + csr, err := parseCsrFromFinalize(data) + if err != nil { + return nil, err + } + + order, err := b.acmeState.LoadOrder(ac, uc, orderId) + if err != nil { + return nil, err + } + + order.Status, err = computeOrderStatus(ac, uc, order) + if err != nil { + return nil, err + } + + if order.Status != ACMEOrderReady { + return nil, fmt.Errorf("%w: order is status %s, needs to be in ready state", ErrOrderNotReady, order.Status) + } + + now := time.Now() + if !order.Expires.IsZero() && now.After(order.Expires) { + return nil, fmt.Errorf("%w: order %s is expired", ErrMalformed, orderId) + } + + if err = validateCsrMatchesOrder(csr, order); err != nil { + return nil, err + } + + if err = validateCsrNotUsingAccountKey(csr, uc); err != nil { + return nil, err + } + + signedCertBundle, issuerId, err := issueCertFromCsr(ac, csr) + if err != nil { + return nil, err + } + + hyphenSerialNumber := normalizeSerialFromBigInt(signedCertBundle.Certificate.SerialNumber) + err = storeCertificate(ac.sc, signedCertBundle) + if err != nil { + return nil, err + } + + if err := b.acmeState.TrackIssuedCert(ac, order.AccountId, hyphenSerialNumber, order.OrderId); err != nil { + b.Logger().Warn("orphaned generated ACME certificate due to error saving account->cert->order reference", "serial_number", hyphenSerialNumber, "error", err) + return nil, err + } + + order.Status = ACMEOrderValid + order.CertificateSerialNumber = hyphenSerialNumber + order.CertificateExpiry = signedCertBundle.Certificate.NotAfter + order.IssuerId = issuerId + + err = b.acmeState.SaveOrder(ac, order) + if err != nil { + b.Logger().Warn("orphaned generated ACME certificate due to error saving order", "serial_number", hyphenSerialNumber, "error", err) + return nil, fmt.Errorf("failed saving updated order: %w", err) + } + + if err := b.doTrackBilling(ac.sc.Context, order.Identifiers); err != nil { + b.Logger().Error("failed to track billing for order", "order", orderId, "error", err) + err = nil + } + + return formatOrderResponse(ac, order), nil +} + +func computeOrderStatus(ac *acmeContext, uc *jwsCtx, order *acmeOrder) (ACMEOrderStatusType, error) { + // If we reached a final stage, no use computing anything else + if order.Status == ACMEOrderInvalid || order.Status == ACMEOrderValid { + return order.Status, nil + } + + // We aren't in a final state yet, check for expiry + if time.Now().After(order.Expires) { + return ACMEOrderInvalid, nil + } + + // Intermediary steps passed authorizations should short circuit us as well + if order.Status == ACMEOrderReady || order.Status == ACMEOrderProcessing { + return order.Status, nil + } + + // If we have no authorizations attached to the order, nothing to compute either + if len(order.AuthorizationIds) == 0 { + return ACMEOrderPending, nil + } + + anyFailed := false + allPassed := true + for _, authId := range order.AuthorizationIds { + authorization, err := ac.getAcmeState().LoadAuthorization(ac, uc, 
authId) + if err != nil { + return order.Status, fmt.Errorf("failed loading authorization: %s: %w", authId, err) + } + + if authorization.Status == ACMEAuthorizationPending { + allPassed = false + continue + } + + if authorization.Status != ACMEAuthorizationValid { + // Per RFC 8555 - 7.1.6. Status Changes + // The order also moves to the "invalid" state if it expires or + // one of its authorizations enters a final state other than + // "valid" ("expired", "revoked", or "deactivated"). + allPassed = false + anyFailed = true + break + } + } + + if anyFailed { + return ACMEOrderInvalid, nil + } + + if allPassed { + return ACMEOrderReady, nil + } + + // The order has not expired, no authorizations have yet to be marked as failed + // nor have we passed them all. + return ACMEOrderPending, nil +} + +func validateCsrNotUsingAccountKey(csr *x509.CertificateRequest, uc *jwsCtx) error { + csrKey := csr.PublicKey + userKey := uc.Key.Public().Key + + sameKey, err := certutil.ComparePublicKeysAndType(csrKey, userKey) + if err != nil { + return err + } + + if sameKey { + return fmt.Errorf("%w: certificate public key must not match account key", ErrBadCSR) + } + + return nil +} + +func validateCsrMatchesOrder(csr *x509.CertificateRequest, order *acmeOrder) error { + csrDNSIdentifiers, csrIPIdentifiers := getIdentifiersFromCSR(csr) + orderDNSIdentifiers := strutil.RemoveDuplicates(order.getIdentifierDNSValues(), true) + orderIPIdentifiers := removeDuplicatesAndSortIps(order.getIdentifierIPValues()) + + if len(orderDNSIdentifiers) == 0 && len(orderIPIdentifiers) == 0 { + return fmt.Errorf("%w: order did not include any identifiers", ErrServerInternal) + } + + if len(orderDNSIdentifiers) != len(csrDNSIdentifiers) { + return fmt.Errorf("%w: Order (%v) and CSR (%v) mismatch on number of DNS identifiers", ErrBadCSR, len(orderDNSIdentifiers), len(csrDNSIdentifiers)) + } + + if len(orderIPIdentifiers) != len(csrIPIdentifiers) { + return fmt.Errorf("%w: Order (%v) and CSR (%v) mismatch on number of IP identifiers", ErrBadCSR, len(orderIPIdentifiers), len(csrIPIdentifiers)) + } + + for i, identifier := range orderDNSIdentifiers { + if identifier != csrDNSIdentifiers[i] { + return fmt.Errorf("%w: CSR is missing order DNS identifier %s", ErrBadCSR, identifier) + } + } + + for i, identifier := range orderIPIdentifiers { + if !identifier.Equal(csrIPIdentifiers[i]) { + return fmt.Errorf("%w: CSR is missing order IP identifier %s", ErrBadCSR, identifier.String()) + } + } + + // Since we do not support NotBefore/NotAfter dates at this time no need to validate CSR/Order match. 
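+	// For illustration: both sides are deduplicated (and sorted) before the
+	// positional comparison above, so an order for {example.com, 192.0.2.1}
+	// matches only a CSR whose SANs (plus any CN) reduce to exactly that same
+	// set; any extra or missing identifier fails with ErrBadCSR.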
+ + return nil +} + +func (b *backend) validateIdentifiersAgainstRole(role *roleEntry, identifiers []*ACMEIdentifier) error { + for _, identifier := range identifiers { + switch identifier.Type { + case ACMEDNSIdentifier: + data := &inputBundle{ + role: role, + req: &logical.Request{}, + apiData: &framework.FieldData{}, + } + + if validateNames(b, data, []string{identifier.OriginalValue}) != "" { + return fmt.Errorf("%w: role (%s) will not issue certificate for name %v", + ErrRejectedIdentifier, role.Name, identifier.OriginalValue) + } + case ACMEIPIdentifier: + if !role.AllowIPSANs { + return fmt.Errorf("%w: role (%s) does not allow IP sans, so cannot issue certificate for %v", + ErrRejectedIdentifier, role.Name, identifier.OriginalValue) + } + default: + return fmt.Errorf("unknown type of identifier: %v for %v", identifier.Type, identifier.OriginalValue) + } + } + + return nil +} + +func getIdentifiersFromCSR(csr *x509.CertificateRequest) ([]string, []net.IP) { + dnsIdentifiers := append([]string(nil), csr.DNSNames...) + ipIdentifiers := append([]net.IP(nil), csr.IPAddresses...) + + if csr.Subject.CommonName != "" { + ip := net.ParseIP(csr.Subject.CommonName) + if ip != nil { + ipIdentifiers = append(ipIdentifiers, ip) + } else { + dnsIdentifiers = append(dnsIdentifiers, csr.Subject.CommonName) + } + } + + return strutil.RemoveDuplicates(dnsIdentifiers, true), removeDuplicatesAndSortIps(ipIdentifiers) +} + +func removeDuplicatesAndSortIps(ipIdentifiers []net.IP) []net.IP { + var uniqueIpIdentifiers []net.IP + for _, ip := range ipIdentifiers { + found := false + for _, curIp := range uniqueIpIdentifiers { + if curIp.Equal(ip) { + found = true + } + } + + if !found { + uniqueIpIdentifiers = append(uniqueIpIdentifiers, ip) + } + } + + sort.Slice(uniqueIpIdentifiers, func(i, j int) bool { + return uniqueIpIdentifiers[i].String() < uniqueIpIdentifiers[j].String() + }) + return uniqueIpIdentifiers +} + +func storeCertificate(sc *storageContext, signedCertBundle *certutil.ParsedCertBundle) error { + hyphenSerialNumber := normalizeSerialFromBigInt(signedCertBundle.Certificate.SerialNumber) + key := "certs/" + hyphenSerialNumber + certsCounted := sc.Backend.certsCounted.Load() + err := sc.Storage.Put(sc.Context, &logical.StorageEntry{ + Key: key, + Value: signedCertBundle.CertificateBytes, + }) + if err != nil { + return fmt.Errorf("unable to store certificate locally: %w", err) + } + sc.Backend.ifCountEnabledIncrementTotalCertificatesCount(certsCounted, key) + return nil +} + +func maybeAugmentReqDataWithSuitableCN(ac *acmeContext, csr *x509.CertificateRequest, data *framework.FieldData) { + // Role doesn't require a CN, so we don't care. + if !ac.role.RequireCN { + return + } + + // CSR contains a CN, so use that one. + if csr.Subject.CommonName != "" { + return + } + + // Choose a CN in the order wildcard -> DNS -> IP -> fail. 
+ for _, name := range csr.DNSNames { + if strings.Contains(name, "*") { + data.Raw["common_name"] = name + return + } + } + if len(csr.DNSNames) > 0 { + data.Raw["common_name"] = csr.DNSNames[0] + return + } + if len(csr.IPAddresses) > 0 { + data.Raw["common_name"] = csr.IPAddresses[0].String() + return + } +} + +func issueCertFromCsr(ac *acmeContext, csr *x509.CertificateRequest) (*certutil.ParsedCertBundle, issuerID, error) { + pemBlock := &pem.Block{ + Type: "CERTIFICATE REQUEST", + Headers: nil, + Bytes: csr.Raw, + } + pemCsr := string(pem.EncodeToMemory(pemBlock)) + + data := &framework.FieldData{ + Raw: map[string]interface{}{ + "csr": pemCsr, + }, + Schema: getCsrSignVerbatimSchemaFields(), + } + + // XXX: Usability hack: by default, minimalist roles have require_cn=true, + // but some ACME clients do not provision one in the certificate as modern + // (TLS) clients are mostly verifying against server's DNS SANs. + maybeAugmentReqDataWithSuitableCN(ac, csr, data) + + signingBundle, issuerId, err := ac.sc.fetchCAInfoWithIssuer(ac.issuer.ID.String(), IssuanceUsage) + if err != nil { + return nil, "", fmt.Errorf("failed loading CA %s: %w", ac.issuer.ID.String(), err) + } + + // ACME issued cert will override the TTL values to truncate to the issuer's + // expiration if we go beyond, no matter the setting + if signingBundle.LeafNotAfterBehavior == certutil.ErrNotAfterBehavior { + signingBundle.LeafNotAfterBehavior = certutil.TruncateNotAfterBehavior + } + + input := &inputBundle{ + req: &logical.Request{}, + apiData: data, + role: ac.role, + } + + normalNotAfter, _, err := getCertificateNotAfter(ac.sc.Backend, input, signingBundle) + if err != nil { + return nil, "", fmt.Errorf("failed computing certificate TTL from role/mount: %v: %w", err, ErrMalformed) + } + + // Force a maximum 90 day TTL or lower for ACME + if time.Now().Add(maxAcmeCertTTL).Before(normalNotAfter) { + input.apiData.Raw["ttl"] = maxAcmeCertTTL + } + + if csr.PublicKeyAlgorithm == x509.UnknownPublicKeyAlgorithm || csr.PublicKey == nil { + return nil, "", fmt.Errorf("%w: Refusing to sign CSR with empty PublicKey", ErrBadCSR) + } + + // UseCSRValues as defined in certutil/helpers.go accepts the following + // fields off of the CSR: + // + // 1. Subject fields, + // 2. SANs, + // 3. Extensions (except for a BasicConstraint extension) + // + // Because we have stricter validation of subject parameters, and no way + // to validate or allow extensions, we do not wish to use the CSR's + // parameters for these values. If a CSR sets, e.g., an organizational + // unit, we have no way of validating this (via ACME here, without perhaps + // an external policy engine), and thus should not be setting it on our + // final issued certificate. + parsedBundle, _, err := signCert(ac.sc.Backend, input, signingBundle, false /* is_ca=false */, false /* use_csr_values */) + if err != nil { + return nil, "", fmt.Errorf("%w: refusing to sign CSR: %s", ErrBadCSR, err.Error()) + } + + if err = parsedBundle.Verify(); err != nil { + return nil, "", fmt.Errorf("verification of parsed bundle failed: %w", err) + } + + // We only allow ServerAuth key usage from ACME issued certs + // when configuration does not allow usage of ExtKeyusage field. 
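+	// Illustration: with AllowRoleExtKeyUsage unset (the default), a role
+	// whose ext_key_usage settings produced, say, ClientAuth on the issued
+	// certificate is rejected by the loop below with ErrBadCSR; only
+	// ServerAuth survives the filter.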
+ config, err := ac.sc.Backend.acmeState.getConfigWithUpdate(ac.sc) + if err != nil { + return nil, "", fmt.Errorf("failed to fetch ACME configuration: %w", err) + } + + if !config.AllowRoleExtKeyUsage { + for _, usage := range parsedBundle.Certificate.ExtKeyUsage { + if usage != x509.ExtKeyUsageServerAuth { + return nil, "", fmt.Errorf("%w: ACME certs only allow ServerAuth key usage", ErrBadCSR) + } + } + } + + return parsedBundle, issuerId, err +} + +func parseCsrFromFinalize(data map[string]interface{}) (*x509.CertificateRequest, error) { + csrInterface, present := data["csr"] + if !present { + return nil, fmt.Errorf("%w: missing csr in payload", ErrMalformed) + } + + base64Csr, ok := csrInterface.(string) + if !ok { + return nil, fmt.Errorf("%w: csr in payload not the expected type: %T", ErrMalformed, csrInterface) + } + + derCsr, err := base64.RawURLEncoding.DecodeString(base64Csr) + if err != nil { + return nil, fmt.Errorf("%w: failed base64 decoding csr: %s", ErrMalformed, err.Error()) + } + + csr, err := x509.ParseCertificateRequest(derCsr) + if err != nil { + return nil, fmt.Errorf("%w: failed to parse csr: %s", ErrMalformed, err.Error()) + } + + if csr.PublicKey == nil || csr.PublicKeyAlgorithm == x509.UnknownPublicKeyAlgorithm { + return nil, fmt.Errorf("%w: failed to parse csr no public key info or unknown key algorithm used", ErrBadCSR) + } + + for _, ext := range csr.Extensions { + if ext.Id.Equal(certutil.ExtensionBasicConstraintsOID) { + isCa, _, err := certutil.ParseBasicConstraintExtension(ext) + if err != nil { + return nil, fmt.Errorf("%w: refusing to accept CSR with Basic Constraints extension: %v", ErrBadCSR, err.Error()) + } + + if isCa { + return nil, fmt.Errorf("%w: refusing to accept CSR with Basic Constraints extension with CA set to true", ErrBadCSR) + } + } + } + + return csr, nil +} + +func (b *backend) acmeGetOrderHandler(ac *acmeContext, _ *logical.Request, fields *framework.FieldData, uc *jwsCtx, _ map[string]interface{}, _ *acmeAccount) (*logical.Response, error) { + orderId := fields.Get("order_id").(string) + + order, err := b.acmeState.LoadOrder(ac, uc, orderId) + if err != nil { + return nil, err + } + + order.Status, err = computeOrderStatus(ac, uc, order) + if err != nil { + return nil, err + } + + // Per RFC 8555 -> 7.1.3. Order Objects + // For final orders (in the "valid" or "invalid" state), the authorizations that were completed. + // + // Otherwise, for "pending" orders we will return our list as it was originally saved. 
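+	// e.g., a "valid" or "invalid" order whose authorizations ended up as
+	// {authz-a: valid, authz-b: deactivated} is returned listing only
+	// authz-a, matching the RFC's "authorizations that were completed".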
+ requiresFiltering := order.Status == ACMEOrderValid || order.Status == ACMEOrderInvalid + if requiresFiltering { + filteredAuthorizationIds := []string{} + + for _, authId := range order.AuthorizationIds { + authorization, err := b.acmeState.LoadAuthorization(ac, uc, authId) + if err != nil { + return nil, err + } + + if (order.Status == ACMEOrderInvalid || order.Status == ACMEOrderValid) && + authorization.Status == ACMEAuthorizationValid { + filteredAuthorizationIds = append(filteredAuthorizationIds, authId) + } + } + + order.AuthorizationIds = filteredAuthorizationIds + } + + return formatOrderResponse(ac, order), nil +} + +func (b *backend) acmeListOrdersHandler(ac *acmeContext, _ *logical.Request, _ *framework.FieldData, uc *jwsCtx, _ map[string]interface{}, acct *acmeAccount) (*logical.Response, error) { + orderIds, err := b.acmeState.ListOrderIds(ac.sc, acct.KeyId) + if err != nil { + return nil, err + } + + orderUrls := []string{} + for _, orderId := range orderIds { + order, err := b.acmeState.LoadOrder(ac, uc, orderId) + if err != nil { + return nil, err + } + + if order.Status == ACMEOrderInvalid { + // Per RFC8555 -> 7.1.2.1 - Orders List + // The server SHOULD include pending orders and SHOULD NOT + // include orders that are invalid in the array of URLs. + continue + } + + orderUrls = append(orderUrls, buildOrderUrl(ac, orderId)) + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "orders": orderUrls, + }, + } + + return resp, nil +} + +func (b *backend) acmeNewOrderHandler(ac *acmeContext, _ *logical.Request, _ *framework.FieldData, _ *jwsCtx, data map[string]interface{}, account *acmeAccount) (*logical.Response, error) { + identifiers, err := parseOrderIdentifiers(data) + if err != nil { + return nil, err + } + + notBefore, err := parseOptRFC3339Field(data, "notBefore") + if err != nil { + return nil, err + } + + notAfter, err := parseOptRFC3339Field(data, "notAfter") + if err != nil { + return nil, err + } + + if !notBefore.IsZero() || !notAfter.IsZero() { + return nil, fmt.Errorf("%w: NotBefore and NotAfter are not supported", ErrMalformed) + } + + err = validateAcmeProvidedOrderDates(notBefore, notAfter) + if err != nil { + return nil, err + } + + err = b.validateIdentifiersAgainstRole(ac.role, identifiers) + if err != nil { + return nil, err + } + + // Per RFC 8555 -> 7.1.3. Order Objects + // For pending orders, the authorizations that the client needs to complete before the + // requested certificate can be issued (see Section 7.5), including + // unexpired authorizations that the client has completed in the past + // for identifiers specified in the order. + // + // Since we are generating all authorizations here, there is no need to filter them out + // IF/WHEN we support pre-authz workflows and associate existing authorizations to this + // order they will need filtering. 
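+	// For illustration, a new-order payload of
+	//   {"identifiers": [{"type": "dns", "value": "www.example.com"},
+	//                    {"type": "ip", "value": "192.0.2.1"}]}
+	// produces one pending authorization (and one authorization URL in the
+	// response) per identifier below.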
+ var authorizations []*ACMEAuthorization + var authorizationIds []string + for _, identifier := range identifiers { + authz, err := generateAuthorization(account, identifier) + if err != nil { + return nil, fmt.Errorf("error generating authorizations: %w", err) + } + authorizations = append(authorizations, authz) + + err = b.acmeState.SaveAuthorization(ac, authz) + if err != nil { + return nil, fmt.Errorf("failed storing authorization: %w", err) + } + + authorizationIds = append(authorizationIds, authz.Id) + } + + order := &acmeOrder{ + OrderId: genUuid(), + AccountId: account.KeyId, + Status: ACMEOrderPending, + Expires: time.Now().Add(24 * time.Hour), // TODO: Readjust this based on authz and/or config + Identifiers: identifiers, + AuthorizationIds: authorizationIds, + } + + err = b.acmeState.SaveOrder(ac, order) + if err != nil { + return nil, fmt.Errorf("failed storing order: %w", err) + } + + resp := formatOrderResponse(ac, order) + + // Per RFC 8555 Section 7.4. Applying for Certificate Issuance: + // + // > If the server is willing to issue the requested certificate, it + // > responds with a 201 (Created) response. + resp.Data[logical.HTTPStatusCode] = http.StatusCreated + return resp, nil +} + +func validateAcmeProvidedOrderDates(notBefore time.Time, notAfter time.Time) error { + if !notBefore.IsZero() && !notAfter.IsZero() { + if notBefore.Equal(notAfter) { + return fmt.Errorf("%w: provided notBefore and notAfter dates can not be equal", ErrMalformed) + } + + if notBefore.After(notAfter) { + return fmt.Errorf("%w: provided notBefore can not be greater than notAfter", ErrMalformed) + } + } + + if !notAfter.IsZero() { + if time.Now().After(notAfter) { + return fmt.Errorf("%w: provided notAfter can not be in the past", ErrMalformed) + } + } + + return nil +} + +func formatOrderResponse(acmeCtx *acmeContext, order *acmeOrder) *logical.Response { + baseOrderUrl := buildOrderUrl(acmeCtx, order.OrderId) + + var authorizationUrls []string + for _, authId := range order.AuthorizationIds { + authorizationUrls = append(authorizationUrls, buildAuthorizationUrl(acmeCtx, authId)) + } + + var identifiers []map[string]interface{} + for _, identifier := range order.Identifiers { + identifiers = append(identifiers, identifier.NetworkMarshal( /* use original value */ true)) + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "status": order.Status, + "expires": order.Expires.Format(time.RFC3339), + "identifiers": identifiers, + "authorizations": authorizationUrls, + "finalize": baseOrderUrl + "/finalize", + }, + Headers: map[string][]string{ + "Location": {baseOrderUrl}, + }, + } + + // Only reply with the certificate URL if we are in a valid order state. + if order.Status == ACMEOrderValid { + resp.Data["certificate"] = baseOrderUrl + "/cert" + } + + return resp +} + +func buildAuthorizationUrl(acmeCtx *acmeContext, authId string) string { + return acmeCtx.baseUrl.JoinPath("authorization", authId).String() +} + +func buildOrderUrl(acmeCtx *acmeContext, orderId string) string { + return acmeCtx.baseUrl.JoinPath("order", orderId).String() +} + +func generateAuthorization(acct *acmeAccount, identifier *ACMEIdentifier) (*ACMEAuthorization, error) { + authId := genUuid() + + // Certain challenges have certain restrictions: DNS challenges cannot + // be used to validate IP addresses, and only DNS challenges can be used + // to validate wildcards. 
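+	// e.g., "*.example.com" is offered only dns-01, "192.0.2.1" only
+	// http-01, and a plain "www.example.com" all three of http-01, dns-01
+	// and tls-alpn-01.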
+ allowedChallenges := []ACMEChallengeType{ACMEHTTPChallenge, ACMEDNSChallenge, ACMEALPNChallenge} + if identifier.Type == ACMEIPIdentifier { + allowedChallenges = []ACMEChallengeType{ACMEHTTPChallenge} + } else if identifier.IsWildcard { + allowedChallenges = []ACMEChallengeType{ACMEDNSChallenge} + } + + var challenges []*ACMEChallenge + for _, challengeType := range allowedChallenges { + token, err := getACMEToken() + if err != nil { + return nil, err + } + + challenge := &ACMEChallenge{ + Type: challengeType, + Status: ACMEChallengePending, + ChallengeFields: map[string]interface{}{ + "token": token, + }, + } + + challenges = append(challenges, challenge) + } + + return &ACMEAuthorization{ + Id: authId, + AccountId: acct.KeyId, + Identifier: identifier, + Status: ACMEAuthorizationPending, + Expires: "", // only populated when it switches to valid. + Challenges: challenges, + Wildcard: identifier.IsWildcard, + }, nil +} + +func parseOptRFC3339Field(data map[string]interface{}, keyName string) (time.Time, error) { + var timeVal time.Time + var err error + + rawBefore, present := data[keyName] + if present { + beforeStr, ok := rawBefore.(string) + if !ok { + return timeVal, fmt.Errorf("invalid type (%T) for field '%s': %w", rawBefore, keyName, ErrMalformed) + } + timeVal, err = time.Parse(time.RFC3339, beforeStr) + if err != nil { + return timeVal, fmt.Errorf("failed parsing field '%s' (%s): %s: %w", keyName, rawBefore, err.Error(), ErrMalformed) + } + + if timeVal.IsZero() { + return timeVal, fmt.Errorf("provided time value is invalid '%s' (%s): %w", keyName, rawBefore, ErrMalformed) + } + } + + return timeVal, nil +} + +func parseOrderIdentifiers(data map[string]interface{}) ([]*ACMEIdentifier, error) { + rawIdentifiers, present := data["identifiers"] + if !present { + return nil, fmt.Errorf("missing required identifiers argument: %w", ErrMalformed) + } + + listIdentifiers, ok := rawIdentifiers.([]interface{}) + if !ok { + return nil, fmt.Errorf("invalid type (%T) for field 'identifiers': %w", rawIdentifiers, ErrMalformed) + } + + var identifiers []*ACMEIdentifier + for _, rawIdentifier := range listIdentifiers { + mapIdentifier, ok := rawIdentifier.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("invalid type (%T) for value in 'identifiers': %w", rawIdentifier, ErrMalformed) + } + + typeVal, present := mapIdentifier["type"] + if !present { + return nil, fmt.Errorf("missing type argument for value in 'identifiers': %w", ErrMalformed) + } + typeStr, ok := typeVal.(string) + if !ok { + return nil, fmt.Errorf("invalid type for type argument (%T) for value in 'identifiers': %w", typeStr, ErrMalformed) + } + + valueVal, present := mapIdentifier["value"] + if !present { + return nil, fmt.Errorf("missing value argument for value in 'identifiers': %w", ErrMalformed) + } + valueStr, ok := valueVal.(string) + if !ok { + return nil, fmt.Errorf("invalid type for value argument (%T) for value in 'identifiers': %w", valueStr, ErrMalformed) + } + + if len(valueStr) == 0 { + return nil, fmt.Errorf("value argument for value in 'identifiers' can not be blank: %w", ErrMalformed) + } + + identifier := &ACMEIdentifier{ + Value: valueStr, + OriginalValue: valueStr, + } + + switch typeStr { + case string(ACMEIPIdentifier): + identifier.Type = ACMEIPIdentifier + ip := net.ParseIP(valueStr) + if ip == nil { + return nil, fmt.Errorf("value argument (%s) failed validation: failed parsing as IP: %w", valueStr, ErrMalformed) + } + case string(ACMEDNSIdentifier): + identifier.Type = ACMEDNSIdentifier + + 
// This check modifies the identifier if it is a wildcard, + // removing the non-wildcard portion. We do this before the + // IP address checks, in case of an attempt to bypass the IP/DNS + // check via including a leading wildcard (e.g., *.127.0.0.1). + // + // Per RFC 8555 Section 7.1.4. Authorization Objects: + // + // > Wildcard domain names (with "*" as the first label) MUST NOT + // > be included in authorization objects. + if _, _, err := identifier.MaybeParseWildcard(); err != nil { + return nil, fmt.Errorf("value argument (%s) failed validation: invalid wildcard: %v: %w", valueStr, err, ErrMalformed) + } + + if isIP := net.ParseIP(identifier.Value); isIP != nil { + return nil, fmt.Errorf("refusing to accept argument (%s) as DNS type identifier: parsed OK as IP address: %w", valueStr, ErrMalformed) + } + + // Use the reduced (identifier.Value) in case this was a wildcard + // domain. + p := idna.New(idna.ValidateForRegistration()) + converted, err := p.ToASCII(identifier.Value) + if err != nil { + return nil, fmt.Errorf("value argument (%s) failed validation: %s: %w", valueStr, err.Error(), ErrMalformed) + } + + // Per RFC 8555 Section 7.1.4. Authorization Objects: + // + // > The domain name MUST be encoded in the form in which it + // > would appear in a certificate. That is, it MUST be encoded + // > according to the rules in Section 7 of [RFC5280]. Servers + // > MUST verify any identifier values that begin with the + // > ASCII-Compatible Encoding prefix "xn--" as defined in + // > [RFC5890] are properly encoded. + if identifier.Value != converted { + return nil, fmt.Errorf("value argument (%s) failed IDNA round-tripping to ASCII: %w", valueStr, ErrMalformed) + } + default: + return nil, fmt.Errorf("unsupported identifier type %s: %w", typeStr, ErrUnsupportedIdentifier) + } + + identifiers = append(identifiers, identifier) + } + + return identifiers, nil +} + +func (b *backend) acmeTidyOrder(sc *storageContext, accountId string, orderPath string, certTidyBuffer time.Duration) (bool, time.Time, error) { + // First we get the order; note that the orderPath includes the account + // It's only accessed at acme/orders/ with the account context + // It's saved at acme//orders/ + entry, err := sc.Storage.Get(sc.Context, orderPath) + if err != nil { + return false, time.Time{}, fmt.Errorf("error loading order: %w", err) + } + if entry == nil { + return false, time.Time{}, fmt.Errorf("order does not exist: %w", ErrMalformed) + } + var order acmeOrder + err = entry.DecodeJSON(&order) + if err != nil { + return false, time.Time{}, fmt.Errorf("error decoding order: %w", err) + } + + // Determine whether we should tidy this order + shouldTidy := false + + // Track either the order expiry or certificate expiry to return to the caller, this + // can be used to influence the account's expiry + orderExpiry := order.CertificateExpiry + + // It is faster to check certificate information on the order entry rather than fetch the cert entry to parse: + if !order.CertificateExpiry.IsZero() { + // This implies that a certificate exists + // When a certificate exists, we want to expire and tidy the order when we tidy the certificate: + if time.Now().After(order.CertificateExpiry.Add(certTidyBuffer)) { // It's time to clean + shouldTidy = true + } + } else { + // This implies that no certificate exists + // In this case, we want to expire the order after it has expired (+ some safety buffer) + if time.Now().After(order.Expires) { + shouldTidy = true + } + orderExpiry = order.Expires + } + if shouldTidy 
== false { + return shouldTidy, orderExpiry, nil + } + + // Tidy this Order + // That includes any certificate acme//orders/orderPath/cert + // That also includes any related authorizations: acme//authorizations/ + + // First Authorizations + for _, authorizationId := range order.AuthorizationIds { + err = sc.Storage.Delete(sc.Context, getAuthorizationPath(accountId, authorizationId)) + if err != nil { + return false, orderExpiry, err + } + } + + // Normal Tidy will Take Care of the Certificate, we need to clean up the certificate to account tracker though + err = sc.Storage.Delete(sc.Context, getAcmeSerialToAccountTrackerPath(accountId, order.CertificateSerialNumber)) + if err != nil { + return false, orderExpiry, err + } + + // And Finally, the order: + err = sc.Storage.Delete(sc.Context, orderPath) + if err != nil { + return false, orderExpiry, err + } + b.tidyStatusIncDelAcmeOrderCount() + + return true, orderExpiry, nil +} diff --git a/builtin/logical/pki/path_acme_order_test.go b/builtin/logical/pki/path_acme_order_test.go new file mode 100644 index 0000000..5340bbd --- /dev/null +++ b/builtin/logical/pki/path_acme_order_test.go @@ -0,0 +1,142 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "net" + "testing" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/require" +) + +// TestACME_ValidateIdentifiersAgainstRole Verify the ACME order creation +// function verifies somewhat the identifiers that were provided have a +// decent chance of being allowed by the selected role. +func TestACME_ValidateIdentifiersAgainstRole(t *testing.T) { + b, _ := CreateBackendWithStorage(t) + + tests := []struct { + name string + role *roleEntry + identifiers []*ACMEIdentifier + expectErr bool + }{ + { + name: "verbatim-role-allows-dns-ip", + role: buildSignVerbatimRoleWithNoData(nil), + identifiers: _buildACMEIdentifiers("test.com", "127.0.0.1"), + expectErr: false, + }, + { + name: "default-role-does-not-allow-dns", + role: buildTestRole(t, nil), + identifiers: _buildACMEIdentifiers("www.test.com"), + expectErr: true, + }, + { + name: "default-role-allows-ip", + role: buildTestRole(t, nil), + identifiers: _buildACMEIdentifiers("192.168.0.1"), + expectErr: false, + }, + { + name: "disable-ip-sans-forbids-ip", + role: buildTestRole(t, map[string]interface{}{"allow_ip_sans": false}), + identifiers: _buildACMEIdentifiers("192.168.0.1"), + expectErr: true, + }, + { + name: "role-no-wildcards-allowed-without", + role: buildTestRole(t, map[string]interface{}{ + "allow_subdomains": true, + "allow_bare_domains": true, + "allowed_domains": []string{"test.com"}, + "allow_wildcard_certificates": false, + }), + identifiers: _buildACMEIdentifiers("www.test.com", "test.com"), + expectErr: false, + }, + { + name: "role-no-wildcards-allowed-with-wildcard", + role: buildTestRole(t, map[string]interface{}{ + "allow_subdomains": true, + "allowed_domains": []string{"test.com"}, + "allow_wildcard_certificates": false, + }), + identifiers: _buildACMEIdentifiers("*.test.com"), + expectErr: true, + }, + { + name: "role-wildcards-allowed-with-wildcard", + role: buildTestRole(t, map[string]interface{}{ + "allow_subdomains": true, + "allowed_domains": []string{"test.com"}, + "allow_wildcard_certificates": true, + }), + identifiers: _buildACMEIdentifiers("*.test.com"), + expectErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := 
b.validateIdentifiersAgainstRole(tt.role, tt.identifiers) + + if tt.expectErr { + require.Error(t, err, "validateIdentifiersAgainstRole(%v, %v)", tt.role.ToResponseData(), tt.identifiers) + // If we did return an error if should be classified as a ErrRejectedIdentifier + require.ErrorIs(t, err, ErrRejectedIdentifier) + } else { + require.NoError(t, err, "validateIdentifiersAgainstRole(%v, %v)", tt.role.ToResponseData(), tt.identifiers) + } + }) + } +} + +func _buildACMEIdentifiers(values ...string) []*ACMEIdentifier { + var identifiers []*ACMEIdentifier + + for _, value := range values { + identifiers = append(identifiers, _buildACMEIdentifier(value)) + } + + return identifiers +} + +func _buildACMEIdentifier(val string) *ACMEIdentifier { + ip := net.ParseIP(val) + if ip == nil { + identifier := &ACMEIdentifier{Type: "dns", Value: val, OriginalValue: val, IsWildcard: false} + _, _, _ = identifier.MaybeParseWildcard() + return identifier + } + + return &ACMEIdentifier{Type: "ip", Value: val, OriginalValue: val, IsWildcard: false} +} + +// Easily allow tests to create valid roles with proper defaults, since we don't have an easy +// way to generate roles with proper defaults, go through the createRole handler with the handlers +// field data so we pickup all the defaults specified there. +func buildTestRole(t *testing.T, config map[string]interface{}) *roleEntry { + b, s := CreateBackendWithStorage(t) + + path := pathRoles(b) + fields := path.Fields + if config == nil { + config = map[string]interface{}{} + } + + if _, exists := config["name"]; !exists { + config["name"] = genUuid() + } + + _, err := b.pathRoleCreate(ctx, &logical.Request{Storage: s}, &framework.FieldData{Raw: config, Schema: fields}) + require.NoError(t, err, "failed generating role with config %v", config) + + role, err := b.getRole(ctx, s, config["name"].(string)) + require.NoError(t, err, "failed loading stored role") + + return role +} diff --git a/builtin/logical/pki/path_acme_revoke.go b/builtin/logical/pki/path_acme_revoke.go new file mode 100644 index 0000000..fe81eb2 --- /dev/null +++ b/builtin/logical/pki/path_acme_revoke.go @@ -0,0 +1,182 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "bytes" + "crypto" + "crypto/x509" + "encoding/base64" + "fmt" + "time" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathAcmeRevoke(b *backend) []*framework.Path { + return buildAcmeFrameworkPaths(b, patternAcmeRevoke, "/revoke-cert") +} + +func patternAcmeRevoke(b *backend, pattern string) *framework.Path { + fields := map[string]*framework.FieldSchema{} + addFieldsForACMEPath(fields, pattern) + addFieldsForACMERequest(fields) + + return &framework.Path{ + Pattern: pattern, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.acmeParsedWrapper(b.acmeRevocationHandler), + ForwardPerformanceSecondary: false, + ForwardPerformanceStandby: true, + }, + }, + + HelpSynopsis: pathAcmeHelpSync, + HelpDescription: pathAcmeHelpDesc, + } +} + +func (b *backend) acmeRevocationHandler(acmeCtx *acmeContext, _ *logical.Request, _ *framework.FieldData, userCtx *jwsCtx, data map[string]interface{}) (*logical.Response, error) { + var cert *x509.Certificate + + rawCertificate, present := data["certificate"] + if present { + certBase64, ok := rawCertificate.(string) + if !ok { + return nil, fmt.Errorf("invalid type (%T; expected string) for field 'certificate': %w", rawCertificate, ErrMalformed) + } + + certBytes, err := base64.RawURLEncoding.DecodeString(certBase64) + if err != nil { + return nil, fmt.Errorf("failed to base64 decode certificate: %v: %w", err, ErrMalformed) + } + + cert, err = x509.ParseCertificate(certBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %v: %w", err, ErrMalformed) + } + } else { + return nil, fmt.Errorf("bad request was lacking required field 'certificate': %w", ErrMalformed) + } + + rawReason, present := data["reason"] + if present { + reason, ok := rawReason.(float64) + if !ok { + return nil, fmt.Errorf("invalid type (%T; expected float64) for field 'reason': %w", rawReason, ErrMalformed) + } + + if int(reason) != 0 { + return nil, fmt.Errorf("Vault does not support revocation reasons (got %v; expected omitted or 0/unspecified): %w", int(reason), ErrBadRevocationReason) + } + } + + // If the certificate expired, there's no point in revoking it. + if cert.NotAfter.Before(time.Now()) { + return nil, fmt.Errorf("refusing to revoke expired certificate: %w", ErrMalformed) + } + + // Fetch the CRL config as we need it to ultimately do the + // revocation. This should be cached and thus relatively fast. + config, err := b.crlBuilder.getConfigWithUpdate(acmeCtx.sc) + if err != nil { + return nil, fmt.Errorf("unable to revoke certificate: failed reading revocation config: %v: %w", err, ErrServerInternal) + } + + // Load our certificate from storage to ensure it exists and matches + // what was given to us. + serial := serialFromCert(cert) + certEntry, err := fetchCertBySerial(acmeCtx.sc, "certs/", serial) + if err != nil { + return nil, fmt.Errorf("unable to revoke certificate: err reading global cert entry: %v: %w", err, ErrServerInternal) + } + if certEntry == nil { + return nil, fmt.Errorf("unable to revoke certificate: no global cert entry found: %w", ErrServerInternal) + } + + // Validate that the provided certificate matches the stored + // certificate. This completes the chain of: + // + // provided_auth -> provided_cert == stored cert. + // + // Allowing revocation to be safe. 
+ // + // We use the non-subtle unsafe bytes equality check here as we have + // already fetched this certificate from storage, thus already leaking + // timing information that this cert exists. The user could thus simply + // fetch the cert from Vault matching this serial number via the unauthed + // pki/certs/:serial API endpoint. + if !bytes.Equal(certEntry.Value, cert.Raw) { + return nil, fmt.Errorf("unable to revoke certificate: supplied certificate does not match CA's stored value: %w", ErrMalformed) + } + + // Check if it was already revoked; in this case, we do not need to + // revoke it again and want to respond with an appropriate error message. + revEntry, err := fetchCertBySerial(acmeCtx.sc, "revoked/", serial) + if err != nil { + return nil, fmt.Errorf("unable to revoke certificate: err reading revocation entry: %v: %w", err, ErrServerInternal) + } + if revEntry != nil { + return nil, fmt.Errorf("unable to revoke certificate: %w", ErrAlreadyRevoked) + } + + // Finally, do the relevant permissions/authorization check as + // appropriate based on the type of revocation happening. + if !userCtx.Existing { + return b.acmeRevocationByPoP(acmeCtx, userCtx, cert, config) + } + + return b.acmeRevocationByAccount(acmeCtx, userCtx, cert, config) +} + +func (b *backend) acmeRevocationByPoP(acmeCtx *acmeContext, userCtx *jwsCtx, cert *x509.Certificate, config *crlConfig) (*logical.Response, error) { + // Since this account does not exist, ensure we've gotten a private key + // matching the certificate's public key. This private key isn't + // explicitly provided, but instead provided by proxy (public key, + // signature over message). That signature is validated by an earlier + // wrapper (VerifyJWS called by ParseRequestParams). What still remains + // is validating that this implicit private key (with given public key + // and valid JWS signature) matches the certificate's public key. + givenPublic, ok := userCtx.Key.Key.(crypto.PublicKey) + if !ok { + return nil, fmt.Errorf("unable to revoke certificate: unable to parse message header's JWS key of type (%T): %w", userCtx.Key.Key, ErrMalformed) + } + + // Ensure that our PoP's implicit private key matches this certificate's + // public key. + if err := validatePublicKeyMatchesCert(givenPublic, cert); err != nil { + return nil, fmt.Errorf("unable to revoke certificate: unable to verify proof of possession of private key provided by proxy: %v: %w", err, ErrMalformed) + } + + // Now it is safe to revoke. + b.revokeStorageLock.Lock() + defer b.revokeStorageLock.Unlock() + + return revokeCert(acmeCtx.sc, config, cert) +} + +func (b *backend) acmeRevocationByAccount(acmeCtx *acmeContext, userCtx *jwsCtx, cert *x509.Certificate, config *crlConfig) (*logical.Response, error) { + // Fetch the account; disallow revocations from non-valid-status accounts. + _, err := requireValidAcmeAccount(acmeCtx, userCtx) + if err != nil { + return nil, fmt.Errorf("failed to lookup account: %w", err) + } + + // We only support certificates issued by this user, we don't support + // cross-account revocations. + serial := serialFromCert(cert) + acmeEntry, err := b.acmeState.GetIssuedCert(acmeCtx, userCtx.Kid, serial) + if err != nil || acmeEntry == nil { + return nil, fmt.Errorf("unable to revoke certificate: %v: %w", err, ErrMalformed) + } + + // Now it is safe to revoke. 
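+	// (Both revocation paths converge here: proof-of-possession requests were
+	// verified against the certificate's public key above, while account-based
+	// requests required a valid account plus an issued-cert index entry tying
+	// this serial to the requesting account.)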
+ b.revokeStorageLock.Lock() + defer b.revokeStorageLock.Unlock() + + return revokeCert(acmeCtx.sc, config, cert) +} diff --git a/builtin/logical/pki/path_acme_test.go b/builtin/logical/pki/path_acme_test.go new file mode 100644 index 0000000..5a7b470 --- /dev/null +++ b/builtin/logical/pki/path_acme_test.go @@ -0,0 +1,1832 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "os" + "path" + "strings" + "testing" + "time" + + "github.com/hashicorp/vault/sdk/helper/certutil" + + "github.com/go-test/deep" + "github.com/stretchr/testify/require" + "golang.org/x/crypto/acme" + "golang.org/x/net/http2" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/pki/dnstest" + "github.com/hashicorp/vault/helper/constants" + "github.com/hashicorp/vault/helper/testhelpers" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +// TestAcmeBasicWorkflow a test that will validate a basic ACME workflow using the Golang ACME client. +func TestAcmeBasicWorkflow(t *testing.T) { + t.Parallel() + cluster, client, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + cases := []struct { + name string + prefixUrl string + }{ + {"root", "acme/"}, + {"role", "roles/test-role/acme/"}, + {"issuer", "issuer/int-ca/acme/"}, + {"issuer_role", "issuer/int-ca/roles/test-role/acme/"}, + } + testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + baseAcmeURL := "/v1/pki/" + tc.prefixUrl + accountKey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err, "failed creating rsa key") + + acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) + + t.Logf("Testing discover on %s", baseAcmeURL) + discovery, err := acmeClient.Discover(testCtx) + require.NoError(t, err, "failed acme discovery call") + + discoveryBaseUrl := client.Address() + baseAcmeURL + require.Equal(t, discoveryBaseUrl+"new-nonce", discovery.NonceURL) + require.Equal(t, discoveryBaseUrl+"new-account", discovery.RegURL) + require.Equal(t, discoveryBaseUrl+"new-order", discovery.OrderURL) + require.Equal(t, discoveryBaseUrl+"revoke-cert", discovery.RevokeURL) + require.Equal(t, discoveryBaseUrl+"key-change", discovery.KeyChangeURL) + require.False(t, discovery.ExternalAccountRequired, "bad value for external account required in directory") + + // Attempt to update prior to creating an account + t.Logf("Testing updates with no proper account fail on %s", baseAcmeURL) + _, err = acmeClient.UpdateReg(testCtx, &acme.Account{Contact: []string{"mailto:shouldfail@example.com"}}) + require.ErrorIs(t, err, acme.ErrNoAccount, "expected failure attempting to update prior to account registration") + + // Create new account + t.Logf("Testing register on %s", baseAcmeURL) + acct, err := acmeClient.Register(testCtx, &acme.Account{ + Contact: []string{"mailto:test@example.com", "mailto:test2@test.com"}, + }, func(tosURL string) bool { return true }) + require.NoError(t, err, "failed registering account") + require.Equal(t, acme.StatusValid, acct.Status) + require.Contains(t, acct.Contact, 
"mailto:test@example.com") + require.Contains(t, acct.Contact, "mailto:test2@test.com") + require.Len(t, acct.Contact, 2) + + // Call register again we should get existing account + t.Logf("Testing duplicate register returns existing account on %s", baseAcmeURL) + _, err = acmeClient.Register(testCtx, acct, func(tosURL string) bool { return true }) + require.ErrorIs(t, err, acme.ErrAccountAlreadyExists, + "We should have returned a 200 status code which would have triggered an error in the golang acme"+ + " library") + + // Update contact + t.Logf("Testing Update account contacts on %s", baseAcmeURL) + acct.Contact = []string{"mailto:test3@example.com"} + acct2, err := acmeClient.UpdateReg(testCtx, acct) + require.NoError(t, err, "failed updating account") + require.Equal(t, acme.StatusValid, acct2.Status) + // We should get this back, not the original values. + require.Contains(t, acct2.Contact, "mailto:test3@example.com") + require.Len(t, acct2.Contact, 1) + + // Make sure order's do not accept dates + _, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{{Type: "dns", Value: "localhost"}}, + acme.WithOrderNotBefore(time.Now().Add(10*time.Minute))) + require.Error(t, err, "should have rejected a new order with NotBefore set") + + _, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{{Type: "dns", Value: "localhost"}}, + acme.WithOrderNotAfter(time.Now().Add(10*time.Minute))) + require.Error(t, err, "should have rejected a new order with NotAfter set") + + // Make sure DNS identifiers cannot include IP addresses + _, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{{Type: "dns", Value: "127.0.0.1"}}, + acme.WithOrderNotAfter(time.Now().Add(10*time.Minute))) + require.Error(t, err, "should have rejected a new order with IP-like DNS-type identifier") + _, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{{Type: "dns", Value: "*.127.0.0.1"}}, + acme.WithOrderNotAfter(time.Now().Add(10*time.Minute))) + require.Error(t, err, "should have rejected a new order with IP-like DNS-type identifier") + + // Create an order + t.Logf("Testing Authorize Order on %s", baseAcmeURL) + identifiers := []string{"localhost.localdomain", "*.localdomain"} + createOrder, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{ + {Type: "dns", Value: identifiers[0]}, + {Type: "dns", Value: identifiers[1]}, + }) + require.NoError(t, err, "failed creating order") + require.Equal(t, acme.StatusPending, createOrder.Status) + require.Empty(t, createOrder.CertURL) + require.Equal(t, createOrder.URI+"/finalize", createOrder.FinalizeURL) + require.Len(t, createOrder.AuthzURLs, 2, "expected two authzurls") + + // Get order + t.Logf("Testing GetOrder on %s", baseAcmeURL) + getOrder, err := acmeClient.GetOrder(testCtx, createOrder.URI) + require.NoError(t, err, "failed fetching order") + require.Equal(t, acme.StatusPending, createOrder.Status) + if diffs := deep.Equal(createOrder, getOrder); diffs != nil { + t.Fatalf("Differences exist between create and get order: \n%v", strings.Join(diffs, "\n")) + } + + // Make sure the identifiers returned in the order contain the original values + var ids []string + for _, id := range getOrder.Identifiers { + require.Equal(t, "dns", id.Type) + ids = append(ids, id.Value) + } + require.ElementsMatch(t, identifiers, ids, "order responses should have all original identifiers") + + // Load authorizations + var authorizations []*acme.Authorization + for _, authUrl := range getOrder.AuthzURLs { + auth, err := acmeClient.GetAuthorization(testCtx, authUrl) + 
require.NoError(t, err, "failed fetching authorization: %s", authUrl) + + authorizations = append(authorizations, auth) + } + + // We should have 2 separate auth challenges as we have two separate identifier + require.Len(t, authorizations, 2, "expected 2 authorizations in order") + + var wildcardAuth *acme.Authorization + var domainAuth *acme.Authorization + for _, auth := range authorizations { + if auth.Wildcard { + wildcardAuth = auth + } else { + domainAuth = auth + } + } + + // Test the values for the domain authentication + require.Equal(t, acme.StatusPending, domainAuth.Status) + require.Equal(t, "dns", domainAuth.Identifier.Type) + require.Equal(t, "localhost.localdomain", domainAuth.Identifier.Value) + require.False(t, domainAuth.Wildcard, "should not be a wildcard") + require.True(t, domainAuth.Expires.IsZero(), "authorization should only have expiry set on valid status") + + require.Len(t, domainAuth.Challenges, 3, "expected three challenges") + require.Equal(t, acme.StatusPending, domainAuth.Challenges[0].Status) + require.True(t, domainAuth.Challenges[0].Validated.IsZero(), "validated time should be 0 on challenge") + require.Equal(t, "http-01", domainAuth.Challenges[0].Type) + require.NotEmpty(t, domainAuth.Challenges[0].Token, "missing challenge token") + require.Equal(t, acme.StatusPending, domainAuth.Challenges[1].Status) + require.True(t, domainAuth.Challenges[1].Validated.IsZero(), "validated time should be 0 on challenge") + require.Equal(t, "dns-01", domainAuth.Challenges[1].Type) + require.NotEmpty(t, domainAuth.Challenges[1].Token, "missing challenge token") + require.Equal(t, acme.StatusPending, domainAuth.Challenges[2].Status) + require.True(t, domainAuth.Challenges[2].Validated.IsZero(), "validated time should be 0 on challenge") + require.Equal(t, "tls-alpn-01", domainAuth.Challenges[2].Type) + require.NotEmpty(t, domainAuth.Challenges[2].Token, "missing challenge token") + + // Test the values for the wildcard authentication + require.Equal(t, acme.StatusPending, wildcardAuth.Status) + require.Equal(t, "dns", wildcardAuth.Identifier.Type) + require.Equal(t, "localdomain", wildcardAuth.Identifier.Value) // Make sure we strip the *. in auth responses + require.True(t, wildcardAuth.Wildcard, "should be a wildcard") + require.True(t, wildcardAuth.Expires.IsZero(), "authorization should only have expiry set on valid status") + + require.Len(t, wildcardAuth.Challenges, 1, "expected one challenge") + require.Equal(t, acme.StatusPending, domainAuth.Challenges[0].Status) + require.True(t, wildcardAuth.Challenges[0].Validated.IsZero(), "validated time should be 0 on challenge") + require.Equal(t, "dns-01", wildcardAuth.Challenges[0].Type) + require.NotEmpty(t, domainAuth.Challenges[0].Token, "missing challenge token") + + // Make sure that getting a challenge does not start it. + challenge, err := acmeClient.GetChallenge(testCtx, domainAuth.Challenges[0].URI) + require.NoError(t, err, "failed to load challenge") + require.Equal(t, acme.StatusPending, challenge.Status) + require.True(t, challenge.Validated.IsZero(), "validated time should be 0 on challenge") + require.Equal(t, "http-01", challenge.Type) + + // Accept a challenge; this triggers validation to start. 
+ challenge, err = acmeClient.Accept(testCtx, domainAuth.Challenges[0])
+ require.NoError(t, err, "failed to load challenge")
+ require.Equal(t, acme.StatusProcessing, challenge.Status)
+ require.True(t, challenge.Validated.IsZero(), "validated time should be 0 on challenge")
+ require.Equal(t, "http-01", challenge.Type)
+
+ require.NotEmpty(t, challenge.Token, "missing challenge token")
+
+ // HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow
+ // test.
+ markAuthorizationSuccess(t, client, acmeClient, acct, getOrder)
+
+ // Make sure sending a CSR with the account key gets rejected.
+ goodCr := &x509.CertificateRequest{
+ Subject: pkix.Name{CommonName: identifiers[1]},
+ DNSNames: []string{identifiers[0], identifiers[1]},
+ }
+ t.Logf("csr: %v", goodCr)
+
+ // We want to make sure people are not using the same keys for CSR/Certs and their ACME account.
+ csrSignedWithAccountKey, err := x509.CreateCertificateRequest(rand.Reader, goodCr, accountKey)
+ require.NoError(t, err, "failed generating csr")
+ _, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrSignedWithAccountKey, true)
+ require.Error(t, err, "should not be allowed to use the account key for a CSR")
+
+ csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ require.NoError(t, err, "failed generating key for CSR")
+
+ // Validate we reject CSRs that contain a CN that isn't in the original order
+ badCr := &x509.CertificateRequest{
+ Subject: pkix.Name{CommonName: "not-in-original-order.com"},
+ DNSNames: []string{identifiers[0], identifiers[1]},
+ }
+ t.Logf("csr: %v", badCr)
+
+ csrWithBadCName, err := x509.CreateCertificateRequest(rand.Reader, badCr, csrKey)
+ require.NoError(t, err, "failed generating csr with bad common name")
+
+ _, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrWithBadCName, true)
+ require.Error(t, err, "should not be allowed to use a CSR with different common names than the order")
+
+ // Validate we reject CSRs that contain DNS names that aren't in the original order
+ badCr = &x509.CertificateRequest{
+ Subject: pkix.Name{CommonName: createOrder.Identifiers[0].Value},
+ DNSNames: []string{"www.notinorder.com"},
+ }
+
+ csrWithBadName, err := x509.CreateCertificateRequest(rand.Reader, badCr, csrKey)
+ require.NoError(t, err, "failed generating csr with bad name")
+
+ _, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrWithBadName, true)
+ require.Error(t, err, "should not be allowed to use a CSR with different names than the order")
+
+ // Validate we reject CSRs that contain IP addresses that weren't in the original order
+ badCr = &x509.CertificateRequest{
+ Subject: pkix.Name{CommonName: createOrder.Identifiers[0].Value},
+ IPAddresses: []net.IP{{127, 0, 0, 1}},
+ }
+
+ csrWithBadIP, err := x509.CreateCertificateRequest(rand.Reader, badCr, csrKey)
+ require.NoError(t, err, "failed generating csr with bad IP")
+
+ _, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrWithBadIP, true)
+ require.Error(t, err, "should not be allowed to use a CSR with different IP addresses than the order")
+
+ // Validate we reject CSRs that contain fewer names than in the original order.
+ badCr = &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: identifiers[0]}, + } + + csrWithBadName, err = x509.CreateCertificateRequest(rand.Reader, badCr, csrKey) + require.NoError(t, err, "failed generating csr with bad name") + + _, _, err = acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csrWithBadName, true) + require.Error(t, err, "should not be allowed to csr with different names than order") + + // Finally test a proper CSR, with the correct name and signed with a different key works. + csr, err := x509.CreateCertificateRequest(rand.Reader, goodCr, csrKey) + require.NoError(t, err, "failed generating csr") + + certs, _, err := acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csr, true) + require.NoError(t, err, "failed finalizing order") + require.Len(t, certs, 3, "expected three items within the returned certs") + + testAcmeCertSignedByCa(t, client, certs, "int-ca") + + // Make sure the certificate has a NotAfter date of a maximum of 90 days + acmeCert, err := x509.ParseCertificate(certs[0]) + require.NoError(t, err, "failed parsing acme cert bytes") + maxAcmeNotAfter := time.Now().Add(maxAcmeCertTTL) + if maxAcmeNotAfter.Before(acmeCert.NotAfter) { + require.Fail(t, fmt.Sprintf("certificate has a NotAfter value %v greater than ACME max ttl %v", acmeCert.NotAfter, maxAcmeNotAfter)) + } + + // Can we revoke it using the account key revocation + err = acmeClient.RevokeCert(ctx, nil, certs[0], acme.CRLReasonUnspecified) + require.NoError(t, err, "failed to revoke certificate through account key") + + // Make sure it was actually revoked + certResp, err := client.Logical().ReadWithContext(ctx, "pki/cert/"+serialFromCert(acmeCert)) + require.NoError(t, err, "failed to read certificate status") + require.NotNil(t, certResp, "certificate status response was nil") + revocationTime := certResp.Data["revocation_time"].(json.Number) + revocationTimeInt, err := revocationTime.Int64() + require.NoError(t, err, "failed converting revocation_time value: %v", revocationTime) + require.Greater(t, revocationTimeInt, int64(0), + "revocation time was not greater than 0, revocation did not work value was: %v", revocationTimeInt) + + // Make sure we can revoke an authorization as a client + err = acmeClient.RevokeAuthorization(ctx, authorizations[0].URI) + require.NoError(t, err, "failed revoking authorization status") + + revokedAuth, err := acmeClient.GetAuthorization(ctx, authorizations[0].URI) + require.NoError(t, err, "failed fetching authorization") + require.Equal(t, acme.StatusDeactivated, revokedAuth.Status) + + // Deactivate account + t.Logf("Testing deactivate account on %s", baseAcmeURL) + err = acmeClient.DeactivateReg(testCtx) + require.NoError(t, err, "failed deactivating account") + + // Make sure we get an unauthorized error trying to update the account again. + t.Logf("Testing update on deactivated account fails on %s", baseAcmeURL) + _, err = acmeClient.UpdateReg(testCtx, acct) + require.Error(t, err, "expected account to be deactivated") + require.IsType(t, &acme.Error{}, err, "expected acme error type") + acmeErr := err.(*acme.Error) + require.Equal(t, "urn:ietf:params:acme:error:unauthorized", acmeErr.ProblemType) + }) + } +} + +// TestAcmeBasicWorkflowWithEab verify that new accounts require EAB's if enforced by configuration. 
+func TestAcmeBasicWorkflowWithEab(t *testing.T) { + t.Parallel() + cluster, client, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + // Enable EAB + _, err := client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{ + "enabled": true, + "eab_policy": "always-required", + }) + require.NoError(t, err) + + cases := []struct { + name string + prefixUrl string + }{ + {"root", "acme/"}, + {"role", "roles/test-role/acme/"}, + {"issuer", "issuer/int-ca/acme/"}, + {"issuer_role", "issuer/int-ca/roles/test-role/acme/"}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + baseAcmeURL := "/v1/pki/" + tc.prefixUrl + accountKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed creating ec key") + + acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) + + t.Logf("Testing discover on %s", baseAcmeURL) + discovery, err := acmeClient.Discover(testCtx) + require.NoError(t, err, "failed acme discovery call") + require.True(t, discovery.ExternalAccountRequired, "bad value for external account required in directory") + + // Create new account without EAB, should fail + t.Logf("Testing register on %s", baseAcmeURL) + _, err = acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true }) + require.ErrorContains(t, err, "urn:ietf:params:acme:error:externalAccountRequired", + "expected failure creating an account without eab") + + // Test fetch, list, delete workflow + kid, _ := getEABKey(t, client, tc.prefixUrl) + resp, err := client.Logical().ListWithContext(testCtx, "pki/eab") + require.NoError(t, err, "failed to list eab tokens") + require.NotNil(t, resp, "list response for eab tokens should not be nil") + require.Contains(t, resp.Data, "keys") + require.Contains(t, resp.Data, "key_info") + require.Len(t, resp.Data["keys"], 1) + require.Contains(t, resp.Data["keys"], kid) + + _, err = client.Logical().DeleteWithContext(testCtx, "pki/eab/"+kid) + require.NoError(t, err, "failed to delete eab") + + // List eabs should return zero results + resp, err = client.Logical().ListWithContext(testCtx, "pki/eab") + require.NoError(t, err, "failed to list eab tokens") + require.Nil(t, resp, "list response for eab tokens should have been nil") + + // fetch a new EAB + kid, eabKeyBytes := getEABKey(t, client, tc.prefixUrl) + acct := &acme.Account{ + ExternalAccountBinding: &acme.ExternalAccountBinding{ + KID: kid, + Key: eabKeyBytes, + }, + } + + // Make sure we can list our key + resp, err = client.Logical().ListWithContext(testCtx, "pki/eab") + require.NoError(t, err, "failed to list eab tokens") + require.NotNil(t, resp, "list response for eab tokens should not be nil") + require.Contains(t, resp.Data, "keys") + require.Contains(t, resp.Data, "key_info") + require.Len(t, resp.Data["keys"], 1) + require.Contains(t, resp.Data["keys"], kid) + + keyInfo := resp.Data["key_info"].(map[string]interface{}) + require.Contains(t, keyInfo, kid) + + infoForKid := keyInfo[kid].(map[string]interface{}) + require.Equal(t, "hs", infoForKid["key_type"]) + require.Equal(t, tc.prefixUrl+"directory", infoForKid["acme_directory"]) + + // Create new account with EAB + t.Logf("Testing register on %s", baseAcmeURL) + _, err = acmeClient.Register(testCtx, acct, func(tosURL string) bool { return true }) + require.NoError(t, err, "failed registering new account with eab") + + // Make sure our EAB is 
no longer available
+ resp, err = client.Logical().ListWithContext(context.Background(), "pki/eab")
+ require.NoError(t, err, "failed to list eab tokens")
+ require.Nil(t, resp, "list response for eab tokens should have been nil due to empty list")
+
+ // Attempt to create another account with the same EAB as before -- should fail
+ accountKey2, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ require.NoError(t, err, "failed creating ec key")
+
+ acmeClient2 := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey2)
+ acct2 := &acme.Account{
+ ExternalAccountBinding: &acme.ExternalAccountBinding{
+ KID: kid,
+ Key: eabKeyBytes,
+ },
+ }
+
+ _, err = acmeClient2.Register(testCtx, acct2, func(tosURL string) bool { return true })
+ require.ErrorContains(t, err, "urn:ietf:params:acme:error:unauthorized", "should fail due to EAB re-use")
+
+ // We can lookup/find an existing account without EAB if we have the account key
+ _, err = acmeClient.GetReg(testCtx /* unused url */, "")
+ require.NoError(t, err, "expected to lookup existing account without eab")
+ })
+ }
+}
+
+// TestAcmeNonce a basic test that will validate we get back a nonce with the proper status codes
+// based on the HTTP method used (HEAD returns 200, GET returns 204).
+func TestAcmeNonce(t *testing.T) {
+ t.Parallel()
+ cluster, client, pathConfig := setupAcmeBackend(t)
+ defer cluster.Cleanup()
+
+ cases := []struct {
+ name string
+ prefixUrl string
+ directoryUrl string
+ }{
+ {"root", "", "pki/acme/new-nonce"},
+ {"role", "/roles/test-role", "pki/roles/test-role/acme/new-nonce"},
+ {"issuer", "/issuer/default", "pki/issuer/default/acme/new-nonce"},
+ {"issuer_role", "/issuer/default/roles/test-role", "pki/issuer/default/roles/test-role/acme/new-nonce"},
+ }
+
+ for _, tc := range cases {
+ for _, httpOp := range []string{"get", "header"} {
+ t.Run(fmt.Sprintf("%s-%s", tc.name, httpOp), func(t *testing.T) {
+ var req *api.Request
+ switch httpOp {
+ case "get":
+ req = client.NewRequest(http.MethodGet, "/v1/"+tc.directoryUrl)
+ case "header":
+ req = client.NewRequest(http.MethodHead, "/v1/"+tc.directoryUrl)
+ }
+ res, err := client.RawRequestWithContext(ctx, req)
+ require.NoError(t, err, "failed sending raw request")
+ _ = res.Body.Close()
+
+ // Proper Status Code
+ switch httpOp {
+ case "get":
+ require.Equal(t, http.StatusNoContent, res.StatusCode)
+ case "header":
+ require.Equal(t, http.StatusOK, res.StatusCode)
+ }
+
+ // Make sure we don't have a Content-Type header.
+ require.Equal(t, "", res.Header.Get("Content-Type"))
+
+ // Make sure we return the Cache-Control header
+ require.Contains(t, res.Header.Get("Cache-Control"), "no-store",
+ "missing Cache-Control header with no-store header value")
+
+ // Test for our nonce header value
+ require.NotEmpty(t, res.Header.Get("Replay-Nonce"), "missing Replay-Nonce header with an actual value")
+
+ // Test Link header value
+ expectedLinkHeader := fmt.Sprintf("<%s>;rel=\"index\"", pathConfig+tc.prefixUrl+"/acme/directory")
+ require.Contains(t, res.Header.Get("Link"), expectedLinkHeader,
+ "different value for link header than expected")
+ })
+ }
+ }
+}
+
+// TestAcmeClusterPathNotConfigured provides basic testing of the ACME error handler.
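+// The ACME server needs the cluster path from config/cluster to build absolute
+// directory URLs; below we force an enabled-but-unconfigured state through
+// sys/raw and expect the resulting 500 to carry an RFC 7807 problem document.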
+func TestAcmeClusterPathNotConfigured(t *testing.T) { + t.Parallel() + cluster, client := setupTestPkiCluster(t) + defer cluster.Cleanup() + + // Go sneaky, sneaky and update the acme configuration through sys/raw to bypass config/cluster path checks + pkiMount := findStorageMountUuid(t, client, "pki") + rawPath := path.Join("/sys/raw/logical/", pkiMount, storageAcmeConfig) + _, err := client.Logical().WriteWithContext(context.Background(), rawPath, map[string]interface{}{ + "value": "{\"enabled\": true, \"eab_policy_name\": \"not-required\"}", + }) + require.NoError(t, err, "failed updating acme config through sys/raw") + + // Force reload the plugin so we read the new config we slipped in. + _, err = client.Sys().ReloadPluginWithContext(context.Background(), &api.ReloadPluginInput{Mounts: []string{"pki"}}) + require.NoError(t, err, "failed reloading plugin") + + // Do not fill in the path option within the local cluster configuration + cases := []struct { + name string + directoryUrl string + }{ + {"root", "pki/acme/directory"}, + {"role", "pki/roles/test-role/acme/directory"}, + {"issuer", "pki/issuer/default/acme/directory"}, + {"issuer_role", "pki/issuer/default/roles/test-role/acme/directory"}, + } + testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + dirResp, err := client.Logical().ReadRawWithContext(testCtx, tc.directoryUrl) + require.Error(t, err, "expected failure reading ACME directory configuration got none") + + require.Equal(t, "application/problem+json", dirResp.Header.Get("Content-Type")) + require.Equal(t, http.StatusInternalServerError, dirResp.StatusCode) + + rawBodyBytes, err := io.ReadAll(dirResp.Body) + require.NoError(t, err, "failed reading from directory response body") + _ = dirResp.Body.Close() + + respType := map[string]interface{}{} + err = json.Unmarshal(rawBodyBytes, &respType) + require.NoError(t, err, "failed unmarshalling ACME directory response body") + + require.Equal(t, "urn:ietf:params:acme:error:serverInternal", respType["type"]) + require.NotEmpty(t, respType["detail"]) + }) + } +} + +// TestAcmeAccountsCrossingDirectoryPath make sure that if an account attempts to use a different ACME +// directory path that we get an error. 
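+// Account keys are bound to the directory they registered under, so presenting
+// the same account key against a second directory must be rejected.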
+func TestAcmeAccountsCrossingDirectoryPath(t *testing.T) {
+ t.Parallel()
+ cluster, _, _ := setupAcmeBackend(t)
+ defer cluster.Cleanup()
+
+ baseAcmeURL := "/v1/pki/acme/"
+ accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
+ require.NoError(t, err, "failed creating rsa key")
+
+ testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+ defer cancel()
+ acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey)
+
+ // Create new account
+ acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true })
+ require.NoError(t, err, "failed registering account")
+
+ // Try to update the account under another ACME directory
+ baseAcmeURL2 := "/v1/pki/roles/test-role/acme/"
+ acmeClient2 := getAcmeClientForCluster(t, cluster, baseAcmeURL2, accountKey)
+ acct.Contact = []string{"mailto:test3@example.com"}
+ _, err = acmeClient2.UpdateReg(testCtx, acct)
+ require.Error(t, err, "successfully updated account when we should have failed due to different directory")
+ // We don't test for the specific error about using the wrong directory, as the golang library
+ // swallows the error we are sending back into a generic no-account error
+}
+
+// TestAcmeEabCrossingDirectoryPath makes sure that if an account attempts to register under a
+// different ACME directory path than the one its EAB was created within, we get an error.
+func TestAcmeEabCrossingDirectoryPath(t *testing.T) {
+ t.Parallel()
+ cluster, client, _ := setupAcmeBackend(t)
+ defer cluster.Cleanup()
+
+ // Enable EAB
+ _, err := client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{
+ "enabled": true,
+ "eab_policy": "always-required",
+ })
+ require.NoError(t, err)
+
+ baseAcmeURL := "/v1/pki/acme/"
+ accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
+ require.NoError(t, err, "failed creating rsa key")
+
+ testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+ defer cancel()
+ acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey)
+
+ // fetch a new EAB
+ kid, eabKeyBytes := getEABKey(t, client, "roles/test-role/acme/")
+ acct := &acme.Account{
+ ExternalAccountBinding: &acme.ExternalAccountBinding{
+ KID: kid,
+ Key: eabKeyBytes,
+ },
+ }
+
+ // Create new account
+ _, err = acmeClient.Register(testCtx, acct, func(tosURL string) bool { return true })
+ require.ErrorContains(t, err, "failed to verify eab", "should have failed as EAB is for a different directory")
+}
+
+// TestAcmeDisabledWithEnvVar verifies that if VAULT_DISABLE_PUBLIC_ACME is set, we completely
+// disable the ACME service.
+func TestAcmeDisabledWithEnvVar(t *testing.T) {
+ // Set up a cluster with the eab_policy initially set to not-required; the configuration
+ // write is validated against the environment var, so it must happen before the var is set.
+ cluster, client, _ := setupAcmeBackend(t)
+ defer cluster.Cleanup()
+
+ // Seal, set the environment variable, and unseal, which means we now have a cluster
+ // whose ACME configuration says it is enabled with what is now a bad EAB policy.
+ cluster.EnsureCoresSealed(t)
+ t.Setenv("VAULT_DISABLE_PUBLIC_ACME", "true")
+ cluster.UnsealCores(t)
+
+ // Make sure that ACME is disabled now.
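+ // Both HEAD and GET against new-nonce should now fail; with the env var
+ // set the ACME endpoints respond with a 404 as if they were never mounted.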
+ for _, method := range []string{http.MethodHead, http.MethodGet} {
+ t.Run(method, func(t *testing.T) {
+ req := client.NewRequest(method, "/v1/pki/acme/new-nonce")
+ _, err := client.RawRequestWithContext(ctx, req)
+ require.Error(t, err, "should have received an error as ACME should have been disabled")
+
+ if apiError, ok := err.(*api.ResponseError); ok {
+ require.Equal(t, 404, apiError.StatusCode)
+ }
+ })
+ }
+}
+
+// TestAcmeConfigChecksPublicAcmeEnv verifies certain EAB policy values cannot be set if the env var is enabled
+func TestAcmeConfigChecksPublicAcmeEnv(t *testing.T) {
+ t.Setenv("VAULT_DISABLE_PUBLIC_ACME", "true")
+ cluster, client := setupTestPkiCluster(t)
+ defer cluster.Cleanup()
+
+ _, err := client.Logical().WriteWithContext(context.Background(), "pki/config/cluster", map[string]interface{}{
+ "path": "https://dadgarcorp.com/v1/pki",
+ })
+ require.NoError(t, err)
+
+ _, err = client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{
+ "enabled": true,
+ "eab_policy": string(eabPolicyAlwaysRequired),
+ })
+ require.NoError(t, err)
+
+ for _, policyName := range []EabPolicyName{eabPolicyNewAccountRequired, eabPolicyNotRequired} {
+ _, err = client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{
+ "enabled": true,
+ "eab_policy": string(policyName),
+ })
+ require.Error(t, err, "eab policy %s should not have been allowed to be set", policyName)
+ }
+
+ // Make sure we can disable ACME and the eab policy is not checked
+ _, err = client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{
+ "enabled": false,
+ "eab_policy": string(eabPolicyNotRequired),
+ })
+ require.NoError(t, err)
+}
+
+// TestAcmeTruncatesToIssuerExpiry makes sure that if the selected issuer's expiry is shorter than the
+// CSR's selected TTL value in ACME and the issuer's leaf_not_after_behavior setting is set to Err,
+// we will override the configured behavior and truncate to the issuer's NotAfter.
+func TestAcmeTruncatesToIssuerExpiry(t *testing.T) {
+ t.Parallel()
+
+ cluster, client, _ := setupAcmeBackend(t)
+ defer cluster.Cleanup()
+
+ testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+ defer cancel()
+
+ mount := "pki"
+ resp, err := client.Logical().WriteWithContext(context.Background(), mount+"/issuers/generate/intermediate/internal",
+ map[string]interface{}{
+ "key_name": "short-key",
+ "key_type": "ec",
+ "common_name": "test.com",
+ })
+ require.NoError(t, err, "failed creating intermediary CSR")
+ intermediateCSR := resp.Data["csr"].(string)
+
+ // Sign the intermediate CSR using the root issuer
+ resp, err = client.Logical().Write(mount+"/issuer/root-ca/sign-intermediate", map[string]interface{}{
+ "csr": intermediateCSR,
+ "ttl": "10m",
+ "max_ttl": "1h",
+ })
+ require.NoError(t, err, "failed signing intermediary CSR")
+ intermediateCertPEM := resp.Data["certificate"].(string)
+
+ shortCa := parseCert(t, intermediateCertPEM)
+
+ // Import the signed intermediate cert back into the mount as a new issuer
+ resp, err = client.Logical().Write(mount+"/issuers/import/cert", map[string]interface{}{
+ "pem_bundle": intermediateCertPEM,
+ })
+ require.NoError(t, err, "failed importing intermediary cert")
+ importedIssuersRaw := resp.Data["imported_issuers"].([]interface{})
+ require.Len(t, importedIssuersRaw, 1)
+ shortCaUuid := importedIssuersRaw[0].(string)
+
+ _, err = client.Logical().Write(mount+"/issuer/"+shortCaUuid, map[string]interface{}{
+ 
"leaf_not_after_behavior": "err", + "issuer_name": "short-ca", + }) + require.NoError(t, err, "failed updating issuer name") + + baseAcmeURL := "/v1/pki/issuer/short-ca/acme/" + accountKey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err, "failed creating rsa key") + + acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) + + // Create new account + t.Logf("Testing register on %s", baseAcmeURL) + acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true }) + require.NoError(t, err, "failed registering account") + + // Create an order + t.Logf("Testing Authorize Order on %s", baseAcmeURL) + identifiers := []string{"*.localdomain"} + order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{ + {Type: "dns", Value: identifiers[0]}, + }) + require.NoError(t, err, "failed creating order") + + // HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow + // test. + markAuthorizationSuccess(t, client, acmeClient, acct, order) + + // Build a proper CSR, with the correct name and signed with a different key works. + goodCr := &x509.CertificateRequest{DNSNames: []string{identifiers[0]}} + csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generated key for CSR") + csr, err := x509.CreateCertificateRequest(rand.Reader, goodCr, csrKey) + require.NoError(t, err, "failed generating csr") + + certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true) + require.NoError(t, err, "failed finalizing order") + require.Len(t, certs, 3, "expected full acme chain") + + testAcmeCertSignedByCa(t, client, certs, "short-ca") + + acmeCert, err := x509.ParseCertificate(certs[0]) + require.NoError(t, err, "failed parsing acme cert") + + require.Equal(t, shortCa.NotAfter, acmeCert.NotAfter, "certificate times aren't the same") +} + +// TestAcmeRoleExtKeyUsage verify that ACME by default ignores the role's various ExtKeyUsage flags, +// but if the ACME configuration override of allow_role_ext_key_usage is set that we then honor +// the role's flag. 
+func TestAcmeRoleExtKeyUsage(t *testing.T) {
+ t.Parallel()
+
+ cluster, client, _ := setupAcmeBackend(t)
+ defer cluster.Cleanup()
+
+ testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+ defer cancel()
+
+ roleName := "test-role"
+
+ roleOpt := map[string]interface{}{
+ "ttl": "365h",
+ "max_ttl": "720h",
+ "key_type": "any",
+ "allowed_domains": "localdomain",
+ "allow_subdomains": "true",
+ "allow_wildcard_certificates": "true",
+ "require_cn": "true", /* explicit default */
+ "server_flag": "true",
+ "client_flag": "true",
+ "code_signing_flag": "true",
+ "email_protection_flag": "true",
+ }
+
+ _, err := client.Logical().Write("pki/roles/"+roleName, roleOpt)
+ require.NoError(t, err, "failed creating role test-role")
+
+ baseAcmeURL := "/v1/pki/roles/" + roleName + "/acme/"
+ accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
+ require.NoError(t, err, "failed creating rsa key")
+
+ acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey)
+
+ // Create new account
+ t.Logf("Testing register on %s", baseAcmeURL)
+ acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true })
+ require.NoError(t, err, "failed registering account")
+
+ // Create an order
+ t.Logf("Testing Authorize Order on %s", baseAcmeURL)
+ identifiers := []string{"*.localdomain"}
+ order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{
+ {Type: "dns", Value: identifiers[0]},
+ })
+ require.NoError(t, err, "failed creating order")
+
+ // HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow test.
+ markAuthorizationSuccess(t, client, acmeClient, acct, order)
+
+ // Build a proper CSR with the correct name, signed with a different key.
+ goodCr := &x509.CertificateRequest{DNSNames: []string{identifiers[0]}}
+ csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ require.NoError(t, err, "failed generating key for CSR")
+ csr, err := x509.CreateCertificateRequest(rand.Reader, goodCr, csrKey)
+ require.NoError(t, err, "failed generating csr")
+
+ certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true)
+ require.NoError(t, err, "order finalization failed")
+ require.GreaterOrEqual(t, len(certs), 1, "expected at least one cert in bundle")
+ acmeCert, err := x509.ParseCertificate(certs[0])
+ require.NoError(t, err, "failed parsing acme cert")
+
+ require.Equal(t, 1, len(acmeCert.ExtKeyUsage), "mis-match on expected ExtKeyUsages")
+ require.ElementsMatch(t, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, acmeCert.ExtKeyUsage,
+ "mismatch of ExtKeyUsage flags")
+
+ // Now turn on the ACME configuration allow_role_ext_key_usage and retest to make sure we get a
+ // certificate with them all
+ _, err = client.Logical().WriteWithContext(context.Background(), "pki/config/acme", map[string]interface{}{
+ "enabled": true,
+ "eab_policy": "not-required",
+ "allow_role_ext_key_usage": true,
+ })
+ require.NoError(t, err, "failed updating ACME configuration")
+
+ t.Logf("Testing Authorize Order on %s", baseAcmeURL)
+ order, err = acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{
+ {Type: "dns", Value: identifiers[0]},
+ })
+ require.NoError(t, err, "failed creating order")
+
+ // HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow test.
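+ // markAuthorizationSuccess (defined later in this file) rewrites the stored
+ // authorization and its challenges to valid directly through sys/raw.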
+ markAuthorizationSuccess(t, client, acmeClient, acct, order) + + certs, _, err = acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true) + require.NoError(t, err, "order finalization failed") + require.GreaterOrEqual(t, len(certs), 1, "expected at least one cert in bundle") + acmeCert, err = x509.ParseCertificate(certs[0]) + require.NoError(t, err, "failed parsing acme cert") + + require.Equal(t, 4, len(acmeCert.ExtKeyUsage), "mis-match on expected ExtKeyUsages") + require.ElementsMatch(t, []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageCodeSigning, x509.ExtKeyUsageEmailProtection, + }, + acmeCert.ExtKeyUsage, "mismatch of ExtKeyUsage flags") +} + +func TestIssuerRoleDirectoryAssociations(t *testing.T) { + t.Parallel() + + // This creates two issuers for us (root-ca, int-ca) and two + // roles (test-role, acme) that we can use with various directory + // configurations. + cluster, client, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + + // Setup DNS for validations. + testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + dns := dnstest.SetupResolver(t, "dadgarcorp.com") + defer dns.Cleanup() + _, err := client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{ + "dns_resolver": dns.GetLocalAddr(), + }) + require.NoError(t, err, "failed to specify dns resolver") + + // 1. Use a forbidden role should fail. + resp, err := client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{ + "enabled": true, + "allowed_roles": []string{"acme"}, + }) + require.NoError(t, err, "failed to write config") + require.NotNil(t, resp) + + _, err = client.Logical().ReadWithContext(testCtx, "pki/roles/test-role/acme/directory") + require.Error(t, err, "failed to forbid usage of test-role") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/default/roles/test-role/acme/directory") + require.Error(t, err, "failed to forbid usage of test-role under default issuer") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/int-ca/roles/test-role/acme/directory") + require.Error(t, err, "failed to forbid usage of test-role under int-ca issuer") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/root-ca/roles/test-role/acme/directory") + require.Error(t, err, "failed to forbid usage of test-role under root-ca issuer") + + _, err = client.Logical().ReadWithContext(testCtx, "pki/roles/acme/acme/directory") + require.NoError(t, err, "failed to allow usage of acme") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/default/roles/acme/acme/directory") + require.NoError(t, err, "failed to allow usage of acme under default issuer") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/int-ca/roles/acme/acme/directory") + require.NoError(t, err, "failed to allow usage of acme under int-ca issuer") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/root-ca/roles/acme/acme/directory") + require.NoError(t, err, "failed to allow usage of acme under root-ca issuer") + + // 2. Use a forbidden issuer should fail. 
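+ // allowed_issuers composes with allowed_roles: directories under the
+ // root-ca issuer should now fail even for the otherwise-permitted acme role.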
+ resp, err = client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{ + "allowed_roles": []string{"acme"}, + "allowed_issuers": []string{"int-ca"}, + }) + require.NoError(t, err, "failed to write config") + require.NotNil(t, resp) + + _, err = client.Logical().ReadWithContext(testCtx, "pki/roles/test-role/acme/directory") + require.Error(t, err, "failed to forbid usage of test-role") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/default/roles/test-role/acme/directory") + require.Error(t, err, "failed to forbid usage of test-role under default issuer") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/int-ca/roles/test-role/acme/directory") + require.Error(t, err, "failed to forbid usage of test-role under int-ca issuer") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/root-ca/roles/test-role/acme/directory") + require.Error(t, err, "failed to forbid usage of test-role under root-ca issuer") + + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/root-ca/roles/acme/acme/directory") + require.Error(t, err, "failed to forbid usage of acme under root-ca issuer") + + _, err = client.Logical().ReadWithContext(testCtx, "pki/roles/acme/acme/directory") + require.NoError(t, err, "failed to allow usage of acme") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/default/roles/acme/acme/directory") + require.NoError(t, err, "failed to allow usage of acme under default issuer") + _, err = client.Logical().ReadWithContext(testCtx, "pki/issuer/int-ca/roles/acme/acme/directory") + require.NoError(t, err, "failed to allow usage of acme under int-ca issuer") + + // 3. Setting the default directory to be a sign-verbatim policy and + // using two different CAs should result in certs signed by each CA. + resp, err = client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{ + "allowed_roles": []string{"*"}, + "allowed_issuers": []string{"*"}, + "default_directory_policy": "sign-verbatim", + }) + require.NoError(t, err, "failed to write config") + require.NotNil(t, resp) + + // default == int-ca + acmeClientDefault := getAcmeClientForCluster(t, cluster, "/v1/pki/issuer/default/acme/", nil) + defaultLeafCert := doACMEForDomainWithDNS(t, dns, acmeClientDefault, []string{"default-ca.dadgarcorp.com"}) + requireSignedByAtPath(t, client, defaultLeafCert, "pki/issuer/int-ca") + + acmeClientIntCA := getAcmeClientForCluster(t, cluster, "/v1/pki/issuer/int-ca/acme/", nil) + intCALeafCert := doACMEForDomainWithDNS(t, dns, acmeClientIntCA, []string{"int-ca.dadgarcorp.com"}) + requireSignedByAtPath(t, client, intCALeafCert, "pki/issuer/int-ca") + + acmeClientRootCA := getAcmeClientForCluster(t, cluster, "/v1/pki/issuer/root-ca/acme/", nil) + rootCALeafCert := doACMEForDomainWithDNS(t, dns, acmeClientRootCA, []string{"root-ca.dadgarcorp.com"}) + requireSignedByAtPath(t, client, rootCALeafCert, "pki/issuer/root-ca") + + // 4. Using a role-based default directory should allow us to control leaf + // issuance on the base and issuer-specific directories. 
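+ // With default_directory_policy set to "role:acme" below, every directory
+ // that does not name a role explicitly issues under the acme role, which we
+ // make recognizable here by giving it a distinctive OU and organization.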
+ resp, err = client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{ + "allowed_roles": []string{"*"}, + "allowed_issuers": []string{"*"}, + "default_directory_policy": "role:acme", + }) + require.NoError(t, err, "failed to write config") + require.NotNil(t, resp) + + resp, err = client.Logical().JSONMergePatch(testCtx, "pki/roles/acme", map[string]interface{}{ + "ou": "IT Security", + "organization": []string{"Dadgar Corporation, Limited"}, + "allow_any_name": true, + }) + require.NoError(t, err, "failed to write role differentiator") + require.NotNil(t, resp) + + for _, issuer := range []string{"", "default", "int-ca", "root-ca"} { + // Path should override role. + directory := "/v1/pki/issuer/" + issuer + "/acme/" + issuerPath := "/pki/issuer/" + issuer + if issuer == "" { + directory = "/v1/pki/acme/" + issuerPath = "/pki/issuer/int-ca" + } else if issuer == "default" { + issuerPath = "/pki/issuer/int-ca" + } + + t.Logf("using directory: %v / issuer: %v", directory, issuerPath) + + acmeClient := getAcmeClientForCluster(t, cluster, directory, nil) + leafCert := doACMEForDomainWithDNS(t, dns, acmeClient, []string{"role-restricted.dadgarcorp.com"}) + require.Contains(t, leafCert.Subject.Organization, "Dadgar Corporation, Limited", "on directory: %v", directory) + require.Contains(t, leafCert.Subject.OrganizationalUnit, "IT Security", "on directory: %v", directory) + requireSignedByAtPath(t, client, leafCert, issuerPath) + } +} + +func TestACMESubjectFieldsAndExtensionsIgnored(t *testing.T) { + t.Parallel() + + // This creates two issuers for us (root-ca, int-ca) and two + // roles (test-role, acme) that we can use with various directory + // configurations. + cluster, client, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + + // Setup DNS for validations. + testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + dns := dnstest.SetupResolver(t, "dadgarcorp.com") + defer dns.Cleanup() + _, err := client.Logical().WriteWithContext(testCtx, "pki/config/acme", map[string]interface{}{ + "dns_resolver": dns.GetLocalAddr(), + }) + require.NoError(t, err, "failed to specify dns resolver") + + // Use the default sign-verbatim policy and ensure OU does not get set. + directory := "/v1/pki/acme/" + domains := []string{"no-ou.dadgarcorp.com"} + acmeClient := getAcmeClientForCluster(t, cluster, directory, nil) + cr := &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: domains[0], OrganizationalUnit: []string{"DadgarCorp IT"}}, + DNSNames: domains, + } + cert := doACMEForCSRWithDNS(t, dns, acmeClient, domains, cr) + t.Logf("Got certificate: %v", cert) + require.Empty(t, cert.Subject.OrganizationalUnit) + + // Use the default sign-verbatim policy and ensure extension does not get set. 
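+ // This time the CSR smuggles in a delta CRL indicator extension; even under
+ // sign-verbatim, ACME issuance should drop requested extensions rather than
+ // copy them onto the issued leaf.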
+ domains = []string{"no-ext.dadgarcorp.com"} + extension, err := certutil.CreateDeltaCRLIndicatorExt(12345) + require.NoError(t, err) + cr = &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: domains[0]}, + DNSNames: domains, + ExtraExtensions: []pkix.Extension{extension}, + } + cert = doACMEForCSRWithDNS(t, dns, acmeClient, domains, cr) + t.Logf("Got certificate: %v", cert) + for _, ext := range cert.Extensions { + require.False(t, ext.Id.Equal(certutil.DeltaCRLIndicatorOID)) + } + require.NotEmpty(t, cert.Extensions) +} + +// TestAcmeWithCsrIncludingBasicConstraintExtension verify that we error out for a CSR that is requesting a +// certificate with the IsCA set to true, false is okay, within the basic constraints extension and that no matter what +// the extension is not present on the returned certificate. +func TestAcmeWithCsrIncludingBasicConstraintExtension(t *testing.T) { + t.Parallel() + + cluster, client, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + + testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + baseAcmeURL := "/v1/pki/acme/" + accountKey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err, "failed creating rsa key") + + acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) + + // Create new account + t.Logf("Testing register on %s", baseAcmeURL) + acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true }) + require.NoError(t, err, "failed registering account") + + // Create an order + t.Logf("Testing Authorize Order on %s", baseAcmeURL) + identifiers := []string{"*.localdomain"} + order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{ + {Type: "dns", Value: identifiers[0]}, + }) + require.NoError(t, err, "failed creating order") + + // HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow test. 
+ markAuthorizationSuccess(t, client, acmeClient, acct, order)
+
+ // Build a CSR with IsCA set to true, making sure we reject it
+ extension, err := certutil.CreateBasicConstraintExtension(true, -1)
+ require.NoError(t, err, "failed generating basic constraint extension")
+
+ isCATrueCSR := &x509.CertificateRequest{
+ DNSNames: []string{identifiers[0]},
+ ExtraExtensions: []pkix.Extension{extension},
+ }
+ csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ require.NoError(t, err, "failed generating key for CSR")
+ csr, err := x509.CreateCertificateRequest(rand.Reader, isCATrueCSR, csrKey)
+ require.NoError(t, err, "failed generating csr")
+
+ _, _, err = acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true)
+ require.Error(t, err, "order finalization should have failed with IsCA set to true")
+
+ extension, err = certutil.CreateBasicConstraintExtension(false, -1)
+ require.NoError(t, err, "failed generating basic constraint extension")
+ isCAFalseCSR := &x509.CertificateRequest{
+ DNSNames: []string{identifiers[0]},
+ // Note: x509.CreateCertificateRequest only serializes ExtraExtensions;
+ // the Extensions field would be silently ignored.
+ ExtraExtensions: []pkix.Extension{extension},
+ }
+
+ csr, err = x509.CreateCertificateRequest(rand.Reader, isCAFalseCSR, csrKey)
+ require.NoError(t, err, "failed generating csr")
+
+ certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true)
+ require.NoError(t, err, "order finalization should not have failed with IsCA set to false")
+
+ require.GreaterOrEqual(t, len(certs), 1, "expected at least one cert in bundle")
+ acmeCert, err := x509.ParseCertificate(certs[0])
+ require.NoError(t, err, "failed parsing acme cert")
+
+ // Make sure we don't have any basic constraint extension within the returned cert
+ for _, ext := range acmeCert.Extensions {
+ if ext.Id.Equal(certutil.ExtensionBasicConstraintsOID) {
+ // We shouldn't have this extension in our cert
+ t.Fatalf("issued cert contained a basic constraints extension")
+ }
+ }
+}
+
+func markAuthorizationSuccess(t *testing.T, client *api.Client, acmeClient *acme.Client, acct *acme.Account, order *acme.Order) {
+ testCtx := context.Background()
+
+ pkiMount := findStorageMountUuid(t, client, "pki")
+
+ // Delete any and all challenge validation entries to stop the engine from overwriting our hack here
+ i := 0
+ for {
+ deleteCvEntries(t, client, pkiMount)
+
+ accountId := acct.URI[strings.LastIndex(acct.URI, "/"):]
+ for _, authURI := range order.AuthzURLs {
+ authId := authURI[strings.LastIndex(authURI, "/"):]
+
+ // sys/raw does not work with namespaces
+ baseClient := client.WithNamespace("")
+
+ values, err := baseClient.Logical().ListWithContext(testCtx, "sys/raw/logical/")
+ require.NoError(t, err)
+ t.Logf("sys/raw logical values: %v", values)
+
+ rawPath := path.Join("sys/raw/logical/", pkiMount, getAuthorizationPath(accountId, authId))
+ resp, err := baseClient.Logical().ReadWithContext(testCtx, rawPath)
+ require.NoError(t, err, "failed looking up authorization storage")
+ require.NotNil(t, resp, "sys raw response was nil")
+ require.NotEmpty(t, resp.Data["value"], "no value field in sys raw response")
+
+ var authz ACMEAuthorization
+ err = jsonutil.DecodeJSON([]byte(resp.Data["value"].(string)), &authz)
+ require.NoError(t, err, "error decoding authorization: %v", err)
+ authz.Status = ACMEAuthorizationValid
+ for _, challenge := range authz.Challenges {
+ challenge.Status = ACMEChallengeValid
+ }
+
+ encodeJSON, err := jsonutil.EncodeJSON(authz)
+ require.NoError(t, err, "failed encoding authz json")
+ _, err = baseClient.Logical().WriteWithContext(testCtx, rawPath, 
map[string]interface{}{ + "value": base64.StdEncoding.EncodeToString(encodeJSON), + "encoding": "base64", + }) + require.NoError(t, err, "failed writing authorization storage") + } + + // Give some time + time.Sleep(200 * time.Millisecond) + + // Check to see if we have fixed up the status and no new entries have appeared. + if !deleteCvEntries(t, client, pkiMount) { + // No entries found + // Look to see if we raced against the engine + orderLookup, err := acmeClient.GetOrder(testCtx, order.URI) + require.NoError(t, err, "failed loading order status after manually ") + + if orderLookup.Status == string(ACMEOrderReady) { + // Our order seems to be in the proper status, should be safe-ish to go ahead now + break + } else { + t.Logf("order status was not ready, retrying") + } + } else { + t.Logf("new challenge entries appeared after deletion, retrying") + } + + if i > 5 { + t.Fatalf("We are constantly deleting cv entries or order status is not changing, something is wrong") + } + + i++ + } +} + +func deleteCvEntries(t *testing.T, client *api.Client, pkiMount string) bool { + testCtx := context.Background() + + baseClient := client.WithNamespace("") + + cvPath := path.Join("sys/raw/logical/", pkiMount, acmeValidationPrefix) + resp, err := baseClient.Logical().ListWithContext(testCtx, cvPath) + require.NoError(t, err, "failed listing cv path items") + + deletedEntries := false + if resp != nil { + cvEntries := resp.Data["keys"].([]interface{}) + for _, cvEntry := range cvEntries { + cvEntryPath := path.Join(cvPath, cvEntry.(string)) + _, err = baseClient.Logical().DeleteWithContext(testCtx, cvEntryPath) + require.NoError(t, err, "failed to delete cv entry") + deletedEntries = true + } + } + + return deletedEntries +} + +func setupAcmeBackend(t *testing.T) (*vault.TestCluster, *api.Client, string) { + cluster, client := setupTestPkiCluster(t) + + return setupAcmeBackendOnClusterAtPath(t, cluster, client, "pki") +} + +func setupAcmeBackendOnClusterAtPath(t *testing.T, cluster *vault.TestCluster, client *api.Client, mount string) (*vault.TestCluster, *api.Client, string) { + mount = strings.Trim(mount, "/") + + // Setting templated AIAs should succeed. 
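+ // pathConfig doubles as the "path" value written to config/cluster below;
+ // the mount's ACME directory URLs are templated from it.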
+ pathConfig := client.Address() + "/v1/" + mount + + namespace := "" + mountName := mount + if mount != "pki" { + if strings.Contains(mount, "/") && constants.IsEnterprise { + ns_pieces := strings.Split(mount, "/") + c := len(ns_pieces) + // mount is c-1 + ns_name := ns_pieces[c-2] + if len(ns_pieces) > 2 { + // Parent's namespaces + parent := strings.Join(ns_pieces[0:c-2], "/") + _, err := client.WithNamespace(parent).Logical().Write("/sys/namespaces/"+ns_name, nil) + require.NoError(t, err, "failed to create nested namespaces "+parent+" -> "+ns_name) + } else { + _, err := client.Logical().Write("/sys/namespaces/"+ns_name, nil) + require.NoError(t, err, "failed to create nested namespace "+ns_name) + } + namespace = strings.Join(ns_pieces[0:c-1], "/") + mountName = ns_pieces[c-1] + } + + err := client.WithNamespace(namespace).Sys().Mount(mountName, &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "3000h", + MaxLeaseTTL: "600000h", + }, + }) + require.NoError(t, err, "failed to mount new PKI instance at "+mount) + } + + err := client.Sys().TuneMountWithContext(ctx, mount, api.MountConfigInput{ + DefaultLeaseTTL: "3000h", + MaxLeaseTTL: "600000h", + }) + require.NoError(t, err, "failed updating mount lease times "+mount) + + _, err = client.Logical().WriteWithContext(context.Background(), mount+"/config/cluster", map[string]interface{}{ + "path": pathConfig, + "aia_path": "http://localhost:8200/cdn/" + mount, + }) + require.NoError(t, err) + + _, err = client.Logical().WriteWithContext(context.Background(), mount+"/config/acme", map[string]interface{}{ + "enabled": true, + "eab_policy": "not-required", + }) + require.NoError(t, err) + + // Allow certain headers to pass through for ACME support + _, err = client.WithNamespace(namespace).Logical().WriteWithContext(context.Background(), "sys/mounts/"+mountName+"/tune", map[string]interface{}{ + "allowed_response_headers": []string{"Last-Modified", "Replay-Nonce", "Link", "Location"}, + "max_lease_ttl": "920000h", + }) + require.NoError(t, err, "failed tuning mount response headers") + + resp, err := client.Logical().WriteWithContext(context.Background(), mount+"/issuers/generate/root/internal", + map[string]interface{}{ + "issuer_name": "root-ca", + "key_name": "root-key", + "key_type": "ec", + "common_name": "Test Root R1 " + mount, + "ttl": "7200h", + "max_ttl": "920000h", + }) + require.NoError(t, err, "failed creating root CA") + + resp, err = client.Logical().WriteWithContext(context.Background(), mount+"/issuers/generate/intermediate/internal", + map[string]interface{}{ + "key_name": "int-key", + "key_type": "ec", + "common_name": "Test Int X1 " + mount, + }) + require.NoError(t, err, "failed creating intermediary CSR") + intermediateCSR := resp.Data["csr"].(string) + + // Sign the intermediate CSR using /pki + resp, err = client.Logical().Write(mount+"/issuer/root-ca/sign-intermediate", map[string]interface{}{ + "csr": intermediateCSR, + "ttl": "7100h", + "max_ttl": "910000h", + }) + require.NoError(t, err, "failed signing intermediary CSR") + intermediateCertPEM := resp.Data["certificate"].(string) + + // Configure the intermediate cert as the CA in /pki2 + resp, err = client.Logical().Write(mount+"/issuers/import/cert", map[string]interface{}{ + "pem_bundle": intermediateCertPEM, + }) + require.NoError(t, err, "failed importing intermediary cert") + importedIssuersRaw := resp.Data["imported_issuers"].([]interface{}) + require.Len(t, importedIssuersRaw, 1) + intCaUuid := 
importedIssuersRaw[0].(string) + + _, err = client.Logical().Write(mount+"/issuer/"+intCaUuid, map[string]interface{}{ + "issuer_name": "int-ca", + }) + require.NoError(t, err, "failed updating issuer name") + + _, err = client.Logical().Write(mount+"/config/issuers", map[string]interface{}{ + "default": "int-ca", + }) + require.NoError(t, err, "failed updating default issuer") + + _, err = client.Logical().Write(mount+"/roles/test-role", map[string]interface{}{ + "ttl": "168h", + "max_ttl": "168h", + "key_type": "any", + "allowed_domains": "localdomain", + "allow_subdomains": "true", + "allow_wildcard_certificates": "true", + }) + require.NoError(t, err, "failed creating role test-role") + + _, err = client.Logical().Write(mount+"/roles/acme", map[string]interface{}{ + "ttl": "3650h", + "max_ttl": "7200h", + "key_type": "any", + }) + require.NoError(t, err, "failed creating role acme") + + return cluster, client, pathConfig +} + +func testAcmeCertSignedByCa(t *testing.T, client *api.Client, derCerts [][]byte, issuerRef string) { + t.Helper() + require.NotEmpty(t, derCerts) + acmeCert, err := x509.ParseCertificate(derCerts[0]) + require.NoError(t, err, "failed parsing acme cert bytes") + + resp, err := client.Logical().ReadWithContext(context.Background(), "pki/issuer/"+issuerRef) + require.NoError(t, err, "failed reading issuer with name %s", issuerRef) + issuerCert := parseCert(t, resp.Data["certificate"].(string)) + issuerChainRaw := resp.Data["ca_chain"].([]interface{}) + + err = acmeCert.CheckSignatureFrom(issuerCert) + require.NoError(t, err, "issuer %s did not sign provided cert", issuerRef) + + expectedCerts := [][]byte{derCerts[0]} + + for _, entry := range issuerChainRaw { + chainCert := parseCert(t, entry.(string)) + expectedCerts = append(expectedCerts, chainCert.Raw) + } + + if diffs := deep.Equal(expectedCerts, derCerts); diffs != nil { + t.Fatalf("diffs were found between the acme chain returned and the expected value: \n%v", diffs) + } +} + +// TestAcmeValidationError make sure that we properly return errors on validation errors. 
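+// Accepting a challenge that was never provisioned should record an
+// urn:ietf:params:acme:error:incorrectResponse problem on the challenge and
+// eventually drive the challenge, authorization, and order to invalid.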
+
+// TestAcmeValidationError makes sure we properly surface errors from failed challenge validations.
+func TestAcmeValidationError(t *testing.T) {
+	t.Parallel()
+	cluster, _, _ := setupAcmeBackend(t)
+	defer cluster.Cleanup()
+
+	testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+	defer cancel()
+
+	baseAcmeURL := "/v1/pki/acme/"
+	accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
+	require.NoError(t, err, "failed creating rsa key")
+
+	acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey)
+
+	// Create new account
+	t.Logf("Testing register on %s", baseAcmeURL)
+	_, err = acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true })
+	require.NoError(t, err, "failed registering account")
+
+	// Create an order
+	t.Logf("Testing Authorize Order on %s", baseAcmeURL)
+	identifiers := []string{"www.dadgarcorp.com"}
+	order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{
+		{Type: "dns", Value: identifiers[0]},
+	})
+	require.NoError(t, err, "failed creating order")
+
+	// Load authorizations
+	var authorizations []*acme.Authorization
+	for _, authUrl := range order.AuthzURLs {
+		auth, err := acmeClient.GetAuthorization(testCtx, authUrl)
+		require.NoError(t, err, "failed fetching authorization: %s", authUrl)
+
+		authorizations = append(authorizations, auth)
+	}
+	require.Len(t, authorizations, 1, "expected one authorization for the single identifier")
+	require.Len(t, authorizations[0].Challenges, 3, "expected three challenges on the authorization")
+
+	acceptedAuth, err := acmeClient.Accept(testCtx, authorizations[0].Challenges[0])
+	require.NoError(t, err, "should have been allowed to accept challenge 1")
+	require.Equal(t, string(ACMEChallengeProcessing), acceptedAuth.Status)
+
+	_, err = acmeClient.Accept(testCtx, authorizations[0].Challenges[1])
+	require.Error(t, err, "should have been prevented from accepting challenge 2")
+
+	// Make sure our challenge returns errors
+	testhelpers.RetryUntil(t, 30*time.Second, func() error {
+		challenge, err := acmeClient.GetChallenge(testCtx, authorizations[0].Challenges[0].URI)
+		if err != nil {
+			return err
+		}
+
+		if challenge.Error == nil {
+			return fmt.Errorf("no error set in challenge yet")
+		}
+
+		acmeError, ok := challenge.Error.(*acme.Error)
+		if !ok {
+			return fmt.Errorf("unexpected error type back: %v", challenge.Error)
+		}
+
+		if acmeError.ProblemType != "urn:ietf:params:acme:error:incorrectResponse" {
+			return fmt.Errorf("unexpected ACME error back: %v", acmeError)
+		}
+
+		return nil
+	})
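+
+	// Nothing in this test ever answers the validation request (no HTTP-01
+	// listener or DNS-01 record is set up), so the server is expected to keep
+	// recording an incorrectResponse problem on the challenge as it retries.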
+ if os.Getenv("CI") == "" { + testhelpers.RetryUntil(t, 10*time.Minute, func() error { + challenge, err := acmeClient.GetChallenge(testCtx, authorizations[0].Challenges[0].URI) + if err != nil { + return fmt.Errorf("failed to load challenge: %w", err) + } + + if challenge.Status != string(ACMEChallengeInvalid) { + return fmt.Errorf("challenge state was not changed to invalid: %v", challenge) + } + + authz, err := acmeClient.GetAuthorization(testCtx, authorizations[0].URI) + if err != nil { + return fmt.Errorf("failed to load authorization: %w", err) + } + + if authz.Status != string(ACMEAuthorizationInvalid) { + return fmt.Errorf("authz state was not changed to invalid: %v", authz) + } + + myOrder, err := acmeClient.GetOrder(testCtx, order.URI) + if err != nil { + return fmt.Errorf("failed to load order: %w", err) + } + + if myOrder.Status != string(ACMEOrderInvalid) { + return fmt.Errorf("order state was not changed to invalid: %v", order) + } + + return nil + }) + } +} + +// TestAcmeRevocationAcrossAccounts makes sure that we can revoke certificates using different accounts if +// we have another ACME account or not but access to the certificate key. Also verifies we can't revoke +// certificates across account keys. +func TestAcmeRevocationAcrossAccounts(t *testing.T) { + t.Parallel() + + cluster, vaultClient, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + baseAcmeURL := "/v1/pki/acme/" + accountKey1, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err, "failed creating rsa key") + + acmeClient1 := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey1) + + leafKey, certs := doACMEWorkflow(t, vaultClient, acmeClient1) + acmeCert, err := x509.ParseCertificate(certs[0]) + require.NoError(t, err, "failed parsing acme cert bytes") + + // Make sure our cert is not revoked + certResp, err := vaultClient.Logical().ReadWithContext(ctx, "pki/cert/"+serialFromCert(acmeCert)) + require.NoError(t, err, "failed to read certificate status") + require.NotNil(t, certResp, "certificate status response was nil") + revocationTime := certResp.Data["revocation_time"].(json.Number) + revocationTimeInt, err := revocationTime.Int64() + require.NoError(t, err, "failed converting revocation_time value: %v", revocationTime) + require.Equal(t, revocationTimeInt, int64(0), + "revocation time was not 0, cert was already revoked: %v", revocationTimeInt) + + // Test that we can't revoke the certificate with another account's key + accountKey2, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader) + require.NoError(t, err, "failed creating rsa key") + + acmeClient2 := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey2) + _, err = acmeClient2.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true }) + require.NoError(t, err, "failed registering second account") + + err = acmeClient2.RevokeCert(ctx, nil, certs[0], acme.CRLReasonUnspecified) + require.Error(t, err, "should have failed revoking the certificate with a different account") + + // Make sure our cert is not revoked + certResp, err = vaultClient.Logical().ReadWithContext(ctx, "pki/cert/"+serialFromCert(acmeCert)) + require.NoError(t, err, "failed to read certificate status") + require.NotNil(t, certResp, "certificate status response was nil") + revocationTime = certResp.Data["revocation_time"].(json.Number) + revocationTimeInt, err = revocationTime.Int64() + require.NoError(t, err, "failed converting 
revocation_time value: %v", revocationTime) + require.Equal(t, revocationTimeInt, int64(0), + "revocation time was not 0, cert was already revoked: %v", revocationTimeInt) + + // But we can revoke if we sign the request with the certificate's key and a different account + err = acmeClient2.RevokeCert(ctx, leafKey, certs[0], acme.CRLReasonUnspecified) + require.NoError(t, err, "should have been allowed to revoke certificate with csr key across accounts") + + // Make sure our cert is now revoked + certResp, err = vaultClient.Logical().ReadWithContext(ctx, "pki/cert/"+serialFromCert(acmeCert)) + require.NoError(t, err, "failed to read certificate status") + require.NotNil(t, certResp, "certificate status response was nil") + revocationTime = certResp.Data["revocation_time"].(json.Number) + revocationTimeInt, err = revocationTime.Int64() + require.NoError(t, err, "failed converting revocation_time value: %v", revocationTime) + require.Greater(t, revocationTimeInt, int64(0), + "revocation time was not greater than 0, cert was not revoked: %v", revocationTimeInt) + + // Make sure we can revoke a certificate without a registered ACME account + leafKey2, certs2 := doACMEWorkflow(t, vaultClient, acmeClient1) + + acmeClient3 := getAcmeClientForCluster(t, cluster, baseAcmeURL, nil) + err = acmeClient3.RevokeCert(ctx, leafKey2, certs2[0], acme.CRLReasonUnspecified) + require.NoError(t, err, "should be allowed to revoke a cert with no ACME account but with cert key") + + // Make sure our cert is now revoked + acmeCert2, err := x509.ParseCertificate(certs2[0]) + require.NoError(t, err, "failed parsing acme cert 2 bytes") + + certResp, err = vaultClient.Logical().ReadWithContext(ctx, "pki/cert/"+serialFromCert(acmeCert2)) + require.NoError(t, err, "failed to read certificate status") + require.NotNil(t, certResp, "certificate status response was nil") + revocationTime = certResp.Data["revocation_time"].(json.Number) + revocationTimeInt, err = revocationTime.Int64() + require.NoError(t, err, "failed converting revocation_time value: %v", revocationTime) + require.Greater(t, revocationTimeInt, int64(0), + "revocation time was not greater than 0, cert was not revoked: %v", revocationTimeInt) +} + +func doACMEWorkflow(t *testing.T, vaultClient *api.Client, acmeClient *acme.Client) (*ecdsa.PrivateKey, [][]byte) { + testCtx := context.Background() + + // Create new account + acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true }) + if err != nil { + if strings.Contains(err.Error(), "acme: account already exists") { + acct, err = acmeClient.GetReg(testCtx, "") + require.NoError(t, err, "failed looking up account after account exists error?") + } else { + require.NoError(t, err, "failed registering account") + } + } + + // Create an order + identifiers := []string{"*.localdomain"} + order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{ + {Type: "dns", Value: identifiers[0]}, + }) + require.NoError(t, err, "failed creating order") + + // HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow + // test. + markAuthorizationSuccess(t, vaultClient, acmeClient, acct, order) + + // Build a proper CSR, with the correct name and signed with a different key works. 
+	goodCr := &x509.CertificateRequest{DNSNames: []string{identifiers[0]}}
+	csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	require.NoError(t, err, "failed generating key for CSR")
+	csr, err := x509.CreateCertificateRequest(rand.Reader, goodCr, csrKey)
+	require.NoError(t, err, "failed generating csr")
+
+	certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true)
+	require.NoError(t, err, "failed finalizing order")
+	require.Len(t, certs, 3, "expected full acme chain")
+
+	return csrKey, certs
+}
+
+func setupTestPkiCluster(t *testing.T) (*vault.TestCluster, *api.Client) {
+	coreConfig := &vault.CoreConfig{
+		LogicalBackends: map[string]logical.Factory{
+			"pki": Factory,
+		},
+		EnableRaw: true,
+	}
+	cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
+		HandlerFunc: vaulthttp.Handler,
+	})
+	cluster.Start()
+	client := cluster.Cores[0].Client
+	mountPKIEndpoint(t, client, "pki")
+	return cluster, client
+}
+
+func getAcmeClientForCluster(t *testing.T, cluster *vault.TestCluster, baseUrl string, key crypto.Signer) *acme.Client {
+	coreAddr := cluster.Cores[0].Listeners[0].Address
+	tlsConfig := cluster.Cores[0].TLSConfig()
+
+	transport := cleanhttp.DefaultPooledTransport()
+	transport.TLSClientConfig = tlsConfig.Clone()
+	if err := http2.ConfigureTransport(transport); err != nil {
+		t.Fatal(err)
+	}
+	httpClient := &http.Client{Transport: transport}
+	if baseUrl[0] == '/' {
+		baseUrl = baseUrl[1:]
+	}
+	if !strings.HasPrefix(baseUrl, "v1/") {
+		baseUrl = "v1/" + baseUrl
+	}
+	if !strings.HasSuffix(baseUrl, "/") {
+		baseUrl = baseUrl + "/"
+	}
+	baseAcmeURL := fmt.Sprintf("https://%s/%s", coreAddr.String(), baseUrl)
+	return &acme.Client{
+		Key:          key,
+		HTTPClient:   httpClient,
+		DirectoryURL: baseAcmeURL + "directory",
+	}
+}
+
+func getEABKey(t *testing.T, client *api.Client, baseUrl string) (string, []byte) {
+	resp, err := client.Logical().WriteWithContext(ctx, path.Join("pki/", baseUrl, "/new-eab"), map[string]interface{}{})
+	require.NoError(t, err, "failed getting eab key")
+	require.NotNil(t, resp, "eab key returned nil response")
+	require.NotEmpty(t, resp.Data["id"], "eab key response missing id field")
+	kid := resp.Data["id"].(string)
+
+	require.NotEmpty(t, resp.Data["key"], "eab key response missing key field")
+	base64Key := resp.Data["key"].(string)
+	require.True(t, strings.HasPrefix(base64Key, "vault-eab-0-"), "%s should have had a prefix of vault-eab-0-", base64Key)
+	privateKeyBytes, err := base64.RawURLEncoding.DecodeString(base64Key)
+	require.NoError(t, err, "failed base64 decoding eab key response")
+
+	require.Equal(t, "hs", resp.Data["key_type"], "eab key_type field mismatch")
+	require.Equal(t, baseUrl+"directory", resp.Data["acme_directory"], "eab acme_directory field mismatch")
+	require.NotEmpty(t, resp.Data["created_on"], "empty created_on field")
+	_, err = time.Parse(time.RFC3339, resp.Data["created_on"].(string))
+	require.NoError(t, err, "failed parsing eab created_on field")
+
+	return kid, privateKeyBytes
+}
+
+func TestACMEClientRequestLimits(t *testing.T) {
+	cluster, client, _ := setupAcmeBackend(t)
+	defer cluster.Cleanup()
+
+	cases := []struct {
+		name           string
+		authorizations []acme.AuthzID
+		requestCSR     x509.CertificateRequest
+		valid          bool
+	}{
+		{
+			"validate-only-cn",
+			[]acme.AuthzID{
+				{"dns", "localhost"},
+			},
+			x509.CertificateRequest{
+				Subject: pkix.Name{CommonName: "localhost"},
+			},
+			true,
+		},
+		{
+			"validate-only-san",
+			[]acme.AuthzID{
+				{"dns", "localhost"},
+
}, + x509.CertificateRequest{ + DNSNames: []string{"localhost"}, + }, + true, + }, + { + "validate-only-ip-address", + []acme.AuthzID{ + {"ip", "127.0.0.1"}, + }, + x509.CertificateRequest{ + IPAddresses: []net.IP{{127, 0, 0, 1}}, + }, + true, + }, + } + + testCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + acmeConfig := map[string]interface{}{ + "enabled": true, + "allowed_issuers": "*", + "allowed_roles": "*", + "default_directory_policy": "sign-verbatim", + "dns_resolver": "", + "eab_policy_name": "", + } + _, err := client.Logical().WriteWithContext(testCtx, "pki/config/acme", acmeConfig) + require.NoError(t, err, "error configuring acme") + + for _, tc := range cases { + + // First Create Our Client + accountKey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err, "failed creating rsa key") + acmeClient := getAcmeClientForCluster(t, cluster, "/v1/pki/acme/", accountKey) + + discovery, err := acmeClient.Discover(testCtx) + require.NoError(t, err, "failed acme discovery call") + t.Logf("%v", discovery) + + acct, err := acmeClient.Register(testCtx, &acme.Account{ + Contact: []string{"mailto:test@example.com"}, + }, func(tosURL string) bool { return true }) + require.NoError(t, err, "failed registering account") + require.Equal(t, acme.StatusValid, acct.Status) + require.Contains(t, acct.Contact, "mailto:test@example.com") + require.Len(t, acct.Contact, 1) + + // Create an order + t.Logf("Testing Authorize Order on %s", "pki/acme") + identifiers := make([]string, len(tc.authorizations)) + for index, auth := range tc.authorizations { + identifiers[index] = auth.Value + } + + createOrder, err := acmeClient.AuthorizeOrder(testCtx, tc.authorizations) + require.NoError(t, err, "failed creating order") + require.Equal(t, acme.StatusPending, createOrder.Status) + require.Empty(t, createOrder.CertURL) + require.Equal(t, createOrder.URI+"/finalize", createOrder.FinalizeURL) + require.Len(t, createOrder.AuthzURLs, len(tc.authorizations), "expected same number of authzurls as identifiers") + + // HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow + // test. + markAuthorizationSuccess(t, client, acmeClient, acct, createOrder) + + // Submit the CSR + csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generated key for CSR") + csr, err := x509.CreateCertificateRequest(rand.Reader, &tc.requestCSR, csrKey) + require.NoError(t, err, "failed generating csr") + + certs, _, err := acmeClient.CreateOrderCert(testCtx, createOrder.FinalizeURL, csr, true) + + if tc.valid { + require.NoError(t, err, "failed finalizing order") + + // Validate we get a signed cert back + testAcmeCertSignedByCa(t, client, certs, "int-ca") + } else { + require.Error(t, err, "Not a valid CSR, should err") + } + } +} diff --git a/builtin/logical/pki/path_config_acme.go b/builtin/logical/pki/path_config_acme.go new file mode 100644 index 0000000..c3fa539 --- /dev/null +++ b/builtin/logical/pki/path_config_acme.go @@ -0,0 +1,387 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package pki
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/helper/errutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+const (
+	storageAcmeConfig      = "config/acme"
+	pathConfigAcmeHelpSyn  = "Configuration of ACME Endpoints"
+	pathConfigAcmeHelpDesc = "Here we configure:\n\nenabled=false, whether ACME is enabled, defaults to false meaning that clusters will by default not get ACME support,\nallowed_issuers=\"default\", which issuers are allowed for use with ACME; by default, this will only be the primary (default) issuer,\nallowed_roles=\"*\", which roles are allowed for use with ACME; by default these will be all roles matching our selection criteria,\ndefault_directory_policy=\"\", either \"forbid\", preventing the default directory from being used at all, \"role:<role_name>\" which is the role to be used for non-role-qualified ACME requests; or \"sign-verbatim\", the default meaning ACME issuance will be equivalent to sign-verbatim,\ndns_resolver=\"\", which specifies a custom DNS resolver to use for all ACME-related DNS lookups"
+	disableAcmeEnvVar      = "VAULT_DISABLE_PUBLIC_ACME"
+)
+
+type acmeConfigEntry struct {
+	Enabled                bool          `json:"enabled"`
+	AllowedIssuers         []string      `json:"allowed_issuers"`
+	AllowedRoles           []string      `json:"allowed_roles"`
+	AllowRoleExtKeyUsage   bool          `json:"allow_role_ext_key_usage"`
+	DefaultDirectoryPolicy string        `json:"default_directory_policy"`
+	DNSResolver            string        `json:"dns_resolver"`
+	EabPolicyName          EabPolicyName `json:"eab_policy_name"`
+}
+
+var defaultAcmeConfig = acmeConfigEntry{
+	Enabled:                false,
+	AllowedIssuers:         []string{"*"},
+	AllowedRoles:           []string{"*"},
+	AllowRoleExtKeyUsage:   false,
+	DefaultDirectoryPolicy: "sign-verbatim",
+	DNSResolver:            "",
+	EabPolicyName:          eabPolicyNotRequired,
+}
+
+func (sc *storageContext) getAcmeConfig() (*acmeConfigEntry, error) {
+	entry, err := sc.Storage.Get(sc.Context, storageAcmeConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	var mapping acmeConfigEntry
+	if entry == nil {
+		mapping = defaultAcmeConfig
+		return &mapping, nil
+	}
+
+	if err := entry.DecodeJSON(&mapping); err != nil {
+		return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode ACME configuration: %v", err)}
+	}
+
+	return &mapping, nil
+}
+
+func (sc *storageContext) setAcmeConfig(entry *acmeConfigEntry) error {
+	json, err := logical.StorageEntryJSON(storageAcmeConfig, entry)
+	if err != nil {
+		return fmt.Errorf("failed creating storage entry: %w", err)
+	}
+
+	if err := sc.Storage.Put(sc.Context, json); err != nil {
+		return fmt.Errorf("failed writing storage entry: %w", err)
+	}
+
+	sc.Backend.acmeState.markConfigDirty()
+	return nil
+}
+
+func pathAcmeConfig(b *backend) *framework.Path {
+	return &framework.Path{
+		Pattern: "config/acme",
+
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixPKI,
+		},
+
+		Fields: map[string]*framework.FieldSchema{
+			"enabled": {
+				Type:        framework.TypeBool,
+				Description: `whether ACME is enabled, defaults to false meaning that clusters will by default not get ACME support`,
+				Default:     false,
+			},
+			"allowed_issuers": {
+				Type:        framework.TypeCommaStringSlice,
+				Description: `which issuers are allowed for use with ACME; by default, this will only be the primary (default) issuer`,
+				Default:     []string{"*"},
+			},
+			"allowed_roles": {
+				Type:        framework.TypeCommaStringSlice,
+				Description: `which roles are allowed for use with ACME; by default via '*', these will be all roles including sign-verbatim; when concrete role names are specified, any default_directory_policy role must be included to allow usage of the default acme directories under /pki/acme/directory and /pki/issuer/:issuer_id/acme/directory.`,
+				Default:     []string{"*"},
+			},
+			"allow_role_ext_key_usage": {
+				Type:        framework.TypeBool,
+				Description: `whether the ExtKeyUsage field from a role is used, defaults to false meaning that certificates will be signed with ServerAuth.`,
+				Default:     false,
+			},
+			"default_directory_policy": {
+				Type:        framework.TypeString,
+				Description: `the policy to be used for non-role-qualified ACME requests; by default ACME issuance will be otherwise unrestricted, equivalent to the sign-verbatim endpoint; one may also specify a role to use as this policy, as "role:<role_name>"; the specified role must be allowed by allowed_roles`,
+				Default:     "sign-verbatim",
+			},
+			"dns_resolver": {
+				Type:        framework.TypeString,
+				Description: `DNS resolver to use for domain resolution on this mount. Defaults to using the default system resolver. Must be in the format <host>:<port>, with both parts mandatory.`,
+				Default:     "",
+			},
+			"eab_policy": {
+				Type:        framework.TypeString,
+				Description: `Specify the policy to use for external account binding behaviour, 'not-required', 'new-account-required' or 'always-required'`,
+				Default:     "always-required",
+			},
+		},
+
+		Operations: map[logical.Operation]framework.OperationHandler{
+			logical.ReadOperation: &framework.PathOperation{
+				DisplayAttrs: &framework.DisplayAttributes{
+					OperationSuffix: "acme-configuration",
+				},
+				Callback: b.pathAcmeRead,
+			},
+			logical.UpdateOperation: &framework.PathOperation{
+				Callback: b.pathAcmeWrite,
+				DisplayAttrs: &framework.DisplayAttributes{
+					OperationVerb:   "configure",
+					OperationSuffix: "acme",
+				},
+				// Read more about why these flags are set in backend.go.
+ ForwardPerformanceStandby: true, + ForwardPerformanceSecondary: true, + }, + }, + + HelpSynopsis: pathConfigAcmeHelpSyn, + HelpDescription: pathConfigAcmeHelpDesc, + } +} + +func (b *backend) pathAcmeRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + sc := b.makeStorageContext(ctx, req.Storage) + config, err := sc.getAcmeConfig() + if err != nil { + return nil, err + } + + var warnings []string + if config.Enabled { + _, err := getBasePathFromClusterConfig(sc) + if err != nil { + warnings = append(warnings, err.Error()) + } + } + + return genResponseFromAcmeConfig(config, warnings), nil +} + +func genResponseFromAcmeConfig(config *acmeConfigEntry, warnings []string) *logical.Response { + response := &logical.Response{ + Data: map[string]interface{}{ + "allowed_roles": config.AllowedRoles, + "allow_role_ext_key_usage": config.AllowRoleExtKeyUsage, + "allowed_issuers": config.AllowedIssuers, + "default_directory_policy": config.DefaultDirectoryPolicy, + "enabled": config.Enabled, + "dns_resolver": config.DNSResolver, + "eab_policy": config.EabPolicyName, + }, + Warnings: warnings, + } + + // TODO: Add some nice warning if we are on a replication cluster and path isn't set + + return response +} + +func (b *backend) pathAcmeWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + sc := b.makeStorageContext(ctx, req.Storage) + + config, err := sc.getAcmeConfig() + if err != nil { + return nil, err + } + + if enabledRaw, ok := d.GetOk("enabled"); ok { + config.Enabled = enabledRaw.(bool) + } + + if allowedRolesRaw, ok := d.GetOk("allowed_roles"); ok { + config.AllowedRoles = allowedRolesRaw.([]string) + if len(config.AllowedRoles) == 0 { + return nil, fmt.Errorf("allowed_roles must take a non-zero length value; specify '*' as the value to allow anything or specify enabled=false to disable ACME entirely") + } + } + + if allowRoleExtKeyUsageRaw, ok := d.GetOk("allow_role_ext_key_usage"); ok { + config.AllowRoleExtKeyUsage = allowRoleExtKeyUsageRaw.(bool) + } + + if defaultDirectoryPolicyRaw, ok := d.GetOk("default_directory_policy"); ok { + config.DefaultDirectoryPolicy = defaultDirectoryPolicyRaw.(string) + } + + if allowedIssuersRaw, ok := d.GetOk("allowed_issuers"); ok { + config.AllowedIssuers = allowedIssuersRaw.([]string) + if len(config.AllowedIssuers) == 0 { + return nil, fmt.Errorf("allowed_issuers must take a non-zero length value; specify '*' as the value to allow anything or specify enabled=false to disable ACME entirely") + } + } + + if dnsResolverRaw, ok := d.GetOk("dns_resolver"); ok { + config.DNSResolver = dnsResolverRaw.(string) + if config.DNSResolver != "" { + addr, _, err := net.SplitHostPort(config.DNSResolver) + if err != nil { + return nil, fmt.Errorf("failed to parse DNS resolver address: %w", err) + } + if addr == "" { + return nil, fmt.Errorf("failed to parse DNS resolver address: got empty address") + } + if net.ParseIP(addr) == nil { + return nil, fmt.Errorf("failed to parse DNS resolver address: expected IPv4/IPv6 address, likely got hostname") + } + } + } + + if eabPolicyRaw, ok := d.GetOk("eab_policy"); ok { + eabPolicy, err := getEabPolicyByString(eabPolicyRaw.(string)) + if err != nil { + return nil, fmt.Errorf("invalid eab policy name provided, valid values are '%s', '%s', '%s'", + eabPolicyNotRequired, eabPolicyNewAccountRequired, eabPolicyAlwaysRequired) + } + config.EabPolicyName = eabPolicy.Name + } + + // Validate Default Directory Behavior: + 
defaultDirectoryPolicyType, err := getDefaultDirectoryPolicyType(config.DefaultDirectoryPolicy)
+	if err != nil {
+		return nil, fmt.Errorf("invalid default_directory_policy: %w", err)
+	}
+	defaultDirectoryRoleName := ""
+	switch defaultDirectoryPolicyType {
+	case Forbid:
+	case SignVerbatim:
+	case Role:
+		defaultDirectoryRoleName, err = getDefaultDirectoryPolicyRole(config.DefaultDirectoryPolicy)
+		if err != nil {
+			return nil, fmt.Errorf("failed extracting role name from default directory policy: %w", err)
+		}
+
+		_, err := getAndValidateAcmeRole(sc, defaultDirectoryRoleName)
+		if err != nil {
+			return nil, fmt.Errorf("default directory policy role %v is not a valid ACME role: %w", defaultDirectoryRoleName, err)
+		}
+	default:
+		return nil, fmt.Errorf("validation for the type of policy defined by %v is undefined", config.DefaultDirectoryPolicy)
+	}
+
+	// Validate Allowed Roles
+	allowAnyRole := len(config.AllowedRoles) == 1 && config.AllowedRoles[0] == "*"
+	foundDefault := false
+	if !allowAnyRole {
+		for index, name := range config.AllowedRoles {
+			if name == "*" {
+				return nil, fmt.Errorf("cannot use '*' as role name at index %d", index)
+			}
+
+			_, err := getAndValidateAcmeRole(sc, name)
+			if err != nil {
+				return nil, fmt.Errorf("allowed_role %v is not a valid acme role: %w", name, err)
+			}
+
+			if defaultDirectoryPolicyType == Role && name == defaultDirectoryRoleName {
+				foundDefault = true
+			}
+		}
+
+		if !foundDefault && defaultDirectoryPolicyType == Role {
+			return nil, fmt.Errorf("default directory policy %v was not specified in allowed_roles: %v", config.DefaultDirectoryPolicy, config.AllowedRoles)
+		}
+	}
+
+	allowAnyIssuer := len(config.AllowedIssuers) == 1 && config.AllowedIssuers[0] == "*"
+	if !allowAnyIssuer {
+		for index, name := range config.AllowedIssuers {
+			if name == "*" {
+				return nil, fmt.Errorf("cannot use '*' as issuer name at index %d", index)
+			}
+
+			_, err := sc.resolveIssuerReference(name)
+			if err != nil {
+				return nil, fmt.Errorf("failed validating allowed_issuers: unable to fetch issuer: %v: %w", name, err)
+			}
+		}
+	}
+
+	// Check to make sure that we have a proper value for the cluster path which ACME requires
+	if config.Enabled {
+		_, err = getBasePathFromClusterConfig(sc)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	var warnings []string
+	// Lastly, let's verify that the configuration is honored/invalidated by the public ACME env var.
+	isPublicAcmeDisabledByEnv, err := isPublicACMEDisabledByEnv()
+	if err != nil {
+		warnings = append(warnings, err.Error())
+	}
+	if isPublicAcmeDisabledByEnv && config.Enabled {
+		eabPolicy := getEabPolicyByName(config.EabPolicyName)
+		if !eabPolicy.OverrideEnvDisablingPublicAcme() {
+			resp := logical.ErrorResponse("%s env var is enabled, ACME EAB policy needs to be '%s' with ACME enabled",
+				disableAcmeEnvVar, eabPolicyAlwaysRequired)
+			resp.Warnings = warnings
+			return resp, nil
+		}
+	}
+
+	err = sc.setAcmeConfig(config)
+	if err != nil {
+		return nil, err
+	}
+
+	return genResponseFromAcmeConfig(config, warnings), nil
+}
+
+func isPublicACMEDisabledByEnv() (bool, error) {
+	disableAcmeRaw, ok := os.LookupEnv(disableAcmeEnvVar)
+	if !ok {
+		return false, nil
+	}
+
+	disableAcme, err := strconv.ParseBool(disableAcmeRaw)
+	if err != nil {
+		// The environment variable was set but we couldn't parse the value as a boolean; assume
+		// the operator wanted public ACME disabled.
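+		// (We fail closed: for example, VAULT_DISABLE_PUBLIC_ACME=yes does not
+		// parse under strconv.ParseBool, so public ACME stays disabled and the
+		// parse error is surfaced to the operator as a warning on the config
+		// write above.)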
+ return true, fmt.Errorf("failed parsing environment variable %s: %w", disableAcmeEnvVar, err) + } + + return disableAcme, nil +} + +func getDefaultDirectoryPolicyType(defaultDirectoryPolicy string) (DefaultDirectoryPolicyType, error) { + switch { + case defaultDirectoryPolicy == "forbid": + return Forbid, nil + case defaultDirectoryPolicy == "sign-verbatim": + return SignVerbatim, nil + case strings.HasPrefix(defaultDirectoryPolicy, "role:"): + if len(defaultDirectoryPolicy) == 5 { + return Forbid, fmt.Errorf("no role specified by policy %v", defaultDirectoryPolicy) + } + return Role, nil + default: + return Forbid, fmt.Errorf("string %v not a valid Default Directory Policy", defaultDirectoryPolicy) + } +} + +func getDefaultDirectoryPolicyRole(defaultDirectoryPolicy string) (string, error) { + policyType, err := getDefaultDirectoryPolicyType(defaultDirectoryPolicy) + if err != nil { + return "", err + } + if policyType != Role { + return "", fmt.Errorf("default directory policy %v is not a role-based-policy", defaultDirectoryPolicy) + } + return defaultDirectoryPolicy[5:], nil +} + +type DefaultDirectoryPolicyType int + +const ( + Forbid DefaultDirectoryPolicyType = iota + SignVerbatim + Role +) diff --git a/builtin/logical/pki/path_config_acme_test.go b/builtin/logical/pki/path_config_acme_test.go new file mode 100644 index 0000000..a044cc5 --- /dev/null +++ b/builtin/logical/pki/path_config_acme_test.go @@ -0,0 +1,132 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestAcmeConfig(t *testing.T) { + cluster, client, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + + cases := []struct { + name string + AcmeConfig map[string]interface{} + prefixUrl string + validConfig bool + works bool + }{ + {"unspecified-root", map[string]interface{}{ + "enabled": true, + "allowed_issuers": "*", + "allowed_roles": "*", + "dns_resolver": "", + "eab_policy_name": "", + }, "acme/", true, true}, + {"bad-policy-root", map[string]interface{}{ + "enabled": true, + "allowed_issuers": "*", + "allowed_roles": "*", + "default_directory_policy": "bad", + "dns_resolver": "", + "eab_policy_name": "", + }, "acme/", false, false}, + {"forbid-root", map[string]interface{}{ + "enabled": true, + "allowed_issuers": "*", + "allowed_roles": "*", + "default_directory_policy": "forbid", + "dns_resolver": "", + "eab_policy_name": "", + }, "acme/", true, false}, + {"sign-verbatim-root", map[string]interface{}{ + "enabled": true, + "allowed_issuers": "*", + "allowed_roles": "*", + "default_directory_policy": "sign-verbatim", + "dns_resolver": "", + "eab_policy_name": "", + }, "acme/", true, true}, + {"role-root", map[string]interface{}{ + "enabled": true, + "allowed_issuers": "*", + "allowed_roles": "*", + "default_directory_policy": "role:exists", + "dns_resolver": "", + "eab_policy_name": "", + }, "acme/", true, true}, + {"bad-role-root", map[string]interface{}{ + "enabled": true, + "allowed_issuers": "*", + "allowed_roles": "*", + "default_directory_policy": "role:notgood", + "dns_resolver": "", + "eab_policy_name": "", + }, "acme/", false, true}, + {"disallowed-role-root", map[string]interface{}{ + "enabled": true, + "allowed_issuers": "*", + "allowed_roles": "good", + "default_directory_policy": "role:exists", + "dns_resolver": "", + "eab_policy_name": "", + }, "acme/", false, false}, + } + + roleConfig := map[string]interface{}{ + "issuer_ref": 
"default", + "allowed_domains": "example.com", + "allow_subdomains": true, + "max_ttl": "720h", + } + + testCtx := context.Background() + + for _, tc := range cases { + deadline := time.Now().Add(1 * time.Minute) + subTestCtx, _ := context.WithDeadline(testCtx, deadline) + + _, err := client.Logical().WriteWithContext(subTestCtx, "pki/roles/exists", roleConfig) + require.NoError(t, err) + _, err = client.Logical().WriteWithContext(subTestCtx, "pki/roles/good", roleConfig) + require.NoError(t, err) + + t.Run(tc.name, func(t *testing.T) { + _, err := client.Logical().WriteWithContext(subTestCtx, "pki/config/acme", tc.AcmeConfig) + + if tc.validConfig { + require.NoError(t, err) + } else { + require.Error(t, err) + return + } + + _, err = client.Logical().ReadWithContext(subTestCtx, "pki/acme/directory") + if tc.works { + require.NoError(t, err) + + baseAcmeURL := "/v1/pki/" + tc.prefixUrl + accountKey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err, "failed creating rsa key") + + acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) + + // Create new account + _, err = acmeClient.Discover(subTestCtx) + require.NoError(t, err, "failed acme discovery call") + } else { + require.Error(t, err, "Acme Configuration should prevent usage") + } + + t.Logf("Completed case %v", tc.name) + }) + } +} diff --git a/builtin/logical/pki/path_config_ca.go b/builtin/logical/pki/path_config_ca.go new file mode 100644 index 0000000..e77386f --- /dev/null +++ b/builtin/logical/pki/path_config_ca.go @@ -0,0 +1,429 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "context" + "net/http" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathConfigCA(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "config/ca", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "configure", + OperationSuffix: "ca", + }, + + Fields: map[string]*framework.FieldSchema{ + "pem_bundle": { + Type: framework.TypeString, + Description: `PEM-format, concatenated unencrypted +secret key and certificate.`, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathImportIssuers, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "mapping": { + Type: framework.TypeMap, + Description: "A mapping of issuer_id to key_id for all issuers included in this request", + Required: true, + }, + "imported_keys": { + Type: framework.TypeCommaStringSlice, + Description: "Net-new keys imported as a part of this request", + Required: true, + }, + "imported_issuers": { + Type: framework.TypeCommaStringSlice, + Description: "Net-new issuers imported as a part of this request", + Required: true, + }, + "existing_keys": { + Type: framework.TypeCommaStringSlice, + Description: "Existing keys specified as part of the import bundle of this request", + Required: true, + }, + "existing_issuers": { + Type: framework.TypeCommaStringSlice, + Description: "Existing issuers specified as part of the import bundle of this request", + Required: true, + }, + }, + }}, + }, + // Read more about why these flags are set in backend.go. 
+ ForwardPerformanceStandby: true, + ForwardPerformanceSecondary: true, + }, + }, + + HelpSynopsis: pathConfigCAHelpSyn, + HelpDescription: pathConfigCAHelpDesc, + } +} + +const pathConfigCAHelpSyn = ` +Set the CA certificate and private key used for generated credentials. +` + +const pathConfigCAHelpDesc = ` +This sets the CA information used for credentials generated by this +by this mount. This must be a PEM-format, concatenated unencrypted +secret key and certificate. + +For security reasons, the secret key cannot be retrieved later. +` + +func pathConfigIssuers(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "config/issuers", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + }, + + Fields: map[string]*framework.FieldSchema{ + defaultRef: { + Type: framework.TypeString, + Description: `Reference (name or identifier) to the default issuer.`, + }, + "default_follows_latest_issuer": { + Type: framework.TypeBool, + Description: `Whether the default issuer should automatically follow the latest generated or imported issuer. Defaults to false.`, + Default: false, + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathCAIssuersRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "issuers-configuration", + }, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "default": { + Type: framework.TypeString, + Description: `Reference (name or identifier) to the default issuer.`, + Required: true, + }, + "default_follows_latest_issuer": { + Type: framework.TypeBool, + Description: `Whether the default issuer should automatically follow the latest generated or imported issuer. Defaults to false.`, + Required: true, + }, + }, + }}, + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathCAIssuersWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "issuers", + }, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "default": { + Type: framework.TypeString, + Description: `Reference (name or identifier) to the default issuer.`, + }, + "default_follows_latest_issuer": { + Type: framework.TypeBool, + Description: `Whether the default issuer should automatically follow the latest generated or imported issuer. Defaults to false.`, + }, + }, + }}, + }, + // Read more about why these flags are set in backend.go. 
+ ForwardPerformanceStandby: true, + ForwardPerformanceSecondary: true, + }, + }, + + HelpSynopsis: pathConfigIssuersHelpSyn, + HelpDescription: pathConfigIssuersHelpDesc, + } +} + +func pathReplaceRoot(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "root/replace", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "replace", + OperationSuffix: "root", + }, + + Fields: map[string]*framework.FieldSchema{ + "default": { + Type: framework.TypeString, + Description: `Reference (name or identifier) to the default issuer.`, + Default: "next", + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathCAIssuersWrite, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "default": { + Type: framework.TypeString, + Description: `Reference (name or identifier) to the default issuer.`, + Required: true, + }, + "default_follows_latest_issuer": { + Type: framework.TypeBool, + Description: `Whether the default issuer should automatically follow the latest generated or imported issuer. Defaults to false.`, + Required: true, + }, + }, + }}, + }, + // Read more about why these flags are set in backend.go. + ForwardPerformanceStandby: true, + ForwardPerformanceSecondary: true, + }, + }, + + HelpSynopsis: pathConfigIssuersHelpSyn, + HelpDescription: pathConfigIssuersHelpDesc, + } +} + +func (b *backend) pathCAIssuersRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + if b.useLegacyBundleCaStorage() { + return logical.ErrorResponse("Cannot read defaults until migration has completed"), nil + } + + sc := b.makeStorageContext(ctx, req.Storage) + config, err := sc.getIssuersConfig() + if err != nil { + return logical.ErrorResponse("Error loading issuers configuration: " + err.Error()), nil + } + + return b.formatCAIssuerConfigRead(config), nil +} + +func (b *backend) formatCAIssuerConfigRead(config *issuerConfigEntry) *logical.Response { + return &logical.Response{ + Data: map[string]interface{}{ + defaultRef: config.DefaultIssuerId, + "default_follows_latest_issuer": config.DefaultFollowsLatestIssuer, + }, + } +} + +func (b *backend) pathCAIssuersWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Since we're planning on updating issuers here, grab the lock so we've + // got a consistent view. + b.issuersLock.Lock() + defer b.issuersLock.Unlock() + + if b.useLegacyBundleCaStorage() { + return logical.ErrorResponse("Cannot update defaults until migration has completed"), nil + } + + sc := b.makeStorageContext(ctx, req.Storage) + + // Validate the new default reference. + newDefault := data.Get(defaultRef).(string) + if len(newDefault) == 0 || newDefault == defaultRef { + return logical.ErrorResponse("Invalid issuer specification; must be non-empty and can't be 'default'."), nil + } + parsedIssuer, err := sc.resolveIssuerReference(newDefault) + if err != nil { + return logical.ErrorResponse("Error resolving issuer reference: " + err.Error()), nil + } + entry, err := sc.fetchIssuerById(parsedIssuer) + if err != nil { + return logical.ErrorResponse("Unable to fetch issuer: " + err.Error()), nil + } + + // Get the other new parameters. This doesn't exist on the /root/replace + // variant of this call. 
+ var followIssuer bool + followIssuersRaw, followOk := data.GetOk("default_follows_latest_issuer") + if followOk { + followIssuer = followIssuersRaw.(bool) + } + + // Update the config + config, err := sc.getIssuersConfig() + if err != nil { + return logical.ErrorResponse("Unable to fetch existing issuers configuration: " + err.Error()), nil + } + config.DefaultIssuerId = parsedIssuer + if followOk { + config.DefaultFollowsLatestIssuer = followIssuer + } + + // Add our warning if necessary. + response := b.formatCAIssuerConfigRead(config) + if len(entry.KeyID) == 0 { + msg := "This selected default issuer has no key associated with it. Some operations like issuing certificates and signing CRLs will be unavailable with the requested default issuer until a key is imported or the default issuer is changed." + response.AddWarning(msg) + b.Logger().Error(msg) + } + + if err := sc.setIssuersConfig(config); err != nil { + return logical.ErrorResponse("Error updating issuer configuration: " + err.Error()), nil + } + + return response, nil +} + +const pathConfigIssuersHelpSyn = `Read and set the default issuer certificate for signing.` + +const pathConfigIssuersHelpDesc = ` +This path allows configuration of issuer parameters. + +Presently, the "default" parameter controls which issuer is the default, +accessible by the existing signing paths (/root/sign-intermediate, +/root/sign-self-issued, /sign-verbatim, /sign/:role, and /issue/:role). + +The /root/replace path is aliased to this path, with default taking the +value of the issuer with the name "next", if it exists. +` + +func pathConfigKeys(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "config/keys", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + }, + + Fields: map[string]*framework.FieldSchema{ + defaultRef: { + Type: framework.TypeString, + Description: `Reference (name or identifier) of the default key.`, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathKeyDefaultWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "keys", + }, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "default": { + Type: framework.TypeString, + Description: `Reference (name or identifier) to the default issuer.`, + Required: true, + }, + }, + }}, + }, + ForwardPerformanceStandby: true, + ForwardPerformanceSecondary: true, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathKeyDefaultRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "keys-configuration", + }, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "default": { + Type: framework.TypeString, + Description: `Reference (name or identifier) to the default issuer.`, + }, + }, + }}, + }, + ForwardPerformanceStandby: false, + ForwardPerformanceSecondary: false, + }, + }, + + HelpSynopsis: pathConfigKeysHelpSyn, + HelpDescription: pathConfigKeysHelpDesc, + } +} + +func (b *backend) pathKeyDefaultRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + if b.useLegacyBundleCaStorage() { + return logical.ErrorResponse("Cannot read key defaults until migration has completed"), nil + } + + sc := b.makeStorageContext(ctx, req.Storage) + config, err := 
sc.getKeysConfig() + if err != nil { + return logical.ErrorResponse("Error loading keys configuration: " + err.Error()), nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + defaultRef: config.DefaultKeyId, + }, + }, nil +} + +func (b *backend) pathKeyDefaultWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Since we're planning on updating keys here, grab the lock so we've + // got a consistent view. + b.issuersLock.Lock() + defer b.issuersLock.Unlock() + + if b.useLegacyBundleCaStorage() { + return logical.ErrorResponse("Cannot update key defaults until migration has completed"), nil + } + + newDefault := data.Get(defaultRef).(string) + if len(newDefault) == 0 || newDefault == defaultRef { + return logical.ErrorResponse("Invalid key specification; must be non-empty and can't be 'default'."), nil + } + + sc := b.makeStorageContext(ctx, req.Storage) + parsedKey, err := sc.resolveKeyReference(newDefault) + if err != nil { + return logical.ErrorResponse("Error resolving issuer reference: " + err.Error()), nil + } + + err = sc.updateDefaultKeyId(parsedKey) + if err != nil { + return logical.ErrorResponse("Error updating issuer configuration: " + err.Error()), nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + defaultRef: parsedKey, + }, + }, nil +} + +const pathConfigKeysHelpSyn = `Read and set the default key used for signing` + +const pathConfigKeysHelpDesc = ` +This path allows configuration of key parameters. + +The "default" parameter controls which key is the default used by signing paths. +` diff --git a/builtin/logical/pki/path_config_cluster.go b/builtin/logical/pki/path_config_cluster.go new file mode 100644 index 0000000..4bdfb82 --- /dev/null +++ b/builtin/logical/pki/path_config_cluster.go @@ -0,0 +1,198 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "context" + "fmt" + "net/http" + + "github.com/asaskevich/govalidator" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathConfigCluster(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "config/cluster", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + }, + + Fields: map[string]*framework.FieldSchema{ + "path": { + Type: framework.TypeString, + Description: `Canonical URI to this mount on this performance +replication cluster's external address. This is for resolving AIA URLs and +providing the {{cluster_path}} template parameter but might be used for other +purposes in the future. + +This should only point back to this particular PR replica and should not ever +point to another PR cluster. It may point to any node in the PR replica, +including standby nodes, and need not always point to the active node. + +For example: https://pr1.vault.example.com:8200/v1/pki`, + }, + "aia_path": { + Type: framework.TypeString, + Description: `Optional URI to this mount's AIA distribution +point; may refer to an external non-Vault responder. This is for resolving AIA +URLs and providing the {{cluster_aia_path}} template parameter and will not +be used for other purposes. As such, unlike path above, this could safely +be an insecure transit mechanism (like HTTP without TLS). 
+ +For example: http://cdn.example.com/pr1/pki`, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "cluster", + }, + Callback: b.pathWriteCluster, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "path": { + Type: framework.TypeString, + Description: `Canonical URI to this mount on this performance +replication cluster's external address. This is for resolving AIA URLs and +providing the {{cluster_path}} template parameter but might be used for other +purposes in the future. + +This should only point back to this particular PR replica and should not ever +point to another PR cluster. It may point to any node in the PR replica, +including standby nodes, and need not always point to the active node. + +For example: https://pr1.vault.example.com:8200/v1/pki`, + }, + "aia_path": { + Type: framework.TypeString, + Description: `Optional URI to this mount's AIA distribution +point; may refer to an external non-Vault responder. This is for resolving AIA +URLs and providing the {{cluster_aia_path}} template parameter and will not +be used for other purposes. As such, unlike path above, this could safely +be an insecure transit mechanism (like HTTP without TLS). + +For example: http://cdn.example.com/pr1/pki`, + }, + }, + }}, + }, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathReadCluster, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "cluster-configuration", + }, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "path": { + Type: framework.TypeString, + Description: `Canonical URI to this mount on this performance +replication cluster's external address. This is for resolving AIA URLs and +providing the {{cluster_path}} template parameter but might be used for other +purposes in the future. + +This should only point back to this particular PR replica and should not ever +point to another PR cluster. It may point to any node in the PR replica, +including standby nodes, and need not always point to the active node. + +For example: https://pr1.vault.example.com:8200/v1/pki`, + Required: true, + }, + "aia_path": { + Type: framework.TypeString, + Description: `Optional URI to this mount's AIA distribution +point; may refer to an external non-Vault responder. This is for resolving AIA +URLs and providing the {{cluster_aia_path}} template parameter and will not +be used for other purposes. As such, unlike path above, this could safely +be an insecure transit mechanism (like HTTP without TLS). 
+ +For example: http://cdn.example.com/pr1/pki`, + }, + }, + }}, + }, + }, + }, + + HelpSynopsis: pathConfigClusterHelpSyn, + HelpDescription: pathConfigClusterHelpDesc, + } +} + +func (b *backend) pathReadCluster(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + sc := b.makeStorageContext(ctx, req.Storage) + cfg, err := sc.getClusterConfig() + if err != nil { + return nil, err + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "path": cfg.Path, + "aia_path": cfg.AIAPath, + }, + } + + return resp, nil +} + +func (b *backend) pathWriteCluster(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + sc := b.makeStorageContext(ctx, req.Storage) + cfg, err := sc.getClusterConfig() + if err != nil { + return nil, err + } + + if value, ok := data.GetOk("path"); ok { + cfg.Path = value.(string) + + // This field is required by ACME, if ever we allow un-setting in the + // future, this code will need to verify that ACME is not enabled. + if !govalidator.IsURL(cfg.Path) { + return nil, fmt.Errorf("invalid, non-URL path given to cluster: %v", cfg.Path) + } + } + + if value, ok := data.GetOk("aia_path"); ok { + cfg.AIAPath = value.(string) + if !govalidator.IsURL(cfg.AIAPath) { + return nil, fmt.Errorf("invalid, non-URL aia_path given to cluster: %v", cfg.AIAPath) + } + } + + if err := sc.writeClusterConfig(cfg); err != nil { + return nil, err + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "path": cfg.Path, + "aia_path": cfg.AIAPath, + }, + } + + return resp, nil +} + +const pathConfigClusterHelpSyn = ` +Set cluster-local configuration, including address to this PR cluster. +` + +const pathConfigClusterHelpDesc = ` +This path allows you to set cluster-local configuration, including the +URI to this performance replication cluster. This allows you to use +templated AIA URLs with /config/urls and /issuer/:issuer_ref, setting the +reference to the cluster's URI. + +Only one address can be specified for any given cluster. +` diff --git a/builtin/logical/pki/path_config_crl.go b/builtin/logical/pki/path_config_crl.go new file mode 100644 index 0000000..c06da87 --- /dev/null +++ b/builtin/logical/pki/path_config_crl.go @@ -0,0 +1,475 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "context" + "fmt" + "net/http" + + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/helper/constants" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/logical" +) + +const latestCrlConfigVersion = 1 + +// CRLConfig holds basic CRL configuration information +type crlConfig struct { + Version int `json:"version"` + Expiry string `json:"expiry"` + Disable bool `json:"disable"` + OcspDisable bool `json:"ocsp_disable"` + AutoRebuild bool `json:"auto_rebuild"` + AutoRebuildGracePeriod string `json:"auto_rebuild_grace_period"` + OcspExpiry string `json:"ocsp_expiry"` + EnableDelta bool `json:"enable_delta"` + DeltaRebuildInterval string `json:"delta_rebuild_interval"` + UseGlobalQueue bool `json:"cross_cluster_revocation"` + UnifiedCRL bool `json:"unified_crl"` + UnifiedCRLOnExistingPaths bool `json:"unified_crl_on_existing_paths"` +} + +// Implicit default values for the config if it does not exist. 
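+// If no config/crl entry has been stored yet, reads are expected to fall back to
+// these values; note that Expiry (the CRL validity window) and OcspExpiry (the
+// OCSP NextUpdate window) are independent knobs.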
+var defaultCrlConfig = crlConfig{
+ Version: latestCrlConfigVersion,
+ Expiry: "72h",
+ Disable: false,
+ OcspDisable: false,
+ OcspExpiry: "12h",
+ AutoRebuild: false,
+ AutoRebuildGracePeriod: "12h",
+ EnableDelta: false,
+ DeltaRebuildInterval: "15m",
+ UseGlobalQueue: false,
+ UnifiedCRL: false,
+ UnifiedCRLOnExistingPaths: false,
+}
+
+func pathConfigCRL(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "config/crl",
+
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationPrefix: operationPrefixPKI,
+ },
+
+ Fields: map[string]*framework.FieldSchema{
+ "expiry": {
+ Type: framework.TypeString,
+ Description: `The amount of time the generated CRL should be
+valid; defaults to 72 hours`,
+ Default: "72h",
+ },
+ "disable": {
+ Type: framework.TypeBool,
+ Description: `If set to true, disables generating the CRL entirely.`,
+ },
+ "ocsp_disable": {
+ Type: framework.TypeBool,
+ Description: `If set to true, OCSP unauthorized responses will be returned.`,
+ },
+ "ocsp_expiry": {
+ Type: framework.TypeString,
+ Description: `The amount of time an OCSP response will be valid (controls
+the NextUpdate field); defaults to 12 hours`,
+ Default: "12h",
+ },
+ "auto_rebuild": {
+ Type: framework.TypeBool,
+ Description: `If set to true, enables automatic rebuilding of the CRL`,
+ },
+ "auto_rebuild_grace_period": {
+ Type: framework.TypeString,
+ Description: `The time before the CRL expires to automatically rebuild it, when enabled. Must be shorter than the CRL expiry. Defaults to 12h.`,
+ Default: "12h",
+ },
+ "enable_delta": {
+ Type: framework.TypeBool,
+ Description: `Whether to enable delta CRLs between authoritative CRL rebuilds`,
+ },
+ "delta_rebuild_interval": {
+ Type: framework.TypeString,
+ Description: `The time between delta CRL rebuilds if a new revocation has occurred. Must be shorter than the CRL expiry. Defaults to 15m.`,
+ Default: "15m",
+ },
+ "cross_cluster_revocation": {
+ Type: framework.TypeBool,
+ Description: `Whether to enable a global, cross-cluster revocation queue.
+Must be used with auto_rebuild=true.`,
+ },
+ "unified_crl": {
+ Type: framework.TypeBool,
+ Description: `If set to true, enables global replication of revocation entries,
+also enabling unified versions of OCSP and CRLs if their respective features
+are enabled ('disable' governs CRLs and 'ocsp_disable' governs OCSP).`,
+ Default: "false",
+ },
+ "unified_crl_on_existing_paths": {
+ Type: framework.TypeBool,
+ Description: `If set to true,
+existing CRL and OCSP paths will return the unified CRL instead of a response based on cluster-local data`,
+ Default: "false",
+ },
+ },
+
+ Operations: map[logical.Operation]framework.OperationHandler{
+ logical.ReadOperation: &framework.PathOperation{
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationSuffix: "crl-configuration",
+ },
+ Callback: b.pathCRLRead,
+ Responses: map[int][]framework.Response{
+ http.StatusOK: {{
+ Description: "OK",
+ Fields: map[string]*framework.FieldSchema{
+ "expiry": {
+ Type: framework.TypeString,
+ Description: `The amount of time the generated CRL should be
+valid; defaults to 72 hours`,
+ Required: true,
+ },
+ "disable": {
+ Type: framework.TypeBool,
+ Description: `If set to true, disables generating the CRL entirely.`,
+ Required: true,
+ },
+ "ocsp_disable": {
+ Type: framework.TypeBool,
+ Description: `If set to true, OCSP unauthorized responses will be returned.`,
+ Required: true,
+ },
+ "ocsp_expiry": {
+ Type: framework.TypeString,
+ Description: `The amount of time an OCSP response will be valid (controls
+the NextUpdate field); defaults to 12 hours`,
+ Required: true,
+ },
+ "auto_rebuild": {
+ Type: framework.TypeBool,
+ Description: `If set to true, enables automatic rebuilding of the CRL`,
+ Required: true,
+ },
+ "auto_rebuild_grace_period": {
+ Type: framework.TypeString,
+ Description: `The time before the CRL expires to automatically rebuild it, when enabled. Must be shorter than the CRL expiry. Defaults to 12h.`,
+ Required: true,
+ },
+ "enable_delta": {
+ Type: framework.TypeBool,
+ Description: `Whether to enable delta CRLs between authoritative CRL rebuilds`,
+ Required: true,
+ },
+ "delta_rebuild_interval": {
+ Type: framework.TypeString,
+ Description: `The time between delta CRL rebuilds if a new revocation has occurred. Must be shorter than the CRL expiry. Defaults to 15m.`,
+ Required: true,
+ },
+ "cross_cluster_revocation": {
+ Type: framework.TypeBool,
+ Description: `Whether to enable a global, cross-cluster revocation queue.
+Must be used with auto_rebuild=true.`,
+ Required: true,
+ },
+ "unified_crl": {
+ Type: framework.TypeBool,
+ Description: `If set to true, enables global replication of revocation entries,
+also enabling unified versions of OCSP and CRLs if their respective features
+are enabled ('disable' governs CRLs and 'ocsp_disable' governs OCSP).`,
+ Required: true,
+ },
+ "unified_crl_on_existing_paths": {
+ Type: framework.TypeBool,
+ Description: `If set to true,
+existing CRL and OCSP paths will return the unified CRL instead of a response based on cluster-local data`,
+ Required: true,
+ },
+ },
+ }},
+ },
+ },
+ logical.UpdateOperation: &framework.PathOperation{
+ Callback: b.pathCRLWrite,
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationVerb: "configure",
+ OperationSuffix: "crl",
+ },
+ Responses: map[int][]framework.Response{
+ http.StatusOK: {{
+ Description: "OK",
+ Fields: map[string]*framework.FieldSchema{
+ "expiry": {
+ Type: framework.TypeString,
+ Description: `The amount of time the generated CRL should be
+valid; defaults to 72 hours`,
+ Default: "72h",
+ },
+ "disable": {
+ Type: framework.TypeBool,
+ Description: `If set to true, disables generating the CRL entirely.`,
+ },
+ "ocsp_disable": {
+ Type: framework.TypeBool,
+ Description: `If set to true, OCSP unauthorized responses will be returned.`,
+ },
+ "ocsp_expiry": {
+ Type: framework.TypeString,
+ Description: `The amount of time an OCSP response will be valid (controls
+the NextUpdate field); defaults to 12 hours`,
+ Default: "12h",
+ },
+ "auto_rebuild": {
+ Type: framework.TypeBool,
+ Description: `If set to true, enables automatic rebuilding of the CRL`,
+ },
+ "auto_rebuild_grace_period": {
+ Type: framework.TypeString,
+ Description: `The time before the CRL expires to automatically rebuild it, when enabled. Must be shorter than the CRL expiry. Defaults to 12h.`,
+ Default: "12h",
+ },
+ "enable_delta": {
+ Type: framework.TypeBool,
+ Description: `Whether to enable delta CRLs between authoritative CRL rebuilds`,
+ },
+ "delta_rebuild_interval": {
+ Type: framework.TypeString,
+ Description: `The time between delta CRL rebuilds if a new revocation has occurred. Must be shorter than the CRL expiry. Defaults to 15m.`,
+ Default: "15m",
+ },
+ "cross_cluster_revocation": {
+ Type: framework.TypeBool,
+ Description: `Whether to enable a global, cross-cluster revocation queue.
+Must be used with auto_rebuild=true.`,
+ Required: false,
+ },
+ "unified_crl": {
+ Type: framework.TypeBool,
+ Description: `If set to true, enables global replication of revocation entries,
+also enabling unified versions of OCSP and CRLs if their respective features
+are enabled ('disable' governs CRLs and 'ocsp_disable' governs OCSP).`,
+ Required: false,
+ },
+ "unified_crl_on_existing_paths": {
+ Type: framework.TypeBool,
+ Description: `If set to true,
+existing CRL and OCSP paths will return the unified CRL instead of a response based on cluster-local data`,
+ Required: false,
+ },
+ },
+ }},
+ },
+ // Read more about why these flags are set in backend.go.
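+ // (Briefly: CRL configuration writes must be handled by the active
+ // node of the performance primary, so performance standbys and
+ // performance secondaries forward these requests upstream rather
+ // than servicing them locally.)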
+ ForwardPerformanceStandby: true, + ForwardPerformanceSecondary: true, + }, + }, + + HelpSynopsis: pathConfigCRLHelpSyn, + HelpDescription: pathConfigCRLHelpDesc, + } +} + +func (b *backend) pathCRLRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + sc := b.makeStorageContext(ctx, req.Storage) + config, err := sc.getRevocationConfig() + if err != nil { + return nil, err + } + + return genResponseFromCrlConfig(config), nil +} + +func (b *backend) pathCRLWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + sc := b.makeStorageContext(ctx, req.Storage) + config, err := sc.getRevocationConfig() + if err != nil { + return nil, err + } + + if expiryRaw, ok := d.GetOk("expiry"); ok { + expiry := expiryRaw.(string) + _, err := parseutil.ParseDurationSecond(expiry) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("given expiry could not be decoded: %s", err)), nil + } + config.Expiry = expiry + } + + oldDisable := config.Disable + if disableRaw, ok := d.GetOk("disable"); ok { + config.Disable = disableRaw.(bool) + } + + if ocspDisableRaw, ok := d.GetOk("ocsp_disable"); ok { + config.OcspDisable = ocspDisableRaw.(bool) + } + + if expiryRaw, ok := d.GetOk("ocsp_expiry"); ok { + expiry := expiryRaw.(string) + duration, err := parseutil.ParseDurationSecond(expiry) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("given ocsp_expiry could not be decoded: %s", err)), nil + } + if duration < 0 { + return logical.ErrorResponse(fmt.Sprintf("ocsp_expiry must be greater than or equal to 0 got: %s", duration)), nil + } + config.OcspExpiry = expiry + } + + oldAutoRebuild := config.AutoRebuild + if autoRebuildRaw, ok := d.GetOk("auto_rebuild"); ok { + config.AutoRebuild = autoRebuildRaw.(bool) + } + + if autoRebuildGracePeriodRaw, ok := d.GetOk("auto_rebuild_grace_period"); ok { + autoRebuildGracePeriod := autoRebuildGracePeriodRaw.(string) + if _, err := parseutil.ParseDurationSecond(autoRebuildGracePeriod); err != nil { + return logical.ErrorResponse(fmt.Sprintf("given auto_rebuild_grace_period could not be decoded: %s", err)), nil + } + config.AutoRebuildGracePeriod = autoRebuildGracePeriod + } + + oldEnableDelta := config.EnableDelta + if enableDeltaRaw, ok := d.GetOk("enable_delta"); ok { + config.EnableDelta = enableDeltaRaw.(bool) + } + + if deltaRebuildIntervalRaw, ok := d.GetOk("delta_rebuild_interval"); ok { + deltaRebuildInterval := deltaRebuildIntervalRaw.(string) + if _, err := parseutil.ParseDurationSecond(deltaRebuildInterval); err != nil { + return logical.ErrorResponse(fmt.Sprintf("given delta_rebuild_interval could not be decoded: %s", err)), nil + } + config.DeltaRebuildInterval = deltaRebuildInterval + } + + if useGlobalQueue, ok := d.GetOk("cross_cluster_revocation"); ok { + config.UseGlobalQueue = useGlobalQueue.(bool) + } + + oldUnifiedCRL := config.UnifiedCRL + if unifiedCrlRaw, ok := d.GetOk("unified_crl"); ok { + config.UnifiedCRL = unifiedCrlRaw.(bool) + } + + if unifiedCrlOnExistingPathsRaw, ok := d.GetOk("unified_crl_on_existing_paths"); ok { + config.UnifiedCRLOnExistingPaths = unifiedCrlOnExistingPathsRaw.(bool) + } + + if config.UnifiedCRLOnExistingPaths && !config.UnifiedCRL { + return logical.ErrorResponse("unified_crl_on_existing_paths cannot be enabled if unified_crl is disabled"), nil + } + + expiry, _ := parseutil.ParseDurationSecond(config.Expiry) + if config.AutoRebuild { + gracePeriod, _ := 
parseutil.ParseDurationSecond(config.AutoRebuildGracePeriod) + if gracePeriod >= expiry { + return logical.ErrorResponse(fmt.Sprintf("CRL auto-rebuilding grace period (%v) must be strictly shorter than CRL expiry (%v) value when auto-rebuilding of CRLs is enabled", config.AutoRebuildGracePeriod, config.Expiry)), nil + } + } + + if config.EnableDelta { + deltaRebuildInterval, _ := parseutil.ParseDurationSecond(config.DeltaRebuildInterval) + if deltaRebuildInterval >= expiry { + return logical.ErrorResponse(fmt.Sprintf("CRL delta rebuild window (%v) must be strictly shorter than CRL expiry (%v) value when delta CRLs are enabled", config.DeltaRebuildInterval, config.Expiry)), nil + } + } + + if !config.AutoRebuild { + if config.EnableDelta { + return logical.ErrorResponse("Delta CRLs cannot be enabled when auto rebuilding is disabled as the complete CRL is always regenerated!"), nil + } + + if config.UseGlobalQueue { + return logical.ErrorResponse("Global, cross-cluster revocation queue cannot be enabled when auto rebuilding is disabled as the local cluster may not have the certificate entry!"), nil + } + } + + if !constants.IsEnterprise && config.UseGlobalQueue { + return logical.ErrorResponse("Global, cross-cluster revocation queue (cross_cluster_revocation) can only be enabled on Vault Enterprise."), nil + } + + if !constants.IsEnterprise && config.UnifiedCRL { + return logical.ErrorResponse("unified_crl can only be enabled on Vault Enterprise"), nil + } + + isLocalMount := b.System().LocalMount() + if isLocalMount && config.UseGlobalQueue { + return logical.ErrorResponse("Global, cross-cluster revocation queue (cross_cluster_revocation) cannot be enabled on local mounts."), + nil + } + + if isLocalMount && config.UnifiedCRL { + return logical.ErrorResponse("unified_crl cannot be enabled on local mounts."), nil + } + + if !config.AutoRebuild && config.UnifiedCRL { + return logical.ErrorResponse("unified_crl=true requires auto_rebuild=true, as unified CRLs cannot be rebuilt on every revocation."), nil + } + + entry, err := logical.StorageEntryJSON("config/crl", config) + if err != nil { + return nil, err + } + err = req.Storage.Put(ctx, entry) + if err != nil { + return nil, err + } + + b.crlBuilder.markConfigDirty() + b.crlBuilder.reloadConfigIfRequired(sc) + + resp := genResponseFromCrlConfig(config) + + // Note this only affects/happens on the main cluster node, if you need to + // notify something based on a configuration change on all server types + // have a look at crlBuilder::reloadConfigIfRequired + if oldDisable != config.Disable || (oldAutoRebuild && !config.AutoRebuild) || (oldEnableDelta != config.EnableDelta) || (oldUnifiedCRL != config.UnifiedCRL) { + // It wasn't disabled but now it is (or equivalently, we were set to + // auto-rebuild and we aren't now or equivalently, we changed our + // mind about delta CRLs and need a new complete one or equivalently, + // we changed our mind about unified CRLs), rotate the CRLs. 
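+ // (The rebuild below is forced so the complete CRL is regenerated
+ // immediately; any warnings it returns are attached to the response
+ // rather than failing the configuration write.)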
+ warnings, crlErr := b.crlBuilder.rebuild(sc, true) + if crlErr != nil { + switch crlErr.(type) { + case errutil.UserError: + return logical.ErrorResponse(fmt.Sprintf("Error during CRL building: %s", crlErr)), nil + default: + return nil, fmt.Errorf("error encountered during CRL building: %w", crlErr) + } + } + for index, warning := range warnings { + resp.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) + } + } + + return resp, nil +} + +func genResponseFromCrlConfig(config *crlConfig) *logical.Response { + return &logical.Response{ + Data: map[string]interface{}{ + "expiry": config.Expiry, + "disable": config.Disable, + "ocsp_disable": config.OcspDisable, + "ocsp_expiry": config.OcspExpiry, + "auto_rebuild": config.AutoRebuild, + "auto_rebuild_grace_period": config.AutoRebuildGracePeriod, + "enable_delta": config.EnableDelta, + "delta_rebuild_interval": config.DeltaRebuildInterval, + "cross_cluster_revocation": config.UseGlobalQueue, + "unified_crl": config.UnifiedCRL, + "unified_crl_on_existing_paths": config.UnifiedCRLOnExistingPaths, + }, + } +} + +const pathConfigCRLHelpSyn = ` +Configure the CRL expiration. +` + +const pathConfigCRLHelpDesc = ` +This endpoint allows configuration of the CRL lifetime. +` diff --git a/builtin/logical/pki/path_config_urls.go b/builtin/logical/pki/path_config_urls.go new file mode 100644 index 0000000..341f3db --- /dev/null +++ b/builtin/logical/pki/path_config_urls.go @@ -0,0 +1,294 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/asaskevich/govalidator" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathConfigURLs(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "config/urls", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + }, + + Fields: map[string]*framework.FieldSchema{ + "issuing_certificates": { + Type: framework.TypeCommaStringSlice, + Description: `Comma-separated list of URLs to be used +for the issuing certificate attribute. See also RFC 5280 Section 4.2.2.1.`, + }, + + "crl_distribution_points": { + Type: framework.TypeCommaStringSlice, + Description: `Comma-separated list of URLs to be used +for the CRL distribution points attribute. See also RFC 5280 Section 4.2.1.13.`, + }, + + "ocsp_servers": { + Type: framework.TypeCommaStringSlice, + Description: `Comma-separated list of URLs to be used +for the OCSP servers attribute. See also RFC 5280 Section 4.2.2.1.`, + }, + + "enable_templating": { + Type: framework.TypeBool, + Description: `Whether or not to enabling templating of the +above AIA fields. When templating is enabled the special values '{{issuer_id}}', +'{{cluster_path}}', and '{{cluster_aia_path}}' are available, but the addresses +are not checked for URI validity until issuance time. 
Using '{{cluster_path}}' +requires /config/cluster's 'path' member to be set on all PR Secondary clusters +and using '{{cluster_aia_path}}' requires /config/cluster's 'aia_path' member +to be set on all PR secondary clusters.`, + Default: false, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "urls", + }, + Callback: b.pathWriteURL, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "issuing_certificates": { + Type: framework.TypeCommaStringSlice, + Description: `Comma-separated list of URLs to be used +for the issuing certificate attribute. See also RFC 5280 Section 4.2.2.1.`, + }, + "crl_distribution_points": { + Type: framework.TypeCommaStringSlice, + Description: `Comma-separated list of URLs to be used +for the CRL distribution points attribute. See also RFC 5280 Section 4.2.1.13.`, + }, + "ocsp_servers": { + Type: framework.TypeCommaStringSlice, + Description: `Comma-separated list of URLs to be used +for the OCSP servers attribute. See also RFC 5280 Section 4.2.2.1.`, + }, + "enable_templating": { + Type: framework.TypeBool, + Description: `Whether or not to enabling templating of the +above AIA fields. When templating is enabled the special values '{{issuer_id}}' +and '{{cluster_path}}' are available, but the addresses are not checked for +URI validity until issuance time. This requires /config/cluster's path to be +set on all PR Secondary clusters.`, + Default: false, + }, + }, + }}, + }, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathReadURL, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "urls-configuration", + }, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "issuing_certificates": { + Type: framework.TypeCommaStringSlice, + Description: `Comma-separated list of URLs to be used +for the issuing certificate attribute. See also RFC 5280 Section 4.2.2.1.`, + Required: true, + }, + "crl_distribution_points": { + Type: framework.TypeCommaStringSlice, + Description: `Comma-separated list of URLs to be used +for the CRL distribution points attribute. See also RFC 5280 Section 4.2.1.13.`, + Required: true, + }, + "ocsp_servers": { + Type: framework.TypeCommaStringSlice, + Description: `Comma-separated list of URLs to be used +for the OCSP servers attribute. See also RFC 5280 Section 4.2.2.1.`, + Required: true, + }, + "enable_templating": { + Type: framework.TypeBool, + Description: `Whether or not to enable templating of the +above AIA fields. When templating is enabled the special values '{{issuer_id}}' +and '{{cluster_path}}' are available, but the addresses are not checked for +URI validity until issuance time. 
This requires /config/cluster's path to be +set on all PR Secondary clusters.`, + Required: true, + }, + }, + }}, + }, + }, + }, + + HelpSynopsis: pathConfigURLsHelpSyn, + HelpDescription: pathConfigURLsHelpDesc, + } +} + +func validateURLs(urls []string) string { + for _, curr := range urls { + if !govalidator.IsURL(curr) || strings.Contains(curr, "{{issuer_id}}") || strings.Contains(curr, "{{cluster_path}}") || strings.Contains(curr, "{{cluster_aia_path}}") { + return curr + } + } + + return "" +} + +func getGlobalAIAURLs(ctx context.Context, storage logical.Storage) (*aiaConfigEntry, error) { + entry, err := storage.Get(ctx, "urls") + if err != nil { + return nil, err + } + + entries := &aiaConfigEntry{ + IssuingCertificates: []string{}, + CRLDistributionPoints: []string{}, + OCSPServers: []string{}, + EnableTemplating: false, + } + + if entry == nil { + return entries, nil + } + + if err := entry.DecodeJSON(entries); err != nil { + return nil, err + } + + return entries, nil +} + +func writeURLs(ctx context.Context, storage logical.Storage, entries *aiaConfigEntry) error { + entry, err := logical.StorageEntryJSON("urls", entries) + if err != nil { + return err + } + if entry == nil { + return fmt.Errorf("unable to marshal entry into JSON") + } + + err = storage.Put(ctx, entry) + if err != nil { + return err + } + + return nil +} + +func (b *backend) pathReadURL(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + entries, err := getGlobalAIAURLs(ctx, req.Storage) + if err != nil { + return nil, err + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "issuing_certificates": entries.IssuingCertificates, + "crl_distribution_points": entries.CRLDistributionPoints, + "ocsp_servers": entries.OCSPServers, + "enable_templating": entries.EnableTemplating, + }, + } + + return resp, nil +} + +func (b *backend) pathWriteURL(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + entries, err := getGlobalAIAURLs(ctx, req.Storage) + if err != nil { + return nil, err + } + + if enableTemplating, ok := data.GetOk("enable_templating"); ok { + entries.EnableTemplating = enableTemplating.(bool) + } + if urlsInt, ok := data.GetOk("issuing_certificates"); ok { + entries.IssuingCertificates = urlsInt.([]string) + } + if urlsInt, ok := data.GetOk("crl_distribution_points"); ok { + entries.CRLDistributionPoints = urlsInt.([]string) + } + if urlsInt, ok := data.GetOk("ocsp_servers"); ok { + entries.OCSPServers = urlsInt.([]string) + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "issuing_certificates": entries.IssuingCertificates, + "crl_distribution_points": entries.CRLDistributionPoints, + "ocsp_servers": entries.OCSPServers, + "enable_templating": entries.EnableTemplating, + }, + } + + if entries.EnableTemplating && !b.useLegacyBundleCaStorage() { + sc := b.makeStorageContext(ctx, req.Storage) + issuers, err := sc.listIssuers() + if err != nil { + return nil, fmt.Errorf("unable to read issuers list to validate templated URIs: %w", err) + } + + if len(issuers) > 0 { + issuer, err := sc.fetchIssuerById(issuers[0]) + if err != nil { + return nil, fmt.Errorf("unable to read issuer to validate templated URIs: %w", err) + } + + _, err = entries.toURLEntries(sc, issuer.ID) + if err != nil { + resp.AddWarning(fmt.Sprintf("issuance may fail: %v\n\nConsider setting the cluster-local address if it is not already set.", err)) + } + } + } else if !entries.EnableTemplating { + if badURL := 
validateURLs(entries.IssuingCertificates); badURL != "" { + return logical.ErrorResponse(fmt.Sprintf( + "invalid URL found in Authority Information Access (AIA) parameter issuing_certificates: %s", badURL)), nil + } + + if badURL := validateURLs(entries.CRLDistributionPoints); badURL != "" { + return logical.ErrorResponse(fmt.Sprintf( + "invalid URL found in Authority Information Access (AIA) parameter crl_distribution_points: %s", badURL)), nil + } + + if badURL := validateURLs(entries.OCSPServers); badURL != "" { + return logical.ErrorResponse(fmt.Sprintf( + "invalid URL found in Authority Information Access (AIA) parameter ocsp_servers: %s", badURL)), nil + } + } + + if err := writeURLs(ctx, req.Storage, entries); err != nil { + return nil, err + } + + return resp, nil +} + +const pathConfigURLsHelpSyn = ` +Set the URLs for the issuing CA, CRL distribution points, and OCSP servers. +` + +const pathConfigURLsHelpDesc = ` +This path allows you to set the issuing CA, CRL distribution points, and +OCSP server URLs that will be encoded into issued certificates. If these +values are not set, no such information will be encoded in the issued +certificates. To delete URLs, simply re-set the appropriate value with an +empty string. + +Multiple URLs can be specified for each type; use commas to separate them. +` diff --git a/builtin/logical/pki/path_fetch.go b/builtin/logical/pki/path_fetch.go new file mode 100644 index 0000000..b255cee --- /dev/null +++ b/builtin/logical/pki/path_fetch.go @@ -0,0 +1,537 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "context" + "encoding/pem" + "fmt" + "net/http" + "strings" + "time" + + "github.com/hashicorp/vault/helper/constants" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/logical" +) + +var pathFetchReadSchema = map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "certificate": { + Type: framework.TypeString, + Description: `Certificate`, + Required: false, + }, + "revocation_time": { + Type: framework.TypeInt64, + Description: `Revocation time`, + Required: false, + }, + "revocation_time_rfc3339": { + Type: framework.TypeString, + Description: `Revocation time RFC 3339 formatted`, + Required: false, + }, + "issuer_id": { + Type: framework.TypeString, + Description: `ID of the issuer`, + Required: false, + }, + "ca_chain": { + Type: framework.TypeString, + Description: `Issuing CA Chain`, + Required: false, + }, + }, + }}, +} + +// Returns the CA in raw format +func pathFetchCA(b *backend) *framework.Path { + return &framework.Path{ + Pattern: `ca(/pem)?`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "ca-der|ca-pem", + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathFetchRead, + Responses: pathFetchReadSchema, + }, + }, + + HelpSynopsis: pathFetchHelpSyn, + HelpDescription: pathFetchHelpDesc, + } +} + +// Returns the CA chain +func pathFetchCAChain(b *backend) *framework.Path { + return &framework.Path{ + Pattern: `(cert/)?ca_chain`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "ca-chain-pem|cert-ca-chain", + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: 
b.pathFetchRead,
+ Responses: pathFetchReadSchema,
+ },
+ },
+
+ HelpSynopsis: pathFetchHelpSyn,
+ HelpDescription: pathFetchHelpDesc,
+ }
+}
+
+// Returns the CRL in raw format
+func pathFetchCRL(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: `crl(/pem|/delta(/pem)?)?`,
+
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationPrefix: operationPrefixPKI,
+ OperationSuffix: "crl-der|crl-pem|crl-delta|crl-delta-pem",
+ },
+
+ Operations: map[logical.Operation]framework.OperationHandler{
+ logical.ReadOperation: &framework.PathOperation{
+ Callback: b.pathFetchRead,
+ Responses: pathFetchReadSchema,
+ },
+ },
+
+ HelpSynopsis: pathFetchHelpSyn,
+ HelpDescription: pathFetchHelpDesc,
+ }
+}
+
+// Returns the unified CRL in raw format
+func pathFetchUnifiedCRL(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: `unified-crl(/pem|/delta(/pem)?)?`,
+
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationPrefix: operationPrefixPKI,
+ OperationSuffix: "unified-crl-der|unified-crl-pem|unified-crl-delta|unified-crl-delta-pem",
+ },
+
+ Operations: map[logical.Operation]framework.OperationHandler{
+ logical.ReadOperation: &framework.PathOperation{
+ Callback: b.pathFetchRead,
+ },
+ },
+
+ HelpSynopsis: pathFetchHelpSyn,
+ HelpDescription: pathFetchHelpDesc,
+ }
+}
+
+// Returns any valid (non-revoked) cert in raw format.
+func pathFetchValidRaw(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: `cert/(?P<serial>[0-9A-Fa-f-:]+)/raw(/pem)?`,
+
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationPrefix: operationPrefixPKI,
+ OperationSuffix: "cert-raw-der|cert-raw-pem",
+ },
+
+ Fields: map[string]*framework.FieldSchema{
+ "serial": {
+ Type: framework.TypeString,
+ Description: `Certificate serial number, in colon- or
+hyphen-separated hexadecimal`,
+ },
+ },
+
+ Operations: map[logical.Operation]framework.OperationHandler{
+ logical.ReadOperation: &framework.PathOperation{
+ Callback: b.pathFetchRead,
+ Responses: pathFetchReadSchema,
+ },
+ },
+
+ HelpSynopsis: pathFetchHelpSyn,
+ HelpDescription: pathFetchHelpDesc,
+ }
+}
+
+// Returns any valid (non-revoked) cert. Since "ca" fits the pattern, this path
+// also handles returning the CA cert in a non-raw format.
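+// For instance, both "cert/ca" and a hexadecimal leaf serial such as
+// "cert/1a:2b:3c:4d" (an illustrative value) match this pattern; "ca"
+// matches because each of its characters falls in the [0-9A-Fa-f-:] class.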
+func pathFetchValid(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: `cert/(?P<serial>[0-9A-Fa-f-:]+)`,
+
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationPrefix: operationPrefixPKI,
+ OperationSuffix: "cert",
+ },
+
+ Fields: map[string]*framework.FieldSchema{
+ "serial": {
+ Type: framework.TypeString,
+ Description: `Certificate serial number, in colon- or
+hyphen-separated hexadecimal`,
+ },
+ },
+
+ Operations: map[logical.Operation]framework.OperationHandler{
+ logical.ReadOperation: &framework.PathOperation{
+ Callback: b.pathFetchRead,
+ Responses: pathFetchReadSchema,
+ },
+ },
+
+ HelpSynopsis: pathFetchHelpSyn,
+ HelpDescription: pathFetchHelpDesc,
+ }
+}
+
+// This returns the CRL in a non-raw format
+func pathFetchCRLViaCertPath(b *backend) *framework.Path {
+ pattern := `cert/(crl|delta-crl)`
+ if constants.IsEnterprise {
+ pattern = `cert/(crl|delta-crl|unified-crl|unified-delta-crl)`
+ }
+
+ return &framework.Path{
+ Pattern: pattern,
+
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationPrefix: operationPrefixPKI,
+ OperationSuffix: "cert-crl|cert-delta-crl|cert-unified-crl|cert-unified-delta-crl",
+ },
+
+ Operations: map[logical.Operation]framework.OperationHandler{
+ logical.ReadOperation: &framework.PathOperation{
+ Callback: b.pathFetchRead,
+ Responses: pathFetchReadSchema,
+ },
+ },
+
+ HelpSynopsis: pathFetchHelpSyn,
+ HelpDescription: pathFetchHelpDesc,
+ }
+}
+
+// This returns the list of serial numbers for certs
+func pathFetchListCerts(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "certs/?$",
+
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationPrefix: operationPrefixPKI,
+ OperationSuffix: "certs",
+ },
+
+ Operations: map[logical.Operation]framework.OperationHandler{
+ logical.ListOperation: &framework.PathOperation{
+ Callback: b.pathFetchCertList,
+ Responses: map[int][]framework.Response{
+ http.StatusOK: {{
+ Description: "OK",
+ Fields: map[string]*framework.FieldSchema{
+ "keys": {
+ Type: framework.TypeStringSlice,
+ Description: `A list of keys`,
+ Required: true,
+ },
+ },
+ }},
+ },
+ },
+ },
+
+ HelpSynopsis: pathFetchHelpSyn,
+ HelpDescription: pathFetchHelpDesc,
+ }
+}
+
+func (b *backend) pathFetchCertList(ctx context.Context, req *logical.Request, _ *framework.FieldData) (response *logical.Response, retErr error) {
+ entries, err := req.Storage.List(ctx, "certs/")
+ if err != nil {
+ return nil, err
+ }
+ for i := range entries {
+ entries[i] = denormalizeSerial(entries[i])
+ }
+ return logical.ListResponse(entries), nil
+}
+
+func (b *backend) pathFetchRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (response *logical.Response, retErr error) {
+ var serial, pemType, contentType string
+ var certEntry, revokedEntry *logical.StorageEntry
+ var funcErr error
+ var certificate []byte
+ var fullChain []byte
+ var revocationTime int64
+ var revocationIssuerId string
+ var revocationTimeRfc3339 string
+
+ response = &logical.Response{
+ Data: map[string]interface{}{},
+ }
+ sc := b.makeStorageContext(ctx, req.Storage)
+
+ // Some of these need to return raw and some non-raw;
+ // this is basically handled by setting contentType or not.
+ // Errors don't cause an immediate exit, because the raw
+ // paths still need to return raw output.
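+ // As a rough summary of the switch below: DER-style paths ("ca",
+ // "crl") select an application/pkix-* content type, "/pem" variants
+ // select a PEM content type, and plain "cert/..." paths leave
+ // contentType empty so a structured JSON response is returned.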
+ + modifiedCtx := &IfModifiedSinceHelper{ + req: req, + issuerRef: defaultRef, + } + switch { + case req.Path == "ca" || req.Path == "ca/pem" || req.Path == "cert/ca" || req.Path == "cert/ca/raw" || req.Path == "cert/ca/raw/pem": + modifiedCtx.reqType = ifModifiedCA + ret, err := sendNotModifiedResponseIfNecessary(modifiedCtx, sc, response) + if err != nil || ret { + retErr = err + goto reply + } + + serial = "ca" + contentType = "application/pkix-cert" + if req.Path == "ca/pem" || req.Path == "cert/ca/raw/pem" { + pemType = "CERTIFICATE" + contentType = "application/pem-certificate-chain" + } else if req.Path == "cert/ca" { + pemType = "CERTIFICATE" + contentType = "" + } + case req.Path == "ca_chain" || req.Path == "cert/ca_chain": + serial = "ca_chain" + if req.Path == "ca_chain" { + contentType = "application/pkix-cert" + } + case req.Path == "crl" || req.Path == "crl/pem" || req.Path == "crl/delta" || req.Path == "crl/delta/pem" || req.Path == "cert/crl" || req.Path == "cert/crl/raw" || req.Path == "cert/crl/raw/pem" || req.Path == "cert/delta-crl" || req.Path == "cert/delta-crl/raw" || req.Path == "cert/delta-crl/raw/pem" || req.Path == "unified-crl" || req.Path == "unified-crl/pem" || req.Path == "unified-crl/delta" || req.Path == "unified-crl/delta/pem" || req.Path == "cert/unified-crl" || req.Path == "cert/unified-crl/raw" || req.Path == "cert/unified-crl/raw/pem" || req.Path == "cert/unified-delta-crl" || req.Path == "cert/unified-delta-crl/raw" || req.Path == "cert/unified-delta-crl/raw/pem": + config, err := b.crlBuilder.getConfigWithUpdate(sc) + if err != nil { + retErr = err + goto reply + } + var isDelta bool + var isUnified bool + if strings.Contains(req.Path, "delta") { + isDelta = true + } + if strings.Contains(req.Path, "unified") || shouldLocalPathsUseUnified(config) { + isUnified = true + } + + modifiedCtx.reqType = ifModifiedCRL + if !isUnified && isDelta { + modifiedCtx.reqType = ifModifiedDeltaCRL + } else if isUnified && !isDelta { + modifiedCtx.reqType = ifModifiedUnifiedCRL + } else if isUnified && isDelta { + modifiedCtx.reqType = ifModifiedUnifiedDeltaCRL + } + + ret, err := sendNotModifiedResponseIfNecessary(modifiedCtx, sc, response) + if err != nil || ret { + retErr = err + goto reply + } + + serial = legacyCRLPath + if !isUnified && isDelta { + serial = deltaCRLPath + } else if isUnified && !isDelta { + serial = unifiedCRLPath + } else if isUnified && isDelta { + serial = unifiedDeltaCRLPath + } + + contentType = "application/pkix-crl" + if strings.Contains(req.Path, "pem") { + pemType = "X509 CRL" + contentType = "application/x-pem-file" + } else if req.Path == "cert/crl" || req.Path == "cert/delta-crl" || req.Path == "cert/unified-crl" || req.Path == "cert/unified-delta-crl" { + pemType = "X509 CRL" + contentType = "" + } + case strings.HasSuffix(req.Path, "/pem") || strings.HasSuffix(req.Path, "/raw"): + serial = data.Get("serial").(string) + contentType = "application/pkix-cert" + if strings.HasSuffix(req.Path, "/pem") { + pemType = "CERTIFICATE" + contentType = "application/pem-certificate-chain" + } + default: + serial = data.Get("serial").(string) + pemType = "CERTIFICATE" + } + if len(serial) == 0 { + response = logical.ErrorResponse("The serial number must be provided") + goto reply + } + + // Prefer fetchCAInfo to fetchCertBySerial for CA certificates. 
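+ // ("ca" and "ca_chain" are served from the configured default issuer,
+ // whose full chain fetchCAInfo can resolve; fetchCertBySerial only
+ // yields a single stored certificate entry.)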
+ if serial == "ca_chain" || serial == "ca" { + caInfo, err := sc.fetchCAInfo(defaultRef, ReadOnlyUsage) + if err != nil { + switch err.(type) { + case errutil.UserError: + response = logical.ErrorResponse(err.Error()) + goto reply + default: + retErr = err + goto reply + } + } + + if serial == "ca_chain" { + rawChain := caInfo.GetFullChain() + var chainStr string + for _, ca := range rawChain { + block := pem.Block{ + Type: "CERTIFICATE", + Bytes: ca.Bytes, + } + chainStr = strings.Join([]string{chainStr, strings.TrimSpace(string(pem.EncodeToMemory(&block)))}, "\n") + } + fullChain = []byte(strings.TrimSpace(chainStr)) + certificate = fullChain + } else if serial == "ca" { + certificate = caInfo.Certificate.Raw + + if len(pemType) != 0 { + block := pem.Block{ + Type: pemType, + Bytes: certificate, + } + + // This is convoluted on purpose to ensure that we don't have trailing + // newlines via various paths + certificate = []byte(strings.TrimSpace(string(pem.EncodeToMemory(&block)))) + } + } + + goto reply + } + + certEntry, funcErr = fetchCertBySerial(sc, req.Path, serial) + if funcErr != nil { + switch funcErr.(type) { + case errutil.UserError: + response = logical.ErrorResponse(funcErr.Error()) + goto reply + default: + retErr = funcErr + goto reply + } + } + if certEntry == nil { + response = nil + goto reply + } + + certificate = certEntry.Value + + if len(pemType) != 0 { + block := pem.Block{ + Type: pemType, + Bytes: certEntry.Value, + } + // This is convoluted on purpose to ensure that we don't have trailing + // newlines via various paths + certificate = []byte(strings.TrimSpace(string(pem.EncodeToMemory(&block)))) + } + + revokedEntry, funcErr = fetchCertBySerial(sc, "revoked/", serial) + if funcErr != nil { + switch funcErr.(type) { + case errutil.UserError: + response = logical.ErrorResponse(funcErr.Error()) + goto reply + default: + retErr = funcErr + goto reply + } + } + if revokedEntry != nil { + var revInfo revocationInfo + err := revokedEntry.DecodeJSON(&revInfo) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("Error decoding revocation entry for serial %s: %s", serial, err)), nil + } + revocationTime = revInfo.RevocationTime + revocationIssuerId = revInfo.CertificateIssuer.String() + + if !revInfo.RevocationTimeUTC.IsZero() { + revocationTimeRfc3339 = revInfo.RevocationTimeUTC.Format(time.RFC3339Nano) + } + } + +reply: + switch { + case len(contentType) != 0: + response = &logical.Response{ + Data: map[string]interface{}{ + logical.HTTPContentType: contentType, + logical.HTTPRawBody: certificate, + }, + } + if retErr != nil { + if b.Logger().IsWarn() { + b.Logger().Warn("possible error, but cannot return in raw response. 
Note that an empty CA probably means none was configured, and an empty CRL is possibly correct", "error", retErr) + } + } + retErr = nil + if len(certificate) > 0 { + response.Data[logical.HTTPStatusCode] = 200 + } else { + response.Data[logical.HTTPStatusCode] = 204 + } + case retErr != nil: + response = nil + return + case response == nil: + return + case response.IsError(): + return response, nil + default: + response.Data["certificate"] = string(certificate) + response.Data["revocation_time"] = revocationTime + response.Data["revocation_time_rfc3339"] = revocationTimeRfc3339 + // Only output this field if we have a value for it as it doesn't make sense for a + // bunch of code paths that go through here + if revocationIssuerId != "" { + response.Data["issuer_id"] = revocationIssuerId + } + + if len(fullChain) > 0 { + response.Data["ca_chain"] = string(fullChain) + } + } + + return +} + +const pathFetchHelpSyn = ` +Fetch a CA, CRL, CA Chain, or non-revoked certificate. +` + +const pathFetchHelpDesc = ` +This allows certificates to be fetched. Use /cert/:serial for JSON responses. + +Using "ca" or "crl" as the value fetches the appropriate information in DER encoding. Add "/pem" to either to get PEM encoding. + +Using "ca_chain" as the value fetches the certificate authority trust chain in PEM encoding. + +Otherwise, specify a serial number to fetch the specified certificate. Add "/raw" to get just the certificate in DER form, "/raw/pem" to get the PEM encoded certificate. +` diff --git a/builtin/logical/pki/path_fetch_issuers.go b/builtin/logical/pki/path_fetch_issuers.go new file mode 100644 index 0000000..5c7f841 --- /dev/null +++ b/builtin/logical/pki/path_fetch_issuers.go @@ -0,0 +1,1350 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "context" + "crypto/x509" + "encoding/pem" + "fmt" + "net/http" + "strings" + "time" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathListIssuers(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "issuers/?$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "issuers", + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ListOperation: &framework.PathOperation{ + Callback: b.pathListIssuersHandler, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "keys": { + Type: framework.TypeStringSlice, + Description: `A list of keys`, + Required: true, + }, + "key_info": { + Type: framework.TypeMap, + Description: `Key info with issuer name`, + Required: false, + }, + }, + }}, + }, + }, + }, + + HelpSynopsis: pathListIssuersHelpSyn, + HelpDescription: pathListIssuersHelpDesc, + } +} + +func (b *backend) pathListIssuersHandler(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + if b.useLegacyBundleCaStorage() { + return logical.ErrorResponse("Can not list issuers until migration has completed"), nil + } + + var responseKeys []string + responseInfo := make(map[string]interface{}) + + sc := b.makeStorageContext(ctx, req.Storage) + entries, err := sc.listIssuers() + if err != nil { + return nil, err + } + + config, err := sc.getIssuersConfig() + if err != nil { + return nil, err + } + + // For each issuer, we need not only the identifier (as returned by + // listIssuers), but also 
the name of the issuer. This means we have to
+ // fetch the actual issuer object as well.
+ for _, identifier := range entries {
+ issuer, err := sc.fetchIssuerById(identifier)
+ if err != nil {
+ return nil, err
+ }
+
+ responseKeys = append(responseKeys, string(identifier))
+ responseInfo[string(identifier)] = map[string]interface{}{
+ "issuer_name": issuer.Name,
+ "is_default": identifier == config.DefaultIssuerId,
+ "serial_number": issuer.SerialNumber,
+
+ // While nominally this could be considered sensitive information
+ // to be returned on an unauthed endpoint, there are two mitigating
+ // circumstances:
+ //
+ // 1. Key IDs are purely random numbers generated by Vault and
+ // have no relationship to the actual key material.
+ // 2. They also don't _do_ anything by themselves. No modification
+ // of KeyIDs is allowed; you need to be authenticated to Vault
+ // to understand what they mean; you _essentially_ get the same
+ // information from looking at/comparing various certs'
+ // SubjectPublicKeyInfo fields; and there's the `default`
+ // reference that anyone with issuer generation capabilities
+ // can use even if they can't access any of the other /key/*
+ // endpoints.
+ //
+ // So all in all, exposing this value is not a security risk and
+ // is otherwise beneficial for the UI, hence its inclusion.
+ "key_id": issuer.KeyID,
+ }
+ }
+
+ return logical.ListResponseWithInfo(responseKeys, responseInfo), nil
+}
+
+const (
+ pathListIssuersHelpSyn = `Fetch a list of CA certificates.`
+ pathListIssuersHelpDesc = `
+This endpoint allows listing of known issuing certificates, returning
+their identifier and their name (if set).
+`
+)
+
+func pathGetIssuer(b *backend) *framework.Path {
+ pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "$"
+
+ displayAttrs := &framework.DisplayAttributes{
+ OperationPrefix: operationPrefixPKI,
+ OperationSuffix: "issuer",
+ }
+
+ return buildPathIssuer(b, pattern, displayAttrs)
+}
+
+func pathGetUnauthedIssuer(b *backend) *framework.Path {
+ pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/(json|der|pem)$"
+
+ displayAttrs := &framework.DisplayAttributes{
+ OperationPrefix: operationPrefixPKI,
+ OperationSuffix: "issuer-json|issuer-der|issuer-pem",
+ }
+
+ return buildPathGetIssuer(b, pattern, displayAttrs)
+}
+
+func buildPathIssuer(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path {
+ fields := map[string]*framework.FieldSchema{}
+ fields = addIssuerRefNameFields(fields)
+
+ // Fields for updating issuer.
+ fields["manual_chain"] = &framework.FieldSchema{
+ Type: framework.TypeCommaStringSlice,
+ Description: `Chain of issuer references to use to build this
+issuer's computed CAChain field, when non-empty.`,
+ }
+ fields["leaf_not_after_behavior"] = &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Behavior of leaf's NotAfter fields: "err" to error
+if the computed NotAfter date exceeds that of this issuer; "truncate" to
+silently truncate to that of this issuer; or "permit" to allow this
+issuance to succeed (with NotAfter exceeding that of an issuer). Note that
+not all values will result in certificates that can be validated through
+the entire validity period. It is suggested to use "truncate" for
+intermediate CAs and "permit" only for root CAs.`,
+ Default: "err",
+ }
+ fields["usage"] = &framework.FieldSchema{
+ Type: framework.TypeCommaStringSlice,
+ Description: `Comma-separated list (or string slice) of usages for
+this issuer; valid values are "read-only", "issuing-certificates",
+"crl-signing", and "ocsp-signing". Multiple values may be specified. Read-only
+is implicit and always set.`,
+ Default: []string{"read-only", "issuing-certificates", "crl-signing", "ocsp-signing"},
+ }
+ fields["revocation_signature_algorithm"] = &framework.FieldSchema{
+ Type: framework.TypeString,
+ Description: `Which x509.SignatureAlgorithm name to use for
+signing CRLs. This parameter allows differentiation between PKCS#1v1.5
+and PSS keys and choice of signature hash algorithm. The default (empty
+string) value is for Go to select the signature algorithm. This can fail
+if the underlying key does not support the requested signature algorithm,
+which may not be known at modification time (such as with PKCS#11 managed
+RSA keys).`,
+ Default: "",
+ }
+ fields["issuing_certificates"] = &framework.FieldSchema{
+ Type: framework.TypeCommaStringSlice,
+ Description: `Comma-separated list of URLs to be used
+for the issuing certificate attribute. See also RFC 5280 Section 4.2.2.1.`,
+ }
+ fields["crl_distribution_points"] = &framework.FieldSchema{
+ Type: framework.TypeCommaStringSlice,
+ Description: `Comma-separated list of URLs to be used
+for the CRL distribution points attribute. See also RFC 5280 Section 4.2.1.13.`,
+ }
+ fields["ocsp_servers"] = &framework.FieldSchema{
+ Type: framework.TypeCommaStringSlice,
+ Description: `Comma-separated list of URLs to be used
+for the OCSP servers attribute. See also RFC 5280 Section 4.2.2.1.`,
+ }
+ fields["enable_aia_url_templating"] = &framework.FieldSchema{
+ Type: framework.TypeBool,
+ Description: `Whether or not to enable templating of the
+above AIA fields. When templating is enabled the special values '{{issuer_id}}',
+'{{cluster_path}}', '{{cluster_aia_path}}' are available, but the addresses are
+not checked for URL validity until issuance time. Using '{{cluster_path}}'
+requires /config/cluster's 'path' member to be set on all PR Secondary clusters
+and using '{{cluster_aia_path}}' requires /config/cluster's 'aia_path' member
+to be set on all PR Secondary clusters.`,
+ Default: false,
+ }
+
+ updateIssuerSchema := map[int][]framework.Response{
+ http.StatusOK: {{
+ Description: "OK",
+ Fields: map[string]*framework.FieldSchema{
+ "issuer_id": {
+ Type: framework.TypeString,
+ Description: `Issuer Id`,
+ Required: false,
+ },
+ "issuer_name": {
+ Type: framework.TypeString,
+ Description: `Issuer Name`,
+ Required: false,
+ },
+ "key_id": {
+ Type: framework.TypeString,
+ Description: `Key Id`,
+ Required: false,
+ },
+ "certificate": {
+ Type: framework.TypeString,
+ Description: `Certificate`,
+ Required: false,
+ },
+ "manual_chain": {
+ Type: framework.TypeStringSlice,
+ Description: `Manual Chain`,
+ Required: false,
+ },
+ "ca_chain": {
+ Type: framework.TypeStringSlice,
+ Description: `CA Chain`,
+ Required: false,
+ },
+ "leaf_not_after_behavior": {
+ Type: framework.TypeString,
+ Description: `Leaf Not After Behavior`,
+ Required: false,
+ },
+ "usage": {
+ Type: framework.TypeString,
+ Description: `Usage`,
+ Required: false,
+ },
+ "revocation_signature_algorithm": {
+ Type: framework.TypeString,
+ Description: `Revocation Signature Algorithm`,
+ Required: false,
+ },
+ "revoked": {
+ Type: framework.TypeBool,
+ Description: `Revoked`,
+ Required: false,
+ },
+ "revocation_time": {
+ Type: framework.TypeInt,
+ Required: false,
+ },
+ "revocation_time_rfc3339": {
+ Type: framework.TypeString,
+ Required: false,
+ },
+ "issuing_certificates": {
+ Type: framework.TypeStringSlice,
+ Description: `Issuing Certificates`,
+ Required: false,
+ },
+ "crl_distribution_points": {
+ Type: framework.TypeStringSlice,
+ Description: `CRL Distribution Points`,
+ Required: false,
+ },
+ "ocsp_servers": {
+ Type: framework.TypeStringSlice,
+ Description: `OCSP Servers`,
+ Required: false,
+ },
+ "enable_aia_url_templating": {
+ Type: framework.TypeBool,
+ Description: `Whether or not templating is enabled for AIA fields`,
+ Required: false,
+ },
+ },
+ }},
+ }
+
+ return &framework.Path{
+ // Returns a JSON entry.
+ Pattern: pattern,
+ DisplayAttrs: displayAttrs,
+ Fields: fields,
+
+ Operations: map[logical.Operation]framework.OperationHandler{
+ logical.ReadOperation: &framework.PathOperation{
+ Callback: b.pathGetIssuer,
+ Responses: updateIssuerSchema,
+ },
+ logical.UpdateOperation: &framework.PathOperation{
+ Callback: b.pathUpdateIssuer,
+ Responses: updateIssuerSchema,
+
+ // Read more about why these flags are set in backend.go.
+ ForwardPerformanceStandby: true,
+ ForwardPerformanceSecondary: true,
+ },
+ logical.DeleteOperation: &framework.PathOperation{
+ Callback: b.pathDeleteIssuer,
+ Responses: map[int][]framework.Response{
+ http.StatusNoContent: {{
+ Description: "No Content",
+ }},
+ },
+ // Read more about why these flags are set in backend.go.
+ ForwardPerformanceStandby: true,
+ ForwardPerformanceSecondary: true,
+ },
+ logical.PatchOperation: &framework.PathOperation{
+ Callback: b.pathPatchIssuer,
+ Responses: updateIssuerSchema,
+ // Read more about why these flags are set in backend.go.
+ ForwardPerformanceStandby: true, + ForwardPerformanceSecondary: true, + }, + }, + + HelpSynopsis: pathGetIssuerHelpSyn, + HelpDescription: pathGetIssuerHelpDesc, + } +} + +func buildPathGetIssuer(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { + fields := map[string]*framework.FieldSchema{} + fields = addIssuerRefField(fields) + + getIssuerSchema := map[int][]framework.Response{ + http.StatusNotModified: {{ + Description: "Not Modified", + }}, + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "issuer_id": { + Type: framework.TypeString, + Description: `Issuer Id`, + Required: true, + }, + "issuer_name": { + Type: framework.TypeString, + Description: `Issuer Name`, + Required: true, + }, + "certificate": { + Type: framework.TypeString, + Description: `Certificate`, + Required: true, + }, + "ca_chain": { + Type: framework.TypeStringSlice, + Description: `CA Chain`, + Required: true, + }, + }, + }}, + } + + return &framework.Path{ + // Returns a JSON entry. + Pattern: pattern, + DisplayAttrs: displayAttrs, + Fields: fields, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathGetIssuer, + Responses: getIssuerSchema, + }, + }, + + HelpSynopsis: pathGetIssuerHelpSyn, + HelpDescription: pathGetIssuerHelpDesc, + } +} + +func (b *backend) pathGetIssuer(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Handle raw issuers first. + if strings.HasSuffix(req.Path, "/der") || strings.HasSuffix(req.Path, "/pem") || strings.HasSuffix(req.Path, "/json") { + return b.pathGetRawIssuer(ctx, req, data) + } + + if b.useLegacyBundleCaStorage() { + return logical.ErrorResponse("Can not get issuer until migration has completed"), nil + } + + issuerName := getIssuerRef(data) + if len(issuerName) == 0 { + return logical.ErrorResponse("missing issuer reference"), nil + } + + sc := b.makeStorageContext(ctx, req.Storage) + ref, err := sc.resolveIssuerReference(issuerName) + if err != nil { + return nil, err + } + if ref == "" { + return logical.ErrorResponse("unable to resolve issuer id for reference: " + issuerName), nil + } + + issuer, err := sc.fetchIssuerById(ref) + if err != nil { + return nil, err + } + + return respondReadIssuer(issuer) +} + +func respondReadIssuer(issuer *issuerEntry) (*logical.Response, error) { + var respManualChain []string + for _, entity := range issuer.ManualChain { + respManualChain = append(respManualChain, string(entity)) + } + + revSigAlgStr, present := certutil.InvSignatureAlgorithmNames[issuer.RevocationSigAlg] + if !present { + revSigAlgStr = issuer.RevocationSigAlg.String() + if issuer.RevocationSigAlg == x509.UnknownSignatureAlgorithm { + revSigAlgStr = "" + } + } + + data := map[string]interface{}{ + "issuer_id": issuer.ID, + "issuer_name": issuer.Name, + "key_id": issuer.KeyID, + "certificate": issuer.Certificate, + "manual_chain": respManualChain, + "ca_chain": issuer.CAChain, + "leaf_not_after_behavior": issuer.LeafNotAfterBehavior.String(), + "usage": issuer.Usage.Names(), + "revocation_signature_algorithm": revSigAlgStr, + "revoked": issuer.Revoked, + "issuing_certificates": []string{}, + "crl_distribution_points": []string{}, + "ocsp_servers": []string{}, + } + + if issuer.Revoked { + data["revocation_time"] = issuer.RevocationTime + data["revocation_time_rfc3339"] = issuer.RevocationTimeUTC.Format(time.RFC3339Nano) + } + + if issuer.AIAURIs != nil { + 
data["issuing_certificates"] = issuer.AIAURIs.IssuingCertificates + data["crl_distribution_points"] = issuer.AIAURIs.CRLDistributionPoints + data["ocsp_servers"] = issuer.AIAURIs.OCSPServers + data["enable_aia_url_templating"] = issuer.AIAURIs.EnableTemplating + } + + response := &logical.Response{ + Data: data, + } + + if issuer.RevocationSigAlg == x509.SHA256WithRSAPSS || issuer.RevocationSigAlg == x509.SHA384WithRSAPSS || issuer.RevocationSigAlg == x509.SHA512WithRSAPSS { + response.AddWarning("Issuer uses a PSS Revocation Signature Algorithm. This algorithm will be downgraded to PKCS#1v1.5 signature scheme on OCSP responses, due to limitations in the OCSP library.") + } + + return response, nil +} + +func (b *backend) pathUpdateIssuer(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Since we're planning on updating issuers here, grab the lock so we've + // got a consistent view. + b.issuersLock.Lock() + defer b.issuersLock.Unlock() + + if b.useLegacyBundleCaStorage() { + return logical.ErrorResponse("Can not update issuer until migration has completed"), nil + } + + issuerName := getIssuerRef(data) + if len(issuerName) == 0 { + return logical.ErrorResponse("missing issuer reference"), nil + } + + sc := b.makeStorageContext(ctx, req.Storage) + ref, err := sc.resolveIssuerReference(issuerName) + if err != nil { + return nil, err + } + if ref == "" { + return logical.ErrorResponse("unable to resolve issuer id for reference: " + issuerName), nil + } + + issuer, err := sc.fetchIssuerById(ref) + if err != nil { + return nil, err + } + + newName, err := getIssuerName(sc, data) + if err != nil && err != errIssuerNameInUse { + // If the error is name already in use, and the new name is the + // old name for this issuer, we're not actually updating the + // issuer name (or causing a conflict) -- so don't err out. Other + // errs should still be surfaced, however. + return logical.ErrorResponse(err.Error()), nil + } + if err == errIssuerNameInUse && issuer.Name != newName { + // When the new name is in use but isn't this name, throw an error. + return logical.ErrorResponse(err.Error()), nil + } + if len(newName) > 0 && !nameMatcher.MatchString(newName) { + return logical.ErrorResponse("new key name outside of valid character limits"), nil + } + + newPath := data.Get("manual_chain").([]string) + rawLeafBehavior := data.Get("leaf_not_after_behavior").(string) + var newLeafBehavior certutil.NotAfterBehavior + switch rawLeafBehavior { + case "err": + newLeafBehavior = certutil.ErrNotAfterBehavior + case "truncate": + newLeafBehavior = certutil.TruncateNotAfterBehavior + case "permit": + newLeafBehavior = certutil.PermitNotAfterBehavior + default: + return logical.ErrorResponse("Unknown value for field `leaf_not_after_behavior`. 
Possible values are `err`, `truncate`, and `permit`."), nil + } + + rawUsage := data.Get("usage").([]string) + newUsage, err := NewIssuerUsageFromNames(rawUsage) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("Unable to parse specified usages: %v - valid values are %v", rawUsage, AllIssuerUsages.Names())), nil + } + + // Revocation signature algorithm changes + revSigAlgStr := data.Get("revocation_signature_algorithm").(string) + revSigAlg, present := certutil.SignatureAlgorithmNames[strings.ToLower(revSigAlgStr)] + if !present && revSigAlgStr != "" { + var knownAlgos []string + for algoName := range certutil.SignatureAlgorithmNames { + knownAlgos = append(knownAlgos, algoName) + } + + return logical.ErrorResponse(fmt.Sprintf("Unknown signature algorithm value: %v - valid values are %v", revSigAlg, strings.Join(knownAlgos, ", "))), nil + } else if revSigAlgStr == "" { + revSigAlg = x509.UnknownSignatureAlgorithm + } + if err := issuer.CanMaybeSignWithAlgo(revSigAlg); err != nil { + return nil, err + } + + // AIA access changes + enableTemplating := data.Get("enable_aia_url_templating").(bool) + issuerCertificates := data.Get("issuing_certificates").([]string) + if badURL := validateURLs(issuerCertificates); !enableTemplating && badURL != "" { + return logical.ErrorResponse(fmt.Sprintf("invalid URL found in Authority Information Access (AIA) parameter issuing_certificates: %s", badURL)), nil + } + crlDistributionPoints := data.Get("crl_distribution_points").([]string) + if badURL := validateURLs(crlDistributionPoints); !enableTemplating && badURL != "" { + return logical.ErrorResponse(fmt.Sprintf("invalid URL found in Authority Information Access (AIA) parameter crl_distribution_points: %s", badURL)), nil + } + ocspServers := data.Get("ocsp_servers").([]string) + if badURL := validateURLs(ocspServers); !enableTemplating && badURL != "" { + return logical.ErrorResponse(fmt.Sprintf("invalid URL found in Authority Information Access (AIA) parameter ocsp_servers: %s", badURL)), nil + } + + modified := false + + var oldName string + if newName != issuer.Name { + oldName = issuer.Name + issuer.Name = newName + issuer.LastModified = time.Now().UTC() + // See note in updateDefaultIssuerId about why this is necessary. + b.crlBuilder.invalidateCRLBuildTime() + b.crlBuilder.flushCRLBuildTimeInvalidation(sc) + modified = true + } + + if newLeafBehavior != issuer.LeafNotAfterBehavior { + issuer.LeafNotAfterBehavior = newLeafBehavior + modified = true + } + + if newUsage != issuer.Usage { + if issuer.Revoked && newUsage.HasUsage(IssuanceUsage) { + // Forbid allowing cert signing on its usage. + return logical.ErrorResponse("This issuer was revoked; unable to modify its usage to include certificate signing again. Reissue this certificate (preferably with a new key) and modify that entry instead."), nil + } + + // Ensure we deny adding CRL usage if the bits are missing from the + // cert itself. 
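+ // (Per RFC 5280, a CRL issuer's certificate must assert the cRLSign
+ // KeyUsage bit; granting crl-signing usage without it would only
+ // produce CRLs that validators reject.)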
+ cert, err := issuer.GetCertificate() + if err != nil { + return nil, fmt.Errorf("unable to parse issuer's certificate: %w", err) + } + if (cert.KeyUsage&x509.KeyUsageCRLSign) == 0 && newUsage.HasUsage(CRLSigningUsage) { + return logical.ErrorResponse("This issuer's underlying certificate lacks the CRLSign KeyUsage value; unable to set CRLSigningUsage on this issuer as a result."), nil + } + + issuer.Usage = newUsage + modified = true + } + + if revSigAlg != issuer.RevocationSigAlg { + issuer.RevocationSigAlg = revSigAlg + modified = true + } + + if issuer.AIAURIs == nil && (len(issuerCertificates) > 0 || len(crlDistributionPoints) > 0 || len(ocspServers) > 0) { + issuer.AIAURIs = &aiaConfigEntry{} + } + if issuer.AIAURIs != nil { + // Associative mapping from data source to destination on the + // backing issuer object. + type aiaPair struct { + Source *[]string + Dest *[]string + } + pairs := []aiaPair{ + { + Source: &issuerCertificates, + Dest: &issuer.AIAURIs.IssuingCertificates, + }, + { + Source: &crlDistributionPoints, + Dest: &issuer.AIAURIs.CRLDistributionPoints, + }, + { + Source: &ocspServers, + Dest: &issuer.AIAURIs.OCSPServers, + }, + } + + // For each pair, if it is different on the object, update it. + for _, pair := range pairs { + if isStringArrayDifferent(*pair.Source, *pair.Dest) { + *pair.Dest = *pair.Source + modified = true + } + } + if enableTemplating != issuer.AIAURIs.EnableTemplating { + issuer.AIAURIs.EnableTemplating = enableTemplating + modified = true + } + + // If no AIA URLs exist on the issuer, set the AIA URLs entry to nil + // to ease usage later. + if len(issuer.AIAURIs.IssuingCertificates) == 0 && len(issuer.AIAURIs.CRLDistributionPoints) == 0 && len(issuer.AIAURIs.OCSPServers) == 0 { + issuer.AIAURIs = nil + } + } + + // Updating the chain should be the last modification as there's a chance + // it'll write it out to disk for us. We'd hate to then modify the issuer + // again and write it a second time. + var updateChain bool + var constructedChain []issuerID + for index, newPathRef := range newPath { + // Allow self for the first entry. + if index == 0 && newPathRef == "self" { + newPathRef = string(ref) + } + + resolvedId, err := sc.resolveIssuerReference(newPathRef) + if err != nil { + return nil, err + } + + if index == 0 && resolvedId != ref { + return logical.ErrorResponse(fmt.Sprintf("expected first cert in chain to be a self-reference, but was: %v/%v", newPathRef, resolvedId)), nil + } + + constructedChain = append(constructedChain, resolvedId) + if len(issuer.ManualChain) < len(constructedChain) || constructedChain[index] != issuer.ManualChain[index] { + updateChain = true + } + } + + if len(issuer.ManualChain) != len(constructedChain) { + updateChain = true + } + + if updateChain { + issuer.ManualChain = constructedChain + + // Building the chain will write the issuer to disk; no need to do it + // twice. 
+ modified = false + err := sc.rebuildIssuersChains(issuer) + if err != nil { + return nil, err + } + } + + if modified { + err := sc.writeIssuer(issuer) + if err != nil { + return nil, err + } + } + + response, err := respondReadIssuer(issuer) + if newName != oldName { + addWarningOnDereferencing(sc, oldName, response) + } + if issuer.AIAURIs != nil && issuer.AIAURIs.EnableTemplating { + _, aiaErr := issuer.AIAURIs.toURLEntries(sc, issuer.ID) + if aiaErr != nil { + response.AddWarning(fmt.Sprintf("issuance may fail: %v\n\nConsider setting the cluster-local address if it is not already set.", aiaErr)) + } + } + + return response, err +} + +func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Since we're planning on updating issuers here, grab the lock so we've + // got a consistent view. + b.issuersLock.Lock() + defer b.issuersLock.Unlock() + + if b.useLegacyBundleCaStorage() { + return logical.ErrorResponse("Can not patch issuer until migration has completed"), nil + } + + // First we fetch the issuer + issuerName := getIssuerRef(data) + if len(issuerName) == 0 { + return logical.ErrorResponse("missing issuer reference"), nil + } + + sc := b.makeStorageContext(ctx, req.Storage) + ref, err := sc.resolveIssuerReference(issuerName) + if err != nil { + return nil, err + } + if ref == "" { + return logical.ErrorResponse("unable to resolve issuer id for reference: " + issuerName), nil + } + + issuer, err := sc.fetchIssuerById(ref) + if err != nil { + return nil, err + } + + // Now We are Looking at What (Might) Have Changed + modified := false + + // Name Changes First + _, ok := data.GetOk("issuer_name") // Don't check for conflicts if we aren't updating the name + var oldName string + var newName string + if ok { + newName, err = getIssuerName(sc, data) + if err != nil && err != errIssuerNameInUse && err != errIssuerNameIsEmpty { + // If the error is name already in use, and the new name is the + // old name for this issuer, we're not actually updating the + // issuer name (or causing a conflict) -- so don't err out. Other + // errs should still be surfaced, however. + return logical.ErrorResponse(err.Error()), nil + } + if err == errIssuerNameInUse && issuer.Name != newName { + // When the new name is in use but isn't this name, throw an error. + return logical.ErrorResponse(err.Error()), nil + } + if len(newName) > 0 && !nameMatcher.MatchString(newName) { + return logical.ErrorResponse("new key name outside of valid character limits"), nil + } + if newName != issuer.Name { + oldName = issuer.Name + issuer.Name = newName + issuer.LastModified = time.Now().UTC() + // See note in updateDefaultIssuerId about why this is necessary. + b.crlBuilder.invalidateCRLBuildTime() + b.crlBuilder.flushCRLBuildTimeInvalidation(sc) + modified = true + } + } + + // Leaf Not After Changes + rawLeafBehaviorData, ok := data.GetOk("leaf_not_after_behavior") + if ok { + rawLeafBehavior := rawLeafBehaviorData.(string) + var newLeafBehavior certutil.NotAfterBehavior + switch rawLeafBehavior { + case "err": + newLeafBehavior = certutil.ErrNotAfterBehavior + case "truncate": + newLeafBehavior = certutil.TruncateNotAfterBehavior + case "permit": + newLeafBehavior = certutil.PermitNotAfterBehavior + default: + return logical.ErrorResponse("Unknown value for field `leaf_not_after_behavior`. 
Possible values are `err`, `truncate`, and `permit`."), nil
+ }
+ if newLeafBehavior != issuer.LeafNotAfterBehavior {
+ issuer.LeafNotAfterBehavior = newLeafBehavior
+ modified = true
+ }
+ }
+
+ // Usage Changes
+ rawUsageData, ok := data.GetOk("usage")
+ if ok {
+ rawUsage := rawUsageData.([]string)
+ newUsage, err := NewIssuerUsageFromNames(rawUsage)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("Unable to parse specified usages: %v - valid values are %v", rawUsage, AllIssuerUsages.Names())), nil
+ }
+ if newUsage != issuer.Usage {
+ if issuer.Revoked && newUsage.HasUsage(IssuanceUsage) {
+ // Forbid re-adding the cert-signing usage to a revoked issuer.
+ return logical.ErrorResponse("This issuer was revoked; unable to modify its usage to include certificate signing again. Reissue this certificate (preferably with a new key) and modify that entry instead."), nil
+ }
+
+ cert, err := issuer.GetCertificate()
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse issuer's certificate: %w", err)
+ }
+ if (cert.KeyUsage&x509.KeyUsageCRLSign) == 0 && newUsage.HasUsage(CRLSigningUsage) {
+ return logical.ErrorResponse("This issuer's underlying certificate lacks the CRLSign KeyUsage value; unable to set CRLSigningUsage on this issuer as a result."), nil
+ }
+
+ issuer.Usage = newUsage
+ modified = true
+ }
+ }
+
+ // Revocation signature algorithm changes
+ rawRevSigAlg, ok := data.GetOk("revocation_signature_algorithm")
+ if ok {
+ revSigAlgStr := rawRevSigAlg.(string)
+ revSigAlg, present := certutil.SignatureAlgorithmNames[strings.ToLower(revSigAlgStr)]
+ if !present && revSigAlgStr != "" {
+ var knownAlgos []string
+ for algoName := range certutil.SignatureAlgorithmNames {
+ knownAlgos = append(knownAlgos, algoName)
+ }
+
+ return logical.ErrorResponse(fmt.Sprintf("Unknown signature algorithm value: %v - valid values are %v", revSigAlgStr, strings.Join(knownAlgos, ", "))), nil
+ } else if revSigAlgStr == "" {
+ revSigAlg = x509.UnknownSignatureAlgorithm
+ }
+
+ if err := issuer.CanMaybeSignWithAlgo(revSigAlg); err != nil {
+ return nil, err
+ }
+
+ if revSigAlg != issuer.RevocationSigAlg {
+ issuer.RevocationSigAlg = revSigAlg
+ modified = true
+ }
+ }
+
+ // AIA access changes.
+ if issuer.AIAURIs == nil {
+ issuer.AIAURIs = &aiaConfigEntry{}
+ }
+
+ // Associative mapping from data source to destination on the
+ // backing issuer object. For PATCH requests, we use the source
+ // data parameters, as we still need to validate them and process
+ // them into string lists.
+ type aiaPair struct {
+ Source string
+ Dest *[]string
+ }
+ pairs := []aiaPair{
+ {
+ Source: "issuing_certificates",
+ Dest: &issuer.AIAURIs.IssuingCertificates,
+ },
+ {
+ Source: "crl_distribution_points",
+ Dest: &issuer.AIAURIs.CRLDistributionPoints,
+ },
+ {
+ Source: "ocsp_servers",
+ Dest: &issuer.AIAURIs.OCSPServers,
+ },
+ }
+
+ if enableTemplatingRaw, ok := data.GetOk("enable_aia_url_templating"); ok {
+ enableTemplating := enableTemplatingRaw.(bool)
+ if enableTemplating != issuer.AIAURIs.EnableTemplating {
+ // Assign the requested value rather than a literal true, so a
+ // PATCH can also disable templating.
+ issuer.AIAURIs.EnableTemplating = enableTemplating
+ modified = true
+ }
+ }
+
+ // For each pair, if it is different on the object, update it.
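+ // (PATCH semantics: fields absent from the request leave the issuer's
+ // AIA values untouched, while an explicit empty list clears them.)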
+ for _, pair := range pairs { + rawURLsValue, ok := data.GetOk(pair.Source) + if ok { + urlsValue := rawURLsValue.([]string) + if badURL := validateURLs(urlsValue); !issuer.AIAURIs.EnableTemplating && badURL != "" { + return logical.ErrorResponse(fmt.Sprintf("invalid URL found in Authority Information Access (AIA) parameter %v: %s", pair.Source, badURL)), nil + } + + if isStringArrayDifferent(urlsValue, *pair.Dest) { + modified = true + *pair.Dest = urlsValue + } + } + } + + // If no AIA URLs exist on the issuer, set the AIA URLs entry to nil to + // ease usage later. + if len(issuer.AIAURIs.IssuingCertificates) == 0 && len(issuer.AIAURIs.CRLDistributionPoints) == 0 && len(issuer.AIAURIs.OCSPServers) == 0 { + issuer.AIAURIs = nil + } + + // Manual Chain Changes + newPathData, ok := data.GetOk("manual_chain") + if ok { + newPath := newPathData.([]string) + var updateChain bool + var constructedChain []issuerID + for index, newPathRef := range newPath { + // Allow self for the first entry. + if index == 0 && newPathRef == "self" { + newPathRef = string(ref) + } + + resolvedId, err := sc.resolveIssuerReference(newPathRef) + if err != nil { + return nil, err + } + + if index == 0 && resolvedId != ref { + return logical.ErrorResponse(fmt.Sprintf("expected first cert in chain to be a self-reference, but was: %v/%v", newPathRef, resolvedId)), nil + } + + constructedChain = append(constructedChain, resolvedId) + if len(issuer.ManualChain) < len(constructedChain) || constructedChain[index] != issuer.ManualChain[index] { + updateChain = true + } + } + + if len(issuer.ManualChain) != len(constructedChain) { + updateChain = true + } + + if updateChain { + issuer.ManualChain = constructedChain + + // Building the chain will write the issuer to disk; no need to do it + // twice. 
+ modified = false
+ err := sc.rebuildIssuersChains(issuer)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if modified {
+ err := sc.writeIssuer(issuer)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ response, err := respondReadIssuer(issuer)
+ if newName != oldName {
+ addWarningOnDereferencing(sc, oldName, response)
+ }
+ if issuer.AIAURIs != nil && issuer.AIAURIs.EnableTemplating {
+ _, aiaErr := issuer.AIAURIs.toURLEntries(sc, issuer.ID)
+ if aiaErr != nil {
+ response.AddWarning(fmt.Sprintf("issuance may fail: %v\n\nConsider setting the cluster-local address if it is not already set.", aiaErr))
+ }
+ }
+
+ return response, err
+}
+
+func (b *backend) pathGetRawIssuer(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ if b.useLegacyBundleCaStorage() {
+ return logical.ErrorResponse("Can not get issuer until migration has completed"), nil
+ }
+
+ issuerName := getIssuerRef(data)
+ if len(issuerName) == 0 {
+ return logical.ErrorResponse("missing issuer reference"), nil
+ }
+
+ sc := b.makeStorageContext(ctx, req.Storage)
+ ref, err := sc.resolveIssuerReference(issuerName)
+ if err != nil {
+ return nil, err
+ }
+ if ref == "" {
+ return logical.ErrorResponse("unable to resolve issuer id for reference: " + issuerName), nil
+ }
+
+ issuer, err := sc.fetchIssuerById(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ var contentType string
+ var certificate []byte
+
+ response := &logical.Response{}
+ ret, err := sendNotModifiedResponseIfNecessary(&IfModifiedSinceHelper{req: req, reqType: ifModifiedCA, issuerRef: ref}, sc, response)
+ if err != nil {
+ return nil, err
+ }
+ if ret {
+ return response, nil
+ }
+
+ certificate = []byte(issuer.Certificate)
+
+ if strings.HasSuffix(req.Path, "/pem") {
+ contentType = "application/pem-certificate-chain"
+ } else if strings.HasSuffix(req.Path, "/der") {
+ contentType = "application/pkix-cert"
+ }
+
+ if strings.HasSuffix(req.Path, "/der") {
+ pemBlock, _ := pem.Decode(certificate)
+ if pemBlock == nil {
+ // err is guaranteed nil at this point; return an explicit error
+ // rather than a (nil, nil) pair.
+ return nil, fmt.Errorf("unable to decode stored issuer certificate as PEM")
+ }
+
+ certificate = pemBlock.Bytes
+ }
+
+ statusCode := 200
+ if len(certificate) == 0 {
+ statusCode = 204
+ }
+
+ if strings.HasSuffix(req.Path, "/pem") || strings.HasSuffix(req.Path, "/der") {
+ return &logical.Response{
+ Data: map[string]interface{}{
+ logical.HTTPContentType: contentType,
+ logical.HTTPRawBody: certificate,
+ logical.HTTPStatusCode: statusCode,
+ },
+ }, nil
+ } else {
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "certificate": string(certificate),
+ "ca_chain": issuer.CAChain,
+ "issuer_id": issuer.ID,
+ "issuer_name": issuer.Name,
+ },
+ }, nil
+ }
+}
+
+func (b *backend) pathDeleteIssuer(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ // Since we're planning on updating issuers here, grab the lock so we've
+ // got a consistent view.
+ b.issuersLock.Lock()
+ defer b.issuersLock.Unlock()
+
+ if b.useLegacyBundleCaStorage() {
+ return logical.ErrorResponse("Can not delete issuer until migration has completed"), nil
+ }
+
+ issuerName := getIssuerRef(data)
+ if len(issuerName) == 0 {
+ return logical.ErrorResponse("missing issuer reference"), nil
+ }
+
+ sc := b.makeStorageContext(ctx, req.Storage)
+ ref, err := sc.resolveIssuerReference(issuerName)
+ if err != nil {
+ // Return as if we deleted it if we fail to look up the issuer.
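+ // (This keeps deletion idempotent: an unknown issuer reference is
+ // treated as already deleted rather than surfacing an error.)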
+ if ref == IssuerRefNotFound { + return &logical.Response{}, nil + } + return nil, err + } + + response := &logical.Response{} + + issuer, err := sc.fetchIssuerById(ref) + if err != nil { + return nil, err + } + if issuer.Name != "" { + addWarningOnDereferencing(sc, issuer.Name, response) + } + addWarningOnDereferencing(sc, string(issuer.ID), response) + + wasDefault, err := sc.deleteIssuer(ref) + if err != nil { + return nil, err + } + if wasDefault { + response.AddWarning(fmt.Sprintf("Deleted issuer %v (via issuer_ref %v); this was configured as the default issuer. Operations without an explicit issuer will not work until a new default is configured.", ref, issuerName)) + addWarningOnDereferencing(sc, defaultRef, response) + } + + // Since we've deleted an issuer, the chains might've changed. Call the + // rebuild code. We shouldn't technically err (as the issuer was deleted + // successfully), but log a warning (and to the response) if this fails. + if err := sc.rebuildIssuersChains(nil); err != nil { + msg := fmt.Sprintf("Failed to rebuild remaining issuers' chains: %v", err) + b.Logger().Error(msg) + response.AddWarning(msg) + } + + // Finally, we need to rebuild both the local and the unified CRLs. This + // will free up any now unnecessary space used in both the CRL config + // and for the underlying CRL. + warnings, err := b.crlBuilder.rebuild(sc, true) + if err != nil { + return nil, err + } + + for index, warning := range warnings { + response.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) + } + + return response, nil +} + +func addWarningOnDereferencing(sc *storageContext, name string, resp *logical.Response) { + timeout, inUseBy, err := sc.checkForRolesReferencing(name) + if err != nil || timeout { + if inUseBy == 0 { + resp.AddWarning(fmt.Sprint("Unable to check if any roles referenced this issuer by ", name)) + } else { + resp.AddWarning(fmt.Sprint("The name ", name, " was in use by at least ", inUseBy, " roles")) + } + } else { + if inUseBy > 0 { + resp.AddWarning(fmt.Sprint(inUseBy, " roles reference ", name)) + } + } +} + +const ( + pathGetIssuerHelpSyn = `Fetch a single issuer certificate.` + pathGetIssuerHelpDesc = ` +This allows fetching information associated with the underlying issuer +certificate. + +:ref can be either the literal value "default", in which case /config/issuers +will be consulted for the present default issuer, an identifier of an issuer, +or its assigned name value. + +Use /issuer/:ref/der or /issuer/:ref/pem to return just the certificate in +raw DER or PEM form, without the JSON structure of /issuer/:ref. + +Writing to /issuer/:ref allows updating of the name field associated with +the certificate. +` +) + +func pathGetIssuerCRL(b *backend) *framework.Path { + pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/crl(/pem|/der|/delta(/pem|/der)?)?" + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuer, + OperationSuffix: "crl|crl-pem|crl-der|crl-delta|crl-delta-pem|crl-delta-der", + } + + return buildPathGetIssuerCRL(b, pattern, displayAttrs) +} + +func pathGetIssuerUnifiedCRL(b *backend) *framework.Path { + pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/unified-crl(/pem|/der|/delta(/pem|/der)?)?" 
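+ // (This pattern accepts e.g. issuer/:ref/unified-crl,
+ // .../unified-crl/pem, .../unified-crl/delta and
+ // .../unified-crl/delta/der.)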
+
+ displayAttrs := &framework.DisplayAttributes{
+ OperationPrefix: operationPrefixPKIIssuer,
+ OperationSuffix: "unified-crl|unified-crl-pem|unified-crl-der|unified-crl-delta|unified-crl-delta-pem|unified-crl-delta-der",
+ }
+
+ return buildPathGetIssuerCRL(b, pattern, displayAttrs)
+}
+
+func buildPathGetIssuerCRL(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path {
+ fields := map[string]*framework.FieldSchema{}
+ fields = addIssuerRefNameFields(fields)
+
+ return &framework.Path{
+ // Returns raw values.
+ Pattern: pattern,
+ DisplayAttrs: displayAttrs,
+ Fields: fields,
+
+ Operations: map[logical.Operation]framework.OperationHandler{
+ logical.ReadOperation: &framework.PathOperation{
+ Callback: b.pathGetIssuerCRL,
+ Responses: map[int][]framework.Response{
+ http.StatusOK: {{
+ Description: "OK",
+ Fields: map[string]*framework.FieldSchema{
+ "crl": {
+ Type: framework.TypeString,
+ Required: false,
+ },
+ },
+ }},
+ },
+ },
+ },
+
+ HelpSynopsis: pathGetIssuerCRLHelpSyn,
+ HelpDescription: pathGetIssuerCRLHelpDesc,
+ }
+}
+
+func (b *backend) pathGetIssuerCRL(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ if b.useLegacyBundleCaStorage() {
+ return logical.ErrorResponse("Can not get issuer's CRL until migration has completed"), nil
+ }
+
+ issuerName := getIssuerRef(data)
+ if len(issuerName) == 0 {
+ return logical.ErrorResponse("missing issuer reference"), nil
+ }
+
+ sc := b.makeStorageContext(ctx, req.Storage)
+ warnings, err := b.crlBuilder.rebuildIfForced(sc)
+ if err != nil {
+ return nil, err
+ }
+ if len(warnings) > 0 {
+ // Since this is a fetch of a specific CRL, this most likely comes
+ // from an automated system of some sort; these warnings would be
+ // ignored and likely meaningless. Log them instead.
+ msg := "During rebuild of CRL on issuer CRL fetch, got the following warnings:"
+ for index, warning := range warnings {
+ msg = fmt.Sprintf("%v\n %d. %v", msg, index+1, warning)
+ }
+ b.Logger().Warn(msg)
+ }
+
+ var certificate []byte
+ var contentType string
+
+ isUnified := strings.Contains(req.Path, "unified")
+ isDelta := strings.Contains(req.Path, "delta")
+
+ response := &logical.Response{}
+ var crlType ifModifiedReqType = ifModifiedCRL
+
+ if !isUnified && isDelta {
+ crlType = ifModifiedDeltaCRL
+ } else if isUnified && !isDelta {
+ crlType = ifModifiedUnifiedCRL
+ } else if isUnified && isDelta {
+ crlType = ifModifiedUnifiedDeltaCRL
+ }
+
+ ret, err := sendNotModifiedResponseIfNecessary(&IfModifiedSinceHelper{req: req, reqType: crlType}, sc, response)
+ if err != nil {
+ return nil, err
+ }
+ if ret {
+ return response, nil
+ }
+
+ crlPath, err := sc.resolveIssuerCRLPath(issuerName, isUnified)
+ if err != nil {
+ return nil, err
+ }
+
+ if isDelta {
+ crlPath += deltaCRLPathSuffix
+ }
+
+ crlEntry, err := req.Storage.Get(ctx, crlPath)
+ if err != nil {
+ return nil, err
+ }
+
+ if crlEntry != nil && len(crlEntry.Value) > 0 {
+ certificate = []byte(crlEntry.Value)
+ }
+
+ if strings.HasSuffix(req.Path, "/der") {
+ contentType = "application/pkix-crl"
+ } else if strings.HasSuffix(req.Path, "/pem") {
+ contentType = "application/x-pem-file"
+ }
+
+ if !strings.HasSuffix(req.Path, "/der") {
+ // Prefer returning an empty response over an empty PEM blob, so only
+ // build the PEM block when there are CRL bytes. This PEM block serves
+ // both the JSON and PEM endpoints.
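+ // (RFC 7468, Section 6 specifies "X509 CRL" as the PEM type label
+ // used when encoding CRLs.)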
+ if len(certificate) > 0 {
+ pemBlock := pem.Block{
+ Type: "X509 CRL",
+ Bytes: certificate,
+ }
+
+ certificate = pem.EncodeToMemory(&pemBlock)
+ }
+ }
+
+ statusCode := 200
+ if len(certificate) == 0 {
+ statusCode = 204
+ }
+
+ if strings.HasSuffix(req.Path, "/der") || strings.HasSuffix(req.Path, "/pem") {
+ return &logical.Response{
+ Data: map[string]interface{}{
+ logical.HTTPContentType: contentType,
+ logical.HTTPRawBody: certificate,
+ logical.HTTPStatusCode: statusCode,
+ },
+ }, nil
+ }
+
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "crl": string(certificate),
+ },
+ }, nil
+}
+
+const (
+ pathGetIssuerCRLHelpSyn = `Fetch an issuer's Certificate Revocation List (CRL).`
+ pathGetIssuerCRLHelpDesc = `
+This allows fetching the specified issuer's CRL. Note that this is different
+from the legacy paths (/crl and /certs/crl) in that it is per-issuer and not
+just the default issuer's CRL.
+
+Two issuers will have the same CRL if they have the same key material and if
+they have the same Subject value.
+
+:ref can be either the literal value "default", in which case /config/issuers
+will be consulted for the present default issuer, an identifier of an issuer,
+or its assigned name value.
+
+ - /issuer/:ref/crl is JSON encoded and contains a PEM CRL,
+ - /issuer/:ref/crl/pem contains the PEM-encoded CRL,
+ - /issuer/:ref/crl/der contains the raw DER-encoded (binary) CRL.
+`
+)
diff --git a/builtin/logical/pki/path_fetch_keys.go b/builtin/logical/pki/path_fetch_keys.go
new file mode 100644
index 0000000..23b3bf5
--- /dev/null
+++ b/builtin/logical/pki/path_fetch_keys.go
@@ -0,0 +1,400 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package pki
+
+import (
+ "context"
+ "crypto"
+ "fmt"
+ "net/http"
+
+ "github.com/hashicorp/vault/sdk/helper/certutil"
+ "github.com/hashicorp/vault/sdk/helper/errutil"
+
+ "github.com/hashicorp/vault/sdk/framework"
+ "github.com/hashicorp/vault/sdk/logical"
+)
+
+func pathListKeys(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "keys/?$",
+
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationPrefix: operationPrefixPKI,
+ OperationSuffix: "keys",
+ },
+
+ Operations: map[logical.Operation]framework.OperationHandler{
+ logical.ListOperation: &framework.PathOperation{
+ Callback: b.pathListKeysHandler,
+ Responses: map[int][]framework.Response{
+ http.StatusOK: {{
+ Description: "OK",
+ Fields: map[string]*framework.FieldSchema{
+ "keys": {
+ Type: framework.TypeStringSlice,
+ Description: `A list of keys`,
+ Required: true,
+ },
+ "key_info": {
+ Type: framework.TypeMap,
+ Description: `Key info with key name`,
+ Required: false,
+ },
+ },
+ }},
+ },
+ ForwardPerformanceStandby: false,
+ ForwardPerformanceSecondary: false,
+ },
+ },
+
+ HelpSynopsis: pathListKeysHelpSyn,
+ HelpDescription: pathListKeysHelpDesc,
+ }
+}
+
+const (
+ pathListKeysHelpSyn = `Fetch a list of all issuer keys`
+ pathListKeysHelpDesc = `This endpoint allows listing of known backing keys, returning
+their identifier and their name (if set).`
+)
+
+func (b *backend) pathListKeysHandler(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) {
+ if b.useLegacyBundleCaStorage() {
+ return logical.ErrorResponse("Can not list keys until migration has completed"), nil
+ }
+
+ var responseKeys []string
+ responseInfo := make(map[string]interface{})
+
+ sc := b.makeStorageContext(ctx, req.Storage)
+ entries, err := sc.listKeys()
+ if err != nil {
+ return nil, err
+ }
+
+ config,
err := sc.getKeysConfig() + if err != nil { + return nil, err + } + + for _, identifier := range entries { + key, err := sc.fetchKeyById(identifier) + if err != nil { + return nil, err + } + + responseKeys = append(responseKeys, string(identifier)) + responseInfo[string(identifier)] = map[string]interface{}{ + keyNameParam: key.Name, + "is_default": identifier == config.DefaultKeyId, + } + + } + return logical.ListResponseWithInfo(responseKeys, responseInfo), nil +} + +func pathKey(b *backend) *framework.Path { + pattern := "key/" + framework.GenericNameRegex(keyRefParam) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "key", + } + + return buildPathKey(b, pattern, displayAttrs) +} + +func buildPathKey(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { + return &framework.Path{ + Pattern: pattern, + DisplayAttrs: displayAttrs, + + Fields: map[string]*framework.FieldSchema{ + keyRefParam: { + Type: framework.TypeString, + Description: `Reference to key; either "default" for the configured default key, an identifier of a key, or the name assigned to the key.`, + Default: defaultRef, + }, + keyNameParam: { + Type: framework.TypeString, + Description: `Human-readable name for this key.`, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathGetKeyHandler, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "key_id": { + Type: framework.TypeString, + Description: `Key Id`, + Required: true, + }, + "key_name": { + Type: framework.TypeString, + Description: `Key Name`, + Required: true, + }, + "key_type": { + Type: framework.TypeString, + Description: `Key Type`, + Required: true, + }, + "subject_key_id": { + Type: framework.TypeString, + Description: `RFC 5280 Subject Key Identifier of the public counterpart`, + Required: false, + }, + "managed_key_id": { + Type: framework.TypeString, + Description: `Managed Key Id`, + Required: false, + }, + "managed_key_name": { + Type: framework.TypeString, + Description: `Managed Key Name`, + Required: false, + }, + }, + }}, + }, + ForwardPerformanceStandby: false, + ForwardPerformanceSecondary: false, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathUpdateKeyHandler, + Responses: map[int][]framework.Response{ + http.StatusNoContent: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "key_id": { + Type: framework.TypeString, + Description: `Key Id`, + Required: true, + }, + "key_name": { + Type: framework.TypeString, + Description: `Key Name`, + Required: true, + }, + "key_type": { + Type: framework.TypeString, + Description: `Key Type`, + Required: true, + }, + }, + }}, + }, + ForwardPerformanceStandby: true, + ForwardPerformanceSecondary: true, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathDeleteKeyHandler, + Responses: map[int][]framework.Response{ + http.StatusNoContent: {{ + Description: "No Content", + }}, + }, + ForwardPerformanceStandby: true, + ForwardPerformanceSecondary: true, + }, + }, + + HelpSynopsis: pathKeysHelpSyn, + HelpDescription: pathKeysHelpDesc, + } +} + +const ( + pathKeysHelpSyn = `Fetch a single issuer key` + pathKeysHelpDesc = `This allows fetching information associated with the underlying key. 
+
+:ref can be either the literal value "default", in which case /config/keys
+will be consulted for the present default key, an identifier of a key,
+or its assigned name value.
+
+Writing to /key/:ref allows updating of the name field associated with
+the key.
+`
+)
+
+func (b *backend) pathGetKeyHandler(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ if b.useLegacyBundleCaStorage() {
+ return logical.ErrorResponse("Can not get keys until migration has completed"), nil
+ }
+
+ keyRef := data.Get(keyRefParam).(string)
+ if len(keyRef) == 0 {
+ return logical.ErrorResponse("missing key reference"), nil
+ }
+
+ sc := b.makeStorageContext(ctx, req.Storage)
+ keyId, err := sc.resolveKeyReference(keyRef)
+ if err != nil {
+ return nil, err
+ }
+ if keyId == "" {
+ return logical.ErrorResponse("unable to resolve key id for reference: " + keyRef), nil
+ }
+
+ key, err := sc.fetchKeyById(keyId)
+ if err != nil {
+ return nil, err
+ }
+
+ respData := map[string]interface{}{
+ keyIdParam: key.ID,
+ keyNameParam: key.Name,
+ keyTypeParam: string(key.PrivateKeyType),
+ }
+
+ var pkForSkid crypto.PublicKey
+ if key.isManagedPrivateKey() {
+ managedKeyUUID, err := key.getManagedKeyUUID()
+ if err != nil {
+ return nil, errutil.InternalError{Err: fmt.Sprintf("failed extracting managed key uuid from key id %s (%s): %v", key.ID, key.Name, err)}
+ }
+
+ keyInfo, err := getManagedKeyInfo(ctx, b, managedKeyUUID)
+ if err != nil {
+ return nil, errutil.InternalError{Err: fmt.Sprintf("failed fetching managed key info from key id %s (%s): %v", key.ID, key.Name, err)}
+ }
+
+ pkForSkid, err = getManagedKeyPublicKey(sc.Context, sc.Backend, managedKeyUUID)
+ if err != nil {
+ return nil, err
+ }
+
+ // To remain consistent across the api responses (mainly generate root/intermediate calls), return the actual
+ // type of key, not that it is a managed key.
+ respData[keyTypeParam] = string(keyInfo.keyType)
+ respData[managedKeyIdArg] = string(keyInfo.uuid)
+ respData[managedKeyNameArg] = string(keyInfo.name)
+ } else {
+ pkForSkid, err = getPublicKeyFromBytes([]byte(key.PrivateKey))
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ skid, err := certutil.GetSubjectKeyID(pkForSkid)
+ if err != nil {
+ return nil, err
+ }
+ respData[skidParam] = certutil.GetHexFormatted([]byte(skid), ":")
+
+ return &logical.Response{Data: respData}, nil
+}
+
+func (b *backend) pathUpdateKeyHandler(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ // Since we're planning on updating keys here, grab the lock so we've
+ // got a consistent view.
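+ // (Key updates take b.issuersLock rather than a key-specific lock,
+ // presumably because issuer entries reference keys by ID and the two
+ // must stay consistent with each other.)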
+ b.issuersLock.Lock()
+ defer b.issuersLock.Unlock()
+
+ if b.useLegacyBundleCaStorage() {
+ return logical.ErrorResponse("Can not update keys until migration has completed"), nil
+ }
+
+ keyRef := data.Get(keyRefParam).(string)
+ if len(keyRef) == 0 {
+ return logical.ErrorResponse("missing key reference"), nil
+ }
+
+ sc := b.makeStorageContext(ctx, req.Storage)
+ keyId, err := sc.resolveKeyReference(keyRef)
+ if err != nil {
+ return nil, err
+ }
+ if keyId == "" {
+ return logical.ErrorResponse("unable to resolve key id for reference: " + keyRef), nil
+ }
+
+ key, err := sc.fetchKeyById(keyId)
+ if err != nil {
+ return nil, err
+ }
+
+ newName := data.Get(keyNameParam).(string)
+ if len(newName) > 0 && !nameMatcher.MatchString(newName) {
+ return logical.ErrorResponse("new key name outside of valid character limits"), nil
+ }
+
+ if newName != key.Name {
+ key.Name = newName
+
+ err := sc.writeKey(*key)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ resp := &logical.Response{
+ Data: map[string]interface{}{
+ keyIdParam: key.ID,
+ keyNameParam: key.Name,
+ keyTypeParam: key.PrivateKeyType,
+ },
+ }
+
+ if len(newName) == 0 {
+ resp.AddWarning("Name successfully deleted; you will now need to reference this key by its ID: " + string(key.ID))
+ }
+
+ return resp, nil
+}
+
+func (b *backend) pathDeleteKeyHandler(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ // Since we're planning on deleting keys here, grab the lock so we've
+ // got a consistent view.
+ b.issuersLock.Lock()
+ defer b.issuersLock.Unlock()
+
+ if b.useLegacyBundleCaStorage() {
+ return logical.ErrorResponse("Can not delete keys until migration has completed"), nil
+ }
+
+ keyRef := data.Get(keyRefParam).(string)
+ if len(keyRef) == 0 {
+ return logical.ErrorResponse("missing key reference"), nil
+ }
+
+ sc := b.makeStorageContext(ctx, req.Storage)
+ keyId, err := sc.resolveKeyReference(keyRef)
+ if err != nil {
+ if keyId == KeyRefNotFound {
+ // We failed to look up the key; ignore any error here and reply
+ // as if it was already deleted.
+ return nil, nil
+ }
+ return nil, err
+ }
+
+ keyInUse, issuerId, err := sc.isKeyInUse(keyId.String())
+ if err != nil {
+ return nil, err
+ }
+ if keyInUse {
+ return logical.ErrorResponse(fmt.Sprintf("Failed to delete key: key is in use by issuer %s", issuerId)), nil
+ }
+
+ wasDefault, err := sc.deleteKey(keyId)
+ if err != nil {
+ return nil, err
+ }
+
+ var response *logical.Response
+ if wasDefault {
+ msg := fmt.Sprintf("Deleted key %v (via key_ref %v); this was configured as the default key. Operations without an explicit key will not work until a new default is configured.", string(keyId), keyRef)
+ b.Logger().Error(msg)
+ response = &logical.Response{}
+ response.AddWarning(msg)
+ }
+
+ return response, nil
+}
diff --git a/builtin/logical/pki/path_intermediate.go b/builtin/logical/pki/path_intermediate.go
new file mode 100644
index 0000000..1a68563
--- /dev/null
+++ b/builtin/logical/pki/path_intermediate.go
@@ -0,0 +1,225 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "context" + "encoding/base64" + "fmt" + "net/http" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathGenerateIntermediate(b *backend) *framework.Path { + pattern := "intermediate/generate/" + framework.GenericNameRegex("exported") + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "generate", + OperationSuffix: "intermediate", + } + + return buildPathGenerateIntermediate(b, pattern, displayAttrs) +} + +func pathSetSignedIntermediate(b *backend) *framework.Path { + ret := &framework.Path{ + Pattern: "intermediate/set-signed", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "set-signed", + OperationSuffix: "intermediate", + }, + + Fields: map[string]*framework.FieldSchema{ + "certificate": { + Type: framework.TypeString, + Description: `PEM-format certificate. This must be a CA +certificate with a public key matching the +previously-generated key from the generation +endpoint. Additional parent CAs may be optionally +appended to the bundle.`, + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathImportIssuers, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "mapping": { + Type: framework.TypeMap, + Description: "A mapping of issuer_id to key_id for all issuers included in this request", + Required: true, + }, + "imported_keys": { + Type: framework.TypeCommaStringSlice, + Description: "Net-new keys imported as a part of this request", + Required: true, + }, + "imported_issuers": { + Type: framework.TypeCommaStringSlice, + Description: "Net-new issuers imported as a part of this request", + Required: true, + }, + "existing_keys": { + Type: framework.TypeCommaStringSlice, + Description: "Existing keys specified as part of the import bundle of this request", + Required: true, + }, + "existing_issuers": { + Type: framework.TypeCommaStringSlice, + Description: "Existing issuers specified as part of the import bundle of this request", + Required: true, + }, + }, + }}, + }, + // Read more about why these flags are set in backend.go + ForwardPerformanceStandby: true, + ForwardPerformanceSecondary: true, + }, + }, + + HelpSynopsis: pathSetSignedIntermediateHelpSyn, + HelpDescription: pathSetSignedIntermediateHelpDesc, + } + + return ret +} + +func (b *backend) pathGenerateIntermediate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Since we're planning on updating issuers here, grab the lock so we've + // got a consistent view. + b.issuersLock.Lock() + defer b.issuersLock.Unlock() + + var err error + + if b.useLegacyBundleCaStorage() { + return logical.ErrorResponse("Can not create intermediate until migration has completed"), nil + } + + // Nasty hack :-) For cross-signing, we want to use the existing key, but + // this isn't _actually_ part of the path. Put it into the request + // parameters as if it was. 
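+ // (Cross-signing reuses the existing issuer's key pair, so the handler
+ // forces exported=existing instead of generating fresh key material.)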
+ if req.Path == "intermediate/cross-sign" { + data.Raw["exported"] = "existing" + } + + // Remove this once https://github.com/golang/go/issues/45990 is fixed + data.Schema["use_pss"] = &framework.FieldSchema{ + Type: framework.TypeBool, + Default: false, + } + data.Raw["use_pss"] = false + + sc := b.makeStorageContext(ctx, req.Storage) + exported, format, role, errorResp := getGenerationParams(sc, data) + if errorResp != nil { + return errorResp, nil + } + + keyName, err := getKeyName(sc, data) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + var resp *logical.Response + input := &inputBundle{ + role: role, + req: req, + apiData: data, + } + + parsedBundle, warnings, err := generateIntermediateCSR(sc, input, b.Backend.GetRandomReader()) + if err != nil { + switch err.(type) { + case errutil.UserError: + return logical.ErrorResponse(err.Error()), nil + default: + return nil, err + } + } + + csrb, err := parsedBundle.ToCSRBundle() + if err != nil { + return nil, fmt.Errorf("error converting raw CSR bundle to CSR bundle: %w", err) + } + + resp = &logical.Response{ + Data: map[string]interface{}{}, + } + + entries, err := getGlobalAIAURLs(ctx, req.Storage) + if err == nil && len(entries.OCSPServers) == 0 && len(entries.IssuingCertificates) == 0 && len(entries.CRLDistributionPoints) == 0 { + // If the operator hasn't configured any of the URLs prior to + // generating this issuer, we should add a warning to the response, + // informing them they might want to do so and re-generate the issuer. + resp.AddWarning("This mount hasn't configured any authority information access (AIA) fields; this may make it harder for systems to find missing certificates in the chain or to validate revocation status of certificates. Consider updating /config/urls or the newly generated issuer with this information. Since this certificate is an intermediate, it might be useful to regenerate this certificate after fixing this problem for the root mount.") + } + + switch format { + case "pem": + resp.Data["csr"] = csrb.CSR + if exported { + resp.Data["private_key"] = csrb.PrivateKey + resp.Data["private_key_type"] = csrb.PrivateKeyType + } + + case "pem_bundle": + resp.Data["csr"] = csrb.CSR + if exported { + resp.Data["csr"] = fmt.Sprintf("%s\n%s", csrb.PrivateKey, csrb.CSR) + resp.Data["private_key"] = csrb.PrivateKey + resp.Data["private_key_type"] = csrb.PrivateKeyType + } + + case "der": + resp.Data["csr"] = base64.StdEncoding.EncodeToString(parsedBundle.CSRBytes) + if exported { + resp.Data["private_key"] = base64.StdEncoding.EncodeToString(parsedBundle.PrivateKeyBytes) + resp.Data["private_key_type"] = csrb.PrivateKeyType + } + default: + return nil, fmt.Errorf("unsupported format argument: %s", format) + } + + if data.Get("private_key_format").(string) == "pkcs8" { + err = convertRespToPKCS8(resp) + if err != nil { + return nil, err + } + } + + myKey, _, err := sc.importKey(csrb.PrivateKey, keyName, csrb.PrivateKeyType) + if err != nil { + return nil, err + } + resp.Data["key_id"] = myKey.ID + + resp = addWarnings(resp, warnings) + + return resp, nil +} + +const pathGenerateIntermediateHelpSyn = ` +Generate a new CSR and private key used for signing. +` + +const pathGenerateIntermediateHelpDesc = ` +See the API documentation for more information. +` + +const pathSetSignedIntermediateHelpSyn = ` +Provide the signed intermediate CA cert. +` + +const pathSetSignedIntermediateHelpDesc = ` +See the API documentation for more information. 
+` diff --git a/builtin/logical/pki/path_issue_sign.go b/builtin/logical/pki/path_issue_sign.go new file mode 100644 index 0000000..9833605 --- /dev/null +++ b/builtin/logical/pki/path_issue_sign.go @@ -0,0 +1,584 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "bytes" + "context" + "crypto/rand" + "encoding/base64" + "encoding/pem" + "fmt" + "net/http" + "strings" + "time" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathIssue(b *backend) *framework.Path { + pattern := "issue/" + framework.GenericNameRegex("role") + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "issue", + OperationSuffix: "with-role", + } + + return buildPathIssue(b, pattern, displayAttrs) +} + +func pathIssuerIssue(b *backend) *framework.Path { + pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/issue/" + framework.GenericNameRegex("role") + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuer, + OperationVerb: "issue", + OperationSuffix: "with-role", + } + + return buildPathIssue(b, pattern, displayAttrs) +} + +func buildPathIssue(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { + ret := &framework.Path{ + Pattern: pattern, + DisplayAttrs: displayAttrs, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.metricsWrap("issue", roleRequired, b.pathIssue), + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "certificate": { + Type: framework.TypeString, + Description: `Certificate`, + Required: true, + }, + "issuing_ca": { + Type: framework.TypeString, + Description: `Issuing Certificate Authority`, + Required: true, + }, + "ca_chain": { + Type: framework.TypeCommaStringSlice, + Description: `Certificate Chain`, + Required: false, + }, + "serial_number": { + Type: framework.TypeString, + Description: `Serial Number`, + Required: true, + }, + "expiration": { + Type: framework.TypeInt64, + Description: `Time of expiration`, + Required: true, + }, + "private_key": { + Type: framework.TypeString, + Description: `Private key`, + Required: false, + }, + "private_key_type": { + Type: framework.TypeString, + Description: `Private key type`, + Required: false, + }, + }, + }}, + }, + }, + }, + + HelpSynopsis: pathIssueHelpSyn, + HelpDescription: pathIssueHelpDesc, + } + + ret.Fields = addNonCACommonFields(map[string]*framework.FieldSchema{}) + return ret +} + +func pathSign(b *backend) *framework.Path { + pattern := "sign/" + framework.GenericNameRegex("role") + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "sign", + OperationSuffix: "with-role", + } + + return buildPathSign(b, pattern, displayAttrs) +} + +func pathIssuerSign(b *backend) *framework.Path { + pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/sign/" + framework.GenericNameRegex("role") + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuer, + OperationVerb: "sign", + OperationSuffix: "with-role", + } + + return buildPathSign(b, pattern, displayAttrs) +} + +func buildPathSign(b *backend, 
pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { + ret := &framework.Path{ + Pattern: pattern, + DisplayAttrs: displayAttrs, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.metricsWrap("sign", roleRequired, b.pathSign), + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "certificate": { + Type: framework.TypeString, + Description: `Certificate`, + Required: true, + }, + "issuing_ca": { + Type: framework.TypeString, + Description: `Issuing Certificate Authority`, + Required: true, + }, + "ca_chain": { + Type: framework.TypeCommaStringSlice, + Description: `Certificate Chain`, + Required: false, + }, + "serial_number": { + Type: framework.TypeString, + Description: `Serial Number`, + Required: true, + }, + "expiration": { + Type: framework.TypeInt64, + Description: `Time of expiration`, + Required: true, + }, + "private_key": { + Type: framework.TypeString, + Description: `Private key`, + Required: false, + }, + "private_key_type": { + Type: framework.TypeString, + Description: `Private key type`, + Required: false, + }, + }, + }}, + }, + }, + }, + + HelpSynopsis: pathSignHelpSyn, + HelpDescription: pathSignHelpDesc, + } + + ret.Fields = addNonCACommonFields(map[string]*framework.FieldSchema{}) + + ret.Fields["csr"] = &framework.FieldSchema{ + Type: framework.TypeString, + Default: "", + Description: `PEM-format CSR to be signed.`, + } + + return ret +} + +func pathIssuerSignVerbatim(b *backend) *framework.Path { + pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/sign-verbatim" + framework.OptionalParamRegex("role") + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuer, + OperationVerb: "sign", + OperationSuffix: "verbatim|verbatim-with-role", + } + + return buildPathIssuerSignVerbatim(b, pattern, displayAttrs) +} + +func pathSignVerbatim(b *backend) *framework.Path { + pattern := "sign-verbatim" + framework.OptionalParamRegex("role") + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "sign", + OperationSuffix: "verbatim|verbatim-with-role", + } + + return buildPathIssuerSignVerbatim(b, pattern, displayAttrs) +} + +func buildPathIssuerSignVerbatim(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { + ret := &framework.Path{ + Pattern: pattern, + DisplayAttrs: displayAttrs, + Fields: getCsrSignVerbatimSchemaFields(), + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.metricsWrap("sign-verbatim", roleOptional, b.pathSignVerbatim), + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "certificate": { + Type: framework.TypeString, + Description: `Certificate`, + Required: true, + }, + "issuing_ca": { + Type: framework.TypeString, + Description: `Issuing Certificate Authority`, + Required: true, + }, + "ca_chain": { + Type: framework.TypeCommaStringSlice, + Description: `Certificate Chain`, + Required: false, + }, + "serial_number": { + Type: framework.TypeString, + Description: `Serial Number`, + Required: true, + }, + "expiration": { + Type: framework.TypeInt64, + Description: `Time of expiration`, + Required: true, + }, + "private_key": { + Type: framework.TypeString, + Description: `Private key`, 
+ Required: false, + }, + "private_key_type": { + Type: framework.TypeString, + Description: `Private key type`, + Required: false, + }, + }, + }}, + }, + }, + }, + + HelpSynopsis: pathIssuerSignVerbatimHelpSyn, + HelpDescription: pathIssuerSignVerbatimHelpDesc, + } + + return ret +} + +const ( + pathIssuerSignVerbatimHelpSyn = `Issue a certificate directly based on the provided CSR.` + pathIssuerSignVerbatimHelpDesc = ` +This API endpoint allows for directly signing the specified certificate +signing request (CSR) without the typical role-based validation. This +allows for attributes from the CSR to be directly copied to the resulting +certificate. + +Usually the role-based sign operations (/sign and /issue) are preferred to +this operation. + +Note that this is a very privileged operation and should be extremely +restricted in terms of who is allowed to use it. All values will be taken +directly from the incoming CSR. No further verification of attribute are +performed, except as permitted by this endpoint's parameters. + +See the API documentation for more information about required parameters. +` +) + +// pathIssue issues a certificate and private key from given parameters, +// subject to role restrictions +func (b *backend) pathIssue(ctx context.Context, req *logical.Request, data *framework.FieldData, role *roleEntry) (*logical.Response, error) { + if role.KeyType == "any" { + return logical.ErrorResponse("role key type \"any\" not allowed for issuing certificates, only signing"), nil + } + + return b.pathIssueSignCert(ctx, req, data, role, false, false) +} + +// pathSign issues a certificate from a submitted CSR, subject to role +// restrictions +func (b *backend) pathSign(ctx context.Context, req *logical.Request, data *framework.FieldData, role *roleEntry) (*logical.Response, error) { + return b.pathIssueSignCert(ctx, req, data, role, true, false) +} + +// pathSignVerbatim issues a certificate from a submitted CSR, *not* subject to +// role restrictions +func (b *backend) pathSignVerbatim(ctx context.Context, req *logical.Request, data *framework.FieldData, role *roleEntry) (*logical.Response, error) { + entry := buildSignVerbatimRole(data, role) + + return b.pathIssueSignCert(ctx, req, data, entry, true, true) +} + +func (b *backend) pathIssueSignCert(ctx context.Context, req *logical.Request, data *framework.FieldData, role *roleEntry, useCSR, useCSRValues bool) (*logical.Response, error) { + // If storing the certificate and on a performance standby, forward this request on to the primary + // Allow performance secondaries to generate and store certificates locally to them. + if !role.NoStore && b.System().ReplicationState().HasState(consts.ReplicationPerformanceStandby) { + return nil, logical.ErrReadOnly + } + + // We prefer the issuer from the role in two cases: + // + // 1. On the legacy sign-verbatim paths, as we always provision an issuer + // in both the role and role-less cases, and + // 2. On the legacy sign/:role or issue/:role paths, as the issuer was + // set on the role directly (either via upgrade or not). Note that + // the updated issuer/:ref/{sign,issue}/:role path is not affected, + // and we instead pull the issuer out of the path instead (which + // allows users with access to those paths to manually choose their + // issuer in desired scenarios). 
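+ // (For example: a write to "sign/web" signs with the issuer configured
+ // on the hypothetical "web" role, while "issuer/my-root/sign/web" uses
+ // "my-root" taken from the path.)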
+ var issuerName string + if strings.HasPrefix(req.Path, "sign-verbatim/") || strings.HasPrefix(req.Path, "sign/") || strings.HasPrefix(req.Path, "issue/") { + issuerName = role.Issuer + if len(issuerName) == 0 { + issuerName = defaultRef + } + } else { + // Otherwise, we must have a newer API which requires an issuer + // reference. Fetch it in this case + issuerName = getIssuerRef(data) + if len(issuerName) == 0 { + return logical.ErrorResponse("missing issuer reference"), nil + } + } + + format := getFormat(data) + if format == "" { + return logical.ErrorResponse( + `the "format" path parameter must be "pem", "der", or "pem_bundle"`), nil + } + + var caErr error + sc := b.makeStorageContext(ctx, req.Storage) + signingBundle, caErr := sc.fetchCAInfo(issuerName, IssuanceUsage) + if caErr != nil { + switch caErr.(type) { + case errutil.UserError: + return nil, errutil.UserError{Err: fmt.Sprintf( + "could not fetch the CA certificate (was one set?): %s", caErr)} + default: + return nil, errutil.InternalError{Err: fmt.Sprintf( + "error fetching CA certificate: %s", caErr)} + } + } + + input := &inputBundle{ + req: req, + apiData: data, + role: role, + } + var parsedBundle *certutil.ParsedCertBundle + var err error + var warnings []string + if useCSR { + parsedBundle, warnings, err = signCert(b, input, signingBundle, false, useCSRValues) + } else { + parsedBundle, warnings, err = generateCert(sc, input, signingBundle, false, rand.Reader) + } + if err != nil { + switch err.(type) { + case errutil.UserError: + return logical.ErrorResponse(err.Error()), nil + case errutil.InternalError: + return nil, err + default: + return nil, fmt.Errorf("error signing/generating certificate: %w", err) + } + } + + signingCB, err := signingBundle.ToCertBundle() + if err != nil { + return nil, fmt.Errorf("error converting raw signing bundle to cert bundle: %w", err) + } + + cb, err := parsedBundle.ToCertBundle() + if err != nil { + return nil, fmt.Errorf("error converting raw cert bundle to cert bundle: %w", err) + } + + caChainGen := newCaChainOutput(parsedBundle, data) + + respData := map[string]interface{}{ + "expiration": int64(parsedBundle.Certificate.NotAfter.Unix()), + "serial_number": cb.SerialNumber, + } + + switch format { + case "pem": + respData["issuing_ca"] = signingCB.Certificate + respData["certificate"] = cb.Certificate + if caChainGen.containsChain() { + respData["ca_chain"] = caChainGen.pemEncodedChain() + } + if !useCSR { + respData["private_key"] = cb.PrivateKey + respData["private_key_type"] = cb.PrivateKeyType + } + + case "pem_bundle": + respData["issuing_ca"] = signingCB.Certificate + respData["certificate"] = cb.ToPEMBundle() + if caChainGen.containsChain() { + respData["ca_chain"] = caChainGen.pemEncodedChain() + } + if !useCSR { + respData["private_key"] = cb.PrivateKey + respData["private_key_type"] = cb.PrivateKeyType + } + + case "der": + respData["certificate"] = base64.StdEncoding.EncodeToString(parsedBundle.CertificateBytes) + respData["issuing_ca"] = base64.StdEncoding.EncodeToString(signingBundle.CertificateBytes) + + if caChainGen.containsChain() { + respData["ca_chain"] = caChainGen.derEncodedChain() + } + + if !useCSR { + respData["private_key"] = base64.StdEncoding.EncodeToString(parsedBundle.PrivateKeyBytes) + respData["private_key_type"] = cb.PrivateKeyType + } + default: + return nil, fmt.Errorf("unsupported format: %s", format) + } + + var resp *logical.Response + switch { + case role.GenerateLease == nil: + return nil, fmt.Errorf("generate lease in role is nil") + 
case !*role.GenerateLease: + // If lease generation is disabled do not populate `Secret` field in + // the response + resp = &logical.Response{ + Data: respData, + } + default: + resp = b.Secret(SecretCertsType).Response( + respData, + map[string]interface{}{ + "serial_number": cb.SerialNumber, + }) + resp.Secret.TTL = parsedBundle.Certificate.NotAfter.Sub(time.Now()) + } + + if data.Get("private_key_format").(string) == "pkcs8" { + err = convertRespToPKCS8(resp) + if err != nil { + return nil, err + } + } + + if !role.NoStore { + key := "certs/" + normalizeSerial(cb.SerialNumber) + certsCounted := b.certsCounted.Load() + err = req.Storage.Put(ctx, &logical.StorageEntry{ + Key: key, + Value: parsedBundle.CertificateBytes, + }) + if err != nil { + return nil, fmt.Errorf("unable to store certificate locally: %w", err) + } + b.ifCountEnabledIncrementTotalCertificatesCount(certsCounted, key) + } + + if useCSR { + if role.UseCSRCommonName && data.Get("common_name").(string) != "" { + resp.AddWarning("the common_name field was provided but the role is set with \"use_csr_common_name\" set to true") + } + if role.UseCSRSANs && data.Get("alt_names").(string) != "" { + resp.AddWarning("the alt_names field was provided but the role is set with \"use_csr_sans\" set to true") + } + } + + resp = addWarnings(resp, warnings) + + return resp, nil +} + +type caChainOutput struct { + chain []*certutil.CertBlock +} + +func newCaChainOutput(parsedBundle *certutil.ParsedCertBundle, data *framework.FieldData) caChainOutput { + if filterCaChain := data.Get("remove_roots_from_chain").(bool); filterCaChain { + var myChain []*certutil.CertBlock + for _, certBlock := range parsedBundle.CAChain { + cert := certBlock.Certificate + + if (len(cert.AuthorityKeyId) > 0 && !bytes.Equal(cert.AuthorityKeyId, cert.SubjectKeyId)) || + (len(cert.AuthorityKeyId) == 0 && (!bytes.Equal(cert.RawIssuer, cert.RawSubject) || cert.CheckSignatureFrom(cert) != nil)) { + // We aren't self-signed so add it to the list. + myChain = append(myChain, certBlock) + } + } + return caChainOutput{chain: myChain} + } + + return caChainOutput{chain: parsedBundle.CAChain} +} + +func (cac *caChainOutput) containsChain() bool { + return len(cac.chain) > 0 +} + +func (cac *caChainOutput) pemEncodedChain() []string { + var chain []string + for _, cert := range cac.chain { + block := pem.Block{Type: "CERTIFICATE", Bytes: cert.Bytes} + certificate := strings.TrimSpace(string(pem.EncodeToMemory(&block))) + chain = append(chain, certificate) + } + return chain +} + +func (cac *caChainOutput) derEncodedChain() []string { + var derCaChain []string + for _, caCert := range cac.chain { + derCaChain = append(derCaChain, base64.StdEncoding.EncodeToString(caCert.Bytes)) + } + return derCaChain +} + +const pathIssueHelpSyn = ` +Request a certificate using a certain role with the provided details. +` + +const pathIssueHelpDesc = ` +This path allows requesting a certificate to be issued according to the +policy of the given role. The certificate will only be issued if the +requested details are allowed by the role policy. + +This path returns a certificate and a private key. If you want a workflow +that does not expose a private key, generate a CSR locally and use the +sign path instead. +` + +const pathSignHelpSyn = ` +Request certificates using a certain role with the provided details. +` + +const pathSignHelpDesc = ` +This path allows requesting certificates to be issued according to the +policy of the given role. 
The certificate will only be issued if the +requested common name is allowed by the role policy. + +This path requires a CSR; if you want Vault to generate a private key +for you, use the issue path instead. +` diff --git a/builtin/logical/pki/path_manage_issuers.go b/builtin/logical/pki/path_manage_issuers.go new file mode 100644 index 0000000..2b6e108 --- /dev/null +++ b/builtin/logical/pki/path_manage_issuers.go @@ -0,0 +1,827 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "bytes" + "context" + "crypto/x509" + "encoding/pem" + "fmt" + "net/http" + "strings" + "time" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathIssuerGenerateRoot(b *backend) *framework.Path { + pattern := "issuers/generate/root/" + framework.GenericNameRegex("exported") + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuers, + OperationVerb: "generate", + OperationSuffix: "root", + } + + return buildPathGenerateRoot(b, pattern, displayAttrs) +} + +func pathRotateRoot(b *backend) *framework.Path { + pattern := "root/rotate/" + framework.GenericNameRegex("exported") + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "rotate", + OperationSuffix: "root", + } + + return buildPathGenerateRoot(b, pattern, displayAttrs) +} + +func buildPathGenerateRoot(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { + ret := &framework.Path{ + Pattern: pattern, + DisplayAttrs: displayAttrs, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathCAGenerateRoot, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "expiration": { + Type: framework.TypeInt64, + Description: `The expiration of the given issuer.`, + Required: true, + }, + "serial_number": { + Type: framework.TypeString, + Description: `The requested Subject's named serial number.`, + Required: true, + }, + "certificate": { + Type: framework.TypeString, + Description: `The generated self-signed CA certificate.`, + Required: true, + }, + "issuing_ca": { + Type: framework.TypeString, + Description: `The issuing certificate authority.`, + Required: true, + }, + "issuer_id": { + Type: framework.TypeString, + Description: `The ID of the issuer`, + Required: true, + }, + "issuer_name": { + Type: framework.TypeString, + Description: `The name of the issuer.`, + Required: true, + }, + "key_id": { + Type: framework.TypeString, + Description: `The ID of the key.`, + Required: true, + }, + "key_name": { + Type: framework.TypeString, + Description: `The key name if given.`, + Required: true, + }, + "private_key": { + Type: framework.TypeString, + Description: `The private key if exported was specified.`, + Required: false, + }, + }, + }}, + }, + // Read more about why these flags are set in backend.go + ForwardPerformanceStandby: true, + ForwardPerformanceSecondary: true, + }, + }, + + HelpSynopsis: pathGenerateRootHelpSyn, + HelpDescription: pathGenerateRootHelpDesc, + } + + ret.Fields = addCACommonFields(map[string]*framework.FieldSchema{}) + ret.Fields = addCAKeyGenerationFields(ret.Fields) + ret.Fields = addCAIssueFields(ret.Fields) + return ret +} + +func pathIssuerGenerateIntermediate(b *backend) *framework.Path { + pattern := 
"issuers/generate/intermediate/" + framework.GenericNameRegex("exported") + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuers, + OperationVerb: "generate", + OperationSuffix: "intermediate", + } + + return buildPathGenerateIntermediate(b, pattern, displayAttrs) +} + +func pathCrossSignIntermediate(b *backend) *framework.Path { + pattern := "intermediate/cross-sign" + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "cross-sign", + OperationSuffix: "intermediate", + } + + return buildPathGenerateIntermediate(b, pattern, displayAttrs) +} + +func buildPathGenerateIntermediate(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { + ret := &framework.Path{ + Pattern: pattern, + DisplayAttrs: displayAttrs, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathGenerateIntermediate, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "csr": { + Type: framework.TypeString, + Description: `Certificate signing request.`, + Required: true, + }, + "key_id": { + Type: framework.TypeString, + Description: `Id of the key.`, + Required: true, + }, + "private_key": { + Type: framework.TypeString, + Description: `Generated private key.`, + Required: false, + }, + "private_key_type": { + Type: framework.TypeString, + Description: `Specifies the format used for marshaling the private key.`, + Required: false, + }, + }, + }}, + }, + // Read more about why these flags are set in backend.go + ForwardPerformanceStandby: true, + ForwardPerformanceSecondary: true, + }, + }, + + HelpSynopsis: pathGenerateIntermediateHelpSyn, + HelpDescription: pathGenerateIntermediateHelpDesc, + } + + ret.Fields = addCACommonFields(map[string]*framework.FieldSchema{}) + ret.Fields = addCAKeyGenerationFields(ret.Fields) + ret.Fields["add_basic_constraints"] = &framework.FieldSchema{ + Type: framework.TypeBool, + Description: `Whether to add a Basic Constraints +extension with CA: true. 
Only needed as a +workaround in some compatibility scenarios +with Active Directory Certificate Services.`, + } + + // At this time Go does not support signing CSRs using PSS signatures, see + // https://github.com/golang/go/issues/45990 + delete(ret.Fields, "use_pss") + + return ret +} + +func pathImportIssuer(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "issuers/import/(cert|bundle)", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuers, + OperationVerb: "import", + OperationSuffix: "cert|bundle", + }, + + Fields: map[string]*framework.FieldSchema{ + "pem_bundle": { + Type: framework.TypeString, + Description: `PEM-format, concatenated unencrypted +secret-key (optional) and certificates.`, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathImportIssuers, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "mapping": { + Type: framework.TypeMap, + Description: "A mapping of issuer_id to key_id for all issuers included in this request", + Required: true, + }, + "imported_keys": { + Type: framework.TypeCommaStringSlice, + Description: "Net-new keys imported as a part of this request", + Required: true, + }, + "imported_issuers": { + Type: framework.TypeCommaStringSlice, + Description: "Net-new issuers imported as a part of this request", + Required: true, + }, + "existing_keys": { + Type: framework.TypeCommaStringSlice, + Description: "Existing keys specified as part of the import bundle of this request", + Required: true, + }, + "existing_issuers": { + Type: framework.TypeCommaStringSlice, + Description: "Existing issuers specified as part of the import bundle of this request", + Required: true, + }, + }, + }}, + }, + // Read more about why these flags are set in backend.go + ForwardPerformanceStandby: true, + ForwardPerformanceSecondary: true, + }, + }, + + HelpSynopsis: pathImportIssuersHelpSyn, + HelpDescription: pathImportIssuersHelpDesc, + } +} + +func (b *backend) pathImportIssuers(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Since we're planning on updating issuers here, grab the lock so we've + // got a consistent view. + b.issuersLock.Lock() + defer b.issuersLock.Unlock() + + keysAllowed := strings.HasSuffix(req.Path, "bundle") || req.Path == "config/ca" + + if b.useLegacyBundleCaStorage() { + return logical.ErrorResponse("Can not import issuers until migration has completed"), nil + } + + var pemBundle string + var certificate string + rawPemBundle, bundleOk := data.GetOk("pem_bundle") + rawCertificate, certOk := data.GetOk("certificate") + if bundleOk { + pemBundle = rawPemBundle.(string) + } + if certOk { + certificate = rawCertificate.(string) + } + + if len(pemBundle) == 0 && len(certificate) == 0 { + return logical.ErrorResponse("'pem_bundle' and 'certificate' parameters were empty"), nil + } + if len(pemBundle) > 0 && len(certificate) > 0 { + return logical.ErrorResponse("'pem_bundle' and 'certificate' parameters were both provided"), nil + } + if len(certificate) > 0 { + keysAllowed = false + pemBundle = certificate + } + if len(pemBundle) < 75 { + // It is almost nearly impossible to store a complete certificate in + // less than 75 bytes. It is definitely impossible to do so when PEM + // encoding has been applied. 
Detect this and give a better warning
+ // than "provided PEM block contained no data" in this case. This is
+ // because the PEM headers contain 5*4 + 6 + 4 + 2 + 2 = 34 characters
+ // minimum (five dashes, "BEGIN" + space + at least one character
+ // identifier, "END" + space + at least one character identifier, and
+ // a pair of new lines). That would leave 41 bytes for Base64 data,
+ // meaning at most a 30-byte DER certificate.
+ //
+ // However, < 75 bytes is a plausible length for a file path, so
+ // suggest that in the error message.
+ return logical.ErrorResponse("provided data for import was too short; perhaps a path was passed to the API rather than the contents of a PEM file"), nil
+ }
+
+ var createdKeys []string
+ var createdIssuers []string
+ var existingKeys []string
+ var existingIssuers []string
+ issuerKeyMap := make(map[string]string)
+
+ // Rather than using certutil.ParsePEMBundle (which restricts the
+ // construction of the PEM bundle), we manually parse the bundle instead.
+ pemBytes := []byte(pemBundle)
+ var pemBlock *pem.Block
+
+ var issuers []string
+ var keys []string
+
+ // By decoding and re-encoding PEM blobs, we can pass strict PEM blobs
+ // to the import functionality (importKeys, importIssuers). This allows
+ // them to validate no duplicate issuers exist (and place greater
+ // restrictions during parsing) but allows this code to accept OpenSSL
+ // parsed chains (with full textual output between PEM entries).
+ for len(bytes.TrimSpace(pemBytes)) > 0 {
+ pemBlock, pemBytes = pem.Decode(pemBytes)
+ if pemBlock == nil {
+ return logical.ErrorResponse("provided PEM block contained no data"), nil
+ }
+
+ pemBlockString := string(pem.EncodeToMemory(pemBlock))
+
+ switch pemBlock.Type {
+ case "CERTIFICATE", "X509 CERTIFICATE":
+ // Must be a certificate
+ issuers = append(issuers, pemBlockString)
+ case "CRL", "X509 CRL":
+ // Ignore any CRL entries.
+ case "EC PARAMS", "EC PARAMETERS":
+ // Ignore any EC parameter entries. This is an optional block
+ // that some implementations send, to ensure some semblance of
+ // compatibility with weird curves. Go doesn't support custom
+ // curves and 99% of software doesn't either, so discard them
+ // without parsing them.
+ default:
+ // Otherwise, treat them as keys.
+ keys = append(keys, pemBlockString)
+ }
+ }
+
+ if len(keys) > 0 && !keysAllowed {
+ return logical.ErrorResponse("private keys found in the PEM bundle but not allowed by the path; use /issuers/import/bundle"), nil
+ }
+
+ sc := b.makeStorageContext(ctx, req.Storage)
+
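The decode-and-re-encode loop above is self-contained enough to sketch outside the backend; a minimal standalone version of the same classification (names are mine, not Vault's):

```go
package pemsplit

import (
	"bytes"
	"encoding/pem"
)

// SplitBundle decodes each PEM block in bundle, re-encodes it strictly,
// and sorts it into certificates and (presumed) private keys, skipping
// CRL and EC-parameter blocks just as the handler above does.
func SplitBundle(bundle []byte) (certs, keys []string) {
	rest := bundle
	for len(bytes.TrimSpace(rest)) > 0 {
		var block *pem.Block
		block, rest = pem.Decode(rest)
		if block == nil {
			return certs, keys // trailing non-PEM data; callers may treat this as an error
		}
		strict := string(pem.EncodeToMemory(block))
		switch block.Type {
		case "CERTIFICATE", "X509 CERTIFICATE":
			certs = append(certs, strict)
		case "CRL", "X509 CRL", "EC PARAMS", "EC PARAMETERS":
			// Skipped, matching the handler's behavior.
		default:
			keys = append(keys, strict)
		}
	}
	return certs, keys
}
```

+ for keyIndex, keyPem := range keys {
+ // Handle import of private key.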
+ key, existing, err := importKeyFromBytes(sc, keyPem, "") + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("Error parsing key %v: %v", keyIndex, err)), nil + } + + if !existing { + createdKeys = append(createdKeys, key.ID.String()) + } else { + existingKeys = append(existingKeys, key.ID.String()) + } + } + + for certIndex, certPem := range issuers { + cert, existing, err := sc.importIssuer(certPem, "") + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("Error parsing issuer %v: %v\n%v", certIndex, err, certPem)), nil + } + + issuerKeyMap[cert.ID.String()] = cert.KeyID.String() + if !existing { + createdIssuers = append(createdIssuers, cert.ID.String()) + } else { + existingIssuers = append(existingIssuers, cert.ID.String()) + } + } + + response := &logical.Response{ + Data: map[string]interface{}{ + "mapping": issuerKeyMap, + "imported_keys": createdKeys, + "imported_issuers": createdIssuers, + "existing_keys": existingKeys, + "existing_issuers": existingIssuers, + }, + } + + if len(createdIssuers) > 0 { + warnings, err := b.crlBuilder.rebuild(sc, true) + if err != nil { + // Before returning, check if the error message includes the + // string "PSS". If so, it indicates we might've wanted to modify + // this issuer, so convert the error to a warning. + if strings.Contains(err.Error(), "PSS") || strings.Contains(err.Error(), "pss") { + err = fmt.Errorf("Rebuilding the CRL failed with a message relating to the PSS signature algorithm. This likely means the revocation_signature_algorithm needs to be set on the newly imported issuer(s) because a managed key supports only the PSS algorithm; by default PKCS#1v1.5 was used to build the CRLs. CRLs will not be generated until this has been addressed, however the import was successful. The original error is reproduced below:\n\n\t%w", err) + } else { + // Note to the caller that while this is an error, we did + // successfully import the issuers. + err = fmt.Errorf("Rebuilding the CRL failed. While this is indicative of a problem with the imported issuers (perhaps because of their revocation_signature_algorithm), they did import successfully and are now usable. It is strongly suggested to fix the CRL building errors before continuing. The original error is reproduced below:\n\n\t%w", err) + } + + return nil, err + } + for index, warning := range warnings { + response.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) + } + + var issuersWithKeys []string + for _, issuer := range createdIssuers { + if issuerKeyMap[issuer] != "" { + issuersWithKeys = append(issuersWithKeys, issuer) + } + } + + // Check whether we need to update our default issuer configuration. + config, err := sc.getIssuersConfig() + if err != nil { + response.AddWarning("Unable to fetch default issuers configuration to update default issuer if necessary: " + err.Error()) + } else if config.DefaultFollowsLatestIssuer { + if len(issuersWithKeys) == 1 { + if err := sc.updateDefaultIssuerId(issuerID(issuersWithKeys[0])); err != nil { + response.AddWarning("Unable to update this new root as the default issuer: " + err.Error()) + } + } else if len(issuersWithKeys) > 1 { + response.AddWarning("Default issuer left unchanged: could not select new issuer automatically as multiple imported issuers had key material in Vault.") + } + } + } + + // While we're here, check if we should warn about a bad default key. We + // do this unconditionally if the issuer or key was modified, so the admin + // is always warned. 
But if unrelated key material was imported, we do + // not warn. + config, err := sc.getIssuersConfig() + if err == nil && len(config.DefaultIssuerId) > 0 { + // We can use the mapping above to check the issuer mapping. + if keyId, ok := issuerKeyMap[string(config.DefaultIssuerId)]; ok && len(keyId) == 0 { + msg := "The default issuer has no key associated with it. Some operations like issuing certificates and signing CRLs will be unavailable with the requested default issuer until a key is imported or the default issuer is changed." + response.AddWarning(msg) + b.Logger().Error(msg) + } + + // If we imported multiple issuers with keys (or matched existing + // keys), and we set one of those as a default, warn the end-user we + // might have selected the wrong one. + if len(createdIssuers) > 1 { + numCreatedIssuersWithKeys := 0 + defaultIssuerWasCreated := false + for _, issuerId := range createdIssuers { + if keyId, ok := issuerKeyMap[issuerId]; ok && len(keyId) != 0 { + numCreatedIssuersWithKeys++ + } + + if config.DefaultIssuerId.String() == issuerId { + defaultIssuerWasCreated = true + } + } + + if numCreatedIssuersWithKeys > 1 && defaultIssuerWasCreated { + msg := "The imported bundle contained multiple certs matching keys, " + + "the default issuer that was selected should be verified and manually changed if incorrect." + response.AddWarning(msg) + b.Logger().Error(msg) + } + } + } + + // Also while we're here, we should let the user know the next steps. + // In particular, if there's no default AIA URLs configuration, we should + // tell the user that's probably next. + if entries, err := getGlobalAIAURLs(ctx, req.Storage); err == nil && len(entries.IssuingCertificates) == 0 && len(entries.CRLDistributionPoints) == 0 && len(entries.OCSPServers) == 0 { + response.AddWarning("This mount hasn't configured any authority information access (AIA) fields; this may make it harder for systems to find missing certificates in the chain or to validate revocation status of certificates. Consider updating /config/urls or the newly generated issuer with this information.") + } + + return response, nil +} + +const ( + pathImportIssuersHelpSyn = `Import the specified issuing certificates.` + pathImportIssuersHelpDesc = ` +This endpoint allows importing the specified issuer certificates. + +:type is either the literal value "cert", to only allow importing +certificates, else "bundle" to allow importing keys as well as +certificates. + +Depending on the value of :type, the pem_bundle request parameter can +either take PEM-formatted certificates, and, if :type="bundle", unencrypted +secret-keys. 
+` +) + +func pathRevokeIssuer(b *backend) *framework.Path { + fields := addIssuerRefField(map[string]*framework.FieldSchema{}) + + return &framework.Path{ + Pattern: "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/revoke", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "revoke", + OperationSuffix: "issuer", + }, + + Fields: fields, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathRevokeIssuer, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "issuer_id": { + Type: framework.TypeString, + Description: `ID of the issuer`, + Required: true, + }, + "issuer_name": { + Type: framework.TypeString, + Description: `Name of the issuer`, + Required: true, + }, + "key_id": { + Type: framework.TypeString, + Description: `ID of the Key`, + Required: true, + }, + "certificate": { + Type: framework.TypeString, + Description: `Certificate`, + Required: true, + }, + "manual_chain": { + Type: framework.TypeCommaStringSlice, + Description: `Manual Chain`, + Required: true, + }, + "ca_chain": { + Type: framework.TypeCommaStringSlice, + Description: `Certificate Authority Chain`, + Required: true, + }, + "leaf_not_after_behavior": { + Type: framework.TypeString, + Description: ``, + Required: true, + }, + "usage": { + Type: framework.TypeString, + Description: `Allowed usage`, + Required: true, + }, + "revocation_signature_algorithm": { + Type: framework.TypeString, + Description: `Which signature algorithm to use when building CRLs`, + Required: true, + }, + "revoked": { + Type: framework.TypeBool, + Description: `Whether the issuer was revoked`, + Required: true, + }, + "issuing_certificates": { + Type: framework.TypeCommaStringSlice, + Description: `Specifies the URL values for the Issuing Certificate field`, + Required: true, + }, + "crl_distribution_points": { + Type: framework.TypeStringSlice, + Description: `Specifies the URL values for the CRL Distribution Points field`, + Required: true, + }, + "ocsp_servers": { + Type: framework.TypeStringSlice, + Description: `Specifies the URL values for the OCSP Servers field`, + Required: true, + }, + "revocation_time": { + Type: framework.TypeInt64, + Description: `Time of revocation`, + Required: false, + }, + "revocation_time_rfc3339": { + Type: framework.TypeTime, + Description: `RFC formatted time of revocation`, + Required: false, + }, + }, + }}, + }, + // Read more about why these flags are set in backend.go + ForwardPerformanceStandby: true, + ForwardPerformanceSecondary: true, + }, + }, + + HelpSynopsis: pathRevokeIssuerHelpSyn, + HelpDescription: pathRevokeIssuerHelpDesc, + } +} + +func (b *backend) pathRevokeIssuer(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Since we're planning on updating issuers here, grab the lock so we've + // got a consistent view. + b.issuersLock.Lock() + defer b.issuersLock.Unlock() + + // Issuer revocation can't work on the legacy cert bundle. + if b.useLegacyBundleCaStorage() { + return logical.ErrorResponse("cannot revoke issuer until migration has completed"), nil + } + + issuerName := getIssuerRef(data) + if len(issuerName) == 0 { + return logical.ErrorResponse("missing issuer reference"), nil + } + + // Fetch the issuer. 
+ sc := b.makeStorageContext(ctx, req.Storage)
+ ref, err := sc.resolveIssuerReference(issuerName)
+ if err != nil {
+ return nil, err
+ }
+ if ref == "" {
+ return logical.ErrorResponse("unable to resolve issuer id for reference: " + issuerName), nil
+ }
+
+ issuer, err := sc.fetchIssuerById(ref)
+ if err != nil {
+ return nil, err
+ }
+
+ // If it's already been revoked, just return the read results sans warnings
+ // like we would otherwise.
+ if issuer.Revoked {
+ return respondReadIssuer(issuer)
+ }
+
+ // When revoking, we want to forbid new certificate issuance. We allow
+ // new revocations of leaves issued by this issuer to trigger a CRL
+ // rebuild still.
+ issuer.Revoked = true
+ if issuer.Usage.HasUsage(IssuanceUsage) {
+ issuer.Usage.ToggleUsage(IssuanceUsage)
+ }
+
+ currTime := time.Now()
+ issuer.RevocationTime = currTime.Unix()
+ issuer.RevocationTimeUTC = currTime.UTC()
+
+ err = sc.writeIssuer(issuer)
+ if err != nil {
+ return nil, err
+ }
+
+ // Now, if the parent issuer exists within this mount, we'd have written
+ // a storage entry for this certificate, making it appear as any other
+ // leaf. We need to add a revocationInfo entry for this into storage,
+ // so that it appears as if it was revoked.
+ //
+ // This is a _necessary_ but not necessarily _sufficient_ step to
+ // consider an arbitrary issuer revoked and the former step (setting
+ // issuer.Revoked = true) is more correct: if two intermediates have the
+ // same serial number, and one appears somehow in the storage but from a
+ // different issuer, we'd only include one in the CRLs, but we'd want to
+ // include both in two separate CRLs. Hence, the former is the condition
+ // we check in CRL building, but this step satisfies other guarantees
+ // within Vault.
+ certEntry, err := fetchCertBySerial(sc, "certs/", issuer.SerialNumber)
+ if err == nil && certEntry != nil {
+ // We've inverted this error check as it doesn't matter; we already
+ // consider this certificate revoked.
+ storageCert, err := x509.ParseCertificate(certEntry.Value)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing stored certificate value: %w", err)
+ }
+
+ issuerCert, err := issuer.GetCertificate()
+ if err != nil {
+ return nil, fmt.Errorf("error parsing issuer certificate value: %w", err)
+ }
+
+ if bytes.Equal(issuerCert.Raw, storageCert.Raw) {
+ // If the issuer is on disk and its serial number is the same as
+ // our issuer, we know we can write the revocation entry. Since
+ // Vault has historically forbidden revocation of non-stored certs
+ // and issuers, we're the only ones to write this entry, so we
+ // don't need the write guard that exists in crl_util.go for the
+ // general case (forbidding a newer revocation time).
+ //
+ // We'll let a cleanup pass or CRL build identify the issuer for
+ // us.
+ revInfo := revocationInfo{
+ CertificateBytes: issuerCert.Raw,
+ RevocationTime: issuer.RevocationTime,
+ RevocationTimeUTC: issuer.RevocationTimeUTC,
+ }
+
+ revEntry, err := logical.StorageEntryJSON(revokedPath+normalizeSerial(issuer.SerialNumber), revInfo)
+ if err != nil {
+ return nil, fmt.Errorf("error creating revocation entry for issuer: %w", err)
+ }
+
+ err = req.Storage.Put(ctx, revEntry)
+ if err != nil {
+ return nil, fmt.Errorf("error saving revoked issuer to new location: %w", err)
+ }
+ }
+ }
+
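End to end, this endpoint can be exercised with the Vault API client; a sketch assuming a PKI mount at "pki/" and an issuer named "old-root" (both assumptions, not taken from this code):

```go
package main

import (
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	// VAULT_ADDR and VAULT_TOKEN are read from the environment.
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	secret, err := client.Logical().Write("pki/issuer/old-root/revoke", nil)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("revoked:", secret.Data["revoked"])
	// The handler attaches self-signed / no-parent / default-issuer
	// warnings here; surface them rather than dropping them.
	for _, w := range secret.Warnings {
		fmt.Println("warning:", w)
	}
}
```

+ // Rebuild the CRL to include the newly revoked issuer.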
+ warnings, crlErr := b.crlBuilder.rebuild(sc, false) + if crlErr != nil { + switch crlErr.(type) { + case errutil.UserError: + return logical.ErrorResponse(fmt.Sprintf("Error during CRL building: %s", crlErr)), nil + default: + return nil, fmt.Errorf("error encountered during CRL building: %w", crlErr) + } + } + + // Finally, respond with the issuer's updated data. + response, err := respondReadIssuer(issuer) + if err != nil { + // Impossible. + return nil, err + } + for index, warning := range warnings { + response.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) + } + + // For sanity, we'll add a warning message here if there's no other + // issuer which verifies this issuer. + ourCert, err := issuer.GetCertificate() + if err != nil { + return nil, err + } + + allIssuers, err := sc.listIssuers() + if err != nil { + return nil, err + } + + isSelfSigned := false + haveOtherIssuer := false + for _, candidateID := range allIssuers { + candidate, err := sc.fetchIssuerById(candidateID) + if err != nil { + return nil, err + } + + candidateCert, err := candidate.GetCertificate() + if err != nil { + // Returning this error is fine because more things will fail + // if this issuer can't parse. + return nil, err + } + + if err := ourCert.CheckSignatureFrom(candidateCert); err == nil { + // Signature verification is a success. This means we have a + // parent for this cert. But notice above we didn't filter out + // ourselves: we want to see if this is a self-signed cert. So + // check that now. + if candidate.ID == issuer.ID { + isSelfSigned = true + } else { + haveOtherIssuer = true + } + } + + // If we have both possible warning candidates, no sense continuing + // to check signatures; exit. + if isSelfSigned && haveOtherIssuer { + break + } + } + + if isSelfSigned { + response.AddWarning("This issuer is a self-signed (potentially root) certificate. This means it may not be considered revoked if there is not an external, cross-signed variant of this certificate. This issuer's serial number will not appear on its own CRL.") + } + + if !haveOtherIssuer { + response.AddWarning("This issuer lacks another parent issuer within the mount. This means it will not appear on any other CRLs and may not be considered revoked by clients. Consider adding this issuer to its issuer's CRL as well if it is not self-signed.") + } + + config, err := sc.getIssuersConfig() + if err == nil && config != nil && config.DefaultIssuerId == issuer.ID { + response.AddWarning("This issuer is currently configured as the default issuer for this mount; operations such as certificate issuance may not work until a new default issuer is selected.") + } + + return response, nil +} + +const ( + pathRevokeIssuerHelpSyn = `Revoke the specified issuer certificate.` + pathRevokeIssuerHelpDesc = ` +This endpoint allows revoking the specified issuer certificates. + +This is useful when the issuer and its parent exist within the same PKI +mount point (utilizing the multi-issuer functionality). If no suitable +parent is found, this revocation may not appear on any CRL in this mount. + +Once revoked, issuers cannot be unrevoked and may not be used to sign any +more certificates. +` +) diff --git a/builtin/logical/pki/path_manage_keys.go b/builtin/logical/pki/path_manage_keys.go new file mode 100644 index 0000000..3c10c32 --- /dev/null +++ b/builtin/logical/pki/path_manage_keys.go @@ -0,0 +1,320 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "bytes" + "context" + "encoding/pem" + "net/http" + "strings" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathGenerateKey(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "keys/generate/(internal|exported|kms)", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "generate", + OperationSuffix: "internal-key|exported-key|kms-key", + }, + + Fields: map[string]*framework.FieldSchema{ + keyNameParam: { + Type: framework.TypeString, + Description: "Optional name to be used for this key", + }, + keyTypeParam: { + Type: framework.TypeString, + Default: "rsa", + Description: `The type of key to use; defaults to RSA. "rsa" +"ec" and "ed25519" are the only valid values.`, + AllowedValues: []interface{}{"rsa", "ec", "ed25519"}, + DisplayAttrs: &framework.DisplayAttributes{ + Value: "rsa", + }, + }, + keyBitsParam: { + Type: framework.TypeInt, + Default: 0, + Description: `The number of bits to use. Allowed values are +0 (universal default); with rsa key_type: 2048 (default), 3072, or +4096; with ec key_type: 224, 256 (default), 384, or 521; ignored with +ed25519.`, + }, + "managed_key_name": { + Type: framework.TypeString, + Description: `The name of the managed key to use when the exported +type is kms. When kms type is the key type, this field or managed_key_id +is required. Ignored for other types.`, + }, + "managed_key_id": { + Type: framework.TypeString, + Description: `The name of the managed key to use when the exported +type is kms. When kms type is the key type, this field or managed_key_name +is required. Ignored for other types.`, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathGenerateKeyHandler, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "key_id": { + Type: framework.TypeString, + Description: `ID assigned to this key.`, + Required: true, + }, + "key_name": { + Type: framework.TypeString, + Description: `Name assigned to this key.`, + Required: true, + }, + "key_type": { + Type: framework.TypeString, + Description: `The type of key to use; defaults to RSA. "rsa" + "ec" and "ed25519" are the only valid values.`, + Required: true, + }, + "private_key": { + Type: framework.TypeString, + Description: `The private key string`, + Required: false, + }, + }, + }}, + }, + + ForwardPerformanceStandby: true, + ForwardPerformanceSecondary: true, + }, + }, + + HelpSynopsis: pathGenerateKeyHelpSyn, + HelpDescription: pathGenerateKeyHelpDesc, + } +} + +const ( + pathGenerateKeyHelpSyn = `Generate a new private key used for signing.` + pathGenerateKeyHelpDesc = `This endpoint will generate a new key pair of the specified type (internal, exported, or kms).` +) + +func (b *backend) pathGenerateKeyHandler(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Since we're planning on updating issuers here, grab the lock so we've + // got a consistent view. 
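Before the handler takes the issuers lock below, note how key_bits defaulting works: per the field docs above, 0 selects the per-type default. A standalone sketch of those rules (my own illustrative helper; the real validation is certutil.ValidateDefaultOrValueKeyTypeSignatureLength, called later in this handler):

```go
package keysketch

import "fmt"

// resolveKeyBits mirrors the documented defaults: 0 selects the per-type
// default; other values must be in the allowed set; ed25519 ignores bits.
func resolveKeyBits(keyType string, keyBits int) (int, error) {
	switch keyType {
	case "ed25519":
		return 0, nil // key_bits is ignored for ed25519
	case "rsa":
		if keyBits == 0 {
			return 2048, nil
		}
		if keyBits == 2048 || keyBits == 3072 || keyBits == 4096 {
			return keyBits, nil
		}
	case "ec":
		if keyBits == 0 {
			return 256, nil
		}
		if keyBits == 224 || keyBits == 256 || keyBits == 384 || keyBits == 521 {
			return keyBits, nil
		}
	default:
		return 0, fmt.Errorf("unknown key_type %q", keyType)
	}
	return 0, fmt.Errorf("invalid key_bits %d for key_type %q", keyBits, keyType)
}
```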
+ b.issuersLock.Lock() + defer b.issuersLock.Unlock() + + if b.useLegacyBundleCaStorage() { + return logical.ErrorResponse("Can not generate keys until migration has completed"), nil + } + + sc := b.makeStorageContext(ctx, req.Storage) + keyName, err := getKeyName(sc, data) + if err != nil { // Fail Immediately if Key Name is in Use, etc... + return logical.ErrorResponse(err.Error()), nil + } + + exportPrivateKey := false + var keyBundle certutil.KeyBundle + var actualPrivateKeyType certutil.PrivateKeyType + switch { + case strings.HasSuffix(req.Path, "/exported"): + exportPrivateKey = true + fallthrough + case strings.HasSuffix(req.Path, "/internal"): + keyType := data.Get(keyTypeParam).(string) + keyBits := data.Get(keyBitsParam).(int) + + keyBits, _, err := certutil.ValidateDefaultOrValueKeyTypeSignatureLength(keyType, keyBits, 0) + if err != nil { + return logical.ErrorResponse("Validation for key_type, key_bits failed: %s", err.Error()), nil + } + + // Internal key generation, stored in storage + keyBundle, err = certutil.CreateKeyBundle(keyType, keyBits, b.GetRandomReader()) + if err != nil { + return nil, err + } + + actualPrivateKeyType = keyBundle.PrivateKeyType + case strings.HasSuffix(req.Path, "/kms"): + keyId, err := getManagedKeyId(data) + if err != nil { + return nil, err + } + + keyBundle, actualPrivateKeyType, err = createKmsKeyBundle(ctx, b, keyId) + if err != nil { + return nil, err + } + default: + return logical.ErrorResponse("Unknown type of key to generate"), nil + } + + privateKeyPemString, err := keyBundle.ToPrivateKeyPemString() + if err != nil { + return nil, err + } + + key, _, err := sc.importKey(privateKeyPemString, keyName, keyBundle.PrivateKeyType) + if err != nil { + return nil, err + } + responseData := map[string]interface{}{ + keyIdParam: key.ID, + keyNameParam: key.Name, + keyTypeParam: string(actualPrivateKeyType), + } + if exportPrivateKey { + responseData["private_key"] = privateKeyPemString + } + return &logical.Response{ + Data: responseData, + }, nil +} + +func pathImportKey(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "keys/import", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "import", + OperationSuffix: "key", + }, + + Fields: map[string]*framework.FieldSchema{ + keyNameParam: { + Type: framework.TypeString, + Description: "Optional name to be used for this key", + }, + "pem_bundle": { + Type: framework.TypeString, + Description: `PEM-format, unencrypted secret key`, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathImportKeyHandler, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "key_id": { + Type: framework.TypeString, + Description: `ID assigned to this key.`, + Required: true, + }, + "key_name": { + Type: framework.TypeString, + Description: `Name assigned to this key.`, + Required: true, + }, + "key_type": { + Type: framework.TypeString, + Description: `The type of key to use; defaults to RSA. 
"rsa" + "ec" and "ed25519" are the only valid values.`, + Required: true, + }, + }, + }}, + }, + ForwardPerformanceStandby: true, + ForwardPerformanceSecondary: true, + }, + }, + + HelpSynopsis: pathImportKeyHelpSyn, + HelpDescription: pathImportKeyHelpDesc, + } +} + +const ( + pathImportKeyHelpSyn = `Import the specified key.` + pathImportKeyHelpDesc = `This endpoint allows importing a specified issuer key from a pem bundle. +If key_name is set, that will be set on the key, assuming the key did not exist previously.` +) + +func (b *backend) pathImportKeyHandler(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Since we're planning on updating issuers here, grab the lock so we've + // got a consistent view. + b.issuersLock.Lock() + defer b.issuersLock.Unlock() + + if b.useLegacyBundleCaStorage() { + return logical.ErrorResponse("Cannot import keys until migration has completed"), nil + } + + sc := b.makeStorageContext(ctx, req.Storage) + pemBundle := data.Get("pem_bundle").(string) + keyName, err := getKeyName(sc, data) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + if len(pemBundle) < 64 { + // It is almost nearly impossible to store a complete key in + // less than 64 bytes. It is definitely impossible to do so when PEM + // encoding has been applied. Detect this and give a better warning + // than "provided PEM block contained no data" in this case. This is + // because the PEM headers contain 5*4 + 6 + 4 + 2 + 2 = 34 characters + // minimum (five dashes, "BEGIN" + space + at least one character + // identifier, "END" + space + at least one character identifier, and + // a pair of new lines). That would leave 30 bytes for Base64 data, + // meaning at most a 22-byte DER key. Even with a 128-bit key, 6 bytes + // is not sufficient for the required ASN.1 structure and OID encoding. + // + // However, < 64 bytes is probably a good length for a file path so + // suggest that is the case. + return logical.ErrorResponse("provided data for import was too short; perhaps a path was passed to the API rather than the contents of a PEM file"), nil + } + + pemBytes := []byte(pemBundle) + var pemBlock *pem.Block + + var keys []string + for len(bytes.TrimSpace(pemBytes)) > 0 { + pemBlock, pemBytes = pem.Decode(pemBytes) + if pemBlock == nil { + return logical.ErrorResponse("provided PEM block contained no data"), nil + } + + pemBlockString := string(pem.EncodeToMemory(pemBlock)) + keys = append(keys, pemBlockString) + } + + if len(keys) != 1 { + return logical.ErrorResponse("only a single key can be present within the pem_bundle for importing"), nil + } + + key, existed, err := importKeyFromBytes(sc, keys[0], keyName) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + resp := logical.Response{ + Data: map[string]interface{}{ + keyIdParam: key.ID, + keyNameParam: key.Name, + keyTypeParam: key.PrivateKeyType, + }, + } + + if existed { + resp.AddWarning("Key already imported, use key/ endpoint to update name.") + } + + return &resp, nil +} diff --git a/builtin/logical/pki/path_manage_keys_test.go b/builtin/logical/pki/path_manage_keys_test.go new file mode 100644 index 0000000..3c5708a --- /dev/null +++ b/builtin/logical/pki/path_manage_keys_test.go @@ -0,0 +1,441 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "context" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "encoding/pem" + "fmt" + "testing" + + "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" + + "github.com/hashicorp/vault/sdk/helper/certutil" + + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/require" +) + +func TestPKI_PathManageKeys_GenerateInternalKeys(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + tests := []struct { + name string + keyType string + keyBits []int + wantLogicalErr bool + }{ + {"all-defaults", "", []int{0}, false}, + {"rsa", "rsa", []int{0, 2048, 3072, 4096}, false}, + {"ec", "ec", []int{0, 224, 256, 384, 521}, false}, + {"ed25519", "ed25519", []int{0}, false}, + {"error-rsa", "rsa", []int{-1, 343444}, true}, + {"error-ec", "ec", []int{-1, 3434324}, true}, + {"error-bad-type", "dskjfkdsfjdkf", []int{0}, true}, + } + for _, tt := range tests { + tt := tt + for _, keyBitParam := range tt.keyBits { + keyName := fmt.Sprintf("%s-%d", tt.name, keyBitParam) + t.Run(keyName, func(t *testing.T) { + data := make(map[string]interface{}) + if tt.keyType != "" { + data["key_type"] = tt.keyType + } + if keyBitParam != 0 { + data["key_bits"] = keyBitParam + } + keyName = genUuid() + "-" + tt.keyType + "-key-name" + data["key_name"] = keyName + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "keys/generate/internal", + Storage: s, + Data: data, + MountPoint: "pki/", + }) + require.NoError(t, err, + "Failed generating key with values key_type:%s key_bits:%d key_name:%s", tt.keyType, keyBitParam, keyName) + require.NotNil(t, resp, + "Got nil response generating key with values key_type:%s key_bits:%d key_name:%s", tt.keyType, keyBitParam, keyName) + if tt.wantLogicalErr { + require.True(t, resp.IsError(), "expected logical error but the request passed:\n%#v", resp) + } else { + require.False(t, resp.IsError(), + "Got logical error response when not expecting one, "+ + "generating key with values key_type:%s key_bits:%d key_name:%s\n%s", tt.keyType, keyBitParam, keyName, resp.Error()) + + // Special case our all-defaults + if tt.keyType == "" { + tt.keyType = "rsa" + } + + require.Equal(t, tt.keyType, resp.Data["key_type"], "key_type field contained an invalid type") + require.NotEmpty(t, resp.Data["key_id"], "returned an empty key_id field, should never happen") + require.Equal(t, keyName, resp.Data["key_name"], "key name was not processed correctly") + require.Nil(t, resp.Data["private_key"], "private_key field should not appear in internal generation type.") + } + }) + } + } +} + +func TestPKI_PathManageKeys_GenerateExportedKeys(t *testing.T) { + t.Parallel() + // We tested a lot of the logic above within the internal test, so just make sure we honor the exported contract + b, s := CreateBackendWithStorage(t) + + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "keys/generate/exported", + Storage: s, + Data: map[string]interface{}{ + "key_type": "ec", + "key_bits": 224, + }, + MountPoint: "pki/", + }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("keys/generate/exported"), logical.UpdateOperation), resp, true) + + require.NoError(t, err, "Failed generating exported key") + require.NotNil(t, resp, "Got nil response generating exported key") + require.Equal(t, "ec", resp.Data["key_type"], "key_type field contained an invalid type") + 
require.NotEmpty(t, resp.Data["key_id"], "returned an empty key_id field, should never happen") + require.Empty(t, resp.Data["key_name"], "key name should have been empty but was not") + require.NotEmpty(t, resp.Data["private_key"], "private_key field should not be empty in exported generation type.") + + // Make sure we can decode our private key as expected + keyData := resp.Data["private_key"].(string) + block, rest := pem.Decode([]byte(keyData)) + require.Empty(t, rest, "should not have had any trailing data") + require.NotEmpty(t, block, "failed decoding pem block") + + key, err := x509.ParseECPrivateKey(block.Bytes) + require.NoError(t, err, "failed parsing pem block as ec private key") + require.Equal(t, elliptic.P224(), key.Curve, "got unexpected curve value in returned private key") +} + +func TestPKI_PathManageKeys_ImportKeyBundle(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + bundle1, err := certutil.CreateKeyBundle("ec", 224, rand.Reader) + require.NoError(t, err, "failed generating an ec key bundle") + bundle2, err := certutil.CreateKeyBundle("rsa", 2048, rand.Reader) + require.NoError(t, err, "failed generating an rsa key bundle") + pem1, err := bundle1.ToPrivateKeyPemString() + require.NoError(t, err, "failed converting ec key to pem") + pem2, err := bundle2.ToPrivateKeyPemString() + require.NoError(t, err, "failed converting rsa key to pem") + + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "keys/import", + Storage: s, + Data: map[string]interface{}{ + "key_name": "my-ec-key", + "pem_bundle": pem1, + }, + MountPoint: "pki/", + }) + + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("keys/import"), logical.UpdateOperation), resp, true) + + require.NoError(t, err, "Failed importing ec key") + require.NotNil(t, resp, "Got nil response importing ec key") + require.False(t, resp.IsError(), "received an error response: %v", resp.Error()) + require.NotEmpty(t, resp.Data["key_id"], "key id for ec import response was empty") + require.Equal(t, "my-ec-key", resp.Data["key_name"], "key_name was incorrect for ec key") + require.Equal(t, certutil.ECPrivateKey, resp.Data["key_type"]) + keyId1 := resp.Data["key_id"].(keyID) + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "keys/import", + Storage: s, + Data: map[string]interface{}{ + "key_name": "my-rsa-key", + "pem_bundle": pem2, + }, + MountPoint: "pki/", + }) + require.NoError(t, err, "Failed importing rsa key") + require.NotNil(t, resp, "Got nil response importing rsa key") + require.False(t, resp.IsError(), "received an error response: %v", resp.Error()) + require.NotEmpty(t, resp.Data["key_id"], "key id for rsa import response was empty") + require.Equal(t, "my-rsa-key", resp.Data["key_name"], "key_name was incorrect for ec key") + require.Equal(t, certutil.RSAPrivateKey, resp.Data["key_type"]) + keyId2 := resp.Data["key_id"].(keyID) + + require.NotEqual(t, keyId1, keyId2) + + // Attempt to reimport the same key with a different name. 
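The reimport below should hand back the original key ID: import deduplicates on key material, not on the requested name. A minimal stand-in for that contract (illustrative; not Vault's actual storage logic):

```go
package keysketch

import (
	"crypto/sha256"
	"encoding/hex"
	"encoding/pem"
	"errors"
)

// store maps a fingerprint of the key material to its assigned ID.
type store struct{ byFingerprint map[string]string }

// importKey returns (id, existed): a second import of the same PEM yields
// the original ID with existed=true, regardless of the requested name.
func (s *store) importKey(pemStr, newID string) (string, bool, error) {
	block, _ := pem.Decode([]byte(pemStr))
	if block == nil {
		return "", false, errors.New("no PEM data")
	}
	sum := sha256.Sum256(block.Bytes)
	fp := hex.EncodeToString(sum[:])
	if id, ok := s.byFingerprint[fp]; ok {
		return id, true, nil
	}
	s.byFingerprint[fp] = newID
	return newID, false, nil
}
```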
+ resp, err = b.HandleRequest(context.Background(), &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "keys/import",
+ Storage: s,
+ Data: map[string]interface{}{
+ "key_name": "my-new-ec-key",
+ "pem_bundle": pem1,
+ },
+ MountPoint: "pki/",
+ })
+ require.NoError(t, err, "Failed importing the same ec key")
+ require.NotNil(t, resp, "Got nil response importing the same ec key")
+ require.False(t, resp.IsError(), "received an error response: %v", resp.Error())
+ require.NotEmpty(t, resp.Data["key_id"], "key id for ec import response was empty")
+ // Note we should receive back the original name, not the new updated name.
+ require.Equal(t, "my-ec-key", resp.Data["key_name"], "key_name was incorrect for ec key")
+ require.Equal(t, certutil.ECPrivateKey, resp.Data["key_type"])
+ keyIdReimport := resp.Data["key_id"]
+ require.Equal(t, keyId1, keyIdReimport, "the re-imported key did not return the same key id")
+
+ // Make sure we cannot reuse an existing name across different keys.
+ bundle3, err := certutil.CreateKeyBundle("ec", 224, rand.Reader)
+ require.NoError(t, err, "failed generating an ec key bundle")
+ pem3, err := bundle3.ToPrivateKeyPemString()
+ require.NoError(t, err, "failed converting ec key to pem")
+ resp, err = b.HandleRequest(context.Background(), &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: "keys/import",
+ Storage: s,
+ Data: map[string]interface{}{
+ "key_name": "my-ec-key",
+ "pem_bundle": pem3,
+ },
+ MountPoint: "pki/",
+ })
+ require.NoError(t, err, "Failed importing an ec key with a reused name")
+ require.NotNil(t, resp, "Got nil response importing an ec key with a reused name")
+ require.True(t, resp.IsError(), "should have received an error response importing a key with a re-used name")
+
+ // Delete the key to make sure re-importing gets another ID
+ resp, err = b.HandleRequest(context.Background(), &logical.Request{
+ Operation: logical.DeleteOperation,
+ Path: "key/" + keyId2.String(),
+ Storage: s,
+ MountPoint: "pki/",
+ })
+ require.NoError(t, err, "failed deleting keyId 2")
+ require.Nil(t, resp, "Got non-nil response deleting the key: %#v", resp)
+
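The delete just issued also has an API-client equivalent; a hedged sketch (assuming a mount at "pki/"):

```go
package pkiclient

import (
	vault "github.com/hashicorp/vault/api"
)

// deleteKey mirrors the in-process delete request above. As the next
// request in the test demonstrates, the delete is idempotent: repeating
// it for a now-missing key still succeeds.
func deleteKey(client *vault.Client, keyID string) error {
	_, err := client.Logical().Delete("pki/key/" + keyID)
	return err
}
```

+ // Deleting a non-existent key should be okay...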
+ resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.DeleteOperation, + Path: "key/" + keyId2.String(), + Storage: s, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed deleting keyId 2") + require.Nil(t, resp, "Got non-nil response deleting the key: %#v", resp) + + // Let's reimport key 2 post-deletion to make sure we re-generate a new key id + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "keys/import", + Storage: s, + Data: map[string]interface{}{ + "key_name": "my-rsa-key", + "pem_bundle": pem2, + }, + MountPoint: "pki/", + }) + require.NoError(t, err, "Failed importing rsa key") + require.NotNil(t, resp, "Got nil response importing rsa key") + require.False(t, resp.IsError(), "received an error response: %v", resp.Error()) + require.NotEmpty(t, resp.Data["key_id"], "key id for rsa import response was empty") + require.Equal(t, "my-rsa-key", resp.Data["key_name"], "key_name was incorrect for ec key") + require.Equal(t, certutil.RSAPrivateKey, resp.Data["key_type"]) + keyId2Reimport := resp.Data["key_id"].(keyID) + + require.NotEqual(t, keyId2, keyId2Reimport, "re-importing key 2 did not generate a new key id") +} + +func TestPKI_PathManageKeys_DeleteDefaultKeyWarns(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "keys/generate/internal", + Storage: s, + Data: map[string]interface{}{"key_type": "ec"}, + MountPoint: "pki/", + }) + require.NoError(t, err, "Failed generating key") + require.NotNil(t, resp, "Got nil response generating key") + require.False(t, resp.IsError(), "resp contained errors generating key: %#v", resp.Error()) + keyId := resp.Data["key_id"].(keyID) + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.DeleteOperation, + Path: "key/" + keyId.String(), + Storage: s, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed deleting default key") + require.NotNil(t, resp, "Got nil response deleting the default key") + require.False(t, resp.IsError(), "expected no errors deleting default key: %#v", resp.Error()) + require.NotEmpty(t, resp.Warnings, "expected warnings to be populated on deleting default key") +} + +func TestPKI_PathManageKeys_DeleteUsedKeyFails(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "issuers/generate/root/internal", + Storage: s, + Data: map[string]interface{}{"common_name": "test.com"}, + MountPoint: "pki/", + }) + require.NoError(t, err, "Failed generating issuer") + require.NotNil(t, resp, "Got nil response generating issuer") + require.False(t, resp.IsError(), "resp contained errors generating issuer: %#v", resp.Error()) + keyId := resp.Data["key_id"].(keyID) + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.DeleteOperation, + Path: "key/" + keyId.String(), + Storage: s, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed deleting key used by an issuer") + require.NotNil(t, resp, "Got nil response deleting key used by an issuer") + require.True(t, resp.IsError(), "expected an error deleting key used by an issuer") +} + +func TestPKI_PathManageKeys_UpdateKeyDetails(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + resp, err := 
b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "keys/generate/internal", + Storage: s, + Data: map[string]interface{}{"key_type": "ec"}, + MountPoint: "pki/", + }) + require.NoError(t, err, "Failed generating key") + require.NotNil(t, resp, "Got nil response generating key") + require.False(t, resp.IsError(), "resp contained errors generating key: %#v", resp.Error()) + keyId := resp.Data["key_id"].(keyID) + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "key/" + keyId.String(), + Storage: s, + Data: map[string]interface{}{"key_name": "new-name"}, + MountPoint: "pki/", + }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("key/"+keyId.String()), logical.UpdateOperation), resp, true) + + require.NoError(t, err, "failed updating key with new name") + require.NotNil(t, resp, "Got nil response updating key with new name") + require.False(t, resp.IsError(), "unexpected error updating key with new name: %#v", resp.Error()) + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Path: "key/" + keyId.String(), + Storage: s, + MountPoint: "pki/", + }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("key/"+keyId.String()), logical.ReadOperation), resp, true) + + require.NoError(t, err, "failed reading key after name update") + require.NotNil(t, resp, "Got nil response reading key after name update") + require.False(t, resp.IsError(), "unexpected error reading key: %#v", resp.Error()) + keyName := resp.Data["key_name"].(string) + + require.Equal(t, "new-name", keyName, "failed to update key_name expected: new-name was: %s", keyName) + + // Make sure we do not allow updates to invalid name values + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "key/" + keyId.String(), + Storage: s, + Data: map[string]interface{}{"key_name": "a-bad\\-name"}, + MountPoint: "pki/", + }) + require.NoError(t, err, "failed updating key with a bad name") + require.NotNil(t, resp, "Got nil response updating key with a bad name") + require.True(t, resp.IsError(), "expected an error updating key with a bad name, but did not get one.") +} + +func TestPKI_PathManageKeys_ImportKeyBundleBadData(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "keys/import", + Storage: s, + Data: map[string]interface{}{ + "key_name": "my-ec-key", + "pem_bundle": "this-is-not-a-pem-bundle", + }, + MountPoint: "pki/", + }) + require.NoError(t, err, "got a 500 error type response from a bad pem bundle") + require.NotNil(t, resp, "Got nil response importing a bad pem bundle") + require.True(t, resp.IsError(), "should have received an error response importing a bad pem bundle") + + // Make sure we also bomb on a proper certificate + bundle := genCertBundle(t, b, s) + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "keys/import", + Storage: s, + Data: map[string]interface{}{ + "pem_bundle": bundle.Certificate, + }, + MountPoint: "pki/", + }) + require.NoError(t, err, "got a 500 error type response from a certificate pem bundle") + require.NotNil(t, resp, "Got nil response importing a certificate bundle") + require.True(t, resp.IsError(), "should have received an error response 
importing a certificate pem bundle") +} + +func TestPKI_PathManageKeys_ImportKeyRejectsMultipleKeys(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + bundle1, err := certutil.CreateKeyBundle("ec", 224, rand.Reader) + require.NoError(t, err, "failed generating an ec key bundle") + bundle2, err := certutil.CreateKeyBundle("rsa", 2048, rand.Reader) + require.NoError(t, err, "failed generating an rsa key bundle") + pem1, err := bundle1.ToPrivateKeyPemString() + require.NoError(t, err, "failed converting ec key to pem") + pem2, err := bundle2.ToPrivateKeyPemString() + require.NoError(t, err, "failed converting rsa key to pem") + + importPem := pem1 + "\n" + pem2 + + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "keys/import", + Storage: s, + Data: map[string]interface{}{ + "key_name": "my-ec-key", + "pem_bundle": importPem, + }, + MountPoint: "pki/", + }) + require.NoError(t, err, "got a 500 error type response from a bad pem bundle") + require.NotNil(t, resp, "Got nil response importing a bad pem bundle") + require.True(t, resp.IsError(), "should have received an error response importing a pem bundle with more than 1 key") + + ctx := context.Background() + sc := b.makeStorageContext(ctx, s) + keys, _ := sc.listKeys() + for _, keyId := range keys { + id, _ := sc.fetchKeyById(keyId) + t.Logf("%s:%s", id.ID, id.Name) + } +} diff --git a/builtin/logical/pki/path_ocsp.go b/builtin/logical/pki/path_ocsp.go new file mode 100644 index 0000000..1c90ce4 --- /dev/null +++ b/builtin/logical/pki/path_ocsp.go @@ -0,0 +1,535 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "bytes" + "context" + "crypto" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/base64" + "errors" + "fmt" + "io" + "math/big" + "net/http" + "strings" + "time" + + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/logical" + "golang.org/x/crypto/ocsp" +) + +const ( + ocspReqParam = "req" + ocspResponseContentType = "application/ocsp-response" + maximumRequestSize = 2048 // A normal simple request is 87 bytes, so give us some buffer +) + +type ocspRespInfo struct { + serialNumber *big.Int + ocspStatus int + revocationTimeUTC *time.Time + issuerID issuerID +} + +// These response variables should not be mutated, instead treat them as constants +var ( + OcspUnauthorizedResponse = &logical.Response{ + Data: map[string]interface{}{ + logical.HTTPContentType: ocspResponseContentType, + logical.HTTPStatusCode: http.StatusUnauthorized, + logical.HTTPRawBody: ocsp.UnauthorizedErrorResponse, + }, + } + OcspMalformedResponse = &logical.Response{ + Data: map[string]interface{}{ + logical.HTTPContentType: ocspResponseContentType, + logical.HTTPStatusCode: http.StatusBadRequest, + logical.HTTPRawBody: ocsp.MalformedRequestErrorResponse, + }, + } + OcspInternalErrorResponse = &logical.Response{ + Data: map[string]interface{}{ + logical.HTTPContentType: ocspResponseContentType, + logical.HTTPStatusCode: http.StatusInternalServerError, + logical.HTTPRawBody: ocsp.InternalErrorErrorResponse, + }, + } + + ErrMissingOcspUsage = errors.New("issuer entry did not have the OCSPSigning usage") + ErrIssuerHasNoKey = errors.New("issuer has no key") + ErrUnknownIssuer = errors.New("unknown issuer") +) + +func buildPathOcspGet(b 
*backend) *framework.Path { + pattern := "ocsp/" + framework.MatchAllRegex(ocspReqParam) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "query", + OperationSuffix: "ocsp-with-get-req", + } + + return buildOcspGetWithPath(b, pattern, displayAttrs) +} + +func buildPathUnifiedOcspGet(b *backend) *framework.Path { + pattern := "unified-ocsp/" + framework.MatchAllRegex(ocspReqParam) + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "query", + OperationSuffix: "unified-ocsp-with-get-req", + } + + return buildOcspGetWithPath(b, pattern, displayAttrs) +} + +func buildOcspGetWithPath(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { + return &framework.Path{ + Pattern: pattern, + DisplayAttrs: displayAttrs, + Fields: map[string]*framework.FieldSchema{ + ocspReqParam: { + Type: framework.TypeString, + Description: "base-64 encoded ocsp request", + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.ocspHandler, + }, + }, + + HelpSynopsis: pathOcspHelpSyn, + HelpDescription: pathOcspHelpDesc, + } +} + +func buildPathOcspPost(b *backend) *framework.Path { + pattern := "ocsp" + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "query", + OperationSuffix: "ocsp", + } + + return buildOcspPostWithPath(b, pattern, displayAttrs) +} + +func buildPathUnifiedOcspPost(b *backend) *framework.Path { + pattern := "unified-ocsp" + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "query", + OperationSuffix: "unified-ocsp", + } + + return buildOcspPostWithPath(b, pattern, displayAttrs) +} + +func buildOcspPostWithPath(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { + return &framework.Path{ + Pattern: pattern, + DisplayAttrs: displayAttrs, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.ocspHandler, + }, + }, + + HelpSynopsis: pathOcspHelpSyn, + HelpDescription: pathOcspHelpDesc, + } +} + +func (b *backend) ocspHandler(ctx context.Context, request *logical.Request, data *framework.FieldData) (*logical.Response, error) { + sc := b.makeStorageContext(ctx, request.Storage) + cfg, err := b.crlBuilder.getConfigWithUpdate(sc) + if err != nil || cfg.OcspDisable || (isUnifiedOcspPath(request) && !cfg.UnifiedCRL) { + return OcspUnauthorizedResponse, nil + } + + derReq, err := fetchDerEncodedRequest(request, data) + if err != nil { + return OcspMalformedResponse, nil + } + + ocspReq, err := ocsp.ParseRequest(derReq) + if err != nil { + return OcspMalformedResponse, nil + } + + useUnifiedStorage := canUseUnifiedStorage(request, cfg) + + ocspStatus, err := getOcspStatus(sc, ocspReq, useUnifiedStorage) + if err != nil { + return logAndReturnInternalError(b, err), nil + } + + caBundle, issuer, err := lookupOcspIssuer(sc, ocspReq, ocspStatus.issuerID) + if err != nil { + if errors.Is(err, ErrUnknownIssuer) { + // Since we were not able to find a matching issuer for the incoming request + // generate an Unknown OCSP response. 
This might turn into an Unauthorized if + // we find out that we don't have a default issuer or it's missing the proper Usage flags + return generateUnknownResponse(cfg, sc, ocspReq), nil + } + if errors.Is(err, ErrMissingOcspUsage) { + // If we did find a matching issuer but aren't allowed to sign, the spec says + // we should be responding with an Unauthorized response as we don't have the + // ability to sign the response. + // https://www.rfc-editor.org/rfc/rfc5019#section-2.2.3 + return OcspUnauthorizedResponse, nil + } + return logAndReturnInternalError(b, err), nil + } + + byteResp, err := genResponse(cfg, caBundle, ocspStatus, ocspReq.HashAlgorithm, issuer.RevocationSigAlg) + if err != nil { + return logAndReturnInternalError(b, err), nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + logical.HTTPContentType: ocspResponseContentType, + logical.HTTPStatusCode: http.StatusOK, + logical.HTTPRawBody: byteResp, + }, + }, nil +} + +func canUseUnifiedStorage(req *logical.Request, cfg *crlConfig) bool { + if isUnifiedOcspPath(req) { + return true + } + + // We are operating on the existing /pki/ocsp path, both of these fields need to be enabled + // for us to use the unified path. + return shouldLocalPathsUseUnified(cfg) +} + +func isUnifiedOcspPath(req *logical.Request) bool { + return strings.HasPrefix(req.Path, "unified-ocsp") +} + +func generateUnknownResponse(cfg *crlConfig, sc *storageContext, ocspReq *ocsp.Request) *logical.Response { + // Generate an Unknown OCSP response, signing with the default issuer from the mount as we did + // not match the request's issuer. If no default issuer can be used, return with Unauthorized as there + // isn't much else we can do at this point. + config, err := sc.getIssuersConfig() + if err != nil { + return logAndReturnInternalError(sc.Backend, err) + } + + if config.DefaultIssuerId == "" { + // If we don't have any issuers or default issuers set, no way to sign a response so Unauthorized it is. + return OcspUnauthorizedResponse + } + + caBundle, issuer, err := getOcspIssuerParsedBundle(sc, config.DefaultIssuerId) + if err != nil { + if errors.Is(err, ErrUnknownIssuer) || errors.Is(err, ErrIssuerHasNoKey) { + // We must have raced on a delete/update of the default issuer, anyways + // no way to sign a response so Unauthorized it is. + return OcspUnauthorizedResponse + } + return logAndReturnInternalError(sc.Backend, err) + } + + if !issuer.Usage.HasUsage(OCSPSigningUsage) { + // If we don't have any issuers or default issuers set, no way to sign a response so Unauthorized it is. + return OcspUnauthorizedResponse + } + + info := &ocspRespInfo{ + serialNumber: ocspReq.SerialNumber, + ocspStatus: ocsp.Unknown, + } + + byteResp, err := genResponse(cfg, caBundle, info, ocspReq.HashAlgorithm, issuer.RevocationSigAlg) + if err != nil { + return logAndReturnInternalError(sc.Backend, err) + } + + return &logical.Response{ + Data: map[string]interface{}{ + logical.HTTPContentType: ocspResponseContentType, + logical.HTTPStatusCode: http.StatusOK, + logical.HTTPRawBody: byteResp, + }, + } +} + +func fetchDerEncodedRequest(request *logical.Request, data *framework.FieldData) ([]byte, error) { + switch request.Operation { + case logical.ReadOperation: + // The param within the GET request should have a base64 encoded version of a DER request. 
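+ // Illustrative only (a client-side sketch, not part of the handler): a GET
+ // request path would be built roughly as
+ //
+ // reqDer, _ := ocsp.CreateRequest(leafCert, issuerCert, nil)
+ // path := "pki/ocsp/" + base64.StdEncoding.EncodeToString(reqDer)
+ //
+ // mirroring sendOcspGetRequest in path_ocsp_test.go; leafCert and issuerCert
+ // are hypothetical *x509.Certificate values.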
+ base64Req := data.Get(ocspReqParam).(string) + if base64Req == "" { + return nil, errors.New("no base64 encoded ocsp request was found") + } + + if len(base64Req) >= maximumRequestSize { + return nil, errors.New("request is too large") + } + + return base64.StdEncoding.DecodeString(base64Req) + case logical.UpdateOperation: + // POST bodies should contain the binary form of the DER request. + // NOTE: Writing an empty update request to Vault causes a nil request.HTTPRequest, and that object + // says that it is possible for its Body element to be nil as well, so check both just in case. + if request.HTTPRequest == nil { + return nil, errors.New("no data in request") + } + rawBody := request.HTTPRequest.Body + if rawBody == nil { + return nil, errors.New("no data in request body") + } + defer rawBody.Close() + + requestBytes, err := io.ReadAll(io.LimitReader(rawBody, maximumRequestSize)) + if err != nil { + return nil, err + } + + if len(requestBytes) >= maximumRequestSize { + return nil, errors.New("request is too large") + } + return requestBytes, nil + default: + return nil, fmt.Errorf("unsupported request method: %s", request.Operation) + } +} + +func logAndReturnInternalError(b *backend, err error) *logical.Response { + // Since OCSP might be a high traffic endpoint, we will log at debug level only + // any internal errors we do get. There is no way for us to return to the end-user + // errors, so we rely on the log statement to help in debugging possible + // issues in the field. + b.Logger().Debug("OCSP internal error", "error", err) + return OcspInternalErrorResponse +} + +func getOcspStatus(sc *storageContext, ocspReq *ocsp.Request, useUnifiedStorage bool) (*ocspRespInfo, error) { + revEntryRaw, err := fetchCertBySerialBigInt(sc, revokedPath, ocspReq.SerialNumber) + if err != nil { + return nil, err + } + + info := ocspRespInfo{ + serialNumber: ocspReq.SerialNumber, + ocspStatus: ocsp.Good, + } + + if revEntryRaw != nil { + var revEntry revocationInfo + if err := revEntryRaw.DecodeJSON(&revEntry); err != nil { + return nil, err + } + + info.ocspStatus = ocsp.Revoked + info.revocationTimeUTC = &revEntry.RevocationTimeUTC + info.issuerID = revEntry.CertificateIssuer // This might be empty if the CRL hasn't been rebuilt + } else if useUnifiedStorage { + dashSerial := normalizeSerialFromBigInt(ocspReq.SerialNumber) + unifiedEntry, err := getUnifiedRevocationBySerial(sc, dashSerial) + if err != nil { + return nil, err + } + + if unifiedEntry != nil { + info.ocspStatus = ocsp.Revoked + info.revocationTimeUTC = &unifiedEntry.RevocationTimeUTC + info.issuerID = unifiedEntry.CertificateIssuer + } + } + + return &info, nil +} + +func lookupOcspIssuer(sc *storageContext, req *ocsp.Request, optRevokedIssuer issuerID) (*certutil.ParsedCertBundle, *issuerEntry, error) { + reqHash := req.HashAlgorithm + if !reqHash.Available() { + return nil, nil, x509.ErrUnsupportedAlgorithm + } + + // This will prime up issuerIds, with either the optRevokedIssuer value if set + // or if we are operating in legacy storage mode, the shim bundle id or finally + // a list of all our issuers in this mount. 
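+ // As a restatement (no additional logic), lookupIssuerIds resolves in this order:
+ // 1. optRevokedIssuer, when the revocation entry recorded an issuer id
+ // 2. legacyBundleShimID, when still operating on legacy (pre-migration) CA storage
+ // 3. otherwise, every issuer id returned by sc.listIssuers()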
+ issuerIds, err := lookupIssuerIds(sc, optRevokedIssuer) + if err != nil { + return nil, nil, err + } + + matchedButNoUsage := false + for _, issuerId := range issuerIds { + parsedBundle, issuer, err := getOcspIssuerParsedBundle(sc, issuerId) + if err != nil { + // A bit touchy here as if we get an ErrUnknownIssuer for an issuer id that we picked up + // from a revocation entry, we still return an ErrUnknownOcspIssuer as we can't validate + // the end-user actually meant this specific issuer's cert with serial X. + if errors.Is(err, ErrUnknownIssuer) || errors.Is(err, ErrIssuerHasNoKey) { + // This skips either bad issuer ids, or root certs with no keys that we can't use. + continue + } + return nil, nil, err + } + + // Make sure the client and Vault are talking about the same issuer, otherwise + // we might have a case of a matching serial number for a different issuer which + // we should not respond back in the affirmative about. + matches, err := doesRequestMatchIssuer(parsedBundle, req) + if err != nil { + return nil, nil, err + } + + if matches { + if !issuer.Usage.HasUsage(OCSPSigningUsage) { + matchedButNoUsage = true + // We found a matching issuer, but it's not allowed to sign the + // response, there might be another issuer that we rotated + // that will match though, so keep iterating. + continue + } + + return parsedBundle, issuer, nil + } + } + + if matchedButNoUsage { + // We matched an issuer but it did not have an OCSP signing usage set so bail. + return nil, nil, ErrMissingOcspUsage + } + + return nil, nil, ErrUnknownIssuer +} + +func getOcspIssuerParsedBundle(sc *storageContext, issuerId issuerID) (*certutil.ParsedCertBundle, *issuerEntry, error) { + issuer, bundle, err := sc.fetchCertBundleByIssuerId(issuerId, true) + if err != nil { + switch err.(type) { + case errutil.UserError: + // Most likely the issuer id no longer exists skip it + return nil, nil, ErrUnknownIssuer + default: + return nil, nil, err + } + } + + if issuer.KeyID == "" { + // No point if the key does not exist from the issuer to use as a signer. + return nil, nil, ErrIssuerHasNoKey + } + + caBundle, err := parseCABundle(sc.Context, sc.Backend, bundle) + if err != nil { + return nil, nil, err + } + + return caBundle, issuer, nil +} + +func lookupIssuerIds(sc *storageContext, optRevokedIssuer issuerID) ([]issuerID, error) { + if optRevokedIssuer != "" { + return []issuerID{optRevokedIssuer}, nil + } + + if sc.Backend.useLegacyBundleCaStorage() { + return []issuerID{legacyBundleShimID}, nil + } + + return sc.listIssuers() +} + +func doesRequestMatchIssuer(parsedBundle *certutil.ParsedCertBundle, req *ocsp.Request) (bool, error) { + // issuer name hashing taken from golang.org/x/crypto/ocsp. 
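+ // Per RFC 6960 section 4.1.1, the request's CertID carries issuerNameHash (a hash of
+ // the issuer's DER-encoded subject name) and issuerKeyHash (a hash of the issuer's
+ // subject public key BIT STRING contents, excluding tag and length), both computed
+ // with the request's hash algorithm; we recompute both from our issuer certificate
+ // below and compare them against the request.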
+ var pkInfo struct {
+ Algorithm pkix.AlgorithmIdentifier
+ PublicKey asn1.BitString
+ }
+ if _, err := asn1.Unmarshal(parsedBundle.Certificate.RawSubjectPublicKeyInfo, &pkInfo); err != nil {
+ return false, err
+ }
+
+ h := req.HashAlgorithm.New()
+ h.Write(pkInfo.PublicKey.RightAlign())
+ issuerKeyHash := h.Sum(nil)
+
+ h.Reset()
+ h.Write(parsedBundle.Certificate.RawSubject)
+ issuerNameHash := h.Sum(nil)
+
+ return bytes.Equal(req.IssuerKeyHash, issuerKeyHash) && bytes.Equal(req.IssuerNameHash, issuerNameHash), nil
+}
+
+func genResponse(cfg *crlConfig, caBundle *certutil.ParsedCertBundle, info *ocspRespInfo, reqHash crypto.Hash, revSigAlg x509.SignatureAlgorithm) ([]byte, error) {
+ curTime := time.Now()
+ duration, err := parseutil.ParseDurationSecond(cfg.OcspExpiry)
+ if err != nil {
+ return nil, err
+ }
+
+ // x/crypto/ocsp lives outside of the standard library's crypto/x509 and includes
+ // ripped-off variants of many internal structures and functions. These
+ // lack support for PSS signatures altogether, so if we have revSigAlg
+ // that uses PSS, downgrade it to PKCS#1v1.5. This fixes the lack of
+ // support in x/ocsp, at the risk of OCSP requests failing due to lack
+ // of PKCS#1v1.5 (in say, PKCS#11 HSMs or GCP).
+ //
+ // Other restrictions, such as hash function selection, will still work
+ // however.
+ switch revSigAlg {
+ case x509.SHA256WithRSAPSS:
+ revSigAlg = x509.SHA256WithRSA
+ case x509.SHA384WithRSAPSS:
+ revSigAlg = x509.SHA384WithRSA
+ case x509.SHA512WithRSAPSS:
+ revSigAlg = x509.SHA512WithRSA
+ }
+
+ // Due to a bug in Go's ocsp.ParseResponse(...), we do not provision
+ // Certificate any more on the response to help Go based OCSP clients.
+ // This was technically unnecessary, as the Certificate given here
+ // both signed the OCSP response and issued the leaf cert, and so
+ // should already be trusted by the client.
+ //
+ // See also: https://github.com/golang/go/issues/59641
+ template := ocsp.Response{
+ IssuerHash: reqHash,
+ Status: info.ocspStatus,
+ SerialNumber: info.serialNumber,
+ ThisUpdate: curTime,
+ ExtraExtensions: []pkix.Extension{},
+ SignatureAlgorithm: revSigAlg,
+ }
+
+ if duration > 0 {
+ template.NextUpdate = curTime.Add(duration)
+ }
+
+ if info.ocspStatus == ocsp.Revoked {
+ template.RevokedAt = *info.revocationTimeUTC
+ template.RevocationReason = ocsp.Unspecified
+ }
+
+ return ocsp.CreateResponse(caBundle.Certificate, caBundle.Certificate, template, caBundle.PrivateKey)
+}
+
+const pathOcspHelpSyn = `
+Query a certificate's revocation status through OCSP
+`
+
+const pathOcspHelpDesc = `
+This endpoint expects DER encoded OCSP requests and returns DER encoded OCSP responses
+`
diff --git a/builtin/logical/pki/path_ocsp_test.go b/builtin/logical/pki/path_ocsp_test.go
new file mode 100644
index 0000000..a3d2140
--- /dev/null
+++ b/builtin/logical/pki/path_ocsp_test.go
@@ -0,0 +1,742 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package pki
+
+import (
+ "bytes"
+ "context"
+ "crypto"
+ "crypto/x509"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "net/http"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/go-secure-stdlib/parseutil"
+ vaulthttp "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/sdk/helper/testhelpers/schema"
+ "github.com/hashicorp/vault/sdk/logical"
+ "github.com/hashicorp/vault/vault"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/crypto/ocsp"
+)
+
+// If the ocsp_disabled flag is set to true in the crl configuration, make sure we always
+// return an Unauthorized error back, as we assume an end-user disabling the feature does
+// not want us to act as the OCSP authority, and the RFC specifies this is the appropriate response.
+func TestOcsp_Disabled(t *testing.T) {
+ t.Parallel()
+ type testArgs struct {
+ reqType string
+ }
+ var tests []testArgs
+ for _, reqType := range []string{"get", "post"} {
+ tests = append(tests, testArgs{
+ reqType: reqType,
+ })
+ }
+ for _, tt := range tests {
+ localTT := tt
+ t.Run(localTT.reqType, func(t *testing.T) {
+ b, s, testEnv := setupOcspEnv(t, "rsa")
+ resp, err := CBWrite(b, s, "config/crl", map[string]interface{}{
+ "ocsp_disable": "true",
+ })
+ requireSuccessNonNilResponse(t, resp, err)
+ resp, err = SendOcspRequest(t, b, s, localTT.reqType, testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1)
+ require.NoError(t, err)
+ requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body")
+ require.Equal(t, 401, resp.Data["http_status_code"])
+ require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"])
+ respDer := resp.Data["http_raw_body"].([]byte)
+
+ require.Equal(t, ocsp.UnauthorizedErrorResponse, respDer)
+ })
+ }
+}
+
+// If we can't find the issuer within the request and have no default issuer to sign an Unknown response
+// with, return an UnauthorizedErrorResponse according to the RFC, similar to when we are disabled (lack of
+// authority). This behavior differs from CRLs when an issuer is removed from a mount.
+func TestOcsp_UnknownIssuerWithNoDefault(t *testing.T) {
+ t.Parallel()
+
+ _, _, testEnv := setupOcspEnv(t, "ec")
+ // Create another completely empty mount so the created issuer/certificate above is unknown
+ b, s := CreateBackendWithStorage(t)
+
+ resp, err := SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1)
+ require.NoError(t, err)
+ requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body")
+ require.Equal(t, 401, resp.Data["http_status_code"])
+ require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"])
+ respDer := resp.Data["http_raw_body"].([]byte)
+
+ require.Equal(t, ocsp.UnauthorizedErrorResponse, respDer)
+}
+
+// If the issuer in the request does exist, but the request coming in associates the serial with the
+// wrong issuer, return an Unknown response back to the caller.
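+// (The serial below is revoked under issuer1, but the request's CertID hashes identify
+// issuer2, so we cannot vouch for that serial within issuer2's serial space.)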
+func TestOcsp_WrongIssuerInRequest(t *testing.T) { + t.Parallel() + + b, s, testEnv := setupOcspEnv(t, "ec") + serial := serialFromCert(testEnv.leafCertIssuer1) + resp, err := CBWrite(b, s, "revoke", map[string]interface{}{ + "serial_number": serial, + }) + requireSuccessNonNilResponse(t, resp, err, "revoke") + + resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer2, crypto.SHA1) + require.NoError(t, err) + requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") + require.Equal(t, 200, resp.Data["http_status_code"]) + require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) + respDer := resp.Data["http_raw_body"].([]byte) + + ocspResp, err := ocsp.ParseResponse(respDer, testEnv.issuer1) + require.NoError(t, err, "parsing ocsp get response") + + require.Equal(t, ocsp.Unknown, ocspResp.Status) +} + +// Verify that requests we can't properly decode result in the correct response of MalformedRequestError +func TestOcsp_MalformedRequests(t *testing.T) { + t.Parallel() + type testArgs struct { + reqType string + } + var tests []testArgs + for _, reqType := range []string{"get", "post"} { + tests = append(tests, testArgs{ + reqType: reqType, + }) + } + for _, tt := range tests { + localTT := tt + t.Run(localTT.reqType, func(t *testing.T) { + b, s, _ := setupOcspEnv(t, "rsa") + badReq := []byte("this is a bad request") + var resp *logical.Response + var err error + switch localTT.reqType { + case "get": + resp, err = sendOcspGetRequest(b, s, badReq) + case "post": + resp, err = sendOcspPostRequest(b, s, badReq) + default: + t.Fatalf("bad request type") + } + require.NoError(t, err) + requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") + require.Equal(t, 400, resp.Data["http_status_code"]) + require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) + respDer := resp.Data["http_raw_body"].([]byte) + + require.Equal(t, ocsp.MalformedRequestErrorResponse, respDer) + }) + } +} + +// Validate that we properly handle a revocation entry that contains an issuer ID that no longer exists, +// the best we can do in this use case is to respond back with the default issuer that we don't know +// the issuer that they are requesting (we can't guarantee that the client is actually requesting a serial +// from that issuer) +func TestOcsp_InvalidIssuerIdInRevocationEntry(t *testing.T) { + t.Parallel() + + b, s, testEnv := setupOcspEnv(t, "ec") + ctx := context.Background() + + // Revoke the entry + serial := serialFromCert(testEnv.leafCertIssuer1) + resp, err := CBWrite(b, s, "revoke", map[string]interface{}{ + "serial_number": serial, + }) + requireSuccessNonNilResponse(t, resp, err, "revoke") + + // Twiddle the entry so that the issuer id is no longer valid. 
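+ // The all-zero UUID below is simply an issuer id that cannot match any issuer in the
+ // mount, forcing the handler down its unknown-issuer fallback path.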
+ storagePath := revokedPath + normalizeSerial(serial) + var revInfo revocationInfo + revEntry, err := s.Get(ctx, storagePath) + require.NoError(t, err, "failed looking up storage path: %s", storagePath) + err = revEntry.DecodeJSON(&revInfo) + require.NoError(t, err, "failed decoding storage entry: %v", revEntry) + revInfo.CertificateIssuer = "00000000-0000-0000-0000-000000000000" + revEntry, err = logical.StorageEntryJSON(storagePath, revInfo) + require.NoError(t, err, "failed re-encoding revocation info: %v", revInfo) + err = s.Put(ctx, revEntry) + require.NoError(t, err, "failed writing out new revocation entry: %v", revEntry) + + // Send the request + resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) + require.NoError(t, err) + requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") + require.Equal(t, 200, resp.Data["http_status_code"]) + require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) + respDer := resp.Data["http_raw_body"].([]byte) + + ocspResp, err := ocsp.ParseResponse(respDer, testEnv.issuer1) + require.NoError(t, err, "parsing ocsp get response") + + require.Equal(t, ocsp.Unknown, ocspResp.Status) +} + +// Validate that we properly handle an unknown issuer use-case but that the default issuer +// does not have the OCSP usage flag set, we can't do much else other than reply with an +// Unauthorized response. +func TestOcsp_UnknownIssuerIdWithDefaultHavingOcspUsageRemoved(t *testing.T) { + t.Parallel() + + b, s, testEnv := setupOcspEnv(t, "ec") + ctx := context.Background() + + // Revoke the entry + serial := serialFromCert(testEnv.leafCertIssuer1) + resp, err := CBWrite(b, s, "revoke", map[string]interface{}{ + "serial_number": serial, + }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("revoke"), logical.UpdateOperation), resp, true) + requireSuccessNonNilResponse(t, resp, err, "revoke") + + // Twiddle the entry so that the issuer id is no longer valid. 
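+ // Combined with stripping the OCSPSigning usage from both issuers below, this forces
+ // generateUnknownResponse to find the default issuer unusable and fall back to an
+ // Unauthorized response.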
+ storagePath := revokedPath + normalizeSerial(serial) + var revInfo revocationInfo + revEntry, err := s.Get(ctx, storagePath) + require.NoError(t, err, "failed looking up storage path: %s", storagePath) + err = revEntry.DecodeJSON(&revInfo) + require.NoError(t, err, "failed decoding storage entry: %v", revEntry) + revInfo.CertificateIssuer = "00000000-0000-0000-0000-000000000000" + revEntry, err = logical.StorageEntryJSON(storagePath, revInfo) + require.NoError(t, err, "failed re-encoding revocation info: %v", revInfo) + err = s.Put(ctx, revEntry) + require.NoError(t, err, "failed writing out new revocation entry: %v", revEntry) + + // Update our issuers to no longer have the OcspSigning usage + resp, err = CBPatch(b, s, "issuer/"+testEnv.issuerId1.String(), map[string]interface{}{ + "usage": "read-only,issuing-certificates,crl-signing", + }) + requireSuccessNonNilResponse(t, resp, err, "failed resetting usage flags on issuer1") + resp, err = CBPatch(b, s, "issuer/"+testEnv.issuerId2.String(), map[string]interface{}{ + "usage": "read-only,issuing-certificates,crl-signing", + }) + requireSuccessNonNilResponse(t, resp, err, "failed resetting usage flags on issuer2") + + // Send the request + resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) + require.NoError(t, err) + requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") + require.Equal(t, 401, resp.Data["http_status_code"]) + require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) + respDer := resp.Data["http_raw_body"].([]byte) + + require.Equal(t, ocsp.UnauthorizedErrorResponse, respDer) +} + +// Verify that if we do have a revoked certificate entry for the request, that matches an +// issuer but that issuer does not have the OcspUsage flag set that we return an Unauthorized +// response back to the caller +func TestOcsp_RevokedCertHasIssuerWithoutOcspUsage(t *testing.T) { + b, s, testEnv := setupOcspEnv(t, "ec") + + // Revoke our certificate + resp, err := CBWrite(b, s, "revoke", map[string]interface{}{ + "serial_number": serialFromCert(testEnv.leafCertIssuer1), + }) + requireSuccessNonNilResponse(t, resp, err, "revoke") + + // Update our issuer to no longer have the OcspSigning usage + resp, err = CBPatch(b, s, "issuer/"+testEnv.issuerId1.String(), map[string]interface{}{ + "usage": "read-only,issuing-certificates,crl-signing", + }) + requireSuccessNonNilResponse(t, resp, err, "failed resetting usage flags on issuer") + requireFieldsSetInResp(t, resp, "usage") + + // Do not assume a specific ordering for usage... 
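+ // e.g. "crl-signing,issuing-certificates,read-only" and
+ // "read-only,issuing-certificates,crl-signing" must be treated as equivalent, hence
+ // the per-usage assertions below rather than a single string comparison.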
+ usages, err := NewIssuerUsageFromNames(strings.Split(resp.Data["usage"].(string), ",")) + require.NoError(t, err, "failed parsing usage return value") + require.True(t, usages.HasUsage(IssuanceUsage)) + require.True(t, usages.HasUsage(CRLSigningUsage)) + require.False(t, usages.HasUsage(OCSPSigningUsage)) + + // Request an OCSP request from it, we should get an Unauthorized response back + resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) + requireSuccessNonNilResponse(t, resp, err, "ocsp get request") + requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") + require.Equal(t, 401, resp.Data["http_status_code"]) + require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) + respDer := resp.Data["http_raw_body"].([]byte) + + require.Equal(t, ocsp.UnauthorizedErrorResponse, respDer) +} + +// Verify if our matching issuer for a revocation entry has no key associated with it that +// we bail with an Unauthorized response. +func TestOcsp_RevokedCertHasIssuerWithoutAKey(t *testing.T) { + b, s, testEnv := setupOcspEnv(t, "ec") + + // Revoke our certificate + resp, err := CBWrite(b, s, "revoke", map[string]interface{}{ + "serial_number": serialFromCert(testEnv.leafCertIssuer1), + }) + requireSuccessNonNilResponse(t, resp, err, "revoke") + + // Delete the key associated with our issuer + resp, err = CBRead(b, s, "issuer/"+testEnv.issuerId1.String()) + requireSuccessNonNilResponse(t, resp, err, "failed reading issuer") + requireFieldsSetInResp(t, resp, "key_id") + keyId := resp.Data["key_id"].(keyID) + + // This is a bit naughty but allow me to delete the key... + sc := b.makeStorageContext(context.Background(), s) + issuer, err := sc.fetchIssuerById(testEnv.issuerId1) + require.NoError(t, err, "failed to get issuer from storage") + issuer.KeyID = "" + err = sc.writeIssuer(issuer) + require.NoError(t, err, "failed to write issuer update") + + resp, err = CBDelete(b, s, "key/"+keyId.String()) + requireSuccessNonNilResponse(t, resp, err, "failed deleting key") + + // Request an OCSP request from it, we should get an Unauthorized response back + resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) + requireSuccessNonNilResponse(t, resp, err, "ocsp get request") + requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") + require.Equal(t, 401, resp.Data["http_status_code"]) + require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) + respDer := resp.Data["http_raw_body"].([]byte) + + require.Equal(t, ocsp.UnauthorizedErrorResponse, respDer) +} + +// Verify if for some reason an end-user has rotated an existing certificate using the same +// key so our algo matches multiple issuers and one has OCSP usage disabled. We expect that +// even if a prior issuer issued the certificate, the new matching issuer can respond and sign +// the response to the caller on its behalf. +// +// NOTE: This test is a bit at the mercy of iteration order of the issuer ids. +// +// If it becomes flaky, most likely something is wrong in the code +// and not the test. 
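+// (Both issuers share one key and subject, so doesRequestMatchIssuer matches either;
+// lookupOcspIssuer skips the issuer lacking OCSPSigningUsage and keeps iterating until
+// it reaches the rotated issuer, which then signs the response.)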
+func TestOcsp_MultipleMatchingIssuersOneWithoutSigningUsage(t *testing.T) { + b, s, testEnv := setupOcspEnv(t, "ec") + + // Create a matching issuer as issuer1 with the same backing key + resp, err := CBWrite(b, s, "root/rotate/existing", map[string]interface{}{ + "key_ref": testEnv.keyId1, + "ttl": "40h", + "common_name": "example-ocsp.com", + }) + requireSuccessNonNilResponse(t, resp, err, "rotate issuer failed") + requireFieldsSetInResp(t, resp, "issuer_id") + rotatedCert := parseCert(t, resp.Data["certificate"].(string)) + + // Remove ocsp signing from our issuer + resp, err = CBPatch(b, s, "issuer/"+testEnv.issuerId1.String(), map[string]interface{}{ + "usage": "read-only,issuing-certificates,crl-signing", + }) + requireSuccessNonNilResponse(t, resp, err, "failed resetting usage flags on issuer") + requireFieldsSetInResp(t, resp, "usage") + // Do not assume a specific ordering for usage... + usages, err := NewIssuerUsageFromNames(strings.Split(resp.Data["usage"].(string), ",")) + require.NoError(t, err, "failed parsing usage return value") + require.True(t, usages.HasUsage(IssuanceUsage)) + require.True(t, usages.HasUsage(CRLSigningUsage)) + require.False(t, usages.HasUsage(OCSPSigningUsage)) + + // Request an OCSP request from it, we should get a Good response back, from the rotated cert + resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) + requireSuccessNonNilResponse(t, resp, err, "ocsp get request") + requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") + require.Equal(t, 200, resp.Data["http_status_code"]) + require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) + respDer := resp.Data["http_raw_body"].([]byte) + + ocspResp, err := ocsp.ParseResponse(respDer, testEnv.issuer1) + require.NoError(t, err, "parsing ocsp get response") + + require.Equal(t, ocsp.Good, ocspResp.Status) + require.Equal(t, crypto.SHA1, ocspResp.IssuerHash) + require.Equal(t, 0, ocspResp.RevocationReason) + require.Equal(t, testEnv.leafCertIssuer1.SerialNumber, ocspResp.SerialNumber) + + requireOcspSignatureAlgoForKey(t, rotatedCert.SignatureAlgorithm, ocspResp.SignatureAlgorithm) + requireOcspResponseSignedBy(t, ocspResp, rotatedCert) +} + +// Make sure OCSP GET/POST requests work through the entire stack, and not just +// through the quicker backend layer the other tests are doing. 
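+//
+// A rough out-of-band equivalent against a running server (illustrative and untested;
+// assumes VAULT_ADDR is set and issuer.pem/leaf.pem exist on disk):
+//
+// openssl ocsp -issuer issuer.pem -cert leaf.pem \
+// -url "$VAULT_ADDR/v1/pki/ocsp" -resp_text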
+func TestOcsp_HigherLevel(t *testing.T) { + t.Parallel() + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + mountPKIEndpoint(t, client, "pki") + resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "root-ca.com", + "ttl": "600h", + }) + + require.NoError(t, err, "error generating root ca: %v", err) + require.NotNil(t, resp, "expected ca info from root") + + issuerCert := parseCert(t, resp.Data["certificate"].(string)) + + resp, err = client.Logical().Write("pki/roles/example", map[string]interface{}{ + "allowed_domains": "example.com", + "allow_subdomains": "true", + "no_store": "false", // make sure we store this cert + "max_ttl": "1h", + "key_type": "ec", + }) + require.NoError(t, err, "error setting up pki role: %v", err) + + resp, err = client.Logical().Write("pki/issue/example", map[string]interface{}{ + "common_name": "test.example.com", + "ttl": "15m", + }) + require.NoError(t, err, "error issuing certificate: %v", err) + require.NotNil(t, resp, "got nil response from issuing request") + certToRevoke := parseCert(t, resp.Data["certificate"].(string)) + serialNum := resp.Data["serial_number"].(string) + + // Revoke the certificate + resp, err = client.Logical().Write("pki/revoke", map[string]interface{}{ + "serial_number": serialNum, + }) + require.NoError(t, err, "error revoking certificate: %v", err) + require.NotNil(t, resp, "got nil response from revoke") + + // Make sure that OCSP handler responds properly + ocspReq := generateRequest(t, crypto.SHA256, certToRevoke, issuerCert) + ocspPostReq := client.NewRequest(http.MethodPost, "/v1/pki/ocsp") + ocspPostReq.Headers.Set("Content-Type", "application/ocsp-request") + ocspPostReq.BodyBytes = ocspReq + rawResp, err := client.RawRequest(ocspPostReq) + require.NoError(t, err, "failed sending ocsp post request") + + require.Equal(t, 200, rawResp.StatusCode) + require.Equal(t, ocspResponseContentType, rawResp.Header.Get("Content-Type")) + bodyReader := rawResp.Body + respDer, err := io.ReadAll(bodyReader) + bodyReader.Close() + require.NoError(t, err, "failed reading response body") + + ocspResp, err := ocsp.ParseResponse(respDer, issuerCert) + require.NoError(t, err, "parsing ocsp get response") + + require.Equal(t, ocsp.Revoked, ocspResp.Status) + require.Equal(t, certToRevoke.SerialNumber, ocspResp.SerialNumber) + + // Test OCSP Get request for ocsp + urlEncoded := base64.StdEncoding.EncodeToString(ocspReq) + if strings.Contains(urlEncoded, "//") { + // workaround known redirect bug that is difficult to fix + t.Skipf("VAULT-13630 - Skipping GET OCSP test with encoded issuer cert containing // triggering redirection bug") + } + + ocspGetReq := client.NewRequest(http.MethodGet, "/v1/pki/ocsp/"+urlEncoded) + ocspGetReq.Headers.Set("Content-Type", "application/ocsp-request") + rawResp, err = client.RawRequest(ocspGetReq) + require.NoError(t, err, "failed sending ocsp get request") + + require.Equal(t, 200, rawResp.StatusCode) + require.Equal(t, ocspResponseContentType, rawResp.Header.Get("Content-Type")) + bodyReader = rawResp.Body + respDer, err = io.ReadAll(bodyReader) + bodyReader.Close() + require.NoError(t, err, "failed reading response body") + + ocspResp, err = ocsp.ParseResponse(respDer, issuerCert) + 
require.NoError(t, err, "parsing ocsp get response")
+
+ require.Equal(t, ocsp.Revoked, ocspResp.Status)
+ require.Equal(t, certToRevoke.SerialNumber, ocspResp.SerialNumber)
+}
+
+// TestOcsp_NextUpdate makes sure that we are setting the appropriate values
+// for the NextUpdate field within our responses.
+func TestOcsp_NextUpdate(t *testing.T) {
+ // Within the runOcspRequestTest, with an ocspExpiry of 0,
+ // we will validate that NextUpdate was not set in the response
+ runOcspRequestTest(t, "POST", "ec", 0, 0, crypto.SHA256, 0)
+
+ // Within the runOcspRequestTest, with an ocspExpiry of 24 hours, we will validate
+ // that NextUpdate is set and has a time 24 hours later than ThisUpdate
+ runOcspRequestTest(t, "POST", "ec", 0, 0, crypto.SHA256, 24*time.Hour)
+}
+
+func TestOcsp_ValidRequests(t *testing.T) {
+ type caKeyConf struct {
+ keyType string
+ keyBits int
+ sigBits int
+ }
+ t.Parallel()
+ type testArgs struct {
+ reqType string
+ keyConf caKeyConf
+ reqHash crypto.Hash
+ }
+ var tests []testArgs
+ for _, reqType := range []string{"get", "post"} {
+ for _, keyConf := range []caKeyConf{
+ {"rsa", 0, 0},
+ {"rsa", 0, 384},
+ {"rsa", 0, 512},
+ {"ec", 0, 0},
+ {"ec", 521, 0},
+ } {
+ // "ed25519" is not supported at the moment in x/crypto/ocsp
+ for _, requestHash := range []crypto.Hash{crypto.SHA1, crypto.SHA256, crypto.SHA384, crypto.SHA512} {
+ tests = append(tests, testArgs{
+ reqType: reqType,
+ keyConf: keyConf,
+ reqHash: requestHash,
+ })
+ }
+ }
+ }
+ for _, tt := range tests {
+ localTT := tt
+ testName := fmt.Sprintf("%s-%s-keybits-%d-sigbits-%d-reqHash-%s", localTT.reqType, localTT.keyConf.keyType,
+ localTT.keyConf.keyBits,
+ localTT.keyConf.sigBits,
+ localTT.reqHash)
+ t.Run(testName, func(t *testing.T) {
+ runOcspRequestTest(t, localTT.reqType, localTT.keyConf.keyType, localTT.keyConf.keyBits,
+ localTT.keyConf.sigBits, localTT.reqHash, 12*time.Hour)
+ })
+ }
+}
+
+func runOcspRequestTest(t *testing.T, requestType string, caKeyType string,
+ caKeyBits int, caKeySigBits int, requestHash crypto.Hash, ocspExpiry time.Duration,
+) {
+ b, s, testEnv := setupOcspEnvWithCaKeyConfig(t, caKeyType, caKeyBits, caKeySigBits, ocspExpiry)
+
+ // Non-revoked cert
+ resp, err := SendOcspRequest(t, b, s, requestType, testEnv.leafCertIssuer1, testEnv.issuer1, requestHash)
+ requireSuccessNonNilResponse(t, resp, err, "ocsp get request")
+ requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body")
+ require.Equal(t, 200, resp.Data["http_status_code"])
+ require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"])
+ respDer := resp.Data["http_raw_body"].([]byte)
+
+ ocspResp, err := ocsp.ParseResponse(respDer, testEnv.issuer1)
+ require.NoError(t, err, "parsing ocsp get response")
+
+ require.Equal(t, ocsp.Good, ocspResp.Status)
+ require.Equal(t, requestHash, ocspResp.IssuerHash)
+ require.Equal(t, 0, ocspResp.RevocationReason)
+ require.Equal(t, testEnv.leafCertIssuer1.SerialNumber, ocspResp.SerialNumber)
+
+ requireOcspSignatureAlgoForKey(t, testEnv.issuer1.SignatureAlgorithm, ocspResp.SignatureAlgorithm)
+ requireOcspResponseSignedBy(t, ocspResp, testEnv.issuer1)
+
+ // Now revoke it
+ resp, err = CBWrite(b, s, "revoke", map[string]interface{}{
+ "serial_number": serialFromCert(testEnv.leafCertIssuer1),
+ })
+ requireSuccessNonNilResponse(t, resp, err, "revoke")
+
+ resp, err = SendOcspRequest(t, b, s, requestType, testEnv.leafCertIssuer1, testEnv.issuer1, requestHash)
+ requireSuccessNonNilResponse(t, resp, err, "ocsp get request with 
revoked") + requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") + require.Equal(t, 200, resp.Data["http_status_code"]) + require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) + respDer = resp.Data["http_raw_body"].([]byte) + + ocspResp, err = ocsp.ParseResponse(respDer, testEnv.issuer1) + require.NoError(t, err, "parsing ocsp get response with revoked") + + require.Equal(t, ocsp.Revoked, ocspResp.Status) + require.Equal(t, requestHash, ocspResp.IssuerHash) + require.Equal(t, 0, ocspResp.RevocationReason) + require.Equal(t, testEnv.leafCertIssuer1.SerialNumber, ocspResp.SerialNumber) + + requireOcspSignatureAlgoForKey(t, testEnv.issuer1.SignatureAlgorithm, ocspResp.SignatureAlgorithm) + requireOcspResponseSignedBy(t, ocspResp, testEnv.issuer1) + + // Request status for our second issuer + resp, err = SendOcspRequest(t, b, s, requestType, testEnv.leafCertIssuer2, testEnv.issuer2, requestHash) + requireSuccessNonNilResponse(t, resp, err, "ocsp get request") + requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") + require.Equal(t, 200, resp.Data["http_status_code"]) + require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) + respDer = resp.Data["http_raw_body"].([]byte) + + ocspResp, err = ocsp.ParseResponse(respDer, testEnv.issuer2) + require.NoError(t, err, "parsing ocsp get response") + + require.Equal(t, ocsp.Good, ocspResp.Status) + require.Equal(t, requestHash, ocspResp.IssuerHash) + require.Equal(t, 0, ocspResp.RevocationReason) + require.Equal(t, testEnv.leafCertIssuer2.SerialNumber, ocspResp.SerialNumber) + + // Verify that our thisUpdate and nextUpdate fields are updated as expected + resp, err = CBRead(b, s, "config/crl") + requireSuccessNonNilResponse(t, resp, err, "failed reading from config/crl") + requireFieldsSetInResp(t, resp, "ocsp_expiry") + ocspExpiryRaw := resp.Data["ocsp_expiry"].(string) + expectedDiff, err := parseutil.ParseDurationSecond(ocspExpiryRaw) + require.NoError(t, err, "failed to parse default ocsp expiry value") + + thisUpdate := ocspResp.ThisUpdate + require.Less(t, time.Since(thisUpdate), 10*time.Second, "expected ThisUpdate field to be within the last 10 seconds") + if expectedDiff != 0 { + nextUpdate := ocspResp.NextUpdate + require.False(t, nextUpdate.IsZero(), "nextUpdate field value should have been a non-zero time") + require.True(t, thisUpdate.Before(nextUpdate), + fmt.Sprintf("thisUpdate %s, should have been before nextUpdate: %s", thisUpdate, nextUpdate)) + nextUpdateDiff := nextUpdate.Sub(thisUpdate) + require.Equal(t, expectedDiff, nextUpdateDiff, + fmt.Sprintf("the delta between thisUpdate %s and nextUpdate: %s should have been around: %s but was %s", + thisUpdate, nextUpdate, defaultCrlConfig.OcspExpiry, nextUpdateDiff)) + } else { + // With the config value set to 0, we shouldn't have a NextUpdate field set + require.True(t, ocspResp.NextUpdate.IsZero(), "nextUpdate value was not zero as expected was: %v", ocspResp.NextUpdate) + } + requireOcspSignatureAlgoForKey(t, testEnv.issuer2.SignatureAlgorithm, ocspResp.SignatureAlgorithm) + requireOcspResponseSignedBy(t, ocspResp, testEnv.issuer2) +} + +func requireOcspSignatureAlgoForKey(t *testing.T, expected x509.SignatureAlgorithm, actual x509.SignatureAlgorithm) { + t.Helper() + + require.Equal(t, expected.String(), actual.String()) +} + +type ocspTestEnv struct { + issuer1 *x509.Certificate + issuer2 *x509.Certificate + + issuerId1 issuerID + issuerId2 issuerID + + leafCertIssuer1 
*x509.Certificate + leafCertIssuer2 *x509.Certificate + + keyId1 keyID + keyId2 keyID +} + +func setupOcspEnv(t *testing.T, keyType string) (*backend, logical.Storage, *ocspTestEnv) { + return setupOcspEnvWithCaKeyConfig(t, keyType, 0, 0, 12*time.Hour) +} + +func setupOcspEnvWithCaKeyConfig(t *testing.T, keyType string, caKeyBits int, caKeySigBits int, ocspExpiry time.Duration) (*backend, logical.Storage, *ocspTestEnv) { + b, s := CreateBackendWithStorage(t) + var issuerCerts []*x509.Certificate + var leafCerts []*x509.Certificate + var issuerIds []issuerID + var keyIds []keyID + + resp, err := CBWrite(b, s, "config/crl", map[string]interface{}{ + "ocsp_enable": true, + "ocsp_expiry": fmt.Sprintf("%ds", int(ocspExpiry.Seconds())), + }) + requireSuccessNonNilResponse(t, resp, err, "config/crl failed") + + for i := 0; i < 2; i++ { + resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "key_type": keyType, + "key_bits": caKeyBits, + "signature_bits": caKeySigBits, + "ttl": "40h", + "common_name": "example-ocsp.com", + }) + requireSuccessNonNilResponse(t, resp, err, "root/generate/internal") + requireFieldsSetInResp(t, resp, "issuer_id", "key_id") + issuerId := resp.Data["issuer_id"].(issuerID) + keyId := resp.Data["key_id"].(keyID) + + resp, err = CBWrite(b, s, "roles/test"+strconv.FormatInt(int64(i), 10), map[string]interface{}{ + "allow_bare_domains": true, + "allow_subdomains": true, + "allowed_domains": "foobar.com", + "no_store": false, + "generate_lease": false, + "issuer_ref": issuerId, + "key_type": keyType, + }) + requireSuccessNonNilResponse(t, resp, err, "roles/test"+strconv.FormatInt(int64(i), 10)) + + resp, err = CBWrite(b, s, "issue/test"+strconv.FormatInt(int64(i), 10), map[string]interface{}{ + "common_name": "test.foobar.com", + }) + requireSuccessNonNilResponse(t, resp, err, "roles/test"+strconv.FormatInt(int64(i), 10)) + requireFieldsSetInResp(t, resp, "certificate", "issuing_ca", "serial_number") + leafCert := parseCert(t, resp.Data["certificate"].(string)) + issuingCa := parseCert(t, resp.Data["issuing_ca"].(string)) + + issuerCerts = append(issuerCerts, issuingCa) + leafCerts = append(leafCerts, leafCert) + issuerIds = append(issuerIds, issuerId) + keyIds = append(keyIds, keyId) + } + + testEnv := &ocspTestEnv{ + issuerId1: issuerIds[0], + issuer1: issuerCerts[0], + leafCertIssuer1: leafCerts[0], + keyId1: keyIds[0], + + issuerId2: issuerIds[1], + issuer2: issuerCerts[1], + leafCertIssuer2: leafCerts[1], + keyId2: keyIds[1], + } + + return b, s, testEnv +} + +func SendOcspRequest(t *testing.T, b *backend, s logical.Storage, getOrPost string, cert, issuer *x509.Certificate, requestHash crypto.Hash) (*logical.Response, error) { + t.Helper() + + ocspRequest := generateRequest(t, requestHash, cert, issuer) + + switch strings.ToLower(getOrPost) { + case "get": + return sendOcspGetRequest(b, s, ocspRequest) + case "post": + return sendOcspPostRequest(b, s, ocspRequest) + default: + t.Fatalf("unsupported value for SendOcspRequest getOrPost arg: %s", getOrPost) + } + return nil, nil +} + +func sendOcspGetRequest(b *backend, s logical.Storage, ocspRequest []byte) (*logical.Response, error) { + urlEncoded := base64.StdEncoding.EncodeToString(ocspRequest) + return CBRead(b, s, "ocsp/"+urlEncoded) +} + +func sendOcspPostRequest(b *backend, s logical.Storage, ocspRequest []byte) (*logical.Response, error) { + reader := io.NopCloser(bytes.NewReader(ocspRequest)) + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: 
logical.UpdateOperation,
+ Path: "ocsp",
+ Storage: s,
+ MountPoint: "pki/",
+ HTTPRequest: &http.Request{
+ Body: reader,
+ },
+ })
+
+ return resp, err
+}
diff --git a/builtin/logical/pki/path_resign_crls.go b/builtin/logical/pki/path_resign_crls.go
new file mode 100644
index 0000000..a82f94f
--- /dev/null
+++ b/builtin/logical/pki/path_resign_crls.go
@@ -0,0 +1,675 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package pki
+
+import (
+ "context"
+ "crypto/rand"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "encoding/base64"
+ "encoding/hex"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "math/big"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/go-secure-stdlib/parseutil"
+ "github.com/hashicorp/vault/sdk/framework"
+ "github.com/hashicorp/vault/sdk/helper/certutil"
+ "github.com/hashicorp/vault/sdk/logical"
+)
+
+const (
+ crlNumberParam = "crl_number"
+ deltaCrlBaseNumberParam = "delta_crl_base_number"
+ nextUpdateParam = "next_update"
+ crlsParam = "crls"
+ formatParam = "format"
+)
+
+var (
+ akOid = asn1.ObjectIdentifier{2, 5, 29, 35}
+ crlNumOid = asn1.ObjectIdentifier{2, 5, 29, 20}
+ deltaCrlOid = asn1.ObjectIdentifier{2, 5, 29, 27}
+)
+
+func pathResignCrls(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/resign-crls",
+
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationPrefix: operationPrefixPKIIssuer,
+ OperationVerb: "resign",
+ OperationSuffix: "crls",
+ },
+
+ Fields: map[string]*framework.FieldSchema{
+ issuerRefParam: {
+ Type: framework.TypeString,
+ Description: `Reference to an existing issuer; either "default"
+for the configured default issuer, an identifier or the name assigned
+to the issuer.`,
+ Default: defaultRef,
+ },
+ crlNumberParam: {
+ Type: framework.TypeInt,
+ Description: `The sequence number to be written within the CRL Number extension.`,
+ },
+ deltaCrlBaseNumberParam: {
+ Type: framework.TypeInt,
+ Description: `Using a zero or greater value specifies the base CRL revision number to encode within
+ a Delta CRL indicator extension, otherwise the extension will not be added.`,
+ Default: -1,
+ },
+ nextUpdateParam: {
+ Type: framework.TypeString,
+ Description: `The amount of time the generated CRL should be
+valid; defaults to 72 hours.`,
+ Default: defaultCrlConfig.Expiry,
+ },
+ crlsParam: {
+ Type: framework.TypeStringSlice,
+ Description: `A list of PEM encoded CRLs to combine, originally signed by the requested issuer.`,
+ },
+ formatParam: {
+ Type: framework.TypeString,
+ Description: `The format of the combined CRL, can be "pem" or "der". If "der", the value will be
+base64 encoded. 
Defaults to "pem".`,
+ Default: "pem",
+ },
+ },
+ Operations: map[logical.Operation]framework.OperationHandler{
+ logical.UpdateOperation: &framework.PathOperation{
+ Callback: b.pathUpdateResignCrlsHandler,
+ Responses: map[int][]framework.Response{
+ http.StatusOK: {{
+ Description: "OK",
+ Fields: map[string]*framework.FieldSchema{
+ "crl": {
+ Type: framework.TypeString,
+ Description: `CRL`,
+ Required: true,
+ },
+ },
+ }},
+ },
+ },
+ },
+
+ HelpSynopsis: `Combine and sign different CRLs with the provided issuer`,
+ HelpDescription: `Provide two or more PEM encoded CRLs signed by the issuer,
+ normally from separate Vault clusters to be combined and signed.`,
+ }
+}
+
+func pathSignRevocationList(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/sign-revocation-list",
+
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationPrefix: operationPrefixPKIIssuer,
+ OperationVerb: "sign",
+ OperationSuffix: "revocation-list",
+ },
+
+ Fields: map[string]*framework.FieldSchema{
+ issuerRefParam: {
+ Type: framework.TypeString,
+ Description: `Reference to an existing issuer; either "default"
+for the configured default issuer, an identifier or the name assigned
+to the issuer.`,
+ Default: defaultRef,
+ },
+ crlNumberParam: {
+ Type: framework.TypeInt,
+ Description: `The sequence number to be written within the CRL Number extension.`,
+ },
+ deltaCrlBaseNumberParam: {
+ Type: framework.TypeInt,
+ Description: `Using a zero or greater value specifies the base CRL revision number to encode within
+ a Delta CRL indicator extension, otherwise the extension will not be added.`,
+ Default: -1,
+ },
+ nextUpdateParam: {
+ Type: framework.TypeString,
+ Description: `The amount of time the generated CRL should be
+valid; defaults to 72 hours.`,
+ Default: defaultCrlConfig.Expiry,
+ },
+ formatParam: {
+ Type: framework.TypeString,
+ Description: `The format of the combined CRL, can be "pem" or "der". If "der", the value will be
+base64 encoded. 
Defaults to "pem".`, + Default: "pem", + }, + "revoked_certs": { + Type: framework.TypeSlice, + Description: `A list of maps containing the keys serial_number (string), revocation_time (string), +and extensions (map with keys id (string), critical (bool), value (string))`, + }, + "extensions": { + Type: framework.TypeSlice, + Description: `A list of maps containing extensions with keys id (string), critical (bool), +value (string)`, + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathUpdateSignRevocationListHandler, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "crl": { + Type: framework.TypeString, + Description: `CRL`, + Required: true, + }, + }, + }}, + }, + }, + }, + + HelpSynopsis: `Generate and sign a CRL based on the provided parameters.`, + HelpDescription: `Given a list of revoked certificates and other parameters, +return a signed CRL based on the parameter values.`, + } +} + +func (b *backend) pathUpdateResignCrlsHandler(ctx context.Context, request *logical.Request, data *framework.FieldData) (*logical.Response, error) { + if b.useLegacyBundleCaStorage() { + return logical.ErrorResponse("This API cannot be used until the migration has completed"), nil + } + + issuerRef := getIssuerRef(data) + crlNumber := data.Get(crlNumberParam).(int) + deltaCrlBaseNumber := data.Get(deltaCrlBaseNumberParam).(int) + nextUpdateStr := data.Get(nextUpdateParam).(string) + rawCrls := data.Get(crlsParam).([]string) + + format, err := parseCrlFormat(data.Get(formatParam).(string)) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + nextUpdateOffset, err := parseutil.ParseDurationSecond(nextUpdateStr) + if err != nil { + return logical.ErrorResponse("invalid value for %s: %v", nextUpdateParam, err), nil + } + + if nextUpdateOffset <= 0 { + return logical.ErrorResponse("%s parameter must be greater than 0", nextUpdateParam), nil + } + + if crlNumber < 0 { + return logical.ErrorResponse("%s parameter must be 0 or greater", crlNumberParam), nil + } + if deltaCrlBaseNumber < -1 { + return logical.ErrorResponse("%s parameter must be -1 or greater", deltaCrlBaseNumberParam), nil + } + + if issuerRef == "" { + return logical.ErrorResponse("%s parameter cannot be blank", issuerRefParam), nil + } + + providedCrls, err := decodePemCrls(rawCrls) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + sc := b.makeStorageContext(ctx, request.Storage) + caBundle, err := getCaBundle(sc, issuerRef) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + if err := verifyCrlsAreFromIssuersKey(caBundle.Certificate, providedCrls); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + revokedCerts, warnings, err := getAllRevokedCertsFromPem(providedCrls) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + now := time.Now() + template := &x509.RevocationList{ + SignatureAlgorithm: caBundle.RevocationSigAlg, + RevokedCertificates: revokedCerts, + Number: big.NewInt(int64(crlNumber)), + ThisUpdate: now, + NextUpdate: now.Add(nextUpdateOffset), + } + + if deltaCrlBaseNumber > -1 { + ext, err := certutil.CreateDeltaCRLIndicatorExt(int64(deltaCrlBaseNumber)) + if err != nil { + return nil, fmt.Errorf("could not create crl delta indicator extension: %v", err) + } + template.ExtraExtensions = []pkix.Extension{ext} + } + + crlBytes, err := 
x509.CreateRevocationList(rand.Reader, template, caBundle.Certificate, caBundle.PrivateKey) + if err != nil { + return nil, fmt.Errorf("error creating new CRL: %w", err) + } + + body := encodeResponse(crlBytes, format == "der") + + return &logical.Response{ + Warnings: warnings, + Data: map[string]interface{}{ + "crl": body, + }, + }, nil +} + +func (b *backend) pathUpdateSignRevocationListHandler(ctx context.Context, request *logical.Request, data *framework.FieldData) (*logical.Response, error) { + if b.useLegacyBundleCaStorage() { + return logical.ErrorResponse("This API cannot be used until the migration has completed"), nil + } + + issuerRef := getIssuerRef(data) + crlNumber := data.Get(crlNumberParam).(int) + deltaCrlBaseNumber := data.Get(deltaCrlBaseNumberParam).(int) + nextUpdateStr := data.Get(nextUpdateParam).(string) + nextUpdateOffset, err := parseutil.ParseDurationSecond(nextUpdateStr) + if err != nil { + return logical.ErrorResponse("invalid value for %s: %v", nextUpdateParam, err), nil + } + + if nextUpdateOffset <= 0 { + return logical.ErrorResponse("%s parameter must be greater than 0", nextUpdateParam), nil + } + + if crlNumber < 0 { + return logical.ErrorResponse("%s parameter must be 0 or greater", crlNumberParam), nil + } + if deltaCrlBaseNumber < -1 { + return logical.ErrorResponse("%s parameter must be -1 or greater", deltaCrlBaseNumberParam), nil + } + + if issuerRef == "" { + return logical.ErrorResponse("%s parameter cannot be blank", issuerRefParam), nil + } + + format, err := parseCrlFormat(data.Get(formatParam).(string)) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + revokedCerts, err := parseRevokedCertsParam(data.Get("revoked_certs").([]interface{})) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + crlExtensions, err := parseExtensionsParam(data.Get("extensions").([]interface{})) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + sc := b.makeStorageContext(ctx, request.Storage) + caBundle, err := getCaBundle(sc, issuerRef) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + if deltaCrlBaseNumber > -1 { + ext, err := certutil.CreateDeltaCRLIndicatorExt(int64(deltaCrlBaseNumber)) + if err != nil { + return nil, fmt.Errorf("could not create crl delta indicator extension: %v", err) + } + crlExtensions = append(crlExtensions, ext) + } + + now := time.Now() + template := &x509.RevocationList{ + SignatureAlgorithm: caBundle.RevocationSigAlg, + RevokedCertificates: revokedCerts, + Number: big.NewInt(int64(crlNumber)), + ThisUpdate: now, + NextUpdate: now.Add(nextUpdateOffset), + ExtraExtensions: crlExtensions, + } + + crlBytes, err := x509.CreateRevocationList(rand.Reader, template, caBundle.Certificate, caBundle.PrivateKey) + if err != nil { + return nil, fmt.Errorf("error creating new CRL: %w", err) + } + + body := encodeResponse(crlBytes, format == "der") + + return &logical.Response{ + Data: map[string]interface{}{ + "crl": body, + }, + }, nil +} + +func parseRevokedCertsParam(revokedCerts []interface{}) ([]pkix.RevokedCertificate, error) { + var parsedCerts []pkix.RevokedCertificate + seenSerials := make(map[*big.Int]int) + for i, entry := range revokedCerts { + if revokedCert, ok := entry.(map[string]interface{}); ok { + serialNum, err := parseSerialNum(revokedCert) + if err != nil { + return nil, fmt.Errorf("failed parsing serial_number from entry %d: %w", i, err) + } + + if origEntry, exists := seenSerials[serialNum]; exists { + serialNumStr := 
revokedCert["serial_number"] + return nil, fmt.Errorf("duplicate serial number: %s, original entry %d and %d", serialNumStr, origEntry, i) + } + + seenSerials[serialNum] = i + + revocationTime, err := parseRevocationTime(revokedCert) + if err != nil { + return nil, fmt.Errorf("failed parsing revocation_time from entry %d: %w", i, err) + } + + extensions, err := parseCertExtensions(revokedCert) + if err != nil { + return nil, fmt.Errorf("failed parsing extensions from entry %d: %w", i, err) + } + + parsedCerts = append(parsedCerts, pkix.RevokedCertificate{ + SerialNumber: serialNum, + RevocationTime: revocationTime, + Extensions: extensions, + }) + } + } + + return parsedCerts, nil +} + +func parseCertExtensions(cert map[string]interface{}) ([]pkix.Extension, error) { + extRaw, exists := cert["extensions"] + if !exists || extRaw == nil || extRaw == "" { + // We don't require extensions to be populated + return []pkix.Extension{}, nil + } + + extListRaw, ok := extRaw.([]interface{}) + if !ok { + return nil, errors.New("'extensions' field did not contain a slice") + } + + return parseExtensionsParam(extListRaw) +} + +func parseExtensionsParam(extRawList []interface{}) ([]pkix.Extension, error) { + var extensions []pkix.Extension + seenOid := make(map[string]struct{}) + for i, entryRaw := range extRawList { + entry, ok := entryRaw.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("extension entry %d not a map", i) + } + extension, err := parseExtension(entry) + if err != nil { + return nil, fmt.Errorf("failed parsing extension entry %d: %w", i, err) + } + + parsedIdStr := extension.Id.String() + if _, exists := seenOid[parsedIdStr]; exists { + return nil, fmt.Errorf("duplicate extension id: %s", parsedIdStr) + } + + seenOid[parsedIdStr] = struct{}{} + + extensions = append(extensions, extension) + } + + return extensions, nil +} + +func parseExtension(entry map[string]interface{}) (pkix.Extension, error) { + asnObjectId, err := parseExtAsn1ObjectId(entry) + if err != nil { + return pkix.Extension{}, err + } + + if asnObjectId.Equal(akOid) { + return pkix.Extension{}, fmt.Errorf("authority key object identifier (%s) is reserved", akOid.String()) + } + + if asnObjectId.Equal(crlNumOid) { + return pkix.Extension{}, fmt.Errorf("crl number object identifier (%s) is reserved", crlNumOid.String()) + } + + if asnObjectId.Equal(deltaCrlOid) { + return pkix.Extension{}, fmt.Errorf("delta crl object identifier (%s) is reserved", deltaCrlOid.String()) + } + + critical, err := parseExtCritical(entry) + if err != nil { + return pkix.Extension{}, err + } + + extVal, err := parseExtValue(entry) + if err != nil { + return pkix.Extension{}, err + } + + return pkix.Extension{ + Id: asnObjectId, + Critical: critical, + Value: extVal, + }, nil +} + +func parseExtValue(entry map[string]interface{}) ([]byte, error) { + valRaw, exists := entry["value"] + if !exists { + return nil, errors.New("missing 'value' field") + } + + valStr, err := parseutil.ParseString(valRaw) + if err != nil { + return nil, fmt.Errorf("'value' field value was not a string: %w", err) + } + + if len(valStr) == 0 { + return []byte{}, nil + } + + decodeString, err := base64.StdEncoding.DecodeString(valStr) + if err != nil { + return nil, fmt.Errorf("failed base64 decoding 'value' field: %w", err) + } + return decodeString, nil +} + +func parseExtCritical(entry map[string]interface{}) (bool, error) { + critRaw, exists := entry["critical"] + if !exists || critRaw == nil || critRaw == "" { + // Optional field, so just return as if they 
provided the value false. + return false, nil + } + + myBool, err := parseutil.ParseBool(critRaw) + if err != nil { + return false, fmt.Errorf("critical field value failed to be parsed: %w", err) + } + + return myBool, nil +} + +func parseExtAsn1ObjectId(entry map[string]interface{}) (asn1.ObjectIdentifier, error) { + idRaw, idExists := entry["id"] + if !idExists { + return asn1.ObjectIdentifier{}, errors.New("missing id field") + } + + oidStr, err := parseutil.ParseString(idRaw) + if err != nil { + return nil, fmt.Errorf("'id' field value was not a string: %w", err) + } + + if len(oidStr) == 0 { + return asn1.ObjectIdentifier{}, errors.New("zero length object identifier") + } + + // Parse out dot notation + oidParts := strings.Split(oidStr, ".") + oid := make(asn1.ObjectIdentifier, len(oidParts), len(oidParts)) + for i := range oidParts { + oidIntVal, err := strconv.Atoi(oidParts[i]) + if err != nil { + return nil, fmt.Errorf("failed parsing asn1 index element %d value %s: %w", i, oidParts[i], err) + } + oid[i] = oidIntVal + } + return oid, nil +} + +func parseRevocationTime(cert map[string]interface{}) (time.Time, error) { + var revTime time.Time + revTimeRaw, exists := cert["revocation_time"] + if !exists { + return revTime, errors.New("missing 'revocation_time' field") + } + revTime, err := parseutil.ParseAbsoluteTime(revTimeRaw) + if err != nil { + return revTime, fmt.Errorf("failed parsing time %v: %w", revTimeRaw, err) + } + return revTime, nil +} + +func parseSerialNum(cert map[string]interface{}) (*big.Int, error) { + serialNumRaw, serialExists := cert["serial_number"] + if !serialExists { + return nil, errors.New("missing 'serial_number' field") + } + serialNumStr, err := parseutil.ParseString(serialNumRaw) + if err != nil { + return nil, fmt.Errorf("'serial_number' field value was not a string: %w", err) + } + // Clean up any provided serials to decoder + for _, separator := range []string{":", ".", "-", " "} { + serialNumStr = strings.ReplaceAll(serialNumStr, separator, "") + } + // Prefer hex.DecodeString over certutil.ParseHexFormatted as we don't need a separator + serialBytes, err := hex.DecodeString(serialNumStr) + if err != nil { + return nil, fmt.Errorf("'serial_number' failed converting to bytes: %w", err) + } + + bigIntSerial := big.Int{} + bigIntSerial.SetBytes(serialBytes) + return &bigIntSerial, nil +} + +func parseCrlFormat(requestedValue string) (string, error) { + format := strings.ToLower(requestedValue) + switch format { + case "pem", "der": + return format, nil + default: + return "", fmt.Errorf("unknown format value of %s", requestedValue) + } +} + +func verifyCrlsAreFromIssuersKey(caCert *x509.Certificate, crls []*x509.RevocationList) error { + for i, crl := range crls { + // At this point we assume if the issuer's key signed the CRL that is a good enough check + // to validate that we owned/generated the provided CRL. 
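+		//
+		// CheckSignatureFrom only verifies the CRL's signature against this
+		// issuer certificate's public key. As a hedged out-of-band check of
+		// the same property, something like the following OpenSSL invocation
+		// (assuming ca.pem and crl.pem hold the PEM-encoded issuer and CRL)
+		// should print "verify OK":
+		//
+		//	openssl crl -in crl.pem -CAfile ca.pem -noout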
+ if err := crl.CheckSignatureFrom(caCert); err != nil { + return fmt.Errorf("CRL index: %d was not signed by requested issuer", i) + } + } + + return nil +} + +func encodeResponse(crlBytes []byte, derFormatRequested bool) string { + if derFormatRequested { + return base64.StdEncoding.EncodeToString(crlBytes) + } + + block := pem.Block{ + Type: "X509 CRL", + Bytes: crlBytes, + } + return string(pem.EncodeToMemory(&block)) +} + +func getAllRevokedCertsFromPem(crls []*x509.RevocationList) ([]pkix.RevokedCertificate, []string, error) { + uniqueCert := map[string]pkix.RevokedCertificate{} + var warnings []string + for _, crl := range crls { + for _, curCert := range crl.RevokedCertificates { + serial := serialFromBigInt(curCert.SerialNumber) + // Get rid of any extensions the existing certificate might have had. + curCert.Extensions = []pkix.Extension{} + + existingCert, exists := uniqueCert[serial] + if !exists { + // First time we see the revoked cert + uniqueCert[serial] = curCert + continue + } + + if existingCert.RevocationTime.Equal(curCert.RevocationTime) { + // Same revocation times, just skip it + continue + } + + warn := fmt.Sprintf("Duplicate serial %s with different revocation "+ + "times detected, using oldest revocation time", serial) + warnings = append(warnings, warn) + + if existingCert.RevocationTime.After(curCert.RevocationTime) { + uniqueCert[serial] = curCert + } + } + } + + var revokedCerts []pkix.RevokedCertificate + for _, cert := range uniqueCert { + revokedCerts = append(revokedCerts, cert) + } + + return revokedCerts, warnings, nil +} + +func getCaBundle(sc *storageContext, issuerRef string) (*certutil.CAInfoBundle, error) { + issuerId, err := sc.resolveIssuerReference(issuerRef) + if err != nil { + return nil, fmt.Errorf("failed to resolve issuer %s: %w", issuerRefParam, err) + } + + return sc.fetchCAInfoByIssuerId(issuerId, CRLSigningUsage) +} + +func decodePemCrls(rawCrls []string) ([]*x509.RevocationList, error) { + var crls []*x509.RevocationList + for i, rawCrl := range rawCrls { + crl, err := decodePemCrl(rawCrl) + if err != nil { + return nil, fmt.Errorf("failed decoding crl %d: %w", i, err) + } + crls = append(crls, crl) + } + + return crls, nil +} + +func decodePemCrl(crl string) (*x509.RevocationList, error) { + block, rest := pem.Decode([]byte(crl)) + if len(rest) != 0 { + return nil, errors.New("invalid crl; should be one PEM block only") + } + + return x509.ParseRevocationList(block.Bytes) +} diff --git a/builtin/logical/pki/path_resign_crls_test.go b/builtin/logical/pki/path_resign_crls_test.go new file mode 100644 index 0000000..f1ee115 --- /dev/null +++ b/builtin/logical/pki/path_resign_crls_test.go @@ -0,0 +1,512 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/base64" + "fmt" + "math/big" + "testing" + "time" + + "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" + + "github.com/hashicorp/vault/api" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/vault" + + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/require" +) + +func TestResignCrls_ForbidSigningOtherIssuerCRL(t *testing.T) { + t.Parallel() + + // Some random CRL from another issuer + pem1 := "-----BEGIN X509 CRL-----\nMIIBvjCBpwIBATANBgkqhkiG9w0BAQsFADAbMRkwFwYDVQQDExByb290LWV4YW1w\nbGUuY29tFw0yMjEwMjYyMTI5MzlaFw0yMjEwMjkyMTI5MzlaMCcwJQIUSnVf8wsd\nHjOt9drCYFhWxS9QqGoXDTIyMTAyNjIxMjkzOVqgLzAtMB8GA1UdIwQYMBaAFHki\nZ0XDUQVSajNRGXrg66OaIFlYMAoGA1UdFAQDAgEDMA0GCSqGSIb3DQEBCwUAA4IB\nAQBGIdtqTwemnLZF5AoP+jzvKZ26S3y7qvRIzd7f4A0EawzYmWXSXfwqo4TQ4DG3\nnvT+AaA1zCCOlH/1U+ufN9gSSN0j9ax58brSYMnMskMCqhLKIp0qnvS4jr/gopmF\nv8grbvLHEqNYTu1T7umMLdNQUsWT3Qc+EIjfoKj8xD2FHsZwJ+EMbytwl8Unipjr\nhz4rmcES/65vavfdFpOI6YXfi+UAaHBdkTqmHgg4BdpuXfYtlf+iotFSOkygD5fl\n0D+RVFW9uJv2WfbQ7kRt1X/VcFk/onw0AQqxZRVUzvjoMw+EMcxSq3UKOlXcWDxm\nEFz9rFQQ66L388EP8RD7Dh3X\n-----END X509 CRL-----" + + b, s := CreateBackendWithStorage(t) + resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "test.com", + "key_type": "ec", + }) + requireSuccessNonNilResponse(t, resp, err) + + resp, err = CBWrite(b, s, "issuer/default/resign-crls", map[string]interface{}{ + "crl_number": "2", + "next_update": "1h", + "format": "pem", + "crls": []string{pem1}, + }) + require.ErrorContains(t, err, "was not signed by requested issuer") +} + +func TestResignCrls_NormalCrl(t *testing.T) { + t.Parallel() + b1, s1 := CreateBackendWithStorage(t) + b2, s2 := CreateBackendWithStorage(t) + + // Setup two backends, with the same key material/certificate with a different leaf in each that is revoked. 
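+	// The helper (defined near the bottom of this file) hands back the shared
+	// CA certificate, the two revoked serials, and each mount's current PEM
+	// CRL. Over the CLI, the combine step exercised below would look roughly
+	// like the following (hypothetical mount name "pki", CRLs saved to local
+	// files; sketch only, not a verified invocation):
+	//
+	//	vault write pki/issuer/default/resign-crls \
+	//	    crl_number=2 next_update=1h format=pem \
+	//	    crls=@crl1.pem crls=@crl2.pem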
+ caCert, serial1, serial2, crl1, crl2 := setupResignCrlMounts(t, b1, s1, b2, s2) + + // Attempt to combine the CRLs + resp, err := CBWrite(b1, s1, "issuer/default/resign-crls", map[string]interface{}{ + "crl_number": "2", + "next_update": "1h", + "format": "pem", + "crls": []string{crl1, crl2}, + }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b1.Route("issuer/default/resign-crls"), logical.UpdateOperation), resp, true) + requireSuccessNonNilResponse(t, resp, err) + requireFieldsSetInResp(t, resp, "crl") + pemCrl := resp.Data["crl"].(string) + combinedCrl, err := decodePemCrl(pemCrl) + require.NoError(t, err, "failed decoding combined CRL") + serials := extractSerialsFromCrl(t, combinedCrl) + + require.Contains(t, serials, serial1) + require.Contains(t, serials, serial2) + require.Equal(t, 2, len(serials), "serials contained more serials than expected") + + require.Equal(t, big.NewInt(int64(2)), combinedCrl.Number) + require.Equal(t, combinedCrl.ThisUpdate.Add(1*time.Hour), combinedCrl.NextUpdate) + + extensions := combinedCrl.Extensions + requireExtensionOid(t, []int{2, 5, 29, 20}, extensions) // CRL Number Extension + requireExtensionOid(t, []int{2, 5, 29, 35}, extensions) // akidOid + require.Equal(t, 2, len(extensions)) + + err = combinedCrl.CheckSignatureFrom(caCert) + require.NoError(t, err, "failed signature check of CRL") +} + +func TestResignCrls_EliminateDuplicates(t *testing.T) { + t.Parallel() + b1, s1 := CreateBackendWithStorage(t) + b2, s2 := CreateBackendWithStorage(t) + + // Setup two backends, with the same key material/certificate with a different leaf in each that is revoked. + _, serial1, _, crl1, _ := setupResignCrlMounts(t, b1, s1, b2, s2) + + // Pass in the same CRLs to make sure we do not duplicate entries + resp, err := CBWrite(b1, s1, "issuer/default/resign-crls", map[string]interface{}{ + "crl_number": "2", + "next_update": "1h", + "format": "pem", + "crls": []string{crl1, crl1}, + }) + requireSuccessNonNilResponse(t, resp, err) + requireFieldsSetInResp(t, resp, "crl") + pemCrl := resp.Data["crl"].(string) + combinedCrl, err := decodePemCrl(pemCrl) + require.NoError(t, err, "failed decoding combined CRL") + + // Technically this will die if we have duplicates. + serials := extractSerialsFromCrl(t, combinedCrl) + + // We should have no warnings about collisions if they have the same revoked time + require.Empty(t, resp.Warnings, "expected no warnings in response") + + require.Contains(t, serials, serial1) + require.Equal(t, 1, len(serials), "serials contained more serials than expected") +} + +func TestResignCrls_ConflictingExpiry(t *testing.T) { + t.Parallel() + b1, s1 := CreateBackendWithStorage(t) + b2, s2 := CreateBackendWithStorage(t) + + // Setup two backends, with the same key material/certificate with a different leaf in each that is revoked. 
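+	// (The deduplication itself lives in getAllRevokedCertsFromPem: entries
+	// are keyed by serial, and when the same serial shows up with different
+	// revocation times the oldest time wins and a warning is returned.)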
_, serial1, serial2, crl1, _ := setupResignCrlMounts(t, b1, s1, b2, s2)
+
+	timeAfterMountSetup := time.Now()
+
+	// Read in serial1 from mount 1
+	resp, err := CBRead(b1, s1, "cert/"+serial1)
+	requireSuccessNonNilResponse(t, resp, err, "failed reading serial 1's certificate")
+	requireFieldsSetInResp(t, resp, "certificate")
+	cert1 := resp.Data["certificate"].(string)
+
+	// Wait until we have rolled over to the next second to make sure the generated CRL time
+	// on backend 2 for serial 1 will be different
+	time.Sleep(time.Until(timeAfterMountSetup.Add(1 * time.Second)))
+
+	// Use BYOC to revoke the same certificate on backend 2 now
+	resp, err = CBWrite(b2, s2, "revoke", map[string]interface{}{
+		"certificate": cert1,
+	})
+	requireSuccessNonNilResponse(t, resp, err, "failed revoking serial 1 on backend 2")
+
+	// Fetch the new CRL from backend 2 now
+	resp, err = CBRead(b2, s2, "cert/crl")
+	requireSuccessNonNilResponse(t, resp, err, "error fetch crl from backend 2")
+	requireFieldsSetInResp(t, resp, "certificate")
+	crl2 := resp.Data["certificate"].(string)
+
+	// Attempt to combine the CRLs
+	resp, err = CBWrite(b1, s1, "issuer/default/resign-crls", map[string]interface{}{
+		"crl_number":  "2",
+		"next_update": "1h",
+		"format":      "pem",
+		"crls":        []string{crl2, crl1}, // Make sure we don't just grab the first colliding entry...
+	})
+	requireSuccessNonNilResponse(t, resp, err)
+	requireFieldsSetInResp(t, resp, "crl")
+	pemCrl := resp.Data["crl"].(string)
+	combinedCrl, err := decodePemCrl(pemCrl)
+	require.NoError(t, err, "failed decoding combined CRL")
+	combinedSerials := extractSerialsFromCrl(t, combinedCrl)
+
+	require.Contains(t, combinedSerials, serial1)
+	require.Contains(t, combinedSerials, serial2)
+	require.Equal(t, 2, len(combinedSerials), "serials contained more serials than expected")
+
+	// Make sure we issued a warning about the time collision
+	require.NotEmpty(t, resp.Warnings, "expected at least one warning")
+	require.Contains(t, resp.Warnings[0], "different revocation times detected")
+
+	// Make sure we have the initial revocation time from backend 1 within the combined CRL.
+	decodedCrl1, err := decodePemCrl(crl1)
+	require.NoError(t, err, "failed decoding crl from backend 1")
+	serialsFromBackend1 := extractSerialsFromCrl(t, decodedCrl1)
+
+	require.Equal(t, serialsFromBackend1[serial1], combinedSerials[serial1])
+
+	// Make sure the initial revocation time from backend 1 does not match backend 2's time
+	decodedCrl2, err := decodePemCrl(crl2)
+	require.NoError(t, err, "failed decoding crl from backend 2")
+	serialsFromBackend2 := extractSerialsFromCrl(t, decodedCrl2)
+
+	require.NotEqual(t, serialsFromBackend1[serial1], serialsFromBackend2[serial1])
+}
+
+func TestResignCrls_DeltaCrl(t *testing.T) {
+	t.Parallel()
+
+	b1, s1 := CreateBackendWithStorage(t)
+	b2, s2 := CreateBackendWithStorage(t)
+
+	// Setup two backends, with the same key material/certificate with a different leaf in each that is revoked.
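+	// A delta CRL is distinguished by the Delta CRL Indicator extension
+	// (OID 2.5.29.27), which carries the base CRL number it applies to;
+	// the delta_crl_base_number parameter below is what produces it.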
+ caCert, serial1, serial2, crl1, crl2 := setupResignCrlMounts(t, b1, s1, b2, s2) + + resp, err := CBWrite(b1, s1, "issuer/default/resign-crls", map[string]interface{}{ + "crl_number": "5", + "delta_crl_base_number": "4", + "next_update": "12h", + "format": "pem", + "crls": []string{crl1, crl2}, + }) + requireSuccessNonNilResponse(t, resp, err) + requireFieldsSetInResp(t, resp, "crl") + pemCrl := resp.Data["crl"].(string) + combinedCrl, err := decodePemCrl(pemCrl) + require.NoError(t, err, "failed decoding combined CRL") + serials := extractSerialsFromCrl(t, combinedCrl) + + require.Contains(t, serials, serial1) + require.Contains(t, serials, serial2) + require.Equal(t, 2, len(serials), "serials contained more serials than expected") + + require.Equal(t, big.NewInt(int64(5)), combinedCrl.Number) + require.Equal(t, combinedCrl.ThisUpdate.Add(12*time.Hour), combinedCrl.NextUpdate) + + extensions := combinedCrl.Extensions + requireExtensionOid(t, []int{2, 5, 29, 27}, extensions) // Delta CRL Extension + requireExtensionOid(t, []int{2, 5, 29, 20}, extensions) // CRL Number Extension + requireExtensionOid(t, []int{2, 5, 29, 35}, extensions) // akidOid + require.Equal(t, 3, len(extensions)) + + err = combinedCrl.CheckSignatureFrom(caCert) + require.NoError(t, err, "failed signature check of CRL") +} + +func TestSignRevocationList(t *testing.T) { + t.Parallel() + + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + + // Mount PKI, use this form of backend so our request is closer to reality (json parsed) + err := client.Sys().Mount("pki", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "60h", + }, + }) + require.NoError(t, err) + + // Generate internal CA. 
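+	// Equivalent CLI invocation, roughly:
+	//
+	//	vault write pki/root/generate/internal common_name=myvault.com ttl=40h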
+ resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "myvault.com", + }) + require.NoError(t, err) + caCert := parseCert(t, resp.Data["certificate"].(string)) + + resp, err = client.Logical().Write("pki/issuer/default/sign-revocation-list", map[string]interface{}{ + "crl_number": "1", + "next_update": "12h", + "format": "pem", + "revoked_certs": []map[string]interface{}{ + { + "serial_number": "37:60:16:e4:85:d5:96:38:3a:ed:31:06:8d:ed:7a:46:d4:22:63:d8", + "revocation_time": "1668614976", + "extensions": []map[string]interface{}{}, + }, + { + "serial_number": "27:03:89:76:5a:d4:d8:19:48:47:ca:96:db:6f:27:86:31:92:9f:82", + "revocation_time": "2022-11-16T16:09:36.739592Z", + }, + { + "serial_number": "27:03:89:76:5a:d4:d8:19:48:47:ca:96:db:6f:27:86:31:92:9f:81", + "revocation_time": "2022-10-16T16:09:36.739592Z", + "extensions": []map[string]interface{}{ + { + "id": "2.5.29.100", + "critical": "true", + "value": "aGVsbG8=", // "hello" base64 encoded + }, + { + "id": "2.5.29.101", + "critical": "false", + "value": "Ynll", // "bye" base64 encoded + }, + }, + }, + }, + "extensions": []map[string]interface{}{ + { + "id": "2.5.29.200", + "critical": "true", + "value": "aGVsbG8=", // "hello" base64 encoded + }, + { + "id": "2.5.29.201", + "critical": "false", + "value": "Ynll", // "bye" base64 encoded + }, + }, + }) + require.NoError(t, err) + pemCrl := resp.Data["crl"].(string) + crl, err := decodePemCrl(pemCrl) + require.NoError(t, err, "failed decoding CRL") + serials := extractSerialsFromCrl(t, crl) + require.Contains(t, serials, "37:60:16:e4:85:d5:96:38:3a:ed:31:06:8d:ed:7a:46:d4:22:63:d8") + require.Contains(t, serials, "27:03:89:76:5a:d4:d8:19:48:47:ca:96:db:6f:27:86:31:92:9f:82") + require.Contains(t, serials, "27:03:89:76:5a:d4:d8:19:48:47:ca:96:db:6f:27:86:31:92:9f:81") + require.Equal(t, 3, len(serials), "expected 3 serials within CRL") + + // Make sure extensions on serials match what we expect. 
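+	// (Extension values round-trip as raw bytes: parseExtValue base64-decodes
+	// the request's "value" field, so "aGVsbG8=" surfaces here as []byte("hello").)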
require.Equal(t, 0, len(crl.RevokedCertificates[0].Extensions), "Expected no extensions on 1st serial")
+	require.Equal(t, 0, len(crl.RevokedCertificates[1].Extensions), "Expected no extensions on 2nd serial")
+	require.Equal(t, 2, len(crl.RevokedCertificates[2].Extensions), "Expected 2 extensions on 3rd serial")
+	require.Equal(t, "2.5.29.100", crl.RevokedCertificates[2].Extensions[0].Id.String())
+	require.True(t, crl.RevokedCertificates[2].Extensions[0].Critical)
+	require.Equal(t, []byte("hello"), crl.RevokedCertificates[2].Extensions[0].Value)
+
+	require.Equal(t, "2.5.29.101", crl.RevokedCertificates[2].Extensions[1].Id.String())
+	require.False(t, crl.RevokedCertificates[2].Extensions[1].Critical)
+	require.Equal(t, []byte("bye"), crl.RevokedCertificates[2].Extensions[1].Value)
+
+	// CRL Number and times
+	require.Equal(t, big.NewInt(int64(1)), crl.Number)
+	require.Equal(t, crl.ThisUpdate.Add(12*time.Hour), crl.NextUpdate)
+
+	// Verify top level extensions are present
+	extensions := crl.Extensions
+	requireExtensionOid(t, []int{2, 5, 29, 20}, extensions)  // CRL Number Extension
+	requireExtensionOid(t, []int{2, 5, 29, 35}, extensions)  // akidOid
+	requireExtensionOid(t, []int{2, 5, 29, 200}, extensions) // Added value from param
+	requireExtensionOid(t, []int{2, 5, 29, 201}, extensions) // Added value from param
+	require.Equal(t, 4, len(extensions))
+
+	// Signature
+	err = crl.CheckSignatureFrom(caCert)
+	require.NoError(t, err, "failed signature check of CRL")
+}
+
+func TestSignRevocationList_NoRevokedCerts(t *testing.T) {
+	t.Parallel()
+	b, s := CreateBackendWithStorage(t)
+	resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{
+		"common_name": "test.com",
+	})
+	requireSuccessNonNilResponse(t, resp, err)
+
+	resp, err = CBWrite(b, s, "issuer/default/sign-revocation-list", map[string]interface{}{
+		"crl_number":  "10000",
+		"next_update": "12h",
+		"format":      "pem",
+	})
+	schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issuer/default/sign-revocation-list"), logical.UpdateOperation), resp, true)
+	requireSuccessNonNilResponse(t, resp, err)
+	requireFieldsSetInResp(t, resp, "crl")
+	pemCrl := resp.Data["crl"].(string)
+	crl, err := decodePemCrl(pemCrl)
+	require.NoError(t, err, "failed decoding CRL")
+
+	serials := extractSerialsFromCrl(t, crl)
+	require.Equal(t, 0, len(serials), "no serials were expected in CRL")
+
+	require.Equal(t, big.NewInt(int64(10000)), crl.Number)
+	require.Equal(t, crl.ThisUpdate.Add(12*time.Hour), crl.NextUpdate)
+}
+
+func TestSignRevocationList_ReservedExtensions(t *testing.T) {
+	t.Parallel()
+
+	reservedOids := []asn1.ObjectIdentifier{
+		akOid, deltaCrlOid, crlNumOid,
+	}
+	// Validate there aren't copy/paste issues with our constants...
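+	// (These are the standard CRL extension arcs from RFC 5280, Section 5.2:
+	// Authority Key Identifier is 2.5.29.35, CRL Number is 2.5.29.20, and
+	// Delta CRL Indicator is 2.5.29.27.)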
require.Equal(t, asn1.ObjectIdentifier{2, 5, 29, 27}, deltaCrlOid) // Delta CRL Extension
+	require.Equal(t, asn1.ObjectIdentifier{2, 5, 29, 20}, crlNumOid)   // CRL Number Extension
+	require.Equal(t, asn1.ObjectIdentifier{2, 5, 29, 35}, akOid)       // akidOid
+
+	for _, reservedOid := range reservedOids {
+		t.Run(reservedOid.String(), func(t *testing.T) {
+			b, s := CreateBackendWithStorage(t)
+			resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{
+				"common_name": "test.com",
+			})
+			requireSuccessNonNilResponse(t, resp, err)
+
+			resp, err = CBWrite(b, s, "issuer/default/sign-revocation-list", map[string]interface{}{
+				"crl_number":  "1",
+				"next_update": "12h",
+				"format":      "pem",
+				"extensions": []map[string]interface{}{
+					{
+						"id":       reservedOid.String(),
+						"critical": "false",
+						"value":    base64.StdEncoding.EncodeToString([]byte("hello")),
+					},
+				},
+			})
+
+			require.ErrorContains(t, err, "is reserved")
+		})
+	}
+}
+
+func setupResignCrlMounts(t *testing.T, b1 *backend, s1 logical.Storage, b2 *backend, s2 logical.Storage) (*x509.Certificate, string, string, string, string) {
+	t.Helper()
+
+	// Setup two mounts with the same CA/key material
+	resp, err := CBWrite(b1, s1, "root/generate/exported", map[string]interface{}{
+		"common_name": "test.com",
+	})
+	requireSuccessNonNilResponse(t, resp, err)
+	requireFieldsSetInResp(t, resp, "certificate", "private_key")
+	pemCaCert := resp.Data["certificate"].(string)
+	caCert := parseCert(t, pemCaCert)
+	privKey := resp.Data["private_key"].(string)
+
+	// Import the above key/cert into another mount
+	resp, err = CBWrite(b2, s2, "config/ca", map[string]interface{}{
+		"pem_bundle": pemCaCert + "\n" + privKey,
+	})
+	requireSuccessNonNilResponse(t, resp, err, "error setting up CA on backend 2")
+
+	// Create the same role in both mounts
+	resp, err = CBWrite(b1, s1, "roles/test", map[string]interface{}{
+		"allowed_domains":  "test.com",
+		"allow_subdomains": "true",
+		"max_ttl":          "1h",
+	})
+	requireSuccessNonNilResponse(t, resp, err, "error setting up pki role on backend 1")
+
+	resp, err = CBWrite(b2, s2, "roles/test", map[string]interface{}{
+		"allowed_domains":  "test.com",
+		"allow_subdomains": "true",
+		"max_ttl":          "1h",
+	})
+	requireSuccessNonNilResponse(t, resp, err, "error setting up pki role on backend 2")
+
+	// Issue and revoke a cert in backend 1
+	resp, err = CBWrite(b1, s1, "issue/test", map[string]interface{}{
+		"common_name": "test1.test.com",
+	})
+	requireSuccessNonNilResponse(t, resp, err, "error issuing cert from backend 1")
+	requireFieldsSetInResp(t, resp, "serial_number")
+	serial1 := resp.Data["serial_number"].(string)
+
+	resp, err = CBWrite(b1, s1, "revoke", map[string]interface{}{"serial_number": serial1})
+	requireSuccessNonNilResponse(t, resp, err, "error revoking cert from backend 1")
+
+	// Issue and revoke a cert in backend 2
+	resp, err = CBWrite(b2, s2, "issue/test", map[string]interface{}{
+		"common_name": "test1.test.com",
+	})
+	requireSuccessNonNilResponse(t, resp, err, "error issuing cert from backend 2")
+	requireFieldsSetInResp(t, resp, "serial_number")
+	serial2 := resp.Data["serial_number"].(string)
+
+	resp, err = CBWrite(b2, s2, "revoke", map[string]interface{}{"serial_number": serial2})
+	requireSuccessNonNilResponse(t, resp, err, "error revoking cert from backend 2")
+
+	// Fetch PEM CRLs from each
+	resp, err = CBRead(b1, s1, "cert/crl")
+	requireSuccessNonNilResponse(t, resp, err, "error fetch crl from backend 1")
+	requireFieldsSetInResp(t, resp, "certificate")
+	crl1 := 
resp.Data["certificate"].(string) + + resp, err = CBRead(b2, s2, "cert/crl") + requireSuccessNonNilResponse(t, resp, err, "error fetch crl from backend 2") + requireFieldsSetInResp(t, resp, "certificate") + crl2 := resp.Data["certificate"].(string) + return caCert, serial1, serial2, crl1, crl2 +} + +func requireExtensionOid(t *testing.T, identifier asn1.ObjectIdentifier, extensions []pkix.Extension, msgAndArgs ...interface{}) { + t.Helper() + + found := false + var oidsInExtensions []string + for _, extension := range extensions { + oidsInExtensions = append(oidsInExtensions, extension.Id.String()) + if extension.Id.Equal(identifier) { + found = true + break + } + } + + if !found { + msg := fmt.Sprintf("Failed to find matching asn oid %s out of %v", identifier.String(), oidsInExtensions) + require.Fail(t, msg, msgAndArgs) + } +} + +func extractSerialsFromCrl(t *testing.T, crl *x509.RevocationList) map[string]time.Time { + t.Helper() + + serials := map[string]time.Time{} + + for _, revokedCert := range crl.RevokedCertificates { + serial := serialFromBigInt(revokedCert.SerialNumber) + if _, exists := serials[serial]; exists { + t.Fatalf("Serial number %s was duplicated in CRL", serial) + } + serials[serial] = revokedCert.RevocationTime + } + return serials +} diff --git a/builtin/logical/pki/path_revoke.go b/builtin/logical/pki/path_revoke.go new file mode 100644 index 0000000..2decd50 --- /dev/null +++ b/builtin/logical/pki/path_revoke.go @@ -0,0 +1,889 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/subtle" + "crypto/x509" + "encoding/pem" + "fmt" + "net/http" + "strings" + "time" + + "github.com/hashicorp/vault/sdk/helper/consts" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathListCertsRevoked(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "certs/revoked/?$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "revoked-certs", + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ListOperation: &framework.PathOperation{ + Callback: b.pathListRevokedCertsHandler, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "keys": { + Type: framework.TypeStringSlice, + Description: `List of Keys`, + Required: false, + }, + }, + }}, + }, + }, + }, + + HelpSynopsis: pathListRevokedHelpSyn, + HelpDescription: pathListRevokedHelpDesc, + } +} + +func pathListCertsRevocationQueue(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "certs/revocation-queue/?$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "certs-revocation-queue", + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ListOperation: &framework.PathOperation{ + Callback: b.pathListRevocationQueueHandler, + }, + }, + + HelpSynopsis: pathListRevocationQueueHelpSyn, + HelpDescription: pathListRevocationQueueHelpDesc, + } +} + +func pathRevoke(b *backend) *framework.Path { + return &framework.Path{ + Pattern: `revoke`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "revoke", + }, + + Fields: 
map[string]*framework.FieldSchema{
+			"serial_number": {
+				Type: framework.TypeString,
+				Description: `Certificate serial number, in colon- or
+hyphen-separated hexadecimal`,
+			},
+			"certificate": {
+				Type: framework.TypeString,
+				Description: `Certificate to revoke in PEM format; must be
+signed by an issuer in this mount.`,
+			},
+		},
+
+		Operations: map[logical.Operation]framework.OperationHandler{
+			logical.UpdateOperation: &framework.PathOperation{
+				Callback: b.metricsWrap("revoke", noRole, b.pathRevokeWrite),
+				// This should never be forwarded. See backend.go for more information.
+				// If this needs to write, the entire request will be forwarded to the
+				// active node of the current performance cluster, but we don't want to
+				// forward invalid revoke requests there.
+				Responses: map[int][]framework.Response{
+					http.StatusOK: {{
+						Description: "OK",
+						Fields: map[string]*framework.FieldSchema{
+							"revocation_time": {
+								Type:        framework.TypeInt64,
+								Description: `Revocation Time`,
+								Required:    false,
+							},
+							"revocation_time_rfc3339": {
+								Type:        framework.TypeTime,
+								Description: `Revocation Time`,
+								Required:    false,
+							},
+							"state": {
+								Type:        framework.TypeString,
+								Description: `Revocation State`,
+								Required:    false,
+							},
+						},
+					}},
+				},
+			},
+		},
+
+		HelpSynopsis:    pathRevokeHelpSyn,
+		HelpDescription: pathRevokeHelpDesc,
+	}
+}
+
+func pathRevokeWithKey(b *backend) *framework.Path {
+	return &framework.Path{
+		Pattern: `revoke-with-key`,
+
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixPKI,
+			OperationVerb:   "revoke",
+			OperationSuffix: "with-key",
+		},
+
+		Fields: map[string]*framework.FieldSchema{
+			"serial_number": {
+				Type: framework.TypeString,
+				Description: `Certificate serial number, in colon- or
+hyphen-separated hexadecimal`,
+			},
+			"certificate": {
+				Type: framework.TypeString,
+				Description: `Certificate to revoke in PEM format; must be
+signed by an issuer in this mount.`,
+			},
+			"private_key": {
+				Type: framework.TypeString,
+				Description: `Key to use to verify revocation permission; must
+be in PEM format.`,
+			},
+		},
+
+		Operations: map[logical.Operation]framework.OperationHandler{
+			logical.UpdateOperation: &framework.PathOperation{
+				Callback: b.metricsWrap("revoke", noRole, b.pathRevokeWrite),
+				// This should never be forwarded. See backend.go for more information.
+				// If this needs to write, the entire request will be forwarded to the
+				// active node of the current performance cluster, but we don't want to
+				// forward invalid revoke requests there.
+ Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "revocation_time": { + Type: framework.TypeInt64, + Description: `Revocation Time`, + Required: false, + }, + "revocation_time_rfc3339": { + Type: framework.TypeTime, + Description: `Revocation Time`, + Required: false, + }, + "state": { + Type: framework.TypeString, + Description: `Revocation State`, + Required: false, + }, + }, + }}, + }, + }, + }, + + HelpSynopsis: pathRevokeHelpSyn, + HelpDescription: pathRevokeHelpDesc, + } +} + +func pathRotateCRL(b *backend) *framework.Path { + return &framework.Path{ + Pattern: `crl/rotate`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "rotate", + OperationSuffix: "crl", + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathRotateCRLRead, + // See backend.go; we will read a lot of data prior to calling write, + // so this request should be forwarded when it is first seen, not + // when it is ready to write. + ForwardPerformanceStandby: true, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "success": { + Type: framework.TypeBool, + Description: `Whether rotation was successful`, + Required: true, + }, + }, + }}, + }, + }, + }, + + HelpSynopsis: pathRotateCRLHelpSyn, + HelpDescription: pathRotateCRLHelpDesc, + } +} + +func pathRotateDeltaCRL(b *backend) *framework.Path { + return &framework.Path{ + Pattern: `crl/rotate-delta`, + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "rotate", + OperationSuffix: "delta-crl", + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathRotateDeltaCRLRead, + // See backend.go; we will read a lot of data prior to calling write, + // so this request should be forwarded when it is first seen, not + // when it is ready to write. 
+ ForwardPerformanceStandby: true, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "success": { + Type: framework.TypeBool, + Description: `Whether rotation was successful`, + Required: true, + }, + }, + }}, + }, + }, + }, + + HelpSynopsis: pathRotateDeltaCRLHelpSyn, + HelpDescription: pathRotateDeltaCRLHelpDesc, + } +} + +func pathListUnifiedRevoked(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "certs/unified-revoked/?$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "unified-revoked-certs", + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ListOperation: &framework.PathOperation{ + Callback: b.pathListUnifiedRevokedCertsHandler, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "keys": { + Type: framework.TypeStringSlice, + Description: `List of Keys`, + Required: false, + }, + "key_info": { + Type: framework.TypeString, + Description: `Key information`, + Required: false, + }, + }, + }}, + }, + }, + }, + + HelpSynopsis: pathListUnifiedRevokedHelpSyn, + HelpDescription: pathListUnifiedRevokedHelpDesc, + } +} + +func (b *backend) pathRevokeWriteHandleCertificate(ctx context.Context, req *logical.Request, certPem string) (string, bool, *x509.Certificate, error) { + // This function handles just the verification of the certificate against + // the global issuer set, checking whether or not it is importable. + // + // We return the parsed serial number, an optionally-nil byte array to + // write out to disk, and an error if one occurred. + if b.useLegacyBundleCaStorage() { + // We require listing all issuers from the 1.11 method. If we're + // still using the legacy CA bundle but with the newer certificate + // attribute, we err and require the operator to upgrade and migrate + // prior to servicing new requests. + return "", false, nil, errutil.UserError{Err: "unable to process BYOC revocation until CA issuer migration has completed"} + } + + // First start by parsing the certificate. + if len(certPem) < 75 { + // See note in pathImportIssuers about this check. + return "", false, nil, errutil.UserError{Err: "provided certificate data was too short; perhaps a path was passed to the API rather than the contents of a PEM file"} + } + + pemBlock, _ := pem.Decode([]byte(certPem)) + if pemBlock == nil { + return "", false, nil, errutil.UserError{Err: "certificate contains no PEM data"} + } + + certReference, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + return "", false, nil, errutil.UserError{Err: fmt.Sprintf("certificate could not be parsed: %v", err)} + } + + // Ensure we have a well-formed serial number before continuing. + serial := serialFromCert(certReference) + if len(serial) == 0 { + return "", false, nil, errutil.UserError{Err: "invalid serial number on presented certificate"} + } + + // We have two approaches here: we could start verifying against issuers + // (which involves fetching and parsing them), or we could see if, by + // some chance we've already imported it (cheap). The latter tells us + // if we happen to have a serial number collision (which shouldn't + // happen in practice) versus an already-imported cert (which might + // happen and its fine to handle safely). + // + // Start with the latter since its cheaper. 
Fetch the cert (by serial) + // and if it exists, compare the contents. + sc := b.makeStorageContext(ctx, req.Storage) + certEntry, err := fetchCertBySerial(sc, "certs/", serial) + if err != nil { + return serial, false, nil, err + } + + if certEntry != nil { + // As seen with importing issuers, it is best to parse the certificate + // and compare parsed values, rather than attempting to infer equality + // from the raw data. + certReferenceStored, err := x509.ParseCertificate(certEntry.Value) + if err != nil { + return serial, false, nil, err + } + + if !areCertificatesEqual(certReference, certReferenceStored) { + // Here we refuse the import with an error because the two certs + // are unequal but we would've otherwise overwritten the existing + // copy. + return serial, false, nil, fmt.Errorf("certificate with same serial but unequal value already present in this cluster's storage; refusing to revoke") + } else { + // Otherwise, we can return without an error as we've already + // imported this certificate, likely when we issued it. We don't + // need to re-verify the signature as we assume it was already + // verified when it was imported. + return serial, false, certReferenceStored, nil + } + } + + // Otherwise, we must not have a stored copy. From here on out, the second + // parameter (except in error cases) should cause the cert to write out. + // + // Fetch and iterate through each issuer. + issuers, err := sc.listIssuers() + if err != nil { + return serial, false, nil, err + } + + foundMatchingIssuer := false + for _, issuerId := range issuers { + issuer, err := sc.fetchIssuerById(issuerId) + if err != nil { + return serial, false, nil, err + } + + issuerCert, err := issuer.GetCertificate() + if err != nil { + return serial, false, nil, err + } + + if err := certReference.CheckSignatureFrom(issuerCert); err == nil { + // If the signature was valid, we found our match and can safely + // exit. + foundMatchingIssuer = true + break + } + } + + if foundMatchingIssuer { + return serial, true, certReference, nil + } + + return serial, false, nil, errutil.UserError{Err: "unable to verify signature on presented cert from any present issuer in this mount; certificates from previous CAs will need to have their issuing CA and key re-imported if revocation is necessary"} +} + +func (b *backend) pathRevokeWriteHandleKey(req *logical.Request, certReference *x509.Certificate, keyPem string) error { + if keyPem == "" { + // The only way to get here should be via the /revoke endpoint; + // validate the path one more time and return an error if necessary. + if req.Path != "revoke" { + return fmt.Errorf("must have private key to revoke via the /revoke-with-key path") + } + + // Otherwise, we don't need to validate the key and thus can return + // with success. + return nil + } + + // Now parse the key's PEM block. + pemBlock, _ := pem.Decode([]byte(keyPem)) + if pemBlock == nil { + return errutil.UserError{Err: "provided key PEM block contained no data or failed to parse"} + } + + // Parse the inner DER key. 
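+	//
+	// certutil.ParseDERKey is expected to cope with the common private key
+	// encodings (PKCS#1 RSA, SEC1 EC, and PKCS#8) and return a crypto.Signer
+	// that we can match against the certificate's public key below.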
+ signer, _, err := certutil.ParseDERKey(pemBlock.Bytes) + if err != nil { + return fmt.Errorf("failed to parse provided private key: %w", err) + } + + return validatePrivateKeyMatchesCert(signer, certReference) +} + +func validatePrivateKeyMatchesCert(signer crypto.Signer, certReference *x509.Certificate) error { + public := signer.Public() + + switch certReference.PublicKey.(type) { + case *rsa.PublicKey: + rsaPriv, ok := signer.(*rsa.PrivateKey) + if !ok { + return errutil.UserError{Err: "provided private key type does not match certificate's public key type"} + } + + if err := rsaPriv.Validate(); err != nil { + return errutil.UserError{Err: fmt.Sprintf("error validating integrity of private key: %v", err)} + } + } + + return validatePublicKeyMatchesCert(public, certReference) +} + +func validatePublicKeyMatchesCert(verifier crypto.PublicKey, certReference *x509.Certificate) error { + // Finally, verify if the cert and key match. This code has been + // cribbed from the Go TLS config code, with minor modifications. + // + // In particular, we validate against the derived public key + // components and ensure we validate exponent and curve information + // as well. + // + // See: https://github.com/golang/go/blob/c6a2dada0df8c2d75cf3ae599d7caed77d416fa2/src/crypto/tls/tls.go#L304-L331 + switch certPub := certReference.PublicKey.(type) { + case *rsa.PublicKey: + privPub, ok := verifier.(*rsa.PublicKey) + if !ok { + return errutil.UserError{Err: "provided private key type does not match certificate's public key type"} + } + if certPub.N.Cmp(privPub.N) != 0 || certPub.E != privPub.E { + return errutil.UserError{Err: "provided private key does not match certificate's public key"} + } + case *ecdsa.PublicKey: + privPub, ok := verifier.(*ecdsa.PublicKey) + if !ok { + return errutil.UserError{Err: "provided private key type does not match certificate's public key type"} + } + if certPub.X.Cmp(privPub.X) != 0 || certPub.Y.Cmp(privPub.Y) != 0 || certPub.Params().Name != privPub.Params().Name { + return errutil.UserError{Err: "provided private key does not match certificate's public key"} + } + case ed25519.PublicKey: + privPub, ok := verifier.(ed25519.PublicKey) + if !ok { + return errutil.UserError{Err: "provided private key type does not match certificate's public key type"} + } + if subtle.ConstantTimeCompare(privPub, certPub) == 0 { + return errutil.UserError{Err: "provided private key does not match certificate's public key"} + } + default: + return errutil.UserError{Err: "certificate has an unknown public key algorithm; unable to validate provided private key; ask an admin to revoke this certificate instead"} + } + + return nil +} + +func (b *backend) maybeRevokeCrossCluster(sc *storageContext, config *crlConfig, serial string, havePrivateKey bool) (*logical.Response, error) { + if !config.UseGlobalQueue { + return logical.ErrorResponse(fmt.Sprintf("certificate with serial %s not found.", serial)), nil + } + + if havePrivateKey { + return logical.ErrorResponse(fmt.Sprintf("certificate with serial %s not found, "+ + "and cross-cluster revocation not supported with key revocation.", serial)), nil + } + + // Here, we have to use the global revocation queue as the cert + // was not found on this current cluster. 
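+	//
+	// The queue entry itself is minimal: a JSON blob holding the request
+	// timestamp, stored under crossRevocationPath + <normalized serial> so
+	// the cluster that issued the certificate can discover and act on it.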
+ currTime := time.Now() + nSerial := normalizeSerial(serial) + queueReq := revocationRequest{ + RequestedAt: currTime, + } + path := crossRevocationPath + nSerial + + reqEntry, err := logical.StorageEntryJSON(path, queueReq) + if err != nil { + return nil, fmt.Errorf("failed to create storage entry for cross-cluster revocation request: %w", err) + } + + if err := sc.Storage.Put(sc.Context, reqEntry); err != nil { + return nil, fmt.Errorf("error persisting cross-cluster revocation request: %w", err) + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "state": "pending", + }, + } + resp.AddWarning("Revocation request was not found on this present node. This request will be in a pending state until the PR cluster which issued this certificate sees the request and revokes the certificate. If no online cluster has this certificate, the request will eventually be removed without revoking any certificates.") + return resp, nil +} + +func (b *backend) pathRevokeWrite(ctx context.Context, req *logical.Request, data *framework.FieldData, _ *roleEntry) (*logical.Response, error) { + rawSerial, haveSerial := data.GetOk("serial_number") + rawCertificate, haveCert := data.GetOk("certificate") + sc := b.makeStorageContext(ctx, req.Storage) + + if !haveSerial && !haveCert { + return logical.ErrorResponse("The serial number or certificate to revoke must be provided."), nil + } else if haveSerial && haveCert { + return logical.ErrorResponse("Must provide either the certificate or the serial to revoke; not both."), nil + } + + var keyPem string + if req.Path == "revoke-with-key" { + rawKey, haveKey := data.GetOk("private_key") + if !haveKey { + return logical.ErrorResponse("Must have private key to revoke via the /revoke-with-key path."), nil + } + + keyPem = rawKey.(string) + if len(keyPem) < 64 { + // See note in pathImportKeyHandler... + return logical.ErrorResponse("Provided data for private_key was too short; perhaps a path was passed to the API rather than the contents of a PEM file?"), nil + } + } + + writeCert := false + var cert *x509.Certificate + var serial string + + config, err := sc.Backend.crlBuilder.getConfigWithUpdate(sc) + if err != nil { + return nil, fmt.Errorf("error revoking serial: %s: failed reading config: %w", serial, err) + } + + if haveCert { + serial, writeCert, cert, err = b.pathRevokeWriteHandleCertificate(ctx, req, rawCertificate.(string)) + if err != nil { + return nil, err + } + } else { + // Easy case: this cert should be in storage already. + serial = rawSerial.(string) + if len(serial) == 0 { + return logical.ErrorResponse("The serial number must be provided"), nil + } + + certEntry, err := fetchCertBySerial(sc, "certs/", serial) + if err != nil { + switch err.(type) { + case errutil.UserError: + return logical.ErrorResponse(err.Error()), nil + default: + return nil, err + } + } + + if certEntry != nil { + cert, err = x509.ParseCertificate(certEntry.Value) + if err != nil { + return nil, fmt.Errorf("error parsing certificate: %w", err) + } + } + } + + if cert == nil { + if config.UnifiedCRL { + // Saving grace if we aren't able to load the certificate locally/or were given it, + // if we have a unified revocation entry already return its revocation times, + // otherwise we fail with a certificate not found message. 
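+			//
+			// (When a unified entry exists the response mirrors a local
+			// revocation: revocation_time as Unix seconds plus an RFC 3339
+			// rendering of the same instant.)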
+ unifiedRev, err := getUnifiedRevocationBySerial(sc, normalizeSerial(serial)) + if err != nil { + return nil, err + } + if unifiedRev != nil { + return &logical.Response{ + Data: map[string]interface{}{ + "revocation_time": unifiedRev.RevocationTimeUTC.Unix(), + "revocation_time_rfc3339": unifiedRev.RevocationTimeUTC.Format(time.RFC3339Nano), + }, + }, nil + } + } + + return b.maybeRevokeCrossCluster(sc, config, serial, keyPem != "") + } + + // Before we write the certificate, we've gotta verify the request in + // the event of a PoP-based revocation scheme; we don't want to litter + // storage with issued-but-not-revoked certificates. + if err := b.pathRevokeWriteHandleKey(req, cert, keyPem); err != nil { + return nil, err + } + + // At this point, a forward operation will occur if we're on a standby + // node as we're now attempting to write the bytes of the cert out to + // disk. + if writeCert { + err := req.Storage.Put(ctx, &logical.StorageEntry{ + Key: "certs/" + normalizeSerial(serial), + Value: cert.Raw, + }) + if err != nil { + return nil, err + } + } + + // Assumption: this check is cheap. Call this twice, in the cert-import + // case, to allow cert verification to get rejected on the standby node, + // but we still need it to protect the serial number case. + if b.System().ReplicationState().HasState(consts.ReplicationPerformanceStandby) { + return nil, logical.ErrReadOnly + } + + b.revokeStorageLock.Lock() + defer b.revokeStorageLock.Unlock() + + return revokeCert(sc, config, cert) +} + +func (b *backend) pathRotateCRLRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + b.revokeStorageLock.RLock() + defer b.revokeStorageLock.RUnlock() + + sc := b.makeStorageContext(ctx, req.Storage) + warnings, crlErr := b.crlBuilder.rebuild(sc, false) + if crlErr != nil { + switch crlErr.(type) { + case errutil.UserError: + return logical.ErrorResponse(fmt.Sprintf("Error during CRL building: %s", crlErr)), nil + default: + return nil, fmt.Errorf("error encountered during CRL building: %w", crlErr) + } + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "success": true, + }, + } + + for index, warning := range warnings { + resp.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) + } + + return resp, nil +} + +func (b *backend) pathRotateDeltaCRLRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + sc := b.makeStorageContext(ctx, req.Storage) + + cfg, err := b.crlBuilder.getConfigWithUpdate(sc) + if err != nil { + return nil, fmt.Errorf("error fetching CRL configuration: %w", err) + } + + isEnabled := cfg.EnableDelta + + warnings, crlErr := b.crlBuilder.rebuildDeltaCRLsIfForced(sc, true) + if crlErr != nil { + switch crlErr.(type) { + case errutil.UserError: + return logical.ErrorResponse(fmt.Sprintf("Error during delta CRL building: %s", crlErr)), nil + default: + return nil, fmt.Errorf("error encountered during delta CRL building: %w", crlErr) + } + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "success": true, + }, + } + + if !isEnabled { + resp.AddWarning("requested rebuild of delta CRL when delta CRL is not enabled; this is a no-op") + } + for index, warning := range warnings { + resp.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) + } + + return resp, nil +} + +func (b *backend) pathListRevokedCertsHandler(ctx context.Context, request *logical.Request, _ *framework.FieldData) (*logical.Response, 
error) {
+	sc := b.makeStorageContext(ctx, request.Storage)
+
+	revokedCerts, err := sc.listRevokedCerts()
+	if err != nil {
+		return nil, err
+	}
+
+	// Normalize serial back to a format people are expecting.
+	for i, serial := range revokedCerts {
+		revokedCerts[i] = denormalizeSerial(serial)
+	}
+
+	return logical.ListResponse(revokedCerts), nil
+}
+
+func (b *backend) pathListRevocationQueueHandler(ctx context.Context, request *logical.Request, _ *framework.FieldData) (*logical.Response, error) {
+	var responseKeys []string
+	responseInfo := make(map[string]interface{})
+
+	sc := b.makeStorageContext(ctx, request.Storage)
+
+	clusters, err := sc.Storage.List(sc.Context, crossRevocationPrefix)
+	if err != nil {
+		return nil, fmt.Errorf("failed to list cross-cluster revocation queue participating clusters: %w", err)
+	}
+
+	for cIndex, cluster := range clusters {
+		cluster = cluster[0 : len(cluster)-1]
+		cPath := crossRevocationPrefix + cluster + "/"
+		serials, err := sc.Storage.List(sc.Context, cPath)
+		if err != nil {
+			return nil, fmt.Errorf("failed to list cross-cluster revocation queue entries for cluster %v (%v): %w", cluster, cIndex, err)
+		}
+
+		for _, serial := range serials {
+			// Always strip the slash out; it indicates the presence of
+			// a confirmed revocation, which we add to the main serial's
+			// entry.
+			hasSlash := serial[len(serial)-1] == '/'
+			if hasSlash {
+				serial = serial[0 : len(serial)-1]
+			}
+			serial = denormalizeSerial(serial)
+
+			var data map[string]interface{}
+			rawData, isPresent := responseInfo[serial]
+			if !isPresent {
+				data = map[string]interface{}{}
+				responseKeys = append(responseKeys, serial)
+			} else {
+				data = rawData.(map[string]interface{})
+			}
+
+			if hasSlash {
+				data["confirmed"] = true
+				data["confirmation_cluster"] = cluster
+			} else {
+				data["requesting_cluster"] = cluster
+			}
+
+			responseInfo[serial] = data
+		}
+	}
+
+	return logical.ListResponseWithInfo(responseKeys, responseInfo), nil
+}
+
+func (b *backend) pathListUnifiedRevokedCertsHandler(ctx context.Context, request *logical.Request, _ *framework.FieldData) (*logical.Response, error) {
+	sc := b.makeStorageContext(ctx, request.Storage)
+	responseKeys := []string{}
+	responseInfo := make(map[string]interface{})
+
+	clusterPathsById, err := lookupUnifiedClusterPaths(sc)
+	if err != nil {
+		return nil, err
+	}
+
+	for clusterId := range clusterPathsById {
+		clusterSerials, err := listClusterSpecificUnifiedRevokedCerts(sc, clusterId)
+		if err != nil {
+			return nil, err
+		}
+		for _, serial := range clusterSerials {
+			if strings.HasSuffix(serial, "/") {
+				// Skip folders as they wouldn't be a proper revocation
+				continue
+			}
+			colonSerial := denormalizeSerial(serial)
+			var data map[string][]string
+			rawData, isPresent := responseInfo[colonSerial]
+			if !isPresent {
+				responseKeys = append(responseKeys, colonSerial)
+				data = map[string][]string{}
+			} else {
+				data = rawData.(map[string][]string)
+			}
+
+			data["revoking_clusters"] = append(data["revoking_clusters"], clusterId)
+			responseInfo[colonSerial] = data
+		}
+	}
+
+	return logical.ListResponseWithInfo(responseKeys, responseInfo), nil
+}
+
+const pathRevokeHelpSyn = `
+Revoke a certificate by serial number or with explicit certificate.
+
+When calling /revoke-with-key, the private key corresponding to the
+certificate must be provided to authenticate the request.
+`
+
+const pathRevokeHelpDesc = `
+This allows certificates to be revoked. A root token or corresponding
+private key is required.
+` + +const pathRotateCRLHelpSyn = ` +Force a rebuild of the CRL. +` + +const pathRotateCRLHelpDesc = ` +Force a rebuild of the CRL. This can be used to remove expired certificates from it if no certificates have been revoked. A root token is required. +` + +const pathRotateDeltaCRLHelpSyn = ` +Force a rebuild of the delta CRL. +` + +const pathRotateDeltaCRLHelpDesc = ` +Force a rebuild of the delta CRL. This can be used to force an update of the otherwise periodically-rebuilt delta CRLs. +` + +const pathListRevokedHelpSyn = ` +List all revoked serial numbers within the local cluster +` + +const pathListRevokedHelpDesc = ` +Returns a list of serial numbers for revoked certificates in the local cluster. +` + +const pathListUnifiedRevokedHelpSyn = ` +List all revoked serial numbers within this cluster's unified storage area. +` + +const pathListUnifiedRevokedHelpDesc = ` +Returns a list of serial numbers for revoked certificates within this cluster's unified storage. +` + +const pathListRevocationQueueHelpSyn = ` +List all pending, cross-cluster revocations known to the local cluster. +` + +const pathListRevocationQueueHelpDesc = ` +Returns a detailed list containing serial number, requesting cluster, and +optionally a confirming cluster. +` diff --git a/builtin/logical/pki/path_roles.go b/builtin/logical/pki/path_roles.go new file mode 100644 index 0000000..5b7d37e --- /dev/null +++ b/builtin/logical/pki/path_roles.go @@ -0,0 +1,1672 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "context" + "crypto/x509" + "encoding/json" + "fmt" + "net/http" + "strings" + "time" + + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathListRoles(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "roles/?$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "roles", + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ListOperation: &framework.PathOperation{ + Callback: b.pathRoleList, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "keys": { + Type: framework.TypeStringSlice, + Description: "List of roles", + Required: true, + }, + }, + }}, + }, + }, + }, + + HelpSynopsis: pathListRolesHelpSyn, + HelpDescription: pathListRolesHelpDesc, + } +} + +func pathRoles(b *backend) *framework.Path { + pathRolesResponseFields := map[string]*framework.FieldSchema{ + "ttl": { + Type: framework.TypeInt64, + Required: true, + Description: `The lease duration (validity period of the +certificate) if no specific lease duration is requested. +The lease duration controls the expiration of certificates +issued by this backend. Defaults to the system default +value or the value of max_ttl, whichever is shorter.`, + }, + + "max_ttl": { + Type: framework.TypeInt64, + Required: true, + Description: `The maximum allowed lease duration. 
If not
+set, defaults to the system maximum lease TTL.`,
+		},
+		"allow_token_displayname": {
+			Type:     framework.TypeBool,
+			Required: true,
+			Description: `If set, clients can request certificates matching
+the value of Display Name from the requesting token. See the documentation
+for more information.`,
+		},
+
+		"allow_localhost": {
+			Type:     framework.TypeBool,
+			Required: true,
+			Description: `Whether to allow "localhost" and "localdomain"
+as a valid common name in a request, independent of allowed_domains value.`,
+		},
+
+		"allowed_domains": {
+			Type:     framework.TypeCommaStringSlice,
+			Required: true,
+			Description: `Specifies the domains this role is allowed
+to issue certificates for. This is used with the allow_bare_domains,
+allow_subdomains, and allow_glob_domains to determine matches for the
+common name, DNS-typed SAN entries, and Email-typed SAN entries of
+certificates. See the documentation for more information. This parameter
+accepts a comma-separated string or list of domains.`,
+		},
+		"allowed_domains_template": {
+			Type:     framework.TypeBool,
+			Required: true,
+			Description: `If set, Allowed domains can be specified using identity template policies.
+ Non-templated domains are also permitted.`,
+		},
+		"allow_bare_domains": {
+			Type:     framework.TypeBool,
+			Required: true,
+			Description: `If set, clients can request certificates
+for the base domains themselves, e.g. "example.com" of domains listed
+in allowed_domains. This is a separate option as in some cases this can
+be considered a security threat. See the documentation for more
+information.`,
+		},
+
+		"allow_subdomains": {
+			Type:     framework.TypeBool,
+			Required: true,
+			Description: `If set, clients can request certificates for
+subdomains of domains listed in allowed_domains, including wildcard
+subdomains. See the documentation for more information.`,
+		},
+
+		"allow_glob_domains": {
+			Type:     framework.TypeBool,
+			Required: true,
+			Description: `If set, domains specified in allowed_domains
+can include shell-style glob patterns, e.g. "ftp*.example.com".
+See the documentation for more information.`,
+		},
+
+		"allow_wildcard_certificates": {
+			Type:     framework.TypeBool,
+			Required: true,
+			Description: `If set, allows certificates with wildcards in
+the common name to be issued, conforming to RFC 6125's Section 6.4.3; e.g.,
+"*.example.net" or "b*z.example.net". See the documentation for more
+information.`,
+		},
+
+		"allow_any_name": {
+			Type:     framework.TypeBool,
+			Required: true,
+			Description: `If set, clients can request certificates for
+any domain, regardless of allowed_domains restrictions.
+See the documentation for more information.`,
+		},
+
+		"enforce_hostnames": {
+			Type:     framework.TypeBool,
+			Required: true,
+			Description: `If set, only valid host names are allowed for
+CN and DNS SANs, and the host part of email addresses. Defaults to true.`,
+		},
+
+		"allow_ip_sans": {
+			Type:     framework.TypeBool,
+			Required: true,
+			Description: `If set, IP Subject Alternative Names are allowed.
+Any valid IP is accepted and no authorization checking is performed.`,
+		},
+
+		"allowed_uri_sans": {
+			Type:     framework.TypeCommaStringSlice,
+			Required: true,
+			Description: `If set, an array of allowed URIs for URI Subject Alternative Names.
+Any valid URI is accepted; these values support globbing.`,
+		},
+
+		"allowed_uri_sans_template": {
+			Type:     framework.TypeBool,
+			Required: true,
+			Description: `If set, Allowed URI SANs can be specified using identity template policies.
+ Non-templated URI SANs are also permitted.`,
+		},
+
+		"allowed_other_sans": {
+			Type:     framework.TypeCommaStringSlice,
+			Required: true,
+			Description: `If set, an array of allowed other names to put in SANs. These values support globbing and must be in the format <oid>;<type>:<value>. Currently only "utf8" is a valid type. All values, including globbing values, must use this syntax, with the exception being a single "*" which allows any OID and any value (but type must still be utf8).`,
+		},
+
+		"allowed_serial_numbers": {
+			Type:     framework.TypeCommaStringSlice,
+			Required: true,
+			Description: `If set, an array of allowed serial numbers to put in Subject. These values support globbing.`,
+		},
+		"allowed_user_ids": {
+			Type:        framework.TypeCommaStringSlice,
+			Description: `If set, an array of allowed user-ids to put in user system login name specified here: https://www.rfc-editor.org/rfc/rfc1274#section-9.3.1`,
+		},
+		"server_flag": {
+			Type:    framework.TypeBool,
+			Default: true,
+			Description: `If set, certificates are flagged for server auth use.
+Defaults to true. See also RFC 5280 Section 4.2.1.12.`,
+		},
+
+		"client_flag": {
+			Type:     framework.TypeBool,
+			Required: true,
+			Description: `If set, certificates are flagged for client auth use.
+Defaults to true. See also RFC 5280 Section 4.2.1.12.`,
+		},
+
+		"code_signing_flag": {
+			Type:     framework.TypeBool,
+			Required: true,
+			Description: `If set, certificates are flagged for code signing
+use. Defaults to false. See also RFC 5280 Section 4.2.1.12.`,
+		},
+
+		"email_protection_flag": {
+			Type:     framework.TypeBool,
+			Required: true,
+			Description: `If set, certificates are flagged for email
+protection use. Defaults to false. See also RFC 5280 Section 4.2.1.12.`,
+		},
+
+		"key_type": {
+			Type:     framework.TypeString,
+			Required: true,
+			Description: `The type of key to use; defaults to RSA. "rsa"
+"ec", "ed25519" and "any" are the only valid values.`,
+		},
+
+		"key_bits": {
+			Type:     framework.TypeInt,
+			Required: true,
+			Description: `The number of bits to use. Allowed values are
+0 (universal default); with rsa key_type: 2048 (default), 3072, or
+4096; with ec key_type: 224, 256 (default), 384, or 521; ignored with
+ed25519.`,
+		},
+		"signature_bits": {
+			Type:     framework.TypeInt,
+			Required: true,
+			Description: `The number of bits to use in the signature
+algorithm; accepts 256 for SHA-2-256, 384 for SHA-2-384, and 512 for
+SHA-2-512. Defaults to 0 to automatically detect based on key length
+(SHA-2-256 for RSA keys, and matching the curve size for NIST P-Curves).`,
+		},
+		"use_pss": {
+			Type:     framework.TypeBool,
+			Required: false,
+			Description: `Whether or not to use PSS signatures when using a
+RSA key-type issuer. Defaults to false.`,
+		},
+		"key_usage": {
+			Type:     framework.TypeCommaStringSlice,
+			Required: true,
+			Description: `A comma-separated string or list of key usages (not extended
+key usages). Valid values can be found at
+https://golang.org/pkg/crypto/x509/#KeyUsage
+-- simply drop the "KeyUsage" part of the name.
+To remove all key usages from being set, set
+this value to an empty list. See also RFC 5280
+Section 4.2.1.3.`,
+		},
+
+		"ext_key_usage": {
+			Type:     framework.TypeCommaStringSlice,
+			Required: true,
+			Description: `A comma-separated string or list of extended key usages. Valid values can be found at
+https://golang.org/pkg/crypto/x509/#ExtKeyUsage
+-- simply drop the "ExtKeyUsage" part of the name.
+To remove all key usages from being set, set
+this value to an empty list. 
See also RFC 5280
+Section 4.2.1.12.`,
+		},
+
+		"ext_key_usage_oids": {
+			Type:        framework.TypeCommaStringSlice,
+			Required:    true,
+			Description: `A comma-separated string or list of extended key usage oids.`,
+		},
+
+		"use_csr_common_name": {
+			Type:     framework.TypeBool,
+			Required: true,
+			Description: `If set, when used with a signing profile,
+the common name in the CSR will be used. This
+does *not* include any requested Subject Alternative
+Names; use use_csr_sans for that. Defaults to true.`,
+		},
+
+		"use_csr_sans": {
+			Type:     framework.TypeBool,
+			Required: true,
+			Description: `If set, when used with a signing profile,
+the SANs in the CSR will be used. This does *not*
+include the Common Name (cn); use use_csr_common_name
+for that. Defaults to true.`,
+		},
+
+		"ou": {
+			Type: framework.TypeCommaStringSlice,
+			Description: `If set, OU (OrganizationalUnit) will be set to
+this value in certificates issued by this role.`,
+		},
+
+		"organization": {
+			Type: framework.TypeCommaStringSlice,
+			Description: `If set, O (Organization) will be set to
+this value in certificates issued by this role.`,
+		},
+
+		"country": {
+			Type: framework.TypeCommaStringSlice,
+			Description: `If set, Country will be set to
+this value in certificates issued by this role.`,
+		},
+
+		"locality": {
+			Type: framework.TypeCommaStringSlice,
+			Description: `If set, Locality will be set to
+this value in certificates issued by this role.`,
+		},
+
+		"province": {
+			Type: framework.TypeCommaStringSlice,
+			Description: `If set, Province will be set to
+this value in certificates issued by this role.`,
+		},
+
+		"street_address": {
+			Type: framework.TypeCommaStringSlice,
+			Description: `If set, Street Address will be set to
+this value in certificates issued by this role.`,
+		},
+
+		"postal_code": {
+			Type: framework.TypeCommaStringSlice,
+			Description: `If set, Postal Code will be set to
+this value in certificates issued by this role.`,
+		},
+
+		"generate_lease": {
+			Type: framework.TypeBool,
+			Description: `
+If set, certificates issued/signed against this role will have Vault leases
+attached to them. Defaults to "false". Certificates can be added to the CRL by
+"vault revoke <lease_id>" when certificates are associated with leases. It can
+also be done using the "pki/revoke" endpoint. However, when lease generation is
+disabled, invoking "pki/revoke" would be the only way to add the certificates
+to the CRL. When a large number of certificates are generated with long
+lifetimes, it is recommended that lease generation be disabled, as a large
+number of leases can adversely affect the startup time of Vault.`,
+		},
+
+		"no_store": {
+			Type: framework.TypeBool,
+			Description: `
+If set, certificates issued/signed against this role will not be stored in the
+storage backend. This can improve performance when issuing large numbers of
+certificates. However, certificates issued in this way cannot be enumerated
+or revoked, so this option is recommended only for certificates that are
+non-sensitive, or extremely short-lived. This option implies a value of "false"
+for "generate_lease".`,
+		},
+
+		"require_cn": {
+			Type:        framework.TypeBool,
+			Description: `If set to false, makes the 'common_name' field optional while generating a certificate.`,
+		},
+
+		"cn_validations": {
+			Type: framework.TypeCommaStringSlice,
+			Description: `List of allowed validations to run against the
+Common Name field. 
Values can include 'email' to validate the CN is a email +address, 'hostname' to validate the CN is a valid hostname (potentially +including wildcards). When multiple validations are specified, these take +OR semantics (either email OR hostname are allowed). The special value +'disabled' allows disabling all CN name validations, allowing for arbitrary +non-Hostname, non-Email address CNs.`, + }, + + "policy_identifiers": { + Type: framework.TypeCommaStringSlice, + Description: `A comma-separated string or list of policy OIDs, or a JSON list of qualified policy +information, which must include an oid, and may include a notice and/or cps url, using the form +[{"oid"="1.3.6.1.4.1.7.8","notice"="I am a user Notice"}, {"oid"="1.3.6.1.4.1.44947.1.2.4 ","cps"="https://example.com"}].`, + }, + + "basic_constraints_valid_for_non_ca": { + Type: framework.TypeBool, + Description: `Mark Basic Constraints valid when issuing non-CA certificates.`, + }, + "not_before_duration": { + Type: framework.TypeInt64, + Description: `The duration in seconds before now which the certificate needs to be backdated by.`, + }, + "not_after": { + Type: framework.TypeString, + Description: `Set the not after field of the certificate with specified date value. +The value format should be given in UTC format YYYY-MM-ddTHH:MM:SSZ.`, + }, + "issuer_ref": { + Type: framework.TypeString, + Description: `Reference to the issuer used to sign requests +serviced by this role.`, + }, + } + + return &framework.Path{ + Pattern: "roles/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "role", + }, + + Fields: map[string]*framework.FieldSchema{ + "backend": { + Type: framework.TypeString, + Description: "Backend Type", + }, + + "name": { + Type: framework.TypeString, + Description: "Name of the role", + }, + + "ttl": { + Type: framework.TypeDurationSecond, + Description: `The lease duration (validity period of the +certificate) if no specific lease duration is requested. +The lease duration controls the expiration of certificates +issued by this backend. Defaults to the system default +value or the value of max_ttl, whichever is shorter.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "TTL", + }, + }, + + "max_ttl": { + Type: framework.TypeDurationSecond, + Description: `The maximum allowed lease duration. If not +set, defaults to the system maximum lease TTL.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Max TTL", + }, + }, + + "allow_localhost": { + Type: framework.TypeBool, + Default: true, + Description: `Whether to allow "localhost" and "localdomain" +as a valid common name in a request, independent of allowed_domains value.`, + DisplayAttrs: &framework.DisplayAttributes{ + Value: true, + }, + }, + + "allowed_domains": { + Type: framework.TypeCommaStringSlice, + Description: `Specifies the domains this role is allowed +to issue certificates for. This is used with the allow_bare_domains, +allow_subdomains, and allow_glob_domains to determine matches for the +common name, DNS-typed SAN entries, and Email-typed SAN entries of +certificates. See the documentation for more information. This parameter +accepts a comma-separated string or list of domains.`, + }, + "allowed_domains_template": { + Type: framework.TypeBool, + Description: `If set, Allowed domains can be specified using identity template policies. 
+ Non-templated domains are also permitted.`,
+			Default: false,
+		},
+		"allow_bare_domains": {
+			Type: framework.TypeBool,
+			Description: `If set, clients can request certificates
+for the base domains themselves, e.g. "example.com" of domains listed
+in allowed_domains. This is a separate option as in some cases this can
+be considered a security threat. See the documentation for more
+information.`,
+		},
+
+		"allow_subdomains": {
+			Type: framework.TypeBool,
+			Description: `If set, clients can request certificates for
+subdomains of domains listed in allowed_domains, including wildcard
+subdomains. See the documentation for more information.`,
+		},
+
+		"allow_glob_domains": {
+			Type: framework.TypeBool,
+			Description: `If set, domains specified in allowed_domains
+can include shell-style glob patterns, e.g. "ftp*.example.com".
+See the documentation for more information.`,
+		},
+
+		"allow_wildcard_certificates": {
+			Type: framework.TypeBool,
+			Description: `If set, allows certificates with wildcards in
+the common name to be issued, conforming to RFC 6125's Section 6.4.3; e.g.,
+"*.example.net" or "b*z.example.net". See the documentation for more
+information.`,
+			Default: true,
+		},
+
+		"allow_any_name": {
+			Type: framework.TypeBool,
+			Description: `If set, clients can request certificates for
+any domain, regardless of allowed_domains restrictions.
+See the documentation for more information.`,
+		},
+
+		"enforce_hostnames": {
+			Type:    framework.TypeBool,
+			Default: true,
+			Description: `If set, only valid host names are allowed for
+CN and DNS SANs, and the host part of email addresses. Defaults to true.`,
+			DisplayAttrs: &framework.DisplayAttributes{
+				Value: true,
+			},
+		},
+
+		"allow_ip_sans": {
+			Type:    framework.TypeBool,
+			Default: true,
+			Description: `If set, IP Subject Alternative Names are allowed.
+Any valid IP is accepted and no authorization checking is performed.`,
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name:  "Allow IP Subject Alternative Names",
+				Value: true,
+			},
+		},
+
+		"allowed_uri_sans": {
+			Type: framework.TypeCommaStringSlice,
+			Description: `If set, an array of allowed URIs for URI Subject Alternative Names.
+Any valid URI is accepted; these values support globbing.`,
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name: "Allowed URI Subject Alternative Names",
+			},
+		},
+
+		"allowed_uri_sans_template": {
+			Type: framework.TypeBool,
+			Description: `If set, Allowed URI SANs can be specified using identity template policies.
+ Non-templated URI SANs are also permitted.`,
+			Default: false,
+		},
+
+		"allowed_other_sans": {
+			Type:        framework.TypeCommaStringSlice,
+			Description: `If set, an array of allowed other names to put in SANs. These values support globbing and must be in the format <oid>;<type>:<value>. Currently only "utf8" is a valid type. All values, including globbing values, must use this syntax, with the exception being a single "*" which allows any OID and any value (but type must still be utf8).`,
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name: "Allowed Other Subject Alternative Names",
+			},
+		},
+
+		"allowed_serial_numbers": {
+			Type:        framework.TypeCommaStringSlice,
+			Description: `If set, an array of allowed serial numbers to put in Subject. 
These values support globbing.`, + }, + + "allowed_user_ids": { + Type: framework.TypeCommaStringSlice, + Description: `If set, an array of allowed user-ids to put in user system login name specified here: https://www.rfc-editor.org/rfc/rfc1274#section-9.3.1`, + }, + + "server_flag": { + Type: framework.TypeBool, + Default: true, + Description: `If set, certificates are flagged for server auth use. +Defaults to true. See also RFC 5280 Section 4.2.1.12.`, + DisplayAttrs: &framework.DisplayAttributes{ + Value: true, + }, + }, + + "client_flag": { + Type: framework.TypeBool, + Default: true, + Description: `If set, certificates are flagged for client auth use. +Defaults to true. See also RFC 5280 Section 4.2.1.12.`, + DisplayAttrs: &framework.DisplayAttributes{ + Value: true, + }, + }, + + "code_signing_flag": { + Type: framework.TypeBool, + Description: `If set, certificates are flagged for code signing +use. Defaults to false. See also RFC 5280 Section 4.2.1.12.`, + }, + + "email_protection_flag": { + Type: framework.TypeBool, + Description: `If set, certificates are flagged for email +protection use. Defaults to false. See also RFC 5280 Section 4.2.1.12.`, + }, + + "key_type": { + Type: framework.TypeString, + Default: "rsa", + Description: `The type of key to use; defaults to RSA. "rsa" +"ec", "ed25519" and "any" are the only valid values.`, + AllowedValues: []interface{}{"rsa", "ec", "ed25519", "any"}, + }, + + "key_bits": { + Type: framework.TypeInt, + Default: 0, + Description: `The number of bits to use. Allowed values are +0 (universal default); with rsa key_type: 2048 (default), 3072, or +4096; with ec key_type: 224, 256 (default), 384, or 521; ignored with +ed25519.`, + }, + + "signature_bits": { + Type: framework.TypeInt, + Default: 0, + Description: `The number of bits to use in the signature +algorithm; accepts 256 for SHA-2-256, 384 for SHA-2-384, and 512 for +SHA-2-512. Defaults to 0 to automatically detect based on key length +(SHA-2-256 for RSA keys, and matching the curve size for NIST P-Curves).`, + }, + + "use_pss": { + Type: framework.TypeBool, + Default: false, + Description: `Whether or not to use PSS signatures when using a +RSA key-type issuer. Defaults to false.`, + }, + + "key_usage": { + Type: framework.TypeCommaStringSlice, + Default: []string{"DigitalSignature", "KeyAgreement", "KeyEncipherment"}, + Description: `A comma-separated string or list of key usages (not extended +key usages). Valid values can be found at +https://golang.org/pkg/crypto/x509/#KeyUsage +-- simply drop the "KeyUsage" part of the name. +To remove all key usages from being set, set +this value to an empty list. See also RFC 5280 +Section 4.2.1.3.`, + DisplayAttrs: &framework.DisplayAttributes{ + Value: "DigitalSignature,KeyAgreement,KeyEncipherment", + }, + }, + + "ext_key_usage": { + Type: framework.TypeCommaStringSlice, + Default: []string{}, + Description: `A comma-separated string or list of extended key usages. Valid values can be found at +https://golang.org/pkg/crypto/x509/#ExtKeyUsage +-- simply drop the "ExtKeyUsage" part of the name. +To remove all key usages from being set, set +this value to an empty list. 
See also RFC 5280
+Section 4.2.1.12.`,
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name: "Extended Key Usage",
+			},
+		},
+
+		"ext_key_usage_oids": {
+			Type:        framework.TypeCommaStringSlice,
+			Description: `A comma-separated string or list of extended key usage oids.`,
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name: "Extended Key Usage OIDs",
+			},
+		},
+
+		"use_csr_common_name": {
+			Type:    framework.TypeBool,
+			Default: true,
+			Description: `If set, when used with a signing profile,
+the common name in the CSR will be used. This
+does *not* include any requested Subject Alternative
+Names; use use_csr_sans for that. Defaults to true.`,
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name:  "Use CSR Common Name",
+				Value: true,
+			},
+		},
+
+		"use_csr_sans": {
+			Type:    framework.TypeBool,
+			Default: true,
+			Description: `If set, when used with a signing profile,
+the SANs in the CSR will be used. This does *not*
+include the Common Name (cn); use use_csr_common_name
+for that. Defaults to true.`,
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name:  "Use CSR Subject Alternative Names",
+				Value: true,
+			},
+		},
+
+		"ou": {
+			Type: framework.TypeCommaStringSlice,
+			Description: `If set, OU (OrganizationalUnit) will be set to
+this value in certificates issued by this role.`,
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name: "Organizational Unit",
+			},
+		},
+
+		"organization": {
+			Type: framework.TypeCommaStringSlice,
+			Description: `If set, O (Organization) will be set to
+this value in certificates issued by this role.`,
+		},
+
+		"country": {
+			Type: framework.TypeCommaStringSlice,
+			Description: `If set, Country will be set to
+this value in certificates issued by this role.`,
+		},
+
+		"locality": {
+			Type: framework.TypeCommaStringSlice,
+			Description: `If set, Locality will be set to
+this value in certificates issued by this role.`,
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name: "Locality/City",
+			},
+		},
+
+		"province": {
+			Type: framework.TypeCommaStringSlice,
+			Description: `If set, Province will be set to
+this value in certificates issued by this role.`,
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name: "Province/State",
+			},
+		},
+
+		"street_address": {
+			Type: framework.TypeCommaStringSlice,
+			Description: `If set, Street Address will be set to
+this value in certificates issued by this role.`,
+		},
+
+		"postal_code": {
+			Type: framework.TypeCommaStringSlice,
+			Description: `If set, Postal Code will be set to
+this value in certificates issued by this role.`,
+		},
+
+		"generate_lease": {
+			Type: framework.TypeBool,
+			Description: `
+If set, certificates issued/signed against this role will have Vault leases
+attached to them. Defaults to "false". Certificates can be added to the CRL by
+"vault revoke <lease_id>" when certificates are associated with leases. It can
+also be done using the "pki/revoke" endpoint. However, when lease generation is
+disabled, invoking "pki/revoke" would be the only way to add the certificates
+to the CRL. When a large number of certificates are generated with long
+lifetimes, it is recommended that lease generation be disabled, as a large
+number of leases can adversely affect the startup time of Vault.`,
+		},
+
+		"no_store": {
+			Type: framework.TypeBool,
+			Description: `
+If set, certificates issued/signed against this role will not be stored in the
+storage backend. This can improve performance when issuing large numbers of
+certificates. 
However, certificates issued in this way cannot be enumerated +or revoked, so this option is recommended only for certificates that are +non-sensitive, or extremely short-lived. This option implies a value of "false" +for "generate_lease".`, + }, + + "require_cn": { + Type: framework.TypeBool, + Default: true, + Description: `If set to false, makes the 'common_name' field optional while generating a certificate.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Require Common Name", + }, + }, + + "cn_validations": { + Type: framework.TypeCommaStringSlice, + Default: []string{"email", "hostname"}, + Description: `List of allowed validations to run against the +Common Name field. Values can include 'email' to validate the CN is a email +address, 'hostname' to validate the CN is a valid hostname (potentially +including wildcards). When multiple validations are specified, these take +OR semantics (either email OR hostname are allowed). The special value +'disabled' allows disabling all CN name validations, allowing for arbitrary +non-Hostname, non-Email address CNs.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Common Name Validations", + }, + }, + + "policy_identifiers": { + Type: framework.TypeCommaStringSlice, + Description: `A comma-separated string or list of policy OIDs, or a JSON list of qualified policy +information, which must include an oid, and may include a notice and/or cps url, using the form +[{"oid"="1.3.6.1.4.1.7.8","notice"="I am a user Notice"}, {"oid"="1.3.6.1.4.1.44947.1.2.4 ","cps"="https://example.com"}].`, + }, + + "basic_constraints_valid_for_non_ca": { + Type: framework.TypeBool, + Description: `Mark Basic Constraints valid when issuing non-CA certificates.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Basic Constraints Valid for Non-CA", + }, + }, + "not_before_duration": { + Type: framework.TypeDurationSecond, + Default: 30, + Description: `The duration before now which the certificate needs to be backdated by.`, + DisplayAttrs: &framework.DisplayAttributes{ + Value: 30, + }, + }, + "not_after": { + Type: framework.TypeString, + Description: `Set the not after field of the certificate with specified date value. +The value format should be given in UTC format YYYY-MM-ddTHH:MM:SSZ.`, + }, + "issuer_ref": { + Type: framework.TypeString, + Description: `Reference to the issuer used to sign requests +serviced by this role.`, + Default: defaultRef, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathRoleRead, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: pathRolesResponseFields, + }}, + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathRoleCreate, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: pathRolesResponseFields, + }}, + }, + // Read more about why these flags are set in backend.go. + ForwardPerformanceStandby: true, + ForwardPerformanceSecondary: true, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathRoleDelete, + Responses: map[int][]framework.Response{ + http.StatusNoContent: {{ + Description: "No Content", + }}, + }, + // Read more about why these flags are set in backend.go. 
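For orientation, here is a minimal sketch of how a client might exercise the read/update/delete operations registered above. It assumes a running Vault with the PKI engine mounted at "pki", VAULT_ADDR and VAULT_TOKEN set in the environment, and a hypothetical role name; it is an illustration, not part of the backend.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// DefaultConfig reads VAULT_ADDR; NewClient picks up VAULT_TOKEN.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Update (create) a role; omitted fields fall back to the schema
	// defaults defined in pathRoles.
	_, err = client.Logical().Write("pki/roles/example-dot-com", map[string]interface{}{
		"allowed_domains":  "example.com",
		"allow_subdomains": true,
		"max_ttl":          "72h",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read it back; the response data mirrors pathRolesResponseFields.
	role, err := client.Logical().Read("pki/roles/example-dot-com")
	if err != nil || role == nil {
		log.Fatalf("read failed: %v", err)
	}
	fmt.Println(role.Data["allow_subdomains"], role.Data["max_ttl"])

	// Delete; on clusters, this is forwarded per the flags above.
	if _, err := client.Logical().Delete("pki/roles/example-dot-com"); err != nil {
		log.Fatal(err)
	}
}
```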
+ ForwardPerformanceStandby: true, + ForwardPerformanceSecondary: true, + }, + logical.PatchOperation: &framework.PathOperation{ + Callback: b.pathRolePatch, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: pathRolesResponseFields, + }}, + }, + // Read more about why these flags are set in backend.go. + ForwardPerformanceStandby: true, + ForwardPerformanceSecondary: true, + }, + }, + + HelpSynopsis: pathRoleHelpSyn, + HelpDescription: pathRoleHelpDesc, + } +} + +func (b *backend) getRole(ctx context.Context, s logical.Storage, n string) (*roleEntry, error) { + entry, err := s.Get(ctx, "role/"+n) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result roleEntry + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + // Migrate existing saved entries and save back if changed + modified := false + if len(result.DeprecatedTTL) == 0 && len(result.Lease) != 0 { + result.DeprecatedTTL = result.Lease + result.Lease = "" + modified = true + } + if result.TTL == 0 && len(result.DeprecatedTTL) != 0 { + parsed, err := parseutil.ParseDurationSecond(result.DeprecatedTTL) + if err != nil { + return nil, err + } + result.TTL = parsed + result.DeprecatedTTL = "" + modified = true + } + if len(result.DeprecatedMaxTTL) == 0 && len(result.LeaseMax) != 0 { + result.DeprecatedMaxTTL = result.LeaseMax + result.LeaseMax = "" + modified = true + } + if result.MaxTTL == 0 && len(result.DeprecatedMaxTTL) != 0 { + parsed, err := parseutil.ParseDurationSecond(result.DeprecatedMaxTTL) + if err != nil { + return nil, err + } + result.MaxTTL = parsed + result.DeprecatedMaxTTL = "" + modified = true + } + if result.AllowBaseDomain { + result.AllowBaseDomain = false + result.AllowBareDomains = true + modified = true + } + if result.AllowedDomainsOld != "" { + result.AllowedDomains = strings.Split(result.AllowedDomainsOld, ",") + result.AllowedDomainsOld = "" + modified = true + } + if result.AllowedBaseDomain != "" { + found := false + for _, v := range result.AllowedDomains { + if v == result.AllowedBaseDomain { + found = true + break + } + } + if !found { + result.AllowedDomains = append(result.AllowedDomains, result.AllowedBaseDomain) + } + result.AllowedBaseDomain = "" + modified = true + } + if result.AllowWildcardCertificates == nil { + // While not the most secure default, when AllowWildcardCertificates isn't + // explicitly specified in the stored Role, we automatically upgrade it to + // true to preserve compatibility with previous versions of Vault. Once this + // field is set, this logic will not be triggered any more. + result.AllowWildcardCertificates = new(bool) + *result.AllowWildcardCertificates = true + modified = true + } + + // Upgrade generate_lease in role + if result.GenerateLease == nil { + // All the new roles will have GenerateLease always set to a value. A + // nil value indicates that this role needs an upgrade. Set it to + // `true` to not alter its current behavior. 
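The getRole logic here is a lazy, read-time migration: legacy fields are upgraded when an entry is decoded, and the result is persisted only if something changed. A minimal, self-contained sketch of that pattern follows; the struct and field names are illustrative, not Vault's actual storage schema.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// entry mirrors the upgrade shape: a deprecated field that, once
// migrated, is cleared so "omitempty" drops it from stored JSON.
type entry struct {
	DeprecatedTTL string `json:"ttl,omitempty"`
	TTLSeconds    int64  `json:"ttl_duration,omitempty"`
}

// loadAndMigrate decodes a stored entry, upgrades legacy fields in
// place, and reports whether the caller should persist the result.
func loadAndMigrate(raw []byte) (entry, bool, error) {
	var e entry
	if err := json.Unmarshal(raw, &e); err != nil {
		return e, false, err
	}
	modified := false
	if e.TTLSeconds == 0 && e.DeprecatedTTL != "" {
		// The real code parses the duration with
		// parseutil.ParseDurationSecond; a fixed value stands in
		// here to keep the sketch self-contained.
		e.TTLSeconds = 3600
		e.DeprecatedTTL = ""
		modified = true
	}
	return e, modified, nil
}

func main() {
	e, modified, err := loadAndMigrate([]byte(`{"ttl":"1h"}`))
	if err != nil {
		panic(err)
	}
	// Write back only when modified, and (in Vault) only where writes
	// are permitted, tolerating ErrReadOnly on secondaries.
	fmt.Printf("entry=%+v persist=%v\n", e, modified)
}
```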
+ result.GenerateLease = new(bool) + *result.GenerateLease = true + modified = true + } + + // Upgrade key usages + if result.KeyUsageOld != "" { + result.KeyUsage = strings.Split(result.KeyUsageOld, ",") + result.KeyUsageOld = "" + modified = true + } + + // Upgrade OU + if result.OUOld != "" { + result.OU = strings.Split(result.OUOld, ",") + result.OUOld = "" + modified = true + } + + // Upgrade Organization + if result.OrganizationOld != "" { + result.Organization = strings.Split(result.OrganizationOld, ",") + result.OrganizationOld = "" + modified = true + } + + // Set the issuer field to default if not set. We want to do this + // unconditionally as we should probably never have an empty issuer + // on a stored roles. + if len(result.Issuer) == 0 { + result.Issuer = defaultRef + modified = true + } + + // Update CN Validations to be the present default, "email,hostname" + if len(result.CNValidations) == 0 { + result.CNValidations = []string{"email", "hostname"} + modified = true + } + + // Ensure the role is valid after updating. + _, err = validateRole(b, &result, ctx, s) + if err != nil { + return nil, err + } + + if modified && (b.System().LocalMount() || !b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) { + jsonEntry, err := logical.StorageEntryJSON("role/"+n, &result) + if err != nil { + return nil, err + } + if err := s.Put(ctx, jsonEntry); err != nil { + // Only perform upgrades on replication primary + if !strings.Contains(err.Error(), logical.ErrReadOnly.Error()) { + return nil, err + } + } + } + + result.Name = n + + return &result, nil +} + +func (b *backend) pathRoleDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + err := req.Storage.Delete(ctx, "role/"+data.Get("name").(string)) + if err != nil { + return nil, err + } + + return nil, nil +} + +func (b *backend) pathRoleRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + roleName := data.Get("name").(string) + if roleName == "" { + return logical.ErrorResponse("missing role name"), nil + } + + role, err := b.getRole(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + resp := &logical.Response{ + Data: role.ToResponseData(), + } + return resp, nil +} + +func (b *backend) pathRoleList(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + entries, err := req.Storage.List(ctx, "role/") + if err != nil { + return nil, err + } + + return logical.ListResponse(entries), nil +} + +func (b *backend) pathRoleCreate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + var err error + name := data.Get("name").(string) + + entry := &roleEntry{ + MaxTTL: time.Duration(data.Get("max_ttl").(int)) * time.Second, + TTL: time.Duration(data.Get("ttl").(int)) * time.Second, + AllowLocalhost: data.Get("allow_localhost").(bool), + AllowedDomains: data.Get("allowed_domains").([]string), + AllowedDomainsTemplate: data.Get("allowed_domains_template").(bool), + AllowBareDomains: data.Get("allow_bare_domains").(bool), + AllowSubdomains: data.Get("allow_subdomains").(bool), + AllowGlobDomains: data.Get("allow_glob_domains").(bool), + AllowWildcardCertificates: new(bool), // Handled specially below + AllowAnyName: data.Get("allow_any_name").(bool), + AllowedURISANsTemplate: data.Get("allowed_uri_sans_template").(bool), + EnforceHostnames: 
data.Get("enforce_hostnames").(bool), + AllowIPSANs: data.Get("allow_ip_sans").(bool), + AllowedURISANs: data.Get("allowed_uri_sans").([]string), + ServerFlag: data.Get("server_flag").(bool), + ClientFlag: data.Get("client_flag").(bool), + CodeSigningFlag: data.Get("code_signing_flag").(bool), + EmailProtectionFlag: data.Get("email_protection_flag").(bool), + KeyType: data.Get("key_type").(string), + KeyBits: data.Get("key_bits").(int), + SignatureBits: data.Get("signature_bits").(int), + UsePSS: data.Get("use_pss").(bool), + UseCSRCommonName: data.Get("use_csr_common_name").(bool), + UseCSRSANs: data.Get("use_csr_sans").(bool), + KeyUsage: data.Get("key_usage").([]string), + ExtKeyUsage: data.Get("ext_key_usage").([]string), + ExtKeyUsageOIDs: data.Get("ext_key_usage_oids").([]string), + OU: data.Get("ou").([]string), + Organization: data.Get("organization").([]string), + Country: data.Get("country").([]string), + Locality: data.Get("locality").([]string), + Province: data.Get("province").([]string), + StreetAddress: data.Get("street_address").([]string), + PostalCode: data.Get("postal_code").([]string), + GenerateLease: new(bool), + NoStore: data.Get("no_store").(bool), + RequireCN: data.Get("require_cn").(bool), + CNValidations: data.Get("cn_validations").([]string), + AllowedSerialNumbers: data.Get("allowed_serial_numbers").([]string), + AllowedUserIDs: data.Get("allowed_user_ids").([]string), + PolicyIdentifiers: getPolicyIdentifier(data, nil), + BasicConstraintsValidForNonCA: data.Get("basic_constraints_valid_for_non_ca").(bool), + NotBeforeDuration: time.Duration(data.Get("not_before_duration").(int)) * time.Second, + NotAfter: data.Get("not_after").(string), + Issuer: data.Get("issuer_ref").(string), + Name: name, + } + + allowedOtherSANs := data.Get("allowed_other_sans").([]string) + switch { + case len(allowedOtherSANs) == 0: + case len(allowedOtherSANs) == 1 && allowedOtherSANs[0] == "*": + default: + _, err := parseOtherSANs(allowedOtherSANs) + if err != nil { + return logical.ErrorResponse(fmt.Errorf("error parsing allowed_other_sans: %w", err).Error()), nil + } + } + entry.AllowedOtherSANs = allowedOtherSANs + + allowWildcardCertificates, present := data.GetOk("allow_wildcard_certificates") + if !present { + // While not the most secure default, when AllowWildcardCertificates isn't + // explicitly specified in the request, we automatically set it to true to + // preserve compatibility with previous versions of Vault. 
+ allowWildcardCertificates = true + } + *entry.AllowWildcardCertificates = allowWildcardCertificates.(bool) + + warning := "" + // no_store implies generate_lease := false + if entry.NoStore { + *entry.GenerateLease = false + if data.Get("generate_lease").(bool) { + warning = "mutually exclusive values no_store=true and generate_lease=true were both specified; no_store=true takes priority" + } + } else { + *entry.GenerateLease = data.Get("generate_lease").(bool) + if *entry.GenerateLease { + warning = "it is encouraged to disable generate_lease and rely on PKI's native capabilities when possible; this option can cause Vault-wide issues with large numbers of issued certificates" + } + } + + resp, err := validateRole(b, entry, ctx, req.Storage) + if err != nil { + return nil, err + } + if warning != "" { + resp.AddWarning(warning) + } + if resp.IsError() { + return resp, nil + } + + // Store it + jsonEntry, err := logical.StorageEntryJSON("role/"+name, entry) + if err != nil { + return nil, err + } + if err := req.Storage.Put(ctx, jsonEntry); err != nil { + return nil, err + } + + return resp, nil +} + +func validateRole(b *backend, entry *roleEntry, ctx context.Context, s logical.Storage) (*logical.Response, error) { + resp := &logical.Response{} + var err error + + if entry.MaxTTL > 0 && entry.TTL > entry.MaxTTL { + return logical.ErrorResponse( + `"ttl" value must be less than "max_ttl" value`, + ), nil + } + + if entry.KeyBits, entry.SignatureBits, err = certutil.ValidateDefaultOrValueKeyTypeSignatureLength(entry.KeyType, entry.KeyBits, entry.SignatureBits); err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + if len(entry.ExtKeyUsageOIDs) > 0 { + for _, oidstr := range entry.ExtKeyUsageOIDs { + _, err := certutil.StringToOid(oidstr) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("%q could not be parsed as a valid oid for an extended key usage", oidstr)), nil + } + } + } + + if len(entry.PolicyIdentifiers) > 0 { + _, err := certutil.CreatePolicyInformationExtensionFromStorageStrings(entry.PolicyIdentifiers) + if err != nil { + return nil, err + } + } + + // Ensure issuers ref is set to a non-empty value. Note that we never + // resolve the reference (to an issuerId) at role creation time; instead, + // resolve it at use time. This allows values such as `default` or other + // user-assigned names to "float" and change over time. 
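A minimal sketch of the late-binding lookup described in the comment above, with a plain map standing in for the storage-backed issuer table; it shows why a dangling reference is only a warning at role-write time.

```go
package main

import (
	"errors"
	"fmt"
)

var errRefNotFound = errors.New("issuer reference not found")

// resolveIssuerReference models resolving a symbolic name ("default"
// or a user-assigned name) to a concrete ID at use time rather than at
// role creation time, so renames and re-pointing take effect later.
func resolveIssuerReference(byName map[string]string, ref string) (string, error) {
	if id, ok := byName[ref]; ok {
		return id, nil
	}
	return "", errRefNotFound
}

func main() {
	issuers := map[string]string{"default": "id-1234"}

	// A role stores only the name; resolution happens per request.
	id, err := resolveIssuerReference(issuers, "default")
	fmt.Println(id, err) // id-1234 <nil>

	// A dangling reference warns rather than failing the role write.
	if _, err := resolveIssuerReference(issuers, "my-root"); errors.Is(err, errRefNotFound) {
		fmt.Println(`warning: no issuing certificate currently has the name "my-root"`)
	}
}
```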
+ if len(entry.Issuer) == 0 { + entry.Issuer = defaultRef + } + // Check that the issuers reference set resolves to something + if !b.useLegacyBundleCaStorage() { + sc := b.makeStorageContext(ctx, s) + issuerId, err := sc.resolveIssuerReference(entry.Issuer) + if err != nil { + if issuerId == IssuerRefNotFound { + resp = &logical.Response{} + if entry.Issuer == defaultRef { + resp.AddWarning("Issuing Certificate was set to default, but no default issuing certificate (configurable at /config/issuers) is currently set") + } else { + resp.AddWarning(fmt.Sprintf("Issuing Certificate was set to %s but no issuing certificate currently has that name", entry.Issuer)) + } + } else { + return nil, err + } + } + + } + + // Ensures CNValidations are alright + entry.CNValidations, err = checkCNValidations(entry.CNValidations) + if err != nil { + return nil, errutil.UserError{Err: err.Error()} + } + + resp.Data = entry.ToResponseData() + return resp, nil +} + +func getWithExplicitDefault(data *framework.FieldData, field string, defaultValue interface{}) interface{} { + assignedValue, ok := data.GetOk(field) + if ok { + return assignedValue + } + return defaultValue +} + +func getTimeWithExplicitDefault(data *framework.FieldData, field string, defaultValue time.Duration) time.Duration { + assignedValue, ok := data.GetOk(field) + if ok { + return time.Duration(assignedValue.(int)) * time.Second + } + return defaultValue +} + +func (b *backend) pathRolePatch(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + + oldEntry, err := b.getRole(ctx, req.Storage, name) + if err != nil { + return nil, err + } + if oldEntry == nil { + return logical.ErrorResponse("Unable to fetch role entry to patch"), nil + } + + entry := &roleEntry{ + MaxTTL: getTimeWithExplicitDefault(data, "max_ttl", oldEntry.MaxTTL), + TTL: getTimeWithExplicitDefault(data, "ttl", oldEntry.TTL), + AllowLocalhost: getWithExplicitDefault(data, "allow_localhost", oldEntry.AllowLocalhost).(bool), + AllowedDomains: getWithExplicitDefault(data, "allowed_domains", oldEntry.AllowedDomains).([]string), + AllowedDomainsTemplate: getWithExplicitDefault(data, "allowed_domains_template", oldEntry.AllowedDomainsTemplate).(bool), + AllowBareDomains: getWithExplicitDefault(data, "allow_bare_domains", oldEntry.AllowBareDomains).(bool), + AllowSubdomains: getWithExplicitDefault(data, "allow_subdomains", oldEntry.AllowSubdomains).(bool), + AllowGlobDomains: getWithExplicitDefault(data, "allow_glob_domains", oldEntry.AllowGlobDomains).(bool), + AllowWildcardCertificates: new(bool), // Handled specially below + AllowAnyName: getWithExplicitDefault(data, "allow_any_name", oldEntry.AllowAnyName).(bool), + AllowedURISANsTemplate: getWithExplicitDefault(data, "allowed_uri_sans_template", oldEntry.AllowedURISANsTemplate).(bool), + EnforceHostnames: getWithExplicitDefault(data, "enforce_hostnames", oldEntry.EnforceHostnames).(bool), + AllowIPSANs: getWithExplicitDefault(data, "allow_ip_sans", oldEntry.AllowIPSANs).(bool), + AllowedURISANs: getWithExplicitDefault(data, "allowed_uri_sans", oldEntry.AllowedURISANs).([]string), + ServerFlag: getWithExplicitDefault(data, "server_flag", oldEntry.ServerFlag).(bool), + ClientFlag: getWithExplicitDefault(data, "client_flag", oldEntry.ClientFlag).(bool), + CodeSigningFlag: getWithExplicitDefault(data, "code_signing_flag", oldEntry.CodeSigningFlag).(bool), + EmailProtectionFlag: getWithExplicitDefault(data, "email_protection_flag", 
oldEntry.EmailProtectionFlag).(bool), + KeyType: getWithExplicitDefault(data, "key_type", oldEntry.KeyType).(string), + KeyBits: getWithExplicitDefault(data, "key_bits", oldEntry.KeyBits).(int), + SignatureBits: getWithExplicitDefault(data, "signature_bits", oldEntry.SignatureBits).(int), + UsePSS: getWithExplicitDefault(data, "use_pss", oldEntry.UsePSS).(bool), + UseCSRCommonName: getWithExplicitDefault(data, "use_csr_common_name", oldEntry.UseCSRCommonName).(bool), + UseCSRSANs: getWithExplicitDefault(data, "use_csr_sans", oldEntry.UseCSRSANs).(bool), + KeyUsage: getWithExplicitDefault(data, "key_usage", oldEntry.KeyUsage).([]string), + ExtKeyUsage: getWithExplicitDefault(data, "ext_key_usage", oldEntry.ExtKeyUsage).([]string), + ExtKeyUsageOIDs: getWithExplicitDefault(data, "ext_key_usage_oids", oldEntry.ExtKeyUsageOIDs).([]string), + OU: getWithExplicitDefault(data, "ou", oldEntry.OU).([]string), + Organization: getWithExplicitDefault(data, "organization", oldEntry.Organization).([]string), + Country: getWithExplicitDefault(data, "country", oldEntry.Country).([]string), + Locality: getWithExplicitDefault(data, "locality", oldEntry.Locality).([]string), + Province: getWithExplicitDefault(data, "province", oldEntry.Province).([]string), + StreetAddress: getWithExplicitDefault(data, "street_address", oldEntry.StreetAddress).([]string), + PostalCode: getWithExplicitDefault(data, "postal_code", oldEntry.PostalCode).([]string), + GenerateLease: new(bool), + NoStore: getWithExplicitDefault(data, "no_store", oldEntry.NoStore).(bool), + RequireCN: getWithExplicitDefault(data, "require_cn", oldEntry.RequireCN).(bool), + CNValidations: getWithExplicitDefault(data, "cn_validations", oldEntry.CNValidations).([]string), + AllowedSerialNumbers: getWithExplicitDefault(data, "allowed_serial_numbers", oldEntry.AllowedSerialNumbers).([]string), + AllowedUserIDs: getWithExplicitDefault(data, "allowed_user_ids", oldEntry.AllowedUserIDs).([]string), + PolicyIdentifiers: getPolicyIdentifier(data, &oldEntry.PolicyIdentifiers), + BasicConstraintsValidForNonCA: getWithExplicitDefault(data, "basic_constraints_valid_for_non_ca", oldEntry.BasicConstraintsValidForNonCA).(bool), + NotBeforeDuration: getTimeWithExplicitDefault(data, "not_before_duration", oldEntry.NotBeforeDuration), + NotAfter: getWithExplicitDefault(data, "not_after", oldEntry.NotAfter).(string), + Issuer: getWithExplicitDefault(data, "issuer_ref", oldEntry.Issuer).(string), + } + + allowedOtherSANsData, wasSet := data.GetOk("allowed_other_sans") + if wasSet { + allowedOtherSANs := allowedOtherSANsData.([]string) + switch { + case len(allowedOtherSANs) == 0: + case len(allowedOtherSANs) == 1 && allowedOtherSANs[0] == "*": + default: + _, err := parseOtherSANs(allowedOtherSANs) + if err != nil { + return logical.ErrorResponse(fmt.Errorf("error parsing allowed_other_sans: %w", err).Error()), nil + } + } + entry.AllowedOtherSANs = allowedOtherSANs + } else { + entry.AllowedOtherSANs = oldEntry.AllowedOtherSANs + } + + allowWildcardCertificates, present := data.GetOk("allow_wildcard_certificates") + if !present { + allowWildcardCertificates = *oldEntry.AllowWildcardCertificates + } + *entry.AllowWildcardCertificates = allowWildcardCertificates.(bool) + + warning := "" + generateLease, ok := data.GetOk("generate_lease") + // no_store implies generate_lease := false + if entry.NoStore { + *entry.GenerateLease = false + if ok && generateLease.(bool) || !ok && *oldEntry.GenerateLease { + warning = "mutually exclusive values no_store=true and 
generate_lease=true were both specified; no_store=true takes priority" + } + } else { + if ok { + *entry.GenerateLease = data.Get("generate_lease").(bool) + } else { + entry.GenerateLease = oldEntry.GenerateLease + } + + if *entry.GenerateLease { + warning = "it is encouraged to disable generate_lease and rely on PKI's native capabilities when possible; this option can cause Vault-wide issues with large numbers of issued certificates" + } + } + + resp, err := validateRole(b, entry, ctx, req.Storage) + if err != nil { + return nil, err + } + if warning != "" { + resp.AddWarning(warning) + } + if resp.IsError() { + return resp, nil + } + + // Store it + jsonEntry, err := logical.StorageEntryJSON("role/"+name, entry) + if err != nil { + return nil, err + } + if err := req.Storage.Put(ctx, jsonEntry); err != nil { + return nil, err + } + + return resp, nil +} + +func parseKeyUsages(input []string) int { + var parsedKeyUsages x509.KeyUsage + for _, k := range input { + switch strings.ToLower(strings.TrimSpace(k)) { + case "digitalsignature": + parsedKeyUsages |= x509.KeyUsageDigitalSignature + case "contentcommitment": + parsedKeyUsages |= x509.KeyUsageContentCommitment + case "keyencipherment": + parsedKeyUsages |= x509.KeyUsageKeyEncipherment + case "dataencipherment": + parsedKeyUsages |= x509.KeyUsageDataEncipherment + case "keyagreement": + parsedKeyUsages |= x509.KeyUsageKeyAgreement + case "certsign": + parsedKeyUsages |= x509.KeyUsageCertSign + case "crlsign": + parsedKeyUsages |= x509.KeyUsageCRLSign + case "encipheronly": + parsedKeyUsages |= x509.KeyUsageEncipherOnly + case "decipheronly": + parsedKeyUsages |= x509.KeyUsageDecipherOnly + } + } + + return int(parsedKeyUsages) +} + +func parseExtKeyUsages(role *roleEntry) certutil.CertExtKeyUsage { + var parsedKeyUsages certutil.CertExtKeyUsage + + if role.ServerFlag { + parsedKeyUsages |= certutil.ServerAuthExtKeyUsage + } + + if role.ClientFlag { + parsedKeyUsages |= certutil.ClientAuthExtKeyUsage + } + + if role.CodeSigningFlag { + parsedKeyUsages |= certutil.CodeSigningExtKeyUsage + } + + if role.EmailProtectionFlag { + parsedKeyUsages |= certutil.EmailProtectionExtKeyUsage + } + + for _, k := range role.ExtKeyUsage { + switch strings.ToLower(strings.TrimSpace(k)) { + case "any": + parsedKeyUsages |= certutil.AnyExtKeyUsage + case "serverauth": + parsedKeyUsages |= certutil.ServerAuthExtKeyUsage + case "clientauth": + parsedKeyUsages |= certutil.ClientAuthExtKeyUsage + case "codesigning": + parsedKeyUsages |= certutil.CodeSigningExtKeyUsage + case "emailprotection": + parsedKeyUsages |= certutil.EmailProtectionExtKeyUsage + case "ipsecendsystem": + parsedKeyUsages |= certutil.IpsecEndSystemExtKeyUsage + case "ipsectunnel": + parsedKeyUsages |= certutil.IpsecTunnelExtKeyUsage + case "ipsecuser": + parsedKeyUsages |= certutil.IpsecUserExtKeyUsage + case "timestamping": + parsedKeyUsages |= certutil.TimeStampingExtKeyUsage + case "ocspsigning": + parsedKeyUsages |= certutil.OcspSigningExtKeyUsage + case "microsoftservergatedcrypto": + parsedKeyUsages |= certutil.MicrosoftServerGatedCryptoExtKeyUsage + case "netscapeservergatedcrypto": + parsedKeyUsages |= certutil.NetscapeServerGatedCryptoExtKeyUsage + } + } + + return parsedKeyUsages +} + +type roleEntry struct { + LeaseMax string `json:"lease_max"` + Lease string `json:"lease"` + DeprecatedMaxTTL string `json:"max_ttl"` + DeprecatedTTL string `json:"ttl"` + TTL time.Duration `json:"ttl_duration"` + MaxTTL time.Duration `json:"max_ttl_duration"` + AllowLocalhost bool 
`json:"allow_localhost"` + AllowedBaseDomain string `json:"allowed_base_domain"` + AllowedDomainsOld string `json:"allowed_domains,omitempty"` + AllowedDomains []string `json:"allowed_domains_list"` + AllowedDomainsTemplate bool `json:"allowed_domains_template"` + AllowBaseDomain bool `json:"allow_base_domain"` + AllowBareDomains bool `json:"allow_bare_domains"` + AllowTokenDisplayName bool `json:"allow_token_displayname"` + AllowSubdomains bool `json:"allow_subdomains"` + AllowGlobDomains bool `json:"allow_glob_domains"` + AllowWildcardCertificates *bool `json:"allow_wildcard_certificates,omitempty"` + AllowAnyName bool `json:"allow_any_name"` + EnforceHostnames bool `json:"enforce_hostnames"` + AllowIPSANs bool `json:"allow_ip_sans"` + ServerFlag bool `json:"server_flag"` + ClientFlag bool `json:"client_flag"` + CodeSigningFlag bool `json:"code_signing_flag"` + EmailProtectionFlag bool `json:"email_protection_flag"` + UseCSRCommonName bool `json:"use_csr_common_name"` + UseCSRSANs bool `json:"use_csr_sans"` + KeyType string `json:"key_type"` + KeyBits int `json:"key_bits"` + UsePSS bool `json:"use_pss"` + SignatureBits int `json:"signature_bits"` + MaxPathLength *int `json:",omitempty"` + KeyUsageOld string `json:"key_usage,omitempty"` + KeyUsage []string `json:"key_usage_list"` + ExtKeyUsage []string `json:"extended_key_usage_list"` + OUOld string `json:"ou,omitempty"` + OU []string `json:"ou_list"` + OrganizationOld string `json:"organization,omitempty"` + Organization []string `json:"organization_list"` + Country []string `json:"country"` + Locality []string `json:"locality"` + Province []string `json:"province"` + StreetAddress []string `json:"street_address"` + PostalCode []string `json:"postal_code"` + GenerateLease *bool `json:"generate_lease,omitempty"` + NoStore bool `json:"no_store"` + RequireCN bool `json:"require_cn"` + CNValidations []string `json:"cn_validations"` + AllowedOtherSANs []string `json:"allowed_other_sans"` + AllowedSerialNumbers []string `json:"allowed_serial_numbers"` + AllowedUserIDs []string `json:"allowed_user_ids"` + AllowedURISANs []string `json:"allowed_uri_sans"` + AllowedURISANsTemplate bool `json:"allowed_uri_sans_template"` + PolicyIdentifiers []string `json:"policy_identifiers"` + ExtKeyUsageOIDs []string `json:"ext_key_usage_oids"` + BasicConstraintsValidForNonCA bool `json:"basic_constraints_valid_for_non_ca"` + NotBeforeDuration time.Duration `json:"not_before_duration"` + NotAfter string `json:"not_after"` + Issuer string `json:"issuer"` + // Name is only set when the role has been stored, on the fly roles have a blank name + Name string `json:"-"` +} + +func (r *roleEntry) ToResponseData() map[string]interface{} { + responseData := map[string]interface{}{ + "ttl": int64(r.TTL.Seconds()), + "max_ttl": int64(r.MaxTTL.Seconds()), + "allow_localhost": r.AllowLocalhost, + "allowed_domains": r.AllowedDomains, + "allowed_domains_template": r.AllowedDomainsTemplate, + "allow_bare_domains": r.AllowBareDomains, + "allow_token_displayname": r.AllowTokenDisplayName, + "allow_subdomains": r.AllowSubdomains, + "allow_glob_domains": r.AllowGlobDomains, + "allow_wildcard_certificates": r.AllowWildcardCertificates, + "allow_any_name": r.AllowAnyName, + "allowed_uri_sans_template": r.AllowedURISANsTemplate, + "enforce_hostnames": r.EnforceHostnames, + "allow_ip_sans": r.AllowIPSANs, + "server_flag": r.ServerFlag, + "client_flag": r.ClientFlag, + "code_signing_flag": r.CodeSigningFlag, + "email_protection_flag": r.EmailProtectionFlag, + "use_csr_common_name": 
r.UseCSRCommonName, + "use_csr_sans": r.UseCSRSANs, + "key_type": r.KeyType, + "key_bits": r.KeyBits, + "signature_bits": r.SignatureBits, + "use_pss": r.UsePSS, + "key_usage": r.KeyUsage, + "ext_key_usage": r.ExtKeyUsage, + "ext_key_usage_oids": r.ExtKeyUsageOIDs, + "ou": r.OU, + "organization": r.Organization, + "country": r.Country, + "locality": r.Locality, + "province": r.Province, + "street_address": r.StreetAddress, + "postal_code": r.PostalCode, + "no_store": r.NoStore, + "allowed_other_sans": r.AllowedOtherSANs, + "allowed_serial_numbers": r.AllowedSerialNumbers, + "allowed_user_ids": r.AllowedUserIDs, + "allowed_uri_sans": r.AllowedURISANs, + "require_cn": r.RequireCN, + "cn_validations": r.CNValidations, + "policy_identifiers": r.PolicyIdentifiers, + "basic_constraints_valid_for_non_ca": r.BasicConstraintsValidForNonCA, + "not_before_duration": int64(r.NotBeforeDuration.Seconds()), + "not_after": r.NotAfter, + "issuer_ref": r.Issuer, + } + if r.MaxPathLength != nil { + responseData["max_path_length"] = r.MaxPathLength + } + if r.GenerateLease != nil { + responseData["generate_lease"] = r.GenerateLease + } + return responseData +} + +func checkCNValidations(validations []string) ([]string, error) { + var haveDisabled bool + var haveEmail bool + var haveHostname bool + + var result []string + + if len(validations) == 0 { + return []string{"email", "hostname"}, nil + } + + for _, validation := range validations { + switch strings.ToLower(validation) { + case "disabled": + if haveDisabled { + return nil, fmt.Errorf("cn_validations value incorrect: `disabled` specified multiple times") + } + haveDisabled = true + case "email": + if haveEmail { + return nil, fmt.Errorf("cn_validations value incorrect: `email` specified multiple times") + } + haveEmail = true + case "hostname": + if haveHostname { + return nil, fmt.Errorf("cn_validations value incorrect: `hostname` specified multiple times") + } + haveHostname = true + default: + return nil, fmt.Errorf("cn_validations value incorrect: unknown type: `%s`", validation) + } + + result = append(result, strings.ToLower(validation)) + } + + if !haveDisabled && !haveEmail && !haveHostname { + return nil, fmt.Errorf("cn_validations value incorrect: must specify a value (`email` and/or `hostname`) or `disabled`") + } + + if haveDisabled && (haveEmail || haveHostname) { + return nil, fmt.Errorf("cn_validations value incorrect: cannot specify `disabled` along with `email` or `hostname`") + } + + return result, nil +} + +const pathListRolesHelpSyn = `List the existing roles in this backend` + +const pathListRolesHelpDesc = `Roles will be listed by the role name.` + +const pathRoleHelpSyn = `Manage the roles that can be created with this backend.` + +const pathRoleHelpDesc = `This path lets you manage the roles that can be created with this backend.` + +const policyIdentifiersParam = "policy_identifiers" + +func getPolicyIdentifier(data *framework.FieldData, defaultIdentifiers *[]string) []string { + policyIdentifierEntry, ok := data.GetOk(policyIdentifiersParam) + if !ok { + // No Entry for policy_identifiers + if defaultIdentifiers != nil { + return *defaultIdentifiers + } + return data.Get(policyIdentifiersParam).([]string) + } + // Could Be A JSON Entry + policyIdentifierJsonEntry := data.Raw[policyIdentifiersParam] + policyIdentifierJsonString, ok := policyIdentifierJsonEntry.(string) + if ok { + policyIdentifiers, err := parsePolicyIdentifiersFromJson(policyIdentifierJsonString) + if err == nil { + return policyIdentifiers + } + } + // Else 
could Just Be A List of OIDs + return policyIdentifierEntry.([]string) +} + +func parsePolicyIdentifiersFromJson(policyIdentifiers string) ([]string, error) { + var entries []certutil.PolicyIdentifierWithQualifierEntry + var policyIdentifierList []string + err := json.Unmarshal([]byte(policyIdentifiers), &entries) + if err != nil { + return policyIdentifierList, err + } + policyIdentifierList = make([]string, 0, len(entries)) + for _, entry := range entries { + policyString, err := json.Marshal(entry) + if err != nil { + return policyIdentifierList, err + } + policyIdentifierList = append(policyIdentifierList, string(policyString)) + } + return policyIdentifierList, nil +} diff --git a/builtin/logical/pki/path_roles_test.go b/builtin/logical/pki/path_roles_test.go new file mode 100644 index 0000000..98f5e27 --- /dev/null +++ b/builtin/logical/pki/path_roles_test.go @@ -0,0 +1,1162 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "context" + "crypto/x509" + "encoding/asn1" + "encoding/base64" + "encoding/pem" + "fmt" + "testing" + + "github.com/go-errors/errors" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPki_RoleGenerateLease(t *testing.T) { + t.Parallel() + var resp *logical.Response + var err error + b, storage := CreateBackendWithStorage(t) + + roleData := map[string]interface{}{ + "allowed_domains": "myvault.com", + "ttl": "5h", + } + + roleReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/testrole", + Storage: storage, + Data: roleData, + } + + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + roleReq.Operation = logical.ReadOperation + + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + // generate_lease cannot be nil. It either has to be set during role + // creation or has to be filled in by the upgrade code + generateLease := resp.Data["generate_lease"].(*bool) + if generateLease == nil { + t.Fatalf("generate_lease should not be nil") + } + + // By default, generate_lease should be `false` + if *generateLease { + t.Fatalf("generate_lease should not be set by default") + } + + // To test upgrade of generate_lease, we read the storage entry, + // modify it to remove generate_lease, and rewrite it. 
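The JSON round-trip performed by parsePolicyIdentifiersFromJson above can be sketched as follows, assuming a qualifier struct analogous to certutil.PolicyIdentifierWithQualifierEntry (the field and tag names here are assumptions for illustration). Note the sketch uses standard JSON colons, whereas the role help text renders the form with "=" signs.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// qualifiedPolicy approximates the SDK's qualified-policy entry: an
// OID plus optional notice text and CPS URL.
type qualifiedPolicy struct {
	OID    string `json:"oid"`
	Notice string `json:"notice,omitempty"`
	CPS    string `json:"cps,omitempty"`
}

// parsePolicyIdentifiers unmarshals the list, then re-marshals each
// entry into its own storage string, mirroring the function above.
func parsePolicyIdentifiers(raw string) ([]string, error) {
	var entries []qualifiedPolicy
	if err := json.Unmarshal([]byte(raw), &entries); err != nil {
		return nil, err
	}
	out := make([]string, 0, len(entries))
	for _, e := range entries {
		b, err := json.Marshal(e)
		if err != nil {
			return nil, err
		}
		out = append(out, string(b))
	}
	return out, nil
}

func main() {
	got, err := parsePolicyIdentifiers(`[{"oid":"1.3.6.1.4.1.7.8","notice":"I am a user Notice"},{"oid":"1.3.6.1.4.1.44947.1.2.4","cps":"https://example.com"}]`)
	fmt.Println(got, err)
}
```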
+ entry, err := storage.Get(context.Background(), "role/testrole") + if err != nil || entry == nil { + t.Fatal(err) + } + + var role roleEntry + if err := entry.DecodeJSON(&role); err != nil { + t.Fatal(err) + } + + role.GenerateLease = nil + + entry, err = logical.StorageEntryJSON("role/testrole", role) + if err != nil { + t.Fatal(err) + } + if err := storage.Put(context.Background(), entry); err != nil { + t.Fatal(err) + } + + // Reading should upgrade generate_lease + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + generateLease = resp.Data["generate_lease"].(*bool) + if generateLease == nil { + t.Fatalf("generate_lease should not be nil") + } + + // Upgrade should set generate_lease to `true` + if !*generateLease { + t.Fatalf("generate_lease should be set after an upgrade") + } + + // Make sure that setting generate_lease to `true` works properly + roleReq.Operation = logical.UpdateOperation + roleReq.Path = "roles/testrole2" + roleReq.Data["generate_lease"] = true + + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + roleReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + generateLease = resp.Data["generate_lease"].(*bool) + if generateLease == nil { + t.Fatalf("generate_lease should not be nil") + } + if !*generateLease { + t.Fatalf("generate_lease should have been set") + } +} + +func TestPki_RoleKeyUsage(t *testing.T) { + t.Parallel() + var resp *logical.Response + var err error + b, storage := CreateBackendWithStorage(t) + + roleData := map[string]interface{}{ + "allowed_domains": "myvault.com", + "ttl": "5h", + "key_usage": []string{"KeyEncipherment", "DigitalSignature"}, + } + + roleReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/testrole", + Storage: storage, + Data: roleData, + } + + resp, err = b.HandleRequest(context.Background(), roleReq) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route(roleReq.Path), logical.UpdateOperation), resp, true) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + roleReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(context.Background(), roleReq) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route(roleReq.Path), logical.ReadOperation), resp, true) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + keyUsage := resp.Data["key_usage"].([]string) + if len(keyUsage) != 2 { + t.Fatalf("key_usage should have 2 values") + } + + // To test the upgrade of KeyUsageOld into KeyUsage, we read + // the storage entry, modify it to set KUO and unset KU, and + // rewrite it. 
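+	// (KeyUsageOld is the legacy comma-separated string form of the
+	// field; on read, the backend is expected to split it into the
+	// list-valued KeyUsage and blank the old value, which the storage
+	// re-read below asserts.)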
+ entry, err := storage.Get(context.Background(), "role/testrole") + if err != nil || entry == nil { + t.Fatal(err) + } + + var role roleEntry + if err := entry.DecodeJSON(&role); err != nil { + t.Fatal(err) + } + + role.KeyUsageOld = "KeyEncipherment,DigitalSignature" + role.KeyUsage = nil + + entry, err = logical.StorageEntryJSON("role/testrole", role) + if err != nil { + t.Fatal(err) + } + if err := storage.Put(context.Background(), entry); err != nil { + t.Fatal(err) + } + + // Reading should upgrade key_usage + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + keyUsage = resp.Data["key_usage"].([]string) + if len(keyUsage) != 2 { + t.Fatalf("key_usage should have 2 values") + } + + // Read back from storage to ensure upgrade + entry, err = storage.Get(context.Background(), "role/testrole") + if err != nil { + t.Fatalf("err: %v", err) + } + if entry == nil { + t.Fatalf("role should not be nil") + } + var result roleEntry + if err := entry.DecodeJSON(&result); err != nil { + t.Fatalf("err: %v", err) + } + + if result.KeyUsageOld != "" { + t.Fatal("old key usage value should be blank") + } + if len(result.KeyUsage) != 2 { + t.Fatal("key_usage should have 2 values") + } +} + +func TestPki_RoleOUOrganizationUpgrade(t *testing.T) { + t.Parallel() + var resp *logical.Response + var err error + b, storage := CreateBackendWithStorage(t) + + roleData := map[string]interface{}{ + "allowed_domains": "myvault.com", + "ttl": "5h", + "ou": []string{"abc", "123"}, + "organization": []string{"org1", "org2"}, + } + + roleReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/testrole", + Storage: storage, + Data: roleData, + } + + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + roleReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + ou := resp.Data["ou"].([]string) + if len(ou) != 2 { + t.Fatalf("ou should have 2 values") + } + organization := resp.Data["organization"].([]string) + if len(organization) != 2 { + t.Fatalf("organization should have 2 values") + } + + // To test upgrade of O/OU, we read the storage entry, modify it to set + // the old O/OU value over the new one, and rewrite it. 
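+	// (OUOld and OrganizationOld follow the same comma-separated-string
+	// upgrade pattern as KeyUsageOld above.)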
+ entry, err := storage.Get(context.Background(), "role/testrole") + if err != nil || entry == nil { + t.Fatal(err) + } + + var role roleEntry + if err := entry.DecodeJSON(&role); err != nil { + t.Fatal(err) + } + role.OUOld = "abc,123" + role.OU = nil + role.OrganizationOld = "org1,org2" + role.Organization = nil + + entry, err = logical.StorageEntryJSON("role/testrole", role) + if err != nil { + t.Fatal(err) + } + if err := storage.Put(context.Background(), entry); err != nil { + t.Fatal(err) + } + + // Reading should upgrade key_usage + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + ou = resp.Data["ou"].([]string) + if len(ou) != 2 { + t.Fatalf("ou should have 2 values") + } + organization = resp.Data["organization"].([]string) + if len(organization) != 2 { + t.Fatalf("organization should have 2 values") + } + + // Read back from storage to ensure upgrade + entry, err = storage.Get(context.Background(), "role/testrole") + if err != nil { + t.Fatalf("err: %v", err) + } + if entry == nil { + t.Fatalf("role should not be nil") + } + var result roleEntry + if err := entry.DecodeJSON(&result); err != nil { + t.Fatalf("err: %v", err) + } + + if result.OUOld != "" { + t.Fatal("old ou value should be blank") + } + if len(result.OU) != 2 { + t.Fatal("ou should have 2 values") + } + if result.OrganizationOld != "" { + t.Fatal("old organization value should be blank") + } + if len(result.Organization) != 2 { + t.Fatal("organization should have 2 values") + } +} + +func TestPki_RoleAllowedDomains(t *testing.T) { + t.Parallel() + var resp *logical.Response + var err error + b, storage := CreateBackendWithStorage(t) + + roleData := map[string]interface{}{ + "allowed_domains": []string{"foobar.com", "*example.com"}, + "ttl": "5h", + } + + roleReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/testrole", + Storage: storage, + Data: roleData, + } + + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + roleReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + allowedDomains := resp.Data["allowed_domains"].([]string) + if len(allowedDomains) != 2 { + t.Fatalf("allowed_domains should have 2 values") + } + + // To test upgrade of allowed_domains, we read the storage entry, + // set the old one, and rewrite it. 
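+	// (AllowedDomainsOld follows the same comma-separated-string
+	// upgrade pattern as the fields above.)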
+ entry, err := storage.Get(context.Background(), "role/testrole") + if err != nil || entry == nil { + t.Fatal(err) + } + + var role roleEntry + if err := entry.DecodeJSON(&role); err != nil { + t.Fatal(err) + } + role.AllowedDomainsOld = "foobar.com,*example.com" + role.AllowedDomains = nil + + entry, err = logical.StorageEntryJSON("role/testrole", role) + if err != nil { + t.Fatal(err) + } + if err := storage.Put(context.Background(), entry); err != nil { + t.Fatal(err) + } + + // Reading should upgrade key_usage + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + allowedDomains = resp.Data["allowed_domains"].([]string) + if len(allowedDomains) != 2 { + t.Fatalf("allowed_domains should have 2 values") + } + + // Read back from storage to ensure upgrade + entry, err = storage.Get(context.Background(), "role/testrole") + if err != nil { + t.Fatalf("err: %v", err) + } + if entry == nil { + t.Fatalf("role should not be nil") + } + var result roleEntry + if err := entry.DecodeJSON(&result); err != nil { + t.Fatalf("err: %v", err) + } + + if result.AllowedDomainsOld != "" { + t.Fatal("old allowed_domains value should be blank") + } + if len(result.AllowedDomains) != 2 { + t.Fatal("allowed_domains should have 2 values") + } +} + +func TestPki_RoleAllowedURISANs(t *testing.T) { + t.Parallel() + var resp *logical.Response + var err error + b, storage := CreateBackendWithStorage(t) + + roleData := map[string]interface{}{ + "allowed_uri_sans": []string{"http://foobar.com", "spiffe://*"}, + "ttl": "5h", + } + + roleReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/testrole", + Storage: storage, + Data: roleData, + } + + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + roleReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + allowedURISANs := resp.Data["allowed_uri_sans"].([]string) + if len(allowedURISANs) != 2 { + t.Fatalf("allowed_uri_sans should have 2 values") + } +} + +func TestPki_RolePkixFields(t *testing.T) { + t.Parallel() + var resp *logical.Response + var err error + b, storage := CreateBackendWithStorage(t) + + roleData := map[string]interface{}{ + "ttl": "5h", + "country": []string{"c1", "c2"}, + "ou": []string{"abc", "123"}, + "organization": []string{"org1", "org2"}, + "locality": []string{"foocity", "bartown"}, + "province": []string{"bar", "foo"}, + "street_address": []string{"123 foo street", "789 bar avenue"}, + "postal_code": []string{"f00", "b4r"}, + } + + roleReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/testrole_pkixfields", + Storage: storage, + Data: roleData, + } + + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + roleReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + origCountry := roleData["country"].([]string) + respCountry := resp.Data["country"].([]string) + if !strutil.StrListSubset(origCountry, respCountry) { + t.Fatalf("country did not match values 
set in role") + } else if len(origCountry) != len(respCountry) { + t.Fatalf("country did not have same number of values set in role") + } + + origOU := roleData["ou"].([]string) + respOU := resp.Data["ou"].([]string) + if !strutil.StrListSubset(origOU, respOU) { + t.Fatalf("ou did not match values set in role") + } else if len(origOU) != len(respOU) { + t.Fatalf("ou did not have same number of values set in role") + } + + origOrganization := roleData["organization"].([]string) + respOrganization := resp.Data["organization"].([]string) + if !strutil.StrListSubset(origOrganization, respOrganization) { + t.Fatalf("organization did not match values set in role") + } else if len(origOrganization) != len(respOrganization) { + t.Fatalf("organization did not have same number of values set in role") + } + + origLocality := roleData["locality"].([]string) + respLocality := resp.Data["locality"].([]string) + if !strutil.StrListSubset(origLocality, respLocality) { + t.Fatalf("locality did not match values set in role") + } else if len(origLocality) != len(respLocality) { + t.Fatalf("locality did not have same number of values set in role: ") + } + + origProvince := roleData["province"].([]string) + respProvince := resp.Data["province"].([]string) + if !strutil.StrListSubset(origProvince, respProvince) { + t.Fatalf("province did not match values set in role") + } else if len(origProvince) != len(respProvince) { + t.Fatalf("province did not have same number of values set in role") + } + + origStreetAddress := roleData["street_address"].([]string) + respStreetAddress := resp.Data["street_address"].([]string) + if !strutil.StrListSubset(origStreetAddress, respStreetAddress) { + t.Fatalf("street_address did not match values set in role") + } else if len(origStreetAddress) != len(respStreetAddress) { + t.Fatalf("street_address did not have same number of values set in role") + } + + origPostalCode := roleData["postal_code"].([]string) + respPostalCode := resp.Data["postal_code"].([]string) + if !strutil.StrListSubset(origPostalCode, respPostalCode) { + t.Fatalf("postal_code did not match values set in role") + } else if len(origPostalCode) != len(respPostalCode) { + t.Fatalf("postal_code did not have same number of values set in role") + } +} + +func TestPki_RoleNoStore(t *testing.T) { + t.Parallel() + var resp *logical.Response + var err error + b, storage := CreateBackendWithStorage(t) + + roleData := map[string]interface{}{ + "allowed_domains": "myvault.com", + "ttl": "5h", + } + + roleReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/testrole", + Storage: storage, + Data: roleData, + } + + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + roleReq.Operation = logical.ReadOperation + + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + // By default, no_store should be `false` + noStore := resp.Data["no_store"].(bool) + if noStore { + t.Fatalf("no_store should not be set by default") + } + + // By default, allowed_domains_template should be `false` + allowedDomainsTemplate := resp.Data["allowed_domains_template"].(bool) + if allowedDomainsTemplate { + t.Fatalf("allowed_domains_template should not be set by default") + } + + // By default, allowed_uri_sans_template should be `false` + allowedURISANsTemplate := 
resp.Data["allowed_uri_sans_template"].(bool) + if allowedURISANsTemplate { + t.Fatalf("allowed_uri_sans_template should not be set by default") + } + + // Make sure that setting no_store to `true` works properly + roleReq.Operation = logical.UpdateOperation + roleReq.Path = "roles/testrole_nostore" + roleReq.Data["no_store"] = true + roleReq.Data["allowed_domain"] = "myvault.com" + roleReq.Data["allow_subdomains"] = true + roleReq.Data["ttl"] = "5h" + + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + roleReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + noStore = resp.Data["no_store"].(bool) + if !noStore { + t.Fatalf("no_store should have been set to true") + } + + // issue a certificate and test that it's not stored + caData := map[string]interface{}{ + "common_name": "myvault.com", + "ttl": "5h", + "ip_sans": "127.0.0.1", + } + caReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/generate/internal", + Storage: storage, + Data: caData, + } + resp, err = b.HandleRequest(context.Background(), caReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + issueData := map[string]interface{}{ + "common_name": "cert.myvault.com", + "format": "pem", + "ip_sans": "127.0.0.1", + "ttl": "1h", + } + issueReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "issue/testrole_nostore", + Storage: storage, + Data: issueData, + } + + resp, err = b.HandleRequest(context.Background(), issueReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + // list certs + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ListOperation, + Path: "certs", + Storage: storage, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + if len(resp.Data["keys"].([]string)) != 1 { + t.Fatalf("Only the CA certificate should be stored: %#v", resp) + } +} + +func TestPki_CertsLease(t *testing.T) { + t.Parallel() + var resp *logical.Response + var err error + b, storage := CreateBackendWithStorage(t) + + caData := map[string]interface{}{ + "common_name": "myvault.com", + "ttl": "5h", + "ip_sans": "127.0.0.1", + } + + caReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/generate/internal", + Storage: storage, + Data: caData, + } + + resp, err = b.HandleRequest(context.Background(), caReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + roleData := map[string]interface{}{ + "allowed_domains": "myvault.com", + "allow_subdomains": true, + "ttl": "2h", + } + + roleReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/testrole", + Storage: storage, + Data: roleData, + } + + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + issueData := map[string]interface{}{ + "common_name": "cert.myvault.com", + "format": "pem", + "ip_sans": "127.0.0.1", + } + issueReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "issue/testrole", + Storage: storage, + Data: issueData, + } + + resp, err 
= b.HandleRequest(context.Background(), issueReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + if resp.Secret != nil { + t.Fatalf("expected a response that does not contain a secret") + } + + // Turn on the lease generation and issue a certificate. The response + // should have a `Secret` object populated. + roleData["generate_lease"] = true + + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + resp, err = b.HandleRequest(context.Background(), issueReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, resp) + } + + if resp.Secret == nil { + t.Fatalf("expected a response that contains a secret") + } +} + +func TestPki_RolePatch(t *testing.T) { + t.Parallel() + type TestCase struct { + Field string + Before interface{} + Patched interface{} + } + + testCases := []TestCase{ + { + Field: "ttl", + Before: int64(5), + Patched: int64(10), + }, + { + Field: "max_ttl", + Before: int64(5), + Patched: int64(10), + }, + { + Field: "allow_localhost", + Before: true, + Patched: false, + }, + { + Field: "allowed_domains", + Before: []string{"alex", "bob"}, + Patched: []string{"sam", "alex", "frank"}, + }, + { + Field: "allowed_domains_template", + Before: false, + Patched: true, + }, + { + Field: "allow_bare_domains", + Before: true, + Patched: false, + }, + { + Field: "allow_subdomains", + Before: false, + Patched: true, + }, + { + Field: "allow_glob_domains", + Before: true, + Patched: false, + }, + { + Field: "allow_wildcard_certificates", + Before: false, + Patched: true, + }, + { + Field: "allow_any_name", + Before: true, + Patched: false, + }, + { + Field: "enforce_hostnames", + Before: false, + Patched: true, + }, + { + Field: "allow_ip_sans", + Before: true, + Patched: false, + }, + { + Field: "allowed_uri_sans", + Before: []string{"gopher://*"}, + Patched: []string{"https://*"}, + }, + { + Field: "allowed_uri_sans_template", + Before: false, + Patched: true, + }, + { + Field: "allowed_other_sans", + Before: []string{"1.2.3.4;UTF8:magic"}, + Patched: []string{"4.3.2.1;UTF8:cigam"}, + }, + { + Field: "allowed_serial_numbers", + Before: []string{"*"}, + Patched: []string{""}, + }, + { + Field: "server_flag", + Before: true, + Patched: false, + }, + { + Field: "client_flag", + Before: false, + Patched: true, + }, + { + Field: "code_signing_flag", + Before: true, + Patched: false, + }, + { + Field: "email_protection_flag", + Before: false, + Patched: true, + }, + // key_type, key_bits, and signature_bits can't be tested in this setup + // due to their non-default stored nature. 
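+	// (All three are normalized on write; key_bits, for example, is
+	// validated and defaulted according to key_type, so a plain
+	// before/after equality check would not hold for them.)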
+ { + Field: "key_usage", + Before: []string{"DigitialSignature"}, + Patched: []string{"DigitalSignature", "KeyAgreement"}, + }, + { + Field: "ext_key_usage", + Before: []string{"ServerAuth"}, + Patched: []string{"ClientAuth"}, + }, + { + Field: "ext_key_usage_oids", + Before: []string{"1.2.3.4"}, + Patched: []string{"4.3.2.1"}, + }, + { + Field: "use_csr_common_name", + Before: true, + Patched: false, + }, + { + Field: "use_csr_sans", + Before: false, + Patched: true, + }, + { + Field: "ou", + Before: []string{"crypto"}, + Patched: []string{"cryptosec"}, + }, + { + Field: "organization", + Before: []string{"hashicorp"}, + Patched: []string{"dadgarcorp"}, + }, + { + Field: "country", + Before: []string{"US"}, + Patched: []string{"USA"}, + }, + { + Field: "locality", + Before: []string{"Orange"}, + Patched: []string{"Blue"}, + }, + { + Field: "province", + Before: []string{"CA"}, + Patched: []string{"AC"}, + }, + { + Field: "street_address", + Before: []string{"101 First"}, + Patched: []string{"202 Second", "Unit 020"}, + }, + { + Field: "postal_code", + Before: []string{"12345"}, + Patched: []string{"54321-1234"}, + }, + { + Field: "generate_lease", + Before: false, + Patched: true, + }, + { + Field: "no_store", + Before: true, + Patched: false, + }, + { + Field: "require_cn", + Before: false, + Patched: true, + }, + { + Field: "policy_identifiers", + Before: []string{"1.3.6.1.4.1.1.1"}, + Patched: []string{"1.3.6.1.4.1.1.2"}, + }, + { + Field: "basic_constraints_valid_for_non_ca", + Before: true, + Patched: false, + }, + { + Field: "not_before_duration", + Before: int64(30), + Patched: int64(300), + }, + { + Field: "not_after", + Before: "9999-12-31T23:59:59Z", + Patched: "1230-12-31T23:59:59Z", + }, + { + Field: "issuer_ref", + Before: "default", + Patched: "missing", + }, + } + + b, storage := CreateBackendWithStorage(t) + + for index, testCase := range testCases { + var resp *logical.Response + var roleDataResp *logical.Response + var afterRoleDataResp *logical.Response + var err error + + // Create the role + roleData := map[string]interface{}{} + roleData[testCase.Field] = testCase.Before + + roleReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/testrole", + Storage: storage, + Data: roleData, + } + + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad [%d/%v] create: err: %v resp: %#v", index, testCase.Field, err, resp) + } + + // Read the role after creation + roleReq.Operation = logical.ReadOperation + roleDataResp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (roleDataResp != nil && roleDataResp.IsError()) { + t.Fatalf("bad [%d/%v] read: err: %v resp: %#v", index, testCase.Field, err, resp) + } + + beforeRoleData := roleDataResp.Data + + // Patch the role + roleReq.Operation = logical.PatchOperation + roleReq.Data[testCase.Field] = testCase.Patched + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad [%d/%v] patch: err: %v resp: %#v", index, testCase.Field, err, resp) + } + + // Re-read and verify the role + roleReq.Operation = logical.ReadOperation + afterRoleDataResp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (afterRoleDataResp != nil && afterRoleDataResp.IsError()) { + t.Fatalf("bad [%d/%v] read: err: %v resp: %#v", index, testCase.Field, err, resp) + } + + afterRoleData := afterRoleDataResp.Data + + for field, before := range beforeRoleData { 
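+		// Fields such as generate_lease come back from the read as
+		// *bool; dereference both the before and after values so they
+		// compare against the plain bool values in the test cases.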
+ switch typed := before.(type) { + case *bool: + before = *typed + afterRoleData[field] = *(afterRoleData[field].(*bool)) + } + + if field != testCase.Field { + require.Equal(t, before, afterRoleData[field], fmt.Sprintf("bad [%d/%v] compare: non-modified field %v should not be changed", index, testCase.Field, field)) + } else { + require.Equal(t, before, testCase.Before, fmt.Sprintf("bad [%d] compare: modified field %v before should be correct", index, field)) + require.Equal(t, afterRoleData[field], testCase.Patched, fmt.Sprintf("bad [%d] compare: modified field %v after should be correct", index, field)) + } + } + } +} + +func TestPKI_RolePolicyInformation_Flat(t *testing.T) { + t.Parallel() + type TestCase struct { + Input interface{} + ASN interface{} + OidList []string + } + + expectedSimpleAsnExtension := "MBYwCQYHKwYBBAEBATAJBgcrBgEEAQEC" + expectedSimpleOidList := append(*new([]string), "1.3.6.1.4.1.1.1", "1.3.6.1.4.1.1.2") + + testCases := []TestCase{ + { + Input: "1.3.6.1.4.1.1.1,1.3.6.1.4.1.1.2", + ASN: expectedSimpleAsnExtension, + OidList: expectedSimpleOidList, + }, + { + Input: "[{\"oid\":\"1.3.6.1.4.1.1.1\"},{\"oid\":\"1.3.6.1.4.1.1.2\"}]", + ASN: expectedSimpleAsnExtension, + OidList: expectedSimpleOidList, + }, + { + Input: "[{\"oid\":\"1.3.6.1.4.1.7.8\",\"notice\":\"I am a user Notice\"},{\"oid\":\"1.3.6.1.44947.1.2.4\",\"cps\":\"https://example.com\"}]", + ASN: "MF8wLQYHKwYBBAEHCDAiMCAGCCsGAQUFBwICMBQMEkkgYW0gYSB1c2VyIE5vdGljZTAuBgkrBgGC3xMBAgQwITAfBggrBgEFBQcCARYTaHR0cHM6Ly9leGFtcGxlLmNvbQ==", + OidList: append(*new([]string), "1.3.6.1.4.1.7.8", "1.3.6.1.44947.1.2.4"), + }, + } + + b, storage := CreateBackendWithStorage(t) + + caData := map[string]interface{}{ + "common_name": "myvault.com", + "ttl": "5h", + "ip_sans": "127.0.0.1", + } + caReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/generate/internal", + Storage: storage, + Data: caData, + } + caResp, err := b.HandleRequest(context.Background(), caReq) + if err != nil || (caResp != nil && caResp.IsError()) { + t.Fatalf("bad: err: %v resp: %#v", err, caResp) + } + + for index, testCase := range testCases { + var roleResp *logical.Response + var issueResp *logical.Response + var err error + + // Create/update the role + roleData := map[string]interface{}{} + roleData[policyIdentifiersParam] = testCase.Input + + roleReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/testrole", + Storage: storage, + Data: roleData, + } + + roleResp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (roleResp != nil && roleResp.IsError()) { + t.Fatalf("bad [%d], setting policy identifier %v err: %v resp: %#v", index, testCase.Input, err, roleResp) + } + + // Issue Using this role + issueData := map[string]interface{}{} + issueData["common_name"] = "localhost" + issueData["ttl"] = "2s" + + issueReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "issue/testrole", + Storage: storage, + Data: issueData, + } + + issueResp, err = b.HandleRequest(context.Background(), issueReq) + if err != nil || (issueResp != nil && issueResp.IsError()) { + t.Fatalf("bad [%d], setting policy identifier %v err: %v resp: %#v", index, testCase.Input, err, issueResp) + } + + // Validate the OIDs + policyIdentifiers, err := getPolicyIdentifiersOffCertificate(*issueResp) + if err != nil { + t.Fatalf("bad [%d], getting policy identifier from %v err: %v resp: %#v", index, testCase.Input, err, issueResp) + } + if len(policyIdentifiers) != len(testCase.OidList) { + 
t.Fatalf("bad [%d], wrong certificate policy identifier from %v len expected: %d got %d", index, testCase.Input, len(testCase.OidList), len(policyIdentifiers)) + } + for i, identifier := range policyIdentifiers { + if identifier != testCase.OidList[i] { + t.Fatalf("bad [%d], wrong certificate policy identifier from %v expected: %v got %v", index, testCase.Input, testCase.OidList[i], policyIdentifiers[i]) + } + } + // Validate the ASN + certificateAsn, err := getPolicyInformationExtensionOffCertificate(*issueResp) + if err != nil { + t.Fatalf("bad [%d], getting extension from %v err: %v resp: %#v", index, testCase.Input, err, issueResp) + } + certificateB64 := make([]byte, len(certificateAsn)*2) + base64.StdEncoding.Encode(certificateB64, certificateAsn) + certificateString := string(certificateB64[:]) + assert.Contains(t, certificateString, testCase.ASN) + } +} + +func getPolicyIdentifiersOffCertificate(resp logical.Response) ([]string, error) { + stringCertificate := resp.Data["certificate"].(string) + block, _ := pem.Decode([]byte(stringCertificate)) + certificate, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + policyIdentifierStrings := make([]string, len(certificate.PolicyIdentifiers)) + for index, asnOid := range certificate.PolicyIdentifiers { + policyIdentifierStrings[index] = asnOid.String() + } + return policyIdentifierStrings, nil +} + +func getPolicyInformationExtensionOffCertificate(resp logical.Response) ([]byte, error) { + stringCertificate := resp.Data["certificate"].(string) + block, _ := pem.Decode([]byte(stringCertificate)) + certificate, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + for _, extension := range certificate.Extensions { + if extension.Id.Equal(asn1.ObjectIdentifier{2, 5, 29, 32}) { + return extension.Value, nil + } + } + return *new([]byte), errors.New("No Policy Information Extension Found") +} diff --git a/builtin/logical/pki/path_root.go b/builtin/logical/pki/path_root.go new file mode 100644 index 0000000..fc5476b --- /dev/null +++ b/builtin/logical/pki/path_root.go @@ -0,0 +1,647 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "net/http" + "reflect" + "strings" + "time" + + "golang.org/x/crypto/ed25519" + + "github.com/hashicorp/vault/sdk/helper/certutil" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathGenerateRoot(b *backend) *framework.Path { + pattern := "root/generate/" + framework.GenericNameRegex("exported") + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "generate", + OperationSuffix: "root", + } + + return buildPathGenerateRoot(b, pattern, displayAttrs) +} + +func pathDeleteRoot(b *backend) *framework.Path { + ret := &framework.Path{ + Pattern: "root", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationSuffix: "root", + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathCADeleteRoot, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + }}, + }, + // Read more about why these flags are set in backend.go + ForwardPerformanceStandby: true, + ForwardPerformanceSecondary: true, + }, + }, + + HelpSynopsis: pathDeleteRootHelpSyn, + HelpDescription: pathDeleteRootHelpDesc, + } + + return ret +} + +func (b *backend) pathCADeleteRoot(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + // Since we're planning on updating issuers here, grab the lock so we've + // got a consistent view. + b.issuersLock.Lock() + defer b.issuersLock.Unlock() + + sc := b.makeStorageContext(ctx, req.Storage) + if !b.useLegacyBundleCaStorage() { + issuers, err := sc.listIssuers() + if err != nil { + return nil, err + } + + keys, err := sc.listKeys() + if err != nil { + return nil, err + } + + // Delete all issuers and keys. Ignore deleting the default since we're + // explicitly deleting everything. + for _, issuer := range issuers { + if _, err = sc.deleteIssuer(issuer); err != nil { + return nil, err + } + } + for _, key := range keys { + if _, err = sc.deleteKey(key); err != nil { + return nil, err + } + } + } + + // Delete legacy CA bundle and its backup, if any. + if err := req.Storage.Delete(ctx, legacyCertBundlePath); err != nil { + return nil, err + } + + if err := req.Storage.Delete(ctx, legacyCertBundleBackupPath); err != nil { + return nil, err + } + + // Delete legacy CRL bundle. + if err := req.Storage.Delete(ctx, legacyCRLPath); err != nil { + return nil, err + } + + // Return a warning about preferring to delete issuers and keys + // explicitly versus deleting everything. + resp := &logical.Response{} + resp.AddWarning("DELETE /root deletes all keys and issuers; prefer the new DELETE /key/:key_ref and DELETE /issuer/:issuer_ref for finer granularity, unless removal of all keys and issuers is desired.") + return resp, nil +} + +func (b *backend) pathCAGenerateRoot(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Since we're planning on updating issuers here, grab the lock so we've + // got a consistent view. 
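+	// This handler serves root/generate/:exported (and the root/rotate/
+	// alias handled below); a typical invocation, against an
+	// illustrative mount named "pki", looks like:
+	//
+	//     $ vault write pki/root/generate/internal \
+	//         common_name="example.com" ttl=87600h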
+ b.issuersLock.Lock() + defer b.issuersLock.Unlock() + + var err error + + if b.useLegacyBundleCaStorage() { + return logical.ErrorResponse("Can not create root CA until migration has completed"), nil + } + + sc := b.makeStorageContext(ctx, req.Storage) + + exported, format, role, errorResp := getGenerationParams(sc, data) + if errorResp != nil { + return errorResp, nil + } + + maxPathLengthIface, ok := data.GetOk("max_path_length") + if ok { + maxPathLength := maxPathLengthIface.(int) + role.MaxPathLength = &maxPathLength + } + + issuerName, err := getIssuerName(sc, data) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + // Handle the aliased path specifying the new issuer name as "next", but + // only do it if its not in use. + if strings.HasPrefix(req.Path, "root/rotate/") && len(issuerName) == 0 { + // err is nil when the issuer name is in use. + _, err = sc.resolveIssuerReference("next") + if err != nil { + issuerName = "next" + } + } + + keyName, err := getKeyName(sc, data) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + input := &inputBundle{ + req: req, + apiData: data, + role: role, + } + parsedBundle, warnings, err := generateCert(sc, input, nil, true, b.Backend.GetRandomReader()) + if err != nil { + switch err.(type) { + case errutil.UserError: + return logical.ErrorResponse(err.Error()), nil + default: + return nil, err + } + } + + cb, err := parsedBundle.ToCertBundle() + if err != nil { + return nil, fmt.Errorf("error converting raw cert bundle to cert bundle: %w", err) + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "expiration": int64(parsedBundle.Certificate.NotAfter.Unix()), + "serial_number": cb.SerialNumber, + }, + } + + if len(parsedBundle.Certificate.RawSubject) <= 2 { + // Strictly a subject is a SEQUENCE of SETs of SEQUENCES. + // + // The outer SEQUENCE is preserved, having byte value 30 00. + // + // Because of the tag and the length encoding each taking up + // at least one byte, it is impossible to have a non-empty + // subject in two or fewer bytes. We're also not here to validate + // our certificate's ASN.1 content, so let's just assume it holds + // and move on. + resp.AddWarning("This issuer certificate was generated without a Subject; this makes it likely that issuing leaf certs with this certificate will cause TLS validation libraries to reject this certificate.") + } + + if len(parsedBundle.Certificate.OCSPServer) == 0 && len(parsedBundle.Certificate.IssuingCertificateURL) == 0 && len(parsedBundle.Certificate.CRLDistributionPoints) == 0 { + // If the operator hasn't configured any of the URLs prior to + // generating this issuer, we should add a warning to the response, + // informing them they might want to do so prior to issuing leaves. + resp.AddWarning("This mount hasn't configured any authority information access (AIA) fields; this may make it harder for systems to find missing certificates in the chain or to validate revocation status of certificates. 
Consider updating /config/urls or the newly generated issuer with this information.") + } + + switch format { + case "pem": + resp.Data["certificate"] = cb.Certificate + resp.Data["issuing_ca"] = cb.Certificate + if exported { + resp.Data["private_key"] = cb.PrivateKey + resp.Data["private_key_type"] = cb.PrivateKeyType + } + + case "pem_bundle": + resp.Data["issuing_ca"] = cb.Certificate + + if exported { + resp.Data["private_key"] = cb.PrivateKey + resp.Data["private_key_type"] = cb.PrivateKeyType + resp.Data["certificate"] = fmt.Sprintf("%s\n%s", cb.PrivateKey, cb.Certificate) + } else { + resp.Data["certificate"] = cb.Certificate + } + + case "der": + resp.Data["certificate"] = base64.StdEncoding.EncodeToString(parsedBundle.CertificateBytes) + resp.Data["issuing_ca"] = base64.StdEncoding.EncodeToString(parsedBundle.CertificateBytes) + if exported { + resp.Data["private_key"] = base64.StdEncoding.EncodeToString(parsedBundle.PrivateKeyBytes) + resp.Data["private_key_type"] = cb.PrivateKeyType + } + default: + return nil, fmt.Errorf("unsupported format argument: %s", format) + } + + if data.Get("private_key_format").(string) == "pkcs8" { + err = convertRespToPKCS8(resp) + if err != nil { + return nil, err + } + } + + // Store it as the CA bundle + myIssuer, myKey, err := sc.writeCaBundle(cb, issuerName, keyName) + if err != nil { + return nil, err + } + resp.Data["issuer_id"] = myIssuer.ID + resp.Data["issuer_name"] = myIssuer.Name + resp.Data["key_id"] = myKey.ID + resp.Data["key_name"] = myKey.Name + + // The one time that it is safe (and good) to copy the + // SignatureAlgorithm field off the certificate (for the purposes of + // detecting PSS support) is when we've freshly generated it AND it + // is a root (exactly this endpoint). + // + // For intermediates, this doesn't hold (not this endpoint) as that + // reflects the parent key's preferences. For imports, this doesn't + // hold as the old system might've allowed other signature types that + // the new system (whether Vault or a managed key) doesn't. + // + // Previously we did this conditionally on whether or not PSS was in + // use. This is insufficient as some cloud KMS providers (namely, GCP) + // restrict the key to a single signature algorithm! So e.g., a RSA 3072 + // key MUST use SHA-384 as the hash algorithm. Thus we pull in the + // RevocationSigAlg unconditionally on roots now. + myIssuer.RevocationSigAlg = parsedBundle.Certificate.SignatureAlgorithm + if err := sc.writeIssuer(myIssuer); err != nil { + return nil, fmt.Errorf("unable to store PSS-updated issuer: %w", err) + } + + // Also store it as just the certificate identified by serial number, so it + // can be revoked + key := "certs/" + normalizeSerial(cb.SerialNumber) + certsCounted := b.certsCounted.Load() + err = req.Storage.Put(ctx, &logical.StorageEntry{ + Key: key, + Value: parsedBundle.CertificateBytes, + }) + if err != nil { + return nil, fmt.Errorf("unable to store certificate locally: %w", err) + } + b.ifCountEnabledIncrementTotalCertificatesCount(certsCounted, key) + + // Build a fresh CRL + warnings, err = b.crlBuilder.rebuild(sc, true) + if err != nil { + return nil, err + } + for index, warning := range warnings { + resp.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning)) + } + + if parsedBundle.Certificate.MaxPathLen == 0 { + resp.AddWarning("Max path length of the generated certificate is zero. 
This certificate cannot be used to issue intermediate CA certificates.") + } + + // Check whether we need to update our default issuer configuration. + config, err := sc.getIssuersConfig() + if err != nil { + resp.AddWarning("Unable to fetch default issuers configuration to update default issuer if necessary: " + err.Error()) + } else if config.DefaultFollowsLatestIssuer { + if err := sc.updateDefaultIssuerId(myIssuer.ID); err != nil { + resp.AddWarning("Unable to update this new root as the default issuer: " + err.Error()) + } + } + + resp = addWarnings(resp, warnings) + + return resp, nil +} + +func (b *backend) pathIssuerSignIntermediate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + var err error + + issuerName := getIssuerRef(data) + if len(issuerName) == 0 { + return logical.ErrorResponse("missing issuer reference"), nil + } + + format := getFormat(data) + if format == "" { + return logical.ErrorResponse( + `The "format" path parameter must be "pem" or "der"`, + ), nil + } + + role := &roleEntry{ + OU: data.Get("ou").([]string), + Organization: data.Get("organization").([]string), + Country: data.Get("country").([]string), + Locality: data.Get("locality").([]string), + Province: data.Get("province").([]string), + StreetAddress: data.Get("street_address").([]string), + PostalCode: data.Get("postal_code").([]string), + TTL: time.Duration(data.Get("ttl").(int)) * time.Second, + AllowLocalhost: true, + AllowAnyName: true, + AllowIPSANs: true, + AllowWildcardCertificates: new(bool), + EnforceHostnames: false, + KeyType: "any", + SignatureBits: data.Get("signature_bits").(int), + UsePSS: data.Get("use_pss").(bool), + AllowedOtherSANs: []string{"*"}, + AllowedSerialNumbers: []string{"*"}, + AllowedURISANs: []string{"*"}, + NotAfter: data.Get("not_after").(string), + NotBeforeDuration: time.Duration(data.Get("not_before_duration").(int)) * time.Second, + CNValidations: []string{"disabled"}, + } + *role.AllowWildcardCertificates = true + + if cn := data.Get("common_name").(string); len(cn) == 0 { + role.UseCSRCommonName = true + } + + var caErr error + sc := b.makeStorageContext(ctx, req.Storage) + signingBundle, caErr := sc.fetchCAInfo(issuerName, IssuanceUsage) + if caErr != nil { + switch caErr.(type) { + case errutil.UserError: + return nil, errutil.UserError{Err: fmt.Sprintf( + "could not fetch the CA certificate (was one set?): %s", caErr)} + default: + return nil, errutil.InternalError{Err: fmt.Sprintf( + "error fetching CA certificate: %s", caErr)} + } + } + + // Since we are signing an intermediate, we explicitly want to override + // the leaf NotAfterBehavior to permit issuing intermediates longer than + // the life of this issuer. 
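+	// (certutil also defines Err and Truncate NotAfter behaviors, which
+	// reject or clamp a leaf NotAfter that falls past the issuer's own
+	// expiry; Permit is the deliberate opt-out used here.)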
+ signingBundle.LeafNotAfterBehavior = certutil.PermitNotAfterBehavior + + useCSRValues := data.Get("use_csr_values").(bool) + + maxPathLengthIface, ok := data.GetOk("max_path_length") + if ok { + maxPathLength := maxPathLengthIface.(int) + role.MaxPathLength = &maxPathLength + } + + input := &inputBundle{ + req: req, + apiData: data, + role: role, + } + parsedBundle, warnings, err := signCert(b, input, signingBundle, true, useCSRValues) + if err != nil { + switch err.(type) { + case errutil.UserError: + return logical.ErrorResponse(err.Error()), nil + default: + return nil, errutil.InternalError{Err: fmt.Sprintf( + "error signing cert: %s", err)} + } + } + + if err := parsedBundle.Verify(); err != nil { + return nil, fmt.Errorf("verification of parsed bundle failed: %w", err) + } + + signingCB, err := signingBundle.ToCertBundle() + if err != nil { + return nil, fmt.Errorf("error converting raw signing bundle to cert bundle: %w", err) + } + + cb, err := parsedBundle.ToCertBundle() + if err != nil { + return nil, fmt.Errorf("error converting raw cert bundle to cert bundle: %w", err) + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "expiration": int64(parsedBundle.Certificate.NotAfter.Unix()), + "serial_number": cb.SerialNumber, + }, + } + + if signingBundle.Certificate.NotAfter.Before(parsedBundle.Certificate.NotAfter) { + resp.AddWarning("The expiration time for the signed certificate is after the CA's expiration time. If the new certificate is not treated as a root, validation paths with the certificate past the issuing CA's expiration time will fail.") + } + + if len(parsedBundle.Certificate.RawSubject) <= 2 { + // Strictly a subject is a SEQUENCE of SETs of SEQUENCES. + // + // The outer SEQUENCE is preserved, having byte value 30 00. + // + // Because of the tag and the length encoding each taking up + // at least one byte, it is impossible to have a non-empty + // subject in two or fewer bytes. We're also not here to validate + // our certificate's ASN.1 content, so let's just assume it holds + // and move on. + resp.AddWarning("This issuer certificate was generated without a Subject; this makes it likely that issuing leaf certs with this certificate will cause TLS validation libraries to reject this certificate.") + } + + if len(parsedBundle.Certificate.OCSPServer) == 0 && len(parsedBundle.Certificate.IssuingCertificateURL) == 0 && len(parsedBundle.Certificate.CRLDistributionPoints) == 0 { + // If the operator hasn't configured any of the URLs prior to + // generating this issuer, we should add a warning to the response, + // informing them they might want to do so prior to issuing leaves. + resp.AddWarning("This mount hasn't configured any authority information access (AIA) fields; this may make it harder for systems to find missing certificates in the chain or to validate revocation status of certificates. Consider updating /config/urls or the newly generated issuer with this information.") + } + + caChain := append([]string{cb.Certificate}, cb.CAChain...) 
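+	// For reference, this endpoint is typically driven from the CLI,
+	// e.g. (mount name and CSR file are illustrative):
+	//
+	//     $ vault write pki/root/sign-intermediate \
+	//         csr=@pki_intermediate.csr format=pem_bundle ttl=43800h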
+ + switch format { + case "pem": + resp.Data["certificate"] = cb.Certificate + resp.Data["issuing_ca"] = signingCB.Certificate + resp.Data["ca_chain"] = caChain + + case "pem_bundle": + resp.Data["certificate"] = cb.ToPEMBundle() + resp.Data["issuing_ca"] = signingCB.Certificate + resp.Data["ca_chain"] = caChain + + case "der": + resp.Data["certificate"] = base64.StdEncoding.EncodeToString(parsedBundle.CertificateBytes) + resp.Data["issuing_ca"] = base64.StdEncoding.EncodeToString(signingBundle.CertificateBytes) + + var derCaChain []string + derCaChain = append(derCaChain, base64.StdEncoding.EncodeToString(parsedBundle.CertificateBytes)) + for _, caCert := range parsedBundle.CAChain { + derCaChain = append(derCaChain, base64.StdEncoding.EncodeToString(caCert.Bytes)) + } + resp.Data["ca_chain"] = derCaChain + + default: + return nil, fmt.Errorf("unsupported format argument: %s", format) + } + + key := "certs/" + normalizeSerial(cb.SerialNumber) + certsCounted := b.certsCounted.Load() + err = req.Storage.Put(ctx, &logical.StorageEntry{ + Key: key, + Value: parsedBundle.CertificateBytes, + }) + if err != nil { + return nil, fmt.Errorf("unable to store certificate locally: %w", err) + } + b.ifCountEnabledIncrementTotalCertificatesCount(certsCounted, key) + + if parsedBundle.Certificate.MaxPathLen == 0 { + resp.AddWarning("Max path length of the signed certificate is zero. This certificate cannot be used to issue intermediate CA certificates.") + } + + resp = addWarnings(resp, warnings) + + return resp, nil +} + +func (b *backend) pathIssuerSignSelfIssued(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + var err error + + issuerName := getIssuerRef(data) + if len(issuerName) == 0 { + return logical.ErrorResponse("missing issuer reference"), nil + } + + certPem := data.Get("certificate").(string) + block, _ := pem.Decode([]byte(certPem)) + if block == nil || len(block.Bytes) == 0 { + return logical.ErrorResponse("certificate could not be PEM-decoded"), nil + } + certs, err := x509.ParseCertificates(block.Bytes) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("error parsing certificate: %s", err)), nil + } + if len(certs) != 1 { + return logical.ErrorResponse(fmt.Sprintf("%d certificates found in PEM file, expected 1", len(certs))), nil + } + + cert := certs[0] + if !cert.IsCA { + return logical.ErrorResponse("given certificate is not a CA certificate"), nil + } + if !reflect.DeepEqual(cert.Issuer, cert.Subject) { + return logical.ErrorResponse("given certificate is not self-issued"), nil + } + + var caErr error + sc := b.makeStorageContext(ctx, req.Storage) + signingBundle, caErr := sc.fetchCAInfo(issuerName, IssuanceUsage) + if caErr != nil { + switch caErr.(type) { + case errutil.UserError: + return nil, errutil.UserError{Err: fmt.Sprintf( + "could not fetch the CA certificate (was one set?): %s", caErr)} + default: + return nil, errutil.InternalError{Err: fmt.Sprintf("error fetching CA certificate: %s", caErr)} + } + } + + signingCB, err := signingBundle.ToCertBundle() + if err != nil { + return nil, fmt.Errorf("error converting raw signing bundle to cert bundle: %w", err) + } + + urls := &certutil.URLEntries{} + if signingBundle.URLs != nil { + urls = signingBundle.URLs + } + cert.IssuingCertificateURL = urls.IssuingCertificates + cert.CRLDistributionPoints = urls.CRLDistributionPoints + cert.OCSPServer = urls.OCSPServers + + // If the requested signature algorithm isn't the same as the signing certificate, and + // the user 
has requested a cross-algorithm signature, reset the template's signing algorithm + // to that of the signing key + signingPubType, signingAlgorithm, err := publicKeyType(signingBundle.Certificate.PublicKey) + if err != nil { + return nil, fmt.Errorf("error determining signing certificate algorithm type: %e", err) + } + certPubType, _, err := publicKeyType(cert.PublicKey) + if err != nil { + return nil, fmt.Errorf("error determining template algorithm type: %e", err) + } + + if signingPubType != certPubType { + b, ok := data.GetOk("require_matching_certificate_algorithms") + if !ok || !b.(bool) { + cert.SignatureAlgorithm = signingAlgorithm + } else { + return nil, fmt.Errorf("signing certificate's public key algorithm (%s) does not match submitted certificate's (%s), and require_matching_certificate_algorithms is true", + signingPubType.String(), certPubType.String()) + } + } + + newCert, err := x509.CreateCertificate(rand.Reader, cert, signingBundle.Certificate, cert.PublicKey, signingBundle.PrivateKey) + if err != nil { + return nil, fmt.Errorf("error signing self-issued certificate: %w", err) + } + if len(newCert) == 0 { + return nil, fmt.Errorf("nil cert was created when signing self-issued certificate") + } + pemCert := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: newCert, + }) + + return &logical.Response{ + Data: map[string]interface{}{ + "certificate": strings.TrimSpace(string(pemCert)), + "issuing_ca": signingCB.Certificate, + }, + }, nil +} + +// Adapted from similar code in https://github.com/golang/go/blob/4a4221e8187189adcc6463d2d96fe2e8da290132/src/crypto/x509/x509.go#L1342, +// may need to be updated in the future. +func publicKeyType(pub crypto.PublicKey) (pubType x509.PublicKeyAlgorithm, sigAlgo x509.SignatureAlgorithm, err error) { + switch pub := pub.(type) { + case *rsa.PublicKey: + pubType = x509.RSA + sigAlgo = x509.SHA256WithRSA + case *ecdsa.PublicKey: + pubType = x509.ECDSA + switch pub.Curve { + case elliptic.P224(), elliptic.P256(): + sigAlgo = x509.ECDSAWithSHA256 + case elliptic.P384(): + sigAlgo = x509.ECDSAWithSHA384 + case elliptic.P521(): + sigAlgo = x509.ECDSAWithSHA512 + default: + err = errors.New("x509: unknown elliptic curve") + } + case ed25519.PublicKey: + pubType = x509.Ed25519 + sigAlgo = x509.PureEd25519 + default: + err = errors.New("x509: only RSA, ECDSA and Ed25519 keys supported") + } + return +} + +const pathGenerateRootHelpSyn = ` +Generate a new CA certificate and private key used for signing. +` + +const pathGenerateRootHelpDesc = ` +See the API documentation for more information. +` + +const pathDeleteRootHelpSyn = ` +Deletes the root CA key to allow a new one to be generated. +` + +const pathDeleteRootHelpDesc = ` +See the API documentation for more information. +` diff --git a/builtin/logical/pki/path_sign_issuers.go b/builtin/logical/pki/path_sign_issuers.go new file mode 100644 index 0000000..0b6b833 --- /dev/null +++ b/builtin/logical/pki/path_sign_issuers.go @@ -0,0 +1,259 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "net/http" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathIssuerSignIntermediate(b *backend) *framework.Path { + pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/sign-intermediate" + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuer, + OperationVerb: "sign", + OperationSuffix: "intermediate", + } + + return buildPathIssuerSignIntermediateRaw(b, pattern, displayAttrs) +} + +func pathSignIntermediate(b *backend) *framework.Path { + pattern := "root/sign-intermediate" + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIRoot, + OperationVerb: "sign", + OperationSuffix: "intermediate", + } + + return buildPathIssuerSignIntermediateRaw(b, pattern, displayAttrs) +} + +func buildPathIssuerSignIntermediateRaw(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { + fields := addIssuerRefField(map[string]*framework.FieldSchema{}) + path := &framework.Path{ + Pattern: pattern, + DisplayAttrs: displayAttrs, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathIssuerSignIntermediate, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "expiration": { + Type: framework.TypeInt64, + Description: `Expiration Time`, + Required: true, + }, + "serial_number": { + Type: framework.TypeString, + Description: `Serial Number`, + Required: false, + }, + "certificate": { + Type: framework.TypeString, + Description: `Certificate`, + Required: true, + }, + "issuing_ca": { + Type: framework.TypeString, + Description: `Issuing CA`, + Required: true, + }, + "ca_chain": { + Type: framework.TypeStringSlice, + Description: `CA Chain`, + Required: true, + }, + }, + }}, + }, + }, + }, + + HelpSynopsis: pathIssuerSignIntermediateHelpSyn, + HelpDescription: pathIssuerSignIntermediateHelpDesc, + } + + path.Fields = addCACommonFields(path.Fields) + path.Fields = addCAIssueFields(path.Fields) + + path.Fields["csr"] = &framework.FieldSchema{ + Type: framework.TypeString, + Default: "", + Description: `PEM-format CSR to be signed.`, + } + + path.Fields["use_csr_values"] = &framework.FieldSchema{ + Type: framework.TypeBool, + Default: false, + Description: `If true, then: +1) Subject information, including names and alternate +names, will be preserved from the CSR rather than +using values provided in the other parameters to +this path; +2) Any key usages requested in the CSR will be +added to the basic set of key usages used for CA +certs signed by this path; for instance, +the non-repudiation flag; +3) Extensions requested in the CSR will be copied +into the issued certificate.`, + } + + fields["signature_bits"] = &framework.FieldSchema{ + Type: framework.TypeInt, + Default: 0, + Description: `The number of bits to use in the signature +algorithm; accepts 256 for SHA-2-256, 384 for SHA-2-384, and 512 for +SHA-2-512. Defaults to 0 to automatically detect based on key length +(SHA-2-256 for RSA keys, and matching the curve size for NIST P-Curves).`, + DisplayAttrs: &framework.DisplayAttributes{ + Value: 0, + }, + } + + fields["skid"] = &framework.FieldSchema{ + Type: framework.TypeString, + Default: "", + Description: `Value for the Subject Key Identifier field +(RFC 5280 Section 4.2.1.2). 
This value should ONLY be used when +cross-signing to mimic the existing certificate's SKID value; this +is necessary to allow certain TLS implementations (such as OpenSSL) +which use SKID/AKID matches in chain building to restrict possible +valid chains. + +Specified as a string in hex format. Default is empty, allowing +Vault to automatically calculate the SKID according to method one +in the above RFC section.`, + DisplayAttrs: &framework.DisplayAttributes{ + Value: "", + }, + } + + fields["use_pss"] = &framework.FieldSchema{ + Type: framework.TypeBool, + Default: false, + Description: `Whether or not to use PSS signatures when using a +RSA key-type issuer. Defaults to false.`, + } + + return path +} + +const ( + pathIssuerSignIntermediateHelpSyn = `Issue an intermediate CA certificate based on the provided CSR.` + pathIssuerSignIntermediateHelpDesc = ` +This API endpoint allows for signing the specified CSR, adding to it a basic +constraint for IsCA=True. This allows the issued certificate to issue its own +leaf certificates. + +Note that the resulting certificate is not imported as an issuer in this PKI +mount. This means that you can use the resulting certificate in another Vault +PKI mount point or to issue an external intermediate (e.g., for use with +another X.509 CA). + +See the API documentation for more information about required parameters. +` +) + +func pathIssuerSignSelfIssued(b *backend) *framework.Path { + pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/sign-self-issued" + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIIssuer, + OperationVerb: "sign", + OperationSuffix: "self-issued", + } + + return buildPathIssuerSignSelfIssued(b, pattern, displayAttrs) +} + +func pathSignSelfIssued(b *backend) *framework.Path { + pattern := "root/sign-self-issued" + + displayAttrs := &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKIRoot, + OperationVerb: "sign", + OperationSuffix: "self-issued", + } + + return buildPathIssuerSignSelfIssued(b, pattern, displayAttrs) +} + +func buildPathIssuerSignSelfIssued(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { + fields := map[string]*framework.FieldSchema{ + "certificate": { + Type: framework.TypeString, + Description: `PEM-format self-issued certificate to be signed.`, + }, + "require_matching_certificate_algorithms": { + Type: framework.TypeBool, + Default: false, + Description: `If true, require the public key algorithm of the signer to match that of the self issued certificate.`, + }, + } + fields = addIssuerRefField(fields) + path := &framework.Path{ + Pattern: pattern, + DisplayAttrs: displayAttrs, + Fields: fields, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathIssuerSignSelfIssued, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "certificate": { + Type: framework.TypeString, + Description: `Certificate`, + Required: true, + }, + "issuing_ca": { + Type: framework.TypeString, + Description: `Issuing CA`, + Required: true, + }, + }, + }}, + }, + }, + }, + + HelpSynopsis: pathIssuerSignSelfIssuedHelpSyn, + HelpDescription: pathIssuerSignSelfIssuedHelpDesc, + } + + return path +} + +const ( + pathIssuerSignSelfIssuedHelpSyn = `Re-issue a self-signed certificate based on the provided certificate.` + pathIssuerSignSelfIssuedHelpDesc = ` +This API endpoint 
allows for signing the specified self-signed certificate, +effectively allowing cross-signing of external root CAs. This allows for an +alternative validation path, chaining back through this PKI mount. This +endpoint is also useful in a rolling-root scenario, allowing devices to trust +and validate later (or earlier) root certificates and their issued leaves. + +Usually the sign-intermediate operation is preferred to this operation. + +Note that this is a very privileged operation and should be extremely +restricted in terms of who is allowed to use it. All values will be taken +directly from the incoming certificate and only verification that it is +self-issued will be performed. + +Configured URLs for CRLs/OCSP/etc. will be copied over and the issuer will +be this mount's CA cert. Other than that, all other values will be used +verbatim from the given certificate. + +See the API documentation for more information about required parameters. +` +) diff --git a/builtin/logical/pki/path_tidy.go b/builtin/logical/pki/path_tidy.go new file mode 100644 index 0000000..9064063 --- /dev/null +++ b/builtin/logical/pki/path_tidy.go @@ -0,0 +1,2094 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "context" + "crypto/x509" + "errors" + "fmt" + "net/http" + "sync/atomic" + "time" + + "github.com/armon/go-metrics" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" +) + +var tidyCancelledError = errors.New("tidy operation cancelled") + +type tidyStatusState int + +const ( + tidyStatusInactive tidyStatusState = iota + tidyStatusStarted = iota + tidyStatusFinished = iota + tidyStatusError = iota + tidyStatusCancelling = iota + tidyStatusCancelled = iota +) + +type tidyStatus struct { + // Parameters used to initiate the operation + safetyBuffer int + issuerSafetyBuffer int + revQueueSafetyBuffer int + acmeAccountSafetyBuffer int + + tidyCertStore bool + tidyRevokedCerts bool + tidyRevokedAssocs bool + tidyExpiredIssuers bool + tidyBackupBundle bool + tidyRevocationQueue bool + tidyCrossRevokedCerts bool + tidyAcme bool + pauseDuration string + + // Status + state tidyStatusState + err error + timeStarted time.Time + timeFinished time.Time + message string + + // These counts use a custom incrementer that grab and release + // a lock prior to reading. 
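+	// (Incremented via the tidyStatusInc*Count helpers defined later in
+	// this file.)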
+ certStoreDeletedCount uint + revokedCertDeletedCount uint + missingIssuerCertCount uint + revQueueDeletedCount uint + crossRevokedDeletedCount uint + + acmeAccountsCount uint + acmeAccountsRevokedCount uint + acmeAccountsDeletedCount uint + acmeOrdersDeletedCount uint +} + +type tidyConfig struct { + // AutoTidy config + Enabled bool `json:"enabled"` + Interval time.Duration `json:"interval_duration"` + + // Tidy Operations + CertStore bool `json:"tidy_cert_store"` + RevokedCerts bool `json:"tidy_revoked_certs"` + IssuerAssocs bool `json:"tidy_revoked_cert_issuer_associations"` + ExpiredIssuers bool `json:"tidy_expired_issuers"` + BackupBundle bool `json:"tidy_move_legacy_ca_bundle"` + RevocationQueue bool `json:"tidy_revocation_queue"` + CrossRevokedCerts bool `json:"tidy_cross_cluster_revoked_certs"` + TidyAcme bool `json:"tidy_acme"` + + // Safety Buffers + SafetyBuffer time.Duration `json:"safety_buffer"` + IssuerSafetyBuffer time.Duration `json:"issuer_safety_buffer"` + QueueSafetyBuffer time.Duration `json:"revocation_queue_safety_buffer"` + AcmeAccountSafetyBuffer time.Duration `json:"acme_account_safety_buffer"` + PauseDuration time.Duration `json:"pause_duration"` + + // Metrics. + MaintainCount bool `json:"maintain_stored_certificate_counts"` + PublishMetrics bool `json:"publish_stored_certificate_count_metrics"` +} + +func (tc *tidyConfig) IsAnyTidyEnabled() bool { + return tc.CertStore || tc.RevokedCerts || tc.IssuerAssocs || tc.ExpiredIssuers || tc.BackupBundle || tc.TidyAcme || tc.CrossRevokedCerts || tc.RevocationQueue +} + +func (tc *tidyConfig) AnyTidyConfig() string { + return "tidy_cert_store / tidy_revoked_certs / tidy_revoked_cert_issuer_associations / tidy_expired_issuers / tidy_move_legacy_ca_bundle / tidy_revocation_queue / tidy_cross_cluster_revoked_certs / tidy_acme" +} + +var defaultTidyConfig = tidyConfig{ + Enabled: false, + Interval: 12 * time.Hour, + CertStore: false, + RevokedCerts: false, + IssuerAssocs: false, + ExpiredIssuers: false, + BackupBundle: false, + TidyAcme: false, + SafetyBuffer: 72 * time.Hour, + IssuerSafetyBuffer: 365 * 24 * time.Hour, + AcmeAccountSafetyBuffer: 30 * 24 * time.Hour, + PauseDuration: 0 * time.Second, + MaintainCount: false, + PublishMetrics: false, + RevocationQueue: false, + QueueSafetyBuffer: 48 * time.Hour, + CrossRevokedCerts: false, +} + +func pathTidy(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "tidy$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "tidy", + }, + + Fields: addTidyFields(map[string]*framework.FieldSchema{}), + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathTidyWrite, + Responses: map[int][]framework.Response{ + http.StatusAccepted: {{ + Description: "Accepted", + Fields: map[string]*framework.FieldSchema{}, + }}, + }, + ForwardPerformanceStandby: true, + }, + }, + HelpSynopsis: pathTidyHelpSyn, + HelpDescription: pathTidyHelpDesc, + } +} + +func pathTidyCancel(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "tidy-cancel$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "tidy", + OperationSuffix: "cancel", + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathTidyCancelWrite, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: 
map[string]*framework.FieldSchema{
+						"safety_buffer": {
+							Type:        framework.TypeInt,
+							Description: `Safety buffer time duration`,
+							Required:    false,
+						},
+						"issuer_safety_buffer": {
+							Type:        framework.TypeInt,
+							Description: `Issuer safety buffer`,
+							Required:    false,
+						},
+						"revocation_queue_safety_buffer": {
+							Type:        framework.TypeInt,
+							Description: `Revocation queue safety buffer`,
+							Required:    true,
+						},
+						"tidy_cert_store": {
+							Type:        framework.TypeBool,
+							Description: `Tidy certificate store`,
+							Required:    false,
+						},
+						"tidy_revoked_certs": {
+							Type:        framework.TypeBool,
+							Description: `Tidy revoked certificates`,
+							Required:    false,
+						},
+						"tidy_revoked_cert_issuer_associations": {
+							Type:        framework.TypeBool,
+							Description: `Tidy revoked certificate issuer associations`,
+							Required:    false,
+						},
+						"tidy_acme": {
+							Type:        framework.TypeBool,
+							Description: `Tidy Unused Acme Accounts, and Orders`,
+							Required:    false,
+						},
+						"acme_account_safety_buffer": {
+							Type:        framework.TypeInt,
+							Description: `Safety buffer after creation after which accounts lacking orders are revoked`,
+							Required:    false,
+						},
+						"tidy_expired_issuers": {
+							Type:        framework.TypeBool,
+							Description: `Tidy expired issuers`,
+							Required:    false,
+						},
+						"pause_duration": {
+							Type:        framework.TypeString,
+							Description: `Duration to pause between tidying certificates`,
+							Required:    false,
+						},
+						"state": {
+							Type:        framework.TypeString,
+							Description: `One of Inactive, Running, Finished, or Error`,
+							Required:    false,
+						},
+						"error": {
+							Type:        framework.TypeString,
+							Description: `The error message`,
+							Required:    false,
+						},
+						"time_started": {
+							Type:        framework.TypeString,
+							Description: `Time the operation started`,
+							Required:    false,
+						},
+						"time_finished": {
+							Type:        framework.TypeString,
+							Description: `Time the operation finished`,
+							Required:    false,
+						},
+						"last_auto_tidy_finished": {
+							Type:        framework.TypeString,
+							Description: `Time the last auto-tidy operation finished`,
+							Required:    true,
+						},
+						"message": {
+							Type:        framework.TypeString,
+							Description: `Message of the operation`,
+							Required:    false,
+						},
+						"cert_store_deleted_count": {
+							Type:        framework.TypeInt,
+							Description: `The number of certificate storage entries deleted`,
+							Required:    false,
+						},
+						"revoked_cert_deleted_count": {
+							Type:        framework.TypeInt,
+							Description: `The number of revoked certificate entries deleted`,
+							Required:    false,
+						},
+						"current_cert_store_count": {
+							Type:        framework.TypeInt,
+							Description: `The current number of certificate storage entries`,
+							Required:    false,
+						},
+						"current_revoked_cert_count": {
+							Type:        framework.TypeInt,
+							Description: `The current number of revoked certificate entries`,
+							Required:    false,
+						},
+						"missing_issuer_cert_count": {
+							Type:     framework.TypeInt,
+							Required: false,
+						},
+						"tidy_move_legacy_ca_bundle": {
+							Type:     framework.TypeBool,
+							Required: false,
+						},
+						"tidy_cross_cluster_revoked_certs": {
+							Type:        framework.TypeBool,
+							Description: `Tidy the cross-cluster revoked certificate store`,
+							Required:    false,
+						},
+						"tidy_revocation_queue": {
+							Type:     framework.TypeBool,
+							Required: false,
+						},
+						"revocation_queue_deleted_count": {
+							Type:     framework.TypeInt,
+							Required: false,
+						},
+						"cross_revoked_cert_deleted_count": {
+							Type:     framework.TypeInt,
+							Required: false,
+						},
+						"internal_backend_uuid": {
+							Type:     framework.TypeString,
+							Required: false,
+						},
+						"total_acme_account_count": {
+							Type:        framework.TypeInt,
+							Description: `Total number of acme accounts iterated over`,
+							Required:    false,
+						},
"acme_account_deleted_count": { + Type: framework.TypeInt, + Description: `The number of revoked acme accounts removed`, + Required: false, + }, + "acme_account_revoked_count": { + Type: framework.TypeInt, + Description: `The number of unused acme accounts revoked`, + Required: false, + }, + "acme_orders_deleted_count": { + Type: framework.TypeInt, + Description: `The number of expired, unused acme orders removed`, + Required: false, + }, + }, + }}, + }, + ForwardPerformanceStandby: true, + }, + }, + HelpSynopsis: pathTidyCancelHelpSyn, + HelpDescription: pathTidyCancelHelpDesc, + } +} + +func pathTidyStatus(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "tidy-status$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixPKI, + OperationVerb: "tidy", + OperationSuffix: "status", + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathTidyStatusRead, + Responses: map[int][]framework.Response{ + http.StatusOK: {{ + Description: "OK", + Fields: map[string]*framework.FieldSchema{ + "safety_buffer": { + Type: framework.TypeInt, + Description: `Safety buffer time duration`, + Required: true, + }, + "issuer_safety_buffer": { + Type: framework.TypeInt, + Description: `Issuer safety buffer`, + Required: true, + }, + "revocation_queue_safety_buffer": { + Type: framework.TypeInt, + Description: `Revocation queue safety buffer`, + Required: true, + }, + "acme_account_safety_buffer": { + Type: framework.TypeInt, + Description: `Safety buffer after creation after which accounts lacking orders are revoked`, + Required: false, + }, + "tidy_cert_store": { + Type: framework.TypeBool, + Description: `Tidy certificate store`, + Required: true, + }, + "tidy_revoked_certs": { + Type: framework.TypeBool, + Description: `Tidy revoked certificates`, + Required: true, + }, + "tidy_revoked_cert_issuer_associations": { + Type: framework.TypeBool, + Description: `Tidy revoked certificate issuer associations`, + Required: true, + }, + "tidy_expired_issuers": { + Type: framework.TypeBool, + Description: `Tidy expired issuers`, + Required: true, + }, + "tidy_cross_cluster_revoked_certs": { + Type: framework.TypeBool, + Description: `Tidy the cross-cluster revoked certificate store`, + Required: false, + }, + "tidy_acme": { + Type: framework.TypeBool, + Description: `Tidy Unused Acme Accounts, and Orders`, + Required: true, + }, + "pause_duration": { + Type: framework.TypeString, + Description: `Duration to pause between tidying certificates`, + Required: true, + }, + "state": { + Type: framework.TypeString, + Description: `One of Inactive, Running, Finished, or Error`, + Required: true, + }, + "error": { + Type: framework.TypeString, + Description: `The error message`, + Required: true, + }, + "time_started": { + Type: framework.TypeString, + Description: `Time the operation started`, + Required: true, + }, + "time_finished": { + Type: framework.TypeString, + Description: `Time the operation finished`, + Required: false, + }, + "last_auto_tidy_finished": { + Type: framework.TypeString, + Description: `Time the last auto-tidy operation finished`, + Required: true, + }, + "message": { + Type: framework.TypeString, + Description: `Message of the operation`, + Required: true, + }, + "cert_store_deleted_count": { + Type: framework.TypeInt, + Description: `The number of certificate storage entries deleted`, + Required: true, + }, + "revoked_cert_deleted_count": { + Type: framework.TypeInt, + 
Description: `The number of revoked certificate entries deleted`,
+							Required:    true,
+						},
+						"current_cert_store_count": {
+							Type:        framework.TypeInt,
+							Description: `The current number of certificate storage entries`,
+							Required:    true,
+						},
+						"cross_revoked_cert_deleted_count": {
+							Type:        framework.TypeInt,
+							Description: `The number of cross-cluster revoked certificate entries deleted`,
+							Required:    true,
+						},
+						"current_revoked_cert_count": {
+							Type:        framework.TypeInt,
+							Description: `The current number of revoked certificate entries`,
+							Required:    true,
+						},
+						"revocation_queue_deleted_count": {
+							Type:     framework.TypeInt,
+							Required: true,
+						},
+						"tidy_move_legacy_ca_bundle": {
+							Type:     framework.TypeBool,
+							Required: true,
+						},
+						"tidy_revocation_queue": {
+							Type:     framework.TypeBool,
+							Required: true,
+						},
+						"missing_issuer_cert_count": {
+							Type:     framework.TypeInt,
+							Required: true,
+						},
+						"internal_backend_uuid": {
+							Type:     framework.TypeString,
+							Required: true,
+						},
+						"total_acme_account_count": {
+							Type:        framework.TypeInt,
+							Description: `Total number of acme accounts iterated over`,
+							Required:    false,
+						},
+						"acme_account_deleted_count": {
+							Type:        framework.TypeInt,
+							Description: `The number of revoked acme accounts removed`,
+							Required:    false,
+						},
+						"acme_account_revoked_count": {
+							Type:        framework.TypeInt,
+							Description: `The number of unused acme accounts revoked`,
+							Required:    false,
+						},
+						"acme_orders_deleted_count": {
+							Type:        framework.TypeInt,
+							Description: `The number of expired, unused acme orders removed`,
+							Required:    false,
+						},
+					},
+				}},
+			},
+			ForwardPerformanceStandby: true,
+		},
+	},
+	HelpSynopsis:    pathTidyStatusHelpSyn,
+	HelpDescription: pathTidyStatusHelpDesc,
+}
+}
+
+func pathConfigAutoTidy(b *backend) *framework.Path {
+	return &framework.Path{
+		Pattern: "config/auto-tidy",
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixPKI,
+		},
+		Fields: addTidyFields(map[string]*framework.FieldSchema{
+			"enabled": {
+				Type:        framework.TypeBool,
+				Description: `Set to true to enable automatic tidy operations.`,
+			},
+			"interval_duration": {
+				Type:        framework.TypeDurationSecond,
+				Description: `Interval at which to run an auto-tidy operation. This is the time between tidy invocations (after one finishes to the start of the next). Running a manual tidy will reset this duration.`,
+				Default:     int(defaultTidyConfig.Interval / time.Second), // TypeDurationSecond currently requires the default to be an int.
+			},
+			"maintain_stored_certificate_counts": {
+				Type: framework.TypeBool,
+				Description: `This configures whether stored certificates
+are counted upon initialization of the backend, and whether during
+normal operation, a running count of certificates stored is maintained.`,
+				Default: false,
+			},
+			"publish_stored_certificate_count_metrics": {
+				Type: framework.TypeBool,
+				Description: `This configures whether the stored certificate
+count is published to the metrics consumer. It does not affect whether the
+stored certificate count is maintained; if maintained, the count will be
+available on the tidy-status endpoint.`,
+				Default: false,
+			},
+		}),
+		Operations: map[logical.Operation]framework.OperationHandler{
+			logical.ReadOperation: &framework.PathOperation{
+				Callback: b.pathConfigAutoTidyRead,
+				DisplayAttrs: &framework.DisplayAttributes{
+					OperationSuffix: "auto-tidy-configuration",
+				},
+				Responses: map[int][]framework.Response{
+					http.StatusOK: {{
+						Description: "OK",
+						Fields: map[string]*framework.FieldSchema{
+							"enabled": {
+								Type:        framework.TypeBool,
+								Description: `Specifies whether automatic tidy is enabled or not`,
+								Required:    true,
+							},
+							"interval_duration": {
+								Type:        framework.TypeInt,
+								Description: `Specifies the duration between automatic tidy operations`,
+								Required:    true,
+							},
+							"tidy_cert_store": {
+								Type:        framework.TypeBool,
+								Description: `Specifies whether to tidy up the certificate store`,
+								Required:    true,
+							},
+							"tidy_revoked_certs": {
+								Type:        framework.TypeBool,
+								Description: `Specifies whether to remove all invalid and expired certificates from storage`,
+								Required:    true,
+							},
+							"tidy_revoked_cert_issuer_associations": {
+								Type:        framework.TypeBool,
+								Description: `Specifies whether to associate revoked certificates with their corresponding issuers`,
+								Required:    true,
+							},
+							"tidy_expired_issuers": {
+								Type:        framework.TypeBool,
+								Description: `Specifies whether to tidy expired issuers`,
+								Required:    true,
+							},
+							"tidy_acme": {
+								Type:        framework.TypeBool,
+								Description: `Tidy Unused Acme Accounts, and Orders`,
+								Required:    true,
+							},
+							"safety_buffer": {
+								Type:        framework.TypeInt,
+								Description: `Safety buffer time duration`,
+								Required:    true,
+							},
+							"issuer_safety_buffer": {
+								Type:        framework.TypeInt,
+								Description: `Issuer safety buffer`,
+								Required:    true,
+							},
+							"acme_account_safety_buffer": {
+								Type:        framework.TypeInt,
+								Description: `Safety buffer after creation after which accounts lacking orders are revoked`,
+								Required:    false,
+							},
+							"pause_duration": {
+								Type:        framework.TypeString,
+								Description: `Duration to pause between tidying certificates`,
+								Required:    true,
+							},
+							"tidy_move_legacy_ca_bundle": {
+								Type:     framework.TypeBool,
+								Required: true,
+							},
+							"tidy_cross_cluster_revoked_certs": {
+								Type:     framework.TypeBool,
+								Required: true,
+							},
+							"tidy_revocation_queue": {
+								Type:     framework.TypeBool,
+								Required: true,
+							},
+							"revocation_queue_safety_buffer": {
+								Type:     framework.TypeInt,
+								Required: true,
+							},
+							"publish_stored_certificate_count_metrics": {
+								Type:     framework.TypeBool,
+								Required: true,
+							},
+							"maintain_stored_certificate_counts": {
+								Type:     framework.TypeBool,
+								Required: true,
+							},
+						},
+					}},
+				},
+			},
+			logical.UpdateOperation: &framework.PathOperation{
+				Callback: b.pathConfigAutoTidyWrite,
+				DisplayAttrs: &framework.DisplayAttributes{
+					OperationVerb:   "configure",
+					OperationSuffix: "auto-tidy",
+				},
+				Responses: map[int][]framework.Response{
+					http.StatusOK: {{
+						Description: "OK",
+						Fields: map[string]*framework.FieldSchema{
+							"enabled": {
+								Type:        framework.TypeBool,
+								Description: `Specifies whether automatic tidy is enabled or not`,
+								Required:    true,
+							},
+							"interval_duration": {
+								Type:        framework.TypeInt,
+								Description: `Specifies the duration between automatic tidy operations`,
+								Required:    true,
+							},
+							"tidy_cert_store": {
+								Type:        framework.TypeBool,
+								Description: `Specifies whether to tidy up the certificate store`,
+								Required:    true,
+							},
+							"tidy_revoked_certs": {
+								Type:        framework.TypeBool,
+								Description: `Specifies whether to remove all invalid and expired certificates from storage`,
+								Required:    true,
+							},
+							"tidy_revoked_cert_issuer_associations": {
+								Type:        framework.TypeBool,
+								Description: `Specifies whether to associate revoked certificates with their corresponding issuers`,
+								Required:    true,
+							},
+							"tidy_expired_issuers": {
+								Type:        framework.TypeBool,
+								Description: `Specifies whether to tidy expired issuers`,
+								Required:    true,
+							},
+							"tidy_acme": {
+								Type:        framework.TypeBool,
+								Description: `Tidy Unused Acme Accounts, and Orders`,
+								Required:    true,
+							},
+							"safety_buffer": {
+								Type:        framework.TypeInt,
+								Description: `Safety buffer time duration`,
+								Required:    true,
+							},
+							"issuer_safety_buffer": {
+								Type:        framework.TypeInt,
+								Description: `Issuer safety buffer`,
+								Required:    true,
+							},
+							"acme_account_safety_buffer": {
+								Type:        framework.TypeInt,
+								Description: `Safety buffer after creation after which accounts lacking orders are revoked`,
+								Required:    true,
+							},
+							"pause_duration": {
+								Type:        framework.TypeString,
+								Description: `Duration to pause between tidying certificates`,
+								Required:    true,
+							},
+							"tidy_cross_cluster_revoked_certs": {
+								Type:        framework.TypeBool,
+								Description: `Tidy the cross-cluster revoked certificate store`,
+								Required:    true,
+							},
+							"tidy_revocation_queue": {
+								Type:     framework.TypeBool,
+								Required: true,
+							},
+							"tidy_move_legacy_ca_bundle": {
+								Type:     framework.TypeBool,
+								Required: true,
+							},
+							"revocation_queue_safety_buffer": {
+								Type:     framework.TypeInt,
+								Required: true,
+							},
+							"publish_stored_certificate_count_metrics": {
+								Type:     framework.TypeBool,
+								Required: true,
+							},
+							"maintain_stored_certificate_counts": {
+								Type:     framework.TypeBool,
+								Required: true,
+							},
+						},
+					}},
+				},
+				// Read more about why these flags are set in backend.go.
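+				// (In short: auto-tidy config writes must execute on a node
+				// that can persist the shared config, so performance-standby
+				// and performance-secondary nodes forward them onward.)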
+ ForwardPerformanceStandby: true, + ForwardPerformanceSecondary: true, + }, + }, + HelpSynopsis: pathConfigAutoTidySyn, + HelpDescription: pathConfigAutoTidyDesc, + } +} + +func (b *backend) pathTidyWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + safetyBuffer := d.Get("safety_buffer").(int) + tidyCertStore := d.Get("tidy_cert_store").(bool) + tidyRevokedCerts := d.Get("tidy_revoked_certs").(bool) || d.Get("tidy_revocation_list").(bool) + tidyRevokedAssocs := d.Get("tidy_revoked_cert_issuer_associations").(bool) + tidyExpiredIssuers := d.Get("tidy_expired_issuers").(bool) + tidyBackupBundle := d.Get("tidy_move_legacy_ca_bundle").(bool) + issuerSafetyBuffer := d.Get("issuer_safety_buffer").(int) + pauseDurationStr := d.Get("pause_duration").(string) + pauseDuration := 0 * time.Second + tidyRevocationQueue := d.Get("tidy_revocation_queue").(bool) + queueSafetyBuffer := d.Get("revocation_queue_safety_buffer").(int) + tidyCrossRevokedCerts := d.Get("tidy_cross_cluster_revoked_certs").(bool) + tidyAcme := d.Get("tidy_acme").(bool) + acmeAccountSafetyBuffer := d.Get("acme_account_safety_buffer").(int) + + if safetyBuffer < 1 { + return logical.ErrorResponse("safety_buffer must be greater than zero"), nil + } + + if issuerSafetyBuffer < 1 { + return logical.ErrorResponse("issuer_safety_buffer must be greater than zero"), nil + } + + if queueSafetyBuffer < 1 { + return logical.ErrorResponse("revocation_queue_safety_buffer must be greater than zero"), nil + } + + if acmeAccountSafetyBuffer < 1 { + return logical.ErrorResponse("acme_account_safety_buffer must be greater than zero"), nil + } + + if pauseDurationStr != "" { + var err error + pauseDuration, err = parseutil.ParseDurationSecond(pauseDurationStr) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("Error parsing pause_duration: %v", err)), nil + } + + if pauseDuration < (0 * time.Second) { + return logical.ErrorResponse("received invalid, negative pause_duration"), nil + } + } + + bufferDuration := time.Duration(safetyBuffer) * time.Second + issuerBufferDuration := time.Duration(issuerSafetyBuffer) * time.Second + queueSafetyBufferDuration := time.Duration(queueSafetyBuffer) * time.Second + acmeAccountSafetyBufferDuration := time.Duration(acmeAccountSafetyBuffer) * time.Second + + // Manual run with constructed configuration. + config := &tidyConfig{ + Enabled: true, + Interval: 0 * time.Second, + CertStore: tidyCertStore, + RevokedCerts: tidyRevokedCerts, + IssuerAssocs: tidyRevokedAssocs, + ExpiredIssuers: tidyExpiredIssuers, + BackupBundle: tidyBackupBundle, + SafetyBuffer: bufferDuration, + IssuerSafetyBuffer: issuerBufferDuration, + PauseDuration: pauseDuration, + RevocationQueue: tidyRevocationQueue, + QueueSafetyBuffer: queueSafetyBufferDuration, + CrossRevokedCerts: tidyCrossRevokedCerts, + TidyAcme: tidyAcme, + AcmeAccountSafetyBuffer: acmeAccountSafetyBufferDuration, + } + + if !atomic.CompareAndSwapUint32(b.tidyCASGuard, 0, 1) { + resp := &logical.Response{} + resp.AddWarning("Tidy operation already in progress.") + return resp, nil + } + + // Tests using framework will screw up the storage so make a locally + // scoped req to hold a reference + req = &logical.Request{ + Storage: req.Storage, + } + + // Mark the last tidy operation as relatively recent, to ensure we don't + // try to trigger the periodic function. + b.tidyStatusLock.Lock() + b.lastTidy = time.Now() + b.tidyStatusLock.Unlock() + + // Kick off the actual tidy. 
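+	// Note: startTidyOperation runs the tidy in its own goroutine; this
+	// handler returns 202 Accepted below while the work continues in the
+	// background.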
+ b.startTidyOperation(req, config) + + resp := &logical.Response{} + if !config.IsAnyTidyEnabled() { + resp.AddWarning("Manual tidy requested but no tidy operations were set. Enable at least one tidy operation to be run (" + config.AnyTidyConfig() + ").") + } else { + resp.AddWarning("Tidy operation successfully started. Any information from the operation will be printed to Vault's server logs.") + } + + if tidyRevocationQueue || tidyCrossRevokedCerts { + isNotPerfPrimary := b.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) || + (!b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) + if isNotPerfPrimary { + resp.AddWarning("tidy_revocation_queue=true and tidy_cross_cluster_revoked_certs=true can only be set on the active node of the primary cluster unless a local mount is used; this option has been ignored.") + } + } + + return logical.RespondWithStatusCode(resp, req, http.StatusAccepted) +} + +func (b *backend) startTidyOperation(req *logical.Request, config *tidyConfig) { + go func() { + atomic.StoreUint32(b.tidyCancelCAS, 0) + defer atomic.StoreUint32(b.tidyCASGuard, 0) + + b.tidyStatusStart(config) + + // Don't cancel when the original client request goes away. + ctx := context.Background() + + logger := b.Logger().Named("tidy") + + doTidy := func() error { + if config.CertStore { + if err := b.doTidyCertStore(ctx, req, logger, config); err != nil { + return err + } + } + + // Check for cancel before continuing. + if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { + return tidyCancelledError + } + + if config.RevokedCerts || config.IssuerAssocs { + if err := b.doTidyRevocationStore(ctx, req, logger, config); err != nil { + return err + } + } + + // Check for cancel before continuing. + if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { + return tidyCancelledError + } + + if config.ExpiredIssuers { + if err := b.doTidyExpiredIssuers(ctx, req, logger, config); err != nil { + return err + } + } + + // Check for cancel before continuing. + if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { + return tidyCancelledError + } + + if config.BackupBundle { + if err := b.doTidyMoveCABundle(ctx, req, logger, config); err != nil { + return err + } + } + + // Check for cancel before continuing. + if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { + return tidyCancelledError + } + + if config.RevocationQueue { + if err := b.doTidyRevocationQueue(ctx, req, logger, config); err != nil { + return err + } + } + + // Check for cancel before continuing. + if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { + return tidyCancelledError + } + + if config.CrossRevokedCerts { + if err := b.doTidyCrossRevocationStore(ctx, req, logger, config); err != nil { + return err + } + } + + // Check for cancel before continuing. + if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { + return tidyCancelledError + } + + if config.TidyAcme { + if err := b.doTidyAcme(ctx, req, logger, config); err != nil { + return err + } + } + + return nil + } + + if err := doTidy(); err != nil { + logger.Error("error running tidy", "error", err) + b.tidyStatusStop(err) + } else { + b.tidyStatusStop(nil) + + // Since the tidy operation finished without an error, we don't + // really want to start another tidy right away (if the interval + // is too short). So mark the last tidy as now. 
+ b.tidyStatusLock.Lock() + b.lastTidy = time.Now() + b.tidyStatusLock.Unlock() + } + }() +} + +func (b *backend) doTidyCertStore(ctx context.Context, req *logical.Request, logger hclog.Logger, config *tidyConfig) error { + serials, err := req.Storage.List(ctx, "certs/") + if err != nil { + return fmt.Errorf("error fetching list of certs: %w", err) + } + + serialCount := len(serials) + metrics.SetGauge([]string{"secrets", "pki", "tidy", "cert_store_total_entries"}, float32(serialCount)) + for i, serial := range serials { + b.tidyStatusMessage(fmt.Sprintf("Tidying certificate store: checking entry %d of %d", i, serialCount)) + metrics.SetGauge([]string{"secrets", "pki", "tidy", "cert_store_current_entry"}, float32(i)) + + // Check for cancel before continuing. + if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { + return tidyCancelledError + } + + // Check for pause duration to reduce resource consumption. + if config.PauseDuration > (0 * time.Second) { + time.Sleep(config.PauseDuration) + } + + certEntry, err := req.Storage.Get(ctx, "certs/"+serial) + if err != nil { + return fmt.Errorf("error fetching certificate %q: %w", serial, err) + } + + if certEntry == nil { + logger.Warn("certificate entry is nil; tidying up since it is no longer useful for any server operations", "serial", serial) + if err := req.Storage.Delete(ctx, "certs/"+serial); err != nil { + return fmt.Errorf("error deleting nil entry with serial %s: %w", serial, err) + } + b.tidyStatusIncCertStoreCount() + continue + } + + if certEntry.Value == nil || len(certEntry.Value) == 0 { + logger.Warn("certificate entry has no value; tidying up since it is no longer useful for any server operations", "serial", serial) + if err := req.Storage.Delete(ctx, "certs/"+serial); err != nil { + return fmt.Errorf("error deleting entry with nil value with serial %s: %w", serial, err) + } + b.tidyStatusIncCertStoreCount() + continue + } + + cert, err := x509.ParseCertificate(certEntry.Value) + if err != nil { + return fmt.Errorf("unable to parse stored certificate with serial %q: %w", serial, err) + } + + if time.Since(cert.NotAfter) > config.SafetyBuffer { + if err := req.Storage.Delete(ctx, "certs/"+serial); err != nil { + return fmt.Errorf("error deleting serial %q from storage: %w", serial, err) + } + b.tidyStatusIncCertStoreCount() + } + } + + b.tidyStatusLock.RLock() + metrics.SetGauge([]string{"secrets", "pki", "tidy", "cert_store_total_entries_remaining"}, float32(uint(serialCount)-b.tidyStatus.certStoreDeletedCount)) + b.tidyStatusLock.RUnlock() + + return nil +} + +func (b *backend) doTidyRevocationStore(ctx context.Context, req *logical.Request, logger hclog.Logger, config *tidyConfig) error { + b.revokeStorageLock.Lock() + defer b.revokeStorageLock.Unlock() + + // Fetch and parse our issuers so we can associate them if necessary. 
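+	// issuerIDCertMap maps each issuer ID to its parsed certificate. Below,
+	// it is used both to validate the issuer reference stored on each
+	// revocation entry and to re-associate entries whose recorded issuer no
+	// longer exists.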
+ sc := b.makeStorageContext(ctx, req.Storage) + issuerIDCertMap, err := fetchIssuerMapForRevocationChecking(sc) + if err != nil { + return err + } + + rebuildCRL := false + + revokedSerials, err := req.Storage.List(ctx, "revoked/") + if err != nil { + return fmt.Errorf("error fetching list of revoked certs: %w", err) + } + + revokedSerialsCount := len(revokedSerials) + metrics.SetGauge([]string{"secrets", "pki", "tidy", "revoked_cert_total_entries"}, float32(revokedSerialsCount)) + + fixedIssuers := 0 + + var revInfo revocationInfo + for i, serial := range revokedSerials { + b.tidyStatusMessage(fmt.Sprintf("Tidying revoked certificates: checking certificate %d of %d", i, len(revokedSerials))) + metrics.SetGauge([]string{"secrets", "pki", "tidy", "revoked_cert_current_entry"}, float32(i)) + + // Check for cancel before continuing. + if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) { + return tidyCancelledError + } + + // Check for pause duration to reduce resource consumption. + if config.PauseDuration > (0 * time.Second) { + b.revokeStorageLock.Unlock() + time.Sleep(config.PauseDuration) + b.revokeStorageLock.Lock() + } + + revokedEntry, err := req.Storage.Get(ctx, "revoked/"+serial) + if err != nil { + return fmt.Errorf("unable to fetch revoked cert with serial %q: %w", serial, err) + } + + if revokedEntry == nil { + logger.Warn("revoked entry is nil; tidying up since it is no longer useful for any server operations", "serial", serial) + if err := req.Storage.Delete(ctx, "revoked/"+serial); err != nil { + return fmt.Errorf("error deleting nil revoked entry with serial %s: %w", serial, err) + } + b.tidyStatusIncRevokedCertCount() + continue + } + + if revokedEntry.Value == nil || len(revokedEntry.Value) == 0 { + logger.Warn("revoked entry has nil value; tidying up since it is no longer useful for any server operations", "serial", serial) + if err := req.Storage.Delete(ctx, "revoked/"+serial); err != nil { + return fmt.Errorf("error deleting revoked entry with nil value with serial %s: %w", serial, err) + } + b.tidyStatusIncRevokedCertCount() + continue + } + + err = revokedEntry.DecodeJSON(&revInfo) + if err != nil { + return fmt.Errorf("error decoding revocation entry for serial %q: %w", serial, err) + } + + revokedCert, err := x509.ParseCertificate(revInfo.CertificateBytes) + if err != nil { + return fmt.Errorf("unable to parse stored revoked certificate with serial %q: %w", serial, err) + } + + // Tidy operations over revoked certs should execute prior to + // tidyRevokedCerts as that may remove the entry. If that happens, + // we won't persist the revInfo changes (as it was deleted instead). + var storeCert bool + if config.IssuerAssocs { + if !isRevInfoIssuerValid(&revInfo, issuerIDCertMap) { + b.tidyStatusIncMissingIssuerCertCount() + revInfo.CertificateIssuer = issuerID("") + storeCert = true + if associateRevokedCertWithIsssuer(&revInfo, revokedCert, issuerIDCertMap) { + fixedIssuers += 1 + } + } + } + + if config.RevokedCerts { + // Only remove the entries from revoked/ and certs/ if we're + // past its NotAfter value. This is because we use the + // information on revoked/ to build the CRL and the + // information on certs/ for lookup. 
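+			// For example, with the default 72h safety_buffer, a revoked
+			// certificate whose NotAfter passed on Jan 1 00:00 UTC only
+			// becomes eligible for deletion after Jan 4 00:00 UTC.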
if time.Since(revokedCert.NotAfter) > config.SafetyBuffer {
+				if err := req.Storage.Delete(ctx, "revoked/"+serial); err != nil {
+					return fmt.Errorf("error deleting serial %q from revoked list: %w", serial, err)
+				}
+				if err := req.Storage.Delete(ctx, "certs/"+serial); err != nil {
+					return fmt.Errorf("error deleting serial %q from store when tidying revoked: %w", serial, err)
+				}
+				rebuildCRL = true
+				storeCert = false
+				b.tidyStatusIncRevokedCertCount()
+			}
+		}
+
+		// If the entry wasn't removed but was otherwise modified,
+		// go ahead and write it back out.
+		if storeCert {
+			revokedEntry, err = logical.StorageEntryJSON("revoked/"+serial, revInfo)
+			if err != nil {
+				return fmt.Errorf("error building entry to persist changes to serial %v from revoked list: %w", serial, err)
+			}
+
+			err = req.Storage.Put(ctx, revokedEntry)
+			if err != nil {
+				return fmt.Errorf("error persisting changes to serial %v from revoked list: %w", serial, err)
+			}
+		}
+	}
+
+	b.tidyStatusLock.RLock()
+	metrics.SetGauge([]string{"secrets", "pki", "tidy", "revoked_cert_total_entries_remaining"}, float32(uint(revokedSerialsCount)-b.tidyStatus.revokedCertDeletedCount))
+	metrics.SetGauge([]string{"secrets", "pki", "tidy", "revoked_cert_entries_incorrect_issuers"}, float32(b.tidyStatus.missingIssuerCertCount))
+	metrics.SetGauge([]string{"secrets", "pki", "tidy", "revoked_cert_entries_fixed_issuers"}, float32(fixedIssuers))
+	b.tidyStatusLock.RUnlock()
+
+	if rebuildCRL {
+		// Expired certificates aren't generally an important reason to
+		// trigger a CRL rebuild. Check if automatic CRL rebuilds have
+		// been enabled and defer the rebuild if so.
+		config, err := sc.getRevocationConfig()
+		if err != nil {
+			return err
+		}
+
+		if !config.AutoRebuild {
+			warnings, err := b.crlBuilder.rebuild(sc, false)
+			if err != nil {
+				return err
+			}
+			if len(warnings) > 0 {
+				msg := "During rebuild of CRL for tidy, got the following warnings:"
+				for index, warning := range warnings {
+					msg = fmt.Sprintf("%v\n %d. %v", msg, index+1, warning)
+				}
+				b.Logger().Warn(msg)
+			}
+		}
+	}
+
+	return nil
+}
+
+func (b *backend) doTidyExpiredIssuers(ctx context.Context, req *logical.Request, logger hclog.Logger, config *tidyConfig) error {
+	// We do not support cancelling within the expired issuers operation.
+	// Any cancellation will occur before or after this operation.
+
+	if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) ||
+		(!b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) {
+		b.Logger().Debug("skipping expired issuer tidy as we're not on the primary or secondary with a local mount")
+		return nil
+	}
+
+	// Short-circuit to avoid having to deal with the legacy mounts. While we
+	// could handle this case and remove these issuers, it's somewhat
+	// unexpected behavior and we'd prefer to finish the migration first.
+	if b.useLegacyBundleCaStorage() {
+		return nil
+	}
+
+	b.issuersLock.Lock()
+	defer b.issuersLock.Unlock()
+
+	// Fetch and parse our issuers so we have their expiration date.
+	sc := b.makeStorageContext(ctx, req.Storage)
+	issuerIDCertMap, err := fetchIssuerMapForRevocationChecking(sc)
+	if err != nil {
+		return err
+	}
+
+	// Fetch the issuer config to find the default; we don't want to remove
+	// the current active issuer automatically.
iConfig, err := sc.getIssuersConfig()
+	if err != nil {
+		return err
+	}
+
+	// We want issuers whose certificates expired longer ago than the
+	// configured issuer safety buffer.
+	rebuildChainsAndCRL := false
+
+	for issuer, cert := range issuerIDCertMap {
+		if time.Since(cert.NotAfter) <= config.IssuerSafetyBuffer {
+			continue
+		}
+
+		entry, err := sc.fetchIssuerById(issuer)
+		if err != nil {
+			return err
+		}
+
+		// This issuer's certificate has expired. We explicitly persist the
+		// key, but log both the certificate and the keyId to the
+		// informational logs so an admin can recover the removed cert if
+		// necessary or remove the key (and know which cert it belonged to),
+		// if desired.
+		msg := "[Tidy on mount: %v] Issuer %v has expired by %v and is being removed."
+		idAndName := fmt.Sprintf("[id:%v/name:%v]", entry.ID, entry.Name)
+		msg = fmt.Sprintf(msg, b.backendUUID, idAndName, config.IssuerSafetyBuffer)
+
+		// Before we log, check if we're the default. While this is late, and
+		// after we read it from storage, we have more info here to tell the
+		// user that their default has expired AND has passed the safety
+		// buffer.
+		if iConfig.DefaultIssuerId == issuer {
+			msg = "[Tidy on mount: %v] Issuer %v has expired and would be removed via tidy, but won't be, as it is currently the default issuer."
+			msg = fmt.Sprintf(msg, b.backendUUID, idAndName)
+			b.Logger().Warn(msg)
+			continue
+		}
+
+		// Log the above message.
+		b.Logger().Info(msg, "serial_number", entry.SerialNumber, "key_id", entry.KeyID, "certificate", entry.Certificate)
+
+		wasDefault, err := sc.deleteIssuer(issuer)
+		if err != nil {
+			b.Logger().Error(fmt.Sprintf("failed to remove %v: %v", idAndName, err))
+			return err
+		}
+		if wasDefault {
+			b.Logger().Warn(fmt.Sprintf("expired issuer %v was default; it is strongly encouraged to choose a new default issuer for backwards compatibility", idAndName))
+		}
+
+		rebuildChainsAndCRL = true
+	}
+
+	if rebuildChainsAndCRL {
+		// When issuers are removed, there's a chance chains change as a
+		// result; rebuild them.
+		if err := sc.rebuildIssuersChains(nil); err != nil {
+			return err
+		}
+
+		// Removal of issuers is generally a good reason to rebuild the CRL,
+		// even if auto-rebuild is enabled.
+		b.revokeStorageLock.Lock()
+		defer b.revokeStorageLock.Unlock()
+
+		warnings, err := b.crlBuilder.rebuild(sc, false)
+		if err != nil {
+			return err
+		}
+		if len(warnings) > 0 {
+			msg := "During rebuild of CRL for tidy, got the following warnings:"
+			for index, warning := range warnings {
+				msg = fmt.Sprintf("%v\n %d. %v", msg, index+1, warning)
+			}
+			b.Logger().Warn(msg)
+		}
+	}
+
+	return nil
+}
+
+func (b *backend) doTidyMoveCABundle(ctx context.Context, req *logical.Request, logger hclog.Logger, config *tidyConfig) error {
+	// We do not support cancelling within this operation; any cancel will
+	// occur before or after this operation.
+
+	if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) ||
+		(!b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) {
+		b.Logger().Debug("skipping moving the legacy CA bundle as we're not on the primary or secondary with a local mount")
+		return nil
+	}
+
+	// Short-circuit to avoid moving the legacy bundle from under a legacy
+	// mount.
+	if b.useLegacyBundleCaStorage() {
+		return nil
+	}
+
+	// If we've already run, exit.
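+	// (A prior successful run deletes the bundle from the legacy path, so
+	// getLegacyCertBundle yields a nil bundle and the checks below turn a
+	// rerun into a no-op.)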
_, bundle, err := getLegacyCertBundle(ctx, req.Storage)
+	if err != nil {
+		return fmt.Errorf("failed to fetch the legacy CA bundle: %w", err)
+	}
+
+	if bundle == nil {
+		b.Logger().Debug("No legacy CA bundle available; nothing to do.")
+		return nil
+	}
+
+	log, err := getLegacyBundleMigrationLog(ctx, req.Storage)
+	if err != nil {
+		return fmt.Errorf("failed to fetch the legacy bundle migration log: %w", err)
+	}
+
+	if log == nil {
+		return errors.New("refusing to tidy with an empty legacy migration log but present CA bundle")
+	}
+
+	if time.Since(log.Created) <= config.IssuerSafetyBuffer {
+		b.Logger().Debug("Migration was created too recently to remove the legacy bundle; refusing to move legacy CA bundle to backup location.")
+		return nil
+	}
+
+	// Do the write before the delete.
+	entry, err := logical.StorageEntryJSON(legacyCertBundleBackupPath, bundle)
+	if err != nil {
+		return fmt.Errorf("failed to create new backup storage entry: %w", err)
+	}
+
+	err = req.Storage.Put(ctx, entry)
+	if err != nil {
+		return fmt.Errorf("failed to write new backup legacy CA bundle: %w", err)
+	}
+
+	err = req.Storage.Delete(ctx, legacyCertBundlePath)
+	if err != nil {
+		return fmt.Errorf("failed to remove old legacy CA bundle path: %w", err)
+	}
+
+	b.Logger().Info("legacy CA bundle successfully moved to backup location")
+	return nil
+}
+
+func (b *backend) doTidyRevocationQueue(ctx context.Context, req *logical.Request, logger hclog.Logger, config *tidyConfig) error {
+	if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) ||
+		(!b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) {
+		b.Logger().Debug("skipping cross-cluster revocation queue tidy as we're not on the primary or secondary with a local mount")
+		return nil
+	}
+
+	sc := b.makeStorageContext(ctx, req.Storage)
+	clusters, err := sc.Storage.List(sc.Context, crossRevocationPrefix)
+	if err != nil {
+		return fmt.Errorf("failed to list cross-cluster revocation queue participating clusters: %w", err)
+	}
+
+	// Grab locks as we're potentially modifying revocation-related storage.
+	b.revokeStorageLock.Lock()
+	defer b.revokeStorageLock.Unlock()
+
+	for cIndex, cluster := range clusters {
+		if cluster[len(cluster)-1] == '/' {
+			cluster = cluster[0 : len(cluster)-1]
+		}
+
+		cPath := crossRevocationPrefix + cluster + "/"
+		serials, err := sc.Storage.List(sc.Context, cPath)
+		if err != nil {
+			return fmt.Errorf("failed to list cross-cluster revocation queue entries for cluster %v (%v): %w", cluster, cIndex, err)
+		}
+
+		for _, serial := range serials {
+			// Check for cancellation.
+			if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) {
+				return tidyCancelledError
+			}
+
+			// Check for pause duration to reduce resource consumption.
+			if config.PauseDuration > (0 * time.Second) {
+				b.revokeStorageLock.Unlock()
+				time.Sleep(config.PauseDuration)
+				b.revokeStorageLock.Lock()
+			}
+
+			// Confirmation entries _should_ be handled by this cluster's
+			// processRevocationQueue(...) invocation; if not, when the plugin
+			// reloads, maybeGatherQueueForFirstProcess(...) will remove all
+			// stale confirmation requests. However, we don't want to force an
+			// operator to reload their in-use plugin, so allow tidy to also
+			// clean up confirmation values without reloading.
+			if serial[len(serial)-1] == '/' {
+				// Check if we have a confirmed entry.
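+				// A trailing-slash "serial" is a directory of confirmation
+				// markers; a non-nil entry at <cluster>/<serial>/confirmed
+				// means each cluster's pending request for this serial can
+				// be cleaned up below.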
confirmedPath := cPath + serial + "confirmed"
+				removalEntry, err := sc.Storage.Get(sc.Context, confirmedPath)
+				if err != nil {
+					return fmt.Errorf("error reading revocation confirmation (%v) during tidy: %w", confirmedPath, err)
+				}
+				if removalEntry == nil {
+					continue
+				}
+
+				// Remove potential revocation requests from all clusters.
+				for _, subCluster := range clusters {
+					if subCluster[len(subCluster)-1] == '/' {
+						subCluster = subCluster[0 : len(subCluster)-1]
+					}
+
+					reqPath := subCluster + "/" + serial[0:len(serial)-1]
+					if err := sc.Storage.Delete(sc.Context, reqPath); err != nil {
+						return fmt.Errorf("failed to remove confirmed revocation request on candidate cluster (%v): %w", reqPath, err)
+					}
+				}
+
+				// Then delete the confirmation.
+				if err := sc.Storage.Delete(sc.Context, confirmedPath); err != nil {
+					return fmt.Errorf("failed to remove confirmed revocation confirmation (%v): %w", confirmedPath, err)
+				}
+
+				// No need to handle a revocation request at this path: it can't
+				// still exist on this cluster after we deleted it above.
+				continue
+			}
+
+			ePath := cPath + serial
+			entry, err := sc.Storage.Get(sc.Context, ePath)
+			if err != nil {
+				return fmt.Errorf("error reading revocation request (%v) to tidy: %w", ePath, err)
+			}
+			if entry == nil || entry.Value == nil {
+				continue
+			}
+
+			var revRequest revocationRequest
+			if err := entry.DecodeJSON(&revRequest); err != nil {
+				return fmt.Errorf("error reading revocation request (%v) to tidy: %w", ePath, err)
+			}
+
+			if time.Since(revRequest.RequestedAt) <= config.QueueSafetyBuffer {
+				continue
+			}
+
+			// Safe to remove this entry.
+			if err := sc.Storage.Delete(sc.Context, ePath); err != nil {
+				return fmt.Errorf("error deleting revocation request (%v): %w", ePath, err)
+			}
+
+			// Assumption: there should never be a need to remove this from
+			// the processing queue on this node. We're on the active primary,
+			// so our writes don't cause invalidations. This means we'd have
+			// to have slated it for deletion very quickly after it'd been
+			// sent (i.e., inside of the 1-minute boundary that periodicFunc
+			// executes at). While this is possible, because we grab the
+			// revocationStorageLock above, we can't execute interleaved
+			// with that periodicFunc, so the periodicFunc would've had to
+			// have finished before we actually did this deletion (or it
+			// wouldn't have ignored this serial because our deletion would've
+			// happened prior to it reading the storage entry). Thus we should
+			// be safe to ignore the revocation queue removal here.
+			b.tidyStatusIncRevQueueCount()
+		}
+	}
+
+	return nil
+}
+
+func (b *backend) doTidyCrossRevocationStore(ctx context.Context, req *logical.Request, logger hclog.Logger, config *tidyConfig) error {
+	if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) ||
+		(!b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) {
+		b.Logger().Debug("skipping cross-cluster revoked certificate store tidy as we're not on the primary or secondary with a local mount")
+		return nil
+	}
+
+	sc := b.makeStorageContext(ctx, req.Storage)
+	clusters, err := sc.Storage.List(sc.Context, unifiedRevocationReadPathPrefix)
+	if err != nil {
+		return fmt.Errorf("failed to list cross-cluster revoked certificate store participating clusters: %w", err)
+	}
+
+	// Grab locks as we're potentially modifying revocation-related storage.
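+	// (The lock is briefly released around any configured pause_duration
+	// sleep in the loop below.)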
b.revokeStorageLock.Lock()
+	defer b.revokeStorageLock.Unlock()
+
+	for cIndex, cluster := range clusters {
+		if cluster[len(cluster)-1] == '/' {
+			cluster = cluster[0 : len(cluster)-1]
+		}
+
+		cPath := unifiedRevocationReadPathPrefix + cluster + "/"
+		serials, err := sc.Storage.List(sc.Context, cPath)
+		if err != nil {
+			return fmt.Errorf("failed to list cross-cluster revoked certificate store entries for cluster %v (%v): %w", cluster, cIndex, err)
+		}
+
+		for _, serial := range serials {
+			// Check for cancellation.
+			if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) {
+				return tidyCancelledError
+			}
+
+			// Check for pause duration to reduce resource consumption.
+			if config.PauseDuration > (0 * time.Second) {
+				b.revokeStorageLock.Unlock()
+				time.Sleep(config.PauseDuration)
+				b.revokeStorageLock.Lock()
+			}
+
+			ePath := cPath + serial
+			entry, err := sc.Storage.Get(sc.Context, ePath)
+			if err != nil {
+				return fmt.Errorf("error reading cross-cluster revocation entry (%v) to tidy: %w", ePath, err)
+			}
+			if entry == nil || entry.Value == nil {
+				continue
+			}
+
+			var details unifiedRevocationEntry
+			if err := entry.DecodeJSON(&details); err != nil {
+				return fmt.Errorf("error decoding cross-cluster revocation entry (%v) to tidy: %w", ePath, err)
+			}
+
+			if time.Since(details.CertExpiration) <= config.SafetyBuffer {
+				continue
+			}
+
+			// Safe to remove this entry.
+			if err := sc.Storage.Delete(sc.Context, ePath); err != nil {
+				return fmt.Errorf("error deleting revocation request (%v): %w", ePath, err)
+			}
+
+			b.tidyStatusIncCrossRevCertCount()
+		}
+	}
+
+	return nil
+}
+
+func (b *backend) doTidyAcme(ctx context.Context, req *logical.Request, logger hclog.Logger, config *tidyConfig) error {
+	b.acmeAccountLock.Lock()
+	defer b.acmeAccountLock.Unlock()
+
+	sc := b.makeStorageContext(ctx, req.Storage)
+	thumbprints, err := sc.Storage.List(ctx, acmeThumbprintPrefix)
+	if err != nil {
+		return err
+	}
+
+	b.tidyStatusLock.Lock()
+	b.tidyStatus.acmeAccountsCount = uint(len(thumbprints))
+	b.tidyStatusLock.Unlock()
+
+	for _, thumbprint := range thumbprints {
+		err := b.tidyAcmeAccountByThumbprint(b.acmeState, sc, thumbprint, config.SafetyBuffer, config.AcmeAccountSafetyBuffer)
+		if err != nil {
+			logger.Warn("error tidying account", "thumbprint", thumbprint, "error", err)
+		}
+
+		// Check for cancel before continuing.
+		if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) {
+			return tidyCancelledError
+		}
+
+		// Check for pause duration to reduce resource consumption.
+		if config.PauseDuration > (0 * time.Second) {
+			b.acmeAccountLock.Unlock() // Release the account lock while sleeping.
+			time.Sleep(config.PauseDuration)
+			b.acmeAccountLock.Lock()
+		}
+
+	}
+
+	// Clean up any unused EAB
+	eabIds, err := b.acmeState.ListEabIds(sc)
+	if err != nil {
+		return fmt.Errorf("failed listing EAB ids: %w", err)
+	}
+
+	for _, eabId := range eabIds {
+		eab, err := b.acmeState.LoadEab(sc, eabId)
+		if err != nil {
+			if errors.Is(err, ErrStorageItemNotFound) {
+				// We don't need to worry about a consumed EAB
+				continue
+			}
+			return err
+		}
+
+		eabExpiration := eab.CreatedOn.Add(config.AcmeAccountSafetyBuffer)
+		if time.Now().After(eabExpiration) {
+			_, err := b.acmeState.DeleteEab(sc, eabId)
+			if err != nil {
+				return fmt.Errorf("failed to tidy eab %s: %w", eabId, err)
+			}
+		}
+
+		// Check for cancel before continuing.
+		if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 1, 0) {
+			return tidyCancelledError
+		}
+
+		// Check for pause duration to reduce resource consumption.
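+		// The account lock is released for the duration of any configured
+		// sleep below so in-flight ACME requests are not blocked for the
+		// full pause.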
if config.PauseDuration > (0 * time.Second) {
+			b.acmeAccountLock.Unlock() // Release the account lock while sleeping.
+			time.Sleep(config.PauseDuration)
+			b.acmeAccountLock.Lock()
+		}
+	}
+
+	return nil
+}
+
+func (b *backend) pathTidyCancelWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	if atomic.LoadUint32(b.tidyCASGuard) == 0 {
+		resp := &logical.Response{}
+		resp.AddWarning("Tidy operation cannot be cancelled as none is currently running.")
+		return resp, nil
+	}
+
+	// Grab the status lock before writing the cancel atomic. This lets us
+	// update the status correctly as well, avoiding writing it if we're not
+	// presently running.
+	//
+	// Unlock needs to occur prior to calling read.
+	b.tidyStatusLock.Lock()
+	if b.tidyStatus.state == tidyStatusStarted || atomic.LoadUint32(b.tidyCASGuard) == 1 {
+		if atomic.CompareAndSwapUint32(b.tidyCancelCAS, 0, 1) {
+			b.tidyStatus.state = tidyStatusCancelling
+		}
+	}
+	b.tidyStatusLock.Unlock()
+
+	return b.pathTidyStatusRead(ctx, req, d)
+}
+
+func (b *backend) pathTidyStatusRead(_ context.Context, _ *logical.Request, _ *framework.FieldData) (*logical.Response, error) {
+	b.tidyStatusLock.RLock()
+	defer b.tidyStatusLock.RUnlock()
+
+	resp := &logical.Response{
+		Data: map[string]interface{}{
+			"safety_buffer":                         nil,
+			"issuer_safety_buffer":                  nil,
+			"tidy_cert_store":                       nil,
+			"tidy_revoked_certs":                    nil,
+			"tidy_revoked_cert_issuer_associations": nil,
+			"tidy_expired_issuers":                  nil,
+			"tidy_move_legacy_ca_bundle":            nil,
+			"tidy_revocation_queue":                 nil,
+			"tidy_cross_cluster_revoked_certs":      nil,
+			"tidy_acme":                             nil,
+			"pause_duration":                        nil,
+			"state":                                 "Inactive",
+			"error":                                 nil,
+			"time_started":                          nil,
+			"time_finished":                         nil,
+			"message":                               nil,
+			"cert_store_deleted_count":              nil,
+			"revoked_cert_deleted_count":            nil,
+			"missing_issuer_cert_count":             nil,
+			"current_cert_store_count":              nil,
+			"current_revoked_cert_count":            nil,
+			"internal_backend_uuid":                 nil,
+			"revocation_queue_deleted_count":        nil,
+			"cross_revoked_cert_deleted_count":      nil,
+			"total_acme_account_count":              nil,
+			"acme_account_deleted_count":            nil,
+			"acme_account_revoked_count":            nil,
+			"acme_orders_deleted_count":             nil,
+			"acme_account_safety_buffer":            nil,
+		},
+	}
+
+	resp.Data["internal_backend_uuid"] = b.backendUUID
+
+	if b.certCountEnabled.Load() {
+		resp.Data["current_cert_store_count"] = b.certCount.Load()
+		resp.Data["current_revoked_cert_count"] = b.revokedCertCount.Load()
+		if !b.certsCounted.Load() {
+			resp.AddWarning("Certificates in storage are still being counted, current counts provided may be " +
+				"inaccurate")
+		}
+		if b.certCountError != "" {
+			resp.Data["certificate_counting_error"] = b.certCountError
+		}
+	}
+
+	if b.tidyStatus.state == tidyStatusInactive {
+		return resp, nil
+	}
+
+	resp.Data["safety_buffer"] = b.tidyStatus.safetyBuffer
+	resp.Data["issuer_safety_buffer"] = b.tidyStatus.issuerSafetyBuffer
+	resp.Data["tidy_cert_store"] = b.tidyStatus.tidyCertStore
+	resp.Data["tidy_revoked_certs"] = b.tidyStatus.tidyRevokedCerts
+	resp.Data["tidy_revoked_cert_issuer_associations"] = b.tidyStatus.tidyRevokedAssocs
+	resp.Data["tidy_expired_issuers"] = b.tidyStatus.tidyExpiredIssuers
+	resp.Data["tidy_move_legacy_ca_bundle"] = b.tidyStatus.tidyBackupBundle
+	resp.Data["tidy_revocation_queue"] = b.tidyStatus.tidyRevocationQueue
+	resp.Data["tidy_cross_cluster_revoked_certs"] = b.tidyStatus.tidyCrossRevokedCerts
+	resp.Data["tidy_acme"] = b.tidyStatus.tidyAcme
+	resp.Data["pause_duration"] = b.tidyStatus.pauseDuration
+	resp.Data["time_started"]
= b.tidyStatus.timeStarted + resp.Data["message"] = b.tidyStatus.message + resp.Data["cert_store_deleted_count"] = b.tidyStatus.certStoreDeletedCount + resp.Data["revoked_cert_deleted_count"] = b.tidyStatus.revokedCertDeletedCount + resp.Data["missing_issuer_cert_count"] = b.tidyStatus.missingIssuerCertCount + resp.Data["revocation_queue_deleted_count"] = b.tidyStatus.revQueueDeletedCount + resp.Data["cross_revoked_cert_deleted_count"] = b.tidyStatus.crossRevokedDeletedCount + resp.Data["revocation_queue_safety_buffer"] = b.tidyStatus.revQueueSafetyBuffer + resp.Data["last_auto_tidy_finished"] = b.lastTidy + resp.Data["total_acme_account_count"] = b.tidyStatus.acmeAccountsCount + resp.Data["acme_account_deleted_count"] = b.tidyStatus.acmeAccountsDeletedCount + resp.Data["acme_account_revoked_count"] = b.tidyStatus.acmeAccountsRevokedCount + resp.Data["acme_orders_deleted_count"] = b.tidyStatus.acmeOrdersDeletedCount + resp.Data["acme_account_safety_buffer"] = b.tidyStatus.acmeAccountSafetyBuffer + + switch b.tidyStatus.state { + case tidyStatusStarted: + resp.Data["state"] = "Running" + case tidyStatusFinished: + resp.Data["state"] = "Finished" + resp.Data["time_finished"] = b.tidyStatus.timeFinished + resp.Data["message"] = nil + case tidyStatusError: + resp.Data["state"] = "Error" + resp.Data["time_finished"] = b.tidyStatus.timeFinished + resp.Data["error"] = b.tidyStatus.err.Error() + // Don't clear the message so that it serves as a hint about when + // the error occurred. + case tidyStatusCancelling: + resp.Data["state"] = "Cancelling" + case tidyStatusCancelled: + resp.Data["state"] = "Cancelled" + resp.Data["time_finished"] = b.tidyStatus.timeFinished + } + + return resp, nil +} + +func (b *backend) pathConfigAutoTidyRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + sc := b.makeStorageContext(ctx, req.Storage) + config, err := sc.getAutoTidyConfig() + if err != nil { + return nil, err + } + + return &logical.Response{ + Data: getTidyConfigData(*config), + }, nil +} + +func (b *backend) pathConfigAutoTidyWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + sc := b.makeStorageContext(ctx, req.Storage) + config, err := sc.getAutoTidyConfig() + if err != nil { + return nil, err + } + + if enabledRaw, ok := d.GetOk("enabled"); ok { + config.Enabled = enabledRaw.(bool) + } + + if intervalRaw, ok := d.GetOk("interval_duration"); ok { + config.Interval = time.Duration(intervalRaw.(int)) * time.Second + if config.Interval < 0 { + return logical.ErrorResponse(fmt.Sprintf("given interval_duration must be greater than or equal to zero seconds; got: %v", intervalRaw)), nil + } + } + + if certStoreRaw, ok := d.GetOk("tidy_cert_store"); ok { + config.CertStore = certStoreRaw.(bool) + } + + if revokedCertsRaw, ok := d.GetOk("tidy_revoked_certs"); ok { + config.RevokedCerts = revokedCertsRaw.(bool) + } + + if issuerAssocRaw, ok := d.GetOk("tidy_revoked_cert_issuer_associations"); ok { + config.IssuerAssocs = issuerAssocRaw.(bool) + } + + if safetyBufferRaw, ok := d.GetOk("safety_buffer"); ok { + config.SafetyBuffer = time.Duration(safetyBufferRaw.(int)) * time.Second + if config.SafetyBuffer < 1*time.Second { + return logical.ErrorResponse(fmt.Sprintf("given safety_buffer must be at least one second; got: %v", safetyBufferRaw)), nil + } + } + + if pauseDurationRaw, ok := d.GetOk("pause_duration"); ok { + config.PauseDuration, err = parseutil.ParseDurationSecond(pauseDurationRaw.(string)) + if 
err != nil {
+		return logical.ErrorResponse(fmt.Sprintf("unable to parse given pause_duration: %v", err)), nil
+		}
+
+		if config.PauseDuration < (0 * time.Second) {
+			return logical.ErrorResponse("received invalid, negative pause_duration"), nil
+		}
+	}
+
+	if expiredIssuers, ok := d.GetOk("tidy_expired_issuers"); ok {
+		config.ExpiredIssuers = expiredIssuers.(bool)
+	}
+
+	if issuerSafetyBufferRaw, ok := d.GetOk("issuer_safety_buffer"); ok {
+		config.IssuerSafetyBuffer = time.Duration(issuerSafetyBufferRaw.(int)) * time.Second
+		if config.IssuerSafetyBuffer < 1*time.Second {
+			return logical.ErrorResponse(fmt.Sprintf("given issuer_safety_buffer must be at least one second; got: %v", issuerSafetyBufferRaw)), nil
+		}
+	}
+
+	if backupBundle, ok := d.GetOk("tidy_move_legacy_ca_bundle"); ok {
+		config.BackupBundle = backupBundle.(bool)
+	}
+
+	if revocationQueueRaw, ok := d.GetOk("tidy_revocation_queue"); ok {
+		config.RevocationQueue = revocationQueueRaw.(bool)
+	}
+
+	if queueSafetyBufferRaw, ok := d.GetOk("revocation_queue_safety_buffer"); ok {
+		config.QueueSafetyBuffer = time.Duration(queueSafetyBufferRaw.(int)) * time.Second
+		if config.QueueSafetyBuffer < 1*time.Second {
+			return logical.ErrorResponse(fmt.Sprintf("given revocation_queue_safety_buffer must be at least one second; got: %v", queueSafetyBufferRaw)), nil
+		}
+	}
+
+	if crossRevokedRaw, ok := d.GetOk("tidy_cross_cluster_revoked_certs"); ok {
+		config.CrossRevokedCerts = crossRevokedRaw.(bool)
+	}
+
+	if tidyAcmeRaw, ok := d.GetOk("tidy_acme"); ok {
+		config.TidyAcme = tidyAcmeRaw.(bool)
+	}
+
+	if acmeAccountSafetyBufferRaw, ok := d.GetOk("acme_account_safety_buffer"); ok {
+		config.AcmeAccountSafetyBuffer = time.Duration(acmeAccountSafetyBufferRaw.(int)) * time.Second
+		if config.AcmeAccountSafetyBuffer < 1*time.Second {
+			return logical.ErrorResponse(fmt.Sprintf("given acme_account_safety_buffer must be at least one second; got: %v", acmeAccountSafetyBufferRaw)), nil
+		}
+	}
+
+	if config.Enabled && !config.IsAnyTidyEnabled() {
+		return logical.ErrorResponse("Auto-tidy enabled but no tidy operations were requested. Enable at least one tidy operation to be run (" + config.AnyTidyConfig() + ")."), nil
+	}
+
+	if maintainCountEnabledRaw, ok := d.GetOk("maintain_stored_certificate_counts"); ok {
+		config.MaintainCount = maintainCountEnabledRaw.(bool)
+	}
+
+	if runningStorageMetricsEnabledRaw, ok := d.GetOk("publish_stored_certificate_count_metrics"); ok {
+		config.PublishMetrics = runningStorageMetricsEnabledRaw.(bool)
+	}
+
+	if config.PublishMetrics && !config.MaintainCount {
+		return logical.ErrorResponse("Cannot publish a running storage metrics count to metrics without first maintaining that count. 
Enable `maintain_stored_certificate_counts` to enable `publish_stored_certificate_count_metrics`."), nil + } + + if err := sc.writeAutoTidyConfig(config); err != nil { + return nil, err + } + + return &logical.Response{ + Data: getTidyConfigData(*config), + }, nil +} + +func (b *backend) tidyStatusStart(config *tidyConfig) { + b.tidyStatusLock.Lock() + defer b.tidyStatusLock.Unlock() + + b.tidyStatus = &tidyStatus{ + safetyBuffer: int(config.SafetyBuffer / time.Second), + issuerSafetyBuffer: int(config.IssuerSafetyBuffer / time.Second), + revQueueSafetyBuffer: int(config.QueueSafetyBuffer / time.Second), + acmeAccountSafetyBuffer: int(config.AcmeAccountSafetyBuffer / time.Second), + tidyCertStore: config.CertStore, + tidyRevokedCerts: config.RevokedCerts, + tidyRevokedAssocs: config.IssuerAssocs, + tidyExpiredIssuers: config.ExpiredIssuers, + tidyBackupBundle: config.BackupBundle, + tidyRevocationQueue: config.RevocationQueue, + tidyCrossRevokedCerts: config.CrossRevokedCerts, + tidyAcme: config.TidyAcme, + pauseDuration: config.PauseDuration.String(), + + state: tidyStatusStarted, + timeStarted: time.Now(), + } + + metrics.SetGauge([]string{"secrets", "pki", "tidy", "start_time_epoch"}, float32(b.tidyStatus.timeStarted.Unix())) +} + +func (b *backend) tidyStatusStop(err error) { + b.tidyStatusLock.Lock() + defer b.tidyStatusLock.Unlock() + + b.tidyStatus.timeFinished = time.Now() + b.tidyStatus.err = err + if err == nil { + b.tidyStatus.state = tidyStatusFinished + } else if err == tidyCancelledError { + b.tidyStatus.state = tidyStatusCancelled + } else { + b.tidyStatus.state = tidyStatusError + } + + metrics.MeasureSince([]string{"secrets", "pki", "tidy", "duration"}, b.tidyStatus.timeStarted) + metrics.SetGauge([]string{"secrets", "pki", "tidy", "start_time_epoch"}, 0) + metrics.IncrCounter([]string{"secrets", "pki", "tidy", "cert_store_deleted_count"}, float32(b.tidyStatus.certStoreDeletedCount)) + metrics.IncrCounter([]string{"secrets", "pki", "tidy", "revoked_cert_deleted_count"}, float32(b.tidyStatus.revokedCertDeletedCount)) + + if err != nil { + metrics.IncrCounter([]string{"secrets", "pki", "tidy", "failure"}, 1) + } else { + metrics.IncrCounter([]string{"secrets", "pki", "tidy", "success"}, 1) + } +} + +func (b *backend) tidyStatusMessage(msg string) { + b.tidyStatusLock.Lock() + defer b.tidyStatusLock.Unlock() + + b.tidyStatus.message = msg +} + +func (b *backend) tidyStatusIncCertStoreCount() { + b.tidyStatusLock.Lock() + defer b.tidyStatusLock.Unlock() + + b.tidyStatus.certStoreDeletedCount++ + + b.ifCountEnabledDecrementTotalCertificatesCountReport() +} + +func (b *backend) tidyStatusIncRevokedCertCount() { + b.tidyStatusLock.Lock() + defer b.tidyStatusLock.Unlock() + + b.tidyStatus.revokedCertDeletedCount++ + + b.ifCountEnabledDecrementTotalRevokedCertificatesCountReport() +} + +func (b *backend) tidyStatusIncMissingIssuerCertCount() { + b.tidyStatusLock.Lock() + defer b.tidyStatusLock.Unlock() + + b.tidyStatus.missingIssuerCertCount++ +} + +func (b *backend) tidyStatusIncRevQueueCount() { + b.tidyStatusLock.Lock() + defer b.tidyStatusLock.Unlock() + + b.tidyStatus.revQueueDeletedCount++ +} + +func (b *backend) tidyStatusIncCrossRevCertCount() { + b.tidyStatusLock.Lock() + defer b.tidyStatusLock.Unlock() + + b.tidyStatus.crossRevokedDeletedCount++ +} + +func (b *backend) tidyStatusIncRevAcmeAccountCount() { + b.tidyStatusLock.Lock() + defer b.tidyStatusLock.Unlock() + + b.tidyStatus.acmeAccountsRevokedCount++ +} + +func (b *backend) tidyStatusIncDeletedAcmeAccountCount() { 
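+	// As with the other tidyStatusInc* helpers below, the counter lives on
+	// the shared tidyStatus struct, so the increment is guarded by
+	// tidyStatusLock to keep concurrent tidy-status reads consistent.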
+	b.tidyStatusLock.Lock()
+	defer b.tidyStatusLock.Unlock()
+
+	b.tidyStatus.acmeAccountsDeletedCount++
+}
+
+func (b *backend) tidyStatusIncDelAcmeOrderCount() {
+	b.tidyStatusLock.Lock()
+	defer b.tidyStatusLock.Unlock()
+
+	b.tidyStatus.acmeOrdersDeletedCount++
+}
+
+const pathTidyHelpSyn = `
+Tidy up the backend by removing expired certificates, revocation information,
+or both.
+`
+
+const pathTidyHelpDesc = `
+This endpoint allows expired certificates and/or revocation information to be
+removed from the backend, freeing up storage and shortening CRLs.
+
+For safety, this function is a no-op if called without parameters; cleanup from
+normal certificate storage must be enabled with 'tidy_cert_store' and cleanup
+from revocation information must be enabled with 'tidy_revoked_certs'.
+
+The 'safety_buffer' parameter is useful to ensure that clock skew amongst your
+hosts cannot lead to a certificate being removed from the CRL while it is still
+considered valid by other hosts (for instance, if their clocks are a few
+minutes behind). The 'safety_buffer' parameter can be an integer number of
+seconds or a string duration like "72h".
+
+All certificates and/or revocation information currently stored in the backend
+will be checked when this endpoint is hit. The expiration of each certificate
+held in certificate storage or in revocation information will then be checked.
+If the current time, minus the value of 'safety_buffer', is greater than the
+expiration, the entry will be removed.
+`
+
+const pathTidyCancelHelpSyn = `
+Cancels a currently running tidy operation.
+`
+
+const pathTidyCancelHelpDesc = `
+This endpoint allows cancelling a currently running tidy operation.
+
+Periodically throughout the invocation of tidy, we'll check if the operation
+has been requested to be cancelled. If so, we'll stop the currently running
+tidy operation.
+`
+
+const pathTidyStatusHelpSyn = `
+Returns the status of the tidy operation.
+`
+
+const pathTidyStatusHelpDesc = `
+This is a read-only endpoint that returns information about the current tidy
+operation, or the most recent if none is currently running. 
+
+The result includes the following fields:
+* 'safety_buffer': the value of this parameter when initiating the tidy operation
+* 'tidy_cert_store': the value of this parameter when initiating the tidy operation
+* 'tidy_revoked_certs': the value of this parameter when initiating the tidy operation
+* 'tidy_revoked_cert_issuer_associations': the value of this parameter when initiating the tidy operation
+* 'state': one of "Inactive", "Running", "Finished", "Error", "Cancelling", or "Cancelled"
+* 'error': the error message, if the operation ran into an error
+* 'time_started': the time the operation started
+* 'time_finished': the time the operation finished
+* 'message': one of "Tidying certificate store: checking entry N of TOTAL" or
+  "Tidying revoked certificates: checking certificate N of TOTAL"
+* 'cert_store_deleted_count': the number of certificate storage entries deleted
+* 'revoked_cert_deleted_count': the number of revoked certificate entries deleted
+* 'missing_issuer_cert_count': the number of revoked certificates which were missing a valid issuer reference
+* 'tidy_expired_issuers': the value of this parameter when initiating the tidy operation
+* 'issuer_safety_buffer': the value of this parameter when initiating the tidy operation
+* 'tidy_move_legacy_ca_bundle': the value of this parameter when initiating the tidy operation
+* 'tidy_revocation_queue': the value of this parameter when initiating the tidy operation
+* 'revocation_queue_deleted_count': the number of revocation queue entries deleted
+* 'tidy_cross_cluster_revoked_certs': the value of this parameter when initiating the tidy operation
+* 'cross_revoked_cert_deleted_count': the number of cross-cluster revoked certificate entries deleted
+* 'revocation_queue_safety_buffer': the value of this parameter when initiating the tidy operation
+* 'tidy_acme': the value of this parameter when initiating the tidy operation
+* 'acme_account_safety_buffer': the value of this parameter when initiating the tidy operation
+* 'total_acme_account_count': the total number of ACME accounts in the list to be iterated over
+* 'acme_account_deleted_count': the number of revoked ACME accounts deleted during the operation
+* 'acme_account_revoked_count': the number of ACME accounts revoked during the operation
+* 'acme_orders_deleted_count': the number of ACME orders deleted during the operation
+`
+
+const pathConfigAutoTidySyn = `
+Modifies the current configuration for automatic tidy execution.
+`
+
+const pathConfigAutoTidyDesc = `
+This endpoint accepts parameters to a tidy operation (see /tidy) that
+will be used for automatic tidy execution. This takes two extra parameters,
+enabled (to enable or disable auto-tidy) and interval_duration (which
+controls the frequency of auto-tidy execution).
+
+Once enabled, a tidy operation will be kicked off automatically, as if it
+were executed with the posted configuration. 
+` + +func getTidyConfigData(config tidyConfig) map[string]interface{} { + return map[string]interface{}{ + // This map is in the same order as tidyConfig to ensure that all fields are accounted for + "enabled": config.Enabled, + "interval_duration": int(config.Interval / time.Second), + "tidy_cert_store": config.CertStore, + "tidy_revoked_certs": config.RevokedCerts, + "tidy_revoked_cert_issuer_associations": config.IssuerAssocs, + "tidy_expired_issuers": config.ExpiredIssuers, + "tidy_move_legacy_ca_bundle": config.BackupBundle, + "tidy_acme": config.TidyAcme, + "safety_buffer": int(config.SafetyBuffer / time.Second), + "issuer_safety_buffer": int(config.IssuerSafetyBuffer / time.Second), + "acme_account_safety_buffer": int(config.AcmeAccountSafetyBuffer / time.Second), + "pause_duration": config.PauseDuration.String(), + "publish_stored_certificate_count_metrics": config.PublishMetrics, + "maintain_stored_certificate_counts": config.MaintainCount, + "tidy_revocation_queue": config.RevocationQueue, + "revocation_queue_safety_buffer": int(config.QueueSafetyBuffer / time.Second), + "tidy_cross_cluster_revoked_certs": config.CrossRevokedCerts, + } +} diff --git a/builtin/logical/pki/path_tidy_test.go b/builtin/logical/pki/path_tidy_test.go new file mode 100644 index 0000000..0f6c4dc --- /dev/null +++ b/builtin/logical/pki/path_tidy_test.go @@ -0,0 +1,1322 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "path" + "strings" + "testing" + "time" + + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "golang.org/x/crypto/acme" + + "github.com/hashicorp/vault/helper/testhelpers" + "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" + + "github.com/armon/go-metrics" + + "github.com/hashicorp/vault/api" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + + "github.com/stretchr/testify/require" +) + +func TestTidyConfigs(t *testing.T) { + t.Parallel() + + var cfg tidyConfig + operations := strings.Split(cfg.AnyTidyConfig(), " / ") + require.Greater(t, len(operations), 1, "expected more than one operation") + t.Logf("Got tidy operations: %v", operations) + + lastOp := operations[len(operations)-1] + + for _, operation := range operations { + b, s := CreateBackendWithStorage(t) + + resp, err := CBWrite(b, s, "config/auto-tidy", map[string]interface{}{ + "enabled": true, + operation: true, + }) + requireSuccessNonNilResponse(t, resp, err, "expected to be able to enable auto-tidy operation "+operation) + + resp, err = CBRead(b, s, "config/auto-tidy") + requireSuccessNonNilResponse(t, resp, err, "expected to be able to read auto-tidy operation for operation "+operation) + require.True(t, resp.Data[operation].(bool), "expected operation to be enabled after reading auto-tidy config "+operation) + + resp, err = CBWrite(b, s, "config/auto-tidy", map[string]interface{}{ + "enabled": true, + operation: false, + lastOp: true, + }) + requireSuccessNonNilResponse(t, resp, err, "expected to be able to disable auto-tidy operation "+operation) + + resp, err = CBRead(b, s, "config/auto-tidy") + requireSuccessNonNilResponse(t, resp, err, "expected to be able to read auto-tidy operation for operation "+operation) + require.False(t, resp.Data[operation].(bool), "expected operation to be disabled after reading auto-tidy config 
"+operation) + + resp, err = CBWrite(b, s, "tidy", map[string]interface{}{ + operation: true, + }) + requireSuccessNonNilResponse(t, resp, err, "expected to be able to start tidy operation with "+operation) + if len(resp.Warnings) > 0 { + t.Logf("got warnings while starting manual tidy: %v", resp.Warnings) + for _, warning := range resp.Warnings { + if strings.Contains(warning, "Manual tidy requested but no tidy operations were set.") { + t.Fatalf("expected to be able to enable tidy operation with just %v but got warning: %v / (resp=%v)", operation, warning, resp) + } + } + } + + lastOp = operation + } + + // pause_duration is tested elsewhere in other tests. + type configSafetyBufferValueStr struct { + Config string + FirstValue int + SecondValue int + DefaultValue int + } + configSafetyBufferValues := []configSafetyBufferValueStr{ + { + Config: "safety_buffer", + FirstValue: 1, + SecondValue: 2, + DefaultValue: int(defaultTidyConfig.SafetyBuffer / time.Second), + }, + { + Config: "issuer_safety_buffer", + FirstValue: 1, + SecondValue: 2, + DefaultValue: int(defaultTidyConfig.IssuerSafetyBuffer / time.Second), + }, + { + Config: "acme_account_safety_buffer", + FirstValue: 1, + SecondValue: 2, + DefaultValue: int(defaultTidyConfig.AcmeAccountSafetyBuffer / time.Second), + }, + { + Config: "revocation_queue_safety_buffer", + FirstValue: 1, + SecondValue: 2, + DefaultValue: int(defaultTidyConfig.QueueSafetyBuffer / time.Second), + }, + } + + for _, flag := range configSafetyBufferValues { + b, s := CreateBackendWithStorage(t) + + resp, err := CBRead(b, s, "config/auto-tidy") + requireSuccessNonNilResponse(t, resp, err, "expected to be able to read auto-tidy operation for flag "+flag.Config) + require.Equal(t, resp.Data[flag.Config].(int), flag.DefaultValue, "expected initial auto-tidy config to match default value for "+flag.Config) + + resp, err = CBWrite(b, s, "config/auto-tidy", map[string]interface{}{ + "enabled": true, + "tidy_cert_store": true, + flag.Config: flag.FirstValue, + }) + requireSuccessNonNilResponse(t, resp, err, "expected to be able to set auto-tidy config option "+flag.Config) + + resp, err = CBRead(b, s, "config/auto-tidy") + requireSuccessNonNilResponse(t, resp, err, "expected to be able to read auto-tidy operation for config "+flag.Config) + require.Equal(t, resp.Data[flag.Config].(int), flag.FirstValue, "expected value to be set after reading auto-tidy config "+flag.Config) + + resp, err = CBWrite(b, s, "config/auto-tidy", map[string]interface{}{ + "enabled": true, + "tidy_cert_store": true, + flag.Config: flag.SecondValue, + }) + requireSuccessNonNilResponse(t, resp, err, "expected to be able to set auto-tidy config option "+flag.Config) + + resp, err = CBRead(b, s, "config/auto-tidy") + requireSuccessNonNilResponse(t, resp, err, "expected to be able to read auto-tidy operation for config "+flag.Config) + require.Equal(t, resp.Data[flag.Config].(int), flag.SecondValue, "expected value to be set after reading auto-tidy config "+flag.Config) + + resp, err = CBWrite(b, s, "tidy", map[string]interface{}{ + "tidy_cert_store": true, + flag.Config: flag.FirstValue, + }) + t.Logf("tidy run results: resp=%v/err=%v", resp, err) + requireSuccessNonNilResponse(t, resp, err, "expected to be able to start tidy operation with "+flag.Config) + if len(resp.Warnings) > 0 { + for _, warning := range resp.Warnings { + if strings.Contains(warning, "unrecognized parameter") && strings.Contains(warning, flag.Config) { + t.Fatalf("warning '%v' claims parameter '%v' is unknown", warning, 
flag.Config) + } + } + } + + time.Sleep(2 * time.Second) + + resp, err = CBRead(b, s, "tidy-status") + requireSuccessNonNilResponse(t, resp, err, "expected to be able to start tidy operation with "+flag.Config) + t.Logf("got response: %v for config: %v", resp, flag.Config) + require.Equal(t, resp.Data[flag.Config].(int), flag.FirstValue, "expected flag to be set in tidy-status for config "+flag.Config) + } +} + +func TestAutoTidy(t *testing.T) { + t.Parallel() + + // While we'd like to reduce this duration, we need to wait until + // the rollback manager timer ticks. With the new helper, we can + // modify the rollback manager timer period directly, allowing us + // to shorten the total test time significantly. + // + // We set the delta CRL time to ensure it executes prior to the + // main CRL rebuild, and the new CRL doesn't rebuild until after + // we're done. + newPeriod := 1 * time.Second + + // This test requires the periodicFunc to trigger, which requires we stand + // up a full test cluster. + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + // See notes below about usage of /sys/raw for reading cluster + // storage without barrier encryption. + EnableRaw: true, + RollbackPeriod: newPeriod, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + + // Mount PKI + err := client.Sys().Mount("pki", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "10m", + MaxLeaseTTL: "60m", + }, + }) + require.NoError(t, err) + + // Generate root. + resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "Root X1", + "key_type": "ec", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data) + require.NotEmpty(t, resp.Data["issuer_id"]) + issuerId := resp.Data["issuer_id"] + + // Run tidy so status is not empty when we run it later... + _, err = client.Logical().Write("pki/tidy", map[string]interface{}{ + "tidy_revoked_certs": true, + }) + require.NoError(t, err) + + // Setup a testing role. + _, err = client.Logical().Write("pki/roles/local-testing", map[string]interface{}{ + "allow_any_name": true, + "enforce_hostnames": false, + "key_type": "ec", + }) + require.NoError(t, err) + + // Write the auto-tidy config. + _, err = client.Logical().Write("pki/config/auto-tidy", map[string]interface{}{ + "enabled": true, + "interval_duration": "1s", + "tidy_cert_store": true, + "tidy_revoked_certs": true, + "safety_buffer": "1s", + }) + require.NoError(t, err) + + // Issue a cert and revoke it. 
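+	// The leaf is given a deliberately short (10s) TTL: later in the test we
+	// sleep past its NotAfter plus the 1s safety buffer so that auto-tidy is
+	// allowed to remove both the stored entry and the revocation entry.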
+ resp, err = client.Logical().Write("pki/issue/local-testing", map[string]interface{}{ + "common_name": "example.com", + "ttl": "10s", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["serial_number"]) + require.NotEmpty(t, resp.Data["certificate"]) + leafSerial := resp.Data["serial_number"].(string) + leafCert := parseCert(t, resp.Data["certificate"].(string)) + + // Read cert before revoking + resp, err = client.Logical().Read("pki/cert/" + leafSerial) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["certificate"]) + revocationTime, err := (resp.Data["revocation_time"].(json.Number)).Int64() + require.Equal(t, int64(0), revocationTime, "revocation time was not zero") + require.Empty(t, resp.Data["revocation_time_rfc3339"], "revocation_time_rfc3339 was not empty") + require.Empty(t, resp.Data["issuer_id"], "issuer_id was not empty") + + _, err = client.Logical().Write("pki/revoke", map[string]interface{}{ + "serial_number": leafSerial, + }) + require.NoError(t, err) + + // Cert should still exist. + resp, err = client.Logical().Read("pki/cert/" + leafSerial) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["certificate"]) + revocationTime, err = (resp.Data["revocation_time"].(json.Number)).Int64() + require.NoError(t, err, "failed converting %s to int", resp.Data["revocation_time"]) + revTime := time.Unix(revocationTime, 0) + now := time.Now() + if !(now.After(revTime) && now.Add(-10*time.Minute).Before(revTime)) { + t.Fatalf("parsed revocation time not within the last 10 minutes current time: %s, revocation time: %s", now, revTime) + } + utcLoc, err := time.LoadLocation("UTC") + require.NoError(t, err, "failed to parse UTC location?") + + rfc3339RevocationTime, err := time.Parse(time.RFC3339Nano, resp.Data["revocation_time_rfc3339"].(string)) + require.NoError(t, err, "failed parsing revocation_time_rfc3339 field: %s", resp.Data["revocation_time_rfc3339"]) + + require.Equal(t, revTime.In(utcLoc), rfc3339RevocationTime.Truncate(time.Second), + "revocation times did not match revocation_time: %s, "+"rfc3339 time: %s", revTime, rfc3339RevocationTime) + require.Equal(t, issuerId, resp.Data["issuer_id"], "issuer_id on leaf cert did not match") + + // Wait for cert to expire and the safety buffer to elapse. + time.Sleep(time.Until(leafCert.NotAfter) + 3*time.Second) + + // Wait for auto-tidy to run afterwards. + var foundTidyRunning string + var foundTidyFinished bool + timeoutChan := time.After(120 * time.Second) + for { + if foundTidyRunning != "" && foundTidyFinished { + break + } + + select { + case <-timeoutChan: + t.Fatalf("expected auto-tidy to run (%v) and finish (%v) before 120 seconds elapsed", foundTidyRunning, foundTidyFinished) + default: + time.Sleep(250 * time.Millisecond) + + resp, err = client.Logical().Read("pki/tidy-status") + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["state"]) + require.NotEmpty(t, resp.Data["time_started"]) + state := resp.Data["state"].(string) + started := resp.Data["time_started"].(string) + t.Logf("Resp: %v", resp.Data) + + // We want the _next_ tidy run after the cert expires. This + // means if we're currently finished when we hit this the + // first time, we want to wait for the next run. 
+ if foundTidyRunning == "" { + foundTidyRunning = started + } else if foundTidyRunning != started && !foundTidyFinished && state == "Finished" { + foundTidyFinished = true + } + } + } + + // Cert should no longer exist. + resp, err = client.Logical().Read("pki/cert/" + leafSerial) + require.Nil(t, err) + require.Nil(t, resp) +} + +func TestTidyCancellation(t *testing.T) { + t.Parallel() + + numLeaves := 100 + + b, s := CreateBackendWithStorage(t) + + // Create a root, a role, and a bunch of leaves. + _, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "root example.com", + "issuer_name": "root", + "ttl": "20m", + "key_type": "ec", + }) + require.NoError(t, err) + _, err = CBWrite(b, s, "roles/local-testing", map[string]interface{}{ + "allow_any_name": true, + "enforce_hostnames": false, + "key_type": "ec", + }) + require.NoError(t, err) + for i := 0; i < numLeaves; i++ { + _, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ + "common_name": "testing", + "ttl": "1s", + }) + require.NoError(t, err) + } + + // Kick off a tidy operation (which runs in the background), but with + // a slow-ish pause between certificates. + resp, err := CBWrite(b, s, "tidy", map[string]interface{}{ + "tidy_cert_store": true, + "safety_buffer": "1s", + "pause_duration": "1s", + }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("tidy"), logical.UpdateOperation), resp, true) + + // If we wait six seconds, the operation should still be running. That's + // how we check that pause_duration works. + time.Sleep(3 * time.Second) + + resp, err = CBRead(b, s, "tidy-status") + + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.Equal(t, resp.Data["state"], "Running") + + // If we now cancel the operation, the response should say Cancelling. + cancelResp, err := CBWrite(b, s, "tidy-cancel", map[string]interface{}{}) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("tidy-cancel"), logical.UpdateOperation), resp, true) + require.NoError(t, err) + require.NotNil(t, cancelResp) + require.NotNil(t, cancelResp.Data) + state := cancelResp.Data["state"].(string) + howMany := cancelResp.Data["cert_store_deleted_count"].(uint) + + if state == "Cancelled" { + // Rest of the test can't run; log and exit. + t.Log("Went to cancel the operation but response was already cancelled") + return + } + + require.Equal(t, state, "Cancelling") + + // Wait a little longer, and ensure we only processed at most 2 more certs + // after the cancellation respon. + time.Sleep(3 * time.Second) + + statusResp, err := CBRead(b, s, "tidy-status") + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("tidy-status"), logical.ReadOperation), resp, true) + require.NoError(t, err) + require.NotNil(t, statusResp) + require.NotNil(t, statusResp.Data) + require.Equal(t, statusResp.Data["state"], "Cancelled") + nowMany := statusResp.Data["cert_store_deleted_count"].(uint) + if howMany+3 <= nowMany { + t.Fatalf("expected to only process at most 3 more certificates, but processed (%v >>> %v) certs", nowMany, howMany) + } +} + +func TestTidyIssuers(t *testing.T) { + t.Parallel() + + b, s := CreateBackendWithStorage(t) + + // Create a root that expires quickly and one valid for longer. 
+ _, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "root1 example.com", + "issuer_name": "root-expired", + "ttl": "1s", + "key_type": "ec", + }) + require.NoError(t, err) + + _, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "root2 example.com", + "issuer_name": "root-valid", + "ttl": "60m", + "key_type": "rsa", + }) + require.NoError(t, err) + + // Sleep long enough to expire the root. + time.Sleep(2 * time.Second) + + // First tidy run shouldn't remove anything; too long of safety buffer. + _, err = CBWrite(b, s, "tidy", map[string]interface{}{ + "tidy_expired_issuers": true, + "issuer_safety_buffer": "60m", + }) + require.NoError(t, err) + + // Wait for tidy to finish. + time.Sleep(2 * time.Second) + + // Expired issuer should exist. + resp, err := CBRead(b, s, "issuer/root-expired") + requireSuccessNonNilResponse(t, resp, err, "expired should still be present") + resp, err = CBRead(b, s, "issuer/root-valid") + requireSuccessNonNilResponse(t, resp, err, "valid should still be present") + + // Second tidy run with shorter safety buffer shouldn't remove the + // expired one, as it should be the default issuer. + _, err = CBWrite(b, s, "tidy", map[string]interface{}{ + "tidy_expired_issuers": true, + "issuer_safety_buffer": "1s", + }) + require.NoError(t, err) + + // Wait for tidy to finish. + time.Sleep(2 * time.Second) + + // Expired issuer should still exist. + resp, err = CBRead(b, s, "issuer/root-expired") + requireSuccessNonNilResponse(t, resp, err, "expired should still be present") + resp, err = CBRead(b, s, "issuer/root-valid") + requireSuccessNonNilResponse(t, resp, err, "valid should still be present") + + // Update the default issuer. + _, err = CBWrite(b, s, "config/issuers", map[string]interface{}{ + "default": "root-valid", + }) + require.NoError(t, err) + + // Third tidy run should remove the expired one. + _, err = CBWrite(b, s, "tidy", map[string]interface{}{ + "tidy_expired_issuers": true, + "issuer_safety_buffer": "1s", + }) + require.NoError(t, err) + + // Wait for tidy to finish. + time.Sleep(2 * time.Second) + + // Valid issuer should exist still; other should be removed. + resp, err = CBRead(b, s, "issuer/root-expired") + require.Error(t, err) + require.Nil(t, resp) + resp, err = CBRead(b, s, "issuer/root-valid") + requireSuccessNonNilResponse(t, resp, err, "valid should still be present") + + // Finally, one more tidy should cause no changes. + _, err = CBWrite(b, s, "tidy", map[string]interface{}{ + "tidy_expired_issuers": true, + "issuer_safety_buffer": "1s", + }) + require.NoError(t, err) + + // Wait for tidy to finish. + time.Sleep(2 * time.Second) + + // Valid issuer should exist still; other should be removed. + resp, err = CBRead(b, s, "issuer/root-expired") + require.Error(t, err) + require.Nil(t, resp) + resp, err = CBRead(b, s, "issuer/root-valid") + requireSuccessNonNilResponse(t, resp, err, "valid should still be present") + + // Ensure we have safety buffer and expired issuers set correctly. 
+	statusResp, err := CBRead(b, s, "tidy-status")
+	require.NoError(t, err)
+	require.NotNil(t, statusResp)
+	require.NotNil(t, statusResp.Data)
+	require.Equal(t, statusResp.Data["issuer_safety_buffer"], 1)
+	require.Equal(t, statusResp.Data["tidy_expired_issuers"], true)
+}
+
+func TestTidyIssuerConfig(t *testing.T) {
+	t.Parallel()
+
+	b, s := CreateBackendWithStorage(t)
+
+	// Ensure the default auto-tidy config matches expectations
+	resp, err := CBRead(b, s, "config/auto-tidy")
+	schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/auto-tidy"), logical.ReadOperation), resp, true)
+	requireSuccessNonNilResponse(t, resp, err)
+
+	jsonBlob, err := json.Marshal(&defaultTidyConfig)
+	require.NoError(t, err)
+	var defaultConfigMap map[string]interface{}
+	err = json.Unmarshal(jsonBlob, &defaultConfigMap)
+	require.NoError(t, err)
+
+	// Coerce defaults to API response types.
+	defaultConfigMap["interval_duration"] = int(time.Duration(defaultConfigMap["interval_duration"].(float64)) / time.Second)
+	defaultConfigMap["issuer_safety_buffer"] = int(time.Duration(defaultConfigMap["issuer_safety_buffer"].(float64)) / time.Second)
+	defaultConfigMap["safety_buffer"] = int(time.Duration(defaultConfigMap["safety_buffer"].(float64)) / time.Second)
+	defaultConfigMap["pause_duration"] = time.Duration(defaultConfigMap["pause_duration"].(float64)).String()
+	defaultConfigMap["revocation_queue_safety_buffer"] = int(time.Duration(defaultConfigMap["revocation_queue_safety_buffer"].(float64)) / time.Second)
+	defaultConfigMap["acme_account_safety_buffer"] = int(time.Duration(defaultConfigMap["acme_account_safety_buffer"].(float64)) / time.Second)
+
+	require.Equal(t, defaultConfigMap, resp.Data)
+
+	// Ensure setting issuer-tidy related fields stick.
+	resp, err = CBWrite(b, s, "config/auto-tidy", map[string]interface{}{
+		"tidy_expired_issuers": true,
+		"issuer_safety_buffer": "5s",
+	})
+	schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/auto-tidy"), logical.UpdateOperation), resp, true)
+
+	requireSuccessNonNilResponse(t, resp, err)
+	require.Equal(t, true, resp.Data["tidy_expired_issuers"])
+	require.Equal(t, 5, resp.Data["issuer_safety_buffer"])
+}
+
+// TestCertStorageMetrics ensures that when enabled, metrics are able to count the number of certificates in storage and
+// number of revoked certificates in storage. Moreover, this test ensures that the gauge is emitted periodically, so
+// that the metric does not disappear or go stale.
+func TestCertStorageMetrics(t *testing.T) {
+	// This test uses the same setup as TestAutoTidy
+	newPeriod := 1 * time.Second
+
+	// We set up a metrics accumulator
+	inmemSink := metrics.NewInmemSink(
+		2*newPeriod,  // A short time period is ideal here to verify metrics are emitted on every periodic func
+		10*newPeriod) // Do not keep a huge amount of metrics in the sink forever, clear them out to save memory usage.
+
+	metricsConf := metrics.DefaultConfig("")
+	metricsConf.EnableHostname = false
+	metricsConf.EnableHostnameLabel = false
+	metricsConf.EnableServiceLabel = false
+	metricsConf.EnableTypePrefix = false
+
+	_, err := metrics.NewGlobal(metricsConf, inmemSink)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// This test requires the periodicFunc to trigger, which requires we stand
+	// up a full test cluster.
+	coreConfig := &vault.CoreConfig{
+		LogicalBackends: map[string]logical.Factory{
+			"pki": Factory,
+		},
+		// See notes below about usage of /sys/raw for reading cluster
+		// storage without barrier encryption. 
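+		// EnableRaw exposes the sys/raw endpoint, which the test uses to
+		// read backend storage entries directly.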
+ EnableRaw: true, + RollbackPeriod: newPeriod, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + NumCores: 1, + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + + // Mount PKI + err = client.Sys().Mount("pki", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "10m", + MaxLeaseTTL: "60m", + }, + }) + require.NoError(t, err) + + // Generate root. + resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "Root X1", + "key_type": "ec", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data) + require.NotEmpty(t, resp.Data["issuer_id"]) + + // Set up a testing role. + _, err = client.Logical().Write("pki/roles/local-testing", map[string]interface{}{ + "allow_any_name": true, + "enforce_hostnames": false, + "key_type": "ec", + }) + require.NoError(t, err) + + // Run tidy so that tidy-status is not empty + _, err = client.Logical().Write("pki/tidy", map[string]interface{}{ + "tidy_revoked_certs": true, + }) + require.NoError(t, err) + + // Since certificate counts are off by default, we shouldn't see counts in the tidy status + tidyStatus, err := client.Logical().Read("pki/tidy-status") + if err != nil { + t.Fatal(err) + } + // backendUUID should exist, we need this for metrics + backendUUID := tidyStatus.Data["internal_backend_uuid"].(string) + // "current_cert_store_count", "current_revoked_cert_count" + countData, ok := tidyStatus.Data["current_cert_store_count"] + if ok && countData != nil { + t.Fatalf("Certificate counting should be off by default, but current cert store count %v appeared in tidy status in unconfigured mount", countData) + } + revokedCountData, ok := tidyStatus.Data["current_revoked_cert_count"] + if ok && revokedCountData != nil { + t.Fatalf("Certificate counting should be off by default, but revoked cert count %v appeared in tidy status in unconfigured mount", revokedCountData) + } + + // Since certificate counts are off by default, those metrics should not exist yet + stableMetric := inmemSink.Data() + mostRecentInterval := stableMetric[len(stableMetric)-1] + _, ok = mostRecentInterval.Gauges["secrets.pki."+backendUUID+".total_revoked_certificates_stored"] + if ok { + t.Fatalf("Certificate counting should be off by default, but revoked cert count was emitted as a metric in an unconfigured mount") + } + _, ok = mostRecentInterval.Gauges["secrets.pki."+backendUUID+".total_certificates_stored"] + if ok { + t.Fatalf("Certificate counting should be off by default, but total certificate count was emitted as a metric in an unconfigured mount") + } + + // Write the auto-tidy config. + _, err = client.Logical().Write("pki/config/auto-tidy", map[string]interface{}{ + "enabled": true, + "interval_duration": "1s", + "tidy_cert_store": true, + "tidy_revoked_certs": true, + "safety_buffer": "1s", + "maintain_stored_certificate_counts": true, + "publish_stored_certificate_count_metrics": false, + }) + require.NoError(t, err) + + // Reload the Mount - Otherwise Stored Certificate Counts Will Not Be Populated + // Sealing cores as plugin reload triggers the race detector - VAULT-13635 + testhelpers.EnsureCoresSealed(t, cluster) + testhelpers.EnsureCoresUnsealed(t, cluster) + + // Wait until a tidy run has completed. 
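+	// A missing, nil, or empty time_finished field means a run is still in
+	// progress (or hasn't started), so keep polling until it is populated.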
+	testhelpers.RetryUntil(t, 5*time.Second, func() error {
+		resp, err = client.Logical().Read("pki/tidy-status")
+		if err != nil {
+			return fmt.Errorf("error reading tidy status: %w", err)
+		}
+		if finished, ok := resp.Data["time_finished"]; !ok || finished == "" || finished == nil {
+			return fmt.Errorf("tidy time_finished not run yet: %v", finished)
+		}
+		return nil
+	})
+
+	// Since publish_stored_certificate_count_metrics is still false, these metrics should still not exist yet
+	stableMetric = inmemSink.Data()
+	mostRecentInterval = stableMetric[len(stableMetric)-1]
+	_, ok = mostRecentInterval.Gauges["secrets.pki."+backendUUID+".total_revoked_certificates_stored"]
+	if ok {
+		t.Fatalf("Metrics publishing is still disabled, but revoked cert count was emitted as a metric")
+	}
+	_, ok = mostRecentInterval.Gauges["secrets.pki."+backendUUID+".total_certificates_stored"]
+	if ok {
+		t.Fatalf("Metrics publishing is still disabled, but total certificate count was emitted as a metric")
+	}
+
+	// But since certificate counting is on, the metrics should exist on tidyStatus endpoint:
+	tidyStatus, err = client.Logical().Read("pki/tidy-status")
+	require.NoError(t, err, "failed reading tidy-status endpoint")
+
+	// backendUUID should exist, we need this for metrics
+	backendUUID = tidyStatus.Data["internal_backend_uuid"].(string)
+	// "current_cert_store_count", "current_revoked_cert_count"
+	certStoreCount, ok := tidyStatus.Data["current_cert_store_count"]
+	if !ok {
+		t.Fatalf("Certificate counting has been turned on, but current cert store count does not appear in tidy status")
+	}
+	if certStoreCount != json.Number("1") {
+		t.Fatalf("Only created one certificate, but got a certificate count of %v", certStoreCount)
+	}
+	revokedCertCount, ok := tidyStatus.Data["current_revoked_cert_count"]
+	if !ok {
+		t.Fatalf("Certificate counting has been turned on, but revoked cert store count does not appear in tidy status")
+	}
+	if revokedCertCount != json.Number("0") {
+		t.Fatalf("Have not yet revoked a certificate, but got a revoked cert store count of %v", revokedCertCount)
+	}
+
+	// Write the auto-tidy config, again, this time turning on metrics
+	_, err = client.Logical().Write("pki/config/auto-tidy", map[string]interface{}{
+		"enabled":            true,
+		"interval_duration":  "1s",
+		"tidy_cert_store":    true,
+		"tidy_revoked_certs": true,
+		"safety_buffer":      "1s",
+		"maintain_stored_certificate_counts":       true,
+		"publish_stored_certificate_count_metrics": true,
+	})
+	require.NoError(t, err, "failed updating auto-tidy configuration")
+
+	// Issue a cert and revoke it. 
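+	// After this issue/revoke pair, the expected steady state is two stored
+	// certificates (root + leaf) and one revoked certificate, which the
+	// tidy-status counts and gauges below assert.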
+ resp, err = client.Logical().Write("pki/issue/local-testing", map[string]interface{}{ + "common_name": "example.com", + "ttl": "10s", + }) + require.NoError(t, err, "failed to issue leaf certificate") + require.NotNil(t, resp, "nil response without error on issuing leaf certificate") + require.NotNil(t, resp.Data, "empty Data without error on issuing leaf certificate") + require.NotEmpty(t, resp.Data["serial_number"]) + require.NotEmpty(t, resp.Data["certificate"]) + leafSerial := resp.Data["serial_number"].(string) + leafCert := parseCert(t, resp.Data["certificate"].(string)) + + // Read cert before revoking + resp, err = client.Logical().Read("pki/cert/" + leafSerial) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["certificate"]) + revocationTime, err := (resp.Data["revocation_time"].(json.Number)).Int64() + require.Equal(t, int64(0), revocationTime, "revocation time was not zero") + require.Empty(t, resp.Data["revocation_time_rfc3339"], "revocation_time_rfc3339 was not empty") + require.Empty(t, resp.Data["issuer_id"], "issuer_id was not empty") + + revokeResp, err := client.Logical().Write("pki/revoke", map[string]interface{}{ + "serial_number": leafSerial, + }) + require.NoError(t, err, "failed revoking serial number: %s", leafSerial) + + for _, warning := range revokeResp.Warnings { + if strings.Contains(warning, "already expired; refusing to add to CRL") { + t.Skipf("Skipping test as we missed the revocation window of our leaf cert") + } + } + + // We read the auto-tidy endpoint again, to ensure any metrics logic has completed (lock on config) + _, err = client.Logical().Read("/pki/config/auto-tidy") + require.NoError(t, err, "failed to read auto-tidy configuration") + + // Check Metrics After Cert Has Be Created and Revoked + tidyStatus, err = client.Logical().Read("pki/tidy-status") + require.NoError(t, err, "failed to read tidy-status") + + backendUUID = tidyStatus.Data["internal_backend_uuid"].(string) + certStoreCount, ok = tidyStatus.Data["current_cert_store_count"] + if !ok { + t.Fatalf("Certificate counting has been turned on, but current cert store count does not appear in tidy status") + } + if certStoreCount != json.Number("2") { + t.Fatalf("Created root and leaf certificate, but a got a certificate count of %v", certStoreCount) + } + revokedCertCount, ok = tidyStatus.Data["current_revoked_cert_count"] + if !ok { + t.Fatalf("Certificate counting has been turned on, but revoked cert store count does not appear in tidy status") + } + if revokedCertCount != json.Number("1") { + t.Fatalf("Revoked one certificate, but got a revoked cert store count of %v\n:%v", revokedCertCount, tidyStatus) + } + // This should now be initialized + certCountError, ok := tidyStatus.Data["certificate_counting_error"] + if ok && certCountError.(string) != "" { + t.Fatalf("Expected certificate count error to disappear after initialization, but got error %v", certCountError) + } + + testhelpers.RetryUntil(t, newPeriod*5, func() error { + stableMetric = inmemSink.Data() + mostRecentInterval = stableMetric[len(stableMetric)-1] + revokedCertCountGaugeValue, ok := mostRecentInterval.Gauges["secrets.pki."+backendUUID+".total_revoked_certificates_stored"] + if !ok { + return errors.New("turned on metrics, but revoked cert count was not emitted") + } + if revokedCertCountGaugeValue.Value != 1 { + return fmt.Errorf("revoked one certificate, but metrics emitted a revoked cert store count of %v", revokedCertCountGaugeValue) + } + 
+		certStoreCountGaugeValue, ok := mostRecentInterval.Gauges["secrets.pki."+backendUUID+".total_certificates_stored"]
+		if !ok {
+			return errors.New("turned on metrics, but total certificate count was not emitted")
+		}
+		if certStoreCountGaugeValue.Value != 2 {
+			return fmt.Errorf("stored two certificates, but total certificate count emitted was %v", certStoreCountGaugeValue.Value)
+		}
+		return nil
+	})
+
+	// Wait for cert to expire and the safety buffer to elapse.
+	sleepFor := time.Until(leafCert.NotAfter) + 3*time.Second
+	t.Logf("%v: Sleeping for %v, leaf certificate expires: %v", time.Now().Format(time.RFC3339), sleepFor, leafCert.NotAfter)
+	time.Sleep(sleepFor)
+
+	// Wait for auto-tidy to run afterwards.
+	var foundTidyRunning string
+	var foundTidyFinished bool
+	timeoutChan := time.After(120 * time.Second)
+	for {
+		if foundTidyRunning != "" && foundTidyFinished {
+			break
+		}
+
+		select {
+		case <-timeoutChan:
+			t.Fatalf("expected auto-tidy to run (%v) and finish (%v) before 120 seconds elapsed", foundTidyRunning, foundTidyFinished)
+		default:
+			time.Sleep(250 * time.Millisecond)
+
+			resp, err = client.Logical().Read("pki/tidy-status")
+			require.NoError(t, err)
+			require.NotNil(t, resp)
+			require.NotNil(t, resp.Data)
+			require.NotEmpty(t, resp.Data["state"])
+			require.NotEmpty(t, resp.Data["time_started"])
+			state := resp.Data["state"].(string)
+			started := resp.Data["time_started"].(string)
+
+			t.Logf("%v: Resp: %v", time.Now().Format(time.RFC3339), resp.Data)
+
+			// We want the _next_ tidy run after the cert expires. This
+			// means if we're currently finished when we hit this the
+			// first time, we want to wait for the next run.
+			if foundTidyRunning == "" {
+				foundTidyRunning = started
+			} else if foundTidyRunning != started && !foundTidyFinished && state == "Finished" {
+				foundTidyFinished = true
+			}
+		}
+	}
+
+	// After Tidy, Cert Store Count Should Still Be Available, and Be Updated:
+	// Check Metrics After Cert Has Been Created and Revoked
+	tidyStatus, err = client.Logical().Read("pki/tidy-status")
+	if err != nil {
+		t.Fatal(err)
+	}
+	backendUUID = tidyStatus.Data["internal_backend_uuid"].(string)
+	// "current_cert_store_count", "current_revoked_cert_count"
+	certStoreCount, ok = tidyStatus.Data["current_cert_store_count"]
+	if !ok {
+		t.Fatalf("Certificate counting has been turned on, but current cert store count does not appear in tidy status")
+	}
+	if certStoreCount != json.Number("1") {
+		t.Fatalf("Created root and leaf certificate, deleted leaf, but got a certificate count of %v", certStoreCount)
+	}
+	revokedCertCount, ok = tidyStatus.Data["current_revoked_cert_count"]
+	if !ok {
+		t.Fatalf("Certificate counting has been turned on, but revoked cert store count does not appear in tidy status")
+	}
+	if revokedCertCount != json.Number("0") {
+		t.Fatalf("Revoked certificate has been tidied, but got a revoked cert store count of %v", revokedCertCount)
+	}
+
+	testhelpers.RetryUntil(t, newPeriod*5, func() error {
+		stableMetric = inmemSink.Data()
+		mostRecentInterval = stableMetric[len(stableMetric)-1]
+		revokedCertCountGaugeValue, ok := mostRecentInterval.Gauges["secrets.pki."+backendUUID+".total_revoked_certificates_stored"]
+		if !ok {
+			return errors.New("turned on metrics, but revoked cert count was not emitted")
+		}
+		if revokedCertCountGaugeValue.Value != 0 {
+			return fmt.Errorf("revoked certificate has been tidied, but metrics emitted a revoked cert store count of %v", revokedCertCountGaugeValue)
+		}
mostRecentInterval.Gauges["secrets.pki."+backendUUID+".total_certificates_stored"] + if !ok { + return errors.New("turned on metrics, but total certificate count was not emitted") + } + if certStoreCountGaugeValue.Value != 1 { + return fmt.Errorf("only one of two certificates left after tidy, but total certificate count emitted was %v", certStoreCountGaugeValue.Value) + } + return nil + }) +} + +// This test uses the default safety buffer with backdating. +func TestTidyAcmeWithBackdate(t *testing.T) { + t.Parallel() + + cluster, client, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + testCtx := context.Background() + + // Grab the mount UUID for sys/raw invocations. + pkiMount := findStorageMountUuid(t, client, "pki") + + // Register an Account, do nothing with it + baseAcmeURL := "/v1/pki/acme/" + accountKey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err, "failed creating rsa key") + + acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) + + // Create new account with order/cert + t.Logf("Testing register on %s", baseAcmeURL) + acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true }) + t.Logf("got account URI: %v", acct.URI) + require.NoError(t, err, "failed registering account") + identifiers := []string{"*.localdomain"} + order, err := acmeClient.AuthorizeOrder(testCtx, []acme.AuthzID{ + {Type: "dns", Value: identifiers[0]}, + }) + require.NoError(t, err, "failed creating order") + + // HACK: Update authorization/challenge to completed as we can't really do it properly in this workflow test. + markAuthorizationSuccess(t, client, acmeClient, acct, order) + + goodCr := &x509.CertificateRequest{DNSNames: []string{identifiers[0]}} + csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generated key for CSR") + csr, err := x509.CreateCertificateRequest(rand.Reader, goodCr, csrKey) + require.NoError(t, err, "failed generating csr") + certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, true) + require.NoError(t, err, "order finalization failed") + require.GreaterOrEqual(t, len(certs), 1, "expected at least one cert in bundle") + + acmeCert, err := x509.ParseCertificate(certs[0]) + require.NoError(t, err, "failed parsing acme cert") + + // -> Ensure we see it in storage. Since we don't have direct storage + // access, use sys/raw interface. + acmeThumbprintsPath := path.Join("sys/raw/logical", pkiMount, acmeThumbprintPrefix) + listResp, err := client.Logical().ListWithContext(testCtx, acmeThumbprintsPath) + require.NoError(t, err, "failed listing ACME thumbprints") + require.NotEmpty(t, listResp.Data["keys"], "expected non-empty list response") + + // Run Tidy + _, err = client.Logical().Write("pki/tidy", map[string]interface{}{ + "tidy_acme": true, + }) + require.NoError(t, err) + + // Wait for tidy to finish. + waitForTidyToFinish(t, client, "pki") + + // Check that the Account is Still There, Still Valid. 
+	account, err := acmeClient.GetReg(context.Background(), "" /* legacy unused param*/)
+	require.NoError(t, err, "failed looking up acme account")
+	require.Equal(t, acme.StatusValid, account.Status)
+
+	// Find the associated thumbprint
+	listResp, err = client.Logical().ListWithContext(testCtx, acmeThumbprintsPath)
+	require.NoError(t, err)
+	require.NotNil(t, listResp)
+	thumbprintEntries := listResp.Data["keys"].([]interface{})
+	require.Equal(t, len(thumbprintEntries), 1)
+	thumbprint := thumbprintEntries[0].(string)
+
+	// Let "Time Pass"; this is a HACK, this function sys-writes to overwrite the date on objects in storage
+	duration := time.Until(acmeCert.NotAfter) + 31*24*time.Hour
+	accountId := acmeClient.KID[strings.LastIndex(string(acmeClient.KID), "/")+1:]
+	orderId := order.URI[strings.LastIndex(order.URI, "/")+1:]
+	backDateAcmeOrderSys(t, testCtx, client, string(accountId), orderId, duration, pkiMount)
+
+	// Run Tidy -> clean up order
+	_, err = client.Logical().Write("pki/tidy", map[string]interface{}{
+		"tidy_acme": true,
+	})
+	require.NoError(t, err)
+
+	// Wait for tidy to finish.
+	tidyResp := waitForTidyToFinish(t, client, "pki")
+
+	require.Equal(t, tidyResp.Data["acme_orders_deleted_count"], json.Number("1"),
+		"expected to delete a single ACME order: %v", tidyResp)
+	require.Equal(t, tidyResp.Data["acme_account_revoked_count"], json.Number("0"),
+		"no ACME account should have been revoked: %v", tidyResp)
+	require.Equal(t, tidyResp.Data["acme_account_deleted_count"], json.Number("0"),
+		"no ACME account should have been deleted: %v", tidyResp)
+
+	// Make sure our order is indeed deleted.
+	_, err = acmeClient.GetOrder(context.Background(), order.URI)
+	require.ErrorContains(t, err, "order does not exist")
+
+	// Check that the Account is Still There, Still Valid.
+	account, err = acmeClient.GetReg(context.Background(), "" /* legacy unused param*/)
+	require.NoError(t, err, "failed looking up acme account")
+	require.Equal(t, acme.StatusValid, account.Status)
+
+	// Now back date the account to make sure we revoke it
+	backDateAcmeAccountSys(t, testCtx, client, thumbprint, duration, pkiMount)
+
+	// Run Tidy -> mark account revoked
+	_, err = client.Logical().Write("pki/tidy", map[string]interface{}{
+		"tidy_acme": true,
+	})
+	require.NoError(t, err)
+
+	// Wait for tidy to finish.
+	tidyResp = waitForTidyToFinish(t, client, "pki")
+	require.Equal(t, tidyResp.Data["acme_orders_deleted_count"], json.Number("0"),
+		"no ACME orders should have been deleted: %v", tidyResp)
+	require.Equal(t, tidyResp.Data["acme_account_revoked_count"], json.Number("1"),
+		"expected to revoke a single ACME account: %v", tidyResp)
+	require.Equal(t, tidyResp.Data["acme_account_deleted_count"], json.Number("0"),
+		"no ACME account should have been deleted: %v", tidyResp)
+
+	// Lookup our account to make sure we get the appropriate revoked status
+	account, err = acmeClient.GetReg(context.Background(), "" /* legacy unused param*/)
+	require.NoError(t, err, "failed looking up acme account")
+	require.Equal(t, acme.StatusRevoked, account.Status)
+
+	// Let "Time Pass"; this is a HACK, this function sys-writes to overwrite the date on objects in storage
+	backDateAcmeAccountSys(t, testCtx, client, thumbprint, duration, pkiMount)
+
+	// Run Tidy -> remove account
+	_, err = client.Logical().Write("pki/tidy", map[string]interface{}{
+		"tidy_acme": true,
+	})
+	require.NoError(t, err)
+
+	// Wait for tidy to finish. 
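+	// This third pass deletes the revoked, backdated account outright; the
+	// list and raw reads below confirm it is gone from storage.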
+ waitForTidyToFinish(t, client, "pki") + + // Check Account No Longer Appears + listResp, err = client.Logical().ListWithContext(testCtx, acmeThumbprintsPath) + require.NoError(t, err) + if listResp != nil { + thumbprintEntries = listResp.Data["keys"].([]interface{}) + require.Equal(t, 0, len(thumbprintEntries)) + } + + // Nor Under Account + _, acctKID := path.Split(acct.URI) + acctPath := path.Join("sys/raw/logical", pkiMount, acmeAccountPrefix, acctKID) + t.Logf("account path: %v", acctPath) + getResp, err := client.Logical().ReadWithContext(testCtx, acctPath) + require.NoError(t, err) + require.Nil(t, getResp) +} + +// This test uses a smaller safety buffer. +func TestTidyAcmeWithSafetyBuffer(t *testing.T) { + t.Parallel() + + // This would still be way easier if I could do both sides + cluster, client, _ := setupAcmeBackend(t) + defer cluster.Cleanup() + testCtx := context.Background() + + // Grab the mount UUID for sys/raw invocations. + pkiMount := findStorageMountUuid(t, client, "pki") + + // Register an Account, do nothing with it + baseAcmeURL := "/v1/pki/acme/" + accountKey, err := rsa.GenerateKey(rand.Reader, 2048) + require.NoError(t, err, "failed creating rsa key") + + acmeClient := getAcmeClientForCluster(t, cluster, baseAcmeURL, accountKey) + + // Create new account + t.Logf("Testing register on %s", baseAcmeURL) + acct, err := acmeClient.Register(testCtx, &acme.Account{}, func(tosURL string) bool { return true }) + t.Logf("got account URI: %v", acct.URI) + require.NoError(t, err, "failed registering account") + + // -> Ensure we see it in storage. Since we don't have direct storage + // access, use sys/raw interface. + acmeThumbprintsPath := path.Join("sys/raw/logical", pkiMount, acmeThumbprintPrefix) + listResp, err := client.Logical().ListWithContext(testCtx, acmeThumbprintsPath) + require.NoError(t, err, "failed listing ACME thumbprints") + require.NotEmpty(t, listResp.Data["keys"], "expected non-empty list response") + thumbprintEntries := listResp.Data["keys"].([]interface{}) + require.Equal(t, len(thumbprintEntries), 1) + + // Wait for the account to expire. + time.Sleep(2 * time.Second) + + // Run Tidy -> mark account revoked + _, err = client.Logical().Write("pki/tidy", map[string]interface{}{ + "tidy_acme": true, + "acme_account_safety_buffer": "1s", + }) + require.NoError(t, err) + + // Wait for tidy to finish. + statusResp := waitForTidyToFinish(t, client, "pki") + require.Equal(t, statusResp.Data["acme_account_revoked_count"], json.Number("1"), "expected to revoke a single ACME account") + + // Wait for the account to expire. + time.Sleep(2 * time.Second) + + // Run Tidy -> remove account + _, err = client.Logical().Write("pki/tidy", map[string]interface{}{ + "tidy_acme": true, + "acme_account_safety_buffer": "1s", + }) + require.NoError(t, err) + + // Wait for tidy to finish. 
+	waitForTidyToFinish(t, client, "pki")
+
+	// Check Account No Longer Appears
+	listResp, err = client.Logical().ListWithContext(testCtx, acmeThumbprintsPath)
+	require.NoError(t, err)
+	if listResp != nil {
+		thumbprintEntries = listResp.Data["keys"].([]interface{})
+		require.Equal(t, 0, len(thumbprintEntries))
+	}
+
+	// Nor Under Account
+	_, acctKID := path.Split(acct.URI)
+	acctPath := path.Join("sys/raw/logical", pkiMount, acmeAccountPrefix, acctKID)
+	t.Logf("account path: %v", acctPath)
+	getResp, err := client.Logical().ReadWithContext(testCtx, acctPath)
+	require.NoError(t, err)
+	require.Nil(t, getResp)
+}
+
+// The *Sys helpers below all work through the sys/raw/logical interface, using an API client.
+func backDateAcmeAccountSys(t *testing.T, testContext context.Context, client *api.Client, thumbprintString string, backdateAmount time.Duration, mount string) {
+	rawThumbprintPath := path.Join("sys/raw/logical/", mount, acmeThumbprintPrefix+thumbprintString)
+	thumbprintResp, err := client.Logical().ReadWithContext(testContext, rawThumbprintPath)
+	if err != nil {
+		t.Fatalf("unable to fetch thumbprint response at %v: %v", rawThumbprintPath, err)
+	}
+
+	var thumbprint acmeThumbprint
+	err = jsonutil.DecodeJSON([]byte(thumbprintResp.Data["value"].(string)), &thumbprint)
+	if err != nil {
+		t.Fatalf("unable to decode thumbprint response %v to find account entry: %v", thumbprintResp.Data, err)
+	}
+
+	accountPath := path.Join("sys/raw/logical", mount, acmeAccountPrefix+thumbprint.Kid)
+	accountResp, err := client.Logical().ReadWithContext(testContext, accountPath)
+	if err != nil {
+		t.Fatalf("unable to fetch account entry %v: %v", thumbprint.Kid, err)
+	}
+
+	var account acmeAccount
+	err = jsonutil.DecodeJSON([]byte(accountResp.Data["value"].(string)), &account)
+	if err != nil {
+		t.Fatalf("unable to decode acme account %v: %v", accountResp, err)
+	}
+
+	t.Logf("got account before update: %v", account)
+
+	account.AccountCreatedDate = backDate(account.AccountCreatedDate, backdateAmount)
+	account.MaxCertExpiry = backDate(account.MaxCertExpiry, backdateAmount)
+	account.AccountRevokedDate = backDate(account.AccountRevokedDate, backdateAmount)
+
+	t.Logf("got account after update: %v", account)
+
+	encodeJSON, err := jsonutil.EncodeJSON(account)
+	require.NoError(t, err, "failed encoding acme account")
+	_, err = client.Logical().WriteWithContext(context.Background(), accountPath, map[string]interface{}{
+		"value":    base64.StdEncoding.EncodeToString(encodeJSON),
+		"encoding": "base64",
+	})
+	if err != nil {
+		t.Fatalf("error saving backdated account entry at %v: %v", accountPath, err)
+	}
+
+	ordersPath := path.Join("sys/raw/logical", mount, acmeAccountPrefix, thumbprint.Kid, "/orders/")
+	ordersRaw, err := client.Logical().ListWithContext(context.Background(), ordersPath)
+	require.NoError(t, err, "failed listing orders")
+
+	if ordersRaw == nil {
+		t.Logf("skipping backdating orders as there are none")
+		return
+	}
+
+	require.NotNil(t, ordersRaw, "got no response data")
+	require.NotNil(t, ordersRaw.Data, "got no response data")
+
+	orders := ordersRaw.Data
+
+	for _, orderId := range orders["keys"].([]interface{}) {
+		backDateAcmeOrderSys(t, testContext, client, thumbprint.Kid, orderId.(string), backdateAmount, mount)
+	}
+
+	// No need to change certificate entries here - no time is stored on AcmeCertEntry
+}
+
+func backDateAcmeOrderSys(t *testing.T, testContext context.Context, client *api.Client, accountKid string, orderId string, backdateAmount time.Duration, mount string) {
+	rawOrderPath := path.Join("sys/raw/logical/", mount, acmeAccountPrefix, accountKid, "orders", orderId)
+	orderResp, err := client.Logical().ReadWithContext(testContext, rawOrderPath)
+	if err != nil {
+		t.Fatalf("unable to fetch order entry %v on account %v at %v: %v", orderId, accountKid, rawOrderPath, err)
+	}
+
+	var order *acmeOrder
+	err = jsonutil.DecodeJSON([]byte(orderResp.Data["value"].(string)), &order)
+	if err != nil {
+		t.Fatalf("error decoding order entry %v on account %v, %v produced: %v", orderId, accountKid, orderResp, err)
+	}
+
+	order.Expires = backDate(order.Expires, backdateAmount)
+	order.CertificateExpiry = backDate(order.CertificateExpiry, backdateAmount)
+
+	encodeJSON, err := jsonutil.EncodeJSON(order)
+	require.NoError(t, err, "failed encoding acme order")
+	_, err = client.Logical().WriteWithContext(context.Background(), rawOrderPath, map[string]interface{}{
+		"value":    base64.StdEncoding.EncodeToString(encodeJSON),
+		"encoding": "base64",
+	})
+	if err != nil {
+		t.Fatalf("error saving backdated order entry %v on account %v: %v", orderId, accountKid, err)
+	}
+
+	for _, authId := range order.AuthorizationIds {
+		backDateAcmeAuthorizationSys(t, testContext, client, accountKid, authId, backdateAmount, mount)
+	}
+}
+
+func backDateAcmeAuthorizationSys(t *testing.T, testContext context.Context, client *api.Client, accountKid string, authId string, backdateAmount time.Duration, mount string) {
+	rawAuthPath := path.Join("sys/raw/logical/", mount, acmeAccountPrefix, accountKid, "/authorizations/", authId)
+
+	authResp, err := client.Logical().ReadWithContext(testContext, rawAuthPath)
+	if err != nil {
+		t.Fatalf("unable to fetch authorization %v: %v", rawAuthPath, err)
+	}
+
+	var auth *ACMEAuthorization
+	err = jsonutil.DecodeJSON([]byte(authResp.Data["value"].(string)), &auth)
+	if err != nil {
+		t.Fatalf("error decoding auth %v, auth entry %v produced %v", rawAuthPath, authResp, err)
+	}
+
+	expiry, err := auth.GetExpires()
+	if err != nil {
+		t.Fatalf("could not get expiry on %v: %v", rawAuthPath, err)
+	}
+	newExpiry := backDate(expiry, backdateAmount)
+	auth.Expires = newExpiry.Format(time.RFC3339)
+
+	encodeJSON, err := jsonutil.EncodeJSON(auth)
+	require.NoError(t, err, "failed encoding acme authorization")
+	_, err = client.Logical().WriteWithContext(context.Background(), rawAuthPath, map[string]interface{}{
+		"value":    base64.StdEncoding.EncodeToString(encodeJSON),
+		"encoding": "base64",
+	})
+	if err != nil {
+		t.Fatalf("error updating authorization date on %v: %v", rawAuthPath, err)
+	}
+}
+
+func backDate(original time.Time, change time.Duration) time.Time {
+	if original.IsZero() {
+		return original
+	}
+
+	zeroTime := time.Time{}
+
+	if original.Before(zeroTime.Add(change)) {
+		return zeroTime
+	}
+
+	return original.Add(-change)
+}
+
+func waitForTidyToFinish(t *testing.T, client *api.Client, mount string) *api.Secret {
+	var statusResp *api.Secret
+	testhelpers.RetryUntil(t, 5*time.Second, func() error {
+		var err error
+
+		tidyStatusPath := mount + "/tidy-status"
+		statusResp, err = client.Logical().Read(tidyStatusPath)
+		if err != nil {
+			return fmt.Errorf("failed reading path: %s: %w", tidyStatusPath, err)
+		}
+		if state, ok := statusResp.Data["state"]; !ok || state == "Running" {
+			return fmt.Errorf("tidy status state is still running")
+		}
+
+		if errorOccurred, ok := statusResp.Data["error"]; !ok || !(errorOccurred == nil || errorOccurred == "") {
+			return fmt.Errorf("tidy status returned an error: %s", errorOccurred)
+		}
+
+		return nil
+	})
+
+	t.Logf("got tidy status: %v", statusResp.Data)
+	return statusResp
+}
diff --git a/builtin/logical/pki/periodic.go b/builtin/logical/pki/periodic.go
new file mode 100644
index 0000000..77ff312
--- /dev/null
+++ b/builtin/logical/pki/periodic.go
@@ -0,0 +1,337 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package pki
+
+import (
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"sync/atomic"
+	"time"
+
+	"github.com/hashicorp/vault/sdk/helper/consts"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+const (
+	minUnifiedTransferDelay = 30 * time.Minute
+)
+
+type unifiedTransferStatus struct {
+	isRunning  atomic.Bool
+	lastRun    time.Time
+	forceRerun atomic.Bool
+}
+
+func (uts *unifiedTransferStatus) forceRun() {
+	uts.forceRerun.Store(true)
+}
+
+func newUnifiedTransferStatus() *unifiedTransferStatus {
+	return &unifiedTransferStatus{}
+}
+
+// runUnifiedTransfer is meant to run as a background task: it finds and sends
+// all missing local revocation entries to the unified space, if the feature
+// is enabled.
+func runUnifiedTransfer(sc *storageContext) {
+	b := sc.Backend
+	status := b.unifiedTransferStatus
+
+	isPerfStandby := b.System().ReplicationState().HasState(consts.ReplicationDRSecondary | consts.ReplicationPerformanceStandby)
+
+	if isPerfStandby || b.System().LocalMount() {
+		// We only do this on active enterprise nodes, when we aren't a local mount
+		return
+	}
+
+	config, err := b.crlBuilder.getConfigWithUpdate(sc)
+	if err != nil {
+		b.Logger().Error("failed to retrieve crl config from storage for unified transfer background process",
+			"error", err)
+		return
+	}
+
+	if !config.UnifiedCRL {
+		// Feature is disabled, no need to run
+		return
+	}
+
+	clusterId, err := b.System().ClusterID(sc.Context)
+	if err != nil {
+		b.Logger().Error("failed to fetch cluster id for unified transfer background process",
+			"error", err)
+		return
+	}
+
+	if !status.isRunning.CompareAndSwap(false, true) {
+		b.Logger().Debug("an existing unified transfer process is already running")
+		return
+	}
+	defer status.isRunning.Store(false)
+
+	// Because access to lastRun is not locked, we need to delay this check
+	// until after we grab the isRunning CAS lock.
+	if !status.lastRun.IsZero() {
+		// We have run before; we only run again if we have
+		// been requested to forceRerun and we haven't run since our
+		// minimum delay.
+		if !(status.forceRerun.Load() && time.Since(status.lastRun) < minUnifiedTransferDelay) {
+			return
+		}
+	}
+
+	// Reset our flag before we begin: we can't reliably see a forceRun
+	// request that arrives from the revoke API after this point, so clearing
+	// the flag first guarantees such a request will force another run. Worst
+	// case, we fix things on the next periodic function call that passes our
+	// min delay.
+	status.forceRerun.Store(false)
+
+	err = doUnifiedTransferMissingLocalSerials(sc, clusterId)
+	if err != nil {
+		b.Logger().Error("an error occurred running unified transfer", "error", err.Error())
+		status.forceRerun.Store(true)
+	} else {
+		if config.EnableDelta {
+			err = doUnifiedTransferMissingDeltaWALSerials(sc, clusterId)
+			if err != nil {
+				b.Logger().Error("an error occurred running unified transfer", "error", err.Error())
+				status.forceRerun.Store(true)
+			}
+		}
+	}
+
+	status.lastRun = time.Now()
+}
+
+func doUnifiedTransferMissingLocalSerials(sc *storageContext, clusterId string) error {
+	localRevokedSerialNums, err := sc.listRevokedCerts()
+	if err != nil {
+		return err
+	}
+	if len(localRevokedSerialNums) == 0 {
+		// No local certs to transfer, no further work to do.
+		return nil
+	}
+
+	unifiedSerials, err := listClusterSpecificUnifiedRevokedCerts(sc, clusterId)
+	if err != nil {
+		return err
+	}
+	unifiedCertLookup := sliceToMapKey(unifiedSerials)
+
+	errCount := 0
+	for i, serialNum := range localRevokedSerialNums {
+		if i%25 == 0 {
+			config, _ := sc.Backend.crlBuilder.getConfigWithUpdate(sc)
+			if config != nil && !config.UnifiedCRL {
+				return errors.New("unified crl has been disabled after we started, stopping")
+			}
+		}
+		if _, ok := unifiedCertLookup[serialNum]; !ok {
+			err := readRevocationEntryAndTransfer(sc, serialNum)
+			if err != nil {
+				errCount++
+				sc.Backend.Logger().Error("Failed transferring local revocation to unified space",
+					"serial", serialNum, "error", err)
+			}
+		}
+	}
+
+	if errCount > 0 {
+		sc.Backend.Logger().Warn(fmt.Sprintf("Failed transferring %d local serials to unified storage", errCount))
+	}
+
+	return nil
+}
+
+func doUnifiedTransferMissingDeltaWALSerials(sc *storageContext, clusterId string) error {
+	// We need to do a similar thing for Delta WAL entry certificates.
+	// When the delta WAL failed to write for one or more entries,
+	// we'll need to replicate these up to the primary cluster. When it
+	// has performed a new delta WAL build, it will empty storage and
+	// update to a last written WAL entry that exceeds what we've seen
+	// locally.
+	thisUnifiedWALEntryPath := unifiedDeltaWALPath + deltaWALLastRevokedSerialName
+	lastUnifiedWALEntry, err := getLastWALSerial(sc, thisUnifiedWALEntryPath)
+	if err != nil {
+		return fmt.Errorf("failed to fetch last cross-cluster unified revoked delta WAL serial number: %w", err)
+	}
+
+	lastLocalWALEntry, err := getLastWALSerial(sc, localDeltaWALLastRevokedSerial)
+	if err != nil {
+		return fmt.Errorf("failed to fetch last locally revoked delta WAL serial number: %w", err)
+	}
+
+	// We now need to transfer all the entries and then write the last WAL
+	// entry at the end. Start by listing all certificates; any missing
+	// certificates will be copied over and then the WAL entry will be
+	// updated once.
+	//
+	// We do not delete entries either locally or remotely, as either
+	// cluster could've rebuilt delta CRLs with out-of-sync information,
+	// removing some entries (and, we cannot differentiate between these
+	// two cases). On next full CRL rebuild (on either cluster), the state
+	// should get synchronized, and future delta CRLs after this function
+	// returns without issue will see the remaining entries.
+	//
+	// Lastly, we need to ensure we don't accidentally write any unified
+	// delta WAL entries that aren't present in the main cross-cluster
+	// revoked storage location. This would mean the above function failed
+	// to copy them for some reason, despite them presumably appearing
+	// locally.
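+	//
+	// Roughly, the flow below is: list the unified delta WAL entries and
+	// this cluster's unified revoked serials, list the local delta WAL
+	// entries, copy up any local entry missing from the unified set
+	// (skipping entries whose full revocation entry hasn't been copied
+	// yet), and only then update the unified last-revoked WAL marker.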
+	_unifiedWALEntries, err := sc.Storage.List(sc.Context, unifiedDeltaWALPath)
+	if err != nil {
+		return fmt.Errorf("failed to list cross-cluster unified delta WAL storage: %w", err)
+	}
+	unifiedWALEntries := sliceToMapKey(_unifiedWALEntries)
+
+	_unifiedRevokedSerials, err := listClusterSpecificUnifiedRevokedCerts(sc, clusterId)
+	if err != nil {
+		return fmt.Errorf("failed to list cross-cluster revoked certificates: %w", err)
+	}
+	unifiedRevokedSerials := sliceToMapKey(_unifiedRevokedSerials)
+
+	localWALEntries, err := sc.Storage.List(sc.Context, localDeltaWALPath)
+	if err != nil {
+		return fmt.Errorf("failed to list local delta WAL storage: %w", err)
+	}
+
+	if lastUnifiedWALEntry == lastLocalWALEntry && len(_unifiedWALEntries) == len(localWALEntries) {
+		// Writing the last revoked WAL entry is the last thing that we do.
+		// Because these entries match (across clusters) and we have the same
+		// number of entries, assume we don't have anything to sync and exit
+		// early.
+		//
+		// We need both checks as, in the event of PBPWF failing and then
+		// returning while more revocations are happening, we could have
+		// been scheduled to run, but then skip running (if only the first
+		// condition was checked) because a later revocation succeeded
+		// in writing a unified WAL entry, before we started replicating
+		// the rest back up.
+		//
+		// The downside of this approach is that, if the main cluster
+		// does a full rebuild in the meantime, we could re-sync more
+		// entries back up to the primary cluster that are already
+		// included in the complete CRL. Users can manually rebuild the
+		// full CRL (clearing these duplicate delta CRL entries) if this
+		// affects them.
+		return nil
+	}
+
+	errCount := 0
+	for index, serial := range localWALEntries {
+		if index%25 == 0 {
+			config, _ := sc.Backend.crlBuilder.getConfigWithUpdate(sc)
+			if config != nil && (!config.UnifiedCRL || !config.EnableDelta) {
+				return errors.New("unified or delta CRLs have been disabled after we started, stopping")
+			}
+		}
+
+		if serial == deltaWALLastBuildSerialName || serial == deltaWALLastRevokedSerialName {
+			// Skip our special serial numbers.
+			continue
+		}
+
+		_, isAlreadyPresent := unifiedWALEntries[serial]
+		if isAlreadyPresent {
+			// Serial exists on both local and unified cluster. We're
+			// presuming we don't need to read and re-write these entries
+			// and that only missing entries need to be updated.
+			continue
+		}
+
+		_, isRevokedCopied := unifiedRevokedSerials[serial]
+		if !isRevokedCopied {
+			// The full revocation entry hasn't been copied up yet, so hold
+			// off on this WAL entry until a later run.
+			errCount += 1
+			sc.Backend.Logger().Debug("Delta WAL exists locally, but corresponding cross-cluster full revocation entry is missing; skipping", "serial", serial)
+			continue
+		}
+
+		// All good: read the local entry and write to the remote variant.
+		localPath := localDeltaWALPath + serial
+		unifiedPath := unifiedDeltaWALPath + serial
+
+		entry, err := sc.Storage.Get(sc.Context, localPath)
+		if err != nil || entry == nil {
+			errCount += 1
+			sc.Backend.Logger().Error("Failed reading local delta WAL entry to copy to cross-cluster", "serial", serial, "err", err)
+			continue
+		}
+
+		entry.Key = unifiedPath
+		err = sc.Storage.Put(sc.Context, entry)
+		if err != nil {
+			errCount += 1
+			sc.Backend.Logger().Error("Failed syncing local delta WAL entry to cross-cluster unified delta WAL location", "serial", serial, "err", err)
+			continue
+		}
+	}
+
+	if errCount > 0 {
+		// See note above about why we don't fail here.
+ sc.Backend.Logger().Warn(fmt.Sprintf("Failed transfering %d local delta WAL serials to unified storage", errCount)) + return nil + } + + // Everything worked. Here, we can write over the delta WAL last revoked + // value. By using the earlier value, even if new revocations have + // occurred, we ensure any further missing entries can be handled in the + // next round. + lastRevSerial := lastWALInfo{Serial: lastLocalWALEntry} + lastWALEntry, err := logical.StorageEntryJSON(thisUnifiedWALEntryPath, lastRevSerial) + if err != nil { + return fmt.Errorf("unable to create cross-cluster unified last delta CRL WAL entry: %w", err) + } + if err = sc.Storage.Put(sc.Context, lastWALEntry); err != nil { + return fmt.Errorf("error saving cross-cluster unified last delta CRL WAL entry: %w", err) + } + + return nil +} + +func readRevocationEntryAndTransfer(sc *storageContext, serial string) error { + hyphenSerial := normalizeSerial(serial) + revInfo, err := sc.fetchRevocationInfo(hyphenSerial) + if err != nil { + return fmt.Errorf("failed loading revocation entry for serial: %s: %w", serial, err) + } + if revInfo == nil { + sc.Backend.Logger().Debug("no certificate revocation entry for serial", "serial", serial) + return nil + } + cert, err := x509.ParseCertificate(revInfo.CertificateBytes) + if err != nil { + sc.Backend.Logger().Debug("failed parsing certificate stored in revocation entry for serial", + "serial", serial, "error", err) + return nil + } + if revInfo.CertificateIssuer == "" { + // No certificate issuer assigned to this serial yet, just drop it for now, + // as a crl rebuild/tidy needs to happen + return nil + } + + revocationTime := revInfo.RevocationTimeUTC + if revInfo.RevocationTimeUTC.IsZero() { + // Legacy revocation entries only had this field and not revocationTimeUTC set... + revocationTime = time.Unix(revInfo.RevocationTime, 0) + } + + if time.Now().After(cert.NotAfter) { + // ignore transferring this entry as it has already expired. + return nil + } + + entry := &unifiedRevocationEntry{ + SerialNumber: hyphenSerial, + CertExpiration: cert.NotAfter, + RevocationTimeUTC: revocationTime, + CertificateIssuer: revInfo.CertificateIssuer, + } + + return writeUnifiedRevocationEntry(sc, entry) +} diff --git a/builtin/logical/pki/secret_certs.go b/builtin/logical/pki/secret_certs.go new file mode 100644 index 0000000..11ebcd2 --- /dev/null +++ b/builtin/logical/pki/secret_certs.go @@ -0,0 +1,86 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "context" + "crypto/x509" + "fmt" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +// SecretCertsType is the name used to identify this type +const SecretCertsType = "pki" + +func secretCerts(b *backend) *framework.Secret { + return &framework.Secret{ + Type: SecretCertsType, + Fields: map[string]*framework.FieldSchema{ + "certificate": { + Type: framework.TypeString, + Description: `The PEM-encoded concatenated certificate and +issuing certificate authority`, + }, + "private_key": { + Type: framework.TypeString, + Description: "The PEM-encoded private key for the certificate", + }, + "serial": { + Type: framework.TypeString, + Description: `The serial number of the certificate, for handy +reference`, + }, + }, + + Revoke: b.secretCredsRevoke, + } +} + +func (b *backend) secretCredsRevoke(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + if req.Secret == nil { + return nil, fmt.Errorf("secret is nil in request") + } + + serialInt, ok := req.Secret.InternalData["serial_number"] + if !ok { + return nil, fmt.Errorf("could not find serial in internal secret data") + } + + b.revokeStorageLock.Lock() + defer b.revokeStorageLock.Unlock() + + sc := b.makeStorageContext(ctx, req.Storage) + serial := serialInt.(string) + + certEntry, err := fetchCertBySerial(sc, "certs/", serial) + if err != nil { + return nil, err + } + if certEntry == nil { + // We can't write to revoked/ or update the CRL anyway because we don't have the cert, + // and there's no reason to expect this will work on a subsequent + // retry. Just give up and let the lease get deleted. + b.Logger().Warn("expired certificate revoke failed because not found in storage, treating as success", "serial", serial) + return nil, nil + } + + cert, err := x509.ParseCertificate(certEntry.Value) + if err != nil { + return nil, fmt.Errorf("error parsing certificate: %w", err) + } + + // Compatibility: Don't revoke CAs if they had leases. New CAs going forward aren't issued leases. + if cert.IsCA { + return nil, nil + } + + config, err := sc.Backend.crlBuilder.getConfigWithUpdate(sc) + if err != nil { + return nil, fmt.Errorf("error revoking serial: %s: failed reading config: %w", serial, err) + } + + return revokeCert(sc, config, cert) +} diff --git a/builtin/logical/pki/storage.go b/builtin/logical/pki/storage.go new file mode 100644 index 0000000..e329a2b --- /dev/null +++ b/builtin/logical/pki/storage.go @@ -0,0 +1,1533 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package pki
+
+import (
+	"bytes"
+	"context"
+	"crypto"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/vault/helper/constants"
+	"github.com/hashicorp/vault/sdk/helper/certutil"
+	"github.com/hashicorp/vault/sdk/helper/errutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+var ErrStorageItemNotFound = errors.New("storage item not found")
+
+const (
+	storageKeyConfig        = "config/keys"
+	storageIssuerConfig     = "config/issuers"
+	keyPrefix               = "config/key/"
+	issuerPrefix            = "config/issuer/"
+	storageLocalCRLConfig   = "crls/config"
+	storageUnifiedCRLConfig = "unified-crls/config"
+
+	legacyMigrationBundleLogKey = "config/legacyMigrationBundleLog"
+	legacyCertBundlePath        = "config/ca_bundle"
+	legacyCertBundleBackupPath  = "config/ca_bundle.bak"
+	legacyCRLPath               = "crl"
+	deltaCRLPath                = "delta-crl"
+	deltaCRLPathSuffix          = "-delta"
+	unifiedCRLPath              = "unified-crl"
+	unifiedDeltaCRLPath         = "unified-delta-crl"
+	unifiedCRLPathPrefix        = "unified-"
+
+	autoTidyConfigPath = "config/auto-tidy"
+	clusterConfigPath  = "config/cluster"
+
+	// Used as a quick sanity check for reference id lookups...
+	uuidLength = 36
+
+	maxRolesToScanOnIssuerChange = 100
+	maxRolesToFindOnIssuerChange = 10
+
+	latestIssuerVersion = 1
+)
+
+type keyID string
+
+func (p keyID) String() string {
+	return string(p)
+}
+
+type issuerID string
+
+func (p issuerID) String() string {
+	return string(p)
+}
+
+type crlID string
+
+func (p crlID) String() string {
+	return string(p)
+}
+
+const (
+	IssuerRefNotFound = issuerID("not-found")
+	KeyRefNotFound    = keyID("not-found")
+)
+
+type keyEntry struct {
+	ID             keyID                   `json:"id"`
+	Name           string                  `json:"name"`
+	PrivateKeyType certutil.PrivateKeyType `json:"private_key_type"`
+	PrivateKey     string                  `json:"private_key"`
+}
+
+func (e keyEntry) getManagedKeyUUID() (UUIDKey, error) {
+	if !e.isManagedPrivateKey() {
+		return "", errutil.InternalError{Err: fmt.Sprintf("getManagedKeyUUID called on unmanaged key %s (%s)", e.ID, e.PrivateKeyType)}
+	}
+	return extractManagedKeyId([]byte(e.PrivateKey))
+}
+
+func (e keyEntry) isManagedPrivateKey() bool {
+	return e.PrivateKeyType == certutil.ManagedPrivateKey
+}
+
+type issuerUsage uint
+
+const (
+	ReadOnlyUsage    issuerUsage = iota
+	IssuanceUsage    issuerUsage = 1 << iota
+	CRLSigningUsage  issuerUsage = 1 << iota
+	OCSPSigningUsage issuerUsage = 1 << iota
+
+	// When adding a new usage in the future, we'll need to create a usage
+	// mask field on the IssuerEntry and handle migrations to a newer mask,
+	// inferring a value for the new bits.
+	AllIssuerUsages = ReadOnlyUsage | IssuanceUsage | CRLSigningUsage | OCSPSigningUsage
+)
+
+var namedIssuerUsages = map[string]issuerUsage{
+	"read-only":            ReadOnlyUsage,
+	"issuing-certificates": IssuanceUsage,
+	"crl-signing":          CRLSigningUsage,
+	"ocsp-signing":         OCSPSigningUsage,
+}
+
+func (i *issuerUsage) ToggleUsage(usages ...issuerUsage) {
+	for _, usage := range usages {
+		*i ^= usage
+	}
+}
+
+func (i issuerUsage) HasUsage(usage issuerUsage) bool {
+	return (i & usage) == usage
+}
+
+func (i issuerUsage) Names() string {
+	var names []string
+	var builtUsage issuerUsage
+
+	// Return the known set of usages in sorted order, so that Terraform
+	// state files don't flip-flop, reporting values as different when it's
+	// the same list in a different order.
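+	//
+	// For example, an issuer with IssuanceUsage and CRLSigningUsage set
+	// always renders as "crl-signing,issuing-certificates", regardless of
+	// the order in which the usages were toggled.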
+ keys := make([]string, 0, len(namedIssuerUsages)) + for k := range namedIssuerUsages { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, name := range keys { + usage := namedIssuerUsages[name] + if i.HasUsage(usage) { + names = append(names, name) + builtUsage.ToggleUsage(usage) + } + } + + if i != builtUsage { + // Found some unknown usage, we should indicate this in the names. + names = append(names, fmt.Sprintf("unknown:%v", i^builtUsage)) + } + + return strings.Join(names, ",") +} + +func NewIssuerUsageFromNames(names []string) (issuerUsage, error) { + var result issuerUsage + for index, name := range names { + usage, ok := namedIssuerUsages[name] + if !ok { + return ReadOnlyUsage, fmt.Errorf("unknown name for usage at index %v: %v", index, name) + } + + result.ToggleUsage(usage) + } + + return result, nil +} + +type issuerEntry struct { + ID issuerID `json:"id"` + Name string `json:"name"` + KeyID keyID `json:"key_id"` + Certificate string `json:"certificate"` + CAChain []string `json:"ca_chain"` + ManualChain []issuerID `json:"manual_chain"` + SerialNumber string `json:"serial_number"` + LeafNotAfterBehavior certutil.NotAfterBehavior `json:"not_after_behavior"` + Usage issuerUsage `json:"usage"` + RevocationSigAlg x509.SignatureAlgorithm `json:"revocation_signature_algorithm"` + Revoked bool `json:"revoked"` + RevocationTime int64 `json:"revocation_time"` + RevocationTimeUTC time.Time `json:"revocation_time_utc"` + AIAURIs *aiaConfigEntry `json:"aia_uris,omitempty"` + LastModified time.Time `json:"last_modified"` + Version uint `json:"version"` +} + +type internalCRLConfigEntry struct { + IssuerIDCRLMap map[issuerID]crlID `json:"issuer_id_crl_map"` + CRLNumberMap map[crlID]int64 `json:"crl_number_map"` + LastCompleteNumberMap map[crlID]int64 `json:"last_complete_number_map"` + CRLExpirationMap map[crlID]time.Time `json:"crl_expiration_map"` + LastModified time.Time `json:"last_modified"` + DeltaLastModified time.Time `json:"delta_last_modified"` + UseGlobalQueue bool `json:"cross_cluster_revocation"` +} + +type keyConfigEntry struct { + DefaultKeyId keyID `json:"default"` +} + +type issuerConfigEntry struct { + // This new fetchedDefault field allows us to detect if the default + // issuer was modified, in turn dispatching the timestamp updater + // if necessary. 
+ fetchedDefault issuerID `json:"-"` + DefaultIssuerId issuerID `json:"default"` + DefaultFollowsLatestIssuer bool `json:"default_follows_latest_issuer"` +} + +type clusterConfigEntry struct { + Path string `json:"path"` + AIAPath string `json:"aia_path"` +} + +type aiaConfigEntry struct { + IssuingCertificates []string `json:"issuing_certificates"` + CRLDistributionPoints []string `json:"crl_distribution_points"` + OCSPServers []string `json:"ocsp_servers"` + EnableTemplating bool `json:"enable_templating"` +} + +func (c *aiaConfigEntry) toURLEntries(sc *storageContext, issuer issuerID) (*certutil.URLEntries, error) { + if len(c.IssuingCertificates) == 0 && len(c.CRLDistributionPoints) == 0 && len(c.OCSPServers) == 0 { + return &certutil.URLEntries{}, nil + } + + result := certutil.URLEntries{ + IssuingCertificates: c.IssuingCertificates[:], + CRLDistributionPoints: c.CRLDistributionPoints[:], + OCSPServers: c.OCSPServers[:], + } + + if c.EnableTemplating { + cfg, err := sc.getClusterConfig() + if err != nil { + return nil, fmt.Errorf("error fetching cluster-local address config: %w", err) + } + + for name, source := range map[string]*[]string{ + "issuing_certificates": &result.IssuingCertificates, + "crl_distribution_points": &result.CRLDistributionPoints, + "ocsp_servers": &result.OCSPServers, + } { + templated := make([]string, len(*source)) + for index, uri := range *source { + if strings.Contains(uri, "{{cluster_path}}") && len(cfg.Path) == 0 { + return nil, fmt.Errorf("unable to template AIA URLs as we lack local cluster address information (path)") + } + if strings.Contains(uri, "{{cluster_aia_path}}") && len(cfg.AIAPath) == 0 { + return nil, fmt.Errorf("unable to template AIA URLs as we lack local cluster address information (aia_path)") + } + if strings.Contains(uri, "{{issuer_id}}") && len(issuer) == 0 { + // Elide issuer AIA info as we lack an issuer_id. 
+ return nil, fmt.Errorf("unable to template AIA URLs as we lack an issuer_id for this operation") + } + + uri = strings.ReplaceAll(uri, "{{cluster_path}}", cfg.Path) + uri = strings.ReplaceAll(uri, "{{cluster_aia_path}}", cfg.AIAPath) + uri = strings.ReplaceAll(uri, "{{issuer_id}}", issuer.String()) + templated[index] = uri + } + + if uri := validateURLs(templated); uri != "" { + return nil, fmt.Errorf("error validating templated %v; invalid URI: %v", name, uri) + } + + *source = templated + } + } + + return &result, nil +} + +type storageContext struct { + Context context.Context + Storage logical.Storage + Backend *backend +} + +func (b *backend) makeStorageContext(ctx context.Context, s logical.Storage) *storageContext { + return &storageContext{ + Context: ctx, + Storage: s, + Backend: b, + } +} + +func (sc *storageContext) WithFreshTimeout(timeout time.Duration) (*storageContext, context.CancelFunc) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + return &storageContext{ + Context: ctx, + Storage: sc.Storage, + Backend: sc.Backend, + }, cancel +} + +func (sc *storageContext) listKeys() ([]keyID, error) { + strList, err := sc.Storage.List(sc.Context, keyPrefix) + if err != nil { + return nil, err + } + + keyIds := make([]keyID, 0, len(strList)) + for _, entry := range strList { + keyIds = append(keyIds, keyID(entry)) + } + + return keyIds, nil +} + +func (sc *storageContext) fetchKeyById(keyId keyID) (*keyEntry, error) { + if len(keyId) == 0 { + return nil, errutil.InternalError{Err: "unable to fetch pki key: empty key identifier"} + } + + entry, err := sc.Storage.Get(sc.Context, keyPrefix+keyId.String()) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch pki key: %v", err)} + } + if entry == nil { + return nil, errutil.UserError{Err: fmt.Sprintf("pki key id %s does not exist", keyId.String())} + } + + var key keyEntry + if err := entry.DecodeJSON(&key); err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode pki key with id %s: %v", keyId.String(), err)} + } + + return &key, nil +} + +func (sc *storageContext) writeKey(key keyEntry) error { + keyId := key.ID + + json, err := logical.StorageEntryJSON(keyPrefix+keyId.String(), key) + if err != nil { + return err + } + + return sc.Storage.Put(sc.Context, json) +} + +func (sc *storageContext) deleteKey(id keyID) (bool, error) { + config, err := sc.getKeysConfig() + if err != nil { + return false, err + } + + wasDefault := false + if config.DefaultKeyId == id { + wasDefault = true + config.DefaultKeyId = keyID("") + if err := sc.setKeysConfig(config); err != nil { + return wasDefault, err + } + } + + return wasDefault, sc.Storage.Delete(sc.Context, keyPrefix+id.String()) +} + +func (sc *storageContext) importKey(keyValue string, keyName string, keyType certutil.PrivateKeyType) (*keyEntry, bool, error) { + // importKey imports the specified PEM-format key (from keyValue) into + // the new PKI storage format. The first return field is a reference to + // the new key; the second is whether or not the key already existed + // during import (in which case, *key points to the existing key reference + // and identifier); the last return field is whether or not an error + // occurred. + // + // Normalize whitespace before beginning. See note in importIssuer as to + // why we do this. + keyValue = strings.TrimSpace(keyValue) + "\n" + // + // Before we can import a known key, we first need to know if the key + // exists in storage already. 
This means iterating through all known
+	// keys and comparing their private value against this value.
+	knownKeys, err := sc.listKeys()
+	if err != nil {
+		return nil, false, err
+	}
+
+	// Get our public key from the current inbound key, to compare against all the other keys.
+	var pkForImportingKey crypto.PublicKey
+	if keyType == certutil.ManagedPrivateKey {
+		managedKeyUUID, err := extractManagedKeyId([]byte(keyValue))
+		if err != nil {
+			return nil, false, errutil.InternalError{Err: fmt.Sprintf("failed extracting managed key uuid from key: %v", err)}
+		}
+		pkForImportingKey, err = getManagedKeyPublicKey(sc.Context, sc.Backend, managedKeyUUID)
+		if err != nil {
+			return nil, false, err
+		}
+	} else {
+		pkForImportingKey, err = getPublicKeyFromBytes([]byte(keyValue))
+		if err != nil {
+			return nil, false, err
+		}
+	}
+
+	foundExistingKeyWithName := false
+	for _, identifier := range knownKeys {
+		existingKey, err := sc.fetchKeyById(identifier)
+		if err != nil {
+			return nil, false, err
+		}
+		areEqual, err := comparePublicKey(sc, existingKey, pkForImportingKey)
+		if err != nil {
+			return nil, false, err
+		}
+
+		if areEqual {
+			// Here, we don't need to stitch together the issuer entries,
+			// because the last run should've done that for us (or, when
+			// importing an issuer).
+			return existingKey, true, nil
+		}
+
+		// Allow us to find an existing matching key with a different name before erroring out
+		if keyName != "" && existingKey.Name == keyName {
+			foundExistingKeyWithName = true
+		}
+	}
+
+	// Another key with a different value is using the keyName so reject this request.
+	if foundExistingKeyWithName {
+		return nil, false, errutil.UserError{Err: fmt.Sprintf("an existing key is using the requested key name value: %s", keyName)}
+	}
+
+	// Haven't found a key, so we've gotta create it and write it into storage.
+	var result keyEntry
+	result.ID = genKeyId()
+	result.Name = keyName
+	result.PrivateKey = keyValue
+	result.PrivateKeyType = keyType
+
+	// Finally, we can write the key to storage.
+	if err := sc.writeKey(result); err != nil {
+		return nil, false, err
+	}
+
+	// Before we return below, we need to iterate over _all_ issuers and see if
+	// one of them has a missing KeyId link, and if so, point it back to
+	// ourselves. We fetch the list of issuers up front, even when we don't
+	// need it, to give ourselves a better chance of succeeding below.
+	knownIssuers, err := sc.listIssuers()
+	if err != nil {
+		return nil, false, err
+	}
+
+	issuerDefaultSet, err := sc.isDefaultIssuerSet()
+	if err != nil {
+		return nil, false, err
+	}
+
+	// Now, for each issuer, try and compute the issuer<->key link if missing.
+	for _, identifier := range knownIssuers {
+		existingIssuer, err := sc.fetchIssuerById(identifier)
+		if err != nil {
+			return nil, false, err
+		}
+
+		// If the KeyID value is already present, we can skip it.
+		if len(existingIssuer.KeyID) > 0 {
+			continue
+		}
+
+		// Otherwise, compare public values. Note that there might be multiple
+		// certificates (e.g., cross-signed) with the same key.
+
+		cert, err := existingIssuer.GetCertificate()
+		if err != nil {
+			// Malformed issuer.
+			return nil, false, err
+		}
+
+		equal, err := certutil.ComparePublicKeysAndType(cert.PublicKey, pkForImportingKey)
+		if err != nil {
+			return nil, false, err
+		}
+
+		if equal {
+			// These public keys are equal, so this key entry must be the
+			// corresponding private key to this issuer; update it accordingly.
+			existingIssuer.KeyID = result.ID
+			if err := sc.writeIssuer(existingIssuer); err != nil {
+				return nil, false, err
+			}
+
+			// If there was no prior default value set and/or we had no known
+			// issuers when we started, set this issuer as default.
+			if !issuerDefaultSet {
+				err = sc.updateDefaultIssuerId(existingIssuer.ID)
+				if err != nil {
+					return nil, false, err
+				}
+				issuerDefaultSet = true
+			}
+		}
+	}
+
+	// If there was no prior default value set and/or we had no known
+	// keys when we started, set this key as default.
+	keyDefaultSet, err := sc.isDefaultKeySet()
+	if err != nil {
+		return nil, false, err
+	}
+	if len(knownKeys) == 0 || !keyDefaultSet {
+		if err = sc.updateDefaultKeyId(result.ID); err != nil {
+			return nil, false, err
+		}
+	}
+
+	// All done; return our new key reference.
+	return &result, false, nil
+}
+
+func (i issuerEntry) GetCertificate() (*x509.Certificate, error) {
+	cert, err := parseCertificateFromBytes([]byte(i.Certificate))
+	if err != nil {
+		return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse certificate from issuer: %s: %v", err.Error(), i.ID)}
+	}
+
+	return cert, nil
+}
+
+func (i issuerEntry) EnsureUsage(usage issuerUsage) error {
+	// We want to spit out a nice error message about missing usages.
+	if i.Usage.HasUsage(usage) {
+		return nil
+	}
+
+	issuerRef := fmt.Sprintf("id:%v", i.ID)
+	if len(i.Name) > 0 {
+		issuerRef = fmt.Sprintf("%v / name:%v", issuerRef, i.Name)
+	}
+
+	// These usages differ at some point in time. We've gotta find the first
+	// usage that differs and return a logical-sounding error message around
+	// that difference.
+	for name, candidate := range namedIssuerUsages {
+		if usage.HasUsage(candidate) && !i.Usage.HasUsage(candidate) {
+			return fmt.Errorf("requested usage %v for issuer [%v] but only had usage %v", name, issuerRef, i.Usage.Names())
+		}
+	}
+
+	// Maybe we have an unnamed usage that's requested.
+	return fmt.Errorf("unknown delta between usages: %v -> %v / for issuer [%v]", usage.Names(), i.Usage.Names(), issuerRef)
+}
+
+func (i issuerEntry) CanMaybeSignWithAlgo(algo x509.SignatureAlgorithm) error {
+	// Hack: Go isn't kind enough to expose its lovely signatureAlgorithmDetails
+	// informational struct for our usage. However, we don't want to actually
+	// fetch the private key and attempt a signature with this algo (as we'll
+	// mint new, previously unsigned material in the process that could
+	// potentially be abused if it leaks).
+	//
+	// So...
+	//
+	// ...we maintain our own mapping of cert.PKI<->sigAlgos. Notably, we
+	// exclude DSA support as the PKI engine has never supported DSA keys.
+	if algo == x509.UnknownSignatureAlgorithm {
+		// Special cased to indicate upgrade and letting Go automatically
+		// choose the correct value.
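+		// (Leaving the algorithm unset lets Go's x509 signing APIs pick a
+		// suitable default for the signer's key type.)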
+ return nil + } + + cert, err := i.GetCertificate() + if err != nil { + return fmt.Errorf("unable to parse issuer's potential signature algorithm types: %w", err) + } + + switch cert.PublicKeyAlgorithm { + case x509.RSA: + switch algo { + case x509.SHA256WithRSA, x509.SHA384WithRSA, x509.SHA512WithRSA, + x509.SHA256WithRSAPSS, x509.SHA384WithRSAPSS, + x509.SHA512WithRSAPSS: + return nil + } + case x509.ECDSA: + switch algo { + case x509.ECDSAWithSHA256, x509.ECDSAWithSHA384, x509.ECDSAWithSHA512: + return nil + } + case x509.Ed25519: + switch algo { + case x509.PureEd25519: + return nil + } + } + + return fmt.Errorf("unable to use issuer of type %v to sign with %v key type", cert.PublicKeyAlgorithm.String(), algo.String()) +} + +func (i issuerEntry) GetAIAURLs(sc *storageContext) (*certutil.URLEntries, error) { + // Default to the per-issuer AIA URLs. + entries := i.AIAURIs + + // If none are set (either due to a nil entry or because no URLs have + // been provided), fall back to the global AIA URL config. + if entries == nil || (len(entries.IssuingCertificates) == 0 && len(entries.CRLDistributionPoints) == 0 && len(entries.OCSPServers) == 0) { + var err error + + entries, err = getGlobalAIAURLs(sc.Context, sc.Storage) + if err != nil { + return nil, err + } + } + + if entries == nil { + return &certutil.URLEntries{}, nil + } + + return entries.toURLEntries(sc, i.ID) +} + +func (sc *storageContext) listIssuers() ([]issuerID, error) { + strList, err := sc.Storage.List(sc.Context, issuerPrefix) + if err != nil { + return nil, err + } + + issuerIds := make([]issuerID, 0, len(strList)) + for _, entry := range strList { + issuerIds = append(issuerIds, issuerID(entry)) + } + + return issuerIds, nil +} + +func (sc *storageContext) resolveKeyReference(reference string) (keyID, error) { + if reference == defaultRef { + // Handle fetching the default key. + config, err := sc.getKeysConfig() + if err != nil { + return keyID("config-error"), err + } + if len(config.DefaultKeyId) == 0 { + return KeyRefNotFound, fmt.Errorf("no default key currently configured") + } + + return config.DefaultKeyId, nil + } + + // Lookup by a direct get first to see if our reference is an ID, this is quick and cached. + if len(reference) == uuidLength { + entry, err := sc.Storage.Get(sc.Context, keyPrefix+reference) + if err != nil { + return keyID("key-read"), err + } + if entry != nil { + return keyID(reference), nil + } + } + + // ... than to pull all keys from storage. + keys, err := sc.listKeys() + if err != nil { + return keyID("list-error"), err + } + for _, keyId := range keys { + key, err := sc.fetchKeyById(keyId) + if err != nil { + return keyID("key-read"), err + } + + if key.Name == reference { + return key.ID, nil + } + } + + // Otherwise, we must not have found the key. + return KeyRefNotFound, errutil.UserError{Err: fmt.Sprintf("unable to find PKI key for reference: %v", reference)} +} + +// fetchIssuerById returns an issuerEntry based on issuerId, if none found an error is returned. 
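+// The returned entry is upgraded in memory to the latest issuer version via
+// upgradeIssuerIfRequired; the upgrade is deliberately not persisted here.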
+func (sc *storageContext) fetchIssuerById(issuerId issuerID) (*issuerEntry, error) {
+	if len(issuerId) == 0 {
+		return nil, errutil.InternalError{Err: "unable to fetch pki issuer: empty issuer identifier"}
+	}
+
+	entry, err := sc.Storage.Get(sc.Context, issuerPrefix+issuerId.String())
+	if err != nil {
+		return nil, errutil.InternalError{Err: fmt.Sprintf("unable to fetch pki issuer: %v", err)}
+	}
+	if entry == nil {
+		return nil, errutil.UserError{Err: fmt.Sprintf("pki issuer id %s does not exist", issuerId.String())}
+	}
+
+	var issuer issuerEntry
+	if err := entry.DecodeJSON(&issuer); err != nil {
+		return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode pki issuer with id %s: %v", issuerId.String(), err)}
+	}
+
+	return sc.upgradeIssuerIfRequired(&issuer), nil
+}
+
+func (sc *storageContext) upgradeIssuerIfRequired(issuer *issuerEntry) *issuerEntry {
+	// *NOTE*: Don't attempt to write out the issuer here, as it may cause an
+	// ErrReadOnly that will direct the request all the way up to the primary
+	// cluster, which would be horrible for local cluster operations such as
+	// generating a leaf cert or a revoke.
+	// Also, even though we could tell if we are the primary cluster's active
+	// node, we can't tell if we hold a full rw issuer lock, so it might not
+	// be safe to write.
+	if issuer.Version == latestIssuerVersion {
+		return issuer
+	}
+
+	if issuer.Version == 0 {
+		// Upgrade at this step requires interrogating the certificate itself;
+		// if this decode fails, it indicates internal problems and the
+		// request will subsequently fail elsewhere. However, decoding this
+		// certificate is mildly expensive, so we only do it in the event of
+		// a Version 0 certificate.
+		cert, err := issuer.GetCertificate()
+		if err != nil {
+			return issuer
+		}
+
+		hadCRL := issuer.Usage.HasUsage(CRLSigningUsage)
+		// Remove CRL signing usage if it exists on the issuer but doesn't
+		// exist in the KU of the x509 certificate.
+		if hadCRL && (cert.KeyUsage&x509.KeyUsageCRLSign) == 0 {
+			issuer.Usage.ToggleUsage(CRLSigningUsage)
+		}
+
+		// Handle our new OCSPSigning usage flag for earlier versions. If we
+		// had CRL signing (prior to possibly removing it in this upgrade),
+		// we'll add the OCSP flag, since EKUs don't matter.
+		if hadCRL && !issuer.Usage.HasUsage(OCSPSigningUsage) {
+			issuer.Usage.ToggleUsage(OCSPSigningUsage)
+		}
+	}
+
+	issuer.Version = latestIssuerVersion
+	return issuer
+}
+
+func (sc *storageContext) writeIssuer(issuer *issuerEntry) error {
+	issuerId := issuer.ID
+	if issuer.LastModified.IsZero() {
+		issuer.LastModified = time.Now().UTC()
+	}
+
+	json, err := logical.StorageEntryJSON(issuerPrefix+issuerId.String(), issuer)
+	if err != nil {
+		return err
+	}
+
+	return sc.Storage.Put(sc.Context, json)
+}
+
+func (sc *storageContext) deleteIssuer(id issuerID) (bool, error) {
+	config, err := sc.getIssuersConfig()
+	if err != nil {
+		return false, err
+	}
+
+	wasDefault := false
+	if config.DefaultIssuerId == id {
+		wasDefault = true
+		// Overwrite the fetched default issuer as we're going to remove this
+		// entry.
+		config.fetchedDefault = issuerID("")
+		config.DefaultIssuerId = issuerID("")
+		if err := sc.setIssuersConfig(config); err != nil {
+			return wasDefault, err
+		}
+	}
+
+	return wasDefault, sc.Storage.Delete(sc.Context, issuerPrefix+id.String())
+}
+
+func (sc *storageContext) importIssuer(certValue string, issuerName string) (*issuerEntry, bool, error) {
+	// importIssuer imports the specified PEM-format certificate (from
+	// certValue) into the new PKI storage format. The first return field is a
+	// reference to the new issuer; the second is whether or not the issuer
+	// already existed during import (in which case, *issuer points to the
+	// existing issuer reference and identifier); the last return field is
+	// whether or not an error occurred.
+
+	// Before we begin, we need to ensure the PEM formatted certificate looks
+	// good. Restricting to "just" `CERTIFICATE` entries is a little
+	// restrictive, as it could be a `X509 CERTIFICATE` entry or a custom
+	// value wrapping an actual DER cert. So validating the contents of the
+	// PEM header is out of the question (and validating the contents of the
+	// PEM block is left to our GetCertificate call below).
+	//
+	// However, we should trim all leading and trailing spaces and add a
+	// single new line. This allows callers to blindly concatenate PEM
+	// blobs from the API and get roughly what they'd expect.
+	//
+	// Discussed further in #11960 and RFC 7468.
+	certValue = strings.TrimSpace(certValue) + "\n"
+
+	// Extracting the certificate is necessary for two reasons: first, it lets
+	// us fetch the serial number; second, for the public key comparison with
+	// known keys.
+	issuerCert, err := parseCertificateFromBytes([]byte(certValue))
+	if err != nil {
+		return nil, false, err
+	}
+
+	// Ensure this certificate is usable as a CA certificate.
+	if !issuerCert.BasicConstraintsValid || !issuerCert.IsCA {
+		return nil, false, errutil.UserError{Err: "Refusing to import non-CA certificate"}
+	}
+
+	// Ensure this certificate has a parsed public key. Otherwise, we've
+	// likely been given a bad certificate.
+	if issuerCert.PublicKeyAlgorithm == x509.UnknownPublicKeyAlgorithm || issuerCert.PublicKey == nil {
+		return nil, false, errutil.UserError{Err: "Refusing to import CA certificate with empty PublicKey. This usually means the SubjectPublicKeyInfo field has an OID not recognized by Go, such as 1.2.840.113549.1.1.10 for rsaPSS."}
+	}
+
+	// Before we can import a known issuer, we first need to know if the issuer
+	// exists in storage already. This means iterating through all known
+	// issuers and comparing their private value against this value.
+	knownIssuers, err := sc.listIssuers()
+	if err != nil {
+		return nil, false, err
+	}
+
+	foundExistingIssuerWithName := false
+	for _, identifier := range knownIssuers {
+		existingIssuer, err := sc.fetchIssuerById(identifier)
+		if err != nil {
+			return nil, false, err
+		}
+		existingIssuerCert, err := existingIssuer.GetCertificate()
+		if err != nil {
+			return nil, false, err
+		}
+		if areCertificatesEqual(existingIssuerCert, issuerCert) {
+			// Here, we don't need to stitch together the key entries,
+			// because the last run should've done that for us (or, when
+			// importing a key).
+			return existingIssuer, true, nil
+		}
+
+		// Allow us to find an existing matching issuer with a different name before erroring out
+		if issuerName != "" && existingIssuer.Name == issuerName {
+			foundExistingIssuerWithName = true
+		}
+	}
+
+	if foundExistingIssuerWithName {
+		return nil, false, errutil.UserError{Err: fmt.Sprintf("another issuer is using the requested name: %s", issuerName)}
+	}
+
+	// Haven't found an issuer, so we've gotta create it and write it into
+	// storage.
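+	// The fresh entry starts with every usage enabled and the latest entry
+	// version; CRL signing is stripped just below if the certificate lacks
+	// the CRLSign key usage.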
+	var result issuerEntry
+	result.ID = genIssuerId()
+	result.Name = issuerName
+	result.Certificate = certValue
+	result.LeafNotAfterBehavior = certutil.ErrNotAfterBehavior
+	result.Usage.ToggleUsage(AllIssuerUsages)
+	result.Version = latestIssuerVersion
+
+	// If we lack relevant bits for CRL, prohibit it from being set
+	// on the usage side.
+	if (issuerCert.KeyUsage&x509.KeyUsageCRLSign) == 0 && result.Usage.HasUsage(CRLSigningUsage) {
+		result.Usage.ToggleUsage(CRLSigningUsage)
+	}
+
+	// We shouldn't add CSRs or multiple certificates in this entry.
+	countCertificates := strings.Count(result.Certificate, "-BEGIN ")
+	if countCertificates != 1 {
+		return nil, false, fmt.Errorf("bad issuer: potentially multiple PEM blobs in one certificate storage entry:\n%v", result.Certificate)
+	}
+
+	result.SerialNumber = serialFromCert(issuerCert)
+
+	// Before we return below, we need to iterate over _all_ keys and see if
+	// one of them has a public key matching this certificate, and if so,
+	// update our link accordingly. We fetch the list of keys up front, even
+	// though we may not need it, to give ourselves a better chance of
+	// succeeding below.
+	knownKeys, err := sc.listKeys()
+	if err != nil {
+		return nil, false, err
+	}
+
+	// Now, for each key, try and compute the issuer<->key link. We delay
+	// writing issuer to storage as we won't need to update the key, only
+	// the issuer.
+	for _, identifier := range knownKeys {
+		existingKey, err := sc.fetchKeyById(identifier)
+		if err != nil {
+			return nil, false, err
+		}
+
+		equal, err := comparePublicKey(sc, existingKey, issuerCert.PublicKey)
+		if err != nil {
+			return nil, false, err
+		}
+
+		if equal {
+			result.KeyID = existingKey.ID
+			// Here, there's exactly one stored key with the same public key
+			// as us, per guarantees in importKey; as we're importing an
+			// issuer, there's no other keys or issuers we'd need to read or
+			// update, so exit.
+			break
+		}
+	}
+
+	// Finally, rebuild the chains. In this process, because the provided
+	// reference issuer is non-nil, we'll save this issuer to storage.
+	if err := sc.rebuildIssuersChains(&result); err != nil {
+		return nil, false, err
+	}
+
+	// If there was no prior default value set and/or we had no known
+	// issuers when we started, set this issuer as default.
+	issuerDefaultSet, err := sc.isDefaultIssuerSet()
+	if err != nil {
+		return nil, false, err
+	}
+	if (len(knownIssuers) == 0 || !issuerDefaultSet) && len(result.KeyID) != 0 {
+		if err = sc.updateDefaultIssuerId(result.ID); err != nil {
+			return nil, false, err
+		}
+	}
+
+	// All done; return our new issuer reference.
+	return &result, false, nil
+}
+
+func areCertificatesEqual(cert1 *x509.Certificate, cert2 *x509.Certificate) bool {
+	return bytes.Equal(cert1.Raw, cert2.Raw)
+}
+
+func (sc *storageContext) _cleanupInternalCRLMapping(mapping *internalCRLConfigEntry, path string) error {
+	// Track which CRL IDs are presently referred to by issuers; any other CRL
+	// IDs are subject to cleanup.
+	//
+	// Unused IDs both need to be removed from this map (cleaning up the size
+	// of this storage entry) but also the full CRLs removed from disk.
+	presentMap := make(map[crlID]bool)
+	for _, id := range mapping.IssuerIDCRLMap {
+		presentMap[id] = true
+	}
+
+	// Identify which CRL IDs exist and are candidates for removal;
+	// theoretically these three maps should be in sync, but were added
+	// at different times.
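+	//
+	// In effect this is a small mark-and-sweep: collect the IDs that appear
+	// in any bookkeeping map but are no longer referenced by an issuer, then
+	// delete their map entries and their on-disk CRLs.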
+	toRemove := make(map[crlID]bool)
+	for id := range mapping.CRLNumberMap {
+		if !presentMap[id] {
+			toRemove[id] = true
+		}
+	}
+	for id := range mapping.LastCompleteNumberMap {
+		if !presentMap[id] {
+			toRemove[id] = true
+		}
+	}
+	for id := range mapping.CRLExpirationMap {
+		if !presentMap[id] {
+			toRemove[id] = true
+		}
+	}
+
+	// Depending on which path we're writing this config to, we need to
+	// remove CRLs from the relevant folder too.
+	isLocal := path == storageLocalCRLConfig
+	baseCRLPath := "crls/"
+	if !isLocal {
+		baseCRLPath = "unified-crls/"
+	}
+
+	for id := range toRemove {
+		// Clean up space in this mapping...
+		delete(mapping.CRLNumberMap, id)
+		delete(mapping.LastCompleteNumberMap, id)
+		delete(mapping.CRLExpirationMap, id)
+
+		// And clean up space on disk from the fat CRL mapping.
+		crlPath := baseCRLPath + string(id)
+		deltaCRLPath := crlPath + "-delta"
+		if err := sc.Storage.Delete(sc.Context, crlPath); err != nil {
+			return fmt.Errorf("failed to delete unreferenced CRL %v: %w", id, err)
+		}
+		if err := sc.Storage.Delete(sc.Context, deltaCRLPath); err != nil {
+			return fmt.Errorf("failed to delete unreferenced delta CRL %v: %w", id, err)
+		}
+	}
+
+	// Lastly, some CRLs could've been partially removed from the map but
+	// not from disk. Check to see if we have any dangling CRLs and remove
+	// them too.
+	list, err := sc.Storage.List(sc.Context, baseCRLPath)
+	if err != nil {
+		return fmt.Errorf("failed listing all CRLs: %w", err)
+	}
+	for _, crl := range list {
+		if crl == "config" || strings.HasSuffix(crl, "/") {
+			continue
+		}
+
+		if presentMap[crlID(crl)] {
+			continue
+		}
+
+		if err := sc.Storage.Delete(sc.Context, baseCRLPath+crl); err != nil {
+			return fmt.Errorf("failed cleaning up orphaned CRL %v: %w", crl, err)
+		}
+	}
+
+	return nil
+}
+
+func (sc *storageContext) _setInternalCRLConfig(mapping *internalCRLConfigEntry, path string) error {
+	if err := sc._cleanupInternalCRLMapping(mapping, path); err != nil {
+		return fmt.Errorf("failed to clean up internal CRL mapping: %w", err)
+	}
+
+	json, err := logical.StorageEntryJSON(path, mapping)
+	if err != nil {
+		return err
+	}
+
+	return sc.Storage.Put(sc.Context, json)
+}
+
+func (sc *storageContext) setLocalCRLConfig(mapping *internalCRLConfigEntry) error {
+	return sc._setInternalCRLConfig(mapping, storageLocalCRLConfig)
+}
+
+func (sc *storageContext) setUnifiedCRLConfig(mapping *internalCRLConfigEntry) error {
+	return sc._setInternalCRLConfig(mapping, storageUnifiedCRLConfig)
+}
+
+func (sc *storageContext) _getInternalCRLConfig(path string) (*internalCRLConfigEntry, error) {
+	entry, err := sc.Storage.Get(sc.Context, path)
+	if err != nil {
+		return nil, err
+	}
+
+	mapping := &internalCRLConfigEntry{}
+	if entry != nil {
+		if err := entry.DecodeJSON(mapping); err != nil {
+			return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode cluster-local CRL configuration: %v", err)}
+		}
+	}
+
+	if len(mapping.IssuerIDCRLMap) == 0 {
+		mapping.IssuerIDCRLMap = make(map[issuerID]crlID)
+	}
+
+	if len(mapping.CRLNumberMap) == 0 {
+		mapping.CRLNumberMap = make(map[crlID]int64)
+	}
+
+	if len(mapping.LastCompleteNumberMap) == 0 {
+		mapping.LastCompleteNumberMap = make(map[crlID]int64)
+
+		// Since this might not exist on migration, we want to guess what
+		// the last full CRL number was. This was likely the last
+		// value from CRLNumberMap if it existed, since we're just adding
+		// the mapping here in this block.
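+		// For example, if CRLNumberMap[id] is currently 5 (the next number
+		// to issue), the last complete CRL was presumably number 4, which is
+		// what the decrement below records.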
+ // + // After the next full CRL build, we will have set this value + // correctly, so it doesn't really matter in the long term if + // we're off here. + for id, number := range mapping.CRLNumberMap { + // Decrement by one, since CRLNumberMap is the future number, + // not the last built number. + mapping.LastCompleteNumberMap[id] = number - 1 + } + } + + if len(mapping.CRLExpirationMap) == 0 { + mapping.CRLExpirationMap = make(map[crlID]time.Time) + } + + return mapping, nil +} + +func (sc *storageContext) getLocalCRLConfig() (*internalCRLConfigEntry, error) { + return sc._getInternalCRLConfig(storageLocalCRLConfig) +} + +func (sc *storageContext) getUnifiedCRLConfig() (*internalCRLConfigEntry, error) { + return sc._getInternalCRLConfig(storageUnifiedCRLConfig) +} + +func (sc *storageContext) setKeysConfig(config *keyConfigEntry) error { + json, err := logical.StorageEntryJSON(storageKeyConfig, config) + if err != nil { + return err + } + + return sc.Storage.Put(sc.Context, json) +} + +func (sc *storageContext) getKeysConfig() (*keyConfigEntry, error) { + entry, err := sc.Storage.Get(sc.Context, storageKeyConfig) + if err != nil { + return nil, err + } + + keyConfig := &keyConfigEntry{} + if entry != nil { + if err := entry.DecodeJSON(keyConfig); err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode key configuration: %v", err)} + } + } + + return keyConfig, nil +} + +func (sc *storageContext) setIssuersConfig(config *issuerConfigEntry) error { + json, err := logical.StorageEntryJSON(storageIssuerConfig, config) + if err != nil { + return err + } + + if err := sc.Storage.Put(sc.Context, json); err != nil { + return err + } + + if err := sc.changeDefaultIssuerTimestamps(config.fetchedDefault, config.DefaultIssuerId); err != nil { + return err + } + + return nil +} + +func (sc *storageContext) getIssuersConfig() (*issuerConfigEntry, error) { + entry, err := sc.Storage.Get(sc.Context, storageIssuerConfig) + if err != nil { + return nil, err + } + + issuerConfig := &issuerConfigEntry{} + if entry != nil { + if err := entry.DecodeJSON(issuerConfig); err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to decode issuer configuration: %v", err)} + } + } + issuerConfig.fetchedDefault = issuerConfig.DefaultIssuerId + + return issuerConfig, nil +} + +// Lookup within storage the value of reference, assuming the string is a reference to an issuer entry, +// returning the converted issuerID or an error if not found. This method will not properly resolve the +// special legacyBundleShimID value as we do not want to confuse our special value and a user-provided name of the +// same value. +func (sc *storageContext) resolveIssuerReference(reference string) (issuerID, error) { + if reference == defaultRef { + // Handle fetching the default issuer. + config, err := sc.getIssuersConfig() + if err != nil { + return issuerID("config-error"), err + } + if len(config.DefaultIssuerId) == 0 { + return IssuerRefNotFound, fmt.Errorf("no default issuer currently configured") + } + + return config.DefaultIssuerId, nil + } + + // Lookup by a direct get first to see if our reference is an ID, this is quick and cached. + if len(reference) == uuidLength { + entry, err := sc.Storage.Get(sc.Context, issuerPrefix+reference) + if err != nil { + return issuerID("issuer-read"), err + } + if entry != nil { + return issuerID(reference), nil + } + } + + // ... than to pull all issuers from storage. 
+// Look up the provided reference within storage, assuming the string refers
+// to an issuer entry, returning the resolved issuerID or an error if not
+// found. This method will not properly resolve the special
+// legacyBundleShimID value, as we do not want to confuse our special value
+// with a user-provided name of the same value.
+func (sc *storageContext) resolveIssuerReference(reference string) (issuerID, error) {
+	if reference == defaultRef {
+		// Handle fetching the default issuer.
+		config, err := sc.getIssuersConfig()
+		if err != nil {
+			return issuerID("config-error"), err
+		}
+		if len(config.DefaultIssuerId) == 0 {
+			return IssuerRefNotFound, fmt.Errorf("no default issuer currently configured")
+		}
+
+		return config.DefaultIssuerId, nil
+	}
+
+	// Look up by a direct get first to see if our reference is an ID;
+	// this is quick and cached...
+	if len(reference) == uuidLength {
+		entry, err := sc.Storage.Get(sc.Context, issuerPrefix+reference)
+		if err != nil {
+			return issuerID("issuer-read"), err
+		}
+		if entry != nil {
+			return issuerID(reference), nil
+		}
+	}
+
+	// ... rather than pulling all issuers from storage.
+	issuers, err := sc.listIssuers()
+	if err != nil {
+		return issuerID("list-error"), err
+	}
+
+	for _, issuerId := range issuers {
+		issuer, err := sc.fetchIssuerById(issuerId)
+		if err != nil {
+			return issuerID("issuer-read"), err
+		}
+
+		if issuer.Name == reference {
+			return issuer.ID, nil
+		}
+	}
+
+	// Otherwise, we must not have found the issuer.
+	return IssuerRefNotFound, errutil.UserError{Err: fmt.Sprintf("unable to find PKI issuer for reference: %v", reference)}
+}
+
+func (sc *storageContext) resolveIssuerCRLPath(reference string, unified bool) (string, error) {
+	if sc.Backend.useLegacyBundleCaStorage() {
+		return legacyCRLPath, nil
+	}
+
+	issuer, err := sc.resolveIssuerReference(reference)
+	if err != nil {
+		return legacyCRLPath, err
+	}
+
+	configPath := storageLocalCRLConfig
+	if unified {
+		configPath = storageUnifiedCRLConfig
+	}
+
+	crlConfig, err := sc._getInternalCRLConfig(configPath)
+	if err != nil {
+		return legacyCRLPath, err
+	}
+
+	if crlId, ok := crlConfig.IssuerIDCRLMap[issuer]; ok && len(crlId) > 0 {
+		path := fmt.Sprintf("crls/%v", crlId)
+		if unified {
+			path = unifiedCRLPathPrefix + path
+		}
+
+		return path, nil
+	}
+
+	return legacyCRLPath, fmt.Errorf("unable to find CRL for issuer: id:%v/ref:%v", issuer, reference)
+}
+
+// Builds a certutil.CertBundle from the specified issuer identifier,
+// optionally loading the key. This method supports loading legacy bundles
+// using the legacyBundleShimID issuerId, and returns an error if no entry
+// is found.
+func (sc *storageContext) fetchCertBundleByIssuerId(id issuerID, loadKey bool) (*issuerEntry, *certutil.CertBundle, error) {
+	if id == legacyBundleShimID {
+		// We have not completed the migration, or started a request in legacy mode, so
+		// attempt to load the bundle from the legacy location.
+		issuer, bundle, err := getLegacyCertBundle(sc.Context, sc.Storage)
+		if err != nil {
+			return nil, nil, err
+		}
+		if issuer == nil || bundle == nil {
+			return nil, nil, errutil.UserError{Err: "no legacy cert bundle exists"}
+		}
+
+		return issuer, bundle, err
+	}
+
+	issuer, err := sc.fetchIssuerById(id)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var bundle certutil.CertBundle
+	bundle.Certificate = issuer.Certificate
+	bundle.CAChain = issuer.CAChain
+	bundle.SerialNumber = issuer.SerialNumber
+
+	// Fetch the key if it exists. Sometimes we don't need the key immediately.
+	if loadKey && issuer.KeyID != keyID("") {
+		key, err := sc.fetchKeyById(issuer.KeyID)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		bundle.PrivateKeyType = key.PrivateKeyType
+		bundle.PrivateKey = key.PrivateKey
+	}
+
+	return issuer, &bundle, nil
+}
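A short usage sketch of the loadKey flag above; the fetchChainOnly helper is hypothetical:

    // Hypothetical read-only caller: fetch an issuer's bundle without its
    // private key (loadKey=false); signing paths would pass true instead.
    func fetchChainOnly(sc *storageContext, id issuerID) ([]string, error) {
        _, bundle, err := sc.fetchCertBundleByIssuerId(id, false)
        if err != nil {
            return nil, err
        }
        return bundle.CAChain, nil // PrivateKey is left unpopulated here
    }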
+func (sc *storageContext) writeCaBundle(caBundle *certutil.CertBundle, issuerName string, keyName string) (*issuerEntry, *keyEntry, error) {
+	myKey, _, err := sc.importKey(caBundle.PrivateKey, keyName, caBundle.PrivateKeyType)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// We may have existing mounts that only contained a key with no
+	// certificate yet, as a signed CSR was never set up within the mount.
+	if caBundle.Certificate == "" {
+		return &issuerEntry{}, myKey, nil
+	}
+
+	myIssuer, _, err := sc.importIssuer(caBundle.Certificate, issuerName)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	for _, cert := range caBundle.CAChain {
+		if _, _, err = sc.importIssuer(cert, ""); err != nil {
+			return nil, nil, err
+		}
+	}
+
+	return myIssuer, myKey, nil
+}
+
+func genIssuerId() issuerID {
+	return issuerID(genUuid())
+}
+
+func genKeyId() keyID {
+	return keyID(genUuid())
+}
+
+func genCRLId() crlID {
+	return crlID(genUuid())
+}
+
+func genUuid() string {
+	aUuid, err := uuid.GenerateUUID()
+	if err != nil {
+		panic(err)
+	}
+	return aUuid
+}
+
+func (sc *storageContext) isKeyInUse(keyId string) (inUse bool, issuerId string, err error) {
+	knownIssuers, err := sc.listIssuers()
+	if err != nil {
+		return true, "", err
+	}
+
+	for _, issuerId := range knownIssuers {
+		issuerEntry, err := sc.fetchIssuerById(issuerId)
+		if err != nil {
+			return true, issuerId.String(), errutil.InternalError{Err: fmt.Sprintf("unable to fetch pki issuer: %v", err)}
+		}
+		if issuerEntry == nil {
+			return true, issuerId.String(), errutil.InternalError{Err: fmt.Sprintf("listed issuer %s does not exist", issuerId.String())}
+		}
+		if issuerEntry.KeyID.String() == keyId {
+			return true, issuerId.String(), nil
+		}
+	}
+
+	return false, "", nil
+}
+
+func (sc *storageContext) checkForRolesReferencing(issuerId string) (timeout bool, inUseBy int32, err error) {
+	roleEntries, err := sc.Storage.List(sc.Context, "role/")
+	if err != nil {
+		return false, 0, err
+	}
+
+	inUseBy = 0
+	checkedRoles := 0
+
+	for _, roleName := range roleEntries {
+		entry, err := sc.Storage.Get(sc.Context, "role/"+roleName)
+		if err != nil {
+			return false, 0, err
+		}
+		if entry != nil { // If nil, someone deleted the entry; we don't hold a lock here, so just continue.
+			var role roleEntry
+			err = entry.DecodeJSON(&role)
+			if err != nil {
+				return false, inUseBy, err
+			}
+			if role.Issuer == issuerId {
+				inUseBy++
+				if inUseBy >= maxRolesToFindOnIssuerChange {
+					return true, inUseBy, nil
+				}
+			}
+		}
+		checkedRoles++
+		if checkedRoles >= maxRolesToScanOnIssuerChange {
+			return true, inUseBy, nil
+		}
+	}
+
+	return false, inUseBy, nil
+}
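Because the scan above is bounded by maxRolesToFindOnIssuerChange and maxRolesToScanOnIssuerChange, a true timeout means "gave up early", not "definitely unused". A hedged caller sketch; issuerLooksUnused is hypothetical:

    // Hypothetical guard before retiring an issuer: treat both a hit and
    // an early bail-out as "still potentially referenced".
    func issuerLooksUnused(sc *storageContext, issuerId string) (bool, error) {
        timedOut, inUseBy, err := sc.checkForRolesReferencing(issuerId)
        if err != nil {
            return false, err
        }
        return !timedOut && inUseBy == 0, nil
    }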
+func (sc *storageContext) getRevocationConfig() (*crlConfig, error) {
+	entry, err := sc.Storage.Get(sc.Context, "config/crl")
+	if err != nil {
+		return nil, err
+	}
+
+	var result crlConfig
+	if entry == nil {
+		result = defaultCrlConfig
+		return &result, nil
+	}
+
+	if err = entry.DecodeJSON(&result); err != nil {
+		return nil, err
+	}
+
+	if result.Version == 0 {
+		// Automatically update existing configurations.
+		result.OcspDisable = defaultCrlConfig.OcspDisable
+		result.OcspExpiry = defaultCrlConfig.OcspExpiry
+		result.AutoRebuild = defaultCrlConfig.AutoRebuild
+		result.AutoRebuildGracePeriod = defaultCrlConfig.AutoRebuildGracePeriod
+		result.Version = 1
+	}
+	if result.Version == 1 {
+		if result.DeltaRebuildInterval == "" {
+			result.DeltaRebuildInterval = defaultCrlConfig.DeltaRebuildInterval
+		}
+		result.Version = 2
+	}
+
+	// Depending on client version, it's possible that the expiry is unset.
+	// This sets the default value to prevent issues in downstream code.
+	if result.Expiry == "" {
+		result.Expiry = defaultCrlConfig.Expiry
+	}
+
+	isLocalMount := sc.Backend.System().LocalMount()
+	if (!constants.IsEnterprise || isLocalMount) && (result.UnifiedCRLOnExistingPaths || result.UnifiedCRL || result.UseGlobalQueue) {
+		// An end user must have had Enterprise, enabled the unified config args and then downgraded to OSS.
+		sc.Backend.Logger().Warn("Not running Vault Enterprise or using a local mount, " +
+			"disabling unified_crl, unified_crl_on_existing_paths and cross_cluster_revocation config flags.")
+		result.UnifiedCRLOnExistingPaths = false
+		result.UnifiedCRL = false
+		result.UseGlobalQueue = false
+	}
+
+	return &result, nil
+}
+
+func (sc *storageContext) getAutoTidyConfig() (*tidyConfig, error) {
+	entry, err := sc.Storage.Get(sc.Context, autoTidyConfigPath)
+	if err != nil {
+		return nil, err
+	}
+
+	var result tidyConfig
+	if entry == nil {
+		result = defaultTidyConfig
+		return &result, nil
+	}
+
+	if err = entry.DecodeJSON(&result); err != nil {
+		return nil, err
+	}
+
+	if result.IssuerSafetyBuffer == 0 {
+		result.IssuerSafetyBuffer = defaultTidyConfig.IssuerSafetyBuffer
+	}
+
+	return &result, nil
+}
+
+func (sc *storageContext) writeAutoTidyConfig(config *tidyConfig) error {
+	entry, err := logical.StorageEntryJSON(autoTidyConfigPath, config)
+	if err != nil {
+		return err
+	}
+
+	err = sc.Storage.Put(sc.Context, entry)
+	if err != nil {
+		return err
+	}
+
+	sc.Backend.publishCertCountMetrics.Store(config.PublishMetrics)
+
+	if !config.MaintainCount {
+		// Potentially disable certificate counting.
+		certCountWasEnabled := sc.Backend.certCountEnabled.Swap(config.MaintainCount)
+		if certCountWasEnabled {
+			sc.Backend.certsCounted.Store(true)
+			sc.Backend.certCountError = "Cert Count is Disabled: enable via Tidy Config maintain_stored_certificate_counts"
+			sc.Backend.possibleDoubleCountedSerials = nil        // This won't stop a list operation, but will stop an expensive clean-up during initialize.
+			sc.Backend.possibleDoubleCountedRevokedSerials = nil // This won't stop a list operation, but will stop an expensive clean-up during initialize.
+			sc.Backend.certCount.Store(0)
+			sc.Backend.revokedCertCount.Store(0)
+		}
+	} else {
+		// Potentially enable certificate counting.
+		if !sc.Backend.certCountEnabled.Load() {
+			// We haven't written "re-enable certificate counts" outside the initialize function;
+			// any derived call to do so is likely to time out on ~2 million certs.
+			sc.Backend.certCountError = "Certificate Counting Has Not Been Initialized, re-initialize this mount"
+		}
+	}
+
+	return nil
+}
+
+func (sc *storageContext) listRevokedCerts() ([]string, error) {
+	list, err := sc.Storage.List(sc.Context, revokedPath)
+	if err != nil {
+		return nil, fmt.Errorf("failed listing revoked certs: %w", err)
+	}
+
+	return list, err
+}
+
+func (sc *storageContext) getClusterConfig() (*clusterConfigEntry, error) {
+	entry, err := sc.Storage.Get(sc.Context, clusterConfigPath)
+	if err != nil {
+		return nil, err
+	}
+
+	var result clusterConfigEntry
+	if entry == nil {
+		return &result, nil
+	}
+
+	if err = entry.DecodeJSON(&result); err != nil {
+		return nil, err
+	}
+
+	return &result, nil
+}
+
+func (sc *storageContext) writeClusterConfig(config *clusterConfigEntry) error {
+	entry, err := logical.StorageEntryJSON(clusterConfigPath, config)
+	if err != nil {
+		return err
+	}
+
+	return sc.Storage.Put(sc.Context, entry)
+}
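The Version 0 to 1 to 2 ladder in getRevocationConfig above is a lazy, read-time upgrade: each block migrates one step forward, so a config written by any older version converges on load. A standalone miniature of the pattern, using a hypothetical config type rather than the backend's:

    // Hypothetical miniature of the same idea: stack the version blocks
    // in order and let a stale config fall through all of them.
    type exampleConfig struct {
        Version int
        Expiry  string
    }

    func upgradeExample(c *exampleConfig) {
        if c.Version == 0 {
            c.Expiry = "72h" // backfill fields introduced in v1
            c.Version = 1
        }
        if c.Version == 1 {
            // v2 added nothing to backfill in this sketch; just bump.
            c.Version = 2
        }
    }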
+func (sc *storageContext) fetchRevocationInfo(serial string) (*revocationInfo, error) {
+	var revInfo *revocationInfo
+	revEntry, err := fetchCertBySerial(sc, revokedPath, serial)
+	if err != nil {
+		return nil, err
+	}
+	if revEntry != nil {
+		err = revEntry.DecodeJSON(&revInfo)
+		if err != nil {
+			return nil, fmt.Errorf("error decoding existing revocation info: %w", err)
+		}
+	}
+
+	return revInfo, nil
+}
diff --git a/builtin/logical/pki/storage_migrations.go b/builtin/logical/pki/storage_migrations.go
new file mode 100644
index 0000000..b89bb0e
--- /dev/null
+++ b/builtin/logical/pki/storage_migrations.go
@@ -0,0 +1,234 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package pki
+
+import (
+	"context"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"time"
+
+	"github.com/hashicorp/vault/sdk/helper/certutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// This allows us to record the version of the migration code within the log entry
+// in case we find out in the future that something was horribly wrong with the migration,
+// and we need to perform it again...
+const (
+	latestMigrationVersion = 2
+	legacyBundleShimID     = issuerID("legacy-entry-shim-id")
+	legacyBundleShimKeyID  = keyID("legacy-entry-shim-key-id")
+)
+
+type legacyBundleMigrationLog struct {
+	Hash             string    `json:"hash"`
+	Created          time.Time `json:"created"`
+	CreatedIssuer    issuerID  `json:"issuer_id"`
+	CreatedKey       keyID     `json:"key_id"`
+	MigrationVersion int       `json:"migrationVersion"`
+}
+
+type migrationInfo struct {
+	isRequired       bool
+	legacyBundle     *certutil.CertBundle
+	legacyBundleHash string
+	migrationLog     *legacyBundleMigrationLog
+}
+
+func getMigrationInfo(ctx context.Context, s logical.Storage) (migrationInfo, error) {
+	migrationInfo := migrationInfo{
+		isRequired:       false,
+		legacyBundle:     nil,
+		legacyBundleHash: "",
+		migrationLog:     nil,
+	}
+
+	var err error
+	_, migrationInfo.legacyBundle, err = getLegacyCertBundle(ctx, s)
+	if err != nil {
+		return migrationInfo, err
+	}
+
+	migrationInfo.migrationLog, err = getLegacyBundleMigrationLog(ctx, s)
+	if err != nil {
+		return migrationInfo, err
+	}
+
+	migrationInfo.legacyBundleHash, err = computeHashOfLegacyBundle(migrationInfo.legacyBundle)
+	if err != nil {
+		return migrationInfo, err
+	}
+
+	// Even if there isn't anything to migrate, we always want to write out the log entry
+	// as that will trigger the secondary clusters to toggle/wake up
+	if (migrationInfo.migrationLog == nil) ||
+		(migrationInfo.migrationLog.Hash != migrationInfo.legacyBundleHash) ||
+		(migrationInfo.migrationLog.MigrationVersion != latestMigrationVersion) {
+		migrationInfo.isRequired = true
+	}
+
+	return migrationInfo, nil
+}
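One subtlety: computeHashOfLegacyBundle (defined further below) gives a nil bundle a stable digest, namely the SHA-256 of empty input, so "no legacy bundle" is itself a recordable state in the migration log. A tiny standalone sketch:

    // Standalone sketch: hashing a nil bundle returns the well-known
    // empty-input SHA-256 digest.
    func emptyBundleHash() string {
        h, _ := computeHashOfLegacyBundle(nil)
        // h == "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
        return h
    }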
+func migrateStorage(ctx context.Context, b *backend, s logical.Storage) error {
+	migrationInfo, err := getMigrationInfo(ctx, s)
+	if err != nil {
+		return err
+	}
+
+	if !migrationInfo.isRequired {
+		// No migration was deemed to be required.
+		return nil
+	}
+
+	var issuerIdentifier issuerID
+	var keyIdentifier keyID
+	sc := b.makeStorageContext(ctx, s)
+	if migrationInfo.legacyBundle != nil {
+		// When the legacy bundle still exists, there are three scenarios we
+		// need to worry about:
+		//
+		// 1. When we have no migration log, we definitely want to migrate.
+		haveNoLog := migrationInfo.migrationLog == nil
+		// 2. When we have an (empty) log and the version is zero, we want to
+		//    migrate.
+		haveOldVersion := !haveNoLog && migrationInfo.migrationLog.MigrationVersion == 0
+		// 3. When we have a log and the version is at least 1 (where this
+		//    migration was introduced), we want to run the migration again
+		//    only if the legacy bundle hash has changed.
+		isCurrentOrBetterVersion := !haveNoLog && migrationInfo.migrationLog.MigrationVersion >= 1
+		haveChange := !haveNoLog && migrationInfo.migrationLog.Hash != migrationInfo.legacyBundleHash
+		haveVersionWithChange := isCurrentOrBetterVersion && haveChange
+
+		if haveNoLog || haveOldVersion || haveVersionWithChange {
+			// Generate a unique name for the migrated items in case things were to be re-migrated again
+			// for some weird reason in the future...
+			migrationName := fmt.Sprintf("current-%d", time.Now().Unix())
+
+			b.Logger().Info("performing PKI migration to new keys/issuers layout")
+			anIssuer, aKey, err := sc.writeCaBundle(migrationInfo.legacyBundle, migrationName, migrationName)
+			if err != nil {
+				return err
+			}
+			b.Logger().Info("Migration generated the following ids and set them as defaults",
+				"issuer id", anIssuer.ID, "key id", aKey.ID)
+			issuerIdentifier = anIssuer.ID
+			keyIdentifier = aKey.ID
+
+			// Since we do not have all the mount information available we must schedule
+			// the CRL to be rebuilt at a later time.
+			b.crlBuilder.requestRebuildIfActiveNode(b)
+		}
+	}
+
+	if migrationInfo.migrationLog != nil && migrationInfo.migrationLog.MigrationVersion == 1 {
+		// We've seen a bundle with migration version 1; this means an
+		// earlier version of the code ran which didn't have the fix for
+		// correct write order in rebuildIssuersChains(...). Rather than
+		// having every user read the migrated active issuer and see if
+		// their chains need rebuilding, we'll schedule a one-off chain
+		// migration here.
+		b.Logger().Info(fmt.Sprintf("%v: performing maintenance rebuild of ca_chains", b.backendUUID))
+		if err := sc.rebuildIssuersChains(nil); err != nil {
+			return err
+		}
+	}
+
+	// We always want to write out this log entry, as the secondary clusters leverage this
+	// path to wake up if they were upgraded before the primary cluster's migration occurred.
+	err = setLegacyBundleMigrationLog(ctx, s, &legacyBundleMigrationLog{
+		Hash:             migrationInfo.legacyBundleHash,
+		Created:          time.Now(),
+		CreatedIssuer:    issuerIdentifier,
+		CreatedKey:       keyIdentifier,
+		MigrationVersion: latestMigrationVersion,
+	})
+	if err != nil {
+		return err
+	}
+
+	b.Logger().Info(fmt.Sprintf("%v: succeeded in migrating to issuer storage version %v", b.backendUUID, latestMigrationVersion))
+
+	return nil
+}
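migrateStorage is not normally called directly; as the tests below exercise, mount (re)initialization is what drives it. A hedged sketch of that entry point, mirroring the pattern the tests use:

    // Hypothetical driver: (re)initializing the backend triggers any
    // pending storage migration, so failures surface as init errors.
    func runMigrationViaInit(ctx context.Context, b *backend, s logical.Storage) error {
        request := &logical.InitializationRequest{Storage: s}
        return b.initialize(ctx, request)
    }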
+func computeHashOfLegacyBundle(bundle *certutil.CertBundle) (string, error) {
+	hasher := sha256.New()
+	// Generate an empty hash if the bundle does not exist.
+	if bundle != nil {
+		// We only hash the main certificate and the certs within the CAChain,
+		// assuming that any sort of change that occurred would have influenced one of those two fields.
+		if _, err := hasher.Write([]byte(bundle.Certificate)); err != nil {
+			return "", err
+		}
+		for _, cert := range bundle.CAChain {
+			if _, err := hasher.Write([]byte(cert)); err != nil {
+				return "", err
+			}
+		}
+	}
+	return hex.EncodeToString(hasher.Sum(nil)), nil
+}
+
+func getLegacyBundleMigrationLog(ctx context.Context, s logical.Storage) (*legacyBundleMigrationLog, error) {
+	entry, err := s.Get(ctx, legacyMigrationBundleLogKey)
+	if err != nil {
+		return nil, err
+	}
+
+	if entry == nil {
+		return nil, nil
+	}
+
+	lbm := &legacyBundleMigrationLog{}
+	err = entry.DecodeJSON(lbm)
+	if err != nil {
+		// If we can't decode our bundle, let's scrap it and assume a blank value;
+		// re-running the migration will at most bring back an older certificate/private key.
+		return nil, nil
+	}
+	return lbm, nil
+}
+
+func setLegacyBundleMigrationLog(ctx context.Context, s logical.Storage, lbm *legacyBundleMigrationLog) error {
+	json, err := logical.StorageEntryJSON(legacyMigrationBundleLogKey, lbm)
+	if err != nil {
+		return err
+	}
+
+	return s.Put(ctx, json)
+}
+
+func getLegacyCertBundle(ctx context.Context, s logical.Storage) (*issuerEntry, *certutil.CertBundle, error) {
+	entry, err := s.Get(ctx, legacyCertBundlePath)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if entry == nil {
+		return nil, nil, nil
+	}
+
+	cb := &certutil.CertBundle{}
+	err = entry.DecodeJSON(cb)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Fake a storage entry with backwards compatibility in mind.
+	issuer := &issuerEntry{
+		ID:                   legacyBundleShimID,
+		KeyID:                legacyBundleShimKeyID,
+		Name:                 "legacy-entry-shim",
+		Certificate:          cb.Certificate,
+		CAChain:              cb.CAChain,
+		SerialNumber:         cb.SerialNumber,
+		LeafNotAfterBehavior: certutil.ErrNotAfterBehavior,
+	}
+	issuer.Usage.ToggleUsage(AllIssuerUsages)
+
+	return issuer, cb, nil
+}
diff --git a/builtin/logical/pki/storage_migrations_test.go b/builtin/logical/pki/storage_migrations_test.go
new file mode 100644
index 0000000..ef70447
--- /dev/null
+++ b/builtin/logical/pki/storage_migrations_test.go
@@ -0,0 +1,982 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package pki
+
+import (
+	"context"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/hashicorp/vault/sdk/helper/certutil"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/stretchr/testify/require"
+)
+
+func Test_migrateStorageEmptyStorage(t *testing.T) {
+	t.Parallel()
+	startTime := time.Now()
+	ctx := context.Background()
+	b, s := CreateBackendWithStorage(t)
+	sc := b.makeStorageContext(ctx, s)
+
+	// Reset the version the helper above set to 1.
+ b.pkiStorageVersion.Store(0) + require.True(t, b.useLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") + + request := &logical.InitializationRequest{Storage: s} + err := b.initialize(ctx, request) + require.NoError(t, err) + + issuerIds, err := sc.listIssuers() + require.NoError(t, err) + require.Empty(t, issuerIds) + + keyIds, err := sc.listKeys() + require.NoError(t, err) + require.Empty(t, keyIds) + + logEntry, err := getLegacyBundleMigrationLog(ctx, s) + require.NoError(t, err) + require.NotNil(t, logEntry) + require.Equal(t, latestMigrationVersion, logEntry.MigrationVersion) + require.True(t, len(strings.TrimSpace(logEntry.Hash)) > 0, + "Hash value (%s) should not have been empty", logEntry.Hash) + require.True(t, startTime.Before(logEntry.Created), + "created log entry time (%v) was before our start time(%v)?", logEntry.Created, startTime) + require.Empty(t, logEntry.CreatedIssuer) + require.Empty(t, logEntry.CreatedKey) + + require.False(t, b.useLegacyBundleCaStorage(), "post migration we are still told to use legacy storage") + + // Make sure we can re-run the migration without issues + request = &logical.InitializationRequest{Storage: s} + err = b.initialize(ctx, request) + require.NoError(t, err) + logEntry2, err := getLegacyBundleMigrationLog(ctx, s) + require.NoError(t, err) + require.NotNil(t, logEntry2) + + // Make sure the hash and created times have not changed. + require.Equal(t, logEntry.Created, logEntry2.Created) + require.Equal(t, logEntry.Hash, logEntry2.Hash) +} + +func Test_migrateStorageOnlyKey(t *testing.T) { + t.Parallel() + startTime := time.Now() + ctx := context.Background() + b, s := CreateBackendWithStorage(t) + sc := b.makeStorageContext(ctx, s) + + // Reset the version the helper above set to 1. 
+ b.pkiStorageVersion.Store(0) + require.True(t, b.useLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") + + bundle := genCertBundle(t, b, s) + // Clear everything except for the key + bundle.SerialNumber = "" + bundle.CAChain = []string{} + bundle.Certificate = "" + bundle.IssuingCA = "" + + json, err := logical.StorageEntryJSON(legacyCertBundlePath, bundle) + require.NoError(t, err) + err = s.Put(ctx, json) + require.NoError(t, err) + + request := &logical.InitializationRequest{Storage: s} + err = b.initialize(ctx, request) + require.NoError(t, err) + + issuerIds, err := sc.listIssuers() + require.NoError(t, err) + require.Equal(t, 0, len(issuerIds)) + + keyIds, err := sc.listKeys() + require.NoError(t, err) + require.Equal(t, 1, len(keyIds)) + + logEntry, err := getLegacyBundleMigrationLog(ctx, s) + require.NoError(t, err) + require.NotNil(t, logEntry) + require.Equal(t, latestMigrationVersion, logEntry.MigrationVersion) + require.True(t, len(strings.TrimSpace(logEntry.Hash)) > 0, + "Hash value (%s) should not have been empty", logEntry.Hash) + require.True(t, startTime.Before(logEntry.Created), + "created log entry time (%v) was before our start time(%v)?", logEntry.Created, startTime) + require.Equal(t, logEntry.CreatedIssuer, issuerID("")) + require.Equal(t, logEntry.CreatedKey, keyIds[0]) + + keyId := keyIds[0] + key, err := sc.fetchKeyById(keyId) + require.NoError(t, err) + require.True(t, strings.HasPrefix(key.Name, "current-"), + "expected key name to start with current- was %s", key.Name) + require.Equal(t, keyId, key.ID) + require.Equal(t, strings.TrimSpace(bundle.PrivateKey), strings.TrimSpace(key.PrivateKey)) + require.Equal(t, bundle.PrivateKeyType, key.PrivateKeyType) + + // Make sure we kept the old bundle + _, certBundle, err := getLegacyCertBundle(ctx, s) + require.NoError(t, err) + require.Equal(t, bundle, certBundle) + + // Make sure we setup the default values + keysConfig, err := sc.getKeysConfig() + require.NoError(t, err) + require.Equal(t, &keyConfigEntry{DefaultKeyId: keyId}, keysConfig) + + issuersConfig, err := sc.getIssuersConfig() + require.NoError(t, err) + require.Equal(t, issuerID(""), issuersConfig.DefaultIssuerId) + + // Make sure if we attempt to re-run the migration nothing happens... + err = migrateStorage(ctx, b, s) + require.NoError(t, err) + logEntry2, err := getLegacyBundleMigrationLog(ctx, s) + require.NoError(t, err) + require.NotNil(t, logEntry2) + + require.Equal(t, logEntry.Created, logEntry2.Created) + require.Equal(t, logEntry.Hash, logEntry2.Hash) + + require.False(t, b.useLegacyBundleCaStorage(), "post migration we are still told to use legacy storage") +} + +func Test_migrateStorageSimpleBundle(t *testing.T) { + t.Parallel() + startTime := time.Now() + ctx := context.Background() + b, s := CreateBackendWithStorage(t) + sc := b.makeStorageContext(ctx, s) + + // Reset the version the helper above set to 1. 
+ b.pkiStorageVersion.Store(0) + require.True(t, b.useLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") + + bundle := genCertBundle(t, b, s) + json, err := logical.StorageEntryJSON(legacyCertBundlePath, bundle) + require.NoError(t, err) + err = s.Put(ctx, json) + require.NoError(t, err) + + request := &logical.InitializationRequest{Storage: s} + err = b.initialize(ctx, request) + require.NoError(t, err) + + issuerIds, err := sc.listIssuers() + require.NoError(t, err) + require.Equal(t, 1, len(issuerIds)) + + keyIds, err := sc.listKeys() + require.NoError(t, err) + require.Equal(t, 1, len(keyIds)) + + logEntry, err := getLegacyBundleMigrationLog(ctx, s) + require.NoError(t, err) + require.NotNil(t, logEntry) + require.Equal(t, latestMigrationVersion, logEntry.MigrationVersion) + require.True(t, len(strings.TrimSpace(logEntry.Hash)) > 0, + "Hash value (%s) should not have been empty", logEntry.Hash) + require.True(t, startTime.Before(logEntry.Created), + "created log entry time (%v) was before our start time(%v)?", logEntry.Created, startTime) + require.Equal(t, logEntry.CreatedIssuer, issuerIds[0]) + require.Equal(t, logEntry.CreatedKey, keyIds[0]) + + issuerId := issuerIds[0] + keyId := keyIds[0] + issuer, err := sc.fetchIssuerById(issuerId) + require.NoError(t, err) + require.True(t, strings.HasPrefix(issuer.Name, "current-"), + "expected issuer name to start with current- was %s", issuer.Name) + require.Equal(t, certutil.ErrNotAfterBehavior, issuer.LeafNotAfterBehavior) + + key, err := sc.fetchKeyById(keyId) + require.NoError(t, err) + require.True(t, strings.HasPrefix(key.Name, "current-"), + "expected key name to start with current- was %s", key.Name) + + require.Equal(t, issuerId, issuer.ID) + require.Equal(t, bundle.SerialNumber, issuer.SerialNumber) + require.Equal(t, strings.TrimSpace(bundle.Certificate), strings.TrimSpace(issuer.Certificate)) + require.Equal(t, keyId, issuer.KeyID) + require.Empty(t, issuer.ManualChain) + require.Equal(t, []string{bundle.Certificate + "\n"}, issuer.CAChain) + require.Equal(t, AllIssuerUsages, issuer.Usage) + require.Equal(t, certutil.ErrNotAfterBehavior, issuer.LeafNotAfterBehavior) + + require.Equal(t, keyId, key.ID) + require.Equal(t, strings.TrimSpace(bundle.PrivateKey), strings.TrimSpace(key.PrivateKey)) + require.Equal(t, bundle.PrivateKeyType, key.PrivateKeyType) + + // Make sure we kept the old bundle + _, certBundle, err := getLegacyCertBundle(ctx, s) + require.NoError(t, err) + require.Equal(t, bundle, certBundle) + + // Make sure we setup the default values + keysConfig, err := sc.getKeysConfig() + require.NoError(t, err) + require.Equal(t, &keyConfigEntry{DefaultKeyId: keyId}, keysConfig) + + issuersConfig, err := sc.getIssuersConfig() + require.NoError(t, err) + require.Equal(t, issuerId, issuersConfig.DefaultIssuerId) + + // Make sure if we attempt to re-run the migration nothing happens... 
+ err = migrateStorage(ctx, b, s) + require.NoError(t, err) + logEntry2, err := getLegacyBundleMigrationLog(ctx, s) + require.NoError(t, err) + require.NotNil(t, logEntry2) + + require.Equal(t, logEntry.Created, logEntry2.Created) + require.Equal(t, logEntry.Hash, logEntry2.Hash) + + require.False(t, b.useLegacyBundleCaStorage(), "post migration we are still told to use legacy storage") + + // Make sure we can re-process a migration from scratch for whatever reason + err = s.Delete(ctx, legacyMigrationBundleLogKey) + require.NoError(t, err) + + err = migrateStorage(ctx, b, s) + require.NoError(t, err) + + logEntry3, err := getLegacyBundleMigrationLog(ctx, s) + require.NoError(t, err) + require.NotNil(t, logEntry3) + + require.NotEqual(t, logEntry.Created, logEntry3.Created) + require.Equal(t, logEntry.Hash, logEntry3.Hash) +} + +func TestMigration_OnceChainRebuild(t *testing.T) { + t.Parallel() + ctx := context.Background() + b, s := CreateBackendWithStorage(t) + sc := b.makeStorageContext(ctx, s) + + // Create a legacy CA bundle that we'll migrate to the new layout. We call + // ToParsedCertBundle just to make sure it works and to populate + // bundle.SerialNumber for us. + bundle := &certutil.CertBundle{ + PrivateKeyType: certutil.RSAPrivateKey, + Certificate: migIntCA, + IssuingCA: migRootCA, + CAChain: []string{migRootCA}, + PrivateKey: migIntPrivKey, + } + _, err := bundle.ToParsedCertBundle() + require.NoError(t, err) + writeLegacyBundle(t, b, s, bundle) + + // Do an initial migration. Ensure we end up at least on version 2. + request := &logical.InitializationRequest{Storage: s} + err = b.initialize(ctx, request) + require.NoError(t, err) + + issuerIds, err := sc.listIssuers() + require.NoError(t, err) + require.Equal(t, 2, len(issuerIds)) + + keyIds, err := sc.listKeys() + require.NoError(t, err) + require.Equal(t, 1, len(keyIds)) + + logEntry, err := getLegacyBundleMigrationLog(ctx, s) + require.NoError(t, err) + require.NotNil(t, logEntry) + require.GreaterOrEqual(t, logEntry.MigrationVersion, 2) + require.GreaterOrEqual(t, latestMigrationVersion, 2) + + // Verify the chain built correctly: current should have a CA chain of + // length two. + // + // Afterwards, we mutate these issuers to only point at themselves and + // write back out. + var rootIssuerId issuerID + var intIssuerId issuerID + for _, issuerId := range issuerIds { + issuer, err := sc.fetchIssuerById(issuerId) + require.NoError(t, err) + require.NotNil(t, issuer) + + if strings.HasPrefix(issuer.Name, "current-") { + require.Equal(t, 2, len(issuer.CAChain)) + require.Equal(t, migIntCA, issuer.CAChain[0]) + require.Equal(t, migRootCA, issuer.CAChain[1]) + intIssuerId = issuerId + + issuer.CAChain = []string{migIntCA} + err = sc.writeIssuer(issuer) + require.NoError(t, err) + } else { + require.Equal(t, 1, len(issuer.CAChain)) + require.Equal(t, migRootCA, issuer.CAChain[0]) + rootIssuerId = issuerId + } + } + + // Reset our migration version back to one, as if this never + // happened... + logEntry.MigrationVersion = 1 + err = setLegacyBundleMigrationLog(ctx, s, logEntry) + require.NoError(t, err) + b.pkiStorageVersion.Store(1) + + // Re-attempt the migration by reinitializing the mount. 
+ err = b.initialize(ctx, request) + require.NoError(t, err) + + newIssuerIds, err := sc.listIssuers() + require.NoError(t, err) + require.Equal(t, 2, len(newIssuerIds)) + require.Equal(t, issuerIds, newIssuerIds) + + newKeyIds, err := sc.listKeys() + require.NoError(t, err) + require.Equal(t, 1, len(newKeyIds)) + require.Equal(t, keyIds, newKeyIds) + + logEntry, err = getLegacyBundleMigrationLog(ctx, s) + require.NoError(t, err) + require.NotNil(t, logEntry) + require.Equal(t, logEntry.MigrationVersion, latestMigrationVersion) + + // Ensure the chains are correct on the intermediate. By using the + // issuerId saved above, this ensures we didn't change any issuerIds, + // we merely updated the existing issuers. + intIssuer, err := sc.fetchIssuerById(intIssuerId) + require.NoError(t, err) + require.NotNil(t, intIssuer) + require.Equal(t, 2, len(intIssuer.CAChain)) + require.Equal(t, migIntCA, intIssuer.CAChain[0]) + require.Equal(t, migRootCA, intIssuer.CAChain[1]) + + rootIssuer, err := sc.fetchIssuerById(rootIssuerId) + require.NoError(t, err) + require.NotNil(t, rootIssuer) + require.Equal(t, 1, len(rootIssuer.CAChain)) + require.Equal(t, migRootCA, rootIssuer.CAChain[0]) +} + +func TestExpectedOpsWork_PreMigration(t *testing.T) { + t.Parallel() + ctx := context.Background() + b, s := CreateBackendWithStorage(t) + // Reset the version the helper above set to 1. + b.pkiStorageVersion.Store(0) + require.True(t, b.useLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") + + bundle := genCertBundle(t, b, s) + json, err := logical.StorageEntryJSON(legacyCertBundlePath, bundle) + require.NoError(t, err) + err = s.Put(ctx, json) + require.NoError(t, err) + + // generate role + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/allow-all", + Storage: s, + Data: map[string]interface{}{ + "allow_any_name": "true", + "no_store": "false", + }, + MountPoint: "pki/", + }) + require.NoError(t, err, "error from creating role") + require.NotNil(t, resp, "got nil response object from creating role") + + // List roles + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ListOperation, + Path: "roles", + Storage: s, + MountPoint: "pki/", + }) + require.NoError(t, err, "error from listing roles") + require.NotNil(t, resp, "got nil response object from listing roles") + require.False(t, resp.IsError(), "got error response from listing roles: %#v", resp) + require.Contains(t, resp.Data["keys"], "allow-all", "failed to list our roles") + + // Read roles + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Path: "roles/allow-all", + Storage: s, + MountPoint: "pki/", + }) + require.NoError(t, err, "error from reading role") + require.NotNil(t, resp, "got nil response object from reading role") + require.False(t, resp.IsError(), "got error response from reading role: %#v", resp) + require.NotEmpty(t, resp.Data, "data map should not have been empty of reading role") + + // Issue a cert from our legacy bundle. 
+ resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "issue/allow-all", + Storage: s, + Data: map[string]interface{}{ + "common_name": "test.com", + "ttl": "60s", + }, + MountPoint: "pki/", + }) + require.NoError(t, err, "error issue on allow-all") + require.NotNil(t, resp, "got nil response object from issue allow-all") + require.False(t, resp.IsError(), "got error response from issue on allow-all: %#v", resp) + serialNum := resp.Data["serial_number"].(string) + require.NotEmpty(t, serialNum) + + // Make sure we can list + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ListOperation, + Path: "certs", + Storage: s, + MountPoint: "pki/", + }) + require.NoError(t, err, "error listing certs") + require.NotNil(t, resp, "got nil response object from listing certs") + require.False(t, resp.IsError(), "got error response from listing certs: %#v", resp) + require.Contains(t, resp.Data["keys"], serialNum, "failed to list our cert") + + // Revoke the cert now. + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "revoke", + Storage: s, + Data: map[string]interface{}{ + "serial_number": serialNum, + }, + MountPoint: "pki/", + }) + require.NoError(t, err, "error revoking cert") + require.NotNil(t, resp, "got nil response object from revoke cert") + require.False(t, resp.IsError(), "got error response from revoke cert: %#v", resp) + + // Check our CRL includes the revoked cert. + resp = requestCrlFromBackend(t, s, b) + crl := parseCrlPemBytes(t, resp.Data["http_raw_body"].([]byte)) + requireSerialNumberInCRL(t, crl, serialNum) + + // Set CRL config + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/crl", + Storage: s, + Data: map[string]interface{}{ + "expiry": "72h", + "disable": "false", + }, + MountPoint: "pki/", + }) + require.NoError(t, err, "error setting CRL config") + require.NotNil(t, resp, "got nil response setting CRL config") + + // Set URL config + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/urls", + Storage: s, + Data: map[string]interface{}{ + "ocsp_servers": []string{"https://localhost:8080"}, + }, + MountPoint: "pki/", + }) + requireSuccessNonNilResponse(t, resp, err) + + // Make sure we can fetch the old values... 
+ for _, path := range []string{"ca/pem", "ca_chain", "cert/" + serialNum, "cert/ca", "cert/crl", "cert/ca_chain", "config/crl", "config/urls"} { + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Path: path, + Storage: s, + MountPoint: "pki/", + }) + require.NoError(t, err, "error reading cert %s", path) + require.NotNil(t, resp, "got nil response object from reading cert %s", path) + require.False(t, resp.IsError(), "got error response from reading cert %s: %#v", path, resp) + } + + // Sign CSR + _, csr := generateTestCsr(t, certutil.ECPrivateKey, 224) + for _, path := range []string{"sign/allow-all", "root/sign-intermediate", "sign-verbatim"} { + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: path, + Storage: s, + Data: map[string]interface{}{ + "csr": csr, + }, + MountPoint: "pki/", + }) + require.NoError(t, err, "error signing csr from path %s", path) + require.NotNil(t, resp, "got nil response object from path %s", path) + require.NotEmpty(t, resp.Data, "data map response was empty from path %s", path) + } + + // Sign self-issued + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/sign-self-issued", + Storage: s, + Data: map[string]interface{}{ + "certificate": csr, + }, + MountPoint: "pki/", + }) + require.NoError(t, err, "error signing csr from path root/sign-self-issued") + require.NotNil(t, resp, "got nil response object from path root/sign-self-issued") + require.NotEmpty(t, resp.Data, "data map response was empty from path root/sign-self-issued") + + // Delete Role + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.DeleteOperation, + Path: "roles/allow-all", + Storage: s, + MountPoint: "pki/", + }) + require.NoError(t, err, "error deleting role") + require.Nil(t, resp, "got non-nil response object from deleting role") + + // Delete Root + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.DeleteOperation, + Path: "root", + Storage: s, + MountPoint: "pki/", + }) + require.NoError(t, err, "error deleting root") + require.NotNil(t, resp, "got nil response object from deleting root") + require.NotEmpty(t, resp.Warnings, "expected warnings set on delete root") + + /////////////////////////////// + // Legacy calls we expect to fail when in migration mode + /////////////////////////////// + requireFailInMigration(t, b, s, logical.UpdateOperation, "config/ca") + requireFailInMigration(t, b, s, logical.UpdateOperation, "intermediate/generate/internal") + requireFailInMigration(t, b, s, logical.UpdateOperation, "intermediate/set-signed") + requireFailInMigration(t, b, s, logical.UpdateOperation, "root/generate/internal") + + /////////////////////////////// + // New apis should be unavailable + /////////////////////////////// + requireFailInMigration(t, b, s, logical.ListOperation, "issuers") + requireFailInMigration(t, b, s, logical.UpdateOperation, "issuers/generate/root/internal") + requireFailInMigration(t, b, s, logical.UpdateOperation, "issuers/generate/intermediate/internal") + requireFailInMigration(t, b, s, logical.UpdateOperation, "issuers/import/cert") + requireFailInMigration(t, b, s, logical.ReadOperation, "issuer/default/json") + requireFailInMigration(t, b, s, logical.ReadOperation, "issuer/default/crl/pem") + requireFailInMigration(t, b, s, logical.UpdateOperation, "issuer/test-role") + + // The following 
calls work as they are shared handlers with existing paths. + // requireFailInMigration(t, b, s, logical.UpdateOperation, "issuer/default/issue/test-role") + // requireFailInMigration(t, b, s, logical.UpdateOperation, "issuer/default/sign/test-role") + // requireFailInMigration(t, b, s, logical.UpdateOperation, "issuer/default/sign-verbatim") + // requireFailInMigration(t, b, s, logical.UpdateOperation, "issuer/default/sign-self-issued") + + requireFailInMigration(t, b, s, logical.UpdateOperation, "root/replace") + requireFailInMigration(t, b, s, logical.UpdateOperation, "root/rotate/internal") + requireFailInMigration(t, b, s, logical.UpdateOperation, "intermediate/cross-sign") + + requireFailInMigration(t, b, s, logical.UpdateOperation, "config/issuers") + requireFailInMigration(t, b, s, logical.ReadOperation, "config/issuers") + + requireFailInMigration(t, b, s, logical.ListOperation, "keys") + requireFailInMigration(t, b, s, logical.UpdateOperation, "keys/generate/internal") + requireFailInMigration(t, b, s, logical.UpdateOperation, "keys/import") + requireFailInMigration(t, b, s, logical.ReadOperation, "key/default") + requireFailInMigration(t, b, s, logical.UpdateOperation, "config/keys") + requireFailInMigration(t, b, s, logical.ReadOperation, "config/keys") +} + +func TestBackupBundle(t *testing.T) { + t.Parallel() + ctx := context.Background() + b, s := CreateBackendWithStorage(t) + sc := b.makeStorageContext(ctx, s) + + // Reset the version the helper above set to 1. + b.pkiStorageVersion.Store(0) + require.True(t, b.useLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") + + // Create an empty request and tidy configuration for us. + req := &logical.Request{ + Storage: s, + MountPoint: "pki/", + } + cfg := &tidyConfig{ + BackupBundle: true, + IssuerSafetyBuffer: 120 * time.Second, + } + + // Migration should do nothing if we're on an empty mount. + err := b.doTidyMoveCABundle(ctx, req, b.Logger(), cfg) + require.NoError(t, err) + requireFileNotExists(t, sc, legacyCertBundlePath) + requireFileNotExists(t, sc, legacyCertBundleBackupPath) + issuerIds, err := sc.listIssuers() + require.NoError(t, err) + require.Empty(t, issuerIds) + keyIds, err := sc.listKeys() + require.NoError(t, err) + require.Empty(t, keyIds) + + // Create a legacy CA bundle and write it out. + bundle := genCertBundle(t, b, s) + json, err := logical.StorageEntryJSON(legacyCertBundlePath, bundle) + require.NoError(t, err) + err = s.Put(ctx, json) + require.NoError(t, err) + legacyContents := requireFileExists(t, sc, legacyCertBundlePath, nil) + + // Doing another tidy should maintain the status quo since we've + // still not done our migration. + err = b.doTidyMoveCABundle(ctx, req, b.Logger(), cfg) + require.NoError(t, err) + requireFileExists(t, sc, legacyCertBundlePath, legacyContents) + requireFileNotExists(t, sc, legacyCertBundleBackupPath) + issuerIds, err = sc.listIssuers() + require.NoError(t, err) + require.Empty(t, issuerIds) + keyIds, err = sc.listKeys() + require.NoError(t, err) + require.Empty(t, keyIds) + + // Do a migration; this should provision an issuer and key. 
+ initReq := &logical.InitializationRequest{Storage: s} + err = b.initialize(ctx, initReq) + require.NoError(t, err) + requireFileExists(t, sc, legacyCertBundlePath, legacyContents) + issuerIds, err = sc.listIssuers() + require.NoError(t, err) + require.NotEmpty(t, issuerIds) + keyIds, err = sc.listKeys() + require.NoError(t, err) + require.NotEmpty(t, keyIds) + + // Doing another tidy should maintain the status quo since we've + // done our migration too recently relative to the safety buffer. + err = b.doTidyMoveCABundle(ctx, req, b.Logger(), cfg) + require.NoError(t, err) + requireFileExists(t, sc, legacyCertBundlePath, legacyContents) + requireFileNotExists(t, sc, legacyCertBundleBackupPath) + issuerIds, err = sc.listIssuers() + require.NoError(t, err) + require.NotEmpty(t, issuerIds) + keyIds, err = sc.listKeys() + require.NoError(t, err) + require.NotEmpty(t, keyIds) + + // Shortening our buffer should ensure the migration occurs, removing + // the legacy bundle but creating the backup one. + time.Sleep(2 * time.Second) + cfg.IssuerSafetyBuffer = 1 * time.Second + err = b.doTidyMoveCABundle(ctx, req, b.Logger(), cfg) + require.NoError(t, err) + requireFileNotExists(t, sc, legacyCertBundlePath) + requireFileExists(t, sc, legacyCertBundleBackupPath, legacyContents) + issuerIds, err = sc.listIssuers() + require.NoError(t, err) + require.NotEmpty(t, issuerIds) + keyIds, err = sc.listKeys() + require.NoError(t, err) + require.NotEmpty(t, keyIds) + + // A new initialization should do nothing. + err = b.initialize(ctx, initReq) + require.NoError(t, err) + requireFileNotExists(t, sc, legacyCertBundlePath) + requireFileExists(t, sc, legacyCertBundleBackupPath, legacyContents) + issuerIds, err = sc.listIssuers() + require.NoError(t, err) + require.NotEmpty(t, issuerIds) + require.Equal(t, len(issuerIds), 1) + keyIds, err = sc.listKeys() + require.NoError(t, err) + require.NotEmpty(t, keyIds) + require.Equal(t, len(keyIds), 1) + + // Restoring the legacy bundles with new issuers should redo the + // migration. + newBundle := genCertBundle(t, b, s) + json, err = logical.StorageEntryJSON(legacyCertBundlePath, newBundle) + require.NoError(t, err) + err = s.Put(ctx, json) + require.NoError(t, err) + newLegacyContents := requireFileExists(t, sc, legacyCertBundlePath, nil) + + // -> reinit + err = b.initialize(ctx, initReq) + require.NoError(t, err) + requireFileExists(t, sc, legacyCertBundlePath, newLegacyContents) + requireFileExists(t, sc, legacyCertBundleBackupPath, legacyContents) + issuerIds, err = sc.listIssuers() + require.NoError(t, err) + require.NotEmpty(t, issuerIds) + require.Equal(t, len(issuerIds), 2) + keyIds, err = sc.listKeys() + require.NoError(t, err) + require.NotEmpty(t, keyIds) + require.Equal(t, len(keyIds), 2) + + // -> when we tidy again, we'll overwrite the old backup with the new + // one. + time.Sleep(2 * time.Second) + err = b.doTidyMoveCABundle(ctx, req, b.Logger(), cfg) + require.NoError(t, err) + requireFileNotExists(t, sc, legacyCertBundlePath) + requireFileExists(t, sc, legacyCertBundleBackupPath, newLegacyContents) + issuerIds, err = sc.listIssuers() + require.NoError(t, err) + require.NotEmpty(t, issuerIds) + keyIds, err = sc.listKeys() + require.NoError(t, err) + require.NotEmpty(t, keyIds) + + // Finally, restoring the legacy bundle and re-migrating should redo + // the migration. 
+ err = s.Put(ctx, json) + require.NoError(t, err) + requireFileExists(t, sc, legacyCertBundlePath, newLegacyContents) + requireFileExists(t, sc, legacyCertBundleBackupPath, newLegacyContents) + + // -> overwrite the version and re-migrate + logEntry, err := getLegacyBundleMigrationLog(ctx, s) + require.NoError(t, err) + logEntry.MigrationVersion = 0 + err = setLegacyBundleMigrationLog(ctx, s, logEntry) + require.NoError(t, err) + err = b.initialize(ctx, initReq) + require.NoError(t, err) + requireFileExists(t, sc, legacyCertBundlePath, newLegacyContents) + requireFileExists(t, sc, legacyCertBundleBackupPath, newLegacyContents) + issuerIds, err = sc.listIssuers() + require.NoError(t, err) + require.NotEmpty(t, issuerIds) + require.Equal(t, len(issuerIds), 2) + keyIds, err = sc.listKeys() + require.NoError(t, err) + require.NotEmpty(t, keyIds) + require.Equal(t, len(keyIds), 2) + + // -> Re-tidy should remove the legacy one. + time.Sleep(2 * time.Second) + err = b.doTidyMoveCABundle(ctx, req, b.Logger(), cfg) + require.NoError(t, err) + requireFileNotExists(t, sc, legacyCertBundlePath) + requireFileExists(t, sc, legacyCertBundleBackupPath, newLegacyContents) + issuerIds, err = sc.listIssuers() + require.NoError(t, err) + require.NotEmpty(t, issuerIds) + keyIds, err = sc.listKeys() + require.NoError(t, err) + require.NotEmpty(t, keyIds) +} + +func TestDeletedIssuersPostMigration(t *testing.T) { + // We want to simulate the following scenario: + // + // 1.10.x: -> Create a CA. + // 1.11.0: -> Migrate to new issuer layout but version 1. + // -> Delete existing issuers, create new ones. + // (now): -> Migrate to version 2 layout, make sure we don't see + // re-migration. + + t.Parallel() + ctx := context.Background() + b, s := CreateBackendWithStorage(t) + sc := b.makeStorageContext(ctx, s) + + // Reset the version the helper above set to 1. + b.pkiStorageVersion.Store(0) + require.True(t, b.useLegacyBundleCaStorage(), "pre migration we should have been told to use legacy storage.") + + // Create a legacy CA bundle and write it out. + bundle := genCertBundle(t, b, s) + json, err := logical.StorageEntryJSON(legacyCertBundlePath, bundle) + require.NoError(t, err) + err = s.Put(ctx, json) + require.NoError(t, err) + legacyContents := requireFileExists(t, sc, legacyCertBundlePath, nil) + + // Do a migration; this should provision an issuer and key. + initReq := &logical.InitializationRequest{Storage: s} + err = b.initialize(ctx, initReq) + require.NoError(t, err) + requireFileExists(t, sc, legacyCertBundlePath, legacyContents) + issuerIds, err := sc.listIssuers() + require.NoError(t, err) + require.NotEmpty(t, issuerIds) + keyIds, err := sc.listKeys() + require.NoError(t, err) + require.NotEmpty(t, keyIds) + + // Hack: reset the version to 1, to simulate a pre-version-2 migration + // log. + info, err := getMigrationInfo(sc.Context, sc.Storage) + require.NoError(t, err, "failed to read migration info") + info.migrationLog.MigrationVersion = 1 + err = setLegacyBundleMigrationLog(sc.Context, sc.Storage, info.migrationLog) + require.NoError(t, err, "failed to write migration info") + + // Now delete all issuers and keys and create some new ones. 
+ for _, issuerId := range issuerIds { + deleted, err := sc.deleteIssuer(issuerId) + require.True(t, deleted, "expected it to be deleted") + require.NoError(t, err, "error removing issuer") + } + for _, keyId := range keyIds { + deleted, err := sc.deleteKey(keyId) + require.True(t, deleted, "expected it to be deleted") + require.NoError(t, err, "error removing key") + } + emptyIssuers, err := sc.listIssuers() + require.NoError(t, err) + require.Empty(t, emptyIssuers) + emptyKeys, err := sc.listKeys() + require.NoError(t, err) + require.Empty(t, emptyKeys) + + // Create a new issuer + key. + bundle = genCertBundle(t, b, s) + _, _, err = sc.writeCaBundle(bundle, "", "") + require.NoError(t, err) + + // List which issuers + keys we currently have. + postDeletionIssuers, err := sc.listIssuers() + require.NoError(t, err) + require.NotEmpty(t, postDeletionIssuers) + postDeletionKeys, err := sc.listKeys() + require.NoError(t, err) + require.NotEmpty(t, postDeletionKeys) + + // Now do another migration from 1->2. This should retain the newly + // created issuers+keys, but not revive any deleted ones. + err = b.initialize(ctx, initReq) + require.NoError(t, err) + requireFileExists(t, sc, legacyCertBundlePath, legacyContents) + postMigrationIssuers, err := sc.listIssuers() + require.NoError(t, err) + require.NotEmpty(t, postMigrationIssuers) + require.Equal(t, postMigrationIssuers, postDeletionIssuers, "regression failed: expected second migration from v1->v2 to not introduce new issuers") + postMigrationKeys, err := sc.listKeys() + require.NoError(t, err) + require.NotEmpty(t, postMigrationKeys) + require.Equal(t, postMigrationKeys, postDeletionKeys, "regression failed: expected second migration from v1->v2 to not introduce new keys") +} + +// requireFailInMigration validate that we fail the operation with the appropriate error message to the end-user +func requireFailInMigration(t *testing.T, b *backend, s logical.Storage, operation logical.Operation, path string) { + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: operation, + Path: path, + Storage: s, + MountPoint: "pki/", + }) + require.NoError(t, err, "error from op:%s path:%s", operation, path) + require.NotNil(t, resp, "got nil response from op:%s path:%s", operation, path) + require.True(t, resp.IsError(), "error flag was not set from op:%s path:%s resp: %#v", operation, path, resp) + require.Contains(t, resp.Error().Error(), "migration has completed", + "error message did not contain migration test for op:%s path:%s resp: %#v", operation, path, resp) +} + +func requireFileNotExists(t *testing.T, sc *storageContext, path string) { + t.Helper() + + entry, err := sc.Storage.Get(sc.Context, path) + require.NoError(t, err) + if entry != nil { + require.Empty(t, entry.Value) + } else { + require.Empty(t, entry) + } +} + +func requireFileExists(t *testing.T, sc *storageContext, path string, contents []byte) []byte { + t.Helper() + + entry, err := sc.Storage.Get(sc.Context, path) + require.NoError(t, err) + require.NotNil(t, entry) + require.NotEmpty(t, entry.Value) + if contents != nil { + require.Equal(t, entry.Value, contents) + } + return entry.Value +} + +// Keys to simulate an intermediate CA mount with also-imported root (parent). 
+const ( + migIntPrivKey = `-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAqu88Jcct/EyT8gDF+jdWuAwFplvanQ7KXAO5at58G6Y39UUz +fwnMS3P3VRBUoV5BDX+13wI2ldskbTKITsl6IXBPXUz0sKrdEKzXRVY4D6P2JR7W +YO1IUytfTgR+3F4sotFNQB++3ivT66AYLW7lOkoa+5lxsPM/oJ82DOlD2uGtDVTU +gQy1zugMBgPDlj+8tB562J9MTIdcKe9JpYrN0eO+aHzhbfvaSpScU4aZBgkS0kDv +8G4FxVfrBSDeD/JjCWaC48rLdgei1YrY0NFuw/8p/nPfA9vf2AtHMsWZRSwukQfq +I5HhQu+0OHQy3NWqXaBPzJNu3HnpKLykPHW7sQIDAQABAoIBAHNJy/2G66MhWx98 +Ggt7S4fyw9TCWx5XHXEWKfbEfFyBrXhF5kemqh2x5319+DamRaX/HwF8kqhcF6N2 +06ygAzmOcFjzUI3fkB5xFPh1AHa8FYZP2DOjloZR2IPcUFv9QInINRwszSU31kUz +w1rRUtYPqUdM5Pt99Mo219O5eMSlGtPKXm09uDAR8ZPuUx4jwGw90pSgeRB1Bg7X +Dt3YXx3X+OOs3Hbir1VDLSqCuy825l6Kn79h3eB8LAi+FUwCBvnTqyOEWyH2XjgP +z+tbz7lwnhGeKtxUl6Jb3m3SHtXpylot/4fwPisRV/9vaEDhVjKTmySH1WM+TRNR +CQLCJekCgYEA3b67DBhAYsFFdUd/4xh4QhHBanOcepV1CwaRln+UUjw1618ZEsTS +DKb9IS72C+ukUusGhQqxjFJlhOdXeMXpEbnEUY3PlREevWwm3bVAxtoAVRcmkQyK +PM4Oj9ODi2z8Cds0NvEXdX69uVutcbvm/JRZr/dsERWcLsfwdV/QqYcCgYEAxVce +d4ylsqORLm0/gcLnEyB9zhEPwmiJe1Yj5sH7LhGZ6JtLCqbOJO4jXmIzCrkbGyuf +BA/U7klc6jSprkBMgYhgOIuaULuFJvtKzJUzoATGFqX4r8WJm2ZycXgooAwZq6SZ +ySXOuQe9V7hlpI0fJfNhw+/HIjivL1jrnjBoXwcCgYEAtTv6LLx1g0Frv5scj0Ok +pntUlei/8ADPlJ9dxp+nXj8P4rvrBkgPVX/2S3TSbJO/znWA8qP20TVW+/UIrRE0 +mOQ37F/3VWKUuUT3zyUhOGVc+C7fupWBNolDpZG+ZepBZNzgJDeQcNuRvTmM3PQy +qiWl2AhlLuF2sVWA1q3lIWkCgYEAnuHWgNA3dE1nDWceE351hxvIzklEU/TQhAHF +o/uYHO5E6VdmoqvMG0W0KkCL8d046rZDMAUDHdrpOROvbcENF9lSBxS26LshqFH4 +ViDmULanOgLk57f2Y6ynBZ6Frt4vKNe8jYuoFacale67vzFz251JoHSD8pSKz2cb +ROCal68CgYA51hKqvki4r5rmS7W/Yvc3x3Wc0wpDEHTgLMoH+EV7AffJ8dy0/+po +AHK0nnRU63++1JmhQczBR0yTI6PUyeegEBk/d5CgFlY7UJQMTFPsMsiuM0Xw5nAv +KMPykK01D28UAkUxhwF7CqFrwwEv9GislgjewbdF5Za176+EuMEwIw== +-----END RSA PRIVATE KEY----- +` + migIntCA = `-----BEGIN CERTIFICATE----- +MIIDHTCCAgWgAwIBAgIUfxlNBmrI7jsgH2Sdle1nVTqn5YQwDQYJKoZIhvcNAQEL +BQAwEjEQMA4GA1UEAxMHUm9vdCBYMTAeFw0yMjExMDIxMjI2MjhaFw0yMjEyMDQx +MjI2NThaMBoxGDAWBgNVBAMTD0ludGVybWVkaWF0ZSBSMTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAKrvPCXHLfxMk/IAxfo3VrgMBaZb2p0OylwDuWre +fBumN/VFM38JzEtz91UQVKFeQQ1/td8CNpXbJG0yiE7JeiFwT11M9LCq3RCs10VW +OA+j9iUe1mDtSFMrX04EftxeLKLRTUAfvt4r0+ugGC1u5TpKGvuZcbDzP6CfNgzp +Q9rhrQ1U1IEMtc7oDAYDw5Y/vLQeetifTEyHXCnvSaWKzdHjvmh84W372kqUnFOG +mQYJEtJA7/BuBcVX6wUg3g/yYwlmguPKy3YHotWK2NDRbsP/Kf5z3wPb39gLRzLF +mUUsLpEH6iOR4ULvtDh0MtzVql2gT8yTbtx56Si8pDx1u7ECAwEAAaNjMGEwDgYD +VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFusWj3piAiY +CR7tszR6uNYSMLe2MB8GA1UdIwQYMBaAFMNRNkLozstIhNhXCefi+WnaQApbMA0G +CSqGSIb3DQEBCwUAA4IBAQCmH852E/pDGBhf2VI1JAPZy9VYaRkKoqn4+5R1Gnoq +b90zhdCGueIm/usC1wAa0OOn7+xdQXFNfeI8UUB9w10q0QnM/A/G2v8UkdlLPPQP +zPjIYLalOOIOHf8hU2O5lwj0IA4JwjwDQ4xj69eX/N+x2LEI7SHyVVUZWAx0Y67a +QdyubpIJZlW/PI7kMwGyTx3tdkZxk1nTNtf/0nKvNuXKKcVzBCEMfvXyx4LFEM+U +nc2vdWN7PAoXcjUbxD3ZNGinr7mSBpQg82+nur/8yuSwu6iHomnfGxjUsEHic2GC +ja9siTbR+ONvVb4xUjugN/XmMSSaZnxig2vM9xcV8OMG +-----END CERTIFICATE----- +` + migRootCA = `-----BEGIN CERTIFICATE----- +MIIDFTCCAf2gAwIBAgIURDTnXp8u78jWMe770Jj6Ac1paxkwDQYJKoZIhvcNAQEL +BQAwEjEQMA4GA1UEAxMHUm9vdCBYMTAeFw0yMjExMDIxMjI0NTVaFw0yMjEyMDQx +MjI1MjRaMBIxEDAOBgNVBAMTB1Jvb3QgWDEwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQC/+dh/o1qKTOua/OkHRMIvHiyBxjjoqrLqFSBYhjYKs+alA0qS +lLVzNqIKU8jm3fT73orx7yk/6acWaEYv/6owMaUn51xwS3gQhTHdFR/fLJwXnu2O +PZNqAs6tjAM3Q08aqR0qfxnjDvcgO7TOWSyOvVT2cTRK+uKYzxJEY52BDMUbp+iC +WJdXca9UwKRzi2wFqGliDycYsBBt/tr8tHSbTSZ5Qx6UpFrKpjZn+sT5KhKUlsdd +BYFmRegc0wXq4/kRjum0oEUigUMlHADIEhRasyXPEKa19sGP8nAZfo/hNOusGhj7 +z7UPA0Cbe2uclpYPxsKgvcqQmgKugqKLL305AgMBAAGjYzBhMA4GA1UdDwEB/wQE 
+AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTDUTZC6M7LSITYVwnn4vlp +2kAKWzAfBgNVHSMEGDAWgBTDUTZC6M7LSITYVwnn4vlp2kAKWzANBgkqhkiG9w0B +AQsFAAOCAQEAu7qdM1Li6V6iDCPpLg5zZReRtcxhUdwb5Xn4sDa8GJCy35f1voew +n0TQgM3Uph5x/djCR/Sj91MyAJ1/Q1PQQTyKGyUjSHvkcOBg628IAnLthn8Ua1fL +oQC/F/mlT1Yv+/W8eNPtD453/P0z8E0xMT5K3kpEDW/6K9RdHZlDJMW/z3UJ+4LN +6ONjIBmgffmLz9sVMpgCFyL7+w3W01bGP7w5AfKj2duoVG/Ekf2yUwmm6r9NgTQ1 +oke0ShbZuMocwO8anq7k0R42FoluH3ipv9Qzzhsy+KdK5/fW5oqy1tKFaZsc67Q6 +0UmD9DiDpCtn2Wod3nwxn0zW5HvDAWuDwg== +-----END CERTIFICATE----- +` +) diff --git a/builtin/logical/pki/storage_test.go b/builtin/logical/pki/storage_test.go new file mode 100644 index 0000000..625c046 --- /dev/null +++ b/builtin/logical/pki/storage_test.go @@ -0,0 +1,279 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "context" + "strings" + "testing" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/require" +) + +var ctx = context.Background() + +func Test_ConfigsRoundTrip(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + sc := b.makeStorageContext(ctx, s) + + // Create an empty key, issuer for testing. + key := keyEntry{ID: genKeyId()} + err := sc.writeKey(key) + require.NoError(t, err) + issuer := &issuerEntry{ID: genIssuerId()} + err = sc.writeIssuer(issuer) + require.NoError(t, err) + + // Verify we handle nothing stored properly + keyConfigEmpty, err := sc.getKeysConfig() + require.NoError(t, err) + require.Equal(t, &keyConfigEntry{}, keyConfigEmpty) + + issuerConfigEmpty, err := sc.getIssuersConfig() + require.NoError(t, err) + require.Equal(t, &issuerConfigEntry{}, issuerConfigEmpty) + + // Now attempt to store and reload properly + origKeyConfig := &keyConfigEntry{ + DefaultKeyId: key.ID, + } + origIssuerConfig := &issuerConfigEntry{ + DefaultIssuerId: issuer.ID, + } + + err = sc.setKeysConfig(origKeyConfig) + require.NoError(t, err) + err = sc.setIssuersConfig(origIssuerConfig) + require.NoError(t, err) + + keyConfig, err := sc.getKeysConfig() + require.NoError(t, err) + require.Equal(t, origKeyConfig, keyConfig) + + issuerConfig, err := sc.getIssuersConfig() + require.NoError(t, err) + require.Equal(t, origIssuerConfig.DefaultIssuerId, issuerConfig.DefaultIssuerId) +} + +func Test_IssuerRoundTrip(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + sc := b.makeStorageContext(ctx, s) + issuer1, key1 := genIssuerAndKey(t, b, s) + issuer2, key2 := genIssuerAndKey(t, b, s) + + // We get an error when issuer id not found + _, err := sc.fetchIssuerById(issuer1.ID) + require.Error(t, err) + + // We get an error when key id not found + _, err = sc.fetchKeyById(key1.ID) + require.Error(t, err) + + // Now write out our issuers and keys + err = sc.writeKey(key1) + require.NoError(t, err) + err = sc.writeIssuer(&issuer1) + require.NoError(t, err) + + err = sc.writeKey(key2) + require.NoError(t, err) + err = sc.writeIssuer(&issuer2) + require.NoError(t, err) + + fetchedKey1, err := sc.fetchKeyById(key1.ID) + require.NoError(t, err) + + fetchedIssuer1, err := sc.fetchIssuerById(issuer1.ID) + require.NoError(t, err) + + require.Equal(t, &key1, fetchedKey1) + require.Equal(t, &issuer1, fetchedIssuer1) + + keys, err := sc.listKeys() + require.NoError(t, err) + + require.ElementsMatch(t, []keyID{key1.ID, key2.ID}, keys) + + issuers, err := sc.listIssuers() + require.NoError(t, err) + + require.ElementsMatch(t, 
[]issuerID{issuer1.ID, issuer2.ID}, issuers)
+}
+
+func Test_KeysIssuerImport(t *testing.T) {
+	t.Parallel()
+	b, s := CreateBackendWithStorage(t)
+	sc := b.makeStorageContext(ctx, s)
+
+	issuer1, key1 := genIssuerAndKey(t, b, s)
+	issuer2, key2 := genIssuerAndKey(t, b, s)
+
+	// Import order: key 1 before issuer 1; issuer 2 before key 2.
+	// Remove the IDs from the not-yet-written entities before beginning.
+	key1.ID = ""
+	issuer1.ID = ""
+	issuer1.KeyID = ""
+
+	key1Ref1, existing, err := sc.importKey(key1.PrivateKey, "key1", key1.PrivateKeyType)
+	require.NoError(t, err)
+	require.False(t, existing)
+	require.Equal(t, strings.TrimSpace(key1.PrivateKey), strings.TrimSpace(key1Ref1.PrivateKey))
+
+	// Make sure that if we attempt to re-import the same private key, no import
+	// or update occurs: the existing flag should be set to true, and the
+	// existing Name field should not be updated.
+	key1Ref2, existing, err := sc.importKey(key1.PrivateKey, "ignore-me", key1.PrivateKeyType)
+	require.NoError(t, err)
+	require.True(t, existing)
+	require.Equal(t, key1.PrivateKey, key1Ref1.PrivateKey)
+	require.Equal(t, key1Ref1.ID, key1Ref2.ID)
+	require.Equal(t, key1Ref1.Name, key1Ref2.Name)
+
+	issuer1Ref1, existing, err := sc.importIssuer(issuer1.Certificate, "issuer1")
+	require.NoError(t, err)
+	require.False(t, existing)
+	require.Equal(t, strings.TrimSpace(issuer1.Certificate), strings.TrimSpace(issuer1Ref1.Certificate))
+	require.Equal(t, key1Ref1.ID, issuer1Ref1.KeyID)
+	require.Equal(t, "issuer1", issuer1Ref1.Name)
+
+	// Make sure that if we attempt to re-import the same issuer, no import
+	// or update occurs: the existing flag should be set to true, and the
+	// existing Name field should not be updated.
+	issuer1Ref2, existing, err := sc.importIssuer(issuer1.Certificate, "ignore-me")
+	require.NoError(t, err)
+	require.True(t, existing)
+	require.Equal(t, strings.TrimSpace(issuer1.Certificate), strings.TrimSpace(issuer1Ref1.Certificate))
+	require.Equal(t, issuer1Ref1.ID, issuer1Ref2.ID)
+	require.Equal(t, key1Ref1.ID, issuer1Ref2.KeyID)
+	require.Equal(t, issuer1Ref1.Name, issuer1Ref2.Name)
+
+	err = sc.writeIssuer(&issuer2)
+	require.NoError(t, err)
+
+	err = sc.writeKey(key2)
+	require.NoError(t, err)
+
+	// Same double-import test as above, but this time the pre-existing entry
+	// was created through writeIssuer rather than importIssuer.
+	issuer2Ref, existing, err := sc.importIssuer(issuer2.Certificate, "ignore-me")
+	require.NoError(t, err)
+	require.True(t, existing)
+	require.Equal(t, strings.TrimSpace(issuer2.Certificate), strings.TrimSpace(issuer2Ref.Certificate))
+	require.Equal(t, issuer2.ID, issuer2Ref.ID)
+	require.Equal(t, "", issuer2Ref.Name)
+	require.Equal(t, issuer2.KeyID, issuer2Ref.KeyID)
+
+	// Same double-import test as above, but this time the pre-existing entry
+	// was created through writeKey rather than importKey.
+ key2Ref, existing, err := sc.importKey(key2.PrivateKey, "ignore-me", key2.PrivateKeyType) + require.NoError(t, err) + require.True(t, existing) + require.Equal(t, key2.PrivateKey, key2Ref.PrivateKey) + require.Equal(t, key2.ID, key2Ref.ID) + require.Equal(t, "", key2Ref.Name) +} + +func Test_IssuerUpgrade(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + sc := b.makeStorageContext(ctx, s) + + // Make sure that we add OCSP signing to v0 issuers if CRLSigning is enabled + issuer, _ := genIssuerAndKey(t, b, s) + issuer.Version = 0 + issuer.Usage.ToggleUsage(OCSPSigningUsage) + + err := sc.writeIssuer(&issuer) + require.NoError(t, err, "failed writing out issuer") + + newIssuer, err := sc.fetchIssuerById(issuer.ID) + require.NoError(t, err, "failed fetching issuer") + + require.Equal(t, uint(1), newIssuer.Version) + require.True(t, newIssuer.Usage.HasUsage(OCSPSigningUsage)) + + // If CRLSigning is not present on a v0, we should not have OCSP signing after upgrade. + issuer, _ = genIssuerAndKey(t, b, s) + issuer.Version = 0 + issuer.Usage.ToggleUsage(OCSPSigningUsage) + issuer.Usage.ToggleUsage(CRLSigningUsage) + + err = sc.writeIssuer(&issuer) + require.NoError(t, err, "failed writing out issuer") + + newIssuer, err = sc.fetchIssuerById(issuer.ID) + require.NoError(t, err, "failed fetching issuer") + + require.Equal(t, uint(1), newIssuer.Version) + require.False(t, newIssuer.Usage.HasUsage(OCSPSigningUsage)) +} + +func genIssuerAndKey(t *testing.T, b *backend, s logical.Storage) (issuerEntry, keyEntry) { + certBundle := genCertBundle(t, b, s) + + keyId := genKeyId() + + pkiKey := keyEntry{ + ID: keyId, + PrivateKeyType: certBundle.PrivateKeyType, + PrivateKey: strings.TrimSpace(certBundle.PrivateKey) + "\n", + } + + issuerId := genIssuerId() + + pkiIssuer := issuerEntry{ + ID: issuerId, + KeyID: keyId, + Certificate: strings.TrimSpace(certBundle.Certificate) + "\n", + CAChain: certBundle.CAChain, + SerialNumber: certBundle.SerialNumber, + Usage: AllIssuerUsages, + Version: latestIssuerVersion, + } + + return pkiIssuer, pkiKey +} + +func genCertBundle(t *testing.T, b *backend, s logical.Storage) *certutil.CertBundle { + // Pretty gross just to generate a cert bundle, but + fields := addCACommonFields(map[string]*framework.FieldSchema{}) + fields = addCAKeyGenerationFields(fields) + fields = addCAIssueFields(fields) + apiData := &framework.FieldData{ + Schema: fields, + Raw: map[string]interface{}{ + "exported": "internal", + "cn": "example.com", + "ttl": 3600, + }, + } + sc := b.makeStorageContext(ctx, s) + _, _, role, respErr := getGenerationParams(sc, apiData) + require.Nil(t, respErr) + + input := &inputBundle{ + req: &logical.Request{ + Operation: logical.UpdateOperation, + Path: "issue/testrole", + Storage: s, + }, + apiData: apiData, + role: role, + } + parsedCertBundle, _, err := generateCert(sc, input, nil, true, b.GetRandomReader()) + + require.NoError(t, err) + certBundle, err := parsedCertBundle.ToCertBundle() + require.NoError(t, err) + return certBundle +} + +func writeLegacyBundle(t *testing.T, b *backend, s logical.Storage, bundle *certutil.CertBundle) { + entry, err := logical.StorageEntryJSON(legacyCertBundlePath, bundle) + require.NoError(t, err) + + err = s.Put(context.Background(), entry) + require.NoError(t, err) +} diff --git a/builtin/logical/pki/storage_unified.go b/builtin/logical/pki/storage_unified.go new file mode 100644 index 0000000..28c656b --- /dev/null +++ b/builtin/logical/pki/storage_unified.go @@ -0,0 +1,91 @@ +// Copyright (c) 
HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "fmt" + "strings" + "time" + + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + unifiedRevocationReadPathPrefix = "unified-revocation/" + unifiedRevocationWritePathPrefix = unifiedRevocationReadPathPrefix + "{{clusterId}}/" +) + +type unifiedRevocationEntry struct { + SerialNumber string `json:"-"` + CertExpiration time.Time `json:"certificate_expiration_utc"` + RevocationTimeUTC time.Time `json:"revocation_time_utc"` + CertificateIssuer issuerID `json:"issuer_id"` +} + +func getUnifiedRevocationBySerial(sc *storageContext, serial string) (*unifiedRevocationEntry, error) { + clusterPaths, err := lookupUnifiedClusterPaths(sc) + if err != nil { + return nil, err + } + + for _, path := range clusterPaths { + serialPath := path + serial + entryRaw, err := sc.Storage.Get(sc.Context, serialPath) + if err != nil { + return nil, err + } + + if entryRaw != nil { + var revEntry unifiedRevocationEntry + if err := entryRaw.DecodeJSON(&revEntry); err != nil { + return nil, fmt.Errorf("failed json decoding of unified entry at path %s: %w", serialPath, err) + } + revEntry.SerialNumber = serial + return &revEntry, nil + } + } + + return nil, nil +} + +func writeUnifiedRevocationEntry(sc *storageContext, ure *unifiedRevocationEntry) error { + json, err := logical.StorageEntryJSON(unifiedRevocationWritePathPrefix+normalizeSerial(ure.SerialNumber), ure) + if err != nil { + return err + } + + return sc.Storage.Put(sc.Context, json) +} + +// listClusterSpecificUnifiedRevokedCerts returns a list of revoked certificates from a given cluster +func listClusterSpecificUnifiedRevokedCerts(sc *storageContext, clusterId string) ([]string, error) { + path := unifiedRevocationReadPathPrefix + clusterId + "/" + serials, err := sc.Storage.List(sc.Context, path) + if err != nil { + return nil, err + } + + return serials, nil +} + +// lookupUnifiedClusterPaths returns a map of cluster id to the prefix storage path for that given cluster's +// unified revoked certificates +func lookupUnifiedClusterPaths(sc *storageContext) (map[string]string, error) { + fullPaths := map[string]string{} + + clusterPaths, err := sc.Storage.List(sc.Context, unifiedRevocationReadPathPrefix) + if err != nil { + return nil, err + } + + for _, clusterIdWithSlash := range clusterPaths { + // Only include folder listings, if a file were to be stored under this path ignore it. + if strings.HasSuffix(clusterIdWithSlash, "/") { + clusterId := clusterIdWithSlash[:len(clusterIdWithSlash)-1] // remove trailing / + fullPaths[clusterId] = unifiedRevocationReadPathPrefix + clusterIdWithSlash + } + } + + return fullPaths, nil +} diff --git a/builtin/logical/pki/test_helpers.go b/builtin/logical/pki/test_helpers.go new file mode 100644 index 0000000..b2286e7 --- /dev/null +++ b/builtin/logical/pki/test_helpers.go @@ -0,0 +1,438 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "context" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/pem" + "fmt" + "io" + "math" + "math/big" + http2 "net/http" + "strings" + "testing" + "time" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/require" + "golang.org/x/crypto/ocsp" +) + +// Setup helpers +func CreateBackendWithStorage(t testing.TB) (*backend, logical.Storage) { + t.Helper() + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + + var err error + b := Backend(config) + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + // Assume for our tests we have performed the migration already. + b.pkiStorageVersion.Store(1) + return b, config.StorageView +} + +func mountPKIEndpoint(t testing.TB, client *api.Client, path string) { + t.Helper() + + err := client.Sys().Mount(path, &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "32h", + }, + }) + require.NoError(t, err, "failed mounting pki endpoint") +} + +// Signing helpers +func requireSignedBy(t *testing.T, cert *x509.Certificate, signingCert *x509.Certificate) { + t.Helper() + + if err := cert.CheckSignatureFrom(signingCert); err != nil { + t.Fatalf("signature verification failed: %v", err) + } +} + +func requireSignedByAtPath(t *testing.T, client *api.Client, leaf *x509.Certificate, path string) { + t.Helper() + + resp, err := client.Logical().Read(path) + require.NoError(t, err, "got unexpected error fetching parent certificate") + require.NotNil(t, resp, "missing response when fetching parent certificate") + require.NotNil(t, resp.Data, "missing data from parent certificate response") + require.NotNil(t, resp.Data["certificate"], "missing certificate field on parent read response") + + parentCert := resp.Data["certificate"].(string) + parent := parseCert(t, parentCert) + + requireSignedBy(t, leaf, parent) +} + +// Certificate helper +func parseCert(t *testing.T, pemCert string) *x509.Certificate { + t.Helper() + + block, _ := pem.Decode([]byte(pemCert)) + require.NotNil(t, block, "failed to decode PEM block") + + cert, err := x509.ParseCertificate(block.Bytes) + require.NoError(t, err) + return cert +} + +func requireMatchingPublicKeys(t *testing.T, cert *x509.Certificate, key crypto.PublicKey) { + t.Helper() + + certPubKey := cert.PublicKey + areEqual, err := certutil.ComparePublicKeysAndType(certPubKey, key) + require.NoError(t, err, "failed comparing public keys: %#v", err) + require.True(t, areEqual, "public keys mismatched: got: %v, expected: %v", certPubKey, key) +} + +func getSelfSigned(t *testing.T, subject, issuer *x509.Certificate, key *rsa.PrivateKey) (string, *x509.Certificate) { + t.Helper() + selfSigned, err := x509.CreateCertificate(rand.Reader, subject, issuer, key.Public(), key) + if err != nil { + t.Fatal(err) + } + cert, err := x509.ParseCertificate(selfSigned) + if err != nil { + t.Fatal(err) + } + pemSS := strings.TrimSpace(string(pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: selfSigned, + }))) + return pemSS, cert +} + +// CRL related helpers +func getCrlCertificateList(t *testing.T, client *api.Client, mountPoint string) pkix.TBSCertificateList { + t.Helper() + + path := fmt.Sprintf("/v1/%s/crl", mountPoint) + return getParsedCrlAtPath(t, client, path).TBSCertList +} + +func 
parseCrlPemBytes(t *testing.T, crlPem []byte) pkix.TBSCertificateList { + t.Helper() + + certList, err := x509.ParseCRL(crlPem) + require.NoError(t, err) + return certList.TBSCertList +} + +func requireSerialNumberInCRL(t *testing.T, revokeList pkix.TBSCertificateList, serialNum string) bool { + if t != nil { + t.Helper() + } + + serialsInList := make([]string, 0, len(revokeList.RevokedCertificates)) + for _, revokeEntry := range revokeList.RevokedCertificates { + formattedSerial := certutil.GetHexFormatted(revokeEntry.SerialNumber.Bytes(), ":") + serialsInList = append(serialsInList, formattedSerial) + if formattedSerial == serialNum { + return true + } + } + + if t != nil { + t.Fatalf("the serial number %s, was not found in the CRL list containing: %v", serialNum, serialsInList) + } + + return false +} + +func getParsedCrl(t *testing.T, client *api.Client, mountPoint string) *pkix.CertificateList { + t.Helper() + + path := fmt.Sprintf("/v1/%s/crl", mountPoint) + return getParsedCrlAtPath(t, client, path) +} + +func getParsedCrlAtPath(t *testing.T, client *api.Client, path string) *pkix.CertificateList { + t.Helper() + + req := client.NewRequest("GET", path) + resp, err := client.RawRequest(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + + crlBytes, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("err: %s", err) + } + if len(crlBytes) == 0 { + t.Fatalf("expected CRL in response body") + } + + crl, err := x509.ParseDERCRL(crlBytes) + if err != nil { + t.Fatal(err) + } + return crl +} + +func getParsedCrlFromBackend(t *testing.T, b *backend, s logical.Storage, path string) *pkix.CertificateList { + t.Helper() + + resp, err := CBRead(b, s, path) + if err != nil { + t.Fatal(err) + } + + crl, err := x509.ParseDERCRL(resp.Data[logical.HTTPRawBody].([]byte)) + if err != nil { + t.Fatal(err) + } + return crl +} + +// Direct storage backend helpers (b, s := createBackendWithStorage(t)) which +// are mostly compatible with client.Logical() operations. The main difference +// is that the JSON round-tripping hasn't occurred, so values are as the +// backend returns them (e.g., []string instead of []interface{}). 
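+//
+// As a rough usage sketch (illustrative only; "roles/example" is a
+// hypothetical path, not one used by these tests):
+//
+//	b, s := CreateBackendWithStorage(t)
+//	resp, err := CBWrite(b, s, "roles/example", map[string]interface{}{
+//		"allow_any_name": true,
+//	})
+//	require.NoError(t, err)
+//	// Values in resp.Data arrive un-round-tripped, e.g. []string rather
+//	// than []interface{}.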
+func CBReq(b *backend, s logical.Storage, operation logical.Operation, path string, data map[string]interface{}) (*logical.Response, error) { + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: operation, + Path: path, + Data: data, + Storage: s, + MountPoint: "pki/", + }) + if err != nil || resp == nil { + return resp, err + } + + if msg, ok := resp.Data["error"]; ok && msg != nil && len(msg.(string)) > 0 { + return resp, fmt.Errorf("%s", msg) + } + + return resp, nil +} + +func CBHeader(b *backend, s logical.Storage, path string) (*logical.Response, error) { + return CBReq(b, s, logical.HeaderOperation, path, make(map[string]interface{})) +} + +func CBRead(b *backend, s logical.Storage, path string) (*logical.Response, error) { + return CBReq(b, s, logical.ReadOperation, path, make(map[string]interface{})) +} + +func CBWrite(b *backend, s logical.Storage, path string, data map[string]interface{}) (*logical.Response, error) { + return CBReq(b, s, logical.UpdateOperation, path, data) +} + +func CBPatch(b *backend, s logical.Storage, path string, data map[string]interface{}) (*logical.Response, error) { + return CBReq(b, s, logical.PatchOperation, path, data) +} + +func CBList(b *backend, s logical.Storage, path string) (*logical.Response, error) { + return CBReq(b, s, logical.ListOperation, path, make(map[string]interface{})) +} + +func CBDelete(b *backend, s logical.Storage, path string) (*logical.Response, error) { + return CBReq(b, s, logical.DeleteOperation, path, make(map[string]interface{})) +} + +func requireFieldsSetInResp(t *testing.T, resp *logical.Response, fields ...string) { + t.Helper() + + var missingFields []string + for _, field := range fields { + value, ok := resp.Data[field] + if !ok || value == nil { + missingFields = append(missingFields, field) + } + } + + require.Empty(t, missingFields, "The following fields were required but missing from response:\n%v", resp.Data) +} + +func requireSuccessNonNilResponse(t *testing.T, resp *logical.Response, err error, msgAndArgs ...interface{}) { + t.Helper() + + require.NoError(t, err, msgAndArgs...) + if resp.IsError() { + errContext := fmt.Sprintf("Expected successful response but got error: %v", resp.Error()) + require.Falsef(t, resp.IsError(), errContext, msgAndArgs...) + } + require.NotNil(t, resp, msgAndArgs...) +} + +func requireSuccessNilResponse(t *testing.T, resp *logical.Response, err error, msgAndArgs ...interface{}) { + t.Helper() + + require.NoError(t, err, msgAndArgs...) + if resp.IsError() { + errContext := fmt.Sprintf("Expected successful response but got error: %v", resp.Error()) + require.Falsef(t, resp.IsError(), errContext, msgAndArgs...) + } + if resp != nil { + msg := fmt.Sprintf("expected nil response but got: %v", resp) + require.Nilf(t, resp, msg, msgAndArgs...) 
+	}
+}
+
+func getCRLNumber(t *testing.T, crl pkix.TBSCertificateList) int {
+	t.Helper()
+
+	for _, extension := range crl.Extensions {
+		if extension.Id.Equal(certutil.CRLNumberOID) {
+			bigInt := new(big.Int)
+			leftOver, err := asn1.Unmarshal(extension.Value, &bigInt)
+			require.NoError(t, err, "Failed unmarshalling crl number extension")
+			require.Empty(t, leftOver, "leftover bytes from unmarshalling crl number extension")
+			require.True(t, bigInt.IsInt64(), "parsed crl number integer is not an int64")
+			require.False(t, math.MaxInt <= bigInt.Int64(), "parsed crl number integer can not fit in an int")
+			return int(bigInt.Int64())
+		}
+	}
+
+	t.Fatalf("failed to find crl number extension")
+	return 0
+}
+
+func getCrlReferenceFromDelta(t *testing.T, crl pkix.TBSCertificateList) int {
+	t.Helper()
+
+	for _, extension := range crl.Extensions {
+		if extension.Id.Equal(certutil.DeltaCRLIndicatorOID) {
+			bigInt := new(big.Int)
+			leftOver, err := asn1.Unmarshal(extension.Value, &bigInt)
+			require.NoError(t, err, "Failed unmarshalling delta crl indicator extension")
+			require.Empty(t, leftOver, "leftover bytes from unmarshalling delta crl indicator extension")
+			require.True(t, bigInt.IsInt64(), "parsed delta crl integer is not an int64")
+			require.False(t, math.MaxInt <= bigInt.Int64(), "parsed delta crl integer can not fit in an int")
+			return int(bigInt.Int64())
+		}
+	}
+
+	t.Fatalf("failed to find delta crl indicator extension")
+	return 0
+}
+
+// waitForUpdatedCrl waits until the CRL at the provided path has been
+// reloaded, waiting up to maxWait and failing the test if that timeout is
+// reached. If a negative value for lastSeenCRLNumber is provided, the method
+// loads the current CRL and waits for a newer CRL to be generated.
+func waitForUpdatedCrl(t *testing.T, client *api.Client, crlPath string, lastSeenCRLNumber int, maxWait time.Duration) pkix.TBSCertificateList {
+	t.Helper()
+
+	newCrl, didTimeOut := waitForUpdatedCrlUntil(t, client, crlPath, lastSeenCRLNumber, maxWait)
+	if didTimeOut {
+		t.Fatalf("Timed out waiting for new CRL rebuild on path %s", crlPath)
+	}
+	return newCrl.TBSCertList
+}
+
+// waitForUpdatedCrlUntil is a helper that waits for a CRL to be updated for
+// up to maxWait, then gives up and returns the last CRL it loaded. Unlike
+// waitForUpdatedCrl, it does not fail the test if no new CRL appears within
+// the max duration. It returns the last CRL loaded from the provided path
+// and a boolean indicating whether the maxWait duration was hit.
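+//
+// A typical polling pattern looks like (an illustrative sketch; the mount
+// path is arbitrary):
+//
+//	crl, timedOut := waitForUpdatedCrlUntil(t, client, "/v1/pki/crl", -1, 10*time.Second)
+//	if timedOut {
+//		t.Logf("no CRL rebuild observed; last seen:\n%s", summarizeCrl(t, crl.TBSCertList))
+//	}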
+func waitForUpdatedCrlUntil(t *testing.T, client *api.Client, crlPath string, lastSeenCrlNumber int, maxWait time.Duration) (*pkix.CertificateList, bool) { + t.Helper() + + crl := getParsedCrlAtPath(t, client, crlPath) + initialCrlRevision := getCRLNumber(t, crl.TBSCertList) + newCrlRevision := initialCrlRevision + + // Short circuit the fetches if we have a version of the CRL we want + if lastSeenCrlNumber > 0 && getCRLNumber(t, crl.TBSCertList) > lastSeenCrlNumber { + return crl, false + } + + start := time.Now() + iteration := 0 + for { + iteration++ + + if time.Since(start) > maxWait { + t.Logf("Timed out waiting for new CRL on path %s after iteration %d, delay: %v", + crlPath, iteration, time.Now().Sub(start)) + return crl, true + } + + crl = getParsedCrlAtPath(t, client, crlPath) + newCrlRevision = getCRLNumber(t, crl.TBSCertList) + if newCrlRevision > initialCrlRevision { + t.Logf("Got new revision of CRL %s from %d to %d after iteration %d, delay %v", + crlPath, initialCrlRevision, newCrlRevision, iteration, time.Now().Sub(start)) + return crl, false + } + + time.Sleep(100 * time.Millisecond) + } +} + +// A quick CRL to string to provide better test error messages +func summarizeCrl(t *testing.T, crl pkix.TBSCertificateList) string { + version := getCRLNumber(t, crl) + serials := []string{} + for _, cert := range crl.RevokedCertificates { + serials = append(serials, normalizeSerialFromBigInt(cert.SerialNumber)) + } + return fmt.Sprintf("CRL Version: %d\n"+ + "This Update: %s\n"+ + "Next Update: %s\n"+ + "Revoked Serial Count: %d\n"+ + "Revoked Serials: %v", version, crl.ThisUpdate, crl.NextUpdate, len(serials), serials) +} + +// OCSP helpers +func generateRequest(t *testing.T, requestHash crypto.Hash, cert *x509.Certificate, issuer *x509.Certificate) []byte { + t.Helper() + + opts := &ocsp.RequestOptions{Hash: requestHash} + ocspRequestDer, err := ocsp.CreateRequest(cert, issuer, opts) + require.NoError(t, err, "Failed generating OCSP request") + return ocspRequestDer +} + +func requireOcspResponseSignedBy(t *testing.T, ocspResp *ocsp.Response, issuer *x509.Certificate) { + t.Helper() + + err := ocspResp.CheckSignatureFrom(issuer) + require.NoError(t, err, "Failed signature verification of ocsp response: %w", err) +} + +func performOcspPost(t *testing.T, cert *x509.Certificate, issuerCert *x509.Certificate, client *api.Client, ocspPath string) *ocsp.Response { + t.Helper() + + baseClient := client.WithNamespace("") + + ocspReq := generateRequest(t, crypto.SHA256, cert, issuerCert) + ocspPostReq := baseClient.NewRequest(http2.MethodPost, ocspPath) + ocspPostReq.Headers.Set("Content-Type", "application/ocsp-request") + ocspPostReq.BodyBytes = ocspReq + rawResp, err := baseClient.RawRequest(ocspPostReq) + require.NoError(t, err, "failed sending unified-ocsp post request") + + require.Equal(t, 200, rawResp.StatusCode) + require.Equal(t, ocspResponseContentType, rawResp.Header.Get("Content-Type")) + bodyReader := rawResp.Body + respDer, err := io.ReadAll(bodyReader) + bodyReader.Close() + require.NoError(t, err, "failed reading response body") + + ocspResp, err := ocsp.ParseResponse(respDer, issuerCert) + require.NoError(t, err, "parsing ocsp get response") + return ocspResp +} diff --git a/builtin/logical/pki/util.go b/builtin/logical/pki/util.go new file mode 100644 index 0000000..d90e055 --- /dev/null +++ b/builtin/logical/pki/util.go @@ -0,0 +1,503 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package pki + +import ( + "crypto" + "crypto/x509" + "fmt" + "math/big" + "net/http" + "regexp" + "strings" + "sync" + "time" + + "github.com/hashicorp/vault/sdk/framework" + + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + managedKeyNameArg = "managed_key_name" + managedKeyIdArg = "managed_key_id" + defaultRef = "default" + + // Constants for If-Modified-Since operation + headerIfModifiedSince = "If-Modified-Since" + headerLastModified = "Last-Modified" +) + +var ( + nameMatcher = regexp.MustCompile("^" + framework.GenericNameRegex(issuerRefParam) + "$") + errIssuerNameInUse = errutil.UserError{Err: "issuer name already in use"} + errIssuerNameIsEmpty = errutil.UserError{Err: "expected non-empty issuer name"} + errKeyNameInUse = errutil.UserError{Err: "key name already in use"} +) + +func serialFromCert(cert *x509.Certificate) string { + return serialFromBigInt(cert.SerialNumber) +} + +func serialFromBigInt(serial *big.Int) string { + return strings.TrimSpace(certutil.GetHexFormatted(serial.Bytes(), ":")) +} + +func normalizeSerialFromBigInt(serial *big.Int) string { + return strings.TrimSpace(certutil.GetHexFormatted(serial.Bytes(), "-")) +} + +func normalizeSerial(serial string) string { + return strings.ReplaceAll(strings.ToLower(serial), ":", "-") +} + +func denormalizeSerial(serial string) string { + return strings.ReplaceAll(strings.ToLower(serial), "-", ":") +} + +func serialToBigInt(serial string) (*big.Int, bool) { + norm := normalizeSerial(serial) + hex := strings.ReplaceAll(norm, "-", "") + return big.NewInt(0).SetString(hex, 16) +} + +func kmsRequested(input *inputBundle) bool { + return kmsRequestedFromFieldData(input.apiData) +} + +func kmsRequestedFromFieldData(data *framework.FieldData) bool { + exportedStr, ok := data.GetOk("exported") + if !ok { + return false + } + return exportedStr.(string) == "kms" +} + +func existingKeyRequested(input *inputBundle) bool { + return existingKeyRequestedFromFieldData(input.apiData) +} + +func existingKeyRequestedFromFieldData(data *framework.FieldData) bool { + exportedStr, ok := data.GetOk("exported") + if !ok { + return false + } + return exportedStr.(string) == "existing" +} + +type managedKeyId interface { + String() string +} + +type ( + UUIDKey string + NameKey string +) + +func (u UUIDKey) String() string { + return string(u) +} + +func (n NameKey) String() string { + return string(n) +} + +type managedKeyInfo struct { + publicKey crypto.PublicKey + keyType certutil.PrivateKeyType + name NameKey + uuid UUIDKey +} + +// getManagedKeyId returns a NameKey or a UUIDKey, whichever was specified in the +// request API data. 
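+// Exactly one of managed_key_name or managed_key_id must be supplied; for
+// example (an illustrative sketch of the selection behavior; "my-key" is a
+// hypothetical name):
+//
+//	keyId, err := getManagedKeyId(data) // data carries "managed_key_name": "my-key"
+//	// keyId is NameKey("my-key"); had "managed_key_id" been set instead,
+//	// it would be a UUIDKey.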
+func getManagedKeyId(data *framework.FieldData) (managedKeyId, error) { + name, UUID, err := getManagedKeyNameOrUUID(data) + if err != nil { + return nil, err + } + + var keyId managedKeyId = NameKey(name) + if len(UUID) > 0 { + keyId = UUIDKey(UUID) + } + + return keyId, nil +} + +func getKeyRefWithErr(data *framework.FieldData) (string, error) { + keyRef := getKeyRef(data) + + if len(keyRef) == 0 { + return "", errutil.UserError{Err: "missing argument key_ref for existing type"} + } + + return keyRef, nil +} + +func getManagedKeyNameOrUUID(data *framework.FieldData) (name string, UUID string, err error) { + getApiData := func(argName string) (string, error) { + arg, ok := data.GetOk(argName) + if !ok { + return "", nil + } + + argValue, ok := arg.(string) + if !ok { + return "", errutil.UserError{Err: fmt.Sprintf("invalid type for argument %s", argName)} + } + + return strings.TrimSpace(argValue), nil + } + + keyName, err := getApiData(managedKeyNameArg) + keyUUID, err2 := getApiData(managedKeyIdArg) + switch { + case err != nil: + return "", "", err + case err2 != nil: + return "", "", err2 + case len(keyName) == 0 && len(keyUUID) == 0: + return "", "", errutil.UserError{Err: fmt.Sprintf("missing argument %s or %s", managedKeyNameArg, managedKeyIdArg)} + case len(keyName) > 0 && len(keyUUID) > 0: + return "", "", errutil.UserError{Err: fmt.Sprintf("only one argument of %s or %s should be specified", managedKeyNameArg, managedKeyIdArg)} + } + + return keyName, keyUUID, nil +} + +func getIssuerName(sc *storageContext, data *framework.FieldData) (string, error) { + issuerName := "" + issuerNameIface, ok := data.GetOk("issuer_name") + if ok { + issuerName = strings.TrimSpace(issuerNameIface.(string)) + if len(issuerName) == 0 { + return issuerName, errIssuerNameIsEmpty + } + if strings.ToLower(issuerName) == defaultRef { + return issuerName, errutil.UserError{Err: "reserved keyword 'default' can not be used as issuer name"} + } + if !nameMatcher.MatchString(issuerName) { + return issuerName, errutil.UserError{Err: "issuer name contained invalid characters"} + } + issuerId, err := sc.resolveIssuerReference(issuerName) + if err == nil { + return issuerName, errIssuerNameInUse + } + + if err != nil && issuerId != IssuerRefNotFound { + return issuerName, errutil.InternalError{Err: err.Error()} + } + } + return issuerName, nil +} + +func getKeyName(sc *storageContext, data *framework.FieldData) (string, error) { + keyName := "" + keyNameIface, ok := data.GetOk(keyNameParam) + if ok { + keyName = strings.TrimSpace(keyNameIface.(string)) + + if strings.ToLower(keyName) == defaultRef { + return "", errutil.UserError{Err: "reserved keyword 'default' can not be used as key name"} + } + + if !nameMatcher.MatchString(keyName) { + return "", errutil.UserError{Err: "key name contained invalid characters"} + } + keyId, err := sc.resolveKeyReference(keyName) + if err == nil { + return "", errKeyNameInUse + } + + if err != nil && keyId != KeyRefNotFound { + return "", errutil.InternalError{Err: err.Error()} + } + } + return keyName, nil +} + +func getIssuerRef(data *framework.FieldData) string { + return extractRef(data, issuerRefParam) +} + +func getKeyRef(data *framework.FieldData) string { + return extractRef(data, keyRefParam) +} + +func extractRef(data *framework.FieldData, paramName string) string { + value := strings.TrimSpace(data.Get(paramName).(string)) + if strings.EqualFold(value, defaultRef) { + return defaultRef + } + return value +} + +func isStringArrayDifferent(a, b []string) bool { + if 
len(a) != len(b) { + return true + } + + for i, v := range a { + if v != b[i] { + return true + } + } + + return false +} + +func hasHeader(header string, req *logical.Request) bool { + var hasHeader bool + headerValue := req.Headers[header] + if len(headerValue) > 0 { + hasHeader = true + } + + return hasHeader +} + +func parseIfNotModifiedSince(req *logical.Request) (time.Time, error) { + var headerTimeValue time.Time + headerValue := req.Headers[headerIfModifiedSince] + + headerTimeValue, err := time.Parse(time.RFC1123, headerValue[0]) + if err != nil { + return headerTimeValue, fmt.Errorf("failed to parse given value for '%s' header: %w", headerIfModifiedSince, err) + } + + return headerTimeValue, nil +} + +type ifModifiedReqType int + +const ( + ifModifiedUnknown ifModifiedReqType = iota + ifModifiedCA = iota + ifModifiedCRL = iota + ifModifiedDeltaCRL = iota + ifModifiedUnifiedCRL = iota + ifModifiedUnifiedDeltaCRL = iota +) + +type IfModifiedSinceHelper struct { + req *logical.Request + reqType ifModifiedReqType + issuerRef issuerID +} + +func sendNotModifiedResponseIfNecessary(helper *IfModifiedSinceHelper, sc *storageContext, resp *logical.Response) (bool, error) { + responseHeaders := map[string][]string{} + if !hasHeader(headerIfModifiedSince, helper.req) { + return false, nil + } + + before, err := sc.isIfModifiedSinceBeforeLastModified(helper, responseHeaders) + if err != nil { + return false, err + } + + if !before { + return false, nil + } + + // Fill response + resp.Data = map[string]interface{}{ + logical.HTTPContentType: "", + logical.HTTPStatusCode: 304, + } + resp.Headers = responseHeaders + + return true, nil +} + +func (sc *storageContext) isIfModifiedSinceBeforeLastModified(helper *IfModifiedSinceHelper, responseHeaders map[string][]string) (bool, error) { + // False return --> we were last modified _before_ the requester's + // time --> keep using the cached copy and return 304. + var err error + var lastModified time.Time + ifModifiedSince, err := parseIfNotModifiedSince(helper.req) + if err != nil { + return false, err + } + + switch helper.reqType { + case ifModifiedCRL, ifModifiedDeltaCRL: + if sc.Backend.crlBuilder.invalidate.Load() { + // When we see the CRL is invalidated, respond with false + // regardless of what the local CRL state says. We've likely + // renamed some issuers or are about to rebuild a new CRL.... + // + // We do this earlier, ahead of config load, as it saves us a + // potential error condition. + return false, nil + } + + crlConfig, err := sc.getLocalCRLConfig() + if err != nil { + return false, err + } + + lastModified = crlConfig.LastModified + if helper.reqType == ifModifiedDeltaCRL { + lastModified = crlConfig.DeltaLastModified + } + case ifModifiedUnifiedCRL, ifModifiedUnifiedDeltaCRL: + if sc.Backend.crlBuilder.invalidate.Load() { + // When we see the CRL is invalidated, respond with false + // regardless of what the local CRL state says. We've likely + // renamed some issuers or are about to rebuild a new CRL.... + // + // We do this earlier, ahead of config load, as it saves us a + // potential error condition. 
+			return false, nil
+		}
+
+		crlConfig, err := sc.getUnifiedCRLConfig()
+		if err != nil {
+			return false, err
+		}
+
+		lastModified = crlConfig.LastModified
+		if helper.reqType == ifModifiedUnifiedDeltaCRL {
+			lastModified = crlConfig.DeltaLastModified
+		}
+	case ifModifiedCA:
+		issuerId, err := sc.resolveIssuerReference(string(helper.issuerRef))
+		if err != nil {
+			return false, err
+		}
+
+		issuer, err := sc.fetchIssuerById(issuerId)
+		if err != nil {
+			return false, err
+		}
+
+		lastModified = issuer.LastModified
+	default:
+		return false, fmt.Errorf("unknown if-modified-since request type: %v", helper.reqType)
+	}
+
+	if !lastModified.IsZero() && lastModified.Before(ifModifiedSince) {
+		responseHeaders[headerLastModified] = []string{lastModified.Format(http.TimeFormat)}
+		return true, nil
+	}
+
+	return false, nil
+}
+
+func addWarnings(resp *logical.Response, warnings []string) *logical.Response {
+	for _, warning := range warnings {
+		resp.AddWarning(warning)
+	}
+	return resp
+}
+
+// revocationQueue is a type for allowing invalidateFunc to continue operating
+// quickly, while letting periodicFunc slowly sort through all open
+// revocations to process. In particular, we do not wish to be holding this
+// lock while periodicFunc is running, so iteration returns a full copy of
+// the data in this queue. We use a map from serial->[]clusterId, allowing us
+// to quickly insert and remove items, without using a slice of tuples. One
+// serial might be present on two clusters, if both clusters have the cert
+// stored locally (e.g., via BYOC); that results in two confirmation entries
+// and hence the need for []clusterId. This layout also lets us avoid having
+// duplicate entries.
+type revocationQueue struct {
+	_l    sync.Mutex
+	queue map[string][]string
+}
+
+func newRevocationQueue() *revocationQueue {
+	return &revocationQueue{
+		queue: make(map[string][]string),
+	}
+}
+
+func (q *revocationQueue) Add(items ...*revocationQueueEntry) {
+	q._l.Lock()
+	defer q._l.Unlock()
+
+	for _, item := range items {
+		var found bool
+		for _, cluster := range q.queue[item.Serial] {
+			if cluster == item.Cluster {
+				found = true
+				break
+			}
+		}
+
+		if !found {
+			q.queue[item.Serial] = append(q.queue[item.Serial], item.Cluster)
+		}
+	}
+}
+
+func (q *revocationQueue) Remove(item *revocationQueueEntry) {
+	q._l.Lock()
+	defer q._l.Unlock()
+
+	clusters, present := q.queue[item.Serial]
+	if !present {
+		return
+	}
+
+	if len(clusters) == 0 || (len(clusters) == 1 && clusters[0] == item.Cluster) {
+		delete(q.queue, item.Serial)
+		return
+	}
+
+	result := clusters
+	for index, cluster := range clusters {
+		if cluster == item.Cluster {
+			result = append(clusters[0:index], clusters[index+1:]...)
+			break
+		}
+	}
+
+	q.queue[item.Serial] = result
+}
+
+// As this doesn't depend on any internal state, it should not be called
+// unless it is OK to remove any items added since the last Iterate()
+// function call.
+func (q *revocationQueue) RemoveAll() {
+	q._l.Lock()
+	defer q._l.Unlock()
+
+	q.queue = make(map[string][]string)
+}
+
+func (q *revocationQueue) Iterate() []*revocationQueueEntry {
+	q._l.Lock()
+	defer q._l.Unlock()
+
+	// Heuristic: because we store by serial, we'll occasionally get double
+	// entries (when a serial was revoked on more than one cluster), so the
+	// len(q.queue) capacity below may slightly underestimate the final size
+	// of this list, but it is usually close.
+	ret := make([]*revocationQueueEntry, 0, len(q.queue))
+
+	for serial, clusters := range q.queue {
+		for _, cluster := range clusters {
+			ret = append(ret, &revocationQueueEntry{
+				Serial:  serial,
+				Cluster: cluster,
+			})
+		}
+	}
+
+	return ret
+}
+
+// sliceToMapKey returns a map whose keys are the entries of the given slice.
+func sliceToMapKey(s []string) map[string]struct{} {
+	var empty struct{}
+	myMap := make(map[string]struct{}, len(s))
+	for _, s := range s {
+		myMap[s] = empty
+	}
+	return myMap
+}
diff --git a/builtin/logical/pkiext/README.md b/builtin/logical/pkiext/README.md
new file mode 100644
index 0000000..40364e0
--- /dev/null
+++ b/builtin/logical/pkiext/README.md
@@ -0,0 +1,9 @@
+# What is `pkiext`?
+
+`pkiext` exists to split the Docker tests into a separate package from the
+main PKI tests. Because the Docker tests execute in a smaller runner with
+fewer resources, and we were hitting timeouts waiting for the entire PKI
+test suite to run, we split the larger non-Docker PKI tests from the
+smaller Docker tests to ensure the former can execute.
+
+This package should lack any non-test-related targets.
diff --git a/builtin/logical/pkiext/nginx_test.go b/builtin/logical/pkiext/nginx_test.go
new file mode 100644
index 0000000..1532f3e
--- /dev/null
+++ b/builtin/logical/pkiext/nginx_test.go
@@ -0,0 +1,638 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package pkiext
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"strconv"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/vault/builtin/logical/pki"
+	"github.com/hashicorp/vault/sdk/helper/docker"
+	"github.com/stretchr/testify/require"
+)
+
+var (
+	cwRunner                 *docker.Runner
+	builtNetwork             string
+	buildClientContainerOnce sync.Once
+)
+
+const (
+	protectedFile    = `dadgarcorp-internal-protected`
+	unprotectedFile  = `hello-world`
+	failureIndicator = `THIS-TEST-SHOULD-FAIL`
+	uniqueHostname   = `dadgarcorpvaultpkitestingnginxwgetcurlcontainersexample.com`
+	containerName    = `vault_pki_nginx_integration`
+)
+
+func buildNginxContainer(t *testing.T, root string, crl string, chain string, private string) (func(), string, int, string, string, int) {
+	containerfile := `
+FROM nginx:latest
+
+RUN mkdir /www /etc/nginx/ssl && rm /etc/nginx/conf.d/*.conf
+
+COPY testing.conf /etc/nginx/conf.d/
+COPY root.pem /etc/nginx/ssl/root.pem
+COPY fullchain.pem /etc/nginx/ssl/fullchain.pem
+COPY privkey.pem /etc/nginx/ssl/privkey.pem
+COPY crl.pem /etc/nginx/ssl/crl.pem
+COPY /data /www/data
+`
+
+	siteConfig := `
+server {
+    listen 80;
+    listen [::]:80;
+
+    location / {
+        return 301 $request_uri;
+    }
+}
+
+server {
+    listen 443 ssl;
+    listen [::]:443 ssl;
+
+    ssl_certificate /etc/nginx/ssl/fullchain.pem;
+    ssl_certificate_key /etc/nginx/ssl/privkey.pem;
+
+    ssl_client_certificate /etc/nginx/ssl/root.pem;
+    ssl_crl /etc/nginx/ssl/crl.pem;
+    ssl_verify_client optional;
+
+    # Magic per: https://serverfault.com/questions/891603/nginx-reverse-proxy-with-optional-ssl-client-authentication
+    # Only necessary since we're too lazy to set up two different subdomains.
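+    # A sketch of what the variable dance below does: every request starts
+    # 'open'; requests for /protected* flip the state to 'closed'; a failed
+    # (or absent) client-certificate verification appends '-fail'; and only
+    # the 'closed-fail' combination is rejected with a 403.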
+ set $ssl_status 'open'; + if ($request_uri ~ protected) { + set $ssl_status 'closed'; + } + + if ($ssl_client_verify != SUCCESS) { + set $ssl_status "$ssl_status-fail"; + } + + if ($ssl_status = "closed-fail") { + return 403; + } + + location / { + root /www/data; + } +} +` + + bCtx := docker.NewBuildContext() + bCtx["testing.conf"] = docker.PathContentsFromString(siteConfig) + bCtx["root.pem"] = docker.PathContentsFromString(root) + bCtx["fullchain.pem"] = docker.PathContentsFromString(chain) + bCtx["privkey.pem"] = docker.PathContentsFromString(private) + bCtx["crl.pem"] = docker.PathContentsFromString(crl) + bCtx["/data/index.html"] = docker.PathContentsFromString(unprotectedFile) + bCtx["/data/protected.html"] = docker.PathContentsFromString(protectedFile) + + imageName := "vault_pki_nginx_integration" + suffix, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("error generating unique suffix: %v", err) + } + imageTag := suffix + + runner, err := docker.NewServiceRunner(docker.RunOptions{ + ImageRepo: imageName, + ImageTag: imageTag, + ContainerName: containerName, + Ports: []string{"443/tcp"}, + LogConsumer: func(s string) { + if t.Failed() { + t.Logf("container logs: %s", s) + } + }, + }) + if err != nil { + t.Fatalf("Could not provision docker service runner: %s", err) + } + + ctx := context.Background() + output, err := runner.BuildImage(ctx, containerfile, bCtx, + docker.BuildRemove(true), docker.BuildForceRemove(true), + docker.BuildPullParent(true), + docker.BuildTags([]string{imageName + ":" + imageTag})) + if err != nil { + t.Fatalf("Could not build new image: %v", err) + } + + t.Logf("Image build output: %v", string(output)) + + svc, err := runner.StartService(ctx, func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { + // Nginx loads fast, we're too lazy to validate this properly. + time.Sleep(5 * time.Second) + return docker.NewServiceHostPort(host, port), nil + }) + if err != nil { + t.Fatalf("Could not start nginx container: %v", err) + } + + // We also need to find the network address of this node, and return + // the non-local address associated with it so that we can spawn the + // client command on the correct network/port. + networks, err := runner.GetNetworkAndAddresses(svc.Container.ID) + if err != nil { + t.Fatalf("Could not interrogate container for addresses: %v", err) + } + + var networkName string + var networkAddr string + for name, addr := range networks { + if addr == "" { + continue + } + + networkName = name + networkAddr = addr + break + } + + if networkName == "" || networkAddr == "" { + t.Fatalf("failed to get network info for containers: empty network address: %v", networks) + } + + pieces := strings.Split(svc.Config.Address(), ":") + port, _ := strconv.Atoi(pieces[1]) + return svc.Cleanup, pieces[0], port, networkName, networkAddr, 443 +} + +func buildWgetCurlContainer(t *testing.T, network string) { + containerfile := ` +FROM ubuntu:latest + +RUN apt update && DEBIAN_FRONTEND="noninteractive" apt install -y curl wget wget2 +` + + bCtx := docker.NewBuildContext() + + imageName := "vault_pki_wget_curl_integration" + imageTag := "latest" + + var err error + cwRunner, err = docker.NewServiceRunner(docker.RunOptions{ + ImageRepo: imageName, + ImageTag: imageTag, + ContainerName: "vault_pki_wget_curl", + NetworkID: network, + // We want to run sleep in the background so we're not stuck waiting + // for the default ubuntu container's shell to prompt for input. 
+ Entrypoint: []string{"sleep", "45"}, + LogConsumer: func(s string) { + if t.Failed() { + t.Logf("container logs: %s", s) + } + }, + }) + if err != nil { + t.Fatalf("Could not provision docker service runner: %s", err) + } + + ctx := context.Background() + output, err := cwRunner.BuildImage(ctx, containerfile, bCtx, + docker.BuildRemove(true), docker.BuildForceRemove(true), + docker.BuildPullParent(true), + docker.BuildTags([]string{imageName + ":" + imageTag})) + if err != nil { + t.Fatalf("Could not build new image: %v", err) + } + + t.Logf("Image build output: %v", string(output)) +} + +func CheckWithClients(t *testing.T, network string, address string, url string, rootCert string, certificate string, privatekey string) { + // We assume the network doesn't change once assigned. + buildClientContainerOnce.Do(func() { + buildWgetCurlContainer(t, network) + builtNetwork = network + }) + + if builtNetwork != network { + t.Fatalf("failed assumption check: different built network (%v) vs run network (%v); must've changed while running tests", builtNetwork, network) + } + + // Start our service with a random name to not conflict with other + // threads. + ctx := context.Background() + result, err := cwRunner.Start(ctx, true, false) + if err != nil { + t.Fatalf("Could not start golang container for wget/curl checks: %s", err) + } + + // Commands to run after potentially writing the certificate. We + // might augment these if the certificate exists. + // + // We manually add the expected hostname to the local hosts file + // to avoid resolving it over the network and instead resolving it + // to this other container we just started (potentially in parallel + // with other containers). + hostPrimeCmd := []string{"sh", "-c", "echo '" + address + " " + uniqueHostname + "' >> /etc/hosts"} + wgetCmd := []string{"wget", "--verbose", "--ca-certificate=/root.pem", url} + curlCmd := []string{"curl", "--verbose", "--cacert", "/root.pem", url} + + certCtx := docker.NewBuildContext() + certCtx["root.pem"] = docker.PathContentsFromString(rootCert) + if certificate != "" { + // Copy the cert into the newly running container. + certCtx["client-cert.pem"] = docker.PathContentsFromString(certificate) + certCtx["client-privkey.pem"] = docker.PathContentsFromString(privatekey) + + wgetCmd = []string{"wget", "--verbose", "--ca-certificate=/root.pem", "--certificate=/client-cert.pem", "--private-key=/client-privkey.pem", url} + curlCmd = []string{"curl", "--verbose", "--cacert", "/root.pem", "--cert", "/client-cert.pem", "--key", "/client-privkey.pem", url} + } + if err := cwRunner.CopyTo(result.Container.ID, "/", certCtx); err != nil { + t.Fatalf("Could not copy certificate and key into container: %v", err) + } + + for _, cmd := range [][]string{hostPrimeCmd, wgetCmd, curlCmd} { + t.Logf("Running client connection command: %v", cmd) + + stdout, stderr, retcode, err := cwRunner.RunCmdWithOutput(ctx, result.Container.ID, cmd) + if err != nil { + t.Fatalf("Could not run command (%v) in container: %v", cmd, err) + } + + if len(stderr) != 0 { + t.Logf("Got stderr from command (%v):\n%v\n", cmd, string(stderr)) + } + + if retcode != 0 { + t.Logf("Got stdout from command (%v):\n%v\n", cmd, string(stdout)) + t.Fatalf("Got unexpected non-zero retcode from command (%v): %v\n", cmd, retcode) + } + } +} + +func CheckDeltaCRL(t *testing.T, network string, address string, url string, rootCert string, crls string) { + // We assume the network doesn't change once assigned. 
+ buildClientContainerOnce.Do(func() { + buildWgetCurlContainer(t, network) + builtNetwork = network + }) + + if builtNetwork != network { + t.Fatalf("failed assumption check: different built network (%v) vs run network (%v); must've changed while running tests", builtNetwork, network) + } + + // Start our service with a random name to not conflict with other + // threads. + ctx := context.Background() + result, err := cwRunner.Start(ctx, true, false) + if err != nil { + t.Fatalf("Could not start golang container for wget2 delta CRL checks: %s", err) + } + + // Commands to run after potentially writing the certificate. We + // might augment these if the certificate exists. + // + // We manually add the expected hostname to the local hosts file + // to avoid resolving it over the network and instead resolving it + // to this other container we just started (potentially in parallel + // with other containers). + hostPrimeCmd := []string{"sh", "-c", "echo '" + address + " " + uniqueHostname + "' >> /etc/hosts"} + wgetCmd := []string{"wget2", "--verbose", "--ca-certificate=/root.pem", "--crl-file=/crls.pem", url} + + certCtx := docker.NewBuildContext() + certCtx["root.pem"] = docker.PathContentsFromString(rootCert) + certCtx["crls.pem"] = docker.PathContentsFromString(crls) + if err := cwRunner.CopyTo(result.Container.ID, "/", certCtx); err != nil { + t.Fatalf("Could not copy certificate and key into container: %v", err) + } + + for index, cmd := range [][]string{hostPrimeCmd, wgetCmd} { + t.Logf("Running client connection command: %v", cmd) + + stdout, stderr, retcode, err := cwRunner.RunCmdWithOutput(ctx, result.Container.ID, cmd) + if err != nil { + t.Fatalf("Could not run command (%v) in container: %v", cmd, err) + } + + if len(stderr) != 0 { + t.Logf("Got stderr from command (%v):\n%v\n", cmd, string(stderr)) + } + + if retcode != 0 && index == 0 { + t.Logf("Got stdout from command (%v):\n%v\n", cmd, string(stdout)) + t.Fatalf("Got unexpected non-zero retcode from command (%v): %v\n", cmd, retcode) + } + + if retcode == 0 && index == 1 { + t.Logf("Got stdout from command (%v):\n%v\n", cmd, string(stdout)) + t.Fatalf("Got unexpected zero retcode from command; wanted this to fail (%v): %v\n", cmd, retcode) + } + } +} + +func CheckWithGo(t *testing.T, rootCert string, clientCert string, clientChain []string, clientKey string, host string, port int, networkAddr string, networkPort int, url string, expected string, shouldFail bool) { + // Ensure we can connect with Go. + pool := x509.NewCertPool() + pool.AppendCertsFromPEM([]byte(rootCert)) + tlsConfig := &tls.Config{ + RootCAs: pool, + } + + if clientCert != "" { + var clientTLSCert tls.Certificate + clientTLSCert.Certificate = append(clientTLSCert.Certificate, parseCert(t, clientCert).Raw) + clientTLSCert.PrivateKey = parseKey(t, clientKey) + for _, cert := range clientChain { + clientTLSCert.Certificate = append(clientTLSCert.Certificate, parseCert(t, cert).Raw) + } + + tlsConfig.Certificates = append(tlsConfig.Certificates, clientTLSCert) + } + + dialer := &net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + } + + transport := &http.Transport{ + TLSClientConfig: tlsConfig, + DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) { + if addr == host+":"+strconv.Itoa(port) { + // If we can't resolve our hostname, try + // accessing it via the docker protocol + // instead of via the returned service + // address. 
+ if _, err := net.LookupHost(host); err != nil && strings.Contains(err.Error(), "no such host") { + addr = networkAddr + ":" + strconv.Itoa(networkPort) + } + } + return dialer.DialContext(ctx, network, addr) + }, + } + + client := &http.Client{Transport: transport} + clientResp, err := client.Get(url) + if err != nil { + if shouldFail { + return + } + + t.Fatalf("failed to fetch url (%v): %v", url, err) + } else if shouldFail { + if clientResp.StatusCode == 200 { + t.Fatalf("expected failure to fetch url (%v): got response: %v", url, clientResp) + } + + return + } + + defer clientResp.Body.Close() + body, err := io.ReadAll(clientResp.Body) + if err != nil { + t.Fatalf("failed to get read response body: %v", err) + } + if !strings.Contains(string(body), expected) { + t.Fatalf("expected body to contain (%v) but was:\n%v", expected, string(body)) + } +} + +func RunNginxRootTest(t *testing.T, caKeyType string, caKeyBits int, caUsePSS bool, roleKeyType string, roleKeyBits int, roleUsePSS bool) { + t.Skipf("flaky in CI") + + b, s := pki.CreateBackendWithStorage(t) + + testSuffix := fmt.Sprintf(" - %v %v %v - %v %v %v", caKeyType, caKeyType, caUsePSS, roleKeyType, roleKeyBits, roleUsePSS) + + // Configure our mount to use auto-rotate, even though we don't have + // a periodic func. + _, err := pki.CBWrite(b, s, "config/crl", map[string]interface{}{ + "auto_rebuild": true, + "enable_delta": true, + }) + + // Create a root and intermediate, setting the intermediate as default. + resp, err := pki.CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "Root X1" + testSuffix, + "country": "US", + "organization": "Dadgarcorp", + "ou": "QA", + "key_type": caKeyType, + "key_bits": caKeyBits, + "use_pss": caUsePSS, + "issuer_name": "root", + }) + requireSuccessNonNilResponse(t, resp, err, "failed to create root cert") + rootCert := resp.Data["certificate"].(string) + resp, err = pki.CBWrite(b, s, "intermediate/generate/internal", map[string]interface{}{ + "common_name": "Intermediate I1" + testSuffix, + "country": "US", + "organization": "Dadgarcorp", + "ou": "QA", + "key_type": caKeyType, + "key_bits": caKeyBits, + "use_pss": caUsePSS, + }) + requireSuccessNonNilResponse(t, resp, err, "failed to create intermediate csr") + resp, err = pki.CBWrite(b, s, "issuer/default/sign-intermediate", map[string]interface{}{ + "common_name": "Intermediate I1", + "country": "US", + "organization": "Dadgarcorp", + "ou": "QA", + "key_type": caKeyType, + "csr": resp.Data["csr"], + }) + requireSuccessNonNilResponse(t, resp, err, "failed to sign intermediate csr") + intCert := resp.Data["certificate"].(string) + resp, err = pki.CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ + "pem_bundle": intCert, + }) + requireSuccessNonNilResponse(t, resp, err, "failed to sign intermediate csr") + _, err = pki.CBWrite(b, s, "config/issuers", map[string]interface{}{ + "default": resp.Data["imported_issuers"].([]string)[0], + }) + + // Create a role+certificate valid for localhost only. 
+ _, err = pki.CBWrite(b, s, "roles/testing", map[string]interface{}{ + "allow_any_name": true, + "key_type": roleKeyType, + "key_bits": roleKeyBits, + "use_pss": roleUsePSS, + "ttl": "60m", + }) + require.NoError(t, err) + resp, err = pki.CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": uniqueHostname, + "ip_sans": "127.0.0.1,::1", + "sans": uniqueHostname + ",localhost,localhost4,localhost6,localhost.localdomain", + }) + requireSuccessNonNilResponse(t, resp, err, "failed to create server leaf cert") + leafCert := resp.Data["certificate"].(string) + leafPrivateKey := resp.Data["private_key"].(string) + "\n" + fullChain := leafCert + "\n" + for _, cert := range resp.Data["ca_chain"].([]string) { + fullChain += cert + "\n" + } + + // Issue a client leaf certificate. + resp, err = pki.CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "testing.client.dadgarcorp.com", + }) + requireSuccessNonNilResponse(t, resp, err, "failed to create client leaf cert") + clientCert := resp.Data["certificate"].(string) + clientKey := resp.Data["private_key"].(string) + "\n" + clientWireChain := clientCert + "\n" + resp.Data["issuing_ca"].(string) + "\n" + clientTrustChain := resp.Data["issuing_ca"].(string) + "\n" + rootCert + "\n" + clientCAChain := resp.Data["ca_chain"].([]string) + + // Issue a client leaf cert and revoke it, placing it on the main CRL + // via rotation. + resp, err = pki.CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "revoked-crl.client.dadgarcorp.com", + }) + requireSuccessNonNilResponse(t, resp, err, "failed to create revoked client leaf cert") + revokedCert := resp.Data["certificate"].(string) + revokedKey := resp.Data["private_key"].(string) + "\n" + // revokedFullChain := revokedCert + "\n" + resp.Data["issuing_ca"].(string) + "\n" + // revokedTrustChain := resp.Data["issuing_ca"].(string) + "\n" + rootCert + "\n" + revokedCAChain := resp.Data["ca_chain"].([]string) + _, err = pki.CBWrite(b, s, "revoke", map[string]interface{}{ + "certificate": revokedCert, + }) + require.NoError(t, err) + _, err = pki.CBRead(b, s, "crl/rotate") + require.NoError(t, err) + + // Issue a client leaf cert and revoke it, placing it on the delta CRL + // via rotation. + /*resp, err = pki.CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "revoked-delta-crl.client.dadgarcorp.com", + }) + requireSuccessNonNilResponse(t, resp, err, "failed to create delta CRL revoked client leaf cert") + deltaCert := resp.Data["certificate"].(string) + deltaKey := resp.Data["private_key"].(string) + "\n" + //deltaFullChain := deltaCert + "\n" + resp.Data["issuing_ca"].(string) + "\n" + //deltaTrustChain := resp.Data["issuing_ca"].(string) + "\n" + rootCert + "\n" + deltaCAChain := resp.Data["ca_chain"].([]string) + _, err = pki.CBWrite(b, s, "revoke", map[string]interface{}{ + "certificate": deltaCert, + }) + require.NoError(t, err) + _, err = pki.CBRead(b, s, "crl/rotate-delta") + require.NoError(t, err)*/ + + // Get the CRL and Delta CRLs. + resp, err = pki.CBRead(b, s, "issuer/root/crl") + require.NoError(t, err) + rootCRL := resp.Data["crl"].(string) + "\n" + resp, err = pki.CBRead(b, s, "issuer/default/crl") + require.NoError(t, err) + intCRL := resp.Data["crl"].(string) + "\n" + + // No need to fetch root Delta CRL as we've not revoked anything on it. 
+ resp, err = pki.CBRead(b, s, "issuer/default/crl/delta") + require.NoError(t, err) + deltaCRL := resp.Data["crl"].(string) + "\n" + + crls := rootCRL + intCRL + deltaCRL + + cleanup, host, port, networkName, networkAddr, networkPort := buildNginxContainer(t, rootCert, crls, fullChain, leafPrivateKey) + defer cleanup() + + if host != "127.0.0.1" && host != "::1" && strings.HasPrefix(host, containerName) { + t.Logf("Assuming %v:%v is a container name rather than localhost reference.", host, port) + host = uniqueHostname + port = networkPort + } + + localBase := "https://" + host + ":" + strconv.Itoa(port) + localURL := localBase + "/index.html" + localProtectedURL := localBase + "/protected.html" + containerBase := "https://" + uniqueHostname + ":" + strconv.Itoa(networkPort) + containerURL := containerBase + "/index.html" + containerProtectedURL := containerBase + "/protected.html" + + t.Logf("Spawned nginx container:\nhost: %v\nport: %v\nnetworkName: %v\nnetworkAddr: %v\nnetworkPort: %v\nlocalURL: %v\ncontainerURL: %v\n", host, port, networkName, networkAddr, networkPort, localBase, containerBase) + + // Ensure we can connect with Go. We do our checks for revocation here, + // as this behavior is server-controlled and shouldn't matter based on + // client type. + CheckWithGo(t, rootCert, "", nil, "", host, port, networkAddr, networkPort, localURL, unprotectedFile, false) + CheckWithGo(t, rootCert, "", nil, "", host, port, networkAddr, networkPort, localProtectedURL, failureIndicator, true) + CheckWithGo(t, rootCert, clientCert, clientCAChain, clientKey, host, port, networkAddr, networkPort, localProtectedURL, protectedFile, false) + CheckWithGo(t, rootCert, revokedCert, revokedCAChain, revokedKey, host, port, networkAddr, networkPort, localProtectedURL, protectedFile, true) + // CheckWithGo(t, rootCert, deltaCert, deltaCAChain, deltaKey, host, port, networkAddr, networkPort, localProtectedURL, protectedFile, true) + + // Ensure we can connect with wget/curl. + CheckWithClients(t, networkName, networkAddr, containerURL, rootCert, "", "") + CheckWithClients(t, networkName, networkAddr, containerProtectedURL, clientTrustChain, clientWireChain, clientKey) + + // Ensure OpenSSL will validate the delta CRL by revoking our server leaf + // and then using it with wget2. This will land on the intermediate's + // Delta CRL. 
+ _, err = pki.CBWrite(b, s, "revoke", map[string]interface{}{ + "certificate": leafCert, + }) + require.NoError(t, err) + _, err = pki.CBRead(b, s, "crl/rotate-delta") + require.NoError(t, err) + resp, err = pki.CBRead(b, s, "issuer/default/crl/delta") + require.NoError(t, err) + deltaCRL = resp.Data["crl"].(string) + "\n" + crls = rootCRL + intCRL + deltaCRL + + CheckDeltaCRL(t, networkName, networkAddr, containerURL, rootCert, crls) +} + +func Test_NginxRSAPure(t *testing.T) { + t.Parallel() + RunNginxRootTest(t, "rsa", 2048, false, "rsa", 2048, false) +} + +func Test_NginxRSAPurePSS(t *testing.T) { + t.Parallel() + RunNginxRootTest(t, "rsa", 2048, false, "rsa", 2048, true) +} + +func Test_NginxRSAPSSPure(t *testing.T) { + t.Parallel() + RunNginxRootTest(t, "rsa", 2048, true, "rsa", 2048, false) +} + +func Test_NginxRSAPSSPurePSS(t *testing.T) { + t.Parallel() + RunNginxRootTest(t, "rsa", 2048, true, "rsa", 2048, true) +} + +func Test_NginxECDSA256Pure(t *testing.T) { + t.Parallel() + RunNginxRootTest(t, "ec", 256, false, "ec", 256, false) +} + +func Test_NginxECDSAHybrid(t *testing.T) { + t.Parallel() + RunNginxRootTest(t, "ec", 256, false, "rsa", 2048, false) +} + +func Test_NginxECDSAHybridPSS(t *testing.T) { + t.Parallel() + RunNginxRootTest(t, "ec", 256, false, "rsa", 2048, true) +} + +func Test_NginxRSAHybrid(t *testing.T) { + t.Parallel() + RunNginxRootTest(t, "rsa", 2048, false, "ec", 256, false) +} + +func Test_NginxRSAPSSHybrid(t *testing.T) { + t.Parallel() + RunNginxRootTest(t, "rsa", 2048, true, "ec", 256, false) +} diff --git a/builtin/logical/pkiext/pkiext_binary/acme_test.go b/builtin/logical/pkiext/pkiext_binary/acme_test.go new file mode 100644 index 0000000..22488ef --- /dev/null +++ b/builtin/logical/pkiext/pkiext_binary/acme_test.go @@ -0,0 +1,1102 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pkiext_binary + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + _ "embed" + "encoding/hex" + "errors" + "fmt" + "html/template" + "net" + "net/http" + "path" + "strings" + "testing" + "time" + + "golang.org/x/crypto/acme" + + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/builtin/logical/pkiext" + "github.com/hashicorp/vault/helper/testhelpers" + "github.com/hashicorp/vault/sdk/helper/certutil" + hDocker "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/stretchr/testify/require" +) + +//go:embed testdata/caddy_http.json +var caddyConfigTemplateHTTP string + +//go:embed testdata/caddy_http_eab.json +var caddyConfigTemplateHTTPEAB string + +//go:embed testdata/caddy_tls_alpn.json +var caddyConfigTemplateTLSALPN string + +// Test_ACME will start a Vault cluster using the docker based binary, and execute +// a bunch of sub-tests against that cluster. It is up to each sub-test to run/configure +// a new pki mount within the cluster to not interfere with each other. 
+func Test_ACME(t *testing.T) { + cluster := NewVaultPkiClusterWithDNS(t) + defer cluster.Cleanup() + + tc := map[string]func(t *testing.T, cluster *VaultPkiCluster){ + "caddy http": SubtestACMECaddy(caddyConfigTemplateHTTP, false), + "caddy http eab": SubtestACMECaddy(caddyConfigTemplateHTTPEAB, true), + "caddy tls-alpn": SubtestACMECaddy(caddyConfigTemplateTLSALPN, false), + "certbot": SubtestACMECertbot, + "certbot eab": SubtestACMECertbotEab, + "acme ip sans": SubtestACMEIPAndDNS, + "acme wildcard": SubtestACMEWildcardDNS, + "acme prevents ica": SubtestACMEPreventsICADNS, + } + + // Wrap the tests within an outer group, so that we run all tests + // in parallel, but still wait for all tests to finish before completing + // and running the cleanup of the Vault cluster. + t.Run("group", func(gt *testing.T) { + for testName := range tc { + // Trap the function to be embedded later in the run so it + // doesn't get clobbered on the next for iteration + testFunc := tc[testName] + + gt.Run(testName, func(st *testing.T) { + st.Parallel() + testFunc(st, cluster) + }) + } + }) + + // Do not run these tests in parallel. + t.Run("step down", func(gt *testing.T) { SubtestACMEStepDownNode(gt, cluster) }) +} + +// caddyConfig contains information used to render a Caddy configuration file from a template. +type caddyConfig struct { + Hostname string + Directory string + CACert string + EABID string + EABKey string +} + +// SubtestACMECaddy returns an ACME test for Caddy using the provided template. +func SubtestACMECaddy(configTemplate string, enableEAB bool) func(*testing.T, *VaultPkiCluster) { + return func(t *testing.T, cluster *VaultPkiCluster) { + ctx := context.Background() + + // Roll a random run ID for mount and hostname uniqueness. + runID, err := uuid.GenerateUUID() + require.NoError(t, err, "failed to generate a unique ID for test run") + runID = strings.Split(runID, "-")[0] + + // Create the PKI mount with ACME enabled + pki, err := cluster.CreateAcmeMount(runID) + require.NoError(t, err, "failed to set up ACME mount") + + // Conditionally enable EAB and retrieve the key. + var eabID, eabKey string + if enableEAB { + err = pki.UpdateAcmeConfig(true, map[string]interface{}{ + "eab_policy": "new-account-required", + }) + require.NoError(t, err, "failed to configure EAB policy in PKI mount") + + eabID, eabKey, err = pki.GetEabKey("acme/") + require.NoError(t, err, "failed to retrieve EAB key from PKI mount") + } + + directory := fmt.Sprintf("https://%s:8200/v1/%s/acme/directory", pki.GetActiveContainerIP(), runID) + vaultNetwork := pki.GetContainerNetworkName() + t.Logf("dir: %s", directory) + + logConsumer, logStdout, logStderr := getDockerLog(t) + + sleepTimer := "45" + + // Kick off Caddy container. 
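+ // The image entrypoint is replaced with a plain sleep so the container
+ // stays alive while we copy the rendered config in and exec the
+ // `caddy start` and validation commands against it.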
+ t.Logf("creating on network: %v", vaultNetwork) + caddyRunner, err := hDocker.NewServiceRunner(hDocker.RunOptions{ + ImageRepo: "docker.mirror.hashicorp.services/library/caddy", + ImageTag: "2.6.4", + ContainerName: fmt.Sprintf("caddy_test_%s", runID), + NetworkName: vaultNetwork, + Ports: []string{"80/tcp", "443/tcp", "443/udp"}, + Entrypoint: []string{"sleep", sleepTimer}, + LogConsumer: logConsumer, + LogStdout: logStdout, + LogStderr: logStderr, + }) + require.NoError(t, err, "failed creating caddy service runner") + + caddyResult, err := caddyRunner.Start(ctx, true, false) + require.NoError(t, err, "could not start Caddy container") + require.NotNil(t, caddyResult, "could not start Caddy container") + + defer caddyRunner.Stop(ctx, caddyResult.Container.ID) + + networks, err := caddyRunner.GetNetworkAndAddresses(caddyResult.Container.ID) + require.NoError(t, err, "could not read caddy container's IP address") + require.Contains(t, networks, vaultNetwork, "expected to contain vault network") + + ipAddr := networks[vaultNetwork] + hostname := fmt.Sprintf("%s.dadgarcorp.com", runID) + + err = pki.AddHostname(hostname, ipAddr) + require.NoError(t, err, "failed to update vault host files") + + // Render the Caddy configuration from the specified template. + tmpl, err := template.New("config").Parse(configTemplate) + require.NoError(t, err, "failed to parse Caddy config template") + var b strings.Builder + err = tmpl.Execute( + &b, + caddyConfig{ + Hostname: hostname, + Directory: directory, + CACert: "/tmp/vault_ca_cert.crt", + EABID: eabID, + EABKey: eabKey, + }, + ) + require.NoError(t, err, "failed to render Caddy config template") + + // Push the Caddy config and the cluster listener's CA certificate over to the docker container. + cpCtx := hDocker.NewBuildContext() + cpCtx["caddy_config.json"] = hDocker.PathContentsFromString(b.String()) + cpCtx["vault_ca_cert.crt"] = hDocker.PathContentsFromString(string(cluster.GetListenerCACertPEM())) + err = caddyRunner.CopyTo(caddyResult.Container.ID, "/tmp/", cpCtx) + require.NoError(t, err, "failed to copy Caddy config and Vault listener CA certificate to container") + + // Start the Caddy server. + caddyCmd := []string{ + "caddy", + "start", + "--config", "/tmp/caddy_config.json", + } + stdout, stderr, retcode, err := caddyRunner.RunCmdWithOutput(ctx, caddyResult.Container.ID, caddyCmd) + t.Logf("Caddy Start Command: %v\nstdout: %v\nstderr: %v\n", caddyCmd, string(stdout), string(stderr)) + require.NoError(t, err, "got error running Caddy start command") + require.Equal(t, 0, retcode, "expected zero retcode Caddy start command result") + + // Start a cURL container. + curlRunner, err := hDocker.NewServiceRunner(hDocker.RunOptions{ + ImageRepo: "docker.mirror.hashicorp.services/curlimages/curl", + ImageTag: "8.4.0", + ContainerName: fmt.Sprintf("curl_test_%s", runID), + NetworkName: vaultNetwork, + Entrypoint: []string{"sleep", sleepTimer}, + LogConsumer: logConsumer, + LogStdout: logStdout, + LogStderr: logStderr, + }) + require.NoError(t, err, "failed creating cURL service runner") + + curlResult, err := curlRunner.Start(ctx, true, false) + require.NoError(t, err, "could not start cURL container") + require.NotNil(t, curlResult, "could not start cURL container") + + // Retrieve the PKI mount CA cert and copy it over to the cURL container. 
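+ // The leaf certificate Caddy obtains via ACME chains to the PKI mount's
+ // CA rather than the cluster listener CA, so cURL must trust the mount
+ // CA for the TLS handshake to validate.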
+ mountCACert, err := pki.GetCACertPEM() + require.NoError(t, err, "failed to retrieve PKI mount CA certificate") + + mountCACertCtx := hDocker.NewBuildContext() + mountCACertCtx["ca_cert.crt"] = hDocker.PathContentsFromString(mountCACert) + err = curlRunner.CopyTo(curlResult.Container.ID, "/tmp/", mountCACertCtx) + require.NoError(t, err, "failed to copy PKI mount CA certificate to cURL container") + + // Use cURL to hit the Caddy server and validate that a certificate was retrieved successfully. + curlCmd := []string{ + "curl", + "-L", + "--cacert", "/tmp/ca_cert.crt", + "--resolve", hostname + ":443:" + ipAddr, + "https://" + hostname + "/", + } + stdout, stderr, retcode, err = curlRunner.RunCmdWithOutput(ctx, curlResult.Container.ID, curlCmd) + t.Logf("cURL Command: %v\nstdout: %v\nstderr: %v\n", curlCmd, string(stdout), string(stderr)) + require.NoError(t, err, "got error running cURL command") + require.Equal(t, 0, retcode, "expected zero retcode cURL command result") + } +} + +func SubtestACMECertbot(t *testing.T, cluster *VaultPkiCluster) { + pki, err := cluster.CreateAcmeMount("pki") + require.NoError(t, err, "failed setting up acme mount") + + directory := "https://" + pki.GetActiveContainerIP() + ":8200/v1/pki/acme/directory" + vaultNetwork := pki.GetContainerNetworkName() + + logConsumer, logStdout, logStderr := getDockerLog(t) + + // Default to 45 second timeout, but bump to 120 when running locally or if nightly regression + // flag is provided. + sleepTimer := "45" + if testhelpers.IsLocalOrRegressionTests() { + sleepTimer = "120" + } + + t.Logf("creating on network: %v", vaultNetwork) + runner, err := hDocker.NewServiceRunner(hDocker.RunOptions{ + ImageRepo: "docker.mirror.hashicorp.services/certbot/certbot", + ImageTag: "latest", + ContainerName: "vault_pki_certbot_test", + NetworkName: vaultNetwork, + Entrypoint: []string{"sleep", sleepTimer}, + LogConsumer: logConsumer, + LogStdout: logStdout, + LogStderr: logStderr, + }) + require.NoError(t, err, "failed creating service runner") + + ctx := context.Background() + result, err := runner.Start(ctx, true, false) + require.NoError(t, err, "could not start container") + require.NotNil(t, result, "could not start container") + + defer runner.Stop(context.Background(), result.Container.ID) + + networks, err := runner.GetNetworkAndAddresses(result.Container.ID) + require.NoError(t, err, "could not read container's IP address") + require.Contains(t, networks, vaultNetwork, "expected to contain vault network") + + ipAddr := networks[vaultNetwork] + hostname := "certbot-acme-client.dadgarcorp.com" + + err = pki.AddHostname(hostname, ipAddr) + require.NoError(t, err, "failed to update vault host files") + + // Sinkhole a domain that's invalid just in case it's registered in the future. 
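+ // Pointing the name at 127.0.0.1 on the test resolver keeps the negative
+ // issuance test below deterministic: challenge validation should never
+ // succeed against loopback, and public DNS is never consulted.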
+ cluster.Dns.AddDomain("armoncorp.com") + cluster.Dns.AddRecord("armoncorp.com", "A", "127.0.0.1") + + certbotCmd := []string{ + "certbot", + "certonly", + "--no-eff-email", + "--email", "certbot.client@dadgarcorp.com", + "--agree-tos", + "--no-verify-ssl", + "--standalone", + "--non-interactive", + "--server", directory, + "-d", hostname, + } + logCatCmd := []string{"cat", "/var/log/letsencrypt/letsencrypt.log"} + + stdout, stderr, retcode, err := runner.RunCmdWithOutput(ctx, result.Container.ID, certbotCmd) + t.Logf("Certbot Issue Command: %v\nstdout: %v\nstderr: %v\n", certbotCmd, string(stdout), string(stderr)) + if err != nil || retcode != 0 { + logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) + t.Logf("Certbot logs\nstdout: %v\nstderr: %v\n", string(logsStdout), string(logsStderr)) + } + require.NoError(t, err, "got error running issue command") + require.Equal(t, 0, retcode, "expected zero retcode issue command result") + + // N.B. We're using the `certonly` subcommand here because it seems as though the `renew` command + // attempts to install the cert for you. This ends up hanging and getting killed by docker, but is + // also not desired behavior. The certbot docs suggest using `certonly` to renew as seen here: + // https://eff-certbot.readthedocs.io/en/stable/using.html#renewing-certificates + certbotRenewCmd := []string{ + "certbot", + "certonly", + "--no-eff-email", + "--email", "certbot.client@dadgarcorp.com", + "--agree-tos", + "--no-verify-ssl", + "--standalone", + "--non-interactive", + "--server", directory, + "-d", hostname, + "--cert-name", hostname, + "--force-renewal", + } + + stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotRenewCmd) + t.Logf("Certbot Renew Command: %v\nstdout: %v\nstderr: %v\n", certbotRenewCmd, string(stdout), string(stderr)) + if err != nil || retcode != 0 { + logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) + t.Logf("Certbot logs\nstdout: %v\nstderr: %v\n", string(logsStdout), string(logsStderr)) + } + require.NoError(t, err, "got error running renew command") + require.Equal(t, 0, retcode, "expected zero retcode renew command result") + + certbotRevokeCmd := []string{ + "certbot", + "revoke", + "--no-eff-email", + "--email", "certbot.client@dadgarcorp.com", + "--agree-tos", + "--no-verify-ssl", + "--non-interactive", + "--no-delete-after-revoke", + "--cert-name", hostname, + } + + stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotRevokeCmd) + t.Logf("Certbot Revoke Command: %v\nstdout: %v\nstderr: %v\n", certbotRevokeCmd, string(stdout), string(stderr)) + if err != nil || retcode != 0 { + logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd) + t.Logf("Certbot logs\nstdout: %v\nstderr: %v\n", string(logsStdout), string(logsStderr)) + } + require.NoError(t, err, "got error running revoke command") + require.Equal(t, 0, retcode, "expected zero retcode revoke command result") + + // Revoking twice should fail. 
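+ // certbot exits non-zero when the certificate is already revoked, so
+ // this time we expect a failure status rather than success.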
+ stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotRevokeCmd)
+ t.Logf("Certbot Double Revoke Command: %v\nstdout: %v\nstderr: %v\n", certbotRevokeCmd, string(stdout), string(stderr))
+ if err != nil || retcode == 0 {
+ logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd)
+ t.Logf("Certbot logs\nstdout: %v\nstderr: %v\n", string(logsStdout), string(logsStderr))
+ }
+
+ require.NoError(t, err, "got error running double revoke command")
+ require.NotEqual(t, 0, retcode, "expected non-zero retcode double revoke command result")
+
+ // Attempt to issue against a domain that doesn't match the challenge.
+ // N.B. This test only runs locally or when the nightly regression env var is provided to CI.
+ if testhelpers.IsLocalOrRegressionTests() {
+ certbotInvalidIssueCmd := []string{
+ "certbot",
+ "certonly",
+ "--no-eff-email",
+ "--email", "certbot.client@dadgarcorp.com",
+ "--agree-tos",
+ "--no-verify-ssl",
+ "--standalone",
+ "--non-interactive",
+ "--server", directory,
+ "-d", "armoncorp.com",
+ "--issuance-timeout", "10",
+ }
+
+ stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotInvalidIssueCmd)
+ t.Logf("Certbot Invalid Issue Command: %v\nstdout: %v\nstderr: %v\n", certbotInvalidIssueCmd, string(stdout), string(stderr))
+ if err != nil || retcode != 0 {
+ logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd)
+ t.Logf("Certbot logs\nstdout: %v\nstderr: %v\n", string(logsStdout), string(logsStderr))
+ }
+ require.NoError(t, err, "got error running issue command")
+ require.NotEqual(t, 0, retcode, "expected non-zero retcode issue command result")
+ }
+
+ // Attempt to close out our ACME account
+ certbotUnregisterCmd := []string{
+ "certbot",
+ "unregister",
+ "--no-verify-ssl",
+ "--non-interactive",
+ "--server", directory,
+ }
+
+ stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotUnregisterCmd)
+ t.Logf("Certbot Unregister Command: %v\nstdout: %v\nstderr: %v\n", certbotUnregisterCmd, string(stdout), string(stderr))
+ if err != nil || retcode != 0 {
+ logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd)
+ t.Logf("Certbot logs\nstdout: %v\nstderr: %v\n", string(logsStdout), string(logsStderr))
+ }
+ require.NoError(t, err, "got error running unregister command")
+ require.Equal(t, 0, retcode, "expected zero retcode unregister command result")
+
+ // Attempting to close out our ACME account twice should fail
+ stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotUnregisterCmd)
+ t.Logf("Certbot double Unregister Command: %v\nstdout: %v\nstderr: %v\n", certbotUnregisterCmd, string(stdout), string(stderr))
+ if err != nil || retcode != 0 {
+ logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd)
+ t.Logf("Certbot double logs\nstdout: %v\nstderr: %v\n", string(logsStdout), string(logsStderr))
+ }
+ require.NoError(t, err, "got error running double unregister command")
+ require.Equal(t, 1, retcode, "expected retcode 1 for double unregister command result")
+}
+
+func SubtestACMECertbotEab(t *testing.T, cluster *VaultPkiCluster) {
+ mountName := "pki-certbot-eab"
+ pki, err := cluster.CreateAcmeMount(mountName)
+ require.NoError(t, err, "failed setting up acme mount")
+
+ err = pki.UpdateAcmeConfig(true, map[string]interface{}{
+ "eab_policy": "new-account-required",
+ })
+
require.NoError(t, err)
+
+ eabId, base64EabKey, err := pki.GetEabKey("acme/")
+ require.NoError(t, err, "failed to retrieve EAB key from PKI mount")
+
+ directory := "https://" + pki.GetActiveContainerIP() + ":8200/v1/" + mountName + "/acme/directory"
+ vaultNetwork := pki.GetContainerNetworkName()
+
+ logConsumer, logStdout, logStderr := getDockerLog(t)
+
+ t.Logf("creating on network: %v", vaultNetwork)
+ runner, err := hDocker.NewServiceRunner(hDocker.RunOptions{
+ ImageRepo: "docker.mirror.hashicorp.services/certbot/certbot",
+ ImageTag: "latest",
+ ContainerName: "vault_pki_certbot_eab_test",
+ NetworkName: vaultNetwork,
+ Entrypoint: []string{"sleep", "45"},
+ LogConsumer: logConsumer,
+ LogStdout: logStdout,
+ LogStderr: logStderr,
+ })
+ require.NoError(t, err, "failed creating service runner")
+
+ ctx := context.Background()
+ result, err := runner.Start(ctx, true, false)
+ require.NoError(t, err, "could not start container")
+ require.NotNil(t, result, "could not start container")
+
+ defer runner.Stop(context.Background(), result.Container.ID)
+
+ networks, err := runner.GetNetworkAndAddresses(result.Container.ID)
+ require.NoError(t, err, "could not read container's IP address")
+ require.Contains(t, networks, vaultNetwork, "expected to contain vault network")
+
+ ipAddr := networks[vaultNetwork]
+ hostname := "certbot-eab-acme-client.dadgarcorp.com"
+
+ err = pki.AddHostname(hostname, ipAddr)
+ require.NoError(t, err, "failed to update vault host files")
+
+ certbotCmd := []string{
+ "certbot",
+ "certonly",
+ "--no-eff-email",
+ "--email", "certbot.client@dadgarcorp.com",
+ "--eab-kid", eabId,
+ "--eab-hmac-key='" + base64EabKey + "'",
+ "--agree-tos",
+ "--no-verify-ssl",
+ "--standalone",
+ "--non-interactive",
+ "--server", directory,
+ "-d", hostname,
+ }
+ logCatCmd := []string{"cat", "/var/log/letsencrypt/letsencrypt.log"}
+
+ stdout, stderr, retcode, err := runner.RunCmdWithOutput(ctx, result.Container.ID, certbotCmd)
+ t.Logf("Certbot Issue Command: %v\nstdout: %v\nstderr: %v\n", certbotCmd, string(stdout), string(stderr))
+ if err != nil || retcode != 0 {
+ logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd)
+ t.Logf("Certbot logs\nstdout: %v\nstderr: %v\n", string(logsStdout), string(logsStderr))
+ }
+ require.NoError(t, err, "got error running issue command")
+ require.Equal(t, 0, retcode, "expected zero retcode issue command result")
+
+ certbotRenewCmd := []string{
+ "certbot",
+ "certonly",
+ "--no-eff-email",
+ "--email", "certbot.client@dadgarcorp.com",
+ "--agree-tos",
+ "--no-verify-ssl",
+ "--standalone",
+ "--non-interactive",
+ "--server", directory,
+ "-d", hostname,
+ "--cert-name", hostname,
+ "--force-renewal",
+ }
+
+ stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotRenewCmd)
+ t.Logf("Certbot Renew Command: %v\nstdout: %v\nstderr: %v\n", certbotRenewCmd, string(stdout), string(stderr))
+ if err != nil || retcode != 0 {
+ logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd)
+ t.Logf("Certbot logs\nstdout: %v\nstderr: %v\n", string(logsStdout), string(logsStderr))
+ }
+ require.NoError(t, err, "got error running renew command")
+ require.Equal(t, 0, retcode, "expected zero retcode renew command result")
+
+ certbotRevokeCmd := []string{
+ "certbot",
+ "revoke",
+ "--no-eff-email",
+ "--email", "certbot.client@dadgarcorp.com",
+ "--agree-tos",
+ "--no-verify-ssl",
+ "--non-interactive",
+ "--no-delete-after-revoke",
+ "--cert-name", hostname,
+ }
+
+ stdout, stderr, retcode, err =
runner.RunCmdWithOutput(ctx, result.Container.ID, certbotRevokeCmd)
+ t.Logf("Certbot Revoke Command: %v\nstdout: %v\nstderr: %v\n", certbotRevokeCmd, string(stdout), string(stderr))
+ if err != nil || retcode != 0 {
+ logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd)
+ t.Logf("Certbot logs\nstdout: %v\nstderr: %v\n", string(logsStdout), string(logsStderr))
+ }
+ require.NoError(t, err, "got error running revoke command")
+ require.Equal(t, 0, retcode, "expected zero retcode revoke command result")
+
+ // Revoking twice should fail.
+ stdout, stderr, retcode, err = runner.RunCmdWithOutput(ctx, result.Container.ID, certbotRevokeCmd)
+ t.Logf("Certbot Double Revoke Command: %v\nstdout: %v\nstderr: %v\n", certbotRevokeCmd, string(stdout), string(stderr))
+ if err != nil || retcode == 0 {
+ logsStdout, logsStderr, _, _ := runner.RunCmdWithOutput(ctx, result.Container.ID, logCatCmd)
+ t.Logf("Certbot logs\nstdout: %v\nstderr: %v\n", string(logsStdout), string(logsStderr))
+ }
+
+ require.NoError(t, err, "got error running double revoke command")
+ require.NotEqual(t, 0, retcode, "expected non-zero retcode double revoke command result")
+}
+
+func SubtestACMEIPAndDNS(t *testing.T, cluster *VaultPkiCluster) {
+ pki, err := cluster.CreateAcmeMount("pki-ip-dns-sans")
+ require.NoError(t, err, "failed setting up acme mount")
+
+ // Since we interact with ACME from outside the container network the ACME
+ // configuration needs to be updated to use the host port and not the internal
+ // docker ip.
+ basePath, err := pki.UpdateClusterConfigLocalAddr()
+ require.NoError(t, err, "failed updating cluster config")
+
+ logConsumer, logStdout, logStderr := getDockerLog(t)
+
+ // Set up an nginx container that will respond to the http-01 challenge queries for us.
+ runner, err := hDocker.NewServiceRunner(hDocker.RunOptions{
+ ImageRepo: "docker.mirror.hashicorp.services/nginx",
+ ImageTag: "latest",
+ ContainerName: "vault_pki_ipsans_test",
+ NetworkName: pki.GetContainerNetworkName(),
+ LogConsumer: logConsumer,
+ LogStdout: logStdout,
+ LogStderr: logStderr,
+ })
+ require.NoError(t, err, "failed creating service runner")
+
+ ctx := context.Background()
+ result, err := runner.Start(ctx, true, false)
+ require.NoError(t, err, "could not start container")
+ require.NotNil(t, result, "could not start container")
+
+ nginxContainerId := result.Container.ID
+ defer runner.Stop(context.Background(), nginxContainerId)
+ networks, err := runner.GetNetworkAndAddresses(nginxContainerId)
+ require.NoError(t, err, "could not read nginx container's IP address")
+
+ challengeFolder := "/usr/share/nginx/html/.well-known/acme-challenge/"
+ createChallengeFolderCmd := []string{
+ "sh", "-c",
+ "mkdir -p '" + challengeFolder + "'",
+ }
+ stdout, stderr, retcode, err := runner.RunCmdWithOutput(ctx, nginxContainerId, createChallengeFolderCmd)
+ require.NoError(t, err, "failed to create folder in nginx container")
+ t.Logf("Create challenge folder command: %v\nstdout: %v\nstderr: %v", createChallengeFolderCmd, string(stdout), string(stderr))
+ require.Equal(t, 0, retcode, "expected zero retcode from mkdir in nginx container")
+
+ ipAddr := networks[pki.GetContainerNetworkName()]
+ hostname := "go-lang-acme-client.dadgarcorp.com"
+
+ err = pki.AddHostname(hostname, ipAddr)
+ require.NoError(t, err, "failed to update vault host files")
+
+ // Perform an ACME lifecycle with an order that contains both an IP and a DNS name identifier
+ err = pki.UpdateRole("ip-dns-sans", map[string]interface{}{
+ "key_type": "any",
+ "allowed_domains": "dadgarcorp.com",
+ "allow_subdomains":
true,
+ "allow_wildcard_certificates": false,
+ })
+ require.NoError(t, err, "failed creating role ip-dns-sans")
+
+ directoryUrl := basePath + "/roles/ip-dns-sans/acme/directory"
+ acmeOrderIdentifiers := []acme.AuthzID{
+ {Type: "ip", Value: ipAddr},
+ {Type: "dns", Value: hostname},
+ }
+ cr := &x509.CertificateRequest{
+ Subject: pkix.Name{CommonName: hostname},
+ DNSNames: []string{hostname},
+ IPAddresses: []net.IP{net.ParseIP(ipAddr)},
+ }
+
+ provisioningFunc := func(acmeClient *acme.Client, auths []*acme.Authorization) []*acme.Challenge {
+ // For each http-01 challenge, generate the file to place underneath the nginx challenge folder
+ acmeCtx := hDocker.NewBuildContext()
+ var challengesToAccept []*acme.Challenge
+ for _, auth := range auths {
+ for _, challenge := range auth.Challenges {
+ if challenge.Status != acme.StatusPending {
+ t.Logf("ignoring challenge not in status pending: %v", challenge)
+ continue
+ }
+
+ if challenge.Type == "http-01" {
+ challengeBody, err := acmeClient.HTTP01ChallengeResponse(challenge.Token)
+ require.NoError(t, err, "failed generating challenge response")
+
+ challengePath := acmeClient.HTTP01ChallengePath(challenge.Token)
+
+ challengeFile := path.Base(challengePath)
+
+ acmeCtx[challengeFile] = hDocker.PathContentsFromString(challengeBody)
+
+ challengesToAccept = append(challengesToAccept, challenge)
+ }
+ }
+ }
+
+ require.GreaterOrEqual(t, len(challengesToAccept), 1, "Need at least one challenge, got none")
+
+ // Copy all challenges within the nginx container
+ err = runner.CopyTo(nginxContainerId, challengeFolder, acmeCtx)
+ require.NoError(t, err, "failed copying challenges to container")
+
+ return challengesToAccept
+ }
+
+ acmeCert := doAcmeValidationWithGoLibrary(t, directoryUrl, acmeOrderIdentifiers, cr, provisioningFunc, "")
+
+ require.Len(t, acmeCert.IPAddresses, 1, "expected only a single ip address in cert")
+ require.Equal(t, ipAddr, acmeCert.IPAddresses[0].String())
+ require.Equal(t, []string{hostname}, acmeCert.DNSNames)
+ require.Equal(t, hostname, acmeCert.Subject.CommonName)
+
+ // Perform an ACME lifecycle with an order that contains just an IP identifier
+ err = pki.UpdateRole("ip-sans", map[string]interface{}{
+ "key_type": "any",
+ "use_csr_common_name": false,
+ "require_cn": false,
+ "client_flag": false,
+ })
+ require.NoError(t, err, "failed creating role ip-sans")
+
+ directoryUrl = basePath + "/roles/ip-sans/acme/directory"
+ acmeOrderIdentifiers = []acme.AuthzID{
+ {Type: "ip", Value: ipAddr},
+ }
+ cr = &x509.CertificateRequest{
+ IPAddresses: []net.IP{net.ParseIP(ipAddr)},
+ }
+
+ acmeCert = doAcmeValidationWithGoLibrary(t, directoryUrl, acmeOrderIdentifiers, cr, provisioningFunc, "")
+
+ require.Len(t, acmeCert.IPAddresses, 1, "expected only a single ip address in cert")
+ require.Equal(t, ipAddr, acmeCert.IPAddresses[0].String())
+ require.Empty(t, acmeCert.DNSNames, "acme cert dns name field should have been empty")
+ require.Equal(t, "", acmeCert.Subject.CommonName)
+}
+
+type acmeGoValidatorProvisionerFunc func(acmeClient *acme.Client, auths []*acme.Authorization) []*acme.Challenge
+
+func doAcmeValidationWithGoLibrary(t *testing.T, directoryUrl string, acmeOrderIdentifiers []acme.AuthzID, cr *x509.CertificateRequest, provisioningFunc acmeGoValidatorProvisionerFunc, expectedFailure string) *x509.Certificate {
+ // Since we are contacting Vault through the host ip/port, the certificate will not validate properly
+ tr := &http.Transport{
+
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+ }
+ httpClient := &http.Client{Transport: tr}
+
+ accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
+ require.NoError(t, err, "failed creating rsa account key")
+
+ t.Logf("Using the following url for the ACME directory: %s", directoryUrl)
+ acmeClient := &acme.Client{
+ Key: accountKey,
+ HTTPClient: httpClient,
+ DirectoryURL: directoryUrl,
+ }
+
+ testCtx, cancelFunc := context.WithTimeout(context.Background(), 2*time.Minute)
+ defer cancelFunc()
+
+ // Create new account
+ _, err = acmeClient.Register(testCtx, &acme.Account{Contact: []string{"mailto:ipsans@dadgarcorp.com"}},
+ func(tosURL string) bool { return true })
+ require.NoError(t, err, "failed registering account")
+
+ // Create an ACME order
+ order, err := acmeClient.AuthorizeOrder(testCtx, acmeOrderIdentifiers)
+ require.NoError(t, err, "failed creating ACME order")
+
+ var auths []*acme.Authorization
+ for _, authUrl := range order.AuthzURLs {
+ authorization, err := acmeClient.GetAuthorization(testCtx, authUrl)
+ require.NoError(t, err, "failed to lookup authorization at url: %s", authUrl)
+ auths = append(auths, authorization)
+ }
+
+ // Handle the validation using the external validation mechanism.
+ challengesToAccept := provisioningFunc(acmeClient, auths)
+ require.NotEmpty(t, challengesToAccept, "provisioning function failed to return any challenges to accept")
+
+ // Tell the ACME server that it can now validate those challenges.
+ for _, challenge := range challengesToAccept {
+ _, err = acmeClient.Accept(testCtx, challenge)
+ require.NoError(t, err, "failed to accept challenge: %v", challenge)
+ }
+
+ // Wait for the order/challenges to be validated.
+ _, err = acmeClient.WaitOrder(testCtx, order.URI)
+ require.NoError(t, err, "failed waiting for order to be ready")
+
+ // Create/sign the CSR and ask the ACME server to sign it, returning the final certificate.
+ csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ require.NoError(t, err, "failed generating csr key")
+ csr, err := x509.CreateCertificateRequest(rand.Reader, cr, csrKey)
+ require.NoError(t, err, "failed generating csr")
+
+ t.Logf("[TEST-LOG] Created CSR: %v", hex.EncodeToString(csr))
+
+ certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, false)
+ if err != nil {
+ if expectedFailure != "" {
+ require.Contains(t, err.Error(), expectedFailure, "got an unexpected failure not matching expected value")
+ return nil
+ }
+
+ require.NoError(t, err, "failed to get a certificate back from ACME")
+ } else if expectedFailure != "" {
+ t.Fatalf("expected failure containing: %s got none", expectedFailure)
+ }
+
+ acmeCert, err := x509.ParseCertificate(certs[0])
+ require.NoError(t, err, "failed parsing acme cert bytes")
+
+ return acmeCert
+}
+
+func SubtestACMEWildcardDNS(t *testing.T, cluster *VaultPkiCluster) {
+ pki, err := cluster.CreateAcmeMount("pki-dns-wildcards")
+ require.NoError(t, err, "failed setting up acme mount")
+
+ // Since we interact with ACME from outside the container network the ACME
+ // configuration needs to be updated to use the host port and not the internal
+ // docker ip.
+ basePath, err := pki.UpdateClusterConfigLocalAddr()
+ require.NoError(t, err, "failed updating cluster config")
+
+ hostname := "go-lang-wildcard-client.dadgarcorp.com"
+ wildcard := "*." + hostname
+
+ // Do validation without a role first.
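+ // The bare /acme/directory endpoint is not tied to a role; the same order
+ // is repeated below through a role-scoped directory to confirm that role
+ // policy also permits the wildcard issuance.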
+ directoryUrl := basePath + "/acme/directory" + acmeOrderIdentifiers := []acme.AuthzID{ + {Type: "dns", Value: hostname}, + {Type: "dns", Value: wildcard}, + } + cr := &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: wildcard}, + DNSNames: []string{hostname, wildcard}, + } + + provisioningFunc := func(acmeClient *acme.Client, auths []*acme.Authorization) []*acme.Challenge { + // For each dns-01 challenge, place the record in the associated DNS resolver. + var challengesToAccept []*acme.Challenge + for _, auth := range auths { + for _, challenge := range auth.Challenges { + if challenge.Status != acme.StatusPending { + t.Logf("ignoring challenge not in status pending: %v", challenge) + continue + } + + if challenge.Type == "dns-01" { + challengeBody, err := acmeClient.DNS01ChallengeRecord(challenge.Token) + require.NoError(t, err, "failed generating challenge response") + + err = pki.AddDNSRecord("_acme-challenge."+auth.Identifier.Value, "TXT", challengeBody) + require.NoError(t, err, "failed setting DNS record") + + challengesToAccept = append(challengesToAccept, challenge) + } + } + } + + require.GreaterOrEqual(t, len(challengesToAccept), 1, "Need at least one challenge, got none") + return challengesToAccept + } + + acmeCert := doAcmeValidationWithGoLibrary(t, directoryUrl, acmeOrderIdentifiers, cr, provisioningFunc, "") + require.Contains(t, acmeCert.DNSNames, hostname) + require.Contains(t, acmeCert.DNSNames, wildcard) + require.Equal(t, wildcard, acmeCert.Subject.CommonName) + pki.RemoveDNSRecordsForDomain(hostname) + + // Redo validation with a role this time. + err = pki.UpdateRole("wildcard", map[string]interface{}{ + "key_type": "any", + "allowed_domains": "go-lang-wildcard-client.dadgarcorp.com", + "allow_subdomains": true, + "allow_bare_domains": true, + "allow_wildcard_certificates": true, + "client_flag": false, + }) + require.NoError(t, err, "failed creating role wildcard") + directoryUrl = basePath + "/roles/wildcard/acme/directory" + + acmeCert = doAcmeValidationWithGoLibrary(t, directoryUrl, acmeOrderIdentifiers, cr, provisioningFunc, "") + require.Contains(t, acmeCert.DNSNames, hostname) + require.Contains(t, acmeCert.DNSNames, wildcard) + require.Equal(t, wildcard, acmeCert.Subject.CommonName) + pki.RemoveDNSRecordsForDomain(hostname) +} + +func SubtestACMEPreventsICADNS(t *testing.T, cluster *VaultPkiCluster) { + pki, err := cluster.CreateAcmeMount("pki-dns-ica") + require.NoError(t, err, "failed setting up acme mount") + + // Since we interact with ACME from outside the container network the ACME + // configuration needs to be updated to use the host port and not the internal + // docker ip. + basePath, err := pki.UpdateClusterConfigLocalAddr() + require.NoError(t, err, "failed updating cluster config") + + hostname := "go-lang-intermediate-ca-cert.dadgarcorp.com" + + // Do validation without a role first. + directoryUrl := basePath + "/acme/directory" + acmeOrderIdentifiers := []acme.AuthzID{ + {Type: "dns", Value: hostname}, + } + cr := &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: hostname}, + DNSNames: []string{hostname}, + ExtraExtensions: []pkix.Extension{ + // Basic Constraint with IsCA asserted to true. + { + Id: certutil.ExtensionBasicConstraintsOID, + Critical: true, + Value: []byte{0x30, 0x03, 0x01, 0x01, 0xFF}, + }, + }, + } + + provisioningFunc := func(acmeClient *acme.Client, auths []*acme.Authorization) []*acme.Challenge { + // For each dns-01 challenge, place the record in the associated DNS resolver. 
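+ // Per RFC 8555, the dns-01 proof is published as a TXT record at
+ // _acme-challenge.<identifier> on the test DNS server that Vault was
+ // configured to use as its resolver.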
+ var challengesToAccept []*acme.Challenge + for _, auth := range auths { + for _, challenge := range auth.Challenges { + if challenge.Status != acme.StatusPending { + t.Logf("ignoring challenge not in status pending: %v", challenge) + continue + } + + if challenge.Type == "dns-01" { + challengeBody, err := acmeClient.DNS01ChallengeRecord(challenge.Token) + require.NoError(t, err, "failed generating challenge response") + + err = pki.AddDNSRecord("_acme-challenge."+auth.Identifier.Value, "TXT", challengeBody) + require.NoError(t, err, "failed setting DNS record") + + challengesToAccept = append(challengesToAccept, challenge) + } + } + } + + require.GreaterOrEqual(t, len(challengesToAccept), 1, "Need at least one challenge, got none") + return challengesToAccept + } + + doAcmeValidationWithGoLibrary(t, directoryUrl, acmeOrderIdentifiers, cr, provisioningFunc, "refusing to accept CSR with Basic Constraints extension") + pki.RemoveDNSRecordsForDomain(hostname) + + // Redo validation with a role this time. + err = pki.UpdateRole("ica", map[string]interface{}{ + "key_type": "any", + "allowed_domains": "go-lang-intermediate-ca-cert.dadgarcorp.com", + "allow_subdomains": true, + "allow_bare_domains": true, + "allow_wildcard_certificates": true, + "client_flag": false, + }) + require.NoError(t, err, "failed creating role wildcard") + directoryUrl = basePath + "/roles/ica/acme/directory" + + doAcmeValidationWithGoLibrary(t, directoryUrl, acmeOrderIdentifiers, cr, provisioningFunc, "refusing to accept CSR with Basic Constraints extension") + pki.RemoveDNSRecordsForDomain(hostname) +} + +// SubtestACMEStepDownNode Verify that we can properly run an ACME session through a +// secondary node, and midway through the challenge verification process, seal the +// active node and make sure we can complete the ACME session on the new active node. +func SubtestACMEStepDownNode(t *testing.T, cluster *VaultPkiCluster) { + pki, err := cluster.CreateAcmeMount("stepdown-test") + require.NoError(t, err) + + // Since we interact with ACME from outside the container network the ACME + // configuration needs to be updated to use the host port and not the internal + // docker ip. We also grab the non-active node here on purpose to verify + // ACME related APIs are properly forwarded across standby hosts. 
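+ // Sequence: start the order through a standby, accept the challenges
+ // before any DNS records exist (forcing validation failures), seal the
+ // active node, publish the records, then finish the order on the new
+ // leader.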
+ nonActiveNodes := pki.GetNonActiveNodes()
+ require.GreaterOrEqual(t, len(nonActiveNodes), 1, "Need at least one non-active node")
+
+ nonActiveNode := nonActiveNodes[0]
+
+ basePath := fmt.Sprintf("https://%s/v1/%s", nonActiveNode.HostPort, pki.mount)
+ err = pki.UpdateClusterConfig(map[string]interface{}{
+ "path": basePath,
+ })
+ require.NoError(t, err, "failed updating cluster config")
+
+ hostname := "go-lang-stepdown-client.dadgarcorp.com"
+
+ acmeOrderIdentifiers := []acme.AuthzID{
+ {Type: "dns", Value: hostname},
+ }
+ cr := &x509.CertificateRequest{
+ DNSNames: []string{hostname, hostname},
+ }
+
+ accountKey, err := rsa.GenerateKey(rand.Reader, 2048)
+ require.NoError(t, err, "failed creating rsa account key")
+
+ acmeClient := &acme.Client{
+ Key: accountKey,
+ HTTPClient: &http.Client{Transport: &http.Transport{
+ TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
+ }},
+ DirectoryURL: basePath + "/acme/directory",
+ }
+
+ testCtx, cancelFunc := context.WithTimeout(context.Background(), 2*time.Minute)
+ defer cancelFunc()
+
+ // Create new account
+ _, err = acmeClient.Register(testCtx, &acme.Account{Contact: []string{"mailto:ipsans@dadgarcorp.com"}},
+ func(tosURL string) bool { return true })
+ require.NoError(t, err, "failed registering account")
+
+ // Create an ACME order
+ order, err := acmeClient.AuthorizeOrder(testCtx, acmeOrderIdentifiers)
+ require.NoError(t, err, "failed creating ACME order")
+
+ require.Len(t, order.AuthzURLs, 1, "expected a single authz url")
+ authUrl := order.AuthzURLs[0]
+
+ authorization, err := acmeClient.GetAuthorization(testCtx, authUrl)
+ require.NoError(t, err, "failed to lookup authorization at url: %s", authUrl)
+
+ dnsTxtRecordsToAdd := map[string]string{}
+
+ var challengesToAccept []*acme.Challenge
+ for _, challenge := range authorization.Challenges {
+ if challenge.Status != acme.StatusPending {
+ t.Logf("ignoring challenge not in status pending: %v", challenge)
+ continue
+ }
+
+ if challenge.Type == "dns-01" {
+ challengeBody, err := acmeClient.DNS01ChallengeRecord(challenge.Token)
+ require.NoError(t, err, "failed generating challenge response")
+
+ // Collect the challenges for us to add the DNS records after step-down
+ dnsTxtRecordsToAdd["_acme-challenge."+authorization.Identifier.Value] = challengeBody
+ challengesToAccept = append(challengesToAccept, challenge)
+ }
+ }
+
+ // Tell the ACME server that it can now validate those challenges; this will cause challenge
+ // verification failures on the main node, as the DNS records do not exist yet.
+ for _, challenge := range challengesToAccept {
+ _, err = acmeClient.Accept(testCtx, challenge)
+ require.NoError(t, err, "failed to accept challenge: %v", challenge)
+ }
+
+ // Now wait until the challenge engine starts failing the lookups.
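+ // A non-nil challenge.Error is our signal that the soon-to-be-sealed
+ // active node has attempted the DNS lookup and failed, i.e. verification
+ // was in flight before the failover.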
+ testhelpers.RetryUntil(t, 10*time.Second, func() error {
+ myAuth, err := acmeClient.GetAuthorization(testCtx, authUrl)
+ require.NoError(t, err, "failed to lookup authorization at url: %s", authUrl)
+
+ for _, challenge := range myAuth.Challenges {
+ if challenge.Error != nil {
+ // The engine failed on one of the challenges; we are done waiting.
+ return nil
+ }
+ }
+
+ return fmt.Errorf("no challenges for auth %v contained any errors", myAuth.Identifier)
+ })
+
+ // Seal the active node now and wait for the next node to appear
+ previousActiveNode := pki.GetActiveClusterNode()
+ t.Logf("Stepping down node id: %s", previousActiveNode.NodeID)
+
+ haStatus, _ := previousActiveNode.APIClient().Sys().HAStatus()
+ t.Logf("Node: %v HaStatus: %v\n", previousActiveNode.NodeID, haStatus)
+
+ testhelpers.RetryUntil(t, 2*time.Minute, func() error {
+ state, err := previousActiveNode.APIClient().Sys().RaftAutopilotState()
+ if err != nil {
+ return err
+ }
+
+ t.Logf("Node: %v Raft AutoPilotState: %v\n", previousActiveNode.NodeID, state)
+
+ if !state.Healthy {
+ return fmt.Errorf("raft auto pilot state is not healthy")
+ }
+
+ // Make sure that we have at least one node that can take over prior to sealing the current active node.
+ if state.FailureTolerance < 1 {
+ msg := fmt.Sprintf("there is no fault tolerance within raft state yet: %d", state.FailureTolerance)
+ t.Log(msg)
+ return errors.New(msg)
+ }
+
+ return nil
+ })
+
+ t.Logf("Sealing active node")
+ err = previousActiveNode.APIClient().Sys().Seal()
+ require.NoError(t, err, "failed sealing active node")
+
+ // Add our DNS records now
+ t.Logf("Adding DNS records")
+ for dnsHost, dnsValue := range dnsTxtRecordsToAdd {
+ err = pki.AddDNSRecord(dnsHost, "TXT", dnsValue)
+ require.NoError(t, err, "failed adding DNS record: %s:%s", dnsHost, dnsValue)
+ }
+
+ // Wait for our new active node to come up
+ testhelpers.RetryUntil(t, 2*time.Minute, func() error {
+ newNode := pki.GetActiveClusterNode()
+ if newNode.NodeID == previousActiveNode.NodeID {
+ return fmt.Errorf("existing node is still the leader after stepdown: %s", newNode.NodeID)
+ }
+
+ t.Logf("New active node has node id: %v", newNode.NodeID)
+ return nil
+ })
+
+ // Wait for the order/challenges to be validated.
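+ // The new active node retries the pending challenges; with the TXT
+ // records now published, the order should transition to ready.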
+ _, err = acmeClient.WaitOrder(testCtx, order.URI)
+ if err != nil {
+ // We failed waiting for the order to become ready; let's print out current challenge statuses to help debugging
+ myAuth, authErr := acmeClient.GetAuthorization(testCtx, authUrl)
+ require.NoError(t, authErr, "failed to lookup authorization at url: %s and wait order failed with: %v", authUrl, err)
+
+ t.Logf("Authorization Status: %s", myAuth.Status)
+ for _, challenge := range myAuth.Challenges {
+ // Log each challenge's type, state and error to aid debugging.
+ t.Logf("challenge: %v state: %v Error: %v", challenge.Type, challenge.Status, challenge.Error)
+ }
+
+ require.NoError(t, err, "failed waiting for order to be ready")
+ }
+
+ // Create/sign the CSR and ask the ACME server to sign it, returning the final certificate.
+ csrKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ require.NoError(t, err, "failed generating csr key")
+ csr, err := x509.CreateCertificateRequest(rand.Reader, cr, csrKey)
+ require.NoError(t, err, "failed generating csr")
+
+ certs, _, err := acmeClient.CreateOrderCert(testCtx, order.FinalizeURL, csr, false)
+ require.NoError(t, err, "failed to get a certificate back from ACME")
+
+ _, err = x509.ParseCertificate(certs[0])
+ require.NoError(t, err, "failed parsing acme cert bytes")
+}
+
+func getDockerLog(t *testing.T) (func(s string), *pkiext.LogConsumerWriter, *pkiext.LogConsumerWriter) {
+ logConsumer := func(s string) {
+ t.Log(s)
+ }
+
+ logStdout := &pkiext.LogConsumerWriter{logConsumer}
+ logStderr := &pkiext.LogConsumerWriter{logConsumer}
+ return logConsumer, logStdout, logStderr
+}
diff --git a/builtin/logical/pkiext/pkiext_binary/pki_cluster.go b/builtin/logical/pkiext/pkiext_binary/pki_cluster.go
new file mode 100644
index 0000000..9f6a19f
--- /dev/null
+++ b/builtin/logical/pkiext/pkiext_binary/pki_cluster.go
@@ -0,0 +1,316 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package pkiext_binary
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/builtin/logical/pki/dnstest"
+ dockhelper "github.com/hashicorp/vault/sdk/helper/docker"
+ "github.com/hashicorp/vault/sdk/helper/testcluster"
+ "github.com/hashicorp/vault/sdk/helper/testcluster/docker"
+)
+
+type VaultPkiCluster struct {
+ cluster *docker.DockerCluster
+ Dns *dnstest.TestServer
+}
+
+func NewVaultPkiCluster(t *testing.T) *VaultPkiCluster {
+ binary := os.Getenv("VAULT_BINARY")
+ if binary == "" {
+ t.Skip("only running docker test when $VAULT_BINARY present")
+ }
+
+ opts := &docker.DockerClusterOptions{
+ ImageRepo: "docker.mirror.hashicorp.services/hashicorp/vault",
+ // We're replacing the binary anyway, so we're not too particular about
+ // the docker image version tag.
+ ImageTag: "latest", + VaultBinary: binary, + ClusterOptions: testcluster.ClusterOptions{ + VaultNodeConfig: &testcluster.VaultNodeConfig{ + LogLevel: "TRACE", + }, + NumCores: 3, + }, + } + + cluster := docker.NewTestDockerCluster(t, opts) + + return &VaultPkiCluster{cluster: cluster} +} + +func NewVaultPkiClusterWithDNS(t *testing.T) *VaultPkiCluster { + cluster := NewVaultPkiCluster(t) + dns := dnstest.SetupResolverOnNetwork(t, "dadgarcorp.com", cluster.GetContainerNetworkName()) + cluster.Dns = dns + return cluster +} + +func (vpc *VaultPkiCluster) Cleanup() { + vpc.cluster.Cleanup() + if vpc.Dns != nil { + vpc.Dns.Cleanup() + } +} + +func (vpc *VaultPkiCluster) GetActiveClusterNode() *docker.DockerClusterNode { + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + node, err := testcluster.WaitForActiveNode(ctx, vpc.cluster) + if err != nil { + panic(fmt.Sprintf("no cluster node became active in timeout window: %v", err)) + } + + return vpc.cluster.ClusterNodes[node] +} + +func (vpc *VaultPkiCluster) GetNonActiveNodes() []*docker.DockerClusterNode { + nodes := []*docker.DockerClusterNode{} + for _, node := range vpc.cluster.ClusterNodes { + leader, err := node.APIClient().Sys().Leader() + if err != nil { + continue + } + + if !leader.IsSelf { + nodes = append(nodes, node) + } + } + + return nodes +} + +func (vpc *VaultPkiCluster) GetActiveContainerHostPort() string { + return vpc.GetActiveClusterNode().HostPort +} + +func (vpc *VaultPkiCluster) GetContainerNetworkName() string { + return vpc.cluster.ClusterNodes[0].ContainerNetworkName +} + +func (vpc *VaultPkiCluster) GetActiveContainerIP() string { + return vpc.GetActiveClusterNode().ContainerIPAddress +} + +func (vpc *VaultPkiCluster) GetActiveContainerID() string { + return vpc.GetActiveClusterNode().Container.ID +} + +func (vpc *VaultPkiCluster) GetActiveNode() *api.Client { + return vpc.GetActiveClusterNode().APIClient() +} + +// GetListenerCACertPEM returns the Vault cluster's PEM-encoded CA certificate. 
+func (vpc *VaultPkiCluster) GetListenerCACertPEM() []byte { + return vpc.cluster.CACertPEM +} + +func (vpc *VaultPkiCluster) AddHostname(hostname, ip string) error { + if vpc.Dns != nil { + vpc.Dns.AddRecord(hostname, "A", ip) + vpc.Dns.PushConfig() + return nil + } else { + return vpc.AddNameToHostFiles(hostname, ip) + } +} + +func (vpc *VaultPkiCluster) AddNameToHostFiles(hostname, ip string) error { + updateHostsCmd := []string{ + "sh", "-c", + "echo '" + ip + " " + hostname + "' >> /etc/hosts", + } + for _, node := range vpc.cluster.ClusterNodes { + containerID := node.Container.ID + _, _, retcode, err := dockhelper.RunCmdWithOutput(vpc.cluster.DockerAPI, context.Background(), containerID, updateHostsCmd) + if err != nil { + return fmt.Errorf("failed updating container %s host file: %w", containerID, err) + } + + if retcode != 0 { + return fmt.Errorf("expected zero retcode from updating vault host file in container %s got: %d", containerID, retcode) + } + } + + return nil +} + +func (vpc *VaultPkiCluster) AddDNSRecord(hostname, recordType, ip string) error { + if vpc.Dns == nil { + return fmt.Errorf("no DNS server was provisioned on this cluster group; unable to provision custom records") + } + + vpc.Dns.AddRecord(hostname, recordType, ip) + vpc.Dns.PushConfig() + return nil +} + +func (vpc *VaultPkiCluster) RemoveDNSRecord(domain string, record string, value string) error { + if vpc.Dns == nil { + return fmt.Errorf("no DNS server was provisioned on this cluster group; unable to remove specific record") + } + + vpc.Dns.RemoveRecord(domain, record, value) + return nil +} + +func (vpc *VaultPkiCluster) RemoveDNSRecordsOfTypeForDomain(domain string, record string) error { + if vpc.Dns == nil { + return fmt.Errorf("no DNS server was provisioned on this cluster group; unable to remove all records of type") + } + + vpc.Dns.RemoveRecordsOfTypeForDomain(domain, record) + return nil +} + +func (vpc *VaultPkiCluster) RemoveDNSRecordsForDomain(domain string) error { + if vpc.Dns == nil { + return fmt.Errorf("no DNS server was provisioned on this cluster group; unable to remove records for domain") + } + + vpc.Dns.RemoveRecordsForDomain(domain) + return nil +} + +func (vpc *VaultPkiCluster) RemoveAllDNSRecords() error { + if vpc.Dns == nil { + return fmt.Errorf("no DNS server was provisioned on this cluster group; unable to remove all records") + } + + vpc.Dns.RemoveAllRecords() + return nil +} + +func (vpc *VaultPkiCluster) CreateMount(name string) (*VaultPkiMount, error) { + err := vpc.GetActiveNode().Sys().Mount(name, &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "32h", + AllowedResponseHeaders: []string{ + "Last-Modified", "Replay-Nonce", + "Link", "Location", + }, + }, + }) + if err != nil { + return nil, err + } + + return &VaultPkiMount{ + vpc, + name, + }, nil +} + +func (vpc *VaultPkiCluster) CreateAcmeMount(mountName string) (*VaultPkiMount, error) { + pki, err := vpc.CreateMount(mountName) + if err != nil { + return nil, fmt.Errorf("failed creating mount %s: %w", mountName, err) + } + + err = pki.UpdateClusterConfig(nil) + if err != nil { + return nil, fmt.Errorf("failed updating cluster config: %w", err) + } + + cfg := map[string]interface{}{ + "eab_policy": "not-required", + } + if vpc.Dns != nil { + cfg["dns_resolver"] = vpc.Dns.GetRemoteAddr() + } + + err = pki.UpdateAcmeConfig(true, cfg) + if err != nil { + return nil, fmt.Errorf("failed updating acme config: %w", err) + } + + // Setup root+intermediate CA hierarchy within 
this mount. + resp, err := pki.GenerateRootInternal(map[string]interface{}{ + "common_name": "Root X1", + "country": "US", + "organization": "Dadgarcorp", + "ou": "QA", + "key_type": "ec", + "key_bits": 256, + "use_pss": false, + "issuer_name": "root", + }) + if err != nil { + return nil, fmt.Errorf("failed generating root internal: %w", err) + } + if resp == nil || len(resp.Data) == 0 { + return nil, fmt.Errorf("failed generating root internal: nil or empty response but no error") + } + + resp, err = pki.GenerateIntermediateInternal(map[string]interface{}{ + "common_name": "Intermediate I1", + "country": "US", + "organization": "Dadgarcorp", + "ou": "QA", + "key_type": "ec", + "key_bits": 256, + "use_pss": false, + }) + if err != nil { + return nil, fmt.Errorf("failed generating int csr: %w", err) + } + if resp == nil || len(resp.Data) == 0 { + return nil, fmt.Errorf("failed generating int csr: nil or empty response but no error") + } + + resp, err = pki.SignIntermediary("default", resp.Data["csr"], map[string]interface{}{ + "common_name": "Intermediate I1", + "country": "US", + "organization": "Dadgarcorp", + "ou": "QA", + "key_type": "ec", + "csr": resp.Data["csr"], + }) + if err != nil { + return nil, fmt.Errorf("failed signing int csr: %w", err) + } + if resp == nil || len(resp.Data) == 0 { + return nil, fmt.Errorf("failed signing int csr: nil or empty response but no error") + } + intCert := resp.Data["certificate"].(string) + + resp, err = pki.ImportBundle(intCert, nil) + if err != nil { + return nil, fmt.Errorf("failed importing signed cert: %w", err) + } + if resp == nil || len(resp.Data) == 0 { + return nil, fmt.Errorf("failed importing signed cert: nil or empty response but no error") + } + + err = pki.UpdateDefaultIssuer(resp.Data["imported_issuers"].([]interface{})[0].(string), nil) + if err != nil { + return nil, fmt.Errorf("failed to set intermediate as default: %w", err) + } + + err = pki.UpdateIssuer("default", map[string]interface{}{ + "leaf_not_after_behavior": "truncate", + }) + if err != nil { + return nil, fmt.Errorf("failed to update intermediate ttl behavior: %w", err) + } + + err = pki.UpdateIssuer("root", map[string]interface{}{ + "leaf_not_after_behavior": "truncate", + }) + if err != nil { + return nil, fmt.Errorf("failed to update root ttl behavior: %w", err) + } + + return pki, nil +} diff --git a/builtin/logical/pkiext/pkiext_binary/pki_mount.go b/builtin/logical/pkiext/pkiext_binary/pki_mount.go new file mode 100644 index 0000000..770b7ac --- /dev/null +++ b/builtin/logical/pkiext/pkiext_binary/pki_mount.go @@ -0,0 +1,160 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package pkiext_binary + +import ( + "context" + "encoding/base64" + "fmt" + "path" + + "github.com/hashicorp/vault/api" +) + +type VaultPkiMount struct { + *VaultPkiCluster + mount string +} + +func (vpm *VaultPkiMount) UpdateClusterConfig(config map[string]interface{}) error { + defaultPath := "https://" + vpm.cluster.ClusterNodes[0].ContainerIPAddress + ":8200/v1/" + vpm.mount + defaults := map[string]interface{}{ + "path": defaultPath, + "aia_path": defaultPath, + } + + _, err := vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), + vpm.mount+"/config/cluster", mergeWithDefaults(config, defaults)) + return err +} + +func (vpm *VaultPkiMount) UpdateClusterConfigLocalAddr() (string, error) { + basePath := fmt.Sprintf("https://%s/v1/%s", vpm.GetActiveContainerHostPort(), vpm.mount) + return basePath, vpm.UpdateClusterConfig(map[string]interface{}{ + "path": basePath, + }) +} + +func (vpm *VaultPkiMount) UpdateAcmeConfig(enable bool, config map[string]interface{}) error { + defaults := map[string]interface{}{ + "enabled": enable, + } + + _, err := vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), + vpm.mount+"/config/acme", mergeWithDefaults(config, defaults)) + return err +} + +func (vpm *VaultPkiMount) GenerateRootInternal(props map[string]interface{}) (*api.Secret, error) { + defaults := map[string]interface{}{ + "common_name": "root-test.com", + "key_type": "ec", + "issuer_name": "root", + } + + return vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), + vpm.mount+"/root/generate/internal", mergeWithDefaults(props, defaults)) +} + +func (vpm *VaultPkiMount) GenerateIntermediateInternal(props map[string]interface{}) (*api.Secret, error) { + defaults := map[string]interface{}{ + "common_name": "intermediary-test.com", + "key_type": "ec", + "issuer_name": "intermediary", + } + + return vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), + vpm.mount+"/intermediate/generate/internal", mergeWithDefaults(props, defaults)) +} + +func (vpm *VaultPkiMount) SignIntermediary(signingIssuer string, csr interface{}, props map[string]interface{}) (*api.Secret, error) { + defaults := map[string]interface{}{ + "csr": csr, + } + + return vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), + vpm.mount+"/issuer/"+signingIssuer+"/sign-intermediate", + mergeWithDefaults(props, defaults)) +} + +func (vpm *VaultPkiMount) ImportBundle(pemBundle interface{}, props map[string]interface{}) (*api.Secret, error) { + defaults := map[string]interface{}{ + "pem_bundle": pemBundle, + } + + return vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), + vpm.mount+"/issuers/import/bundle", mergeWithDefaults(props, defaults)) +} + +func (vpm *VaultPkiMount) UpdateDefaultIssuer(issuerId string, props map[string]interface{}) error { + defaults := map[string]interface{}{ + "default": issuerId, + } + + _, err := vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), + vpm.mount+"/config/issuers", mergeWithDefaults(props, defaults)) + + return err +} + +func (vpm *VaultPkiMount) UpdateIssuer(issuerRef string, props map[string]interface{}) error { + defaults := map[string]interface{}{} + + _, err := vpm.GetActiveNode().Logical().JSONMergePatch(context.Background(), + vpm.mount+"/issuer/"+issuerRef, mergeWithDefaults(props, defaults)) + + return err +} + +func (vpm *VaultPkiMount) UpdateRole(roleName string, config map[string]interface{}) error { + defaults := 
map[string]interface{}{} + + _, err := vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), + vpm.mount+"/roles/"+roleName, mergeWithDefaults(config, defaults)) + + return err +} + +func (vpm *VaultPkiMount) GetEabKey(acmeDirectory string) (string, string, error) { + eabPath := path.Join(vpm.mount, acmeDirectory, "/new-eab") + resp, err := vpm.GetActiveNode().Logical().WriteWithContext(context.Background(), eabPath, map[string]interface{}{}) + if err != nil { + return "", "", fmt.Errorf("failed fetching eab from %s: %w", eabPath, err) + } + eabId := resp.Data["id"].(string) + base64EabKey := resp.Data["key"].(string) + // just make sure we get something valid back from the server, we still want to pass back the base64 version + // to the caller... + _, err = base64.RawURLEncoding.DecodeString(base64EabKey) + if err != nil { + return "", "", fmt.Errorf("failed decoding key response field: %s: %w", base64EabKey, err) + } + return eabId, base64EabKey, nil +} + +// GetCACertPEM retrieves the PKI mount's PEM-encoded CA certificate. +func (vpm *VaultPkiMount) GetCACertPEM() (string, error) { + caCertPath := path.Join(vpm.mount, "/cert/ca") + resp, err := vpm.GetActiveNode().Logical().ReadWithContext(context.Background(), caCertPath) + if err != nil { + return "", err + } + return resp.Data["certificate"].(string), nil +} + +func mergeWithDefaults(config map[string]interface{}, defaults map[string]interface{}) map[string]interface{} { + myConfig := config + if myConfig == nil { + myConfig = map[string]interface{}{} + } + for key, value := range defaults { + if origVal, exists := config[key]; !exists { + myConfig[key] = value + } else { + myConfig[key] = origVal + } + } + + return myConfig +} diff --git a/builtin/logical/pkiext/pkiext_binary/testdata/caddy_http.json b/builtin/logical/pkiext/pkiext_binary/testdata/caddy_http.json new file mode 100644 index 0000000..272ecd1 --- /dev/null +++ b/builtin/logical/pkiext/pkiext_binary/testdata/caddy_http.json @@ -0,0 +1,66 @@ +{ + "apps": { + "http": { + "servers": { + "srv0": { + "listen": [ + ":80", + ":443" + ], + "routes": [ + { + "match": [ + { + "host": [ + "{{.Hostname}}" + ] + } + ], + "handle": [ + { + "handler": "subroute", + "routes": [ + { + "handle": [ + { + "body": "Hello!", + "handler": "static_response" + } + ] + } + ] + } + ], + "terminal": true + } + ] + } + } + }, + "tls": { + "automation": { + "policies": [ + { + "subjects": [ + "{{.Hostname}}" + ], + "issuers": [ + { + "ca": "{{.Directory}}", + "module": "acme", + "challenges": { + "tls-alpn": { + "disabled": true + } + }, + "trusted_roots_pem_files": [ + "{{.CACert}}" + ] + } + ] + } + ] + } + } + } +} diff --git a/builtin/logical/pkiext/pkiext_binary/testdata/caddy_http_eab.json b/builtin/logical/pkiext/pkiext_binary/testdata/caddy_http_eab.json new file mode 100644 index 0000000..61cab88 --- /dev/null +++ b/builtin/logical/pkiext/pkiext_binary/testdata/caddy_http_eab.json @@ -0,0 +1,70 @@ +{ + "apps": { + "http": { + "servers": { + "srv0": { + "listen": [ + ":80", + ":443" + ], + "routes": [ + { + "match": [ + { + "host": [ + "{{.Hostname}}" + ] + } + ], + "handle": [ + { + "handler": "subroute", + "routes": [ + { + "handle": [ + { + "body": "Hello!", + "handler": "static_response" + } + ] + } + ] + } + ], + "terminal": true + } + ] + } + } + }, + "tls": { + "automation": { + "policies": [ + { + "subjects": [ + "{{.Hostname}}" + ], + "issuers": [ + { + "ca": "{{.Directory}}", + "module": "acme", + "external_account": { + "key_id": "{{.EABID}}", + "mac_key": 
"{{.EABKey}}" + }, + "challenges": { + "tls-alpn": { + "disabled": true + } + }, + "trusted_roots_pem_files": [ + "{{.CACert}}" + ] + } + ] + } + ] + } + } + } +} diff --git a/builtin/logical/pkiext/pkiext_binary/testdata/caddy_tls_alpn.json b/builtin/logical/pkiext/pkiext_binary/testdata/caddy_tls_alpn.json new file mode 100644 index 0000000..0bc0ea9 --- /dev/null +++ b/builtin/logical/pkiext/pkiext_binary/testdata/caddy_tls_alpn.json @@ -0,0 +1,66 @@ +{ + "apps": { + "http": { + "servers": { + "srv0": { + "listen": [ + ":80", + ":443" + ], + "routes": [ + { + "match": [ + { + "host": [ + "{{.Hostname}}" + ] + } + ], + "handle": [ + { + "handler": "subroute", + "routes": [ + { + "handle": [ + { + "body": "Hello!", + "handler": "static_response" + } + ] + } + ] + } + ], + "terminal": true + } + ] + } + } + }, + "tls": { + "automation": { + "policies": [ + { + "subjects": [ + "{{.Hostname}}" + ], + "issuers": [ + { + "ca": "{{.Directory}}", + "module": "acme", + "challenges": { + "http": { + "disabled": true + } + }, + "trusted_roots_pem_files": [ + "{{.CACert}}" + ] + } + ] + } + ] + } + } + } +} diff --git a/builtin/logical/pkiext/test_helpers.go b/builtin/logical/pkiext/test_helpers.go new file mode 100644 index 0000000..38bbdfe --- /dev/null +++ b/builtin/logical/pkiext/test_helpers.go @@ -0,0 +1,86 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pkiext + +import ( + "bufio" + "bytes" + "crypto" + "crypto/x509" + "encoding/pem" + "fmt" + "testing" + + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/logical" + + "github.com/stretchr/testify/require" +) + +func requireFieldsSetInResp(t *testing.T, resp *logical.Response, fields ...string) { + var missingFields []string + for _, field := range fields { + value, ok := resp.Data[field] + if !ok || value == nil { + missingFields = append(missingFields, field) + } + } + + require.Empty(t, missingFields, "The following fields were required but missing from response:\n%v", resp.Data) +} + +func requireSuccessNonNilResponse(t *testing.T, resp *logical.Response, err error, msgAndArgs ...interface{}) { + require.NoError(t, err, msgAndArgs...) + if resp.IsError() { + errContext := fmt.Sprintf("Expected successful response but got error: %v", resp.Error()) + require.Falsef(t, resp.IsError(), errContext, msgAndArgs...) + } + require.NotNil(t, resp, msgAndArgs...) +} + +func requireSuccessNilResponse(t *testing.T, resp *logical.Response, err error, msgAndArgs ...interface{}) { + require.NoError(t, err, msgAndArgs...) + if resp.IsError() { + errContext := fmt.Sprintf("Expected successful response but got error: %v", resp.Error()) + require.Falsef(t, resp.IsError(), errContext, msgAndArgs...) + } + if resp != nil { + msg := fmt.Sprintf("expected nil response but got: %v", resp) + require.Nilf(t, resp, msg, msgAndArgs...) 
+ } +} + +func parseCert(t *testing.T, pemCert string) *x509.Certificate { + block, _ := pem.Decode([]byte(pemCert)) + require.NotNil(t, block, "failed to decode PEM block") + + cert, err := x509.ParseCertificate(block.Bytes) + require.NoError(t, err) + return cert +} + +func parseKey(t *testing.T, pemKey string) crypto.Signer { + block, _ := pem.Decode([]byte(pemKey)) + require.NotNil(t, block, "failed to decode PEM block") + + key, _, err := certutil.ParseDERKey(block.Bytes) + require.NoError(t, err) + return key +} + +type LogConsumerWriter struct { + Consumer func(string) +} + +func (l LogConsumerWriter) Write(p []byte) (n int, err error) { + // TODO this assumes that we're never passed partial log lines, which + // seems a safe assumption for now based on how docker looks to implement + // logging, but might change in the future. + scanner := bufio.NewScanner(bytes.NewReader(p)) + scanner.Buffer(make([]byte, 64*1024), bufio.MaxScanTokenSize) + for scanner.Scan() { + l.Consumer(scanner.Text()) + } + return len(p), nil +} diff --git a/builtin/logical/pkiext/zlint_test.go b/builtin/logical/pkiext/zlint_test.go new file mode 100644 index 0000000..2f20152 --- /dev/null +++ b/builtin/logical/pkiext/zlint_test.go @@ -0,0 +1,192 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pkiext + +import ( + "context" + "encoding/json" + "sync" + "testing" + + "github.com/hashicorp/vault/builtin/logical/pki" + "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/stretchr/testify/require" +) + +var ( + zRunner *docker.Runner + buildZLintOnce sync.Once +) + +func buildZLintContainer(t *testing.T) { + containerfile := ` +FROM docker.mirror.hashicorp.services/library/golang:latest + +RUN go install github.com/zmap/zlint/v3/cmd/zlint@latest +` + + bCtx := docker.NewBuildContext() + + imageName := "vault_pki_zlint_validator" + imageTag := "latest" + + var err error + zRunner, err = docker.NewServiceRunner(docker.RunOptions{ + ImageRepo: imageName, + ImageTag: imageTag, + ContainerName: "pki_zlint", + // We want to run sleep in the background so we're not stuck waiting + // for the default golang container's shell to prompt for input. + Entrypoint: []string{"sleep", "45"}, + LogConsumer: func(s string) { + if t.Failed() { + t.Logf("container logs: %s", s) + } + }, + }) + if err != nil { + t.Fatalf("Could not provision docker service runner: %s", err) + } + + ctx := context.Background() + output, err := zRunner.BuildImage(ctx, containerfile, bCtx, + docker.BuildRemove(true), docker.BuildForceRemove(true), + docker.BuildPullParent(true), + docker.BuildTags([]string{imageName + ":" + imageTag})) + if err != nil { + t.Fatalf("Could not build new image: %v", err) + } + + t.Logf("Image build output: %v", string(output)) +} + +func RunZLintContainer(t *testing.T, certificate string) []byte { + buildZLintOnce.Do(func() { + buildZLintContainer(t) + }) + + ctx := context.Background() + // We don't actually care about the address, we just want to start the + // container so we can run commands in it. We'd ideally like to skip this + // step and only build a new image, but the zlint output would be + // intermingled with container build stages, so its not that useful. + result, err := zRunner.Start(ctx, true, false) + if err != nil { + t.Fatalf("Could not start golang container for zlint: %s", err) + } + + // Copy the cert into the newly running container. 
+ certCtx := docker.NewBuildContext() + certCtx["cert.pem"] = docker.PathContentsFromBytes([]byte(certificate)) + if err := zRunner.CopyTo(result.Container.ID, "/go/", certCtx); err != nil { + t.Fatalf("Could not copy certificate into container: %v", err) + } + + // Run the zlint command and save the output. + cmd := []string{"/go/bin/zlint", "/go/cert.pem"} + stdout, stderr, retcode, err := zRunner.RunCmdWithOutput(ctx, result.Container.ID, cmd) + if err != nil { + t.Fatalf("Could not run command in container: %v", err) + } + + if len(stderr) != 0 { + t.Logf("Got stderr from command:\n%v\n", string(stderr)) + } + + if retcode != 0 { + t.Logf("Got stdout from command:\n%v\n", string(stdout)) + t.Fatalf("Got unexpected non-zero retcode from zlint: %v\n", retcode) + } + + // Clean up after ourselves. + if err := zRunner.Stop(context.Background(), result.Container.ID); err != nil { + t.Fatalf("failed to stop container: %v", err) + } + + return stdout +} + +func RunZLintRootTest(t *testing.T, keyType string, keyBits int, usePSS bool, ignored []string) { + b, s := pki.CreateBackendWithStorage(t) + + resp, err := pki.CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "Root X1", + "country": "US", + "organization": "Dadgarcorp", + "ou": "QA", + "key_type": keyType, + "key_bits": keyBits, + "use_pss": usePSS, + }) + require.NoError(t, err) + rootCert := resp.Data["certificate"].(string) + + var parsed map[string]interface{} + output := RunZLintContainer(t, rootCert) + + if err := json.Unmarshal(output, &parsed); err != nil { + t.Fatalf("failed to parse zlint output as JSON: %v\nOutput:\n%v\n\n", err, string(output)) + } + + for key, rawValue := range parsed { + value := rawValue.(map[string]interface{}) + result, ok := value["result"] + if !ok || result == "NA" { + continue + } + + if result == "error" { + skip := false + for _, allowedFailures := range ignored { + if allowedFailures == key { + skip = true + break + } + } + + if !skip { + t.Fatalf("got unexpected error from test %v: %v", key, value) + } + } + } +} + +func Test_ZLintRSA2048(t *testing.T) { + t.Parallel() + RunZLintRootTest(t, "rsa", 2048, false, nil) +} + +func Test_ZLintRSA2048PSS(t *testing.T) { + t.Parallel() + RunZLintRootTest(t, "rsa", 2048, true, nil) +} + +func Test_ZLintRSA3072(t *testing.T) { + t.Parallel() + RunZLintRootTest(t, "rsa", 3072, false, nil) +} + +func Test_ZLintRSA3072PSS(t *testing.T) { + t.Parallel() + RunZLintRootTest(t, "rsa", 3072, true, nil) +} + +func Test_ZLintECDSA256(t *testing.T) { + t.Parallel() + RunZLintRootTest(t, "ec", 256, false, nil) +} + +func Test_ZLintECDSA384(t *testing.T) { + t.Parallel() + RunZLintRootTest(t, "ec", 384, false, nil) +} + +func Test_ZLintECDSA521(t *testing.T) { + t.Parallel() + // Mozilla doesn't allow P-521 ECDSA keys. + RunZLintRootTest(t, "ec", 521, false, []string{ + "e_mp_ecdsa_pub_key_encoding_correct", + "e_mp_ecdsa_signature_encoding_correct", + }) +} diff --git a/builtin/logical/rabbitmq/backend.go b/builtin/logical/rabbitmq/backend.go new file mode 100644 index 0000000..20ad1af --- /dev/null +++ b/builtin/logical/rabbitmq/backend.go @@ -0,0 +1,141 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package rabbitmq + +import ( + "context" + "strings" + "sync" + + cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" + rabbithole "github.com/michaelklishin/rabbit-hole/v2" +) + +const operationPrefixRabbitMQ = "rabbit-mq" + +// Factory creates and configures the backend +func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { + b := Backend() + if err := b.Setup(ctx, conf); err != nil { + return nil, err + } + return b, nil +} + +// Creates a new backend with all the paths and secrets belonging to it +func Backend() *backend { + var b backend + b.Backend = &framework.Backend{ + Help: strings.TrimSpace(backendHelp), + + PathsSpecial: &logical.Paths{ + SealWrapStorage: []string{ + "config/connection", + }, + }, + + Paths: []*framework.Path{ + pathConfigConnection(&b), + pathConfigLease(&b), + pathListRoles(&b), + pathCreds(&b), + pathRoles(&b), + }, + + Secrets: []*framework.Secret{ + secretCreds(&b), + }, + + Clean: b.resetClient, + Invalidate: b.invalidate, + BackendType: logical.TypeLogical, + } + + return &b +} + +type backend struct { + *framework.Backend + + client *rabbithole.Client + lock sync.RWMutex +} + +// DB returns the database connection. +func (b *backend) Client(ctx context.Context, s logical.Storage) (*rabbithole.Client, error) { + b.lock.RLock() + + // If we already have a client, return it + if b.client != nil { + b.lock.RUnlock() + return b.client, nil + } + + b.lock.RUnlock() + + // Otherwise, attempt to make connection + connConfig, err := readConfig(ctx, s) + if err != nil { + return nil, err + } + + b.lock.Lock() + defer b.lock.Unlock() + + // If the client was created during the lock switch, return it + if b.client != nil { + return b.client, nil + } + + b.client, err = rabbithole.NewClient(connConfig.URI, connConfig.Username, connConfig.Password) + if err != nil { + return nil, err + } + // Use a default pooled transport so there would be no leaked file descriptors + b.client.SetTransport(cleanhttp.DefaultPooledTransport()) + + return b.client, nil +} + +// resetClient forces a connection next time Client() is called. +func (b *backend) resetClient(_ context.Context) { + b.lock.Lock() + defer b.lock.Unlock() + + b.client = nil +} + +func (b *backend) invalidate(ctx context.Context, key string) { + switch key { + case "config/connection": + b.resetClient(ctx) + } +} + +// Lease returns the lease information +func (b *backend) Lease(ctx context.Context, s logical.Storage) (*configLease, error) { + entry, err := s.Get(ctx, "config/lease") + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result configLease + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + return &result, nil +} + +const backendHelp = ` +The RabbitMQ backend dynamically generates RabbitMQ users. + +After mounting this backend, configure it using the endpoints within +the "config/" path. +` diff --git a/builtin/logical/rabbitmq/backend_test.go b/builtin/logical/rabbitmq/backend_test.go new file mode 100644 index 0000000..61b18da --- /dev/null +++ b/builtin/logical/rabbitmq/backend_test.go @@ -0,0 +1,349 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package rabbitmq + +import ( + "context" + "fmt" + "log" + "os" + "testing" + + "github.com/hashicorp/go-secure-stdlib/base62" + logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" + "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/logical" + rabbithole "github.com/michaelklishin/rabbit-hole/v2" + "github.com/mitchellh/mapstructure" +) + +const ( + envRabbitMQConnectionURI = "RABBITMQ_CONNECTION_URI" + envRabbitMQUsername = "RABBITMQ_USERNAME" + envRabbitMQPassword = "RABBITMQ_PASSWORD" +) + +const ( + testTags = "administrator" + testVHosts = `{"/": {"configure": ".*", "write": ".*", "read": ".*"}}` + testVHostTopics = `{"/": {"amq.topic": {"write": ".*", "read": ".*"}}}` + + roleName = "web" +) + +func prepareRabbitMQTestContainer(t *testing.T) (func(), string) { + if os.Getenv(envRabbitMQConnectionURI) != "" { + return func() {}, os.Getenv(envRabbitMQConnectionURI) + } + + runner, err := docker.NewServiceRunner(docker.RunOptions{ + ImageRepo: "docker.mirror.hashicorp.services/library/rabbitmq", + ImageTag: "3-management", + ContainerName: "rabbitmq", + Ports: []string{"15672/tcp"}, + }) + if err != nil { + t.Fatalf("could not start docker rabbitmq: %s", err) + } + + svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { + connURL := fmt.Sprintf("http://%s:%d", host, port) + rmqc, err := rabbithole.NewClient(connURL, "guest", "guest") + if err != nil { + return nil, err + } + + _, err = rmqc.Overview() + if err != nil { + return nil, err + } + + return docker.NewServiceURLParse(connURL) + }) + if err != nil { + t.Fatalf("could not start docker rabbitmq: %s", err) + } + return svc.Cleanup, svc.Config.URL().String() +} + +func TestBackend_basic(t *testing.T) { + b, _ := Factory(context.Background(), logical.TestBackendConfig()) + + cleanup, uri := prepareRabbitMQTestContainer(t) + defer cleanup() + + logicaltest.Test(t, logicaltest.TestCase{ + PreCheck: testAccPreCheckFunc(t, uri), + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfig(t, uri, ""), + testAccStepRole(t), + testAccStepReadCreds(t, b, uri, roleName), + }, + }) +} + +func TestBackend_returnsErrs(t *testing.T) { + b, _ := Factory(context.Background(), logical.TestBackendConfig()) + + cleanup, uri := prepareRabbitMQTestContainer(t) + defer cleanup() + + logicaltest.Test(t, logicaltest.TestCase{ + PreCheck: testAccPreCheckFunc(t, uri), + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfig(t, uri, ""), + { + Operation: logical.CreateOperation, + Path: fmt.Sprintf("roles/%s", roleName), + Data: map[string]interface{}{ + "tags": testTags, + "vhosts": `{"invalid":{"write": ".*", "read": ".*"}}`, + "vhost_topics": testVHostTopics, + }, + }, + { + Operation: logical.ReadOperation, + Path: fmt.Sprintf("creds/%s", roleName), + ErrorOk: true, + }, + }, + }) +} + +func TestBackend_roleCrud(t *testing.T) { + b, _ := Factory(context.Background(), logical.TestBackendConfig()) + + cleanup, uri := prepareRabbitMQTestContainer(t) + defer cleanup() + + logicaltest.Test(t, logicaltest.TestCase{ + PreCheck: testAccPreCheckFunc(t, uri), + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfig(t, uri, ""), + testAccStepRole(t), + testAccStepReadRole(t, roleName, testTags, testVHosts, testVHostTopics), + testAccStepDeleteRole(t, roleName), + testAccStepReadRole(t, roleName, "", "", 
""), + }, + }) +} + +func TestBackend_roleWithPasswordPolicy(t *testing.T) { + if os.Getenv(logicaltest.TestEnvVar) == "" { + t.Skip(fmt.Sprintf("Acceptance tests skipped unless env %q set", logicaltest.TestEnvVar)) + return + } + + backendConfig := logical.TestBackendConfig() + passGen := func() (password string, err error) { + return base62.Random(30) + } + backendConfig.System.(*logical.StaticSystemView).SetPasswordPolicy("testpolicy", passGen) + b, _ := Factory(context.Background(), backendConfig) + + cleanup, uri := prepareRabbitMQTestContainer(t) + defer cleanup() + + logicaltest.Test(t, logicaltest.TestCase{ + PreCheck: testAccPreCheckFunc(t, uri), + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepConfig(t, uri, "testpolicy"), + testAccStepRole(t), + testAccStepReadCreds(t, b, uri, roleName), + }, + }) +} + +func testAccPreCheckFunc(t *testing.T, uri string) func() { + return func() { + if uri == "" { + t.Fatal("RabbitMQ URI must be set for acceptance tests") + } + } +} + +func testAccStepConfig(t *testing.T, uri string, passwordPolicy string) logicaltest.TestStep { + username := os.Getenv(envRabbitMQUsername) + if len(username) == 0 { + username = "guest" + } + password := os.Getenv(envRabbitMQPassword) + if len(password) == 0 { + password = "guest" + } + + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "config/connection", + Data: map[string]interface{}{ + "connection_uri": uri, + "username": username, + "password": password, + "password_policy": passwordPolicy, + }, + } +} + +func testAccStepRole(t *testing.T) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("roles/%s", roleName), + Data: map[string]interface{}{ + "tags": testTags, + "vhosts": testVHosts, + "vhost_topics": testVHostTopics, + }, + } +} + +func testAccStepDeleteRole(t *testing.T, n string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.DeleteOperation, + Path: "roles/" + n, + } +} + +func testAccStepReadCreds(t *testing.T, b logical.Backend, uri, name string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "creds/" + name, + Check: func(resp *logical.Response) error { + var d struct { + Username string `mapstructure:"username"` + Password string `mapstructure:"password"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + log.Printf("[WARN] Generated credentials: %v", d) + + client, err := rabbithole.NewClient(uri, d.Username, d.Password) + if err != nil { + t.Fatal(err) + } + + _, err = client.ListVhosts() + if err != nil { + t.Fatalf("unable to list vhosts with generated credentials: %s", err) + } + + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.RevokeOperation, + Secret: &logical.Secret{ + InternalData: map[string]interface{}{ + "secret_type": "creds", + "username": d.Username, + }, + }, + }) + if err != nil { + return err + } + if resp != nil { + if resp.IsError() { + return fmt.Errorf("error on resp: %#v", *resp) + } + } + + client, err = rabbithole.NewClient(uri, d.Username, d.Password) + if err != nil { + t.Fatal(err) + } + + _, err = client.ListVhosts() + if err == nil { + t.Fatalf("expected to fail listing vhosts: %s", err) + } + + return nil + }, + } +} + +func testAccStepReadRole(t *testing.T, name, tags, rawVHosts string, rawVHostTopics string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: 
"roles/" + name, + Check: func(resp *logical.Response) error { + if resp == nil { + if tags == "" && rawVHosts == "" && rawVHostTopics == "" { + return nil + } + + return fmt.Errorf("bad: %#v", resp) + } + + var d struct { + Tags string `mapstructure:"tags"` + VHosts map[string]vhostPermission `mapstructure:"vhosts"` + VHostTopics map[string]map[string]vhostTopicPermission `mapstructure:"vhost_topics"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + + if d.Tags != tags { + return fmt.Errorf("bad: %#v", resp) + } + + var vhosts map[string]vhostPermission + if err := jsonutil.DecodeJSON([]byte(rawVHosts), &vhosts); err != nil { + return fmt.Errorf("bad expected vhosts %#v: %s", vhosts, err) + } + + for host, permission := range vhosts { + actualPermission, ok := d.VHosts[host] + if !ok { + return fmt.Errorf("expected vhost: %s", host) + } + + if actualPermission.Configure != permission.Configure { + return fmt.Errorf("expected permission %s to be %s, got %s", "configure", permission.Configure, actualPermission.Configure) + } + + if actualPermission.Write != permission.Write { + return fmt.Errorf("expected permission %s to be %s, got %s", "write", permission.Write, actualPermission.Write) + } + + if actualPermission.Read != permission.Read { + return fmt.Errorf("expected permission %s to be %s, got %s", "read", permission.Read, actualPermission.Read) + } + } + + var vhostTopics map[string]map[string]vhostTopicPermission + if err := jsonutil.DecodeJSON([]byte(rawVHostTopics), &vhostTopics); err != nil { + return fmt.Errorf("bad expected vhostTopics %#v: %s", vhostTopics, err) + } + + for host, permissions := range vhostTopics { + for exchange, permission := range permissions { + actualPermissions, ok := d.VHostTopics[host] + if !ok { + return fmt.Errorf("expected vhost topics: %s", host) + } + + actualPermission, ok := actualPermissions[exchange] + if !ok { + return fmt.Errorf("expected vhost topic exchange: %s", exchange) + } + + if actualPermission.Write != permission.Write { + return fmt.Errorf("expected permission %s to be %s, got %s", "write", permission.Write, actualPermission.Write) + } + + if actualPermission.Read != permission.Read { + return fmt.Errorf("expected permission %s to be %s, got %s", "read", permission.Read, actualPermission.Read) + } + } + } + + return nil + }, + } +} diff --git a/builtin/logical/rabbitmq/cmd/rabbitmq/main.go b/builtin/logical/rabbitmq/cmd/rabbitmq/main.go new file mode 100644 index 0000000..2cb62da --- /dev/null +++ b/builtin/logical/rabbitmq/cmd/rabbitmq/main.go @@ -0,0 +1,34 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "os" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/rabbitmq" + "github.com/hashicorp/vault/sdk/plugin" +) + +func main() { + apiClientMeta := &api.PluginAPIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args[1:]) + + tlsConfig := apiClientMeta.GetTLSConfig() + tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) + + if err := plugin.ServeMultiplex(&plugin.ServeOpts{ + BackendFactoryFunc: rabbitmq.Factory, + // set the TLSProviderFunc so that the plugin maintains backwards + // compatibility with Vault versions that don’t support plugin AutoMTLS + TLSProviderFunc: tlsProviderFunc, + }); err != nil { + logger := hclog.New(&hclog.LoggerOptions{}) + + logger.Error("plugin shutting down", "error", err) + os.Exit(1) + } +} diff --git a/builtin/logical/rabbitmq/passwords.go b/builtin/logical/rabbitmq/passwords.go new file mode 100644 index 0000000..8ba08a0 --- /dev/null +++ b/builtin/logical/rabbitmq/passwords.go @@ -0,0 +1,17 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package rabbitmq + +import ( + "context" + + "github.com/hashicorp/go-secure-stdlib/base62" +) + +func (b *backend) generatePassword(ctx context.Context, policyName string) (password string, err error) { + if policyName != "" { + return b.System().GeneratePasswordFromPolicy(ctx, policyName) + } + return base62.Random(36) +} diff --git a/builtin/logical/rabbitmq/path_config_connection.go b/builtin/logical/rabbitmq/path_config_connection.go new file mode 100644 index 0000000..8979455 --- /dev/null +++ b/builtin/logical/rabbitmq/path_config_connection.go @@ -0,0 +1,190 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package rabbitmq + +import ( + "context" + "fmt" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/template" + "github.com/hashicorp/vault/sdk/logical" + rabbithole "github.com/michaelklishin/rabbit-hole/v2" +) + +const ( + storageKey = "config/connection" +) + +func pathConfigConnection(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "config/connection", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixRabbitMQ, + OperationVerb: "configure", + OperationSuffix: "connection", + }, + + Fields: map[string]*framework.FieldSchema{ + "connection_uri": { + Type: framework.TypeString, + Description: "RabbitMQ Management URI", + }, + "username": { + Type: framework.TypeString, + Description: "Username of a RabbitMQ management administrator", + }, + "password": { + Type: framework.TypeString, + Description: "Password of the provided RabbitMQ management user", + }, + "verify_connection": { + Type: framework.TypeBool, + Default: true, + Description: `If set, connection_uri is verified by actually connecting to the RabbitMQ management API`, + }, + "password_policy": { + Type: framework.TypeString, + Description: "Name of the password policy to use to generate passwords for dynamic credentials.", + }, + "username_template": { + Type: framework.TypeString, + Description: "Template describing how dynamic usernames are generated.", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathConnectionUpdate, + }, + + HelpSynopsis: pathConfigConnectionHelpSyn, + HelpDescription: pathConfigConnectionHelpDesc, + } +} + +func (b *backend) pathConnectionUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + uri := data.Get("connection_uri").(string) + if uri == "" { + return logical.ErrorResponse("missing connection_uri"), nil + } + + username := data.Get("username").(string) + if username == "" { + return logical.ErrorResponse("missing username"), nil + } + + password := data.Get("password").(string) + if password == "" { + return logical.ErrorResponse("missing password"), nil + } + + usernameTemplate := data.Get("username_template").(string) + if usernameTemplate != "" { + up, err := template.NewTemplate(template.Template(usernameTemplate)) + if err != nil { + return logical.ErrorResponse("unable to initialize username template: %w", err), nil + } + + _, err = up.Generate(UsernameMetadata{}) + if err != nil { + return logical.ErrorResponse("invalid username template: %w", err), nil + } + } + + passwordPolicy := data.Get("password_policy").(string) + + // Don't check the connection_url if verification is disabled + verifyConnection := data.Get("verify_connection").(bool) + if verifyConnection { + // Create RabbitMQ management client + client, err := rabbithole.NewClient(uri, username, password) + if err != nil { + return nil, fmt.Errorf("failed to create client: %w", err) + } + + // Verify that configured credentials is capable of listing + if _, err = client.ListUsers(); err != nil { + return nil, fmt.Errorf("failed to validate the connection: %w", err) + } + } + + // Store it + config := connectionConfig{ + URI: uri, + Username: username, + Password: password, + PasswordPolicy: passwordPolicy, + UsernameTemplate: usernameTemplate, + } + err := writeConfig(ctx, req.Storage, config) + if err != nil { + return nil, err + } + + // Reset the client connection + b.resetClient(ctx) + + 
return nil, nil +} + +func readConfig(ctx context.Context, storage logical.Storage) (connectionConfig, error) { + entry, err := storage.Get(ctx, storageKey) + if err != nil { + return connectionConfig{}, err + } + if entry == nil { + return connectionConfig{}, nil + } + + var connConfig connectionConfig + if err := entry.DecodeJSON(&connConfig); err != nil { + return connectionConfig{}, err + } + return connConfig, nil +} + +func writeConfig(ctx context.Context, storage logical.Storage, config connectionConfig) error { + entry, err := logical.StorageEntryJSON(storageKey, config) + if err != nil { + return err + } + if err := storage.Put(ctx, entry); err != nil { + return err + } + return nil +} + +// connectionConfig contains the information required to make a connection to a RabbitMQ node +type connectionConfig struct { + // URI of the RabbitMQ server + URI string `json:"connection_uri"` + + // Username which has 'administrator' tag attached to it + Username string `json:"username"` + + // Password for the Username + Password string `json:"password"` + + // PasswordPolicy for generating passwords for dynamic credentials + PasswordPolicy string `json:"password_policy"` + + // UsernameTemplate for storing the raw template in Vault's backing data store + UsernameTemplate string `json:"username_template"` +} + +const pathConfigConnectionHelpSyn = ` +Configure the connection URI, username, and password to talk to RabbitMQ management HTTP API. +` + +const pathConfigConnectionHelpDesc = ` +This path configures the connection properties used to connect to RabbitMQ management HTTP API. +The "connection_uri" parameter is a string that is used to connect to the API. The "username" +and "password" parameters are strings that are used as credentials to the API. The "verify_connection" +parameter is a boolean that is used to verify whether the provided connection URI, username, and password +are valid. + +The URI looks like: +"http://localhost:15672" +` diff --git a/builtin/logical/rabbitmq/path_config_connection_test.go b/builtin/logical/rabbitmq/path_config_connection_test.go new file mode 100644 index 0000000..55e6b2c --- /dev/null +++ b/builtin/logical/rabbitmq/path_config_connection_test.go @@ -0,0 +1,107 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package rabbitmq + +import ( + "context" + "reflect" + "testing" + + "github.com/hashicorp/vault/sdk/logical" +) + +func TestBackend_ConfigConnection_DefaultUsernameTemplate(t *testing.T) { + var resp *logical.Response + var err error + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b := Backend() + if err = b.Setup(context.Background(), config); err != nil { + t.Fatal(err) + } + + configData := map[string]interface{}{ + "connection_uri": "uri", + "username": "username", + "password": "password", + "verify_connection": "false", + } + configReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/connection", + Storage: config.StorageView, + Data: configData, + } + resp, err = b.HandleRequest(context.Background(), configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr:%s", resp, err) + } + if resp != nil { + t.Fatal("expected a nil response") + } + + actualConfig, err := readConfig(context.Background(), config.StorageView) + if err != nil { + t.Fatalf("unable to read configuration: %v", err) + } + + expectedConfig := connectionConfig{ + URI: "uri", + Username: "username", + Password: "password", + UsernameTemplate: "", + } + + if !reflect.DeepEqual(actualConfig, expectedConfig) { + t.Fatalf("Expected: %#v\nActual: %#v", expectedConfig, actualConfig) + } +} + +func TestBackend_ConfigConnection_CustomUsernameTemplate(t *testing.T) { + var resp *logical.Response + var err error + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b := Backend() + if err = b.Setup(context.Background(), config); err != nil { + t.Fatal(err) + } + + configData := map[string]interface{}{ + "connection_uri": "uri", + "username": "username", + "password": "password", + "verify_connection": "false", + "username_template": "{{ .DisplayName }}", + } + configReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/connection", + Storage: config.StorageView, + Data: configData, + } + resp, err = b.HandleRequest(context.Background(), configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr:%s", resp, err) + } + if resp != nil { + t.Fatal("expected a nil response") + } + + actualConfig, err := readConfig(context.Background(), config.StorageView) + if err != nil { + t.Fatalf("unable to read configuration: %v", err) + } + + expectedConfig := connectionConfig{ + URI: "uri", + Username: "username", + Password: "password", + UsernameTemplate: "{{ .DisplayName }}", + } + + if !reflect.DeepEqual(actualConfig, expectedConfig) { + t.Fatalf("Expected: %#v\nActual: %#v", expectedConfig, actualConfig) + } +} diff --git a/builtin/logical/rabbitmq/path_config_lease.go b/builtin/logical/rabbitmq/path_config_lease.go new file mode 100644 index 0000000..9436f3f --- /dev/null +++ b/builtin/logical/rabbitmq/path_config_lease.go @@ -0,0 +1,104 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package rabbitmq + +import ( + "context" + "time" + + "github.com/fatih/structs" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathConfigLease(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "config/lease", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixRabbitMQ, + }, + + Fields: map[string]*framework.FieldSchema{ + "ttl": { + Type: framework.TypeDurationSecond, + Default: 0, + Description: "Duration before which the issued credentials needs renewal", + }, + "max_ttl": { + Type: framework.TypeDurationSecond, + Default: 0, + Description: `Duration after which the issued credentials should not be allowed to be renewed`, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathLeaseRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "read", + OperationSuffix: "lease-configuration", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathLeaseUpdate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "lease", + }, + }, + }, + + HelpSynopsis: pathConfigLeaseHelpSyn, + HelpDescription: pathConfigLeaseHelpDesc, + } +} + +// Sets the lease configuration parameters +func (b *backend) pathLeaseUpdate(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + entry, err := logical.StorageEntryJSON("config/lease", &configLease{ + TTL: time.Second * time.Duration(d.Get("ttl").(int)), + MaxTTL: time.Second * time.Duration(d.Get("max_ttl").(int)), + }) + if err != nil { + return nil, err + } + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + + return nil, nil +} + +// Returns the lease configuration parameters +func (b *backend) pathLeaseRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + lease, err := b.Lease(ctx, req.Storage) + if err != nil { + return nil, err + } + if lease == nil { + return nil, nil + } + + lease.TTL = lease.TTL / time.Second + lease.MaxTTL = lease.MaxTTL / time.Second + + return &logical.Response{ + Data: structs.New(lease).Map(), + }, nil +} + +// Lease configuration information for the secrets issued by this backend +type configLease struct { + TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl"` + MaxTTL time.Duration `json:"max_ttl" structs:"max_ttl" mapstructure:"max_ttl"` +} + +var pathConfigLeaseHelpSyn = "Configure the lease parameters for generated credentials" + +var pathConfigLeaseHelpDesc = ` +Sets the ttl and max_ttl values for the secrets to be issued by this backend. +Both ttl and max_ttl takes in an integer number of seconds as input as well as +inputs like "1h". +` diff --git a/builtin/logical/rabbitmq/path_config_lease_test.go b/builtin/logical/rabbitmq/path_config_lease_test.go new file mode 100644 index 0000000..9e565c5 --- /dev/null +++ b/builtin/logical/rabbitmq/path_config_lease_test.go @@ -0,0 +1,57 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package rabbitmq + +import ( + "context" + "testing" + "time" + + "github.com/hashicorp/vault/sdk/logical" +) + +func TestBackend_config_lease_RU(t *testing.T) { + var resp *logical.Response + var err error + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b := Backend() + if err = b.Setup(context.Background(), config); err != nil { + t.Fatal(err) + } + + configData := map[string]interface{}{ + "ttl": "10h", + "max_ttl": "20h", + } + configReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/lease", + Storage: config.StorageView, + Data: configData, + } + resp, err = b.HandleRequest(context.Background(), configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr:%s", resp, err) + } + if resp != nil { + t.Fatal("expected a nil response") + } + + configReq.Operation = logical.ReadOperation + resp, err = b.HandleRequest(context.Background(), configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr:%s", resp, err) + } + if resp == nil { + t.Fatal("expected a response") + } + + if resp.Data["ttl"].(time.Duration) != 36000 { + t.Fatalf("bad: ttl: expected:36000 actual:%d", resp.Data["ttl"].(time.Duration)) + } + if resp.Data["max_ttl"].(time.Duration) != 72000 { + t.Fatalf("bad: ttl: expected:72000 actual:%d", resp.Data["ttl"].(time.Duration)) + } +} diff --git a/builtin/logical/rabbitmq/path_role_create.go b/builtin/logical/rabbitmq/path_role_create.go new file mode 100644 index 0000000..fd3f256 --- /dev/null +++ b/builtin/logical/rabbitmq/path_role_create.go @@ -0,0 +1,235 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package rabbitmq + +import ( + "context" + "fmt" + "io/ioutil" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/template" + "github.com/hashicorp/vault/sdk/logical" + rabbithole "github.com/michaelklishin/rabbit-hole/v2" +) + +const ( + defaultUserNameTemplate = `{{ printf "%s-%s" (.DisplayName) (uuid) }}` +) + +func pathCreds(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "creds/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixRabbitMQ, + OperationVerb: "request", + OperationSuffix: "credentials", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the role.", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathCredsRead, + }, + + HelpSynopsis: pathRoleCreateReadHelpSyn, + HelpDescription: pathRoleCreateReadHelpDesc, + } +} + +// Issues the credential based on the role name +func (b *backend) pathCredsRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + if name == "" { + return logical.ErrorResponse("missing name"), nil + } + + // Get the role + role, err := b.Role(ctx, req.Storage, name) + if err != nil { + return nil, err + } + if role == nil { + return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", name)), nil + } + + config, err := readConfig(ctx, req.Storage) + if err != nil { + return nil, fmt.Errorf("unable to read configuration: %w", err) + } + + usernameTemplate := config.UsernameTemplate + if usernameTemplate == "" { + usernameTemplate = defaultUserNameTemplate + } + + up, err := 
template.NewTemplate(template.Template(usernameTemplate)) + if err != nil { + return nil, fmt.Errorf("unable to initialize username template: %w", err) + } + + um := UsernameMetadata{ + DisplayName: req.DisplayName, + RoleName: name, + } + + username, err := up.Generate(um) + if err != nil { + return nil, fmt.Errorf("failed to generate username: %w", err) + } + + password, err := b.generatePassword(ctx, config.PasswordPolicy) + if err != nil { + return nil, err + } + + // Get the client configuration + client, err := b.Client(ctx, req.Storage) + if err != nil { + return nil, err + } + if client == nil { + return logical.ErrorResponse("failed to get the client"), nil + } + + // Register the generated credentials in the backend, with the RabbitMQ server + resp, err := client.PutUser(username, rabbithole.UserSettings{ + Password: password, + Tags: []string{role.Tags}, + }) + if err != nil { + return nil, fmt.Errorf("failed to create a new user with the generated credentials") + } + defer func() { + if err := resp.Body.Close(); err != nil { + b.Logger().Error(fmt.Sprintf("unable to close response body: %s", err)) + } + }() + if !isIn200s(resp.StatusCode) { + body, _ := ioutil.ReadAll(resp.Body) + return nil, fmt.Errorf("error creating user %s - %d: %s", username, resp.StatusCode, body) + } + + success := false + defer func() { + if success { + return + } + // Delete the user because it's in an unknown state. + resp, err := client.DeleteUser(username) + if err != nil { + b.Logger().Error(fmt.Sprintf("deleting %s due to permissions being in an unknown state, but failed: %s", username, err)) + } + if !isIn200s(resp.StatusCode) { + body, _ := ioutil.ReadAll(resp.Body) + b.Logger().Error(fmt.Sprintf("deleting %s due to permissions being in an unknown state, but error deleting: %d: %s", username, resp.StatusCode, body)) + } + }() + + // If the role had vhost permissions specified, assign those permissions + // to the created username for respective vhosts. + for vhost, permission := range role.VHosts { + err := func() error { + resp, err := client.UpdatePermissionsIn(vhost, username, rabbithole.Permissions{ + Configure: permission.Configure, + Write: permission.Write, + Read: permission.Read, + }) + if err != nil { + return err + } + defer func() { + if err := resp.Body.Close(); err != nil { + b.Logger().Error(fmt.Sprintf("unable to close response body: %s", err)) + } + }() + if !isIn200s(resp.StatusCode) { + body, _ := ioutil.ReadAll(resp.Body) + return fmt.Errorf("error updating vhost permissions for %s - %d: %s", vhost, resp.StatusCode, body) + } + return nil + }() + if err != nil { + return nil, err + } + } + + // If the role had vhost topic permissions specified, assign those permissions + // to the created username for respective vhosts and exchange. 
+ for vhost, permissions := range role.VHostTopics { + for exchange, permission := range permissions { + err := func() error { + resp, err := client.UpdateTopicPermissionsIn(vhost, username, rabbithole.TopicPermissions{ + Exchange: exchange, + Write: permission.Write, + Read: permission.Read, + }) + if err != nil { + return err + } + defer func() { + if err := resp.Body.Close(); err != nil { + b.Logger().Error(fmt.Sprintf("unable to close response body: %s", err)) + } + }() + if !isIn200s(resp.StatusCode) { + body, _ := ioutil.ReadAll(resp.Body) + return fmt.Errorf("error updating vhost permissions for %s - %d: %s", vhost, resp.StatusCode, body) + } + return nil + }() + if err != nil { + return nil, err + } + } + } + success = true + + // Return the secret + response := b.Secret(SecretCredsType).Response(map[string]interface{}{ + "username": username, + "password": password, + }, map[string]interface{}{ + "username": username, + }) + + // Determine if we have a lease + lease, err := b.Lease(ctx, req.Storage) + if err != nil { + return nil, err + } + + if lease != nil { + response.Secret.TTL = lease.TTL + response.Secret.MaxTTL = lease.MaxTTL + } + + return response, nil +} + +func isIn200s(respStatus int) bool { + return respStatus >= 200 && respStatus < 300 +} + +// UsernameMetadata is metadata the database plugin can use to generate a username +type UsernameMetadata struct { + DisplayName string + RoleName string +} + +const pathRoleCreateReadHelpSyn = ` +Request RabbitMQ credentials for a certain role. +` + +const pathRoleCreateReadHelpDesc = ` +This path reads RabbitMQ credentials for a certain role. The +RabbitMQ credentials will be generated on demand and will be automatically +revoked when the lease is up. +` diff --git a/builtin/logical/rabbitmq/path_role_create_test.go b/builtin/logical/rabbitmq/path_role_create_test.go new file mode 100644 index 0000000..ecb9746 --- /dev/null +++ b/builtin/logical/rabbitmq/path_role_create_test.go @@ -0,0 +1,164 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package rabbitmq + +import ( + "context" + "testing" + + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/require" +) + +func TestBackend_RoleCreate_DefaultUsernameTemplate(t *testing.T) { + cleanup, connectionURI := prepareRabbitMQTestContainer(t) + defer cleanup() + + var resp *logical.Response + var err error + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b := Backend() + if err = b.Setup(context.Background(), config); err != nil { + t.Fatal(err) + } + + configData := map[string]interface{}{ + "connection_uri": connectionURI, + "username": "guest", + "password": "guest", + "username_template": "", + } + configReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/connection", + Storage: config.StorageView, + Data: configData, + } + resp, err = b.HandleRequest(context.Background(), configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr:%s", resp, err) + } + if resp != nil { + t.Fatal("expected a nil response") + } + + roleData := map[string]interface{}{ + "name": "foo", + "tags": "bar", + } + roleReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/foo", + Storage: config.StorageView, + Data: roleData, + } + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr:%s", resp, err) + } + if resp != nil { + t.Fatal("expected a nil response") + } + + credsReq := &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/foo", + Storage: config.StorageView, + DisplayName: "token", + } + resp, err = b.HandleRequest(context.Background(), credsReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr:%s", resp, err) + } + if resp == nil { + t.Fatal("missing creds response") + } + if resp.Data == nil { + t.Fatalf("missing creds data") + } + + username, exists := resp.Data["username"] + if !exists { + t.Fatalf("missing username in response") + } + + require.Regexp(t, `^token-[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}$`, username) +} + +func TestBackend_RoleCreate_CustomUsernameTemplate(t *testing.T) { + cleanup, connectionURI := prepareRabbitMQTestContainer(t) + defer cleanup() + + var resp *logical.Response + var err error + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b := Backend() + if err = b.Setup(context.Background(), config); err != nil { + t.Fatal(err) + } + + configData := map[string]interface{}{ + "connection_uri": connectionURI, + "username": "guest", + "password": "guest", + "username_template": "foo-{{ .DisplayName }}", + } + configReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config/connection", + Storage: config.StorageView, + Data: configData, + } + resp, err = b.HandleRequest(context.Background(), configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr:%s", resp, err) + } + if resp != nil { + t.Fatal("expected a nil response") + } + + roleData := map[string]interface{}{ + "name": "foo", + "tags": "bar", + } + roleReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/foo", + Storage: config.StorageView, + Data: roleData, + } + resp, err = b.HandleRequest(context.Background(), roleReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr:%s", resp, err) + } + if resp != nil { + 
t.Fatal("expected a nil response") + } + + credsReq := &logical.Request{ + Operation: logical.ReadOperation, + Path: "creds/foo", + Storage: config.StorageView, + DisplayName: "token", + } + resp, err = b.HandleRequest(context.Background(), credsReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: resp: %#v\nerr:%s", resp, err) + } + if resp == nil { + t.Fatal("missing creds response") + } + if resp.Data == nil { + t.Fatalf("missing creds data") + } + + username, exists := resp.Data["username"] + if !exists { + t.Fatalf("missing username in response") + } + + require.Regexp(t, `^foo-token$`, username) +} diff --git a/builtin/logical/rabbitmq/path_roles.go b/builtin/logical/rabbitmq/path_roles.go new file mode 100644 index 0000000..98c2f3d --- /dev/null +++ b/builtin/logical/rabbitmq/path_roles.go @@ -0,0 +1,237 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package rabbitmq + +import ( + "context" + "fmt" + + "github.com/fatih/structs" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func pathListRoles(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "roles/?$", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixRabbitMQ, + OperationSuffix: "roles", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: b.pathRoleList, + }, + HelpSynopsis: pathRoleHelpSyn, + HelpDescription: pathRoleHelpDesc, + } +} + +func pathRoles(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "roles/" + framework.GenericNameRegex("name"), + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixRabbitMQ, + OperationSuffix: "role", + }, + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the role.", + }, + "tags": { + Type: framework.TypeString, + Description: "Comma-separated list of tags for this role.", + }, + "vhosts": { + Type: framework.TypeString, + Description: "A map of virtual hosts to permissions.", + }, + "vhost_topics": { + Type: framework.TypeString, + Description: "A nested map of virtual hosts and exchanges to topic permissions.", + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathRoleRead, + logical.UpdateOperation: b.pathRoleUpdate, + logical.DeleteOperation: b.pathRoleDelete, + }, + HelpSynopsis: pathRoleHelpSyn, + HelpDescription: pathRoleHelpDesc, + } +} + +// Reads the role configuration from the storage +func (b *backend) Role(ctx context.Context, s logical.Storage, n string) (*roleEntry, error) { + entry, err := s.Get(ctx, "role/"+n) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result roleEntry + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + return &result, nil +} + +// Deletes an existing role +func (b *backend) pathRoleDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + if name == "" { + return logical.ErrorResponse("missing name"), nil + } + + return nil, req.Storage.Delete(ctx, "role/"+name) +} + +// Reads an existing role +func (b *backend) pathRoleRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + if name == "" { + return logical.ErrorResponse("missing name"), nil + } + + 
role, err := b.Role(ctx, req.Storage, name) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + return &logical.Response{ + Data: structs.New(role).Map(), + }, nil +} + +// Lists all the roles registered with the backend +func (b *backend) pathRoleList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + roles, err := req.Storage.List(ctx, "role/") + if err != nil { + return nil, err + } + + return logical.ListResponse(roles), nil +} + +// Registers a new role with the backend +func (b *backend) pathRoleUpdate(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + if name == "" { + return logical.ErrorResponse("missing name"), nil + } + + tags := d.Get("tags").(string) + rawVHosts := d.Get("vhosts").(string) + rawVHostTopics := d.Get("vhost_topics").(string) + + // Either tags or VHost permissions are always required, but topic permissions are always optional. + if tags == "" && rawVHosts == "" { + return logical.ErrorResponse("both tags and vhosts not specified"), nil + } + + var vhosts map[string]vhostPermission + if len(rawVHosts) > 0 { + if err := jsonutil.DecodeJSON([]byte(rawVHosts), &vhosts); err != nil { + return logical.ErrorResponse(fmt.Sprintf("failed to unmarshal vhosts: %s", err)), nil + } + } + + var vhostTopics map[string]map[string]vhostTopicPermission + if len(rawVHostTopics) > 0 { + if err := jsonutil.DecodeJSON([]byte(rawVHostTopics), &vhostTopics); err != nil { + return logical.ErrorResponse(fmt.Sprintf("failed to unmarshal vhost_topics: %s", err)), nil + } + } + + // Store it + entry, err := logical.StorageEntryJSON("role/"+name, &roleEntry{ + Tags: tags, + VHosts: vhosts, + VHostTopics: vhostTopics, + }) + if err != nil { + return nil, err + } + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + + return nil, nil +} + +// Role that defines the capabilities of the credentials issued against it. +// Maps are used because the names of vhosts and exchanges will vary widely. +// VHosts is a map with a vhost name as key and the permissions as value. +// VHostTopics is a nested map with vhost name and exchange name as keys and +// the topic permissions as value. +type roleEntry struct { + Tags string `json:"tags" structs:"tags" mapstructure:"tags"` + VHosts map[string]vhostPermission `json:"vhosts" structs:"vhosts" mapstructure:"vhosts"` + VHostTopics map[string]map[string]vhostTopicPermission `json:"vhost_topics" structs:"vhost_topics" mapstructure:"vhost_topics"` +} + +// Structure representing the permissions of a vhost +type vhostPermission struct { + Configure string `json:"configure" structs:"configure" mapstructure:"configure"` + Write string `json:"write" structs:"write" mapstructure:"write"` + Read string `json:"read" structs:"read" mapstructure:"read"` +} + +// Structure representing the topic permissions of an exchange +type vhostTopicPermission struct { + Write string `json:"write" structs:"write" mapstructure:"write"` + Read string `json:"read" structs:"read" mapstructure:"read"` +} + +const pathRoleHelpSyn = ` +Manage the roles that can be created with this backend. +` + +const pathRoleHelpDesc = ` +This path lets you manage the roles that can be created with this backend. + +The "tags" parameter customizes the tags used to create the role. +This is a comma separated list of strings. The "vhosts" parameter customizes +the virtual hosts that this user will be associated with. 
This is a JSON object +passed as a string in the form: +{ + "vhostOne": { + "configure": ".*", + "write": ".*", + "read": ".*" + }, + "vhostTwo": { + "configure": ".*", + "write": ".*", + "read": ".*" + } +} +The "vhost_topics" parameter customizes the topic permissions that this user +will be granted. This is a JSON object passed as a string in the form: +{ + "vhostOne": { + "exchangeOneOne": { + "write": ".*", + "read": ".*" + }, + "exchangeOneTwo": { + "write": ".*", + "read": ".*" + } + }, + "vhostTwo": { + "exchangeTwoOne": { + "write": ".*", + "read": ".*" + } + } +} +` diff --git a/builtin/logical/rabbitmq/secret_creds.go b/builtin/logical/rabbitmq/secret_creds.go new file mode 100644 index 0000000..2d0cce3 --- /dev/null +++ b/builtin/logical/rabbitmq/secret_creds.go @@ -0,0 +1,72 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package rabbitmq + +import ( + "context" + "fmt" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +// SecretCredsType is the key for this backend's secrets. +const SecretCredsType = "creds" + +func secretCreds(b *backend) *framework.Secret { + return &framework.Secret{ + Type: SecretCredsType, + Fields: map[string]*framework.FieldSchema{ + "username": { + Type: framework.TypeString, + Description: "RabbitMQ username", + }, + "password": { + Type: framework.TypeString, + Description: "Password for the RabbitMQ username", + }, + }, + Renew: b.secretCredsRenew, + Revoke: b.secretCredsRevoke, + } +} + +// Renew the previously issued secret +func (b *backend) secretCredsRenew(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + // Get the lease information + lease, err := b.Lease(ctx, req.Storage) + if err != nil { + return nil, err + } + if lease == nil { + lease = &configLease{} + } + + resp := &logical.Response{Secret: req.Secret} + resp.Secret.TTL = lease.TTL + resp.Secret.MaxTTL = lease.MaxTTL + return resp, nil +} + +// Revoke the previously issued secret +func (b *backend) secretCredsRevoke(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + // Get the username from the internal data + usernameRaw, ok := req.Secret.InternalData["username"] + if !ok { + return nil, fmt.Errorf("secret is missing username internal data") + } + username := usernameRaw.(string) + + // Get our connection + client, err := b.Client(ctx, req.Storage) + if err != nil { + return nil, err + } + + if _, err = client.DeleteUser(username); err != nil { + return nil, fmt.Errorf("could not delete user: %w", err) + } + + return nil, nil +} diff --git a/builtin/logical/ssh/backend.go b/builtin/logical/ssh/backend.go new file mode 100644 index 0000000..0606b3e --- /dev/null +++ b/builtin/logical/ssh/backend.go @@ -0,0 +1,130 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package ssh
+
+import (
+	"context"
+	"strings"
+	"sync"
+
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/helper/salt"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+const operationPrefixSSH = "ssh"
+
+type backend struct {
+	*framework.Backend
+	view      logical.Storage
+	salt      *salt.Salt
+	saltMutex sync.RWMutex
+}
+
+func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
+	b, err := Backend(conf)
+	if err != nil {
+		return nil, err
+	}
+	if err := b.Setup(ctx, conf); err != nil {
+		return nil, err
+	}
+	return b, nil
+}
+
+func Backend(conf *logical.BackendConfig) (*backend, error) {
+	var b backend
+	b.view = conf.StorageView
+	b.Backend = &framework.Backend{
+		Help: strings.TrimSpace(backendHelp),
+
+		PathsSpecial: &logical.Paths{
+			Unauthenticated: []string{
+				"verify",
+				"public_key",
+			},
+
+			LocalStorage: []string{
+				"otp/",
+			},
+
+			SealWrapStorage: []string{
+				caPrivateKey,
+				caPrivateKeyStoragePath,
+				keysStoragePrefix,
+			},
+		},
+
+		Paths: []*framework.Path{
+			pathConfigZeroAddress(&b),
+			pathListRoles(&b),
+			pathRoles(&b),
+			pathCredsCreate(&b),
+			pathLookup(&b),
+			pathVerify(&b),
+			pathConfigCA(&b),
+			pathSign(&b),
+			pathIssue(&b),
+			pathFetchPublicKey(&b),
+			pathCleanupKeys(&b),
+		},
+
+		Secrets: []*framework.Secret{
+			secretOTP(&b),
+		},
+
+		Invalidate:  b.invalidate,
+		BackendType: logical.TypeLogical,
+	}
+	return &b, nil
+}
+
+func (b *backend) Salt(ctx context.Context) (*salt.Salt, error) {
+	b.saltMutex.RLock()
+	if b.salt != nil {
+		defer b.saltMutex.RUnlock()
+		return b.salt, nil
+	}
+	b.saltMutex.RUnlock()
+	b.saltMutex.Lock()
+	defer b.saltMutex.Unlock()
+	if b.salt != nil {
+		return b.salt, nil
+	}
+	salt, err := salt.NewSalt(ctx, b.view, &salt.Config{
+		HashFunc: salt.SHA256Hash,
+		Location: salt.DefaultLocation,
+	})
+	if err != nil {
+		return nil, err
+	}
+	b.salt = salt
+	return salt, nil
+}
+
+func (b *backend) invalidate(_ context.Context, key string) {
+	switch key {
+	case salt.DefaultLocation:
+		b.saltMutex.Lock()
+		defer b.saltMutex.Unlock()
+		b.salt = nil
+	}
+}
+
+const backendHelp = `
+The SSH backend generates credentials allowing clients to establish SSH
+connections to remote hosts.
+
+There are two variants of the backend, which generate different types of
+credentials: One-Time Passwords (OTPs) and certificate authority (CA) signed
+certificates. The desired behavior is role-specific and chosen at role
+creation time with the 'key_type' parameter.
+
+Please see the backend documentation for a thorough description of both
+types. The Vault team strongly recommends the OTP type.
+
+After mounting this backend, before generating credentials, configure the
+backend's lease behavior using the 'config/lease' endpoint and create roles
+using the 'roles/' endpoint.
+`
diff --git a/builtin/logical/ssh/backend_test.go b/builtin/logical/ssh/backend_test.go
new file mode 100644
index 0000000..02224e0
--- /dev/null
+++ b/builtin/logical/ssh/backend_test.go
@@ -0,0 +1,2790 @@
+// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package ssh + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "fmt" + "net" + "reflect" + "strings" + "testing" + "time" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/credential/userpass" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/require" + "golang.org/x/crypto/ssh" +) + +const ( + testIP = "127.0.0.1" + testUserName = "vaultssh" + testMultiUserName = "vaultssh,otherssh" + testAdminUser = "vaultssh" + testCaKeyType = "ca" + testOTPKeyType = "otp" + testCIDRList = "127.0.0.1/32" + testAtRoleName = "test@RoleName" + testOTPRoleName = "testOTPRoleName" + // testKeyName is the name of the entry that will be written to SSHMOUNTPOINT/ssh/keys + testKeyName = "testKeyName" + // testSharedPrivateKey is the value of the entry that will be written to SSHMOUNTPOINT/ssh/keys + testSharedPrivateKey = ` +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAvYvoRcWRxqOim5VZnuM6wHCbLUeiND0yaM1tvOl+Fsrz55DG +A0OZp4RGAu1Fgr46E1mzxFz1+zY4UbcEExg+u21fpa8YH8sytSWW1FyuD8ICib0A +/l8slmDMw4BkkGOtSlEqgscpkpv/TWZD1NxJWkPcULk8z6c7TOETn2/H9mL+v2RE +mbE6NDEwJKfD3MvlpIqCP7idR+86rNBAODjGOGgyUbtFLT+K01XmDRALkV3V/nh+ +GltyjL4c6RU4zG2iRyV5RHlJtkml+UzUMkzr4IQnkCC32CC/wmtoo/IsAprpcHVe +nkBn3eFQ7uND70p5n6GhN/KOh2j519JFHJyokwIDAQABAoIBAHX7VOvBC3kCN9/x ++aPdup84OE7Z7MvpX6w+WlUhXVugnmsAAVDczhKoUc/WktLLx2huCGhsmKvyVuH+ +MioUiE+vx75gm3qGx5xbtmOfALVMRLopjCnJYf6EaFA0ZeQ+NwowNW7Lu0PHmAU8 +Z3JiX8IwxTz14DU82buDyewO7v+cEr97AnERe3PUcSTDoUXNaoNxjNpEJkKREY6h +4hAY676RT/GsRcQ8tqe/rnCqPHNd7JGqL+207FK4tJw7daoBjQyijWuB7K5chSal +oPInylM6b13ASXuOAOT/2uSUBWmFVCZPDCmnZxy2SdnJGbsJAMl7Ma3MUlaGvVI+ +Tfh1aQkCgYEA4JlNOabTb3z42wz6mz+Nz3JRwbawD+PJXOk5JsSnV7DtPtfgkK9y +6FTQdhnozGWShAvJvc+C4QAihs9AlHXoaBY5bEU7R/8UK/pSqwzam+MmxmhVDV7G +IMQPV0FteoXTaJSikhZ88mETTegI2mik+zleBpVxvfdhE5TR+lq8Br0CgYEA2AwJ +CUD5CYUSj09PluR0HHqamWOrJkKPFPwa+5eiTTCzfBBxImYZh7nXnWuoviXC0sg2 +AuvCW+uZ48ygv/D8gcz3j1JfbErKZJuV+TotK9rRtNIF5Ub7qysP7UjyI7zCssVM +kuDd9LfRXaB/qGAHNkcDA8NxmHW3gpln4CFdSY8CgYANs4xwfercHEWaJ1qKagAe +rZyrMpffAEhicJ/Z65lB0jtG4CiE6w8ZeUMWUVJQVcnwYD+4YpZbX4S7sJ0B8Ydy +AhkSr86D/92dKTIt2STk6aCN7gNyQ1vW198PtaAWH1/cO2UHgHOy3ZUt5X/Uwxl9 +cex4flln+1Viumts2GgsCQKBgCJH7psgSyPekK5auFdKEr5+Gc/jB8I/Z3K9+g4X +5nH3G1PBTCJYLw7hRzw8W/8oALzvddqKzEFHphiGXK94Lqjt/A4q1OdbCrhiE68D +My21P/dAKB1UYRSs9Y8CNyHCjuZM9jSMJ8vv6vG/SOJPsnVDWVAckAbQDvlTHC9t +O98zAoGAcbW6uFDkrv0XMCpB9Su3KaNXOR0wzag+WIFQRXCcoTvxVi9iYfUReQPi +oOyBJU/HMVvBfv4g+OVFLVgSwwm6owwsouZ0+D/LasbuHqYyqYqdyPJQYzWA2Y+F ++B6f4RoPdSXj24JHPg/ioRxjaj094UXJxua2yfkcecGNEuBQHSs= +-----END RSA PRIVATE KEY----- +` + // Public half of `testCAPrivateKey`, identical to how it would be fed in from a file + testCAPublicKey = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDArgK0ilRRfk8E7HIsjz5l3BuxmwpDd8DHRCVfOhbZ4gOSVxjEOOqBwWGjygdboBIZwFXmwDlU6sWX0hBJAgpQz0Cjvbjxtq/NjkvATrYPgnrXUhTaEn2eQO0PsqRNSFH46SK/oJfTp0q8/WgojxWJ2L7FUV8PO8uIk49DzqAqPV7WXU63vFsjx+3WQOX/ILeQvHCvaqs3dWjjzEoDudRWCOdUqcHEOshV9azIzPrXlQVzRV3QAKl6u7pC+/Secorpwt6IHpMKoVPGiR0tMMuNOVH8zrAKzIxPGfy2WmNDpJopbXMTvSOGAqNcp49O4SKOQl9Fzfq2HEevJamKLrMB dummy@example.com +` + publicKey2 = 
`AAAAB3NzaC1yc2EAAAADAQABAAABAQDArgK0ilRRfk8E7HIsjz5l3BuxmwpDd8DHRCVfOhbZ4gOSVxjEOOqBwWGjygdboBIZwFXmwDlU6sWX0hBJAgpQz0Cjvbjxtq/NjkvATrYPgnrXUhTaEn2eQO0PsqRNSFH46SK/oJfTp0q8/WgojxWJ2L7FUV8PO8uIk49DzqAqPV7WXU63vFsjx+3WQOX/ILeQvHCvaqs3dWjjzEoDudRWCOdUqcHEOshV9azIzPrXlQVzRV3QAKl6u7pC+/Secorpwt6IHpMKoVPGiR0tMMuNOVH8zrAKzIxPGfy2WmNDpJopbXMTvSOGAqNcp49O4SKOQl9Fzfq2HEevJamKLrMB +` + + publicKey3072 = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDlsMr3K1d0nzE1TjUULPRuVjEGETmOqHtWq4gVPq3HiuNVHE/e/BJnkXc40BoClQ2Z5ZZPJZ6izF9PnlzNDjpq8DrILUrn/6KrzCHvRwnkYMAXbfM/Br09z5QGptbOe1EMLeVe0b/udmUicbYAGPxMruZk+ljyr4vXkO+gOAIrxeSIQSdMVLU4g0pCPQuDCOx5IQpDYSlOB3091frpN8npfMueKPflNYzxnqqYgAVeDKAIqMCGOMOHUeIZJ7A7HuynEAVOsOkJwC9nesy9D6ppdWNduGl42IkzlwVdDMZtUAEznMUT/dnHNG1Krx9SuNZ/S9fGjxGVsT+jzUmizrWB9/6XIEHDxPBzcqlWFuwYTGz1OL8bfZ+HldOGPcnqZn9hKntWwjUc3whcvWt+NCmXpHSVLSxf+WN8pdmfEsCqn8mpvo2MXa+iJrtAVPX4i0u8AQUuqC3NuXHv4Cn0LNwtziBT544UjgbWkAZqzFZJREYA09OHscc3akEIrTnPehk= demo@example.com` + + publicKey4096 = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC54Oj4YCFDYxYv69Q9KfU6rWYtUB1eByQdUW0nXFi/vr98QUIV77sEeUVhaQzZcuCojAi/GrloW7ta0Z2DaEv5jOQMAnGpXBcqLJsz3KdrHbpvl93MPNdmNaGPU0GnUEsjBVuDVn9HdIUa8CNrxShvPu7/VqoaRHKLqphGgzFb37vi4qvnQ+5VYAO/TzyVYMD6qJX6I/9Pw8d74jCfEdOh2yGKkP7rXWOghreyIl8H2zTJKg9KoZuPq9F5M8nNt7Oi3rf+DwQiYvamzIqlDP4s5oFVTZW0E9lwWvYDpyiJnUrkQqksebBK/rcyfiFG3onb4qLo2WVWXeK3si8IhGik/TEzprScyAWIf9RviT8O+l5hTA2/c+ctn3MVCLRNfez2lKpdxCoprv1MbIcySGWblTJEcY6RA+aauVJpu7FMtRxHHtZKtMpep8cLu8GKbiP6Ifq2JXBtXtNxDeIgo2MkNoMh/NHAsACJniE/dqV/+u9HvhvgrTbJ69ell0nE4ivzA7O4kZgbR/4MHlLgLFvaqC8RrWRLY6BdFagPIMxghWha7Qw16zqoIjRnolvRzUWvSXanJVg8Z6ua1VxwgirNaAH1ivmJhUh2+4lNxCX6jmZyR3zjJsWY03gjJTairvI762opjjalF8fH6Xrs15mB14JiAlNbk6+5REQcvXlGqw== dummy@example.com` + + testCAPrivateKey = `-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAwK4CtIpUUX5PBOxyLI8+ZdwbsZsKQ3fAx0QlXzoW2eIDklcY +xDjqgcFho8oHW6ASGcBV5sA5VOrFl9IQSQIKUM9Ao7248bavzY5LwE62D4J611IU +2hJ9nkDtD7KkTUhR+Okiv6CX06dKvP1oKI8Vidi+xVFfDzvLiJOPQ86gKj1e1l1O +t7xbI8ft1kDl/yC3kLxwr2qrN3Vo48xKA7nUVgjnVKnBxDrIVfWsyMz615UFc0Vd +0ACperu6Qvv0nnKK6cLeiB6TCqFTxokdLTDLjTlR/M6wCsyMTxn8tlpjQ6SaKW1z +E70jhgKjXKePTuEijkJfRc36thxHryWpii6zAQIDAQABAoIBAA/DrPD8iF2KigiL +F+RRa/eFhLaJStOuTpV/G9eotwnolgY5Hguf5H/tRIHUG7oBZLm6pMyWWZp7AuOj +CjYO9q0Z5939vc349nVI+SWoyviF4msPiik1bhWulja8lPjFu/8zg+ZNy15Dx7ei +vAzleAupMiKOv8pNSB/KguQ3WZ9a9bcQcoFQ2Foru6mXpLJ03kghVRlkqvQ7t5cA +n11d2Hiipq9mleESr0c+MUPKLBX/neaWfGA4xgJTjIYjZi6avmYc/Ox3sQ9aLq2J +tH0D4HVUZvaU28hn+jhbs64rRFbu++qQMe3vNvi/Q/iqcYU4b6tgDNzm/JFRTS/W +njiz4mkCgYEA44CnQVmonN6qQ0AgNNlBY5+RX3wwBJZ1AaxpzwDRylAt2vlVUA0n +YY4RW4J4+RMRKwHwjxK5RRmHjsIJx+nrpqihW3fte3ev5F2A9Wha4dzzEHxBY6IL +362T/x2f+vYk6tV+uTZSUPHsuELH26mitbBVFNB/00nbMNdEc2bO5FMCgYEA2NCw +ubt+g2bRkkT/Qf8gIM8ZDpZbARt6onqxVcWkQFT16ZjbsBWUrH1Xi7alv9+lwYLJ +ckY/XDX4KeU19HabeAbpyy6G9Q2uBSWZlJbjl7QNhdLeuzV82U1/r8fy6Uu3gQnU +WSFx2GesRpSmZpqNKMs5ksqteZ9Yjg1EIgXdINsCgYBIn9REt1NtKGOf7kOZu1T1 +cYXdvm4xuLoHW7u3OiK+e9P3mCqU0G4m5UxDMyZdFKohWZAqjCaamWi9uNGYgOMa +I7DG20TzaiS7OOIm9TY17eul8pSJMrypnealxRZB7fug/6Bhjaa/cktIEwFr7P4l +E/JFH73+fBA9yipu0H3xQwKBgHmiwrLAZF6VrVcxDD9bQQwHA5iyc4Wwg+Fpkdl7 +0wUgZQHTdtRXlxwaCaZhJqX5c4WXuSo6DMvPn1TpuZZXgCsbPch2ZtJOBWXvzTSW +XkK6iaedQMWoYU2L8+mK9FU73EwxVodWgwcUSosiVCRV6oGLWdZnjGEiK00uVh38 +Si1nAoGBAL47wWinv1cDTnh5mm0mybz3oI2a6V9aIYCloQ/EFcvtahyR/gyB8qNF +lObH9Faf0WGdnACZvTz22U9gWhw79S0SpDV31tC5Kl8dXHFiZ09vYUKkYmSd/kms +SeKWrUkryx46LVf6NMhkyYmRqCEjBwfOozzezi5WbiJy6nn54GQt +-----END RSA PRIVATE KEY----- +` + + testCAPublicKeyEd25519 = `ssh-ed25519 
AAAAC3NzaC1lZDI1NTE5AAAAIO1S6g5Bib7vT8eoFnvTl3dZSjOQL/GkH1nkRcDS9++a ca +` + + testCAPrivateKeyEd25519 = `-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW +QyNTUxOQAAACDtUuoOQYm+70/HqBZ705d3WUozkC/xpB9Z5EXA0vfvmgAAAIhfRuszX0br +MwAAAAtzc2gtZWQyNTUxOQAAACDtUuoOQYm+70/HqBZ705d3WUozkC/xpB9Z5EXA0vfvmg +AAAEBQYa029SP/7AGPFQLmzwOc9eCoOZuwCq3iIf2C6fj9j+1S6g5Bib7vT8eoFnvTl3dZ +SjOQL/GkH1nkRcDS9++aAAAAAmNhAQID +-----END OPENSSH PRIVATE KEY----- +` + + publicKeyECDSA256 = `ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBJsfOouYIjJNI23QJqaDsFTGukm21fRAMeGvKZDB59i5jnX1EubMH1AEjjzz4fgySUlyWKo+TS31rxU8kX3DDM4= demo@example.com` + publicKeyECDSA521 = `ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBAEg73ORD4J3FV2CrL01gLSKREO2EHrZPlJCOeDL5OKD3M1GCHv3q8O452RW49Aw+8zFFFU5u6d1Ys3Qsj05zdaQwQDt/D3ceWLGVkWiKyLPQStfn0GGOZh3YFKEw5XmeW9jh6xudEHlKs4Pfv2FrroaUKZvM2SlxR/feOK0tCQyq3MN/g== demo@example.com` + + // testPublicKeyInstall is the public key that is installed in the + // admin account's authorized_keys + testPublicKeyInstall = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9i+hFxZHGo6KblVme4zrAcJstR6I0PTJozW286X4WyvPnkMYDQ5mnhEYC7UWCvjoTWbPEXPX7NjhRtwQTGD67bV+lrxgfyzK1JZbUXK4PwgKJvQD+XyyWYMzDgGSQY61KUSqCxymSm/9NZkPU3ElaQ9xQuTzPpztM4ROfb8f2Yv6/ZESZsTo0MTAkp8Pcy+WkioI/uJ1H7zqs0EA4OMY4aDJRu0UtP4rTVeYNEAuRXdX+eH4aW3KMvhzpFTjMbaJHJXlEeUm2SaX5TNQyTOvghCeQILfYIL/Ca2ij8iwCmulwdV6eQGfd4VDu40PvSnmfoaE38o6HaPnX0kUcnKiT" + + dockerImageTagSupportsRSA1 = "8.1_p1-r0-ls20" + dockerImageTagSupportsNoRSA1 = "8.4_p1-r3-ls48" +) + +var ctx = context.Background() + +func prepareTestContainer(t *testing.T, tag, caPublicKeyPEM string) (func(), string) { + if tag == "" { + tag = dockerImageTagSupportsNoRSA1 + } + runner, err := docker.NewServiceRunner(docker.RunOptions{ + ContainerName: "openssh", + ImageRepo: "docker.mirror.hashicorp.services/linuxserver/openssh-server", + ImageTag: tag, + Env: []string{ + "DOCKER_MODS=linuxserver/mods:openssh-server-openssh-client", + "PUBLIC_KEY=" + testPublicKeyInstall, + "SUDO_ACCESS=true", + "USER_NAME=vaultssh", + }, + Ports: []string{"2222/tcp"}, + }) + if err != nil { + t.Fatalf("Could not start local ssh docker container: %s", err) + } + + svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { + ipaddr, err := net.ResolveIPAddr("ip", host) + if err != nil { + return nil, err + } + sshAddress := fmt.Sprintf("%s:%d", ipaddr.String(), port) + + signer, err := ssh.ParsePrivateKey([]byte(testSharedPrivateKey)) + if err != nil { + return nil, err + } + + // Install util-linux for non-busybox flock that supports timeout option + err = testSSH("vaultssh", sshAddress, ssh.PublicKeys(signer), fmt.Sprintf(` + set -e; + sudo ln -s /config /home/vaultssh + sudo apk add util-linux; + echo "LogLevel DEBUG" | sudo tee -a /config/ssh_host_keys/sshd_config; + echo "TrustedUserCAKeys /config/ssh_host_keys/trusted-user-ca-keys.pem" | sudo tee -a /config/ssh_host_keys/sshd_config; + kill -HUP $(cat /config/sshd.pid) + echo "%s" | sudo tee /config/ssh_host_keys/trusted-user-ca-keys.pem + `, caPublicKeyPEM)) + if err != nil { + return nil, err + } + + return docker.NewServiceHostPort(ipaddr.String(), port), nil + }) + if err != nil { + t.Fatalf("Could not start docker ssh server: %s", err) + } + return svc.Cleanup, svc.Config.Address() +} + +func testSSH(user, host string, auth ssh.AuthMethod, command string) error { + client, err 
:= ssh.Dial("tcp", host, &ssh.ClientConfig{
+		User:            user,
+		Auth:            []ssh.AuthMethod{auth},
+		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
+		Timeout:         5 * time.Second,
+	})
+	if err != nil {
+		return fmt.Errorf("unable to dial sshd to host %q: %v", host, err)
+	}
+	session, err := client.NewSession()
+	if err != nil {
+		return fmt.Errorf("unable to create sshd session to host %q: %v", host, err)
+	}
+	var stderr bytes.Buffer
+	session.Stderr = &stderr
+	defer session.Close()
+	err = session.Run(command)
+	if err != nil {
+		return fmt.Errorf("command %v failed, error: %v, stderr: %v", command, err, stderr.String())
+	}
+	return nil
+}
+
+func TestBackend_AllowedUsers(t *testing.T) {
+	config := logical.TestBackendConfig()
+	config.StorageView = &logical.InmemStorage{}
+
+	b, err := Backend(config)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = b.Setup(context.Background(), config)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	roleData := map[string]interface{}{
+		"key_type":      "otp",
+		"default_user":  "ubuntu",
+		"cidr_list":     "52.207.235.245/16",
+		"allowed_users": "test",
+	}
+
+	roleReq := &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "roles/role1",
+		Storage:   config.StorageView,
+		Data:      roleData,
+	}
+
+	resp, err := b.HandleRequest(context.Background(), roleReq)
+	if err != nil || (resp != nil && resp.IsError()) || resp != nil {
+		t.Fatalf("failed to create role: resp:%#v err:%s", resp, err)
+	}
+
+	credsData := map[string]interface{}{
+		"ip":       "52.207.235.245",
+		"username": "ubuntu",
+	}
+	credsReq := &logical.Request{
+		Operation: logical.UpdateOperation,
+		Storage:   config.StorageView,
+		Path:      "creds/role1",
+		Data:      credsData,
+	}
+
+	resp, err = b.HandleRequest(context.Background(), credsReq)
+	if err != nil || (resp != nil && resp.IsError()) || resp == nil {
+		t.Fatalf("failed to create credentials: resp:%#v err:%s", resp, err)
+	}
+	if resp.Data["key"] == "" ||
+		resp.Data["key_type"] != "otp" ||
+		resp.Data["ip"] != "52.207.235.245" ||
+		resp.Data["username"] != "ubuntu" {
+		t.Fatalf("failed to create credential: resp:%#v", resp)
+	}
+
+	credsData["username"] = "test"
+	resp, err = b.HandleRequest(context.Background(), credsReq)
+	if err != nil || (resp != nil && resp.IsError()) || resp == nil {
+		t.Fatalf("failed to create credentials: resp:%#v err:%s", resp, err)
+	}
+	if resp.Data["key"] == "" ||
+		resp.Data["key_type"] != "otp" ||
+		resp.Data["ip"] != "52.207.235.245" ||
+		resp.Data["username"] != "test" {
+		t.Fatalf("failed to create credential: resp:%#v", resp)
+	}
+
+	credsData["username"] = "random"
+	resp, err = b.HandleRequest(context.Background(), credsReq)
+	if err != nil || resp == nil || (resp != nil && !resp.IsError()) {
+		t.Fatalf("expected failure: resp:%#v err:%s", resp, err)
+	}
+
+	delete(roleData, "allowed_users")
+	resp, err = b.HandleRequest(context.Background(), roleReq)
+	if err != nil || (resp != nil && resp.IsError()) || resp != nil {
+		t.Fatalf("failed to create role: resp:%#v err:%s", resp, err)
+	}
+
+	credsData["username"] = "ubuntu"
+	resp, err = b.HandleRequest(context.Background(), credsReq)
+	if err != nil || (resp != nil && resp.IsError()) || resp == nil {
+		t.Fatalf("failed to create credentials: resp:%#v err:%s", resp, err)
+	}
+	if resp.Data["key"] == "" ||
+		resp.Data["key_type"] != "otp" ||
+		resp.Data["ip"] != "52.207.235.245" ||
+		resp.Data["username"] != "ubuntu" {
+		t.Fatalf("failed to create credential: resp:%#v", resp)
+	}
+
+	credsData["username"] = "test"
+	resp, err = b.HandleRequest(context.Background(), credsReq)
+	if err != nil || resp == nil || (resp != nil && !resp.IsError()) {
+		t.Fatalf("expected failure: resp:%#v err:%s", resp, err)
+	}
+
+	roleData["allowed_users"] = "*"
+	resp, err = b.HandleRequest(context.Background(), roleReq)
+	if err != nil || (resp != nil && resp.IsError()) || resp != nil {
+		t.Fatalf("failed to create role: resp:%#v err:%s", resp, err)
+	}
+
+	resp, err = b.HandleRequest(context.Background(), credsReq)
+	if err != nil || (resp != nil && resp.IsError()) || resp == nil {
+		t.Fatalf("failed to create credentials: resp:%#v err:%s", resp, err)
+	}
+	if resp.Data["key"] == "" ||
+		resp.Data["key_type"] != "otp" ||
+		resp.Data["ip"] != "52.207.235.245" ||
+		resp.Data["username"] != "test" {
+		t.Fatalf("failed to create credential: resp:%#v", resp)
+	}
+}
+
+func TestBackend_AllowedDomainsTemplate(t *testing.T) {
+	testAllowedDomainsTemplate := "{{ identity.entity.metadata.ssh_username }}.example.com"
+	expectedValidPrincipal := "foo." + testUserName + ".example.com"
+	testAllowedPrincipalsTemplate(
+		t, testAllowedDomainsTemplate,
+		expectedValidPrincipal,
+		map[string]string{
+			"ssh_username": testUserName,
+		},
+		map[string]interface{}{
+			"key_type":                 testCaKeyType,
+			"algorithm_signer":         "rsa-sha2-256",
+			"allow_host_certificates":  true,
+			"allow_subdomains":         true,
+			"allowed_domains":          testAllowedDomainsTemplate,
+			"allowed_domains_template": true,
+		},
+		map[string]interface{}{
+			"cert_type":        "host",
+			"public_key":       testCAPublicKey,
+			"valid_principals": expectedValidPrincipal,
+		},
+	)
+}
+
+func TestBackend_AllowedUsersTemplate(t *testing.T) {
+	testAllowedUsersTemplate(t,
+		"{{ identity.entity.metadata.ssh_username }}",
+		testUserName, map[string]string{
+			"ssh_username": testUserName,
+		},
+	)
+}
+
+func TestBackend_MultipleAllowedUsersTemplate(t *testing.T) {
+	testAllowedUsersTemplate(t,
+		"{{ identity.entity.metadata.ssh_username }}",
+		testUserName, map[string]string{
+			"ssh_username": testMultiUserName,
+		},
+	)
+}
+
+func TestBackend_AllowedUsersTemplate_WithStaticPrefix(t *testing.T) {
+	testAllowedUsersTemplate(t,
+		"ssh-{{ identity.entity.metadata.ssh_username }}",
+		"ssh-"+testUserName, map[string]string{
+			"ssh_username": testUserName,
+		},
+	)
+}
+
+func TestBackend_DefaultUserTemplate(t *testing.T) {
+	testDefaultUserTemplate(t,
+		"{{ identity.entity.metadata.ssh_username }}",
+		testUserName,
+		map[string]string{
+			"ssh_username": testUserName,
+		},
+	)
+}
+
+func TestBackend_DefaultUserTemplate_WithStaticPrefix(t *testing.T) {
+	testDefaultUserTemplate(t,
+		"user-{{ identity.entity.metadata.ssh_username }}",
+		"user-"+testUserName,
+		map[string]string{
+			"ssh_username": testUserName,
+		},
+	)
+}
+
+func TestBackend_DefaultUserTemplateFalse_AllowedUsersTemplateTrue(t *testing.T) {
+	cluster, userpassToken := getSshCaTestCluster(t, testUserName)
+	defer cluster.Cleanup()
+	client := cluster.Cores[0].Client
+
+	// set metadata "ssh_username" to userpass username
+	tokenLookupResponse, err := client.Logical().Write("/auth/token/lookup", map[string]interface{}{
+		"token": userpassToken,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	entityID := tokenLookupResponse.Data["entity_id"].(string)
+	_, err = client.Logical().Write("/identity/entity/id/"+entityID, map[string]interface{}{
+		"metadata": map[string]string{
+			"ssh_username": testUserName,
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Logical().Write("ssh/roles/my-role", map[string]interface{}{
+		"key_type":                testCaKeyType,
+		"allow_user_certificates": true,
+		"default_user":            "{{identity.entity.metadata.ssh_username}}",
+		// disable default_user templating but leave allowed_users_template enabled; the request should fail
+		"default_user_template":  false,
+		"allowed_users":          "{{identity.entity.metadata.ssh_username}}",
+		"allowed_users_template": true,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// sign SSH key as userpass user
+	client.SetToken(userpassToken)
+	_, err = client.Logical().Write("ssh/sign/my-role", map[string]interface{}{
+		"public_key": testCAPublicKey,
+	})
+	if err == nil {
+		t.Errorf("signing request should fail when default_user is not in the allowed_users list, because allowed_users_template is true and default_user_template is not")
+	}
+
+	expectedErrStr := "{{identity.entity.metadata.ssh_username}} is not a valid value for valid_principals"
+	if !strings.Contains(err.Error(), expectedErrStr) {
+		t.Errorf("expected error to include %q but it was: %q", expectedErrStr, err.Error())
+	}
+}
+
+func TestBackend_DefaultUserTemplateFalse_AllowedUsersTemplateFalse(t *testing.T) {
+	cluster, userpassToken := getSshCaTestCluster(t, testUserName)
+	defer cluster.Cleanup()
+	client := cluster.Cores[0].Client
+
+	// set metadata "ssh_username" to userpass username
+	tokenLookupResponse, err := client.Logical().Write("/auth/token/lookup", map[string]interface{}{
+		"token": userpassToken,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	entityID := tokenLookupResponse.Data["entity_id"].(string)
+	_, err = client.Logical().Write("/identity/entity/id/"+entityID, map[string]interface{}{
+		"metadata": map[string]string{
+			"ssh_username": testUserName,
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Logical().Write("ssh/roles/my-role", map[string]interface{}{
+		"key_type":                testCaKeyType,
+		"allow_user_certificates": true,
+		"default_user":            "{{identity.entity.metadata.ssh_username}}",
+		"default_user_template":   false,
+		"allowed_users":           "{{identity.entity.metadata.ssh_username}}",
+		"allowed_users_template":  false,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// sign SSH key as userpass user
+	client.SetToken(userpassToken)
+	signResponse, err := client.Logical().Write("ssh/sign/my-role", map[string]interface{}{
+		"public_key": testCAPublicKey,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// check for the expected valid principals of certificate
+	signedKey := signResponse.Data["signed_key"].(string)
+	key, _ := base64.StdEncoding.DecodeString(strings.Split(signedKey, " ")[1])
+	parsedKey, err := ssh.ParsePublicKey(key)
+	if err != nil {
+		t.Fatal(err)
+	}
+	actualPrincipals := parsedKey.(*ssh.Certificate).ValidPrincipals
+	if len(actualPrincipals) < 1 {
+		t.Fatalf("No ValidPrincipals returned: should have been %v",
+			[]string{"{{identity.entity.metadata.ssh_username}}"})
+	}
+	if len(actualPrincipals) > 1 {
+		t.Errorf("incorrect number of ValidPrincipals, expected only 1: %v should be %v",
+			actualPrincipals, []string{"{{identity.entity.metadata.ssh_username}}"})
+	}
+	if actualPrincipals[0] != "{{identity.entity.metadata.ssh_username}}" {
+		t.Fatalf("incorrect ValidPrincipals: %v should be %v",
+			actualPrincipals, []string{"{{identity.entity.metadata.ssh_username}}"})
+	}
+}
+
+func newTestingFactory(t *testing.T) func(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
+	return func(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
+		defaultLeaseTTLVal := 2 * time.Minute
+		maxLeaseTTLVal := 10 * time.Minute
+		return Factory(context.Background(), &logical.BackendConfig{
+			Logger: 
corehelpers.NewTestLogger(t), + StorageView: &logical.InmemStorage{}, + System: &logical.StaticSystemView{ + DefaultLeaseTTLVal: defaultLeaseTTLVal, + MaxLeaseTTLVal: maxLeaseTTLVal, + }, + }) + } +} + +func TestSSHBackend_Lookup(t *testing.T) { + testOTPRoleData := map[string]interface{}{ + "key_type": testOTPKeyType, + "default_user": testUserName, + "cidr_list": testCIDRList, + } + data := map[string]interface{}{ + "ip": testIP, + } + resp1 := []string(nil) + resp2 := []string{testOTPRoleName} + resp3 := []string{testAtRoleName} + logicaltest.Test(t, logicaltest.TestCase{ + LogicalFactory: newTestingFactory(t), + Steps: []logicaltest.TestStep{ + testLookupRead(t, data, resp1), + testRoleWrite(t, testOTPRoleName, testOTPRoleData), + testLookupRead(t, data, resp2), + testRoleDelete(t, testOTPRoleName), + testLookupRead(t, data, resp1), + testRoleWrite(t, testAtRoleName, testOTPRoleData), + testLookupRead(t, data, resp3), + testRoleDelete(t, testAtRoleName), + testLookupRead(t, data, resp1), + }, + }) +} + +func TestSSHBackend_RoleList(t *testing.T) { + testOTPRoleData := map[string]interface{}{ + "key_type": testOTPKeyType, + "default_user": testUserName, + "cidr_list": testCIDRList, + } + resp1 := map[string]interface{}{} + resp2 := map[string]interface{}{ + "keys": []string{testOTPRoleName}, + "key_info": map[string]interface{}{ + testOTPRoleName: map[string]interface{}{ + "key_type": testOTPKeyType, + }, + }, + } + resp3 := map[string]interface{}{ + "keys": []string{testAtRoleName, testOTPRoleName}, + "key_info": map[string]interface{}{ + testOTPRoleName: map[string]interface{}{ + "key_type": testOTPKeyType, + }, + testAtRoleName: map[string]interface{}{ + "key_type": testOTPKeyType, + }, + }, + } + logicaltest.Test(t, logicaltest.TestCase{ + LogicalFactory: newTestingFactory(t), + Steps: []logicaltest.TestStep{ + testRoleList(t, resp1), + testRoleWrite(t, testOTPRoleName, testOTPRoleData), + testRoleList(t, resp2), + testRoleWrite(t, testAtRoleName, testOTPRoleData), + testRoleList(t, resp3), + testRoleDelete(t, testAtRoleName), + testRoleList(t, resp2), + testRoleDelete(t, testOTPRoleName), + testRoleList(t, resp1), + }, + }) +} + +func TestSSHBackend_OTPRoleCrud(t *testing.T) { + testOTPRoleData := map[string]interface{}{ + "key_type": testOTPKeyType, + "default_user": testUserName, + "cidr_list": testCIDRList, + } + respOTPRoleData := map[string]interface{}{ + "key_type": testOTPKeyType, + "port": 22, + "default_user": testUserName, + "cidr_list": testCIDRList, + } + logicaltest.Test(t, logicaltest.TestCase{ + LogicalFactory: newTestingFactory(t), + Steps: []logicaltest.TestStep{ + testRoleWrite(t, testOTPRoleName, testOTPRoleData), + testRoleRead(t, testOTPRoleName, respOTPRoleData), + testRoleDelete(t, testOTPRoleName), + testRoleRead(t, testOTPRoleName, nil), + testRoleWrite(t, testAtRoleName, testOTPRoleData), + testRoleRead(t, testAtRoleName, respOTPRoleData), + testRoleDelete(t, testAtRoleName), + testRoleRead(t, testAtRoleName, nil), + }, + }) +} + +func TestSSHBackend_OTPCreate(t *testing.T) { + cleanup, sshAddress := prepareTestContainer(t, "", "") + defer func() { + if !t.Failed() { + cleanup() + } + }() + + host, port, err := net.SplitHostPort(sshAddress) + if err != nil { + t.Fatal(err) + } + + testOTPRoleData := map[string]interface{}{ + "key_type": testOTPKeyType, + "default_user": testUserName, + "cidr_list": host + "/32", + "port": port, + } + data := map[string]interface{}{ + "username": testUserName, + "ip": host, + } + logicaltest.Test(t, logicaltest.TestCase{ + 
LogicalFactory: newTestingFactory(t), + Steps: []logicaltest.TestStep{ + testRoleWrite(t, testOTPRoleName, testOTPRoleData), + testCredsWrite(t, testOTPRoleName, data, false, sshAddress), + }, + }) +} + +func TestSSHBackend_VerifyEcho(t *testing.T) { + verifyData := map[string]interface{}{ + "otp": api.VerifyEchoRequest, + } + expectedData := map[string]interface{}{ + "message": api.VerifyEchoResponse, + } + logicaltest.Test(t, logicaltest.TestCase{ + LogicalFactory: newTestingFactory(t), + Steps: []logicaltest.TestStep{ + testVerifyWrite(t, verifyData, expectedData), + }, + }) +} + +func TestSSHBackend_ConfigZeroAddressCRUD(t *testing.T) { + testOTPRoleData := map[string]interface{}{ + "key_type": testOTPKeyType, + "default_user": testUserName, + "cidr_list": testCIDRList, + } + req1 := map[string]interface{}{ + "roles": testOTPRoleName, + } + resp1 := map[string]interface{}{ + "roles": []string{testOTPRoleName}, + } + resp2 := map[string]interface{}{ + "roles": []string{testOTPRoleName}, + } + resp3 := map[string]interface{}{ + "roles": []string{}, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalFactory: newTestingFactory(t), + Steps: []logicaltest.TestStep{ + testRoleWrite(t, testOTPRoleName, testOTPRoleData), + testConfigZeroAddressWrite(t, req1), + testConfigZeroAddressRead(t, resp1), + testConfigZeroAddressRead(t, resp2), + testConfigZeroAddressRead(t, resp1), + testRoleDelete(t, testOTPRoleName), + testConfigZeroAddressRead(t, resp3), + testConfigZeroAddressDelete(t), + }, + }) +} + +func TestSSHBackend_CredsForZeroAddressRoles_otp(t *testing.T) { + otpRoleData := map[string]interface{}{ + "key_type": testOTPKeyType, + "default_user": testUserName, + } + data := map[string]interface{}{ + "username": testUserName, + "ip": testIP, + } + req1 := map[string]interface{}{ + "roles": testOTPRoleName, + } + logicaltest.Test(t, logicaltest.TestCase{ + LogicalFactory: newTestingFactory(t), + Steps: []logicaltest.TestStep{ + testRoleWrite(t, testOTPRoleName, otpRoleData), + testCredsWrite(t, testOTPRoleName, data, true, ""), + testConfigZeroAddressWrite(t, req1), + testCredsWrite(t, testOTPRoleName, data, false, ""), + testConfigZeroAddressDelete(t), + testCredsWrite(t, testOTPRoleName, data, true, ""), + }, + }) +} + +func TestSSHBackend_CA(t *testing.T) { + testCases := []struct { + name string + tag string + caPublicKey string + caPrivateKey string + algoSigner string + expectError bool + }{ + { + "RSAKey_EmptyAlgoSigner_ImageSupportsRSA1", + dockerImageTagSupportsRSA1, + testCAPublicKey, + testCAPrivateKey, + "", + false, + }, + { + "RSAKey_EmptyAlgoSigner_ImageSupportsNoRSA1", + dockerImageTagSupportsNoRSA1, + testCAPublicKey, + testCAPrivateKey, + "", + false, + }, + { + "RSAKey_DefaultAlgoSigner_ImageSupportsRSA1", + dockerImageTagSupportsRSA1, + testCAPublicKey, + testCAPrivateKey, + "default", + false, + }, + { + "RSAKey_DefaultAlgoSigner_ImageSupportsNoRSA1", + dockerImageTagSupportsNoRSA1, + testCAPublicKey, + testCAPrivateKey, + "default", + false, + }, + { + "RSAKey_RSA1AlgoSigner_ImageSupportsRSA1", + dockerImageTagSupportsRSA1, + testCAPublicKey, + testCAPrivateKey, + ssh.SigAlgoRSA, + false, + }, + { + "RSAKey_RSA1AlgoSigner_ImageSupportsNoRSA1", + dockerImageTagSupportsNoRSA1, + testCAPublicKey, + testCAPrivateKey, + ssh.SigAlgoRSA, + true, + }, + { + "RSAKey_RSASHA2256AlgoSigner_ImageSupportsRSA1", + dockerImageTagSupportsRSA1, + testCAPublicKey, + testCAPrivateKey, + ssh.SigAlgoRSASHA2256, + false, + }, + { + "RSAKey_RSASHA2256AlgoSigner_ImageSupportsNoRSA1", + 
dockerImageTagSupportsNoRSA1, + testCAPublicKey, + testCAPrivateKey, + ssh.SigAlgoRSASHA2256, + false, + }, + { + "ed25519Key_EmptyAlgoSigner_ImageSupportsRSA1", + dockerImageTagSupportsRSA1, + testCAPublicKeyEd25519, + testCAPrivateKeyEd25519, + "", + false, + }, + { + "ed25519Key_EmptyAlgoSigner_ImageSupportsNoRSA1", + dockerImageTagSupportsNoRSA1, + testCAPublicKeyEd25519, + testCAPrivateKeyEd25519, + "", + false, + }, + { + "ed25519Key_RSA1AlgoSigner_ImageSupportsRSA1", + dockerImageTagSupportsRSA1, + testCAPublicKeyEd25519, + testCAPrivateKeyEd25519, + ssh.SigAlgoRSA, + true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + testSSHBackend_CA(t, tc.tag, tc.caPublicKey, tc.caPrivateKey, tc.algoSigner, tc.expectError) + }) + } +} + +func testSSHBackend_CA(t *testing.T, dockerImageTag, caPublicKey, caPrivateKey, algorithmSigner string, expectError bool) { + cleanup, sshAddress := prepareTestContainer(t, dockerImageTag, caPublicKey) + defer cleanup() + config := logical.TestBackendConfig() + + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatalf("Cannot create backend: %s", err) + } + + testKeyToSignPrivate := `-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABFwAAAAdzc2gtcn +NhAAAAAwEAAQAAAQEAwn1V2xd/EgJXIY53fBTtc20k/ajekqQngvkpFSwNHW63XNEQK8Ll +FOCyGXoje9DUGxnYs3F/ohfsBBWkLNfU7fiENdSJL1pbkAgJ+2uhV9sLZjvYhikrXWoyJX +LDKfY12LjpcBS2HeLMT04laZ/xSJrOBEJHGzHyr2wUO0NUQUQPUODAFhnHKgvvA4Uu79UY +gcdThF4w83+EAnE4JzBZMKPMjzy4u1C0R/LoD8DuapHwX6NGWdEUvUZZ+XRcIWeCOvR0ne +qGBRH35k1Mv7k65d7kkE0uvM5Z36erw3tdoszxPYf7AKnO1DpeU2uwMcym6xNwfwynKjhL +qL/Mgi4uRwAAA8iAsY0zgLGNMwAAAAdzc2gtcnNhAAABAQDCfVXbF38SAlchjnd8FO1zbS +T9qN6SpCeC+SkVLA0dbrdc0RArwuUU4LIZeiN70NQbGdizcX+iF+wEFaQs19Tt+IQ11Ikv +WluQCAn7a6FX2wtmO9iGKStdajIlcsMp9jXYuOlwFLYd4sxPTiVpn/FIms4EQkcbMfKvbB +Q7Q1RBRA9Q4MAWGccqC+8DhS7v1RiBx1OEXjDzf4QCcTgnMFkwo8yPPLi7ULRH8ugPwO5q +kfBfo0ZZ0RS9Rln5dFwhZ4I69HSd6oYFEffmTUy/uTrl3uSQTS68zlnfp6vDe12izPE9h/ +sAqc7UOl5Ta7AxzKbrE3B/DKcqOEuov8yCLi5HAAAAAwEAAQAAAQABns2yT5XNbpuPOgKg +1APObGBchKWmDxwNKUpAVOefEScR7OP3mV4TOHQDZlMZWvoJZ8O4av+nOA/NUOjXPs0VVn +azhBvIezY8EvUSVSk49Cg6J9F7/KfR1WqpiTU7CkQUlCXNuz5xLUyKdJo3MQ/vjOqeenbh +MR9Wes4IWF1BVe4VOD6lxRsjwuIieIgmScW28FFh2rgsEfO2spzZ3AWOGExw+ih757hFz5 +4A2fhsQXP8m3r8m7iiqcjTLWXdxTUk4zot2kZEjbI4Avk0BL+wVeFq6f/y+G+g5edqSo7j +uuSgzbUQtA9PMnGxhrhU2Ob7n3VGdya7WbGZkaKP8zJhAAAAgQC3bJurmOSLIi3KVhp7lD +/FfxwXHwVBFALCgq7EyNlkTz6RDoMFM4eOTRMDvsgWxT+bSB8R8eg1sfgY8rkHOuvTAVI5 +3oEYco3H7NWE9X8Zt0lyhO1uaE49EENNSQ8hY7R3UIw5becyI+7ZZxs9HkBgCQCZzSjzA+ +SIyAoMKM261AAAAIEA+PCkcDRp3J0PaoiuetXSlWZ5WjP3CtwT2xrvEX9x+ZsDgXCDYQ5T +osxvEKOGSfIrHUUhzZbFGvqWyfrziPe9ypJrtCM7RJT/fApBXnbWFcDZzWamkQvohst+0w +XHYCmNoJ6/Y+roLv3pzyFUmqRNcrQaohex7TZmsvHJT513UakAAACBAMgBXxH8DyNYdniX +mIXEto4GqMh4rXdNwCghfpyWdJE6vCyDt7g7bYMq7AQ2ynSKRtQDT/ZgQNfSbilUq3iXz7 +xNZn5U9ndwFs90VmEpBup/PmhfX+Gwt5hQZLbkKZcgQ9XrhSKdMxVm1yy/fk0U457enlz5 +cKumubUxOfFdy1ZvAAAAEm5jY0BtYnAudWJudC5sb2NhbA== +-----END OPENSSH PRIVATE KEY----- +` + testKeyToSignPublic := `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDCfVXbF38SAlchjnd8FO1zbST9qN6SpCeC+SkVLA0dbrdc0RArwuUU4LIZeiN70NQbGdizcX+iF+wEFaQs19Tt+IQ11IkvWluQCAn7a6FX2wtmO9iGKStdajIlcsMp9jXYuOlwFLYd4sxPTiVpn/FIms4EQkcbMfKvbBQ7Q1RBRA9Q4MAWGccqC+8DhS7v1RiBx1OEXjDzf4QCcTgnMFkwo8yPPLi7ULRH8ugPwO5qkfBfo0ZZ0RS9Rln5dFwhZ4I69HSd6oYFEffmTUy/uTrl3uSQTS68zlnfp6vDe12izPE9h/sAqc7UOl5Ta7AxzKbrE3B/DKcqOEuov8yCLi5H ` + + roleOptions := map[string]interface{}{ + "allow_user_certificates": true, + "allowed_users": "*", + 
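+		// "permit-pty" is a standard OpenSSH certificate extension that allows
+		// interactive PTY allocation; flag-style extensions carry an empty value.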
"default_extensions": []map[string]string{ + { + "permit-pty": "", + }, + }, + "key_type": "ca", + "default_user": testUserName, + "ttl": "30m0s", + } + if algorithmSigner != "" { + roleOptions["algorithm_signer"] = algorithmSigner + } + testCase := logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + configCaStep(caPublicKey, caPrivateKey), + testRoleWrite(t, "testcarole", roleOptions), + { + Operation: logical.UpdateOperation, + Path: "sign/testcarole", + ErrorOk: expectError, + Data: map[string]interface{}{ + "public_key": testKeyToSignPublic, + "valid_principals": testUserName, + }, + + Check: func(resp *logical.Response) error { + // Tolerate nil response if an error was expected + if expectError && resp == nil { + return nil + } + + signedKey := strings.TrimSpace(resp.Data["signed_key"].(string)) + if signedKey == "" { + return errors.New("no signed key in response") + } + + privKey, err := ssh.ParsePrivateKey([]byte(testKeyToSignPrivate)) + if err != nil { + return fmt.Errorf("error parsing private key: %v", err) + } + + parsedKey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(signedKey)) + if err != nil { + return fmt.Errorf("error parsing signed key: %v", err) + } + certSigner, err := ssh.NewCertSigner(parsedKey.(*ssh.Certificate), privKey) + if err != nil { + return err + } + + err = testSSH(testUserName, sshAddress, ssh.PublicKeys(certSigner), "date") + if expectError && err == nil { + return fmt.Errorf("expected error but got none") + } + if !expectError && err != nil { + return err + } + + return nil + }, + }, + }, + } + + logicaltest.Test(t, testCase) +} + +func TestSSHBackend_CAUpgradeAlgorithmSigner(t *testing.T) { + cleanup, sshAddress := prepareTestContainer(t, dockerImageTagSupportsRSA1, testCAPublicKey) + defer cleanup() + config := logical.TestBackendConfig() + + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatalf("Cannot create backend: %s", err) + } + + testKeyToSignPrivate := `-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABFwAAAAdzc2gtcn +NhAAAAAwEAAQAAAQEAwn1V2xd/EgJXIY53fBTtc20k/ajekqQngvkpFSwNHW63XNEQK8Ll +FOCyGXoje9DUGxnYs3F/ohfsBBWkLNfU7fiENdSJL1pbkAgJ+2uhV9sLZjvYhikrXWoyJX +LDKfY12LjpcBS2HeLMT04laZ/xSJrOBEJHGzHyr2wUO0NUQUQPUODAFhnHKgvvA4Uu79UY +gcdThF4w83+EAnE4JzBZMKPMjzy4u1C0R/LoD8DuapHwX6NGWdEUvUZZ+XRcIWeCOvR0ne +qGBRH35k1Mv7k65d7kkE0uvM5Z36erw3tdoszxPYf7AKnO1DpeU2uwMcym6xNwfwynKjhL +qL/Mgi4uRwAAA8iAsY0zgLGNMwAAAAdzc2gtcnNhAAABAQDCfVXbF38SAlchjnd8FO1zbS +T9qN6SpCeC+SkVLA0dbrdc0RArwuUU4LIZeiN70NQbGdizcX+iF+wEFaQs19Tt+IQ11Ikv +WluQCAn7a6FX2wtmO9iGKStdajIlcsMp9jXYuOlwFLYd4sxPTiVpn/FIms4EQkcbMfKvbB +Q7Q1RBRA9Q4MAWGccqC+8DhS7v1RiBx1OEXjDzf4QCcTgnMFkwo8yPPLi7ULRH8ugPwO5q +kfBfo0ZZ0RS9Rln5dFwhZ4I69HSd6oYFEffmTUy/uTrl3uSQTS68zlnfp6vDe12izPE9h/ +sAqc7UOl5Ta7AxzKbrE3B/DKcqOEuov8yCLi5HAAAAAwEAAQAAAQABns2yT5XNbpuPOgKg +1APObGBchKWmDxwNKUpAVOefEScR7OP3mV4TOHQDZlMZWvoJZ8O4av+nOA/NUOjXPs0VVn +azhBvIezY8EvUSVSk49Cg6J9F7/KfR1WqpiTU7CkQUlCXNuz5xLUyKdJo3MQ/vjOqeenbh +MR9Wes4IWF1BVe4VOD6lxRsjwuIieIgmScW28FFh2rgsEfO2spzZ3AWOGExw+ih757hFz5 +4A2fhsQXP8m3r8m7iiqcjTLWXdxTUk4zot2kZEjbI4Avk0BL+wVeFq6f/y+G+g5edqSo7j +uuSgzbUQtA9PMnGxhrhU2Ob7n3VGdya7WbGZkaKP8zJhAAAAgQC3bJurmOSLIi3KVhp7lD +/FfxwXHwVBFALCgq7EyNlkTz6RDoMFM4eOTRMDvsgWxT+bSB8R8eg1sfgY8rkHOuvTAVI5 +3oEYco3H7NWE9X8Zt0lyhO1uaE49EENNSQ8hY7R3UIw5becyI+7ZZxs9HkBgCQCZzSjzA+ +SIyAoMKM261AAAAIEA+PCkcDRp3J0PaoiuetXSlWZ5WjP3CtwT2xrvEX9x+ZsDgXCDYQ5T +osxvEKOGSfIrHUUhzZbFGvqWyfrziPe9ypJrtCM7RJT/fApBXnbWFcDZzWamkQvohst+0w 
+XHYCmNoJ6/Y+roLv3pzyFUmqRNcrQaohex7TZmsvHJT513UakAAACBAMgBXxH8DyNYdniX +mIXEto4GqMh4rXdNwCghfpyWdJE6vCyDt7g7bYMq7AQ2ynSKRtQDT/ZgQNfSbilUq3iXz7 +xNZn5U9ndwFs90VmEpBup/PmhfX+Gwt5hQZLbkKZcgQ9XrhSKdMxVm1yy/fk0U457enlz5 +cKumubUxOfFdy1ZvAAAAEm5jY0BtYnAudWJudC5sb2NhbA== +-----END OPENSSH PRIVATE KEY----- +` + testKeyToSignPublic := `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDCfVXbF38SAlchjnd8FO1zbST9qN6SpCeC+SkVLA0dbrdc0RArwuUU4LIZeiN70NQbGdizcX+iF+wEFaQs19Tt+IQ11IkvWluQCAn7a6FX2wtmO9iGKStdajIlcsMp9jXYuOlwFLYd4sxPTiVpn/FIms4EQkcbMfKvbBQ7Q1RBRA9Q4MAWGccqC+8DhS7v1RiBx1OEXjDzf4QCcTgnMFkwo8yPPLi7ULRH8ugPwO5qkfBfo0ZZ0RS9Rln5dFwhZ4I69HSd6oYFEffmTUy/uTrl3uSQTS68zlnfp6vDe12izPE9h/sAqc7UOl5Ta7AxzKbrE3B/DKcqOEuov8yCLi5H ` + + // Old role entries between 1.4.3 and 1.5.2 had algorithm_signer default to + // ssh-rsa if not provided. + roleOptionsOldEntry := map[string]interface{}{ + "allow_user_certificates": true, + "allowed_users": "*", + "default_extensions": []map[string]string{ + { + "permit-pty": "", + }, + }, + "key_type": "ca", + "default_user": testUserName, + "ttl": "30m0s", + "algorithm_signer": ssh.SigAlgoRSA, + } + + // Upgrade entry by overwriting algorithm_signer with an empty value + roleOptionsUpgradedEntry := map[string]interface{}{ + "allow_user_certificates": true, + "allowed_users": "*", + "default_extensions": []map[string]string{ + { + "permit-pty": "", + }, + }, + "key_type": "ca", + "default_user": testUserName, + "ttl": "30m0s", + "algorithm_signer": "", + } + + testCase := logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + configCaStep(testCAPublicKey, testCAPrivateKey), + testRoleWrite(t, "testcarole", roleOptionsOldEntry), + testRoleWrite(t, "testcarole", roleOptionsUpgradedEntry), + { + Operation: logical.UpdateOperation, + Path: "sign/testcarole", + ErrorOk: false, + Data: map[string]interface{}{ + "public_key": testKeyToSignPublic, + "valid_principals": testUserName, + }, + + Check: func(resp *logical.Response) error { + signedKey := strings.TrimSpace(resp.Data["signed_key"].(string)) + if signedKey == "" { + return errors.New("no signed key in response") + } + + privKey, err := ssh.ParsePrivateKey([]byte(testKeyToSignPrivate)) + if err != nil { + return fmt.Errorf("error parsing private key: %v", err) + } + + parsedKey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(signedKey)) + if err != nil { + return fmt.Errorf("error parsing signed key: %v", err) + } + certSigner, err := ssh.NewCertSigner(parsedKey.(*ssh.Certificate), privKey) + if err != nil { + return err + } + + err = testSSH(testUserName, sshAddress, ssh.PublicKeys(certSigner), "date") + if err != nil { + return err + } + + return nil + }, + }, + }, + } + + logicaltest.Test(t, testCase) +} + +func TestBackend_AbleToRetrievePublicKey(t *testing.T) { + config := logical.TestBackendConfig() + + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatalf("Cannot create backend: %s", err) + } + + testCase := logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + configCaStep(testCAPublicKey, testCAPrivateKey), + + { + Operation: logical.ReadOperation, + Path: "public_key", + Unauthenticated: true, + + Check: func(resp *logical.Response) error { + key := string(resp.Data["http_raw_body"].([]byte)) + + if key != testCAPublicKey { + return fmt.Errorf("public_key incorrect. 
Expected %v, actual %v", testCAPublicKey, key) + } + + return nil + }, + }, + }, + } + + logicaltest.Test(t, testCase) +} + +func TestBackend_AbleToAutoGenerateSigningKeys(t *testing.T) { + config := logical.TestBackendConfig() + + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatalf("Cannot create backend: %s", err) + } + + var expectedPublicKey string + testCase := logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + { + Operation: logical.UpdateOperation, + Path: "config/ca", + Check: func(resp *logical.Response) error { + if resp.Data["public_key"].(string) == "" { + return fmt.Errorf("public_key empty") + } + expectedPublicKey = resp.Data["public_key"].(string) + return nil + }, + }, + + { + Operation: logical.ReadOperation, + Path: "public_key", + Unauthenticated: true, + + Check: func(resp *logical.Response) error { + key := string(resp.Data["http_raw_body"].([]byte)) + + if key == "" { + return fmt.Errorf("public_key empty. Expected not empty, actual %s", key) + } + if key != expectedPublicKey { + return fmt.Errorf("public_key mismatch. Expected %s, actual %s", expectedPublicKey, key) + } + + return nil + }, + }, + }, + } + + logicaltest.Test(t, testCase) +} + +func TestBackend_ValidPrincipalsValidatedForHostCertificates(t *testing.T) { + config := logical.TestBackendConfig() + + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatalf("Cannot create backend: %s", err) + } + + testCase := logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + configCaStep(testCAPublicKey, testCAPrivateKey), + + createRoleStep("testing", map[string]interface{}{ + "key_type": "ca", + "allow_host_certificates": true, + "allowed_domains": "example.com,example.org", + "allow_subdomains": true, + "default_critical_options": map[string]interface{}{ + "option": "value", + }, + "default_extensions": map[string]interface{}{ + "extension": "extended", + }, + }), + + signCertificateStep("testing", "vault-root-22608f5ef173aabf700797cb95c5641e792698ec6380e8e1eb55523e39aa5e51", ssh.HostCert, []string{"dummy.example.org", "second.example.com"}, map[string]string{ + "option": "value", + }, map[string]string{ + "extension": "extended", + }, + 2*time.Hour, map[string]interface{}{ + "public_key": publicKey2, + "ttl": "2h", + "cert_type": "host", + "valid_principals": "dummy.example.org,second.example.com", + }), + }, + } + + logicaltest.Test(t, testCase) +} + +func TestBackend_OptionsOverrideDefaults(t *testing.T) { + config := logical.TestBackendConfig() + + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatalf("Cannot create backend: %s", err) + } + + testCase := logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + configCaStep(testCAPublicKey, testCAPrivateKey), + + createRoleStep("testing", map[string]interface{}{ + "key_type": "ca", + "allowed_users": "tuber", + "default_user": "tuber", + "allow_user_certificates": true, + "allowed_critical_options": "option,secondary", + "allowed_extensions": "extension,additional", + "default_critical_options": map[string]interface{}{ + "option": "value", + }, + "default_extensions": map[string]interface{}{ + "extension": "extended", + }, + }), + + signCertificateStep("testing", "vault-root-22608f5ef173aabf700797cb95c5641e792698ec6380e8e1eb55523e39aa5e51", ssh.UserCert, []string{"tuber"}, map[string]string{ + "secondary": "value", + }, map[string]string{ + "additional": "value", + }, 2*time.Hour, map[string]interface{}{ + "public_key": 
publicKey2, + "ttl": "2h", + "critical_options": map[string]interface{}{ + "secondary": "value", + }, + "extensions": map[string]interface{}{ + "additional": "value", + }, + }), + }, + } + + logicaltest.Test(t, testCase) +} + +func TestBackend_AllowedUserKeyLengths(t *testing.T) { + config := logical.TestBackendConfig() + + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatalf("Cannot create backend: %s", err) + } + testCase := logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + configCaStep(testCAPublicKey, testCAPrivateKey), + createRoleStep("weakkey", map[string]interface{}{ + "key_type": "ca", + "allow_user_certificates": true, + "allowed_user_key_lengths": map[string]interface{}{ + "rsa": 4096, + }, + }), + { + Operation: logical.UpdateOperation, + Path: "sign/weakkey", + Data: map[string]interface{}{ + "public_key": testCAPublicKey, + }, + ErrorOk: true, + Check: func(resp *logical.Response) error { + if resp.Data["error"] != "public_key failed to meet the key requirements: key is of an invalid size: 2048" { + return errors.New("a smaller key (2048) was allowed, when the minimum was set for 4096") + } + return nil + }, + }, + createRoleStep("stdkey", map[string]interface{}{ + "key_type": "ca", + "allow_user_certificates": true, + "allowed_user_key_lengths": map[string]interface{}{ + "rsa": 2048, + }, + }), + // Pass with 2048 key + { + Operation: logical.UpdateOperation, + Path: "sign/stdkey", + Data: map[string]interface{}{ + "public_key": testCAPublicKey, + }, + }, + // Fail with 4096 key + { + Operation: logical.UpdateOperation, + Path: "sign/stdkey", + Data: map[string]interface{}{ + "public_key": publicKey4096, + }, + ErrorOk: true, + Check: func(resp *logical.Response) error { + if resp.Data["error"] != "public_key failed to meet the key requirements: key is of an invalid size: 4096" { + return errors.New("a larger key (4096) was allowed, when the size was set for 2048") + } + return nil + }, + }, + createRoleStep("multikey", map[string]interface{}{ + "key_type": "ca", + "allow_user_certificates": true, + "allowed_user_key_lengths": map[string]interface{}{ + "rsa": []int{2048, 4096}, + }, + }), + // Pass with 2048-bit key + { + Operation: logical.UpdateOperation, + Path: "sign/multikey", + Data: map[string]interface{}{ + "public_key": testCAPublicKey, + }, + }, + // Pass with 4096-bit key + { + Operation: logical.UpdateOperation, + Path: "sign/multikey", + Data: map[string]interface{}{ + "public_key": publicKey4096, + }, + }, + // Fail with 3072-bit key + { + Operation: logical.UpdateOperation, + Path: "sign/multikey", + Data: map[string]interface{}{ + "public_key": publicKey3072, + }, + ErrorOk: true, + Check: func(resp *logical.Response) error { + if resp.Data["error"] != "public_key failed to meet the key requirements: key is of an invalid size: 3072" { + return errors.New("a larger key (3072) was allowed, when the size was set for 2048") + } + return nil + }, + }, + // Fail with ECDSA key + { + Operation: logical.UpdateOperation, + Path: "sign/multikey", + Data: map[string]interface{}{ + "public_key": publicKeyECDSA256, + }, + ErrorOk: true, + Check: func(resp *logical.Response) error { + if resp.Data["error"] != "public_key failed to meet the key requirements: key of type ecdsa is not allowed" { + return errors.New("an ECDSA key was allowed under RSA-only policy") + } + return nil + }, + }, + createRoleStep("ectypes", map[string]interface{}{ + "key_type": "ca", + "allow_user_certificates": true, + "allowed_user_key_lengths": 
map[string]interface{}{ + "ec": []int{256}, + "ecdsa-sha2-nistp521": 0, + }, + }), + // Pass with ECDSA P-256 + { + Operation: logical.UpdateOperation, + Path: "sign/ectypes", + Data: map[string]interface{}{ + "public_key": publicKeyECDSA256, + }, + }, + // Pass with ECDSA P-521 + { + Operation: logical.UpdateOperation, + Path: "sign/ectypes", + Data: map[string]interface{}{ + "public_key": publicKeyECDSA521, + }, + }, + // Fail with RSA key + { + Operation: logical.UpdateOperation, + Path: "sign/ectypes", + Data: map[string]interface{}{ + "public_key": publicKey3072, + }, + ErrorOk: true, + Check: func(resp *logical.Response) error { + if resp.Data["error"] != "public_key failed to meet the key requirements: key of type rsa is not allowed" { + return errors.New("an RSA key was allowed under ECDSA-only policy") + } + return nil + }, + }, + }, + } + + logicaltest.Test(t, testCase) +} + +func TestBackend_CustomKeyIDFormat(t *testing.T) { + config := logical.TestBackendConfig() + + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatalf("Cannot create backend: %s", err) + } + + testCase := logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + configCaStep(testCAPublicKey, testCAPrivateKey), + + createRoleStep("customrole", map[string]interface{}{ + "key_type": "ca", + "key_id_format": "{{role_name}}-{{token_display_name}}-{{public_key_hash}}", + "allowed_users": "tuber", + "default_user": "tuber", + "allow_user_certificates": true, + "allowed_critical_options": "option,secondary", + "allowed_extensions": "extension,additional", + "default_critical_options": map[string]interface{}{ + "option": "value", + }, + "default_extensions": map[string]interface{}{ + "extension": "extended", + }, + }), + + signCertificateStep("customrole", "customrole-root-22608f5ef173aabf700797cb95c5641e792698ec6380e8e1eb55523e39aa5e51", ssh.UserCert, []string{"tuber"}, map[string]string{ + "secondary": "value", + }, map[string]string{ + "additional": "value", + }, 2*time.Hour, map[string]interface{}{ + "public_key": publicKey2, + "ttl": "2h", + "critical_options": map[string]interface{}{ + "secondary": "value", + }, + "extensions": map[string]interface{}{ + "additional": "value", + }, + }), + }, + } + + logicaltest.Test(t, testCase) +} + +func TestBackend_DisallowUserProvidedKeyIDs(t *testing.T) { + config := logical.TestBackendConfig() + + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatalf("Cannot create backend: %s", err) + } + + testCase := logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + configCaStep(testCAPublicKey, testCAPrivateKey), + + createRoleStep("testing", map[string]interface{}{ + "key_type": "ca", + "allow_user_key_ids": false, + "allow_user_certificates": true, + }), + { + Operation: logical.UpdateOperation, + Path: "sign/testing", + Data: map[string]interface{}{ + "public_key": publicKey2, + "key_id": "override", + }, + ErrorOk: true, + Check: func(resp *logical.Response) error { + if resp.Data["error"] != "setting key_id is not allowed by role" { + return errors.New("custom user key id was allowed even when 'allow_user_key_ids' is false") + } + return nil + }, + }, + }, + } + + logicaltest.Test(t, testCase) +} + +func TestBackend_DefExtTemplatingEnabled(t *testing.T) { + cluster, userpassToken := getSshCaTestCluster(t, testUserName) + defer cluster.Cleanup() + client := cluster.Cores[0].Client + + // Get auth accessor for identity template. 
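+	// The accessor is interpolated into the identity template below, i.e.
+	// {{identity.entity.aliases.<accessor>.name}}, so the signed certificate
+	// picks up the entity alias name at signing time.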
+ auths, err := client.Sys().ListAuth() + if err != nil { + t.Fatal(err) + } + userpassAccessor := auths["userpass/"].Accessor + + // Write SSH role. + _, err = client.Logical().Write("ssh/roles/test", map[string]interface{}{ + "key_type": "ca", + "allowed_extensions": "login@zipzap.com", + "allow_user_certificates": true, + "allowed_users": "tuber", + "default_user": "tuber", + "default_extensions_template": true, + "default_extensions": map[string]interface{}{ + "login@foobar.com": "{{identity.entity.aliases." + userpassAccessor + ".name}}", + "login@foobar2.com": "{{identity.entity.aliases." + userpassAccessor + ".name}}, " + + "{{identity.entity.aliases." + userpassAccessor + ".name}}_foobar", + }, + }) + if err != nil { + t.Fatal(err) + } + + sshKeyID := "vault-userpass-" + testUserName + "-9bd0f01b7dfc50a13aa5e5cd11aea19276968755c8f1f9c98965d04147f30ed0" + + // Issue SSH certificate with default extensions templating enabled, and no user-provided extensions + client.SetToken(userpassToken) + resp, err := client.Logical().Write("ssh/sign/test", map[string]interface{}{ + "public_key": publicKey4096, + }) + if err != nil { + t.Fatal(err) + } + signedKey := resp.Data["signed_key"].(string) + key, _ := base64.StdEncoding.DecodeString(strings.Split(signedKey, " ")[1]) + + parsedKey, err := ssh.ParsePublicKey(key) + if err != nil { + t.Fatal(err) + } + + defaultExtensionPermissions := map[string]string{ + "login@foobar.com": testUserName, + "login@foobar2.com": fmt.Sprintf("%s, %s_foobar", testUserName, testUserName), + } + + err = validateSSHCertificate(parsedKey.(*ssh.Certificate), sshKeyID, ssh.UserCert, []string{"tuber"}, map[string]string{}, defaultExtensionPermissions, 16*time.Hour) + if err != nil { + t.Fatal(err) + } + + // Issue SSH certificate with default extensions templating enabled, and user-provided extensions + // The certificate should only have the user-provided extensions, and no templated extensions + userProvidedExtensionPermissions := map[string]string{ + "login@zipzap.com": "some_other_user_name", + } + resp, err = client.Logical().Write("ssh/sign/test", map[string]interface{}{ + "public_key": publicKey4096, + "extensions": userProvidedExtensionPermissions, + }) + if err != nil { + t.Fatal(err) + } + signedKey = resp.Data["signed_key"].(string) + key, _ = base64.StdEncoding.DecodeString(strings.Split(signedKey, " ")[1]) + + parsedKey, err = ssh.ParsePublicKey(key) + if err != nil { + t.Fatal(err) + } + + err = validateSSHCertificate(parsedKey.(*ssh.Certificate), sshKeyID, ssh.UserCert, []string{"tuber"}, map[string]string{}, userProvidedExtensionPermissions, 16*time.Hour) + if err != nil { + t.Fatal(err) + } + + // Issue SSH certificate with default extensions templating enabled, and invalid user-provided extensions - it should fail + invalidUserProvidedExtensionPermissions := map[string]string{ + "login@foobar.com": "{{identity.entity.metadata}}", + } + resp, err = client.Logical().Write("ssh/sign/test", map[string]interface{}{ + "public_key": publicKey4096, + "extensions": invalidUserProvidedExtensionPermissions, + }) + if err == nil { + t.Fatal("expected an error while attempting to sign a key with invalid permissions") + } +} + +func TestBackend_EmptyAllowedExtensionFailsClosed(t *testing.T) { + cluster, userpassToken := getSshCaTestCluster(t, testUserName) + defer cluster.Cleanup() + client := cluster.Cores[0].Client + + // Get auth accessor for identity template. 
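+	// The accessor only feeds the templated default extension below; because
+	// allowed_extensions is empty the backend must fail closed, so the
+	// template should never actually be evaluated.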
+ auths, err := client.Sys().ListAuth()
+ if err != nil {
+ t.Fatal(err)
+ }
+ userpassAccessor := auths["userpass/"].Accessor
+
+ // Write SSH role to test with no allowed extensions. We also provide a templated default extension,
+ // to verify that it's not actually being evaluated
+ _, err = client.Logical().Write("ssh/roles/test_allow_all_extensions", map[string]interface{}{
+ "key_type": "ca",
+ "allow_user_certificates": true,
+ "allowed_users": "tuber",
+ "default_user": "tuber",
+ "allowed_extensions": "",
+ "default_extensions_template": false,
+ "default_extensions": map[string]interface{}{
+ "login@foobar.com": "{{identity.entity.aliases." + userpassAccessor + ".name}}",
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Issue SSH certificate with default extensions templating disabled, and user-provided extensions
+ client.SetToken(userpassToken)
+ userProvidedAnyExtensionPermissions := map[string]string{
+ "login@foobar.com": "not_userpassname",
+ }
+ _, err = client.Logical().Write("ssh/sign/test_allow_all_extensions", map[string]interface{}{
+ "public_key": publicKey4096,
+ "extensions": userProvidedAnyExtensionPermissions,
+ })
+ if err == nil {
+ t.Fatal("Expected failure: we should not have allowed specifying custom extensions")
+ }
+
+ if !strings.Contains(err.Error(), "are not on allowed list") {
+ t.Fatalf("Expected failure to contain 'are not on allowed list' but was %s", err)
+ }
+}
+
+func TestBackend_DefExtTemplatingDisabled(t *testing.T) {
+ cluster, userpassToken := getSshCaTestCluster(t, testUserName)
+ defer cluster.Cleanup()
+ client := cluster.Cores[0].Client
+
+ // Get auth accessor for identity template.
+ auths, err := client.Sys().ListAuth()
+ if err != nil {
+ t.Fatal(err)
+ }
+ userpassAccessor := auths["userpass/"].Accessor
+
+ // Write SSH role to test with any extension. We also provide a templated default extension,
+ // to verify that it's not actually being evaluated
+ _, err = client.Logical().Write("ssh/roles/test_allow_all_extensions", map[string]interface{}{
+ "key_type": "ca",
+ "allow_user_certificates": true,
+ "allowed_users": "tuber",
+ "default_user": "tuber",
+ "allowed_extensions": "*",
+ "default_extensions_template": false,
+ "default_extensions": map[string]interface{}{
+ "login@foobar.com": "{{identity.entity.aliases." + userpassAccessor + ".name}}",
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sshKeyID := "vault-userpass-" + testUserName + "-9bd0f01b7dfc50a13aa5e5cd11aea19276968755c8f1f9c98965d04147f30ed0"
+
+ // Issue SSH certificate with default extensions templating disabled; the raw template
+ // string provided below should pass through to the certificate verbatim
+ client.SetToken(userpassToken)
+ defaultExtensionPermissions := map[string]string{
+ "login@foobar.com": "{{identity.entity.aliases."
+ userpassAccessor + ".name}}", + "login@zipzap.com": "some_other_user_name", + } + resp, err := client.Logical().Write("ssh/sign/test_allow_all_extensions", map[string]interface{}{ + "public_key": publicKey4096, + "extensions": defaultExtensionPermissions, + }) + if err != nil { + t.Fatal(err) + } + signedKey := resp.Data["signed_key"].(string) + key, _ := base64.StdEncoding.DecodeString(strings.Split(signedKey, " ")[1]) + + parsedKey, err := ssh.ParsePublicKey(key) + if err != nil { + t.Fatal(err) + } + + err = validateSSHCertificate(parsedKey.(*ssh.Certificate), sshKeyID, ssh.UserCert, []string{"tuber"}, map[string]string{}, defaultExtensionPermissions, 16*time.Hour) + if err != nil { + t.Fatal(err) + } + + // Issue SSH certificate with default extensions templating disabled, and user-provided extensions + client.SetToken(userpassToken) + userProvidedAnyExtensionPermissions := map[string]string{ + "login@foobar.com": "not_userpassname", + "login@zipzap.com": "some_other_user_name", + } + resp, err = client.Logical().Write("ssh/sign/test_allow_all_extensions", map[string]interface{}{ + "public_key": publicKey4096, + "extensions": userProvidedAnyExtensionPermissions, + }) + if err != nil { + t.Fatal(err) + } + signedKey = resp.Data["signed_key"].(string) + key, _ = base64.StdEncoding.DecodeString(strings.Split(signedKey, " ")[1]) + + parsedKey, err = ssh.ParsePublicKey(key) + if err != nil { + t.Fatal(err) + } + + err = validateSSHCertificate(parsedKey.(*ssh.Certificate), sshKeyID, ssh.UserCert, []string{"tuber"}, map[string]string{}, userProvidedAnyExtensionPermissions, 16*time.Hour) + if err != nil { + t.Fatal(err) + } +} + +func TestSSHBackend_ValidateNotBeforeDuration(t *testing.T) { + config := logical.TestBackendConfig() + + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatalf("Cannot create backend: %s", err) + } + testCase := logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + configCaStep(testCAPublicKey, testCAPrivateKey), + + createRoleStep("testing", map[string]interface{}{ + "key_type": "ca", + "allow_host_certificates": true, + "allowed_domains": "example.com,example.org", + "allow_subdomains": true, + "default_critical_options": map[string]interface{}{ + "option": "value", + }, + "default_extensions": map[string]interface{}{ + "extension": "extended", + }, + "not_before_duration": "300s", + }), + + signCertificateStep("testing", "vault-root-22608f5ef173aabf700797cb95c5641e792698ec6380e8e1eb55523e39aa5e51", ssh.HostCert, []string{"dummy.example.org", "second.example.com"}, map[string]string{ + "option": "value", + }, map[string]string{ + "extension": "extended", + }, + 2*time.Hour+5*time.Minute-30*time.Second, map[string]interface{}{ + "public_key": publicKey2, + "ttl": "2h", + "cert_type": "host", + "valid_principals": "dummy.example.org,second.example.com", + }), + + createRoleStep("testing", map[string]interface{}{ + "key_type": "ca", + "allow_host_certificates": true, + "allowed_domains": "example.com,example.org", + "allow_subdomains": true, + "default_critical_options": map[string]interface{}{ + "option": "value", + }, + "default_extensions": map[string]interface{}{ + "extension": "extended", + }, + "not_before_duration": "2h", + }), + + signCertificateStep("testing", "vault-root-22608f5ef173aabf700797cb95c5641e792698ec6380e8e1eb55523e39aa5e51", ssh.HostCert, []string{"dummy.example.org", "second.example.com"}, map[string]string{ + "option": "value", + }, map[string]string{ + "extension": "extended", + }, + 
4*time.Hour-30*time.Second, map[string]interface{}{
+ "public_key": publicKey2,
+ "ttl": "2h",
+ "cert_type": "host",
+ "valid_principals": "dummy.example.org,second.example.com",
+ }),
+ createRoleStep("testing", map[string]interface{}{
+ "key_type": "ca",
+ "allow_host_certificates": true,
+ "allowed_domains": "example.com,example.org",
+ "allow_subdomains": true,
+ "default_critical_options": map[string]interface{}{
+ "option": "value",
+ },
+ "default_extensions": map[string]interface{}{
+ "extension": "extended",
+ },
+ "not_before_duration": "30s",
+ }),
+
+ signCertificateStep("testing", "vault-root-22608f5ef173aabf700797cb95c5641e792698ec6380e8e1eb55523e39aa5e51", ssh.HostCert, []string{"dummy.example.org", "second.example.com"}, map[string]string{
+ "option": "value",
+ }, map[string]string{
+ "extension": "extended",
+ },
+ 2*time.Hour, map[string]interface{}{
+ "public_key": publicKey2,
+ "ttl": "2h",
+ "cert_type": "host",
+ "valid_principals": "dummy.example.org,second.example.com",
+ }),
+ },
+ }
+
+ logicaltest.Test(t, testCase)
+}
+
+func TestSSHBackend_IssueSign(t *testing.T) {
+ config := logical.TestBackendConfig()
+
+ b, err := Factory(context.Background(), config)
+ if err != nil {
+ t.Fatalf("Cannot create backend: %s", err)
+ }
+
+ testCase := logicaltest.TestCase{
+ LogicalBackend: b,
+ Steps: []logicaltest.TestStep{
+ configCaStep(testCAPublicKey, testCAPrivateKey),
+
+ createRoleStep("testing", map[string]interface{}{
+ "key_type": "otp",
+ "default_user": "user",
+ }),
+ // Key pair not issued with an invalid role key type
+ issueSSHKeyPairStep("testing", "rsa", 0, true, "role key type 'otp' not allowed to issue key pairs"),
+
+ createRoleStep("testing", map[string]interface{}{
+ "key_type": "ca",
+ "allow_user_key_ids": false,
+ "allow_user_certificates": true,
+ "allowed_user_key_lengths": map[string]interface{}{
+ "ssh-rsa": []int{2048, 3072, 4096},
+ "ecdsa-sha2-nistp521": 0,
+ "ed25519": 0,
+ },
+ }),
+ // key_type not in allowed_user_key_lengths
+ issueSSHKeyPairStep("testing", "ec", 256, true, "provided key_type value not in allowed_user_key_types"),
+ // key_bits not in allowed_user_key_lengths for the provided key_type
+ issueSSHKeyPairStep("testing", "rsa", 2560, true, "provided key_bits value not in list of role's allowed_user_key_types"),
+ // key_type `rsa` and key_bits `2048` successfully created
+ issueSSHKeyPairStep("testing", "rsa", 2048, false, ""),
+ // key_type `ed25519` and key_bits `0` successfully created
+ issueSSHKeyPairStep("testing", "ed25519", 0, false, ""),
+ },
+ }
+
+ logicaltest.Test(t, testCase)
+}
+
+func getSshCaTestCluster(t *testing.T, userIdentity string) (*vault.TestCluster, string) {
+ coreConfig := &vault.CoreConfig{
+ CredentialBackends: map[string]logical.Factory{
+ "userpass": userpass.Factory,
+ },
+ LogicalBackends: map[string]logical.Factory{
+ "ssh": Factory,
+ },
+ }
+ cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
+ HandlerFunc: vaulthttp.Handler,
+ })
+ cluster.Start()
+ client := cluster.Cores[0].Client
+
+ // Write test policy for userpass auth method.
+ err := client.Sys().PutPolicy("test", `
+ path "ssh/*" {
+ capabilities = ["update"]
+ }`)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Enable userpass auth method.
+ if err := client.Sys().EnableAuth("userpass", "userpass", ""); err != nil {
+ t.Fatal(err)
+ }
+
+ // Configure test role for userpass.
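+ // Rough CLI equivalent of the API write below (values mirror the test):
+ //
+ // vault write auth/userpass/users/<userIdentity> password=test policies=test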
+ if _, err := client.Logical().Write("auth/userpass/users/"+userIdentity, map[string]interface{}{ + "password": "test", + "policies": "test", + }); err != nil { + t.Fatal(err) + } + + // Login userpass for test role and keep client token. + secret, err := client.Logical().Write("auth/userpass/login/"+userIdentity, map[string]interface{}{ + "password": "test", + }) + if err != nil || secret == nil { + t.Fatal(err) + } + userpassToken := secret.Auth.ClientToken + + // Mount SSH. + err = client.Sys().Mount("ssh", &api.MountInput{ + Type: "ssh", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "60h", + }, + }) + if err != nil { + t.Fatal(err) + } + + // Configure SSH CA. + _, err = client.Logical().Write("ssh/config/ca", map[string]interface{}{ + "public_key": testCAPublicKey, + "private_key": testCAPrivateKey, + }) + if err != nil { + t.Fatal(err) + } + + return cluster, userpassToken +} + +func testDefaultUserTemplate(t *testing.T, testDefaultUserTemplate string, + expectedValidPrincipal string, testEntityMetadata map[string]string, +) { + cluster, userpassToken := getSshCaTestCluster(t, testUserName) + defer cluster.Cleanup() + client := cluster.Cores[0].Client + + // set metadata "ssh_username" to userpass username + tokenLookupResponse, err := client.Logical().Write("/auth/token/lookup", map[string]interface{}{ + "token": userpassToken, + }) + if err != nil { + t.Fatal(err) + } + entityID := tokenLookupResponse.Data["entity_id"].(string) + _, err = client.Logical().Write("/identity/entity/id/"+entityID, map[string]interface{}{ + "metadata": testEntityMetadata, + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Write("ssh/roles/my-role", map[string]interface{}{ + "key_type": testCaKeyType, + "allow_user_certificates": true, + "default_user": testDefaultUserTemplate, + "default_user_template": true, + "allowed_users": testDefaultUserTemplate, + "allowed_users_template": true, + }) + if err != nil { + t.Fatal(err) + } + + // sign SSH key as userpass user + client.SetToken(userpassToken) + signResponse, err := client.Logical().Write("ssh/sign/my-role", map[string]interface{}{ + "public_key": testCAPublicKey, + }) + if err != nil { + t.Fatal(err) + } + + // check for the expected valid principals of certificate + signedKey := signResponse.Data["signed_key"].(string) + key, _ := base64.StdEncoding.DecodeString(strings.Split(signedKey, " ")[1]) + parsedKey, err := ssh.ParsePublicKey(key) + if err != nil { + t.Fatal(err) + } + actualPrincipals := parsedKey.(*ssh.Certificate).ValidPrincipals + if actualPrincipals[0] != expectedValidPrincipal { + t.Fatal( + fmt.Sprintf("incorrect ValidPrincipals: %v should be %v", + actualPrincipals, []string{expectedValidPrincipal}), + ) + } +} + +func testAllowedPrincipalsTemplate(t *testing.T, testAllowedDomainsTemplate string, + expectedValidPrincipal string, testEntityMetadata map[string]string, + roleConfigPayload map[string]interface{}, signingPayload map[string]interface{}, +) { + cluster, userpassToken := getSshCaTestCluster(t, testUserName) + defer cluster.Cleanup() + client := cluster.Cores[0].Client + + // set metadata "ssh_username" to userpass username + tokenLookupResponse, err := client.Logical().Write("/auth/token/lookup", map[string]interface{}{ + "token": userpassToken, + }) + if err != nil { + t.Fatal(err) + } + entityID := tokenLookupResponse.Data["entity_id"].(string) + _, err = client.Logical().Write("/identity/entity/id/"+entityID, map[string]interface{}{ + "metadata": testEntityMetadata, + }) + 
if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Write("ssh/roles/my-role", roleConfigPayload) + if err != nil { + t.Fatal(err) + } + + // sign SSH key as userpass user + client.SetToken(userpassToken) + signResponse, err := client.Logical().Write("ssh/sign/my-role", signingPayload) + if err != nil { + t.Fatal(err) + } + + // check for the expected valid principals of certificate + signedKey := signResponse.Data["signed_key"].(string) + key, _ := base64.StdEncoding.DecodeString(strings.Split(signedKey, " ")[1]) + parsedKey, err := ssh.ParsePublicKey(key) + if err != nil { + t.Fatal(err) + } + actualPrincipals := parsedKey.(*ssh.Certificate).ValidPrincipals + if actualPrincipals[0] != expectedValidPrincipal { + t.Fatal( + fmt.Sprintf("incorrect ValidPrincipals: %v should be %v", + actualPrincipals, []string{expectedValidPrincipal}), + ) + } +} + +func testAllowedUsersTemplate(t *testing.T, testAllowedUsersTemplate string, + expectedValidPrincipal string, testEntityMetadata map[string]string, +) { + testAllowedPrincipalsTemplate( + t, testAllowedUsersTemplate, + expectedValidPrincipal, testEntityMetadata, + map[string]interface{}{ + "key_type": testCaKeyType, + "allow_user_certificates": true, + "allowed_users": testAllowedUsersTemplate, + "allowed_users_template": true, + }, + map[string]interface{}{ + "public_key": testCAPublicKey, + "valid_principals": expectedValidPrincipal, + }, + ) +} + +func configCaStep(caPublicKey, caPrivateKey string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "config/ca", + Data: map[string]interface{}{ + "public_key": caPublicKey, + "private_key": caPrivateKey, + }, + } +} + +func createRoleStep(name string, parameters map[string]interface{}) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.CreateOperation, + Path: "roles/" + name, + Data: parameters, + } +} + +func signCertificateStep( + role, keyID string, certType int, validPrincipals []string, + criticalOptionPermissions, extensionPermissions map[string]string, + ttl time.Duration, + requestParameters map[string]interface{}, +) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "sign/" + role, + Data: requestParameters, + + Check: func(resp *logical.Response) error { + serialNumber := resp.Data["serial_number"].(string) + if serialNumber == "" { + return errors.New("no serial number in response") + } + + signedKey := strings.TrimSpace(resp.Data["signed_key"].(string)) + if signedKey == "" { + return errors.New("no signed key in response") + } + + key, _ := base64.StdEncoding.DecodeString(strings.Split(signedKey, " ")[1]) + + parsedKey, err := ssh.ParsePublicKey(key) + if err != nil { + return err + } + + return validateSSHCertificate(parsedKey.(*ssh.Certificate), keyID, certType, validPrincipals, criticalOptionPermissions, extensionPermissions, ttl) + }, + } +} + +func issueSSHKeyPairStep(role, keyType string, keyBits int, expectError bool, errorMsg string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "issue/" + role, + Data: map[string]interface{}{ + "key_type": keyType, + "key_bits": keyBits, + }, + ErrorOk: true, + Check: func(resp *logical.Response) error { + if expectError { + var err error + if resp.Data["error"] != errorMsg { + err = fmt.Errorf("actual error message \"%s\" different from expected error message \"%s\"", resp.Data["error"], errorMsg) + } + + return err + } + + if resp.IsError() { + return 
fmt.Errorf("unexpected error response returned: %v", resp.Error()) + } + + if resp.Data["private_key_type"] != keyType { + return fmt.Errorf("response private_key_type (%s) does not match the provided key_type (%s)", resp.Data["private_key_type"], keyType) + } + + if resp.Data["signed_key"] == "" { + return errors.New("certificate/signed_key should not be empty") + } + + return nil + }, + } +} + +func validateSSHCertificate(cert *ssh.Certificate, keyID string, certType int, validPrincipals []string, criticalOptionPermissions, extensionPermissions map[string]string, + ttl time.Duration, +) error { + if cert.KeyId != keyID { + return fmt.Errorf("incorrect KeyId: %v, wanted %v", cert.KeyId, keyID) + } + + if cert.CertType != uint32(certType) { + return fmt.Errorf("incorrect CertType: %v", cert.CertType) + } + + if time.Unix(int64(cert.ValidAfter), 0).After(time.Now()) { + return fmt.Errorf("incorrect ValidAfter: %v", cert.ValidAfter) + } + + if time.Unix(int64(cert.ValidBefore), 0).Before(time.Now()) { + return fmt.Errorf("incorrect ValidBefore: %v", cert.ValidBefore) + } + + actualTTL := time.Unix(int64(cert.ValidBefore), 0).Add(-30 * time.Second).Sub(time.Unix(int64(cert.ValidAfter), 0)) + if actualTTL != ttl { + return fmt.Errorf("incorrect ttl: expected: %v, actual %v", ttl, actualTTL) + } + + if !reflect.DeepEqual(cert.ValidPrincipals, validPrincipals) { + return fmt.Errorf("incorrect ValidPrincipals: expected: %#v actual: %#v", validPrincipals, cert.ValidPrincipals) + } + + publicSigningKey, err := getSigningPublicKey() + if err != nil { + return err + } + if !reflect.DeepEqual(cert.SignatureKey, publicSigningKey) { + return fmt.Errorf("incorrect SignatureKey: %v", cert.SignatureKey) + } + + if cert.Signature == nil { + return fmt.Errorf("incorrect Signature: %v", cert.Signature) + } + + if !reflect.DeepEqual(cert.Permissions.Extensions, extensionPermissions) { + return fmt.Errorf("incorrect Permissions.Extensions: Expected: %v, Actual: %v", extensionPermissions, cert.Permissions.Extensions) + } + + if !reflect.DeepEqual(cert.Permissions.CriticalOptions, criticalOptionPermissions) { + return fmt.Errorf("incorrect Permissions.CriticalOptions: %v", cert.Permissions.CriticalOptions) + } + + return nil +} + +func getSigningPublicKey() (ssh.PublicKey, error) { + key, err := base64.StdEncoding.DecodeString(strings.Split(testCAPublicKey, " ")[1]) + if err != nil { + return nil, err + } + + parsedKey, err := ssh.ParsePublicKey(key) + if err != nil { + return nil, err + } + + return parsedKey, nil +} + +func testConfigZeroAddressDelete(t *testing.T) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.DeleteOperation, + Path: "config/zeroaddress", + } +} + +func testConfigZeroAddressWrite(t *testing.T, data map[string]interface{}) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "config/zeroaddress", + Data: data, + } +} + +func testConfigZeroAddressRead(t *testing.T, expected map[string]interface{}) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "config/zeroaddress", + Check: func(resp *logical.Response) error { + var d zeroAddressRoles + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + + var ex zeroAddressRoles + if err := mapstructure.Decode(expected, &ex); err != nil { + return err + } + + if !reflect.DeepEqual(d, ex) { + return fmt.Errorf("Response mismatch:\nActual:%#v\nExpected:%#v", d, ex) + } + + return nil + }, + } +} + +func 
testVerifyWrite(t *testing.T, data map[string]interface{}, expected map[string]interface{}) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("verify"), + Data: data, + Check: func(resp *logical.Response) error { + var ac api.SSHVerifyResponse + if err := mapstructure.Decode(resp.Data, &ac); err != nil { + return err + } + var ex api.SSHVerifyResponse + if err := mapstructure.Decode(expected, &ex); err != nil { + return err + } + + if !reflect.DeepEqual(ac, ex) { + return fmt.Errorf("invalid response") + } + return nil + }, + } +} + +func testLookupRead(t *testing.T, data map[string]interface{}, expected []string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "lookup", + Data: data, + Check: func(resp *logical.Response) error { + if resp.Data == nil || resp.Data["roles"] == nil { + return fmt.Errorf("missing roles information") + } + if !reflect.DeepEqual(resp.Data["roles"].([]string), expected) { + return fmt.Errorf("Invalid response: \nactual:%#v\nexpected:%#v", resp.Data["roles"].([]string), expected) + } + return nil + }, + } +} + +func testRoleWrite(t *testing.T, name string, data map[string]interface{}) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "roles/" + name, + Data: data, + } +} + +func testRoleList(t *testing.T, expected map[string]interface{}) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ListOperation, + Path: "roles", + Check: func(resp *logical.Response) error { + if resp == nil { + return fmt.Errorf("nil response") + } + if resp.Data == nil { + return fmt.Errorf("nil data") + } + if !reflect.DeepEqual(resp.Data, expected) { + return fmt.Errorf("Invalid response:\nactual:%#v\nexpected is %#v", resp.Data, expected) + } + return nil + }, + } +} + +func testRoleRead(t *testing.T, roleName string, expected map[string]interface{}) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "roles/" + roleName, + Check: func(resp *logical.Response) error { + if resp == nil { + if expected == nil { + return nil + } + return fmt.Errorf("bad: %#v", resp) + } + var d sshRole + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return fmt.Errorf("error decoding response:%s", err) + } + switch d.KeyType { + case "otp": + if d.KeyType != expected["key_type"] || d.DefaultUser != expected["default_user"] || d.CIDRList != expected["cidr_list"] { + return fmt.Errorf("data mismatch. bad: %#v", resp) + } + default: + return fmt.Errorf("unknown key type. 
bad: %#v", resp) + } + return nil + }, + } +} + +func testRoleDelete(t *testing.T, name string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.DeleteOperation, + Path: "roles/" + name, + } +} + +func testCredsWrite(t *testing.T, roleName string, data map[string]interface{}, expectError bool, address string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("creds/%s", roleName), + Data: data, + ErrorOk: expectError, + Check: func(resp *logical.Response) error { + if resp == nil { + return fmt.Errorf("response is nil") + } + if resp.Data == nil { + return fmt.Errorf("data is nil") + } + if expectError { + var e struct { + Error string `mapstructure:"error"` + } + if err := mapstructure.Decode(resp.Data, &e); err != nil { + return err + } + if len(e.Error) == 0 { + return fmt.Errorf("expected error, but write succeeded") + } + return nil + } + if roleName == testAtRoleName { + var d struct { + Key string `mapstructure:"key"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + if d.Key == "" { + return fmt.Errorf("generated key is an empty string") + } + // Checking only for a parsable key + privKey, err := ssh.ParsePrivateKey([]byte(d.Key)) + if err != nil { + return fmt.Errorf("generated key is invalid") + } + if err := testSSH(data["username"].(string), address, ssh.PublicKeys(privKey), "date"); err != nil { + return fmt.Errorf("unable to SSH with new key (%s): %w", d.Key, err) + } + } else { + if resp.Data["key_type"] != KeyTypeOTP { + return fmt.Errorf("incorrect key_type") + } + if resp.Data["key"] == nil { + return fmt.Errorf("invalid key") + } + } + return nil + }, + } +} + +func TestBackend_CleanupDynamicHostKeys(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // Running on a clean mount shouldn't do anything. + cleanRequest := &logical.Request{ + Operation: logical.DeleteOperation, + Path: "tidy/dynamic-keys", + Storage: config.StorageView, + } + + resp, err := b.HandleRequest(context.Background(), cleanRequest) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotNil(t, resp.Data["message"]) + require.Contains(t, resp.Data["message"], "0 of 0") + + // Write a bunch of bogus entries. + for i := 0; i < 15; i++ { + data := map[string]interface{}{ + "host": "localhost", + "key": "nothing-to-see-here", + } + entry, err := logical.StorageEntryJSON(fmt.Sprintf("%vexample-%v", keysStoragePrefix, i), &data) + require.NoError(t, err) + err = config.StorageView.Put(context.Background(), entry) + require.NoError(t, err) + } + + // Should now have 15 + resp, err = b.HandleRequest(context.Background(), cleanRequest) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotNil(t, resp.Data["message"]) + require.Contains(t, resp.Data["message"], "15 of 15") + + // Should have none left. 
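+ // handleCleanupKeys deletes every entry under the "keys/" storage prefix and
+ // reports "Removed N of N host keys.", so a second run should find "0 of 0".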
+ resp, err = b.HandleRequest(context.Background(), cleanRequest) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotNil(t, resp.Data["message"]) + require.Contains(t, resp.Data["message"], "0 of 0") +} + +type pathAuthCheckerFunc func(t *testing.T, client *api.Client, path string, token string) + +func isPermDenied(err error) bool { + return strings.Contains(err.Error(), "permission denied") +} + +func isUnsupportedPathOperation(err error) bool { + return strings.Contains(err.Error(), "unsupported path") || strings.Contains(err.Error(), "unsupported operation") +} + +func isDeniedOp(err error) bool { + return isPermDenied(err) || isUnsupportedPathOperation(err) +} + +func pathShouldBeAuthed(t *testing.T, client *api.Client, path string, token string) { + client.SetToken("") + resp, err := client.Logical().ReadWithContext(ctx, path) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to read %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().ListWithContext(ctx, path) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to list %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to write %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to delete %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to patch %v while unauthed: %v / %v", path, err, resp) + } +} + +func pathShouldBeUnauthedReadList(t *testing.T, client *api.Client, path string, token string) { + // Should be able to read both with and without a token. + client.SetToken("") + resp, err := client.Logical().ReadWithContext(ctx, path) + if err != nil && isPermDenied(err) { + // Read will sometimes return permission denied, when the handler + // does not support the given operation. Retry with the token. + client.SetToken(token) + resp2, err2 := client.Logical().ReadWithContext(ctx, path) + if err2 != nil && !isUnsupportedPathOperation(err2) { + t.Fatalf("unexpected failure to read %v while unauthed: %v / %v\nWhile authed: %v / %v", path, err, resp, err2, resp2) + } + client.SetToken("") + } + resp, err = client.Logical().ListWithContext(ctx, path) + if err != nil && isPermDenied(err) { + // List will sometimes return permission denied, when the handler + // does not support the given operation. Retry with the token. + client.SetToken(token) + resp2, err2 := client.Logical().ListWithContext(ctx, path) + if err2 != nil && !isUnsupportedPathOperation(err2) { + t.Fatalf("unexpected failure to list %v while unauthed: %v / %v\nWhile authed: %v / %v", path, err, resp, err2, resp2) + } + client.SetToken("") + } + + // These should all be denied. 
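+ // isDeniedOp accepts either a permission-denied or an unsupported
+ // path/operation error, since some handlers never implement a given
+ // modification verb.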
+ resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{})
+ if err == nil || !isDeniedOp(err) {
+ t.Fatalf("unexpected failure during write on read-only path %v while unauthed: %v / %v", path, err, resp)
+ }
+ resp, err = client.Logical().DeleteWithContext(ctx, path)
+ if err == nil || !isDeniedOp(err) {
+ t.Fatalf("unexpected failure during delete on read-only path %v while unauthed: %v / %v", path, err, resp)
+ }
+ resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{})
+ if err == nil || !isDeniedOp(err) {
+ t.Fatalf("unexpected failure during patch on read-only path %v while unauthed: %v / %v", path, err, resp)
+ }
+
+ // Retrying with the token should allow read/list, but still not modification.
+ client.SetToken(token)
+ resp, err = client.Logical().ReadWithContext(ctx, path)
+ if err != nil && isPermDenied(err) {
+ t.Fatalf("unexpected failure to read %v while authed: %v / %v", path, err, resp)
+ }
+ resp, err = client.Logical().ListWithContext(ctx, path)
+ if err != nil && isPermDenied(err) {
+ t.Fatalf("unexpected failure to list %v while authed: %v / %v", path, err, resp)
+ }
+
+ // Should all be denied.
+ resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{})
+ if err == nil || !isDeniedOp(err) {
+ t.Fatalf("unexpected failure during write on read-only path %v while authed: %v / %v", path, err, resp)
+ }
+ resp, err = client.Logical().DeleteWithContext(ctx, path)
+ if err == nil || !isDeniedOp(err) {
+ t.Fatalf("unexpected failure during delete on read-only path %v while authed: %v / %v", path, err, resp)
+ }
+ resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{})
+ if err == nil || !isDeniedOp(err) {
+ t.Fatalf("unexpected failure during patch on read-only path %v while authed: %v / %v", path, err, resp)
+ }
+}
+
+func pathShouldBeUnauthedWriteOnly(t *testing.T, client *api.Client, path string, token string) {
+ client.SetToken("")
+ resp, err := client.Logical().WriteWithContext(ctx, path, map[string]interface{}{})
+ if err != nil && isPermDenied(err) {
+ t.Fatalf("unexpected failure to write %v while unauthed: %v / %v", path, err, resp)
+ }
+
+ // These should all be denied.
+ resp, err = client.Logical().ReadWithContext(ctx, path)
+ if err == nil || !isDeniedOp(err) {
+ t.Fatalf("unexpected failure during read on write-only path %v while unauthed: %v / %v", path, err, resp)
+ }
+ resp, err = client.Logical().ListWithContext(ctx, path)
+ if err == nil || !isDeniedOp(err) {
+ t.Fatalf("unexpected failure during list on write-only path %v while unauthed: %v / %v", path, err, resp)
+ }
+ resp, err = client.Logical().DeleteWithContext(ctx, path)
+ if err == nil || !isDeniedOp(err) {
+ t.Fatalf("unexpected failure during delete on write-only path %v while unauthed: %v / %v", path, err, resp)
+ }
+ resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{})
+ if err == nil || !isDeniedOp(err) {
+ t.Fatalf("unexpected failure during patch on write-only path %v while unauthed: %v / %v", path, err, resp)
+ }
+
+ // Retrying with token should allow writing, but nothing else.
+ client.SetToken(token)
+ resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{})
+ if err != nil && isPermDenied(err) {
+ t.Fatalf("unexpected failure to write %v while authed: %v / %v", path, err, resp)
+ }
+
+ // These should all be denied.
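+ // Even with a valid token, a write-only path (e.g. "verify") should not
+ // expose read, list, delete, or patch.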
+ resp, err = client.Logical().ReadWithContext(ctx, path) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during read on write-only path %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().ListWithContext(ctx, path) + if err == nil || !isDeniedOp(err) { + if resp != nil || err != nil { + t.Fatalf("unexpected failure during list on write-only path %v while authed: %v / %v", path, err, resp) + } + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during delete on write-only path %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during patch on write-only path %v while authed: %v / %v", path, err, resp) + } +} + +type pathAuthChecker int + +const ( + shouldBeAuthed pathAuthChecker = iota + shouldBeUnauthedReadList + shouldBeUnauthedWriteOnly +) + +var pathAuthChckerMap = map[pathAuthChecker]pathAuthCheckerFunc{ + shouldBeAuthed: pathShouldBeAuthed, + shouldBeUnauthedReadList: pathShouldBeUnauthedReadList, + shouldBeUnauthedWriteOnly: pathShouldBeUnauthedWriteOnly, +} + +func TestProperAuthing(t *testing.T) { + t.Parallel() + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "ssh": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + token := client.Token() + + // Mount SSH. + err := client.Sys().MountWithContext(ctx, "ssh", &api.MountInput{ + Type: "ssh", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "60h", + }, + }) + if err != nil { + t.Fatal(err) + } + + // Setup basic configuration. 
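+ // generate_signing_key=true has the mount create its own CA key pair rather
+ // than accepting public_key/private_key material; only the public half is
+ // ever returned (see pathConfigCAUpdate in path_config_ca.go).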
+ _, err = client.Logical().WriteWithContext(ctx, "ssh/config/ca", map[string]interface{}{
+ "generate_signing_key": true,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Logical().WriteWithContext(ctx, "ssh/roles/test-ca", map[string]interface{}{
+ "key_type": "ca",
+ "allow_user_certificates": true,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Logical().WriteWithContext(ctx, "ssh/issue/test-ca", map[string]interface{}{
+ "username": "toor",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Logical().WriteWithContext(ctx, "ssh/roles/test-otp", map[string]interface{}{
+ "key_type": "otp",
+ "default_user": "toor",
+ "cidr_list": "127.0.0.0/24",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err := client.Logical().WriteWithContext(ctx, "ssh/creds/test-otp", map[string]interface{}{
+ "username": "toor",
+ "ip": "127.0.0.1",
+ })
+ if err != nil || resp == nil {
+ t.Fatal(err)
+ }
+ // key := resp.Data["key"].(string)
+
+ paths := map[string]pathAuthChecker{
+ "config/ca": shouldBeAuthed,
+ "config/zeroaddress": shouldBeAuthed,
+ "creds/test-otp": shouldBeAuthed,
+ "issue/test-ca": shouldBeAuthed,
+ "lookup": shouldBeAuthed,
+ "public_key": shouldBeUnauthedReadList,
+ "roles/test-ca": shouldBeAuthed,
+ "roles/test-otp": shouldBeAuthed,
+ "roles": shouldBeAuthed,
+ "sign/test-ca": shouldBeAuthed,
+ "tidy/dynamic-keys": shouldBeAuthed,
+ "verify": shouldBeUnauthedWriteOnly,
+ }
+ for path, checkerType := range paths {
+ checker := pathAuthChckerMap[checkerType]
+ checker(t, client, "ssh/"+path, token)
+ }
+
+ client.SetToken(token)
+ openAPIResp, err := client.Logical().ReadWithContext(ctx, "sys/internal/specs/openapi")
+ if err != nil {
+ t.Fatalf("failed to get openapi data: %v", err)
+ }
+
+ if len(openAPIResp.Data["paths"].(map[string]interface{})) == 0 {
+ t.Fatalf("expected to get response from OpenAPI; got empty path list")
+ }
+
+ validatedPath := false
+ for openapi_path, raw_data := range openAPIResp.Data["paths"].(map[string]interface{}) {
+ if !strings.HasPrefix(openapi_path, "/ssh/") {
+ t.Logf("Skipping path: %v", openapi_path)
+ continue
+ }
+
+ t.Logf("Validating path: %v", openapi_path)
+ validatedPath = true
+
+ // Substitute values in from our testing map.
+ raw_path := openapi_path[5:]
+ if strings.Contains(raw_path, "{role}") && strings.Contains(raw_path, "roles/") {
+ raw_path = strings.ReplaceAll(raw_path, "{role}", "test-ca")
+ }
+ if strings.Contains(raw_path, "{role}") && (strings.Contains(raw_path, "sign/") || strings.Contains(raw_path, "issue/")) {
+ raw_path = strings.ReplaceAll(raw_path, "{role}", "test-ca")
+ }
+ if strings.Contains(raw_path, "{role}") && strings.Contains(raw_path, "creds") {
+ raw_path = strings.ReplaceAll(raw_path, "{role}", "test-otp")
+ }
+
+ handler, present := paths[raw_path]
+ if !present {
+ t.Fatalf("OpenAPI reports SSH mount contains %v->%v but was not tested to be authed or unauthed.", openapi_path, raw_path)
+ }
+
+ openapi_data := raw_data.(map[string]interface{})
+ hasList := false
+ rawGetData, hasGet := openapi_data["get"]
+ if hasGet {
+ getData := rawGetData.(map[string]interface{})
+ getParams, paramsPresent := getData["parameters"].(map[string]interface{})
+ if getParams != nil && paramsPresent {
+ if _, hasList = getParams["list"]; hasList {
+ // LIST is usually exclusive of GET on the same endpoint.
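+ // In the generated OpenAPI spec, a list operation is modeled as GET plus a
+ // "list" query parameter, so treat such an endpoint as LIST rather than GET.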
+ hasGet = false + } + } + } + _, hasPost := openapi_data["post"] + _, hasDelete := openapi_data["delete"] + + if handler == shouldBeUnauthedReadList { + if hasPost || hasDelete { + t.Fatalf("Unauthed read-only endpoints should not have POST/DELETE capabilities") + } + } + } + + if !validatedPath { + t.Fatalf("Expected to have validated at least one path.") + } +} diff --git a/builtin/logical/ssh/cmd/ssh/main.go b/builtin/logical/ssh/cmd/ssh/main.go new file mode 100644 index 0000000..a9cf8b2 --- /dev/null +++ b/builtin/logical/ssh/cmd/ssh/main.go @@ -0,0 +1,34 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "os" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/ssh" + "github.com/hashicorp/vault/sdk/plugin" +) + +func main() { + apiClientMeta := &api.PluginAPIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args[1:]) + + tlsConfig := apiClientMeta.GetTLSConfig() + tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) + + if err := plugin.ServeMultiplex(&plugin.ServeOpts{ + BackendFactoryFunc: ssh.Factory, + // set the TLSProviderFunc so that the plugin maintains backwards + // compatibility with Vault versions that don’t support plugin AutoMTLS + TLSProviderFunc: tlsProviderFunc, + }); err != nil { + logger := hclog.New(&hclog.LoggerOptions{}) + + logger.Error("plugin shutting down", "error", err) + os.Exit(1) + } +} diff --git a/builtin/logical/ssh/path_cleanup_dynamic_host_keys.go b/builtin/logical/ssh/path_cleanup_dynamic_host_keys.go new file mode 100644 index 0000000..5ae2afc --- /dev/null +++ b/builtin/logical/ssh/path_cleanup_dynamic_host_keys.go @@ -0,0 +1,50 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ssh + +import ( + "context" + "fmt" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +const keysStoragePrefix = "keys/" + +func pathCleanupKeys(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "tidy/dynamic-keys", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixSSH, + OperationVerb: "tidy", + OperationSuffix: "dynamic-host-keys", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.DeleteOperation: b.handleCleanupKeys, + }, + HelpSynopsis: `This endpoint removes the stored host keys used for the removed Dynamic Key feature, if present.`, + HelpDescription: `For more information, refer to the API documentation.`, + } +} + +func (b *backend) handleCleanupKeys(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + names, err := req.Storage.List(ctx, keysStoragePrefix) + if err != nil { + return nil, fmt.Errorf("unable to list keys for removal: %w", err) + } + + for index, name := range names { + keyPath := keysStoragePrefix + name + if err := req.Storage.Delete(ctx, keyPath); err != nil { + return nil, fmt.Errorf("unable to delete key %v of %v: %w", index+1, len(names), err) + } + } + + return &logical.Response{ + Data: map[string]interface{}{ + "message": fmt.Sprintf("Removed %v of %v host keys.", len(names), len(names)), + }, + }, nil +} diff --git a/builtin/logical/ssh/path_config_ca.go b/builtin/logical/ssh/path_config_ca.go new file mode 100644 index 0000000..3fa890c --- /dev/null +++ b/builtin/logical/ssh/path_config_ca.go @@ -0,0 +1,409 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package ssh
+
+import (
+ "context"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+
+ multierror "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/vault/sdk/framework"
+ "github.com/hashicorp/vault/sdk/logical"
+ "golang.org/x/crypto/ssh"
+
+ "github.com/mikesmitty/edkey"
+)
+
+const (
+ caPublicKey = "ca_public_key"
+ caPrivateKey = "ca_private_key"
+ caPublicKeyStoragePath = "config/ca_public_key"
+ caPublicKeyStoragePathDeprecated = "public_key"
+ caPrivateKeyStoragePath = "config/ca_private_key"
+ caPrivateKeyStoragePathDeprecated = "config/ca_bundle"
+)
+
+type keyStorageEntry struct {
+ Key string `json:"key" structs:"key" mapstructure:"key"`
+}
+
+func pathConfigCA(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "config/ca",
+
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationPrefix: operationPrefixSSH,
+ },
+
+ Fields: map[string]*framework.FieldSchema{
+ "private_key": {
+ Type: framework.TypeString,
+ Description: `Private half of the SSH key that will be used to sign certificates.`,
+ },
+ "public_key": {
+ Type: framework.TypeString,
+ Description: `Public half of the SSH key that will be used to sign certificates.`,
+ },
+ "generate_signing_key": {
+ Type: framework.TypeBool,
+ Description: `Generate SSH key pair internally rather than use the private_key and public_key fields.`,
+ Default: true,
+ },
+ "key_type": {
+ Type: framework.TypeString,
+ Description: `Specifies the desired key type when generating; could be an OpenSSH key type identifier (ssh-rsa, ecdsa-sha2-nistp256, ecdsa-sha2-nistp384, ecdsa-sha2-nistp521, or ssh-ed25519) or an algorithm (rsa, ec, ed25519).`,
+ Default: "ssh-rsa",
+ },
+ "key_bits": {
+ Type: framework.TypeInt,
+ Description: `Specifies the desired key bits when generating variable-length keys (such as when key_type="ssh-rsa") or which NIST P-curve to use when key_type="ec" (256, 384, or 521).`,
+ Default: 0,
+ },
+ },
+
+ Operations: map[logical.Operation]framework.OperationHandler{
+ logical.UpdateOperation: &framework.PathOperation{
+ Callback: b.pathConfigCAUpdate,
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationVerb: "configure",
+ OperationSuffix: "ca",
+ },
+ },
+ logical.DeleteOperation: &framework.PathOperation{
+ Callback: b.pathConfigCADelete,
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationSuffix: "ca-configuration",
+ },
+ },
+ logical.ReadOperation: &framework.PathOperation{
+ Callback: b.pathConfigCARead,
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationSuffix: "ca-configuration",
+ },
+ },
+ },
+
+ HelpSynopsis: `Set the SSH private key used for signing certificates.`,
+ HelpDescription: `This sets the CA information used for certificates generated
+by this mount. The fields must be in the standard private and public SSH format.
+
+For security reasons, the private key cannot be retrieved later.
+ +Read operations will return the public key, if already stored/generated.`, + } +} + +func (b *backend) pathConfigCARead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + publicKeyEntry, err := caKey(ctx, req.Storage, caPublicKey) + if err != nil { + return nil, fmt.Errorf("failed to read CA public key: %w", err) + } + + if publicKeyEntry == nil { + return logical.ErrorResponse("keys haven't been configured yet"), nil + } + + response := &logical.Response{ + Data: map[string]interface{}{ + "public_key": publicKeyEntry.Key, + }, + } + + return response, nil +} + +func (b *backend) pathConfigCADelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + if err := req.Storage.Delete(ctx, caPrivateKeyStoragePath); err != nil { + return nil, err + } + if err := req.Storage.Delete(ctx, caPublicKeyStoragePath); err != nil { + return nil, err + } + return nil, nil +} + +func caKey(ctx context.Context, storage logical.Storage, keyType string) (*keyStorageEntry, error) { + var path, deprecatedPath string + switch keyType { + case caPrivateKey: + path = caPrivateKeyStoragePath + deprecatedPath = caPrivateKeyStoragePathDeprecated + case caPublicKey: + path = caPublicKeyStoragePath + deprecatedPath = caPublicKeyStoragePathDeprecated + default: + return nil, fmt.Errorf("unrecognized key type %q", keyType) + } + + entry, err := storage.Get(ctx, path) + if err != nil { + return nil, fmt.Errorf("failed to read CA key of type %q: %w", keyType, err) + } + + if entry == nil { + // If the entry is not found, look at an older path. If found, upgrade + // it. + entry, err = storage.Get(ctx, deprecatedPath) + if err != nil { + return nil, err + } + if entry != nil { + entry, err = logical.StorageEntryJSON(path, keyStorageEntry{ + Key: string(entry.Value), + }) + if err != nil { + return nil, err + } + if err := storage.Put(ctx, entry); err != nil { + return nil, err + } + if err = storage.Delete(ctx, deprecatedPath); err != nil { + return nil, err + } + } + } + if entry == nil { + return nil, nil + } + + var keyEntry keyStorageEntry + if err := entry.DecodeJSON(&keyEntry); err != nil { + return nil, err + } + + return &keyEntry, nil +} + +func (b *backend) pathConfigCAUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + var err error + publicKey := data.Get("public_key").(string) + privateKey := data.Get("private_key").(string) + + var generateSigningKey bool + + generateSigningKeyRaw, ok := data.GetOk("generate_signing_key") + switch { + // explicitly set true + case ok && generateSigningKeyRaw.(bool): + if publicKey != "" || privateKey != "" { + return logical.ErrorResponse("public_key and private_key must not be set when generate_signing_key is set to true"), nil + } + + generateSigningKey = true + + // explicitly set to false, or not set and we have both a public and private key + case ok, publicKey != "" && privateKey != "": + if publicKey == "" { + return logical.ErrorResponse("missing public_key"), nil + } + + if privateKey == "" { + return logical.ErrorResponse("missing private_key"), nil + } + + _, err := ssh.ParsePrivateKey([]byte(privateKey)) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("Unable to parse private_key as an SSH private key: %v", err)), nil + } + + _, err = parsePublicSSHKey(publicKey) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("Unable to parse public_key as an SSH public key: %v", err)), nil + } + + // 
not set and no public/private key provided so generate + case publicKey == "" && privateKey == "": + generateSigningKey = true + + // not set, but one or the other supplied + default: + return logical.ErrorResponse("only one of public_key and private_key set; both must be set to use, or both must be blank to auto-generate"), nil + } + + if generateSigningKey { + keyType := data.Get("key_type").(string) + keyBits := data.Get("key_bits").(int) + + publicKey, privateKey, err = generateSSHKeyPair(b.Backend.GetRandomReader(), keyType, keyBits) + if err != nil { + return nil, err + } + } + + if publicKey == "" || privateKey == "" { + return nil, fmt.Errorf("failed to generate or parse the keys") + } + + publicKeyEntry, err := caKey(ctx, req.Storage, caPublicKey) + if err != nil { + return nil, fmt.Errorf("failed to read CA public key: %w", err) + } + + privateKeyEntry, err := caKey(ctx, req.Storage, caPrivateKey) + if err != nil { + return nil, fmt.Errorf("failed to read CA private key: %w", err) + } + + if (publicKeyEntry != nil && publicKeyEntry.Key != "") || (privateKeyEntry != nil && privateKeyEntry.Key != "") { + return logical.ErrorResponse("keys are already configured; delete them before reconfiguring"), nil + } + + entry, err := logical.StorageEntryJSON(caPublicKeyStoragePath, &keyStorageEntry{ + Key: publicKey, + }) + if err != nil { + return nil, err + } + + // Save the public key + err = req.Storage.Put(ctx, entry) + if err != nil { + return nil, err + } + + entry, err = logical.StorageEntryJSON(caPrivateKeyStoragePath, &keyStorageEntry{ + Key: privateKey, + }) + if err != nil { + return nil, err + } + + // Save the private key + err = req.Storage.Put(ctx, entry) + if err != nil { + var mErr *multierror.Error + + mErr = multierror.Append(mErr, fmt.Errorf("failed to store CA private key: %w", err)) + + // If storing private key fails, the corresponding public key should be + // removed + if delErr := req.Storage.Delete(ctx, caPublicKeyStoragePath); delErr != nil { + mErr = multierror.Append(mErr, fmt.Errorf("failed to cleanup CA public key: %w", delErr)) + return nil, mErr + } + + return nil, err + } + + if generateSigningKey { + response := &logical.Response{ + Data: map[string]interface{}{ + "public_key": publicKey, + }, + } + + return response, nil + } + + return nil, nil +} + +func generateSSHKeyPair(randomSource io.Reader, keyType string, keyBits int) (string, string, error) { + if randomSource == nil { + randomSource = rand.Reader + } + + var publicKey crypto.PublicKey + var privateBlock *pem.Block + + switch keyType { + case ssh.KeyAlgoRSA, "rsa": + if keyBits == 0 { + keyBits = 4096 + } + + if keyBits < 2048 { + return "", "", fmt.Errorf("refusing to generate weak %v key: %v bits < 2048 bits", keyType, keyBits) + } + + privateSeed, err := rsa.GenerateKey(randomSource, keyBits) + if err != nil { + return "", "", err + } + + privateBlock = &pem.Block{ + Type: "RSA PRIVATE KEY", + Headers: nil, + Bytes: x509.MarshalPKCS1PrivateKey(privateSeed), + } + + publicKey = privateSeed.Public() + case ssh.KeyAlgoECDSA256, ssh.KeyAlgoECDSA384, ssh.KeyAlgoECDSA521, "ec": + var curve elliptic.Curve + switch keyType { + case ssh.KeyAlgoECDSA256: + curve = elliptic.P256() + case ssh.KeyAlgoECDSA384: + curve = elliptic.P384() + case ssh.KeyAlgoECDSA521: + curve = elliptic.P521() + default: + switch keyBits { + case 0, 256: + curve = elliptic.P256() + case 384: + curve = elliptic.P384() + case 521: + curve = elliptic.P521() + default: + return "", "", fmt.Errorf("unknown ECDSA key pair algorithm 
and bits: %v / %v", keyType, keyBits) + } + } + + privateSeed, err := ecdsa.GenerateKey(curve, randomSource) + if err != nil { + return "", "", err + } + + marshalled, err := x509.MarshalECPrivateKey(privateSeed) + if err != nil { + return "", "", err + } + + privateBlock = &pem.Block{ + Type: "EC PRIVATE KEY", + Headers: nil, + Bytes: marshalled, + } + + publicKey = privateSeed.Public() + case ssh.KeyAlgoED25519, "ed25519": + _, privateSeed, err := ed25519.GenerateKey(randomSource) + if err != nil { + return "", "", err + } + + marshalled := edkey.MarshalED25519PrivateKey(privateSeed) + if marshalled == nil { + return "", "", errors.New("unable to marshal ed25519 private key") + } + + privateBlock = &pem.Block{ + Type: "OPENSSH PRIVATE KEY", + Headers: nil, + Bytes: marshalled, + } + + publicKey = privateSeed.Public() + default: + return "", "", fmt.Errorf("unknown ssh key pair algorithm: %v", keyType) + } + + public, err := ssh.NewPublicKey(publicKey) + if err != nil { + return "", "", err + } + + return string(ssh.MarshalAuthorizedKey(public)), string(pem.EncodeToMemory(privateBlock)), nil +} diff --git a/builtin/logical/ssh/path_config_ca_test.go b/builtin/logical/ssh/path_config_ca_test.go new file mode 100644 index 0000000..4c33fc8 --- /dev/null +++ b/builtin/logical/ssh/path_config_ca_test.go @@ -0,0 +1,277 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ssh + +import ( + "context" + "strings" + "testing" + + "github.com/hashicorp/vault/sdk/logical" +) + +func TestSSH_ConfigCAStorageUpgrade(t *testing.T) { + var err error + + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + + b, err := Backend(config) + if err != nil { + t.Fatal(err) + } + + err = b.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // Store at an older path + err = config.StorageView.Put(context.Background(), &logical.StorageEntry{ + Key: caPrivateKeyStoragePathDeprecated, + Value: []byte(testCAPrivateKey), + }) + if err != nil { + t.Fatal(err) + } + + // Reading it should return the key as well as upgrade the storage path + privateKeyEntry, err := caKey(context.Background(), config.StorageView, caPrivateKey) + if err != nil { + t.Fatal(err) + } + if privateKeyEntry == nil || privateKeyEntry.Key == "" { + t.Fatalf("failed to read the stored private key") + } + + entry, err := config.StorageView.Get(context.Background(), caPrivateKeyStoragePathDeprecated) + if err != nil { + t.Fatal(err) + } + if entry != nil { + t.Fatalf("bad: expected a nil entry after upgrade") + } + + entry, err = config.StorageView.Get(context.Background(), caPrivateKeyStoragePath) + if err != nil { + t.Fatal(err) + } + if entry == nil { + t.Fatalf("bad: expected a non-nil entry after upgrade") + } + + // Store at an older path + err = config.StorageView.Put(context.Background(), &logical.StorageEntry{ + Key: caPublicKeyStoragePathDeprecated, + Value: []byte(testCAPublicKey), + }) + if err != nil { + t.Fatal(err) + } + + // Reading it should return the key as well as upgrade the storage path + publicKeyEntry, err := caKey(context.Background(), config.StorageView, caPublicKey) + if err != nil { + t.Fatal(err) + } + if publicKeyEntry == nil || publicKeyEntry.Key == "" { + t.Fatalf("failed to read the stored public key") + } + + entry, err = config.StorageView.Get(context.Background(), caPublicKeyStoragePathDeprecated) + if err != nil { + t.Fatal(err) + } + if entry != nil { + t.Fatalf("bad: expected a nil entry after upgrade") + } + + entry, err = 
config.StorageView.Get(context.Background(), caPublicKeyStoragePath) + if err != nil { + t.Fatal(err) + } + if entry == nil { + t.Fatalf("bad: expected a non-nil entry after upgrade") + } +} + +func TestSSH_ConfigCAUpdateDelete(t *testing.T) { + var resp *logical.Response + var err error + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatalf("Cannot create backend: %s", err) + } + + caReq := &logical.Request{ + Path: "config/ca", + Operation: logical.UpdateOperation, + Storage: config.StorageView, + } + + // Auto-generate the keys + resp, err = b.HandleRequest(context.Background(), caReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v, resp:%v", err, resp) + } + + // Fail to overwrite it + resp, err = b.HandleRequest(context.Background(), caReq) + if err != nil { + t.Fatal(err) + } + if !resp.IsError() { + t.Fatalf("expected an error, got %#v", *resp) + } + + caReq.Operation = logical.DeleteOperation + // Delete the configured keys + resp, err = b.HandleRequest(context.Background(), caReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v, resp:%v", err, resp) + } + + caReq.Operation = logical.UpdateOperation + caReq.Data = map[string]interface{}{ + "public_key": testCAPublicKey, + "private_key": testCAPrivateKey, + } + + // Successfully create a new one + resp, err = b.HandleRequest(context.Background(), caReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v, resp:%v", err, resp) + } + + // Fail to overwrite it + resp, err = b.HandleRequest(context.Background(), caReq) + if err != nil { + t.Fatal(err) + } + if !resp.IsError() { + t.Fatalf("expected an error, got %#v", *resp) + } + + caReq.Operation = logical.DeleteOperation + // Delete the configured keys + resp, err = b.HandleRequest(context.Background(), caReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v, resp:%v", err, resp) + } + + caReq.Operation = logical.UpdateOperation + caReq.Data = nil + + // Successfully create a new one + resp, err = b.HandleRequest(context.Background(), caReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v, resp:%v", err, resp) + } + + // Delete the configured keys + caReq.Operation = logical.DeleteOperation + resp, err = b.HandleRequest(context.Background(), caReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v, resp:%v", err, resp) + } +} + +func createDeleteHelper(t *testing.T, b logical.Backend, config *logical.BackendConfig, index int, keyType string, keyBits int) { + // Check that we can create a new key of the specified type + caReq := &logical.Request{ + Path: "config/ca", + Operation: logical.UpdateOperation, + Storage: config.StorageView, + } + caReq.Data = map[string]interface{}{ + "generate_signing_key": true, + "key_type": keyType, + "key_bits": keyBits, + } + resp, err := b.HandleRequest(context.Background(), caReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad case %v: err: %v, resp: %v", index, err, resp) + } + if !strings.Contains(resp.Data["public_key"].(string), caReq.Data["key_type"].(string)) { + t.Fatalf("bad case %v: expected public key of type %v but was %v", index, caReq.Data["key_type"], resp.Data["public_key"]) + } + + issueOptions := map[string]interface{}{ + "public_key": testCAPublicKeyEd25519, + } + issueReq := &logical.Request{ + Path: "sign/ca-issuance", + 
Operation: logical.UpdateOperation, + Storage: config.StorageView, + Data: issueOptions, + } + resp, err = b.HandleRequest(context.Background(), issueReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad case %v: err: %v, resp: %v", index, err, resp) + } + + // Delete the configured keys + caReq.Operation = logical.DeleteOperation + resp, err = b.HandleRequest(context.Background(), caReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad case %v: err: %v, resp: %v", index, err, resp) + } +} + +func TestSSH_ConfigCAKeyTypes(t *testing.T) { + var err error + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatalf("Cannot create backend: %s", err) + } + + cases := []struct { + keyType string + keyBits int + }{ + {"ssh-rsa", 2048}, + {"ssh-rsa", 4096}, + {"ssh-rsa", 0}, + {"rsa", 2048}, + {"rsa", 4096}, + {"ecdsa-sha2-nistp256", 0}, + {"ecdsa-sha2-nistp384", 0}, + {"ecdsa-sha2-nistp521", 0}, + {"ec", 256}, + {"ec", 384}, + {"ec", 521}, + {"ec", 0}, + {"ssh-ed25519", 0}, + {"ed25519", 0}, + } + + // Create a role for ssh signing. + roleOptions := map[string]interface{}{ + "allow_user_certificates": true, + "allowed_users": "*", + "key_type": "ca", + "ttl": "30s", + "not_before_duration": "2h", + } + roleReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/ca-issuance", + Data: roleOptions, + Storage: config.StorageView, + } + _, err = b.HandleRequest(context.Background(), roleReq) + if err != nil { + t.Fatalf("Cannot create role to issue against: %s", err) + } + + for index, scenario := range cases { + createDeleteHelper(t, b, config, index, scenario.keyType, scenario.keyBits) + } +} diff --git a/builtin/logical/ssh/path_config_zeroaddress.go b/builtin/logical/ssh/path_config_zeroaddress.go new file mode 100644 index 0000000..773e9b3 --- /dev/null +++ b/builtin/logical/ssh/path_config_zeroaddress.go @@ -0,0 +1,176 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ssh + +import ( + "context" + "fmt" + + "github.com/hashicorp/go-secure-stdlib/strutil" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +// Structure to hold roles that are allowed to accept any IP address. +type zeroAddressRoles struct { + Roles []string `json:"roles" mapstructure:"roles"` +} + +func pathConfigZeroAddress(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "config/zeroaddress", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixSSH, + }, + + Fields: map[string]*framework.FieldSchema{ + "roles": { + Type: framework.TypeCommaStringSlice, + Description: `[Required] Comma separated list of role names which + allows credentials to be requested for any IP address. 
CIDR blocks + previously registered under these roles will be ignored.`, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigZeroAddressWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "zero-address", + }, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigZeroAddressRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "zero-address-configuration", + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathConfigZeroAddressDelete, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "zero-address-configuration", + }, + }, + }, + HelpSynopsis: pathConfigZeroAddressSyn, + HelpDescription: pathConfigZeroAddressDesc, + } +} + +func (b *backend) pathConfigZeroAddressDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + err := req.Storage.Delete(ctx, "config/zeroaddress") + if err != nil { + return nil, err + } + return nil, nil +} + +func (b *backend) pathConfigZeroAddressRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + entry, err := b.getZeroAddressRoles(ctx, req.Storage) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + return &logical.Response{ + Data: map[string]interface{}{ + "roles": entry.Roles, + }, + }, nil +} + +func (b *backend) pathConfigZeroAddressWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + roles := d.Get("roles").([]string) + if len(roles) == 0 { + return logical.ErrorResponse("Missing roles"), nil + } + + // Check if the roles listed actually exist in the backend + for _, item := range roles { + role, err := b.getRole(ctx, req.Storage, item) + if err != nil { + return nil, err + } + if role == nil { + return logical.ErrorResponse(fmt.Sprintf("Role %q does not exist", item)), nil + } + } + + err := b.putZeroAddressRoles(ctx, req.Storage, roles) + if err != nil { + return nil, err + } + + return nil, nil +} + +// Stores the given list of roles at zeroaddress endpoint +func (b *backend) putZeroAddressRoles(ctx context.Context, s logical.Storage, roles []string) error { + entry, err := logical.StorageEntryJSON("config/zeroaddress", &zeroAddressRoles{ + Roles: roles, + }) + if err != nil { + return err + } + if err := s.Put(ctx, entry); err != nil { + return err + } + return nil +} + +// Retrieves the list of roles from the zeroaddress endpoint. 
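+// As an illustrative sketch (not part of the upstream change), the flow that
+// exercises this storage entry end to end: write the config, then request an
+// OTP credential for an IP outside the role's CIDR list. "my-otp-role" is a
+// hypothetical pre-existing OTP role and storageView stands in for the
+// mount's storage (e.g. a *logical.InmemStorage in tests).
+//
+//	cfgReq := &logical.Request{
+//		Operation: logical.UpdateOperation,
+//		Path:      "config/zeroaddress",
+//		Storage:   storageView,
+//		Data:      map[string]interface{}{"roles": "my-otp-role"},
+//	}
+//	if _, err := b.HandleRequest(context.Background(), cfgReq); err != nil {
+//		// handle error
+//	}
+//
+//	credsReq := &logical.Request{
+//		Operation: logical.UpdateOperation,
+//		Path:      "creds/my-otp-role",
+//		Storage:   storageView,
+//		Data:      map[string]interface{}{"ip": "203.0.113.10"},
+//	}
+//	resp, err := b.HandleRequest(context.Background(), credsReq)
+//	// resp.Data["key"] now holds the OTP; the CIDR check was skipped
+//	// because the role is listed under config/zeroaddress.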
+func (b *backend) getZeroAddressRoles(ctx context.Context, s logical.Storage) (*zeroAddressRoles, error) { + entry, err := s.Get(ctx, "config/zeroaddress") + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result zeroAddressRoles + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + return &result, nil +} + +// Removes a role from the list of roles present in config/zeroaddress path +func (b *backend) removeZeroAddressRole(ctx context.Context, s logical.Storage, roleName string) error { + zeroAddressEntry, err := b.getZeroAddressRoles(ctx, s) + if err != nil { + return err + } + if zeroAddressEntry == nil { + return nil + } + + zeroAddressEntry.Roles = strutil.StrListDelete(zeroAddressEntry.Roles, roleName) + + return b.putZeroAddressRoles(ctx, s, zeroAddressEntry.Roles) +} + +const pathConfigZeroAddressSyn = ` +Assign zero address as default CIDR block for select roles. +` + +const pathConfigZeroAddressDesc = ` +Administrator can choose to make a select few registered roles to accept any IP +address, overriding the CIDR blocks registered during creation of roles. This +doesn't mean that the credentials are created for any IP address. Clients who +have access to these roles are trusted to make valid requests. Access to these +roles should be controlled using Vault policies. It is recommended that all the +roles that are allowed to accept any IP address should have an explicit policy +of deny for unintended clients. + +This is a root authenticated endpoint. If backend is mounted at 'ssh' then use +the endpoint 'ssh/config/zeroaddress' to provide the list of allowed roles. +After mounting the backend, use 'path-help' for additional information. +` diff --git a/builtin/logical/ssh/path_creds_create.go b/builtin/logical/ssh/path_creds_create.go new file mode 100644 index 0000000..b95c1f3 --- /dev/null +++ b/builtin/logical/ssh/path_creds_create.go @@ -0,0 +1,281 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package ssh + +import ( + "context" + "fmt" + "net" + "strings" + + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +type sshOTP struct { + Username string `json:"username" structs:"username" mapstructure:"username"` + IP string `json:"ip" structs:"ip" mapstructure:"ip"` + RoleName string `json:"role_name" structs:"role_name" mapstructure:"role_name"` +} + +func pathCredsCreate(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "creds/" + framework.GenericNameWithAtRegex("role"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixSSH, + OperationVerb: "generate", + OperationSuffix: "credentials", + }, + + Fields: map[string]*framework.FieldSchema{ + "role": { + Type: framework.TypeString, + Description: "[Required] Name of the role", + }, + "username": { + Type: framework.TypeString, + Description: "[Optional] Username in remote host", + }, + "ip": { + Type: framework.TypeString, + Description: "[Required] IP of the remote host", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathCredsCreateWrite, + }, + + HelpSynopsis: pathCredsCreateHelpSyn, + HelpDescription: pathCredsCreateHelpDesc, + } +} + +func (b *backend) pathCredsCreateWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + roleName := d.Get("role").(string) + if roleName == "" { + return logical.ErrorResponse("Missing role"), nil + } + + ipRaw := d.Get("ip").(string) + if ipRaw == "" { + return logical.ErrorResponse("Missing ip"), nil + } + + role, err := b.getRole(ctx, req.Storage, roleName) + if err != nil { + return nil, fmt.Errorf("error retrieving role: %w", err) + } + if role == nil { + return logical.ErrorResponse(fmt.Sprintf("Role %q not found", roleName)), nil + } + + // username is an optional parameter. + username := d.Get("username").(string) + + // Set the default username + if username == "" { + if role.DefaultUser == "" { + return logical.ErrorResponse("No default username registered. Use 'username' option"), nil + } + username = role.DefaultUser + } + + if role.AllowedUsers != "" { + // Check if the username is present in allowed users list. + err := validateUsername(username, role.AllowedUsers) + + // If username is not present in allowed users list, check if it + // is the default username in the role. If neither is true, then + // that username is not allowed to generate a credential. 
+ if err != nil && username != role.DefaultUser {
+ return logical.ErrorResponse("Username is not present in the allowed users list"), nil
+ }
+ } else if username != role.DefaultUser {
+ return logical.ErrorResponse("Username must either be in the allowed users list or be the default username"), nil
+ }
+
+ // Validate the IP address
+ ipAddr := net.ParseIP(ipRaw)
+ if ipAddr == nil {
+ return logical.ErrorResponse(fmt.Sprintf("Invalid IP %q", ipRaw)), nil
+ }
+
+ // Check if the IP belongs to the registered list of CIDR blocks under the role
+ ip := ipAddr.String()
+
+ zeroAddressEntry, err := b.getZeroAddressRoles(ctx, req.Storage)
+ if err != nil {
+ return nil, fmt.Errorf("error retrieving zero-address roles: %w", err)
+ }
+ var zeroAddressRoles []string
+ if zeroAddressEntry != nil {
+ zeroAddressRoles = zeroAddressEntry.Roles
+ }
+
+ err = validateIP(ip, roleName, role.CIDRList, role.ExcludeCIDRList, zeroAddressRoles)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("Error validating IP: %v", err)), nil
+ }
+
+ var result *logical.Response
+ if role.KeyType == KeyTypeOTP {
+ // Generate an OTP
+ otp, err := b.GenerateOTPCredential(ctx, req, &sshOTP{
+ Username: username,
+ IP: ip,
+ RoleName: roleName,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Return the information relevant to the user of the OTP type and save
+ // the data required for later use in the internal section of the secret.
+ // In this case, saving just the OTP is sufficient since there is
+ // no need to establish a connection with the remote host.
+ result = b.Secret(SecretOTPType).Response(map[string]interface{}{
+ "key_type": role.KeyType,
+ "key": otp,
+ "username": username,
+ "ip": ip,
+ "port": role.Port,
+ }, map[string]interface{}{
+ "otp": otp,
+ })
+ } else if role.KeyType == KeyTypeDynamic {
+ return nil, fmt.Errorf("dynamic key types have been removed")
+ } else {
+ return nil, fmt.Errorf("key type unknown")
+ }
+
+ return result, nil
+}
+
+// Generates a UUID OTP and its salted value based on the salt of the backend.
+func (b *backend) GenerateSaltedOTP(ctx context.Context) (string, string, error) {
+ str, err := uuid.GenerateUUID()
+ if err != nil {
+ return "", "", err
+ }
+ salt, err := b.Salt(ctx)
+ if err != nil {
+ return "", "", err
+ }
+
+ return str, salt.SaltID(str), nil
+}
+
+// Generates a UUID OTP and creates an entry for it in the storage backend, keyed by its salted string.
+func (b *backend) GenerateOTPCredential(ctx context.Context, req *logical.Request, sshOTPEntry *sshOTP) (string, error) {
+ otp, otpSalted, err := b.GenerateSaltedOTP(ctx)
+ if err != nil {
+ return "", err
+ }
+
+ // Check if there is an entry already created for the newly generated OTP.
+ entry, err := b.getOTP(ctx, req.Storage, otpSalted)
+
+ // If an entry already exists for the OTP, make sure that the new OTP is
+ // not replacing an existing one by regenerating until an unused OTP is
+ // produced. It is very unlikely that this is the case and this code is
+ // just for safety.
+ for err == nil && entry != nil {
+ otp, otpSalted, err = b.GenerateSaltedOTP(ctx)
+ if err != nil {
+ return "", err
+ }
+ entry, err = b.getOTP(ctx, req.Storage, otpSalted)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ // Store an entry for the salt of OTP.
+ newEntry, err := logical.StorageEntryJSON("otp/"+otpSalted, sshOTPEntry)
+ if err != nil {
+ return "", err
+ }
+ if err := req.Storage.Put(ctx, newEntry); err != nil {
+ return "", err
+ }
+ return otp, nil
+}
+
+// validateIP first checks if the role belongs to the list of privileged
+// roles that could allow any IP address and if there is a match, IP is
+// accepted immediately. If not, IP is searched in the allowed CIDR blocks
+// registered with the role. If there is a match, then it is searched in the
+// excluded CIDR blocks and if IP is found there as well, an error is returned.
+// IP is valid only if it is encompassed by allowed CIDR blocks and not by
+// excluded CIDR blocks.
+func validateIP(ip, roleName, cidrList, excludeCidrList string, zeroAddressRoles []string) error {
+ // Search IP in the zero-address list
+ for _, role := range zeroAddressRoles {
+ if roleName == role {
+ return nil
+ }
+ }
+
+ // Search IP in allowed CIDR blocks
+ ipMatched, err := cidrListContainsIP(ip, cidrList)
+ if err != nil {
+ return err
+ }
+ if !ipMatched {
+ return fmt.Errorf("IP does not belong to role")
+ }
+
+ if len(excludeCidrList) == 0 {
+ return nil
+ }
+
+ // Search IP in exclude list
+ ipMatched, err = cidrListContainsIP(ip, excludeCidrList)
+ if err != nil {
+ return err
+ }
+ if ipMatched {
+ return fmt.Errorf("IP does not belong to role")
+ }
+
+ return nil
+}
+
+// Checks if the username supplied by the user is present in the list of
+// allowed users registered during creation of the role.
+func validateUsername(username, allowedUsers string) error {
+ if allowedUsers == "" {
+ return fmt.Errorf("username not in allowed users list")
+ }
+
+ // Role was explicitly configured to allow any username.
+ if allowedUsers == "*" {
+ return nil
+ }
+
+ userList := strings.Split(allowedUsers, ",")
+ for _, user := range userList {
+ if strings.TrimSpace(user) == username {
+ return nil
+ }
+ }
+
+ return fmt.Errorf("username not in allowed users list")
+}
+
+const pathCredsCreateHelpSyn = `
+Creates a credential for establishing an SSH connection with the remote host.
+`
+
+const pathCredsCreateHelpDesc = `
+This path will generate a new key for establishing an SSH session with the
+target host. The key is a One Time Password (OTP) when 'key_type' is set
+to 'otp'.
+
+Keys will have a lease associated with them and can be revoked by using
+the lease ID.
+`
diff --git a/builtin/logical/ssh/path_fetch.go b/builtin/logical/ssh/path_fetch.go
new file mode 100644
index 0000000..3a1fa5f
--- /dev/null
+++ b/builtin/logical/ssh/path_fetch.go
@@ -0,0 +1,49 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package ssh
+
+import (
+ "context"
+
+ "github.com/hashicorp/vault/sdk/framework"
+ "github.com/hashicorp/vault/sdk/logical"
+)
+
+func pathFetchPublicKey(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: `public_key`,
+
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationPrefix: operationPrefixSSH,
+ OperationSuffix: "public-key",
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.ReadOperation: b.pathFetchPublicKey,
+ },
+
+ HelpSynopsis: `Retrieve the public key.`,
+ HelpDescription: `This allows fetching the public key of the SSH CA certificate that this backend has been configured with.
This is a raw response endpoint without JSON encoding; use -format=raw or an external tool (e.g., curl) to fetch this value.`, + } +} + +func (b *backend) pathFetchPublicKey(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + publicKeyEntry, err := caKey(ctx, req.Storage, caPublicKey) + if err != nil { + return nil, err + } + if publicKeyEntry == nil || publicKeyEntry.Key == "" { + return nil, nil + } + + response := &logical.Response{ + Data: map[string]interface{}{ + logical.HTTPContentType: "text/plain", + logical.HTTPRawBody: []byte(publicKeyEntry.Key), + logical.HTTPStatusCode: 200, + }, + } + + return response, nil +} diff --git a/builtin/logical/ssh/path_issue.go b/builtin/logical/ssh/path_issue.go new file mode 100644 index 0000000..b50e03e --- /dev/null +++ b/builtin/logical/ssh/path_issue.go @@ -0,0 +1,192 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ssh + +import ( + "context" + "crypto/rand" + "errors" + "fmt" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +type keySpecs struct { + Type string + Bits int +} + +func pathIssue(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "issue/" + framework.GenericNameWithAtRegex("role"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixSSH, + OperationVerb: "issue", + OperationSuffix: "certificate", + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathIssue, + }, + }, + Fields: map[string]*framework.FieldSchema{ + "role": { + Type: framework.TypeString, + Description: `The desired role with configuration for this request.`, + }, + "key_type": { + Type: framework.TypeString, + Description: "Specifies the desired key type; must be `rsa`, `ed25519` or `ec`", + Default: "rsa", + }, + "key_bits": { + Type: framework.TypeInt, + Description: "Specifies the number of bits to use for the generated keys.", + Default: 0, + }, + "ttl": { + Type: framework.TypeDurationSecond, + Description: `The requested Time To Live for the SSH certificate; +sets the expiration date. If not specified +the role default, backend default, or system +default TTL is used, in that order. Cannot +be later than the role max TTL.`, + }, + "valid_principals": { + Type: framework.TypeString, + Description: `Valid principals, either usernames or hostnames, that the certificate should be signed for.`, + }, + "cert_type": { + Type: framework.TypeString, + Description: `Type of certificate to be created; either "user" or "host".`, + Default: "user", + }, + "key_id": { + Type: framework.TypeString, + Description: `Key id that the created certificate should have. 
If not specified, the display name of the token will be used.`, + }, + "critical_options": { + Type: framework.TypeMap, + Description: `Critical options that the certificate should be signed for.`, + }, + "extensions": { + Type: framework.TypeMap, + Description: `Extensions that the certificate should be signed for.`, + }, + }, + HelpSynopsis: pathIssueHelpSyn, + HelpDescription: pathIssueHelpDesc, + } +} + +func (b *backend) pathIssue(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Get the role + roleName := data.Get("role").(string) + role, err := b.getRole(ctx, req.Storage, roleName) + if err != nil { + return nil, err + } + if role == nil { + return logical.ErrorResponse(fmt.Sprintf("unknown role: %s", roleName)), nil + } + + if role.KeyType != "ca" { + return logical.ErrorResponse("role key type '%s' not allowed to issue key pairs", role.KeyType), nil + } + + // Validate and extract key specifications + keySpecs, err := extractKeySpecs(role, data) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + // Issue certificate + return b.pathIssueCertificate(ctx, req, data, role, keySpecs) +} + +func (b *backend) pathIssueCertificate(ctx context.Context, req *logical.Request, data *framework.FieldData, role *sshRole, keySpecs *keySpecs) (*logical.Response, error) { + publicKey, privateKey, err := generateSSHKeyPair(rand.Reader, keySpecs.Type, keySpecs.Bits) + if err != nil { + return nil, err + } + + // Sign key + userPublicKey, err := parsePublicSSHKey(publicKey) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("failed to parse public_key as SSH key: %s", err)), nil + } + + response, err := b.pathSignIssueCertificateHelper(ctx, req, data, role, userPublicKey) + if err != nil { + return nil, err + } + if response.IsError() { + return response, nil + } + + // Additional to sign response + response.Data["private_key"] = privateKey + response.Data["private_key_type"] = keySpecs.Type + + return response, nil +} + +func extractKeySpecs(role *sshRole, data *framework.FieldData) (*keySpecs, error) { + keyType := data.Get("key_type").(string) + keyBits := data.Get("key_bits").(int) + keySpecs := keySpecs{ + Type: keyType, + Bits: keyBits, + } + + keyTypeToMapKey := createKeyTypeToMapKey(keyType, keyBits) + + if len(role.AllowedUserKeyTypesLengths) != 0 { + var keyAllowed bool + var bitsAllowed bool + + keyTypeAliasesLoop: + for _, keyTypeAlias := range keyTypeToMapKey[keyType] { + allowedValues, allowed := role.AllowedUserKeyTypesLengths[keyTypeAlias] + if !allowed { + continue + } + keyAllowed = true + + for _, value := range allowedValues { + if value == keyBits { + bitsAllowed = true + break keyTypeAliasesLoop + } + } + } + + if !keyAllowed { + return nil, errors.New("provided key_type value not in allowed_user_key_types") + } + + if !bitsAllowed { + return nil, errors.New("provided key_bits value not in list of role's allowed_user_key_types") + } + } + + return &keySpecs, nil +} + +const pathIssueHelpSyn = ` +Request a certificate using a certain role with the provided details. +` + +const pathIssueHelpDesc = ` +This path allows requesting a certificate to be issued according to the +policy of the given role. The certificate will only be issued if the +requested details are allowed by the role policy. + +This path returns a certificate and a private key. If you want a workflow +that does not expose a private key, generate a CSR locally and use the +sign path instead. 
+` diff --git a/builtin/logical/ssh/path_issue_sign.go b/builtin/logical/ssh/path_issue_sign.go new file mode 100644 index 0000000..c4e68e4 --- /dev/null +++ b/builtin/logical/ssh/path_issue_sign.go @@ -0,0 +1,561 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ssh + +import ( + "context" + "crypto/dsa" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "errors" + "fmt" + "io" + "regexp" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/logical" + "golang.org/x/crypto/ssh" +) + +var containsTemplateRegex = regexp.MustCompile(`{{.+?}}`) + +var ecCurveBitsToAlgoName = map[int]string{ + 256: ssh.KeyAlgoECDSA256, + 384: ssh.KeyAlgoECDSA384, + 521: ssh.KeyAlgoECDSA521, +} + +// If the algorithm is not found, it could be that we have a curve +// that we haven't added a constant for yet. But they could allow it +// (assuming x/crypto/ssh can parse it) via setting a ec: +// mapping rather than using a named SSH key type, so erring out here +// isn't advisable. + +type creationBundle struct { + KeyID string + ValidPrincipals []string + PublicKey ssh.PublicKey + CertificateType uint32 + TTL time.Duration + Signer ssh.Signer + Role *sshRole + CriticalOptions map[string]string + Extensions map[string]string +} + +func (b *backend) pathSignIssueCertificateHelper(ctx context.Context, req *logical.Request, data *framework.FieldData, role *sshRole, publicKey ssh.PublicKey) (*logical.Response, error) { + // Note that these various functions always return "user errors" so we pass + // them as 4xx values + keyID, err := b.calculateKeyID(data, req, role, publicKey) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + certificateType, err := b.calculateCertificateType(data, role) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + var parsedPrincipals []string + if certificateType == ssh.HostCert { + parsedPrincipals, err = b.calculateValidPrincipals(data, req, role, "", role.AllowedDomains, role.AllowedDomainsTemplate, validateValidPrincipalForHosts(role)) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + } else { + defaultPrincipal := role.DefaultUser + if role.DefaultUserTemplate { + defaultPrincipal, err = b.renderPrincipal(role.DefaultUser, req) + if err != nil { + return nil, err + } + } + parsedPrincipals, err = b.calculateValidPrincipals(data, req, role, defaultPrincipal, role.AllowedUsers, role.AllowedUsersTemplate, strutil.StrListContains) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + } + + ttl, err := b.calculateTTL(data, role) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + criticalOptions, err := b.calculateCriticalOptions(data, role) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + extensions, addExtTemplatingWarning, err := b.calculateExtensions(data, req, role) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + + privateKeyEntry, err := caKey(ctx, req.Storage, caPrivateKey) + if err != nil { + return nil, fmt.Errorf("failed to read CA private key: %w", err) + } + if privateKeyEntry == nil || privateKeyEntry.Key == "" { + return nil, errors.New("failed to read CA private key") + } + + signer, err := 
ssh.ParsePrivateKey([]byte(privateKeyEntry.Key)) + if err != nil { + return nil, fmt.Errorf("failed to parse stored CA private key: %w", err) + } + + cBundle := creationBundle{ + KeyID: keyID, + PublicKey: publicKey, + Signer: signer, + ValidPrincipals: parsedPrincipals, + TTL: ttl, + CertificateType: certificateType, + Role: role, + CriticalOptions: criticalOptions, + Extensions: extensions, + } + + certificate, err := cBundle.sign() + if err != nil { + return nil, err + } + + signedSSHCertificate := ssh.MarshalAuthorizedKey(certificate) + if len(signedSSHCertificate) == 0 { + return nil, errors.New("error marshaling signed certificate") + } + + response := &logical.Response{ + Data: map[string]interface{}{ + "serial_number": strconv.FormatUint(certificate.Serial, 16), + "signed_key": string(signedSSHCertificate), + }, + } + + if addExtTemplatingWarning { + response.AddWarning("default_extension templating enabled with at least one extension requiring identity templating. However, this request lacked identity entity information, causing one or more extensions to be skipped from the generated certificate.") + } + + return response, nil +} + +func (b *backend) renderPrincipal(principal string, req *logical.Request) (string, error) { + // Look for templating markers {{ .* }} + matched := containsTemplateRegex.MatchString(principal) + if matched { + if req.EntityID != "" { + // Retrieve principal based on template + entityID from request. + renderedPrincipal, err := framework.PopulateIdentityTemplate(principal, req.EntityID, b.System()) + if err != nil { + return "", fmt.Errorf("template '%s' could not be rendered -> %s", principal, err) + } + return renderedPrincipal, nil + } + } + // Static principal + return principal, nil +} + +func (b *backend) calculateValidPrincipals(data *framework.FieldData, req *logical.Request, role *sshRole, defaultPrincipal, principalsAllowedByRole string, enableTemplating bool, validatePrincipal func([]string, string) bool) ([]string, error) { + validPrincipals := "" + validPrincipalsRaw, ok := data.GetOk("valid_principals") + if ok { + validPrincipals = validPrincipalsRaw.(string) + } else { + validPrincipals = defaultPrincipal + } + + parsedPrincipals := strutil.RemoveDuplicates(strutil.ParseStringSlice(validPrincipals, ","), false) + // Build list of allowed Principals from template and static principalsAllowedByRole + var allowedPrincipals []string + if enableTemplating { + rendered, err := b.renderPrincipal(principalsAllowedByRole, req) + if err != nil { + return nil, err + } + allowedPrincipals = strutil.RemoveDuplicates(strutil.ParseStringSlice(rendered, ","), false) + } else { + allowedPrincipals = strutil.RemoveDuplicates(strutil.ParseStringSlice(principalsAllowedByRole, ","), false) + } + + switch { + case len(parsedPrincipals) == 0: + // There is nothing to process + return nil, nil + case len(allowedPrincipals) == 0: + // User has requested principals to be set, but role is not configured + // with any principals + return nil, fmt.Errorf("role is not configured to allow any principals") + default: + // Role was explicitly configured to allow any principal. 
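+ // To make the check below concrete (an illustrative note, not part
+ // of the upstream change): with allowed_users="*" every requested
+ // principal passes; with allowed_users="ops,{{identity.entity.name}}"
+ // and allowed_users_template=true, each requested principal must
+ // match the rendered list, e.g.
+ //
+ //	allowedPrincipals = []string{"ops", "alice"} // after rendering
+ //	parsedPrincipals  = []string{"alice"}        // accepted
+ //	parsedPrincipals  = []string{"root"}         // rejected below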
+ if principalsAllowedByRole == "*" { + return parsedPrincipals, nil + } + + for _, principal := range parsedPrincipals { + if !validatePrincipal(strutil.RemoveDuplicates(allowedPrincipals, false), principal) { + return nil, fmt.Errorf("%v is not a valid value for valid_principals", principal) + } + } + return parsedPrincipals, nil + } +} + +func validateValidPrincipalForHosts(role *sshRole) func([]string, string) bool { + return func(allowedPrincipals []string, validPrincipal string) bool { + for _, allowedPrincipal := range allowedPrincipals { + if allowedPrincipal == validPrincipal && role.AllowBareDomains { + return true + } + if role.AllowSubdomains && strings.HasSuffix(validPrincipal, "."+allowedPrincipal) { + return true + } + } + + return false + } +} + +func (b *backend) calculateCertificateType(data *framework.FieldData, role *sshRole) (uint32, error) { + requestedCertificateType := data.Get("cert_type").(string) + + var certificateType uint32 + switch requestedCertificateType { + case "user": + if !role.AllowUserCertificates { + return 0, errors.New("cert_type 'user' is not allowed by role") + } + certificateType = ssh.UserCert + case "host": + if !role.AllowHostCertificates { + return 0, errors.New("cert_type 'host' is not allowed by role") + } + certificateType = ssh.HostCert + default: + return 0, errors.New("cert_type must be either 'user' or 'host'") + } + + return certificateType, nil +} + +func (b *backend) calculateKeyID(data *framework.FieldData, req *logical.Request, role *sshRole, pubKey ssh.PublicKey) (string, error) { + reqID := data.Get("key_id").(string) + + if reqID != "" { + if !role.AllowUserKeyIDs { + return "", fmt.Errorf("setting key_id is not allowed by role") + } + return reqID, nil + } + + keyIDFormat := "vault-{{token_display_name}}-{{public_key_hash}}" + if req.DisplayName == "" { + keyIDFormat = "vault-{{public_key_hash}}" + } + + if role.KeyIDFormat != "" { + keyIDFormat = role.KeyIDFormat + } + + keyID := substQuery(keyIDFormat, map[string]string{ + "token_display_name": req.DisplayName, + "role_name": data.Get("role").(string), + "public_key_hash": fmt.Sprintf("%x", sha256.Sum256(pubKey.Marshal())), + }) + + return keyID, nil +} + +func (b *backend) calculateCriticalOptions(data *framework.FieldData, role *sshRole) (map[string]string, error) { + unparsedCriticalOptions := data.Get("critical_options").(map[string]interface{}) + if len(unparsedCriticalOptions) == 0 { + return role.DefaultCriticalOptions, nil + } + + criticalOptions := convertMapToStringValue(unparsedCriticalOptions) + + if role.AllowedCriticalOptions != "" { + notAllowedOptions := []string{} + allowedCriticalOptions := strings.Split(role.AllowedCriticalOptions, ",") + + for option := range criticalOptions { + if !strutil.StrListContains(allowedCriticalOptions, option) { + notAllowedOptions = append(notAllowedOptions, option) + } + } + + if len(notAllowedOptions) != 0 { + return nil, fmt.Errorf("critical options not on allowed list: %v", notAllowedOptions) + } + } + + return criticalOptions, nil +} + +func (b *backend) calculateExtensions(data *framework.FieldData, req *logical.Request, role *sshRole) (map[string]string, bool, error) { + unparsedExtensions := data.Get("extensions").(map[string]interface{}) + extensions := make(map[string]string) + + if len(unparsedExtensions) > 0 { + extensions := convertMapToStringValue(unparsedExtensions) + if role.AllowedExtensions == "*" { + // Allowed extensions was configured to allow all + return extensions, false, nil + } + + notAllowed := 
[]string{} + allowedExtensions := strings.Split(role.AllowedExtensions, ",") + for extensionKey := range extensions { + if !strutil.StrListContains(allowedExtensions, extensionKey) { + notAllowed = append(notAllowed, extensionKey) + } + } + + if len(notAllowed) != 0 { + return nil, false, fmt.Errorf("extensions %v are not on allowed list", notAllowed) + } + return extensions, false, nil + } + + haveMissingEntityInfoWithTemplatedExt := false + + if role.DefaultExtensionsTemplate { + for extensionKey, extensionValue := range role.DefaultExtensions { + // Look for templating markers {{ .* }} + matched := containsTemplateRegex.MatchString(extensionValue) + if matched { + if req.EntityID != "" { + // Retrieve extension value based on template + entityID from request. + templateExtensionValue, err := framework.PopulateIdentityTemplate(extensionValue, req.EntityID, b.System()) + if err == nil { + // Template returned an extension value that we can use + extensions[extensionKey] = templateExtensionValue + } else { + return nil, false, fmt.Errorf("template '%s' could not be rendered -> %s", extensionValue, err) + } + } else { + haveMissingEntityInfoWithTemplatedExt = true + } + } else { + // Static extension value or err template + extensions[extensionKey] = extensionValue + } + } + } else { + extensions = role.DefaultExtensions + } + + return extensions, haveMissingEntityInfoWithTemplatedExt, nil +} + +func (b *backend) calculateTTL(data *framework.FieldData, role *sshRole) (time.Duration, error) { + var ttl, maxTTL time.Duration + var err error + + ttlRaw, specifiedTTL := data.GetOk("ttl") + if specifiedTTL { + ttl = time.Duration(ttlRaw.(int)) * time.Second + } else { + ttl, err = parseutil.ParseDurationSecond(role.TTL) + if err != nil { + return 0, err + } + } + if ttl == 0 { + ttl = b.System().DefaultLeaseTTL() + } + + maxTTL, err = parseutil.ParseDurationSecond(role.MaxTTL) + if err != nil { + return 0, err + } + if maxTTL == 0 { + maxTTL = b.System().MaxLeaseTTL() + } + + if ttl > maxTTL { + // Don't error if they were using system defaults, only error if + // they specifically chose a bad TTL + if !specifiedTTL { + ttl = maxTTL + } else { + return 0, fmt.Errorf("ttl is larger than maximum allowed %d", maxTTL/time.Second) + } + } + + return ttl, nil +} + +func (b *backend) validateSignedKeyRequirements(publickey ssh.PublicKey, role *sshRole) error { + if len(role.AllowedUserKeyTypesLengths) != 0 { + var keyType string + var keyBits int + + switch k := publickey.(type) { + case ssh.CryptoPublicKey: + ff := k.CryptoPublicKey() + switch k := ff.(type) { + case *rsa.PublicKey: + keyType = "rsa" + keyBits = k.N.BitLen() + case *dsa.PublicKey: + keyType = "dsa" + keyBits = k.Parameters.P.BitLen() + case *ecdsa.PublicKey: + keyType = "ecdsa" + keyBits = k.Curve.Params().BitSize + case ed25519.PublicKey: + keyType = "ed25519" + default: + return fmt.Errorf("public key type of %s is not allowed", keyType) + } + default: + return fmt.Errorf("pubkey not suitable for crypto (expected ssh.CryptoPublicKey but found %T)", k) + } + + keyTypeToMapKey := createKeyTypeToMapKey(keyType, keyBits) + + var present bool + var pass bool + for _, kstr := range keyTypeToMapKey[keyType] { + allowed_values, ok := role.AllowedUserKeyTypesLengths[kstr] + if !ok { + continue + } + + present = true + + for _, value := range allowed_values { + if keyType == "rsa" || keyType == "dsa" { + // Regardless of map naming, we always need to validate the + // bit length of RSA and DSA keys. 
Use the keyType flag to condition this check, rather than
+ // kstr, since the map key may name an algorithm instead of the
+ // bare key type.
+ if keyBits == value {
+ pass = true
+ }
+ } else if kstr == "ec" || kstr == "ecdsa" {
+ // If the map string is "ecdsa", we have to validate the keyBits
+ // are a match for an allowed value, meaning that our curve
+ // is allowed. This isn't necessary when a named curve (e.g.
+ // ssh.KeyAlgoECDSA256) is allowed (and hence kstr is that),
+ // because keyBits is already specified in the kstr. Thus,
+ // we have conditioned around kstr and not keyType (like with
+ // rsa or dsa).
+ if keyBits == value {
+ pass = true
+ }
+ } else {
+ // We get here in two cases: we have an algo-named EC key
+ // matching a format specifier in the key map (e.g., a P-256
+ // key with a KeyAlgoECDSA256 entry in the map) or we have an
+ // ed25519 key (which is always allowed).
+ pass = true
+ }
+ }
+ }
+
+ if !present {
+ return fmt.Errorf("key of type %s is not allowed", keyType)
+ }
+
+ if !pass {
+ return fmt.Errorf("key is of an invalid size: %v", keyBits)
+ }
+ }
+ return nil
+}
+
+func (b *creationBundle) sign() (retCert *ssh.Certificate, retErr error) {
+ defer func() {
+ if r := recover(); r != nil {
+ errMsg, ok := r.(string)
+ if ok {
+ retCert = nil
+ retErr = errors.New(errMsg)
+ }
+ }
+ }()
+
+ serialNumber, err := certutil.GenerateSerialNumber()
+ if err != nil {
+ return nil, err
+ }
+
+ now := time.Now()
+
+ sshAlgorithmSigner, ok := b.Signer.(ssh.AlgorithmSigner)
+ if !ok {
+ return nil, fmt.Errorf("failed to generate signed SSH key: signer is not an AlgorithmSigner")
+ }
+
+ // prepare certificate for signing
+ nonce := make([]byte, 32)
+ if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
+ return nil, fmt.Errorf("failed to generate signed SSH key: error generating random nonce: %w", err)
+ }
+ certificate := &ssh.Certificate{
+ Serial: serialNumber.Uint64(),
+ Key: b.PublicKey,
+ KeyId: b.KeyID,
+ ValidPrincipals: b.ValidPrincipals,
+ ValidAfter: uint64(now.Add(-b.Role.NotBeforeDuration).In(time.UTC).Unix()),
+ ValidBefore: uint64(now.Add(b.TTL).In(time.UTC).Unix()),
+ CertType: b.CertificateType,
+ Permissions: ssh.Permissions{
+ CriticalOptions: b.CriticalOptions,
+ Extensions: b.Extensions,
+ },
+ Nonce: nonce,
+ SignatureKey: sshAlgorithmSigner.PublicKey(),
+ }
+
+ // get bytes to sign; this is based on Certificate.bytesForSigning() from the go ssh lib
+ out := certificate.Marshal()
+ // Drop trailing signature length.
+ certificateBytes := out[:len(out)-4]
+
+ algo := b.Role.AlgorithmSigner
+
+ // Handle the new default algorithm selection process correctly.
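+ // Illustrative mapping (not part of the upstream change) of how the
+ // role's algorithm_signer combines with the CA key type here:
+ //
+ //	CA key type   algorithm_signer   algo passed to SignWithAlgorithm
+ //	ssh-rsa       "default"          rsa-sha2-256
+ //	ssh-rsa       "ssh-rsa"          ssh-rsa (legacy SHA-1)
+ //	ssh-ed25519   "default"          "" (signer's own default)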
+ if algo == DefaultAlgorithmSigner && sshAlgorithmSigner.PublicKey().Type() == ssh.KeyAlgoRSA {
+ algo = ssh.SigAlgoRSASHA2256
+ } else if algo == DefaultAlgorithmSigner {
+ algo = ""
+ }
+
+ sig, err := sshAlgorithmSigner.SignWithAlgorithm(rand.Reader, certificateBytes, algo)
+ if err != nil {
+ return nil, fmt.Errorf("failed to generate signed SSH key: sign error: %w", err)
+ }
+
+ certificate.Signature = sig
+
+ return certificate, nil
+}
+
+func createKeyTypeToMapKey(keyType string, keyBits int) map[string][]string {
+ keyTypeToMapKey := map[string][]string{
+ "rsa": {"rsa", ssh.KeyAlgoRSA},
+ "dsa": {"dsa", ssh.KeyAlgoDSA},
+ "ecdsa": {"ecdsa", "ec"},
+ "ed25519": {"ed25519", ssh.KeyAlgoED25519},
+ }
+
+ if keyType == "ecdsa" {
+ if algo, ok := ecCurveBitsToAlgoName[keyBits]; ok {
+ keyTypeToMapKey[keyType] = append(keyTypeToMapKey[keyType], algo)
+ }
+ }
+
+ return keyTypeToMapKey
+}
diff --git a/builtin/logical/ssh/path_lookup.go b/builtin/logical/ssh/path_lookup.go
new file mode 100644
index 0000000..8ea0b53
--- /dev/null
+++ b/builtin/logical/ssh/path_lookup.go
@@ -0,0 +1,103 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package ssh
+
+import (
+ "context"
+ "fmt"
+ "net"
+
+ "github.com/hashicorp/vault/sdk/framework"
+ "github.com/hashicorp/vault/sdk/logical"
+)
+
+func pathLookup(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "lookup",
+
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationPrefix: operationPrefixSSH,
+ OperationVerb: "list",
+ OperationSuffix: "roles-by-ip",
+ },
+
+ Fields: map[string]*framework.FieldSchema{
+ "ip": {
+ Type: framework.TypeString,
+ Description: "[Required] IP address of remote host",
+ },
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathLookupWrite,
+ },
+
+ HelpSynopsis: pathLookupSyn,
+ HelpDescription: pathLookupDesc,
+ }
+}
+
+func (b *backend) pathLookupWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ ipAddr := d.Get("ip").(string)
+ if ipAddr == "" {
+ return logical.ErrorResponse("Missing ip"), nil
+ }
+ ip := net.ParseIP(ipAddr)
+ if ip == nil {
+ return logical.ErrorResponse(fmt.Sprintf("Invalid IP %q", ipAddr)), nil
+ }
+
+ // Get all the roles created in the backend.
+ keys, err := req.Storage.List(ctx, "roles/")
+ if err != nil {
+ return nil, err
+ }
+
+ // Look for roles which have CIDR blocks that encompass the given IP
+ // and create a list out of them.
+ var matchingRoles []string
+ for _, role := range keys {
+ if contains, _ := roleContainsIP(ctx, req.Storage, role, ip.String()); contains {
+ matchingRoles = append(matchingRoles, role)
+ }
+ }
+
+ // Add roles that are allowed to accept any IP address.
+ zeroAddressEntry, err := b.getZeroAddressRoles(ctx, req.Storage)
+ if err != nil {
+ return nil, err
+ }
+ if zeroAddressEntry != nil {
+ matchingRoles = append(matchingRoles, zeroAddressEntry.Roles...)
+ }
+
+ // This list may potentially reveal more information than it is supposed
+ // to: roles for which the client is not authorized will also be
+ // displayed. However, if the client tries to use a role for which it is
+ // not authorized, the request will fail, so this is not a problem and
+ // can even be viewed as a feature: the client can ask for permission on
+ // a specific role if things are not working.
+ //
+ // Ideally, the role names would be filtered so that only the roles the
+ // client is authorized to see are returned.
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "roles": matchingRoles,
+ },
+ }, nil
+}
+
+const pathLookupSyn = `
+List all the roles associated with the given IP address.
+`
+
+const pathLookupDesc = `
+The IP address for which the key is requested is searched for in the CIDR
+blocks registered with Vault using the 'roles' endpoint. Keys can be generated
+only by specifying the 'role' name. The roles that can be used to generate a
+key for a particular IP are listed via this endpoint. For example, if this
+backend is mounted at "ssh", then "ssh/lookup" lists the roles with which keys
+can be generated for a target IP, provided the CIDR block encompassing the IP
+is registered with Vault.
+`
diff --git a/builtin/logical/ssh/path_roles.go b/builtin/logical/ssh/path_roles.go
new file mode 100644
index 0000000..b16c1d7
--- /dev/null
+++ b/builtin/logical/ssh/path_roles.go
@@ -0,0 +1,798 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package ssh
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/go-secure-stdlib/parseutil"
+ "github.com/hashicorp/vault/sdk/framework"
+ "github.com/hashicorp/vault/sdk/helper/cidrutil"
+ "github.com/hashicorp/vault/sdk/helper/consts"
+ "github.com/hashicorp/vault/sdk/logical"
+ "golang.org/x/crypto/ssh"
+)
+
+const (
+ // KeyTypeOTP is a key of type OTP
+ KeyTypeOTP = "otp"
+ // KeyTypeDynamic is the dynamic key type; it has been removed.
+ KeyTypeDynamic = "dynamic"
+ // KeyTypeCA is a key of type CA
+ KeyTypeCA = "ca"
+
+ // DefaultAlgorithmSigner is the default RSA signing algorithm
+ DefaultAlgorithmSigner = "default"
+
+ // Present version of the sshRole struct; when adding a new field or
+ // needing to perform a migration, increment this version and read the
+ // note in checkUpgrade(...).
+ roleEntryVersion = 3
+)
+
+// Structure that represents a role in the SSH backend. This is a common role
+// structure for both OTP and CA roles; not all fields are mandatory for both
+// types, and some apply to one type but not the other.
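+// For illustration (not part of the upstream change), a trimmed view of a CA
+// role entry (zero-valued fields elided) as pathRoleWrite would persist it
+// under roles/<name>, assuming only key_type=ca, allow_user_certificates=true
+// and ttl=30m were supplied:
+//
+//	{
+//	  "key_type": "ca",
+//	  "allow_user_certificates": true,
+//	  "algorithm_signer": "default",
+//	  "ttl": "30m0s",
+//	  "max_ttl": "0s",
+//	  "role_version": 3,
+//	  "not_before_duration": 30000000000
+//	}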
+type sshRole struct { + KeyType string `mapstructure:"key_type" json:"key_type"` + DefaultUser string `mapstructure:"default_user" json:"default_user"` + DefaultUserTemplate bool `mapstructure:"default_user_template" json:"default_user_template"` + CIDRList string `mapstructure:"cidr_list" json:"cidr_list"` + ExcludeCIDRList string `mapstructure:"exclude_cidr_list" json:"exclude_cidr_list"` + Port int `mapstructure:"port" json:"port"` + AllowedUsers string `mapstructure:"allowed_users" json:"allowed_users"` + AllowedUsersTemplate bool `mapstructure:"allowed_users_template" json:"allowed_users_template"` + AllowedDomains string `mapstructure:"allowed_domains" json:"allowed_domains"` + AllowedDomainsTemplate bool `mapstructure:"allowed_domains_template" json:"allowed_domains_template"` + MaxTTL string `mapstructure:"max_ttl" json:"max_ttl"` + TTL string `mapstructure:"ttl" json:"ttl"` + DefaultCriticalOptions map[string]string `mapstructure:"default_critical_options" json:"default_critical_options"` + DefaultExtensions map[string]string `mapstructure:"default_extensions" json:"default_extensions"` + DefaultExtensionsTemplate bool `mapstructure:"default_extensions_template" json:"default_extensions_template"` + AllowedCriticalOptions string `mapstructure:"allowed_critical_options" json:"allowed_critical_options"` + AllowedExtensions string `mapstructure:"allowed_extensions" json:"allowed_extensions"` + AllowUserCertificates bool `mapstructure:"allow_user_certificates" json:"allow_user_certificates"` + AllowHostCertificates bool `mapstructure:"allow_host_certificates" json:"allow_host_certificates"` + AllowBareDomains bool `mapstructure:"allow_bare_domains" json:"allow_bare_domains"` + AllowSubdomains bool `mapstructure:"allow_subdomains" json:"allow_subdomains"` + AllowUserKeyIDs bool `mapstructure:"allow_user_key_ids" json:"allow_user_key_ids"` + KeyIDFormat string `mapstructure:"key_id_format" json:"key_id_format"` + OldAllowedUserKeyLengths map[string]int `mapstructure:"allowed_user_key_lengths" json:"allowed_user_key_lengths,omitempty"` + AllowedUserKeyTypesLengths map[string][]int `mapstructure:"allowed_user_key_types_lengths" json:"allowed_user_key_types_lengths"` + AlgorithmSigner string `mapstructure:"algorithm_signer" json:"algorithm_signer"` + Version int `mapstructure:"role_version" json:"role_version"` + NotBeforeDuration time.Duration `mapstructure:"not_before_duration" json:"not_before_duration"` +} + +func pathListRoles(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "roles/?$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixSSH, + OperationSuffix: "roles", + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: b.pathRoleList, + }, + + HelpSynopsis: pathRoleHelpSyn, + HelpDescription: pathRoleHelpDesc, + } +} + +func pathRoles(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "roles/" + framework.GenericNameWithAtRegex("role"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixSSH, + OperationSuffix: "role", + }, + + Fields: map[string]*framework.FieldSchema{ + "role": { + Type: framework.TypeString, + Description: ` + [Required for all types] + Name of the role being created.`, + }, + "default_user": { + Type: framework.TypeString, + Description: ` + [Required for OTP type] [Optional for CA type] + Default username for which a credential will be generated. 
+ When the endpoint 'creds/' is used without a username, this + value will be used as default username.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Default Username", + }, + }, + "default_user_template": { + Type: framework.TypeBool, + Description: ` + [Not applicable for OTP type] [Optional for CA type] + If set, Default user can be specified using identity template policies. + Non-templated users are also permitted. + `, + Default: false, + }, + "cidr_list": { + Type: framework.TypeString, + Description: ` + [Optional for OTP type] [Not applicable for CA type] + Comma separated list of CIDR blocks for which the role is applicable for. + CIDR blocks can belong to more than one role.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "CIDR List", + }, + }, + "exclude_cidr_list": { + Type: framework.TypeString, + Description: ` + [Optional for OTP type] [Not applicable for CA type] + Comma separated list of CIDR blocks. IP addresses belonging to these blocks are not + accepted by the role. This is particularly useful when big CIDR blocks are being used + by the role and certain parts of it needs to be kept out.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Exclude CIDR List", + }, + }, + "port": { + Type: framework.TypeInt, + Description: ` + [Optional for OTP type] [Not applicable for CA type] + Port number for SSH connection. Default is '22'. Port number does not + play any role in creation of OTP. For 'otp' type, this is just a way + to inform client about the port number to use. Port number will be + returned to client by Vault server along with OTP.`, + DisplayAttrs: &framework.DisplayAttributes{ + Value: 22, + }, + }, + "key_type": { + Type: framework.TypeString, + Description: ` + [Required for all types] + Type of key used to login to hosts. It can be either 'otp' or 'ca'. + 'otp' type requires agent to be installed in remote hosts.`, + AllowedValues: []interface{}{"otp", "ca"}, + DisplayAttrs: &framework.DisplayAttributes{ + Value: "ca", + }, + }, + "allowed_users": { + Type: framework.TypeString, + Description: ` + [Optional for all types] [Works differently for CA type] + If this option is not specified, or is '*', client can request a + credential for any valid user at the remote host, including the + admin user. If only certain usernames are to be allowed, then + this list enforces it. If this field is set, then credentials + can only be created for default_user and usernames present in + this list. Setting this option will enable all the users with + access to this role to fetch credentials for all other usernames + in this list. Use with caution. N.B.: with the CA type, an empty + list means that no users are allowed; explicitly specify '*' to + allow any user. + `, + }, + "allowed_users_template": { + Type: framework.TypeBool, + Description: ` + [Not applicable for OTP type] [Optional for CA type] + If set, Allowed users can be specified using identity template policies. + Non-templated users are also permitted. + `, + Default: false, + }, + "allowed_domains": { + Type: framework.TypeString, + Description: ` + [Not applicable for OTP type] [Optional for CA type] + If this option is not specified, client can request for a signed certificate for any + valid host. If only certain domains are allowed, then this list enforces it. + `, + }, + "allowed_domains_template": { + Type: framework.TypeBool, + Description: ` + [Not applicable for OTP type] [Optional for CA type] + If set, Allowed domains can be specified using identity template policies. 
+ Non-templated domains are also permitted. + `, + Default: false, + }, + "ttl": { + Type: framework.TypeDurationSecond, + Description: ` + [Not applicable for OTP type] [Optional for CA type] + The lease duration if no specific lease duration is + requested. The lease duration controls the expiration + of certificates issued by this backend. Defaults to + the value of max_ttl.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "TTL", + }, + }, + "max_ttl": { + Type: framework.TypeDurationSecond, + Description: ` + [Not applicable for OTP type] [Optional for CA type] + The maximum allowed lease duration + `, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Max TTL", + }, + }, + "allowed_critical_options": { + Type: framework.TypeString, + Description: ` + [Not applicable for OTP type] [Optional for CA type] + A comma-separated list of critical options that certificates can have when signed. + To allow any critical options, set this to an empty string. + `, + }, + "allowed_extensions": { + Type: framework.TypeString, + Description: ` + [Not applicable for OTP type] [Optional for CA type] + A comma-separated list of extensions that certificates can have when signed. + An empty list means that no extension overrides are allowed by an end-user; explicitly + specify '*' to allow any extensions to be set. + `, + }, + "default_critical_options": { + Type: framework.TypeMap, + Description: ` + [Not applicable for OTP type] [Optional for CA type] + Critical options certificates should + have if none are provided when signing. This field takes in key + value pairs in JSON format. Note that these are not restricted + by "allowed_critical_options". Defaults to none. + `, + }, + "default_extensions": { + Type: framework.TypeMap, + Description: ` + [Not applicable for OTP type] [Optional for CA type] + Extensions certificates should have if + none are provided when signing. This field takes in key value + pairs in JSON format. Note that these are not restricted by + "allowed_extensions". Defaults to none. + `, + }, + "default_extensions_template": { + Type: framework.TypeBool, + Description: ` + [Not applicable for OTP type] [Optional for CA type] + If set, Default extension values can be specified using identity template policies. + Non-templated extension values are also permitted. + `, + Default: false, + }, + "allow_user_certificates": { + Type: framework.TypeBool, + Description: ` + [Not applicable for OTP type] [Optional for CA type] + If set, certificates are allowed to be signed for use as a 'user'. + `, + Default: false, + }, + "allow_host_certificates": { + Type: framework.TypeBool, + Description: ` + [Not applicable for OTP type] [Optional for CA type] + If set, certificates are allowed to be signed for use as a 'host'. + `, + Default: false, + }, + "allow_bare_domains": { + Type: framework.TypeBool, + Description: ` + [Not applicable for OTP type] [Optional for CA type] + If set, host certificates that are requested are allowed to use the base domains listed in + "allowed_domains", e.g. "example.com". + This is a separate option as in some cases this can be considered a security threat. + `, + }, + "allow_subdomains": { + Type: framework.TypeBool, + Description: ` + [Not applicable for OTP type] [Optional for CA type] + If set, host certificates that are requested are allowed to use subdomains of those listed in "allowed_domains". 
+ `, + }, + "allow_user_key_ids": { + Type: framework.TypeBool, + Description: ` + [Not applicable for OTP type] [Optional for CA type] + If true, users can override the key ID for a signed certificate with the "key_id" field. + When false, the key ID will always be the token display name. + The key ID is logged by the SSH server and can be useful for auditing. + `, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Allow User Key IDs", + }, + }, + "key_id_format": { + Type: framework.TypeString, + Description: ` + [Not applicable for OTP type] [Optional for CA type] + When supplied, this value specifies a custom format for the key id of a signed certificate. + The following variables are available for use: '{{token_display_name}}' - The display name of + the token used to make the request. '{{role_name}}' - The name of the role signing the request. + '{{public_key_hash}}' - A SHA256 checksum of the public key that is being signed. + `, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Key ID Format", + }, + }, + "allowed_user_key_lengths": { + Type: framework.TypeMap, + Description: ` + [Not applicable for OTP type] [Optional for CA type] + If set, allows the enforcement of key types and minimum key sizes to be signed. + `, + }, + "algorithm_signer": { + Type: framework.TypeString, + Description: ` + [Not applicable for OTP type] [Optional for CA type] + When supplied, this value specifies a signing algorithm for the key. Possible values: + ssh-rsa, rsa-sha2-256, rsa-sha2-512, default, or the empty string. + `, + AllowedValues: []interface{}{"", DefaultAlgorithmSigner, ssh.SigAlgoRSA, ssh.SigAlgoRSASHA2256, ssh.SigAlgoRSASHA2512}, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Signing Algorithm", + }, + }, + "not_before_duration": { + Type: framework.TypeDurationSecond, + Default: 30, + Description: ` + [Not applicable for OTP type] [Optional for CA type] + The duration that the SSH certificate should be backdated by at issuance.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Not before duration", + Value: 30, + }, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathRoleRead, + logical.UpdateOperation: b.pathRoleWrite, + logical.DeleteOperation: b.pathRoleDelete, + }, + + HelpSynopsis: pathRoleHelpSyn, + HelpDescription: pathRoleHelpDesc, + } +} + +func (b *backend) pathRoleWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + roleName := d.Get("role").(string) + if roleName == "" { + return logical.ErrorResponse("missing role name"), nil + } + + // Allowed users is an optional field, applicable for both OTP and CA types. 
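+ // For example (illustrative, not part of the upstream change), the two
+ // key types configure it differently:
+ //
+ //	vault write ssh/roles/otp-role key_type=otp default_user=ubuntu \
+ //	    cidr_list=10.0.0.0/24 allowed_users=ubuntu,deploy
+ //	vault write ssh/roles/ca-role key_type=ca allow_user_certificates=true \
+ //	    allowed_users="*"
+ //
+ // For a CA role an empty allowed_users permits no principals at all, so
+ // "*" must be given explicitly to allow arbitrary users.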
+ allowedUsers := d.Get("allowed_users").(string)
+
+ // Validate the CIDR blocks
+ cidrList := d.Get("cidr_list").(string)
+ if cidrList != "" {
+ valid, err := cidrutil.ValidateCIDRListString(cidrList, ",")
+ if err != nil {
+ return nil, fmt.Errorf("failed to validate cidr_list: %w", err)
+ }
+ if !valid {
+ return logical.ErrorResponse("failed to validate cidr_list"), nil
+ }
+ }
+
+ // Validate the excluded CIDR blocks
+ excludeCidrList := d.Get("exclude_cidr_list").(string)
+ if excludeCidrList != "" {
+ valid, err := cidrutil.ValidateCIDRListString(excludeCidrList, ",")
+ if err != nil {
+ return nil, fmt.Errorf("failed to validate exclude_cidr_list: %w", err)
+ }
+ if !valid {
+ return logical.ErrorResponse("failed to validate exclude_cidr_list"), nil
+ }
+ }
+
+ port := d.Get("port").(int)
+ if port == 0 {
+ port = 22
+ }
+
+ keyType := d.Get("key_type").(string)
+ if keyType == "" {
+ return logical.ErrorResponse("missing key type"), nil
+ }
+ keyType = strings.ToLower(keyType)
+
+ var roleEntry sshRole
+ if keyType == KeyTypeOTP {
+ defaultUser := d.Get("default_user").(string)
+ if defaultUser == "" {
+ return logical.ErrorResponse("missing default user"), nil
+ }
+
+ // Below are the only fields used from the role structure for OTP type.
+ roleEntry = sshRole{
+ DefaultUser: defaultUser,
+ CIDRList: cidrList,
+ ExcludeCIDRList: excludeCidrList,
+ KeyType: KeyTypeOTP,
+ Port: port,
+ AllowedUsers: allowedUsers,
+ Version: roleEntryVersion,
+ }
+ } else if keyType == KeyTypeDynamic {
+ return logical.ErrorResponse("dynamic key type roles are no longer supported"), nil
+ } else if keyType == KeyTypeCA {
+ algorithmSigner := DefaultAlgorithmSigner
+ algorithmSignerRaw, ok := d.GetOk("algorithm_signer")
+ if ok {
+ algorithmSigner = algorithmSignerRaw.(string)
+ switch algorithmSigner {
+ case ssh.SigAlgoRSA, ssh.SigAlgoRSASHA2256, ssh.SigAlgoRSASHA2512:
+ case "", DefaultAlgorithmSigner:
+ // This case is valid, and the sign operation will use the signer's
+ // default algorithm. Explicitly reset the value to the default value
+ // rather than use the more vague implicit empty string.
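+ // (Concretely: after this normalization, a role written with an empty
+ // algorithm_signer reads back as DefaultAlgorithmSigner, so operators
+ // see the effective choice rather than an ambiguous empty string.)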
+ algorithmSigner = DefaultAlgorithmSigner + default: + return nil, fmt.Errorf("unknown algorithm signer %q", algorithmSigner) + } + } + + role, errorResponse := b.createCARole(allowedUsers, d.Get("default_user").(string), algorithmSigner, d) + if errorResponse != nil { + return errorResponse, nil + } + roleEntry = *role + } else { + return logical.ErrorResponse("invalid key type"), nil + } + + entry, err := logical.StorageEntryJSON(fmt.Sprintf("roles/%s", roleName), roleEntry) + if err != nil { + return nil, err + } + + if err := req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + return nil, nil +} + +func (b *backend) createCARole(allowedUsers, defaultUser, signer string, data *framework.FieldData) (*sshRole, *logical.Response) { + ttl := time.Duration(data.Get("ttl").(int)) * time.Second + maxTTL := time.Duration(data.Get("max_ttl").(int)) * time.Second + role := &sshRole{ + AllowedCriticalOptions: data.Get("allowed_critical_options").(string), + AllowedExtensions: data.Get("allowed_extensions").(string), + AllowUserCertificates: data.Get("allow_user_certificates").(bool), + AllowHostCertificates: data.Get("allow_host_certificates").(bool), + AllowedUsers: allowedUsers, + AllowedUsersTemplate: data.Get("allowed_users_template").(bool), + AllowedDomains: data.Get("allowed_domains").(string), + AllowedDomainsTemplate: data.Get("allowed_domains_template").(bool), + DefaultUser: defaultUser, + DefaultUserTemplate: data.Get("default_user_template").(bool), + AllowBareDomains: data.Get("allow_bare_domains").(bool), + AllowSubdomains: data.Get("allow_subdomains").(bool), + AllowUserKeyIDs: data.Get("allow_user_key_ids").(bool), + DefaultExtensionsTemplate: data.Get("default_extensions_template").(bool), + KeyIDFormat: data.Get("key_id_format").(string), + KeyType: KeyTypeCA, + AlgorithmSigner: signer, + Version: roleEntryVersion, + NotBeforeDuration: time.Duration(data.Get("not_before_duration").(int)) * time.Second, + } + + if !role.AllowUserCertificates && !role.AllowHostCertificates { + return nil, logical.ErrorResponse("Either 'allow_user_certificates' or 'allow_host_certificates' must be set to 'true'") + } + + defaultCriticalOptions := convertMapToStringValue(data.Get("default_critical_options").(map[string]interface{})) + defaultExtensions := convertMapToStringValue(data.Get("default_extensions").(map[string]interface{})) + allowedUserKeyLengths, err := convertMapToIntSlice(data.Get("allowed_user_key_lengths").(map[string]interface{})) + if err != nil { + return nil, logical.ErrorResponse(fmt.Sprintf("error processing allowed_user_key_lengths: %s", err.Error())) + } + + if ttl != 0 && maxTTL != 0 && ttl > maxTTL { + return nil, logical.ErrorResponse( + `"ttl" value must be less than "max_ttl" when both are specified`) + } + + // Persist TTLs + role.TTL = ttl.String() + role.MaxTTL = maxTTL.String() + role.DefaultCriticalOptions = defaultCriticalOptions + role.DefaultExtensions = defaultExtensions + role.AllowedUserKeyTypesLengths = allowedUserKeyLengths + + return role, nil +} + +func (b *backend) getRole(ctx context.Context, s logical.Storage, n string) (*sshRole, error) { + entry, err := s.Get(ctx, "roles/"+n) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result sshRole + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + if err := b.checkUpgrade(ctx, s, n, &result); err != nil { + return nil, err + } + + return &result, nil +} + +func (b *backend) checkUpgrade(ctx context.Context, s logical.Storage, n 
string, result *sshRole) error {
+ modified := false
+
+ // NOTE: When introducing a new migration, increment roleEntryVersion and
+ // check if the version is less than the version this change was introduced
+ // at and perform the change. At the end, set modified and update the
+ // version to the version this migration was introduced at! Additionally,
+ // add new migrations after all existing migrations.
+ //
+ // Otherwise, past or future migrations may not execute!
+ if result.Version == roleEntryVersion {
+ return nil
+ }
+
+ // Role version introduced at version 1, migrating OldAllowedUserKeyLengths
+ // to the newer AllowedUserKeyTypesLengths field.
+ if result.Version < 1 {
+ // Only migrate if we have old data and no new data to avoid clobbering.
+ //
+ // This change introduced the first role version, value of 1.
+ if len(result.OldAllowedUserKeyLengths) > 0 && len(result.AllowedUserKeyTypesLengths) == 0 {
+ result.AllowedUserKeyTypesLengths = make(map[string][]int)
+ for k, v := range result.OldAllowedUserKeyLengths {
+ result.AllowedUserKeyTypesLengths[k] = []int{v}
+ }
+ result.OldAllowedUserKeyLengths = nil
+ }
+
+ result.Version = 1
+ modified = true
+ }
+
+ // Role version 2 migrates an empty AlgorithmSigner to an explicit ssh-rsa
+ // value WHEN the SSH CA key is an RSA key.
+ if result.Version < 2 {
+ // In order to perform the version 2 upgrade, we need knowledge of the
+ // signing key type as we want to make ssh-rsa an explicitly notated
+ // algorithm choice.
+ var publicKey ssh.PublicKey
+ publicKeyEntry, err := caKey(ctx, s, caPublicKey)
+ if err != nil {
+ b.Logger().Debug(fmt.Sprintf("failed to load public key entry while attempting to migrate: %v", err))
+ goto SKIPVERSION2
+ }
+ if publicKeyEntry == nil || publicKeyEntry.Key == "" {
+ b.Logger().Debug("got empty public key entry while attempting to migrate")
+ goto SKIPVERSION2
+ }
+
+ publicKey, err = parsePublicSSHKey(publicKeyEntry.Key)
+ if err == nil {
+ // Move an empty signing algorithm to an explicit ssh-rsa (SHA-1)
+ // if this key is of type RSA. This isn't a secure default but
+ // exists for backwards compatibility with existing versions of
+ // Vault. By making it explicit, operators can see that this is
+ // the value and move it to a newer algorithm in the future.
+ if publicKey.Type() == ssh.KeyAlgoRSA && result.AlgorithmSigner == "" {
+ result.AlgorithmSigner = ssh.SigAlgoRSA
+ }
+
+ result.Version = 2
+ modified = true
+ }
+
+ SKIPVERSION2:
+ err = nil
+ }
+
+ if result.Version < 3 {
+ modified = true
+ result.NotBeforeDuration = 30 * time.Second
+ result.Version = 3
+ }
+
+ // Add new migrations just before here.
+ //
+ // Condition copied from PKI builtin.
+ if modified && (b.System().LocalMount() || !b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) {
+ jsonEntry, err := logical.StorageEntryJSON("roles/"+n, &result)
+ if err != nil {
+ return err
+ }
+ if err := s.Put(ctx, jsonEntry); err != nil {
+ // Only perform upgrades on the replication primary
+ if !strings.Contains(err.Error(), logical.ErrReadOnly.Error()) {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// parseRole converts an sshRole object into its map[string]interface{} representation,
+// with appropriate values for each KeyType. If the KeyType is invalid, it will return
+// an error.
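+// For an OTP role, for example, only the OTP-relevant fields are returned
+// (default_user, cidr_list, exclude_cidr_list, key_type, port and
+// allowed_users), as the switch below shows.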
+func (b *backend) parseRole(role *sshRole) (map[string]interface{}, error) { + var result map[string]interface{} + + switch role.KeyType { + case KeyTypeOTP: + result = map[string]interface{}{ + "default_user": role.DefaultUser, + "cidr_list": role.CIDRList, + "exclude_cidr_list": role.ExcludeCIDRList, + "key_type": role.KeyType, + "port": role.Port, + "allowed_users": role.AllowedUsers, + } + case KeyTypeCA: + ttl, err := parseutil.ParseDurationSecond(role.TTL) + if err != nil { + return nil, err + } + maxTTL, err := parseutil.ParseDurationSecond(role.MaxTTL) + if err != nil { + return nil, err + } + + result = map[string]interface{}{ + "allowed_users": role.AllowedUsers, + "allowed_users_template": role.AllowedUsersTemplate, + "allowed_domains": role.AllowedDomains, + "allowed_domains_template": role.AllowedDomainsTemplate, + "default_user": role.DefaultUser, + "default_user_template": role.DefaultUserTemplate, + "ttl": int64(ttl.Seconds()), + "max_ttl": int64(maxTTL.Seconds()), + "allowed_critical_options": role.AllowedCriticalOptions, + "allowed_extensions": role.AllowedExtensions, + "allow_user_certificates": role.AllowUserCertificates, + "allow_host_certificates": role.AllowHostCertificates, + "allow_bare_domains": role.AllowBareDomains, + "allow_subdomains": role.AllowSubdomains, + "allow_user_key_ids": role.AllowUserKeyIDs, + "key_id_format": role.KeyIDFormat, + "key_type": role.KeyType, + "default_critical_options": role.DefaultCriticalOptions, + "default_extensions": role.DefaultExtensions, + "default_extensions_template": role.DefaultExtensionsTemplate, + "allowed_user_key_lengths": role.AllowedUserKeyTypesLengths, + "algorithm_signer": role.AlgorithmSigner, + "not_before_duration": int64(role.NotBeforeDuration.Seconds()), + } + case KeyTypeDynamic: + return nil, fmt.Errorf("dynamic key type roles are no longer supported") + default: + return nil, fmt.Errorf("invalid key type: %v", role.KeyType) + } + + return result, nil +} + +func (b *backend) pathRoleList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + entries, err := req.Storage.List(ctx, "roles/") + if err != nil { + return nil, err + } + + keyInfo := map[string]interface{}{} + for _, entry := range entries { + role, err := b.getRole(ctx, req.Storage, entry) + if err != nil { + // On error, log warning and continue + if b.Logger().IsWarn() { + b.Logger().Warn("error getting role info", "role", entry, "error", err) + } + continue + } + if role == nil { + // On empty role, log warning and continue + if b.Logger().IsWarn() { + b.Logger().Warn("no role info found", "role", entry) + } + continue + } + + roleInfo, err := b.parseRole(role) + if err != nil { + if b.Logger().IsWarn() { + b.Logger().Warn("error parsing role info", "role", entry, "error", err) + } + continue + } + + if keyType, ok := roleInfo["key_type"]; ok { + keyInfo[entry] = map[string]interface{}{ + "key_type": keyType, + } + } + } + + return logical.ListResponseWithInfo(entries, keyInfo), nil +} + +func (b *backend) pathRoleRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + role, err := b.getRole(ctx, req.Storage, d.Get("role").(string)) + if err != nil { + return nil, err + } + if role == nil { + return nil, nil + } + + roleInfo, err := b.parseRole(role) + if err != nil { + return nil, err + } + + return &logical.Response{ + Data: roleInfo, + }, nil +} + +func (b *backend) pathRoleDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) 
(*logical.Response, error) {
+ roleName := d.Get("role").(string)
+
+ // If the role was given privilege to accept any IP address, there will
+ // be an entry for this role in the zero-address roles list. Before the role
+ // is removed, the entry in the list has to be removed.
+ err := b.removeZeroAddressRole(ctx, req.Storage, roleName)
+ if err != nil {
+ return nil, err
+ }
+
+ err = req.Storage.Delete(ctx, fmt.Sprintf("roles/%s", roleName))
+ if err != nil {
+ return nil, err
+ }
+ return nil, nil
+}
+
+const pathRoleHelpSyn = `
+Manage the 'roles' that can be created with this backend.
+`
+
+const pathRoleHelpDesc = `
+This path allows you to manage the roles that are used to generate credentials.
+
+A role takes a 'key_type' parameter that decides what type of credential the role
+generates. If remote hosts have the Vault SSH Agent installed, the 'otp' type can
+be used; otherwise, the 'ca' type can be used to sign client keys. (The 'dynamic'
+key type is no longer supported.)
+
+If the backend is mounted at "ssh" and the role is created at "ssh/roles/web",
+then a user could request a credential at "ssh/creds/web" for an IP that
+belongs to the role. The credential will be for the 'default_user' registered
+with the role. There is also an optional parameter 'username' for the 'creds/' endpoint.
+`
diff --git a/builtin/logical/ssh/path_sign.go b/builtin/logical/ssh/path_sign.go
new file mode 100644
index 0000000..36971eb
--- /dev/null
+++ b/builtin/logical/ssh/path_sign.go
@@ -0,0 +1,105 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package ssh
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/hashicorp/vault/sdk/framework"
+ "github.com/hashicorp/vault/sdk/logical"
+)
+
+func pathSign(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "sign/" + framework.GenericNameWithAtRegex("role"),
+
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationPrefix: operationPrefixSSH,
+ OperationVerb: "sign",
+ OperationSuffix: "certificate",
+ },
+
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathSign,
+ },
+
+ Fields: map[string]*framework.FieldSchema{
+ "role": {
+ Type: framework.TypeString,
+ Description: `The desired role with configuration for this request.`,
+ },
+ "ttl": {
+ Type: framework.TypeDurationSecond,
+ Description: `The requested Time To Live for the SSH certificate;
+sets the expiration date. If not specified
+the role default, backend default, or system
+default TTL is used, in that order. Cannot
+be later than the role max TTL.`,
+ },
+ "public_key": {
+ Type: framework.TypeString,
+ Description: `SSH public key that should be signed.`,
+ },
+ "valid_principals": {
+ Type: framework.TypeString,
+ Description: `Valid principals, either usernames or hostnames, that the certificate should be signed for.`,
+ },
+ "cert_type": {
+ Type: framework.TypeString,
+ Description: `Type of certificate to be created; either "user" or "host".`,
+ Default: "user",
+ },
+ "key_id": {
+ Type: framework.TypeString,
+ Description: `Key id that the created certificate should have.
If not specified, the display name of the token will be used.`,
+ },
+ "critical_options": {
+ Type: framework.TypeMap,
+ Description: `Critical options that the certificate should be signed for.`,
+ },
+ "extensions": {
+ Type: framework.TypeMap,
+ Description: `Extensions that the certificate should be signed for.`,
+ },
+ },
+
+ HelpSynopsis: `Request signing an SSH key using a certain role with the provided details.`,
+ HelpDescription: `This path allows SSH keys to be signed according to the policy of the given role.`,
+ }
+}
+
+func (b *backend) pathSign(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+ roleName := data.Get("role").(string)
+
+ // Get the role
+ role, err := b.getRole(ctx, req.Storage, roleName)
+ if err != nil {
+ return nil, err
+ }
+ if role == nil {
+ return logical.ErrorResponse(fmt.Sprintf("Unknown role: %s", roleName)), nil
+ }
+
+ return b.pathSignCertificate(ctx, req, data, role)
+}
+
+func (b *backend) pathSignCertificate(ctx context.Context, req *logical.Request, data *framework.FieldData, role *sshRole) (*logical.Response, error) {
+ publicKey := data.Get("public_key").(string)
+ if publicKey == "" {
+ return logical.ErrorResponse("missing public_key"), nil
+ }
+
+ userPublicKey, err := parsePublicSSHKey(publicKey)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("failed to parse public_key as SSH key: %s", err)), nil
+ }
+
+ err = b.validateSignedKeyRequirements(userPublicKey, role)
+ if err != nil {
+ return logical.ErrorResponse(fmt.Sprintf("public_key failed to meet the key requirements: %s", err)), nil
+ }
+
+ return b.pathSignIssueCertificateHelper(ctx, req, data, role, userPublicKey)
+}
diff --git a/builtin/logical/ssh/path_verify.go b/builtin/logical/ssh/path_verify.go
new file mode 100644
index 0000000..9062722
--- /dev/null
+++ b/builtin/logical/ssh/path_verify.go
@@ -0,0 +1,111 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package ssh
+
+import (
+ "context"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/sdk/framework"
+ "github.com/hashicorp/vault/sdk/logical"
+)
+
+func pathVerify(b *backend) *framework.Path {
+ return &framework.Path{
+ Pattern: "verify",
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationPrefix: operationPrefixSSH,
+ OperationVerb: "verify",
+ OperationSuffix: "otp",
+ },
+ Fields: map[string]*framework.FieldSchema{
+ "otp": {
+ Type: framework.TypeString,
+ Description: "[Required] One-time password (OTP) that needs to be validated",
+ },
+ },
+ Callbacks: map[logical.Operation]framework.OperationFunc{
+ logical.UpdateOperation: b.pathVerifyWrite,
+ },
+ HelpSynopsis: pathVerifyHelpSyn,
+ HelpDescription: pathVerifyHelpDesc,
+ }
+}
+
+func (b *backend) getOTP(ctx context.Context, s logical.Storage, n string) (*sshOTP, error) {
+ entry, err := s.Get(ctx, "otp/"+n)
+ if err != nil {
+ return nil, err
+ }
+ if entry == nil {
+ return nil, nil
+ }
+
+ var result sshOTP
+ if err := entry.DecodeJSON(&result); err != nil {
+ return nil, err
+ }
+
+ return &result, nil
+}
+
+func (b *backend) pathVerifyWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ otp := d.Get("otp").(string)
+
+ // If the OTP is not a UUID but matches VerifyEchoRequest, the response
+ // will be VerifyEchoResponse. The agent uses this to check whether its
+ // connection to the Vault server is working.
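+ // (Illustrative agent-side check, with a hypothetical "ssh" mount:
+ //     vault write ssh/verify otp=<value of api.VerifyEchoRequest>
+ // a "message" field equal to api.VerifyEchoResponse confirms connectivity.)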
+ if otp == api.VerifyEchoRequest {
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "message": api.VerifyEchoResponse,
+ },
+ }, nil
+ }
+
+ // Salt the OTP before the lookup: the entry was stored under the salted
+ // OTP, not the raw value. Because the backend salt (the seed) is the
+ // same, salting the incoming OTP yields the same storage ID.
+ salt, err := b.Salt(ctx)
+ if err != nil {
+ return nil, err
+ }
+ otpSalted := salt.SaltID(otp)
+
+ // Return nil if there is no entry found for the OTP
+ otpEntry, err := b.getOTP(ctx, req.Storage, otpSalted)
+ if err != nil {
+ return nil, err
+ }
+ if otpEntry == nil {
+ return logical.ErrorResponse("OTP not found"), nil
+ }
+
+ // Delete the OTP if found. This is what makes the key an OTP.
+ err = req.Storage.Delete(ctx, "otp/"+otpSalted)
+ if err != nil {
+ return nil, err
+ }
+
+ // Return the username and IP only if there were no problems up to this point.
+ return &logical.Response{
+ Data: map[string]interface{}{
+ "username": otpEntry.Username,
+ "ip": otpEntry.IP,
+ "role_name": otpEntry.RoleName,
+ },
+ }, nil
+}
+
+const pathVerifyHelpSyn = `
+Validate the OTP provided by Vault SSH Agent.
+`
+
+const pathVerifyHelpDesc = `
+This path is used by the Vault SSH Agent running on remote hosts. The OTP
+provided by the client is sent to Vault for validation by the agent. If Vault
+finds an entry for the OTP, it responds with the username and IP it is associated
+with. The agent uses this information to authenticate the client. Vault deletes the
+OTP after validating it once.
+`
diff --git a/builtin/logical/ssh/secret_otp.go b/builtin/logical/ssh/secret_otp.go
new file mode 100644
index 0000000..a70cf60
--- /dev/null
+++ b/builtin/logical/ssh/secret_otp.go
@@ -0,0 +1,49 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package ssh
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/hashicorp/vault/sdk/framework"
+ "github.com/hashicorp/vault/sdk/logical"
+)
+
+const SecretOTPType = "secret_otp_type"
+
+func secretOTP(b *backend) *framework.Secret {
+ return &framework.Secret{
+ Type: SecretOTPType,
+ Fields: map[string]*framework.FieldSchema{
+ "otp": {
+ Type: framework.TypeString,
+ Description: "One time password",
+ },
+ },
+
+ Revoke: b.secretOTPRevoke,
+ }
+}
+
+func (b *backend) secretOTPRevoke(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ otpRaw, ok := req.Secret.InternalData["otp"]
+ if !ok {
+ return nil, fmt.Errorf("secret is missing internal data")
+ }
+ otp, ok := otpRaw.(string)
+ if !ok {
+ return nil, fmt.Errorf("secret internal data is not a string")
+ }
+
+ salt, err := b.Salt(ctx)
+ if err != nil {
+ return nil, err
+ }
+ err = req.Storage.Delete(ctx, "otp/"+salt.SaltID(otp))
+ if err != nil {
+ return nil, err
+ }
+ return nil, nil
+}
diff --git a/builtin/logical/ssh/util.go b/builtin/logical/ssh/util.go
new file mode 100644
index 0000000..b886750
--- /dev/null
+++ b/builtin/logical/ssh/util.go
@@ -0,0 +1,136 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package ssh
+
+import (
+ "context"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/pem"
+ "fmt"
+ "net"
+ "strings"
+
+ "github.com/hashicorp/go-secure-stdlib/parseutil"
+ "github.com/hashicorp/vault/sdk/logical"
+ "golang.org/x/crypto/ssh"
+)
+
+// generateRSAKeys creates a new RSA key pair with the given key length. The
+// private key is PEM encoded and the public key is in OpenSSH format.
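+//
+// Illustrative usage (a sketch; the key size is an arbitrary example):
+//
+//     pub, priv, err := generateRSAKeys(2048)
+//
+// pub has the form "ssh-rsa AAAA..." and priv is a PEM block beginning
+// with "-----BEGIN RSA PRIVATE KEY-----".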
+func generateRSAKeys(keyBits int) (publicKeyRsa string, privateKeyRsa string, err error) {
+ privateKey, err := rsa.GenerateKey(rand.Reader, keyBits)
+ if err != nil {
+ return "", "", fmt.Errorf("error generating RSA key-pair: %w", err)
+ }
+
+ privateKeyRsa = string(pem.EncodeToMemory(&pem.Block{
+ Type: "RSA PRIVATE KEY",
+ Bytes: x509.MarshalPKCS1PrivateKey(privateKey),
+ }))
+
+ sshPublicKey, err := ssh.NewPublicKey(privateKey.Public())
+ if err != nil {
+ return "", "", fmt.Errorf("error generating RSA key-pair: %w", err)
+ }
+ publicKeyRsa = "ssh-rsa " + base64.StdEncoding.EncodeToString(sshPublicKey.Marshal())
+ return
+}
+
+// roleContainsIP takes an IP address and a role name and checks if the IP is
+// part of the CIDR blocks belonging to the role.
+func roleContainsIP(ctx context.Context, s logical.Storage, roleName string, ip string) (bool, error) {
+ if roleName == "" {
+ return false, fmt.Errorf("missing role name")
+ }
+
+ if ip == "" {
+ return false, fmt.Errorf("missing ip")
+ }
+
+ roleEntry, err := s.Get(ctx, fmt.Sprintf("roles/%s", roleName))
+ if err != nil {
+ return false, fmt.Errorf("error retrieving role: %w", err)
+ }
+ if roleEntry == nil {
+ return false, fmt.Errorf("role %q not found", roleName)
+ }
+
+ var role sshRole
+ if err := roleEntry.DecodeJSON(&role); err != nil {
+ return false, fmt.Errorf("error decoding role %q", roleName)
+ }
+
+ return cidrListContainsIP(ip, role.CIDRList)
+}
+
+// cidrListContainsIP returns true if the IP supplied by the user is part of
+// the comma-separated CIDR blocks.
+func cidrListContainsIP(ip, cidrList string) (bool, error) {
+ if len(cidrList) == 0 {
+ return false, fmt.Errorf("IP does not belong to role")
+ }
+ for _, item := range strings.Split(cidrList, ",") {
+ _, cidrIPNet, err := net.ParseCIDR(item)
+ if err != nil {
+ return false, fmt.Errorf("invalid CIDR entry %q", item)
+ }
+ if cidrIPNet.Contains(net.ParseIP(ip)) {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+func parsePublicSSHKey(key string) (ssh.PublicKey, error) {
+ keyParts := strings.Split(key, " ")
+ if len(keyParts) > 1 {
+ // Someone has sent the 'full' public key rather than just the base64-encoded part that the ssh library wants
+ key = keyParts[1]
+ }
+
+ decodedKey, err := base64.StdEncoding.DecodeString(key)
+ if err != nil {
+ return nil, err
+ }
+
+ return ssh.ParsePublicKey(decodedKey)
+}
+
+func convertMapToStringValue(initial map[string]interface{}) map[string]string {
+ result := map[string]string{}
+ for key, value := range initial {
+ result[key] = fmt.Sprintf("%v", value)
+ }
+ return result
+}
+
+func convertMapToIntSlice(initial map[string]interface{}) (map[string][]int, error) {
+ var err error
+ result := map[string][]int{}
+
+ for key, value := range initial {
+ result[key], err = parseutil.SafeParseIntSlice(value, 0 /* no upper bound on number of key lengths per key type */)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return result, nil
+}
+
+// substQuery is a simple template processor for custom format inputs: each
+// "{{k}}" placeholder in tpl is replaced with its value from data.
+func substQuery(tpl string, data map[string]string) string {
+ for k, v := range data {
+ tpl = strings.ReplaceAll(tpl, fmt.Sprintf("{{%s}}", k), v)
+ }
+
+ return tpl
+}
diff --git a/builtin/logical/totp/backend.go b/builtin/logical/totp/backend.go
new file mode 100644
index 0000000..5f0cb52
--- /dev/null
+++ b/builtin/logical/totp/backend.go
@@ -0,0 +1,60 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package totp + +import ( + "context" + "strings" + "time" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" + cache "github.com/patrickmn/go-cache" +) + +const operationPrefixTOTP = "totp" + +func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { + b := Backend() + if err := b.Setup(ctx, conf); err != nil { + return nil, err + } + return b, nil +} + +func Backend() *backend { + var b backend + b.Backend = &framework.Backend{ + Help: strings.TrimSpace(backendHelp), + + PathsSpecial: &logical.Paths{ + SealWrapStorage: []string{ + "key/", + }, + }, + + Paths: []*framework.Path{ + pathListKeys(&b), + pathKeys(&b), + pathCode(&b), + }, + + Secrets: []*framework.Secret{}, + BackendType: logical.TypeLogical, + } + + b.usedCodes = cache.New(0, 30*time.Second) + + return &b +} + +type backend struct { + *framework.Backend + + usedCodes *cache.Cache +} + +const backendHelp = ` +The TOTP backend dynamically generates time-based one-time use passwords. +` diff --git a/builtin/logical/totp/backend_test.go b/builtin/logical/totp/backend_test.go new file mode 100644 index 0000000..1260042 --- /dev/null +++ b/builtin/logical/totp/backend_test.go @@ -0,0 +1,1243 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package totp + +import ( + "context" + "fmt" + "log" + "net/url" + "path" + "strings" + "testing" + "time" + + "github.com/hashicorp/vault/helper/namespace" + logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/mapstructure" + otplib "github.com/pquerna/otp" + totplib "github.com/pquerna/otp/totp" +) + +func createKey() (string, error) { + keyUrl, err := totplib.Generate(totplib.GenerateOpts{ + Issuer: "Vault", + AccountName: "Test", + }) + + key := keyUrl.Secret() + + return strings.ToLower(key), err +} + +func generateCode(key string, period uint, digits otplib.Digits, algorithm otplib.Algorithm) (string, error) { + // Generate password using totp library + totpToken, err := totplib.GenerateCodeCustom(key, time.Now(), totplib.ValidateOpts{ + Period: period, + Digits: digits, + Algorithm: algorithm, + }) + + return totpToken, err +} + +func TestBackend_KeyName(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + tests := []struct { + Name string + KeyName string + Fail bool + }{ + { + "without @", + "sample", + false, + }, + { + "with @ in the beginning", + "@sample.com", + true, + }, + { + "with @ in the end", + "sample.com@", + true, + }, + { + "with @ in between", + "sample@sample.com", + false, + }, + { + "with multiple @", + "sample@sample@@sample.com", + false, + }, + } + var resp *logical.Response + for _, tc := range tests { + resp, err = b.HandleRequest(namespace.RootContext(nil), &logical.Request{ + Path: "keys/" + tc.KeyName, + Operation: logical.UpdateOperation, + Storage: config.StorageView, + Data: map[string]interface{}{ + "generate": true, + "account_name": "vault", + "issuer": "hashicorp", + }, + }) + if tc.Fail { + if err == nil { + t.Fatalf("expected an error for test %q", tc.Name) + } + continue + } else if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: test name: %q\nresp: %#v\nerr: %v", tc.Name, resp, err) + } + resp, err = b.HandleRequest(namespace.RootContext(nil), &logical.Request{ + Path: "code/" 
+ tc.KeyName, + Operation: logical.ReadOperation, + Storage: config.StorageView, + }) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: test name: %q\nresp: %#v\nerr: %v", tc.Name, resp, err) + } + if resp.Data["code"].(string) == "" { + t.Fatalf("failed to generate code for test %q", tc.Name) + } + } +} + +func TestBackend_readCredentialsDefaultValues(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // Generate a new shared key + key, _ := createKey() + + keyData := map[string]interface{}{ + "key": key, + "generate": false, + } + + expected := map[string]interface{}{ + "issuer": "", + "account_name": "", + "digits": otplib.DigitsSix, + "period": 30, + "algorithm": otplib.AlgorithmSHA1, + "key": key, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + testAccStepReadCreds(t, b, config.StorageView, "test", expected), + }, + }) +} + +func TestBackend_readCredentialsEightDigitsThirtySecondPeriod(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // Generate a new shared key + key, _ := createKey() + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "key": key, + "digits": 8, + "generate": false, + } + + expected := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "digits": otplib.DigitsEight, + "period": 30, + "algorithm": otplib.AlgorithmSHA1, + "key": key, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + testAccStepReadCreds(t, b, config.StorageView, "test", expected), + }, + }) +} + +func TestBackend_readCredentialsSixDigitsNinetySecondPeriod(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // Generate a new shared key + key, _ := createKey() + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "key": key, + "period": 90, + "generate": false, + } + + expected := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "digits": otplib.DigitsSix, + "period": 90, + "algorithm": otplib.AlgorithmSHA1, + "key": key, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + testAccStepReadCreds(t, b, config.StorageView, "test", expected), + }, + }) +} + +func TestBackend_readCredentialsSHA256(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // Generate a new shared key + key, _ := createKey() + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "key": key, + "algorithm": "SHA256", + "generate": false, + } + + expected := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "digits": otplib.DigitsSix, + "period": 
30, + "algorithm": otplib.AlgorithmSHA256, + "key": key, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + testAccStepReadCreds(t, b, config.StorageView, "test", expected), + }, + }) +} + +func TestBackend_readCredentialsSHA512(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // Generate a new shared key + key, _ := createKey() + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "key": key, + "algorithm": "SHA512", + "generate": false, + } + + expected := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "digits": otplib.DigitsSix, + "period": 30, + "algorithm": otplib.AlgorithmSHA512, + "key": key, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + testAccStepReadCreds(t, b, config.StorageView, "test", expected), + }, + }) +} + +func TestBackend_keyCrudDefaultValues(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + key, _ := createKey() + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "key": key, + "generate": false, + } + + expected := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "digits": otplib.DigitsSix, + "period": 30, + "algorithm": otplib.AlgorithmSHA1, + "key": key, + } + + code, _ := generateCode(key, 30, otplib.DigitsSix, otplib.AlgorithmSHA1) + invalidCode := "12345678" + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + testAccStepValidateCode(t, "test", code, true, false), + // Next step should fail because it should be in the used cache + testAccStepValidateCode(t, "test", code, false, true), + testAccStepValidateCode(t, "test", invalidCode, false, false), + testAccStepDeleteKey(t, "test"), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_createKeyMissingKeyValue(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "generate": false, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_createKeyInvalidKeyValue(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "key": "1", + "generate": false, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + 
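+// The invalid-input tests below share one pattern: write a key with a single
+// bad field (algorithm, period, digits, ...), expect the write to fail, and
+// confirm via a read that no key was stored.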
+func TestBackend_createKeyInvalidAlgorithm(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // Generate a new shared key + key, _ := createKey() + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "key": key, + "algorithm": "BADALGORITHM", + "generate": false, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_createKeyInvalidPeriod(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // Generate a new shared key + key, _ := createKey() + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "key": key, + "period": -1, + "generate": false, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_createKeyInvalidDigits(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + // Generate a new shared key + key, _ := createKey() + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "key": key, + "digits": 20, + "generate": false, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_generatedKeyDefaultValues(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "generate": true, + "key_size": 20, + "exported": true, + "qr_size": 200, + } + + expected := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "digits": otplib.DigitsSix, + "period": 30, + "algorithm": otplib.AlgorithmSHA1, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + }, + }) +} + +func TestBackend_generatedKeyDefaultValuesNoQR(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "generate": true, + "key_size": 20, + "exported": true, + "qr_size": 0, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + }, + }) +} + +func TestBackend_generatedKeyNonDefaultKeySize(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + keyData := map[string]interface{}{ + "issuer": 
"Vault", + "account_name": "Test", + "generate": true, + "key_size": 10, + "exported": true, + "qr_size": 200, + } + + expected := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "digits": otplib.DigitsSix, + "period": 30, + "algorithm": otplib.AlgorithmSHA1, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + }, + }) +} + +func TestBackend_urlPassedNonGeneratedKeyInvalidPeriod(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + urlString := "otpauth://totp/Vault:test@email.com?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=6&period=AZ" + + keyData := map[string]interface{}{ + "url": urlString, + "generate": false, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_urlPassedNonGeneratedKeyInvalidDigits(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + urlString := "otpauth://totp/Vault:test@email.com?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=Q&period=60" + + keyData := map[string]interface{}{ + "url": urlString, + "generate": false, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_urlPassedNonGeneratedKeyIssuerInFirstPosition(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + urlString := "otpauth://totp/Vault:test@email.com?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=6&period=60" + + keyData := map[string]interface{}{ + "url": urlString, + "generate": false, + } + + expected := map[string]interface{}{ + "issuer": "Vault", + "account_name": "test@email.com", + "digits": otplib.DigitsSix, + "period": 60, + "algorithm": otplib.AlgorithmSHA512, + "key": "HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ", + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + testAccStepReadCreds(t, b, config.StorageView, "test", expected), + }, + }) +} + +func TestBackend_urlPassedNonGeneratedKeyIssuerInQueryString(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + urlString := "otpauth://totp/test@email.com?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=6&period=60&issuer=Vault" + + keyData := map[string]interface{}{ + "url": urlString, + "generate": false, + } + + expected := map[string]interface{}{ + "issuer": "Vault", + "account_name": "test@email.com", + "digits": otplib.DigitsSix, + "period": 60, + "algorithm": otplib.AlgorithmSHA512, + "key": "HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ", + } + + logicaltest.Test(t, logicaltest.TestCase{ 
+ LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + testAccStepReadCreds(t, b, config.StorageView, "test", expected), + }, + }) +} + +func TestBackend_urlPassedNonGeneratedKeyMissingIssuer(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + urlString := "otpauth://totp/test@email.com?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=6&period=60" + + keyData := map[string]interface{}{ + "url": urlString, + "generate": false, + } + + expected := map[string]interface{}{ + "issuer": "", + "account_name": "test@email.com", + "digits": otplib.DigitsSix, + "period": 60, + "algorithm": otplib.AlgorithmSHA512, + "key": "HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ", + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + testAccStepReadCreds(t, b, config.StorageView, "test", expected), + }, + }) +} + +func TestBackend_urlPassedNonGeneratedKeyMissingAccountName(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + urlString := "otpauth://totp/Vault:?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=6&period=60" + + keyData := map[string]interface{}{ + "url": urlString, + "generate": false, + } + + expected := map[string]interface{}{ + "issuer": "Vault", + "account_name": "", + "digits": otplib.DigitsSix, + "period": 60, + "algorithm": otplib.AlgorithmSHA512, + "key": "HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ", + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + testAccStepReadCreds(t, b, config.StorageView, "test", expected), + }, + }) +} + +func TestBackend_urlPassedNonGeneratedKeyMissingAccountNameandIssuer(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + urlString := "otpauth://totp/?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=6&period=60" + + keyData := map[string]interface{}{ + "url": urlString, + "generate": false, + } + + expected := map[string]interface{}{ + "issuer": "", + "account_name": "", + "digits": otplib.DigitsSix, + "period": 60, + "algorithm": otplib.AlgorithmSHA512, + "key": "HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ", + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + testAccStepReadCreds(t, b, config.StorageView, "test", expected), + }, + }) +} + +func TestBackend_urlPassedNonGeneratedKeyMissingAccountNameandIssuerandPadding(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + urlString := "otpauth://totp/?secret=GEZDGNBVGY3TQOJQGEZDGNBVGY3TQOJQGEZAU&algorithm=SHA512&digits=6&period=60" + + keyData := map[string]interface{}{ + "url": urlString, + "generate": false, + } + + 
expected := map[string]interface{}{ + "issuer": "", + "account_name": "", + "digits": otplib.DigitsSix, + "period": 60, + "algorithm": otplib.AlgorithmSHA512, + "key": "GEZDGNBVGY3TQOJQGEZDGNBVGY3TQOJQGEZAU===", + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + testAccStepReadCreds(t, b, config.StorageView, "test", expected), + }, + }) +} + +func TestBackend_generatedKeyInvalidSkew(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "skew": "2", + "generate": true, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_generatedKeyInvalidQRSize(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "qr_size": "-100", + "generate": true, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_generatedKeyInvalidKeySize(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "Test", + "key_size": "-100", + "generate": true, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_generatedKeyMissingAccountName(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + keyData := map[string]interface{}{ + "issuer": "Vault", + "generate": true, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_generatedKeyMissingIssuer(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + keyData := map[string]interface{}{ + "account_name": "test@email.com", + "generate": true, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_invalidURLValue(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + keyData := map[string]interface{}{ + "url": "notaurl", + "generate": 
false, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_urlAndGenerateTrue(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + keyData := map[string]interface{}{ + "url": "otpauth://totp/Vault:test@email.com?secret=HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ&algorithm=SHA512&digits=6&period=60", + "generate": true, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_keyAndGenerateTrue(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + keyData := map[string]interface{}{ + "key": "HXDMVJECJJWSRB3HWIZR4IFUGFTMXBOZ", + "generate": true, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, true), + testAccStepReadKey(t, "test", nil), + }, + }) +} + +func TestBackend_generatedKeyExportedFalse(t *testing.T) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + b, err := Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + + keyData := map[string]interface{}{ + "issuer": "Vault", + "account_name": "test@email.com", + "generate": true, + "exported": false, + } + + expected := map[string]interface{}{ + "issuer": "Vault", + "account_name": "test@email.com", + "digits": otplib.DigitsSix, + "period": 30, + "algorithm": otplib.AlgorithmSHA1, + } + + logicaltest.Test(t, logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + testAccStepCreateKey(t, "test", keyData, false), + testAccStepReadKey(t, "test", expected), + }, + }) +} + +func testAccStepCreateKey(t *testing.T, name string, keyData map[string]interface{}, expectFail bool) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: path.Join("keys", name), + Data: keyData, + ErrorOk: expectFail, + Check: func(resp *logical.Response) error { + // Skip this if the key is not generated by vault or if the test is expected to fail + if !keyData["generate"].(bool) || expectFail { + return nil + } + + // Check to see if barcode and url were returned if exported is false + if !keyData["exported"].(bool) { + if resp != nil { + t.Fatalf("data was returned when exported was set to false") + } + return nil + } + + // Check to see if a barcode was returned when qr_size is zero + if keyData["qr_size"].(int) == 0 { + if _, exists := resp.Data["barcode"]; exists { + t.Fatalf("a barcode was returned when qr_size was set to zero") + } + return nil + } + + var d struct { + Url string `mapstructure:"url"` + Barcode string `mapstructure:"barcode"` + } + + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + + // Check to see if barcode and url are returned + if d.Barcode == "" { + t.Fatalf("a barcode was not returned for a generated key") + } + + if d.Url == "" { + t.Fatalf("a url was not returned for a generated key") + } + + // Parse url + urlObject, err := url.Parse(d.Url) + if err != nil { + t.Fatal("an error 
occurred while parsing url string") + } + + // Set up query object + urlQuery := urlObject.Query() + + // Read secret + urlSecret := urlQuery.Get("secret") + + // Check key length + keySize := keyData["key_size"].(int) + correctSecretStringSize := (keySize / 5) * 8 + actualSecretStringSize := len(urlSecret) + + if actualSecretStringSize != correctSecretStringSize { + t.Fatal("incorrect key string length") + } + + return nil + }, + } +} + +func testAccStepDeleteKey(t *testing.T, name string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.DeleteOperation, + Path: path.Join("keys", name), + } +} + +func testAccStepReadCreds(t *testing.T, b logical.Backend, s logical.Storage, name string, validation map[string]interface{}) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: path.Join("code", name), + Check: func(resp *logical.Response) error { + var d struct { + Code string `mapstructure:"code"` + } + + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + + log.Printf("[TRACE] Generated credentials: %v", d) + + period := validation["period"].(int) + key := validation["key"].(string) + algorithm := validation["algorithm"].(otplib.Algorithm) + digits := validation["digits"].(otplib.Digits) + + valid, _ := totplib.ValidateCustom(d.Code, key, time.Now(), totplib.ValidateOpts{ + Period: uint(period), + Skew: 1, + Digits: digits, + Algorithm: algorithm, + }) + + if !valid { + t.Fatalf("generated code isn't valid") + } + + return nil + }, + } +} + +func testAccStepReadKey(t *testing.T, name string, expected map[string]interface{}) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "keys/" + name, + Check: func(resp *logical.Response) error { + if resp == nil { + if expected == nil { + return nil + } + return fmt.Errorf("bad: %#v", resp) + } + + var d struct { + Issuer string `mapstructure:"issuer"` + AccountName string `mapstructure:"account_name"` + Period uint `mapstructure:"period"` + Algorithm string `mapstructure:"algorithm"` + Digits otplib.Digits `mapstructure:"digits"` + } + + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + + var keyAlgorithm otplib.Algorithm + switch d.Algorithm { + case "SHA1": + keyAlgorithm = otplib.AlgorithmSHA1 + case "SHA256": + keyAlgorithm = otplib.AlgorithmSHA256 + case "SHA512": + keyAlgorithm = otplib.AlgorithmSHA512 + } + + period := expected["period"].(int) + + switch { + case d.Issuer != expected["issuer"]: + return fmt.Errorf("issuer should equal: %s", expected["issuer"]) + case d.AccountName != expected["account_name"]: + return fmt.Errorf("account_name should equal: %s", expected["account_name"]) + case d.Period != uint(period): + return fmt.Errorf("period should equal: %d", expected["period"]) + case keyAlgorithm != expected["algorithm"]: + return fmt.Errorf("algorithm should equal: %s", expected["algorithm"]) + case d.Digits != expected["digits"]: + return fmt.Errorf("digits should equal: %d", expected["digits"]) + } + return nil + }, + } +} + +func testAccStepValidateCode(t *testing.T, name string, code string, valid, expectError bool) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "code/" + name, + Data: map[string]interface{}{ + "code": code, + }, + ErrorOk: expectError, + Check: func(resp *logical.Response) error { + if resp == nil { + return fmt.Errorf("bad: %#v", resp) + } + + var d struct { + Valid bool 
`mapstructure:"valid"` + } + + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + + switch valid { + case true: + if d.Valid != true { + return fmt.Errorf("code was not valid: %s", code) + } + + default: + if d.Valid != false { + return fmt.Errorf("code was incorrectly validated: %s", code) + } + } + return nil + }, + } +} diff --git a/builtin/logical/totp/cmd/totp/main.go b/builtin/logical/totp/cmd/totp/main.go new file mode 100644 index 0000000..9a2a49b --- /dev/null +++ b/builtin/logical/totp/cmd/totp/main.go @@ -0,0 +1,34 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "os" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/totp" + "github.com/hashicorp/vault/sdk/plugin" +) + +func main() { + apiClientMeta := &api.PluginAPIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args[1:]) + + tlsConfig := apiClientMeta.GetTLSConfig() + tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) + + if err := plugin.ServeMultiplex(&plugin.ServeOpts{ + BackendFactoryFunc: totp.Factory, + // set the TLSProviderFunc so that the plugin maintains backwards + // compatibility with Vault versions that don’t support plugin AutoMTLS + TLSProviderFunc: tlsProviderFunc, + }); err != nil { + logger := hclog.New(&hclog.LoggerOptions{}) + + logger.Error("plugin shutting down", "error", err) + os.Exit(1) + } +} diff --git a/builtin/logical/totp/path_code.go b/builtin/logical/totp/path_code.go new file mode 100644 index 0000000..c792a29 --- /dev/null +++ b/builtin/logical/totp/path_code.go @@ -0,0 +1,146 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package totp + +import ( + "context" + "fmt" + "time" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" + otplib "github.com/pquerna/otp" + totplib "github.com/pquerna/otp/totp" +) + +func pathCode(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "code/" + framework.GenericNameWithAtRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTOTP, + OperationSuffix: "code", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the key.", + }, + "code": { + Type: framework.TypeString, + Description: "TOTP code to be validated.", + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathReadCode, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "generate", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathValidateCode, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "validate", + }, + }, + }, + + HelpSynopsis: pathCodeHelpSyn, + HelpDescription: pathCodeHelpDesc, + } +} + +func (b *backend) pathReadCode(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + + // Get the key + key, err := b.Key(ctx, req.Storage, name) + if err != nil { + return nil, err + } + if key == nil { + return logical.ErrorResponse(fmt.Sprintf("unknown key: %s", name)), nil + } + + // Generate password using totp library + totpToken, err := totplib.GenerateCodeCustom(key.Key, time.Now(), totplib.ValidateOpts{ + Period: key.Period, + Digits: key.Digits, + Algorithm: key.Algorithm, + }) + if err != nil { + return nil, 
err
+	}
+
+	// Return the secret
+	return &logical.Response{
+		Data: map[string]interface{}{
+			"code": totpToken,
+		},
+	}, nil
+}
+
+func (b *backend) pathValidateCode(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
+	name := data.Get("name").(string)
+	code := data.Get("code").(string)
+
+	// Enforce input value requirements
+	if code == "" {
+		return logical.ErrorResponse("the code value is required"), nil
+	}
+
+	// Get the key's stored values
+	key, err := b.Key(ctx, req.Storage, name)
+	if err != nil {
+		return nil, err
+	}
+	if key == nil {
+		return logical.ErrorResponse(fmt.Sprintf("unknown key: %s", name)), nil
+	}
+
+	usedName := fmt.Sprintf("%s_%s", name, code)
+
+	_, ok := b.usedCodes.Get(usedName)
+	if ok {
+		return logical.ErrorResponse("code already used; wait until the next time period"), nil
+	}
+
+	valid, err := totplib.ValidateCustom(code, key.Key, time.Now(), totplib.ValidateOpts{
+		Period:    key.Period,
+		Skew:      key.Skew,
+		Digits:    key.Digits,
+		Algorithm: key.Algorithm,
+	})
+	if err != nil && err != otplib.ErrValidateInputInvalidLength {
+		return logical.ErrorResponse("an error occurred while validating the code"), err
+	}
+
+	// Take the key skew, add two (one period behind and one in front), and
+	// multiply that by the period to cover the full window in which the code
+	// could still validate (e.g. skew 1, period 30s => 90s).
+	err = b.usedCodes.Add(usedName, nil, time.Duration(
+		int64(time.Second)*
+			int64(key.Period)*
+			int64((2+key.Skew))))
+	if err != nil {
+		return nil, fmt.Errorf("error adding code to used cache: %w", err)
+	}
+
+	return &logical.Response{
+		Data: map[string]interface{}{
+			"valid": valid,
+		},
+	}, nil
+}
+
+const pathCodeHelpSyn = `
+Request a time-based one-time use password or validate a password for a certain key.
+`
+
+const pathCodeHelpDesc = `
+This path generates and validates time-based one-time use passwords for a certain key.
+
+`
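+
+// Illustrative client-side usage (a sketch, not part of the backend): the
+// "totp/" mount path and "my-key" key name are assumptions, and client is
+// assumed to be a configured *api.Client from github.com/hashicorp/vault/api.
+//
+//	// Reading code/<name> generates a fresh code (pathReadCode).
+//	secret, err := client.Logical().Read("totp/code/my-key")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	code := secret.Data["code"].(string)
+//
+//	// Writing code/<name> validates a submitted code (pathValidateCode).
+//	resp, err := client.Logical().Write("totp/code/my-key",
+//		map[string]interface{}{"code": code})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(resp.Data["valid"]) // true if accepted and not already used
diff --git a/builtin/logical/totp/path_keys.go b/builtin/logical/totp/path_keys.go
new file mode 100644
index 0000000..05e8e5a
--- /dev/null
+++ b/builtin/logical/totp/path_keys.go
@@ -0,0 +1,455 @@
+// Copyright (c) HashiCorp, Inc.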
+// SPDX-License-Identifier: MPL-2.0 + +package totp + +import ( + "bytes" + "context" + "encoding/base32" + "encoding/base64" + "fmt" + "image/png" + "net/url" + "strconv" + "strings" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" + otplib "github.com/pquerna/otp" + totplib "github.com/pquerna/otp/totp" +) + +func pathListKeys(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "keys/?$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTOTP, + OperationSuffix: "keys", + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: b.pathKeyList, + }, + + HelpSynopsis: pathKeyHelpSyn, + HelpDescription: pathKeyHelpDesc, + } +} + +func pathKeys(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "keys/" + framework.GenericNameWithAtRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTOTP, + OperationSuffix: "key", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the key.", + }, + + "generate": { + Type: framework.TypeBool, + Default: false, + Description: "Determines if a key should be generated by Vault or if a key is being passed from another service.", + }, + + "exported": { + Type: framework.TypeBool, + Default: true, + Description: "Determines if a QR code and url are returned upon generating a key. Only used if generate is true.", + }, + + "key_size": { + Type: framework.TypeInt, + Default: 20, + Description: "Determines the size in bytes of the generated key. Only used if generate is true.", + }, + + "key": { + Type: framework.TypeString, + Description: "The shared master key used to generate a TOTP token. Only used if generate is false.", + }, + + "issuer": { + Type: framework.TypeString, + Description: `The name of the key's issuing organization. Required if generate is true.`, + }, + + "account_name": { + Type: framework.TypeString, + Description: `The name of the account associated with the key. Required if generate is true.`, + }, + + "period": { + Type: framework.TypeDurationSecond, + Default: 30, + Description: `The length of time used to generate a counter for the TOTP token calculation.`, + }, + + "algorithm": { + Type: framework.TypeString, + Default: "SHA1", + Description: `The hashing algorithm used to generate the TOTP token. Options include SHA1, SHA256 and SHA512.`, + }, + + "digits": { + Type: framework.TypeInt, + Default: 6, + Description: `The number of digits in the generated TOTP token. This value can either be 6 or 8.`, + }, + + "skew": { + Type: framework.TypeInt, + Default: 1, + Description: `The number of delay periods that are allowed when validating a TOTP token. This value can either be 0 or 1. Only used if generate is true.`, + }, + + "qr_size": { + Type: framework.TypeInt, + Default: 200, + Description: `The pixel size of the generated square QR code. Only used if generate is true and exported is true. If this value is 0, a QR code will not be returned.`, + }, + + "url": { + Type: framework.TypeString, + Description: `A TOTP url string containing all of the parameters for key setup. 
Only used if generate is false.`, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathKeyRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "read", + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathKeyCreate, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "create", + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathKeyDelete, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "delete", + }, + }, + }, + + HelpSynopsis: pathKeyHelpSyn, + HelpDescription: pathKeyHelpDesc, + } +} + +func (b *backend) Key(ctx context.Context, s logical.Storage, n string) (*keyEntry, error) { + entry, err := s.Get(ctx, "key/"+n) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result keyEntry + if err := entry.DecodeJSON(&result); err != nil { + return nil, err + } + + return &result, nil +} + +func (b *backend) pathKeyDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + err := req.Storage.Delete(ctx, "key/"+data.Get("name").(string)) + if err != nil { + return nil, err + } + + return nil, nil +} + +func (b *backend) pathKeyRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + key, err := b.Key(ctx, req.Storage, data.Get("name").(string)) + if err != nil { + return nil, err + } + if key == nil { + return nil, nil + } + + // Translate algorithm back to string + algorithm := key.Algorithm.String() + + // Return values of key + return &logical.Response{ + Data: map[string]interface{}{ + "issuer": key.Issuer, + "account_name": key.AccountName, + "period": key.Period, + "algorithm": algorithm, + "digits": key.Digits, + }, + }, nil +} + +func (b *backend) pathKeyList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + entries, err := req.Storage.List(ctx, "key/") + if err != nil { + return nil, err + } + + return logical.ListResponse(entries), nil +} + +func (b *backend) pathKeyCreate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + name := data.Get("name").(string) + generate := data.Get("generate").(bool) + exported := data.Get("exported").(bool) + keyString := data.Get("key").(string) + issuer := data.Get("issuer").(string) + accountName := data.Get("account_name").(string) + period := data.Get("period").(int) + algorithm := data.Get("algorithm").(string) + digits := data.Get("digits").(int) + skew := data.Get("skew").(int) + qrSize := data.Get("qr_size").(int) + keySize := data.Get("key_size").(int) + inputURL := data.Get("url").(string) + + if generate { + if keyString != "" { + return logical.ErrorResponse("a key should not be passed if generate is true"), nil + } + if inputURL != "" { + return logical.ErrorResponse("a url should not be passed if generate is true"), nil + } + } + + // Read parameters from url if given + if inputURL != "" { + // Parse url + urlObject, err := url.Parse(inputURL) + if err != nil { + return logical.ErrorResponse("an error occurred while parsing url string"), err + } + + // Set up query object + urlQuery := urlObject.Query() + path := strings.TrimPrefix(urlObject.Path, "/") + index := strings.Index(path, ":") + + // Read issuer + urlIssuer := urlQuery.Get("issuer") + if urlIssuer != "" { + issuer = urlIssuer + } else { + if index != -1 { + issuer 
= path[:index] + } + } + + // Read account name + if index == -1 { + accountName = path + } else { + accountName = path[index+1:] + } + + // Read key string + keyString = urlQuery.Get("secret") + + // Read period + periodQuery := urlQuery.Get("period") + if periodQuery != "" { + periodInt, err := strconv.Atoi(periodQuery) + if err != nil { + return logical.ErrorResponse("an error occurred while parsing period value in url"), err + } + period = periodInt + } + + // Read digits + digitsQuery := urlQuery.Get("digits") + if digitsQuery != "" { + digitsInt, err := strconv.Atoi(digitsQuery) + if err != nil { + return logical.ErrorResponse("an error occurred while parsing digits value in url"), err + } + digits = digitsInt + } + + // Read algorithm + algorithmQuery := urlQuery.Get("algorithm") + if algorithmQuery != "" { + algorithm = algorithmQuery + } + } + + // Translate digits and algorithm to a format the totp library understands + var keyDigits otplib.Digits + switch digits { + case 6: + keyDigits = otplib.DigitsSix + case 8: + keyDigits = otplib.DigitsEight + default: + return logical.ErrorResponse("the digits value can only be 6 or 8"), nil + } + + var keyAlgorithm otplib.Algorithm + switch algorithm { + case "SHA1": + keyAlgorithm = otplib.AlgorithmSHA1 + case "SHA256": + keyAlgorithm = otplib.AlgorithmSHA256 + case "SHA512": + keyAlgorithm = otplib.AlgorithmSHA512 + default: + return logical.ErrorResponse("the algorithm value is not valid"), nil + } + + // Enforce input value requirements + if period <= 0 { + return logical.ErrorResponse("the period value must be greater than zero"), nil + } + + switch skew { + case 0: + case 1: + default: + return logical.ErrorResponse("the skew value must be 0 or 1"), nil + } + + // QR size can be zero but it shouldn't be negative + if qrSize < 0 { + return logical.ErrorResponse("the qr_size value must be greater than or equal to zero"), nil + } + + if keySize <= 0 { + return logical.ErrorResponse("the key_size value must be greater than zero"), nil + } + + // Period, Skew and Key Size need to be unsigned ints + uintPeriod := uint(period) + uintSkew := uint(skew) + uintKeySize := uint(keySize) + + var response *logical.Response + + switch generate { + case true: + // If the key is generated, Account Name and Issuer are required. 
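+		// Both values end up in the otpauth URL label of the generated key;
+		// an illustrative example of the resulting URL (values assumed here,
+		// secret truncated) using the field defaults above:
+		//   otpauth://totp/Vault:test@email.com?secret=HXDMVJEC...&algorithm=SHA1&digits=6&period=30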
+		if accountName == "" {
+			return logical.ErrorResponse("the account_name value is required for generated keys"), nil
+		}
+
+		if issuer == "" {
+			return logical.ErrorResponse("the issuer value is required for generated keys"), nil
+		}
+
+		// Generate a new key
+		keyObject, err := totplib.Generate(totplib.GenerateOpts{
+			Issuer:      issuer,
+			AccountName: accountName,
+			Period:      uintPeriod,
+			Digits:      keyDigits,
+			Algorithm:   keyAlgorithm,
+			SecretSize:  uintKeySize,
+			Rand:        b.GetRandomReader(),
+		})
+		if err != nil {
+			return logical.ErrorResponse("an error occurred while generating a key"), err
+		}
+
+		// Get key string value
+		keyString = keyObject.Secret()
+
+		// Skip returning the QR code and url if exported is set to false
+		if exported {
+			// Prepare the url and barcode
+			urlString := keyObject.String()
+
+			// Don't include QR code if size is set to zero
+			if qrSize == 0 {
+				response = &logical.Response{
+					Data: map[string]interface{}{
+						"url": urlString,
+					},
+				}
+			} else {
+				barcode, err := keyObject.Image(qrSize, qrSize)
+				if err != nil {
+					return nil, fmt.Errorf("failed to generate QR code image: %w", err)
+				}
+
+				var buff bytes.Buffer
+				if err := png.Encode(&buff, barcode); err != nil {
+					return nil, fmt.Errorf("failed to encode QR code PNG: %w", err)
+				}
+				b64Barcode := base64.StdEncoding.EncodeToString(buff.Bytes())
+				response = &logical.Response{
+					Data: map[string]interface{}{
+						"url":     urlString,
+						"barcode": b64Barcode,
+					},
+				}
+			}
+		}
+	default:
+		if keyString == "" {
+			return logical.ErrorResponse("the key value is required"), nil
+		}
+
+		if i := len(keyString) % 8; i != 0 {
+			keyString += strings.Repeat("=", 8-i)
+		}
+
+		_, err := base32.StdEncoding.DecodeString(strings.ToUpper(keyString))
+		if err != nil {
+			return logical.ErrorResponse(fmt.Sprintf(
+				"invalid key value: %s", err)), nil
+		}
+	}
+
+	// Store it
+	entry, err := logical.StorageEntryJSON("key/"+name, &keyEntry{
+		Key:         keyString,
+		Issuer:      issuer,
+		AccountName: accountName,
+		Period:      uintPeriod,
+		Algorithm:   keyAlgorithm,
+		Digits:      keyDigits,
+		Skew:        uintSkew,
+	})
+	if err != nil {
+		return nil, err
+	}
+	if err := req.Storage.Put(ctx, entry); err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+type keyEntry struct {
+	Key         string           `json:"key" mapstructure:"key" structs:"key"`
+	Issuer      string           `json:"issuer" mapstructure:"issuer" structs:"issuer"`
+	AccountName string           `json:"account_name" mapstructure:"account_name" structs:"account_name"`
+	Period      uint             `json:"period" mapstructure:"period" structs:"period"`
+	Algorithm   otplib.Algorithm `json:"algorithm" mapstructure:"algorithm" structs:"algorithm"`
+	Digits      otplib.Digits    `json:"digits" mapstructure:"digits" structs:"digits"`
+	Skew        uint             `json:"skew" mapstructure:"skew" structs:"skew"`
+}
+
+const pathKeyHelpSyn = `
+Manage the keys that can be created with this backend.
+`
+
+const pathKeyHelpDesc = `
+This path lets you manage the keys that can be created with this backend.
+
+`
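+
+// Illustrative key creation via the Vault API client (a sketch; the "totp/"
+// mount path, the key name, and the client variable are assumptions):
+//
+//	_, err := client.Logical().Write("totp/keys/my-key",
+//		map[string]interface{}{
+//			"generate":     true,
+//			"issuer":       "Vault",
+//			"account_name": "user@example.com",
+//		})
diff --git a/builtin/logical/transit/backend.go b/builtin/logical/transit/backend.go
new file mode 100644
index 0000000..4b4d4a2
--- /dev/null
+++ b/builtin/logical/transit/backend.go
@@ -0,0 +1,291 @@
+// Copyright (c) HashiCorp, Inc.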
+// SPDX-License-Identifier: MPL-2.0
+
+package transit
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/helper/consts"
+	"github.com/hashicorp/vault/sdk/helper/keysutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+const (
+	operationPrefixTransit = "transit"
+
+	// Minimum cache size for transit backend
+	minCacheSize = 10
+)
+
+func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
+	b, err := Backend(ctx, conf)
+	if err != nil {
+		return nil, err
+	}
+	if err := b.Setup(ctx, conf); err != nil {
+		return nil, err
+	}
+	return b, nil
+}
+
+func Backend(ctx context.Context, conf *logical.BackendConfig) (*backend, error) {
+	var b backend
+	b.Backend = &framework.Backend{
+		PathsSpecial: &logical.Paths{
+			SealWrapStorage: []string{
+				"archive/",
+				"policy/",
+			},
+		},
+
+		Paths: []*framework.Path{
+			// Rotate/Config needs to come before Keys
+			// as the handler is greedy
+			b.pathRotate(),
+			b.pathRewrap(),
+			b.pathWrappingKey(),
+			b.pathImport(),
+			b.pathImportVersion(),
+			b.pathKeys(),
+			b.pathListKeys(),
+			b.pathBYOKExportKeys(),
+			b.pathExportKeys(),
+			b.pathKeysConfig(),
+			b.pathEncrypt(),
+			b.pathDecrypt(),
+			b.pathDatakey(),
+			b.pathRandom(),
+			b.pathHash(),
+			b.pathHMAC(),
+			b.pathSign(),
+			b.pathVerify(),
+			b.pathBackup(),
+			b.pathRestore(),
+			b.pathTrim(),
+			b.pathCacheConfig(),
+			b.pathConfigKeys(),
+		},
+
+		Secrets:      []*framework.Secret{},
+		Invalidate:   b.invalidate,
+		BackendType:  logical.TypeLogical,
+		PeriodicFunc: b.periodicFunc,
+	}
+
+	b.backendUUID = conf.BackendUUID
+
+	// determine cacheSize to use. Defaults to 0 which means unlimited
+	cacheSize := 0
+	useCache := !conf.System.CachingDisabled()
+	if useCache {
+		var err error
+		cacheSize, err = GetCacheSizeFromStorage(ctx, conf.StorageView)
+		if err != nil {
+			return nil, fmt.Errorf("error retrieving cache size from storage: %w", err)
+		}
+
+		if cacheSize != 0 && cacheSize < minCacheSize {
+			b.Logger().Warn(fmt.Sprintf("size %d is less than minimum %d. Cache size is set to %d", cacheSize, minCacheSize, minCacheSize))
+			cacheSize = minCacheSize
+		}
+	}
+
+	var err error
+	b.lm, err = keysutil.NewLockManager(useCache, cacheSize)
+	if err != nil {
+		return nil, err
+	}
+
+	return &b, nil
+}
+
+type backend struct {
+	*framework.Backend
+	lm *keysutil.LockManager
+	// Lock to make changes to any of the backend's cache configuration.
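+	// The cacheSizeChanged flag below (also guarded by configMutex) records
+	// that the size stored at config/cache may differ from the in-memory
+	// cache: invalidate sets it on "cache-config/" writes, and GetPolicy
+	// refreshes the cache and clears it.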
+ configMutex sync.RWMutex + cacheSizeChanged bool + checkAutoRotateAfter time.Time + autoRotateOnce sync.Once + backendUUID string +} + +func GetCacheSizeFromStorage(ctx context.Context, s logical.Storage) (int, error) { + size := 0 + entry, err := s.Get(ctx, "config/cache") + if err != nil { + return 0, err + } + if entry != nil { + var storedCache configCache + if err := entry.DecodeJSON(&storedCache); err != nil { + return 0, err + } + size = storedCache.Size + } + return size, nil +} + +// Update cache size and get policy +func (b *backend) GetPolicy(ctx context.Context, polReq keysutil.PolicyRequest, rand io.Reader) (retP *keysutil.Policy, retUpserted bool, retErr error) { + // Acquire read lock to read cacheSizeChanged + b.configMutex.RLock() + if b.lm.GetUseCache() && b.cacheSizeChanged { + var err error + currentCacheSize := b.lm.GetCacheSize() + storedCacheSize, err := GetCacheSizeFromStorage(ctx, polReq.Storage) + if err != nil { + b.configMutex.RUnlock() + return nil, false, err + } + if currentCacheSize != storedCacheSize { + err = b.lm.InitCache(storedCacheSize) + if err != nil { + b.configMutex.RUnlock() + return nil, false, err + } + } + // Release the read lock and acquire the write lock + b.configMutex.RUnlock() + b.configMutex.Lock() + defer b.configMutex.Unlock() + b.cacheSizeChanged = false + } else { + b.configMutex.RUnlock() + } + p, _, err := b.lm.GetPolicy(ctx, polReq, rand) + if err != nil { + return p, false, err + } + return p, true, nil +} + +func (b *backend) invalidate(ctx context.Context, key string) { + if b.Logger().IsDebug() { + b.Logger().Debug("invalidating key", "key", key) + } + switch { + case strings.HasPrefix(key, "policy/"): + name := strings.TrimPrefix(key, "policy/") + b.lm.InvalidatePolicy(name) + case strings.HasPrefix(key, "cache-config/"): + // Acquire the lock to set the flag to indicate that cache size needs to be refreshed from storage + b.configMutex.Lock() + defer b.configMutex.Unlock() + b.cacheSizeChanged = true + } +} + +// periodicFunc is a central collection of functions that run on an interval. +// Anything that should be called regularly can be placed within this method. +func (b *backend) periodicFunc(ctx context.Context, req *logical.Request) error { + // These operations ensure the auto-rotate only happens once simultaneously. It's an unlikely edge + // given the time scale, but a safeguard nonetheless. + var err error + didAutoRotate := false + autoRotateOnceFn := func() { + err = b.autoRotateKeys(ctx, req) + didAutoRotate = true + } + b.autoRotateOnce.Do(autoRotateOnceFn) + if didAutoRotate { + b.autoRotateOnce = sync.Once{} + } + + return err +} + +// autoRotateKeys retrieves all transit keys and rotates those which have an +// auto rotate period defined which has passed. This operation only happens +// on primary nodes and performance secondary nodes which have a local mount. +func (b *backend) autoRotateKeys(ctx context.Context, req *logical.Request) error { + // Only check for autorotation once an hour to avoid unnecessarily iterating + // over all keys too frequently. + if time.Now().Before(b.checkAutoRotateAfter) { + return nil + } + b.checkAutoRotateAfter = time.Now().Add(1 * time.Hour) + + // Early exit if not a primary or performance secondary with a local mount. 
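+	// Concretely, the check below bails out on DR secondaries and performance
+	// standbys, and on performance secondaries whose mount is replicated
+	// rather than local.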
+ if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby) || + (!b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) { + return nil + } + + // Retrieve all keys and loop over them to check if they need to be rotated. + keys, err := req.Storage.List(ctx, "policy/") + if err != nil { + return err + } + + // Collect errors in a multierror to ensure a single failure doesn't prevent + // all keys from being rotated. + var errs *multierror.Error + + for _, key := range keys { + p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ + Storage: req.Storage, + Name: key, + }, b.GetRandomReader()) + if err != nil { + errs = multierror.Append(errs, err) + continue + } + + // If the policy is nil, move onto the next one. + if p == nil { + continue + } + + err = b.rotateIfRequired(ctx, req, key, p) + if err != nil { + errs = multierror.Append(errs, err) + } + } + + return errs.ErrorOrNil() +} + +// rotateIfRequired rotates a key if it is due for autorotation. +func (b *backend) rotateIfRequired(ctx context.Context, req *logical.Request, key string, p *keysutil.Policy) error { + if !b.System().CachingDisabled() { + p.Lock(true) + } + defer p.Unlock() + + // If the key is imported, it can only be rotated from within Vault if allowed. + if p.Imported && !p.AllowImportedKeyRotation { + return nil + } + + // If the policy's automatic rotation period is 0, it should not + // automatically rotate. + if p.AutoRotatePeriod == 0 { + return nil + } + + // We can't auto-rotate managed keys + if p.Type == keysutil.KeyType_MANAGED_KEY { + return nil + } + + // Retrieve the latest version of the policy and determine if it is time to rotate. + latestKey := p.Keys[strconv.Itoa(p.LatestVersion)] + if time.Now().After(latestKey.CreationTime.Add(p.AutoRotatePeriod)) { + if b.Logger().IsDebug() { + b.Logger().Debug("automatically rotating key", "key", key) + } + return p.Rotate(ctx, req.Storage, b.GetRandomReader()) + + } + return nil +} diff --git a/builtin/logical/transit/backend_test.go b/builtin/logical/transit/backend_test.go new file mode 100644 index 0000000..2ee9e9b --- /dev/null +++ b/builtin/logical/transit/backend_test.go @@ -0,0 +1,2305 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "crypto" + "crypto/ed25519" + cryptoRand "crypto/rand" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "fmt" + "io" + "math/rand" + "os" + "path" + "reflect" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/pki" + logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/keysutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + + uuid "github.com/hashicorp/go-uuid" + "github.com/mitchellh/mapstructure" + + "github.com/stretchr/testify/require" +) + +const ( + testPlaintext = "The quick brown fox" +) + +func createBackendWithStorage(t testing.TB) (*backend, logical.Storage) { + config := logical.TestBackendConfig() + config.StorageView = &logical.InmemStorage{} + + b, _ := Backend(context.Background(), config) + if b == nil { + t.Fatalf("failed to create backend") + } + err := b.Backend.Setup(context.Background(), config) + if err != nil { + t.Fatal(err) + } + return b, config.StorageView +} + +func createBackendWithSysView(t testing.TB) (*backend, logical.Storage) { + sysView := logical.TestSystemView() + storage := &logical.InmemStorage{} + + conf := &logical.BackendConfig{ + StorageView: storage, + System: sysView, + } + + b, _ := Backend(context.Background(), conf) + if b == nil { + t.Fatal("failed to create backend") + } + + err := b.Backend.Setup(context.Background(), conf) + if err != nil { + t.Fatal(err) + } + + return b, storage +} + +func createBackendWithSysViewWithStorage(t testing.TB, s logical.Storage) *backend { + sysView := logical.TestSystemView() + + conf := &logical.BackendConfig{ + StorageView: s, + System: sysView, + } + + b, _ := Backend(context.Background(), conf) + if b == nil { + t.Fatal("failed to create backend") + } + + err := b.Backend.Setup(context.Background(), conf) + if err != nil { + t.Fatal(err) + } + + return b +} + +func createBackendWithForceNoCacheWithSysViewWithStorage(t testing.TB, s logical.Storage) *backend { + sysView := logical.TestSystemView() + sysView.CachingDisabledVal = true + + conf := &logical.BackendConfig{ + StorageView: s, + System: sysView, + } + + b, _ := Backend(context.Background(), conf) + if b == nil { + t.Fatal("failed to create backend") + } + + err := b.Backend.Setup(context.Background(), conf) + if err != nil { + t.Fatal(err) + } + + return b +} + +func TestTransit_RSA(t *testing.T) { + testTransit_RSA(t, "rsa-2048") + testTransit_RSA(t, "rsa-3072") + testTransit_RSA(t, "rsa-4096") +} + +func testTransit_RSA(t *testing.T, keyType string) { + var resp *logical.Response + var err error + b, storage := createBackendWithStorage(t) + + keyReq := &logical.Request{ + Path: "keys/rsa", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "type": keyType, + }, + Storage: storage, + } + + resp, err = b.HandleRequest(context.Background(), keyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + + plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA==" // "the quick brown fox" + + encryptReq := &logical.Request{ + Path: "encrypt/rsa", + Operation: logical.UpdateOperation, + Storage: storage, + Data: map[string]interface{}{ + "plaintext": plaintext, + }, + } + + resp, err = 
b.HandleRequest(context.Background(), encryptReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + + ciphertext1 := resp.Data["ciphertext"].(string) + + decryptReq := &logical.Request{ + Path: "decrypt/rsa", + Operation: logical.UpdateOperation, + Storage: storage, + Data: map[string]interface{}{ + "ciphertext": ciphertext1, + }, + } + + resp, err = b.HandleRequest(context.Background(), decryptReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + + decryptedPlaintext := resp.Data["plaintext"] + + if plaintext != decryptedPlaintext { + t.Fatalf("bad: plaintext; expected: %q\nactual: %q", plaintext, decryptedPlaintext) + } + + // Rotate the key + rotateReq := &logical.Request{ + Path: "keys/rsa/rotate", + Operation: logical.UpdateOperation, + Storage: storage, + } + resp, err = b.HandleRequest(context.Background(), rotateReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + + // Encrypt again + resp, err = b.HandleRequest(context.Background(), encryptReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + ciphertext2 := resp.Data["ciphertext"].(string) + + if ciphertext1 == ciphertext2 { + t.Fatalf("expected different ciphertexts") + } + + // See if the older ciphertext can still be decrypted + resp, err = b.HandleRequest(context.Background(), decryptReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + if resp.Data["plaintext"].(string) != plaintext { + t.Fatal("failed to decrypt old ciphertext after rotating the key") + } + + // Decrypt the new ciphertext + decryptReq.Data = map[string]interface{}{ + "ciphertext": ciphertext2, + } + resp, err = b.HandleRequest(context.Background(), decryptReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + if resp.Data["plaintext"].(string) != plaintext { + t.Fatal("failed to decrypt ciphertext after rotating the key") + } + + signReq := &logical.Request{ + Path: "sign/rsa", + Operation: logical.UpdateOperation, + Storage: storage, + Data: map[string]interface{}{ + "input": plaintext, + }, + } + resp, err = b.HandleRequest(context.Background(), signReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + signature := resp.Data["signature"].(string) + + verifyReq := &logical.Request{ + Path: "verify/rsa", + Operation: logical.UpdateOperation, + Storage: storage, + Data: map[string]interface{}{ + "input": plaintext, + "signature": signature, + }, + } + + resp, err = b.HandleRequest(context.Background(), verifyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + if !resp.Data["valid"].(bool) { + t.Fatalf("failed to verify the RSA signature") + } + + signReq.Data = map[string]interface{}{ + "input": plaintext, + "hash_algorithm": "invalid", + } + resp, err = b.HandleRequest(context.Background(), signReq) + if err == nil { + t.Fatal(err) + } + + signReq.Data = map[string]interface{}{ + "input": plaintext, + "hash_algorithm": "sha2-512", + } + resp, err = b.HandleRequest(context.Background(), signReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + signature = resp.Data["signature"].(string) + + verifyReq.Data = map[string]interface{}{ + 
"input": plaintext, + "signature": signature, + } + resp, err = b.HandleRequest(context.Background(), verifyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + if resp.Data["valid"].(bool) { + t.Fatalf("expected validation to fail") + } + + verifyReq.Data = map[string]interface{}{ + "input": plaintext, + "signature": signature, + "hash_algorithm": "sha2-512", + } + resp, err = b.HandleRequest(context.Background(), verifyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + if !resp.Data["valid"].(bool) { + t.Fatalf("failed to verify the RSA signature") + } + + // Take a random hash and sign it using PKCSv1_5_NoOID. + hash := "P8m2iUWdc4+MiKOkiqnjNUIBa3pAUuABqqU2/KdIE8s=" + signReq.Data = map[string]interface{}{ + "input": hash, + "hash_algorithm": "none", + "signature_algorithm": "pkcs1v15", + "prehashed": true, + } + resp, err = b.HandleRequest(context.Background(), signReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + signature = resp.Data["signature"].(string) + + verifyReq.Data = map[string]interface{}{ + "input": hash, + "signature": signature, + "hash_algorithm": "none", + "signature_algorithm": "pkcs1v15", + "prehashed": true, + } + resp, err = b.HandleRequest(context.Background(), verifyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + if !resp.Data["valid"].(bool) { + t.Fatalf("failed to verify the RSA signature") + } +} + +func TestBackend_basic(t *testing.T) { + decryptData := make(map[string]interface{}) + logicaltest.Test(t, logicaltest.TestCase{ + LogicalFactory: Factory, + Steps: []logicaltest.TestStep{ + testAccStepListPolicy(t, "test", true), + testAccStepWritePolicy(t, "test", false), + testAccStepListPolicy(t, "test", false), + testAccStepReadPolicy(t, "test", false, false), + testAccStepEncrypt(t, "test", testPlaintext, decryptData), + testAccStepDecrypt(t, "test", testPlaintext, decryptData), + testAccStepEncrypt(t, "test", "", decryptData), + testAccStepDecrypt(t, "test", "", decryptData), + testAccStepDeleteNotDisabledPolicy(t, "test"), + testAccStepEnableDeletion(t, "test"), + testAccStepDeletePolicy(t, "test"), + testAccStepWritePolicy(t, "test", false), + testAccStepEnableDeletion(t, "test"), + testAccStepDisableDeletion(t, "test"), + testAccStepDeleteNotDisabledPolicy(t, "test"), + testAccStepEnableDeletion(t, "test"), + testAccStepDeletePolicy(t, "test"), + testAccStepReadPolicy(t, "test", true, false), + }, + }) +} + +func TestBackend_upsert(t *testing.T) { + decryptData := make(map[string]interface{}) + logicaltest.Test(t, logicaltest.TestCase{ + LogicalFactory: Factory, + Steps: []logicaltest.TestStep{ + testAccStepReadPolicy(t, "test", true, false), + testAccStepListPolicy(t, "test", true), + testAccStepEncryptUpsert(t, "test", testPlaintext, decryptData), + testAccStepListPolicy(t, "test", false), + testAccStepReadPolicy(t, "test", false, false), + testAccStepDecrypt(t, "test", testPlaintext, decryptData), + }, + }) +} + +func TestBackend_datakey(t *testing.T) { + dataKeyInfo := make(map[string]interface{}) + logicaltest.Test(t, logicaltest.TestCase{ + LogicalFactory: Factory, + Steps: []logicaltest.TestStep{ + testAccStepListPolicy(t, "test", true), + testAccStepWritePolicy(t, "test", false), + testAccStepListPolicy(t, "test", false), + testAccStepReadPolicy(t, "test", false, false), + testAccStepWriteDatakey(t, 
"test", false, 256, dataKeyInfo), + testAccStepDecryptDatakey(t, "test", dataKeyInfo), + testAccStepWriteDatakey(t, "test", true, 128, dataKeyInfo), + }, + }) +} + +func TestBackend_rotation(t *testing.T) { + defer os.Setenv("TRANSIT_ACC_KEY_TYPE", "") + testBackendRotation(t) + os.Setenv("TRANSIT_ACC_KEY_TYPE", "CHACHA") + testBackendRotation(t) +} + +func testBackendRotation(t *testing.T) { + decryptData := make(map[string]interface{}) + encryptHistory := make(map[int]map[string]interface{}) + logicaltest.Test(t, logicaltest.TestCase{ + LogicalFactory: Factory, + Steps: []logicaltest.TestStep{ + testAccStepListPolicy(t, "test", true), + testAccStepWritePolicy(t, "test", false), + testAccStepListPolicy(t, "test", false), + testAccStepEncryptVX(t, "test", testPlaintext, decryptData, 0, encryptHistory), + testAccStepEncryptVX(t, "test", testPlaintext, decryptData, 1, encryptHistory), + testAccStepRotate(t, "test"), // now v2 + testAccStepEncryptVX(t, "test", testPlaintext, decryptData, 2, encryptHistory), + testAccStepRotate(t, "test"), // now v3 + testAccStepEncryptVX(t, "test", testPlaintext, decryptData, 3, encryptHistory), + testAccStepRotate(t, "test"), // now v4 + testAccStepEncryptVX(t, "test", testPlaintext, decryptData, 4, encryptHistory), + testAccStepDecrypt(t, "test", testPlaintext, decryptData), + testAccStepEncryptVX(t, "test", testPlaintext, decryptData, 99, encryptHistory), + testAccStepDecryptExpectFailure(t, "test", testPlaintext, decryptData), + testAccStepLoadVX(t, "test", decryptData, 0, encryptHistory), + testAccStepDecrypt(t, "test", testPlaintext, decryptData), + testAccStepLoadVX(t, "test", decryptData, 1, encryptHistory), + testAccStepDecrypt(t, "test", testPlaintext, decryptData), + testAccStepLoadVX(t, "test", decryptData, 2, encryptHistory), + testAccStepDecrypt(t, "test", testPlaintext, decryptData), + testAccStepLoadVX(t, "test", decryptData, 3, encryptHistory), + testAccStepDecrypt(t, "test", testPlaintext, decryptData), + testAccStepLoadVX(t, "test", decryptData, 99, encryptHistory), + testAccStepDecryptExpectFailure(t, "test", testPlaintext, decryptData), + testAccStepLoadVX(t, "test", decryptData, 4, encryptHistory), + testAccStepDecrypt(t, "test", testPlaintext, decryptData), + testAccStepDeleteNotDisabledPolicy(t, "test"), + testAccStepAdjustPolicyMinDecryption(t, "test", 3), + testAccStepAdjustPolicyMinEncryption(t, "test", 4), + testAccStepReadPolicyWithVersions(t, "test", false, false, 3, 4), + testAccStepLoadVX(t, "test", decryptData, 0, encryptHistory), + testAccStepDecryptExpectFailure(t, "test", testPlaintext, decryptData), + testAccStepLoadVX(t, "test", decryptData, 1, encryptHistory), + testAccStepDecryptExpectFailure(t, "test", testPlaintext, decryptData), + testAccStepLoadVX(t, "test", decryptData, 2, encryptHistory), + testAccStepDecryptExpectFailure(t, "test", testPlaintext, decryptData), + testAccStepLoadVX(t, "test", decryptData, 3, encryptHistory), + testAccStepDecrypt(t, "test", testPlaintext, decryptData), + testAccStepLoadVX(t, "test", decryptData, 4, encryptHistory), + testAccStepDecrypt(t, "test", testPlaintext, decryptData), + testAccStepAdjustPolicyMinDecryption(t, "test", 1), + testAccStepReadPolicyWithVersions(t, "test", false, false, 1, 4), + testAccStepLoadVX(t, "test", decryptData, 0, encryptHistory), + testAccStepDecrypt(t, "test", testPlaintext, decryptData), + testAccStepLoadVX(t, "test", decryptData, 1, encryptHistory), + testAccStepDecrypt(t, "test", testPlaintext, decryptData), + testAccStepLoadVX(t, "test", decryptData, 
2, encryptHistory), + testAccStepDecrypt(t, "test", testPlaintext, decryptData), + testAccStepRewrap(t, "test", decryptData, 4), + testAccStepDecrypt(t, "test", testPlaintext, decryptData), + testAccStepEnableDeletion(t, "test"), + testAccStepDeletePolicy(t, "test"), + testAccStepReadPolicy(t, "test", true, false), + testAccStepListPolicy(t, "test", true), + }, + }) +} + +func TestBackend_basic_derived(t *testing.T) { + decryptData := make(map[string]interface{}) + logicaltest.Test(t, logicaltest.TestCase{ + LogicalFactory: Factory, + Steps: []logicaltest.TestStep{ + testAccStepListPolicy(t, "test", true), + testAccStepWritePolicy(t, "test", true), + testAccStepListPolicy(t, "test", false), + testAccStepReadPolicy(t, "test", false, true), + testAccStepEncryptContext(t, "test", testPlaintext, "my-cool-context", decryptData), + testAccStepDecrypt(t, "test", testPlaintext, decryptData), + testAccStepEnableDeletion(t, "test"), + testAccStepDeletePolicy(t, "test"), + testAccStepReadPolicy(t, "test", true, true), + }, + }) +} + +func testAccStepWritePolicy(t *testing.T, name string, derived bool) logicaltest.TestStep { + ts := logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "keys/" + name, + Data: map[string]interface{}{ + "derived": derived, + }, + } + if os.Getenv("TRANSIT_ACC_KEY_TYPE") == "CHACHA" { + ts.Data["type"] = "chacha20-poly1305" + } + return ts +} + +func testAccStepListPolicy(t *testing.T, name string, expectNone bool) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ListOperation, + Path: "keys", + Check: func(resp *logical.Response) error { + if resp == nil { + return fmt.Errorf("missing response") + } + if expectNone { + keysRaw, ok := resp.Data["keys"] + if ok || keysRaw != nil { + return fmt.Errorf("response data when expecting none") + } + return nil + } + if len(resp.Data) == 0 { + return fmt.Errorf("no data returned") + } + + var d struct { + Keys []string `mapstructure:"keys"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + if len(d.Keys) > 0 && d.Keys[0] != name { + return fmt.Errorf("bad name: %#v", d) + } + if len(d.Keys) != 1 { + return fmt.Errorf("only 1 key expected, %d returned", len(d.Keys)) + } + return nil + }, + } +} + +func testAccStepAdjustPolicyMinDecryption(t *testing.T, name string, minVer int) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "keys/" + name + "/config", + Data: map[string]interface{}{ + "min_decryption_version": minVer, + }, + } +} + +func testAccStepAdjustPolicyMinEncryption(t *testing.T, name string, minVer int) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "keys/" + name + "/config", + Data: map[string]interface{}{ + "min_encryption_version": minVer, + }, + } +} + +func testAccStepDisableDeletion(t *testing.T, name string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "keys/" + name + "/config", + Data: map[string]interface{}{ + "deletion_allowed": false, + }, + } +} + +func testAccStepEnableDeletion(t *testing.T, name string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "keys/" + name + "/config", + Data: map[string]interface{}{ + "deletion_allowed": true, + }, + } +} + +func testAccStepDeletePolicy(t *testing.T, name string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.DeleteOperation, + Path: "keys/" + name, + } 
+} + +func testAccStepDeleteNotDisabledPolicy(t *testing.T, name string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.DeleteOperation, + Path: "keys/" + name, + ErrorOk: true, + Check: func(resp *logical.Response) error { + if resp == nil { + return fmt.Errorf("got nil response instead of error") + } + if resp.IsError() { + return nil + } + return fmt.Errorf("expected error but did not get one") + }, + } +} + +func testAccStepReadPolicy(t *testing.T, name string, expectNone, derived bool) logicaltest.TestStep { + return testAccStepReadPolicyWithVersions(t, name, expectNone, derived, 1, 0) +} + +func testAccStepReadPolicyWithVersions(t *testing.T, name string, expectNone, derived bool, minDecryptionVersion int, minEncryptionVersion int) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "keys/" + name, + Check: func(resp *logical.Response) error { + if resp == nil && !expectNone { + return fmt.Errorf("missing response") + } else if expectNone { + if resp != nil { + return fmt.Errorf("response when expecting none") + } + return nil + } + var d struct { + Name string `mapstructure:"name"` + Key []byte `mapstructure:"key"` + Keys map[string]int64 `mapstructure:"keys"` + Type string `mapstructure:"type"` + Derived bool `mapstructure:"derived"` + KDF string `mapstructure:"kdf"` + DeletionAllowed bool `mapstructure:"deletion_allowed"` + ConvergentEncryption bool `mapstructure:"convergent_encryption"` + MinDecryptionVersion int `mapstructure:"min_decryption_version"` + MinEncryptionVersion int `mapstructure:"min_encryption_version"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + + if d.Name != name { + return fmt.Errorf("bad name: %#v", d) + } + if os.Getenv("TRANSIT_ACC_KEY_TYPE") == "CHACHA" { + if d.Type != keysutil.KeyType(keysutil.KeyType_ChaCha20_Poly1305).String() { + return fmt.Errorf("bad key type: %#v", d) + } + } else if d.Type != keysutil.KeyType(keysutil.KeyType_AES256_GCM96).String() { + return fmt.Errorf("bad key type: %#v", d) + } + // Should NOT get a key back + if d.Key != nil { + return fmt.Errorf("bad: %#v", d) + } + if d.Keys == nil { + return fmt.Errorf("bad: %#v", d) + } + if d.MinDecryptionVersion != minDecryptionVersion { + return fmt.Errorf("bad: %#v", d) + } + if d.MinEncryptionVersion != minEncryptionVersion { + return fmt.Errorf("bad: %#v", d) + } + if d.DeletionAllowed { + return fmt.Errorf("bad: %#v", d) + } + if d.Derived != derived { + return fmt.Errorf("bad: %#v", d) + } + if derived && d.KDF != "hkdf_sha256" { + return fmt.Errorf("bad: %#v", d) + } + return nil + }, + } +} + +func testAccStepEncrypt( + t *testing.T, name, plaintext string, decryptData map[string]interface{}, +) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "encrypt/" + name, + Data: map[string]interface{}{ + "plaintext": base64.StdEncoding.EncodeToString([]byte(plaintext)), + }, + Check: func(resp *logical.Response) error { + var d struct { + Ciphertext string `mapstructure:"ciphertext"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + if d.Ciphertext == "" { + return fmt.Errorf("missing ciphertext") + } + decryptData["ciphertext"] = d.Ciphertext + return nil + }, + } +} + +func testAccStepEncryptUpsert( + t *testing.T, name, plaintext string, decryptData map[string]interface{}, +) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.CreateOperation, + Path: "encrypt/" + 
name, + Data: map[string]interface{}{ + "plaintext": base64.StdEncoding.EncodeToString([]byte(plaintext)), + }, + Check: func(resp *logical.Response) error { + var d struct { + Ciphertext string `mapstructure:"ciphertext"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + if d.Ciphertext == "" { + return fmt.Errorf("missing ciphertext") + } + decryptData["ciphertext"] = d.Ciphertext + return nil + }, + } +} + +func testAccStepEncryptContext( + t *testing.T, name, plaintext, context string, decryptData map[string]interface{}, +) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "encrypt/" + name, + Data: map[string]interface{}{ + "plaintext": base64.StdEncoding.EncodeToString([]byte(plaintext)), + "context": base64.StdEncoding.EncodeToString([]byte(context)), + }, + Check: func(resp *logical.Response) error { + var d struct { + Ciphertext string `mapstructure:"ciphertext"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + if d.Ciphertext == "" { + return fmt.Errorf("missing ciphertext") + } + decryptData["ciphertext"] = d.Ciphertext + decryptData["context"] = base64.StdEncoding.EncodeToString([]byte(context)) + return nil + }, + } +} + +func testAccStepDecrypt( + t *testing.T, name, plaintext string, decryptData map[string]interface{}, +) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "decrypt/" + name, + Data: decryptData, + Check: func(resp *logical.Response) error { + var d struct { + Plaintext string `mapstructure:"plaintext"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + + // Decode the base64 + plainRaw, err := base64.StdEncoding.DecodeString(d.Plaintext) + if err != nil { + return err + } + + if string(plainRaw) != plaintext { + return fmt.Errorf("plaintext mismatch: %s expect: %s, decryptData was %#v", plainRaw, plaintext, decryptData) + } + return nil + }, + } +} + +func testAccStepRewrap( + t *testing.T, name string, decryptData map[string]interface{}, expectedVer int, +) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "rewrap/" + name, + Data: decryptData, + Check: func(resp *logical.Response) error { + var d struct { + Ciphertext string `mapstructure:"ciphertext"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + if d.Ciphertext == "" { + return fmt.Errorf("missing ciphertext") + } + splitStrings := strings.Split(d.Ciphertext, ":") + verString := splitStrings[1][1:] + ver, err := strconv.Atoi(verString) + if err != nil { + return fmt.Errorf("error pulling out version from verString %q, ciphertext was %s", verString, d.Ciphertext) + } + if ver != expectedVer { + return fmt.Errorf("did not get expected version") + } + decryptData["ciphertext"] = d.Ciphertext + return nil + }, + } +} + +func testAccStepEncryptVX( + t *testing.T, name, plaintext string, decryptData map[string]interface{}, + ver int, encryptHistory map[int]map[string]interface{}, +) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "encrypt/" + name, + Data: map[string]interface{}{ + "plaintext": base64.StdEncoding.EncodeToString([]byte(plaintext)), + }, + Check: func(resp *logical.Response) error { + var d struct { + Ciphertext string `mapstructure:"ciphertext"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + if d.Ciphertext == "" { + return 
fmt.Errorf("missing ciphertext") + } + splitStrings := strings.Split(d.Ciphertext, ":") + splitStrings[1] = "v" + strconv.Itoa(ver) + ciphertext := strings.Join(splitStrings, ":") + decryptData["ciphertext"] = ciphertext + encryptHistory[ver] = map[string]interface{}{ + "ciphertext": ciphertext, + } + return nil + }, + } +} + +func testAccStepLoadVX( + t *testing.T, name string, decryptData map[string]interface{}, + ver int, encryptHistory map[int]map[string]interface{}, +) logicaltest.TestStep { + // This is really a no-op to allow us to do data manip in the check function + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "keys/" + name, + Check: func(resp *logical.Response) error { + decryptData["ciphertext"] = encryptHistory[ver]["ciphertext"].(string) + return nil + }, + } +} + +func testAccStepDecryptExpectFailure( + t *testing.T, name, plaintext string, decryptData map[string]interface{}, +) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "decrypt/" + name, + Data: decryptData, + ErrorOk: true, + Check: func(resp *logical.Response) error { + if !resp.IsError() { + return fmt.Errorf("expected error") + } + return nil + }, + } +} + +func testAccStepRotate(t *testing.T, name string) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "keys/" + name + "/rotate", + } +} + +func testAccStepWriteDatakey(t *testing.T, name string, + noPlaintext bool, bits int, + dataKeyInfo map[string]interface{}, +) logicaltest.TestStep { + data := map[string]interface{}{} + subPath := "plaintext" + if noPlaintext { + subPath = "wrapped" + } + if bits != 256 { + data["bits"] = bits + } + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "datakey/" + subPath + "/" + name, + Data: data, + Check: func(resp *logical.Response) error { + var d struct { + Plaintext string `mapstructure:"plaintext"` + Ciphertext string `mapstructure:"ciphertext"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + if noPlaintext && len(d.Plaintext) != 0 { + return fmt.Errorf("received plaintxt when we disabled it") + } + if !noPlaintext { + if len(d.Plaintext) == 0 { + return fmt.Errorf("did not get plaintext when we expected it") + } + dataKeyInfo["plaintext"] = d.Plaintext + plainBytes, err := base64.StdEncoding.DecodeString(d.Plaintext) + if err != nil { + return fmt.Errorf("could not base64 decode plaintext string %q", d.Plaintext) + } + if len(plainBytes)*8 != bits { + return fmt.Errorf("returned key does not have correct bit length") + } + } + dataKeyInfo["ciphertext"] = d.Ciphertext + return nil + }, + } +} + +func testAccStepDecryptDatakey(t *testing.T, name string, + dataKeyInfo map[string]interface{}, +) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "decrypt/" + name, + Data: dataKeyInfo, + Check: func(resp *logical.Response) error { + var d struct { + Plaintext string `mapstructure:"plaintext"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + + if d.Plaintext != dataKeyInfo["plaintext"].(string) { + return fmt.Errorf("plaintext mismatch: got %q, expected %q, decryptData was %#v", d.Plaintext, dataKeyInfo["plaintext"].(string), resp.Data) + } + return nil + }, + } +} + +func TestKeyUpgrade(t *testing.T) { + key, _ := uuid.GenerateRandomBytes(32) + p := &keysutil.Policy{ + Name: "test", + Key: key, + Type: keysutil.KeyType_AES256_GCM96, + } + + 
p.MigrateKeyToKeysMap() + + if p.Key != nil || + p.Keys == nil || + len(p.Keys) != 1 || + !reflect.DeepEqual(p.Keys[strconv.Itoa(1)].Key, key) { + t.Errorf("bad key migration, result is %#v", p.Keys) + } +} + +func TestDerivedKeyUpgrade(t *testing.T) { + testDerivedKeyUpgrade(t, keysutil.KeyType_AES256_GCM96) + testDerivedKeyUpgrade(t, keysutil.KeyType_ChaCha20_Poly1305) +} + +func testDerivedKeyUpgrade(t *testing.T, keyType keysutil.KeyType) { + storage := &logical.InmemStorage{} + key, _ := uuid.GenerateRandomBytes(32) + keyContext, _ := uuid.GenerateRandomBytes(32) + + p := &keysutil.Policy{ + Name: "test", + Key: key, + Type: keyType, + Derived: true, + } + + p.MigrateKeyToKeysMap() + p.Upgrade(context.Background(), storage, cryptoRand.Reader) // Need to run the upgrade code to make the migration stick + + if p.KDF != keysutil.Kdf_hmac_sha256_counter { + t.Fatalf("bad KDF value by default; counter val is %d, KDF val is %d, policy is %#v", keysutil.Kdf_hmac_sha256_counter, p.KDF, *p) + } + + derBytesOld, err := p.GetKey(keyContext, 1, 0) + if err != nil { + t.Fatal(err) + } + + derBytesOld2, err := p.GetKey(keyContext, 1, 0) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(derBytesOld, derBytesOld2) { + t.Fatal("mismatch of same context alg") + } + + p.KDF = keysutil.Kdf_hkdf_sha256 + if p.NeedsUpgrade() { + t.Fatal("expected no upgrade needed") + } + + derBytesNew, err := p.GetKey(keyContext, 1, 64) + if err != nil { + t.Fatal(err) + } + + derBytesNew2, err := p.GetKey(keyContext, 1, 64) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(derBytesNew, derBytesNew2) { + t.Fatal("mismatch of same context alg") + } + + if reflect.DeepEqual(derBytesOld, derBytesNew) { + t.Fatal("match of different context alg") + } +} + +func TestConvergentEncryption(t *testing.T) { + testConvergentEncryptionCommon(t, 0, keysutil.KeyType_AES256_GCM96) + testConvergentEncryptionCommon(t, 2, keysutil.KeyType_AES128_GCM96) + testConvergentEncryptionCommon(t, 2, keysutil.KeyType_AES256_GCM96) + testConvergentEncryptionCommon(t, 2, keysutil.KeyType_ChaCha20_Poly1305) + testConvergentEncryptionCommon(t, 3, keysutil.KeyType_AES128_GCM96) + testConvergentEncryptionCommon(t, 3, keysutil.KeyType_AES256_GCM96) + testConvergentEncryptionCommon(t, 3, keysutil.KeyType_ChaCha20_Poly1305) +} + +func testConvergentEncryptionCommon(t *testing.T, ver int, keyType keysutil.KeyType) { + b, storage := createBackendWithSysView(t) + + req := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/testkeynonderived", + Data: map[string]interface{}{ + "derived": false, + "convergent_encryption": true, + "type": keyType.String(), + }, + } + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if !resp.IsError() { + t.Fatalf("bad: expected error response, got %#v", *resp) + } + + req = &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/testkey", + Data: map[string]interface{}{ + "derived": true, + "convergent_encryption": true, + "type": keyType.String(), + }, + } + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + require.NotNil(t, resp, "expected populated request") + + p, err := keysutil.LoadPolicy(context.Background(), storage, path.Join("policy", "testkey")) + if err != nil { + t.Fatal(err) + } + if p == nil { + t.Fatal("got nil policy") + } + + if ver > 2 { + p.ConvergentVersion = -1 + 
} else { + p.ConvergentVersion = ver + } + err = p.Persist(context.Background(), storage) + if err != nil { + t.Fatal(err) + } + b.invalidate(context.Background(), "policy/testkey") + + if ver < 3 { + // There will be an embedded key version of 3, so specifically clear it + key := p.Keys[strconv.Itoa(p.LatestVersion)] + key.ConvergentVersion = 0 + p.Keys[strconv.Itoa(p.LatestVersion)] = key + err = p.Persist(context.Background(), storage) + if err != nil { + t.Fatal(err) + } + b.invalidate(context.Background(), "policy/testkey") + + // Verify it + p, err = keysutil.LoadPolicy(context.Background(), storage, path.Join(p.StoragePrefix, "policy", "testkey")) + if err != nil { + t.Fatal(err) + } + if p == nil { + t.Fatal("got nil policy") + } + if p.ConvergentVersion != ver { + t.Fatalf("bad convergent version %d", p.ConvergentVersion) + } + key = p.Keys[strconv.Itoa(p.LatestVersion)] + if key.ConvergentVersion != 0 { + t.Fatalf("bad convergent key version %d", key.ConvergentVersion) + } + } + + // First, test using an invalid length of nonce -- this is only used for v1 convergent + req.Path = "encrypt/testkey" + if ver < 2 { + req.Data = map[string]interface{}{ + "plaintext": "emlwIHphcA==", // "zip zap" + "nonce": "Zm9vIGJhcg==", // "foo bar" + "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S", + } + resp, err = b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatalf("expected error, got nil, version is %d", ver) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if !resp.IsError() { + t.Fatalf("expected error response, got %#v", *resp) + } + + // Ensure we fail if we do not provide a nonce + req.Data = map[string]interface{}{ + "plaintext": "emlwIHphcA==", // "zip zap" + "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S", + } + resp, err = b.HandleRequest(context.Background(), req) + if err == nil && (resp == nil || !resp.IsError()) { + t.Fatal("expected error response") + } + } + + // Now test encrypting the same value twice + req.Data = map[string]interface{}{ + "plaintext": "emlwIHphcA==", // "zip zap" + "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S", + } + if ver == 0 { + req.Data["nonce"] = "b25ldHdvdGhyZWVl" // "onetwothreee" + } + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if resp.IsError() { + t.Fatalf("got error response: %#v", *resp) + } + ciphertext1 := resp.Data["ciphertext"].(string) + + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if resp.IsError() { + t.Fatalf("got error response: %#v", *resp) + } + ciphertext2 := resp.Data["ciphertext"].(string) + + if ciphertext1 != ciphertext2 { + t.Fatalf("expected the same ciphertext but got %s and %s", ciphertext1, ciphertext2) + } + + // For sanity, also check a different nonce value... 
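+	// (a caller-supplied nonce only applies to v0/v1 convergent keys; later
+	// convergent versions derive the nonce internally, so for those we vary
+	// the context value instead)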
+ req.Data = map[string]interface{}{ + "plaintext": "emlwIHphcA==", // "zip zap" + "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S", + } + if ver == 0 { + req.Data["nonce"] = "dHdvdGhyZWVmb3Vy" // "twothreefour" + } else { + req.Data["context"] = "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOldandSdd7S" + } + + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if resp.IsError() { + t.Fatalf("got error response: %#v", *resp) + } + ciphertext3 := resp.Data["ciphertext"].(string) + + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if resp.IsError() { + t.Fatalf("got error response: %#v", *resp) + } + ciphertext4 := resp.Data["ciphertext"].(string) + + if ciphertext3 != ciphertext4 { + t.Fatalf("expected the same ciphertext but got %s and %s", ciphertext3, ciphertext4) + } + if ciphertext1 == ciphertext3 { + t.Fatalf("expected different ciphertexts") + } + + // ...and a different context value + req.Data = map[string]interface{}{ + "plaintext": "emlwIHphcA==", // "zip zap" + "context": "qV4h9iQyvn+raODOer4JNAsOhkXBwdT4HZ677Ql4KLqXSU+Jk4C/fXBWbv6xkSYT", + } + if ver == 0 { + req.Data["nonce"] = "dHdvdGhyZWVmb3Vy" // "twothreefour" + } + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if resp.IsError() { + t.Fatalf("got error response: %#v", *resp) + } + ciphertext5 := resp.Data["ciphertext"].(string) + + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if resp.IsError() { + t.Fatalf("got error response: %#v", *resp) + } + ciphertext6 := resp.Data["ciphertext"].(string) + + if ciphertext5 != ciphertext6 { + t.Fatalf("expected the same ciphertext but got %s and %s", ciphertext5, ciphertext6) + } + if ciphertext1 == ciphertext5 { + t.Fatalf("expected different ciphertexts") + } + if ciphertext3 == ciphertext5 { + t.Fatalf("expected different ciphertexts") + } + + // If running version 2, check upgrade handling + if ver == 2 { + curr, err := keysutil.LoadPolicy(context.Background(), storage, path.Join(p.StoragePrefix, "policy", "testkey")) + if err != nil { + t.Fatal(err) + } + if curr == nil { + t.Fatal("got nil policy") + } + if curr.ConvergentVersion != 2 { + t.Fatalf("bad convergent version %d", curr.ConvergentVersion) + } + key := curr.Keys[strconv.Itoa(curr.LatestVersion)] + if key.ConvergentVersion != 0 { + t.Fatalf("bad convergent key version %d", key.ConvergentVersion) + } + + curr.ConvergentVersion = 3 + err = curr.Persist(context.Background(), storage) + if err != nil { + t.Fatal(err) + } + b.invalidate(context.Background(), "policy/testkey") + + // Different algorithm, should be different value + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if resp.IsError() { + t.Fatalf("got error response: %#v", *resp) + } + ciphertext7 := resp.Data["ciphertext"].(string) + + // Now do it via key-specified version + if len(curr.Keys) != 1 { + t.Fatalf("unexpected length of keys %d", len(curr.Keys)) + } + key = curr.Keys[strconv.Itoa(curr.LatestVersion)] + key.ConvergentVersion = 3 + curr.Keys[strconv.Itoa(curr.LatestVersion)] = key + 
curr.ConvergentVersion = 2 + err = curr.Persist(context.Background(), storage) + if err != nil { + t.Fatal(err) + } + b.invalidate(context.Background(), "policy/testkey") + + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if resp.IsError() { + t.Fatalf("got error response: %#v", *resp) + } + ciphertext8 := resp.Data["ciphertext"].(string) + + if ciphertext7 != ciphertext8 { + t.Fatalf("expected the same ciphertext but got %s and %s", ciphertext7, ciphertext8) + } + if ciphertext6 == ciphertext7 { + t.Fatalf("expected different ciphertexts") + } + if ciphertext3 == ciphertext7 { + t.Fatalf("expected different ciphertexts") + } + } + + // Finally, check operations on empty values + // First, check without setting a plaintext at all + req.Data = map[string]interface{}{ + "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S", + } + if ver == 0 { + req.Data["nonce"] = "dHdvdGhyZWVmb3Vy" // "twothreefour" + } + resp, err = b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatal("expected error, got nil") + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if !resp.IsError() { + t.Fatalf("expected error response, got: %#v", *resp) + } + + // Now set plaintext to empty + req.Data = map[string]interface{}{ + "plaintext": "", + "context": "pWZ6t/im3AORd0lVYE0zBdKpX6Bl3/SvFtoVTPWbdkzjG788XmMAnOlxandSdd7S", + } + if ver == 0 { + req.Data["nonce"] = "dHdvdGhyZWVmb3Vy" // "twothreefour" + } + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if resp.IsError() { + t.Fatalf("got error response: %#v", *resp) + } + ciphertext7 := resp.Data["ciphertext"].(string) + + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if resp.IsError() { + t.Fatalf("got error response: %#v", *resp) + } + ciphertext8 := resp.Data["ciphertext"].(string) + + if ciphertext7 != ciphertext8 { + t.Fatalf("expected the same ciphertext but got %s and %s", ciphertext7, ciphertext8) + } +} + +func TestPolicyFuzzing(t *testing.T) { + var be *backend + sysView := logical.TestSystemView() + sysView.CachingDisabledVal = true + conf := &logical.BackendConfig{ + System: sysView, + } + + be, _ = Backend(context.Background(), conf) + be.Setup(context.Background(), conf) + testPolicyFuzzingCommon(t, be) + + sysView.CachingDisabledVal = true + be, _ = Backend(context.Background(), conf) + be.Setup(context.Background(), conf) + testPolicyFuzzingCommon(t, be) +} + +func testPolicyFuzzingCommon(t *testing.T, be *backend) { + storage := &logical.InmemStorage{} + wg := sync.WaitGroup{} + + funcs := []string{"encrypt", "decrypt", "rotate", "change_min_version"} + // keys := []string{"test1", "test2", "test3", "test4", "test5"} + keys := []string{"test1", "test2", "test3"} + + // This is the goroutine loop + doFuzzy := func(id int) { + // Check for panics, otherwise notify we're done + defer func() { + wg.Done() + }() + + // Holds the latest encrypted value for each key + latestEncryptedText := map[string]string{} + + startTime := time.Now() + req := &logical.Request{ + Storage: storage, + Data: map[string]interface{}{}, + } + fd := &framework.FieldData{} + + var chosenFunc, chosenKey string + + // t.Errorf("Starting %d", id) + for { + // Stop after 10 seconds + if 
time.Since(startTime) > 10*time.Second {
+				return
+			}
+
+			// Pick a function and a key
+			chosenFunc = funcs[rand.Int()%len(funcs)]
+			chosenKey = keys[rand.Int()%len(keys)]
+
+			fd.Raw = map[string]interface{}{
+				"name": chosenKey,
+			}
+			fd.Schema = be.pathKeys().Fields
+
+			// Try to write the key to make sure it exists
+			_, err := be.pathPolicyWrite(context.Background(), req, fd)
+			if err != nil {
+				t.Errorf("got an error: %v", err)
+			}
+
+			switch chosenFunc {
+			// Encrypt our plaintext and store the result
+			case "encrypt":
+				// t.Errorf("%s, %s, %d", chosenFunc, chosenKey, id)
+				fd.Raw["plaintext"] = base64.StdEncoding.EncodeToString([]byte(testPlaintext))
+				fd.Schema = be.pathEncrypt().Fields
+				resp, err := be.pathEncryptWrite(context.Background(), req, fd)
+				if err != nil {
+					t.Errorf("got an error: %v, resp is %#v", err, resp)
+					continue
+				}
+				latestEncryptedText[chosenKey] = resp.Data["ciphertext"].(string)
+
+			// Rotate to a new key version
+			case "rotate":
+				// t.Errorf("%s, %s, %d", chosenFunc, chosenKey, id)
+				fd.Schema = be.pathRotate().Fields
+				resp, err := be.pathRotateWrite(context.Background(), req, fd)
+				if err != nil {
+					t.Errorf("got an error: %v, resp is %#v, chosenKey is %s", err, resp, chosenKey)
+				}
+
+			// Decrypt the ciphertext and compare the result
+			case "decrypt":
+				// t.Errorf("%s, %s, %d", chosenFunc, chosenKey, id)
+				ct := latestEncryptedText[chosenKey]
+				if ct == "" {
+					continue
+				}
+
+				fd.Raw["ciphertext"] = ct
+				fd.Schema = be.pathDecrypt().Fields
+				resp, err := be.pathDecryptWrite(context.Background(), req, fd)
+				if err != nil {
+					// This could well happen since the min version is jumping around
+					if resp != nil {
+						if respErr, ok := resp.Data["error"].(string); ok && respErr == keysutil.ErrTooOld {
+							continue
+						}
+					}
+					t.Errorf("got an error: %v, resp is %#v, ciphertext was %s, chosenKey is %s, id is %d", err, resp, ct, chosenKey, id)
+					continue
+				}
+				ptb64, ok := resp.Data["plaintext"].(string)
+				if !ok {
+					t.Errorf("no plaintext found, response was %#v", *resp)
+					return
+				}
+				pt, err := base64.StdEncoding.DecodeString(ptb64)
+				if err != nil {
+					t.Errorf("got an error decoding base64 plaintext: %v", err)
+					return
+				}
+				if string(pt) != testPlaintext {
+					t.Errorf("got bad plaintext back: %s", pt)
+				}
+
+			// Change the min version, which also tests the archive functionality
+			case "change_min_version":
+				// t.Errorf("%s, %s, %d", chosenFunc, chosenKey, id)
+				resp, err := be.pathPolicyRead(context.Background(), req, fd)
+				if err != nil {
+					t.Errorf("got an error reading policy %s: %v", chosenKey, err)
+					continue
+				}
+				latestVersion := resp.Data["latest_version"].(int)
+
+				// keys start at version 1 so we want [1, latestVersion] not [0, latestVersion)
+				setVersion := (rand.Int() % latestVersion) + 1
+				fd.Raw["min_decryption_version"] = setVersion
+				fd.Schema = be.pathKeysConfig().Fields
+				resp, err = be.pathKeysConfigWrite(context.Background(), req, fd)
+				if err != nil {
+					t.Errorf("got an error setting min decryption version: %v", err)
+				}
+			}
+		}
+	}
+
+	// Spawn 1000 of these workers for 10 seconds
+	for i := 0; i < 1000; i++ {
+		wg.Add(1)
+		go doFuzzy(i)
+	}
+
+	// Wait for them all to finish
+	wg.Wait()
+}
+
+func TestBadInput(t *testing.T) {
+	b, storage := createBackendWithSysView(t)
+
+	req := &logical.Request{
+		Storage:   storage,
+		Operation: logical.UpdateOperation,
+		Path:      "keys/test",
+	}
+
+	resp, err := b.HandleRequest(context.Background(), req)
+	if err != nil {
+		t.Fatal(err)
+	}
+	require.NotNil(t, resp, "expected populated request")
+
+	req.Path = "decrypt/test"
+	req.Data = map[string]interface{}{
+		"ciphertext": 
"vault:v1:abcd", + } + + _, err = b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatal("expected error") + } +} + +func TestTransit_AutoRotateKeys(t *testing.T) { + tests := map[string]struct { + isDRSecondary bool + isPerfSecondary bool + isStandby bool + isLocal bool + shouldRotate bool + }{ + "primary, no local mount": { + shouldRotate: true, + }, + "DR secondary, no local mount": { + isDRSecondary: true, + shouldRotate: false, + }, + "perf standby, no local mount": { + isStandby: true, + shouldRotate: false, + }, + "perf secondary, no local mount": { + isPerfSecondary: true, + shouldRotate: false, + }, + "perf secondary, local mount": { + isPerfSecondary: true, + isLocal: true, + shouldRotate: true, + }, + } + + for name, test := range tests { + t.Run( + name, + func(t *testing.T) { + var repState consts.ReplicationState + if test.isDRSecondary { + repState.AddState(consts.ReplicationDRSecondary) + } + if test.isPerfSecondary { + repState.AddState(consts.ReplicationPerformanceSecondary) + } + if test.isStandby { + repState.AddState(consts.ReplicationPerformanceStandby) + } + + sysView := logical.TestSystemView() + sysView.ReplicationStateVal = repState + sysView.LocalMountVal = test.isLocal + + storage := &logical.InmemStorage{} + + conf := &logical.BackendConfig{ + StorageView: storage, + System: sysView, + } + + b, _ := Backend(context.Background(), conf) + if b == nil { + t.Fatal("failed to create backend") + } + + err := b.Backend.Setup(context.Background(), conf) + if err != nil { + t.Fatal(err) + } + + // Write a key with the default auto rotate value (0/disabled) + req := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/test1", + } + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + require.NotNil(t, resp, "expected populated request") + + // Write a key with an auto rotate value one day in the future + req = &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/test2", + Data: map[string]interface{}{ + "auto_rotate_period": 24 * time.Hour, + }, + } + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + require.NotNil(t, resp, "expected populated request") + + // Run the rotation check and ensure none of the keys have rotated + b.checkAutoRotateAfter = time.Now() + if err = b.autoRotateKeys(context.Background(), &logical.Request{Storage: storage}); err != nil { + t.Fatal(err) + } + req = &logical.Request{ + Storage: storage, + Operation: logical.ReadOperation, + Path: "keys/test1", + } + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if resp.Data["latest_version"] != 1 { + t.Fatalf("incorrect latest_version found, got: %d, want: %d", resp.Data["latest_version"], 1) + } + + req.Path = "keys/test2" + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if resp.Data["latest_version"] != 1 { + t.Fatalf("incorrect latest_version found, got: %d, want: %d", resp.Data["latest_version"], 1) + } + + // Update auto rotate period on one key to be one nanosecond + p, _, err := b.GetPolicy(context.Background(), keysutil.PolicyRequest{ + Storage: storage, + Name: "test2", + }, b.GetRandomReader()) + if err != nil { + t.Fatal(err) + } + if p == nil { + t.Fatal("expected non-nil policy") + } + 
p.AutoRotatePeriod = time.Nanosecond + err = p.Persist(context.Background(), storage) + if err != nil { + t.Fatal(err) + } + + // Run the rotation check and validate the state of key rotations + b.checkAutoRotateAfter = time.Now() + if err = b.autoRotateKeys(context.Background(), &logical.Request{Storage: storage}); err != nil { + t.Fatal(err) + } + req = &logical.Request{ + Storage: storage, + Operation: logical.ReadOperation, + Path: "keys/test1", + } + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if resp.Data["latest_version"] != 1 { + t.Fatalf("incorrect latest_version found, got: %d, want: %d", resp.Data["latest_version"], 1) + } + req.Path = "keys/test2" + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + expectedVersion := 1 + if test.shouldRotate { + expectedVersion = 2 + } + if resp.Data["latest_version"] != expectedVersion { + t.Fatalf("incorrect latest_version found, got: %d, want: %d", resp.Data["latest_version"], expectedVersion) + } + }, + ) + } +} + +func TestTransit_AEAD(t *testing.T) { + testTransit_AEAD(t, "aes128-gcm96") + testTransit_AEAD(t, "aes256-gcm96") + testTransit_AEAD(t, "chacha20-poly1305") +} + +func testTransit_AEAD(t *testing.T, keyType string) { + var resp *logical.Response + var err error + b, storage := createBackendWithStorage(t) + + keyReq := &logical.Request{ + Path: "keys/aead", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "type": keyType, + }, + Storage: storage, + } + + resp, err = b.HandleRequest(context.Background(), keyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + + plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA==" // "the quick brown fox" + associated := "U3BoaW54IG9mIGJsYWNrIHF1YXJ0eiwganVkZ2UgbXkgdm93Lgo=" // "Sphinx of black quartz, judge my vow." + + // Basic encrypt/decrypt should work. + encryptReq := &logical.Request{ + Path: "encrypt/aead", + Operation: logical.UpdateOperation, + Storage: storage, + Data: map[string]interface{}{ + "plaintext": plaintext, + }, + } + + resp, err = b.HandleRequest(context.Background(), encryptReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + + ciphertext1 := resp.Data["ciphertext"].(string) + + decryptReq := &logical.Request{ + Path: "decrypt/aead", + Operation: logical.UpdateOperation, + Storage: storage, + Data: map[string]interface{}{ + "ciphertext": ciphertext1, + }, + } + + resp, err = b.HandleRequest(context.Background(), decryptReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + + decryptedPlaintext := resp.Data["plaintext"] + + if plaintext != decryptedPlaintext { + t.Fatalf("bad: plaintext; expected: %q\nactual: %q", plaintext, decryptedPlaintext) + } + + // Using associated as ciphertext should fail. + decryptReq.Data["ciphertext"] = associated + resp, err = b.HandleRequest(context.Background(), decryptReq) + if err == nil || (resp != nil && !resp.IsError()) { + t.Fatalf("bad expected error: err: %v\nresp: %#v", err, resp) + } + + // Redoing the above with additional data should work. 
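+	// (associated_data is authenticated but not encrypted: the AEAD mixes it
+	// into the authentication tag, so decryption only succeeds when the same
+	// value is supplied again)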
+ encryptReq.Data["associated_data"] = associated + resp, err = b.HandleRequest(context.Background(), encryptReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + + ciphertext2 := resp.Data["ciphertext"].(string) + decryptReq.Data["ciphertext"] = ciphertext2 + decryptReq.Data["associated_data"] = associated + + resp, err = b.HandleRequest(context.Background(), decryptReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("bad: err: %v\nresp: %#v", err, resp) + } + + decryptedPlaintext = resp.Data["plaintext"] + if plaintext != decryptedPlaintext { + t.Fatalf("bad: plaintext; expected: %q\nactual: %q", plaintext, decryptedPlaintext) + } + + // Removing the associated_data should break the decryption. + decryptReq.Data = map[string]interface{}{ + "ciphertext": ciphertext2, + } + resp, err = b.HandleRequest(context.Background(), decryptReq) + if err == nil || (resp != nil && !resp.IsError()) { + t.Fatalf("bad expected error: err: %v\nresp: %#v", err, resp) + } + + // Using a valid ciphertext with associated_data should also break the + // decryption. + decryptReq.Data["ciphertext"] = ciphertext1 + decryptReq.Data["associated_data"] = associated + resp, err = b.HandleRequest(context.Background(), decryptReq) + if err == nil || (resp != nil && !resp.IsError()) { + t.Fatalf("bad expected error: err: %v\nresp: %#v", err, resp) + } +} + +// Hack: use Transit as a signer. +type transitKey struct { + public any + mount string + name string + t *testing.T + client *api.Client +} + +func (k *transitKey) Public() crypto.PublicKey { + return k.public +} + +func (k *transitKey) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) { + hash := opts.(crypto.Hash) + if hash.String() != "SHA-256" { + return nil, fmt.Errorf("unknown hash algorithm: %v", opts) + } + + resp, err := k.client.Logical().Write(k.mount+"/sign/"+k.name, map[string]interface{}{ + "hash_algorithm": "sha2-256", + "input": base64.StdEncoding.EncodeToString(digest), + "prehashed": true, + "signature_algorithm": "pkcs1v15", + }) + if err != nil { + return nil, fmt.Errorf("failed to sign data: %w", err) + } + require.NotNil(k.t, resp) + require.NotNil(k.t, resp.Data) + require.NotNil(k.t, resp.Data["signature"]) + rawSig := resp.Data["signature"].(string) + sigParts := strings.Split(rawSig, ":") + + decoded, err := base64.StdEncoding.DecodeString(sigParts[2]) + if err != nil { + return nil, fmt.Errorf("failed to decode signature (%v): %w", rawSig, err) + } + + return decoded, nil +} + +func TestTransitPKICSR(t *testing.T) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "transit": Factory, + "pki": pki.Factory, + }, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + + vault.TestWaitActive(t, cores[0].Core) + + client := cores[0].Client + + // Mount transit, write a key. 
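+	// The RSA private key never leaves Transit; the CSR below is signed
+	// remotely through the transitKey crypto.Signer shim defined above.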
+ err := client.Sys().Mount("transit", &api.MountInput{ + Type: "transit", + }) + require.NoError(t, err) + + _, err = client.Logical().Write("transit/keys/leaf", map[string]interface{}{ + "type": "rsa-2048", + }) + require.NoError(t, err) + + resp, err := client.Logical().Read("transit/keys/leaf") + require.NoError(t, err) + require.NotNil(t, resp) + + keys := resp.Data["keys"].(map[string]interface{}) + require.NotNil(t, keys) + keyData := keys["1"].(map[string]interface{}) + require.NotNil(t, keyData) + keyPublic := keyData["public_key"].(string) + require.NotEmpty(t, keyPublic) + + pemBlock, _ := pem.Decode([]byte(keyPublic)) + require.NotNil(t, pemBlock) + pubKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes) + require.NoError(t, err) + require.NotNil(t, pubKey) + + // Setup a new CSR... + var reqTemplate x509.CertificateRequest + reqTemplate.PublicKey = pubKey + reqTemplate.PublicKeyAlgorithm = x509.RSA + reqTemplate.Subject.CommonName = "dadgarcorp.com" + + var k transitKey + k.public = pubKey + k.mount = "transit" + k.name = "leaf" + k.t = t + k.client = client + + req, err := x509.CreateCertificateRequest(cryptoRand.Reader, &reqTemplate, &k) + require.NoError(t, err) + require.NotNil(t, req) + + reqPEM := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE REQUEST", + Bytes: req, + }) + t.Logf("csr: %v", string(reqPEM)) + + // Mount PKI, generate a root, sign this CSR. + err = client.Sys().Mount("pki", &api.MountInput{ + Type: "pki", + }) + require.NoError(t, err) + + resp, err = client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "common_name": "PKI Root X1", + }) + require.NoError(t, err) + require.NotNil(t, resp) + rootCertPEM := resp.Data["certificate"].(string) + + pemBlock, _ = pem.Decode([]byte(rootCertPEM)) + require.NotNil(t, pemBlock) + + rootCert, err := x509.ParseCertificate(pemBlock.Bytes) + require.NoError(t, err) + + resp, err = client.Logical().Write("pki/issuer/default/sign-verbatim", map[string]interface{}{ + "csr": string(reqPEM), + "ttl": "10m", + }) + require.NoError(t, err) + require.NotNil(t, resp) + + leafCertPEM := resp.Data["certificate"].(string) + pemBlock, _ = pem.Decode([]byte(leafCertPEM)) + require.NotNil(t, pemBlock) + + leafCert, err := x509.ParseCertificate(pemBlock.Bytes) + require.NoError(t, err) + require.NoError(t, leafCert.CheckSignatureFrom(rootCert)) + t.Logf("root: %v", rootCertPEM) + t.Logf("leaf: %v", leafCertPEM) +} + +func TestTransit_ReadPublicKeyImported(t *testing.T) { + testTransit_ReadPublicKeyImported(t, "rsa-2048") + testTransit_ReadPublicKeyImported(t, "ecdsa-p256") + testTransit_ReadPublicKeyImported(t, "ed25519") +} + +func testTransit_ReadPublicKeyImported(t *testing.T, keyType string) { + generateKeys(t) + b, s := createBackendWithStorage(t) + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + + // Get key + privateKey := getKey(t, keyType) + publicKeyBytes, err := getPublicKey(privateKey, keyType) + if err != nil { + t.Fatalf("failed to extract the public key: %s", err) + } + + // Import key + importReq := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "public_key": publicKeyBytes, + "type": keyType, + }, + } + importResp, err := b.HandleRequest(context.Background(), importReq) + if err != nil || (importResp != nil && importResp.IsError()) { + t.Fatalf("failed to import public key. 
err: %s\nresp: %#v", err, importResp) + } + + // Read key + readReq := &logical.Request{ + Operation: logical.ReadOperation, + Path: "keys/" + keyID, + Storage: s, + } + + readResp, err := b.HandleRequest(context.Background(), readReq) + if err != nil || (readResp != nil && readResp.IsError()) { + t.Fatalf("failed to read key. err: %s\nresp: %#v", err, readResp) + } +} + +func TestTransit_SignWithImportedPublicKey(t *testing.T) { + testTransit_SignWithImportedPublicKey(t, "rsa-2048") + testTransit_SignWithImportedPublicKey(t, "ecdsa-p256") + testTransit_SignWithImportedPublicKey(t, "ed25519") +} + +func testTransit_SignWithImportedPublicKey(t *testing.T, keyType string) { + generateKeys(t) + b, s := createBackendWithStorage(t) + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + + // Get key + privateKey := getKey(t, keyType) + publicKeyBytes, err := getPublicKey(privateKey, keyType) + if err != nil { + t.Fatalf("failed to extract the public key: %s", err) + } + + // Import key + importReq := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "public_key": publicKeyBytes, + "type": keyType, + }, + } + importResp, err := b.HandleRequest(context.Background(), importReq) + if err != nil || (importResp != nil && importResp.IsError()) { + t.Fatalf("failed to import public key. err: %s\nresp: %#v", err, importResp) + } + + // Sign text + signReq := &logical.Request{ + Path: "sign/" + keyID, + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "plaintext": base64.StdEncoding.EncodeToString([]byte(testPlaintext)), + }, + } + + _, err = b.HandleRequest(context.Background(), signReq) + if err == nil { + t.Fatalf("expected error, should have failed to sign input") + } +} + +func TestTransit_VerifyWithImportedPublicKey(t *testing.T) { + generateKeys(t) + keyType := "rsa-2048" + b, s := createBackendWithStorage(t) + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + + // Get key + privateKey := getKey(t, keyType) + publicKeyBytes, err := getPublicKey(privateKey, keyType) + if err != nil { + t.Fatal(err) + } + + // Retrieve public wrapping key + wrappingKey, err := b.getWrappingKey(context.Background(), s) + if err != nil || wrappingKey == nil { + t.Fatalf("failed to retrieve public wrapping key: %s", err) + } + + privWrappingKey := wrappingKey.Keys[strconv.Itoa(wrappingKey.LatestVersion)].RSAKey + pubWrappingKey := &privWrappingKey.PublicKey + + // generate ciphertext + importBlob := wrapTargetKeyForImport(t, pubWrappingKey, privateKey, keyType, "SHA256") + + // Import private key + importReq := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "ciphertext": importBlob, + "type": keyType, + }, + } + importResp, err := b.HandleRequest(context.Background(), importReq) + if err != nil || (importResp != nil && importResp.IsError()) { + t.Fatalf("failed to import key. 
err: %s\nresp: %#v", err, importResp) + } + + // Sign text + signReq := &logical.Request{ + Storage: s, + Path: "sign/" + keyID, + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "plaintext": base64.StdEncoding.EncodeToString([]byte(testPlaintext)), + }, + } + + signResp, err := b.HandleRequest(context.Background(), signReq) + if err != nil || (signResp != nil && signResp.IsError()) { + t.Fatalf("failed to sign plaintext. err: %s\nresp: %#v", err, signResp) + } + + // Get signature + signature := signResp.Data["signature"].(string) + + // Import new key as public key + importPubReq := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", "public-key-rsa"), + Data: map[string]interface{}{ + "public_key": publicKeyBytes, + "type": keyType, + }, + } + importPubResp, err := b.HandleRequest(context.Background(), importPubReq) + if err != nil || (importPubResp != nil && importPubResp.IsError()) { + t.Fatalf("failed to import public key. err: %s\nresp: %#v", err, importPubResp) + } + + // Verify signed text + verifyReq := &logical.Request{ + Path: "verify/public-key-rsa", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "input": base64.StdEncoding.EncodeToString([]byte(testPlaintext)), + "signature": signature, + }, + } + + verifyResp, err := b.HandleRequest(context.Background(), verifyReq) + if err != nil || (importResp != nil && verifyResp.IsError()) { + t.Fatalf("failed to verify signed data. err: %s\nresp: %#v", err, importResp) + } +} + +func TestTransit_ExportPublicKeyImported(t *testing.T) { + testTransit_ExportPublicKeyImported(t, "rsa-2048") + testTransit_ExportPublicKeyImported(t, "ecdsa-p256") + testTransit_ExportPublicKeyImported(t, "ed25519") +} + +func testTransit_ExportPublicKeyImported(t *testing.T, keyType string) { + generateKeys(t) + b, s := createBackendWithStorage(t) + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + + // Get key + privateKey := getKey(t, keyType) + publicKeyBytes, err := getPublicKey(privateKey, keyType) + if err != nil { + t.Fatalf("failed to extract the public key: %s", err) + } + + t.Logf("generated key: %v", string(publicKeyBytes)) + + // Import key + importReq := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "public_key": publicKeyBytes, + "type": keyType, + "exportable": true, + }, + } + importResp, err := b.HandleRequest(context.Background(), importReq) + if err != nil || (importResp != nil && importResp.IsError()) { + t.Fatalf("failed to import public key. err: %s\nresp: %#v", err, importResp) + } + + t.Logf("importing key: %v", importResp) + + // Export key + exportReq := &logical.Request{ + Operation: logical.ReadOperation, + Path: fmt.Sprintf("export/public-key/%s/latest", keyID), + Storage: s, + } + + exportResp, err := b.HandleRequest(context.Background(), exportReq) + if err != nil || (exportResp != nil && exportResp.IsError()) { + t.Fatalf("failed to export key. 
err: %v\nresp: %#v", err, exportResp) + } + + t.Logf("exporting key: %v", exportResp) + + responseKeys, exist := exportResp.Data["keys"] + if !exist { + t.Fatal("expected response data to hold a 'keys' field") + } + + exportedKeyBytes := responseKeys.(map[string]string)["1"] + + if keyType != "ed25519" { + exportedKeyBlock, _ := pem.Decode([]byte(exportedKeyBytes)) + publicKeyBlock, _ := pem.Decode(publicKeyBytes) + + if !reflect.DeepEqual(publicKeyBlock.Bytes, exportedKeyBlock.Bytes) { + t.Fatalf("exported key bytes should have matched with imported key for key type: %v\nexported: %v\nimported: %v", keyType, exportedKeyBlock.Bytes, publicKeyBlock.Bytes) + } + } else { + exportedKey, err := base64.StdEncoding.DecodeString(exportedKeyBytes) + if err != nil { + t.Fatalf("error decoding exported key bytes (%v) to base64 for key type %v: %v", exportedKeyBytes, keyType, err) + } + + publicKeyBlock, _ := pem.Decode(publicKeyBytes) + publicKeyParsed, err := x509.ParsePKIXPublicKey(publicKeyBlock.Bytes) + if err != nil { + t.Fatalf("error decoding source key bytes (%v) from PKIX marshaling for key type %v: %v", publicKeyBlock.Bytes, keyType, err) + } + + if !reflect.DeepEqual([]byte(publicKeyParsed.(ed25519.PublicKey)), exportedKey) { + t.Fatalf("exported key bytes should have matched with imported key for key type: %v\nexported: %v\nimported: %v", keyType, exportedKey, publicKeyParsed) + } + } +} diff --git a/builtin/logical/transit/cmd/transit/main.go b/builtin/logical/transit/cmd/transit/main.go new file mode 100644 index 0000000..7e2ae87 --- /dev/null +++ b/builtin/logical/transit/cmd/transit/main.go @@ -0,0 +1,34 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "os" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/transit" + "github.com/hashicorp/vault/sdk/plugin" +) + +func main() { + apiClientMeta := &api.PluginAPIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(os.Args[1:]) + + tlsConfig := apiClientMeta.GetTLSConfig() + tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) + + if err := plugin.ServeMultiplex(&plugin.ServeOpts{ + BackendFactoryFunc: transit.Factory, + // set the TLSProviderFunc so that the plugin maintains backwards + // compatibility with Vault versions that don’t support plugin AutoMTLS + TLSProviderFunc: tlsProviderFunc, + }); err != nil { + logger := hclog.New(&hclog.LoggerOptions{}) + + logger.Error("plugin shutting down", "error", err) + os.Exit(1) + } +} diff --git a/builtin/logical/transit/managed_key_util.go b/builtin/logical/transit/managed_key_util.go new file mode 100644 index 0000000..c4dc1e9 --- /dev/null +++ b/builtin/logical/transit/managed_key_util.go @@ -0,0 +1,17 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !enterprise + +package transit + +import ( + "context" + "errors" +) + +var errEntOnly = errors.New("managed keys are supported within enterprise edition only") + +func GetManagedKeyUUID(ctx context.Context, b *backend, keyName string, keyId string) (uuid string, err error) { + return "", errEntOnly +} diff --git a/builtin/logical/transit/path_backup.go b/builtin/logical/transit/path_backup.go new file mode 100644 index 0000000..9383342 --- /dev/null +++ b/builtin/logical/transit/path_backup.go @@ -0,0 +1,55 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func (b *backend) pathBackup() *framework.Path { + return &framework.Path{ + Pattern: "backup/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "back-up", + OperationSuffix: "key", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the key", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathBackupRead, + }, + + HelpSynopsis: pathBackupHelpSyn, + HelpDescription: pathBackupHelpDesc, + } +} + +func (b *backend) pathBackupRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + backup, err := b.lm.BackupPolicy(ctx, req.Storage, d.Get("name").(string)) + if err != nil { + return nil, err + } + + return &logical.Response{ + Data: map[string]interface{}{ + "backup": backup, + }, + }, nil +} + +const ( + pathBackupHelpSyn = `Backup the named key` + pathBackupHelpDesc = `This path is used to backup the named key.` +) diff --git a/builtin/logical/transit/path_backup_test.go b/builtin/logical/transit/path_backup_test.go new file mode 100644 index 0000000..3627d6b --- /dev/null +++ b/builtin/logical/transit/path_backup_test.go @@ -0,0 +1,258 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "testing" + + "github.com/hashicorp/vault/sdk/logical" +) + +func TestTransit_BackupRestore(t *testing.T) { + // Test encryption/decryption after a restore for supported keys + testBackupRestore(t, "aes128-gcm96", "encrypt-decrypt") + testBackupRestore(t, "aes256-gcm96", "encrypt-decrypt") + testBackupRestore(t, "chacha20-poly1305", "encrypt-decrypt") + testBackupRestore(t, "rsa-2048", "encrypt-decrypt") + testBackupRestore(t, "rsa-3072", "encrypt-decrypt") + testBackupRestore(t, "rsa-4096", "encrypt-decrypt") + + // Test signing/verification after a restore for supported keys + testBackupRestore(t, "ecdsa-p256", "sign-verify") + testBackupRestore(t, "ecdsa-p384", "sign-verify") + testBackupRestore(t, "ecdsa-p521", "sign-verify") + testBackupRestore(t, "ed25519", "sign-verify") + testBackupRestore(t, "rsa-2048", "sign-verify") + testBackupRestore(t, "rsa-3072", "sign-verify") + testBackupRestore(t, "rsa-4096", "sign-verify") + + // Test HMAC/verification after a restore for all key types + testBackupRestore(t, "aes128-gcm96", "hmac-verify") + testBackupRestore(t, "aes256-gcm96", "hmac-verify") + testBackupRestore(t, "chacha20-poly1305", "hmac-verify") + testBackupRestore(t, "ecdsa-p256", "hmac-verify") + testBackupRestore(t, "ecdsa-p384", "hmac-verify") + testBackupRestore(t, "ecdsa-p521", "hmac-verify") + testBackupRestore(t, "ed25519", "hmac-verify") + testBackupRestore(t, "rsa-2048", "hmac-verify") + testBackupRestore(t, "rsa-3072", "hmac-verify") + testBackupRestore(t, "rsa-4096", "hmac-verify") + testBackupRestore(t, "hmac", "hmac-verify") +} + +func testBackupRestore(t *testing.T, keyType, feature string) { + var resp *logical.Response + var err error + + b, s := createBackendWithStorage(t) + + // Create a key + keyReq := &logical.Request{ + Path: "keys/test", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "type": keyType, + "exportable": true, + }, + } + if keyType == 
"hmac" { + keyReq.Data["key_size"] = 32 + } + resp, err = b.HandleRequest(context.Background(), keyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + + // Configure the key to allow its deletion + configReq := &logical.Request{ + Path: "keys/test/config", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "deletion_allowed": true, + "allow_plaintext_backup": true, + }, + } + resp, err = b.HandleRequest(context.Background(), configReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + + // Take a backup of the key + backupReq := &logical.Request{ + Path: "backup/test", + Operation: logical.ReadOperation, + Storage: s, + } + resp, err = b.HandleRequest(context.Background(), backupReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + backup := resp.Data["backup"] + + // Try to restore the key without deleting it. Expect error due to + // conflicting key names. + restoreReq := &logical.Request{ + Path: "restore", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "backup": backup, + }, + } + resp, err = b.HandleRequest(context.Background(), restoreReq) + if resp != nil && resp.IsError() { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + if err == nil { + t.Fatalf("expected an error") + } + + plaintextB64 := "dGhlIHF1aWNrIGJyb3duIGZveA==" // "the quick brown fox" + + // Perform encryption, signing or hmac-ing based on the set 'feature' + var encryptReq, signReq, hmacReq *logical.Request + var ciphertext, signature, hmac string + switch feature { + case "encrypt-decrypt": + encryptReq = &logical.Request{ + Path: "encrypt/test", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "plaintext": plaintextB64, + }, + } + resp, err = b.HandleRequest(context.Background(), encryptReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + ciphertext = resp.Data["ciphertext"].(string) + + case "sign-verify": + signReq = &logical.Request{ + Path: "sign/test", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "input": plaintextB64, + }, + } + resp, err = b.HandleRequest(context.Background(), signReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + signature = resp.Data["signature"].(string) + + case "hmac-verify": + hmacReq = &logical.Request{ + Path: "hmac/test", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "input": plaintextB64, + }, + } + resp, err = b.HandleRequest(context.Background(), hmacReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + hmac = resp.Data["hmac"].(string) + } + + // Delete the key + keyReq.Operation = logical.DeleteOperation + resp, err = b.HandleRequest(context.Background(), keyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + + // Restore the key from the backup + resp, err = b.HandleRequest(context.Background(), restoreReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + + // validationFunc verifies the ciphertext, signature or hmac based on the + // set 'feature' + validationFunc := func(keyName string) { + var decryptReq *logical.Request + var verifyReq *logical.Request + 
switch feature { + case "encrypt-decrypt": + decryptReq = &logical.Request{ + Path: "decrypt/" + keyName, + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "ciphertext": ciphertext, + }, + } + resp, err = b.HandleRequest(context.Background(), decryptReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + + if resp.Data["plaintext"].(string) != plaintextB64 { + t.Fatalf("bad: plaintext; expected: %q, actual: %q", plaintextB64, resp.Data["plaintext"].(string)) + } + case "sign-verify": + verifyReq = &logical.Request{ + Path: "verify/" + keyName, + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "signature": signature, + "input": plaintextB64, + }, + } + resp, err = b.HandleRequest(context.Background(), verifyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + if resp.Data["valid"].(bool) != true { + t.Fatalf("bad: signature verification failed for key type %q", keyType) + } + + case "hmac-verify": + verifyReq = &logical.Request{ + Path: "verify/" + keyName, + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "hmac": hmac, + "input": plaintextB64, + }, + } + resp, err = b.HandleRequest(context.Background(), verifyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + if resp.Data["valid"].(bool) != true { + t.Fatalf("bad: HMAC verification failed for key type %q", keyType) + } + } + } + + // Ensure that the restored key is functional + validationFunc("test") + + // Delete the key again + resp, err = b.HandleRequest(context.Background(), keyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + + // Restore the key under a different name + restoreReq.Path = "restore/test1" + resp, err = b.HandleRequest(context.Background(), restoreReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + + // Ensure that the restored key is functional + validationFunc("test1") +} diff --git a/builtin/logical/transit/path_byok.go b/builtin/logical/transit/path_byok.go new file mode 100644 index 0000000..40f7cac --- /dev/null +++ b/builtin/logical/transit/path_byok.go @@ -0,0 +1,206 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "errors" + "fmt" + "strconv" + "strings" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/keysutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func (b *backend) pathBYOKExportKeys() *framework.Path { + return &framework.Path{ + Pattern: "byok-export/" + framework.GenericNameRegex("destination") + "/" + framework.GenericNameRegex("source") + framework.OptionalParamRegex("version"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "byok", + OperationSuffix: "key|key-version", + }, + + Fields: map[string]*framework.FieldSchema{ + "destination": { + Type: framework.TypeString, + Description: "Destination key to export to; usually the public wrapping key of another Transit instance.", + }, + "source": { + Type: framework.TypeString, + Description: "Source key to export; could be any present key within Transit.", + }, + "version": { + Type: framework.TypeString, + Description: "Optional version of the key to export, else all key versions are exported.", + }, + "hash": { + Type: framework.TypeString, + Description: "Hash function to use for inner OAEP encryption. Defaults to SHA256.", + Default: "SHA256", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathPolicyBYOKExportRead, + }, + + HelpSynopsis: pathBYOKExportHelpSyn, + HelpDescription: pathBYOKExportHelpDesc, + } +} + +func (b *backend) pathPolicyBYOKExportRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + dst := d.Get("destination").(string) + src := d.Get("source").(string) + version := d.Get("version").(string) + hash := d.Get("hash").(string) + + dstP, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ + Storage: req.Storage, + Name: dst, + }, b.GetRandomReader()) + if err != nil { + return nil, err + } + if dstP == nil { + return nil, fmt.Errorf("no such destination key to export to") + } + if !b.System().CachingDisabled() { + dstP.Lock(false) + } + defer dstP.Unlock() + + srcP, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ + Storage: req.Storage, + Name: src, + }, b.GetRandomReader()) + if err != nil { + return nil, err + } + if srcP == nil { + return nil, fmt.Errorf("no such source key for export") + } + if !b.System().CachingDisabled() { + srcP.Lock(false) + } + defer srcP.Unlock() + + if !srcP.Exportable { + return logical.ErrorResponse("key is not exportable"), nil + } + + retKeys := map[string]string{} + switch version { + case "": + for k, v := range srcP.Keys { + exportKey, err := getBYOKExportKey(dstP, srcP, &v, hash) + if err != nil { + return nil, err + } + retKeys[k] = exportKey + } + + default: + var versionValue int + if version == "latest" { + versionValue = srcP.LatestVersion + } else { + version = strings.TrimPrefix(version, "v") + versionValue, err = strconv.Atoi(version) + if err != nil { + return logical.ErrorResponse("invalid key version"), logical.ErrInvalidRequest + } + } + + if versionValue < srcP.MinDecryptionVersion { + return logical.ErrorResponse("version for export is below minimum decryption version"), logical.ErrInvalidRequest + } + key, ok := srcP.Keys[strconv.Itoa(versionValue)] + if !ok { + return logical.ErrorResponse("version does not exist or cannot be found"), logical.ErrInvalidRequest + } + + exportKey, err := getBYOKExportKey(dstP, srcP, &key, hash) + 
if err != nil { + return nil, err + } + + retKeys[strconv.Itoa(versionValue)] = exportKey + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "name": srcP.Name, + "type": srcP.Type.String(), + "keys": retKeys, + }, + } + + return resp, nil +} + +func getBYOKExportKey(dstP *keysutil.Policy, srcP *keysutil.Policy, key *keysutil.KeyEntry, hash string) (string, error) { + if dstP == nil || srcP == nil { + return "", errors.New("nil policy provided") + } + + var targetKey interface{} + switch srcP.Type { + case keysutil.KeyType_AES128_GCM96, keysutil.KeyType_AES256_GCM96, keysutil.KeyType_ChaCha20_Poly1305, keysutil.KeyType_HMAC: + targetKey = key.Key + case keysutil.KeyType_RSA2048, keysutil.KeyType_RSA3072, keysutil.KeyType_RSA4096: + targetKey = key.RSAKey + case keysutil.KeyType_ECDSA_P256, keysutil.KeyType_ECDSA_P384, keysutil.KeyType_ECDSA_P521: + var curve elliptic.Curve + switch srcP.Type { + case keysutil.KeyType_ECDSA_P384: + curve = elliptic.P384() + case keysutil.KeyType_ECDSA_P521: + curve = elliptic.P521() + default: + curve = elliptic.P256() + } + pubKey := ecdsa.PublicKey{ + Curve: curve, + X: key.EC_X, + Y: key.EC_Y, + } + targetKey = &ecdsa.PrivateKey{ + PublicKey: pubKey, + D: key.EC_D, + } + case keysutil.KeyType_ED25519: + targetKey = ed25519.PrivateKey(key.Key) + default: + return "", fmt.Errorf("unable to export to unknown key type: %v", srcP.Type) + } + + hasher, err := parseHashFn(hash) + if err != nil { + return "", err + } + + return dstP.WrapKey(0, targetKey, srcP.Type, hasher) +} + +const pathBYOKExportHelpSyn = `Securely export named encryption or signing key` + +const pathBYOKExportHelpDesc = ` +This path is used to export the named keys that are configured as +exportable. + +Unlike the regular /export/:name[/:version] paths, this path uses +the same encryption specification /import, allowing secure migration +of keys between clusters to enable workloads to communicate between +them. + +Presently this only works for RSA destination keys. +` diff --git a/builtin/logical/transit/path_byok_test.go b/builtin/logical/transit/path_byok_test.go new file mode 100644 index 0000000..a05a719 --- /dev/null +++ b/builtin/logical/transit/path_byok_test.go @@ -0,0 +1,229 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "testing" + + "github.com/hashicorp/vault/sdk/logical" +) + +func TestTransit_BYOKExportImport(t *testing.T) { + // Test encryption/decryption after a restore for supported keys + testBYOKExportImport(t, "aes128-gcm96", "encrypt-decrypt") + testBYOKExportImport(t, "aes256-gcm96", "encrypt-decrypt") + testBYOKExportImport(t, "chacha20-poly1305", "encrypt-decrypt") + testBYOKExportImport(t, "rsa-2048", "encrypt-decrypt") + testBYOKExportImport(t, "rsa-3072", "encrypt-decrypt") + testBYOKExportImport(t, "rsa-4096", "encrypt-decrypt") + + // Test signing/verification after a restore for supported keys + testBYOKExportImport(t, "ecdsa-p256", "sign-verify") + testBYOKExportImport(t, "ecdsa-p384", "sign-verify") + testBYOKExportImport(t, "ecdsa-p521", "sign-verify") + testBYOKExportImport(t, "ed25519", "sign-verify") + testBYOKExportImport(t, "rsa-2048", "sign-verify") + testBYOKExportImport(t, "rsa-3072", "sign-verify") + testBYOKExportImport(t, "rsa-4096", "sign-verify") + + // Test HMAC sign/verify after a restore for supported keys. 
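+	// (only the dedicated hmac key type is exercised here: as getBYOKExportKey
+	// above shows, byok-export wraps a key's primary material rather than its
+	// per-version HMAC key, so HMACs from other key types would not survive
+	// the round trip)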
+ testBYOKExportImport(t, "hmac", "hmac-verify") +} + +func testBYOKExportImport(t *testing.T, keyType, feature string) { + var resp *logical.Response + var err error + + b, s := createBackendWithStorage(t) + + // Create a key + keyReq := &logical.Request{ + Path: "keys/test-source", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "type": keyType, + "exportable": true, + }, + } + if keyType == "hmac" { + keyReq.Data["key_size"] = 32 + } + resp, err = b.HandleRequest(context.Background(), keyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + + // Read the wrapping key. + wrapKeyReq := &logical.Request{ + Path: "wrapping_key", + Operation: logical.ReadOperation, + Storage: s, + } + resp, err = b.HandleRequest(context.Background(), wrapKeyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + + // Import the wrapping key. + wrapKeyImportReq := &logical.Request{ + Path: "keys/wrapper/import", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "public_key": resp.Data["public_key"], + "type": "rsa-4096", + }, + } + resp, err = b.HandleRequest(context.Background(), wrapKeyImportReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + + // Export the key + backupReq := &logical.Request{ + Path: "byok-export/wrapper/test-source", + Operation: logical.ReadOperation, + Storage: s, + } + resp, err = b.HandleRequest(context.Background(), backupReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + keys := resp.Data["keys"].(map[string]string) + + // Import the key to a new name. + restoreReq := &logical.Request{ + Path: "keys/test/import", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "ciphertext": keys["1"], + "type": keyType, + }, + } + resp, err = b.HandleRequest(context.Background(), restoreReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + + plaintextB64 := "dGhlIHF1aWNrIGJyb3duIGZveA==" // "the quick brown fox" + // Perform encryption, signing or hmac-ing based on the set 'feature' + var encryptReq, signReq, hmacReq *logical.Request + var ciphertext, signature, hmac string + switch feature { + case "encrypt-decrypt": + encryptReq = &logical.Request{ + Path: "encrypt/test-source", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "plaintext": plaintextB64, + }, + } + resp, err = b.HandleRequest(context.Background(), encryptReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + ciphertext = resp.Data["ciphertext"].(string) + + case "sign-verify": + signReq = &logical.Request{ + Path: "sign/test-source", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "input": plaintextB64, + }, + } + resp, err = b.HandleRequest(context.Background(), signReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + signature = resp.Data["signature"].(string) + + case "hmac-verify": + hmacReq = &logical.Request{ + Path: "hmac/test-source", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "input": plaintextB64, + }, + } + resp, err = b.HandleRequest(context.Background(), hmacReq) + if err != nil || (resp != nil && resp.IsError()) { + 
t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + hmac = resp.Data["hmac"].(string) + } + + // validationFunc verifies the ciphertext, signature or hmac based on the + // set 'feature' + validationFunc := func(keyName string) { + var decryptReq *logical.Request + var verifyReq *logical.Request + switch feature { + case "encrypt-decrypt": + decryptReq = &logical.Request{ + Path: "decrypt/" + keyName, + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "ciphertext": ciphertext, + }, + } + resp, err = b.HandleRequest(context.Background(), decryptReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + + if resp.Data["plaintext"].(string) != plaintextB64 { + t.Fatalf("bad: plaintext; expected: %q, actual: %q", plaintextB64, resp.Data["plaintext"].(string)) + } + case "sign-verify": + verifyReq = &logical.Request{ + Path: "verify/" + keyName, + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "signature": signature, + "input": plaintextB64, + }, + } + resp, err = b.HandleRequest(context.Background(), verifyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + if resp.Data["valid"].(bool) != true { + t.Fatalf("bad: signature verification failed for key type %q", keyType) + } + + case "hmac-verify": + verifyReq = &logical.Request{ + Path: "verify/" + keyName, + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "hmac": hmac, + "input": plaintextB64, + }, + } + resp, err = b.HandleRequest(context.Background(), verifyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + if resp.Data["valid"].(bool) != true { + t.Fatalf("bad: HMAC verification failed for key type %q", keyType) + } + } + } + + // Ensure that the restored key is functional + validationFunc("test") + + // Ensure the original key is functional + validationFunc("test-source") +} diff --git a/builtin/logical/transit/path_cache_config.go b/builtin/logical/transit/path_cache_config.go new file mode 100644 index 0000000..f8f0cea --- /dev/null +++ b/builtin/logical/transit/path_cache_config.go @@ -0,0 +1,133 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package transit
+
+import (
+	"context"
+	"errors"
+
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+func (b *backend) pathCacheConfig() *framework.Path {
+	return &framework.Path{
+		Pattern: "cache-config",
+
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixTransit,
+		},
+
+		Fields: map[string]*framework.FieldSchema{
+			"size": {
+				Type:        framework.TypeInt,
+				Required:    false,
+				Default:     0,
+				Description: `Size of the cache; use 0 for an unlimited cache size. Defaults to 0`,
+			},
+		},
+
+		Operations: map[logical.Operation]framework.OperationHandler{
+			logical.ReadOperation: &framework.PathOperation{
+				Callback: b.pathCacheConfigRead,
+				Summary:  "Returns the size of the active cache",
+				DisplayAttrs: &framework.DisplayAttributes{
+					OperationSuffix: "cache-configuration",
+				},
+			},
+
+			logical.UpdateOperation: &framework.PathOperation{
+				Callback: b.pathCacheConfigWrite,
+				Summary:  "Configures a new cache of the specified size",
+				DisplayAttrs: &framework.DisplayAttributes{
+					OperationVerb:   "configure",
+					OperationSuffix: "cache",
+				},
+			},
+
+			logical.CreateOperation: &framework.PathOperation{
+				Callback: b.pathCacheConfigWrite,
+				Summary:  "Configures a new cache of the specified size",
+				DisplayAttrs: &framework.DisplayAttributes{
+					OperationVerb:   "configure",
+					OperationSuffix: "cache",
+				},
+			},
+		},
+
+		HelpSynopsis:    pathCacheConfigHelpSyn,
+		HelpDescription: pathCacheConfigHelpDesc,
+	}
+}
+
+func (b *backend) pathCacheConfigWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	// get target size
+	cacheSize := d.Get("size").(int)
+	if cacheSize != 0 && cacheSize < minCacheSize {
+		return logical.ErrorResponse("size must be 0 or a value greater than or equal to %d", minCacheSize), logical.ErrInvalidRequest
+	}
+
+	// store cache size
+	entry, err := logical.StorageEntryJSON("config/cache", &configCache{
+		Size: cacheSize,
+	})
+	if err != nil {
+		return nil, err
+	}
+	if err := req.Storage.Put(ctx, entry); err != nil {
+		return nil, err
+	}
+
+	err = b.lm.InitCache(cacheSize)
+	if err != nil {
+		return nil, err
+	}
+
+	return &logical.Response{
+		Data: map[string]interface{}{
+			"size": cacheSize,
+		},
+	}, nil
+}
+
+type configCache struct {
+	Size int `json:"size"`
+}
+
+func (b *backend) pathCacheConfigRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	// error if no cache is configured
+	if !b.lm.GetUseCache() {
+		return nil, errors.New(
+			"caching is disabled for this transit mount",
+		)
+	}
+
+	// Compare the current and stored cache sizes; if they differ,
+	// re-initialize the cache to the stored size.
+	currentCacheSize := b.lm.GetCacheSize()
+	storedCacheSize, err := GetCacheSizeFromStorage(ctx, req.Storage)
+	if err != nil {
+		return nil, err
+	}
+
+	if currentCacheSize != storedCacheSize {
+		err = b.lm.InitCache(storedCacheSize)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	resp := &logical.Response{
+		Data: map[string]interface{}{
+			"size": storedCacheSize,
+		},
+	}
+
+	return resp, nil
+}
+
+const pathCacheConfigHelpSyn = `Configure caching strategy`
+
+const pathCacheConfigHelpDesc = `
+This path is used to configure and query the size of the active cache. A size of 0 means unlimited.
+` diff --git a/builtin/logical/transit/path_cache_config_test.go b/builtin/logical/transit/path_cache_config_test.go new file mode 100644 index 0000000..f5c8316 --- /dev/null +++ b/builtin/logical/transit/path_cache_config_test.go @@ -0,0 +1,123 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "testing" + + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + targetCacheSize = 12345 + smallCacheSize = 3 +) + +func TestTransit_CacheConfig(t *testing.T) { + b1, storage := createBackendWithSysView(t) + + doReq := func(b *backend, req *logical.Request) *logical.Response { + resp, err := b.HandleRequest(context.Background(), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("got err:\n%#v\nreq:\n%#v\n", err, *req) + } + return resp + } + + doErrReq := func(b *backend, req *logical.Request) { + resp, err := b.HandleRequest(context.Background(), req) + if err == nil { + if resp == nil || !resp.IsError() { + t.Fatalf("expected error; req:\n%#v\n", *req) + } + } + } + + validateResponse := func(resp *logical.Response, expectedCacheSize int, expectedWarning bool) { + actualCacheSize, ok := resp.Data["size"].(int) + if !ok { + t.Fatalf("No size returned") + } + if expectedCacheSize != actualCacheSize { + t.Fatalf("testAccReadCacheConfig expected: %d got: %d", expectedCacheSize, actualCacheSize) + } + // check for the presence/absence of warnings - warnings are expected if a cache size has been + // configured but not yet applied by reloading the plugin + warningCheckPass := expectedWarning == (len(resp.Warnings) > 0) + if !warningCheckPass { + t.Fatalf( + "testAccSteporeadCacheConfig warnings error.\n"+ + "expect warnings: %t but number of warnings was: %d", + expectedWarning, len(resp.Warnings), + ) + } + } + + writeReq := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "cache-config", + Data: map[string]interface{}{ + "size": targetCacheSize, + }, + } + + writeSmallCacheSizeReq := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "cache-config", + Data: map[string]interface{}{ + "size": smallCacheSize, + }, + } + + readReq := &logical.Request{ + Storage: storage, + Operation: logical.ReadOperation, + Path: "cache-config", + } + + polReq := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/aes256", + Data: map[string]interface{}{ + "derived": true, + }, + } + + // test steps + // b1 should spin up with an unlimited cache + validateResponse(doReq(b1, readReq), 0, false) + + // Change cache size to targetCacheSize 12345 and validate that cache size is updated + doReq(b1, writeReq) + validateResponse(doReq(b1, readReq), targetCacheSize, false) + b1.invalidate(context.Background(), "cache-config/") + + // Change the cache size to 1000 to mock the scenario where + // current cache size and stored cache size are different and + // a cache update is needed + b1.lm.InitCache(1000) + + // Write a new policy which in its code path detects that cache size has changed + // and refreshes the cache to 12345 + doReq(b1, polReq) + + // Validate that cache size is updated to 12345 + validateResponse(doReq(b1, readReq), targetCacheSize, false) + + // b2 should spin up with a configured cache + b2 := createBackendWithSysViewWithStorage(t, storage) + validateResponse(doReq(b2, readReq), targetCacheSize, false) + + // b3 enables transit without a cache, trying to read it should error + b3 := 
createBackendWithForceNoCacheWithSysViewWithStorage(t, storage) + doErrReq(b3, readReq) + + // b4 should spin up with a size less than minimum cache size (10) + b4, storage := createBackendWithSysView(t) + doErrReq(b4, writeSmallCacheSizeReq) +} diff --git a/builtin/logical/transit/path_config_keys.go b/builtin/logical/transit/path_config_keys.go new file mode 100644 index 0000000..bbf2cc4 --- /dev/null +++ b/builtin/logical/transit/path_config_keys.go @@ -0,0 +1,136 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "fmt" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +const keysConfigPath = "config/keys" + +type keysConfig struct { + DisableUpsert bool `json:"disable_upsert"` +} + +var defaultKeysConfig = keysConfig{ + DisableUpsert: false, +} + +func (b *backend) pathConfigKeys() *framework.Path { + return &framework.Path{ + Pattern: "config/keys", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + }, + + Fields: map[string]*framework.FieldSchema{ + "disable_upsert": { + Type: framework.TypeBool, + Description: `Whether to allow automatic upserting (creation) of +keys on the encrypt endpoint.`, + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathConfigKeysWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "configure", + OperationSuffix: "keys", + }, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathConfigKeysRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationSuffix: "keys-configuration", + }, + }, + }, + + HelpSynopsis: pathConfigKeysHelpSyn, + HelpDescription: pathConfigKeysHelpDesc, + } +} + +func (b *backend) readConfigKeys(ctx context.Context, req *logical.Request) (*keysConfig, error) { + entry, err := req.Storage.Get(ctx, keysConfigPath) + if err != nil { + return nil, fmt.Errorf("failed to fetch keys configuration: %w", err) + } + + var cfg keysConfig + if entry == nil { + cfg = defaultKeysConfig + return &cfg, nil + } + + if err := entry.DecodeJSON(&cfg); err != nil { + return nil, fmt.Errorf("failed to decode keys configuration: %w", err) + } + + return &cfg, nil +} + +func (b *backend) writeConfigKeys(ctx context.Context, req *logical.Request, cfg *keysConfig) error { + entry, err := logical.StorageEntryJSON(keysConfigPath, cfg) + if err != nil { + return fmt.Errorf("failed to marshal keys configuration: %w", err) + } + + return req.Storage.Put(ctx, entry) +} + +func respondConfigKeys(cfg *keysConfig) *logical.Response { + return &logical.Response{ + Data: map[string]interface{}{ + "disable_upsert": cfg.DisableUpsert, + }, + } +} + +func (b *backend) pathConfigKeysWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + upsert := d.Get("disable_upsert").(bool) + + cfg, err := b.readConfigKeys(ctx, req) + if err != nil { + return nil, err + } + + modified := false + + if cfg.DisableUpsert != upsert { + cfg.DisableUpsert = upsert + modified = true + } + + if modified { + if err := b.writeConfigKeys(ctx, req, cfg); err != nil { + return nil, err + } + } + + return respondConfigKeys(cfg), nil +} + +func (b *backend) pathConfigKeysRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + cfg, err := b.readConfigKeys(ctx, req) + if err != nil { + return nil, err + } + + return 
respondConfigKeys(cfg), nil +} + +const pathConfigKeysHelpSyn = `Configuration common across all keys` + +const pathConfigKeysHelpDesc = ` +This path is used to configure common functionality across all keys. Currently, +this supports limiting the ability to automatically create new keys when an +unknown key is used for encryption (upsert). +` diff --git a/builtin/logical/transit/path_config_keys_test.go b/builtin/logical/transit/path_config_keys_test.go new file mode 100644 index 0000000..dde7c58 --- /dev/null +++ b/builtin/logical/transit/path_config_keys_test.go @@ -0,0 +1,70 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "testing" + + "github.com/hashicorp/vault/sdk/logical" +) + +func TestTransit_ConfigKeys(t *testing.T) { + b, s := createBackendWithSysView(t) + + doReq := func(req *logical.Request) *logical.Response { + resp, err := b.HandleRequest(context.Background(), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("got err:\n%#v\nreq:\n%#v\n", err, *req) + } + return resp + } + doErrReq := func(req *logical.Request) { + resp, err := b.HandleRequest(context.Background(), req) + if err == nil { + if resp == nil || !resp.IsError() { + t.Fatalf("expected error; req:\n%#v\n", *req) + } + } + } + + // First read the global config + req := &logical.Request{ + Storage: s, + Operation: logical.ReadOperation, + Path: "config/keys", + } + resp := doReq(req) + if resp.Data["disable_upsert"].(bool) != false { + t.Fatalf("expected disable_upsert to be false; got: %v", resp) + } + + // Ensure we can upsert. + req.Operation = logical.CreateOperation + req.Path = "encrypt/upsert-1" + req.Data = map[string]interface{}{ + "plaintext": "aGVsbG8K", + } + doReq(req) + + // Disable upserting. + req.Operation = logical.UpdateOperation + req.Path = "config/keys" + req.Data = map[string]interface{}{ + "disable_upsert": true, + } + doReq(req) + + // Attempt upserting again, it should fail. + req.Operation = logical.CreateOperation + req.Path = "encrypt/upsert-2" + req.Data = map[string]interface{}{ + "plaintext": "aGVsbG8K", + } + doErrReq(req) + + // Redoing this with the first key should succeed. + req.Path = "encrypt/upsert-1" + doReq(req) +} diff --git a/builtin/logical/transit/path_datakey.go b/builtin/logical/transit/path_datakey.go new file mode 100644 index 0000000..ddb5c76 --- /dev/null +++ b/builtin/logical/transit/path_datakey.go @@ -0,0 +1,215 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "crypto/rand" + "encoding/base64" + "errors" + "fmt" + + "github.com/hashicorp/vault/helper/constants" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/helper/keysutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func (b *backend) pathDatakey() *framework.Path { + return &framework.Path{ + Pattern: "datakey/" + framework.GenericNameRegex("plaintext") + "/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "generate", + OperationSuffix: "data-key", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "The backend key used for encrypting the data key", + }, + + "plaintext": { + Type: framework.TypeString, + Description: `"plaintext" will return the key in both plaintext and +ciphertext; "wrapped" will return the ciphertext only.`, + }, + + "context": { + Type: framework.TypeString, + Description: "Context for key derivation. Required for derived keys.", + }, + + "nonce": { + Type: framework.TypeString, + Description: "Nonce for when convergent encryption v1 is used (only in Vault 0.6.1)", + }, + + "bits": { + Type: framework.TypeInt, + Description: `Number of bits for the key; currently 128, 256, +and 512 bits are supported. Defaults to 256.`, + Default: 256, + }, + + "key_version": { + Type: framework.TypeInt, + Description: `The version of the Vault key to use for +encryption of the data key. Must be 0 (for latest) +or a value greater than or equal to the +min_encryption_version configured on the key.`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathDatakeyWrite, + }, + + HelpSynopsis: pathDatakeyHelpSyn, + HelpDescription: pathDatakeyHelpDesc, + } +} + +func (b *backend) pathDatakeyWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + ver := d.Get("key_version").(int) + + plaintext := d.Get("plaintext").(string) + plaintextAllowed := false + switch plaintext { + case "plaintext": + plaintextAllowed = true + case "wrapped": + default: + return logical.ErrorResponse("Invalid path, must be 'plaintext' or 'wrapped'"), logical.ErrInvalidRequest + } + + var err error + + // Decode the context if any + contextRaw := d.Get("context").(string) + var context []byte + if len(contextRaw) != 0 { + context, err = base64.StdEncoding.DecodeString(contextRaw) + if err != nil { + return logical.ErrorResponse("failed to base64-decode context"), logical.ErrInvalidRequest + } + } + + // Decode the nonce if any + nonceRaw := d.Get("nonce").(string) + var nonce []byte + if len(nonceRaw) != 0 { + nonce, err = base64.StdEncoding.DecodeString(nonceRaw) + if err != nil { + return logical.ErrorResponse("failed to base64-decode nonce"), logical.ErrInvalidRequest + } + } + + // Get the policy + p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ + Storage: req.Storage, + Name: name, + }, b.GetRandomReader()) + if err != nil { + return nil, err + } + if p == nil { + return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest + } + if !b.System().CachingDisabled() { + p.Lock(false) + } + defer p.Unlock() + + newKey := make([]byte, 32) + bits := d.Get("bits").(int) + switch bits { + case 512: + newKey = make([]byte, 64) + case 256: + case 128: 
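+		// The requested bit length maps to the key length in bytes:
+		// 512 bits -> 64 bytes, 256 bits -> the 32-byte default
+		// allocated above, 128 bits -> the 16-byte slice below.
+		// For example (illustrative request against this path, with a
+		// hypothetical key name):
+		//   vault write transit/datakey/plaintext/my-key bits=128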
+		newKey = make([]byte, 16)
+	default:
+		return logical.ErrorResponse("invalid bit length"), logical.ErrInvalidRequest
+	}
+	_, err = rand.Read(newKey)
+	if err != nil {
+		return nil, err
+	}
+
+	var managedKeyFactory ManagedKeyFactory
+	if p.Type == keysutil.KeyType_MANAGED_KEY {
+		managedKeySystemView, ok := b.System().(logical.ManagedKeySystemView)
+		if !ok {
+			return nil, errors.New("unsupported system view")
+		}
+
+		managedKeyFactory = ManagedKeyFactory{
+			managedKeyParams: keysutil.ManagedKeyParameters{
+				ManagedKeySystemView: managedKeySystemView,
+				BackendUUID:          b.backendUUID,
+				Context:              ctx,
+			},
+		}
+	}
+
+	ciphertext, err := p.EncryptWithFactory(ver, context, nonce, base64.StdEncoding.EncodeToString(newKey), nil, managedKeyFactory)
+	if err != nil {
+		switch err.(type) {
+		case errutil.UserError:
+			return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+		case errutil.InternalError:
+			return nil, err
+		default:
+			return nil, err
+		}
+	}
+
+	if ciphertext == "" {
+		return nil, fmt.Errorf("empty ciphertext returned")
+	}
+
+	keyVersion := ver
+	if keyVersion == 0 {
+		keyVersion = p.LatestVersion
+	}
+
+	// Generate the response
+	resp := &logical.Response{
+		Data: map[string]interface{}{
+			"ciphertext":  ciphertext,
+			"key_version": keyVersion,
+		},
+	}
+
+	if len(nonce) > 0 && !nonceAllowed(p) {
+		return nil, ErrNonceNotAllowed
+	}
+
+	if constants.IsFIPS() && shouldWarnAboutNonceUsage(p, nonce) {
+		resp.AddWarning("A provided nonce value was used within FIPS mode, this violates FIPS 140 compliance.")
+	}
+
+	if plaintextAllowed {
+		resp.Data["plaintext"] = base64.StdEncoding.EncodeToString(newKey)
+	}
+
+	return resp, nil
+}
+
+const pathDatakeyHelpSyn = `Generate a data key`
+
+const pathDatakeyHelpDesc = `
+This path can be used to generate a data key: a random
+key of a certain length that can be used for encryption
+and decryption, protected by the named backend key. 128, 256,
+or 512 bits can be specified; if not specified, the default
+is 256 bits. Call with the "wrapped" path to prevent the
+(base64-encoded) plaintext key from being returned along with
+the encrypted key; the "plaintext" path returns both.
+` diff --git a/builtin/logical/transit/path_decrypt.go b/builtin/logical/transit/path_decrypt.go new file mode 100644 index 0000000..116732b --- /dev/null +++ b/builtin/logical/transit/path_decrypt.go @@ -0,0 +1,269 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/helper/keysutil" + "github.com/hashicorp/vault/sdk/logical" +) + +type DecryptBatchResponseItem struct { + // Plaintext for the ciphertext present in the corresponding batch + // request item + Plaintext string `json:"plaintext" structs:"plaintext" mapstructure:"plaintext"` + + // Error, if set represents a failure encountered while encrypting a + // corresponding batch request item + Error string `json:"error,omitempty" structs:"error" mapstructure:"error"` + + // Reference is an arbitrary caller supplied string value that will be placed on the + // batch response to ease correlation between inputs and outputs + Reference string `json:"reference" structs:"reference" mapstructure:"reference"` +} + +func (b *backend) pathDecrypt() *framework.Path { + return &framework.Path{ + Pattern: "decrypt/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "decrypt", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the key", + }, + + "ciphertext": { + Type: framework.TypeString, + Description: ` +The ciphertext to decrypt, provided as returned by encrypt.`, + }, + + "context": { + Type: framework.TypeString, + Description: ` +Base64 encoded context for key derivation. Required if key derivation is +enabled.`, + }, + + "nonce": { + Type: framework.TypeString, + Description: ` +Base64 encoded nonce value used during encryption. Must be provided if +convergent encryption is enabled for this key and the key was generated with +Vault 0.6.1. Not required for keys created in 0.6.2+.`, + }, + + "partial_failure_response_code": { + Type: framework.TypeInt, + Description: ` +Ordinarily, if a batch item fails to decrypt due to a bad input, but other batch items succeed, +the HTTP response code is 400 (Bad Request). Some applications may want to treat partial failures differently. +Providing the parameter returns the given response code integer instead of a 400 in this case. If all values fail +HTTP 400 is still returned.`, + }, + + "associated_data": { + Type: framework.TypeString, + Description: ` +When using an AEAD cipher mode, such as AES-GCM, this parameter allows +passing associated data (AD/AAD) into the encryption function; this data +must be passed on subsequent decryption requests but can be transited in +plaintext. On successful decryption, both the ciphertext and the associated +data are attested not to have been tampered with. + `, + }, + + "batch_input": { + Type: framework.TypeSlice, + Description: ` +Specifies a list of items to be decrypted in a single batch. When this +parameter is set, if the parameters 'ciphertext', 'context' and 'nonce' are +also set, they will be ignored. 
Any batch output will preserve the order +of the batch input.`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathDecryptWrite, + }, + + HelpSynopsis: pathDecryptHelpSyn, + HelpDescription: pathDecryptHelpDesc, + } +} + +func (b *backend) pathDecryptWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + batchInputRaw := d.Raw["batch_input"] + var batchInputItems []BatchRequestItem + var err error + if batchInputRaw != nil { + err = decodeDecryptBatchRequestItems(batchInputRaw, &batchInputItems) + if err != nil { + return nil, fmt.Errorf("failed to parse batch input: %w", err) + } + + if len(batchInputItems) == 0 { + return logical.ErrorResponse("missing batch input to process"), logical.ErrInvalidRequest + } + } else { + ciphertext := d.Get("ciphertext").(string) + if len(ciphertext) == 0 { + return logical.ErrorResponse("missing ciphertext to decrypt"), logical.ErrInvalidRequest + } + + batchInputItems = make([]BatchRequestItem, 1) + batchInputItems[0] = BatchRequestItem{ + Ciphertext: ciphertext, + Context: d.Get("context").(string), + Nonce: d.Get("nonce").(string), + AssociatedData: d.Get("associated_data").(string), + } + } + + batchResponseItems := make([]DecryptBatchResponseItem, len(batchInputItems)) + contextSet := len(batchInputItems[0].Context) != 0 + + userErrorInBatch := false + internalErrorInBatch := false + + for i, item := range batchInputItems { + if (len(item.Context) == 0 && contextSet) || (len(item.Context) != 0 && !contextSet) { + return logical.ErrorResponse("context should be set either in all the request blocks or in none"), logical.ErrInvalidRequest + } + + if item.Ciphertext == "" { + userErrorInBatch = true + batchResponseItems[i].Error = "missing ciphertext to decrypt" + continue + } + + // Decode the context + if len(item.Context) != 0 { + batchInputItems[i].DecodedContext, err = base64.StdEncoding.DecodeString(item.Context) + if err != nil { + userErrorInBatch = true + batchResponseItems[i].Error = err.Error() + continue + } + } + + // Decode the nonce + if len(item.Nonce) != 0 { + batchInputItems[i].DecodedNonce, err = base64.StdEncoding.DecodeString(item.Nonce) + if err != nil { + userErrorInBatch = true + batchResponseItems[i].Error = err.Error() + continue + } + } + } + + // Get the policy + p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ + Storage: req.Storage, + Name: d.Get("name").(string), + }, b.GetRandomReader()) + if err != nil { + return nil, err + } + if p == nil { + return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest + } + if !b.System().CachingDisabled() { + p.Lock(false) + } + + successesInBatch := false + for i, item := range batchInputItems { + if batchResponseItems[i].Error != "" { + continue + } + + var factory interface{} + if item.AssociatedData != "" { + if !p.Type.AssociatedDataSupported() { + batchResponseItems[i].Error = fmt.Sprintf("'[%d].associated_data' provided for non-AEAD cipher suite %v", i, p.Type.String()) + continue + } + + factory = AssocDataFactory{item.AssociatedData} + } + + var managedKeyFactory ManagedKeyFactory + if p.Type == keysutil.KeyType_MANAGED_KEY { + managedKeySystemView, ok := b.System().(logical.ManagedKeySystemView) + if !ok { + batchResponseItems[i].Error = errors.New("unsupported system view").Error() + } + + managedKeyFactory = ManagedKeyFactory{ + managedKeyParams: keysutil.ManagedKeyParameters{ + ManagedKeySystemView: managedKeySystemView, + BackendUUID: 
b.backendUUID, + Context: ctx, + }, + } + } + + plaintext, err := p.DecryptWithFactory(item.DecodedContext, item.DecodedNonce, item.Ciphertext, factory, managedKeyFactory) + if err != nil { + switch err.(type) { + case errutil.InternalError: + internalErrorInBatch = true + default: + userErrorInBatch = true + } + batchResponseItems[i].Error = err.Error() + continue + } + successesInBatch = true + batchResponseItems[i].Plaintext = plaintext + } + + resp := &logical.Response{} + if batchInputRaw != nil { + // Copy the references + for i := range batchInputItems { + batchResponseItems[i].Reference = batchInputItems[i].Reference + } + resp.Data = map[string]interface{}{ + "batch_results": batchResponseItems, + } + } else { + if batchResponseItems[0].Error != "" { + p.Unlock() + + if internalErrorInBatch { + return nil, errutil.InternalError{Err: batchResponseItems[0].Error} + } + + return logical.ErrorResponse(batchResponseItems[0].Error), logical.ErrInvalidRequest + } + resp.Data = map[string]interface{}{ + "plaintext": batchResponseItems[0].Plaintext, + } + } + + p.Unlock() + + return batchRequestResponse(d, resp, req, successesInBatch, userErrorInBatch, internalErrorInBatch) +} + +const pathDecryptHelpSyn = `Decrypt a ciphertext value using a named key` + +const pathDecryptHelpDesc = ` +This path uses the named key from the request path to decrypt a user +provided ciphertext. The plaintext is returned base64 encoded. +` diff --git a/builtin/logical/transit/path_decrypt_bench_test.go b/builtin/logical/transit/path_decrypt_bench_test.go new file mode 100644 index 0000000..c4dc728 --- /dev/null +++ b/builtin/logical/transit/path_decrypt_bench_test.go @@ -0,0 +1,91 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "testing" + + "github.com/hashicorp/vault/sdk/logical" +) + +func BenchmarkTransit_BatchDecryption1(b *testing.B) { + BTransit_BatchDecryption(b, 1) +} + +func BenchmarkTransit_BatchDecryption10(b *testing.B) { + BTransit_BatchDecryption(b, 10) +} + +func BenchmarkTransit_BatchDecryption50(b *testing.B) { + BTransit_BatchDecryption(b, 50) +} + +func BenchmarkTransit_BatchDecryption100(b *testing.B) { + BTransit_BatchDecryption(b, 100) +} + +func BenchmarkTransit_BatchDecryption1000(b *testing.B) { + BTransit_BatchDecryption(b, 1_000) +} + +func BenchmarkTransit_BatchDecryption10000(b *testing.B) { + BTransit_BatchDecryption(b, 10_000) +} + +func BTransit_BatchDecryption(b *testing.B, bsize int) { + b.StopTimer() + + var resp *logical.Response + var err error + + backend, s := createBackendWithStorage(b) + + batchEncryptionInput := make([]interface{}, 0, bsize) + for i := 0; i < bsize; i++ { + batchEncryptionInput = append( + batchEncryptionInput, + map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="}, + ) + } + + batchEncryptionData := map[string]interface{}{ + "batch_input": batchEncryptionInput, + } + + batchEncryptionReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "encrypt/upserted_key", + Storage: s, + Data: batchEncryptionData, + } + resp, err = backend.HandleRequest(context.Background(), batchEncryptionReq) + if err != nil || (resp != nil && resp.IsError()) { + b.Fatalf("err:%v resp:%#v", err, resp) + } + + batchResponseItems := resp.Data["batch_results"].([]EncryptBatchResponseItem) + batchDecryptionInput := make([]interface{}, len(batchResponseItems)) + for i, item := range batchResponseItems { + batchDecryptionInput[i] = map[string]interface{}{"ciphertext": 
item.Ciphertext} + } + batchDecryptionData := map[string]interface{}{ + "batch_input": batchDecryptionInput, + } + + batchDecryptionReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "decrypt/upserted_key", + Storage: s, + Data: batchDecryptionData, + } + + b.StartTimer() + for i := 0; i < b.N; i++ { + resp, err = backend.HandleRequest(context.Background(), batchDecryptionReq) + if err != nil || (resp != nil && resp.IsError()) { + b.Fatalf("err:%v resp:%#v", err, resp) + } + } +} diff --git a/builtin/logical/transit/path_decrypt_test.go b/builtin/logical/transit/path_decrypt_test.go new file mode 100644 index 0000000..e69402c --- /dev/null +++ b/builtin/logical/transit/path_decrypt_test.go @@ -0,0 +1,281 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "encoding/json" + "net/http" + "reflect" + "testing" + + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/mapstructure" +) + +func TestTransit_BatchDecryption(t *testing.T) { + var resp *logical.Response + var err error + + b, s := createBackendWithStorage(t) + + batchEncryptionInput := []interface{}{ + map[string]interface{}{"plaintext": "", "reference": "foo"}, // empty string + map[string]interface{}{"plaintext": "Cg==", "reference": "bar"}, // newline + map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "reference": "baz"}, + } + batchEncryptionData := map[string]interface{}{ + "batch_input": batchEncryptionInput, + } + + batchEncryptionReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "encrypt/upserted_key", + Storage: s, + Data: batchEncryptionData, + } + resp, err = b.HandleRequest(context.Background(), batchEncryptionReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + batchResponseItems := resp.Data["batch_results"].([]EncryptBatchResponseItem) + batchDecryptionInput := make([]interface{}, len(batchResponseItems)) + for i, item := range batchResponseItems { + batchDecryptionInput[i] = map[string]interface{}{"ciphertext": item.Ciphertext, "reference": item.Reference} + } + batchDecryptionData := map[string]interface{}{ + "batch_input": batchDecryptionInput, + } + + batchDecryptionReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "decrypt/upserted_key", + Storage: s, + Data: batchDecryptionData, + } + resp, err = b.HandleRequest(context.Background(), batchDecryptionReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + batchDecryptionResponseItems := resp.Data["batch_results"].([]DecryptBatchResponseItem) + // This seems fragile + expectedResult := "[{\"plaintext\":\"\",\"reference\":\"foo\"},{\"plaintext\":\"Cg==\",\"reference\":\"bar\"},{\"plaintext\":\"dGhlIHF1aWNrIGJyb3duIGZveA==\",\"reference\":\"baz\"}]" + + jsonResponse, err := json.Marshal(batchDecryptionResponseItems) + if err != nil || err == nil && string(jsonResponse) != expectedResult { + t.Fatalf("bad: expected json response [%s]", jsonResponse) + } +} + +func TestTransit_BatchDecryption_DerivedKey(t *testing.T) { + var req *logical.Request + var resp *logical.Response + var err error + + b, s := createBackendWithStorage(t) + + // Create a derived key. 
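+	// Derived keys require a base64-encoded "context" on every encrypt and
+	// decrypt request; the cases below exercise both matching and
+	// mismatched contexts.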
+ req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "keys/existing_key", + Storage: s, + Data: map[string]interface{}{ + "derived": true, + }, + } + resp, err = b.HandleRequest(context.Background(), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + // Encrypt some values for use in test cases. + plaintextItems := []struct { + plaintext, context string + }{ + {plaintext: "dGhlIHF1aWNrIGJyb3duIGZveA==", context: "dGVzdGNvbnRleHQ="}, + {plaintext: "anVtcGVkIG92ZXIgdGhlIGxhenkgZG9n", context: "dGVzdGNvbnRleHQy"}, + } + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "encrypt/existing_key", + Storage: s, + Data: map[string]interface{}{ + "batch_input": []interface{}{ + map[string]interface{}{"plaintext": plaintextItems[0].plaintext, "context": plaintextItems[0].context}, + map[string]interface{}{"plaintext": plaintextItems[1].plaintext, "context": plaintextItems[1].context}, + }, + }, + } + resp, err = b.HandleRequest(context.Background(), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + encryptedItems := resp.Data["batch_results"].([]EncryptBatchResponseItem) + + tests := []struct { + name string + in []interface{} + want []DecryptBatchResponseItem + shouldErr bool + wantHTTPStatus int + params map[string]interface{} + }{ + { + name: "nil-input", + in: nil, + shouldErr: true, + }, + { + name: "empty-input", + in: []interface{}{}, + shouldErr: true, + }, + { + name: "single-item-success", + in: []interface{}{ + map[string]interface{}{"ciphertext": encryptedItems[0].Ciphertext, "context": plaintextItems[0].context}, + }, + want: []DecryptBatchResponseItem{ + {Plaintext: plaintextItems[0].plaintext}, + }, + }, + { + name: "single-item-invalid-ciphertext", + in: []interface{}{ + map[string]interface{}{"ciphertext": "xxx", "context": plaintextItems[0].context}, + }, + want: []DecryptBatchResponseItem{ + {Error: "invalid ciphertext: no prefix"}, + }, + wantHTTPStatus: http.StatusBadRequest, + }, + { + name: "single-item-wrong-context", + in: []interface{}{ + map[string]interface{}{"ciphertext": encryptedItems[0].Ciphertext, "context": plaintextItems[1].context}, + }, + want: []DecryptBatchResponseItem{ + {Error: "cipher: message authentication failed"}, + }, + wantHTTPStatus: http.StatusBadRequest, + }, + { + name: "batch-full-success", + in: []interface{}{ + map[string]interface{}{"ciphertext": encryptedItems[0].Ciphertext, "context": plaintextItems[0].context}, + map[string]interface{}{"ciphertext": encryptedItems[1].Ciphertext, "context": plaintextItems[1].context}, + }, + want: []DecryptBatchResponseItem{ + {Plaintext: plaintextItems[0].plaintext}, + {Plaintext: plaintextItems[1].plaintext}, + }, + }, + { + name: "batch-partial-success", + in: []interface{}{ + map[string]interface{}{"ciphertext": encryptedItems[0].Ciphertext, "context": plaintextItems[1].context}, + map[string]interface{}{"ciphertext": encryptedItems[1].Ciphertext, "context": plaintextItems[1].context}, + }, + want: []DecryptBatchResponseItem{ + {Error: "cipher: message authentication failed"}, + {Plaintext: plaintextItems[1].plaintext}, + }, + wantHTTPStatus: http.StatusBadRequest, + }, + { + name: "batch-partial-success-overridden-response", + in: []interface{}{ + map[string]interface{}{"ciphertext": encryptedItems[0].Ciphertext, "context": plaintextItems[1].context}, + map[string]interface{}{"ciphertext": encryptedItems[1].Ciphertext, "context": 
plaintextItems[1].context}, + }, + want: []DecryptBatchResponseItem{ + {Error: "cipher: message authentication failed"}, + {Plaintext: plaintextItems[1].plaintext}, + }, + params: map[string]interface{}{"partial_failure_response_code": http.StatusAccepted}, + wantHTTPStatus: http.StatusAccepted, + }, + { + name: "batch-full-failure", + in: []interface{}{ + map[string]interface{}{"ciphertext": encryptedItems[0].Ciphertext, "context": plaintextItems[1].context}, + map[string]interface{}{"ciphertext": encryptedItems[1].Ciphertext, "context": plaintextItems[0].context}, + }, + want: []DecryptBatchResponseItem{ + {Error: "cipher: message authentication failed"}, + {Error: "cipher: message authentication failed"}, + }, + wantHTTPStatus: http.StatusBadRequest, + }, + { + name: "batch-full-failure-overridden-response", + in: []interface{}{ + map[string]interface{}{"ciphertext": encryptedItems[0].Ciphertext, "context": plaintextItems[1].context}, + map[string]interface{}{"ciphertext": encryptedItems[1].Ciphertext, "context": plaintextItems[0].context}, + }, + want: []DecryptBatchResponseItem{ + {Error: "cipher: message authentication failed"}, + {Error: "cipher: message authentication failed"}, + }, + params: map[string]interface{}{"partial_failure_response_code": http.StatusAccepted}, + // Full failure, shouldn't affect status code + wantHTTPStatus: http.StatusBadRequest, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "decrypt/existing_key", + Storage: s, + Data: map[string]interface{}{ + "batch_input": tt.in, + }, + } + for k, v := range tt.params { + req.Data[k] = v + } + resp, err = b.HandleRequest(context.Background(), req) + + didErr := err != nil || (resp != nil && resp.IsError()) + if didErr { + if !tt.shouldErr { + t.Fatalf("unexpected error err:%v, resp:%#v", err, resp) + } + } else { + if tt.shouldErr { + t.Fatal("expected error, but none occurred") + } + + if rawRespBody, ok := resp.Data[logical.HTTPRawBody]; ok { + httpResp := &logical.HTTPResponse{} + err = jsonutil.DecodeJSON([]byte(rawRespBody.(string)), httpResp) + if err != nil { + t.Fatalf("failed to unmarshal nested response: err:%v, resp:%#v", err, resp) + } + + if respStatus, ok := resp.Data[logical.HTTPStatusCode]; !ok || respStatus != tt.wantHTTPStatus { + t.Fatalf("HTTP response status code mismatch, want:%d, got:%d", tt.wantHTTPStatus, respStatus) + } + + resp = logical.HTTPResponseToLogicalResponse(httpResp) + } + + var respItems []DecryptBatchResponseItem + err = mapstructure.Decode(resp.Data["batch_results"], &respItems) + if err != nil { + t.Fatalf("problem decoding response items: err:%v, resp:%#v", err, resp) + } + if !reflect.DeepEqual(tt.want, respItems) { + t.Fatalf("response items mismatch, want:%#v, got:%#v", tt.want, respItems) + } + } + }) + } +} diff --git a/builtin/logical/transit/path_encrypt.go b/builtin/logical/transit/path_encrypt.go new file mode 100644 index 0000000..f7bdf9c --- /dev/null +++ b/builtin/logical/transit/path_encrypt.go @@ -0,0 +1,658 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package transit
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"reflect"
+
+	"github.com/hashicorp/vault/helper/constants"
+
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/helper/errutil"
+	"github.com/hashicorp/vault/sdk/helper/keysutil"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/mitchellh/mapstructure"
+)
+
+// BatchRequestItem represents a request item for batch processing
+type BatchRequestItem struct {
+	// Context for key derivation. This is required for derived keys.
+	Context string `json:"context" structs:"context" mapstructure:"context"`
+
+	// DecodedContext is the base64 decoded version of Context
+	DecodedContext []byte
+
+	// Plaintext for encryption
+	Plaintext string `json:"plaintext" structs:"plaintext" mapstructure:"plaintext"`
+
+	// Ciphertext for decryption
+	Ciphertext string `json:"ciphertext" structs:"ciphertext" mapstructure:"ciphertext"`
+
+	// Nonce to be used when v1 convergent encryption is used
+	Nonce string `json:"nonce" structs:"nonce" mapstructure:"nonce"`
+
+	// The key version to be used for encryption
+	KeyVersion int `json:"key_version" structs:"key_version" mapstructure:"key_version"`
+
+	// DecodedNonce is the base64 decoded version of Nonce
+	DecodedNonce []byte
+
+	// Associated Data for AEAD ciphers
+	AssociatedData string `json:"associated_data" structs:"associated_data" mapstructure:"associated_data"`
+
+	// Reference is an arbitrary caller supplied string value that will be placed on the
+	// batch response to ease correlation between inputs and outputs
+	Reference string `json:"reference" structs:"reference" mapstructure:"reference"`
+}
+
+// EncryptBatchResponseItem represents a response item for batch processing
+type EncryptBatchResponseItem struct {
+	// Ciphertext for the plaintext present in the corresponding batch
+	// request item
+	Ciphertext string `json:"ciphertext,omitempty" structs:"ciphertext" mapstructure:"ciphertext"`
+
+	// KeyVersion defines the key version used to encrypt plaintext.
+ KeyVersion int `json:"key_version,omitempty" structs:"key_version" mapstructure:"key_version"` + + // Error, if set represents a failure encountered while encrypting a + // corresponding batch request item + Error string `json:"error,omitempty" structs:"error" mapstructure:"error"` + + // Reference is an arbitrary caller supplied string value that will be placed on the + // batch response to ease correlation between inputs and outputs + Reference string `json:"reference"` +} + +type AssocDataFactory struct { + Encoded string +} + +func (a AssocDataFactory) GetAssociatedData() ([]byte, error) { + return base64.StdEncoding.DecodeString(a.Encoded) +} + +type ManagedKeyFactory struct { + managedKeyParams keysutil.ManagedKeyParameters +} + +func (m ManagedKeyFactory) GetManagedKeyParameters() keysutil.ManagedKeyParameters { + return m.managedKeyParams +} + +func (b *backend) pathEncrypt() *framework.Path { + return &framework.Path{ + Pattern: "encrypt/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "encrypt", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the key", + }, + + "plaintext": { + Type: framework.TypeString, + Description: "Base64 encoded plaintext value to be encrypted", + }, + + "context": { + Type: framework.TypeString, + Description: "Base64 encoded context for key derivation. Required if key derivation is enabled", + }, + + "nonce": { + Type: framework.TypeString, + Description: ` +Base64 encoded nonce value. Must be provided if convergent encryption is +enabled for this key and the key was generated with Vault 0.6.1. Not required +for keys created in 0.6.2+. The value must be exactly 96 bits (12 bytes) long +and the user must ensure that for any given context (and thus, any given +encryption key) this nonce value is **never reused**. +`, + }, + + "type": { + Type: framework.TypeString, + Default: "aes256-gcm96", + Description: ` +This parameter is required when encryption key is expected to be created. +When performing an upsert operation, the type of key to create. Currently, +"aes128-gcm96" (symmetric) and "aes256-gcm96" (symmetric) are the only types supported. Defaults to "aes256-gcm96".`, + }, + + "convergent_encryption": { + Type: framework.TypeBool, + Description: ` +This parameter will only be used when a key is expected to be created. Whether +to support convergent encryption. This is only supported when using a key with +key derivation enabled and will require all requests to carry both a context +and 96-bit (12-byte) nonce. The given nonce will be used in place of a randomly +generated nonce. As a result, when the same context and nonce are supplied, the +same ciphertext is generated. It is *very important* when using this mode that +you ensure that all nonces are unique for a given context. Failing to do so +will severely impact the ciphertext's security.`, + }, + + "key_version": { + Type: framework.TypeInt, + Description: `The version of the key to use for encryption. +Must be 0 (for latest) or a value greater than or equal +to the min_encryption_version configured on the key.`, + }, + + "partial_failure_response_code": { + Type: framework.TypeInt, + Description: ` +Ordinarily, if a batch item fails to encrypt due to a bad input, but other batch items succeed, +the HTTP response code is 400 (Bad Request). Some applications may want to treat partial failures differently. 
+Providing the parameter returns the given response code integer instead of a 400 in this case. If all values fail +HTTP 400 is still returned.`, + }, + + "associated_data": { + Type: framework.TypeString, + Description: ` +When using an AEAD cipher mode, such as AES-GCM, this parameter allows +passing associated data (AD/AAD) into the encryption function; this data +must be passed on subsequent decryption requests but can be transited in +plaintext. On successful decryption, both the ciphertext and the associated +data are attested not to have been tampered with. + `, + }, + + "batch_input": { + Type: framework.TypeSlice, + Description: ` +Specifies a list of items to be encrypted in a single batch. When this parameter +is set, if the parameters 'plaintext', 'context' and 'nonce' are also set, they +will be ignored. Any batch output will preserve the order of the batch input.`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.CreateOperation: b.pathEncryptWrite, + logical.UpdateOperation: b.pathEncryptWrite, + }, + + ExistenceCheck: b.pathEncryptExistenceCheck, + + HelpSynopsis: pathEncryptHelpSyn, + HelpDescription: pathEncryptHelpDesc, + } +} + +func decodeEncryptBatchRequestItems(src interface{}, dst *[]BatchRequestItem) error { + return decodeBatchRequestItems(src, true, false, dst) +} + +func decodeDecryptBatchRequestItems(src interface{}, dst *[]BatchRequestItem) error { + return decodeBatchRequestItems(src, false, true, dst) +} + +// decodeBatchRequestItems is a fast path alternative to mapstructure.Decode to decode []BatchRequestItem. +// It aims to behave as closely possible to the original mapstructure.Decode and will return the same errors. +// Note, however, that an error will also be returned if one of the required fields is missing. +// https://github.com/hashicorp/vault/pull/8775/files#r437709722 +func decodeBatchRequestItems(src interface{}, requirePlaintext bool, requireCiphertext bool, dst *[]BatchRequestItem) error { + if src == nil || dst == nil { + return nil + } + + items, ok := src.([]interface{}) + if !ok { + return fmt.Errorf("source data must be an array or slice, got %T", src) + } + + // Early return should happen before allocating the array if the batch is empty. + // However to comply with mapstructure output it's needed to allocate an empty array. + sitems := len(items) + *dst = make([]BatchRequestItem, sitems) + if sitems == 0 { + return nil + } + + // To comply with mapstructure output the same error type is needed. 
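+	// For illustration (not an exhaustive schema), a single batch item is
+	// expected to look roughly like:
+	//
+	//	map[string]interface{}{
+	//		"plaintext":   "dGhlIHF1aWNrIGJyb3duIGZveA==",
+	//		"context":     "dGVzdGNvbnRleHQ=",
+	//		"key_version": 1,
+	//		"reference":   "item-0",
+	//	}
+	//
+	// Only the fields handled below are recognized; unknown fields are
+	// ignored, matching mapstructure's default behavior.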
+ var errs mapstructure.Error + + for i, iitem := range items { + item, ok := iitem.(map[string]interface{}) + if !ok { + return fmt.Errorf("[%d] expected a map, got '%T'", i, iitem) + } + + if v, has := item["context"]; has { + if !reflect.ValueOf(v).IsValid() { + } else if casted, ok := v.(string); ok { + (*dst)[i].Context = casted + } else { + errs.Errors = append(errs.Errors, fmt.Sprintf("'[%d].context' expected type 'string', got unconvertible type '%T'", i, item["context"])) + } + } + + if v, has := item["ciphertext"]; has { + if !reflect.ValueOf(v).IsValid() { + } else if casted, ok := v.(string); ok { + (*dst)[i].Ciphertext = casted + } else { + errs.Errors = append(errs.Errors, fmt.Sprintf("'[%d].ciphertext' expected type 'string', got unconvertible type '%T'", i, item["ciphertext"])) + } + } else if requireCiphertext { + errs.Errors = append(errs.Errors, fmt.Sprintf("'[%d].ciphertext' missing ciphertext to decrypt", i)) + } + + if v, has := item["plaintext"]; has { + if casted, ok := v.(string); ok { + (*dst)[i].Plaintext = casted + } else { + errs.Errors = append(errs.Errors, fmt.Sprintf("'[%d].plaintext' expected type 'string', got unconvertible type '%T'", i, item["plaintext"])) + } + } else if requirePlaintext { + errs.Errors = append(errs.Errors, fmt.Sprintf("'[%d].plaintext' missing plaintext to encrypt", i)) + } + + if v, has := item["nonce"]; has { + if !reflect.ValueOf(v).IsValid() { + } else if casted, ok := v.(string); ok { + (*dst)[i].Nonce = casted + } else { + errs.Errors = append(errs.Errors, fmt.Sprintf("'[%d].nonce' expected type 'string', got unconvertible type '%T'", i, item["nonce"])) + } + } + + if v, has := item["key_version"]; has { + if !reflect.ValueOf(v).IsValid() { + } else if casted, ok := v.(int); ok { + (*dst)[i].KeyVersion = casted + } else if js, ok := v.(json.Number); ok { + // https://github.com/hashicorp/vault/issues/10232 + // Because API server parses json request with UseNumber=true, logical.Request.Data can include json.Number for a number field. 
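+			// e.g. {"key_version": 1} decoded with UseNumber arrives here as
+			// json.Number("1") rather than int, so convert it via Int64 first.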
+ if casted, err := js.Int64(); err == nil { + (*dst)[i].KeyVersion = int(casted) + } else { + errs.Errors = append(errs.Errors, fmt.Sprintf(`error decoding %T into [%d].key_version: strconv.ParseInt: parsing "%s": invalid syntax`, v, i, v)) + } + } else { + errs.Errors = append(errs.Errors, fmt.Sprintf("'[%d].key_version' expected type 'int', got unconvertible type '%T'", i, item["key_version"])) + } + } + + if v, has := item["associated_data"]; has { + if !reflect.ValueOf(v).IsValid() { + } else if casted, ok := v.(string); ok { + (*dst)[i].AssociatedData = casted + } else { + errs.Errors = append(errs.Errors, fmt.Sprintf("'[%d].associated_data' expected type 'string', got unconvertible type '%T'", i, item["associated_data"])) + } + } + if v, has := item["reference"]; has { + if !reflect.ValueOf(v).IsValid() { + } else if casted, ok := v.(string); ok { + (*dst)[i].Reference = casted + } else { + errs.Errors = append(errs.Errors, fmt.Sprintf("'[%d].reference' expected type 'string', got unconvertible type '%T'", i, item["reference"])) + } + } + } + + if len(errs.Errors) > 0 { + return &errs + } + + return nil +} + +func (b *backend) pathEncryptExistenceCheck(ctx context.Context, req *logical.Request, d *framework.FieldData) (bool, error) { + name := d.Get("name").(string) + p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ + Storage: req.Storage, + Name: name, + }, b.GetRandomReader()) + if err != nil { + return false, err + } + if p != nil && b.System().CachingDisabled() { + p.Unlock() + } + + return p != nil, nil +} + +func (b *backend) pathEncryptWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + var err error + batchInputRaw := d.Raw["batch_input"] + var batchInputItems []BatchRequestItem + if batchInputRaw != nil { + err = decodeEncryptBatchRequestItems(batchInputRaw, &batchInputItems) + if err != nil { + return nil, fmt.Errorf("failed to parse batch input: %w", err) + } + + if len(batchInputItems) == 0 { + return logical.ErrorResponse("missing batch input to process"), logical.ErrInvalidRequest + } + } else { + valueRaw, ok, err := d.GetOkErr("plaintext") + if err != nil { + return nil, err + } + if !ok { + return logical.ErrorResponse("missing plaintext to encrypt"), logical.ErrInvalidRequest + } + + batchInputItems = make([]BatchRequestItem, 1) + batchInputItems[0] = BatchRequestItem{ + Plaintext: valueRaw.(string), + Context: d.Get("context").(string), + Nonce: d.Get("nonce").(string), + KeyVersion: d.Get("key_version").(int), + AssociatedData: d.Get("associated_data").(string), + } + } + + batchResponseItems := make([]EncryptBatchResponseItem, len(batchInputItems)) + contextSet := len(batchInputItems[0].Context) != 0 + + userErrorInBatch := false + internalErrorInBatch := false + + // Before processing the batch request items, get the policy. If the + // policy is supposed to be upserted, then determine if 'derived' is to + // be set or not, based on the presence of 'context' field in all the + // input items. 
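+	// Concretely: if every item carries a "context", an upserted key is
+	// created with derivation enabled; if none do, a non-derived key is
+	// created; a mix of the two is rejected as an error below.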
+ for i, item := range batchInputItems { + if (len(item.Context) == 0 && contextSet) || (len(item.Context) != 0 && !contextSet) { + return logical.ErrorResponse("context should be set either in all the request blocks or in none"), logical.ErrInvalidRequest + } + + _, err := base64.StdEncoding.DecodeString(item.Plaintext) + if err != nil { + userErrorInBatch = true + batchResponseItems[i].Error = err.Error() + continue + } + + // Decode the context + if len(item.Context) != 0 { + batchInputItems[i].DecodedContext, err = base64.StdEncoding.DecodeString(item.Context) + if err != nil { + userErrorInBatch = true + batchResponseItems[i].Error = err.Error() + continue + } + } + + // Decode the nonce + if len(item.Nonce) != 0 { + batchInputItems[i].DecodedNonce, err = base64.StdEncoding.DecodeString(item.Nonce) + if err != nil { + userErrorInBatch = true + batchResponseItems[i].Error = err.Error() + continue + } + } + } + + // Get the policy + var p *keysutil.Policy + var upserted bool + var polReq keysutil.PolicyRequest + + if req.Operation == logical.CreateOperation { + convergent := d.Get("convergent_encryption").(bool) + if convergent && !contextSet { + return logical.ErrorResponse("convergent encryption requires derivation to be enabled, so context is required"), nil + } + + cfg, err := b.readConfigKeys(ctx, req) + if err != nil { + return nil, err + } + + polReq = keysutil.PolicyRequest{ + Upsert: !cfg.DisableUpsert, + Storage: req.Storage, + Name: name, + Derived: contextSet, + Convergent: convergent, + } + + keyType := d.Get("type").(string) + switch keyType { + case "aes128-gcm96": + polReq.KeyType = keysutil.KeyType_AES128_GCM96 + case "aes256-gcm96": + polReq.KeyType = keysutil.KeyType_AES256_GCM96 + case "chacha20-poly1305": + polReq.KeyType = keysutil.KeyType_ChaCha20_Poly1305 + case "ecdsa-p256", "ecdsa-p384", "ecdsa-p521": + return logical.ErrorResponse(fmt.Sprintf("key type %v not supported for this operation", keyType)), logical.ErrInvalidRequest + case "managed_key": + polReq.KeyType = keysutil.KeyType_MANAGED_KEY + default: + return logical.ErrorResponse(fmt.Sprintf("unknown key type %v", keyType)), logical.ErrInvalidRequest + } + } else { + polReq = keysutil.PolicyRequest{ + Storage: req.Storage, + Name: name, + } + } + + p, upserted, err = b.GetPolicy(ctx, polReq, b.GetRandomReader()) + if err != nil { + return nil, err + } + if p == nil { + return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest + } + if !b.System().CachingDisabled() { + p.Lock(false) + } + + // Process batch request items. If encryption of any request + // item fails, respectively mark the error in the response + // collection and continue to process other items. 
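+	// The final HTTP status is decided later in batchRequestResponse: 400
+	// for user errors, 500 for internal errors, optionally overridden by
+	// partial_failure_response_code when at least one item succeeded.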
+	warnAboutNonceUsage := false
+	successesInBatch := false
+	for i, item := range batchInputItems {
+		if batchResponseItems[i].Error != "" {
+			userErrorInBatch = true
+			continue
+		}
+
+		if item.Nonce != "" && !nonceAllowed(p) {
+			userErrorInBatch = true
+			batchResponseItems[i].Error = ErrNonceNotAllowed.Error()
+			continue
+		}
+
+		if !warnAboutNonceUsage && shouldWarnAboutNonceUsage(p, item.DecodedNonce) {
+			warnAboutNonceUsage = true
+		}
+
+		var factory interface{}
+		if item.AssociatedData != "" {
+			if !p.Type.AssociatedDataSupported() {
+				batchResponseItems[i].Error = fmt.Sprintf("'[%d].associated_data' provided for non-AEAD cipher suite %v", i, p.Type.String())
+				continue
+			}
+
+			factory = AssocDataFactory{item.AssociatedData}
+		}
+
+		var managedKeyFactory ManagedKeyFactory
+		if p.Type == keysutil.KeyType_MANAGED_KEY {
+			managedKeySystemView, ok := b.System().(logical.ManagedKeySystemView)
+			if !ok {
+				batchResponseItems[i].Error = errors.New("unsupported system view").Error()
+				continue
+			}
+
+			managedKeyFactory = ManagedKeyFactory{
+				managedKeyParams: keysutil.ManagedKeyParameters{
+					ManagedKeySystemView: managedKeySystemView,
+					BackendUUID:          b.backendUUID,
+					Context:              ctx,
+				},
+			}
+		}
+
+		ciphertext, err := p.EncryptWithFactory(item.KeyVersion, item.DecodedContext, item.DecodedNonce, item.Plaintext, factory, managedKeyFactory)
+		if err != nil {
+			switch err.(type) {
+			case errutil.InternalError:
+				internalErrorInBatch = true
+			default:
+				userErrorInBatch = true
+			}
+			batchResponseItems[i].Error = err.Error()
+			continue
+		}
+
+		if ciphertext == "" {
+			userErrorInBatch = true
+			batchResponseItems[i].Error = fmt.Sprintf("empty ciphertext returned for input item %d", i)
+			continue
+		}
+
+		successesInBatch = true
+		keyVersion := item.KeyVersion
+		if keyVersion == 0 {
+			keyVersion = p.LatestVersion
+		}
+
+		batchResponseItems[i].Ciphertext = ciphertext
+		batchResponseItems[i].KeyVersion = keyVersion
+	}
+
+	resp := &logical.Response{}
+	if batchInputRaw != nil {
+		// Copy the references
+		for i := range batchInputItems {
+			batchResponseItems[i].Reference = batchInputItems[i].Reference
+		}
+		resp.Data = map[string]interface{}{
+			"batch_results": batchResponseItems,
+		}
+	} else {
+		if batchResponseItems[0].Error != "" {
+			p.Unlock()
+
+			if internalErrorInBatch {
+				return nil, errutil.InternalError{Err: batchResponseItems[0].Error}
+			}
+
+			return logical.ErrorResponse(batchResponseItems[0].Error), logical.ErrInvalidRequest
+		}
+
+		resp.Data = map[string]interface{}{
+			"ciphertext":  batchResponseItems[0].Ciphertext,
+			"key_version": batchResponseItems[0].KeyVersion,
+		}
+	}
+
+	if constants.IsFIPS() && warnAboutNonceUsage {
+		resp.AddWarning("A provided nonce value was used within FIPS mode; this violates FIPS 140 compliance.")
+	}
+
+	if req.Operation == logical.CreateOperation && !upserted {
+		resp.AddWarning("Attempted creation of the key during the encrypt operation, but it was created beforehand")
+	}
+
+	p.Unlock()
+
+	return batchRequestResponse(d, resp, req, successesInBatch, userErrorInBatch, internalErrorInBatch)
+}
+
+func nonceAllowed(p *keysutil.Policy) bool {
+	var supportedKeyType bool
+	switch p.Type {
+	case keysutil.KeyType_MANAGED_KEY:
+		return true
+	case keysutil.KeyType_AES128_GCM96, keysutil.KeyType_AES256_GCM96, keysutil.KeyType_ChaCha20_Poly1305:
+		supportedKeyType = true
+	default:
+		supportedKeyType = false
+	}
+
+	if supportedKeyType && p.ConvergentEncryption && p.ConvergentVersion == 1 {
+		// We only use the user supplied nonce for v1 convergent encryption keys
+		return true
+	}
+
+	return false
+}
+
+// Depending on the errors in the batch, different status codes should be returned. User errors
+// return a 400 and take precedence over internal errors, which return a 500. The reasoning behind
+// this is that user errors are non-retryable without changes to the request, and should be
+// surfaced to the user first.
+func batchRequestResponse(d *framework.FieldData, resp *logical.Response, req *logical.Request, successesInBatch, userErrorInBatch, internalErrorInBatch bool) (*logical.Response, error) {
+	if userErrorInBatch || internalErrorInBatch {
+		var code int
+		switch {
+		case userErrorInBatch:
+			code = http.StatusBadRequest
+		case internalErrorInBatch:
+			code = http.StatusInternalServerError
+		}
+		if codeRaw, ok := d.GetOk("partial_failure_response_code"); ok && successesInBatch {
+			newCode := codeRaw.(int)
+			if newCode < 1 || newCode > 599 {
+				resp.AddWarning(fmt.Sprintf("invalid HTTP response code override from partial_failure_response_code, reverting to %d", code))
+			} else {
+				code = newCode
+			}
+		}
+		return logical.RespondWithStatusCode(resp, req, code)
+	}
+
+	return resp, nil
+}
+
+// shouldWarnAboutNonceUsage attempts to determine whether we will use a provided nonce. Ideally
+// this would be information returned through p.Encrypt, but that would require an SDK API change,
+// and this is transit-specific.
+func shouldWarnAboutNonceUsage(p *keysutil.Policy, userSuppliedNonce []byte) bool {
+	if len(userSuppliedNonce) == 0 {
+		return false
+	}
+
+	var supportedKeyType bool
+	switch p.Type {
+	case keysutil.KeyType_AES128_GCM96, keysutil.KeyType_AES256_GCM96, keysutil.KeyType_ChaCha20_Poly1305:
+		supportedKeyType = true
+	default:
+		supportedKeyType = false
+	}
+
+	if supportedKeyType && p.ConvergentEncryption && p.ConvergentVersion == 1 {
+		// We only use the user supplied nonce for v1 convergent encryption keys
+		return true
+	}
+
+	if supportedKeyType && !p.ConvergentEncryption {
+		return true
+	}
+
+	return false
+}
+
+const pathEncryptHelpSyn = `Encrypt a plaintext value or a batch of plaintext
+blocks using a named key`
+
+const pathEncryptHelpDesc = `
+This path uses the named key from the request path to encrypt a user provided
+plaintext or a batch of plaintext blocks. The plaintext must be base64 encoded.
+`
diff --git a/builtin/logical/transit/path_encrypt_bench_test.go b/builtin/logical/transit/path_encrypt_bench_test.go
new file mode 100644
index 0000000..8aef39d
--- /dev/null
+++ b/builtin/logical/transit/path_encrypt_bench_test.go
@@ -0,0 +1,71 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "testing" + + "github.com/hashicorp/vault/sdk/logical" +) + +func BenchmarkTransit_BatchEncryption1(b *testing.B) { + BTransit_BatchEncryption(b, 1) +} + +func BenchmarkTransit_BatchEncryption10(b *testing.B) { + BTransit_BatchEncryption(b, 10) +} + +func BenchmarkTransit_BatchEncryption50(b *testing.B) { + BTransit_BatchEncryption(b, 50) +} + +func BenchmarkTransit_BatchEncryption100(b *testing.B) { + BTransit_BatchEncryption(b, 100) +} + +func BenchmarkTransit_BatchEncryption1000(b *testing.B) { + BTransit_BatchEncryption(b, 1_000) +} + +func BenchmarkTransit_BatchEncryption10000(b *testing.B) { + BTransit_BatchEncryption(b, 10_000) +} + +func BTransit_BatchEncryption(b *testing.B, bsize int) { + b.StopTimer() + + var resp *logical.Response + var err error + + backend, s := createBackendWithStorage(b) + + batchEncryptionInput := make([]interface{}, 0, bsize) + for i := 0; i < bsize; i++ { + batchEncryptionInput = append( + batchEncryptionInput, + map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="}, + ) + } + + batchEncryptionData := map[string]interface{}{ + "batch_input": batchEncryptionInput, + } + + batchEncryptionReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "encrypt/upserted_key", + Storage: s, + Data: batchEncryptionData, + } + + b.StartTimer() + for i := 0; i < b.N; i++ { + resp, err = backend.HandleRequest(context.Background(), batchEncryptionReq) + if err != nil || (resp != nil && resp.IsError()) { + b.Fatalf("err:%v resp:%#v", err, resp) + } + } +} diff --git a/builtin/logical/transit/path_encrypt_test.go b/builtin/logical/transit/path_encrypt_test.go new file mode 100644 index 0000000..c0e0f9c --- /dev/null +++ b/builtin/logical/transit/path_encrypt_test.go @@ -0,0 +1,1068 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + "strings" + "testing" + + "github.com/hashicorp/vault/sdk/helper/keysutil" + + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/mapstructure" +) + +func TestTransit_MissingPlaintext(t *testing.T) { + var resp *logical.Response + var err error + + b, s := createBackendWithStorage(t) + + // Create the policy + policyReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "keys/existing_key", + Storage: s, + } + resp, err = b.HandleRequest(context.Background(), policyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + encReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "encrypt/existing_key", + Storage: s, + Data: map[string]interface{}{}, + } + resp, err = b.HandleRequest(context.Background(), encReq) + if resp == nil || !resp.IsError() { + t.Fatalf("expected error due to missing plaintext in request, err:%v resp:%#v", err, resp) + } +} + +func TestTransit_MissingPlaintextInBatchInput(t *testing.T) { + var resp *logical.Response + var err error + + b, s := createBackendWithStorage(t) + + // Create the policy + policyReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "keys/existing_key", + Storage: s, + } + resp, err = b.HandleRequest(context.Background(), policyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + batchInput := []interface{}{ + map[string]interface{}{}, // Note that there is no map entry for plaintext + } + + batchData := map[string]interface{}{ + "batch_input": batchInput, + } + batchReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "encrypt/upserted_key", + Storage: s, + Data: batchData, + } + resp, err = b.HandleRequest(context.Background(), batchReq) + if err == nil { + t.Fatalf("expected error due to missing plaintext in request, err:%v resp:%#v", err, resp) + } +} + +// Case1: Ensure that batch encryption did not affect the normal flow of +// encrypting the plaintext with a pre-existing key. 
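+// The plaintext used throughout these cases is base64, as the endpoint
+// requires; "dGhlIHF1aWNrIGJyb3duIGZveA==" decodes to "the quick brown fox".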
+func TestTransit_BatchEncryptionCase1(t *testing.T) { + var resp *logical.Response + var err error + + b, s := createBackendWithStorage(t) + + // Create the policy + policyReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "keys/existing_key", + Storage: s, + } + resp, err = b.HandleRequest(context.Background(), policyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA==" // "the quick brown fox" + + encData := map[string]interface{}{ + "plaintext": plaintext, + } + + encReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "encrypt/existing_key", + Storage: s, + Data: encData, + } + resp, err = b.HandleRequest(context.Background(), encReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + keyVersion := resp.Data["key_version"].(int) + if keyVersion != 1 { + t.Fatalf("unexpected key version; got: %d, expected: %d", keyVersion, 1) + } + + ciphertext := resp.Data["ciphertext"] + + decData := map[string]interface{}{ + "ciphertext": ciphertext, + } + decReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "decrypt/existing_key", + Storage: s, + Data: decData, + } + resp, err = b.HandleRequest(context.Background(), decReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + if resp.Data["plaintext"] != plaintext { + t.Fatalf("bad: plaintext. Expected: %q, Actual: %q", plaintext, resp.Data["plaintext"]) + } +} + +// Case2: Ensure that batch encryption did not affect the normal flow of +// encrypting the plaintext with the key upserted. +func TestTransit_BatchEncryptionCase2(t *testing.T) { + var resp *logical.Response + var err error + b, s := createBackendWithStorage(t) + + // Upsert the key and encrypt the data + plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA==" + + encData := map[string]interface{}{ + "plaintext": plaintext, + } + + encReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "encrypt/upserted_key", + Storage: s, + Data: encData, + } + resp, err = b.HandleRequest(context.Background(), encReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + keyVersion := resp.Data["key_version"].(int) + if keyVersion != 1 { + t.Fatalf("unexpected key version; got: %d, expected: %d", keyVersion, 1) + } + + ciphertext := resp.Data["ciphertext"] + decData := map[string]interface{}{ + "ciphertext": ciphertext, + } + + policyReq := &logical.Request{ + Operation: logical.ReadOperation, + Path: "keys/upserted_key", + Storage: s, + } + + resp, err = b.HandleRequest(context.Background(), policyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + decReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "decrypt/upserted_key", + Storage: s, + Data: decData, + } + resp, err = b.HandleRequest(context.Background(), decReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + if resp.Data["plaintext"] != plaintext { + t.Fatalf("bad: plaintext. Expected: %q, Actual: %q", plaintext, resp.Data["plaintext"]) + } +} + +// Case3: If batch encryption input is not base64 encoded, it should fail. 
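+// Here batch_input is a raw JSON string rather than a decoded array; the
+// handler only accepts a slice of batch items, so the request should fail
+// while decoding the batch, before any base64 handling takes place.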
+func TestTransit_BatchEncryptionCase3(t *testing.T) { + var err error + + b, s := createBackendWithStorage(t) + + batchInput := `[{"plaintext":"dGhlIHF1aWNrIGJyb3duIGZveA=="}]` + batchData := map[string]interface{}{ + "batch_input": batchInput, + } + + batchReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "encrypt/upserted_key", + Storage: s, + Data: batchData, + } + _, err = b.HandleRequest(context.Background(), batchReq) + if err == nil { + t.Fatal("expected an error") + } +} + +// Case4: Test batch encryption with an existing key (and test references) +func TestTransit_BatchEncryptionCase4(t *testing.T) { + var resp *logical.Response + var err error + + b, s := createBackendWithStorage(t) + + policyReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "keys/existing_key", + Storage: s, + } + resp, err = b.HandleRequest(context.Background(), policyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + batchInput := []interface{}{ + map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "reference": "b"}, + map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "reference": "a"}, + } + + batchData := map[string]interface{}{ + "batch_input": batchInput, + } + batchReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "encrypt/existing_key", + Storage: s, + Data: batchData, + } + resp, err = b.HandleRequest(context.Background(), batchReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + batchResponseItems := resp.Data["batch_results"].([]EncryptBatchResponseItem) + + decReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "decrypt/existing_key", + Storage: s, + } + + plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA==" + + for i, item := range batchResponseItems { + if item.KeyVersion != 1 { + t.Fatalf("unexpected key version; got: %d, expected: %d", item.KeyVersion, 1) + } + + decReq.Data = map[string]interface{}{ + "ciphertext": item.Ciphertext, + } + resp, err = b.HandleRequest(context.Background(), decReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + if resp.Data["plaintext"] != plaintext { + t.Fatalf("bad: plaintext. Expected: %q, Actual: %q", plaintext, resp.Data["plaintext"]) + } + inputItem := batchInput[i].(map[string]interface{}) + if item.Reference != inputItem["reference"] { + t.Fatalf("reference mismatch. 
Expected %s, Actual: %s", inputItem["reference"], item.Reference) + } + } +} + +// Case5: Test batch encryption with an existing derived key +func TestTransit_BatchEncryptionCase5(t *testing.T) { + var resp *logical.Response + var err error + + b, s := createBackendWithStorage(t) + + policyData := map[string]interface{}{ + "derived": true, + } + + policyReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "keys/existing_key", + Storage: s, + Data: policyData, + } + + resp, err = b.HandleRequest(context.Background(), policyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + batchInput := []interface{}{ + map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "context": "dmlzaGFsCg=="}, + map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "context": "dmlzaGFsCg=="}, + } + + batchData := map[string]interface{}{ + "batch_input": batchInput, + } + + batchReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "encrypt/existing_key", + Storage: s, + Data: batchData, + } + resp, err = b.HandleRequest(context.Background(), batchReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + batchResponseItems := resp.Data["batch_results"].([]EncryptBatchResponseItem) + + decReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "decrypt/existing_key", + Storage: s, + } + + plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA==" + + for _, item := range batchResponseItems { + if item.KeyVersion != 1 { + t.Fatalf("unexpected key version; got: %d, expected: %d", item.KeyVersion, 1) + } + + decReq.Data = map[string]interface{}{ + "ciphertext": item.Ciphertext, + "context": "dmlzaGFsCg==", + } + resp, err = b.HandleRequest(context.Background(), decReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + if resp.Data["plaintext"] != plaintext { + t.Fatalf("bad: plaintext. 
Expected: %q, Actual: %q", plaintext, resp.Data["plaintext"]) + } + } +} + +// Case6: Test batch encryption with an upserted non-derived key +func TestTransit_BatchEncryptionCase6(t *testing.T) { + var resp *logical.Response + var err error + + b, s := createBackendWithStorage(t) + + batchInput := []interface{}{ + map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="}, + map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="}, + } + + batchData := map[string]interface{}{ + "batch_input": batchInput, + } + batchReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "encrypt/upserted_key", + Storage: s, + Data: batchData, + } + resp, err = b.HandleRequest(context.Background(), batchReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + batchResponseItems := resp.Data["batch_results"].([]EncryptBatchResponseItem) + + decReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "decrypt/upserted_key", + Storage: s, + } + + plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA==" + + for _, responseItem := range batchResponseItems { + var item EncryptBatchResponseItem + if err := mapstructure.Decode(responseItem, &item); err != nil { + t.Fatal(err) + } + + if item.KeyVersion != 1 { + t.Fatalf("unexpected key version; got: %d, expected: %d", item.KeyVersion, 1) + } + + decReq.Data = map[string]interface{}{ + "ciphertext": item.Ciphertext, + } + resp, err = b.HandleRequest(context.Background(), decReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + if resp.Data["plaintext"] != plaintext { + t.Fatalf("bad: plaintext. Expected: %q, Actual: %q", plaintext, resp.Data["plaintext"]) + } + } +} + +// Case7: Test batch encryption with an upserted derived key +func TestTransit_BatchEncryptionCase7(t *testing.T) { + var resp *logical.Response + var err error + + b, s := createBackendWithStorage(t) + + batchInput := []interface{}{ + map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "context": "dmlzaGFsCg=="}, + map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "context": "dmlzaGFsCg=="}, + } + + batchData := map[string]interface{}{ + "batch_input": batchInput, + } + batchReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "encrypt/upserted_key", + Storage: s, + Data: batchData, + } + resp, err = b.HandleRequest(context.Background(), batchReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + batchResponseItems := resp.Data["batch_results"].([]EncryptBatchResponseItem) + + decReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "decrypt/upserted_key", + Storage: s, + } + + plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA==" + + for _, item := range batchResponseItems { + if item.KeyVersion != 1 { + t.Fatalf("unexpected key version; got: %d, expected: %d", item.KeyVersion, 1) + } + + decReq.Data = map[string]interface{}{ + "ciphertext": item.Ciphertext, + "context": "dmlzaGFsCg==", + } + resp, err = b.HandleRequest(context.Background(), decReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + if resp.Data["plaintext"] != plaintext { + t.Fatalf("bad: plaintext. 
Expected: %q, Actual: %q", plaintext, resp.Data["plaintext"]) + } + } +} + +// Case8: If plaintext is not base64 encoded, encryption should fail +func TestTransit_BatchEncryptionCase8(t *testing.T) { + var resp *logical.Response + var err error + + b, s := createBackendWithStorage(t) + + // Create the policy + policyReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "keys/existing_key", + Storage: s, + } + resp, err = b.HandleRequest(context.Background(), policyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + batchInput := []interface{}{ + map[string]interface{}{"plaintext": "simple_plaintext"}, + } + batchData := map[string]interface{}{ + "batch_input": batchInput, + } + batchReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "encrypt/existing_key", + Storage: s, + Data: batchData, + } + resp, err = b.HandleRequest(context.Background(), batchReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + plaintext := "simple plaintext" + + encData := map[string]interface{}{ + "plaintext": plaintext, + } + + encReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "encrypt/existing_key", + Storage: s, + Data: encData, + } + resp, err = b.HandleRequest(context.Background(), encReq) + if err == nil { + t.Fatal("expected an error") + } +} + +// Case9: If both plaintext and batch inputs are supplied, plaintext should be +// ignored. +func TestTransit_BatchEncryptionCase9(t *testing.T) { + var resp *logical.Response + var err error + + b, s := createBackendWithStorage(t) + + batchInput := []interface{}{ + map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="}, + map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="}, + } + plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA==" + batchData := map[string]interface{}{ + "batch_input": batchInput, + "plaintext": plaintext, + } + batchReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "encrypt/upserted_key", + Storage: s, + Data: batchData, + } + resp, err = b.HandleRequest(context.Background(), batchReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + _, ok := resp.Data["ciphertext"] + if ok { + t.Fatal("ciphertext field should not be set") + } +} + +// Case10: Inconsistent presence of 'context' in batch input should be caught +func TestTransit_BatchEncryptionCase10(t *testing.T) { + var err error + + b, s := createBackendWithStorage(t) + + batchInput := []interface{}{ + map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="}, + map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "context": "dmlzaGFsCg=="}, + } + + batchData := map[string]interface{}{ + "batch_input": batchInput, + } + + batchReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "encrypt/upserted_key", + Storage: s, + Data: batchData, + } + _, err = b.HandleRequest(context.Background(), batchReq) + if err == nil { + t.Fatalf("expected an error") + } +} + +// Case11: Incorrect inputs for context and nonce should not fail the operation +func TestTransit_BatchEncryptionCase11(t *testing.T) { + var err error + + b, s := createBackendWithStorage(t) + + batchInput := []interface{}{ + map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "context": "dmlzaGFsCg=="}, + map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "context": "not-encoded"}, + } + + batchData := 
map[string]interface{}{ + "batch_input": batchInput, + } + batchReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "encrypt/upserted_key", + Storage: s, + Data: batchData, + } + _, err = b.HandleRequest(context.Background(), batchReq) + if err != nil { + t.Fatal(err) + } +} + +// Case12: Invalid batch input +func TestTransit_BatchEncryptionCase12(t *testing.T) { + var err error + b, s := createBackendWithStorage(t) + + batchInput := []interface{}{ + map[string]interface{}{}, + "unexpected_interface", + } + + batchData := map[string]interface{}{ + "batch_input": batchInput, + } + batchReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "encrypt/upserted_key", + Storage: s, + Data: batchData, + } + _, err = b.HandleRequest(context.Background(), batchReq) + if err == nil { + t.Fatalf("expected an error") + } +} + +// Case13: Incorrect input for nonce when we aren't in convergent encryption should fail the operation +func TestTransit_EncryptionCase13(t *testing.T) { + var err error + + b, s := createBackendWithStorage(t) + + // Non-batch first + data := map[string]interface{}{"plaintext": "bXkgc2VjcmV0IGRhdGE=", "nonce": "R80hr9eNUIuFV52e"} + req := &logical.Request{ + Operation: logical.CreateOperation, + Path: "encrypt/my-key", + Storage: s, + Data: data, + } + resp, err := b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatal("expected invalid request") + } + + batchInput := []interface{}{ + map[string]interface{}{"plaintext": "bXkgc2VjcmV0IGRhdGE=", "nonce": "R80hr9eNUIuFV52e"}, + } + + batchData := map[string]interface{}{ + "batch_input": batchInput, + } + batchReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "encrypt/my-key", + Storage: s, + Data: batchData, + } + resp, err = b.HandleRequest(context.Background(), batchReq) + if err != nil { + t.Fatal(err) + } + + if v, ok := resp.Data["http_status_code"]; !ok || v.(int) != http.StatusBadRequest { + t.Fatal("expected request error") + } +} + +// Case14: Incorrect input for nonce when we are in convergent version 3 should fail +func TestTransit_EncryptionCase14(t *testing.T) { + var err error + + b, s := createBackendWithStorage(t) + + cReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "keys/my-key", + Storage: s, + Data: map[string]interface{}{ + "convergent_encryption": "true", + "derived": "true", + }, + } + resp, err := b.HandleRequest(context.Background(), cReq) + if err != nil { + t.Fatal(err) + } + + // Non-batch first + data := map[string]interface{}{"plaintext": "bXkgc2VjcmV0IGRhdGE=", "context": "SGVsbG8sIFdvcmxkCg==", "nonce": "R80hr9eNUIuFV52e"} + req := &logical.Request{ + Operation: logical.CreateOperation, + Path: "encrypt/my-key", + Storage: s, + Data: data, + } + + resp, err = b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatal("expected invalid request") + } + + batchInput := []interface{}{ + data, + } + + batchData := map[string]interface{}{ + "batch_input": batchInput, + } + batchReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "encrypt/my-key", + Storage: s, + Data: batchData, + } + resp, err = b.HandleRequest(context.Background(), batchReq) + if err != nil { + t.Fatal(err) + } + + if v, ok := resp.Data["http_status_code"]; !ok || v.(int) != http.StatusBadRequest { + t.Fatal("expected request error") + } +} + +// Test that the fast path function decodeBatchRequestItems behave like mapstructure.Decode() to decode []BatchRequestItem. 
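+//
+// Each entry below is decoded with mapstructure.Decode as the reference
+// behavior and then with decodeBatchRequestItems, and the errors and
+// resulting slices are compared. A minimal sketch of the equivalence being
+// asserted, using the signature exercised below:
+//
+//	var dst []BatchRequestItem
+//	src := []interface{}{map[string]interface{}{"plaintext": "aGk="}} // "hi"
+//	err := decodeBatchRequestItems(src, false, false, &dst)
+//	// err and dst should match what mapstructure.Decode(src, &dst) yields,
+//	// plus the required-field checks the helper layers on top.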
+func TestTransit_decodeBatchRequestItems(t *testing.T) { + tests := []struct { + name string + src interface{} + requirePlaintext bool + requireCiphertext bool + dest []BatchRequestItem + wantErrContains string + }{ + // basic edge cases of nil values + {name: "nil-nil", src: nil, dest: nil}, + {name: "nil-empty", src: nil, dest: []BatchRequestItem{}}, + {name: "empty-nil", src: []interface{}{}, dest: nil}, + { + name: "src-nil", + src: []interface{}{map[string]interface{}{}}, + dest: nil, + }, + // empty src & dest + { + name: "src-dest", + src: []interface{}{map[string]interface{}{}}, + dest: []BatchRequestItem{}, + }, + // empty src but with already populated dest, mapstructure discard pre-populated data. + { + name: "src-dest_pre_filled", + src: []interface{}{map[string]interface{}{}}, + dest: []BatchRequestItem{{}}, + }, + // two test per properties to test valid and invalid input + { + name: "src_plaintext-dest", + src: []interface{}{map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA=="}}, + dest: []BatchRequestItem{}, + }, + { + name: "src_plaintext_invalid-dest", + src: []interface{}{map[string]interface{}{"plaintext": 666}}, + dest: []BatchRequestItem{}, + wantErrContains: "expected type 'string', got unconvertible type 'int'", + }, + { + name: "src_ciphertext-dest", + src: []interface{}{map[string]interface{}{"ciphertext": "dGhlIHF1aWNrIGJyb3duIGZveA=="}}, + dest: []BatchRequestItem{}, + }, + { + name: "src_ciphertext_invalid-dest", + src: []interface{}{map[string]interface{}{"ciphertext": 666}}, + dest: []BatchRequestItem{}, + wantErrContains: "expected type 'string', got unconvertible type 'int'", + }, + { + name: "src_key_version-dest", + src: []interface{}{map[string]interface{}{"key_version": 1}}, + dest: []BatchRequestItem{}, + }, + { + name: "src_key_version_invalid-dest", + src: []interface{}{map[string]interface{}{"key_version": "666"}}, + dest: []BatchRequestItem{}, + wantErrContains: "expected type 'int', got unconvertible type 'string'", + }, + { + name: "src_key_version_invalid-number-dest", + src: []interface{}{map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "key_version": json.Number("1.1")}}, + dest: []BatchRequestItem{}, + wantErrContains: "error decoding json.Number into [0].key_version", + }, + { + name: "src_nonce-dest", + src: []interface{}{map[string]interface{}{"nonce": "dGVzdGNvbnRleHQ="}}, + dest: []BatchRequestItem{}, + }, + { + name: "src_nonce_invalid-dest", + src: []interface{}{map[string]interface{}{"nonce": 666}}, + dest: []BatchRequestItem{}, + wantErrContains: "expected type 'string', got unconvertible type 'int'", + }, + { + name: "src_context-dest", + src: []interface{}{map[string]interface{}{"context": "dGVzdGNvbnRleHQ="}}, + dest: []BatchRequestItem{}, + }, + { + name: "src_context_invalid-dest", + src: []interface{}{map[string]interface{}{"context": 666}}, + dest: []BatchRequestItem{}, + wantErrContains: "expected type 'string', got unconvertible type 'int'", + }, + { + name: "src_multi_order-dest", + src: []interface{}{ + map[string]interface{}{"context": "1"}, + map[string]interface{}{"context": "2"}, + map[string]interface{}{"context": "3"}, + }, + dest: []BatchRequestItem{}, + }, + { + name: "src_multi_with_invalid-dest", + src: []interface{}{ + map[string]interface{}{"context": "1"}, + map[string]interface{}{"context": "2", "key_version": "666"}, + map[string]interface{}{"context": "3"}, + }, + dest: []BatchRequestItem{}, + wantErrContains: "expected type 'int', got unconvertible type 'string'", + }, + { 
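+			// Two of the three items carry string key_version values; decoding is
+			// still expected to fail with the same int type-mismatch error as the
+			// single-invalid case above.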
+ name: "src_multi_with_multi_invalid-dest", + src: []interface{}{ + map[string]interface{}{"context": "1"}, + map[string]interface{}{"context": "2", "key_version": "666"}, + map[string]interface{}{"context": "3", "key_version": "1337"}, + }, + dest: []BatchRequestItem{}, + wantErrContains: "expected type 'int', got unconvertible type 'string'", + }, + { + name: "src_plaintext-nil-nonce", + src: []interface{}{map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "nonce": "null"}}, + dest: []BatchRequestItem{}, + }, + // required fields + { + name: "required_plaintext_present", + src: []interface{}{map[string]interface{}{"plaintext": ""}}, + requirePlaintext: true, + dest: []BatchRequestItem{}, + }, + { + name: "required_plaintext_missing", + src: []interface{}{map[string]interface{}{}}, + requirePlaintext: true, + dest: []BatchRequestItem{}, + wantErrContains: "missing plaintext", + }, + { + name: "required_ciphertext_present", + src: []interface{}{map[string]interface{}{"ciphertext": "dGhlIHF1aWNrIGJyb3duIGZveA=="}}, + requireCiphertext: true, + dest: []BatchRequestItem{}, + }, + { + name: "required_ciphertext_missing", + src: []interface{}{map[string]interface{}{}}, + requireCiphertext: true, + dest: []BatchRequestItem{}, + wantErrContains: "missing ciphertext", + }, + { + name: "required_plaintext_and_ciphertext_missing", + src: []interface{}{map[string]interface{}{}}, + requirePlaintext: true, + requireCiphertext: true, + dest: []BatchRequestItem{}, + wantErrContains: "missing ciphertext", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + expectedDest := append(tt.dest[:0:0], tt.dest...) // copy of the dest state + expectedErr := mapstructure.Decode(tt.src, &expectedDest) != nil || tt.wantErrContains != "" + + gotErr := decodeBatchRequestItems(tt.src, tt.requirePlaintext, tt.requireCiphertext, &tt.dest) + gotDest := tt.dest + + if expectedErr { + if gotErr == nil { + t.Fatal("decodeBatchRequestItems unexpected error value; expected error but got none") + } + if tt.wantErrContains == "" { + t.Fatal("missing error condition") + } + if !strings.Contains(gotErr.Error(), tt.wantErrContains) { + t.Errorf("decodeBatchRequestItems unexpected error value, want err contains: '%v', got: '%v'", tt.wantErrContains, gotErr) + } + } + + if !reflect.DeepEqual(expectedDest, gotDest) { + t.Errorf("decodeBatchRequestItems unexpected dest value, want: '%v', got: '%v'", expectedDest, gotDest) + } + }) + } +} + +func TestShouldWarnAboutNonceUsage(t *testing.T) { + tests := []struct { + name string + keyTypes []keysutil.KeyType + nonce []byte + convergentEncryption bool + convergentVersion int + expected bool + }{ + { + name: "-NoConvergent-WithNonce", + keyTypes: []keysutil.KeyType{keysutil.KeyType_AES256_GCM96, keysutil.KeyType_AES128_GCM96, keysutil.KeyType_ChaCha20_Poly1305}, + nonce: []byte("testnonce"), + convergentEncryption: false, + convergentVersion: -1, + expected: true, + }, + { + name: "-NoConvergent-NoNonce", + keyTypes: []keysutil.KeyType{keysutil.KeyType_AES256_GCM96, keysutil.KeyType_AES128_GCM96, keysutil.KeyType_ChaCha20_Poly1305}, + nonce: []byte{}, + convergentEncryption: false, + convergentVersion: -1, + expected: false, + }, + { + name: "-Convergentv1-WithNonce", + keyTypes: []keysutil.KeyType{keysutil.KeyType_AES256_GCM96, keysutil.KeyType_AES128_GCM96, keysutil.KeyType_ChaCha20_Poly1305}, + nonce: []byte("testnonce"), + convergentEncryption: true, + convergentVersion: 1, + expected: true, + }, + { + name: "-Convergentv2-WithNonce", + 
keyTypes: []keysutil.KeyType{keysutil.KeyType_AES256_GCM96, keysutil.KeyType_AES128_GCM96, keysutil.KeyType_ChaCha20_Poly1305}, + nonce: []byte("testnonce"), + convergentEncryption: true, + convergentVersion: 2, + expected: false, + }, + { + name: "-Convergentv3-WithNonce", + keyTypes: []keysutil.KeyType{keysutil.KeyType_AES256_GCM96, keysutil.KeyType_AES128_GCM96, keysutil.KeyType_ChaCha20_Poly1305}, + nonce: []byte("testnonce"), + convergentEncryption: true, + convergentVersion: 3, + expected: false, + }, + { + name: "-NoConvergent-WithNonce", + keyTypes: []keysutil.KeyType{keysutil.KeyType_RSA2048, keysutil.KeyType_RSA4096}, + nonce: []byte("testnonce"), + convergentEncryption: false, + convergentVersion: -1, + expected: false, + }, + } + + for _, tt := range tests { + for _, keyType := range tt.keyTypes { + testName := keyType.String() + tt.name + t.Run(testName, func(t *testing.T) { + p := keysutil.Policy{ + ConvergentEncryption: tt.convergentEncryption, + ConvergentVersion: tt.convergentVersion, + Type: keyType, + } + + actual := shouldWarnAboutNonceUsage(&p, tt.nonce) + + if actual != tt.expected { + t.Errorf("Expected actual '%v' but got '%v'", tt.expected, actual) + } + }) + } + } +} + +func TestTransit_EncryptWithRSAPublicKey(t *testing.T) { + generateKeys(t) + b, s := createBackendWithStorage(t) + keyType := "rsa-2048" + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + + // Get key + privateKey := getKey(t, keyType) + publicKeyBytes, err := getPublicKey(privateKey, keyType) + if err != nil { + t.Fatal(err) + } + + // Import key + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "public_key": publicKeyBytes, + "type": keyType, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to import public key: %s", err) + } + + req = &logical.Request{ + Operation: logical.CreateOperation, + Path: fmt.Sprintf("encrypt/%s", keyID), + Storage: s, + Data: map[string]interface{}{ + "plaintext": "bXkgc2VjcmV0IGRhdGE=", + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } +} diff --git a/builtin/logical/transit/path_export.go b/builtin/logical/transit/path_export.go new file mode 100644 index 0000000..6cfd7d0 --- /dev/null +++ b/builtin/logical/transit/path_export.go @@ -0,0 +1,368 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "strconv" + "strings" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/keysutil" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + exportTypeEncryptionKey = "encryption-key" + exportTypeSigningKey = "signing-key" + exportTypeHMACKey = "hmac-key" + exportTypePublicKey = "public-key" +) + +func (b *backend) pathExportKeys() *framework.Path { + return &framework.Path{ + Pattern: "export/" + framework.GenericNameRegex("type") + "/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("version"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "export", + OperationSuffix: "key|key-version", + }, + + Fields: map[string]*framework.FieldSchema{ + "type": { + Type: framework.TypeString, + Description: "Type of key to export (encryption-key, signing-key, hmac-key, public-key)", + }, + "name": { + Type: framework.TypeString, + Description: "Name of the key", + }, + "version": { + Type: framework.TypeString, + Description: "Version of the key", + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathPolicyExportRead, + }, + + HelpSynopsis: pathExportHelpSyn, + HelpDescription: pathExportHelpDesc, + } +} + +func (b *backend) pathPolicyExportRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + exportType := d.Get("type").(string) + name := d.Get("name").(string) + version := d.Get("version").(string) + + switch exportType { + case exportTypeEncryptionKey: + case exportTypeSigningKey: + case exportTypeHMACKey: + case exportTypePublicKey: + default: + return logical.ErrorResponse(fmt.Sprintf("invalid export type: %s", exportType)), logical.ErrInvalidRequest + } + + p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ + Storage: req.Storage, + Name: name, + }, b.GetRandomReader()) + if err != nil { + return nil, err + } + if p == nil { + return nil, nil + } + if !b.System().CachingDisabled() { + p.Lock(false) + } + defer p.Unlock() + + if !p.Exportable && exportType != exportTypePublicKey { + return logical.ErrorResponse("private key material is not exportable"), nil + } + + switch exportType { + case exportTypeEncryptionKey: + if !p.Type.EncryptionSupported() { + return logical.ErrorResponse("encryption not supported for the key"), logical.ErrInvalidRequest + } + case exportTypeSigningKey: + if !p.Type.SigningSupported() { + return logical.ErrorResponse("signing not supported for the key"), logical.ErrInvalidRequest + } + } + + retKeys := map[string]string{} + switch version { + case "": + for k, v := range p.Keys { + exportKey, err := getExportKey(p, &v, exportType) + if err != nil { + return nil, err + } + retKeys[k] = exportKey + } + + default: + var versionValue int + if version == "latest" { + versionValue = p.LatestVersion + } else { + version = strings.TrimPrefix(version, "v") + versionValue, err = strconv.Atoi(version) + if err != nil { + return logical.ErrorResponse("invalid key version"), logical.ErrInvalidRequest + } + } + + if versionValue < p.MinDecryptionVersion { + return logical.ErrorResponse("version for export is below minimum decryption version"), logical.ErrInvalidRequest + } + key, ok := p.Keys[strconv.Itoa(versionValue)] + if !ok { + return 
logical.ErrorResponse("version does not exist or cannot be found"), logical.ErrInvalidRequest + } + + exportKey, err := getExportKey(p, &key, exportType) + if err != nil { + return nil, err + } + + retKeys[strconv.Itoa(versionValue)] = exportKey + } + + resp := &logical.Response{ + Data: map[string]interface{}{ + "name": p.Name, + "type": p.Type.String(), + "keys": retKeys, + }, + } + + return resp, nil +} + +func getExportKey(policy *keysutil.Policy, key *keysutil.KeyEntry, exportType string) (string, error) { + if policy == nil { + return "", errors.New("nil policy provided") + } + + switch exportType { + case exportTypeHMACKey: + src := key.HMACKey + if policy.Type == keysutil.KeyType_HMAC { + src = key.Key + } + return strings.TrimSpace(base64.StdEncoding.EncodeToString(src)), nil + + case exportTypeEncryptionKey: + switch policy.Type { + case keysutil.KeyType_AES128_GCM96, keysutil.KeyType_AES256_GCM96, keysutil.KeyType_ChaCha20_Poly1305: + return strings.TrimSpace(base64.StdEncoding.EncodeToString(key.Key)), nil + + case keysutil.KeyType_RSA2048, keysutil.KeyType_RSA3072, keysutil.KeyType_RSA4096: + rsaKey, err := encodeRSAPrivateKey(key) + if err != nil { + return "", err + } + return rsaKey, nil + } + + case exportTypeSigningKey: + switch policy.Type { + case keysutil.KeyType_ECDSA_P256, keysutil.KeyType_ECDSA_P384, keysutil.KeyType_ECDSA_P521: + var curve elliptic.Curve + switch policy.Type { + case keysutil.KeyType_ECDSA_P384: + curve = elliptic.P384() + case keysutil.KeyType_ECDSA_P521: + curve = elliptic.P521() + default: + curve = elliptic.P256() + } + ecKey, err := keyEntryToECPrivateKey(key, curve) + if err != nil { + return "", err + } + return ecKey, nil + + case keysutil.KeyType_ED25519: + if len(key.Key) == 0 { + return "", nil + } + + return strings.TrimSpace(base64.StdEncoding.EncodeToString(key.Key)), nil + + case keysutil.KeyType_RSA2048, keysutil.KeyType_RSA3072, keysutil.KeyType_RSA4096: + rsaKey, err := encodeRSAPrivateKey(key) + if err != nil { + return "", err + } + return rsaKey, nil + } + case exportTypePublicKey: + switch policy.Type { + case keysutil.KeyType_ECDSA_P256, keysutil.KeyType_ECDSA_P384, keysutil.KeyType_ECDSA_P521: + var curve elliptic.Curve + switch policy.Type { + case keysutil.KeyType_ECDSA_P384: + curve = elliptic.P384() + case keysutil.KeyType_ECDSA_P521: + curve = elliptic.P521() + default: + curve = elliptic.P256() + } + ecKey, err := keyEntryToECPublicKey(key, curve) + if err != nil { + return "", err + } + return ecKey, nil + + case keysutil.KeyType_ED25519: + return strings.TrimSpace(key.FormattedPublicKey), nil + + case keysutil.KeyType_RSA2048, keysutil.KeyType_RSA3072, keysutil.KeyType_RSA4096: + rsaKey, err := encodeRSAPublicKey(key) + if err != nil { + return "", err + } + return rsaKey, nil + } + } + + return "", fmt.Errorf("unknown key type %v for export type %v", policy.Type, exportType) +} + +func encodeRSAPrivateKey(key *keysutil.KeyEntry) (string, error) { + if key == nil { + return "", errors.New("nil KeyEntry provided") + } + + if key.IsPrivateKeyMissing() { + return "", nil + } + + // When encoding PKCS1, the PEM header should be `RSA PRIVATE KEY`. When Go + // has PKCS8 encoding support, we may want to change this. 
+ blockType := "RSA PRIVATE KEY" + derBytes := x509.MarshalPKCS1PrivateKey(key.RSAKey) + pemBlock := pem.Block{ + Type: blockType, + Bytes: derBytes, + } + + pemBytes := pem.EncodeToMemory(&pemBlock) + return string(pemBytes), nil +} + +func encodeRSAPublicKey(key *keysutil.KeyEntry) (string, error) { + if key == nil { + return "", errors.New("nil KeyEntry provided") + } + + var publicKey crypto.PublicKey + publicKey = key.RSAPublicKey + if key.RSAKey != nil { + // Prefer the private key if it exists + publicKey = key.RSAKey.Public() + } + + if publicKey == nil { + return "", errors.New("requested to encode an RSA public key with no RSA key present") + } + + // Encode the RSA public key in PEM format to return over the API + derBytes, err := x509.MarshalPKIXPublicKey(publicKey) + if err != nil { + return "", fmt.Errorf("error marshaling RSA public key: %w", err) + } + pemBlock := &pem.Block{ + Type: "PUBLIC KEY", + Bytes: derBytes, + } + pemBytes := pem.EncodeToMemory(pemBlock) + if pemBytes == nil || len(pemBytes) == 0 { + return "", fmt.Errorf("failed to PEM-encode RSA public key") + } + + return string(pemBytes), nil +} + +func keyEntryToECPrivateKey(k *keysutil.KeyEntry, curve elliptic.Curve) (string, error) { + if k == nil { + return "", errors.New("nil KeyEntry provided") + } + + if k.IsPrivateKeyMissing() { + return "", nil + } + + pubKey := ecdsa.PublicKey{ + Curve: curve, + X: k.EC_X, + Y: k.EC_Y, + } + + blockType := "EC PRIVATE KEY" + privKey := &ecdsa.PrivateKey{ + PublicKey: pubKey, + D: k.EC_D, + } + derBytes, err := x509.MarshalECPrivateKey(privKey) + if err != nil { + return "", err + } + + pemBlock := pem.Block{ + Type: blockType, + Bytes: derBytes, + } + + return strings.TrimSpace(string(pem.EncodeToMemory(&pemBlock))), nil +} + +func keyEntryToECPublicKey(k *keysutil.KeyEntry, curve elliptic.Curve) (string, error) { + if k == nil { + return "", errors.New("nil KeyEntry provided") + } + + pubKey := ecdsa.PublicKey{ + Curve: curve, + X: k.EC_X, + Y: k.EC_Y, + } + + blockType := "PUBLIC KEY" + derBytes, err := x509.MarshalPKIXPublicKey(&pubKey) + if err != nil { + return "", err + } + + pemBlock := pem.Block{ + Type: blockType, + Bytes: derBytes, + } + + return strings.TrimSpace(string(pem.EncodeToMemory(&pemBlock))), nil +} + +const pathExportHelpSyn = `Export named encryption or signing key` + +const pathExportHelpDesc = ` +This path is used to export the named keys that are configured as +exportable. +` diff --git a/builtin/logical/transit/path_export_test.go b/builtin/logical/transit/path_export_test.go new file mode 100644 index 0000000..5eb6eea --- /dev/null +++ b/builtin/logical/transit/path_export_test.go @@ -0,0 +1,489 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package transit + +import ( + "context" + "fmt" + "reflect" + "strconv" + "strings" + "testing" + + "github.com/hashicorp/vault/sdk/logical" +) + +func TestTransit_Export_Unknown_ExportType(t *testing.T) { + t.Parallel() + + b, storage := createBackendWithSysView(t) + keyType := "ed25519" + req := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/foo", + Data: map[string]interface{}{ + "exportable": true, + "type": keyType, + }, + } + _, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed creating key %s: %v", keyType, err) + } + + req = &logical.Request{ + Storage: storage, + Operation: logical.ReadOperation, + Path: "export/bad-export-type/foo", + } + rsp, err := b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatalf("did not error on bad export type got: %v", rsp) + } + if rsp == nil || !rsp.IsError() { + t.Fatalf("response did not contain an error on bad export type got: %v", rsp) + } + if !strings.Contains(rsp.Error().Error(), "invalid export type") { + t.Fatalf("failed with unexpected error: %v", err) + } +} + +func TestTransit_Export_KeyVersion_ExportsCorrectVersion(t *testing.T) { + t.Parallel() + + verifyExportsCorrectVersion(t, "encryption-key", "aes128-gcm96") + verifyExportsCorrectVersion(t, "encryption-key", "aes256-gcm96") + verifyExportsCorrectVersion(t, "encryption-key", "chacha20-poly1305") + verifyExportsCorrectVersion(t, "encryption-key", "rsa-2048") + verifyExportsCorrectVersion(t, "encryption-key", "rsa-3072") + verifyExportsCorrectVersion(t, "encryption-key", "rsa-4096") + verifyExportsCorrectVersion(t, "signing-key", "ecdsa-p256") + verifyExportsCorrectVersion(t, "signing-key", "ecdsa-p384") + verifyExportsCorrectVersion(t, "signing-key", "ecdsa-p521") + verifyExportsCorrectVersion(t, "signing-key", "ed25519") + verifyExportsCorrectVersion(t, "signing-key", "rsa-2048") + verifyExportsCorrectVersion(t, "signing-key", "rsa-3072") + verifyExportsCorrectVersion(t, "signing-key", "rsa-4096") + verifyExportsCorrectVersion(t, "hmac-key", "aes128-gcm96") + verifyExportsCorrectVersion(t, "hmac-key", "aes256-gcm96") + verifyExportsCorrectVersion(t, "hmac-key", "chacha20-poly1305") + verifyExportsCorrectVersion(t, "hmac-key", "ecdsa-p256") + verifyExportsCorrectVersion(t, "hmac-key", "ecdsa-p384") + verifyExportsCorrectVersion(t, "hmac-key", "ecdsa-p521") + verifyExportsCorrectVersion(t, "hmac-key", "ed25519") + verifyExportsCorrectVersion(t, "hmac-key", "hmac") + verifyExportsCorrectVersion(t, "public-key", "rsa-2048") + verifyExportsCorrectVersion(t, "public-key", "rsa-3072") + verifyExportsCorrectVersion(t, "public-key", "rsa-4096") + verifyExportsCorrectVersion(t, "public-key", "ecdsa-p256") + verifyExportsCorrectVersion(t, "public-key", "ecdsa-p384") + verifyExportsCorrectVersion(t, "public-key", "ecdsa-p521") + verifyExportsCorrectVersion(t, "public-key", "ed25519") +} + +func verifyExportsCorrectVersion(t *testing.T, exportType, keyType string) { + b, storage := createBackendWithSysView(t) + + // First create a key, v1 + req := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/foo", + } + req.Data = map[string]interface{}{ + "exportable": true, + "type": keyType, + } + if keyType == "hmac" { + req.Data["key_size"] = 32 + } + _, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + verifyVersion := func(versionRequest string, expectedVersion int) { + req := 
&logical.Request{ + Storage: storage, + Operation: logical.ReadOperation, + Path: fmt.Sprintf("export/%s/foo/%s", exportType, versionRequest), + } + rsp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + typRaw, ok := rsp.Data["type"] + if !ok { + t.Fatal("no type returned from export") + } + typ, ok := typRaw.(string) + if !ok { + t.Fatalf("could not find key type, resp data is %#v", rsp.Data) + } + if typ != keyType { + t.Fatalf("key type mismatch; %q vs %q", typ, keyType) + } + + keysRaw, ok := rsp.Data["keys"] + if !ok { + t.Fatal("could not find keys value") + } + keys, ok := keysRaw.(map[string]string) + if !ok { + t.Fatal("could not cast to keys map") + } + if len(keys) != 1 { + t.Fatal("unexpected number of keys found") + } + + for k := range keys { + if k != strconv.Itoa(expectedVersion) { + t.Fatalf("expected version %q, received version %q", strconv.Itoa(expectedVersion), k) + } + } + } + + verifyVersion("v1", 1) + verifyVersion("1", 1) + verifyVersion("latest", 1) + + req.Path = "keys/foo/rotate" + // v2 + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + verifyVersion("v1", 1) + verifyVersion("1", 1) + verifyVersion("v2", 2) + verifyVersion("2", 2) + verifyVersion("latest", 2) + + // v3 + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + verifyVersion("v1", 1) + verifyVersion("1", 1) + verifyVersion("v3", 3) + verifyVersion("3", 3) + verifyVersion("latest", 3) +} + +func TestTransit_Export_ValidVersionsOnly(t *testing.T) { + t.Parallel() + + b, storage := createBackendWithSysView(t) + + // First create a key, v1 + req := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/foo", + } + req.Data = map[string]interface{}{ + "exportable": true, + } + _, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + req.Path = "keys/foo/rotate" + // v2 + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + // v3 + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + verifyExport := func(validVersions []int) { + req = &logical.Request{ + Storage: storage, + Operation: logical.ReadOperation, + Path: "export/encryption-key/foo", + } + rsp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if _, ok := rsp.Data["keys"]; !ok { + t.Error("no keys returned from export") + } + + keys, ok := rsp.Data["keys"].(map[string]string) + if !ok { + t.Error("could not cast to keys object") + } + if len(keys) != len(validVersions) { + t.Errorf("expected %d key count, received %d", len(validVersions), len(keys)) + } + for _, version := range validVersions { + if _, ok := keys[strconv.Itoa(version)]; !ok { + t.Errorf("expecting to find key version %d, not found", version) + } + } + } + + verifyExport([]int{1, 2, 3}) + + req = &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/foo/config", + } + req.Data = map[string]interface{}{ + "min_decryption_version": 3, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + verifyExport([]int{3}) + + req = &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/foo/config", + } + req.Data = map[string]interface{}{ + "min_decryption_version": 2, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + 
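+	// With min_decryption_version lowered back to 2, versions 2 and 3 should
+	// be exportable again.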
verifyExport([]int{2, 3}) + + req = &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/foo/rotate", + } + // v4 + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + verifyExport([]int{2, 3, 4}) +} + +func TestTransit_Export_KeysNotMarkedExportable_ReturnsError(t *testing.T) { + t.Parallel() + + b, storage := createBackendWithSysView(t) + + req := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/foo", + } + req.Data = map[string]interface{}{ + "exportable": false, + } + _, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + req = &logical.Request{ + Storage: storage, + Operation: logical.ReadOperation, + Path: "export/encryption-key/foo", + } + rsp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + if !rsp.IsError() { + t.Fatal("Key not marked as exportable but was exported.") + } +} + +func TestTransit_Export_SigningDoesNotSupportSigning_ReturnsError(t *testing.T) { + t.Parallel() + + b, storage := createBackendWithSysView(t) + + req := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/foo", + } + req.Data = map[string]interface{}{ + "exportable": true, + "type": "aes256-gcm96", + } + _, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + req = &logical.Request{ + Storage: storage, + Operation: logical.ReadOperation, + Path: "export/signing-key/foo", + } + _, err = b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatal("Key does not support signing but was exported without error.") + } +} + +func TestTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t *testing.T) { + t.Parallel() + + testTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t, "ecdsa-p256") + testTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t, "ecdsa-p384") + testTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t, "ecdsa-p521") + testTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t, "ed25519") +} + +func testTransit_Export_EncryptionDoesNotSupportEncryption_ReturnsError(t *testing.T, keyType string) { + b, storage := createBackendWithSysView(t) + + req := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/foo", + } + req.Data = map[string]interface{}{ + "exportable": true, + "type": keyType, + } + _, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + req = &logical.Request{ + Storage: storage, + Operation: logical.ReadOperation, + Path: "export/encryption-key/foo", + } + _, err = b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatalf("Key %s does not support encryption but was exported without error.", keyType) + } +} + +func TestTransit_Export_PublicKeyDoesNotSupportEncryption_ReturnsError(t *testing.T) { + t.Parallel() + + testTransit_Export_PublicKeyNotSupported_ReturnsError(t, "chacha20-poly1305") + testTransit_Export_PublicKeyNotSupported_ReturnsError(t, "aes128-gcm96") + testTransit_Export_PublicKeyNotSupported_ReturnsError(t, "aes256-gcm96") + testTransit_Export_PublicKeyNotSupported_ReturnsError(t, "hmac") +} + +func testTransit_Export_PublicKeyNotSupported_ReturnsError(t *testing.T, keyType string) { + b, storage := createBackendWithSysView(t) + + req := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/foo", + Data: 
map[string]interface{}{ + "type": keyType, + }, + } + if keyType == "hmac" { + req.Data["key_size"] = 32 + } + _, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed creating key %s: %v", keyType, err) + } + + req = &logical.Request{ + Storage: storage, + Operation: logical.ReadOperation, + Path: "export/public-key/foo", + } + _, err = b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatalf("Key %s does not support public key exporting but was exported without error.", keyType) + } + if !strings.Contains(err.Error(), fmt.Sprintf("unknown key type %s for export type public-key", keyType)) { + t.Fatalf("unexpected error value for key type: %s: %v", keyType, err) + } +} + +func TestTransit_Export_KeysDoesNotExist_ReturnsNotFound(t *testing.T) { + t.Parallel() + + b, storage := createBackendWithSysView(t) + + req := &logical.Request{ + Storage: storage, + Operation: logical.ReadOperation, + Path: "export/encryption-key/foo", + } + rsp, err := b.HandleRequest(context.Background(), req) + + if !(rsp == nil && err == nil) { + t.Fatal("Key does not exist but does not return not found") + } +} + +func TestTransit_Export_EncryptionKey_DoesNotExportHMACKey(t *testing.T) { + t.Parallel() + + b, storage := createBackendWithSysView(t) + + req := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/foo", + } + req.Data = map[string]interface{}{ + "exportable": true, + "type": "aes256-gcm96", + } + _, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + req = &logical.Request{ + Storage: storage, + Operation: logical.ReadOperation, + Path: "export/encryption-key/foo", + } + encryptionKeyRsp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + req.Path = "export/hmac-key/foo" + hmacKeyRsp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + encryptionKeys, ok := encryptionKeyRsp.Data["keys"].(map[string]string) + if !ok { + t.Error("could not cast to keys object") + } + hmacKeys, ok := hmacKeyRsp.Data["keys"].(map[string]string) + if !ok { + t.Error("could not cast to keys object") + } + if len(hmacKeys) != len(encryptionKeys) { + t.Errorf("hmac (%d) and encryption (%d) key count don't match", + len(hmacKeys), len(encryptionKeys)) + } + + if reflect.DeepEqual(encryptionKeyRsp.Data, hmacKeyRsp.Data) { + t.Fatal("Encryption key data matched hmac key data") + } +} diff --git a/builtin/logical/transit/path_hash.go b/builtin/logical/transit/path_hash.go new file mode 100644 index 0000000..ecf619a --- /dev/null +++ b/builtin/logical/transit/path_hash.go @@ -0,0 +1,148 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "crypto/sha256" + "crypto/sha512" + "encoding/base64" + "encoding/hex" + "fmt" + "hash" + + "golang.org/x/crypto/sha3" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func (b *backend) pathHash() *framework.Path { + return &framework.Path{ + Pattern: "hash" + framework.OptionalParamRegex("urlalgorithm"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "hash", + OperationSuffix: "|with-algorithm", + }, + + Fields: map[string]*framework.FieldSchema{ + "input": { + Type: framework.TypeString, + Description: "The base64-encoded input data", + }, + + "algorithm": { + Type: framework.TypeString, + Default: "sha2-256", + Description: `Algorithm to use (POST body parameter). Valid values are: + +* sha2-224 +* sha2-256 +* sha2-384 +* sha2-512 +* sha3-224 +* sha3-256 +* sha3-384 +* sha3-512 + +Defaults to "sha2-256".`, + }, + + "urlalgorithm": { + Type: framework.TypeString, + Description: `Algorithm to use (POST URL parameter)`, + }, + + "format": { + Type: framework.TypeString, + Default: "hex", + Description: `Encoding format to use. Can be "hex" or "base64". Defaults to "hex".`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathHashWrite, + }, + + HelpSynopsis: pathHashHelpSyn, + HelpDescription: pathHashHelpDesc, + } +} + +func (b *backend) pathHashWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + rawInput, ok, err := d.GetOkErr("input") + if err != nil { + return nil, err + } + if !ok { + return logical.ErrorResponse("input missing"), logical.ErrInvalidRequest + } + + inputB64 := rawInput.(string) + format := d.Get("format").(string) + algorithm := d.Get("urlalgorithm").(string) + if algorithm == "" { + algorithm = d.Get("algorithm").(string) + } + + input, err := base64.StdEncoding.DecodeString(inputB64) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("unable to decode input as base64: %s", err)), logical.ErrInvalidRequest + } + + switch format { + case "hex": + case "base64": + default: + return logical.ErrorResponse(fmt.Sprintf("unsupported encoding format %s; must be \"hex\" or \"base64\"", format)), nil + } + + var hf hash.Hash + switch algorithm { + case "sha2-224": + hf = sha256.New224() + case "sha2-256": + hf = sha256.New() + case "sha2-384": + hf = sha512.New384() + case "sha2-512": + hf = sha512.New() + case "sha3-224": + hf = sha3.New224() + case "sha3-256": + hf = sha3.New256() + case "sha3-384": + hf = sha3.New384() + case "sha3-512": + hf = sha3.New512() + default: + return logical.ErrorResponse(fmt.Sprintf("unsupported algorithm %s", algorithm)), nil + } + hf.Write(input) + retBytes := hf.Sum(nil) + + var retStr string + switch format { + case "hex": + retStr = hex.EncodeToString(retBytes) + case "base64": + retStr = base64.StdEncoding.EncodeToString(retBytes) + } + + // Generate the response + resp := &logical.Response{ + Data: map[string]interface{}{ + "sum": retStr, + }, + } + return resp, nil +} + +const pathHashHelpSyn = `Generate a hash sum for input data` + +const pathHashHelpDesc = ` +Generates a hash sum of the given algorithm against the given input data. 
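+ +For example, assuming the secrets engine is mounted at the default path of +"transit", the following hashes the base64-encoded input with SHA2-512 +selected via the URL parameter: + +    $ vault write transit/hash/sha2-512 input="YWJjMTIzCg==" + +The sum is returned hex-encoded unless "format" is set to "base64". 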
+` diff --git a/builtin/logical/transit/path_hash_test.go b/builtin/logical/transit/path_hash_test.go new file mode 100644 index 0000000..084012d --- /dev/null +++ b/builtin/logical/transit/path_hash_test.go @@ -0,0 +1,106 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "testing" + + "github.com/hashicorp/vault/sdk/logical" +) + +func TestTransit_Hash(t *testing.T) { + b, storage := createBackendWithSysView(t) + + req := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "hash", + Data: map[string]interface{}{ + "input": "dGhlIHF1aWNrIGJyb3duIGZveA==", + }, + } + + doRequest := func(req *logical.Request, errExpected bool, expected string) { + resp, err := b.HandleRequest(context.Background(), req) + if err != nil && !errExpected { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if errExpected { + if !resp.IsError() { + t.Fatalf("bad: did not get error response: %#v", *resp) + } + return + } + if resp.IsError() { + t.Fatalf("bad: got error response: %#v", *resp) + } + sum, ok := resp.Data["sum"] + if !ok { + t.Fatal("no sum key found in returned data") + } + if sum.(string) != expected { + t.Fatal("mismatched hashes") + } + } + + // Test defaults -- sha2-256 + doRequest(req, false, "9ecb36561341d18eb65484e833efea61edc74b84cf5e6ae1b81c63533e25fc8f") + + // Test algorithm selection in the path + req.Path = "hash/sha2-224" + doRequest(req, false, "ea074a96cabc5a61f8298a2c470f019074642631a49e1c5e2f560865") + + // Reset and test algorithm selection in the data + req.Path = "hash" + req.Data["algorithm"] = "sha2-224" + doRequest(req, false, "ea074a96cabc5a61f8298a2c470f019074642631a49e1c5e2f560865") + + req.Data["algorithm"] = "sha2-384" + doRequest(req, false, "15af9ec8be783f25c583626e9491dbf129dd6dd620466fdf05b3a1d0bb8381d30f4d3ec29f923ff1e09a0f6b337365a6") + + req.Data["algorithm"] = "sha2-512" + doRequest(req, false, "d9d380f29b97ad6a1d92e987d83fa5a02653301e1006dd2bcd51afa59a9147e9caedaf89521abc0f0b682adcd47fb512b8343c834a32f326fe9bef00542ce887") + + // Test returning as base64 + req.Data["format"] = "base64" + doRequest(req, false, "2dOA8puXrWodkumH2D+loCZTMB4QBt0rzVGvpZqRR+nK7a+JUhq8DwtoKtzUf7USuDQ8g0oy8yb+m+8AVCzohw==") + + // Test SHA3 + req.Data["format"] = "hex" + req.Data["algorithm"] = "sha3-224" + doRequest(req, false, "ced91e69d89c837e87cff960bd64fd9b9f92325fb9add8988d33d007") + + req.Data["algorithm"] = "sha3-256" + doRequest(req, false, "e4bd866ec3fa52df3b7842aa97b448bc859a7606cefcdad1715847f4b82a6c93") + + req.Data["algorithm"] = "sha3-384" + doRequest(req, false, "715cd38cbf8d0bab426b6a084d649760be555dd64b34de6db148a3fbf2cd2aa5d8b03eb6eda73a3e9a8769c00b4c2113") + + req.Data["algorithm"] = "sha3-512" + doRequest(req, false, "f7cac5ad830422a5408b36a60a60620687be180765a3e2895bc3bdbd857c9e08246c83064d4e3612f0cb927f3ead208413ab98624bf7b0617af0f03f62080976") + + // Test returning SHA3 as base64 + req.Data["format"] = "base64" + doRequest(req, false, "98rFrYMEIqVAizamCmBiBoe+GAdlo+KJW8O9vYV8nggkbIMGTU42EvDLkn8+rSCEE6uYYkv3sGF68PA/YggJdg==") + + // Test bad input/format/algorithm + delete(req.Data, "input") + doRequest(req, true, "") + + req.Data["input"] = "dGhlIHF1aWNrIGJyb3duIGZveA==" + req.Data["format"] = "base92" + doRequest(req, true, "") + + req.Data["format"] = "hex" + req.Data["algorithm"] = "foobar" + doRequest(req, true, "") + + req.Data["algorithm"] = "sha2-256" + req.Data["input"] = "foobar" + doRequest(req, true, "") +} diff --git 
a/builtin/logical/transit/path_hmac.go b/builtin/logical/transit/path_hmac.go new file mode 100644 index 0000000..704a1b8 --- /dev/null +++ b/builtin/logical/transit/path_hmac.go @@ -0,0 +1,433 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "crypto/hmac" + "encoding/base64" + "errors" + "fmt" + "strconv" + "strings" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/keysutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/mapstructure" +) + +// batchRequestHMACItem represents a request item for batch processing. +// A map type allows us to distinguish between empty and missing values. +type batchRequestHMACItem map[string]string + +// batchResponseHMACItem represents a response item for batch processing +type batchResponseHMACItem struct { + // HMAC for the input present in the corresponding batch request item + HMAC string `json:"hmac,omitempty" mapstructure:"hmac"` + + // Valid indicates whether signature matches the signature derived from the input string + Valid bool `json:"valid,omitempty" mapstructure:"valid"` + + // Error, if set, represents a failure encountered while generating the HMAC for a + // corresponding batch request item + Error string `json:"error,omitempty" mapstructure:"error"` + + // The return paths in some cases are (nil, err) and others + // (logical.ErrorResponse(..),nil), and others (logical.ErrorResponse(..),err). + // For batch processing to successfully mimic previous handling for simple 'input', + // both output values are needed - though 'err' should never be serialized. + err error + + // Reference is an arbitrary caller supplied string value that will be placed on the + // batch response to ease correlation between inputs and outputs + Reference string `json:"reference" mapstructure:"reference"` +} + +func (b *backend) pathHMAC() *framework.Path { + return &framework.Path{ + Pattern: "hmac/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("urlalgorithm"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "generate", + OperationSuffix: "hmac|hmac-with-algorithm", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "The key to use for the HMAC function", + }, + + "input": { + Type: framework.TypeString, + Description: "The base64-encoded input data", + }, + + "algorithm": { + Type: framework.TypeString, + Default: "sha2-256", + Description: `Algorithm to use (POST body parameter). Valid values are: + +* sha2-224 +* sha2-256 +* sha2-384 +* sha2-512 +* sha3-224 +* sha3-256 +* sha3-384 +* sha3-512 + +Defaults to "sha2-256".`, + }, + + "urlalgorithm": { + Type: framework.TypeString, + Description: `Algorithm to use (POST URL parameter)`, + }, + + "key_version": { + Type: framework.TypeInt, + Description: `The version of the key to use for generating the HMAC. +Must be 0 (for latest) or a value greater than or equal +to the min_encryption_version configured on the key.`, + }, + + "batch_input": { + Type: framework.TypeSlice, + Description: ` +Specifies a list of items to be processed in a single batch. When this parameter +is set, if the parameter 'input' is also set, it will be ignored. 
+Any batch output will preserve the order of the batch input.`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathHMACWrite, + }, + + HelpSynopsis: pathHMACHelpSyn, + HelpDescription: pathHMACHelpDesc, + } +} + +func (b *backend) pathHMACWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + ver := d.Get("key_version").(int) + + algorithm := d.Get("urlalgorithm").(string) + if algorithm == "" { + algorithm = d.Get("algorithm").(string) + } + + // Get the policy + p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ + Storage: req.Storage, + Name: name, + }, b.GetRandomReader()) + if err != nil { + return nil, err + } + if p == nil { + return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest + } + if !b.System().CachingDisabled() { + p.Lock(false) + } + + switch { + case ver == 0: + // Allowed, will use latest; set explicitly here to ensure the string + // is generated properly + ver = p.LatestVersion + case ver == p.LatestVersion: + // Allowed + case p.MinEncryptionVersion > 0 && ver < p.MinEncryptionVersion: + p.Unlock() + return logical.ErrorResponse("cannot generate HMAC: version is too old (disallowed by policy)"), logical.ErrInvalidRequest + } + + key, err := p.HMACKey(ver) + if err != nil { + p.Unlock() + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + if key == nil && p.Type != keysutil.KeyType_MANAGED_KEY { + p.Unlock() + return nil, fmt.Errorf("HMAC key value could not be computed") + } + + hashAlgorithm, ok := keysutil.HashTypeMap[algorithm] + if !ok { + p.Unlock() + return logical.ErrorResponse("unsupported algorithm %q", algorithm), nil + } + + hashAlg := keysutil.HashFuncMap[hashAlgorithm] + + batchInputRaw := d.Raw["batch_input"] + var batchInputItems []batchRequestHMACItem + if batchInputRaw != nil { + err = mapstructure.Decode(batchInputRaw, &batchInputItems) + if err != nil { + p.Unlock() + return nil, fmt.Errorf("failed to parse batch input: %w", err) + } + + if len(batchInputItems) == 0 { + p.Unlock() + return logical.ErrorResponse("missing batch input to process"), logical.ErrInvalidRequest + } + } else { + valueRaw, ok := d.GetOk("input") + if !ok { + p.Unlock() + return logical.ErrorResponse("missing input for HMAC"), logical.ErrInvalidRequest + } + + batchInputItems = make([]batchRequestHMACItem, 1) + batchInputItems[0] = batchRequestHMACItem{ + "input": valueRaw.(string), + } + } + + response := make([]batchResponseHMACItem, len(batchInputItems)) + + for i, item := range batchInputItems { + rawInput, ok := item["input"] + if !ok { + response[i].Error = "missing input for HMAC" + response[i].err = logical.ErrInvalidRequest + continue + } + + input, err := base64.StdEncoding.DecodeString(rawInput) + if err != nil { + response[i].Error = fmt.Sprintf("unable to decode input as base64: %s", err) + response[i].err = logical.ErrInvalidRequest + continue + } + + var retBytes []byte + + if p.Type == keysutil.KeyType_MANAGED_KEY { + managedKeySystemView, ok := b.System().(logical.ManagedKeySystemView) + if !ok { + response[i].err = errors.New("unsupported system view") + continue + } + + retBytes, err = p.HMACWithManagedKey(ctx, ver, managedKeySystemView, b.backendUUID, algorithm, input) + if err != nil { + response[i].err = err + continue + } + } else { + hf := hmac.New(hashAlg, key) + hf.Write(input) + retBytes = hf.Sum(nil) + } + + retStr := base64.StdEncoding.EncodeToString(retBytes) + retStr = 
fmt.Sprintf("vault:v%s:%s", strconv.Itoa(ver), retStr) + response[i].HMAC = retStr + } + + p.Unlock() + + // Generate the response + resp := &logical.Response{} + if batchInputRaw != nil { + // Copy the references + for i := range batchInputItems { + response[i].Reference = batchInputItems[i]["reference"] + } + resp.Data = map[string]interface{}{ + "batch_results": response, + } + } else { + if response[0].Error != "" || response[0].err != nil { + if response[0].Error != "" { + return logical.ErrorResponse(response[0].Error), response[0].err + } else { + return nil, response[0].err + } + } + resp.Data = map[string]interface{}{ + "hmac": response[0].HMAC, + } + } + + return resp, nil +} + +func (b *backend) pathHMACVerify(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + algorithm := d.Get("urlalgorithm").(string) + if algorithm == "" { + algorithm = d.Get("algorithm").(string) + } + + // Get the policy + p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ + Storage: req.Storage, + Name: name, + }, b.GetRandomReader()) + if err != nil { + return nil, err + } + if p == nil { + return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest + } + if !b.System().CachingDisabled() { + p.Lock(false) + } + + hashAlgorithm, ok := keysutil.HashTypeMap[algorithm] + if !ok { + p.Unlock() + return logical.ErrorResponse("unsupported algorithm %q", hashAlgorithm), nil + } + + hashAlg := keysutil.HashFuncMap[hashAlgorithm] + + batchInputRaw := d.Raw["batch_input"] + var batchInputItems []batchRequestHMACItem + if batchInputRaw != nil { + err := mapstructure.Decode(batchInputRaw, &batchInputItems) + if err != nil { + p.Unlock() + return nil, fmt.Errorf("failed to parse batch input: %w", err) + } + + if len(batchInputItems) == 0 { + p.Unlock() + return logical.ErrorResponse("missing batch input to process"), logical.ErrInvalidRequest + } + } else { + // use empty string if input is missing - not an error + inputB64 := d.Get("input").(string) + hmac := d.Get("hmac").(string) + + batchInputItems = make([]batchRequestHMACItem, 1) + batchInputItems[0] = batchRequestHMACItem{ + "input": inputB64, + "hmac": hmac, + } + } + + response := make([]batchResponseHMACItem, len(batchInputItems)) + + for i, item := range batchInputItems { + rawInput, ok := item["input"] + if !ok { + response[i].Error = "missing input" + response[i].err = logical.ErrInvalidRequest + continue + } + + input, err := base64.StdEncoding.DecodeString(rawInput) + if err != nil { + response[i].Error = fmt.Sprintf("unable to decode input as base64: %s", err) + response[i].err = logical.ErrInvalidRequest + continue + } + + verificationHMAC, ok := item["hmac"] + if !ok { + response[i].Error = "missing hmac" + response[i].err = logical.ErrInvalidRequest + continue + } + + // Verify the prefix + if !strings.HasPrefix(verificationHMAC, "vault:v") { + response[i].Error = "invalid HMAC to verify: no prefix" + response[i].err = logical.ErrInvalidRequest + continue + } + + splitVerificationHMAC := strings.SplitN(strings.TrimPrefix(verificationHMAC, "vault:v"), ":", 2) + if len(splitVerificationHMAC) != 2 { + response[i].Error = "invalid HMAC: wrong number of fields" + response[i].err = logical.ErrInvalidRequest + continue + } + + ver, err := strconv.Atoi(splitVerificationHMAC[0]) + if err != nil { + response[i].Error = "invalid HMAC: version number could not be decoded" + response[i].err = logical.ErrInvalidRequest + continue + } + + verBytes, err := 
base64.StdEncoding.DecodeString(splitVerificationHMAC[1]) + if err != nil { + response[i].Error = fmt.Sprintf("unable to decode verification HMAC as base64: %s", err) + response[i].err = logical.ErrInvalidRequest + continue + } + + if ver > p.LatestVersion { + response[i].Error = "invalid HMAC: version is too new" + response[i].err = logical.ErrInvalidRequest + continue + } + + if p.MinDecryptionVersion > 0 && ver < p.MinDecryptionVersion { + response[i].Error = "cannot verify HMAC: version is too old (disallowed by policy)" + response[i].err = logical.ErrInvalidRequest + continue + } + + key, err := p.HMACKey(ver) + if err != nil { + response[i].Error = err.Error() + response[i].err = logical.ErrInvalidRequest + continue + } + if key == nil { + response[i].Error = "" + response[i].err = fmt.Errorf("HMAC key value could not be computed") + continue + } + + hf := hmac.New(hashAlg, key) + hf.Write(input) + retBytes := hf.Sum(nil) + response[i].Valid = hmac.Equal(retBytes, verBytes) + } + + p.Unlock() + + // Generate the response + resp := &logical.Response{} + if batchInputRaw != nil { + // Copy the references + for i := range batchInputItems { + response[i].Reference = batchInputItems[i]["reference"] + } + resp.Data = map[string]interface{}{ + "batch_results": response, + } + } else { + if response[0].Error != "" || response[0].err != nil { + if response[0].Error != "" { + return logical.ErrorResponse(response[0].Error), response[0].err + } else { + return nil, response[0].err + } + } + resp.Data = map[string]interface{}{ + "valid": response[0].Valid, + } + } + + return resp, nil +} + +const pathHMACHelpSyn = `Generate an HMAC for input data using the named key` + +const pathHMACHelpDesc = ` +Generates an HMAC sum of the given algorithm and key against the given input data. +` diff --git a/builtin/logical/transit/path_hmac_test.go b/builtin/logical/transit/path_hmac_test.go new file mode 100644 index 0000000..af98dd2 --- /dev/null +++ b/builtin/logical/transit/path_hmac_test.go @@ -0,0 +1,374 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "fmt" + "strconv" + "strings" + "testing" + + "github.com/hashicorp/vault/sdk/helper/keysutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func TestTransit_HMAC(t *testing.T) { + b, storage := createBackendWithSysView(t) + + cases := []struct { + name string + typ string + }{ + { + name: "foo", + typ: "", + }, + { + name: "dedicated", + typ: "hmac", + }, + } + + for _, c := range cases { + req := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/" + c.name, + } + _, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + // Now, change the key value to something we control + p, _, err := b.GetPolicy(context.Background(), keysutil.PolicyRequest{ + Storage: storage, + Name: c.name, + }, b.GetRandomReader()) + if err != nil { + t.Fatal(err) + } + // We don't care as we're the only one using this + latestVersion := strconv.Itoa(p.LatestVersion) + keyEntry := p.Keys[latestVersion] + keyEntry.HMACKey = []byte("01234567890123456789012345678901") + keyEntry.Key = []byte("01234567890123456789012345678901") + p.Keys[latestVersion] = keyEntry + if err = p.Persist(context.Background(), storage); err != nil { + t.Fatal(err) + } + + req.Path = "hmac/" + c.name + req.Data = map[string]interface{}{ + "input": "dGhlIHF1aWNrIGJyb3duIGZveA==", + } + + doRequest := func(req *logical.Request, errExpected bool, expected string) { + path := req.Path + defer func() { req.Path = path }() + + resp, err := b.HandleRequest(context.Background(), req) + if err != nil && !errExpected { + panic(fmt.Sprintf("%v", err)) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if errExpected { + if !resp.IsError() { + t.Fatalf("bad: did not get error response: %#v", *resp) + } + return + } + if resp.IsError() { + t.Fatalf("bad: got error response: %#v", *resp) + } + value, ok := resp.Data["hmac"] + if !ok { + t.Fatalf("no hmac key found in returned data, got resp data %#v", resp.Data) + } + if value.(string) != expected { + panic(fmt.Sprintf("mismatched hashes; expected %s, got resp data %#v", expected, resp.Data)) + } + + // Now verify + req.Path = strings.ReplaceAll(req.Path, "hmac", "verify") + req.Data["hmac"] = value.(string) + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("%v: %v", err, resp) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if resp.Data["valid"].(bool) == false { + panic(fmt.Sprintf("error validating hmac;\nreq:\n%#v\nresp:\n%#v", *req, *resp)) + } + } + + // Comparisons are against values generated via openssl + + // Test defaults -- sha2-256 + doRequest(req, false, "vault:v1:UcBvm5VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4=") + + // Test algorithm selection in the path + req.Path = "hmac/" + c.name + "/sha2-224" + doRequest(req, false, "vault:v1:3p+ZWVquYDvu2dSTCa65Y3fgoMfIAc6fNaBbtg==") + + // Reset and test algorithm selection in the data + req.Path = "hmac/" + c.name + req.Data["algorithm"] = "sha2-224" + doRequest(req, false, "vault:v1:3p+ZWVquYDvu2dSTCa65Y3fgoMfIAc6fNaBbtg==") + + req.Data["algorithm"] = "sha2-384" + doRequest(req, false, "vault:v1:jDB9YXdPjpmr29b1JCIEJO93IydlKVfD9mA2EO9OmJtJQg3QAV5tcRRRb7IQGW9p") + + req.Data["algorithm"] = "sha2-512" + doRequest(req, false, "vault:v1:PSXLXvkvKF4CpU65e2bK1tGBZQpcpCEM32fq2iUoiTyQQCfBcGJJItQ+60tMwWXAPQrC290AzTrNJucGrr4GFA==") + + // Test returning as base64 + req.Data["format"] = "base64" + doRequest(req, false, 
"vault:v1:PSXLXvkvKF4CpU65e2bK1tGBZQpcpCEM32fq2iUoiTyQQCfBcGJJItQ+60tMwWXAPQrC290AzTrNJucGrr4GFA==") + + // Test SHA3 + req.Path = "hmac/" + c.name + req.Data["algorithm"] = "sha3-224" + doRequest(req, false, "vault:v1:TGipmKH8LR/BkMolYpDYy0BJCIhTtGPDhV2VkQ==") + + req.Data["algorithm"] = "sha3-256" + doRequest(req, false, "vault:v1:+px9V/7QYLfdK808zPESC2T/L33uFf4Blzsn9Jy838o=") + + req.Data["algorithm"] = "sha3-384" + doRequest(req, false, "vault:v1:YGoRwN4UdTRYZeOER86jsQOB8piWenzLDzJ2wJQK/Jq59rAsY8lh7SCdqqCyFg70") + + req.Data["algorithm"] = "sha3-512" + doRequest(req, false, "vault:v1:GrNA8sU88naMPEQ7UZGj9EJl7YJhl03AFHfxcEURFrtvnobdea9ZlZHePpxAx/oCaC7R2HkrAO+Tu3uXPIl3lg==") + + // Test returning SHA3 as base64 + req.Data["format"] = "base64" + doRequest(req, false, "vault:v1:GrNA8sU88naMPEQ7UZGj9EJl7YJhl03AFHfxcEURFrtvnobdea9ZlZHePpxAx/oCaC7R2HkrAO+Tu3uXPIl3lg==") + + req.Data["algorithm"] = "foobar" + doRequest(req, true, "") + + req.Data["algorithm"] = "sha2-256" + req.Data["input"] = "foobar" + doRequest(req, true, "") + req.Data["input"] = "dGhlIHF1aWNrIGJyb3duIGZveA==" + + // Rotate + err = p.Rotate(context.Background(), storage, b.GetRandomReader()) + if err != nil { + t.Fatal(err) + } + keyEntry = p.Keys["2"] + // Set to another value we control + keyEntry.HMACKey = []byte("12345678901234567890123456789012") + p.Keys["2"] = keyEntry + if err = p.Persist(context.Background(), storage); err != nil { + t.Fatal(err) + } + + doRequest(req, false, "vault:v2:Dt+mO/B93kuWUbGMMobwUNX5Wodr6dL3JH4DMfpQ0kw=") + + // Verify a previous version + req.Path = "verify/" + c.name + + req.Data["hmac"] = "vault:v1:UcBvm5VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4=" + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("%v: %v", err, resp) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if resp.Data["valid"].(bool) == false { + t.Fatalf("error validating hmac\nreq\n%#v\nresp\n%#v", *req, *resp) + } + + // Try a bad value + req.Data["hmac"] = "vault:v1:UcBvm4VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4=" + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("%v: %v", err, resp) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if resp.Data["valid"].(bool) { + t.Fatalf("expected error validating hmac") + } + + // Set min decryption version, attempt to verify + p.MinDecryptionVersion = 2 + if err = p.Persist(context.Background(), storage); err != nil { + t.Fatal(err) + } + + req.Data["hmac"] = "vault:v1:UcBvm5VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4=" + resp, err = b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatalf("expected an error, got response %#v", resp) + } + if err != logical.ErrInvalidRequest { + t.Fatalf("expected invalid request error, got %v", err) + } + } +} + +func TestTransit_batchHMAC(t *testing.T) { + b, storage := createBackendWithSysView(t) + + // First create a key + req := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/foo", + } + _, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + // Now, change the key value to something we control + p, _, err := b.GetPolicy(context.Background(), keysutil.PolicyRequest{ + Storage: storage, + Name: "foo", + }, b.GetRandomReader()) + if err != nil { + t.Fatal(err) + } + // We don't care as we're the only one using this + latestVersion := strconv.Itoa(p.LatestVersion) + keyEntry := p.Keys[latestVersion] + keyEntry.HMACKey = 
[]byte("01234567890123456789012345678901") + p.Keys[latestVersion] = keyEntry + if err = p.Persist(context.Background(), storage); err != nil { + t.Fatal(err) + } + + req.Path = "hmac/foo" + batchInput := []batchRequestHMACItem{ + {"input": "dGhlIHF1aWNrIGJyb3duIGZveA==", "reference": "one"}, + {"input": "dGhlIHF1aWNrIGJyb3duIGZveA==", "reference": "two"}, + {"input": "", "reference": "three"}, + {"input": ":;.?", "reference": "four"}, + {}, + } + + expected := []batchResponseHMACItem{ + {HMAC: "vault:v1:UcBvm5VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4=", Reference: "one"}, + {HMAC: "vault:v1:UcBvm5VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4=", Reference: "two"}, + {HMAC: "vault:v1:BCfVv6rlnRsIKpjCZCxWvh5iYwSSabRXpX9XJniuNgc=", Reference: "three"}, + {Error: "unable to decode input as base64: illegal base64 data at input byte 0", Reference: "four"}, + {Error: "missing input for HMAC"}, + } + + req.Data = map[string]interface{}{ + "batch_input": batchInput, + } + + resp, err := b.HandleRequest(context.Background(), req) + + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + batchResponseItems := resp.Data["batch_results"].([]batchResponseHMACItem) + + if len(batchResponseItems) != len(batchInput) { + t.Fatalf("Expected %d items in response. Got %d", len(batchInput), len(batchResponseItems)) + } + + for i, m := range batchResponseItems { + if expected[i].Error == "" && expected[i].HMAC != m.HMAC { + t.Fatalf("Expected HMAC %s got %s in result %d", expected[i].HMAC, m.HMAC, i) + } + if expected[i].Error != "" && expected[i].Error != m.Error { + t.Fatalf("Expected Error %q got %q in result %d", expected[i].Error, m.Error, i) + } + if expected[i].Reference != m.Reference { + t.Fatalf("Expected references to match, Got %s, Expected %s", m.Reference, expected[i].Reference) + } + } + + // Verify a previous version + req.Path = "verify/foo" + good_hmac := "vault:v1:UcBvm5VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4=" + bad_hmac := "vault:v1:UcBvm4VskkukzZHlPgm3p5P/Yr/PV6xpuOGZISya3A4=" + verifyBatch := []batchRequestHMACItem{ + {"input": "dGhlIHF1aWNrIGJyb3duIGZveA==", "hmac": good_hmac}, + } + + req.Data = map[string]interface{}{ + "batch_input": verifyBatch, + } + + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("%v: %v", err, resp) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + + batchHMACVerifyResponseItems := resp.Data["batch_results"].([]batchResponseHMACItem) + + if !batchHMACVerifyResponseItems[0].Valid { + t.Fatalf("error validating hmac\nreq\n%#v\nresp\n%#v", *req, *resp) + } + + // Try a bad value + verifyBatch[0]["hmac"] = bad_hmac + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("%v: %v", err, resp) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + + batchHMACVerifyResponseItems = resp.Data["batch_results"].([]batchResponseHMACItem) + + if batchHMACVerifyResponseItems[0].Valid { + t.Fatalf("expected error validating hmac\nreq\n%#v\nresp\n%#v", *req, *resp) + } + + // Rotate + err = p.Rotate(context.Background(), storage, b.GetRandomReader()) + if err != nil { + t.Fatal(err) + } + keyEntry = p.Keys["2"] + // Set to another value we control + keyEntry.HMACKey = []byte("12345678901234567890123456789012") + p.Keys["2"] = keyEntry + if err = p.Persist(context.Background(), storage); err != nil { + t.Fatal(err) + } + + // Set min decryption version, attempt to verify + p.MinDecryptionVersion = 2 + if err = p.Persist(context.Background(), storage); 
err != nil { + t.Fatal(err) + } + + // supply a good hmac, but with expired key version + verifyBatch[0]["hmac"] = good_hmac + + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("%v: %v", err, resp) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + + batchHMACVerifyResponseItems = resp.Data["batch_results"].([]batchResponseHMACItem) + + if batchHMACVerifyResponseItems[0].Valid { + t.Fatalf("expected error validating hmac\nreq\n%#v\nresp\n%#v", *req, *resp) + } +} diff --git a/builtin/logical/transit/path_import.go b/builtin/logical/transit/path_import.go new file mode 100644 index 0000000..45cb4dd --- /dev/null +++ b/builtin/logical/transit/path_import.go @@ -0,0 +1,436 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/base64" + "errors" + "fmt" + "hash" + "strconv" + "strings" + "time" + + "github.com/google/tink/go/kwp/subtle" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/keysutil" + "github.com/hashicorp/vault/sdk/logical" +) + +const EncryptedKeyBytes = 512 + +func (b *backend) pathImport() *framework.Path { + return &framework.Path{ + Pattern: "keys/" + framework.GenericNameRegex("name") + "/import", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "import", + OperationSuffix: "key", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "The name of the key", + }, + "type": { + Type: framework.TypeString, + Default: "aes256-gcm96", + Description: `The type of key being imported. Currently, "aes128-gcm96" (symmetric), "aes256-gcm96" (symmetric), "ecdsa-p256" +(asymmetric), "ecdsa-p384" (asymmetric), "ecdsa-p521" (asymmetric), "ed25519" (asymmetric), "rsa-2048" (asymmetric), "rsa-3072" +(asymmetric), "rsa-4096" (asymmetric) are supported. Defaults to "aes256-gcm96". +`, + }, + "hash_function": { + Type: framework.TypeString, + Default: "SHA256", + Description: `The hash function used as a random oracle in the OAEP wrapping of the user-generated, +ephemeral AES key. Can be one of "SHA1", "SHA224", "SHA256" (default), "SHA384", or "SHA512"`, + }, + "ciphertext": { + Type: framework.TypeString, + Description: `The base64-encoded ciphertext of the keys. The AES key should be encrypted using OAEP +with the wrapping key and then concatenated with the import key, wrapped by the AES key.`, + }, + "public_key": { + Type: framework.TypeString, + Description: `The plaintext PEM public key to be imported. If "ciphertext" is set, this field is ignored.`, + }, + "allow_rotation": { + Type: framework.TypeBool, + Description: "True if the imported key may be rotated within Vault; false otherwise.", + }, + "derived": { + Type: framework.TypeBool, + Description: `Enables key derivation mode. This +allows for per-transaction unique +keys for encryption operations.`, + }, + + "exportable": { + Type: framework.TypeBool, + Description: `Enables keys to be exportable. +This allows for all the valid keys +in the key ring to be exported.`, + }, + + "allow_plaintext_backup": { + Type: framework.TypeBool, + Description: `Enables taking a backup of the named +key in plaintext format. Once set, +this cannot be disabled.`, + }, + + "context": { + Type: framework.TypeString, + Description: `Base64 encoded context for key derivation. 
+When reading a key with key derivation enabled, +if the key type supports public keys, this will +return the public key for the given context.`, + }, + "auto_rotate_period": { + Type: framework.TypeDurationSecond, + Default: 0, + Description: `Amount of time the key should live before +being automatically rotated. A value of 0 +(default) disables automatic rotation for the +key.`, + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathImportWrite, + }, + HelpSynopsis: pathImportWriteSyn, + HelpDescription: pathImportWriteDesc, + } +} + +func (b *backend) pathImportVersion() *framework.Path { + return &framework.Path{ + Pattern: "keys/" + framework.GenericNameRegex("name") + "/import_version", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "import", + OperationSuffix: "key-version", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "The name of the key", + }, + "ciphertext": { + Type: framework.TypeString, + Description: `The base64-encoded ciphertext of the keys. The AES key should be encrypted using OAEP +with the wrapping key and then concatenated with the import key, wrapped by the AES key.`, + }, + "public_key": { + Type: framework.TypeString, + Description: `The plaintext public key to be imported. If "ciphertext" is set, this field is ignored.`, + }, + "hash_function": { + Type: framework.TypeString, + Default: "SHA256", + Description: `The hash function used as a random oracle in the OAEP wrapping of the user-generated, +ephemeral AES key. Can be one of "SHA1", "SHA224", "SHA256" (default), "SHA384", or "SHA512"`, + }, + "version": { + Type: framework.TypeInt, + Description: `Key version to be updated, if left empty, a new version will be created unless +a private key is specified and the 'Latest' key is missing a private key.`, + }, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathImportVersionWrite, + }, + HelpSynopsis: pathImportVersionWriteSyn, + HelpDescription: pathImportVersionWriteDesc, + } +} + +func (b *backend) pathImportWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + derived := d.Get("derived").(bool) + keyType := d.Get("type").(string) + exportable := d.Get("exportable").(bool) + allowPlaintextBackup := d.Get("allow_plaintext_backup").(bool) + autoRotatePeriod := time.Second * time.Duration(d.Get("auto_rotate_period").(int)) + allowRotation := d.Get("allow_rotation").(bool) + + // Ensure the caller didn't supply "convergent_encryption" as a field, since it's not supported on import. 
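+ // ("convergent_encryption" is not a registered field on this endpoint, so the + // attempt only shows up in the request's raw data; checking d.Raw here lets us + // reject it explicitly rather than silently ignore it.) 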
+ if _, ok := d.Raw["convergent_encryption"]; ok { + return nil, errors.New("import cannot be used on keys with convergent encryption enabled") + } + + if autoRotatePeriod > 0 && !allowRotation { + return nil, errors.New("allow_rotation must be set to true if auto-rotation is enabled") + } + + // Ensure that at least one `key` field has been set + isCiphertextSet, err := checkKeyFieldsSet(d) + if err != nil { + return nil, err + } + + polReq := keysutil.PolicyRequest{ + Storage: req.Storage, + Name: name, + Derived: derived, + Exportable: exportable, + AllowPlaintextBackup: allowPlaintextBackup, + AutoRotatePeriod: autoRotatePeriod, + AllowImportedKeyRotation: allowRotation, + IsPrivateKey: isCiphertextSet, + } + + switch strings.ToLower(keyType) { + case "aes128-gcm96": + polReq.KeyType = keysutil.KeyType_AES128_GCM96 + case "aes256-gcm96": + polReq.KeyType = keysutil.KeyType_AES256_GCM96 + case "chacha20-poly1305": + polReq.KeyType = keysutil.KeyType_ChaCha20_Poly1305 + case "ecdsa-p256": + polReq.KeyType = keysutil.KeyType_ECDSA_P256 + case "ecdsa-p384": + polReq.KeyType = keysutil.KeyType_ECDSA_P384 + case "ecdsa-p521": + polReq.KeyType = keysutil.KeyType_ECDSA_P521 + case "ed25519": + polReq.KeyType = keysutil.KeyType_ED25519 + case "rsa-2048": + polReq.KeyType = keysutil.KeyType_RSA2048 + case "rsa-3072": + polReq.KeyType = keysutil.KeyType_RSA3072 + case "rsa-4096": + polReq.KeyType = keysutil.KeyType_RSA4096 + case "hmac": + polReq.KeyType = keysutil.KeyType_HMAC + default: + return logical.ErrorResponse(fmt.Sprintf("unknown key type: %v", keyType)), logical.ErrInvalidRequest + } + + p, _, err := b.GetPolicy(ctx, polReq, b.GetRandomReader()) + if err != nil { + return nil, err + } + + if p != nil { + if b.System().CachingDisabled() { + p.Unlock() + } + return nil, errors.New("the import path cannot be used with an existing key; use import-version to rotate an existing imported key") + } + + key, resp, err := b.extractKeyFromFields(ctx, req, d, polReq.KeyType, isCiphertextSet) + if err != nil { + return resp, err + } + + err = b.lm.ImportPolicy(ctx, polReq, key, b.GetRandomReader()) + if err != nil { + return nil, err + } + + return nil, nil +} + +func (b *backend) pathImportVersionWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + + isCiphertextSet, err := checkKeyFieldsSet(d) + if err != nil { + return nil, err + } + + polReq := keysutil.PolicyRequest{ + Storage: req.Storage, + Name: name, + Upsert: false, + IsPrivateKey: isCiphertextSet, + } + p, _, err := b.GetPolicy(ctx, polReq, b.GetRandomReader()) + if err != nil { + return nil, err + } + if p == nil { + return nil, fmt.Errorf("no key found with name %s; to import a new key, use the import/ endpoint", name) + } + if !p.Imported { + return nil, errors.New("the import_version endpoint can only be used with an imported key") + } + if p.ConvergentEncryption { + return nil, errors.New("import_version cannot be used on keys with convergent encryption enabled") + } + + if !b.System().CachingDisabled() { + p.Lock(true) + } + defer p.Unlock() + + key, resp, err := b.extractKeyFromFields(ctx, req, d, p.Type, isCiphertextSet) + if err != nil { + return resp, err + } + + // Get the param version if set; otherwise import a new version. 
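+ // An explicitly supplied version can only be updated to fill in a missing + // private key counterpart (KeyVersionCanBeUpdated enforces this below); when + // no version is supplied, ImportPublicOrPrivate imports the material as a new + // key version. 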
+ if version, ok := d.GetOk("version"); ok { + versionToUpdate := version.(int) + + // Check if given version can be updated given input + err = p.KeyVersionCanBeUpdated(versionToUpdate, isCiphertextSet) + if err == nil { + err = p.ImportPrivateKeyForVersion(ctx, req.Storage, versionToUpdate, key) + } + } else { + err = p.ImportPublicOrPrivate(ctx, req.Storage, key, isCiphertextSet, b.GetRandomReader()) + } + + if err != nil { + return nil, err + } + + return nil, nil +} + +func (b *backend) decryptImportedKey(ctx context.Context, storage logical.Storage, ciphertext []byte, hashFn hash.Hash) ([]byte, error) { + // Bounds check the ciphertext to avoid panics + if len(ciphertext) <= EncryptedKeyBytes { + return nil, errors.New("provided ciphertext is too short") + } + + wrappedEphKey := ciphertext[:EncryptedKeyBytes] + wrappedImportKey := ciphertext[EncryptedKeyBytes:] + + wrappingKey, err := b.getWrappingKey(ctx, storage) + if err != nil { + return nil, err + } + if wrappingKey == nil { + return nil, fmt.Errorf("error importing key: wrapping key was nil") + } + + privWrappingKey := wrappingKey.Keys[strconv.Itoa(wrappingKey.LatestVersion)].RSAKey + ephKey, err := rsa.DecryptOAEP(hashFn, b.GetRandomReader(), privWrappingKey, wrappedEphKey, []byte{}) + if err != nil { + return nil, err + } + + // Zero out the ephemeral AES key just to be extra cautious. Note that this + // isn't a guarantee against memory analysis! See the documentation for the + // `vault.memzero` utility function for more information. + defer func() { + for i := range ephKey { + ephKey[i] = 0 + } + }() + + // Ensure the ephemeral AES key is 256-bit + if len(ephKey) != 32 { + return nil, errors.New("expected ephemeral AES key to be 256-bit") + } + + kwp, err := subtle.NewKWP(ephKey) + if err != nil { + return nil, err + } + + importKey, err := kwp.Unwrap(wrappedImportKey) + if err != nil { + return nil, err + } + + return importKey, nil +} + +func (b *backend) extractKeyFromFields(ctx context.Context, req *logical.Request, d *framework.FieldData, keyType keysutil.KeyType, isPrivateKey bool) ([]byte, *logical.Response, error) { + var key []byte + if isPrivateKey { + hashFnStr := d.Get("hash_function").(string) + hashFn, err := parseHashFn(hashFnStr) + if err != nil { + return key, logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + + ciphertextString := d.Get("ciphertext").(string) + ciphertext, err := base64.StdEncoding.DecodeString(ciphertextString) + if err != nil { + return key, nil, err + } + + key, err = b.decryptImportedKey(ctx, req.Storage, ciphertext, hashFn) + if err != nil { + return key, nil, err + } + } else { + publicKeyString := d.Get("public_key").(string) + if !keyType.ImportPublicKeySupported() { + return key, nil, errors.New("provided type does not support public_key import") + } + key = []byte(publicKeyString) + } + + return key, nil, nil +} + +func parseHashFn(hashFn string) (hash.Hash, error) { + switch strings.ToUpper(hashFn) { + case "SHA1": + return sha1.New(), nil + case "SHA224": + return sha256.New224(), nil + case "SHA256": + return sha256.New(), nil + case "SHA384": + return sha512.New384(), nil + case "SHA512": + return sha512.New(), nil + default: + return nil, fmt.Errorf("unknown hash function: %s", hashFn) + } +} + +// checkKeyFieldsSet: Checks which key fields are set. 
If both are set, an error is returned +func checkKeyFieldsSet(d *framework.FieldData) (bool, error) { + ciphertextSet := isFieldSet("ciphertext", d) + publicKeySet := isFieldSet("public_key", d) + + if ciphertextSet && publicKeySet { + return false, errors.New("only one of the following fields, ciphertext and public_key, can be set") + } else if ciphertextSet { + return true, nil + } else { + return false, nil + } +} + +func isFieldSet(fieldName string, d *framework.FieldData) bool { + _, fieldSet := d.Raw[fieldName] + return fieldSet +} + +const ( + pathImportWriteSyn = "Imports an externally-generated key into a new transit key" + pathImportWriteDesc = "This path is used to import an externally-generated " + + "key into Vault. The import operation creates a new key and cannot be used to " + + "replace an existing key." +) + +const pathImportVersionWriteSyn = "Imports an externally-generated key into an " + + "existing imported key" + +const pathImportVersionWriteDesc = "This path is used to import a new version of an " + + "externally-generated key into an existing imported key. The import_version endpoint " + + "only supports importing key material into existing imported keys." diff --git a/builtin/logical/transit/path_import_test.go b/builtin/logical/transit/path_import_test.go new file mode 100644 index 0000000..cb59e8d --- /dev/null +++ b/builtin/logical/transit/path_import_test.go @@ -0,0 +1,1075 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "fmt" + "strconv" + "sync" + "testing" + + "github.com/google/tink/go/kwp/subtle" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/sdk/logical" +) + +var keyTypes = []string{ + "aes256-gcm96", + "aes128-gcm96", + "chacha20-poly1305", + "ed25519", + "ecdsa-p256", + "ecdsa-p384", + "ecdsa-p521", + "rsa-2048", + "rsa-3072", + "rsa-4096", + "hmac", +} + +var hashFns = []string{ + "SHA256", + "SHA1", + "SHA224", + "SHA384", + "SHA512", +} + +var ( + keysLock sync.RWMutex + keys = map[string]interface{}{} +) + +const ( + nssFormattedEd25519Key = "MGcCAQAwFAYHKoZIzj0CAQYJKwYBBAHaRw8BBEwwSgIBAQQgfJm5R+LK4FMwGzOpemTBXksimEVOVCE8QeC+XBBfNU+hIwMhADaif7IhYx46IHcRTy1z8LeyhABep+UB8Da6olMZGx0i" + rsaPSSFormattedKey = 
"MIIEvAIBADALBgkqhkiG9w0BAQoEggSoMIIEpAIBAAKCAQEAiFXSBaicB534+2qMZTVzQHMjuhb4NM9hi5H4EAFiYHEBuvm2BAk58NdBK3wiMq/p7Ewu5NQI0gJ7GlcV1MBU94U6MEmWNd0ztmlz37esEDuaCDhmLEBHKRzs8Om0bY9vczcNwcnRIYusP2KMxon3Gv2C86M2Jahig70AIq0E9C7esfrlYxFnoxUfO09XyYfiHlZY59+/dhyULp/RDIvaQ0/DqSSnYmXw8vRQ1gp6DqIzxx3j8ikUrpE7MK6348keFQj1eb83Z5w8qgIdceHHH4wbIAW7qWCPJ/vIJp8Pe1NEanlef61pDut2YcljvN79ccjX/QyqwqYv6xX2uzSlpQIDAQABAoIBACtpBCAoIVJtkv9e3EhHniR55PjWYn7SP5GEz3MtNalWokHqS/H6DBhrOcWCV5NDHx1N3qqe9xYDkzX+X6Wn/gX4RmBkte79uX8OEca8wY1DpRaT+riBWQc2vh0xlPFDuC177KX1QGFJi3V9SCzZdjSCXyV7pPyVopSm4/mmlMq5ANfN8bcHAtcArP7vPzEdckJqurjwHyzsUZJa9sk3OL3rBkKy5bmoPebE1ZQ7C+9eA4u9MKSy95WpTiqMe3rRhvr6zj4bzEvzS9M4r2EdwgAn4FyDwtGdOqtfbtSLTikb73f4MSINnWbt3YPBfRC4PGjWXIN2sMG5XYC3KH+RKbsCgYEAu0HOFInH8OtWiUY0aqRKZuo7lrBczNa5gnce3ZYnNkfrPlu1Xp0SjUkEWukznBLO0N9lvG9j3ksUDTQlPoKarJb9uf/1H0tYHhHm6mP8mH87yfVn2bLb3VPeIQYb+MXnDrwNVCAtxhuHlpnXJPldeuVKeRigHUNIEs76UMiiLqMCgYEAumJxm5NrKk0LXUQmeZolLh0lM/shg8zW7Vi3Ksz5Pe4Pcmg+hTbHjZuJwK6HesljEA0JDNkS0+5hkqiS5UDnj94XfDbi08/kKbPYA12GPVSRNTJxL8q70rFnEUZuMBeL0SKMPhEfR2z5TDDZUBoO6HBUUwgJAij1EsXrBAb0BxcCgYBKS3eKKohLi/PPjy0oynpCjtiJlvuawe7kVoLGg9aW8L3jBdvV6Bf+OmQh9bhmSggIUzo4IzHKdptECdZlEMhxhY6xh14nxmr1s0Cc6oLDtmdwX4+OjioxjB7rl1Ltxwc/j1jycbn3ieCn3e3AW7e9FNARb7XHJnSoEbq65n+CZQKBgQChLPozYAL/HIrkR0fCRmM6gmemkNeFo0CFFP+oWoJ6ZIAlHjJafmmIcmVoI0TzEG3C9pLJ8nmOnYjxCyekakEUryi9+LSkGBWlXmlBV8H7DUNYrlskyfssEs8fKDmnCuWUn3yJO8NBv+HBWkjCNRaJOIIjH0KzBHoRludJnz2tVwKBgQCsQF5lvcXefNfQojbhF+9NfyhvAc7EsMTXQhP9HEj0wVqTuuqyGyu8meXEkcQPRl6yD/yZKuMREDNNck4KV2fdGekBsh8zBgpxdHQ2DcbfxZfNgv3yoX3f0grb/ApQNJb3DVW9FVRigue8XPzFOFX/demJmkUnTg3zGFnXLXjgxg==" +) + +func generateKeys(t *testing.T) { + t.Helper() + + keysLock.Lock() + defer keysLock.Unlock() + + if len(keys) > 0 { + return + } + + for _, keyType := range keyTypes { + key, err := generateKey(keyType) + if err != nil { + t.Fatalf("failed to generate %s key: %s", keyType, err) + } + keys[keyType] = key + } +} + +func getKey(t *testing.T, keyType string) interface{} { + t.Helper() + + keysLock.RLock() + defer keysLock.RUnlock() + + key, ok := keys[keyType] + if !ok { + t.Fatalf("no pre-generated key of type: %s", keyType) + } + + return key +} + +func TestTransit_ImportNSSEd25519Key(t *testing.T) { + generateKeys(t) + b, s := createBackendWithStorage(t) + + wrappingKey, err := b.getWrappingKey(context.Background(), s) + if err != nil || wrappingKey == nil { + t.Fatalf("failed to retrieve public wrapping key: %s", err) + } + privWrappingKey := wrappingKey.Keys[strconv.Itoa(wrappingKey.LatestVersion)].RSAKey + pubWrappingKey := &privWrappingKey.PublicKey + + rawPKCS8, err := base64.StdEncoding.DecodeString(nssFormattedEd25519Key) + if err != nil { + t.Fatalf("failed to parse nss base64: %v", err) + } + + blob := wrapTargetPKCS8ForImport(t, pubWrappingKey, rawPKCS8, "SHA256") + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: "keys/nss-ed25519/import", + Data: map[string]interface{}{ + "ciphertext": blob, + "type": "ed25519", + }, + } + + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to import NSS-formatted Ed25519 key: %v", err) + } +} + +func TestTransit_ImportRSAPSS(t *testing.T) { + generateKeys(t) + b, s := createBackendWithStorage(t) + + wrappingKey, err := b.getWrappingKey(context.Background(), s) + if err != nil || wrappingKey == nil { + t.Fatalf("failed to retrieve public wrapping key: %s", err) + } + privWrappingKey := wrappingKey.Keys[strconv.Itoa(wrappingKey.LatestVersion)].RSAKey + pubWrappingKey := 
&privWrappingKey.PublicKey + + rawPKCS8, err := base64.StdEncoding.DecodeString(rsaPSSFormattedKey) + if err != nil { + t.Fatalf("failed to parse rsa-pss base64: %v", err) + } + + blob := wrapTargetPKCS8ForImport(t, pubWrappingKey, rawPKCS8, "SHA256") + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: "keys/rsa-pss/import", + Data: map[string]interface{}{ + "ciphertext": blob, + "type": "rsa-2048", + }, + } + + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to import RSA-PSS private key: %v", err) + } +} + +func TestTransit_Import(t *testing.T) { + generateKeys(t) + b, s := createBackendWithStorage(t) + + t.Run( + "import into a key fails before wrapping key is read", + func(t *testing.T) { + fakeWrappingKey, err := rsa.GenerateKey(rand.Reader, 4096) + if err != nil { + t.Fatalf("failed to generate fake wrapping key: %s", err) + } + // Roll an AES256 key and import + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + targetKey := getKey(t, "aes256-gcm96") + importBlob := wrapTargetKeyForImport(t, &fakeWrappingKey.PublicKey, targetKey, "aes256-gcm96", "SHA256") + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "ciphertext": importBlob, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatal("import prior to wrapping key generation incorrectly succeeded") + } + }, + ) + + // Retrieve public wrapping key + wrappingKey, err := b.getWrappingKey(context.Background(), s) + if err != nil || wrappingKey == nil { + t.Fatalf("failed to retrieve public wrapping key: %s", err) + } + privWrappingKey := wrappingKey.Keys[strconv.Itoa(wrappingKey.LatestVersion)].RSAKey + pubWrappingKey := &privWrappingKey.PublicKey + + t.Run( + "import into an existing key fails", + func(t *testing.T) { + // Generate a key ID + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate a key ID: %s", err) + } + + // Create an AES256 key within Transit + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s", keyID), + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("unexpected error creating key: %s", err) + } + + targetKey := getKey(t, "aes256-gcm96") + importBlob := wrapTargetKeyForImport(t, pubWrappingKey, targetKey, "aes256-gcm96", "SHA256") + req = &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "ciphertext": importBlob, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatal("import into an existing key incorrectly succeeded") + } + }, + ) + + for _, keyType := range keyTypes { + priv := getKey(t, keyType) + for _, hashFn := range hashFns { + t.Run( + fmt.Sprintf("%s/%s", keyType, hashFn), + func(t *testing.T) { + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + importBlob := wrapTargetKeyForImport(t, pubWrappingKey, priv, keyType, hashFn) + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "type": keyType, + "hash_function": hashFn, + "ciphertext": importBlob, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != 
nil { + t.Fatalf("failed to import valid key: %s", err) + } + }, + ) + + // Shouldn't need to test every combination of key and hash function + if keyType != "aes256-gcm96" { + break + } + } + } + + failures := []struct { + name string + ciphertext interface{} + keyType interface{} + hashFn interface{} + }{ + { + name: "nil ciphertext", + }, + { + name: "empty string ciphertext", + ciphertext: "", + }, + { + name: "ciphertext not base64", + ciphertext: "this isn't correct", + }, + { + name: "ciphertext too short", + ciphertext: "ZmFrZSBjaXBoZXJ0ZXh0Cg", + }, + { + name: "invalid key type", + keyType: "fake-key-type", + }, + { + name: "invalid hash function", + hashFn: "fake-hash-fn", + }, + } + for _, tt := range failures { + t.Run( + tt.name, + func(t *testing.T) { + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{}, + } + if tt.ciphertext != nil { + req.Data["ciphertext"] = tt.ciphertext + } + if tt.keyType != nil { + req.Data["type"] = tt.keyType + } + if tt.hashFn != nil { + req.Data["hash_function"] = tt.hashFn + } + _, err = b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatal("invalid import request incorrectly succeeded") + } + }, + ) + } + + t.Run( + "disallow import of convergent keys", + func(t *testing.T) { + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + targetKey := getKey(t, "aes256-gcm96") + importBlob := wrapTargetKeyForImport(t, pubWrappingKey, targetKey, "aes256-gcm96", "SHA256") + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "convergent_encryption": true, + "ciphertext": importBlob, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatal("import of convergent key incorrectly succeeded") + } + }, + ) + + t.Run( + "allow_rotation=true enables rotation within vault", + func(t *testing.T) { + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + targetKey := getKey(t, "aes256-gcm96") + + // Import key + importBlob := wrapTargetKeyForImport(t, pubWrappingKey, targetKey, "aes256-gcm96", "SHA256") + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "allow_rotation": true, + "ciphertext": importBlob, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to import key: %s", err) + } + + // Rotate key + req = &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/rotate", keyID), + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to rotate key: %s", err) + } + }, + ) + + t.Run( + "allow_rotation=false disables rotation within vault", + func(t *testing.T) { + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + targetKey := getKey(t, "aes256-gcm96") + + // Import key + importBlob := wrapTargetKeyForImport(t, pubWrappingKey, targetKey, "aes256-gcm96", "SHA256") + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: 
map[string]interface{}{ + "allow_rotation": false, + "ciphertext": importBlob, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to import key: %s", err) + } + + // Rotate key + req = &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/rotate", keyID), + } + _, err = b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatal("rotation of key with allow_rotation incorrectly succeeded") + } + }, + ) + + t.Run( + "import public key ed25519", + func(t *testing.T) { + keyType := "ed25519" + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + + // Get keys + privateKey := getKey(t, keyType) + publicKeyBytes, err := getPublicKey(privateKey, keyType) + if err != nil { + t.Fatal(err) + } + + // Import key + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "public_key": publicKeyBytes, + "type": keyType, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to import ed25519 key: %v", err) + } + }) + + t.Run( + "import public key ecdsa", + func(t *testing.T) { + keyType := "ecdsa-p256" + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + + // Get keys + privateKey := getKey(t, keyType) + publicKeyBytes, err := getPublicKey(privateKey, keyType) + if err != nil { + t.Fatal(err) + } + + // Import key + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "public_key": publicKeyBytes, + "type": keyType, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to import public key: %s", err) + } + }) +} + +func TestTransit_ImportVersion(t *testing.T) { + generateKeys(t) + b, s := createBackendWithStorage(t) + + t.Run( + "import into a key version fails before wrapping key is read", + func(t *testing.T) { + fakeWrappingKey, err := rsa.GenerateKey(rand.Reader, 4096) + if err != nil { + t.Fatalf("failed to generate fake wrapping key: %s", err) + } + // Roll an AES256 key and import + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + targetKey := getKey(t, "aes256-gcm96") + importBlob := wrapTargetKeyForImport(t, &fakeWrappingKey.PublicKey, targetKey, "aes256-gcm96", "SHA256") + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import_version", keyID), + Data: map[string]interface{}{ + "ciphertext": importBlob, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatal("import_version prior to wrapping key generation incorrectly succeeded") + } + }, + ) + + // Retrieve public wrapping key + wrappingKey, err := b.getWrappingKey(context.Background(), s) + if err != nil || wrappingKey == nil { + t.Fatalf("failed to retrieve public wrapping key: %s", err) + } + privWrappingKey := wrappingKey.Keys[strconv.Itoa(wrappingKey.LatestVersion)].RSAKey + pubWrappingKey := &privWrappingKey.PublicKey + + t.Run( + "import into a non-existent key fails", + func(t *testing.T) { + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + targetKey := getKey(t, "aes256-gcm96") + importBlob := wrapTargetKeyForImport(t, 
pubWrappingKey, targetKey, "aes256-gcm96", "SHA256") + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import_version", keyID), + Data: map[string]interface{}{ + "ciphertext": importBlob, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatal("import_version into a non-existent key incorrectly succeeded") + } + }, + ) + + t.Run( + "import into an internally-generated key fails", + func(t *testing.T) { + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + + // Roll a key within Transit + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s", keyID), + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to generate a key within transit: %s", err) + } + + // Attempt to import into newly generated key + targetKey := getKey(t, "aes256-gcm96") + importBlob := wrapTargetKeyForImport(t, pubWrappingKey, targetKey, "aes256-gcm96", "SHA256") + req = &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import_version", keyID), + Data: map[string]interface{}{ + "ciphertext": importBlob, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatal("import_version into an internally-generated key incorrectly succeeded") + } + }, + ) + + t.Run( + "imported key version type must match existing key type", + func(t *testing.T) { + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + + // Import an RSA key + targetKey := getKey(t, "rsa-2048") + importBlob := wrapTargetKeyForImport(t, pubWrappingKey, targetKey, "rsa-2048", "SHA256") + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "ciphertext": importBlob, + "type": "rsa-2048", + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to generate a key within transit: %s", err) + } + + // Attempt to import an AES key version into existing RSA key + targetKey = getKey(t, "aes256-gcm96") + importBlob = wrapTargetKeyForImport(t, pubWrappingKey, targetKey, "aes256-gcm96", "SHA256") + req = &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import_version", keyID), + Data: map[string]interface{}{ + "ciphertext": importBlob, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err == nil { + t.Fatal("import_version into a key of a different type incorrectly succeeded") + } + }, + ) + + t.Run( + "import rsa public key and update version with private counterpart", + func(t *testing.T) { + keyType := "rsa-2048" + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + + // Get keys + privateKey := getKey(t, keyType) + importBlob := wrapTargetKeyForImport(t, pubWrappingKey, privateKey, keyType, "SHA256") + publicKeyBytes, err := getPublicKey(privateKey, keyType) + if err != nil { + t.Fatal(err) + } + + // Import RSA public key + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "public_key": publicKeyBytes, + "type": keyType, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to import 
public key: %s", err) + } + + // Update version - import RSA private key + req = &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import_version", keyID), + Data: map[string]interface{}{ + "ciphertext": importBlob, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to update key: %s", err) + } + }, + ) +} + +func TestTransit_ImportVersionWithPublicKeys(t *testing.T) { + generateKeys(t) + b, s := createBackendWithStorage(t) + + // Retrieve public wrapping key + wrappingKey, err := b.getWrappingKey(context.Background(), s) + if err != nil || wrappingKey == nil { + t.Fatalf("failed to retrieve public wrapping key: %s", err) + } + privWrappingKey := wrappingKey.Keys[strconv.Itoa(wrappingKey.LatestVersion)].RSAKey + pubWrappingKey := &privWrappingKey.PublicKey + + // Import a public key then import private should give us one key + t.Run( + "import rsa public key and update version with private counterpart", + func(t *testing.T) { + keyType := "ecdsa-p256" + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + + // Get keys + privateKey := getKey(t, keyType) + importBlob := wrapTargetKeyForImport(t, pubWrappingKey, privateKey, keyType, "SHA256") + publicKeyBytes, err := getPublicKey(privateKey, keyType) + if err != nil { + t.Fatal(err) + } + + // Import EC public key + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "public_key": publicKeyBytes, + "type": keyType, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to import public key: %s", err) + } + + // Update version - import EC private key + req = &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import_version", keyID), + Data: map[string]interface{}{ + "ciphertext": importBlob, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to update key: %s", err) + } + + // We should have one key on export + req = &logical.Request{ + Storage: s, + Operation: logical.ReadOperation, + Path: fmt.Sprintf("export/public-key/%s", keyID), + } + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to export key: %s", err) + } + + if len(resp.Data["keys"].(map[string]string)) != 1 { + t.Fatalf("expected 1 key but got %v: %v", len(resp.Data["keys"].(map[string]string)), resp) + } + }, + ) + + // Import a private and then public should give us two keys + t.Run( + "import ec private key and then its public counterpart", + func(t *testing.T) { + keyType := "ecdsa-p256" + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + + // Get keys + privateKey := getKey(t, keyType) + importBlob := wrapTargetKeyForImport(t, pubWrappingKey, privateKey, keyType, "SHA256") + publicKeyBytes, err := getPublicKey(privateKey, keyType) + if err != nil { + t.Fatal(err) + } + + // Import EC private key + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "ciphertext": importBlob, + "type": keyType, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to update key: %s", err) + } + + // Update version - Import EC public key + req = 
&logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import_version", keyID), + Data: map[string]interface{}{ + "public_key": publicKeyBytes, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to import public key: %s", err) + } + + // We should have two keys on export + req = &logical.Request{ + Storage: s, + Operation: logical.ReadOperation, + Path: fmt.Sprintf("export/public-key/%s", keyID), + } + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to export key: %s", err) + } + + if len(resp.Data["keys"].(map[string]string)) != 2 { + t.Fatalf("expected 2 keys but got %v: %v", len(resp.Data["keys"].(map[string]string)), resp) + } + }, + ) + + // Import a public and another public should allow us to insert two private keys. + t.Run( + "import two public keys and two private keys in reverse order", + func(t *testing.T) { + keyType := "ecdsa-p256" + keyID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("failed to generate key ID: %s", err) + } + + // Get keys + privateKey1 := getKey(t, keyType) + importBlob1 := wrapTargetKeyForImport(t, pubWrappingKey, privateKey1, keyType, "SHA256") + publicKeyBytes1, err := getPublicKey(privateKey1, keyType) + if err != nil { + t.Fatal(err) + } + + privateKey2, err := generateKey(keyType) + if err != nil { + t.Fatal(err) + } + importBlob2 := wrapTargetKeyForImport(t, pubWrappingKey, privateKey2, keyType, "SHA256") + publicKeyBytes2, err := getPublicKey(privateKey2, keyType) + if err != nil { + t.Fatal(err) + } + + // Import EC public key + req := &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import", keyID), + Data: map[string]interface{}{ + "public_key": publicKeyBytes1, + "type": keyType, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to update key: %s", err) + } + + // Update version - Import second EC public key + req = &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import_version", keyID), + Data: map[string]interface{}{ + "public_key": publicKeyBytes2, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to import public key: %s", err) + } + + // We should have two keys on export + req = &logical.Request{ + Storage: s, + Operation: logical.ReadOperation, + Path: fmt.Sprintf("export/public-key/%s", keyID), + } + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to export key: %s", err) + } + + if len(resp.Data["keys"].(map[string]string)) != 2 { + t.Fatalf("expected 2 keys but got %v: %v", len(resp.Data["keys"].(map[string]string)), resp) + } + + // Import second private key first, with no options.
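+ // With no explicit "version" in the request, the imported private key should + // land on the latest version, which holds its matching public key (per the + // export assertions below); the follow-up import pins "version": 1 to + // back-fill the first version.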
+ req = &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import_version", keyID), + Data: map[string]interface{}{ + "ciphertext": importBlob2, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to import private key: %s", err) + } + + // Import first private key second, with a version + req = &logical.Request{ + Storage: s, + Operation: logical.UpdateOperation, + Path: fmt.Sprintf("keys/%s/import_version", keyID), + Data: map[string]interface{}{ + "ciphertext": importBlob1, + "version": 1, + }, + } + _, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to import private key: %s", err) + } + + // We should still have two keys on export + req = &logical.Request{ + Storage: s, + Operation: logical.ReadOperation, + Path: fmt.Sprintf("export/public-key/%s", keyID), + } + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("failed to export key: %s", err) + } + + if len(resp.Data["keys"].(map[string]string)) != 2 { + t.Fatalf("expected 2 keys but got %v: %v", len(resp.Data["keys"].(map[string]string)), resp) + } + }, + ) +} + +func wrapTargetKeyForImport(t *testing.T, wrappingKey *rsa.PublicKey, targetKey interface{}, targetKeyType string, hashFnName string) string { + t.Helper() + + // Format target key for wrapping + var preppedTargetKey []byte + var ok bool + var err error + switch targetKeyType { + case "aes128-gcm96", "aes256-gcm96", "chacha20-poly1305", "hmac": + preppedTargetKey, ok = targetKey.([]byte) + if !ok { + t.Fatal("failed to wrap target key for import: symmetric key not provided in byte format") + } + default: + preppedTargetKey, err = x509.MarshalPKCS8PrivateKey(targetKey) + if err != nil { + t.Fatalf("failed to wrap target key for import: %s", err) + } + } + + return wrapTargetPKCS8ForImport(t, wrappingKey, preppedTargetKey, hashFnName) +} + +func wrapTargetPKCS8ForImport(t *testing.T, wrappingKey *rsa.PublicKey, preppedTargetKey []byte, hashFnName string) string { + t.Helper() + + // Generate an ephemeral AES-256 key + ephKey, err := uuid.GenerateRandomBytes(32) + if err != nil { + t.Fatalf("failed to wrap target key for import: %s", err) + } + + // Parse the hash function name into an actual function + hashFn, err := parseHashFn(hashFnName) + if err != nil { + t.Fatalf("failed to wrap target key for import: %s", err) + } + + // Wrap ephemeral AES key with public wrapping key + ephKeyWrapped, err := rsa.EncryptOAEP(hashFn, rand.Reader, wrappingKey, ephKey, []byte{}) + if err != nil { + t.Fatalf("failed to wrap target key for import: %s", err) + } + + // Create KWP instance for wrapping target key + kwp, err := subtle.NewKWP(ephKey) + if err != nil { + t.Fatalf("failed to wrap target key for import: %s", err) + } + + // Wrap target key with KWP + targetKeyWrapped, err := kwp.Wrap(preppedTargetKey) + if err != nil { + t.Fatalf("failed to wrap target key for import: %s", err) + } + + // Combine the wrapped keys into a single blob and base64 encode + wrappedKeys := append(ephKeyWrapped, targetKeyWrapped...)
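+ // Resulting layout: ephKeyWrapped || targetKeyWrapped, i.e. the RSA-OAEP-wrapped + // ephemeral AES key followed by the KWP-wrapped target key; the import endpoint + // is expected to split the two at the wrapping key's RSA modulus size.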
+ return base64.StdEncoding.EncodeToString(wrappedKeys) +} + +func generateKey(keyType string) (interface{}, error) { + switch keyType { + case "aes128-gcm96": + return uuid.GenerateRandomBytes(16) + case "aes256-gcm96", "hmac": + return uuid.GenerateRandomBytes(32) + case "chacha20-poly1305": + return uuid.GenerateRandomBytes(32) + case "ed25519": + _, priv, err := ed25519.GenerateKey(rand.Reader) + return priv, err + case "ecdsa-p256": + return ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + case "ecdsa-p384": + return ecdsa.GenerateKey(elliptic.P384(), rand.Reader) + case "ecdsa-p521": + return ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + case "rsa-2048": + return rsa.GenerateKey(rand.Reader, 2048) + case "rsa-3072": + return rsa.GenerateKey(rand.Reader, 3072) + case "rsa-4096": + return rsa.GenerateKey(rand.Reader, 4096) + default: + return nil, fmt.Errorf("failed to generate unsupported key type: %s", keyType) + } +} + +func getPublicKey(privateKey crypto.PrivateKey, keyType string) ([]byte, error) { + var publicKey crypto.PublicKey + var publicKeyBytes []byte + switch keyType { + case "rsa-2048", "rsa-3072", "rsa-4096": + publicKey = privateKey.(*rsa.PrivateKey).Public() + case "ecdsa-p256", "ecdsa-p384", "ecdsa-p521": + publicKey = privateKey.(*ecdsa.PrivateKey).Public() + case "ed25519": + publicKey = privateKey.(ed25519.PrivateKey).Public() + default: + return publicKeyBytes, fmt.Errorf("failed to get public key from %s key", keyType) + } + + publicKeyBytes, err := publicKeyToBytes(publicKey) + if err != nil { + return publicKeyBytes, err + } + + return publicKeyBytes, nil +} + +func publicKeyToBytes(publicKey crypto.PublicKey) ([]byte, error) { + var publicKeyBytesPem []byte + publicKeyBytes, err := x509.MarshalPKIXPublicKey(publicKey) + if err != nil { + return publicKeyBytesPem, fmt.Errorf("failed to marshal public key: %s", err) + } + + pemBlock := &pem.Block{ + Type: "PUBLIC KEY", + Bytes: publicKeyBytes, + } + + return pem.EncodeToMemory(pemBlock), nil +} diff --git a/builtin/logical/transit/path_keys.go b/builtin/logical/transit/path_keys.go new file mode 100644 index 0000000..89334a6 --- /dev/null +++ b/builtin/logical/transit/path_keys.go @@ -0,0 +1,448 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "crypto/elliptic" + "encoding/base64" + "fmt" + "strconv" + "time" + + "golang.org/x/crypto/ed25519" + + "github.com/fatih/structs" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/keysutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func (b *backend) pathListKeys() *framework.Path { + return &framework.Path{ + Pattern: "keys/?$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationSuffix: "keys", + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: b.pathKeysList, + }, + + HelpSynopsis: pathPolicyHelpSyn, + HelpDescription: pathPolicyHelpDesc, + } +} + +func (b *backend) pathKeys() *framework.Path { + return &framework.Path{ + Pattern: "keys/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationSuffix: "key", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the key", + }, + + "type": { + Type: framework.TypeString, + Default: "aes256-gcm96", + Description: ` +The type of key to create. 
Currently, "aes128-gcm96" (symmetric), "aes256-gcm96" (symmetric), "ecdsa-p256" +(asymmetric), "ecdsa-p384" (asymmetric), "ecdsa-p521" (asymmetric), "ed25519" (asymmetric), "rsa-2048" (asymmetric), "rsa-3072" +(asymmetric), "rsa-4096" (asymmetric) are supported. Defaults to "aes256-gcm96". +`, + }, + + "derived": { + Type: framework.TypeBool, + Description: `Enables key derivation mode. This +allows for per-transaction unique +keys for encryption operations.`, + }, + + "convergent_encryption": { + Type: framework.TypeBool, + Description: `Whether to support convergent encryption. +This is only supported when using a key with +key derivation enabled and will require all +requests to carry both a context and 96-bit +(12-byte) nonce. The given nonce will be used +in place of a randomly generated nonce. As a +result, when the same context and nonce are +supplied, the same ciphertext is generated. It +is *very important* when using this mode that +you ensure that all nonces are unique for a +given context. Failing to do so will severely +impact the ciphertext's security.`, + }, + + "exportable": { + Type: framework.TypeBool, + Description: `Enables keys to be exportable. +This allows for all the valid keys +in the key ring to be exported.`, + }, + + "allow_plaintext_backup": { + Type: framework.TypeBool, + Description: `Enables taking a backup of the named +key in plaintext format. Once set, +this cannot be disabled.`, + }, + + "context": { + Type: framework.TypeString, + Description: `Base64 encoded context for key derivation. +When reading a key with key derivation enabled, +if the key type supports public keys, this will +return the public key for the given context.`, + }, + + "auto_rotate_period": { + Type: framework.TypeDurationSecond, + Default: 0, + Description: `Amount of time the key should live before +being automatically rotated. A value of 0 +(default) disables automatic rotation for the +key.`, + }, + "key_size": { + Type: framework.TypeInt, + Default: 0, + Description: fmt.Sprintf("The key size in bytes for the algorithm. 
Only applies to HMAC and must be no fewer than %d bytes and no more than %d", keysutil.HmacMinKeySize, keysutil.HmacMaxKeySize), + }, + "managed_key_name": { + Type: framework.TypeString, + Description: "The name of the managed key to use for this transit key", + }, + "managed_key_id": { + Type: framework.TypeString, + Description: "The UUID of the managed key to use for this transit key", + }, + }, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: b.pathPolicyWrite, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "create", + }, + }, + logical.DeleteOperation: &framework.PathOperation{ + Callback: b.pathPolicyDelete, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "delete", + }, + }, + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathPolicyRead, + DisplayAttrs: &framework.DisplayAttributes{ + OperationVerb: "read", + }, + }, + }, + + HelpSynopsis: pathPolicyHelpSyn, + HelpDescription: pathPolicyHelpDesc, + } +} + +func (b *backend) pathKeysList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + entries, err := req.Storage.List(ctx, "policy/") + if err != nil { + return nil, err + } + + return logical.ListResponse(entries), nil +} + +func (b *backend) pathPolicyWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + derived := d.Get("derived").(bool) + convergent := d.Get("convergent_encryption").(bool) + keyType := d.Get("type").(string) + keySize := d.Get("key_size").(int) + exportable := d.Get("exportable").(bool) + allowPlaintextBackup := d.Get("allow_plaintext_backup").(bool) + autoRotatePeriod := time.Second * time.Duration(d.Get("auto_rotate_period").(int)) + managedKeyName := d.Get("managed_key_name").(string) + managedKeyId := d.Get("managed_key_id").(string) + + if autoRotatePeriod != 0 && autoRotatePeriod < time.Hour { + return logical.ErrorResponse("auto rotate period must be 0 to disable or at least an hour"), nil + } + + if !derived && convergent { + return logical.ErrorResponse("convergent encryption requires derivation to be enabled"), nil + } + + polReq := keysutil.PolicyRequest{ + Upsert: true, + Storage: req.Storage, + Name: name, + Derived: derived, + Convergent: convergent, + Exportable: exportable, + AllowPlaintextBackup: allowPlaintextBackup, + AutoRotatePeriod: autoRotatePeriod, + } + + switch keyType { + case "aes128-gcm96": + polReq.KeyType = keysutil.KeyType_AES128_GCM96 + case "aes256-gcm96": + polReq.KeyType = keysutil.KeyType_AES256_GCM96 + case "chacha20-poly1305": + polReq.KeyType = keysutil.KeyType_ChaCha20_Poly1305 + case "ecdsa-p256": + polReq.KeyType = keysutil.KeyType_ECDSA_P256 + case "ecdsa-p384": + polReq.KeyType = keysutil.KeyType_ECDSA_P384 + case "ecdsa-p521": + polReq.KeyType = keysutil.KeyType_ECDSA_P521 + case "ed25519": + polReq.KeyType = keysutil.KeyType_ED25519 + case "rsa-2048": + polReq.KeyType = keysutil.KeyType_RSA2048 + case "rsa-3072": + polReq.KeyType = keysutil.KeyType_RSA3072 + case "rsa-4096": + polReq.KeyType = keysutil.KeyType_RSA4096 + case "hmac": + polReq.KeyType = keysutil.KeyType_HMAC + case "managed_key": + polReq.KeyType = keysutil.KeyType_MANAGED_KEY + default: + return logical.ErrorResponse(fmt.Sprintf("unknown key type %v", keyType)), logical.ErrInvalidRequest + } + if keySize != 0 { + if polReq.KeyType != keysutil.KeyType_HMAC { + return 
logical.ErrorResponse(fmt.Sprintf("key_size is not valid for algorithm %v", polReq.KeyType)), logical.ErrInvalidRequest + } + if keySize < keysutil.HmacMinKeySize || keySize > keysutil.HmacMaxKeySize { + return logical.ErrorResponse(fmt.Sprintf("invalid key_size %d", keySize)), logical.ErrInvalidRequest + } + polReq.KeySize = keySize + } + + if polReq.KeyType == keysutil.KeyType_MANAGED_KEY { + keyId, err := GetManagedKeyUUID(ctx, b, managedKeyName, managedKeyId) + if err != nil { + return nil, err + } + + polReq.ManagedKeyUUID = keyId + } + + p, upserted, err := b.GetPolicy(ctx, polReq, b.GetRandomReader()) + if err != nil { + return nil, err + } + if p == nil { + return nil, fmt.Errorf("error generating key: returned policy was nil") + } + if b.System().CachingDisabled() { + p.Unlock() + } + + resp, err := b.formatKeyPolicy(p, nil) + if err != nil { + return nil, err + } + if !upserted { + resp.AddWarning(fmt.Sprintf("key %s already existed", name)) + } + return resp, nil +} + +// Built-in helper type for returning asymmetric keys +type asymKey struct { + Name string `json:"name" structs:"name" mapstructure:"name"` + PublicKey string `json:"public_key" structs:"public_key" mapstructure:"public_key"` + CreationTime time.Time `json:"creation_time" structs:"creation_time" mapstructure:"creation_time"` +} + +func (b *backend) pathPolicyRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + + p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ + Storage: req.Storage, + Name: name, + }, b.GetRandomReader()) + if err != nil { + return nil, err + } + if p == nil { + return nil, nil + } + if !b.System().CachingDisabled() { + p.Lock(false) + } + defer p.Unlock() + + contextRaw := d.Get("context").(string) + var context []byte + if len(contextRaw) != 0 { + context, err = base64.StdEncoding.DecodeString(contextRaw) + if err != nil { + return logical.ErrorResponse("failed to base64-decode context"), logical.ErrInvalidRequest + } + } + + return b.formatKeyPolicy(p, context) +} + +func (b *backend) formatKeyPolicy(p *keysutil.Policy, context []byte) (*logical.Response, error) { + // Return the response + resp := &logical.Response{ + Data: map[string]interface{}{ + "name": p.Name, + "type": p.Type.String(), + "derived": p.Derived, + "deletion_allowed": p.DeletionAllowed, + "min_available_version": p.MinAvailableVersion, + "min_decryption_version": p.MinDecryptionVersion, + "min_encryption_version": p.MinEncryptionVersion, + "latest_version": p.LatestVersion, + "exportable": p.Exportable, + "allow_plaintext_backup": p.AllowPlaintextBackup, + "supports_encryption": p.Type.EncryptionSupported(), + "supports_decryption": p.Type.DecryptionSupported(), + "supports_signing": p.Type.SigningSupported(), + "supports_derivation": p.Type.DerivationSupported(), + "auto_rotate_period": int64(p.AutoRotatePeriod.Seconds()), + "imported_key": p.Imported, + }, + } + if p.KeySize != 0 { + resp.Data["key_size"] = p.KeySize + } + + if p.Imported { + resp.Data["imported_key_allow_rotation"] = p.AllowImportedKeyRotation + } + + if p.BackupInfo != nil { + resp.Data["backup_info"] = map[string]interface{}{ + "time": p.BackupInfo.Time, + "version": p.BackupInfo.Version, + } + } + if p.RestoreInfo != nil { + resp.Data["restore_info"] = map[string]interface{}{ + "time": p.RestoreInfo.Time, + "version": p.RestoreInfo.Version, + } + } + + if p.Derived { + switch p.KDF { + case keysutil.Kdf_hmac_sha256_counter: + resp.Data["kdf"] = 
"hmac-sha256-counter" + resp.Data["kdf_mode"] = "hmac-sha256-counter" + case keysutil.Kdf_hkdf_sha256: + resp.Data["kdf"] = "hkdf_sha256" + } + resp.Data["convergent_encryption"] = p.ConvergentEncryption + if p.ConvergentEncryption { + resp.Data["convergent_encryption_version"] = p.ConvergentVersion + } + } + + switch p.Type { + case keysutil.KeyType_AES128_GCM96, keysutil.KeyType_AES256_GCM96, keysutil.KeyType_ChaCha20_Poly1305: + retKeys := map[string]int64{} + for k, v := range p.Keys { + retKeys[k] = v.DeprecatedCreationTime + } + resp.Data["keys"] = retKeys + + case keysutil.KeyType_ECDSA_P256, keysutil.KeyType_ECDSA_P384, keysutil.KeyType_ECDSA_P521, keysutil.KeyType_ED25519, keysutil.KeyType_RSA2048, keysutil.KeyType_RSA3072, keysutil.KeyType_RSA4096: + retKeys := map[string]map[string]interface{}{} + for k, v := range p.Keys { + key := asymKey{ + PublicKey: v.FormattedPublicKey, + CreationTime: v.CreationTime, + } + if key.CreationTime.IsZero() { + key.CreationTime = time.Unix(v.DeprecatedCreationTime, 0) + } + + switch p.Type { + case keysutil.KeyType_ECDSA_P256: + key.Name = elliptic.P256().Params().Name + case keysutil.KeyType_ECDSA_P384: + key.Name = elliptic.P384().Params().Name + case keysutil.KeyType_ECDSA_P521: + key.Name = elliptic.P521().Params().Name + case keysutil.KeyType_ED25519: + if p.Derived { + if len(context) == 0 { + key.PublicKey = "" + } else { + ver, err := strconv.Atoi(k) + if err != nil { + return nil, fmt.Errorf("invalid version %q: %w", k, err) + } + derived, err := p.GetKey(context, ver, 32) + if err != nil { + return nil, fmt.Errorf("failed to derive key to return public component: %w", err) + } + pubKey := ed25519.PrivateKey(derived).Public().(ed25519.PublicKey) + key.PublicKey = base64.StdEncoding.EncodeToString(pubKey) + } + } + key.Name = "ed25519" + case keysutil.KeyType_RSA2048, keysutil.KeyType_RSA3072, keysutil.KeyType_RSA4096: + key.Name = "rsa-2048" + if p.Type == keysutil.KeyType_RSA3072 { + key.Name = "rsa-3072" + } + + if p.Type == keysutil.KeyType_RSA4096 { + key.Name = "rsa-4096" + } + + pubKey, err := encodeRSAPublicKey(&v) + if err != nil { + return nil, err + } + key.PublicKey = pubKey + } + + retKeys[k] = structs.New(key).Map() + } + resp.Data["keys"] = retKeys + } + + return resp, nil +} + +func (b *backend) pathPolicyDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + + // Delete does its own locking + err := b.lm.DeletePolicy(ctx, req.Storage, name) + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("error deleting policy %s: %s", name, err)), err + } + + return nil, nil +} + +const pathPolicyHelpSyn = `Managed named encryption keys` + +const pathPolicyHelpDesc = ` +This path is used to manage the named keys that are available. +Doing a write with no value against a new named key will create +it using a randomly generated key. +` diff --git a/builtin/logical/transit/path_keys_config.go b/builtin/logical/transit/path_keys_config.go new file mode 100644 index 0000000..7b85161 --- /dev/null +++ b/builtin/logical/transit/path_keys_config.go @@ -0,0 +1,265 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "fmt" + "time" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/keysutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func (b *backend) pathKeysConfig() *framework.Path { + return &framework.Path{ + Pattern: "keys/" + framework.GenericNameRegex("name") + "/config", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "configure", + OperationSuffix: "key", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the key", + }, + + "min_decryption_version": { + Type: framework.TypeInt, + Description: `If set, the minimum version of the key allowed +to be decrypted. For signing keys, the minimum +version allowed to be used for verification.`, + }, + + "min_encryption_version": { + Type: framework.TypeInt, + Description: `If set, the minimum version of the key allowed +to be used for encryption; or for signing keys, +to be used for signing. If set to zero, only +the latest version of the key is allowed.`, + }, + + "deletion_allowed": { + Type: framework.TypeBool, + Description: "Whether to allow deletion of the key", + }, + + "exportable": { + Type: framework.TypeBool, + Description: `Enables export of the key. Once set, this cannot be disabled.`, + }, + + "allow_plaintext_backup": { + Type: framework.TypeBool, + Description: `Enables taking a backup of the named key in plaintext format. Once set, this cannot be disabled.`, + }, + + "auto_rotate_period": { + Type: framework.TypeDurationSecond, + Description: `Amount of time the key should live before +being automatically rotated. A value of 0 +disables automatic rotation for the key.`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathKeysConfigWrite, + }, + + HelpSynopsis: pathKeysConfigHelpSyn, + HelpDescription: pathKeysConfigHelpDesc, + } +} + +func (b *backend) pathKeysConfigWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (resp *logical.Response, retErr error) { + name := d.Get("name").(string) + + // Check if the policy already exists before we lock everything + p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ + Storage: req.Storage, + Name: name, + }, b.GetRandomReader()) + if err != nil { + return nil, err + } + if p == nil { + return logical.ErrorResponse( + fmt.Sprintf("no existing key named %s could be found", name)), + logical.ErrInvalidRequest + } + if !b.System().CachingDisabled() { + p.Lock(true) + } + defer p.Unlock() + + var warning string + + originalMinDecryptionVersion := p.MinDecryptionVersion + originalMinEncryptionVersion := p.MinEncryptionVersion + originalDeletionAllowed := p.DeletionAllowed + originalExportable := p.Exportable + originalAllowPlaintextBackup := p.AllowPlaintextBackup + + defer func() { + if retErr != nil || (resp != nil && resp.IsError()) { + p.MinDecryptionVersion = originalMinDecryptionVersion + p.MinEncryptionVersion = originalMinEncryptionVersion + p.DeletionAllowed = originalDeletionAllowed + p.Exportable = originalExportable + p.AllowPlaintextBackup = originalAllowPlaintextBackup + } + }() + + persistNeeded := false + + minDecryptionVersionRaw, ok := d.GetOk("min_decryption_version") + if ok { + minDecryptionVersion := minDecryptionVersionRaw.(int) + + if minDecryptionVersion < 0 { + return logical.ErrorResponse("min decryption version cannot be negative"), nil + } + + if 
minDecryptionVersion == 0 { + minDecryptionVersion = 1 + warning = "since Vault 0.3, transit key numbering starts at 1; forcing minimum to 1" + } + + if minDecryptionVersion != p.MinDecryptionVersion { + if minDecryptionVersion > p.LatestVersion { + return logical.ErrorResponse( + fmt.Sprintf("cannot set min decryption version of %d, latest key version is %d", minDecryptionVersion, p.LatestVersion)), nil + } + p.MinDecryptionVersion = minDecryptionVersion + persistNeeded = true + } + } + + minEncryptionVersionRaw, ok := d.GetOk("min_encryption_version") + if ok { + minEncryptionVersion := minEncryptionVersionRaw.(int) + + if minEncryptionVersion < 0 { + return logical.ErrorResponse("min encryption version cannot be negative"), nil + } + + if minEncryptionVersion != p.MinEncryptionVersion { + if minEncryptionVersion > p.LatestVersion { + return logical.ErrorResponse( + fmt.Sprintf("cannot set min encryption version of %d, latest key version is %d", minEncryptionVersion, p.LatestVersion)), nil + } + p.MinEncryptionVersion = minEncryptionVersion + persistNeeded = true + } + } + + // Check here to get the final picture after the logic on each + // individually. MinDecryptionVersion will always be 1 or above. + if p.MinEncryptionVersion > 0 && + p.MinEncryptionVersion < p.MinDecryptionVersion { + return logical.ErrorResponse( + fmt.Sprintf("cannot set min encryption/decryption values; min encryption version of %d must be greater than or equal to min decryption version of %d", p.MinEncryptionVersion, p.MinDecryptionVersion)), nil + } + + allowDeletionInt, ok := d.GetOk("deletion_allowed") + if ok { + allowDeletion := allowDeletionInt.(bool) + if allowDeletion != p.DeletionAllowed { + p.DeletionAllowed = allowDeletion + persistNeeded = true + } + } + + // Add this as a guard here before persisting since we now require the min + // decryption version to start at 1; even if it's not explicitly set here, + // force the upgrade + if p.MinDecryptionVersion == 0 { + p.MinDecryptionVersion = 1 + persistNeeded = true + } + + exportableRaw, ok := d.GetOk("exportable") + if ok { + exportable := exportableRaw.(bool) + // Don't unset the already set value + if exportable && !p.Exportable { + p.Exportable = exportable + persistNeeded = true + } + } + + allowPlaintextBackupRaw, ok := d.GetOk("allow_plaintext_backup") + if ok { + allowPlaintextBackup := allowPlaintextBackupRaw.(bool) + // Don't unset the already set value + if allowPlaintextBackup && !p.AllowPlaintextBackup { + p.AllowPlaintextBackup = allowPlaintextBackup + persistNeeded = true + } + } + + autoRotatePeriodRaw, ok, err := d.GetOkErr("auto_rotate_period") + if err != nil { + return nil, err + } + if ok { + autoRotatePeriod := time.Second * time.Duration(autoRotatePeriodRaw.(int)) + // Provided value must be 0 to disable or at least an hour + if autoRotatePeriod != 0 && autoRotatePeriod < time.Hour { + return logical.ErrorResponse("auto rotate period must be 0 to disable or at least an hour"), nil + } + + if autoRotatePeriod != p.AutoRotatePeriod { + p.AutoRotatePeriod = autoRotatePeriod + persistNeeded = true + } + + if p.Type == keysutil.KeyType_MANAGED_KEY && autoRotatePeriod != 0 { + return logical.ErrorResponse("Auto rotation can not be set for managed keys"), nil + } + } + + if !persistNeeded { + resp, err := b.formatKeyPolicy(p, nil) + if err != nil { + return nil, err + } + if warning != "" { + resp.AddWarning(warning) + } + return resp, nil + } + + switch { + case p.MinAvailableVersion > p.MinEncryptionVersion: + return 
logical.ErrorResponse("min encryption version should not be less than min available version"), nil + case p.MinAvailableVersion > p.MinDecryptionVersion: + return logical.ErrorResponse("min decryption version should not be less then min available version"), nil + } + + if err := p.Persist(ctx, req.Storage); err != nil { + return nil, err + } + + resp, err = b.formatKeyPolicy(p, nil) + if err != nil { + return nil, err + } + if warning != "" { + resp.AddWarning(warning) + } + return resp, nil +} + +const pathKeysConfigHelpSyn = `Configure a named encryption key` + +const pathKeysConfigHelpDesc = ` +This path is used to configure the named key. Currently, this +supports adjusting the minimum version of the key allowed to +be used for decryption via the min_decryption_version parameter. +` diff --git a/builtin/logical/transit/path_keys_config_test.go b/builtin/logical/transit/path_keys_config_test.go new file mode 100644 index 0000000..335607c --- /dev/null +++ b/builtin/logical/transit/path_keys_config_test.go @@ -0,0 +1,408 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "encoding/hex" + "encoding/json" + "fmt" + "strconv" + "strings" + "testing" + "time" + + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/api" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +func TestTransit_ConfigSettings(t *testing.T) { + b, storage := createBackendWithSysView(t) + + doReq := func(req *logical.Request) *logical.Response { + resp, err := b.HandleRequest(context.Background(), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("got err:\n%#v\nreq:\n%#v\n", err, *req) + } + return resp + } + doErrReq := func(req *logical.Request) { + resp, err := b.HandleRequest(context.Background(), req) + if err == nil { + if resp == nil || !resp.IsError() { + t.Fatalf("expected error; req:\n%#v\n", *req) + } + } + } + + // First create a key + req := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/aes256", + Data: map[string]interface{}{ + "derived": true, + }, + } + doReq(req) + + req.Path = "keys/aes128" + req.Data["type"] = "aes128-gcm96" + doReq(req) + + req.Path = "keys/ed" + req.Data["type"] = "ed25519" + doReq(req) + + delete(req.Data, "derived") + + req.Path = "keys/p256" + req.Data["type"] = "ecdsa-p256" + doReq(req) + + req.Path = "keys/p384" + req.Data["type"] = "ecdsa-p384" + doReq(req) + + req.Path = "keys/p521" + req.Data["type"] = "ecdsa-p521" + doReq(req) + + delete(req.Data, "type") + + req.Path = "keys/aes128/rotate" + doReq(req) + doReq(req) + doReq(req) + doReq(req) + + req.Path = "keys/aes256/rotate" + doReq(req) + doReq(req) + doReq(req) + doReq(req) + + req.Path = "keys/ed/rotate" + doReq(req) + doReq(req) + doReq(req) + doReq(req) + + req.Path = "keys/p256/rotate" + doReq(req) + doReq(req) + doReq(req) + doReq(req) + + req.Path = "keys/p384/rotate" + doReq(req) + doReq(req) + doReq(req) + doReq(req) + + req.Path = "keys/p521/rotate" + doReq(req) + doReq(req) + doReq(req) + doReq(req) + + req.Path = "keys/aes256/config" + // Too high + req.Data["min_decryption_version"] = 7 + doErrReq(req) + // Too low + req.Data["min_decryption_version"] = -1 + doErrReq(req) + + delete(req.Data, "min_decryption_version") + // Too high + req.Data["min_encryption_version"] = 7 + doErrReq(req) + // Too low + req.Data["min_encryption_version"] = 7 + doErrReq(req) + + // Not allowed, cannot 
decrypt + req.Data["min_decryption_version"] = 3 + req.Data["min_encryption_version"] = 2 + doErrReq(req) + + // Allowed + req.Data["min_decryption_version"] = 2 + req.Data["min_encryption_version"] = 3 + doReq(req) + req.Path = "keys/aes128/config" + doReq(req) + req.Path = "keys/ed/config" + doReq(req) + req.Path = "keys/p256/config" + doReq(req) + req.Path = "keys/p384/config" + doReq(req) + + req.Path = "keys/p521/config" + doReq(req) + + req.Data = map[string]interface{}{ + "plaintext": "abcd", + "input": "abcd", + "context": "abcd", + } + + maxKeyVersion := 5 + key := "aes256" + + testHMAC := func(ver int, valid bool) { + req.Path = "hmac/" + key + delete(req.Data, "hmac") + if ver == maxKeyVersion { + delete(req.Data, "key_version") + } else { + req.Data["key_version"] = ver + } + + if !valid { + doErrReq(req) + return + } + + resp := doReq(req) + ct := resp.Data["hmac"].(string) + if strings.Split(ct, ":")[1] != "v"+strconv.Itoa(ver) { + t.Fatal("wrong hmac version") + } + + req.Path = "verify/" + key + delete(req.Data, "key_version") + req.Data["hmac"] = resp.Data["hmac"] + doReq(req) + } + + testEncryptDecrypt := func(ver int, valid bool) { + req.Path = "encrypt/" + key + delete(req.Data, "ciphertext") + if ver == maxKeyVersion { + delete(req.Data, "key_version") + } else { + req.Data["key_version"] = ver + } + + if !valid { + doErrReq(req) + return + } + + resp := doReq(req) + ct := resp.Data["ciphertext"].(string) + if strings.Split(ct, ":")[1] != "v"+strconv.Itoa(ver) { + t.Fatal("wrong encryption version") + } + + req.Path = "decrypt/" + key + delete(req.Data, "key_version") + req.Data["ciphertext"] = resp.Data["ciphertext"] + doReq(req) + } + testEncryptDecrypt(5, true) + testEncryptDecrypt(4, true) + testEncryptDecrypt(3, true) + testEncryptDecrypt(2, false) + testHMAC(5, true) + testHMAC(4, true) + testHMAC(3, true) + testHMAC(2, false) + + key = "aes128" + testEncryptDecrypt(5, true) + testEncryptDecrypt(4, true) + testEncryptDecrypt(3, true) + testEncryptDecrypt(2, false) + testHMAC(5, true) + testHMAC(4, true) + testHMAC(3, true) + testHMAC(2, false) + + delete(req.Data, "plaintext") + req.Data["input"] = "abcd" + key = "ed" + testSignVerify := func(ver int, valid bool) { + req.Path = "sign/" + key + delete(req.Data, "signature") + if ver == maxKeyVersion { + delete(req.Data, "key_version") + } else { + req.Data["key_version"] = ver + } + + if !valid { + doErrReq(req) + return + } + + resp := doReq(req) + ct := resp.Data["signature"].(string) + if strings.Split(ct, ":")[1] != "v"+strconv.Itoa(ver) { + t.Fatal("wrong signature version") + } + + req.Path = "verify/" + key + delete(req.Data, "key_version") + req.Data["signature"] = resp.Data["signature"] + doReq(req) + } + testSignVerify(5, true) + testSignVerify(4, true) + testSignVerify(3, true) + testSignVerify(2, false) + testHMAC(5, true) + testHMAC(4, true) + testHMAC(3, true) + testHMAC(2, false) + + delete(req.Data, "context") + key = "p256" + testSignVerify(5, true) + testSignVerify(4, true) + testSignVerify(3, true) + testSignVerify(2, false) + testHMAC(5, true) + testHMAC(4, true) + testHMAC(3, true) + testHMAC(2, false) + + key = "p384" + testSignVerify(5, true) + testSignVerify(4, true) + testSignVerify(3, true) + testSignVerify(2, false) + testHMAC(5, true) + testHMAC(4, true) + testHMAC(3, true) + testHMAC(2, false) + + key = "p521" + testSignVerify(5, true) + testSignVerify(4, true) + testSignVerify(3, true) + testSignVerify(2, false) + testHMAC(5, true) + testHMAC(4, true) + testHMAC(3, true) + testHMAC(2, 
false) +} + +func TestTransit_UpdateKeyConfigWithAutorotation(t *testing.T) { + tests := map[string]struct { + initialAutoRotatePeriod interface{} + newAutoRotatePeriod interface{} + shouldError bool + expectedValue time.Duration + }{ + "default (no value)": { + initialAutoRotatePeriod: "5h", + shouldError: false, + expectedValue: 5 * time.Hour, + }, + "0 (int)": { + initialAutoRotatePeriod: "5h", + newAutoRotatePeriod: 0, + shouldError: false, + expectedValue: 0, + }, + "0 (string)": { + initialAutoRotatePeriod: "5h", + newAutoRotatePeriod: "0", + shouldError: false, + expectedValue: 0, + }, + "5 seconds": { + newAutoRotatePeriod: "5s", + shouldError: true, + }, + "5 hours": { + newAutoRotatePeriod: "5h", + shouldError: false, + expectedValue: 5 * time.Hour, + }, + "negative value": { + newAutoRotatePeriod: "-1800s", + shouldError: true, + }, + "invalid string": { + newAutoRotatePeriod: "this shouldn't work", + shouldError: true, + }, + } + + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "transit": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + cores := cluster.Cores + vault.TestWaitActive(t, cores[0].Core) + client := cores[0].Client + err := client.Sys().Mount("transit", &api.MountInput{ + Type: "transit", + }) + if err != nil { + t.Fatal(err) + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + keyNameBytes, err := uuid.GenerateRandomBytes(16) + if err != nil { + t.Fatal(err) + } + keyName := hex.EncodeToString(keyNameBytes) + + _, err = client.Logical().Write(fmt.Sprintf("transit/keys/%s", keyName), map[string]interface{}{ + "auto_rotate_period": test.initialAutoRotatePeriod, + }) + if err != nil { + t.Fatal(err) + } + resp, err := client.Logical().Write(fmt.Sprintf("transit/keys/%s/config", keyName), map[string]interface{}{ + "auto_rotate_period": test.newAutoRotatePeriod, + }) + switch { + case test.shouldError && err == nil: + t.Fatal("expected non-nil error") + case !test.shouldError && err != nil: + t.Fatal(err) + } + + if !test.shouldError { + resp, err = client.Logical().Read(fmt.Sprintf("transit/keys/%s", keyName)) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + gotRaw, ok := resp.Data["auto_rotate_period"].(json.Number) + if !ok { + t.Fatal("returned value is of unexpected type") + } + got, err := gotRaw.Int64() + if err != nil { + t.Fatal(err) + } + want := int64(test.expectedValue.Seconds()) + if got != want { + t.Fatalf("incorrect auto_rotate_period returned, got: %d, want: %d", got, want) + } + } + }) + } +} diff --git a/builtin/logical/transit/path_keys_test.go b/builtin/logical/transit/path_keys_test.go new file mode 100644 index 0000000..4b33039 --- /dev/null +++ b/builtin/logical/transit/path_keys_test.go @@ -0,0 +1,198 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package transit_test + +import ( + "encoding/hex" + "encoding/json" + "fmt" + "testing" + "time" + + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/builtin/audit/file" + "github.com/hashicorp/vault/builtin/logical/transit" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +func TestTransit_Issue_2958(t *testing.T) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "transit": transit.Factory, + }, + AuditBackends: map[string]audit.Factory{ + "file": file.Factory, + }, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + + vault.TestWaitActive(t, cores[0].Core) + + client := cores[0].Client + + err := client.Sys().EnableAuditWithOptions("file", &api.EnableAuditOptions{ + Type: "file", + Options: map[string]string{ + "file_path": "/dev/null", + }, + }) + if err != nil { + t.Fatal(err) + } + + err = client.Sys().Mount("transit", &api.MountInput{ + Type: "transit", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Write("transit/keys/foo", map[string]interface{}{ + "type": "ecdsa-p256", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Write("transit/keys/foobar", map[string]interface{}{ + "type": "ecdsa-p384", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Write("transit/keys/bar", map[string]interface{}{ + "type": "ed25519", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Read("transit/keys/foo") + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Read("transit/keys/foobar") + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Read("transit/keys/bar") + if err != nil { + t.Fatal(err) + } +} + +func TestTransit_CreateKeyWithAutorotation(t *testing.T) { + tests := map[string]struct { + autoRotatePeriod interface{} + shouldError bool + expectedValue time.Duration + }{ + "default (no value)": { + shouldError: false, + }, + "0 (int)": { + autoRotatePeriod: 0, + shouldError: false, + expectedValue: 0, + }, + "0 (string)": { + autoRotatePeriod: "0", + shouldError: false, + expectedValue: 0, + }, + "5 seconds": { + autoRotatePeriod: "5s", + shouldError: true, + }, + "5 hours": { + autoRotatePeriod: "5h", + shouldError: false, + expectedValue: 5 * time.Hour, + }, + "negative value": { + autoRotatePeriod: "-1800s", + shouldError: true, + }, + "invalid string": { + autoRotatePeriod: "this shouldn't work", + shouldError: true, + }, + } + + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "transit": transit.Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + cores := cluster.Cores + vault.TestWaitActive(t, cores[0].Core) + client := cores[0].Client + err := client.Sys().Mount("transit", &api.MountInput{ + Type: "transit", + }) + if err != nil { + t.Fatal(err) + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + keyNameBytes, err := uuid.GenerateRandomBytes(16) + if err != nil { + t.Fatal(err) + } + keyName := hex.EncodeToString(keyNameBytes) + + _, err = client.Logical().Write(fmt.Sprintf("transit/keys/%s", keyName), 
map[string]interface{}{ + "auto_rotate_period": test.autoRotatePeriod, + }) + switch { + case test.shouldError && err == nil: + t.Fatal("expected non-nil error") + case !test.shouldError && err != nil: + t.Fatal(err) + } + + if !test.shouldError { + resp, err := client.Logical().Read(fmt.Sprintf("transit/keys/%s", keyName)) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + gotRaw, ok := resp.Data["auto_rotate_period"].(json.Number) + if !ok { + t.Fatal("returned value is of unexpected type") + } + got, err := gotRaw.Int64() + if err != nil { + t.Fatal(err) + } + want := int64(test.expectedValue.Seconds()) + if got != want { + t.Fatalf("incorrect auto_rotate_period returned, got: %d, want: %d", got, want) + } + } + }) + } +} diff --git a/builtin/logical/transit/path_random.go b/builtin/logical/transit/path_random.go new file mode 100644 index 0000000..3fc5abe --- /dev/null +++ b/builtin/logical/transit/path_random.go @@ -0,0 +1,66 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + + "github.com/hashicorp/vault/helper/random" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func (b *backend) pathRandom() *framework.Path { + return &framework.Path{ + Pattern: "random(/" + framework.GenericNameRegex("source") + ")?" + framework.OptionalParamRegex("urlbytes"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "generate", + OperationSuffix: "random|random-with-source|random-with-bytes|random-with-source-and-bytes", + }, + + Fields: map[string]*framework.FieldSchema{ + "urlbytes": { + Type: framework.TypeString, + Description: "The number of bytes to generate (POST URL parameter)", + }, + + "bytes": { + Type: framework.TypeInt, + Default: 32, + Description: "The number of bytes to generate (POST body parameter). Defaults to 32 (256 bits).", + }, + + "format": { + Type: framework.TypeString, + Default: "base64", + Description: `Encoding format to use. Can be "hex" or "base64". Defaults to "base64".`, + }, + + "source": { + Type: framework.TypeString, + Default: "platform", + Description: `Which system to source random data from, either "platform", "seal", or "all".`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathRandomWrite, + }, + + HelpSynopsis: pathRandomHelpSyn, + HelpDescription: pathRandomHelpDesc, + } +} + +func (b *backend) pathRandomWrite(_ context.Context, _ *logical.Request, d *framework.FieldData) (*logical.Response, error) { + return random.HandleRandomAPI(d, b.GetRandomReader()) +} + +const pathRandomHelpSyn = `Generate random bytes` + +const pathRandomHelpDesc = ` +This function can be used to generate high-entropy random bytes. +` diff --git a/builtin/logical/transit/path_random_test.go b/builtin/logical/transit/path_random_test.go new file mode 100644 index 0000000..35782ec --- /dev/null +++ b/builtin/logical/transit/path_random_test.go @@ -0,0 +1,127 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "encoding/base64" + "encoding/hex" + "fmt" + "reflect" + "testing" + + "github.com/hashicorp/vault/helper/random" + "github.com/hashicorp/vault/sdk/logical" +) + +func TestTransit_Random(t *testing.T) { + var b *backend + sysView := logical.TestSystemView() + storage := &logical.InmemStorage{} + sysView.CachingDisabledVal = true + + b, _ = Backend(context.Background(), &logical.BackendConfig{ + StorageView: storage, + System: sysView, + }) + + req := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "random", + Data: map[string]interface{}{}, + } + + doRequest := func(req *logical.Request, errExpected bool, format string, numBytes int) { + getResponse := func() []byte { + resp, err := b.HandleRequest(context.Background(), req) + if err != nil && !errExpected { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if errExpected { + if !resp.IsError() { + t.Fatalf("bad: got error response: %#v", *resp) + } + return nil + } + if resp.IsError() { + t.Fatalf("bad: got error response: %#v", *resp) + } + if _, ok := resp.Data["random_bytes"]; !ok { + t.Fatal("no random_bytes found in response") + } + + outputStr := resp.Data["random_bytes"].(string) + var outputBytes []byte + switch format { + case "base64": + outputBytes, err = base64.StdEncoding.DecodeString(outputStr) + case "hex": + outputBytes, err = hex.DecodeString(outputStr) + default: + t.Fatal("unknown format") + } + if err != nil { + t.Fatal(err) + } + + return outputBytes + } + + rand1 := getResponse() + // Expected error + if rand1 == nil { + return + } + rand2 := getResponse() + if len(rand1) != numBytes || len(rand2) != numBytes { + t.Fatal("length of output random bytes not what is expected") + } + if reflect.DeepEqual(rand1, rand2) { + t.Fatal("found identical outputs") + } + } + + for _, source := range []string{"", "platform", "seal", "all"} { + req.Data["source"] = source + req.Data["bytes"] = 32 + req.Data["format"] = "base64" + req.Path = "random" + // Test defaults + doRequest(req, false, "base64", 32) + + // Test size selection in the path + req.Path = "random/24" + req.Data["format"] = "hex" + doRequest(req, false, "hex", 24) + + if source != "" { + // Test source selection in the path + req.Path = fmt.Sprintf("random/%s", source) + req.Data["format"] = "hex" + doRequest(req, false, "hex", 32) + + req.Path = fmt.Sprintf("random/%s/24", source) + req.Data["format"] = "hex" + doRequest(req, false, "hex", 24) + } + + // Test bad input/format + req.Path = "random" + req.Data["format"] = "base92" + doRequest(req, true, "", 0) + + req.Data["format"] = "hex" + req.Data["bytes"] = -1 + doRequest(req, true, "", 0) + + req.Data["format"] = "hex" + req.Data["bytes"] = random.APIMaxBytes + 1 + + doRequest(req, true, "", 0) + } +} diff --git a/builtin/logical/transit/path_restore.go b/builtin/logical/transit/path_restore.go new file mode 100644 index 0000000..4df9d69 --- /dev/null +++ b/builtin/logical/transit/path_restore.go @@ -0,0 +1,72 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "errors" + "strings" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +func (b *backend) pathRestore() *framework.Path { + return &framework.Path{ + Pattern: "restore" + framework.OptionalParamRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "restore", + OperationSuffix: "key|and-rename-key", + }, + + Fields: map[string]*framework.FieldSchema{ + "backup": { + Type: framework.TypeString, + Description: "Backed up key data to be restored. This should be the output from the 'backup/' endpoint.", + }, + "name": { + Type: framework.TypeString, + Description: "If set, this will be the name of the restored key.", + }, + "force": { + Type: framework.TypeBool, + Description: "If set and a key by the given name exists, force the restore operation and override the key.", + Default: false, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathRestoreUpdate, + }, + + HelpSynopsis: pathRestoreHelpSyn, + HelpDescription: pathRestoreHelpDesc, + } +} + +func (b *backend) pathRestoreUpdate(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + backupB64 := d.Get("backup").(string) + force := d.Get("force").(bool) + if backupB64 == "" { + return logical.ErrorResponse("'backup' must be supplied"), nil + } + + // If a name is given, make sure it does not contain any slashes. The Transit + // secret engine does not allow sub-paths in key names + keyName := d.Get("name").(string) + if strings.Contains(keyName, "/") { + return nil, ErrInvalidKeyName + } + + return nil, b.lm.RestorePolicy(ctx, req.Storage, keyName, backupB64, force) +} + +const ( + pathRestoreHelpSyn = `Restore the named key` + pathRestoreHelpDesc = `This path is used to restore the named key.` +) + +var ErrInvalidKeyName = errors.New("key names cannot be paths") diff --git a/builtin/logical/transit/path_restore_test.go b/builtin/logical/transit/path_restore_test.go new file mode 100644 index 0000000..3dcc552 --- /dev/null +++ b/builtin/logical/transit/path_restore_test.go @@ -0,0 +1,263 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "fmt" + "testing" + + "github.com/hashicorp/vault/helper/testhelpers" + "github.com/hashicorp/vault/sdk/logical" +) + +func TestTransit_Restore(t *testing.T) { + // Test setup: + // - Create a key + // - Configure it to be exportable, allowing deletion, and backups + // - Capture backup + // - Delete key + // - Run test cases + // + // Each test case should start with no key present. 
If the 'Seed' parameter is
+	// in the struct, we'll start by restoring it (without force) to run that test
+	// as if the key already existed
+
+	keyType := "aes256-gcm96"
+	b, s := createBackendWithStorage(t)
+	keyName := testhelpers.RandomWithPrefix("my-key")
+
+	// Create a key
+	keyReq := &logical.Request{
+		Path:      "keys/" + keyName,
+		Operation: logical.UpdateOperation,
+		Storage:   s,
+		Data: map[string]interface{}{
+			"type":       keyType,
+			"exportable": true,
+		},
+	}
+	resp, err := b.HandleRequest(context.Background(), keyReq)
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("resp: %#v\nerr: %v", resp, err)
+	}
+
+	// Configure the key to allow its deletion and backup
+	configReq := &logical.Request{
+		Path:      fmt.Sprintf("keys/%s/config", keyName),
+		Operation: logical.UpdateOperation,
+		Storage:   s,
+		Data: map[string]interface{}{
+			"deletion_allowed":       true,
+			"allow_plaintext_backup": true,
+		},
+	}
+	resp, err = b.HandleRequest(context.Background(), configReq)
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("resp: %#v\nerr: %v", resp, err)
+	}
+
+	// Take a backup of the key
+	backupReq := &logical.Request{
+		Path:      "backup/" + keyName,
+		Operation: logical.ReadOperation,
+		Storage:   s,
+	}
+	resp, err = b.HandleRequest(context.Background(), backupReq)
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("resp: %#v\nerr: %v", resp, err)
+	}
+
+	backupKey := resp.Data["backup"].(string)
+	if backupKey == "" {
+		t.Fatal("failed to get a backup")
+	}
+
+	// Delete the key to start test cases with clean slate
+	keyReq.Operation = logical.DeleteOperation
+	resp, err = b.HandleRequest(context.Background(), keyReq)
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("resp: %#v\nerr: %v", resp, err)
+	}
+
+	// helper func to get a pointer value for a boolean
+	boolPtr := func(b bool) *bool {
+		return &b
+	}
+
+	keyExitsError := fmt.Errorf("key %q already exists", keyName)
+
+	testCases := []struct {
+		Name string
+		// Seed determines if we start the test by restoring the initial backup we
+		// took, to test a restore operation based on the key existing or not
+		Seed bool
+		// Force is a pointer to differentiate between default false and given false
+		Force *bool
+		// The error we expect, if any
+		ExpectedErr error
+
+		// RestoreName is used to restore the key to a different name
+		RestoreName string
+	}{
+		{
+			// key does not already exist
+			Name: "Default restore",
+		},
+		{
+			// key already exists
+			Name:        "Restore-without-force",
+			Seed:        true,
+			ExpectedErr: keyExitsError,
+		},
+		{
+			// key already exists, use force to force a restore
+			Name:  "Restore-with-force",
+			Seed:  true,
+			Force: boolPtr(true),
+		},
+		{
+			// using force shouldn't matter if the key doesn't exist
+			Name:  "Restore-with-force-no-seed",
+			Force: boolPtr(true),
+		},
+		{
+			// key already exists, restore to new name
+			Name:        "Restore-new-name",
+			Seed:        true,
+			RestoreName: "new-key",
+		},
+		{
+			// key already exists, restore to bad path, should error
+			Name:        "Restore-new-name-bad-path",
+			Seed:        true,
+			RestoreName: "sub/path/new-key",
+			ExpectedErr: ErrInvalidKeyName,
+		},
+		{
+			// using force shouldn't matter if the restore key name is different
+			Name:        "Restore-with-force-seed-new-name",
+			Seed:        true,
+			Force:       boolPtr(true),
+			RestoreName: "other-key",
+		},
+		{
+			// not using force shouldn't matter if the restore key name is different
+			Name:        "Restore-with-out-force-seed-new-name",
+			Seed:        true,
+			Force:       boolPtr(false),
+			RestoreName: "other-key",
+		},
+		{
+			// using force shouldn't
matter if the key doesn't exist + Name: "Restore-force-false", + Force: boolPtr(false), + }, + { + // using false force should still error + Name: "Restore-force-false", + Seed: true, + Force: boolPtr(false), + ExpectedErr: keyExitsError, + }, + } + + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + var resp *logical.Response + var err error + if tc.Seed { + // restore our key to test a pre-existing key + seedRestoreReq := &logical.Request{ + Path: "restore", + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "backup": backupKey, + }, + } + + resp, err := b.HandleRequest(context.Background(), seedRestoreReq) + if resp != nil && resp.IsError() { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + if err != nil && tc.ExpectedErr == nil { + t.Fatalf("did not expect an error in SeedKey restore: %s", err) + } + } + + restorePath := "restore" + if tc.RestoreName != "" { + restorePath = fmt.Sprintf("%s/%s", restorePath, tc.RestoreName) + } + + restoreReq := &logical.Request{ + Path: restorePath, + Operation: logical.UpdateOperation, + Storage: s, + Data: map[string]interface{}{ + "backup": backupKey, + }, + } + + if tc.Force != nil { + restoreReq.Data["force"] = *tc.Force + } + + resp, err = b.HandleRequest(context.Background(), restoreReq) + if resp != nil && resp.IsError() { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + if err == nil && tc.ExpectedErr != nil { + t.Fatalf("expected an error, but got none") + } + if err != nil && tc.ExpectedErr == nil { + t.Fatalf("unexpected error:%s", err) + } + + if err != nil && tc.ExpectedErr != nil { + if err.Error() != tc.ExpectedErr.Error() { + t.Fatalf("expected error: (%s), got: (%s)", tc.ExpectedErr.Error(), err.Error()) + } + } + + readKeyName := keyName + if tc.RestoreName != "" { + readKeyName = tc.RestoreName + } + + // read the key and make sure it's there + readReq := &logical.Request{ + Path: "keys/" + readKeyName, + Operation: logical.ReadOperation, + Storage: s, + } + + resp, _ = b.HandleRequest(context.Background(), readReq) + if resp != nil && resp.IsError() { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + + if tc.ExpectedErr == nil && resp == nil { + t.Fatal("expected to find a key, but got none") + } + + // cleanup / delete key after each run + keyReq.Operation = logical.DeleteOperation + resp, err = b.HandleRequest(context.Background(), keyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + + // cleanup / delete restore key after each run, if it was created + if tc.RestoreName != "" && tc.ExpectedErr == nil { + readReq.Operation = logical.DeleteOperation + resp, err = b.HandleRequest(context.Background(), readReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("resp: %#v\nerr: %v", resp, err) + } + } + }) + } +} diff --git a/builtin/logical/transit/path_rewrap.go b/builtin/logical/transit/path_rewrap.go new file mode 100644 index 0000000..ced28d3 --- /dev/null +++ b/builtin/logical/transit/path_rewrap.go @@ -0,0 +1,243 @@ +// Copyright (c) HashiCorp, Inc. 
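+// (Illustrative sketch: the restore test above exercises the whole
+// backup/restore round trip. Condensed, with b, s and backupKey as set up in
+// that test, a forced restore under a new name is:
+//
+//	restoreReq := &logical.Request{
+//		Path:      "restore/other-key",
+//		Operation: logical.UpdateOperation,
+//		Storage:   s,
+//		Data: map[string]interface{}{
+//			"backup": backupKey,
+//			"force":  true,
+//		},
+//	}
+//	resp, err := b.HandleRequest(context.Background(), restoreReq)
+//
+// Without force, restoring over an existing key fails with
+// "key ... already exists", as the Restore-without-force case asserts.)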
+// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + + "github.com/hashicorp/vault/helper/constants" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/helper/keysutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/mapstructure" +) + +var ErrNonceNotAllowed = errors.New("provided nonce not allowed for this key") + +func (b *backend) pathRewrap() *framework.Path { + return &framework.Path{ + Pattern: "rewrap/" + framework.GenericNameRegex("name"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "rewrap", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the key", + }, + + "ciphertext": { + Type: framework.TypeString, + Description: "Ciphertext value to rewrap", + }, + + "context": { + Type: framework.TypeString, + Description: "Base64 encoded context for key derivation. Required for derived keys.", + }, + + "nonce": { + Type: framework.TypeString, + Description: "Nonce for when convergent encryption is used", + }, + + "key_version": { + Type: framework.TypeInt, + Description: `The version of the key to use for encryption. +Must be 0 (for latest) or a value greater than or equal +to the min_encryption_version configured on the key.`, + }, + + "batch_input": { + Type: framework.TypeSlice, + Description: ` +Specifies a list of items to be re-encrypted in a single batch. When this parameter is set, +if the parameters 'ciphertext', 'context' and 'nonce' are also set, they will be ignored. +Any batch output will preserve the order of the batch input.`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathRewrapWrite, + }, + + HelpSynopsis: pathRewrapHelpSyn, + HelpDescription: pathRewrapHelpDesc, + } +} + +func (b *backend) pathRewrapWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + batchInputRaw := d.Raw["batch_input"] + var batchInputItems []BatchRequestItem + var err error + if batchInputRaw != nil { + err = mapstructure.Decode(batchInputRaw, &batchInputItems) + if err != nil { + return nil, fmt.Errorf("failed to parse batch input: %w", err) + } + + if len(batchInputItems) == 0 { + return logical.ErrorResponse("missing batch input to process"), logical.ErrInvalidRequest + } + } else { + ciphertext := d.Get("ciphertext").(string) + if len(ciphertext) == 0 { + return logical.ErrorResponse("missing ciphertext to decrypt"), logical.ErrInvalidRequest + } + + batchInputItems = make([]BatchRequestItem, 1) + batchInputItems[0] = BatchRequestItem{ + Ciphertext: ciphertext, + Context: d.Get("context").(string), + Nonce: d.Get("nonce").(string), + KeyVersion: d.Get("key_version").(int), + } + } + + batchResponseItems := make([]EncryptBatchResponseItem, len(batchInputItems)) + contextSet := len(batchInputItems[0].Context) != 0 + + for i, item := range batchInputItems { + if (len(item.Context) == 0 && contextSet) || (len(item.Context) != 0 && !contextSet) { + return logical.ErrorResponse("context should be set either in all the request blocks or in none"), logical.ErrInvalidRequest + } + + if item.Ciphertext == "" { + batchResponseItems[i].Error = "missing ciphertext to decrypt" + continue + } + + // Decode the context + if len(item.Context) != 0 { + batchInputItems[i].DecodedContext, err = 
base64.StdEncoding.DecodeString(item.Context) + if err != nil { + batchResponseItems[i].Error = err.Error() + continue + } + } + + // Decode the nonce + if len(item.Nonce) != 0 { + batchInputItems[i].DecodedNonce, err = base64.StdEncoding.DecodeString(item.Nonce) + if err != nil { + batchResponseItems[i].Error = err.Error() + continue + } + } + } + + // Get the policy + p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ + Storage: req.Storage, + Name: d.Get("name").(string), + }, b.GetRandomReader()) + if err != nil { + return nil, err + } + if p == nil { + return logical.ErrorResponse("encryption key not found"), logical.ErrInvalidRequest + } + if !b.System().CachingDisabled() { + p.Lock(false) + } + + warnAboutNonceUsage := false + for i, item := range batchInputItems { + if batchResponseItems[i].Error != "" { + continue + } + + if item.Nonce != "" && !nonceAllowed(p) { + batchResponseItems[i].Error = ErrNonceNotAllowed.Error() + continue + } + + plaintext, err := p.Decrypt(item.DecodedContext, item.DecodedNonce, item.Ciphertext) + if err != nil { + switch err.(type) { + case errutil.UserError: + batchResponseItems[i].Error = err.Error() + continue + default: + p.Unlock() + return nil, err + } + } + + if !warnAboutNonceUsage && shouldWarnAboutNonceUsage(p, item.DecodedNonce) { + warnAboutNonceUsage = true + } + + ciphertext, err := p.Encrypt(item.KeyVersion, item.DecodedContext, item.DecodedNonce, plaintext) + if err != nil { + switch err.(type) { + case errutil.UserError: + batchResponseItems[i].Error = err.Error() + continue + case errutil.InternalError: + p.Unlock() + return nil, err + default: + p.Unlock() + return nil, err + } + } + + if ciphertext == "" { + p.Unlock() + return nil, fmt.Errorf("empty ciphertext returned for input item %d", i) + } + + keyVersion := item.KeyVersion + if keyVersion == 0 { + keyVersion = p.LatestVersion + } + + batchResponseItems[i].Ciphertext = ciphertext + batchResponseItems[i].KeyVersion = keyVersion + } + + resp := &logical.Response{} + if batchInputRaw != nil { + // Copy the references + for i := range batchInputItems { + batchResponseItems[i].Reference = batchInputItems[i].Reference + } + resp.Data = map[string]interface{}{ + "batch_results": batchResponseItems, + } + } else { + if batchResponseItems[0].Error != "" { + p.Unlock() + return logical.ErrorResponse(batchResponseItems[0].Error), logical.ErrInvalidRequest + } + resp.Data = map[string]interface{}{ + "ciphertext": batchResponseItems[0].Ciphertext, + "key_version": batchResponseItems[0].KeyVersion, + } + } + + if constants.IsFIPS() && warnAboutNonceUsage { + resp.AddWarning("A provided nonce value was used within FIPS mode, this violates FIPS 140 compliance.") + } + + p.Unlock() + return resp, nil +} + +const pathRewrapHelpSyn = `Rewrap ciphertext` + +const pathRewrapHelpDesc = ` +After key rotation, this function can be used to rewrap the given ciphertext or +a batch of given ciphertext blocks with the latest version of the named key. +If the given ciphertext is already using the latest version of the key, this +function is a no-op. +` diff --git a/builtin/logical/transit/path_rewrap_test.go b/builtin/logical/transit/path_rewrap_test.go new file mode 100644 index 0000000..097626c --- /dev/null +++ b/builtin/logical/transit/path_rewrap_test.go @@ -0,0 +1,328 @@ +// Copyright (c) HashiCorp, Inc. 
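+// (Illustrative sketch: pathRewrapWrite above accepts either a single
+// "ciphertext" or a "batch_input" list. A batched rewrap request, assuming a
+// backend b, storage s, and ct1/ct2 as placeholders for earlier encrypt
+// outputs, looks like:
+//
+//	rewrapReq := &logical.Request{
+//		Operation: logical.UpdateOperation,
+//		Path:      "rewrap/my-key",
+//		Storage:   s,
+//		Data: map[string]interface{}{
+//			"batch_input": []interface{}{
+//				map[string]interface{}{"ciphertext": ct1, "reference": "a"},
+//				map[string]interface{}{"ciphertext": ct2, "reference": "b"},
+//			},
+//		},
+//	}
+//
+// Results come back under resp.Data["batch_results"] in input order, with
+// each item's "reference" copied through, as the handler's response-building
+// code above shows.)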
+// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "strings" + "testing" + + "github.com/hashicorp/vault/sdk/logical" +) + +// Check the normal flow of rewrap +func TestTransit_BatchRewrapCase1(t *testing.T) { + var resp *logical.Response + var err error + b, s := createBackendWithStorage(t) + + // Upsert the key and encrypt the data + plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA==" + + encData := map[string]interface{}{ + "plaintext": plaintext, + } + + // Create a key and encrypt a plaintext + encReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "encrypt/upserted_key", + Storage: s, + Data: encData, + } + resp, err = b.HandleRequest(context.Background(), encReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + // Cache the ciphertext + ciphertext := resp.Data["ciphertext"] + if !strings.HasPrefix(ciphertext.(string), "vault:v1") { + t.Fatalf("bad: ciphertext version: expected: 'vault:v1', actual: %s", ciphertext) + } + + keyVersion := resp.Data["key_version"].(int) + if keyVersion != 1 { + t.Fatalf("unexpected key version; got: %d, expected: %d", keyVersion, 1) + } + + rewrapData := map[string]interface{}{ + "ciphertext": ciphertext, + } + + // Read the policy and check if the latest version is 1 + policyReq := &logical.Request{ + Operation: logical.ReadOperation, + Path: "keys/upserted_key", + Storage: s, + } + + resp, err = b.HandleRequest(context.Background(), policyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + if resp.Data["latest_version"] != 1 { + t.Fatalf("bad: latest_version: expected: 1, actual: %d", resp.Data["latest_version"]) + } + + rotateReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "keys/upserted_key/rotate", + Storage: s, + } + resp, err = b.HandleRequest(context.Background(), rotateReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + // Read the policy again and the latest version is 2 + resp, err = b.HandleRequest(context.Background(), policyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + if resp.Data["latest_version"] != 2 { + t.Fatalf("bad: latest_version: expected: 2, actual: %d", resp.Data["latest_version"]) + } + + // Rewrap the ciphertext and check that they are different + rewrapReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "rewrap/upserted_key", + Storage: s, + Data: rewrapData, + } + + resp, err = b.HandleRequest(context.Background(), rewrapReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + if ciphertext.(string) == resp.Data["ciphertext"].(string) { + t.Fatalf("bad: ciphertexts are same before and after rewrap") + } + + if !strings.HasPrefix(resp.Data["ciphertext"].(string), "vault:v2") { + t.Fatalf("bad: ciphertext version: expected: 'vault:v2', actual: %s", resp.Data["ciphertext"].(string)) + } + + keyVersion = resp.Data["key_version"].(int) + if keyVersion != 2 { + t.Fatalf("unexpected key version; got: %d, expected: %d", keyVersion, 2) + } +} + +// Check the normal flow of rewrap with upserted key +func TestTransit_BatchRewrapCase2(t *testing.T) { + var resp *logical.Response + var err error + b, s := createBackendWithStorage(t) + + // Upsert the key and encrypt the data + plaintext := "dGhlIHF1aWNrIGJyb3duIGZveA==" + + encData := map[string]interface{}{ + "plaintext": plaintext, + 
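+		// (Illustrative note: the "context" entry below makes this an
+		// encrypt-with-derivation upsert, so the same base64 context must
+		// accompany the rewrap request, as rewrapData does further down.)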
"context": "dmlzaGFsCg==", + } + + // Create a key and encrypt a plaintext + encReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "encrypt/upserted_key", + Storage: s, + Data: encData, + } + resp, err = b.HandleRequest(context.Background(), encReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + // Cache the ciphertext + ciphertext := resp.Data["ciphertext"] + if !strings.HasPrefix(ciphertext.(string), "vault:v1") { + t.Fatalf("bad: ciphertext version: expected: 'vault:v1', actual: %s", ciphertext) + } + + keyVersion := resp.Data["key_version"].(int) + if keyVersion != 1 { + t.Fatalf("unexpected key version; got: %d, expected: %d", keyVersion, 1) + } + + rewrapData := map[string]interface{}{ + "ciphertext": ciphertext, + "context": "dmlzaGFsCg==", + } + + // Read the policy and check if the latest version is 1 + policyReq := &logical.Request{ + Operation: logical.ReadOperation, + Path: "keys/upserted_key", + Storage: s, + } + + resp, err = b.HandleRequest(context.Background(), policyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + if resp.Data["latest_version"] != 1 { + t.Fatalf("bad: latest_version: expected: 1, actual: %d", resp.Data["latest_version"]) + } + + rotateReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "keys/upserted_key/rotate", + Storage: s, + } + resp, err = b.HandleRequest(context.Background(), rotateReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + // Read the policy again and the latest version is 2 + resp, err = b.HandleRequest(context.Background(), policyReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + if resp.Data["latest_version"] != 2 { + t.Fatalf("bad: latest_version: expected: 2, actual: %d", resp.Data["latest_version"]) + } + + // Rewrap the ciphertext and check that they are different + rewrapReq := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "rewrap/upserted_key", + Storage: s, + Data: rewrapData, + } + + resp, err = b.HandleRequest(context.Background(), rewrapReq) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("err:%v resp:%#v", err, resp) + } + + if ciphertext.(string) == resp.Data["ciphertext"].(string) { + t.Fatalf("bad: ciphertexts are same before and after rewrap") + } + + if !strings.HasPrefix(resp.Data["ciphertext"].(string), "vault:v2") { + t.Fatalf("bad: ciphertext version: expected: 'vault:v2', actual: %s", resp.Data["ciphertext"].(string)) + } + + keyVersion = resp.Data["key_version"].(int) + if keyVersion != 2 { + t.Fatalf("unexpected key version; got: %d, expected: %d", keyVersion, 2) + } +} + +// Batch encrypt plaintexts, rotate the keys and rewrap all the ciphertexts +func TestTransit_BatchRewrapCase3(t *testing.T) { + var resp *logical.Response + var err error + + b, s := createBackendWithStorage(t) + + batchEncryptionInput := []interface{}{ + map[string]interface{}{"plaintext": "dmlzaGFsCg==", "reference": "ek"}, + map[string]interface{}{"plaintext": "dGhlIHF1aWNrIGJyb3duIGZveA==", "reference": "do"}, + } + batchEncryptionData := map[string]interface{}{ + "batch_input": batchEncryptionInput, + } + batchReq := &logical.Request{ + Operation: logical.CreateOperation, + Path: "encrypt/upserted_key", + Storage: s, + Data: batchEncryptionData, + } + resp, err = b.HandleRequest(context.Background(), batchReq) + if err != nil || (resp != nil && 
resp.IsError()) {
+		t.Fatalf("err:%v resp:%#v", err, resp)
+	}
+
+	batchEncryptionResponseItems := resp.Data["batch_results"].([]EncryptBatchResponseItem)
+
+	batchRewrapInput := make([]interface{}, len(batchEncryptionResponseItems))
+	for i, item := range batchEncryptionResponseItems {
+		batchRewrapInput[i] = map[string]interface{}{"ciphertext": item.Ciphertext, "reference": item.Reference}
+	}
+
+	batchRewrapData := map[string]interface{}{
+		"batch_input": batchRewrapInput,
+	}
+
+	rotateReq := &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "keys/upserted_key/rotate",
+		Storage:   s,
+	}
+	resp, err = b.HandleRequest(context.Background(), rotateReq)
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("err:%v resp:%#v", err, resp)
+	}
+
+	rewrapReq := &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "rewrap/upserted_key",
+		Storage:   s,
+		Data:      batchRewrapData,
+	}
+
+	resp, err = b.HandleRequest(context.Background(), rewrapReq)
+	if err != nil || (resp != nil && resp.IsError()) {
+		t.Fatalf("err:%v resp:%#v", err, resp)
+	}
+
+	batchRewrapResponseItems := resp.Data["batch_results"].([]EncryptBatchResponseItem)
+
+	if len(batchRewrapResponseItems) != len(batchEncryptionResponseItems) {
+		t.Fatalf("bad: length of input and output of rewrap do not match; expected: %d, actual: %d", len(batchEncryptionResponseItems), len(batchRewrapResponseItems))
+	}
+
+	decReq := &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "decrypt/upserted_key",
+		Storage:   s,
+	}
+
+	for i, eItem := range batchEncryptionResponseItems {
+		rItem := batchRewrapResponseItems[i]
+
+		inputRef := batchEncryptionInput[i].(map[string]interface{})["reference"]
+		if eItem.Reference != inputRef {
+			t.Fatalf("bad: reference mismatch. Expected %s, Actual: %s", inputRef, eItem.Reference)
+		}
+
+		if eItem.Ciphertext == rItem.Ciphertext {
+			t.Fatalf("bad: rewrap input and output are the same")
+		}
+
+		if !strings.HasPrefix(rItem.Ciphertext, "vault:v2") {
+			t.Fatalf("bad: invalid version of ciphertext in rewrap response; expected: 'vault:v2', actual: %s", rItem.Ciphertext)
+		}
+
+		if rItem.KeyVersion != 2 {
+			t.Fatalf("unexpected key version; got: %d, expected: %d", rItem.KeyVersion, 2)
+		}
+
+		decReq.Data = map[string]interface{}{
+			"ciphertext": rItem.Ciphertext,
+		}
+
+		resp, err = b.HandleRequest(context.Background(), decReq)
+		if err != nil || (resp != nil && resp.IsError()) {
+			t.Fatalf("err:%v resp:%#v", err, resp)
+		}
+
+		plaintext1 := "dGhlIHF1aWNrIGJyb3duIGZveA=="
+		plaintext2 := "dmlzaGFsCg=="
+		if resp.Data["plaintext"] != plaintext1 && resp.Data["plaintext"] != plaintext2 {
+			t.Fatalf("bad: plaintext. Expected: %q or %q, Actual: %q", plaintext1, plaintext2, resp.Data["plaintext"])
+		}
+	}
+}
diff --git a/builtin/logical/transit/path_rotate.go b/builtin/logical/transit/path_rotate.go
new file mode 100644
index 0000000..0035dcf
--- /dev/null
+++ b/builtin/logical/transit/path_rotate.go
@@ -0,0 +1,95 @@
+// Copyright (c) HashiCorp, Inc.
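+// (Illustrative recap of the batch flow exercised above: encrypt with
+// batch_input, rotate the key, rewrap every ciphertext, then decrypt and
+// compare. The rotate step itself is a bare update with no request body:
+//
+//	rotateReq := &logical.Request{
+//		Operation: logical.UpdateOperation,
+//		Path:      "keys/upserted_key/rotate",
+//		Storage:   s,
+//	}
+//
+// after which rewrapped ciphertexts carry the "vault:v2" prefix the
+// assertions above check for.)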
+// SPDX-License-Identifier: MPL-2.0
+
+package transit
+
+import (
+	"context"
+
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/helper/keysutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+func (b *backend) pathRotate() *framework.Path {
+	return &framework.Path{
+		Pattern: "keys/" + framework.GenericNameRegex("name") + "/rotate",
+
+		DisplayAttrs: &framework.DisplayAttributes{
+			OperationPrefix: operationPrefixTransit,
+			OperationVerb:   "rotate",
+			OperationSuffix: "key",
+		},
+
+		Fields: map[string]*framework.FieldSchema{
+			"name": {
+				Type:        framework.TypeString,
+				Description: "Name of the key",
+			},
+			"managed_key_name": {
+				Type:        framework.TypeString,
+				Description: "The name of the managed key to use for the new version of this transit key",
+			},
+			"managed_key_id": {
+				Type:        framework.TypeString,
+				Description: "The UUID of the managed key to use for the new version of this transit key",
+			},
+		},
+
+		Callbacks: map[logical.Operation]framework.OperationFunc{
+			logical.UpdateOperation: b.pathRotateWrite,
+		},
+
+		HelpSynopsis:    pathRotateHelpSyn,
+		HelpDescription: pathRotateHelpDesc,
+	}
+}
+
+func (b *backend) pathRotateWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+	name := d.Get("name").(string)
+	managedKeyName := d.Get("managed_key_name").(string)
+	managedKeyId := d.Get("managed_key_id").(string)
+
+	// Get the policy
+	p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{
+		Storage: req.Storage,
+		Name:    name,
+	}, b.GetRandomReader())
+	if err != nil {
+		return nil, err
+	}
+	if p == nil {
+		return logical.ErrorResponse("key not found"), logical.ErrInvalidRequest
+	}
+	if !b.System().CachingDisabled() {
+		p.Lock(true)
+	}
+	defer p.Unlock()
+
+	if p.Type == keysutil.KeyType_MANAGED_KEY {
+		var keyId string
+		keyId, err = GetManagedKeyUUID(ctx, b, managedKeyName, managedKeyId)
+		if err != nil {
+			// The deferred p.Unlock() above releases the policy lock; an extra
+			// explicit unlock here would unlock twice.
+			return nil, err
+		}
+		err = p.RotateManagedKey(ctx, req.Storage, keyId)
+	} else {
+		// Rotate the policy
+		err = p.Rotate(ctx, req.Storage, b.GetRandomReader())
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	return b.formatKeyPolicy(p, nil)
+}
+
+const pathRotateHelpSyn = `Rotate named encryption key`
+
+const pathRotateHelpDesc = `
+This path is used to rotate the named key. After rotation,
+new encryption requests using this name will use the new key,
+but decryption will still be supported for older versions.
+`
diff --git a/builtin/logical/transit/path_sign_verify.go b/builtin/logical/transit/path_sign_verify.go
new file mode 100644
index 0000000..dd48c3c
--- /dev/null
+++ b/builtin/logical/transit/path_sign_verify.go
@@ -0,0 +1,759 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package transit
+
+import (
+	"context"
+	"crypto/rsa"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/helper/errutil"
+	"github.com/hashicorp/vault/sdk/helper/keysutil"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/mitchellh/mapstructure"
+)
+
+// BatchRequestSignItem represents a request item for batch processing.
+// A map type allows us to distinguish between empty and missing values.
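+// For example (illustrative): an item of {"input": ""} carries an explicitly
+// empty input, while an item with no "input" key at all is reported as
+// "missing input" by the handlers below; struct fields of type string could
+// not tell those two cases apart.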
+type batchRequestSignItem map[string]string + +// BatchResponseSignItem represents a response item for batch processing +type batchResponseSignItem struct { + // signature for the input present in the corresponding batch + // request item + Signature string `json:"signature,omitempty" mapstructure:"signature"` + + // The key version to be used for signing + KeyVersion int `json:"key_version" mapstructure:"key_version"` + + PublicKey []byte `json:"publickey,omitempty" mapstructure:"publickey"` + + // Error, if set represents a failure encountered while signing a + // corresponding batch request item + Error string `json:"error,omitempty" mapstructure:"error"` + + // The return paths through WriteSign in some cases are (nil, err) and others + // (logical.ErrorResponse(..),nil), and others (logical.ErrorResponse(..),err). + // For batch processing to successfully mimic previous handling for simple 'input', + // both output values are needed - though 'err' should never be serialized. + err error + + // Reference is an arbitrary caller supplied string value that will be placed on the + // batch response to ease correlation between inputs and outputs + Reference string `json:"reference" mapstructure:"reference"` +} + +// BatchRequestVerifyItem represents a request item for batch processing. +// A map type allows us to distinguish between empty and missing values. +type batchRequestVerifyItem map[string]string + +// BatchResponseVerifyItem represents a response item for batch processing +type batchResponseVerifyItem struct { + // Valid indicates whether signature matches the signature derived from the input string + Valid bool `json:"valid" mapstructure:"valid"` + + // Error, if set represents a failure encountered while verifying a + // corresponding batch request item + Error string `json:"error,omitempty" mapstructure:"error"` + + // The return paths through WriteSign in some cases are (nil, err) and others + // (logical.ErrorResponse(..),nil), and others (logical.ErrorResponse(..),err). + // For batch processing to successfully mimic previous handling for simple 'input', + // both output values are needed - though 'err' should never be serialized. + err error + + // Reference is an arbitrary caller supplied string value that will be placed on the + // batch response to ease correlation between inputs and outputs + Reference string `json:"reference" mapstructure:"reference"` +} + +const defaultHashAlgorithm = "sha2-256" + +func (b *backend) pathSign() *framework.Path { + return &framework.Path{ + Pattern: "sign/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("urlalgorithm"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "sign", + OperationSuffix: "|with-algorithm", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "The key to use", + }, + + "input": { + Type: framework.TypeString, + Description: "The base64-encoded input data", + }, + + "context": { + Type: framework.TypeString, + Description: `Base64 encoded context for key derivation. Required if key +derivation is enabled; currently only available with ed25519 keys.`, + }, + + "hash_algorithm": { + Type: framework.TypeString, + Default: defaultHashAlgorithm, + Description: `Hash algorithm to use (POST body parameter). Valid values are: + +* sha1 +* sha2-224 +* sha2-256 +* sha2-384 +* sha2-512 +* sha3-224 +* sha3-256 +* sha3-384 +* sha3-512 +* none + +Defaults to "sha2-256". 
Not valid for all key types, +including ed25519. Using none requires setting prehashed=true and +signature_algorithm=pkcs1v15, yielding a PKCSv1_5_NoOID instead of +the usual PKCSv1_5_DERnull signature.`, + }, + + "algorithm": { + Type: framework.TypeString, + Default: defaultHashAlgorithm, + Description: `Deprecated: use "hash_algorithm" instead.`, + }, + + "urlalgorithm": { + Type: framework.TypeString, + Description: `Hash algorithm to use (POST URL parameter)`, + }, + + "key_version": { + Type: framework.TypeInt, + Description: `The version of the key to use for signing. +Must be 0 (for latest) or a value greater than or equal +to the min_encryption_version configured on the key.`, + }, + + "prehashed": { + Type: framework.TypeBool, + Description: `Set to 'true' when the input is already hashed. If the key type is 'rsa-2048', 'rsa-3072' or 'rsa-4096', then the algorithm used to hash the input should be indicated by the 'algorithm' parameter.`, + }, + + "signature_algorithm": { + Type: framework.TypeString, + Description: `The signature algorithm to use for signing. Currently only applies to RSA key types. +Options are 'pss' or 'pkcs1v15'. Defaults to 'pss'`, + }, + + "marshaling_algorithm": { + Type: framework.TypeString, + Default: "asn1", + Description: `The method by which to marshal the signature. The default is 'asn1' which is used by openssl and X.509. It can also be set to 'jws' which is used for JWT signatures; setting it to this will also cause the encoding of the signature to be url-safe base64 instead of using standard base64 encoding. Currently only valid for ECDSA P-256 key types".`, + }, + + "salt_length": { + Type: framework.TypeString, + Default: "auto", + Description: `The salt length used to sign. Currently only applies to the RSA PSS signature scheme. +Options are 'auto' (the default used by Golang, causing the salt to be as large as possible when signing), 'hash' (causes the salt length to equal the length of the hash used in the signature), or an integer between the minimum and the maximum permissible salt lengths for the given RSA key size. Defaults to 'auto'.`, + }, + + "batch_input": { + Type: framework.TypeSlice, + Description: `Specifies a list of items for processing. When this parameter is set, +any supplied 'input' or 'context' parameters will be ignored. Responses are returned in the +'batch_results' array component of the 'data' element of the response. Any batch output will +preserve the order of the batch input`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathSignWrite, + }, + + HelpSynopsis: pathSignHelpSyn, + HelpDescription: pathSignHelpDesc, + } +} + +func (b *backend) pathVerify() *framework.Path { + return &framework.Path{ + Pattern: "verify/" + framework.GenericNameRegex("name") + framework.OptionalParamRegex("urlalgorithm"), + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "verify", + OperationSuffix: "|with-algorithm", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "The key to use", + }, + + "context": { + Type: framework.TypeString, + Description: `Base64 encoded context for key derivation. 
Required if key +derivation is enabled; currently only available with ed25519 keys.`, + }, + + "signature": { + Type: framework.TypeString, + Description: "The signature, including vault header/key version", + }, + + "hmac": { + Type: framework.TypeString, + Description: "The HMAC, including vault header/key version", + }, + + "input": { + Type: framework.TypeString, + Description: "The base64-encoded input data to verify", + }, + + "urlalgorithm": { + Type: framework.TypeString, + Description: `Hash algorithm to use (POST URL parameter)`, + }, + + "hash_algorithm": { + Type: framework.TypeString, + Default: defaultHashAlgorithm, + Description: `Hash algorithm to use (POST body parameter). Valid values are: + +* sha1 +* sha2-224 +* sha2-256 +* sha2-384 +* sha2-512 +* sha3-224 +* sha3-256 +* sha3-384 +* sha3-512 +* none + +Defaults to "sha2-256". Not valid for all key types. See note about +none on signing path.`, + }, + + "algorithm": { + Type: framework.TypeString, + Default: defaultHashAlgorithm, + Description: `Deprecated: use "hash_algorithm" instead.`, + }, + + "prehashed": { + Type: framework.TypeBool, + Description: `Set to 'true' when the input is already hashed. If the key type is 'rsa-2048', 'rsa-3072' or 'rsa-4096', then the algorithm used to hash the input should be indicated by the 'algorithm' parameter.`, + }, + + "signature_algorithm": { + Type: framework.TypeString, + Description: `The signature algorithm to use for signature verification. Currently only applies to RSA key types. +Options are 'pss' or 'pkcs1v15'. Defaults to 'pss'`, + }, + + "marshaling_algorithm": { + Type: framework.TypeString, + Default: "asn1", + Description: `The method by which to unmarshal the signature when verifying. The default is 'asn1' which is used by openssl and X.509; can also be set to 'jws' which is used for JWT signatures in which case the signature is also expected to be url-safe base64 encoding instead of standard base64 encoding. Currently only valid for ECDSA P-256 key types".`, + }, + + "salt_length": { + Type: framework.TypeString, + Default: "auto", + Description: `The salt length used to sign. Currently only applies to the RSA PSS signature scheme. +Options are 'auto' (the default used by Golang, causing the salt to be as large as possible when signing), 'hash' (causes the salt length to equal the length of the hash used in the signature), or an integer between the minimum and the maximum permissible salt lengths for the given RSA key size. Defaults to 'auto'.`, + }, + + "batch_input": { + Type: framework.TypeSlice, + Description: `Specifies a list of items for processing. When this parameter is set, +any supplied 'input', 'hmac' or 'signature' parameters will be ignored. Responses are returned in the +'batch_results' array component of the 'data' element of the response. Any batch output will +preserve the order of the batch input`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathVerifyWrite, + }, + + HelpSynopsis: pathVerifyHelpSyn, + HelpDescription: pathVerifyHelpDesc, + } +} + +func (b *backend) getSaltLength(d *framework.FieldData) (int, error) { + rawSaltLength, ok := d.GetOk("salt_length") + // This should only happen when something is wrong with the schema, + // so this is a reasonable default. 
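+	// Accepted inputs, mirroring the switch below (illustrative): "auto" maps
+	// to rsa.PSSSaltLengthAuto, "hash" to rsa.PSSSaltLengthEqualsHash, and any
+	// other value must parse as an integer no smaller than
+	// rsa.PSSSaltLengthEqualsHash, e.g. salt_length="32".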
+ if !ok { + return rsa.PSSSaltLengthAuto, nil + } + + rawSaltLengthStr := rawSaltLength.(string) + lowerSaltLengthStr := strings.ToLower(rawSaltLengthStr) + switch lowerSaltLengthStr { + case "auto": + return rsa.PSSSaltLengthAuto, nil + case "hash": + return rsa.PSSSaltLengthEqualsHash, nil + default: + saltLengthInt, err := strconv.Atoi(lowerSaltLengthStr) + if err != nil { + return rsa.PSSSaltLengthEqualsHash - 1, fmt.Errorf("salt length neither 'auto', 'hash', nor an int: %s", rawSaltLength) + } + if saltLengthInt < rsa.PSSSaltLengthEqualsHash { + return rsa.PSSSaltLengthEqualsHash - 1, fmt.Errorf("salt length is invalid: %d", saltLengthInt) + } + return saltLengthInt, nil + } +} + +func (b *backend) pathSignWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + name := d.Get("name").(string) + ver := d.Get("key_version").(int) + hashAlgorithmStr := d.Get("urlalgorithm").(string) + if hashAlgorithmStr == "" { + hashAlgorithmStr = d.Get("hash_algorithm").(string) + if hashAlgorithmStr == "" { + hashAlgorithmStr = d.Get("algorithm").(string) + if hashAlgorithmStr == "" { + hashAlgorithmStr = defaultHashAlgorithm + } + } + } + + hashAlgorithm, ok := keysutil.HashTypeMap[hashAlgorithmStr] + if !ok { + return logical.ErrorResponse(fmt.Sprintf("invalid hash algorithm %q", hashAlgorithmStr)), logical.ErrInvalidRequest + } + + marshalingStr := d.Get("marshaling_algorithm").(string) + marshaling, ok := keysutil.MarshalingTypeMap[marshalingStr] + if !ok { + return logical.ErrorResponse(fmt.Sprintf("invalid marshaling type %q", marshalingStr)), logical.ErrInvalidRequest + } + + prehashed := d.Get("prehashed").(bool) + sigAlgorithm := d.Get("signature_algorithm").(string) + saltLength, err := b.getSaltLength(d) + if err != nil { + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + + // Get the policy + p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ + Storage: req.Storage, + Name: name, + }, b.GetRandomReader()) + if err != nil { + return nil, err + } + if p == nil { + return logical.ErrorResponse("signing key not found"), logical.ErrInvalidRequest + } + if !b.System().CachingDisabled() { + p.Lock(false) + } + + if !p.Type.SigningSupported() { + p.Unlock() + return logical.ErrorResponse(fmt.Sprintf("key type %v does not support signing", p.Type)), logical.ErrInvalidRequest + } + + // Allow managed keys to specify no hash algo without additional conditions. 
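+	// (Illustrative: for non-managed keys a sign request using
+	// hash_algorithm=none must also set prehashed=true and
+	// signature_algorithm=pkcs1v15, e.g.
+	//
+	//	{"input": "<base64 digest>", "hash_algorithm": "none",
+	//	 "prehashed": true, "signature_algorithm": "pkcs1v15"}
+	//
+	// yielding the PKCSv1_5_NoOID signature form noted in the field docs.)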
+ if hashAlgorithm == keysutil.HashTypeNone && p.Type != keysutil.KeyType_MANAGED_KEY { + if !prehashed || sigAlgorithm != "pkcs1v15" { + return logical.ErrorResponse("hash_algorithm=none requires both prehashed=true and signature_algorithm=pkcs1v15"), logical.ErrInvalidRequest + } + } + + batchInputRaw := d.Raw["batch_input"] + var batchInputItems []batchRequestSignItem + if batchInputRaw != nil { + err = mapstructure.Decode(batchInputRaw, &batchInputItems) + if err != nil { + p.Unlock() + return nil, fmt.Errorf("failed to parse batch input: %w", err) + } + + if len(batchInputItems) == 0 { + p.Unlock() + return logical.ErrorResponse("missing batch input to process"), logical.ErrInvalidRequest + } + } else { + // use empty string if input is missing - not an error + batchInputItems = make([]batchRequestSignItem, 1) + batchInputItems[0] = batchRequestSignItem{ + "input": d.Get("input").(string), + "context": d.Get("context").(string), + } + } + + response := make([]batchResponseSignItem, len(batchInputItems)) + + for i, item := range batchInputItems { + + rawInput, ok := item["input"] + if !ok { + response[i].Error = "missing input" + response[i].err = logical.ErrInvalidRequest + continue + } + + input, err := base64.StdEncoding.DecodeString(rawInput) + if err != nil { + response[i].Error = fmt.Sprintf("unable to decode input as base64: %s", err) + response[i].err = logical.ErrInvalidRequest + continue + } + + if p.Type.HashSignatureInput() && !prehashed { + hf := keysutil.HashFuncMap[hashAlgorithm]() + if hf != nil { + hf.Write(input) + input = hf.Sum(nil) + } + } + + contextRaw := item["context"] + var context []byte + if len(contextRaw) != 0 { + context, err = base64.StdEncoding.DecodeString(contextRaw) + if err != nil { + response[i].Error = "failed to base64-decode context" + response[i].err = logical.ErrInvalidRequest + continue + } + } + + var managedKeyParameters keysutil.ManagedKeyParameters + if p.Type == keysutil.KeyType_MANAGED_KEY { + managedKeySystemView, ok := b.System().(logical.ManagedKeySystemView) + if !ok { + return nil, errors.New("unsupported system view") + } + + managedKeyParameters = keysutil.ManagedKeyParameters{ + ManagedKeySystemView: managedKeySystemView, + BackendUUID: b.backendUUID, + Context: ctx, + } + } + + sig, err := p.SignWithOptions(ver, context, input, &keysutil.SigningOptions{ + HashAlgorithm: hashAlgorithm, + Marshaling: marshaling, + SaltLength: saltLength, + SigAlgorithm: sigAlgorithm, + ManagedKeyParams: managedKeyParameters, + }) + if err != nil { + if batchInputRaw != nil { + response[i].Error = err.Error() + } + response[i].err = err + } else if sig == nil { + response[i].err = fmt.Errorf("signature could not be computed") + } else { + keyVersion := ver + if keyVersion == 0 { + keyVersion = p.LatestVersion + } + + response[i].Signature = sig.Signature + response[i].PublicKey = sig.PublicKey + response[i].KeyVersion = keyVersion + } + } + + // Generate the response + resp := &logical.Response{} + if batchInputRaw != nil { + // Copy the references + for i := range batchInputItems { + response[i].Reference = batchInputItems[i]["reference"] + } + resp.Data = map[string]interface{}{ + "batch_results": response, + } + } else { + if response[0].Error != "" || response[0].err != nil { + p.Unlock() + if response[0].Error != "" { + return logical.ErrorResponse(response[0].Error), response[0].err + } + + return nil, response[0].err + } + + resp.Data = map[string]interface{}{ + "signature": response[0].Signature, + "key_version": response[0].KeyVersion, + 
} + + if len(response[0].PublicKey) > 0 { + resp.Data["public_key"] = response[0].PublicKey + } + } + + p.Unlock() + return resp, nil +} + +func (b *backend) pathVerifyWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { + batchInputRaw := d.Raw["batch_input"] + var batchInputItems []batchRequestVerifyItem + if batchInputRaw != nil { + err := mapstructure.Decode(batchInputRaw, &batchInputItems) + if err != nil { + return nil, fmt.Errorf("failed to parse batch input: %w", err) + } + + if len(batchInputItems) == 0 { + return logical.ErrorResponse("missing batch input to process"), logical.ErrInvalidRequest + } + } else { + // use empty string if input is missing - not an error + inputB64 := d.Get("input").(string) + + batchInputItems = make([]batchRequestVerifyItem, 1) + batchInputItems[0] = batchRequestVerifyItem{ + "input": inputB64, + } + if sig, ok := d.GetOk("signature"); ok { + batchInputItems[0]["signature"] = sig.(string) + } + if hmac, ok := d.GetOk("hmac"); ok { + batchInputItems[0]["hmac"] = hmac.(string) + } + batchInputItems[0]["context"] = d.Get("context").(string) + } + + // For simplicity, 'signature' and 'hmac' cannot be mixed across batch_input elements. + // If one batch_input item is 'signature', they all must be 'signature'. + // If one batch_input item is 'hmac', they all must be 'hmac'. + sigFound := false + hmacFound := false + missing := false + for _, v := range batchInputItems { + if _, ok := v["signature"]; ok { + sigFound = true + } else if _, ok := v["hmac"]; ok { + hmacFound = true + } else { + missing = true + } + } + + switch { + case batchInputRaw == nil && sigFound && hmacFound: + return logical.ErrorResponse("provide one of 'signature' or 'hmac'"), logical.ErrInvalidRequest + + case batchInputRaw == nil && !sigFound && !hmacFound: + return logical.ErrorResponse("neither a 'signature' nor an 'hmac' were given to verify"), logical.ErrInvalidRequest + + case sigFound && hmacFound: + return logical.ErrorResponse("elements of batch_input must all provide 'signature' or all provide 'hmac'"), logical.ErrInvalidRequest + + case missing && sigFound: + return logical.ErrorResponse("some elements of batch_input are missing 'signature'"), logical.ErrInvalidRequest + + case missing && hmacFound: + return logical.ErrorResponse("some elements of batch_input are missing 'hmac'"), logical.ErrInvalidRequest + + case missing: + return logical.ErrorResponse("no batch_input elements have 'signature' or 'hmac'"), logical.ErrInvalidRequest + + case hmacFound: + return b.pathHMACVerify(ctx, req, d) + } + + name := d.Get("name").(string) + hashAlgorithmStr := d.Get("urlalgorithm").(string) + if hashAlgorithmStr == "" { + hashAlgorithmStr = d.Get("hash_algorithm").(string) + if hashAlgorithmStr == "" { + hashAlgorithmStr = d.Get("algorithm").(string) + if hashAlgorithmStr == "" { + hashAlgorithmStr = defaultHashAlgorithm + } + } + } + + hashAlgorithm, ok := keysutil.HashTypeMap[hashAlgorithmStr] + if !ok { + return logical.ErrorResponse(fmt.Sprintf("invalid hash algorithm %q", hashAlgorithmStr)), logical.ErrInvalidRequest + } + + marshalingStr := d.Get("marshaling_algorithm").(string) + marshaling, ok := keysutil.MarshalingTypeMap[marshalingStr] + if !ok { + return logical.ErrorResponse(fmt.Sprintf("invalid marshaling type %q", marshalingStr)), logical.ErrInvalidRequest + } + + prehashed := d.Get("prehashed").(bool) + sigAlgorithm := d.Get("signature_algorithm").(string) + saltLength, err := b.getSaltLength(d) + if err != nil { + 
return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + + // Get the policy + p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ + Storage: req.Storage, + Name: name, + }, b.GetRandomReader()) + if err != nil { + return nil, err + } + if p == nil { + return logical.ErrorResponse("signature verification key not found"), logical.ErrInvalidRequest + } + if !b.System().CachingDisabled() { + p.Lock(false) + } + + if !p.Type.SigningSupported() { + p.Unlock() + return logical.ErrorResponse(fmt.Sprintf("key type %v does not support verification", p.Type)), logical.ErrInvalidRequest + } + + // Allow managed keys to specify no hash algo without additional conditions. + if hashAlgorithm == keysutil.HashTypeNone && p.Type != keysutil.KeyType_MANAGED_KEY { + if !prehashed || sigAlgorithm != "pkcs1v15" { + return logical.ErrorResponse("hash_algorithm=none requires both prehashed=true and signature_algorithm=pkcs1v15"), logical.ErrInvalidRequest + } + } + + response := make([]batchResponseVerifyItem, len(batchInputItems)) + + for i, item := range batchInputItems { + + rawInput, ok := item["input"] + if !ok { + response[i].Error = "missing input" + response[i].err = logical.ErrInvalidRequest + continue + } + + input, err := base64.StdEncoding.DecodeString(rawInput) + if err != nil { + response[i].Error = fmt.Sprintf("unable to decode input as base64: %s", err) + response[i].err = logical.ErrInvalidRequest + continue + } + + sig, ok := item["signature"] + if !ok { + response[i].Error = "missing signature" + response[i].err = logical.ErrInvalidRequest + continue + } + + if p.Type.HashSignatureInput() && !prehashed { + hf := keysutil.HashFuncMap[hashAlgorithm]() + if hf != nil { + hf.Write(input) + input = hf.Sum(nil) + } + } + + contextRaw := item["context"] + var context []byte + if len(contextRaw) != 0 { + context, err = base64.StdEncoding.DecodeString(contextRaw) + if err != nil { + response[i].Error = "failed to base64-decode context" + response[i].err = logical.ErrInvalidRequest + continue + } + } + var managedKeyParameters keysutil.ManagedKeyParameters + if p.Type == keysutil.KeyType_MANAGED_KEY { + managedKeySystemView, ok := b.System().(logical.ManagedKeySystemView) + if !ok { + return nil, errors.New("unsupported system view") + } + + managedKeyParameters = keysutil.ManagedKeyParameters{ + ManagedKeySystemView: managedKeySystemView, + BackendUUID: b.backendUUID, + Context: ctx, + } + } + + signingOptions := &keysutil.SigningOptions{ + HashAlgorithm: hashAlgorithm, + Marshaling: marshaling, + SaltLength: saltLength, + SigAlgorithm: sigAlgorithm, + ManagedKeyParams: managedKeyParameters, + } + + valid, err := p.VerifySignatureWithOptions(context, input, sig, signingOptions) + if err != nil { + switch err.(type) { + case errutil.UserError: + response[i].Error = err.Error() + response[i].err = logical.ErrInvalidRequest + default: + if batchInputRaw != nil { + response[i].Error = err.Error() + } + response[i].err = err + } + } else { + response[i].Valid = valid + } + } + + // Generate the response + resp := &logical.Response{} + if batchInputRaw != nil { + // Copy the references + for i := range batchInputItems { + response[i].Reference = batchInputItems[i]["reference"] + } + resp.Data = map[string]interface{}{ + "batch_results": response, + } + } else { + if response[0].Error != "" || response[0].err != nil { + p.Unlock() + if response[0].Error != "" { + return logical.ErrorResponse(response[0].Error), response[0].err + } + return nil, response[0].err + } + resp.Data = 
map[string]interface{}{ + "valid": response[0].Valid, + } + } + + p.Unlock() + return resp, nil +} + +const pathSignHelpSyn = `Generate a signature for input data using the named key` + +const pathSignHelpDesc = ` +Generates a signature of the input data using the named key and the given hash algorithm. +` +const pathVerifyHelpSyn = `Verify a signature or HMAC for input data created using the named key` + +const pathVerifyHelpDesc = ` +Verifies a signature or HMAC of the input data using the named key and the given hash algorithm. +` diff --git a/builtin/logical/transit/path_sign_verify_test.go b/builtin/logical/transit/path_sign_verify_test.go new file mode 100644 index 0000000..63aef9c --- /dev/null +++ b/builtin/logical/transit/path_sign_verify_test.go @@ -0,0 +1,981 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "encoding/base64" + "fmt" + "strconv" + "strings" + "testing" + + "github.com/hashicorp/vault/helper/constants" + + "golang.org/x/crypto/ed25519" + + "github.com/hashicorp/vault/sdk/helper/keysutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/mapstructure" +) + +// The outcome of processing a request includes +// the possibility that the request is incomplete or incorrect, +// or that the request is well-formed but the signature (for verification) +// is invalid, or that the signature is valid, but the key is not. +type signOutcome struct { + requestOk bool + valid bool + keyValid bool + reference string +} + +func TestTransit_SignVerify_ECDSA(t *testing.T) { + t.Run("256", func(t *testing.T) { + testTransit_SignVerify_ECDSA(t, 256) + }) + t.Run("384", func(t *testing.T) { + testTransit_SignVerify_ECDSA(t, 384) + }) + t.Run("521", func(t *testing.T) { + testTransit_SignVerify_ECDSA(t, 521) + }) +} + +func testTransit_SignVerify_ECDSA(t *testing.T, bits int) { + b, storage := createBackendWithSysView(t) + + // First create a key + req := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/foo", + Data: map[string]interface{}{ + "type": fmt.Sprintf("ecdsa-p%d", bits), + }, + } + _, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + // Now, change the key value to something we control + p, _, err := b.GetPolicy(context.Background(), keysutil.PolicyRequest{ + Storage: storage, + Name: "foo", + }, b.GetRandomReader()) + if err != nil { + t.Fatal(err) + } + + // Useful code to output a key for openssl verification + /* + if bits == 384 { + var curve elliptic.Curve + switch bits { + case 521: + curve = elliptic.P521() + case 384: + curve = elliptic.P384() + default: + curve = elliptic.P256() + } + key := p.Keys[strconv.Itoa(p.LatestVersion)] + keyBytes, _ := x509.MarshalECPrivateKey(&ecdsa.PrivateKey{ + PublicKey: ecdsa.PublicKey{ + Curve: curve, + X: key.EC_X, + Y: key.EC_Y, + }, + D: key.EC_D, + }) + pemBlock := &pem.Block{ + Type: "EC PRIVATE KEY", + Bytes: keyBytes, + } + pemBytes := pem.EncodeToMemory(pemBlock) + t.Fatalf("X: %s, Y: %s, D: %s, marshaled: %s", key.EC_X.Text(16), key.EC_Y.Text(16), key.EC_D.Text(16), string(pemBytes)) + } + */ + + var xString, yString, dString string + switch bits { + case 384: + xString = "703457a84e48bfcb037cfb509f1870d2aa5b74c109c2f24624ab21444492575229f8711453e5c656dab596b4e26db30e" + yString = "411c5b7092a893dc8b7af39de3d21d1c26f45b27616baeac4c479ef3c9f21c194b5ac501dee47ba2b2cb243a54256524" + dString = 
"3de3e4fd2ecbc490e956f41f5003a1e57a84763cec7b722fa3427cf461a1148ea4d5206023bcce0422289f6633730759" + /* + -----BEGIN EC PRIVATE KEY----- + MIGkAgEBBDA94+T9LsvEkOlW9B9QA6HleoR2POx7ci+jQnz0YaEUjqTVIGAjvM4E + IiifZjNzB1mgBwYFK4EEACKhZANiAARwNFeoTki/ywN8+1CfGHDSqlt0wQnC8kYk + qyFERJJXUin4cRRT5cZW2rWWtOJtsw5BHFtwkqiT3It6853j0h0cJvRbJ2FrrqxM + R57zyfIcGUtaxQHe5HuissskOlQlZSQ= + -----END EC PRIVATE KEY----- + */ + case 521: + xString = "1913f75fc044fe5d1f871c2629a377462fd819b174a41d3ec7d04ebd5ae35475ff8de544f4e19a9aa6b16a8f67af479be6884e00ca3147dc24d5924d66ac395e04b" + yString = "4919406b90d8323fdb5c9c4f48259c56ebcea37b40ad1a82bbbfad62a9b9c2dce515772274b84725471c7d0b7c62e10c23296b1a9d2b2586ada67735ff5d9fffc4" + dString = "1867d0fcd9bac4c5821b70a6b13117499438f8c274579c0aba254fbd85fa98892c3608576197d5534366a9aab0f904155bec46d800d23a57f7f053d91526568b09" + /* + -----BEGIN EC PRIVATE KEY----- + MIHcAgEBBEIAGGfQ/Nm6xMWCG3CmsTEXSZQ4+MJ0V5wKuiVPvYX6mIksNghXYZfV + U0Nmqaqw+QQVW+xG2ADSOlf38FPZFSZWiwmgBwYFK4EEACOhgYkDgYYABAGRP3X8 + BE/l0fhxwmKaN3Ri/YGbF0pB0+x9BOvVrjVHX/jeVE9OGamqaxao9nr0eb5ohOAM + oxR9wk1ZJNZqw5XgSwBJGUBrkNgyP9tcnE9IJZxW686je0CtGoK7v61iqbnC3OUV + dyJ0uEclRxx9C3xi4QwjKWsanSslhq2mdzX/XZ//xA== + -----END EC PRIVATE KEY----- + */ + default: + xString = "7336010a6da5935113d26d9ea4bb61b3b8d102c9a8083ed432f9b58fd7e80686" + yString = "4040aa31864691a8a9e7e3ec9250e85425b797ad7be34ba8df62bfbad45ebb0e" + dString = "99e5569be8683a2691dfc560ca9dfa71e887867a3af60635a08a3e3655aba3ef" + } + + keyEntry := p.Keys[strconv.Itoa(p.LatestVersion)] + _, ok := keyEntry.EC_X.SetString(xString, 16) + if !ok { + t.Fatal("could not set X") + } + _, ok = keyEntry.EC_Y.SetString(yString, 16) + if !ok { + t.Fatal("could not set Y") + } + _, ok = keyEntry.EC_D.SetString(dString, 16) + if !ok { + t.Fatal("could not set D") + } + p.Keys[strconv.Itoa(p.LatestVersion)] = keyEntry + if err = p.Persist(context.Background(), storage); err != nil { + t.Fatal(err) + } + req.Data = map[string]interface{}{ + "input": "dGhlIHF1aWNrIGJyb3duIGZveA==", + } + + signRequest := func(req *logical.Request, errExpected bool, postpath string) string { + t.Helper() + req.Path = "sign/foo" + postpath + resp, err := b.HandleRequest(context.Background(), req) + if err != nil && !errExpected { + t.Fatalf("request: %v\nerror: %v", req, err) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if errExpected { + if !resp.IsError() { + t.Fatalf("bad: should have gotten error response: %#v", *resp) + } + return "" + } + if resp.IsError() { + t.Fatalf("bad: got error response: %#v", *resp) + } + value, ok := resp.Data["signature"] + if !ok { + t.Fatalf("no signature key found in returned data, got resp data %#v", resp.Data) + } + return value.(string) + } + + verifyRequest := func(req *logical.Request, errExpected bool, postpath, sig string) { + t.Helper() + req.Path = "verify/foo" + postpath + req.Data["signature"] = sig + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + if errExpected { + return + } + t.Fatalf("got error: %v, sig was %v", err, sig) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if resp.IsError() { + if errExpected { + return + } + t.Fatalf("bad: got error response: %#v", *resp) + } + value, ok := resp.Data["valid"] + if !ok { + t.Fatalf("no valid key found in returned data, got resp data %#v", resp.Data) + } + if !value.(bool) && !errExpected { + t.Fatalf("verification failed; req was %#v, resp is %#v", *req, *resp) + } else if value.(bool) && errExpected { + 
t.Fatalf("expected error and didn't get one; req was %#v, resp is %#v", *req, *resp) + } + } + + // Comparisons are against values generated via openssl + + // Test defaults -- sha2-256 + sig := signRequest(req, false, "") + verifyRequest(req, false, "", sig) + + // Test a bad signature + verifyRequest(req, true, "", sig[0:len(sig)-2]) + + // Test a signature generated with the same key by openssl + switch bits { + case 384: + sig = `vault:v1:MGUCMHHZLRN/3ehWuWACfSCMLtFtNEAdx6Rkwon2Lx6FWCyXCXqH6A8Pz8er0Qkgvm2ElQIxAO922LmUeYzHmDSfC5is/TjFu3b4Fb+1XtoBXncc2u4t2vSuTAxEv7WMh2D2YDdxeA==` + case 521: + sig = `vault:v1:MIGIAkIBYhspOgSs/K/NUWtlBN+CfYe1IVFpUbQNSqdjT7s+QKcr6GKmdGLIQAXw0q6K0elBgzi1wgLjxwdscwMeW7tm/QQCQgDzdITGlUEd9Z7DOfLCnDP4X8pGsfO60Tvsh/BN44drZsHLtXYBXLczB/XZfIWAsPMuI5F7ExwVNbmQP0FBVri/QQ==` + default: + sig = `vault:v1:MEUCIAgnEl9V8P305EBAlz68Nq4jZng5fE8k6MactcnlUw9dAiEAvJVePg3dazW6MaW7lRAVtEz82QJDVmR98tXCl8Pc7DA=` + } + verifyRequest(req, false, "", sig) + + // Test algorithm selection in the path + sig = signRequest(req, false, "/sha2-224") + verifyRequest(req, false, "/sha2-224", sig) + + // Reset and test algorithm selection in the data + req.Data["hash_algorithm"] = "sha2-224" + sig = signRequest(req, false, "") + verifyRequest(req, false, "", sig) + + req.Data["hash_algorithm"] = "sha2-384" + sig = signRequest(req, false, "") + verifyRequest(req, false, "", sig) + + req.Data["hash_algorithm"] = "sha2-512" + sig = signRequest(req, false, "") + verifyRequest(req, false, "", sig) + + req.Data["hash_algorithm"] = "sha3-224" + sig = signRequest(req, false, "") + verifyRequest(req, false, "", sig) + + req.Data["hash_algorithm"] = "sha3-256" + sig = signRequest(req, false, "") + verifyRequest(req, false, "", sig) + + req.Data["hash_algorithm"] = "sha3-384" + sig = signRequest(req, false, "") + verifyRequest(req, false, "", sig) + + req.Data["hash_algorithm"] = "sha3-512" + sig = signRequest(req, false, "") + verifyRequest(req, false, "", sig) + + req.Data["prehashed"] = true + sig = signRequest(req, false, "") + verifyRequest(req, false, "", sig) + delete(req.Data, "prehashed") + + // Test marshaling selection + // Bad value + req.Data["marshaling_algorithm"] = "asn2" + sig = signRequest(req, true, "") + // Use the default, verify we can't validate with jws + req.Data["marshaling_algorithm"] = "asn1" + sig = signRequest(req, false, "") + req.Data["marshaling_algorithm"] = "jws" + verifyRequest(req, true, "", sig) + // Sign with jws, verify we can validate + sig = signRequest(req, false, "") + verifyRequest(req, false, "", sig) + // If we change marshaling back to asn1 we shouldn't be able to verify + delete(req.Data, "marshaling_algorithm") + verifyRequest(req, true, "", sig) + + // Test 512 and save sig for later to ensure we can't validate once min + // decryption version is set + req.Data["hash_algorithm"] = "sha2-512" + sig = signRequest(req, false, "") + verifyRequest(req, false, "", sig) + + v1sig := sig + + // Test bad algorithm + req.Data["hash_algorithm"] = "foobar" + signRequest(req, true, "") + + // Test bad input + req.Data["hash_algorithm"] = "sha2-256" + req.Data["input"] = "foobar" + signRequest(req, true, "") + + // Rotate and set min decryption version + err = p.Rotate(context.Background(), storage, b.GetRandomReader()) + if err != nil { + t.Fatal(err) + } + err = p.Rotate(context.Background(), storage, b.GetRandomReader()) + if err != nil { + t.Fatal(err) + } + + p.MinDecryptionVersion = 2 + if err = p.Persist(context.Background(), storage); err != nil { + 
t.Fatal(err)
+	}
+
+	req.Data["input"] = "dGhlIHF1aWNrIGJyb3duIGZveA=="
+	req.Data["hash_algorithm"] = "sha2-256"
+	// Make sure signing still works fine
+	sig = signRequest(req, false, "")
+	verifyRequest(req, false, "", sig)
+	// Now try the v1
+	verifyRequest(req, true, "", v1sig)
+}
+
+func validatePublicKey(t *testing.T, in string, sig string, pubKeyRaw []byte, expectValid bool, postpath string, b *backend) {
+	t.Helper()
+	input, _ := base64.StdEncoding.DecodeString(in)
+	splitSig := strings.Split(sig, ":")
+	signature, _ := base64.StdEncoding.DecodeString(splitSig[2])
+	valid := ed25519.Verify(ed25519.PublicKey(pubKeyRaw), input, signature)
+	if valid != expectValid {
+		t.Fatalf("status of signature: expected %v. Got %v", expectValid, valid)
+	}
+	if !valid {
+		return
+	}
+
+	keyReadReq := &logical.Request{
+		Operation: logical.ReadOperation,
+		Path:      "keys/" + postpath,
+	}
+	keyReadResp, err := b.HandleRequest(context.Background(), keyReadReq)
+	if err != nil {
+		t.Fatal(err)
+	}
+	val := keyReadResp.Data["keys"].(map[string]map[string]interface{})[strings.TrimPrefix(splitSig[1], "v")]
+	var ak asymKey
+	if err := mapstructure.Decode(val, &ak); err != nil {
+		t.Fatal(err)
+	}
+	if ak.PublicKey != "" {
+		t.Fatal("got non-empty public key")
+	}
+	keyReadReq.Data = map[string]interface{}{
+		"context": "abcd",
+	}
+	keyReadResp, err = b.HandleRequest(context.Background(), keyReadReq)
+	if err != nil {
+		t.Fatal(err)
+	}
+	val = keyReadResp.Data["keys"].(map[string]map[string]interface{})[strings.TrimPrefix(splitSig[1], "v")]
+	if err := mapstructure.Decode(val, &ak); err != nil {
+		t.Fatal(err)
+	}
+	if ak.PublicKey != base64.StdEncoding.EncodeToString(pubKeyRaw) {
+		t.Fatalf("got incorrect public key; got %q, expected %q\nasymKey struct is\n%#v", ak.PublicKey, pubKeyRaw, ak)
+	}
+}
+
+func TestTransit_SignVerify_ED25519(t *testing.T) {
+	b, storage := createBackendWithSysView(t)
+
+	// First create a key
+	req := &logical.Request{
+		Storage:   storage,
+		Operation: logical.UpdateOperation,
+		Path:      "keys/foo",
+		Data: map[string]interface{}{
+			"type": "ed25519",
+		},
+	}
+	_, err := b.HandleRequest(context.Background(), req)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Now create a derived key
+	req = &logical.Request{
+		Storage:   storage,
+		Operation: logical.UpdateOperation,
+		Path:      "keys/bar",
+		Data: map[string]interface{}{
+			"type":    "ed25519",
+			"derived": true,
+		},
+	}
+	_, err = b.HandleRequest(context.Background(), req)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Get the keys for later
+	fooP, _, err := b.GetPolicy(context.Background(), keysutil.PolicyRequest{
+		Storage: storage,
+		Name:    "foo",
+	}, b.GetRandomReader())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	barP, _, err := b.GetPolicy(context.Background(), keysutil.PolicyRequest{
+		Storage: storage,
+		Name:    "bar",
+	}, b.GetRandomReader())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	signRequest := func(req *logical.Request, errExpected bool, postpath string) []string {
+		t.Helper()
+		// Delete any key that exists in the request
+		delete(req.Data, "public_key")
+		req.Path = "sign/" + postpath
+		resp, err := b.HandleRequest(context.Background(), req)
+		if err != nil {
+			if !errExpected {
+				t.Fatal(err)
+			}
+			return nil
+		}
+		if resp == nil {
+			t.Fatal("expected non-nil response")
+		}
+		if errExpected {
+			if resp.IsError() {
+				return nil
+			}
+			t.Fatalf("bad: expected error response, got: %#v", *resp)
+		}
+		if resp.IsError() {
+			t.Fatalf("bad: got error response: %#v", *resp)
+		}
+		// memoize any public key
+		if key, ok :=
resp.Data["public_key"]; ok { + req.Data["public_key"] = key + } + // batch_input supplied + if _, ok := req.Data["batch_input"]; ok { + batchRequestItems := req.Data["batch_input"].([]batchRequestSignItem) + + batchResults, ok := resp.Data["batch_results"] + if !ok { + t.Fatalf("no batch_results in returned data, got resp data %#v", resp.Data) + } + batchResponseItems := batchResults.([]batchResponseSignItem) + if len(batchResponseItems) != len(batchRequestItems) { + t.Fatalf("Expected %d items in response. Got %d: %#v", len(batchRequestItems), len(batchResponseItems), resp) + } + if len(batchRequestItems) == 0 { + return nil + } + ret := make([]string, len(batchRequestItems)) + for i, v := range batchResponseItems { + ret[i] = v.Signature + } + return ret + } + + // input supplied + value, ok := resp.Data["signature"] + if !ok { + t.Fatalf("no signature key found in returned data, got resp data %#v", resp.Data) + } + return []string{value.(string)} + } + + verifyRequest := func(req *logical.Request, errExpected bool, outcome []signOutcome, postpath string, sig []string, attachSig bool) { + t.Helper() + req.Path = "verify/" + postpath + if _, ok := req.Data["batch_input"]; ok && attachSig { + batchRequestItems := req.Data["batch_input"].([]batchRequestSignItem) + if len(batchRequestItems) != len(sig) { + t.Fatalf("number of requests in batch(%d) != number of signatures(%d)", len(batchRequestItems), len(sig)) + } + for i, v := range sig { + batchRequestItems[i]["signature"] = v + batchRequestItems[i]["reference"] = outcome[i].reference + } + } else if attachSig { + req.Data["signature"] = sig[0] + } + resp, err := b.HandleRequest(context.Background(), req) + if err != nil && !errExpected { + t.Fatalf("got error: %v, sig was %v", err, sig) + } + if errExpected { + if resp != nil && !resp.IsError() { + t.Fatalf("bad: expected error response, got: %#v\n%#v", *resp, req) + } + return + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if resp.IsError() { + t.Fatalf("bad: got error response: %#v", *resp) + } + + // batch_input field supplied + if _, ok := req.Data["batch_input"]; ok { + batchRequestItems := req.Data["batch_input"].([]batchRequestSignItem) + + batchResults, ok := resp.Data["batch_results"] + if !ok { + t.Fatalf("no batch_results in returned data, got resp data %#v", resp.Data) + } + batchResponseItems := batchResults.([]batchResponseVerifyItem) + if len(batchResponseItems) != len(batchRequestItems) { + t.Fatalf("Expected %d items in response. 
Got %d: %#v", len(batchRequestItems), len(batchResponseItems), resp) + } + if len(batchRequestItems) == 0 { + return + } + for i, v := range batchResponseItems { + if v.Error != "" && outcome[i].requestOk { + t.Fatalf("verification failed; req was %#v, resp is %#v", *req, *resp) + } + if v.Error != "" { + continue + } + if v.Valid != outcome[i].valid { + t.Fatalf("verification failed; req was %#v, resp is %#v", *req, *resp) + } + if !v.Valid { + continue + } + if pubKeyRaw, ok := req.Data["public_key"]; ok { + validatePublicKey(t, batchRequestItems[i]["input"], sig[i], pubKeyRaw.([]byte), outcome[i].keyValid, postpath, b) + } + if v.Reference != outcome[i].reference { + t.Fatalf("verification failed, mismatched references %s vs %s", v.Reference, outcome[i].reference) + } + } + return + } + + // input field supplied + value, ok := resp.Data["valid"] + if !ok { + t.Fatalf("no valid key found in returned data, got resp data %#v", resp.Data) + } + valid := value.(bool) + if valid != outcome[0].valid { + t.Fatalf("verification failed; req was %#v, resp is %#v", *req, *resp) + } + if !valid { + return + } + + if pubKeyRaw, ok := req.Data["public_key"]; ok { + validatePublicKey(t, req.Data["input"].(string), sig[0], pubKeyRaw.([]byte), outcome[0].keyValid, postpath, b) + } + } + + req.Data = map[string]interface{}{ + "input": "dGhlIHF1aWNrIGJyb3duIGZveA==", + "context": "abcd", + } + + outcome := []signOutcome{{requestOk: true, valid: true, keyValid: true}} + // Test defaults + sig := signRequest(req, false, "foo") + verifyRequest(req, false, outcome, "foo", sig, true) + + sig = signRequest(req, false, "bar") + verifyRequest(req, false, outcome, "bar", sig, true) + + // Verify with incorrect key + outcome[0].valid = false + verifyRequest(req, false, outcome, "foo", sig, true) + + // Verify with missing signatures + delete(req.Data, "signature") + verifyRequest(req, true, outcome, "foo", sig, false) + + // Test a bad signature + badsig := sig[0] + badsig = badsig[:len(badsig)-2] + verifyRequest(req, true, outcome, "bar", []string{badsig}, true) + + v1sig := sig + + // Test a missing context + delete(req.Data, "context") + sig = signRequest(req, true, "bar") + + // Rotate and set min decryption version + err = fooP.Rotate(context.Background(), storage, b.GetRandomReader()) + if err != nil { + t.Fatal(err) + } + err = fooP.Rotate(context.Background(), storage, b.GetRandomReader()) + if err != nil { + t.Fatal(err) + } + fooP.MinDecryptionVersion = 2 + if err = fooP.Persist(context.Background(), storage); err != nil { + t.Fatal(err) + } + err = barP.Rotate(context.Background(), storage, b.GetRandomReader()) + if err != nil { + t.Fatal(err) + } + err = barP.Rotate(context.Background(), storage, b.GetRandomReader()) + if err != nil { + t.Fatal(err) + } + barP.MinDecryptionVersion = 2 + if err = barP.Persist(context.Background(), storage); err != nil { + t.Fatal(err) + } + + req.Data = map[string]interface{}{ + "input": "dGhlIHF1aWNrIGJyb3duIGZveA==", + "context": "abcd", + } + + // Make sure signing still works fine + sig = signRequest(req, false, "foo") + outcome[0].valid = true + verifyRequest(req, false, outcome, "foo", sig, true) + // Now try the v1 + verifyRequest(req, true, outcome, "foo", v1sig, true) + + // Repeat with the other key + sig = signRequest(req, false, "bar") + verifyRequest(req, false, outcome, "bar", sig, true) + verifyRequest(req, true, outcome, "bar", v1sig, true) + + // Test Batch Signing + batchInput := []batchRequestSignItem{ + {"context": "abcd", "input": 
"dGhlIHF1aWNrIGJyb3duIGZveA==", "reference": "uno"}, + {"context": "efgh", "input": "dGhlIHF1aWNrIGJyb3duIGZveA==", "reference": "dos"}, + } + + req.Data = map[string]interface{}{ + "batch_input": batchInput, + } + + outcome = []signOutcome{ + {requestOk: true, valid: true, keyValid: true, reference: "uno"}, + {requestOk: true, valid: true, keyValid: true, reference: "dos"}, + } + + sig = signRequest(req, false, "foo") + verifyRequest(req, false, outcome, "foo", sig, true) + + goodsig := signRequest(req, false, "bar") + verifyRequest(req, false, outcome, "bar", goodsig, true) + + // key doesn't match signatures + outcome[0].valid = false + outcome[1].valid = false + verifyRequest(req, false, outcome, "foo", goodsig, true) + + // Test a bad signature + badsig = sig[0] + badsig = badsig[:len(badsig)-2] + // matching key, but first signature is corrupted + outcome[0].requestOk = false + outcome[1].valid = true + verifyRequest(req, false, outcome, "bar", []string{badsig, goodsig[1]}, true) + + // Verify with missing signatures + outcome[0].valid = false + outcome[1].valid = false + delete(batchInput[0], "signature") + delete(batchInput[1], "signature") + verifyRequest(req, true, outcome, "foo", sig, false) + + // Test missing context + batchInput = []batchRequestSignItem{ + {"context": "abcd", "input": "dGhlIHF1aWNrIGJyb3duIGZveA=="}, + {"input": "dGhlIHF1aWNrIGJyb3duIGZveA=="}, + } + + req.Data = map[string]interface{}{ + "batch_input": batchInput, + } + + sig = signRequest(req, false, "bar") + + outcome[0].requestOk = true + outcome[0].valid = true + outcome[1].requestOk = false + verifyRequest(req, false, outcome, "bar", goodsig, true) + + // Test incorrect context + batchInput = []batchRequestSignItem{ + {"context": "abca", "input": "dGhlIHF1aWNrIGJyb3duIGZveA=="}, + {"context": "efga", "input": "dGhlIHF1aWNrIGJyb3duIGZveA=="}, + } + req.Data = map[string]interface{}{ + "batch_input": batchInput, + } + + outcome[0].requestOk = true + outcome[0].valid = false + outcome[1].requestOk = true + outcome[1].valid = false + verifyRequest(req, false, outcome, "bar", goodsig, true) +} + +func TestTransit_SignVerify_RSA_PSS(t *testing.T) { + t.Run("2048", func(t *testing.T) { + testTransit_SignVerify_RSA_PSS(t, 2048) + }) + t.Run("3072", func(t *testing.T) { + testTransit_SignVerify_RSA_PSS(t, 3072) + }) + t.Run("4096", func(t *testing.T) { + testTransit_SignVerify_RSA_PSS(t, 4096) + }) +} + +func testTransit_SignVerify_RSA_PSS(t *testing.T, bits int) { + b, storage := createBackendWithSysView(t) + + // First create a key + req := &logical.Request{ + Storage: storage, + Operation: logical.UpdateOperation, + Path: "keys/foo", + Data: map[string]interface{}{ + "type": fmt.Sprintf("rsa-%d", bits), + }, + } + _, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatal(err) + } + + signRequest := func(errExpected bool, postpath string) string { + t.Helper() + req.Path = "sign/foo" + postpath + resp, err := b.HandleRequest(context.Background(), req) + if err != nil && !errExpected { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if errExpected { + if !resp.IsError() { + t.Fatalf("bad: should have gotten error response: %#v", *resp) + } + return "" + } + if resp.IsError() { + t.Fatalf("bad: got error response: %#v", *resp) + } + // Since we are reusing the same request, let's clear the salt length each time. 
+ delete(req.Data, "salt_length") + + value, ok := resp.Data["signature"] + if !ok { + t.Fatalf("no signature key found in returned data, got resp data %#v", resp.Data) + } + return value.(string) + } + + verifyRequest := func(errExpected bool, postpath, sig string) { + t.Helper() + req.Path = "verify/foo" + postpath + req.Data["signature"] = sig + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + if errExpected { + return + } + t.Fatalf("got error: %v, sig was %v", err, sig) + } + if resp == nil { + t.Fatal("expected non-nil response") + } + if resp.IsError() { + if errExpected { + return + } + t.Fatalf("bad: got error response: %#v", *resp) + } + value, ok := resp.Data["valid"] + if !ok { + t.Fatalf("no valid key found in returned data, got resp data %#v", resp.Data) + } + if !value.(bool) && !errExpected { + t.Fatalf("verification failed; req was %#v, resp is %#v", *req, *resp) + } else if value.(bool) && errExpected { + t.Fatalf("expected error and didn't get one; req was %#v, resp is %#v", *req, *resp) + } + // Since we are reusing the same request, let's clear the signature each time. + delete(req.Data, "signature") + } + + newReqData := func(hashAlgorithm string, marshalingName string) map[string]interface{} { + return map[string]interface{}{ + "input": "dGhlIHF1aWNrIGJyb3duIGZveA==", + "signature_algorithm": "pss", + "hash_algorithm": hashAlgorithm, + "marshaling_algorithm": marshalingName, + } + } + + signAndVerifyRequest := func(hashAlgorithm string, marshalingName string, signSaltLength string, signErrExpected bool, verifySaltLength string, verifyErrExpected bool) { + t.Log("\t\t\t", signSaltLength, "/", verifySaltLength) + req.Data = newReqData(hashAlgorithm, marshalingName) + + req.Data["salt_length"] = signSaltLength + t.Log("\t\t\t\t", "sign req data:", req.Data) + sig := signRequest(signErrExpected, "") + + req.Data["salt_length"] = verifySaltLength + t.Log("\t\t\t\t", "verify req data:", req.Data) + verifyRequest(verifyErrExpected, "", sig) + } + + invalidSaltLengths := []string{"bar", "-2"} + t.Log("invalidSaltLengths:", invalidSaltLengths) + + autoSaltLengths := []string{"auto", "0"} + t.Log("autoSaltLengths:", autoSaltLengths) + + hashSaltLengths := []string{"hash", "-1"} + t.Log("hashSaltLengths:", hashSaltLengths) + + positiveSaltLengths := []string{"1"} + t.Log("positiveSaltLengths:", positiveSaltLengths) + + nonAutoSaltLengths := append(hashSaltLengths, positiveSaltLengths...) + t.Log("nonAutoSaltLengths:", nonAutoSaltLengths) + + validSaltLengths := append(autoSaltLengths, nonAutoSaltLengths...) 
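+	// An aside for context (hedged, not load-bearing for the test): these
+	// string aliases appear to mirror the sentinel values in Go's crypto/rsa,
+	// where a minimal sketch of the same options would be
+	//
+	//	opts := rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthAuto} // "auto" == 0
+	//	opts.SaltLength = rsa.PSSSaltLengthEqualsHash             // "hash" == -1
+	//
+	// so each pair above is two spellings of the same PSS salt-length option.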
+ t.Log("validSaltLengths:", validSaltLengths) + + testCombinatorics := func(t *testing.T, hashAlgorithm string, marshalingName string) { + t.Log("\t\t", "valid", "/", "invalid salt lengths") + for _, validSaltLength := range validSaltLengths { + for _, invalidSaltLength := range invalidSaltLengths { + signAndVerifyRequest(hashAlgorithm, marshalingName, validSaltLength, false, invalidSaltLength, true) + } + } + + t.Log("\t\t", "invalid", "/", "invalid salt lengths") + for _, invalidSaltLength1 := range invalidSaltLengths { + for _, invalidSaltLength2 := range invalidSaltLengths { + signAndVerifyRequest(hashAlgorithm, marshalingName, invalidSaltLength1, true, invalidSaltLength2, true) + } + } + + t.Log("\t\t", "invalid", "/", "valid salt lengths") + for _, invalidSaltLength := range invalidSaltLengths { + for _, validSaltLength := range validSaltLengths { + signAndVerifyRequest(hashAlgorithm, marshalingName, invalidSaltLength, true, validSaltLength, true) + } + } + + t.Log("\t\t", "valid", "/", "valid salt lengths") + for _, validSaltLength := range validSaltLengths { + signAndVerifyRequest(hashAlgorithm, marshalingName, validSaltLength, false, validSaltLength, false) + } + + t.Log("\t\t", "hash", "/", "hash salt lengths") + for _, hashSaltLength1 := range hashSaltLengths { + for _, hashSaltLength2 := range hashSaltLengths { + if hashSaltLength1 != hashSaltLength2 { + signAndVerifyRequest(hashAlgorithm, marshalingName, hashSaltLength1, false, hashSaltLength2, false) + } + } + } + + t.Log("\t\t", "hash", "/", "positive salt lengths") + for _, hashSaltLength := range hashSaltLengths { + for _, positiveSaltLength := range positiveSaltLengths { + signAndVerifyRequest(hashAlgorithm, marshalingName, hashSaltLength, false, positiveSaltLength, true) + } + } + + t.Log("\t\t", "positive", "/", "hash salt lengths") + for _, positiveSaltLength := range positiveSaltLengths { + for _, hashSaltLength := range hashSaltLengths { + signAndVerifyRequest(hashAlgorithm, marshalingName, positiveSaltLength, false, hashSaltLength, true) + } + } + + t.Log("\t\t", "auto", "/", "auto salt lengths") + for _, autoSaltLength1 := range autoSaltLengths { + for _, autoSaltLength2 := range autoSaltLengths { + if autoSaltLength1 != autoSaltLength2 { + signAndVerifyRequest(hashAlgorithm, marshalingName, autoSaltLength1, false, autoSaltLength2, false) + } + } + } + + t.Log("\t\t", "auto", "/", "non-auto salt lengths") + for _, autoSaltLength := range autoSaltLengths { + for _, nonAutoSaltLength := range nonAutoSaltLengths { + signAndVerifyRequest(hashAlgorithm, marshalingName, autoSaltLength, false, nonAutoSaltLength, true) + } + } + + t.Log("\t\t", "non-auto", "/", "auto salt lengths") + for _, nonAutoSaltLength := range nonAutoSaltLengths { + for _, autoSaltLength := range autoSaltLengths { + signAndVerifyRequest(hashAlgorithm, marshalingName, nonAutoSaltLength, false, autoSaltLength, false) + } + } + } + + testAutoSignAndVerify := func(t *testing.T, hashAlgorithm string, marshalingName string) { + t.Log("\t\t", "Make a signature with an implicit, automatic salt length") + req.Data = newReqData(hashAlgorithm, marshalingName) + t.Log("\t\t\t", "sign req data:", req.Data) + sig := signRequest(false, "") + + t.Log("\t\t", "Verify it with an implicit, automatic salt length") + t.Log("\t\t\t", "verify req data:", req.Data) + verifyRequest(false, "", sig) + + t.Log("\t\t", "Verify it with an explicit, automatic salt length") + for _, autoSaltLength := range autoSaltLengths { + t.Log("\t\t\t", "auto", "/", autoSaltLength) + 
req.Data["salt_length"] = autoSaltLength + t.Log("\t\t\t\t", "verify req data:", req.Data) + verifyRequest(false, "", sig) + } + + t.Log("\t\t", "Try to verify it with an explicit, incorrect salt length") + for _, nonAutoSaltLength := range nonAutoSaltLengths { + t.Log("\t\t\t", "auto", "/", nonAutoSaltLength) + req.Data["salt_length"] = nonAutoSaltLength + t.Log("\t\t\t\t", "verify req data:", req.Data) + verifyRequest(true, "", sig) + } + + t.Log("\t\t", "Make a signature with an explicit, valid salt length & and verify it with an implicit, automatic salt length") + for _, validSaltLength := range validSaltLengths { + t.Log("\t\t\t", validSaltLength, "/", "auto") + + req.Data = newReqData(hashAlgorithm, marshalingName) + req.Data["salt_length"] = validSaltLength + t.Log("\t\t\t", "sign req data:", req.Data) + sig := signRequest(false, "") + + t.Log("\t\t\t", "verify req data:", req.Data) + verifyRequest(false, "", sig) + } + } + + for hashAlgorithm := range keysutil.HashTypeMap { + t.Log("Hash algorithm:", hashAlgorithm) + if hashAlgorithm == "none" { + continue + } + + for marshalingName := range keysutil.MarshalingTypeMap { + t.Log("\t", "Marshaling type:", marshalingName) + testName := fmt.Sprintf("%s-%s", hashAlgorithm, marshalingName) + t.Run(testName, func(t *testing.T) { + if constants.IsFIPS() && strings.HasPrefix(hashAlgorithm, "sha3-") { + t.Skip("\t", "Skipping hashing algo on fips:", hashAlgorithm) + } + + testCombinatorics(t, hashAlgorithm, marshalingName) + testAutoSignAndVerify(t, hashAlgorithm, marshalingName) + }) + } + } +} diff --git a/builtin/logical/transit/path_trim.go b/builtin/logical/transit/path_trim.go new file mode 100644 index 0000000..71f4181 --- /dev/null +++ b/builtin/logical/transit/path_trim.go @@ -0,0 +1,112 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/keysutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func (b *backend) pathTrim() *framework.Path { + return &framework.Path{ + Pattern: "keys/" + framework.GenericNameRegex("name") + "/trim", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationVerb: "trim", + OperationSuffix: "key", + }, + + Fields: map[string]*framework.FieldSchema{ + "name": { + Type: framework.TypeString, + Description: "Name of the key", + }, + "min_available_version": { + Type: framework.TypeInt, + Description: ` +The minimum available version for the key ring. All versions before this +version will be permanently deleted. This value can at most be equal to the +lesser of 'min_decryption_version' and 'min_encryption_version'. 
This is not +allowed to be set when either 'min_encryption_version' or +'min_decryption_version' is set to zero.`, + }, + }, + + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathTrimUpdate(), + }, + + HelpSynopsis: pathTrimHelpSyn, + HelpDescription: pathTrimHelpDesc, + } +} + +func (b *backend) pathTrimUpdate() framework.OperationFunc { + return func(ctx context.Context, req *logical.Request, d *framework.FieldData) (resp *logical.Response, retErr error) { + name := d.Get("name").(string) + + p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{ + Storage: req.Storage, + Name: name, + }, b.GetRandomReader()) + if err != nil { + return nil, err + } + if p == nil { + return logical.ErrorResponse("invalid key name"), logical.ErrInvalidRequest + } + if !b.System().CachingDisabled() { + p.Lock(true) + } + defer p.Unlock() + + minAvailableVersionRaw, ok, err := d.GetOkErr("min_available_version") + if err != nil { + return nil, err + } + if !ok { + return logical.ErrorResponse("missing min_available_version"), nil + } + minAvailableVersion := minAvailableVersionRaw.(int) + + originalMinAvailableVersion := p.MinAvailableVersion + + switch { + case minAvailableVersion < originalMinAvailableVersion: + return logical.ErrorResponse("minimum available version cannot be decremented"), nil + case p.MinEncryptionVersion == 0: + return logical.ErrorResponse("minimum available version cannot be set when minimum encryption version is not set"), nil + case p.MinDecryptionVersion == 0: + return logical.ErrorResponse("minimum available version cannot be set when minimum decryption version is not set"), nil + case minAvailableVersion > p.MinEncryptionVersion: + return logical.ErrorResponse("minimum available version cannot be greater than minmum encryption version"), nil + case minAvailableVersion > p.MinDecryptionVersion: + return logical.ErrorResponse("minimum available version cannot be greater than minimum decryption version"), nil + case minAvailableVersion < 0: + return logical.ErrorResponse("minimum available version cannot be negative"), nil + case minAvailableVersion == 0: + return logical.ErrorResponse("minimum available version should be positive"), nil + } + + // Ensure that cache doesn't get corrupted in error cases + p.MinAvailableVersion = minAvailableVersion + if err := p.Persist(ctx, req.Storage); err != nil { + p.MinAvailableVersion = originalMinAvailableVersion + return nil, err + } + + return b.formatKeyPolicy(p, nil) + } +} + +const pathTrimHelpSyn = `Trim key versions of a named key` + +const pathTrimHelpDesc = ` +This path is used to trim key versions of a named key. Trimming only happens +from the lower end of version numbers. +` diff --git a/builtin/logical/transit/path_trim_test.go b/builtin/logical/transit/path_trim_test.go new file mode 100644 index 0000000..b63d644 --- /dev/null +++ b/builtin/logical/transit/path_trim_test.go @@ -0,0 +1,273 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "testing" + + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/helper/keysutil" + "github.com/hashicorp/vault/sdk/logical" +) + +func TestTransit_Trim(t *testing.T) { + b, storage := createBackendWithSysView(t) + + doReq := func(t *testing.T, req *logical.Request) *logical.Response { + t.Helper() + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err != nil || (resp != nil && resp.IsError()) { + t.Fatalf("got err:\n%#v\nresp:\n%#v\n", err, resp) + } + return resp + } + doErrReq := func(t *testing.T, req *logical.Request) { + t.Helper() + resp, err := b.HandleRequest(namespace.RootContext(nil), req) + if err == nil && (resp == nil || !resp.IsError()) { + t.Fatalf("expected error; resp:\n%#v\n", resp) + } + } + + // Create a key + req := &logical.Request{ + Path: "keys/aes", + Storage: storage, + Operation: logical.UpdateOperation, + } + doReq(t, req) + + // Get the policy and check that the archive has correct number of keys + p, _, err := b.GetPolicy(namespace.RootContext(nil), keysutil.PolicyRequest{ + Storage: storage, + Name: "aes", + }, b.GetRandomReader()) + if err != nil { + t.Fatal(err) + } + + // Archive: 0, 1 + archive, err := p.LoadArchive(namespace.RootContext(nil), storage) + if err != nil { + t.Fatal(err) + } + // Index "0" in the archive is unused. Hence the length of the archived + // keys will always be 1 more than the actual number of keys. + if len(archive.Keys) != 2 { + t.Fatalf("bad: len of archived keys; expected: 2, actual: %d", len(archive.Keys)) + } + + // Ensure that there are 5 key versions, by rotating the key 4 times + for i := 0; i < 4; i++ { + req.Path = "keys/aes/rotate" + req.Data = nil + doReq(t, req) + } + + // Archive: 0, 1, 2, 3, 4, 5 + archive, err = p.LoadArchive(namespace.RootContext(nil), storage) + if err != nil { + t.Fatal(err) + } + if len(archive.Keys) != 6 { + t.Fatalf("bad: len of archived keys; expected: 6, actual: %d", len(archive.Keys)) + } + + // Min available version should not be set when min_encryption_version is not + // set + req.Path = "keys/aes/trim" + req.Data = map[string]interface{}{ + "min_available_version": 1, + } + doErrReq(t, req) + + // Set min_encryption_version to 0 + req.Path = "keys/aes/config" + req.Data = map[string]interface{}{ + "min_encryption_version": 0, + } + doReq(t, req) + + // Min available version should not be converted to 0 for nil values + req.Path = "keys/aes/trim" + req.Data = map[string]interface{}{ + "min_available_version": nil, + } + doErrReq(t, req) + + // Set min_encryption_version to 4 + req.Path = "keys/aes/config" + req.Data = map[string]interface{}{ + "min_encryption_version": 4, + } + doReq(t, req) + + // Set min_decryption_version to 3 + req.Data = map[string]interface{}{ + "min_decryption_version": 3, + } + doReq(t, req) + + // Min available version cannot be greater than min encryption version + req.Path = "keys/aes/trim" + req.Data = map[string]interface{}{ + "min_available_version": 5, + } + doErrReq(t, req) + + // Min available version cannot be greater than min decryption version + req.Data["min_available_version"] = 4 + doErrReq(t, req) + + // Min available version cannot be negative + req.Data["min_available_version"] = -1 + doErrReq(t, req) + + // Min available version should be positive + req.Data["min_available_version"] = 0 + doErrReq(t, req) + + // Trim all keys before version 3. Index 0 and index 1 will be deleted from + // archived keys. 
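+	// (Illustrative CLI equivalent, assuming the backend were mounted at
+	// "transit/":
+	//
+	//	vault write transit/keys/aes/trim min_available_version=3
+	//
+	// This exercises the same keys/<name>/trim endpoint served by
+	// pathTrimUpdate.)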
+ req.Data["min_available_version"] = 3 + doReq(t, req) + + // Archive: 3, 4, 5 + archive, err = p.LoadArchive(namespace.RootContext(nil), storage) + if err != nil { + t.Fatal(err) + } + if len(archive.Keys) != 3 { + t.Fatalf("bad: len of archived keys; expected: 3, actual: %d", len(archive.Keys)) + } + + // Min decryption version should not be less than min available version + req.Path = "keys/aes/config" + req.Data = map[string]interface{}{ + "min_decryption_version": 1, + } + doErrReq(t, req) + + // Min encryption version should not be less than min available version + req.Data = map[string]interface{}{ + "min_encryption_version": 2, + } + doErrReq(t, req) + + // Rotate 5 more times + for i := 0; i < 5; i++ { + doReq(t, &logical.Request{ + Path: "keys/aes/rotate", + Storage: storage, + Operation: logical.UpdateOperation, + }) + } + + // Archive: 3, 4, 5, 6, 7, 8, 9, 10 + archive, err = p.LoadArchive(namespace.RootContext(nil), storage) + if err != nil { + t.Fatal(err) + } + if len(archive.Keys) != 8 { + t.Fatalf("bad: len of archived keys; expected: 8, actual: %d", len(archive.Keys)) + } + + // Set min encryption version to 7 + req.Data = map[string]interface{}{ + "min_encryption_version": 7, + } + doReq(t, req) + + // Set min decryption version to 7 + req.Data = map[string]interface{}{ + "min_decryption_version": 7, + } + doReq(t, req) + + // Trim all versions before 7 + req.Path = "keys/aes/trim" + req.Data = map[string]interface{}{ + "min_available_version": 7, + } + doReq(t, req) + + // Archive: 7, 8, 9, 10 + archive, err = p.LoadArchive(namespace.RootContext(nil), storage) + if err != nil { + t.Fatal(err) + } + if len(archive.Keys) != 4 { + t.Fatalf("bad: len of archived keys; expected: 4, actual: %d", len(archive.Keys)) + } + + // Read the key + req.Path = "keys/aes" + req.Operation = logical.ReadOperation + resp := doReq(t, req) + keys := resp.Data["keys"].(map[string]int64) + if len(keys) != 4 { + t.Fatalf("bad: number of keys; expected: 4, actual: %d", len(keys)) + } + + // Test if moving the min_encryption_version and min_decryption_versions + // are working fine + + // Set min encryption version to 10 + req.Path = "keys/aes/config" + req.Operation = logical.UpdateOperation + req.Data = map[string]interface{}{ + "min_encryption_version": 10, + } + doReq(t, req) + if p.MinEncryptionVersion != 10 { + t.Fatalf("failed to set min encryption version") + } + + // Set min decryption version to 9 + req.Data = map[string]interface{}{ + "min_decryption_version": 9, + } + doReq(t, req) + if p.MinDecryptionVersion != 9 { + t.Fatalf("failed to set min encryption version") + } + + // Reduce the min decryption version to 8 + req.Data = map[string]interface{}{ + "min_decryption_version": 8, + } + doReq(t, req) + if p.MinDecryptionVersion != 8 { + t.Fatalf("failed to set min encryption version") + } + + // Reduce the min encryption version to 8 + req.Data = map[string]interface{}{ + "min_encryption_version": 8, + } + doReq(t, req) + if p.MinDecryptionVersion != 8 { + t.Fatalf("failed to set min decryption version") + } + + // Read the key to ensure that the keys are properly copied from the + // archive into the policy + req.Path = "keys/aes" + req.Operation = logical.ReadOperation + resp = doReq(t, req) + keys = resp.Data["keys"].(map[string]int64) + if len(keys) != 3 { + t.Fatalf("bad: number of keys; expected: 3, actual: %d", len(keys)) + } + + // Ensure that archive has remained unchanged + // Archive: 7, 8, 9, 10 + archive, err = p.LoadArchive(namespace.RootContext(nil), storage) + if err 
!= nil { + t.Fatal(err) + } + if len(archive.Keys) != 4 { + t.Fatalf("bad: len of archived keys; expected: 4, actual: %d", len(archive.Keys)) + } +} diff --git a/builtin/logical/transit/path_wrapping_key.go b/builtin/logical/transit/path_wrapping_key.go new file mode 100644 index 0000000..f27a32a --- /dev/null +++ b/builtin/logical/transit/path_wrapping_key.go @@ -0,0 +1,96 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "crypto/x509" + "encoding/pem" + "fmt" + "strconv" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/keysutil" + "github.com/hashicorp/vault/sdk/logical" +) + +const WrappingKeyName = "wrapping-key" + +func (b *backend) pathWrappingKey() *framework.Path { + return &framework.Path{ + Pattern: "wrapping_key", + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: operationPrefixTransit, + OperationSuffix: "wrapping-key", + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathWrappingKeyRead, + }, + HelpSynopsis: pathWrappingKeyHelpSyn, + HelpDescription: pathWrappingKeyHelpDesc, + } +} + +func (b *backend) pathWrappingKeyRead(ctx context.Context, req *logical.Request, _ *framework.FieldData) (*logical.Response, error) { + p, err := b.getWrappingKey(ctx, req.Storage) + if err != nil { + return nil, err + } + wrappingKey := p.Keys[strconv.Itoa(p.LatestVersion)] + + derBytes, err := x509.MarshalPKIXPublicKey(wrappingKey.RSAKey.Public()) + if err != nil { + return nil, fmt.Errorf("error marshaling RSA public key: %w", err) + } + pemBlock := &pem.Block{ + Type: "PUBLIC KEY", + Bytes: derBytes, + } + pemBytes := pem.EncodeToMemory(pemBlock) + if pemBytes == nil || len(pemBytes) == 0 { + return nil, fmt.Errorf("failed to PEM-encode RSA public key") + } + + publicKeyString := string(pemBytes) + + resp := &logical.Response{ + Data: map[string]interface{}{ + "public_key": publicKeyString, + }, + } + + return resp, nil +} + +func (b *backend) getWrappingKey(ctx context.Context, storage logical.Storage) (*keysutil.Policy, error) { + polReq := keysutil.PolicyRequest{ + Upsert: true, + Storage: storage, + Name: fmt.Sprintf("import/%s", WrappingKeyName), + KeyType: keysutil.KeyType_RSA4096, + Derived: false, + Convergent: false, + Exportable: false, + AllowPlaintextBackup: false, + AutoRotatePeriod: 0, + } + p, _, err := b.GetPolicy(ctx, polReq, b.GetRandomReader()) + if err != nil { + return nil, err + } + if p == nil { + return nil, fmt.Errorf("error retrieving wrapping key: returned policy was nil") + } + if b.System().CachingDisabled() { + p.Unlock() + } + + return p, nil +} + +const ( + pathWrappingKeyHelpSyn = "Returns the public key to use for wrapping imported keys" + pathWrappingKeyHelpDesc = "This path is used to retrieve the RSA-4096 wrapping key " + + "for wrapping keys that are being imported into transit." +) diff --git a/builtin/logical/transit/path_wrapping_key_test.go b/builtin/logical/transit/path_wrapping_key_test.go new file mode 100644 index 0000000..468c3f4 --- /dev/null +++ b/builtin/logical/transit/path_wrapping_key_test.go @@ -0,0 +1,75 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "context" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "testing" + + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + storagePath = "policy/import/" + WrappingKeyName +) + +func TestTransit_WrappingKey(t *testing.T) { + // Set up shared backend for subtests + b, s := createBackendWithStorage(t) + + // Ensure the key does not exist before requesting it. + keyEntry, err := s.Get(context.Background(), storagePath) + if err != nil { + t.Fatalf("error retrieving wrapping key from storage: %s", err) + } + if keyEntry != nil { + t.Fatal("wrapping key unexpectedly exists") + } + + // Generate the key pair by requesting the public key. + req := &logical.Request{ + Storage: s, + Operation: logical.ReadOperation, + Path: "wrapping_key", + } + resp, err := b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("unexpected request error: %s", err) + } + if resp == nil || resp.Data == nil || resp.Data["public_key"] == nil { + t.Fatal("expected non-nil response") + } + pubKeyPEM := resp.Data["public_key"] + + // Ensure the returned key is a 4096-bit RSA key. + pubKeyBlock, _ := pem.Decode([]byte(pubKeyPEM.(string))) + rawPubKey, err := x509.ParsePKIXPublicKey(pubKeyBlock.Bytes) + if err != nil { + t.Fatalf("failed to parse public wrapping key: %s", err) + } + wrappingKey, ok := rawPubKey.(*rsa.PublicKey) + if !ok || wrappingKey.Size() != 512 { + t.Fatal("public wrapping key is not a 4096-bit RSA key") + } + + // Request the wrapping key again to ensure it isn't regenerated. + req = &logical.Request{ + Storage: s, + Operation: logical.ReadOperation, + Path: "wrapping_key", + } + resp, err = b.HandleRequest(context.Background(), req) + if err != nil { + t.Fatalf("unexpected request error: %s", err) + } + if resp == nil || resp.Data == nil || resp.Data["public_key"] == nil { + t.Fatal("expected non-nil response") + } + if resp.Data["public_key"] != pubKeyPEM { + t.Fatal("wrapping key public component changed between requests") + } +} diff --git a/builtin/logical/transit/stepwise_test.go b/builtin/logical/transit/stepwise_test.go new file mode 100644 index 0000000..2b40cea --- /dev/null +++ b/builtin/logical/transit/stepwise_test.go @@ -0,0 +1,238 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package transit + +import ( + "encoding/base64" + "fmt" + "os" + "testing" + + stepwise "github.com/hashicorp/vault-testing-stepwise" + dockerEnvironment "github.com/hashicorp/vault-testing-stepwise/environments/docker" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/keysutil" + "github.com/mitchellh/mapstructure" +) + +// TestBackend_basic_docker is an example test using the Docker Environment +func TestAccBackend_basic_docker(t *testing.T) { + decryptData := make(map[string]interface{}) + envOptions := stepwise.MountOptions{ + RegistryName: "updatedtransit", + PluginType: api.PluginTypeSecrets, + PluginName: "transit", + MountPathPrefix: "transit_temp", + } + stepwise.Run(t, stepwise.Case{ + Environment: dockerEnvironment.NewEnvironment("updatedtransit", &envOptions), + Steps: []stepwise.Step{ + testAccStepwiseListPolicy(t, "test", true), + testAccStepwiseWritePolicy(t, "test", true), + testAccStepwiseListPolicy(t, "test", false), + testAccStepwiseReadPolicy(t, "test", false, true), + testAccStepwiseEncryptContext(t, "test", testPlaintext, "my-cool-context", decryptData), + testAccStepwiseDecrypt(t, "test", testPlaintext, decryptData), + testAccStepwiseEnableDeletion(t, "test"), + testAccStepwiseDeletePolicy(t, "test"), + testAccStepwiseReadPolicy(t, "test", true, true), + }, + }) +} + +func testAccStepwiseWritePolicy(t *testing.T, name string, derived bool) stepwise.Step { + ts := stepwise.Step{ + Operation: stepwise.WriteOperation, + Path: "keys/" + name, + Data: map[string]interface{}{ + "derived": derived, + }, + } + if os.Getenv("TRANSIT_ACC_KEY_TYPE") == "CHACHA" { + ts.Data["type"] = "chacha20-poly1305" + } + return ts +} + +func testAccStepwiseListPolicy(t *testing.T, name string, expectNone bool) stepwise.Step { + return stepwise.Step{ + Operation: stepwise.ListOperation, + Path: "keys", + Assert: func(resp *api.Secret, err error) error { + if (resp == nil || len(resp.Data) == 0) && !expectNone { + return fmt.Errorf("missing response") + } + if expectNone && resp != nil { + return fmt.Errorf("response data when expecting none") + } + + if expectNone && resp == nil { + return nil + } + + var d struct { + Keys []string `mapstructure:"keys"` + } + if err := mapstructure.Decode(resp.Data, &d); err != nil { + return err + } + if len(d.Keys) == 0 { + return fmt.Errorf("missing keys") + } + if len(d.Keys) > 1 { + return fmt.Errorf("only 1 key expected, %d returned", len(d.Keys)) + } + if d.Keys[0] != name { + return fmt.Errorf("Actual key: %s\nExpected key: %s", d.Keys[0], name) + } + return nil + }, + } +} + +func testAccStepwiseReadPolicy(t *testing.T, name string, expectNone, derived bool) stepwise.Step { + t.Helper() + return testAccStepwiseReadPolicyWithVersions(t, name, expectNone, derived, 1, 0) +} + +func testAccStepwiseReadPolicyWithVersions(t *testing.T, name string, expectNone, derived bool, minDecryptionVersion int, minEncryptionVersion int) stepwise.Step { + t.Helper() + return stepwise.Step{ + Operation: stepwise.ReadOperation, + Path: "keys/" + name, + Assert: func(resp *api.Secret, err error) error { + t.Helper() + if resp == nil && !expectNone { + return fmt.Errorf("missing response") + } else if expectNone { + if resp != nil { + return fmt.Errorf("response when expecting none") + } + return nil + } + var d struct { + Name string `mapstructure:"name"` + Key []byte `mapstructure:"key"` + Keys map[string]int64 `mapstructure:"keys"` + Type string `mapstructure:"type"` + Derived bool 
`mapstructure:"derived"`
+				KDF                  string           `mapstructure:"kdf"`
+				DeletionAllowed      bool             `mapstructure:"deletion_allowed"`
+				ConvergentEncryption bool             `mapstructure:"convergent_encryption"`
+				MinDecryptionVersion int              `mapstructure:"min_decryption_version"`
+				MinEncryptionVersion int              `mapstructure:"min_encryption_version"`
+			}
+			if err := mapstructure.Decode(resp.Data, &d); err != nil {
+				return err
+			}
+
+			if d.Name != name {
+				return fmt.Errorf("bad name: %#v", d)
+			}
+			if os.Getenv("TRANSIT_ACC_KEY_TYPE") == "CHACHA" {
+				if d.Type != keysutil.KeyType(keysutil.KeyType_ChaCha20_Poly1305).String() {
+					return fmt.Errorf("bad key type: %#v", d)
+				}
+			} else if d.Type != keysutil.KeyType(keysutil.KeyType_AES256_GCM96).String() {
+				return fmt.Errorf("bad key type: %#v", d)
+			}
+			// Should NOT get a key back
+			if d.Key != nil {
+				return fmt.Errorf("unexpected key found")
+			}
+			if d.Keys == nil {
+				return fmt.Errorf("no keys found")
+			}
+			if d.MinDecryptionVersion != minDecryptionVersion {
+				return fmt.Errorf("minimum decryption version mismatch, expected (%#v), found (%#v)", minDecryptionVersion, d.MinDecryptionVersion)
+			}
+			if d.MinEncryptionVersion != minEncryptionVersion {
+				return fmt.Errorf("minimum encryption version mismatch, expected (%#v), found (%#v)", minEncryptionVersion, d.MinEncryptionVersion)
+			}
+			if d.DeletionAllowed {
+				return fmt.Errorf("expected DeletionAllowed to be false, but got true")
+			}
+			if d.Derived != derived {
+				return fmt.Errorf("derived mismatch, expected (%t), got (%t)", derived, d.Derived)
+			}
+			if derived && d.KDF != "hkdf_sha256" {
+				return fmt.Errorf("expected KDF to be hkdf_sha256, but got (%s)", d.KDF)
+			}
+			return nil
+		},
+	}
+}
+
+func testAccStepwiseEncryptContext(
+	t *testing.T, name, plaintext, context string, decryptData map[string]interface{},
+) stepwise.Step {
+	return stepwise.Step{
+		Operation: stepwise.UpdateOperation,
+		Path:      "encrypt/" + name,
+		Data: map[string]interface{}{
+			"plaintext": base64.StdEncoding.EncodeToString([]byte(plaintext)),
+			"context":   base64.StdEncoding.EncodeToString([]byte(context)),
+		},
+		Assert: func(resp *api.Secret, err error) error {
+			var d struct {
+				Ciphertext string `mapstructure:"ciphertext"`
+			}
+			if err := mapstructure.Decode(resp.Data, &d); err != nil {
+				return err
+			}
+			if d.Ciphertext == "" {
+				return fmt.Errorf("missing ciphertext")
+			}
+			decryptData["ciphertext"] = d.Ciphertext
+			decryptData["context"] = base64.StdEncoding.EncodeToString([]byte(context))
+			return nil
+		},
+	}
+}
+
+func testAccStepwiseDecrypt(
+	t *testing.T, name, plaintext string, decryptData map[string]interface{},
+) stepwise.Step {
+	return stepwise.Step{
+		Operation: stepwise.UpdateOperation,
+		Path:      "decrypt/" + name,
+		Data:      decryptData,
+		Assert: func(resp *api.Secret, err error) error {
+			var d struct {
+				Plaintext string `mapstructure:"plaintext"`
+			}
+			if err := mapstructure.Decode(resp.Data, &d); err != nil {
+				return err
+			}
+
+			// Decode the base64
+			plainRaw, err := base64.StdEncoding.DecodeString(d.Plaintext)
+			if err != nil {
+				return err
+			}
+
+			if string(plainRaw) != plaintext {
+				return fmt.Errorf("plaintext mismatch: %s expect: %s, decryptData was %#v", plainRaw, plaintext, decryptData)
+			}
+			return nil
+		},
+	}
+}
+
+func testAccStepwiseEnableDeletion(t *testing.T, name string) stepwise.Step {
+	return stepwise.Step{
+		Operation: stepwise.UpdateOperation,
+		Path:      "keys/" + name + "/config",
+		Data: map[string]interface{}{
+			"deletion_allowed": true,
+		},
+	}
+}
+
+func testAccStepwiseDeletePolicy(t *testing.T, name
string) stepwise.Step { + return stepwise.Step{ + Operation: stepwise.DeleteOperation, + Path: "keys/" + name, + } +} diff --git a/builtin/plugin/backend.go b/builtin/plugin/backend.go new file mode 100644 index 0000000..04606bc --- /dev/null +++ b/builtin/plugin/backend.go @@ -0,0 +1,304 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "context" + "fmt" + "net/rpc" + "reflect" + "sync" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-uuid" + v5 "github.com/hashicorp/vault/builtin/plugin/v5" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" + bplugin "github.com/hashicorp/vault/sdk/plugin" +) + +var ( + ErrMismatchType = fmt.Errorf("mismatch on mounted backend and plugin backend type") + ErrMismatchPaths = fmt.Errorf("mismatch on mounted backend and plugin backend special paths") +) + +// Factory returns a configured plugin logical.Backend. +func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { + merr := &multierror.Error{} + _, ok := conf.Config["plugin_name"] + if !ok { + return nil, fmt.Errorf("plugin_name not provided") + } + b, err := v5.Backend(ctx, conf) + if err == nil { + if err := b.Setup(ctx, conf); err != nil { + return nil, err + } + return b, nil + } + merr = multierror.Append(merr, err) + + b, err = Backend(ctx, conf) + if err != nil { + merr = multierror.Append(merr, err) + return nil, fmt.Errorf("invalid backend version: %s", merr) + } + + if err := b.Setup(ctx, conf); err != nil { + merr = multierror.Append(merr, err) + return nil, merr.ErrorOrNil() + } + return b, nil +} + +// Backend returns an instance of the backend, either as a plugin if external +// or as a concrete implementation if builtin, casted as logical.Backend. +func Backend(ctx context.Context, conf *logical.BackendConfig) (*PluginBackend, error) { + var b PluginBackend + + name := conf.Config["plugin_name"] + pluginType, err := consts.ParsePluginType(conf.Config["plugin_type"]) + if err != nil { + return nil, err + } + version := conf.Config["plugin_version"] + + sys := conf.System + + // NewBackendWithVersion with isMetadataMode set to true + raw, err := bplugin.NewBackendWithVersion(ctx, name, pluginType, sys, conf, true, version) + if err != nil { + return nil, err + } + err = raw.Setup(ctx, conf) + if err != nil { + raw.Cleanup(ctx) + return nil, err + } + // Get SpecialPaths and BackendType + paths := raw.SpecialPaths() + btype := raw.Type() + runningVersion := "" + if versioner, ok := raw.(logical.PluginVersioner); ok { + runningVersion = versioner.PluginVersion().Version + } + + // Cleanup meta plugin backend + raw.Cleanup(ctx) + + // Initialize b.Backend with placeholder backend since plugin + // backends will need to be lazy loaded. 
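+	// Note that only the metadata collected above (special paths, backend
+	// type, and reported plugin version) survives into this placeholder; the
+	// plugin process itself is only started on first use, via lazyLoadBackend.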
+	b.Backend = &framework.Backend{
+		PathsSpecial:   paths,
+		BackendType:    btype,
+		RunningVersion: runningVersion,
+	}
+
+	b.config = conf
+
+	return &b, nil
+}
+
+// PluginBackend is a thin wrapper around plugin.BackendPluginClient
+type PluginBackend struct {
+	Backend logical.Backend
+	sync.RWMutex
+
+	config *logical.BackendConfig
+
+	// Used to detect if we already reloaded
+	canary string
+
+	// Used to detect if plugin is set
+	loaded bool
+}
+
+// startBackend starts a plugin backend
+func (b *PluginBackend) startBackend(ctx context.Context, storage logical.Storage) error {
+	pluginName := b.config.Config["plugin_name"]
+	pluginType, err := consts.ParsePluginType(b.config.Config["plugin_type"])
+	if err != nil {
+		return err
+	}
+
+	// Ensure proper cleanup of the backend (i.e. call client.Kill())
+	b.Backend.Cleanup(ctx)
+
+	nb, err := bplugin.NewBackendWithVersion(ctx, pluginName, pluginType, b.config.System, b.config, false, b.config.Config["plugin_version"])
+	if err != nil {
+		return err
+	}
+	err = nb.Setup(ctx, b.config)
+	if err != nil {
+		nb.Cleanup(ctx)
+		return err
+	}
+
+	// If the backend has not been loaded (i.e. still in metadata mode),
+	// check if type and special paths still match
+	if !b.loaded {
+		if b.Backend.Type() != nb.Type() {
+			nb.Cleanup(ctx)
+			b.Backend.Logger().Warn("failed to start plugin process", "plugin", pluginName, "error", ErrMismatchType)
+			return ErrMismatchType
+		}
+		if !reflect.DeepEqual(b.Backend.SpecialPaths(), nb.SpecialPaths()) {
+			nb.Cleanup(ctx)
+			b.Backend.Logger().Warn("failed to start plugin process", "plugin", pluginName, "error", ErrMismatchPaths)
+			return ErrMismatchPaths
+		}
+	}
+
+	b.Backend = nb
+	b.loaded = true
+
+	// call Initialize() explicitly here.
+	return b.Backend.Initialize(ctx, &logical.InitializationRequest{
+		Storage: storage,
+	})
+}
+
+// lazyLoadBackend lazy-loads the backend before running a method
+func (b *PluginBackend) lazyLoadBackend(ctx context.Context, storage logical.Storage, methodWrapper func() error) error {
+	b.RLock()
+	canary := b.canary
+
+	// Lazy-load backend
+	if !b.loaded {
+		// Upgrade lock
+		b.RUnlock()
+		b.Lock()
+		// Check once more after lock swap
+		if !b.loaded {
+			err := b.startBackend(ctx, storage)
+			if err != nil {
+				b.Unlock()
+				return err
+			}
+		}
+		b.Unlock()
+		b.RLock()
+	}
+
+	err := methodWrapper()
+	b.RUnlock()
+
+	// Need to compare string value for case where err comes from plugin RPC
+	// and is returned as plugin.BasicError type.
+	if err != nil &&
+		(err.Error() == rpc.ErrShutdown.Error() || err == bplugin.ErrPluginShutdown) {
+		// Reload plugin if it's an rpc.ErrShutdown
+		b.Lock()
+		if b.canary == canary {
+			b.Backend.Logger().Debug("reloading plugin backend", "plugin", b.config.Config["plugin_name"])
+			err := b.startBackend(ctx, storage)
+			if err != nil {
+				b.Unlock()
+				return err
+			}
+			b.canary, err = uuid.GenerateUUID()
+			if err != nil {
+				b.Unlock()
+				return err
+			}
+		}
+		b.Unlock()
+
+		// Try once more
+		b.RLock()
+		defer b.RUnlock()
+		return methodWrapper()
+	}
+	return err
+}
+
+// HandleRequest is a thin wrapper implementation of HandleRequest that includes
+// automatic plugin reload.
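+// If the wrapped call fails with rpc.ErrShutdown (or the SDK's
+// ErrPluginShutdown), lazyLoadBackend restarts the plugin process once and
+// retries the call a single time before surfacing the error.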
+func (b *PluginBackend) HandleRequest(ctx context.Context, req *logical.Request) (resp *logical.Response, err error) { + err = b.lazyLoadBackend(ctx, req.Storage, func() error { + var merr error + resp, merr = b.Backend.HandleRequest(ctx, req) + return merr + }) + + return +} + +// HandleExistenceCheck is a thin wrapper implementation of HandleExistenceCheck +// that includes automatic plugin reload. +func (b *PluginBackend) HandleExistenceCheck(ctx context.Context, req *logical.Request) (checkFound bool, exists bool, err error) { + err = b.lazyLoadBackend(ctx, req.Storage, func() error { + var merr error + checkFound, exists, merr = b.Backend.HandleExistenceCheck(ctx, req) + return merr + }) + + return +} + +// Initialize is intentionally a no-op here, the backend will instead be +// initialized when it is lazily loaded. +func (b *PluginBackend) Initialize(ctx context.Context, req *logical.InitializationRequest) error { + return nil +} + +// SpecialPaths is a thin wrapper used to ensure we grab the lock for race purposes +func (b *PluginBackend) SpecialPaths() *logical.Paths { + b.RLock() + defer b.RUnlock() + return b.Backend.SpecialPaths() +} + +// System is a thin wrapper used to ensure we grab the lock for race purposes +func (b *PluginBackend) System() logical.SystemView { + b.RLock() + defer b.RUnlock() + return b.Backend.System() +} + +// Logger is a thin wrapper used to ensure we grab the lock for race purposes +func (b *PluginBackend) Logger() log.Logger { + b.RLock() + defer b.RUnlock() + return b.Backend.Logger() +} + +// Cleanup is a thin wrapper used to ensure we grab the lock for race purposes +func (b *PluginBackend) Cleanup(ctx context.Context) { + b.RLock() + defer b.RUnlock() + b.Backend.Cleanup(ctx) +} + +// InvalidateKey is a thin wrapper used to ensure we grab the lock for race purposes +func (b *PluginBackend) InvalidateKey(ctx context.Context, key string) { + b.RLock() + defer b.RUnlock() + b.Backend.InvalidateKey(ctx, key) +} + +// Setup is a thin wrapper used to ensure we grab the lock for race purposes +func (b *PluginBackend) Setup(ctx context.Context, config *logical.BackendConfig) error { + b.RLock() + defer b.RUnlock() + return b.Backend.Setup(ctx, config) +} + +// Type is a thin wrapper used to ensure we grab the lock for race purposes +func (b *PluginBackend) Type() logical.BackendType { + b.RLock() + defer b.RUnlock() + return b.Backend.Type() +} + +func (b *PluginBackend) PluginVersion() logical.PluginVersion { + if versioner, ok := b.Backend.(logical.PluginVersioner); ok { + return versioner.PluginVersion() + } + return logical.EmptyPluginVersion +} + +var _ logical.PluginVersioner = (*PluginBackend)(nil) diff --git a/builtin/plugin/backend_lazyLoad_test.go b/builtin/plugin/backend_lazyLoad_test.go new file mode 100644 index 0000000..b2f6303 --- /dev/null +++ b/builtin/plugin/backend_lazyLoad_test.go @@ -0,0 +1,203 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "context" + "errors" + "testing" + + "github.com/hashicorp/vault/sdk/helper/logging" + + "github.com/hashicorp/vault/sdk/helper/pluginutil" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/plugin" +) + +func TestBackend_lazyLoad(t *testing.T) { + // normal load + var invocations int + b := testLazyLoad(t, func() error { + invocations++ + return nil + }) + if invocations != 1 { + t.Fatalf("expected 1 invocation") + } + if b.canary != "" { + t.Fatalf("expected empty canary") + } + + // load with plugin shutdown + invocations = 0 + b = testLazyLoad(t, func() error { + invocations++ + if invocations == 1 { + return plugin.ErrPluginShutdown + } + return nil + }) + if invocations != 2 { + t.Fatalf("expected 2 invocations") + } + if b.canary == "" { + t.Fatalf("expected canary") + } +} + +func testLazyLoad(t *testing.T, methodWrapper func() error) *PluginBackend { + sysView := newTestSystemView() + + ctx := context.Background() + config := &logical.BackendConfig{ + Logger: logging.NewVaultLogger(hclog.Trace), + System: sysView, + Config: map[string]string{ + "plugin_name": "test-plugin", + "plugin_type": "secret", + }, + } + + // this is a dummy plugin that hasn't really been loaded yet + orig, err := plugin.NewBackend(ctx, "test-plugin", consts.PluginTypeSecrets, sysView, config, true) + if err != nil { + t.Fatal(err) + } + + b := &PluginBackend{ + Backend: orig, + config: config, + } + + // lazy load + err = b.lazyLoadBackend(ctx, &logical.InmemStorage{}, methodWrapper) + if err != nil { + t.Fatal(err) + } + if !b.loaded { + t.Fatalf("not loaded") + } + + // make sure dummy plugin was handled properly + ob := orig.(*testBackend) + if !ob.cleaned { + t.Fatalf("not cleaned") + } + if ob.setup { + t.Fatalf("setup") + } + if ob.initialized { + t.Fatalf("initialized") + } + + // make sure our newly initialized plugin was handled properly + nb := b.Backend.(*testBackend) + if nb.cleaned { + t.Fatalf("cleaned") + } + if !nb.setup { + t.Fatalf("not setup") + } + if !nb.initialized { + t.Fatalf("not initialized") + } + + return b +} + +//------------------------------------------------------------------ + +type testBackend struct { + cleaned bool + setup bool + initialized bool +} + +var _ logical.Backend = (*testBackend)(nil) + +func (b *testBackend) Cleanup(context.Context) { + b.cleaned = true +} + +func (b *testBackend) Setup(context.Context, *logical.BackendConfig) error { + b.setup = true + return nil +} + +func (b *testBackend) Initialize(context.Context, *logical.InitializationRequest) error { + b.initialized = true + return nil +} + +func (b *testBackend) Type() logical.BackendType { + return logical.TypeLogical +} + +func (b *testBackend) SpecialPaths() *logical.Paths { + return &logical.Paths{ + Root: []string{"test-root"}, + } +} + +func (b *testBackend) Logger() hclog.Logger { + return logging.NewVaultLogger(hclog.Trace) +} + +func (b *testBackend) HandleRequest(context.Context, *logical.Request) (*logical.Response, error) { + panic("not needed") +} + +func (b *testBackend) System() logical.SystemView { + panic("not needed") +} + +func (b *testBackend) HandleExistenceCheck(context.Context, *logical.Request) (bool, bool, error) { + panic("not needed") +} + +func (b *testBackend) InvalidateKey(context.Context, string) { + panic("not needed") +} + 
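The `lazyLoadBackend` implementation exercised by this test combines two concurrency patterns that are easy to miss in the diff: a read-lock fast path that is upgraded to a write lock with a re-check (double-checked locking, since another goroutine may have loaded the backend while the locks were swapped), and a `canary` string regenerated on every reload so that many goroutines observing `rpc.ErrShutdown` at once trigger at most one reload before retrying. A minimal standalone sketch of the same shape; the names here are illustrative, not from the Vault SDK:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errShutdown = errors.New("plugin shut down") // stands in for rpc.ErrShutdown

// reloader guards a resource that can die and must be restarted at most once
// per failure, even when many goroutines hit the same error concurrently.
type reloader struct {
	mu     sync.RWMutex
	canary int // bumped on every reload
}

func (r *reloader) call(method func() error, reload func() error) error {
	r.mu.RLock()
	canary := r.canary
	err := method()
	r.mu.RUnlock()

	if errors.Is(err, errShutdown) {
		r.mu.Lock()
		// Only the first goroutine to observe the failure reloads; the rest
		// see a changed canary and skip straight to the retry.
		if r.canary == canary {
			if rerr := reload(); rerr != nil {
				r.mu.Unlock()
				return rerr
			}
			r.canary++
		}
		r.mu.Unlock()

		// Retry the wrapped method exactly once after the reload.
		r.mu.RLock()
		defer r.mu.RUnlock()
		return method()
	}
	return err
}

func main() {
	r := &reloader{}
	failed := false
	err := r.call(
		func() error { // method: fails once, then succeeds after the reload
			if !failed {
				failed = true
				return errShutdown
			}
			return nil
		},
		func() error { fmt.Println("reloading backend"); return nil },
	)
	fmt.Println("final error:", err)
}
```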
+//------------------------------------------------------------------ + +type testSystemView struct { + logical.StaticSystemView + factory logical.Factory +} + +func newTestSystemView() testSystemView { + return testSystemView{ + factory: func(_ context.Context, _ *logical.BackendConfig) (logical.Backend, error) { + return &testBackend{}, nil + }, + } +} + +func (v testSystemView) LookupPlugin(context.Context, string, consts.PluginType) (*pluginutil.PluginRunner, error) { + return &pluginutil.PluginRunner{ + Name: "test-plugin-runner", + Builtin: true, + BuiltinFactory: func() (interface{}, error) { + return v.factory, nil + }, + }, nil +} + +func (v testSystemView) LookupPluginVersion(context.Context, string, consts.PluginType, string) (*pluginutil.PluginRunner, error) { + return &pluginutil.PluginRunner{ + Name: "test-plugin-runner", + Builtin: true, + BuiltinFactory: func() (interface{}, error) { + return v.factory, nil + }, + }, nil +} + +func (v testSystemView) ListVersionedPlugins(_ context.Context, _ consts.PluginType) ([]pluginutil.VersionedPlugin, error) { + return nil, errors.New("ListVersionedPlugins not implemented for testSystemView") +} diff --git a/builtin/plugin/backend_test.go b/builtin/plugin/backend_test.go new file mode 100644 index 0000000..28dd1e3 --- /dev/null +++ b/builtin/plugin/backend_test.go @@ -0,0 +1,145 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin_test + +import ( + "context" + "fmt" + "os" + "testing" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/plugin" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/hashicorp/vault/sdk/logical" + logicalPlugin "github.com/hashicorp/vault/sdk/plugin" + "github.com/hashicorp/vault/sdk/plugin/mock" + "github.com/hashicorp/vault/vault" +) + +func TestBackend_impl(t *testing.T) { + var _ logical.Backend = &plugin.PluginBackend{} +} + +func TestBackend(t *testing.T) { + pluginCmds := []string{"TestBackend_PluginMain", "TestBackend_PluginMain_Multiplexed"} + + for _, pluginCmd := range pluginCmds { + t.Run(pluginCmd, func(t *testing.T) { + config, cleanup := testConfig(t, pluginCmd) + defer cleanup() + + _, err := plugin.Backend(context.Background(), config) + if err != nil { + t.Fatal(err) + } + }) + } +} + +func TestBackend_Factory(t *testing.T) { + pluginCmds := []string{"TestBackend_PluginMain", "TestBackend_PluginMain_Multiplexed"} + + for _, pluginCmd := range pluginCmds { + t.Run(pluginCmd, func(t *testing.T) { + config, cleanup := testConfig(t, pluginCmd) + defer cleanup() + + _, err := plugin.Factory(context.Background(), config) + if err != nil { + t.Fatal(err) + } + }) + } +} + +func TestBackend_PluginMain(t *testing.T) { + args := []string{} + if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" && os.Getenv(pluginutil.PluginMetadataModeEnv) != "true" { + return + } + + caPEM := os.Getenv(pluginutil.PluginCACertPEMEnv) + if caPEM == "" { + t.Fatal("CA cert not passed in") + } + + args = append(args, fmt.Sprintf("--ca-cert=%s", caPEM)) + + apiClientMeta := &api.PluginAPIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(args) + tlsConfig := apiClientMeta.GetTLSConfig() + tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig) + + err := logicalPlugin.Serve(&logicalPlugin.ServeOpts{ + BackendFactoryFunc: mock.Factory, + TLSProviderFunc: 
tlsProviderFunc,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestBackend_PluginMain_Multiplexed(t *testing.T) {
+	args := []string{}
+	if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" && os.Getenv(pluginutil.PluginMetadataModeEnv) != "true" {
+		return
+	}
+
+	caPEM := os.Getenv(pluginutil.PluginCACertPEMEnv)
+	if caPEM == "" {
+		t.Fatal("CA cert not passed in")
+	}
+
+	args = append(args, fmt.Sprintf("--ca-cert=%s", caPEM))
+
+	apiClientMeta := &api.PluginAPIClientMeta{}
+	flags := apiClientMeta.FlagSet()
+	flags.Parse(args)
+	tlsConfig := apiClientMeta.GetTLSConfig()
+	tlsProviderFunc := api.VaultPluginTLSProvider(tlsConfig)
+
+	err := logicalPlugin.ServeMultiplex(&logicalPlugin.ServeOpts{
+		BackendFactoryFunc: mock.Factory,
+		TLSProviderFunc:    tlsProviderFunc,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func testConfig(t *testing.T, pluginCmd string) (*logical.BackendConfig, func()) {
+	cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{
+		HandlerFunc: vaulthttp.Handler,
+	})
+	cluster.Start()
+	cores := cluster.Cores
+
+	core := cores[0]
+
+	sys := vault.TestDynamicSystemView(core.Core, nil)
+
+	config := &logical.BackendConfig{
+		Logger: logging.NewVaultLogger(log.Debug),
+		System: sys,
+		Config: map[string]string{
+			"plugin_name":    "mock-plugin",
+			"plugin_type":    "secret",
+			"plugin_version": "v0.0.0+mock",
+		},
+	}
+
+	os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile)
+
+	vault.TestAddTestPlugin(t, core.Core, "mock-plugin", consts.PluginTypeSecrets, "", pluginCmd, []string{}, "")
+
+	return config, func() {
+		cluster.Cleanup()
+	}
+}
diff --git a/builtin/plugin/mock_plugin_test.go b/builtin/plugin/mock_plugin_test.go
new file mode 100644
index 0000000..9279c82
--- /dev/null
+++ b/builtin/plugin/mock_plugin_test.go
@@ -0,0 +1,11 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+	_ "github.com/hashicorp/vault-plugin-mock"
+)
+
+// This file exists to force an import of vault-plugin-mock (which itself does nothing),
+// for purposes of CI and GitHub actions testing between plugin repos and Vault.
diff --git a/builtin/plugin/v5/backend.go b/builtin/plugin/v5/backend.go
new file mode 100644
index 0000000..eac311b
--- /dev/null
+++ b/builtin/plugin/v5/backend.go
@@ -0,0 +1,170 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+	"context"
+	"net/rpc"
+	"sync"
+
+	"github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/vault/sdk/helper/consts"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/hashicorp/vault/sdk/plugin"
+	bplugin "github.com/hashicorp/vault/sdk/plugin"
+)
+
+// Backend returns an instance of the backend, either as a plugin if external
+// or as a concrete implementation if builtin, cast as logical.Backend.
+func Backend(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
+	var b backend
+	name := conf.Config["plugin_name"]
+	pluginType, err := consts.ParsePluginType(conf.Config["plugin_type"])
+	if err != nil {
+		return nil, err
+	}
+	pluginVersion := conf.Config["plugin_version"]
+
+	sys := conf.System
+
+	raw, err := plugin.NewBackendV5(ctx, name, pluginType, pluginVersion, sys, conf)
+	if err != nil {
+		return nil, err
+	}
+	b.Backend = raw
+	b.config = conf
+
+	return &b, nil
+}
+
+// backend is a thin wrapper around a builtin plugin or a plugin.BackendPluginClientV5
+type backend struct {
+	logical.Backend
+	mu sync.RWMutex
+
+	config *logical.BackendConfig
+
+	// Used to detect if we already reloaded
+	canary string
+}
+
+func (b *backend) reloadBackend(ctx context.Context, storage logical.Storage) error {
+	pluginName := b.config.Config["plugin_name"]
+	pluginType, err := consts.ParsePluginType(b.config.Config["plugin_type"])
+	if err != nil {
+		return err
+	}
+	pluginVersion := b.config.Config["plugin_version"]
+
+	b.Logger().Debug("plugin: reloading plugin backend", "plugin", pluginName)
+
+	// Ensure proper cleanup of the backend
+	// Pass a context value so that the plugin client will call the appropriate
+	// cleanup method for reloading
+	reloadCtx := context.WithValue(ctx, plugin.ContextKeyPluginReload, "reload")
+	b.Backend.Cleanup(reloadCtx)
+
+	nb, err := plugin.NewBackendV5(ctx, pluginName, pluginType, pluginVersion, b.config.System, b.config)
+	if err != nil {
+		return err
+	}
+	err = nb.Setup(ctx, b.config)
+	if err != nil {
+		return err
+	}
+	b.Backend = nb
+
+	// Re-initialize the backend in case plugin was reloaded
+	// after it crashed
+	err = b.Backend.Initialize(ctx, &logical.InitializationRequest{
+		Storage: storage,
+	})
+
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// HandleRequest is a thin wrapper implementation of HandleRequest that includes automatic plugin reload.
+func (b *backend) HandleRequest(ctx context.Context, req *logical.Request) (*logical.Response, error) {
+	b.mu.RLock()
+	canary := b.canary
+	resp, err := b.Backend.HandleRequest(ctx, req)
+	b.mu.RUnlock()
+	// Need to compare the string value for the case where err comes from plugin RPC
+	// and is returned as plugin.BasicError type.
+	if err != nil &&
+		(err.Error() == rpc.ErrShutdown.Error() || err == bplugin.ErrPluginShutdown) {
+		// Reload plugin if it's an rpc.ErrShutdown
+		b.mu.Lock()
+		if b.canary == canary {
+			err := b.reloadBackend(ctx, req.Storage)
+			if err != nil {
+				b.mu.Unlock()
+				return nil, err
+			}
+			b.canary, err = uuid.GenerateUUID()
+			if err != nil {
+				b.mu.Unlock()
+				return nil, err
+			}
+		}
+		b.mu.Unlock()
+
+		// Try request once more
+		b.mu.RLock()
+		defer b.mu.RUnlock()
+		return b.Backend.HandleRequest(ctx, req)
+	}
+	return resp, err
+}
+
+// HandleExistenceCheck is a thin wrapper implementation of HandleExistenceCheck that includes automatic plugin reload.
+func (b *backend) HandleExistenceCheck(ctx context.Context, req *logical.Request) (bool, bool, error) { + b.mu.RLock() + canary := b.canary + checkFound, exists, err := b.Backend.HandleExistenceCheck(ctx, req) + b.mu.RUnlock() + if err != nil && + (err.Error() == rpc.ErrShutdown.Error() || err == bplugin.ErrPluginShutdown) { + // Reload plugin if it's an rpc.ErrShutdown + b.mu.Lock() + if b.canary == canary { + err := b.reloadBackend(ctx, req.Storage) + if err != nil { + b.mu.Unlock() + return false, false, err + } + b.canary, err = uuid.GenerateUUID() + if err != nil { + b.mu.Unlock() + return false, false, err + } + } + b.mu.Unlock() + + // Try request once more + b.mu.RLock() + defer b.mu.RUnlock() + return b.Backend.HandleExistenceCheck(ctx, req) + } + return checkFound, exists, err +} + +// InvalidateKey is a thin wrapper used to ensure we grab the lock for race purposes +func (b *backend) InvalidateKey(ctx context.Context, key string) { + b.mu.RLock() + defer b.mu.RUnlock() + b.Backend.InvalidateKey(ctx, key) +} + +func (b *backend) IsExternal() bool { + switch b.Backend.(type) { + case *plugin.BackendPluginClientV5: + return true + } + return false +} diff --git a/changelog/10072.txt b/changelog/10072.txt new file mode 100644 index 0000000..c6c8b8f --- /dev/null +++ b/changelog/10072.txt @@ -0,0 +1,3 @@ +```release-note:bug +http: change max_request_size to be unlimited when the config value is less than 0 +``` diff --git a/changelog/10077.txt b/changelog/10077.txt new file mode 100644 index 0000000..09b6cd5 --- /dev/null +++ b/changelog/10077.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix client.Clone() to include the address +``` diff --git a/changelog/10085.txt b/changelog/10085.txt new file mode 100644 index 0000000..6688b0f --- /dev/null +++ b/changelog/10085.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity: merge associated entity groups when merging entities +``` diff --git a/changelog/10101.txt b/changelog/10101.txt new file mode 100644 index 0000000..6f831c8 --- /dev/null +++ b/changelog/10101.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity: dedup from_entity_ids when merging two entities +``` diff --git a/changelog/10131.txt b/changelog/10131.txt new file mode 100644 index 0000000..71d64df --- /dev/null +++ b/changelog/10131.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Aerospike Storage Backend**: Add support for using Aerospike as a storage backend +``` \ No newline at end of file diff --git a/changelog/10181.txt b/changelog/10181.txt new file mode 100644 index 0000000..ccff6b8 --- /dev/null +++ b/changelog/10181.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/dynamodb: Handle throttled batch write requests by retrying, without which writes could be lost. +``` diff --git a/changelog/10231.txt b/changelog/10231.txt new file mode 100644 index 0000000..c0158e4 --- /dev/null +++ b/changelog/10231.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Agent can now run as a Windows service. +``` diff --git a/changelog/10249.txt b/changelog/10249.txt new file mode 100644 index 0000000..7be43db --- /dev/null +++ b/changelog/10249.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Allow URI SAN templates in allowed_uri_sans when allowed_uri_sans_template is set to true. 
+``` diff --git a/changelog/10299.txt b/changelog/10299.txt new file mode 100644 index 0000000..db135b6 --- /dev/null +++ b/changelog/10299.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add algorithm-signer as a SSH Secrets Engine UI field +``` diff --git a/changelog/10365.txt b/changelog/10365.txt new file mode 100644 index 0000000..b197af5 --- /dev/null +++ b/changelog/10365.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/metrics: Added "vault operator usage" command. +``` diff --git a/changelog/10375.txt b/changelog/10375.txt new file mode 100644 index 0000000..a693af8 --- /dev/null +++ b/changelog/10375.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/metrics: New telemetry metrics reporting lease expirations by time interval and namespace +``` \ No newline at end of file diff --git a/changelog/10384.txt b/changelog/10384.txt new file mode 100644 index 0000000..0440049 --- /dev/null +++ b/changelog/10384.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/database/influxdb: Fix issue where not all errors from InfluxDB were being handled +``` diff --git a/changelog/10386.txt b/changelog/10386.txt new file mode 100644 index 0000000..54c2a7b --- /dev/null +++ b/changelog/10386.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/transit: allow for null string to be used for optional parameters in encrypt and decrypt +``` \ No newline at end of file diff --git a/changelog/10416.txt b/changelog/10416.txt new file mode 100644 index 0000000..ac93a70 --- /dev/null +++ b/changelog/10416.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/database: Sanitize `private_key` field when reading database plugin config +``` diff --git a/changelog/10417.txt b/changelog/10417.txt new file mode 100644 index 0000000..7ff4c3c --- /dev/null +++ b/changelog/10417.txt @@ -0,0 +1,4 @@ +```release-note:bug +ui: Fix bug in Transform secret engine when a new role is added and then removed from a transformation +``` + diff --git a/changelog/10424.txt b/changelog/10424.txt new file mode 100644 index 0000000..df4d9cf --- /dev/null +++ b/changelog/10424.txt @@ -0,0 +1,3 @@ +```release-note:bug +license: Fix license caching issue that prevents new licenses to get picked up by the license manager +``` diff --git a/changelog/10433.txt b/changelog/10433.txt new file mode 100644 index 0000000..37f5f84 --- /dev/null +++ b/changelog/10433.txt @@ -0,0 +1,4 @@ +```release-note:bug +secrets/database/mysql: Fixes issue where the DisplayName within generated usernames was the incorrect length +``` + diff --git a/changelog/10444.txt b/changelog/10444.txt new file mode 100644 index 0000000..b9212a0 --- /dev/null +++ b/changelog/10444.txt @@ -0,0 +1,4 @@ +```release-note:bug +api: Fixes CORS API methods that were outdated and invalid +``` + diff --git a/changelog/10456.txt b/changelog/10456.txt new file mode 100644 index 0000000..ea1d241 --- /dev/null +++ b/changelog/10456.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Avoid deadlocks by ensuring that if grabLockOrStop returns stopped=true, the lock will not be held. +``` diff --git a/changelog/10467.txt b/changelog/10467.txt new file mode 100644 index 0000000..411bbf7 --- /dev/null +++ b/changelog/10467.txt @@ -0,0 +1,3 @@ +```release-note:improvement +storage/cassandra: tuning parameters for clustered environments `connection_timeout`, `initial_connection_timeout`, `simple_retry_policy_retries`. 
+``` diff --git a/changelog/10487.txt b/changelog/10487.txt new file mode 100644 index 0000000..266b2f7 --- /dev/null +++ b/changelog/10487.txt @@ -0,0 +1,3 @@ +```release-note:bug +core (enterprise): Limit entropy augmentation during token generation to root tokens. +``` \ No newline at end of file diff --git a/changelog/10489.txt b/changelog/10489.txt new file mode 100644 index 0000000..c86cb8a --- /dev/null +++ b/changelog/10489.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Added active since timestamp to the status output of active nodes. +``` diff --git a/changelog/10490.txt b/changelog/10490.txt new file mode 100644 index 0000000..81aadcd --- /dev/null +++ b/changelog/10490.txt @@ -0,0 +1,3 @@ +```release-note:bug +api/sys/config/ui: Fixes issue where multiple UI custom header values are ignored and only the first given value is used +``` diff --git a/changelog/10491.txt b/changelog/10491.txt new file mode 100644 index 0000000..14b33bf --- /dev/null +++ b/changelog/10491.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix footer URL linking to the correct version changelog. +``` diff --git a/changelog/10498.txt b/changelog/10498.txt new file mode 100644 index 0000000..15cd8a8 --- /dev/null +++ b/changelog/10498.txt @@ -0,0 +1,4 @@ +```release-note:bug +core: Make all APIs that report init status consistent, and make them report +initialized=true when a Raft join is in progress. +``` \ No newline at end of file diff --git a/changelog/10505.txt b/changelog/10505.txt new file mode 100644 index 0000000..3c52855 --- /dev/null +++ b/changelog/10505.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sdk: Add helper for decoding root tokens +``` \ No newline at end of file diff --git a/changelog/10514.txt b/changelog/10514.txt new file mode 100644 index 0000000..60c2eb3 --- /dev/null +++ b/changelog/10514.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: add metrics for active entity count +``` diff --git a/changelog/10520.txt b/changelog/10520.txt new file mode 100644 index 0000000..a8caf77 --- /dev/null +++ b/changelog/10520.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Check audit device with a test message before adding it. +``` diff --git a/changelog/10536.txt b/changelog/10536.txt new file mode 100644 index 0000000..483b823 --- /dev/null +++ b/changelog/10536.txt @@ -0,0 +1,4 @@ +```release-note:bug +core: Fix rate limit resource quota migration from 1.5.x to 1.6.x by ensuring `purgeInterval` and +`staleAge` are set appropriately. +``` diff --git a/changelog/10537.txt b/changelog/10537.txt new file mode 100644 index 0000000..f32399a --- /dev/null +++ b/changelog/10537.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/ldap: Improve consistency in error messages +``` \ No newline at end of file diff --git a/changelog/10546.txt b/changelog/10546.txt new file mode 100644 index 0000000..3cf83ff --- /dev/null +++ b/changelog/10546.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/jwt: Fixes `bound_claims` validation for provider-specific group and user info fetching. 
+```
diff --git a/changelog/10556.txt b/changelog/10556.txt
new file mode 100644
index 0000000..26c5474
--- /dev/null
+++ b/changelog/10556.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+agent: Only set the namespace if the VAULT_NAMESPACE env var isn't present
+```
diff --git a/changelog/10558.txt b/changelog/10558.txt
new file mode 100644
index 0000000..b3acedb
--- /dev/null
+++ b/changelog/10558.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+secrets/gcp: Truncate ServiceAccount display names longer than 100 characters.
+```
\ No newline at end of file
diff --git a/changelog/10579.txt b/changelog/10579.txt
new file mode 100644
index 0000000..718ba3b
--- /dev/null
+++ b/changelog/10579.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+core: Avoid disclosing IP addresses in the errors of unauthenticated requests
+```
\ No newline at end of file
diff --git a/changelog/10588.txt b/changelog/10588.txt
new file mode 100644
index 0000000..0e363b4
--- /dev/null
+++ b/changelog/10588.txt
@@ -0,0 +1,3 @@
+```release-note:feature
+ui: Adds check for feature flag on application, and updates namespace toolbar on login if present
+```
diff --git a/changelog/10596.txt b/changelog/10596.txt
new file mode 100644
index 0000000..492f972
--- /dev/null
+++ b/changelog/10596.txt
@@ -0,0 +1,4 @@
+```release-note:bug
+ui: Fix bug that double encodes secret route when there are spaces in the path and makes you unable to view the version history.
+```
+
diff --git a/changelog/10603.txt b/changelog/10603.txt
new file mode 100644
index 0000000..1c93ed7
--- /dev/null
+++ b/changelog/10603.txt
@@ -0,0 +1,3 @@
+```release-note:feature
+secrets/db/snowflake: Added support for Snowflake to the Database Secret Engine
+```
\ No newline at end of file
diff --git a/changelog/10609.txt b/changelog/10609.txt
new file mode 100644
index 0000000..77ca658
--- /dev/null
+++ b/changelog/10609.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+command/debug: Now collects logs (at level `trace`) as a periodic output.
+```
\ No newline at end of file
diff --git a/changelog/10613.txt b/changelog/10613.txt
new file mode 100644
index 0000000..276b329
--- /dev/null
+++ b/changelog/10613.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+core: Added an internal endpoint that lists feature flags.
+```
diff --git a/changelog/10650.txt b/changelog/10650.txt
new file mode 100644
index 0000000..49c8298
--- /dev/null
+++ b/changelog/10650.txt
@@ -0,0 +1,4 @@
+```release-note:bug
+core: Make the response to an unauthenticated request to sys/internal endpoints consistent regardless of mount existence.
+```
+
diff --git a/changelog/10653.txt b/changelog/10653.txt
new file mode 100644
index 0000000..16bf22d
--- /dev/null
+++ b/changelog/10653.txt
@@ -0,0 +1,3 @@
+```release-note:feature
+sdk: Private key generation in the certutil package now allows custom io.Readers to be used.
+```
\ No newline at end of file
diff --git a/changelog/10655.txt b/changelog/10655.txt
new file mode 100644
index 0000000..83ed769
--- /dev/null
+++ b/changelog/10655.txt
@@ -0,0 +1,3 @@
+```release-note:feature
+ui: Database secrets engine, supporting MongoDB only
+```
\ No newline at end of file
diff --git a/changelog/10677.txt b/changelog/10677.txt
new file mode 100644
index 0000000..cdc1992
--- /dev/null
+++ b/changelog/10677.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+ui: Upgrade dependencies to resolve potential JS vulnerabilities
+```
diff --git a/changelog/10684.txt b/changelog/10684.txt
new file mode 100644
index 0000000..3798d2e
--- /dev/null
+++ b/changelog/10684.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+ui: Fix expected response from feature-flags endpoint
+```
diff --git a/changelog/10689.txt b/changelog/10689.txt
new file mode 100644
index 0000000..6d2ff85
--- /dev/null
+++ b/changelog/10689.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+quotas/rate-limit: Fix quotas enforcing old rate limit quota paths
+```
diff --git a/changelog/10705.txt b/changelog/10705.txt
new file mode 100644
index 0000000..324cd90
--- /dev/null
+++ b/changelog/10705.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+agent: Better concurrent request handling on identical requests proxied through Agent.
+```
diff --git a/changelog/10708.txt b/changelog/10708.txt
new file mode 100644
index 0000000..a33bfb0
--- /dev/null
+++ b/changelog/10708.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+metrics: Protect emitMetrics from panicking during post-seal
+```
\ No newline at end of file
diff --git a/changelog/10725.txt b/changelog/10725.txt
new file mode 100644
index 0000000..e7bb8fb
--- /dev/null
+++ b/changelog/10725.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+core (enterprise): "vault status" command works when a namespace is set.
+```
diff --git a/changelog/10726.txt b/changelog/10726.txt
new file mode 100644
index 0000000..e8de077
--- /dev/null
+++ b/changelog/10726.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+core: reduce memory used by leases
+```
diff --git a/changelog/10730.txt b/changelog/10730.txt
new file mode 100644
index 0000000..e62c1d1
--- /dev/null
+++ b/changelog/10730.txt
@@ -0,0 +1,3 @@
+```release-note:change
+go: Update go version to 1.15.7
+```
diff --git a/changelog/10743.txt b/changelog/10743.txt
new file mode 100644
index 0000000..b0a86e0
--- /dev/null
+++ b/changelog/10743.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+core: Turn off case sensitivity for allowed entity alias check during token create operation.
+```
diff --git a/changelog/10744.txt b/changelog/10744.txt
new file mode 100644
index 0000000..e447cbf
--- /dev/null
+++ b/changelog/10744.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+core: Track barrier encryption count and automatically rotate after a large number of operations or on a schedule
+```
diff --git a/changelog/10751.txt b/changelog/10751.txt
new file mode 100644
index 0000000..6fb7c31
--- /dev/null
+++ b/changelog/10751.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+secrets/consul: Vault is now able to automatically bootstrap the Consul ACL system.
+```
diff --git a/changelog/10756.txt b/changelog/10756.txt
new file mode 100644
index 0000000..ab9ce93
--- /dev/null
+++ b/changelog/10756.txt
@@ -0,0 +1,4 @@
+```release-note:bug
+consul-template: Update consul-template vendor version and associated dependencies to master,
+pulling in https://github.com/hashicorp/consul-template/pull/1447
+```
\ No newline at end of file
diff --git a/changelog/10757.txt b/changelog/10757.txt
new file mode 100644
index 0000000..23fec64
--- /dev/null
+++ b/changelog/10757.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+agent: Set namespace for template server in agent.
+```
\ No newline at end of file
diff --git a/changelog/10758.txt b/changelog/10758.txt
new file mode 100644
index 0000000..54fb69b
--- /dev/null
+++ b/changelog/10758.txt
@@ -0,0 +1,3 @@
+```release-note:security
+replication (enterprise): On DR secondaries, use DR operation token to authenticate raft remove-peer.
+```
\ No newline at end of file
diff --git a/changelog/10759.txt b/changelog/10759.txt
new file mode 100644
index 0000000..81027d0
--- /dev/null
+++ b/changelog/10759.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+secrets/gcp: Fix issue with account and iam_policy roleset WALs not being removed after attempts when GCP project no longer exists
+```
\ No newline at end of file
diff --git a/changelog/10766.txt b/changelog/10766.txt
new file mode 100644
index 0000000..566add0
--- /dev/null
+++ b/changelog/10766.txt
@@ -0,0 +1,3 @@
+```release-note:feature
+secrets/database/postgresql: Add ability to customize dynamic usernames
+```
diff --git a/changelog/10767.txt b/changelog/10767.txt
new file mode 100644
index 0000000..1b8fb49
--- /dev/null
+++ b/changelog/10767.txt
@@ -0,0 +1,3 @@
+```release-note:feature
+secrets/database/mssql: Add ability to customize dynamic usernames
+```
diff --git a/changelog/10812.txt b/changelog/10812.txt
new file mode 100644
index 0000000..749dc23
--- /dev/null
+++ b/changelog/10812.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+identity: When the identity/group endpoint is used to modify a group by name, correctly update its policy and member entities.
+```
diff --git a/changelog/10826.txt b/changelog/10826.txt
new file mode 100644
index 0000000..24a5c0a
--- /dev/null
+++ b/changelog/10826.txt
@@ -0,0 +1,3 @@
+```release-note:change
+auth/approle: Secrets ID generation endpoint now returns `secret_id_ttl` as part of its response.
+```
diff --git a/changelog/10833.txt b/changelog/10833.txt
new file mode 100644
index 0000000..588c782
--- /dev/null
+++ b/changelog/10833.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+agent: Set TokenParent correctly in the Index to be cached.
+```
diff --git a/changelog/10834.txt b/changelog/10834.txt
new file mode 100644
index 0000000..ad2e57e
--- /dev/null
+++ b/changelog/10834.txt
@@ -0,0 +1,3 @@
+```release-note:feature
+secrets/database/mysql: Add ability to customize dynamic usernames
+```
diff --git a/changelog/10848.txt b/changelog/10848.txt
new file mode 100644
index 0000000..9b3c659
--- /dev/null
+++ b/changelog/10848.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+ui: Upgrade date-fns from 1.3.0 to 2.16.1.
+``` diff --git a/changelog/10850.txt b/changelog/10850.txt new file mode 100644 index 0000000..211b9a1 --- /dev/null +++ b/changelog/10850.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: change auto-auth to preload an existing token on start +``` diff --git a/changelog/10855.txt b/changelog/10855.txt new file mode 100644 index 0000000..19d4ba5 --- /dev/null +++ b/changelog/10855.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix duplicate quotas on performance standby nodes. +``` diff --git a/changelog/10858.txt b/changelog/10858.txt new file mode 100644 index 0000000..0288316 --- /dev/null +++ b/changelog/10858.txt @@ -0,0 +1,3 @@ +```release-note:feature +secrets/database/mongodb: Add ability to customize dynamic usernames +``` diff --git a/changelog/10877.txt b/changelog/10877.txt new file mode 100644 index 0000000..59f3983 --- /dev/null +++ b/changelog/10877.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/identity: Fix deadlock in entity merge endpoint. +``` diff --git a/changelog/10886.txt b/changelog/10886.txt new file mode 100644 index 0000000..8545369 --- /dev/null +++ b/changelog/10886.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/ssh: Let allowed_users template mix templated and non-templated parts. +``` \ No newline at end of file diff --git a/changelog/10901.txt b/changelog/10901.txt new file mode 100644 index 0000000..456ccb2 --- /dev/null +++ b/changelog/10901.txt @@ -0,0 +1,3 @@ +```release-note:bug +serviceregistration: Fix race during shutdown of Consul service registration. +``` diff --git a/changelog/10904.txt b/changelog/10904.txt new file mode 100644 index 0000000..92c80cd --- /dev/null +++ b/changelog/10904.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Upgrade Storybook from 5.3.19 to 6.1.17. +``` diff --git a/changelog/10906.txt b/changelog/10906.txt new file mode 100644 index 0000000..0b48505 --- /dev/null +++ b/changelog/10906.txt @@ -0,0 +1,3 @@ +```release-note:feature +secrets/database/cassandra: Add ability to customize dynamic usernames +``` diff --git a/changelog/10919.txt b/changelog/10919.txt new file mode 100644 index 0000000..7d6f3df --- /dev/null +++ b/changelog/10919.txt @@ -0,0 +1,10 @@ +```release-note:feature +auth/jwt: Adds `max_age` role parameter and `auth_time` claim validation. +``` +```release-note:bug +auth/jwt: Fixes an issue where JWT verification keys weren't updated after a `jwks_url` change. +``` +```release-note:bug +auth/jwt: Fixes an issue where `jwt_supported_algs` were not being validated for JWT auth using +`jwks_url` and `jwt_validation_pubkeys`. +``` diff --git a/changelog/10927.txt b/changelog/10927.txt new file mode 100644 index 0000000..67ac038 --- /dev/null +++ b/changelog/10927.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Route templating server through cache when persistent cache is enabled. 
+```
diff --git a/changelog/10931.txt b/changelog/10931.txt
new file mode 100644
index 0000000..e51642b
--- /dev/null
+++ b/changelog/10931.txt
@@ -0,0 +1,3 @@
+```release-note:feature
+secrets/terraform: New secret engine for managing Terraform Cloud API tokens
+```
\ No newline at end of file
diff --git a/changelog/10938.txt b/changelog/10938.txt
new file mode 100644
index 0000000..841c37a
--- /dev/null
+++ b/changelog/10938.txt
@@ -0,0 +1,3 @@
+```release-note:feature
+agent: Support for persisting the agent cache to disk
+```
diff --git a/changelog/10942.txt b/changelog/10942.txt
new file mode 100644
index 0000000..fda073a
--- /dev/null
+++ b/changelog/10942.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+auth/okta: Adds support for Okta Verify TOTP MFA.
+```
diff --git a/changelog/10948.txt b/changelog/10948.txt
new file mode 100644
index 0000000..fdf33e8
--- /dev/null
+++ b/changelog/10948.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+secrets/pki: Add ability to fetch individual certificate as DER or PEM
+```
\ No newline at end of file
diff --git a/changelog/10949.txt b/changelog/10949.txt
new file mode 100644
index 0000000..6613c48
--- /dev/null
+++ b/changelog/10949.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+ui: Customize MongoDB input fields on Database Secrets Engine
+```
diff --git a/changelog/10951.txt b/changelog/10951.txt
new file mode 100644
index 0000000..1fdac2d
--- /dev/null
+++ b/changelog/10951.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+ui: Clarify language on usage metrics page empty state
+```
diff --git a/changelog/10952.txt b/changelog/10952.txt
new file mode 100644
index 0000000..0d1d48f
--- /dev/null
+++ b/changelog/10952.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+auth/oci: Fixes alias name to use the role name, and not the literal string `name` [[GH-10](https://github.com/hashicorp/vault-plugin-auth-oci/pull/10)]
+```
diff --git a/changelog/10953.txt b/changelog/10953.txt
new file mode 100644
index 0000000..55d011e
--- /dev/null
+++ b/changelog/10953.txt
@@ -0,0 +1,3 @@
+```release-note:feature
+secrets/aws: add IAM tagging support for iam_user roles
+```
diff --git a/changelog/10964.txt b/changelog/10964.txt
new file mode 100644
index 0000000..04874b8
--- /dev/null
+++ b/changelog/10964.txt
@@ -0,0 +1,5 @@
+```release-note:change
+agent: Failed auto-auth attempts are now throttled by an exponential backoff instead of the
+~2 second retry delay. The maximum backoff may be configured with the new `max_backoff` parameter,
+which defaults to 5 minutes.
+```
diff --git a/changelog/10980.txt b/changelog/10980.txt
new file mode 100644
index 0000000..d6edaf6
--- /dev/null
+++ b/changelog/10980.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+ui: better errors on Database secrets engine role create
+```
diff --git a/changelog/10982.txt b/changelog/10982.txt
new file mode 100644
index 0000000..7102478
--- /dev/null
+++ b/changelog/10982.txt
@@ -0,0 +1,3 @@
+```release-note:feature
+ui: Adds the wizard to the Database Secret Engine
+```
diff --git a/changelog/10992.txt b/changelog/10992.txt
new file mode 100644
index 0000000..5b216af
--- /dev/null
+++ b/changelog/10992.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+core: dev mode listener allows unauthenticated sys/metrics requests
+```
diff --git a/changelog/10995.txt b/changelog/10995.txt
new file mode 100644
index 0000000..bf8b41c
--- /dev/null
+++ b/changelog/10995.txt
@@ -0,0 +1,3 @@
+```release-note:feature
+secrets/database/couchbase: Add ability to customize dynamic usernames
+```
diff --git a/changelog/10996.txt b/changelog/10996.txt
new file mode 100644
index 0000000..e79f99b
--- /dev/null
+++ b/changelog/10996.txt
@@ -0,0 +1,3 @@
+```release-note:feature
+secrets/openldap: Added dynamic roles to OpenLDAP similar to the combined database engine
+```
diff --git a/changelog/10997.txt b/changelog/10997.txt
new file mode 100644
index 0000000..0a6b846
--- /dev/null
+++ b/changelog/10997.txt
@@ -0,0 +1,13 @@
+```release-note:change
+aws/auth: AWS Auth concepts and endpoints that use the "whitelist" and "blacklist" terms
+have been updated to more inclusive language (e.g. `/auth/aws/identity-whitelist` has been
+updated to `/auth/aws/identity-accesslist`). The legacy endpoint names have not been removed
+but are considered **deprecated**. The old and new endpoints are essentially aliases, sharing
+the same underlying data. The complete list of endpoint changes is available in the
+[AWS Auth API docs](https://www.vaultproject.io/api-docs/auth/aws).
+```
+
+```release-note:deprecation
+aws/auth: AWS Auth endpoints that use the "whitelist" and "blacklist" terms have been deprecated.
+Refer to the CHANGES section for additional details.
+```
diff --git a/changelog/11000.txt b/changelog/11000.txt
new file mode 100644
index 0000000..84b4af9
--- /dev/null
+++ b/changelog/11000.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+auth/ldap: include support for an optional user filter field when searching for users
+```
diff --git a/changelog/11011.txt b/changelog/11011.txt
new file mode 100644
index 0000000..8855842
--- /dev/null
+++ b/changelog/11011.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+ui: Fix KMIP failing test and a bug that occurred because the configuration model was not being unloaded.
+```
diff --git a/changelog/11015.txt b/changelog/11015.txt
new file mode 100644
index 0000000..694ee6e
--- /dev/null
+++ b/changelog/11015.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+release-note: Add dependencies listed in dependencies/2-25-21
+```
diff --git a/changelog/11018.txt b/changelog/11018.txt
new file mode 100644
index 0000000..d5af26f
--- /dev/null
+++ b/changelog/11018.txt
@@ -0,0 +1,3 @@
+```release-note:enhancement
+ui: Add tests for database role setting form
+```
diff --git a/changelog/11022.txt b/changelog/11022.txt
new file mode 100644
index 0000000..6c3f507
--- /dev/null
+++ b/changelog/11022.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+core: add partial month client count api
+```
\ No newline at end of file
diff --git a/changelog/11094.txt b/changelog/11094.txt
new file mode 100644
index 0000000..b193bd5
--- /dev/null
+++ b/changelog/11094.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+ui: Fix issue where logging in without namespace input causes error
+```
diff --git a/changelog/11113.txt b/changelog/11113.txt
new file mode 100644
index 0000000..0e372e6
--- /dev/null
+++ b/changelog/11113.txt
@@ -0,0 +1,3 @@
+```release-note:enhancement
+agent: Add a vault.retry stanza that allows specifying number of retries on failure; this applies both to templating and proxied requests.
+```
diff --git a/changelog/11119.txt b/changelog/11119.txt
new file mode 100644
index 0000000..6ce5c6d
--- /dev/null
+++ b/changelog/11119.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+ui: Add role from database connection automatically populates the database for new role
+```
diff --git a/changelog/11127.txt b/changelog/11127.txt
new file mode 100644
index 0000000..7d25325
--- /dev/null
+++ b/changelog/11127.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+ui: Fix bug where database secret engines with custom names cannot delete connections
+```
diff --git a/changelog/11129.txt b/changelog/11129.txt
new file mode 100644
index 0000000..8faedf2
--- /dev/null
+++ b/changelog/11129.txt
@@ -0,0 +1,3 @@
+```release-note:feature
+cli/api: Add lease lookup command
+```
diff --git a/changelog/11142.txt b/changelog/11142.txt
new file mode 100644
index 0000000..713dc13
--- /dev/null
+++ b/changelog/11142.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+ui: Fix date display on expired token notice
+```
diff --git a/changelog/11143.txt b/changelog/11143.txt
new file mode 100644
index 0000000..e847eb9
--- /dev/null
+++ b/changelog/11143.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+ui: Fix error message caused by control group
+```
diff --git a/changelog/11182.txt b/changelog/11182.txt
new file mode 100644
index 0000000..cdb40ed
--- /dev/null
+++ b/changelog/11182.txt
@@ -0,0 +1,4 @@
+```release-note:bug
+ui: Fix namespace-bug on login
+```
+
diff --git a/changelog/11208.txt b/changelog/11208.txt
new file mode 100644
index 0000000..555a304
--- /dev/null
+++ b/changelog/11208.txt
@@ -0,0 +1,3 @@
+```release-note:change
+ui: replaces Vault's use of elazarl/go-bindata-assetfs in building the UI with Go's native Embed package
+```
diff --git a/changelog/11213.txt b/changelog/11213.txt
new file mode 100644
index 0000000..120582e
--- /dev/null
+++ b/changelog/11213.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+ui: Fix status menu not showing on login
+```
diff --git a/changelog/11216.txt b/changelog/11216.txt
new file mode 100644
index 0000000..beef5c5
--- /dev/null
+++ b/changelog/11216.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+secrets/pki: select appropriate signature algorithm for ECDSA signature on certificates.
+``` diff --git a/changelog/11218.txt b/changelog/11218.txt new file mode 100644 index 0000000..ef5e87f --- /dev/null +++ b/changelog/11218.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Calculate the Subject Key Identifier as suggested in [RFC 5280, Section 4.2.1.2](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.2). +``` diff --git a/changelog/11226.txt b/changelog/11226.txt new file mode 100644 index 0000000..dd40cfa --- /dev/null +++ b/changelog/11226.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +core: Add tls_max_version listener config option. +``` diff --git a/changelog/11231.txt b/changelog/11231.txt new file mode 100644 index 0000000..a51bb2d --- /dev/null +++ b/changelog/11231.txt @@ -0,0 +1,3 @@ +```release-note:feature +ui: Add database secret engine support for MSSQL +``` \ No newline at end of file diff --git a/changelog/11245.txt b/changelog/11245.txt new file mode 100644 index 0000000..4f20c69 --- /dev/null +++ b/changelog/11245.txt @@ -0,0 +1,3 @@ +```release-note:improvement +pki: adds signature_bits field to customize signature algorithm on CAs and certs signed by Vault +``` diff --git a/changelog/11247.txt b/changelog/11247.txt new file mode 100644 index 0000000..916d89b --- /dev/null +++ b/changelog/11247.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/raft: Support cluster address change for nodes in a cluster managed by autopilot +``` \ No newline at end of file diff --git a/changelog/11252.txt b/changelog/11252.txt new file mode 100644 index 0000000..67bd5f2 --- /dev/null +++ b/changelog/11252.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/raft: leader_tls_servername wasn't used unless leader_ca_cert_file and/or mTLS were configured. +``` diff --git a/changelog/11256.txt b/changelog/11256.txt new file mode 100644 index 0000000..63f22f6 --- /dev/null +++ b/changelog/11256.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixed and updated lease renewal picker +``` \ No newline at end of file diff --git a/changelog/11258.txt b/changelog/11258.txt new file mode 100644 index 0000000..49d936f --- /dev/null +++ b/changelog/11258.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix bug where the UI does not recognize version 2 KV until refresh, and fix [object Object] error message +``` diff --git a/changelog/11259.txt b/changelog/11259.txt new file mode 100644 index 0000000..78c5871 --- /dev/null +++ b/changelog/11259.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +secret/pki: Preserve ordering of all DN attribute values when issuing certificates +``` diff --git a/changelog/11260.txt b/changelog/11260.txt new file mode 100644 index 0000000..88c7d73 --- /dev/null +++ b/changelog/11260.txt @@ -0,0 +1,3 @@ +```release-note:improvement +storage/raft: Support autopilot for HA only raft storage. +``` \ No newline at end of file diff --git a/changelog/11262.txt b/changelog/11262.txt new file mode 100644 index 0000000..2627ca2 --- /dev/null +++ b/changelog/11262.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/database/cassandra: Updated default statement for password rotation to allow for special characters. This applies to root and static credentials. 
+``` diff --git a/changelog/11269.txt b/changelog/11269.txt new file mode 100644 index 0000000..d732eaf --- /dev/null +++ b/changelog/11269.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +storage/raft: Switch to shared raft-boltdb library and add boltdb metrics +``` diff --git a/changelog/11283.txt b/changelog/11283.txt new file mode 100644 index 0000000..14b33bf --- /dev/null +++ b/changelog/11283.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix footer URL linking to the correct version changelog. +``` diff --git a/changelog/11284.txt b/changelog/11284.txt new file mode 100644 index 0000000..8c0332a --- /dev/null +++ b/changelog/11284.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Obscure secret values on input and displayOnly fields like certificates. +``` \ No newline at end of file diff --git a/changelog/11288.txt b/changelog/11288.txt new file mode 100644 index 0000000..6f0e95c --- /dev/null +++ b/changelog/11288.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent: Fixed agent templating to use configured tls servername values +``` diff --git a/changelog/11289.txt b/changelog/11289.txt new file mode 100644 index 0000000..29e9087 --- /dev/null +++ b/changelog/11289.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +http: Add optional HTTP response headers for hostname and raft node ID +``` diff --git a/changelog/11294.txt b/changelog/11294.txt new file mode 100644 index 0000000..a176b4a --- /dev/null +++ b/changelog/11294.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fix issue where select-one option was not showing in secrets database role creation +``` diff --git a/changelog/11324.txt b/changelog/11324.txt new file mode 100644 index 0000000..e638d26 --- /dev/null +++ b/changelog/11324.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +core: Add per-listener config to allow unauthenticated pprof requests, and collect a few more pprof targets. +``` diff --git a/changelog/11345.txt b/changelog/11345.txt new file mode 100644 index 0000000..8ff694f --- /dev/null +++ b/changelog/11345.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/aws: add ability to provide a role session name when generating STS credentials +``` diff --git a/changelog/11360.txt b/changelog/11360.txt new file mode 100644 index 0000000..a773c41 --- /dev/null +++ b/changelog/11360.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Updated search select component styling +``` \ No newline at end of file diff --git a/changelog/11364.txt b/changelog/11364.txt new file mode 100644 index 0000000..64fae59 --- /dev/null +++ b/changelog/11364.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: allow arbitrary length stack traces upon receiving SIGUSR2 (was 32MB) +``` diff --git a/changelog/11365.txt b/changelog/11365.txt new file mode 100644 index 0000000..cf99d52 --- /dev/null +++ b/changelog/11365.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/database/cassandra: Fixed issue where hostnames were not being validated when using TLS +``` diff --git a/changelog/11366.txt b/changelog/11366.txt new file mode 100644 index 0000000..b1d9eb1 --- /dev/null +++ b/changelog/11366.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Add metrics for standby node forwarding. 
+``` diff --git a/changelog/11367.txt b/changelog/11367.txt new file mode 100644 index 0000000..5e79b1c --- /dev/null +++ b/changelog/11367.txt @@ -0,0 +1,3 @@ +```release-note:bug +pki: Only remove revoked entry for certificates during tidy if they are past their NotAfter value +``` \ No newline at end of file diff --git a/changelog/11371.txt b/changelog/11371.txt new file mode 100644 index 0000000..884c3b9 --- /dev/null +++ b/changelog/11371.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix goroutine leak when updating rate limit quota +``` diff --git a/changelog/11377.txt b/changelog/11377.txt new file mode 100644 index 0000000..1719473 --- /dev/null +++ b/changelog/11377.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix storage entry leak when revoking leases created with non-orphan batch tokens. +``` diff --git a/changelog/11388.txt b/changelog/11388.txt new file mode 100644 index 0000000..d39fe5d --- /dev/null +++ b/changelog/11388.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +auth/jwt: Adds ability to directly provide service account JSON in G Suite provider config. +``` diff --git a/changelog/11404.txt b/changelog/11404.txt new file mode 100644 index 0000000..d45aecc --- /dev/null +++ b/changelog/11404.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Add root rotation statements support to appropriate database secret engine plugins +``` diff --git a/changelog/11408.txt b/changelog/11408.txt new file mode 100644 index 0000000..e4c47e3 --- /dev/null +++ b/changelog/11408.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix cleanup of storage entries from cubbyholes within namespaces. +``` \ No newline at end of file diff --git a/changelog/11442.txt b/changelog/11442.txt new file mode 100644 index 0000000..8c12323 --- /dev/null +++ b/changelog/11442.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add push notification message when selecting okta auth. +``` diff --git a/changelog/11447.txt b/changelog/11447.txt new file mode 100644 index 0000000..243cfe9 --- /dev/null +++ b/changelog/11447.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Update ember to latest LTS and upgrade UI dependencies +``` diff --git a/changelog/11451.txt b/changelog/11451.txt new file mode 100644 index 0000000..b5ea7e2 --- /dev/null +++ b/changelog/11451.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/database: Fix marshalling to allow providing numeric arguments to external database plugins. 
+``` diff --git a/changelog/11453.txt b/changelog/11453.txt new file mode 100644 index 0000000..8549b53 --- /dev/null +++ b/changelog/11453.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix race that allowed remounting on path used by another mount +``` diff --git a/changelog/11473.txt b/changelog/11473.txt new file mode 100644 index 0000000..851ae2c --- /dev/null +++ b/changelog/11473.txt @@ -0,0 +1,4 @@ +```release-note:change +agent: Update to use IAM Service Account Credentials endpoint for signing JWTs +when using GCP Auto-Auth method +``` diff --git a/changelog/11495.txt b/changelog/11495.txt new file mode 100644 index 0000000..d529872 --- /dev/null +++ b/changelog/11495.txt @@ -0,0 +1,3 @@ +```release-note:feature +ssh: add support for templated values in SSH CA DefaultExtensions +``` \ No newline at end of file diff --git a/changelog/11500.txt b/changelog/11500.txt new file mode 100644 index 0000000..e242c6f --- /dev/null +++ b/changelog/11500.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Updated ivy code mirror component for consistency +``` \ No newline at end of file diff --git a/changelog/11502.txt b/changelog/11502.txt new file mode 100644 index 0000000..af84fba --- /dev/null +++ b/changelog/11502.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Allow Agent auto auth to read symlinked JWT files +``` diff --git a/changelog/11506.txt b/changelog/11506.txt new file mode 100644 index 0000000..15ab6f3 --- /dev/null +++ b/changelog/11506.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity: Use correct mount accessor when refreshing external group memberships. +``` diff --git a/changelog/11517.txt b/changelog/11517.txt new file mode 100644 index 0000000..f5cca73 --- /dev/null +++ b/changelog/11517.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Send notifications to systemd on start, stop, and configuration reload. +``` diff --git a/changelog/11530.txt b/changelog/11530.txt new file mode 100644 index 0000000..95bb8be --- /dev/null +++ b/changelog/11530.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Redesign of KV 2 Delete toolbar. +``` \ No newline at end of file diff --git a/changelog/11532.txt b/changelog/11532.txt new file mode 100644 index 0000000..59760fa --- /dev/null +++ b/changelog/11532.txt @@ -0,0 +1,3 @@ +```release-note:feature +**MySQL Database UI**: The UI now supports adding and editing MySQL connections in the database secret engine +``` diff --git a/changelog/11541.txt b/changelog/11541.txt new file mode 100644 index 0000000..3cf468c --- /dev/null +++ b/changelog/11541.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix edge cases in the configuration endpoint for barrier key autorotation. +``` diff --git a/changelog/11562.txt b/changelog/11562.txt new file mode 100644 index 0000000..3593fbc --- /dev/null +++ b/changelog/11562.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +secrets/azure: Update Azure SDK to allow consuming different Resource Manager Endpoint URIs +``` \ No newline at end of file diff --git a/changelog/11576.txt b/changelog/11576.txt new file mode 100644 index 0000000..0886ee9 --- /dev/null +++ b/changelog/11576.txt @@ -0,0 +1,4 @@ +```release-note:bug +agent/cert: Fix issue where the API client on agent was not honoring certificate +information from the auto-auth config map on renewals or retries. 
+``` \ No newline at end of file diff --git a/changelog/11585.txt b/changelog/11585.txt new file mode 100644 index 0000000..c983802 --- /dev/null +++ b/changelog/11585.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/database: Fixes issue for V4 database interface where `SetCredentials` wasn't falling back to using `RotateRootCredentials` if `SetCredentials` is `Unimplemented` +``` diff --git a/changelog/11586.txt b/changelog/11586.txt new file mode 100644 index 0000000..31c4069 --- /dev/null +++ b/changelog/11586.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add regex validation to Transform Template pattern input +``` diff --git a/changelog/11588.txt b/changelog/11588.txt new file mode 100644 index 0000000..1f7f036 --- /dev/null +++ b/changelog/11588.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Add a small (<1s) exponential backoff to failed TCP listener Accept failures. +``` \ No newline at end of file diff --git a/changelog/11596.txt b/changelog/11596.txt new file mode 100644 index 0000000..3735ca0 --- /dev/null +++ b/changelog/11596.txt @@ -0,0 +1,3 @@ +```release-note:bug +core (enterprise): Fix plugins mounted in namespaces being unable to use password policies +``` diff --git a/changelog/11597.txt b/changelog/11597.txt new file mode 100644 index 0000000..4a9c113 --- /dev/null +++ b/changelog/11597.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix text link URL on database roles list +``` \ No newline at end of file diff --git a/changelog/11600.txt b/changelog/11600.txt new file mode 100644 index 0000000..f40d4bc --- /dev/null +++ b/changelog/11600.txt @@ -0,0 +1,9 @@ +```release-note:improvement +secrets/database/mongodb: Add ability to customize `SocketTimeout`, `ConnectTimeout`, and `ServerSelectionTimeout` +``` +```release-note:improvement +secrets/database/mongodb: Increased throughput by allowing for multiple request threads to simultaneously update users in MongoDB +``` +```release-note:bug +secrets/database: Fixed minor race condition when rotate-root is called +``` diff --git a/changelog/11607.txt b/changelog/11607.txt new file mode 100644 index 0000000..4404a23 --- /dev/null +++ b/changelog/11607.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: add irrevocable lease list and count apis +``` \ No newline at end of file diff --git a/changelog/11628.txt b/changelog/11628.txt new file mode 100644 index 0000000..335777e --- /dev/null +++ b/changelog/11628.txt @@ -0,0 +1,3 @@ +```release-note:bug +secret: fix the bug where transit encrypt batch doesn't work with key_version +``` diff --git a/changelog/11638.txt b/changelog/11638.txt new file mode 100644 index 0000000..5ed5065 --- /dev/null +++ b/changelog/11638.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/aws: Underlying error included in validation failure message. +``` diff --git a/changelog/11641.txt b/changelog/11641.txt new file mode 100644 index 0000000..84bd311 --- /dev/null +++ b/changelog/11641.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix entity group membership and metadata not showing +``` diff --git a/changelog/11647.txt b/changelog/11647.txt new file mode 100644 index 0000000..2075989 --- /dev/null +++ b/changelog/11647.txt @@ -0,0 +1,3 @@ +```release-note:bug +tokenutil: Perform the num uses check before token type. +``` diff --git a/changelog/11650.txt b/changelog/11650.txt new file mode 100644 index 0000000..75029f9 --- /dev/null +++ b/changelog/11650.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: correct logic for renewal of leases nearing their expiration time. 
+``` diff --git a/changelog/11672.txt b/changelog/11672.txt new file mode 100644 index 0000000..2d019ce --- /dev/null +++ b/changelog/11672.txt @@ -0,0 +1,4 @@ +```release-note:improvement +ui: Replace tool partials with components. +``` + diff --git a/changelog/11680.txt b/changelog/11680.txt new file mode 100644 index 0000000..3e8b919 --- /dev/null +++ b/changelog/11680.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Update partials to components +``` \ No newline at end of file diff --git a/changelog/11696.txt b/changelog/11696.txt new file mode 100644 index 0000000..e3bc54c --- /dev/null +++ b/changelog/11696.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Allow a leveled logger to be provided to `api.Client` through `SetLogger`. +``` diff --git a/changelog/11705.txt b/changelog/11705.txt new file mode 100644 index 0000000..42d683d --- /dev/null +++ b/changelog/11705.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add specific error message if unseal fails due to license +``` diff --git a/changelog/11708.txt b/changelog/11708.txt new file mode 100644 index 0000000..2121191 --- /dev/null +++ b/changelog/11708.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: JSON fields on database can be cleared on edit +``` diff --git a/changelog/11759.txt b/changelog/11759.txt new file mode 100644 index 0000000..0b0776a --- /dev/null +++ b/changelog/11759.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: show site-wide banners for license warnings if applicable +``` diff --git a/changelog/11775.txt b/changelog/11775.txt new file mode 100644 index 0000000..be37355 --- /dev/null +++ b/changelog/11775.txt @@ -0,0 +1,9 @@ +```release-note:change +agent: Errors in the template engine will no longer cause agent to exit unless +explicitly defined to do so. A new configuration parameter, +`exit_on_retry_failure`, within the new top-level stanza, `template_config`, can +be set to `true` in order to cause agent to exit. Note that for agent to exit if +`template.error_on_missing_key` is set to `true`, `exit_on_retry_failure` must +be also set to `true`. Otherwise, the template engine will log an error but then +restart its internal runner. +``` diff --git a/changelog/11778.txt b/changelog/11778.txt new file mode 100644 index 0000000..34acf3d --- /dev/null +++ b/changelog/11778.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: update license page with relevant autoload info +``` diff --git a/changelog/11780.txt b/changelog/11780.txt new file mode 100644 index 0000000..e424054 --- /dev/null +++ b/changelog/11780.txt @@ -0,0 +1,3 @@ +```release-note:feature +pki: Support ed25519 as a key for the pki backend +``` diff --git a/changelog/11784.txt b/changelog/11784.txt new file mode 100644 index 0000000..1ccffea --- /dev/null +++ b/changelog/11784.txt @@ -0,0 +1,4 @@ +```release-note:bug +auth/jwt: Updates the [hashicorp/cap](https://github.com/hashicorp/cap) library to `v0.1.0` to +bring in a verification key caching fix. 
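For the `SetLogger` improvement above (11696), a minimal Go sketch; the logger name and level are illustrative, and it assumes an `hclog` logger satisfies the leveled-logger interface the client accepts:

```go
package main

import (
	"log"

	"github.com/hashicorp/go-hclog"
	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// Route the client's retry/debug output through a leveled logger.
	client.SetLogger(hclog.New(&hclog.LoggerOptions{
		Name:  "vault-client", // illustrative name
		Level: hclog.Debug,
	}))
}
```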
+``` diff --git a/changelog/11785.txt b/changelog/11785.txt new file mode 100644 index 0000000..e8bf429 --- /dev/null +++ b/changelog/11785.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add validation to KV secret engine +``` \ No newline at end of file diff --git a/changelog/11795.txt b/changelog/11795.txt new file mode 100644 index 0000000..ead4434 --- /dev/null +++ b/changelog/11795.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Added auth method descriptions to UI login page +``` \ No newline at end of file diff --git a/changelog/11796.txt b/changelog/11796.txt new file mode 100644 index 0000000..76509d9 --- /dev/null +++ b/changelog/11796.txt @@ -0,0 +1,3 @@ +```release-note:feature +secrets/database/influxdb: Add ability to customize dynamic usernames +``` diff --git a/changelog/11802.txt b/changelog/11802.txt new file mode 100644 index 0000000..90dee25 --- /dev/null +++ b/changelog/11802.txt @@ -0,0 +1,3 @@ +```release-note:change +go: Update to Go 1.16.5 +``` diff --git a/changelog/11820.txt b/changelog/11820.txt new file mode 100644 index 0000000..a0d2c0e --- /dev/null +++ b/changelog/11820.txt @@ -0,0 +1,3 @@ +```release-note:improvement +db/cassandra: Added tls_server_name to specify server name for TLS validation +``` \ No newline at end of file diff --git a/changelog/11826.txt b/changelog/11826.txt new file mode 100644 index 0000000..b9accbd --- /dev/null +++ b/changelog/11826.txt @@ -0,0 +1,3 @@ +```release-note:bug +activity: Omit wrapping tokens and control groups from client counts +``` diff --git a/changelog/11836.txt b/changelog/11836.txt new file mode 100644 index 0000000..46981fd --- /dev/null +++ b/changelog/11836.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/ad: Forward all creds requests to active node [[GH-76](https://github.com/hashicorp/vault-plugin-secrets-ad/pull/76)] +``` diff --git a/changelog/11838.txt b/changelog/11838.txt new file mode 100644 index 0000000..dc53551 --- /dev/null +++ b/changelog/11838.txt @@ -0,0 +1,7 @@ +```release-note:bug +agent: fix timestamp format in log messages from the templating engine +``` + +```release-note:bug +agent/template: fix command shell quoting issue +``` \ No newline at end of file diff --git a/changelog/11861.txt b/changelog/11861.txt new file mode 100644 index 0000000..8dc8ae4 --- /dev/null +++ b/changelog/11861.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/database/cassandra: Fixed issue where the PEM parsing logic of `pem_bundle` and `pem_json` didn't work for CA-only configurations +``` diff --git a/changelog/11864.txt b/changelog/11864.txt new file mode 100644 index 0000000..0773ab0 --- /dev/null +++ b/changelog/11864.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/approle: fix dereference of nil pointer +``` \ No newline at end of file diff --git a/changelog/11872.txt b/changelog/11872.txt new file mode 100644 index 0000000..a573559 --- /dev/null +++ b/changelog/11872.txt @@ -0,0 +1,3 @@ +```release-note:bug +mongo-db: default username template now strips invalid '.'
characters +``` diff --git a/changelog/11878.txt b/changelog/11878.txt new file mode 100644 index 0000000..62d28a3 --- /dev/null +++ b/changelog/11878.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: add validation for duplicate paths in the kv engine +``` diff --git a/changelog/11884.txt b/changelog/11884.txt new file mode 100644 index 0000000..1dd884c --- /dev/null +++ b/changelog/11884.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fix oidc login with Safari +``` \ No newline at end of file diff --git a/changelog/11887.txt b/changelog/11887.txt new file mode 100644 index 0000000..280d06e --- /dev/null +++ b/changelog/11887.txt @@ -0,0 +1,3 @@ +```release-note:bug +secret/totp: pad input key to ensure length is a multiple of 8 +``` diff --git a/changelog/11895.txt b/changelog/11895.txt new file mode 100644 index 0000000..2a8837f --- /dev/null +++ b/changelog/11895.txt @@ -0,0 +1,3 @@ +```release-note:improvement +raft: change freelist type to map and set nofreelistsync to true +``` diff --git a/changelog/11899.txt b/changelog/11899.txt new file mode 100644 index 0000000..9867d9a --- /dev/null +++ b/changelog/11899.txt @@ -0,0 +1,3 @@ +```release-note:feature +secret/rabbitmq: Add ability to customize dynamic usernames +``` diff --git a/changelog/11904.txt b/changelog/11904.txt new file mode 100644 index 0000000..584aeae --- /dev/null +++ b/changelog/11904.txt @@ -0,0 +1,3 @@ +```release-note:bug +api: properly handle switching to/from unix domain socket when changing client address +``` diff --git a/changelog/11907.txt b/changelog/11907.txt new file mode 100644 index 0000000..e2bbafe --- /dev/null +++ b/changelog/11907.txt @@ -0,0 +1,3 @@ +```release-note:improvement +raft: Improve raft batch size selection +``` diff --git a/changelog/11934.txt b/changelog/11934.txt new file mode 100644 index 0000000..ee67171 --- /dev/null +++ b/changelog/11934.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent/template: Added static_secret_render_interval to specify how often to fetch non-leased secrets +``` diff --git a/changelog/11942.txt b/changelog/11942.txt new file mode 100644 index 0000000..52d7c69 --- /dev/null +++ b/changelog/11942.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Support manual rotation of Active Directory roles**: `rotate-role` endpoint has been added to the Active Directory secret engine to allow manual rotations of service accounts. +``` diff --git a/changelog/11956.txt b/changelog/11956.txt new file mode 100644 index 0000000..c2cb185 --- /dev/null +++ b/changelog/11956.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Customize dynamic usernames in MongoDB Atlas**: Adds the ability to customize username generation for dynamic users in MongoDB Atlas. +``` diff --git a/changelog/11957.txt b/changelog/11957.txt new file mode 100644 index 0000000..afc53a4 --- /dev/null +++ b/changelog/11957.txt @@ -0,0 +1,3 @@ +```release-note:feature +secrets/database/elasticsearch: Add ability to customize dynamic usernames +``` diff --git a/changelog/11958.txt b/changelog/11958.txt new file mode 100644 index 0000000..ef6b187 --- /dev/null +++ b/changelog/11958.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Key Management Secrets Engine (Enterprise)**: Adds general availability for distributing and managing keys in AWS KMS.
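To illustrate the client-address fix above (11904), a hedged Go sketch; the socket path is an illustrative placeholder, and `unix://` addresses are assumed to be handled by the client's dialer:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// Switch the same client from TCP to a unix domain socket and back;
	// the fix makes these transitions behave correctly.
	if err := client.SetAddress("unix:///run/vault.sock"); err != nil { // illustrative path
		log.Fatal(err)
	}
	if err := client.SetAddress("https://127.0.0.1:8200"); err != nil {
		log.Fatal(err)
	}
}
```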
+``` diff --git a/changelog/11963.txt b/changelog/11963.txt new file mode 100644 index 0000000..dd9c196 --- /dev/null +++ b/changelog/11963.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add validation support for open api form fields +``` \ No newline at end of file diff --git a/changelog/11969.txt b/changelog/11969.txt new file mode 100644 index 0000000..6680935 --- /dev/null +++ b/changelog/11969.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: JWT auto auth now supports a `remove_jwt_after_reading` config option which defaults to true. +``` \ No newline at end of file diff --git a/changelog/11970.txt b/changelog/11970.txt new file mode 100644 index 0000000..60ea438 --- /dev/null +++ b/changelog/11970.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fixed double counting of http requests after operator stepdown +``` \ No newline at end of file diff --git a/changelog/11975.txt b/changelog/11975.txt new file mode 100644 index 0000000..0969112 --- /dev/null +++ b/changelog/11975.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/ldap: Fix a bug where the LDAP auth method does not return the request_timeout configuration parameter on config read. +``` diff --git a/changelog/11980.txt b/changelog/11980.txt new file mode 100644 index 0000000..0fe44f5 --- /dev/null +++ b/changelog/11980.txt @@ -0,0 +1,3 @@ +```release-note:improvement +physical/etcd: Upgrade etcd3 client to v3.5.0 and etcd2 to v2.305.0. +``` diff --git a/changelog/11984.txt b/changelog/11984.txt new file mode 100644 index 0000000..11b23e8 --- /dev/null +++ b/changelog/11984.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Show day of month instead of day of year in the expiration warning dialog +``` diff --git a/changelog/11992.txt b/changelog/11992.txt new file mode 100644 index 0000000..c9b2c47 --- /dev/null +++ b/changelog/11992.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: vault delete and vault kv delete now support the same output options (e.g. -format) as vault write. +``` diff --git a/changelog/11995.txt b/changelog/11995.txt new file mode 100644 index 0000000..ccabb84 --- /dev/null +++ b/changelog/11995.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Show description on secret engine list +``` \ No newline at end of file diff --git a/changelog/11997.txt b/changelog/11997.txt new file mode 100644 index 0000000..b782ec8 --- /dev/null +++ b/changelog/11997.txt @@ -0,0 +1,3 @@ +```release-note:feature +secrets/database/snowflake: Add ability to customize dynamic usernames +``` diff --git a/changelog/12003.txt b/changelog/12003.txt new file mode 100644 index 0000000..ed81f83 --- /dev/null +++ b/changelog/12003.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: add transform secrets engine to features list +``` diff --git a/changelog/12008.txt b/changelog/12008.txt new file mode 100644 index 0000000..2c648f6 --- /dev/null +++ b/changelog/12008.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Revert fix for PR [11423](https://github.com/hashicorp/vault/pull/11423).
+``` \ No newline at end of file diff --git a/changelog/12016.txt b/changelog/12016.txt new file mode 100644 index 0000000..4b57569 --- /dev/null +++ b/changelog/12016.txt @@ -0,0 +1,3 @@ +```release-note:feature +secrets/database/redshift: Add ability to customize dynamic usernames +``` diff --git a/changelog/12019.txt b/changelog/12019.txt new file mode 100644 index 0000000..86d6a4d --- /dev/null +++ b/changelog/12019.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/openldap: Fix bug where schema was not compatible with rotate-root [#24](https://github.com/hashicorp/vault-plugin-secrets-openldap/pull/24) +``` diff --git a/changelog/12020.txt b/changelog/12020.txt new file mode 100644 index 0000000..c3c605d --- /dev/null +++ b/changelog/12020.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/metrics: Add generic KV mount support for vault.kv.secret.count telemetry metric +``` diff --git a/changelog/12023.txt b/changelog/12023.txt new file mode 100644 index 0000000..8a3a87b --- /dev/null +++ b/changelog/12023.txt @@ -0,0 +1,9 @@ +```release-note:feature +**GCP Secrets Engine Static Accounts**: Adds ability to use existing service accounts for generation +of service account keys and access tokens. +``` + +```release-note:deprecation +secrets/gcp: Deprecated the `/gcp/token/:roleset` and `/gcp/key/:roleset` paths for generating +secrets for rolesets. Use `/gcp/roleset/:roleset/token` and `/gcp/roleset/:roleset/key` instead. +``` diff --git a/changelog/12024.txt b/changelog/12024.txt new file mode 100644 index 0000000..52e39a0 --- /dev/null +++ b/changelog/12024.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fix control group access for database credential +``` diff --git a/changelog/12025.txt b/changelog/12025.txt new file mode 100644 index 0000000..79f14b1 --- /dev/null +++ b/changelog/12025.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Add `prefix_filter` to telemetry config +``` \ No newline at end of file diff --git a/changelog/12026.txt b/changelog/12026.txt new file mode 100644 index 0000000..12b6cdd --- /dev/null +++ b/changelog/12026.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/aws: Remove warning stating AWS Token TTL will be capped by the Default Lease TTL. +``` \ No newline at end of file diff --git a/changelog/12031.txt b/changelog/12031.txt new file mode 100644 index 0000000..80cc8f3 --- /dev/null +++ b/changelog/12031.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: Add support for response wrapping in `vault list` and `vault kv list` with output format other than `table`. +``` diff --git a/changelog/12034.txt b/changelog/12034.txt new file mode 100644 index 0000000..09f91f8 --- /dev/null +++ b/changelog/12034.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/raft: Tweak creation of vault.db file +``` diff --git a/changelog/12035.txt b/changelog/12035.txt new file mode 100644 index 0000000..b0f2745 --- /dev/null +++ b/changelog/12035.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Automatically refresh the page when user logs out +``` diff --git a/changelog/12042.txt b/changelog/12042.txt new file mode 100644 index 0000000..2ec23c8 --- /dev/null +++ b/changelog/12042.txt @@ -0,0 +1,3 @@ +```release-note:change +api: A request that fails field validation will now be responded to with a 400 rather than 500.
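For API consumers, the 400-rather-than-500 change above (12042) can be observed from the Go client; a minimal sketch with an illustrative write path and a deliberately invalid field value:

```go
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// A value that fails field validation now yields a client error (400)
	// instead of an internal error (500).
	_, err = client.Logical().Write("auth/token/create", map[string]interface{}{
		"ttl": "not-a-duration", // deliberately invalid input
	})
	var respErr *api.ResponseError
	if errors.As(err, &respErr) {
		fmt.Println("status:", respErr.StatusCode) // expect 400, not 500
	}
}
```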
+``` diff --git a/changelog/12049.txt b/changelog/12049.txt new file mode 100644 index 0000000..103bc0c --- /dev/null +++ b/changelog/12049.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Updated node to v14, latest stable build +``` \ No newline at end of file diff --git a/changelog/12066.txt b/changelog/12066.txt new file mode 100644 index 0000000..6a672ff --- /dev/null +++ b/changelog/12066.txt @@ -0,0 +1,3 @@ +```release-note:feature +secrets/aws: add support for custom IAM usernames +``` diff --git a/changelog/12071.txt b/changelog/12071.txt new file mode 100644 index 0000000..17b7c68 --- /dev/null +++ b/changelog/12071.txt @@ -0,0 +1,3 @@ +```release-note:feature +core: Add a darwin/arm64 binary release supporting the Apple M1 CPU +``` \ No newline at end of file diff --git a/changelog/12073.txt b/changelog/12073.txt new file mode 100644 index 0000000..765e101 --- /dev/null +++ b/changelog/12073.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/kubernetes: Fix AliasLookahead to correctly extract ServiceAccount UID when using ephemeral JWTs +``` diff --git a/changelog/12079.txt b/changelog/12079.txt new file mode 100644 index 0000000..e41d032 --- /dev/null +++ b/changelog/12079.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix Version History queryParams on LinkedBlock +``` diff --git a/changelog/12084.txt b/changelog/12084.txt new file mode 100644 index 0000000..b2e5d51 --- /dev/null +++ b/changelog/12084.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Render dashes for secret empty states +``` \ No newline at end of file diff --git a/changelog/12087.txt b/changelog/12087.txt new file mode 100644 index 0000000..9141739 --- /dev/null +++ b/changelog/12087.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/database: Fixed an issue that prevented external database plugin processes from restarting after a shutdown. +``` diff --git a/changelog/12111.txt b/changelog/12111.txt new file mode 100644 index 0000000..192b4a1 --- /dev/null +++ b/changelog/12111.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix database role CG access +``` \ No newline at end of file diff --git a/changelog/12115.txt b/changelog/12115.txt new file mode 100644 index 0000000..cb00d73 --- /dev/null +++ b/changelog/12115.txt @@ -0,0 +1,3 @@ +```release-note:bug +service_registration/consul: Print the configured service_address in the DEBUG log +``` diff --git a/changelog/12117.txt b/changelog/12117.txt new file mode 100644 index 0000000..081a587 --- /dev/null +++ b/changelog/12117.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Allow cloning `api.Client` HTTP headers via `api.Config.CloneHeaders` or `api.Client.SetCloneHeaders`. +``` diff --git a/changelog/12126.txt b/changelog/12126.txt new file mode 100644 index 0000000..c53d2a1 --- /dev/null +++ b/changelog/12126.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli/api: Provide consistent handling of comma-separated parameters in auth/secret enable/tune +``` diff --git a/changelog/12151.txt b/changelog/12151.txt new file mode 100644 index 0000000..2d2a834 --- /dev/null +++ b/changelog/12151.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity: do not allow a role's token_ttl to be longer than the signing key's verification_ttl +``` diff --git a/changelog/12162.txt b/changelog/12162.txt new file mode 100644 index 0000000..70c943a --- /dev/null +++ b/changelog/12162.txt @@ -0,0 +1,3 @@ +```release-note:improvement +storage/raft: Best-effort handling of cancelled contexts.
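A short Go sketch of the header-cloning option noted above (12117); the header name and value are illustrative:

```go
package main

import (
	"log"
	"net/http"

	"github.com/hashicorp/vault/api"
)

func main() {
	cfg := api.DefaultConfig()
	cfg.CloneHeaders = true // clones inherit the parent client's headers
	client, err := api.NewClient(cfg)
	if err != nil {
		log.Fatal(err)
	}
	client.SetHeaders(http.Header{"X-Custom-Header": {"demo"}}) // illustrative header
	clone, err := client.Clone()
	if err != nil {
		log.Fatal(err)
	}
	log.Println(clone.Headers().Get("X-Custom-Header")) // "demo" with CloneHeaders set
}
```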
+``` diff --git a/changelog/12163.txt b/changelog/12163.txt new file mode 100644 index 0000000..10af9d0 --- /dev/null +++ b/changelog/12163.txt @@ -0,0 +1,3 @@ +```release-note:improvement +serviceregistration: add `external-source: "vault"` metadata value for Consul registration. +``` \ No newline at end of file diff --git a/changelog/12165.txt b/changelog/12165.txt new file mode 100644 index 0000000..c5e28ee --- /dev/null +++ b/changelog/12165.txt @@ -0,0 +1,3 @@ +```release-note:improvement +storage/aerospike: Upgrade `aerospike-client-go` to v5.6.0. +``` diff --git a/changelog/12166.txt b/changelog/12166.txt new file mode 100644 index 0000000..9cec76c --- /dev/null +++ b/changelog/12166.txt @@ -0,0 +1,3 @@ +```release-note:improvement +storage/raft: add additional raft metrics relating to applied index and heartbeating; also ensure OSS standbys emit periodic metrics. +``` diff --git a/changelog/12169.txt b/changelog/12169.txt new file mode 100644 index 0000000..6842cbd --- /dev/null +++ b/changelog/12169.txt @@ -0,0 +1,3 @@ +```release-note:feature +ui: Add custom metadata to KV secret engine and metadata to config +``` diff --git a/changelog/12175.txt b/changelog/12175.txt new file mode 100644 index 0000000..a75b21c --- /dev/null +++ b/changelog/12175.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: vault debug now puts newlines after every captured log line. +``` diff --git a/changelog/12185.txt b/changelog/12185.txt new file mode 100644 index 0000000..919da9e --- /dev/null +++ b/changelog/12185.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/aws: Add conditional template that allows custom usernames for both STS and IAM cases +``` \ No newline at end of file diff --git a/changelog/12196.txt b/changelog/12196.txt new file mode 100644 index 0000000..28b6b64 --- /dev/null +++ b/changelog/12196.txt @@ -0,0 +1,3 @@ +```release-note:bug +core (enterprise): namespace header is now included in responses, and the Go client uses it when displaying error messages +``` diff --git a/changelog/12208.txt b/changelog/12208.txt new file mode 100644 index 0000000..ab1aa83 --- /dev/null +++ b/changelog/12208.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity: disallow creation of role without a key parameter +``` diff --git a/changelog/12212.txt b/changelog/12212.txt new file mode 100644 index 0000000..5130815 --- /dev/null +++ b/changelog/12212.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: update database TTL picker help text. +``` diff --git a/changelog/12229.txt b/changelog/12229.txt new file mode 100644 index 0000000..6503ba5 --- /dev/null +++ b/changelog/12229.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: fix byte printing for diagnose disk checks +``` diff --git a/changelog/12245.txt b/changelog/12245.txt new file mode 100644 index 0000000..2bf1aee --- /dev/null +++ b/changelog/12245.txt @@ -0,0 +1,3 @@ +```release-note:change +go: Update go version to 1.16.6 +``` diff --git a/changelog/12253.txt b/changelog/12253.txt new file mode 100644 index 0000000..fdf704c --- /dev/null +++ b/changelog/12253.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Update github.com/ulikunitz/xz to fix security vulnerability GHSA-25xm-hr59-7c27.
+``` diff --git a/changelog/12255.txt b/changelog/12255.txt new file mode 100644 index 0000000..e4c4781 --- /dev/null +++ b/changelog/12255.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Upgrade github.com/gogo/protobuf +``` diff --git a/changelog/12262.txt b/changelog/12262.txt new file mode 100644 index 0000000..415c922 --- /dev/null +++ b/changelog/12262.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/database/influxdb: Switch/upgrade to the `influxdb1-client` module +``` diff --git a/changelog/12265.txt b/changelog/12265.txt new file mode 100644 index 0000000..5527f32 --- /dev/null +++ b/changelog/12265.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/jwt: Fixes OIDC auth from the Vault UI when using `form_post` as the `oidc_response_mode`. +``` diff --git a/changelog/12295.txt b/changelog/12295.txt new file mode 100644 index 0000000..144ad0f --- /dev/null +++ b/changelog/12295.txt @@ -0,0 +1,3 @@ +```release-note:feature +ui: Creates new StatText component +``` \ No newline at end of file diff --git a/changelog/12301.txt b/changelog/12301.txt new file mode 100644 index 0000000..d4acaf2 --- /dev/null +++ b/changelog/12301.txt @@ -0,0 +1,3 @@ +```release-note:bug +database/couchbase: change default template to truncate username at 128 characters +``` \ No newline at end of file diff --git a/changelog/12317.txt b/changelog/12317.txt new file mode 100644 index 0000000..4554419 --- /dev/null +++ b/changelog/12317.txt @@ -0,0 +1,3 @@ +```release-note:bug +core (enterprise): Fixes reading raft auto-snapshot configuration from performance standby node +``` \ No newline at end of file diff --git a/changelog/12320.txt b/changelog/12320.txt new file mode 100644 index 0000000..f6091e1 --- /dev/null +++ b/changelog/12320.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/okta: Send x-forwarded-for in Okta Push Factor request +``` diff --git a/changelog/12338.txt b/changelog/12338.txt new file mode 100644 index 0000000..6faf03e --- /dev/null +++ b/changelog/12338.txt @@ -0,0 +1,3 @@ +```release-note:bug +api: Fixes storage APIs returning incorrect error when parsing responses +``` diff --git a/changelog/12339.txt b/changelog/12339.txt new file mode 100644 index 0000000..b82044e --- /dev/null +++ b/changelog/12339.txt @@ -0,0 +1,3 @@ +```release-note:bug +core (enterprise): Only delete quotas on primary cluster. +``` diff --git a/changelog/12340.txt b/changelog/12340.txt new file mode 100644 index 0000000..f40253a --- /dev/null +++ b/changelog/12340.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/aws: Fixes ec2 login no longer supporting DSA signature verification +``` diff --git a/changelog/12348.txt b/changelog/12348.txt new file mode 100644 index 0000000..80ac862 --- /dev/null +++ b/changelog/12348.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes metrics page when read on counter config not allowed +``` \ No newline at end of file diff --git a/changelog/12351.txt b/changelog/12351.txt new file mode 100644 index 0000000..bb8f84a --- /dev/null +++ b/changelog/12351.txt @@ -0,0 +1,3 @@ +```release-note:bug +sdk/database: Fix a DeleteUser error message on the gRPC client.
+``` \ No newline at end of file diff --git a/changelog/12354.txt b/changelog/12354.txt new file mode 100644 index 0000000..bbf0e2e --- /dev/null +++ b/changelog/12354.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixed api explorer routing bug +``` \ No newline at end of file diff --git a/changelog/12357.txt b/changelog/12357.txt new file mode 100644 index 0000000..30de5af --- /dev/null +++ b/changelog/12357.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixed text overflow in flash messages +``` \ No newline at end of file diff --git a/changelog/12366.txt b/changelog/12366.txt new file mode 100644 index 0000000..e8c16d1 --- /dev/null +++ b/changelog/12366.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/raft: Support `addr_type=public_v6` in auto-join +``` diff --git a/changelog/12371.txt b/changelog/12371.txt new file mode 100644 index 0000000..eb36e1d --- /dev/null +++ b/changelog/12371.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity: Fix a panic on arm64 platform when doing identity I/O. +``` \ No newline at end of file diff --git a/changelog/12372.txt b/changelog/12372.txt new file mode 100644 index 0000000..22a64c9 --- /dev/null +++ b/changelog/12372.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/api: Fix an arm64 bug converting a negative int to an unsigned int +``` \ No newline at end of file diff --git a/changelog/12377.txt b/changelog/12377.txt new file mode 100644 index 0000000..d3bd52a --- /dev/null +++ b/changelog/12377.txt @@ -0,0 +1,3 @@ +```release-note:bug +physical/raft: Fix safeio.Rename error when restoring snapshots on windows +``` \ No newline at end of file diff --git a/changelog/12378.txt b/changelog/12378.txt new file mode 100644 index 0000000..336ceb8 --- /dev/null +++ b/changelog/12378.txt @@ -0,0 +1,3 @@ +```release-note:bug +plugin/snowflake: Fixed bug where plugin would crash on 32 bit systems +``` diff --git a/changelog/12379.txt b/changelog/12379.txt new file mode 100644 index 0000000..c4b2397 --- /dev/null +++ b/changelog/12379.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/gcp: Fixes a potential panic in the service account policy rollback for rolesets. +``` diff --git a/changelog/12388.txt b/changelog/12388.txt new file mode 100644 index 0000000..f384c90 --- /dev/null +++ b/changelog/12388.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/raft: Detect incomplete raft snapshots in api.RaftSnapshot(), and thereby in `vault operator raft snapshot save`. +``` diff --git a/changelog/12393.txt b/changelog/12393.txt new file mode 100644 index 0000000..7997d51 --- /dev/null +++ b/changelog/12393.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: observe the client counts broken down by namespace for partial month client count +``` \ No newline at end of file diff --git a/changelog/12408.txt b/changelog/12408.txt new file mode 100644 index 0000000..5a5532b --- /dev/null +++ b/changelog/12408.txt @@ -0,0 +1,3 @@ +```release-note:change +go: Update go version to 1.16.7 +``` diff --git a/changelog/12409.txt b/changelog/12409.txt new file mode 100644 index 0000000..483a151 --- /dev/null +++ b/changelog/12409.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fix issue on auth methods where tabbing in a MaskedInput would clear the value. +``` diff --git a/changelog/12413.txt b/changelog/12413.txt new file mode 100644 index 0000000..cd90b6e --- /dev/null +++ b/changelog/12413.txt @@ -0,0 +1,4 @@ +```release-note:bug +storage/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool.
+database/postgres: Update postgres library (github.com/lib/pq) to properly remove terminated TLS connections from the connection pool. +``` diff --git a/changelog/12414.txt b/changelog/12414.txt new file mode 100644 index 0000000..5f3cfdd --- /dev/null +++ b/changelog/12414.txt @@ -0,0 +1,3 @@ +```release-note:improvement +identity: fix issue where Cache-Control header causes stampede of requests for JWKS keys +``` diff --git a/changelog/12418.txt b/changelog/12418.txt new file mode 100644 index 0000000..e6c07e9 --- /dev/null +++ b/changelog/12418.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/transit: Enforce minimum cache size for transit backend and init cache size on transit backend without restart. +``` \ No newline at end of file diff --git a/changelog/12422.txt b/changelog/12422.txt new file mode 100644 index 0000000..728b4a3 --- /dev/null +++ b/changelog/12422.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: updated client tracking config view +``` \ No newline at end of file diff --git a/changelog/12425.txt b/changelog/12425.txt new file mode 100644 index 0000000..709e631 --- /dev/null +++ b/changelog/12425.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/approle: expose secret_id_accessor as WrappedAccessor when creating wrapped secret-id. +``` \ No newline at end of file diff --git a/changelog/12428.txt b/changelog/12428.txt new file mode 100644 index 0000000..8daeb8a --- /dev/null +++ b/changelog/12428.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: add api method for modifying raft autopilot configuration +``` diff --git a/changelog/12437.txt b/changelog/12437.txt new file mode 100644 index 0000000..d329e1b --- /dev/null +++ b/changelog/12437.txt @@ -0,0 +1,3 @@ +```release-note:feature +ui: creates bar chart component for displaying client count data by namespace +``` \ No newline at end of file diff --git a/changelog/12443.txt b/changelog/12443.txt new file mode 100644 index 0000000..6014140 --- /dev/null +++ b/changelog/12443.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/database/cassandra: change connect_timeout to 5s as documentation says +``` diff --git a/changelog/12451.txt b/changelog/12451.txt new file mode 100644 index 0000000..9cd265f --- /dev/null +++ b/changelog/12451.txt @@ -0,0 +1,3 @@ +```release-note:feature +nomad: Bootstrap Nomad ACL system if no token is provided +``` diff --git a/changelog/12473.txt b/changelog/12473.txt new file mode 100644 index 0000000..3a0ecdd --- /dev/null +++ b/changelog/12473.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity: Fail alias rename if the resulting (name,accessor) exists already +``` \ No newline at end of file diff --git a/changelog/12478.txt b/changelog/12478.txt new file mode 100644 index 0000000..3704e9a --- /dev/null +++ b/changelog/12478.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fix missing navbar items on login to namespace +``` \ No newline at end of file diff --git a/changelog/12483.txt b/changelog/12483.txt new file mode 100644 index 0000000..da3a72a --- /dev/null +++ b/changelog/12483.txt @@ -0,0 +1,3 @@ +```release-note:improvement +plugin: update the couchbase gocb version in the couchbase plugin +``` diff --git a/changelog/12485.txt b/changelog/12485.txt new file mode 100644 index 0000000..6c8a87c --- /dev/null +++ b/changelog/12485.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Customizable HTTP Headers**: Add support to define custom HTTP headers for root path (`/`) and also on API endpoints (`/v1/*`) +``` diff --git a/changelog/12502.txt b/changelog/12502.txt new file mode 
100644 index 0000000..4d24e7a --- /dev/null +++ b/changelog/12502.txt @@ -0,0 +1,3 @@ +```release-note:feature +core: adds custom_metadata field for aliases +``` \ No newline at end of file diff --git a/changelog/12505.txt b/changelog/12505.txt new file mode 100644 index 0000000..075e53d --- /dev/null +++ b/changelog/12505.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent/template: add support for new 'writeToFile' template function +``` \ No newline at end of file diff --git a/changelog/12508.txt b/changelog/12508.txt new file mode 100644 index 0000000..52c9e7c --- /dev/null +++ b/changelog/12508.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli: add a new http option, `-header`, which enables sending arbitrary headers with the cli +``` diff --git a/changelog/12514.txt b/changelog/12514.txt new file mode 100644 index 0000000..8f00061 --- /dev/null +++ b/changelog/12514.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Allow signing of self-issued certs with a different signature algorithm. +``` diff --git a/changelog/12519.txt b/changelog/12519.txt new file mode 100644 index 0000000..df383b9 --- /dev/null +++ b/changelog/12519.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/aws: Fix ec2 auth on instances that have a cert in their PKCS7 signature +``` diff --git a/changelog/12534.txt b/changelog/12534.txt new file mode 100644 index 0000000..d7c05f6 --- /dev/null +++ b/changelog/12534.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent: Avoid possible `unexpected fault address` panic when using persistent cache. +``` diff --git a/changelog/12541.txt b/changelog/12541.txt new file mode 100644 index 0000000..aa10c65 --- /dev/null +++ b/changelog/12541.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: parse and display pki cert metadata +``` \ No newline at end of file diff --git a/changelog/12550.txt b/changelog/12550.txt new file mode 100644 index 0000000..171a34b --- /dev/null +++ b/changelog/12550.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix bug where capabilities check on secret-delete-menu was encoding the forward slashes. +``` diff --git a/changelog/12554.txt b/changelog/12554.txt new file mode 100644 index 0000000..e3549ec --- /dev/null +++ b/changelog/12554.txt @@ -0,0 +1,3 @@ +```release-note:feature +ui: client count monthly view +``` \ No newline at end of file diff --git a/changelog/12559.txt b/changelog/12559.txt new file mode 100644 index 0000000..9bcd4e8 --- /dev/null +++ b/changelog/12559.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Use entropy augmentation when available when generating root and intermediate CA key material. +``` \ No newline at end of file diff --git a/changelog/12560.txt b/changelog/12560.txt new file mode 100644 index 0000000..0b76337 --- /dev/null +++ b/changelog/12560.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/ssh: Use entropy augmentation when available for generation of the signing key. +``` \ No newline at end of file diff --git a/changelog/12563.txt b/changelog/12563.txt new file mode 100644 index 0000000..9298e82 --- /dev/null +++ b/changelog/12563.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/db: Fix bug where Vault can rotate static role passwords early during start up under certain conditions.
+``` diff --git a/changelog/12565.txt b/changelog/12565.txt new file mode 100644 index 0000000..a125950 --- /dev/null +++ b/changelog/12565.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/token: Return the token_no_default_policy config on token role read if set +``` \ No newline at end of file diff --git a/changelog/12577.txt b/changelog/12577.txt new file mode 100644 index 0000000..7f69476 --- /dev/null +++ b/changelog/12577.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: namespace search in client count views +``` \ No newline at end of file diff --git a/changelog/12581.txt b/changelog/12581.txt new file mode 100644 index 0000000..2260987 --- /dev/null +++ b/changelog/12581.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/plugin: Update plugin proto to send tls.ConnectionState across gRPC boundary +``` diff --git a/changelog/12582.txt b/changelog/12582.txt new file mode 100644 index 0000000..6e5c0c9 --- /dev/null +++ b/changelog/12582.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Support VAULT_HTTP_PROXY environment variable to allow overriding the Vault client's HTTP proxy +``` diff --git a/changelog/12600.txt b/changelog/12600.txt new file mode 100644 index 0000000..45d34e9 --- /dev/null +++ b/changelog/12600.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/openldap: Fix bug where Vault can rotate static role passwords early during start up under certain conditions. [#28](https://github.com/hashicorp/vault-plugin-secrets-openldap/pull/28) +``` \ No newline at end of file diff --git a/changelog/12621.txt b/changelog/12621.txt new file mode 100644 index 0000000..b17ad16 --- /dev/null +++ b/changelog/12621.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/aws: add profile support for AWS credentials when using the AWS auth method +``` \ No newline at end of file diff --git a/changelog/12622.txt b/changelog/12622.txt new file mode 100644 index 0000000..5c2d569 --- /dev/null +++ b/changelog/12622.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: update bar chart when model changes +``` \ No newline at end of file diff --git a/changelog/12626.txt b/changelog/12626.txt new file mode 100644 index 0000000..7c3cc7c --- /dev/null +++ b/changelog/12626.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add KV secret search box when no metadata list access. +``` \ No newline at end of file diff --git a/changelog/12629.txt b/changelog/12629.txt new file mode 100644 index 0000000..39b3eba --- /dev/null +++ b/changelog/12629.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/azure: Adds support for using Microsoft Graph API since Azure Active Directory API is being removed in 2022. [#67](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/67) +``` diff --git a/changelog/12633.txt b/changelog/12633.txt new file mode 100644 index 0000000..d8f1609 --- /dev/null +++ b/changelog/12633.txt @@ -0,0 +1,3 @@ +```release-note:feature +auth/kubernetes: Add ability to configure entity alias names based on the serviceaccount's namespace and name. [#110](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/110) [#112](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/112) +``` diff --git a/changelog/12635.txt b/changelog/12635.txt new file mode 100644 index 0000000..9e1a7d7 --- /dev/null +++ b/changelog/12635.txt @@ -0,0 +1,3 @@ +```release-note:bug +core (enterprise): Fix bug where password generation through password policies does not work on namespaces if performed outside a request callback or from an external plugin.
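A hedged Go sketch of the `VAULT_HTTP_PROXY` override above (12582), assuming the default client config reads the variable from the environment; the proxy address is illustrative:

```go
package main

import (
	"log"
	"os"

	"github.com/hashicorp/vault/api"
)

func main() {
	// DefaultConfig reads the environment, so the proxy applies to this client.
	os.Setenv("VAULT_HTTP_PROXY", "http://127.0.0.1:3128") // illustrative proxy
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	_ = client // requests from this client are now routed through the proxy
}
```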
+``` \ No newline at end of file diff --git a/changelog/12646.txt b/changelog/12646.txt new file mode 100644 index 0000000..e4f9385 --- /dev/null +++ b/changelog/12646.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix bug where edit role form on auth method is invalid by default +``` \ No newline at end of file diff --git a/changelog/12663.txt b/changelog/12663.txt new file mode 100644 index 0000000..aab6e38 --- /dev/null +++ b/changelog/12663.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add creation time to KV 2 version history and version view +``` diff --git a/changelog/12668.txt b/changelog/12668.txt new file mode 100644 index 0000000..7006da5 --- /dev/null +++ b/changelog/12668.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sdk/framework: The '+' wildcard is now supported for parameterizing unauthenticated paths. +``` diff --git a/changelog/12672.txt b/changelog/12672.txt new file mode 100644 index 0000000..62ac12b --- /dev/null +++ b/changelog/12672.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Elasticsearch in the UI**: Elasticsearch DB is now supported by the UI +``` \ No newline at end of file diff --git a/changelog/12687.txt b/changelog/12687.txt new file mode 100644 index 0000000..f5998de --- /dev/null +++ b/changelog/12687.txt @@ -0,0 +1,5 @@ +```release-note:feature +**KV patch**: Add partial update support for the `//data/:path` kv-v2 +endpoint through HTTP `PATCH`. A new `patch` ACL capability has been added and +is required to make such requests. +``` diff --git a/changelog/12688.txt b/changelog/12688.txt new file mode 100644 index 0000000..e57e56b --- /dev/null +++ b/changelog/12688.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/kubernetes: validate JWT against the provided role on alias look ahead operations +``` diff --git a/changelog/12691.txt b/changelog/12691.txt new file mode 100644 index 0000000..2a8efdc --- /dev/null +++ b/changelog/12691.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix a deadlock on HA leadership transfer +``` diff --git a/changelog/12713.txt b/changelog/12713.txt new file mode 100644 index 0000000..2204a3d --- /dev/null +++ b/changelog/12713.txt @@ -0,0 +1,3 @@ +```release-note:bug +http: removed unpublished true from logical_system path, making openapi spec consistent with documentation +``` \ No newline at end of file diff --git a/changelog/12715.txt b/changelog/12715.txt new file mode 100644 index 0000000..b4a61a7 --- /dev/null +++ b/changelog/12715.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/aws: fix config/rotate-root to store new key +``` diff --git a/changelog/12716.txt b/changelog/12716.txt new file mode 100644 index 0000000..9a41d31 --- /dev/null +++ b/changelog/12716.txt @@ -0,0 +1,3 @@ +```release-note:bug +pki: Fix regression preventing email addresses being used as a common name within certificates +``` diff --git a/changelog/12718.txt b/changelog/12718.txt new file mode 100644 index 0000000..1e786d7 --- /dev/null +++ b/changelog/12718.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent/cache: tolerate partial restore failure from persistent cache
``` diff --git a/changelog/12720.txt b/changelog/12720.txt new file mode 100644 index 0000000..2c21312 --- /dev/null +++ b/changelog/12720.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: fixes CLI requests when namespace is both provided as argument and part of the path +``` diff --git a/changelog/12724.txt b/changelog/12724.txt new file mode 100644 index 0000000..caeb6cf --- /dev/null +++ b/changelog/12724.txt @@ -0,0 +1,3 @@ +```release-note:improvement
+core: Update Oracle Cloud library to enable seal integration with the uk-gov-london-1 region +``` \ No newline at end of file diff --git a/changelog/12731.txt b/changelog/12731.txt new file mode 100644 index 0000000..c883668 --- /dev/null +++ b/changelog/12731.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Move mergeStates and other required utils from agent to api module +``` \ No newline at end of file diff --git a/changelog/12747.txt b/changelog/12747.txt new file mode 100644 index 0000000..2347d30 --- /dev/null +++ b/changelog/12747.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/identity: Disallow entity alias creation/update if a conflicting alias exists for the target entity and mount combination +``` \ No newline at end of file diff --git a/changelog/12752.txt b/changelog/12752.txt new file mode 100644 index 0000000..c5f1856 --- /dev/null +++ b/changelog/12752.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Oracle DB in the UI**: Oracle DB connection is now supported in the UI +``` \ No newline at end of file diff --git a/changelog/12762.txt b/changelog/12762.txt new file mode 100644 index 0000000..6c90c48 --- /dev/null +++ b/changelog/12762.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent/cache: Use an in-process listener between consul-template and vault-agent when caching is enabled and either templates or a listener is defined +``` diff --git a/changelog/12763.txt b/changelog/12763.txt new file mode 100644 index 0000000..0deac4c --- /dev/null +++ b/changelog/12763.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix double counting for "route" metrics +``` \ No newline at end of file diff --git a/changelog/12770.txt b/changelog/12770.txt new file mode 100644 index 0000000..46d99f4 --- /dev/null +++ b/changelog/12770.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Filter DB connection attributes so only relevant attrs POST to backend +``` \ No newline at end of file diff --git a/changelog/12780.txt b/changelog/12780.txt new file mode 100644 index 0000000..61a2c5d --- /dev/null +++ b/changelog/12780.txt @@ -0,0 +1,3 @@ +```release-note:improvement +identity/token: Only return keys from the `.well-known/keys` endpoint that are being used by roles to sign/verify tokens.
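The kv-v2 `PATCH` support above (12687) can be exercised with a plain HTTP client; a minimal Go sketch, assuming a kv-v2 mount at `secret` and a token holding the new `patch` capability on the path:

```go
package main

import (
	"log"
	"net/http"
	"os"
	"strings"
)

func main() {
	// Partial update of a kv-v2 secret: only the listed fields change,
	// other fields in the secret are left intact.
	body := strings.NewReader(`{"data":{"api_key":"rotated-value"}}`) // illustrative field
	req, err := http.NewRequest(http.MethodPatch,
		os.Getenv("VAULT_ADDR")+"/v1/secret/data/my-app", body) // illustrative mount and path
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("X-Vault-Token", os.Getenv("VAULT_TOKEN"))
	req.Header.Set("Content-Type", "application/merge-patch+json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}
```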
+``` diff --git a/changelog/12787.txt b/changelog/12787.txt new file mode 100644 index 0000000..f7826b2 --- /dev/null +++ b/changelog/12787.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Add support to list password policies at `sys/policies/password` +``` diff --git a/changelog/12788.txt b/changelog/12788.txt new file mode 100644 index 0000000..47dd965 --- /dev/null +++ b/changelog/12788.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/approle: The `role/:name/secret-id-accessor/lookup` endpoint now returns a 404 status code when the `secret_id_accessor` cannot be found +``` diff --git a/changelog/12790.txt b/changelog/12790.txt new file mode 100644 index 0000000..dbea34d --- /dev/null +++ b/changelog/12790.txt @@ -0,0 +1,3 @@ +```release-note:improvement +audit: The audit logs now contain the port used by the client +``` diff --git a/changelog/12791.txt b/changelog/12791.txt new file mode 100644 index 0000000..1aa881a --- /dev/null +++ b/changelog/12791.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: `num_uses` is now returned during authentication +``` diff --git a/changelog/12792.txt b/changelog/12792.txt new file mode 100644 index 0000000..76f5875 --- /dev/null +++ b/changelog/12792.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Reading `sys/mounts/:path` now returns the configuration for the secret engine at the given path +``` diff --git a/changelog/12793.txt b/changelog/12793.txt new file mode 100644 index 0000000..617c20f --- /dev/null +++ b/changelog/12793.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth: reading `sys/auth/:path` now returns the configuration for the auth engine mounted at the given path +``` diff --git a/changelog/12795.txt b/changelog/12795.txt new file mode 100644 index 0000000..5b360be --- /dev/null +++ b/changelog/12795.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/pki: Support Y10K value in notAfter field to be compliant with IEEE 802.1AR-2018 standard +``` diff --git a/changelog/12796.txt b/changelog/12796.txt new file mode 100644 index 0000000..8c001f3 --- /dev/null +++ b/changelog/12796.txt @@ -0,0 +1,3 @@ +```release-note:feature +api: adds native Login method to Go client module with different auth method interfaces to support easier authentication +``` \ No newline at end of file diff --git a/changelog/12800.txt b/changelog/12800.txt new file mode 100644 index 0000000..38aadc0 --- /dev/null +++ b/changelog/12800.txt @@ -0,0 +1,3 @@ +```release-note:feature +**OIDC Authorization Code Flow**: The Vault UI now supports OIDC Authorization Code Flow +``` diff --git a/changelog/12802.txt b/changelog/12802.txt new file mode 100644 index 0000000..9c49bf1 --- /dev/null +++ b/changelog/12802.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli: Operator diagnose now tests for missing or partial telemetry configurations. +``` \ No newline at end of file diff --git a/changelog/12812.txt b/changelog/12812.txt new file mode 100644 index 0000000..2179340 --- /dev/null +++ b/changelog/12812.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity: suppress duplicate policies on entities +``` diff --git a/changelog/12814.txt b/changelog/12814.txt new file mode 100644 index 0000000..9d5b541 --- /dev/null +++ b/changelog/12814.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Add configuration option for ensuring isolated read-after-write semantics for all Client requests.
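For the native `Login` method above (12796), a minimal Go sketch using the AppRole auth helper from the api auth submodule; the role-id and secret-id values are illustrative placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/api/auth/approle"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// AppRole is one of the auth method interfaces the native Login accepts.
	a, err := approle.NewAppRoleAuth("my-role-id", // illustrative role-id
		&approle.SecretID{FromString: "my-secret-id"}) // illustrative secret-id
	if err != nil {
		log.Fatal(err)
	}
	secret, err := client.Auth().Login(context.Background(), a)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("token accessor:", secret.Auth.Accessor)
}
```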
+``` diff --git a/changelog/12819.txt b/changelog/12819.txt new file mode 100644 index 0000000..80e772b --- /dev/null +++ b/changelog/12819.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Removes empty rows from DB config views +``` diff --git a/changelog/12820.txt b/changelog/12820.txt new file mode 100644 index 0000000..c7b92e6 --- /dev/null +++ b/changelog/12820.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Add ClientID to Tokens Without Entities in Activity Log**: Vault tokens without entities are now tracked with client IDs and deduplicated in the Activity Log +``` diff --git a/changelog/12834.txt b/changelog/12834.txt new file mode 100644 index 0000000..205b648 --- /dev/null +++ b/changelog/12834.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/identity: Cleanup alias in the in-memory entity after an alias deletion by ID +``` \ No newline at end of file diff --git a/changelog/12839.txt b/changelog/12839.txt new file mode 100644 index 0000000..f591936 --- /dev/null +++ b/changelog/12839.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/database: Update MSSQL dependency github.com/denisenkom/go-mssqldb to v0.11.0 and include support for contained databases in MSSQL plugin +``` diff --git a/changelog/12843.txt b/changelog/12843.txt new file mode 100644 index 0000000..2beee7f --- /dev/null +++ b/changelog/12843.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent/cache: Process persistent cache leases in dependency order during restore to ensure child leases are always correctly restored +``` diff --git a/changelog/12847.txt b/changelog/12847.txt new file mode 100644 index 0000000..30c92d0 --- /dev/null +++ b/changelog/12847.txt @@ -0,0 +1,5 @@ +```release-note:breaking-change +secrets/ssh: Roles with empty allowed_extensions will now forbid end-users +specifying extensions when requesting ssh key signing. Update roles setting +allowed_extensions to '*' to permit any extension to be specified by an end-user. +``` diff --git a/changelog/12868.txt b/changelog/12868.txt new file mode 100644 index 0000000..1fe6ece --- /dev/null +++ b/changelog/12868.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: build with Go 1.17, and mitigate a breaking change they made that could impact how approle and ssh interpret IPs/CIDRs +``` diff --git a/changelog/12872.txt b/changelog/12872.txt new file mode 100644 index 0000000..6a6157c --- /dev/null +++ b/changelog/12872.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Fixes around NIST P-curve signature hash length, default value for signature_bits changed to 0. +``` diff --git a/changelog/12876.txt b/changelog/12876.txt new file mode 100644 index 0000000..a5c0944 --- /dev/null +++ b/changelog/12876.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/oidc: Adds the `skip_browser` CLI option to allow users to skip opening the default browser during the authentication flow.
+``` diff --git a/changelog/12877.txt b/changelog/12877.txt new file mode 100644 index 0000000..c17731c --- /dev/null +++ b/changelog/12877.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/rabbitmq: Update dependency github.com/michaelklishin/rabbit-hole to v2 and resolve UserInfo.tags regression from RabbitMQ v3.9 +``` diff --git a/changelog/12881.txt b/changelog/12881.txt new file mode 100644 index 0000000..3cdf5b4 --- /dev/null +++ b/changelog/12881.txt @@ -0,0 +1,3 @@ +```release-note:improvement +command: operator generate-root -decode: allow passing encoded token via stdin +``` diff --git a/changelog/12885.txt b/changelog/12885.txt new file mode 100644 index 0000000..4018b93 --- /dev/null +++ b/changelog/12885.txt @@ -0,0 +1,3 @@ +```release-note:feature +secrets/pki: Add `tidy-status` endpoint to obtain information about the current or most recent tidy operation. +``` diff --git a/changelog/12887.txt b/changelog/12887.txt new file mode 100644 index 0000000..92ce4c8 --- /dev/null +++ b/changelog/12887.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Remove spinner after token renew +``` diff --git a/changelog/12888.txt b/changelog/12888.txt new file mode 100644 index 0000000..f7c3d56 --- /dev/null +++ b/changelog/12888.txt @@ -0,0 +1,7 @@ +```release-note:change +expiration: VAULT_LEASE_USE_LEGACY_REVOCATION_STRATEGY environment variable has +been removed. +``` +```release-note:change +expiration: VAULT_16_REVOKE_PERMITPOOL environment variable has been removed. +``` \ No newline at end of file diff --git a/changelog/12890.txt b/changelog/12890.txt new file mode 100644 index 0000000..603e72d --- /dev/null +++ b/changelog/12890.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Click to copy database static role last rotation value in tooltip +``` \ No newline at end of file diff --git a/changelog/12895.txt b/changelog/12895.txt new file mode 100644 index 0000000..ff9a66e --- /dev/null +++ b/changelog/12895.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Standardizes toolbar presentation of destructive actions +``` \ No newline at end of file diff --git a/changelog/12903.txt b/changelog/12903.txt new file mode 100644 index 0000000..f14762d --- /dev/null +++ b/changelog/12903.txt @@ -0,0 +1,3 @@ +```release-note:improvement +db/cassandra: make the connect_timeout config option actually apply to connection timeouts, in addition to non-connection operations +``` \ No newline at end of file diff --git a/changelog/12904.txt b/changelog/12904.txt new file mode 100644 index 0000000..e65636b --- /dev/null +++ b/changelog/12904.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Removes ability to tune token_type for token auth methods +``` \ No newline at end of file diff --git a/changelog/12906.txt b/changelog/12906.txt new file mode 100644 index 0000000..69e96ad --- /dev/null +++ b/changelog/12906.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Added resize for JSON editor [[GH-12906](https://github.com/hashicorp/vault/pull/12906)] +``` diff --git a/changelog/12907.txt b/changelog/12907.txt new file mode 100644 index 0000000..c4c8c5d --- /dev/null +++ b/changelog/12907.txt @@ -0,0 +1,5 @@ +```release-note:feature +**KV Custom Metadata**: Add ability in kv-v2 to specify version-agnostic custom key metadata via the +metadata endpoint. The data will be present in responses made to the data endpoint independent of the
+calling token's `read` access to the metadata endpoint.
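A hedged Go sketch of writing version-agnostic custom metadata per the entry above (12907); the `secret` mount, path, and metadata keys are illustrative:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// Custom metadata lives at the kv-v2 metadata endpoint and is returned
	// with data-endpoint reads regardless of metadata read access.
	_, err = client.Logical().Write("secret/metadata/my-app", map[string]interface{}{
		"custom_metadata": map[string]string{
			"owner":       "platform-team", // illustrative keys and values
			"environment": "prod",
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```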
+``` diff --git a/changelog/12908.txt b/changelog/12908.txt new file mode 100644 index 0000000..2e54217 --- /dev/null +++ b/changelog/12908.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Updates font for table row value fields +``` \ No newline at end of file diff --git a/changelog/12911.txt b/changelog/12911.txt new file mode 100644 index 0000000..2c21312 --- /dev/null +++ b/changelog/12911.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: fixes CLI requests when namespace is both provided as argument and part of the path +``` diff --git a/changelog/12916.txt b/changelog/12916.txt new file mode 100644 index 0000000..8d75b39 --- /dev/null +++ b/changelog/12916.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity/token: Adds missing call to unlock mutex in key deletion error handling +``` diff --git a/changelog/12921.txt b/changelog/12921.txt new file mode 100644 index 0000000..77bb465 --- /dev/null +++ b/changelog/12921.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Adds warning about white space in KV secret engine. +``` diff --git a/changelog/12932.txt b/changelog/12932.txt new file mode 100644 index 0000000..cd2224e --- /dev/null +++ b/changelog/12932.txt @@ -0,0 +1,3 @@ +```release-note:feature +**OIDC Identity Provider (Tech Preview)**: Adds support for Vault to be an OpenID Connect (OIDC) provider. +``` diff --git a/changelog/12934.txt b/changelog/12934.txt new file mode 100644 index 0000000..fbc6aa5 --- /dev/null +++ b/changelog/12934.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/keymgmt (enterprise): Fix support for Azure Managed HSM Key Vault instances. +``` diff --git a/changelog/12945.txt b/changelog/12945.txt new file mode 100644 index 0000000..1283787 --- /dev/null +++ b/changelog/12945.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Postgres in the UI**: Postgres DB is now supported by the UI +``` \ No newline at end of file diff --git a/changelog/12965.txt b/changelog/12965.txt new file mode 100644 index 0000000..4045d55 --- /dev/null +++ b/changelog/12965.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cockroachdb: add high-availability support +``` diff --git a/changelog/12976.txt b/changelog/12976.txt new file mode 100644 index 0000000..1dea358 --- /dev/null +++ b/changelog/12976.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Adds flight icons to UI +``` \ No newline at end of file diff --git a/changelog/13000.txt b/changelog/13000.txt new file mode 100644 index 0000000..daddd07 --- /dev/null +++ b/changelog/13000.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add version diff view for KV V2 +``` \ No newline at end of file diff --git a/changelog/13015.txt b/changelog/13015.txt new file mode 100644 index 0000000..48fd263 --- /dev/null +++ b/changelog/13015.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Display badge for all versions in secrets engine header +``` \ No newline at end of file diff --git a/changelog/13022.txt b/changelog/13022.txt new file mode 100644 index 0000000..bb3d3a6 --- /dev/null +++ b/changelog/13022.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Implements Login method in Go client libraries for GCP and Azure auth methods +``` diff --git a/changelog/13024.txt b/changelog/13024.txt new file mode 100644 index 0000000..765d5e1 --- /dev/null +++ b/changelog/13024.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Report in-flight requests**: Adding a trace capability to show in-flight requests, and a new gauge metric to show the total number of in-flight requests +``` diff --git a/changelog/13032.txt 
b/changelog/13032.txt new file mode 100644 index 0000000..b3ea4cd --- /dev/null +++ b/changelog/13032.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes long secret key names overlapping masked values +``` diff --git a/changelog/13033.txt b/changelog/13033.txt new file mode 100644 index 0000000..53ee82f --- /dev/null +++ b/changelog/13033.txt @@ -0,0 +1,3 @@ +```release-note:bug +plugin/couchbase: Fix an issue in which the locking patterns did not allow parallel requests. +``` diff --git a/changelog/13034.txt b/changelog/13034.txt new file mode 100644 index 0000000..36c8f7f --- /dev/null +++ b/changelog/13034.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/azure: Adds support for rotate-root. [#70](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/70) +``` \ No newline at end of file diff --git a/changelog/13038.txt b/changelog/13038.txt new file mode 100644 index 0000000..cf20dec --- /dev/null +++ b/changelog/13038.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes issue with the number of PGP Key inputs not matching the key shares number in the initialization form on change +``` \ No newline at end of file diff --git a/changelog/13042.txt b/changelog/13042.txt new file mode 100644 index 0000000..192b4d4 --- /dev/null +++ b/changelog/13042.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix warnings logged on perf standbys regarding stored versions +``` \ No newline at end of file diff --git a/changelog/13044.txt b/changelog/13044.txt new file mode 100644 index 0000000..3896544 --- /dev/null +++ b/changelog/13044.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Trim newline character from wrapping token in logical.Unwrap from the api package +``` \ No newline at end of file diff --git a/changelog/13054.txt b/changelog/13054.txt new file mode 100644 index 0000000..f48d68b --- /dev/null +++ b/changelog/13054.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Adds pagination to auth methods list view +``` \ No newline at end of file diff --git a/changelog/13078.txt b/changelog/13078.txt new file mode 100644 index 0000000..789445b --- /dev/null +++ b/changelog/13078.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Periodically test the health of connectivity to auto-seal backends +``` \ No newline at end of file diff --git a/changelog/13080.txt b/changelog/13080.txt new file mode 100644 index 0000000..9c3ed52 --- /dev/null +++ b/changelog/13080.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Default value for key_bits changed to 0, enabling key_type=ec key generation with default value +``` diff --git a/changelog/13086.txt b/changelog/13086.txt new file mode 100644 index 0000000..d8c92ba --- /dev/null +++ b/changelog/13086.txt @@ -0,0 +1,3 @@ +```release-note:bug +activity log (enterprise): allow partial monthly client count to be accessed from namespaces +``` \ No newline at end of file diff --git a/changelog/13090.txt b/changelog/13090.txt new file mode 100644 index 0000000..82ad157 --- /dev/null +++ b/changelog/13090.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: customizes empty state messages for transit and transform +``` \ No newline at end of file diff --git a/changelog/13093.txt b/changelog/13093.txt new file mode 100644 index 0000000..d5b8af0 --- /dev/null +++ b/changelog/13093.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/identity: Address a data race condition between local updates to aliases and invalidations +``` diff --git a/changelog/13098.txt b/changelog/13098.txt new file mode 100644 index 0000000..84bbcbf --- /dev/null +++
b/changelog/13098.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes issue where removing a raft storage peer via the CLI was not reflected in the UI until refresh +``` \ No newline at end of file diff --git a/changelog/13107.txt b/changelog/13107.txt new file mode 100644 index 0000000..50f01ef --- /dev/null +++ b/changelog/13107.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes issue restoring raft storage snapshot +``` \ No newline at end of file diff --git a/changelog/13111.txt b/changelog/13111.txt new file mode 100644 index 0000000..800cabd --- /dev/null +++ b/changelog/13111.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/transit: Don't abort transit encrypt or decrypt batches on single item failure. +``` diff --git a/changelog/13133.txt b/changelog/13133.txt new file mode 100644 index 0000000..ae39a1c --- /dev/null +++ b/changelog/13133.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes issue with OIDC auth workflow when using MetaMask Chrome extension +``` \ No newline at end of file diff --git a/changelog/13146.txt b/changelog/13146.txt new file mode 100644 index 0000000..6bbe85e --- /dev/null +++ b/changelog/13146.txt @@ -0,0 +1,3 @@ +```release-note:bug +sdk/queue: move lock before length check to prevent panics. +``` \ No newline at end of file diff --git a/changelog/13149.txt b/changelog/13149.txt new file mode 100644 index 0000000..54bcf18 --- /dev/null +++ b/changelog/13149.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Updates ember blueprints to glimmer components +``` \ No newline at end of file diff --git a/changelog/13152.txt b/changelog/13152.txt new file mode 100644 index 0000000..88c2f0e --- /dev/null +++ b/changelog/13152.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Do not show verify connection value on database connection config page +``` \ No newline at end of file diff --git a/changelog/13162.txt b/changelog/13162.txt new file mode 100644 index 0000000..273a7e5 --- /dev/null +++ b/changelog/13162.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: authentication to "login" endpoint for non-existent mount path returns permission denied with status code 403 +``` \ No newline at end of file diff --git a/changelog/13165.txt b/changelog/13165.txt new file mode 100644 index 0000000..4a0cbe9 --- /dev/null +++ b/changelog/13165.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/raft: Fix regression in 1.9.0-rc1 that changed how time is represented in Raft logs; this prevented using a raft db created pre-1.9. +``` \ No newline at end of file diff --git a/changelog/13166.txt b/changelog/13166.txt new file mode 100644 index 0000000..ff1ab5f --- /dev/null +++ b/changelog/13166.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes issue with placeholder not displaying for automatically deleted secrets when deletion time has passed +``` \ No newline at end of file diff --git a/changelog/13168.txt b/changelog/13168.txt new file mode 100644 index 0000000..0782766 --- /dev/null +++ b/changelog/13168.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: revert some unintentionally downgraded dependencies from 1.9.0-rc1 +``` \ No newline at end of file diff --git a/changelog/13169.txt b/changelog/13169.txt new file mode 100644 index 0000000..5b2236e --- /dev/null +++ b/changelog/13169.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity: Fix regression preventing startup when aliases were created pre-1.9.
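The sdk/queue entry above (13146) describes a classic check-then-act race. A sketch of the corrected pattern, with a simplified queue type standing in for the real implementation:

```go
package queue

import (
	"errors"
	"sync"
)

var ErrEmpty = errors.New("queue is empty")

type Queue struct {
	lock  sync.Mutex
	items []string
}

// Pop takes the lock before the length check. Checking first and locking
// second lets a concurrent Pop shrink the slice between the check and the
// index access, which panics with an out-of-range error.
func (q *Queue) Pop() (string, error) {
	q.lock.Lock()
	defer q.lock.Unlock()
	if len(q.items) == 0 {
		return "", ErrEmpty
	}
	item := q.items[0]
	q.items = q.items[1:]
	return item, nil
}
```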
+``` \ No newline at end of file diff --git a/changelog/13177.txt b/changelog/13177.txt new file mode 100644 index 0000000..fd59459 --- /dev/null +++ b/changelog/13177.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes issue with automate secret deletion value not displaying initially if set in secret metadata edit view +``` \ No newline at end of file diff --git a/changelog/13178.txt b/changelog/13178.txt new file mode 100644 index 0000000..67f6b2a --- /dev/null +++ b/changelog/13178.txt @@ -0,0 +1,3 @@ +```release-note:improvement +storage/raft: Set InitialMmapSize to 100GB on 64bit architectures +``` diff --git a/changelog/13195.txt b/changelog/13195.txt new file mode 100644 index 0000000..0f9ce70 --- /dev/null +++ b/changelog/13195.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Added client side paging for namespace list view +``` \ No newline at end of file diff --git a/changelog/13200.txt b/changelog/13200.txt new file mode 100644 index 0000000..18b2cee --- /dev/null +++ b/changelog/13200.txt @@ -0,0 +1,3 @@ +```release-note:bug +http: Fix /sys/monitor endpoint returning streaming not supported +``` diff --git a/changelog/13215.txt b/changelog/13215.txt new file mode 100644 index 0000000..20a787e --- /dev/null +++ b/changelog/13215.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/kv: add patch support for KVv2 key metadata +``` diff --git a/changelog/13231.txt b/changelog/13231.txt new file mode 100644 index 0000000..3af5233 --- /dev/null +++ b/changelog/13231.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity/oidc: Make the `nonce` parameter optional for the Authorization Endpoint of OIDC providers. +``` diff --git a/changelog/13233.txt b/changelog/13233.txt new file mode 100644 index 0000000..718f520 --- /dev/null +++ b/changelog/13233.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/token: Fix null token panic from 'v1/auth/token/' endpoints and return proper error response. +``` \ No newline at end of file diff --git a/changelog/13235.txt b/changelog/13235.txt new file mode 100644 index 0000000..50c3035 --- /dev/null +++ b/changelog/13235.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/approle: Fix regression where unset cidrlist is returned as nil instead of zero-length array.
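Why the cidrlist fix above matters to API consumers: in Go, a nil slice and an empty slice marshal to different JSON values. A self-contained illustration (generic Go, not Vault code):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var unset []string  // nil slice, as returned by the regression
	empty := []string{} // zero-length slice, the expected shape
	a, _ := json.Marshal(unset)
	b, _ := json.Marshal(empty)
	fmt.Println(string(a)) // null  -- breaks clients expecting an array
	fmt.Println(string(b)) // []
}
```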
+``` \ No newline at end of file diff --git a/changelog/13236.txt b/changelog/13236.txt new file mode 100644 index 0000000..568f021 --- /dev/null +++ b/changelog/13236.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/token: Fix null token_type panic resulting from 'v1/auth/token/roles/{role_name}' endpoint +``` \ No newline at end of file diff --git a/changelog/13238.txt b/changelog/13238.txt new file mode 100644 index 0000000..7b11758 --- /dev/null +++ b/changelog/13238.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes node-forge error when parsing EC (elliptic curve) certs +``` \ No newline at end of file diff --git a/changelog/13241.txt b/changelog/13241.txt new file mode 100644 index 0000000..aa5ead9 --- /dev/null +++ b/changelog/13241.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: respect WithWrappingToken() option during AppRole login authentication when used with secret ID specified from environment or from string +``` diff --git a/changelog/13254.txt b/changelog/13254.txt new file mode 100644 index 0000000..581d74b --- /dev/null +++ b/changelog/13254.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Skip signature bits validation for ed25519 curve key type +``` diff --git a/changelog/13257.txt b/changelog/13257.txt new file mode 100644 index 0000000..10a4902 --- /dev/null +++ b/changelog/13257.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Recognize ed25519 when requesting a response in PKCS8 format +``` diff --git a/changelog/13277.txt b/changelog/13277.txt new file mode 100644 index 0000000..28de86b --- /dev/null +++ b/changelog/13277.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/azure: Fixes service principal generation when assigning roles that have [DataActions](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-definitions#dataactions). +``` diff --git a/changelog/13282.txt b/changelog/13282.txt new file mode 100644 index 0000000..2c32209 --- /dev/null +++ b/changelog/13282.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/raft: Fix a panic when trying to write a key > 32KB +``` diff --git a/changelog/13286.txt b/changelog/13286.txt new file mode 100644 index 0000000..9d39be6 --- /dev/null +++ b/changelog/13286.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/raft: Fix a panic when trying to store a key > 32KB in a transaction. +``` diff --git a/changelog/13292.txt b/changelog/13292.txt new file mode 100644 index 0000000..e394338 --- /dev/null +++ b/changelog/13292.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/ha: Add new mechanism for keeping track of peers talking to active node, and new 'operator members' command to view them. +``` diff --git a/changelog/13298.txt b/changelog/13298.txt new file mode 100644 index 0000000..893ab12 --- /dev/null +++ b/changelog/13298.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity: Fixes a panic in the OIDC key rotation due to a missing nil check. +``` diff --git a/changelog/13318.txt b/changelog/13318.txt new file mode 100644 index 0000000..79ddb15 --- /dev/null +++ b/changelog/13318.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity: Fix possible nil pointer dereference.
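The api improvement above (13241) concerns the AppRole login helper from the vault/api auth packages. A hedged sketch of its use; the role ID and the environment variable name are placeholders, not values from this diff:

```go
package main

import (
	"context"
	"log"

	vault "github.com/hashicorp/vault/api"
	approle "github.com/hashicorp/vault/api/auth/approle"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// WithWrappingToken tells the helper the secret ID value is a wrapping
	// token to unwrap first; per 13241 this now also applies when the secret
	// ID comes from the environment or from a string.
	secretID := &approle.SecretID{FromEnv: "APPROLE_WRAPPED_SECRET_ID"} // placeholder
	a, err := approle.NewAppRoleAuth("my-role-id", secretID, approle.WithWrappingToken())
	if err != nil {
		log.Fatal(err)
	}
	if _, err := client.Auth().Login(context.Background(), a); err != nil {
		log.Fatal(err)
	}
}
```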
+``` diff --git a/changelog/13324.txt b/changelog/13324.txt new file mode 100644 index 0000000..f0bced3 --- /dev/null +++ b/changelog/13324.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Replace "master key" terminology with "root key" +``` diff --git a/changelog/13332.txt b/changelog/13332.txt new file mode 100644 index 0000000..968f32a --- /dev/null +++ b/changelog/13332.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/github: Use the Organization ID instead of the Organization name to verify the org membership. +``` diff --git a/changelog/13348.txt b/changelog/13348.txt new file mode 100644 index 0000000..f7ca0c9 --- /dev/null +++ b/changelog/13348.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/cert: Add certificate extensions as metadata +``` diff --git a/changelog/13365.txt b/changelog/13365.txt new file mode 100644 index 0000000..65284ab --- /dev/null +++ b/changelog/13365.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/jwt: The Authorization Code flow makes use of the Proof Key for Code Exchange (PKCE) extension. +``` diff --git a/changelog/13367.txt b/changelog/13367.txt new file mode 100644 index 0000000..85bbe5e --- /dev/null +++ b/changelog/13367.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Transit SHA-3 Support**: Add support for SHA-3 in the Transit backend. +``` diff --git a/changelog/13395.txt b/changelog/13395.txt new file mode 100644 index 0000000..d8f2e71 --- /dev/null +++ b/changelog/13395.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/identity: Support updating an alias' `custom_metadata` to be empty. +``` diff --git a/changelog/13396.txt b/changelog/13396.txt new file mode 100644 index 0000000..6600abe --- /dev/null +++ b/changelog/13396.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix client count current month data not showing unless monthly history data exists +``` diff --git a/changelog/13408.txt b/changelog/13408.txt new file mode 100644 index 0000000..4861b17 --- /dev/null +++ b/changelog/13408.txt @@ -0,0 +1,3 @@ +```release-note:change +go: Update go version to 1.17.5 +``` diff --git a/changelog/13414.txt b/changelog/13414.txt new file mode 100644 index 0000000..54ae53d --- /dev/null +++ b/changelog/13414.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/database: Add database configuration parameter 'disable_escaping' for username and password when connecting to a database. +``` diff --git a/changelog/13439.txt b/changelog/13439.txt new file mode 100644 index 0000000..a7f76de --- /dev/null +++ b/changelog/13439.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/okta: Update [okta-sdk-golang](https://github.com/okta/okta-sdk-golang) dependency to version v2.9.1 for improved request backoff handling +``` diff --git a/changelog/13443.txt b/changelog/13443.txt new file mode 100644 index 0000000..74a995c --- /dev/null +++ b/changelog/13443.txt @@ -0,0 +1,3 @@ +```release-note:change +ui: Upgrade Ember to version 3.24 +``` \ No newline at end of file diff --git a/changelog/13452.txt b/changelog/13452.txt new file mode 100644 index 0000000..6177230 --- /dev/null +++ b/changelog/13452.txt @@ -0,0 +1,3 @@ +```release-note:bug +sdk/helper/ldaputil: properly escape a trailing escape character to prevent panics. +``` \ No newline at end of file diff --git a/changelog/13469.txt b/changelog/13469.txt new file mode 100644 index 0000000..97cb471 --- /dev/null +++ b/changelog/13469.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/database/mssql: Accept a boolean for `contained_db`, rather than just a string. 
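For the Transit SHA-3 feature above (13367), a sketch of hashing data with one of the new algorithms through the generic logical client, assuming transit is mounted at `transit/` and that the hash endpoint accepts the sha3 family; the input must be base64-encoded:

```go
package main

import (
	"context"
	"encoding/base64"
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// transit/hash/:algorithm; sha3-224/256/384/512 are the SHA-3 options.
	resp, err := client.Logical().WriteWithContext(context.Background(),
		"transit/hash/sha3-256",
		map[string]interface{}{
			"input": base64.StdEncoding.EncodeToString([]byte("hello world")),
		})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Data["sum"]) // hex digest by default
}
```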
+``` diff --git a/changelog/13476.txt b/changelog/13476.txt new file mode 100644 index 0000000..d5b8af0 --- /dev/null +++ b/changelog/13476.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/identity: Address a data race condition between local updates to aliases and invalidations +``` diff --git a/changelog/13486.txt b/changelog/13486.txt new file mode 100644 index 0000000..493d363 --- /dev/null +++ b/changelog/13486.txt @@ -0,0 +1,3 @@ +```release-note:bug +api/client: Fixes an issue where the `replicateStateStore` was being set to `nil` upon consecutive calls to `client.SetReadYourWrites(true)`. +``` diff --git a/changelog/13487.txt b/changelog/13487.txt new file mode 100644 index 0000000..1df81d2 --- /dev/null +++ b/changelog/13487.txt @@ -0,0 +1,3 @@ +```release-note:bug +sdk/framework: Generate proper OpenAPI specs for path patterns that use an alternation as the root. +``` \ No newline at end of file diff --git a/changelog/13492.txt b/changelog/13492.txt new file mode 100644 index 0000000..81841fa --- /dev/null +++ b/changelog/13492.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/oidc: Fixes OIDC auth from the Vault UI when using the implicit flow and `form_post` response mode. +``` diff --git a/changelog/13515.txt b/changelog/13515.txt new file mode 100644 index 0000000..7c34dc9 --- /dev/null +++ b/changelog/13515.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Allow cloning `api.Client` tokens via `api.Config.CloneToken` or `api.Client.SetCloneToken()`. +``` diff --git a/changelog/13537.txt b/changelog/13537.txt new file mode 100644 index 0000000..d1c31af --- /dev/null +++ b/changelog/13537.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sys/raw: Enhance sys/raw to read and write values that cannot be encoded in json. +``` \ No newline at end of file diff --git a/changelog/13540.txt b/changelog/13540.txt new file mode 100644 index 0000000..05fd83d --- /dev/null +++ b/changelog/13540.txt @@ -0,0 +1,4 @@ +```release-note:improvement +core: Vault now supports the PROXY protocol v2. Support for UNKNOWN connections +has also been added to the PROXY protocol v1. +``` diff --git a/changelog/13548.txt b/changelog/13548.txt new file mode 100644 index 0000000..0edc831 --- /dev/null +++ b/changelog/13548.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/gcp: Fixes role bindings for BigQuery dataset resources. +``` diff --git a/changelog/13573.txt b/changelog/13573.txt new file mode 100644 index 0000000..f908854 --- /dev/null +++ b/changelog/13573.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/raft: On linux, use map_populate for bolt files to improve startup time. 
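The api improvement above (13515) makes token inheritance on clone opt-in. A minimal sketch; the token value is a placeholder:

```go
package main

import (
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	client.SetToken("hvs.placeholder") // placeholder token
	client.SetCloneToken(true)         // without this, Clone() yields a token-less client
	clone, err := client.Clone()
	if err != nil {
		log.Fatal(err)
	}
	log.Println(clone.Token() == client.Token()) // true
}
```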
+``` diff --git a/changelog/13585.txt b/changelog/13585.txt new file mode 100644 index 0000000..886c8ed --- /dev/null +++ b/changelog/13585.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes issue saving KMIP role correctly +``` \ No newline at end of file diff --git a/changelog/13590.txt b/changelog/13590.txt new file mode 100644 index 0000000..8731607 --- /dev/null +++ b/changelog/13590.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes issue with SearchSelect component not holding focus +``` \ No newline at end of file diff --git a/changelog/13595.txt b/changelog/13595.txt new file mode 100644 index 0000000..ae86886 --- /dev/null +++ b/changelog/13595.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/kubernetes: Added support for dynamically reloading short-lived tokens for better Kubernetes 1.21+ compatibility +``` diff --git a/changelog/13604.txt b/changelog/13604.txt new file mode 100644 index 0000000..c74a2e2 --- /dev/null +++ b/changelog/13604.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes breadcrumb bug for secrets navigation +``` \ No newline at end of file diff --git a/changelog/13606.txt b/changelog/13606.txt new file mode 100644 index 0000000..c41abba --- /dev/null +++ b/changelog/13606.txt @@ -0,0 +1,3 @@ +```release-note:improvement +storage/raft: When using retry_join stanzas, join against all of them in parallel. +``` \ No newline at end of file diff --git a/changelog/13615.txt b/changelog/13615.txt new file mode 100644 index 0000000..8e9a678 --- /dev/null +++ b/changelog/13615.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: Fix using kv patch with older server versions that don't support HTTP PATCH. +``` \ No newline at end of file diff --git a/changelog/13643.txt b/changelog/13643.txt new file mode 100644 index 0000000..fd3c278 --- /dev/null +++ b/changelog/13643.txt @@ -0,0 +1,3 @@ +```release-note:bug +sdk: Fixes OpenAPI to distinguish between paths that can do only List, or both List and Read. +``` \ No newline at end of file diff --git a/changelog/13660.txt b/changelog/13660.txt new file mode 100644 index 0000000..1920afe --- /dev/null +++ b/changelog/13660.txt @@ -0,0 +1,4 @@ +```release-note:bug +core: `-output-curl-string` now properly sets cURL options for client and CA +certificates. +``` diff --git a/changelog/13661.txt b/changelog/13661.txt new file mode 100644 index 0000000..99ea592 --- /dev/null +++ b/changelog/13661.txt @@ -0,0 +1,4 @@ +```release-note:improvement +auth/token: The `auth/token/revoke-accessor` endpoint is now idempotent and will +not error out if the token has already been revoked. +``` diff --git a/changelog/13667.txt b/changelog/13667.txt new file mode 100644 index 0000000..5c0c2b8 --- /dev/null +++ b/changelog/13667.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Fixes code scanning alerts +``` \ No newline at end of file diff --git a/changelog/13669.txt b/changelog/13669.txt new file mode 100644 index 0000000..01d4fe4 --- /dev/null +++ b/changelog/13669.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/ldap: Add username to alias metadata +``` \ No newline at end of file diff --git a/changelog/13675.txt b/changelog/13675.txt new file mode 100644 index 0000000..3441db3 --- /dev/null +++ b/changelog/13675.txt @@ -0,0 +1,4 @@ +```release-note:feature +agent: The Vault agent now returns telemetry information at the `/agent/v1/metrics` +path. 
+``` diff --git a/changelog/13678.txt b/changelog/13678.txt new file mode 100644 index 0000000..f8cbbf3 --- /dev/null +++ b/changelog/13678.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: add support for go-sockaddr templates in the top-level cluster_addr field +``` \ No newline at end of file diff --git a/changelog/13682.txt b/changelog/13682.txt new file mode 100644 index 0000000..3eb908d --- /dev/null +++ b/changelog/13682.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Add duration and start_time to completed requests log entries +``` \ No newline at end of file diff --git a/changelog/13683.txt b/changelog/13683.txt new file mode 100644 index 0000000..4b5fa51 --- /dev/null +++ b/changelog/13683.txt @@ -0,0 +1,4 @@ +```release-note:improvement +ui: The integrated web terminal now accepts both `-f` and `--force` as aliases +for `-force` for the `write` command. +``` diff --git a/changelog/13690.txt b/changelog/13690.txt new file mode 100644 index 0000000..03bbc11 --- /dev/null +++ b/changelog/13690.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/transit: Ensure that Vault does not panic for invalid nonce size when we aren't in convergent encryption mode. +``` \ No newline at end of file diff --git a/changelog/13691.txt b/changelog/13691.txt new file mode 100644 index 0000000..b3c0cb9 --- /dev/null +++ b/changelog/13691.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Transit Time-Based Key Autorotation**: Add support for automatic, time-based key rotation to transit secrets engine. +``` diff --git a/changelog/13703.txt b/changelog/13703.txt new file mode 100644 index 0000000..c713630 --- /dev/null +++ b/changelog/13703.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/raft: Fix issues allowing invalid nodes to become leadership candidates. +``` \ No newline at end of file diff --git a/changelog/13716.txt b/changelog/13716.txt new file mode 100644 index 0000000..e7d35a0 --- /dev/null +++ b/changelog/13716.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity/oidc: Check for a nil signing key on rotation to prevent panics. +``` diff --git a/changelog/13736.txt b/changelog/13736.txt new file mode 100644 index 0000000..f90f363 --- /dev/null +++ b/changelog/13736.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/pki: Support Y10K value in notAfter field when signing non-CA certificates +``` diff --git a/changelog/13749.txt b/changelog/13749.txt new file mode 100644 index 0000000..f760799 --- /dev/null +++ b/changelog/13749.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/raft: Units for bolt metrics now given in milliseconds instead of nanoseconds +``` diff --git a/changelog/13759.txt b/changelog/13759.txt new file mode 100644 index 0000000..8c66ff5 --- /dev/null +++ b/changelog/13759.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Fix regression causing performance secondaries to forward certificate generation to the primary. +``` \ No newline at end of file diff --git a/changelog/13766.txt b/changelog/13766.txt new file mode 100644 index 0000000..14913c1 --- /dev/null +++ b/changelog/13766.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Add support to list version history via API at `sys/version-history` and via CLI with `vault version-history` +``` diff --git a/changelog/13799.txt b/changelog/13799.txt new file mode 100644 index 0000000..27e15d8 --- /dev/null +++ b/changelog/13799.txt @@ -0,0 +1,3 @@ +```release-note:security +database/mssql: Removed string interpolation on internal queries and replaced them with inline queries using named parameters.
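The mssql security entry above replaces string interpolation with named parameters. A generic database/sql illustration of the difference, using the community go-mssqldb driver as an assumed example; the connection string, table, and column names are hypothetical:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/denisenkom/go-mssqldb" // assumed driver, illustrative only
)

func main() {
	db, err := sql.Open("sqlserver", "sqlserver://user:pass@localhost:1433")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	name := "app-user"
	// Unsafe: building the query with fmt.Sprintf interpolates raw input.
	// Safe: the driver binds @name server-side, so input cannot alter the query.
	_, err = db.Exec("UPDATE logins SET disabled = 1 WHERE name = @name",
		sql.Named("name", name))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("done")
}
```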
+``` \ No newline at end of file diff --git a/changelog/13841.txt b/changelog/13841.txt new file mode 100644 index 0000000..964eb91 --- /dev/null +++ b/changelog/13841.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Implements Login method in Go client libraries for LDAP auth methods +``` \ No newline at end of file diff --git a/changelog/13850.txt b/changelog/13850.txt new file mode 100644 index 0000000..6e600d3 --- /dev/null +++ b/changelog/13850.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/consul: Add support for consul enterprise namespaces and admin partitions. +``` \ No newline at end of file diff --git a/changelog/13871.txt b/changelog/13871.txt new file mode 100644 index 0000000..c8c5103 --- /dev/null +++ b/changelog/13871.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity/oidc: Adds support for port-agnostic validation of loopback IP redirect URIs. +``` diff --git a/changelog/13872.txt b/changelog/13872.txt new file mode 100644 index 0000000..688f6cc --- /dev/null +++ b/changelog/13872.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix kv engine access bug +``` \ No newline at end of file diff --git a/changelog/13889.txt b/changelog/13889.txt new file mode 100644 index 0000000..8d98d4a --- /dev/null +++ b/changelog/13889.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Add count and duration metrics to PKI issue and revoke calls. +``` diff --git a/changelog/13893.txt b/changelog/13893.txt new file mode 100644 index 0000000..d7ff943 --- /dev/null +++ b/changelog/13893.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/kv: add subkeys endpoint to retrieve a secret's structure without its values +``` diff --git a/changelog/13894.txt b/changelog/13894.txt new file mode 100644 index 0000000..2ada1d5 --- /dev/null +++ b/changelog/13894.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add support for ECDSA and Ed25519 certificate views +``` diff --git a/changelog/13908.txt b/changelog/13908.txt new file mode 100644 index 0000000..7cc942a --- /dev/null +++ b/changelog/13908.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Transform advanced templating with encode/decode format support +``` \ No newline at end of file diff --git a/changelog/13917.txt b/changelog/13917.txt new file mode 100644 index 0000000..aa166c3 --- /dev/null +++ b/changelog/13917.txt @@ -0,0 +1,3 @@ +```release-note:improvement +identity/oidc: Adds proof key for code exchange (PKCE) support to OIDC providers.
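For the subkeys improvement above (13893), a sketch of reading a secret's shape without its values, assuming a kv-v2 mount at `secret/`; the secret path is illustrative:

```go
package main

import (
	"context"
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// secret/subkeys/:path mirrors secret/data/:path but nulls out leaf values,
	// so a low-privilege caller can discover keys without reading secrets.
	resp, err := client.Logical().ReadWithContext(context.Background(),
		"secret/subkeys/my-app")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Data["subkeys"]) // e.g. map[password:<nil> username:<nil>]
}
```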
+``` diff --git a/changelog/13925.txt b/changelog/13925.txt new file mode 100644 index 0000000..a7cce8d --- /dev/null +++ b/changelog/13925.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/kubernetes: Properly handle the migration of role storage entries containing an empty `alias_name_source` +``` diff --git a/changelog/13927.txt b/changelog/13927.txt new file mode 100644 index 0000000..ec0e99f --- /dev/null +++ b/changelog/13927.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Use application/pem-certificate-chain for PEM certificates, application/x-pem-file for PEM CRLs +``` diff --git a/changelog/13935.txt b/changelog/13935.txt new file mode 100644 index 0000000..4066f53 --- /dev/null +++ b/changelog/13935.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Return complete chain (in `ca_chain` field) on calls to `pki/cert/ca_chain` +``` diff --git a/changelog/13950.txt b/changelog/13950.txt new file mode 100644 index 0000000..02ed84d --- /dev/null +++ b/changelog/13950.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: trigger token renewal if inactive and half of TTL has passed +``` \ No newline at end of file diff --git a/changelog/13958.txt b/changelog/13958.txt new file mode 100644 index 0000000..3b70069 --- /dev/null +++ b/changelog/13958.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Allow other_sans in sign-intermediate and sign-verbatim +``` diff --git a/changelog/13970.txt b/changelog/13970.txt new file mode 100644 index 0000000..0c15ae0 --- /dev/null +++ b/changelog/13970.txt @@ -0,0 +1,3 @@ +```release-note:feature +ui: Add support for time-based key autorotation in transit secrets engine. +``` \ No newline at end of file diff --git a/changelog/13973.txt b/changelog/13973.txt new file mode 100644 index 0000000..625fd45 --- /dev/null +++ b/changelog/13973.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/azure: Fixed bug where Azure environment did not change Graph URL +``` \ No newline at end of file diff --git a/changelog/13974.txt b/changelog/13974.txt new file mode 100644 index 0000000..9a35564 --- /dev/null +++ b/changelog/13974.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/gcp: Fixed bug where error was not reported for invalid bindings +``` \ No newline at end of file diff --git a/changelog/13991.txt b/changelog/13991.txt new file mode 100644 index 0000000..571689f --- /dev/null +++ b/changelog/13991.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/ssh: Allow specifying multiple approved key lengths for a single algorithm +``` diff --git a/changelog/14006.txt b/changelog/14006.txt new file mode 100644 index 0000000..30b5780 --- /dev/null +++ b/changelog/14006.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/ssh: Use secure default for algorithm signer (rsa-sha2-256) with RSA SSH CA keys on new roles +``` diff --git a/changelog/14008.txt b/changelog/14008.txt new file mode 100644 index 0000000..624ba6f --- /dev/null +++ b/changelog/14008.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/ssh: Add support for generating non-RSA SSH CAs +``` diff --git a/changelog/14013.txt b/changelog/14013.txt new file mode 100644 index 0000000..ad6c311 --- /dev/null +++ b/changelog/14013.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity/oidc: Fixes inherited group membership when evaluating client assignments +``` diff --git a/changelog/14014.txt b/changelog/14014.txt new file mode 100644 index 0000000..14bdef1 --- /dev/null +++ b/changelog/14014.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/consul: Add support 
for consul roles. +``` \ No newline at end of file diff --git a/changelog/14025.txt b/changelog/14025.txt new file mode 100644 index 0000000..1c01f0b --- /dev/null +++ b/changelog/14025.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Login MFA**: Single- and two-phase MFA is now available when authenticating to Vault. +``` diff --git a/changelog/14033.txt b/changelog/14033.txt new file mode 100644 index 0000000..43906a4 --- /dev/null +++ b/changelog/14033.txt @@ -0,0 +1,7 @@ +```release-note:feature +**Database plugin multiplexing**: manage multiple database connections with a single plugin process +``` + +```release-note:change +plugin/database: The return value from `POST /database/config/:name` has been updated to "204 No Content" +``` diff --git a/changelog/14049.txt b/changelog/14049.txt new file mode 100644 index 0000000..93af683 --- /dev/null +++ b/changelog/14049.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Adds multi-factor authentication support +``` \ No newline at end of file diff --git a/changelog/14051.txt b/changelog/14051.txt new file mode 100644 index 0000000..00068e7 --- /dev/null +++ b/changelog/14051.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/aws: Enable region detection in the CLI by specifying the region as `auto` +``` diff --git a/changelog/14054.txt b/changelog/14054.txt new file mode 100644 index 0000000..a75d6fa --- /dev/null +++ b/changelog/14054.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Swap browser localStorage in favor of sessionStorage +``` diff --git a/changelog/14067.txt b/changelog/14067.txt new file mode 100644 index 0000000..bd24019 --- /dev/null +++ b/changelog/14067.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Define constants for X-Vault-Forward and X-Vault-Inconsistent headers +``` diff --git a/changelog/14074.txt b/changelog/14074.txt new file mode 100644 index 0000000..2e553c8 --- /dev/null +++ b/changelog/14074.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/transit: Return an error if any required parameter is missing. +``` diff --git a/changelog/14095.txt b/changelog/14095.txt new file mode 100644 index 0000000..f534d66 --- /dev/null +++ b/changelog/14095.txt @@ -0,0 +1,4 @@ +```release-note:improvement +auth/ldap: Add a response warning and server log whenever the config is accessed +if `userfilter` doesn't consider `userattr` +``` diff --git a/changelog/14107.txt b/changelog/14107.txt new file mode 100644 index 0000000..f17138c --- /dev/null +++ b/changelog/14107.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/approle: Fix wrapping of nil errors in `login` endpoint +``` diff --git a/changelog/14109.txt b/changelog/14109.txt new file mode 100644 index 0000000..0e473d8 --- /dev/null +++ b/changelog/14109.txt @@ -0,0 +1,4 @@ +```release-note:feature +**Server Side Consistent Tokens**: Service tokens have been updated to be longer (a minimum of 95 bytes) and token prefixes for all token types are updated from s., b., and r. to hvs., hvb., and hvr. for service, batch, and recovery tokens respectively. Vault clusters with integrated storage will now have read-after-write +consistency by default.
[[GH-14109](https://github.com/hashicorp/vault/pull/14109)] +``` \ No newline at end of file diff --git a/changelog/14119.txt b/changelog/14119.txt new file mode 100644 index 0000000..3056bc2 --- /dev/null +++ b/changelog/14119.txt @@ -0,0 +1,9 @@ +```release-note:improvement +identity/oidc: Adds a default OIDC provider +``` +```release-note:improvement +identity/oidc: Adds a default key for OIDC clients +``` +```release-note:improvement +identity/oidc: Adds an `allow_all` assignment that permits all entities to authenticate via an OIDC client +``` diff --git a/changelog/14130.txt b/changelog/14130.txt new file mode 100644 index 0000000..25978b5 --- /dev/null +++ b/changelog/14130.txt @@ -0,0 +1,8 @@ +```release-note:change +secrets/azure: Changes the configuration parameter `use_microsoft_graph_api` to use the Microsoft +Graph API by default. +``` +```release-note:bug +secrets/azure: Fixes the [rotate root](https://www.vaultproject.io/api-docs/secret/azure#rotate-root) +operation for upgraded configurations with a `root_password_ttl` of zero. +``` diff --git a/changelog/14131.txt b/changelog/14131.txt new file mode 100644 index 0000000..e2d2d87 --- /dev/null +++ b/changelog/14131.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli: interactive CLI for login mfa +``` diff --git a/changelog/14138.txt b/changelog/14138.txt new file mode 100644 index 0000000..ccc11dd --- /dev/null +++ b/changelog/14138.txt @@ -0,0 +1,3 @@ +```release-note:bug +Fixed bug where auth method only considers system-identity when multiple identities are available. [#50](https://github.com/hashicorp/vault-plugin-auth-azure/pull/50) +``` diff --git a/changelog/14144.txt b/changelog/14144.txt new file mode 100644 index 0000000..2c80941 --- /dev/null +++ b/changelog/14144.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/kubernetes: ensure valid entity alias names created for projected volume tokens +``` diff --git a/changelog/14171.txt b/changelog/14171.txt new file mode 100644 index 0000000..eb7dde3 --- /dev/null +++ b/changelog/14171.txt @@ -0,0 +1,3 @@ +```release-note:bug + secrets/openldap: Fix panic from nil logger in backend +``` diff --git a/changelog/14178.txt b/changelog/14178.txt new file mode 100644 index 0000000..ef6c355 --- /dev/null +++ b/changelog/14178.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Add not_before_duration to root CA generation, intermediate CA signing paths. +``` diff --git a/changelog/14190.txt b/changelog/14190.txt new file mode 100644 index 0000000..7f982cf --- /dev/null +++ b/changelog/14190.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Update github.com/prometheus/client_golang to fix security vulnerability CVE-2022-21698. +``` diff --git a/changelog/14193.txt b/changelog/14193.txt new file mode 100644 index 0000000..4dfc83f --- /dev/null +++ b/changelog/14193.txt @@ -0,0 +1,3 @@ +```release-note:change +storage/etcd: Remove support for v2. 
+``` diff --git a/changelog/14195.txt b/changelog/14195.txt new file mode 100644 index 0000000..7dc18c5 --- /dev/null +++ b/changelog/14195.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Add error handling for error types other than UserError or InternalError +``` diff --git a/changelog/14197.txt b/changelog/14197.txt new file mode 100644 index 0000000..dec9d3d --- /dev/null +++ b/changelog/14197.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Small changes to ensure goroutines terminate in tests +``` diff --git a/changelog/14206.txt b/changelog/14206.txt new file mode 100644 index 0000000..00c7cc1 --- /dev/null +++ b/changelog/14206.txt @@ -0,0 +1,4 @@ +```release-note:change +core: Changes the unit of `default_lease_ttl` and `max_lease_ttl` values returned by +the `/sys/config/state/sanitized` endpoint from nanoseconds to seconds. +``` diff --git a/changelog/14214.txt b/changelog/14214.txt new file mode 100644 index 0000000..773b894 --- /dev/null +++ b/changelog/14214.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Adds ability to configure specific user-assigned managed identities for Azure auto-auth. +``` diff --git a/changelog/14217.txt b/changelog/14217.txt new file mode 100644 index 0000000..de42fca --- /dev/null +++ b/changelog/14217.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sdk: Change OpenAPI code generator to extract request objects into /components/schemas and reference them by name. +``` diff --git a/changelog/14222.txt b/changelog/14222.txt new file mode 100644 index 0000000..3d9cfb1 --- /dev/null +++ b/changelog/14222.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/api: Fix overwriting of request headers when using JSONMergePatch. +``` diff --git a/changelog/14223.txt b/changelog/14223.txt new file mode 100644 index 0000000..6bb4d1d --- /dev/null +++ b/changelog/14223.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: The `agent/v1/quit` endpoint can now be used to stop the Vault Agent remotely +``` diff --git a/changelog/14224.txt b/changelog/14224.txt new file mode 100644 index 0000000..f7e566b --- /dev/null +++ b/changelog/14224.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix default TTL display and setting on database role +``` diff --git a/changelog/14229.txt b/changelog/14229.txt new file mode 100644 index 0000000..5fa7a2d --- /dev/null +++ b/changelog/14229.txt @@ -0,0 +1,4 @@ +```release-note:change +core: Vault version has been moved out of sdk and into main vault module. +Plugins using sdk/useragent.String must instead use sdk/useragent.PluginString. +``` diff --git a/changelog/14231.txt b/changelog/14231.txt new file mode 100644 index 0000000..d0c261f --- /dev/null +++ b/changelog/14231.txt @@ -0,0 +1,3 @@ +```release-note:bug +physical/mysql: Create table with wider `vault_key` column when initializing database tables. +``` diff --git a/changelog/14232.txt b/changelog/14232.txt new file mode 100644 index 0000000..e9497b5 --- /dev/null +++ b/changelog/14232.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Bump Go version to 1.17.7.
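The 14222 fix above touches `JSONMergePatch`, the client-side helper behind the kv metadata patch support noted earlier in this diff (13215). A sketch of a partial metadata update, assuming a kv-v2 mount at `secret/` and a hypothetical path:

```go
package main

import (
	"context"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// Sends HTTP PATCH with Content-Type application/merge-patch+json:
	// only the listed fields change; the rest of the metadata entry is kept.
	_, err = client.Logical().JSONMergePatch(context.Background(),
		"secret/metadata/my-app",
		map[string]interface{}{
			"custom_metadata": map[string]interface{}{"reviewed": "2022-03"},
		})
	if err != nil {
		log.Fatal(err)
	}
}
```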
+``` diff --git a/changelog/14233.txt b/changelog/14233.txt new file mode 100644 index 0000000..9cee8ff --- /dev/null +++ b/changelog/14233.txt @@ -0,0 +1,3 @@ +```release-note:bug + ui: Fix incorrect validity message on transit secrets engine +``` diff --git a/changelog/14235.txt b/changelog/14235.txt new file mode 100644 index 0000000..75fa53d --- /dev/null +++ b/changelog/14235.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Fix issuance of wildcard certificates matching glob patterns +``` diff --git a/changelog/14238.txt b/changelog/14238.txt new file mode 100644 index 0000000..8c794eb --- /dev/null +++ b/changelog/14238.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Restrict issuance of wildcard certificates via role parameter (`allow_wildcard_certificates`) +``` diff --git a/changelog/14268.txt b/changelog/14268.txt new file mode 100644 index 0000000..85de0a8 --- /dev/null +++ b/changelog/14268.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Allow static role credential rotation in Database secrets engines +``` diff --git a/changelog/14269.txt b/changelog/14269.txt new file mode 100644 index 0000000..529b7c6 --- /dev/null +++ b/changelog/14269.txt @@ -0,0 +1,3 @@ +```release-note:bug + api/sys/raft: Update RaftSnapshotRestore to use net/http client allowing bodies larger than allocated memory to be streamed +``` diff --git a/changelog/14292.txt b/changelog/14292.txt new file mode 100644 index 0000000..98d48f9 --- /dev/null +++ b/changelog/14292.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Warn when `generate_lease` and `no_store` are both set to `true` on requests. +``` diff --git a/changelog/14301.txt b/changelog/14301.txt new file mode 100644 index 0000000..9f5e4ba --- /dev/null +++ b/changelog/14301.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/kv: add full secret path output to table-formatted responses +``` diff --git a/changelog/14324.txt b/changelog/14324.txt new file mode 100644 index 0000000..2932b23 --- /dev/null +++ b/changelog/14324.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/ldap: Add username_as_alias configurable to change how aliases are named +``` diff --git a/changelog/14328.txt b/changelog/14328.txt new file mode 100644 index 0000000..c5e45f0 --- /dev/null +++ b/changelog/14328.txt @@ -0,0 +1,3 @@ +```release-note:change +core: A request that fails path validation due to relative path check will now be responded to with a 400 rather than 500. +``` diff --git a/changelog/14329.txt b/changelog/14329.txt new file mode 100644 index 0000000..87917be --- /dev/null +++ b/changelog/14329.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes issue logging out with wrapped token query parameter +``` \ No newline at end of file diff --git a/changelog/14385.txt b/changelog/14385.txt new file mode 100644 index 0000000..101fa11 --- /dev/null +++ b/changelog/14385.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Systemd unit file included with the Linux packages now sets the service type to notify. +``` \ No newline at end of file diff --git a/changelog/14388.txt b/changelog/14388.txt new file mode 100644 index 0000000..5db7af5 --- /dev/null +++ b/changelog/14388.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Add context-aware functions to vault/api for each API wrapper function. 
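A short sketch of the context-aware wrappers added in 14388 above; the secret path is a placeholder:

```go
package main

import (
	"context"
	"log"
	"time"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	// ReadWithContext abandons the request once the deadline expires,
	// unlike the plain Read wrapper it mirrors.
	secret, err := client.Logical().ReadWithContext(ctx, "secret/data/my-app")
	if err != nil {
		log.Fatal(err)
	}
	log.Println(secret != nil)
}
```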
+``` diff --git a/changelog/14389.txt b/changelog/14389.txt new file mode 100644 index 0000000..56ee8c4 --- /dev/null +++ b/changelog/14389.txt @@ -0,0 +1,3 @@ +```release-note:improvement +website/docs: added a link to an Enigma secret plugin. +``` diff --git a/changelog/14399.txt b/changelog/14399.txt new file mode 100644 index 0000000..5d5c6b1 --- /dev/null +++ b/changelog/14399.txt @@ -0,0 +1,3 @@ +```release-note:bug +debug: Fix panic when capturing debug bundle on Windows +``` \ No newline at end of file diff --git a/changelog/14400.txt b/changelog/14400.txt new file mode 100644 index 0000000..0efaa3d --- /dev/null +++ b/changelog/14400.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes displaying empty masked values in PKI engine +``` \ No newline at end of file diff --git a/changelog/14422.txt b/changelog/14422.txt new file mode 100644 index 0000000..edfc380 --- /dev/null +++ b/changelog/14422.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Redirects to managed namespace if incorrect namespace in URL param +``` \ No newline at end of file diff --git a/changelog/14424.txt b/changelog/14424.txt new file mode 100644 index 0000000..cc54058 --- /dev/null +++ b/changelog/14424.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent: Fix log level mismatch between ERR and ERROR +``` diff --git a/changelog/14426.txt b/changelog/14426.txt new file mode 100644 index 0000000..81ecc0d --- /dev/null +++ b/changelog/14426.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity/oidc: Fixes potential write to readonly storage on performance secondary clusters during key rotation +``` diff --git a/changelog/14455.txt b/changelog/14455.txt new file mode 100644 index 0000000..e8e7f74 --- /dev/null +++ b/changelog/14455.txt @@ -0,0 +1,3 @@ +```release-note:improvement +storage/gcs: Add documentation explaining how to configure the gcs backend using environment variables instead of options in the configuration stanza +``` diff --git a/changelog/14474.txt b/changelog/14474.txt new file mode 100644 index 0000000..1469c03 --- /dev/null +++ b/changelog/14474.txt @@ -0,0 +1,4 @@ +```release-note:improvement +auth/approle: SecretIDs can now be generated with a per-request specified TTL and num_uses. +When the ttl and num_uses fields are not specified, the role's configuration is used. +``` \ No newline at end of file diff --git a/changelog/14487.txt b/changelog/14487.txt new file mode 100644 index 0000000..b7e9969 --- /dev/null +++ b/changelog/14487.txt @@ -0,0 +1,3 @@ +```release-note:bug +sdk/cidrutil: Only check if cidr contains remote address for IP addresses +``` \ No newline at end of file diff --git a/changelog/14489.txt b/changelog/14489.txt new file mode 100644 index 0000000..014d0a5 --- /dev/null +++ b/changelog/14489.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes caching issue on kv new version create +``` diff --git a/changelog/14493.txt b/changelog/14493.txt new file mode 100644 index 0000000..cb98314 --- /dev/null +++ b/changelog/14493.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes horizontal bar chart hover issue when filtering namespaces and mounts +``` \ No newline at end of file diff --git a/changelog/14501.txt b/changelog/14501.txt new file mode 100644 index 0000000..5ed687e --- /dev/null +++ b/changelog/14501.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix panic caused by parsing policies with empty slice values.
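For the approle improvement above (14474), a sketch of requesting a short-lived, single-use SecretID via the generic logical client; the role name and values are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// Omitting ttl/num_uses falls back to the role's configured defaults.
	resp, err := client.Logical().WriteWithContext(context.Background(),
		"auth/approle/role/my-role/secret-id",
		map[string]interface{}{
			"ttl":      "10m",
			"num_uses": 1,
		})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Data["secret_id_accessor"])
}
```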
+``` diff --git a/changelog/14508.txt b/changelog/14508.txt new file mode 100644 index 0000000..dde6b55 --- /dev/null +++ b/changelog/14508.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Parse schema refs from OpenAPI +``` \ No newline at end of file diff --git a/changelog/14522.txt b/changelog/14522.txt new file mode 100644 index 0000000..420a983 --- /dev/null +++ b/changelog/14522.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited strings +``` diff --git a/changelog/14523.txt b/changelog/14523.txt new file mode 100644 index 0000000..f314731 --- /dev/null +++ b/changelog/14523.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: Fix panic caused by parsing key=value fields whose value is a single backslash +``` diff --git a/changelog/14543.txt b/changelog/14543.txt new file mode 100644 index 0000000..649d1e7 --- /dev/null +++ b/changelog/14543.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity/token: Fixes a bug where duplicate public keys could appear in the .well-known JWKS +``` diff --git a/changelog/14545.txt b/changelog/14545.txt new file mode 100644 index 0000000..6b34221 --- /dev/null +++ b/changelog/14545.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes issue with correct auth method not selected when logging out from OIDC or JWT methods +``` \ No newline at end of file diff --git a/changelog/14551.txt b/changelog/14551.txt new file mode 100644 index 0000000..47ef9ad --- /dev/null +++ b/changelog/14551.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix issue where UI incorrectly handled API errors when mounting backends +``` \ No newline at end of file diff --git a/changelog/14622.txt b/changelog/14622.txt new file mode 100644 index 0000000..289756a --- /dev/null +++ b/changelog/14622.txt @@ -0,0 +1,3 @@ +```release-note:bug +replication (enterprise): fix panic due to missing entity during invalidation of local aliases. +``` \ No newline at end of file diff --git a/changelog/14659.txt b/changelog/14659.txt new file mode 100644 index 0000000..0468cbc --- /dev/null +++ b/changelog/14659.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Replaces the IvyCodemirror wrapper with a custom ember modifier. +``` \ No newline at end of file diff --git a/changelog/14670.txt b/changelog/14670.txt new file mode 100644 index 0000000..c054f55 --- /dev/null +++ b/changelog/14670.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli/vault: warn when policy name contains upper-case letter +``` \ No newline at end of file diff --git a/changelog/14704.txt b/changelog/14704.txt new file mode 100644 index 0000000..e5663e1 --- /dev/null +++ b/changelog/14704.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix panic for help request URL paths without /v1/ prefix +``` \ No newline at end of file diff --git a/changelog/14744.txt b/changelog/14744.txt new file mode 100644 index 0000000..e5226f9 --- /dev/null +++ b/changelog/14744.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/database: Ensure that a `connection_url` password is redacted in all cases. 
+``` diff --git a/changelog/14746.txt b/changelog/14746.txt new file mode 100644 index 0000000..4c22a7c --- /dev/null +++ b/changelog/14746.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/approle: Add maximum length for input values that result in SHA-256 HMAC calculation +``` diff --git a/changelog/14751.txt b/changelog/14751.txt new file mode 100644 index 0000000..17cbfa0 --- /dev/null +++ b/changelog/14751.txt @@ -0,0 +1,4 @@ + +```release-note:improvement +auth/cert: Add metadata to identity-alias +``` diff --git a/changelog/14752.txt b/changelog/14752.txt new file mode 100644 index 0000000..3cc5fd0 --- /dev/null +++ b/changelog/14752.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: report unused or redundant keys in server configuration +``` diff --git a/changelog/14753.txt b/changelog/14753.txt new file mode 100644 index 0000000..af7fbb5 --- /dev/null +++ b/changelog/14753.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Add ability to pass certificate as PEM bytes to api.Client. +``` diff --git a/changelog/14755.txt b/changelog/14755.txt new file mode 100644 index 0000000..1c94f44 --- /dev/null +++ b/changelog/14755.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/metrics: Fix incorrect table size metric for local mounts +``` diff --git a/changelog/14763.txt b/changelog/14763.txt new file mode 100644 index 0000000..8eb8ff0 --- /dev/null +++ b/changelog/14763.txt @@ -0,0 +1,3 @@ +```release-note:change +ui: Upgrade Ember to version 3.28 +``` \ No newline at end of file diff --git a/changelog/14775.txt b/changelog/14775.txt new file mode 100644 index 0000000..03beb82 --- /dev/null +++ b/changelog/14775.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Use the context passed to the api/auth Login helpers. +``` diff --git a/changelog/14791.txt b/changelog/14791.txt new file mode 100644 index 0000000..b9e4315 --- /dev/null +++ b/changelog/14791.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix excessive unix file permissions +``` diff --git a/changelog/14794.txt b/changelog/14794.txt new file mode 100644 index 0000000..fb45492 --- /dev/null +++ b/changelog/14794.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix KV secret showing in the edit form after a user creates a new version but doesn't have read capabilities +``` \ No newline at end of file diff --git a/changelog/14807.txt b/changelog/14807.txt new file mode 100644 index 0000000..3c338a2 --- /dev/null +++ b/changelog/14807.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli: Alternative flag-based syntax for KV to mitigate confusion from automatically appended /data +``` \ No newline at end of file diff --git a/changelog/14814.txt b/changelog/14814.txt new file mode 100644 index 0000000..0583fb7 --- /dev/null +++ b/changelog/14814.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix potential memory leak from time.After() used in a select statement +``` diff --git a/changelog/14817.txt b/changelog/14817.txt new file mode 100644 index 0000000..9b8e39a --- /dev/null +++ b/changelog/14817.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: check uid and permissions of config dir, config file, plugin dir and plugin binaries +``` \ No newline at end of file diff --git a/changelog/14836.txt b/changelog/14836.txt new file mode 100644 index 0000000..6ee8d54 --- /dev/null +++ b/changelog/14836.txt @@ -0,0 +1,3 @@ +```release-note:bug +api: Respect increment value in grace period calculations in LifetimeWatcher +``` diff --git a/changelog/14846.txt b/changelog/14846.txt new file mode 100644 index 0000000..10621ff --- /dev/null +++
b/changelog/14846.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix excessive unix file permissions on the directories, files, and archive created by the vault debug command +``` \ No newline at end of file diff --git a/changelog/14864.txt b/changelog/14864.txt new file mode 100644 index 0000000..34af10f --- /dev/null +++ b/changelog/14864.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth: enforce a rate limit for TOTP passcode validation attempts +``` diff --git a/changelog/14869.txt b/changelog/14869.txt new file mode 100644 index 0000000..1ee6122 --- /dev/null +++ b/changelog/14869.txt @@ -0,0 +1,4 @@ +```release-note:change +auth: Remove support for legacy MFA +(https://www.vaultproject.io/docs/v1.10.x/auth/mfa) +``` diff --git a/changelog/14875.txt b/changelog/14875.txt new file mode 100644 index 0000000..ef4622d --- /dev/null +++ b/changelog/14875.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Fix handling of "any" key type with default zero signature bits value. +``` diff --git a/changelog/14899.txt b/changelog/14899.txt new file mode 100644 index 0000000..550005e --- /dev/null +++ b/changelog/14899.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Print minimum required policy for any command**: The global CLI flag `-output-policy` can now be used with any command to print out the minimum required policy HCL for that operation, including whether the given path requires the "sudo" capability. +``` \ No newline at end of file diff --git a/changelog/14900.txt b/changelog/14900.txt new file mode 100644 index 0000000..6d995fa --- /dev/null +++ b/changelog/14900.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Added MFALogin() for handling MFA flow when using login helpers. +``` \ No newline at end of file diff --git a/changelog/14916.txt b/changelog/14916.txt new file mode 100644 index 0000000..d61065a --- /dev/null +++ b/changelog/14916.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes issue logging in with OIDC from a listed auth mounts tab +``` \ No newline at end of file diff --git a/changelog/14941.txt b/changelog/14941.txt new file mode 100644 index 0000000..f82b633 --- /dev/null +++ b/changelog/14941.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix issue with KV not recomputing model when you changed versions. +``` diff --git a/changelog/14943.txt b/changelog/14943.txt new file mode 100644 index 0000000..1501c56 --- /dev/null +++ b/changelog/14943.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Fixed bug where larger SHA-2 hashes were truncated with shorter ECDSA CA certificates +``` diff --git a/changelog/14945.txt b/changelog/14945.txt new file mode 100644 index 0000000..50dea71 --- /dev/null +++ b/changelog/14945.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli: Support the -format=raw option, to read non-JSON Vault endpoints and original response bodies.
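The 14814 entry above refers to a well-known Go pitfall: `time.After` allocates a fresh timer on every call, and none of them is reclaimed until it fires. A generic sketch of the usual remedy (one reusable timer); this shows the general pattern, not Vault's exact code:

```go
package main

import "time"

func watch(events <-chan struct{}, timeout time.Duration) {
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	for {
		select {
		case <-events:
			// Drain and reuse the single timer instead of calling time.After
			// here, which would allocate a new timer on every iteration.
			if !timer.Stop() {
				<-timer.C
			}
			timer.Reset(timeout)
		case <-timer.C:
			return // idle timeout reached
		}
	}
}

func main() {
	watch(make(chan struct{}), 10*time.Millisecond) // returns after the timeout
}
```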
+``` diff --git a/changelog/14946.txt b/changelog/14946.txt new file mode 100644 index 0000000..43ee1e5 --- /dev/null +++ b/changelog/14946.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Secrets/auth plugin multiplexing**: manage multiple plugin configurations with a single plugin process +``` diff --git a/changelog/14954.txt b/changelog/14954.txt new file mode 100644 index 0000000..fc8be70 --- /dev/null +++ b/changelog/14954.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/aws: Add RoleSession to DisplayName when using assumeRole for authentication +``` \ No newline at end of file diff --git a/changelog/14957.txt b/changelog/14957.txt new file mode 100644 index 0000000..96f0c04 --- /dev/null +++ b/changelog/14957.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Include build date in `sys/seal-status` and `sys/version-history` endpoints. +``` diff --git a/changelog/14962.txt b/changelog/14962.txt new file mode 100644 index 0000000..b8bea45 --- /dev/null +++ b/changelog/14962.txt @@ -0,0 +1,6 @@ +```release-note:improvement +api: If the parameters supplied over the API payload are ignored due to not +being what the endpoints were expecting, or if the parameters supplied get +replaced by the values in the endpoint's path itself, warnings will be added to +the non-empty responses listing all the ignored and replaced parameters. +``` diff --git a/changelog/14963.txt b/changelog/14963.txt new file mode 100644 index 0000000..9bc3dc8 --- /dev/null +++ b/changelog/14963.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Provide a helper method WithNamespace to create a cloned client with a new NS +``` \ No newline at end of file diff --git a/changelog/14966.txt b/changelog/14966.txt new file mode 100644 index 0000000..8892944 --- /dev/null +++ b/changelog/14966.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes edit auth method capabilities issue +``` \ No newline at end of file diff --git a/changelog/14968.txt b/changelog/14968.txt new file mode 100644 index 0000000..a9a5e76 --- /dev/null +++ b/changelog/14968.txt @@ -0,0 +1,3 @@ +```release-note:bug +api: Fixes bug where OutputCurlString field was unintentionally being copied over during client cloning +``` diff --git a/changelog/14973.txt b/changelog/14973.txt new file mode 100644 index 0000000..461aa38 --- /dev/null +++ b/changelog/14973.txt @@ -0,0 +1,3 @@ +```release-note:bug +cassandra: Update gocql Cassandra client to fix "no hosts available in the pool" error +``` diff --git a/changelog/14975.txt b/changelog/14975.txt new file mode 100644 index 0000000..34de9a7 --- /dev/null +++ b/changelog/14975.txt @@ -0,0 +1,5 @@ +```release-note:change +secrets/pki: existing Generate Root (pki/root/generate/:type), +Set Signed Intermediate (/pki/intermediate/set-signed) APIs will +add new issuers/keys to a mount instead of warning that an existing CA exists +``` \ No newline at end of file diff --git a/changelog/14977.txt b/changelog/14977.txt new file mode 100644 index 0000000..937c5d7 --- /dev/null +++ b/changelog/14977.txt @@ -0,0 +1,3 @@ +```release-note:bug +raft: Ensure initialMmapSize is set to 0 on Windows +``` diff --git a/changelog/14985.txt b/changelog/14985.txt new file mode 100644 index 0000000..3d8f428 --- /dev/null +++ b/changelog/14985.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/okta: Add support for Google provider TOTP type in the Okta auth method +``` diff --git a/changelog/15004.txt b/changelog/15004.txt new file mode 100644 index 0000000..dc1fc2d --- /dev/null +++ b/changelog/15004.txt @@ -0,0 +1,4 @@ 
+```release-note:change +secrets/pki: existing Delete Root API (pki/root) will now delete all issuers +and keys within the mount path. +``` \ No newline at end of file diff --git a/changelog/15009.txt b/changelog/15009.txt new file mode 100644 index 0000000..aa2fd74 --- /dev/null +++ b/changelog/15009.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth: forward requests subject to login MFA from perfStandby to Active node +``` diff --git a/changelog/15025.txt b/changelog/15025.txt new file mode 100644 index 0000000..fd69733 --- /dev/null +++ b/changelog/15025.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: masked values no longer give away length or location of special characters +``` diff --git a/changelog/15041.txt b/changelog/15041.txt new file mode 100644 index 0000000..609b23d --- /dev/null +++ b/changelog/15041.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: fixed systemd reloading notification +``` diff --git a/changelog/15042.txt b/changelog/15042.txt new file mode 100644 index 0000000..77c5a85 --- /dev/null +++ b/changelog/15042.txt @@ -0,0 +1,3 @@ +```release-note:improvement +storage/raft: Use larger timeouts at startup to reduce likelihood of inducing elections. +``` diff --git a/changelog/15046.txt b/changelog/15046.txt new file mode 100644 index 0000000..e52d9db --- /dev/null +++ b/changelog/15046.txt @@ -0,0 +1,4 @@ +```release-note:bug +ui: Fix Generated Token's Policies helpText to clarify that comma separated values are not accepted in this field. +``` + diff --git a/changelog/15054.txt b/changelog/15054.txt new file mode 100644 index 0000000..8d4a827 --- /dev/null +++ b/changelog/15054.txt @@ -0,0 +1,3 @@ +```release-note:feature +storage/dynamodb: Added `AWS_DYNAMODB_REGION` environment variable. +``` diff --git a/changelog/15055.txt b/changelog/15055.txt new file mode 100644 index 0000000..648aa29 --- /dev/null +++ b/changelog/15055.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity: deduplicate policies when creating/updating identity groups +``` \ No newline at end of file diff --git a/changelog/15058.txt b/changelog/15058.txt new file mode 100644 index 0000000..6e4cad8 --- /dev/null +++ b/changelog/15058.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fix search-select component showing blank selections when editing group member entity +``` diff --git a/changelog/15067.txt b/changelog/15067.txt new file mode 100644 index 0000000..4adee8f --- /dev/null +++ b/changelog/15067.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: pre-calculate namespace specific paths when tainting a route during postUnseal +``` diff --git a/changelog/15072.txt b/changelog/15072.txt new file mode 100644 index 0000000..c96b194 --- /dev/null +++ b/changelog/15072.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited integers +``` diff --git a/changelog/15074.txt b/changelog/15074.txt new file mode 100644 index 0000000..fed33ea --- /dev/null +++ b/changelog/15074.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Remove storybook. 
+``` diff --git a/changelog/15092.txt b/changelog/15092.txt new file mode 100644 index 0000000..968d6ba --- /dev/null +++ b/changelog/15092.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Upgrade hashicorp/consul-template version for sprig template functions and improved writeTo function +``` diff --git a/changelog/15100.txt b/changelog/15100.txt new file mode 100644 index 0000000..0b4a716 --- /dev/null +++ b/changelog/15100.txt @@ -0,0 +1,4 @@ +```release-note:change +secrets/pki: Existing CRL API (/pki/crl) now returns an X.509 v2 CRL instead +of a v1 CRL. +``` \ No newline at end of file diff --git a/changelog/15104.txt b/changelog/15104.txt new file mode 100644 index 0000000..39ea1f5 --- /dev/null +++ b/changelog/15104.txt @@ -0,0 +1,3 @@ +```release-note:bug +sdk: Fix OpenApi spec generator to properly convert TypeInt64 to OAS supported int64 +``` diff --git a/changelog/15123.txt b/changelog/15123.txt new file mode 100644 index 0000000..51223e4 --- /dev/null +++ b/changelog/15123.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Fix some identity data races found by Go race detector (no known impact yet). +``` diff --git a/changelog/15125.txt b/changelog/15125.txt new file mode 100644 index 0000000..4d6e508 --- /dev/null +++ b/changelog/15125.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Upgrade golang.org/x/crypto/ssh +``` diff --git a/changelog/15152.txt b/changelog/15152.txt new file mode 100644 index 0000000..e2e3c03 --- /dev/null +++ b/changelog/15152.txt @@ -0,0 +1,4 @@ +```release-note:improvement +secrets/pki: Allow operators to control the issuing certificate behavior when +the requested TTL is beyond the NotAfter value of the signing certificate +``` \ No newline at end of file diff --git a/changelog/15155.txt b/changelog/15155.txt new file mode 100644 index 0000000..c65e1c0 --- /dev/null +++ b/changelog/15155.txt @@ -0,0 +1,5 @@ +```release-note:change +secrets/pki: The `ca_chain` response field within issuing (/pki/issue/:role) +and signing APIs will now include the root CA certificate if the mount is +aware of it. +``` \ No newline at end of file diff --git a/changelog/15156.txt b/changelog/15156.txt new file mode 100644 index 0000000..8718f1b --- /dev/null +++ b/changelog/15156.txt @@ -0,0 +1,3 @@ +```release-note:bug +raft: fix Raft TLS key rotation panic that occurs if the active key is more than 24 hours old +``` diff --git a/changelog/15163.txt b/changelog/15163.txt new file mode 100644 index 0000000..337a279 --- /dev/null +++ b/changelog/15163.txt @@ -0,0 +1,3 @@ +```release-note:bug +sdk: Fix OpenApi spec generator to remove duplicate sha_256 parameter +``` \ No newline at end of file diff --git a/changelog/15166.txt b/changelog/15166.txt new file mode 100644 index 0000000..bf73aef --- /dev/null +++ b/changelog/15166.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Add new DB methods that do not prepare statements. +``` diff --git a/changelog/15167.txt b/changelog/15167.txt new file mode 100644 index 0000000..f4c0d86 --- /dev/null +++ b/changelog/15167.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixed client count timezone for start and end months +``` \ No newline at end of file diff --git a/changelog/15179.txt b/changelog/15179.txt new file mode 100644 index 0000000..00944f6 --- /dev/null +++ b/changelog/15179.txt @@ -0,0 +1,4 @@ +```release-note:bug +secrets/pki: CRLs on performance secondary clusters are now automatically +rebuilt upon changes to the list of issuers.
+``` \ No newline at end of file diff --git a/changelog/15188.txt b/changelog/15188.txt new file mode 100644 index 0000000..4435ccb --- /dev/null +++ b/changelog/15188.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: kv get command now honors trailing spaces to retrieve secrets +``` diff --git a/changelog/15204.txt b/changelog/15204.txt new file mode 100644 index 0000000..f4c5b8c --- /dev/null +++ b/changelog/15204.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent/auto-auth: Add `min_backoff` to the method stanza for configuring initial backoff duration. +``` diff --git a/changelog/15211.txt b/changelog/15211.txt new file mode 100644 index 0000000..da7faa9 --- /dev/null +++ b/changelog/15211.txt @@ -0,0 +1,6 @@ +```release-note:change +secrets/pki: A new aliased API path (/pki/issuer/:issuer_ref/sign-self-issued) +provides the same functionality as the existing API (/pki/root/sign-self-issued) +but does not require sudo capabilities; the latter still requires sudo in an +effort to maintain backwards compatibility. +``` \ No newline at end of file diff --git a/changelog/15213.txt b/changelog/15213.txt new file mode 100644 index 0000000..b5774b2 --- /dev/null +++ b/changelog/15213.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core,transit: Allow callers to choose random byte source including entropy augmentation sources for the sys/tools/random and transit/random endpoints. +``` \ No newline at end of file diff --git a/changelog/15224.txt b/changelog/15224.txt new file mode 100644 index 0000000..c25ccf6 --- /dev/null +++ b/changelog/15224.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/config: Only ask the system about network interfaces when address configs contain a template having the format: {{ ... }} +``` \ No newline at end of file diff --git a/changelog/15248.txt b/changelog/15248.txt new file mode 100644 index 0000000..9726d07 --- /dev/null +++ b/changelog/15248.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth: Globally scoped Login MFA method Get/List endpoints +``` diff --git a/changelog/15250.txt b/changelog/15250.txt new file mode 100644 index 0000000..36d6d03 --- /dev/null +++ b/changelog/15250.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/ssh: Support for `add_before_duration` in SSH +``` \ No newline at end of file diff --git a/changelog/15259.txt b/changelog/15259.txt new file mode 100644 index 0000000..6ad0b2e --- /dev/null +++ b/changelog/15259.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/activity: Order month data in ascending order of timestamps +``` \ No newline at end of file diff --git a/changelog/15261.txt b/changelog/15261.txt new file mode 100644 index 0000000..c04c80a --- /dev/null +++ b/changelog/15261.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth: load login MFA configuration upon restart +``` diff --git a/changelog/15277.txt b/changelog/15277.txt new file mode 100644 index 0000000..91a3754 --- /dev/null +++ b/changelog/15277.txt @@ -0,0 +1,6 @@ +```release-note:feature +**Non-Disruptive Intermediate/Root Certificate Rotation**: This allows +import, generation and configuration of any number of keys and/or issuers +within a PKI mount, providing operators the ability to rotate certificates
in place without affecting existing client configurations.
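As a sketch of the multi-issuer workflow this rotation feature enables (endpoint names follow the multi-issuer API described in these notes; the bundle file and `NEW_ISSUER_ID` variable are illustrative, and exact parameters may vary by version):

```shell
# Import an additional CA bundle without disturbing the mount's existing issuers.
vault write pki/issuers/import/bundle pem_bundle=@rotated-ca.pem

# Enumerate every issuer the mount now tracks.
vault list pki/issuers

# Switch the mount's default issuer once clients are ready for the new CA.
vault write pki/config/issuers default="$NEW_ISSUER_ID"
```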
+``` diff --git a/changelog/15293.txt b/changelog/15293.txt new file mode 100644 index 0000000..857b198 --- /dev/null +++ b/changelog/15293.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Update consul-template to v0.29.0 +``` \ No newline at end of file diff --git a/changelog/15295.txt b/changelog/15295.txt new file mode 100644 index 0000000..0fbf4b5 --- /dev/null +++ b/changelog/15295.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/consul: Add support for Consul node-identities and service-identities +``` \ No newline at end of file diff --git a/changelog/15305.txt b/changelog/15305.txt new file mode 100644 index 0000000..be3607c --- /dev/null +++ b/changelog/15305.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: KV helper methods to simplify the common use case of reading and writing KV secrets +``` \ No newline at end of file diff --git a/changelog/15316.txt b/changelog/15316.txt new file mode 100644 index 0000000..d871900 --- /dev/null +++ b/changelog/15316.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli/debug: added support for retrieving metrics from DR clusters if `unauthenticated_metrics_access` is enabled +``` diff --git a/changelog/15342.txt b/changelog/15342.txt new file mode 100644 index 0000000..0b0d6c1 --- /dev/null +++ b/changelog/15342.txt @@ -0,0 +1,3 @@ +```release-note:improvement +audit: Include mount_accessor in audit request and response logs +``` \ No newline at end of file diff --git a/changelog/15343.txt b/changelog/15343.txt new file mode 100644 index 0000000..e293dd4 --- /dev/null +++ b/changelog/15343.txt @@ -0,0 +1,15 @@ +```release-note:change +physical/cockroachdb: Change underlying driver library from [lib/pq](https://github.com/lib/pq) to [pgx](https://github.com/jackc/pgx) +``` + +```release-note:change +physical/postgres: Change underlying driver library from [lib/pq](https://github.com/lib/pq) to [pgx](https://github.com/jackc/pgx) +``` + +```release-note:change +database/postgres: Change underlying driver library from [lib/pq](https://github.com/lib/pq) to [pgx](https://github.com/jackc/pgx) +``` + +```release-note:change +database/redshift: Change underlying driver library from [lib/pq](https://github.com/lib/pq) to [pgx](https://github.com/jackc/pgx) +``` diff --git a/changelog/15352.txt b/changelog/15352.txt new file mode 100644 index 0000000..c9ce4c0 --- /dev/null +++ b/changelog/15352.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/activity: allow client counts to be precomputed and queried on non-contiguous chunks of data +``` \ No newline at end of file diff --git a/changelog/15355.txt b/changelog/15355.txt new file mode 100644 index 0000000..ef56c84 --- /dev/null +++ b/changelog/15355.txt @@ -0,0 +1,3 @@ +```release-note:improvement +mfa/okta: migrate to use official Okta SDK +``` diff --git a/changelog/15361.txt b/changelog/15361.txt new file mode 100644 index 0000000..1d4284d --- /dev/null +++ b/changelog/15361.txt @@ -0,0 +1,5 @@ +```release-note:improvement +auth/okta: Add support for performing [the number +challenge](https://help.okta.com/en-us/Content/Topics/Mobile/ov-admin-config.htm?cshid=csh-okta-verify-number-challenge-v1#enable-number-challenge) +during an Okta Verify push challenge +``` \ No newline at end of file diff --git a/changelog/15364.txt b/changelog/15364.txt new file mode 100644 index 0000000..e118076 --- /dev/null +++ b/changelog/15364.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fix firefox inability to recognize file format of client count csv export +``` \ No newline at end of
file diff --git a/changelog/15369.txt b/changelog/15369.txt new file mode 100644 index 0000000..b27c004 --- /dev/null +++ b/changelog/15369.txt @@ -0,0 +1,3 @@ +```release-note:bug +mfa/okta: disable client side rate limiting causing delays in push notifications +``` \ No newline at end of file diff --git a/changelog/15376.txt b/changelog/15376.txt new file mode 100644 index 0000000..68ebd65 --- /dev/null +++ b/changelog/15376.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Snowflake Database Plugin**: Adds ability to manage RSA key pair credentials for dynamic and static Snowflake users. +``` diff --git a/changelog/15377.txt b/changelog/15377.txt new file mode 100644 index 0000000..c754710 --- /dev/null +++ b/changelog/15377.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Support VAULT_PROXY_ADDR environment variable to allow overriding the Vault client's HTTP proxy. +``` \ No newline at end of file diff --git a/changelog/15378.txt b/changelog/15378.txt new file mode 100644 index 0000000..bb752e6 --- /dev/null +++ b/changelog/15378.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Allow namespace param to be parsed from state queryParam +``` \ No newline at end of file diff --git a/changelog/15380.txt b/changelog/15380.txt new file mode 100644 index 0000000..d895f8a --- /dev/null +++ b/changelog/15380.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent: Redact auto auth token from renew endpoints +``` diff --git a/changelog/15383.txt b/changelog/15383.txt new file mode 100644 index 0000000..e987382 --- /dev/null +++ b/changelog/15383.txt @@ -0,0 +1,3 @@ +```release-note:bug +command: do not report listener and storage types as key not found warnings +``` \ No newline at end of file diff --git a/changelog/15400.txt b/changelog/15400.txt new file mode 100644 index 0000000..aa14944 --- /dev/null +++ b/changelog/15400.txt @@ -0,0 +1,3 @@ +```release-note:deprecation +secrets/consul: Deprecate parameter "policies" in favor of "consul_policies" for consistency +``` \ No newline at end of file diff --git a/changelog/15405.txt b/changelog/15405.txt new file mode 100644 index 0000000..eb98386 --- /dev/null +++ b/changelog/15405.txt @@ -0,0 +1,3 @@ +```release-note:improvement +command: Support optional '-log-level' flag to be passed to 'operator migrate' command (defaults to info). Also support VAULT_LOG_LEVEL env var. +``` \ No newline at end of file diff --git a/changelog/15414.txt b/changelog/15414.txt new file mode 100644 index 0000000..de6372b --- /dev/null +++ b/changelog/15414.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Transit BYOK**: Allow import of externally-generated keys into the Transit secrets engine. +``` diff --git a/changelog/15417.txt b/changelog/15417.txt new file mode 100644 index 0000000..1864779 --- /dev/null +++ b/changelog/15417.txt @@ -0,0 +1,3 @@ +```release-note:improvement +command: Support the optional '-detailed' flag to be passed to 'vault list' command to show ListResponseWithInfo data. Also supports the VAULT_DETAILED env var. 
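A brief sketch of the `-detailed` list flag and its companion environment variable (the KV v2 metadata path is illustrative):

```shell
# Show ListResponseWithInfo data for one invocation...
vault list -detailed secret/metadata

# ...or enable detailed listings for the whole shell session.
export VAULT_DETAILED=true
vault list secret/metadata
```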
+``` diff --git a/changelog/15420.txt b/changelog/15420.txt new file mode 100644 index 0000000..e92c5da --- /dev/null +++ b/changelog/15420.txt @@ -0,0 +1,3 @@ +```release-note:improvement +activity: return nil response months in activity log API when no month data exists +``` \ No newline at end of file diff --git a/changelog/15428.txt b/changelog/15428.txt new file mode 100644 index 0000000..2c4a4a5 --- /dev/null +++ b/changelog/15428.txt @@ -0,0 +1,9 @@ +```release-note:bug +auth: Fixed erroneous success message when using vault login in case of two-phase MFA +``` +```release-note:bug +auth: Fixed erroneous token information being displayed when using vault login in case of two-phase MFA +``` +```release-note:bug +auth: Fixed two-phase MFA information missing from table format when using vault login +``` \ No newline at end of file diff --git a/changelog/15429.txt b/changelog/15429.txt new file mode 100644 index 0000000..1f46aaf --- /dev/null +++ b/changelog/15429.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Have pki/sign-verbatim use the not_before_duration field defined in the role +``` \ No newline at end of file diff --git a/changelog/15434.txt b/changelog/15434.txt new file mode 100644 index 0000000..565fb9e --- /dev/null +++ b/changelog/15434.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: make ListPlugins parse only known plugin types +``` \ No newline at end of file diff --git a/changelog/15440.txt b/changelog/15440.txt new file mode 100644 index 0000000..36e0c79 --- /dev/null +++ b/changelog/15440.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/ssh: Add connection timeout of 1 minute for outbound SSH connection in deprecated Dynamic SSH Keys mode. +``` diff --git a/changelog/15452.txt b/changelog/15452.txt new file mode 100644 index 0000000..3014168 --- /dev/null +++ b/changelog/15452.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: rename the environment variable VAULT_DISABLE_FILE_PERMISSIONS_CHECK to VAULT_ENABLE_FILE_PERMISSIONS_CHECK and adjust the logic accordingly +``` \ No newline at end of file diff --git a/changelog/15457.txt b/changelog/15457.txt new file mode 100644 index 0000000..d3a2f18 --- /dev/null +++ b/changelog/15457.txt @@ -0,0 +1,4 @@ +```release-note:improvement +audit: Add a policy_results block into the audit log that contains the set of +policies that granted this request access. +``` diff --git a/changelog/15464.txt b/changelog/15464.txt new file mode 100644 index 0000000..73cd5aa --- /dev/null +++ b/changelog/15464.txt @@ -0,0 +1,3 @@ +```release-note:change +ui: pki issuer delete capabilities have been removed from the UI and reserved for the API and CLI +``` \ No newline at end of file diff --git a/changelog/15469.txt b/changelog/15469.txt new file mode 100644 index 0000000..ec87347 --- /dev/null +++ b/changelog/15469.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth: forward cached MFA auth response to the leader using RPC instead of forwarding all login requests +``` diff --git a/changelog/15470.txt b/changelog/15470.txt new file mode 100644 index 0000000..2fbaede --- /dev/null +++ b/changelog/15470.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Always return CRL and URL configurations, even if using default values.
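For the CRL/URL configuration change above, reading the two config endpoints might look like this (assuming the engine is mounted at the conventional `pki/` path):

```shell
# Both reads now return a full configuration even when only defaults apply.
vault read pki/config/urls
vault read pki/config/crl
```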
+``` diff --git a/changelog/15474.txt b/changelog/15474.txt new file mode 100644 index 0000000..158da8c --- /dev/null +++ b/changelog/15474.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Default auto-rotation period in transit is 30 days +``` \ No newline at end of file diff --git a/changelog/15478.txt b/changelog/15478.txt new file mode 100644 index 0000000..4e35391 --- /dev/null +++ b/changelog/15478.txt @@ -0,0 +1,3 @@ +```release-note:change +secret/pki: Remove unused signature_bits parameter from intermediate CSR generation; this parameter doesn't control the final certificate's signature algorithm selection as that is up to the signing CA +``` diff --git a/changelog/15482.txt b/changelog/15482.txt new file mode 100644 index 0000000..0dcfd6f --- /dev/null +++ b/changelog/15482.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth: Prevent deleting a valid MFA method ID using the endpoint for a different MFA method type +``` diff --git a/changelog/15487.txt b/changelog/15487.txt new file mode 100644 index 0000000..4ce85a1 --- /dev/null +++ b/changelog/15487.txt @@ -0,0 +1,3 @@ +```release-note:improvement +audit: added entity_created boolean to audit log, set when login operations create an entity +``` \ No newline at end of file diff --git a/changelog/15493.txt b/changelog/15493.txt new file mode 100644 index 0000000..dfa11f4 --- /dev/null +++ b/changelog/15493.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/raft: Forward autopilot state requests on perf standbys to active node. +``` \ No newline at end of file diff --git a/changelog/15494.txt b/changelog/15494.txt new file mode 100644 index 0000000..cd2caec --- /dev/null +++ b/changelog/15494.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Warn on empty Subject field during issuer generation (root/generate and root/sign-intermediate). +``` diff --git a/changelog/15509.txt b/changelog/15509.txt new file mode 100644 index 0000000..88d4aa7 --- /dev/null +++ b/changelog/15509.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Warn on missing AIA access information when generating issuers (config/urls). 
+``` diff --git a/changelog/15510.txt b/changelog/15510.txt new file mode 100644 index 0000000..b6b6ab9 --- /dev/null +++ b/changelog/15510.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Enable Patch Functionality for Roles and Issuers (API only) +``` \ No newline at end of file diff --git a/changelog/15513.txt b/changelog/15513.txt new file mode 100644 index 0000000..a3d8a33 --- /dev/null +++ b/changelog/15513.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Remove stored license references +``` \ No newline at end of file diff --git a/changelog/15519.txt b/changelog/15519.txt new file mode 100644 index 0000000..d0ec744 --- /dev/null +++ b/changelog/15519.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/raft: joining a node to a cluster now ignores any VAULT_NAMESPACE environment variable set on the server process +``` diff --git a/changelog/15523.txt b/changelog/15523.txt new file mode 100644 index 0000000..867fe27 --- /dev/null +++ b/changelog/15523.txt @@ -0,0 +1,3 @@ +```release-note:feature +**KeyMgmt UI**: Add UI support for managing the Key Management Secrets Engine +``` \ No newline at end of file diff --git a/changelog/15524.txt b/changelog/15524.txt new file mode 100644 index 0000000..1331f1e --- /dev/null +++ b/changelog/15524.txt @@ -0,0 +1,4 @@ +```release-note:change +secrets/pki: the signed CA certificate from the sign-intermediate api will now appear within the ca_chain +response field along with the issuer's ca chain. +``` \ No newline at end of file diff --git a/changelog/15525.txt b/changelog/15525.txt new file mode 100644 index 0000000..3e44c20 --- /dev/null +++ b/changelog/15525.txt @@ -0,0 +1,8 @@ +```release-note:bug +auth/ldap: The logic for setting the entity alias when `username_as_alias` is set +has been fixed. The previous behavior would make a request to the LDAP server to +get `user_attr` before discarding it and using the username instead. This would +make it impossible for a user to connect if this attribute was missing or had +multiple values, even though it would not be used anyway. This has been fixed +and the username is now used without making superfluous LDAP searches. +``` diff --git a/changelog/15527.txt b/changelog/15527.txt new file mode 100644 index 0000000..2dfb63f --- /dev/null +++ b/changelog/15527.txt @@ -0,0 +1,3 @@ +```release-note:improvement +website/docs: Add usage documentation for Kubernetes Secrets Engine +``` diff --git a/changelog/15536.txt b/changelog/15536.txt new file mode 100644 index 0000000..7bda619 --- /dev/null +++ b/changelog/15536.txt @@ -0,0 +1,7 @@ +```release-note:improvement +api/monitor: Add log_format option to allow for logs to be emitted in JSON format +``` + +```release-note:improvement +command/debug: Add log_format flag to allow for logs to be emitted in JSON format +``` diff --git a/changelog/15543.txt b/changelog/15543.txt new file mode 100644 index 0000000..43c2c74 --- /dev/null +++ b/changelog/15543.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/pki: Err on unknown role during sign-verbatim. 
+``` diff --git a/changelog/15550.txt b/changelog/15550.txt new file mode 100644 index 0000000..6579b89 --- /dev/null +++ b/changelog/15550.txt @@ -0,0 +1,3 @@ +```release-note:deprecation +secrets/consul: Deprecate old parameters "token_type" and "policy" +``` \ No newline at end of file diff --git a/changelog/15551.txt b/changelog/15551.txt new file mode 100644 index 0000000..de41f7c --- /dev/null +++ b/changelog/15551.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Kubernetes Secrets Engine**: This new secrets engine generates Kubernetes service account tokens, service accounts, role bindings, and roles dynamically. +``` diff --git a/changelog/15552.txt b/changelog/15552.txt new file mode 100644 index 0000000..22d854b --- /dev/null +++ b/changelog/15552.txt @@ -0,0 +1,6 @@ +```release-note:bug +openapi: Fixed issue where information about /auth/token endpoints was not present with explicit policy permissions +``` +```release-note:bug +api: Fixed issue with internal/ui/mounts and internal/ui/mounts/(?P<path>.+) endpoints where it was not properly handling /auth/ +``` \ No newline at end of file diff --git a/changelog/15559.txt b/changelog/15559.txt new file mode 100644 index 0000000..5df2f8b --- /dev/null +++ b/changelog/15559.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/ssh: Convert role field not_before_duration to seconds before returning it +``` \ No newline at end of file diff --git a/changelog/15560.txt b/changelog/15560.txt new file mode 100644 index 0000000..387d2a8 --- /dev/null +++ b/changelog/15560.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fix form validations ignoring default values and disabling submit button +``` diff --git a/changelog/15561.txt b/changelog/15561.txt new file mode 100644 index 0000000..95787b6 --- /dev/null +++ b/changelog/15561.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ssh: Addition of an endpoint `ssh/issue/:role` to allow the creation of signed key pairs +``` diff --git a/changelog/15573.txt b/changelog/15573.txt new file mode 100644 index 0000000..49f1d26 --- /dev/null +++ b/changelog/15573.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixed unsupported revocation statements field for DB roles +``` \ No newline at end of file diff --git a/changelog/15579.txt b/changelog/15579.txt new file mode 100644 index 0000000..f682dd4 --- /dev/null +++ b/changelog/15579.txt @@ -0,0 +1,3 @@ +```release-note:bug +plugin: Fix a bug where plugin reload would falsely report success in certain scenarios. +``` diff --git a/changelog/15581.txt b/changelog/15581.txt new file mode 100644 index 0000000..abc55a5 --- /dev/null +++ b/changelog/15581.txt @@ -0,0 +1,3 @@ +```release-note:deprecation +docs: Document removal of X.509 certificates whose signatures use SHA-1 in Vault 1.12 +``` diff --git a/changelog/15583.txt b/changelog/15583.txt new file mode 100644 index 0000000..b6cda31 --- /dev/null +++ b/changelog/15583.txt @@ -0,0 +1,3 @@ +```release-note:bug +core (enterprise): Fix bug where wrapping token lookup does not work within namespaces. +``` diff --git a/changelog/15584.txt b/changelog/15584.txt new file mode 100644 index 0000000..ebf4f11 --- /dev/null +++ b/changelog/15584.txt @@ -0,0 +1,6 @@ +```release-note:bug +auth/kubernetes: Fix error code when using the wrong service account +``` +```release-note:change +auth/kubernetes: If `kubernetes_ca_cert` is unset, and there is no pod-local CA available, an error will be surfaced when writing config instead of waiting for login.
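A hedged sketch of the config write affected by the auth/kubernetes change above; outside a pod there is no local CA to fall back on, so omitting `kubernetes_ca_cert` now fails here rather than at login time (the host and CA file path are illustrative):

```shell
# Fails at config time, not first login, if kubernetes_ca_cert is omitted
# and no pod-local CA is available.
vault write auth/kubernetes/config \
    kubernetes_host="https://kubernetes.default.svc" \
    kubernetes_ca_cert=@/etc/kubernetes/pki/ca.crt
```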
+``` \ No newline at end of file diff --git a/changelog/15586.txt b/changelog/15586.txt new file mode 100644 index 0000000..063f193 --- /dev/null +++ b/changelog/15586.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Add an export API for historical activity log data +``` diff --git a/changelog/15592.txt b/changelog/15592.txt new file mode 100644 index 0000000..0393c6a --- /dev/null +++ b/changelog/15592.txt @@ -0,0 +1,6 @@ +```release-note:improvement +auth/gcp: Vault CLI now infers the service account email when running on Google Cloud +``` +```release-note:improvement +auth/gcp: Enable the Google service endpoints used by the underlying client to be customized +``` diff --git a/changelog/15593.txt b/changelog/15593.txt new file mode 100644 index 0000000..01d749f --- /dev/null +++ b/changelog/15593.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/jwt: Adds ability to use JSON pointer syntax for the `user_claim` value. +``` diff --git a/changelog/15614.txt b/changelog/15614.txt new file mode 100644 index 0000000..3c63dd5 --- /dev/null +++ b/changelog/15614.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/database/elasticsearch: Use the new /_security base API path instead of /_xpack/security when managing elasticsearch. +``` diff --git a/changelog/15638.txt b/changelog/15638.txt new file mode 100644 index 0000000..9417955 --- /dev/null +++ b/changelog/15638.txt @@ -0,0 +1,3 @@ +```release-note:bug +vault: Fix a bug where duplicate policies could be added to an identity group. +``` \ No newline at end of file diff --git a/changelog/15655.txt b/changelog/15655.txt new file mode 100644 index 0000000..c5864ba --- /dev/null +++ b/changelog/15655.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/kubernetes: Split `additional_metadata` into `extra_annotations` and `extra_labels` parameters +``` diff --git a/changelog/15681.txt b/changelog/15681.txt new file mode 100644 index 0000000..2054411 --- /dev/null +++ b/changelog/15681.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixed bug where red spellcheck underline appears in sensitive/secret kv values when it should not appear +``` \ No newline at end of file diff --git a/changelog/15685.txt b/changelog/15685.txt new file mode 100644 index 0000000..bf9313f --- /dev/null +++ b/changelog/15685.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Updated `leasId` to `leaseId` in the "Copy Credentials" section of "Generate AWS Credentials" +``` diff --git a/changelog/15693.txt b/changelog/15693.txt new file mode 100644 index 0000000..7426c15 --- /dev/null +++ b/changelog/15693.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Prevent metrics generation from causing deadlocks. +``` diff --git a/changelog/15719.txt b/changelog/15719.txt new file mode 100644 index 0000000..902648c --- /dev/null +++ b/changelog/15719.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/aws: Add rsa2048 signature type to API +``` \ No newline at end of file diff --git a/changelog/15735.txt b/changelog/15735.txt new file mode 100644 index 0000000..3dd6600 --- /dev/null +++ b/changelog/15735.txt @@ -0,0 +1,3 @@ +```release-note:bug +quotas/lease-count: Fix lease-count quotas on mounts not properly being enforced when the lease generating request is a read +``` diff --git a/changelog/15742.txt b/changelog/15742.txt new file mode 100644 index 0000000..c7f69b6 --- /dev/null +++ b/changelog/15742.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secret/transit: Allow importing Ed25519 keys from PKCS#8 with inner RFC 5915 ECPrivateKey blobs (NSS-wrapped keys). 
+``` diff --git a/changelog/15751.txt b/changelog/15751.txt new file mode 100644 index 0000000..d753db8 --- /dev/null +++ b/changelog/15751.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Add support for CPS URLs and User Notice to Policy Information +``` \ No newline at end of file diff --git a/changelog/15759.txt b/changelog/15759.txt new file mode 100644 index 0000000..0687000 --- /dev/null +++ b/changelog/15759.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Prevent changing file permissions of audit logs when mode 0000 is used. +``` \ No newline at end of file diff --git a/changelog/15769.txt b/changelog/15769.txt new file mode 100644 index 0000000..76eab0e --- /dev/null +++ b/changelog/15769.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Revert using localStorage in favor of sessionStorage +``` \ No newline at end of file diff --git a/changelog/15789.txt b/changelog/15789.txt new file mode 100644 index 0000000..d6aaaa6 --- /dev/null +++ b/changelog/15789.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix inconsistent behavior in client count calendar widget +``` \ No newline at end of file diff --git a/changelog/15792.txt b/changelog/15792.txt new file mode 100644 index 0000000..9a7eb43 --- /dev/null +++ b/changelog/15792.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/kv: Fix issue preventing the ability to reset the `delete_version_after` key metadata field to 0s via HTTP `PATCH`. +``` diff --git a/changelog/15809.txt b/changelog/15809.txt new file mode 100644 index 0000000..87e42c8 --- /dev/null +++ b/changelog/15809.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secret/nomad: allow reading CA and client auth certificate from /nomad/config/access +``` diff --git a/changelog/15824.txt b/changelog/15824.txt new file mode 100644 index 0000000..9d9708f --- /dev/null +++ b/changelog/15824.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix issue where metadata tab is hidden even though policy grants access +``` diff --git a/changelog/15835.txt b/changelog/15835.txt new file mode 100644 index 0000000..d689c2a --- /dev/null +++ b/changelog/15835.txt @@ -0,0 +1,3 @@ +```release-note:bug +api/sys/internal/specs/openapi: support a new "dynamic" query parameter to generate generic mountpaths +``` diff --git a/changelog/15852.txt b/changelog/15852.txt new file mode 100644 index 0000000..8ed97dc --- /dev/null +++ b/changelog/15852.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Changed the tokenBoundCidrs tooltip content to clarify that comma separated values are not accepted in this field. +``` \ No newline at end of file diff --git a/changelog/15858.txt b/changelog/15858.txt new file mode 100644 index 0000000..24f4488 --- /dev/null +++ b/changelog/15858.txt @@ -0,0 +1,3 @@ +```release-note:change +core/fips: Disable and warn about entropy augmentation in FIPS 140-2 Inside mode +``` diff --git a/changelog/15866.txt b/changelog/15866.txt new file mode 100644 index 0000000..384762e --- /dev/null +++ b/changelog/15866.txt @@ -0,0 +1,3 @@ +```release-note:improvement +physical/postgresql: pass context to queries to propagate timeouts and cancellations on requests. 
+``` \ No newline at end of file diff --git a/changelog/15869.txt b/changelog/15869.txt new file mode 100644 index 0000000..bb0278d --- /dev/null +++ b/changelog/15869.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/aws: do not create leases for non-renewable/non-revocable STS credentials to reduce storage calls +``` \ No newline at end of file diff --git a/changelog/15879.txt b/changelog/15879.txt new file mode 100644 index 0000000..0d435b0 --- /dev/null +++ b/changelog/15879.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Limit SSCT WAL checks on perf standbys to raft backends only +``` diff --git a/changelog/15898.txt b/changelog/15898.txt new file mode 100644 index 0000000..02e380d --- /dev/null +++ b/changelog/15898.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Removed deprecated version of core-js 2.6.11 +``` \ No newline at end of file diff --git a/changelog/15900.txt b/changelog/15900.txt new file mode 100644 index 0000000..ec1e8b6 --- /dev/null +++ b/changelog/15900.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fixes parsing boolean values for ha_storage backends in config +``` \ No newline at end of file diff --git a/changelog/15912.txt b/changelog/15912.txt new file mode 100644 index 0000000..391d735 --- /dev/null +++ b/changelog/15912.txt @@ -0,0 +1,3 @@ +```release-note:change +identity: a request to `/identity/group` that includes `member_group_ids` that contains a cycle will now be responded to with a 400 rather than 500 +``` diff --git a/changelog/15933.txt b/changelog/15933.txt new file mode 100644 index 0000000..9afa910 --- /dev/null +++ b/changelog/15933.txt @@ -0,0 +1,3 @@ +```release-note:bug +database: Invalidate queue should cancel context first to avoid deadlock +``` diff --git a/changelog/15946.txt b/changelog/15946.txt new file mode 100644 index 0000000..869f61a --- /dev/null +++ b/changelog/15946.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/seal: Fix possible keyring truncation when using the file backend. +``` diff --git a/changelog/15986.txt b/changelog/15986.txt new file mode 100644 index 0000000..101bc13 --- /dev/null +++ b/changelog/15986.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Added `disable_idle_connections` configuration to disable leaving idle connections open in auto-auth, caching and templating. +``` diff --git a/changelog/15989.txt b/changelog/15989.txt new file mode 100644 index 0000000..68ad278 --- /dev/null +++ b/changelog/15989.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/quotas: Added ability to add path suffixes for rate-limit resource quotas +``` \ No newline at end of file diff --git a/changelog/15996.txt b/changelog/15996.txt new file mode 100644 index 0000000..b29f1da --- /dev/null +++ b/changelog/15996.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secret/pki: Allow issuing certificates with non-domain, non-email Common Names from roles, sign-verbatim, and as issuers (`cn_validations`). +``` diff --git a/changelog/15998.txt b/changelog/15998.txt new file mode 100644 index 0000000..69274f6 --- /dev/null +++ b/changelog/15998.txt @@ -0,0 +1,3 @@ +```release-note:feature +ui: UI support for Okta Number Challenge. 
+``` diff --git a/changelog/16000.txt b/changelog/16000.txt new file mode 100644 index 0000000..fde39b9 --- /dev/null +++ b/changelog/16000.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Limit activity log client count usage by namespaces +``` \ No newline at end of file diff --git a/changelog/16018.txt b/changelog/16018.txt new file mode 100644 index 0000000..31b4929 --- /dev/null +++ b/changelog/16018.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. +``` diff --git a/changelog/16056.txt b/changelog/16056.txt new file mode 100644 index 0000000..1652726 --- /dev/null +++ b/changelog/16056.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/ssh: Add allowed_domains_template to allow templating of allowed_domains. +``` diff --git a/changelog/16063.txt b/changelog/16063.txt new file mode 100644 index 0000000..aa90bec --- /dev/null +++ b/changelog/16063.txt @@ -0,0 +1,4 @@ +```release-note:improvement +website/docs: Update replication docs to mention Integrated Storage +``` + diff --git a/changelog/16087.txt b/changelog/16087.txt new file mode 100644 index 0000000..5320635 --- /dev/null +++ b/changelog/16087.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent: Update consul-template for pkiCert bug fixes +``` diff --git a/changelog/16088.txt b/changelog/16088.txt new file mode 100644 index 0000000..ff2457a --- /dev/null +++ b/changelog/16088.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically +``` diff --git a/changelog/16094.txt b/changelog/16094.txt new file mode 100644 index 0000000..edb1060 --- /dev/null +++ b/changelog/16094.txt @@ -0,0 +1,3 @@ +```release-note:bug +plugin/multiplexing: Fix panic when id doesn't exist in connection map +``` \ No newline at end of file diff --git a/changelog/16111.txt b/changelog/16111.txt new file mode 100644 index 0000000..59d5330 --- /dev/null +++ b/changelog/16111.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Add `sys/loggers` and `sys/loggers/:name` endpoints to provide ability to modify logging verbosity +``` diff --git a/changelog/16112.txt b/changelog/16112.txt new file mode 100644 index 0000000..3b61c6b --- /dev/null +++ b/changelog/16112.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/auth: Return a 403 instead of a 500 for a malformed SSCT +``` diff --git a/changelog/16115.txt b/changelog/16115.txt new file mode 100644 index 0000000..82998b6 --- /dev/null +++ b/changelog/16115.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/quotas: Added ability to add role information for rate-limit resource quotas, to limit login requests on auth mounts made using that role +``` \ No newline at end of file diff --git a/changelog/16124.txt b/changelog/16124.txt new file mode 100644 index 0000000..38eca2a --- /dev/null +++ b/changelog/16124.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secret/pki: Add signature_bits to sign-intermediate, sign-verbatim endpoints +``` diff --git a/changelog/16140.txt b/changelog/16140.txt new file mode 100644 index 0000000..1fffd7e --- /dev/null +++ b/changelog/16140.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/ad: set config default length only if password_policy is missing +``` diff --git a/changelog/16146.txt b/changelog/16146.txt new file mode 100644 index 0000000..39086b3 --- /dev/null +++ b/changelog/16146.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/activity: generate hyperloglogs 
containing clientIds for each month during precomputation +``` \ No newline at end of file diff --git a/changelog/16162.txt b/changelog/16162.txt new file mode 100644 index 0000000..5e3c348 --- /dev/null +++ b/changelog/16162.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/activity: refactor activity log api to reuse partial api functions in activity endpoint when current month is specified +``` \ No newline at end of file diff --git a/changelog/16170.txt b/changelog/16170.txt new file mode 100644 index 0000000..04707ce --- /dev/null +++ b/changelog/16170.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: OIDC login type uses localStorage instead of sessionStorage +``` diff --git a/changelog/16181.txt b/changelog/16181.txt new file mode 100644 index 0000000..1e97d1e --- /dev/null +++ b/changelog/16181.txt @@ -0,0 +1,3 @@ +```release-note:improvement +identity/oidc: allows filtering the list providers response by an allowed_client_id +``` diff --git a/changelog/16184.txt b/changelog/16184.txt new file mode 100644 index 0000000..e7a8b06 --- /dev/null +++ b/changelog/16184.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/activity: use monthly hyperloglogs to calculate new clients approximation for current month +``` \ No newline at end of file diff --git a/changelog/16213.txt b/changelog/16213.txt new file mode 100644 index 0000000..489243d --- /dev/null +++ b/changelog/16213.txt @@ -0,0 +1,3 @@ +```release-note:improvement +website/docs: API generate-recovery-token documentation. +``` diff --git a/changelog/16218.txt b/changelog/16218.txt new file mode 100644 index 0000000..aa6eff0 --- /dev/null +++ b/changelog/16218.txt @@ -0,0 +1,3 @@ +```release-note:improvement +docs: Clarify the behaviour of local mounts in the context of DR replication +``` diff --git a/changelog/16224.txt b/changelog/16224.txt new file mode 100644 index 0000000..822b245 --- /dev/null +++ b/changelog/16224.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: fix GPG encryption to support subkeys. +``` diff --git a/changelog/16231.txt b/changelog/16231.txt new file mode 100644 index 0000000..aa979d1 --- /dev/null +++ b/changelog/16231.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent/template: Fix parsing error for the exec stanza +``` diff --git a/changelog/16246.txt b/changelog/16246.txt new file mode 100644 index 0000000..8883de1 --- /dev/null +++ b/changelog/16246.txt @@ -0,0 +1,3 @@ +```release-note:bug +secret/pki: Do not fail validation with a legacy key_bits default value and key_type=any when signing CSRs +``` diff --git a/changelog/16249.txt b/changelog/16249.txt new file mode 100644 index 0000000..f84977d --- /dev/null +++ b/changelog/16249.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Honor If-Modified-Since header on CA, CRL fetch; requires passthrough_request_headers modification on the mount point. +``` diff --git a/changelog/16274.txt b/changelog/16274.txt new file mode 100644 index 0000000..75374ed --- /dev/null +++ b/changelog/16274.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/oidc: Adds support for group membership parsing when using SecureAuth as an OIDC provider. 
+``` diff --git a/changelog/16324.txt b/changelog/16324.txt new file mode 100644 index 0000000..3d4cfbf --- /dev/null +++ b/changelog/16324.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/raft (enterprise): Prevent unauthenticated voter status change with rejoin +``` diff --git a/changelog/16327.txt b/changelog/16327.txt new file mode 100644 index 0000000..da22993 --- /dev/null +++ b/changelog/16327.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Increase the allowed concurrent gRPC streams over the cluster port. +``` diff --git a/changelog/16351.txt b/changelog/16351.txt new file mode 100644 index 0000000..879c7f6 --- /dev/null +++ b/changelog/16351.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/ssh: Allow the use of Identity templates in the `default_user` field +``` diff --git a/changelog/16353.txt b/changelog/16353.txt new file mode 100644 index 0000000..be247cc --- /dev/null +++ b/changelog/16353.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: remove gox +``` \ No newline at end of file diff --git a/changelog/16379.txt b/changelog/16379.txt new file mode 100644 index 0000000..99ed7e5 --- /dev/null +++ b/changelog/16379.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Validate input parameters for the vault operator init command. A Vault 1.12 CLI is now required to run operator init. +``` \ No newline at end of file diff --git a/changelog/16386.txt b/changelog/16386.txt new file mode 100644 index 0000000..4fa6a6c --- /dev/null +++ b/changelog/16386.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/quotas: Added globbing functionality on the end of path suffix quota paths +``` diff --git a/changelog/16409.txt b/changelog/16409.txt new file mode 100644 index 0000000..d8f83b0 --- /dev/null +++ b/changelog/16409.txt @@ -0,0 +1,3 @@ +```release-note:improvement +command/audit: Improve missing type error message +``` diff --git a/changelog/16421.txt b/changelog/16421.txt new file mode 100644 index 0000000..281d2e8 --- /dev/null +++ b/changelog/16421.txt @@ -0,0 +1,3 @@ +```release-note:improvement +command/server: add `-dev-tls` and `-dev-tls-cert-dir` flags to create a Vault dev server with generated certificates and private key. +``` diff --git a/changelog/16435.txt b/changelog/16435.txt new file mode 100644 index 0000000..a7246e3 --- /dev/null +++ b/changelog/16435.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/gcp: Add support for GCE regional instance groups +``` \ No newline at end of file diff --git a/changelog/16441.txt b/changelog/16441.txt new file mode 100644 index 0000000..f265483 --- /dev/null +++ b/changelog/16441.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli: CLI commands will print a warning if flags will be ignored because they are passed after positional arguments.
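For the CLI ordering warning above, a minimal before/after sketch (the secret path and field name are placeholders):

```shell
# Emits the new warning: -field trails the positional path, so it is ignored.
vault kv get secret/my-app -field=password

# Correct ordering: flags come before positional arguments.
vault kv get -field=password secret/my-app
```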
+``` diff --git a/changelog/16443.txt b/changelog/16443.txt new file mode 100644 index 0000000..86b43c6 --- /dev/null +++ b/changelog/16443.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/kv: Fix `kv get` issue preventing the ability to read a secret when providing a leading slash +``` diff --git a/changelog/16455.txt b/changelog/16455.txt new file mode 100644 index 0000000..660dbc1 --- /dev/null +++ b/changelog/16455.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/aws: PKCS7 signatures will now use SHA256 by default in prep for Go 1.18 +``` diff --git a/changelog/16466.txt b/changelog/16466.txt new file mode 100644 index 0000000..1b5fb3c --- /dev/null +++ b/changelog/16466.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix issue logging in with JWT auth method +``` diff --git a/changelog/16479.txt b/changelog/16479.txt new file mode 100644 index 0000000..43b5258 --- /dev/null +++ b/changelog/16479.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Added `disable_keep_alives` configuration to disable keep alives in auto-auth, caching and templating. +``` diff --git a/changelog/16487.txt b/changelog/16487.txt new file mode 100644 index 0000000..cbf2a2a --- /dev/null +++ b/changelog/16487.txt @@ -0,0 +1,3 @@ +```release-note:improvement +identity: Prevent possibility of data races on entity creation. +``` diff --git a/changelog/16489.txt b/changelog/16489.txt new file mode 100644 index 0000000..17c66ca --- /dev/null +++ b/changelog/16489.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Renamed labels under Tools for wrap, lookup, rewrap and unwrap with description. +``` diff --git a/changelog/16494.txt b/changelog/16494.txt new file mode 100644 index 0000000..40cf364 --- /dev/null +++ b/changelog/16494.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secret/pki: Allow specifying SKID for cross-signed issuance from older Vault versions. +``` diff --git a/changelog/16519.txt b/changelog/16519.txt new file mode 100644 index 0000000..1325202 --- /dev/null +++ b/changelog/16519.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secret/pki: Add RSA PSS signature support for issuing certificates, signing CRLs +``` diff --git a/changelog/16525.txt b/changelog/16525.txt new file mode 100644 index 0000000..2f611af --- /dev/null +++ b/changelog/16525.txt @@ -0,0 +1,6 @@ +```release-note:improvement +auth/jwt: Improves detection of Windows Subsystem for Linux (WSL) for CLI-based logins. +``` +```release-note:improvement +auth/jwt: Adds support for Microsoft US Gov L4 to the Azure provider for groups fetching. +``` diff --git a/changelog/16534.txt b/changelog/16534.txt new file mode 100644 index 0000000..6d4386c --- /dev/null +++ b/changelog/16534.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/gcp: Fixes duplicate static account key creation from performance secondary clusters. +``` diff --git a/changelog/16539.txt b/changelog/16539.txt new file mode 100644 index 0000000..9927329 --- /dev/null +++ b/changelog/16539.txt @@ -0,0 +1,3 @@ +```release-note:change +core/entities: Fixed stranding of aliases upon entity merge, and require explicit selection of which aliases should be kept when some must be deleted +``` diff --git a/changelog/16549.txt b/changelog/16549.txt new file mode 100644 index 0000000..101d1f9 --- /dev/null +++ b/changelog/16549.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/transit: Allow configuring the possible salt lengths for RSA PSS signatures. 
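A sketch of how the configurable RSA PSS salt length above might be exercised on a transit signing call, assuming an RSA key named `my-rsa-key`; `salt_length` accepting `auto`, `hash`, or an integer matches the documented shape around this release, but treat the exact values as version-dependent:

```shell
# Sign with PSS padding and an automatically chosen (maximum) salt length...
vault write transit/sign/my-rsa-key \
    signature_algorithm=pss salt_length=auto \
    input="$(echo -n 'data to sign' | base64)"

# ...or pin the salt length to the digest size for interoperability.
vault write transit/sign/my-rsa-key \
    signature_algorithm=pss salt_length=hash \
    input="$(echo -n 'data to sign' | base64)"
```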
+``` \ No newline at end of file diff --git a/changelog/16550.txt b/changelog/16550.txt new file mode 100644 index 0000000..a1df8bd --- /dev/null +++ b/changelog/16550.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/raft: Fix retry_join initialization failure +``` diff --git a/changelog/16553.txt b/changelog/16553.txt new file mode 100644 index 0000000..7031f04 --- /dev/null +++ b/changelog/16553.txt @@ -0,0 +1,3 @@ +```release-note:improvement +command: Fix shell completion for KV v2 mounts +``` diff --git a/changelog/16563.txt b/changelog/16563.txt new file mode 100644 index 0000000..e5ff275 --- /dev/null +++ b/changelog/16563.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Add support for per-issuer Authority Information Access (AIA) URLs +``` diff --git a/changelog/16564.txt b/changelog/16564.txt new file mode 100644 index 0000000..90a5524 --- /dev/null +++ b/changelog/16564.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Allow revocation of certificates with explicitly provided certificate (bring your own certificate / BYOC). +``` diff --git a/changelog/16566.txt b/changelog/16566.txt new file mode 100644 index 0000000..269d8da --- /dev/null +++ b/changelog/16566.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Allow revocation via proving possession of certificate's private key +``` diff --git a/changelog/16567.txt b/changelog/16567.txt new file mode 100644 index 0000000..78492e3 --- /dev/null +++ b/changelog/16567.txt @@ -0,0 +1,3 @@ +```release-note:improvement +identity/oidc: Adds support for detailed listing of clients and providers. +``` diff --git a/changelog/16594.txt b/changelog/16594.txt new file mode 100644 index 0000000..3aae964 --- /dev/null +++ b/changelog/16594.txt @@ -0,0 +1,5 @@ +```release-note:improvement +auth/kerberos: add `remove_instance_name` parameter to the login CLI and the +Kerberos config in Vault. This removes any instance names found in the keytab +service principal name. +``` diff --git a/changelog/16598.txt b/changelog/16598.txt new file mode 100644 index 0000000..5b051b1 --- /dev/null +++ b/changelog/16598.txt @@ -0,0 +1,3 @@ +```release-note:improvement +identity/oidc: Adds the `client_secret_post` token endpoint authentication method. +``` diff --git a/changelog/16599.txt b/changelog/16599.txt new file mode 100644 index 0000000..dd32395 --- /dev/null +++ b/changelog/16599.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity/oidc: Change the `state` parameter of the Authorization Endpoint to optional. +``` diff --git a/changelog/16600.txt b/changelog/16600.txt new file mode 100644 index 0000000..e0855f6 --- /dev/null +++ b/changelog/16600.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity/oidc: Fixes validation of the `request` and `request_uri` parameters. +``` diff --git a/changelog/16601.txt b/changelog/16601.txt new file mode 100644 index 0000000..ce0e77b --- /dev/null +++ b/changelog/16601.txt @@ -0,0 +1,4 @@ +```release-note:bug +identity/oidc: Detect invalid `redirect_uri` values sooner in validation of the +Authorization Endpoint. 
+``` diff --git a/changelog/16609.txt b/changelog/16609.txt new file mode 100644 index 0000000..13ecb7b --- /dev/null +++ b/changelog/16609.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Upgrade github.com/hashicorp/raft +``` \ No newline at end of file diff --git a/changelog/16621.txt b/changelog/16621.txt new file mode 100644 index 0000000..e447dbb --- /dev/null +++ b/changelog/16621.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Allow revocation of issuers within the same mount. +``` diff --git a/changelog/16622.txt b/changelog/16622.txt new file mode 100644 index 0000000..37ae5ab --- /dev/null +++ b/changelog/16622.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/ssh: Evaluate ssh validprincipals user template before splitting +``` diff --git a/changelog/16631.txt b/changelog/16631.txt new file mode 100644 index 0000000..4e092b3 --- /dev/null +++ b/changelog/16631.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/database/hana: Add ability to customize dynamic usernames +``` \ No newline at end of file diff --git a/changelog/16636.txt b/changelog/16636.txt new file mode 100644 index 0000000..e36a6fe --- /dev/null +++ b/changelog/16636.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/kerberos: Maintain headers set by the client +``` diff --git a/changelog/16659.txt b/changelog/16659.txt new file mode 100644 index 0000000..c202616 --- /dev/null +++ b/changelog/16659.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix info tooltip submitting form +``` diff --git a/changelog/16668.txt b/changelog/16668.txt new file mode 100644 index 0000000..745cf0b --- /dev/null +++ b/changelog/16668.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/transit: Add a dedicated HMAC key type, which can be used with key import. +``` \ No newline at end of file diff --git a/changelog/16673.txt b/changelog/16673.txt new file mode 100644 index 0000000..e632bfe --- /dev/null +++ b/changelog/16673.txt @@ -0,0 +1,3 @@ +```release-note:bug +plugin/secrets/auth: Fix a bug with aliased backends such as aws-ec2 or generic +``` diff --git a/changelog/16676.txt b/changelog/16676.txt new file mode 100644 index 0000000..e52c086 --- /dev/null +++ b/changelog/16676.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Added gauge metrics "secrets.pki.total_revoked_certificates_stored" and "secrets.pki.total_certificates_stored" to track the number of certificates in storage. +``` diff --git a/changelog/16686.txt b/changelog/16686.txt new file mode 100644 index 0000000..293eb7c --- /dev/null +++ b/changelog/16686.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. +``` diff --git a/changelog/16688.txt b/changelog/16688.txt new file mode 100644 index 0000000..c7310fd --- /dev/null +++ b/changelog/16688.txt @@ -0,0 +1,6 @@ +```release-note:change +plugins: `GET /sys/plugins/catalog` endpoint now returns an additional `detailed` field in the response data with a list of additional plugin metadata. +``` +```release-note:change +plugins: `GET /sys/plugins/catalog/:type/:name` endpoint now returns an additional `version` field in the response data.
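To see the new catalog response fields described above, the endpoints can be read directly (the builtin plugin name is just an example):

```shell
# The list response now carries a `detailed` field with per-plugin metadata.
vault read sys/plugins/catalog

# A single entry's response now includes its `version`.
vault read sys/plugins/catalog/database/postgresql-database-plugin
```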
+``` diff --git a/changelog/16699.txt b/changelog/16699.txt new file mode 100644 index 0000000..4a96e86 --- /dev/null +++ b/changelog/16699.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Add a sentinel error for missing KV secrets +``` diff --git a/changelog/16700.txt b/changelog/16700.txt new file mode 100644 index 0000000..9dc8d1e --- /dev/null +++ b/changelog/16700.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Fixes duplicate otherName in certificates created by the sign-verbatim endpoint. +``` diff --git a/changelog/16702.txt b/changelog/16702.txt new file mode 100644 index 0000000..a197646 --- /dev/null +++ b/changelog/16702.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki/tidy: Add another pair of metrics counting certificates not deleted by the tidy operation. +``` diff --git a/changelog/16714.txt b/changelog/16714.txt new file mode 100644 index 0000000..a136106 --- /dev/null +++ b/changelog/16714.txt @@ -0,0 +1,3 @@ +```release-note:bug +debug: Remove extra empty lines from vault.log when debug command is run +``` \ No newline at end of file diff --git a/changelog/16721.txt b/changelog/16721.txt new file mode 100644 index 0000000..84e0ffa --- /dev/null +++ b/changelog/16721.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Ignore EC PARAMETER PEM blocks during issuer import (/config/ca, /issuers/import/*, and /intermediate/set-signed) +``` diff --git a/changelog/16723.txt b/changelog/16723.txt new file mode 100644 index 0000000..faba459 --- /dev/null +++ b/changelog/16723.txt @@ -0,0 +1,4 @@ +```release-note:feature +**OCSP Responder**: PKI mounts now have an OCSP responder that implements a subset of RFC6960, answering single serial number OCSP requests for +a specific cluster's revoked certificates in a mount. +``` diff --git a/changelog/16739.txt b/changelog/16739.txt new file mode 100644 index 0000000..00d190f --- /dev/null +++ b/changelog/16739.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix naming of permitted_dns_domains form parameter on CA creation (root generation and sign intermediate). +``` diff --git a/changelog/16762.txt b/changelog/16762.txt new file mode 100644 index 0000000..ade57bd --- /dev/null +++ b/changelog/16762.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Add ability to periodically rebuild CRL before expiry +``` diff --git a/changelog/16773.txt b/changelog/16773.txt new file mode 100644 index 0000000..bebb0d5 --- /dev/null +++ b/changelog/16773.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Support generating delta CRLs for up-to-date CRLs when auto-building is enabled. +``` diff --git a/changelog/16794.txt b/changelog/16794.txt new file mode 100644 index 0000000..0f78cf0 --- /dev/null +++ b/changelog/16794.txt @@ -0,0 +1,3 @@ +```release-note:bug +api: Fixed erroneous warnings of unrecognized parameters when unwrapping data. 
+``` diff --git a/changelog/16813.txt b/changelog/16813.txt new file mode 100644 index 0000000..352fa09 --- /dev/null +++ b/changelog/16813.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Fix migration to properly handle mounts that contain only keys, no certificates +``` diff --git a/changelog/16821.txt b/changelog/16821.txt new file mode 100644 index 0000000..c414d80 --- /dev/null +++ b/changelog/16821.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: redirect_to param forwards from auth route when authenticated +``` \ No newline at end of file diff --git a/changelog/16830.txt b/changelog/16830.txt new file mode 100644 index 0000000..d0fa219 --- /dev/null +++ b/changelog/16830.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: LIST issuers endpoint is now unauthenticated. +``` \ No newline at end of file diff --git a/changelog/16834.txt b/changelog/16834.txt new file mode 100644 index 0000000..70c7d34 --- /dev/null +++ b/changelog/16834.txt @@ -0,0 +1,3 @@ +```release-note:bug +command/debug: fix bug where monitor was not honoring configured duration +``` \ No newline at end of file diff --git a/changelog/16846.txt b/changelog/16846.txt new file mode 100644 index 0000000..dd4aeaa --- /dev/null +++ b/changelog/16846.txt @@ -0,0 +1,3 @@ +```release-note:improvement +plugins: Add Deprecation Status method to builtinregistry. +``` diff --git a/changelog/16849.txt b/changelog/16849.txt new file mode 100644 index 0000000..a320eed --- /dev/null +++ b/changelog/16849.txt @@ -0,0 +1,15 @@ +```release-note:change +auth: `GET /sys/auth` endpoint now returns an additional `deprecation_status` field in the response data for builtins. +``` +```release-note:change +auth: `GET /sys/auth/:name` endpoint now returns an additional `deprecation_status` field in the response data for builtins. +``` +```release-note:change +secrets: `GET /sys/mounts` endpoint now returns an additional `deprecation_status` field in the response data for builtins. +``` +```release-note:change +secrets: `GET /sys/mounts/:name` endpoint now returns an additional `deprecation_status` field in the response data for builtins. +``` +```release-note:improvement +cli: `auth` and `secrets` list `-detailed` commands now show Deprecation Status for builtin plugins. 
+``` diff --git a/changelog/16856.txt b/changelog/16856.txt new file mode 100644 index 0000000..512dd67 --- /dev/null +++ b/changelog/16856.txt @@ -0,0 +1,3 @@ +```release-note:change +plugins: Add plugin version to auth register, list, and mount table +``` diff --git a/changelog/16865.txt b/changelog/16865.txt new file mode 100644 index 0000000..2f03b83 --- /dev/null +++ b/changelog/16865.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Allow import of issuers without CRLSign KeyUsage; prohibit setting crl-signing usage on such issuers +``` diff --git a/changelog/16871.txt b/changelog/16871.txt new file mode 100644 index 0000000..8b57c78 --- /dev/null +++ b/changelog/16871.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Allow tidy to associate revoked certs with their issuers for OCSP performance +``` diff --git a/changelog/16872.txt b/changelog/16872.txt new file mode 100644 index 0000000..5bbad14 --- /dev/null +++ b/changelog/16872.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: fix incorrectly used loop variables in parallel tests and when finalizing seals +``` diff --git a/changelog/16874.txt b/changelog/16874.txt new file mode 100644 index 0000000..f1dafa0 --- /dev/null +++ b/changelog/16874.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Improve stability of association of revoked cert with its parent issuer; when an issuer loses crl-signing usage, do not place certs on default issuer's CRL. +``` diff --git a/changelog/16886.txt b/changelog/16886.txt new file mode 100644 index 0000000..73b9b41 --- /dev/null +++ b/changelog/16886.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix OIDC callback to accept namespace flag in different formats +``` \ No newline at end of file diff --git a/changelog/16890.txt b/changelog/16890.txt new file mode 100644 index 0000000..1a3657c --- /dev/null +++ b/changelog/16890.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/kerberos: add `add_group_aliases` config to include LDAP groups in Vault group aliases +``` diff --git a/changelog/16900.txt b/changelog/16900.txt new file mode 100644 index 0000000..35e2b5a --- /dev/null +++ b/changelog/16900.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Add ability to periodically run tidy operations to remove expired certificates. 
+``` diff --git a/changelog/16911.txt b/changelog/16911.txt new file mode 100644 index 0000000..a451f69 --- /dev/null +++ b/changelog/16911.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api/mfa: Add namespace path to the MFA read/list endpoint +``` diff --git a/changelog/16930.txt b/changelog/16930.txt new file mode 100644 index 0000000..1431689 --- /dev/null +++ b/changelog/16930.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix lease force revoke action +``` \ No newline at end of file diff --git a/changelog/16935.txt b/changelog/16935.txt new file mode 100644 index 0000000..0b0b46f --- /dev/null +++ b/changelog/16935.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Add a new flag to issue/sign APIs which can filter out root CAs from the returned ca_chain field +``` diff --git a/changelog/16938.txt b/changelog/16938.txt new file mode 100644 index 0000000..6fb00bf --- /dev/null +++ b/changelog/16938.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/token: Fix ignored parameter warnings for valid parameters on token create +``` diff --git a/changelog/16950.txt b/changelog/16950.txt new file mode 100644 index 0000000..0ee2d5b --- /dev/null +++ b/changelog/16950.txt @@ -0,0 +1,3 @@ +```release-note:improvement +website/docs: Add documentation around the expense of creating many lease count quotas in a short period +``` diff --git a/changelog/16956.txt b/changelog/16956.txt new file mode 100644 index 0000000..8912f72 --- /dev/null +++ b/changelog/16956.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Prevent two or more DR failovers from invalidating SSCT tokens generated on the previous primaries. +``` diff --git a/changelog/16958.txt b/changelog/16958.txt new file mode 100644 index 0000000..a77af9a --- /dev/null +++ b/changelog/16958.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Add ability to cancel tidy operations and control tidy resource usage. +``` diff --git a/changelog/16970.txt b/changelog/16970.txt new file mode 100644 index 0000000..0f0a9f8 --- /dev/null +++ b/changelog/16970.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent: Agent will now respect `max_retries` retry configuration even when caching is set. +``` diff --git a/changelog/16972.txt b/changelog/16972.txt new file mode 100644 index 0000000..3aec66a --- /dev/null +++ b/changelog/16972.txt @@ -0,0 +1,3 @@ +```release-note:improvement +plugins: Added environment variable flag to opt specific plugins out of multiplexing +``` \ No newline at end of file diff --git a/changelog/16982.txt b/changelog/16982.txt new file mode 100644 index 0000000..70d2521 --- /dev/null +++ b/changelog/16982.txt @@ -0,0 +1,3 @@ +```release-note:change +plugins: `GET /database/config/:name` endpoint now returns an additional `plugin_version` field in the response data. +``` \ No newline at end of file diff --git a/changelog/16983.txt b/changelog/16983.txt new file mode 100644 index 0000000..f89971d --- /dev/null +++ b/changelog/16983.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes secret version and status menu links transitioning to auth screen +``` \ No newline at end of file diff --git a/changelog/16992.txt b/changelog/16992.txt new file mode 100644 index 0000000..178bb4c --- /dev/null +++ b/changelog/16992.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity/oidc: Adds `claims_supported` to discovery document.
+``` diff --git a/changelog/16995.txt b/changelog/16995.txt new file mode 100644 index 0000000..c1adc90 --- /dev/null +++ b/changelog/16995.txt @@ -0,0 +1,3 @@ +```release-note:improvement +plugins/multiplexing: Added multiplexing support to database plugins if run as external plugins +``` \ No newline at end of file diff --git a/changelog/17005.txt b/changelog/17005.txt new file mode 100644 index 0000000..04273d9 --- /dev/null +++ b/changelog/17005.txt @@ -0,0 +1,13 @@ +```release-note:change +auth: `auth enable` returns an error and `POST /sys/auth/:type` endpoint +reports an error for `Pending Removal` auth methods. +``` +```release-note:change +secrets: `secrets enable` returns an error and `POST /sys/mount/:type` endpoint +reports an error for `Pending Removal` secrets engines. +``` +```release-note:improvement +core: Handle and log deprecated builtin mounts. Introduces +`VAULT_ALLOW_PENDING_REMOVAL_MOUNTS` to override shutdown and error when +attempting to mount `Pending Removal` builtin plugins. +``` diff --git a/changelog/17019.txt b/changelog/17019.txt new file mode 100644 index 0000000..63e2da4 --- /dev/null +++ b/changelog/17019.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/raft: Nodes no longer get demoted to nonvoter if we don't know their version due to missing heartbeats. +``` diff --git a/changelog/17028.txt b/changelog/17028.txt new file mode 100644 index 0000000..fd49440 --- /dev/null +++ b/changelog/17028.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Activity log goroutine management improvements to allow tests to be more deterministic. +``` \ No newline at end of file diff --git a/changelog/17038.txt b/changelog/17038.txt new file mode 100644 index 0000000..f6451dc --- /dev/null +++ b/changelog/17038.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets: All database-specific (standalone DB) secrets engines are now marked `Pending Removal`. +``` diff --git a/changelog/17040.txt b/changelog/17040.txt new file mode 100644 index 0000000..add116d --- /dev/null +++ b/changelog/17040.txt @@ -0,0 +1,3 @@ +```release-note:bug +login: Store token in tokenhelper for interactive login MFA +``` diff --git a/changelog/17045.txt b/changelog/17045.txt new file mode 100644 index 0000000..600641d --- /dev/null +++ b/changelog/17045.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/azure: Adds option to permanently delete AzureAD objects created by Vault. +``` diff --git a/changelog/17058.txt b/changelog/17058.txt new file mode 100644 index 0000000..fd527cc --- /dev/null +++ b/changelog/17058.txt @@ -0,0 +1,6 @@ +```release-note:change +auth: `POST /sys/auth/:type` endpoint response contains a warning for `Deprecated` auth methods. +``` +```release-note:change +secrets: `POST /sys/mounts/:type` endpoint response contains a warning for `Deprecated` secrets engines. +``` diff --git a/changelog/17070.txt b/changelog/17070.txt new file mode 100644 index 0000000..1b45fd6 --- /dev/null +++ b/changelog/17070.txt @@ -0,0 +1,4 @@ +```release-note:feature +**Redis DB Engine**: Adding the new Redis database engine that supports the generation of static and dynamic user +roles and root credential rotation on a standalone Redis server.
+``` \ No newline at end of file diff --git a/changelog/17071.txt b/changelog/17071.txt new file mode 100644 index 0000000..926ca83 --- /dev/null +++ b/changelog/17071.txt @@ -0,0 +1,2 @@ +```release-note:feature +**UI OIDC Provider Config**: Adds configuration of Vault as an OIDC identity provider, and offers Vault’s various authentication methods and sources of identity to any client application. \ No newline at end of file diff --git a/changelog/17073.txt b/changelog/17073.txt new file mode 100644 index 0000000..96ab50b --- /dev/null +++ b/changelog/17073.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Add a warning to any successful response when the requested TTL is overwritten by MaxTTL +``` \ No newline at end of file diff --git a/changelog/17075.txt b/changelog/17075.txt new file mode 100644 index 0000000..1b122e5 --- /dev/null +++ b/changelog/17075.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Redis ElastiCache DB Plugin**: Added Redis ElastiCache as a built-in plugin. +``` \ No newline at end of file diff --git a/changelog/17077.txt b/changelog/17077.txt new file mode 100644 index 0000000..0f5d1f8 --- /dev/null +++ b/changelog/17077.txt @@ -0,0 +1,12 @@ +```release-note:change +plugins: `GET /sys/plugins/catalog/:type/:name` endpoint contains deprecation status for builtin plugins. +``` +```release-note:change +plugins: `GET /sys/plugins/catalog/` endpoint contains deprecation status in `detailed` list. +``` +```release-note:change +plugins: `plugin list` now accepts a `-detailed` flag, which displays deprecation status and version info. +``` +```release-note:change +plugins: `plugin info` displays deprecation status for builtin plugins. +``` diff --git a/changelog/17079.txt b/changelog/17079.txt new file mode 100644 index 0000000..1f3d7b1 --- /dev/null +++ b/changelog/17079.txt @@ -0,0 +1,2 @@ +```release-note:bug +storage/raft: Fix error writing raft TLS keyring during follower joins \ No newline at end of file diff --git a/changelog/17086.txt b/changelog/17086.txt new file mode 100644 index 0000000..ce221ad --- /dev/null +++ b/changelog/17086.txt @@ -0,0 +1,3 @@ +```release-note:change +ui: Upgrade Ember to version 4.4.0 +``` \ No newline at end of file diff --git a/changelog/17088.txt b/changelog/17088.txt new file mode 100644 index 0000000..dfd08c9 --- /dev/null +++ b/changelog/17088.txt @@ -0,0 +1,3 @@ +```release-note:improvement +plugins: Adding version to plugin GRPC interface +``` \ No newline at end of file diff --git a/changelog/17091.txt b/changelog/17091.txt new file mode 100644 index 0000000..2b2c041 --- /dev/null +++ b/changelog/17091.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent/auto-auth: Add `exit_on_err`, which, when set to true, will cause Agent to exit if any errors are encountered during authentication. +``` diff --git a/changelog/17093.txt b/changelog/17093.txt new file mode 100644 index 0000000..a51f3de --- /dev/null +++ b/changelog/17093.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/cert: Add configurable support for validating client certs with OCSP. +``` \ No newline at end of file diff --git a/changelog/17104.txt b/changelog/17104.txt new file mode 100644 index 0000000..00c5eeb --- /dev/null +++ b/changelog/17104.txt @@ -0,0 +1,4 @@ +```release-note:change +auth: Returns invalid credentials for ldap, userpass and approle when wrong credentials are provided for existing users. +This will only be used internally for implementing user lockout.
+``` \ No newline at end of file diff --git a/changelog/17116.txt b/changelog/17116.txt new file mode 100644 index 0000000..73116ee --- /dev/null +++ b/changelog/17116.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Replaces non-inclusive terms +``` \ No newline at end of file diff --git a/changelog/17118.txt b/changelog/17118.txt new file mode 100644 index 0000000..76c7748 --- /dev/null +++ b/changelog/17118.txt @@ -0,0 +1,4 @@ +```release-note:improvement +secrets/transit: Added a parameter to encrypt/decrypt batch operations to allow the caller to +override the HTTP response code in case of partial user-input failures. +``` \ No newline at end of file diff --git a/changelog/17124.txt b/changelog/17124.txt new file mode 100644 index 0000000..ef4f116 --- /dev/null +++ b/changelog/17124.txt @@ -0,0 +1,2 @@ +```release-note:bug +ui: Fix kv deleting the latest version when not allowed to soft delete (and delete a specific version of a secret) \ No newline at end of file diff --git a/changelog/17136.txt b/changelog/17136.txt new file mode 100644 index 0000000..e5e9744 --- /dev/null +++ b/changelog/17136.txt @@ -0,0 +1,5 @@ +```release-note:improvement +auth/cert: Operators can now specify a CRL distribution point URL, in which +case the cert auth engine will fetch and use the CRL from that location +rather than needing to push CRLs directly to auth/cert. +``` \ No newline at end of file diff --git a/changelog/17138.txt b/changelog/17138.txt new file mode 100644 index 0000000..e8e2bfd --- /dev/null +++ b/changelog/17138.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. +``` \ No newline at end of file diff --git a/changelog/17139.txt b/changelog/17139.txt new file mode 100644 index 0000000..81b7507 --- /dev/null +++ b/changelog/17139.txt @@ -0,0 +1,6 @@ +```release-note:improvement +ui: Prevents requests to /sys/internal/ui/resultant-acl endpoint when unauthenticated +``` +```release-note:improvement +website/docs: Removes mentions of unauthenticated from internal ui resultant-acl doc +``` \ No newline at end of file diff --git a/changelog/17152.txt b/changelog/17152.txt new file mode 100644 index 0000000..fe5fee7 --- /dev/null +++ b/changelog/17152.txt @@ -0,0 +1,4 @@ +```release-note:feature +**LDAP Secrets Engine**: Adds the `ldap` secrets engine with service account check-out +functionality for all supported schemas. +``` \ No newline at end of file diff --git a/changelog/17153.txt b/changelog/17153.txt new file mode 100644 index 0000000..00fe707 --- /dev/null +++ b/changelog/17153.txt @@ -0,0 +1,2 @@ +```release-note:improvement +ui: add 'disable' param to pki crl configuration \ No newline at end of file diff --git a/changelog/17159.txt b/changelog/17159.txt new file mode 100644 index 0000000..b480f81 --- /dev/null +++ b/changelog/17159.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/database/snowflake: Add multiplexing support +``` diff --git a/changelog/17160.txt b/changelog/17160.txt new file mode 100644 index 0000000..36b6d07 --- /dev/null +++ b/changelog/17160.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/gcp: Updates dependencies: `google.golang.org/api@v0.83.0`, `github.com/hashicorp/go-gcp-common@v0.8.0`. 
+``` \ No newline at end of file diff --git a/changelog/17161.txt b/changelog/17161.txt new file mode 100644 index 0000000..d23421a --- /dev/null +++ b/changelog/17161.txt @@ -0,0 +1,7 @@ +```release-note:improvement +auth/kubernetes: Role resolution for K8S Auth [[GH-156](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/156)] +``` + +```release-note:bug +auth/kubernetes: Restore support for JWT signature algorithm ES384 [[GH-160](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/160)] +``` diff --git a/changelog/17164.txt b/changelog/17164.txt new file mode 100644 index 0000000..e09797d --- /dev/null +++ b/changelog/17164.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/kubernetes: upgrade to v0.2.0 +``` diff --git a/changelog/17167.txt b/changelog/17167.txt new file mode 100644 index 0000000..89b8905 --- /dev/null +++ b/changelog/17167.txt @@ -0,0 +1,6 @@ +```release-note:change +plugins: `GET /sys/auth/:path/tune` and `GET /sys/mounts/:path/tune` endpoints may now return an additional `plugin_version` field in the response data if set. +``` +```release-note:change +plugins: `GET` for `/sys/auth`, `/sys/auth/:path`, `/sys/mounts`, and `/sys/mounts/:path` paths now return additional `plugin_version`, `running_plugin_version` and `running_sha256` fields in the response data for each mount. +``` \ No newline at end of file diff --git a/changelog/17174.txt b/changelog/17174.txt new file mode 100644 index 0000000..302c99a --- /dev/null +++ b/changelog/17174.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/gcp: Updates dependencies: `google.golang.org/api@v0.83.0`, `github.com/hashicorp/go-gcp-common@v0.8.0`. +``` \ No newline at end of file diff --git a/changelog/17180.txt b/changelog/17180.txt new file mode 100644 index 0000000..4e1e5d9 --- /dev/null +++ b/changelog/17180.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/azure: Removed deprecated AAD graph API support from the secrets engine. +``` \ No newline at end of file diff --git a/changelog/17186.txt b/changelog/17186.txt new file mode 100644 index 0000000..d086dd0 --- /dev/null +++ b/changelog/17186.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: fix race when using SystemView.ReplicationState outside of a request context +``` \ No newline at end of file diff --git a/changelog/17187.txt b/changelog/17187.txt new file mode 100644 index 0000000..71476ef --- /dev/null +++ b/changelog/17187.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Refactor lock grabbing code to simplify stateLock deadlock investigations +``` \ No newline at end of file diff --git a/changelog/17194.txt b/changelog/17194.txt new file mode 100644 index 0000000..a3ea955 --- /dev/null +++ b/changelog/17194.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/azure: Enables Azure roles to be compatible with Vault's role based quotas. +``` \ No newline at end of file diff --git a/changelog/17196.txt b/changelog/17196.txt new file mode 100644 index 0000000..586205b --- /dev/null +++ b/changelog/17196.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/cf: Enables CF roles to be compatible with Vault's role based quotas. +``` \ No newline at end of file diff --git a/changelog/17199.txt b/changelog/17199.txt new file mode 100644 index 0000000..e960045 --- /dev/null +++ b/changelog/17199.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/gcpkms: Update dependencies: google.golang.org/api@v0.83.0. 
+``` diff --git a/changelog/17204.txt b/changelog/17204.txt new file mode 100644 index 0000000..07ea8c0 --- /dev/null +++ b/changelog/17204.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix panic when the plugin catalog returns neither a plugin nor an error. +``` diff --git a/changelog/17212.txt b/changelog/17212.txt new file mode 100644 index 0000000..1130ca6 --- /dev/null +++ b/changelog/17212.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/oci: Add support for role resolution. +``` diff --git a/changelog/17251.txt b/changelog/17251.txt new file mode 100644 index 0000000..2849ab0 --- /dev/null +++ b/changelog/17251.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/alicloud: Enables AliCloud roles to be compatible with Vault's role based quotas. +``` diff --git a/changelog/17265.txt b/changelog/17265.txt new file mode 100644 index 0000000..fe2d3b9 --- /dev/null +++ b/changelog/17265.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: License location is no longer cache exempt, meaning sys/health will not contribute as greatly to storage load when using consul as a storage backend. +``` diff --git a/changelog/17281.txt b/changelog/17281.txt new file mode 100644 index 0000000..8711283 --- /dev/null +++ b/changelog/17281.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/quotas: Fix goroutine leak caused by the seal process not fully cleaning up Rate Limit Quotas. +``` diff --git a/changelog/17289.txt b/changelog/17289.txt new file mode 100644 index 0000000..e8df6ca --- /dev/null +++ b/changelog/17289.txt @@ -0,0 +1,3 @@ +```release-note:improvement +plugins: Allow selecting builtin plugins by their reported semantic version of the form `vX.Y.Z+builtin` or `vX.Y.Z+builtin.vault`. +``` \ No newline at end of file diff --git a/changelog/17308.txt b/changelog/17308.txt new file mode 100644 index 0000000..5a2bf8b --- /dev/null +++ b/changelog/17308.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli: User-requested -help text now appears on stdout for paging rather than stderr +``` diff --git a/changelog/17328.txt b/changelog/17328.txt new file mode 100644 index 0000000..e10e380 --- /dev/null +++ b/changelog/17328.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Do not ignore provided signature bits value when signing intermediate and leaf certificates with a managed key +``` diff --git a/changelog/17338.txt b/changelog/17338.txt new file mode 100644 index 0000000..00b537b --- /dev/null +++ b/changelog/17338.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Add user lockout field to config, and allow configuring it for an auth mount using auth tune, to prevent brute forcing in auth methods +``` \ No newline at end of file diff --git a/changelog/17339.txt b/changelog/17339.txt new file mode 100644 index 0000000..4ab1421 --- /dev/null +++ b/changelog/17339.txt @@ -0,0 +1,3 @@ +```release-note:bug +plugins/kv: KV v2 returns 404 instead of 500 for request paths that incorrectly include a trailing slash. +``` diff --git a/changelog/17340.txt b/changelog/17340.txt new file mode 100644 index 0000000..733c737 --- /dev/null +++ b/changelog/17340.txt @@ -0,0 +1,3 @@ +```release-note:bug +plugins: Corrected the path to check permissions on when the registered plugin name does not match the plugin binary's filename.
+``` diff --git a/changelog/17347.txt b/changelog/17347.txt new file mode 100644 index 0000000..e74f4e3 --- /dev/null +++ b/changelog/17347.txt @@ -0,0 +1,6 @@ +```release-note:change +api: Exclusively use `GET /sys/plugins/catalog` endpoint for listing plugins, and add `details` field to list responses. +``` +```release-note:improvement +cli: `vault plugin list` now has a `details` field in JSON format, and version and type information in table format. +``` diff --git a/changelog/17352.txt b/changelog/17352.txt new file mode 100644 index 0000000..a5b5ca5 --- /dev/null +++ b/changelog/17352.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Support VAULT_DISABLE_REDIRECTS environment variable (and --disable-redirects flag) to disable default client behavior and prevent the client following any redirection responses. +``` \ No newline at end of file diff --git a/changelog/17376.txt b/changelog/17376.txt new file mode 100644 index 0000000..0b2f4e4 --- /dev/null +++ b/changelog/17376.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Remove default value of 30 from TtlPicker2 if no value is passed in. +``` diff --git a/changelog/17385.txt b/changelog/17385.txt new file mode 100644 index 0000000..a6a8749 --- /dev/null +++ b/changelog/17385.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Do not read revoked certificates from backend when CRL is disabled +``` diff --git a/changelog/17388.txt b/changelog/17388.txt new file mode 100644 index 0000000..e3a98d8 --- /dev/null +++ b/changelog/17388.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Add support to specify signature bits when generating CSRs through intermediate/generate apis +``` diff --git a/changelog/17395.txt b/changelog/17395.txt new file mode 100644 index 0000000..f7ce602 --- /dev/null +++ b/changelog/17395.txt @@ -0,0 +1,3 @@ +```release-note:bug +api: Fix to account for older versions of Vault with no `custom_metadata` support +``` diff --git a/changelog/17406.txt b/changelog/17406.txt new file mode 100644 index 0000000..29bf04a --- /dev/null +++ b/changelog/17406.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/kv: new KVv2 mounts and KVv1 mounts without any keys will upgrade synchronously, allowing for instant use +``` diff --git a/changelog/17407.txt b/changelog/17407.txt new file mode 100644 index 0000000..b0e5ae4 --- /dev/null +++ b/changelog/17407.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui/keymgmt: Sets the defaultValue for type when creating a key. +``` diff --git a/changelog/17419.txt b/changelog/17419.txt new file mode 100644 index 0000000..f57fd31 --- /dev/null +++ b/changelog/17419.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix panic caused in Vault Agent when rendering certificate templates +``` \ No newline at end of file diff --git a/changelog/17430.txt b/changelog/17430.txt new file mode 100644 index 0000000..12ab331 --- /dev/null +++ b/changelog/17430.txt @@ -0,0 +1,3 @@ +```release-note:improvement +plugins: Add plugin version information to key plugin lifecycle log lines.
+``` \ No newline at end of file diff --git a/changelog/17459.txt b/changelog/17459.txt new file mode 100644 index 0000000..fd240c5 --- /dev/null +++ b/changelog/17459.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/identity: Add machine-readable output to body of response upon alias clash during entity merge +``` \ No newline at end of file diff --git a/changelog/17497.txt b/changelog/17497.txt new file mode 100644 index 0000000..98e6136 --- /dev/null +++ b/changelog/17497.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. +``` diff --git a/changelog/17499.txt b/changelog/17499.txt new file mode 100644 index 0000000..f3b1831 --- /dev/null +++ b/changelog/17499.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Update protoc from 3.21.5 to 3.21.7 +``` \ No newline at end of file diff --git a/changelog/17514.txt b/changelog/17514.txt new file mode 100644 index 0000000..215ea83 --- /dev/null +++ b/changelog/17514.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix vault operator init command to show the right curl string with -output-curl-string and right policy hcl with -output-policy +``` \ No newline at end of file diff --git a/changelog/17532.txt b/changelog/17532.txt new file mode 100644 index 0000000..0a09261 --- /dev/null +++ b/changelog/17532.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: prevent memory leak when using control group factors in a policy +``` diff --git a/changelog/17540.txt b/changelog/17540.txt new file mode 100644 index 0000000..3915eae --- /dev/null +++ b/changelog/17540.txt @@ -0,0 +1,4 @@ +```release-note:improvement +auth/azure: Adds support for authentication with Managed Service Identity (MSI) from a +Virtual Machine Scale Set (VMSS) in flexible orchestration mode. +``` \ No newline at end of file diff --git a/changelog/17562.txt b/changelog/17562.txt new file mode 100644 index 0000000..9d9d0f7 --- /dev/null +++ b/changelog/17562.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: prevent panic during mfa after enforcement's namespace is deleted +``` diff --git a/changelog/17575.txt b/changelog/17575.txt new file mode 100644 index 0000000..f08b53f --- /dev/null +++ b/changelog/17575.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: use the combined activity log (partial + historic) API for client count dashboard and remove use of monthly endpoint +``` diff --git a/changelog/17577.txt b/changelog/17577.txt new file mode 100644 index 0000000..2794e5f --- /dev/null +++ b/changelog/17577.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: Remove empty table heading for `vault secrets list -detailed` output. +``` \ No newline at end of file diff --git a/changelog/17593.txt b/changelog/17593.txt new file mode 100644 index 0000000..4f38d40 --- /dev/null +++ b/changelog/17593.txt @@ -0,0 +1,3 @@ +```release-note:improvement +database/snowflake: Allow parallel requests to Snowflake +``` \ No newline at end of file diff --git a/changelog/17612.txt b/changelog/17612.txt new file mode 100644 index 0000000..3005d0b --- /dev/null +++ b/changelog/17612.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/seal: Fix regression handling of the key_id parameter in seal configuration HCL. 
+``` \ No newline at end of file diff --git a/changelog/17636.txt b/changelog/17636.txt new file mode 100644 index 0000000..0668f21 --- /dev/null +++ b/changelog/17636.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/transit: Add support for PKCSv1_5_NoOID RSA signatures +``` diff --git a/changelog/17638.txt b/changelog/17638.txt new file mode 100644 index 0000000..37e0575 --- /dev/null +++ b/changelog/17638.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/transit: Add associated_data parameter for additional authenticated data in AEAD ciphers +``` diff --git a/changelog/17640.txt b/changelog/17640.txt new file mode 100644 index 0000000..6db136a --- /dev/null +++ b/changelog/17640.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sdk/ldap: Added support for paging when searching for groups using group filters +``` \ No newline at end of file diff --git a/changelog/17650.txt b/changelog/17650.txt new file mode 100644 index 0000000..79e7669 --- /dev/null +++ b/changelog/17650.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli: Add support for creating requests to existing non-KVv2 PATCH-capable endpoints. +``` diff --git a/changelog/17660.txt b/changelog/17660.txt new file mode 100644 index 0000000..59ac664 --- /dev/null +++ b/changelog/17660.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fixes spurious warnings being emitted relating to "unknown or unsupported fields" for JSON config +``` diff --git a/changelog/17661.txt b/changelog/17661.txt new file mode 100644 index 0000000..5dfb8ea --- /dev/null +++ b/changelog/17661.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab +``` \ No newline at end of file diff --git a/changelog/17678.txt b/changelog/17678.txt new file mode 100644 index 0000000..bddf213 --- /dev/null +++ b/changelog/17678.txt @@ -0,0 +1,3 @@ +```release-note:improvement +Reduced binary size +``` diff --git a/changelog/17679.txt b/changelog/17679.txt new file mode 100644 index 0000000..b77f631 --- /dev/null +++ b/changelog/17679.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: Fix issue preventing kv commands from executing properly when the mount path provided by `-mount` flag and secret key path are the same. +``` diff --git a/changelog/17693.txt b/changelog/17693.txt new file mode 100644 index 0000000..748af4e --- /dev/null +++ b/changelog/17693.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Fix upgrade of missing expiry, delta_rebuild_interval by setting them to the default. 
+``` diff --git a/changelog/17732.txt b/changelog/17732.txt new file mode 100644 index 0000000..e9c30dc --- /dev/null +++ b/changelog/17732.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/raft: Don't panic on unknown raft ops +``` diff --git a/changelog/17747.txt b/changelog/17747.txt new file mode 100644 index 0000000..53ceafa --- /dev/null +++ b/changelog/17747.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/aws: Update dependencies [[PR-17747](https://github.com/hashicorp/vault/pull/17747)] +``` diff --git a/changelog/17749.txt b/changelog/17749.txt new file mode 100644 index 0000000..aac4946 --- /dev/null +++ b/changelog/17749.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add inline policy creation when creating an identity entity or group +``` diff --git a/changelog/17750.txt b/changelog/17750.txt new file mode 100644 index 0000000..af12458 --- /dev/null +++ b/changelog/17750.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli/pki: Add health-check subcommand to evaluate the health of a PKI instance. +``` diff --git a/changelog/17752.txt b/changelog/17752.txt new file mode 100644 index 0000000..628eed8 --- /dev/null +++ b/changelog/17752.txt @@ -0,0 +1,3 @@ +```release-note:bug +openapi: fix gen_openapi.sh script to correctly load vault plugins +``` diff --git a/changelog/17768.txt b/changelog/17768.txt new file mode 100644 index 0000000..25246ea --- /dev/null +++ b/changelog/17768.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/approle: Add maximum length of 4096 for approle role_names, as this value is used in HMAC calculation +``` \ No newline at end of file diff --git a/changelog/17769.txt b/changelog/17769.txt new file mode 100644 index 0000000..12fe2d4 --- /dev/null +++ b/changelog/17769.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes issue with not being able to download raft snapshot via service worker +``` \ No newline at end of file diff --git a/changelog/17772.txt b/changelog/17772.txt new file mode 100644 index 0000000..6866d3f --- /dev/null +++ b/changelog/17772.txt @@ -0,0 +1,3 @@ +```release-note:bug +secret/pki: fix bug with initial legacy bundle migration (from < 1.11 into 1.11+) and missing issuers from ca_chain +``` diff --git a/changelog/17774.txt b/changelog/17774.txt new file mode 100644 index 0000000..f6103ec --- /dev/null +++ b/changelog/17774.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Return new fields revocation_time_rfc3339 and issuer_id to existing certificate serial lookup api if it is revoked +``` diff --git a/changelog/17779.txt b/changelog/17779.txt new file mode 100644 index 0000000..a0b3bd6 --- /dev/null +++ b/changelog/17779.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Add a new API that returns the serial numbers of revoked certificates on the local cluster +``` diff --git a/changelog/17789.txt b/changelog/17789.txt new file mode 100644 index 0000000..fd6f3b0 --- /dev/null +++ b/changelog/17789.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sys/internal/inspect: Creates an endpoint to inspect internal subsystems. +``` \ No newline at end of file diff --git a/changelog/17801.txt b/changelog/17801.txt new file mode 100644 index 0000000..8d3764a --- /dev/null +++ b/changelog/17801.txt @@ -0,0 +1,4 @@ +```release-note:bug +core: fix a start up race condition where performance standbys could go into a +mount loop if default policies are not yet synced from the active node.
+``` diff --git a/changelog/17816.txt b/changelog/17816.txt new file mode 100644 index 0000000..4ad3bc8 --- /dev/null +++ b/changelog/17816.txt @@ -0,0 +1,3 @@ +```release-note:bug +plugins: Only report deprecation status for builtin plugins. +``` diff --git a/changelog/17822.txt b/changelog/17822.txt new file mode 100644 index 0000000..5c39b3c --- /dev/null +++ b/changelog/17822.txt @@ -0,0 +1,3 @@ +```release-note:change +logging: Removed legacy environment variable for log format ('LOGXI_FORMAT'), should use 'VAULT_LOG_FORMAT' instead +``` diff --git a/changelog/17823.txt b/changelog/17823.txt new file mode 100644 index 0000000..d999c87 --- /dev/null +++ b/changelog/17823.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Allow tidying of expired issuer certificates. +``` diff --git a/changelog/17824.txt b/changelog/17824.txt new file mode 100644 index 0000000..0d3fbb6 --- /dev/null +++ b/changelog/17824.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Allow issuer creation, import to change default issuer via `default_follows_latest_issuer`. +``` diff --git a/changelog/17835.txt b/changelog/17835.txt new file mode 100644 index 0000000..3c48884 --- /dev/null +++ b/changelog/17835.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: mfa: use proper request id generation +``` diff --git a/changelog/17836.txt b/changelog/17836.txt new file mode 100644 index 0000000..66bc236 --- /dev/null +++ b/changelog/17836.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: trying to unseal with the wrong key now returns HTTP 400 +``` \ No newline at end of file diff --git a/changelog/17841.txt b/changelog/17841.txt new file mode 100644 index 0000000..6234bbb --- /dev/null +++ b/changelog/17841.txt @@ -0,0 +1,3 @@ +```release-note:improvement +logging: Vault Agent supports logging to a specified file path via environment variable, CLI or config +``` \ No newline at end of file diff --git a/changelog/17855.txt b/changelog/17855.txt new file mode 100644 index 0000000..c73838a --- /dev/null +++ b/changelog/17855.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Added warning to /sys/seal-status and vault status command if potentially dangerous behaviour overrides are being used. 
+``` \ No newline at end of file diff --git a/changelog/17856.txt b/changelog/17856.txt new file mode 100644 index 0000000..f039794 --- /dev/null +++ b/changelog/17856.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/activity: fix the end_date returned from the activity log endpoint when partial counts are computed +``` diff --git a/changelog/17857.txt b/changelog/17857.txt new file mode 100644 index 0000000..7c07f4e --- /dev/null +++ b/changelog/17857.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/azure: upgrades dependencies +``` \ No newline at end of file diff --git a/changelog/17858.txt b/changelog/17858.txt new file mode 100644 index 0000000..2b1a76e --- /dev/null +++ b/changelog/17858.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/gcp: Upgrades dependencies +``` \ No newline at end of file diff --git a/changelog/17866.txt b/changelog/17866.txt new file mode 100644 index 0000000..7e173f8 --- /dev/null +++ b/changelog/17866.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: consolidate all tag usage +``` \ No newline at end of file diff --git a/changelog/17871.txt b/changelog/17871.txt new file mode 100644 index 0000000..b4fb2b0 --- /dev/null +++ b/changelog/17871.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/gcp: Upgrades dependencies +``` \ No newline at end of file diff --git a/changelog/17879.txt b/changelog/17879.txt new file mode 100644 index 0000000..efb71b8 --- /dev/null +++ b/changelog/17879.txt @@ -0,0 +1,7 @@ +```release-note:bug +plugins: Allow running external plugins which override deprecated builtins. +``` +```release-note:improvement +plugins: Let Vault unseal and mount deprecated builtin plugins in a +deactivated state if this is not the first unseal after an upgrade. +``` diff --git a/changelog/17893.txt b/changelog/17893.txt new file mode 100644 index 0000000..3dddc76 --- /dev/null +++ b/changelog/17893.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Kubernetes Secrets Engine UI**: Kubernetes is now available in the UI as a supported secrets engine. +``` \ No newline at end of file diff --git a/changelog/17894.txt b/changelog/17894.txt new file mode 100644 index 0000000..bd056cd --- /dev/null +++ b/changelog/17894.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: allow selection of "default" for ssh algorithm_signer in web interface +``` diff --git a/changelog/17904.txt b/changelog/17904.txt new file mode 100644 index 0000000..aa65404 --- /dev/null +++ b/changelog/17904.txt @@ -0,0 +1,3 @@ +```release-note:bug +credential/cert: adds error message if no tls connection is found during the AliasLookahead operation +``` \ No newline at end of file diff --git a/changelog/17909.txt b/changelog/17909.txt new file mode 100644 index 0000000..3f19f94 --- /dev/null +++ b/changelog/17909.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Mark request body objects as required +``` diff --git a/changelog/17913.txt b/changelog/17913.txt new file mode 100644 index 0000000..f26a779 --- /dev/null +++ b/changelog/17913.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: Fix vault read handling to return raw data as secret.Data when there is no top-level data object from api response. 
+``` \ No newline at end of file diff --git a/changelog/17914.txt b/changelog/17914.txt new file mode 100644 index 0000000..671e636 --- /dev/null +++ b/changelog/17914.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth: Deduplicate policies prior to ACL generation +``` diff --git a/changelog/17919.txt b/changelog/17919.txt new file mode 100644 index 0000000..8fbb41d --- /dev/null +++ b/changelog/17919.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: property based testing for LifetimeWatcher sleep duration calculation +``` diff --git a/changelog/17927.txt b/changelog/17927.txt new file mode 100644 index 0000000..946c535 --- /dev/null +++ b/changelog/17927.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Enable typescript for future development +``` \ No newline at end of file diff --git a/changelog/17929.txt b/changelog/17929.txt new file mode 100644 index 0000000..72b639c --- /dev/null +++ b/changelog/17929.txt @@ -0,0 +1,4 @@ +```release-note:improvement +core/server: Added an environment variable to write goroutine stacktraces to a +temporary file for SIGUSR2 signals. +``` diff --git a/changelog/17932.txt b/changelog/17932.txt new file mode 100644 index 0000000..09dd01c --- /dev/null +++ b/changelog/17932.txt @@ -0,0 +1,3 @@ +```release-note:bug +UI: Fix "MFA-Setup permission denied error" by using user-token specific MFA generate endpoint instead of admin-generate +``` diff --git a/changelog/17934.txt b/changelog/17934.txt new file mode 100644 index 0000000..7f087a9 --- /dev/null +++ b/changelog/17934.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/transit: Add support to import public keys in transit engine and allow encryption and verification of signed data +``` diff --git a/changelog/17935.txt b/changelog/17935.txt new file mode 100644 index 0000000..c16ffdf --- /dev/null +++ b/changelog/17935.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/activity: return partial month counts when querying a historical date range and no historical data exists. +``` diff --git a/changelog/17944.txt b/changelog/17944.txt new file mode 100644 index 0000000..2ce17ca --- /dev/null +++ b/changelog/17944.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. +``` diff --git a/changelog/17950.txt b/changelog/17950.txt new file mode 100644 index 0000000..318334e --- /dev/null +++ b/changelog/17950.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fix entity policies list link to policy show page +``` diff --git a/changelog/17951.txt b/changelog/17951.txt new file mode 100644 index 0000000..06dd7a4 --- /dev/null +++ b/changelog/17951.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: added changes for user lockout workflow.
+``` \ No newline at end of file diff --git a/changelog/17964.txt b/changelog/17964.txt new file mode 100644 index 0000000..605100f --- /dev/null +++ b/changelog/17964.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/azure: upgrades dependencies +``` diff --git a/changelog/17979.txt b/changelog/17979.txt new file mode 100644 index 0000000..81a5c02 --- /dev/null +++ b/changelog/17979.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Add read support to `sys/loggers` and `sys/loggers/:name` endpoints +``` diff --git a/changelog/18011.txt b/changelog/18011.txt new file mode 100644 index 0000000..ed22510 --- /dev/null +++ b/changelog/18011.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/okta: fix a panic for AuthRenew in Okta +``` diff --git a/changelog/18021.txt b/changelog/18021.txt new file mode 100644 index 0000000..69a6df1 --- /dev/null +++ b/changelog/18021.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/alicloud: upgrades dependencies +``` diff --git a/changelog/18030.txt b/changelog/18030.txt new file mode 100644 index 0000000..30a49c5 --- /dev/null +++ b/changelog/18030.txt @@ -0,0 +1,3 @@ +```release-note:improvement +storage/raft: Add `retry_join_as_non_voter` config option. +``` diff --git a/changelog/18031.txt b/changelog/18031.txt new file mode 100644 index 0000000..74d659f --- /dev/null +++ b/changelog/18031.txt @@ -0,0 +1,3 @@ +```release-note:improvement +logging: Vault agent and server commands support log file and log rotation. +``` \ No newline at end of file diff --git a/changelog/18039.txt b/changelog/18039.txt new file mode 100644 index 0000000..ea522a7 --- /dev/null +++ b/changelog/18039.txt @@ -0,0 +1,6 @@ +```release-note:improvement +plugins: Mark logical database plugins Removed and remove the plugin code. +``` +```release-note:improvement +plugins: Mark app-id auth method Removed and remove the plugin code. +``` diff --git a/changelog/18040.txt b/changelog/18040.txt new file mode 100644 index 0000000..cb4eaf8 --- /dev/null +++ b/changelog/18040.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Added a new API that allows external actors to craft a CRL through JSON parameters +``` diff --git a/changelog/18043.txt b/changelog/18043.txt new file mode 100644 index 0000000..564574c --- /dev/null +++ b/changelog/18043.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/cert: Support listing provisioned CRLs within the mount. +``` diff --git a/changelog/18051.txt b/changelog/18051.txt new file mode 100644 index 0000000..c4c3232 --- /dev/null +++ b/changelog/18051.txt @@ -0,0 +1,6 @@ +```release-note:change +plugins: Mounts can no longer be pinned to a specific _builtin_ version. Mounts previously pinned to a specific builtin version will now automatically upgrade to the latest builtin version, and may now be overridden if an unversioned plugin of the same name and type is registered. Mounts using plugin versions without `builtin` in their metadata remain unaffected. +``` +```release-note:bug +plugins: Vault upgrades will no longer fail if a mount has been created using an explicit builtin plugin version. 
+``` diff --git a/changelog/18067.txt b/changelog/18067.txt new file mode 100644 index 0000000..2c86305 --- /dev/null +++ b/changelog/18067.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli/kv: improve kv CLI to remove data or custom metadata using kv patch +``` diff --git a/changelog/18101.txt b/changelog/18101.txt new file mode 100644 index 0000000..97ece74 --- /dev/null +++ b/changelog/18101.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Agent listeners can now be configured with the `metrics_only` role, serving only metrics, as part of the listener's new top-level `role` option. +``` diff --git a/changelog/18114.txt b/changelog/18114.txt new file mode 100644 index 0000000..3692b12 --- /dev/null +++ b/changelog/18114.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: update TTL picker for consistency +``` diff --git a/changelog/18121.txt b/changelog/18121.txt new file mode 100644 index 0000000..e162247 --- /dev/null +++ b/changelog/18121.txt @@ -0,0 +1,3 @@ +```release-note:improvement +audit: Include stack trace when audit logging recovers from a panic. +``` diff --git a/changelog/18125.txt b/changelog/18125.txt new file mode 100644 index 0000000..5d9f05a --- /dev/null +++ b/changelog/18125.txt @@ -0,0 +1,3 @@ +```release-note:improvement +command/server: Environment variable keys are now logged at startup. +``` \ No newline at end of file diff --git a/changelog/18128.txt b/changelog/18128.txt new file mode 100644 index 0000000..32dc537 --- /dev/null +++ b/changelog/18128.txt @@ -0,0 +1,3 @@ +```release-note:improvement +audit: Add `elide_list_responses` option, providing a countermeasure for a common source of oversized audit log entries +``` diff --git a/changelog/18137.txt b/changelog/18137.txt new file mode 100644 index 0000000..f262f96 --- /dev/null +++ b/changelog/18137.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Configured Vault Agent listeners now listen without the need for caching to be configured. +``` diff --git a/changelog/18143.txt b/changelog/18143.txt new file mode 100644 index 0000000..b1ff7f3 --- /dev/null +++ b/changelog/18143.txt @@ -0,0 +1,3 @@ +```release-note:bug +command/namespace: Fix vault cli namespace patch examples in help text. +``` \ No newline at end of file diff --git a/changelog/18145.txt b/changelog/18145.txt new file mode 100644 index 0000000..2e7172e --- /dev/null +++ b/changelog/18145.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID +``` diff --git a/changelog/18163.txt b/changelog/18163.txt new file mode 100644 index 0000000..f98c751 --- /dev/null +++ b/changelog/18163.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli/kv: skip formatting of nil secrets for patch and put with field parameter set +``` \ No newline at end of file diff --git a/changelog/18173.txt b/changelog/18173.txt new file mode 100644 index 0000000..04545ea --- /dev/null +++ b/changelog/18173.txt @@ -0,0 +1,3 @@ +```release-note:bug +plugins: Listing all plugins while audit logging is enabled will no longer result in an internal server error.
+``` diff --git a/changelog/18184.txt b/changelog/18184.txt new file mode 100644 index 0000000..153131a --- /dev/null +++ b/changelog/18184.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Address nil panic when an empty POST request is sent to the OCSP handler +``` diff --git a/changelog/18186.txt b/changelog/18186.txt new file mode 100644 index 0000000..1371082 --- /dev/null +++ b/changelog/18186.txt @@ -0,0 +1,6 @@ +```release-note:breaking-change +secrets/pki: Maintaining running count of certificates will be turned off by default. +To re-enable keeping these metrics available on the tidy status endpoint, enable +maintain_stored_certificate_counts on tidy-config; to also publish them to the +metrics consumer, enable publish_stored_certificate_count_metrics. +``` \ No newline at end of file diff --git a/changelog/18189.txt b/changelog/18189.txt new file mode 100644 index 0000000..5033404 --- /dev/null +++ b/changelog/18189.txt @@ -0,0 +1,3 @@ +```release-note:bug +plugins: Skip loading but still mount data associated with missing plugins on unseal. +``` diff --git a/changelog/18192.txt b/changelog/18192.txt new file mode 100644 index 0000000..56e377e --- /dev/null +++ b/changelog/18192.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Add logic to generate openapi response structures +``` diff --git a/changelog/18198.txt b/changelog/18198.txt new file mode 100644 index 0000000..ed03e74 --- /dev/null +++ b/changelog/18198.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Add openapi response definitions to approle/path_role.go +``` diff --git a/changelog/18199.txt b/changelog/18199.txt new file mode 100644 index 0000000..706a556 --- /dev/null +++ b/changelog/18199.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Allow templating performance replication cluster- and issuer-specific AIA URLs. +``` diff --git a/changelog/18210.txt b/changelog/18210.txt new file mode 100644 index 0000000..bd1c70b --- /dev/null +++ b/changelog/18210.txt @@ -0,0 +1,3 @@ +```release-note:bug +sdk: Don't panic if system view or storage methods called during plugin setup. +``` diff --git a/changelog/18222.txt b/changelog/18222.txt new file mode 100644 index 0000000..a7f822a --- /dev/null +++ b/changelog/18222.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Respond with written data to `config/auto-tidy`, `config/crl`, and `roles/:role`.
+``` diff --git a/changelog/18225.txt b/changelog/18225.txt new file mode 100644 index 0000000..567c3c7 --- /dev/null +++ b/changelog/18225.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/ldap: allow providing the LDAP password via an env var when authenticating via the CLI +``` diff --git a/changelog/18227.txt b/changelog/18227.txt new file mode 100644 index 0000000..4779159 --- /dev/null +++ b/changelog/18227.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Server UDS Listener**: Adding listener to Vault server to serve http requests via unix domain socket +``` \ No newline at end of file diff --git a/changelog/18228.txt b/changelog/18228.txt new file mode 100644 index 0000000..4f1b6d1 --- /dev/null +++ b/changelog/18228.txt @@ -0,0 +1,3 @@ +```release-note:improvement +hcp/connectivity: Add foundational OSS support for opt-in secure communication between self-managed Vault nodes and [HashiCorp Cloud Platform](https://cloud.hashicorp.com) +``` diff --git a/changelog/18230.txt b/changelog/18230.txt new file mode 100644 index 0000000..335f967 --- /dev/null +++ b/changelog/18230.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/ldap: allow configuration of alias dereferencing in LDAP search +``` diff --git a/changelog/18243.txt b/changelog/18243.txt new file mode 100644 index 0000000..f187579 --- /dev/null +++ b/changelog/18243.txt @@ -0,0 +1,4 @@ +```release-note:improvement +secrets/transit: Add an optional reference field to batch operation items +which is repeated on batch responses to help more easily correlate inputs with outputs. +``` \ No newline at end of file diff --git a/changelog/18244.txt b/changelog/18244.txt new file mode 100644 index 0000000..b81de03 --- /dev/null +++ b/changelog/18244.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: parallelize backend initialization to improve startup time for large numbers of mounts. +``` \ No newline at end of file diff --git a/changelog/18263.txt b/changelog/18263.txt new file mode 100644 index 0000000..1cdad85 --- /dev/null +++ b/changelog/18263.txt @@ -0,0 +1,5 @@ +```release-note:bug +storage/raft (enterprise): An already joined node can rejoin by wiping storage +and re-issuing a join request, but in doing so could transiently become a +non-voter. In some scenarios this resulted in loss of quorum. +``` diff --git a/changelog/18272.txt b/changelog/18272.txt new file mode 100644 index 0000000..168913b --- /dev/null +++ b/changelog/18272.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/transit: Allow configuring whether upsert of keys is allowed.
+``` diff --git a/changelog/18273.txt b/changelog/18273.txt new file mode 100644 index 0000000..46033c7 --- /dev/null +++ b/changelog/18273.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/quotas: Fix issue with improper application of default rate limit quota exempt paths +``` \ No newline at end of file diff --git a/changelog/18279.txt b/changelog/18279.txt new file mode 100644 index 0000000..9823a1f --- /dev/null +++ b/changelog/18279.txt @@ -0,0 +1,4 @@ +```release-note:improvement +core: Added sys/lockedusers/[mount_accessor]/unlock/[alias_identifier] endpoint to unlock a user +with the given mount_accessor and alias_identifier if locked +``` \ No newline at end of file diff --git a/changelog/18299.txt b/changelog/18299.txt new file mode 100644 index 0000000..b340b95 --- /dev/null +++ b/changelog/18299.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/transit: Do not warn about unrecognized parameter 'batch_input' +``` diff --git a/changelog/18302.txt b/changelog/18302.txt new file mode 100644 index 0000000..1f4b69d --- /dev/null +++ b/changelog/18302.txt @@ -0,0 +1,3 @@ +```release-note:improvement +hcp/status: Expand node-level status information +``` diff --git a/changelog/18310.txt b/changelog/18310.txt new file mode 100644 index 0000000..5af54ed --- /dev/null +++ b/changelog/18310.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/transit: Honor `partial_success_response_code` on decryption failures. +``` \ No newline at end of file diff --git a/changelog/18315.txt b/changelog/18315.txt new file mode 100644 index 0000000..dc69847 --- /dev/null +++ b/changelog/18315.txt @@ -0,0 +1,3 @@ +```release-note:improvement +hcp/connectivity: Include HCP organization, project, and resource ID in server startup logs +``` diff --git a/changelog/18342.txt b/changelog/18342.txt new file mode 100644 index 0000000..f2978bf --- /dev/null +++ b/changelog/18342.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Prepends "passcode=" if not provided in user input for Duo TOTP MFA method authentication +``` \ No newline at end of file diff --git a/changelog/18350.txt b/changelog/18350.txt new file mode 100644 index 0000000..efe65ee --- /dev/null +++ b/changelog/18350.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] +``` diff --git a/changelog/18351.txt b/changelog/18351.txt new file mode 100644 index 0000000..07faa06 --- /dev/null +++ b/changelog/18351.txt @@ -0,0 +1,3 @@ +```release-note:improvement +hcp/status: Add cluster-level status information +``` diff --git a/changelog/18374.txt b/changelog/18374.txt new file mode 100644 index 0000000..eaa5798 --- /dev/null +++ b/changelog/18374.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: update DocLink component to use new host url: developer.hashicorp.com +``` diff --git a/changelog/18376.txt b/changelog/18376.txt new file mode 100644 index 0000000..1edc3df --- /dev/null +++ b/changelog/18376.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Add openapi response definitions to pki/config_*.go +``` diff --git a/changelog/18397.txt b/changelog/18397.txt new file mode 100644 index 0000000..aafb9d7 --- /dev/null +++ b/changelog/18397.txt @@ -0,0 +1,4 @@ +```release-note:improvement +secrets/pki: Allow UserID Field (https://www.rfc-editor.org/rfc/rfc1274#section-9.3.1) to be set on Certificates when +allowed by role +``` diff --git a/changelog/18401.txt b/changelog/18401.txt new file mode 100644 index 0000000..8f1c148 --- /dev/null
+++ b/changelog/18401.txt @@ -0,0 +1,3 @@ +```release-note:bug +expiration: Prevent panics on perf standbys when an irrevocable lease gets deleted. +``` diff --git a/changelog/18403.txt b/changelog/18403.txt new file mode 100644 index 0000000..458f6c9 --- /dev/null +++ b/changelog/18403.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent/config: Allow config directories to be specified with -config, and allow multiple -configs to be supplied. +``` diff --git a/changelog/18437.txt b/changelog/18437.txt new file mode 100644 index 0000000..9ca8a8d --- /dev/null +++ b/changelog/18437.txt @@ -0,0 +1,3 @@ +```release-note:improvement +client/pki: Add a new command verify-sign which checks the relationship between two certificates. +``` diff --git a/changelog/18452.txt b/changelog/18452.txt new file mode 100644 index 0000000..6d45666 --- /dev/null +++ b/changelog/18452.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/activity: de-duplicate namespaces when historical and current month data are mixed +``` diff --git a/changelog/18456.txt b/changelog/18456.txt new file mode 100644 index 0000000..ee29750 --- /dev/null +++ b/changelog/18456.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: add openapi response definitions to /sys/audit endpoints +``` \ No newline at end of file diff --git a/changelog/18463.txt b/changelog/18463.txt new file mode 100644 index 0000000..538f66e --- /dev/null +++ b/changelog/18463.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli/pki: Add List-Intermediates functionality to pki client. +``` diff --git a/changelog/18465.txt b/changelog/18465.txt new file mode 100644 index 0000000..928da99 --- /dev/null +++ b/changelog/18465.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: add openapi response definitions to /sys/auth endpoints +``` \ No newline at end of file diff --git a/changelog/18466.txt b/changelog/18466.txt new file mode 100644 index 0000000..220e058 --- /dev/null +++ b/changelog/18466.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Allow patching issuer to set an empty issuer name. +``` diff --git a/changelog/18467.txt b/changelog/18467.txt new file mode 100644 index 0000000..55a85a6 --- /dev/null +++ b/changelog/18467.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli/pki: Add pki issue command, which creates a CSR, has a vault mount sign it, then reimports it. +``` \ No newline at end of file diff --git a/changelog/18468.txt b/changelog/18468.txt new file mode 100644 index 0000000..362bf05 --- /dev/null +++ b/changelog/18468.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: add openapi response definitions to /sys/capabilities endpoints +``` \ No newline at end of file diff --git a/changelog/18472.txt b/changelog/18472.txt new file mode 100644 index 0000000..e34d53a --- /dev/null +++ b/changelog/18472.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: add openapi response definitions to /sys/config and /sys/generate-root endpoints +``` \ No newline at end of file diff --git a/changelog/18482.txt b/changelog/18482.txt new file mode 100644 index 0000000..f51abb6 --- /dev/null +++ b/changelog/18482.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Return issuer_id and issuer_name on /issuer/:issuer_ref/json endpoint.
+``` diff --git a/changelog/18499.txt b/changelog/18499.txt new file mode 100644 index 0000000..b329ed0 --- /dev/null +++ b/changelog/18499.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli/pki: Added "Reissue" command which allows extracting fields from an existing certificate to create a new certificate. +``` \ No newline at end of file diff --git a/changelog/18515.txt b/changelog/18515.txt new file mode 100644 index 0000000..86eb71b --- /dev/null +++ b/changelog/18515.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Add openapi response definitions to vault/logical_system_paths.go defined endpoints. +``` diff --git a/changelog/18521.txt b/changelog/18521.txt new file mode 100644 index 0000000..4111aea --- /dev/null +++ b/changelog/18521.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: wait for wanted message event during OIDC callback instead of using the first message event +``` diff --git a/changelog/18542.txt b/changelog/18542.txt new file mode 100644 index 0000000..ff46740 --- /dev/null +++ b/changelog/18542.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: add openapi response definitions to /sys/internal endpoints +``` diff --git a/changelog/18546.txt b/changelog/18546.txt new file mode 100644 index 0000000..48bc906 --- /dev/null +++ b/changelog/18546.txt @@ -0,0 +1,3 @@ +```release-note:bug +database/mongodb: Fix writeConcern so that it is applied to any query made on the database +``` \ No newline at end of file diff --git a/changelog/18554.txt b/changelog/18554.txt new file mode 100644 index 0000000..68d1d84 --- /dev/null +++ b/changelog/18554.txt @@ -0,0 +1,3 @@ +```release-note:bug +openapi: Fix many incorrect details in generated API spec, by using better techniques to parse path regexps +``` diff --git a/changelog/18556.txt b/changelog/18556.txt new file mode 100644 index 0000000..a48dacd --- /dev/null +++ b/changelog/18556.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/token: Fix parsing of `auth/token/create` fields to avoid incorrect warnings about ignored parameters +``` diff --git a/changelog/18568.txt b/changelog/18568.txt new file mode 100644 index 0000000..a1fbabf --- /dev/null +++ b/changelog/18568.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix spurious `permission denied` for all HelpOperations on sudo-protected paths +``` diff --git a/changelog/18571.txt b/changelog/18571.txt new file mode 100644 index 0000000..dd811d9 --- /dev/null +++ b/changelog/18571.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/token, sys: Fix path-help being unavailable for some list-only endpoints +``` diff --git a/changelog/18585.txt b/changelog/18585.txt new file mode 100644 index 0000000..a0832e2 --- /dev/null +++ b/changelog/18585.txt @@ -0,0 +1,3 @@ +```release-note:improvement +hcp/connectivity: Only update SCADA session metadata if status changes +``` diff --git a/changelog/18587.txt b/changelog/18587.txt new file mode 100644 index 0000000..7471d9a --- /dev/null +++ b/changelog/18587.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/kubernetes: Add /check endpoint to determine if environment variables are set [[GH-18](https://github.com/hashicorp/vault-plugin-secrets-kubernetes/pull/18)] +``` diff --git a/changelog/18589.txt b/changelog/18589.txt new file mode 100644 index 0000000..2e1ef48 --- /dev/null +++ b/changelog/18589.txt @@ -0,0 +1,3 @@ +```release-note:improvement +vault/diagnose: Upgrade `go.opentelemetry.io/otel`, `go.opentelemetry.io/otel/sdk`, `go.opentelemetry.io/otel/trace` to v1.11.2 +``` \ No newline at end of file diff --git
a/changelog/18598.txt b/changelog/18598.txt new file mode 100644 index 0000000..62d13d0 --- /dev/null +++ b/changelog/18598.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/activity: include mount counts when de-duplicating current and historical month data +``` diff --git a/changelog/18604.txt b/changelog/18604.txt new file mode 100644 index 0000000..7645cbb --- /dev/null +++ b/changelog/18604.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: add `detect_deadlocks` config to optionally detect core state deadlocks +``` \ No newline at end of file diff --git a/changelog/18610.txt b/changelog/18610.txt new file mode 100644 index 0000000..bac3add --- /dev/null +++ b/changelog/18610.txt @@ -0,0 +1,4 @@ +```release-note:improvement +auth: Allow naming login MFA methods and using those names instead of IDs in satisfying MFA requirement for requests. +Make passcode arguments consistent across login MFA method types. +``` \ No newline at end of file diff --git a/changelog/18624.txt b/changelog/18624.txt new file mode 100644 index 0000000..91209bb --- /dev/null +++ b/changelog/18624.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: add openapi response definitions to /sys/rotate endpoints +``` diff --git a/changelog/18625.txt b/changelog/18625.txt new file mode 100644 index 0000000..526d6b6 --- /dev/null +++ b/changelog/18625.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: add openapi response definitions to /sys/seal endpoints +``` \ No newline at end of file diff --git a/changelog/18626.txt b/changelog/18626.txt new file mode 100644 index 0000000..6bb2ba0 --- /dev/null +++ b/changelog/18626.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: add openapi response definitions to /sys/tool endpoints +``` \ No newline at end of file diff --git a/changelog/18627.txt b/changelog/18627.txt new file mode 100644 index 0000000..e2a4dfb --- /dev/null +++ b/changelog/18627.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: add openapi response definitions to /sys/wrapping endpoints +``` \ No newline at end of file diff --git a/changelog/18628.txt b/changelog/18628.txt new file mode 100644 index 0000000..0722856 --- /dev/null +++ b/changelog/18628.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: add openapi response definitions to /sys/version-history, /sys/leader, /sys/ha-status, /sys/host-info, /sys/in-flight-req +``` \ No newline at end of file diff --git a/changelog/18632.txt b/changelog/18632.txt new file mode 100644 index 0000000..5359613 --- /dev/null +++ b/changelog/18632.txt @@ -0,0 +1,3 @@ +```release-note:improvement +database/postgres: Support multiline strings for revocation statements. +``` diff --git a/changelog/18633.txt b/changelog/18633.txt new file mode 100644 index 0000000..2048c46 --- /dev/null +++ b/changelog/18633.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Add openapi response definitions to /sys defined endpoints. 
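As a sketch of the `detect_deadlocks` setting noted above (changelog/18604): it is a server-level config string, and the `"statelock"` value shown here is an assumption about the supported lock type, intended for debugging since detection adds overhead:

```hcl
# Server config fragment: log potential deadlocks on core state locks.
# Assumed value; the field takes a comma-separated list of lock types.
detect_deadlocks = "statelock"
```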
+``` \ No newline at end of file diff --git a/changelog/18635.txt b/changelog/18635.txt new file mode 100644 index 0000000..43f3fdf --- /dev/null +++ b/changelog/18635.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sdk: Add response schema validation method framework/FieldData.ValidateStrict and two test helpers (ValidateResponse, ValidateResponseData) +``` diff --git a/changelog/18636.txt b/changelog/18636.txt new file mode 100644 index 0000000..9f260e2 --- /dev/null +++ b/changelog/18636.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sdk: Adding FindResponseSchema test helper to assist with response schema validation in tests +``` diff --git a/changelog/18638.txt b/changelog/18638.txt new file mode 100644 index 0000000..727c85a --- /dev/null +++ b/changelog/18638.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: allows some parts of config to be reloaded without requiring a restart. +``` \ No newline at end of file diff --git a/changelog/18645.txt b/changelog/18645.txt new file mode 100644 index 0000000..0122111 --- /dev/null +++ b/changelog/18645.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Allow tidying of the legacy ca_bundle, improving startup on post-migrated, seal-wrapped PKI mounts. +``` diff --git a/changelog/18651.txt b/changelog/18651.txt new file mode 100644 index 0000000..9fc7ff8 --- /dev/null +++ b/changelog/18651.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: cleanup unsaved auth method ember data record when navigating away from mount backend form +``` \ No newline at end of file diff --git a/changelog/18663.txt b/changelog/18663.txt new file mode 100644 index 0000000..941b271 --- /dev/null +++ b/changelog/18663.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: generic_mount_paths: Move implementation fully into server, rather than partially in plugin framework; recognize all 4 singleton mounts (auth/token, cubbyhole, identity, system) rather than just 2; change parameter from `{mountPath}` to `{_mount_path}` +``` diff --git a/changelog/18673.txt b/changelog/18673.txt new file mode 100644 index 0000000..73a2a8f --- /dev/null +++ b/changelog/18673.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Implemented background thread to update locked user entries every 15 minutes to prevent brute forcing in auth methods. +``` \ No newline at end of file diff --git a/changelog/18675.txt b/changelog/18675.txt new file mode 100644 index 0000000..90a8ed6 --- /dev/null +++ b/changelog/18675.txt @@ -0,0 +1,4 @@ +```release-note:improvement +core: Added sys/locked-users endpoint to list locked users. Changed api endpoint from +sys/lockedusers/[mount_accessor]/unlock/[alias_identifier] to sys/locked-users/[mount_accessor]/unlock/[alias_identifier]. +``` \ No newline at end of file diff --git a/changelog/18682.txt b/changelog/18682.txt new file mode 100644 index 0000000..9042109 --- /dev/null +++ b/changelog/18682.txt @@ -0,0 +1,4 @@ +```release-note:improvement +core: Add experiments system and `events.alpha1` experiment. +``` + diff --git a/changelog/18684.txt b/changelog/18684.txt new file mode 100644 index 0000000..803c7cc --- /dev/null +++ b/changelog/18684.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Add note in logs when starting Vault Agent indicating if the version differs from the Vault Server.
+``` diff --git a/changelog/18704.txt b/changelog/18704.txt new file mode 100644 index 0000000..bc76db9 --- /dev/null +++ b/changelog/18704.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/raft: Fix race with follower heartbeat tracker during teardown. +``` \ No newline at end of file diff --git a/changelog/18708.txt b/changelog/18708.txt new file mode 100644 index 0000000..1db2ba6 --- /dev/null +++ b/changelog/18708.txt @@ -0,0 +1,3 @@ +```release-note:bug +api: Remove timeout logic from ReadRaw functions and add ReadRawWithContext +``` diff --git a/changelog/18716.txt b/changelog/18716.txt new file mode 100644 index 0000000..e3fa257 --- /dev/null +++ b/changelog/18716.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/kubernetes: Ensure a consistent TLS configuration for all k8s API requests [[#173](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/173)] +``` diff --git a/changelog/18718.txt b/changelog/18718.txt new file mode 100644 index 0000000..a5b9b13 --- /dev/null +++ b/changelog/18718.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Add vault.core.locked_users telemetry metric to emit information about total number of locked users. +``` \ No newline at end of file diff --git a/changelog/18729.txt b/changelog/18729.txt new file mode 100644 index 0000000..975d027 --- /dev/null +++ b/changelog/18729.txt @@ -0,0 +1,3 @@ +```release-note:bug +sdk/backend: prevent panic when computing the zero value for a `TypeInt64` schema field. +``` \ No newline at end of file diff --git a/changelog/18740.txt b/changelog/18740.txt new file mode 100644 index 0000000..f493995 --- /dev/null +++ b/changelog/18740.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Added `token_file` auto-auth configuration to allow using a pre-existing token for Vault Agent. +``` diff --git a/changelog/18743.txt b/changelog/18743.txt new file mode 100644 index 0000000..7cdfb79 --- /dev/null +++ b/changelog/18743.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes query parameters not passed in api explorer test requests +``` \ No newline at end of file diff --git a/changelog/18752.txt b/changelog/18752.txt new file mode 100644 index 0000000..95346e0 --- /dev/null +++ b/changelog/18752.txt @@ -0,0 +1,3 @@ +```release-note:improvement +**Redis ElastiCache DB Engine**: Renamed configuration parameters for disambiguation; old parameters still supported for compatibility. +``` \ No newline at end of file diff --git a/changelog/18766.txt b/changelog/18766.txt new file mode 100644 index 0000000..50743b3 --- /dev/null +++ b/changelog/18766.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/activity: add namespace breakdown for new clients when date range spans multiple months, including the current month. +``` diff --git a/changelog/18772.txt b/changelog/18772.txt new file mode 100644 index 0000000..55c0696 --- /dev/null +++ b/changelog/18772.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Add openapi response definitions to approle/path_login.go & approle/path_tidy_user_id.go +``` diff --git a/changelog/18787.txt b/changelog/18787.txt new file mode 100644 index 0000000..e865125 --- /dev/null +++ b/changelog/18787.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Added JWT authentication warning message about blocked pop-up windows and web browser settings. 
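For the `token_file` auto-auth method noted above (changelog/18740), a minimal Vault Agent config sketch; the token path is illustrative, and the file should be protected like any other Vault token:

```hcl
# Vault Agent: authenticate using a pre-existing token read from disk.
auto_auth {
  method "token_file" {
    config = {
      token_file_path = "/home/app/.vault-token" # illustrative path
    }
  }
}
```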
+``` diff --git a/changelog/18799.txt b/changelog/18799.txt new file mode 100644 index 0000000..1d71593 --- /dev/null +++ b/changelog/18799.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters +``` diff --git a/changelog/18808.txt b/changelog/18808.txt new file mode 100644 index 0000000..12c80e6 --- /dev/null +++ b/changelog/18808.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes keymgmt key details page +``` \ No newline at end of file diff --git a/changelog/18809.txt b/changelog/18809.txt new file mode 100644 index 0000000..a1ec06f --- /dev/null +++ b/changelog/18809.txt @@ -0,0 +1,3 @@ +```release-note:bug +activity (enterprise): Fix misattribution of entities to no or child namespace auth methods +``` diff --git a/changelog/18811.txt b/changelog/18811.txt new file mode 100644 index 0000000..34a155d --- /dev/null +++ b/changelog/18811.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth: Provide an IP address of the requests from Vault to a Duo challenge after successful authentication. +``` diff --git a/changelog/18817.txt b/changelog/18817.txt new file mode 100644 index 0000000..17c93aa --- /dev/null +++ b/changelog/18817.txt @@ -0,0 +1,3 @@ +```release-note:improvement +migration: allow parallelization of key migration for `vault operator migrate` in order to speed up a migration. +``` \ No newline at end of file diff --git a/changelog/18842.txt b/changelog/18842.txt new file mode 100644 index 0000000..9a69ff6 --- /dev/null +++ b/changelog/18842.txt @@ -0,0 +1,3 @@ +```release-note:feature +**New PKI UI**: Add beta support for new and improved PKI UI +``` \ No newline at end of file diff --git a/changelog/18859.txt b/changelog/18859.txt new file mode 100644 index 0000000..0ee2c36 --- /dev/null +++ b/changelog/18859.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/auth: Return a 403 instead of a 500 for wrapping requests when token is not provided +``` diff --git a/changelog/18863.txt b/changelog/18863.txt new file mode 100644 index 0000000..c1f2800 --- /dev/null +++ b/changelog/18863.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: JWT auto-auth has a new config option, `remove_jwt_follows_symlinks` (default: false). If set to true, and a symlink to a JWT has been provided in the `path` option while the `remove_jwt_after_reading` config option is set to true (the default), Agent removes the JWT itself instead of the symlink to it. +``` \ No newline at end of file diff --git a/changelog/18870.txt b/changelog/18870.txt new file mode 100644 index 0000000..1b69489 --- /dev/null +++ b/changelog/18870.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: provide a more descriptive error message when calling enterprise feature paths in open-source +``` \ No newline at end of file diff --git a/changelog/18874.txt b/changelog/18874.txt new file mode 100644 index 0000000..7483c43 --- /dev/null +++ b/changelog/18874.txt @@ -0,0 +1,3 @@ +```release-note:security +secrets/ssh: removal of the deprecated dynamic keys mode. **When any remaining dynamic key leases expire**, an error stating `secret is unsupported by this backend` will be thrown by the lease manager. +``` diff --git a/changelog/18885.txt b/changelog/18885.txt new file mode 100644 index 0000000..99878c8 --- /dev/null +++ b/changelog/18885.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +auth/cert: Load config, crls from InitializeFunc to allow parallel processing.
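A sketch of the `remove_jwt_follows_symlinks` behavior from the Agent entry above (changelog/18863), assuming a JWT delivered via symlink; the paths and role name are illustrative:

```hcl
# Vault Agent JWT auto-auth: when `path` is a symlink, remove the JWT it
# points to rather than the symlink itself after reading.
auto_auth {
  method "jwt" {
    config = {
      path                        = "/var/run/vault/jwt" # may be a symlink
      role                        = "app-role"           # illustrative
      remove_jwt_after_reading    = true                 # the default
      remove_jwt_follows_symlinks = true
    }
  }
}
```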
+``` diff --git a/changelog/18887.txt b/changelog/18887.txt new file mode 100644 index 0000000..55e8600 --- /dev/null +++ b/changelog/18887.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli: Add transit import key helper commands for BYOK to Transit/Transform. +``` \ No newline at end of file diff --git a/changelog/18890.txt b/changelog/18890.txt new file mode 100644 index 0000000..056e585 --- /dev/null +++ b/changelog/18890.txt @@ -0,0 +1,4 @@ +```release-note:bug +core: removes strings.ToLower for alias name from pathLoginAliasLookahead function in userpass. This fixes +the storage entry for locked users by having the correct alias name in the path. +``` \ No newline at end of file diff --git a/changelog/18892.txt b/changelog/18892.txt new file mode 100644 index 0000000..65b6ebf --- /dev/null +++ b/changelog/18892.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli: updated `vault operator rekey` prompts to describe recovery keys when `-target=recovery` +``` diff --git a/changelog/18899.txt b/changelog/18899.txt new file mode 100644 index 0000000..92f2474 --- /dev/null +++ b/changelog/18899.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: fix race between tidy's cert counting and tidy status reporting. +``` diff --git a/changelog/18916.txt b/changelog/18916.txt new file mode 100644 index 0000000..eb2792b --- /dev/null +++ b/changelog/18916.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/activity: report mount paths (rather than mount accessors) in current month activity log counts and include deleted mount paths in precomputed queries. +``` diff --git a/changelog/18923.txt b/changelog/18923.txt new file mode 100644 index 0000000..2b4abae --- /dev/null +++ b/changelog/18923.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted +``` diff --git a/changelog/18934.txt b/changelog/18934.txt new file mode 100644 index 0000000..e84f666 --- /dev/null +++ b/changelog/18934.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Change gen_openapi.sh to generate schema with generic mount paths +``` diff --git a/changelog/18935.txt b/changelog/18935.txt new file mode 100644 index 0000000..c55cda1 --- /dev/null +++ b/changelog/18935.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Add default values to thing_mount_path parameters +``` diff --git a/changelog/18938.txt b/changelog/18938.txt new file mode 100644 index 0000000..de937fc --- /dev/null +++ b/changelog/18938.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: OCSP GET request parameter was not being URL unescaped before processing. +``` diff --git a/changelog/18939.txt b/changelog/18939.txt new file mode 100644 index 0000000..aa7f8e7 --- /dev/null +++ b/changelog/18939.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/ssh: Allow removing SSH host keys from the dynamic keys feature.
+``` diff --git a/changelog/18945.txt b/changelog/18945.txt new file mode 100644 index 0000000..a6f6a66 --- /dev/null +++ b/changelog/18945.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/cert: Address a race condition accessing the loaded crls without a lock +``` diff --git a/changelog/18951.txt b/changelog/18951.txt new file mode 100644 index 0000000..9617c0d --- /dev/null +++ b/changelog/18951.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null +``` diff --git a/changelog/18962.txt b/changelog/18962.txt new file mode 100644 index 0000000..322c347 --- /dev/null +++ b/changelog/18962.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Remove dependency on sdk module. +``` diff --git a/changelog/18984.txt b/changelog/18984.txt new file mode 100644 index 0000000..4652bf2 --- /dev/null +++ b/changelog/18984.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: consistently use UTC for CA's notAfter exceeded error message +``` diff --git a/changelog/19002.txt b/changelog/19002.txt new file mode 100644 index 0000000..d1a1ff5 --- /dev/null +++ b/changelog/19002.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Added `reload` option to cert auth configuration in case of external renewals of local x509 key-pairs. +``` \ No newline at end of file diff --git a/changelog/19005.txt b/changelog/19005.txt new file mode 100644 index 0000000..27e251e --- /dev/null +++ b/changelog/19005.txt @@ -0,0 +1,7 @@ +```release-note:change +auth/alicloud: require the `role` field on login +``` + +```release-note:bug +auth/alicloud: fix regression in vault login command that caused login to fail +``` diff --git a/changelog/19018.txt b/changelog/19018.txt new file mode 100644 index 0000000..bd79dbd --- /dev/null +++ b/changelog/19018.txt @@ -0,0 +1,7 @@ +```release-note:feature +**GCP Secrets Impersonated Account Support**: Add support for GCP service account impersonation, allowing callers to generate a GCP access token without requiring Vault to store or retrieve a GCP service account key for each role. 
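For the cert auto-auth `reload` option above (changelog/19002), a minimal sketch assuming externally renewed key material on disk; the role name and file paths are illustrative:

```hcl
# Vault Agent cert auto-auth: re-read the certificate and key from disk,
# picking up external renewals without restarting the Agent.
auto_auth {
  method "cert" {
    config = {
      name        = "web"                   # illustrative cert role
      client_cert = "/etc/vault/client.crt" # renewed externally
      client_key  = "/etc/vault/client.key"
      reload      = true
    }
  }
}
```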
+``` + +```release-note:bug +secrets/gcp: fix issue where IAM bindings were not preserved during policy update +``` diff --git a/changelog/19032.txt b/changelog/19032.txt new file mode 100644 index 0000000..a474c22 --- /dev/null +++ b/changelog/19032.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/ldap: Add max_page_size configurable to LDAP configuration +``` diff --git a/changelog/19036.txt b/changelog/19036.txt new file mode 100644 index 0000000..ebe62a7 --- /dev/null +++ b/changelog/19036.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes logout route wrapped_token bug +``` \ No newline at end of file diff --git a/changelog/19037.txt b/changelog/19037.txt new file mode 100644 index 0000000..2ccd656 --- /dev/null +++ b/changelog/19037.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Revert fix for PR [18938](https://github.com/hashicorp/vault/pull/18938) +``` diff --git a/changelog/19043.txt b/changelog/19043.txt new file mode 100644 index 0000000..20a1a77 --- /dev/null +++ b/changelog/19043.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: added ability to validate response structures against openapi schema for test clusters +``` \ No newline at end of file diff --git a/changelog/19044.txt b/changelog/19044.txt new file mode 100644 index 0000000..7926bb6 --- /dev/null +++ b/changelog/19044.txt @@ -0,0 +1,3 @@ +```release-note:improvement +database/redis-elasticache: changed config argument names for disambiguation +``` \ No newline at end of file diff --git a/changelog/19056.txt b/changelog/19056.txt new file mode 100644 index 0000000..b5b1ae3 --- /dev/null +++ b/changelog/19056.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/kv: make upgrade synchronous when no keys to upgrade +``` \ No newline at end of file diff --git a/changelog/19061.txt b/changelog/19061.txt new file mode 100644 index 0000000..ddf7943 --- /dev/null +++ b/changelog/19061.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/ad: Fix bug where updates to config would fail if password isn't provided +``` diff --git a/changelog/19063.txt b/changelog/19063.txt new file mode 100644 index 0000000..df36111 --- /dev/null +++ b/changelog/19063.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/gcpkms: Updated plugin from v0.13.0 to v0.14.0 +``` diff --git a/changelog/19068.txt b/changelog/19068.txt new file mode 100644 index 0000000..6edb29f --- /dev/null +++ b/changelog/19068.txt @@ -0,0 +1,3 @@ +```release-note:change +sdk: Remove version package, make useragent.String versionless.
+``` diff --git a/changelog/19071.txt b/changelog/19071.txt new file mode 100644 index 0000000..ca988db --- /dev/null +++ b/changelog/19071.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode +``` diff --git a/changelog/19076.txt b/changelog/19076.txt new file mode 100644 index 0000000..c206f44 --- /dev/null +++ b/changelog/19076.txt @@ -0,0 +1,6 @@ +```release-note:improvement +auth/oidc: Adds ability to set Google Workspace domain for groups search +``` +```release-note:improvement +auth/oidc: Adds `abort_on_error` parameter to CLI login command to help in non-interactive contexts +``` \ No newline at end of file diff --git a/changelog/19077.txt b/changelog/19077.txt new file mode 100644 index 0000000..604cea5 --- /dev/null +++ b/changelog/19077.txt @@ -0,0 +1,11 @@ +```release-note:feature +**Azure Auth Rotate Root**: Add support for rotate root in Azure Auth engine +``` + +```release-note:feature +**Azure Auth Managed Identities**: Allow any Azure resource that supports managed identities to authenticate with Vault +``` + +```release-note:feature +**VMSS Flex Authentication**: Adds support for Virtual Machine Scale Set Flex Authentication +``` \ No newline at end of file diff --git a/changelog/19084.txt b/changelog/19084.txt new file mode 100644 index 0000000..97896d3 --- /dev/null +++ b/changelog/19084.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/kubernetes: add /check endpoint to determine if environment variables are set +``` diff --git a/changelog/19094.txt b/changelog/19094.txt new file mode 100644 index 0000000..d3d872d --- /dev/null +++ b/changelog/19094.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/kubernetes: fixes and dep updates for the auth-kubernetes plugin (see plugin changelog for details) +``` diff --git a/changelog/19096.txt b/changelog/19096.txt new file mode 100644 index 0000000..2cb0bbf --- /dev/null +++ b/changelog/19096.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/azure: Adds ability to persist an application for the lifetime of a role. +``` \ No newline at end of file diff --git a/changelog/19098.txt b/changelog/19098.txt new file mode 100644 index 0000000..df0f9c1 --- /dev/null +++ b/changelog/19098.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/cf: Remove incorrect usage of CreateOperation from path_config +``` diff --git a/changelog/19100.txt b/changelog/19100.txt new file mode 100644 index 0000000..a2f1b72 --- /dev/null +++ b/changelog/19100.txt @@ -0,0 +1,4 @@ +```release-note:improvement +Bump github.com/hashicorp/go-plugin version from 1.4.5 to 1.4.8 +``` + diff --git a/changelog/19103.txt b/changelog/19103.txt new file mode 100644 index 0000000..868db62 --- /dev/null +++ b/changelog/19103.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/database: Adds an error message requiring a password on root credential rotation. +``` \ No newline at end of file diff --git a/changelog/19111.txt b/changelog/19111.txt new file mode 100644 index 0000000..35b7803 --- /dev/null +++ b/changelog/19111.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/mongodb-atlas: Fix a bug that did not allow WAL rollback to handle partial failures when creating API keys +``` diff --git a/changelog/19116.txt b/changelog/19116.txt new file mode 100644 index 0000000..5dfcd9e --- /dev/null +++ b/changelog/19116.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Allows license-banners to be dismissed. Saves preferences in localStorage.
+``` \ No newline at end of file diff --git a/changelog/19135.txt b/changelog/19135.txt new file mode 100644 index 0000000..a3e085b --- /dev/null +++ b/changelog/19135.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui (enterprise): Fix cancel button from transform engine role creation page +``` diff --git a/changelog/19139.txt b/changelog/19139.txt new file mode 100644 index 0000000..75e9a78 --- /dev/null +++ b/changelog/19139.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes bug in kmip role form that caused `operation_all` to persist after deselecting all operation checkboxes +``` diff --git a/changelog/19145.txt b/changelog/19145.txt new file mode 100644 index 0000000..9cca8e8 --- /dev/null +++ b/changelog/19145.txt @@ -0,0 +1,4 @@ +```release-note:improvement +secrets/kv: Emit events on write if events system enabled +``` + diff --git a/changelog/19160.txt b/changelog/19160.txt new file mode 100644 index 0000000..66a3baa --- /dev/null +++ b/changelog/19160.txt @@ -0,0 +1,3 @@ +```release-note:bug +api: Addressed a couple of issues that arose as edge cases for the -output-policy flag. Specifically around properly handling list commands, distinguishing kv V1/V2, and correctly recognizing protected paths. +``` \ No newline at end of file diff --git a/changelog/19170.txt b/changelog/19170.txt new file mode 100644 index 0000000..9a421dd --- /dev/null +++ b/changelog/19170.txt @@ -0,0 +1,3 @@ +```release-note:improvement +website/docs: fix database static-user sample payload +``` diff --git a/changelog/19186.txt b/changelog/19186.txt new file mode 100644 index 0000000..cb3b59a --- /dev/null +++ b/changelog/19186.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/approle: Add nil check for the secret ID entry when deleting via secret id accessor preventing cross role secret id deletion +``` diff --git a/changelog/19187.txt b/changelog/19187.txt new file mode 100644 index 0000000..c04234a --- /dev/null +++ b/changelog/19187.txt @@ -0,0 +1,3 @@ +```release-note:improvement +website/docs: Add rotate root documentation for azure secrets engine +``` diff --git a/changelog/19190.txt b/changelog/19190.txt new file mode 100644 index 0000000..480006b --- /dev/null +++ b/changelog/19190.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: show Get credentials button for static roles detail page when a user has the proper permissions. +``` diff --git a/changelog/19194.txt b/changelog/19194.txt new file mode 100644 index 0000000..b2a5ff3 --- /dev/null +++ b/changelog/19194.txt @@ -0,0 +1,4 @@ +```release-note:feature +**Event System (Alpha)**: Vault has a new opt-in experimental event system. Not yet suitable for production use. Events are currently only generated on writes to the KV secrets engine, but external plugins can also be updated to start generating events. +``` + diff --git a/changelog/19196.txt b/changelog/19196.txt new file mode 100644 index 0000000..aab2638 --- /dev/null +++ b/changelog/19196.txt @@ -0,0 +1,5 @@ +```release-note:feature +**PKI Cross-Cluster Revocations**: Revocation information can now be +synchronized across primary and performance replica clusters offering +a unified CRL/OCSP view of revocations across cluster boundaries. 
+``` diff --git a/changelog/19215.txt b/changelog/19215.txt new file mode 100644 index 0000000..33fea94 --- /dev/null +++ b/changelog/19215.txt @@ -0,0 +1,5 @@ +```release-note:feature +**Secrets/Auth Plugin Multiplexing**: The plugin will be multiplexed when run +as an external plugin by vault versions that support secrets/auth plugin +multiplexing (> 1.12) +``` diff --git a/changelog/19216.txt b/changelog/19216.txt new file mode 100644 index 0000000..e03e866 --- /dev/null +++ b/changelog/19216.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: adds allowed_response_headers as param for secret engine mount config +``` diff --git a/changelog/19220.txt b/changelog/19220.txt new file mode 100644 index 0000000..cbfe7e5 --- /dev/null +++ b/changelog/19220.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: remove wizard +``` diff --git a/changelog/19244.txt b/changelog/19244.txt new file mode 100644 index 0000000..63a663e --- /dev/null +++ b/changelog/19244.txt @@ -0,0 +1,4 @@ +```release-note:improvement +auth/github: Allow for an optional GitHub auth token environment variable to make authenticated requests when fetching org id +website/docs: Add docs for `VAULT_AUTH_CONFIG_GITHUB_TOKEN` environment variable when writing GitHub config +``` diff --git a/changelog/19247.txt b/changelog/19247.txt new file mode 100644 index 0000000..f51e847 --- /dev/null +++ b/changelog/19247.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/oidc: Adds support for group membership parsing when using IBM ISAM as an OIDC provider. +``` diff --git a/changelog/19252.txt b/changelog/19252.txt new file mode 100644 index 0000000..9912135 --- /dev/null +++ b/changelog/19252.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Consistently stop Vault server on exit in gen_openapi.sh +``` diff --git a/changelog/19260.txt b/changelog/19260.txt new file mode 100644 index 0000000..77138a3 --- /dev/null +++ b/changelog/19260.txt @@ -0,0 +1,3 @@ +```release-note:feature +**agent/auto-auth**: Add OCI (Oracle Cloud Infrastructure) auto-auth method +``` diff --git a/changelog/19265.txt b/changelog/19265.txt new file mode 100644 index 0000000..23d957e --- /dev/null +++ b/changelog/19265.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli/pki: Decode integer values properly in health-check configuration file +``` diff --git a/changelog/19269.txt b/changelog/19269.txt new file mode 100644 index 0000000..57ff207 --- /dev/null +++ b/changelog/19269.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli/pki: Change the pki health-check --list default config output to JSON so it's a usable configuration file +``` diff --git a/changelog/19274.txt b/changelog/19274.txt new file mode 100644 index 0000000..a7f5d8c --- /dev/null +++ b/changelog/19274.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli/pki: Fix path for role health-check warning messages +``` diff --git a/changelog/19276.txt b/changelog/19276.txt new file mode 100644 index 0000000..3731994 --- /dev/null +++ b/changelog/19276.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli/pki: Properly report permission issues within health-check mount tune checks +``` diff --git a/changelog/19290.txt b/changelog/19290.txt new file mode 100644 index 0000000..1a45115 --- /dev/null +++ b/changelog/19290.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Remove `default` and add `default-service` and `default-batch` to UI token_type for auth mount and tuning.
+``` diff --git a/changelog/19296.txt b/changelog/19296.txt new file mode 100644 index 0000000..1ef62a0 --- /dev/null +++ b/changelog/19296.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Sidebar Navigation in UI**: A new sidebar navigation panel has been added in the UI to replace the top navigation bar. +``` \ No newline at end of file diff --git a/changelog/19311.txt b/changelog/19311.txt new file mode 100644 index 0000000..5ad6e2c --- /dev/null +++ b/changelog/19311.txt @@ -0,0 +1,3 @@ +```release-note:bug +server/config: Use file.Stat when checking file permissions when VAULT_ENABLE_FILE_PERMISSIONS_CHECK is enabled +``` diff --git a/changelog/19319.txt b/changelog/19319.txt new file mode 100644 index 0000000..4702344 --- /dev/null +++ b/changelog/19319.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Improve operationId/request/response naming strategy +``` diff --git a/changelog/19334.txt b/changelog/19334.txt new file mode 100644 index 0000000..7df6826 --- /dev/null +++ b/changelog/19334.txt @@ -0,0 +1,3 @@ +```release-note:deprecation +secrets/ad: Marks the Active Directory (AD) secrets engine as deprecated. +``` \ No newline at end of file diff --git a/changelog/19365.txt b/changelog/19365.txt new file mode 100644 index 0000000..774c750 --- /dev/null +++ b/changelog/19365.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +auth/aws: Support request cancellation with AWS requests +``` + +```release-note:enhancement +secrets/aws: Support request cancellation with AWS requests +``` diff --git a/changelog/19373.txt b/changelog/19373.txt new file mode 100644 index 0000000..8775180 --- /dev/null +++ b/changelog/19373.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli/transit: Fix import, import-version command invocation +``` diff --git a/changelog/19378.txt b/changelog/19378.txt new file mode 100644 index 0000000..40a1e82 --- /dev/null +++ b/changelog/19378.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli/kv: add -mount flag to kv list +``` diff --git a/changelog/19416.txt b/changelog/19416.txt new file mode 100644 index 0000000..f2a7d32 --- /dev/null +++ b/changelog/19416.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/token: Fix cubbyhole and revocation for legacy service tokens +``` diff --git a/changelog/19428.txt b/changelog/19428.txt new file mode 100644 index 0000000..c1ae6d5 --- /dev/null +++ b/changelog/19428.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes crypto.randomUUID error in insecure contexts from third party ember-data library +``` \ No newline at end of file diff --git a/changelog/19429.txt b/changelog/19429.txt new file mode 100644 index 0000000..341fbf5 --- /dev/null +++ b/changelog/19429.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: pass encodeBase64 param to HMAC transit-key-actions. +``` diff --git a/changelog/19448.txt b/changelog/19448.txt new file mode 100644 index 0000000..8c75b79 --- /dev/null +++ b/changelog/19448.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes SSH engine config deletion +``` diff --git a/changelog/19460.txt b/changelog/19460.txt new file mode 100644 index 0000000..6334c7f --- /dev/null +++ b/changelog/19460.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: use URLSearchParams interface to capture namespace param from SSOs (ex.
ADFS) with decoded state param in callback url +``` diff --git a/changelog/19468.txt b/changelog/19468.txt new file mode 100644 index 0000000..5afce90 --- /dev/null +++ b/changelog/19468.txt @@ -0,0 +1,3 @@ +```release-note:bug +plugin/reload: Fix a possible data race with rollback manager and plugin reload +``` diff --git a/changelog/19472.txt b/changelog/19472.txt new file mode 100644 index 0000000..db9ec72 --- /dev/null +++ b/changelog/19472.txt @@ -0,0 +1,3 @@ +```release-note:improvement +autopilot: Update version to v0.2.0 to add better support for respecting min quorum +``` diff --git a/changelog/19483.txt b/changelog/19483.txt new file mode 100644 index 0000000..c7ba6f6 --- /dev/null +++ b/changelog/19483.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent: Fix panic when SIGHUP is issued to Agent while it has a non-TLS listener. +``` diff --git a/changelog/19495.txt b/changelog/19495.txt new file mode 100644 index 0000000..dac2ca0 --- /dev/null +++ b/changelog/19495.txt @@ -0,0 +1,3 @@ +```release-note:bug +shamir: change mul and div implementations to be constant-time +``` \ No newline at end of file diff --git a/changelog/19519.txt b/changelog/19519.txt new file mode 100644 index 0000000..6756f62 --- /dev/null +++ b/changelog/19519.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/transit: Allow importing RSA-PSS OID (1.2.840.113549.1.1.10) private keys via BYOK. +``` diff --git a/changelog/19520.txt b/changelog/19520.txt new file mode 100644 index 0000000..726be2c --- /dev/null +++ b/changelog/19520.txt @@ -0,0 +1,3 @@ +```release-note:improvement +http: Support responding to HEAD operation from plugins +``` diff --git a/changelog/19541.txt b/changelog/19541.txt new file mode 100644 index 0000000..9bdecc3 --- /dev/null +++ b/changelog/19541.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes oidc tabs in auth form submitting with the root's default_role value after a namespace has been inputted +``` diff --git a/changelog/19545.txt b/changelog/19545.txt new file mode 100644 index 0000000..615742c --- /dev/null +++ b/changelog/19545.txt @@ -0,0 +1,3 @@ +```release-note:improvement +database/elasticsearch: Update error messages resulting from Elasticsearch API errors +``` \ No newline at end of file diff --git a/changelog/19585.txt b/changelog/19585.txt new file mode 100644 index 0000000..f68c0dc --- /dev/null +++ b/changelog/19585.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fixed issue with remounting mounts that have a non-trailing space in the 'to' or 'from' paths. +``` diff --git a/changelog/19591.txt b/changelog/19591.txt new file mode 100644 index 0000000..f15d397 --- /dev/null +++ b/changelog/19591.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: validate name identifiers in mssql physical storage backend prior to use +``` diff --git a/changelog/19593.txt b/changelog/19593.txt new file mode 100644 index 0000000..8f17057 --- /dev/null +++ b/changelog/19593.txt @@ -0,0 +1,4 @@ +```release-note:improvement +events: Suppress log warnings triggered when events are sent but the events system is not enabled. +``` + diff --git a/changelog/19600.txt b/changelog/19600.txt new file mode 100644 index 0000000..f2c1f71 --- /dev/null +++ b/changelog/19600.txt @@ -0,0 +1,3 @@ +```release-note:bug +openapi: Fix logic for labeling unauthenticated/sudo paths.
+``` diff --git a/changelog/19616.txt b/changelog/19616.txt new file mode 100644 index 0000000..3afcc60 --- /dev/null +++ b/changelog/19616.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/postgresql: Add configuration to scram-sha-256 encrypt passwords on Vault before sending them to PostgreSQL +``` \ No newline at end of file diff --git a/changelog/19624.txt b/changelog/19624.txt new file mode 100644 index 0000000..7bc2df6 --- /dev/null +++ b/changelog/19624.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Fix PKI revocation request forwarding from standby nodes due to an error wrapping bug +``` diff --git a/changelog/19640.txt b/changelog/19640.txt new file mode 100644 index 0000000..8dcf59b --- /dev/null +++ b/changelog/19640.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/ldap: Invalidates WAL entry for static role if `password_policy` has changed. +``` diff --git a/changelog/19676.txt b/changelog/19676.txt new file mode 100644 index 0000000..090dc80 --- /dev/null +++ b/changelog/19676.txt @@ -0,0 +1,4 @@ +```release-note:improvement +core: Allow overriding gRPC connect timeout via VAULT_GRPC_MIN_CONNECT_TIMEOUT. This is an env var rather than a config setting because we don't expect this to ever be needed. It's being added as a last-ditch +option in case all else fails for some replication issues we may not have fully reproduced. +``` diff --git a/changelog/19703.txt b/changelog/19703.txt new file mode 100644 index 0000000..6bf8e5c --- /dev/null +++ b/changelog/19703.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes issue navigating back a level using the breadcrumb from secret metadata view +``` \ No newline at end of file diff --git a/changelog/19721.txt b/changelog/19721.txt new file mode 100644 index 0000000..9818a0f --- /dev/null +++ b/changelog/19721.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix regression breaking non-raft clusters whose nodes share the same cluster_addr/api_addr. +``` \ No newline at end of file diff --git a/changelog/19776.txt b/changelog/19776.txt new file mode 100644 index 0000000..786cfd3 --- /dev/null +++ b/changelog/19776.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Vault Agent now reports its name and version as part of the User-Agent header in all requests issued.
+``` diff --git a/changelog/19791.txt b/changelog/19791.txt new file mode 100644 index 0000000..26722cd --- /dev/null +++ b/changelog/19791.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: add allowed_managed_keys field to secret engine mount options +``` diff --git a/changelog/19798.txt b/changelog/19798.txt new file mode 100644 index 0000000..4bae8b6 --- /dev/null +++ b/changelog/19798.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/terraform: upgrades dependencies +``` \ No newline at end of file diff --git a/changelog/19799.txt b/changelog/19799.txt new file mode 100644 index 0000000..aee76ca --- /dev/null +++ b/changelog/19799.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix bad link to namespace when namespace name includes `.` +``` \ No newline at end of file diff --git a/changelog/19814.txt b/changelog/19814.txt new file mode 100644 index 0000000..687527e --- /dev/null +++ b/changelog/19814.txt @@ -0,0 +1,3 @@ +```release-note:improvement +audit: add plugin metadata, including plugin name, type, version, sha256, and whether plugin is external, to audit logging +``` \ No newline at end of file diff --git a/changelog/19829.txt b/changelog/19829.txt new file mode 100644 index 0000000..e8472b2 --- /dev/null +++ b/changelog/19829.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/ad: upgrades dependencies +``` \ No newline at end of file diff --git a/changelog/19846.txt b/changelog/19846.txt new file mode 100644 index 0000000..269b117 --- /dev/null +++ b/changelog/19846.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/alicloud: upgrades dependencies +``` diff --git a/changelog/19861.txt b/changelog/19861.txt new file mode 100644 index 0000000..ee5bc70 --- /dev/null +++ b/changelog/19861.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/mongodbatlas: upgrades dependencies +``` \ No newline at end of file diff --git a/changelog/19862.txt b/changelog/19862.txt new file mode 100644 index 0000000..c1ce6d8 --- /dev/null +++ b/changelog/19862.txt @@ -0,0 +1,3 @@ +```release-note:improvement +build: Prefer GOBIN when set over GOPATH/bin when building the binary +``` diff --git a/changelog/19875.txt b/changelog/19875.txt new file mode 100644 index 0000000..1167e39 --- /dev/null +++ b/changelog/19875.txt @@ -0,0 +1,3 @@ +```release-note:bug +helper/random: Fix race condition in string generator helper +``` diff --git a/changelog/19878.txt b/changelog/19878.txt new file mode 100644 index 0000000..4135434 --- /dev/null +++ b/changelog/19878.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Remove the Bulma CSS framework. 
+``` \ No newline at end of file diff --git a/changelog/19891.txt b/changelog/19891.txt new file mode 100644 index 0000000..b030151 --- /dev/null +++ b/changelog/19891.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core (enterprise): add configuration for license reporting +``` \ No newline at end of file diff --git a/changelog/19901.txt b/changelog/19901.txt new file mode 100644 index 0000000..8e0bbbd --- /dev/null +++ b/changelog/19901.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Updates UI javascript dependencies +``` \ No newline at end of file diff --git a/changelog/19913.txt b/changelog/19913.txt new file mode 100644 index 0000000..eccdec6 --- /dev/null +++ b/changelog/19913.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Adds whitespace warning to secrets engine and auth method path inputs +``` \ No newline at end of file diff --git a/changelog/19954.txt b/changelog/19954.txt new file mode 100644 index 0000000..e0ff45f --- /dev/null +++ b/changelog/19954.txt @@ -0,0 +1,3 @@ +```release-note:improvement +database/mongodb: upgrade mongo driver to 1.11 +``` diff --git a/changelog/19993.txt b/changelog/19993.txt new file mode 100644 index 0000000..9065086 --- /dev/null +++ b/changelog/19993.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/openldap: upgrades dependencies +``` \ No newline at end of file diff --git a/changelog/20019.txt b/changelog/20019.txt new file mode 100644 index 0000000..0483d17 --- /dev/null +++ b/changelog/20019.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/activity: add an endpoint to write test activity log data, guarded by a build flag +``` \ No newline at end of file diff --git a/changelog/20034.txt b/changelog/20034.txt new file mode 100644 index 0000000..c105079 --- /dev/null +++ b/changelog/20034.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/aws: Revert changes that removed the lease on STS credentials, while leaving the new ttl field in place. +``` diff --git a/changelog/20044.txt b/changelog/20044.txt new file mode 100644 index 0000000..014e61b --- /dev/null +++ b/changelog/20044.txt @@ -0,0 +1,4 @@ +```release-note:improvement +core: Add a `raft` sub-field to the `storage` and `ha_storage` details provided by the +`/sys/config/state/sanitized` endpoint in order to include the `max_entry_size`. +``` diff --git a/changelog/20057.txt b/changelog/20057.txt new file mode 100644 index 0000000..585a07d --- /dev/null +++ b/changelog/20057.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Ensure cross-cluster delta WAL write failure only logs to avoid unintended forwarding.
+``` diff --git a/changelog/20064.txt b/changelog/20064.txt new file mode 100644 index 0000000..c539119 --- /dev/null +++ b/changelog/20064.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes browser console formatting for help command output +``` \ No newline at end of file diff --git a/changelog/20070.txt b/changelog/20070.txt new file mode 100644 index 0000000..34e6e55 --- /dev/null +++ b/changelog/20070.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes remaining doc links to include /vault in path +``` \ No newline at end of file diff --git a/changelog/20073.txt b/changelog/20073.txt new file mode 100644 index 0000000..10c21a5 --- /dev/null +++ b/changelog/20073.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/activity: refactor the activity log's generation of precomputed queries +``` \ No newline at end of file diff --git a/changelog/20078.txt b/changelog/20078.txt new file mode 100644 index 0000000..8749354 --- /dev/null +++ b/changelog/20078.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/activity: error when attempting to update retention configuration below the minimum +``` \ No newline at end of file diff --git a/changelog/20086.txt b/changelog/20086.txt new file mode 100644 index 0000000..9511c97 --- /dev/null +++ b/changelog/20086.txt @@ -0,0 +1,4 @@ +```release-note:improvement +api: `/sys/internal/counters/config` endpoint now contains read-only +`reporting_enabled` and `billing_start_timestamp` fields. +``` diff --git a/changelog/20109.txt b/changelog/20109.txt new file mode 100644 index 0000000..8c7cb3b --- /dev/null +++ b/changelog/20109.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sys/wrapping: Add an example of how to unwrap without authentication in Vault +``` diff --git a/changelog/20125.txt b/changelog/20125.txt new file mode 100644 index 0000000..07dd820 --- /dev/null +++ b/changelog/20125.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: updates clients configuration edit form state based on census reporting configuration +``` \ No newline at end of file diff --git a/changelog/20144.txt b/changelog/20144.txt new file mode 100644 index 0000000..ef8b9a0 --- /dev/null +++ b/changelog/20144.txt @@ -0,0 +1,4 @@ +```release-note:improvement +sdk/ldaputil: added `connection_timeout` to tune connection timeout duration +for all LDAP plugins. +``` diff --git a/changelog/20150.txt b/changelog/20150.txt new file mode 100644 index 0000000..0ea8259 --- /dev/null +++ b/changelog/20150.txt @@ -0,0 +1,4 @@ +```release-note:improvement +api: `/sys/internal/counters/config` endpoint now contains read-only +`minimum_retention_months`. +``` diff --git a/changelog/20154.txt b/changelog/20154.txt new file mode 100644 index 0000000..7bda362 --- /dev/null +++ b/changelog/20154.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/cert: Include OCSP parameters in read CA certificate role response. +``` diff --git a/changelog/20163.txt b/changelog/20163.txt new file mode 100644 index 0000000..0b845fb --- /dev/null +++ b/changelog/20163.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: adds warning for commas in stringArray inputs and updates tooltip help text to remove references to comma separation +``` diff --git a/changelog/20181.txt b/changelog/20181.txt new file mode 100644 index 0000000..121c869 --- /dev/null +++ b/changelog/20181.txt @@ -0,0 +1,4 @@ +```release-note:bug +sdk/helper/ocsp: Workaround bug in Go's ocsp.ParseResponse(...), causing validation to fail with embedded CA certificates. +auth/cert: Fix OCSP validation against Vault's PKI engine.
+``` diff --git a/changelog/20201.txt b/changelog/20201.txt new file mode 100644 index 0000000..d50c9bc --- /dev/null +++ b/changelog/20201.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Decrease size and improve compatibility of OCSP responses by removing issuer certificate. +``` diff --git a/changelog/20216.txt b/changelog/20216.txt new file mode 100644 index 0000000..59ee78c --- /dev/null +++ b/changelog/20216.txt @@ -0,0 +1,3 @@ +```release-note:bug +website/docs: Fix Kubernetes Auth Code Example to use the correct whitespace in import. +``` diff --git a/changelog/20220.txt b/changelog/20220.txt new file mode 100644 index 0000000..1cf72aa --- /dev/null +++ b/changelog/20220.txt @@ -0,0 +1,3 @@ +```release-note:bug +pki: Fix automatically turning off CRL signing on upgrade to Vault >= 1.12, if CA Key Usage disallows it +``` diff --git a/changelog/20224.txt b/changelog/20224.txt new file mode 100644 index 0000000..7ec5bf6 --- /dev/null +++ b/changelog/20224.txt @@ -0,0 +1,3 @@ +```release-note:improvement +command/server: New -dev-cluster-json writes a file describing the dev cluster in -dev and -dev-three-node modes, plus -dev-three-node now enables unauthenticated metrics and pprof requests. +``` diff --git a/changelog/20234.txt b/changelog/20234.txt new file mode 100644 index 0000000..1f20bdc --- /dev/null +++ b/changelog/20234.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/cert: Return OCSP validation errors to the caller during login. +``` diff --git a/changelog/20235.txt b/changelog/20235.txt new file mode 100644 index 0000000..d1b9f8a --- /dev/null +++ b/changelog/20235.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: remove use of htmlSafe except when first sanitized +``` diff --git a/changelog/20243.txt b/changelog/20243.txt new file mode 100644 index 0000000..8d5b044 --- /dev/null +++ b/changelog/20243.txt @@ -0,0 +1,4 @@ +```release-note:improvement +cli/namespace: Add detailed flag to output additional namespace information +such as namespace IDs and custom metadata. +``` diff --git a/changelog/20247.txt b/changelog/20247.txt new file mode 100644 index 0000000..91f2f0d --- /dev/null +++ b/changelog/20247.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sdk: Add new docker-based cluster testing framework to the sdk. +``` diff --git a/changelog/20253.txt b/changelog/20253.txt new file mode 100644 index 0000000..19edae1 --- /dev/null +++ b/changelog/20253.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Add warning when issuer lacks KeyUsage during CRL rebuilds; expose in logs and on rotation.
+``` diff --git a/changelog/20257.txt b/changelog/20257.txt new file mode 100644 index 0000000..c2dba45 --- /dev/null +++ b/changelog/20257.txt @@ -0,0 +1,3 @@ +```release-note:bug +command/server: Fix incorrect paths in generated config for `-dev-tls` flag on Windows +``` diff --git a/changelog/20261.txt b/changelog/20261.txt new file mode 100644 index 0000000..5f4eb97 --- /dev/null +++ b/changelog/20261.txt @@ -0,0 +1,3 @@ +```release-note:improvement +physical/etcd: Upgrade etcd3 client to v3.5.7 +``` \ No newline at end of file diff --git a/changelog/20263.txt b/changelog/20263.txt new file mode 100644 index 0000000..8556fe8 --- /dev/null +++ b/changelog/20263.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix OIDC provider logo showing when domain doesn't match +``` diff --git a/changelog/20265.txt b/changelog/20265.txt new file mode 100644 index 0000000..8e27875 --- /dev/null +++ b/changelog/20265.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Add Config.TLSConfig method to fetch the TLS configuration from a client config. +``` \ No newline at end of file diff --git a/changelog/20276.txt b/changelog/20276.txt new file mode 100644 index 0000000..71f288a --- /dev/null +++ b/changelog/20276.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Include CA serial number, key UUID on issuers list endpoint. +``` diff --git a/changelog/20285.txt b/changelog/20285.txt new file mode 100644 index 0000000..2bc2241 --- /dev/null +++ b/changelog/20285.txt @@ -0,0 +1,3 @@ +```release-note:bug +openapi: Small fixes for OpenAPI display attributes. Changed "log-in" to "login" +``` diff --git a/changelog/20294.txt b/changelog/20294.txt new file mode 100644 index 0000000..92f7c29 --- /dev/null +++ b/changelog/20294.txt @@ -0,0 +1,3 @@ +```release-note:improvement +Add debug symbols back to builds to fix Dynatrace support +``` diff --git a/changelog/20341.txt b/changelog/20341.txt new file mode 100644 index 0000000..652e573 --- /dev/null +++ b/changelog/20341.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Fix patching of leaf_not_after_behavior on issuers. +``` diff --git a/changelog/20354.txt b/changelog/20354.txt new file mode 100644 index 0000000..abdacb7 --- /dev/null +++ b/changelog/20354.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Include per-issuer enable_aia_url_templating in issuer read endpoint. +``` diff --git a/changelog/20368.txt b/changelog/20368.txt new file mode 100644 index 0000000..bca5957 --- /dev/null +++ b/changelog/20368.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/identity: Allow updates of only the custom-metadata for entity alias.
+``` \ No newline at end of file diff --git a/changelog/20375.txt b/changelog/20375.txt new file mode 100644 index 0000000..92caf1e --- /dev/null +++ b/changelog/20375.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: prevent panic on login after deleting a namespace that had an MFA enforcement +``` \ No newline at end of file diff --git a/changelog/20411.txt b/changelog/20411.txt new file mode 100644 index 0000000..0935090 --- /dev/null +++ b/changelog/20411.txt @@ -0,0 +1,3 @@ +```release-note:improvement +audit: add a `mount_point` field to audit requests and response entries +``` diff --git a/changelog/20418.txt b/changelog/20418.txt new file mode 100644 index 0000000..596b7e4 --- /dev/null +++ b/changelog/20418.txt @@ -0,0 +1,3 @@ +```release-note:bug +command/server: fixes panic in Vault server command when running in recovery mode +``` \ No newline at end of file diff --git a/changelog/20425.txt b/changelog/20425.txt new file mode 100644 index 0000000..20869fc --- /dev/null +++ b/changelog/20425.txt @@ -0,0 +1,3 @@ +```release-note:feature +**MongoDB Atlas Database Secrets**: Adds support for client certificate credentials +``` diff --git a/changelog/20430.txt b/changelog/20430.txt new file mode 100644 index 0000000..5ac95f1 --- /dev/null +++ b/changelog/20430.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix secret render when path includes %. Resolves #11616. +``` diff --git a/changelog/20431.txt b/changelog/20431.txt new file mode 100644 index 0000000..a0083d8 --- /dev/null +++ b/changelog/20431.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add download button for each secret value in KV v2 +``` diff --git a/changelog/20441.txt b/changelog/20441.txt new file mode 100644 index 0000000..6287848 --- /dev/null +++ b/changelog/20441.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Allow determining existing issuers and keys on import. +``` diff --git a/changelog/20442.txt b/changelog/20442.txt new file mode 100644 index 0000000..09636b6 --- /dev/null +++ b/changelog/20442.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Add missing fields to tidy-status, include new last_auto_tidy_finished field. +``` diff --git a/changelog/20453.txt b/changelog/20453.txt new file mode 100644 index 0000000..e605791 --- /dev/null +++ b/changelog/20453.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/ldap: Set default value for `max_page_size` properly +``` diff --git a/changelog/20464.txt b/changelog/20464.txt new file mode 100644 index 0000000..6b58153 --- /dev/null +++ b/changelog/20464.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli: Add walkSecretsTree helper function, which recursively walks secrets rooted at the given path +``` diff --git a/changelog/20477.txt b/changelog/20477.txt new file mode 100644 index 0000000..e95305a --- /dev/null +++ b/changelog/20477.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: CLI now accepts days as a unit of time for TTL-like flags +``` diff --git a/changelog/20481.txt b/changelog/20481.txt new file mode 100644 index 0000000..c6f2711 --- /dev/null +++ b/changelog/20481.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add filtering by engine type and engine name to the Secret Engine list view.
+``` diff --git a/changelog/20488.txt b/changelog/20488.txt new file mode 100644 index 0000000..5ea0f78 --- /dev/null +++ b/changelog/20488.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli: Improve addPrefixToKVPath helper +``` diff --git a/changelog/20502.txt b/changelog/20502.txt new file mode 100644 index 0000000..153309a --- /dev/null +++ b/changelog/20502.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: disable printing flag warning messages for the ssh command +``` diff --git a/changelog/20519.txt b/changelog/20519.txt new file mode 100644 index 0000000..92f7c29 --- /dev/null +++ b/changelog/20519.txt @@ -0,0 +1,3 @@ +```release-note:improvement +Add debug symbols back to builds to fix Dynatrace support +``` diff --git a/changelog/20530.txt b/changelog/20530.txt new file mode 100644 index 0000000..6f6d04b --- /dev/null +++ b/changelog/20530.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Environment Variables through Vault Agent**: Introducing a new process-supervisor mode for Vault Agent which allows injecting secrets as environment variables into a child process using a new `env_template` configuration stanza. The process-supervisor configuration can be generated with a new `vault agent generate-config` helper tool. +``` diff --git a/changelog/20536.txt b/changelog/20536.txt new file mode 100644 index 0000000..62aa936 --- /dev/null +++ b/changelog/20536.txt @@ -0,0 +1,3 @@ +```release-note:feature +**AWS Static Roles**: The AWS Secrets Engine can manage static roles configured by users. +``` diff --git a/changelog/20548.txt b/changelog/20548.txt new file mode 100644 index 0000000..fed5d2b --- /dev/null +++ b/changelog/20548.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Vault Proxy**: Introduced Vault Proxy, a new subcommand of the Vault binary that can be invoked using `vault proxy -config=config.hcl`. It currently has the same feature set as Vault Agent's API proxy, but the two may diverge in the future. We plan to deprecate the API proxy functionality of Vault Agent in a future release. +``` diff --git a/changelog/20559.txt b/changelog/20559.txt new file mode 100644 index 0000000..2ff6422 --- /dev/null +++ b/changelog/20559.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core, secrets/pki, audit: Update dependency go-jose to v3 due to v2 deprecation.
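The `env_template` entry above (changelog/20530.txt) is easiest to picture as a config file. Below is a minimal sketch of an Agent configuration in process-supervisor mode; the auto-auth method, secret path, and child command are illustrative assumptions, not taken from the changelog:

```hcl
# Sketch of Vault Agent process-supervisor mode (assumed paths and names).
auto_auth {
  method "approle" {
    config = {
      role_id_file_path   = "/etc/vault/role_id"   # assumed path
      secret_id_file_path = "/etc/vault/secret_id" # assumed path
    }
  }
}

# Each env_template renders one environment variable for the child process.
env_template "DB_PASSWORD" {
  contents = "{{ with secret \"secret/data/app\" }}{{ .Data.data.db_password }}{{ end }}"
}

# The supervised child process; it receives DB_PASSWORD in its environment.
exec {
  command = ["./my-app"]
}
```

Per the same entry, `vault agent generate-config` can scaffold a starting point for such a file. The Vault Proxy entry (changelog/20548.txt) similarly lends itself to a short sketch; the listener address and auto-auth wiring below are assumptions rather than anything stated in the note:

```hcl
# Sketch of a minimal Vault Proxy config.hcl (assumed addresses).
vault {
  address = "https://vault.example.com:8200"
}

# Local listener that applications use instead of talking to Vault directly.
listener "tcp" {
  address     = "127.0.0.1:8100"
  tls_disable = true
}

# Forward proxied requests with the auto-auth token attached.
api_proxy {
  use_auto_auth_token = true
}

auto_auth {
  method "approle" {
    config = {
      role_id_file_path   = "/etc/vault/role_id"   # assumed path
      secret_id_file_path = "/etc/vault/secret_id" # assumed path
    }
  }
}
```

It would then be started with `vault proxy -config=config.hcl`, as the entry notes.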
+``` diff --git a/changelog/20569.txt b/changelog/20569.txt new file mode 100644 index 0000000..e10a464 --- /dev/null +++ b/changelog/20569.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Add logic to validate env_template entries in configuration +``` diff --git a/changelog/20590.txt b/changelog/20590.txt new file mode 100644 index 0000000..c1c7c9e --- /dev/null +++ b/changelog/20590.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Update Web CLI with examples and a new `kv-get` command for reading kv v2 data and metadata +``` diff --git a/changelog/20595.txt b/changelog/20595.txt new file mode 100644 index 0000000..982f414 --- /dev/null +++ b/changelog/20595.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Add the ability to decode a generated encoded root token via the REST API +``` diff --git a/changelog/20603.txt b/changelog/20603.txt new file mode 100644 index 0000000..c3e7e2b --- /dev/null +++ b/changelog/20603.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes issue creating mfa login enforcement from method enforcements tab +``` \ No newline at end of file diff --git a/changelog/20609.txt b/changelog/20609.txt new file mode 100644 index 0000000..fe92833 --- /dev/null +++ b/changelog/20609.txt @@ -0,0 +1,4 @@ +```release-note:improvement +command/server: Add support for dumping pprof files to the filesystem via SIGUSR2 when +`VAULT_PPROF_WRITE_TO_FILE=true` is set on the server. +``` \ No newline at end of file diff --git a/changelog/20626.txt b/changelog/20626.txt new file mode 100644 index 0000000..2a13cee --- /dev/null +++ b/changelog/20626.txt @@ -0,0 +1,4 @@ +```release-note:improvement +activitylog: EntityRecord protobufs now contain a ClientType field for +distinguishing client sources. +``` diff --git a/changelog/20628.txt b/changelog/20628.txt new file mode 100644 index 0000000..9788146 --- /dev/null +++ b/changelog/20628.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: initial implementation of a process runner for injecting secrets as environment variables via Vault Agent +``` \ No newline at end of file diff --git a/changelog/20629.txt b/changelog/20629.txt new file mode 100644 index 0000000..f5692f7 --- /dev/null +++ b/changelog/20629.txt @@ -0,0 +1,3 @@ +```release-note:improvement +command/server (enterprise): -dev-three-node now creates perf standbys instead of regular standbys. +``` \ No newline at end of file diff --git a/changelog/20636.txt b/changelog/20636.txt new file mode 100644 index 0000000..6e20fcd --- /dev/null +++ b/changelog/20636.txt @@ -0,0 +1,3 @@ +```release-note:bug +api: Properly handle nil identity_policies in secret data +``` \ No newline at end of file diff --git a/changelog/20642.txt b/changelog/20642.txt new file mode 100644 index 0000000..8b8bc40 --- /dev/null +++ b/changelog/20642.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: add subject key identifier to read key response +``` diff --git a/changelog/20643.txt b/changelog/20643.txt new file mode 100644 index 0000000..340ec5b --- /dev/null +++ b/changelog/20643.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: report intermediate error messages during request forwarding +``` diff --git a/changelog/20652.txt b/changelog/20652.txt new file mode 100644 index 0000000..c41e750 --- /dev/null +++ b/changelog/20652.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/transit: Respond to writes with updated key policy, cache configuration.
+``` diff --git a/changelog/20654.txt b/changelog/20654.txt new file mode 100644 index 0000000..91e5674 --- /dev/null +++ b/changelog/20654.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/pki: Warn when issuing leaf certificates from CSRs with Basic Constraints. In the future, issuance of non-CA leaf certs from CSRs with asserted IsCA Basic Constraints will be prohibited. +``` diff --git a/changelog/20664.txt b/changelog/20664.txt new file mode 100644 index 0000000..6f2b4ab --- /dev/null +++ b/changelog/20664.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Support setting both maintain_stored_certificate_counts=false and publish_stored_certificate_count_metrics=false explicitly in tidy config. +``` diff --git a/changelog/20668.txt b/changelog/20668.txt new file mode 100644 index 0000000..f3f840c --- /dev/null +++ b/changelog/20668.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/transform: Added importing of keys and key versions into the Transform secrets engine using the command 'vault transform import' and 'vault transform import-version'. +``` diff --git a/changelog/20680.txt b/changelog/20680.txt new file mode 100644 index 0000000..ff80ac4 --- /dev/null +++ b/changelog/20680.txt @@ -0,0 +1,6 @@ +```release-note:improvement +core (enterprise): support reloading configuration for automated reporting via SIGHUP +``` +```release-note:improvement +core (enterprise): license updates trigger a reload of reporting and the activity log +``` \ No newline at end of file diff --git a/changelog/20694.txt b/changelog/20694.txt new file mode 100644 index 0000000..07f790a --- /dev/null +++ b/changelog/20694.txt @@ -0,0 +1,4 @@ +```release-note:improvement +api: GET ... /sys/internal/counters/activity?current_billing_period=true now +results in a response which contains the full billing period +``` diff --git a/changelog/20701.txt b/changelog/20701.txt new file mode 100644 index 0000000..24942d5 --- /dev/null +++ b/changelog/20701.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Fix race during runUnifiedTransfer when deciding to skip re-running a test within a short window. +``` diff --git a/changelog/20725.txt b/changelog/20725.txt new file mode 100644 index 0000000..04399cc --- /dev/null +++ b/changelog/20725.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/gcp: Updated plugin from v0.15.0 to v0.16.0 +``` diff --git a/changelog/20731.txt b/changelog/20731.txt new file mode 100644 index 0000000..1896c19 --- /dev/null +++ b/changelog/20731.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes auto_rotate_period ttl input for transit keys +``` diff --git a/changelog/20736.txt b/changelog/20736.txt new file mode 100644 index 0000000..1c4c3d4 --- /dev/null +++ b/changelog/20736.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/transit: Support BYOK-encrypted export of keys to securely allow synchronizing specific keys and versions across clusters.
+``` diff --git a/changelog/20741.txt b/changelog/20741.txt new file mode 100644 index 0000000..8034e45 --- /dev/null +++ b/changelog/20741.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Add integration tests for agent running in process supervisor mode +``` diff --git a/changelog/20742.txt b/changelog/20742.txt new file mode 100644 index 0000000..d91237e --- /dev/null +++ b/changelog/20742.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/mongodbatlas: Updated plugin from v0.9.1 to v0.10.0 +``` diff --git a/changelog/20745.txt b/changelog/20745.txt new file mode 100644 index 0000000..57a4391 --- /dev/null +++ b/changelog/20745.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/centrify: Updated plugin from v0.14.0 to v0.15.1 +``` diff --git a/changelog/20747.txt b/changelog/20747.txt new file mode 100644 index 0000000..4c600d2 --- /dev/null +++ b/changelog/20747.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add filtering by auth type and auth name to the Authentication Method list view. +``` diff --git a/changelog/20750.txt b/changelog/20750.txt new file mode 100644 index 0000000..75a3e1d --- /dev/null +++ b/changelog/20750.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/ad: Updated plugin from v0.10.1-0.20230329210417-0b2cdb26cf5d to v0.16.0 +``` \ No newline at end of file diff --git a/changelog/20751.txt b/changelog/20751.txt new file mode 100644 index 0000000..9b78b3d --- /dev/null +++ b/changelog/20751.txt @@ -0,0 +1,3 @@ +```release-note:change +database/redis-elasticache: Updated plugin from v0.2.0 to v0.2.1 +``` diff --git a/changelog/20752.txt b/changelog/20752.txt new file mode 100644 index 0000000..667bc37 --- /dev/null +++ b/changelog/20752.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Vault PKI ACME Server**: Support for the ACME certificate lifecycle management protocol has been added to the Vault PKI Plugin. This allows standard ACME clients, such as the EFF's certbot and the CNCF's k8s cert-manager, to request certificates from a Vault server with no knowledge of Vault APIs or authentication mechanisms. For public-facing Vault instances, we recommend requiring External Account Bindings (EAB) to limit the ability to request certificates to only authenticated clients. 
+``` diff --git a/changelog/20758.txt b/changelog/20758.txt new file mode 100644 index 0000000..7eed0b0 --- /dev/null +++ b/changelog/20758.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/alicloud: Updated plugin from v0.14.0 to v0.15.0 +``` \ No newline at end of file diff --git a/changelog/20763.txt b/changelog/20763.txt new file mode 100644 index 0000000..311dcb0 --- /dev/null +++ b/changelog/20763.txt @@ -0,0 +1,3 @@ +```release-note:improvement +database/redis: Upgrade plugin dependencies +``` diff --git a/changelog/20764.txt b/changelog/20764.txt new file mode 100644 index 0000000..adc14e0 --- /dev/null +++ b/changelog/20764.txt @@ -0,0 +1,3 @@ +```release-note:change +database/couchbase: Updated plugin from v0.9.0 to v0.9.2 +``` diff --git a/changelog/20767.txt b/changelog/20767.txt new file mode 100644 index 0000000..b6d853a --- /dev/null +++ b/changelog/20767.txt @@ -0,0 +1,3 @@ +```release-note:improvement +database/elasticsearch: Upgrade plugin dependencies +``` diff --git a/changelog/20771.txt b/changelog/20771.txt new file mode 100644 index 0000000..5cc1ee2 --- /dev/null +++ b/changelog/20771.txt @@ -0,0 +1,4 @@ +```release-note:improvement +auth/kerberos: Enable plugin multiplexing +auth/kerberos: Upgrade plugin dependencies +``` diff --git a/changelog/20777.txt b/changelog/20777.txt new file mode 100644 index 0000000..ec3c9e4 --- /dev/null +++ b/changelog/20777.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/azure: Updated plugin from v0.15.0 to v0.16.0 +``` \ No newline at end of file diff --git a/changelog/20783.txt b/changelog/20783.txt new file mode 100644 index 0000000..372d36c --- /dev/null +++ b/changelog/20783.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix writes to readonly storage on performance standbys when user lockout feature is enabled.
+``` \ No newline at end of file diff --git a/changelog/20784.txt b/changelog/20784.txt new file mode 100644 index 0000000..b24a857 --- /dev/null +++ b/changelog/20784.txt @@ -0,0 +1,4 @@ +```release-note:improvement +secrets/gcpkms: Enable plugin multiplexing +secrets/gcpkms: Upgrade plugin dependencies +``` diff --git a/changelog/20787.txt b/changelog/20787.txt new file mode 100644 index 0000000..a69b90d --- /dev/null +++ b/changelog/20787.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/alicloud: Updated plugin from v0.5.4-beta1.0.20230330124709-3fcfc5914a22 to v0.15.0 +``` \ No newline at end of file diff --git a/changelog/20799.txt b/changelog/20799.txt new file mode 100644 index 0000000..2e17ff9 --- /dev/null +++ b/changelog/20799.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/jwt: Updated plugin from v0.15.0 to v0.16.0 +``` diff --git a/changelog/20802.txt b/changelog/20802.txt new file mode 100644 index 0000000..de8e1b9 --- /dev/null +++ b/changelog/20802.txt @@ -0,0 +1,6 @@ +```release-note:change +secrets/kubernetes: Update plugin to v0.5.0 +``` +```release-note:change +auth/kubernetes: Update plugin to v0.16.0 +``` diff --git a/changelog/20807.txt b/changelog/20807.txt new file mode 100644 index 0000000..3a3c1f4 --- /dev/null +++ b/changelog/20807.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/database/snowflake: Updated plugin from v0.7.0 to v0.8.0 +``` \ No newline at end of file diff --git a/changelog/20816.txt b/changelog/20816.txt new file mode 100644 index 0000000..aae4b59 --- /dev/null +++ b/changelog/20816.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/azure: Updated plugin from v0.13.0 to v0.15.0 +``` diff --git a/changelog/20818.txt b/changelog/20818.txt new file mode 100644 index 0000000..885ee92 --- /dev/null +++ b/changelog/20818.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/gcp: Updated plugin from v0.15.0 to v0.16.0 +``` diff --git a/changelog/20825.txt b/changelog/20825.txt new file mode 100644 index 0000000..da99369 --- /dev/null +++ b/changelog/20825.txt @@ -0,0 +1,3 @@ +```release-note:change +storage/aerospike: Aerospike storage shouldn't be used on 32-bit architectures and is now unsupported on them. +``` \ No newline at end of file diff --git a/changelog/20826.txt b/changelog/20826.txt new file mode 100644 index 0000000..8a693d9 --- /dev/null +++ b/changelog/20826.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Revert #19676 (VAULT_GRPC_MIN_CONNECT_TIMEOUT env var) as we decided it was unnecessary. +``` \ No newline at end of file diff --git a/changelog/20834.txt b/changelog/20834.txt new file mode 100644 index 0000000..f17f1d3 --- /dev/null +++ b/changelog/20834.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Remove feature toggle for SSCTs, i.e. the env var VAULT_DISABLE_SERVER_SIDE_CONSISTENT_TOKENS. +``` \ No newline at end of file diff --git a/changelog/20864.txt b/changelog/20864.txt new file mode 100644 index 0000000..7193c6b --- /dev/null +++ b/changelog/20864.txt @@ -0,0 +1,5 @@ +```release-note:bug +secrets/transit: Fix export of HMAC-only key, correctly exporting the key used for sign operations. For consumers of the previously incorrect key, use the plaintext export to retrieve these incorrect keys and import them as new versions. +secrets/transit: Fix bug related to shorter dedicated HMAC key sizing. +sdk/helper/keysutil: New HMAC type policies will have HMACKey equal to Key and be copied over on import. 
+``` diff --git a/changelog/20881.txt b/changelog/20881.txt new file mode 100644 index 0000000..fd3e6d5 --- /dev/null +++ b/changelog/20881.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sdk/framework: Fix non-deterministic ordering of 'required' fields in OpenAPI spec +``` diff --git a/changelog/20882.txt b/changelog/20882.txt new file mode 100644 index 0000000..3694468 --- /dev/null +++ b/changelog/20882.txt @@ -0,0 +1,6 @@ +```release-note:change +secrets/database/mongodbatlas: Updated plugin from v0.9.0 to v0.10.0 +``` +```release-note:feature +**MongoDB Atlas Database Secrets**: Adds support for generating X.509 certificates on dynamic roles for user authentication +``` \ No newline at end of file diff --git a/changelog/20891.txt b/changelog/20891.txt new file mode 100644 index 0000000..3057ec5 --- /dev/null +++ b/changelog/20891.txt @@ -0,0 +1,4 @@ +```release-note:improvement +secrets/consul: Improve error message when ACL bootstrapping fails. +``` + diff --git a/changelog/20897.txt b/changelog/20897.txt new file mode 100644 index 0000000..01be5ac --- /dev/null +++ b/changelog/20897.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes issue unsealing cluster for seal types other than shamir +``` \ No newline at end of file diff --git a/changelog/20907.txt b/changelog/20907.txt new file mode 100644 index 0000000..3f13a65 --- /dev/null +++ b/changelog/20907.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes key_bits and signature_bits reverting to default values when editing a pki role +``` \ No newline at end of file diff --git a/changelog/20934.txt b/changelog/20934.txt new file mode 100644 index 0000000..72c2257 --- /dev/null +++ b/changelog/20934.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent: Fix bug with 'cache' stanza validation +``` diff --git a/changelog/20943.txt b/changelog/20943.txt new file mode 100644 index 0000000..7cf186d --- /dev/null +++ b/changelog/20943.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Support TLS-ALPN-01 challenge type in ACME for DNS certificate identifiers. +``` diff --git a/changelog/20964.txt b/changelog/20964.txt new file mode 100644 index 0000000..8bd9563 --- /dev/null +++ b/changelog/20964.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity: Fixes creation of duplicate groups with the same name but unique IDs. +``` \ No newline at end of file diff --git a/changelog/20965.txt b/changelog/20965.txt new file mode 100644 index 0000000..43c1d97 --- /dev/null +++ b/changelog/20965.txt @@ -0,0 +1,3 @@ +```release-note:bug +identity: Remove caseSensitivityKey to prevent errors while loading groups, which could result in groups missing from MemDB when duplicates are found. +``` \ No newline at end of file diff --git a/changelog/20981.txt b/changelog/20981.txt new file mode 100644 index 0000000..26a5304 --- /dev/null +++ b/changelog/20981.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Limit ACME issued certificates NotAfter TTL to a maximum of 90 days +``` diff --git a/changelog/20986.txt b/changelog/20986.txt new file mode 100644 index 0000000..c0615f9 --- /dev/null +++ b/changelog/20986.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/raft: Fix race where new follower joining can get pruned by dead server cleanup.
+``` \ No newline at end of file diff --git a/changelog/21100.txt b/changelog/21100.txt new file mode 100644 index 0000000..50024c9 --- /dev/null +++ b/changelog/21100.txt @@ -0,0 +1,4 @@ +```release-note:bug +replication (enterprise): Fix regression causing token creation against a role +with a new entity alias to be incorrectly forwarded from perf standbys. +``` diff --git a/changelog/21110.txt b/changelog/21110.txt new file mode 100644 index 0000000..2471fac --- /dev/null +++ b/changelog/21110.txt @@ -0,0 +1,4 @@ +```release-note:bug +core/quotas (enterprise): Fix a case where we were applying login roles to lease count quotas in a non-login context. +Also fix a related potential deadlock. +``` \ No newline at end of file diff --git a/changelog/21165.txt b/changelog/21165.txt new file mode 100644 index 0000000..dd6b6d0 --- /dev/null +++ b/changelog/21165.txt @@ -0,0 +1,3 @@ +```release-note:bug +raft/autopilot: Add dr-token flag for raft autopilot cli commands +``` diff --git a/changelog/21209.txt b/changelog/21209.txt new file mode 100644 index 0000000..31ddf41 --- /dev/null +++ b/changelog/21209.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/pki: Allow issuance of root CAs without AIA, when templated AIA information includes issuer_id. +``` diff --git a/changelog/21215.txt b/changelog/21215.txt new file mode 100644 index 0000000..ec4a63a --- /dev/null +++ b/changelog/21215.txt @@ -0,0 +1,4 @@ +```release-note:change +core/namespace (enterprise): Introduce the concept of high-privilege namespace (administrative namespace), +which will have access to some system backend paths that were previously only accessible in the root namespace. +``` \ No newline at end of file diff --git a/changelog/21223.txt b/changelog/21223.txt new file mode 100644 index 0000000..96605f0 --- /dev/null +++ b/changelog/21223.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Do not cache seal configuration to fix a bug that resulted in sporadic auto unseal failures. +``` diff --git a/changelog/21249.txt b/changelog/21249.txt new file mode 100644 index 0000000..a088677 --- /dev/null +++ b/changelog/21249.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix panic in sealed nodes using raft storage trying to emit raft metrics +``` diff --git a/changelog/21260.txt b/changelog/21260.txt new file mode 100644 index 0000000..b291ec7 --- /dev/null +++ b/changelog/21260.txt @@ -0,0 +1,4 @@ +```release-note:bug +core: Change where we evaluate filtered paths as part of mount operations; this is part of an enterprise bugfix that will +have its own changelog entry. Fix wrong lock used in ListAuths link meta interface implementation. +``` diff --git a/changelog/21282.txt b/changelog/21282.txt new file mode 100644 index 0000000..03f22e4 --- /dev/null +++ b/changelog/21282.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/ldap: Normalize HTTP response codes when invalid credentials are provided +``` diff --git a/changelog/21297.txt b/changelog/21297.txt new file mode 100644 index 0000000..9f98fd3 --- /dev/null +++ b/changelog/21297.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent: Fix agent generate-config to accept -namespace, VAULT_NAMESPACE, and other client-modifying flags. 
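The administrative namespace entry above (changelog/21215.txt) is a server-side setting. A one-line sketch of how it might appear in the server's HCL configuration; the parameter name `administrative_namespace_path` and the `admin/` namespace are assumptions for illustration, not confirmed by this changelog:

```hcl
# Sketch (enterprise): designate an existing namespace as the
# high-privilege administrative namespace. Parameter name and value assumed.
administrative_namespace_path = "admin/"
```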
+``` diff --git a/changelog/21316.txt b/changelog/21316.txt new file mode 100644 index 0000000..5573c7e --- /dev/null +++ b/changelog/21316.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Prevent deleted issuers from reappearing when migrating from a version 1 bundle to a version 2 bundle (versions including 1.13.0, 1.12.2, and 1.11.6); when managed keys were removed but still referenced in the Vault 1.10 legacy CA bundle, this caused the error: `no managed key found with uuid`. +``` diff --git a/changelog/21342.txt b/changelog/21342.txt new file mode 100644 index 0000000..c1d8cd0 --- /dev/null +++ b/changelog/21342.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Don't exit just because we think there's a potential deadlock. +``` diff --git a/changelog/21357.txt b/changelog/21357.txt new file mode 100644 index 0000000..3b3bffd --- /dev/null +++ b/changelog/21357.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fixed issue with some durations not being properly parsed to include days. +``` \ No newline at end of file diff --git a/changelog/21449.txt b/changelog/21449.txt new file mode 100644 index 0000000..7711909 --- /dev/null +++ b/changelog/21449.txt @@ -0,0 +1,3 @@ +```release-note:bug +openapi: Fix response schema for PKI Issue requests +``` diff --git a/changelog/21458.txt b/changelog/21458.txt new file mode 100644 index 0000000..352b8a0 --- /dev/null +++ b/changelog/21458.txt @@ -0,0 +1,3 @@ +```release-note:bug +openapi: Fix schema definitions for PKI EAB APIs +``` diff --git a/changelog/21466.txt b/changelog/21466.txt new file mode 100644 index 0000000..94d0af9 --- /dev/null +++ b/changelog/21466.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent: Fix "generate-config" command documentation URL +``` diff --git a/changelog/21470.txt b/changelog/21470.txt new file mode 100644 index 0000000..9f047a9 --- /dev/null +++ b/changelog/21470.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. +``` diff --git a/changelog/21503.txt b/changelog/21503.txt new file mode 100644 index 0000000..a61b22b --- /dev/null +++ b/changelog/21503.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Surface DOMException error when browser settings prevent localStorage.
+``` diff --git a/changelog/21531.txt b/changelog/21531.txt new file mode 100644 index 0000000..dff421a --- /dev/null +++ b/changelog/21531.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes styling of private key input when configuring an SSH key +``` \ No newline at end of file diff --git a/changelog/21562.txt b/changelog/21562.txt new file mode 100644 index 0000000..c41d727 --- /dev/null +++ b/changelog/21562.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes issue with certain navigational links incorrectly displaying in child namespaces +``` \ No newline at end of file diff --git a/changelog/21563.txt b/changelog/21563.txt new file mode 100644 index 0000000..7426ed2 --- /dev/null +++ b/changelog/21563.txt @@ -0,0 +1,3 @@ +```release-note:improvement +openapi: Better mount points for kv-v1 and kv-v2 in openapi.json +``` diff --git a/changelog/21582.txt b/changelog/21582.txt new file mode 100644 index 0000000..6a9d9a4 --- /dev/null +++ b/changelog/21582.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes login screen display issue with Safari browser +``` \ No newline at end of file diff --git a/changelog/21623.txt b/changelog/21623.txt new file mode 100644 index 0000000..7fc272d --- /dev/null +++ b/changelog/21623.txt @@ -0,0 +1,3 @@ +```release-note:improvement +eventbus: updated go-eventlogger library to allow removal of nodes referenced by pipelines (used for subscriptions) +``` \ No newline at end of file diff --git a/changelog/21631.txt b/changelog/21631.txt new file mode 100644 index 0000000..ffdb4bb --- /dev/null +++ b/changelog/21631.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. +``` \ No newline at end of file diff --git a/changelog/21635.txt b/changelog/21635.txt new file mode 100644 index 0000000..6d19e8d --- /dev/null +++ b/changelog/21635.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Adds missing values to details view after generating PKI certificate +``` \ No newline at end of file diff --git a/changelog/21642.txt b/changelog/21642.txt new file mode 100644 index 0000000..84af5b6 --- /dev/null +++ b/changelog/21642.txt @@ -0,0 +1,3 @@ +```release-note:bug +serviceregistration: Fix bug where multiple nodes in a secondary cluster could be labelled active after updating the cluster's primary +``` \ No newline at end of file diff --git a/changelog/21681.txt b/changelog/21681.txt new file mode 100644 index 0000000..8d68442 --- /dev/null +++ b/changelog/21681.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sys/metrics (enterprise): Adds a gauge metric that tracks whether enterprise builtin secret plugins are enabled. +``` diff --git a/changelog/21702.txt b/changelog/21702.txt new file mode 100644 index 0000000..5475a48 --- /dev/null +++ b/changelog/21702.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Add a parameter to allow ExtKeyUsage field usage from a role within ACME. +``` diff --git a/changelog/21739.txt b/changelog/21739.txt new file mode 100644 index 0000000..7b559d9 --- /dev/null +++ b/changelog/21739.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixed an issue where editing an SSH role would clear `default_critical_options` and `default_extension` if left unchanged. 
+``` diff --git a/changelog/21767.txt b/changelog/21767.txt new file mode 100644 index 0000000..2092442 --- /dev/null +++ b/changelog/21767.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixed secrets, leases, and policies filter dropping focus after a single character +``` diff --git a/changelog/21800.txt b/changelog/21800.txt new file mode 100644 index 0000000..bfe8f67 --- /dev/null +++ b/changelog/21800.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. +``` \ No newline at end of file diff --git a/changelog/21870.txt b/changelog/21870.txt new file mode 100644 index 0000000..3cb9856 --- /dev/null +++ b/changelog/21870.txt @@ -0,0 +1,6 @@ +```release-note:bug +secrets/pki: Fix bug with ACME tidy, 'unable to determine acme base folder path'. +``` +```release-note:bug +secrets/pki: Fix preserving acme_account_safety_buffer on config/auto-tidy. +``` diff --git a/changelog/21925.txt b/changelog/21925.txt new file mode 100644 index 0000000..ca89ff7 --- /dev/null +++ b/changelog/21925.txt @@ -0,0 +1,3 @@ +```release-note:improvement +kmip (enterprise): Add namespace lock and unlock support +``` diff --git a/changelog/21926.txt b/changelog/21926.txt new file mode 100644 index 0000000..a602020 --- /dev/null +++ b/changelog/21926.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes problem displaying certificates issued with unsupported signature algorithms (i.e. ed25519) +``` \ No newline at end of file diff --git a/changelog/21951.txt b/changelog/21951.txt new file mode 100644 index 0000000..d53c0f1 --- /dev/null +++ b/changelog/21951.txt @@ -0,0 +1,4 @@ +```release-note:bug +awsutil: Update awsutil to v0.2.3 to fix a regression where Vault no longer +respects `AWS_ROLE_ARN`, `AWS_WEB_IDENTITY_TOKEN_FILE`, and `AWS_ROLE_SESSION_NAME`. +``` diff --git a/changelog/22040.txt b/changelog/22040.txt new file mode 100644 index 0000000..e96a428 --- /dev/null +++ b/changelog/22040.txt @@ -0,0 +1,3 @@ +```release-note:improvement +storage/raft: Cap the minimum dead_server_last_contact_threshold to 1m. +``` diff --git a/changelog/22137.txt b/changelog/22137.txt new file mode 100644 index 0000000..6f5a3be --- /dev/null +++ b/changelog/22137.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Remove "expiration manager is nil on tokenstore" error log for unauth requests on DR secondary as they do not have an expiration manager. +``` \ No newline at end of file diff --git a/changelog/22191.txt b/changelog/22191.txt new file mode 100644 index 0000000..9fa7c85 --- /dev/null +++ b/changelog/22191.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: adds allowed_user_ids field to create role form and user_ids to generate certificates form in pki +``` \ No newline at end of file diff --git a/changelog/22235.txt b/changelog/22235.txt new file mode 100644 index 0000000..3d62e70 --- /dev/null +++ b/changelog/22235.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Log rollback manager failures during unmount, remount to prevent replication failures on secondary clusters.
+``` diff --git a/changelog/22249.txt b/changelog/22249.txt new file mode 100644 index 0000000..d470b97 --- /dev/null +++ b/changelog/22249.txt @@ -0,0 +1,4 @@ +```release-note:bug +sdk/ldaputil: Properly escape user filters when using UPN domains +sdk/ldaputil: use EscapeLDAPValue implementation from cap/ldap +``` \ No newline at end of file diff --git a/changelog/22253.txt b/changelog/22253.txt new file mode 100644 index 0000000..c3a9ab0 --- /dev/null +++ b/changelog/22253.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/database: Improves error logging for static role rotations by including the database and role names. +``` \ No newline at end of file diff --git a/changelog/22264.txt b/changelog/22264.txt new file mode 100644 index 0000000..5ee5378 --- /dev/null +++ b/changelog/22264.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auto-auth/azure: Added Azure Workload Identity Federation support to auto-auth (for Vault Agent and Vault Proxy). +``` diff --git a/changelog/22277.txt b/changelog/22277.txt new file mode 100644 index 0000000..0d0dbf2 --- /dev/null +++ b/changelog/22277.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/azure: Update plugin to v0.16.0 +``` diff --git a/changelog/22322.txt b/changelog/22322.txt new file mode 100644 index 0000000..8df620c --- /dev/null +++ b/changelog/22322.txt @@ -0,0 +1,4 @@ +```release-note:bug +agent: Environment variable VAULT_CACERT_BYTES now works for Vault Agent templates. +``` + diff --git a/changelog/22330.txt b/changelog/22330.txt new file mode 100644 index 0000000..427fe39 --- /dev/null +++ b/changelog/22330.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/ldap: Fix bug causing schema and password_policy to be overwritten in config. +``` \ No newline at end of file diff --git a/changelog/22355.txt b/changelog/22355.txt new file mode 100644 index 0000000..d748796 --- /dev/null +++ b/changelog/22355.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix bug where background thread to update locked user entries runs on DR secondaries. +``` \ No newline at end of file diff --git a/changelog/22362.txt b/changelog/22362.txt new file mode 100644 index 0000000..0de5440 --- /dev/null +++ b/changelog/22362.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix readonly errors that could occur while loading mounts/auths during unseal +``` diff --git a/changelog/22363.txt b/changelog/22363.txt new file mode 100644 index 0000000..faa5a24 --- /dev/null +++ b/changelog/22363.txt @@ -0,0 +1,3 @@ +```release-note:bug +license: Add autoloaded license path to the cache exempt list. This is to ensure that license changes on the active node are observed on the perf standby node. +``` diff --git a/changelog/22374.txt b/changelog/22374.txt new file mode 100644 index 0000000..2f744c5 --- /dev/null +++ b/changelog/22374.txt @@ -0,0 +1,3 @@ +```release-note:bug +expiration: Fix a deadlock that could occur when a revocation failure happens while restoring leases on startup.
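The auto-auth entry above (changelog/22264.txt) concerns Agent and Proxy configuration. A rough sketch of an `auto_auth` stanza for the azure method using workload identity; the `role`, `resource`, and `authenticate_from_environment` values are assumptions based on the azure method's general shape rather than anything stated in the changelog:

```hcl
auto_auth {
  method "azure" {
    config = {
      role     = "my-azure-role"                 # assumed Vault role name
      resource = "https://management.azure.com/" # assumed token audience
      # Assumed parameter: pick up federated workload identity
      # credentials from the environment (e.g. an AKS pod).
      authenticate_from_environment = true
    }
  }
}
```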
+``` diff --git a/changelog/22390.txt b/changelog/22390.txt new file mode 100644 index 0000000..449a8a2 --- /dev/null +++ b/changelog/22390.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes text readability issue in revoke token confirmation dialog +``` \ No newline at end of file diff --git a/changelog/22394.txt b/changelog/22394.txt new file mode 100644 index 0000000..4f5a2b9 --- /dev/null +++ b/changelog/22394.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes `max_versions` default for secret metadata unintentionally overriding kv engine defaults +``` \ No newline at end of file diff --git a/changelog/22396.txt b/changelog/22396.txt new file mode 100644 index 0000000..d05cbb7 --- /dev/null +++ b/changelog/22396.txt @@ -0,0 +1,3 @@ +```release-note:improvement +website/docs: Fix link formatting in Vault lambda extension docs +``` diff --git a/changelog/22458.txt b/changelog/22458.txt new file mode 100644 index 0000000..6ce0929 --- /dev/null +++ b/changelog/22458.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes model defaults overwriting input value when user tries to clear form input +``` \ No newline at end of file diff --git a/changelog/22468.txt b/changelog/22468.txt new file mode 100644 index 0000000..538da14 --- /dev/null +++ b/changelog/22468.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/metrics: vault.raft_storage.bolt.write.time should be a counter not a summary +``` diff --git a/changelog/22471.txt b/changelog/22471.txt new file mode 100644 index 0000000..67b110c --- /dev/null +++ b/changelog/22471.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: enables create and update KV secret workflow when control group present +``` \ No newline at end of file diff --git a/changelog/22502.txt b/changelog/22502.txt new file mode 100644 index 0000000..b9d21c2 --- /dev/null +++ b/changelog/22502.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: KV View Secret card will link to list view if input ends in "/" +``` \ No newline at end of file diff --git a/changelog/22516.txt b/changelog/22516.txt new file mode 100644 index 0000000..661b77d --- /dev/null +++ b/changelog/22516.txt @@ -0,0 +1,3 @@ +```release-note:change +database/snowflake: Update plugin to v0.9.0 +``` diff --git a/changelog/22521.txt b/changelog/22521.txt new file mode 100644 index 0000000..9310b64 --- /dev/null +++ b/changelog/22521.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: do not check TLS validity on ACME requests redirected to https +``` diff --git a/changelog/22523.txt b/changelog/22523.txt new file mode 100644 index 0000000..e53ab65 --- /dev/null +++ b/changelog/22523.txt @@ -0,0 +1,3 @@ +```release-note:bug +api: Fix breakage with UNIX domain socket addresses introduced by newest Go versions as a security fix. +``` diff --git a/changelog/22541.txt b/changelog/22541.txt new file mode 100644 index 0000000..918af3e --- /dev/null +++ b/changelog/22541.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix blank page or ghost secret when canceling KV secret create +``` diff --git a/changelog/22567.txt b/changelog/22567.txt new file mode 100644 index 0000000..d9e5570 --- /dev/null +++ b/changelog/22567.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Use a worker pool for the rollback manager. Add new metrics for the rollback manager to track the queued tasks. 
+``` \ No newline at end of file diff --git a/changelog/22583.txt b/changelog/22583.txt new file mode 100644 index 0000000..0bc29d6 --- /dev/null +++ b/changelog/22583.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/quotas: Reduce overhead for role calculation when using cloud auth methods. +``` \ No newline at end of file diff --git a/changelog/22597.txt b/changelog/22597.txt new file mode 100644 index 0000000..0c37e56 --- /dev/null +++ b/changelog/22597.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/quotas: Only perform ResolveRoleOperation for role-based quotas and lease creation. +``` diff --git a/changelog/22651.txt b/changelog/22651.txt new file mode 100644 index 0000000..5ca2819 --- /dev/null +++ b/changelog/22651.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/quotas: Add configuration to allow skipping of expensive role calculations +``` \ No newline at end of file diff --git a/changelog/22659.txt b/changelog/22659.txt new file mode 100644 index 0000000..501fb4e --- /dev/null +++ b/changelog/22659.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/ldap: improved login speed by adding concurrency to LDAP token group searches +``` diff --git a/changelog/22753.txt b/changelog/22753.txt new file mode 100644 index 0000000..a297337 --- /dev/null +++ b/changelog/22753.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/transit: fix panic when providing non-PEM formatted public key for import +``` diff --git a/changelog/22760.txt b/changelog/22760.txt new file mode 100644 index 0000000..cf3f7c5 --- /dev/null +++ b/changelog/22760.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/seal: add a workaround for potential connection [hangs](https://github.com/Azure/azure-sdk-for-go/issues/21346) in Azure autoseals. +``` diff --git a/changelog/22818.txt b/changelog/22818.txt new file mode 100644 index 0000000..1ef9b64 --- /dev/null +++ b/changelog/22818.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli: Fix the CLI failing to return wrapping information for KV PUT and PATCH operations when format is set to `table`. +``` diff --git a/changelog/22852.txt b/changelog/22852.txt new file mode 100644 index 0000000..3a667eb --- /dev/null +++ b/changelog/22852.txt @@ -0,0 +1,3 @@ +```release-note:security +secrets/transit: fix a regression that was honoring nonces provided in non-convergent modes during encryption. +``` diff --git a/changelog/22855.txt b/changelog/22855.txt new file mode 100644 index 0000000..a911e21 --- /dev/null +++ b/changelog/22855.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: don't exclude features present on license +``` \ No newline at end of file diff --git a/changelog/23007.txt b/changelog/23007.txt new file mode 100644 index 0000000..02fee8c --- /dev/null +++ b/changelog/23007.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Fix removal of issuers to clean up unreferenced CRLs.
+``` diff --git a/changelog/23010.txt b/changelog/23010.txt new file mode 100644 index 0000000..f6a72ec --- /dev/null +++ b/changelog/23010.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/consul: Fix revocations when Vault has an access token using specific namespace and admin partition policies +``` \ No newline at end of file diff --git a/changelog/23013.txt b/changelog/23013.txt new file mode 100644 index 0000000..78987e6 --- /dev/null +++ b/changelog/23013.txt @@ -0,0 +1,7 @@ +```release-note:bug +storage/consul: fix a bug where an active node in a specific sort of network +partition could continue to write data to Consul after a new leader is elected, +potentially causing data loss or corruption for keys with many concurrent +writers. For Enterprise clusters this could cause corruption of the merkle trees, +leading to failure to complete merkle sync without a full re-index. +``` diff --git a/changelog/23025.txt b/changelog/23025.txt new file mode 100644 index 0000000..5392c75 --- /dev/null +++ b/changelog/23025.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui (enterprise): Fix error message when generating SSH credential with control group +``` \ No newline at end of file diff --git a/changelog/23066.txt b/changelog/23066.txt new file mode 100644 index 0000000..f4636b9 --- /dev/null +++ b/changelog/23066.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix the issue where confirm delete dropdown is being cut off +``` diff --git a/changelog/23119.txt b/changelog/23119.txt new file mode 100644 index 0000000..fd5f694 --- /dev/null +++ b/changelog/23119.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Added allowed_domains_template field for CA type role in SSH engine +``` diff --git a/changelog/23123.txt b/changelog/23123.txt new file mode 100644 index 0000000..4bfc0c0 --- /dev/null +++ b/changelog/23123.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes filter and search bug in secrets engines +``` diff --git a/changelog/23155.txt b/changelog/23155.txt new file mode 100644 index 0000000..0c6914a --- /dev/null +++ b/changelog/23155.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fixes list password policy to include those with names containing / characters.
+``` \ No newline at end of file diff --git a/changelog/23193.txt b/changelog/23193.txt new file mode 100644 index 0000000..b895907 --- /dev/null +++ b/changelog/23193.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Add pagination to PKI roles, keys, issuers, and certificates list pages +``` diff --git a/changelog/23225.txt b/changelog/23225.txt new file mode 100644 index 0000000..31d5b64 --- /dev/null +++ b/changelog/23225.txt @@ -0,0 +1,3 @@ +```release-note:bug +docs: fix wrong API path for LDAP secrets CLI commands +``` diff --git a/changelog/23232.txt b/changelog/23232.txt new file mode 100644 index 0000000..8084391 --- /dev/null +++ b/changelog/23232.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Adds tidy_revoked_certs to PKI tidy status page +``` \ No newline at end of file diff --git a/changelog/23240.txt b/changelog/23240.txt new file mode 100644 index 0000000..da202c7 --- /dev/null +++ b/changelog/23240.txt @@ -0,0 +1,3 @@ +```release-note:bug +mongo-db: allow non-admin database for root credential rotation +``` diff --git a/changelog/23260.txt b/changelog/23260.txt new file mode 100644 index 0000000..52de9b8 --- /dev/null +++ b/changelog/23260.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Adds warning before downloading KV v2 secret values +``` \ No newline at end of file diff --git a/changelog/23278.txt b/changelog/23278.txt new file mode 100644 index 0000000..cd02679 --- /dev/null +++ b/changelog/23278.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Stop processing in-flight ACME verifications when an active node steps down +``` diff --git a/changelog/23282.txt b/changelog/23282.txt new file mode 100644 index 0000000..1026ccf --- /dev/null +++ b/changelog/23282.txt @@ -0,0 +1,3 @@ +```release-note:bug +expiration: Prevent large lease loads from delaying state changes, e.g. becoming active or standby. +``` \ No newline at end of file diff --git a/changelog/23287.txt b/changelog/23287.txt new file mode 100644 index 0000000..6d3229f --- /dev/null +++ b/changelog/23287.txt @@ -0,0 +1,3 @@ +```release-note:improvement +website/docs: fix inaccuracies with unauthenticated_in_flight_requests_access parameter +``` \ No newline at end of file diff --git a/changelog/23331.txt b/changelog/23331.txt new file mode 100644 index 0000000..aef48cd --- /dev/null +++ b/changelog/23331.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix the copy token button in the sidebar navigation window when in a collapsed state. +``` \ No newline at end of file diff --git a/changelog/23457.txt b/changelog/23457.txt new file mode 100644 index 0000000..adec8ca --- /dev/null +++ b/changelog/23457.txt @@ -0,0 +1,3 @@ +```release-note:feature +cli/snapshot: Add CLI tool to inspect Vault snapshots +``` \ No newline at end of file diff --git a/changelog/23470.txt b/changelog/23470.txt new file mode 100644 index 0000000..744fa76 --- /dev/null +++ b/changelog/23470.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix AWS secret engine to allow empty policy_document field.
+``` \ No newline at end of file diff --git a/changelog/23500.txt b/changelog/23500.txt new file mode 100644 index 0000000..52f95c9 --- /dev/null +++ b/changelog/23500.txt @@ -0,0 +1,3 @@ +```release-note:bug +events: Ignore sending context to give more time for events to send +``` diff --git a/changelog/23516.txt b/changelog/23516.txt new file mode 100644 index 0000000..f87ab20 --- /dev/null +++ b/changelog/23516.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes issue with sidebar navigation links disappearing when navigating to policies when a user is not authorized +``` \ No newline at end of file diff --git a/changelog/23549.txt b/changelog/23549.txt new file mode 100644 index 0000000..078cc23 --- /dev/null +++ b/changelog/23549.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api/plugins: add `tls-server-name` arg for plugin registration +``` diff --git a/changelog/23636.txt b/changelog/23636.txt new file mode 100644 index 0000000..2625560 --- /dev/null +++ b/changelog/23636.txt @@ -0,0 +1,3 @@ +```release-note:bug +command/server: Fix bug with sigusr2 where pprof files were not closed correctly +``` diff --git a/changelog/23695.txt b/changelog/23695.txt new file mode 100644 index 0000000..1046706 --- /dev/null +++ b/changelog/23695.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Decode the connection url for display on the connection details page +``` diff --git a/changelog/23700.txt b/changelog/23700.txt new file mode 100644 index 0000000..59a69fb --- /dev/null +++ b/changelog/23700.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Update flat, shell-quote and swagger-ui-dist packages. Remove swagger-ui styling overrides. +``` \ No newline at end of file diff --git a/changelog/23723.txt b/changelog/23723.txt new file mode 100644 index 0000000..25828f9 --- /dev/null +++ b/changelog/23723.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/transit: Do not allow auto rotation on managed_key key types +``` diff --git a/changelog/23747.txt b/changelog/23747.txt new file mode 100644 index 0000000..bf611ed --- /dev/null +++ b/changelog/23747.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Adds toggle to KV secrets engine value download modal to optionally stringify value in downloaded file +``` \ No newline at end of file diff --git a/changelog/23781.txt b/changelog/23781.txt new file mode 100644 index 0000000..32d3b51 --- /dev/null +++ b/changelog/23781.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/activity: Fixes segments fragment loss due to exceeding entry record size limit +``` \ No newline at end of file diff --git a/changelog/23786.txt b/changelog/23786.txt new file mode 100644 index 0000000..b6e7314 --- /dev/null +++ b/changelog/23786.txt @@ -0,0 +1,3 @@ +```release-note:change +auth/approle: Normalized error response messages when invalid credentials are provided +``` diff --git a/changelog/23802.txt b/changelog/23802.txt new file mode 100644 index 0000000..49caebc --- /dev/null +++ b/changelog/23802.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/mounts: Fix reading an "auth" mount using "sys/internal/ui/mounts/" when filter paths are enforced returns 500 error code from the secondary +``` diff --git a/changelog/23849.txt b/changelog/23849.txt new file mode 100644 index 0000000..e5d89a3 --- /dev/null +++ b/changelog/23849.txt @@ -0,0 +1,3 @@ +```release-note:change +secrets/mongodbatlas: Update plugin to v0.10.2 +``` diff --git a/changelog/23861.txt b/changelog/23861.txt new file mode 100644 index 0000000..8c4ac70 --- /dev/null +++ b/changelog/23861.txt @@ -0,0 +1,4 @@ 
+```release-note:bug +api/seal-status: Fix deadlock on calls to sys/seal-status with a namespace configured +on the request. +``` diff --git a/changelog/23872.txt b/changelog/23872.txt new file mode 100644 index 0000000..b486fd2 --- /dev/null +++ b/changelog/23872.txt @@ -0,0 +1,3 @@ +```release-note:improvement +storage/etcd: etcd should only return keys when calling List() +``` diff --git a/changelog/23874.txt b/changelog/23874.txt new file mode 100644 index 0000000..34ac61d --- /dev/null +++ b/changelog/23874.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: fix policies with wildcards not matching list operations due to the policy path not having a trailing slash +``` \ No newline at end of file diff --git a/changelog/23894.txt b/changelog/23894.txt new file mode 100644 index 0000000..a94e142 --- /dev/null +++ b/changelog/23894.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Skip unnecessary deriving of policies during Login MFA Check. +``` \ No newline at end of file diff --git a/changelog/23902.txt b/changelog/23902.txt new file mode 100644 index 0000000..cbfec65 --- /dev/null +++ b/changelog/23902.txt @@ -0,0 +1,5 @@ +```release-note:bug +core: fix bug where deadlock detection was always on for expiration and quotas. +These can now be configured individually with `detect_deadlocks`. +``` + diff --git a/changelog/24027.txt b/changelog/24027.txt new file mode 100644 index 0000000..d276928 --- /dev/null +++ b/changelog/24027.txt @@ -0,0 +1,3 @@ +```release-note:bug +expiration: Fix fatal error "concurrent map iteration and map write" when collecting metrics from leases. +``` diff --git a/changelog/24054.txt b/changelog/24054.txt new file mode 100644 index 0000000..2680d11 --- /dev/null +++ b/changelog/24054.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/transit: Fix a panic when attempting to export a public RSA key +``` diff --git a/changelog/24058.txt b/changelog/24058.txt new file mode 100644 index 0000000..baa7fa9 --- /dev/null +++ b/changelog/24058.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/config: Use correct HCL config value when configuring `log_requests_level`. 
+``` \ No newline at end of file diff --git a/changelog/24103.txt b/changelog/24103.txt new file mode 100644 index 0000000..f86bfd9 --- /dev/null +++ b/changelog/24103.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Sort list view of entities and aliases alphabetically using the item name +``` diff --git a/changelog/24108.txt b/changelog/24108.txt new file mode 100644 index 0000000..0fcb8ac --- /dev/null +++ b/changelog/24108.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/quotas: Close rate-limit blocked client purge goroutines when sealing +``` \ No newline at end of file diff --git a/changelog/24147.txt b/changelog/24147.txt new file mode 100644 index 0000000..960ae22 --- /dev/null +++ b/changelog/24147.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix error when tuning token auth configuration within namespace +``` \ No newline at end of file diff --git a/changelog/24192.txt b/changelog/24192.txt new file mode 100644 index 0000000..97a2674 --- /dev/null +++ b/changelog/24192.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Do not set nextUpdate field in OCSP responses when ocsp_expiry is 0 +``` diff --git a/changelog/24193.txt b/changelog/24193.txt new file mode 100644 index 0000000..67ea1d0 --- /dev/null +++ b/changelog/24193.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/cert: Handle errors related to expired OCSP server responses +``` diff --git a/changelog/24252.txt b/changelog/24252.txt new file mode 100644 index 0000000..343811b --- /dev/null +++ b/changelog/24252.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent/logging: Agent should now honor correct -log-format and -log-file settings in logs generated by the consul-template library. +``` \ No newline at end of file diff --git a/changelog/24256.txt b/changelog/24256.txt new file mode 100644 index 0000000..7412471 --- /dev/null +++ b/changelog/24256.txt @@ -0,0 +1,4 @@ +```release-note:bug +api: Fix deadlock on calls to sys/leader with a namespace configured +on the request. +``` diff --git a/changelog/24292.txt b/changelog/24292.txt new file mode 100644 index 0000000..784e2e3 --- /dev/null +++ b/changelog/24292.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix payload sent when disabling replication +``` diff --git a/changelog/24297.txt b/changelog/24297.txt new file mode 100644 index 0000000..d1433cf --- /dev/null +++ b/changelog/24297.txt @@ -0,0 +1,2 @@ +```release-note:change +logging: Vault server, Agent and Proxy now honor log file value and only add a timestamp on rotation. \ No newline at end of file diff --git a/changelog/24325.txt b/changelog/24325.txt new file mode 100644 index 0000000..ab5ce61 --- /dev/null +++ b/changelog/24325.txt @@ -0,0 +1,4 @@ +```release-note:change +identity (enterprise): POST requests to the `/identity/entity/merge` endpoint +are now always forwarded from standbys to the active node. +``` \ No newline at end of file diff --git a/changelog/24336.txt b/changelog/24336.txt new file mode 100644 index 0000000..63594dc --- /dev/null +++ b/changelog/24336.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix a timeout initializing Vault by only using a short timeout persisting barrier keyring encryption counts. 
+``` diff --git a/changelog/7277.txt b/changelog/7277.txt new file mode 100644 index 0000000..4a19cf9 --- /dev/null +++ b/changelog/7277.txt @@ -0,0 +1,3 @@ +```release-note:feature +auth/token: Add `allowed_policies_glob` and `disallowed_policies_glob` fields to token roles to allow glob matching of policies +``` diff --git a/changelog/9081.txt b/changelog/9081.txt new file mode 100644 index 0000000..03045e4 --- /dev/null +++ b/changelog/9081.txt @@ -0,0 +1,4 @@ + +```release-note:improvement +website/docs: changed to echo for all string examples instead of (<<<) here-string. +``` diff --git a/changelog/9109.txt b/changelog/9109.txt new file mode 100644 index 0000000..c85db60 --- /dev/null +++ b/changelog/9109.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: Add support for go-sockaddr templated addresses in config. +``` diff --git a/changelog/9802.txt b/changelog/9802.txt new file mode 100644 index 0000000..f5102a0 --- /dev/null +++ b/changelog/9802.txt @@ -0,0 +1,3 @@ +```release-note:improvement +agent: Send notifications to systemd on start and stop. +``` diff --git a/changelog/9972.txt b/changelog/9972.txt new file mode 100644 index 0000000..c091b47 --- /dev/null +++ b/changelog/9972.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Upgrade Ember-cli from 3.8 to 3.22. +``` diff --git a/changelog/README.md b/changelog/README.md new file mode 100644 index 0000000..cbf841f --- /dev/null +++ b/changelog/README.md @@ -0,0 +1,56 @@ +# changelog + +This folder holds changelog updates from commit 3bc7d15 onwards. + +Release notes are text files with three lines: + + 1. An opening code block with the `release-note:` type annotation. + + For example: + + ```release-note:bug + + Valid modes are: + + - `bug` - Any sort of non-security defect fix. + - `change` - A change in the product that may require action or + review by the operator. Examples would be any kind of API change + (as opposed to backwards compatible addition), a notable behavior + change, or anything that might require attention before updating. Go + version changes are also listed here since they can potentially have + large, sometimes unknown impacts. (Go updates are a special case, and + dep updates in general aren't a `change`). Discuss any potential + `change` items in the pull request to see what other communication + might be warranted. + - `deprecation` - Announcement of a planned future removal of a + feature. Only use this if a deprecation notice also exists [in the + docs](https://www.vaultproject.io/docs/deprecation). + - `feature` - Large topical additions for a major release. These are + rarely in minor releases. Formatting for `feature` entries differs + from normal changelog formatting - see the [new features + instructions](#new-and-major-features). + - `improvement` - Most updates to the product that aren’t `bug`s, but + aren't big enough to be a `feature`, will be an `improvement`. + + 2. A component (for example, `secret/pki` or `sdk/framework`), a colon and a space, and then a one-line description of the change. + + 3. An ending code block. + +This should be in a file named after the pull request number (e.g., `12345.txt`). + +There are many examples in this folder; check one out if you're stuck! + +See [hashicorp/go-changelog](https://github.com/hashicorp/go-changelog) for full documentation on the supported entries. + +## New and Major Features + +For features we are introducing in a new major release, we prefer a single +changelog entry representing that feature.
This way, it is clear to readers +what feature is being introduced. You do not need to reference a specific PR, +and the formatting is slightly different - your changelog file should look +like: + + changelog/<feature-name>.txt: + ```release-note:feature + **Feature Name**: Description of feature - for example "Custom password policies are now supported for all database engines." + ``` diff --git a/changelog/_ 1686.txt b/changelog/_ 1686.txt new file mode 100644 index 0000000..a4faefa --- /dev/null +++ b/changelog/_ 1686.txt @@ -0,0 +1,3 @@ +```release-note:feature +kmip (enterprise): Use entropy augmentation to generate kmip certificates +``` \ No newline at end of file diff --git a/changelog/_10959.txt b/changelog/_10959.txt new file mode 100644 index 0000000..1f9740a --- /dev/null +++ b/changelog/_10959.txt @@ -0,0 +1,3 @@ +```release-note:bug +secret/pki: use case insensitive domain name comparison as per RFC1035 section 2.3.3 +``` \ No newline at end of file diff --git a/changelog/_1622.txt b/changelog/_1622.txt new file mode 100644 index 0000000..728f4e0 --- /dev/null +++ b/changelog/_1622.txt @@ -0,0 +1,12 @@ +```release-note:bug +transform (enterprise): Fix tokenization bug handling metadata on exportable stores +``` +```release-note:bug +transform (enterprise): Fix transform configuration not handling `stores` parameter on the legacy path +``` +```release-note:bug +transform (enterprise): Make expiration timestamps human readable +``` +```release-note:bug +transform (enterprise): Return false for invalid tokens on the validate endpoint rather than returning an HTTP error +``` diff --git a/changelog/_1633.txt b/changelog/_1633.txt new file mode 100644 index 0000000..591f623 --- /dev/null +++ b/changelog/_1633.txt @@ -0,0 +1,3 @@ +```release-note: bug +core (enterprise): Vault EGP policies attached to path * were not correctly scoped to the namespace. +``` diff --git a/changelog/_1637.txt b/changelog/_1637.txt new file mode 100644 index 0000000..cfc6cb5 --- /dev/null +++ b/changelog/_1637.txt @@ -0,0 +1,3 @@ +```release-note:bug +transform (enterprise): Fix bug where tokenization store changes are persisted but don't take effect +``` diff --git a/changelog/_1642.txt b/changelog/_1642.txt new file mode 100644 index 0000000..95558dc --- /dev/null +++ b/changelog/_1642.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core (enterprise): Update Trial Enterprise license from 30 minutes to 6 hours +``` diff --git a/changelog/_1656.txt b/changelog/_1656.txt new file mode 100644 index 0000000..6bce70b --- /dev/null +++ b/changelog/_1656.txt @@ -0,0 +1,5 @@ +```release-note:bug +storage/raft (enterprise): The parameter aws_s3_server_kms_key was misnamed and +didn't work. Renamed to aws_s3_kms_key, and made it work so that, when provided, +the given key is used to encrypt the snapshot using AWS KMS. +``` diff --git a/changelog/_1659.txt b/changelog/_1659.txt new file mode 100644 index 0000000..efd4f00 --- /dev/null +++ b/changelog/_1659.txt @@ -0,0 +1,3 @@ +```release-note:bug +storage/raft (enterprise): Reading a non-existent auto snapshot config now returns 404. +``` diff --git a/changelog/_1663.txt b/changelog/_1663.txt new file mode 100644 index 0000000..e9dee0d --- /dev/null +++ b/changelog/_1663.txt @@ -0,0 +1,4 @@ +```release-note: bug +storage/raft (enterprise): Automated snapshots with Azure required specifying +`azure_blob_environment`, which should have defaulted to `AZUREPUBLICCLOUD`.
+``` diff --git a/changelog/_1680.txt b/changelog/_1680.txt new file mode 100644 index 0000000..8f2681a --- /dev/null +++ b/changelog/_1680.txt @@ -0,0 +1,5 @@ +```release-note: improvement +storage/raft (enterprise): Listing of peers is now allowed on DR secondary +cluster nodes, as an update operation that takes in DR operation token for +authenticating the request. +``` diff --git a/changelog/_1691.txt b/changelog/_1691.txt new file mode 100644 index 0000000..b721b8b --- /dev/null +++ b/changelog/_1691.txt @@ -0,0 +1,5 @@ +```release-note: improvement +replication (enterprise): The log shipper is now memory +as well as length bound, and length and size can be +separately configured. +``` diff --git a/changelog/_1705.txt b/changelog/_1705.txt new file mode 100644 index 0000000..1a72a1e --- /dev/null +++ b/changelog/_1705.txt @@ -0,0 +1,3 @@ +```release-note:bug +replication (enterprise): Fix bug with not starting merkle sync while requests are in progress +``` diff --git a/changelog/_1712.txt b/changelog/_1712.txt new file mode 100644 index 0000000..d355ed7 --- /dev/null +++ b/changelog/_1712.txt @@ -0,0 +1,3 @@ +```release-note:bug +quotas/lease-count (enterprise) : Fix quotas enforcing old lease count quota paths +``` diff --git a/changelog/_1739.txt b/changelog/_1739.txt new file mode 100644 index 0000000..0046676 --- /dev/null +++ b/changelog/_1739.txt @@ -0,0 +1,6 @@ +```release-note:feature +secrets/keymgmt (enterprise): Adds general availability for distributing and managing keys in Azure Key Vault. +``` +```release-note:feature +secrets/keymgmt (enterprise): Adds beta support for distributing and managing keys in AWS KMS. +``` diff --git a/changelog/_1757.txt b/changelog/_1757.txt new file mode 100644 index 0000000..0362a1d --- /dev/null +++ b/changelog/_1757.txt @@ -0,0 +1,4 @@ +```release-note: bug +transform (enterprise): Fix an issue with malformed transform configuration +storage when upgrading from 1.5 to 1.6. See Upgrade Notes for 1.6.x. +``` diff --git a/changelog/_1792.txt b/changelog/_1792.txt new file mode 100644 index 0000000..a1f7bc9 --- /dev/null +++ b/changelog/_1792.txt @@ -0,0 +1,3 @@ +```release-note:bug +core (enterprise): Fix orphan return value from auth methods executed on performance standby nodes. +``` diff --git a/changelog/_1819.txt b/changelog/_1819.txt new file mode 100644 index 0000000..9e6cb90 --- /dev/null +++ b/changelog/_1819.txt @@ -0,0 +1,3 @@ +```release-note:feature +core: Add controlled capabilities to control group policy stanza +``` diff --git a/changelog/_2021Jan20.txt b/changelog/_2021Jan20.txt new file mode 100644 index 0000000..1ffaa8d --- /dev/null +++ b/changelog/_2021Jan20.txt @@ -0,0 +1,12 @@ +```release-note:security +Mount Path Disclosure: Vault previously returned different HTTP status codes for +existent and non-existent mount paths. This behavior would allow unauthenticated +brute force attacks to reveal which paths had valid mounts. This issue affects +Vault and Vault Enterprise and is fixed in 1.6.2 (CVE-2020-25594). +``` +```release-note:security +IP Address Disclosure: We fixed a vulnerability where, under some error +conditions, Vault would return an error message disclosing internal IP +addresses. This vulnerability affects Vault and Vault Enterprise and is fixed in +1.6.2 (CVE-2021-3024). 
+``` diff --git a/changelog/_2021Jan26.txt b/changelog/_2021Jan26.txt new file mode 100644 index 0000000..96506e9 --- /dev/null +++ b/changelog/_2021Jan26.txt @@ -0,0 +1,8 @@ +```release-note:security +Limited Unauthenticated Remove Peer: As of Vault 1.6, the remove-peer command +on DR secondaries did not require authentication. This issue impacts the +stability of HA architecture, as a bad actor could remove all standby +nodes from a DR +secondary. This issue affects Vault Enterprise 1.6.0 and 1.6.1, and is fixed in +1.6.2 (CVE-2021-3282). +``` diff --git a/changelog/_2071.txt b/changelog/_2071.txt new file mode 100644 index 0000000..7168d22 --- /dev/null +++ b/changelog/_2071.txt @@ -0,0 +1,3 @@ +```release-note:bug +core (enterprise): Disallow autogenerated licenses to be used in diagnose even when config is specified +``` diff --git a/changelog/_22733.txt b/changelog/_22733.txt new file mode 100644 index 0000000..039e423 --- /dev/null +++ b/changelog/_22733.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes long namespace names overflow in the sidebar +``` diff --git a/changelog/_go-ver-1120.txt b/changelog/_go-ver-1120.txt new file mode 100644 index 0000000..bbb2cf7 --- /dev/null +++ b/changelog/_go-ver-1120.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Bump Go version to 1.19.2. +``` diff --git a/changelog/_go-ver-1130.txt b/changelog/_go-ver-1130.txt new file mode 100644 index 0000000..c63e249 --- /dev/null +++ b/changelog/_go-ver-1130.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Bump Go version to 1.20. +``` diff --git a/changelog/_go-ver-1140.txt b/changelog/_go-ver-1140.txt new file mode 100644 index 0000000..052a277 --- /dev/null +++ b/changelog/_go-ver-1140.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Bump Go version to 1.20.5. +``` diff --git a/changelog/_go-ver-1142.txt b/changelog/_go-ver-1142.txt new file mode 100644 index 0000000..8e2216b --- /dev/null +++ b/changelog/_go-ver-1142.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Bump Go version to 1.20.7. +``` diff --git a/changelog/_go-ver-1143.txt b/changelog/_go-ver-1143.txt new file mode 100644 index 0000000..3d84c23 --- /dev/null +++ b/changelog/_go-ver-1143.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Bump Go version to 1.20.8. +``` diff --git a/changelog/_go-ver-1144.txt b/changelog/_go-ver-1144.txt new file mode 100644 index 0000000..cafce67 --- /dev/null +++ b/changelog/_go-ver-1144.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Bump Go version to 1.20.10. +``` diff --git a/changelog/_go-ver-1147.txt b/changelog/_go-ver-1147.txt new file mode 100644 index 0000000..df0859e --- /dev/null +++ b/changelog/_go-ver-1147.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Bump Go version to 1.20.11. +``` diff --git a/changelog/changelog.tmpl b/changelog/changelog.tmpl new file mode 100644 index 0000000..648160c --- /dev/null +++ b/changelog/changelog.tmpl @@ -0,0 +1,56 @@ +{{- if index .NotesByType "breaking-change" -}} +BREAKING CHANGES: + +{{range index .NotesByType "breaking-change" -}} +* {{ template "note" .}} +{{ end -}} +{{- end -}} + +{{- if .NotesByType.security }} +SECURITY: + +{{range .NotesByType.security -}} +* {{ template "note" . }} +{{ end -}} +{{- end -}} + +{{- if .NotesByType.change }} +CHANGES: + +{{range .NotesByType.change -}} +* {{ template "note" . }} +{{ end -}} +{{- end -}} + +{{- if .NotesByType.feature }} +FEATURES: + +{{range .NotesByType.feature -}} +* {{ template "note" . 
}} +{{ end -}} +{{- end -}} + +{{- if .NotesByType.improvement }} +IMPROVEMENTS: + +{{range .NotesByType.improvement -}} +* {{ template "note" . }} +{{ end -}} +{{- end -}} + +{{- if .NotesByType.deprecation }} +DEPRECATIONS: + +{{range .NotesByType.deprecation -}} +* {{ template "note" . }} +{{ end -}} +{{- end -}} + +{{- if .NotesByType.bug }} +BUG FIXES: + +{{range .NotesByType.bug -}} +* {{ template "note" . }} +{{ end -}} +{{- end -}} + diff --git a/changelog/client-counts-1.11.txt b/changelog/client-counts-1.11.txt new file mode 100644 index 0000000..2de278e --- /dev/null +++ b/changelog/client-counts-1.11.txt @@ -0,0 +1,2 @@ +```release-note:feature +**UI Client Count Improvements**: Show monthly client count changes for the billing period, including new clients within a single, historical month diff --git a/changelog/client-counts.txt b/changelog/client-counts.txt new file mode 100644 index 0000000..271414e --- /dev/null +++ b/changelog/client-counts.txt @@ -0,0 +1,3 @@ +```release-note:feature +**UI Client Count Improvements**: Restructures client count dashboard, making use of billing start date to improve accuracy. Adds mount-level distribution and filtering. +``` \ No newline at end of file diff --git a/changelog/diagnose.txt b/changelog/diagnose.txt new file mode 100644 index 0000000..335a70c --- /dev/null +++ b/changelog/diagnose.txt @@ -0,0 +1,3 @@ +```release-note:feature +operator diagnose: a new vault operator command to detect common issues with vault server setups. +``` \ No newline at end of file diff --git a/changelog/go-ver-1110.txt b/changelog/go-ver-1110.txt new file mode 100644 index 0000000..7f43e9f --- /dev/null +++ b/changelog/go-ver-1110.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Bump Go version to 1.17.9. +``` diff --git a/changelog/mount-migration.txt b/changelog/mount-migration.txt new file mode 100644 index 0000000..1e0eec3 --- /dev/null +++ b/changelog/mount-migration.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Mount Migration**: Vault supports moving secrets and auth mounts both within and across namespaces. +``` \ No newline at end of file diff --git a/changelog/note.tmpl b/changelog/note.tmpl new file mode 100644 index 0000000..022546f --- /dev/null +++ b/changelog/note.tmpl @@ -0,0 +1,3 @@ +{{- define "note" -}} +{{.Body}}{{if not (stringHasPrefix .Issue "_")}} [[GH-{{- .Issue -}}](https://github.com/hashicorp/vault/pull/{{- .Issue -}})]{{end}} +{{- end -}} diff --git a/changelog/pki-ui-improvements.txt b/changelog/pki-ui-improvements.txt new file mode 100644 index 0000000..d824033 --- /dev/null +++ b/changelog/pki-ui-improvements.txt @@ -0,0 +1,3 @@ +```release-note:feature +**NEW PKI Workflow in UI**: Completes generally available rollout of new PKI UI that provides smoother mount configuration and a more guided user experience +``` \ No newline at end of file diff --git a/changelog/plugin-versioning.txt b/changelog/plugin-versioning.txt new file mode 100644 index 0000000..cfd77a4 --- /dev/null +++ b/changelog/plugin-versioning.txt @@ -0,0 +1,3 @@ +```release-note:feature +**Plugin Versioning**: Vault supports registering, managing, and running plugins with semantic versions specified. +``` \ No newline at end of file diff --git a/command/agent.go b/command/agent.go new file mode 100644 index 0000000..4e140d9 --- /dev/null +++ b/command/agent.go @@ -0,0 +1,1259 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "context" + "crypto/tls" + "errors" + "flag" + "fmt" + "io" + "net" + "net/http" + "os" + "sort" + "strings" + "sync" + "time" + + systemd "github.com/coreos/go-systemd/daemon" + ctconfig "github.com/hashicorp/consul-template/config" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-secure-stdlib/gatedwriter" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-secure-stdlib/reloadutil" + "github.com/kr/pretty" + "github.com/mitchellh/cli" + "github.com/oklog/run" + "github.com/posener/complete" + "golang.org/x/text/cases" + "golang.org/x/text/language" + "google.golang.org/grpc/test/bufconn" + + "github.com/hashicorp/vault/api" + agentConfig "github.com/hashicorp/vault/command/agent/config" + "github.com/hashicorp/vault/command/agent/exec" + "github.com/hashicorp/vault/command/agent/template" + "github.com/hashicorp/vault/command/agentproxyshared" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/command/agentproxyshared/cache" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" + "github.com/hashicorp/vault/command/agentproxyshared/sink/inmem" + "github.com/hashicorp/vault/command/agentproxyshared/winsvc" + "github.com/hashicorp/vault/helper/logging" + "github.com/hashicorp/vault/helper/metricsutil" + "github.com/hashicorp/vault/helper/useragent" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/internalshared/listenerutil" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/version" +) + +var ( + _ cli.Command = (*AgentCommand)(nil) + _ cli.CommandAutocomplete = (*AgentCommand)(nil) +) + +const ( + // flagNameAgentExitAfterAuth is used as an Agent specific flag to indicate + // that agent should exit after a single successful auth + flagNameAgentExitAfterAuth = "exit-after-auth" + nameAgent = "agent" +) + +type AgentCommand struct { + *BaseCommand + logFlags logFlags + + config *agentConfig.Config + + ShutdownCh chan struct{} + SighupCh chan struct{} + + tlsReloadFuncsLock sync.RWMutex + tlsReloadFuncs []reloadutil.ReloadFunc + + logWriter io.Writer + logGate *gatedwriter.Writer + logger hclog.Logger + + // Telemetry object + metricsHelper *metricsutil.MetricsHelper + + cleanupGuard sync.Once + + startedCh chan struct{} // for tests + reloadedCh chan struct{} // for tests + + flagConfigs []string + flagExitAfterAuth bool + flagTestVerifyOnly bool +} + +func (c *AgentCommand) Synopsis() string { + return "Start a Vault agent" +} + +func (c *AgentCommand) Help() string { + helpText := ` +Usage: vault agent [options] + + This command starts a Vault Agent that can perform automatic authentication + in certain environments. + + Start an agent with a configuration file: + + $ vault agent -config=/etc/vault/config.hcl + + For a full list of examples, please see the documentation. 
+ +` + c.Flags().Help() + return strings.TrimSpace(helpText) +} + +func (c *AgentCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP) + + f := set.NewFlagSet("Command Options") + + // Augment with the log flags + f.addLogFlags(&c.logFlags) + + f.StringSliceVar(&StringSliceVar{ + Name: "config", + Target: &c.flagConfigs, + Completion: complete.PredictOr( + complete.PredictFiles("*.hcl"), + complete.PredictFiles("*.json"), + ), + Usage: "Path to a configuration file. This configuration file should " + + "contain only agent directives.", + }) + + f.BoolVar(&BoolVar{ + Name: flagNameAgentExitAfterAuth, + Target: &c.flagExitAfterAuth, + Default: false, + Usage: "If set to true, the agent will exit with code 0 after a single " + + "successful auth, where success means that a token was retrieved and " + + "all sinks successfully wrote it", + }) + + // Internal-only flags to follow. + // + // Why hello there little source code reader! Welcome to the Vault source + // code. The remaining options are intentionally undocumented and come with + // no warranty or backwards-compatibility promise. Do not use these flags + // in production. Do not build automation using these flags. Unless you are + // developing against Vault, you should not need any of these flags. + f.BoolVar(&BoolVar{ + Name: "test-verify-only", + Target: &c.flagTestVerifyOnly, + Default: false, + Hidden: true, + }) + + // End internal-only flags. + + return set +} + +func (c *AgentCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *AgentCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *AgentCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + // Create a logger. We wrap it in a gated writer so that it doesn't + // start logging too early. + c.logGate = gatedwriter.NewWriter(os.Stderr) + c.logWriter = c.logGate + + if c.logFlags.flagCombineLogs { + c.logWriter = os.Stdout + } + + // Validation + if len(c.flagConfigs) < 1 { + c.UI.Error("Must specify at least one config path using -config") + return 1 + } + + config, err := c.loadConfig(c.flagConfigs) + if err != nil { + c.outputErrors(err) + return 1 + } + + if config.AutoAuth == nil { + c.UI.Info("No auto_auth block found in config; the automatic authentication feature will not be started") + } + + c.applyConfigOverrides(f, config) // This only needs to happen on start-up to aggregate config from flags and env vars + c.config = config + + l, err := c.newLogger() + if err != nil { + c.outputErrors(err) + return 1 + } + + // Update the logger and then base the log writer on that logger. + // Log writer is supplied to consul-template runners for templates and execs. + // We want to ensure that consul-template will honor the settings, for example + // if the -log-format is JSON we want JSON, not a mix of JSON and non-JSON messages.
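+	// (StandardWriter adapts this hclog logger into an io.Writer; with
+	// InferLevels set, level prefixes in lines written by consul-template are
+	// parsed and re-emitted through this logger's own format.)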
+ c.logger = l + c.logWriter = l.StandardWriter(&hclog.StandardLoggerOptions{ + InferLevels: true, + InferLevelsWithTimestamp: true, + }) + + infoKeys := make([]string, 0, 10) + info := make(map[string]string) + info["log level"] = config.LogLevel + infoKeys = append(infoKeys, "log level") + + infoKeys = append(infoKeys, "version") + verInfo := version.GetVersion() + info["version"] = verInfo.FullVersionNumber(false) + if verInfo.Revision != "" { + info["version sha"] = strings.Trim(verInfo.Revision, "'") + infoKeys = append(infoKeys, "version sha") + } + infoKeys = append(infoKeys, "cgo") + info["cgo"] = "disabled" + if version.CgoEnabled { + info["cgo"] = "enabled" + } + + // Tests might not want to start a vault server and just want to verify + // the configuration. + if c.flagTestVerifyOnly { + if os.Getenv("VAULT_TEST_VERIFY_ONLY_DUMP_CONFIG") != "" { + c.UI.Output(fmt.Sprintf( + "\nConfiguration:\n%s\n", + pretty.Sprint(*c.config))) + } + return 0 + } + + // Ignore any setting of Agent's address. This client is used by the Agent + // to reach out to Vault. This should never loop back to agent. + c.flagAgentProxyAddress = "" + client, err := c.Client() + if err != nil { + c.UI.Error(fmt.Sprintf( + "Error fetching client: %v", + err)) + return 1 + } + + serverHealth, err := client.Sys().Health() + if err == nil { + // We don't exit on error here, as this is not worth stopping Agent over + serverVersion := serverHealth.Version + agentVersion := version.GetVersion().VersionNumber() + if serverVersion != agentVersion { + c.UI.Info("==> Note: Vault Agent version does not match Vault server version. " + + fmt.Sprintf("Vault Agent version: %s, Vault server version: %s", agentVersion, serverVersion)) + } + } + + if config.IsDefaultListerDefined() { + // Notably, we cannot know for sure if they are using the API proxy functionality unless + // we log on each API proxy call, which would be too noisy. + // A customer could have a listener defined but only be using e.g. the cache-clear API, + // even though the API proxy is something they have available. + c.UI.Warn("==> Note: Vault Agent will be deprecating API proxy functionality in a future " + + "release, and this functionality has moved to a new subcommand, vault proxy. If you rely on this " + + "functionality, plan to move to Vault Proxy instead.") + } + + // ctx and cancelFunc are passed to the AuthHandler, SinkServer, ExecServer and + // TemplateServer that periodically listen for ctx.Done() to fire and shut + // down accordingly. 
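+	// A hedged sketch of that shared shutdown pattern (illustrative only, not
+	// a verbatim excerpt from those servers; workCh and handle are
+	// placeholders):
+	//
+	//	for {
+	//		select {
+	//		case <-ctx.Done():
+	//			return nil
+	//		case work := <-workCh:
+	//			handle(work)
+	//		}
+	//	}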
+ ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + + // telemetry configuration + inmemMetrics, _, prometheusEnabled, err := configutil.SetupTelemetry(&configutil.SetupTelemetryOpts{ + Config: config.Telemetry, + Ui: c.UI, + ServiceName: "vault", + DisplayName: "Vault", + UserAgent: useragent.AgentString(), + ClusterName: config.ClusterName, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error initializing telemetry: %s", err)) + return 1 + } + c.metricsHelper = metricsutil.NewMetricsHelper(inmemMetrics, prometheusEnabled) + + var method auth.AuthMethod + var sinks []*sink.SinkConfig + var templateNamespace string + if config.AutoAuth != nil { + if client.Headers().Get(consts.NamespaceHeaderName) == "" && config.AutoAuth.Method.Namespace != "" { + client.SetNamespace(config.AutoAuth.Method.Namespace) + } + templateNamespace = client.Headers().Get(consts.NamespaceHeaderName) + + sinkClient, err := client.CloneWithHeaders() + if err != nil { + c.UI.Error(fmt.Sprintf("Error cloning client for file sink: %v", err)) + return 1 + } + + if config.DisableIdleConnsAutoAuth { + sinkClient.SetMaxIdleConnections(-1) + } + + if config.DisableKeepAlivesAutoAuth { + sinkClient.SetDisableKeepAlives(true) + } + + for _, sc := range config.AutoAuth.Sinks { + switch sc.Type { + case "file": + config := &sink.SinkConfig{ + Logger: c.logger.Named("sink.file"), + Config: sc.Config, + Client: sinkClient, + WrapTTL: sc.WrapTTL, + DHType: sc.DHType, + DeriveKey: sc.DeriveKey, + DHPath: sc.DHPath, + AAD: sc.AAD, + } + s, err := file.NewFileSink(config) + if err != nil { + c.UI.Error(fmt.Errorf("error creating file sink: %w", err).Error()) + return 1 + } + config.Sink = s + sinks = append(sinks, config) + default: + c.UI.Error(fmt.Sprintf("Unknown sink type %q", sc.Type)) + return 1 + } + } + + authConfig := &auth.AuthConfig{ + Logger: c.logger.Named(fmt.Sprintf("auth.%s", config.AutoAuth.Method.Type)), + MountPath: config.AutoAuth.Method.MountPath, + Config: config.AutoAuth.Method.Config, + } + method, err = agentproxyshared.GetAutoAuthMethodFromConfig(config.AutoAuth.Method.Type, authConfig, config.Vault.Address) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating %s auth method: %v", config.AutoAuth.Method.Type, err)) + return 1 + } + } + + // We do this after auto-auth has been configured, because we don't want to + // confuse the issue of retries for auth failures which have their own + // config and are handled a bit differently. 
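+	// For reference, the retry count can also come from the agent config
+	// file; a hedged sketch of such a stanza (the value 5 is an arbitrary
+	// example, not a default):
+	//
+	//	vault {
+	//	  retry {
+	//	    num_retries = 5
+	//	  }
+	//	}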
+ if os.Getenv(api.EnvVaultMaxRetries) == "" { + client.SetMaxRetries(ctconfig.DefaultRetryAttempts) + if config.Vault != nil { + if config.Vault.Retry != nil { + client.SetMaxRetries(config.Vault.Retry.NumRetries) + } + } + } + + enforceConsistency := cache.EnforceConsistencyNever + whenInconsistent := cache.WhenInconsistentFail + if config.APIProxy != nil { + switch config.APIProxy.EnforceConsistency { + case "always": + enforceConsistency = cache.EnforceConsistencyAlways + case "never", "": + default: + c.UI.Error(fmt.Sprintf("Unknown api_proxy setting for enforce_consistency: %q", config.APIProxy.EnforceConsistency)) + return 1 + } + + switch config.APIProxy.WhenInconsistent { + case "retry": + whenInconsistent = cache.WhenInconsistentRetry + case "forward": + whenInconsistent = cache.WhenInconsistentForward + case "fail", "": + default: + c.UI.Error(fmt.Sprintf("Unknown api_proxy setting for when_inconsistent: %q", config.APIProxy.WhenInconsistent)) + return 1 + } + } + // Keep Cache configuration for legacy reasons, but error if defined alongside API Proxy + if config.Cache != nil { + switch config.Cache.EnforceConsistency { + case "always": + if enforceConsistency != cache.EnforceConsistencyNever { + c.UI.Error("enforce_consistency configured in both api_proxy and cache blocks. Please remove this configuration from the cache block.") + return 1 + } else { + enforceConsistency = cache.EnforceConsistencyAlways + } + case "never", "": + default: + c.UI.Error(fmt.Sprintf("Unknown cache setting for enforce_consistency: %q", config.Cache.EnforceConsistency)) + return 1 + } + + switch config.Cache.WhenInconsistent { + case "retry": + if whenInconsistent != cache.WhenInconsistentFail { + c.UI.Error("when_inconsistent configured in both api_proxy and cache blocks. Please remove this configuration from the cache block.") + return 1 + } else { + whenInconsistent = cache.WhenInconsistentRetry + } + case "forward": + if whenInconsistent != cache.WhenInconsistentFail { + c.UI.Error("when_inconsistent configured in both api_proxy and cache blocks. Please remove this configuration from the cache block.") + return 1 + } else { + whenInconsistent = cache.WhenInconsistentForward + } + case "fail", "": + default: + c.UI.Error(fmt.Sprintf("Unknown cache setting for when_inconsistent: %q", config.Cache.WhenInconsistent)) + return 1 + } + } + + // Warn if cache _and_ cert auto-auth is enabled but certificates were not + // provided in the auto_auth.method["cert"].config stanza. + if config.Cache != nil && (config.AutoAuth != nil && config.AutoAuth.Method != nil && config.AutoAuth.Method.Type == "cert") { + _, okCertFile := config.AutoAuth.Method.Config["client_cert"] + _, okCertKey := config.AutoAuth.Method.Config["client_key"] + + // If neither of these exists in the cert stanza, agent will use the + // certs from the vault stanza. + if !okCertFile && !okCertKey { + c.UI.Warn(wrapAtLength("WARNING! Cache is enabled and using the same certificates " + + "from the 'cert' auto-auth method specified in the 'vault' stanza. Consider " + + "specifying certificate information in the 'cert' auto-auth's config stanza.")) + } + + } + + // Output the header that the agent has started + if !c.logFlags.flagCombineLogs { + c.UI.Output("==> Vault Agent started! 
Log data will stream in below:\n") + } + + var leaseCache *cache.LeaseCache + var previousToken string + + proxyClient, err := client.CloneWithHeaders() + if err != nil { + c.UI.Error(fmt.Sprintf("Error cloning client for proxying: %v", err)) + return 1 + } + + if config.DisableIdleConnsAPIProxy { + proxyClient.SetMaxIdleConnections(-1) + } + + if config.DisableKeepAlivesAPIProxy { + proxyClient.SetDisableKeepAlives(true) + } + + apiProxyLogger := c.logger.Named("apiproxy") + + // The API proxy to be used, if listeners are configured + apiProxy, err := cache.NewAPIProxy(&cache.APIProxyConfig{ + Client: proxyClient, + Logger: apiProxyLogger, + EnforceConsistency: enforceConsistency, + WhenInconsistentAction: whenInconsistent, + UserAgentStringFunction: useragent.AgentProxyStringWithProxiedUserAgent, + UserAgentString: useragent.AgentProxyString(), + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating API proxy: %v", err)) + return 1 + } + + // Parse agent cache configurations + if config.Cache != nil { + cacheLogger := c.logger.Named("cache") + + // Create the lease cache proxier and set its underlying proxier to + // the API proxier. + leaseCache, err = cache.NewLeaseCache(&cache.LeaseCacheConfig{ + Client: proxyClient, + BaseContext: ctx, + Proxier: apiProxy, + Logger: cacheLogger.Named("leasecache"), + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating lease cache: %v", err)) + return 1 + } + + // Configure persistent storage and add to LeaseCache + if config.Cache.Persist != nil { + deferFunc, oldToken, err := agentproxyshared.AddPersistentStorageToLeaseCache(ctx, leaseCache, config.Cache.Persist, cacheLogger) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating persistent cache: %v", err)) + return 1 + } + previousToken = oldToken + if deferFunc != nil { + defer deferFunc() + } + } + } + + var listeners []net.Listener + + // If there are templates, add an in-process listener + if len(config.Templates) > 0 || len(config.EnvTemplates) > 0 { + config.Listeners = append(config.Listeners, &configutil.Listener{Type: listenerutil.BufConnType}) + } + + // Ensure we've added all the reload funcs for TLS before anyone triggers a reload. + c.tlsReloadFuncsLock.Lock() + + for i, lnConfig := range config.Listeners { + var ln net.Listener + var tlsCfg *tls.Config + + if lnConfig.Type == listenerutil.BufConnType { + inProcListener := bufconn.Listen(1024 * 1024) + if config.Cache != nil { + config.Cache.InProcDialer = listenerutil.NewBufConnWrapper(inProcListener) + } + ln = inProcListener + } else { + lnBundle, err := cache.StartListener(lnConfig) + if err != nil { + c.UI.Error(fmt.Sprintf("Error starting listener: %v", err)) + return 1 + } + + tlsCfg = lnBundle.TLSConfig + ln = lnBundle.Listener + + // Track the reload func, so we can reload later if needed. 
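+			// (reloadCerts, defined later in this file, walks this slice under
+			// tlsReloadFuncsLock and skips nil entries, which is what non-TLS
+			// listeners contribute.)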
+ c.tlsReloadFuncs = append(c.tlsReloadFuncs, lnBundle.TLSReloadFunc) + } + + listeners = append(listeners, ln) + + proxyVaultToken := true + var inmemSink sink.Sink + if config.APIProxy != nil { + if config.APIProxy.UseAutoAuthToken { + apiProxyLogger.Debug("auto-auth token is allowed to be used; configuring inmem sink") + inmemSink, err = inmem.New(&sink.SinkConfig{ + Logger: apiProxyLogger, + }, leaseCache) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating inmem sink for cache: %v", err)) + return 1 + } + sinks = append(sinks, &sink.SinkConfig{ + Logger: apiProxyLogger, + Sink: inmemSink, + }) + } + proxyVaultToken = !config.APIProxy.ForceAutoAuthToken + } + + var muxHandler http.Handler + if leaseCache != nil { + muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, leaseCache, inmemSink, proxyVaultToken) + } else { + muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, apiProxy, inmemSink, proxyVaultToken) + } + + // Parse 'require_request_header' listener config option, and wrap + // the request handler if necessary + if lnConfig.RequireRequestHeader && ("metrics_only" != lnConfig.Role) { + muxHandler = verifyRequestHeader(muxHandler) + } + + // Create a muxer and add paths relevant for the lease cache layer + mux := http.NewServeMux() + quitEnabled := lnConfig.AgentAPI != nil && lnConfig.AgentAPI.EnableQuit + + mux.Handle(consts.AgentPathMetrics, c.handleMetrics()) + if "metrics_only" != lnConfig.Role { + mux.Handle(consts.AgentPathCacheClear, leaseCache.HandleCacheClear(ctx)) + mux.Handle(consts.AgentPathQuit, c.handleQuit(quitEnabled)) + mux.Handle("/", muxHandler) + } + + scheme := "https://" + if tlsCfg == nil { + scheme = "http://" + } + if ln.Addr().Network() == "unix" { + scheme = "unix://" + } + + infoKey := fmt.Sprintf("api address %d", i+1) + info[infoKey] = scheme + ln.Addr().String() + infoKeys = append(infoKeys, infoKey) + + server := &http.Server{ + Addr: ln.Addr().String(), + TLSConfig: tlsCfg, + Handler: mux, + ReadHeaderTimeout: 10 * time.Second, + ReadTimeout: 30 * time.Second, + IdleTimeout: 5 * time.Minute, + ErrorLog: apiProxyLogger.StandardLogger(nil), + } + + go server.Serve(ln) + } + + c.tlsReloadFuncsLock.Unlock() + + // Ensure that listeners are closed at all the exits + listenerCloseFunc := func() { + for _, ln := range listeners { + ln.Close() + } + } + defer c.cleanupGuard.Do(listenerCloseFunc) + + // Inform any tests that the server is ready + if c.startedCh != nil { + close(c.startedCh) + } + + var g run.Group + + g.Add(func() error { + for { + select { + case <-c.SighupCh: + c.UI.Output("==> Vault Agent config reload triggered") + err := c.reloadConfig(c.flagConfigs) + if err != nil { + c.outputErrors(err) + } + // Send the 'reloaded' message on the relevant channel + select { + case c.reloadedCh <- struct{}{}: + default: + } + case <-ctx.Done(): + return nil + } + } + }, func(error) { + cancelFunc() + }) + + // This run group watches for signal termination + g.Add(func() error { + for { + select { + case <-c.ShutdownCh: + c.UI.Output("==> Vault Agent shutdown triggered") + // Notify systemd that the server is shutting down + // Let the lease cache know this is a shutdown; no need to evict everything + if leaseCache != nil { + leaseCache.SetShuttingDown(true) + } + return nil + case <-ctx.Done(): + return nil + case <-winsvc.ShutdownChannel(): + return nil + } + } + }, func(error) {}) + + // Start auto-auth and sink servers + if method != nil { + enableTemplateTokenCh := len(config.Templates) > 0 + enableEnvTemplateTokenCh := 
len(config.EnvTemplates) > 0 + + // Auth Handler is going to set its own retry values, so we want to + // work on a copy of the client to not affect other subsystems. + ahClient, err := c.client.CloneWithHeaders() + if err != nil { + c.UI.Error(fmt.Sprintf("Error cloning client for auth handler: %v", err)) + return 1 + } + + if config.DisableIdleConnsAutoAuth { + ahClient.SetMaxIdleConnections(-1) + } + + if config.DisableKeepAlivesAutoAuth { + ahClient.SetDisableKeepAlives(true) + } + + ah := auth.NewAuthHandler(&auth.AuthHandlerConfig{ + Logger: c.logger.Named("auth.handler"), + Client: ahClient, + WrapTTL: config.AutoAuth.Method.WrapTTL, + MinBackoff: config.AutoAuth.Method.MinBackoff, + MaxBackoff: config.AutoAuth.Method.MaxBackoff, + EnableReauthOnNewCredentials: config.AutoAuth.EnableReauthOnNewCredentials, + EnableTemplateTokenCh: enableTemplateTokenCh, + EnableExecTokenCh: enableEnvTemplateTokenCh, + Token: previousToken, + ExitOnError: config.AutoAuth.Method.ExitOnError, + UserAgent: useragent.AgentAutoAuthString(), + MetricsSignifier: "agent", + }) + + ss := sink.NewSinkServer(&sink.SinkServerConfig{ + Logger: c.logger.Named("sink.server"), + Client: ahClient, + ExitAfterAuth: config.ExitAfterAuth, + }) + + ts := template.NewServer(&template.ServerConfig{ + Logger: c.logger.Named("template.server"), + LogLevel: c.logger.GetLevel(), + LogWriter: c.logWriter, + AgentConfig: c.config, + Namespace: templateNamespace, + ExitAfterAuth: config.ExitAfterAuth, + }) + + es := exec.NewServer(&exec.ServerConfig{ + AgentConfig: c.config, + Namespace: templateNamespace, + Logger: c.logger.Named("exec.server"), + LogLevel: c.logger.GetLevel(), + LogWriter: c.logWriter, + }) + + g.Add(func() error { + return ah.Run(ctx, method) + }, func(error) { + // Let the lease cache know this is a shutdown; no need to evict + // everything + if leaseCache != nil { + leaseCache.SetShuttingDown(true) + } + cancelFunc() + }) + + g.Add(func() error { + err := ss.Run(ctx, ah.OutputCh, sinks) + c.logger.Info("sinks finished, exiting") + + // Start goroutine to drain from ah.OutputCh from this point onward + // to prevent ah.Run from being blocked. + go func() { + for { + select { + case <-ctx.Done(): + return + case <-ah.OutputCh: + } + } + }() + + // Wait until templates are rendered + if len(config.Templates) > 0 { + <-ts.DoneCh + } + + return err + }, func(error) { + // Let the lease cache know this is a shutdown; no need to evict + // everything + if leaseCache != nil { + leaseCache.SetShuttingDown(true) + } + cancelFunc() + }) + + g.Add(func() error { + return ts.Run(ctx, ah.TemplateTokenCh, config.Templates) + }, func(error) { + // Let the lease cache know this is a shutdown; no need to evict + // everything + if leaseCache != nil { + leaseCache.SetShuttingDown(true) + } + cancelFunc() + ts.Stop() + }) + + g.Add(func() error { + return es.Run(ctx, ah.ExecTokenCh) + }, func(err error) { + // Let the lease cache know this is a shutdown; no need to evict + // everything + if leaseCache != nil { + leaseCache.SetShuttingDown(true) + } + cancelFunc() + }) + + } + + // Server configuration output + padding := 24 + sort.Strings(infoKeys) + caser := cases.Title(language.English) + c.UI.Output("==> Vault Agent configuration:\n") + for _, k := range infoKeys { + c.UI.Output(fmt.Sprintf( + "%s%s: %s", + strings.Repeat(" ", padding-len(k)), + caser.String(k), + info[k])) + } + c.UI.Output("") + + // Release the log gate. 
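+	// (Flushing replays any log lines buffered by the gated writer during
+	// startup, so they appear only after the configuration banner above.)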
+ c.logGate.Flush() + + // Write out the PID to the file now that server has successfully started + if err := c.storePidFile(config.PidFile); err != nil { + c.UI.Error(fmt.Sprintf("Error storing PID: %s", err)) + return 1 + } + + // Notify systemd that the server is ready (if applicable) + c.notifySystemd(systemd.SdNotifyReady) + + defer func() { + if err := c.removePidFile(config.PidFile); err != nil { + c.UI.Error(fmt.Sprintf("Error deleting the PID file: %s", err)) + } + }() + + var exitCode int + if err := g.Run(); err != nil { + var processExitError *exec.ProcessExitError + if errors.As(err, &processExitError) { + exitCode = processExitError.ExitCode + } else { + exitCode = 1 + } + + if exitCode != 0 { + c.logger.Error("runtime error encountered", "error", err, "exitCode", exitCode) + c.UI.Error("Error encountered during run, refer to logs for more details.") + } + } + + c.notifySystemd(systemd.SdNotifyStopping) + + return exitCode +} + +// applyConfigOverrides ensures that the config object accurately reflects the desired +// settings as configured by the user. It applies the relevant config setting based +// on the precedence (env var overrides file config, cli overrides env var). +// It mutates the config object supplied. +func (c *AgentCommand) applyConfigOverrides(f *FlagSets, config *agentConfig.Config) { + if config.Vault == nil { + config.Vault = &agentConfig.Vault{} + } + + f.applyLogConfigOverrides(config.SharedConfig) + + f.Visit(func(fl *flag.Flag) { + if fl.Name == flagNameAgentExitAfterAuth { + config.ExitAfterAuth = c.flagExitAfterAuth + } + }) + + c.setStringFlag(f, config.Vault.Address, &StringVar{ + Name: flagNameAddress, + Target: &c.flagAddress, + Default: "https://127.0.0.1:8200", + EnvVar: api.EnvVaultAddress, + }) + config.Vault.Address = c.flagAddress + c.setStringFlag(f, config.Vault.CACert, &StringVar{ + Name: flagNameCACert, + Target: &c.flagCACert, + Default: "", + EnvVar: api.EnvVaultCACert, + }) + config.Vault.CACert = c.flagCACert + c.setStringFlag(f, config.Vault.CAPath, &StringVar{ + Name: flagNameCAPath, + Target: &c.flagCAPath, + Default: "", + EnvVar: api.EnvVaultCAPath, + }) + config.Vault.CAPath = c.flagCAPath + c.setStringFlag(f, config.Vault.ClientCert, &StringVar{ + Name: flagNameClientCert, + Target: &c.flagClientCert, + Default: "", + EnvVar: api.EnvVaultClientCert, + }) + config.Vault.ClientCert = c.flagClientCert + c.setStringFlag(f, config.Vault.ClientKey, &StringVar{ + Name: flagNameClientKey, + Target: &c.flagClientKey, + Default: "", + EnvVar: api.EnvVaultClientKey, + }) + config.Vault.ClientKey = c.flagClientKey + c.setBoolFlag(f, config.Vault.TLSSkipVerify, &BoolVar{ + Name: flagNameTLSSkipVerify, + Target: &c.flagTLSSkipVerify, + Default: false, + EnvVar: api.EnvVaultSkipVerify, + }) + config.Vault.TLSSkipVerify = c.flagTLSSkipVerify + c.setStringFlag(f, config.Vault.TLSServerName, &StringVar{ + Name: flagTLSServerName, + Target: &c.flagTLSServerName, + Default: "", + EnvVar: api.EnvVaultTLSServerName, + }) + config.Vault.TLSServerName = c.flagTLSServerName +} + +// verifyRequestHeader wraps an http.Handler inside a Handler that checks for +// the request header that is used for SSRF protection. 
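+// Assuming consts.RequestHeaderName resolves to the usual X-Vault-Request
+// header, a passing client call would look roughly like this (address and
+// path are placeholders):
+//
+//	curl --header "X-Vault-Request: true" \
+//	    http://127.0.0.1:8200/v1/secret/data/example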
+func verifyRequestHeader(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if val, ok := r.Header[consts.RequestHeaderName]; !ok || len(val) != 1 || val[0] != "true" { + logical.RespondError(w, + http.StatusPreconditionFailed, + fmt.Errorf("missing %q header", consts.RequestHeaderName)) + return + } + + handler.ServeHTTP(w, r) + }) +} + +func (c *AgentCommand) notifySystemd(status string) { + sent, err := systemd.SdNotify(false, status) + if err != nil { + c.logger.Error("error notifying systemd", "error", err) + } else { + if sent { + c.logger.Debug("sent systemd notification", "notification", status) + } else { + c.logger.Debug("would have sent systemd notification (systemd not present)", "notification", status) + } + } +} + +func (c *AgentCommand) setStringFlag(f *FlagSets, configVal string, fVar *StringVar) { + var isFlagSet bool + f.Visit(func(f *flag.Flag) { + if f.Name == fVar.Name { + isFlagSet = true + } + }) + + flagEnvValue, flagEnvSet := os.LookupEnv(fVar.EnvVar) + switch { + case isFlagSet: + // Don't do anything as the flag is already set from the command line + case flagEnvSet: + // Use value from env var + *fVar.Target = flagEnvValue + case configVal != "": + // Use value from config + *fVar.Target = configVal + default: + // Use the default value + *fVar.Target = fVar.Default + } +} + +func (c *AgentCommand) setBoolFlag(f *FlagSets, configVal bool, fVar *BoolVar) { + var isFlagSet bool + f.Visit(func(f *flag.Flag) { + if f.Name == fVar.Name { + isFlagSet = true + } + }) + + flagEnvValue, flagEnvSet := os.LookupEnv(fVar.EnvVar) + switch { + case isFlagSet: + // Don't do anything as the flag is already set from the command line + case flagEnvSet: + // Use value from env var + *fVar.Target = flagEnvValue != "" + case configVal: + // Use value from config + *fVar.Target = configVal + default: + // Use the default value + *fVar.Target = fVar.Default + } +} + +// storePidFile is used to write out our PID to a file if necessary +func (c *AgentCommand) storePidFile(pidPath string) error { + // Quit fast if no pidfile + if pidPath == "" { + return nil + } + + // Open the PID file + pidFile, err := os.OpenFile(pidPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o600) + if err != nil { + return fmt.Errorf("could not open pid file: %w", err) + } + defer pidFile.Close() + + // Write out the PID + pid := os.Getpid() + _, err = pidFile.WriteString(fmt.Sprintf("%d", pid)) + if err != nil { + return fmt.Errorf("could not write to pid file: %w", err) + } + return nil +} + +// removePidFile is used to cleanup the PID file if necessary +func (c *AgentCommand) removePidFile(pidPath string) error { + if pidPath == "" { + return nil + } + return os.Remove(pidPath) +} + +func (c *AgentCommand) handleMetrics() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + logical.RespondError(w, http.StatusMethodNotAllowed, nil) + return + } + + if err := r.ParseForm(); err != nil { + logical.RespondError(w, http.StatusBadRequest, err) + return + } + + format := r.Form.Get("format") + if format == "" { + format = metricsutil.FormatFromRequest(&logical.Request{ + Headers: r.Header, + }) + } + + resp := c.metricsHelper.ResponseForFormat(format) + + status := resp.Data[logical.HTTPStatusCode].(int) + w.Header().Set("Content-Type", resp.Data[logical.HTTPContentType].(string)) + switch v := resp.Data[logical.HTTPRawBody].(type) { + case string: + w.WriteHeader(status) + 
w.Write([]byte(v)) + case []byte: + w.WriteHeader(status) + w.Write(v) + default: + logical.RespondError(w, http.StatusInternalServerError, fmt.Errorf("wrong response returned")) + } + }) +} + +func (c *AgentCommand) handleQuit(enabled bool) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !enabled { + w.WriteHeader(http.StatusNotFound) + return + } + + switch r.Method { + case http.MethodPost: + default: + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + c.logger.Debug("received quit request") + close(c.ShutdownCh) + }) +} + +// newLogger creates a logger based on the parsed config fields on the AgentCommand struct. +func (c *AgentCommand) newLogger() (hclog.InterceptLogger, error) { + if c.config == nil { + return nil, fmt.Errorf("cannot create logger, no config") + } + + var errs *multierror.Error + + // Parse all the log related config + logLevel, err := logging.ParseLogLevel(c.config.LogLevel) + if err != nil { + errs = multierror.Append(errs, err) + } + + logFormat, err := logging.ParseLogFormat(c.config.LogFormat) + if err != nil { + errs = multierror.Append(errs, err) + } + + logRotateDuration, err := parseutil.ParseDurationSecond(c.config.LogRotateDuration) + if err != nil { + errs = multierror.Append(errs, err) + } + + if errs != nil { + return nil, errs + } + + logCfg, err := logging.NewLogConfig(nameAgent) + if err != nil { + return nil, err + } + logCfg.Name = nameAgent + logCfg.LogLevel = logLevel + logCfg.LogFormat = logFormat + logCfg.LogFilePath = c.config.LogFile + logCfg.LogRotateDuration = logRotateDuration + logCfg.LogRotateBytes = c.config.LogRotateBytes + logCfg.LogRotateMaxFiles = c.config.LogRotateMaxFiles + + l, err := logging.Setup(logCfg, c.logWriter) + if err != nil { + return nil, err + } + + return l, nil +} + +// loadConfig attempts to generate an Agent config from the file(s) specified. +func (c *AgentCommand) loadConfig(paths []string) (*agentConfig.Config, error) { + var errs *multierror.Error + cfg := agentConfig.NewConfig() + + for _, configPath := range paths { + configFromPath, err := agentConfig.LoadConfig(configPath) + if err != nil { + errs = multierror.Append(errs, fmt.Errorf("error loading configuration from %s: %w", configPath, err)) + } else { + cfg = cfg.Merge(configFromPath) + } + } + + if errs != nil { + return nil, errs + } + + if err := cfg.ValidateConfig(); err != nil { + return nil, fmt.Errorf("error validating configuration: %w", err) + } + + return cfg, nil +} + +// reloadConfig will attempt to reload the config from file(s) and adjust certain +// config values without requiring a restart of the Vault Agent. +// If config is retrieved without error it is stored in the config field of the AgentCommand. +// This operation is not atomic and could result in an updated config whose settings are only partially applied. +// The error returned from this func may be a multierror. +// This function will most likely be called due to Vault Agent receiving a SIGHUP signal. +// Currently only reloading the following is supported: +// * log level +// * TLS certs for listeners +func (c *AgentCommand) reloadConfig(paths []string) error { + // Notify systemd that the server is reloading + c.notifySystemd(systemd.SdNotifyReloading) + defer c.notifySystemd(systemd.SdNotifyReady) + + var errors error + + // Reload the config + cfg, err := c.loadConfig(paths) + if err != nil { + // Returning a single error as we won't continue with bad config and won't 'commit' it.
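+		// (Past this point, each reload step instead appends to a multierror,
+		// so a log-level failure doesn't mask a cert-reload failure.)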
+		return err
+	}
+	c.config = cfg
+
+	// Update the log level
+	err = c.reloadLogLevel()
+	if err != nil {
+		errors = multierror.Append(errors, err)
+	}
+
+	// Update certs
+	err = c.reloadCerts()
+	if err != nil {
+		errors = multierror.Append(errors, err)
+	}
+
+	return errors
+}
+
+// reloadLogLevel will attempt to update the log level for the logger attached
+// to the AgentCommand struct using the value currently set in config.
+func (c *AgentCommand) reloadLogLevel() error {
+	logLevel, err := logging.ParseLogLevel(c.config.LogLevel)
+	if err != nil {
+		return err
+	}
+
+	c.logger.SetLevel(logLevel)
+
+	return nil
+}
+
+// reloadCerts will attempt to reload certificates using the reload funcs that
+// were provided when the listeners were configured; only funcs that were
+// appended to the AgentCommand's tlsReloadFuncs slice will be invoked.
+// This function returns a multierror type so that every func can report an error
+// if it encounters one.
+func (c *AgentCommand) reloadCerts() error {
+	var errors error
+
+	c.tlsReloadFuncsLock.RLock()
+	defer c.tlsReloadFuncsLock.RUnlock()
+
+	for _, reloadFunc := range c.tlsReloadFuncs {
+		// Non-TLS listeners will have a nil reload func.
+		if reloadFunc != nil {
+			err := reloadFunc()
+			if err != nil {
+				errors = multierror.Append(errors, err)
+			}
+		}
+	}
+
+	return errors
+}
+
+// outputErrors takes an error or multierror and outputs each error to the UI
+func (c *AgentCommand) outputErrors(err error) {
+	if err != nil {
+		if me, ok := err.(*multierror.Error); ok {
+			for _, err := range me.Errors {
+				c.UI.Error(err.Error())
+			}
+		} else {
+			c.UI.Error(err.Error())
+		}
+	}
+}
diff --git a/command/agent/README.md b/command/agent/README.md
new file mode 100644
index 0000000..02ef021
--- /dev/null
+++ b/command/agent/README.md
@@ -0,0 +1,15 @@
+# Vault Agent
+
+Vault Agent is a client daemon that provides Auto-Auth, Caching, and Template
+features.
+
+Vault Agent provides a number of different helper features, specifically
+addressing the following challenges:
+
+- Automatic authentication
+- Secure delivery/storage of tokens
+- Lifecycle management of these tokens (renewal & re-authentication)
+
+See the usage documentation on the Vault website here:
+
+- https://www.vaultproject.io/docs/agent/
diff --git a/command/agent/alicloud_end_to_end_test.go b/command/agent/alicloud_end_to_end_test.go
new file mode 100644
index 0000000..0f5cdfb
--- /dev/null
+++ b/command/agent/alicloud_end_to_end_test.go
@@ -0,0 +1,228 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package agent + +import ( + "context" + "io/ioutil" + "os" + "strings" + "testing" + "time" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/providers" + "github.com/aliyun/alibaba-cloud-sdk-go/services/sts" + hclog "github.com/hashicorp/go-hclog" + uuid "github.com/hashicorp/go-uuid" + vaultalicloud "github.com/hashicorp/vault-plugin-auth-alicloud" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + agentalicloud "github.com/hashicorp/vault/command/agentproxyshared/auth/alicloud" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" + "github.com/hashicorp/vault/helper/testhelpers" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +const ( + envVarAlicloudAccessKey = "ALICLOUD_TEST_ACCESS_KEY" + envVarAlicloudSecretKey = "ALICLOUD_TEST_SECRET_KEY" + envVarAlicloudRoleArn = "ALICLOUD_TEST_ROLE_ARN" +) + +func TestAliCloudEndToEnd(t *testing.T) { + if !runAcceptanceTests { + t.SkipNow() + } + + // Ensure each cred is populated. + credNames := []string{ + envVarAlicloudAccessKey, + envVarAlicloudSecretKey, + envVarAlicloudRoleArn, + } + testhelpers.SkipUnlessEnvVarsSet(t, credNames) + + logger := logging.NewVaultLogger(hclog.Trace) + coreConfig := &vault.CoreConfig{ + Logger: logger, + CredentialBackends: map[string]logical.Factory{ + "alicloud": vaultalicloud.Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + client := cluster.Cores[0].Client + + // Setup Vault + if err := client.Sys().EnableAuthWithOptions("alicloud", &api.EnableAuthOptions{ + Type: "alicloud", + }); err != nil { + t.Fatal(err) + } + + if _, err := client.Logical().Write("auth/alicloud/role/test", map[string]interface{}{ + "arn": os.Getenv(envVarAlicloudRoleArn), + }); err != nil { + t.Fatal(err) + } + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + + // We're going to feed alicloud auth creds via env variables. 
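+	// Editor's note: setAliCloudEnvCreds (defined below) assumes the test role
+	// via STS and exports the resulting temporary credentials through the SDK's
+	// provider env vars, which the agent's alicloud auth method re-reads on its
+	// 1-second credential_poll_interval. Running this acceptance test therefore
+	// needs only the long-lived inputs, e.g. (placeholder values):
+	//
+	//	export ALICLOUD_TEST_ACCESS_KEY=LTAI...
+	//	export ALICLOUD_TEST_SECRET_KEY=...
+	//	export ALICLOUD_TEST_ROLE_ARN=acs:ram::1234567890123456:role/vault-test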
+ if err := setAliCloudEnvCreds(); err != nil { + t.Fatal(err) + } + defer func() { + if err := unsetAliCloudEnvCreds(); err != nil { + t.Fatal(err) + } + }() + + am, err := agentalicloud.NewAliCloudAuthMethod(&auth.AuthConfig{ + Logger: logger.Named("auth.alicloud"), + MountPath: "auth/alicloud", + Config: map[string]interface{}{ + "role": "test", + "region": "us-west-1", + "credential_poll_interval": 1, + }, + }) + if err != nil { + t.Fatal(err) + } + + ahConfig := &auth.AuthHandlerConfig{ + Logger: logger.Named("auth.handler"), + Client: client, + } + + ah := auth.NewAuthHandler(ahConfig) + errCh := make(chan error) + go func() { + errCh <- ah.Run(ctx, am) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + tmpFile, err := ioutil.TempFile("", "auth.tokensink.test.") + if err != nil { + t.Fatal(err) + } + tokenSinkFileName := tmpFile.Name() + tmpFile.Close() + os.Remove(tokenSinkFileName) + t.Logf("output: %s", tokenSinkFileName) + + config := &sink.SinkConfig{ + Logger: logger.Named("sink.file"), + Config: map[string]interface{}{ + "path": tokenSinkFileName, + }, + WrapTTL: 10 * time.Second, + } + + fs, err := file.NewFileSink(config) + if err != nil { + t.Fatal(err) + } + config.Sink = fs + + ss := sink.NewSinkServer(&sink.SinkServerConfig{ + Logger: logger.Named("sink.server"), + Client: client, + }) + go func() { + errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + // This has to be after the other defers so it happens first. It allows + // successful test runs to immediately cancel all of the runner goroutines + // and unblock any of the blocking defer calls by the runner's DoneCh that + // comes before this and avoid successful tests from taking the entire + // timeout duration. + defer cancel() + + if stat, err := os.Lstat(tokenSinkFileName); err == nil { + t.Fatalf("expected err but got %s", stat) + } else if !os.IsNotExist(err) { + t.Fatal("expected notexist err") + } + + // Wait 2 seconds for the env variables to be detected and an auth to be generated. 
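+	// Editor's note: a fixed sleep keeps the test simple, but it races the
+	// 1-second credential poll plus the login round-trip; the approle tests in
+	// this package use the sturdier pattern of polling the sink file against a
+	// deadline, roughly:
+	//
+	//	deadline := time.Now().Add(10 * time.Second)
+	//	for time.Now().Before(deadline) {
+	//		if _, err := os.Stat(tokenSinkFileName); err == nil {
+	//			break
+	//		}
+	//		time.Sleep(250 * time.Millisecond)
+	//	}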
+ time.Sleep(time.Second * 2) + + token, err := readToken(tokenSinkFileName) + if err != nil { + t.Fatal(err) + } + + if token.Token == "" { + t.Fatal("expected token but didn't receive it") + } +} + +func setAliCloudEnvCreds() error { + config := sdk.NewConfig() + config.Scheme = "https" + client, err := sts.NewClientWithOptions("us-west-1", config, credentials.NewAccessKeyCredential(os.Getenv(envVarAlicloudAccessKey), os.Getenv(envVarAlicloudSecretKey))) + if err != nil { + return err + } + roleSessionName, err := uuid.GenerateUUID() + if err != nil { + return err + } + assumeRoleReq := sts.CreateAssumeRoleRequest() + assumeRoleReq.RoleArn = os.Getenv(envVarAlicloudRoleArn) + assumeRoleReq.RoleSessionName = strings.ReplaceAll(roleSessionName, "-", "") + assumeRoleResp, err := client.AssumeRole(assumeRoleReq) + if err != nil { + return err + } + + if err := os.Setenv(providers.EnvVarAccessKeyID, assumeRoleResp.Credentials.AccessKeyId); err != nil { + return err + } + if err := os.Setenv(providers.EnvVarAccessKeySecret, assumeRoleResp.Credentials.AccessKeySecret); err != nil { + return err + } + return os.Setenv(providers.EnvVarAccessKeyStsToken, assumeRoleResp.Credentials.SecurityToken) +} + +func unsetAliCloudEnvCreds() error { + if err := os.Unsetenv(providers.EnvVarAccessKeyID); err != nil { + return err + } + if err := os.Unsetenv(providers.EnvVarAccessKeySecret); err != nil { + return err + } + return os.Unsetenv(providers.EnvVarAccessKeyStsToken) +} diff --git a/command/agent/approle_end_to_end_test.go b/command/agent/approle_end_to_end_test.go new file mode 100644 index 0000000..515a13e --- /dev/null +++ b/command/agent/approle_end_to_end_test.go @@ -0,0 +1,812 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package agent + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "strings" + "testing" + "time" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + credAppRole "github.com/hashicorp/vault/builtin/credential/approle" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + agentapprole "github.com/hashicorp/vault/command/agentproxyshared/auth/approle" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +func TestAppRoleEndToEnd(t *testing.T) { + t.Parallel() + + testCases := []struct { + removeSecretIDFile bool + bindSecretID bool + secretIDLess bool + expectToken bool + }{ + // default behaviour => token expected + {false, true, false, true}, + {true, true, false, true}, + + //bindSecretID=false, wrong secret provided => token expected + //(vault ignores the supplied secret_id if bind_secret_id=false) + {false, false, false, true}, + {true, false, false, true}, + + // bindSecretID=false, secret not provided => token expected + {false, false, true, true}, + {true, false, true, true}, + + // bindSecretID=true, secret not provided => token not expected + {false, true, true, false}, + {true, true, true, false}, + } + + for _, tc := range testCases { + secretFileAction := "preserve" + if tc.removeSecretIDFile { + secretFileAction = "remove" + } + tc := tc // capture range variable + t.Run(fmt.Sprintf("%s_secret_id_file bindSecretID=%v secretIDLess=%v expectToken=%v", secretFileAction, tc.bindSecretID, tc.secretIDLess, tc.expectToken), func(t 
*testing.T) { + t.Parallel() + testAppRoleEndToEnd(t, tc.removeSecretIDFile, tc.bindSecretID, tc.secretIDLess, tc.expectToken) + }) + } +} + +func testAppRoleEndToEnd(t *testing.T, removeSecretIDFile bool, bindSecretID bool, secretIDLess bool, expectToken bool) { + var err error + logger := logging.NewVaultLogger(log.Trace) + coreConfig := &vault.CoreConfig{ + DisableMlock: true, + DisableCache: true, + Logger: log.NewNullLogger(), + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + + vault.TestWaitActive(t, cores[0].Core) + + client := cores[0].Client + + err = client.Sys().EnableAuthWithOptions("approle", &api.EnableAuthOptions{ + Type: "approle", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Write("auth/approle/role/test1", addConstraints(!bindSecretID, map[string]interface{}{ + "bind_secret_id": bindSecretID, + "token_ttl": "6s", + "token_max_ttl": "10s", + })) + + logger.Trace("vault configured with", "bind_secret_id", bindSecretID) + + if err != nil { + t.Fatal(err) + } + + secret := "" + secretID1 := "" + secretID2 := "" + if bindSecretID { + resp, err := client.Logical().Write("auth/approle/role/test1/secret-id", nil) + if err != nil { + t.Fatal(err) + } + secretID1 = resp.Data["secret_id"].(string) + } else { + logger.Trace("skipped write to auth/approle/role/test1/secret-id") + } + resp, err := client.Logical().Read("auth/approle/role/test1/role-id") + if err != nil { + t.Fatal(err) + } + roleID1 := resp.Data["role_id"].(string) + + _, err = client.Logical().Write("auth/approle/role/test2", addConstraints(!bindSecretID, map[string]interface{}{ + "bind_secret_id": bindSecretID, + "token_ttl": "6s", + "token_max_ttl": "10s", + })) + if err != nil { + t.Fatal(err) + } + if bindSecretID { + resp, err = client.Logical().Write("auth/approle/role/test2/secret-id", nil) + if err != nil { + t.Fatal(err) + } + secretID2 = resp.Data["secret_id"].(string) + } else { + logger.Trace("skipped write to auth/approle/role/test2/secret-id") + } + resp, err = client.Logical().Read("auth/approle/role/test2/role-id") + if err != nil { + t.Fatal(err) + } + roleID2 := resp.Data["role_id"].(string) + + rolef, err := ioutil.TempFile("", "auth.role-id.test.") + if err != nil { + t.Fatal(err) + } + role := rolef.Name() + rolef.Close() // WriteFile doesn't need it open + defer os.Remove(role) + t.Logf("input role_id_file_path: %s", role) + if bindSecretID { + secretf, err := ioutil.TempFile("", "auth.secret-id.test.") + if err != nil { + t.Fatal(err) + } + secret = secretf.Name() + secretf.Close() + defer os.Remove(secret) + t.Logf("input secret_id_file_path: %s", secret) + } else { + logger.Trace("skipped writing tempfile auth.secret-id.test.") + } + // We close these right away because we're just basically testing + // permissions and finding a usable file name + ouf, err := ioutil.TempFile("", "auth.tokensink.test.") + if err != nil { + t.Fatal(err) + } + out := ouf.Name() + ouf.Close() + os.Remove(out) + t.Logf("output: %s", out) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + + secretFromAgent := secret + if secretIDLess { + secretFromAgent = "" + } + if !bindSecretID && !secretIDLess { + logger.Trace("agent is providing an invalid secret that should be ignored") + secretf, err := ioutil.TempFile("", 
"auth.secret-id.test.") + if err != nil { + t.Fatal(err) + } + secretFromAgent = secretf.Name() + secretf.Close() + defer os.Remove(secretFromAgent) + // if the token is empty, auth.approle would fail reporting the error + if err := ioutil.WriteFile(secretFromAgent, []byte("wrong-secret"), 0o600); err != nil { + t.Fatal(err) + } else { + logger.Trace("wrote secret_id_file_path with wrong-secret", "path", secretFromAgent) + } + } + conf := map[string]interface{}{ + "role_id_file_path": role, + "secret_id_file_path": secretFromAgent, + } + logger.Trace("agent configured with", "conf", conf) + if !removeSecretIDFile { + conf["remove_secret_id_file_after_reading"] = removeSecretIDFile + } + am, err := agentapprole.NewApproleAuthMethod(&auth.AuthConfig{ + Logger: logger.Named("auth.approle"), + MountPath: "auth/approle", + Config: conf, + }) + if err != nil { + t.Fatal(err) + } + ahConfig := &auth.AuthHandlerConfig{ + Logger: logger.Named("auth.handler"), + Client: client, + } + ah := auth.NewAuthHandler(ahConfig) + errCh := make(chan error) + go func() { + errCh <- ah.Run(ctx, am) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + config := &sink.SinkConfig{ + Logger: logger.Named("sink.file"), + Config: map[string]interface{}{ + "path": out, + }, + } + fs, err := file.NewFileSink(config) + if err != nil { + t.Fatal(err) + } + config.Sink = fs + + ss := sink.NewSinkServer(&sink.SinkServerConfig{ + Logger: logger.Named("sink.server"), + Client: client, + }) + + go func() { + errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + // This has to be after the other defers so it happens first. It allows + // successful test runs to immediately cancel all of the runner goroutines + // and unblock any of the blocking defer calls by the runner's DoneCh that + // comes before this and avoid successful tests from taking the entire + // timeout duration. 
+ defer cancel() + + // Check that no sink file exists + _, err = os.Lstat(out) + if err == nil { + t.Fatal("expected err") + } + if !os.IsNotExist(err) { + t.Fatal("expected notexist err") + } + + if err := ioutil.WriteFile(role, []byte(roleID1), 0o600); err != nil { + t.Fatal(err) + } else { + logger.Trace("wrote test role 1", "path", role) + } + + if bindSecretID { + if err := ioutil.WriteFile(secret, []byte(secretID1), 0o600); err != nil { + t.Fatal(err) + } else { + logger.Trace("wrote test secret 1", "path", secret) + } + } else { + logger.Trace("skipped writing test secret 1") + } + + checkToken := func() string { + timeout := time.Now().Add(10 * time.Second) + for { + if time.Now().After(timeout) { + if expectToken { + t.Fatal("did not find a written token after timeout") + } + return "" + } + val, err := ioutil.ReadFile(out) + if err == nil { + os.Remove(out) + if len(val) == 0 { + t.Fatal("written token was empty") + } + if !secretIDLess { + _, err = os.Stat(secretFromAgent) + switch { + case removeSecretIDFile && err == nil: + t.Fatal("secret file exists but was supposed to be removed") + case !removeSecretIDFile && err != nil: + t.Fatal("secret ID file does not exist but was not supposed to be removed") + } + } + client.SetToken(string(val)) + secret, err := client.Auth().Token().LookupSelf() + if err != nil { + t.Fatal(err) + } + return secret.Data["entity_id"].(string) + } + time.Sleep(250 * time.Millisecond) + } + } + origEntity := checkToken() + if !expectToken && origEntity != "" { + t.Fatal("did not expect a token to be written: " + origEntity) + } + if !expectToken && origEntity == "" { + logger.Trace("skipping entities comparison as we are not expecting tokens to be written") + return + } + + // Make sure it gets renewed + timeout := time.Now().Add(4 * time.Second) + for { + if time.Now().After(timeout) { + break + } + secret, err := client.Auth().Token().LookupSelf() + if err != nil { + t.Fatal(err) + } + ttl, err := secret.Data["ttl"].(json.Number).Int64() + if err != nil { + t.Fatal(err) + } + if ttl > 6 { + t.Fatalf("unexpected ttl: %v", secret.Data["ttl"]) + } + } + + // Write new values + if err := ioutil.WriteFile(role, []byte(roleID2), 0o600); err != nil { + t.Fatal(err) + } else { + logger.Trace("wrote test role 2", "path", role) + } + + if bindSecretID { + if err := ioutil.WriteFile(secret, []byte(secretID2), 0o600); err != nil { + t.Fatal(err) + } else { + logger.Trace("wrote test secret 2", "path", secret) + } + } else { + logger.Trace("skipped writing test secret 2") + } + + newEntity := checkToken() + if newEntity == origEntity { + t.Fatal("found same entity") + } + + timeout = time.Now().Add(4 * time.Second) + for { + if time.Now().After(timeout) { + break + } + secret, err := client.Auth().Token().LookupSelf() + if err != nil { + t.Fatal(err) + } + ttl, err := secret.Data["ttl"].(json.Number).Int64() + if err != nil { + t.Fatal(err) + } + if ttl > 6 { + t.Fatalf("unexpected ttl: %v", secret.Data["ttl"]) + } + } +} + +// TestAppRoleLongRoleName tests that the creation of an approle is a maximum of 4096 bytes +// Prior to VAULT-8518 being fixed, you were unable to delete an approle value longer than 1024 bytes +// due to a restriction put into place by PR #14746, to prevent unbounded HMAC creation. 
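+// (Editor's note: concretely, the write below uses a 5000-byte role name and
+// tolerates exactly one failure mode, the explicit "role_name is longer than
+// maximum" error; any other error fails the test, so names within the
+// 4096-byte limit must still round-trip.)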
+func TestAppRoleLongRoleName(t *testing.T) { + approleName := strings.Repeat("a", 5000) + + coreConfig := &vault.CoreConfig{ + DisableMlock: true, + DisableCache: true, + Logger: log.NewNullLogger(), + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + + vault.TestWaitActive(t, cores[0].Core) + + client := cores[0].Client + + err := client.Sys().EnableAuthWithOptions("approle", &api.EnableAuthOptions{ + Type: "approle", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Write(fmt.Sprintf("auth/approle/role/%s", approleName), map[string]interface{}{ + "token_ttl": "6s", + "token_max_ttl": "10s", + }) + if err != nil { + if !strings.Contains(err.Error(), "role_name is longer than maximum") { + t.Fatal(err) + } + } +} + +func TestAppRoleWithWrapping(t *testing.T) { + testCases := []struct { + bindSecretID bool + secretIDLess bool + expectToken bool + }{ + // default behaviour => token expected + {true, false, true}, + + //bindSecretID=false, wrong secret provided, wrapping_path provided => token not expected + //(wrapping token is not valid or does not exist) + {false, false, false}, + + // bindSecretID=false, no secret provided, wrapping_path provided but ignored => token expected + {false, true, true}, + } + for _, tc := range testCases { + t.Run(fmt.Sprintf("bindSecretID=%v secretIDLess=%v expectToken=%v", tc.bindSecretID, tc.secretIDLess, tc.expectToken), func(t *testing.T) { + testAppRoleWithWrapping(t, tc.bindSecretID, tc.secretIDLess, tc.expectToken) + }) + } +} + +func testAppRoleWithWrapping(t *testing.T, bindSecretID bool, secretIDLess bool, expectToken bool) { + var err error + logger := logging.NewVaultLogger(log.Trace) + coreConfig := &vault.CoreConfig{ + DisableMlock: true, + DisableCache: true, + Logger: log.NewNullLogger(), + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + + vault.TestWaitActive(t, cores[0].Core) + + client := cores[0].Client + origToken := client.Token() + + err = client.Sys().EnableAuthWithOptions("approle", &api.EnableAuthOptions{ + Type: "approle", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Write("auth/approle/role/test1", addConstraints(!bindSecretID, map[string]interface{}{ + "bind_secret_id": bindSecretID, + "token_ttl": "6s", + "token_max_ttl": "10s", + })) + if err != nil { + t.Fatal(err) + } + + client.SetWrappingLookupFunc(func(operation, path string) string { + if path == "auth/approle/role/test1/secret-id" { + return "10s" + } + return "" + }) + + secret := "" + secretID1 := "" + if bindSecretID { + resp, err := client.Logical().Write("auth/approle/role/test1/secret-id", nil) + if err != nil { + t.Fatal(err) + } + secretID1 = resp.WrapInfo.Token + } else { + logger.Trace("skipped write to auth/approle/role/test1/secret-id") + } + resp, err := client.Logical().Read("auth/approle/role/test1/role-id") + if err != nil { + t.Fatal(err) + } + roleID1 := resp.Data["role_id"].(string) + + rolef, err := ioutil.TempFile("", "auth.role-id.test.") + if err != nil { + t.Fatal(err) + } + role := rolef.Name() + rolef.Close() // WriteFile doesn't need it 
open + defer os.Remove(role) + t.Logf("input role_id_file_path: %s", role) + + if bindSecretID { + secretf, err := ioutil.TempFile("", "auth.secret-id.test.") + if err != nil { + t.Fatal(err) + } + secret = secretf.Name() + secretf.Close() + defer os.Remove(secret) + t.Logf("input secret_id_file_path: %s", secret) + } else { + logger.Trace("skipped writing tempfile auth.secret-id.test.") + } + + // We close these right away because we're just basically testing + // permissions and finding a usable file name + ouf, err := ioutil.TempFile("", "auth.tokensink.test.") + if err != nil { + t.Fatal(err) + } + out := ouf.Name() + ouf.Close() + os.Remove(out) + t.Logf("output: %s", out) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + + secretFromAgent := secret + if secretIDLess { + secretFromAgent = "" + } + if !bindSecretID && !secretIDLess { + logger.Trace("agent is providing an invalid secret that should be ignored") + secretf, err := ioutil.TempFile("", "auth.secret-id.test.") + if err != nil { + t.Fatal(err) + } + secretFromAgent = secretf.Name() + secretf.Close() + defer os.Remove(secretFromAgent) + // if the token is empty, auth.approle would fail reporting the error + if err := ioutil.WriteFile(secretFromAgent, []byte("wrong-secret"), 0o600); err != nil { + t.Fatal(err) + } else { + logger.Trace("wrote secret_id_file_path with wrong-secret", "path", secretFromAgent) + } + } + conf := map[string]interface{}{ + "role_id_file_path": role, + "secret_id_file_path": secretFromAgent, + "secret_id_response_wrapping_path": "auth/approle/role/test1/secret-id", + "remove_secret_id_file_after_reading": true, + } + logger.Trace("agent configured with", "conf", conf) + + am, err := agentapprole.NewApproleAuthMethod(&auth.AuthConfig{ + Logger: logger.Named("auth.approle"), + MountPath: "auth/approle", + Config: conf, + }) + if err != nil { + t.Fatal(err) + } + ahConfig := &auth.AuthHandlerConfig{ + Logger: logger.Named("auth.handler"), + Client: client, + } + ah := auth.NewAuthHandler(ahConfig) + errCh := make(chan error) + go func() { + errCh <- ah.Run(ctx, am) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + config := &sink.SinkConfig{ + Logger: logger.Named("sink.file"), + Config: map[string]interface{}{ + "path": out, + }, + } + fs, err := file.NewFileSink(config) + if err != nil { + t.Fatal(err) + } + config.Sink = fs + + ss := sink.NewSinkServer(&sink.SinkServerConfig{ + Logger: logger.Named("sink.server"), + Client: client, + }) + go func() { + errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}) + }() + + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + // This has to be after the other defers so it happens first. It allows + // successful test runs to immediately cancel all of the runner goroutines + // and unblock any of the blocking defer calls by the runner's DoneCh that + // comes before this and avoid successful tests from taking the entire + // timeout duration. 
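+	// (Editor's note on the wrapped flow above: SetWrappingLookupFunc makes
+	// every write to .../test1/secret-id return a response-wrapping token
+	// instead of the secret ID itself, and secret_id_response_wrapping_path
+	// tells the agent to unwrap it before logging in. A rough CLI analogue:
+	//
+	//	vault write -wrap-ttl=10s -f auth/approle/role/test1/secret-id
+	//	vault unwrap <wrapping-token>
+	// )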
+	defer cancel()
+
+	// Check that no sink file exists
+	_, err = os.Lstat(out)
+	if err == nil {
+		t.Fatal("expected err")
+	}
+	if !os.IsNotExist(err) {
+		t.Fatal("expected notexist err")
+	}
+
+	if err := ioutil.WriteFile(role, []byte(roleID1), 0o600); err != nil {
+		t.Fatal(err)
+	} else {
+		logger.Trace("wrote test role 1", "path", role)
+	}
+
+	if bindSecretID {
+		logger.Trace("WRITING TO auth.secret-id.test.", "secret", secret, "secretID1", secretID1)
+
+		if err := ioutil.WriteFile(secret, []byte(secretID1), 0o600); err != nil {
+			t.Fatal(err)
+		} else {
+			logger.Trace("wrote test secret 1", "path", secret)
+		}
+	} else {
+		logger.Trace("skipped writing test secret 1")
+	}
+
+	checkToken := func() string {
+		timeout := time.Now().Add(10 * time.Second)
+		for {
+			if time.Now().After(timeout) {
+				if expectToken {
+					t.Fatal("did not find a written token after timeout")
+				}
+				return ""
+			}
+			val, err := ioutil.ReadFile(out)
+			if err == nil {
+				os.Remove(out)
+				if len(val) == 0 {
+					t.Fatal("written token was empty")
+				}
+				if !secretIDLess {
+					if _, err := os.Stat(secret); err == nil {
+						t.Fatal("secret ID file exists but was supposed to be removed")
+					}
+				}
+
+				client.SetToken(string(val))
+				secret, err := client.Auth().Token().LookupSelf()
+				if err != nil {
+					t.Fatal(err)
+				}
+				return secret.Data["entity_id"].(string)
+			}
+			time.Sleep(250 * time.Millisecond)
+		}
+	}
+	origEntity := checkToken()
+	logger.Trace("checking token", "origEntity", origEntity)
+
+	if !expectToken && origEntity != "" {
+		t.Fatal("did not expect a token to be written: " + origEntity)
+	}
+	if !expectToken && origEntity == "" {
+		logger.Trace("skipping entities comparison as we are not expecting tokens to be written")
+		return
+	}
+
+	// Make sure it gets renewed
+	timeout := time.Now().Add(4 * time.Second)
+	for {
+		if time.Now().After(timeout) {
+			break
+		}
+		secret, err := client.Auth().Token().LookupSelf()
+		if err != nil {
+			t.Fatal(err)
+		}
+		ttl, err := secret.Data["ttl"].(json.Number).Int64()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if ttl > 6 {
+			t.Fatalf("unexpected ttl: %v", secret.Data["ttl"])
+		}
+	}
+
+	// Write new values
+	client.SetToken(origToken)
+	logger.Trace("origToken set into client", "origToken", origToken)
+
+	if bindSecretID {
+		resp, err = client.Logical().Write("auth/approle/role/test1/secret-id", nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		secretID2 := resp.WrapInfo.Token
+		if err := ioutil.WriteFile(secret, []byte(secretID2), 0o600); err != nil {
+			t.Fatal(err)
+		} else {
+			logger.Trace("wrote test secret 2", "path", secret)
+		}
+	} else {
+		logger.Trace("skipped writing test secret 2")
+	}
+
+	newEntity := checkToken()
+	if newEntity != origEntity {
+		t.Fatal("did not find same entity")
+	}
+
+	timeout = time.Now().Add(4 * time.Second)
+	for {
+		if time.Now().After(timeout) {
+			break
+		}
+		secret, err := client.Auth().Token().LookupSelf()
+		if err != nil {
+			t.Fatal(err)
+		}
+		ttl, err := secret.Data["ttl"].(json.Number).Int64()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if ttl > 6 {
+			t.Fatalf("unexpected ttl: %v", secret.Data["ttl"])
+		}
+	}
+}
+
+func addConstraints(add bool, cfg map[string]interface{}) map[string]interface{} {
+	if add {
+		// extraConstraints to add when bind_secret_id=false (otherwise Vault would fail with: "at least one constraint should be enabled on the role")
+		extraConstraints := map[string]interface{}{
+			"secret_id_bound_cidrs": "127.0.0.1/32",
+			"token_bound_cidrs":     "127.0.0.1/32",
+		}
+		for k, v := range extraConstraints {
+			cfg[k] = v
+		}
+	}
+
return cfg +} diff --git a/command/agent/auto_auth_preload_token_end_to_end_test.go b/command/agent/auto_auth_preload_token_end_to_end_test.go new file mode 100644 index 0000000..004e817 --- /dev/null +++ b/command/agent/auto_auth_preload_token_end_to_end_test.go @@ -0,0 +1,241 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package agent + +import ( + "context" + "io/ioutil" + "os" + "testing" + "time" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + credAppRole "github.com/hashicorp/vault/builtin/credential/approle" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + agentAppRole "github.com/hashicorp/vault/command/agentproxyshared/auth/approle" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +func TestTokenPreload_UsingAutoAuth(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + coreConfig := &vault.CoreConfig{ + Logger: logger, + LogicalBackends: map[string]logical.Factory{ + "kv": vault.LeasedPassthroughBackendFactory, + }, + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + client := cluster.Cores[0].Client + + // Setup Vault + if err := client.Sys().EnableAuthWithOptions("approle", &api.EnableAuthOptions{ + Type: "approle", + }); err != nil { + t.Fatal(err) + } + + // Setup Approle + _, err := client.Logical().Write("auth/approle/role/test1", map[string]interface{}{ + "bind_secret_id": "true", + "token_ttl": "3s", + "token_max_ttl": "10s", + "policies": []string{"test-autoauth"}, + }) + if err != nil { + t.Fatal(err) + } + + resp, err := client.Logical().Write("auth/approle/role/test1/secret-id", nil) + if err != nil { + t.Fatal(err) + } + secretID1 := resp.Data["secret_id"].(string) + + resp, err = client.Logical().Read("auth/approle/role/test1/role-id") + if err != nil { + t.Fatal(err) + } + roleID1 := resp.Data["role_id"].(string) + + rolef, err := ioutil.TempFile("", "auth.role-id.test.") + if err != nil { + t.Fatal(err) + } + role := rolef.Name() + rolef.Close() // WriteFile doesn't need it open + defer os.Remove(role) + t.Logf("input role_id_file_path: %s", role) + + secretf, err := ioutil.TempFile("", "auth.secret-id.test.") + if err != nil { + t.Fatal(err) + } + secret := secretf.Name() + secretf.Close() + defer os.Remove(secret) + t.Logf("input secret_id_file_path: %s", secret) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + + conf := map[string]interface{}{ + "role_id_file_path": role, + "secret_id_file_path": secret, + } + + if err := ioutil.WriteFile(role, []byte(roleID1), 0o600); err != nil { + t.Fatal(err) + } else { + logger.Trace("wrote test role 1", "path", role) + } + + if err := ioutil.WriteFile(secret, []byte(secretID1), 0o600); err != nil { + t.Fatal(err) + } else { + logger.Trace("wrote test secret 1", "path", secret) + } + + // Setup Preload Token + tokenRespRaw, err := client.Logical().Write("auth/token/create", map[string]interface{}{ + "ttl": "10s", + "explicit-max-ttl": "15s", + "policies": []string{""}, + }) + if err != nil { + 
t.Fatal(err) + } + + if tokenRespRaw.Auth == nil || tokenRespRaw.Auth.ClientToken == "" { + t.Fatal("expected token but got none") + } + token := tokenRespRaw.Auth.ClientToken + + am, err := agentAppRole.NewApproleAuthMethod(&auth.AuthConfig{ + Logger: logger.Named("auth.approle"), + MountPath: "auth/approle", + Config: conf, + }) + if err != nil { + t.Fatal(err) + } + + ahConfig := &auth.AuthHandlerConfig{ + Logger: logger.Named("auth.handler"), + Client: client, + Token: token, + } + + ah := auth.NewAuthHandler(ahConfig) + + tmpFile, err := ioutil.TempFile("", "auth.tokensink.test.") + if err != nil { + t.Fatal(err) + } + tokenSinkFileName := tmpFile.Name() + tmpFile.Close() + os.Remove(tokenSinkFileName) + t.Logf("output: %s", tokenSinkFileName) + + config := &sink.SinkConfig{ + Logger: logger.Named("sink.file"), + Config: map[string]interface{}{ + "path": tokenSinkFileName, + }, + WrapTTL: 10 * time.Second, + } + + fs, err := file.NewFileSink(config) + if err != nil { + t.Fatal(err) + } + config.Sink = fs + + ss := sink.NewSinkServer(&sink.SinkServerConfig{ + Logger: logger.Named("sink.server"), + Client: client, + }) + + errCh := make(chan error) + go func() { + errCh <- ah.Run(ctx, am) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + go func() { + errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + // This has to be after the other defers so it happens first. It allows + // successful test runs to immediately cancel all of the runner goroutines + // and unblock any of the blocking defer calls by the runner's DoneCh that + // comes before this and avoid successful tests from taking the entire + // timeout duration. + defer cancel() + + if stat, err := os.Lstat(tokenSinkFileName); err == nil { + t.Fatalf("expected err but got %s", stat) + } else if !os.IsNotExist(err) { + t.Fatal("expected notexist err") + } + + // Wait 2 seconds for the env variables to be detected and an auth to be generated. + time.Sleep(time.Second * 2) + + authToken, err := readToken(tokenSinkFileName) + if err != nil { + t.Fatal(err) + } + + if authToken.Token == "" { + t.Fatal("expected token but didn't receive it") + } + + wrappedToken := map[string]interface{}{ + "token": authToken.Token, + } + unwrapResp, err := client.Logical().Write("sys/wrapping/unwrap", wrappedToken) + if err != nil { + t.Fatalf("error unwrapping token: %s", err) + } + + sinkToken, ok := unwrapResp.Data["token"].(string) + if !ok { + t.Fatal("expected token but didn't receive it") + } + + if sinkToken != token { + t.Fatalf("auth token and preload token should be the same: expected: %s, actual: %s", token, sinkToken) + } +} diff --git a/command/agent/aws_end_to_end_test.go b/command/agent/aws_end_to_end_test.go new file mode 100644 index 0000000..08644bd --- /dev/null +++ b/command/agent/aws_end_to_end_test.go @@ -0,0 +1,247 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package agent + +import ( + "context" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/sts" + hclog "github.com/hashicorp/go-hclog" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/api" + vaultaws "github.com/hashicorp/vault/builtin/credential/aws" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + agentaws "github.com/hashicorp/vault/command/agentproxyshared/auth/aws" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" + "github.com/hashicorp/vault/helper/testhelpers" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +const ( + // These are the access key and secret that should be used when calling "AssumeRole" + // for the given AWS_TEST_ROLE_ARN. + envVarAwsTestAccessKey = "AWS_TEST_ACCESS_KEY" + envVarAwsTestSecretKey = "AWS_TEST_SECRET_KEY" + envVarAwsTestRoleArn = "AWS_TEST_ROLE_ARN" + + // The AWS SDK doesn't export its standard env vars so they're captured here. + // These are used for the duration of the test to make sure the agent is able to + // pick up creds from the env. + // + // To run this test, do not set these. Only the above ones need to be set. + envVarAwsAccessKey = "AWS_ACCESS_KEY_ID" + envVarAwsSecretKey = "AWS_SECRET_ACCESS_KEY" + envVarAwsSessionToken = "AWS_SESSION_TOKEN" +) + +func TestAWSEndToEnd(t *testing.T) { + if !runAcceptanceTests { + t.SkipNow() + } + + // Ensure each cred is populated. + credNames := []string{ + envVarAwsTestAccessKey, + envVarAwsTestSecretKey, + envVarAwsTestRoleArn, + } + testhelpers.SkipUnlessEnvVarsSet(t, credNames) + + logger := logging.NewVaultLogger(hclog.Trace) + coreConfig := &vault.CoreConfig{ + Logger: logger, + CredentialBackends: map[string]logical.Factory{ + "aws": vaultaws.Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + client := cluster.Cores[0].Client + + // Setup Vault + if err := client.Sys().EnableAuthWithOptions("aws", &api.EnableAuthOptions{ + Type: "aws", + }); err != nil { + t.Fatal(err) + } + + if _, err := client.Logical().Write("auth/aws/role/test", map[string]interface{}{ + "auth_type": "iam", + "policies": "default", + // Retain thru the account number of the given arn and wildcard the rest. + "bound_iam_principal_arn": os.Getenv(envVarAwsTestRoleArn)[:25] + "*", + }); err != nil { + t.Fatal(err) + } + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + + // We're going to feed aws auth creds via env variables. 
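+	// Editor's note: the role above binds to the first 25 bytes of the test
+	// ARN plus "*". For a standard role ARN such as
+	// arn:aws:iam::123456789012:role/test (account ID illustrative), those 25
+	// bytes are exactly "arn:aws:iam::123456789012", i.e. everything through
+	// the 12-digit account ID, so any principal in the test account matches.
+	// setAwsEnvCreds (below) then assumes the role and exports
+	// AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN for the
+	// agent's iam auth method to discover.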
+ if err := setAwsEnvCreds(); err != nil { + t.Fatal(err) + } + defer func() { + if err := unsetAwsEnvCreds(); err != nil { + t.Fatal(err) + } + }() + + am, err := agentaws.NewAWSAuthMethod(&auth.AuthConfig{ + Logger: logger.Named("auth.aws"), + MountPath: "auth/aws", + Config: map[string]interface{}{ + "role": "test", + "type": "iam", + "credential_poll_interval": 1, + }, + }) + if err != nil { + t.Fatal(err) + } + + ahConfig := &auth.AuthHandlerConfig{ + Logger: logger.Named("auth.handler"), + Client: client, + } + + ah := auth.NewAuthHandler(ahConfig) + errCh := make(chan error) + go func() { + errCh <- ah.Run(ctx, am) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + tmpFile, err := ioutil.TempFile("", "auth.tokensink.test.") + if err != nil { + t.Fatal(err) + } + tokenSinkFileName := tmpFile.Name() + tmpFile.Close() + os.Remove(tokenSinkFileName) + t.Logf("output: %s", tokenSinkFileName) + + config := &sink.SinkConfig{ + Logger: logger.Named("sink.file"), + Config: map[string]interface{}{ + "path": tokenSinkFileName, + }, + WrapTTL: 10 * time.Second, + } + + fs, err := file.NewFileSink(config) + if err != nil { + t.Fatal(err) + } + config.Sink = fs + + ss := sink.NewSinkServer(&sink.SinkServerConfig{ + Logger: logger.Named("sink.server"), + Client: client, + }) + go func() { + errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + // This has to be after the other defers so it happens first. It allows + // successful test runs to immediately cancel all of the runner goroutines + // and unblock any of the blocking defer calls by the runner's DoneCh that + // comes before this and avoid successful tests from taking the entire + // timeout duration. + defer cancel() + + if stat, err := os.Lstat(tokenSinkFileName); err == nil { + t.Fatalf("expected err but got %s", stat) + } else if !os.IsNotExist(err) { + t.Fatal("expected notexist err") + } + + // Wait 2 seconds for the env variables to be detected and an auth to be generated. 
+ time.Sleep(time.Second * 2) + + token, err := readToken(tokenSinkFileName) + if err != nil { + t.Fatal(err) + } + + if token.Token == "" { + t.Fatal("expected token but didn't receive it") + } +} + +func setAwsEnvCreds() error { + cfg := &aws.Config{ + Credentials: credentials.NewStaticCredentials(os.Getenv(envVarAwsTestAccessKey), os.Getenv(envVarAwsTestSecretKey), ""), + } + sess, err := session.NewSession(cfg) + if err != nil { + return err + } + client := sts.New(sess) + + roleArn := os.Getenv(envVarAwsTestRoleArn) + uid, err := uuid.GenerateUUID() + if err != nil { + return err + } + + input := &sts.AssumeRoleInput{ + RoleArn: &roleArn, + RoleSessionName: &uid, + } + output, err := client.AssumeRole(input) + if err != nil { + return err + } + + if err := os.Setenv(envVarAwsAccessKey, *output.Credentials.AccessKeyId); err != nil { + return err + } + if err := os.Setenv(envVarAwsSecretKey, *output.Credentials.SecretAccessKey); err != nil { + return err + } + return os.Setenv(envVarAwsSessionToken, *output.Credentials.SessionToken) +} + +func unsetAwsEnvCreds() error { + if err := os.Unsetenv(envVarAwsAccessKey); err != nil { + return err + } + if err := os.Unsetenv(envVarAwsSecretKey); err != nil { + return err + } + return os.Unsetenv(envVarAwsSessionToken) +} diff --git a/command/agent/cache_end_to_end_test.go b/command/agent/cache_end_to_end_test.go new file mode 100644 index 0000000..a2a359a --- /dev/null +++ b/command/agent/cache_end_to_end_test.go @@ -0,0 +1,422 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package agent + +import ( + "context" + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "testing" + "time" + + hclog "github.com/hashicorp/go-hclog" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + credAppRole "github.com/hashicorp/vault/builtin/credential/approle" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + agentapprole "github.com/hashicorp/vault/command/agentproxyshared/auth/approle" + cache "github.com/hashicorp/vault/command/agentproxyshared/cache" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" + "github.com/hashicorp/vault/command/agentproxyshared/sink/inmem" + "github.com/hashicorp/vault/helper/useragent" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +const policyAutoAuthAppRole = ` +path "/kv/*" { + capabilities = ["sudo", "create", "read", "update", "delete", "list"] +} + +path "/auth/token/create" { + capabilities = ["create", "update"] +} +` + +func TestCache_UsingAutoAuthToken(t *testing.T) { + var err error + logger := logging.NewVaultLogger(log.Trace) + coreConfig := &vault.CoreConfig{ + DisableMlock: true, + DisableCache: true, + Logger: log.NewNullLogger(), + LogicalBackends: map[string]logical.Factory{ + "kv": vault.LeasedPassthroughBackendFactory, + }, + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + + vault.TestWaitActive(t, cores[0].Core) + + client := cores[0].Client + + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Setenv(api.EnvVaultAddress, client.Address()) 
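+	// Editor's note: arguments to a deferred call are evaluated when the defer
+	// statement runs, so each `defer os.Setenv(k, os.Getenv(k))` pair here
+	// snapshots the pre-test value immediately and restores it on return,
+	// while the plain os.Setenv installs the test value. Long-hand equivalent:
+	//
+	//	old := os.Getenv(api.EnvVaultCACert)
+	//	defer os.Setenv(api.EnvVaultCACert, old)
+	//	os.Setenv(api.EnvVaultCACert, testValue) // testValue: illustrative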
+ + defer os.Setenv(api.EnvVaultCACert, os.Getenv(api.EnvVaultCACert)) + os.Setenv(api.EnvVaultCACert, fmt.Sprintf("%s/ca_cert.pem", cluster.TempDir)) + + err = client.Sys().Mount("kv", &api.MountInput{ + Type: "kv", + }) + if err != nil { + t.Fatal(err) + } + + // Create a secret in the backend + _, err = client.Logical().Write("kv/foo", map[string]interface{}{ + "value": "bar", + "ttl": "1h", + }) + if err != nil { + t.Fatal(err) + } + + // Add an kv-admin policy + if err := client.Sys().PutPolicy("test-autoauth", policyAutoAuthAppRole); err != nil { + t.Fatal(err) + } + + // Enable approle + err = client.Sys().EnableAuthWithOptions("approle", &api.EnableAuthOptions{ + Type: "approle", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Write("auth/approle/role/test1", map[string]interface{}{ + "bind_secret_id": "true", + "token_ttl": "3s", + "token_max_ttl": "10s", + "policies": []string{"test-autoauth"}, + }) + if err != nil { + t.Fatal(err) + } + + resp, err := client.Logical().Write("auth/approle/role/test1/secret-id", nil) + if err != nil { + t.Fatal(err) + } + secretID1 := resp.Data["secret_id"].(string) + + resp, err = client.Logical().Read("auth/approle/role/test1/role-id") + if err != nil { + t.Fatal(err) + } + roleID1 := resp.Data["role_id"].(string) + + rolef, err := ioutil.TempFile("", "auth.role-id.test.") + if err != nil { + t.Fatal(err) + } + role := rolef.Name() + rolef.Close() // WriteFile doesn't need it open + defer os.Remove(role) + t.Logf("input role_id_file_path: %s", role) + + secretf, err := ioutil.TempFile("", "auth.secret-id.test.") + if err != nil { + t.Fatal(err) + } + secret := secretf.Name() + secretf.Close() + defer os.Remove(secret) + t.Logf("input secret_id_file_path: %s", secret) + + // We close these right away because we're just basically testing + // permissions and finding a usable file name + ouf, err := ioutil.TempFile("", "auth.tokensink.test.") + if err != nil { + t.Fatal(err) + } + out := ouf.Name() + ouf.Close() + os.Remove(out) + t.Logf("output: %s", out) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + + conf := map[string]interface{}{ + "role_id_file_path": role, + "secret_id_file_path": secret, + "remove_secret_id_file_after_reading": true, + } + + cacheLogger := logging.NewVaultLogger(hclog.Trace).Named("cache") + + // Create the API proxier + apiProxy, err := cache.NewAPIProxy(&cache.APIProxyConfig{ + Client: client, + Logger: cacheLogger.Named("apiproxy"), + UserAgentStringFunction: useragent.ProxyStringWithProxiedUserAgent, + UserAgentString: useragent.ProxyAPIProxyString(), + }) + if err != nil { + t.Fatal(err) + } + + // Create the lease cache proxier and set its underlying proxier to + // the API proxier. 
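+	// Editor's note: the agent chains proxiers, so a request entering the
+	// listener flows roughly:
+	//
+	//	client -> ProxyHandler -> LeaseCache (serve from cache on hit) -> APIProxy -> Vault
+	//
+	// The lease cache indexes responses that create tokens or leases and
+	// replays them for an identical repeated request, which is what the
+	// RequestID equality assertions later in this test depend on.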
+ leaseCache, err := cache.NewLeaseCache(&cache.LeaseCacheConfig{ + Client: client, + BaseContext: ctx, + Proxier: apiProxy, + Logger: cacheLogger.Named("leasecache"), + }) + if err != nil { + t.Fatal(err) + } + + am, err := agentapprole.NewApproleAuthMethod(&auth.AuthConfig{ + Logger: logger.Named("auth.approle"), + MountPath: "auth/approle", + Config: conf, + }) + if err != nil { + t.Fatal(err) + } + ahConfig := &auth.AuthHandlerConfig{ + Logger: logger.Named("auth.handler"), + Client: client, + } + ah := auth.NewAuthHandler(ahConfig) + errCh := make(chan error) + go func() { + errCh <- ah.Run(ctx, am) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + config := &sink.SinkConfig{ + Logger: logger.Named("sink.file"), + Config: map[string]interface{}{ + "path": out, + }, + } + fs, err := file.NewFileSink(config) + if err != nil { + t.Fatal(err) + } + config.Sink = fs + + ss := sink.NewSinkServer(&sink.SinkServerConfig{ + Logger: logger.Named("sink.server"), + Client: client, + }) + + inmemSinkConfig := &sink.SinkConfig{ + Logger: logger.Named("sink.inmem"), + } + + inmemSink, err := inmem.New(inmemSinkConfig, leaseCache) + if err != nil { + t.Fatal(err) + } + inmemSinkConfig.Sink = inmemSink + + go func() { + errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config, inmemSinkConfig}) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + // This has to be after the other defers so it happens first. It allows + // successful test runs to immediately cancel all of the runner goroutines + // and unblock any of the blocking defer calls by the runner's DoneCh that + // comes before this and avoid successful tests from taking the entire + // timeout duration. 
+ defer cancel() + + // Check that no sink file exists + _, err = os.Lstat(out) + if err == nil { + t.Fatal("expected err") + } + if !os.IsNotExist(err) { + t.Fatal("expected notexist err") + } + + if err := ioutil.WriteFile(role, []byte(roleID1), 0o600); err != nil { + t.Fatal(err) + } else { + logger.Trace("wrote test role 1", "path", role) + } + + if err := ioutil.WriteFile(secret, []byte(secretID1), 0o600); err != nil { + t.Fatal(err) + } else { + logger.Trace("wrote test secret 1", "path", secret) + } + + getToken := func() string { + timeout := time.Now().Add(10 * time.Second) + for { + if time.Now().After(timeout) { + t.Fatal("did not find a written token after timeout") + } + val, err := ioutil.ReadFile(out) + if err == nil { + os.Remove(out) + if len(val) == 0 { + t.Fatal("written token was empty") + } + + _, err = os.Stat(secret) + if err == nil { + t.Fatal("secret file exists but was supposed to be removed") + } + + return string(val) + } + time.Sleep(250 * time.Millisecond) + } + } + + t.Logf("auto-auth token: %q", getToken()) + + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + + defer listener.Close() + + // Create a muxer and add paths relevant for the lease cache layer + mux := http.NewServeMux() + mux.Handle(consts.AgentPathCacheClear, leaseCache.HandleCacheClear(ctx)) + + // Passing a non-nil inmemsink tells the agent to use the auto-auth token + mux.Handle("/", cache.ProxyHandler(ctx, cacheLogger, leaseCache, inmemSink, true)) + server := &http.Server{ + Handler: mux, + ReadHeaderTimeout: 10 * time.Second, + ReadTimeout: 30 * time.Second, + IdleTimeout: 5 * time.Minute, + ErrorLog: cacheLogger.StandardLogger(nil), + } + go server.Serve(listener) + + testClient, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + + if err := testClient.SetAddress("http://" + listener.Addr().String()); err != nil { + t.Fatal(err) + } + + // Wait for listeners to come up + time.Sleep(2 * time.Second) + + // This block tests that no token on the client is detected by the agent + // and the auto-auth token is used + { + // Empty the token in the client to ensure that auto-auth token is used + testClient.SetToken("") + + resp, err = testClient.Logical().Read("auth/token/lookup-self") + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatalf("failed to use the auto-auth token to perform lookup-self") + } + } + + // This block tests lease creation caching using the auto-auth token. + { + resp, err = testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + + origReqID := resp.RequestID + + resp, err = testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + + // Sleep for a bit to allow renewer logic to kick in + time.Sleep(20 * time.Millisecond) + + cacheReqID := resp.RequestID + + if origReqID != cacheReqID { + t.Fatalf("request ID mismatch, expected second request to be a cached response: %s != %s", origReqID, cacheReqID) + } + } + + // This block tests auth token creation caching (child, non-orphan tokens) + // using the auto-auth token. 
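+	// Editor's note: outside of tests, the behavior exercised below maps to an
+	// agent configuration along these lines (listener address illustrative):
+	//
+	//	cache {
+	//	  use_auto_auth_token = true
+	//	}
+	//	listener "tcp" {
+	//	  address     = "127.0.0.1:8100"
+	//	  tls_disable = true
+	//	}
+	//
+	// A client pointed at that listener may omit its own token, exactly as
+	// testClient.SetToken("") did above, and the agent substitutes the
+	// auto-auth token.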
+	{
+		resp, err = testClient.Logical().Write("auth/token/create", nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		origReqID := resp.RequestID
+
+		// Sleep for a bit to allow renewer logic to kick in
+		time.Sleep(20 * time.Millisecond)
+
+		resp, err = testClient.Logical().Write("auth/token/create", nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		cacheReqID := resp.RequestID
+
+		if origReqID != cacheReqID {
+			t.Fatalf("request ID mismatch, expected second request to be a cached response: %s != %s", origReqID, cacheReqID)
+		}
+	}
+
+	// This block tests that despite being allowed to use the auto-auth token,
+	// the token on the request will be prioritized.
+	{
+		// Set the client's token to the cluster's root token to ensure that,
+		// when present, the request token takes precedence over the auto-auth
+		// token
+		testClient.SetToken(client.Token())
+
+		resp, err = testClient.Logical().Read("auth/token/lookup-self")
+		if err != nil {
+			t.Fatal(err)
+		}
+		if resp == nil || resp.Data["id"] != client.Token() {
+			t.Fatalf("failed to use the cluster client token to perform lookup-self")
+		}
+	}
+}
diff --git a/command/agent/cert_end_to_end_test.go b/command/agent/cert_end_to_end_test.go
new file mode 100644
index 0000000..12ea933
--- /dev/null
+++ b/command/agent/cert_end_to_end_test.go
@@ -0,0 +1,583 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package agent
+
+import (
+	"context"
+	"encoding/pem"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"testing"
+	"time"
+
+	hclog "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/api"
+	vaultcert "github.com/hashicorp/vault/builtin/credential/cert"
+	"github.com/hashicorp/vault/builtin/logical/pki"
+	"github.com/hashicorp/vault/command/agentproxyshared/auth"
+	agentcert "github.com/hashicorp/vault/command/agentproxyshared/auth/cert"
+	"github.com/hashicorp/vault/command/agentproxyshared/sink"
+	"github.com/hashicorp/vault/command/agentproxyshared/sink/file"
+	"github.com/hashicorp/vault/helper/dhutil"
+	vaulthttp "github.com/hashicorp/vault/http"
+	"github.com/hashicorp/vault/sdk/helper/jsonutil"
+	"github.com/hashicorp/vault/sdk/helper/logging"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/hashicorp/vault/vault"
+)
+
+func TestCertEndToEnd(t *testing.T) {
+	cases := []struct {
+		name             string
+		withCertRoleName bool
+		ahWrapping       bool
+	}{
+		{
+			"with name with wrapping",
+			true,
+			true,
+		},
+		{
+			"with name without wrapping",
+			true,
+			false,
+		},
+		{
+			"without name with wrapping",
+			false,
+			true,
+		},
+		{
+			"without name without wrapping",
+			false,
+			false,
+		},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			testCertEndToEnd(t, tc.withCertRoleName, tc.ahWrapping)
+		})
+	}
+}
+
+func testCertEndToEnd(t *testing.T, withCertRoleName, ahWrapping bool) {
+	logger := logging.NewVaultLogger(hclog.Trace)
+	coreConfig := &vault.CoreConfig{
+		Logger: logger,
+		CredentialBackends: map[string]logical.Factory{
+			"cert": vaultcert.Factory,
+		},
+	}
+	cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
+		HandlerFunc: vaulthttp.Handler,
+	})
+	cluster.Start()
+	defer cluster.Cleanup()
+
+	vault.TestWaitActive(t, cluster.Cores[0].Core)
+	client := cluster.Cores[0].Client
+
+	// Setup Vault
+	err := client.Sys().EnableAuthWithOptions("cert", &api.EnableAuthOptions{
+		Type: "cert",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	certificatePEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cluster.CACert.Raw})
+
+	certRoleName := "test"
+	_, err = client.Logical().Write(fmt.Sprintf("auth/cert/certs/%s", certRoleName), map[string]interface{}{
+		"certificate":
string(certificatePEM), + "policies": "default", + }) + if err != nil { + t.Fatal(err) + } + + // Generate encryption params + pub, pri, err := dhutil.GeneratePublicPrivateKey() + if err != nil { + t.Fatal(err) + } + + ouf, err := ioutil.TempFile("", "auth.tokensink.test.") + if err != nil { + t.Fatal(err) + } + out := ouf.Name() + ouf.Close() + os.Remove(out) + t.Logf("output: %s", out) + + dhpathf, err := ioutil.TempFile("", "auth.dhpath.test.") + if err != nil { + t.Fatal(err) + } + dhpath := dhpathf.Name() + dhpathf.Close() + os.Remove(dhpath) + + // Write DH public key to file + mPubKey, err := jsonutil.EncodeJSON(&dhutil.PublicKeyInfo{ + Curve25519PublicKey: pub, + }) + if err != nil { + t.Fatal(err) + } + if err := ioutil.WriteFile(dhpath, mPubKey, 0o600); err != nil { + t.Fatal(err) + } else { + logger.Trace("wrote dh param file", "path", dhpath) + } + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + + aaConfig := map[string]interface{}{} + + if withCertRoleName { + aaConfig["name"] = certRoleName + } + + am, err := agentcert.NewCertAuthMethod(&auth.AuthConfig{ + Logger: logger.Named("auth.cert"), + MountPath: "auth/cert", + Config: aaConfig, + }) + if err != nil { + t.Fatal(err) + } + + ahConfig := &auth.AuthHandlerConfig{ + Logger: logger.Named("auth.handler"), + Client: client, + EnableReauthOnNewCredentials: true, + } + if ahWrapping { + ahConfig.WrapTTL = 10 * time.Second + } + ah := auth.NewAuthHandler(ahConfig) + errCh := make(chan error) + go func() { + errCh <- ah.Run(ctx, am) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + config := &sink.SinkConfig{ + Logger: logger.Named("sink.file"), + AAD: "foobar", + DHType: "curve25519", + DHPath: dhpath, + DeriveKey: true, + Config: map[string]interface{}{ + "path": out, + }, + } + if !ahWrapping { + config.WrapTTL = 10 * time.Second + } + fs, err := file.NewFileSink(config) + if err != nil { + t.Fatal(err) + } + config.Sink = fs + + ss := sink.NewSinkServer(&sink.SinkServerConfig{ + Logger: logger.Named("sink.server"), + Client: client, + }) + go func() { + errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + // This has to be after the other defers so it happens first. It allows + // successful test runs to immediately cancel all of the runner goroutines + // and unblock any of the blocking defer calls by the runner's DoneCh that + // comes before this and avoid successful tests from taking the entire + // timeout duration. 
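+	// (Editor's note: with DHType/DHPath/DeriveKey set on the sink config
+	// above, the file sink encrypts the wrapped token to the curve25519 public
+	// key read from dhpath rather than writing it in the clear. checkToken
+	// below reverses each step:
+	//
+	//	envelope JSON -> GenerateSharedSecret(pri, peer) -> DeriveSharedKey
+	//	              -> DecryptAES(key, payload, nonce, AAD "foobar") -> unwrap
+	// )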
+ defer cancel() + + cloned, err := client.Clone() + if err != nil { + t.Fatal(err) + } + + checkToken := func() string { + timeout := time.Now().Add(5 * time.Second) + for { + if time.Now().After(timeout) { + t.Fatal("did not find a written token after timeout") + } + val, err := ioutil.ReadFile(out) + if err == nil { + os.Remove(out) + if len(val) == 0 { + t.Fatal("written token was empty") + } + + // First decrypt it + resp := new(dhutil.Envelope) + if err := jsonutil.DecodeJSON(val, resp); err != nil { + continue + } + + shared, err := dhutil.GenerateSharedSecret(pri, resp.Curve25519PublicKey) + if err != nil { + t.Fatal(err) + } + aesKey, err := dhutil.DeriveSharedKey(shared, pub, resp.Curve25519PublicKey) + if err != nil { + t.Fatal(err) + } + if len(aesKey) == 0 { + t.Fatal("got empty aes key") + } + + val, err = dhutil.DecryptAES(aesKey, resp.EncryptedPayload, resp.Nonce, []byte("foobar")) + if err != nil { + t.Fatalf("error: %v\nresp: %v", err, string(val)) + } + + // Now unwrap it + wrapInfo := new(api.SecretWrapInfo) + if err := jsonutil.DecodeJSON(val, wrapInfo); err != nil { + t.Fatal(err) + } + switch { + case wrapInfo.TTL != 10: + t.Fatalf("bad wrap info: %v", wrapInfo.TTL) + case !ahWrapping && wrapInfo.CreationPath != "sys/wrapping/wrap": + t.Fatalf("bad wrap path: %v", wrapInfo.CreationPath) + case ahWrapping && wrapInfo.CreationPath != "auth/cert/login": + t.Fatalf("bad wrap path: %v", wrapInfo.CreationPath) + case wrapInfo.Token == "": + t.Fatal("wrap token is empty") + } + cloned.SetToken(wrapInfo.Token) + secret, err := cloned.Logical().Unwrap("") + if err != nil { + t.Fatal(err) + } + if ahWrapping { + switch { + case secret.Auth == nil: + t.Fatal("unwrap secret auth is nil") + case secret.Auth.ClientToken == "": + t.Fatal("unwrap token is nil") + } + return secret.Auth.ClientToken + } else { + switch { + case secret.Data == nil: + t.Fatal("unwrap secret data is nil") + case secret.Data["token"] == nil: + t.Fatal("unwrap token is nil") + } + return secret.Data["token"].(string) + } + } + time.Sleep(250 * time.Millisecond) + } + } + checkToken() +} + +func TestCertEndToEnd_CertsInConfig(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + coreConfig := &vault.CoreConfig{ + Logger: logger, + CredentialBackends: map[string]logical.Factory{ + "cert": vaultcert.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "pki": pki.Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + client := cluster.Cores[0].Client + + // ///////////// + // PKI setup + // ///////////// + + // Mount /pki as a root CA + err := client.Sys().Mount("pki", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "32h", + }, + }) + if err != nil { + t.Fatal(err) + } + + // Set the cluster's certificate as the root CA in /pki + pemBundleRootCA := string(cluster.CACertPEM) + string(cluster.CAKeyPEM) + _, err = client.Logical().Write("pki/config/ca", map[string]interface{}{ + "pem_bundle": pemBundleRootCA, + }) + if err != nil { + t.Fatal(err) + } + + // Mount /pki2 to operate as an intermediate CA + err = client.Sys().Mount("pki2", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "32h", + }, + }) + if err != nil { + t.Fatal(err) + } + + // Create a CSR for the intermediate CA + secret, err := 
client.Logical().Write("pki2/intermediate/generate/internal", nil) + if err != nil { + t.Fatal(err) + } + intermediateCSR := secret.Data["csr"].(string) + + // Sign the intermediate CSR using /pki + secret, err = client.Logical().Write("pki/root/sign-intermediate", map[string]interface{}{ + "permitted_dns_domains": ".myvault.com", + "csr": intermediateCSR, + }) + if err != nil { + t.Fatal(err) + } + intermediateCertPEM := secret.Data["certificate"].(string) + + // Configure the intermediate cert as the CA in /pki2 + _, err = client.Logical().Write("pki2/intermediate/set-signed", map[string]interface{}{ + "certificate": intermediateCertPEM, + }) + if err != nil { + t.Fatal(err) + } + + // Create a role on the intermediate CA mount + _, err = client.Logical().Write("pki2/roles/myvault-dot-com", map[string]interface{}{ + "allowed_domains": "myvault.com", + "allow_subdomains": "true", + "max_ttl": "5m", + }) + if err != nil { + t.Fatal(err) + } + + // Issue a leaf cert using the intermediate CA + secret, err = client.Logical().Write("pki2/issue/myvault-dot-com", map[string]interface{}{ + "common_name": "cert.myvault.com", + "format": "pem", + "ip_sans": "127.0.0.1", + }) + if err != nil { + t.Fatal(err) + } + leafCertPEM := secret.Data["certificate"].(string) + leafCertKeyPEM := secret.Data["private_key"].(string) + + // Create temporary files for CA cert, client cert and client cert key. + // This is used to configure TLS in the api client. + caCertFile, err := ioutil.TempFile("", "caCert") + if err != nil { + t.Fatal(err) + } + defer os.Remove(caCertFile.Name()) + if _, err := caCertFile.Write([]byte(cluster.CACertPEM)); err != nil { + t.Fatal(err) + } + if err := caCertFile.Close(); err != nil { + t.Fatal(err) + } + + leafCertFile, err := ioutil.TempFile("", "leafCert") + if err != nil { + t.Fatal(err) + } + defer os.Remove(leafCertFile.Name()) + if _, err := leafCertFile.Write([]byte(leafCertPEM)); err != nil { + t.Fatal(err) + } + if err := leafCertFile.Close(); err != nil { + t.Fatal(err) + } + + leafCertKeyFile, err := ioutil.TempFile("", "leafCertKey") + if err != nil { + t.Fatal(err) + } + defer os.Remove(leafCertKeyFile.Name()) + if _, err := leafCertKeyFile.Write([]byte(leafCertKeyPEM)); err != nil { + t.Fatal(err) + } + if err := leafCertKeyFile.Close(); err != nil { + t.Fatal(err) + } + + // ///////////// + // Cert auth setup + // ///////////// + + // Enable the cert auth method + err = client.Sys().EnableAuthWithOptions("cert", &api.EnableAuthOptions{ + Type: "cert", + }) + if err != nil { + t.Fatal(err) + } + + // Set the intermediate CA cert as a trusted certificate in the backend + _, err = client.Logical().Write("auth/cert/certs/myvault-dot-com", map[string]interface{}{ + "display_name": "myvault.com", + "policies": "default", + "certificate": intermediateCertPEM, + }) + if err != nil { + t.Fatal(err) + } + + // ///////////// + // Auth handler (auto-auth) setup + // ///////////// + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + + am, err := agentcert.NewCertAuthMethod(&auth.AuthConfig{ + Logger: logger.Named("auth.cert"), + MountPath: "auth/cert", + Config: map[string]interface{}{ + "ca_cert": caCertFile.Name(), + "client_cert": leafCertFile.Name(), + "client_key": leafCertKeyFile.Name(), + }, + }) + if err != nil { + t.Fatal(err) + } + + ahConfig := &auth.AuthHandlerConfig{ + Logger: logger.Named("auth.handler"), + Client: client, + EnableReauthOnNewCredentials: true, + } + + ah := auth.NewAuthHandler(ahConfig) + errCh := make(chan error) + go 
func() { + errCh <- ah.Run(ctx, am) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + // ///////////// + // Sink setup + // ///////////// + + // Use TempFile to get us a generated file name to use for the sink. + ouf, err := ioutil.TempFile("", "auth.tokensink.test.") + if err != nil { + t.Fatal(err) + } + ouf.Close() + out := ouf.Name() + os.Remove(out) + t.Logf("output: %s", out) + + config := &sink.SinkConfig{ + Logger: logger.Named("sink.file"), + Config: map[string]interface{}{ + "path": out, + }, + } + fs, err := file.NewFileSink(config) + if err != nil { + t.Fatal(err) + } + config.Sink = fs + + ss := sink.NewSinkServer(&sink.SinkServerConfig{ + Logger: logger.Named("sink.server"), + Client: client, + }) + go func() { + errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + // This has to be after the other defers so it happens first. It allows + // successful test runs to immediately cancel all of the runner goroutines + // and unblock any of the blocking defer calls by the runner's DoneCh that + // comes before this and avoid successful tests from taking the entire + // timeout duration. + defer cancel() + + // Read the token from the sink + timeout := time.Now().Add(5 * time.Second) + for { + if time.Now().After(timeout) { + t.Fatal("did not find a written token after timeout") + } + + // Attempt to read the sink file until we get a token or the timeout is + // reached. + val, err := ioutil.ReadFile(out) + if err == nil { + os.Remove(out) + if len(val) == 0 { + t.Fatal("written token was empty") + } + + t.Logf("sink token: %s", val) + + break + } + + time.Sleep(250 * time.Millisecond) + } +} diff --git a/command/agent/cf_end_to_end_test.go b/command/agent/cf_end_to_end_test.go new file mode 100644 index 0000000..e143223 --- /dev/null +++ b/command/agent/cf_end_to_end_test.go @@ -0,0 +1,192 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package agent + +import ( + "context" + "io/ioutil" + "os" + "testing" + "time" + + hclog "github.com/hashicorp/go-hclog" + credCF "github.com/hashicorp/vault-plugin-auth-cf" + "github.com/hashicorp/vault-plugin-auth-cf/testing/certificates" + cfAPI "github.com/hashicorp/vault-plugin-auth-cf/testing/cf" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + agentcf "github.com/hashicorp/vault/command/agentproxyshared/auth/cf" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +func TestCFEndToEnd(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + + coreConfig := &vault.CoreConfig{ + DisableMlock: true, + DisableCache: true, + Logger: hclog.NewNullLogger(), + CredentialBackends: map[string]logical.Factory{ + "cf": credCF.Factory, + }, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + vault.TestWaitActive(t, cores[0].Core) + client := cores[0].Client + if err := client.Sys().EnableAuthWithOptions("cf", &api.EnableAuthOptions{ + Type: "cf", + }); err != nil { + t.Fatal(err) + } + + testIPAddress := "127.0.0.1" + + // Generate some valid certs that look like the ones we get from CF. + testCFCerts, err := certificates.Generate(cfAPI.FoundServiceGUID, cfAPI.FoundOrgGUID, cfAPI.FoundSpaceGUID, cfAPI.FoundAppGUID, testIPAddress) + if err != nil { + t.Fatal(err) + } + defer func() { + if err := testCFCerts.Close(); err != nil { + t.Fatal(err) + } + }() + + // Start a mock server representing their API. + mockCFAPI := cfAPI.MockServer(false, nil) + defer mockCFAPI.Close() + + // Configure a CA certificate like a Vault operator would in setting up CF. + if _, err := client.Logical().Write("auth/cf/config", map[string]interface{}{ + "identity_ca_certificates": testCFCerts.CACertificate, + "cf_api_addr": mockCFAPI.URL, + "cf_username": cfAPI.AuthUsername, + "cf_password": cfAPI.AuthPassword, + }); err != nil { + t.Fatal(err) + } + + // Configure a role to be used for logging in, another thing a Vault operator would do. 
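+ // Outside of a test this would look roughly like the following CLI
+ // call (the GUID values are illustrative):
+ //
+ //	vault write auth/cf/roles/test-role \
+ //	    bound_instance_ids=<service-guid> \
+ //	    bound_organization_ids=<org-guid> \
+ //	    bound_space_ids=<space-guid> \
+ //	    bound_application_ids=<app-guid>
+ //
+ // Each bound_* list constrains which CF identities may log in under
+ // this role.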
+ if _, err := client.Logical().Write("auth/cf/roles/test-role", map[string]interface{}{ + "bound_instance_ids": cfAPI.FoundServiceGUID, + "bound_organization_ids": cfAPI.FoundOrgGUID, + "bound_space_ids": cfAPI.FoundSpaceGUID, + "bound_application_ids": cfAPI.FoundAppGUID, + }); err != nil { + t.Fatal(err) + } + + os.Setenv(credCF.EnvVarInstanceCertificate, testCFCerts.PathToInstanceCertificate) + os.Setenv(credCF.EnvVarInstanceKey, testCFCerts.PathToInstanceKey) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + + am, err := agentcf.NewCFAuthMethod(&auth.AuthConfig{ + MountPath: "auth/cf", + Config: map[string]interface{}{ + "role": "test-role", + }, + }) + if err != nil { + t.Fatal(err) + } + + ahConfig := &auth.AuthHandlerConfig{ + Logger: logger.Named("auth.handler"), + Client: client, + } + + ah := auth.NewAuthHandler(ahConfig) + errCh := make(chan error) + go func() { + errCh <- ah.Run(ctx, am) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + tmpFile, err := ioutil.TempFile("", "auth.tokensink.test.") + if err != nil { + t.Fatal(err) + } + tokenSinkFileName := tmpFile.Name() + tmpFile.Close() + os.Remove(tokenSinkFileName) + t.Logf("output: %s", tokenSinkFileName) + + config := &sink.SinkConfig{ + Logger: logger.Named("sink.file"), + Config: map[string]interface{}{ + "path": tokenSinkFileName, + }, + WrapTTL: 10 * time.Second, + } + + fs, err := file.NewFileSink(config) + if err != nil { + t.Fatal(err) + } + config.Sink = fs + + ss := sink.NewSinkServer(&sink.SinkServerConfig{ + Logger: logger.Named("sink.server"), + Client: client, + }) + go func() { + errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + // This has to be after the other defers so it happens first. It allows + // successful test runs to immediately cancel all of the runner goroutines + // and unblock any of the blocking defer calls by the runner's DoneCh that + // comes before this and avoid successful tests from taking the entire + // timeout duration. + defer cancel() + + if stat, err := os.Lstat(tokenSinkFileName); err == nil { + t.Fatalf("expected err but got %s", stat) + } else if !os.IsNotExist(err) { + t.Fatal("expected notexist err") + } + + // Wait 2 seconds for the env variables to be detected and an auth to be generated. + time.Sleep(time.Second * 2) + + token, err := readToken(tokenSinkFileName) + if err != nil { + t.Fatal(err) + } + + if token.Token == "" { + t.Fatal("expected token but didn't receive it") + } +} diff --git a/command/agent/config/config.go b/command/agent/config/config.go new file mode 100644 index 0000000..eea108b --- /dev/null +++ b/command/agent/config/config.go @@ -0,0 +1,1313 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package config + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "os" + "path/filepath" + "strings" + "syscall" + "time" + + ctconfig "github.com/hashicorp/consul-template/config" + ctsignals "github.com/hashicorp/consul-template/signals" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" + "github.com/mitchellh/mapstructure" + "k8s.io/utils/strings/slices" + + "github.com/hashicorp/vault/command/agentproxyshared" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/sdk/helper/pointerutil" +) + +// Config is the configuration for Vault Agent. +type Config struct { + *configutil.SharedConfig `hcl:"-"` + + AutoAuth *AutoAuth `hcl:"auto_auth"` + ExitAfterAuth bool `hcl:"exit_after_auth"` + Cache *Cache `hcl:"cache"` + APIProxy *APIProxy `hcl:"api_proxy"` + Vault *Vault `hcl:"vault"` + TemplateConfig *TemplateConfig `hcl:"template_config"` + Templates []*ctconfig.TemplateConfig `hcl:"templates"` + DisableIdleConns []string `hcl:"disable_idle_connections"` + DisableIdleConnsAPIProxy bool `hcl:"-"` + DisableIdleConnsTemplating bool `hcl:"-"` + DisableIdleConnsAutoAuth bool `hcl:"-"` + DisableKeepAlives []string `hcl:"disable_keep_alives"` + DisableKeepAlivesAPIProxy bool `hcl:"-"` + DisableKeepAlivesTemplating bool `hcl:"-"` + DisableKeepAlivesAutoAuth bool `hcl:"-"` + Exec *ExecConfig `hcl:"exec,optional"` + EnvTemplates []*ctconfig.TemplateConfig `hcl:"env_template,optional"` +} + +const ( + DisableIdleConnsEnv = "VAULT_AGENT_DISABLE_IDLE_CONNECTIONS" + DisableKeepAlivesEnv = "VAULT_AGENT_DISABLE_KEEP_ALIVES" +) + +func (c *Config) Prune() { + for _, l := range c.Listeners { + l.RawConfig = nil + l.Profiling.UnusedKeys = nil + l.Telemetry.UnusedKeys = nil + l.CustomResponseHeaders = nil + } + c.FoundKeys = nil + c.UnusedKeys = nil + c.SharedConfig.FoundKeys = nil + c.SharedConfig.UnusedKeys = nil + if c.Telemetry != nil { + c.Telemetry.FoundKeys = nil + c.Telemetry.UnusedKeys = nil + } +} + +type Retry struct { + NumRetries int `hcl:"num_retries"` +} + +// Vault contains configuration for connecting to Vault servers +type Vault struct { + Address string `hcl:"address"` + CACert string `hcl:"ca_cert"` + CAPath string `hcl:"ca_path"` + TLSSkipVerify bool `hcl:"-"` + TLSSkipVerifyRaw interface{} `hcl:"tls_skip_verify"` + ClientCert string `hcl:"client_cert"` + ClientKey string `hcl:"client_key"` + TLSServerName string `hcl:"tls_server_name"` + Retry *Retry `hcl:"retry"` +} + +// transportDialer is an interface that allows passing a custom dialer function +// to an HTTP client's transport config +type transportDialer interface { + // Dial is intended to match https://pkg.go.dev/net#Dialer.Dial + Dial(network, address string) (net.Conn, error) + + // DialContext is intended to match https://pkg.go.dev/net#Dialer.DialContext + DialContext(ctx context.Context, network, address string) (net.Conn, error) +} + +// APIProxy contains any configuration needed for proxy mode +type APIProxy struct { + UseAutoAuthTokenRaw interface{} `hcl:"use_auto_auth_token"` + UseAutoAuthToken bool `hcl:"-"` + ForceAutoAuthToken bool `hcl:"-"` + EnforceConsistency string `hcl:"enforce_consistency"` + WhenInconsistent string `hcl:"when_inconsistent"` +} + +// Cache contains any configuration needed for Cache mode +type Cache struct { + UseAutoAuthTokenRaw interface{} 
`hcl:"use_auto_auth_token"` + UseAutoAuthToken bool `hcl:"-"` + ForceAutoAuthToken bool `hcl:"-"` + EnforceConsistency string `hcl:"enforce_consistency"` + WhenInconsistent string `hcl:"when_inconsistent"` + Persist *agentproxyshared.PersistConfig `hcl:"persist"` + InProcDialer transportDialer `hcl:"-"` +} + +// AutoAuth is the configured authentication method and sinks +type AutoAuth struct { + Method *Method `hcl:"-"` + Sinks []*Sink `hcl:"sinks"` + + // NOTE: This is unsupported outside of testing and may disappear at any + // time. + EnableReauthOnNewCredentials bool `hcl:"enable_reauth_on_new_credentials"` +} + +// Method represents the configuration for the authentication backend +type Method struct { + Type string + MountPath string `hcl:"mount_path"` + WrapTTLRaw interface{} `hcl:"wrap_ttl"` + WrapTTL time.Duration `hcl:"-"` + MinBackoffRaw interface{} `hcl:"min_backoff"` + MinBackoff time.Duration `hcl:"-"` + MaxBackoffRaw interface{} `hcl:"max_backoff"` + MaxBackoff time.Duration `hcl:"-"` + Namespace string `hcl:"namespace"` + ExitOnError bool `hcl:"exit_on_err"` + Config map[string]interface{} +} + +// Sink defines a location to write the authenticated token +type Sink struct { + Type string + WrapTTLRaw interface{} `hcl:"wrap_ttl"` + WrapTTL time.Duration `hcl:"-"` + DHType string `hcl:"dh_type"` + DeriveKey bool `hcl:"derive_key"` + DHPath string `hcl:"dh_path"` + AAD string `hcl:"aad"` + AADEnvVar string `hcl:"aad_env_var"` + Config map[string]interface{} +} + +// TemplateConfig defines global behaviors around template +type TemplateConfig struct { + ExitOnRetryFailure bool `hcl:"exit_on_retry_failure"` + StaticSecretRenderIntRaw interface{} `hcl:"static_secret_render_interval"` + StaticSecretRenderInt time.Duration `hcl:"-"` +} + +type ExecConfig struct { + Command []string `hcl:"command,attr" mapstructure:"command"` + RestartOnSecretChanges string `hcl:"restart_on_secret_changes,optional" mapstructure:"restart_on_secret_changes"` + RestartStopSignal os.Signal `hcl:"-" mapstructure:"restart_stop_signal"` +} + +func NewConfig() *Config { + return &Config{ + SharedConfig: new(configutil.SharedConfig), + } +} + +// Merge merges two Agent configurations. 
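+// Where both configs set the same thing, c2 (the config loaded later)
+// generally wins: block pointers such as auto_auth, cache, api_proxy, and
+// vault are taken from c2 when non-nil, boolean flags are effectively OR'd
+// together, and list-valued fields (templates, env_template entries) are
+// concatenated from both configs in order. A minimal usage sketch, with
+// error handling elided:
+//
+//	base, _ := LoadConfigFile("base.hcl")
+//	overlay, _ := LoadConfigFile("overlay.hcl")
+//	merged := base.Merge(overlay)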
+func (c *Config) Merge(c2 *Config) *Config { + if c2 == nil { + return c + } + + result := NewConfig() + + result.SharedConfig = c.SharedConfig + if c2.SharedConfig != nil { + result.SharedConfig = c.SharedConfig.Merge(c2.SharedConfig) + } + + result.AutoAuth = c.AutoAuth + if c2.AutoAuth != nil { + result.AutoAuth = c2.AutoAuth + } + + result.Cache = c.Cache + if c2.Cache != nil { + result.Cache = c2.Cache + } + + result.APIProxy = c.APIProxy + if c2.APIProxy != nil { + result.APIProxy = c2.APIProxy + } + + result.DisableMlock = c.DisableMlock + if c2.DisableMlock { + result.DisableMlock = c2.DisableMlock + } + + // For these, ignore the non-specific one and overwrite them all + result.DisableIdleConnsAutoAuth = c.DisableIdleConnsAutoAuth + if c2.DisableIdleConnsAutoAuth { + result.DisableIdleConnsAutoAuth = c2.DisableIdleConnsAutoAuth + } + + result.DisableIdleConnsAPIProxy = c.DisableIdleConnsAPIProxy + if c2.DisableIdleConnsAPIProxy { + result.DisableIdleConnsAPIProxy = c2.DisableIdleConnsAPIProxy + } + + result.DisableIdleConnsTemplating = c.DisableIdleConnsTemplating + if c2.DisableIdleConnsTemplating { + result.DisableIdleConnsTemplating = c2.DisableIdleConnsTemplating + } + + result.DisableKeepAlivesAutoAuth = c.DisableKeepAlivesAutoAuth + if c2.DisableKeepAlivesAutoAuth { + result.DisableKeepAlivesAutoAuth = c2.DisableKeepAlivesAutoAuth + } + + result.DisableKeepAlivesAPIProxy = c.DisableKeepAlivesAPIProxy + if c2.DisableKeepAlivesAPIProxy { + result.DisableKeepAlivesAPIProxy = c2.DisableKeepAlivesAPIProxy + } + + result.DisableKeepAlivesTemplating = c.DisableKeepAlivesTemplating + if c2.DisableKeepAlivesTemplating { + result.DisableKeepAlivesTemplating = c2.DisableKeepAlivesTemplating + } + + result.TemplateConfig = c.TemplateConfig + if c2.TemplateConfig != nil { + result.TemplateConfig = c2.TemplateConfig + } + + for _, l := range c.Templates { + result.Templates = append(result.Templates, l) + } + for _, l := range c2.Templates { + result.Templates = append(result.Templates, l) + } + + result.ExitAfterAuth = c.ExitAfterAuth + if c2.ExitAfterAuth { + result.ExitAfterAuth = c2.ExitAfterAuth + } + + result.Vault = c.Vault + if c2.Vault != nil { + result.Vault = c2.Vault + } + + result.PidFile = c.PidFile + if c2.PidFile != "" { + result.PidFile = c2.PidFile + } + + result.Exec = c.Exec + if c2.Exec != nil { + result.Exec = c2.Exec + } + + for _, envTmpl := range c.EnvTemplates { + result.EnvTemplates = append(result.EnvTemplates, envTmpl) + } + + for _, envTmpl := range c2.EnvTemplates { + result.EnvTemplates = append(result.EnvTemplates, envTmpl) + } + + return result +} + +// IsDefaultListerDefined returns true if a default listener has been defined +// in this config +func (c *Config) IsDefaultListerDefined() bool { + for _, l := range c.Listeners { + if l.Role != "metrics_only" { + return true + } + } + return false +} + +// ValidateConfig validates an Agent configuration after it has been fully merged together, to +// ensure that required combinations of configs are there +func (c *Config) ValidateConfig() error { + if c.APIProxy != nil && c.Cache != nil { + if c.Cache.UseAutoAuthTokenRaw != nil { + if c.APIProxy.UseAutoAuthTokenRaw != nil { + return fmt.Errorf("use_auto_auth_token defined in both api_proxy and cache config. 
Please remove this configuration from the cache block") + } else { + c.APIProxy.ForceAutoAuthToken = c.Cache.ForceAutoAuthToken + } + } + } + + if c.Cache != nil { + if len(c.Listeners) < 1 && len(c.Templates) < 1 && len(c.EnvTemplates) < 1 { + return fmt.Errorf("enabling the cache requires at least 1 template or 1 listener to be defined") + } + + if c.Cache.UseAutoAuthToken { + if c.AutoAuth == nil { + return fmt.Errorf("cache.use_auto_auth_token is true but auto_auth not configured") + } + if c.AutoAuth != nil && c.AutoAuth.Method != nil && c.AutoAuth.Method.WrapTTL > 0 { + return fmt.Errorf("cache.use_auto_auth_token is true and auto_auth uses wrapping") + } + } + } + + if c.APIProxy != nil { + if len(c.Listeners) < 1 { + return fmt.Errorf("configuring the api_proxy requires at least 1 listener to be defined") + } + + if c.APIProxy.UseAutoAuthToken { + if c.AutoAuth == nil { + return fmt.Errorf("api_proxy.use_auto_auth_token is true but auto_auth not configured") + } + if c.AutoAuth != nil && c.AutoAuth.Method != nil && c.AutoAuth.Method.WrapTTL > 0 { + return fmt.Errorf("api_proxy.use_auto_auth_token is true and auto_auth uses wrapping") + } + } + } + + if c.AutoAuth != nil { + if len(c.AutoAuth.Sinks) == 0 && + (c.APIProxy == nil || !c.APIProxy.UseAutoAuthToken) && + len(c.Templates) == 0 && + len(c.EnvTemplates) == 0 { + return fmt.Errorf("auto_auth requires at least one sink or at least one template or api_proxy.use_auto_auth_token=true") + } + } + + if c.AutoAuth == nil && c.Cache == nil && len(c.Listeners) == 0 { + return fmt.Errorf("no auto_auth, cache, or listener block found in config") + } + + return c.validateEnvTemplateConfig() +} + +func (c *Config) validateEnvTemplateConfig() error { + // if we are not in env-template mode, exit early + if c.Exec == nil && len(c.EnvTemplates) == 0 { + return nil + } + + if c.Exec == nil { + return fmt.Errorf("a top-level 'exec' element must be specified with 'env_template' entries") + } + + if len(c.EnvTemplates) == 0 { + return fmt.Errorf("must specify at least one 'env_template' element with a top-level 'exec' element") + } + + if c.APIProxy != nil { + return fmt.Errorf("'api_proxy' cannot be specified with 'env_template' entries") + } + + if len(c.Templates) > 0 { + return fmt.Errorf("'template' cannot be specified with 'env_template' entries") + } + + if len(c.Exec.Command) == 0 { + return fmt.Errorf("'exec' requires a non-empty 'command' field") + } + + if !slices.Contains([]string{"always", "never"}, c.Exec.RestartOnSecretChanges) { + return fmt.Errorf("'exec.restart_on_secret_changes' unexpected value: %q", c.Exec.RestartOnSecretChanges) + } + + uniqueKeys := make(map[string]struct{}) + + for _, template := range c.EnvTemplates { + // Required: + // - the key (environment variable name) + // - either "contents" or "source" + // Optional / permitted: + // - error_on_missing_key + // - error_fatal + // - left_delimiter + // - right_delimiter + // - ExtFuncMap + // - function_denylist / function_blacklist + + if template.MapToEnvironmentVariable == nil { + return fmt.Errorf("env_template: an environment variable name is required") + } + + key := *template.MapToEnvironmentVariable + + if _, exists := uniqueKeys[key]; exists { + return fmt.Errorf("env_template: duplicate environment variable name: %q", key) + } + + uniqueKeys[key] = struct{}{} + + if template.Contents == nil && template.Source == nil { + return fmt.Errorf("env_template[%s]: either 'contents' or 'source' must be specified", key) + } + + if template.Contents != nil && 
template.Source != nil { + return fmt.Errorf("env_template[%s]: 'contents' and 'source' cannot be specified together", key) + } + + if template.Backup != nil { + return fmt.Errorf("env_template[%s]: 'backup' is not allowed", key) + } + + if template.Command != nil { + return fmt.Errorf("env_template[%s]: 'command' is not allowed", key) + } + + if template.CommandTimeout != nil { + return fmt.Errorf("env_template[%s]: 'command_timeout' is not allowed", key) + } + + if template.CreateDestDirs != nil { + return fmt.Errorf("env_template[%s]: 'create_dest_dirs' is not allowed", key) + } + + if template.Destination != nil { + return fmt.Errorf("env_template[%s]: 'destination' is not allowed", key) + } + + if template.Exec != nil { + return fmt.Errorf("env_template[%s]: 'exec' is not allowed", key) + } + + if template.Perms != nil { + return fmt.Errorf("env_template[%s]: 'perms' is not allowed", key) + } + + if template.User != nil { + return fmt.Errorf("env_template[%s]: 'user' is not allowed", key) + } + + if template.Uid != nil { + return fmt.Errorf("env_template[%s]: 'uid' is not allowed", key) + } + + if template.Group != nil { + return fmt.Errorf("env_template[%s]: 'group' is not allowed", key) + } + + if template.Gid != nil { + return fmt.Errorf("env_template[%s]: 'gid' is not allowed", key) + } + + if template.Wait != nil { + return fmt.Errorf("env_template[%s]: 'wait' is not allowed", key) + } + + if template.SandboxPath != nil { + return fmt.Errorf("env_template[%s]: 'sandbox_path' is not allowed", key) + } + } + + return nil +} + +// LoadConfig loads the configuration at the given path, regardless if +// it's a file or directory. +func LoadConfig(path string) (*Config, error) { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + + if fi.IsDir() { + return LoadConfigDir(path) + } + return LoadConfigFile(path) +} + +// LoadConfigDir loads the configuration at the given path if it's a directory +func LoadConfigDir(dir string) (*Config, error) { + f, err := os.Open(dir) + if err != nil { + return nil, err + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + return nil, err + } + if !fi.IsDir() { + return nil, fmt.Errorf("configuration path must be a directory: %q", dir) + } + + var files []string + err = nil + for err != io.EOF { + var fis []os.FileInfo + fis, err = f.Readdir(128) + if err != nil && err != io.EOF { + return nil, err + } + + for _, fi := range fis { + // Ignore directories + if fi.IsDir() { + continue + } + + // Only care about files that are valid to load. + name := fi.Name() + skip := true + if strings.HasSuffix(name, ".hcl") { + skip = false + } else if strings.HasSuffix(name, ".json") { + skip = false + } + if skip || isTemporaryFile(name) { + continue + } + + path := filepath.Join(dir, name) + files = append(files, path) + } + } + + result := NewConfig() + for _, f := range files { + config, err := LoadConfigFile(f) + if err != nil { + return nil, fmt.Errorf("error loading %q: %w", f, err) + } + + if result == nil { + result = config + } else { + result = result.Merge(config) + } + } + + return result, nil +} + +// isTemporaryFile returns true or false depending on whether the +// provided file name is a temporary file for the following editors: +// emacs or vim. 
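+// For example, "config.hcl~" (vim backup), ".#config.hcl" (emacs lock
+// file), and "#config.hcl#" (emacs auto-save) are all treated as temporary
+// and skipped by LoadConfigDir.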
+func isTemporaryFile(name string) bool { + return strings.HasSuffix(name, "~") || // vim + strings.HasPrefix(name, ".#") || // emacs + (strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#")) // emacs +} + +// LoadConfigFile loads the configuration at the given path if it's a file +func LoadConfigFile(path string) (*Config, error) { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + + if fi.IsDir() { + return nil, fmt.Errorf("location is a directory, not a file") + } + + // Read the file + d, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + // Parse! + obj, err := hcl.Parse(string(d)) + if err != nil { + return nil, err + } + + // Attribute + ast.Walk(obj, func(n ast.Node) (ast.Node, bool) { + if k, ok := n.(*ast.ObjectKey); ok { + k.Token.Pos.Filename = path + } + return n, true + }) + + // Start building the result + result := NewConfig() + if err := hcl.DecodeObject(result, obj); err != nil { + return nil, err + } + + sharedConfig, err := configutil.ParseConfig(string(d)) + if err != nil { + return nil, err + } + + // Pruning custom headers for Agent for now + for _, ln := range sharedConfig.Listeners { + ln.CustomResponseHeaders = nil + } + + result.SharedConfig = sharedConfig + + list, ok := obj.Node.(*ast.ObjectList) + if !ok { + return nil, fmt.Errorf("error parsing: file doesn't contain a root object") + } + + if err := parseAutoAuth(result, list); err != nil { + return nil, fmt.Errorf("error parsing 'auto_auth': %w", err) + } + + if err := parseCache(result, list); err != nil { + return nil, fmt.Errorf("error parsing 'cache':%w", err) + } + + if err := parseAPIProxy(result, list); err != nil { + return nil, fmt.Errorf("error parsing 'api_proxy':%w", err) + } + + if err := parseTemplateConfig(result, list); err != nil { + return nil, fmt.Errorf("error parsing 'template_config': %w", err) + } + + if err := parseTemplates(result, list); err != nil { + return nil, fmt.Errorf("error parsing 'template': %w", err) + } + + if err := parseExec(result, list); err != nil { + return nil, fmt.Errorf("error parsing 'exec': %w", err) + } + + if err := parseEnvTemplates(result, list); err != nil { + return nil, fmt.Errorf("error parsing 'env_template': %w", err) + } + + if result.Cache != nil && result.APIProxy == nil && (result.Cache.UseAutoAuthToken || result.Cache.ForceAutoAuthToken) { + result.APIProxy = &APIProxy{ + UseAutoAuthToken: result.Cache.UseAutoAuthToken, + ForceAutoAuthToken: result.Cache.ForceAutoAuthToken, + } + } + + err = parseVault(result, list) + if err != nil { + return nil, fmt.Errorf("error parsing 'vault':%w", err) + } + + if result.Vault != nil { + // Set defaults + if result.Vault.Retry == nil { + result.Vault.Retry = &Retry{} + } + switch result.Vault.Retry.NumRetries { + case 0: + result.Vault.Retry.NumRetries = ctconfig.DefaultRetryAttempts + case -1: + result.Vault.Retry.NumRetries = 0 + } + } + + if disableIdleConnsEnv := os.Getenv(DisableIdleConnsEnv); disableIdleConnsEnv != "" { + result.DisableIdleConns, err = parseutil.ParseCommaStringSlice(strings.ToLower(disableIdleConnsEnv)) + if err != nil { + return nil, fmt.Errorf("error parsing environment variable %s: %v", DisableIdleConnsEnv, err) + } + } + + for _, subsystem := range result.DisableIdleConns { + switch subsystem { + case "auto-auth": + result.DisableIdleConnsAutoAuth = true + case "caching", "proxying": + result.DisableIdleConnsAPIProxy = true + case "templating": + result.DisableIdleConnsTemplating = true + case "": + continue + default: + return nil, 
fmt.Errorf("unknown disable_idle_connections value: %s", subsystem) + } + } + + if disableKeepAlivesEnv := os.Getenv(DisableKeepAlivesEnv); disableKeepAlivesEnv != "" { + result.DisableKeepAlives, err = parseutil.ParseCommaStringSlice(strings.ToLower(disableKeepAlivesEnv)) + if err != nil { + return nil, fmt.Errorf("error parsing environment variable %s: %v", DisableKeepAlivesEnv, err) + } + } + + for _, subsystem := range result.DisableKeepAlives { + switch subsystem { + case "auto-auth": + result.DisableKeepAlivesAutoAuth = true + case "caching", "proxying": + result.DisableKeepAlivesAPIProxy = true + case "templating": + result.DisableKeepAlivesTemplating = true + case "": + continue + default: + return nil, fmt.Errorf("unknown disable_keep_alives value: %s", subsystem) + } + } + + return result, nil +} + +func parseVault(result *Config, list *ast.ObjectList) error { + name := "vault" + + vaultList := list.Filter(name) + if len(vaultList.Items) == 0 { + return nil + } + + if len(vaultList.Items) > 1 { + return fmt.Errorf("one and only one %q block is required", name) + } + + item := vaultList.Items[0] + + var v Vault + err := hcl.DecodeObject(&v, item.Val) + if err != nil { + return err + } + + if v.TLSSkipVerifyRaw != nil { + v.TLSSkipVerify, err = parseutil.ParseBool(v.TLSSkipVerifyRaw) + if err != nil { + return err + } + } + + result.Vault = &v + + subs, ok := item.Val.(*ast.ObjectType) + if !ok { + return fmt.Errorf("could not parse %q as an object", name) + } + + if err := parseRetry(result, subs.List); err != nil { + return fmt.Errorf("error parsing 'retry': %w", err) + } + + return nil +} + +func parseRetry(result *Config, list *ast.ObjectList) error { + name := "retry" + + retryList := list.Filter(name) + if len(retryList.Items) == 0 { + return nil + } + + if len(retryList.Items) > 1 { + return fmt.Errorf("one and only one %q block is required", name) + } + + item := retryList.Items[0] + + var r Retry + err := hcl.DecodeObject(&r, item.Val) + if err != nil { + return err + } + + result.Vault.Retry = &r + + return nil +} + +func parseAPIProxy(result *Config, list *ast.ObjectList) error { + name := "api_proxy" + + apiProxyList := list.Filter(name) + if len(apiProxyList.Items) == 0 { + return nil + } + + if len(apiProxyList.Items) > 1 { + return fmt.Errorf("one and only one %q block is required", name) + } + + item := apiProxyList.Items[0] + + var apiProxy APIProxy + err := hcl.DecodeObject(&apiProxy, item.Val) + if err != nil { + return err + } + + if apiProxy.UseAutoAuthTokenRaw != nil { + apiProxy.UseAutoAuthToken, err = parseutil.ParseBool(apiProxy.UseAutoAuthTokenRaw) + if err != nil { + // Could be a value of "force" instead of "true"/"false" + switch apiProxy.UseAutoAuthTokenRaw.(type) { + case string: + v := apiProxy.UseAutoAuthTokenRaw.(string) + + if !strings.EqualFold(v, "force") { + return fmt.Errorf("value of 'use_auto_auth_token' can be either true/false/force, %q is an invalid option", apiProxy.UseAutoAuthTokenRaw) + } + apiProxy.UseAutoAuthToken = true + apiProxy.ForceAutoAuthToken = true + + default: + return err + } + } + } + result.APIProxy = &apiProxy + + return nil +} + +func parseCache(result *Config, list *ast.ObjectList) error { + name := "cache" + + cacheList := list.Filter(name) + if len(cacheList.Items) == 0 { + return nil + } + + if len(cacheList.Items) > 1 { + return fmt.Errorf("one and only one %q block is required", name) + } + + item := cacheList.Items[0] + + var c Cache + err := hcl.DecodeObject(&c, item.Val) + if err != nil { + return err + } + + 
if c.UseAutoAuthTokenRaw != nil { + c.UseAutoAuthToken, err = parseutil.ParseBool(c.UseAutoAuthTokenRaw) + if err != nil { + // Could be a value of "force" instead of "true"/"false" + switch c.UseAutoAuthTokenRaw.(type) { + case string: + v := c.UseAutoAuthTokenRaw.(string) + + if !strings.EqualFold(v, "force") { + return fmt.Errorf("value of 'use_auto_auth_token' can be either true/false/force, %q is an invalid option", c.UseAutoAuthTokenRaw) + } + c.UseAutoAuthToken = true + c.ForceAutoAuthToken = true + + default: + return err + } + } + } + result.Cache = &c + + subs, ok := item.Val.(*ast.ObjectType) + if !ok { + return fmt.Errorf("could not parse %q as an object", name) + } + subList := subs.List + if err := parsePersist(result, subList); err != nil { + return fmt.Errorf("error parsing persist: %w", err) + } + + return nil +} + +func parsePersist(result *Config, list *ast.ObjectList) error { + name := "persist" + + persistList := list.Filter(name) + if len(persistList.Items) == 0 { + return nil + } + + if len(persistList.Items) > 1 { + return fmt.Errorf("only one %q block is required", name) + } + + item := persistList.Items[0] + + var p agentproxyshared.PersistConfig + err := hcl.DecodeObject(&p, item.Val) + if err != nil { + return err + } + + if p.Type == "" { + if len(item.Keys) == 1 { + p.Type = strings.ToLower(item.Keys[0].Token.Value().(string)) + } + if p.Type == "" { + return errors.New("persist type must be specified") + } + } + + result.Cache.Persist = &p + + return nil +} + +func parseAutoAuth(result *Config, list *ast.ObjectList) error { + name := "auto_auth" + + autoAuthList := list.Filter(name) + if len(autoAuthList.Items) == 0 { + return nil + } + if len(autoAuthList.Items) > 1 { + return fmt.Errorf("at most one %q block is allowed", name) + } + + // Get our item + item := autoAuthList.Items[0] + + var a AutoAuth + if err := hcl.DecodeObject(&a, item.Val); err != nil { + return err + } + + result.AutoAuth = &a + + subs, ok := item.Val.(*ast.ObjectType) + if !ok { + return fmt.Errorf("could not parse %q as an object", name) + } + subList := subs.List + + if err := parseMethod(result, subList); err != nil { + return fmt.Errorf("error parsing 'method': %w", err) + } + if a.Method == nil { + return fmt.Errorf("no 'method' block found") + } + + if err := parseSinks(result, subList); err != nil { + return fmt.Errorf("error parsing 'sink' stanzas: %w", err) + } + + if result.AutoAuth.Method.WrapTTL > 0 { + if len(result.AutoAuth.Sinks) != 1 { + return fmt.Errorf("error parsing auto_auth: wrapping enabled on auth method and 0 or many sinks defined") + } + + if result.AutoAuth.Sinks[0].WrapTTL > 0 { + return fmt.Errorf("error parsing auto_auth: wrapping enabled both on auth method and sink") + } + } + + if result.AutoAuth.Method.MaxBackoffRaw != nil { + var err error + if result.AutoAuth.Method.MaxBackoff, err = parseutil.ParseDurationSecond(result.AutoAuth.Method.MaxBackoffRaw); err != nil { + return err + } + result.AutoAuth.Method.MaxBackoffRaw = nil + } + + if result.AutoAuth.Method.MinBackoffRaw != nil { + var err error + if result.AutoAuth.Method.MinBackoff, err = parseutil.ParseDurationSecond(result.AutoAuth.Method.MinBackoffRaw); err != nil { + return err + } + result.AutoAuth.Method.MinBackoffRaw = nil + } + + return nil +} + +func parseMethod(result *Config, list *ast.ObjectList) error { + name := "method" + + methodList := list.Filter(name) + if len(methodList.Items) != 1 { + return fmt.Errorf("one and only one %q block is required", name) + } + + // Get our item + 
item := methodList.Items[0] + + var m Method + if err := hcl.DecodeObject(&m, item.Val); err != nil { + return err + } + + if m.Type == "" { + if len(item.Keys) == 1 { + m.Type = strings.ToLower(item.Keys[0].Token.Value().(string)) + } + if m.Type == "" { + return errors.New("method type must be specified") + } + } + + // Default to Vault's default + if m.MountPath == "" { + m.MountPath = fmt.Sprintf("auth/%s", m.Type) + } + // Standardize on no trailing slash + m.MountPath = strings.TrimSuffix(m.MountPath, "/") + + if m.WrapTTLRaw != nil { + var err error + if m.WrapTTL, err = parseutil.ParseDurationSecond(m.WrapTTLRaw); err != nil { + return err + } + m.WrapTTLRaw = nil + } + + // Canonicalize namespace path if provided + m.Namespace = namespace.Canonicalize(m.Namespace) + + result.AutoAuth.Method = &m + return nil +} + +func parseSinks(result *Config, list *ast.ObjectList) error { + name := "sink" + + sinkList := list.Filter(name) + if len(sinkList.Items) < 1 { + return nil + } + + var ts []*Sink + + for _, item := range sinkList.Items { + var s Sink + if err := hcl.DecodeObject(&s, item.Val); err != nil { + return err + } + + if s.Type == "" { + if len(item.Keys) == 1 { + s.Type = strings.ToLower(item.Keys[0].Token.Value().(string)) + } + if s.Type == "" { + return errors.New("sink type must be specified") + } + } + + if s.WrapTTLRaw != nil { + var err error + if s.WrapTTL, err = parseutil.ParseDurationSecond(s.WrapTTLRaw); err != nil { + return multierror.Prefix(err, fmt.Sprintf("sink.%s", s.Type)) + } + s.WrapTTLRaw = nil + } + + switch s.DHType { + case "": + case "curve25519": + default: + return multierror.Prefix(errors.New("invalid value for 'dh_type'"), fmt.Sprintf("sink.%s", s.Type)) + } + + if s.AADEnvVar != "" { + s.AAD = os.Getenv(s.AADEnvVar) + s.AADEnvVar = "" + } + + switch { + case s.DHPath == "" && s.DHType == "": + if s.AAD != "" { + return multierror.Prefix(errors.New("specifying AAD data without 'dh_type' does not make sense"), fmt.Sprintf("sink.%s", s.Type)) + } + if s.DeriveKey { + return multierror.Prefix(errors.New("specifying 'derive_key' data without 'dh_type' does not make sense"), fmt.Sprintf("sink.%s", s.Type)) + } + case s.DHPath != "" && s.DHType != "": + default: + return multierror.Prefix(errors.New("'dh_type' and 'dh_path' must be specified together"), fmt.Sprintf("sink.%s", s.Type)) + } + + ts = append(ts, &s) + } + + result.AutoAuth.Sinks = ts + return nil +} + +func parseTemplateConfig(result *Config, list *ast.ObjectList) error { + name := "template_config" + + templateConfigList := list.Filter(name) + if len(templateConfigList.Items) == 0 { + return nil + } + + if len(templateConfigList.Items) > 1 { + return fmt.Errorf("at most one %q block is allowed", name) + } + + // Get our item + item := templateConfigList.Items[0] + + var cfg TemplateConfig + if err := hcl.DecodeObject(&cfg, item.Val); err != nil { + return err + } + + result.TemplateConfig = &cfg + + if result.TemplateConfig.StaticSecretRenderIntRaw != nil { + var err error + if result.TemplateConfig.StaticSecretRenderInt, err = parseutil.ParseDurationSecond(result.TemplateConfig.StaticSecretRenderIntRaw); err != nil { + return err + } + result.TemplateConfig.StaticSecretRenderIntRaw = nil + } + + return nil +} + +func parseTemplates(result *Config, list *ast.ObjectList) error { + name := "template" + + templateList := list.Filter(name) + if len(templateList.Items) < 1 { + return nil + } + + var tcs []*ctconfig.TemplateConfig + + for _, item := range templateList.Items { + var shadow 
interface{} + if err := hcl.DecodeObject(&shadow, item.Val); err != nil { + return fmt.Errorf("error decoding config: %s", err) + } + + // Convert to a map and flatten the keys we want to flatten + parsed, ok := shadow.(map[string]interface{}) + if !ok { + return errors.New("error converting config") + } + + // flatten the wait or exec fields. The initial "wait" or "exec" value, if given, is a + // []map[string]interface{}, but we need it to be map[string]interface{}. + // Consul Template has a method flattenKeys that walks all of parsed and + // flattens every key. For Vault Agent, we only care about the wait input. + // Only one wait/exec stanza is supported, however Consul Template does not error + // with multiple instead it flattens them down, with last value winning. + // Here we take the last element of the parsed["wait"] or parsed["exec"] slice to keep + // consistency with Consul Template behavior. + wait, ok := parsed["wait"].([]map[string]interface{}) + if ok { + parsed["wait"] = wait[len(wait)-1] + } + + exec, ok := parsed["exec"].([]map[string]interface{}) + if ok { + parsed["exec"] = exec[len(exec)-1] + } + + var tc ctconfig.TemplateConfig + + // Use mapstructure to populate the basic config fields + var md mapstructure.Metadata + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: mapstructure.ComposeDecodeHookFunc( + ctconfig.StringToFileModeFunc(), + ctconfig.StringToWaitDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + mapstructure.StringToTimeDurationHookFunc(), + ), + ErrorUnused: true, + Metadata: &md, + Result: &tc, + }) + if err != nil { + return errors.New("mapstructure decoder creation failed") + } + if err := decoder.Decode(parsed); err != nil { + return err + } + tcs = append(tcs, &tc) + } + result.Templates = tcs + return nil +} + +func parseExec(result *Config, list *ast.ObjectList) error { + name := "exec" + + execList := list.Filter(name) + if len(execList.Items) == 0 { + return nil + } + + if len(execList.Items) > 1 { + return fmt.Errorf("at most one %q block is allowed", name) + } + + item := execList.Items[0] + var shadow interface{} + if err := hcl.DecodeObject(&shadow, item.Val); err != nil { + return fmt.Errorf("error decoding config: %s", err) + } + + parsed, ok := shadow.(map[string]interface{}) + if !ok { + return errors.New("error converting config") + } + + var execConfig ExecConfig + var md mapstructure.Metadata + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: mapstructure.ComposeDecodeHookFunc( + ctconfig.StringToFileModeFunc(), + ctconfig.StringToWaitDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + mapstructure.StringToTimeDurationHookFunc(), + ctsignals.StringToSignalFunc(), + ), + ErrorUnused: true, + Metadata: &md, + Result: &execConfig, + }) + if err != nil { + return errors.New("mapstructure decoder creation failed") + } + if err := decoder.Decode(parsed); err != nil { + return err + } + + // if the user does not specify a restart signal, default to SIGTERM + if execConfig.RestartStopSignal == nil { + execConfig.RestartStopSignal = syscall.SIGTERM + } + + if execConfig.RestartOnSecretChanges == "" { + execConfig.RestartOnSecretChanges = "always" + } + + result.Exec = &execConfig + return nil +} + +func parseEnvTemplates(result *Config, list *ast.ObjectList) error { + name := "env_template" + + envTemplateList := list.Filter(name) + + if len(envTemplateList.Items) < 1 { + return nil + } + + envTemplates := make([]*ctconfig.TemplateConfig, 0, 
len(envTemplateList.Items)) + + for _, item := range envTemplateList.Items { + var shadow interface{} + if err := hcl.DecodeObject(&shadow, item.Val); err != nil { + return fmt.Errorf("error decoding config: %s", err) + } + + // Convert to a map and flatten the keys we want to flatten + parsed, ok := shadow.(map[string]any) + if !ok { + return errors.New("error converting config") + } + + var templateConfig ctconfig.TemplateConfig + var md mapstructure.Metadata + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: mapstructure.ComposeDecodeHookFunc( + ctconfig.StringToFileModeFunc(), + ctconfig.StringToWaitDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + mapstructure.StringToTimeDurationHookFunc(), + ctsignals.StringToSignalFunc(), + ), + ErrorUnused: true, + Metadata: &md, + Result: &templateConfig, + }) + if err != nil { + return errors.New("mapstructure decoder creation failed") + } + if err := decoder.Decode(parsed); err != nil { + return err + } + + // parse the keys in the item for the environment variable name + if numberOfKeys := len(item.Keys); numberOfKeys != 1 { + return fmt.Errorf("expected one and only one environment variable name, got %d", numberOfKeys) + } + + // hcl parses this with extra quotes if quoted in config file + environmentVariableName := strings.Trim(item.Keys[0].Token.Text, `"`) + + templateConfig.MapToEnvironmentVariable = pointerutil.StringPtr(environmentVariableName) + + envTemplates = append(envTemplates, &templateConfig) + } + + result.EnvTemplates = envTemplates + return nil +} diff --git a/command/agent/config/config_test.go b/command/agent/config/config_test.go new file mode 100644 index 0000000..3be1ab3 --- /dev/null +++ b/command/agent/config/config_test.go @@ -0,0 +1,2330 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package config + +import ( + "os" + "syscall" + "testing" + "time" + + "github.com/go-test/deep" + ctconfig "github.com/hashicorp/consul-template/config" + "golang.org/x/exp/slices" + + "github.com/hashicorp/vault/command/agentproxyshared" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/sdk/helper/pointerutil" +) + +func TestLoadConfigFile_AgentCache(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-cache.hcl") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + Listeners: []*configutil.Listener{ + { + Type: "unix", + Address: "/path/to/socket", + TLSDisable: true, + SocketMode: "configmode", + SocketUser: "configuser", + SocketGroup: "configgroup", + }, + { + Type: "tcp", + Address: "127.0.0.1:8300", + TLSDisable: true, + }, + { + Type: "tcp", + Address: "127.0.0.1:3000", + Role: "metrics_only", + TLSDisable: true, + }, + { + Type: "tcp", + Role: "default", + Address: "127.0.0.1:8400", + TLSKeyFile: "/path/to/cakey.pem", + TLSCertFile: "/path/to/cacert.pem", + }, + }, + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + APIProxy: &APIProxy{ + UseAutoAuthToken: true, + ForceAutoAuthToken: false, + }, + Cache: &Cache{ + UseAutoAuthToken: true, + UseAutoAuthTokenRaw: true, + ForceAutoAuthToken: false, + Persist: &agentproxyshared.PersistConfig{ + Type: "kubernetes", + Path: "/vault/agent-cache/", + KeepAfterImport: true, + ExitOnErr: true, + ServiceAccountTokenFile: "/tmp/serviceaccount/token", + }, + }, + Vault: &Vault{ + Address: "http://127.0.0.1:1111", + CACert: "config_ca_cert", + CAPath: "config_ca_path", + TLSSkipVerifyRaw: interface{}("true"), + TLSSkipVerify: true, + ClientCert: "config_client_cert", + ClientKey: "config_client_key", + Retry: &Retry{ + NumRetries: 12, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } + + config, err = LoadConfigFile("./test-fixtures/config-cache-embedded-type.hcl") + if err != nil { + t.Fatal(err) + } + expected.Vault.TLSSkipVerifyRaw = interface{}(true) + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigDir_AgentCache(t *testing.T) { + config, err := LoadConfig("./test-fixtures/config-dir-cache/") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + Listeners: []*configutil.Listener{ + { + Type: "unix", + Address: "/path/to/socket", + TLSDisable: true, + SocketMode: "configmode", + SocketUser: "configuser", + SocketGroup: "configgroup", + }, + { + Type: "tcp", + Address: "127.0.0.1:8300", + TLSDisable: true, + }, + { + Type: "tcp", + Address: "127.0.0.1:3000", + Role: "metrics_only", + TLSDisable: true, + }, + { + Type: "tcp", + Role: "default", + Address: "127.0.0.1:8400", + TLSKeyFile: "/path/to/cakey.pem", + TLSCertFile: "/path/to/cacert.pem", + }, + }, + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: 
"/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + APIProxy: &APIProxy{ + UseAutoAuthToken: true, + ForceAutoAuthToken: false, + }, + Cache: &Cache{ + UseAutoAuthToken: true, + UseAutoAuthTokenRaw: true, + ForceAutoAuthToken: false, + Persist: &agentproxyshared.PersistConfig{ + Type: "kubernetes", + Path: "/vault/agent-cache/", + KeepAfterImport: true, + ExitOnErr: true, + ServiceAccountTokenFile: "/tmp/serviceaccount/token", + }, + }, + Vault: &Vault{ + Address: "http://127.0.0.1:1111", + CACert: "config_ca_cert", + CAPath: "config_ca_path", + TLSSkipVerifyRaw: interface{}("true"), + TLSSkipVerify: true, + ClientCert: "config_client_cert", + ClientKey: "config_client_key", + Retry: &Retry{ + NumRetries: 12, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } + + config, err = LoadConfigFile("./test-fixtures/config-dir-cache/config-cache1.hcl") + if err != nil { + t.Fatal(err) + } + config2, err := LoadConfigFile("./test-fixtures/config-dir-cache/config-cache2.hcl") + + mergedConfig := config.Merge(config2) + + mergedConfig.Prune() + if diff := deep.Equal(mergedConfig, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigDir_AutoAuthAndListener(t *testing.T) { + config, err := LoadConfig("./test-fixtures/config-dir-auto-auth-and-listener/") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + Listeners: []*configutil.Listener{ + { + Type: "tcp", + Address: "127.0.0.1:8300", + TLSDisable: true, + }, + }, + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } + + config, err = LoadConfigFile("./test-fixtures/config-dir-auto-auth-and-listener/config1.hcl") + if err != nil { + t.Fatal(err) + } + config2, err := LoadConfigFile("./test-fixtures/config-dir-auto-auth-and-listener/config2.hcl") + + mergedConfig := config.Merge(config2) + + mergedConfig.Prune() + if diff := deep.Equal(mergedConfig, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigDir_VaultBlock(t *testing.T) { + config, err := LoadConfig("./test-fixtures/config-dir-vault-block/") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + }, + Vault: &Vault{ + Address: "http://127.0.0.1:1111", + CACert: "config_ca_cert", + CAPath: "config_ca_path", + TLSSkipVerifyRaw: interface{}("true"), + TLSSkipVerify: true, + ClientCert: "config_client_cert", + ClientKey: "config_client_key", + Retry: &Retry{ + NumRetries: 12, + }, + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } + + config, err = LoadConfigFile("./test-fixtures/config-dir-vault-block/config1.hcl") 
+ if err != nil { + t.Fatal(err) + } + config2, err := LoadConfigFile("./test-fixtures/config-dir-vault-block/config2.hcl") + + mergedConfig := config.Merge(config2) + + mergedConfig.Prune() + if diff := deep.Equal(mergedConfig, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_AgentCache_NoListeners(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-cache-no-listeners.hcl") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + APIProxy: &APIProxy{ + UseAutoAuthToken: true, + ForceAutoAuthToken: false, + }, + Cache: &Cache{ + UseAutoAuthToken: true, + UseAutoAuthTokenRaw: true, + ForceAutoAuthToken: false, + Persist: &agentproxyshared.PersistConfig{ + Type: "kubernetes", + Path: "/vault/agent-cache/", + KeepAfterImport: true, + ExitOnErr: true, + ServiceAccountTokenFile: "/tmp/serviceaccount/token", + }, + }, + Vault: &Vault{ + Address: "http://127.0.0.1:1111", + CACert: "config_ca_cert", + CAPath: "config_ca_path", + TLSSkipVerifyRaw: interface{}("true"), + TLSSkipVerify: true, + ClientCert: "config_client_cert", + ClientKey: "config_client_key", + Retry: &Retry{ + NumRetries: 12, + }, + }, + Templates: []*ctconfig.TemplateConfig{ + { + Source: pointerutil.StringPtr("/path/on/disk/to/template.ctmpl"), + Destination: pointerutil.StringPtr("/path/on/disk/where/template/will/render.txt"), + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile(t *testing.T) { + if err := os.Setenv("TEST_AAD_ENV", "aad"); err != nil { + t.Fatal(err) + } + defer func() { + if err := os.Unsetenv("TEST_AAD_ENV"); err != nil { + t.Fatal(err) + } + }() + + config, err := LoadConfigFile("./test-fixtures/config.hcl") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + LogFile: "/var/log/vault/vault-agent.log", + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Namespace: "my-namespace/", + Config: map[string]interface{}{ + "role": "foobar", + }, + MaxBackoff: 0, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + { + Type: "file", + WrapTTL: 5 * time.Minute, + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath2", + AAD: "aad", + DeriveKey: true, + Config: map[string]interface{}{ + "path": "/tmp/file-bar", + }, + }, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } + + config, err = LoadConfigFile("./test-fixtures/config-embedded-type.hcl") + if err != nil { + t.Fatalf("err: %s", err) + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_Method_Wrapping(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-method-wrapping.hcl") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: 
"./pidfile", + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + ExitOnError: false, + WrapTTL: 5 * time.Minute, + MaxBackoff: 2 * time.Minute, + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_Method_InitialBackoff(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-method-initial-backoff.hcl") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + ExitOnError: false, + WrapTTL: 5 * time.Minute, + MinBackoff: 5 * time.Second, + MaxBackoff: 2 * time.Minute, + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_Method_ExitOnErr(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-method-exit-on-err.hcl") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + ExitOnError: true, + WrapTTL: 5 * time.Minute, + MinBackoff: 5 * time.Second, + MaxBackoff: 2 * time.Minute, + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_AgentCache_NoAutoAuth(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-cache-no-auto_auth.hcl") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + Cache: &Cache{}, + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + Listeners: []*configutil.Listener{ + { + Type: "tcp", + Address: "127.0.0.1:8300", + TLSDisable: true, + }, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_Bad_AgentCache_InconsisentAutoAuth(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/bad-config-cache-inconsistent-auto_auth.hcl") + if err != nil { + t.Fatalf("LoadConfigFile should not return an error for this config, err: %v", err) + } + if config == nil { + t.Fatal("config was nil") + } + err = config.ValidateConfig() + if err == nil { + t.Fatal("ValidateConfig should return an error when use_auto_auth_token=true and no auto_auth section present") + } +} + +func TestLoadConfigFile_Bad_AgentCache_ForceAutoAuthNoMethod(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/bad-config-cache-force-token-no-auth-method.hcl") + if err != nil { + t.Fatalf("LoadConfigFile should not return an error for this config, err: %v", err) + } + if config == nil { + t.Fatal("config was nil") + } + err = config.ValidateConfig() + if err == nil { + t.Fatal("ValidateConfig should return an error when use_auto_auth_token=force and no auto_auth section present") + } +} + 
+func TestLoadConfigFile_Bad_AgentCache_NoListeners(t *testing.T) { + _, err := LoadConfigFile("./test-fixtures/bad-config-cache-no-listeners.hcl") + if err != nil { + t.Fatalf("LoadConfigFile should not return an error for this config, err: %v", err) + } +} + +func TestLoadConfigFile_Bad_AutoAuth_Wrapped_Multiple_Sinks(t *testing.T) { + _, err := LoadConfigFile("./test-fixtures/bad-config-auto_auth-wrapped-multiple-sinks.hcl") + if err == nil { + t.Fatalf("LoadConfigFile should return an error for this config") + } +} + +func TestLoadConfigFile_Bad_AutoAuth_Nosinks_Nocache_Notemplates(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/bad-config-auto_auth-nosinks-nocache-notemplates.hcl") + if err != nil { + t.Fatalf("LoadConfigFile should not return an error for this config, err: %v", err) + } + if config == nil { + t.Fatal("config was nil") + } + err = config.ValidateConfig() + if err == nil { + t.Fatal("ValidateConfig should return an error when auto_auth configured and there are no sinks, caches or templates") + } +} + +func TestLoadConfigFile_Bad_AutoAuth_Both_Wrapping_Types(t *testing.T) { + _, err := LoadConfigFile("./test-fixtures/bad-config-method-wrapping-and-sink-wrapping.hcl") + if err == nil { + t.Fatalf("LoadConfigFile should return an error for this config") + } +} + +func TestLoadConfigFile_Bad_AgentCache_AutoAuth_Method_wrapping(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/bad-config-cache-auto_auth-method-wrapping.hcl") + if err != nil { + t.Fatalf("LoadConfigFile should not return an error for this config, err: %v", err) + } + if config == nil { + t.Fatal("config was nil") + } + err = config.ValidateConfig() + if err == nil { + t.Fatal("ValidateConfig should return an error when auto_auth.method.wrap_ttl nonzero and cache.use_auto_auth_token=true") + } +} + +func TestLoadConfigFile_Bad_APIProxy_And_Cache_Same_Config(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/bad-config-api_proxy-cache.hcl") + if err != nil { + t.Fatalf("LoadConfigFile should not return an error for this config, err: %v", err) + } + if config == nil { + t.Fatal("config was nil") + } + err = config.ValidateConfig() + if err == nil { + t.Fatal("ValidateConfig should return an error when cache and api_proxy try to configure the same value") + } +} + +func TestLoadConfigFile_AgentCache_AutoAuth_NoSink(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-cache-auto_auth-no-sink.hcl") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + Listeners: []*configutil.Listener{ + { + Type: "tcp", + Address: "127.0.0.1:8300", + TLSDisable: true, + }, + }, + PidFile: "./pidfile", + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + }, + APIProxy: &APIProxy{ + UseAutoAuthToken: true, + ForceAutoAuthToken: false, + }, + Cache: &Cache{ + UseAutoAuthToken: true, + UseAutoAuthTokenRaw: true, + ForceAutoAuthToken: false, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_AgentCache_AutoAuth_Force(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-cache-auto_auth-force.hcl") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + Listeners: []*configutil.Listener{ + { + Type: "tcp", + Address: "127.0.0.1:8300", + TLSDisable:
true, + }, + }, + PidFile: "./pidfile", + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + }, + APIProxy: &APIProxy{ + UseAutoAuthToken: true, + ForceAutoAuthToken: true, + }, + Cache: &Cache{ + UseAutoAuthToken: true, + UseAutoAuthTokenRaw: "force", + ForceAutoAuthToken: true, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_AgentCache_AutoAuth_True(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-cache-auto_auth-true.hcl") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + Listeners: []*configutil.Listener{ + { + Type: "tcp", + Address: "127.0.0.1:8300", + TLSDisable: true, + }, + }, + PidFile: "./pidfile", + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + }, + APIProxy: &APIProxy{ + UseAutoAuthToken: true, + ForceAutoAuthToken: false, + }, + Cache: &Cache{ + UseAutoAuthToken: true, + UseAutoAuthTokenRaw: "true", + ForceAutoAuthToken: false, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_Agent_AutoAuth_APIProxyAllConfig(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-api_proxy-auto_auth-all-api_proxy-config.hcl") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + Listeners: []*configutil.Listener{ + { + Type: "tcp", + Address: "127.0.0.1:8300", + TLSDisable: true, + }, + }, + PidFile: "./pidfile", + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + }, + APIProxy: &APIProxy{ + UseAutoAuthToken: true, + UseAutoAuthTokenRaw: "force", + ForceAutoAuthToken: true, + EnforceConsistency: "always", + WhenInconsistent: "forward", + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_AgentCache_AutoAuth_False(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-cache-auto_auth-false.hcl") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + Listeners: []*configutil.Listener{ + { + Type: "tcp", + Address: "127.0.0.1:8300", + TLSDisable: true, + }, + }, + PidFile: "./pidfile", + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + Cache: &Cache{ + UseAutoAuthToken: false, + UseAutoAuthTokenRaw: "false", + ForceAutoAuthToken: false, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_AgentCache_Persist(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-cache-persist-false.hcl") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + Cache: &Cache{ + Persist: &agentproxyshared.PersistConfig{ + Type: "kubernetes", + Path: "/vault/agent-cache/", + KeepAfterImport: false, + ExitOnErr: 
false, + ServiceAccountTokenFile: "", + }, + }, + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + Listeners: []*configutil.Listener{ + { + Type: "tcp", + Address: "127.0.0.1:8300", + TLSDisable: true, + }, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_AgentCache_PersistMissingType(t *testing.T) { + _, err := LoadConfigFile("./test-fixtures/config-cache-persist-empty-type.hcl") + if err == nil || os.IsNotExist(err) { + t.Fatal("expected error or file is missing") + } +} + +func TestLoadConfigFile_TemplateConfig(t *testing.T) { + testCases := map[string]struct { + fixturePath string + expectedTemplateConfig TemplateConfig + }{ + "set-true": { + "./test-fixtures/config-template_config.hcl", + TemplateConfig{ + ExitOnRetryFailure: true, + StaticSecretRenderInt: 1 * time.Minute, + }, + }, + "empty": { + "./test-fixtures/config-template_config-empty.hcl", + TemplateConfig{ + ExitOnRetryFailure: false, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + config, err := LoadConfigFile(tc.fixturePath) + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{}, + Vault: &Vault{ + Address: "http://127.0.0.1:1111", + Retry: &Retry{ + NumRetries: 5, + }, + }, + TemplateConfig: &tc.expectedTemplateConfig, + Templates: []*ctconfig.TemplateConfig{ + { + Source: pointerutil.StringPtr("/path/on/disk/to/template.ctmpl"), + Destination: pointerutil.StringPtr("/path/on/disk/where/template/will/render.txt"), + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } + }) + } +} + +// TestLoadConfigFile_Template tests template definitions in Vault Agent +func TestLoadConfigFile_Template(t *testing.T) { + testCases := map[string]struct { + fixturePath string + expectedTemplates []*ctconfig.TemplateConfig + }{ + "min": { + fixturePath: "./test-fixtures/config-template-min.hcl", + expectedTemplates: []*ctconfig.TemplateConfig{ + { + Source: pointerutil.StringPtr("/path/on/disk/to/template.ctmpl"), + Destination: pointerutil.StringPtr("/path/on/disk/where/template/will/render.txt"), + }, + }, + }, + "full": { + fixturePath: "./test-fixtures/config-template-full.hcl", + expectedTemplates: []*ctconfig.TemplateConfig{ + { + Backup: pointerutil.BoolPtr(true), + Command: []string{"restart service foo"}, + CommandTimeout: pointerutil.TimeDurationPtr("60s"), + Contents: pointerutil.StringPtr("{{ keyOrDefault \"service/redis/maxconns@east-aws\" \"5\" }}"), + CreateDestDirs: pointerutil.BoolPtr(true), + Destination: pointerutil.StringPtr("/path/on/disk/where/template/will/render.txt"), + ErrMissingKey: pointerutil.BoolPtr(true), + LeftDelim: pointerutil.StringPtr("<<"), + Perms: pointerutil.FileModePtr(0o655), + RightDelim: pointerutil.StringPtr(">>"), + SandboxPath: pointerutil.StringPtr("/path/on/disk/where"), + Exec: &ctconfig.ExecConfig{ + Command: []string{"foo"}, + Timeout: pointerutil.TimeDurationPtr("10s"), + }, + + Wait: &ctconfig.WaitConfig{ + Min: pointerutil.TimeDurationPtr("10s"), + Max: pointerutil.TimeDurationPtr("40s"), + }, + }, + }, + }, + "many": { + fixturePath: "./test-fixtures/config-template-many.hcl", + expectedTemplates: []*ctconfig.TemplateConfig{ + { + Source: pointerutil.StringPtr("/path/on/disk/to/template.ctmpl"), + Destination: pointerutil.StringPtr("/path/on/disk/where/template/will/render.txt"), + ErrMissingKey: pointerutil.BoolPtr(false), + 
CreateDestDirs: pointerutil.BoolPtr(true), + Command: []string{"restart service foo"}, + Perms: pointerutil.FileModePtr(0o600), + }, + { + Source: pointerutil.StringPtr("/path/on/disk/to/template2.ctmpl"), + Destination: pointerutil.StringPtr("/path/on/disk/where/template/will/render2.txt"), + Backup: pointerutil.BoolPtr(true), + Perms: pointerutil.FileModePtr(0o755), + Wait: &ctconfig.WaitConfig{ + Min: pointerutil.TimeDurationPtr("2s"), + Max: pointerutil.TimeDurationPtr("10s"), + }, + }, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + config, err := LoadConfigFile(tc.fixturePath) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Namespace: "my-namespace/", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + Templates: tc.expectedTemplates, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } + }) + } +} + +// TestLoadConfigFile_Template_NoSinks tests template definitions without sinks in Vault Agent +func TestLoadConfigFile_Template_NoSinks(t *testing.T) { + testCases := map[string]struct { + fixturePath string + expectedTemplates []*ctconfig.TemplateConfig + }{ + "min": { + fixturePath: "./test-fixtures/config-template-min-nosink.hcl", + expectedTemplates: []*ctconfig.TemplateConfig{ + { + Source: pointerutil.StringPtr("/path/on/disk/to/template.ctmpl"), + Destination: pointerutil.StringPtr("/path/on/disk/where/template/will/render.txt"), + }, + }, + }, + "full": { + fixturePath: "./test-fixtures/config-template-full-nosink.hcl", + expectedTemplates: []*ctconfig.TemplateConfig{ + { + Backup: pointerutil.BoolPtr(true), + Command: []string{"restart service foo"}, + CommandTimeout: pointerutil.TimeDurationPtr("60s"), + Contents: pointerutil.StringPtr("{{ keyOrDefault \"service/redis/maxconns@east-aws\" \"5\" }}"), + CreateDestDirs: pointerutil.BoolPtr(true), + Destination: pointerutil.StringPtr("/path/on/disk/where/template/will/render.txt"), + ErrMissingKey: pointerutil.BoolPtr(true), + LeftDelim: pointerutil.StringPtr("<<"), + Perms: pointerutil.FileModePtr(0o655), + RightDelim: pointerutil.StringPtr(">>"), + SandboxPath: pointerutil.StringPtr("/path/on/disk/where"), + + Wait: &ctconfig.WaitConfig{ + Min: pointerutil.TimeDurationPtr("10s"), + Max: pointerutil.TimeDurationPtr("40s"), + }, + }, + }, + }, + "many": { + fixturePath: "./test-fixtures/config-template-many-nosink.hcl", + expectedTemplates: []*ctconfig.TemplateConfig{ + { + Source: pointerutil.StringPtr("/path/on/disk/to/template.ctmpl"), + Destination: pointerutil.StringPtr("/path/on/disk/where/template/will/render.txt"), + ErrMissingKey: pointerutil.BoolPtr(false), + CreateDestDirs: pointerutil.BoolPtr(true), + Command: []string{"restart service foo"}, + Perms: pointerutil.FileModePtr(0o600), + }, + { + Source: pointerutil.StringPtr("/path/on/disk/to/template2.ctmpl"), + Destination: pointerutil.StringPtr("/path/on/disk/where/template/will/render2.txt"), + Backup: pointerutil.BoolPtr(true), + Perms: pointerutil.FileModePtr(0o755), + Wait: &ctconfig.WaitConfig{ + Min: pointerutil.TimeDurationPtr("2s"), + Max: pointerutil.TimeDurationPtr("10s"), + }, + }, + }, + 
}, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + config, err := LoadConfigFile(tc.fixturePath) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Namespace: "my-namespace/", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: nil, + }, + Templates: tc.expectedTemplates, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } + }) + } +} + +// TestLoadConfigFile_Template_WithCache ensures that the cache {} stanza is +// permitted in Vault Agent configuration with template(s) +func TestLoadConfigFile_Template_WithCache(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-template-with-cache.hcl") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Namespace: "my-namespace/", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + }, + Cache: &Cache{}, + Templates: []*ctconfig.TemplateConfig{ + { + Source: pointerutil.StringPtr("/path/on/disk/to/template.ctmpl"), + Destination: pointerutil.StringPtr("/path/on/disk/where/template/will/render.txt"), + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_Vault_Retry(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-vault-retry.hcl") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Namespace: "my-namespace/", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + Vault: &Vault{ + Address: "http://127.0.0.1:1111", + Retry: &Retry{ + NumRetries: 5, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_Vault_Retry_Empty(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-vault-retry-empty.hcl") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Namespace: "my-namespace/", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + Vault: &Vault{ + Address: "http://127.0.0.1:1111", + Retry: &Retry{ + ctconfig.DefaultRetryAttempts, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_EnforceConsistency(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-consistency.hcl") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + Listeners: []*configutil.Listener{ + { + Type: "tcp", + Address:
"127.0.0.1:8300", + TLSDisable: true, + }, + }, + PidFile: "", + }, + Cache: &Cache{ + EnforceConsistency: "always", + WhenInconsistent: "retry", + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_EnforceConsistency_APIProxy(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-consistency-apiproxy.hcl") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + Listeners: []*configutil.Listener{ + { + Type: "tcp", + Address: "127.0.0.1:8300", + TLSDisable: true, + }, + }, + PidFile: "", + }, + APIProxy: &APIProxy{ + EnforceConsistency: "always", + WhenInconsistent: "retry", + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_Disable_Idle_Conns_All(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-disable-idle-connections-all.hcl") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + }, + DisableIdleConns: []string{"auto-auth", "caching", "templating", "proxying"}, + DisableIdleConnsAPIProxy: true, + DisableIdleConnsAutoAuth: true, + DisableIdleConnsTemplating: true, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Namespace: "my-namespace/", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + Vault: &Vault{ + Address: "http://127.0.0.1:1111", + Retry: &Retry{ + ctconfig.DefaultRetryAttempts, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_Disable_Idle_Conns_Auto_Auth(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-disable-idle-connections-auto-auth.hcl") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + }, + DisableIdleConns: []string{"auto-auth"}, + DisableIdleConnsAPIProxy: false, + DisableIdleConnsAutoAuth: true, + DisableIdleConnsTemplating: false, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Namespace: "my-namespace/", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + Vault: &Vault{ + Address: "http://127.0.0.1:1111", + Retry: &Retry{ + ctconfig.DefaultRetryAttempts, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_Disable_Idle_Conns_Templating(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-disable-idle-connections-templating.hcl") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + }, + DisableIdleConns: []string{"templating"}, + DisableIdleConnsAPIProxy: false, + DisableIdleConnsAutoAuth: false, + DisableIdleConnsTemplating: true, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Namespace: "my-namespace/", + Config: map[string]interface{}{ + "role": 
"foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + Vault: &Vault{ + Address: "http://127.0.0.1:1111", + Retry: &Retry{ + ctconfig.DefaultRetryAttempts, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_Disable_Idle_Conns_Caching(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-disable-idle-connections-caching.hcl") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + }, + DisableIdleConns: []string{"caching"}, + DisableIdleConnsAPIProxy: true, + DisableIdleConnsAutoAuth: false, + DisableIdleConnsTemplating: false, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Namespace: "my-namespace/", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + Vault: &Vault{ + Address: "http://127.0.0.1:1111", + Retry: &Retry{ + ctconfig.DefaultRetryAttempts, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_Disable_Idle_Conns_Proxying(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-disable-idle-connections-proxying.hcl") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + }, + DisableIdleConns: []string{"proxying"}, + DisableIdleConnsAPIProxy: true, + DisableIdleConnsAutoAuth: false, + DisableIdleConnsTemplating: false, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Namespace: "my-namespace/", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + Vault: &Vault{ + Address: "http://127.0.0.1:1111", + Retry: &Retry{ + ctconfig.DefaultRetryAttempts, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_Disable_Idle_Conns_Empty(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-disable-idle-connections-empty.hcl") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + }, + DisableIdleConns: []string{}, + DisableIdleConnsAPIProxy: false, + DisableIdleConnsAutoAuth: false, + DisableIdleConnsTemplating: false, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Namespace: "my-namespace/", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + Vault: &Vault{ + Address: "http://127.0.0.1:1111", + Retry: &Retry{ + ctconfig.DefaultRetryAttempts, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func 
TestLoadConfigFile_Disable_Idle_Conns_Env(t *testing.T) { + err := os.Setenv(DisableIdleConnsEnv, "auto-auth,caching,templating") + defer os.Unsetenv(DisableIdleConnsEnv) + + if err != nil { + t.Fatal(err) + } + config, err := LoadConfigFile("./test-fixtures/config-disable-idle-connections-empty.hcl") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + }, + DisableIdleConns: []string{"auto-auth", "caching", "templating"}, + DisableIdleConnsAPIProxy: true, + DisableIdleConnsAutoAuth: true, + DisableIdleConnsTemplating: true, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Namespace: "my-namespace/", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + Vault: &Vault{ + Address: "http://127.0.0.1:1111", + Retry: &Retry{ + ctconfig.DefaultRetryAttempts, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_Bad_Value_Disable_Idle_Conns(t *testing.T) { + _, err := LoadConfigFile("./test-fixtures/bad-config-disable-idle-connections.hcl") + if err == nil { + t.Fatal("should have errored, but it didn't") + } +} + +func TestLoadConfigFile_Disable_Keep_Alives_All(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-disable-keep-alives-all.hcl") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + }, + DisableKeepAlives: []string{"auto-auth", "caching", "templating", "proxying"}, + DisableKeepAlivesAPIProxy: true, + DisableKeepAlivesAutoAuth: true, + DisableKeepAlivesTemplating: true, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Namespace: "my-namespace/", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + Vault: &Vault{ + Address: "http://127.0.0.1:1111", + Retry: &Retry{ + ctconfig.DefaultRetryAttempts, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_Disable_Keep_Alives_Auto_Auth(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-disable-keep-alives-auto-auth.hcl") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + }, + DisableKeepAlives: []string{"auto-auth"}, + DisableKeepAlivesAPIProxy: false, + DisableKeepAlivesAutoAuth: true, + DisableKeepAlivesTemplating: false, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Namespace: "my-namespace/", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + Vault: &Vault{ + Address: "http://127.0.0.1:1111", + Retry: &Retry{ + ctconfig.DefaultRetryAttempts, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func
TestLoadConfigFile_Disable_Keep_Alives_Templating(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-disable-keep-alives-templating.hcl") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + }, + DisableKeepAlives: []string{"templating"}, + DisableKeepAlivesAPIProxy: false, + DisableKeepAlivesAutoAuth: false, + DisableKeepAlivesTemplating: true, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Namespace: "my-namespace/", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + Vault: &Vault{ + Address: "http://127.0.0.1:1111", + Retry: &Retry{ + ctconfig.DefaultRetryAttempts, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_Disable_Keep_Alives_Caching(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-disable-keep-alives-caching.hcl") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + }, + DisableKeepAlives: []string{"caching"}, + DisableKeepAlivesAPIProxy: true, + DisableKeepAlivesAutoAuth: false, + DisableKeepAlivesTemplating: false, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Namespace: "my-namespace/", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + Vault: &Vault{ + Address: "http://127.0.0.1:1111", + Retry: &Retry{ + ctconfig.DefaultRetryAttempts, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_Disable_Keep_Alives_Proxying(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-disable-keep-alives-proxying.hcl") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + }, + DisableKeepAlives: []string{"proxying"}, + DisableKeepAlivesAPIProxy: true, + DisableKeepAlivesAutoAuth: false, + DisableKeepAlivesTemplating: false, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Namespace: "my-namespace/", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + Vault: &Vault{ + Address: "http://127.0.0.1:1111", + Retry: &Retry{ + ctconfig.DefaultRetryAttempts, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_Disable_Keep_Alives_Empty(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-disable-keep-alives-empty.hcl") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + }, + DisableKeepAlives: []string{}, + DisableKeepAlivesAPIProxy: false, + DisableKeepAlivesAutoAuth: false, + DisableKeepAlivesTemplating: false, + AutoAuth: &AutoAuth{ + 
Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Namespace: "my-namespace/", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + Vault: &Vault{ + Address: "http://127.0.0.1:1111", + Retry: &Retry{ + ctconfig.DefaultRetryAttempts, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_Disable_Keep_Alives_Env(t *testing.T) { + err := os.Setenv(DisableKeepAlivesEnv, "auto-auth,caching,templating") + defer os.Unsetenv(DisableKeepAlivesEnv) + + if err != nil { + t.Fatal(err) + } + config, err := LoadConfigFile("./test-fixtures/config-disable-keep-alives-empty.hcl") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + }, + DisableKeepAlives: []string{"auto-auth", "caching", "templating"}, + DisableKeepAlivesAPIProxy: true, + DisableKeepAlivesAutoAuth: true, + DisableKeepAlivesTemplating: true, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Namespace: "my-namespace/", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + Vault: &Vault{ + Address: "http://127.0.0.1:1111", + Retry: &Retry{ + ctconfig.DefaultRetryAttempts, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestLoadConfigFile_Bad_Value_Disable_Keep_Alives(t *testing.T) { + _, err := LoadConfigFile("./test-fixtures/bad-config-disable-keep-alives.hcl") + if err == nil { + t.Fatal("should have errored, but it didn't") + } +} + +// TestLoadConfigFile_EnvTemplates_Simple loads and validates an env_template config +func TestLoadConfigFile_EnvTemplates_Simple(t *testing.T) { + cfg, err := LoadConfigFile("./test-fixtures/config-env-templates-simple.hcl") + if err != nil { + t.Fatalf("error loading config file: %s", err) + } + + if err := cfg.ValidateConfig(); err != nil { + t.Fatalf("validation error: %s", err) + } + + expectedKey := "MY_DATABASE_USER" + found := false + for _, envTemplate := range cfg.EnvTemplates { + if *envTemplate.MapToEnvironmentVariable == expectedKey { + found = true + } + } + if !found { + t.Fatalf("expected environment variable name to be populated") + } +} + +// TestLoadConfigFile_EnvTemplates_Complex loads and validates an env_template config +func TestLoadConfigFile_EnvTemplates_Complex(t *testing.T) { + cfg, err := LoadConfigFile("./test-fixtures/config-env-templates-complex.hcl") + if err != nil { + t.Fatalf("error loading config file: %s", err) + } + + if err := cfg.ValidateConfig(); err != nil { + t.Fatalf("validation error: %s", err) + } + + expectedKeys := []string{ + "FOO_PASSWORD", + "FOO_USER", + } + + envExists := func(key string) bool { + for _, envTmpl := range cfg.EnvTemplates { + if *envTmpl.MapToEnvironmentVariable == key { + return true + } + } + return false + } + + for _, expected := range expectedKeys { + if !envExists(expected) { + t.Fatalf("expected environment variable %s", expected) + } + } +} + +// TestLoadConfigFile_EnvTemplates_WithSource loads and validates an +// env_template config with "source" instead of
"contents" +func TestLoadConfigFile_EnvTemplates_WithSource(t *testing.T) { + cfg, err := LoadConfigFile("./test-fixtures/config-env-templates-with-source.hcl") + if err != nil { + t.Fatalf("error loading config file: %s", err) + } + + if err := cfg.ValidateConfig(); err != nil { + t.Fatalf("validation error: %s", err) + } +} + +// TestLoadConfigFile_EnvTemplates_NoName ensures that env_template with no name triggers an error +func TestLoadConfigFile_EnvTemplates_NoName(t *testing.T) { + _, err := LoadConfigFile("./test-fixtures/bad-config-env-templates-no-name.hcl") + if err == nil { + t.Fatalf("expected error") + } +} + +// TestLoadConfigFile_EnvTemplates_ExecInvalidSignal ensures that an invalid signal triggers an error +func TestLoadConfigFile_EnvTemplates_ExecInvalidSignal(t *testing.T) { + _, err := LoadConfigFile("./test-fixtures/bad-config-env-templates-invalid-signal.hcl") + if err == nil { + t.Fatalf("expected error") + } +} + +// TestLoadConfigFile_EnvTemplates_ExecSimple validates the exec section with default parameters +func TestLoadConfigFile_EnvTemplates_ExecSimple(t *testing.T) { + cfg, err := LoadConfigFile("./test-fixtures/config-env-templates-simple.hcl") + if err != nil { + t.Fatalf("error loading config file: %s", err) + } + + if err := cfg.ValidateConfig(); err != nil { + t.Fatalf("validation error: %s", err) + } + + expectedCmd := []string{"/path/to/my/app", "arg1", "arg2"} + if !slices.Equal(cfg.Exec.Command, expectedCmd) { + t.Fatal("exec.command does not have expected value") + } + + // check defaults + if cfg.Exec.RestartOnSecretChanges != "always" { + t.Fatalf("expected cfg.Exec.RestartOnSecretChanges to be 'always', got '%s'", cfg.Exec.RestartOnSecretChanges) + } + + if cfg.Exec.RestartStopSignal != syscall.SIGTERM { + t.Fatalf("expected cfg.Exec.RestartStopSignal to be 'syscall.SIGTERM', got '%s'", cfg.Exec.RestartStopSignal) + } +} + +// TestLoadConfigFile_EnvTemplates_ExecComplex validates the exec section with non-default parameters +func TestLoadConfigFile_EnvTemplates_ExecComplex(t *testing.T) { + cfg, err := LoadConfigFile("./test-fixtures/config-env-templates-complex.hcl") + if err != nil { + t.Fatalf("error loading config file: %s", err) + } + + if err := cfg.ValidateConfig(); err != nil { + t.Fatalf("validation error: %s", err) + } + + if !slices.Equal(cfg.Exec.Command, []string{"env"}) { + t.Fatal("exec.command does not have expected value") + } + + if cfg.Exec.RestartOnSecretChanges != "never" { + t.Fatalf("expected cfg.Exec.RestartOnSecretChanges to be 'never', got %q", cfg.Exec.RestartOnSecretChanges) + } + + if cfg.Exec.RestartStopSignal != syscall.SIGINT { + t.Fatalf("expected cfg.Exec.RestartStopSignal to be 'syscall.SIGINT', got %q", cfg.Exec.RestartStopSignal) + } +} + +// TestLoadConfigFile_Bad_EnvTemplates_MissingExec ensures that ValidateConfig +// errors when "env_template" stanza(s) are specified but "exec" is missing +func TestLoadConfigFile_Bad_EnvTemplates_MissingExec(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/bad-config-env-templates-missing-exec.hcl") + if err != nil { + t.Fatalf("error loading config file: %s", err) + } + + if err := config.ValidateConfig(); err == nil { + t.Fatal("expected an error from ValidateConfig: exec section is missing") + } +} + +// TestLoadConfigFile_Bad_EnvTemplates_WithProxy ensures that ValidateConfig +// errors when both env_template and api_proxy stanzas are present +func TestLoadConfigFile_Bad_EnvTemplates_WithProxy(t *testing.T) { + config, err := 
LoadConfigFile("./test-fixtures/bad-config-env-templates-with-proxy.hcl") + if err != nil { + t.Fatalf("error loading config file: %s", err) + } + + if err := config.ValidateConfig(); err == nil { + t.Fatal("expected an error from ValidateConfig: listener / api_proxy are not compatible with env_template") + } +} + +// TestLoadConfigFile_Bad_EnvTemplates_WithFileTemplates ensures that +// ValidateConfig errors when both env_template and template stanzas are present +func TestLoadConfigFile_Bad_EnvTemplates_WithFileTemplates(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/bad-config-env-templates-with-file-templates.hcl") + if err != nil { + t.Fatalf("error loading config file: %s", err) + } + + if err := config.ValidateConfig(); err == nil { + t.Fatal("expected an error from ValidateConfig: file template stanza is not compatible with env_template") + } +} + +// TestLoadConfigFile_Bad_EnvTemplates_DisalowedFields ensure that +// ValidateConfig errors for disalowed env_template fields +func TestLoadConfigFile_Bad_EnvTemplates_DisalowedFields(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/bad-config-env-templates-disalowed-fields.hcl") + if err != nil { + t.Fatalf("error loading config file: %s", err) + } + + if err := config.ValidateConfig(); err == nil { + t.Fatal("expected an error from ValidateConfig: disallowed fields specified in env_template") + } +} diff --git a/command/agent/config/test-fixtures/bad-config-api_proxy-cache.hcl b/command/agent/config/test-fixtures/bad-config-api_proxy-cache.hcl new file mode 100644 index 0000000..7d2bf5c --- /dev/null +++ b/command/agent/config/test-fixtures/bad-config-api_proxy-cache.hcl @@ -0,0 +1,26 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + config = { + role = "foobar" + } + } +} + +cache { + use_auto_auth_token = true +} + +api_proxy { + use_auto_auth_token = "force" +} + +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} diff --git a/command/agent/config/test-fixtures/bad-config-auto_auth-nosinks-nocache-notemplates.hcl b/command/agent/config/test-fixtures/bad-config-auto_auth-nosinks-nocache-notemplates.hcl new file mode 100644 index 0000000..d3d5d42 --- /dev/null +++ b/command/agent/config/test-fixtures/bad-config-auto_auth-nosinks-nocache-notemplates.hcl @@ -0,0 +1,13 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +auto_auth { + method "aws" { + mount_path = "auth/aws" + config = { + role = "foobar" + } + } +} diff --git a/command/agent/config/test-fixtures/bad-config-auto_auth-wrapped-multiple-sinks.hcl b/command/agent/config/test-fixtures/bad-config-auto_auth-wrapped-multiple-sinks.hcl new file mode 100644 index 0000000..5c2b3fb --- /dev/null +++ b/command/agent/config/test-fixtures/bad-config-auto_auth-wrapped-multiple-sinks.hcl @@ -0,0 +1,26 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +auto_auth { + method "aws" { + mount_path = "auth/aws" + wrap_ttl = 300 + config = { + role = "foobar" + } + } + + sink "file" { + config = { + path = "/tmp/file-foo" + } + } + + sink "file" { + config = { + path = "/tmp/file-bar" + } + } +} diff --git a/command/agent/config/test-fixtures/bad-config-cache-auto_auth-method-wrapping.hcl b/command/agent/config/test-fixtures/bad-config-cache-auto_auth-method-wrapping.hcl new file mode 100644 index 0000000..8a39837 --- /dev/null +++ b/command/agent/config/test-fixtures/bad-config-cache-auto_auth-method-wrapping.hcl @@ -0,0 +1,32 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + wrap_ttl = 300 + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + } +} + +cache { + use_auto_auth_token = true +} + +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} + + diff --git a/command/agent/config/test-fixtures/bad-config-cache-force-token-no-auth-method.hcl b/command/agent/config/test-fixtures/bad-config-cache-force-token-no-auth-method.hcl new file mode 100644 index 0000000..d1cae75 --- /dev/null +++ b/command/agent/config/test-fixtures/bad-config-cache-force-token-no-auth-method.hcl @@ -0,0 +1,13 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +cache { + use_auto_auth_token = "force" +} + +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} diff --git a/command/agent/config/test-fixtures/bad-config-cache-inconsistent-auto_auth.hcl b/command/agent/config/test-fixtures/bad-config-cache-inconsistent-auto_auth.hcl new file mode 100644 index 0000000..38b9c2c --- /dev/null +++ b/command/agent/config/test-fixtures/bad-config-cache-inconsistent-auto_auth.hcl @@ -0,0 +1,15 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +cache { + use_auto_auth_token = true +} + +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} + + diff --git a/command/agent/config/test-fixtures/bad-config-cache-no-listeners.hcl b/command/agent/config/test-fixtures/bad-config-cache-no-listeners.hcl new file mode 100644 index 0000000..9112183 --- /dev/null +++ b/command/agent/config/test-fixtures/bad-config-cache-no-listeners.hcl @@ -0,0 +1,8 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +cache { +} + diff --git a/command/agent/config/test-fixtures/bad-config-disable-idle-connections.hcl b/command/agent/config/test-fixtures/bad-config-disable-idle-connections.hcl new file mode 100644 index 0000000..34c292e --- /dev/null +++ b/command/agent/config/test-fixtures/bad-config-disable-idle-connections.hcl @@ -0,0 +1,30 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" +disable_idle_connections = ["foo","caching","templating"] + +auto_auth { + method { + type = "aws" + namespace = "my-namespace/" + + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +vault { + address = "http://127.0.0.1:1111" +} diff --git a/command/agent/config/test-fixtures/bad-config-disable-keep-alives.hcl b/command/agent/config/test-fixtures/bad-config-disable-keep-alives.hcl new file mode 100644 index 0000000..087e2ff --- /dev/null +++ b/command/agent/config/test-fixtures/bad-config-disable-keep-alives.hcl @@ -0,0 +1,30 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" +disable_keep_alives = ["foo","caching","templating"] + +auto_auth { + method { + type = "aws" + namespace = "my-namespace/" + + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +vault { + address = "http://127.0.0.1:1111" +} diff --git a/command/agent/config/test-fixtures/bad-config-env-templates-disalowed-fields.hcl b/command/agent/config/test-fixtures/bad-config-env-templates-disalowed-fields.hcl new file mode 100644 index 0000000..22ad96c --- /dev/null +++ b/command/agent/config/test-fixtures/bad-config-env-templates-disalowed-fields.hcl @@ -0,0 +1,33 @@ +auto_auth { + + method { + type = "token_file" + + config { + token_file_path = "/Users/avean/.vault-token" + } + } +} + +template_config { + static_secret_render_interval = "5m" + exit_on_retry_failure = true +} + +vault { + address = "http://localhost:8200" +} + +env_template "FOO_PASSWORD" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.password }}{{ end }}" + + # Error: destination and create_dest_dirs are not allowed in env_template + destination = "/path/on/disk/where/template/will/render.txt" + create_dest_dirs = true +} + +exec { + command = ["./my-app", "arg1", "arg2"] + restart_on_secret_changes = "always" + restart_stop_signal = "SIGTERM" +} diff --git a/command/agent/config/test-fixtures/bad-config-env-templates-invalid-signal.hcl b/command/agent/config/test-fixtures/bad-config-env-templates-invalid-signal.hcl new file mode 100644 index 0000000..e8d822d --- /dev/null +++ b/command/agent/config/test-fixtures/bad-config-env-templates-invalid-signal.hcl @@ -0,0 +1,26 @@ +auto_auth { + + method { + type = "token_file" + + config { + token_file_path = "/home/username/.vault-token" + } + } +} + +vault { + address = "http://localhost:8200" +} + +env_template "FOO" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.lock }}{{ end }}" + error_on_missing_key = false +} + + +exec { + command = ["env"] + restart_on_secret_changes = "never" + restart_stop_signal = "notasignal" +} diff --git a/command/agent/config/test-fixtures/bad-config-env-templates-missing-exec.hcl b/command/agent/config/test-fixtures/bad-config-env-templates-missing-exec.hcl new file mode 100644 index 0000000..6283e56 --- /dev/null +++ b/command/agent/config/test-fixtures/bad-config-env-templates-missing-exec.hcl @@ -0,0 +1,30 @@ +auto_auth { + + method { + type = "token_file" + + config { + token_file_path = "/Users/avean/.vault-token" + } + } +} + +template_config { + static_secret_render_interval = "5m" + exit_on_retry_failure = true +} + +vault { + address = 
"http://localhost:8200" +} + +env_template "FOO_PASSWORD" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.password }}{{ end }}" + error_on_missing_key = false +} +env_template "FOO_USER" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.user }}{{ end }}" + error_on_missing_key = false +} + +# Error: missing a required "exec" section! diff --git a/command/agent/config/test-fixtures/bad-config-env-templates-no-name.hcl b/command/agent/config/test-fixtures/bad-config-env-templates-no-name.hcl new file mode 100644 index 0000000..f77f20c --- /dev/null +++ b/command/agent/config/test-fixtures/bad-config-env-templates-no-name.hcl @@ -0,0 +1,26 @@ +auto_auth { + + method { + type = "token_file" + + config { + token_file_path = "/home/username/.vault-token" + } + } +} + +vault { + address = "http://localhost:8200" +} + +env_template { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.lock }}{{ end }}" + error_on_missing_key = false +} + + +exec { + command = ["env"] + restart_on_secret_changes = "never" + restart_stop_signal = "SIGTERM" +} diff --git a/command/agent/config/test-fixtures/bad-config-env-templates-with-file-templates.hcl b/command/agent/config/test-fixtures/bad-config-env-templates-with-file-templates.hcl new file mode 100644 index 0000000..811b10d --- /dev/null +++ b/command/agent/config/test-fixtures/bad-config-env-templates-with-file-templates.hcl @@ -0,0 +1,40 @@ +auto_auth { + + method { + type = "token_file" + + config { + token_file_path = "/Users/avean/.vault-token" + } + } +} + +template_config { + static_secret_render_interval = "5m" + exit_on_retry_failure = true +} + +vault { + address = "http://localhost:8200" +} + +# Error: template is incompatible with env_template! +template { + source = "/path/on/disk/to/template.ctmpl" + destination = "/path/on/disk/where/template/will/render.txt" +} + +env_template "FOO_PASSWORD" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.password }}{{ end }}" + error_on_missing_key = false +} +env_template "FOO_USER" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.user }}{{ end }}" + error_on_missing_key = false +} + +exec { + command = ["./my-app", "arg1", "arg2"] + restart_on_secret_changes = "always" + restart_stop_signal = "SIGTERM" +} diff --git a/command/agent/config/test-fixtures/bad-config-env-templates-with-proxy.hcl b/command/agent/config/test-fixtures/bad-config-env-templates-with-proxy.hcl new file mode 100644 index 0000000..3c6095d --- /dev/null +++ b/command/agent/config/test-fixtures/bad-config-env-templates-with-proxy.hcl @@ -0,0 +1,47 @@ +auto_auth { + + method { + type = "token_file" + + config { + token_file_path = "/Users/avean/.vault-token" + } + } +} + +template_config { + static_secret_render_interval = "5m" + exit_on_retry_failure = true +} + +vault { + address = "http://localhost:8200" +} + +env_template "FOO_PASSWORD" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.password }}{{ end }}" + error_on_missing_key = false +} +env_template "FOO_USER" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.user }}{{ end }}" + error_on_missing_key = false +} + +exec { + command = ["./my-app", "arg1", "arg2"] + restart_on_secret_changes = "always" + restart_stop_signal = "SIGTERM" +} + +# Error: api_proxy is incompatible with env_template +api_proxy { + use_auto_auth_token = "force" + enforce_consistency = "always" + when_inconsistent = "forward" +} + +# Error: listener is incompatible with env_template 
+listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} diff --git a/command/agent/config/test-fixtures/bad-config-method-wrapping-and-sink-wrapping.hcl b/command/agent/config/test-fixtures/bad-config-method-wrapping-and-sink-wrapping.hcl new file mode 100644 index 0000000..cb9696d --- /dev/null +++ b/command/agent/config/test-fixtures/bad-config-method-wrapping-and-sink-wrapping.hcl @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + wrap_ttl = 300 + config = { + role = "foobar" + } + } + + sink { + type = "file" + wrap_ttl = 300 + config = { + path = "/tmp/file-foo" + } + } +} diff --git a/command/agent/config/test-fixtures/config-api_proxy-auto_auth-all-api_proxy-config.hcl b/command/agent/config/test-fixtures/config-api_proxy-auto_auth-all-api_proxy-config.hcl new file mode 100644 index 0000000..a3e4e5b --- /dev/null +++ b/command/agent/config/test-fixtures/config-api_proxy-auto_auth-all-api_proxy-config.hcl @@ -0,0 +1,24 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + config = { + role = "foobar" + } + } +} + +api_proxy { + use_auto_auth_token = "force" + enforce_consistency = "always" + when_inconsistent = "forward" +} + +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} diff --git a/command/agent/config/test-fixtures/config-cache-auto_auth-false.hcl b/command/agent/config/test-fixtures/config-cache-auto_auth-false.hcl new file mode 100644 index 0000000..252216e --- /dev/null +++ b/command/agent/config/test-fixtures/config-cache-auto_auth-false.hcl @@ -0,0 +1,33 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +cache { + use_auto_auth_token = "false" +} + +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} + diff --git a/command/agent/config/test-fixtures/config-cache-auto_auth-force.hcl b/command/agent/config/test-fixtures/config-cache-auto_auth-force.hcl new file mode 100644 index 0000000..4296455 --- /dev/null +++ b/command/agent/config/test-fixtures/config-cache-auto_auth-force.hcl @@ -0,0 +1,23 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + config = { + role = "foobar" + } + } +} + +cache { + use_auto_auth_token = "force" +} + +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} + diff --git a/command/agent/config/test-fixtures/config-cache-auto_auth-no-sink.hcl b/command/agent/config/test-fixtures/config-cache-auto_auth-no-sink.hcl new file mode 100644 index 0000000..80486b3 --- /dev/null +++ b/command/agent/config/test-fixtures/config-cache-auto_auth-no-sink.hcl @@ -0,0 +1,23 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + config = { + role = "foobar" + } + } +} + +cache { + use_auto_auth_token = true +} + +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} + diff --git a/command/agent/config/test-fixtures/config-cache-auto_auth-true.hcl b/command/agent/config/test-fixtures/config-cache-auto_auth-true.hcl new file mode 100644 index 0000000..cebcdfb --- /dev/null +++ b/command/agent/config/test-fixtures/config-cache-auto_auth-true.hcl @@ -0,0 +1,23 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + config = { + role = "foobar" + } + } +} + +cache { + use_auto_auth_token = "true" + force_auto_auth_token = false +} + +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} diff --git a/command/agent/config/test-fixtures/config-cache-embedded-type.hcl b/command/agent/config/test-fixtures/config-cache-embedded-type.hcl new file mode 100644 index 0000000..4ea5257 --- /dev/null +++ b/command/agent/config/test-fixtures/config-cache-embedded-type.hcl @@ -0,0 +1,72 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +cache { + use_auto_auth_token = true + persist "kubernetes" { + path = "/vault/agent-cache/" + keep_after_import = true + exit_on_err = true + service_account_token_file = "/tmp/serviceaccount/token" + } +} + +listener { + type = "unix" + address = "/path/to/socket" + tls_disable = true + socket_mode = "configmode" + socket_user = "configuser" + socket_group = "configgroup" +} + +listener { + type = "tcp" + address = "127.0.0.1:8300" + tls_disable = true +} + +listener { + type = "tcp" + address = "127.0.0.1:3000" + tls_disable = true + role = "metrics_only" +} + +listener { + type = "tcp" + role = "default" + address = "127.0.0.1:8400" + tls_key_file = "/path/to/cakey.pem" + tls_cert_file = "/path/to/cacert.pem" +} + +vault { + address = "http://127.0.0.1:1111" + ca_cert = "config_ca_cert" + ca_path = "config_ca_path" + tls_skip_verify = true + client_cert = "config_client_cert" + client_key = "config_client_key" +} diff --git a/command/agent/config/test-fixtures/config-cache-no-auto_auth.hcl b/command/agent/config/test-fixtures/config-cache-no-auto_auth.hcl new file mode 100644 index 0000000..45c7141 --- /dev/null +++ b/command/agent/config/test-fixtures/config-cache-no-auto_auth.hcl @@ -0,0 +1,14 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +cache { +} + +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} + + diff --git a/command/agent/config/test-fixtures/config-cache-no-listeners.hcl b/command/agent/config/test-fixtures/config-cache-no-listeners.hcl new file mode 100644 index 0000000..3e0abfb --- /dev/null +++ b/command/agent/config/test-fixtures/config-cache-no-listeners.hcl @@ -0,0 +1,48 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +cache { + use_auto_auth_token = true + persist = { + type = "kubernetes" + path = "/vault/agent-cache/" + keep_after_import = true + exit_on_err = true + service_account_token_file = "/tmp/serviceaccount/token" + } +} + +vault { + address = "http://127.0.0.1:1111" + ca_cert = "config_ca_cert" + ca_path = "config_ca_path" + tls_skip_verify = "true" + client_cert = "config_client_cert" + client_key = "config_client_key" +} + +template { + source = "/path/on/disk/to/template.ctmpl" + destination = "/path/on/disk/where/template/will/render.txt" +} diff --git a/command/agent/config/test-fixtures/config-cache-persist-empty-type.hcl b/command/agent/config/test-fixtures/config-cache-persist-empty-type.hcl new file mode 100644 index 0000000..f40715e --- /dev/null +++ b/command/agent/config/test-fixtures/config-cache-persist-empty-type.hcl @@ -0,0 +1,15 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +cache { + persist = { + path = "/vault/agent-cache/" + } +} + +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} diff --git a/command/agent/config/test-fixtures/config-cache-persist-false.hcl b/command/agent/config/test-fixtures/config-cache-persist-false.hcl new file mode 100644 index 0000000..77bb926 --- /dev/null +++ b/command/agent/config/test-fixtures/config-cache-persist-false.hcl @@ -0,0 +1,17 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +cache { + persist "kubernetes" { + exit_on_err = false + keep_after_import = false + path = "/vault/agent-cache/" + } +} + +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} diff --git a/command/agent/config/test-fixtures/config-cache.hcl b/command/agent/config/test-fixtures/config-cache.hcl new file mode 100644 index 0000000..87fa5af --- /dev/null +++ b/command/agent/config/test-fixtures/config-cache.hcl @@ -0,0 +1,70 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +cache { + use_auto_auth_token = true + persist = { + type = "kubernetes" + path = "/vault/agent-cache/" + keep_after_import = true + exit_on_err = true + service_account_token_file = "/tmp/serviceaccount/token" + } +} + +listener "unix" { + address = "/path/to/socket" + tls_disable = true + socket_mode = "configmode" + socket_user = "configuser" + socket_group = "configgroup" +} + +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} + +listener { + type = "tcp" + address = "127.0.0.1:3000" + tls_disable = true + role = "metrics_only" +} + +listener "tcp" { + role = "default" + address = "127.0.0.1:8400" + tls_key_file = "/path/to/cakey.pem" + tls_cert_file = "/path/to/cacert.pem" +} + +vault { + address = "http://127.0.0.1:1111" + ca_cert = "config_ca_cert" + ca_path = "config_ca_path" + tls_skip_verify = "true" + client_cert = "config_client_cert" + client_key = "config_client_key" +} diff --git a/command/agent/config/test-fixtures/config-consistency-apiproxy.hcl b/command/agent/config/test-fixtures/config-consistency-apiproxy.hcl new file mode 100644 index 0000000..c2e662a --- /dev/null +++ b/command/agent/config/test-fixtures/config-consistency-apiproxy.hcl @@ -0,0 +1,12 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +api_proxy { + enforce_consistency = "always" + when_inconsistent = "retry" +} + +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} diff --git a/command/agent/config/test-fixtures/config-consistency.hcl b/command/agent/config/test-fixtures/config-consistency.hcl new file mode 100644 index 0000000..5351811 --- /dev/null +++ b/command/agent/config/test-fixtures/config-consistency.hcl @@ -0,0 +1,12 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +cache { + enforce_consistency = "always" + when_inconsistent = "retry" +} + +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} diff --git a/command/agent/config/test-fixtures/config-dir-auto-auth-and-listener/config1.hcl b/command/agent/config/test-fixtures/config-dir-auto-auth-and-listener/config1.hcl new file mode 100644 index 0000000..c900df6 --- /dev/null +++ b/command/agent/config/test-fixtures/config-dir-auto-auth-and-listener/config1.hcl @@ -0,0 +1,23 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} \ No newline at end of file diff --git a/command/agent/config/test-fixtures/config-dir-auto-auth-and-listener/config2.hcl b/command/agent/config/test-fixtures/config-dir-auto-auth-and-listener/config2.hcl new file mode 100644 index 0000000..2e942da --- /dev/null +++ b/command/agent/config/test-fixtures/config-dir-auto-auth-and-listener/config2.hcl @@ -0,0 +1,9 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} \ No newline at end of file diff --git a/command/agent/config/test-fixtures/config-dir-cache/config-cache1.hcl b/command/agent/config/test-fixtures/config-dir-cache/config-cache1.hcl new file mode 100644 index 0000000..767cdd9 --- /dev/null +++ b/command/agent/config/test-fixtures/config-dir-cache/config-cache1.hcl @@ -0,0 +1,50 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +listener "unix" { + address = "/path/to/socket" + tls_disable = true + socket_mode = "configmode" + socket_user = "configuser" + socket_group = "configgroup" +} + +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} + +listener { + type = "tcp" + address = "127.0.0.1:3000" + tls_disable = true + role = "metrics_only" +} + +listener "tcp" { + role = "default" + address = "127.0.0.1:8400" + tls_key_file = "/path/to/cakey.pem" + tls_cert_file = "/path/to/cacert.pem" +} \ No newline at end of file diff --git a/command/agent/config/test-fixtures/config-dir-cache/config-cache2.hcl b/command/agent/config/test-fixtures/config-dir-cache/config-cache2.hcl new file mode 100644 index 0000000..f4d0f47 --- /dev/null +++ b/command/agent/config/test-fixtures/config-dir-cache/config-cache2.hcl @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +cache { + use_auto_auth_token = true + persist = { + type = "kubernetes" + path = "/vault/agent-cache/" + keep_after_import = true + exit_on_err = true + service_account_token_file = "/tmp/serviceaccount/token" + } +} + +vault { + address = "http://127.0.0.1:1111" + ca_cert = "config_ca_cert" + ca_path = "config_ca_path" + tls_skip_verify = "true" + client_cert = "config_client_cert" + client_key = "config_client_key" +} diff --git a/command/agent/config/test-fixtures/config-dir-vault-block/config1.hcl b/command/agent/config/test-fixtures/config-dir-vault-block/config1.hcl new file mode 100644 index 0000000..1872953 --- /dev/null +++ b/command/agent/config/test-fixtures/config-dir-vault-block/config1.hcl @@ -0,0 +1,11 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +vault { + address = "http://127.0.0.1:1111" + ca_cert = "config_ca_cert" + ca_path = "config_ca_path" + tls_skip_verify = "true" + client_cert = "config_client_cert" + client_key = "config_client_key" +} diff --git a/command/agent/config/test-fixtures/config-dir-vault-block/config2.hcl b/command/agent/config/test-fixtures/config-dir-vault-block/config2.hcl new file mode 100644 index 0000000..c900df6 --- /dev/null +++ b/command/agent/config/test-fixtures/config-dir-vault-block/config2.hcl @@ -0,0 +1,23 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} \ No newline at end of file diff --git a/command/agent/config/test-fixtures/config-disable-idle-connections-all.hcl b/command/agent/config/test-fixtures/config-disable-idle-connections-all.hcl new file mode 100644 index 0000000..f312d42 --- /dev/null +++ b/command/agent/config/test-fixtures/config-disable-idle-connections-all.hcl @@ -0,0 +1,30 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" +disable_idle_connections = ["auto-auth","caching","templating","proxying"] + +auto_auth { + method { + type = "aws" + namespace = "my-namespace/" + + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +vault { + address = "http://127.0.0.1:1111" +} diff --git a/command/agent/config/test-fixtures/config-disable-idle-connections-auto-auth.hcl b/command/agent/config/test-fixtures/config-disable-idle-connections-auto-auth.hcl new file mode 100644 index 0000000..abb1756 --- /dev/null +++ b/command/agent/config/test-fixtures/config-disable-idle-connections-auto-auth.hcl @@ -0,0 +1,30 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" +disable_idle_connections = ["auto-auth"] + +auto_auth { + method { + type = "aws" + namespace = "my-namespace/" + + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +vault { + address = "http://127.0.0.1:1111" +} diff --git a/command/agent/config/test-fixtures/config-disable-idle-connections-caching.hcl b/command/agent/config/test-fixtures/config-disable-idle-connections-caching.hcl new file mode 100644 index 0000000..95a36e9 --- /dev/null +++ b/command/agent/config/test-fixtures/config-disable-idle-connections-caching.hcl @@ -0,0 +1,30 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" +disable_idle_connections = ["caching"] + +auto_auth { + method { + type = "aws" + namespace = "my-namespace/" + + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +vault { + address = "http://127.0.0.1:1111" +} diff --git a/command/agent/config/test-fixtures/config-disable-idle-connections-empty.hcl b/command/agent/config/test-fixtures/config-disable-idle-connections-empty.hcl new file mode 100644 index 0000000..3e490bf --- /dev/null +++ b/command/agent/config/test-fixtures/config-disable-idle-connections-empty.hcl @@ -0,0 +1,30 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" +disable_idle_connections = [] + +auto_auth { + method { + type = "aws" + namespace = "my-namespace/" + + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +vault { + address = "http://127.0.0.1:1111" +} diff --git a/command/agent/config/test-fixtures/config-disable-idle-connections-proxying.hcl b/command/agent/config/test-fixtures/config-disable-idle-connections-proxying.hcl new file mode 100644 index 0000000..88da2ef --- /dev/null +++ b/command/agent/config/test-fixtures/config-disable-idle-connections-proxying.hcl @@ -0,0 +1,30 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" +disable_idle_connections = ["proxying"] + +auto_auth { + method { + type = "aws" + namespace = "my-namespace/" + + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +vault { + address = "http://127.0.0.1:1111" +} diff --git a/command/agent/config/test-fixtures/config-disable-idle-connections-templating.hcl b/command/agent/config/test-fixtures/config-disable-idle-connections-templating.hcl new file mode 100644 index 0000000..6e51c91 --- /dev/null +++ b/command/agent/config/test-fixtures/config-disable-idle-connections-templating.hcl @@ -0,0 +1,30 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" +disable_idle_connections = ["templating"] + +auto_auth { + method { + type = "aws" + namespace = "my-namespace/" + + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +vault { + address = "http://127.0.0.1:1111" +} diff --git a/command/agent/config/test-fixtures/config-disable-keep-alives-all.hcl b/command/agent/config/test-fixtures/config-disable-keep-alives-all.hcl new file mode 100644 index 0000000..8c1c6d5 --- /dev/null +++ b/command/agent/config/test-fixtures/config-disable-keep-alives-all.hcl @@ -0,0 +1,30 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" +disable_keep_alives = ["auto-auth","caching","templating","proxying"] + +auto_auth { + method { + type = "aws" + namespace = "my-namespace/" + + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +vault { + address = "http://127.0.0.1:1111" +} diff --git a/command/agent/config/test-fixtures/config-disable-keep-alives-auto-auth.hcl b/command/agent/config/test-fixtures/config-disable-keep-alives-auto-auth.hcl new file mode 100644 index 0000000..d77dfb2 --- /dev/null +++ b/command/agent/config/test-fixtures/config-disable-keep-alives-auto-auth.hcl @@ -0,0 +1,30 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" +disable_keep_alives = ["auto-auth"] + +auto_auth { + method { + type = "aws" + namespace = "my-namespace/" + + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +vault { + address = "http://127.0.0.1:1111" +} diff --git a/command/agent/config/test-fixtures/config-disable-keep-alives-caching.hcl b/command/agent/config/test-fixtures/config-disable-keep-alives-caching.hcl new file mode 100644 index 0000000..386267e --- /dev/null +++ b/command/agent/config/test-fixtures/config-disable-keep-alives-caching.hcl @@ -0,0 +1,30 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" +disable_keep_alives = ["caching"] + +auto_auth { + method { + type = "aws" + namespace = "my-namespace/" + + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +vault { + address = "http://127.0.0.1:1111" +} diff --git a/command/agent/config/test-fixtures/config-disable-keep-alives-empty.hcl b/command/agent/config/test-fixtures/config-disable-keep-alives-empty.hcl new file mode 100644 index 0000000..b4239a5 --- /dev/null +++ b/command/agent/config/test-fixtures/config-disable-keep-alives-empty.hcl @@ -0,0 +1,30 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" +disable_keep_alives = [] + +auto_auth { + method { + type = "aws" + namespace = "my-namespace/" + + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +vault { + address = "http://127.0.0.1:1111" +} diff --git a/command/agent/config/test-fixtures/config-disable-keep-alives-proxying.hcl b/command/agent/config/test-fixtures/config-disable-keep-alives-proxying.hcl new file mode 100644 index 0000000..8c82a92 --- /dev/null +++ b/command/agent/config/test-fixtures/config-disable-keep-alives-proxying.hcl @@ -0,0 +1,30 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" +disable_keep_alives = ["proxying"] + +auto_auth { + method { + type = "aws" + namespace = "my-namespace/" + + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +vault { + address = "http://127.0.0.1:1111" +} diff --git a/command/agent/config/test-fixtures/config-disable-keep-alives-templating.hcl b/command/agent/config/test-fixtures/config-disable-keep-alives-templating.hcl new file mode 100644 index 0000000..01ec095 --- /dev/null +++ b/command/agent/config/test-fixtures/config-disable-keep-alives-templating.hcl @@ -0,0 +1,30 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" +disable_keep_alives = ["templating"] + +auto_auth { + method { + type = "aws" + namespace = "my-namespace/" + + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +vault { + address = "http://127.0.0.1:1111" +} diff --git a/command/agent/config/test-fixtures/config-embedded-type.hcl b/command/agent/config/test-fixtures/config-embedded-type.hcl new file mode 100644 index 0000000..2ce3b40 --- /dev/null +++ b/command/agent/config/test-fixtures/config-embedded-type.hcl @@ -0,0 +1,35 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" +log_file = "/var/log/vault/vault-agent.log" + +auto_auth { + method "aws" { + mount_path = "auth/aws" + namespace = "my-namespace" + config = { + role = "foobar" + } + } + + sink "file" { + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } + + sink "file" { + wrap_ttl = "5m" + aad_env_var = "TEST_AAD_ENV" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath2" + derive_key = true + config = { + path = "/tmp/file-bar" + } + } +} diff --git a/command/agent/config/test-fixtures/config-env-templates-complex.hcl b/command/agent/config/test-fixtures/config-env-templates-complex.hcl new file mode 100644 index 0000000..639b128 --- /dev/null +++ b/command/agent/config/test-fixtures/config-env-templates-complex.hcl @@ -0,0 +1,36 @@ +auto_auth { + + method { + type = "token_file" + + config { + token_file_path = "/home/username/.vault-token" + } + } +} + +cache {} + +template_config { + static_secret_render_interval = "5m" + exit_on_retry_failure = true +} + +vault { + address = "http://localhost:8200" +} + +env_template "FOO_PASSWORD" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.password }}{{ end }}" + error_on_missing_key = false +} +env_template "FOO_USER" { + contents = "{{ with secret \"secret/data/foo\" }}{{ .Data.data.user }}{{ end }}" + error_on_missing_key = false +} + +exec { + command = ["env"] + restart_on_secret_changes = "never" + restart_stop_signal = "SIGINT" +} diff --git a/command/agent/config/test-fixtures/config-env-templates-simple.hcl b/command/agent/config/test-fixtures/config-env-templates-simple.hcl new file mode 100644 index 0000000..441563b --- /dev/null +++ b/command/agent/config/test-fixtures/config-env-templates-simple.hcl @@ -0,0 +1,18 @@ +auto_auth { + + method { + type = "token_file" + + config { + token_file_path = "/Users/avean/.vault-token" + } + } +} + +env_template "MY_DATABASE_USER" { + contents = "{{ with secret \"secret/db-secret\" }}{{ .Data.data.user }}{{ end }}" +} + +exec { + command = ["/path/to/my/app", "arg1", "arg2"] +} diff --git a/command/agent/config/test-fixtures/config-env-templates-with-source.hcl b/command/agent/config/test-fixtures/config-env-templates-with-source.hcl new file mode 100644 index 0000000..d51cb55 --- /dev/null +++ b/command/agent/config/test-fixtures/config-env-templates-with-source.hcl @@ -0,0 +1,16 @@ +auto_auth { + method { + type = "token_file" + config { + token_file_path = "/home/username/.vault-token" + } + } +} + +env_template "MY_PASSWORD" { + source = "/path/on/disk/to/template.ctmpl" +} + +exec { + command = ["/path/to/my/app", "arg1", "arg2"] +} diff --git a/command/agent/config/test-fixtures/config-method-exit-on-err.hcl 
b/command/agent/config/test-fixtures/config-method-exit-on-err.hcl new file mode 100644 index 0000000..bbda08c --- /dev/null +++ b/command/agent/config/test-fixtures/config-method-exit-on-err.hcl @@ -0,0 +1,24 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + wrap_ttl = 300 + exit_on_err = true + config = { + role = "foobar" + } + max_backoff = "2m" + min_backoff = "5s" + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + } +} diff --git a/command/agent/config/test-fixtures/config-method-initial-backoff.hcl b/command/agent/config/test-fixtures/config-method-initial-backoff.hcl new file mode 100644 index 0000000..b166dab --- /dev/null +++ b/command/agent/config/test-fixtures/config-method-initial-backoff.hcl @@ -0,0 +1,23 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + wrap_ttl = 300 + config = { + role = "foobar" + } + max_backoff = "2m" + min_backoff = "5s" + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + } +} diff --git a/command/agent/config/test-fixtures/config-method-wrapping.hcl b/command/agent/config/test-fixtures/config-method-wrapping.hcl new file mode 100644 index 0000000..8142a19 --- /dev/null +++ b/command/agent/config/test-fixtures/config-method-wrapping.hcl @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + wrap_ttl = 300 + config = { + role = "foobar" + } + max_backoff = "2m" + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + } +} diff --git a/command/agent/config/test-fixtures/config-template-full-nosink.hcl b/command/agent/config/test-fixtures/config-template-full-nosink.hcl new file mode 100644 index 0000000..579aae1 --- /dev/null +++ b/command/agent/config/test-fixtures/config-template-full-nosink.hcl @@ -0,0 +1,40 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + namespace = "/my-namespace" + + config = { + role = "foobar" + } + } +} + +template { + destination = "/path/on/disk/where/template/will/render.txt" + create_dest_dirs = true + contents = "{{ keyOrDefault \"service/redis/maxconns@east-aws\" \"5\" }}" + + command = "restart service foo" + command_timeout = "60s" + + error_on_missing_key = true + perms = 0655 + backup = true + left_delimiter = "<<" + right_delimiter = ">>" + + sandbox_path = "/path/on/disk/where" + wait { + min = "5s" + max = "30s" + } + wait { + min = "10s" + max = "40s" + } +} diff --git a/command/agent/config/test-fixtures/config-template-full.hcl b/command/agent/config/test-fixtures/config-template-full.hcl new file mode 100644 index 0000000..b7641cd --- /dev/null +++ b/command/agent/config/test-fixtures/config-template-full.hcl @@ -0,0 +1,57 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + namespace = "/my-namespace" + + config = { + role = "foobar" + } + } + + sink { + type = "file" + + config = { + path = "/tmp/file-foo" + } + + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +template { + destination = "/path/on/disk/where/template/will/render.txt" + create_dest_dirs = true + contents = "{{ keyOrDefault \"service/redis/maxconns@east-aws\" \"5\" }}" + + command = "restart service foo" + command_timeout = "60s" + + error_on_missing_key = true + perms = 0655 + backup = true + left_delimiter = "<<" + right_delimiter = ">>" + + sandbox_path = "/path/on/disk/where" + wait { + min = "5s" + max = "30s" + } + wait { + min = "10s" + max = "40s" + } + + exec { + command = ["foo"] + timeout = "10s" + } +} \ No newline at end of file diff --git a/command/agent/config/test-fixtures/config-template-many-nosink.hcl b/command/agent/config/test-fixtures/config-template-many-nosink.hcl new file mode 100644 index 0000000..2f8352d --- /dev/null +++ b/command/agent/config/test-fixtures/config-template-many-nosink.hcl @@ -0,0 +1,41 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + namespace = "/my-namespace" + + config = { + role = "foobar" + } + } +} + +template { + source = "/path/on/disk/to/template.ctmpl" + destination = "/path/on/disk/where/template/will/render.txt" + + create_dest_dirs = true + + command = "restart service foo" + + error_on_missing_key = false + perms = 0600 +} + +template { + source = "/path/on/disk/to/template2.ctmpl" + destination = "/path/on/disk/where/template/will/render2.txt" + + perms = 0755 + + backup = true + + wait { + min = "2s" + max = "10s" + } +} diff --git a/command/agent/config/test-fixtures/config-template-many.hcl b/command/agent/config/test-fixtures/config-template-many.hcl new file mode 100644 index 0000000..3a3ce77 --- /dev/null +++ b/command/agent/config/test-fixtures/config-template-many.hcl @@ -0,0 +1,53 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + namespace = "/my-namespace" + + config = { + role = "foobar" + } + } + + sink { + type = "file" + + config = { + path = "/tmp/file-foo" + } + + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +template { + source = "/path/on/disk/to/template.ctmpl" + destination = "/path/on/disk/where/template/will/render.txt" + + create_dest_dirs = true + + command = "restart service foo" + + error_on_missing_key = false + perms = 0600 +} + +template { + source = "/path/on/disk/to/template2.ctmpl" + destination = "/path/on/disk/where/template/will/render2.txt" + + perms = 0755 + + backup = true + + wait { + min = "2s" + max = "10s" + } +} diff --git a/command/agent/config/test-fixtures/config-template-min-nosink.hcl b/command/agent/config/test-fixtures/config-template-min-nosink.hcl new file mode 100644 index 0000000..064b7a4 --- /dev/null +++ b/command/agent/config/test-fixtures/config-template-min-nosink.hcl @@ -0,0 +1,20 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + namespace = "/my-namespace" + + config = { + role = "foobar" + } + } +} + +template { + source = "/path/on/disk/to/template.ctmpl" + destination = "/path/on/disk/where/template/will/render.txt" +} diff --git a/command/agent/config/test-fixtures/config-template-min.hcl b/command/agent/config/test-fixtures/config-template-min.hcl new file mode 100644 index 0000000..34435da --- /dev/null +++ b/command/agent/config/test-fixtures/config-template-min.hcl @@ -0,0 +1,32 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + namespace = "/my-namespace" + + config = { + role = "foobar" + } + } + + sink { + type = "file" + + config = { + path = "/tmp/file-foo" + } + + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +template { + source = "/path/on/disk/to/template.ctmpl" + destination = "/path/on/disk/where/template/will/render.txt" +} diff --git a/command/agent/config/test-fixtures/config-template-with-cache.hcl b/command/agent/config/test-fixtures/config-template-with-cache.hcl new file mode 100644 index 0000000..8f43b83 --- /dev/null +++ b/command/agent/config/test-fixtures/config-template-with-cache.hcl @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + namespace = "/my-namespace" + + config = { + role = "foobar" + } + } +} + +cache {} + +template { + source = "/path/on/disk/to/template.ctmpl" + destination = "/path/on/disk/where/template/will/render.txt" +} diff --git a/command/agent/config/test-fixtures/config-template_config-empty.hcl b/command/agent/config/test-fixtures/config-template_config-empty.hcl new file mode 100644 index 0000000..ac22dcc --- /dev/null +++ b/command/agent/config/test-fixtures/config-template_config-empty.hcl @@ -0,0 +1,16 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +vault { + address = "http://127.0.0.1:1111" + retry { + num_retries = 5 + } +} + +template_config {} + +template { + source = "/path/on/disk/to/template.ctmpl" + destination = "/path/on/disk/where/template/will/render.txt" +} \ No newline at end of file diff --git a/command/agent/config/test-fixtures/config-template_config.hcl b/command/agent/config/test-fixtures/config-template_config.hcl new file mode 100644 index 0000000..b550890 --- /dev/null +++ b/command/agent/config/test-fixtures/config-template_config.hcl @@ -0,0 +1,19 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +vault { + address = "http://127.0.0.1:1111" + retry { + num_retries = 5 + } +} + +template_config { + exit_on_retry_failure = true + static_secret_render_interval = 60 +} + +template { + source = "/path/on/disk/to/template.ctmpl" + destination = "/path/on/disk/where/template/will/render.txt" +} diff --git a/command/agent/config/test-fixtures/config-vault-retry-empty.hcl b/command/agent/config/test-fixtures/config-vault-retry-empty.hcl new file mode 100644 index 0000000..72c44e1 --- /dev/null +++ b/command/agent/config/test-fixtures/config-vault-retry-empty.hcl @@ -0,0 +1,31 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0
+
+pid_file = "./pidfile"
+
+auto_auth {
+  method {
+    type = "aws"
+    namespace = "my-namespace/"
+
+    config = {
+      role = "foobar"
+    }
+  }
+
+  sink {
+    type = "file"
+    config = {
+      path = "/tmp/file-foo"
+    }
+    aad = "foobar"
+    dh_type = "curve25519"
+    dh_path = "/tmp/file-foo-dhpath"
+  }
+}
+
+vault {
+  address = "http://127.0.0.1:1111"
+  retry {}
+}
+
diff --git a/command/agent/config/test-fixtures/config-vault-retry.hcl b/command/agent/config/test-fixtures/config-vault-retry.hcl
new file mode 100644
index 0000000..5e4ee23
--- /dev/null
+++ b/command/agent/config/test-fixtures/config-vault-retry.hcl
@@ -0,0 +1,32 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+pid_file = "./pidfile"
+
+auto_auth {
+  method {
+    type = "aws"
+    namespace = "my-namespace/"
+
+    config = {
+      role = "foobar"
+    }
+  }
+
+  sink {
+    type = "file"
+    config = {
+      path = "/tmp/file-foo"
+    }
+    aad = "foobar"
+    dh_type = "curve25519"
+    dh_path = "/tmp/file-foo-dhpath"
+  }
+}
+
+vault {
+  address = "http://127.0.0.1:1111"
+  retry {
+    num_retries = 5
+  }
+}
diff --git a/command/agent/config/test-fixtures/config.hcl b/command/agent/config/test-fixtures/config.hcl
new file mode 100644
index 0000000..18ec360
--- /dev/null
+++ b/command/agent/config/test-fixtures/config.hcl
@@ -0,0 +1,37 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+pid_file = "./pidfile"
+log_file = "/var/log/vault/vault-agent.log"
+
+auto_auth {
+  method {
+    type = "aws"
+    namespace = "/my-namespace"
+    config = {
+      role = "foobar"
+    }
+  }
+
+  sink {
+    type = "file"
+    config = {
+      path = "/tmp/file-foo"
+    }
+    aad = "foobar"
+    dh_type = "curve25519"
+    dh_path = "/tmp/file-foo-dhpath"
+  }
+
+  sink {
+    type = "file"
+    wrap_ttl = "5m"
+    aad_env_var = "TEST_AAD_ENV"
+    dh_type = "curve25519"
+    dh_path = "/tmp/file-foo-dhpath2"
+    derive_key = true
+    config = {
+      path = "/tmp/file-bar"
+    }
+  }
+}
diff --git a/command/agent/doc.go b/command/agent/doc.go
new file mode 100644
index 0000000..e9f0f0b
--- /dev/null
+++ b/command/agent/doc.go
@@ -0,0 +1,11 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+/*
+Package agent implements a daemon mode of Vault designed to provide helper
+features like auto-auth, caching, and templating.
+
+Agent has its own configuration stanza and operates as a proxy to a Vault
+service.
+*/
+package agent
diff --git a/command/agent/exec/exec.go b/command/agent/exec/exec.go
new file mode 100644
index 0000000..b22e5eb
--- /dev/null
+++ b/command/agent/exec/exec.go
@@ -0,0 +1,332 @@
+package exec
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"os"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/hashicorp/consul-template/child"
+	ctconfig "github.com/hashicorp/consul-template/config"
+	"github.com/hashicorp/consul-template/manager"
+	"github.com/hashicorp/go-hclog"
+	"golang.org/x/exp/slices"
+
+	"github.com/hashicorp/vault/command/agent/config"
+	"github.com/hashicorp/vault/command/agent/internal/ctmanager"
+	"github.com/hashicorp/vault/helper/useragent"
+	"github.com/hashicorp/vault/sdk/helper/pointerutil"
+)
+
+type childProcessState uint8
+
+const (
+	childProcessStateNotStarted childProcessState = iota
+	childProcessStateRunning
+	childProcessStateRestarting
+	childProcessStateStopped
+)
+
+type ServerConfig struct {
+	Logger      hclog.Logger
+	AgentConfig *config.Config
+
+	Namespace string
+
+	// LogLevel is needed to set the internal Consul Template Runner's log level
+	// to match the log level of Vault Agent.
The internal Runner creates its own
+	// logger and can't be set externally or copied from the Template Server.
+	//
+	// LogWriter is needed to initialize Consul Template's internal logger to use
+	// the same io.Writer that Vault Agent itself is using.
+	LogLevel  hclog.Level
+	LogWriter io.Writer
+}
+
+type Server struct {
+	// config holds the ServerConfig used to create it. It's passed along in other
+	// methods
+	config *ServerConfig
+
+	// runner is the consul-template runner
+	runner *manager.Runner
+
+	// numberOfTemplates is the count of templates determined by consul-template,
+	// we keep the value to ensure all templates have been rendered before
+	// starting the child process
+	// NOTE: each template may have more than one TemplateConfig, so the numbers may not match up
+	numberOfTemplates int
+
+	logger hclog.Logger
+
+	childProcess      *child.Child
+	childProcessState childProcessState
+	childProcessLock  sync.Mutex
+
+	// exit channel of the child process
+	childProcessExitCh chan int
+
+	// lastRenderedEnvVars is the cached value of all environment variables
+	// rendered by the templating engine; it is used for detecting changes
+	lastRenderedEnvVars []string
+}
+
+type ProcessExitError struct {
+	ExitCode int
+}
+
+func (e *ProcessExitError) Error() string {
+	return fmt.Sprintf("process exited with %d", e.ExitCode)
+}
+
+func NewServer(cfg *ServerConfig) *Server {
+	server := Server{
+		logger:             cfg.Logger,
+		config:             cfg,
+		childProcessState:  childProcessStateNotStarted,
+		childProcessExitCh: make(chan int),
+	}
+
+	return &server
+}
+
+func (s *Server) Run(ctx context.Context, incomingVaultToken chan string) error {
+	latestToken := new(string)
+	s.logger.Info("starting exec server")
+	defer func() {
+		s.logger.Info("exec server stopped")
+	}()
+
+	if len(s.config.AgentConfig.EnvTemplates) == 0 || s.config.AgentConfig.Exec == nil {
+		s.logger.Info("no env templates or exec config, exiting")
+		<-ctx.Done()
+		return nil
+	}
+
+	managerConfig := ctmanager.ManagerConfig{
+		AgentConfig: s.config.AgentConfig,
+		Namespace:   s.config.Namespace,
+		LogLevel:    s.config.LogLevel,
+		LogWriter:   s.config.LogWriter,
+	}
+
+	runnerConfig, err := ctmanager.NewConfig(managerConfig, s.config.AgentConfig.EnvTemplates)
+	if err != nil {
+		return fmt.Errorf("template server failed to generate runner config: %w", err)
+	}
+
+	// We leave this in "dry" mode, as there are no files to render;
+	// we will get the rendered contents of the environment variables from the incoming events
+	s.runner, err = manager.NewRunner(runnerConfig, true)
+	if err != nil {
+		return fmt.Errorf("template server failed to create: %w", err)
+	}
+
+	// prevent the templates from being rendered to stdout in "dry" mode
+	s.runner.SetOutStream(io.Discard)
+
+	s.numberOfTemplates = len(s.runner.TemplateConfigMapping())
+
+	// We receive multiple events every staticSecretRenderInterval
+	// from <-s.runner.TemplateRenderedCh(), one for each secret. Only the last
+	// event in a batch will contain the latest set of all secrets and the
+	// corresponding environment variables. This timer will fire after 2 seconds
+	// unless another event comes in, which resets the timer back to 2 seconds.
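+	//
+	// As a sketch, the debounce idiom used below amounts to the following
+	// ("restart" stands in for the restartChildProcess call the timer fires):
+	//
+	//	if debounceTimer != nil {
+	//		debounceTimer.Stop() // a newer render event arrived; cancel the pending restart
+	//	}
+	//	debounceTimer = time.AfterFunc(2*time.Second, restart) // re-arm for another 2s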
+ var debounceTimer *time.Timer + + // capture the errors related to restarting the child process + restartChildProcessErrCh := make(chan error) + + for { + select { + case <-ctx.Done(): + s.runner.Stop() + s.childProcessLock.Lock() + if s.childProcess != nil { + s.childProcess.Stop() + } + s.childProcessState = childProcessStateStopped + s.childProcessLock.Unlock() + return nil + + case token := <-incomingVaultToken: + if token != *latestToken { + s.logger.Info("exec server received new token") + + s.runner.Stop() + *latestToken = token + newTokenConfig := ctconfig.Config{ + Vault: &ctconfig.VaultConfig{ + Token: latestToken, + ClientUserAgent: pointerutil.StringPtr(useragent.AgentTemplatingString()), + }, + } + + // got a new auth token, merge it in with the existing config + runnerConfig = runnerConfig.Merge(&newTokenConfig) + s.runner, err = manager.NewRunner(runnerConfig, true) + if err != nil { + s.logger.Error("template server failed with new Vault token", "error", err) + continue + } + + // prevent the templates from being rendered to stdout in "dry" mode + s.runner.SetOutStream(io.Discard) + + go s.runner.Start() + } + + case err := <-s.runner.ErrCh: + s.logger.Error("template server error", "error", err.Error()) + s.runner.StopImmediately() + + // Return after stopping the runner if exit on retry failure was specified + if s.config.AgentConfig.TemplateConfig != nil && s.config.AgentConfig.TemplateConfig.ExitOnRetryFailure { + return fmt.Errorf("template server: %w", err) + } + + s.runner, err = manager.NewRunner(runnerConfig, true) + if err != nil { + return fmt.Errorf("template server failed to create: %w", err) + } + go s.runner.Start() + + case <-s.runner.TemplateRenderedCh(): + // A template has been rendered, figure out what to do + s.logger.Trace("template rendered") + events := s.runner.RenderEvents() + + // This checks if we've finished rendering the initial set of templates, + // for every consecutive re-render len(events) should equal s.numberOfTemplates + if len(events) < s.numberOfTemplates { + // Not all templates have been rendered yet + continue + } + + // assume the renders are finished, until we find otherwise + doneRendering := true + var renderedEnvVars []string + for _, event := range events { + // This template hasn't been rendered + if event.LastWouldRender.IsZero() { + doneRendering = false + break + } else { + for _, tcfg := range event.TemplateConfigs { + envVar := fmt.Sprintf("%s=%s", *tcfg.MapToEnvironmentVariable, event.Contents) + renderedEnvVars = append(renderedEnvVars, envVar) + } + } + } + if !doneRendering { + continue + } + + // sort the environment variables for a deterministic output and easy comparison + sort.Strings(renderedEnvVars) + + s.logger.Trace("done rendering templates") + + // don't restart the process unless a change is detected + if slices.Equal(s.lastRenderedEnvVars, renderedEnvVars) { + continue + } + + s.lastRenderedEnvVars = renderedEnvVars + + s.logger.Debug("detected a change in the environment variables: restarting the child process") + + // if a timer exists, stop it + if debounceTimer != nil { + debounceTimer.Stop() + } + debounceTimer = time.AfterFunc(2*time.Second, func() { + if err := s.restartChildProcess(renderedEnvVars); err != nil { + restartChildProcessErrCh <- fmt.Errorf("unable to restart the child process: %w", err) + } + }) + + case err := <-restartChildProcessErrCh: + // catch the error from restarting + return err + + case exitCode := <-s.childProcessExitCh: + // process exited on its own + return 
&ProcessExitError{ExitCode: exitCode} + } + } +} + +func (s *Server) restartChildProcess(newEnvVars []string) error { + s.childProcessLock.Lock() + defer s.childProcessLock.Unlock() + + switch s.config.AgentConfig.Exec.RestartOnSecretChanges { + case "always": + if s.childProcessState == childProcessStateRunning { + // process is running, need to kill it first + s.logger.Info("stopping process", "process_id", s.childProcess.Pid()) + s.childProcessState = childProcessStateRestarting + s.childProcess.Stop() + } + case "never": + if s.childProcessState == childProcessStateRunning { + s.logger.Info("detected update, but not restarting process", "process_id", s.childProcess.Pid()) + return nil + } + default: + return fmt.Errorf("invalid value for restart-on-secret-changes: %q", s.config.AgentConfig.Exec.RestartOnSecretChanges) + } + + args, subshell, err := child.CommandPrep(s.config.AgentConfig.Exec.Command) + if err != nil { + return fmt.Errorf("unable to parse command: %w", err) + } + + childInput := &child.NewInput{ + Stdin: os.Stdin, + Stdout: os.Stdout, + Stderr: os.Stderr, + Command: args[0], + Args: args[1:], + Timeout: 0, // let it run forever + Env: append(os.Environ(), newEnvVars...), + ReloadSignal: nil, // can't reload w/ new env vars + KillSignal: s.config.AgentConfig.Exec.RestartStopSignal, + KillTimeout: 30 * time.Second, + Splay: 0, + Setpgid: subshell, + Logger: s.logger.StandardLogger(nil), + } + + proc, err := child.New(childInput) + if err != nil { + return err + } + s.childProcess = proc + + if err := s.childProcess.Start(); err != nil { + return fmt.Errorf("error starting the child process: %w", err) + } + + s.childProcessState = childProcessStateRunning + + // Listen if the child process exits and bubble it up to the main loop. + // + // NOTE: this must be invoked after child.Start() to avoid a potential + // race condition with ExitCh not being initialized. 
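+	//
+	// (Sketch of the hazard, assuming the exit channel is only allocated
+	// inside child.Start(): before Start() the channel would be nil, a
+	// receive from a nil channel blocks forever, and the exit code would be
+	// silently lost.)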
+	go func() {
+		select {
+		case exitCode, ok := <-proc.ExitCh():
+			// ignore ExitCh channel closures caused by our restarts
+			if ok {
+				s.childProcessExitCh <- exitCode
+			}
+		}
+	}()
+
+	return nil
+}
diff --git a/command/agent/exec/exec_test.go b/command/agent/exec/exec_test.go
new file mode 100644
index 0000000..3c13c34
--- /dev/null
+++ b/command/agent/exec/exec_test.go
@@ -0,0 +1,379 @@
+package exec
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strconv"
+	"syscall"
+	"testing"
+	"time"
+
+	ctconfig "github.com/hashicorp/consul-template/config"
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/go-retryablehttp"
+
+	"github.com/hashicorp/vault/command/agent/config"
+	"github.com/hashicorp/vault/sdk/helper/logging"
+	"github.com/hashicorp/vault/sdk/helper/pointerutil"
+)
+
+func fakeVaultServer(t *testing.T) *httptest.Server {
+	t.Helper()
+
+	firstRequest := true
+
+	mux := http.NewServeMux()
+	mux.HandleFunc("/v1/kv/my-app/creds", func(w http.ResponseWriter, r *http.Request) {
+		// change the password on the second request to re-render the template
+		var password string
+
+		if firstRequest {
+			password = "s3cr3t"
+		} else {
+			password = "s3cr3t-two"
+		}
+
+		firstRequest = false
+
+		fmt.Fprintf(w, `{
+			"request_id": "8af096e9-518c-7351-eff5-5ba20554b21f",
+			"lease_id": "",
+			"renewable": false,
+			"lease_duration": 0,
+			"data": {
+				"data": {
+					"password": "%s",
+					"user": "app-user"
+				},
+				"metadata": {
+					"created_time": "2019-10-07T22:18:44.233247Z",
+					"deletion_time": "",
+					"destroyed": false,
+					"version": 3
+				}
+			},
+			"wrap_info": null,
+			"warnings": null,
+			"auth": null
+		}`,
+			password,
+		)
+	})
+
+	return httptest.NewServer(mux)
+}
+
+// TestExecServer_Run tests various scenarios of using vault agent as a process
+// supervisor. At its core is a sample application referred to as 'test app',
+// compiled from ./test-app/main.go. Each test case verifies that the test app
+// is started and/or stopped correctly by exec.Server.Run(). There are 3
+// high-level scenarios we want to test for:
+//
+// 1. test app is started and is injected with environment variables
+// 2. test app exits early (either with zero or non-zero exit code)
+// 3.
test app needs to be stopped (and restarted) by exec.Server +func TestExecServer_Run(t *testing.T) { + // we must build a test-app binary since 'go run' does not propagate signals correctly + goBinary, err := exec.LookPath("go") + if err != nil { + t.Fatalf("could not find go binary on path: %s", err) + } + + testAppBinary := filepath.Join(os.TempDir(), "test-app") + + if err := exec.Command(goBinary, "build", "-o", testAppBinary, "./test-app").Run(); err != nil { + t.Fatalf("could not build the test application: %s", err) + } + defer func() { + if err := os.Remove(testAppBinary); err != nil { + t.Fatalf("could not remove %q test application: %s", testAppBinary, err) + } + }() + + testCases := map[string]struct { + // skip this test case + skip bool + skipReason string + + // inputs to the exec server + envTemplates []*ctconfig.TemplateConfig + staticSecretRenderInterval time.Duration + + // test app parameters + testAppArgs []string + testAppStopSignal os.Signal + testAppPort int + + // simulate a shutdown of agent, which, in turn stops the test app + simulateShutdown bool + simulateShutdownWaitDuration time.Duration + + // expected results + expected map[string]string + expectedTestDuration time.Duration + expectedError error + }{ + "ensure_environment_variables_are_injected": { + skip: true, + envTemplates: []*ctconfig.TemplateConfig{{ + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), + }, { + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.password }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_PASSWORD"), + }}, + testAppArgs: []string{"--stop-after", "10s"}, + testAppStopSignal: syscall.SIGTERM, + testAppPort: 34001, + expected: map[string]string{ + "MY_USER": "app-user", + "MY_PASSWORD": "s3cr3t", + }, + expectedTestDuration: 15 * time.Second, + expectedError: nil, + }, + + "password_changes_test_app_should_restart": { + envTemplates: []*ctconfig.TemplateConfig{{ + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), + }, { + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.password }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_PASSWORD"), + }}, + staticSecretRenderInterval: 5 * time.Second, + testAppArgs: []string{"--stop-after", "15s", "--sleep-after-stop-signal", "0s"}, + testAppStopSignal: syscall.SIGTERM, + testAppPort: 34002, + expected: map[string]string{ + "MY_USER": "app-user", + "MY_PASSWORD": "s3cr3t-two", + }, + expectedTestDuration: 15 * time.Second, + expectedError: nil, + }, + + "test_app_exits_early": { + skip: true, + envTemplates: []*ctconfig.TemplateConfig{{ + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), + }}, + testAppArgs: []string{"--stop-after", "1s"}, + testAppStopSignal: syscall.SIGTERM, + testAppPort: 34003, + expectedTestDuration: 15 * time.Second, + expectedError: &ProcessExitError{0}, + }, + + "test_app_exits_early_non_zero": { + skip: true, + envTemplates: []*ctconfig.TemplateConfig{{ + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), + }}, + testAppArgs: []string{"--stop-after", "1s", 
"--exit-code", "5"}, + testAppStopSignal: syscall.SIGTERM, + testAppPort: 34004, + expectedTestDuration: 15 * time.Second, + expectedError: &ProcessExitError{5}, + }, + + "send_sigterm_expect_test_app_exit": { + skip: true, + envTemplates: []*ctconfig.TemplateConfig{{ + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), + }}, + testAppArgs: []string{"--stop-after", "30s", "--sleep-after-stop-signal", "1s"}, + testAppStopSignal: syscall.SIGTERM, + testAppPort: 34005, + simulateShutdown: true, + simulateShutdownWaitDuration: 3 * time.Second, + expectedTestDuration: 15 * time.Second, + expectedError: nil, + }, + + "send_sigusr1_expect_test_app_exit": { + skip: true, + envTemplates: []*ctconfig.TemplateConfig{{ + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), + }}, + testAppArgs: []string{"--stop-after", "30s", "--sleep-after-stop-signal", "1s", "--use-sigusr1"}, + testAppStopSignal: syscall.SIGUSR1, + testAppPort: 34006, + simulateShutdown: true, + simulateShutdownWaitDuration: 3 * time.Second, + expectedTestDuration: 15 * time.Second, + expectedError: nil, + }, + + "test_app_ignores_stop_signal": { + skip: true, + skipReason: "This test currently fails with 'go test -race' (see hashicorp/consul-template/issues/1753).", + envTemplates: []*ctconfig.TemplateConfig{{ + Contents: pointerutil.StringPtr(`{{ with secret "kv/my-app/creds" }}{{ .Data.data.user }}{{ end }}`), + MapToEnvironmentVariable: pointerutil.StringPtr("MY_USER"), + }}, + testAppArgs: []string{"--stop-after", "60s", "--sleep-after-stop-signal", "60s"}, + testAppStopSignal: syscall.SIGTERM, + testAppPort: 34007, + simulateShutdown: true, + simulateShutdownWaitDuration: 32 * time.Second, // the test app should be stopped immediately after 30s + expectedTestDuration: 45 * time.Second, + expectedError: nil, + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + if testCase.skip { + t.Skip(testCase.skipReason) + } + + t.Logf("test case %s: begin", name) + defer t.Logf("test case %s: end", name) + + fakeVault := fakeVaultServer(t) + defer fakeVault.Close() + + ctx, cancelContextFunc := context.WithTimeout(context.Background(), testCase.expectedTestDuration) + defer cancelContextFunc() + + testAppCommand := []string{ + testAppBinary, + "--port", + strconv.Itoa(testCase.testAppPort), + } + + execServer := NewServer(&ServerConfig{ + Logger: logging.NewVaultLogger(hclog.Trace), + AgentConfig: &config.Config{ + Vault: &config.Vault{ + Address: fakeVault.URL, + Retry: &config.Retry{ + NumRetries: 3, + }, + }, + Exec: &config.ExecConfig{ + RestartOnSecretChanges: "always", + Command: append(testAppCommand, testCase.testAppArgs...), + RestartStopSignal: testCase.testAppStopSignal, + }, + EnvTemplates: testCase.envTemplates, + TemplateConfig: &config.TemplateConfig{ + ExitOnRetryFailure: true, + StaticSecretRenderInt: testCase.staticSecretRenderInterval, + }, + }, + LogLevel: hclog.Trace, + LogWriter: hclog.DefaultOutput, + }) + + // start the exec server + var ( + execServerErrCh = make(chan error) + execServerTokenCh = make(chan string, 1) + ) + go func() { + execServerErrCh <- execServer.Run(ctx, execServerTokenCh) + }() + + // send a dummy token to kick off the server + execServerTokenCh <- "my-token" + + // ensure the test app is running after 3 seconds + var ( + testAppAddr = 
fmt.Sprintf("http://localhost:%d", testCase.testAppPort) + testAppStartedCh = make(chan error) + ) + if testCase.expectedError == nil { + time.AfterFunc(500*time.Millisecond, func() { + _, err := retryablehttp.Head(testAppAddr) + testAppStartedCh <- err + }) + } + + select { + case <-ctx.Done(): + t.Fatal("timeout reached before templates were rendered") + + case err := <-execServerErrCh: + if testCase.expectedError == nil && err != nil { + t.Fatalf("exec server did not expect an error, got: %v", err) + } + + if errors.Is(err, testCase.expectedError) { + t.Fatalf("exec server expected error %v; got %v", testCase.expectedError, err) + } + + t.Log("exec server exited without an error") + + return + + case err := <-testAppStartedCh: + if testCase.expectedError == nil && err != nil { + t.Fatalf("test app could not be started") + } + + t.Log("test app started successfully") + } + + // expect the test app to restart after staticSecretRenderInterval + debounce timer due to a password change + if testCase.staticSecretRenderInterval != 0 { + t.Logf("sleeping for %v to wait for application restart", testCase.staticSecretRenderInterval+5*time.Second) + time.Sleep(testCase.staticSecretRenderInterval + 5*time.Second) + } + + // simulate a shutdown of agent, which, in turn stops the test app + if testCase.simulateShutdown { + cancelContextFunc() + + time.Sleep(testCase.simulateShutdownWaitDuration) + + // check if the test app is still alive + if _, err := http.Head(testAppAddr); err == nil { + t.Fatalf("the test app is still alive %v after a simulated shutdown!", testCase.simulateShutdownWaitDuration) + } + + return + } + + // verify the environment variables + t.Logf("verifying test-app's environment variables") + + resp, err := retryablehttp.Get(testAppAddr) + if err != nil { + t.Fatalf("error making request to the test app: %s", err) + } + defer resp.Body.Close() + + decoder := json.NewDecoder(resp.Body) + var response struct { + EnvironmentVariables map[string]string `json:"environment_variables"` + ProcessID int `json:"process_id"` + } + if err := decoder.Decode(&response); err != nil { + t.Fatalf("unable to parse response from test app: %s", err) + } + + for key, expectedValue := range testCase.expected { + actualValue, ok := response.EnvironmentVariables[key] + if !ok { + t.Fatalf("expected the test app to return %q environment variable", key) + } + if expectedValue != actualValue { + t.Fatalf("expected environment variable %s to have a value of %q but it has a value of %q", key, expectedValue, actualValue) + } + } + }) + } +} diff --git a/command/agent/exec/test-app/main.go b/command/agent/exec/test-app/main.go new file mode 100644 index 0000000..311ac7e --- /dev/null +++ b/command/agent/exec/test-app/main.go @@ -0,0 +1,150 @@ +package main + +// This is a test application that is used by TestExecServer_Run to verify +// the behavior of vault agent running as a process supervisor. +// +// The app will automatically exit after 1 minute or the --stop-after interval, +// whichever comes first. It also can serve its loaded environment variables on +// the given --port. This app will also return the given --exit-code and +// terminate on SIGTERM unless --use-sigusr1 is specified. 
+ +import ( + "bytes" + "context" + "encoding/json" + "errors" + "flag" + "fmt" + "log" + "net/http" + "os" + "os/signal" + "strings" + "syscall" + "time" +) + +var ( + port uint + ignoreStopSignal bool + sleepAfterStopSignal time.Duration + useSigusr1StopSignal bool + stopAfter time.Duration + exitCode int +) + +func init() { + flag.UintVar(&port, "port", 34000, "port to run the test app on") + flag.DurationVar(&sleepAfterStopSignal, "sleep-after-stop-signal", 1*time.Second, "time to sleep after getting the signal before exiting") + flag.BoolVar(&useSigusr1StopSignal, "use-sigusr1", false, "use SIGUSR1 as the stop signal, instead of the default SIGTERM") + flag.DurationVar(&stopAfter, "stop-after", 0, "stop the process after duration (overrides all other flags if set)") + flag.IntVar(&exitCode, "exit-code", 0, "exit code to return when this script exits") +} + +type Response struct { + EnvironmentVariables map[string]string `json:"environment_variables"` + ProcessID int `json:"process_id"` +} + +func newResponse() Response { + respEnv := make(map[string]string, len(os.Environ())) + for _, envVar := range os.Environ() { + tokens := strings.Split(envVar, "=") + respEnv[tokens[0]] = tokens[1] + } + + return Response{ + EnvironmentVariables: respEnv, + ProcessID: os.Getpid(), + } +} + +func handler(w http.ResponseWriter, r *http.Request) { + var buf bytes.Buffer + encoder := json.NewEncoder(&buf) + if r.URL.Query().Get("pretty") == "1" { + encoder.SetIndent("", " ") + } + if err := encoder.Encode(newResponse()); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, _ = w.Write(buf.Bytes()) +} + +func main() { + logger := log.New(os.Stderr, "test-app: ", log.LstdFlags) + + if err := run(logger); err != nil { + log.Fatalf("error: %v\n", err) + } + + logger.Printf("exit code: %d\n", exitCode) + + os.Exit(exitCode) +} + +func run(logger *log.Logger) error { + /* */ logger.Println("run: started") + defer logger.Println("run: done") + + ctx, cancelContextFunc := context.WithTimeout(context.Background(), 60*time.Second) + defer cancelContextFunc() + + flag.Parse() + + server := http.Server{ + Addr: fmt.Sprintf(":%d", port), + Handler: http.HandlerFunc(handler), + ReadTimeout: 20 * time.Second, + WriteTimeout: 20 * time.Second, + IdleTimeout: 20 * time.Second, + } + + doneCh := make(chan struct{}) + + go func() { + defer close(doneCh) + + stopSignal := make(chan os.Signal, 1) + if useSigusr1StopSignal { + signal.Notify(stopSignal, syscall.SIGUSR1) + } else { + signal.Notify(stopSignal, syscall.SIGTERM) + } + + select { + case <-ctx.Done(): + logger.Println("context done: exiting") + + case s := <-stopSignal: + logger.Printf("signal %q: received\n", s) + + if sleepAfterStopSignal > 0 { + logger.Printf("signal %q: sleeping for %v simulate cleanup\n", s, sleepAfterStopSignal) + time.Sleep(sleepAfterStopSignal) + } + + case <-time.After(stopAfter): + logger.Printf("stopping after: %v\n", stopAfter) + } + + if err := server.Shutdown(context.Background()); err != nil { + log.Printf("server shutdown error: %v", err) + } + }() + + logger.Printf("server %s: started\n", server.Addr) + + if err := server.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) { + return fmt.Errorf("could not start the server: %v", err) + } + + logger.Printf("server %s: done\n", server.Addr) + + <-doneCh + + return nil +} diff --git a/command/agent/internal/ctmanager/runner_config.go 
b/command/agent/internal/ctmanager/runner_config.go new file mode 100644 index 0000000..b5a58aa --- /dev/null +++ b/command/agent/internal/ctmanager/runner_config.go @@ -0,0 +1,148 @@ +package ctmanager + +import ( + "fmt" + "io" + "strings" + + ctconfig "github.com/hashicorp/consul-template/config" + ctlogging "github.com/hashicorp/consul-template/logging" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/command/agent/config" + "github.com/hashicorp/vault/sdk/helper/pointerutil" +) + +type ManagerConfig struct { + AgentConfig *config.Config + Namespace string + LogLevel hclog.Level + LogWriter io.Writer +} + +// NewConfig returns a consul-template runner configuration, setting the +// Vault and Consul configurations based on the clients configs. +func NewConfig(mc ManagerConfig, templates ctconfig.TemplateConfigs) (*ctconfig.Config, error) { + conf := ctconfig.DefaultConfig() + conf.Templates = templates.Copy() + + // Setup the Vault config + // Always set these to ensure nothing is picked up from the environment + conf.Vault.RenewToken = pointerutil.BoolPtr(false) + conf.Vault.Token = pointerutil.StringPtr("") + conf.Vault.Address = &mc.AgentConfig.Vault.Address + + if mc.Namespace != "" { + conf.Vault.Namespace = &mc.Namespace + } + + if mc.AgentConfig.TemplateConfig != nil && mc.AgentConfig.TemplateConfig.StaticSecretRenderInt != 0 { + conf.Vault.DefaultLeaseDuration = &mc.AgentConfig.TemplateConfig.StaticSecretRenderInt + } + + if mc.AgentConfig.DisableIdleConnsTemplating { + idleConns := -1 + conf.Vault.Transport.MaxIdleConns = &idleConns + } + + if mc.AgentConfig.DisableKeepAlivesTemplating { + conf.Vault.Transport.DisableKeepAlives = pointerutil.BoolPtr(true) + } + + conf.Vault.SSL = &ctconfig.SSLConfig{ + Enabled: pointerutil.BoolPtr(false), + Verify: pointerutil.BoolPtr(false), + Cert: pointerutil.StringPtr(""), + Key: pointerutil.StringPtr(""), + CaCert: pointerutil.StringPtr(""), + CaPath: pointerutil.StringPtr(""), + ServerName: pointerutil.StringPtr(""), + } + + // If Vault.Retry isn't specified, use the default of 12 retries. + // This retry value will be respected regardless of if we use the cache. + attempts := ctconfig.DefaultRetryAttempts + if mc.AgentConfig.Vault != nil && mc.AgentConfig.Vault.Retry != nil { + attempts = mc.AgentConfig.Vault.Retry.NumRetries + } + + // Use the cache if available or fallback to the Vault server values. + if mc.AgentConfig.Cache != nil { + if mc.AgentConfig.Cache.InProcDialer == nil { + return nil, fmt.Errorf("missing in-process dialer configuration") + } + if conf.Vault.Transport == nil { + conf.Vault.Transport = &ctconfig.TransportConfig{} + } + conf.Vault.Transport.CustomDialer = mc.AgentConfig.Cache.InProcDialer + // The in-process dialer ignores the address passed in, but we're still + // setting it here to override the setting at the top of this function, + // and to prevent the vault/http client from defaulting to https. 
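+    // For illustration, the template tests later in this change construct such
+    // an in-process dialer from a gRPC bufconn listener, roughly:
+    //
+    //	bListener := bufconn.Listen(1024 * 1024)
+    //	agentConfig.Cache.InProcDialer = listenerutil.NewBufConnWrapper(bListener)
+    //
+    // With a custom dialer in place, the address assigned below is effectively
+    // a placeholder that also keeps the client on plain HTTP.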
+        conf.Vault.Address = pointerutil.StringPtr("http://127.0.0.1:8200")
+    } else if strings.HasPrefix(mc.AgentConfig.Vault.Address, "https") || mc.AgentConfig.Vault.CACert != "" {
+        skipVerify := mc.AgentConfig.Vault.TLSSkipVerify
+        verify := !skipVerify
+        conf.Vault.SSL = &ctconfig.SSLConfig{
+            Enabled:    pointerutil.BoolPtr(true),
+            Verify:     &verify,
+            Cert:       &mc.AgentConfig.Vault.ClientCert,
+            Key:        &mc.AgentConfig.Vault.ClientKey,
+            CaCert:     &mc.AgentConfig.Vault.CACert,
+            CaPath:     &mc.AgentConfig.Vault.CAPath,
+            ServerName: &mc.AgentConfig.Vault.TLSServerName,
+        }
+    }
+    enabled := attempts > 0
+    conf.Vault.Retry = &ctconfig.RetryConfig{
+        Attempts: &attempts,
+        Enabled:  &enabled,
+    }
+
+    // Sync Consul Template's retry with the user-set auto-auth initial backoff
+    // value. This is helpful if Auto Auth cannot get a new token and CT is
+    // trying to fetch secrets.
+    if mc.AgentConfig.AutoAuth != nil && mc.AgentConfig.AutoAuth.Method != nil {
+        if mc.AgentConfig.AutoAuth.Method.MinBackoff > 0 {
+            conf.Vault.Retry.Backoff = &mc.AgentConfig.AutoAuth.Method.MinBackoff
+        }
+
+        if mc.AgentConfig.AutoAuth.Method.MaxBackoff > 0 {
+            conf.Vault.Retry.MaxBackoff = &mc.AgentConfig.AutoAuth.Method.MaxBackoff
+        }
+    }
+
+    conf.Finalize()
+
+    // setup log level from TemplateServer config
+    conf.LogLevel = logLevelToStringPtr(mc.LogLevel)
+
+    if err := ctlogging.Setup(&ctlogging.Config{
+        Level:  *conf.LogLevel,
+        Writer: mc.LogWriter,
+    }); err != nil {
+        return nil, err
+    }
+    return conf, nil
+}
+
+// logLevelToStringPtr converts a go-hclog level to a matching, uppercase string
+// value. It's used to convert Vault Agent's hclog level to a string version
+// suitable for use in Consul Template's runner configuration input.
+func logLevelToStringPtr(level hclog.Level) *string {
+    // consul template's default level is WARN, but Vault Agent's default is INFO,
+    // so we use that for the Runner's default.
+    var levelStr string
+
+    switch level {
+    case hclog.Trace:
+        levelStr = "TRACE"
+    case hclog.Debug:
+        levelStr = "DEBUG"
+    case hclog.Warn:
+        levelStr = "WARN"
+    case hclog.Error:
+        levelStr = "ERR"
+    default:
+        levelStr = "INFO"
+    }
+    return pointerutil.StringPtr(levelStr)
+}
diff --git a/command/agent/jwt_end_to_end_test.go b/command/agent/jwt_end_to_end_test.go
new file mode 100644
index 0000000..4739a65
--- /dev/null
+++ b/command/agent/jwt_end_to_end_test.go
@@ -0,0 +1,434 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package agent + +import ( + "context" + "encoding/json" + "fmt" + "os" + "testing" + "time" + + hclog "github.com/hashicorp/go-hclog" + vaultjwt "github.com/hashicorp/vault-plugin-auth-jwt" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + agentjwt "github.com/hashicorp/vault/command/agentproxyshared/auth/jwt" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" + "github.com/hashicorp/vault/helper/dhutil" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +func TestJWTEndToEnd(t *testing.T) { + t.Parallel() + testCases := []struct { + ahWrapping bool + useSymlink bool + removeJWTAfterReading bool + }{ + {false, false, false}, + {true, false, false}, + {false, true, false}, + {true, true, false}, + {false, false, true}, + {true, false, true}, + {false, true, true}, + {true, true, true}, + } + + for _, tc := range testCases { + tc := tc // capture range variable + t.Run(fmt.Sprintf("ahWrapping=%v, useSymlink=%v, removeJWTAfterReading=%v", tc.ahWrapping, tc.useSymlink, tc.removeJWTAfterReading), func(t *testing.T) { + t.Parallel() + testJWTEndToEnd(t, tc.ahWrapping, tc.useSymlink, tc.removeJWTAfterReading) + }) + } +} + +func testJWTEndToEnd(t *testing.T, ahWrapping, useSymlink, removeJWTAfterReading bool) { + logger := logging.NewVaultLogger(hclog.Trace) + coreConfig := &vault.CoreConfig{ + Logger: logger, + CredentialBackends: map[string]logical.Factory{ + "jwt": vaultjwt.Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + client := cluster.Cores[0].Client + + // Setup Vault + err := client.Sys().EnableAuthWithOptions("jwt", &api.EnableAuthOptions{ + Type: "jwt", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Write("auth/jwt/config", map[string]interface{}{ + "bound_issuer": "https://team-vault.auth0.com/", + "jwt_validation_pubkeys": TestECDSAPubKey, + "jwt_supported_algs": "ES256", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Write("auth/jwt/role/test", map[string]interface{}{ + "role_type": "jwt", + "bound_subject": "r3qXcK2bix9eFECzsU3Sbmh0K16fatW6@clients", + "bound_audiences": "https://vault.plugin.auth.jwt.test", + "user_claim": "https://vault/user", + "groups_claim": "https://vault/groups", + "policies": "test", + "period": "3s", + }) + if err != nil { + t.Fatal(err) + } + + // Generate encryption params + pub, pri, err := dhutil.GeneratePublicPrivateKey() + if err != nil { + t.Fatal(err) + } + + // We close these right away because we're just basically testing + // permissions and finding a usable file name + inf, err := os.CreateTemp("", "auth.jwt.test.") + if err != nil { + t.Fatal(err) + } + in := inf.Name() + inf.Close() + os.Remove(in) + symlink, err := os.CreateTemp("", "auth.jwt.symlink.test.") + if err != nil { + t.Fatal(err) + } + symlinkName := symlink.Name() + symlink.Close() + os.Remove(symlinkName) + os.Symlink(in, symlinkName) + t.Logf("input: %s", in) + + ouf, err := os.CreateTemp("", "auth.tokensink.test.") + if err != nil { + t.Fatal(err) + } + out := ouf.Name() + ouf.Close() + os.Remove(out) + 
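+    // The DH keypair generated above drives the sink's envelope encryption:
+    // the agent encrypts the (wrapped) token to the public key that the test
+    // writes out below, and checkToken later reverses this, conceptually:
+    //
+    //	shared, _ := dhutil.GenerateSharedSecret(pri, resp.Curve25519PublicKey)
+    //	aesKey, _ := dhutil.DeriveSharedKey(shared, pub, resp.Curve25519PublicKey)
+    //	val, _ := dhutil.DecryptAES(aesKey, resp.EncryptedPayload, resp.Nonce, []byte("foobar"))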
t.Logf("output: %s", out) + + dhpathf, err := os.CreateTemp("", "auth.dhpath.test.") + if err != nil { + t.Fatal(err) + } + dhpath := dhpathf.Name() + dhpathf.Close() + os.Remove(dhpath) + + // Write DH public key to file + mPubKey, err := jsonutil.EncodeJSON(&dhutil.PublicKeyInfo{ + Curve25519PublicKey: pub, + }) + if err != nil { + t.Fatal(err) + } + if err := os.WriteFile(dhpath, mPubKey, 0o600); err != nil { + t.Fatal(err) + } else { + logger.Trace("wrote dh param file", "path", dhpath) + } + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + + var fileNameToUseAsPath string + if useSymlink { + fileNameToUseAsPath = symlinkName + } else { + fileNameToUseAsPath = in + } + am, err := agentjwt.NewJWTAuthMethod(&auth.AuthConfig{ + Logger: logger.Named("auth.jwt"), + MountPath: "auth/jwt", + Config: map[string]interface{}{ + "path": fileNameToUseAsPath, + "role": "test", + "remove_jwt_after_reading": removeJWTAfterReading, + "remove_jwt_follows_symlinks": true, + "jwt_read_period": "0.5s", + }, + }) + if err != nil { + t.Fatal(err) + } + + ahConfig := &auth.AuthHandlerConfig{ + Logger: logger.Named("auth.handler"), + Client: client, + EnableReauthOnNewCredentials: true, + } + if ahWrapping { + ahConfig.WrapTTL = 10 * time.Second + } + ah := auth.NewAuthHandler(ahConfig) + errCh := make(chan error) + go func() { + errCh <- ah.Run(ctx, am) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + config := &sink.SinkConfig{ + Logger: logger.Named("sink.file"), + AAD: "foobar", + DHType: "curve25519", + DHPath: dhpath, + DeriveKey: true, + Config: map[string]interface{}{ + "path": out, + }, + } + if !ahWrapping { + config.WrapTTL = 10 * time.Second + } + fs, err := file.NewFileSink(config) + if err != nil { + t.Fatal(err) + } + config.Sink = fs + + ss := sink.NewSinkServer(&sink.SinkServerConfig{ + Logger: logger.Named("sink.server"), + Client: client, + }) + go func() { + errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + // This has to be after the other defers so it happens first. It allows + // successful test runs to immediately cancel all of the runner goroutines + // and unblock any of the blocking defer calls by the runner's DoneCh that + // comes before this and avoid successful tests from taking the entire + // timeout duration. 
+ defer cancel() + + // Check that no jwt file exists + _, err = os.Lstat(in) + if err == nil { + t.Fatal("expected err") + } + if !os.IsNotExist(err) { + t.Fatal("expected notexist err") + } + _, err = os.Lstat(out) + if err == nil { + t.Fatal("expected err") + } + if !os.IsNotExist(err) { + t.Fatal("expected notexist err") + } + + cloned, err := client.Clone() + if err != nil { + t.Fatal(err) + } + + // Get a token + jwtToken, _ := GetTestJWT(t) + + if err := os.WriteFile(in, []byte(jwtToken), 0o600); err != nil { + t.Fatal(err) + } else { + logger.Trace("wrote test jwt", "path", in) + } + + checkToken := func() string { + timeout := time.Now().Add(5 * time.Second) + for { + if time.Now().After(timeout) { + t.Fatal("did not find a written token after timeout") + } + val, err := os.ReadFile(out) + if err == nil { + os.Remove(out) + if len(val) == 0 { + t.Fatal("written token was empty") + } + + // First, ensure JWT has been removed + if removeJWTAfterReading { + _, err = os.Stat(in) + if err == nil { + t.Fatal("no error returned from stat, indicating the jwt is still present") + } + if !os.IsNotExist(err) { + t.Fatalf("unexpected error: %v", err) + } + } else { + _, err := os.Stat(in) + if err != nil { + t.Fatal("JWT file removed despite removeJWTAfterReading being set to false") + } + } + + // First decrypt it + resp := new(dhutil.Envelope) + if err := jsonutil.DecodeJSON(val, resp); err != nil { + continue + } + + shared, err := dhutil.GenerateSharedSecret(pri, resp.Curve25519PublicKey) + if err != nil { + t.Fatal(err) + } + aesKey, err := dhutil.DeriveSharedKey(shared, pub, resp.Curve25519PublicKey) + if err != nil { + t.Fatal(err) + } + if len(aesKey) == 0 { + t.Fatal("got empty aes key") + } + + val, err = dhutil.DecryptAES(aesKey, resp.EncryptedPayload, resp.Nonce, []byte("foobar")) + if err != nil { + t.Fatalf("error: %v\nresp: %v", err, string(val)) + } + + // Now unwrap it + wrapInfo := new(api.SecretWrapInfo) + if err := jsonutil.DecodeJSON(val, wrapInfo); err != nil { + t.Fatal(err) + } + switch { + case wrapInfo.TTL != 10: + t.Fatalf("bad wrap info: %v", wrapInfo.TTL) + case !ahWrapping && wrapInfo.CreationPath != "sys/wrapping/wrap": + t.Fatalf("bad wrap path: %v", wrapInfo.CreationPath) + case ahWrapping && wrapInfo.CreationPath != "auth/jwt/login": + t.Fatalf("bad wrap path: %v", wrapInfo.CreationPath) + case wrapInfo.Token == "": + t.Fatal("wrap token is empty") + } + cloned.SetToken(wrapInfo.Token) + secret, err := cloned.Logical().Unwrap("") + if err != nil { + t.Fatal(err) + } + if ahWrapping { + switch { + case secret.Auth == nil: + t.Fatal("unwrap secret auth is nil") + case secret.Auth.ClientToken == "": + t.Fatal("unwrap token is nil") + } + return secret.Auth.ClientToken + } else { + switch { + case secret.Data == nil: + t.Fatal("unwrap secret data is nil") + case secret.Data["token"] == nil: + t.Fatal("unwrap token is nil") + } + return secret.Data["token"].(string) + } + } + time.Sleep(250 * time.Millisecond) + } + } + origToken := checkToken() + + // We only check this if the renewer is actually renewing for us + if !ahWrapping { + // Period of 3 seconds, so should still be alive after 7 + timeout := time.Now().Add(7 * time.Second) + cloned.SetToken(origToken) + for { + if time.Now().After(timeout) { + break + } + secret, err := cloned.Auth().Token().LookupSelf() + if err != nil { + t.Fatal(err) + } + ttl, err := secret.Data["ttl"].(json.Number).Int64() + if err != nil { + t.Fatal(err) + } + if ttl > 3 { + t.Fatalf("unexpected ttl: %v", secret.Data["ttl"]) + } + 
} + } + + // Get another token to test the backend pushing the need to authenticate + // to the handler + jwtToken, _ = GetTestJWT(t) + if err := os.WriteFile(in, []byte(jwtToken), 0o600); err != nil { + t.Fatal(err) + } + + newToken := checkToken() + if newToken == origToken { + t.Fatal("found same token written") + } + + if !ahWrapping { + // Repeat the period test. At the end the old token should have expired and + // the new token should still be alive after 7 + timeout := time.Now().Add(7 * time.Second) + cloned.SetToken(newToken) + for { + if time.Now().After(timeout) { + break + } + secret, err := cloned.Auth().Token().LookupSelf() + if err != nil { + t.Fatal(err) + } + ttl, err := secret.Data["ttl"].(json.Number).Int64() + if err != nil { + t.Fatal(err) + } + if ttl > 3 { + t.Fatalf("unexpected ttl: %v", secret.Data["ttl"]) + } + } + + cloned.SetToken(origToken) + _, err = cloned.Auth().Token().LookupSelf() + if err == nil { + t.Fatal("expected error") + } + } +} diff --git a/command/agent/oci_end_to_end_test.go b/command/agent/oci_end_to_end_test.go new file mode 100644 index 0000000..2349f09 --- /dev/null +++ b/command/agent/oci_end_to_end_test.go @@ -0,0 +1,231 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package agent + +import ( + "context" + "io/ioutil" + "os" + "testing" + "time" + + hclog "github.com/hashicorp/go-hclog" + vaultoci "github.com/hashicorp/vault-plugin-auth-oci" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + agentoci "github.com/hashicorp/vault/command/agentproxyshared/auth/oci" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" + "github.com/hashicorp/vault/helper/testhelpers" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +const ( + envVarOCITestTenancyOCID = "OCI_TEST_TENANCY_OCID" + envVarOCITestUserOCID = "OCI_TEST_USER_OCID" + envVarOCITestFingerprint = "OCI_TEST_FINGERPRINT" + envVarOCITestPrivateKeyPath = "OCI_TEST_PRIVATE_KEY_PATH" + envVAROCITestOCIDList = "OCI_TEST_OCID_LIST" + + // The OCI SDK doesn't export its standard env vars so they're captured here. + // These are used for the duration of the test to make sure the agent is able to + // pick up creds from the env. + // + // To run this test, do not set these. Only the above ones need to be set. + envVarOCITenancyOCID = "OCI_tenancy_ocid" + envVarOCIUserOCID = "OCI_user_ocid" + envVarOCIFingerprint = "OCI_fingerprint" + envVarOCIPrivateKeyPath = "OCI_private_key_path" +) + +func TestOCIEndToEnd(t *testing.T) { + if !runAcceptanceTests { + t.SkipNow() + } + + // Ensure each cred is populated. 
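+    // For example (all values are placeholders), a local acceptance run of
+    // this test looks something like:
+    //
+    //	VAULT_ACC=1 \
+    //	OCI_TEST_TENANCY_OCID=ocid1.tenancy.oc1..example \
+    //	OCI_TEST_USER_OCID=ocid1.user.oc1..example \
+    //	OCI_TEST_FINGERPRINT=00:11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff \
+    //	OCI_TEST_PRIVATE_KEY_PATH=/path/to/key.pem \
+    //	OCI_TEST_OCID_LIST=ocid1.user.oc1..example \
+    //	go test -run TestOCIEndToEnd ./command/agent/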
+ credNames := []string{ + envVarOCITestTenancyOCID, + envVarOCITestUserOCID, + envVarOCITestFingerprint, + envVarOCITestPrivateKeyPath, + envVAROCITestOCIDList, + } + testhelpers.SkipUnlessEnvVarsSet(t, credNames) + + logger := logging.NewVaultLogger(hclog.Trace) + coreConfig := &vault.CoreConfig{ + Logger: logger, + CredentialBackends: map[string]logical.Factory{ + "oci": vaultoci.Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + client := cluster.Cores[0].Client + + // Setup Vault + if err := client.Sys().EnableAuthWithOptions("oci", &api.EnableAuthOptions{ + Type: "oci", + }); err != nil { + t.Fatal(err) + } + + if _, err := client.Logical().Write("auth/oci/config", map[string]interface{}{ + "home_tenancy_id": os.Getenv(envVarOCITestTenancyOCID), + }); err != nil { + t.Fatal(err) + } + + if _, err := client.Logical().Write("auth/oci/role/test", map[string]interface{}{ + "ocid_list": os.Getenv(envVAROCITestOCIDList), + }); err != nil { + t.Fatal(err) + } + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + + // We're going to feed oci auth creds via env variables. + if err := setOCIEnvCreds(); err != nil { + t.Fatal(err) + } + defer func() { + if err := unsetOCIEnvCreds(); err != nil { + t.Fatal(err) + } + }() + + vaultAddr := "http://" + cluster.Cores[0].Listeners[0].Addr().String() + + am, err := agentoci.NewOCIAuthMethod(&auth.AuthConfig{ + Logger: logger.Named("auth.oci"), + MountPath: "auth/oci", + Config: map[string]interface{}{ + "type": "apikey", + "role": "test", + }, + }, vaultAddr) + if err != nil { + t.Fatal(err) + } + + ahConfig := &auth.AuthHandlerConfig{ + Logger: logger.Named("auth.handler"), + Client: client, + } + + ah := auth.NewAuthHandler(ahConfig) + errCh := make(chan error) + go func() { + errCh <- ah.Run(ctx, am) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + tmpFile, err := ioutil.TempFile("", "auth.tokensink.test.") + if err != nil { + t.Fatal(err) + } + tokenSinkFileName := tmpFile.Name() + tmpFile.Close() + os.Remove(tokenSinkFileName) + t.Logf("output: %s", tokenSinkFileName) + + config := &sink.SinkConfig{ + Logger: logger.Named("sink.file"), + Config: map[string]interface{}{ + "path": tokenSinkFileName, + }, + WrapTTL: 10 * time.Second, + } + + fs, err := file.NewFileSink(config) + if err != nil { + t.Fatal(err) + } + config.Sink = fs + + ss := sink.NewSinkServer(&sink.SinkServerConfig{ + Logger: logger.Named("sink.server"), + Client: client, + }) + go func() { + errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + // This has to be after the other defers so it happens first. It allows + // successful test runs to immediately cancel all of the runner goroutines + // and unblock any of the blocking defer calls by the runner's DoneCh that + // comes before this and avoid successful tests from taking the entire + // timeout duration. + defer cancel() + + if stat, err := os.Lstat(tokenSinkFileName); err == nil { + t.Fatalf("expected err but got %s", stat) + } else if !os.IsNotExist(err) { + t.Fatal("expected notexist err") + } + + // Wait 2 seconds for the env variables to be detected and an auth to be generated. 
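+    // The token sink is response-wrapped (WrapTTL is set on the sink config
+    // above), so the file read by readToken holds JSON along the lines of
+    // (values are placeholders):
+    //
+    //	{"token": "hvs.example", "ttl": 10, "creation_path": "sys/wrapping/wrap"}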
+    time.Sleep(time.Second * 2)
+
+    token, err := readToken(tokenSinkFileName)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    if token.Token == "" {
+        t.Fatal("expected token but didn't receive it")
+    }
+}
+
+func setOCIEnvCreds() error {
+    if err := os.Setenv(envVarOCITenancyOCID, os.Getenv(envVarOCITestTenancyOCID)); err != nil {
+        return err
+    }
+    if err := os.Setenv(envVarOCIUserOCID, os.Getenv(envVarOCITestUserOCID)); err != nil {
+        return err
+    }
+    if err := os.Setenv(envVarOCIFingerprint, os.Getenv(envVarOCITestFingerprint)); err != nil {
+        return err
+    }
+    return os.Setenv(envVarOCIPrivateKeyPath, os.Getenv(envVarOCITestPrivateKeyPath))
+}
+
+func unsetOCIEnvCreds() error {
+    if err := os.Unsetenv(envVarOCITenancyOCID); err != nil {
+        return err
+    }
+    if err := os.Unsetenv(envVarOCIUserOCID); err != nil {
+        return err
+    }
+    if err := os.Unsetenv(envVarOCIFingerprint); err != nil {
+        return err
+    }
+    return os.Unsetenv(envVarOCIPrivateKeyPath)
+}
diff --git a/command/agent/template/template.go b/command/agent/template/template.go
new file mode 100644
index 0000000..be3ccc4
--- /dev/null
+++ b/command/agent/template/template.go
@@ -0,0 +1,237 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// Package template is responsible for rendering user-supplied templates to
+// disk. The Server type accepts configuration to communicate to a Vault server
+// and a Vault token for authentication. Internally, the Server creates a Consul
+// Template Runner which manages reading secrets from Vault and rendering
+// templates to disk at configured locations.
+package template
+
+import (
+    "context"
+    "errors"
+    "fmt"
+    "io"
+
+    "go.uber.org/atomic"
+
+    ctconfig "github.com/hashicorp/consul-template/config"
+    "github.com/hashicorp/consul-template/manager"
+    "github.com/hashicorp/go-hclog"
+
+    "github.com/hashicorp/vault/command/agent/config"
+    "github.com/hashicorp/vault/command/agent/internal/ctmanager"
+    "github.com/hashicorp/vault/helper/useragent"
+    "github.com/hashicorp/vault/sdk/helper/pointerutil"
+)
+
+// ServerConfig is a config struct for setting up the basic parts of the
+// Server
+type ServerConfig struct {
+    Logger hclog.Logger
+    // Client *api.Client
+    AgentConfig *config.Config
+
+    ExitAfterAuth bool
+    Namespace     string
+
+    // LogLevel is needed to set the internal Consul Template Runner's log level
+    // to match the log level of Vault Agent. The internal Runner creates its own
+    // logger and can't be set externally or copied from the Template Server.
+    //
+    // LogWriter is needed to initialize Consul Template's internal logger to use
+    // the same io.Writer that Vault Agent itself is using.
+    LogLevel  hclog.Level
+    LogWriter io.Writer
+}
+
+// Server manages the Consul Template Runner which renders templates
+type Server struct {
+    // config holds the ServerConfig used to create it. It's passed along in other
+    // methods
+    config *ServerConfig
+
+    // runner is the consul-template runner
+    runner        *manager.Runner
+    runnerStarted *atomic.Bool
+
+    // Templates holds the parsed Consul Templates
+    Templates []*ctconfig.TemplateConfig
+
+    // lookupMap is a map of templates indexed by their consul-template ID. It
+    // is used to ensure all Vault templates have been rendered before returning
+    // from the runner in the event we're using exit after auth.
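+    // Concretely (the IDs are illustrative; consul-template derives them from
+    // the template contents), two stanzas with identical contents share one
+    // entry:
+    //
+    //	lookupMap = map[string][]*ctconfig.TemplateConfig{
+    //		"a1b2c3": {tmplA, tmplB}, // same contents -> same ID
+    //		"d4e5f6": {tmplC},
+    //	}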
+    lookupMap map[string][]*ctconfig.TemplateConfig
+
+    DoneCh  chan struct{}
+    stopped *atomic.Bool
+
+    logger        hclog.Logger
+    exitAfterAuth bool
+}
+
+// NewServer returns a new configured server
+func NewServer(conf *ServerConfig) *Server {
+    ts := Server{
+        DoneCh:        make(chan struct{}),
+        stopped:       atomic.NewBool(false),
+        runnerStarted: atomic.NewBool(false),
+
+        logger:        conf.Logger,
+        config:        conf,
+        exitAfterAuth: conf.ExitAfterAuth,
+    }
+    return &ts
+}
+
+// Run kicks off the internal Consul Template runner, and listens for changes to
+// the token from the AuthHandler. When the passed-in context is done, it shuts
+// down the Runner and returns.
+func (ts *Server) Run(ctx context.Context, incoming chan string, templates []*ctconfig.TemplateConfig) error {
+    if incoming == nil {
+        return errors.New("template server: incoming channel is nil")
+    }
+
+    latestToken := new(string)
+    ts.logger.Info("starting template server")
+
+    defer func() {
+        ts.logger.Info("template server stopped")
+    }()
+
+    // If there are no templates, we wait for context cancellation and then return
+    if len(templates) == 0 {
+        ts.logger.Info("no templates found")
+        <-ctx.Done()
+        return nil
+    }
+
+    // construct a consul-template vault config based on the agent's vault
+    // configuration
+    var runnerConfig *ctconfig.Config
+    var runnerConfigErr error
+    managerConfig := ctmanager.ManagerConfig{
+        AgentConfig: ts.config.AgentConfig,
+        Namespace:   ts.config.Namespace,
+        LogLevel:    ts.config.LogLevel,
+        LogWriter:   ts.config.LogWriter,
+    }
+    runnerConfig, runnerConfigErr = ctmanager.NewConfig(managerConfig, templates)
+    if runnerConfigErr != nil {
+        return fmt.Errorf("template server failed to generate runner config: %w", runnerConfigErr)
+    }
+
+    var err error
+    ts.runner, err = manager.NewRunner(runnerConfig, false)
+    if err != nil {
+        return fmt.Errorf("template server failed to create: %w", err)
+    }
+
+    // Build the lookup map using the id mapping from the Template runner. This is
+    // used to check the template rendering against the expected templates. This
+    // returns a map with a generated ID and a slice of templates for that id. The
+    // slice is determined by the source or contents of the template, so if a
+    // configuration has multiple templates specified with the same source /
+    // contents, they will be identified by the same key.
+    idMap := ts.runner.TemplateConfigMapping()
+    lookupMap := make(map[string][]*ctconfig.TemplateConfig, len(idMap))
+    for id, ctmpls := range idMap {
+        for _, ctmpl := range ctmpls {
+            tl := lookupMap[id]
+            tl = append(tl, ctmpl)
+            lookupMap[id] = tl
+        }
+    }
+    ts.lookupMap = lookupMap
+
+    for {
+        select {
+        case <-ctx.Done():
+            ts.runner.Stop()
+            return nil
+
+        case token := <-incoming:
+            if token != *latestToken {
+                ts.logger.Info("template server received new token")
+
+                // If the runner was previously started and we intend to exit
+                // after auth, do not restart the runner if a new token is
+                // received.
+ if ts.exitAfterAuth && ts.runnerStarted.Load() { + ts.logger.Info("template server not restarting with new token with exit_after_auth set to true") + continue + } + + ts.runner.Stop() + *latestToken = token + ctv := ctconfig.Config{ + Vault: &ctconfig.VaultConfig{ + Token: latestToken, + ClientUserAgent: pointerutil.StringPtr(useragent.AgentTemplatingString()), + }, + } + + runnerConfig = runnerConfig.Merge(&ctv) + var runnerErr error + ts.runner, runnerErr = manager.NewRunner(runnerConfig, false) + if runnerErr != nil { + ts.logger.Error("template server failed with new Vault token", "error", runnerErr) + continue + } + ts.runnerStarted.CAS(false, true) + go ts.runner.Start() + } + + case err := <-ts.runner.ErrCh: + ts.logger.Error("template server error", "error", err.Error()) + ts.runner.StopImmediately() + + // Return after stopping the runner if exit on retry failure was + // specified + if ts.config.AgentConfig.TemplateConfig != nil && ts.config.AgentConfig.TemplateConfig.ExitOnRetryFailure { + return fmt.Errorf("template server: %w", err) + } + + ts.runner, err = manager.NewRunner(runnerConfig, false) + if err != nil { + return fmt.Errorf("template server failed to create: %w", err) + } + go ts.runner.Start() + + case <-ts.runner.TemplateRenderedCh(): + // A template has been rendered, figure out what to do + events := ts.runner.RenderEvents() + + // events are keyed by template ID, and can be matched up to the id's from + // the lookupMap + if len(events) < len(ts.lookupMap) { + // Not all templates have been rendered yet + continue + } + + // assume the renders are finished, until we find otherwise + doneRendering := true + for _, event := range events { + // This template hasn't been rendered + if event.LastWouldRender.IsZero() { + doneRendering = false + } + } + + if doneRendering && ts.exitAfterAuth { + // if we want to exit after auth, go ahead and shut down the runner and + // return. The deferred closing of the DoneCh will allow agent to + // continue with closing down + ts.runner.Stop() + return nil + } + } + } +} + +func (ts *Server) Stop() { + if ts.stopped.CAS(false, true) { + close(ts.DoneCh) + } +} diff --git a/command/agent/template/template_test.go b/command/agent/template/template_test.go new file mode 100644 index 0000000..61822bc --- /dev/null +++ b/command/agent/template/template_test.go @@ -0,0 +1,584 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package template
+
+import (
+    "context"
+    "encoding/json"
+    "fmt"
+    "net/http"
+    "net/http/httptest"
+    "os"
+    "strings"
+    "testing"
+    "time"
+
+    ctconfig "github.com/hashicorp/consul-template/config"
+    "github.com/hashicorp/go-hclog"
+    "github.com/hashicorp/vault/command/agent/config"
+    "github.com/hashicorp/vault/command/agent/internal/ctmanager"
+    "github.com/hashicorp/vault/command/agentproxyshared"
+    "github.com/hashicorp/vault/internalshared/configutil"
+    "github.com/hashicorp/vault/internalshared/listenerutil"
+    "github.com/hashicorp/vault/sdk/helper/logging"
+    "github.com/hashicorp/vault/sdk/helper/pointerutil"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+    "google.golang.org/grpc/test/bufconn"
+)
+
+func newRunnerConfig(s *ServerConfig, configs ctconfig.TemplateConfigs) (*ctconfig.Config, error) {
+    managerCfg := ctmanager.ManagerConfig{
+        AgentConfig: s.AgentConfig,
+    }
+    cfg, err := ctmanager.NewConfig(managerCfg, configs)
+    return cfg, err
+}
+
+// TestNewServer is a simple test to make sure NewServer returns a Server and
+// channel
+func TestNewServer(t *testing.T) {
+    server := NewServer(&ServerConfig{})
+    if server == nil {
+        t.Fatal("nil server returned")
+    }
+}
+
+func newAgentConfig(listeners []*configutil.Listener, enableCache, enablePersistentCache bool) *config.Config {
+    agentConfig := &config.Config{
+        SharedConfig: &configutil.SharedConfig{
+            PidFile:   "./pidfile",
+            Listeners: listeners,
+        },
+        AutoAuth: &config.AutoAuth{
+            Method: &config.Method{
+                Type:      "aws",
+                MountPath: "auth/aws",
+                Config: map[string]interface{}{
+                    "role": "foobar",
+                },
+            },
+            Sinks: []*config.Sink{
+                {
+                    Type:   "file",
+                    DHType: "curve25519",
+                    DHPath: "/tmp/file-foo-dhpath",
+                    AAD:    "foobar",
+                    Config: map[string]interface{}{
+                        "path": "/tmp/file-foo",
+                    },
+                },
+            },
+        },
+        Vault: &config.Vault{
+            Address:          "http://127.0.0.1:1111",
+            CACert:           "config_ca_cert",
+            CAPath:           "config_ca_path",
+            TLSSkipVerifyRaw: interface{}("true"),
+            TLSSkipVerify:    true,
+            ClientCert:       "config_client_cert",
+            ClientKey:        "config_client_key",
+        },
+    }
+    if enableCache {
+        agentConfig.Cache = &config.Cache{
+            UseAutoAuthToken: true,
+        }
+    }
+
+    if enablePersistentCache {
+        agentConfig.Cache.Persist = &agentproxyshared.PersistConfig{Type: "kubernetes"}
+    }
+
+    return agentConfig
+}
+
+func TestCacheConfig(t *testing.T) {
+    listeners := []*configutil.Listener{
+        {
+            Type:       "tcp",
+            Address:    "127.0.0.1:8300",
+            TLSDisable: true,
+        },
+        {
+            Type:        "unix",
+            Address:     "foobar",
+            TLSDisable:  true,
+            SocketMode:  "configmode",
+            SocketUser:  "configuser",
+            SocketGroup: "configgroup",
+        },
+        {
+            Type:        "tcp",
+            Address:     "127.0.0.1:8400",
+            TLSKeyFile:  "/path/to/cakey.pem",
+            TLSCertFile: "/path/to/cacert.pem",
+        },
+    }
+
+    cases := map[string]struct {
+        cacheEnabled           bool
+        persistentCacheEnabled bool
+        setDialer              bool
+        expectedErr            string
+        expectCustomDialer     bool
+    }{
+        "persistent_cache": {
+            cacheEnabled:           true,
+            persistentCacheEnabled: true,
+            setDialer:              true,
+            expectedErr:            "",
+            expectCustomDialer:     true,
+        },
+        "memory_cache": {
+            cacheEnabled:           true,
+            persistentCacheEnabled: false,
+            setDialer:              true,
+            expectedErr:            "",
+            expectCustomDialer:     true,
+        },
+        "no_cache": {
+            cacheEnabled:           false,
+            persistentCacheEnabled: false,
+            setDialer:              false,
+            expectedErr:            "",
+            expectCustomDialer:     false,
+        },
+        "cache_no_dialer": {
+            cacheEnabled:           true,
+            persistentCacheEnabled: false,
+            setDialer:              false,
+            expectedErr:            "missing in-process dialer configuration",
+            expectCustomDialer:     false,
+        },
+    }
+
+    for name, tc := range cases {
+        t.Run(name, func(t *testing.T) {
+            agentConfig := newAgentConfig(listeners, tc.cacheEnabled, tc.persistentCacheEnabled)
+            if tc.setDialer && tc.cacheEnabled {
+                bListener := bufconn.Listen(1024 * 1024)
+                defer bListener.Close()
+                agentConfig.Cache.InProcDialer = listenerutil.NewBufConnWrapper(bListener)
+            }
+            serverConfig := ServerConfig{AgentConfig: agentConfig}
+
+            ctConfig, err := newRunnerConfig(&serverConfig, ctconfig.TemplateConfigs{})
+            if len(tc.expectedErr) > 0 {
+                require.ErrorContains(t, err, tc.expectedErr)
+                return
+            }
+
+            require.NoError(t, err)
+            require.NotNil(t, ctConfig)
+            assert.Equal(t, tc.expectCustomDialer, ctConfig.Vault.Transport.CustomDialer != nil)
+
+            if tc.expectCustomDialer {
+                assert.Equal(t, "http://127.0.0.1:8200", *ctConfig.Vault.Address)
+            } else {
+                assert.Equal(t, "http://127.0.0.1:1111", *ctConfig.Vault.Address)
+            }
+        })
+    }
+}
+
+func TestCacheConfigNoListener(t *testing.T) {
+    listeners := []*configutil.Listener{}
+
+    agentConfig := newAgentConfig(listeners, true, true)
+    bListener := bufconn.Listen(1024 * 1024)
+    defer bListener.Close()
+    agentConfig.Cache.InProcDialer = listenerutil.NewBufConnWrapper(bListener)
+    serverConfig := ServerConfig{AgentConfig: agentConfig}
+
+    ctConfig, err := newRunnerConfig(&serverConfig, ctconfig.TemplateConfigs{})
+    if err != nil {
+        t.Fatalf("unexpected error: %s", err)
+    }
+
+    assert.Equal(t, "http://127.0.0.1:8200", *ctConfig.Vault.Address)
+    assert.NotNil(t, ctConfig.Vault.Transport.CustomDialer)
+}
+
+func createHttpTestServer() *httptest.Server {
+    // create http test server
+    mux := http.NewServeMux()
+    mux.HandleFunc("/v1/kv/myapp/config", func(w http.ResponseWriter, r *http.Request) {
+        fmt.Fprintln(w, jsonResponse)
+    })
+    mux.HandleFunc("/v1/kv/myapp/config-bad", func(w http.ResponseWriter, r *http.Request) {
+        w.WriteHeader(404)
+        fmt.Fprintln(w, `{"errors":[]}`)
+    })
+    mux.HandleFunc("/v1/kv/myapp/perm-denied", func(w http.ResponseWriter, r *http.Request) {
+        w.WriteHeader(403)
+        fmt.Fprintln(w, `{"errors":["1 error occurred:\n\t* permission denied\n\n"]}`)
+    })
+
+    return httptest.NewServer(mux)
+}
+
+func TestServerRun(t *testing.T) {
+    ts := createHttpTestServer()
+    defer ts.Close()
+
+    tmpDir, err := os.MkdirTemp("", "agent-tests")
+    if err != nil {
+        t.Fatal(err)
+    }
+    defer os.RemoveAll(tmpDir)
+
+    // secretRender is a simple struct that represents the secret we render to
+    // disk.
It's used to unmarshal the file contents and test against + type secretRender struct { + Username string `json:"username"` + Password string `json:"password"` + Version string `json:"version"` + } + + type templateTest struct { + template *ctconfig.TemplateConfig + } + + testCases := map[string]struct { + templateMap map[string]*templateTest + expectedValues *secretRender + expectError bool + exitOnRetryFailure bool + }{ + "simple": { + templateMap: map[string]*templateTest{ + "render_01": { + template: &ctconfig.TemplateConfig{ + Contents: pointerutil.StringPtr(templateContents), + }, + }, + }, + expectError: false, + exitOnRetryFailure: false, + }, + "multiple": { + templateMap: map[string]*templateTest{ + "render_01": { + template: &ctconfig.TemplateConfig{ + Contents: pointerutil.StringPtr(templateContents), + }, + }, + "render_02": { + template: &ctconfig.TemplateConfig{ + Contents: pointerutil.StringPtr(templateContents), + }, + }, + "render_03": { + template: &ctconfig.TemplateConfig{ + Contents: pointerutil.StringPtr(templateContents), + }, + }, + "render_04": { + template: &ctconfig.TemplateConfig{ + Contents: pointerutil.StringPtr(templateContents), + }, + }, + "render_05": { + template: &ctconfig.TemplateConfig{ + Contents: pointerutil.StringPtr(templateContents), + }, + }, + "render_06": { + template: &ctconfig.TemplateConfig{ + Contents: pointerutil.StringPtr(templateContents), + }, + }, + "render_07": { + template: &ctconfig.TemplateConfig{ + Contents: pointerutil.StringPtr(templateContents), + }, + }, + }, + expectError: false, + exitOnRetryFailure: false, + }, + "bad secret": { + templateMap: map[string]*templateTest{ + "render_01": { + template: &ctconfig.TemplateConfig{ + Contents: pointerutil.StringPtr(templateContentsBad), + }, + }, + }, + expectError: true, + exitOnRetryFailure: true, + }, + "missing key": { + templateMap: map[string]*templateTest{ + "render_01": { + template: &ctconfig.TemplateConfig{ + Contents: pointerutil.StringPtr(templateContentsMissingKey), + ErrMissingKey: pointerutil.BoolPtr(true), + }, + }, + }, + expectError: true, + exitOnRetryFailure: true, + }, + "permission denied": { + templateMap: map[string]*templateTest{ + "render_01": { + template: &ctconfig.TemplateConfig{ + Contents: pointerutil.StringPtr(templateContentsPermDenied), + }, + }, + }, + expectError: true, + exitOnRetryFailure: true, + }, + "with sprig functions": { + templateMap: map[string]*templateTest{ + "render_01": { + template: &ctconfig.TemplateConfig{ + Contents: pointerutil.StringPtr(templateContentsWithSprigFunctions), + }, + }, + }, + expectedValues: &secretRender{ + Username: "APPUSER", + Password: "passphrase", + Version: "3", + }, + expectError: false, + exitOnRetryFailure: true, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + templateTokenCh := make(chan string, 1) + var templatesToRender []*ctconfig.TemplateConfig + for fileName, templateTest := range tc.templateMap { + dstFile := fmt.Sprintf("%s/%s", tmpDir, fileName) + templateTest.template.Destination = pointerutil.StringPtr(dstFile) + templatesToRender = append(templatesToRender, templateTest.template) + } + + ctx, _ := context.WithTimeout(context.Background(), 20*time.Second) + sc := ServerConfig{ + Logger: logging.NewVaultLogger(hclog.Trace), + AgentConfig: &config.Config{ + Vault: &config.Vault{ + Address: ts.URL, + Retry: &config.Retry{ + NumRetries: 3, + }, + }, + TemplateConfig: &config.TemplateConfig{ + ExitOnRetryFailure: tc.exitOnRetryFailure, + }, + }, + LogLevel: 
hclog.Trace,
+                LogWriter:     hclog.DefaultOutput,
+                ExitAfterAuth: true,
+            }
+
+            server := NewServer(&sc)
+            if server == nil {
+                t.Fatal("nil server returned")
+            }
+
+            errCh := make(chan error)
+            go func() {
+                errCh <- server.Run(ctx, templateTokenCh, templatesToRender)
+            }()
+
+            // send a dummy value to trigger the internal Runner to query for secret
+            // info
+            templateTokenCh <- "test"
+
+            select {
+            case <-ctx.Done():
+                t.Fatal("timeout reached before templates were rendered")
+            case err := <-errCh:
+                if err != nil && !tc.expectError {
+                    t.Fatalf("did not expect error, got: %v", err)
+                }
+                if err != nil && tc.expectError {
+                    t.Logf("received expected error: %v", err)
+                    return
+                }
+            }
+
+            // verify test file exists and has the content we're looking for
+            var fileCount int
+            var errs []string
+            for _, template := range templatesToRender {
+                if template.Destination == nil {
+                    t.Fatal("nil template destination")
+                }
+                content, err := os.ReadFile(*template.Destination)
+                if err != nil {
+                    errs = append(errs, err.Error())
+                    continue
+                }
+                fileCount++
+
+                secret := secretRender{}
+                if err := json.Unmarshal(content, &secret); err != nil {
+                    t.Fatal(err)
+                }
+                var expectedValues secretRender
+                if tc.expectedValues != nil {
+                    expectedValues = *tc.expectedValues
+                } else {
+                    expectedValues = secretRender{
+                        Username: "appuser",
+                        Password: "password",
+                        Version:  "3",
+                    }
+                }
+                if secret != expectedValues {
+                    t.Fatalf("secret didn't match, expected: %#v, got: %#v", expectedValues, secret)
+                }
+            }
+            if len(errs) != 0 {
+                t.Fatalf("Failed to find the expected files. Expected %d, got %d\n\t%s", len(templatesToRender), fileCount, strings.Join(errs, "\n\t"))
+            }
+        })
+    }
+}
+
+// TestNewServerLogLevels tests that the server can be started with any log
+// level.
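+//
+// For reference, ctmanager.logLevelToStringPtr (earlier in this change) maps
+// these levels onto the strings consul-template understands:
+//
+//	hclog.Trace -> "TRACE"    hclog.Debug -> "DEBUG"
+//	hclog.Warn  -> "WARN"     hclog.Error -> "ERR"
+//	hclog.NoLevel, hclog.Info (and anything else) -> "INFO"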
+func TestNewServerLogLevels(t *testing.T) { + ts := createHttpTestServer() + defer ts.Close() + + tmpDir, err := os.MkdirTemp("", "agent-tests") + defer os.RemoveAll(tmpDir) + if err != nil { + t.Fatal(err) + } + + levels := []hclog.Level{hclog.NoLevel, hclog.Trace, hclog.Debug, hclog.Info, hclog.Warn, hclog.Error} + for _, level := range levels { + name := fmt.Sprintf("log_%s", level) + t.Run(name, func(t *testing.T) { + server := NewServer(&ServerConfig{ + Logger: logging.NewVaultLogger(level), + LogWriter: hclog.DefaultOutput, + LogLevel: level, + ExitAfterAuth: true, + AgentConfig: &config.Config{ + Vault: &config.Vault{ + Address: ts.URL, + }, + }, + }) + if server == nil { + t.Fatal("nil server returned") + } + defer server.Stop() + + templateTokenCh := make(chan string, 1) + + templateTest := &ctconfig.TemplateConfig{ + Contents: pointerutil.StringPtr(templateContents), + } + dstFile := fmt.Sprintf("%s/%s", tmpDir, name) + templateTest.Destination = pointerutil.StringPtr(dstFile) + templatesToRender := []*ctconfig.TemplateConfig{templateTest} + + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + errCh := make(chan error) + go func() { + errCh <- server.Run(ctx, templateTokenCh, templatesToRender) + }() + + // send a dummy value to trigger auth so the server will exit + templateTokenCh <- "test" + + select { + case <-ctx.Done(): + t.Fatal("timeout reached before templates were rendered") + case err := <-errCh: + if err != nil { + t.Fatalf("did not expect error, got: %v", err) + } + } + }) + } +} + +var jsonResponse = ` +{ + "request_id": "8af096e9-518c-7351-eff5-5ba20554b21f", + "lease_id": "", + "renewable": false, + "lease_duration": 0, + "data": { + "data": { + "password": "password", + "username": "appuser" + }, + "metadata": { + "created_time": "2019-10-07T22:18:44.233247Z", + "deletion_time": "", + "destroyed": false, + "version": 3 + } + }, + "wrap_info": null, + "warnings": null, + "auth": null +} +` + +var templateContents = ` +{{ with secret "kv/myapp/config"}} +{ +{{ if .Data.data.username}}"username":"{{ .Data.data.username}}",{{ end }} +{{ if .Data.data.password }}"password":"{{ .Data.data.password }}",{{ end }} +{{ if .Data.metadata.version}}"version":"{{ .Data.metadata.version }}"{{ end }} +} +{{ end }} +` + +var templateContentsMissingKey = ` +{{ with secret "kv/myapp/config"}} +{ +{{ if .Data.data.foo}}"foo":"{{ .Data.data.foo}}"{{ end }} +} +{{ end }} +` + +var templateContentsBad = ` +{{ with secret "kv/myapp/config-bad"}} +{ +{{ if .Data.data.username}}"username":"{{ .Data.data.username}}",{{ end }} +{{ if .Data.data.password }}"password":"{{ .Data.data.password }}",{{ end }} +{{ if .Data.metadata.version}}"version":"{{ .Data.metadata.version }}"{{ end }} +} +{{ end }} +` + +var templateContentsPermDenied = ` +{{ with secret "kv/myapp/perm-denied"}} +{ +{{ if .Data.data.username}}"username":"{{ .Data.data.username}}",{{ end }} +{{ if .Data.data.password }}"password":"{{ .Data.data.password }}",{{ end }} +{{ if .Data.metadata.version}}"version":"{{ .Data.metadata.version }}"{{ end }} +} +{{ end }} +` + +var templateContentsWithSprigFunctions = ` +{{ with secret "kv/myapp/config"}} +{ +{{ if .Data.data.username}}"username":"{{ .Data.data.username | sprig_upper }}",{{ end }} +{{ if .Data.data.password }}"password":"{{ .Data.data.password | sprig_replace "word" "phrase" }}",{{ end }} +{{ if .Data.metadata.version}}"version":"{{ .Data.metadata.version }}"{{ end }} +} +{{ end }} +` diff --git 
a/command/agent/test-fixtures/reload/reload_bar.key b/command/agent/test-fixtures/reload/reload_bar.key new file mode 100644 index 0000000..10849fb --- /dev/null +++ b/command/agent/test-fixtures/reload/reload_bar.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAwF7sRAyUiLcd6es6VeaTRUBOusFFGkmKJ5lU351waCJqXFju +Z6i/SQYNAAnnRgotXSTE1fIPjE2kZNH1hvqE5IpTGgAwy50xpjJrrBBI6e9lyKqj +7T8gLVNBvtC0cpQi+pGrszEI0ckDQCSZHqi/PAzcpmLUgh2KMrgagT+YlN35KHtl +/bQ/Fsn+kqykVqNw69n/CDKNKdDHn1qPwiX9q/fTMj3EG6g+3ntKrUOh8V/gHKPz +q8QGP/wIud2K+tTSorVXr/4zx7xgzlbJkCakzcQQiP6K+paPnDRlE8fK+1gRRyR7 +XCzyp0irUl8G1NjYAR/tVWxiUhlk/jZutb8PpwIDAQABAoIBAEOzJELuindyujxQ +ZD9G3h1I/GwNCFyv9Mbq10u7BIwhUH0fbwdcA7WXQ4v38ERd4IkfH4aLoZ0m1ewF +V/sgvxQO+h/0YTfHImny5KGxOXfaoF92bipYROKuojydBmQsbgLwsRRm9UufCl3Q +g3KewG5JuH112oPQEYq379v8nZ4FxC3Ano1OFBTm9UhHIAX1Dn22kcHOIIw8jCsQ +zp7TZOW+nwtkS41cBwhvV4VIeL6yse2UgbOfRVRwI7B0OtswS5VgW3wysO2mTDKt +V/WCmeht1il/6ZogEHgi/mvDCKpj20wQ1EzGnPdFLdiFJFylf0oufQD/7N/uezbC +is0qJEECgYEA3AE7SeLpe3SZApj2RmE2lcD9/Saj1Y30PznxB7M7hK0sZ1yXEbtS +Qf894iDDD/Cn3ufA4xk/K52CXgAcqvH/h2geG4pWLYsT1mdWhGftprtOMCIvJvzU +8uWJzKdOGVMG7R59wNgEpPDZDpBISjexwQsFo3aw1L/H1/Sa8cdY3a0CgYEA39hB +1oLmGRyE32Q4GF/srG4FqKL1EsbISGDUEYTnaYg2XiM43gu3tC/ikfclk27Jwc2L +m7cA5FxxaEyfoOgfAizfU/uWTAbx9GoXgWsO0hWSN9+YNq61gc5WKoHyrJ/rfrti +y5d7k0OCeBxckLqGDuJqICQ0myiz0El6FU8h5SMCgYEAuhigmiNC9JbwRu40g9v/ +XDVfox9oPmBRVpogdC78DYKeqN/9OZaGQiUxp3GnDni2xyqqUm8srCwT9oeJuF/z +kgpUTV96/hNCuH25BU8UC5Es1jJUSFpdlwjqwx5SRcGhfjnojZMseojwUg1h2MW7 +qls0bc0cTxnaZaYW2qWRWhECgYBrT0cwyQv6GdvxJCBoPwQ9HXmFAKowWC+H0zOX +Onmd8/jsZEJM4J0uuo4Jn8vZxBDg4eL9wVuiHlcXwzP7dYv4BP8DSechh2rS21Ft +b59pQ4IXWw+jl1nYYsyYEDgAXaIN3VNder95N7ICVsZhc6n01MI/qlu1zmt1fOQT +9x2utQKBgHI9SbsfWfbGiu6oLS3+9V1t4dORhj8D8b7z3trvECrD6tPhxoZqtfrH +4apKr3OKRSXk3K+1K6pkMHJHunspucnA1ChXLhzfNF08BSRJkQDGYuaRLS6VGgab +JZTl54bGvO1GkszEBE/9QFcqNVtWGMWXnUPwNNv8t//yJT5rvQil +-----END RSA PRIVATE KEY----- diff --git a/command/agent/test-fixtures/reload/reload_bar.pem b/command/agent/test-fixtures/reload/reload_bar.pem new file mode 100644 index 0000000..a8217be --- /dev/null +++ b/command/agent/test-fixtures/reload/reload_bar.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDQzCCAiugAwIBAgIULLCz3mZKmg2xy3rWCud0f1zcmBwwDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNjQ0WhcNMzYw +MzA1MDEzNzE0WjAaMRgwFgYDVQQDEw9iYXIuZXhhbXBsZS5jb20wggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAXuxEDJSItx3p6zpV5pNFQE66wUUaSYon +mVTfnXBoImpcWO5nqL9JBg0ACedGCi1dJMTV8g+MTaRk0fWG+oTkilMaADDLnTGm +MmusEEjp72XIqqPtPyAtU0G+0LRylCL6kauzMQjRyQNAJJkeqL88DNymYtSCHYoy +uBqBP5iU3fkoe2X9tD8Wyf6SrKRWo3Dr2f8IMo0p0MefWo/CJf2r99MyPcQbqD7e +e0qtQ6HxX+Aco/OrxAY//Ai53Yr61NKitVev/jPHvGDOVsmQJqTNxBCI/or6lo+c +NGUTx8r7WBFHJHtcLPKnSKtSXwbU2NgBH+1VbGJSGWT+Nm61vw+nAgMBAAGjgYQw +gYEwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBSVoF8F +7qbzSryIFrldurAG78LvSjAfBgNVHSMEGDAWgBRzDNvqF/Tq21OgWs13B5YydZjl +vzAgBgNVHREEGTAXgg9iYXIuZXhhbXBsZS5jb22HBH8AAAEwDQYJKoZIhvcNAQEL +BQADggEBAGmz2N282iT2IaEZvOmzIE4znHGkvoxZmrr/2byq5PskBg9ysyCHfUvw +SFA8U7jWjezKTnGRUu5blB+yZdjrMtB4AePWyEqtkJwVsZ2SPeP+9V2gNYK4iktP +UF3aIgBbAbw8rNuGIIB0T4D+6Zyo9Y3MCygs6/N4bRPZgLhewWn1ilklfnl3eqaC +a+JY1NBuTgCMa28NuC+Hy3mCveqhI8tFNiOthlLdgAEbuQaOuNutAG73utZ2aq6Q +W4pajFm3lEf5zt7Lo6ZCFtY/Q8jjURJ9e4O7VjXcqIhBM5bSMI6+fgQyOH0SLboj +RNanJ2bcyF1iPVyPBGzV3dF0ngYzxEY= +-----END CERTIFICATE----- diff --git a/command/agent/test-fixtures/reload/reload_ca.pem b/command/agent/test-fixtures/reload/reload_ca.pem new file mode 100644 index 0000000..72a7444 
--- /dev/null +++ b/command/agent/test-fixtures/reload/reload_ca.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDNTCCAh2gAwIBAgIUBeVo+Ce2BrdRT1cogKvJLtdOky8wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNTM4WhcNMzYw +MzA1MDIzNjA4WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAPTQGWPRIOECGeJB6tR/ftvvtioC9f84fY2QdJ5k +JBupXjPAGYKgS4MGzyT5bz9yY400tCtmh6h7p9tZwHl/TElTugtLQ/8ilMbJTiOM +SiyaMDPHiMJJYKTjm9bu6bKeU1qPZ0Cryes4rygbqs7w2XPgA2RxNmDh7JdX7/h+ +VB5onBmv8g4WFSayowGyDcJWWCbu5yv6ZdH1bqQjgRzQ5xp17WXNmvlzdp2vate/ +9UqPdA8sdJzW/91Gvmros0o/FnG7c2pULhk22wFqO8t2HRjKb3nuxALEJvqoPvad +KjpDTaq1L1ZzxcB7wvWyhy/lNLZL7jiNWy0mN1YB0UpSWdECAwEAAaN7MHkwDgYD +VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFHMM2+oX9Orb +U6BazXcHljJ1mOW/MB8GA1UdIwQYMBaAFHMM2+oX9OrbU6BazXcHljJ1mOW/MBYG +A1UdEQQPMA2CC2V4YW1wbGUuY29tMA0GCSqGSIb3DQEBCwUAA4IBAQAp17XsOaT9 +hculRqrFptn3+zkH3HrIckHm+28R5xYT8ASFXFcLFugGizJAXVL5lvsRVRIwCoOX +Nhi8XSNEFP640VbHcEl81I84bbRIIDS+Yheu6JDZGemTaDYLv1J3D5SHwgoM+nyf +oTRgotUCIXcwJHmTpWEUkZFKuqBxsoTGzk0jO8wOP6xoJkzxVVG5PvNxs924rxY8 +Y8iaLdDfMeT7Pi0XIliBa/aSp/iqSW8XKyJl5R5vXg9+DOgZUrVzIxObaF5RBl/a +mJOeklJBdNVzQm5+iMpO42lu0TA9eWtpP+YiUEXU17XDvFeQWOocFbQ1Peo0W895 +XRz2GCwCNyvW +-----END CERTIFICATE----- diff --git a/command/agent/test-fixtures/reload/reload_foo.key b/command/agent/test-fixtures/reload/reload_foo.key new file mode 100644 index 0000000..86e6cce --- /dev/null +++ b/command/agent/test-fixtures/reload/reload_foo.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpgIBAAKCAQEAzNyVieSti9XBb5/celB5u8YKRJv3mQS9A4/X0mqY1ePznt1i +ilG7OmG0yM2VAk0ceIAQac3Bsn74jxn2cDlrrVniPXcNgYtMtW0kRqNEo4doo4EX +xZguS9vNBu29useHhif1TGX/pA3dgvaVycUCjzTEVk6qI8UEehMK6gEGZb7nOr0A +A9nipSqoeHpDLe3a4KVqj1vtlJKUvD2i1MuBuQ130cB1K9rufLCShGu7mEgzEosc +gr+K3Bf03IejbeVRyIfLtgj1zuvV1katec75UqRA/bsvt5G9JfJqiZ9mwFN0vp3g +Cr7pdQBSBQ2q4yf9s8CuY5c5w9fl3F8f5QFQoQIDAQABAoIBAQCbCb1qNFRa5ZSV +I8i6ELlwMDqJHfhOJ9XcIjpVljLAfNlcu3Ld92jYkCU/asaAjVckotbJG9yhd5Io +yp9E40/oS4P6vGTOS1vsWgMAKoPBtrKsOwCAm+E9q8UIn1fdSS/5ibgM74x+3bds +a62Em8KKGocUQkhk9a+jq1GxMsFisbHRxEHvClLmDMgGnW3FyGmWwT6yZLPSC0ey +szmmjt3ouP8cLAOmSjzcQBMmEZpQMCgR6Qckg6nrLQAGzZyTdCd875wbGA57DpWX +Lssn95+A5EFvr/6b7DkXeIFCrYBFFa+UQN3PWGEQ6Zjmiw4VgV2vO8yX2kCLlUhU +02bL393ZAoGBAPXPD/0yWINbKUPcRlx/WfWQxfz0bu50ytwIXzVK+pRoAMuNqehK +BJ6kNzTTBq40u+IZ4f5jbLDulymR+4zSkirLE7CyWFJOLNI/8K4Pf5DJUgNdrZjJ +LCtP9XRdxiPatQF0NGfdgHlSJh+/CiRJP4AgB17AnB/4z9/M0ZlJGVrzAoGBANVa +69P3Rp/WPBQv0wx6f0tWppJolWekAHKcDIdQ5HdOZE5CPAYSlTrTUW3uJuqMwU2L +M0Er2gIPKWIR5X+9r7Fvu9hQW6l2v3xLlcrGPiapp3STJvuMxzhRAmXmu3bZfVn1 +Vn7Vf1jPULHtTFSlNFEvYG5UJmygK9BeyyVO5KMbAoGBAMCyAibLQPg4jrDUDZSV +gUAwrgUO2ae1hxHWvkxY6vdMUNNByuB+pgB3W4/dnm8Sh/dHsxJpftt1Lqs39ar/ +p/ZEHLt4FCTxg9GOrm7FV4t5RwG8fko36phJpnIC0UFqQltRbYO+8OgqrhhU+u5X +PaCDe0OcWsf1lYAsYGN6GpZhAoGBAMJ5Ksa9+YEODRs1cIFKUyd/5ztC2xRqOAI/ +3WemQ2nAacuvsfizDZVeMzYpww0+maAuBt0btI719PmwaGmkpDXvK+EDdlmkpOwO +FY6MXvBs6fdnfjwCWUErDi2GQFAX9Jt/9oSL5JU1+08DhvUM1QA/V/2Y9KFE6kr3 +bOIn5F4LAoGBAKQzH/AThDGhT3hwr4ktmReF3qKxBgxzjVa8veXtkY5VWwyN09iT +jnTTt6N1CchZoK5WCETjdzNYP7cuBTcV4d3bPNRiJmxXaNVvx3Tlrk98OiffT8Qa +5DO/Wfb43rNHYXBjU6l0n2zWcQ4PUSSbu0P0bM2JTQPRCqSthXvSHw2P +-----END RSA PRIVATE KEY----- diff --git a/command/agent/test-fixtures/reload/reload_foo.pem b/command/agent/test-fixtures/reload/reload_foo.pem new file mode 100644 index 0000000..c8b868b --- /dev/null +++ b/command/agent/test-fixtures/reload/reload_foo.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- 
+MIIDQzCCAiugAwIBAgIUFVW6i/M+yJUsDrXWgRKO/Dnb+L4wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNjA1WhcNMzYw +MzA1MDEzNjM1WjAaMRgwFgYDVQQDEw9mb28uZXhhbXBsZS5jb20wggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDM3JWJ5K2L1cFvn9x6UHm7xgpEm/eZBL0D +j9fSapjV4/Oe3WKKUbs6YbTIzZUCTRx4gBBpzcGyfviPGfZwOWutWeI9dw2Bi0y1 +bSRGo0Sjh2ijgRfFmC5L280G7b26x4eGJ/VMZf+kDd2C9pXJxQKPNMRWTqojxQR6 +EwrqAQZlvuc6vQAD2eKlKqh4ekMt7drgpWqPW+2UkpS8PaLUy4G5DXfRwHUr2u58 +sJKEa7uYSDMSixyCv4rcF/Tch6Nt5VHIh8u2CPXO69XWRq15zvlSpED9uy+3kb0l +8mqJn2bAU3S+neAKvul1AFIFDarjJ/2zwK5jlznD1+XcXx/lAVChAgMBAAGjgYQw +gYEwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBRNJoOJ +dnazDiuqLhV6truQ4cRe9jAfBgNVHSMEGDAWgBRzDNvqF/Tq21OgWs13B5YydZjl +vzAgBgNVHREEGTAXgg9mb28uZXhhbXBsZS5jb22HBH8AAAEwDQYJKoZIhvcNAQEL +BQADggEBAHzv67mtbxMWcuMsxCFBN1PJNAyUDZVCB+1gWhk59EySbVg81hWJDCBy +fl3TKjz3i7wBGAv+C2iTxmwsSJbda22v8JQbuscXIfLFbNALsPzF+J0vxAgJs5Gc +sDbfJ7EQOIIOVKQhHLYnQoLnigSSPc1kd0JjYyHEBjgIaSuXgRRTBAeqLiBMx0yh +RKL1lQ+WoBU/9SXUZZkwokqWt5G7khi5qZkNxVXZCm8VGPg0iywf6gGyhI1SU5S2 +oR219S6kA4JY/stw1qne85/EmHmoImHGt08xex3GoU72jKAjsIpqRWopcD/+uene +Tc9nn3fTQW/Z9fsoJ5iF5OdJnDEswqE= +-----END CERTIFICATE----- diff --git a/command/agent/testing.go b/command/agent/testing.go new file mode 100644 index 0000000..04a2f06 --- /dev/null +++ b/command/agent/testing.go @@ -0,0 +1,89 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package agent + +import ( + "bytes" + "crypto/ecdsa" + "crypto/x509" + "encoding/json" + "encoding/pem" + "os" + "testing" + "time" + + "github.com/go-jose/go-jose/v3" + "github.com/go-jose/go-jose/v3/jwt" + + "github.com/hashicorp/vault/sdk/logical" +) + +const envVarRunAccTests = "VAULT_ACC" + +var runAcceptanceTests = os.Getenv(envVarRunAccTests) == "1" + +func GetTestJWT(t *testing.T) (string, *ecdsa.PrivateKey) { + t.Helper() + cl := jwt.Claims{ + Subject: "r3qXcK2bix9eFECzsU3Sbmh0K16fatW6@clients", + Issuer: "https://team-vault.auth0.com/", + NotBefore: jwt.NewNumericDate(time.Now().Add(-5 * time.Second)), + Audience: jwt.Audience{"https://vault.plugin.auth.jwt.test"}, + } + + privateCl := struct { + User string `json:"https://vault/user"` + Groups []string `json:"https://vault/groups"` + }{ + "jeff", + []string{"foo", "bar"}, + } + + var key *ecdsa.PrivateKey + block, _ := pem.Decode([]byte(TestECDSAPrivKey)) + if block != nil { + var err error + key, err = x509.ParseECPrivateKey(block.Bytes) + if err != nil { + t.Fatal(err) + } + } + + sig, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.ES256, Key: key}, (&jose.SignerOptions{}).WithType("JWT")) + if err != nil { + t.Fatal(err) + } + + raw, err := jwt.Signed(sig).Claims(cl).Claims(privateCl).CompactSerialize() + if err != nil { + t.Fatal(err) + } + + return raw, key +} + +func readToken(fileName string) (*logical.HTTPWrapInfo, error) { + b, err := os.ReadFile(fileName) + if err != nil { + return nil, err + } + wrapper := &logical.HTTPWrapInfo{} + if err := json.NewDecoder(bytes.NewReader(b)).Decode(wrapper); err != nil { + return nil, err + } + return wrapper, nil +} + +const ( + TestECDSAPrivKey string = `-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIKfldwWLPYsHjRL9EVTsjSbzTtcGRu6icohNfIqcb6A+oAoGCCqGSM49 +AwEHoUQDQgAE4+SFvPwOy0miy/FiTT05HnwjpEbSq+7+1q9BFxAkzjgKnlkXk5qx +hzXQvRmS4w9ZsskoTZtuUI+XX7conJhzCQ== +-----END EC PRIVATE KEY-----` + + TestECDSAPubKey string = `-----BEGIN PUBLIC KEY----- +MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE4+SFvPwOy0miy/FiTT05HnwjpEbS +q+7+1q9BFxAkzjgKnlkXk5qxhzXQvRmS4w9ZsskoTZtuUI+XX7conJhzCQ== +-----END 
PUBLIC KEY-----` +) diff --git a/command/agent/token_file_end_to_end_test.go b/command/agent/token_file_end_to_end_test.go new file mode 100644 index 0000000..dc7115c --- /dev/null +++ b/command/agent/token_file_end_to_end_test.go @@ -0,0 +1,162 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package agent + +import ( + "context" + "os" + "path/filepath" + "testing" + "time" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + token_file "github.com/hashicorp/vault/command/agentproxyshared/auth/token-file" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/vault" +) + +func TestTokenFileEndToEnd(t *testing.T) { + var err error + logger := logging.NewVaultLogger(log.Trace) + coreConfig := &vault.CoreConfig{ + DisableMlock: true, + DisableCache: true, + Logger: log.NewNullLogger(), + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + + vault.TestWaitActive(t, cores[0].Core) + + client := cores[0].Client + + secret, err := client.Auth().Token().Create(nil) + if err != nil || secret == nil { + t.Fatal(err) + } + + tokenFile, err := os.Create(filepath.Join(t.TempDir(), "token_file")) + if err != nil { + t.Fatal(err) + } + tokenFileName := tokenFile.Name() + tokenFile.Close() // WriteFile doesn't need it open + os.WriteFile(tokenFileName, []byte(secret.Auth.ClientToken), 0o666) + defer os.Remove(tokenFileName) + + ahConfig := &auth.AuthHandlerConfig{ + Logger: logger.Named("auth.handler"), + Client: client, + } + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + + am, err := token_file.NewTokenFileAuthMethod(&auth.AuthConfig{ + Logger: logger.Named("auth.method"), + Config: map[string]interface{}{ + "token_file_path": tokenFileName, + }, + }) + if err != nil { + t.Fatal(err) + } + + ah := auth.NewAuthHandler(ahConfig) + errCh := make(chan error) + go func() { + errCh <- ah.Run(ctx, am) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + // We close these right away because we're just basically testing + // permissions and finding a usable file name + sinkFile, err := os.Create(filepath.Join(t.TempDir(), "auth.tokensink.test.")) + if err != nil { + t.Fatal(err) + } + tokenSinkFileName := sinkFile.Name() + sinkFile.Close() + os.Remove(tokenSinkFileName) + t.Logf("output: %s", tokenSinkFileName) + + config := &sink.SinkConfig{ + Logger: logger.Named("sink.file"), + Config: map[string]interface{}{ + "path": tokenSinkFileName, + }, + WrapTTL: 10 * time.Second, + } + + fs, err := file.NewFileSink(config) + if err != nil { + t.Fatal(err) + } + config.Sink = fs + + ss := sink.NewSinkServer(&sink.SinkServerConfig{ + Logger: logger.Named("sink.server"), + Client: client, + }) + go func() { + errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config}) + }() + defer func() { + select { + case <-ctx.Done(): + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + }() + + // This has to be after the other defers, so it happens first. 
It allows + // successful test runs to immediately cancel all of the runner goroutines + // and unblock any of the blocking defer calls by the runner's DoneCh that + // comes before this and avoid successful tests from taking the entire + // timeout duration. + defer cancel() + + if stat, err := os.Lstat(tokenSinkFileName); err == nil { + t.Fatalf("expected err but got %s", stat) + } else if !os.IsNotExist(err) { + t.Fatal("expected notexist err") + } + + // Wait 2 seconds for the env variables to be detected and an auth to be generated. + time.Sleep(time.Second * 2) + + token, err := readToken(tokenSinkFileName) + if err != nil { + t.Fatal(err) + } + + if token.Token == "" { + t.Fatal("expected token but didn't receive it") + } + + _, err = os.Stat(tokenFileName) + if err != nil { + t.Fatal("Token file removed") + } +} diff --git a/command/agent_generate_config.go b/command/agent_generate_config.go new file mode 100644 index 0000000..9fa660a --- /dev/null +++ b/command/agent_generate_config.go @@ -0,0 +1,442 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "context" + "fmt" + "io" + "os" + paths "path" + "sort" + "strings" + "unicode" + + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2/hclwrite" + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/mitchellh/go-homedir" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*AgentGenerateConfigCommand)(nil) + _ cli.CommandAutocomplete = (*AgentGenerateConfigCommand)(nil) +) + +type AgentGenerateConfigCommand struct { + *BaseCommand + + flagType string + flagPaths []string + flagExec string +} + +func (c *AgentGenerateConfigCommand) Synopsis() string { + return "Generate a Vault Agent configuration file." +} + +func (c *AgentGenerateConfigCommand) Help() string { + helpText := ` +Usage: vault agent generate-config [options] [path/to/config.hcl] + + Generates a simple Vault Agent configuration file from the given parameters. + + Currently, the only supported configuration type is 'env-template', which + helps you generate a configuration file with environment variable templates + for running Vault Agent in process supervisor mode. + + For every specified secret -path, the command will attempt to generate one or + multiple 'env_template' entries based on the JSON key(s) stored in the + specified secret. If the secret -path ends with '/*', the command will + attempt to recurse through the secrets tree rooted at the given path, + generating 'env_template' entries for each encountered secret. Currently, + only kv-v1 and kv-v2 paths are supported. + + The command specified in the '-exec' option will be used to generate an + 'exec' entry, which will tell Vault Agent which child process to run. + + In addition to env_template entries, the command generates an 'auto_auth' + section with 'token_file' authentication method. While this method is very + convenient for local testing, it should NOT be used in production. Please + see https://developer.hashicorp.com/vault/docs/agent-and-proxy/autoauth/methods + for a list of production-ready auto_auth methods that you can use instead. + + By default, the file will be generated in the local directory as 'agent.hcl' + unless a path is specified as an argument. 
+ + Generate a simple environment variable template configuration: + + $ vault agent generate-config -type="env-template" \ + -exec="./my-app arg1 arg2" \ + -path="secret/foo" + + Generate an environment variable template configuration for multiple secrets: + + $ vault agent generate-config -type="env-template" \ + -exec="./my-app arg1 arg2" \ + -path="secret/foo" \ + -path="secret/bar" \ + -path="secret/my-app/*" + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *AgentGenerateConfigCommand) Flags() *FlagSets { + // Include client-modifying flags (-address, -namespace, etc.) + set := c.flagSet(FlagSetHTTP) + + // Common Options + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "type", + Target: &c.flagType, + Usage: "Type of configuration file to generate; currently, only 'env-template' is supported.", + Completion: complete.PredictSet( + "env-template", + ), + }) + + f.StringSliceVar(&StringSliceVar{ + Name: "path", + Target: &c.flagPaths, + Usage: "Path to a kv-v1 or kv-v2 secret (e.g. secret/data/foo, kv-v2/prefix/*); multiple secrets and tail '*' wildcards are allowed.", + Completion: c.PredictVaultFolders(), + }) + + f.StringVar(&StringVar{ + Name: "exec", + Target: &c.flagExec, + Default: "env", + Usage: "The command to execute in agent process supervisor mode.", + }) + + return set +} + +func (c *AgentGenerateConfigCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *AgentGenerateConfigCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *AgentGenerateConfigCommand) Run(args []string) int { + flags := c.Flags() + + if err := flags.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = flags.Args() + + if len(args) > 1 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected at most 1, got %d)", len(args))) + return 1 + } + + if c.flagType == "" { + c.UI.Error(`Please specify a -type flag; currently only -type="env-template" is supported.`) + return 1 + } + + if c.flagType != "env-template" { + c.UI.Error(fmt.Sprintf(`%q is not a supported configuration type; currently only -type="env-template" is supported.`, c.flagType)) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + config, err := generateConfiguration(context.Background(), client, c.flagExec, c.flagPaths) + if err != nil { + c.UI.Error(fmt.Sprintf("Error: %v", err)) + return 2 + } + + var configPath string + if len(args) == 1 { + configPath = args[0] + } else { + configPath = "agent.hcl" + } + + f, err := os.Create(configPath) + if err != nil { + c.UI.Error(fmt.Sprintf("Could not create configuration file %q: %v", configPath, err)) + return 3 + } + defer func() { + if err := f.Close(); err != nil { + c.UI.Error(fmt.Sprintf("Could not close configuration file %q: %v", configPath, err)) + } + }() + + if _, err := config.WriteTo(f); err != nil { + c.UI.Error(fmt.Sprintf("Could not write to configuration file %q: %v", configPath, err)) + return 3 + } + + c.UI.Info(fmt.Sprintf("Successfully generated %q configuration file!", configPath)) + + c.UI.Warn("Warning: the generated file uses 'token_file' authentication method, which is not suitable for production environments.") + + return 0 +} + +func generateConfiguration(ctx context.Context, client *api.Client, flagExec string, flagPaths []string) (io.WriterTo, error) { + var execCommand []string + if flagExec != "" { + execCommand = strings.Split(flagExec, " ") + } else 
{ + execCommand = []string{"env"} + } + + tokenPath, err := homedir.Expand("~/.vault-token") + if err != nil { + return nil, fmt.Errorf("could not expand home directory: %w", err) + } + + templates, err := constructTemplates(ctx, client, flagPaths) + if err != nil { + return nil, fmt.Errorf("could not generate templates: %w", err) + } + + config := generatedConfig{ + AutoAuth: generatedConfigAutoAuth{ + Method: generatedConfigAutoAuthMethod{ + Type: "token_file", + Config: generatedConfigAutoAuthMethodConfig{ + TokenFilePath: tokenPath, + }, + }, + }, + TemplateConfig: generatedConfigTemplateConfig{ + StaticSecretRenderInterval: "5m", + ExitOnRetryFailure: true, + }, + Vault: generatedConfigVault{ + Address: client.Address(), + }, + Exec: generatedConfigExec{ + Command: execCommand, + RestartOnSecretChanges: "always", + RestartStopSignal: "SIGTERM", + }, + EnvTemplates: templates, + } + + contents := hclwrite.NewEmptyFile() + + gohcl.EncodeIntoBody(&config, contents.Body()) + + return contents, nil +} + +func constructTemplates(ctx context.Context, client *api.Client, paths []string) ([]generatedConfigEnvTemplate, error) { + var templates []generatedConfigEnvTemplate + + for _, path := range paths { + path = sanitizePath(path) + + mountPath, v2, err := isKVv2(path, client) + if err != nil { + return nil, fmt.Errorf("could not validate secret path %q: %w", path, err) + } + + switch { + case strings.HasSuffix(path, "/*"): + // this path contains a tail wildcard, attempt to walk the tree + t, err := constructTemplatesFromTree(ctx, client, path[:len(path)-2], mountPath, v2) + if err != nil { + return nil, fmt.Errorf("could not traverse secret at %q: %w", path, err) + } + templates = append(templates, t...) + + case strings.Contains(path, "*"): + // don't allow any other wildcards + return nil, fmt.Errorf("the path %q cannot contain '*' wildcard characters except as the last element of the path", path) + + default: + // regular secret path + t, err := constructTemplatesFromSecret(ctx, client, path, mountPath, v2) + if err != nil { + return nil, fmt.Errorf("could not read secret at %q: %v", path, err) + } + templates = append(templates, t...) + } + } + + return templates, nil +} + +func constructTemplatesFromTree(ctx context.Context, client *api.Client, path, mountPath string, v2 bool) ([]generatedConfigEnvTemplate, error) { + var templates []generatedConfigEnvTemplate + + if v2 { + metadataPath := strings.Replace( + path, + paths.Join(mountPath, "data"), + paths.Join(mountPath, "metadata"), + 1, + ) + if path != metadataPath { + path = metadataPath + } else { + path = addPrefixToKVPath(path, mountPath, "metadata", true) + } + } + + err := walkSecretsTree(ctx, client, path, func(child string, directory bool) error { + if directory { + return nil + } + + dataPath := strings.Replace( + child, + paths.Join(mountPath, "metadata"), + paths.Join(mountPath, "data"), + 1, + ) + + t, err := constructTemplatesFromSecret(ctx, client, dataPath, mountPath, v2) + if err != nil { + return err + } + templates = append(templates, t...)
+ + return nil + }) + if err != nil { + return nil, err + } + + return templates, nil +} + +func constructTemplatesFromSecret(ctx context.Context, client *api.Client, path, mountPath string, v2 bool) ([]generatedConfigEnvTemplate, error) { + var templates []generatedConfigEnvTemplate + + if v2 { + path = addPrefixToKVPath(path, mountPath, "data", true) + } + + resp, err := client.Logical().ReadWithContext(ctx, path) + if err != nil { + return nil, fmt.Errorf("error querying: %w", err) + } + if resp == nil { + return nil, fmt.Errorf("secret not found") + } + + var data map[string]interface{} + if v2 { + internal, ok := resp.Data["data"] + if !ok { + return nil, fmt.Errorf("secret.Data not found") + } + data = internal.(map[string]interface{}) + } else { + data = resp.Data + } + + fields := make([]string, 0, len(data)) + + for field := range data { + fields = append(fields, field) + } + + // sort for a deterministic output + sort.Strings(fields) + + var dataContents string + if v2 { + dataContents = ".Data.data" + } else { + dataContents = ".Data" + } + + for _, field := range fields { + templates = append(templates, generatedConfigEnvTemplate{ + Name: constructDefaultEnvironmentKey(path, field), + Contents: fmt.Sprintf(`{{ with secret "%s" }}{{ %s.%s }}{{ end }}`, path, dataContents, field), + ErrorOnMissingKey: true, + }) + } + + return templates, nil +} + +func constructDefaultEnvironmentKey(path string, field string) string { + pathParts := strings.Split(path, "/") + pathPartsLast := pathParts[len(pathParts)-1] + + notLetterOrNumber := func(r rune) bool { + return !unicode.IsLetter(r) && !unicode.IsNumber(r) + } + + p1 := strings.FieldsFunc(pathPartsLast, notLetterOrNumber) + p2 := strings.FieldsFunc(field, notLetterOrNumber) + + keyParts := append(p1, p2...) + + return strings.ToUpper(strings.Join(keyParts, "_")) +} + +// Below, we are redefining a subset of the configuration-related structures +// defined under command/agent/config. Using these structures we can tailor the +// output of the generated config, while using the original structures would +// have produced an HCL document with many empty fields. The structures below +// should not be used for anything other than generation. 
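As a worked illustration of constructDefaultEnvironmentKey above: only the last path segment and the field name contribute to the generated variable name; both are split on every non-letter, non-digit rune, concatenated, joined with underscores, and uppercased. A minimal standalone sketch of that derivation (the envKey helper and the example paths are illustrative assumptions, not part of this change):

```go
package main

import (
	"fmt"
	"strings"
	"unicode"
)

// envKey mirrors the derivation in constructDefaultEnvironmentKey: split the
// last path segment and the field name on any non-alphanumeric rune, join the
// parts with underscores, and uppercase the result.
func envKey(path, field string) string {
	pathParts := strings.Split(path, "/")
	last := pathParts[len(pathParts)-1]

	notLetterOrNumber := func(r rune) bool {
		return !unicode.IsLetter(r) && !unicode.IsNumber(r)
	}

	parts := append(strings.FieldsFunc(last, notLetterOrNumber), strings.FieldsFunc(field, notLetterOrNumber)...)
	return strings.ToUpper(strings.Join(parts, "_"))
}

func main() {
	fmt.Println(envKey("kv-v2/data/my-app", "api-key"))       // MY_APP_API_KEY
	fmt.Println(envKey("kv-v1/app-1/nested/baz", "password")) // BAZ_PASSWORD
}
```

Note that intermediate path segments are dropped entirely, which is why the nested test cases further below expect flat names such as BAZ_PASSWORD for kv-v1/app-1/nested/baz.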
+ +type generatedConfig struct { + AutoAuth generatedConfigAutoAuth `hcl:"auto_auth,block"` + TemplateConfig generatedConfigTemplateConfig `hcl:"template_config,block"` + Vault generatedConfigVault `hcl:"vault,block"` + EnvTemplates []generatedConfigEnvTemplate `hcl:"env_template,block"` + Exec generatedConfigExec `hcl:"exec,block"` +} + +type generatedConfigTemplateConfig struct { + StaticSecretRenderInterval string `hcl:"static_secret_render_interval"` + ExitOnRetryFailure bool `hcl:"exit_on_retry_failure"` +} + +type generatedConfigExec struct { + Command []string `hcl:"command"` + RestartOnSecretChanges string `hcl:"restart_on_secret_changes"` + RestartStopSignal string `hcl:"restart_stop_signal"` +} + +type generatedConfigEnvTemplate struct { + Name string `hcl:"name,label"` + Contents string `hcl:"contents,attr"` + ErrorOnMissingKey bool `hcl:"error_on_missing_key"` +} + +type generatedConfigVault struct { + Address string `hcl:"address"` +} + +type generatedConfigAutoAuth struct { + Method generatedConfigAutoAuthMethod `hcl:"method,block"` +} + +type generatedConfigAutoAuthMethod struct { + Type string `hcl:"type"` + Config generatedConfigAutoAuthMethodConfig `hcl:"config,block"` +} + +type generatedConfigAutoAuthMethodConfig struct { + TokenFilePath string `hcl:"token_file_path"` +} diff --git a/command/agent_generate_config_test.go b/command/agent_generate_config_test.go new file mode 100644 index 0000000..f225a7c --- /dev/null +++ b/command/agent_generate_config_test.go @@ -0,0 +1,274 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "context" + "reflect" + "regexp" + "testing" + "time" +) + +// TestConstructTemplates tests the constructTemplates helper function +func TestConstructTemplates(t *testing.T) { + ctx, cancelContextFunc := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelContextFunc() + + client, closer := testVaultServerWithSecrets(ctx, t) + defer closer() + + cases := map[string]struct { + paths []string + expected []generatedConfigEnvTemplate + expectedError bool + }{ + "kv-v1-simple": { + paths: []string{"kv-v1/foo"}, + expected: []generatedConfigEnvTemplate{ + {Contents: `{{ with secret "kv-v1/foo" }}{{ .Data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_PASSWORD"}, + {Contents: `{{ with secret "kv-v1/foo" }}{{ .Data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_USER"}, + }, + expectedError: false, + }, + + "kv-v2-simple": { + paths: []string{"kv-v2/foo"}, + expected: []generatedConfigEnvTemplate{ + {Contents: `{{ with secret "kv-v2/data/foo" }}{{ .Data.data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_PASSWORD"}, + {Contents: `{{ with secret "kv-v2/data/foo" }}{{ .Data.data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_USER"}, + }, + expectedError: false, + }, + + "kv-v2-data-in-path": { + paths: []string{"kv-v2/data/foo"}, + expected: []generatedConfigEnvTemplate{ + {Contents: `{{ with secret "kv-v2/data/foo" }}{{ .Data.data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_PASSWORD"}, + {Contents: `{{ with secret "kv-v2/data/foo" }}{{ .Data.data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_USER"}, + }, + expectedError: false, + }, + + "kv-v1-nested": { + paths: []string{"kv-v1/app-1/*"}, + expected: []generatedConfigEnvTemplate{ + {Contents: `{{ with secret "kv-v1/app-1/bar" }}{{ .Data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_PASSWORD"}, + {Contents: `{{ with secret "kv-v1/app-1/bar" }}{{ 
.Data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_USER"}, + {Contents: `{{ with secret "kv-v1/app-1/foo" }}{{ .Data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_PASSWORD"}, + {Contents: `{{ with secret "kv-v1/app-1/foo" }}{{ .Data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_USER"}, + {Contents: `{{ with secret "kv-v1/app-1/nested/baz" }}{{ .Data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAZ_PASSWORD"}, + {Contents: `{{ with secret "kv-v1/app-1/nested/baz" }}{{ .Data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAZ_USER"}, + }, + expectedError: false, + }, + + "kv-v2-nested": { + paths: []string{"kv-v2/app-1/*"}, + expected: []generatedConfigEnvTemplate{ + {Contents: `{{ with secret "kv-v2/data/app-1/bar" }}{{ .Data.data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_PASSWORD"}, + {Contents: `{{ with secret "kv-v2/data/app-1/bar" }}{{ .Data.data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_USER"}, + {Contents: `{{ with secret "kv-v2/data/app-1/foo" }}{{ .Data.data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_PASSWORD"}, + {Contents: `{{ with secret "kv-v2/data/app-1/foo" }}{{ .Data.data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_USER"}, + {Contents: `{{ with secret "kv-v2/data/app-1/nested/baz" }}{{ .Data.data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAZ_PASSWORD"}, + {Contents: `{{ with secret "kv-v2/data/app-1/nested/baz" }}{{ .Data.data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAZ_USER"}, + }, + expectedError: false, + }, + + "kv-v1-multi-path": { + paths: []string{"kv-v1/foo", "kv-v1/app-1/bar"}, + expected: []generatedConfigEnvTemplate{ + {Contents: `{{ with secret "kv-v1/foo" }}{{ .Data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_PASSWORD"}, + {Contents: `{{ with secret "kv-v1/foo" }}{{ .Data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_USER"}, + {Contents: `{{ with secret "kv-v1/app-1/bar" }}{{ .Data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_PASSWORD"}, + {Contents: `{{ with secret "kv-v1/app-1/bar" }}{{ .Data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_USER"}, + }, + expectedError: false, + }, + + "kv-v2-multi-path": { + paths: []string{"kv-v2/foo", "kv-v2/app-1/bar"}, + expected: []generatedConfigEnvTemplate{ + {Contents: `{{ with secret "kv-v2/data/foo" }}{{ .Data.data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_PASSWORD"}, + {Contents: `{{ with secret "kv-v2/data/foo" }}{{ .Data.data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "FOO_USER"}, + {Contents: `{{ with secret "kv-v2/data/app-1/bar" }}{{ .Data.data.password }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_PASSWORD"}, + {Contents: `{{ with secret "kv-v2/data/app-1/bar" }}{{ .Data.data.user }}{{ end }}`, ErrorOnMissingKey: true, Name: "BAR_USER"}, + }, + expectedError: false, + }, + + "kv-v1-path-not-found": { + paths: []string{"kv-v1/does/not/exist"}, + expected: nil, + expectedError: true, + }, + + "kv-v2-path-not-found": { + paths: []string{"kv-v2/does/not/exist"}, + expected: nil, + expectedError: true, + }, + + "kv-v1-early-wildcard": { + paths: []string{"kv-v1/*/foo"}, + expected: nil, + expectedError: true, + }, + + "kv-v2-early-wildcard": { + paths: []string{"kv-v2/*/foo"}, + expected: nil, + expectedError: true, + }, + } + + for name, tc := range cases { + name, tc := name, tc + + t.Run(name, func(t *testing.T) { + templates, err := constructTemplates(ctx, client, tc.paths) + + if tc.expectedError { + if err == nil { 
+ t.Fatal("an error was expected but the test succeeded") + } + } else { + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(tc.expected, templates) { + t.Fatalf("unexpected output; want: %v, got: %v", tc.expected, templates) + } + } + }) + } +} + +// TestGenerateConfiguration tests the generateConfiguration helper function +func TestGenerateConfiguration(t *testing.T) { + ctx, cancelContextFunc := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelContextFunc() + + client, closer := testVaultServerWithSecrets(ctx, t) + defer closer() + + cases := map[string]struct { + flagExec string + flagPaths []string + expected *regexp.Regexp + expectedError bool + }{ + "kv-v1-simple": { + flagExec: "./my-app arg1 arg2", + flagPaths: []string{"kv-v1/foo"}, + expected: regexp.MustCompile(` +auto_auth \{ + + method \{ + type = "token_file" + + config \{ + token_file_path = ".*/.vault-token" + } + } +} + +template_config \{ + static_secret_render_interval = "5m" + exit_on_retry_failure = true +} + +vault \{ + address = "https://127.0.0.1:[0-9]{5}" +} + +env_template "FOO_PASSWORD" \{ + contents = "\{\{ with secret \\"kv-v1/foo\\" }}\{\{ .Data.password }}\{\{ end }}" + error_on_missing_key = true +} +env_template "FOO_USER" \{ + contents = "\{\{ with secret \\"kv-v1/foo\\" }}\{\{ .Data.user }}\{\{ end }}" + error_on_missing_key = true +} + +exec \{ + command = \["./my-app", "arg1", "arg2"\] + restart_on_secret_changes = "always" + restart_stop_signal = "SIGTERM" +} +`), + expectedError: false, + }, + + "kv-v2-default-exec": { + flagExec: "", + flagPaths: []string{"kv-v2/foo"}, + expected: regexp.MustCompile(` +auto_auth \{ + + method \{ + type = "token_file" + + config \{ + token_file_path = ".*/.vault-token" + } + } +} + +template_config \{ + static_secret_render_interval = "5m" + exit_on_retry_failure = true +} + +vault \{ + address = "https://127.0.0.1:[0-9]{5}" +} + +env_template "FOO_PASSWORD" \{ + contents = "\{\{ with secret \\"kv-v2/data/foo\\" }}\{\{ .Data.data.password }}\{\{ end }}" + error_on_missing_key = true +} +env_template "FOO_USER" \{ + contents = "\{\{ with secret \\"kv-v2/data/foo\\" }}\{\{ .Data.data.user }}\{\{ end }}" + error_on_missing_key = true +} + +exec \{ + command = \["env"\] + restart_on_secret_changes = "always" + restart_stop_signal = "SIGTERM" +} +`), + expectedError: false, + }, + } + + for name, tc := range cases { + name, tc := name, tc + + t.Run(name, func(t *testing.T) { + var config bytes.Buffer + + c, err := generateConfiguration(ctx, client, tc.flagExec, tc.flagPaths) + c.WriteTo(&config) + + if tc.expectedError { + if err == nil { + t.Fatal("an error was expected but the test succeeded") + } + } else { + if err != nil { + t.Fatal(err) + } + + if !tc.expected.MatchString(config.String()) { + t.Fatalf("unexpected output; want: %v, got: %v", tc.expected.String(), config.String()) + } + } + }) + } +} diff --git a/command/agent_test.go b/command/agent_test.go new file mode 100644 index 0000000..91bddde --- /dev/null +++ b/command/agent_test.go @@ -0,0 +1,3225 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bufio" + "crypto/tls" + "crypto/x509" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "os" + "path/filepath" + "reflect" + "strings" + "sync" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + vaultjwt "github.com/hashicorp/vault-plugin-auth-jwt" + logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" + "github.com/hashicorp/vault/api" + credAppRole "github.com/hashicorp/vault/builtin/credential/approle" + "github.com/hashicorp/vault/command/agent" + agentConfig "github.com/hashicorp/vault/command/agent/config" + "github.com/hashicorp/vault/helper/useragent" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/helper/pointerutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + "github.com/mitchellh/cli" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + BasicHclConfig = ` +log_file = "TMPDIR/juan.log" +log_level="warn" +log_rotate_max_files=2 +log_rotate_bytes=1048576 +vault { + address = "http://127.0.0.1:8200" + retry { + num_retries = 5 + } +} + +listener "tcp" { + address = "127.0.0.1:8100" + tls_disable = false + tls_cert_file = "TMPDIR/reload_cert.pem" + tls_key_file = "TMPDIR/reload_key.pem" +}` + BasicHclConfig2 = ` +log_file = "TMPDIR/juan.log" +log_level="debug" +log_rotate_max_files=-1 +log_rotate_bytes=1048576 +vault { + address = "http://127.0.0.1:8200" + retry { + num_retries = 5 + } +} + +listener "tcp" { + address = "127.0.0.1:8100" + tls_disable = false + tls_cert_file = "TMPDIR/reload_cert.pem" + tls_key_file = "TMPDIR/reload_key.pem" +}` +) + +func testAgentCommand(tb testing.TB, logger hclog.Logger) (*cli.MockUi, *AgentCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &AgentCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + ShutdownCh: MakeShutdownCh(), + SighupCh: MakeSighupCh(), + logger: logger, + startedCh: make(chan struct{}, 5), + reloadedCh: make(chan struct{}, 5), + } +} + +func TestAgent_ExitAfterAuth(t *testing.T) { + t.Run("via_config", func(t *testing.T) { + testAgentExitAfterAuth(t, false) + }) + + t.Run("via_flag", func(t *testing.T) { + testAgentExitAfterAuth(t, true) + }) +} + +func testAgentExitAfterAuth(t *testing.T, viaFlag bool) { + logger := logging.NewVaultLogger(hclog.Trace) + coreConfig := &vault.CoreConfig{ + Logger: logger, + CredentialBackends: map[string]logical.Factory{ + "jwt": vaultjwt.Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + client := cluster.Cores[0].Client + + // Setup Vault + err := client.Sys().EnableAuthWithOptions("jwt", &api.EnableAuthOptions{ + Type: "jwt", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Write("auth/jwt/config", map[string]interface{}{ + "bound_issuer": "https://team-vault.auth0.com/", + "jwt_validation_pubkeys": agent.TestECDSAPubKey, + "jwt_supported_algs": "ES256", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Write("auth/jwt/role/test", map[string]interface{}{ + "role_type": "jwt", + "bound_subject": "r3qXcK2bix9eFECzsU3Sbmh0K16fatW6@clients", + "bound_audiences": "https://vault.plugin.auth.jwt.test", + "user_claim": "https://vault/user", + "groups_claim": 
"https://vault/groups", + "policies": "test", + "period": "3s", + }) + if err != nil { + t.Fatal(err) + } + + inf, err := os.CreateTemp("", "auth.jwt.test.") + if err != nil { + t.Fatal(err) + } + in := inf.Name() + inf.Close() + os.Remove(in) + t.Logf("input: %s", in) + + sink1f, err := os.CreateTemp("", "sink1.jwt.test.") + if err != nil { + t.Fatal(err) + } + sink1 := sink1f.Name() + sink1f.Close() + os.Remove(sink1) + t.Logf("sink1: %s", sink1) + + sink2f, err := os.CreateTemp("", "sink2.jwt.test.") + if err != nil { + t.Fatal(err) + } + sink2 := sink2f.Name() + sink2f.Close() + os.Remove(sink2) + t.Logf("sink2: %s", sink2) + + conff, err := os.CreateTemp("", "conf.jwt.test.") + if err != nil { + t.Fatal(err) + } + conf := conff.Name() + conff.Close() + os.Remove(conf) + t.Logf("config: %s", conf) + + jwtToken, _ := agent.GetTestJWT(t) + if err := os.WriteFile(in, []byte(jwtToken), 0o600); err != nil { + t.Fatal(err) + } else { + logger.Trace("wrote test jwt", "path", in) + } + + exitAfterAuthTemplText := "exit_after_auth = true" + if viaFlag { + exitAfterAuthTemplText = "" + } + + config := ` +%s + +auto_auth { + method { + type = "jwt" + config = { + role = "test" + path = "%s" + } + } + + sink { + type = "file" + config = { + path = "%s" + } + } + + sink "file" { + config = { + path = "%s" + } + } +} +` + + config = fmt.Sprintf(config, exitAfterAuthTemplText, in, sink1, sink2) + if err := os.WriteFile(conf, []byte(config), 0o600); err != nil { + t.Fatal(err) + } else { + logger.Trace("wrote test config", "path", conf) + } + + doneCh := make(chan struct{}) + go func() { + ui, cmd := testAgentCommand(t, logger) + cmd.client = client + + args := []string{"-config", conf} + if viaFlag { + args = append(args, "-exit-after-auth") + } + + code := cmd.Run(args) + if code != 0 { + t.Errorf("expected %d to be %d", code, 0) + t.Logf("output from agent:\n%s", ui.OutputWriter.String()) + t.Logf("error from agent:\n%s", ui.ErrorWriter.String()) + } + close(doneCh) + }() + + select { + case <-doneCh: + break + case <-time.After(1 * time.Minute): + t.Fatal("timeout reached while waiting for agent to exit") + } + + sink1Bytes, err := os.ReadFile(sink1) + if err != nil { + t.Fatal(err) + } + if len(sink1Bytes) == 0 { + t.Fatal("got no output from sink 1") + } + + sink2Bytes, err := os.ReadFile(sink2) + if err != nil { + t.Fatal(err) + } + if len(sink2Bytes) == 0 { + t.Fatal("got no output from sink 2") + } + + if string(sink1Bytes) != string(sink2Bytes) { + t.Fatal("sink 1/2 values don't match") + } +} + +func TestAgent_RequireRequestHeader(t *testing.T) { + // newApiClient creates an *api.Client. 
+ newApiClient := func(addr string, includeVaultRequestHeader bool) *api.Client { + conf := api.DefaultConfig() + conf.Address = addr + cli, err := api.NewClient(conf) + if err != nil { + t.Fatalf("err: %s", err) + } + + h := cli.Headers() + val, ok := h[consts.RequestHeaderName] + if !ok || !reflect.DeepEqual(val, []string{"true"}) { + t.Fatalf("invalid %s header", consts.RequestHeaderName) + } + if !includeVaultRequestHeader { + delete(h, consts.RequestHeaderName) + cli.SetHeaders(h) + } + + return cli + } + + //---------------------------------------------------- + // Start the server and agent + //---------------------------------------------------- + + // Start a vault server + logger := logging.NewVaultLogger(hclog.Trace) + cluster := vault.NewTestCluster(t, + &vault.CoreConfig{ + Logger: logger, + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + }, + &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + vault.TestWaitActive(t, cluster.Cores[0].Core) + serverClient := cluster.Cores[0].Client + + // Enable the approle auth method + roleIDPath, secretIDPath := setupAppRole(t, serverClient) + + // Create a config file + config := ` +auto_auth { + method "approle" { + mount_path = "auth/approle" + config = { + role_id_file_path = "%s" + secret_id_file_path = "%s" + } + } +} + +cache { + use_auto_auth_token = true +} + +listener "tcp" { + address = "%s" + tls_disable = true +} +listener "tcp" { + address = "%s" + tls_disable = true + require_request_header = false +} +listener "tcp" { + address = "%s" + tls_disable = true + require_request_header = true +} +` + listenAddr1 := generateListenerAddress(t) + listenAddr2 := generateListenerAddress(t) + listenAddr3 := generateListenerAddress(t) + config = fmt.Sprintf( + config, + roleIDPath, + secretIDPath, + listenAddr1, + listenAddr2, + listenAddr3, + ) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Start the agent + ui, cmd := testAgentCommand(t, logger) + cmd.client = serverClient + cmd.startedCh = make(chan struct{}) + + var output string + var code int + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + code = cmd.Run([]string{"-config", configPath}) + if code != 0 { + output = ui.ErrorWriter.String() + ui.OutputWriter.String() + } + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + // defer agent shutdown + defer func() { + cmd.ShutdownCh <- struct{}{} + wg.Wait() + if code != 0 { + t.Fatalf("got a non-zero exit status: %d, stdout/stderr: %s", code, output) + } + }() + + //---------------------------------------------------- + // Perform the tests + //---------------------------------------------------- + + // Test against a listener configuration that omits + // 'require_request_header', with the header missing from the request. + agentClient := newApiClient("http://"+listenAddr1, false) + req := agentClient.NewRequest("GET", "/v1/sys/health") + request(t, agentClient, req, 200) + + // Test against a listener configuration that sets 'require_request_header' + // to 'false', with the header missing from the request. + agentClient = newApiClient("http://"+listenAddr2, false) + req = agentClient.NewRequest("GET", "/v1/sys/health") + request(t, agentClient, req, 200) + + // Test against a listener configuration that sets 'require_request_header' + // to 'true', with the header missing from the request. 
+ agentClient = newApiClient("http://"+listenAddr3, false) + req = agentClient.NewRequest("GET", "/v1/sys/health") + resp, err := agentClient.RawRequest(req) + if err == nil { + t.Fatalf("expected error") + } + if resp.StatusCode != http.StatusPreconditionFailed { + t.Fatalf("expected status code %d, not %d", http.StatusPreconditionFailed, resp.StatusCode) + } + + // Test against a listener configuration that sets 'require_request_header' + // to 'true', with an invalid header present in the request. + agentClient = newApiClient("http://"+listenAddr3, false) + h := agentClient.Headers() + h[consts.RequestHeaderName] = []string{"bogus"} + agentClient.SetHeaders(h) + req = agentClient.NewRequest("GET", "/v1/sys/health") + resp, err = agentClient.RawRequest(req) + if err == nil { + t.Fatalf("expected error") + } + if resp.StatusCode != http.StatusPreconditionFailed { + t.Fatalf("expected status code %d, not %d", http.StatusPreconditionFailed, resp.StatusCode) + } + + // Test against a listener configuration that sets 'require_request_header' + // to 'true', with the proper header present in the request. + agentClient = newApiClient("http://"+listenAddr3, true) + req = agentClient.NewRequest("GET", "/v1/sys/health") + request(t, agentClient, req, 200) +} + +// TestAgent_RequireAutoAuthWithForce ensures that the client exits with a +// non-zero code if configured to force the use of an auto-auth token without +// configuring the auto_auth block +func TestAgent_RequireAutoAuthWithForce(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + // Create a config file + config := fmt.Sprintf(` +cache { + use_auto_auth_token = "force" +} + +listener "tcp" { + address = "%s" + tls_disable = true +} +`, generateListenerAddress(t)) + + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Start the agent + ui, cmd := testAgentCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + code := cmd.Run([]string{"-config", configPath}) + if code == 0 { + t.Errorf("expected error code, but got 0: %d", code) + t.Logf("STDOUT from agent:\n%s", ui.OutputWriter.String()) + t.Logf("STDERR from agent:\n%s", ui.ErrorWriter.String()) + } +} + +// TestAgent_Template_UserAgent Validates that the User-Agent sent to Vault +// as part of Templating requests is correct. Uses the custom handler +// userAgentHandler struct defined in this test package, so that Vault validates the +// User-Agent on requests sent by Agent. 
+func TestAgent_Template_UserAgent(t *testing.T) { + //---------------------------------------------------- + // Start the server and agent + //---------------------------------------------------- + logger := logging.NewVaultLogger(hclog.Trace) + var h userAgentHandler + cluster := vault.NewTestCluster(t, + &vault.CoreConfig{ + Logger: logger, + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "kv": logicalKv.Factory, + }, + }, + &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.HandlerFunc( + func(properties *vault.HandlerProperties) http.Handler { + h.props = properties + h.userAgentToCheckFor = useragent.AgentTemplatingString() + h.pathToCheck = "/v1/secret/data" + h.requestMethodToCheck = "GET" + h.t = t + return &h + }), + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that agent picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Setenv(api.EnvVaultAddress, serverClient.Address()) + + roleIDPath, secretIDPath := setupAppRoleAndKVMounts(t, serverClient) + + // make a temp directory to hold renders. Each test will create a temp dir + // inside this one + tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDirRoot) + // create temp dir for this test run + tmpDir, err := os.MkdirTemp(tmpDirRoot, "TestAgent_Template_UserAgent") + if err != nil { + t.Fatal(err) + } + + // make some template files + var templatePaths []string + fileName := filepath.Join(tmpDir, "render_0.tmpl") + if err := os.WriteFile(fileName, []byte(templateContents(0)), 0o600); err != nil { + t.Fatal(err) + } + templatePaths = append(templatePaths, fileName) + + // build up the template config to be added to the Agent config.hcl file + var templateConfigStrings []string + for i, t := range templatePaths { + index := fmt.Sprintf("render_%d.json", i) + s := fmt.Sprintf(templateConfigString, t, tmpDir, index) + templateConfigStrings = append(templateConfigStrings, s) + } + + // Create a config file + config := ` +vault { + address = "%s" + tls_skip_verify = true +} + +auto_auth { + method "approle" { + mount_path = "auth/approle" + config = { + role_id_file_path = "%s" + secret_id_file_path = "%s" + remove_secret_id_file_after_reading = false + } + } +} + +%s +` + + // flatten the template configs + templateConfig := strings.Join(templateConfigStrings, " ") + + config = fmt.Sprintf(config, serverClient.Address(), roleIDPath, secretIDPath, templateConfig) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Start the agent + ui, cmd := testAgentCommand(t, logger) + cmd.client = serverClient + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + code := cmd.Run([]string{"-config", configPath}) + if code != 0 { + t.Errorf("non-zero return code when running agent: %d", code) + t.Logf("STDOUT from agent:\n%s", ui.OutputWriter.String()) + t.Logf("STDERR from agent:\n%s", ui.ErrorWriter.String()) + } + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + // We need to shut down the Agent command + defer func() { + cmd.ShutdownCh <- struct{}{} + wg.Wait() + }() + + verify := func(suffix string) { + t.Helper() + // We need 
to poll for a bit to give Agent time to render the + // templates. Without this, the test will attempt to read + // the temp dir before Agent has had time to render and will + // likely fail the test + tick := time.Tick(1 * time.Second) + timeout := time.After(10 * time.Second) + var err error + for { + select { + case <-timeout: + t.Fatalf("timed out waiting for templates to render, last error: %v", err) + case <-tick: + } + // Check for files rendered in the directory and break + // early for shutdown if we do have all the files + // rendered + + //---------------------------------------------------- + // Perform the tests + //---------------------------------------------------- + + if numFiles := testListFiles(t, tmpDir, ".json"); numFiles != len(templatePaths) { + err = fmt.Errorf("expected (%d) templates, got (%d)", len(templatePaths), numFiles) + continue + } + + for i := range templatePaths { + fileName := filepath.Join(tmpDir, fmt.Sprintf("render_%d.json", i)) + var c []byte + c, err = os.ReadFile(fileName) + if err != nil { + continue + } + if string(c) != templateRendered(i)+suffix { + err = fmt.Errorf("expected=%q, got=%q", templateRendered(i)+suffix, string(c)) + continue + } + } + return + } + } + + verify("") + + fileName = filepath.Join(tmpDir, "render_0.tmpl") + if err := os.WriteFile(fileName, []byte(templateContents(0)+"{}"), 0o600); err != nil { + t.Fatal(err) + } + + verify("{}") +} + +// TestAgent_Template tests rendering templates +func TestAgent_Template_Basic(t *testing.T) { + //---------------------------------------------------- + // Start the server and agent + //---------------------------------------------------- + logger := logging.NewVaultLogger(hclog.Trace) + cluster := vault.NewTestCluster(t, + &vault.CoreConfig{ + Logger: logger, + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "kv": logicalKv.Factory, + }, + }, + &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that agent picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Setenv(api.EnvVaultAddress, serverClient.Address()) + + roleIDPath, secretIDPath := setupAppRoleAndKVMounts(t, serverClient) + + // make a temp directory to hold renders. 
Each test will create a temp dir + // inside this one + tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDirRoot) + + // start test cases here + testCases := map[string]struct { + templateCount int + exitAfterAuth bool + }{ + "one": { + templateCount: 1, + }, + "one_with_exit": { + templateCount: 1, + exitAfterAuth: true, + }, + "many": { + templateCount: 15, + }, + "many_with_exit": { + templateCount: 13, + exitAfterAuth: true, + }, + } + + for tcname, tc := range testCases { + t.Run(tcname, func(t *testing.T) { + // create temp dir for this test run + tmpDir, err := os.MkdirTemp(tmpDirRoot, tcname) + if err != nil { + t.Fatal(err) + } + + // make some template files + var templatePaths []string + for i := 0; i < tc.templateCount; i++ { + fileName := filepath.Join(tmpDir, fmt.Sprintf("render_%d.tmpl", i)) + if err := os.WriteFile(fileName, []byte(templateContents(i)), 0o600); err != nil { + t.Fatal(err) + } + templatePaths = append(templatePaths, fileName) + } + + // build up the template config to be added to the Agent config.hcl file + var templateConfigStrings []string + for i, t := range templatePaths { + index := fmt.Sprintf("render_%d.json", i) + s := fmt.Sprintf(templateConfigString, t, tmpDir, index) + templateConfigStrings = append(templateConfigStrings, s) + } + + // Create a config file + config := ` +vault { + address = "%s" + tls_skip_verify = true +} + +auto_auth { + method "approle" { + mount_path = "auth/approle" + config = { + role_id_file_path = "%s" + secret_id_file_path = "%s" + remove_secret_id_file_after_reading = false + } + } +} + +%s + +%s +` + + // conditionally set the exit_after_auth flag + exitAfterAuth := "" + if tc.exitAfterAuth { + exitAfterAuth = "exit_after_auth = true" + } + + // flatten the template configs + templateConfig := strings.Join(templateConfigStrings, " ") + + config = fmt.Sprintf(config, serverClient.Address(), roleIDPath, secretIDPath, templateConfig, exitAfterAuth) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Start the agent + ui, cmd := testAgentCommand(t, logger) + cmd.client = serverClient + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + code := cmd.Run([]string{"-config", configPath}) + if code != 0 { + t.Errorf("non-zero return code when running agent: %d", code) + t.Logf("STDOUT from agent:\n%s", ui.OutputWriter.String()) + t.Logf("STDERR from agent:\n%s", ui.ErrorWriter.String()) + } + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + // if using exit_after_auth, then the command will have returned at the + // end and no longer be running. If we are not using exit_after_auth, then + // we need to shut down the command + if !tc.exitAfterAuth { + defer func() { + cmd.ShutdownCh <- struct{}{} + wg.Wait() + }() + } + + verify := func(suffix string) { + t.Helper() + // We need to poll for a bit to give Agent time to render the + // templates. 
Without this, the test will attempt to read + // the temp dir before Agent has had time to render and will + // likely fail the test + tick := time.Tick(1 * time.Second) + timeout := time.After(10 * time.Second) + var err error + for { + select { + case <-timeout: + t.Fatalf("timed out waiting for templates to render, last error: %v", err) + case <-tick: + } + // Check for files rendered in the directory and break + // early for shutdown if we do have all the files + // rendered + + //---------------------------------------------------- + // Perform the tests + //---------------------------------------------------- + + if numFiles := testListFiles(t, tmpDir, ".json"); numFiles != len(templatePaths) { + err = fmt.Errorf("expected (%d) templates, got (%d)", len(templatePaths), numFiles) + continue + } + + for i := range templatePaths { + fileName := filepath.Join(tmpDir, fmt.Sprintf("render_%d.json", i)) + var c []byte + c, err = os.ReadFile(fileName) + if err != nil { + continue + } + if string(c) != templateRendered(i)+suffix { + err = fmt.Errorf("expected=%q, got=%q", templateRendered(i)+suffix, string(c)) + continue + } + } + return + } + } + + verify("") + + for i := 0; i < tc.templateCount; i++ { + fileName := filepath.Join(tmpDir, fmt.Sprintf("render_%d.tmpl", i)) + if err := os.WriteFile(fileName, []byte(templateContents(i)+"{}"), 0o600); err != nil { + t.Fatal(err) + } + } + + verify("{}") + }) + } +} + +func setupAppRole(t *testing.T, serverClient *api.Client) (string, string) { + t.Helper() + // Enable the approle auth method + req := serverClient.NewRequest("POST", "/v1/sys/auth/approle") + req.BodyBytes = []byte(`{ + "type": "approle" + }`) + request(t, serverClient, req, 204) + + // Create a named role + req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role") + req.BodyBytes = []byte(`{ + "token_ttl": "5m", + "token_policies":"default,myapp-read", + "policies":"default,myapp-read" + }`) + request(t, serverClient, req, 204) + + // Fetch the RoleID of the named role + req = serverClient.NewRequest("GET", "/v1/auth/approle/role/test-role/role-id") + body := request(t, serverClient, req, 200) + data := body["data"].(map[string]interface{}) + roleID := data["role_id"].(string) + + // Get a SecretID issued against the named role + req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role/secret-id") + body = request(t, serverClient, req, 200) + data = body["data"].(map[string]interface{}) + secretID := data["secret_id"].(string) + + // Write the RoleID and SecretID to temp files + roleIDPath := makeTempFile(t, "role_id.txt", roleID+"\n") + secretIDPath := makeTempFile(t, "secret_id.txt", secretID+"\n") + t.Cleanup(func() { + os.Remove(roleIDPath) + os.Remove(secretIDPath) + }) + + return roleIDPath, secretIDPath +} + +func setupAppRoleAndKVMounts(t *testing.T, serverClient *api.Client) (string, string) { + roleIDPath, secretIDPath := setupAppRole(t, serverClient) + + // give test-role permissions to read the kv secret + req := serverClient.NewRequest("PUT", "/v1/sys/policy/myapp-read") + req.BodyBytes = []byte(`{ + "policy": "path \"secret/*\" { capabilities = [\"read\", \"list\"] }" + }`) + request(t, serverClient, req, 204) + + // setup the kv secrets + req = serverClient.NewRequest("POST", "/v1/sys/mounts/secret/tune") + req.BodyBytes = []byte(`{ + "options": {"version": "2"} + }`) + request(t, serverClient, req, 200) + + // Secret: myapp + req = serverClient.NewRequest("POST", "/v1/secret/data/myapp") + req.BodyBytes = []byte(`{ + "data": { + 
"username": "bar", + "password": "zap" + } + }`) + request(t, serverClient, req, 200) + + // Secret: myapp2 + req = serverClient.NewRequest("POST", "/v1/secret/data/myapp2") + req.BodyBytes = []byte(`{ + "data": { + "username": "barstuff", + "password": "zap" + } + }`) + request(t, serverClient, req, 200) + + // Secret: otherapp + req = serverClient.NewRequest("POST", "/v1/secret/data/otherapp") + req.BodyBytes = []byte(`{ + "data": { + "username": "barstuff", + "password": "zap", + "cert": "something" + } + }`) + request(t, serverClient, req, 200) + + return roleIDPath, secretIDPath +} + +// TestAgent_Template_VaultClientFromEnv tests that Vault Agent can read in its +// required `vault` client details from environment variables instead of config. +func TestAgent_Template_VaultClientFromEnv(t *testing.T) { + //---------------------------------------------------- + // Start the server and agent + //---------------------------------------------------- + logger := logging.NewVaultLogger(hclog.Trace) + cluster := vault.NewTestCluster(t, + &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "kv": logicalKv.Factory, + }, + }, + &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + serverClient := cluster.Cores[0].Client + + roleIDPath, secretIDPath := setupAppRoleAndKVMounts(t, serverClient) + + // make a temp directory to hold renders. Each test will create a temp dir + // inside this one + tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDirRoot) + + vaultAddr := "https://" + cluster.Cores[0].Listeners[0].Address.String() + testCases := map[string]struct { + env map[string]string + }{ + "VAULT_ADDR and VAULT_CACERT": { + env: map[string]string{ + api.EnvVaultAddress: vaultAddr, + api.EnvVaultCACert: cluster.CACertPEMFile, + }, + }, + "VAULT_ADDR and VAULT_CACERT_BYTES": { + env: map[string]string{ + api.EnvVaultAddress: vaultAddr, + api.EnvVaultCACertBytes: string(cluster.CACertPEM), + }, + }, + } + + for tcname, tc := range testCases { + t.Run(tcname, func(t *testing.T) { + for k, v := range tc.env { + t.Setenv(k, v) + } + tmpDir := t.TempDir() + + // Make a template. 
+ templateFile := filepath.Join(tmpDir, "render.tmpl") + if err := os.WriteFile(templateFile, []byte(templateContents(0)), 0o600); err != nil { + t.Fatal(err) + } + + // build up the template config to be added to the Agent config.hcl file + targetFile := filepath.Join(tmpDir, "render.json") + templateConfig := fmt.Sprintf(` +template { + source = "%s" + destination = "%s" +} + `, templateFile, targetFile) + + // Create a config file + config := ` +auto_auth { + method "approle" { + mount_path = "auth/approle" + config = { + role_id_file_path = "%s" + secret_id_file_path = "%s" + remove_secret_id_file_after_reading = false + } + } +} + +%s +` + + config = fmt.Sprintf(config, roleIDPath, secretIDPath, templateConfig) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Start the agent + ui, cmd := testAgentCommand(t, logger) + cmd.client = serverClient + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + code := cmd.Run([]string{"-config", configPath}) + if code != 0 { + t.Errorf("non-zero return code when running agent: %d", code) + t.Logf("STDOUT from agent:\n%s", ui.OutputWriter.String()) + t.Logf("STDERR from agent:\n%s", ui.ErrorWriter.String()) + } + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + defer func() { + cmd.ShutdownCh <- struct{}{} + wg.Wait() + }() + + // We need to poll for a bit to give Agent time to render the + // templates. Without this, the test will attempt to read + // the temp dir before Agent has had time to render and will + // likely fail the test + tick := time.Tick(1 * time.Second) + timeout := time.After(10 * time.Second) + for { + select { + case <-timeout: + t.Fatalf("timed out waiting for templates to render, last error: %v", err) + case <-tick: + } + + contents, err := os.ReadFile(targetFile) + if err != nil { + // If the file simply doesn't exist, continue waiting for + // the template rendering to complete. + if os.IsNotExist(err) { + continue + } + t.Fatal(err) + } + + if string(contents) != templateRendered(0) { + t.Fatalf("expected=%q, got=%q", templateRendered(0), string(contents)) + } + + // Success! Break out of the retry loop. + break + } + }) + } +} + +func testListFiles(t *testing.T, dir, extension string) int { + t.Helper() + + files, err := os.ReadDir(dir) + if err != nil { + t.Fatal(err) + } + var count int + for _, f := range files { + if filepath.Ext(f.Name()) == extension { + count++ + } + } + + return count +} + +// TestAgent_Template_ExitCounter tests that Vault Agent correctly renders all +// templates before exiting when the configuration uses exit_after_auth. This is +// similar to TestAgent_Template_Basic, but differs by using a consistent number +// of secrets from multiple sources, whereas the basic test could possibly +// generate a random number of secrets, but all using the same source. 
This test +// reproduces https://github.com/hashicorp/vault/issues/7883 +func TestAgent_Template_ExitCounter(t *testing.T) { + //---------------------------------------------------- + // Start the server and agent + //---------------------------------------------------- + logger := logging.NewVaultLogger(hclog.Trace) + cluster := vault.NewTestCluster(t, + &vault.CoreConfig{ + Logger: logger, + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "kv": logicalKv.Factory, + }, + }, + &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that agent picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Setenv(api.EnvVaultAddress, serverClient.Address()) + + roleIDPath, secretIDPath := setupAppRoleAndKVMounts(t, serverClient) + + // make a temp directory to hold renders. Each test will create a temp dir + // inside this one + tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDirRoot) + + // create temp dir for this test run + tmpDir, err := os.MkdirTemp(tmpDirRoot, "agent-test") + if err != nil { + t.Fatal(err) + } + + // Create a config file + config := ` +vault { + address = "%s" + tls_skip_verify = true +} + +auto_auth { + method "approle" { + mount_path = "auth/approle" + config = { + role_id_file_path = "%s" + secret_id_file_path = "%s" + remove_secret_id_file_after_reading = false + } + } +} + +template { + contents = "{{ with secret \"secret/myapp\" }}{{ range $k, $v := .Data.data }}{{ $v }}{{ end }}{{ end }}" + destination = "%s/render-pass.txt" +} + +template { + contents = "{{ with secret \"secret/myapp2\" }}{{ .Data.data.username}}{{ end }}" + destination = "%s/render-user.txt" +} + +template { + contents = < 0 { + h.failCount-- + h.t.Logf("%s failing GET request on %s, failures left: %d", time.Now(), req.URL.Path, h.failCount) + resp.WriteHeader(500) + return + } + h.t.Logf("passing GET request on %s", req.URL.Path) + } + vaulthttp.Handler.Handler(h.props).ServeHTTP(resp, req) +} + +// userAgentHandler makes it easy to test the User-Agent header received +// by Vault +type userAgentHandler struct { + props *vault.HandlerProperties + failCount int + userAgentToCheckFor string + pathToCheck string + requestMethodToCheck string + t *testing.T +} + +func (h *userAgentHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if req.Method == h.requestMethodToCheck && strings.Contains(req.RequestURI, h.pathToCheck) { + userAgent := req.UserAgent() + if !(userAgent == h.userAgentToCheckFor) { + h.t.Fatalf("User-Agent string not as expected. Expected to find %s, got %s", h.userAgentToCheckFor, userAgent) + } + } + vaulthttp.Handler.Handler(h.props).ServeHTTP(w, req) +} + +// TestAgent_Template_Retry verifies that the template server retries requests +// based on retry configuration. 
+func TestAgent_Template_Retry(t *testing.T) {
+ //----------------------------------------------------
+ // Start the server and agent
+ //----------------------------------------------------
+ logger := logging.NewVaultLogger(hclog.Trace)
+ var h handler
+ cluster := vault.NewTestCluster(t,
+ &vault.CoreConfig{
+ Logger: logger,
+ CredentialBackends: map[string]logical.Factory{
+ "approle": credAppRole.Factory,
+ },
+ LogicalBackends: map[string]logical.Factory{
+ "kv": logicalKv.Factory,
+ },
+ },
+ &vault.TestClusterOptions{
+ NumCores: 1,
+ HandlerFunc: vaulthttp.HandlerFunc(
+ func(properties *vault.HandlerProperties) http.Handler {
+ h.props = properties
+ h.t = t
+ return &h
+ }),
+ })
+ cluster.Start()
+ defer cluster.Cleanup()
+
+ vault.TestWaitActive(t, cluster.Cores[0].Core)
+ serverClient := cluster.Cores[0].Client
+
+ // Unset the environment variable so that agent picks up the right test
+ // cluster address
+ defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress))
+ os.Unsetenv(api.EnvVaultAddress)
+
+ methodConf, cleanup := prepAgentApproleKV(t, serverClient)
+ defer cleanup()
+
+ err := serverClient.Sys().TuneMount("secret", api.MountConfigInput{
+ Options: map[string]string{
+ "version": "2",
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = serverClient.Logical().Write("secret/data/otherapp", map[string]interface{}{
+ "data": map[string]interface{}{
+ "username": "barstuff",
+ "password": "zap",
+ "cert": "something",
+ },
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // make a temp directory to hold renders. Each test will create a temp dir
+ // inside this one
+ tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpDirRoot)
+
+ intRef := func(i int) *int {
+ return &i
+ }
+ // start test cases here
+ testCases := map[string]struct {
+ retries *int
+ expectError bool
+ }{
+ "none": {
+ retries: intRef(-1),
+ expectError: true,
+ },
+ "one": {
+ retries: intRef(1),
+ expectError: true,
+ },
+ "two": {
+ retries: intRef(2),
+ expectError: false,
+ },
+ "missing": {
+ retries: nil,
+ expectError: false,
+ },
+ "default": {
+ retries: intRef(0),
+ expectError: false,
+ },
+ }
+
+ for tcname, tc := range testCases {
+ t.Run(tcname, func(t *testing.T) {
+ // We fail the first 6 times. The consul-template code creates
+ // a Vault client with MaxRetries=2, so for every consul-template
+ // retry configured, it will in practice make up to 3 requests.
+ // Thus if consul-template is configured with "one" retry, it will
+ // fail given our failCount, but if configured with "two" retries,
+ // they will consume our 6th failure, and on the third (from its
+ // perspective) attempt, it will succeed.
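+ // Illustrative arithmetic (not from the original comment): with
+ // failCount=6 and up to 3 requests per consul-template attempt,
+ // num_retries=1 allows 2 attempts = up to 6 requests, all of which
+ // fail; num_retries=2 allows 3 attempts = up to 9 requests, so the
+ // 7th request (the first of the third attempt) succeeds.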
+ h.failCount = 6
+
+ // create temp dir for this test run
+ tmpDir, err := os.MkdirTemp(tmpDirRoot, tcname)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // make some template files
+ templatePath := filepath.Join(tmpDir, "render_0.tmpl")
+ if err := os.WriteFile(templatePath, []byte(templateContents(0)), 0o600); err != nil {
+ t.Fatal(err)
+ }
+ templateConfig := fmt.Sprintf(templateConfigString, templatePath, tmpDir, "render_0.json")
+
+ var retryConf string
+ if tc.retries != nil {
+ retryConf = fmt.Sprintf("retry { num_retries = %d }", *tc.retries)
+ }
+
+ config := fmt.Sprintf(`
+%s
+vault {
+ address = "%s"
+ %s
+ tls_skip_verify = true
+}
+%s
+template_config {
+ exit_on_retry_failure = true
+}
+`, methodConf, serverClient.Address(), retryConf, templateConfig)
+
+ configPath := makeTempFile(t, "config.hcl", config)
+ defer os.Remove(configPath)
+
+ // Start the agent
+ _, cmd := testAgentCommand(t, logger)
+ cmd.startedCh = make(chan struct{})
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+ var code int
+ go func() {
+ code = cmd.Run([]string{"-config", configPath})
+ wg.Done()
+ }()
+
+ select {
+ case <-cmd.startedCh:
+ case <-time.After(5 * time.Second):
+ t.Errorf("timeout")
+ }
+
+ verify := func() error {
+ t.Helper()
+ // We need to poll for a bit to give Agent time to render the
+ // templates. Without this, the test will attempt to read
+ // the temp dir before Agent has had time to render and will
+ // likely fail the test
+ tick := time.Tick(1 * time.Second)
+ timeout := time.After(15 * time.Second)
+ var err error
+ for {
+ select {
+ case <-timeout:
+ return fmt.Errorf("timed out waiting for templates to render, last error: %v", err)
+ case <-tick:
+ }
+ // Check for files rendered in the directory and break
+ // early for shutdown if we do have all the files
+ // rendered
+
+ //----------------------------------------------------
+ // Perform the tests
+ //----------------------------------------------------
+
+ if numFiles := testListFiles(t, tmpDir, ".json"); numFiles != 1 {
+ err = fmt.Errorf("expected 1 template, got (%d)", numFiles)
+ continue
+ }
+
+ fileName := filepath.Join(tmpDir, "render_0.json")
+ var c []byte
+ c, err = os.ReadFile(fileName)
+ if err != nil {
+ continue
+ }
+ if string(c) != templateRendered(0) {
+ err = fmt.Errorf("expected=%q, got=%q", templateRendered(0), string(c))
+ continue
+ }
+ return nil
+ }
+ }
+
+ err = verify()
+ close(cmd.ShutdownCh)
+ wg.Wait()
+
+ switch {
+ case (code != 0 || err != nil) && tc.expectError:
+ case code == 0 && err == nil && !tc.expectError:
+ default:
+ t.Fatalf("%s expectError=%v error=%v code=%d", tcname, tc.expectError, err, code)
+ }
+ })
+ }
+}
+
+// prepAgentApproleKV configures a Vault instance for approle authentication,
+// such that the resulting token will have global permissions across /kv
+// and /secret mounts. Returns the auto_auth config stanza to set up an Agent
+// to connect using approle.
+func prepAgentApproleKV(t *testing.T, client *api.Client) (string, func()) {
+ t.Helper()
+
+ policyAutoAuthAppRole := `
+path "/kv/*" {
+ capabilities = ["create", "read", "update", "delete", "list"]
+}
+path "/secret/*" {
+ capabilities = ["create", "read", "update", "delete", "list"]
+}
+`
+ // Add a kv-admin policy
+ if err := client.Sys().PutPolicy("test-autoauth", policyAutoAuthAppRole); err != nil {
+ t.Fatal(err)
+ }
+
+ // Enable approle
+ err := client.Sys().EnableAuthWithOptions("approle", &api.EnableAuthOptions{
+ Type: "approle",
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = client.Logical().Write("auth/approle/role/test1", map[string]interface{}{
+ "bind_secret_id": "true",
+ "token_ttl": "1h",
+ "token_max_ttl": "2h",
+ "policies": []string{"test-autoauth"},
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err := client.Logical().Write("auth/approle/role/test1/secret-id", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ secretID := resp.Data["secret_id"].(string)
+ secretIDFile := makeTempFile(t, "secret_id.txt", secretID+"\n")
+
+ resp, err = client.Logical().Read("auth/approle/role/test1/role-id")
+ if err != nil {
+ t.Fatal(err)
+ }
+ roleID := resp.Data["role_id"].(string)
+ roleIDFile := makeTempFile(t, "role_id.txt", roleID+"\n")
+
+ config := fmt.Sprintf(`
+auto_auth {
+ method "approle" {
+ mount_path = "auth/approle"
+ config = {
+ role_id_file_path = "%s"
+ secret_id_file_path = "%s"
+ remove_secret_id_file_after_reading = false
+ }
+ }
+}
+`, roleIDFile, secretIDFile)
+
+ cleanup := func() {
+ _ = os.Remove(roleIDFile)
+ _ = os.Remove(secretIDFile)
+ }
+ return config, cleanup
+}
+
+// TestAgent_AutoAuth_UserAgent tests that the User-Agent sent
+// to Vault by Vault Agent is correct when performing Auto-Auth.
+// Uses the custom handler userAgentHandler (defined above) so
+// that Vault validates the User-Agent on requests sent by Agent.
+func TestAgent_AutoAuth_UserAgent(t *testing.T) {
+ logger := logging.NewVaultLogger(hclog.Trace)
+ var h userAgentHandler
+ cluster := vault.NewTestCluster(t, &vault.CoreConfig{
+ Logger: logger,
+ CredentialBackends: map[string]logical.Factory{
+ "approle": credAppRole.Factory,
+ },
+ }, &vault.TestClusterOptions{
+ NumCores: 1,
+ HandlerFunc: vaulthttp.HandlerFunc(
+ func(properties *vault.HandlerProperties) http.Handler {
+ h.props = properties
+ h.userAgentToCheckFor = useragent.AgentAutoAuthString()
+ h.requestMethodToCheck = "PUT"
+ h.pathToCheck = "auth/approle/login"
+ h.t = t
+ return &h
+ }),
+ })
+ cluster.Start()
+ defer cluster.Cleanup()
+
+ serverClient := cluster.Cores[0].Client
+
+ // Enable the approle auth method
+ roleIDPath, secretIDPath := setupAppRole(t, serverClient)
+
+ sinkf, err := os.CreateTemp("", "sink.test.")
+ if err != nil {
+ t.Fatal(err)
+ }
+ sink := sinkf.Name()
+ sinkf.Close()
+ os.Remove(sink)
+
+ autoAuthConfig := fmt.Sprintf(`
+auto_auth {
+ method "approle" {
+ mount_path = "auth/approle"
+ config = {
+ role_id_file_path = "%s"
+ secret_id_file_path = "%s"
+ }
+ }
+
+ sink "file" {
+ config = {
+ path = "%s"
+ }
+ }
+}`, roleIDPath, secretIDPath, sink)
+
+ listenAddr := generateListenerAddress(t)
+ listenConfig := fmt.Sprintf(`
+listener "tcp" {
+ address = "%s"
+ tls_disable = true
+}
+`, listenAddr)
+
+ config := fmt.Sprintf(`
+vault {
+ address = "%s"
+ tls_skip_verify = true
+}
+api_proxy {
+ use_auto_auth_token = true
+}
+%s
+%s
+`, serverClient.Address(), listenConfig, autoAuthConfig)
+ configPath := makeTempFile(t, "config.hcl", config)
+ defer os.Remove(configPath)
+
+ // Unset the environment variable so that agent picks up the right test
+ // cluster address
+ defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress))
+ os.Unsetenv(api.EnvVaultAddress)
+
+ // Start the agent
+ _, cmd := testAgentCommand(t, logger)
+ cmd.startedCh = make(chan struct{})
+
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+ go func() {
+ cmd.Run([]string{"-config", configPath})
+ wg.Done()
+ }()
+
+ select {
+ case <-cmd.startedCh:
+ case <-time.After(5 * time.Second):
+ t.Errorf("timeout")
+ }
+
+ // Validate that the auto-auth token has been correctly obtained
+ // and works for LookupSelf
+ conf := api.DefaultConfig()
+ conf.Address = "http://" + listenAddr
+ agentClient, err := api.NewClient(conf)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ agentClient.SetToken("")
+ err = agentClient.SetAddress("http://" + listenAddr)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Wait for the token to be sent to sinks and be available to be used
+ time.Sleep(5 * time.Second)
+
+ req := agentClient.NewRequest("GET", "/v1/auth/token/lookup-self")
+ request(t, agentClient, req, 200)
+
+ close(cmd.ShutdownCh)
+ wg.Wait()
+}
+
+// TestAgent_APIProxyWithoutCache_UserAgent tests that the User-Agent sent
+// to Vault by Vault Agent is correct using the API proxy without
+// the cache configured. Uses the custom handler
+// userAgentHandler struct defined in this test package, so that Vault validates the
+// User-Agent on requests sent by Agent.
+func TestAgent_APIProxyWithoutCache_UserAgent(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + userAgentForProxiedClient := "proxied-client" + var h userAgentHandler + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.HandlerFunc( + func(properties *vault.HandlerProperties) http.Handler { + h.props = properties + h.userAgentToCheckFor = useragent.AgentProxyStringWithProxiedUserAgent(userAgentForProxiedClient) + h.pathToCheck = "/v1/auth/token/lookup-self" + h.requestMethodToCheck = "GET" + h.t = t + return &h + }), + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that agent picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} +%s +`, serverClient.Address(), listenConfig) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Start the agent + _, cmd := testAgentCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + agentClient, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + agentClient.AddHeader("User-Agent", userAgentForProxiedClient) + agentClient.SetToken(serverClient.Token()) + agentClient.SetMaxRetries(0) + err = agentClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + + _, err = agentClient.Auth().Token().LookupSelf() + if err != nil { + t.Fatal(err) + } + + close(cmd.ShutdownCh) + wg.Wait() +} + +// TestAgent_APIProxyWithCache_UserAgent tests that the User-Agent sent +// to Vault by Vault Agent is correct using the API proxy with +// the cache configured. Uses the custom handler +// userAgentHandler struct defined in this test package, so that Vault validates the +// User-Agent on requests sent by Agent. 
+func TestAgent_APIProxyWithCache_UserAgent(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + userAgentForProxiedClient := "proxied-client" + var h userAgentHandler + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.HandlerFunc( + func(properties *vault.HandlerProperties) http.Handler { + h.props = properties + h.userAgentToCheckFor = useragent.AgentProxyStringWithProxiedUserAgent(userAgentForProxiedClient) + h.pathToCheck = "/v1/auth/token/lookup-self" + h.requestMethodToCheck = "GET" + h.t = t + return &h + }), + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that agent picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + cacheConfig := ` +cache { +}` + + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} +%s +%s +`, serverClient.Address(), listenConfig, cacheConfig) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Start the agent + _, cmd := testAgentCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + agentClient, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + agentClient.AddHeader("User-Agent", userAgentForProxiedClient) + agentClient.SetToken(serverClient.Token()) + agentClient.SetMaxRetries(0) + err = agentClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + + _, err = agentClient.Auth().Token().LookupSelf() + if err != nil { + t.Fatal(err) + } + + close(cmd.ShutdownCh) + wg.Wait() +} + +func TestAgent_Cache_DynamicSecret(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that agent picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + cacheConfig := ` +cache { +} +` + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} +%s +%s +`, serverClient.Address(), cacheConfig, listenConfig) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Start the agent + _, cmd := testAgentCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + agentClient, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + agentClient.SetToken(serverClient.Token()) + agentClient.SetMaxRetries(0) + err = agentClient.SetAddress("http://" + listenAddr) + if err != 
nil { + t.Fatal(err) + } + + renewable := true + tokenCreateRequest := &api.TokenCreateRequest{ + Policies: []string{"default"}, + TTL: "30m", + Renewable: &renewable, + } + + // This was the simplest test I could find to trigger the caching behaviour, + // i.e. the most concise I could make the test that I can tell + // creating an orphan token returns Auth, is renewable, and isn't a token + // that's managed elsewhere (since it's an orphan) + secret, err := agentClient.Auth().Token().CreateOrphan(tokenCreateRequest) + if err != nil { + t.Fatal(err) + } + if secret == nil || secret.Auth == nil { + t.Fatalf("secret not as expected: %v", secret) + } + + token := secret.Auth.ClientToken + + secret, err = agentClient.Auth().Token().CreateOrphan(tokenCreateRequest) + if err != nil { + t.Fatal(err) + } + if secret == nil || secret.Auth == nil { + t.Fatalf("secret not as expected: %v", secret) + } + + token2 := secret.Auth.ClientToken + + if token != token2 { + t.Fatalf("token create response not cached when it should have been, as tokens differ") + } + + close(cmd.ShutdownCh) + wg.Wait() +} + +func TestAgent_ApiProxy_Retry(t *testing.T) { + //---------------------------------------------------- + // Start the server and agent + //---------------------------------------------------- + logger := logging.NewVaultLogger(hclog.Trace) + var h handler + cluster := vault.NewTestCluster(t, + &vault.CoreConfig{ + Logger: logger, + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "kv": logicalKv.Factory, + }, + }, + &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.HandlerFunc(func(properties *vault.HandlerProperties) http.Handler { + h.props = properties + h.t = t + return &h + }), + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that agent picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + _, err := serverClient.Logical().Write("secret/foo", map[string]interface{}{ + "bar": "baz", + }) + if err != nil { + t.Fatal(err) + } + + intRef := func(i int) *int { + return &i + } + // start test cases here + testCases := map[string]struct { + retries *int + expectError bool + }{ + "none": { + retries: intRef(-1), + expectError: true, + }, + "one": { + retries: intRef(1), + expectError: true, + }, + "two": { + retries: intRef(2), + expectError: false, + }, + "missing": { + retries: nil, + expectError: false, + }, + "default": { + retries: intRef(0), + expectError: false, + }, + } + + for tcname, tc := range testCases { + t.Run(tcname, func(t *testing.T) { + h.failCount = 2 + + cacheConfig := ` +cache { +} +` + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + var retryConf string + if tc.retries != nil { + retryConf = fmt.Sprintf("retry { num_retries = %d }", *tc.retries) + } + + config := fmt.Sprintf(` +vault { + address = "%s" + %s + tls_skip_verify = true +} +%s +%s +`, serverClient.Address(), retryConf, cacheConfig, listenConfig) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Start the agent + _, cmd := testAgentCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + 
cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + client, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + client.SetToken(serverClient.Token()) + client.SetMaxRetries(0) + err = client.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + secret, err := client.Logical().Read("secret/foo") + switch { + case (err != nil || secret == nil) && tc.expectError: + case (err == nil || secret != nil) && !tc.expectError: + default: + t.Fatalf("%s expectError=%v error=%v secret=%v", tcname, tc.expectError, err, secret) + } + if secret != nil && secret.Data["foo"] != nil { + val := secret.Data["foo"].(map[string]interface{}) + if !reflect.DeepEqual(val, map[string]interface{}{"bar": "baz"}) { + t.Fatalf("expected key 'foo' to yield bar=baz, got: %v", val) + } + } + time.Sleep(time.Second) + + close(cmd.ShutdownCh) + wg.Wait() + }) + } +} + +func TestAgent_TemplateConfig_ExitOnRetryFailure(t *testing.T) { + //---------------------------------------------------- + // Start the server and agent + //---------------------------------------------------- + logger := logging.NewVaultLogger(hclog.Trace) + cluster := vault.NewTestCluster(t, + &vault.CoreConfig{ + // Logger: logger, + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "kv": logicalKv.Factory, + }, + }, + &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that agent picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + autoAuthConfig, cleanup := prepAgentApproleKV(t, serverClient) + defer cleanup() + + err := serverClient.Sys().TuneMount("secret", api.MountConfigInput{ + Options: map[string]string{ + "version": "2", + }, + }) + if err != nil { + t.Fatal(err) + } + + _, err = serverClient.Logical().Write("secret/data/otherapp", map[string]interface{}{ + "data": map[string]interface{}{ + "username": "barstuff", + "password": "zap", + "cert": "something", + }, + }) + if err != nil { + t.Fatal(err) + } + + // make a temp directory to hold renders. Each test will create a temp dir + // inside this one + tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDirRoot) + + // Note that missing key is different from a non-existent secret. A missing + // key (2xx response with missing keys in the response map) can still yield + // a successful render unless error_on_missing_key is specified, whereas a + // missing secret (4xx response) always results in an error. 
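+ // As a concrete illustration (inferred from the fixtures below):
+ // secret/data/otherapp was written above with only username, password,
+ // and cert, so a template referencing .Data.data.foo hits the
+ // missing-key case (2xx response), while badTemplateContent reads
+ // secret/non-existent and gets the 4xx, always-an-error case.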
+ missingKeyTemplateContent := `{{- with secret "secret/otherapp"}}{"secret": "other", +{{- if .Data.data.foo}}"foo":"{{ .Data.data.foo}}"{{- end }}} +{{- end }}` + missingKeyTemplateRender := `{"secret": "other",}` + + badTemplateContent := `{{- with secret "secret/non-existent"}}{"secret": "other", +{{- if .Data.data.foo}}"foo":"{{ .Data.data.foo}}"{{- end }}} +{{- end }}` + + testCases := map[string]struct { + exitOnRetryFailure *bool + templateContents string + expectTemplateRender string + templateErrorOnMissingKey bool + expectError bool + expectExitFromError bool + }{ + "true, no template error": { + exitOnRetryFailure: pointerutil.BoolPtr(true), + templateContents: templateContents(0), + expectTemplateRender: templateRendered(0), + templateErrorOnMissingKey: false, + expectError: false, + expectExitFromError: false, + }, + "true, with non-existent secret": { + exitOnRetryFailure: pointerutil.BoolPtr(true), + templateContents: badTemplateContent, + expectTemplateRender: "", + templateErrorOnMissingKey: false, + expectError: true, + expectExitFromError: true, + }, + "true, with missing key": { + exitOnRetryFailure: pointerutil.BoolPtr(true), + templateContents: missingKeyTemplateContent, + expectTemplateRender: missingKeyTemplateRender, + templateErrorOnMissingKey: false, + expectError: false, + expectExitFromError: false, + }, + "true, with missing key, with error_on_missing_key": { + exitOnRetryFailure: pointerutil.BoolPtr(true), + templateContents: missingKeyTemplateContent, + expectTemplateRender: "", + templateErrorOnMissingKey: true, + expectError: true, + expectExitFromError: true, + }, + "false, no template error": { + exitOnRetryFailure: pointerutil.BoolPtr(false), + templateContents: templateContents(0), + expectTemplateRender: templateRendered(0), + templateErrorOnMissingKey: false, + expectError: false, + expectExitFromError: false, + }, + "false, with non-existent secret": { + exitOnRetryFailure: pointerutil.BoolPtr(false), + templateContents: badTemplateContent, + expectTemplateRender: "", + templateErrorOnMissingKey: false, + expectError: true, + expectExitFromError: false, + }, + "false, with missing key": { + exitOnRetryFailure: pointerutil.BoolPtr(false), + templateContents: missingKeyTemplateContent, + expectTemplateRender: missingKeyTemplateRender, + templateErrorOnMissingKey: false, + expectError: false, + expectExitFromError: false, + }, + "false, with missing key, with error_on_missing_key": { + exitOnRetryFailure: pointerutil.BoolPtr(false), + templateContents: missingKeyTemplateContent, + expectTemplateRender: missingKeyTemplateRender, + templateErrorOnMissingKey: true, + expectError: true, + expectExitFromError: false, + }, + "missing": { + exitOnRetryFailure: nil, + templateContents: templateContents(0), + expectTemplateRender: templateRendered(0), + templateErrorOnMissingKey: false, + expectError: false, + expectExitFromError: false, + }, + } + + for tcName, tc := range testCases { + t.Run(tcName, func(t *testing.T) { + // create temp dir for this test run + tmpDir, err := os.MkdirTemp(tmpDirRoot, tcName) + if err != nil { + t.Fatal(err) + } + + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + var exitOnRetryFailure string + if tc.exitOnRetryFailure != nil { + exitOnRetryFailure = fmt.Sprintf("exit_on_retry_failure = %t", *tc.exitOnRetryFailure) + } + templateConfig := fmt.Sprintf(` +template_config = { + %s +} +`, exitOnRetryFailure) + + template := 
fmt.Sprintf(` +template { + contents = < 0, "no files were found") + + for _, p := range m { + f, err := os.Open(p) + require.NoError(t, err) + + fs := bufio.NewScanner(f) + fs.Split(bufio.ScanLines) + + for fs.Scan() { + s := fs.Text() + entry := make(map[string]string) + err := json.Unmarshal([]byte(s), &entry) + require.NoError(t, err) + v, ok := entry["@message"] + if !ok { + continue + } + if v == runnerLogMessage { + found = true + break + } + } + } + + require.Truef(t, found, "unable to find consul-template partial message in logs: %s", runnerLogMessage) +} + +// Get a randomly assigned port and then free it again before returning it. +// There is still a race when trying to use it, but should work better +// than a static port. +func generateListenerAddress(t *testing.T) string { + t.Helper() + + ln1, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + listenAddr := ln1.Addr().String() + ln1.Close() + return listenAddr +} diff --git a/command/agentproxyshared/auth/alicloud/alicloud.go b/command/agentproxyshared/auth/alicloud/alicloud.go new file mode 100644 index 0000000..7245976 --- /dev/null +++ b/command/agentproxyshared/auth/alicloud/alicloud.go @@ -0,0 +1,237 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package alicloud + +import ( + "context" + "errors" + "fmt" + "net/http" + "reflect" + "sync" + "time" + + aliCloudAuth "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/providers" + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault-plugin-auth-alicloud/tools" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/auth" +) + +/* +Creds can be inferred from instance metadata, and those creds +expire every 60 minutes, so we're going to need to poll for new +creds. Since we're polling anyways, let's poll once a minute so +all changes can be picked up rather quickly. This is configurable, +however. +*/ +const defaultCredCheckFreqSeconds = 60 + +func NewAliCloudAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { + if conf == nil { + return nil, errors.New("empty config") + } + if conf.Config == nil { + return nil, errors.New("empty config data") + } + + a := &alicloudMethod{ + logger: conf.Logger, + mountPath: conf.MountPath, + credsFound: make(chan struct{}), + stopCh: make(chan struct{}), + } + + // Build the required information we'll need to create a client. + if roleRaw, ok := conf.Config["role"]; !ok { + return nil, errors.New("'role' is required but is not provided") + } else { + if a.role, ok = roleRaw.(string); !ok { + return nil, errors.New("could not convert 'role' config value to string") + } + } + if regionRaw, ok := conf.Config["region"]; !ok { + return nil, errors.New("'region' is required but is not provided") + } else { + if a.region, ok = regionRaw.(string); !ok { + return nil, errors.New("could not convert 'region' config value to string") + } + } + + // Check for an optional custom frequency at which we should poll for creds. + credCheckFreqSec := defaultCredCheckFreqSeconds + if checkFreqRaw, ok := conf.Config["credential_poll_interval"]; ok { + if credFreq, ok := checkFreqRaw.(int); ok { + credCheckFreqSec = credFreq + } else { + return nil, errors.New("could not convert 'credential_poll_interval' config value to int") + } + } + + // Build the optional, configuration-based piece of the credential chain. 
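+ // (The chain assembled further below tries environment variables first,
+ // then this static configuration, then instance metadata; each optional
+ // config key simply fills in one field of credConfig.)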
+ credConfig := &providers.Configuration{} + + if accessKeyRaw, ok := conf.Config["access_key"]; ok { + if credConfig.AccessKeyID, ok = accessKeyRaw.(string); !ok { + return nil, errors.New("could not convert 'access_key' config value to string") + } + } + + if accessSecretRaw, ok := conf.Config["access_secret"]; ok { + if credConfig.AccessKeySecret, ok = accessSecretRaw.(string); !ok { + return nil, errors.New("could not convert 'access_secret' config value to string") + } + } + + if accessTokenRaw, ok := conf.Config["access_token"]; ok { + if credConfig.AccessKeyStsToken, ok = accessTokenRaw.(string); !ok { + return nil, errors.New("could not convert 'access_token' config value to string") + } + } + + if roleArnRaw, ok := conf.Config["role_arn"]; ok { + if credConfig.RoleArn, ok = roleArnRaw.(string); !ok { + return nil, errors.New("could not convert 'role_arn' config value to string") + } + } + + if roleSessionNameRaw, ok := conf.Config["role_session_name"]; ok { + if credConfig.RoleSessionName, ok = roleSessionNameRaw.(string); !ok { + return nil, errors.New("could not convert 'role_session_name' config value to string") + } + } + + if roleSessionExpirationRaw, ok := conf.Config["role_session_expiration"]; ok { + if roleSessionExpiration, ok := roleSessionExpirationRaw.(int); !ok { + return nil, errors.New("could not convert 'role_session_expiration' config value to int") + } else { + credConfig.RoleSessionExpiration = &roleSessionExpiration + } + } + + if privateKeyRaw, ok := conf.Config["private_key"]; ok { + if credConfig.PrivateKey, ok = privateKeyRaw.(string); !ok { + return nil, errors.New("could not convert 'private_key' config value to string") + } + } + + if publicKeyIdRaw, ok := conf.Config["public_key_id"]; ok { + if credConfig.PublicKeyID, ok = publicKeyIdRaw.(string); !ok { + return nil, errors.New("could not convert 'public_key_id' config value to string") + } + } + + if sessionExpirationRaw, ok := conf.Config["session_expiration"]; ok { + if sessionExpiration, ok := sessionExpirationRaw.(int); !ok { + return nil, errors.New("could not convert 'session_expiration' config value to int") + } else { + credConfig.SessionExpiration = &sessionExpiration + } + } + + if roleNameRaw, ok := conf.Config["role_name"]; ok { + if credConfig.RoleName, ok = roleNameRaw.(string); !ok { + return nil, errors.New("could not convert 'role_name' config value to string") + } + } + + credentialChain := []providers.Provider{ + providers.NewEnvCredentialProvider(), + providers.NewConfigurationCredentialProvider(credConfig), + providers.NewInstanceMetadataProvider(), + } + credProvider := providers.NewChainProvider(credentialChain) + + // Do an initial population of the creds because we want to err right away if we can't + // even get a first set. + lastCreds, err := credProvider.Retrieve() + if err != nil { + return nil, err + } + a.lastCreds = lastCreds + + go a.pollForCreds(credProvider, credCheckFreqSec) + + return a, nil +} + +type alicloudMethod struct { + logger hclog.Logger + mountPath string + + // These parameters are fed into building login data. + role string + region string + + // These are used to share the latest creds safely across goroutines. + credLock sync.Mutex + lastCreds aliCloudAuth.Credential + + // Notifies the outer environment that it should call Authenticate again. + credsFound chan struct{} + + // Detects that the outer environment is closing. 
+ stopCh chan struct{} +} + +func (a *alicloudMethod) Authenticate(context.Context, *api.Client) (string, http.Header, map[string]interface{}, error) { + a.credLock.Lock() + defer a.credLock.Unlock() + + a.logger.Trace("beginning authentication") + data, err := tools.GenerateLoginData(a.role, a.lastCreds, a.region) + if err != nil { + return "", nil, nil, err + } + return fmt.Sprintf("%s/login", a.mountPath), nil, data, nil +} + +func (a *alicloudMethod) NewCreds() chan struct{} { + return a.credsFound +} + +func (a *alicloudMethod) CredSuccess() {} + +func (a *alicloudMethod) Shutdown() { + close(a.credsFound) + close(a.stopCh) +} + +func (a *alicloudMethod) pollForCreds(credProvider providers.Provider, frequencySeconds int) { + ticker := time.NewTicker(time.Duration(frequencySeconds) * time.Second) + defer ticker.Stop() + for { + select { + case <-a.stopCh: + a.logger.Trace("shutdown triggered, stopping alicloud auth handler") + return + case <-ticker.C: + if err := a.checkCreds(credProvider); err != nil { + a.logger.Warn("unable to retrieve current creds, retaining last creds", "error", err) + } + } + } +} + +func (a *alicloudMethod) checkCreds(credProvider providers.Provider) error { + a.credLock.Lock() + defer a.credLock.Unlock() + + a.logger.Trace("checking for new credentials") + currentCreds, err := credProvider.Retrieve() + if err != nil { + return err + } + // These will always have different pointers regardless of whether their + // values are identical, hence the use of DeepEqual. + if reflect.DeepEqual(currentCreds, a.lastCreds) { + a.logger.Trace("credentials are unchanged") + return nil + } + a.lastCreds = currentCreds + a.logger.Trace("new credentials detected, triggering Authenticate") + a.credsFound <- struct{}{} + return nil +} diff --git a/command/agentproxyshared/auth/approle/approle.go b/command/agentproxyshared/auth/approle/approle.go new file mode 100644 index 0000000..9f33980 --- /dev/null +++ b/command/agentproxyshared/auth/approle/approle.go @@ -0,0 +1,214 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package approle + +import ( + "context" + "errors" + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/auth" +) + +type approleMethod struct { + logger hclog.Logger + mountPath string + + roleIDFilePath string + secretIDFilePath string + cachedRoleID string + cachedSecretID string + removeSecretIDFileAfterReading bool + secretIDResponseWrappingPath string +} + +func NewApproleAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { + if conf == nil { + return nil, errors.New("empty config") + } + if conf.Config == nil { + return nil, errors.New("empty config data") + } + + a := &approleMethod{ + logger: conf.Logger, + mountPath: conf.MountPath, + removeSecretIDFileAfterReading: true, + } + + roleIDFilePathRaw, ok := conf.Config["role_id_file_path"] + if !ok { + return nil, errors.New("missing 'role_id_file_path' value") + } + a.roleIDFilePath, ok = roleIDFilePathRaw.(string) + if !ok { + return nil, errors.New("could not convert 'role_id_file_path' config value to string") + } + if a.roleIDFilePath == "" { + return nil, errors.New("'role_id_file_path' value is empty") + } + + secretIDFilePathRaw, ok := conf.Config["secret_id_file_path"] + if ok { + a.secretIDFilePath, ok = secretIDFilePathRaw.(string) + if !ok { + return nil, errors.New("could not convert 'secret_id_file_path' config value to string") + } + if a.secretIDFilePath == "" { + return a, nil + } + + removeSecretIDFileAfterReadingRaw, ok := conf.Config["remove_secret_id_file_after_reading"] + if ok { + removeSecretIDFileAfterReading, err := parseutil.ParseBool(removeSecretIDFileAfterReadingRaw) + if err != nil { + return nil, fmt.Errorf("error parsing 'remove_secret_id_file_after_reading' value: %w", err) + } + a.removeSecretIDFileAfterReading = removeSecretIDFileAfterReading + } + + secretIDResponseWrappingPathRaw, ok := conf.Config["secret_id_response_wrapping_path"] + if ok { + a.secretIDResponseWrappingPath, ok = secretIDResponseWrappingPathRaw.(string) + if !ok { + return nil, errors.New("could not convert 'secret_id_response_wrapping_path' config value to string") + } + if a.secretIDResponseWrappingPath == "" { + return nil, errors.New("'secret_id_response_wrapping_path' value is empty") + } + } + } + + return a, nil +} + +func (a *approleMethod) Authenticate(ctx context.Context, client *api.Client) (string, http.Header, map[string]interface{}, error) { + if _, err := os.Stat(a.roleIDFilePath); err == nil { + roleID, err := ioutil.ReadFile(a.roleIDFilePath) + if err != nil { + if a.cachedRoleID == "" { + return "", nil, nil, fmt.Errorf("error reading role ID file and no cached role ID known: %w", err) + } + a.logger.Warn("error reading role ID file", "error", err) + } + if len(roleID) == 0 { + if a.cachedRoleID == "" { + return "", nil, nil, errors.New("role ID file empty and no cached role ID known") + } + a.logger.Warn("role ID file exists but read empty value, re-using cached value") + } else { + a.cachedRoleID = strings.TrimSpace(string(roleID)) + } + } + + if a.cachedRoleID == "" { + return "", nil, nil, errors.New("no known role ID") + } + + if a.secretIDFilePath == "" { + return fmt.Sprintf("%s/login", a.mountPath), nil, map[string]interface{}{ + "role_id": a.cachedRoleID, + }, nil + } + + if _, err := os.Stat(a.secretIDFilePath); err == nil { + secretID, err := 
ioutil.ReadFile(a.secretIDFilePath) + if err != nil { + if a.cachedSecretID == "" { + return "", nil, nil, fmt.Errorf("error reading secret ID file and no cached secret ID known: %w", err) + } + a.logger.Warn("error reading secret ID file", "error", err) + } + if len(secretID) == 0 { + if a.cachedSecretID == "" { + return "", nil, nil, errors.New("secret ID file empty and no cached secret ID known") + } + a.logger.Warn("secret ID file exists but read empty value, re-using cached value") + } else { + stringSecretID := strings.TrimSpace(string(secretID)) + if a.secretIDResponseWrappingPath != "" { + clonedClient, err := client.Clone() + if err != nil { + return "", nil, nil, fmt.Errorf("error cloning client to unwrap secret ID: %w", err) + } + clonedClient.SetToken(stringSecretID) + // Validate the creation path + resp, err := clonedClient.Logical().ReadWithContext(ctx, "sys/wrapping/lookup") + if err != nil { + return "", nil, nil, fmt.Errorf("error looking up wrapped secret ID: %w", err) + } + if resp == nil { + return "", nil, nil, errors.New("response nil when looking up wrapped secret ID") + } + if resp.Data == nil { + return "", nil, nil, errors.New("data in response nil when looking up wrapped secret ID") + } + creationPathRaw, ok := resp.Data["creation_path"] + if !ok { + return "", nil, nil, errors.New("creation_path in response nil when looking up wrapped secret ID") + } + creationPath, ok := creationPathRaw.(string) + if !ok { + return "", nil, nil, errors.New("creation_path in response could not be parsed as string when looking up wrapped secret ID") + } + if creationPath != a.secretIDResponseWrappingPath { + a.logger.Error("SECURITY: unable to validate wrapping token creation path", "expected", a.secretIDResponseWrappingPath, "found", creationPath) + return "", nil, nil, errors.New("unable to validate wrapping token creation path") + } + // Now get the secret ID + resp, err = clonedClient.Logical().UnwrapWithContext(ctx, "") + if err != nil { + return "", nil, nil, fmt.Errorf("error unwrapping secret ID: %w", err) + } + if resp == nil { + return "", nil, nil, errors.New("response nil when unwrapping secret ID") + } + if resp.Data == nil { + return "", nil, nil, errors.New("data in response nil when unwrapping secret ID") + } + secretIDRaw, ok := resp.Data["secret_id"] + if !ok { + return "", nil, nil, errors.New("secret_id in response nil when unwrapping secret ID") + } + secretID, ok := secretIDRaw.(string) + if !ok { + return "", nil, nil, errors.New("secret_id in response could not be parsed as string when unwrapping secret ID") + } + stringSecretID = secretID + } + a.cachedSecretID = stringSecretID + if a.removeSecretIDFileAfterReading { + if err := os.Remove(a.secretIDFilePath); err != nil { + a.logger.Error("error removing secret ID file after reading", "error", err) + } + } + } + } + + if a.cachedSecretID == "" { + return "", nil, nil, errors.New("no known secret ID") + } + + return fmt.Sprintf("%s/login", a.mountPath), nil, map[string]interface{}{ + "role_id": a.cachedRoleID, + "secret_id": a.cachedSecretID, + }, nil +} + +func (a *approleMethod) NewCreds() chan struct{} { + return nil +} + +func (a *approleMethod) CredSuccess() { +} + +func (a *approleMethod) Shutdown() { +} diff --git a/command/agentproxyshared/auth/auth.go b/command/agentproxyshared/auth/auth.go new file mode 100644 index 0000000..fdcf12f --- /dev/null +++ b/command/agentproxyshared/auth/auth.go @@ -0,0 +1,553 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package auth + +import ( + "context" + "encoding/json" + "errors" + "math/rand" + "net/http" + "time" + + "github.com/armon/go-metrics" + "github.com/hashicorp/go-hclog" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/jsonutil" +) + +const ( + defaultMinBackoff = 1 * time.Second + defaultMaxBackoff = 5 * time.Minute +) + +// AuthMethod is the interface that auto-auth methods implement for the agent/proxy +// to use. +type AuthMethod interface { + // Authenticate returns a mount path, header, request body, and error. + // The header may be nil if no special header is needed. + Authenticate(context.Context, *api.Client) (string, http.Header, map[string]interface{}, error) + NewCreds() chan struct{} + CredSuccess() + Shutdown() +} + +// AuthMethodWithClient is an extended interface that can return an API client +// for use during the authentication call. +type AuthMethodWithClient interface { + AuthMethod + AuthClient(client *api.Client) (*api.Client, error) +} + +type AuthConfig struct { + Logger hclog.Logger + MountPath string + WrapTTL time.Duration + Config map[string]interface{} +} + +// AuthHandler is responsible for keeping a token alive and renewed and passing +// new tokens to the sink server +type AuthHandler struct { + OutputCh chan string + TemplateTokenCh chan string + ExecTokenCh chan string + token string + userAgent string + metricsSignifier string + logger hclog.Logger + client *api.Client + random *rand.Rand + wrapTTL time.Duration + maxBackoff time.Duration + minBackoff time.Duration + enableReauthOnNewCredentials bool + enableTemplateTokenCh bool + enableExecTokenCh bool + exitOnError bool +} + +type AuthHandlerConfig struct { + Logger hclog.Logger + Client *api.Client + WrapTTL time.Duration + MaxBackoff time.Duration + MinBackoff time.Duration + Token string + // UserAgent is the HTTP UserAgent header auto-auth will use when + // communicating with Vault. + UserAgent string + // MetricsSignifier is the first argument we will give to + // metrics.IncrCounter, signifying what the name of the application is + MetricsSignifier string + EnableReauthOnNewCredentials bool + EnableTemplateTokenCh bool + EnableExecTokenCh bool + ExitOnError bool +} + +func NewAuthHandler(conf *AuthHandlerConfig) *AuthHandler { + ah := &AuthHandler{ + // This is buffered so that if we try to output after the sink server + // has been shut down, during agent/proxy shutdown, we won't block + OutputCh: make(chan string, 1), + TemplateTokenCh: make(chan string, 1), + ExecTokenCh: make(chan string, 1), + token: conf.Token, + logger: conf.Logger, + client: conf.Client, + random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), + wrapTTL: conf.WrapTTL, + minBackoff: conf.MinBackoff, + maxBackoff: conf.MaxBackoff, + enableReauthOnNewCredentials: conf.EnableReauthOnNewCredentials, + enableTemplateTokenCh: conf.EnableTemplateTokenCh, + enableExecTokenCh: conf.EnableExecTokenCh, + exitOnError: conf.ExitOnError, + userAgent: conf.UserAgent, + metricsSignifier: conf.MetricsSignifier, + } + + return ah +} + +func backoff(ctx context.Context, backoff *autoAuthBackoff) bool { + if backoff.exitOnErr { + return false + } + + select { + case <-time.After(backoff.current): + case <-ctx.Done(): + } + + // Increase exponential backoff for the next time if we don't + // successfully auth/renew/etc. 
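+ // For example (illustrative, assuming the 1s default minimum): repeated
+ // failures back off to roughly 2s, 4s, 8s, ... with up to 25% randomly
+ // trimmed from each doubling (see autoAuthBackoff.next below), capped
+ // at the configured maximum.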
+ backoff.next()
+ return true
+}
+
+func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) error {
+ if am == nil {
+ return errors.New("auth handler: nil auth method")
+ }
+
+ if ah.minBackoff <= 0 {
+ ah.minBackoff = defaultMinBackoff
+ }
+
+ backoffCfg := newAutoAuthBackoff(ah.minBackoff, ah.maxBackoff, ah.exitOnError)
+
+ if backoffCfg.min >= backoffCfg.max {
+ return errors.New("auth handler: min_backoff must be less than max_backoff")
+ }
+
+ ah.logger.Info("starting auth handler")
+ defer func() {
+ am.Shutdown()
+ close(ah.OutputCh)
+ close(ah.TemplateTokenCh)
+ close(ah.ExecTokenCh)
+ ah.logger.Info("auth handler stopped")
+ }()
+
+ credCh := am.NewCreds()
+ if !ah.enableReauthOnNewCredentials {
+ realCredCh := credCh
+ credCh = nil
+ if realCredCh != nil {
+ go func() {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-realCredCh:
+ }
+ }
+ }()
+ }
+ }
+ if credCh == nil {
+ credCh = make(chan struct{})
+ }
+
+ if ah.client != nil {
+ headers := ah.client.Headers()
+ if headers == nil {
+ headers = make(http.Header)
+ }
+ headers.Set("User-Agent", ah.userAgent)
+ ah.client.SetHeaders(headers)
+ }
+
+ var watcher *api.LifetimeWatcher
+ first := true
+
+ for {
+ select {
+ case <-ctx.Done():
+ return nil
+
+ default:
+ }
+
+ var clientToUse *api.Client
+ var err error
+ var path string
+ var data map[string]interface{}
+ var header http.Header
+ var isTokenFileMethod bool
+
+ switch am.(type) {
+ case AuthMethodWithClient:
+ clientToUse, err = am.(AuthMethodWithClient).AuthClient(ah.client)
+ if err != nil {
+ ah.logger.Error("error creating client for authentication call", "error", err, "backoff", backoffCfg)
+ metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1)
+
+ if backoff(ctx, backoffCfg) {
+ continue
+ }
+
+ return err
+ }
+ default:
+ clientToUse = ah.client
+ }
+
+ // Disable retry on the client to ensure our backoff function is
+ // the only source of retry/backoff.
+ clientToUse.SetMaxRetries(0) + + var secret *api.Secret = new(api.Secret) + if first && ah.token != "" { + ah.logger.Debug("using preloaded token") + + first = false + ah.logger.Debug("lookup-self with preloaded token") + clientToUse.SetToken(ah.token) + + secret, err = clientToUse.Auth().Token().LookupSelfWithContext(ctx) + if err != nil { + ah.logger.Error("could not look up token", "err", err, "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + + if backoff(ctx, backoffCfg) { + continue + } + return err + } + + duration, _ := secret.Data["ttl"].(json.Number).Int64() + secret.Auth = &api.SecretAuth{ + ClientToken: secret.Data["id"].(string), + LeaseDuration: int(duration), + Renewable: secret.Data["renewable"].(bool), + } + } else { + ah.logger.Info("authenticating") + + path, header, data, err = am.Authenticate(ctx, ah.client) + if err != nil { + ah.logger.Error("error getting path or data from method", "error", err, "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + + if backoff(ctx, backoffCfg) { + continue + } + return err + } + } + + if ah.wrapTTL > 0 { + wrapClient, err := clientToUse.Clone() + if err != nil { + ah.logger.Error("error creating client for wrapped call", "error", err, "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + + if backoff(ctx, backoffCfg) { + continue + } + return err + } + wrapClient.SetWrappingLookupFunc(func(string, string) string { + return ah.wrapTTL.String() + }) + clientToUse = wrapClient + } + for key, values := range header { + for _, value := range values { + clientToUse.AddHeader(key, value) + } + } + + // This should only happen if there's no preloaded token (regular auto-auth login) + // or if a preloaded token has expired and is now switching to auto-auth. 
+ if secret.Auth == nil { + isTokenFileMethod = path == "auth/token/lookup-self" + if isTokenFileMethod { + token, _ := data["token"].(string) + lookupSelfClient, err := clientToUse.Clone() + if err != nil { + ah.logger.Error("failed to clone client to perform token lookup") + return err + } + lookupSelfClient.SetToken(token) + secret, err = lookupSelfClient.Auth().Token().LookupSelf() + } else { + secret, err = clientToUse.Logical().WriteWithContext(ctx, path, data) + } + + // Check errors/sanity + if err != nil { + ah.logger.Error("error authenticating", "error", err, "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + + if backoff(ctx, backoffCfg) { + continue + } + return err + } + } + + var leaseDuration int + + switch { + case ah.wrapTTL > 0: + if secret.WrapInfo == nil { + ah.logger.Error("authentication returned nil wrap info", "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + + if backoff(ctx, backoffCfg) { + continue + } + return err + } + if secret.WrapInfo.Token == "" { + ah.logger.Error("authentication returned empty wrapped client token", "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + + if backoff(ctx, backoffCfg) { + continue + } + return err + } + wrappedResp, err := jsonutil.EncodeJSON(secret.WrapInfo) + if err != nil { + ah.logger.Error("failed to encode wrapinfo", "error", err, "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + + if backoff(ctx, backoffCfg) { + continue + } + return err + } + ah.logger.Info("authentication successful, sending wrapped token to sinks and pausing") + ah.OutputCh <- string(wrappedResp) + if ah.enableTemplateTokenCh { + ah.TemplateTokenCh <- string(wrappedResp) + } + if ah.enableExecTokenCh { + ah.ExecTokenCh <- string(wrappedResp) + } + + am.CredSuccess() + backoffCfg.reset() + + select { + case <-ctx.Done(): + ah.logger.Info("shutdown triggered") + continue + + case <-credCh: + ah.logger.Info("auth method found new credentials, re-authenticating") + continue + } + + default: + // We handle the token_file method specially, as it's the only + // auth method that isn't actually authenticating, i.e. the secret + // returned does not have an Auth struct attached + isTokenFileMethod := path == "auth/token/lookup-self" + if isTokenFileMethod { + // We still check the response of the request to ensure the token is valid + // i.e. 
if the token is invalid, we will fail in the authentication step + if secret == nil || secret.Data == nil { + ah.logger.Error("token file validation failed, token may be invalid", "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + + if backoff(ctx, backoffCfg) { + continue + } + return err + } + token, ok := secret.Data["id"].(string) + if !ok || token == "" { + ah.logger.Error("token file validation returned empty client token", "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + + if backoff(ctx, backoffCfg) { + continue + } + return err + } + + duration, _ := secret.Data["ttl"].(json.Number).Int64() + leaseDuration = int(duration) + renewable, _ := secret.Data["renewable"].(bool) + secret.Auth = &api.SecretAuth{ + ClientToken: token, + LeaseDuration: int(duration), + Renewable: renewable, + } + ah.logger.Info("authentication successful, sending token to sinks") + ah.OutputCh <- token + if ah.enableTemplateTokenCh { + ah.TemplateTokenCh <- token + } + if ah.enableExecTokenCh { + ah.ExecTokenCh <- token + } + + tokenType := secret.Data["type"].(string) + if tokenType == "batch" { + ah.logger.Info("note that this token type is batch, and batch tokens cannot be renewed", "ttl", leaseDuration) + } + } else { + if secret == nil || secret.Auth == nil { + ah.logger.Error("authentication returned nil auth info", "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + + if backoff(ctx, backoffCfg) { + continue + } + return err + } + if secret.Auth.ClientToken == "" { + ah.logger.Error("authentication returned empty client token", "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + + if backoff(ctx, backoffCfg) { + continue + } + return err + } + + leaseDuration = secret.LeaseDuration + ah.logger.Info("authentication successful, sending token to sinks") + ah.OutputCh <- secret.Auth.ClientToken + if ah.enableTemplateTokenCh { + ah.TemplateTokenCh <- secret.Auth.ClientToken + } + if ah.enableExecTokenCh { + ah.ExecTokenCh <- secret.Auth.ClientToken + } + } + + am.CredSuccess() + backoffCfg.reset() + } + + if watcher != nil { + watcher.Stop() + } + + watcher, err = clientToUse.NewLifetimeWatcher(&api.LifetimeWatcherInput{ + Secret: secret, + }) + if err != nil { + ah.logger.Error("error creating lifetime watcher", "error", err, "backoff", backoffCfg) + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + + if backoff(ctx, backoffCfg) { + continue + } + return err + } + + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "success"}, 1) + // We don't want to trigger the renewal process for tokens with + // unlimited TTL, such as the root token. 
+ if leaseDuration == 0 && isTokenFileMethod { + ah.logger.Info("not starting token renewal process, as token has unlimited TTL") + } else { + ah.logger.Info("starting renewal process") + go watcher.Renew() + } + + LifetimeWatcherLoop: + for { + select { + case <-ctx.Done(): + ah.logger.Info("shutdown triggered, stopping lifetime watcher") + watcher.Stop() + break LifetimeWatcherLoop + + case err := <-watcher.DoneCh(): + ah.logger.Info("lifetime watcher done channel triggered") + if err != nil { + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "failure"}, 1) + ah.logger.Error("error renewing token", "error", err) + } + break LifetimeWatcherLoop + + case <-watcher.RenewCh(): + metrics.IncrCounter([]string{ah.metricsSignifier, "auth", "success"}, 1) + ah.logger.Info("renewed auth token") + + case <-credCh: + ah.logger.Info("auth method found new credentials, re-authenticating") + break LifetimeWatcherLoop + } + } + } +} + +// autoAuthBackoff tracks exponential backoff state. +type autoAuthBackoff struct { + min time.Duration + max time.Duration + current time.Duration + exitOnErr bool +} + +func newAutoAuthBackoff(min, max time.Duration, exitErr bool) *autoAuthBackoff { + if max <= 0 { + max = defaultMaxBackoff + } + + if min <= 0 { + min = defaultMinBackoff + } + + return &autoAuthBackoff{ + current: min, + max: max, + min: min, + exitOnErr: exitErr, + } +} + +// next determines the next backoff duration that is roughly twice +// the current value, capped to a max value, with a measure of randomness. +func (b *autoAuthBackoff) next() { + maxBackoff := 2 * b.current + + if maxBackoff > b.max { + maxBackoff = b.max + } + + // Trim a random amount (0-25%) off the doubled duration + trim := rand.Int63n(int64(maxBackoff) / 4) + b.current = maxBackoff - time.Duration(trim) +} + +func (b *autoAuthBackoff) reset() { + b.current = b.min +} + +func (b autoAuthBackoff) String() string { + return b.current.Truncate(10 * time.Millisecond).String() +} diff --git a/command/agentproxyshared/auth/auth_test.go b/command/agentproxyshared/auth/auth_test.go new file mode 100644 index 0000000..5729435 --- /dev/null +++ b/command/agentproxyshared/auth/auth_test.go @@ -0,0 +1,199 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package auth
+
+import (
+	"context"
+	"net/http"
+	"testing"
+	"time"
+
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/builtin/credential/userpass"
+	vaulthttp "github.com/hashicorp/vault/http"
+	"github.com/hashicorp/vault/sdk/helper/logging"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/hashicorp/vault/vault"
+)
+
+type userpassTestMethod struct{}
+
+func newUserpassTestMethod(t *testing.T, client *api.Client) AuthMethod {
+	err := client.Sys().EnableAuthWithOptions("userpass", &api.EnableAuthOptions{
+		Type: "userpass",
+		Config: api.AuthConfigInput{
+			DefaultLeaseTTL: "1s",
+			MaxLeaseTTL:     "3s",
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	return &userpassTestMethod{}
+}
+
+func (u *userpassTestMethod) Authenticate(_ context.Context, client *api.Client) (string, http.Header, map[string]interface{}, error) {
+	_, err := client.Logical().Write("auth/userpass/users/foo", map[string]interface{}{
+		"password": "bar",
+	})
+	if err != nil {
+		return "", nil, nil, err
+	}
+	return "auth/userpass/login/foo", nil, map[string]interface{}{
+		"password": "bar",
+	}, nil
+}
+
+func (u *userpassTestMethod) NewCreds() chan struct{} {
+	return nil
+}
+
+func (u *userpassTestMethod) CredSuccess() {
+}
+
+func (u *userpassTestMethod) Shutdown() {
+}
+
+func TestAuthHandler(t *testing.T) {
+	logger := logging.NewVaultLogger(hclog.Trace)
+	coreConfig := &vault.CoreConfig{
+		Logger: logger,
+		CredentialBackends: map[string]logical.Factory{
+			"userpass": userpass.Factory,
+		},
+	}
+	cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
+		HandlerFunc: vaulthttp.Handler,
+	})
+	cluster.Start()
+	defer cluster.Cleanup()
+
+	vault.TestWaitActive(t, cluster.Cores[0].Core)
+	client := cluster.Cores[0].Client
+
+	ctx, cancelFunc := context.WithCancel(context.Background())
+
+	ah := NewAuthHandler(&AuthHandlerConfig{
+		Logger: logger.Named("auth.handler"),
+		Client: client,
+	})
+
+	am := newUserpassTestMethod(t, client)
+	errCh := make(chan error)
+	go func() {
+		errCh <- ah.Run(ctx, am)
+	}()
+
+	// Consume tokens so we don't block
+	stopTime := time.Now().Add(5 * time.Second)
+	closed := false
+consumption:
+	for {
+		select {
+		case err := <-errCh:
+			if err != nil {
+				t.Fatal(err)
+			}
+			break consumption
+		case <-ah.OutputCh:
+		case <-ah.TemplateTokenCh:
+			// Nothing
+		case <-time.After(time.Until(stopTime)):
+			if !closed {
+				cancelFunc()
+				closed = true
+			}
+		}
+	}
+}
+
+func TestAgentBackoff(t *testing.T) {
+	max := 1024 * time.Second
+	backoff := newAutoAuthBackoff(defaultMinBackoff, max, false)
+
+	// Test initial value
+	if backoff.current != defaultMinBackoff {
+		t.Fatalf("expected %v initial backoff, got: %v", defaultMinBackoff, backoff.current)
+	}
+
+	// Test that backoff values are in expected range (75-100% of 2*previous)
+	for i := 0; i < 9; i++ {
+		old := backoff.current
+		backoff.next()
+
+		expMax := 2 * old
+		expMin := 3 * expMax / 4
+
+		if backoff.current < expMin || backoff.current > expMax {
+			t.Fatalf("expected backoff in range %v to %v, got: %v", expMin, expMax, backoff)
+		}
+	}
+
+	// Test that backoff is capped
+	for i := 0; i < 100; i++ {
+		backoff.next()
+		if backoff.current > max {
+			t.Fatalf("backoff exceeded max of %v: %v", max, backoff)
+		}
+	}
+
+	// Test reset
+	backoff.reset()
+	if backoff.current != defaultMinBackoff {
+		t.Fatalf("expected %v backoff after reset, got: %v", defaultMinBackoff, backoff.current)
+	}
+}
+
+func TestAgentMinBackoffCustom(t *testing.T) {
+	type test struct {
+		minBackoff time.Duration
+		want       time.Duration
+	}
+
+	tests := []test{
+		{minBackoff: 0 * time.Second, want: 1 * time.Second},
+		{minBackoff: 1 * time.Second, want: 1 * time.Second},
+		{minBackoff: 5 * time.Second, want: 5 * time.Second},
+		{minBackoff: 10 * time.Second, want: 10 * time.Second},
+	}
+
+	for _, test := range tests {
+		max := 1024 * time.Second
+		backoff := newAutoAuthBackoff(test.minBackoff, max, false)
+
+		// Test initial value
+		if backoff.current != test.want {
+			t.Fatalf("expected %v initial backoff, got: %v", test.want, backoff.current)
+		}
+
+		// Test that backoff values are in expected range (75-100% of 2*previous)
+		for i := 0; i < 5; i++ {
+			old := backoff.current
+			backoff.next()
+
+			expMax := 2 * old
+			expMin := 3 * expMax / 4
+
+			if backoff.current < expMin || backoff.current > expMax {
+				t.Fatalf("expected backoff in range %v to %v, got: %v", expMin, expMax, backoff)
+			}
+		}
+
+		// Test that backoff is capped
+		for i := 0; i < 100; i++ {
+			backoff.next()
+			if backoff.current > max {
+				t.Fatalf("backoff exceeded max of %v: %v", max, backoff)
+			}
+		}
+
+		// Test reset
+		backoff.reset()
+		if backoff.current != test.want {
+			t.Fatalf("expected %v backoff after reset, got: %v", test.want, backoff.current)
+		}
+	}
+}
diff --git a/command/agentproxyshared/auth/aws/aws.go b/command/agentproxyshared/auth/aws/aws.go
new file mode 100644
index 0000000..53d1623
--- /dev/null
+++ b/command/agentproxyshared/auth/aws/aws.go
@@ -0,0 +1,301 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package aws
+
+import (
+	"context"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"net/http"
+	"reflect"
+	"sync"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/ec2metadata"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/go-secure-stdlib/awsutil"
+	"github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/command/agentproxyshared/auth"
+)
+
+const (
+	typeEC2 = "ec2"
+	typeIAM = "iam"
+
+	/*
+
+		IAM creds can be inferred from instance metadata or the container
+		identity service, and those creds expire at varying intervals with
+		new creds becoming available at likewise varying intervals. Let's
+		default to polling once a minute so all changes can be picked up
+		rather quickly. This is configurable, however.
+
+	*/
+	defaultCredentialPollInterval = 60
+)
+
+type awsMethod struct {
+	logger      hclog.Logger
+	authType    string
+	nonce       string
+	mountPath   string
+	role        string
+	headerValue string
+	region      string
+
+	// These are used to share the latest creds safely across goroutines.
+	credLock  sync.Mutex
+	lastCreds *credentials.Credentials
+
+	// Notifies the outer environment that it should call Authenticate again.
+	credsFound chan struct{}
+
+	// Detects that the outer environment is closing.
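+	// (Closed by Shutdown; pollForCreds selects on it and returns when it
+	// fires, so the polling goroutine does not leak.)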
+ stopCh chan struct{} +} + +func NewAWSAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { + if conf == nil { + return nil, errors.New("empty config") + } + if conf.Config == nil { + return nil, errors.New("empty config data") + } + + a := &awsMethod{ + logger: conf.Logger, + mountPath: conf.MountPath, + credsFound: make(chan struct{}), + stopCh: make(chan struct{}), + region: awsutil.DefaultRegion, + } + + typeRaw, ok := conf.Config["type"] + if !ok { + return nil, errors.New("missing 'type' value") + } + a.authType, ok = typeRaw.(string) + if !ok { + return nil, errors.New("could not convert 'type' config value to string") + } + + roleRaw, ok := conf.Config["role"] + if !ok { + return nil, errors.New("missing 'role' value") + } + a.role, ok = roleRaw.(string) + if !ok { + return nil, errors.New("could not convert 'role' config value to string") + } + + switch { + case a.role == "": + return nil, errors.New("'role' value is empty") + case a.authType == "": + return nil, errors.New("'type' value is empty") + case a.authType != typeEC2 && a.authType != typeIAM: + return nil, errors.New("'type' value is invalid") + } + + accessKey := "" + accessKeyRaw, ok := conf.Config["access_key"] + if ok { + accessKey, ok = accessKeyRaw.(string) + if !ok { + return nil, errors.New("could not convert 'access_key' value into string") + } + } + + secretKey := "" + secretKeyRaw, ok := conf.Config["secret_key"] + if ok { + secretKey, ok = secretKeyRaw.(string) + if !ok { + return nil, errors.New("could not convert 'secret_key' value into string") + } + } + + sessionToken := "" + sessionTokenRaw, ok := conf.Config["session_token"] + if ok { + sessionToken, ok = sessionTokenRaw.(string) + if !ok { + return nil, errors.New("could not convert 'session_token' value into string") + } + } + + headerValueRaw, ok := conf.Config["header_value"] + if ok { + a.headerValue, ok = headerValueRaw.(string) + if !ok { + return nil, errors.New("could not convert 'header_value' value into string") + } + } + + nonceRaw, ok := conf.Config["nonce"] + if ok { + a.nonce, ok = nonceRaw.(string) + if !ok { + return nil, errors.New("could not convert 'nonce' value into string") + } + } + + regionRaw, ok := conf.Config["region"] + if ok { + a.region, ok = regionRaw.(string) + if !ok { + return nil, errors.New("could not convert 'region' value into string") + } + } + + if a.authType == typeIAM { + + // Check for an optional custom frequency at which we should poll for creds. + credentialPollIntervalSec := defaultCredentialPollInterval + if credentialPollIntervalRaw, ok := conf.Config["credential_poll_interval"]; ok { + if credentialPollInterval, ok := credentialPollIntervalRaw.(int); ok { + credentialPollIntervalSec = credentialPollInterval + } else { + return nil, errors.New("could not convert 'credential_poll_interval' into int") + } + } + + // Do an initial population of the creds because we want to err right away if we can't + // even get a first set. 
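+		// (Assumption worth noting: awsutil.RetrieveCreds is expected to
+		// fall back to the default AWS credential chain (env vars, shared
+		// credentials file, instance metadata) when the static key
+		// parameters are empty, so access_key/secret_key remain optional
+		// in the config.)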
+ creds, err := awsutil.RetrieveCreds(accessKey, secretKey, sessionToken, a.logger) + if err != nil { + return nil, err + } + a.lastCreds = creds + + go a.pollForCreds(accessKey, secretKey, sessionToken, credentialPollIntervalSec) + } + + return a, nil +} + +func (a *awsMethod) Authenticate(ctx context.Context, client *api.Client) (retToken string, header http.Header, retData map[string]interface{}, retErr error) { + a.logger.Trace("beginning authentication") + + data := make(map[string]interface{}) + sess, err := session.NewSession() + if err != nil { + retErr = fmt.Errorf("error creating session: %w", err) + return + } + metadataSvc := ec2metadata.New(sess) + + switch a.authType { + case typeEC2: + // Fetch document + { + doc, err := metadataSvc.GetDynamicData("/instance-identity/document") + if err != nil { + retErr = fmt.Errorf("error requesting doc: %w", err) + return + } + data["identity"] = base64.StdEncoding.EncodeToString([]byte(doc)) + } + + // Fetch signature + { + signature, err := metadataSvc.GetDynamicData("/instance-identity/signature") + if err != nil { + retErr = fmt.Errorf("error requesting signature: %w", err) + return + } + data["signature"] = signature + } + + // Add the reauthentication value, if we have one + if a.nonce == "" { + uid, err := uuid.GenerateUUID() + if err != nil { + retErr = fmt.Errorf("error generating uuid for reauthentication value: %w", err) + return + } + a.nonce = uid + } + data["nonce"] = a.nonce + + default: + // This is typeIAM. + a.credLock.Lock() + defer a.credLock.Unlock() + + var err error + data, err = awsutil.GenerateLoginData(a.lastCreds, a.headerValue, a.region, a.logger) + if err != nil { + retErr = fmt.Errorf("error creating login value: %w", err) + return + } + } + + data["role"] = a.role + + return fmt.Sprintf("%s/login", a.mountPath), nil, data, nil +} + +func (a *awsMethod) NewCreds() chan struct{} { + return a.credsFound +} + +func (a *awsMethod) CredSuccess() {} + +func (a *awsMethod) Shutdown() { + close(a.credsFound) + close(a.stopCh) +} + +func (a *awsMethod) pollForCreds(accessKey, secretKey, sessionToken string, frequencySeconds int) { + ticker := time.NewTicker(time.Duration(frequencySeconds) * time.Second) + defer ticker.Stop() + for { + select { + case <-a.stopCh: + a.logger.Trace("shutdown triggered, stopping aws auth handler") + return + case <-ticker.C: + if err := a.checkCreds(accessKey, secretKey, sessionToken); err != nil { + a.logger.Warn("unable to retrieve current creds, retaining last creds", "error", err) + } + } + } +} + +func (a *awsMethod) checkCreds(accessKey, secretKey, sessionToken string) error { + a.credLock.Lock() + defer a.credLock.Unlock() + + a.logger.Trace("checking for new credentials") + currentCreds, err := awsutil.RetrieveCreds(accessKey, secretKey, sessionToken, a.logger) + if err != nil { + return err + } + + currentVal, err := currentCreds.Get() + if err != nil { + return err + } + lastVal, err := a.lastCreds.Get() + if err != nil { + return err + } + + // These will always have different pointers regardless of whether their + // values are identical, hence the use of DeepEqual. 
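+	// (Note: credsFound is unbuffered, so the send at the end of this
+	// function blocks, with credLock still held, until the auth handler
+	// drains the channel returned by NewCreds.)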
+ if !a.lastCreds.IsExpired() && reflect.DeepEqual(currentVal, lastVal) { + a.logger.Trace("credentials are unchanged and still valid") + return nil + } + + a.lastCreds = currentCreds + a.logger.Trace("new credentials detected, triggering Authenticate") + a.credsFound <- struct{}{} + return nil +} diff --git a/command/agentproxyshared/auth/azure/azure.go b/command/agentproxyshared/auth/azure/azure.go new file mode 100644 index 0000000..8d9d094 --- /dev/null +++ b/command/agentproxyshared/auth/azure/azure.go @@ -0,0 +1,277 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package azure + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + + policy "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + az "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + cleanhttp "github.com/hashicorp/go-cleanhttp" + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/helper/useragent" + "github.com/hashicorp/vault/sdk/helper/jsonutil" +) + +const ( + instanceEndpoint = "http://169.254.169.254/metadata/instance" + identityEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" + + // minimum version 2018-02-01 needed for identity metadata + // regional availability: https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service + apiVersion = "2018-02-01" +) + +type azureMethod struct { + logger hclog.Logger + mountPath string + + authenticateFromEnvironment bool + role string + scope string + resource string + objectID string + clientID string +} + +func NewAzureAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { + if conf == nil { + return nil, errors.New("empty config") + } + if conf.Config == nil { + return nil, errors.New("empty config data") + } + + a := &azureMethod{ + logger: conf.Logger, + mountPath: conf.MountPath, + } + + roleRaw, ok := conf.Config["role"] + if !ok { + return nil, errors.New("missing 'role' value") + } + a.role, ok = roleRaw.(string) + if !ok { + return nil, errors.New("could not convert 'role' config value to string") + } + + resourceRaw, ok := conf.Config["resource"] + if !ok { + return nil, errors.New("missing 'resource' value") + } + a.resource, ok = resourceRaw.(string) + if !ok { + return nil, errors.New("could not convert 'resource' config value to string") + } + + objectIDRaw, ok := conf.Config["object_id"] + if ok { + a.objectID, ok = objectIDRaw.(string) + if !ok { + return nil, errors.New("could not convert 'object_id' config value to string") + } + } + + clientIDRaw, ok := conf.Config["client_id"] + if ok { + a.clientID, ok = clientIDRaw.(string) + if !ok { + return nil, errors.New("could not convert 'client_id' config value to string") + } + } + + scopeRaw, ok := conf.Config["scope"] + if ok { + a.scope, ok = scopeRaw.(string) + if !ok { + return nil, errors.New("could not convert 'scope' config value to string") + } + } + if a.scope == "" { + a.scope = fmt.Sprintf("%s/.default", a.resource) + } + + authenticateFromEnvironmentRaw, ok := conf.Config["authenticate_from_environment"] + if ok { + a.authenticateFromEnvironment, ok = authenticateFromEnvironmentRaw.(bool) + if !ok { + return nil, errors.New("could not convert 'authenticate_from_environment' config value to bool") + } + } + + switch { + case a.role == "": + return nil, errors.New("'role' value is empty") + case a.resource == "": + return nil, errors.New("'resource' value is empty") + case a.objectID != "" && 
a.clientID != "": + return nil, errors.New("only one of 'object_id' or 'client_id' may be provided") + } + + return a, nil +} + +func (a *azureMethod) Authenticate(ctx context.Context, client *api.Client) (retPath string, header http.Header, retData map[string]interface{}, retErr error) { + a.logger.Trace("beginning authentication") + + // Fetch instance data + var instance struct { + Compute struct { + Name string + ResourceGroupName string + SubscriptionID string + VMScaleSetName string + ResourceID string + } + } + + body, err := getInstanceMetadataInfo(ctx) + if err != nil { + retErr = err + return + } + + err = jsonutil.DecodeJSON(body, &instance) + if err != nil { + retErr = fmt.Errorf("error parsing instance metadata response: %w", err) + return + } + + token := "" + if a.authenticateFromEnvironment { + token, err = getAzureTokenFromEnvironment(ctx, a.scope) + if err != nil { + retErr = err + return + } + } else { + token, err = getTokenFromIdentityEndpoint(ctx, a.resource, a.objectID, a.clientID) + if err != nil { + retErr = err + return + } + } + + // Attempt login + data := map[string]interface{}{ + "role": a.role, + "vm_name": instance.Compute.Name, + "vmss_name": instance.Compute.VMScaleSetName, + "resource_group_name": instance.Compute.ResourceGroupName, + "subscription_id": instance.Compute.SubscriptionID, + "jwt": token, + } + + return fmt.Sprintf("%s/login", a.mountPath), nil, data, nil +} + +func (a *azureMethod) NewCreds() chan struct{} { + return nil +} + +func (a *azureMethod) CredSuccess() { +} + +func (a *azureMethod) Shutdown() { +} + +// getAzureTokenFromEnvironment Is Azure's preferred way for authentication, and takes values +// from environment variables to form a credential. +// It uses a DefaultAzureCredential: +// https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#readme-defaultazurecredential +// Environment variables are taken into account in the following order: +// https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#readme-environment-variables +func getAzureTokenFromEnvironment(ctx context.Context, scope string) (string, error) { + cred, err := az.NewDefaultAzureCredential(nil) + if err != nil { + return "", err + } + + tokenOpts := policy.TokenRequestOptions{Scopes: []string{scope}} + tk, err := cred.GetToken(ctx, tokenOpts) + if err != nil { + return "", err + } + return tk.Token, nil +} + +// getInstanceMetadataInfo calls the Azure Instance Metadata endpoint to get +// information about the Azure environment it's running in. +func getInstanceMetadataInfo(ctx context.Context) ([]byte, error) { + return getMetadataInfo(ctx, instanceEndpoint, "", "", "") +} + +// getTokenFromIdentityEndpoint is kept for backwards compatibility purposes. Using the +// newer APIs and the Azure SDK should be preferred over this mechanism. +func getTokenFromIdentityEndpoint(ctx context.Context, resource, objectID, clientID string) (string, error) { + var identity struct { + AccessToken string `json:"access_token"` + } + + body, err := getMetadataInfo(ctx, identityEndpoint, resource, objectID, clientID) + if err != nil { + return "", err + } + + err = jsonutil.DecodeJSON(body, &identity) + if err != nil { + return "", fmt.Errorf("error parsing identity metadata response: %w", err) + } + + return identity.AccessToken, nil +} + +// getMetadataInfo calls the Azure metadata endpoint with the given parameters. +// An empty resource, objectID and clientID will return metadata information. 
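+//
+// Illustrative calls, mirroring the two wrappers above (hedged; the
+// parameter values here are examples only):
+//
+//	body, err := getMetadataInfo(ctx, instanceEndpoint, "", "", "")       // instance metadata
+//	body, err := getMetadataInfo(ctx, identityEndpoint, resource, "", "") // identity token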
+func getMetadataInfo(ctx context.Context, endpoint, resource, objectID, clientID string) ([]byte, error) { + req, err := http.NewRequest("GET", endpoint, nil) + if err != nil { + return nil, err + } + + q := req.URL.Query() + q.Add("api-version", apiVersion) + if resource != "" { + q.Add("resource", resource) + } + if objectID != "" { + q.Add("object_id", objectID) + } + if clientID != "" { + q.Add("client_id", clientID) + } + req.URL.RawQuery = q.Encode() + req.Header.Set("Metadata", "true") + req.Header.Set("User-Agent", useragent.String()) + req = req.WithContext(ctx) + + client := cleanhttp.DefaultClient() + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("error fetching metadata from %s: %w", endpoint, err) + } + + if resp == nil { + return nil, fmt.Errorf("empty response fetching metadata from %s", endpoint) + } + + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("error reading metadata from %s: %w", endpoint, err) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("error response in metadata from %s: %s", endpoint, body) + } + + return body, nil +} diff --git a/command/agentproxyshared/auth/cert/cert.go b/command/agentproxyshared/auth/cert/cert.go new file mode 100644 index 0000000..5270dcb --- /dev/null +++ b/command/agentproxyshared/auth/cert/cert.go @@ -0,0 +1,158 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cert + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/sdk/helper/consts" +) + +type certMethod struct { + logger hclog.Logger + mountPath string + name string + + caCert string + clientCert string + clientKey string + reload bool + + // Client is the cached client to use if cert info was provided. 
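+	// When 'reload' is true, AuthClient bypasses this cache and rebuilds
+	// the client on each call so renewed certificate files are picked up.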
+	client *api.Client
+}
+
+var _ auth.AuthMethodWithClient = &certMethod{}
+
+func NewCertAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) {
+	if conf == nil {
+		return nil, errors.New("empty config")
+	}
+
+	// We're not concerned if conf.Config is empty, as the 'name'
+	// parameter is optional when using TLS auth
+
+	c := &certMethod{
+		logger:    conf.Logger,
+		mountPath: conf.MountPath,
+	}
+
+	if conf.Config != nil {
+		nameRaw, ok := conf.Config["name"]
+		if !ok {
+			nameRaw = ""
+		}
+		c.name, ok = nameRaw.(string)
+		if !ok {
+			return nil, errors.New("could not convert 'name' config value to string")
+		}
+
+		caCertRaw, ok := conf.Config["ca_cert"]
+		if ok {
+			c.caCert, ok = caCertRaw.(string)
+			if !ok {
+				return nil, errors.New("could not convert 'ca_cert' config value to string")
+			}
+		}
+
+		clientCertRaw, ok := conf.Config["client_cert"]
+		if ok {
+			c.clientCert, ok = clientCertRaw.(string)
+			if !ok {
+				return nil, errors.New("could not convert 'client_cert' config value to string")
+			}
+		}
+
+		clientKeyRaw, ok := conf.Config["client_key"]
+		if ok {
+			c.clientKey, ok = clientKeyRaw.(string)
+			if !ok {
+				return nil, errors.New("could not convert 'client_key' config value to string")
+			}
+		}
+
+		reload, ok := conf.Config["reload"]
+		if ok {
+			c.reload, ok = reload.(bool)
+			if !ok {
+				return nil, errors.New("could not convert 'reload' config value to bool")
+			}
+		}
+	}
+
+	return c, nil
+}
+
+func (c *certMethod) Authenticate(_ context.Context, client *api.Client) (string, http.Header, map[string]interface{}, error) {
+	c.logger.Trace("beginning authentication")
+
+	authMap := map[string]interface{}{}
+
+	if c.name != "" {
+		authMap["name"] = c.name
+	}
+
+	return fmt.Sprintf("%s/login", c.mountPath), nil, authMap, nil
+}
+
+func (c *certMethod) NewCreds() chan struct{} {
+	return nil
+}
+
+func (c *certMethod) CredSuccess() {}
+
+func (c *certMethod) Shutdown() {}
+
+// AuthClient uses the existing client's address and returns a new client with
+// the auto-auth method's certificate information if that's provided in its
+// config map.
+func (c *certMethod) AuthClient(client *api.Client) (*api.Client, error) {
+	c.logger.Trace("deriving auth client to use")
+
+	clientToAuth := client
+
+	if c.caCert != "" || (c.clientKey != "" && c.clientCert != "") {
+		// Return cached client if present
+		if c.client != nil && !c.reload {
+			return c.client, nil
+		}
+
+		config := api.DefaultConfig()
+		if config.Error != nil {
+			return nil, config.Error
+		}
+		config.Address = client.Address()
+
+		t := &api.TLSConfig{
+			CACert:     c.caCert,
+			ClientCert: c.clientCert,
+			ClientKey:  c.clientKey,
+		}
+
+		// Set up TLS config
+		if err := config.ConfigureTLS(t); err != nil {
+			return nil, err
+		}
+
+		var err error
+		clientToAuth, err = api.NewClient(config)
+		if err != nil {
+			return nil, err
+		}
+		if ns := client.Headers().Get(consts.NamespaceHeaderName); ns != "" {
+			clientToAuth.SetNamespace(ns)
+		}
+
+		// Cache the client for future use
+		c.client = clientToAuth
+	}
+
+	return clientToAuth, nil
+}
diff --git a/command/agentproxyshared/auth/cert/cert_test.go b/command/agentproxyshared/auth/cert/cert_test.go
new file mode 100644
index 0000000..43a5f83
--- /dev/null
+++ b/command/agentproxyshared/auth/cert/cert_test.go
@@ -0,0 +1,191 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package cert
+
+import (
+	"context"
+	"os"
+	"path"
+	"reflect"
+	"testing"
+
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/command/agentproxyshared/auth"
+)
+
+func TestCertAuthMethod_Authenticate(t *testing.T) {
+	config := &auth.AuthConfig{
+		Logger:    hclog.NewNullLogger(),
+		MountPath: "cert-test",
+		Config: map[string]interface{}{
+			"name": "foo",
+		},
+	}
+
+	method, err := NewCertAuthMethod(config)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	client, err := api.NewClient(nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	loginPath, _, authMap, err := method.Authenticate(context.Background(), client)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expectedLoginPath := path.Join(config.MountPath, "/login")
+	if loginPath != expectedLoginPath {
+		t.Fatalf("mismatch on login path: got: %s, expected: %s", loginPath, expectedLoginPath)
+	}
+
+	expectedAuthMap := map[string]interface{}{
+		"name": config.Config["name"],
+	}
+	if !reflect.DeepEqual(authMap, expectedAuthMap) {
+		t.Fatalf("mismatch on auth map:\ngot:\n\t%v\nexpected:\n\t%v", authMap, expectedAuthMap)
+	}
+}
+
+func TestCertAuthMethod_AuthClient_withoutCerts(t *testing.T) {
+	config := &auth.AuthConfig{
+		Logger:    hclog.NewNullLogger(),
+		MountPath: "cert-test",
+		Config: map[string]interface{}{
+			"name": "without-certs",
+		},
+	}
+
+	method, err := NewCertAuthMethod(config)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	client, err := api.NewClient(api.DefaultConfig())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	clientToUse, err := method.(auth.AuthMethodWithClient).AuthClient(client)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if client != clientToUse {
+		t.Fatal("expected AuthClient to return back the original client")
+	}
+}
+
+func TestCertAuthMethod_AuthClient_withCerts(t *testing.T) {
+	clientCert, err := os.Open("./test-fixtures/keys/cert.pem")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer clientCert.Close()
+
+	clientKey, err := os.Open("./test-fixtures/keys/key.pem")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer clientKey.Close()
+
+	config := &auth.AuthConfig{
+		Logger:    hclog.NewNullLogger(),
+		MountPath: "cert-test",
+		Config: map[string]interface{}{
+			"name":        "with-certs",
+			"client_cert": clientCert.Name(),
+			"client_key":  clientKey.Name(),
+		},
+	}
+
+	method, err := NewCertAuthMethod(config)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	client, err := api.NewClient(nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	clientToUse, err := method.(auth.AuthMethodWithClient).AuthClient(client)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if client == clientToUse {
+		t.Fatal("expected client from AuthClient to be different from original client")
+	}
+
+	// Call AuthClient again to get back the cached client
+	cachedClient, err := method.(auth.AuthMethodWithClient).AuthClient(client)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if cachedClient != clientToUse {
+		t.Fatal("expected client from AuthClient to return back a cached client")
+	}
+}
+
+func TestCertAuthMethod_AuthClient_withCertsReload(t *testing.T) {
+	clientCert, err := os.Open("./test-fixtures/keys/cert.pem")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	defer clientCert.Close()
+
+	clientKey, err := os.Open("./test-fixtures/keys/key.pem")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	defer clientKey.Close()
+
+	config := &auth.AuthConfig{
+		Logger:    hclog.NewNullLogger(),
+		MountPath: "cert-test",
+		Config: map[string]interface{}{
+			"name":        "with-certs-reloaded",
+			"client_cert": clientCert.Name(),
+			"client_key":
clientKey.Name(), + "reload": true, + }, + } + + method, err := NewCertAuthMethod(config) + if err != nil { + t.Fatal(err) + } + + client, err := api.NewClient(nil) + if err != nil { + t.Fatal(err) + } + + clientToUse, err := method.(auth.AuthMethodWithClient).AuthClient(client) + if err != nil { + t.Fatal(err) + } + + if client == clientToUse { + t.Fatal("expected client from AuthClient to be different from original client") + } + + // Call AuthClient again to get back a new client with reloaded certificates + reloadedClient, err := method.(auth.AuthMethodWithClient).AuthClient(client) + if err != nil { + t.Fatal(err) + } + + if reloadedClient == clientToUse { + t.Fatal("expected client from AuthClient to return back a new client") + } +} diff --git a/command/agentproxyshared/auth/cert/test-fixtures/keys/cert.pem b/command/agentproxyshared/auth/cert/test-fixtures/keys/cert.pem new file mode 100644 index 0000000..67ef67d --- /dev/null +++ b/command/agentproxyshared/auth/cert/test-fixtures/keys/cert.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDtTCCAp2gAwIBAgIUf+jhKTFBnqSs34II0WS1L4QsbbAwDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzQxWhcNMjUw +MTA1MTAyODExWjAbMRkwFwYDVQQDExBjZXJ0LmV4YW1wbGUuY29tMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxS +TRAVnygAftetT8puHflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGn +SgMld6ZWRhNheZhA6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmi +YYMiIWplidMmMO5NTRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5 +donyqtnaHuIJGuUdy54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVG +B+5+AAGF5iuHC3N2DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABo4H1 +MIHyMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUm++e +HpyM3p708bgZJuRYEdX1o+UwHwYDVR0jBBgwFoAUncSzT/6HMexyuiU9/7EgHu+o +k5swOwYIKwYBBQUHAQEELzAtMCsGCCsGAQUFBzAChh9odHRwOi8vMTI3LjAuMC4x +OjgyMDAvdjEvcGtpL2NhMCEGA1UdEQQaMBiCEGNlcnQuZXhhbXBsZS5jb22HBH8A +AAEwMQYDVR0fBCowKDAmoCSgIoYgaHR0cDovLzEyNy4wLjAuMTo4MjAwL3YxL3Br +aS9jcmwwDQYJKoZIhvcNAQELBQADggEBABsuvmPSNjjKTVN6itWzdQy+SgMIrwfs +X1Yb9Lefkkwmp9ovKFNQxa4DucuCuzXcQrbKwWTfHGgR8ct4rf30xCRoA7dbQWq4 +aYqNKFWrRaBRAaaYZ/O1ApRTOrXqRx9Eqr0H1BXLsoAq+mWassL8sf6siae+CpwA +KqBko5G0dNXq5T4i2LQbmoQSVetIrCJEeMrU+idkuqfV2h1BQKgSEhFDABjFdTCN +QDAHsEHsi2M4/jRW9fqEuhHSDfl2n7tkFUI8wTHUUCl7gXwweJ4qtaSXIwKXYzNj +xqKHA8Purc1Yfybz4iE1JCROi9fInKlzr5xABq8nb9Qc/J9DIQM+Xmk= +-----END CERTIFICATE----- \ No newline at end of file diff --git a/command/agentproxyshared/auth/cert/test-fixtures/keys/key.pem b/command/agentproxyshared/auth/cert/test-fixtures/keys/key.pem new file mode 100644 index 0000000..add9820 --- /dev/null +++ b/command/agentproxyshared/auth/cert/test-fixtures/keys/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxSTRAVnygAftetT8pu +HflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGnSgMld6ZWRhNheZhA +6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmiYYMiIWplidMmMO5N +TRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5donyqtnaHuIJGuUd +y54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVGB+5+AAGF5iuHC3N2 +DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABAoIBAHR7fFV0eAGaopsX +9OD0TUGlsephBXb43g0GYHfJ/1Ew18w9oaxszJEqkl+PB4W3xZ3yG3e8ZomxDOhF +RreF2WgG5xOfhDogMwu6NodbArfgnAvoC6JnW3qha8HMP4F500RFVyCRcd6A3Frd +rFtaZn/UyCsBAN8/zkwPeYHayo7xX6d9kzgRl9HluEX5PXI5+3uiBDUiM085gkLI +5Cmadh9fMdjfhDXI4x2JYmILpp/9Nlc/krB15s5n1MPNtn3yL0TI0tWp0WlwDCV7 +oUm1SfIM0F1fXGFyFDcqwoIr6JCQgXk6XtTg31YhH1xgUIclUVdtHqmAwAbLdIhQ +GAiHn2kCgYEAwD4pZ8HfpiOG/EHNoWsMATc/5yC7O8F9WbvcHZQIymLY4v/7HKZb 
+VyOR6UQ5/O2cztSGIuKSF6+OK1C34lOyCuTSOTFrjlgEYtLIXjdGLfFdtOO8GRQR +akVXdwuzNAjTBaH5eXbG+NKcjmCvZL48dQVlfDTVulzFGbcsVTHIMQUCgYEA7IQI +FVsKnY3KqpyGqXq92LMcsT3XgW6X1BIIV+YhJ5AFUFkFrjrbXs94/8XyLfi0xBQy +efK+8g5sMs7koF8LyZEcAXWZJQduaKB71hoLlRaU4VQkL/dl2B6VFmAII/CsRCYh +r9RmDN2PF/mp98Ih9dpC1VqcCDRGoTYsd7jLalMCgYAMgH5k1wDaZxkSMp1S0AlZ +0uP+/evvOOgT+9mWutfPgZolOQx1koQCKLgGeX9j6Xf3I28NubpSfAI84uTyfQrp +FnRtb79U5Hh0jMynA+U2e6niZ6UF5H41cQj9Hu+qhKBkj2IP+h96cwfnYnZFkPGR +kqZE65KyqfHPeFATwkcImQKBgCdrfhlpGiTWXCABhKQ8s+WpPLAB2ahV8XJEKyXT +UlVQuMIChGLcpnFv7P/cUxf8asx/fUY8Aj0/0CLLvulHziQjTmKj4gl86pb/oIQ3 +xRRtNhU0O+/OsSfLORgIm3K6C0w0esregL/GMbJSR1TnA1gBr7/1oSnw5JC8Ab9W +injHAoGAJT1MGAiQrhlt9GCGe6Ajw4omdbY0wS9NXefnFhf7EwL0es52ezZ28zpU +2LXqSFbtann5CHgpSLxiMYPDIf+er4xgg9Bz34tz1if1rDfP2Qrxdrpr4jDnrGT3 +gYC2qCpvVD9RRUMKFfnJTfl5gMQdBW/LINkHtJ82snAeLl3gjQ4= +-----END RSA PRIVATE KEY----- diff --git a/command/agentproxyshared/auth/cert/test-fixtures/keys/pkioutput b/command/agentproxyshared/auth/cert/test-fixtures/keys/pkioutput new file mode 100644 index 0000000..526ff03 --- /dev/null +++ b/command/agentproxyshared/auth/cert/test-fixtures/keys/pkioutput @@ -0,0 +1,74 @@ +Key Value +lease_id pki/issue/example-dot-com/d8214077-9976-8c68-9c07-6610da30aea4 +lease_duration 279359999 +lease_renewable false +certificate -----BEGIN CERTIFICATE----- +MIIDtTCCAp2gAwIBAgIUf+jhKTFBnqSs34II0WS1L4QsbbAwDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzQxWhcNMjUw +MTA1MTAyODExWjAbMRkwFwYDVQQDExBjZXJ0LmV4YW1wbGUuY29tMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxS +TRAVnygAftetT8puHflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGn +SgMld6ZWRhNheZhA6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmi +YYMiIWplidMmMO5NTRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5 +donyqtnaHuIJGuUdy54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVG +B+5+AAGF5iuHC3N2DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABo4H1 +MIHyMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUm++e +HpyM3p708bgZJuRYEdX1o+UwHwYDVR0jBBgwFoAUncSzT/6HMexyuiU9/7EgHu+o +k5swOwYIKwYBBQUHAQEELzAtMCsGCCsGAQUFBzAChh9odHRwOi8vMTI3LjAuMC4x +OjgyMDAvdjEvcGtpL2NhMCEGA1UdEQQaMBiCEGNlcnQuZXhhbXBsZS5jb22HBH8A +AAEwMQYDVR0fBCowKDAmoCSgIoYgaHR0cDovLzEyNy4wLjAuMTo4MjAwL3YxL3Br +aS9jcmwwDQYJKoZIhvcNAQELBQADggEBABsuvmPSNjjKTVN6itWzdQy+SgMIrwfs +X1Yb9Lefkkwmp9ovKFNQxa4DucuCuzXcQrbKwWTfHGgR8ct4rf30xCRoA7dbQWq4 +aYqNKFWrRaBRAaaYZ/O1ApRTOrXqRx9Eqr0H1BXLsoAq+mWassL8sf6siae+CpwA +KqBko5G0dNXq5T4i2LQbmoQSVetIrCJEeMrU+idkuqfV2h1BQKgSEhFDABjFdTCN +QDAHsEHsi2M4/jRW9fqEuhHSDfl2n7tkFUI8wTHUUCl7gXwweJ4qtaSXIwKXYzNj +xqKHA8Purc1Yfybz4iE1JCROi9fInKlzr5xABq8nb9Qc/J9DIQM+Xmk= +-----END CERTIFICATE----- +issuing_ca -----BEGIN CERTIFICATE----- +MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw +MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7 +Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0 +z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x +AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb +6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH +SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G +A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx +7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc +BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA 
+wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2 +U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa +cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N +ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ +t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk +zehNe5dFTjFpylg1o6b8Ow== +-----END CERTIFICATE----- +private_key -----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAsZx0Svr82YJpFpIy4fJNW5fKA6B8mhxSTRAVnygAftetT8pu +HflY0ss7Y6X2OXjsU0PRn+1PswtivhKi+eLtgWkUF9cFYFGnSgMld6ZWRhNheZhA +6ZfQmeM/BF2pa5HK2SDF36ljgjL9T+nWrru2Uv0BCoHzLAmiYYMiIWplidMmMO5N +TRG3k+3AN0TkfakB6JVzjLGhTcXdOcVEMXkeQVqJMAuGouU5donyqtnaHuIJGuUd +y54YDnX86txhOQhAv6r7dHXzZxS4pmLvw8UI1rsSf/GLcUVGB+5+AAGF5iuHC3N2 +DTl4xz3FcN4Cb4w9pbaQ7+mCzz+anqiJfyr2nwIDAQABAoIBAHR7fFV0eAGaopsX +9OD0TUGlsephBXb43g0GYHfJ/1Ew18w9oaxszJEqkl+PB4W3xZ3yG3e8ZomxDOhF +RreF2WgG5xOfhDogMwu6NodbArfgnAvoC6JnW3qha8HMP4F500RFVyCRcd6A3Frd +rFtaZn/UyCsBAN8/zkwPeYHayo7xX6d9kzgRl9HluEX5PXI5+3uiBDUiM085gkLI +5Cmadh9fMdjfhDXI4x2JYmILpp/9Nlc/krB15s5n1MPNtn3yL0TI0tWp0WlwDCV7 +oUm1SfIM0F1fXGFyFDcqwoIr6JCQgXk6XtTg31YhH1xgUIclUVdtHqmAwAbLdIhQ +GAiHn2kCgYEAwD4pZ8HfpiOG/EHNoWsMATc/5yC7O8F9WbvcHZQIymLY4v/7HKZb +VyOR6UQ5/O2cztSGIuKSF6+OK1C34lOyCuTSOTFrjlgEYtLIXjdGLfFdtOO8GRQR +akVXdwuzNAjTBaH5eXbG+NKcjmCvZL48dQVlfDTVulzFGbcsVTHIMQUCgYEA7IQI +FVsKnY3KqpyGqXq92LMcsT3XgW6X1BIIV+YhJ5AFUFkFrjrbXs94/8XyLfi0xBQy +efK+8g5sMs7koF8LyZEcAXWZJQduaKB71hoLlRaU4VQkL/dl2B6VFmAII/CsRCYh +r9RmDN2PF/mp98Ih9dpC1VqcCDRGoTYsd7jLalMCgYAMgH5k1wDaZxkSMp1S0AlZ +0uP+/evvOOgT+9mWutfPgZolOQx1koQCKLgGeX9j6Xf3I28NubpSfAI84uTyfQrp +FnRtb79U5Hh0jMynA+U2e6niZ6UF5H41cQj9Hu+qhKBkj2IP+h96cwfnYnZFkPGR +kqZE65KyqfHPeFATwkcImQKBgCdrfhlpGiTWXCABhKQ8s+WpPLAB2ahV8XJEKyXT +UlVQuMIChGLcpnFv7P/cUxf8asx/fUY8Aj0/0CLLvulHziQjTmKj4gl86pb/oIQ3 +xRRtNhU0O+/OsSfLORgIm3K6C0w0esregL/GMbJSR1TnA1gBr7/1oSnw5JC8Ab9W +injHAoGAJT1MGAiQrhlt9GCGe6Ajw4omdbY0wS9NXefnFhf7EwL0es52ezZ28zpU +2LXqSFbtann5CHgpSLxiMYPDIf+er4xgg9Bz34tz1if1rDfP2Qrxdrpr4jDnrGT3 +gYC2qCpvVD9RRUMKFfnJTfl5gMQdBW/LINkHtJ82snAeLl3gjQ4= +-----END RSA PRIVATE KEY----- +private_key_type rsa diff --git a/command/agentproxyshared/auth/cert/test-fixtures/root/pkioutput b/command/agentproxyshared/auth/cert/test-fixtures/root/pkioutput new file mode 100644 index 0000000..312ae18 --- /dev/null +++ b/command/agentproxyshared/auth/cert/test-fixtures/root/pkioutput @@ -0,0 +1,74 @@ +Key Value +lease_id pki/root/generate/exported/7bf99d76-dd3e-2c5b-04ce-5253062ad586 +lease_duration 315359999 +lease_renewable false +certificate -----BEGIN CERTIFICATE----- +MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw +MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7 +Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0 +z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x +AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb +6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH +SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G +A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx +7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc +BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA +wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2 +U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa 
+cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N +ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ +t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk +zehNe5dFTjFpylg1o6b8Ow== +-----END CERTIFICATE----- +expiration 1.772072879e+09 +issuing_ca -----BEGIN CERTIFICATE----- +MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw +MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7 +Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0 +z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x +AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb +6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH +SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G +A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx +7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc +BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA +wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2 +U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa +cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N +ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ +t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk +zehNe5dFTjFpylg1o6b8Ow== +-----END CERTIFICATE----- +private_key -----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEA7FMy+FO4hFzZiHFmmY/B6A/zpyCep9PWZfGLUDtDuprHbg2p +t/TQjegMxC0AmWPZEHeG9FIJvT+WQXuLUG5v5MbG4zs21mqnYXwWAbTPaZ35bpp3 +BbbquXFtAqOZG8yfrob1g9OSgmY+bG3ZNxxv35qmbBbuWyUvmPwfjzEAclMxv48w +/2Rs4dXkBuvc9PiNdQ9Sv+ZYG8GIqIcbRd38cSaXI4Q94BOHEr4jm1vqb54H7twv +0Gy9CnLXfn7ZtEDFSmG8WLk2GvIna+UC+gnxSqKCw0rcTby63rQUCgdJZF8VQUVi +18+BMNLXI4pT/P9cxTaCJC/YeuV5a3SaniOoFQIDAQABAoIBAQCoGZJC84JnnIgb +ttZNWuWKBXbCJcDVDikOQJ9hBZbqsFg1X0CfGmQS3MHf9Ubc1Ro8zVjQh15oIEfn +8lIpdzTeXcpxLdiW8ix3ekVJF20F6pnXY8ZP6UnTeOwamXY6QPZAtb0D9UXcvY+f +nw+IVRD6082XS0Rmzu+peYWVXDy+FDN+HJRANBcdJZz8gOmNBIe0qDWx1b85d/s8 +2Kk1Wwdss1IwAGeSddTSwzBNaaHdItZaMZOqPW1gRyBfVSkcUQIE6zn2RKw2b70t +grkIvyRcTdfmiKbqkkJ+eR+ITOUt0cBZSH4cDjlQA+r7hulvoBpQBRj068Toxkcc +bTagHaPBAoGBAPWPGVkHqhTbJ/DjmqDIStxby2M1fhhHt4xUGHinhUYjQjGOtDQ9 +0mfaB7HObudRiSLydRAVGAHGyNJdQcTeFxeQbovwGiYKfZSA1IGpea7dTxPpGEdN +ksA0pzSp9MfKzX/MdLuAkEtO58aAg5YzsgX9hDNxo4MhH/gremZhEGZlAoGBAPZf +lqdYvAL0fjHGJ1FUEalhzGCGE9PH2iOqsxqLCXK7bDbzYSjvuiHkhYJHAOgVdiW1 +lB34UHHYAqZ1VVoFqJ05gax6DE2+r7K5VV3FUCaC0Zm3pavxchU9R/TKP82xRrBj +AFWwdgDTxUyvQEmgPR9sqorftO71Iz2tiwyTpIfxAoGBAIhEMLzHFAse0rtKkrRG +ccR27BbRyHeQ1Lp6sFnEHKEfT8xQdI/I/snCpCJ3e/PBu2g5Q9z416mktiyGs8ib +thTNgYsGYnxZtfaCx2pssanoBcn2wBJRae5fSapf5gY49HDG9MBYR7qCvvvYtSzU +4yWP2ZzyotpRt3vwJKxLkN5BAoGAORHpZvhiDNkvxj3da7Rqpu7VleJZA2y+9hYb +iOF+HcqWhaAY+I+XcTRrTMM/zYLzLEcEeXDEyao86uwxCjpXVZw1kotvAC9UqbTO +tnr3VwRkoxPsV4kFYTAh0+1pnC8dbcxxDmhi3Uww3tOVs7hfkEDuvF6XnebA9A+Y +LyCgMzECgYEA6cCU8QODOivIKWFRXucvWckgE6MYDBaAwe6qcLsd1Q/gpE2e3yQc +4RB3bcyiPROLzMLlXFxf1vSNJQdIaVfrRv+zJeGIiivLPU8+Eq4Lrb+tl1LepcOX +OzQeADTSCn5VidOfjDkIst9UXjMlrFfV9/oJEw5Eiqa6lkNPCGDhfA8= +-----END RSA PRIVATE KEY----- +private_key_type rsa +serial_number 6f:98:9d:f8:67:1a:31:e3:27:60:1b:f7:32:f7:53:19:68:a0:c8:9d diff --git a/command/agentproxyshared/auth/cert/test-fixtures/root/root.crl b/command/agentproxyshared/auth/cert/test-fixtures/root/root.crl new file mode 100644 index 0000000..a80c9e4 --- /dev/null +++ 
b/command/agentproxyshared/auth/cert/test-fixtures/root/root.crl @@ -0,0 +1,12 @@ +-----BEGIN X509 CRL----- +MIIBrjCBlzANBgkqhkiG9w0BAQsFADAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbRcN +MTYwMjI5MDIyOTE3WhcNMjUwMTA1MTAyOTE3WjArMCkCFG+YnfhnGjHjJ2Ab9zL3 +UxlooMidFxExNjAyMjgyMTI5MTctMDUwMKAjMCEwHwYDVR0jBBgwFoAUncSzT/6H +MexyuiU9/7EgHu+ok5swDQYJKoZIhvcNAQELBQADggEBAG9YDXpNe4LJroKZmVCn +HqMhW8eyzyaPak2nPPGCVUnc6vt8rlBYQU+xlBizD6xatZQDMPgrT8sBl9W3ysXk +RUlliHsT/SHddMz5dAZsBPRMJ7pYWLTx8jI4w2WRfbSyI4bY/6qTRNkEBUv+Fk8J +xvwB89+EM0ENcVMhv9ghsUA8h7kOg673HKwRstLDAzxS/uLmEzFjj8SV2m5DbV2Y +UUCKRSV20/kxJMIC9x2KikZhwOSyv1UE1otD+RQvbfAoZPUDmvp2FR/E0NGjBBOg +1TtCPRrl63cjqU3s8KQ4uah9Vj+Cwcu9n/yIKKtNQq4NKHvagv8GlUsoJ4BdAxCw +IA0= +-----END X509 CRL----- diff --git a/command/agentproxyshared/auth/cert/test-fixtures/root/rootcacert.pem b/command/agentproxyshared/auth/cert/test-fixtures/root/rootcacert.pem new file mode 100644 index 0000000..dcb307a --- /dev/null +++ b/command/agentproxyshared/auth/cert/test-fixtures/root/rootcacert.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw +MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7 +Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0 +z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x +AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb +6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH +SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G +A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx +7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc +BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA +wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2 +U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa +cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N +ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ +t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk +zehNe5dFTjFpylg1o6b8Ow== +-----END CERTIFICATE----- diff --git a/command/agentproxyshared/auth/cert/test-fixtures/root/rootcakey.pem b/command/agentproxyshared/auth/cert/test-fixtures/root/rootcakey.pem new file mode 100644 index 0000000..e950da5 --- /dev/null +++ b/command/agentproxyshared/auth/cert/test-fixtures/root/rootcakey.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEA7FMy+FO4hFzZiHFmmY/B6A/zpyCep9PWZfGLUDtDuprHbg2p +t/TQjegMxC0AmWPZEHeG9FIJvT+WQXuLUG5v5MbG4zs21mqnYXwWAbTPaZ35bpp3 +BbbquXFtAqOZG8yfrob1g9OSgmY+bG3ZNxxv35qmbBbuWyUvmPwfjzEAclMxv48w +/2Rs4dXkBuvc9PiNdQ9Sv+ZYG8GIqIcbRd38cSaXI4Q94BOHEr4jm1vqb54H7twv +0Gy9CnLXfn7ZtEDFSmG8WLk2GvIna+UC+gnxSqKCw0rcTby63rQUCgdJZF8VQUVi +18+BMNLXI4pT/P9cxTaCJC/YeuV5a3SaniOoFQIDAQABAoIBAQCoGZJC84JnnIgb +ttZNWuWKBXbCJcDVDikOQJ9hBZbqsFg1X0CfGmQS3MHf9Ubc1Ro8zVjQh15oIEfn +8lIpdzTeXcpxLdiW8ix3ekVJF20F6pnXY8ZP6UnTeOwamXY6QPZAtb0D9UXcvY+f +nw+IVRD6082XS0Rmzu+peYWVXDy+FDN+HJRANBcdJZz8gOmNBIe0qDWx1b85d/s8 +2Kk1Wwdss1IwAGeSddTSwzBNaaHdItZaMZOqPW1gRyBfVSkcUQIE6zn2RKw2b70t +grkIvyRcTdfmiKbqkkJ+eR+ITOUt0cBZSH4cDjlQA+r7hulvoBpQBRj068Toxkcc +bTagHaPBAoGBAPWPGVkHqhTbJ/DjmqDIStxby2M1fhhHt4xUGHinhUYjQjGOtDQ9 +0mfaB7HObudRiSLydRAVGAHGyNJdQcTeFxeQbovwGiYKfZSA1IGpea7dTxPpGEdN +ksA0pzSp9MfKzX/MdLuAkEtO58aAg5YzsgX9hDNxo4MhH/gremZhEGZlAoGBAPZf 
+lqdYvAL0fjHGJ1FUEalhzGCGE9PH2iOqsxqLCXK7bDbzYSjvuiHkhYJHAOgVdiW1 +lB34UHHYAqZ1VVoFqJ05gax6DE2+r7K5VV3FUCaC0Zm3pavxchU9R/TKP82xRrBj +AFWwdgDTxUyvQEmgPR9sqorftO71Iz2tiwyTpIfxAoGBAIhEMLzHFAse0rtKkrRG +ccR27BbRyHeQ1Lp6sFnEHKEfT8xQdI/I/snCpCJ3e/PBu2g5Q9z416mktiyGs8ib +thTNgYsGYnxZtfaCx2pssanoBcn2wBJRae5fSapf5gY49HDG9MBYR7qCvvvYtSzU +4yWP2ZzyotpRt3vwJKxLkN5BAoGAORHpZvhiDNkvxj3da7Rqpu7VleJZA2y+9hYb +iOF+HcqWhaAY+I+XcTRrTMM/zYLzLEcEeXDEyao86uwxCjpXVZw1kotvAC9UqbTO +tnr3VwRkoxPsV4kFYTAh0+1pnC8dbcxxDmhi3Uww3tOVs7hfkEDuvF6XnebA9A+Y +LyCgMzECgYEA6cCU8QODOivIKWFRXucvWckgE6MYDBaAwe6qcLsd1Q/gpE2e3yQc +4RB3bcyiPROLzMLlXFxf1vSNJQdIaVfrRv+zJeGIiivLPU8+Eq4Lrb+tl1LepcOX +OzQeADTSCn5VidOfjDkIst9UXjMlrFfV9/oJEw5Eiqa6lkNPCGDhfA8= +-----END RSA PRIVATE KEY----- diff --git a/command/agentproxyshared/auth/cf/cf.go b/command/agentproxyshared/auth/cf/cf.go new file mode 100644 index 0000000..3ee2077 --- /dev/null +++ b/command/agentproxyshared/auth/cf/cf.go @@ -0,0 +1,86 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cf + +import ( + "context" + "errors" + "fmt" + "io/ioutil" + "net/http" + "os" + "time" + + cf "github.com/hashicorp/vault-plugin-auth-cf" + "github.com/hashicorp/vault-plugin-auth-cf/signatures" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/auth" +) + +type cfMethod struct { + mountPath string + roleName string +} + +func NewCFAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { + if conf == nil { + return nil, errors.New("empty config") + } + if conf.Config == nil { + return nil, errors.New("empty config data") + } + a := &cfMethod{ + mountPath: conf.MountPath, + } + if raw, ok := conf.Config["role"]; ok { + if roleName, ok := raw.(string); ok { + a.roleName = roleName + } else { + return nil, errors.New("could not convert 'role' config value to string") + } + } else { + return nil, errors.New("missing 'role' value") + } + return a, nil +} + +func (p *cfMethod) Authenticate(ctx context.Context, client *api.Client) (string, http.Header, map[string]interface{}, error) { + pathToClientCert := os.Getenv(cf.EnvVarInstanceCertificate) + if pathToClientCert == "" { + return "", nil, nil, fmt.Errorf("missing %q value", cf.EnvVarInstanceCertificate) + } + certBytes, err := ioutil.ReadFile(pathToClientCert) + if err != nil { + return "", nil, nil, err + } + pathToClientKey := os.Getenv(cf.EnvVarInstanceKey) + if pathToClientKey == "" { + return "", nil, nil, fmt.Errorf("missing %q value", cf.EnvVarInstanceKey) + } + signingTime := time.Now().UTC() + signatureData := &signatures.SignatureData{ + SigningTime: signingTime, + Role: p.roleName, + CFInstanceCertContents: string(certBytes), + } + signature, err := signatures.Sign(pathToClientKey, signatureData) + if err != nil { + return "", nil, nil, err + } + data := map[string]interface{}{ + "role": p.roleName, + "cf_instance_cert": string(certBytes), + "signing_time": signingTime.Format(signatures.TimeFormat), + "signature": signature, + } + return fmt.Sprintf("%s/login", p.mountPath), nil, data, nil +} + +func (p *cfMethod) NewCreds() chan struct{} { + return nil +} + +func (p *cfMethod) CredSuccess() {} + +func (p *cfMethod) Shutdown() {} diff --git a/command/agentproxyshared/auth/gcp/gcp.go b/command/agentproxyshared/auth/gcp/gcp.go new file mode 100644 index 0000000..bb7c6ba --- /dev/null +++ b/command/agentproxyshared/auth/gcp/gcp.go @@ -0,0 +1,236 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package gcp
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"time"
+
+	cleanhttp "github.com/hashicorp/go-cleanhttp"
+	"github.com/hashicorp/go-gcp-common/gcputil"
+	hclog "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/go-secure-stdlib/parseutil"
+	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/command/agentproxyshared/auth"
+	"golang.org/x/oauth2"
+	"google.golang.org/api/iamcredentials/v1"
+)
+
+const (
+	typeGCE                    = "gce"
+	typeIAM                    = "iam"
+	identityEndpoint           = "http://metadata/computeMetadata/v1/instance/service-accounts/%s/identity"
+	defaultIamMaxJwtExpMinutes = 15
+)
+
+type gcpMethod struct {
+	logger         hclog.Logger
+	authType       string
+	mountPath      string
+	role           string
+	credentials    string
+	serviceAccount string
+	project        string
+	jwtExp         int64
+}
+
+func NewGCPAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) {
+	if conf == nil {
+		return nil, errors.New("empty config")
+	}
+	if conf.Config == nil {
+		return nil, errors.New("empty config data")
+	}
+
+	var err error
+
+	g := &gcpMethod{
+		logger:         conf.Logger,
+		mountPath:      conf.MountPath,
+		serviceAccount: "default",
+	}
+
+	typeRaw, ok := conf.Config["type"]
+	if !ok {
+		return nil, errors.New("missing 'type' value")
+	}
+	g.authType, ok = typeRaw.(string)
+	if !ok {
+		return nil, errors.New("could not convert 'type' config value to string")
+	}
+
+	roleRaw, ok := conf.Config["role"]
+	if !ok {
+		return nil, errors.New("missing 'role' value")
+	}
+	g.role, ok = roleRaw.(string)
+	if !ok {
+		return nil, errors.New("could not convert 'role' config value to string")
+	}
+
+	switch {
+	case g.role == "":
+		return nil, errors.New("'role' value is empty")
+	case g.authType == "":
+		return nil, errors.New("'type' value is empty")
+	case g.authType != typeGCE && g.authType != typeIAM:
+		return nil, errors.New("'type' value is invalid")
+	}
+
+	credentialsRaw, ok := conf.Config["credentials"]
+	if ok {
+		g.credentials, ok = credentialsRaw.(string)
+		if !ok {
+			return nil, errors.New("could not convert 'credentials' value into string")
+		}
+	}
+
+	serviceAccountRaw, ok := conf.Config["service_account"]
+	if ok {
+		g.serviceAccount, ok = serviceAccountRaw.(string)
+		if !ok {
+			return nil, errors.New("could not convert 'service_account' value into string")
+		}
+	}
+
+	projectRaw, ok := conf.Config["project"]
+	if ok {
+		g.project, ok = projectRaw.(string)
+		if !ok {
+			return nil, errors.New("could not convert 'project' value into string")
+		}
+	}
+
+	jwtExpRaw, ok := conf.Config["jwt_exp"]
+	if ok {
+		g.jwtExp, err = parseutil.ParseInt(jwtExpRaw)
+		if err != nil {
+			return nil, fmt.Errorf("error parsing 'jwt_exp' into integer: %w", err)
+		}
+	}
+
+	return g, nil
+}
+
+func (g *gcpMethod) Authenticate(ctx context.Context, client *api.Client) (retPath string, header http.Header, retData map[string]interface{}, retErr error) {
+	g.logger.Trace("beginning authentication")
+
+	data := make(map[string]interface{})
+	var jwt string
+
+	switch g.authType {
+	case typeGCE:
+		httpClient := cleanhttp.DefaultClient()
+
+		// Fetch token
+		{
+			req, err := http.NewRequest("GET", fmt.Sprintf(identityEndpoint, g.serviceAccount), nil)
+			if err != nil {
+				retErr = fmt.Errorf("error creating request: %w", err)
+				return
+			}
+			req = req.WithContext(ctx)
+			req.Header.Add("Metadata-Flavor", "Google")
+			q := req.URL.Query()
+			q.Add("audience", fmt.Sprintf("%s/vault/%s", client.Address(), g.role))
+			q.Add("format", "full")
+			req.URL.RawQuery = q.Encode()
+			resp, err := httpClient.Do(req)
+			if err != nil {
+				retErr = fmt.Errorf("error fetching instance token: %w", err)
+				return
+			}
+			if resp == nil {
+				retErr = errors.New("empty response fetching instance token")
+				return
+			}
+			defer resp.Body.Close()
+			jwtBytes, err := ioutil.ReadAll(resp.Body)
+			if err != nil {
+				retErr = fmt.Errorf("error reading instance token response body: %w", err)
+				return
+			}
+
+			jwt = string(jwtBytes)
+		}
+
+	default:
+		ctx := context.WithValue(context.Background(), oauth2.HTTPClient, cleanhttp.DefaultClient())
+
+		credentials, tokenSource, err := gcputil.FindCredentials(g.credentials, ctx, iamcredentials.CloudPlatformScope)
+		if err != nil {
+			retErr = fmt.Errorf("could not obtain credentials: %w", err)
+			return
+		}
+
+		httpClient := oauth2.NewClient(ctx, tokenSource)
+
+		var serviceAccount string
+		if g.serviceAccount == "" && credentials != nil {
+			serviceAccount = credentials.ClientEmail
+		} else {
+			serviceAccount = g.serviceAccount
+		}
+		if serviceAccount == "" {
+			retErr = errors.New("could not obtain service account from credentials (possibly Application Default Credentials are being used); a service account to authenticate as must be provided")
+			return
+		}
+
+		ttlMin := int64(defaultIamMaxJwtExpMinutes)
+		if g.jwtExp != 0 {
+			ttlMin = g.jwtExp
+		}
+		ttl := time.Minute * time.Duration(ttlMin)
+
+		jwtPayload := map[string]interface{}{
+			"aud": fmt.Sprintf("http://vault/%s", g.role),
+			"sub": serviceAccount,
+			"exp": time.Now().Add(ttl).Unix(),
+		}
+		payloadBytes, err := json.Marshal(jwtPayload)
+		if err != nil {
+			retErr = fmt.Errorf("could not convert JWT payload to JSON string: %w", err)
+			return
+		}
+
+		jwtReq := &iamcredentials.SignJwtRequest{
+			Payload: string(payloadBytes),
+		}
+
+		iamClient, err := iamcredentials.New(httpClient)
+		if err != nil {
+			retErr = fmt.Errorf("could not create IAM client: %w", err)
+			return
+		}
+
+		resourceName := fmt.Sprintf("projects/-/serviceAccounts/%s", serviceAccount)
+		resp, err := iamClient.Projects.ServiceAccounts.SignJwt(resourceName, jwtReq).Do()
+		if err != nil {
+			retErr = fmt.Errorf("unable to sign JWT for %s using given Vault credentials: %w", resourceName, err)
+			return
+		}
+
+		jwt = resp.SignedJwt
+	}
+
+	data["role"] = g.role
+	data["jwt"] = jwt
+
+	return fmt.Sprintf("%s/login", g.mountPath), nil, data, nil
+}
+
+func (g *gcpMethod) NewCreds() chan struct{} {
+	return nil
+}
+
+func (g *gcpMethod) CredSuccess() {
+}
+
+func (g *gcpMethod) Shutdown() {
+}
diff --git a/command/agentproxyshared/auth/jwt/jwt.go b/command/agentproxyshared/auth/jwt/jwt.go
new file mode 100644
index 0000000..fa87827
--- /dev/null
+++ b/command/agentproxyshared/auth/jwt/jwt.go
@@ -0,0 +1,260 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package jwt + +import ( + "context" + "errors" + "fmt" + "io/fs" + "net/http" + "os" + "path/filepath" + "sync" + "sync/atomic" + "time" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/sdk/helper/parseutil" +) + +type jwtMethod struct { + logger hclog.Logger + path string + mountPath string + role string + removeJWTAfterReading bool + removeJWTFollowsSymlinks bool + credsFound chan struct{} + watchCh chan string + stopCh chan struct{} + doneCh chan struct{} + credSuccessGate chan struct{} + ticker *time.Ticker + once *sync.Once + latestToken *atomic.Value +} + +// NewJWTAuthMethod returns an implementation of Agent's auth.AuthMethod +// interface for JWT auth. +func NewJWTAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { + if conf == nil { + return nil, errors.New("empty config") + } + if conf.Config == nil { + return nil, errors.New("empty config data") + } + + j := &jwtMethod{ + logger: conf.Logger, + mountPath: conf.MountPath, + removeJWTAfterReading: true, + credsFound: make(chan struct{}), + watchCh: make(chan string), + stopCh: make(chan struct{}), + doneCh: make(chan struct{}), + credSuccessGate: make(chan struct{}), + once: new(sync.Once), + latestToken: new(atomic.Value), + } + j.latestToken.Store("") + + pathRaw, ok := conf.Config["path"] + if !ok { + return nil, errors.New("missing 'path' value") + } + j.path, ok = pathRaw.(string) + if !ok { + return nil, errors.New("could not convert 'path' config value to string") + } + + roleRaw, ok := conf.Config["role"] + if !ok { + return nil, errors.New("missing 'role' value") + } + j.role, ok = roleRaw.(string) + if !ok { + return nil, errors.New("could not convert 'role' config value to string") + } + + if removeJWTAfterReadingRaw, ok := conf.Config["remove_jwt_after_reading"]; ok { + removeJWTAfterReading, err := parseutil.ParseBool(removeJWTAfterReadingRaw) + if err != nil { + return nil, fmt.Errorf("error parsing 'remove_jwt_after_reading' value: %w", err) + } + j.removeJWTAfterReading = removeJWTAfterReading + } + + if removeJWTFollowsSymlinksRaw, ok := conf.Config["remove_jwt_follows_symlinks"]; ok { + removeJWTFollowsSymlinks, err := parseutil.ParseBool(removeJWTFollowsSymlinksRaw) + if err != nil { + return nil, fmt.Errorf("error parsing 'remove_jwt_follows_symlinks' value: %w", err) + } + j.removeJWTFollowsSymlinks = removeJWTFollowsSymlinks + } + + switch { + case j.path == "": + return nil, errors.New("'path' value is empty") + case j.role == "": + return nil, errors.New("'role' value is empty") + } + + // Default readPeriod + readPeriod := 1 * time.Minute + + if jwtReadPeriodRaw, ok := conf.Config["jwt_read_period"]; ok { + jwtReadPeriod, err := parseutil.ParseDurationSecond(jwtReadPeriodRaw) + if err != nil { + return nil, fmt.Errorf("error parsing 'jwt_read_period' value: %w", err) + } + readPeriod = jwtReadPeriod + } else { + // If we don't delete the JWT after reading, use a slower reload period, + // otherwise we would re-read the whole file every 500ms, instead of just + // doing a stat on the file every 500ms. 
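+		// Illustrative only: a hypothetical auto_auth stanza exercising this
+		// read-period logic; every value below is a placeholder, not an
+		// upstream default:
+		//
+		//   auto_auth {
+		//     method "jwt" {
+		//       config = {
+		//         path                     = "/var/run/secrets/tokens/vault-token"
+		//         role                     = "my-role"
+		//         remove_jwt_after_reading = "false"
+		//         jwt_read_period          = "30s"
+		//       }
+		//     }
+		//   }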
+ if j.removeJWTAfterReading { + readPeriod = 500 * time.Millisecond + } + } + + j.ticker = time.NewTicker(readPeriod) + + go j.runWatcher() + + j.logger.Info("jwt auth method created", "path", j.path) + + return j, nil +} + +func (j *jwtMethod) Authenticate(_ context.Context, _ *api.Client) (string, http.Header, map[string]interface{}, error) { + j.logger.Trace("beginning authentication") + + j.ingressToken() + + latestToken := j.latestToken.Load().(string) + if latestToken == "" { + return "", nil, nil, errors.New("latest known jwt is empty, cannot authenticate") + } + + return fmt.Sprintf("%s/login", j.mountPath), nil, map[string]interface{}{ + "role": j.role, + "jwt": latestToken, + }, nil +} + +func (j *jwtMethod) NewCreds() chan struct{} { + return j.credsFound +} + +func (j *jwtMethod) CredSuccess() { + j.once.Do(func() { + close(j.credSuccessGate) + }) +} + +func (j *jwtMethod) Shutdown() { + j.ticker.Stop() + close(j.stopCh) + <-j.doneCh +} + +func (j *jwtMethod) runWatcher() { + defer close(j.doneCh) + + select { + case <-j.stopCh: + return + + case <-j.credSuccessGate: + // We only start the next loop once we're initially successful, + // since at startup Authenticate will be called, and we don't want + // to end up immediately re-authenticating by having found a new + // value + } + + for { + select { + case <-j.stopCh: + return + + case <-j.ticker.C: + latestToken := j.latestToken.Load().(string) + j.ingressToken() + newToken := j.latestToken.Load().(string) + if newToken != latestToken { + j.logger.Debug("new jwt file found") + j.credsFound <- struct{}{} + } + } + } +} + +func (j *jwtMethod) ingressToken() { + fi, err := os.Lstat(j.path) + if err != nil { + if os.IsNotExist(err) { + return + } + j.logger.Error("error encountered stat'ing jwt file", "error", err) + return + } + + // Check that the path refers to a file. + // If it's a symlink, it could still be a symlink to a directory, + // but os.ReadFile below will return a descriptive error. + evalSymlinkPath := j.path + switch mode := fi.Mode(); { + case mode.IsRegular(): + // regular file + case mode&fs.ModeSymlink != 0: + // If our file path is a symlink, we should also return early (like above) without error + // if the file that is linked to is not present, otherwise we will error when trying + // to read that file by following the link in the os.ReadFile call. 
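+		// e.g. a path like /run/vault/jwt -> /data/token resolves to
+		// /data/token below (path names are illustrative only).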
+		evalSymlinkPath, err = filepath.EvalSymlinks(j.path)
+		if err != nil {
+			j.logger.Error("error encountered evaluating symlinks", "error", err)
+			return
+		}
+		_, err := os.Stat(evalSymlinkPath)
+		if err != nil {
+			if os.IsNotExist(err) {
+				return
+			}
+			j.logger.Error("error encountered stat'ing jwt file after evaluating symlinks", "error", err)
+			return
+		}
+	default:
+		j.logger.Error("jwt file is not a regular file or symlink")
+		return
+	}
+
+	token, err := os.ReadFile(j.path)
+	if err != nil {
+		j.logger.Error("failed to read jwt file", "error", err)
+		return
+	}
+
+	switch len(token) {
+	case 0:
+		j.logger.Warn("empty jwt file read")
+
+	default:
+		j.latestToken.Store(string(token))
+	}
+
+	if j.removeJWTAfterReading {
+		pathToRemove := j.path
+		if j.removeJWTFollowsSymlinks {
+			// If removeJWTFollowsSymlinks is set, we follow the symlink and delete the jwt,
+			// not just the symlink that links to the jwt
+			pathToRemove = evalSymlinkPath
+		}
+		if err := os.Remove(pathToRemove); err != nil {
+			j.logger.Error("error removing jwt file", "error", err)
+		}
+	}
+}
diff --git a/command/agentproxyshared/auth/jwt/jwt_test.go b/command/agentproxyshared/auth/jwt/jwt_test.go
new file mode 100644
index 0000000..3e0db40
--- /dev/null
+++ b/command/agentproxyshared/auth/jwt/jwt_test.go
@@ -0,0 +1,262 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package jwt
+
+import (
+	"bytes"
+	"os"
+	"path"
+	"strings"
+	"sync/atomic"
+	"testing"
+
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/command/agentproxyshared/auth"
+)
+
+func TestIngressToken(t *testing.T) {
+	const (
+		dir       = "dir"
+		file      = "file"
+		empty     = "empty"
+		missing   = "missing"
+		symlinked = "symlinked"
+	)
+
+	rootDir, err := os.MkdirTemp("", "vault-agent-jwt-auth-test")
+	if err != nil {
+		t.Fatalf("failed to create temp dir: %s", err)
+	}
+	defer os.RemoveAll(rootDir)
+
+	setupTestDir := func() string {
+		testDir, err := os.MkdirTemp(rootDir, "")
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = os.WriteFile(path.Join(testDir, file), []byte("test"), 0o644)
+		if err != nil {
+			t.Fatal(err)
+		}
+		_, err = os.Create(path.Join(testDir, empty))
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = os.Mkdir(path.Join(testDir, dir), 0o755)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = os.Symlink(path.Join(testDir, file), path.Join(testDir, symlinked))
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		return testDir
+	}
+
+	for _, tc := range []struct {
+		name      string
+		path      string
+		errString string
+	}{
+		{
+			"happy path",
+			file,
+			"",
+		},
+		{
+			"path is directory",
+			dir,
+			"[ERROR] jwt file is not a regular file or symlink",
+		},
+		{
+			"path is symlink",
+			symlinked,
+			"",
+		},
+		{
+			"path is missing (implies nothing for ingressToken to do)",
+			missing,
+			"",
+		},
+		{
+			"path is empty file",
+			empty,
+			"[WARN] empty jwt file read",
+		},
+	} {
+		testDir := setupTestDir()
+		logBuffer := bytes.Buffer{}
+		jwtAuth := &jwtMethod{
+			logger: hclog.New(&hclog.LoggerOptions{
+				Output: &logBuffer,
+			}),
+			latestToken: new(atomic.Value),
+			path:        path.Join(testDir, tc.path),
+		}
+
+		jwtAuth.ingressToken()
+
+		if tc.errString != "" {
+			if !strings.Contains(logBuffer.String(), tc.errString) {
+				t.Fatal("logs did not contain expected error", tc.errString, logBuffer.String())
+			}
+		} else {
+			if strings.Contains(logBuffer.String(), "[ERROR]") || strings.Contains(logBuffer.String(), "[WARN]") {
+				t.Fatal("logs contained unexpected error", logBuffer.String())
+			}
+		}
+	}
+}
+
+func TestDeleteAfterReading(t *testing.T) {
+	for _, tc := 
range map[string]struct { + configValue string + shouldDelete bool + }{ + "default": { + "", + true, + }, + "explicit true": { + "true", + true, + }, + "false": { + "false", + false, + }, + } { + rootDir, err := os.MkdirTemp("", "vault-agent-jwt-auth-test") + if err != nil { + t.Fatalf("failed to create temp dir: %s", err) + } + defer os.RemoveAll(rootDir) + tokenPath := path.Join(rootDir, "token") + err = os.WriteFile(tokenPath, []byte("test"), 0o644) + if err != nil { + t.Fatal(err) + } + + config := &auth.AuthConfig{ + Config: map[string]interface{}{ + "path": tokenPath, + "role": "unusedrole", + }, + Logger: hclog.Default(), + } + if tc.configValue != "" { + config.Config["remove_jwt_after_reading"] = tc.configValue + } + + jwtAuth, err := NewJWTAuthMethod(config) + if err != nil { + t.Fatal(err) + } + + jwtAuth.(*jwtMethod).ingressToken() + + if _, err := os.Lstat(tokenPath); tc.shouldDelete { + if err == nil || !os.IsNotExist(err) { + t.Fatal(err) + } + } else { + if err != nil { + t.Fatal(err) + } + } + } +} + +func TestDeleteAfterReadingSymlink(t *testing.T) { + for _, tc := range map[string]struct { + configValue string + shouldDelete bool + removeJWTFollowsSymlinks bool + }{ + "default": { + "", + true, + false, + }, + "explicit true": { + "true", + true, + false, + }, + "false": { + "false", + false, + false, + }, + "default + removeJWTFollowsSymlinks": { + "", + true, + true, + }, + "explicit true + removeJWTFollowsSymlinks": { + "true", + true, + true, + }, + "false + removeJWTFollowsSymlinks": { + "false", + false, + true, + }, + } { + rootDir, err := os.MkdirTemp("", "vault-agent-jwt-auth-test") + if err != nil { + t.Fatalf("failed to create temp dir: %s", err) + } + defer os.RemoveAll(rootDir) + tokenPath := path.Join(rootDir, "token") + err = os.WriteFile(tokenPath, []byte("test"), 0o644) + if err != nil { + t.Fatal(err) + } + + symlink, err := os.CreateTemp("", "auth.jwt.symlink.test.") + if err != nil { + t.Fatal(err) + } + symlinkName := symlink.Name() + symlink.Close() + os.Remove(symlinkName) + os.Symlink(tokenPath, symlinkName) + + config := &auth.AuthConfig{ + Config: map[string]interface{}{ + "path": symlinkName, + "role": "unusedrole", + }, + Logger: hclog.Default(), + } + if tc.configValue != "" { + config.Config["remove_jwt_after_reading"] = tc.configValue + } + config.Config["remove_jwt_follows_symlinks"] = tc.removeJWTFollowsSymlinks + + jwtAuth, err := NewJWTAuthMethod(config) + if err != nil { + t.Fatal(err) + } + + jwtAuth.(*jwtMethod).ingressToken() + + pathToCheck := symlinkName + if tc.removeJWTFollowsSymlinks { + pathToCheck = tokenPath + } + if _, err := os.Lstat(pathToCheck); tc.shouldDelete { + if err == nil || !os.IsNotExist(err) { + t.Fatal(err) + } + } else { + if err != nil { + t.Fatal(err) + } + } + } +} diff --git a/command/agentproxyshared/auth/kerberos/integtest/integrationtest.sh b/command/agentproxyshared/auth/kerberos/integtest/integrationtest.sh new file mode 100755 index 0000000..b3d9edf --- /dev/null +++ b/command/agentproxyshared/auth/kerberos/integtest/integrationtest.sh @@ -0,0 +1,173 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +# Instructions +# This integration test is for the Vault Kerberos agent. +# Before running, execute: +# pip install --quiet requests-kerberos +# Then run this test from Vault's home directory. 
+# ./command/agent/auth/kerberos/integtest/integrationtest.sh + +if [[ "$OSTYPE" == "darwin"* ]]; then + base64cmd="base64 -D" +else + base64cmd="base64 -d" +fi + +VAULT_PORT=8200 +SAMBA_VER=4.8.12 + +export VAULT_TOKEN=${VAULT_TOKEN:-myroot} +DOMAIN_ADMIN_PASS=Pa55word! +DOMAIN_VAULT_ACCOUNT=vault_svc +DOMAIN_VAULT_PASS=vaultPa55word! +DOMAIN_USER_ACCOUNT=grace +DOMAIN_USER_PASS=gracePa55word! + +SAMBA_CONF_FILE=/srv/etc/smb.conf +DOMAIN_NAME=matrix +DNS_NAME=host +REALM_NAME=MATRIX.LAN +DOMAIN_DN=DC=MATRIX,DC=LAN +TESTS_DIR=/tmp/vault_plugin_tests + +function add_user() { + + username="${1}" + password="${2}" + + if [[ $(check_user ${username}) -eq 0 ]] + then + echo "add user '${username}'" + + docker exec $SAMBA_CONTAINER \ + /usr/bin/samba-tool user create \ + ${username} \ + ${password}\ + --configfile=${SAMBA_CONF_FILE} + fi +} + +function check_user() { + + username="${1}" + + docker exec $SAMBA_CONTAINER \ + /usr/bin/samba-tool user list \ + --configfile=${SAMBA_CONF_FILE} \ + | grep -c ${username} +} + +function create_keytab() { + + username="${1}" + password="${2}" + + user_kvno=$(docker exec $SAMBA_CONTAINER \ + bash -c "ldapsearch -H ldaps://localhost -D \"Administrator@${REALM_NAME}\" -w \"${DOMAIN_ADMIN_PASS}\" -b \"CN=Users,${DOMAIN_DN}\" -LLL \"(&(objectClass=user)(sAMAccountName=${username}))\" msDS-KeyVersionNumber | sed -n 's/^[ \t]*msDS-KeyVersionNumber:[ \t]*\(.*\)/\1/p'") + + docker exec $SAMBA_CONTAINER \ + bash -c "printf \"%b\" \"addent -password -p \"${username}@${REALM_NAME}\" -k ${user_kvno} -e rc4-hmac\n${password}\nwrite_kt ${username}.keytab\" | ktutil" + + docker exec $SAMBA_CONTAINER \ + bash -c "printf \"%b\" \"read_kt ${username}.keytab\nlist\" | ktutil" + + docker exec $SAMBA_CONTAINER \ + base64 ${username}.keytab > ${TESTS_DIR}/integration/${username}.keytab.base64 + + docker cp $SAMBA_CONTAINER:/${username}.keytab ${TESTS_DIR}/integration/ +} + +function main() { + # make and start vault + make dev + vault server -dev -dev-root-token-id=root & + + # start our domain controller + SAMBA_CONTAINER=$(docker run --net=${DNS_NAME} -d -ti --privileged -e "SAMBA_DC_ADMIN_PASSWD=${DOMAIN_ADMIN_PASS}" -e "KERBEROS_PASSWORD=${DOMAIN_ADMIN_PASS}" -e SAMBA_DC_DOMAIN=${DOMAIN_NAME} -e SAMBA_DC_REALM=${REALM_NAME} "bodsch/docker-samba4:${SAMBA_VER}") + sleep 15 + + # set up users + add_user $DOMAIN_VAULT_ACCOUNT $DOMAIN_VAULT_PASS + create_keytab $DOMAIN_VAULT_ACCOUNT $DOMAIN_VAULT_PASS + + add_user $DOMAIN_USER_ACCOUNT $DOMAIN_USER_PASS + create_keytab $DOMAIN_USER_ACCOUNT $DOMAIN_USER_PASS + + # add the service principals we'll need + docker exec $SAMBA_CONTAINER \ + samba-tool spn add HTTP/localhost ${DOMAIN_VAULT_ACCOUNT} --configfile=${SAMBA_CONF_FILE} + docker exec $SAMBA_CONTAINER \ + samba-tool spn add HTTP/localhost:${VAULT_PORT} ${DOMAIN_VAULT_ACCOUNT} --configfile=${SAMBA_CONF_FILE} + docker exec $SAMBA_CONTAINER \ + samba-tool spn add HTTP/localhost.${DNS_NAME} ${DOMAIN_VAULT_ACCOUNT} --configfile=${SAMBA_CONF_FILE} + docker exec $SAMBA_CONTAINER \ + samba-tool spn add HTTP/localhost.${DNS_NAME}:${VAULT_PORT} ${DOMAIN_VAULT_ACCOUNT} --configfile=${SAMBA_CONF_FILE} + + # enable and configure the kerberos plugin in Vault + vault auth enable -passthrough-request-headers=Authorization -allowed-response-headers=www-authenticate kerberos + vault write auth/kerberos/config keytab=@${TESTS_DIR}/integration/vault_svc.keytab.base64 service_account="vault_svc" + vault write auth/kerberos/config/ldap binddn=${DOMAIN_VAULT_ACCOUNT}@${REALM_NAME} 
bindpass=${DOMAIN_VAULT_PASS} groupattr=sAMAccountName groupdn="${DOMAIN_DN}" groupfilter="(&(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}}))" insecure_tls=true starttls=true userdn="CN=Users,${DOMAIN_DN}" userattr=sAMAccountName upndomain=${REALM_NAME} url=ldaps://localhost:636 + + mkdir -p ${TESTS_DIR}/integration + + echo " +[libdefaults] + default_realm = ${REALM_NAME} + dns_lookup_realm = false + dns_lookup_kdc = true + ticket_lifetime = 24h + renew_lifetime = 7d + forwardable = true + rdns = false + preferred_preauth_types = 23 +[realms] + ${REALM_NAME} = { + kdc = localhost + admin_server = localhost + master_kdc = localhost + default_domain = localhost + } +" > ${TESTS_DIR}/integration/krb5.conf + + echo " +auto_auth { + method \"kerberos\" { + mount_path = \"auth/kerberos\" + config = { + username = \"$DOMAIN_USER_ACCOUNT\" + service = \"HTTP/localhost:8200\" + realm = \"$REALM_NAME\" + keytab_path = \"$TESTS_DIR/integration/grace.keytab\" + krb5conf_path = \"$TESTS_DIR/integration/krb5.conf\" + } + } + sink \"file\" { + config = { + path = \"$TESTS_DIR/integration/agent-token.txt\" + } + } +} +" > ${TESTS_DIR}/integration/agent.conf + + vault agent -config=${TESTS_DIR}/integration/agent.conf & + sleep 10 + token=$(cat $TESTS_DIR/integration/agent-token.txt) + + # clean up: kill vault and stop the docker container we started + kill -9 $(ps aux | grep vault | awk '{print $2}' | head -1) # kill vault server + kill -9 $(ps aux | grep vault | awk '{print $2}' | head -1) # kill vault agent + docker rm -f ${SAMBA_CONTAINER} + + # a valid Vault token starts with "s.", check for that + if [[ $token != s.* ]]; then + echo "received invalid token: $token" + return 1 + fi + + echo "vault kerberos agent obtained auth token: $token" + echo "exiting successfully!" + return 0 +} +main diff --git a/command/agentproxyshared/auth/kerberos/kerberos.go b/command/agentproxyshared/auth/kerberos/kerberos.go new file mode 100644 index 0000000..67a3109 --- /dev/null +++ b/command/agentproxyshared/auth/kerberos/kerberos.go @@ -0,0 +1,106 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package kerberos
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/go-secure-stdlib/parseutil"
+	kerberos "github.com/hashicorp/vault-plugin-auth-kerberos"
+	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/command/agentproxyshared/auth"
+	"github.com/jcmturner/gokrb5/v8/spnego"
+)
+
+type kerberosMethod struct {
+	logger    hclog.Logger
+	mountPath string
+	loginCfg  *kerberos.LoginCfg
+}
+
+func NewKerberosAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) {
+	if conf == nil {
+		return nil, errors.New("empty config")
+	}
+	if conf.Config == nil {
+		return nil, errors.New("empty config data")
+	}
+	username, err := read("username", conf.Config)
+	if err != nil {
+		return nil, err
+	}
+	service, err := read("service", conf.Config)
+	if err != nil {
+		return nil, err
+	}
+	realm, err := read("realm", conf.Config)
+	if err != nil {
+		return nil, err
+	}
+	keytabPath, err := read("keytab_path", conf.Config)
+	if err != nil {
+		return nil, err
+	}
+	krb5ConfPath, err := read("krb5conf_path", conf.Config)
+	if err != nil {
+		return nil, err
+	}
+
+	disableFast := false
+	disableFastRaw, ok := conf.Config["disable_fast_negotiation"]
+	if ok {
+		disableFast, err = parseutil.ParseBool(disableFastRaw)
+		if err != nil {
+			return nil, fmt.Errorf("error parsing 'disable_fast_negotiation': %s", err)
+		}
+	}
+
+	return &kerberosMethod{
+		logger:    conf.Logger,
+		mountPath: conf.MountPath,
+		loginCfg: &kerberos.LoginCfg{
+			Username:               username,
+			Service:                service,
+			Realm:                  realm,
+			KeytabPath:             keytabPath,
+			Krb5ConfPath:           krb5ConfPath,
+			DisableFASTNegotiation: disableFast,
+		},
+	}, nil
+}
+
+func (k *kerberosMethod) Authenticate(context.Context, *api.Client) (string, http.Header, map[string]interface{}, error) {
+	k.logger.Trace("beginning authentication")
+	authHeaderVal, err := kerberos.GetAuthHeaderVal(k.loginCfg)
+	if err != nil {
+		return "", nil, nil, err
+	}
+	var header http.Header
+	header = make(map[string][]string)
+	header.Set(spnego.HTTPHeaderAuthRequest, authHeaderVal)
+	return k.mountPath + "/login", header, make(map[string]interface{}), nil
+}
+
+// These functions are implemented to meet the AuthHandler interface,
+// but we don't need to take advantage of them.
+func (k *kerberosMethod) NewCreds() chan struct{} { return nil }
+func (k *kerberosMethod) CredSuccess()            {}
+func (k *kerberosMethod) Shutdown()               {}
+
+// read reads a key from a map and converts its value to a string.
+func read(key string, m map[string]interface{}) (string, error) {
+	raw, ok := m[key]
+	if !ok {
+		return "", fmt.Errorf("%q is required", key)
+	}
+	v, ok := raw.(string)
+	if !ok {
+		return "", fmt.Errorf("%q must be a string", key)
+	}
+	return v, nil
+}
diff --git a/command/agentproxyshared/auth/kerberos/kerberos_test.go b/command/agentproxyshared/auth/kerberos/kerberos_test.go
new file mode 100644
index 0000000..070893d
--- /dev/null
+++ b/command/agentproxyshared/auth/kerberos/kerberos_test.go
@@ -0,0 +1,87 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package kerberos
+
+import (
+	"testing"
+
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/command/agentproxyshared/auth"
+)
+
+func TestNewKerberosAuthMethod(t *testing.T) {
+	if _, err := NewKerberosAuthMethod(nil); err == nil {
+		t.Fatal("err should be returned for nil input")
+	}
+	if _, err := NewKerberosAuthMethod(&auth.AuthConfig{}); err == nil {
+		t.Fatal("err should be returned for nil config map")
+	}
+
+	authConfig := simpleAuthConfig()
+	delete(authConfig.Config, "username")
+	if _, err := NewKerberosAuthMethod(authConfig); err == nil {
+		t.Fatal("err should be returned for missing username")
+	}
+
+	authConfig = simpleAuthConfig()
+	delete(authConfig.Config, "service")
+	if _, err := NewKerberosAuthMethod(authConfig); err == nil {
+		t.Fatal("err should be returned for missing service")
+	}
+
+	authConfig = simpleAuthConfig()
+	delete(authConfig.Config, "realm")
+	if _, err := NewKerberosAuthMethod(authConfig); err == nil {
+		t.Fatal("err should be returned for missing realm")
+	}
+
+	authConfig = simpleAuthConfig()
+	delete(authConfig.Config, "keytab_path")
+	if _, err := NewKerberosAuthMethod(authConfig); err == nil {
+		t.Fatal("err should be returned for missing keytab_path")
+	}
+
+	authConfig = simpleAuthConfig()
+	delete(authConfig.Config, "krb5conf_path")
+	if _, err := NewKerberosAuthMethod(authConfig); err == nil {
+		t.Fatal("err should be returned for missing krb5conf_path")
+	}
+
+	authConfig = simpleAuthConfig()
+	authMethod, err := NewKerberosAuthMethod(authConfig)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// False by default
+	if actual := authMethod.(*kerberosMethod).loginCfg.DisableFASTNegotiation; actual {
+		t.Fatalf("disable_fast_negotiation should be false, it wasn't: %t", actual)
+	}
+
+	authConfig.Config["disable_fast_negotiation"] = "true"
+	authMethod, err = NewKerberosAuthMethod(authConfig)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// True from override
+	if actual := authMethod.(*kerberosMethod).loginCfg.DisableFASTNegotiation; !actual {
+		t.Fatalf("disable_fast_negotiation should be true, it wasn't: %t", actual)
+	}
+}
+
+func simpleAuthConfig() *auth.AuthConfig {
+	return &auth.AuthConfig{
+		Logger:    hclog.NewNullLogger(),
+		MountPath: "kerberos",
+		WrapTTL:   20,
+		Config: map[string]interface{}{
+			"username":      "grace",
+			"service":       "HTTP/05a65fad28ef.matrix.lan:8200",
+			"realm":         "MATRIX.LAN",
+			"keytab_path":   "grace.keytab",
+			"krb5conf_path": "krb5.conf",
+		},
+	}
+}
diff --git a/command/agentproxyshared/auth/kubernetes/kubernetes.go b/command/agentproxyshared/auth/kubernetes/kubernetes.go
new file mode 100644
index 0000000..acbb8c0
--- /dev/null
+++ b/command/agentproxyshared/auth/kubernetes/kubernetes.go
@@ -0,0 +1,132 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package kubernetes
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"strings"
+
+	hclog "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/command/agentproxyshared/auth"
+)
+
+const (
+	serviceAccountFile = "/var/run/secrets/kubernetes.io/serviceaccount/token"
+)
+
+type kubernetesMethod struct {
+	logger    hclog.Logger
+	mountPath string
+
+	role string
+
+	// tokenPath is an optional path to a projected service account token inside
+	// the pod, for use instead of the default service account token.
+	tokenPath string
+
+	// jwtData is a ReadCloser used to inject a ReadCloser for mocking tests.
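+	// The tests in this package inject it, e.g. (a sketch based on
+	// kubernetes_test.go in this same change):
+	//
+	//   k := method.(*kubernetesMethod)
+	//   k.jwtData = newMockJWTFile(jwtData)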
+ jwtData io.ReadCloser +} + +// NewKubernetesAuthMethod reads the user configuration and returns a configured +// AuthMethod +func NewKubernetesAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { + if conf == nil { + return nil, errors.New("empty config") + } + if conf.Config == nil { + return nil, errors.New("empty config data") + } + + k := &kubernetesMethod{ + logger: conf.Logger, + mountPath: conf.MountPath, + } + + roleRaw, ok := conf.Config["role"] + if !ok { + return nil, errors.New("missing 'role' value") + } + k.role, ok = roleRaw.(string) + if !ok { + return nil, errors.New("could not convert 'role' config value to string") + } + + tokenPathRaw, ok := conf.Config["token_path"] + if ok { + k.tokenPath, ok = tokenPathRaw.(string) + if !ok { + return nil, errors.New("could not convert 'token_path' config value to string") + } + } + + if k.role == "" { + return nil, errors.New("'role' value is empty") + } + + return k, nil +} + +func (k *kubernetesMethod) Authenticate(ctx context.Context, client *api.Client) (string, http.Header, map[string]interface{}, error) { + k.logger.Trace("beginning authentication") + + jwtString, err := k.readJWT() + if err != nil { + return "", nil, nil, fmt.Errorf("error reading JWT with Kubernetes Auth: %w", err) + } + + return fmt.Sprintf("%s/login", k.mountPath), nil, map[string]interface{}{ + "role": k.role, + "jwt": jwtString, + }, nil +} + +func (k *kubernetesMethod) NewCreds() chan struct{} { + return nil +} + +func (k *kubernetesMethod) CredSuccess() { +} + +func (k *kubernetesMethod) Shutdown() { +} + +// readJWT reads the JWT data for the Agent to submit to Vault. The default is +// to read the JWT from the default service account location, defined by the +// constant serviceAccountFile. In normal use k.jwtData is nil at invocation and +// the method falls back to reading the token path with os.Open, opening a file +// from either the default location or from the token_path path specified in +// configuration. +func (k *kubernetesMethod) readJWT() (string, error) { + // load configured token path if set, default to serviceAccountFile + tokenFilePath := serviceAccountFile + if k.tokenPath != "" { + tokenFilePath = k.tokenPath + } + + data := k.jwtData + // k.jwtData should only be non-nil in tests + if data == nil { + f, err := os.Open(tokenFilePath) + if err != nil { + return "", err + } + data = f + } + defer data.Close() + + contentBytes, err := ioutil.ReadAll(data) + if err != nil { + return "", err + } + + return strings.TrimSpace(string(contentBytes)), nil +} diff --git a/command/agentproxyshared/auth/kubernetes/kubernetes_test.go b/command/agentproxyshared/auth/kubernetes/kubernetes_test.go new file mode 100644 index 0000000..cbf6170 --- /dev/null +++ b/command/agentproxyshared/auth/kubernetes/kubernetes_test.go @@ -0,0 +1,123 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package kubernetes + +import ( + "bytes" + "context" + "errors" + "io" + "testing" + + "github.com/hashicorp/errwrap" + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/sdk/helper/logging" +) + +func TestKubernetesAuth_basic(t *testing.T) { + testCases := map[string]struct { + tokenPath string + data *mockJWTFile + e error + }{ + "normal": { + data: newMockJWTFile(jwtData), + }, + "projected": { + tokenPath: "/some/other/path", + data: newMockJWTFile(jwtProjectedData), + }, + "not_found": { + e: errors.New("open /var/run/secrets/kubernetes.io/serviceaccount/token: no such file or directory"), + }, + "projected_not_found": { + tokenPath: "/some/other/path", + e: errors.New("open /some/other/path: no such file or directory"), + }, + } + + for k, tc := range testCases { + t.Run(k, func(t *testing.T) { + authCfg := auth.AuthConfig{ + Logger: logging.NewVaultLogger(hclog.Trace), + MountPath: "kubernetes", + Config: map[string]interface{}{ + "role": "plugin-test", + }, + } + + if tc.tokenPath != "" { + authCfg.Config["token_path"] = tc.tokenPath + } + + a, err := NewKubernetesAuthMethod(&authCfg) + if err != nil { + t.Fatal(err) + } + + // Type assert to set the kubernetesMethod jwtData, to mock out reading + // files from the pod. + k := a.(*kubernetesMethod) + if tc.data != nil { + k.jwtData = tc.data + } + + _, _, data, err := k.Authenticate(context.Background(), nil) + if err != nil && tc.e == nil { + t.Fatal(err) + } + + if err != nil && !errwrap.Contains(err, tc.e.Error()) { + t.Fatalf("expected \"no such file\" error, got: (%s)", err) + } + + if err == nil && tc.e != nil { + t.Fatal("expected error, but got none") + } + + if tc.e == nil { + authJWTraw, ok := data["jwt"] + if !ok { + t.Fatal("expected to find jwt data") + } + + authJWT := authJWTraw.(string) + token := jwtData + if tc.tokenPath != "" { + token = jwtProjectedData + } + if authJWT != token { + t.Fatalf("error with auth tokens, expected (%s) got (%s)", token, authJWT) + } + } + }) + } +} + +// jwt for default service account +var jwtData = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6InZhdWx0LWF1dGgtdG9rZW4tdDVwY24iLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoidmF1bHQtYXV0aCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6ImQ3N2Y4OWJjLTkwNTUtMTFlNy1hMDY4LTA4MDAyNzZkOTliZiIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDpkZWZhdWx0OnZhdWx0LWF1dGgifQ.HKUcqgrvan5ZC_mnpaMEx4RW3KrhfyH_u8G_IA2vUfkLK8tH3T7fJuJaPr7W6K_BqCrbeM5y3owszOzb4NR0Lvw6GBt2cFcen2x1Ua4Wokr0bJjTT7xQOIOw7UvUDyVS17wAurlfUnmWMwMMMOebpqj5K1t6GnyqghH1wPdHYRGX-q5a6C323dBCgM5t6JY_zTTaBgM6EkFq0poBaifmSMiJRPrdUN_-IgyK8fgQRiFYYkgS6DMIU4k4nUOb_sUFf5xb8vMs3SMteKiuWFAIt4iszXTj5IyBUNqe0cXA3zSY3QiNCV6bJ2CWW0Qf9WDtniT79VAqcR4GYaTC_gxjNA" + +// jwt for projected service account +var jwtProjectedData = 
"eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJhdWQiOlsia3ViZXJuZXRlcy5kZWZhdWx0LnN2YyJdLCJleHAiOjE2MDMwNTM1NjMsImlhdCI6MTUzOTk4MTU2MywiaXNzIjoia3ViZXJuZXRlcy9zZXJ2aWNlYWNjb3VudCIsImt1YmVybmV0ZXMuaW8iOnsibmFtZXNwYWNlIjoiZGVmYXVsdCIsInBvZCI6eyJuYW1lIjoidmF1bHQiLCJ1aWQiOiIxMDA2YTA2Yy1kM2RmLTExZTgtOGZlMi0wODAwMjdlNTVlYTgifSwic2VydmljZWFjY291bnQiOnsibmFtZSI6ImRlZmF1bHQiLCJ1aWQiOiJiMzg5YjNiMi1kMzAyLTExZTgtYjE0Yy0wODAwMjdlNTVlYTgifX0sIm5iZiI6MTUzOTk4MTU2Mywic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50OmRlZmF1bHQ6ZGVmYXVsdCJ9.byu3BpCbs0tzQvEBCRTayXF3-kV1Ey7YvStBcCwovfSl6evBze43FFaDps78HtdDAMszjE_yn55_1BMN87EzOZYsF3GBoPLWxkofxhPIy88wmPTpurBsSx-nCKdjf4ayXhTpqGG9gy0xlkUc_xL4pM3Q8XZiqYqwq_T0PHXOpSfdzVy1oabFSZXr5QTZ377v8bvrMgAVWJF_4vZsSMG3XVCK8KBWNRw4_wt6yOelVKE5OGLPJvNu1CFjEKh4HBFBcQnB_Sgpe1nPlnm5utp-1-OVfd7zopOGDAp_Pk_Apu8OPDdPSafn6HpzIeuhMtWXcv1K8ZhZYDLC1wLywZPNyw" + +// mockJWTFile provides a mock ReadCloser struct to inject into +// kubernetesMethod.jwtData +type mockJWTFile struct { + b *bytes.Buffer +} + +var _ io.ReadCloser = &mockJWTFile{} + +func (j *mockJWTFile) Read(p []byte) (n int, err error) { + return j.b.Read(p) +} + +func (j *mockJWTFile) Close() error { return nil } + +func newMockJWTFile(s string) *mockJWTFile { + return &mockJWTFile{ + b: bytes.NewBufferString(s), + } +} diff --git a/command/agentproxyshared/auth/oci/oci.go b/command/agentproxyshared/auth/oci/oci.go new file mode 100644 index 0000000..ec4d9eb --- /dev/null +++ b/command/agentproxyshared/auth/oci/oci.go @@ -0,0 +1,265 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package oci + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "os/user" + "path" + "sync" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/oracle/oci-go-sdk/common" + ociAuth "github.com/oracle/oci-go-sdk/common/auth" +) + +const ( + typeAPIKey = "apikey" + typeInstance = "instance" + + /* + + IAM creds can be inferred from instance metadata or the container + identity service, and those creds expire at varying intervals with + new creds becoming available at likewise varying intervals. Let's + default to polling once a minute so all changes can be picked up + rather quickly. This is configurable, however. + + */ + defaultCredCheckFreqSeconds = 60 * time.Second + + defaultConfigFileName = "config" + defaultConfigDirName = ".oci" + configFilePathEnvVarName = "OCI_CONFIG_FILE" + secondaryConfigDirName = ".oraclebmc" +) + +func NewOCIAuthMethod(conf *auth.AuthConfig, vaultAddress string) (auth.AuthMethod, error) { + if conf == nil { + return nil, errors.New("empty config") + } + if conf.Config == nil { + return nil, errors.New("empty config data") + } + + a := &ociMethod{ + logger: conf.Logger, + vaultAddress: vaultAddress, + mountPath: conf.MountPath, + credsFound: make(chan struct{}), + stopCh: make(chan struct{}), + } + + typeRaw, ok := conf.Config["type"] + if !ok { + return nil, errors.New("missing 'type' value") + } + authType, ok := typeRaw.(string) + if !ok { + return nil, errors.New("could not convert 'type' config value to string") + } + + roleRaw, ok := conf.Config["role"] + if !ok { + return nil, errors.New("missing 'role' value") + } + a.role, ok = roleRaw.(string) + if !ok { + return nil, errors.New("could not convert 'role' config value to string") + } + + // Check for an optional custom frequency at which we should poll for creds. 
+ credCheckFreqSec := defaultCredCheckFreqSeconds + if checkFreqRaw, ok := conf.Config["credential_poll_interval"]; ok { + checkFreq, err := parseutil.ParseDurationSecond(checkFreqRaw) + if err != nil { + return nil, fmt.Errorf("could not parse credential_poll_interval: %v", err) + } + credCheckFreqSec = checkFreq + } + + switch { + case a.role == "": + return nil, errors.New("'role' value is empty") + case authType == "": + return nil, errors.New("'type' value is empty") + case authType != typeAPIKey && authType != typeInstance: + return nil, errors.New("'type' value is invalid") + case authType == typeAPIKey: + defaultConfigFile := getDefaultConfigFilePath() + homeFolder := getHomeFolder() + secondaryConfigFile := path.Join(homeFolder, secondaryConfigDirName, defaultConfigFileName) + + environmentProvider := common.ConfigurationProviderEnvironmentVariables("OCI", "") + defaultFileProvider, _ := common.ConfigurationProviderFromFile(defaultConfigFile, "") + secondaryFileProvider, _ := common.ConfigurationProviderFromFile(secondaryConfigFile, "") + + provider, _ := common.ComposingConfigurationProvider([]common.ConfigurationProvider{environmentProvider, defaultFileProvider, secondaryFileProvider}) + a.configurationProvider = provider + case authType == typeInstance: + configurationProvider, err := ociAuth.InstancePrincipalConfigurationProvider() + if err != nil { + return nil, fmt.Errorf("failed to create instance principal configuration provider: %v", err) + } + a.configurationProvider = configurationProvider + } + + // Do an initial population of the creds because we want to err right away if we can't + // even get a first set. + creds, err := a.configurationProvider.KeyID() + if err != nil { + return nil, err + } + a.lastCreds = creds + + go a.pollForCreds(credCheckFreqSec) + + return a, nil +} + +type ociMethod struct { + logger hclog.Logger + vaultAddress string + mountPath string + + configurationProvider common.ConfigurationProvider + role string + + // These are used to share the latest creds safely across goroutines. + credLock sync.Mutex + lastCreds string + + // Notifies the outer environment that it should call Authenticate again. + credsFound chan struct{} + + // Detects that the outer environment is closing. 
+	stopCh chan struct{}
+}
+
+func (a *ociMethod) Authenticate(context.Context, *api.Client) (string, http.Header, map[string]interface{}, error) {
+	a.credLock.Lock()
+	defer a.credLock.Unlock()
+
+	a.logger.Trace("beginning authentication")
+
+	requestPath := fmt.Sprintf("/v1/%s/login/%s", a.mountPath, a.role)
+	requestURL := fmt.Sprintf("%s%s", a.vaultAddress, requestPath)
+
+	request, err := http.NewRequest("GET", requestURL, nil)
+	if err != nil {
+		return "", nil, nil, fmt.Errorf("error creating authentication request: %w", err)
+	}
+
+	request.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))
+
+	signer := common.DefaultRequestSigner(a.configurationProvider)
+
+	err = signer.Sign(request)
+	if err != nil {
+		return "", nil, nil, fmt.Errorf("error signing authentication request: %w", err)
+	}
+
+	parsedVaultAddress, err := url.Parse(a.vaultAddress)
+	if err != nil {
+		return "", nil, nil, fmt.Errorf("unable to parse vault address: %w", err)
+	}
+
+	request.Header.Set("Host", parsedVaultAddress.Host)
+	request.Header.Set("(request-target)", fmt.Sprintf("%s %s", "get", requestPath))
+
+	data := map[string]interface{}{
+		"request_headers": request.Header,
+	}
+
+	return fmt.Sprintf("%s/login/%s", a.mountPath, a.role), nil, data, nil
+}
+
+func (a *ociMethod) NewCreds() chan struct{} {
+	return a.credsFound
+}
+
+func (a *ociMethod) CredSuccess() {}
+
+func (a *ociMethod) Shutdown() {
+	close(a.credsFound)
+	close(a.stopCh)
+}
+
+func (a *ociMethod) pollForCreds(frequency time.Duration) {
+	ticker := time.NewTicker(frequency)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-a.stopCh:
+			a.logger.Trace("shutdown triggered, stopping OCI auth handler")
+			return
+		case <-ticker.C:
+			if err := a.checkCreds(); err != nil {
+				a.logger.Warn("unable to retrieve current creds, retaining last creds", "error", err)
+			}
+		}
+	}
+}
+
+func (a *ociMethod) checkCreds() error {
+	a.credLock.Lock()
+	defer a.credLock.Unlock()
+
+	a.logger.Trace("checking for new credentials")
+	currentCreds, err := a.configurationProvider.KeyID()
+	if err != nil {
+		return err
+	}
+	// KeyID returns the creds as a plain string, so simple equality is enough
+	// to detect whether they have changed since the last poll.
+ if currentCreds == a.lastCreds { + a.logger.Trace("credentials are unchanged") + return nil + } + a.lastCreds = currentCreds + a.logger.Trace("new credentials detected, triggering Authenticate") + a.credsFound <- struct{}{} + return nil +} + +func getHomeFolder() string { + current, e := user.Current() + if e != nil { + // Give up and try to return something sensible + home, err := os.UserHomeDir() + if err != nil { + return "" + } + return home + } + return current.HomeDir +} + +func getDefaultConfigFilePath() string { + homeFolder := getHomeFolder() + defaultConfigFile := path.Join(homeFolder, defaultConfigDirName, defaultConfigFileName) + if _, err := os.Stat(defaultConfigFile); err == nil { + return defaultConfigFile + } + + // Read configuration file path from OCI_CONFIG_FILE env var + fallbackConfigFile, existed := os.LookupEnv(configFilePathEnvVarName) + if !existed { + return defaultConfigFile + } + if _, err := os.Stat(fallbackConfigFile); os.IsNotExist(err) { + return defaultConfigFile + } + return fallbackConfigFile +} diff --git a/command/agentproxyshared/auth/token-file/token_file.go b/command/agentproxyshared/auth/token-file/token_file.go new file mode 100644 index 0000000..4c7eaa2 --- /dev/null +++ b/command/agentproxyshared/auth/token-file/token_file.go @@ -0,0 +1,86 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package token_file + +import ( + "context" + "errors" + "fmt" + "net/http" + "os" + "strings" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/auth" +) + +type tokenFileMethod struct { + logger hclog.Logger + mountPath string + + cachedToken string + tokenFilePath string +} + +func NewTokenFileAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) { + if conf == nil { + return nil, errors.New("empty config") + } + if conf.Config == nil { + return nil, errors.New("empty config data") + } + + a := &tokenFileMethod{ + logger: conf.Logger, + mountPath: "auth/token", + } + + tokenFilePathRaw, ok := conf.Config["token_file_path"] + if !ok { + return nil, errors.New("missing 'token_file_path' value") + } + a.tokenFilePath, ok = tokenFilePathRaw.(string) + if !ok { + return nil, errors.New("could not convert 'token_file_path' config value to string") + } + if a.tokenFilePath == "" { + return nil, errors.New("'token_file_path' value is empty") + } + + return a, nil +} + +func (a *tokenFileMethod) Authenticate(ctx context.Context, client *api.Client) (string, http.Header, map[string]interface{}, error) { + token, err := os.ReadFile(a.tokenFilePath) + if err != nil { + if a.cachedToken == "" { + return "", nil, nil, fmt.Errorf("error reading token file and no cached token known: %w", err) + } + a.logger.Warn("error reading token file", "error", err) + } + if len(token) == 0 { + if a.cachedToken == "" { + return "", nil, nil, errors.New("token file empty and no cached token known") + } + a.logger.Warn("token file exists but read empty value, re-using cached value") + } else { + a.cachedToken = strings.TrimSpace(string(token)) + } + + // i.e. 
auth/token/lookup-self
+	return fmt.Sprintf("%s/lookup-self", a.mountPath), nil, map[string]interface{}{
+		"token": a.cachedToken,
+	}, nil
+}
+
+func (a *tokenFileMethod) NewCreds() chan struct{} {
+	return nil
+}
+
+func (a *tokenFileMethod) CredSuccess() {
+}
+
+func (a *tokenFileMethod) Shutdown() {
+}
diff --git a/command/agentproxyshared/auth/token-file/token_file_test.go b/command/agentproxyshared/auth/token-file/token_file_test.go
new file mode 100644
index 0000000..eb89fc0
--- /dev/null
+++ b/command/agentproxyshared/auth/token-file/token_file_test.go
@@ -0,0 +1,84 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package token_file
+
+import (
+	"os"
+	"path/filepath"
+	"testing"
+
+	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/command/agentproxyshared/auth"
+	"github.com/hashicorp/vault/sdk/helper/logging"
+)
+
+func TestNewTokenFileAuthMethodEmptyConfig(t *testing.T) {
+	logger := logging.NewVaultLogger(log.Trace)
+	_, err := NewTokenFileAuthMethod(&auth.AuthConfig{
+		Logger: logger.Named("auth.method"),
+		Config: map[string]interface{}{},
+	})
+	if err == nil {
+		t.Fatal("Expected error due to empty config")
+	}
+}
+
+func TestNewTokenFileEmptyFilePath(t *testing.T) {
+	logger := logging.NewVaultLogger(log.Trace)
+	_, err := NewTokenFileAuthMethod(&auth.AuthConfig{
+		Logger: logger.Named("auth.method"),
+		Config: map[string]interface{}{
+			"token_file_path": "",
+		},
+	})
+	if err == nil {
+		t.Fatalf("Expected error when giving empty file path")
+	}
+}
+
+func TestNewTokenFileAuthenticate(t *testing.T) {
+	tokenFile, err := os.Create(filepath.Join(t.TempDir(), "token_file"))
+	tokenFileContents := "super-secret-token"
+	if err != nil {
+		t.Fatal(err)
+	}
+	tokenFileName := tokenFile.Name()
+	tokenFile.Close() // WriteFile doesn't need it open
+	os.WriteFile(tokenFileName, []byte(tokenFileContents), 0o666)
+	defer os.Remove(tokenFileName)
+
+	logger := logging.NewVaultLogger(log.Trace)
+	am, err := NewTokenFileAuthMethod(&auth.AuthConfig{
+		Logger: logger.Named("auth.method"),
+		Config: map[string]interface{}{
+			"token_file_path": tokenFileName,
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	path, headers, data, err := am.Authenticate(nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if path != "auth/token/lookup-self" {
+		t.Fatalf("Incorrect path, was %s", path)
+	}
+	if headers != nil {
+		t.Fatalf("Expected no headers, instead got %v", headers)
+	}
+	if data == nil {
+		t.Fatal("Data was nil")
+	}
+	tokenDataFromAuthMethod := data["token"].(string)
+	if tokenDataFromAuthMethod != tokenFileContents {
+		t.Fatalf("Incorrect token file contents returned by auth method, expected %s, got %s", tokenFileContents, tokenDataFromAuthMethod)
+	}
+
+	_, err = os.Stat(tokenFileName)
+	if err != nil {
+		t.Fatal("Token file removed")
+	}
+}
diff --git a/command/agentproxyshared/cache/api_proxy.go b/command/agentproxyshared/cache/api_proxy.go
new file mode 100644
index 0000000..e03bc15
--- /dev/null
+++ b/command/agentproxyshared/cache/api_proxy.go
@@ -0,0 +1,174 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package cache + +import ( + "context" + "fmt" + gohttp "net/http" + "sync" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-retryablehttp" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/http" +) + +type EnforceConsistency int + +const ( + EnforceConsistencyNever EnforceConsistency = iota + EnforceConsistencyAlways +) + +type WhenInconsistentAction int + +const ( + WhenInconsistentFail WhenInconsistentAction = iota + WhenInconsistentRetry + WhenInconsistentForward +) + +// APIProxy is an implementation of the proxier interface that is used to +// forward the request to Vault and get the response. +type APIProxy struct { + client *api.Client + logger hclog.Logger + enforceConsistency EnforceConsistency + whenInconsistentAction WhenInconsistentAction + l sync.RWMutex + lastIndexStates []string + userAgentString string + userAgentStringFunction func(string) string +} + +var _ Proxier = &APIProxy{} + +type APIProxyConfig struct { + Client *api.Client + Logger hclog.Logger + EnforceConsistency EnforceConsistency + WhenInconsistentAction WhenInconsistentAction + // UserAgentString is used as the User Agent when the proxied client + // does not have a user agent of its own. + UserAgentString string + // UserAgentStringFunction is the function to transform the proxied client's + // user agent into one that includes Vault-specific information. + UserAgentStringFunction func(string) string +} + +func NewAPIProxy(config *APIProxyConfig) (Proxier, error) { + if config.Client == nil { + return nil, fmt.Errorf("nil API client") + } + return &APIProxy{ + client: config.Client, + logger: config.Logger, + enforceConsistency: config.EnforceConsistency, + whenInconsistentAction: config.WhenInconsistentAction, + userAgentString: config.UserAgentString, + userAgentStringFunction: config.UserAgentStringFunction, + }, nil +} + +func (ap *APIProxy) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { + client, err := ap.client.Clone() + if err != nil { + return nil, err + } + client.SetToken(req.Token) + + // Derive and set a logger for the client + clientLogger := ap.logger.Named("client") + client.SetLogger(clientLogger) + + // http.Transport will transparently request gzip and decompress the response, but only if + // the client doesn't manually set the header. Removing any Accept-Encoding header allows the + // transparent compression to occur. + req.Request.Header.Del("Accept-Encoding") + + if req.Request.Header == nil { + req.Request.Header = make(gohttp.Header) + } + + // Set our User-Agent to be one indicating we are Vault Agent's API proxy. + // If the sending client had one, preserve it. 
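+	// Illustration of the intent (the concrete strings come from the
+	// configured useragent helpers, not hard-coded here; "my-app/1.0" is a
+	// placeholder):
+	//
+	//   client sent User-Agent "my-app/1.0" -> userAgentStringFunction("my-app/1.0")
+	//   client sent no User-Agent           -> userAgentString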
+ if req.Request.Header.Get("User-Agent") != "" { + initialUserAgent := req.Request.Header.Get("User-Agent") + req.Request.Header.Set("User-Agent", ap.userAgentStringFunction(initialUserAgent)) + } else { + req.Request.Header.Set("User-Agent", ap.userAgentString) + } + + client.SetHeaders(req.Request.Header) + + fwReq := client.NewRequest(req.Request.Method, req.Request.URL.Path) + fwReq.BodyBytes = req.RequestBody + + query := req.Request.URL.Query() + if len(query) != 0 { + fwReq.Params = query + } + + var newState string + manageState := ap.enforceConsistency == EnforceConsistencyAlways && + req.Request.Header.Get(http.VaultIndexHeaderName) == "" && + req.Request.Header.Get(http.VaultForwardHeaderName) == "" && + req.Request.Header.Get(http.VaultInconsistentHeaderName) == "" + + if manageState { + client = client.WithResponseCallbacks(api.RecordState(&newState)) + ap.l.RLock() + lastStates := ap.lastIndexStates + ap.l.RUnlock() + if len(lastStates) != 0 { + client = client.WithRequestCallbacks(api.RequireState(lastStates...)) + switch ap.whenInconsistentAction { + case WhenInconsistentFail: + // In this mode we want to delegate handling of inconsistency + // failures to the external client talking to Agent. + client.SetCheckRetry(retryablehttp.DefaultRetryPolicy) + case WhenInconsistentRetry: + // In this mode we want to handle retries due to inconsistency + // internally. This is the default api.Client behaviour so + // we needn't do anything. + case WhenInconsistentForward: + fwReq.Headers.Set(http.VaultInconsistentHeaderName, http.VaultInconsistentForward) + } + } + } + + // Make the request to Vault and get the response + ap.logger.Info("forwarding request to Vault", "method", req.Request.Method, "path", req.Request.URL.Path) + + resp, err := client.RawRequestWithContext(ctx, fwReq) + if resp == nil && err != nil { + // We don't want to cache nil responses, so we simply return the error + return nil, err + } + + if newState != "" { + ap.l.Lock() + // We want to be using the "newest" states seen, but newer isn't well + // defined here. There can be two states S1 and S2 which aren't strictly ordered: + // S1 could have a newer localindex and S2 could have a newer replicatedindex. So + // we need to merge them. But we can't merge them because we wouldn't be able to + // "sign" the resulting header because we don't have access to the HMAC key that + // Vault uses to do so. So instead we compare any of the 0-2 saved states + // we have to the new header, keeping the newest 1-2 of these, and sending + // them to Vault to evaluate. + ap.lastIndexStates = api.MergeReplicationStates(ap.lastIndexStates, newState) + ap.l.Unlock() + } + + // Before error checking from the request call, we'd want to initialize a SendResponse to + // potentially return + sendResponse, newErr := NewSendResponse(resp, nil) + if newErr != nil { + return nil, newErr + } + + // Bubble back the api.Response as well for error checking/handling at the handler layer. + return sendResponse, err +} diff --git a/command/agentproxyshared/cache/api_proxy_test.go b/command/agentproxyshared/cache/api_proxy_test.go new file mode 100644 index 0000000..6671b17 --- /dev/null +++ b/command/agentproxyshared/cache/api_proxy_test.go @@ -0,0 +1,339 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package cache + +import ( + "context" + "fmt" + "net" + "net/http" + "os" + "testing" + "time" + + "github.com/hashicorp/vault/helper/useragent" + + "github.com/hashicorp/vault/builtin/credential/userpass" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/logging" +) + +const policyAdmin = ` +path "*" { + capabilities = ["sudo", "create", "read", "update", "delete", "list"] +} +` + +func TestAPIProxy(t *testing.T) { + cleanup, client, _, _ := setupClusterAndAgent(namespace.RootContext(nil), t, nil) + defer cleanup() + + proxier, err := NewAPIProxy(&APIProxyConfig{ + Client: client, + Logger: logging.NewVaultLogger(hclog.Trace), + UserAgentStringFunction: useragent.ProxyStringWithProxiedUserAgent, + UserAgentString: useragent.ProxyAPIProxyString(), + }) + if err != nil { + t.Fatal(err) + } + + r := client.NewRequest("GET", "/v1/sys/health") + req, err := r.ToHTTP() + if err != nil { + t.Fatal(err) + } + + resp, err := proxier.Send(namespace.RootContext(nil), &SendRequest{ + Request: req, + }) + if err != nil { + t.Fatal(err) + } + + var result api.HealthResponse + err = jsonutil.DecodeJSONFromReader(resp.Response.Body, &result) + if err != nil { + t.Fatal(err) + } + + if !result.Initialized || result.Sealed || result.Standby { + t.Fatalf("bad sys/health response: %#v", result) + } +} + +func TestAPIProxyNoCache(t *testing.T) { + cleanup, client, _, _ := setupClusterAndAgentNoCache(namespace.RootContext(nil), t, nil) + defer cleanup() + + proxier, err := NewAPIProxy(&APIProxyConfig{ + Client: client, + Logger: logging.NewVaultLogger(hclog.Trace), + UserAgentStringFunction: useragent.ProxyStringWithProxiedUserAgent, + UserAgentString: useragent.ProxyAPIProxyString(), + }) + if err != nil { + t.Fatal(err) + } + + r := client.NewRequest("GET", "/v1/sys/health") + req, err := r.ToHTTP() + if err != nil { + t.Fatal(err) + } + + resp, err := proxier.Send(namespace.RootContext(nil), &SendRequest{ + Request: req, + }) + if err != nil { + t.Fatal(err) + } + + var result api.HealthResponse + err = jsonutil.DecodeJSONFromReader(resp.Response.Body, &result) + if err != nil { + t.Fatal(err) + } + + if !result.Initialized || result.Sealed || result.Standby { + t.Fatalf("bad sys/health response: %#v", result) + } +} + +func TestAPIProxy_queryParams(t *testing.T) { + // Set up an agent that points to a standby node for this particular test + // since it needs to proxy a /sys/health?standbyok=true request to a standby + cleanup, client, _, _ := setupClusterAndAgentOnStandby(namespace.RootContext(nil), t, nil) + defer cleanup() + + proxier, err := NewAPIProxy(&APIProxyConfig{ + Client: client, + Logger: logging.NewVaultLogger(hclog.Trace), + UserAgentStringFunction: useragent.ProxyStringWithProxiedUserAgent, + UserAgentString: useragent.ProxyAPIProxyString(), + }) + if err != nil { + t.Fatal(err) + } + + r := client.NewRequest("GET", "/v1/sys/health") + req, err := r.ToHTTP() + if err != nil { + t.Fatal(err) + } + + // Add a query parameter for testing + q := req.URL.Query() + q.Add("standbyok", "true") + req.URL.RawQuery = q.Encode() + + resp, err := proxier.Send(namespace.RootContext(nil), &SendRequest{ + Request: req, + }) + if err != nil { + t.Fatal(err) + } + + var result 
api.HealthResponse
+	err = jsonutil.DecodeJSONFromReader(resp.Response.Body, &result)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !result.Initialized || result.Sealed || !result.Standby {
+		t.Fatalf("bad sys/health response: %#v", result)
+	}
+
+	if resp.Response.StatusCode != http.StatusOK {
+		t.Fatalf("expected standby to return 200, got: %v", resp.Response.StatusCode)
+	}
+}
+
+// setupClusterAndAgent is a helper func used to set up a test cluster and
+// caching agent against the active node. It returns a cleanup func that should
+// be deferred immediately along with two clients, one for direct cluster
+// communication and another to talk to the caching agent.
+func setupClusterAndAgent(ctx context.Context, t *testing.T, coreConfig *vault.CoreConfig) (func(), *api.Client, *api.Client, *LeaseCache) {
+	return setupClusterAndAgentCommon(ctx, t, coreConfig, false, true)
+}
+
+// setupClusterAndAgentNoCache is a helper func used to set up a test cluster and
+// proxying agent against the active node. It returns a cleanup func that should
+// be deferred immediately along with two clients, one for direct cluster
+// communication and another to talk to the caching agent.
+func setupClusterAndAgentNoCache(ctx context.Context, t *testing.T, coreConfig *vault.CoreConfig) (func(), *api.Client, *api.Client, *LeaseCache) {
+	return setupClusterAndAgentCommon(ctx, t, coreConfig, false, false)
+}
+
+// setupClusterAndAgentOnStandby is a helper func used to set up a test cluster
+// and caching agent against a standby node. It returns a cleanup func that
+// should be deferred immediately along with two clients, one for direct cluster
+// communication and another to talk to the caching agent.
+func setupClusterAndAgentOnStandby(ctx context.Context, t *testing.T, coreConfig *vault.CoreConfig) (func(), *api.Client, *api.Client, *LeaseCache) {
+	return setupClusterAndAgentCommon(ctx, t, coreConfig, true, true)
+}
+
+func setupClusterAndAgentCommon(ctx context.Context, t *testing.T, coreConfig *vault.CoreConfig, onStandby bool, useCache bool) (func(), *api.Client, *api.Client, *LeaseCache) {
+	t.Helper()
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	// Handle sane defaults
+	if coreConfig == nil {
+		coreConfig = &vault.CoreConfig{
+			DisableMlock: true,
+			DisableCache: true,
+			Logger:       logging.NewVaultLogger(hclog.Trace),
+		}
+	}
+
+	// Always set up the userpass backend since we use that to generate an admin
+	// token for the client that will make proxied requests through the agent.
+	if coreConfig.CredentialBackends == nil || coreConfig.CredentialBackends["userpass"] == nil {
+		coreConfig.CredentialBackends = map[string]logical.Factory{
+			"userpass": userpass.Factory,
+		}
+	}
+
+	// Init new test cluster
+	cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
+		HandlerFunc: vaulthttp.Handler,
+	})
+	cluster.Start()
+
+	cores := cluster.Cores
+	vault.TestWaitActive(t, cores[0].Core)
+
+	activeClient := cores[0].Client
+	standbyClient := cores[1].Client
+
+	// clienToUse is the client for the agent to point to.
+	clienToUse := activeClient
+	if onStandby {
+		clienToUse = standbyClient
+	}
+
+	// Add an admin policy
+	if err := activeClient.Sys().PutPolicy("admin", policyAdmin); err != nil {
+		t.Fatal(err)
+	}
+
+	// Set up the userpass auth backend and an admin user. Used for getting a token
+	// for the agent later down in this func.
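+	// The two API calls below are, roughly, the CLI equivalent of the
+	// following (a sketch, not part of the test):
+	//
+	//   vault auth enable userpass
+	//   vault write auth/userpass/users/foo password=bar policies=admin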
+	err := activeClient.Sys().EnableAuthWithOptions("userpass", &api.EnableAuthOptions{
+		Type: "userpass",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = activeClient.Logical().Write("auth/userpass/users/foo", map[string]interface{}{
+		"password": "bar",
+		"policies": []string{"admin"},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Set up env vars for agent consumption
+	origEnvVaultAddress := os.Getenv(api.EnvVaultAddress)
+	os.Setenv(api.EnvVaultAddress, clientToUse.Address())
+
+	origEnvVaultCACert := os.Getenv(api.EnvVaultCACert)
+	os.Setenv(api.EnvVaultCACert, fmt.Sprintf("%s/ca_cert.pem", cluster.TempDir))
+
+	listener, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	apiProxyLogger := logging.NewVaultLogger(hclog.Trace).Named("apiproxy")
+
+	// Create the API proxier
+	apiProxy, err := NewAPIProxy(&APIProxyConfig{
+		Client:                  clientToUse,
+		Logger:                  apiProxyLogger,
+		UserAgentStringFunction: useragent.ProxyStringWithProxiedUserAgent,
+		UserAgentString:         useragent.ProxyAPIProxyString(),
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Create a muxer and add paths relevant for the lease cache layer and API proxy layer
+	mux := http.NewServeMux()
+
+	var leaseCache *LeaseCache
+	if useCache {
+		cacheLogger := logging.NewVaultLogger(hclog.Trace).Named("cache")
+
+		// Create the lease cache proxier and set its underlying proxier to
+		// the API proxier.
+		leaseCache, err = NewLeaseCache(&LeaseCacheConfig{
+			Client:      clientToUse,
+			BaseContext: ctx,
+			Proxier:     apiProxy,
+			Logger:      cacheLogger.Named("leasecache"),
+		})
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		mux.Handle("/agent/v1/cache-clear", leaseCache.HandleCacheClear(ctx))
+
+		mux.Handle("/", ProxyHandler(ctx, cacheLogger, leaseCache, nil, true))
+	} else {
+		mux.Handle("/", ProxyHandler(ctx, apiProxyLogger, apiProxy, nil, true))
+	}
+
+	server := &http.Server{
+		Handler:           mux,
+		ReadHeaderTimeout: 10 * time.Second,
+		ReadTimeout:       30 * time.Second,
+		IdleTimeout:       5 * time.Minute,
+		ErrorLog:          apiProxyLogger.StandardLogger(nil),
+	}
+	go server.Serve(listener)
+
+	// testClient is the client that is used to talk to the agent for proxying/caching behavior.
+	testClient, err := activeClient.Clone()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := testClient.SetAddress("http://" + listener.Addr().String()); err != nil {
+		t.Fatal(err)
+	}
+
+	// Login via userpass method to derive a managed token. Set that token as the
+	// testClient's token
+	resp, err := testClient.Logical().Write("auth/userpass/login/foo", map[string]interface{}{
+		"password": "bar",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	testClient.SetToken(resp.Auth.ClientToken)
+
+	cleanup := func() {
+		// We wait for a tiny bit for things such as agent renewal to exit properly
+		time.Sleep(50 * time.Millisecond)
+
+		cluster.Cleanup()
+		os.Setenv(api.EnvVaultAddress, origEnvVaultAddress)
+		os.Setenv(api.EnvVaultCACert, origEnvVaultCACert)
+		listener.Close()
+	}
+
+	return cleanup, clientToUse, testClient, leaseCache
+}
diff --git a/command/agentproxyshared/cache/cache_test.go b/command/agentproxyshared/cache/cache_test.go
new file mode 100644
index 0000000..4786950
--- /dev/null
+++ b/command/agentproxyshared/cache/cache_test.go
@@ -0,0 +1,1242 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package cache + +import ( + "context" + "encoding/json" + "fmt" + "io" + "math/rand" + "net" + "net/http" + "sync" + "testing" + "time" + + "github.com/go-test/deep" + "github.com/hashicorp/go-hclog" + kv "github.com/hashicorp/vault-plugin-secrets-kv" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb" + "github.com/hashicorp/vault/command/agentproxyshared/sink/mock" + "github.com/hashicorp/vault/helper/namespace" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +func tokenRevocationValidation(t *testing.T, sampleSpace map[string]string, expected map[string]string, leaseCache *LeaseCache) { + t.Helper() + for val, valType := range sampleSpace { + index, err := leaseCache.db.Get(valType, val) + if err != nil { + t.Fatal(err) + } + if expected[val] == "" && index != nil { + t.Fatalf("failed to evict index from the cache: type: %q, value: %q", valType, val) + } + if expected[val] != "" && index == nil { + t.Fatalf("evicted an undesired index from cache: type: %q, value: %q", valType, val) + } + } +} + +func TestCache_AutoAuthTokenStripping(t *testing.T) { + response1 := `{"data": {"id": "testid", "accessor": "testaccessor", "request": "lookup-self"}}` + response2 := `{"data": {"id": "testid", "accessor": "testaccessor", "request": "lookup"}}` + response3 := `{"auth": {"client_token": "testid", "accessor": "testaccessor"}}` + response4 := `{"auth": {"client_token": "testid", "accessor": "testaccessor"}}` + responses := []*SendResponse{ + newTestSendResponse(http.StatusOK, response1), + newTestSendResponse(http.StatusOK, response2), + newTestSendResponse(http.StatusOK, response3), + newTestSendResponse(http.StatusOK, response4), + } + + leaseCache := testNewLeaseCache(t, responses) + + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + vault.TestWaitActive(t, cores[0].Core) + client := cores[0].Client + + cacheLogger := logging.NewVaultLogger(hclog.Trace).Named("cache") + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + + ctx := namespace.RootContext(nil) + + // Create a muxer and add paths relevant for the lease cache layer + mux := http.NewServeMux() + mux.Handle(consts.AgentPathCacheClear, leaseCache.HandleCacheClear(ctx)) + + mux.Handle("/", ProxyHandler(ctx, cacheLogger, leaseCache, mock.NewSink("testid"), true)) + server := &http.Server{ + Handler: mux, + ReadHeaderTimeout: 10 * time.Second, + ReadTimeout: 30 * time.Second, + IdleTimeout: 5 * time.Minute, + ErrorLog: cacheLogger.StandardLogger(nil), + } + go server.Serve(listener) + + testClient, err := client.Clone() + if err != nil { + t.Fatal(err) + } + + if err := testClient.SetAddress("http://" + listener.Addr().String()); err != nil { + t.Fatal(err) + } + + // Empty the token in the client. Auto-auth token should be put to use. 
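+	// The ProxyHandler above was constructed with mock.NewSink("testid"), so a
+	// request arriving without a client token should be forwarded with the
+	// sink's "testid" token attached in its place.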
+	testClient.SetToken("")
+	secret, err := testClient.Auth().Token().LookupSelf()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if secret.Data["id"] != nil || secret.Data["accessor"] != nil || secret.Data["request"].(string) != "lookup-self" {
+		t.Fatalf("failed to strip off auto-auth token on lookup-self")
+	}
+
+	secret, err = testClient.Auth().Token().Lookup("")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if secret.Data["id"] != nil || secret.Data["accessor"] != nil || secret.Data["request"].(string) != "lookup" {
+		t.Fatalf("failed to strip off auto-auth token on lookup")
+	}
+
+	secret, err = testClient.Auth().Token().RenewSelf(1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if secret.Auth == nil {
+		secretJson, _ := json.Marshal(secret)
+		t.Fatalf("Expected secret to have Auth but was %s", secretJson)
+	}
+	if secret.Auth.ClientToken != "" || secret.Auth.Accessor != "" {
+		t.Fatalf("failed to strip off auto-auth token on renew-self")
+	}
+
+	secret, err = testClient.Auth().Token().Renew("testid", 1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if secret.Auth == nil {
+		secretJson, _ := json.Marshal(secret)
+		t.Fatalf("Expected secret to have Auth but was %s", secretJson)
+	}
+	if secret.Auth.ClientToken != "" || secret.Auth.Accessor != "" {
+		t.Fatalf("failed to strip off auto-auth token on renew")
+	}
+}
+
+func TestCache_AutoAuthClientTokenProxyStripping(t *testing.T) {
+	leaseCache := &mockTokenVerifierProxier{}
+	dummyToken := "DUMMY"
+	realToken := "testid"
+
+	cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{
+		HandlerFunc: vaulthttp.Handler,
+	})
+	cluster.Start()
+	defer cluster.Cleanup()
+
+	cores := cluster.Cores
+	vault.TestWaitActive(t, cores[0].Core)
+	client := cores[0].Client
+
+	cacheLogger := logging.NewVaultLogger(hclog.Trace).Named("cache")
+	listener, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ctx := namespace.RootContext(nil)
+
+	// Create a muxer and add paths relevant for the lease cache layer
+	mux := http.NewServeMux()
+	// mux.Handle(consts.AgentPathCacheClear, leaseCache.HandleCacheClear(ctx))
+
+	mux.Handle("/", ProxyHandler(ctx, cacheLogger, leaseCache, mock.NewSink(realToken), false))
+	server := &http.Server{
+		Handler:           mux,
+		ReadHeaderTimeout: 10 * time.Second,
+		ReadTimeout:       30 * time.Second,
+		IdleTimeout:       5 * time.Minute,
+		ErrorLog:          cacheLogger.StandardLogger(nil),
+	}
+	go server.Serve(listener)
+
+	testClient, err := client.Clone()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := testClient.SetAddress("http://" + listener.Addr().String()); err != nil {
+		t.Fatal(err)
+	}
+
+	// Set a dummy token in the client. The proxy should strip it off and
+	// forward the request with the real auto-auth token in its place.
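+	// mockTokenVerifierProxier records the last client token it sees in its
+	// currentToken field, which is what the assertion below inspects.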
+	testClient.SetToken(dummyToken)
+	_, err = testClient.Auth().Token().LookupSelf()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if leaseCache.currentToken != realToken {
+		t.Fatalf("failed to use real token from auto-auth")
+	}
+}
+
+func TestCache_ConcurrentRequests(t *testing.T) {
+	coreConfig := &vault.CoreConfig{
+		DisableMlock: true,
+		DisableCache: true,
+		Logger:       hclog.NewNullLogger(),
+		LogicalBackends: map[string]logical.Factory{
+			"kv": vault.LeasedPassthroughBackendFactory,
+		},
+	}
+
+	cleanup, _, testClient, _ := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig)
+	defer cleanup()
+
+	err := testClient.Sys().Mount("kv", &api.MountInput{
+		Type: "kv",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	wg := &sync.WaitGroup{}
+	for i := 0; i < 100; i++ {
+		wg.Add(1)
+		go func(i int) {
+			defer wg.Done()
+			key := fmt.Sprintf("kv/foo/%d_%d", i, rand.Int())
+			_, err := testClient.Logical().Write(key, map[string]interface{}{
+				"key": key,
+			})
+			if err != nil {
+				// t.Fatal must not be called from a spawned goroutine, so
+				// report the failure with t.Error and bail out instead.
+				t.Error(err)
+				return
+			}
+			secret, err := testClient.Logical().Read(key)
+			if err != nil {
+				t.Error(err)
+				return
+			}
+			if secret == nil || secret.Data["key"].(string) != key {
+				t.Errorf("failed to read value for key: %q", key)
+			}
+		}(i)
+	}
+	wg.Wait()
+}
+
+func TestCache_TokenRevocations_RevokeOrphan(t *testing.T) {
+	coreConfig := &vault.CoreConfig{
+		DisableMlock: true,
+		DisableCache: true,
+		Logger:       hclog.NewNullLogger(),
+		LogicalBackends: map[string]logical.Factory{
+			"kv": vault.LeasedPassthroughBackendFactory,
+		},
+	}
+
+	sampleSpace := make(map[string]string)
+
+	cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig)
+	defer cleanup()
+
+	token1 := testClient.Token()
+	sampleSpace[token1] = "token"
+
+	// Mount the kv backend
+	err := testClient.Sys().Mount("kv", &api.MountInput{
+		Type: "kv",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Create a secret in the backend
+	_, err = testClient.Logical().Write("kv/foo", map[string]interface{}{
+		"value": "bar",
+		"ttl":   "1h",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Read the secret and create a lease
+	leaseResp, err := testClient.Logical().Read("kv/foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+	lease1 := leaseResp.LeaseID
+	sampleSpace[lease1] = "lease"
+
+	resp, err := testClient.Logical().Write("auth/token/create", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	token2 := resp.Auth.ClientToken
+	sampleSpace[token2] = "token"
+
+	testClient.SetToken(token2)
+
+	leaseResp, err = testClient.Logical().Read("kv/foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+	lease2 := leaseResp.LeaseID
+	sampleSpace[lease2] = "lease"
+
+	resp, err = testClient.Logical().Write("auth/token/create", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	token3 := resp.Auth.ClientToken
+	sampleSpace[token3] = "token"
+
+	testClient.SetToken(token3)
+
+	leaseResp, err = testClient.Logical().Read("kv/foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+	lease3 := leaseResp.LeaseID
+	sampleSpace[lease3] = "lease"
+
+	expected := make(map[string]string)
+	for k, v := range sampleSpace {
+		expected[k] = v
+	}
+	tokenRevocationValidation(t, sampleSpace, expected, leaseCache)
+
+	// Revoke-orphan the intermediate token. This should result in its own
+	// eviction and evictions of the revoked token's leases. All other things
+	// including the child tokens and leases of the child tokens should be
+	// untouched.
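+	// Concretely: token2 and lease2 are evicted, while token1/lease1 and
+	// token3/lease3 remain cached.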
+	testClient.SetToken(token2)
+	err = testClient.Auth().Token().RevokeOrphan(token2)
+	if err != nil {
+		t.Fatal(err)
+	}
+	time.Sleep(1 * time.Second)
+
+	expected = map[string]string{
+		token1: "token",
+		lease1: "lease",
+		token3: "token",
+		lease3: "lease",
+	}
+	tokenRevocationValidation(t, sampleSpace, expected, leaseCache)
+}
+
+func TestCache_TokenRevocations_LeafLevelToken(t *testing.T) {
+	coreConfig := &vault.CoreConfig{
+		DisableMlock: true,
+		DisableCache: true,
+		Logger:       hclog.NewNullLogger(),
+		LogicalBackends: map[string]logical.Factory{
+			"kv": vault.LeasedPassthroughBackendFactory,
+		},
+	}
+
+	sampleSpace := make(map[string]string)
+
+	cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig)
+	defer cleanup()
+
+	token1 := testClient.Token()
+	sampleSpace[token1] = "token"
+
+	// Mount the kv backend
+	err := testClient.Sys().Mount("kv", &api.MountInput{
+		Type: "kv",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Create a secret in the backend
+	_, err = testClient.Logical().Write("kv/foo", map[string]interface{}{
+		"value": "bar",
+		"ttl":   "1h",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Read the secret and create a lease
+	leaseResp, err := testClient.Logical().Read("kv/foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+	lease1 := leaseResp.LeaseID
+	sampleSpace[lease1] = "lease"
+
+	resp, err := testClient.Logical().Write("auth/token/create", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	token2 := resp.Auth.ClientToken
+	sampleSpace[token2] = "token"
+
+	testClient.SetToken(token2)
+
+	leaseResp, err = testClient.Logical().Read("kv/foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+	lease2 := leaseResp.LeaseID
+	sampleSpace[lease2] = "lease"
+
+	resp, err = testClient.Logical().Write("auth/token/create", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	token3 := resp.Auth.ClientToken
+	sampleSpace[token3] = "token"
+
+	testClient.SetToken(token3)
+
+	leaseResp, err = testClient.Logical().Read("kv/foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+	lease3 := leaseResp.LeaseID
+	sampleSpace[lease3] = "lease"
+
+	expected := make(map[string]string)
+	for k, v := range sampleSpace {
+		expected[k] = v
+	}
+	tokenRevocationValidation(t, sampleSpace, expected, leaseCache)
+
+	// Revoke the leaf token. This should evict the token's own entry and its
+	// leases; the parent tokens and their leases should be untouched.
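+	// Concretely, token3 and lease3 are evicted while token1/lease1 and
+	// token2/lease2 remain.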
+ testClient.SetToken(token3) + err = testClient.Auth().Token().RevokeSelf("") + if err != nil { + t.Fatal(err) + } + time.Sleep(1 * time.Second) + + expected = map[string]string{ + token1: "token", + lease1: "lease", + token2: "token", + lease2: "lease", + } + tokenRevocationValidation(t, sampleSpace, expected, leaseCache) +} + +func TestCache_TokenRevocations_IntermediateLevelToken(t *testing.T) { + coreConfig := &vault.CoreConfig{ + DisableMlock: true, + DisableCache: true, + Logger: hclog.NewNullLogger(), + LogicalBackends: map[string]logical.Factory{ + "kv": vault.LeasedPassthroughBackendFactory, + }, + } + + sampleSpace := make(map[string]string) + + cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) + defer cleanup() + + token1 := testClient.Token() + sampleSpace[token1] = "token" + + // Mount the kv backend + err := testClient.Sys().Mount("kv", &api.MountInput{ + Type: "kv", + }) + if err != nil { + t.Fatal(err) + } + + // Create a secret in the backend + _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ + "value": "bar", + "ttl": "1h", + }) + if err != nil { + t.Fatal(err) + } + + // Read the secret and create a lease + leaseResp, err := testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease1 := leaseResp.LeaseID + sampleSpace[lease1] = "lease" + + resp, err := testClient.Logical().Write("auth/token/create", nil) + if err != nil { + t.Fatal(err) + } + token2 := resp.Auth.ClientToken + sampleSpace[token2] = "token" + + testClient.SetToken(token2) + + leaseResp, err = testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease2 := leaseResp.LeaseID + sampleSpace[lease2] = "lease" + + resp, err = testClient.Logical().Write("auth/token/create", nil) + if err != nil { + t.Fatal(err) + } + token3 := resp.Auth.ClientToken + sampleSpace[token3] = "token" + + testClient.SetToken(token3) + + leaseResp, err = testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease3 := leaseResp.LeaseID + sampleSpace[lease3] = "lease" + + expected := make(map[string]string) + for k, v := range sampleSpace { + expected[k] = v + } + tokenRevocationValidation(t, sampleSpace, expected, leaseCache) + + // Revoke the second level token. This should evict all the leases + // belonging to this token, evict entries for all the child tokens and + // their respective leases. 
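+	// Here that means token2, token3, and their leases are evicted, leaving
+	// only token1 and lease1 cached.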
+ testClient.SetToken(token2) + err = testClient.Auth().Token().RevokeSelf("") + if err != nil { + t.Fatal(err) + } + time.Sleep(1 * time.Second) + + expected = map[string]string{ + token1: "token", + lease1: "lease", + } + tokenRevocationValidation(t, sampleSpace, expected, leaseCache) +} + +func TestCache_TokenRevocations_TopLevelToken(t *testing.T) { + coreConfig := &vault.CoreConfig{ + DisableMlock: true, + DisableCache: true, + Logger: hclog.NewNullLogger(), + LogicalBackends: map[string]logical.Factory{ + "kv": vault.LeasedPassthroughBackendFactory, + }, + } + + sampleSpace := make(map[string]string) + + cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) + defer cleanup() + + token1 := testClient.Token() + sampleSpace[token1] = "token" + + // Mount the kv backend + err := testClient.Sys().Mount("kv", &api.MountInput{ + Type: "kv", + }) + if err != nil { + t.Fatal(err) + } + + // Create a secret in the backend + _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ + "value": "bar", + "ttl": "1h", + }) + if err != nil { + t.Fatal(err) + } + + // Read the secret and create a lease + leaseResp, err := testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease1 := leaseResp.LeaseID + sampleSpace[lease1] = "lease" + + resp, err := testClient.Logical().Write("auth/token/create", nil) + if err != nil { + t.Fatal(err) + } + token2 := resp.Auth.ClientToken + sampleSpace[token2] = "token" + + testClient.SetToken(token2) + + leaseResp, err = testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease2 := leaseResp.LeaseID + sampleSpace[lease2] = "lease" + + resp, err = testClient.Logical().Write("auth/token/create", nil) + if err != nil { + t.Fatal(err) + } + token3 := resp.Auth.ClientToken + sampleSpace[token3] = "token" + + testClient.SetToken(token3) + + leaseResp, err = testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease3 := leaseResp.LeaseID + sampleSpace[lease3] = "lease" + + expected := make(map[string]string) + for k, v := range sampleSpace { + expected[k] = v + } + tokenRevocationValidation(t, sampleSpace, expected, leaseCache) + + // Revoke the top level token. This should evict all the leases belonging + // to this token, evict entries for all the child tokens and their + // respective leases. 
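+	// That is, the cache should end up completely empty.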
+ testClient.SetToken(token1) + err = testClient.Auth().Token().RevokeSelf("") + if err != nil { + t.Fatal(err) + } + time.Sleep(1 * time.Second) + + expected = make(map[string]string) + tokenRevocationValidation(t, sampleSpace, expected, leaseCache) +} + +func TestCache_TokenRevocations_Shutdown(t *testing.T) { + coreConfig := &vault.CoreConfig{ + DisableMlock: true, + DisableCache: true, + Logger: hclog.NewNullLogger(), + LogicalBackends: map[string]logical.Factory{ + "kv": vault.LeasedPassthroughBackendFactory, + }, + } + + sampleSpace := make(map[string]string) + + ctx, rootCancelFunc := context.WithCancel(namespace.RootContext(nil)) + cleanup, _, testClient, leaseCache := setupClusterAndAgent(ctx, t, coreConfig) + defer cleanup() + + token1 := testClient.Token() + sampleSpace[token1] = "token" + + // Mount the kv backend + err := testClient.Sys().Mount("kv", &api.MountInput{ + Type: "kv", + }) + if err != nil { + t.Fatal(err) + } + + // Create a secret in the backend + _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ + "value": "bar", + "ttl": "1h", + }) + if err != nil { + t.Fatal(err) + } + + // Read the secret and create a lease + leaseResp, err := testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease1 := leaseResp.LeaseID + sampleSpace[lease1] = "lease" + + resp, err := testClient.Logical().Write("auth/token/create", nil) + if err != nil { + t.Fatal(err) + } + token2 := resp.Auth.ClientToken + sampleSpace[token2] = "token" + + testClient.SetToken(token2) + + leaseResp, err = testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease2 := leaseResp.LeaseID + sampleSpace[lease2] = "lease" + + resp, err = testClient.Logical().Write("auth/token/create", nil) + if err != nil { + t.Fatal(err) + } + token3 := resp.Auth.ClientToken + sampleSpace[token3] = "token" + + testClient.SetToken(token3) + + leaseResp, err = testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease3 := leaseResp.LeaseID + sampleSpace[lease3] = "lease" + + expected := make(map[string]string) + for k, v := range sampleSpace { + expected[k] = v + } + tokenRevocationValidation(t, sampleSpace, expected, leaseCache) + + rootCancelFunc() + time.Sleep(1 * time.Second) + + // Ensure that all the entries are now gone + expected = make(map[string]string) + tokenRevocationValidation(t, sampleSpace, expected, leaseCache) +} + +func TestCache_TokenRevocations_BaseContextCancellation(t *testing.T) { + coreConfig := &vault.CoreConfig{ + DisableMlock: true, + DisableCache: true, + Logger: hclog.NewNullLogger(), + LogicalBackends: map[string]logical.Factory{ + "kv": vault.LeasedPassthroughBackendFactory, + }, + } + + sampleSpace := make(map[string]string) + + cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig) + defer cleanup() + + token1 := testClient.Token() + sampleSpace[token1] = "token" + + // Mount the kv backend + err := testClient.Sys().Mount("kv", &api.MountInput{ + Type: "kv", + }) + if err != nil { + t.Fatal(err) + } + + // Create a secret in the backend + _, err = testClient.Logical().Write("kv/foo", map[string]interface{}{ + "value": "bar", + "ttl": "1h", + }) + if err != nil { + t.Fatal(err) + } + + // Read the secret and create a lease + leaseResp, err := testClient.Logical().Read("kv/foo") + if err != nil { + t.Fatal(err) + } + lease1 := leaseResp.LeaseID + sampleSpace[lease1] = "lease" + + resp, err := testClient.Logical().Write("auth/token/create", nil) + if err != nil { + 
t.Fatal(err)
+	}
+	token2 := resp.Auth.ClientToken
+	sampleSpace[token2] = "token"
+
+	testClient.SetToken(token2)
+
+	leaseResp, err = testClient.Logical().Read("kv/foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+	lease2 := leaseResp.LeaseID
+	sampleSpace[lease2] = "lease"
+
+	resp, err = testClient.Logical().Write("auth/token/create", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	token3 := resp.Auth.ClientToken
+	sampleSpace[token3] = "token"
+
+	testClient.SetToken(token3)
+
+	leaseResp, err = testClient.Logical().Read("kv/foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+	lease3 := leaseResp.LeaseID
+	sampleSpace[lease3] = "lease"
+
+	expected := make(map[string]string)
+	for k, v := range sampleSpace {
+		expected[k] = v
+	}
+	tokenRevocationValidation(t, sampleSpace, expected, leaseCache)
+
+	// Cancel the base context of the lease cache. This should trigger
+	// evictions of all the entries from the cache.
+	leaseCache.baseCtxInfo.CancelFunc()
+	time.Sleep(1 * time.Second)
+
+	// Ensure that all the entries are now gone
+	expected = make(map[string]string)
+	tokenRevocationValidation(t, sampleSpace, expected, leaseCache)
+}
+
+func TestCache_NonCacheable(t *testing.T) {
+	coreConfig := &vault.CoreConfig{
+		DisableMlock: true,
+		DisableCache: true,
+		Logger:       hclog.NewNullLogger(),
+		LogicalBackends: map[string]logical.Factory{
+			"kv": kv.Factory,
+		},
+	}
+
+	cleanup, _, testClient, _ := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig)
+	defer cleanup()
+
+	// Query mounts first
+	origMounts, err := testClient.Sys().ListMounts()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Mount a kv backend
+	if err := testClient.Sys().Mount("kv", &api.MountInput{
+		Type: "kv",
+		Options: map[string]string{
+			"version": "2",
+		},
+	}); err != nil {
+		t.Fatal(err)
+	}
+
+	// Query mounts again
+	newMounts, err := testClient.Sys().ListMounts()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if diff := deep.Equal(origMounts, newMounts); diff == nil {
+		t.Logf("response #1: %#v", origMounts)
+		t.Logf("response #2: %#v", newMounts)
+		t.Fatal("expected requests to not be cached")
+	}
+
+	// Query a non-existing mount, expect an error from api.Response
+	ctx, cancelFunc := context.WithCancel(context.Background())
+	defer cancelFunc()
+	r := testClient.NewRequest("GET", "/v1/kv-invalid")
+
+	apiResp, err := testClient.RawRequestWithContext(ctx, r)
+	if apiResp != nil {
+		defer apiResp.Body.Close()
+	}
+	if apiResp == nil || apiResp.Error() == nil || apiResp.StatusCode != 404 {
+		t.Fatalf("expected an error response and a 404 from requesting an invalid path, got: %#v", apiResp)
+	}
+	if err == nil {
+		t.Fatal("expected an error from requesting an invalid path")
+	}
+}
+
+func TestCache_Caching_AuthResponse(t *testing.T) {
+	cleanup, _, testClient, _ := setupClusterAndAgent(namespace.RootContext(nil), t, nil)
+	defer cleanup()
+
+	resp, err := testClient.Logical().Write("auth/token/create", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	token := resp.Auth.ClientToken
+	testClient.SetToken(token)
+
+	authTokenCreateReq := func(t *testing.T, policies map[string]interface{}) *api.Secret {
+		resp, err := testClient.Logical().Write("auth/token/create", policies)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if resp.Auth == nil || resp.Auth.ClientToken == "" {
+			t.Fatalf("expected a valid client token in the response, got = %#v", resp)
+		}
+
+		return resp
+	}
+
+	// Test on auth response by creating a child token
+	{
+		proxiedResp := authTokenCreateReq(t, map[string]interface{}{
+			"policies": "default",
+		})
+
+		cachedResp := authTokenCreateReq(t, map[string]interface{}{
+			"policies": "default",
+		})
+
+		if diff := deep.Equal(proxiedResp.Auth.ClientToken, cachedResp.Auth.ClientToken); diff != nil {
+			t.Fatal(diff)
+		}
+	}
+
+	// Test on *non-renewable* auth response by creating a child root token
+	{
+		proxiedResp := authTokenCreateReq(t, nil)
+
+		cachedResp := authTokenCreateReq(t, nil)
+
+		if diff := deep.Equal(proxiedResp.Auth.ClientToken, cachedResp.Auth.ClientToken); diff != nil {
+			t.Fatal(diff)
+		}
+	}
+}
+
+func TestCache_Caching_LeaseResponse(t *testing.T) {
+	coreConfig := &vault.CoreConfig{
+		DisableMlock: true,
+		DisableCache: true,
+		Logger:       hclog.NewNullLogger(),
+		LogicalBackends: map[string]logical.Factory{
+			"kv": vault.LeasedPassthroughBackendFactory,
+		},
+	}
+
+	cleanup, client, testClient, _ := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig)
+	defer cleanup()
+
+	err := client.Sys().Mount("kv", &api.MountInput{
+		Type: "kv",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Test proxy by issuing two different requests
+	{
+		// Write data to the lease-kv backend
+		_, err := testClient.Logical().Write("kv/foo", map[string]interface{}{
+			"value": "bar",
+			"ttl":   "1h",
+		})
+		if err != nil {
+			t.Fatal(err)
+		}
+		_, err = testClient.Logical().Write("kv/foobar", map[string]interface{}{
+			"value": "bar",
+			"ttl":   "1h",
+		})
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		firstResp, err := testClient.Logical().Read("kv/foo")
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		secondResp, err := testClient.Logical().Read("kv/foobar")
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if diff := deep.Equal(firstResp, secondResp); diff == nil {
+			t.Logf("response: %#v", firstResp)
+			t.Fatal("expected proxied responses, got cached response on second request")
+		}
+	}
+
+	// Test caching behavior by issuing the same request twice
+	{
+		_, err := testClient.Logical().Write("kv/baz", map[string]interface{}{
+			"value": "foo",
+			"ttl":   "1h",
+		})
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		proxiedResp, err := testClient.Logical().Read("kv/baz")
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		cachedResp, err := testClient.Logical().Read("kv/baz")
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if diff := deep.Equal(proxiedResp, cachedResp); diff != nil {
+			t.Fatal(diff)
+		}
+	}
+}
+
+func TestCache_Caching_CacheClear(t *testing.T) {
+	t.Run("request_path", func(t *testing.T) {
+		testCachingCacheClearCommon(t, "request_path")
+	})
+
+	t.Run("lease", func(t *testing.T) {
+		testCachingCacheClearCommon(t, "lease")
+	})
+
+	t.Run("token", func(t *testing.T) {
+		testCachingCacheClearCommon(t, "token")
+	})
+
+	t.Run("token_accessor", func(t *testing.T) {
+		testCachingCacheClearCommon(t, "token_accessor")
+	})
+
+	t.Run("all", func(t *testing.T) {
+		testCachingCacheClearCommon(t, "all")
+	})
+}
+
+func testCachingCacheClearCommon(t *testing.T, clearType string) {
+	coreConfig := &vault.CoreConfig{
+		DisableMlock: true,
+		DisableCache: true,
+		Logger:       hclog.NewNullLogger(),
+		LogicalBackends: map[string]logical.Factory{
+			"kv": vault.LeasedPassthroughBackendFactory,
+		},
+	}
+
+	cleanup, client, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, coreConfig)
+	defer cleanup()
+
+	err := client.Sys().Mount("kv", &api.MountInput{
+		Type: "kv",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Write data to the lease-kv backend
+	_, err = testClient.Logical().Write("kv/foo", map[string]interface{}{
+		"value": "bar",
+		"ttl":   "1h",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Proxy this request, agent should cache the response
+	resp, err := testClient.Logical().Read("kv/foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+	gotLeaseID := resp.LeaseID
+
+	// Verify the entry exists
+	idx, err := leaseCache.db.Get(cachememdb.IndexNameLease, gotLeaseID)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if idx == nil {
+		t.Fatalf("expected cached entry, got: %v", idx)
+	}
+
+	data := map[string]interface{}{
+		"type": clearType,
+	}
+
+	// We need to set the value here depending on what we're trying to test.
+	// Some values are static, but others are dynamically generated at runtime.
+	switch clearType {
+	case "request_path":
+		data["value"] = "/v1/kv/foo"
+	case "lease":
+		data["value"] = resp.LeaseID
+	case "token":
+		data["value"] = testClient.Token()
+	case "token_accessor":
+		lookupResp, err := client.Auth().Token().Lookup(testClient.Token())
+		if err != nil {
+			t.Fatal(err)
+		}
+		data["value"] = lookupResp.Data["accessor"]
+	case "all":
+	default:
+		t.Fatalf("invalid type provided: %v", clearType)
+	}
+
+	r := testClient.NewRequest("PUT", consts.AgentPathCacheClear)
+	if err := r.SetJSONBody(data); err != nil {
+		t.Fatal(err)
+	}
+
+	ctx, cancelFunc := context.WithCancel(context.Background())
+	defer cancelFunc()
+	apiResp, err := testClient.RawRequestWithContext(ctx, r)
+	if apiResp != nil {
+		defer apiResp.Body.Close()
+	}
+	if apiResp != nil && apiResp.StatusCode == 404 {
+		_, parseErr := api.ParseSecret(apiResp.Body)
+		switch parseErr {
+		case nil:
+		case io.EOF:
+		default:
+			t.Fatal(parseErr)
+		}
+	}
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	time.Sleep(100 * time.Millisecond)
+
+	// Verify the entry is cleared
+	idx, err = leaseCache.db.Get(cachememdb.IndexNameLease, gotLeaseID)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if idx != nil {
+		t.Fatalf("expected entry to be nil, got: %v", idx)
+	}
+}
+
+func TestCache_AuthTokenCreateOrphan(t *testing.T) {
+	t.Run("create", func(t *testing.T) {
+		t.Run("managed", func(t *testing.T) {
+			cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, nil)
+			defer cleanup()
+
+			reqOpts := &api.TokenCreateRequest{
+				Policies: []string{"default"},
+				NoParent: true,
+			}
+			resp, err := testClient.Auth().Token().Create(reqOpts)
+			if err != nil {
+				t.Fatal(err)
+			}
+			token := resp.Auth.ClientToken
+
+			idx, err := leaseCache.db.Get(cachememdb.IndexNameToken, token)
+			if err != nil {
+				t.Fatal(err)
+			}
+			if idx == nil {
+				t.Fatalf("expected entry to be non-nil, got: %#v", idx)
+			}
+		})
+
+		t.Run("non-managed", func(t *testing.T) {
+			cleanup, clusterClient, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, nil)
+			defer cleanup()
+
+			reqOpts := &api.TokenCreateRequest{
+				Policies: []string{"default"},
+				NoParent: true,
+			}
+
+			// Use the test client but set the token to one that's not managed by agent
+			testClient.SetToken(clusterClient.Token())
+
+			resp, err := testClient.Auth().Token().Create(reqOpts)
+			if err != nil {
+				t.Fatal(err)
+			}
+			token := resp.Auth.ClientToken
+
+			idx, err := leaseCache.db.Get(cachememdb.IndexNameToken, token)
+			if err != nil {
+				t.Fatal(err)
+			}
+			if idx == nil {
+				t.Fatalf("expected entry to be non-nil, got: %#v", idx)
+			}
+		})
+	})
+
+	t.Run("create-orphan", func(t *testing.T) {
+		t.Run("managed", func(t *testing.T) {
+			cleanup, _, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, nil)
+			defer cleanup()
+
+			reqOpts := &api.TokenCreateRequest{
+				Policies: []string{"default"},
+			}
+			resp, err := testClient.Auth().Token().CreateOrphan(reqOpts)
+			if err != nil {
+				t.Fatal(err)
+			}
+			token := resp.Auth.ClientToken
+
+			idx, err := leaseCache.db.Get(cachememdb.IndexNameToken, token)
+			if err != nil {
+				t.Fatal(err)
+			}
+			if idx == nil {
+				t.Fatalf("expected entry to be non-nil, got: %#v", idx)
+			}
+		})
+
+		t.Run("non-managed", func(t *testing.T) {
+			cleanup, clusterClient, testClient, leaseCache := setupClusterAndAgent(namespace.RootContext(nil), t, nil)
+			defer cleanup()
+
+			reqOpts := &api.TokenCreateRequest{
+				Policies: []string{"default"},
+			}
+
+			// Use the test client but set the token to one that's not managed by agent
+			testClient.SetToken(clusterClient.Token())
+
+			resp, err := testClient.Auth().Token().CreateOrphan(reqOpts)
+			if err != nil {
+				t.Fatal(err)
+			}
+			token := resp.Auth.ClientToken
+
+			idx, err := leaseCache.db.Get(cachememdb.IndexNameToken, token)
+			if err != nil {
+				t.Fatal(err)
+			}
+			if idx == nil {
+				t.Fatalf("expected entry to be non-nil, got: %#v", idx)
+			}
+		})
+	})
+}
diff --git a/command/agentproxyshared/cache/cacheboltdb/bolt.go b/command/agentproxyshared/cache/cacheboltdb/bolt.go
new file mode 100644
index 0000000..434b411
--- /dev/null
+++ b/command/agentproxyshared/cache/cacheboltdb/bolt.go
@@ -0,0 +1,436 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package cacheboltdb
+
+import (
+	"context"
+	"encoding/binary"
+	"fmt"
+	"os"
+	"path/filepath"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/hashicorp/go-hclog"
+	wrapping "github.com/hashicorp/go-kms-wrapping/v2"
+	"github.com/hashicorp/go-multierror"
+	bolt "go.etcd.io/bbolt"
+)
+
+const (
+	// Keep track of schema version for future migrations
+	storageVersionKey = "version"
+	storageVersion    = "2" // v2 merges auth-lease and secret-lease buckets into one ordered bucket
+
+	// DatabaseFileName - filename for the persistent cache file
+	DatabaseFileName = "vault-agent-cache.db"
+
+	// metaBucketName - naming the meta bucket that holds the version and
+	// bootstrapping keys
+	metaBucketName = "meta"
+
+	// DEPRECATED: secretLeaseType - v1 Bucket/type for leases with secret info
+	secretLeaseType = "secret-lease"
+
+	// DEPRECATED: authLeaseType - v1 Bucket/type for leases with auth info
+	authLeaseType = "auth-lease"
+
+	// TokenType - Bucket/type for auto-auth tokens
+	TokenType = "token"
+
+	// LeaseType - v2 Bucket/type for auth AND secret leases.
+	//
+	// This bucket stores keys in the same order they were created using
+	// auto-incrementing keys and the fact that BoltDB stores keys in byte
+	// slice order. This means when we iterate through this bucket during
+	// restore, we will always restore parent tokens before their children,
+	// allowing us to correctly attach child contexts to their parent's context.
+	LeaseType = "lease"
+
+	// lookupType - v2 Bucket/type to map from a cachememdb index ID to an
+	// auto-incrementing BoltDB key. Facilitates deletes from the lease
+	// bucket using an ID instead of the auto-incrementing BoltDB key.
+	lookupType = "lookup"
+
+	// AutoAuthToken - key for the latest auto-auth token
+	AutoAuthToken = "auto-auth-token"
+
+	// RetrievalTokenMaterial is the actual key or token in the key bucket
+	RetrievalTokenMaterial = "retrieval-token-material"
+)
+
+// BoltStorage is a persistent cache using a bolt db. Items are organized with
+// the version and bootstrapping items in the "meta" bucket, and tokens, auth
+// leases, and secret leases in their own buckets.
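+// All values are encrypted with the configured wrapping.Wrapper (using the
+// configured AAD) before being written, and decrypted again on read.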
+type BoltStorage struct { + db *bolt.DB + logger hclog.Logger + wrapper wrapping.Wrapper + aad string +} + +// BoltStorageConfig is the collection of input parameters for setting up bolt +// storage +type BoltStorageConfig struct { + Path string + Logger hclog.Logger + Wrapper wrapping.Wrapper + AAD string +} + +// NewBoltStorage opens a new bolt db at the specified file path and returns it. +// If the db already exists the buckets will just be created if they don't +// exist. +func NewBoltStorage(config *BoltStorageConfig) (*BoltStorage, error) { + dbPath := filepath.Join(config.Path, DatabaseFileName) + db, err := bolt.Open(dbPath, 0o600, &bolt.Options{Timeout: 1 * time.Second}) + if err != nil { + return nil, err + } + err = db.Update(func(tx *bolt.Tx) error { + return createBoltSchema(tx, storageVersion) + }) + if err != nil { + return nil, err + } + bs := &BoltStorage{ + db: db, + logger: config.Logger, + wrapper: config.Wrapper, + aad: config.AAD, + } + return bs, nil +} + +func createBoltSchema(tx *bolt.Tx, createVersion string) error { + switch { + case createVersion == "1": + if err := createV1BoltSchema(tx); err != nil { + return err + } + case createVersion == "2": + if err := createV2BoltSchema(tx); err != nil { + return err + } + default: + return fmt.Errorf("schema version %s not supported", createVersion) + } + + meta, err := tx.CreateBucketIfNotExists([]byte(metaBucketName)) + if err != nil { + return fmt.Errorf("failed to create bucket %s: %w", metaBucketName, err) + } + + // Check and set file version in the meta bucket. + version := meta.Get([]byte(storageVersionKey)) + switch { + case version == nil: + err = meta.Put([]byte(storageVersionKey), []byte(createVersion)) + if err != nil { + return fmt.Errorf("failed to set storage version: %w", err) + } + + return nil + + case string(version) == createVersion: + return nil + + case string(version) == "1" && createVersion == "2": + return migrateFromV1ToV2Schema(tx) + + default: + return fmt.Errorf("storage migration from %s to %s not implemented", string(version), createVersion) + } +} + +func createV1BoltSchema(tx *bolt.Tx) error { + // Create the buckets for tokens and leases. + for _, bucket := range []string{TokenType, authLeaseType, secretLeaseType} { + if _, err := tx.CreateBucketIfNotExists([]byte(bucket)); err != nil { + return fmt.Errorf("failed to create %s bucket: %w", bucket, err) + } + } + + return nil +} + +func createV2BoltSchema(tx *bolt.Tx) error { + // Create the buckets for tokens and leases. 
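+	// In the v2 layout this also includes the lookup bucket, which maps cache
+	// index IDs to the auto-incremented lease keys.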
+	for _, bucket := range []string{TokenType, LeaseType, lookupType} {
+		if _, err := tx.CreateBucketIfNotExists([]byte(bucket)); err != nil {
+			return fmt.Errorf("failed to create %s bucket: %w", bucket, err)
+		}
+	}
+
+	return nil
+}
+
+func migrateFromV1ToV2Schema(tx *bolt.Tx) error {
+	if err := createV2BoltSchema(tx); err != nil {
+		return err
+	}
+
+	for _, v1BucketType := range []string{authLeaseType, secretLeaseType} {
+		if bucket := tx.Bucket([]byte(v1BucketType)); bucket != nil {
+			// ForEach propagates any error returned by the callback, so it
+			// must not be discarded here.
+			err := bucket.ForEach(func(key, value []byte) error {
+				autoIncKey, err := autoIncrementedLeaseKey(tx, string(key))
+				if err != nil {
+					return fmt.Errorf("error migrating %s %q key to auto incremented key: %w", v1BucketType, string(key), err)
+				}
+				if err := tx.Bucket([]byte(LeaseType)).Put(autoIncKey, value); err != nil {
+					return fmt.Errorf("error migrating %s %q from v1 to v2 schema: %w", v1BucketType, string(key), err)
+				}
+				return nil
+			})
+			if err != nil {
+				return err
+			}
+
+			if err := tx.DeleteBucket([]byte(v1BucketType)); err != nil {
+				return fmt.Errorf("failed to clean up %s bucket during v1 to v2 schema migration: %w", v1BucketType, err)
+			}
+		}
+	}
+
+	meta, err := tx.CreateBucketIfNotExists([]byte(metaBucketName))
+	if err != nil {
+		return fmt.Errorf("failed to create meta bucket: %w", err)
+	}
+	if err := meta.Put([]byte(storageVersionKey), []byte(storageVersion)); err != nil {
+		return fmt.Errorf("failed to update schema from v1 to v2: %w", err)
+	}
+
+	return nil
+}
+
+func autoIncrementedLeaseKey(tx *bolt.Tx, id string) ([]byte, error) {
+	leaseBucket := tx.Bucket([]byte(LeaseType))
+	keyValue, err := leaseBucket.NextSequence()
+	if err != nil {
+		return nil, fmt.Errorf("failed to generate lookup key for id %q: %w", id, err)
+	}
+
+	key := make([]byte, 8)
+	// MUST be big endian, because keys are ordered by byte slice comparison
+	// which progressively compares each byte in the slice starting at index 0.
+ // BigEndian in the range [255-257] looks like this: + // [0 0 0 0 0 0 0 255] + // [0 0 0 0 0 0 1 0] + // [0 0 0 0 0 0 1 1] + // LittleEndian in the same range looks like this: + // [255 0 0 0 0 0 0 0] + // [0 1 0 0 0 0 0 0] + // [1 1 0 0 0 0 0 0] + binary.BigEndian.PutUint64(key, keyValue) + + err = tx.Bucket([]byte(lookupType)).Put([]byte(id), key) + if err != nil { + return nil, err + } + + return key, nil +} + +// Set an index (token or lease) in bolt storage +func (b *BoltStorage) Set(ctx context.Context, id string, plaintext []byte, indexType string) error { + blob, err := b.wrapper.Encrypt(ctx, plaintext, wrapping.WithAad([]byte(b.aad))) + if err != nil { + return fmt.Errorf("error encrypting %s index: %w", indexType, err) + } + + protoBlob, err := proto.Marshal(blob) + if err != nil { + return err + } + + return b.db.Update(func(tx *bolt.Tx) error { + var key []byte + switch indexType { + case LeaseType: + // If this is a lease type, generate an auto-incrementing key and + // store an ID -> key lookup entry + key, err = autoIncrementedLeaseKey(tx, id) + if err != nil { + return err + } + case TokenType: + // If this is an auto-auth token, also stash it in the meta bucket for + // easy retrieval upon restore + key = []byte(id) + meta := tx.Bucket([]byte(metaBucketName)) + if err := meta.Put([]byte(AutoAuthToken), protoBlob); err != nil { + return fmt.Errorf("failed to set latest auto-auth token: %w", err) + } + default: + return fmt.Errorf("called Set for unsupported type %q", indexType) + } + s := tx.Bucket([]byte(indexType)) + if s == nil { + return fmt.Errorf("bucket %q not found", indexType) + } + return s.Put(key, protoBlob) + }) +} + +// Delete an index (token or lease) by key from bolt storage +func (b *BoltStorage) Delete(id string, indexType string) error { + return b.db.Update(func(tx *bolt.Tx) error { + key := []byte(id) + if indexType == LeaseType { + key = tx.Bucket([]byte(lookupType)).Get(key) + if key == nil { + return fmt.Errorf("failed to lookup bolt DB key for id %q", id) + } + + err := tx.Bucket([]byte(lookupType)).Delete([]byte(id)) + if err != nil { + return fmt.Errorf("failed to delete %q from lookup bucket: %w", id, err) + } + } + + bucket := tx.Bucket([]byte(indexType)) + if bucket == nil { + return fmt.Errorf("bucket %q not found during delete", indexType) + } + if err := bucket.Delete(key); err != nil { + return fmt.Errorf("failed to delete %q from %q bucket: %w", id, indexType, err) + } + b.logger.Trace("deleted index from bolt db", "id", id) + return nil + }) +} + +func (b *BoltStorage) decrypt(ctx context.Context, ciphertext []byte) ([]byte, error) { + var blob wrapping.BlobInfo + if err := proto.Unmarshal(ciphertext, &blob); err != nil { + return nil, err + } + + return b.wrapper.Decrypt(ctx, &blob, wrapping.WithAad([]byte(b.aad))) +} + +// GetByType returns a list of stored items of the specified type +func (b *BoltStorage) GetByType(ctx context.Context, indexType string) ([][]byte, error) { + var returnBytes [][]byte + + err := b.db.View(func(tx *bolt.Tx) error { + var errors *multierror.Error + + bucket := tx.Bucket([]byte(indexType)) + if bucket == nil { + return fmt.Errorf("bucket %q not found", indexType) + } + bucket.ForEach(func(key, ciphertext []byte) error { + plaintext, err := b.decrypt(ctx, ciphertext) + if err != nil { + errors = multierror.Append(errors, fmt.Errorf("error decrypting entry %s: %w", key, err)) + return nil + } + + returnBytes = append(returnBytes, plaintext) + return nil + }) + return errors.ErrorOrNil() + }) + + return 
returnBytes, err
+}
+
+// GetAutoAuthToken retrieves the latest auto-auth token, and returns nil if none
+// exists yet
+func (b *BoltStorage) GetAutoAuthToken(ctx context.Context) ([]byte, error) {
+	var encryptedToken []byte
+
+	err := b.db.View(func(tx *bolt.Tx) error {
+		meta := tx.Bucket([]byte(metaBucketName))
+		if meta == nil {
+			return fmt.Errorf("bucket %q not found", metaBucketName)
+		}
+		value := meta.Get([]byte(AutoAuthToken))
+		if value != nil {
+			encryptedToken = make([]byte, len(value))
+			copy(encryptedToken, value)
+		}
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	if encryptedToken == nil {
+		return nil, nil
+	}
+
+	plaintext, err := b.decrypt(ctx, encryptedToken)
+	if err != nil {
+		return nil, fmt.Errorf("failed to decrypt auto-auth token: %w", err)
+	}
+	return plaintext, nil
+}
+
+// GetRetrievalToken retrieves plaintext retrieval-token material from the meta
+// bucket, which will be used by the key manager to retrieve the encryption
+// key; returns nil if none is set
+func (b *BoltStorage) GetRetrievalToken() ([]byte, error) {
+	var token []byte
+
+	err := b.db.View(func(tx *bolt.Tx) error {
+		metaBucket := tx.Bucket([]byte(metaBucketName))
+		if metaBucket == nil {
+			return fmt.Errorf("bucket %q not found", metaBucketName)
+		}
+		value := metaBucket.Get([]byte(RetrievalTokenMaterial))
+		if value != nil {
+			token = make([]byte, len(value))
+			copy(token, value)
+		}
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return token, nil
+}
+
+// StoreRetrievalToken stores plaintext retrieval-token material in the meta
+// bucket
+func (b *BoltStorage) StoreRetrievalToken(token []byte) error {
+	return b.db.Update(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket([]byte(metaBucketName))
+		if bucket == nil {
+			return fmt.Errorf("bucket %q not found", metaBucketName)
+		}
+		return bucket.Put([]byte(RetrievalTokenMaterial), token)
+	})
+}
+
+// Close the boltdb
+func (b *BoltStorage) Close() error {
+	b.logger.Trace("closing bolt db", "path", b.db.Path())
+	return b.db.Close()
+}
+
+// Clear the boltdb by deleting all the token and lease buckets and recreating
+// the schema/layout
+func (b *BoltStorage) Clear() error {
+	return b.db.Update(func(tx *bolt.Tx) error {
+		for _, name := range []string{TokenType, LeaseType, lookupType} {
+			b.logger.Trace("deleting bolt bucket", "name", name)
+			if err := tx.DeleteBucket([]byte(name)); err != nil {
+				return err
+			}
+		}
+		return createBoltSchema(tx, storageVersion)
+	})
+}
+
+// DBFileExists checks whether the vault agent cache file at `path` exists
+func DBFileExists(path string) (bool, error) {
+	checkFile, err := os.OpenFile(filepath.Join(path, DatabaseFileName), os.O_RDWR, 0o600)
+	switch {
+	case err == nil:
+		// Only close the handle when the open actually succeeded; deferring
+		// Close on a nil *os.File handle would be misleading.
+		checkFile.Close()
+		return true, nil
+	case os.IsNotExist(err):
+		return false, nil
+	default:
+		return false, fmt.Errorf("failed to check if bolt file exists at path %s: %w", path, err)
+	}
+}
diff --git a/command/agentproxyshared/cache/cacheboltdb/bolt_test.go b/command/agentproxyshared/cache/cacheboltdb/bolt_test.go
new file mode 100644
index 0000000..95aacd2
--- /dev/null
+++ b/command/agentproxyshared/cache/cacheboltdb/bolt_test.go
@@ -0,0 +1,381 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package cacheboltdb + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/command/agentproxyshared/cache/keymanager" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + bolt "go.etcd.io/bbolt" +) + +func getTestKeyManager(t *testing.T) keymanager.KeyManager { + t.Helper() + + km, err := keymanager.NewPassthroughKeyManager(context.Background(), nil) + require.NoError(t, err) + + return km +} + +func TestBolt_SetGet(t *testing.T) { + ctx := context.Background() + + path, err := ioutil.TempDir("", "bolt-test") + require.NoError(t, err) + defer os.RemoveAll(path) + + b, err := NewBoltStorage(&BoltStorageConfig{ + Path: path, + Logger: hclog.Default(), + Wrapper: getTestKeyManager(t).Wrapper(), + }) + require.NoError(t, err) + + secrets, err := b.GetByType(ctx, LeaseType) + assert.NoError(t, err) + require.Len(t, secrets, 0) + + err = b.Set(ctx, "test1", []byte("hello"), LeaseType) + assert.NoError(t, err) + secrets, err = b.GetByType(ctx, LeaseType) + assert.NoError(t, err) + require.Len(t, secrets, 1) + assert.Equal(t, []byte("hello"), secrets[0]) +} + +func TestBoltDelete(t *testing.T) { + ctx := context.Background() + + path, err := ioutil.TempDir("", "bolt-test") + require.NoError(t, err) + defer os.RemoveAll(path) + + b, err := NewBoltStorage(&BoltStorageConfig{ + Path: path, + Logger: hclog.Default(), + Wrapper: getTestKeyManager(t).Wrapper(), + }) + require.NoError(t, err) + + err = b.Set(ctx, "secret-test1", []byte("hello1"), LeaseType) + require.NoError(t, err) + err = b.Set(ctx, "secret-test2", []byte("hello2"), LeaseType) + require.NoError(t, err) + + secrets, err := b.GetByType(ctx, LeaseType) + require.NoError(t, err) + assert.Len(t, secrets, 2) + assert.ElementsMatch(t, [][]byte{[]byte("hello1"), []byte("hello2")}, secrets) + + err = b.Delete("secret-test1", LeaseType) + require.NoError(t, err) + secrets, err = b.GetByType(ctx, LeaseType) + require.NoError(t, err) + require.Len(t, secrets, 1) + assert.Equal(t, []byte("hello2"), secrets[0]) +} + +func TestBoltClear(t *testing.T) { + ctx := context.Background() + + path, err := ioutil.TempDir("", "bolt-test") + require.NoError(t, err) + defer os.RemoveAll(path) + + b, err := NewBoltStorage(&BoltStorageConfig{ + Path: path, + Logger: hclog.Default(), + Wrapper: getTestKeyManager(t).Wrapper(), + }) + require.NoError(t, err) + + // Populate the bolt db + err = b.Set(ctx, "secret-test1", []byte("hello1"), LeaseType) + require.NoError(t, err) + secrets, err := b.GetByType(ctx, LeaseType) + require.NoError(t, err) + require.Len(t, secrets, 1) + assert.Equal(t, []byte("hello1"), secrets[0]) + + err = b.Set(ctx, "auth-test1", []byte("hello2"), LeaseType) + require.NoError(t, err) + auths, err := b.GetByType(ctx, LeaseType) + require.NoError(t, err) + require.Len(t, auths, 2) + assert.Equal(t, []byte("hello1"), auths[0]) + assert.Equal(t, []byte("hello2"), auths[1]) + + err = b.Set(ctx, "token-test1", []byte("hello"), TokenType) + require.NoError(t, err) + tokens, err := b.GetByType(ctx, TokenType) + require.NoError(t, err) + require.Len(t, tokens, 1) + assert.Equal(t, []byte("hello"), tokens[0]) + + // Clear the bolt db, and check that it's indeed clear + err = b.Clear() + require.NoError(t, err) + auths, err = b.GetByType(ctx, LeaseType) + require.NoError(t, err) + assert.Len(t, auths, 0) + tokens, err = 
b.GetByType(ctx, TokenType) + require.NoError(t, err) + assert.Len(t, tokens, 0) +} + +func TestBoltSetAutoAuthToken(t *testing.T) { + ctx := context.Background() + + path, err := ioutil.TempDir("", "bolt-test") + require.NoError(t, err) + defer os.RemoveAll(path) + + b, err := NewBoltStorage(&BoltStorageConfig{ + Path: path, + Logger: hclog.Default(), + Wrapper: getTestKeyManager(t).Wrapper(), + }) + require.NoError(t, err) + + token, err := b.GetAutoAuthToken(ctx) + assert.NoError(t, err) + assert.Nil(t, token) + + // set first token + err = b.Set(ctx, "token-test1", []byte("hello 1"), TokenType) + require.NoError(t, err) + secrets, err := b.GetByType(ctx, TokenType) + require.NoError(t, err) + require.Len(t, secrets, 1) + assert.Equal(t, []byte("hello 1"), secrets[0]) + token, err = b.GetAutoAuthToken(ctx) + assert.NoError(t, err) + assert.Equal(t, []byte("hello 1"), token) + + // set second token + err = b.Set(ctx, "token-test2", []byte("hello 2"), TokenType) + require.NoError(t, err) + secrets, err = b.GetByType(ctx, TokenType) + require.NoError(t, err) + require.Len(t, secrets, 2) + assert.ElementsMatch(t, [][]byte{[]byte("hello 1"), []byte("hello 2")}, secrets) + token, err = b.GetAutoAuthToken(ctx) + assert.NoError(t, err) + assert.Equal(t, []byte("hello 2"), token) +} + +func TestDBFileExists(t *testing.T) { + testCases := []struct { + name string + mkDir bool + createFile bool + expectExist bool + }{ + { + name: "all exists", + mkDir: true, + createFile: true, + expectExist: true, + }, + { + name: "dir exist, file missing", + mkDir: true, + createFile: false, + expectExist: false, + }, + { + name: "all missing", + mkDir: false, + createFile: false, + expectExist: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var tmpPath string + var err error + if tc.mkDir { + tmpPath, err = ioutil.TempDir("", "test-db-path") + require.NoError(t, err) + } + if tc.createFile { + err = ioutil.WriteFile(path.Join(tmpPath, DatabaseFileName), []byte("test-db-path"), 0o600) + require.NoError(t, err) + } + exists, err := DBFileExists(tmpPath) + assert.NoError(t, err) + assert.Equal(t, tc.expectExist, exists) + }) + } +} + +func Test_SetGetRetrievalToken(t *testing.T) { + testCases := []struct { + name string + tokenToSet []byte + expectedToken []byte + }{ + { + name: "normal set and get", + tokenToSet: []byte("test token"), + expectedToken: []byte("test token"), + }, + { + name: "no token set", + tokenToSet: nil, + expectedToken: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + path, err := ioutil.TempDir("", "bolt-test") + require.NoError(t, err) + defer os.RemoveAll(path) + + b, err := NewBoltStorage(&BoltStorageConfig{ + Path: path, + Logger: hclog.Default(), + Wrapper: getTestKeyManager(t).Wrapper(), + }) + require.NoError(t, err) + defer b.Close() + + if tc.tokenToSet != nil { + err := b.StoreRetrievalToken(tc.tokenToSet) + require.NoError(t, err) + } + gotKey, err := b.GetRetrievalToken() + assert.NoError(t, err) + assert.Equal(t, tc.expectedToken, gotKey) + }) + } +} + +func TestBolt_MigrateFromV1ToV2Schema(t *testing.T) { + ctx := context.Background() + + path, err := ioutil.TempDir("", "bolt-test") + require.NoError(t, err) + defer os.RemoveAll(path) + + dbPath := filepath.Join(path, DatabaseFileName) + db, err := bolt.Open(dbPath, 0o600, &bolt.Options{Timeout: 1 * time.Second}) + require.NoError(t, err) + err = db.Update(func(tx *bolt.Tx) error { + return createBoltSchema(tx, "1") + }) + require.NoError(t, 
err) + b := &BoltStorage{ + db: db, + logger: hclog.Default(), + wrapper: getTestKeyManager(t).Wrapper(), + } + + // Manually insert some items into the v1 schema. + err = db.Update(func(tx *bolt.Tx) error { + blob, err := b.wrapper.Encrypt(ctx, []byte("ignored-contents")) + if err != nil { + return fmt.Errorf("error encrypting contents: %w", err) + } + protoBlob, err := proto.Marshal(blob) + if err != nil { + return err + } + + if err := tx.Bucket([]byte(authLeaseType)).Put([]byte("test-auth-id-1"), protoBlob); err != nil { + return err + } + if err := tx.Bucket([]byte(authLeaseType)).Put([]byte("test-auth-id-2"), protoBlob); err != nil { + return err + } + if err := tx.Bucket([]byte(secretLeaseType)).Put([]byte("test-secret-id-1"), protoBlob); err != nil { + return err + } + + return nil + }) + require.NoError(t, err) + + // Check we have the contents we would expect for the v1 schema. + leases, err := b.GetByType(ctx, authLeaseType) + require.NoError(t, err) + assert.Len(t, leases, 2) + leases, err = b.GetByType(ctx, secretLeaseType) + require.NoError(t, err) + assert.Len(t, leases, 1) + leases, err = b.GetByType(ctx, LeaseType) + require.Error(t, err) + assert.True(t, strings.Contains(err.Error(), "not found")) + + // Now migrate to the v2 schema. + err = db.Update(migrateFromV1ToV2Schema) + require.NoError(t, err) + + // Check all the leases have been migrated into one bucket. + leases, err = b.GetByType(ctx, authLeaseType) + require.Error(t, err) + assert.True(t, strings.Contains(err.Error(), "not found")) + leases, err = b.GetByType(ctx, secretLeaseType) + require.Error(t, err) + assert.True(t, strings.Contains(err.Error(), "not found")) + leases, err = b.GetByType(ctx, LeaseType) + require.NoError(t, err) + assert.Len(t, leases, 3) +} + +func TestBolt_MigrateFromInvalidToV2Schema(t *testing.T) { + ctx := context.Background() + + path, err := ioutil.TempDir("", "bolt-test") + require.NoError(t, err) + defer os.RemoveAll(path) + + dbPath := filepath.Join(path, DatabaseFileName) + db, err := bolt.Open(dbPath, 0o600, &bolt.Options{Timeout: 1 * time.Second}) + require.NoError(t, err) + b := &BoltStorage{ + db: db, + logger: hclog.Default(), + wrapper: getTestKeyManager(t).Wrapper(), + } + + // All GetByType calls should fail as there's no schema + for _, bucket := range []string{authLeaseType, secretLeaseType, LeaseType} { + _, err = b.GetByType(ctx, bucket) + require.Error(t, err) + assert.True(t, strings.Contains(err.Error(), "not found")) + } + + // Now migrate to the v2 schema. + err = db.Update(migrateFromV1ToV2Schema) + require.NoError(t, err) + + // Deprecated auth and secret lease buckets still shouldn't exist + // All GetByType calls should fail as there's no schema + for _, bucket := range []string{authLeaseType, secretLeaseType} { + _, err = b.GetByType(ctx, bucket) + require.Error(t, err) + assert.True(t, strings.Contains(err.Error(), "not found")) + } + + // GetByType for LeaseType should now return an empty result + leases, err := b.GetByType(ctx, LeaseType) + require.NoError(t, err) + require.Len(t, leases, 0) +} diff --git a/command/agentproxyshared/cache/cachememdb/cache_memdb.go b/command/agentproxyshared/cache/cachememdb/cache_memdb.go new file mode 100644 index 0000000..93aa2bf --- /dev/null +++ b/command/agentproxyshared/cache/cachememdb/cache_memdb.go @@ -0,0 +1,243 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package cachememdb + +import ( + "errors" + "fmt" + "sync/atomic" + + memdb "github.com/hashicorp/go-memdb" +) + +const ( + tableNameIndexer = "indexer" +) + +// CacheMemDB is the underlying cache database for storing indexes. +type CacheMemDB struct { + db *atomic.Value +} + +// New creates a new instance of CacheMemDB. +func New() (*CacheMemDB, error) { + db, err := newDB() + if err != nil { + return nil, err + } + + c := &CacheMemDB{ + db: new(atomic.Value), + } + c.db.Store(db) + + return c, nil +} + +func newDB() (*memdb.MemDB, error) { + cacheSchema := &memdb.DBSchema{ + Tables: map[string]*memdb.TableSchema{ + tableNameIndexer: { + Name: tableNameIndexer, + Indexes: map[string]*memdb.IndexSchema{ + // This index enables fetching the cached item based on the + // identifier of the index. + IndexNameID: { + Name: IndexNameID, + Unique: true, + Indexer: &memdb.StringFieldIndex{ + Field: "ID", + }, + }, + // This index enables fetching all the entries in cache for + // a given request path, in a given namespace. + IndexNameRequestPath: { + Name: IndexNameRequestPath, + Unique: false, + Indexer: &memdb.CompoundIndex{ + Indexes: []memdb.Indexer{ + &memdb.StringFieldIndex{ + Field: "Namespace", + }, + &memdb.StringFieldIndex{ + Field: "RequestPath", + }, + }, + }, + }, + // This index enables fetching all the entries in cache + // belonging to the leases of a given token. + IndexNameLeaseToken: { + Name: IndexNameLeaseToken, + Unique: false, + AllowMissing: true, + Indexer: &memdb.StringFieldIndex{ + Field: "LeaseToken", + }, + }, + // This index enables fetching all the entries in cache + // that are tied to the given token, regardless of the + // entries belonging to the token or belonging to the + // lease. + IndexNameToken: { + Name: IndexNameToken, + Unique: true, + AllowMissing: true, + Indexer: &memdb.StringFieldIndex{ + Field: "Token", + }, + }, + // This index enables fetching all the entries in cache for + // the given parent token. + IndexNameTokenParent: { + Name: IndexNameTokenParent, + Unique: false, + AllowMissing: true, + Indexer: &memdb.StringFieldIndex{ + Field: "TokenParent", + }, + }, + // This index enables fetching all the entries in cache for + // the given accessor. + IndexNameTokenAccessor: { + Name: IndexNameTokenAccessor, + Unique: true, + AllowMissing: true, + Indexer: &memdb.StringFieldIndex{ + Field: "TokenAccessor", + }, + }, + // This index enables fetching all the entries in cache for + // the given lease identifier. + IndexNameLease: { + Name: IndexNameLease, + Unique: true, + AllowMissing: true, + Indexer: &memdb.StringFieldIndex{ + Field: "Lease", + }, + }, + }, + }, + }, + } + + db, err := memdb.NewMemDB(cacheSchema) + if err != nil { + return nil, err + } + return db, nil +} + +// Get returns the index based on the indexer and the index values provided. +func (c *CacheMemDB) Get(indexName string, indexValues ...interface{}) (*Index, error) { + if !validIndexName(indexName) { + return nil, fmt.Errorf("invalid index name %q", indexName) + } + + txn := c.db.Load().(*memdb.MemDB).Txn(false) + + raw, err := txn.First(tableNameIndexer, indexName, indexValues...) + if err != nil { + return nil, err + } + + if raw == nil { + return nil, nil + } + + index, ok := raw.(*Index) + if !ok { + return nil, errors.New("unable to parse index value from the cache") + } + + return index, nil +} + +// Set stores the index into the cache. 
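+//
+// As a hedged illustration (not part of the upstream change), a caller with a
+// *CacheMemDB named cache might store and fetch an entry roughly like this;
+// all field values below are hypothetical:
+//
+//	idx := &Index{
+//		ID:          "abc123", // unique hash of the request
+//		Token:       "test-token",
+//		Namespace:   "root/",
+//		RequestPath: "/v1/secret/foo",
+//	}
+//	if err := cache.Set(idx); err != nil {
+//		// handle error
+//	}
+//	got, _ := cache.Get(IndexNameID, "abc123") // returns idx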
+func (c *CacheMemDB) Set(index *Index) error { + if index == nil { + return errors.New("nil index provided") + } + + txn := c.db.Load().(*memdb.MemDB).Txn(true) + defer txn.Abort() + + if err := txn.Insert(tableNameIndexer, index); err != nil { + return fmt.Errorf("unable to insert index into cache: %v", err) + } + + txn.Commit() + + return nil +} + +// GetByPrefix returns all the cached indexes based on the index name and the +// value prefix. +func (c *CacheMemDB) GetByPrefix(indexName string, indexValues ...interface{}) ([]*Index, error) { + if !validIndexName(indexName) { + return nil, fmt.Errorf("invalid index name %q", indexName) + } + + indexName = indexName + "_prefix" + + // Get all the objects + txn := c.db.Load().(*memdb.MemDB).Txn(false) + + iter, err := txn.Get(tableNameIndexer, indexName, indexValues...) + if err != nil { + return nil, err + } + + var indexes []*Index + for { + obj := iter.Next() + if obj == nil { + break + } + index, ok := obj.(*Index) + if !ok { + return nil, fmt.Errorf("failed to cast cached index") + } + + indexes = append(indexes, index) + } + + return indexes, nil +} + +// Evict removes an index from the cache based on index name and value. +func (c *CacheMemDB) Evict(indexName string, indexValues ...interface{}) error { + index, err := c.Get(indexName, indexValues...) + if err != nil { + return fmt.Errorf("unable to fetch index on cache deletion: %v", err) + } + + if index == nil { + return nil + } + + txn := c.db.Load().(*memdb.MemDB).Txn(true) + defer txn.Abort() + + if err := txn.Delete(tableNameIndexer, index); err != nil { + return fmt.Errorf("unable to delete index from cache: %v", err) + } + + txn.Commit() + + return nil +} + +// Flush resets the underlying cache object. +func (c *CacheMemDB) Flush() error { + newDB, err := newDB() + if err != nil { + return err + } + + c.db.Store(newDB) + + return nil +} diff --git a/command/agentproxyshared/cache/cachememdb/cache_memdb_test.go b/command/agentproxyshared/cache/cachememdb/cache_memdb_test.go new file mode 100644 index 0000000..87b8eee --- /dev/null +++ b/command/agentproxyshared/cache/cachememdb/cache_memdb_test.go @@ -0,0 +1,395 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package cachememdb + +import ( + "context" + "testing" + + "github.com/go-test/deep" +) + +func testContextInfo() *ContextInfo { + ctx, cancelFunc := context.WithCancel(context.Background()) + + return &ContextInfo{ + Ctx: ctx, + CancelFunc: cancelFunc, + } +} + +func TestNew(t *testing.T) { + _, err := New() + if err != nil { + t.Fatal(err) + } +} + +func TestCacheMemDB_Get(t *testing.T) { + cache, err := New() + if err != nil { + t.Fatal(err) + } + + // Test invalid index name + _, err = cache.Get("foo", "bar") + if err == nil { + t.Fatal("expected error") + } + + // Test on empty cache + index, err := cache.Get(IndexNameID, "foo") + if err != nil { + t.Fatal(err) + } + if index != nil { + t.Fatalf("expected nil index, got: %v", index) + } + + // Populate cache + in := &Index{ + ID: "test_id", + Namespace: "test_ns/", + RequestPath: "/v1/request/path", + Token: "test_token", + TokenAccessor: "test_accessor", + Lease: "test_lease", + Response: []byte("hello world"), + } + + if err := cache.Set(in); err != nil { + t.Fatal(err) + } + + testCases := []struct { + name string + indexName string + indexValues []interface{} + }{ + { + "by_index_id", + "id", + []interface{}{in.ID}, + }, + { + "by_request_path", + "request_path", + []interface{}{in.Namespace, in.RequestPath}, + }, + { + "by_lease", + "lease", + []interface{}{in.Lease}, + }, + { + "by_token", + "token", + []interface{}{in.Token}, + }, + { + "by_token_accessor", + "token_accessor", + []interface{}{in.TokenAccessor}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + out, err := cache.Get(tc.indexName, tc.indexValues...) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(in, out); diff != nil { + t.Fatal(diff) + } + }) + } +} + +func TestCacheMemDB_GetByPrefix(t *testing.T) { + cache, err := New() + if err != nil { + t.Fatal(err) + } + + // Test invalid index name + _, err = cache.GetByPrefix("foo", "bar", "baz") + if err == nil { + t.Fatal("expected error") + } + + // Test on empty cache + index, err := cache.GetByPrefix(IndexNameRequestPath, "foo", "bar") + if err != nil { + t.Fatal(err) + } + if index != nil { + t.Fatalf("expected nil index, got: %v", index) + } + + // Populate cache + in := &Index{ + ID: "test_id", + Namespace: "test_ns/", + RequestPath: "/v1/request/path/1", + Token: "test_token", + TokenParent: "test_token_parent", + TokenAccessor: "test_accessor", + Lease: "path/to/test_lease/1", + LeaseToken: "test_lease_token", + Response: []byte("hello world"), + } + + if err := cache.Set(in); err != nil { + t.Fatal(err) + } + + // Populate cache + in2 := &Index{ + ID: "test_id_2", + Namespace: "test_ns/", + RequestPath: "/v1/request/path/2", + Token: "test_token2", + TokenParent: "test_token_parent", + TokenAccessor: "test_accessor2", + Lease: "path/to/test_lease/2", + LeaseToken: "test_lease_token", + Response: []byte("hello world"), + } + + if err := cache.Set(in2); err != nil { + t.Fatal(err) + } + + testCases := []struct { + name string + indexName string + indexValues []interface{} + }{ + { + "by_request_path", + "request_path", + []interface{}{"test_ns/", "/v1/request/path"}, + }, + { + "by_lease", + "lease", + []interface{}{"path/to/test_lease"}, + }, + { + "by_token_parent", + "token_parent", + []interface{}{"test_token_parent"}, + }, + { + "by_lease_token", + "lease_token", + []interface{}{"test_lease_token"}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + out, err := 
cache.GetByPrefix(tc.indexName, tc.indexValues...)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if diff := deep.Equal([]*Index{in, in2}, out); diff != nil {
+ t.Fatal(diff)
+ }
+ })
+ }
+}
+
+func TestCacheMemDB_Set(t *testing.T) {
+ cache, err := New()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testCases := []struct {
+ name string
+ index *Index
+ wantErr bool
+ }{
+ {
+ "nil",
+ nil,
+ true,
+ },
+ {
+ "empty_fields",
+ &Index{},
+ true,
+ },
+ {
+ "missing_required_fields",
+ &Index{
+ Lease: "foo",
+ },
+ true,
+ },
+ {
+ "all_fields",
+ &Index{
+ ID: "test_id",
+ Namespace: "test_ns/",
+ RequestPath: "/v1/request/path",
+ Token: "test_token",
+ TokenAccessor: "test_accessor",
+ Lease: "test_lease",
+ RenewCtxInfo: testContextInfo(),
+ },
+ false,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ if err := cache.Set(tc.index); (err != nil) != tc.wantErr {
+ t.Fatalf("CacheMemDB.Set() error = %v, wantErr = %v", err, tc.wantErr)
+ }
+ })
+ }
+}
+
+func TestCacheMemDB_Evict(t *testing.T) {
+ cache, err := New()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Test on empty cache
+ if err := cache.Evict(IndexNameID, "foo"); err != nil {
+ t.Fatal(err)
+ }
+
+ testIndex := &Index{
+ ID: "test_id",
+ Namespace: "test_ns/",
+ RequestPath: "/v1/request/path",
+ Token: "test_token",
+ TokenAccessor: "test_token_accessor",
+ Lease: "test_lease",
+ RenewCtxInfo: testContextInfo(),
+ }
+
+ testCases := []struct {
+ name string
+ indexName string
+ indexValues []interface{}
+ insertIndex *Index
+ wantErr bool
+ }{
+ {
+ "empty_params",
+ "",
+ []interface{}{""},
+ nil,
+ true,
+ },
+ {
+ "invalid_params",
+ "foo",
+ []interface{}{"bar"},
+ nil,
+ true,
+ },
+ {
+ "by_id",
+ "id",
+ []interface{}{"test_id"},
+ testIndex,
+ false,
+ },
+ {
+ "by_request_path",
+ "request_path",
+ []interface{}{"test_ns/", "/v1/request/path"},
+ testIndex,
+ false,
+ },
+ {
+ "by_token",
+ "token",
+ []interface{}{"test_token"},
+ testIndex,
+ false,
+ },
+ {
+ "by_token_accessor",
+ "token_accessor",
+ // Must match testIndex.TokenAccessor so the eviction actually
+ // removes the inserted entry.
+ []interface{}{"test_token_accessor"},
+ testIndex,
+ false,
+ },
+ {
+ "by_lease",
+ "lease",
+ []interface{}{"test_lease"},
+ testIndex,
+ false,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ if tc.insertIndex != nil {
+ if err := cache.Set(tc.insertIndex); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if err := cache.Evict(tc.indexName, tc.indexValues...); (err != nil) != tc.wantErr {
+ t.Fatal(err)
+ }
+
+ // Verify that the cache doesn't contain the entry any more
+ index, err := cache.Get(tc.indexName, tc.indexValues...) 
+ if (err != nil) != tc.wantErr { + t.Fatal(err) + } + + if index != nil { + t.Fatalf("expected nil entry, got = %#v", index) + } + }) + } +} + +func TestCacheMemDB_Flush(t *testing.T) { + cache, err := New() + if err != nil { + t.Fatal(err) + } + + // Populate cache + in := &Index{ + ID: "test_id", + Token: "test_token", + Lease: "test_lease", + Namespace: "test_ns/", + RequestPath: "/v1/request/path", + Response: []byte("hello world"), + } + + if err := cache.Set(in); err != nil { + t.Fatal(err) + } + + // Reset the cache + if err := cache.Flush(); err != nil { + t.Fatal(err) + } + + // Check the cache doesn't contain inserted index + out, err := cache.Get(IndexNameID, "test_id") + if err != nil { + t.Fatal(err) + } + if out != nil { + t.Fatalf("expected cache to be empty, got = %v", out) + } +} diff --git a/command/agentproxyshared/cache/cachememdb/index.go b/command/agentproxyshared/cache/cachememdb/index.go new file mode 100644 index 0000000..a7da2ed --- /dev/null +++ b/command/agentproxyshared/cache/cachememdb/index.go @@ -0,0 +1,153 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cachememdb + +import ( + "context" + "encoding/json" + "net/http" + "time" +) + +// Index holds the response to be cached along with multiple other values that +// serve as pointers to refer back to this index. +type Index struct { + // ID is a value that uniquely represents the request held by this + // index. This is computed by serializing and hashing the response object. + // Required: true, Unique: true + ID string + + // Token is the token that fetched the response held by this index + // Required: true, Unique: true + Token string + + // TokenParent is the parent token of the token held by this index + // Required: false, Unique: false + TokenParent string + + // TokenAccessor is the accessor of the token being cached in this index + // Required: true, Unique: true + TokenAccessor string + + // Namespace is the namespace that was provided in the request path as the + // Vault namespace to query + Namespace string + + // RequestPath is the path of the request that resulted in the response + // held by this index. + // Required: true, Unique: false + RequestPath string + + // Lease is the identifier of the lease in Vault, that belongs to the + // response held by this index. + // Required: false, Unique: true + Lease string + + // LeaseToken is the identifier of the token that created the lease held by + // this index. + // Required: false, Unique: false + LeaseToken string + + // Response is the serialized response object that the agent is caching. + Response []byte + + // RenewCtxInfo holds the context and the corresponding cancel func for the + // goroutine that manages the renewal of the secret belonging to the + // response in this index. + RenewCtxInfo *ContextInfo + + // RequestMethod is the HTTP method of the request + RequestMethod string + + // RequestToken is the token used in the request + RequestToken string + + // RequestHeader is the header used in the request + RequestHeader http.Header + + // LastRenewed is the timestamp of last renewal + LastRenewed time.Time + + // Type is the index type (token, auth-lease, secret-lease) + Type string +} + +type IndexName uint32 + +const ( + // IndexNameID is the ID of the index constructed from the serialized request. + IndexNameID = "id" + + // IndexNameLease is the lease of the index. + IndexNameLease = "lease" + + // IndexNameRequestPath is the request path of the index. 
+ IndexNameRequestPath = "request_path" + + // IndexNameToken is the token of the index. + IndexNameToken = "token" + + // IndexNameTokenAccessor is the token accessor of the index. + IndexNameTokenAccessor = "token_accessor" + + // IndexNameTokenParent is the token parent of the index. + IndexNameTokenParent = "token_parent" + + // IndexNameLeaseToken is the token that created the lease. + IndexNameLeaseToken = "lease_token" +) + +func validIndexName(indexName string) bool { + switch indexName { + case "id": + case "lease": + case "request_path": + case "token": + case "token_accessor": + case "token_parent": + case "lease_token": + default: + return false + } + return true +} + +type ContextInfo struct { + Ctx context.Context + CancelFunc context.CancelFunc + DoneCh chan struct{} +} + +func NewContextInfo(ctx context.Context) *ContextInfo { + if ctx == nil { + return nil + } + + ctxInfo := new(ContextInfo) + ctxInfo.Ctx, ctxInfo.CancelFunc = context.WithCancel(ctx) + ctxInfo.DoneCh = make(chan struct{}) + return ctxInfo +} + +// Serialize returns a json marshal'ed Index object, without the RenewCtxInfo +func (i Index) Serialize() ([]byte, error) { + i.RenewCtxInfo = nil + + indexBytes, err := json.Marshal(i) + if err != nil { + return nil, err + } + + return indexBytes, nil +} + +// Deserialize converts json bytes to an Index object +// Note: RenewCtxInfo will need to be reconstructed elsewhere. +func Deserialize(indexBytes []byte) (*Index, error) { + index := new(Index) + if err := json.Unmarshal(indexBytes, index); err != nil { + return nil, err + } + return index, nil +} diff --git a/command/agentproxyshared/cache/cachememdb/index_test.go b/command/agentproxyshared/cache/cachememdb/index_test.go new file mode 100644 index 0000000..c59ec5c --- /dev/null +++ b/command/agentproxyshared/cache/cachememdb/index_test.go @@ -0,0 +1,45 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cachememdb + +import ( + "context" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSerializeDeserialize(t *testing.T) { + testIndex := &Index{ + ID: "testid", + Token: "testtoken", + TokenParent: "parent token", + TokenAccessor: "test accessor", + Namespace: "test namespace", + RequestPath: "/test/path", + Lease: "lease id", + LeaseToken: "lease token id", + Response: []byte(`{"something": "here"}`), + RenewCtxInfo: NewContextInfo(context.Background()), + RequestMethod: "GET", + RequestToken: "request token", + RequestHeader: http.Header{ + "X-Test": []string{"vault", "agent"}, + }, + LastRenewed: time.Now().UTC(), + } + indexBytes, err := testIndex.Serialize() + require.NoError(t, err) + assert.True(t, len(indexBytes) > 0) + assert.NotNil(t, testIndex.RenewCtxInfo, "Serialize should not modify original Index object") + + restoredIndex, err := Deserialize(indexBytes) + require.NoError(t, err) + + testIndex.RenewCtxInfo = nil + assert.Equal(t, testIndex, restoredIndex, "They should be equal without RenewCtxInfo set on the original") +} diff --git a/command/agentproxyshared/cache/handler.go b/command/agentproxyshared/cache/handler.go new file mode 100644 index 0000000..bfb4434 --- /dev/null +++ b/command/agentproxyshared/cache/handler.go @@ -0,0 +1,232 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package cache + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "time" + + "github.com/armon/go-metrics" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" +) + +func ProxyHandler(ctx context.Context, logger hclog.Logger, proxier Proxier, inmemSink sink.Sink, proxyVaultToken bool) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + logger.Info("received request", "method", r.Method, "path", r.URL.Path) + + if !proxyVaultToken { + r.Header.Del(consts.AuthHeaderName) + } + + token := r.Header.Get(consts.AuthHeaderName) + + if token == "" && inmemSink != nil { + logger.Debug("using auto auth token", "method", r.Method, "path", r.URL.Path) + token = inmemSink.(sink.SinkReader).Token() + } + + // Parse and reset body. + reqBody, err := io.ReadAll(r.Body) + if err != nil { + logger.Error("failed to read request body") + logical.RespondError(w, http.StatusInternalServerError, errors.New("failed to read request body")) + return + } + if r.Body != nil { + r.Body.Close() + } + r.Body = io.NopCloser(bytes.NewReader(reqBody)) + req := &SendRequest{ + Token: token, + Request: r, + RequestBody: reqBody, + } + + resp, err := proxier.Send(ctx, req) + if err != nil { + // If this is an api.Response error, don't wrap the response. + if resp != nil && resp.Response.Error() != nil { + copyHeader(w.Header(), resp.Response.Header) + w.WriteHeader(resp.Response.StatusCode) + io.Copy(w, resp.Response.Body) + metrics.IncrCounter([]string{"agent", "proxy", "client_error"}, 1) + } else { + metrics.IncrCounter([]string{"agent", "proxy", "error"}, 1) + logical.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to get the response: %w", err)) + } + return + } + + err = sanitizeAutoAuthTokenResponse(ctx, logger, inmemSink, req, resp) + if err != nil { + logical.RespondError(w, http.StatusInternalServerError, fmt.Errorf("failed to process token lookup response: %w", err)) + return + } + + defer resp.Response.Body.Close() + + metrics.IncrCounter([]string{"agent", "proxy", "success"}, 1) + if resp.CacheMeta != nil { + if resp.CacheMeta.Hit { + metrics.IncrCounter([]string{"agent", "cache", "hit"}, 1) + } else { + metrics.IncrCounter([]string{"agent", "cache", "miss"}, 1) + } + } + + // Set headers + setHeaders(w, resp) + + // Set response body + io.Copy(w, resp.Response.Body) + return + }) +} + +// setHeaders is a helper that sets the header values based on SendResponse. It +// copies over the headers from the original response and also includes any +// cache-related headers. 
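+//
+// For illustration (assumed values, not from the upstream change): a cache
+// hit that is 42 seconds old is surfaced to the client roughly as
+//
+//	X-Cache: HIT
+//	Age: 42
+//	Date: <regenerated at response time>
+//
+// while a cache miss carries only "X-Cache: MISS".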
+func setHeaders(w http.ResponseWriter, resp *SendResponse) {
+ // Set header values
+ copyHeader(w.Header(), resp.Response.Header)
+ if resp.CacheMeta != nil {
+ xCacheVal := "MISS"
+
+ if resp.CacheMeta.Hit {
+ xCacheVal = "HIT"
+
+ // If this is a cache hit, we also set the Age header
+ age := fmt.Sprintf("%.0f", resp.CacheMeta.Age.Seconds())
+ w.Header().Set("Age", age)
+
+ // Update the date value
+ w.Header().Set("Date", time.Now().Format(http.TimeFormat))
+ }
+
+ w.Header().Set("X-Cache", xCacheVal)
+ }
+
+ // Set status code
+ w.WriteHeader(resp.Response.StatusCode)
+}
+
+// sanitizeAutoAuthTokenResponse checks whether the request was a token lookup
+// or renewal. If the lookup or renewal was performed with, or targeted, the
+// auto-auth token, the identifier of the token and its accessor are stripped
+// from the response before it is returned to the caller.
+func sanitizeAutoAuthTokenResponse(ctx context.Context, logger hclog.Logger, inmemSink sink.Sink, req *SendRequest, resp *SendResponse) error {
+ // If auto-auth token is not being used, there is nothing to do.
+ if inmemSink == nil {
+ return nil
+ }
+ autoAuthToken := inmemSink.(sink.SinkReader).Token()
+
+ // If the lookup responded with a non-200 status, there is nothing to do.
+ if resp.Response.StatusCode != http.StatusOK {
+ return nil
+ }
+
+ _, path := deriveNamespaceAndRevocationPath(req)
+ switch path {
+ case vaultPathTokenLookupSelf, vaultPathTokenRenewSelf:
+ if req.Token != autoAuthToken {
+ return nil
+ }
+ case vaultPathTokenLookup, vaultPathTokenRenew:
+ jsonBody := map[string]interface{}{}
+ if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil {
+ return err
+ }
+ tokenRaw, ok := jsonBody["token"]
+ if !ok {
+ // Input error will be caught by the API
+ return nil
+ }
+ token, ok := tokenRaw.(string)
+ if !ok {
+ // Input error will be caught by the API
+ return nil
+ }
+ if token != "" && token != autoAuthToken {
+ // Lookup is performed on the non-auto-auth token
+ return nil
+ }
+ default:
+ return nil
+ }
+
+ logger.Info("stripping auto-auth token from the response", "method", req.Request.Method, "path", req.Request.URL.Path)
+ secret, err := api.ParseSecret(bytes.NewReader(resp.ResponseBody))
+ if err != nil {
+ return fmt.Errorf("failed to parse token lookup response: %v", err)
+ }
+ if secret == nil {
+ return nil
+ } else if secret.Data != nil {
+ // lookup endpoints
+ if secret.Data["id"] == nil && secret.Data["accessor"] == nil {
+ return nil
+ }
+ delete(secret.Data, "id")
+ delete(secret.Data, "accessor")
+ } else if secret.Auth != nil {
+ // renew endpoints
+ if secret.Auth.Accessor == "" && secret.Auth.ClientToken == "" {
+ return nil
+ }
+ secret.Auth.Accessor = ""
+ secret.Auth.ClientToken = ""
+ } else {
+ // nothing to redact
+ return nil
+ }
+
+ bodyBytes, err := json.Marshal(secret)
+ if err != nil {
+ return err
+ }
+ if resp.Response.Body != nil {
+ resp.Response.Body.Close()
+ }
+ resp.Response.Body = ioutil.NopCloser(bytes.NewReader(bodyBytes))
+ resp.Response.ContentLength = int64(len(bodyBytes))
+
+ // Serialize and re-read the response
+ var respBytes bytes.Buffer
+ err = resp.Response.Write(&respBytes)
+ if err != nil {
+ return fmt.Errorf("failed to serialize the updated response: %v", err)
+ }
+
+ updatedResponse, err := http.ReadResponse(bufio.NewReader(bytes.NewReader(respBytes.Bytes())), nil)
+ if err != nil {
+ return fmt.Errorf("failed to deserialize the updated response: %v", err)
+ }
+
+ resp.Response = &api.Response{
+ Response: updatedResponse,
+ }
+ resp.ResponseBody = bodyBytes
+
+ return nil
+}
+
+func copyHeader(dst, src http.Header) {
+ for k, vv := range src {
+ for _, v := range vv {
+ dst.Add(k, v)
+ }
+ }
+}
diff --git a/command/agentproxyshared/cache/keymanager/manager.go b/command/agentproxyshared/cache/keymanager/manager.go
new file mode 100644
index 0000000..0cecc03
--- /dev/null
+++ b/command/agentproxyshared/cache/keymanager/manager.go
@@ -0,0 +1,23 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package keymanager
+
+import (
+ "context"
+
+ wrapping "github.com/hashicorp/go-kms-wrapping/v2"
+)
+
+const (
+ KeyID = "root"
+)
+
+type KeyManager interface {
+ // Returns a wrapping.Wrapper which can be used to perform key-related operations.
+ Wrapper() wrapping.Wrapper
+ // RetrievalToken returns material that can be used to recover the
+ // encryption key. Depending on the implementation, the token can be the
+ // encryption key itself or a token/identifier used to exchange for the key.
+ RetrievalToken(ctx context.Context) ([]byte, error)
+}
diff --git a/command/agentproxyshared/cache/keymanager/passthrough.go b/command/agentproxyshared/cache/keymanager/passthrough.go
new file mode 100644
index 0000000..cda6b6e
--- /dev/null
+++ b/command/agentproxyshared/cache/keymanager/passthrough.go
@@ -0,0 +1,71 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package keymanager
+
+import (
+ "context"
+ "crypto/rand"
+ "fmt"
+
+ wrapping "github.com/hashicorp/go-kms-wrapping/v2"
+ "github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2"
+)
+
+var _ KeyManager = (*PassthroughKeyManager)(nil)
+
+type PassthroughKeyManager struct {
+ wrapper *aead.Wrapper
+}
+
+// NewPassthroughKeyManager returns a new passthrough key manager.
+// If a key is provided, it will be used as the encryption key for the wrapper,
+// otherwise one will be generated.
+func NewPassthroughKeyManager(ctx context.Context, key []byte) (*PassthroughKeyManager, error) {
+ var rootKey []byte = nil
+ switch len(key) {
+ case 0:
+ newKey := make([]byte, 32)
+ _, err := rand.Read(newKey)
+ if err != nil {
+ return nil, err
+ }
+ rootKey = newKey
+ case 32:
+ rootKey = key
+ default:
+ return nil, fmt.Errorf("invalid key size, should be 32, got %d", len(key))
+ }
+
+ wrapper := aead.NewWrapper()
+
+ if _, err := wrapper.SetConfig(ctx, wrapping.WithConfigMap(map[string]string{"key_id": KeyID})); err != nil {
+ return nil, err
+ }
+
+ if err := wrapper.SetAesGcmKeyBytes(rootKey); err != nil {
+ return nil, err
+ }
+
+ k := &PassthroughKeyManager{
+ wrapper: wrapper,
+ }
+
+ return k, nil
+}
+
+// Wrapper returns the manager's wrapper for key operations.
+func (w *PassthroughKeyManager) Wrapper() wrapping.Wrapper {
+ return w.wrapper
+}
+
+// RetrievalToken returns the key that was used on the wrapper since this key
+// manager is simply a passthrough and does not provide a mechanism to abstract
+// this key.
+func (w *PassthroughKeyManager) RetrievalToken(ctx context.Context) ([]byte, error) {
+ if w.wrapper == nil {
+ return nil, fmt.Errorf("unable to get wrapper for token retrieval")
+ }
+
+ return w.wrapper.KeyBytes(ctx)
+}
diff --git a/command/agentproxyshared/cache/keymanager/passthrough_test.go b/command/agentproxyshared/cache/keymanager/passthrough_test.go
new file mode 100644
index 0000000..9327ee3
--- /dev/null
+++ b/command/agentproxyshared/cache/keymanager/passthrough_test.go
@@ -0,0 +1,61 @@
+// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package keymanager
+
+import (
+ "bytes"
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestKeyManager_PassthroughKeyManager(t *testing.T) {
+ tests := []struct {
+ name string
+ key []byte
+ wantErr bool
+ }{
+ {
+ "new key",
+ nil,
+ false,
+ },
+ {
+ "existing valid key",
+ []byte("e679e2f3d8d0e489d408bc617c6890d6"),
+ false,
+ },
+ {
+ "invalid key length",
+ []byte("foobar"),
+ true,
+ },
+ }
+
+ ctx := context.Background()
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ m, err := NewPassthroughKeyManager(ctx, tc.key)
+ if tc.wantErr {
+ require.Error(t, err)
+ return
+ }
+ require.NoError(t, err)
+
+ if w := m.Wrapper(); w == nil {
+ t.Fatalf("expected non-nil wrapper from the key manager")
+ }
+
+ token, err := m.RetrievalToken(ctx)
+ if err != nil {
+ t.Fatalf("unable to retrieve token: %s", err)
+ }
+
+ if len(tc.key) != 0 && !bytes.Equal(tc.key, token) {
+ t.Fatalf("expected key bytes: %x, got: %x", tc.key, token)
+ }
+ })
+ }
+}
diff --git a/command/agentproxyshared/cache/lease_cache.go b/command/agentproxyshared/cache/lease_cache.go
new file mode 100644
index 0000000..3bcb580
--- /dev/null
+++ b/command/agentproxyshared/cache/lease_cache.go
@@ -0,0 +1,1329 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package cache
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/hashicorp/go-hclog"
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/go-secure-stdlib/base62"
+ "github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/command/agentproxyshared/cache/cacheboltdb"
+ "github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb"
+ "github.com/hashicorp/vault/helper/namespace"
+ nshelper "github.com/hashicorp/vault/helper/namespace"
+ "github.com/hashicorp/vault/helper/useragent"
+ vaulthttp "github.com/hashicorp/vault/http"
+ "github.com/hashicorp/vault/sdk/helper/consts"
+ "github.com/hashicorp/vault/sdk/helper/cryptoutil"
+ "github.com/hashicorp/vault/sdk/helper/jsonutil"
+ "github.com/hashicorp/vault/sdk/helper/locksutil"
+ "github.com/hashicorp/vault/sdk/logical"
+ gocache "github.com/patrickmn/go-cache"
+ "go.uber.org/atomic"
+)
+
+const (
+ vaultPathTokenCreate = "/v1/auth/token/create"
+ vaultPathTokenRevoke = "/v1/auth/token/revoke"
+ vaultPathTokenRevokeSelf = "/v1/auth/token/revoke-self"
+ vaultPathTokenRevokeAccessor = "/v1/auth/token/revoke-accessor"
+ vaultPathTokenRevokeOrphan = "/v1/auth/token/revoke-orphan"
+ vaultPathTokenLookup = "/v1/auth/token/lookup"
+ vaultPathTokenLookupSelf = "/v1/auth/token/lookup-self"
+ vaultPathTokenRenew = "/v1/auth/token/renew"
+ vaultPathTokenRenewSelf = "/v1/auth/token/renew-self"
+ vaultPathLeaseRevoke = "/v1/sys/leases/revoke"
+ vaultPathLeaseRevokeForce = "/v1/sys/leases/revoke-force"
+ vaultPathLeaseRevokePrefix = "/v1/sys/leases/revoke-prefix"
+)
+
+var (
+ contextIndexID = contextIndex{}
+ errInvalidType = errors.New("invalid type provided")
+ revocationPaths = []string{
+ strings.TrimPrefix(vaultPathTokenRevoke, "/v1"),
+ strings.TrimPrefix(vaultPathTokenRevokeSelf, "/v1"),
+ strings.TrimPrefix(vaultPathTokenRevokeAccessor, "/v1"),
+ strings.TrimPrefix(vaultPathTokenRevokeOrphan, "/v1"),
+ strings.TrimPrefix(vaultPathLeaseRevoke, "/v1"),
+ strings.TrimPrefix(vaultPathLeaseRevokeForce, "/v1"),
+ strings.TrimPrefix(vaultPathLeaseRevokePrefix, "/v1"),
"/v1"), + } +) + +type contextIndex struct{} + +type cacheClearRequest struct { + Type string `json:"type"` + Value string `json:"value"` + Namespace string `json:"namespace"` +} + +// LeaseCache is an implementation of Proxier that handles +// the caching of responses. It passes the incoming request +// to an underlying Proxier implementation. +type LeaseCache struct { + client *api.Client + proxier Proxier + logger hclog.Logger + db *cachememdb.CacheMemDB + baseCtxInfo *cachememdb.ContextInfo + l *sync.RWMutex + + // idLocks is used during cache lookup to ensure that identical requests made + // in parallel won't trigger multiple renewal goroutines. + idLocks []*locksutil.LockEntry + + // inflightCache keeps track of inflight requests + inflightCache *gocache.Cache + + // ps is the persistent storage for tokens and leases + ps *cacheboltdb.BoltStorage + + // shuttingDown is used to determine if cache needs to be evicted or not + // when the context is cancelled + shuttingDown atomic.Bool +} + +// LeaseCacheConfig is the configuration for initializing a new +// Lease. +type LeaseCacheConfig struct { + Client *api.Client + BaseContext context.Context + Proxier Proxier + Logger hclog.Logger + Storage *cacheboltdb.BoltStorage +} + +type inflightRequest struct { + // ch is closed by the request that ends up processing the set of + // parallel request + ch chan struct{} + + // remaining is the number of remaining inflight request that needs to + // be processed before this object can be cleaned up + remaining *atomic.Uint64 +} + +func newInflightRequest() *inflightRequest { + return &inflightRequest{ + ch: make(chan struct{}), + remaining: atomic.NewUint64(0), + } +} + +// NewLeaseCache creates a new instance of a LeaseCache. +func NewLeaseCache(conf *LeaseCacheConfig) (*LeaseCache, error) { + if conf == nil { + return nil, errors.New("nil configuration provided") + } + + if conf.Proxier == nil || conf.Logger == nil { + return nil, fmt.Errorf("missing configuration required params: %v", conf) + } + + if conf.Client == nil { + return nil, fmt.Errorf("nil API client") + } + + db, err := cachememdb.New() + if err != nil { + return nil, err + } + + // Create a base context for the lease cache layer + baseCtxInfo := cachememdb.NewContextInfo(conf.BaseContext) + + return &LeaseCache{ + client: conf.Client, + proxier: conf.Proxier, + logger: conf.Logger, + db: db, + baseCtxInfo: baseCtxInfo, + l: &sync.RWMutex{}, + idLocks: locksutil.CreateLocks(), + inflightCache: gocache.New(gocache.NoExpiration, gocache.NoExpiration), + ps: conf.Storage, + }, nil +} + +// SetShuttingDown is a setter for the shuttingDown field +func (c *LeaseCache) SetShuttingDown(in bool) { + c.shuttingDown.Store(in) +} + +// SetPersistentStorage is a setter for the persistent storage field in +// LeaseCache +func (c *LeaseCache) SetPersistentStorage(storageIn *cacheboltdb.BoltStorage) { + c.ps = storageIn +} + +// PersistentStorage is a getter for the persistent storage field in +// LeaseCache +func (c *LeaseCache) PersistentStorage() *cacheboltdb.BoltStorage { + return c.ps +} + +// checkCacheForRequest checks the cache for a particular request based on its +// computed ID. It returns a non-nil *SendResponse if an entry is found. 
+func (c *LeaseCache) checkCacheForRequest(id string) (*SendResponse, error) { + index, err := c.db.Get(cachememdb.IndexNameID, id) + if err != nil { + return nil, err + } + + if index == nil { + return nil, nil + } + + // Cached request is found, deserialize the response + reader := bufio.NewReader(bytes.NewReader(index.Response)) + resp, err := http.ReadResponse(reader, nil) + if err != nil { + c.logger.Error("failed to deserialize response", "error", err) + return nil, err + } + + sendResp, err := NewSendResponse(&api.Response{Response: resp}, index.Response) + if err != nil { + c.logger.Error("failed to create new send response", "error", err) + return nil, err + } + sendResp.CacheMeta.Hit = true + + respTime, err := http.ParseTime(resp.Header.Get("Date")) + if err != nil { + c.logger.Error("failed to parse cached response date", "error", err) + return nil, err + } + sendResp.CacheMeta.Age = time.Now().Sub(respTime) + + return sendResp, nil +} + +// Send performs a cache lookup on the incoming request. If it's a cache hit, +// it will return the cached response, otherwise it will delegate to the +// underlying Proxier and cache the received response. +func (c *LeaseCache) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { + // Compute the index ID + id, err := computeIndexID(req) + if err != nil { + c.logger.Error("failed to compute cache key", "error", err) + return nil, err + } + + // Check the inflight cache to see if there are other inflight requests + // of the same kind, based on the computed ID. If so, we increment a counter + + var inflight *inflightRequest + + defer func() { + // Cleanup on the cache if there are no remaining inflight requests. + // This is the last step, so we defer the call first + if inflight != nil && inflight.remaining.Load() == 0 { + c.inflightCache.Delete(id) + } + }() + + idLock := locksutil.LockForKey(c.idLocks, id) + + // Briefly grab an ID-based lock in here to emulate a load-or-store behavior + // and prevent concurrent cacheable requests from being proxied twice if + // they both miss the cache due to it being clean when peeking the cache + // entry. + idLock.Lock() + inflightRaw, found := c.inflightCache.Get(id) + if found { + idLock.Unlock() + inflight = inflightRaw.(*inflightRequest) + inflight.remaining.Inc() + defer inflight.remaining.Dec() + + // If found it means that there's an inflight request being processed. + // We wait until that's finished before proceeding further. 
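+ //
+ // Illustrative timeline (an assumption about typical usage): request A
+ // misses the inflight cache, stores an inflightRequest and proxies the
+ // call upstream; request B with the same computed ID lands here, blocks
+ // on inflight.ch, and once A closes the channel, B re-checks the cache
+ // and is served the response that A just stored.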
+ select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-inflight.ch: + } + } else { + inflight = newInflightRequest() + inflight.remaining.Inc() + defer inflight.remaining.Dec() + + c.inflightCache.Set(id, inflight, gocache.NoExpiration) + idLock.Unlock() + + // Signal that the processing request is done + defer close(inflight.ch) + } + + // Check if the response for this request is already in the cache + cachedResp, err := c.checkCacheForRequest(id) + if err != nil { + return nil, err + } + if cachedResp != nil { + c.logger.Debug("returning cached response", "path", req.Request.URL.Path) + return cachedResp, nil + } + + c.logger.Debug("forwarding request from cache", "method", req.Request.Method, "path", req.Request.URL.Path) + + // Pass the request down and get a response + resp, err := c.proxier.Send(ctx, req) + if err != nil { + return resp, err + } + + // If this is a non-2xx or if the returned response does not contain JSON payload, + // we skip caching + if resp.Response.StatusCode >= 300 || resp.Response.Header.Get("Content-Type") != "application/json" { + return resp, err + } + + // Get the namespace from the request header + namespace := req.Request.Header.Get(consts.NamespaceHeaderName) + // We need to populate an empty value since go-memdb will skip over indexes + // that contain empty values. + if namespace == "" { + namespace = "root/" + } + + // Build the index to cache based on the response received + index := &cachememdb.Index{ + ID: id, + Namespace: namespace, + RequestPath: req.Request.URL.Path, + LastRenewed: time.Now().UTC(), + } + + secret, err := api.ParseSecret(bytes.NewReader(resp.ResponseBody)) + if err != nil { + c.logger.Error("failed to parse response as secret", "error", err) + return nil, err + } + + isRevocation, err := c.handleRevocationRequest(ctx, req, resp) + if err != nil { + c.logger.Error("failed to process the response", "error", err) + return nil, err + } + + // If this is a revocation request, do not go through cache logic. + if isRevocation { + return resp, nil + } + + // Fast path for responses with no secrets + if secret == nil { + c.logger.Debug("pass-through response; no secret in response", "method", req.Request.Method, "path", req.Request.URL.Path) + return resp, nil + } + + // Short-circuit if the secret is not renewable + tokenRenewable, err := secret.TokenIsRenewable() + if err != nil { + c.logger.Error("failed to parse renewable param", "error", err) + return nil, err + } + if !secret.Renewable && !tokenRenewable { + c.logger.Debug("pass-through response; secret not renewable", "method", req.Request.Method, "path", req.Request.URL.Path) + return resp, nil + } + + var renewCtxInfo *cachememdb.ContextInfo + switch { + case secret.LeaseID != "": + c.logger.Debug("processing lease response", "method", req.Request.Method, "path", req.Request.URL.Path) + entry, err := c.db.Get(cachememdb.IndexNameToken, req.Token) + if err != nil { + return nil, err + } + // If the lease belongs to a token that is not managed by the agent, + // return the response without caching it. 
+ if entry == nil { + c.logger.Debug("pass-through lease response; token not managed by agent", "method", req.Request.Method, "path", req.Request.URL.Path) + return resp, nil + } + + // Derive a context for renewal using the token's context + renewCtxInfo = cachememdb.NewContextInfo(entry.RenewCtxInfo.Ctx) + + index.Lease = secret.LeaseID + index.LeaseToken = req.Token + + index.Type = cacheboltdb.LeaseType + + case secret.Auth != nil: + c.logger.Debug("processing auth response", "method", req.Request.Method, "path", req.Request.URL.Path) + + // Check if this token creation request resulted in a non-orphan token, and if so + // correctly set the parentCtx to the request's token context. + var parentCtx context.Context + if !secret.Auth.Orphan { + entry, err := c.db.Get(cachememdb.IndexNameToken, req.Token) + if err != nil { + return nil, err + } + // If parent token is not managed by the agent, child shouldn't be + // either. + if entry == nil { + c.logger.Debug("pass-through auth response; parent token not managed by agent", "method", req.Request.Method, "path", req.Request.URL.Path) + return resp, nil + } + + c.logger.Debug("setting parent context", "method", req.Request.Method, "path", req.Request.URL.Path) + parentCtx = entry.RenewCtxInfo.Ctx + + index.TokenParent = req.Token + } + + renewCtxInfo = c.createCtxInfo(parentCtx) + index.Token = secret.Auth.ClientToken + index.TokenAccessor = secret.Auth.Accessor + + index.Type = cacheboltdb.LeaseType + + default: + // We shouldn't be hitting this, but will err on the side of caution and + // simply proxy. + c.logger.Debug("pass-through response; secret without lease and token", "method", req.Request.Method, "path", req.Request.URL.Path) + return resp, nil + } + + // Serialize the response to store it in the cached index + var respBytes bytes.Buffer + err = resp.Response.Write(&respBytes) + if err != nil { + c.logger.Error("failed to serialize response", "error", err) + return nil, err + } + + // Reset the response body for upper layers to read + if resp.Response.Body != nil { + resp.Response.Body.Close() + } + resp.Response.Body = ioutil.NopCloser(bytes.NewReader(resp.ResponseBody)) + + // Set the index's Response + index.Response = respBytes.Bytes() + + // Store the index ID in the lifetimewatcher context + renewCtx := context.WithValue(renewCtxInfo.Ctx, contextIndexID, index.ID) + + // Store the lifetime watcher context in the index + index.RenewCtxInfo = &cachememdb.ContextInfo{ + Ctx: renewCtx, + CancelFunc: renewCtxInfo.CancelFunc, + DoneCh: renewCtxInfo.DoneCh, + } + + // Add extra information necessary for restoring from persisted cache + index.RequestMethod = req.Request.Method + index.RequestToken = req.Token + index.RequestHeader = req.Request.Header + + // Store the index in the cache + c.logger.Debug("storing response into the cache", "method", req.Request.Method, "path", req.Request.URL.Path) + err = c.Set(ctx, index) + if err != nil { + c.logger.Error("failed to cache the proxied response", "error", err) + return nil, err + } + + // Start renewing the secret in the response + go c.startRenewing(renewCtx, index, req, secret) + + return resp, nil +} + +func (c *LeaseCache) createCtxInfo(ctx context.Context) *cachememdb.ContextInfo { + if ctx == nil { + c.l.RLock() + ctx = c.baseCtxInfo.Ctx + c.l.RUnlock() + } + return cachememdb.NewContextInfo(ctx) +} + +func (c *LeaseCache) startRenewing(ctx context.Context, index *cachememdb.Index, req *SendRequest, secret *api.Secret) { + defer func() { + id := 
ctx.Value(contextIndexID).(string)
+ if c.shuttingDown.Load() {
+ c.logger.Trace("not evicting index from cache during shutdown", "id", id, "method", req.Request.Method, "path", req.Request.URL.Path)
+ return
+ }
+ c.logger.Debug("evicting index from cache", "id", id, "method", req.Request.Method, "path", req.Request.URL.Path)
+ err := c.Evict(index)
+ if err != nil {
+ c.logger.Error("failed to evict index", "id", id, "error", err)
+ return
+ }
+ }()
+
+ client, err := c.client.Clone()
+ if err != nil {
+ c.logger.Error("failed to create API client in the lifetime watcher", "error", err)
+ return
+ }
+ client.SetToken(req.Token)
+
+ headers := client.Headers()
+ if headers == nil {
+ headers = make(http.Header)
+ }
+
+ // We do not preserve the initial User-Agent here (i.e. use
+ // AgentProxyStringWithProxiedUserAgent) since these requests are from
+ // the proxy subsystem, but are made by Agent's lifetime watcher,
+ // not triggered by a specific request.
+ headers.Set("User-Agent", useragent.AgentProxyString())
+ client.SetHeaders(headers)
+
+ watcher, err := client.NewLifetimeWatcher(&api.LifetimeWatcherInput{
+ Secret: secret,
+ })
+ if err != nil {
+ c.logger.Error("failed to create secret lifetime watcher", "error", err)
+ return
+ }
+
+ c.logger.Debug("initiating renewal", "method", req.Request.Method, "path", req.Request.URL.Path)
+ go watcher.Start()
+ defer watcher.Stop()
+
+ for {
+ select {
+ case <-ctx.Done():
+ // This is the case which captures context cancellations from token
+ // and leases. Since all the contexts are derived from the agent's
+ // context, this will also cover the shutdown scenario.
+ c.logger.Debug("context cancelled; stopping lifetime watcher", "path", req.Request.URL.Path)
+ return
+ case err := <-watcher.DoneCh():
+ // This case covers renewal completion and renewal errors
+ if err != nil {
+ c.logger.Error("failed to renew secret", "error", err)
+ return
+ }
+ c.logger.Debug("renewal halted; evicting from cache", "path", req.Request.URL.Path)
+ return
+ case <-watcher.RenewCh():
+ c.logger.Debug("secret renewed", "path", req.Request.URL.Path)
+ if c.ps != nil {
+ if err := c.updateLastRenewed(ctx, index, time.Now().UTC()); err != nil {
+ c.logger.Warn("not able to update lastRenewed time for cached index", "id", index.ID)
+ }
+ }
+ case <-index.RenewCtxInfo.DoneCh:
+ // This case signals the renewal process to shut down and evict
+ // the cache entry. This is triggered when a specific secret
+ // renewal needs to be killed without affecting any of the derived
+ // context renewals.
+ c.logger.Debug("done channel closed")
+ return
+ }
+ }
+}
+
+func (c *LeaseCache) updateLastRenewed(ctx context.Context, index *cachememdb.Index, t time.Time) error {
+ idLock := locksutil.LockForKey(c.idLocks, index.ID)
+ idLock.Lock()
+ defer idLock.Unlock()
+
+ getIndex, err := c.db.Get(cachememdb.IndexNameID, index.ID)
+ if err != nil {
+ return err
+ }
+ if getIndex == nil {
+ return fmt.Errorf("no cached entry found with id %q", index.ID)
+ }
+ // Update the renewal timestamp on the entry fetched from the cache as
+ // well as on the caller's copy, since it is the fetched entry that gets
+ // persisted below.
+ index.LastRenewed = t
+ getIndex.LastRenewed = t
+ if err := c.Set(ctx, getIndex); err != nil {
+ return err
+ }
+ return nil
+}
+
+// computeIndexID results in a value that uniquely identifies a request
+// received by the agent. It does so by hashing the serialized request object,
+// which contains the request path, query parameters and body parameters, with
+// Blake2b-256 (see cryptoutil.Blake2b256Hash below). 
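+//
+// As a hedged illustration: two identical GET requests to /v1/secret/data/foo
+// made with the same token serialize to the same bytes and therefore share a
+// single cache entry, while changing the token, path, query, or body yields a
+// different ID (the requests and hex digests below are made up):
+//
+//	idA, _ := computeIndexID(reqFoo)     // "3f2c..."
+//	idB, _ := computeIndexID(reqFooCopy) // same as idA
+//	idC, _ := computeIndexID(reqBar)     // different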
+func computeIndexID(req *SendRequest) (string, error) { + var b bytes.Buffer + + cloned := req.Request.Clone(context.Background()) + cloned.Header.Del(vaulthttp.VaultIndexHeaderName) + cloned.Header.Del(vaulthttp.VaultForwardHeaderName) + cloned.Header.Del(vaulthttp.VaultInconsistentHeaderName) + // Serialize the request + if err := cloned.Write(&b); err != nil { + return "", fmt.Errorf("failed to serialize request: %v", err) + } + + // Reset the request body after it has been closed by Write + req.Request.Body = ioutil.NopCloser(bytes.NewReader(req.RequestBody)) + + // Append req.Token into the byte slice. This is needed since auto-auth'ed + // requests sets the token directly into SendRequest.Token + if _, err := b.Write([]byte(req.Token)); err != nil { + return "", fmt.Errorf("failed to write token to hash input: %w", err) + } + + return hex.EncodeToString(cryptoutil.Blake2b256Hash(string(b.Bytes()))), nil +} + +// HandleCacheClear returns a handlerFunc that can perform cache clearing operations. +func (c *LeaseCache) HandleCacheClear(ctx context.Context) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // If the cache is not enabled, return a 200 + if c == nil { + return + } + + // Only handle POST/PUT requests + switch r.Method { + case http.MethodPost: + case http.MethodPut: + default: + return + } + + req := new(cacheClearRequest) + if err := jsonutil.DecodeJSONFromReader(r.Body, req); err != nil { + if err == io.EOF { + err = errors.New("empty JSON provided") + } + logical.RespondError(w, http.StatusBadRequest, fmt.Errorf("failed to parse JSON input: %w", err)) + return + } + + c.logger.Debug("received cache-clear request", "type", req.Type, "namespace", req.Namespace, "value", req.Value) + + in, err := parseCacheClearInput(req) + if err != nil { + c.logger.Error("unable to parse clear input", "error", err) + logical.RespondError(w, http.StatusBadRequest, fmt.Errorf("failed to parse clear input: %w", err)) + return + } + + if err := c.handleCacheClear(ctx, in); err != nil { + // Default to 500 on error, unless the user provided an invalid type, + // which would then be a 400. + httpStatus := http.StatusInternalServerError + if err == errInvalidType { + httpStatus = http.StatusBadRequest + } + logical.RespondError(w, httpStatus, fmt.Errorf("failed to clear cache: %w", err)) + return + } + + return + }) +} + +func (c *LeaseCache) handleCacheClear(ctx context.Context, in *cacheClearInput) error { + if in == nil { + return errors.New("no value(s) provided to clear corresponding cache entries") + } + + switch in.Type { + case "request_path": + // For this particular case, we need to ensure that there are 2 provided + // indexers for the proper lookup. + if in.RequestPath == "" { + return errors.New("request path not provided") + } + + // The first value provided for this case will be the namespace, but if it's + // an empty value we need to overwrite it with "root/" to ensure proper + // cache lookup. 
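+ //
+ // For example (a hypothetical request, assuming the usual mapping of
+ // cacheClearRequest fields onto cacheClearInput): a body of
+ // {"type": "request_path", "value": "/v1/secret/data/foo"} carries no
+ // namespace, so it is looked up under "root/" below.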
+ if in.Namespace == "" {
+ in.Namespace = "root/"
+ }
+
+ // Find all the cached entries which have the given request path and
+ // cancel the contexts of all the respective lifetime watchers
+ indexes, err := c.db.GetByPrefix(cachememdb.IndexNameRequestPath, in.Namespace, in.RequestPath)
+ if err != nil {
+ return err
+ }
+ for _, index := range indexes {
+ index.RenewCtxInfo.CancelFunc()
+ }
+
+ case "token":
+ if in.Token == "" {
+ return errors.New("token not provided")
+ }
+
+ // Get the context for the given token and cancel its context
+ index, err := c.db.Get(cachememdb.IndexNameToken, in.Token)
+ if err != nil {
+ return err
+ }
+ if index == nil {
+ return nil
+ }
+
+ c.logger.Debug("canceling context of index attached to token")
+
+ index.RenewCtxInfo.CancelFunc()
+
+ case "token_accessor":
+ if in.TokenAccessor == "" {
+ return errors.New("token accessor not provided")
+ }
+
+ // Get the cached index and cancel the corresponding lifetime watcher
+ // context
+ index, err := c.db.Get(cachememdb.IndexNameTokenAccessor, in.TokenAccessor)
+ if err != nil {
+ return err
+ }
+ if index == nil {
+ return nil
+ }
+
+ c.logger.Debug("canceling context of index attached to accessor")
+
+ index.RenewCtxInfo.CancelFunc()
+
+ case "lease":
+ if in.Lease == "" {
+ return errors.New("lease not provided")
+ }
+
+ // Get the cached index and cancel the corresponding lifetime watcher
+ // context
+ index, err := c.db.Get(cachememdb.IndexNameLease, in.Lease)
+ if err != nil {
+ return err
+ }
+ if index == nil {
+ return nil
+ }
+
+ c.logger.Debug("canceling context of index attached to lease")
+
+ index.RenewCtxInfo.CancelFunc()
+
+ case "all":
+ // Cancel the base context which triggers all the goroutines to
+ // stop and evict entries from cache.
+ c.logger.Debug("canceling base context")
+ c.l.Lock()
+ c.baseCtxInfo.CancelFunc()
+ // Reset the base context
+ baseCtx, baseCancel := context.WithCancel(ctx)
+ c.baseCtxInfo = &cachememdb.ContextInfo{
+ Ctx: baseCtx,
+ CancelFunc: baseCancel,
+ }
+ c.l.Unlock()
+
+ // Reset the memdb instance (and persistent storage if enabled)
+ if err := c.Flush(); err != nil {
+ return err
+ }
+
+ default:
+ return errInvalidType
+ }
+
+ c.logger.Debug("successfully cleared matching cache entries")
+
+ return nil
+}
+
+// handleRevocationRequest checks whether the originating request is a
+// revocation request, and if so performs the applicable cache cleanups.
+// Returns true if this is a revocation request.
+func (c *LeaseCache) handleRevocationRequest(ctx context.Context, req *SendRequest, resp *SendResponse) (bool, error) {
+ // Lease and token revocations return 204's on success. Fast-path if that's
+ // not the case.
+ if resp.Response.StatusCode != http.StatusNoContent {
+ return false, nil
+ }
+
+ _, path := deriveNamespaceAndRevocationPath(req)
+
+ switch {
+ case path == vaultPathTokenRevoke:
+ // Get the token from the request body
+ jsonBody := map[string]interface{}{}
+ if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil {
+ return false, err
+ }
+ tokenRaw, ok := jsonBody["token"]
+ if !ok {
+ return false, fmt.Errorf("failed to get token from request body")
+ }
+ token, ok := tokenRaw.(string)
+ if !ok {
+ return false, fmt.Errorf("expected token in the request body to be string")
+ }
+
+ // Clear the cache entry associated with the token and all the other
+ // entries belonging to the leases derived from this token. 
+ in := &cacheClearInput{ + Type: "token", + Token: token, + } + if err := c.handleCacheClear(ctx, in); err != nil { + return false, err + } + + case path == vaultPathTokenRevokeSelf: + // Clear the cache entry associated with the token and all the other + // entries belonging to the leases derived from this token. + in := &cacheClearInput{ + Type: "token", + Token: req.Token, + } + if err := c.handleCacheClear(ctx, in); err != nil { + return false, err + } + + case path == vaultPathTokenRevokeAccessor: + jsonBody := map[string]interface{}{} + if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil { + return false, err + } + accessorRaw, ok := jsonBody["accessor"] + if !ok { + return false, fmt.Errorf("failed to get accessor from request body") + } + accessor, ok := accessorRaw.(string) + if !ok { + return false, fmt.Errorf("expected accessor in the request body to be string") + } + + in := &cacheClearInput{ + Type: "token_accessor", + TokenAccessor: accessor, + } + if err := c.handleCacheClear(ctx, in); err != nil { + return false, err + } + + case path == vaultPathTokenRevokeOrphan: + jsonBody := map[string]interface{}{} + if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil { + return false, err + } + tokenRaw, ok := jsonBody["token"] + if !ok { + return false, fmt.Errorf("failed to get token from request body") + } + token, ok := tokenRaw.(string) + if !ok { + return false, fmt.Errorf("expected token in the request body to be string") + } + + // Kill the lifetime watchers of all the leases attached to the revoked + // token + indexes, err := c.db.GetByPrefix(cachememdb.IndexNameLeaseToken, token) + if err != nil { + return false, err + } + for _, index := range indexes { + index.RenewCtxInfo.CancelFunc() + } + + // Kill the lifetime watchers of the revoked token + index, err := c.db.Get(cachememdb.IndexNameToken, token) + if err != nil { + return false, err + } + if index == nil { + return true, nil + } + + // Indicate the lifetime watcher goroutine for this index to return. + // This will not affect the child tokens because the context is not + // getting cancelled. + close(index.RenewCtxInfo.DoneCh) + + // Clear the parent references of the revoked token in the entries + // belonging to the child tokens of the revoked token. + indexes, err = c.db.GetByPrefix(cachememdb.IndexNameTokenParent, token) + if err != nil { + return false, err + } + for _, index := range indexes { + index.TokenParent = "" + err = c.db.Set(index) + if err != nil { + c.logger.Error("failed to persist index", "error", err) + return false, err + } + } + + case path == vaultPathLeaseRevoke: + // TODO: Should lease present in the URL itself be considered here? 
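+ // (Illustrative, assumed request shape: a lease revocation typically
+ // arrives with a JSON body such as {"lease_id": "<id>"}, which is what
+ // gets extracted below.)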
+ // Get the lease from the request body
+ jsonBody := map[string]interface{}{}
+ if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil {
+ return false, err
+ }
+ leaseIDRaw, ok := jsonBody["lease_id"]
+ if !ok {
+ return false, fmt.Errorf("failed to get lease_id from request body")
+ }
+ leaseID, ok := leaseIDRaw.(string)
+ if !ok {
+ return false, fmt.Errorf("expected lease_id in the request body to be string")
+ }
+ in := &cacheClearInput{
+ Type: "lease",
+ Lease: leaseID,
+ }
+ if err := c.handleCacheClear(ctx, in); err != nil {
+ return false, err
+ }
+
+ case strings.HasPrefix(path, vaultPathLeaseRevokeForce):
+ // Trim the URL path to get the request path prefix
+ prefix := strings.TrimPrefix(path, vaultPathLeaseRevokeForce)
+ // Get all the cache indexes that use the request path containing the
+ // prefix and cancel the lifetime watcher context of each.
+ indexes, err := c.db.GetByPrefix(cachememdb.IndexNameLease, prefix)
+ if err != nil {
+ return false, err
+ }
+
+ _, tokenNSID := namespace.SplitIDFromString(req.Token)
+ for _, index := range indexes {
+ _, leaseNSID := namespace.SplitIDFromString(index.Lease)
+ // Only evict leases that match the token's namespace
+ if tokenNSID == leaseNSID {
+ index.RenewCtxInfo.CancelFunc()
+ }
+ }
+
+ case strings.HasPrefix(path, vaultPathLeaseRevokePrefix):
+ // Trim the URL path to get the request path prefix
+ prefix := strings.TrimPrefix(path, vaultPathLeaseRevokePrefix)
+ // Get all the cache indexes that use the request path containing the
+ // prefix and cancel the lifetime watcher context of each.
+ indexes, err := c.db.GetByPrefix(cachememdb.IndexNameLease, prefix)
+ if err != nil {
+ return false, err
+ }
+
+ _, tokenNSID := namespace.SplitIDFromString(req.Token)
+ for _, index := range indexes {
+ _, leaseNSID := namespace.SplitIDFromString(index.Lease)
+ // Only evict leases that match the token's namespace
+ if tokenNSID == leaseNSID {
+ index.RenewCtxInfo.CancelFunc()
+ }
+ }
+
+ default:
+ return false, nil
+ }
+
+ c.logger.Debug("triggered cache eviction from revocation request")
+
+ return true, nil
+}
+
+// Set stores the index in the cachememdb, and also stores it in the persistent
+// cache (if enabled)
+func (c *LeaseCache) Set(ctx context.Context, index *cachememdb.Index) error {
+ if err := c.db.Set(index); err != nil {
+ return err
+ }
+
+ if c.ps != nil {
+ plaintext, err := index.Serialize()
+ if err != nil {
+ return err
+ }
+
+ if err := c.ps.Set(ctx, index.ID, plaintext, index.Type); err != nil {
+ return err
+ }
+ c.logger.Trace("set entry in persistent storage", "type", index.Type, "path", index.RequestPath, "id", index.ID)
+ }
+
+ return nil
+}
+
+// Evict removes an Index from the cachememdb, and also removes it from the
+// persistent cache (if enabled)
+func (c *LeaseCache) Evict(index *cachememdb.Index) error {
+ if err := c.db.Evict(cachememdb.IndexNameID, index.ID); err != nil {
+ return err
+ }
+
+ if c.ps != nil {
+ if err := c.ps.Delete(index.ID, index.Type); err != nil {
+ return err
+ }
+ c.logger.Trace("deleted item from persistent storage", "id", index.ID)
+ }
+
+ return nil
+}
+
+// Flush the cachememdb and persistent cache (if enabled)
+func (c *LeaseCache) Flush() error {
+ if err := c.db.Flush(); err != nil {
+ return err
+ }
+
+ if c.ps != nil {
+ c.logger.Trace("clearing persistent storage")
+ return c.ps.Clear()
+ }
+
+ return nil
+}
+
+// Restore loads the cachememdb from the persistent storage passed in. 
Loads +// tokens first, since restoring a lease's renewal context and watcher requires +// looking up the token in the cachememdb. +func (c *LeaseCache) Restore(ctx context.Context, storage *cacheboltdb.BoltStorage) error { + var errs *multierror.Error + + // Process tokens first + tokens, err := storage.GetByType(ctx, cacheboltdb.TokenType) + if err != nil { + errs = multierror.Append(errs, err) + } else { + if err := c.restoreTokens(tokens); err != nil { + errs = multierror.Append(errs, err) + } + } + + // Then process leases + leases, err := storage.GetByType(ctx, cacheboltdb.LeaseType) + if err != nil { + errs = multierror.Append(errs, err) + } else { + for _, lease := range leases { + newIndex, err := cachememdb.Deserialize(lease) + if err != nil { + errs = multierror.Append(errs, err) + continue + } + + c.logger.Trace("restoring lease", "id", newIndex.ID, "path", newIndex.RequestPath) + + // Check if this lease has already expired + expired, err := c.hasExpired(time.Now().UTC(), newIndex) + if err != nil { + c.logger.Warn("failed to check if lease is expired", "id", newIndex.ID, "error", err) + } + if expired { + continue + } + + if err := c.restoreLeaseRenewCtx(newIndex); err != nil { + errs = multierror.Append(errs, err) + continue + } + if err := c.db.Set(newIndex); err != nil { + errs = multierror.Append(errs, err) + continue + } + c.logger.Trace("restored lease", "id", newIndex.ID, "path", newIndex.RequestPath) + } + } + + return errs.ErrorOrNil() +} + +func (c *LeaseCache) restoreTokens(tokens [][]byte) error { + var errors *multierror.Error + + for _, token := range tokens { + newIndex, err := cachememdb.Deserialize(token) + if err != nil { + errors = multierror.Append(errors, err) + continue + } + newIndex.RenewCtxInfo = c.createCtxInfo(nil) + if err := c.db.Set(newIndex); err != nil { + errors = multierror.Append(errors, err) + continue + } + c.logger.Trace("restored token", "id", newIndex.ID) + } + + return errors.ErrorOrNil() +} + +// restoreLeaseRenewCtx re-creates a RenewCtx for an index object and starts +// the watcher go routine +func (c *LeaseCache) restoreLeaseRenewCtx(index *cachememdb.Index) error { + if index.Response == nil { + return fmt.Errorf("cached response was nil for %s", index.ID) + } + + // Parse the secret to determine which type it is + reader := bufio.NewReader(bytes.NewReader(index.Response)) + resp, err := http.ReadResponse(reader, nil) + if err != nil { + c.logger.Error("failed to deserialize response", "error", err) + return err + } + secret, err := api.ParseSecret(resp.Body) + if err != nil { + c.logger.Error("failed to parse response as secret", "error", err) + return err + } + + var renewCtxInfo *cachememdb.ContextInfo + switch { + case secret.LeaseID != "": + entry, err := c.db.Get(cachememdb.IndexNameToken, index.RequestToken) + if err != nil { + return err + } + + if entry == nil { + return fmt.Errorf("could not find parent Token %s for req path %s", index.RequestToken, index.RequestPath) + } + + // Derive a context for renewal using the token's context + renewCtxInfo = cachememdb.NewContextInfo(entry.RenewCtxInfo.Ctx) + + case secret.Auth != nil: + var parentCtx context.Context + if !secret.Auth.Orphan { + entry, err := c.db.Get(cachememdb.IndexNameToken, index.RequestToken) + if err != nil { + return err + } + // If parent token is not managed by the agent, child shouldn't be + // either. 
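+			// Returning an error here means Restore records the error and
+			// skips this entry rather than recreating a watcher without a
+			// parent context.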
+			if entry == nil {
+				return fmt.Errorf("could not find parent Token %s for req path %s", index.RequestToken, index.RequestPath)
+			}
+
+			c.logger.Debug("setting parent context", "method", index.RequestMethod, "path", index.RequestPath)
+			parentCtx = entry.RenewCtxInfo.Ctx
+		}
+		renewCtxInfo = c.createCtxInfo(parentCtx)
+	default:
+		return fmt.Errorf("unknown cached index item: %s", index.ID)
+	}
+
+	renewCtx := context.WithValue(renewCtxInfo.Ctx, contextIndexID, index.ID)
+	index.RenewCtxInfo = &cachememdb.ContextInfo{
+		Ctx:        renewCtx,
+		CancelFunc: renewCtxInfo.CancelFunc,
+		DoneCh:     renewCtxInfo.DoneCh,
+	}
+
+	sendReq := &SendRequest{
+		Token: index.RequestToken,
+		Request: &http.Request{
+			Header: index.RequestHeader,
+			Method: index.RequestMethod,
+			URL: &url.URL{
+				Path: index.RequestPath,
+			},
+		},
+	}
+	go c.startRenewing(renewCtx, index, sendReq, secret)
+
+	return nil
+}
+
+// deriveNamespaceAndRevocationPath returns the namespace and relative path for
+// revocation paths.
+//
+// If the path contains a namespace, but it's not a revocation path, it will be
+// returned as-is, since there's no way to tell where the namespace ends and
+// where the request path begins purely based on a string.
+//
+// Case 1: /v1/ns1/leases/revoke -> ns1/, /v1/leases/revoke
+// Case 2: ns1/ /v1/leases/revoke -> ns1/, /v1/leases/revoke
+// Case 3: /v1/ns1/foo/bar -> root/, /v1/ns1/foo/bar
+// Case 4: ns1/ /v1/foo/bar -> ns1/, /v1/foo/bar
+func deriveNamespaceAndRevocationPath(req *SendRequest) (string, string) {
+	namespace := "root/"
+	nsHeader := req.Request.Header.Get(consts.NamespaceHeaderName)
+	if nsHeader != "" {
+		namespace = nsHeader
+	}
+
+	fullPath := req.Request.URL.Path
+	nonVersionedPath := strings.TrimPrefix(fullPath, "/v1")
+
+	for _, pathToCheck := range revocationPaths {
+		// We use strings.Index here for paths that can contain
+		// vars in the path, e.g. /v1/lease/revoke-prefix/:prefix
+		i := strings.Index(nonVersionedPath, pathToCheck)
+		// If there's no match, move on to the next check
+		if i == -1 {
+			continue
+		}
+
+		// If the index is 0, this is a relative path with no namespace prepended,
+		// so we can break early
+		if i == 0 {
+			break
+		}
+
+		// We need to turn /ns1 into ns1/, which is what Canonicalize does
+		namespaceInPath := nshelper.Canonicalize(nonVersionedPath[:i])
+
+		// If it's root, we replace, otherwise we join
+		if namespace == "root/" {
+			namespace = namespaceInPath
+		} else {
+			namespace = namespace + namespaceInPath
+		}
+
+		return namespace, fmt.Sprintf("/v1%s", nonVersionedPath[i:])
+	}
+
+	return namespace, fmt.Sprintf("/v1%s", nonVersionedPath)
+}
+
+// RegisterAutoAuthToken adds the provided auto-auth token into the cache. This
+// is primarily used to register the auto-auth token and should only be called
+// within a sink's WriteToken func.
+func (c *LeaseCache) RegisterAutoAuthToken(token string) error {
+	// Get the token from the cache
+	oldIndex, err := c.db.Get(cachememdb.IndexNameToken, token)
+	if err != nil {
+		return err
+	}
+
+	// If the index is found, just keep it in the cache and ignore the incoming
+	// token (since they're the same)
+	if oldIndex != nil {
+		c.logger.Trace("auto-auth token already exists in cache; no need to store it again")
+		return nil
+	}
+
+	// The following randomly generated values are required for the index stored
+	// by the cache, but are not actually used. We use random values to prevent
+	// accidental access.
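+	// base62.Random returns a random base62-encoded string of the requested
+	// length; five characters is plenty here because these placeholder values
+	// are never used as lookup keys.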
+ id, err := base62.Random(5) + if err != nil { + return err + } + namespace, err := base62.Random(5) + if err != nil { + return err + } + requestPath, err := base62.Random(5) + if err != nil { + return err + } + + index := &cachememdb.Index{ + ID: id, + Token: token, + Namespace: namespace, + RequestPath: requestPath, + Type: cacheboltdb.TokenType, + } + + // Derive a context off of the lease cache's base context + ctxInfo := c.createCtxInfo(nil) + + index.RenewCtxInfo = &cachememdb.ContextInfo{ + Ctx: ctxInfo.Ctx, + CancelFunc: ctxInfo.CancelFunc, + DoneCh: ctxInfo.DoneCh, + } + + // Store the index in the cache + c.logger.Debug("storing auto-auth token into the cache") + err = c.Set(c.baseCtxInfo.Ctx, index) + if err != nil { + c.logger.Error("failed to cache the auto-auth token", "error", err) + return err + } + + return nil +} + +type cacheClearInput struct { + Type string + + RequestPath string + Namespace string + Token string + TokenAccessor string + Lease string +} + +func parseCacheClearInput(req *cacheClearRequest) (*cacheClearInput, error) { + if req == nil { + return nil, errors.New("nil request options provided") + } + + if req.Type == "" { + return nil, errors.New("no type provided") + } + + in := &cacheClearInput{ + Type: req.Type, + Namespace: req.Namespace, + } + + switch req.Type { + case "request_path": + in.RequestPath = req.Value + case "token": + in.Token = req.Value + case "token_accessor": + in.TokenAccessor = req.Value + case "lease": + in.Lease = req.Value + } + + return in, nil +} + +func (c *LeaseCache) hasExpired(currentTime time.Time, index *cachememdb.Index) (bool, error) { + reader := bufio.NewReader(bytes.NewReader(index.Response)) + resp, err := http.ReadResponse(reader, nil) + if err != nil { + return false, fmt.Errorf("failed to deserialize response: %w", err) + } + secret, err := api.ParseSecret(resp.Body) + if err != nil { + return false, fmt.Errorf("failed to parse response as secret: %w", err) + } + + elapsed := currentTime.Sub(index.LastRenewed) + var leaseDuration int + switch { + case secret.LeaseID != "": + leaseDuration = secret.LeaseDuration + case secret.Auth != nil: + leaseDuration = secret.Auth.LeaseDuration + default: + return false, errors.New("secret without lease encountered in expiration check") + } + + if int(elapsed.Seconds()) > leaseDuration { + c.logger.Trace("secret has expired", "id", index.ID, "elapsed", elapsed, "lease duration", leaseDuration) + return true, nil + } + return false, nil +} diff --git a/command/agentproxyshared/cache/lease_cache_test.go b/command/agentproxyshared/cache/lease_cache_test.go new file mode 100644 index 0000000..2de4c56 --- /dev/null +++ b/command/agentproxyshared/cache/lease_cache_test.go @@ -0,0 +1,1232 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package cache + +import ( + "context" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "reflect" + "strings" + "sync" + "testing" + "time" + + "github.com/go-test/deep" + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/cache/cacheboltdb" + "github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb" + "github.com/hashicorp/vault/command/agentproxyshared/cache/keymanager" + "github.com/hashicorp/vault/helper/useragent" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/atomic" +) + +func testNewLeaseCache(t *testing.T, responses []*SendResponse) *LeaseCache { + t.Helper() + + client, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + lc, err := NewLeaseCache(&LeaseCacheConfig{ + Client: client, + BaseContext: context.Background(), + Proxier: NewMockProxier(responses), + Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.leasecache"), + }) + if err != nil { + t.Fatal(err) + } + return lc +} + +func testNewLeaseCacheWithDelay(t *testing.T, cacheable bool, delay int) *LeaseCache { + t.Helper() + + client, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + + lc, err := NewLeaseCache(&LeaseCacheConfig{ + Client: client, + BaseContext: context.Background(), + Proxier: &mockDelayProxier{cacheable, delay}, + Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.leasecache"), + }) + if err != nil { + t.Fatal(err) + } + + return lc +} + +func testNewLeaseCacheWithPersistence(t *testing.T, responses []*SendResponse, storage *cacheboltdb.BoltStorage) *LeaseCache { + t.Helper() + + client, err := api.NewClient(api.DefaultConfig()) + require.NoError(t, err) + + lc, err := NewLeaseCache(&LeaseCacheConfig{ + Client: client, + BaseContext: context.Background(), + Proxier: NewMockProxier(responses), + Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.leasecache"), + Storage: storage, + }) + require.NoError(t, err) + + return lc +} + +func TestCache_ComputeIndexID(t *testing.T) { + type args struct { + req *http.Request + } + tests := []struct { + name string + req *SendRequest + want string + wantErr bool + }{ + { + "basic", + &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "test", + }, + }, + }, + "7b5db388f211fd9edca8c6c254831fb01ad4e6fe624dbb62711f256b5e803717", + false, + }, + { + "ignore consistency headers", + &SendRequest{ + Request: &http.Request{ + URL: &url.URL{ + Path: "test", + }, + Header: http.Header{ + vaulthttp.VaultIndexHeaderName: []string{"foo"}, + vaulthttp.VaultInconsistentHeaderName: []string{"foo"}, + vaulthttp.VaultForwardHeaderName: []string{"foo"}, + }, + }, + }, + "7b5db388f211fd9edca8c6c254831fb01ad4e6fe624dbb62711f256b5e803717", + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := computeIndexID(tt.req) + if (err != nil) != tt.wantErr { + t.Errorf("actual_error: %v, expected_error: %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, string(tt.want)) { + t.Errorf("bad: index id; actual: %q, expected: %q", got, string(tt.want)) + } + }) + } +} + +func TestLeaseCache_EmptyToken(t *testing.T) { + responses := []*SendResponse{ + 
newTestSendResponse(http.StatusCreated, `{"value": "invalid", "auth": {"client_token": "testtoken"}}`),
+	}
+	lc := testNewLeaseCache(t, responses)
+
+	// Even if the send request doesn't have a token on it, a successful
+	// cacheable response should result in the index properly getting populated
+	// with a token and memdb shouldn't complain while inserting the index.
+	urlPath := "http://example.com/v1/sample/api"
+	sendReq := &SendRequest{
+		Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)),
+	}
+	resp, err := lc.Send(context.Background(), sendReq)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp == nil {
+		t.Fatalf("expected a non-empty response")
+	}
+}
+
+func TestLeaseCache_SendCacheable(t *testing.T) {
+	// Emulate 2 responses from the api proxy. One returns a new token and the
+	// other returns a lease.
+	responses := []*SendResponse{
+		newTestSendResponse(http.StatusCreated, `{"auth": {"client_token": "testtoken", "renewable": true}}`),
+		newTestSendResponse(http.StatusOK, `{"lease_id": "foo", "renewable": true, "data": {"value": "foo"}}`),
+	}
+
+	lc := testNewLeaseCache(t, responses)
+	// Register a token so that the token and lease requests are cached
+	require.NoError(t, lc.RegisterAutoAuthToken("autoauthtoken"))
+
+	// Make a request. A response with a new token is returned to the lease
+	// cache and that will be cached.
+	urlPath := "http://example.com/v1/sample/api"
+	sendReq := &SendRequest{
+		Token:   "autoauthtoken",
+		Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)),
+	}
+	resp, err := lc.Send(context.Background(), sendReq)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if diff := deep.Equal(resp.Response.StatusCode, responses[0].Response.StatusCode); diff != nil {
+		t.Fatalf("expected getting proxied response: got %v", diff)
+	}
+
+	// Send the same request again to get the cached response
+	sendReq = &SendRequest{
+		Token:   "autoauthtoken",
+		Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)),
+	}
+	resp, err = lc.Send(context.Background(), sendReq)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if diff := deep.Equal(resp.Response.StatusCode, responses[0].Response.StatusCode); diff != nil {
+		t.Fatalf("expected getting proxied response: got %v", diff)
+	}
+
+	// Check TokenParent
+	cachedItem, err := lc.db.Get(cachememdb.IndexNameToken, "testtoken")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if cachedItem == nil {
+		t.Fatalf("expected token entry from cache")
+	}
+	if cachedItem.TokenParent != "autoauthtoken" {
+		t.Fatalf("unexpected value for tokenparent: %s", cachedItem.TokenParent)
+	}
+
+	// Modify the request a little bit to ensure the second response is
+	// returned to the lease cache.
+	sendReq = &SendRequest{
+		Token:   "autoauthtoken",
+		Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input_changed"}`)),
+	}
+	resp, err = lc.Send(context.Background(), sendReq)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if diff := deep.Equal(resp.Response.StatusCode, responses[1].Response.StatusCode); diff != nil {
+		t.Fatalf("expected getting proxied response: got %v", diff)
+	}
+
+	// Make the same request again and ensure that the same response is returned
+	// again.
+ sendReq = &SendRequest{ + Token: "autoauthtoken", + Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input_changed"}`)), + } + resp, err = lc.Send(context.Background(), sendReq) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(resp.Response.StatusCode, responses[1].Response.StatusCode); diff != nil { + t.Fatalf("expected getting proxied response: got %v", diff) + } +} + +func TestLeaseCache_SendNonCacheable(t *testing.T) { + responses := []*SendResponse{ + newTestSendResponse(http.StatusOK, `{"value": "output"}`), + newTestSendResponse(http.StatusNotFound, `{"value": "invalid"}`), + newTestSendResponse(http.StatusOK, `Hello`), + newTestSendResponse(http.StatusTemporaryRedirect, ""), + } + + lc := testNewLeaseCache(t, responses) + + // Send a request through the lease cache which is not cacheable (there is + // no lease information or auth information in the response) + sendReq := &SendRequest{ + Request: httptest.NewRequest("GET", "http://example.com", strings.NewReader(`{"value": "input"}`)), + } + resp, err := lc.Send(context.Background(), sendReq) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(resp.Response, responses[0].Response); diff != nil { + t.Fatalf("expected getting proxied response: got %v", diff) + } + + // Since the response is non-cacheable, the second response will be + // returned. + sendReq = &SendRequest{ + Token: "foo", + Request: httptest.NewRequest("GET", "http://example.com", strings.NewReader(`{"value": "input"}`)), + } + resp, err = lc.Send(context.Background(), sendReq) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(resp.Response, responses[1].Response); diff != nil { + t.Fatalf("expected getting proxied response: got %v", diff) + } + + // Since the response is non-cacheable, the third response will be + // returned. + sendReq = &SendRequest{ + Token: "foo", + Request: httptest.NewRequest("GET", "http://example.com", nil), + } + resp, err = lc.Send(context.Background(), sendReq) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(resp.Response, responses[2].Response); diff != nil { + t.Fatalf("expected getting proxied response: got %v", diff) + } + + // Since the response is non-cacheable, the fourth response will be + // returned. + sendReq = &SendRequest{ + Token: "foo", + Request: httptest.NewRequest("GET", "http://example.com", nil), + } + resp, err = lc.Send(context.Background(), sendReq) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(resp.Response, responses[3].Response); diff != nil { + t.Fatalf("expected getting proxied response: got %v", diff) + } +} + +func TestLeaseCache_SendNonCacheableNonTokenLease(t *testing.T) { + // Create the cache + responses := []*SendResponse{ + newTestSendResponse(http.StatusOK, `{"value": "output", "lease_id": "foo"}`), + newTestSendResponse(http.StatusCreated, `{"value": "invalid", "auth": {"client_token": "testtoken"}}`), + } + lc := testNewLeaseCache(t, responses) + + // Send a request through lease cache which returns a response containing + // lease_id. Response will not be cached because it doesn't belong to a + // token that is managed by the lease cache. 
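+	// (The "foo" token used below is never registered with the lease cache,
+	// which is why the memdb lookups later in this test should come back empty.)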
+	urlPath := "http://example.com/v1/sample/api"
+	sendReq := &SendRequest{
+		Token:   "foo",
+		Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)),
+	}
+	resp, err := lc.Send(context.Background(), sendReq)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if diff := deep.Equal(resp.Response, responses[0].Response); diff != nil {
+		t.Fatalf("expected getting proxied response: got %v", diff)
+	}
+
+	idx, err := lc.db.Get(cachememdb.IndexNameRequestPath, "root/", urlPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if idx != nil {
+		t.Fatalf("expected nil entry, got: %#v", idx)
+	}
+
+	// Verify that the response is not cached by sending the same request and
+	// by expecting a different response.
+	sendReq = &SendRequest{
+		Token:   "foo",
+		Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)),
+	}
+	resp, err = lc.Send(context.Background(), sendReq)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if diff := deep.Equal(resp.Response, responses[1].Response); diff != nil {
+		t.Fatalf("expected getting proxied response: got %v", diff)
+	}
+
+	idx, err = lc.db.Get(cachememdb.IndexNameRequestPath, "root/", urlPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if idx != nil {
+		t.Fatalf("expected nil entry, got: %#v", idx)
+	}
+}
+
+func TestLeaseCache_HandleCacheClear(t *testing.T) {
+	lc := testNewLeaseCache(t, nil)
+
+	handler := lc.HandleCacheClear(context.Background())
+	ts := httptest.NewServer(handler)
+	defer ts.Close()
+
+	// Test missing body, should return 400
+	resp, err := http.Post(ts.URL, "application/json", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp.StatusCode != http.StatusBadRequest {
+		t.Fatalf("status code mismatch: expected = %v, got = %v", http.StatusBadRequest, resp.StatusCode)
+	}
+
+	testCases := []struct {
+		name               string
+		reqType            string
+		reqValue           string
+		expectedStatusCode int
+	}{
+		{
+			"invalid_type",
+			"foo",
+			"",
+			http.StatusBadRequest,
+		},
+		{
+			"invalid_value",
+			"",
+			"bar",
+			http.StatusBadRequest,
+		},
+		{
+			"all",
+			"all",
+			"",
+			http.StatusOK,
+		},
+		{
+			"by_request_path",
+			"request_path",
+			"foo",
+			http.StatusOK,
+		},
+		{
+			"by_token",
+			"token",
+			"foo",
+			http.StatusOK,
+		},
+		{
+			"by_lease",
+			"lease",
+			"foo",
+			http.StatusOK,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			reqBody := fmt.Sprintf("{\"type\": \"%s\", \"value\": \"%s\"}", tc.reqType, tc.reqValue)
+			resp, err := http.Post(ts.URL, "application/json", strings.NewReader(reqBody))
+			if err != nil {
+				t.Fatal(err)
+			}
+			if tc.expectedStatusCode != resp.StatusCode {
+				t.Fatalf("status code mismatch: expected = %v, got = %v", tc.expectedStatusCode, resp.StatusCode)
+			}
+		})
+	}
+}
+
+func TestCache_DeriveNamespaceAndRevocationPath(t *testing.T) {
+	tests := []struct {
+		name             string
+		req              *SendRequest
+		wantNamespace    string
+		wantRelativePath string
+	}{
+		{
+			"non_revocation_full_path",
+			&SendRequest{
+				Request: &http.Request{
+					URL: &url.URL{
+						Path: "/v1/ns1/sys/mounts",
+					},
+				},
+			},
+			"root/",
+			"/v1/ns1/sys/mounts",
+		},
+		{
+			"non_revocation_relative_path",
+			&SendRequest{
+				Request: &http.Request{
+					URL: &url.URL{
+						Path: "/v1/sys/mounts",
+					},
+					Header: http.Header{
+						consts.NamespaceHeaderName: []string{"ns1/"},
+					},
+				},
+			},
+			"ns1/",
+			"/v1/sys/mounts",
+		},
+		{
+			"non_revocation_relative_path",
+			&SendRequest{
+				Request: &http.Request{
+					URL: &url.URL{
+						Path: "/v1/ns2/sys/mounts",
+					},
+					Header: http.Header{
+						consts.NamespaceHeaderName: []string{"ns1/"},
+					},
+				},
+			},
+			"ns1/",
+			"/v1/ns2/sys/mounts",
+		},
+		{
+			"revocation_full_path",
+			&SendRequest{
+				Request: &http.Request{
+					URL: &url.URL{
+						Path: "/v1/ns1/sys/leases/revoke",
+					},
+				},
+			},
+			"ns1/",
+			"/v1/sys/leases/revoke",
+		},
+		{
+			"revocation_relative_path",
+			&SendRequest{
+				Request: &http.Request{
+					URL: &url.URL{
+						Path: "/v1/sys/leases/revoke",
+					},
+					Header: http.Header{
+						consts.NamespaceHeaderName: []string{"ns1/"},
+					},
+				},
+			},
+			"ns1/",
+			"/v1/sys/leases/revoke",
+		},
+		{
+			"revocation_relative_partial_ns",
+			&SendRequest{
+				Request: &http.Request{
+					URL: &url.URL{
+						Path: "/v1/ns2/sys/leases/revoke",
+					},
+					Header: http.Header{
+						consts.NamespaceHeaderName: []string{"ns1/"},
+					},
+				},
+			},
+			"ns1/ns2/",
+			"/v1/sys/leases/revoke",
+		},
+		{
+			"revocation_prefix_full_path",
+			&SendRequest{
+				Request: &http.Request{
+					URL: &url.URL{
+						Path: "/v1/ns1/sys/leases/revoke-prefix/foo",
+					},
+				},
+			},
+			"ns1/",
+			"/v1/sys/leases/revoke-prefix/foo",
+		},
+		{
+			"revocation_prefix_relative_path",
+			&SendRequest{
+				Request: &http.Request{
+					URL: &url.URL{
+						Path: "/v1/sys/leases/revoke-prefix/foo",
+					},
+					Header: http.Header{
+						consts.NamespaceHeaderName: []string{"ns1/"},
+					},
+				},
+			},
+			"ns1/",
+			"/v1/sys/leases/revoke-prefix/foo",
+		},
+		{
+			"revocation_prefix_partial_ns",
+			&SendRequest{
+				Request: &http.Request{
+					URL: &url.URL{
+						Path: "/v1/ns2/sys/leases/revoke-prefix/foo",
+					},
+					Header: http.Header{
+						consts.NamespaceHeaderName: []string{"ns1/"},
+					},
+				},
+			},
+			"ns1/ns2/",
+			"/v1/sys/leases/revoke-prefix/foo",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			gotNamespace, gotRelativePath := deriveNamespaceAndRevocationPath(tt.req)
+			if gotNamespace != tt.wantNamespace {
+				t.Errorf("deriveNamespaceAndRevocationPath() gotNamespace = %v, want %v", gotNamespace, tt.wantNamespace)
+			}
+			if gotRelativePath != tt.wantRelativePath {
+				t.Errorf("deriveNamespaceAndRevocationPath() gotRelativePath = %v, want %v", gotRelativePath, tt.wantRelativePath)
+			}
+		})
+	}
+}
+
+func TestLeaseCache_Concurrent_NonCacheable(t *testing.T) {
+	lc := testNewLeaseCacheWithDelay(t, false, 50)
+
+	// We are going to send 100 requests, each taking 50ms to process. If these
+	// requests are processed serially, it will take ~5 seconds to finish. We
+	// use a ContextWithTimeout to tell us if this is the case by giving ample
+	// time for it to process them concurrently, but time out if they get
+	// processed serially.
+	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+	defer cancel()
+
+	wgDoneCh := make(chan struct{})
+	errCh := make(chan error)
+
+	go func() {
+		var wg sync.WaitGroup
+		// 100 concurrent requests
+		for i := 0; i < 100; i++ {
+			wg.Add(1)
+
+			go func() {
+				defer wg.Done()
+
+				// Send a request through the lease cache which is not cacheable (there is
+				// no lease information or auth information in the response)
+				sendReq := &SendRequest{
+					Request: httptest.NewRequest("GET", "http://example.com", nil),
+				}
+
+				_, err := lc.Send(ctx, sendReq)
+				if err != nil {
+					errCh <- err
+				}
+			}()
+		}
+
+		wg.Wait()
+		close(wgDoneCh)
+	}()
+
+	select {
+	case <-ctx.Done():
+		t.Fatalf("request timed out: %s", ctx.Err())
+	case <-wgDoneCh:
+	case err := <-errCh:
+		t.Fatal(err)
+	}
+}
+
+func TestLeaseCache_Concurrent_Cacheable(t *testing.T) {
+	lc := testNewLeaseCacheWithDelay(t, true, 50)
+
+	if err := lc.RegisterAutoAuthToken("autoauthtoken"); err != nil {
+		t.Fatal(err)
+	}
+
+	// We are going to send 100 requests, each taking 50ms to process. If these
+	// requests are processed serially, it will take ~5 seconds to finish, so we
+	// use a ContextWithTimeout to tell us if this is the case.
+	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+	defer cancel()
+
+	var cacheCount atomic.Uint32
+	wgDoneCh := make(chan struct{})
+	errCh := make(chan error)
+
+	go func() {
+		var wg sync.WaitGroup
+		// Start 100 concurrent requests
+		for i := 0; i < 100; i++ {
+			wg.Add(1)
+
+			go func() {
+				defer wg.Done()
+
+				sendReq := &SendRequest{
+					Token:   "autoauthtoken",
+					Request: httptest.NewRequest("GET", "http://example.com/v1/sample/api", nil),
+				}
+
+				resp, err := lc.Send(ctx, sendReq)
+				if err != nil {
+					errCh <- err
+				}
+
+				if resp.CacheMeta != nil && resp.CacheMeta.Hit {
+					cacheCount.Inc()
+				}
+			}()
+		}
+
+		wg.Wait()
+		close(wgDoneCh)
+	}()
+
+	select {
+	case <-ctx.Done():
+		t.Fatalf("request timed out: %s", ctx.Err())
+	case <-wgDoneCh:
+	case err := <-errCh:
+		t.Fatal(err)
+	}
+
+	// Ensure that only one request got proxied; the other 99 should be
+	// returned from the cache.
+	if cacheCount.Load() != 99 {
+		t.Fatalf("Should have returned a cached response 99 times, got %d", cacheCount.Load())
+	}
+}
+
+func setupBoltStorage(t *testing.T) (tempCacheDir string, boltStorage *cacheboltdb.BoltStorage) {
+	t.Helper()
+
+	km, err := keymanager.NewPassthroughKeyManager(context.Background(), nil)
+	require.NoError(t, err)
+
+	tempCacheDir, err = ioutil.TempDir("", "agent-cache-test")
+	require.NoError(t, err)
+	boltStorage, err = cacheboltdb.NewBoltStorage(&cacheboltdb.BoltStorageConfig{
+		Path:    tempCacheDir,
+		Logger:  hclog.Default(),
+		Wrapper: km.Wrapper(),
+	})
+	require.NoError(t, err)
+	require.NotNil(t, boltStorage)
+	// The calling function should `defer boltStorage.Close()` and `defer os.RemoveAll(tempCacheDir)`
+	return tempCacheDir, boltStorage
+}
+
+func compareBeforeAndAfter(t *testing.T, before, after *LeaseCache, beforeLen, afterLen int) {
+	beforeDB, err := before.db.GetByPrefix(cachememdb.IndexNameID)
+	require.NoError(t, err)
+	assert.Len(t, beforeDB, beforeLen)
+	afterDB, err := after.db.GetByPrefix(cachememdb.IndexNameID)
+	require.NoError(t, err)
+	assert.Len(t, afterDB, afterLen)
+	for _, cachedItem := range beforeDB {
+		if strings.Contains(cachedItem.RequestPath, "expect-missing") {
+			continue
+		}
+		restoredItem, err := after.db.Get(cachememdb.IndexNameID, cachedItem.ID)
+		require.NoError(t, err)
+
+		assert.NoError(t, err)
+		assert.Equal(t, cachedItem.ID, restoredItem.ID)
+		assert.Equal(t, cachedItem.Lease, restoredItem.Lease)
+		assert.Equal(t, cachedItem.LeaseToken, restoredItem.LeaseToken)
+		assert.Equal(t, cachedItem.Namespace, restoredItem.Namespace)
+		assert.EqualValues(t, cachedItem.RequestHeader, restoredItem.RequestHeader)
+		assert.Equal(t, cachedItem.RequestMethod, restoredItem.RequestMethod)
+		assert.Equal(t, cachedItem.RequestPath, restoredItem.RequestPath)
+		assert.Equal(t, cachedItem.RequestToken, restoredItem.RequestToken)
+		assert.Equal(t, cachedItem.Response, restoredItem.Response)
+		assert.Equal(t, cachedItem.Token, restoredItem.Token)
+		assert.Equal(t, cachedItem.TokenAccessor, restoredItem.TokenAccessor)
+		assert.Equal(t, cachedItem.TokenParent, restoredItem.TokenParent)
+
+		// check what we can in the renewal context
+		assert.NotEmpty(t, restoredItem.RenewCtxInfo.CancelFunc)
+		assert.NotZero(t, restoredItem.RenewCtxInfo.DoneCh)
+		require.NotEmpty(t, restoredItem.RenewCtxInfo.Ctx)
+		assert.Equal(t,
+			cachedItem.RenewCtxInfo.Ctx.Value(contextIndexID),
+			restoredItem.RenewCtxInfo.Ctx.Value(contextIndexID),
+		)
+	}
+}
+
+func TestLeaseCache_PersistAndRestore(t *testing.T) {
+	// Emulate responses from the api proxy. The first two use the auto-auth
+	// token, and the others use another token.
+	// The test re-sends each request to ensure that the response is cached,
+	// so the number of responses and cacheTests specified should always be equal.
+	responses := []*SendResponse{
+		newTestSendResponse(200, `{"auth": {"client_token": "testtoken", "renewable": true, "lease_duration": 600}}`),
+		newTestSendResponse(201, `{"lease_id": "foo", "renewable": true, "data": {"value": "foo"}, "lease_duration": 600}`),
+		// The auth token will get manually deleted from the bolt DB storage, causing both of the following two responses
+		// to be missing from the cache after a restore, because the lease is a child of the auth token.
+		newTestSendResponse(202, `{"auth": {"client_token": "testtoken2", "renewable": true, "orphan": true, "lease_duration": 600}}`),
+		newTestSendResponse(203, `{"lease_id": "secret2-lease", "renewable": true, "data": {"number": "two"}, "lease_duration": 600}`),
+		// 204 No content gets special handling - avoid.
+		newTestSendResponse(250, `{"auth": {"client_token": "testtoken3", "renewable": true, "orphan": true, "lease_duration": 600}}`),
+		newTestSendResponse(251, `{"lease_id": "secret3-lease", "renewable": true, "data": {"number": "three"}, "lease_duration": 600}`),
+	}
+
+	tempDir, boltStorage := setupBoltStorage(t)
+	defer os.RemoveAll(tempDir)
+	defer boltStorage.Close()
+	lc := testNewLeaseCacheWithPersistence(t, responses, boltStorage)
+
+	// Register an auto-auth token so that the token and lease requests are cached
+	err := lc.RegisterAutoAuthToken("autoauthtoken")
+	require.NoError(t, err)
+
+	cacheTests := []struct {
+		token                     string
+		method                    string
+		urlPath                   string
+		body                      string
+		deleteFromPersistentStore bool // If true, will be deleted from bolt DB to induce an error on restore
+		expectMissingAfterRestore bool // If true, the response is not expected to be present in the restored cache
+	}{
+		{
+			// Make a request. A response with a new token is returned to the
+			// lease cache and that will be cached.
+			token:   "autoauthtoken",
+			method:  "GET",
+			urlPath: "http://example.com/v1/sample/api",
+			body:    `{"value": "input"}`,
+		},
+		{
+			// Modify the request a little bit to ensure the second response is
+			// returned to the lease cache.
+ token: "autoauthtoken", + method: "GET", + urlPath: "http://example.com/v1/sample/api", + body: `{"value": "input_changed"}`, + }, + { + // Simulate an approle login to get another token + method: "PUT", + urlPath: "http://example.com/v1/auth/approle-expect-missing/login", + body: `{"role_id": "my role", "secret_id": "my secret"}`, + deleteFromPersistentStore: true, + expectMissingAfterRestore: true, + }, + { + // Test caching with the token acquired from the approle login + token: "testtoken2", + method: "GET", + urlPath: "http://example.com/v1/sample-expect-missing/api", + body: `{"second": "input"}`, + // This will be missing from the restored cache because its parent token was deleted + expectMissingAfterRestore: true, + }, + { + // Simulate another approle login to get another token + method: "PUT", + urlPath: "http://example.com/v1/auth/approle/login", + body: `{"role_id": "my role", "secret_id": "my secret"}`, + }, + { + // Test caching with the token acquired from the latest approle login + token: "testtoken3", + method: "GET", + urlPath: "http://example.com/v1/sample3/api", + body: `{"third": "input"}`, + }, + } + + var deleteIDs []string + for i, ct := range cacheTests { + // Send once to cache + req := httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)) + req.Header.Set("User-Agent", useragent.AgentProxyString()) + + sendReq := &SendRequest{ + Token: ct.token, + Request: req, + } + if ct.deleteFromPersistentStore { + deleteID, err := computeIndexID(sendReq) + require.NoError(t, err) + deleteIDs = append(deleteIDs, deleteID) + // Now reset the body after calculating the index + req = httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)) + req.Header.Set("User-Agent", useragent.AgentProxyString()) + sendReq.Request = req + } + resp, err := lc.Send(context.Background(), sendReq) + require.NoError(t, err) + assert.Equal(t, responses[i].Response.StatusCode, resp.Response.StatusCode, "expected proxied response") + assert.Nil(t, resp.CacheMeta) + + // Send again to test cache. If this isn't cached, the response returned + // will be the next in the list and the status code will not match. + req = httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)) + req.Header.Set("User-Agent", useragent.AgentProxyString()) + sendCacheReq := &SendRequest{ + Token: ct.token, + Request: req, + } + respCached, err := lc.Send(context.Background(), sendCacheReq) + require.NoError(t, err, "failed to send request %+v", ct) + assert.Equal(t, responses[i].Response.StatusCode, respCached.Response.StatusCode, "expected proxied response") + require.NotNil(t, respCached.CacheMeta) + assert.True(t, respCached.CacheMeta.Hit) + } + + require.NotEmpty(t, deleteIDs) + for _, deleteID := range deleteIDs { + err = boltStorage.Delete(deleteID, cacheboltdb.LeaseType) + require.NoError(t, err) + } + + // Now we know the cache is working, so try restoring from the persisted + // cache's storage. Responses 3 and 4 have been cleared from the cache, so + // re-send those. 
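+	// (responses[2:4] holds the approle login response and its dependent
+	// secret lease response, which will be proxied again when re-sent below.)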
+ restoredCache := testNewLeaseCache(t, responses[2:4]) + + err = restoredCache.Restore(context.Background(), boltStorage) + errors, ok := err.(*multierror.Error) + require.True(t, ok) + assert.Len(t, errors.Errors, 1) + assert.Contains(t, errors.Error(), "could not find parent Token testtoken2") + + // Now compare the cache contents before and after + compareBeforeAndAfter(t, lc, restoredCache, 7, 5) + + // And finally send the cache requests once to make sure they're all being + // served from the restoredCache unless they were intended to be missing after restore. + for i, ct := range cacheTests { + req := httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)) + req.Header.Set("User-Agent", useragent.AgentProxyString()) + sendCacheReq := &SendRequest{ + Token: ct.token, + Request: req, + } + respCached, err := restoredCache.Send(context.Background(), sendCacheReq) + require.NoError(t, err, "failed to send request %+v", ct) + assert.Equal(t, responses[i].Response.StatusCode, respCached.Response.StatusCode, "expected proxied response") + if ct.expectMissingAfterRestore { + require.Nil(t, respCached.CacheMeta) + } else { + require.NotNil(t, respCached.CacheMeta) + assert.True(t, respCached.CacheMeta.Hit) + } + } +} + +func TestLeaseCache_PersistAndRestore_WithManyDependencies(t *testing.T) { + tempDir, boltStorage := setupBoltStorage(t) + defer os.RemoveAll(tempDir) + defer boltStorage.Close() + + var requests []*SendRequest + var responses []*SendResponse + var orderedRequestPaths []string + + // helper func to generate new auth leases with a child secret lease attached + authAndSecretLease := func(id int, parentToken, newToken string) { + t.Helper() + path := fmt.Sprintf("/v1/auth/approle-%d/login", id) + orderedRequestPaths = append(orderedRequestPaths, path) + requests = append(requests, &SendRequest{ + Token: parentToken, + Request: httptest.NewRequest("PUT", "http://example.com"+path, strings.NewReader("")), + }) + responses = append(responses, newTestSendResponse(200, fmt.Sprintf(`{"auth": {"client_token": "%s", "renewable": true, "lease_duration": 600}}`, newToken))) + + // Fetch a leased secret using the new token + path = fmt.Sprintf("/v1/kv/%d", id) + orderedRequestPaths = append(orderedRequestPaths, path) + requests = append(requests, &SendRequest{ + Token: newToken, + Request: httptest.NewRequest("GET", "http://example.com"+path, strings.NewReader("")), + }) + responses = append(responses, newTestSendResponse(200, fmt.Sprintf(`{"lease_id": "secret-%d-lease", "renewable": true, "data": {"number": %d}, "lease_duration": 600}`, id, id))) + } + + // Pathological case: a long chain of child tokens + authAndSecretLease(0, "autoauthtoken", "many-ancestors-token;0") + for i := 1; i <= 50; i++ { + // Create a new generation of child token + authAndSecretLease(i, fmt.Sprintf("many-ancestors-token;%d", i-1), fmt.Sprintf("many-ancestors-token;%d", i)) + } + + // Lots of sibling tokens with auto auth token as their parent + for i := 51; i <= 100; i++ { + authAndSecretLease(i, "autoauthtoken", fmt.Sprintf("many-siblings-token;%d", i)) + } + + // Also create some extra siblings for an auth token further down the chain + for i := 101; i <= 110; i++ { + authAndSecretLease(i, "many-ancestors-token;25", fmt.Sprintf("many-siblings-for-ancestor-token;%d", i)) + } + + lc := testNewLeaseCacheWithPersistence(t, responses, boltStorage) + + // Register an auto-auth token so that the token and lease requests are cached + err := lc.RegisterAutoAuthToken("autoauthtoken") + 
require.NoError(t, err) + + for _, req := range requests { + // Send once to cache + resp, err := lc.Send(context.Background(), req) + require.NoError(t, err) + assert.Equal(t, 200, resp.Response.StatusCode, "expected success") + assert.Nil(t, resp.CacheMeta) + } + + // Ensure leases are retrieved in the correct order + var processed int + + leases, err := boltStorage.GetByType(context.Background(), cacheboltdb.LeaseType) + require.NoError(t, err) + for _, lease := range leases { + index, err := cachememdb.Deserialize(lease) + require.NoError(t, err) + require.Equal(t, orderedRequestPaths[processed], index.RequestPath) + processed++ + } + + assert.Equal(t, len(orderedRequestPaths), processed) + + restoredCache := testNewLeaseCache(t, nil) + err = restoredCache.Restore(context.Background(), boltStorage) + require.NoError(t, err) + + // Now compare the cache contents before and after + compareBeforeAndAfter(t, lc, restoredCache, 223, 223) +} + +func TestEvictPersistent(t *testing.T) { + ctx := context.Background() + + responses := []*SendResponse{ + newTestSendResponse(201, `{"lease_id": "foo", "renewable": true, "data": {"value": "foo"}}`), + } + + tempDir, boltStorage := setupBoltStorage(t) + defer os.RemoveAll(tempDir) + defer boltStorage.Close() + lc := testNewLeaseCacheWithPersistence(t, responses, boltStorage) + + require.NoError(t, lc.RegisterAutoAuthToken("autoauthtoken")) + + // populate cache by sending request through + sendReq := &SendRequest{ + Token: "autoauthtoken", + Request: httptest.NewRequest("GET", "http://example.com/v1/sample/api", strings.NewReader(`{"value": "some_input"}`)), + } + resp, err := lc.Send(context.Background(), sendReq) + require.NoError(t, err) + assert.Equal(t, resp.Response.StatusCode, 201, "expected proxied response") + assert.Nil(t, resp.CacheMeta) + + // Check bolt for the cached lease + secrets, err := lc.ps.GetByType(ctx, cacheboltdb.LeaseType) + require.NoError(t, err) + assert.Len(t, secrets, 1) + + // Call clear for the request path + err = lc.handleCacheClear(context.Background(), &cacheClearInput{ + Type: "request_path", + RequestPath: "/v1/sample/api", + }) + require.NoError(t, err) + + time.Sleep(2 * time.Second) + + // Check that cached item is gone + secrets, err = lc.ps.GetByType(ctx, cacheboltdb.LeaseType) + require.NoError(t, err) + assert.Len(t, secrets, 0) +} + +func TestRegisterAutoAuth_sameToken(t *testing.T) { + // If the auto-auth token already exists in the cache, it should not be + // stored again in a new index. + lc := testNewLeaseCache(t, nil) + err := lc.RegisterAutoAuthToken("autoauthtoken") + assert.NoError(t, err) + + oldTokenIndex, err := lc.db.Get(cachememdb.IndexNameToken, "autoauthtoken") + assert.NoError(t, err) + oldTokenID := oldTokenIndex.ID + + // register the same token again + err = lc.RegisterAutoAuthToken("autoauthtoken") + assert.NoError(t, err) + + // check that there's only one index for autoauthtoken + entries, err := lc.db.GetByPrefix(cachememdb.IndexNameToken, "autoauthtoken") + assert.NoError(t, err) + assert.Len(t, entries, 1) + + newTokenIndex, err := lc.db.Get(cachememdb.IndexNameToken, "autoauthtoken") + assert.NoError(t, err) + + // compare the ID's since those are randomly generated when an index for a + // token is added to the cache, so if a new token was added, the id's will + // not match. 
+ assert.Equal(t, oldTokenID, newTokenIndex.ID) +} + +func Test_hasExpired(t *testing.T) { + responses := []*SendResponse{ + newTestSendResponse(200, `{"auth": {"client_token": "testtoken", "renewable": true, "lease_duration": 60}}`), + newTestSendResponse(201, `{"lease_id": "foo", "renewable": true, "data": {"value": "foo"}, "lease_duration": 60}`), + } + lc := testNewLeaseCache(t, responses) + require.NoError(t, lc.RegisterAutoAuthToken("autoauthtoken")) + + cacheTests := []struct { + token string + urlPath string + leaseType string + wantStatusCode int + }{ + { + // auth lease + token: "autoauthtoken", + urlPath: "/v1/sample/auth", + leaseType: cacheboltdb.LeaseType, + wantStatusCode: responses[0].Response.StatusCode, + }, + { + // secret lease + token: "autoauthtoken", + urlPath: "/v1/sample/secret", + leaseType: cacheboltdb.LeaseType, + wantStatusCode: responses[1].Response.StatusCode, + }, + } + + for _, ct := range cacheTests { + // Send once to cache + urlPath := "http://example.com" + ct.urlPath + sendReq := &SendRequest{ + Token: ct.token, + Request: httptest.NewRequest("GET", urlPath, strings.NewReader(`{"value": "input"}`)), + } + resp, err := lc.Send(context.Background(), sendReq) + require.NoError(t, err) + assert.Equal(t, resp.Response.StatusCode, ct.wantStatusCode, "expected proxied response") + assert.Nil(t, resp.CacheMeta) + + // get the Index out of the mem cache + index, err := lc.db.Get(cachememdb.IndexNameRequestPath, "root/", ct.urlPath) + require.NoError(t, err) + assert.Equal(t, ct.leaseType, index.Type) + + // The lease duration is 60 seconds, so time.Now() should be within that + notExpired, err := lc.hasExpired(time.Now().UTC(), index) + require.NoError(t, err) + assert.False(t, notExpired) + + // In 90 seconds the index should be "expired" + futureTime := time.Now().UTC().Add(time.Second * 90) + expired, err := lc.hasExpired(futureTime, index) + require.NoError(t, err) + assert.True(t, expired) + } +} + +func TestLeaseCache_hasExpired_wrong_type(t *testing.T) { + index := &cachememdb.Index{ + Type: cacheboltdb.TokenType, + Response: []byte(`HTTP/0.0 200 OK +Content-Type: application/json +Date: Tue, 02 Mar 2021 17:54:16 GMT + +{}`), + } + + lc := testNewLeaseCache(t, nil) + expired, err := lc.hasExpired(time.Now().UTC(), index) + assert.False(t, expired) + assert.EqualError(t, err, `secret without lease encountered in expiration check`) +} + +func TestLeaseCacheRestore_expired(t *testing.T) { + // Emulate 2 responses from the api proxy, both expired + responses := []*SendResponse{ + newTestSendResponse(200, `{"auth": {"client_token": "testtoken", "renewable": true, "lease_duration": -600}}`), + newTestSendResponse(201, `{"lease_id": "foo", "renewable": true, "data": {"value": "foo"}, "lease_duration": -600}`), + } + + tempDir, boltStorage := setupBoltStorage(t) + defer os.RemoveAll(tempDir) + defer boltStorage.Close() + lc := testNewLeaseCacheWithPersistence(t, responses, boltStorage) + + // Register an auto-auth token so that the token and lease requests are cached in mem + require.NoError(t, lc.RegisterAutoAuthToken("autoauthtoken")) + + cacheTests := []struct { + token string + method string + urlPath string + body string + wantStatusCode int + }{ + { + // Make a request. A response with a new token is returned to the + // lease cache and that will be cached. 
+ token: "autoauthtoken", + method: "GET", + urlPath: "http://example.com/v1/sample/api", + body: `{"value": "input"}`, + wantStatusCode: responses[0].Response.StatusCode, + }, + { + // Modify the request a little bit to ensure the second response is + // returned to the lease cache. + token: "autoauthtoken", + method: "GET", + urlPath: "http://example.com/v1/sample/api", + body: `{"value": "input_changed"}`, + wantStatusCode: responses[1].Response.StatusCode, + }, + } + + for _, ct := range cacheTests { + // Send once to cache + sendReq := &SendRequest{ + Token: ct.token, + Request: httptest.NewRequest(ct.method, ct.urlPath, strings.NewReader(ct.body)), + } + resp, err := lc.Send(context.Background(), sendReq) + require.NoError(t, err) + assert.Equal(t, resp.Response.StatusCode, ct.wantStatusCode, "expected proxied response") + assert.Nil(t, resp.CacheMeta) + } + + // Restore from the persisted cache's storage + restoredCache := testNewLeaseCache(t, nil) + + err := restoredCache.Restore(context.Background(), boltStorage) + assert.NoError(t, err) + + // The original mem cache should have all three items + beforeDB, err := lc.db.GetByPrefix(cachememdb.IndexNameID) + require.NoError(t, err) + assert.Len(t, beforeDB, 3) + + // There should only be one item in the restored cache: the autoauth token + afterDB, err := restoredCache.db.GetByPrefix(cachememdb.IndexNameID) + require.NoError(t, err) + assert.Len(t, afterDB, 1) + + // Just verify that the one item in the restored mem cache matches one in the original mem cache, and that it's the auto-auth token + beforeItem, err := lc.db.Get(cachememdb.IndexNameID, afterDB[0].ID) + require.NoError(t, err) + assert.NotNil(t, beforeItem) + + assert.Equal(t, "autoauthtoken", afterDB[0].Token) + assert.Equal(t, cacheboltdb.TokenType, afterDB[0].Type) +} diff --git a/command/agentproxyshared/cache/listener.go b/command/agentproxyshared/cache/listener.go new file mode 100644 index 0000000..c8ed722 --- /dev/null +++ b/command/agentproxyshared/cache/listener.go @@ -0,0 +1,84 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package cache + +import ( + "crypto/tls" + "fmt" + "net" + "strings" + + "github.com/hashicorp/go-secure-stdlib/reloadutil" + "github.com/hashicorp/vault/command/server" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/internalshared/listenerutil" +) + +type ListenerBundle struct { + Listener net.Listener + TLSConfig *tls.Config + TLSReloadFunc reloadutil.ReloadFunc +} + +func StartListener(lnConfig *configutil.Listener) (*ListenerBundle, error) { + addr := lnConfig.Address + + var ln net.Listener + var err error + switch lnConfig.Type { + case "tcp": + if addr == "" { + addr = "127.0.0.1:8200" + } + + bindProto := "tcp" + // If they've passed 0.0.0.0, we only want to bind on IPv4 + // rather than golang's dual stack default + if strings.HasPrefix(addr, "0.0.0.0:") { + bindProto = "tcp4" + } + + ln, err = net.Listen(bindProto, addr) + if err != nil { + return nil, err + } + ln = &server.TCPKeepAliveListener{ln.(*net.TCPListener)} + + case "unix": + var uConfig *listenerutil.UnixSocketsConfig + if lnConfig.SocketMode != "" && + lnConfig.SocketUser != "" && + lnConfig.SocketGroup != "" { + uConfig = &listenerutil.UnixSocketsConfig{ + Mode: lnConfig.SocketMode, + User: lnConfig.SocketUser, + Group: lnConfig.SocketGroup, + } + } + ln, err = listenerutil.UnixSocketListener(addr, uConfig) + if err != nil { + return nil, err + } + + default: + return nil, fmt.Errorf("invalid listener type: %q", lnConfig.Type) + } + + props := map[string]string{"addr": ln.Addr().String()} + tlsConf, reloadFunc, err := listenerutil.TLSConfig(lnConfig, props, nil) + if err != nil { + return nil, err + } + if tlsConf != nil { + ln = tls.NewListener(ln, tlsConf) + } + + cfg := &ListenerBundle{ + Listener: ln, + TLSConfig: tlsConf, + TLSReloadFunc: reloadFunc, + } + + return cfg, nil +} diff --git a/command/agentproxyshared/cache/proxy.go b/command/agentproxyshared/cache/proxy.go new file mode 100644 index 0000000..4dcd180 --- /dev/null +++ b/command/agentproxyshared/cache/proxy.go @@ -0,0 +1,79 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cache + +import ( + "bytes" + "context" + "io" + "net/http" + "time" + + "github.com/hashicorp/vault/api" +) + +// SendRequest is the input for Proxier.Send. +type SendRequest struct { + Token string + Request *http.Request + + // RequestBody is the stored body bytes from Request.Body. It is set here to + // avoid reading and re-setting the stream multiple times. + RequestBody []byte +} + +// SendResponse is the output from Proxier.Send. +type SendResponse struct { + Response *api.Response + + // ResponseBody is the stored body bytes from Response.Body. It is set here to + // avoid reading and re-setting the stream multiple times. + ResponseBody []byte + CacheMeta *CacheMeta +} + +// CacheMeta contains metadata information about the response, +// such as whether it was a cache hit or miss, and the age of the +// cached entry. +type CacheMeta struct { + Hit bool + Age time.Duration +} + +// Proxier is the interface implemented by different components that are +// responsible for performing specific tasks, such as caching and proxying. All +// these tasks combined together would serve the request received by the agent. +type Proxier interface { + Send(ctx context.Context, req *SendRequest) (*SendResponse, error) +} + +// NewSendResponse creates a new SendResponse and takes care of initializing its +// fields properly. 
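+// If responseBody is non-empty it is used as-is; otherwise the body is read
+// from apiResponse and then re-set on it so the stream can be read again.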
+func NewSendResponse(apiResponse *api.Response, responseBody []byte) (*SendResponse, error) {
+	resp := &SendResponse{
+		Response:  apiResponse,
+		CacheMeta: &CacheMeta{},
+	}
+
+	// If a response body is separately provided we set that as the SendResponse.ResponseBody,
+	// otherwise we do an io.ReadAll to extract the response body from apiResponse.
+	switch {
+	case len(responseBody) > 0:
+		resp.ResponseBody = responseBody
+	case apiResponse.Body != nil:
+		respBody, err := io.ReadAll(apiResponse.Body)
+		if err != nil {
+			return nil, err
+		}
+		// Close the old body
+		apiResponse.Body.Close()
+
+		// Re-set the response body after reading from the Reader
+		apiResponse.Body = io.NopCloser(bytes.NewReader(respBody))
+
+		resp.ResponseBody = respBody
+	}
+
+	return resp, nil
+}
diff --git a/command/agentproxyshared/cache/testing.go b/command/agentproxyshared/cache/testing.go
new file mode 100644
index 0000000..9fe9e6f
--- /dev/null
+++ b/command/agentproxyshared/cache/testing.go
@@ -0,0 +1,114 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package cache
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"math/rand"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/vault/helper/useragent"
+
+	"github.com/hashicorp/vault/api"
+)
+
+// mockProxier is a mock implementation of the Proxier interface, used for testing purposes.
+// The mock returns the provided responses in order, one per call to Send, and errors once
+// they are exhausted. This lets tests control what the next/underlying Proxier layer
+// would return.
+type mockProxier struct {
+	proxiedResponses []*SendResponse
+	responseIndex    int
+}
+
+func NewMockProxier(responses []*SendResponse) *mockProxier {
+	return &mockProxier{
+		proxiedResponses: responses,
+	}
+}
+
+func (p *mockProxier) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) {
+	if p.responseIndex >= len(p.proxiedResponses) {
+		return nil, fmt.Errorf("index out of bounds: responseIndex = %d, responses = %d", p.responseIndex, len(p.proxiedResponses))
+	}
+	resp := p.proxiedResponses[p.responseIndex]
+
+	p.responseIndex++
+
+	return resp, nil
+}
+
+func (p *mockProxier) ResponseIndex() int {
+	return p.responseIndex
+}
+
+func newTestSendResponse(status int, body string) *SendResponse {
+	headers := make(http.Header)
+	headers.Add("User-Agent", useragent.AgentProxyString())
+	resp := &SendResponse{
+		Response: &api.Response{
+			Response: &http.Response{
+				StatusCode: status,
+				Header:     headers,
+			},
+		},
+	}
+	resp.Response.Header.Set("Date", time.Now().Format(http.TimeFormat))
+
+	if body != "" {
+		resp.Response.Body = ioutil.NopCloser(strings.NewReader(body))
+		resp.ResponseBody = []byte(body)
+	}
+
+	if json.Valid([]byte(body)) {
+		resp.Response.Header.Set("content-type", "application/json")
+	}
+
+	return resp
+}
+
+type mockTokenVerifierProxier struct {
+	currentToken string
+}
+
+func (p *mockTokenVerifierProxier) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) {
+	p.currentToken = req.Token
+	resp := newTestSendResponse(http.StatusOK,
+		`{"data": {"id": "`+p.currentToken+`"}}`)
+
+	return resp, nil
+}
+
+func (p *mockTokenVerifierProxier) GetCurrentRequestToken() string {
+	return p.currentToken
+}
+
+type mockDelayProxier struct {
+	cacheableResp bool
+	delay         int
+}
+
+func (p *mockDelayProxier) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) {
+	if p.delay > 0 {
+		select {
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		case 
<-time.After(time.Duration(p.delay) * time.Millisecond):
+		}
+	}
+
+	// If this is a cacheable response, we return a unique response every time
+	if p.cacheableResp {
+		rand.Seed(time.Now().Unix())
+		s := fmt.Sprintf(`{"lease_id": "%d", "renewable": true, "data": {"foo": "bar"}}`, rand.Int())
+		return newTestSendResponse(http.StatusOK, s), nil
+	}
+
+	return newTestSendResponse(http.StatusOK, `{"value": "output"}`), nil
+}
diff --git a/command/agentproxyshared/helpers.go b/command/agentproxyshared/helpers.go
new file mode 100644
index 0000000..d148717
--- /dev/null
+++ b/command/agentproxyshared/helpers.go
@@ -0,0 +1,237 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package agentproxyshared
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/command/agentproxyshared/auth"
+	"github.com/hashicorp/vault/command/agentproxyshared/auth/alicloud"
+	"github.com/hashicorp/vault/command/agentproxyshared/auth/approle"
+	"github.com/hashicorp/vault/command/agentproxyshared/auth/aws"
+	"github.com/hashicorp/vault/command/agentproxyshared/auth/azure"
+	"github.com/hashicorp/vault/command/agentproxyshared/auth/cert"
+	"github.com/hashicorp/vault/command/agentproxyshared/auth/cf"
+	"github.com/hashicorp/vault/command/agentproxyshared/auth/gcp"
+	"github.com/hashicorp/vault/command/agentproxyshared/auth/jwt"
+	"github.com/hashicorp/vault/command/agentproxyshared/auth/kerberos"
+	"github.com/hashicorp/vault/command/agentproxyshared/auth/kubernetes"
+	"github.com/hashicorp/vault/command/agentproxyshared/auth/oci"
+	token_file "github.com/hashicorp/vault/command/agentproxyshared/auth/token-file"
+	"github.com/hashicorp/vault/command/agentproxyshared/cache"
+	"github.com/hashicorp/vault/command/agentproxyshared/cache/cacheboltdb"
+	"github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb"
+	"github.com/hashicorp/vault/command/agentproxyshared/cache/keymanager"
+)
+
+// GetAutoAuthMethodFromConfig calls the appropriate NewAutoAuthMethod function, initializing
+// the auto-auth method, based on the auto-auth method type. Returns an error if initialization
+// fails or the method type is invalid.
+func GetAutoAuthMethodFromConfig(autoAuthMethodType string, authConfig *auth.AuthConfig, vaultAddress string) (auth.AuthMethod, error) {
+	switch autoAuthMethodType {
+	case "alicloud":
+		return alicloud.NewAliCloudAuthMethod(authConfig)
+	case "aws":
+		return aws.NewAWSAuthMethod(authConfig)
+	case "azure":
+		return azure.NewAzureAuthMethod(authConfig)
+	case "cert":
+		return cert.NewCertAuthMethod(authConfig)
+	case "cf":
+		return cf.NewCFAuthMethod(authConfig)
+	case "gcp":
+		return gcp.NewGCPAuthMethod(authConfig)
+	case "jwt":
+		return jwt.NewJWTAuthMethod(authConfig)
+	case "kerberos":
+		return kerberos.NewKerberosAuthMethod(authConfig)
+	case "kubernetes":
+		return kubernetes.NewKubernetesAuthMethod(authConfig)
+	case "approle":
+		return approle.NewApproleAuthMethod(authConfig)
+	case "oci":
+		return oci.NewOCIAuthMethod(authConfig, vaultAddress)
+	case "token_file":
+		return token_file.NewTokenFileAuthMethod(authConfig)
+	case "pcf": // Deprecated.
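+		// "pcf" is retained for configurations that still use the old name;
+		// it resolves to the same CF auth method as the "cf" case above.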
+ return cf.NewCFAuthMethod(authConfig) + default: + return nil, errors.New(fmt.Sprintf("unknown auth method %q", autoAuthMethodType)) + } +} + +// PersistConfig contains configuration needed for persistent caching +type PersistConfig struct { + Type string + Path string `hcl:"path"` + KeepAfterImport bool `hcl:"keep_after_import"` + ExitOnErr bool `hcl:"exit_on_err"` + ServiceAccountTokenFile string `hcl:"service_account_token_file"` +} + +// AddPersistentStorageToLeaseCache adds persistence to a lease cache, based on a given PersistConfig +// Returns a close function to be deferred and the old token, if found, or an error +func AddPersistentStorageToLeaseCache(ctx context.Context, leaseCache *cache.LeaseCache, persistConfig *PersistConfig, logger log.Logger) (func() error, string, error) { + if persistConfig == nil { + return nil, "", errors.New("persist config was nil") + } + + if persistConfig.Path == "" { + return nil, "", errors.New("must specify persistent cache path") + } + + // Set AAD based on key protection type + var aad string + var err error + switch persistConfig.Type { + case "kubernetes": + aad, err = getServiceAccountJWT(persistConfig.ServiceAccountTokenFile) + if err != nil { + tokenFileName := persistConfig.ServiceAccountTokenFile + if len(tokenFileName) == 0 { + tokenFileName = "/var/run/secrets/kubernetes.io/serviceaccount/token" + } + return nil, "", fmt.Errorf("failed to read service account token from %s: %w", tokenFileName, err) + } + default: + return nil, "", fmt.Errorf("persistent key protection type %q not supported", persistConfig.Type) + } + + // Check if bolt file exists already + dbFileExists, err := cacheboltdb.DBFileExists(persistConfig.Path) + if err != nil { + return nil, "", fmt.Errorf("failed to check if bolt file exists at path %s: %w", persistConfig.Path, err) + } + if dbFileExists { + // Open the bolt file, but wait to setup Encryption + ps, err := cacheboltdb.NewBoltStorage(&cacheboltdb.BoltStorageConfig{ + Path: persistConfig.Path, + Logger: logger.Named("cacheboltdb"), + }) + if err != nil { + return nil, "", fmt.Errorf("error opening persistent cache %v", err) + } + + // Get the token from bolt for retrieving the encryption key, + // then setup encryption so that restore is possible + token, err := ps.GetRetrievalToken() + if err != nil { + return nil, "", fmt.Errorf("error getting retrieval token from persistent cache: %w", err) + } + + if err := ps.Close(); err != nil { + return nil, "", fmt.Errorf("failed to close persistent cache file after getting retrieval token: %w", err) + } + + km, err := keymanager.NewPassthroughKeyManager(ctx, token) + if err != nil { + return nil, "", fmt.Errorf("failed to configure persistence encryption for cache: %w", err) + } + + // Open the bolt file with the wrapper provided + ps, err = cacheboltdb.NewBoltStorage(&cacheboltdb.BoltStorageConfig{ + Path: persistConfig.Path, + Logger: logger.Named("cacheboltdb"), + Wrapper: km.Wrapper(), + AAD: aad, + }) + if err != nil { + return nil, "", fmt.Errorf("error opening persistent cache with wrapper: %w", err) + } + + // Restore anything in the persistent cache to the memory cache + if err := leaseCache.Restore(ctx, ps); err != nil { + logger.Error(fmt.Sprintf("error restoring in-memory cache from persisted file: %v", err)) + if persistConfig.ExitOnErr { + return nil, "", fmt.Errorf("exiting with error as exit_on_err is set to true") + } + } + logger.Info("loaded memcache from persistent storage") + + // Check for previous auto-auth token + oldTokenBytes, err := 
ps.GetAutoAuthToken(ctx)
+ if err != nil {
+ logger.Error(fmt.Sprintf("error in fetching previous auto-auth token: %v", err))
+ if persistConfig.ExitOnErr {
+ return nil, "", fmt.Errorf("exiting with error as exit_on_err is set to true")
+ }
+ }
+ var previousToken string
+ if len(oldTokenBytes) > 0 {
+ oldToken, err := cachememdb.Deserialize(oldTokenBytes)
+ if err != nil {
+ logger.Error(fmt.Sprintf("error in deserializing previous auto-auth token cache entry: %v", err))
+ if persistConfig.ExitOnErr {
+ return nil, "", fmt.Errorf("exiting with error as exit_on_err is set to true")
+ }
+ }
+ previousToken = oldToken.Token
+ }
+
+ // If keep_after_import is true, set persistent storage layer in
+ // leaseCache, else remove db file
+ if persistConfig.KeepAfterImport {
+ leaseCache.SetPersistentStorage(ps)
+ return ps.Close, previousToken, nil
+ } else {
+ if err := ps.Close(); err != nil {
+ logger.Warn(fmt.Sprintf("failed to close persistent cache file: %s", err))
+ }
+ dbFile := filepath.Join(persistConfig.Path, cacheboltdb.DatabaseFileName)
+ if err := os.Remove(dbFile); err != nil {
+ logger.Error(fmt.Sprintf("failed to remove persistent storage file %s: %v", dbFile, err))
+ if persistConfig.ExitOnErr {
+ return nil, "", fmt.Errorf("exiting with error as exit_on_err is set to true")
+ }
+ }
+ return nil, previousToken, nil
+ }
+ } else {
+ km, err := keymanager.NewPassthroughKeyManager(ctx, nil)
+ if err != nil {
+ return nil, "", fmt.Errorf("failed to configure persistence encryption for cache: %w", err)
+ }
+ ps, err := cacheboltdb.NewBoltStorage(&cacheboltdb.BoltStorageConfig{
+ Path: persistConfig.Path,
+ Logger: logger.Named("cacheboltdb"),
+ Wrapper: km.Wrapper(),
+ AAD: aad,
+ })
+ if err != nil {
+ return nil, "", fmt.Errorf("error creating persistent cache: %w", err)
+ }
+ logger.Info("configured persistent storage", "path", persistConfig.Path)
+
+ // Stash the key material in bolt
+ token, err := km.RetrievalToken(ctx)
+ if err != nil {
+ return nil, "", fmt.Errorf("error getting persistence key: %w", err)
+ }
+ if err := ps.StoreRetrievalToken(token); err != nil {
+ return nil, "", fmt.Errorf("error setting key in persistent cache: %w", err)
+ }
+
+ leaseCache.SetPersistentStorage(ps)
+ return ps.Close, "", nil
+ }
+}
+
+// getServiceAccountJWT attempts to read the service account JWT from the specified token file path.
+// Defaults to using the Kubernetes default service account file path if token file path is empty.
+func getServiceAccountJWT(tokenFile string) (string, error) {
+ if len(tokenFile) == 0 {
+ tokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token"
+ }
+ token, err := os.ReadFile(tokenFile)
+ if err != nil {
+ return "", err
+ }
+ return strings.TrimSpace(string(token)), nil
+}
diff --git a/command/agentproxyshared/helpers_test.go b/command/agentproxyshared/helpers_test.go
new file mode 100644
index 0000000..24fdf1d
--- /dev/null
+++ b/command/agentproxyshared/helpers_test.go
@@ -0,0 +1,92 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package agentproxyshared + +import ( + "context" + "os" + "testing" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared/cache" + "github.com/hashicorp/vault/sdk/helper/logging" +) + +func testNewLeaseCache(t *testing.T, responses []*cache.SendResponse) *cache.LeaseCache { + t.Helper() + + client, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + lc, err := cache.NewLeaseCache(&cache.LeaseCacheConfig{ + Client: client, + BaseContext: context.Background(), + Proxier: cache.NewMockProxier(responses), + Logger: logging.NewVaultLogger(hclog.Trace).Named("cache.leasecache"), + }) + if err != nil { + t.Fatal(err) + } + return lc +} + +func populateTempFile(t *testing.T, name, contents string) *os.File { + t.Helper() + + file, err := os.CreateTemp(t.TempDir(), name) + if err != nil { + t.Fatal(err) + } + + _, err = file.WriteString(contents) + if err != nil { + t.Fatal(err) + } + + err = file.Close() + if err != nil { + t.Fatal(err) + } + + return file +} + +// Test_AddPersistentStorageToLeaseCache Tests that AddPersistentStorageToLeaseCache() correctly +// adds persistent storage to a lease cache +func Test_AddPersistentStorageToLeaseCache(t *testing.T) { + tempDir := t.TempDir() + serviceAccountTokenFile := populateTempFile(t, "proxy-config.hcl", "token") + + persistConfig := &PersistConfig{ + Type: "kubernetes", + Path: tempDir, + KeepAfterImport: false, + ExitOnErr: false, + ServiceAccountTokenFile: serviceAccountTokenFile.Name(), + } + + leaseCache := testNewLeaseCache(t, nil) + if leaseCache.PersistentStorage() != nil { + t.Fatal("persistent storage was available before ours was added") + } + + deferFunc, token, err := AddPersistentStorageToLeaseCache(context.Background(), leaseCache, persistConfig, logging.NewVaultLogger(hclog.Info)) + if err != nil { + t.Fatal(err) + } + + if leaseCache.PersistentStorage() == nil { + t.Fatal("persistent storage was not added") + } + + if token != "" { + t.Fatal("expected token to be empty") + } + + if deferFunc == nil { + t.Fatal("expected deferFunc to not be nil") + } +} diff --git a/command/agentproxyshared/sink/file/file_sink.go b/command/agentproxyshared/sink/file/file_sink.go new file mode 100644 index 0000000..c25d991 --- /dev/null +++ b/command/agentproxyshared/sink/file/file_sink.go @@ -0,0 +1,131 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package file
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ hclog "github.com/hashicorp/go-hclog"
+ uuid "github.com/hashicorp/go-uuid"
+ "github.com/hashicorp/vault/command/agentproxyshared/sink"
+)
+
+// fileSink is a Sink implementation that writes a token to a file
+type fileSink struct {
+ path string
+ mode os.FileMode
+ logger hclog.Logger
+}
+
+// NewFileSink creates a new file sink with the given configuration
+func NewFileSink(conf *sink.SinkConfig) (sink.Sink, error) {
+ if conf.Logger == nil {
+ return nil, errors.New("nil logger provided")
+ }
+
+ conf.Logger.Info("creating file sink")
+
+ f := &fileSink{
+ logger: conf.Logger,
+ mode: 0o640,
+ }
+
+ pathRaw, ok := conf.Config["path"]
+ if !ok {
+ return nil, errors.New("'path' not specified for file sink")
+ }
+ path, ok := pathRaw.(string)
+ if !ok {
+ return nil, errors.New("could not parse 'path' as string")
+ }
+
+ f.path = path
+
+ if modeRaw, ok := conf.Config["mode"]; ok {
+ f.logger.Debug("verifying override for default file sink mode")
+ mode, typeOK := modeRaw.(int)
+ if !typeOK {
+ return nil, errors.New("could not parse 'mode' as integer")
+ }
+
+ if !os.FileMode(mode).IsRegular() {
+ return nil, fmt.Errorf("file mode does not represent a regular file")
+ }
+
+ f.logger.Debug("overriding default file sink", "mode", mode)
+ f.mode = os.FileMode(mode)
+ }
+
+ if err := f.WriteToken(""); err != nil {
+ return nil, fmt.Errorf("error during write check: %w", err)
+ }
+
+ f.logger.Info("file sink configured", "path", f.path, "mode", f.mode)
+
+ return f, nil
+}
+
+// WriteToken implements the Sink interface and writes the token to a path on
+// disk. It writes a temp file in the path's directory and does an atomic
+// rename to ensure consistency. If a blank token is passed in, it performs a
+// write check but does not write a blank value to the final location.
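+//
+// A minimal sketch of that write-then-rename pattern (illustrative names only;
+// the implementation below also handles file modes, write checks, and error
+// wrapping):
+//
+//    tmp, err := os.CreateTemp(filepath.Dir(path), filepath.Base(path)+".tmp.*")
+//    if err != nil {
+//        return err
+//    }
+//    if _, err := tmp.WriteString(token); err != nil {
+//        tmp.Close()
+//        os.Remove(tmp.Name())
+//        return err
+//    }
+//    if err := tmp.Close(); err != nil {
+//        return err
+//    }
+//    return os.Rename(tmp.Name(), path) // rename is atomic on POSIX filesystems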
+func (f *fileSink) WriteToken(token string) error { + f.logger.Trace("enter write_token", "path", f.path) + defer f.logger.Trace("exit write_token", "path", f.path) + + u, err := uuid.GenerateUUID() + if err != nil { + return fmt.Errorf("error generating a uuid during write check: %w", err) + } + + targetDir := filepath.Dir(f.path) + fileName := filepath.Base(f.path) + tmpSuffix := strings.Split(u, "-")[0] + + tmpFile, err := os.OpenFile(filepath.Join(targetDir, fmt.Sprintf("%s.tmp.%s", fileName, tmpSuffix)), os.O_WRONLY|os.O_CREATE, f.mode) + if err != nil { + return fmt.Errorf("error opening temp file in dir %s for writing: %w", targetDir, err) + } + + valToWrite := token + if token == "" { + valToWrite = u + } + + _, err = tmpFile.WriteString(valToWrite) + if err != nil { + // Attempt closing and deleting but ignore any error + tmpFile.Close() + os.Remove(tmpFile.Name()) + return fmt.Errorf("error writing to %s: %w", tmpFile.Name(), err) + } + + err = tmpFile.Close() + if err != nil { + return fmt.Errorf("error closing %s: %w", tmpFile.Name(), err) + } + + // Now, if we were just doing a write check (blank token), remove the file + // and exit; otherwise, atomically rename it + if token == "" { + err = os.Remove(tmpFile.Name()) + if err != nil { + return fmt.Errorf("error removing temp file %s during write check: %w", tmpFile.Name(), err) + } + return nil + } + + err = os.Rename(tmpFile.Name(), f.path) + if err != nil { + return fmt.Errorf("error renaming temp file %s to target file %s: %w", tmpFile.Name(), f.path, err) + } + + f.logger.Info("token written", "path", f.path) + return nil +} diff --git a/command/agentproxyshared/sink/file/file_sink_test.go b/command/agentproxyshared/sink/file/file_sink_test.go new file mode 100644 index 0000000..95db8df --- /dev/null +++ b/command/agentproxyshared/sink/file/file_sink_test.go @@ -0,0 +1,147 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package file + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + + hclog "github.com/hashicorp/go-hclog" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/sdk/helper/logging" +) + +const ( + fileServerTestDir = "vault-agent-file-test" +) + +func testFileSink(t *testing.T, log hclog.Logger) (*sink.SinkConfig, string) { + tmpDir, err := ioutil.TempDir("", fmt.Sprintf("%s.", fileServerTestDir)) + if err != nil { + t.Fatal(err) + } + + path := filepath.Join(tmpDir, "token") + + config := &sink.SinkConfig{ + Logger: log.Named("sink.file"), + Config: map[string]interface{}{ + "path": path, + }, + } + + s, err := NewFileSink(config) + if err != nil { + t.Fatal(err) + } + config.Sink = s + + return config, tmpDir +} + +func TestFileSink(t *testing.T) { + log := logging.NewVaultLogger(hclog.Trace) + + fs, tmpDir := testFileSink(t, log) + defer os.RemoveAll(tmpDir) + + path := filepath.Join(tmpDir, "token") + + uuidStr, _ := uuid.GenerateUUID() + if err := fs.WriteToken(uuidStr); err != nil { + t.Fatal(err) + } + + file, err := os.Open(path) + if err != nil { + t.Fatal(err) + } + + fi, err := file.Stat() + if err != nil { + t.Fatal(err) + } + if fi.Mode() != os.FileMode(0o640) { + t.Fatalf("wrong file mode was detected at %s", path) + } + err = file.Close() + if err != nil { + t.Fatal(err) + } + + fileBytes, err := ioutil.ReadFile(path) + if err != nil { + t.Fatal(err) + } + + if string(fileBytes) != uuidStr { + t.Fatalf("expected %s, got %s", uuidStr, string(fileBytes)) + } +} + +func testFileSinkMode(t *testing.T, log hclog.Logger) (*sink.SinkConfig, string) { + tmpDir, err := ioutil.TempDir("", fmt.Sprintf("%s.", fileServerTestDir)) + if err != nil { + t.Fatal(err) + } + + path := filepath.Join(tmpDir, "token") + + config := &sink.SinkConfig{ + Logger: log.Named("sink.file"), + Config: map[string]interface{}{ + "path": path, + "mode": 0o644, + }, + } + + s, err := NewFileSink(config) + if err != nil { + t.Fatal(err) + } + config.Sink = s + + return config, tmpDir +} + +func TestFileSinkMode(t *testing.T) { + log := logging.NewVaultLogger(hclog.Trace) + + fs, tmpDir := testFileSinkMode(t, log) + defer os.RemoveAll(tmpDir) + + path := filepath.Join(tmpDir, "token") + + uuidStr, _ := uuid.GenerateUUID() + if err := fs.WriteToken(uuidStr); err != nil { + t.Fatal(err) + } + + file, err := os.Open(path) + if err != nil { + t.Fatal(err) + } + defer file.Close() + + fi, err := file.Stat() + if err != nil { + t.Fatal(err) + } + if fi.Mode() != os.FileMode(0o644) { + t.Fatalf("wrong file mode was detected at %s", path) + } + + fileBytes, err := ioutil.ReadFile(path) + if err != nil { + t.Fatal(err) + } + + if string(fileBytes) != uuidStr { + t.Fatalf("expected %s, got %s", uuidStr, string(fileBytes)) + } +} diff --git a/command/agentproxyshared/sink/file/sink_test.go b/command/agentproxyshared/sink/file/sink_test.go new file mode 100644 index 0000000..de07400 --- /dev/null +++ b/command/agentproxyshared/sink/file/sink_test.go @@ -0,0 +1,141 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package file + +import ( + "context" + "errors" + "fmt" + "io/ioutil" + "os" + "sync/atomic" + "testing" + "time" + + hclog "github.com/hashicorp/go-hclog" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/sdk/helper/logging" +) + +func TestSinkServer(t *testing.T) { + log := logging.NewVaultLogger(hclog.Trace) + + fs1, path1 := testFileSink(t, log) + defer os.RemoveAll(path1) + fs2, path2 := testFileSink(t, log) + defer os.RemoveAll(path2) + + ctx, cancelFunc := context.WithCancel(context.Background()) + + ss := sink.NewSinkServer(&sink.SinkServerConfig{ + Logger: log.Named("sink.server"), + }) + + uuidStr, _ := uuid.GenerateUUID() + in := make(chan string) + sinks := []*sink.SinkConfig{fs1, fs2} + errCh := make(chan error) + go func() { + errCh <- ss.Run(ctx, in, sinks) + }() + + // Seed a token + in <- uuidStr + + // Tell it to shut down and give it time to do so + timer := time.AfterFunc(3*time.Second, func() { + cancelFunc() + }) + defer timer.Stop() + + select { + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } + + for _, path := range []string{path1, path2} { + fileBytes, err := ioutil.ReadFile(fmt.Sprintf("%s/token", path)) + if err != nil { + t.Fatal(err) + } + + if string(fileBytes) != uuidStr { + t.Fatalf("expected %s, got %s", uuidStr, string(fileBytes)) + } + } +} + +type badSink struct { + tryCount uint32 + logger hclog.Logger +} + +func (b *badSink) WriteToken(token string) error { + switch token { + case "bad": + atomic.AddUint32(&b.tryCount, 1) + b.logger.Info("got bad") + return errors.New("bad") + case "good": + atomic.StoreUint32(&b.tryCount, 0) + b.logger.Info("got good") + return nil + default: + return errors.New("unknown case") + } +} + +func TestSinkServerRetry(t *testing.T) { + log := logging.NewVaultLogger(hclog.Trace) + + b1 := &badSink{logger: log.Named("b1")} + b2 := &badSink{logger: log.Named("b2")} + + ctx, cancelFunc := context.WithCancel(context.Background()) + + ss := sink.NewSinkServer(&sink.SinkServerConfig{ + Logger: log.Named("sink.server"), + }) + + in := make(chan string) + sinks := []*sink.SinkConfig{{Sink: b1}, {Sink: b2}} + errCh := make(chan error) + go func() { + errCh <- ss.Run(ctx, in, sinks) + }() + + // Seed a token + in <- "bad" + + // During this time we should see it retry multiple times + time.Sleep(10 * time.Second) + if atomic.LoadUint32(&b1.tryCount) < 2 { + t.Fatal("bad try count") + } + if atomic.LoadUint32(&b2.tryCount) < 2 { + t.Fatal("bad try count") + } + + in <- "good" + + time.Sleep(2 * time.Second) + if atomic.LoadUint32(&b1.tryCount) != 0 { + t.Fatal("bad try count") + } + if atomic.LoadUint32(&b2.tryCount) != 0 { + t.Fatal("bad try count") + } + + // Tell it to shut down and give it time to do so + cancelFunc() + select { + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + } +} diff --git a/command/agentproxyshared/sink/inmem/inmem_sink.go b/command/agentproxyshared/sink/inmem/inmem_sink.go new file mode 100644 index 0000000..e5804d8 --- /dev/null +++ b/command/agentproxyshared/sink/inmem/inmem_sink.go @@ -0,0 +1,48 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package inmem + +import ( + "errors" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/command/agentproxyshared/cache" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "go.uber.org/atomic" +) + +// inmemSink retains the auto-auth token in memory and exposes it via +// sink.SinkReader interface. +type inmemSink struct { + logger hclog.Logger + token *atomic.String + leaseCache *cache.LeaseCache +} + +// New creates a new instance of inmemSink. +func New(conf *sink.SinkConfig, leaseCache *cache.LeaseCache) (sink.Sink, error) { + if conf.Logger == nil { + return nil, errors.New("nil logger provided") + } + + return &inmemSink{ + logger: conf.Logger, + leaseCache: leaseCache, + token: atomic.NewString(""), + }, nil +} + +func (s *inmemSink) WriteToken(token string) error { + s.token.Store(token) + + if s.leaseCache != nil { + s.leaseCache.RegisterAutoAuthToken(token) + } + + return nil +} + +func (s *inmemSink) Token() string { + return s.token.Load() +} diff --git a/command/agentproxyshared/sink/mock/mock_sink.go b/command/agentproxyshared/sink/mock/mock_sink.go new file mode 100644 index 0000000..c39baf9 --- /dev/null +++ b/command/agentproxyshared/sink/mock/mock_sink.go @@ -0,0 +1,27 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package mock + +import ( + "github.com/hashicorp/vault/command/agentproxyshared/sink" +) + +type mockSink struct { + token string +} + +func NewSink(token string) sink.Sink { + return &mockSink{ + token: token, + } +} + +func (m *mockSink) WriteToken(token string) error { + m.token = token + return nil +} + +func (m *mockSink) Token() string { + return m.token +} diff --git a/command/agentproxyshared/sink/sink.go b/command/agentproxyshared/sink/sink.go new file mode 100644 index 0000000..2b64c17 --- /dev/null +++ b/command/agentproxyshared/sink/sink.go @@ -0,0 +1,270 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package sink + +import ( + "context" + "errors" + "fmt" + "io/ioutil" + "math/rand" + "os" + "sync/atomic" + "time" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/dhutil" + "github.com/hashicorp/vault/sdk/helper/jsonutil" +) + +type Sink interface { + WriteToken(string) error +} + +type SinkReader interface { + Token() string +} + +type SinkConfig struct { + Sink + Logger hclog.Logger + Config map[string]interface{} + Client *api.Client + WrapTTL time.Duration + DHType string + DHPath string + DeriveKey bool + AAD string + cachedRemotePubKey []byte + cachedPubKey []byte + cachedPriKey []byte +} + +type SinkServerConfig struct { + Logger hclog.Logger + Client *api.Client + Context context.Context + ExitAfterAuth bool +} + +// SinkServer is responsible for pushing tokens to sinks +type SinkServer struct { + logger hclog.Logger + client *api.Client + random *rand.Rand + exitAfterAuth bool + remaining *int32 +} + +func NewSinkServer(conf *SinkServerConfig) *SinkServer { + ss := &SinkServer{ + logger: conf.Logger, + client: conf.Client, + random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), + exitAfterAuth: conf.ExitAfterAuth, + remaining: new(int32), + } + + return ss +} + +// Run executes the server's run loop, which is responsible for reading +// in new tokens and pushing them out to the various sinks. 
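+//
+// A minimal usage sketch (hypothetical wiring; the real callers live in the
+// agent and proxy commands, and ctx, logger, sinks, and token come from the
+// surrounding setup):
+//
+//    ss := NewSinkServer(&SinkServerConfig{Logger: logger})
+//    in := make(chan string)
+//    errCh := make(chan error, 1)
+//    go func() { errCh <- ss.Run(ctx, in, sinks) }()
+//    in <- token // the latest token is fanned out to every configured sink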
+func (ss *SinkServer) Run(ctx context.Context, incoming chan string, sinks []*SinkConfig) error { + latestToken := new(string) + writeSink := func(currSink *SinkConfig, currToken string) error { + if currToken != *latestToken { + return nil + } + var err error + + if currSink.WrapTTL != 0 { + if currToken, err = currSink.wrapToken(ss.client, currSink.WrapTTL, currToken); err != nil { + return err + } + } + + if currSink.DHType != "" { + if currToken, err = currSink.encryptToken(currToken); err != nil { + return err + } + } + + return currSink.WriteToken(currToken) + } + + if incoming == nil { + return errors.New("sink server: incoming channel is nil") + } + + ss.logger.Info("starting sink server") + defer func() { + ss.logger.Info("sink server stopped") + }() + + type sinkToken struct { + sink *SinkConfig + token string + } + sinkCh := make(chan sinkToken, len(sinks)) + for { + select { + case <-ctx.Done(): + return nil + + case token := <-incoming: + if len(sinks) > 0 { + if token != *latestToken { + + // Drain the existing funcs + drainLoop: + for { + select { + case <-sinkCh: + atomic.AddInt32(ss.remaining, -1) + default: + break drainLoop + } + } + + *latestToken = token + + for _, s := range sinks { + atomic.AddInt32(ss.remaining, 1) + sinkCh <- sinkToken{s, token} + } + } + } else { + ss.logger.Trace("no sinks, ignoring new token") + if ss.exitAfterAuth { + ss.logger.Trace("no sinks, exitAfterAuth, bye") + return nil + } + } + case st := <-sinkCh: + atomic.AddInt32(ss.remaining, -1) + select { + case <-ctx.Done(): + return nil + default: + } + + if err := writeSink(st.sink, st.token); err != nil { + backoff := 2*time.Second + time.Duration(ss.random.Int63()%int64(time.Second*2)-int64(time.Second)) + ss.logger.Error("error returned by sink function, retrying", "error", err, "backoff", backoff.String()) + timer := time.NewTimer(backoff) + select { + case <-ctx.Done(): + timer.Stop() + return nil + case <-timer.C: + atomic.AddInt32(ss.remaining, 1) + sinkCh <- st + } + } else { + if atomic.LoadInt32(ss.remaining) == 0 && ss.exitAfterAuth { + return nil + } + } + } + } +} + +func (s *SinkConfig) encryptToken(token string) (string, error) { + var aesKey []byte + var err error + resp := new(dhutil.Envelope) + switch s.DHType { + case "curve25519": + if len(s.cachedRemotePubKey) == 0 { + _, err = os.Lstat(s.DHPath) + if err != nil { + if !os.IsNotExist(err) { + return "", fmt.Errorf("error stat-ing dh parameters file: %w", err) + } + return "", errors.New("no dh parameters file found, and no cached pub key") + } + fileBytes, err := ioutil.ReadFile(s.DHPath) + if err != nil { + return "", fmt.Errorf("error reading file for dh parameters: %w", err) + } + theirPubKey := new(dhutil.PublicKeyInfo) + if err := jsonutil.DecodeJSON(fileBytes, theirPubKey); err != nil { + return "", fmt.Errorf("error decoding public key: %w", err) + } + if len(theirPubKey.Curve25519PublicKey) == 0 { + return "", errors.New("public key is nil") + } + s.cachedRemotePubKey = theirPubKey.Curve25519PublicKey + } + if len(s.cachedPubKey) == 0 { + s.cachedPubKey, s.cachedPriKey, err = dhutil.GeneratePublicPrivateKey() + if err != nil { + return "", fmt.Errorf("error generating pub/pri curve25519 keys: %w", err) + } + } + resp.Curve25519PublicKey = s.cachedPubKey + } + + secret, err := dhutil.GenerateSharedSecret(s.cachedPriKey, s.cachedRemotePubKey) + if err != nil { + return "", fmt.Errorf("error calculating shared key: %w", err) + } + if s.DeriveKey { + aesKey, err = dhutil.DeriveSharedKey(secret, s.cachedPubKey, 
s.cachedRemotePubKey)
+ } else {
+ aesKey = secret
+ }
+
+ if err != nil {
+ return "", fmt.Errorf("error deriving shared key: %w", err)
+ }
+ if len(aesKey) == 0 {
+ return "", errors.New("derived AES key is empty")
+ }
+
+ resp.EncryptedPayload, resp.Nonce, err = dhutil.EncryptAES(aesKey, []byte(token), []byte(s.AAD))
+ if err != nil {
+ return "", fmt.Errorf("error encrypting with shared key: %w", err)
+ }
+ m, err := jsonutil.EncodeJSON(resp)
+ if err != nil {
+ return "", fmt.Errorf("error encoding encrypted payload: %w", err)
+ }
+
+ return string(m), nil
+}
+
+func (s *SinkConfig) wrapToken(client *api.Client, wrapTTL time.Duration, token string) (string, error) {
+ wrapClient, err := client.CloneWithHeaders()
+ if err != nil {
+ return "", fmt.Errorf("error deriving client for wrapping, not writing out to sink: %w", err)
+ }
+
+ wrapClient.SetToken(token)
+ wrapClient.SetWrappingLookupFunc(func(string, string) string {
+ return wrapTTL.String()
+ })
+
+ secret, err := wrapClient.Logical().Write("sys/wrapping/wrap", map[string]interface{}{
+ "token": token,
+ })
+ if err != nil {
+ return "", fmt.Errorf("error wrapping token, not writing out to sink: %w", err)
+ }
+ if secret == nil {
+ return "", errors.New("nil secret returned, not writing out to sink")
+ }
+ if secret.WrapInfo == nil {
+ return "", errors.New("nil wrap info returned, not writing out to sink")
+ }
+
+ m, err := jsonutil.EncodeJSON(secret.WrapInfo)
+ if err != nil {
+ return "", fmt.Errorf("error marshaling token, not writing out to sink: %w", err)
+ }
+
+ return string(m), nil
+}
diff --git a/command/agentproxyshared/winsvc/service.go b/command/agentproxyshared/winsvc/service.go
new file mode 100644
index 0000000..edd234e
--- /dev/null
+++ b/command/agentproxyshared/winsvc/service.go
@@ -0,0 +1,12 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package winsvc
+
+var chanGraceExit = make(chan int)
+
+// ShutdownChannel returns a channel that sends a message that a shutdown
+// signal has been received for the service.
+func ShutdownChannel() <-chan int {
+ return chanGraceExit
+}
diff --git a/command/agentproxyshared/winsvc/service_windows.go b/command/agentproxyshared/winsvc/service_windows.go
new file mode 100644
index 0000000..bb16bf9
--- /dev/null
+++ b/command/agentproxyshared/winsvc/service_windows.go
@@ -0,0 +1,46 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+//go:build windows
+
+package winsvc
+
+import (
+ wsvc "golang.org/x/sys/windows/svc"
+)
+
+type serviceWindows struct{}
+
+func init() {
+ interactive, err := wsvc.IsAnInteractiveSession()
+ if err != nil {
+ panic(err)
+ }
+ // Cannot run as a service when running interactively
+ if interactive {
+ return
+ }
+ go wsvc.Run("", serviceWindows{})
+}
+
+// Execute implements the Windows service Handler type. It will be
+// called at the start of the service, and the service will exit
+// once Execute completes.
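+//
+// The handler reports StartPending and then Running, answers Interrogate
+// requests, and on a Stop or Shutdown control reports StopPending and signals
+// the rest of the process through the channel returned by ShutdownChannel().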
+func (serviceWindows) Execute(args []string, r <-chan wsvc.ChangeRequest, s chan<- wsvc.Status) (svcSpecificEC bool, exitCode uint32) { + const accCommands = wsvc.AcceptStop | wsvc.AcceptShutdown + s <- wsvc.Status{State: wsvc.StartPending} + s <- wsvc.Status{State: wsvc.Running, Accepts: accCommands} + for { + c := <-r + switch c.Cmd { + case wsvc.Interrogate: + s <- c.CurrentStatus + case wsvc.Stop, wsvc.Shutdown: + s <- wsvc.Status{State: wsvc.StopPending} + chanGraceExit <- 1 + return false, 0 + } + } + + return false, 0 +} diff --git a/command/approle_concurrency_integ_test.go b/command/approle_concurrency_integ_test.go new file mode 100644 index 0000000..934f8b3 --- /dev/null +++ b/command/approle_concurrency_integ_test.go @@ -0,0 +1,95 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "context" + "sync" + "testing" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + auth "github.com/hashicorp/vault/api/auth/approle" + credAppRole "github.com/hashicorp/vault/builtin/credential/approle" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +func TestAppRole_Integ_ConcurrentLogins(t *testing.T) { + var err error + coreConfig := &vault.CoreConfig{ + DisableMlock: true, + DisableCache: true, + Logger: log.NewNullLogger(), + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + + vault.TestWaitActive(t, cores[0].Core) + + client := cores[0].Client + + err = client.Sys().EnableAuthWithOptions("approle", &api.EnableAuthOptions{ + Type: "approle", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Write("auth/approle/role/role1", map[string]interface{}{ + "bind_secret_id": "true", + "period": "300", + }) + if err != nil { + t.Fatal(err) + } + + secret, err := client.Logical().Write("auth/approle/role/role1/secret-id", nil) + if err != nil { + t.Fatal(err) + } + secretID := secret.Data["secret_id"].(string) + + secret, err = client.Logical().Read("auth/approle/role/role1/role-id") + if err != nil { + t.Fatal(err) + } + roleID := secret.Data["role_id"].(string) + + wg := &sync.WaitGroup{} + + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer wg.Done() + appRoleAuth, err := auth.NewAppRoleAuth(roleID, &auth.SecretID{FromString: secretID}) + if err != nil { + t.Error(err) + return + } + secret, err := client.Auth().Login(context.TODO(), appRoleAuth) + if err != nil { + t.Error(err) + return + } + if secret.Auth.ClientToken == "" { + t.Error("expected a successful login") + return + } + }() + + } + wg.Wait() +} diff --git a/command/audit.go b/command/audit.go new file mode 100644 index 0000000..606de73 --- /dev/null +++ b/command/audit.go @@ -0,0 +1,49 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + + "github.com/mitchellh/cli" +) + +var _ cli.Command = (*AuditCommand)(nil) + +type AuditCommand struct { + *BaseCommand +} + +func (c *AuditCommand) Synopsis() string { + return "Interact with audit devices" +} + +func (c *AuditCommand) Help() string { + helpText := ` +Usage: vault audit [options] [args] + + This command groups subcommands for interacting with Vault's audit devices. 
+ Users can list, enable, and disable audit devices.
+
+ *NOTE*: Once an audit device has been enabled, failure to audit could prevent
+ Vault from servicing future requests. It is highly recommended that you enable
+ multiple audit devices.
+
+ List all enabled audit devices:
+
+ $ vault audit list
+
+ Enable a new audit device "file":
+
+ $ vault audit enable file file_path=/var/log/audit.log
+
+ Please see the individual subcommand help for detailed usage information.
+`
+
+ return strings.TrimSpace(helpText)
+}
+
+func (c *AuditCommand) Run(args []string) int {
+ return cli.RunResultHelp
+}
diff --git a/command/audit_disable.go b/command/audit_disable.go
new file mode 100644
index 0000000..ef9288b
--- /dev/null
+++ b/command/audit_disable.go
@@ -0,0 +1,92 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/mitchellh/cli"
+ "github.com/posener/complete"
+)
+
+var (
+ _ cli.Command = (*AuditDisableCommand)(nil)
+ _ cli.CommandAutocomplete = (*AuditDisableCommand)(nil)
+)
+
+type AuditDisableCommand struct {
+ *BaseCommand
+}
+
+func (c *AuditDisableCommand) Synopsis() string {
+ return "Disables an audit device"
+}
+
+func (c *AuditDisableCommand) Help() string {
+ helpText := `
+Usage: vault audit disable [options] PATH
+
+ Disables an audit device. Once an audit device is disabled, no future audit
+ logs are dispatched to it. The data associated with the audit device is not
+ affected.
+
+ The argument corresponds to the PATH of the audit device, not the TYPE!
+
+ Disable the audit device enabled at "file/":
+
+ $ vault audit disable file/
+
+` + c.Flags().Help()
+
+ return strings.TrimSpace(helpText)
+}
+
+func (c *AuditDisableCommand) Flags() *FlagSets {
+ return c.flagSet(FlagSetHTTP)
+}
+
+func (c *AuditDisableCommand) AutocompleteArgs() complete.Predictor {
+ return c.PredictVaultAudits()
+}
+
+func (c *AuditDisableCommand) AutocompleteFlags() complete.Flags {
+ return c.Flags().Completions()
+}
+
+func (c *AuditDisableCommand) Run(args []string) int {
+ f := c.Flags()
+
+ if err := f.Parse(args); err != nil {
+ c.UI.Error(err.Error())
+ return 1
+ }
+
+ args = f.Args()
+ switch {
+ case len(args) < 1:
+ c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args)))
+ return 1
+ case len(args) > 1:
+ c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args)))
+ return 1
+ }
+
+ path := ensureTrailingSlash(sanitizePath(args[0]))
+
+ client, err := c.Client()
+ if err != nil {
+ c.UI.Error(err.Error())
+ return 2
+ }
+
+ if err := client.Sys().DisableAudit(path); err != nil {
+ c.UI.Error(fmt.Sprintf("Error disabling audit device: %s", err))
+ return 2
+ }
+
+ c.UI.Output(fmt.Sprintf("Success! Disabled audit device (if it was enabled) at: %s", path))
+
+ return 0
+}
diff --git a/command/audit_disable_test.go b/command/audit_disable_test.go
new file mode 100644
index 0000000..44b782f
--- /dev/null
+++ b/command/audit_disable_test.go
@@ -0,0 +1,163 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" +) + +func testAuditDisableCommand(tb testing.TB) (*cli.MockUi, *AuditDisableCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &AuditDisableCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestAuditDisableCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + nil, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"foo", "bar", "baz"}, + "Too many arguments", + 1, + }, + { + "not_real", + []string{"not_real"}, + "Success! Disabled audit device (if it was enabled) at: not_real/", + 0, + }, + { + "default", + []string{"file"}, + "Success! Disabled audit device (if it was enabled) at: file/", + 0, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().EnableAuditWithOptions("file", &api.EnableAuditOptions{ + Type: "file", + Options: map[string]string{ + "file_path": "discard", + }, + }); err != nil { + t.Fatal(err) + } + + ui, cmd := testAuditDisableCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + + t.Run("integration", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().EnableAuditWithOptions("integration_audit_disable", &api.EnableAuditOptions{ + Type: "file", + Options: map[string]string{ + "file_path": "discard", + }, + }); err != nil { + t.Fatal(err) + } + + ui, cmd := testAuditDisableCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "integration_audit_disable/", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! Disabled audit device (if it was enabled) at: integration_audit_disable/" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + mounts, err := client.Sys().ListMounts() + if err != nil { + t.Fatal(err) + } + + if _, ok := mounts["integration_audit_disable"]; ok { + t.Errorf("expected mount to not exist: %#v", mounts) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testAuditDisableCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "file", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error disabling audit device: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testAuditDisableCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/audit_enable.go b/command/audit_enable.go new file mode 100644 index 0000000..652c3c2 --- /dev/null +++ b/command/audit_enable.go @@ -0,0 +1,159 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*AuditEnableCommand)(nil) + _ cli.CommandAutocomplete = (*AuditEnableCommand)(nil) +) + +type AuditEnableCommand struct { + *BaseCommand + + flagDescription string + flagPath string + flagLocal bool + + testStdin io.Reader // For tests +} + +func (c *AuditEnableCommand) Synopsis() string { + return "Enables an audit device" +} + +func (c *AuditEnableCommand) Help() string { + helpText := ` +Usage: vault audit enable [options] TYPE [CONFIG K=V...] + + Enables an audit device at a given path. + + This command enables an audit device of TYPE. Additional options for + configuring the audit device can be specified after the type in the same + format as the "vault write" command in key/value pairs. + + For example, to configure the file audit device to write audit logs at the + path "/var/log/audit.log": + + $ vault audit enable file file_path=/var/log/audit.log + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *AuditEnableCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP) + + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "description", + Target: &c.flagDescription, + Default: "", + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "Human-friendly description for the purpose of this audit " + + "device.", + }) + + f.StringVar(&StringVar{ + Name: "path", + Target: &c.flagPath, + Default: "", // The default is complex, so we have to manually document + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "Place where the audit device will be accessible. This must be " + + "unique across all audit devices. This defaults to the \"type\" of the " + + "audit device.", + }) + + f.BoolVar(&BoolVar{ + Name: "local", + Target: &c.flagLocal, + Default: false, + EnvVar: "", + Usage: "Mark the audit device as a local-only device. Local devices " + + "are not replicated or removed by replication.", + }) + + return set +} + +func (c *AuditEnableCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictSet( + "file", + "syslog", + "socket", + ) +} + +func (c *AuditEnableCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *AuditEnableCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + if len(args) < 1 { + c.UI.Error("Error enabling audit device: audit type missing. 
Valid types include 'file', 'socket' and 'syslog'.") + return 1 + } + + // Grab the type + auditType := strings.TrimSpace(args[0]) + + auditPath := c.flagPath + if auditPath == "" { + auditPath = auditType + } + auditPath = ensureTrailingSlash(auditPath) + + // Pull our fake stdin if needed + stdin := (io.Reader)(os.Stdin) + if c.testStdin != nil { + stdin = c.testStdin + } + + options, err := parseArgsDataString(stdin, args[1:]) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to parse K=V data: %s", err)) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + if err := client.Sys().EnableAuditWithOptions(auditPath, &api.EnableAuditOptions{ + Type: auditType, + Description: c.flagDescription, + Options: options, + Local: c.flagLocal, + }); err != nil { + c.UI.Error(fmt.Sprintf("Error enabling audit device: %s", err)) + return 2 + } + + c.UI.Output(fmt.Sprintf("Success! Enabled the %s audit device at: %s", auditType, auditPath)) + return 0 +} diff --git a/command/audit_enable_test.go b/command/audit_enable_test.go new file mode 100644 index 0000000..e7dc4ae --- /dev/null +++ b/command/audit_enable_test.go @@ -0,0 +1,214 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "io/ioutil" + "os" + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func testAuditEnableCommand(tb testing.TB) (*cli.MockUi, *AuditEnableCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &AuditEnableCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestAuditEnableCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "empty", + nil, + "Error enabling audit device: audit type missing. Valid types include 'file', 'socket' and 'syslog'.", + 1, + }, + { + "not_a_valid_type", + []string{"nope_definitely_not_a_valid_type_like_ever"}, + "", + 2, + }, + { + "enable", + []string{"file", "file_path=discard"}, + "Success! Enabled the file audit device at: file/", + 0, + }, + { + "enable_path", + []string{ + "-path", "audit_path", + "file", + "file_path=discard", + }, + "Success! Enabled the file audit device at: audit_path/", + 0, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testAuditEnableCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + + t.Run("integration", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testAuditEnableCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-path", "audit_enable_integration/", + "-description", "The best kind of test", + "file", + "file_path=discard", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! 
Enabled the file audit device at: audit_enable_integration/" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + audits, err := client.Sys().ListAudit() + if err != nil { + t.Fatal(err) + } + + auditInfo, ok := audits["audit_enable_integration/"] + if !ok { + t.Fatalf("expected audit to exist") + } + if exp := "file"; auditInfo.Type != exp { + t.Errorf("expected %q to be %q", auditInfo.Type, exp) + } + if exp := "The best kind of test"; auditInfo.Description != exp { + t.Errorf("expected %q to be %q", auditInfo.Description, exp) + } + + filePath, ok := auditInfo.Options["file_path"] + if !ok || filePath != "discard" { + t.Errorf("missing some options: %#v", auditInfo) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testAuditEnableCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "pki", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error enabling audit device: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testAuditEnableCommand(t) + assertNoTabs(t, cmd) + }) + + t.Run("mount_all", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerAllBackends(t) + defer closer() + + files, err := ioutil.ReadDir("../builtin/audit") + if err != nil { + t.Fatal(err) + } + + var backends []string + for _, f := range files { + if f.IsDir() { + backends = append(backends, f.Name()) + } + } + + for _, b := range backends { + ui, cmd := testAuditEnableCommand(t) + cmd.client = client + + args := []string{ + b, + } + switch b { + case "file": + args = append(args, "file_path=discard") + case "socket": + args = append(args, "address=127.0.0.1:8888", + "skip_test=true") + case "syslog": + if _, exists := os.LookupEnv("WSLENV"); exists { + t.Log("skipping syslog test on WSL") + continue + } + if os.Getenv("CIRCLECI") == "true" { + // TODO install syslog in docker image we run our tests in + t.Log("skipping syslog test on CircleCI") + continue + } + } + code := cmd.Run(args) + if exp := 0; code != exp { + t.Errorf("type %s, expected %d to be %d - %s", b, code, exp, ui.OutputWriter.String()+ui.ErrorWriter.String()) + } + } + }) +} diff --git a/command/audit_list.go b/command/audit_list.go new file mode 100644 index 0000000..e5af852 --- /dev/null +++ b/command/audit_list.go @@ -0,0 +1,171 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "sort" + "strings" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*AuditListCommand)(nil) + _ cli.CommandAutocomplete = (*AuditListCommand)(nil) +) + +type AuditListCommand struct { + *BaseCommand + + flagDetailed bool +} + +func (c *AuditListCommand) Synopsis() string { + return "Lists enabled audit devices" +} + +func (c *AuditListCommand) Help() string { + helpText := ` +Usage: vault audit list [options] + + Lists the enabled audit devices in the Vault server. The output lists the + enabled audit devices and the options for those devices. 
+
+ List all audit devices:
+
+ $ vault audit list
+
+ List detailed output about the audit devices:
+
+ $ vault audit list -detailed
+
+` + c.Flags().Help()
+
+ return strings.TrimSpace(helpText)
+}
+
+func (c *AuditListCommand) Flags() *FlagSets {
+ set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat)
+
+ f := set.NewFlagSet("Command Options")
+
+ f.BoolVar(&BoolVar{
+ Name: "detailed",
+ Target: &c.flagDetailed,
+ Default: false,
+ EnvVar: "",
+ Usage: "Print detailed information such as options and replication " +
+ "status about each audit device.",
+ })
+
+ return set
+}
+
+func (c *AuditListCommand) AutocompleteArgs() complete.Predictor {
+ return nil
+}
+
+func (c *AuditListCommand) AutocompleteFlags() complete.Flags {
+ return c.Flags().Completions()
+}
+
+func (c *AuditListCommand) Run(args []string) int {
+ f := c.Flags()
+
+ if err := f.Parse(args); err != nil {
+ c.UI.Error(err.Error())
+ return 1
+ }
+
+ args = f.Args()
+ if len(args) > 0 {
+ c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(args)))
+ return 1
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.UI.Error(err.Error())
+ return 2
+ }
+
+ audits, err := client.Sys().ListAudit()
+ if err != nil {
+ c.UI.Error(fmt.Sprintf("Error listing audits: %s", err))
+ return 2
+ }
+
+ switch Format(c.UI) {
+ case "table":
+ if len(audits) == 0 {
+ c.UI.Output("No audit devices are enabled.")
+ return 2
+ }
+
+ if c.flagDetailed {
+ c.UI.Output(tableOutput(c.detailedAudits(audits), nil))
+ return 0
+ }
+ c.UI.Output(tableOutput(c.simpleAudits(audits), nil))
+ return 0
+ default:
+ return OutputData(c.UI, audits)
+ }
+}
+
+func (c *AuditListCommand) simpleAudits(audits map[string]*api.Audit) []string {
+ paths := make([]string, 0, len(audits))
+ for path := range audits {
+ paths = append(paths, path)
+ }
+ sort.Strings(paths)
+
+ columns := []string{"Path | Type | Description"}
+ for _, path := range paths {
+ audit := audits[path]
+ columns = append(columns, fmt.Sprintf("%s | %s | %s",
+ audit.Path,
+ audit.Type,
+ audit.Description,
+ ))
+ }
+
+ return columns
+}
+
+func (c *AuditListCommand) detailedAudits(audits map[string]*api.Audit) []string {
+ paths := make([]string, 0, len(audits))
+ for path := range audits {
+ paths = append(paths, path)
+ }
+ sort.Strings(paths)
+
+ columns := []string{"Path | Type | Description | Replication | Options"}
+ for _, path := range paths {
+ audit := audits[path]
+
+ opts := make([]string, 0, len(audit.Options))
+ for k, v := range audit.Options {
+ opts = append(opts, k+"="+v)
+ }
+
+ replication := "replicated"
+ if audit.Local {
+ replication = "local"
+ }
+
+ columns = append(columns, fmt.Sprintf("%s | %s | %s | %s | %s",
+ path,
+ audit.Type,
+ audit.Description,
+ replication,
+ strings.Join(opts, " "),
+ ))
+ }
+
+ return columns
+}
diff --git a/command/audit_list_test.go b/command/audit_list_test.go
new file mode 100644
index 0000000..c2e6eac
--- /dev/null
+++ b/command/audit_list_test.go
@@ -0,0 +1,114 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/hashicorp/vault/api"
+ "github.com/mitchellh/cli"
+)
+
+func testAuditListCommand(tb testing.TB) (*cli.MockUi, *AuditListCommand) {
+ tb.Helper()
+
+ ui := cli.NewMockUi()
+ return ui, &AuditListCommand{
+ BaseCommand: &BaseCommand{
+ UI: ui,
+ },
+ }
+}
+
+func TestAuditListCommand_Run(t *testing.T) {
+ t.Parallel()
+
+ cases := []struct {
+ name string
+ args []string
+ out string
+ code int
+ }{
+ {
+ "too_many_args",
+ []string{"foo"},
+ "Too many arguments",
+ 1,
+ },
+ {
+ "lists",
+ nil,
+ "Path",
+ 0,
+ },
+ {
+ "detailed",
+ []string{"-detailed"},
+ "Options",
+ 0,
+ },
+ }
+
+ for _, tc := range cases {
+ tc := tc
+
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+
+ client, closer := testVaultServer(t)
+ defer closer()
+
+ if err := client.Sys().EnableAuditWithOptions("file", &api.EnableAuditOptions{
+ Type: "file",
+ Options: map[string]string{
+ "file_path": "discard",
+ },
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ ui, cmd := testAuditListCommand(t)
+ cmd.client = client
+
+ code := cmd.Run(tc.args)
+ if code != tc.code {
+ t.Errorf("expected %d to be %d", code, tc.code)
+ }
+
+ combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
+ if !strings.Contains(combined, tc.out) {
+ t.Errorf("expected %q to contain %q", combined, tc.out)
+ }
+ })
+ }
+
+ t.Run("communication_failure", func(t *testing.T) {
+ t.Parallel()
+
+ client, closer := testVaultServerBad(t)
+ defer closer()
+
+ ui, cmd := testAuditListCommand(t)
+ cmd.client = client
+
+ code := cmd.Run([]string{})
+ if exp := 2; code != exp {
+ t.Errorf("expected %d to be %d", code, exp)
+ }
+
+ expected := "Error listing audits: "
+ combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
+ if !strings.Contains(combined, expected) {
+ t.Errorf("expected %q to contain %q", combined, expected)
+ }
+ })
+
+ t.Run("no_tabs", func(t *testing.T) {
+ t.Parallel()
+
+ _, cmd := testAuditListCommand(t)
+ assertNoTabs(t, cmd)
+ })
+}
diff --git a/command/auth.go b/command/auth.go
new file mode 100644
index 0000000..e2bdb81
--- /dev/null
+++ b/command/auth.go
@@ -0,0 +1,52 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+ "strings"
+
+ "github.com/mitchellh/cli"
+)
+
+var _ cli.Command = (*AuthCommand)(nil)
+
+type AuthCommand struct {
+ *BaseCommand
+}
+
+func (c *AuthCommand) Synopsis() string {
+ return "Interact with auth methods"
+}
+
+func (c *AuthCommand) Help() string {
+ return strings.TrimSpace(`
+Usage: vault auth [options] [args]
+
+ This command groups subcommands for interacting with Vault's auth methods.
+ Users can list, enable, disable, and get help for different auth methods.
+
+ To authenticate to Vault as a user or machine, use the "vault login" command
+ instead. This command is for interacting with the auth methods themselves, not
+ authenticating to Vault.
+
+ List all enabled auth methods:
+
+ $ vault auth list
+
+ Enable a new auth method "userpass":
+
+ $ vault auth enable userpass
+
+ Get detailed help information about how to authenticate to a particular auth
+ method:
+
+ $ vault auth help github
+
+ Please see the individual subcommand help for detailed usage information.
+`)
+}
+
+func (c *AuthCommand) Run(args []string) int {
+ return cli.RunResultHelp
+}
diff --git a/command/auth_disable.go b/command/auth_disable.go
new file mode 100644
index 0000000..735103b
--- /dev/null
+++ b/command/auth_disable.go
@@ -0,0 +1,92 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/mitchellh/cli"
+ "github.com/posener/complete"
+)
+
+var (
+ _ cli.Command = (*AuthDisableCommand)(nil)
+ _ cli.CommandAutocomplete = (*AuthDisableCommand)(nil)
+)
+
+type AuthDisableCommand struct {
+ *BaseCommand
+}
+
+func (c *AuthDisableCommand) Synopsis() string {
+ return "Disables an auth method"
+}
+
+func (c *AuthDisableCommand) Help() string {
+ helpText := `
+Usage: vault auth disable [options] PATH
+
+ Disables an existing auth method at the given PATH. The argument corresponds
+ to the PATH of the mount, not the TYPE! Once the auth method is disabled, its
+ path can no longer be used to authenticate.
+
+ All access tokens generated via the disabled auth method are immediately
+ revoked. This command will block until all tokens are revoked.
+
+ Disable the auth method at userpass/:
+
+ $ vault auth disable userpass/
+
+` + c.Flags().Help()
+
+ return strings.TrimSpace(helpText)
+}
+
+func (c *AuthDisableCommand) Flags() *FlagSets {
+ return c.flagSet(FlagSetHTTP)
+}
+
+func (c *AuthDisableCommand) AutocompleteArgs() complete.Predictor {
+ return c.PredictVaultAuths()
+}
+
+func (c *AuthDisableCommand) AutocompleteFlags() complete.Flags {
+ return c.Flags().Completions()
+}
+
+func (c *AuthDisableCommand) Run(args []string) int {
+ f := c.Flags()
+
+ if err := f.Parse(args); err != nil {
+ c.UI.Error(err.Error())
+ return 1
+ }
+
+ args = f.Args()
+ switch {
+ case len(args) < 1:
+ c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args)))
+ return 1
+ case len(args) > 1:
+ c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args)))
+ return 1
+ }
+
+ path := ensureTrailingSlash(sanitizePath(args[0]))
+
+ client, err := c.Client()
+ if err != nil {
+ c.UI.Error(err.Error())
+ return 2
+ }
+
+ if err := client.Sys().DisableAuth(path); err != nil {
+ c.UI.Error(fmt.Sprintf("Error disabling auth method at %s: %s", path, err))
+ return 2
+ }
+
+ c.UI.Output(fmt.Sprintf("Success! Disabled the auth method (if it existed) at: %s", path))
+ return 0
+}
diff --git a/command/auth_disable_test.go b/command/auth_disable_test.go
new file mode 100644
index 0000000..385bc4e
--- /dev/null
+++ b/command/auth_disable_test.go
@@ -0,0 +1,140 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func testAuthDisableCommand(tb testing.TB) (*cli.MockUi, *AuthDisableCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &AuthDisableCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestAuthDisableCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + nil, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"foo", "bar"}, + "Too many arguments", + 1, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testAuthDisableCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("integration", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().EnableAuth("my-auth", "userpass", ""); err != nil { + t.Fatal(err) + } + + ui, cmd := testAuthDisableCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "my-auth", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! Disabled the auth method" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + auths, err := client.Sys().ListAuth() + if err != nil { + t.Fatal(err) + } + + if auth, ok := auths["my-auth/"]; ok { + t.Errorf("expected auth to be disabled: %#v", auth) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testAuthDisableCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "my-auth", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error disabling auth method at my-auth/: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testAuthDisableCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/auth_enable.go b/command/auth_enable.go new file mode 100644 index 0000000..7c7af55 --- /dev/null +++ b/command/auth_enable.go @@ -0,0 +1,331 @@ +// Copyright (c) HashiCorp, Inc. 
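Each of these command tests follows the same harness pattern: wrap the command around a cli.MockUi, run it, and assert on the combined output buffers. A self-contained sketch of that pattern, where fakeCommand is a hypothetical stand-in for any of the command types in this package:

package main

import (
	"fmt"
	"strings"

	"github.com/mitchellh/cli"
)

// fakeCommand is hypothetical; it stands in for AuthDisableCommand et al.
type fakeCommand struct {
	ui cli.Ui
}

func (c *fakeCommand) Run(args []string) int {
	if len(args) > 0 {
		c.ui.Error("Too many arguments")
		return 1
	}
	c.ui.Output("ok")
	return 0
}

func main() {
	ui := cli.NewMockUi()
	cmd := &fakeCommand{ui: ui}

	code := cmd.Run([]string{"unexpected"})

	// The real tests compare these against per-case expectations.
	combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
	fmt.Println(code == 1, strings.Contains(combined, "Too many arguments"))
}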
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"flag"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/vault/api"
+	"github.com/mitchellh/cli"
+	"github.com/posener/complete"
+)
+
+var (
+	_ cli.Command             = (*AuthEnableCommand)(nil)
+	_ cli.CommandAutocomplete = (*AuthEnableCommand)(nil)
+)
+
+type AuthEnableCommand struct {
+	*BaseCommand
+
+	flagDescription               string
+	flagPath                      string
+	flagDefaultLeaseTTL           time.Duration
+	flagMaxLeaseTTL               time.Duration
+	flagAuditNonHMACRequestKeys   []string
+	flagAuditNonHMACResponseKeys  []string
+	flagListingVisibility         string
+	flagPluginName                string
+	flagPassthroughRequestHeaders []string
+	flagAllowedResponseHeaders    []string
+	flagOptions                   map[string]string
+	flagLocal                     bool
+	flagSealWrap                  bool
+	flagExternalEntropyAccess     bool
+	flagTokenType                 string
+	flagVersion                   int
+	flagPluginVersion             string
+}
+
+func (c *AuthEnableCommand) Synopsis() string {
+	return "Enables a new auth method"
+}
+
+func (c *AuthEnableCommand) Help() string {
+	helpText := `
+Usage: vault auth enable [options] TYPE
+
+  Enables a new auth method. An auth method is responsible for authenticating
+  users or machines and assigning them policies with which they can access
+  Vault.
+
+  Enable the userpass auth method at userpass/:
+
+      $ vault auth enable userpass
+
+  Enable the LDAP auth method at auth-prod/:
+
+      $ vault auth enable -path=auth-prod ldap
+
+  Enable a custom auth plugin (after it's registered in the plugin registry):
+
+      $ vault auth enable -path=my-auth -plugin-name=my-auth-plugin plugin
+
+      OR (preferred way):
+
+      $ vault auth enable -path=my-auth my-auth-plugin
+
+` + c.Flags().Help()
+
+	return strings.TrimSpace(helpText)
+}
+
+func (c *AuthEnableCommand) Flags() *FlagSets {
+	set := c.flagSet(FlagSetHTTP)
+
+	f := set.NewFlagSet("Command Options")
+
+	f.StringVar(&StringVar{
+		Name:       "description",
+		Target:     &c.flagDescription,
+		Completion: complete.PredictAnything,
+		Usage: "Human-friendly description for the purpose of this " +
+			"auth method.",
+	})
+
+	f.StringVar(&StringVar{
+		Name:       "path",
+		Target:     &c.flagPath,
+		Default:    "", // The default is complex, so we have to manually document
+		Completion: complete.PredictAnything,
+		Usage: "Place where the auth method will be accessible. This must be " +
+			"unique across all auth methods. This defaults to the \"type\" of " +
+			"the auth method. The auth method will be accessible at " +
+			"\"/auth/<path>\".",
+	})
+
+	f.DurationVar(&DurationVar{
+		Name:       "default-lease-ttl",
+		Target:     &c.flagDefaultLeaseTTL,
+		Completion: complete.PredictAnything,
+		Usage: "The default lease TTL for this auth method. If unspecified, " +
+			"this defaults to the Vault server's globally configured default lease " +
+			"TTL.",
+	})
+
+	f.DurationVar(&DurationVar{
+		Name:       "max-lease-ttl",
+		Target:     &c.flagMaxLeaseTTL,
+		Completion: complete.PredictAnything,
+		Usage: "The maximum lease TTL for this auth method. If unspecified, " +
+			"this defaults to the Vault server's globally configured maximum lease " +
+			"TTL.",
+	})
+
+	f.StringSliceVar(&StringSliceVar{
+		Name:   flagNameAuditNonHMACRequestKeys,
+		Target: &c.flagAuditNonHMACRequestKeys,
+		Usage: "Key that will not be HMAC'd by audit devices in the request data object. " +
+			"To specify multiple values, specify this flag multiple times.",
+	})
+
+	f.StringSliceVar(&StringSliceVar{
+		Name:   flagNameAuditNonHMACResponseKeys,
+		Target: &c.flagAuditNonHMACResponseKeys,
+		Usage: "Key that will not be HMAC'd by audit devices in the response data object. 
" + + "To specify multiple values, specify this flag multiple times.", + }) + + f.StringVar(&StringVar{ + Name: flagNameListingVisibility, + Target: &c.flagListingVisibility, + Usage: "Determines the visibility of the mount in the UI-specific listing endpoint.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: flagNamePassthroughRequestHeaders, + Target: &c.flagPassthroughRequestHeaders, + Usage: "Request header value that will be sent to the plugin. To specify multiple " + + "values, specify this flag multiple times.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: flagNameAllowedResponseHeaders, + Target: &c.flagAllowedResponseHeaders, + Usage: "Response header value that plugins will be allowed to set. To specify multiple " + + "values, specify this flag multiple times.", + }) + + f.StringVar(&StringVar{ + Name: "plugin-name", + Target: &c.flagPluginName, + Completion: c.PredictVaultPlugins(api.PluginTypeCredential), + Usage: "Name of the auth method plugin. This plugin name must already " + + "exist in the Vault server's plugin catalog.", + }) + + f.StringMapVar(&StringMapVar{ + Name: "options", + Target: &c.flagOptions, + Completion: complete.PredictAnything, + Usage: "Key-value pair provided as key=value for the mount options. " + + "This can be specified multiple times.", + }) + + f.BoolVar(&BoolVar{ + Name: "local", + Target: &c.flagLocal, + Default: false, + Usage: "Mark the auth method as local-only. Local auth methods are " + + "not replicated nor removed by replication.", + }) + + f.BoolVar(&BoolVar{ + Name: "seal-wrap", + Target: &c.flagSealWrap, + Default: false, + Usage: "Enable seal wrapping of critical values in the secrets engine.", + }) + + f.BoolVar(&BoolVar{ + Name: "external-entropy-access", + Target: &c.flagExternalEntropyAccess, + Default: false, + Usage: "Enable auth method to access Vault's external entropy source.", + }) + + f.StringVar(&StringVar{ + Name: flagNameTokenType, + Target: &c.flagTokenType, + Usage: "Sets a forced token type for the mount.", + }) + + f.IntVar(&IntVar{ + Name: "version", + Target: &c.flagVersion, + Default: 0, + Usage: "Select the version of the auth method to run. 
Not supported by all auth methods.", + }) + + f.StringVar(&StringVar{ + Name: flagNamePluginVersion, + Target: &c.flagPluginVersion, + Default: "", + Usage: "Select the semantic version of the plugin to enable.", + }) + + return set +} + +func (c *AuthEnableCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultAvailableAuths() +} + +func (c *AuthEnableCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *AuthEnableCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + authType := strings.TrimSpace(args[0]) + if authType == "plugin" { + authType = c.flagPluginName + } + + // If no path is specified, we default the path to the backend type + // or use the plugin name if it's a plugin backend + authPath := c.flagPath + if authPath == "" { + if authType == "plugin" { + authPath = c.flagPluginName + } else { + authPath = authType + } + } + + // Append a trailing slash to indicate it's a path in output + authPath = ensureTrailingSlash(authPath) + + if c.flagVersion > 0 { + if c.flagOptions == nil { + c.flagOptions = make(map[string]string) + } + c.flagOptions["version"] = strconv.Itoa(c.flagVersion) + } + + authOpts := &api.EnableAuthOptions{ + Type: authType, + Description: c.flagDescription, + Local: c.flagLocal, + SealWrap: c.flagSealWrap, + ExternalEntropyAccess: c.flagExternalEntropyAccess, + Config: api.AuthConfigInput{ + DefaultLeaseTTL: c.flagDefaultLeaseTTL.String(), + MaxLeaseTTL: c.flagMaxLeaseTTL.String(), + }, + Options: c.flagOptions, + } + + // Set these values only if they are provided in the CLI + f.Visit(func(fl *flag.Flag) { + if fl.Name == flagNameAuditNonHMACRequestKeys { + authOpts.Config.AuditNonHMACRequestKeys = c.flagAuditNonHMACRequestKeys + } + + if fl.Name == flagNameAuditNonHMACResponseKeys { + authOpts.Config.AuditNonHMACResponseKeys = c.flagAuditNonHMACResponseKeys + } + + if fl.Name == flagNameListingVisibility { + authOpts.Config.ListingVisibility = c.flagListingVisibility + } + + if fl.Name == flagNamePassthroughRequestHeaders { + authOpts.Config.PassthroughRequestHeaders = c.flagPassthroughRequestHeaders + } + + if fl.Name == flagNameAllowedResponseHeaders { + authOpts.Config.AllowedResponseHeaders = c.flagAllowedResponseHeaders + } + + if fl.Name == flagNameTokenType { + authOpts.Config.TokenType = c.flagTokenType + } + + if fl.Name == flagNamePluginVersion { + authOpts.Config.PluginVersion = c.flagPluginVersion + } + }) + + if err := client.Sys().EnableAuthWithOptions(authPath, authOpts); err != nil { + c.UI.Error(fmt.Sprintf("Error enabling %s auth: %s", authType, err)) + return 2 + } + + authThing := authType + " auth method" + if authType == "plugin" { + authThing = c.flagPluginName + " plugin" + } + if c.flagPluginVersion != "" { + authThing += " version " + c.flagPluginVersion + } + c.UI.Output(fmt.Sprintf("Success! 
Enabled %s at: %s", authThing, authPath)) + return 0 +} diff --git a/command/auth_enable_test.go b/command/auth_enable_test.go new file mode 100644 index 0000000..4a4292c --- /dev/null +++ b/command/auth_enable_test.go @@ -0,0 +1,242 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "io/ioutil" + "strings" + "testing" + + "github.com/go-test/deep" + "github.com/hashicorp/vault/helper/builtinplugins" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/mitchellh/cli" +) + +func testAuthEnableCommand(tb testing.TB) (*cli.MockUi, *AuthEnableCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &AuthEnableCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestAuthEnableCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + nil, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"foo", "bar"}, + "Too many arguments", + 1, + }, + { + "not_a_valid_auth", + []string{"nope_definitely_not_a_valid_mount_like_ever"}, + "", + 2, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testAuthEnableCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected command return code to be %d, got %d", tc.code, code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q in response\n got: %+v", tc.out, combined) + } + }) + } + + t.Run("integration", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testAuthEnableCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-path", "auth_integration/", + "-description", "The best kind of test", + "-audit-non-hmac-request-keys", "foo,bar", + "-audit-non-hmac-response-keys", "foo,bar", + "-passthrough-request-headers", "authorization,authentication", + "-passthrough-request-headers", "www-authentication", + "-allowed-response-headers", "authorization", + "-listing-visibility", "unauth", + "userpass", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! Enabled userpass auth method at:" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + auths, err := client.Sys().ListAuth() + if err != nil { + t.Fatal(err) + } + + authInfo, ok := auths["auth_integration/"] + if !ok { + t.Fatalf("expected mount to exist") + } + if exp := "userpass"; authInfo.Type != exp { + t.Errorf("expected %q to be %q", authInfo.Type, exp) + } + if exp := "The best kind of test"; authInfo.Description != exp { + t.Errorf("expected %q to be %q", authInfo.Description, exp) + } + if diff := deep.Equal([]string{"authorization,authentication", "www-authentication"}, authInfo.Config.PassthroughRequestHeaders); len(diff) > 0 { + t.Errorf("Failed to find expected values in PassthroughRequestHeaders. Difference is: %v", diff) + } + if diff := deep.Equal([]string{"authorization"}, authInfo.Config.AllowedResponseHeaders); len(diff) > 0 { + t.Errorf("Failed to find expected values in AllowedResponseHeaders. 
Difference is: %v", diff) + } + if diff := deep.Equal([]string{"foo,bar"}, authInfo.Config.AuditNonHMACRequestKeys); len(diff) > 0 { + t.Errorf("Failed to find expected values in AuditNonHMACRequestKeys. Difference is: %v", diff) + } + if diff := deep.Equal([]string{"foo,bar"}, authInfo.Config.AuditNonHMACResponseKeys); len(diff) > 0 { + t.Errorf("Failed to find expected values in AuditNonHMACResponseKeys. Difference is: %v", diff) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testAuthEnableCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "userpass", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error enabling userpass auth: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testAuthEnableCommand(t) + assertNoTabs(t, cmd) + }) + + t.Run("mount_all", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerAllBackends(t) + defer closer() + + files, err := ioutil.ReadDir("../builtin/credential") + if err != nil { + t.Fatal(err) + } + + var backends []string + for _, f := range files { + if f.IsDir() { + backends = append(backends, f.Name()) + } + } + + modFile, err := ioutil.ReadFile("../go.mod") + if err != nil { + t.Fatal(err) + } + modLines := strings.Split(string(modFile), "\n") + for _, p := range modLines { + splitLine := strings.Split(strings.TrimSpace(p), " ") + if len(splitLine) == 0 { + continue + } + potPlug := strings.TrimPrefix(splitLine[0], "github.com/hashicorp/") + if strings.HasPrefix(potPlug, "vault-plugin-auth-") { + backends = append(backends, strings.TrimPrefix(potPlug, "vault-plugin-auth-")) + } + } + // Since "pcf" plugin in the Vault registry is also pointed at the "vault-plugin-auth-cf" + // repository, we need to manually append it here so it'll tie out with our expected number + // of credential backends. + backends = append(backends, "pcf") + + // Add 1 to account for the "token" backend, which is visible when you walk the filesystem but + // is treated as special and excluded from the registry. + // Subtract 1 to account for "oidc" which is an alias of "jwt" and not a separate plugin. + expected := len(builtinplugins.Registry.Keys(consts.PluginTypeCredential)) + if len(backends) != expected { + t.Fatalf("expected %d credential backends, got %d", expected, len(backends)) + } + + for _, b := range backends { + var expectedResult int = 0 + + // Not a builtin + if b == "token" { + continue + } + + ui, cmd := testAuthEnableCommand(t) + cmd.client = client + + actualResult := cmd.Run([]string{ + b, + }) + + // Need to handle deprecated builtins specially + status, _ := builtinplugins.Registry.DeprecationStatus(b, consts.PluginTypeCredential) + if status == consts.PendingRemoval || status == consts.Removed { + expectedResult = 2 + } + + if actualResult != expectedResult { + t.Errorf("type: %s - got: %d, expected: %d - %s", b, actualResult, expectedResult, ui.OutputWriter.String()+ui.ErrorWriter.String()) + } + } + }) +} diff --git a/command/auth_help.go b/command/auth_help.go new file mode 100644 index 0000000..34b6b9f --- /dev/null +++ b/command/auth_help.go @@ -0,0 +1,128 @@ +// Copyright (c) HashiCorp, Inc. 
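The enable command and the tests above funnel into Sys().EnableAuthWithOptions. A minimal sketch of the equivalent direct API usage, again assuming the client is configured from the environment:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Roughly equivalent to:
	//   vault auth enable -path=auth-prod -description="prod logins" \
	//     -default-lease-ttl=30m ldap
	err = client.Sys().EnableAuthWithOptions("auth-prod", &api.EnableAuthOptions{
		Type:        "ldap",
		Description: "prod logins",
		Config: api.AuthConfigInput{
			DefaultLeaseTTL: "30m", // TTL strings accept Go duration syntax
		},
	})
	if err != nil {
		log.Fatalf("error enabling ldap auth: %v", err)
	}
	fmt.Println("enabled ldap auth at auth-prod/")
}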
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*AuthHelpCommand)(nil) + _ cli.CommandAutocomplete = (*AuthHelpCommand)(nil) +) + +type AuthHelpCommand struct { + *BaseCommand + + Handlers map[string]LoginHandler +} + +func (c *AuthHelpCommand) Synopsis() string { + return "Prints usage for an auth method" +} + +func (c *AuthHelpCommand) Help() string { + helpText := ` +Usage: vault auth help [options] TYPE | PATH + + Prints usage and help for an auth method. + + - If given a TYPE, this command prints the default help for the + auth method of that type. + + - If given a PATH, this command prints the help output for the + auth method enabled at that path. This path must already + exist. + + Get usage instructions for the userpass auth method: + + $ vault auth help userpass + + Print usage for the auth method enabled at my-method/: + + $ vault auth help my-method/ + + Each auth method produces its own help output. + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *AuthHelpCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP) +} + +func (c *AuthHelpCommand) AutocompleteArgs() complete.Predictor { + handlers := make([]string, 0, len(c.Handlers)) + for k := range c.Handlers { + handlers = append(handlers, k) + } + return complete.PredictSet(handlers...) +} + +func (c *AuthHelpCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *AuthHelpCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + // Start with the assumption that we have an auth type, not a path. + authType := strings.TrimSpace(args[0]) + + authHandler, ok := c.Handlers[authType] + if !ok { + // There was no auth type by that name, see if it's a mount + auths, err := client.Sys().ListAuth() + if err != nil { + c.UI.Error(fmt.Sprintf("Error listing auth methods: %s", err)) + return 2 + } + + authPath := ensureTrailingSlash(sanitizePath(args[0])) + auth, ok := auths[authPath] + if !ok { + c.UI.Warn(fmt.Sprintf( + "No auth method available on the server at %q", authPath)) + return 1 + } + + authHandler, ok = c.Handlers[auth.Type] + if !ok { + c.UI.Warn(wrapAtLength(fmt.Sprintf( + "No method-specific CLI handler available for auth method %q", + authType))) + return 2 + } + } + + c.UI.Output(authHandler.Help()) + return 0 +} diff --git a/command/auth_help_test.go b/command/auth_help_test.go new file mode 100644 index 0000000..a83695e --- /dev/null +++ b/command/auth_help_test.go @@ -0,0 +1,159 @@ +// Copyright (c) HashiCorp, Inc. 
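auth help's fallback from TYPE to PATH in the command above relies on Sys().ListAuth, whose result keys are normalized mount paths with trailing slashes. A small sketch of that resolution step, under the same environment assumptions as the earlier examples:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	auths, err := client.Sys().ListAuth()
	if err != nil {
		log.Fatalf("error listing auth methods: %v", err)
	}

	// Keys look like "my-method/"; the mount's Type field ("userpass",
	// "ldap", ...) is what selects the method-specific CLI handler.
	if mount, ok := auths["my-method/"]; ok {
		fmt.Printf("my-method/ is backed by the %q auth method\n", mount.Type)
	} else {
		fmt.Println("no auth method mounted at my-method/")
	}
}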
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/mitchellh/cli" + + credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" +) + +func testAuthHelpCommand(tb testing.TB) (*cli.MockUi, *AuthHelpCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &AuthHelpCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + Handlers: map[string]LoginHandler{ + "userpass": &credUserpass.CLIHandler{ + DefaultMount: "userpass", + }, + }, + } +} + +func TestAuthHelpCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "too_many_args", + []string{"foo", "bar"}, + "Too many arguments", + 1, + }, + { + "not_enough_args", + nil, + "Not enough arguments", + 1, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testAuthHelpCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + + t.Run("path", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().EnableAuth("foo", "userpass", ""); err != nil { + t.Fatal(err) + } + + ui, cmd := testAuthHelpCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "foo/", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Usage: vault login -method=userpass" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("type", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + // No mounted auth methods + + ui, cmd := testAuthHelpCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "userpass", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Usage: vault login -method=userpass" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testAuthHelpCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "sys/mounts", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error listing auth methods: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testAuthHelpCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/auth_list.go b/command/auth_list.go new file mode 100644 index 0000000..25103a1 --- /dev/null +++ b/command/auth_list.go @@ -0,0 +1,189 @@ +// Copyright (c) HashiCorp, Inc. 
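The list command defined next renders ListAuth results as sorted rows. Its tableOutput helper is internal to this package, so the sketch below substitutes the standard library's text/tabwriter to show the same shape (sorted paths, one row per mount):

package main

import (
	"fmt"
	"log"
	"os"
	"sort"
	"text/tabwriter"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	auths, err := client.Sys().ListAuth()
	if err != nil {
		log.Fatal(err)
	}

	// Sort the mount paths so output order is stable.
	paths := make([]string, 0, len(auths))
	for path := range auths {
		paths = append(paths, path)
	}
	sort.Strings(paths)

	w := tabwriter.NewWriter(os.Stdout, 0, 4, 2, ' ', 0)
	fmt.Fprintln(w, "Path\tType\tAccessor\tDescription")
	for _, path := range paths {
		m := auths[path]
		fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", path, m.Type, m.Accessor, m.Description)
	}
	w.Flush()
}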
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "sort" + "strconv" + "strings" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*AuthListCommand)(nil) + _ cli.CommandAutocomplete = (*AuthListCommand)(nil) +) + +type AuthListCommand struct { + *BaseCommand + + flagDetailed bool +} + +func (c *AuthListCommand) Synopsis() string { + return "Lists enabled auth methods" +} + +func (c *AuthListCommand) Help() string { + helpText := ` +Usage: vault auth list [options] + + Lists the enabled auth methods on the Vault server. This command also outputs + information about the method including configuration and human-friendly + descriptions. A TTL of "system" indicates that the system default is in use. + + List all enabled auth methods: + + $ vault auth list + + List all enabled auth methods with detailed output: + + $ vault auth list -detailed + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *AuthListCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.BoolVar(&BoolVar{ + Name: "detailed", + Target: &c.flagDetailed, + Default: false, + Usage: "Print detailed information such as configuration and replication " + + "status about each auth method. This option is only applicable to " + + "table-formatted output.", + }) + + return set +} + +func (c *AuthListCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *AuthListCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *AuthListCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + if len(args) > 0 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + auths, err := client.Sys().ListAuth() + if err != nil { + c.UI.Error(fmt.Sprintf("Error listing enabled authentications: %s", err)) + return 2 + } + + switch Format(c.UI) { + case "table": + if c.flagDetailed { + c.UI.Output(tableOutput(c.detailedMounts(auths), nil)) + return 0 + } + c.UI.Output(tableOutput(c.simpleMounts(auths), nil)) + return 0 + default: + return OutputData(c.UI, auths) + } +} + +func (c *AuthListCommand) simpleMounts(auths map[string]*api.AuthMount) []string { + paths := make([]string, 0, len(auths)) + for path := range auths { + paths = append(paths, path) + } + sort.Strings(paths) + + out := []string{"Path | Type | Accessor | Description | Version"} + for _, path := range paths { + mount := auths[path] + out = append(out, fmt.Sprintf("%s | %s | %s | %s | %s", path, mount.Type, mount.Accessor, mount.Description, mount.PluginVersion)) + } + + return out +} + +func (c *AuthListCommand) detailedMounts(auths map[string]*api.AuthMount) []string { + paths := make([]string, 0, len(auths)) + for path := range auths { + paths = append(paths, path) + } + sort.Strings(paths) + + calcTTL := func(typ string, ttl int) string { + switch { + case typ == "system", typ == "cubbyhole": + return "" + case ttl != 0: + return strconv.Itoa(ttl) + default: + return "system" + } + } + + out := []string{"Path | Plugin | Accessor | Default TTL | Max TTL | Token Type | Replication | Seal Wrap | External Entropy Access | Options | Description | UUID | Version | Running Version | Running SHA256 | 
Deprecation Status"} + for _, path := range paths { + mount := auths[path] + + defaultTTL := calcTTL(mount.Type, mount.Config.DefaultLeaseTTL) + maxTTL := calcTTL(mount.Type, mount.Config.MaxLeaseTTL) + + replication := "replicated" + if mount.Local { + replication = "local" + } + + pluginName := mount.Type + if pluginName == "plugin" { + pluginName = mount.Config.PluginName + } + + out = append(out, fmt.Sprintf("%s | %s | %s | %s | %s | %s | %s | %t | %v | %s | %s | %s | %s | %s | %s | %s", + path, + pluginName, + mount.Accessor, + defaultTTL, + maxTTL, + mount.Config.TokenType, + replication, + mount.SealWrap, + mount.ExternalEntropyAccess, + mount.Options, + mount.Description, + mount.UUID, + mount.PluginVersion, + mount.RunningVersion, + mount.RunningSha256, + mount.DeprecationStatus, + )) + } + + return out +} diff --git a/command/auth_list_test.go b/command/auth_list_test.go new file mode 100644 index 0000000..2e96f9f --- /dev/null +++ b/command/auth_list_test.go @@ -0,0 +1,108 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func testAuthListCommand(tb testing.TB) (*cli.MockUi, *AuthListCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &AuthListCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestAuthListCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "too_many_args", + []string{"foo"}, + "Too many arguments", + 1, + }, + { + "lists", + nil, + "Path", + 0, + }, + { + "detailed", + []string{"-detailed"}, + "Default TTL", + 0, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testAuthListCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testAuthListCommand(t) + cmd.client = client + + code := cmd.Run([]string{}) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error listing enabled authentications: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testAuthListCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/auth_move.go b/command/auth_move.go new file mode 100644 index 0000000..2af5ab6 --- /dev/null +++ b/command/auth_move.go @@ -0,0 +1,126 @@ +// Copyright (c) HashiCorp, Inc. 
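The move command defined next drives the remount API: StartRemount kicks off the migration, and RemountStatus is polled until a terminal state. A hedged sketch of that polling loop against the public client; the "success"/"failure" strings are assumed to match the command's MountMigrationStatus* constants:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Auth mounts are addressed with an "auth/" prefix for remounts.
	resp, err := client.Sys().StartRemount("auth/approle/", "auth/generic/")
	if err != nil {
		log.Fatalf("error starting remount: %v", err)
	}
	fmt.Println("migration ID:", resp.MigrationID)

	for {
		status, err := client.Sys().RemountStatus(resp.MigrationID)
		if err != nil {
			log.Fatalf("error checking status: %v", err)
		}
		// "success" and "failure" are assumed to be the terminal values.
		switch status.MigrationInfo.MigrationStatus {
		case "success":
			fmt.Println("remount finished")
			return
		case "failure":
			log.Fatal("remount failed")
		}
		time.Sleep(10 * time.Second)
	}
}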
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/mitchellh/cli"
+	"github.com/posener/complete"
+)
+
+var (
+	_ cli.Command             = (*AuthMoveCommand)(nil)
+	_ cli.CommandAutocomplete = (*AuthMoveCommand)(nil)
+)
+
+type AuthMoveCommand struct {
+	*BaseCommand
+}
+
+func (c *AuthMoveCommand) Synopsis() string {
+	return "Move an auth method to a new path"
+}
+
+func (c *AuthMoveCommand) Help() string {
+	helpText := `
+Usage: vault auth move [options] SOURCE DESTINATION
+
+  Moves an existing auth method to a new path. Any leases from the old
+  auth method are revoked, but all configuration associated with the method
+  is preserved. The command initiates the migration and intermittently polls
+  its status, exiting once a final state is reached.
+
+  This command works within or across namespaces; both source and destination
+  paths can be prefixed with a namespace hierarchy relative to the current
+  namespace.
+
+  WARNING! Moving an auth method will revoke any leases from the
+  old method.
+
+  Move the auth method at approle/ to generic/:
+
+      $ vault auth move approle/ generic/
+
+  Move the auth method at ns1/approle/ across namespaces to ns2/generic/,
+  where ns1 and ns2 are child namespaces of the current namespace:
+
+      $ vault auth move ns1/approle/ ns2/generic/
+
+` + c.Flags().Help()
+
+	return strings.TrimSpace(helpText)
+}
+
+func (c *AuthMoveCommand) Flags() *FlagSets {
+	return c.flagSet(FlagSetHTTP)
+}
+
+func (c *AuthMoveCommand) AutocompleteArgs() complete.Predictor {
+	return c.PredictVaultMounts()
+}
+
+func (c *AuthMoveCommand) AutocompleteFlags() complete.Flags {
+	return c.Flags().Completions()
+}
+
+func (c *AuthMoveCommand) Run(args []string) int {
+	f := c.Flags()
+
+	if err := f.Parse(args); err != nil {
+		c.UI.Error(err.Error())
+		return 1
+	}
+
+	args = f.Args()
+	switch {
+	case len(args) < 2:
+		c.UI.Error(fmt.Sprintf("Not enough arguments (expected 2, got %d)", len(args)))
+		return 1
+	case len(args) > 2:
+		c.UI.Error(fmt.Sprintf("Too many arguments (expected 2, got %d)", len(args)))
+		return 1
+	}
+
+	// Grab the source and destination
+	source := ensureTrailingSlash(args[0])
+	destination := ensureTrailingSlash(args[1])
+
+	client, err := c.Client()
+	if err != nil {
+		c.UI.Error(err.Error())
+		return 2
+	}
+
+	remountResp, err := client.Sys().StartRemount(source, destination)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("Error moving auth method %s to %s: %s", source, destination, err))
+		return 2
+	}
+
+	c.UI.Output(fmt.Sprintf("Started moving auth method %s to %s, with migration ID %s", source, destination, remountResp.MigrationID))
+
+	// Poll the status endpoint with the returned migration ID
+	// Exit if a terminal status is reached, else wait and retry
+	for {
+		remountStatusResp, err := client.Sys().RemountStatus(remountResp.MigrationID)
+		if err != nil {
+			c.UI.Error(fmt.Sprintf("Error checking migration status of auth method %s to %s: %s", source, destination, err))
+			return 2
+		}
+		if remountStatusResp.MigrationInfo.MigrationStatus == MountMigrationStatusSuccess {
+			c.UI.Output(fmt.Sprintf("Success! Finished moving auth method %s to %s, with migration ID %s", source, destination, remountResp.MigrationID))
+			return 0
+		}
+		if remountStatusResp.MigrationInfo.MigrationStatus == MountMigrationStatusFailure {
+			c.UI.Error(fmt.Sprintf("Failure! 
Error encountered moving auth method %s to %s, with migration ID %s", source, destination, remountResp.MigrationID)) + return 0 + } + c.UI.Output(fmt.Sprintf("Waiting for terminal status in migration of auth method %s to %s, with migration ID %s", source, destination, remountResp.MigrationID)) + time.Sleep(10 * time.Second) + } + + return 0 +} diff --git a/command/auth_move_test.go b/command/auth_move_test.go new file mode 100644 index 0000000..877afd2 --- /dev/null +++ b/command/auth_move_test.go @@ -0,0 +1,149 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" +) + +func testAuthMoveCommand(tb testing.TB) (*cli.MockUi, *AuthMoveCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &AuthMoveCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestAuthMoveCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + []string{}, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"foo", "bar", "baz"}, + "Too many arguments", + 1, + }, + { + "non_existent", + []string{"not_real", "over_here"}, + "Error moving auth method not_real/ to over_here/", + 2, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testAuthMoveCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("integration", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testAuthMoveCommand(t) + cmd.client = client + + if err := client.Sys().EnableAuthWithOptions("my-auth", &api.EnableAuthOptions{ + Type: "userpass", + }); err != nil { + t.Fatal(err) + } + + code := cmd.Run([]string{ + "auth/my-auth/", "auth/my-auth-2/", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! 
Finished moving auth method auth/my-auth/ to auth/my-auth-2/" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + mounts, err := client.Sys().ListAuth() + if err != nil { + t.Fatal(err) + } + + if _, ok := mounts["my-auth-2/"]; !ok { + t.Errorf("expected mount at my-auth-2/: %#v", mounts) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testAuthMoveCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "auth/my-auth/", "auth/my-auth-2/", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error moving auth method auth/my-auth/ to auth/my-auth-2/:" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testAuthMoveCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/auth_test.go b/command/auth_test.go new file mode 100644 index 0000000..dd8abb0 --- /dev/null +++ b/command/auth_test.go @@ -0,0 +1,37 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "testing" + + "github.com/mitchellh/cli" + + "github.com/hashicorp/vault/command/token" +) + +func testAuthCommand(tb testing.TB) (*cli.MockUi, *AuthCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &AuthCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + + // Override to our own token helper + tokenHelper: token.NewTestingTokenHelper(), + }, + } +} + +func TestAuthCommand_Run(t *testing.T) { + t.Parallel() + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testAuthCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/auth_tune.go b/command/auth_tune.go new file mode 100644 index 0000000..a7a0979 --- /dev/null +++ b/command/auth_tune.go @@ -0,0 +1,310 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "flag" + "fmt" + "strconv" + "strings" + "time" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*AuthTuneCommand)(nil) + _ cli.CommandAutocomplete = (*AuthTuneCommand)(nil) +) + +type AuthTuneCommand struct { + *BaseCommand + + flagAuditNonHMACRequestKeys []string + flagAuditNonHMACResponseKeys []string + flagDefaultLeaseTTL time.Duration + flagDescription string + flagListingVisibility string + flagMaxLeaseTTL time.Duration + flagPassthroughRequestHeaders []string + flagAllowedResponseHeaders []string + flagOptions map[string]string + flagTokenType string + flagVersion int + flagPluginVersion string + flagUserLockoutThreshold uint + flagUserLockoutDuration time.Duration + flagUserLockoutCounterResetDuration time.Duration + flagUserLockoutDisable bool +} + +func (c *AuthTuneCommand) Synopsis() string { + return "Tunes an auth method configuration" +} + +func (c *AuthTuneCommand) Help() string { + helpText := ` +Usage: vault auth tune [options] PATH + + Tunes the configuration options for the auth method at the given PATH. The + argument corresponds to the PATH where the auth method is enabled, not the + TYPE! 
+
+  Tune the default lease for the github auth method:
+
+      $ vault auth tune -default-lease-ttl=72h github/
+
+` + c.Flags().Help()
+
+	return strings.TrimSpace(helpText)
+}
+
+func (c *AuthTuneCommand) Flags() *FlagSets {
+	set := c.flagSet(FlagSetHTTP)
+
+	f := set.NewFlagSet("Command Options")
+
+	f.StringSliceVar(&StringSliceVar{
+		Name:   flagNameAuditNonHMACRequestKeys,
+		Target: &c.flagAuditNonHMACRequestKeys,
+		Usage: "Key that will not be HMAC'd by audit devices in the request data " +
+			"object. To specify multiple values, specify this flag multiple times.",
+	})
+
+	f.StringSliceVar(&StringSliceVar{
+		Name:   flagNameAuditNonHMACResponseKeys,
+		Target: &c.flagAuditNonHMACResponseKeys,
+		Usage: "Key that will not be HMAC'd by audit devices in the response data " +
+			"object. To specify multiple values, specify this flag multiple times.",
+	})
+
+	f.DurationVar(&DurationVar{
+		Name:       "default-lease-ttl",
+		Target:     &c.flagDefaultLeaseTTL,
+		Default:    0,
+		EnvVar:     "",
+		Completion: complete.PredictAnything,
+		Usage: "The default lease TTL for this auth method. If unspecified, this " +
+			"defaults to the Vault server's globally configured default lease TTL, " +
+			"or a previously configured value for the auth method.",
+	})
+
+	f.StringVar(&StringVar{
+		Name:   flagNameDescription,
+		Target: &c.flagDescription,
+		Usage: "Human-friendly description of this auth method. This overrides " +
+			"the current stored value, if any.",
+	})
+
+	f.StringVar(&StringVar{
+		Name:   flagNameListingVisibility,
+		Target: &c.flagListingVisibility,
+		Usage: "Determines the visibility of the mount in the UI-specific listing " +
+			"endpoint.",
+	})
+
+	f.DurationVar(&DurationVar{
+		Name:       "max-lease-ttl",
+		Target:     &c.flagMaxLeaseTTL,
+		Default:    0,
+		EnvVar:     "",
+		Completion: complete.PredictAnything,
+		Usage: "The maximum lease TTL for this auth method. If unspecified, this " +
+			"defaults to the Vault server's globally configured maximum lease TTL, " +
+			"or a previously configured value for the auth method.",
+	})
+
+	f.StringSliceVar(&StringSliceVar{
+		Name:   flagNamePassthroughRequestHeaders,
+		Target: &c.flagPassthroughRequestHeaders,
+		Usage: "Request header value that will be sent to the plugin. To specify " +
+			"multiple values, specify this flag multiple times.",
+	})
+
+	f.StringSliceVar(&StringSliceVar{
+		Name:   flagNameAllowedResponseHeaders,
+		Target: &c.flagAllowedResponseHeaders,
+		Usage: "Response header value that plugins will be allowed to set. To specify " +
+			"multiple values, specify this flag multiple times.",
+	})
+
+	f.StringMapVar(&StringMapVar{
+		Name:       "options",
+		Target:     &c.flagOptions,
+		Completion: complete.PredictAnything,
+		Usage: "Key-value pair provided as key=value for the mount options. " +
+			"This can be specified multiple times.",
+	})
+
+	f.StringVar(&StringVar{
+		Name:   flagNameTokenType,
+		Target: &c.flagTokenType,
+		Usage:  "Sets a forced token type for the mount.",
+	})
+
+	f.IntVar(&IntVar{
+		Name:    "version",
+		Target:  &c.flagVersion,
+		Default: 0,
+		Usage:   "Select the version of the auth method to run. Not supported by all auth methods.",
+	})
+
+	f.UintVar(&UintVar{
+		Name:   flagNameUserLockoutThreshold,
+		Target: &c.flagUserLockoutThreshold,
+		Usage: "The threshold for user lockout for this auth method. 
If unspecified, this " + + "defaults to the Vault server's globally configured user lockout threshold, " + + "or a previously configured value for the auth method.", + }) + + f.DurationVar(&DurationVar{ + Name: flagNameUserLockoutDuration, + Target: &c.flagUserLockoutDuration, + Completion: complete.PredictAnything, + Usage: "The user lockout duration for this auth method. If unspecified, this " + + "defaults to the Vault server's globally configured user lockout duration, " + + "or a previously configured value for the auth method.", + }) + + f.DurationVar(&DurationVar{ + Name: flagNameUserLockoutCounterResetDuration, + Target: &c.flagUserLockoutCounterResetDuration, + Completion: complete.PredictAnything, + Usage: "The user lockout counter reset duration for this auth method. If unspecified, this " + + "defaults to the Vault server's globally configured user lockout counter reset duration, " + + "or a previously configured value for the auth method.", + }) + + f.BoolVar(&BoolVar{ + Name: flagNameUserLockoutDisable, + Target: &c.flagUserLockoutDisable, + Default: false, + Usage: "Disable user lockout for this auth method. If unspecified, this " + + "defaults to the Vault server's globally configured user lockout disable, " + + "or a previously configured value for the auth method.", + }) + + f.StringVar(&StringVar{ + Name: flagNamePluginVersion, + Target: &c.flagPluginVersion, + Default: "", + Usage: "Select the semantic version of the plugin to run. The new version must be registered in " + + "the plugin catalog, and will not start running until the plugin is reloaded.", + }) + + return set +} + +func (c *AuthTuneCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultAuths() +} + +func (c *AuthTuneCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *AuthTuneCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + if c.flagVersion > 0 { + if c.flagOptions == nil { + c.flagOptions = make(map[string]string) + } + c.flagOptions["version"] = strconv.Itoa(c.flagVersion) + } + + mountConfigInput := api.MountConfigInput{ + DefaultLeaseTTL: ttlToAPI(c.flagDefaultLeaseTTL), + MaxLeaseTTL: ttlToAPI(c.flagMaxLeaseTTL), + Options: c.flagOptions, + } + + // Set these values only if they are provided in the CLI + f.Visit(func(fl *flag.Flag) { + if fl.Name == flagNameAuditNonHMACRequestKeys { + mountConfigInput.AuditNonHMACRequestKeys = c.flagAuditNonHMACRequestKeys + } + + if fl.Name == flagNameAuditNonHMACResponseKeys { + mountConfigInput.AuditNonHMACResponseKeys = c.flagAuditNonHMACResponseKeys + } + + if fl.Name == flagNameDescription { + mountConfigInput.Description = &c.flagDescription + } + + if fl.Name == flagNameListingVisibility { + mountConfigInput.ListingVisibility = c.flagListingVisibility + } + + if fl.Name == flagNamePassthroughRequestHeaders { + mountConfigInput.PassthroughRequestHeaders = c.flagPassthroughRequestHeaders + } + + if fl.Name == flagNameAllowedResponseHeaders { + mountConfigInput.AllowedResponseHeaders = c.flagAllowedResponseHeaders + } + + if fl.Name == flagNameTokenType { + 
mountConfigInput.TokenType = c.flagTokenType + } + switch fl.Name { + case flagNameUserLockoutThreshold, flagNameUserLockoutDuration, flagNameUserLockoutCounterResetDuration, flagNameUserLockoutDisable: + if mountConfigInput.UserLockoutConfig == nil { + mountConfigInput.UserLockoutConfig = &api.UserLockoutConfigInput{} + } + } + if fl.Name == flagNameUserLockoutThreshold { + mountConfigInput.UserLockoutConfig.LockoutThreshold = strconv.FormatUint(uint64(c.flagUserLockoutThreshold), 10) + } + if fl.Name == flagNameUserLockoutDuration { + mountConfigInput.UserLockoutConfig.LockoutDuration = ttlToAPI(c.flagUserLockoutDuration) + } + if fl.Name == flagNameUserLockoutCounterResetDuration { + mountConfigInput.UserLockoutConfig.LockoutCounterResetDuration = ttlToAPI(c.flagUserLockoutCounterResetDuration) + } + if fl.Name == flagNameUserLockoutDisable { + mountConfigInput.UserLockoutConfig.DisableLockout = &c.flagUserLockoutDisable + } + + if fl.Name == flagNamePluginVersion { + mountConfigInput.PluginVersion = c.flagPluginVersion + } + }) + + // Append /auth (since that's where auths live) and a trailing slash to + // indicate it's a path in output + mountPath := ensureTrailingSlash(sanitizePath(args[0])) + + if err := client.Sys().TuneMount("/auth/"+mountPath, mountConfigInput); err != nil { + c.UI.Error(fmt.Sprintf("Error tuning auth method %s: %s", mountPath, err)) + return 2 + } + + c.UI.Output(fmt.Sprintf("Success! Tuned the auth method at: %s", mountPath)) + return 0 +} diff --git a/command/auth_tune_test.go b/command/auth_tune_test.go new file mode 100644 index 0000000..aabcd83 --- /dev/null +++ b/command/auth_tune_test.go @@ -0,0 +1,292 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/go-test/deep" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/mitchellh/cli" +) + +func testAuthTuneCommand(tb testing.TB) (*cli.MockUi, *AuthTuneCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &AuthTuneCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestAuthTuneCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + []string{}, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"foo", "bar"}, + "Too many arguments", + 1, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testAuthTuneCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("integration", func(t *testing.T) { + t.Run("flags_all", func(t *testing.T) { + t.Parallel() + pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) + defer cleanup(t) + + client, _, closer := testVaultServerPluginDir(t, pluginDir) + defer closer() + + ui, cmd := testAuthTuneCommand(t) + cmd.client = client + + // Mount + if err := client.Sys().EnableAuthWithOptions("my-auth", &api.EnableAuthOptions{ + Type: "userpass", + }); err != nil { + t.Fatal(err) + } + + auths, err := client.Sys().ListAuth() + if err 
!= nil { + t.Fatal(err) + } + mountInfo, ok := auths["my-auth/"] + if !ok { + t.Fatalf("expected mount to exist: %#v", auths) + } + + if exp := ""; mountInfo.PluginVersion != exp { + t.Errorf("expected %q to be %q", mountInfo.PluginVersion, exp) + } + + _, _, version := testPluginCreateAndRegisterVersioned(t, client, pluginDir, "userpass", api.PluginTypeCredential) + + code := cmd.Run([]string{ + "-description", "new description", + "-default-lease-ttl", "30m", + "-max-lease-ttl", "1h", + "-audit-non-hmac-request-keys", "foo,bar", + "-audit-non-hmac-response-keys", "foo,bar", + "-passthrough-request-headers", "authorization", + "-passthrough-request-headers", "www-authentication", + "-allowed-response-headers", "authorization,www-authentication", + "-listing-visibility", "unauth", + "-plugin-version", version, + "my-auth/", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! Tuned the auth method at: my-auth/" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + auths, err = client.Sys().ListAuth() + if err != nil { + t.Fatal(err) + } + + mountInfo, ok = auths["my-auth/"] + if !ok { + t.Fatalf("expected auth to exist") + } + if exp := "new description"; mountInfo.Description != exp { + t.Errorf("expected %q to be %q", mountInfo.Description, exp) + } + if exp := "userpass"; mountInfo.Type != exp { + t.Errorf("expected %q to be %q", mountInfo.Type, exp) + } + if exp := version; mountInfo.PluginVersion != exp { + t.Errorf("expected %q to be %q", mountInfo.PluginVersion, exp) + } + if exp := 1800; mountInfo.Config.DefaultLeaseTTL != exp { + t.Errorf("expected %d to be %d", mountInfo.Config.DefaultLeaseTTL, exp) + } + if exp := 3600; mountInfo.Config.MaxLeaseTTL != exp { + t.Errorf("expected %d to be %d", mountInfo.Config.MaxLeaseTTL, exp) + } + if diff := deep.Equal([]string{"authorization", "www-authentication"}, mountInfo.Config.PassthroughRequestHeaders); len(diff) > 0 { + t.Errorf("Failed to find expected values in PassthroughRequestHeaders. Difference is: %v", diff) + } + if diff := deep.Equal([]string{"authorization,www-authentication"}, mountInfo.Config.AllowedResponseHeaders); len(diff) > 0 { + t.Errorf("Failed to find expected values in AllowedResponseHeaders. Difference is: %v", diff) + } + if diff := deep.Equal([]string{"foo,bar"}, mountInfo.Config.AuditNonHMACRequestKeys); len(diff) > 0 { + t.Errorf("Failed to find expected values in AuditNonHMACRequestKeys. Difference is: %v", diff) + } + if diff := deep.Equal([]string{"foo,bar"}, mountInfo.Config.AuditNonHMACResponseKeys); len(diff) > 0 { + t.Errorf("Failed to find expected values in AuditNonHMACResponseKeys. Difference is: %v", diff) + } + }) + + t.Run("flags_description", func(t *testing.T) { + t.Parallel() + t.Run("not_provided", func(t *testing.T) { + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testAuthTuneCommand(t) + cmd.client = client + + // Mount + if err := client.Sys().EnableAuthWithOptions("my-auth", &api.EnableAuthOptions{ + Type: "userpass", + Description: "initial description", + }); err != nil { + t.Fatal(err) + } + + code := cmd.Run([]string{ + "-default-lease-ttl", "30m", + "my-auth/", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! 
Tuned the auth method at: my-auth/" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + auths, err := client.Sys().ListAuth() + if err != nil { + t.Fatal(err) + } + + mountInfo, ok := auths["my-auth/"] + if !ok { + t.Fatalf("expected auth to exist") + } + if exp := "initial description"; mountInfo.Description != exp { + t.Errorf("expected %q to be %q", mountInfo.Description, exp) + } + }) + + t.Run("provided_empty", func(t *testing.T) { + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testAuthTuneCommand(t) + cmd.client = client + + // Mount + if err := client.Sys().EnableAuthWithOptions("my-auth", &api.EnableAuthOptions{ + Type: "userpass", + Description: "initial description", + }); err != nil { + t.Fatal(err) + } + + code := cmd.Run([]string{ + "-description", "", + "my-auth/", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! Tuned the auth method at: my-auth/" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + auths, err := client.Sys().ListAuth() + if err != nil { + t.Fatal(err) + } + + mountInfo, ok := auths["my-auth/"] + if !ok { + t.Fatalf("expected auth to exist") + } + if exp := ""; mountInfo.Description != exp { + t.Errorf("expected %q to be %q", mountInfo.Description, exp) + } + }) + }) + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testAuthTuneCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "userpass/", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error tuning auth method userpass/: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testAuthTuneCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/base.go b/command/base.go new file mode 100644 index 0000000..641fcb3 --- /dev/null +++ b/command/base.go @@ -0,0 +1,711 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "regexp" + "strings" + "sync" + "time" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/token" + "github.com/hashicorp/vault/helper/namespace" + "github.com/mattn/go-isatty" + "github.com/mitchellh/cli" + "github.com/pkg/errors" + "github.com/posener/complete" +) + +const ( + // maxLineLength is the maximum width of any line. + maxLineLength int = 78 + + // notSetValue is a flag value for a not-set value + notSetValue = "(not set)" +) + +// reRemoveWhitespace is a regular expression for stripping whitespace from +// a string. 
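The tune command and tests above wrap Sys().TuneMount against the "auth/"-prefixed mount path. A minimal sketch of the direct call, assuming environment-based client configuration; TTL strings accept Go duration syntax:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Equivalent of `vault auth tune -default-lease-ttl=72h github/`:
	// note the "auth/" prefix the CLI adds before calling TuneMount.
	err = client.Sys().TuneMount("auth/github/", api.MountConfigInput{
		DefaultLeaseTTL: "72h",
	})
	if err != nil {
		log.Fatalf("error tuning auth method: %v", err)
	}
	fmt.Println("tuned auth method at github/")
}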
+var reRemoveWhitespace = regexp.MustCompile(`[\s]+`) + +type BaseCommand struct { + UI cli.Ui + + flags *FlagSets + flagsOnce sync.Once + + flagAddress string + flagAgentProxyAddress string + flagCACert string + flagCAPath string + flagClientCert string + flagClientKey string + flagNamespace string + flagNS string + flagPolicyOverride bool + flagTLSServerName string + flagTLSSkipVerify bool + flagDisableRedirects bool + flagWrapTTL time.Duration + flagUnlockKey string + + flagFormat string + flagField string + flagDetailed bool + flagOutputCurlString bool + flagOutputPolicy bool + flagNonInteractive bool + + flagMFA []string + + flagHeader map[string]string + + tokenHelper token.TokenHelper + + client *api.Client +} + +// Client returns the HTTP API client. The client is cached on the command to +// save performance on future calls. +func (c *BaseCommand) Client() (*api.Client, error) { + // Read the test client if present + if c.client != nil { + return c.client, nil + } + + config := api.DefaultConfig() + + if err := config.ReadEnvironment(); err != nil { + return nil, errors.Wrap(err, "failed to read environment") + } + + if c.flagAddress != "" { + config.Address = c.flagAddress + } + if c.flagAgentProxyAddress != "" { + config.Address = c.flagAgentProxyAddress + } + + if c.flagOutputCurlString { + config.OutputCurlString = c.flagOutputCurlString + } + if c.flagOutputPolicy { + config.OutputPolicy = c.flagOutputPolicy + } + + // If we need custom TLS configuration, then set it + if c.flagCACert != "" || c.flagCAPath != "" || c.flagClientCert != "" || + c.flagClientKey != "" || c.flagTLSServerName != "" || c.flagTLSSkipVerify { + t := &api.TLSConfig{ + CACert: c.flagCACert, + CAPath: c.flagCAPath, + ClientCert: c.flagClientCert, + ClientKey: c.flagClientKey, + TLSServerName: c.flagTLSServerName, + Insecure: c.flagTLSSkipVerify, + } + + // Setup TLS config + if err := config.ConfigureTLS(t); err != nil { + return nil, errors.Wrap(err, "failed to setup TLS config") + } + } + + // Build the client + client, err := api.NewClient(config) + if err != nil { + return nil, errors.Wrap(err, "failed to create client") + } + + // Turn off retries on the CLI + if os.Getenv(api.EnvVaultMaxRetries) == "" { + client.SetMaxRetries(0) + } + + // Set the wrapping function + client.SetWrappingLookupFunc(c.DefaultWrappingLookupFunc) + + // Get the token if it came in from the environment + token := client.Token() + + // If we don't have a token, check the token helper + if token == "" { + helper, err := c.TokenHelper() + if err != nil { + return nil, errors.Wrap(err, "failed to get token helper") + } + token, err = helper.Get() + if err != nil { + return nil, errors.Wrap(err, "failed to get token from token helper") + } + } + + // Set the token + if token != "" { + client.SetToken(token) + } + + client.SetMFACreds(c.flagMFA) + + // flagNS takes precedence over flagNamespace. After resolution, point both + // flags to the same value to be able to use them interchangeably anywhere. 
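+	// Illustrative example (not part of the upstream change): given
+	//
+	//	vault kv get -namespace=team-a -ns=team-b secret/app
+	//
+	// the client ends up scoped to "team-b", because -ns overwrites
+	// c.flagNamespace before the value is canonicalized below.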
+ if c.flagNS != notSetValue { + c.flagNamespace = c.flagNS + } + if c.flagNamespace != notSetValue { + client.SetNamespace(namespace.Canonicalize(c.flagNamespace)) + } + if c.flagPolicyOverride { + client.SetPolicyOverride(c.flagPolicyOverride) + } + + if c.flagHeader != nil { + + var forbiddenHeaders []string + for key, val := range c.flagHeader { + + if strings.HasPrefix(key, "X-Vault-") { + forbiddenHeaders = append(forbiddenHeaders, key) + continue + } + client.AddHeader(key, val) + } + + if len(forbiddenHeaders) > 0 { + return nil, fmt.Errorf("failed to setup Headers[%s]: Header starting by 'X-Vault-' are for internal usage only", strings.Join(forbiddenHeaders, ", ")) + } + } + + c.client = client + + return client, nil +} + +// SetAddress sets the token helper on the command; useful for the demo server and other outside cases. +func (c *BaseCommand) SetAddress(addr string) { + c.flagAddress = addr +} + +// SetTokenHelper sets the token helper on the command. +func (c *BaseCommand) SetTokenHelper(th token.TokenHelper) { + c.tokenHelper = th +} + +// TokenHelper returns the token helper attached to the command. +func (c *BaseCommand) TokenHelper() (token.TokenHelper, error) { + if c.tokenHelper != nil { + return c.tokenHelper, nil + } + + helper, err := DefaultTokenHelper() + if err != nil { + return nil, err + } + return helper, nil +} + +// DefaultWrappingLookupFunc is the default wrapping function based on the +// CLI flag. +func (c *BaseCommand) DefaultWrappingLookupFunc(operation, path string) string { + if c.flagWrapTTL != 0 { + return c.flagWrapTTL.String() + } + + return api.DefaultWrappingLookupFunc(operation, path) +} + +// getValidationRequired checks to see if the secret exists and has an MFA +// requirement. If MFA is required and the number of constraints is greater than +// 1, we can assert that interactive validation is not required. +func (c *BaseCommand) getMFAValidationRequired(secret *api.Secret) bool { + if secret != nil && secret.Auth != nil && secret.Auth.MFARequirement != nil { + if c.flagMFA == nil && len(secret.Auth.MFARequirement.MFAConstraints) == 1 { + return true + } else if len(secret.Auth.MFARequirement.MFAConstraints) > 1 { + return true + } + } + + return false +} + +// getInteractiveMFAMethodInfo returns MFA method information only if operating +// in interactive mode and one MFA method is configured. +func (c *BaseCommand) getInteractiveMFAMethodInfo(secret *api.Secret) *MFAMethodInfo { + if secret == nil || secret.Auth == nil || secret.Auth.MFARequirement == nil { + return nil + } + + mfaConstraints := secret.Auth.MFARequirement.MFAConstraints + if c.flagNonInteractive || len(mfaConstraints) != 1 || !isatty.IsTerminal(os.Stdin.Fd()) { + return nil + } + + for _, mfaConstraint := range mfaConstraints { + if len(mfaConstraint.Any) != 1 { + return nil + } + + return &MFAMethodInfo{ + methodType: mfaConstraint.Any[0].Type, + methodID: mfaConstraint.Any[0].ID, + usePasscode: mfaConstraint.Any[0].UsesPasscode, + } + } + + return nil +} + +func (c *BaseCommand) validateMFA(reqID string, methodInfo MFAMethodInfo) (*api.Secret, error) { + var passcode string + var err error + if methodInfo.usePasscode { + passcode, err = c.UI.AskSecret(fmt.Sprintf("Enter the passphrase for methodID %q of type %q:", methodInfo.methodID, methodInfo.methodType)) + if err != nil { + return nil, fmt.Errorf("failed to read passphrase: %w. 
please validate the login by sending a request to sys/mfa/validate", err) + } + } else { + c.UI.Warn("Asking Vault to perform MFA validation with upstream service. " + + "You should receive a push notification in your authenticator app shortly") + } + + // passcode could be an empty string + mfaPayload := map[string]interface{}{ + methodInfo.methodID: []string{passcode}, + } + + client, err := c.Client() + if err != nil { + return nil, err + } + + return client.Sys().MFAValidate(reqID, mfaPayload) +} + +type FlagSetBit uint + +const ( + FlagSetNone FlagSetBit = 1 << iota + FlagSetHTTP + FlagSetOutputField + FlagSetOutputFormat + FlagSetOutputDetailed +) + +// flagSet creates the flags for this command. The result is cached on the +// command to save performance on future calls. +func (c *BaseCommand) flagSet(bit FlagSetBit) *FlagSets { + c.flagsOnce.Do(func() { + set := NewFlagSets(c.UI) + + // These flag sets will apply to all leaf subcommands. + // TODO: Optional, but FlagSetHTTP can be safely removed from the individual + // Flags() subcommands. + bit = bit | FlagSetHTTP + + if bit&FlagSetHTTP != 0 { + f := set.NewFlagSet("HTTP Options") + + addrStringVar := &StringVar{ + Name: flagNameAddress, + Target: &c.flagAddress, + EnvVar: api.EnvVaultAddress, + Completion: complete.PredictAnything, + Usage: "Address of the Vault server.", + } + if c.flagAddress != "" { + addrStringVar.Default = c.flagAddress + } else { + addrStringVar.Default = "https://127.0.0.1:8200" + } + f.StringVar(addrStringVar) + + agentAddrStringVar := &StringVar{ + Name: "agent-address", + Target: &c.flagAgentProxyAddress, + EnvVar: api.EnvVaultAgentAddr, + Completion: complete.PredictAnything, + Usage: "Address of the Agent.", + } + f.StringVar(agentAddrStringVar) + + f.StringVar(&StringVar{ + Name: flagNameCACert, + Target: &c.flagCACert, + Default: "", + EnvVar: api.EnvVaultCACert, + Completion: complete.PredictFiles("*"), + Usage: "Path on the local disk to a single PEM-encoded CA " + + "certificate to verify the Vault server's SSL certificate. This " + + "takes precedence over -ca-path.", + }) + + f.StringVar(&StringVar{ + Name: flagNameCAPath, + Target: &c.flagCAPath, + Default: "", + EnvVar: api.EnvVaultCAPath, + Completion: complete.PredictDirs("*"), + Usage: "Path on the local disk to a directory of PEM-encoded CA " + + "certificates to verify the Vault server's SSL certificate.", + }) + + f.StringVar(&StringVar{ + Name: flagNameClientCert, + Target: &c.flagClientCert, + Default: "", + EnvVar: api.EnvVaultClientCert, + Completion: complete.PredictFiles("*"), + Usage: "Path on the local disk to a single PEM-encoded CA " + + "certificate to use for TLS authentication to the Vault server. If " + + "this flag is specified, -client-key is also required.", + }) + + f.StringVar(&StringVar{ + Name: flagNameClientKey, + Target: &c.flagClientKey, + Default: "", + EnvVar: api.EnvVaultClientKey, + Completion: complete.PredictFiles("*"), + Usage: "Path on the local disk to a single PEM-encoded private key " + + "matching the client certificate from -client-cert.", + }) + + f.StringVar(&StringVar{ + Name: "namespace", + Target: &c.flagNamespace, + Default: notSetValue, // this can never be a real value + EnvVar: api.EnvVaultNamespace, + Completion: complete.PredictAnything, + Usage: "The namespace to use for the command. Setting this is not " + + "necessary but allows using relative paths. 
-ns can be used as " + + "shortcut.", + }) + + f.StringVar(&StringVar{ + Name: "ns", + Target: &c.flagNS, + Default: notSetValue, // this can never be a real value + Completion: complete.PredictAnything, + Hidden: true, + Usage: "Alias for -namespace. This takes precedence over -namespace.", + }) + + f.StringVar(&StringVar{ + Name: flagTLSServerName, + Target: &c.flagTLSServerName, + Default: "", + EnvVar: api.EnvVaultTLSServerName, + Completion: complete.PredictAnything, + Usage: "Name to use as the SNI host when connecting to the Vault " + + "server via TLS.", + }) + + f.BoolVar(&BoolVar{ + Name: flagNameTLSSkipVerify, + Target: &c.flagTLSSkipVerify, + Default: false, + EnvVar: api.EnvVaultSkipVerify, + Usage: "Disable verification of TLS certificates. Using this option " + + "is highly discouraged as it decreases the security of data " + + "transmissions to and from the Vault server.", + }) + + f.BoolVar(&BoolVar{ + Name: flagNameDisableRedirects, + Target: &c.flagDisableRedirects, + Default: false, + EnvVar: api.EnvVaultDisableRedirects, + Usage: "Disable the default client behavior, which honors a single " + + "redirect response from a request", + }) + + f.BoolVar(&BoolVar{ + Name: "policy-override", + Target: &c.flagPolicyOverride, + Default: false, + Usage: "Override a Sentinel policy that has a soft-mandatory " + + "enforcement_level specified", + }) + + f.DurationVar(&DurationVar{ + Name: "wrap-ttl", + Target: &c.flagWrapTTL, + Default: 0, + EnvVar: api.EnvVaultWrapTTL, + Completion: complete.PredictAnything, + Usage: "Wraps the response in a cubbyhole token with the requested " + + "TTL. The response is available via the \"vault unwrap\" command. " + + "The TTL is specified as a numeric string with suffix like \"30s\" " + + "or \"5m\".", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: "mfa", + Target: &c.flagMFA, + Default: nil, + EnvVar: api.EnvVaultMFA, + Completion: complete.PredictAnything, + Usage: "Supply MFA credentials as part of X-Vault-MFA header.", + }) + + f.BoolVar(&BoolVar{ + Name: "output-curl-string", + Target: &c.flagOutputCurlString, + Default: false, + Usage: "Instead of executing the request, print an equivalent cURL " + + "command string and exit.", + }) + + f.BoolVar(&BoolVar{ + Name: "output-policy", + Target: &c.flagOutputPolicy, + Default: false, + Usage: "Instead of executing the request, print an example HCL " + + "policy that would be required to run this command, and exit.", + }) + + f.StringVar(&StringVar{ + Name: "unlock-key", + Target: &c.flagUnlockKey, + Default: notSetValue, + Completion: complete.PredictNothing, + Usage: "Key to unlock a namespace API lock.", + }) + + f.StringMapVar(&StringMapVar{ + Name: "header", + Target: &c.flagHeader, + Completion: complete.PredictAnything, + Usage: "Key-value pair provided as key=value to provide http header added to any request done by the CLI." 
+ + "Trying to add headers starting with 'X-Vault-' is forbidden and will make the command fail " + + "This can be specified multiple times.", + }) + + f.BoolVar(&BoolVar{ + Name: "non-interactive", + Target: &c.flagNonInteractive, + Default: false, + Usage: "When set true, prevents asking the user for input via the terminal.", + }) + + } + + if bit&(FlagSetOutputField|FlagSetOutputFormat|FlagSetOutputDetailed) != 0 { + outputSet := set.NewFlagSet("Output Options") + + if bit&FlagSetOutputField != 0 { + outputSet.StringVar(&StringVar{ + Name: "field", + Target: &c.flagField, + Default: "", + Completion: complete.PredictAnything, + Usage: "Print only the field with the given name. Specifying " + + "this option will take precedence over other formatting " + + "directives. The result will not have a trailing newline " + + "making it ideal for piping to other processes.", + }) + } + + if bit&FlagSetOutputFormat != 0 { + outputSet.StringVar(&StringVar{ + Name: "format", + Target: &c.flagFormat, + Default: "table", + EnvVar: EnvVaultFormat, + Completion: complete.PredictSet("table", "json", "yaml", "pretty", "raw"), + Usage: `Print the output in the given format. Valid formats + are "table", "json", "yaml", or "pretty". "raw" is allowed + for 'vault read' operations only.`, + }) + } + + if bit&FlagSetOutputDetailed != 0 { + outputSet.BoolVar(&BoolVar{ + Name: "detailed", + Target: &c.flagDetailed, + Default: false, + EnvVar: EnvVaultDetailed, + Usage: "Enables additional metadata during some operations", + }) + } + } + + c.flags = set + }) + + return c.flags +} + +// FlagSets is a group of flag sets. +type FlagSets struct { + flagSets []*FlagSet + mainSet *flag.FlagSet + hiddens map[string]struct{} + completions complete.Flags + ui cli.Ui +} + +// NewFlagSets creates a new flag sets. +func NewFlagSets(ui cli.Ui) *FlagSets { + mainSet := flag.NewFlagSet("", flag.ContinueOnError) + + // Errors and usage are controlled by the CLI. + mainSet.Usage = func() {} + mainSet.SetOutput(ioutil.Discard) + + return &FlagSets{ + flagSets: make([]*FlagSet, 0, 6), + mainSet: mainSet, + hiddens: make(map[string]struct{}), + completions: complete.Flags{}, + ui: ui, + } +} + +// NewFlagSet creates a new flag set from the given flag sets. +func (f *FlagSets) NewFlagSet(name string) *FlagSet { + flagSet := NewFlagSet(name) + flagSet.mainSet = f.mainSet + flagSet.completions = f.completions + f.flagSets = append(f.flagSets, flagSet) + return flagSet +} + +// Completions returns the completions for this flag set. +func (f *FlagSets) Completions() complete.Flags { + return f.completions +} + +type ( + ParseOptions interface{} + ParseOptionAllowRawFormat bool + DisableDisplayFlagWarning bool +) + +// Parse parses the given flags, returning any errors. +// Warnings, if any, regarding the arguments format are sent to stdout +func (f *FlagSets) Parse(args []string, opts ...ParseOptions) error { + err := f.mainSet.Parse(args) + + displayFlagWarningsDisabled := false + for _, opt := range opts { + if value, ok := opt.(DisableDisplayFlagWarning); ok { + displayFlagWarningsDisabled = bool(value) + } + } + if !displayFlagWarningsDisabled { + warnings := generateFlagWarnings(f.Args()) + if warnings != "" && Format(f.ui) == "table" { + f.ui.Warn(warnings) + } + } + + if err != nil { + return err + } + + // Now surface any other errors. + return generateFlagErrors(f, opts...) +} + +// Parsed reports whether the command-line flags have been parsed. 
+func (f *FlagSets) Parsed() bool { + return f.mainSet.Parsed() +} + +// Args returns the remaining args after parsing. +func (f *FlagSets) Args() []string { + return f.mainSet.Args() +} + +// Visit visits the flags in lexicographical order, calling fn for each. It +// visits only those flags that have been set. +func (f *FlagSets) Visit(fn func(*flag.Flag)) { + f.mainSet.Visit(fn) +} + +// Help builds custom help for this command, grouping by flag set. +func (f *FlagSets) Help() string { + var out bytes.Buffer + + for _, set := range f.flagSets { + printFlagTitle(&out, set.name+":") + set.VisitAll(func(f *flag.Flag) { + // Skip any hidden flags + if v, ok := f.Value.(FlagVisibility); ok && v.Hidden() { + return + } + printFlagDetail(&out, f) + }) + } + + return strings.TrimRight(out.String(), "\n") +} + +// FlagSet is a grouped wrapper around a real flag set and a grouped flag set. +type FlagSet struct { + name string + flagSet *flag.FlagSet + mainSet *flag.FlagSet + completions complete.Flags +} + +// NewFlagSet creates a new flag set. +func NewFlagSet(name string) *FlagSet { + return &FlagSet{ + name: name, + flagSet: flag.NewFlagSet(name, flag.ContinueOnError), + } +} + +// Name returns the name of this flag set. +func (f *FlagSet) Name() string { + return f.name +} + +func (f *FlagSet) Visit(fn func(*flag.Flag)) { + f.flagSet.Visit(fn) +} + +func (f *FlagSet) VisitAll(fn func(*flag.Flag)) { + f.flagSet.VisitAll(fn) +} + +// printFlagTitle prints a consistently-formatted title to the given writer. +func printFlagTitle(w io.Writer, s string) { + fmt.Fprintf(w, "%s\n\n", s) +} + +// printFlagDetail prints a single flag to the given writer. +func printFlagDetail(w io.Writer, f *flag.Flag) { + // Check if the flag is hidden - do not print any flag detail or help output + // if it is hidden. + if h, ok := f.Value.(FlagVisibility); ok && h.Hidden() { + return + } + + // Check for a detailed example + example := "" + if t, ok := f.Value.(FlagExample); ok { + example = t.Example() + } + + if example != "" { + fmt.Fprintf(w, " -%s=<%s>\n", f.Name, example) + } else { + fmt.Fprintf(w, " -%s\n", f.Name) + } + + usage := reRemoveWhitespace.ReplaceAllString(f.Usage, " ") + indented := wrapAtLengthWithPadding(usage, 6) + fmt.Fprintf(w, "%s\n\n", indented) +} diff --git a/command/base_flags.go b/command/base_flags.go new file mode 100644 index 0000000..d2acf29 --- /dev/null +++ b/command/base_flags.go @@ -0,0 +1,991 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "errors" + "flag" + "fmt" + "math" + "os" + "sort" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/posener/complete" +) + +// FlagExample is an interface which declares an example value. +type FlagExample interface { + Example() string +} + +// FlagVisibility is an interface which declares whether a flag should be +// hidden from help and completions. This is usually used for deprecations +// on "internal-only" flags. +type FlagVisibility interface { + Hidden() bool +} + +// FlagBool is an interface which boolean flags implement. +type FlagBool interface { + IsBoolFlag() bool +} + +// BoolPtr is a bool which is aware if it has been set. 
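+// This lets a flag distinguish "never provided" from "explicitly false",
+// which a plain bool cannot. A minimal illustrative sketch (hypothetical
+// variable, not upstream code):
+//
+//	var keep BoolPtr
+//	keep.IsSet() // false: the flag was never given
+//	_ = keep.Set("false")
+//	keep.IsSet() // true: the user said something...
+//	keep.Get()   // false: ...and it was "false"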
+type BoolPtr struct {
+	v *bool
+}
+
+func (b *BoolPtr) Set(v string) error {
+	val, err := strconv.ParseBool(v)
+	if err != nil {
+		return err
+	}
+
+	if b.v == nil {
+		b.v = new(bool)
+	}
+	*b.v = val
+
+	return nil
+}
+
+func (b *BoolPtr) IsSet() bool {
+	return b.v != nil
+}
+
+func (b *BoolPtr) Get() bool {
+	if b.v == nil {
+		return false
+	}
+	return *b.v
+}
+
+func (b *BoolPtr) String() string {
+	var current bool
+	if b.v != nil {
+		current = *(b.v)
+	}
+	return fmt.Sprintf("%v", current)
+}
+
+type boolPtrValue struct {
+	hidden bool
+	target *BoolPtr
+}
+
+func newBoolPtrValue(def *bool, target *BoolPtr, hidden bool) *boolPtrValue {
+	val := &boolPtrValue{
+		hidden: hidden,
+		target: target,
+	}
+	if def != nil {
+		_ = val.target.Set(strconv.FormatBool(*def))
+	}
+	return val
+}
+
+func (b *boolPtrValue) IsBoolFlag() bool {
+	return true
+}
+
+func (b *boolPtrValue) Set(s string) error {
+	if b.target == nil {
+		b.target = new(BoolPtr)
+	}
+	return b.target.Set(s)
+}
+
+func (b *boolPtrValue) Get() interface{} { return *b.target }
+func (b *boolPtrValue) String() string { return b.target.String() }
+func (b *boolPtrValue) Example() string { return "*bool" }
+func (b *boolPtrValue) Hidden() bool { return b.hidden }
+
+type BoolPtrVar struct {
+	Name       string
+	Aliases    []string
+	Usage      string
+	Hidden     bool
+	EnvVar     string
+	Default    *bool
+	Target     *BoolPtr
+	Completion complete.Predictor
+}
+
+func (f *FlagSet) BoolPtrVar(i *BoolPtrVar) {
+	def := i.Default
+	if v, exist := os.LookupEnv(i.EnvVar); exist {
+		if b, err := strconv.ParseBool(v); err == nil {
+			if def == nil {
+				def = new(bool)
+			}
+			*def = b
+		}
+	}
+
+	f.VarFlag(&VarFlag{
+		Name:       i.Name,
+		Aliases:    i.Aliases,
+		Usage:      i.Usage,
+		Value:      newBoolPtrValue(def, i.Target, i.Hidden),
+		Completion: i.Completion,
+	})
+}
+
+// -- BoolVar and boolValue
+type BoolVar struct {
+	Name       string
+	Aliases    []string
+	Usage      string
+	Default    bool
+	Hidden     bool
+	EnvVar     string
+	Target     *bool
+	Completion complete.Predictor
+}
+
+func (f *FlagSet) BoolVar(i *BoolVar) {
+	def := i.Default
+	if v, exist := os.LookupEnv(i.EnvVar); exist {
+		if b, err := strconv.ParseBool(v); err == nil {
+			def = b
+		}
+	}
+
+	f.VarFlag(&VarFlag{
+		Name:       i.Name,
+		Aliases:    i.Aliases,
+		Usage:      i.Usage,
+		Default:    strconv.FormatBool(i.Default),
+		EnvVar:     i.EnvVar,
+		Value:      newBoolValue(def, i.Target, i.Hidden),
+		Completion: i.Completion,
+	})
+}
+
+type boolValue struct {
+	hidden bool
+	target *bool
+}
+
+func newBoolValue(def bool, target *bool, hidden bool) *boolValue {
+	*target = def
+
+	return &boolValue{
+		hidden: hidden,
+		target: target,
+	}
+}
+
+func (b *boolValue) Set(s string) error {
+	v, err := strconv.ParseBool(s)
+	if err != nil {
+		return err
+	}
+
+	*b.target = v
+	return nil
+}
+
+func (b *boolValue) Get() interface{} { return *b.target }
+func (b *boolValue) String() string { return strconv.FormatBool(*b.target) }
+func (b *boolValue) Example() string { return "" }
+func (b *boolValue) Hidden() bool { return b.hidden }
+func (b *boolValue) IsBoolFlag() bool { return true }
+
+// -- IntVar and intValue
+type IntVar struct {
+	Name       string
+	Aliases    []string
+	Usage      string
+	Default    int
+	Hidden     bool
+	EnvVar     string
+	Target     *int
+	Completion complete.Predictor
+}
+
+func (f *FlagSet) IntVar(i *IntVar) {
+	initial := i.Default
+	if v, exist := os.LookupEnv(i.EnvVar); exist {
+		if i, err := parseutil.SafeParseInt(v); err == nil {
+			initial = i
+		}
+	}
+
+	def := ""
+	if i.Default != 0 {
+		def = strconv.FormatInt(int64(i.Default), 10)
+	}
+
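+	// Leaving def empty for a zero default keeps VarFlag from appending
+	// "The default is 0." to the generated usage text; only meaningful
+	// defaults are advertised in help output.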
+	f.VarFlag(&VarFlag{
+		Name:       i.Name,
+		Aliases:    i.Aliases,
+		Usage:      i.Usage,
+		Default:    def,
+		EnvVar:     i.EnvVar,
+		Value:      newIntValue(initial, i.Target, i.Hidden),
+		Completion: i.Completion,
+	})
+}
+
+type intValue struct {
+	hidden bool
+	target *int
+}
+
+func newIntValue(def int, target *int, hidden bool) *intValue {
+	*target = def
+	return &intValue{
+		hidden: hidden,
+		target: target,
+	}
+}
+
+func (i *intValue) Set(s string) error {
+	v, err := parseutil.SafeParseInt(s)
+	if err != nil {
+		return err
+	}
+	if v >= math.MinInt && v <= math.MaxInt {
+		*i.target = v
+		return nil
+	}
+	return fmt.Errorf("Incorrect conversion of a 64-bit integer to a lower bit size. Value %d is not within bounds for int", v)
+}
+
+func (i *intValue) Get() interface{} { return *i.target }
+func (i *intValue) String() string { return strconv.Itoa(*i.target) }
+func (i *intValue) Example() string { return "int" }
+func (i *intValue) Hidden() bool { return i.hidden }
+
+// -- Int64Var and int64Value
+type Int64Var struct {
+	Name       string
+	Aliases    []string
+	Usage      string
+	Default    int64
+	Hidden     bool
+	EnvVar     string
+	Target     *int64
+	Completion complete.Predictor
+}
+
+func (f *FlagSet) Int64Var(i *Int64Var) {
+	initial := i.Default
+	if v, exist := os.LookupEnv(i.EnvVar); exist {
+		if i, err := strconv.ParseInt(v, 0, 64); err == nil {
+			initial = i
+		}
+	}
+
+	def := ""
+	if i.Default != 0 {
+		def = strconv.FormatInt(int64(i.Default), 10)
+	}
+
+	f.VarFlag(&VarFlag{
+		Name:       i.Name,
+		Aliases:    i.Aliases,
+		Usage:      i.Usage,
+		Default:    def,
+		EnvVar:     i.EnvVar,
+		Value:      newInt64Value(initial, i.Target, i.Hidden),
+		Completion: i.Completion,
+	})
+}
+
+type int64Value struct {
+	hidden bool
+	target *int64
+}
+
+func newInt64Value(def int64, target *int64, hidden bool) *int64Value {
+	*target = def
+	return &int64Value{
+		hidden: hidden,
+		target: target,
+	}
+}
+
+func (i *int64Value) Set(s string) error {
+	v, err := strconv.ParseInt(s, 0, 64)
+	if err != nil {
+		return err
+	}
+
+	*i.target = v
+	return nil
+}
+
+func (i *int64Value) Get() interface{} { return int64(*i.target) }
+func (i *int64Value) String() string { return strconv.FormatInt(int64(*i.target), 10) }
+func (i *int64Value) Example() string { return "int" }
+func (i *int64Value) Hidden() bool { return i.hidden }
+
+// -- UintVar and uintValue
+type UintVar struct {
+	Name       string
+	Aliases    []string
+	Usage      string
+	Default    uint
+	Hidden     bool
+	EnvVar     string
+	Target     *uint
+	Completion complete.Predictor
+}
+
+func (f *FlagSet) UintVar(i *UintVar) {
+	initial := i.Default
+	if v, exist := os.LookupEnv(i.EnvVar); exist {
+		if i, err := strconv.ParseUint(v, 0, 64); err == nil {
+			initial = uint(i)
+		}
+	}
+
+	def := ""
+	if i.Default != 0 {
+		def = strconv.FormatUint(uint64(i.Default), 10)
+	}
+
+	f.VarFlag(&VarFlag{
+		Name:       i.Name,
+		Aliases:    i.Aliases,
+		Usage:      i.Usage,
+		Default:    def,
+		EnvVar:     i.EnvVar,
+		Value:      newUintValue(initial, i.Target, i.Hidden),
+		Completion: i.Completion,
+	})
+}
+
+type uintValue struct {
+	hidden bool
+	target *uint
+}
+
+func newUintValue(def uint, target *uint, hidden bool) *uintValue {
+	*target = def
+	return &uintValue{
+		hidden: hidden,
+		target: target,
+	}
+}
+
+func (i *uintValue) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 64)
+	if err != nil {
+		return err
+	}
+	if v <= math.MaxUint {
+		*i.target = uint(v)
+		return nil
+	}
+
+	return fmt.Errorf("Incorrect conversion of a 64-bit integer to a lower bit size. Value %d is not within bounds for uint", v)
+}
+
+func (i *uintValue) Get() interface{} { return uint(*i.target) }
+func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i.target), 10) }
+func (i *uintValue) Example() string { return "uint" }
+func (i *uintValue) Hidden() bool { return i.hidden }
+
+// -- Uint64Var and uint64Value
+type Uint64Var struct {
+	Name       string
+	Aliases    []string
+	Usage      string
+	Default    uint64
+	Hidden     bool
+	EnvVar     string
+	Target     *uint64
+	Completion complete.Predictor
+}
+
+func (f *FlagSet) Uint64Var(i *Uint64Var) {
+	initial := i.Default
+	if v, exist := os.LookupEnv(i.EnvVar); exist {
+		if i, err := strconv.ParseUint(v, 0, 64); err == nil {
+			initial = i
+		}
+	}
+
+	def := ""
+	if i.Default != 0 {
+		def = strconv.FormatUint(i.Default, 10)
+	}
+
+	f.VarFlag(&VarFlag{
+		Name:       i.Name,
+		Aliases:    i.Aliases,
+		Usage:      i.Usage,
+		Default:    def,
+		EnvVar:     i.EnvVar,
+		Value:      newUint64Value(initial, i.Target, i.Hidden),
+		Completion: i.Completion,
+	})
+}
+
+type uint64Value struct {
+	hidden bool
+	target *uint64
+}
+
+func newUint64Value(def uint64, target *uint64, hidden bool) *uint64Value {
+	*target = def
+	return &uint64Value{
+		hidden: hidden,
+		target: target,
+	}
+}
+
+func (i *uint64Value) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 64)
+	if err != nil {
+		return err
+	}
+
+	*i.target = v
+	return nil
+}
+
+func (i *uint64Value) Get() interface{} { return uint64(*i.target) }
+func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i.target), 10) }
+func (i *uint64Value) Example() string { return "uint" }
+func (i *uint64Value) Hidden() bool { return i.hidden }
+
+// -- StringVar and stringValue
+type StringVar struct {
+	Name       string
+	Aliases    []string
+	Usage      string
+	Default    string
+	Hidden     bool
+	EnvVar     string
+	Target     *string
+	Completion complete.Predictor
+}
+
+func (f *FlagSet) StringVar(i *StringVar) {
+	initial := i.Default
+	if v, exist := os.LookupEnv(i.EnvVar); exist {
+		initial = v
+	}
+
+	def := ""
+	if i.Default != "" {
+		def = i.Default
+	}
+
+	f.VarFlag(&VarFlag{
+		Name:       i.Name,
+		Aliases:    i.Aliases,
+		Usage:      i.Usage,
+		Default:    def,
+		EnvVar:     i.EnvVar,
+		Value:      newStringValue(initial, i.Target, i.Hidden),
+		Completion: i.Completion,
+	})
+}
+
+type stringValue struct {
+	hidden bool
+	target *string
+}
+
+func newStringValue(def string, target *string, hidden bool) *stringValue {
+	*target = def
+	return &stringValue{
+		hidden: hidden,
+		target: target,
+	}
+}
+
+func (s *stringValue) Set(val string) error {
+	*s.target = val
+	return nil
+}
+
+func (s *stringValue) Get() interface{} { return *s.target }
+func (s *stringValue) String() string { return *s.target }
+func (s *stringValue) Example() string { return "string" }
+func (s *stringValue) Hidden() bool { return s.hidden }
+
+// -- Float64Var and float64Value
+type Float64Var struct {
+	Name       string
+	Aliases    []string
+	Usage      string
+	Default    float64
+	Hidden     bool
+	EnvVar     string
+	Target     *float64
+	Completion complete.Predictor
+}
+
+func (f *FlagSet) Float64Var(i *Float64Var) {
+	initial := i.Default
+	if v, exist := os.LookupEnv(i.EnvVar); exist {
+		if i, err := strconv.ParseFloat(v, 64); err == nil {
+			initial = i
+		}
+	}
+
+	def := ""
+	if i.Default != 0 {
+		def = strconv.FormatFloat(i.Default, 'e', -1, 64)
+	}
+
+	f.VarFlag(&VarFlag{
+		Name:       i.Name,
+		Aliases:    i.Aliases,
+		Usage:      i.Usage,
+		Default:    def,
+		EnvVar:     i.EnvVar,
+		Value:      newFloat64Value(initial, i.Target, i.Hidden),
+		Completion: i.Completion,
+	})
+}
+
+type float64Value
struct { + hidden bool + target *float64 +} + +func newFloat64Value(def float64, target *float64, hidden bool) *float64Value { + *target = def + return &float64Value{ + hidden: hidden, + target: target, + } +} + +func (f *float64Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 64) + if err != nil { + return err + } + + *f.target = v + return nil +} + +func (f *float64Value) Get() interface{} { return float64(*f.target) } +func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f.target), 'g', -1, 64) } +func (f *float64Value) Example() string { return "float" } +func (f *float64Value) Hidden() bool { return f.hidden } + +// -- DurationVar and durationValue +type DurationVar struct { + Name string + Aliases []string + Usage string + Default time.Duration + Hidden bool + EnvVar string + Target *time.Duration + Completion complete.Predictor +} + +func (f *FlagSet) DurationVar(i *DurationVar) { + initial := i.Default + if v, exist := os.LookupEnv(i.EnvVar); exist { + if d, err := parseutil.ParseDurationSecond(v); err == nil { + initial = d + } + } + + def := "" + if i.Default != 0 { + def = i.Default.String() + } + + f.VarFlag(&VarFlag{ + Name: i.Name, + Aliases: i.Aliases, + Usage: i.Usage, + Default: def, + EnvVar: i.EnvVar, + Value: newDurationValue(initial, i.Target, i.Hidden), + Completion: i.Completion, + }) +} + +type durationValue struct { + hidden bool + target *time.Duration +} + +func newDurationValue(def time.Duration, target *time.Duration, hidden bool) *durationValue { + *target = def + return &durationValue{ + hidden: hidden, + target: target, + } +} + +func (d *durationValue) Set(s string) error { + // Maintain bc for people specifying "system" as the value. + if s == "system" { + s = "-1" + } + + v, err := parseutil.ParseDurationSecond(s) + if err != nil { + return err + } + *d.target = v + return nil +} + +func (d *durationValue) Get() interface{} { return time.Duration(*d.target) } +func (d *durationValue) String() string { return (*d.target).String() } +func (d *durationValue) Example() string { return "duration" } +func (d *durationValue) Hidden() bool { return d.hidden } + +// appendDurationSuffix is used as a backwards-compat tool for assuming users +// meant "seconds" when they do not provide a suffixed duration value. 
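+// For example, "30" becomes "30s", while "30s", "5m", and "1h" pass through
+// unchanged. Only the final character is inspected, so a compound value such
+// as "1h30m" is also left as-is.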
+func appendDurationSuffix(s string) string { + if strings.HasSuffix(s, "s") || strings.HasSuffix(s, "m") || strings.HasSuffix(s, "h") { + return s + } + return s + "s" +} + +// -- StringSliceVar and stringSliceValue +type StringSliceVar struct { + Name string + Aliases []string + Usage string + Default []string + Hidden bool + EnvVar string + Target *[]string + Completion complete.Predictor +} + +func (f *FlagSet) StringSliceVar(i *StringSliceVar) { + initial := i.Default + if v, exist := os.LookupEnv(i.EnvVar); exist { + parts := strings.Split(v, ",") + for i := range parts { + parts[i] = strings.TrimSpace(parts[i]) + } + initial = parts + } + + def := "" + if i.Default != nil { + def = strings.Join(i.Default, ",") + } + + f.VarFlag(&VarFlag{ + Name: i.Name, + Aliases: i.Aliases, + Usage: i.Usage, + Default: def, + EnvVar: i.EnvVar, + Value: newStringSliceValue(initial, i.Target, i.Hidden), + Completion: i.Completion, + }) +} + +type stringSliceValue struct { + hidden bool + target *[]string +} + +func newStringSliceValue(def []string, target *[]string, hidden bool) *stringSliceValue { + *target = def + return &stringSliceValue{ + hidden: hidden, + target: target, + } +} + +func (s *stringSliceValue) Set(val string) error { + *s.target = append(*s.target, strings.TrimSpace(val)) + return nil +} + +func (s *stringSliceValue) Get() interface{} { return *s.target } +func (s *stringSliceValue) String() string { return strings.Join(*s.target, ",") } +func (s *stringSliceValue) Example() string { return "string" } +func (s *stringSliceValue) Hidden() bool { return s.hidden } + +// -- StringMapVar and stringMapValue +type StringMapVar struct { + Name string + Aliases []string + Usage string + Default map[string]string + Hidden bool + Target *map[string]string + Completion complete.Predictor +} + +func (f *FlagSet) StringMapVar(i *StringMapVar) { + def := "" + if i.Default != nil { + def = mapToKV(i.Default) + } + + f.VarFlag(&VarFlag{ + Name: i.Name, + Aliases: i.Aliases, + Usage: i.Usage, + Default: def, + Value: newStringMapValue(i.Default, i.Target, i.Hidden), + Completion: i.Completion, + }) +} + +type stringMapValue struct { + hidden bool + target *map[string]string +} + +func newStringMapValue(def map[string]string, target *map[string]string, hidden bool) *stringMapValue { + *target = def + return &stringMapValue{ + hidden: hidden, + target: target, + } +} + +func (s *stringMapValue) Set(val string) error { + idx := strings.Index(val, "=") + if idx == -1 { + return fmt.Errorf("missing = in KV pair: %q", val) + } + + if *s.target == nil { + *s.target = make(map[string]string) + } + + k, v := val[0:idx], val[idx+1:] + (*s.target)[k] = v + return nil +} + +func (s *stringMapValue) Get() interface{} { return *s.target } +func (s *stringMapValue) String() string { return mapToKV(*s.target) } +func (s *stringMapValue) Example() string { return "key=value" } +func (s *stringMapValue) Hidden() bool { return s.hidden } + +func mapToKV(m map[string]string) string { + list := make([]string, 0, len(m)) + for k := range m { + list = append(list, k) + } + sort.Strings(list) + + for i, k := range list { + list[i] = k + "=" + m[k] + } + + return strings.Join(list, ",") +} + +// -- VarFlag +type VarFlag struct { + Name string + Aliases []string + Usage string + Default string + EnvVar string + Value flag.Value + Completion complete.Predictor +} + +func (f *FlagSet) VarFlag(i *VarFlag) { + // If the flag is marked as hidden, just add it to the set and return to + // avoid unnecessary computations here. 
We do not want to add completions or + // generate help output for hidden flags. + if v, ok := i.Value.(FlagVisibility); ok && v.Hidden() { + f.Var(i.Value, i.Name, "") + return + } + + // Calculate the full usage + usage := i.Usage + + if len(i.Aliases) > 0 { + sentence := make([]string, len(i.Aliases)) + for i, a := range i.Aliases { + sentence[i] = fmt.Sprintf(`"-%s"`, a) + } + + aliases := "" + switch len(sentence) { + case 0: + // impossible... + case 1: + aliases = sentence[0] + case 2: + aliases = sentence[0] + " and " + sentence[1] + default: + sentence[len(sentence)-1] = "and " + sentence[len(sentence)-1] + aliases = strings.Join(sentence, ", ") + } + + usage += fmt.Sprintf(" This is aliased as %s.", aliases) + } + + if i.Default != "" { + usage += fmt.Sprintf(" The default is %s.", i.Default) + } + + if i.EnvVar != "" { + usage += fmt.Sprintf(" This can also be specified via the %s "+ + "environment variable.", i.EnvVar) + } + + // Add aliases to the main set + for _, a := range i.Aliases { + f.mainSet.Var(i.Value, a, "") + } + + f.Var(i.Value, i.Name, usage) + f.completions["-"+i.Name] = i.Completion +} + +// Var is a lower-level API for adding something to the flags. It should be used +// with caution, since it bypasses all validation. Consider VarFlag instead. +func (f *FlagSet) Var(value flag.Value, name, usage string) { + f.mainSet.Var(value, name, usage) + f.flagSet.Var(value, name, usage) +} + +// -- TimeVar and timeValue +type TimeVar struct { + Name string + Aliases []string + Usage string + Default time.Time + Hidden bool + EnvVar string + Target *time.Time + Completion complete.Predictor + Formats TimeFormat +} + +// Identify the allowable formats, identified by the minimum +// precision accepted. +// TODO: move this somewhere where it can be re-used for the API. +type TimeFormat int + +const ( + TimeVar_EpochSecond TimeFormat = 1 << iota + TimeVar_RFC3339Nano + TimeVar_RFC3339Second + TimeVar_Day + TimeVar_Month +) + +// Default value to use +const TimeVar_TimeOrDay TimeFormat = TimeVar_EpochSecond | TimeVar_RFC3339Nano | TimeVar_RFC3339Second | TimeVar_Day + +// parseTimeAlternatives attempts several different allowable variants +// of the time field. +func parseTimeAlternatives(input string, allowedFormats TimeFormat) (time.Time, error) { + // The RFC3339 formats require the inclusion of a time zone. + if allowedFormats&TimeVar_RFC3339Nano != 0 { + t, err := time.Parse(time.RFC3339Nano, input) + if err == nil { + return t, nil + } + } + + if allowedFormats&TimeVar_RFC3339Second != 0 { + t, err := time.Parse(time.RFC3339, input) + if err == nil { + return t, nil + } + } + + if allowedFormats&TimeVar_Day != 0 { + t, err := time.Parse("2006-01-02", input) + if err == nil { + return t, nil + } + } + + if allowedFormats&TimeVar_Month != 0 { + t, err := time.Parse("2006-01", input) + if err == nil { + return t, nil + } + } + + if allowedFormats&TimeVar_EpochSecond != 0 { + i, err := strconv.ParseInt(input, 10, 64) + if err == nil { + // If a customer enters 20200101 we don't want + // to parse that as an epoch time. + // This arbitrarily-chosen cutoff is around year 2000. 
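+			// (946000000 is 1999-12-24T01:46:40Z; an input such as
+			// 20200101 reads as roughly 1970-08-22 and falls below
+			// the cutoff, so it is rejected as an epoch.)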
+ if i > 946000000 { + return time.Unix(i, 0), nil + } + } + } + + return time.Time{}, errors.New("Could not parse as absolute time.") +} + +func (f *FlagSet) TimeVar(i *TimeVar) { + initial := i.Default + if v, exist := os.LookupEnv(i.EnvVar); exist { + if d, err := parseTimeAlternatives(v, i.Formats); err == nil { + initial = d + } + } + + def := "" + if !i.Default.IsZero() { + def = i.Default.String() + } + + f.VarFlag(&VarFlag{ + Name: i.Name, + Aliases: i.Aliases, + Usage: i.Usage, + Default: def, + EnvVar: i.EnvVar, + Value: newTimeValue(initial, i.Target, i.Hidden, i.Formats), + Completion: i.Completion, + }) +} + +type timeValue struct { + hidden bool + target *time.Time + formats TimeFormat +} + +func newTimeValue(def time.Time, target *time.Time, hidden bool, f TimeFormat) *timeValue { + *target = def + return &timeValue{ + hidden: hidden, + target: target, + formats: f, + } +} + +func (d *timeValue) Set(s string) error { + v, err := parseTimeAlternatives(s, d.formats) + if err != nil { + return err + } + *d.target = v + return nil +} + +func (d *timeValue) Get() interface{} { return *d.target } +func (d *timeValue) String() string { return (*d.target).String() } +func (d *timeValue) Example() string { return "time" } +func (d *timeValue) Hidden() bool { return d.hidden } diff --git a/command/base_flags_test.go b/command/base_flags_test.go new file mode 100644 index 0000000..580e163 --- /dev/null +++ b/command/base_flags_test.go @@ -0,0 +1,157 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func Test_BoolPtr(t *testing.T) { + var boolPtr BoolPtr + value := newBoolPtrValue(nil, &boolPtr, false) + + require.False(t, boolPtr.IsSet()) + require.False(t, boolPtr.Get()) + + err := value.Set("false") + require.NoError(t, err) + + require.True(t, boolPtr.IsSet()) + require.False(t, boolPtr.Get()) + + err = value.Set("true") + require.NoError(t, err) + + require.True(t, boolPtr.IsSet()) + require.True(t, boolPtr.Get()) + + var boolPtrFalseDefault BoolPtr + _ = newBoolPtrValue(new(bool), &boolPtrFalseDefault, false) + + require.True(t, boolPtrFalseDefault.IsSet()) + require.False(t, boolPtrFalseDefault.Get()) + + var boolPtrTrueDefault BoolPtr + defTrue := true + _ = newBoolPtrValue(&defTrue, &boolPtrTrueDefault, false) + + require.True(t, boolPtrTrueDefault.IsSet()) + require.True(t, boolPtrTrueDefault.Get()) + + var boolPtrHidden BoolPtr + value = newBoolPtrValue(nil, &boolPtrHidden, true) + require.Equal(t, true, value.Hidden()) +} + +func Test_TimeParsing(t *testing.T) { + var zeroTime time.Time + + testCases := []struct { + Input string + Formats TimeFormat + Valid bool + Expected time.Time + }{ + { + "2020-08-24", + TimeVar_TimeOrDay, + true, + time.Date(2020, 8, 24, 0, 0, 0, 0, time.UTC), + }, + { + "2099-09", + TimeVar_TimeOrDay, + false, + zeroTime, + }, + { + "2099-09", + TimeVar_TimeOrDay | TimeVar_Month, + true, + time.Date(2099, 9, 1, 0, 0, 0, 0, time.UTC), + }, + { + "2021-01-02T03:04:05-02:00", + TimeVar_TimeOrDay, + true, + time.Date(2021, 1, 2, 5, 4, 5, 0, time.UTC), + }, + { + "2021-01-02T03:04:05", + TimeVar_TimeOrDay, + false, // Missing timezone not supported + time.Date(2021, 1, 2, 3, 4, 5, 0, time.UTC), + }, + { + "2021-01-02T03:04:05+02:00", + TimeVar_TimeOrDay, + true, + time.Date(2021, 1, 2, 1, 4, 5, 0, time.UTC), + }, + { + "1598313593", + TimeVar_TimeOrDay, + true, + time.Date(2020, 8, 24, 23, 59, 53, 0, time.UTC), + }, + { + "2037", + 
TimeVar_TimeOrDay, + false, + zeroTime, + }, + { + "20201212", + TimeVar_TimeOrDay, + false, + zeroTime, + }, + { + "9999999999999999999999999999999999999999999999", + TimeVar_TimeOrDay, + false, + zeroTime, + }, + { + "2021-13-02T03:04:05-02:00", + TimeVar_TimeOrDay, + false, + zeroTime, + }, + { + "2021-12-02T24:04:05+00:00", + TimeVar_TimeOrDay, + false, + zeroTime, + }, + { + "2021-01-02T03:04:05.234567890Z", + TimeVar_TimeOrDay, + true, + time.Date(2021, 1, 2, 3, 4, 5, 234567890, time.UTC), + }, + } + + for _, tc := range testCases { + var result time.Time + timeVal := newTimeValue(zeroTime, &result, false, tc.Formats) + err := timeVal.Set(tc.Input) + if err == nil && !tc.Valid { + t.Errorf("Time %q parsed without error as %v, but is not valid", tc.Input, result) + continue + } + if err != nil { + if tc.Valid { + t.Errorf("Time %q parsed as error, but is valid", tc.Input) + } + continue + } + if !tc.Expected.Equal(result) { + t.Errorf("Time %q parsed incorrectly, expected %v but got %v", tc.Input, tc.Expected.UTC(), result.UTC()) + } + } +} diff --git a/command/base_helpers.go b/command/base_helpers.go new file mode 100644 index 0000000..2595dc5 --- /dev/null +++ b/command/base_helpers.go @@ -0,0 +1,344 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "strings" + "time" + + kvbuilder "github.com/hashicorp/go-secure-stdlib/kv-builder" + "github.com/hashicorp/vault/api" + "github.com/kr/text" + homedir "github.com/mitchellh/go-homedir" + "github.com/mitchellh/mapstructure" + "github.com/pkg/errors" + "github.com/ryanuber/columnize" +) + +// extractListData reads the secret and returns a typed list of data and a +// boolean indicating whether the extraction was successful. +func extractListData(secret *api.Secret) ([]interface{}, bool) { + if secret == nil || secret.Data == nil { + return nil, false + } + + k, ok := secret.Data["keys"] + if !ok || k == nil { + return nil, false + } + + i, ok := k.([]interface{}) + return i, ok +} + +// sanitizePath removes any leading or trailing things from a "path". +func sanitizePath(s string) string { + return ensureNoTrailingSlash(ensureNoLeadingSlash(s)) +} + +// ensureTrailingSlash ensures the given string has a trailing slash. +func ensureTrailingSlash(s string) string { + s = strings.TrimSpace(s) + if s == "" { + return "" + } + + for len(s) > 0 && s[len(s)-1] != '/' { + s = s + "/" + } + return s +} + +// ensureNoTrailingSlash ensures the given string does not have a trailing slash. +func ensureNoTrailingSlash(s string) string { + s = strings.TrimSpace(s) + if s == "" { + return "" + } + + for len(s) > 0 && s[len(s)-1] == '/' { + s = s[:len(s)-1] + } + return s +} + +// ensureNoLeadingSlash ensures the given string does not have a leading slash. +func ensureNoLeadingSlash(s string) string { + s = strings.TrimSpace(s) + if s == "" { + return "" + } + + for len(s) > 0 && s[0] == '/' { + s = s[1:] + } + return s +} + +// columnOuput prints the list of items as a table with no headers. +func columnOutput(list []string, c *columnize.Config) string { + if len(list) == 0 { + return "" + } + + if c == nil { + c = &columnize.Config{} + } + if c.Glue == "" { + c.Glue = " " + } + if c.Empty == "" { + c.Empty = "n/a" + } + + return columnize.Format(list, c) +} + +// tableOutput prints the list of items as columns, where the first row is +// the list of headers. 
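+// For example (an illustrative sketch of the shape, not a golden value):
+//
+//	tableOutput([]string{"Key | Value", "foo | bar"}, nil)
+//
+// renders roughly as:
+//
+//	Key    Value
+//	---    -----
+//	foo    bar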
+func tableOutput(list []string, c *columnize.Config) string { + if len(list) == 0 { + return "" + } + + delim := "|" + if c != nil && c.Delim != "" { + delim = c.Delim + } + + underline := "" + headers := strings.Split(list[0], delim) + for i, h := range headers { + h = strings.TrimSpace(h) + u := strings.Repeat("-", len(h)) + + underline = underline + u + if i != len(headers)-1 { + underline = underline + delim + } + } + + list = append(list, "") + copy(list[2:], list[1:]) + list[1] = underline + + return columnOutput(list, c) +} + +// parseArgsData parses the given args in the format key=value into a map of +// the provided arguments. The given reader can also supply key=value pairs. +func parseArgsData(stdin io.Reader, args []string) (map[string]interface{}, error) { + builder := &kvbuilder.Builder{Stdin: stdin} + if err := builder.Add(args...); err != nil { + return nil, err + } + + return builder.Map(), nil +} + +// parseArgsDataString parses the args data and returns the values as strings. +// If the values cannot be represented as strings, an error is returned. +func parseArgsDataString(stdin io.Reader, args []string) (map[string]string, error) { + raw, err := parseArgsData(stdin, args) + if err != nil { + return nil, err + } + + var result map[string]string + if err := mapstructure.WeakDecode(raw, &result); err != nil { + return nil, errors.Wrap(err, "failed to convert values to strings") + } + if result == nil { + result = make(map[string]string) + } + return result, nil +} + +// parseArgsDataStringLists parses the args data and returns the values as +// string lists. If the values cannot be represented as strings, an error is +// returned. +func parseArgsDataStringLists(stdin io.Reader, args []string) (map[string][]string, error) { + raw, err := parseArgsData(stdin, args) + if err != nil { + return nil, err + } + + var result map[string][]string + if err := mapstructure.WeakDecode(raw, &result); err != nil { + return nil, errors.Wrap(err, "failed to convert values to strings") + } + return result, nil +} + +// truncateToSeconds truncates the given duration to the number of seconds. If +// the duration is less than 1s, it is returned as 0. The integer represents +// the whole number unit of seconds for the duration. +func truncateToSeconds(d time.Duration) int { + d = d.Truncate(1 * time.Second) + + // Handle the case where someone requested a ridiculously short increment - + // increments must be larger than a second. + if d < 1*time.Second { + return 0 + } + + return int(d.Seconds()) +} + +// printKeyStatus prints the KeyStatus response from the API. +func printKeyStatus(ks *api.KeyStatus) string { + return columnOutput([]string{ + fmt.Sprintf("Key Term | %d", ks.Term), + fmt.Sprintf("Install Time | %s", ks.InstallTime.UTC().Format(time.RFC822)), + fmt.Sprintf("Encryption Count | %d", ks.Encryptions), + }, nil) +} + +// expandPath takes a filepath and returns the full expanded path, accounting +// for user-relative things like ~/. +func expandPath(s string) string { + if s == "" { + return "" + } + + e, err := homedir.Expand(s) + if err != nil { + return s + } + return e +} + +// wrapAtLengthWithPadding wraps the given text at the maxLineLength, taking +// into account any provided left padding. 
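+// With maxLineLength = 78, wrapAtLengthWithPadding(s, 6) wraps s at 72
+// characters and then left-pads every resulting line with six spaces, which
+// is how printFlagDetail in command/base.go indents flag usage text.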
+func wrapAtLengthWithPadding(s string, pad int) string { + wrapped := text.Wrap(s, maxLineLength-pad) + lines := strings.Split(wrapped, "\n") + for i, line := range lines { + lines[i] = strings.Repeat(" ", pad) + line + } + return strings.Join(lines, "\n") +} + +// wrapAtLength wraps the given text to maxLineLength. +func wrapAtLength(s string) string { + return wrapAtLengthWithPadding(s, 0) +} + +// ttlToAPI converts a user-supplied ttl into an API-compatible string. If +// the TTL is 0, this returns the empty string. If the TTL is negative, this +// returns "system" to indicate to use the system values. Otherwise, the +// time.Duration ttl is used. +func ttlToAPI(d time.Duration) string { + if d == 0 { + return "" + } + + if d < 0 { + return "system" + } + + return d.String() +} + +// humanDuration prints the time duration without those pesky zeros. +func humanDuration(d time.Duration) string { + if d == 0 { + return "0s" + } + + s := d.String() + if strings.HasSuffix(s, "m0s") { + s = s[:len(s)-2] + } + if idx := strings.Index(s, "h0m"); idx > 0 { + s = s[:idx+1] + s[idx+3:] + } + return s +} + +// humanDurationInt prints the given int as if it were a time.Duration number +// of seconds. +func humanDurationInt(i interface{}) interface{} { + switch i := i.(type) { + case int: + return humanDuration(time.Duration(i) * time.Second) + case int64: + return humanDuration(time.Duration(i) * time.Second) + case json.Number: + if i, err := i.Int64(); err == nil { + return humanDuration(time.Duration(i) * time.Second) + } + } + + // If we don't know what type it is, just return the original value + return i +} + +// parseFlagFile accepts a flag value returns the contets of that value. If the +// value starts with '@', that indicates the value is a file and its content +// should be read and returned. Otherwise, the raw value is returned. +func parseFlagFile(raw string) (string, error) { + // check if the provided argument should be read from file + if len(raw) > 0 && raw[0] == '@' { + contents, err := ioutil.ReadFile(raw[1:]) + if err != nil { + return "", fmt.Errorf("error reading file: %w", err) + } + + return string(contents), nil + } + + return raw, nil +} + +func generateFlagWarnings(args []string) string { + var trailingFlags []string + for _, arg := range args { + // "-" can be used where a file is expected to denote stdin. + if !strings.HasPrefix(arg, "-") || arg == "-" { + continue + } + + isGlobalFlag := false + trimmedArg, _, _ := strings.Cut(strings.TrimLeft(arg, "-"), "=") + for _, flag := range globalFlags { + if trimmedArg == flag { + isGlobalFlag = true + } + } + if isGlobalFlag { + continue + } + + trailingFlags = append(trailingFlags, arg) + } + + if len(trailingFlags) > 0 { + return fmt.Sprintf("Command flags must be provided before positional arguments. "+ + "The following arguments will not be parsed as flags: [%s]", strings.Join(trailingFlags, ",")) + } else { + return "" + } +} + +func generateFlagErrors(f *FlagSets, opts ...ParseOptions) error { + if Format(f.ui) == "raw" { + canUseRaw := false + for _, opt := range opts { + if value, ok := opt.(ParseOptionAllowRawFormat); ok { + canUseRaw = bool(value) + } + } + + if !canUseRaw { + return fmt.Errorf("This command does not support the -format=raw option.") + } + } + + return nil +} diff --git a/command/base_helpers_test.go b/command/base_helpers_test.go new file mode 100644 index 0000000..50cd264 --- /dev/null +++ b/command/base_helpers_test.go @@ -0,0 +1,284 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "testing" + "time" +) + +func TestParseArgsData(t *testing.T) { + t.Parallel() + + t.Run("stdin_full", func(t *testing.T) { + t.Parallel() + + stdinR, stdinW := io.Pipe() + go func() { + stdinW.Write([]byte(`{"foo":"bar"}`)) + stdinW.Close() + }() + + m, err := parseArgsData(stdinR, []string{"-"}) + if err != nil { + t.Fatal(err) + } + + if v, ok := m["foo"]; !ok || v != "bar" { + t.Errorf("expected %q to be %q", v, "bar") + } + }) + + t.Run("stdin_value", func(t *testing.T) { + t.Parallel() + + stdinR, stdinW := io.Pipe() + go func() { + stdinW.Write([]byte(`bar`)) + stdinW.Close() + }() + + m, err := parseArgsData(stdinR, []string{"foo=-"}) + if err != nil { + t.Fatal(err) + } + + if v, ok := m["foo"]; !ok || v != "bar" { + t.Errorf("expected %q to be %q", v, "bar") + } + }) + + t.Run("file_full", func(t *testing.T) { + t.Parallel() + + f, err := ioutil.TempFile("", "vault") + if err != nil { + t.Fatal(err) + } + f.Write([]byte(`{"foo":"bar"}`)) + f.Close() + defer os.Remove(f.Name()) + + m, err := parseArgsData(os.Stdin, []string{"@" + f.Name()}) + if err != nil { + t.Fatal(err) + } + + if v, ok := m["foo"]; !ok || v != "bar" { + t.Errorf("expected %q to be %q", v, "bar") + } + }) + + t.Run("file_value", func(t *testing.T) { + t.Parallel() + + f, err := ioutil.TempFile("", "vault") + if err != nil { + t.Fatal(err) + } + f.Write([]byte(`bar`)) + f.Close() + defer os.Remove(f.Name()) + + m, err := parseArgsData(os.Stdin, []string{"foo=@" + f.Name()}) + if err != nil { + t.Fatal(err) + } + + if v, ok := m["foo"]; !ok || v != "bar" { + t.Errorf("expected %q to be %q", v, "bar") + } + }) + + t.Run("file_value_escaped", func(t *testing.T) { + t.Parallel() + + m, err := parseArgsData(os.Stdin, []string{`foo=\@`}) + if err != nil { + t.Fatal(err) + } + + if v, ok := m["foo"]; !ok || v != "@" { + t.Errorf("expected %q to be %q", v, "@") + } + }) +} + +func TestTruncateToSeconds(t *testing.T) { + t.Parallel() + + cases := []struct { + d time.Duration + exp int + }{ + { + 10 * time.Nanosecond, + 0, + }, + { + 10 * time.Microsecond, + 0, + }, + { + 10 * time.Millisecond, + 0, + }, + { + 1 * time.Second, + 1, + }, + { + 10 * time.Second, + 10, + }, + { + 100 * time.Second, + 100, + }, + { + 3 * time.Minute, + 180, + }, + { + 3 * time.Hour, + 10800, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.d.String(), func(t *testing.T) { + t.Parallel() + + act := truncateToSeconds(tc.d) + if act != tc.exp { + t.Errorf("expected %d to be %d", act, tc.exp) + } + }) + } +} + +func TestParseFlagFile(t *testing.T) { + t.Parallel() + + content := "some raw content" + tmpFile, err := ioutil.TempFile(os.TempDir(), "TestParseFlagFile") + if err != nil { + t.Fatalf("failed to create temporary file: %v", err) + } + + defer os.Remove(tmpFile.Name()) + + if _, err := tmpFile.WriteString(content); err != nil { + t.Fatalf("failed to write to temporary file: %v", err) + } + + cases := []struct { + value string + exp string + }{ + { + "", + "", + }, + { + content, + content, + }, + { + fmt.Sprintf("@%s", tmpFile.Name()), + content, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.value, func(t *testing.T) { + content, err := parseFlagFile(tc.value) + if err != nil { + t.Fatalf("unexpected error parsing flag value: %v", err) + } + + if content != tc.exp { + t.Fatalf("expected %s to be %s", content, tc.exp) + } + }) + } +} + +func TestArgWarnings(t *testing.T) { + t.Parallel() + + 
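+	// Each case pairs an argument list with a substring that the generated
+	// warning must contain; a bare "-" (stdin) and known global flags are
+	// deliberately exempt from the trailing-flag warning.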
cases := []struct { + args []string + expected string + }{ + { + []string{"a", "b", "c"}, + "", + }, + { + []string{"a", "-b"}, + "-b", + }, + { + []string{"a", "--b"}, + "--b", + }, + { + []string{"a-b", "-c"}, + "-c", + }, + { + []string{"a", "-b-c"}, + "-b-c", + }, + { + []string{"-a", "b"}, + "-a", + }, + { + []string{globalFlagDetailed}, + "", + }, + { + []string{"-" + globalFlagOutputCurlString + "=true"}, + "", + }, + { + []string{"--" + globalFlagFormat + "=false"}, + "", + }, + { + []string{"-x" + globalFlagDetailed}, + "-x" + globalFlagDetailed, + }, + { + []string{"--x=" + globalFlagDetailed}, + "--x=" + globalFlagDetailed, + }, + { + []string{"policy", "write", "my-policy", "-"}, + "", + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.expected, func(t *testing.T) { + warnings := generateFlagWarnings(tc.args) + if !strings.Contains(warnings, tc.expected) { + t.Fatalf("expected %s to contain %s", warnings, tc.expected) + } + }) + } +} diff --git a/command/base_predict.go b/command/base_predict.go new file mode 100644 index 0000000..ee2a771 --- /dev/null +++ b/command/base_predict.go @@ -0,0 +1,582 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "os" + "sort" + "strings" + "sync" + + "github.com/hashicorp/vault/api" + "github.com/posener/complete" +) + +type Predict struct { + client *api.Client + clientOnce sync.Once +} + +func NewPredict() *Predict { + return &Predict{} +} + +func (p *Predict) Client() *api.Client { + p.clientOnce.Do(func() { + if p.client == nil { // For tests + client, _ := api.NewClient(nil) + + if client.Token() == "" { + helper, err := DefaultTokenHelper() + if err != nil { + return + } + token, err := helper.Get() + if err != nil { + return + } + client.SetToken(token) + } + + // Turn off retries for prediction + if os.Getenv(api.EnvVaultMaxRetries) == "" { + client.SetMaxRetries(0) + } + + p.client = client + } + }) + return p.client +} + +// defaultPredictVaultMounts is the default list of mounts to return to the +// user. This is a best-guess, given we haven't communicated with the Vault +// server. If the user has no token or if the token does not have the default +// policy attached, it won't be able to read cubbyhole/, but it's a better UX +// that returning nothing. +var defaultPredictVaultMounts = []string{"cubbyhole/"} + +// predictClient is the API client to use for prediction. We create this at the +// beginning once, because completions are generated for each command (and this +// doesn't change), and the only way to configure the predict/autocomplete +// client is via environment variables. Even if the user specifies a flag, we +// can't parse that flag until after the command is submitted. +var ( + predictClient *api.Client + predictClientOnce sync.Once +) + +// PredictClient returns the cached API client for the predictor. +func PredictClient() *api.Client { + predictClientOnce.Do(func() { + if predictClient == nil { // For tests + predictClient, _ = api.NewClient(nil) + } + }) + return predictClient +} + +// PredictVaultAvailableMounts returns a predictor for the available mounts in +// Vault. For now, there is no way to programmatically get this list. If, in the +// future, such a list exists, we can adapt it here. Until then, it's +// hard-coded. +func (b *BaseCommand) PredictVaultAvailableMounts() complete.Predictor { + // This list does not contain deprecated backends. 
+	// API that lists all available secret backends, so this is hard-coded :(.
+	return complete.PredictSet(
+		"aws",
+		"consul",
+		"database",
+		"generic",
+		"pki",
+		"plugin",
+		"rabbitmq",
+		"ssh",
+		"totp",
+		"transit",
+	)
+}
+
+// PredictVaultAvailableAuths returns a predictor for the available auths in
+// Vault. For now, there is no way to programmatically get this list. If, in the
+// future, such a list exists, we can adapt it here. Until then, it's
+// hard-coded.
+func (b *BaseCommand) PredictVaultAvailableAuths() complete.Predictor {
+	return complete.PredictSet(
+		"app-id",
+		"approle",
+		"aws",
+		"cert",
+		"gcp",
+		"github",
+		"ldap",
+		"okta",
+		"plugin",
+		"radius",
+		"userpass",
+	)
+}
+
+// PredictVaultFiles returns a predictor for Vault mounts and paths based on the
+// configured client for the base command. Unfortunately, this happens pre-flag
+// parsing, so users must rely on environment variables for autocomplete if they
+// are not using Vault at the default endpoints.
+func (b *BaseCommand) PredictVaultFiles() complete.Predictor {
+	return NewPredict().VaultFiles()
+}
+
+// PredictVaultFolders returns a predictor for "folders". See PredictVaultFiles
+// for more information and restrictions.
+func (b *BaseCommand) PredictVaultFolders() complete.Predictor {
+	return NewPredict().VaultFolders()
+}
+
+// PredictVaultNamespaces returns a predictor for "namespaces". See PredictVaultFiles
+// for more information and restrictions.
+func (b *BaseCommand) PredictVaultNamespaces() complete.Predictor {
+	return NewPredict().VaultNamespaces()
+}
+
+// PredictVaultMounts returns a predictor for "folders". See PredictVaultFiles
+// for more information and restrictions.
+func (b *BaseCommand) PredictVaultMounts() complete.Predictor {
+	return NewPredict().VaultMounts()
+}
+
+// PredictVaultAudits returns a predictor for "folders". See PredictVaultFiles
+// for more information and restrictions.
+func (b *BaseCommand) PredictVaultAudits() complete.Predictor {
+	return NewPredict().VaultAudits()
+}
+
+// PredictVaultAuths returns a predictor for "folders". See PredictVaultFiles
+// for more information and restrictions.
+func (b *BaseCommand) PredictVaultAuths() complete.Predictor {
+	return NewPredict().VaultAuths()
+}
+
+// PredictVaultPlugins returns a predictor for installed plugins.
+func (b *BaseCommand) PredictVaultPlugins(pluginTypes ...api.PluginType) complete.Predictor {
+	return NewPredict().VaultPlugins(pluginTypes...)
+}
+
+// PredictVaultPolicies returns a predictor for "folders". See PredictVaultFiles
+// for more information and restrictions.
+func (b *BaseCommand) PredictVaultPolicies() complete.Predictor {
+	return NewPredict().VaultPolicies()
+}
+
+func (b *BaseCommand) PredictVaultDebugTargets() complete.Predictor {
+	return complete.PredictSet(
+		"config",
+		"host",
+		"metrics",
+		"pprof",
+		"replication-status",
+		"server-status",
+	)
+}
+
+// VaultFiles returns a predictor for Vault "files". This is a public API for
+// consumers, but you probably want BaseCommand.PredictVaultFiles instead.
+func (p *Predict) VaultFiles() complete.Predictor {
+	return p.vaultPaths(true)
+}
+
+// VaultFolders returns a predictor for Vault "folders". This is a public
+// API for consumers, but you probably want BaseCommand.PredictVaultFolders
+// instead.
+func (p *Predict) VaultFolders() complete.Predictor {
+	return p.vaultPaths(false)
+}
+
+// VaultNamespaces returns a predictor for Vault "namespaces". This is a public
+// API for consumers, but you probably want BaseCommand.PredictVaultNamespaces
+// instead.
+func (p *Predict) VaultNamespaces() complete.Predictor {
+	return p.filterFunc(p.namespaces)
+}
+
+// VaultMounts returns a predictor for Vault "folders". This is a public
+// API for consumers, but you probably want BaseCommand.PredictVaultMounts
+// instead.
+func (p *Predict) VaultMounts() complete.Predictor {
+	return p.filterFunc(p.mounts)
+}
+
+// VaultAudits returns a predictor for Vault "folders". This is a public API for
+// consumers, but you probably want BaseCommand.PredictVaultAudits instead.
+func (p *Predict) VaultAudits() complete.Predictor {
+	return p.filterFunc(p.audits)
+}
+
+// VaultAuths returns a predictor for Vault "folders". This is a public API for
+// consumers, but you probably want BaseCommand.PredictVaultAuths instead.
+func (p *Predict) VaultAuths() complete.Predictor {
+	return p.filterFunc(p.auths)
+}
+
+// VaultPlugins returns a predictor for Vault's plugin catalog. This is a public
+// API for consumers, but you probably want BaseCommand.PredictVaultPlugins
+// instead.
+func (p *Predict) VaultPlugins(pluginTypes ...api.PluginType) complete.Predictor {
+	filterFunc := func() []string {
+		return p.plugins(pluginTypes...)
+	}
+	return p.filterFunc(filterFunc)
+}
+
+// VaultPolicies returns a predictor for Vault "folders". This is a public API for
+// consumers, but you probably want BaseCommand.PredictVaultPolicies instead.
+func (p *Predict) VaultPolicies() complete.Predictor {
+	return p.filterFunc(p.policies)
+}
+
+// vaultPaths parses the CLI options and returns the "best" list of possible
+// paths. If there are any errors, this function returns an empty result. All
+// errors are suppressed since this is a prediction function.
+func (p *Predict) vaultPaths(includeFiles bool) complete.PredictFunc {
+	return func(args complete.Args) []string {
+		// Do not predict more than one path
+		if p.hasPathArg(args.All) {
+			return nil
+		}
+
+		client := p.Client()
+		if client == nil {
+			return nil
+		}
+
+		path := args.Last
+
+		// Trim path with potential mount
+		var relativePath string
+		mountInfos, err := p.mountInfos()
+		if err != nil {
+			return nil
+		}
+
+		var mountType, mountVersion string
+		for mount, mountInfo := range mountInfos {
+			if strings.HasPrefix(path, mount) {
+				relativePath = strings.TrimPrefix(path, mount+"/")
+				mountType = mountInfo.Type
+				if mountInfo.Options != nil {
+					mountVersion = mountInfo.Options["version"]
+				}
+				break
+			}
+		}
+
+		// Predict path or mount depending on path separator
+		var predictions []string
+		if strings.Contains(relativePath, "/") {
+			predictions = p.paths(mountType, mountVersion, path, includeFiles)
+		} else {
+			predictions = p.filter(p.mounts(), path)
+		}
+
+		// Either no results or many results, so return.
+		if len(predictions) != 1 {
+			return predictions
+		}
+
+		// If this is not a "folder", do not try to recurse.
+		if !strings.HasSuffix(predictions[0], "/") {
+			return predictions
+		}
+
+		// If the prediction is the same as the last guess, return it (we have no
+		// new information and we won't get any more).
+		if predictions[0] == args.Last {
+			return predictions
+		}
+
+		// Re-predict with the remaining path
+		args.Last = predictions[0]
+		return p.vaultPaths(includeFiles).Predict(args)
+	}
+}
+
+// paths predicts all paths which start with the given path.
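+// Vault's LIST API cannot match on a partial key, so the input is trimmed
+// back to its last "/", that "folder" is listed, and the results are
+// filtered client-side. For example (illustrative values only):
+//
+//	paths("kv", "1", "secret/ba", true) // → ["secret/bar", "secret/baz/"]
+//
+// assuming a KV v1 mount "secret/" that contains the keys "bar" and "baz/".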
+func (p *Predict) paths(mountType, mountVersion, path string, includeFiles bool) []string {
+	client := p.Client()
+	if client == nil {
+		return nil
+	}
+
+	// Vault does not support listing based on a sub-key, so we have to back-pedal
+	// to the last "/" and return all paths on that "folder". Then we perform
+	// client-side filtering.
+	root := path
+	idx := strings.LastIndex(root, "/")
+	if idx > 0 && idx < len(root) {
+		root = root[:idx+1]
+	}
+
+	paths := p.listPaths(buildAPIListPath(root, mountType, mountVersion))
+
+	var predictions []string
+	for _, p := range paths {
+		// Calculate the absolute "path" for matching.
+		p = root + p
+
+		if strings.HasPrefix(p, path) {
+			// Ensure this is a directory or we've asked to include files.
+			if includeFiles || strings.HasSuffix(p, "/") {
+				predictions = append(predictions, p)
+			}
+		}
+	}
+
+	// If nothing matched, fall back to the original path.
+	if len(predictions) == 0 {
+		predictions = append(predictions, path)
+	}
+
+	return predictions
+}
+
+func buildAPIListPath(path, mountType, mountVersion string) string {
+	if mountType == "kv" && mountVersion == "2" {
+		return toKVv2ListPath(path)
+	}
+	return path
+}
+
+func toKVv2ListPath(path string) string {
+	firstSlashIdx := strings.Index(path, "/")
+	if firstSlashIdx < 0 {
+		return path
+	}
+
+	return path[:firstSlashIdx] + "/metadata" + path[firstSlashIdx:]
+}
+
+// audits returns a sorted list of the audit backends for the Vault server
+// with which the client is configured to communicate.
+func (p *Predict) audits() []string {
+	client := p.Client()
+	if client == nil {
+		return nil
+	}
+
+	audits, err := client.Sys().ListAudit()
+	if err != nil {
+		return nil
+	}
+
+	list := make([]string, 0, len(audits))
+	for m := range audits {
+		list = append(list, m)
+	}
+	sort.Strings(list)
+	return list
+}
+
+// auths returns a sorted list of the enabled auth providers for the Vault
+// server with which the client is configured to communicate.
+func (p *Predict) auths() []string {
+	client := p.Client()
+	if client == nil {
+		return nil
+	}
+
+	auths, err := client.Sys().ListAuth()
+	if err != nil {
+		return nil
+	}
+
+	list := make([]string, 0, len(auths))
+	for m := range auths {
+		list = append(list, m)
+	}
+	sort.Strings(list)
+	return list
+}
+
+// plugins returns a sorted list of the plugins in the catalog.
+func (p *Predict) plugins(pluginTypes ...api.PluginType) []string {
+	// This method's signature doesn't enforce that a pluginType must be passed in.
+	// If it's not, it's likely the caller's intent is to get a list of all of them,
+	// so let's help them out.
+	if len(pluginTypes) == 0 {
+		pluginTypes = append(pluginTypes, api.PluginTypeUnknown)
+	}
+
+	client := p.Client()
+	if client == nil {
+		return nil
+	}
+
+	var plugins []string
+	pluginsAdded := make(map[string]bool)
+	for _, pluginType := range pluginTypes {
+		result, err := client.Sys().ListPlugins(&api.ListPluginsInput{Type: api.PluginType(pluginType)})
+		if err != nil {
+			return nil
+		}
+		if result == nil {
+			return nil
+		}
+		for _, names := range result.PluginsByType {
+			for _, name := range names {
+				if _, ok := pluginsAdded[name]; !ok {
+					plugins = append(plugins, name)
+					pluginsAdded[name] = true
+				}
+			}
+		}
+	}
+	sort.Strings(plugins)
+	return plugins
+}
+
+// policies returns a sorted list of the policies stored in this Vault
+// server.
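+// On a freshly initialized server this is typically ["default", "root"], as
+// exercised by TestPredict_Policies in the accompanying tests.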
+func (p *Predict) policies() []string {
+	client := p.Client()
+	if client == nil {
+		return nil
+	}
+
+	policies, err := client.Sys().ListPolicies()
+	if err != nil {
+		return nil
+	}
+	sort.Strings(policies)
+	return policies
+}
+
+// mountInfos returns a map with mount paths as keys and MountOutputs as values
+// for the Vault server with which the client is configured to communicate.
+// It returns an error if server communication fails.
+func (p *Predict) mountInfos() (map[string]*api.MountOutput, error) {
+	client := p.Client()
+	if client == nil {
+		return nil, nil
+	}
+
+	mounts, err := client.Sys().ListMounts()
+	if err != nil {
+		return nil, err
+	}
+
+	return mounts, nil
+}
+
+// mounts returns a sorted list of the mount paths for the Vault server with
+// which the client is configured to communicate. This function returns the
+// default list of mounts if an error occurs.
+func (p *Predict) mounts() []string {
+	mounts, err := p.mountInfos()
+	if err != nil {
+		return defaultPredictVaultMounts
+	}
+
+	list := make([]string, 0, len(mounts))
+	for m := range mounts {
+		list = append(list, m)
+	}
+	sort.Strings(list)
+	return list
+}
+
+// namespaces returns a sorted list of the namespace paths for the Vault
+// server with which the client is configured to communicate. This function
+// returns an empty list if any error occurs.
+func (p *Predict) namespaces() []string {
+	client := p.Client()
+	if client == nil {
+		return nil
+	}
+
+	secret, err := client.Logical().List("sys/namespaces")
+	if err != nil {
+		return nil
+	}
+	namespaces, ok := extractListData(secret)
+	if !ok {
+		return nil
+	}
+
+	list := make([]string, 0, len(namespaces))
+	for _, n := range namespaces {
+		s, ok := n.(string)
+		if !ok {
+			continue
+		}
+		list = append(list, s)
+	}
+	sort.Strings(list)
+	return list
+}
+
+// listPaths returns a list of paths (HTTP LIST) for the given path. This
+// function returns an empty list if any errors occur.
+func (p *Predict) listPaths(path string) []string {
+	client := p.Client()
+	if client == nil {
+		return nil
+	}
+
+	secret, err := client.Logical().List(path)
+	if err != nil || secret == nil || secret.Data == nil {
+		return nil
+	}
+
+	paths, ok := secret.Data["keys"].([]interface{})
+	if !ok {
+		return nil
+	}
+
+	list := make([]string, 0, len(paths))
+	for _, p := range paths {
+		if str, ok := p.(string); ok {
+			list = append(list, str)
+		}
+	}
+	sort.Strings(list)
+	return list
+}
+
+// hasPathArg determines if the args have already accepted a path.
+func (p *Predict) hasPathArg(args []string) bool {
+	var nonFlags []string
+	for _, a := range args {
+		if !strings.HasPrefix(a, "-") {
+			nonFlags = append(nonFlags, a)
+		}
+	}
+
+	return len(nonFlags) > 2
+}
+
+// filterFunc is used to compose a complete predictor that filters an array
+// of strings as per the filter function.
+func (p *Predict) filterFunc(f func() []string) complete.Predictor {
+	return complete.PredictFunc(func(args complete.Args) []string {
+		if p.hasPathArg(args.All) {
+			return nil
+		}
+
+		client := p.Client()
+		if client == nil {
+			return nil
+		}
+
+		return p.filter(f(), args.Last)
+	})
+}
+
+// filter filters the given list for items that start with the prefix.
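+// For example (illustrative): filter([]string{"secret/", "sys/"}, "se")
+// returns []string{"secret/"}.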
+func (p *Predict) filter(list []string, prefix string) []string { + var predictions []string + for _, item := range list { + if strings.HasPrefix(item, prefix) { + predictions = append(predictions, item) + } + } + return predictions +} diff --git a/command/base_predict_test.go b/command/base_predict_test.go new file mode 100644 index 0000000..20af0f6 --- /dev/null +++ b/command/base_predict_test.go @@ -0,0 +1,743 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "reflect" + "testing" + + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/api" + "github.com/posener/complete" +) + +func TestPredictVaultPaths(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + data := map[string]interface{}{"a": "b"} + if _, err := client.Logical().Write("secret/bar", data); err != nil { + t.Fatal(err) + } + if _, err := client.Logical().Write("secret/foo", data); err != nil { + t.Fatal(err) + } + if _, err := client.Logical().Write("secret/zip/zap", data); err != nil { + t.Fatal(err) + } + if _, err := client.Logical().Write("secret/zip/zonk", data); err != nil { + t.Fatal(err) + } + if _, err := client.Logical().Write("secret/zip/twoot", data); err != nil { + t.Fatal(err) + } + if err := client.Sys().Mount("level1a/level2a/level3a", &api.MountInput{Type: "kv"}); err != nil { + t.Fatal(err) + } + if err := client.Sys().Mount("level1a/level2a/level3b", &api.MountInput{Type: "kv"}); err != nil { + t.Fatal(err) + } + + cases := []struct { + name string + args complete.Args + includeFiles bool + exp []string + }{ + { + "has_args", + complete.Args{ + All: []string{"read", "secret/foo", "a=b"}, + Last: "a=b", + }, + true, + nil, + }, + { + "has_args_no_files", + complete.Args{ + All: []string{"read", "secret/foo", "a=b"}, + Last: "a=b", + }, + false, + nil, + }, + { + "part_mount", + complete.Args{ + All: []string{"read", "s"}, + Last: "s", + }, + true, + []string{"secret/", "sys/"}, + }, + { + "part_mount_no_files", + complete.Args{ + All: []string{"read", "s"}, + Last: "s", + }, + false, + []string{"secret/", "sys/"}, + }, + { + "only_mount", + complete.Args{ + All: []string{"read", "sec"}, + Last: "sec", + }, + true, + []string{"secret/bar", "secret/foo", "secret/zip/"}, + }, + { + "only_mount_no_files", + complete.Args{ + All: []string{"read", "sec"}, + Last: "sec", + }, + false, + []string{"secret/zip/"}, + }, + { + "full_mount", + complete.Args{ + All: []string{"read", "secret"}, + Last: "secret", + }, + true, + []string{"secret/bar", "secret/foo", "secret/zip/"}, + }, + { + "full_mount_no_files", + complete.Args{ + All: []string{"read", "secret"}, + Last: "secret", + }, + false, + []string{"secret/zip/"}, + }, + { + "full_mount_slash", + complete.Args{ + All: []string{"read", "secret/"}, + Last: "secret/", + }, + true, + []string{"secret/bar", "secret/foo", "secret/zip/"}, + }, + { + "full_mount_slash_no_files", + complete.Args{ + All: []string{"read", "secret/"}, + Last: "secret/", + }, + false, + []string{"secret/zip/"}, + }, + { + "path_partial", + complete.Args{ + All: []string{"read", "secret/z"}, + Last: "secret/z", + }, + true, + []string{"secret/zip/twoot", "secret/zip/zap", "secret/zip/zonk"}, + }, + { + "path_partial_no_files", + complete.Args{ + All: []string{"read", "secret/z"}, + Last: "secret/z", + }, + false, + []string{"secret/zip/"}, + }, + { + "subpath_partial_z", + complete.Args{ + All: []string{"read", "secret/zip/z"}, + Last: "secret/zip/z", + }, + true, + 
[]string{"secret/zip/zap", "secret/zip/zonk"}, + }, + { + "subpath_partial_z_no_files", + complete.Args{ + All: []string{"read", "secret/zip/z"}, + Last: "secret/zip/z", + }, + false, + []string{"secret/zip/z"}, + }, + { + "subpath_partial_t", + complete.Args{ + All: []string{"read", "secret/zip/t"}, + Last: "secret/zip/t", + }, + true, + []string{"secret/zip/twoot"}, + }, + { + "subpath_partial_t_no_files", + complete.Args{ + All: []string{"read", "secret/zip/t"}, + Last: "secret/zip/t", + }, + false, + []string{"secret/zip/t"}, + }, + { + "multi_nested", + complete.Args{ + All: []string{"read", "level1a/level2a"}, + Last: "level1a/level2a", + }, + false, + []string{ + "level1a/level2a/level3a/", + "level1a/level2a/level3b/", + }, + }, + } + + t.Run("group", func(t *testing.T) { + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + p := NewPredict() + p.client = client + + f := p.vaultPaths(tc.includeFiles) + act := f(tc.args) + if !reflect.DeepEqual(act, tc.exp) { + t.Errorf("expected %q to be %q", act, tc.exp) + } + }) + } + }) +} + +func TestPredict_Audits(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + badClient, badCloser := testVaultServerBad(t) + defer badCloser() + + if err := client.Sys().EnableAuditWithOptions("file", &api.EnableAuditOptions{ + Type: "file", + Options: map[string]string{ + "file_path": "discard", + }, + }); err != nil { + t.Fatal(err) + } + + cases := []struct { + name string + client *api.Client + exp []string + }{ + { + "not_connected_client", + badClient, + nil, + }, + { + "good_path", + client, + []string{"file/"}, + }, + } + + t.Run("group", func(t *testing.T) { + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + p := NewPredict() + p.client = tc.client + + act := p.audits() + if !reflect.DeepEqual(act, tc.exp) { + t.Errorf("expected %q to be %q", act, tc.exp) + } + }) + } + }) +} + +func TestPredict_Mounts(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + badClient, badCloser := testVaultServerBad(t) + defer badCloser() + + cases := []struct { + name string + client *api.Client + exp []string + }{ + { + "not_connected_client", + badClient, + defaultPredictVaultMounts, + }, + { + "good_path", + client, + []string{"cubbyhole/", "identity/", "secret/", "sys/"}, + }, + } + + t.Run("group", func(t *testing.T) { + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + p := NewPredict() + p.client = tc.client + + act := p.mounts() + if !reflect.DeepEqual(act, tc.exp) { + t.Errorf("expected %q to be %q", act, tc.exp) + } + }) + } + }) +} + +func TestPredict_Plugins(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + badClient, badCloser := testVaultServerBad(t) + defer badCloser() + + cases := []struct { + name string + client *api.Client + exp []string + }{ + { + "not_connected_client", + badClient, + nil, + }, + { + "good_path", + client, + []string{ + "ad", + "alicloud", + "approle", + "aws", + "azure", + "cassandra-database-plugin", + "centrify", + "cert", + "cf", + "consul", + "couchbase-database-plugin", + "elasticsearch-database-plugin", + "gcp", + "gcpkms", + "github", + "hana-database-plugin", + "influxdb-database-plugin", + "jwt", + "kerberos", + "keymgmt", + "kmip", + "kubernetes", + "kv", + "ldap", + "mongodb-database-plugin", + "mongodbatlas", + "mongodbatlas-database-plugin", + 
"mssql-database-plugin", + "mysql-aurora-database-plugin", + "mysql-database-plugin", + "mysql-legacy-database-plugin", + "mysql-rds-database-plugin", + "nomad", + "oci", + "oidc", + "okta", + "openldap", + "pcf", // Deprecated. + "pki", + "postgresql-database-plugin", + "rabbitmq", + "radius", + "redis-database-plugin", + "redis-elasticache-database-plugin", + "redshift-database-plugin", + "snowflake-database-plugin", + "ssh", + "terraform", + "totp", + "transform", + "transit", + "userpass", + }, + }, + } + + t.Run("group", func(t *testing.T) { + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + p := NewPredict() + p.client = tc.client + + act := p.plugins() + + if !strutil.StrListContains(act, "keymgmt") { + for i, v := range tc.exp { + if v == "keymgmt" { + tc.exp = append(tc.exp[:i], tc.exp[i+1:]...) + break + } + } + } + if !strutil.StrListContains(act, "kmip") { + for i, v := range tc.exp { + if v == "kmip" { + tc.exp = append(tc.exp[:i], tc.exp[i+1:]...) + break + } + } + } + if !strutil.StrListContains(act, "transform") { + for i, v := range tc.exp { + if v == "transform" { + tc.exp = append(tc.exp[:i], tc.exp[i+1:]...) + break + } + } + } + if !reflect.DeepEqual(act, tc.exp) { + t.Errorf("expected: %q, got: %q, diff: %v", tc.exp, act, strutil.Difference(act, tc.exp, true)) + } + }) + } + }) +} + +func TestPredict_Policies(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + badClient, badCloser := testVaultServerBad(t) + defer badCloser() + + cases := []struct { + name string + client *api.Client + exp []string + }{ + { + "not_connected_client", + badClient, + nil, + }, + { + "good_path", + client, + []string{"default", "root"}, + }, + } + + t.Run("group", func(t *testing.T) { + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + p := NewPredict() + p.client = tc.client + + act := p.policies() + if !reflect.DeepEqual(act, tc.exp) { + t.Errorf("expected %q to be %q", act, tc.exp) + } + }) + } + }) +} + +func TestPredict_Paths(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + data := map[string]interface{}{"a": "b"} + if _, err := client.Logical().Write("secret/bar", data); err != nil { + t.Fatal(err) + } + if _, err := client.Logical().Write("secret/foo", data); err != nil { + t.Fatal(err) + } + if _, err := client.Logical().Write("secret/zip/zap", data); err != nil { + t.Fatal(err) + } + + cases := []struct { + name string + path string + includeFiles bool + exp []string + }{ + { + "bad_path", + "nope/not/a/real/path/ever", + true, + []string{"nope/not/a/real/path/ever"}, + }, + { + "good_path", + "secret/", + true, + []string{"secret/bar", "secret/foo", "secret/zip/"}, + }, + { + "good_path_no_files", + "secret/", + false, + []string{"secret/zip/"}, + }, + { + "partial_match", + "secret/z", + true, + []string{"secret/zip/"}, + }, + { + "partial_match_no_files", + "secret/z", + false, + []string{"secret/zip/"}, + }, + } + + t.Run("group", func(t *testing.T) { + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + p := NewPredict() + p.client = client + + act := p.paths("kv", "1", tc.path, tc.includeFiles) + if !reflect.DeepEqual(act, tc.exp) { + t.Errorf("expected %q to be %q", act, tc.exp) + } + }) + } + }) +} + +func TestPredict_PathsKVv2(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerWithKVVersion(t, "2") + defer closer() + + data := 
map[string]interface{}{"data": map[string]interface{}{"a": "b"}} + if _, err := client.Logical().Write("secret/data/bar", data); err != nil { + t.Fatal(err) + } + if _, err := client.Logical().Write("secret/data/foo", data); err != nil { + t.Fatal(err) + } + if _, err := client.Logical().Write("secret/data/zip/zap", data); err != nil { + t.Fatal(err) + } + + cases := []struct { + name string + path string + includeFiles bool + exp []string + }{ + { + "bad_path", + "nope/not/a/real/path/ever", + true, + []string{"nope/not/a/real/path/ever"}, + }, + { + "good_path", + "secret/", + true, + []string{"secret/bar", "secret/foo", "secret/zip/"}, + }, + { + "good_path_no_files", + "secret/", + false, + []string{"secret/zip/"}, + }, + { + "partial_match", + "secret/z", + true, + []string{"secret/zip/"}, + }, + { + "partial_match_no_files", + "secret/z", + false, + []string{"secret/zip/"}, + }, + } + + t.Run("group", func(t *testing.T) { + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + p := NewPredict() + p.client = client + + act := p.paths("kv", "2", tc.path, tc.includeFiles) + if !reflect.DeepEqual(act, tc.exp) { + t.Errorf("expected %q to be %q", act, tc.exp) + } + }) + } + }) +} + +func TestPredict_ListPaths(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + badClient, badCloser := testVaultServerBad(t) + defer badCloser() + + data := map[string]interface{}{"a": "b"} + if _, err := client.Logical().Write("secret/bar", data); err != nil { + t.Fatal(err) + } + if _, err := client.Logical().Write("secret/foo", data); err != nil { + t.Fatal(err) + } + + cases := []struct { + name string + client *api.Client + path string + exp []string + }{ + { + "bad_path", + client, + "nope/not/a/real/path/ever", + nil, + }, + { + "good_path", + client, + "secret/", + []string{"bar", "foo"}, + }, + { + "not_connected_client", + badClient, + "secret/", + nil, + }, + } + + t.Run("group", func(t *testing.T) { + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + p := NewPredict() + p.client = tc.client + + act := p.listPaths(tc.path) + if !reflect.DeepEqual(act, tc.exp) { + t.Errorf("expected %q to be %q", act, tc.exp) + } + }) + } + }) +} + +func TestPredict_HasPathArg(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + exp bool + }{ + { + "nil", + nil, + false, + }, + { + "empty", + []string{}, + false, + }, + { + "empty_string", + []string{""}, + false, + }, + { + "single", + []string{"foo"}, + false, + }, + { + "multiple", + []string{"foo", "bar", "baz"}, + true, + }, + } + + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + p := NewPredict() + if act := p.hasPathArg(tc.args); act != tc.exp { + t.Errorf("expected %t to be %t", act, tc.exp) + } + }) + } +} diff --git a/command/base_test.go b/command/base_test.go new file mode 100644 index 0000000..af4f0a4 --- /dev/null +++ b/command/base_test.go @@ -0,0 +1,72 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "net/http" + "reflect" + "testing" +) + +func getDefaultCliHeaders(t *testing.T) http.Header { + bc := &BaseCommand{} + cli, err := bc.Client() + if err != nil { + t.Fatal(err) + } + return cli.Headers() +} + +func TestClient_FlagHeader(t *testing.T) { + defaultHeaders := getDefaultCliHeaders(t) + + cases := []struct { + Input map[string]string + Valid bool + }{ + { + map[string]string{}, + true, + }, + { + map[string]string{"foo": "bar", "header2": "value2"}, + true, + }, + { + map[string]string{"X-Vault-foo": "bar", "header2": "value2"}, + false, + }, + } + + for _, tc := range cases { + expectedHeaders := defaultHeaders.Clone() + for key, val := range tc.Input { + expectedHeaders.Add(key, val) + } + + bc := &BaseCommand{flagHeader: tc.Input} + cli, err := bc.Client() + + if err == nil && !tc.Valid { + t.Errorf("No error for input[%#v], but not valid", tc.Input) + continue + } + + if err != nil { + if tc.Valid { + t.Errorf("Error[%v] with input[%#v], but valid", err, tc.Input) + } + continue + } + + if cli == nil { + t.Error("client should not be nil") + } + + actualHeaders := cli.Headers() + if !reflect.DeepEqual(expectedHeaders, actualHeaders) { + t.Errorf("expected [%#v] but got [%#v]", expectedHeaders, actualHeaders) + } + } +} diff --git a/command/command_test.go b/command/command_test.go new file mode 100644 index 0000000..f1a5269 --- /dev/null +++ b/command/command_test.go @@ -0,0 +1,354 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "context" + "encoding/base64" + "net" + "net/http" + "strings" + "testing" + "time" + + log "github.com/hashicorp/go-hclog" + kv "github.com/hashicorp/vault-plugin-secrets-kv" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/builtin/logical/pki" + "github.com/hashicorp/vault/builtin/logical/ssh" + "github.com/hashicorp/vault/builtin/logical/transit" + "github.com/hashicorp/vault/helper/benchhelpers" + "github.com/hashicorp/vault/helper/builtinplugins" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/physical/inmem" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/vault/seal" + "github.com/mitchellh/cli" + + auditFile "github.com/hashicorp/vault/builtin/audit/file" + credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" + vaulthttp "github.com/hashicorp/vault/http" +) + +var ( + defaultVaultLogger = log.NewNullLogger() + + defaultVaultCredentialBackends = map[string]logical.Factory{ + "userpass": credUserpass.Factory, + } + + defaultVaultAuditBackends = map[string]audit.Factory{ + "file": auditFile.Factory, + } + + defaultVaultLogicalBackends = map[string]logical.Factory{ + "generic-leased": vault.LeasedPassthroughBackendFactory, + "pki": pki.Factory, + "ssh": ssh.Factory, + "transit": transit.Factory, + "kv": kv.Factory, + } +) + +// assertNoTabs asserts the CLI help has no tab characters. +func assertNoTabs(tb testing.TB, c cli.Command) { + tb.Helper() + + if strings.ContainsRune(c.Help(), '\t') { + tb.Errorf("%#v help output contains tabs", c) + } +} + +// testVaultServer creates a test vault cluster and returns a configured API +// client and closer function. 
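+//
+// Typical usage (sketch):
+//
+//	client, closer := testVaultServer(t)
+//	defer closer()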
+func testVaultServer(tb testing.TB) (*api.Client, func()) {
+	tb.Helper()
+
+	client, _, closer := testVaultServerUnseal(tb)
+	return client, closer
+}
+
+func testVaultServerWithSecrets(ctx context.Context, tb testing.TB) (*api.Client, func()) {
+	tb.Helper()
+
+	client, _, closer := testVaultServerUnseal(tb)
+
+	// enable kv-v1 backend
+	if err := client.Sys().Mount("kv-v1/", &api.MountInput{
+		Type: "kv-v1",
+	}); err != nil {
+		tb.Fatal(err)
+	}
+
+	// enable kv-v2 backend
+	if err := client.Sys().Mount("kv-v2/", &api.MountInput{
+		Type: "kv-v2",
+	}); err != nil {
+		tb.Fatal(err)
+	}
+
+	// populate dummy secrets
+	for _, path := range []string{
+		"foo",
+		"app-1/foo",
+		"app-1/bar",
+		"app-1/nested/baz",
+	} {
+		if err := client.KVv1("kv-v1").Put(ctx, path, map[string]interface{}{
+			"user":     "test",
+			"password": "Hashi123",
+		}); err != nil {
+			tb.Fatal(err)
+		}
+
+		if _, err := client.KVv2("kv-v2").Put(ctx, path, map[string]interface{}{
+			"user":     "test",
+			"password": "Hashi123",
+		}); err != nil {
+			tb.Fatal(err)
+		}
+	}
+
+	return client, closer
+}
+
+func testVaultServerWithKVVersion(tb testing.TB, kvVersion string) (*api.Client, func()) {
+	tb.Helper()
+
+	client, _, closer := testVaultServerUnsealWithKVVersionWithSeal(tb, kvVersion, nil)
+	return client, closer
+}
+
+func testVaultServerAllBackends(tb testing.TB) (*api.Client, func()) {
+	tb.Helper()
+
+	client, _, closer := testVaultServerCoreConfig(tb, &vault.CoreConfig{
+		DisableMlock:       true,
+		DisableCache:       true,
+		Logger:             defaultVaultLogger,
+		CredentialBackends: credentialBackends,
+		AuditBackends:      auditBackends,
+		LogicalBackends:    logicalBackends,
+		BuiltinRegistry:    builtinplugins.Registry,
+	})
+	return client, closer
+}
+
+// testVaultServerAutoUnseal creates a test vault cluster and sets it up with
+// auto unseal. The function returns a client, the recovery keys, and a closer
+// function.
+func testVaultServerAutoUnseal(tb testing.TB) (*api.Client, []string, func()) {
+	testSeal, _ := seal.NewTestSeal(nil)
+	autoSeal, err := vault.NewAutoSeal(testSeal)
+	if err != nil {
+		tb.Fatal("unable to create autoseal", err)
+	}
+	return testVaultServerUnsealWithKVVersionWithSeal(tb, "1", autoSeal)
+}
+
+// testVaultServerUnseal creates a test vault cluster and returns a configured
+// API client, list of unseal keys (as strings), and a closer function.
+func testVaultServerUnseal(tb testing.TB) (*api.Client, []string, func()) {
+	return testVaultServerUnsealWithKVVersionWithSeal(tb, "1", nil)
+}
+
+func testVaultServerUnsealWithKVVersionWithSeal(tb testing.TB, kvVersion string, seal vault.Seal) (*api.Client, []string, func()) {
+	tb.Helper()
+	logger := log.NewInterceptLogger(&log.LoggerOptions{
+		Output:     log.DefaultOutput,
+		Level:      log.Debug,
+		JSONFormat: logging.ParseEnvLogFormat() == logging.JSONFormat,
+	})
+
+	return testVaultServerCoreConfigWithOpts(tb, &vault.CoreConfig{
+		DisableMlock:       true,
+		DisableCache:       true,
+		Logger:             logger,
+		CredentialBackends: defaultVaultCredentialBackends,
+		AuditBackends:      defaultVaultAuditBackends,
+		LogicalBackends:    defaultVaultLogicalBackends,
+		BuiltinRegistry:    builtinplugins.Registry,
+		Seal:               seal,
+	}, &vault.TestClusterOptions{
+		HandlerFunc: vaulthttp.Handler,
+		NumCores:    1,
+		KVVersion:   kvVersion,
+	})
+}
+
+// testVaultServerPluginDir creates a test vault cluster and returns a
+// configured API client, a list of unseal keys (as strings), and a closer
+// function, configured with the given plugin directory.
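+//
+// A sketch of a typical call, where pluginDir is any directory the test
+// controls (hypothetical value):
+//
+//	client, _, closer := testVaultServerPluginDir(t, pluginDir)
+//	defer closer()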
+func testVaultServerPluginDir(tb testing.TB, pluginDir string) (*api.Client, []string, func()) {
+	tb.Helper()
+
+	return testVaultServerCoreConfig(tb, &vault.CoreConfig{
+		DisableMlock:       true,
+		DisableCache:       true,
+		Logger:             defaultVaultLogger,
+		CredentialBackends: defaultVaultCredentialBackends,
+		AuditBackends:      defaultVaultAuditBackends,
+		LogicalBackends:    defaultVaultLogicalBackends,
+		PluginDirectory:    pluginDir,
+		BuiltinRegistry:    builtinplugins.Registry,
+	})
+}
+
+func testVaultServerCoreConfig(tb testing.TB, coreConfig *vault.CoreConfig) (*api.Client, []string, func()) {
+	return testVaultServerCoreConfigWithOpts(tb, coreConfig, &vault.TestClusterOptions{
+		HandlerFunc: vaulthttp.Handler,
+		NumCores:    1, // Default is 3, but we don't need that many
+	})
+}
+
+// testVaultServerCoreConfigWithOpts creates a new vault cluster with the given
+// core configuration. This is a lower-level test helper. If the seal config
+// supports recovery keys, then recovery keys are returned. Otherwise, unseal
+// keys are returned.
+func testVaultServerCoreConfigWithOpts(tb testing.TB, coreConfig *vault.CoreConfig, opts *vault.TestClusterOptions) (*api.Client, []string, func()) {
+	tb.Helper()
+
+	cluster := vault.NewTestCluster(benchhelpers.TBtoT(tb), coreConfig, opts)
+	cluster.Start()
+
+	// Make it easy to get access to the active core
+	core := cluster.Cores[0].Core
+	vault.TestWaitActive(benchhelpers.TBtoT(tb), core)
+
+	// Get the client already set up for us!
+	client := cluster.Cores[0].Client
+	client.SetToken(cluster.RootToken)
+
+	var keys [][]byte
+	if coreConfig.Seal != nil && coreConfig.Seal.RecoveryKeySupported() {
+		keys = cluster.RecoveryKeys
+	} else {
+		keys = cluster.BarrierKeys
+	}
+
+	return client, encodeKeys(keys), cluster.Cleanup
+}
+
+// encodeKeys converts the unseal keys to base64, since that is how the user
+// will receive them.
+func encodeKeys(rawKeys [][]byte) []string {
+	keys := make([]string, len(rawKeys))
+	for i := range rawKeys {
+		keys[i] = base64.StdEncoding.EncodeToString(rawKeys[i])
+	}
+	return keys
+}
+
+// testVaultServerUninit creates an uninitialized server.
+func testVaultServerUninit(tb testing.TB) (*api.Client, func()) {
+	tb.Helper()
+
+	inm, err := inmem.NewInmem(nil, defaultVaultLogger)
+	if err != nil {
+		tb.Fatal(err)
+	}
+
+	core, err := vault.NewCore(&vault.CoreConfig{
+		DisableMlock:       true,
+		DisableCache:       true,
+		Logger:             defaultVaultLogger,
+		Physical:           inm,
+		CredentialBackends: defaultVaultCredentialBackends,
+		AuditBackends:      defaultVaultAuditBackends,
+		LogicalBackends:    defaultVaultLogicalBackends,
+		BuiltinRegistry:    builtinplugins.Registry,
+	})
+	if err != nil {
+		tb.Fatal(err)
+	}
+
+	ln, addr := vaulthttp.TestServer(tb, core)
+
+	client, err := api.NewClient(&api.Config{
+		Address: addr,
+	})
+	if err != nil {
+		tb.Fatal(err)
+	}
+
+	closer := func() {
+		core.Shutdown()
+		ln.Close()
+	}
+
+	return client, closer
+}
+
+// testVaultServerBad creates an http server that returns a 500 on each request
+// to simulate failures.
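+// The returned client is pointed at that server, so every API call fails;
+// the predictor tests use it as their "not_connected_client" case.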
+func testVaultServerBad(tb testing.TB) (*api.Client, func()) {
+	tb.Helper()
+
+	listener, err := net.Listen("tcp", "127.0.0.1:0")
+	if err != nil {
+		tb.Fatal(err)
+	}
+
+	server := &http.Server{
+		Addr: "127.0.0.1:0",
+		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			http.Error(w, "500 internal server error", http.StatusInternalServerError)
+		}),
+		ReadTimeout:       1 * time.Second,
+		ReadHeaderTimeout: 1 * time.Second,
+		WriteTimeout:      1 * time.Second,
+		IdleTimeout:       1 * time.Second,
+	}
+
+	go func() {
+		if err := server.Serve(listener); err != nil && err != http.ErrServerClosed {
+			tb.Fatal(err)
+		}
+	}()
+
+	client, err := api.NewClient(&api.Config{
+		Address: "http://" + listener.Addr().String(),
+	})
+	if err != nil {
+		tb.Fatal(err)
+	}
+
+	return client, func() {
+		ctx, done := context.WithTimeout(context.Background(), 5*time.Second)
+		defer done()
+
+		server.Shutdown(ctx)
+	}
+}
+
+// testTokenAndAccessor creates a new authentication token, capable of being
+// renewed, with the default policy attached. It returns the token and its
+// accessor.
+func testTokenAndAccessor(tb testing.TB, client *api.Client) (string, string) {
+	tb.Helper()
+
+	secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{
+		Policies: []string{"default"},
+		TTL:      "30m",
+	})
+	if err != nil {
+		tb.Fatal(err)
+	}
+	if secret == nil || secret.Auth == nil || secret.Auth.ClientToken == "" {
+		tb.Fatalf("missing auth data: %#v", secret)
+	}
+	return secret.Auth.ClientToken, secret.Auth.Accessor
+}
+
+func testClient(tb testing.TB, addr string, token string) *api.Client {
+	tb.Helper()
+	config := api.DefaultConfig()
+	config.Address = addr
+	client, err := api.NewClient(config)
+	if err != nil {
+		tb.Fatal(err)
+	}
+	client.SetToken(token)
+
+	return client
+}
diff --git a/command/commands.go b/command/commands.go
new file mode 100644
index 0000000..9a27577
--- /dev/null
+++ b/command/commands.go
@@ -0,0 +1,923 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"os"
+	"os/signal"
+	"syscall"
+
+	"github.com/hashicorp/vault/audit"
+	"github.com/hashicorp/vault/builtin/plugin"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/hashicorp/vault/sdk/physical"
+	"github.com/hashicorp/vault/version"
+	"github.com/mitchellh/cli"
+
+	/*
+		The builtinplugins package is initialized here because it, in turn,
+		initializes the database plugins.
+		They register multiple database drivers for the "database/sql" package.
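+		The blank identifier import below exists only for those side effects:
+		it runs the package's init functions without exposing any of its
+		identifiers here.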
+	*/
+	_ "github.com/hashicorp/vault/helper/builtinplugins"
+
+	auditFile "github.com/hashicorp/vault/builtin/audit/file"
+	auditSocket "github.com/hashicorp/vault/builtin/audit/socket"
+	auditSyslog "github.com/hashicorp/vault/builtin/audit/syslog"
+
+	credAliCloud "github.com/hashicorp/vault-plugin-auth-alicloud"
+	credCentrify "github.com/hashicorp/vault-plugin-auth-centrify"
+	credCF "github.com/hashicorp/vault-plugin-auth-cf"
+	credGcp "github.com/hashicorp/vault-plugin-auth-gcp/plugin"
+	credOIDC "github.com/hashicorp/vault-plugin-auth-jwt"
+	credKerb "github.com/hashicorp/vault-plugin-auth-kerberos"
+	credOCI "github.com/hashicorp/vault-plugin-auth-oci"
+	credAws "github.com/hashicorp/vault/builtin/credential/aws"
+	credCert "github.com/hashicorp/vault/builtin/credential/cert"
+	credGitHub "github.com/hashicorp/vault/builtin/credential/github"
+	credLdap "github.com/hashicorp/vault/builtin/credential/ldap"
+	credOkta "github.com/hashicorp/vault/builtin/credential/okta"
+	credToken "github.com/hashicorp/vault/builtin/credential/token"
+	credUserpass "github.com/hashicorp/vault/builtin/credential/userpass"
+
+	logicalKv "github.com/hashicorp/vault-plugin-secrets-kv"
+	logicalDb "github.com/hashicorp/vault/builtin/logical/database"
+
+	physAerospike "github.com/hashicorp/vault/physical/aerospike"
+	physAliCloudOSS "github.com/hashicorp/vault/physical/alicloudoss"
+	physAzure "github.com/hashicorp/vault/physical/azure"
+	physCassandra "github.com/hashicorp/vault/physical/cassandra"
+	physCockroachDB "github.com/hashicorp/vault/physical/cockroachdb"
+	physConsul "github.com/hashicorp/vault/physical/consul"
+	physCouchDB "github.com/hashicorp/vault/physical/couchdb"
+	physDynamoDB "github.com/hashicorp/vault/physical/dynamodb"
+	physEtcd "github.com/hashicorp/vault/physical/etcd"
+	physFoundationDB "github.com/hashicorp/vault/physical/foundationdb"
+	physGCS "github.com/hashicorp/vault/physical/gcs"
+	physManta "github.com/hashicorp/vault/physical/manta"
+	physMSSQL "github.com/hashicorp/vault/physical/mssql"
+	physMySQL "github.com/hashicorp/vault/physical/mysql"
+	physOCI "github.com/hashicorp/vault/physical/oci"
+	physPostgreSQL "github.com/hashicorp/vault/physical/postgresql"
+	physRaft "github.com/hashicorp/vault/physical/raft"
+	physS3 "github.com/hashicorp/vault/physical/s3"
+	physSpanner "github.com/hashicorp/vault/physical/spanner"
+	physSwift "github.com/hashicorp/vault/physical/swift"
+	physZooKeeper "github.com/hashicorp/vault/physical/zookeeper"
+	physFile "github.com/hashicorp/vault/sdk/physical/file"
+	physInmem "github.com/hashicorp/vault/sdk/physical/inmem"
+
+	sr "github.com/hashicorp/vault/serviceregistration"
+	csr "github.com/hashicorp/vault/serviceregistration/consul"
+	ksr "github.com/hashicorp/vault/serviceregistration/kubernetes"
+)
+
+const (
+	// EnvVaultCLINoColor is an env var that toggles colored UI output.
+	EnvVaultCLINoColor = `VAULT_CLI_NO_COLOR`
+	// EnvVaultFormat is an env var that sets the output format.
+	EnvVaultFormat = `VAULT_FORMAT`
+	// EnvVaultLicense is an env var used in Vault Enterprise to provide a license blob
+	EnvVaultLicense = "VAULT_LICENSE"
+	// EnvVaultLicensePath is an env var used in Vault Enterprise to provide a
+	// path to a license file on disk
+	EnvVaultLicensePath = "VAULT_LICENSE_PATH"
+	// EnvVaultDetailed is an env var used to output detailed information
+	// (e.g., ListResponseWithInfo).
+	EnvVaultDetailed = `VAULT_DETAILED`
+	// EnvVaultLogFormat is used to specify the log format. Supported values are "standard" and "json"
+	EnvVaultLogFormat = "VAULT_LOG_FORMAT"
+	// EnvVaultLogLevel is used to specify the log level applied to logging
+	// Supported log levels: Trace, Debug, Error, Warn, Info
+	EnvVaultLogLevel = "VAULT_LOG_LEVEL"
+	// EnvVaultExperiments defines the experiments to enable for a server as a
+	// comma separated list. See experiments.ValidExperiments() for the list of
+	// valid experiments. Not mutable or persisted in storage, only read and
+	// logged at startup _per node_. This was initially introduced for the events
+	// system being developed over multiple release cycles.
+	EnvVaultExperiments = "VAULT_EXPERIMENTS"
+
+	// flagNameAddress is the flag used in the base command to read in the
+	// address of the Vault server.
+	flagNameAddress = "address"
+	// flagNameCACert is the flag used in the base command to read in the CA
+	// cert.
+	flagNameCACert = "ca-cert"
+	// flagNameCAPath is the flag used in the base command to read in the CA
+	// cert path.
+	flagNameCAPath = "ca-path"
+	// flagNameClientKey is the flag used in the base command to read in the
+	// client key
+	flagNameClientKey = "client-key"
+	// flagNameClientCert is the flag used in the base command to read in the
+	// client cert
+	flagNameClientCert = "client-cert"
+	// flagNameTLSSkipVerify is the flag used in the base command to read in
+	// the option to ignore TLS certificate verification.
+	flagNameTLSSkipVerify = "tls-skip-verify"
+	// flagTLSServerName is the flag used in the base command to read in
+	// the TLS server name.
+	flagTLSServerName = "tls-server-name"
+	// flagNameAuditNonHMACRequestKeys is the flag name used for auth/secrets enable
+	flagNameAuditNonHMACRequestKeys = "audit-non-hmac-request-keys"
+	// flagNameAuditNonHMACResponseKeys is the flag name used for auth/secrets enable
+	flagNameAuditNonHMACResponseKeys = "audit-non-hmac-response-keys"
+	// flagNameDescription is the flag name used for tuning the secret and auth mount description parameter
+	flagNameDescription = "description"
+	// flagNameListingVisibility is the flag to toggle whether to show the mount in the UI-specific listing endpoint
+	flagNameListingVisibility = "listing-visibility"
+	// flagNamePassthroughRequestHeaders is the flag name used to set passthrough request headers to the backend
+	flagNamePassthroughRequestHeaders = "passthrough-request-headers"
+	// flagNameAllowedResponseHeaders is used to set allowed response headers from a plugin
+	flagNameAllowedResponseHeaders = "allowed-response-headers"
+	// flagNameTokenType is the flag name used to force a specific token type
+	flagNameTokenType = "token-type"
+	// flagNameAllowedManagedKeys is the flag name used for auth/secrets enable
+	flagNameAllowedManagedKeys = "allowed-managed-keys"
+	// flagNamePluginVersion selects what version of a plugin should be used.
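+	// For example (illustrative): passing -plugin-version=v1.0.0 pins the
+	// command to that registered version of the plugin.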
+ flagNamePluginVersion = "plugin-version" + // flagNameUserLockoutThreshold is the flag name used for tuning the auth mount lockout threshold parameter + flagNameUserLockoutThreshold = "user-lockout-threshold" + // flagNameUserLockoutDuration is the flag name used for tuning the auth mount lockout duration parameter + flagNameUserLockoutDuration = "user-lockout-duration" + // flagNameUserLockoutCounterResetDuration is the flag name used for tuning the auth mount lockout counter reset parameter + flagNameUserLockoutCounterResetDuration = "user-lockout-counter-reset-duration" + // flagNameUserLockoutDisable is the flag name used for tuning the auth mount disable lockout parameter + flagNameUserLockoutDisable = "user-lockout-disable" + // flagNameDisableRedirects is used to prevent the client from honoring a single redirect as a response to a request + flagNameDisableRedirects = "disable-redirects" + // flagNameCombineLogs is used to specify whether log output should be combined and sent to stdout + flagNameCombineLogs = "combine-logs" + // flagNameLogFile is used to specify the path to the log file that Vault should use for logging + flagNameLogFile = "log-file" + // flagNameLogRotateBytes is the flag used to specify the number of bytes a log file should be before it is rotated. + flagNameLogRotateBytes = "log-rotate-bytes" + // flagNameLogRotateDuration is the flag used to specify the duration after which a log file should be rotated. + flagNameLogRotateDuration = "log-rotate-duration" + // flagNameLogRotateMaxFiles is the flag used to specify the maximum number of older/archived log files to keep. + flagNameLogRotateMaxFiles = "log-rotate-max-files" + // flagNameLogFormat is the flag used to specify the log format. Supported values are "standard" and "json" + flagNameLogFormat = "log-format" + // flagNameLogLevel is used to specify the log level applied to logging + // Supported log levels: Trace, Debug, Error, Warn, Info + flagNameLogLevel = "log-level" +) + +var ( + auditBackends = map[string]audit.Factory{ + "file": auditFile.Factory, + "socket": auditSocket.Factory, + "syslog": auditSyslog.Factory, + } + + credentialBackends = map[string]logical.Factory{ + "plugin": plugin.Factory, + } + + logicalBackends = map[string]logical.Factory{ + "plugin": plugin.Factory, + "database": logicalDb.Factory, + // This is also available in the plugin catalog, but is here due to the need to + // automatically mount it. 
+ "kv": logicalKv.Factory, + } + + physicalBackends = map[string]physical.Factory{ + "aerospike": physAerospike.NewAerospikeBackend, + "alicloudoss": physAliCloudOSS.NewAliCloudOSSBackend, + "azure": physAzure.NewAzureBackend, + "cassandra": physCassandra.NewCassandraBackend, + "cockroachdb": physCockroachDB.NewCockroachDBBackend, + "consul": physConsul.NewConsulBackend, + "couchdb_transactional": physCouchDB.NewTransactionalCouchDBBackend, + "couchdb": physCouchDB.NewCouchDBBackend, + "dynamodb": physDynamoDB.NewDynamoDBBackend, + "etcd": physEtcd.NewEtcdBackend, + "file_transactional": physFile.NewTransactionalFileBackend, + "file": physFile.NewFileBackend, + "foundationdb": physFoundationDB.NewFDBBackend, + "gcs": physGCS.NewBackend, + "inmem_ha": physInmem.NewInmemHA, + "inmem_transactional_ha": physInmem.NewTransactionalInmemHA, + "inmem_transactional": physInmem.NewTransactionalInmem, + "inmem": physInmem.NewInmem, + "manta": physManta.NewMantaBackend, + "mssql": physMSSQL.NewMSSQLBackend, + "mysql": physMySQL.NewMySQLBackend, + "oci": physOCI.NewBackend, + "postgresql": physPostgreSQL.NewPostgreSQLBackend, + "s3": physS3.NewS3Backend, + "spanner": physSpanner.NewBackend, + "swift": physSwift.NewSwiftBackend, + "raft": physRaft.NewRaftBackend, + "zookeeper": physZooKeeper.NewZooKeeperBackend, + } + + serviceRegistrations = map[string]sr.Factory{ + "consul": csr.NewServiceRegistration, + "kubernetes": ksr.NewServiceRegistration, + } + + initCommandsEnt = func(ui, serverCmdUi cli.Ui, runOpts *RunOptions, commands map[string]cli.CommandFactory) {} +) + +func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) map[string]cli.CommandFactory { + loginHandlers := map[string]LoginHandler{ + "alicloud": &credAliCloud.CLIHandler{}, + "aws": &credAws.CLIHandler{}, + "centrify": &credCentrify.CLIHandler{}, + "cert": &credCert.CLIHandler{}, + "cf": &credCF.CLIHandler{}, + "gcp": &credGcp.CLIHandler{}, + "github": &credGitHub.CLIHandler{}, + "kerberos": &credKerb.CLIHandler{}, + "ldap": &credLdap.CLIHandler{}, + "oci": &credOCI.CLIHandler{}, + "oidc": &credOIDC.CLIHandler{}, + "okta": &credOkta.CLIHandler{}, + "pcf": &credCF.CLIHandler{}, // Deprecated. 
+ "radius": &credUserpass.CLIHandler{ + DefaultMount: "radius", + }, + "token": &credToken.CLIHandler{}, + "userpass": &credUserpass.CLIHandler{ + DefaultMount: "userpass", + }, + } + + getBaseCommand := func() *BaseCommand { + return &BaseCommand{ + UI: ui, + tokenHelper: runOpts.TokenHelper, + flagAddress: runOpts.Address, + client: runOpts.Client, + } + } + + commands := map[string]cli.CommandFactory{ + "agent": func() (cli.Command, error) { + return &AgentCommand{ + BaseCommand: &BaseCommand{ + UI: serverCmdUi, + }, + ShutdownCh: MakeShutdownCh(), + SighupCh: MakeSighupCh(), + }, nil + }, + "agent generate-config": func() (cli.Command, error) { + return &AgentGenerateConfigCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "audit": func() (cli.Command, error) { + return &AuditCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "audit disable": func() (cli.Command, error) { + return &AuditDisableCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "audit enable": func() (cli.Command, error) { + return &AuditEnableCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "audit list": func() (cli.Command, error) { + return &AuditListCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "auth tune": func() (cli.Command, error) { + return &AuthTuneCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "auth": func() (cli.Command, error) { + return &AuthCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "auth disable": func() (cli.Command, error) { + return &AuthDisableCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "auth enable": func() (cli.Command, error) { + return &AuthEnableCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "auth help": func() (cli.Command, error) { + return &AuthHelpCommand{ + BaseCommand: getBaseCommand(), + Handlers: loginHandlers, + }, nil + }, + "auth list": func() (cli.Command, error) { + return &AuthListCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "auth move": func() (cli.Command, error) { + return &AuthMoveCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "debug": func() (cli.Command, error) { + return &DebugCommand{ + BaseCommand: getBaseCommand(), + ShutdownCh: MakeShutdownCh(), + }, nil + }, + "delete": func() (cli.Command, error) { + return &DeleteCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "events subscribe": func() (cli.Command, error) { + return &EventsSubscribeCommands{ + BaseCommand: getBaseCommand(), + }, nil + }, + "lease": func() (cli.Command, error) { + return &LeaseCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "lease renew": func() (cli.Command, error) { + return &LeaseRenewCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "lease lookup": func() (cli.Command, error) { + return &LeaseLookupCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "lease revoke": func() (cli.Command, error) { + return &LeaseRevokeCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "list": func() (cli.Command, error) { + return &ListCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "login": func() (cli.Command, error) { + return &LoginCommand{ + BaseCommand: getBaseCommand(), + Handlers: loginHandlers, + }, nil + }, + "namespace": func() (cli.Command, error) { + return &NamespaceCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "namespace list": func() (cli.Command, error) { + return &NamespaceListCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "namespace lookup": func() (cli.Command, error) { + return &NamespaceLookupCommand{ + 
BaseCommand: getBaseCommand(), + }, nil + }, + "namespace create": func() (cli.Command, error) { + return &NamespaceCreateCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "namespace patch": func() (cli.Command, error) { + return &NamespacePatchCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "namespace delete": func() (cli.Command, error) { + return &NamespaceDeleteCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "namespace lock": func() (cli.Command, error) { + return &NamespaceAPILockCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "namespace unlock": func() (cli.Command, error) { + return &NamespaceAPIUnlockCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator": func() (cli.Command, error) { + return &OperatorCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator diagnose": func() (cli.Command, error) { + return &OperatorDiagnoseCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator generate-root": func() (cli.Command, error) { + return &OperatorGenerateRootCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator init": func() (cli.Command, error) { + return &OperatorInitCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator key-status": func() (cli.Command, error) { + return &OperatorKeyStatusCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator migrate": func() (cli.Command, error) { + return &OperatorMigrateCommand{ + BaseCommand: getBaseCommand(), + PhysicalBackends: physicalBackends, + ShutdownCh: MakeShutdownCh(), + }, nil + }, + "operator raft": func() (cli.Command, error) { + return &OperatorRaftCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator raft autopilot get-config": func() (cli.Command, error) { + return &OperatorRaftAutopilotGetConfigCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator raft autopilot set-config": func() (cli.Command, error) { + return &OperatorRaftAutopilotSetConfigCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator raft autopilot state": func() (cli.Command, error) { + return &OperatorRaftAutopilotStateCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator raft list-peers": func() (cli.Command, error) { + return &OperatorRaftListPeersCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator raft join": func() (cli.Command, error) { + return &OperatorRaftJoinCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator raft remove-peer": func() (cli.Command, error) { + return &OperatorRaftRemovePeerCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator raft snapshot": func() (cli.Command, error) { + return &OperatorRaftSnapshotCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator raft snapshot inspect": func() (cli.Command, error) { + return &OperatorRaftSnapshotInspectCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator raft snapshot restore": func() (cli.Command, error) { + return &OperatorRaftSnapshotRestoreCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator raft snapshot save": func() (cli.Command, error) { + return &OperatorRaftSnapshotSaveCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator rekey": func() (cli.Command, error) { + return &OperatorRekeyCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator rotate": func() (cli.Command, error) { + return &OperatorRotateCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator seal": func() (cli.Command, error) { + 
return &OperatorSealCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator step-down": func() (cli.Command, error) { + return &OperatorStepDownCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator usage": func() (cli.Command, error) { + return &OperatorUsageCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator unseal": func() (cli.Command, error) { + return &OperatorUnsealCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator members": func() (cli.Command, error) { + return &OperatorMembersCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "patch": func() (cli.Command, error) { + return &PatchCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "path-help": func() (cli.Command, error) { + return &PathHelpCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "pki": func() (cli.Command, error) { + return &PKICommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "pki health-check": func() (cli.Command, error) { + return &PKIHealthCheckCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "pki issue": func() (cli.Command, error) { + return &PKIIssueCACommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "pki list-intermediates": func() (cli.Command, error) { + return &PKIListIntermediateCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "pki reissue": func() (cli.Command, error) { + return &PKIReIssueCACommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "pki verify-sign": func() (cli.Command, error) { + return &PKIVerifySignCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin": func() (cli.Command, error) { + return &PluginCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin deregister": func() (cli.Command, error) { + return &PluginDeregisterCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin info": func() (cli.Command, error) { + return &PluginInfoCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin list": func() (cli.Command, error) { + return &PluginListCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin register": func() (cli.Command, error) { + return &PluginRegisterCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin reload": func() (cli.Command, error) { + return &PluginReloadCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin reload-status": func() (cli.Command, error) { + return &PluginReloadStatusCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "proxy": func() (cli.Command, error) { + return &ProxyCommand{ + BaseCommand: &BaseCommand{ + UI: serverCmdUi, + }, + ShutdownCh: MakeShutdownCh(), + SighupCh: MakeSighupCh(), + }, nil + }, + "policy": func() (cli.Command, error) { + return &PolicyCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "policy delete": func() (cli.Command, error) { + return &PolicyDeleteCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "policy fmt": func() (cli.Command, error) { + return &PolicyFmtCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "policy list": func() (cli.Command, error) { + return &PolicyListCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "policy read": func() (cli.Command, error) { + return &PolicyReadCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "policy write": func() (cli.Command, error) { + return &PolicyWriteCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "print": func() (cli.Command, error) { + return &PrintCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "print token": func() 
(cli.Command, error) { + return &PrintTokenCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "read": func() (cli.Command, error) { + return &ReadCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "secrets": func() (cli.Command, error) { + return &SecretsCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "secrets disable": func() (cli.Command, error) { + return &SecretsDisableCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "secrets enable": func() (cli.Command, error) { + return &SecretsEnableCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "secrets list": func() (cli.Command, error) { + return &SecretsListCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "secrets move": func() (cli.Command, error) { + return &SecretsMoveCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "secrets tune": func() (cli.Command, error) { + return &SecretsTuneCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "server": func() (cli.Command, error) { + return &ServerCommand{ + BaseCommand: &BaseCommand{ + UI: serverCmdUi, + tokenHelper: runOpts.TokenHelper, + flagAddress: runOpts.Address, + }, + AuditBackends: auditBackends, + CredentialBackends: credentialBackends, + LogicalBackends: logicalBackends, + PhysicalBackends: physicalBackends, + + ServiceRegistrations: serviceRegistrations, + + ShutdownCh: MakeShutdownCh(), + SighupCh: MakeSighupCh(), + SigUSR2Ch: MakeSigUSR2Ch(), + }, nil + }, + "ssh": func() (cli.Command, error) { + return &SSHCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "status": func() (cli.Command, error) { + return &StatusCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "transform": func() (cli.Command, error) { + return &TransformCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "transform import": func() (cli.Command, error) { + return &TransformImportCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "transform import-version": func() (cli.Command, error) { + return &TransformImportVersionCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "transit": func() (cli.Command, error) { + return &TransitCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "transit import": func() (cli.Command, error) { + return &TransitImportCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "transit import-version": func() (cli.Command, error) { + return &TransitImportVersionCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "token": func() (cli.Command, error) { + return &TokenCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "token create": func() (cli.Command, error) { + return &TokenCreateCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "token capabilities": func() (cli.Command, error) { + return &TokenCapabilitiesCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "token lookup": func() (cli.Command, error) { + return &TokenLookupCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "token renew": func() (cli.Command, error) { + return &TokenRenewCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "token revoke": func() (cli.Command, error) { + return &TokenRevokeCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "unwrap": func() (cli.Command, error) { + return &UnwrapCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "version": func() (cli.Command, error) { + return &VersionCommand{ + VersionInfo: version.GetVersion(), + BaseCommand: getBaseCommand(), + }, nil + }, + "version-history": func() (cli.Command, error) { + return 
&VersionHistoryCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "write": func() (cli.Command, error) { + return &WriteCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv": func() (cli.Command, error) { + return &KVCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv put": func() (cli.Command, error) { + return &KVPutCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv patch": func() (cli.Command, error) { + return &KVPatchCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv rollback": func() (cli.Command, error) { + return &KVRollbackCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv get": func() (cli.Command, error) { + return &KVGetCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv delete": func() (cli.Command, error) { + return &KVDeleteCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv list": func() (cli.Command, error) { + return &KVListCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv destroy": func() (cli.Command, error) { + return &KVDestroyCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv undelete": func() (cli.Command, error) { + return &KVUndeleteCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv enable-versioning": func() (cli.Command, error) { + return &KVEnableVersioningCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv metadata": func() (cli.Command, error) { + return &KVMetadataCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv metadata put": func() (cli.Command, error) { + return &KVMetadataPutCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv metadata patch": func() (cli.Command, error) { + return &KVMetadataPatchCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv metadata get": func() (cli.Command, error) { + return &KVMetadataGetCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv metadata delete": func() (cli.Command, error) { + return &KVMetadataDeleteCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "monitor": func() (cli.Command, error) { + return &MonitorCommand{ + BaseCommand: getBaseCommand(), + ShutdownCh: MakeShutdownCh(), + }, nil + }, + } + + initCommandsEnt(ui, serverCmdUi, runOpts, commands) + return commands +} + +// MakeShutdownCh returns a channel that can be used for shutdown +// notifications for commands. This channel will send a message for every +// SIGINT or SIGTERM received. +func MakeShutdownCh() chan struct{} { + resultCh := make(chan struct{}) + + shutdownCh := make(chan os.Signal, 4) + signal.Notify(shutdownCh, os.Interrupt, syscall.SIGTERM) + go func() { + <-shutdownCh + close(resultCh) + }() + return resultCh +} + +// MakeSighupCh returns a channel that can be used for SIGHUP +// reloading. This channel will send a message for every +// SIGHUP received. +func MakeSighupCh() chan struct{} { + resultCh := make(chan struct{}) + + signalCh := make(chan os.Signal, 4) + signal.Notify(signalCh, syscall.SIGHUP) + go func() { + for { + <-signalCh + resultCh <- struct{}{} + } + }() + return resultCh +} diff --git a/command/commands_nonwindows.go b/command/commands_nonwindows.go new file mode 100644 index 0000000..f8d128c --- /dev/null +++ b/command/commands_nonwindows.go @@ -0,0 +1,29 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !windows + +package command + +import ( + "os" + "os/signal" + "syscall" +) + +// MakeSigUSR2Ch returns a channel that can be used for SIGUSR2 +// goroutine logging. 
This channel will send a message for every +// SIGUSR2 received. +func MakeSigUSR2Ch() chan struct{} { + resultCh := make(chan struct{}) + + signalCh := make(chan os.Signal, 4) + signal.Notify(signalCh, syscall.SIGUSR2) + go func() { + for { + <-signalCh + resultCh <- struct{}{} + } + }() + return resultCh +} diff --git a/command/commands_windows.go b/command/commands_windows.go new file mode 100644 index 0000000..541a6e4 --- /dev/null +++ b/command/commands_windows.go @@ -0,0 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build windows + +package command + +// MakeSigUSR2Ch does nothing useful on Windows. +func MakeSigUSR2Ch() chan struct{} { + return make(chan struct{}) +} diff --git a/command/config.go b/command/config.go new file mode 100644 index 0000000..3fbc53a --- /dev/null +++ b/command/config.go @@ -0,0 +1,57 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "github.com/hashicorp/vault/command/config" +) + +const ( + // DefaultConfigPath is the default path to the configuration file + DefaultConfigPath = "~/.vault" + + // ConfigPathEnv is the environment variable that can be used to + // override where the Vault configuration is. + ConfigPathEnv = "VAULT_CONFIG_PATH" +) + +// Config is the CLI configuration for Vault that can be specified via +// a `$HOME/.vault` file which is HCL-formatted (therefore HCL or JSON). +type DefaultConfig struct { + // TokenHelper is the executable/command that is executed for storing + // and retrieving the authentication token for the Vault CLI. If this + // is not specified, then vault's internal token store will be used, which + // stores the token on disk unencrypted. + TokenHelper string `hcl:"token_helper"` +} + +// Config loads the configuration and returns it. If the configuration +// is already loaded, it is returned. +// +// Config just calls into config.Config for backwards compatibility purposes. +// Use config.Config instead. +func Config() (*DefaultConfig, error) { + conf, err := config.Config() + return (*DefaultConfig)(conf), err +} + +// LoadConfig reads the configuration from the given path. If path is +// empty, then the default path will be used, or the environment variable +// if set. +// +// LoadConfig just calls into config.LoadConfig for backwards compatibility +// purposes. Use config.LoadConfig instead. +func LoadConfig(path string) (*DefaultConfig, error) { + conf, err := config.LoadConfig(path) + return (*DefaultConfig)(conf), err +} + +// ParseConfig parses the given configuration as a string. +// +// ParseConfig just calls into config.ParseConfig for backwards compatibility +// purposes. Use config.ParseConfig instead. +func ParseConfig(contents string) (*DefaultConfig, error) { + conf, err := config.ParseConfig(contents) + return (*DefaultConfig)(conf), err +} diff --git a/command/config/config.go b/command/config/config.go new file mode 100644 index 0000000..71f9127 --- /dev/null +++ b/command/config/config.go @@ -0,0 +1,103 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package config + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/vault/sdk/helper/hclutil" + homedir "github.com/mitchellh/go-homedir" +) + +const ( + // DefaultConfigPath is the default path to the configuration file + DefaultConfigPath = "~/.vault" + + // ConfigPathEnv is the environment variable that can be used to + // override where the Vault configuration is. + ConfigPathEnv = "VAULT_CONFIG_PATH" +) + +// Config is the CLI configuration for Vault that can be specified via +// a `$HOME/.vault` file which is HCL-formatted (therefore HCL or JSON). +type DefaultConfig struct { + // TokenHelper is the executable/command that is executed for storing + // and retrieving the authentication token for the Vault CLI. If this + // is not specified, then vault's internal token store will be used, which + // stores the token on disk unencrypted. + TokenHelper string `hcl:"token_helper"` +} + +// Config loads the configuration and returns it. If the configuration +// is already loaded, it is returned. +func Config() (*DefaultConfig, error) { + var err error + config, err := LoadConfig("") + if err != nil { + return nil, err + } + + return config, nil +} + +// LoadConfig reads the configuration from the given path. If path is +// empty, then the default path will be used, or the environment variable +// if set. +func LoadConfig(path string) (*DefaultConfig, error) { + if path == "" { + path = DefaultConfigPath + } + if v := os.Getenv(ConfigPathEnv); v != "" { + path = v + } + + // NOTE: requires HOME env var to be set + path, err := homedir.Expand(path) + if err != nil { + return nil, fmt.Errorf("error expanding config path %q: %w", path, err) + } + + contents, err := ioutil.ReadFile(path) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + + conf, err := ParseConfig(string(contents)) + if err != nil { + return nil, fmt.Errorf("error parsing config file at %q: %w; ensure that the file is valid; Ansible Vault is known to conflict with it.", path, err) + } + + return conf, nil +} + +// ParseConfig parses the given configuration as a string. +func ParseConfig(contents string) (*DefaultConfig, error) { + root, err := hcl.Parse(contents) + if err != nil { + return nil, err + } + + // Top-level item should be the object list + list, ok := root.Node.(*ast.ObjectList) + if !ok { + return nil, fmt.Errorf("failed to parse config; does not contain a root object") + } + + valid := []string{ + "token_helper", + } + if err := hclutil.CheckHCLKeys(list, valid); err != nil { + return nil, err + } + + var c DefaultConfig + if err := hcl.DecodeObject(&c, list); err != nil { + return nil, err + } + return &c, nil +} diff --git a/command/config/config_test.go b/command/config/config_test.go new file mode 100644 index 0000000..fef1516 --- /dev/null +++ b/command/config/config_test.go @@ -0,0 +1,52 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package config + +import ( + "path/filepath" + "reflect" + "strings" + "testing" +) + +const FixturePath = "../test-fixtures" + +func TestLoadConfig(t *testing.T) { + config, err := LoadConfig(filepath.Join(FixturePath, "config.hcl")) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &DefaultConfig{ + TokenHelper: "foo", + } + if !reflect.DeepEqual(expected, config) { + t.Fatalf("bad: %#v", config) + } +} + +func TestLoadConfig_noExist(t *testing.T) { + config, err := LoadConfig("nope/not-once/.never") + if err != nil { + t.Fatal(err) + } + + if config.TokenHelper != "" { + t.Errorf("expected %q to be %q", config.TokenHelper, "") + } +} + +func TestParseConfig_badKeys(t *testing.T) { + _, err := ParseConfig(` +token_helper = "/token" +nope = "true" +`) + if err == nil { + t.Fatal("expected error") + } + + if !strings.Contains(err.Error(), `invalid key "nope" on line 3`) { + t.Errorf("bad error: %s", err.Error()) + } +} diff --git a/command/config/util.go b/command/config/util.go new file mode 100644 index 0000000..f295f46 --- /dev/null +++ b/command/config/util.go @@ -0,0 +1,28 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package config + +import ( + "github.com/hashicorp/vault/command/token" +) + +// DefaultTokenHelper returns the token helper that is configured for Vault. +// This helper should only be used for non-server CLI commands. +func DefaultTokenHelper() (token.TokenHelper, error) { + config, err := LoadConfig("") + if err != nil { + return nil, err + } + + path := config.TokenHelper + if path == "" { + return token.NewInternalTokenHelper() + } + + path, err = token.ExternalTokenHelperPath(path) + if err != nil { + return nil, err + } + return &token.ExternalTokenHelper{BinaryPath: path}, nil +} diff --git a/command/config/validate_listener.go b/command/config/validate_listener.go new file mode 100644 index 0000000..e2d2716 --- /dev/null +++ b/command/config/validate_listener.go @@ -0,0 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !fips_140_3 + +package config + +import "github.com/hashicorp/vault/internalshared/configutil" + +func IsValidListener(listener *configutil.Listener) error { + return nil +} diff --git a/command/config_test.go b/command/config_test.go new file mode 100644 index 0000000..787c679 --- /dev/null +++ b/command/config_test.go @@ -0,0 +1,52 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "path/filepath" + "reflect" + "strings" + "testing" +) + +const FixturePath = "./test-fixtures" + +func TestLoadConfig(t *testing.T) { + config, err := LoadConfig(filepath.Join(FixturePath, "config.hcl")) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &DefaultConfig{ + TokenHelper: "foo", + } + if !reflect.DeepEqual(expected, config) { + t.Fatalf("bad: %#v", config) + } +} + +func TestLoadConfig_noExist(t *testing.T) { + config, err := LoadConfig("nope/not-once/.never") + if err != nil { + t.Fatal(err) + } + + if config.TokenHelper != "" { + t.Errorf("expected %q to be %q", config.TokenHelper, "") + } +} + +func TestParseConfig_badKeys(t *testing.T) { + _, err := ParseConfig(` +token_helper = "/token" +nope = "true" +`) + if err == nil { + t.Fatal("expected error") + } + + if !strings.Contains(err.Error(), `invalid key "nope" on line 3`) { + t.Errorf("bad error: %s", err.Error()) + } +} diff --git a/command/debug.go b/command/debug.go new file mode 100644 index 0000000..e5440b3 --- /dev/null +++ b/command/debug.go @@ -0,0 +1,1111 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-secure-stdlib/gatedwriter" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/osutil" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/version" + "github.com/mholt/archiver/v3" + "github.com/mitchellh/cli" + "github.com/oklog/run" + "github.com/posener/complete" +) + +const ( + // debugIndexVersion tracks the canonical version in the index file + // for compatibility with future format/layout changes on the bundle. + debugIndexVersion = 1 + + // debugMinInterval is the minimum acceptable interval capture value. This + // value applies to duration and all interval-related flags. + debugMinInterval = 5 * time.Second + + // debugDurationGrace is the grace period added to duration to allow for + // "last frame" capture if the interval falls into the last duration time + // value. For instance, using default values, adding a grace duration lets + // the command capture 5 intervals (0, 30, 60, 90, and 120th second) before + // exiting. + debugDurationGrace = 1 * time.Second + + // debugCompressionExt is the default compression extension used if + // compression is enabled. + debugCompressionExt = ".tar.gz" + + // fileFriendlyTimeFormat is the time format used for file and directory + // naming. 
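// Illustrative note: Go time layouts are written against the reference
// time "Mon Jan 2 15:04:05 MST 2006", so the layout below is RFC 3339
// with its colons replaced by dashes, keeping the value safe in file and
// directory names on every platform, e.g.:
//
//	time.Now().UTC().Format(fileFriendlyTimeFormat) // "2024-04-20T12-23-50Z"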
+ fileFriendlyTimeFormat = "2006-01-02T15-04-05Z" +) + +// debugIndex represents the data structure in the index file +type debugIndex struct { + Version int `json:"version"` + VaultAddress string `json:"vault_address"` + ClientVersion string `json:"client_version"` + ServerVersion string `json:"server_version"` + Timestamp time.Time `json:"timestamp"` + DurationSeconds int `json:"duration_seconds"` + IntervalSeconds int `json:"interval_seconds"` + MetricsIntervalSeconds int `json:"metrics_interval_seconds"` + Compress bool `json:"compress"` + RawArgs []string `json:"raw_args"` + Targets []string `json:"targets"` + Output map[string]interface{} `json:"output"` + Errors []*captureError `json:"errors"` +} + +// captureError holds an error entry that can occur during polling capture. +// It includes the timestamp, the target, and the error itself. +type captureError struct { + TargetError string `json:"error"` + Target string `json:"target"` + Timestamp time.Time `json:"timestamp"` +} + +var ( + _ cli.Command = (*DebugCommand)(nil) + _ cli.CommandAutocomplete = (*DebugCommand)(nil) +) + +type DebugCommand struct { + *BaseCommand + + flagCompress bool + flagDuration time.Duration + flagInterval time.Duration + flagMetricsInterval time.Duration + flagOutput string + flagTargets []string + + // logFormat defines the output format for Monitor + logFormat string + + // debugIndex is used to keep track of the index state, which gets written + // to a file at the end. + debugIndex *debugIndex + + // skipTimingChecks bypasses timing-related checks, used primarily for tests + skipTimingChecks bool + // logger is the logger used for outputting capture progress + logger hclog.Logger + + // ShutdownCh is used to capture interrupt signal and end polling capture + ShutdownCh chan struct{} + + // Collection slices to hold data + hostInfoCollection []map[string]interface{} + metricsCollection []map[string]interface{} + replicationStatusCollection []map[string]interface{} + serverStatusCollection []map[string]interface{} + inFlightReqStatusCollection []map[string]interface{} + + // cachedClient holds the client retrieved during preflight + cachedClient *api.Client + + // errLock is used to lock error capture into the index file + errLock sync.Mutex +} + +func (c *DebugCommand) AutocompleteArgs() complete.Predictor { + // Predict targets + return c.PredictVaultDebugTargets() +} + +func (c *DebugCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *DebugCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP) + + f := set.NewFlagSet("Command Options") + + f.BoolVar(&BoolVar{ + Name: "compress", + Target: &c.flagCompress, + Default: true, + Usage: "Toggles whether to compress output package", + }) + + f.DurationVar(&DurationVar{ + Name: "duration", + Target: &c.flagDuration, + Completion: complete.PredictAnything, + Default: 2 * time.Minute, + Usage: "Duration to run the command.", + }) + + f.DurationVar(&DurationVar{ + Name: "interval", + Target: &c.flagInterval, + Completion: complete.PredictAnything, + Default: 30 * time.Second, + Usage: "The polling interval at which to collect profiling data and server state.", + }) + + f.DurationVar(&DurationVar{ + Name: "metrics-interval", + Target: &c.flagMetricsInterval, + Completion: complete.PredictAnything, + Default: 10 * time.Second, + Usage: "The polling interval at which to collect metrics data.", + }) + + f.StringVar(&StringVar{ + Name: "output", + Target: &c.flagOutput, + Completion: complete.PredictAnything, + Usage: 
"Specifies the output path for the debug package.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: "target", + Target: &c.flagTargets, + Usage: "Target to capture, defaulting to all if none specified. " + + "This can be specified multiple times to capture multiple targets. " + + "Available targets are: config, host, metrics, pprof, " + + "replication-status, server-status, log.", + }) + + f.StringVar(&StringVar{ + Name: "log-format", + Target: &c.logFormat, + Default: "standard", + Usage: "Log format to be captured if \"log\" target specified. " + + "Supported values are \"standard\" and \"json\". The default is \"standard\".", + }) + + return set +} + +func (c *DebugCommand) Help() string { + helpText := ` +Usage: vault debug [options] + + Probes a specific Vault server node for a specified period of time, recording + information about the node, its cluster, and its host environment. The + information collected is packaged and written to the specified path. + + Certain endpoints that this command uses require ACL permissions to access. + If not permitted, the information from these endpoints will not be part of the + output. The command uses the Vault address and token as specified via + the login command, environment variables, or CLI flags. + + To create a debug package using default duration and interval values in the + current directory that captures all applicable targets: + + $ vault debug + + To create a debug package with a specific duration and interval in the current + directory that capture all applicable targets: + + $ vault debug -duration=10m -interval=1m + + To create a debug package in the current directory with a specific sub-set of + targets: + + $ vault debug -target=host -target=metrics + +` + c.Flags().Help() + + return helpText +} + +func (c *DebugCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + parsedArgs := f.Args() + if len(parsedArgs) > 0 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(parsedArgs))) + return 1 + } + + // Initialize the logger for debug output + gatedWriter := gatedwriter.NewWriter(os.Stderr) + if c.logger == nil { + c.logger = logging.NewVaultLoggerWithWriter(gatedWriter, hclog.Trace) + } + + dstOutputFile, err := c.preflight(args) + if err != nil { + c.UI.Error(fmt.Sprintf("Error during validation: %s", err)) + return 1 + } + + // Print debug information + c.UI.Output("==> Starting debug capture...") + c.UI.Info(fmt.Sprintf(" Vault Address: %s", c.debugIndex.VaultAddress)) + c.UI.Info(fmt.Sprintf(" Client Version: %s", c.debugIndex.ClientVersion)) + c.UI.Info(fmt.Sprintf(" Server Version: %s", c.debugIndex.ServerVersion)) + c.UI.Info(fmt.Sprintf(" Duration: %s", c.flagDuration)) + c.UI.Info(fmt.Sprintf(" Interval: %s", c.flagInterval)) + c.UI.Info(fmt.Sprintf(" Metrics Interval: %s", c.flagMetricsInterval)) + c.UI.Info(fmt.Sprintf(" Targets: %s", strings.Join(c.flagTargets, ", "))) + c.UI.Info(fmt.Sprintf(" Output: %s", dstOutputFile)) + c.UI.Output("") + + // Release the log gate. 
+ c.logger.(hclog.OutputResettable).ResetOutputWithFlush(&hclog.LoggerOptions{ + Output: os.Stderr, + }, gatedWriter) + + // Capture static information + c.UI.Info("==> Capturing static information...") + if err := c.captureStaticTargets(); err != nil { + c.UI.Error(fmt.Sprintf("Error capturing static information: %s", err)) + return 2 + } + + c.UI.Output("") + + // Capture polling information + c.UI.Info("==> Capturing dynamic information...") + if err := c.capturePollingTargets(); err != nil { + c.UI.Error(fmt.Sprintf("Error capturing dynamic information: %s", err)) + return 2 + } + + c.UI.Output("Finished capturing information, bundling files...") + + // Generate index file + if err := c.generateIndex(); err != nil { + c.UI.Error(fmt.Sprintf("Error generating index: %s", err)) + return 1 + } + + if c.flagCompress { + if err := c.compress(dstOutputFile); err != nil { + c.UI.Error(fmt.Sprintf("Error encountered during bundle compression: %s", err)) + // We want to inform that data collection was captured and stored in + // a directory even if compression fails + c.UI.Info(fmt.Sprintf("Data written to: %s", c.flagOutput)) + return 1 + } + } + + c.UI.Info(fmt.Sprintf("Success! Bundle written to: %s", dstOutputFile)) + return 0 +} + +func (c *DebugCommand) Synopsis() string { + return "Runs the debug command" +} + +func (c *DebugCommand) generateIndex() error { + outputLayout := map[string]interface{}{ + "files": []string{}, + } + // Walk the directory to generate the output layout + err := filepath.Walk(c.flagOutput, func(path string, info os.FileInfo, err error) error { + // Prevent panic by handling failure accessing a path + if err != nil { + return err + } + + // Skip the base dir + if path == c.flagOutput { + return nil + } + + // If we're a directory, simply add a corresponding map + if info.IsDir() { + parsedTime, err := time.Parse(fileFriendlyTimeFormat, info.Name()) + if err != nil { + return err + } + + outputLayout[info.Name()] = map[string]interface{}{ + "timestamp": parsedTime, + "files": []string{}, + } + return nil + } + + relPath, err := filepath.Rel(c.flagOutput, path) + if err != nil { + return err + } + + dir, file := filepath.Split(relPath) + if len(dir) != 0 { + dir = filepath.Clean(dir) + filesArr := outputLayout[dir].(map[string]interface{})["files"] + outputLayout[dir].(map[string]interface{})["files"] = append(filesArr.([]string), file) + } else { + outputLayout["files"] = append(outputLayout["files"].([]string), file) + } + + return nil + }) + if err != nil { + return fmt.Errorf("error generating directory output layout: %s", err) + } + + c.debugIndex.Output = outputLayout + + // Marshal into json + bytes, err := json.MarshalIndent(c.debugIndex, "", " ") + if err != nil { + return fmt.Errorf("error marshaling index file: %s", err) + } + + // Write out file + if err := ioutil.WriteFile(filepath.Join(c.flagOutput, "index.json"), bytes, 0o600); err != nil { + return fmt.Errorf("error generating index file; %s", err) + } + + return nil +} + +// preflight performs various checks against the provided flags to ensure they +// are valid/reasonable values. It also takes care of instantiating a client and +// index object for use by the command. 
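// Illustrative example of the normalization performed below, given the
// 5s debugMinInterval floor: an invocation such as
//
//	vault debug -duration=1s -interval=1s -metrics-interval=1s
//
// is raised to 5s across the board, and any interval larger than the
// duration is further capped to the duration, with a notice printed for
// each overwritten value.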
+func (c *DebugCommand) preflight(rawArgs []string) (string, error) { + if !c.skipTimingChecks { + // Guard duration and interval values to acceptable values + if c.flagDuration < debugMinInterval { + c.UI.Info(fmt.Sprintf("Overwriting duration value %q to the minimum value of %q", c.flagDuration, debugMinInterval)) + c.flagDuration = debugMinInterval + } + if c.flagInterval < debugMinInterval { + c.UI.Info(fmt.Sprintf("Overwriting interval value %q to the minimum value of %q", c.flagInterval, debugMinInterval)) + c.flagInterval = debugMinInterval + } + if c.flagMetricsInterval < debugMinInterval { + c.UI.Info(fmt.Sprintf("Overwriting metrics interval value %q to the minimum value of %q", c.flagMetricsInterval, debugMinInterval)) + c.flagMetricsInterval = debugMinInterval + } + } + + // These timing checks are always applicable since interval shouldn't be + // greater than the duration + if c.flagInterval > c.flagDuration { + c.UI.Info(fmt.Sprintf("Overwriting interval value %q to the duration value %q", c.flagInterval, c.flagDuration)) + c.flagInterval = c.flagDuration + } + if c.flagMetricsInterval > c.flagDuration { + c.UI.Info(fmt.Sprintf("Overwriting metrics interval value %q to the duration value %q", c.flagMetricsInterval, c.flagDuration)) + c.flagMetricsInterval = c.flagDuration + } + + if len(c.flagTargets) == 0 { + c.flagTargets = c.defaultTargets() + } else { + // Check for any invalid targets and ignore them if found + invalidTargets := strutil.Difference(c.flagTargets, c.defaultTargets(), true) + if len(invalidTargets) != 0 { + c.UI.Info(fmt.Sprintf("Ignoring invalid targets: %s", strings.Join(invalidTargets, ", "))) + c.flagTargets = strutil.Difference(c.flagTargets, invalidTargets, true) + } + } + + // Make sure we can talk to the server + client, err := c.Client() + if err != nil { + return "", fmt.Errorf("unable to create client to connect to Vault: %s", err) + } + serverHealth, err := client.Sys().Health() + if err != nil { + return "", fmt.Errorf("unable to connect to the server: %s", err) + } + + // Check if server is DR Secondary and we need to further + // ignore any targets due to endpoint restrictions + if serverHealth.ReplicationDRMode == "secondary" { + invalidDRTargets := strutil.Difference(c.flagTargets, c.validDRSecondaryTargets(), true) + if len(invalidDRTargets) != 0 { + c.UI.Info(fmt.Sprintf("Ignoring invalid targets for DR Secondary: %s", strings.Join(invalidDRTargets, ", "))) + c.flagTargets = strutil.Difference(c.flagTargets, invalidDRTargets, true) + } + } + c.cachedClient = client + + captureTime := time.Now().UTC() + if len(c.flagOutput) == 0 { + formattedTime := captureTime.Format(fileFriendlyTimeFormat) + c.flagOutput = fmt.Sprintf("vault-debug-%s", formattedTime) + } + + // Strip trailing slash before proceeding + c.flagOutput = filepath.Clean(c.flagOutput) + + // If compression is enabled, trim the extension so that the files are + // written to a directory even if compression somehow fails. We ensure the + // extension during compression. We also prevent overwriting if the file + // already exists. + dstOutputFile := c.flagOutput + if c.flagCompress { + if !strings.HasSuffix(dstOutputFile, ".tar.gz") && !strings.HasSuffix(dstOutputFile, ".tgz") { + dstOutputFile = dstOutputFile + debugCompressionExt + } + + // Ensure that the file doesn't already exist, and ensure that we always + // trim the extension from flagOutput since we'll be progressively + // writing to that. 
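// Illustrative outcomes of the naming rules above, with -compress=true
// (the paths are hypothetical):
//
//	-output=foo      -> data directory "foo", bundle "foo.tar.gz"
//	-output=foo.tgz  -> data directory "foo", bundle "foo.tgz"
//	no -output       -> "vault-debug-<timestamp>" plus ".tar.gz"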
+ _, err := os.Stat(dstOutputFile) + switch { + case os.IsNotExist(err): + c.flagOutput = strings.TrimSuffix(c.flagOutput, ".tar.gz") + c.flagOutput = strings.TrimSuffix(c.flagOutput, ".tgz") + case err != nil: + return "", fmt.Errorf("unable to stat file: %s", err) + default: + return "", fmt.Errorf("output file already exists: %s", dstOutputFile) + } + } + + // Stat check the directory to ensure we don't override any existing data. + _, err = os.Stat(c.flagOutput) + switch { + case os.IsNotExist(err): + err := os.MkdirAll(c.flagOutput, 0o700) + if err != nil { + return "", fmt.Errorf("unable to create output directory: %s", err) + } + case err != nil: + return "", fmt.Errorf("unable to stat directory: %s", err) + default: + return "", fmt.Errorf("output directory already exists: %s", c.flagOutput) + } + + // Populate initial index fields + c.debugIndex = &debugIndex{ + VaultAddress: client.Address(), + ClientVersion: version.GetVersion().VersionNumber(), + ServerVersion: serverHealth.Version, + Compress: c.flagCompress, + DurationSeconds: int(c.flagDuration.Seconds()), + IntervalSeconds: int(c.flagInterval.Seconds()), + MetricsIntervalSeconds: int(c.flagMetricsInterval.Seconds()), + RawArgs: rawArgs, + Version: debugIndexVersion, + Targets: c.flagTargets, + Timestamp: captureTime, + Errors: []*captureError{}, + } + + return dstOutputFile, nil +} + +func (c *DebugCommand) defaultTargets() []string { + return []string{"config", "host", "requests", "metrics", "pprof", "replication-status", "server-status", "log"} +} + +func (c *DebugCommand) validDRSecondaryTargets() []string { + return []string{"metrics", "replication-status", "server-status"} +} + +func (c *DebugCommand) captureStaticTargets() error { + // Capture configuration state + if strutil.StrListContains(c.flagTargets, "config") { + c.logger.Info("capturing configuration state") + + resp, err := c.cachedClient.Logical().Read("sys/config/state/sanitized") + if err != nil { + c.captureError("config", err) + c.logger.Error("config: error capturing config state", "error", err) + return nil + } + + if resp != nil && resp.Data != nil { + collection := []map[string]interface{}{ + { + "timestamp": time.Now().UTC(), + "config": resp.Data, + }, + } + if err := c.persistCollection(collection, "config.json"); err != nil { + c.UI.Error(fmt.Sprintf("Error writing data to %s: %v", "config.json", err)) + } + } + } + + return nil +} + +// capturePollingTargets captures all dynamic targets over the specified +// duration and interval. 
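// A minimal sketch of the oklog/run pattern used below: every actor
// blocks until the shared context ends (duration plus grace) or an
// interrupt arrives, and each interrupt function cancels that context so
// all collectors stop together:
//
//	var g run.Group
//	ctx, cancel := context.WithTimeout(context.Background(), d)
//	g.Add(func() error { <-ctx.Done(); return nil }, func(error) { cancel() })
//	err := g.Run() // returns once the first actor exits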
+func (c *DebugCommand) capturePollingTargets() error { + var g run.Group + + ctx, cancelFunc := context.WithTimeout(context.Background(), c.flagDuration+debugDurationGrace) + defer cancelFunc() + + // This run group watches for interrupt or duration + g.Add(func() error { + for { + select { + case <-c.ShutdownCh: + return nil + case <-ctx.Done(): + return nil + } + } + }, func(error) {}) + + // Collect host-info if target is specified + if strutil.StrListContains(c.flagTargets, "host") { + g.Add(func() error { + c.collectHostInfo(ctx) + return nil + }, func(error) { + cancelFunc() + }) + } + + // Collect metrics if target is specified + if strutil.StrListContains(c.flagTargets, "metrics") { + g.Add(func() error { + c.collectMetrics(ctx) + return nil + }, func(error) { + cancelFunc() + }) + } + + // Collect pprof data if target is specified + if strutil.StrListContains(c.flagTargets, "pprof") { + g.Add(func() error { + c.collectPprof(ctx) + return nil + }, func(error) { + cancelFunc() + }) + } + + // Collect replication status if target is specified + if strutil.StrListContains(c.flagTargets, "replication-status") { + g.Add(func() error { + c.collectReplicationStatus(ctx) + return nil + }, func(error) { + cancelFunc() + }) + } + + // Collect server status if target is specified + if strutil.StrListContains(c.flagTargets, "server-status") { + g.Add(func() error { + c.collectServerStatus(ctx) + return nil + }, func(error) { + cancelFunc() + }) + } + + // Collect in-flight request status if target is specified + if strutil.StrListContains(c.flagTargets, "requests") { + g.Add(func() error { + c.collectInFlightRequestStatus(ctx) + return nil + }, func(error) { + cancelFunc() + }) + } + + if strutil.StrListContains(c.flagTargets, "log") { + g.Add(func() error { + c.writeLogs(ctx) + // If writeLogs returned earlier due to an error, wait for context + // to terminate so we don't abort everything. + <-ctx.Done() + return nil + }, func(error) { + cancelFunc() + }) + } + + // We shouldn't bump across errors since none is returned by the interrupts, + // but we error check for sanity here. 
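// Once g.Run returns below, each non-empty collection is flushed to a
// pretty-printed JSON file. An abridged, hypothetical server_status.json
// built from the entries recorded in collectServerStatus:
//
//	[
//	  {
//	    "timestamp": "2024-04-20T12:23:50Z",
//	    "health": { "initialized": true, "sealed": false },
//	    "seal": { "type": "shamir", "sealed": false }
//	  }
//	]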
+ if err := g.Run(); err != nil { + return err + } + + // Write collected data to their corresponding files + if err := c.persistCollection(c.metricsCollection, "metrics.json"); err != nil { + c.UI.Error(fmt.Sprintf("Error writing data to %s: %v", "metrics.json", err)) + } + if err := c.persistCollection(c.serverStatusCollection, "server_status.json"); err != nil { + c.UI.Error(fmt.Sprintf("Error writing data to %s: %v", "server_status.json", err)) + } + if err := c.persistCollection(c.replicationStatusCollection, "replication_status.json"); err != nil { + c.UI.Error(fmt.Sprintf("Error writing data to %s: %v", "replication_status.json", err)) + } + if err := c.persistCollection(c.hostInfoCollection, "host_info.json"); err != nil { + c.UI.Error(fmt.Sprintf("Error writing data to %s: %v", "host_info.json", err)) + } + if err := c.persistCollection(c.inFlightReqStatusCollection, "requests.json"); err != nil { + c.UI.Error(fmt.Sprintf("Error writing data to %s: %v", "requests.json", err)) + } + return nil +} + +func (c *DebugCommand) collectHostInfo(ctx context.Context) { + idxCount := 0 + intervalTicker := time.Tick(c.flagInterval) + + for { + if idxCount > 0 { + select { + case <-ctx.Done(): + return + case <-intervalTicker: + } + } + + c.logger.Info("capturing host information", "count", idxCount) + idxCount++ + + r := c.cachedClient.NewRequest("GET", "/v1/sys/host-info") + resp, err := c.cachedClient.RawRequestWithContext(ctx, r) + if err != nil { + c.captureError("host", err) + return + } + if resp != nil { + defer resp.Body.Close() + + secret, err := api.ParseSecret(resp.Body) + if err != nil { + c.captureError("host", err) + return + } + if secret != nil && secret.Data != nil { + hostEntry := secret.Data + c.hostInfoCollection = append(c.hostInfoCollection, hostEntry) + } + } + } +} + +func (c *DebugCommand) collectMetrics(ctx context.Context) { + idxCount := 0 + intervalTicker := time.Tick(c.flagMetricsInterval) + + for { + if idxCount > 0 { + select { + case <-ctx.Done(): + return + case <-intervalTicker: + } + } + + c.logger.Info("capturing metrics", "count", idxCount) + idxCount++ + + // Perform metrics request + r := c.cachedClient.NewRequest("GET", "/v1/sys/metrics") + resp, err := c.cachedClient.RawRequestWithContext(ctx, r) + if err != nil { + c.captureError("metrics", err) + continue + } + if resp != nil { + defer resp.Body.Close() + + metricsEntry := make(map[string]interface{}) + err := json.NewDecoder(resp.Body).Decode(&metricsEntry) + if err != nil { + c.captureError("metrics", err) + continue + } + c.metricsCollection = append(c.metricsCollection, metricsEntry) + } + } +} + +func (c *DebugCommand) collectPprof(ctx context.Context) { + idxCount := 0 + startTime := time.Now() + intervalTicker := time.Tick(c.flagInterval) + + for { + if idxCount > 0 { + select { + case <-ctx.Done(): + return + case <-intervalTicker: + } + } + + currentTimestamp := time.Now().UTC() + c.logger.Info("capturing pprof data", "count", idxCount) + idxCount++ + + // Create a sub-directory for pprof data + currentDir := currentTimestamp.Format(fileFriendlyTimeFormat) + dirName := filepath.Join(c.flagOutput, currentDir) + if err := os.MkdirAll(dirName, 0o700); err != nil { + c.UI.Error(fmt.Sprintf("Error creating sub-directory for time interval: %s", err)) + continue + } + + var wg sync.WaitGroup + + for _, target := range []string{"threadcreate", "allocs", "block", "mutex", "goroutine", "heap"} { + wg.Add(1) + go func(target string) { + defer wg.Done() + data, err := pprofTarget(ctx, c.cachedClient, 
target, nil)
+				if err != nil {
+					c.captureError("pprof."+target, err)
+					return
+				}
+
+				err = ioutil.WriteFile(filepath.Join(dirName, target+".prof"), data, 0o600)
+				if err != nil {
+					c.captureError("pprof."+target, err)
+				}
+			}(target)
+		}
+
+		// As a convenience, we'll also fetch the goroutine target using debug=2, which yields a text
+		// version of the stack traces that doesn't require using `go tool pprof` to view.
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			data, err := pprofTarget(ctx, c.cachedClient, "goroutine", url.Values{"debug": []string{"2"}})
+			if err != nil {
+				c.captureError("pprof.goroutines-text", err)
+				return
+			}
+
+			err = ioutil.WriteFile(filepath.Join(dirName, "goroutines.txt"), data, 0o600)
+			if err != nil {
+				c.captureError("pprof.goroutines-text", err)
+			}
+		}()
+
+		// If our remaining duration is less than the interval value,
+		// skip profile and trace.
+		runDuration := currentTimestamp.Sub(startTime)
+		if (c.flagDuration+debugDurationGrace)-runDuration < c.flagInterval {
+			wg.Wait()
+			continue
+		}
+
+		// Capture profile
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			data, err := pprofProfile(ctx, c.cachedClient, c.flagInterval)
+			if err != nil {
+				c.captureError("pprof.profile", err)
+				return
+			}
+
+			err = ioutil.WriteFile(filepath.Join(dirName, "profile.prof"), data, 0o600)
+			if err != nil {
+				c.captureError("pprof.profile", err)
+			}
+		}()
+
+		// Capture trace
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			data, err := pprofTrace(ctx, c.cachedClient, c.flagInterval)
+			if err != nil {
+				c.captureError("pprof.trace", err)
+				return
+			}
+
+			err = ioutil.WriteFile(filepath.Join(dirName, "trace.out"), data, 0o600)
+			if err != nil {
+				c.captureError("pprof.trace", err)
+			}
+		}()
+
+		wg.Wait()
+	}
+}
+
+func (c *DebugCommand) collectReplicationStatus(ctx context.Context) {
+	idxCount := 0
+	intervalTicker := time.Tick(c.flagInterval)
+
+	for {
+		if idxCount > 0 {
+			select {
+			case <-ctx.Done():
+				return
+			case <-intervalTicker:
+			}
+		}
+
+		c.logger.Info("capturing replication status", "count", idxCount)
+		idxCount++
+
+		r := c.cachedClient.NewRequest("GET", "/v1/sys/replication/status")
+		resp, err := c.cachedClient.RawRequestWithContext(ctx, r)
+		if err != nil {
+			c.captureError("replication-status", err)
+			return
+		}
+		if resp != nil {
+			defer resp.Body.Close()
+
+			secret, err := api.ParseSecret(resp.Body)
+			if err != nil {
+				c.captureError("replication-status", err)
+				return
+			}
+			if secret != nil && secret.Data != nil {
+				replicationEntry := secret.Data
+				replicationEntry["timestamp"] = time.Now().UTC()
+				c.replicationStatusCollection = append(c.replicationStatusCollection, replicationEntry)
+			}
+		}
+	}
+}
+
+func (c *DebugCommand) collectServerStatus(ctx context.Context) {
+	idxCount := 0
+	intervalTicker := time.Tick(c.flagInterval)
+
+	for {
+		if idxCount > 0 {
+			select {
+			case <-ctx.Done():
+				return
+			case <-intervalTicker:
+			}
+		}
+
+		c.logger.Info("capturing server status", "count", idxCount)
+		idxCount++
+
+		healthInfo, err := c.cachedClient.Sys().Health()
+		if err != nil {
+			c.captureError("server-status.health", err)
+		}
+		sealInfo, err := c.cachedClient.Sys().SealStatus()
+		if err != nil {
+			c.captureError("server-status.seal", err)
+		}
+
+		statusEntry := map[string]interface{}{
+			"timestamp": time.Now().UTC(),
+			"health":    healthInfo,
+			"seal":      sealInfo,
+		}
+		c.serverStatusCollection = append(c.serverStatusCollection, statusEntry)
+	}
+}
+
+func (c *DebugCommand) collectInFlightRequestStatus(ctx context.Context) {
+	idxCount := 0
+	intervalTicker := time.Tick(c.flagInterval)
+
+	for {
+		if idxCount > 0 {
+			select {
+			case <-ctx.Done():
+				return
+			case <-intervalTicker:
+			}
+		}
+
+		c.logger.Info("capturing in-flight request status", "count", idxCount)
+		idxCount++
+
+		req := c.cachedClient.NewRequest("GET", "/v1/sys/in-flight-req")
+		resp, err := c.cachedClient.RawRequestWithContext(ctx, req)
+		if err != nil {
+			c.captureError("requests", err)
+			return
+		}
+
+		var data map[string]interface{}
+		if resp != nil {
+			defer resp.Body.Close()
+			err = jsonutil.DecodeJSONFromReader(resp.Body, &data)
+			if err != nil {
+				c.captureError("requests", err)
+				return
+			}
+
+			statusEntry := map[string]interface{}{
+				"timestamp":          time.Now().UTC(),
+				"in_flight_requests": data,
+			}
+			c.inFlightReqStatusCollection = append(c.inFlightReqStatusCollection, statusEntry)
+		}
+	}
+}
+
+// persistCollection writes the collected data for a particular target to the
+// specified file. If the collection is empty, it returns immediately.
+func (c *DebugCommand) persistCollection(collection []map[string]interface{}, outFile string) error {
+	if len(collection) == 0 {
+		return nil
+	}
+
+	// Marshal the collection and write it out to the target file
+	bytes, err := json.MarshalIndent(collection, "", "  ")
+	if err != nil {
+		return err
+	}
+	if err := ioutil.WriteFile(filepath.Join(c.flagOutput, outFile), bytes, 0o600); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (c *DebugCommand) compress(dst string) error {
+	if runtime.GOOS != "windows" {
+		defer osutil.Umask(osutil.Umask(0o077))
+	}
+
+	tgz := archiver.NewTarGz()
+	if err := tgz.Archive([]string{c.flagOutput}, dst); err != nil {
+		return fmt.Errorf("failed to compress data: %s", err)
+	}
+
+	// If everything is fine up to this point, remove original directory
+	if err := os.RemoveAll(c.flagOutput); err != nil {
+		return fmt.Errorf("failed to remove data directory: %s", err)
+	}
+
+	return nil
+}
+
+func pprofTarget(ctx context.Context, client *api.Client, target string, params url.Values) ([]byte, error) {
+	req := client.NewRequest("GET", "/v1/sys/pprof/"+target)
+	if params != nil {
+		req.Params = params
+	}
+	resp, err := client.RawRequestWithContext(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	data, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+func pprofProfile(ctx context.Context, client *api.Client, duration time.Duration) ([]byte, error) {
+	seconds := int(duration.Seconds())
+	secStr := strconv.Itoa(seconds)
+
+	req := client.NewRequest("GET", "/v1/sys/pprof/profile")
+	req.Params.Add("seconds", secStr)
+	resp, err := client.RawRequestWithContext(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	data, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+func pprofTrace(ctx context.Context, client *api.Client, duration time.Duration) ([]byte, error) {
+	seconds := int(duration.Seconds())
+	secStr := strconv.Itoa(seconds)
+
+	req := client.NewRequest("GET", "/v1/sys/pprof/trace")
+	req.Params.Add("seconds", secStr)
+	resp, err := client.RawRequestWithContext(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	data, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// captureError instantiates a new captureError and appends it to the
+// command's index under lock.
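// An illustrative entry as it would appear under "errors" in index.json,
// using the JSON tags of the captureError struct above; the message shown
// is hypothetical:
//
//	{
//	  "error": "Get \"https://127.0.0.1:8200/v1/sys/metrics\": connection refused",
//	  "target": "metrics",
//	  "timestamp": "2024-04-20T12:24:20Z"
//	}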
+func (c *DebugCommand) captureError(target string, err error) { + c.errLock.Lock() + c.debugIndex.Errors = append(c.debugIndex.Errors, &captureError{ + TargetError: err.Error(), + Target: target, + Timestamp: time.Now().UTC(), + }) + c.errLock.Unlock() +} + +func (c *DebugCommand) writeLogs(ctx context.Context) { + out, err := os.OpenFile(filepath.Join(c.flagOutput, "vault.log"), os.O_CREATE|os.O_WRONLY, 0o600) + if err != nil { + c.captureError("log", err) + return + } + defer out.Close() + + // Create Monitor specific client based on the cached client + mClient, err := c.cachedClient.Clone() + if err != nil { + c.captureError("log", err) + return + } + mClient.SetToken(c.cachedClient.Token()) + + // Set timeout to match the context explicitly + mClient.SetClientTimeout(c.flagDuration + debugDurationGrace) + + logCh, err := mClient.Sys().Monitor(ctx, "trace", c.logFormat) + if err != nil { + c.captureError("log", err) + return + } + + for { + select { + case log := <-logCh: + if len(log) > 0 { + if !strings.HasSuffix(log, "\n") { + log += "\n" + } + _, err = out.WriteString(log) + if err != nil { + c.captureError("log", err) + return + } + } + case <-ctx.Done(): + return + } + } +} diff --git a/command/debug_test.go b/command/debug_test.go new file mode 100644 index 0000000..e63e1eb --- /dev/null +++ b/command/debug_test.go @@ -0,0 +1,846 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "archive/tar" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "syscall" + "testing" + "time" + + "github.com/hashicorp/vault/api" + "github.com/mholt/archiver/v3" + "github.com/mitchellh/cli" +) + +func testDebugCommand(tb testing.TB) (*cli.MockUi, *DebugCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &DebugCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestDebugCommand_Run(t *testing.T) { + t.Parallel() + + testDir, err := ioutil.TempDir("", "vault-debug") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(testDir) + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "valid", + []string{ + "-duration=1s", + fmt.Sprintf("-output=%s/valid", testDir), + }, + "", + 0, + }, + { + "too_many_args", + []string{ + "-duration=1s", + fmt.Sprintf("-output=%s/too_many_args", testDir), + "foo", + }, + "Too many arguments", + 1, + }, + { + "invalid_target", + []string{ + "-duration=1s", + fmt.Sprintf("-output=%s/invalid_target", testDir), + "-target=foo", + }, + "Ignoring invalid targets: foo", + 0, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testDebugCommand(t) + cmd.client = client + cmd.skipTimingChecks = true + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Fatalf("expected %q to contain %q", combined, tc.out) + } + }) + } +} + +func TestDebugCommand_Archive(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + ext string + expectError bool + }{ + { + "no-ext", + "", + false, + }, + { + "with-ext-tar-gz", + ".tar.gz", + false, + }, + { + "with-ext-tgz", + ".tgz", + false, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + // Create temp dirs for each test case 
since os.Stat and tgz.Walk
+			// (called down below) exhibit raciness otherwise.
+			testDir, err := ioutil.TempDir("", "vault-debug")
+			if err != nil {
+				t.Fatal(err)
+			}
+			defer os.RemoveAll(testDir)
+
+			client, closer := testVaultServer(t)
+			defer closer()
+
+			ui, cmd := testDebugCommand(t)
+			cmd.client = client
+			cmd.skipTimingChecks = true
+
+			// We use tc.name as the base path and apply the extension per
+			// test case.
+			basePath := tc.name
+			outputPath := filepath.Join(testDir, basePath+tc.ext)
+			args := []string{
+				"-duration=1s",
+				fmt.Sprintf("-output=%s", outputPath),
+				"-target=server-status",
+			}
+
+			code := cmd.Run(args)
+			if exp := 0; code != exp {
+				t.Log(ui.OutputWriter.String())
+				t.Log(ui.ErrorWriter.String())
+				t.Fatalf("expected %d to be %d", code, exp)
+			}
+			// If we expect an error we're done here
+			if tc.expectError {
+				return
+			}
+
+			expectedExt := tc.ext
+			if expectedExt == "" {
+				expectedExt = debugCompressionExt
+			}
+
+			bundlePath := filepath.Join(testDir, basePath+expectedExt)
+			_, err = os.Stat(bundlePath)
+			if os.IsNotExist(err) {
+				t.Log(ui.OutputWriter.String())
+				t.Fatal(err)
+			}
+
+			tgz := archiver.NewTarGz()
+			err = tgz.Walk(bundlePath, func(f archiver.File) error {
+				fh, ok := f.Header.(*tar.Header)
+				if !ok {
+					return fmt.Errorf("invalid file header: %#v", f.Header)
+				}
+
+				// Ignore base directory and index file
+				if fh.Name == basePath+"/" || fh.Name == filepath.Join(basePath, "index.json") {
+					return nil
+				}
+
+				if fh.Name != filepath.Join(basePath, "server_status.json") {
+					return fmt.Errorf("unexpected file: %s", fh.Name)
+				}
+				return nil
+			})
+			if err != nil {
+				t.Fatal(err)
+			}
+		})
+	}
+}
+
+func TestDebugCommand_CaptureTargets(t *testing.T) {
+	t.Parallel()
+
+	cases := []struct {
+		name          string
+		targets       []string
+		expectedFiles []string
+	}{
+		{
+			"config",
+			[]string{"config"},
+			[]string{"config.json"},
+		},
+		{
+			"host-info",
+			[]string{"host"},
+			[]string{"host_info.json"},
+		},
+		{
+			"metrics",
+			[]string{"metrics"},
+			[]string{"metrics.json"},
+		},
+		{
+			"replication-status",
+			[]string{"replication-status"},
+			[]string{"replication_status.json"},
+		},
+		{
+			"server-status",
+			[]string{"server-status"},
+			[]string{"server_status.json"},
+		},
+		{
+			"in-flight-req",
+			[]string{"requests"},
+			[]string{"requests.json"},
+		},
+		{
+			"all-minus-pprof",
+			[]string{"config", "host", "metrics", "replication-status", "server-status"},
+			[]string{"config.json", "host_info.json", "metrics.json", "replication_status.json", "server_status.json"},
+		},
+	}
+
+	for _, tc := range cases {
+		tc := tc
+
+		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
+
+			testDir, err := ioutil.TempDir("", "vault-debug")
+			if err != nil {
+				t.Fatal(err)
+			}
+			defer os.RemoveAll(testDir)
+
+			client, closer := testVaultServer(t)
+			defer closer()
+
+			ui, cmd := testDebugCommand(t)
+			cmd.client = client
+			cmd.skipTimingChecks = true
+
+			basePath := tc.name
+			args := []string{
+				"-duration=1s",
+				fmt.Sprintf("-output=%s/%s", testDir, basePath),
+			}
+			for _, target := range tc.targets {
+				args = append(args, fmt.Sprintf("-target=%s", target))
+			}
+
+			code := cmd.Run(args)
+			if exp := 0; code != exp {
+				t.Log(ui.ErrorWriter.String())
+				t.Fatalf("expected %d to be %d", code, exp)
+			}
+
+			bundlePath := filepath.Join(testDir, basePath+debugCompressionExt)
+			_, err = os.Open(bundlePath)
+			if err != nil {
+				t.Fatalf("failed to open archive: %s", err)
+			}
+
+			tgz := archiver.NewTarGz()
+			err = tgz.Walk(bundlePath, func(f archiver.File) error {
+				fh, ok := f.Header.(*tar.Header)
+				if !ok {
+					t.Fatalf("invalid file header: %#v", f.Header)
+				}
+
+				// Ignore base directory and index file
+				if fh.Name == basePath+"/" || fh.Name == filepath.Join(basePath, "index.json") {
+					return nil
+				}
+
+				for _, fileName := range tc.expectedFiles {
+					if fh.Name == filepath.Join(basePath, fileName) {
+						return nil
+					}
+				}
+
+				// If we reach here, it means that this is an unexpected file
+				return fmt.Errorf("unexpected file: %s", fh.Name)
+			})
+			if err != nil {
+				t.Fatal(err)
+			}
+		})
+	}
+}
+
+func TestDebugCommand_Pprof(t *testing.T) {
+	testDir, err := ioutil.TempDir("", "vault-debug")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(testDir)
+
+	client, closer := testVaultServer(t)
+	defer closer()
+
+	ui, cmd := testDebugCommand(t)
+	cmd.client = client
+	cmd.skipTimingChecks = true
+
+	basePath := "pprof"
+	outputPath := filepath.Join(testDir, basePath)
+	// pprof requires a minimum interval of 1s; we set it to 2 to ensure it
+	// runs through and reduce flakiness on slower systems.
+	args := []string{
+		"-compress=false",
+		"-duration=2s",
+		"-interval=2s",
+		fmt.Sprintf("-output=%s", outputPath),
+		"-target=pprof",
+	}
+
+	code := cmd.Run(args)
+	if exp := 0; code != exp {
+		t.Log(ui.ErrorWriter.String())
+		t.Fatalf("expected %d to be %d", code, exp)
+	}
+
+	profiles := []string{"heap.prof", "goroutine.prof"}
+	pollingProfiles := []string{"profile.prof", "trace.out"}
+
+	// These are captured on the first (0th) and last (1st) frame
+	for _, v := range profiles {
+		files, _ := filepath.Glob(fmt.Sprintf("%s/*/%s", outputPath, v))
+		if len(files) != 2 {
+			t.Errorf("2 output files should exist for %s: got: %v", v, files)
+		}
+	}
+
+	// Since profile and trace are polling outputs, these only get captured
+	// on the first (0th) frame.
+	for _, v := range pollingProfiles {
+		files, _ := filepath.Glob(fmt.Sprintf("%s/*/%s", outputPath, v))
+		if len(files) != 1 {
+			t.Errorf("1 output file should exist for %s: got: %v", v, files)
+		}
+	}
+
+	t.Log(ui.OutputWriter.String())
+	t.Log(ui.ErrorWriter.String())
+}
+
+func TestDebugCommand_IndexFile(t *testing.T) {
+	t.Parallel()
+
+	testDir, err := ioutil.TempDir("", "vault-debug")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(testDir)
+
+	client, closer := testVaultServer(t)
+	defer closer()
+
+	ui, cmd := testDebugCommand(t)
+	cmd.client = client
+	cmd.skipTimingChecks = true
+
+	basePath := "index-test"
+	outputPath := filepath.Join(testDir, basePath)
+	// pprof requires a minimum interval of 1s
+	args := []string{
+		"-compress=false",
+		"-duration=1s",
+		"-interval=1s",
+		"-metrics-interval=1s",
+		fmt.Sprintf("-output=%s", outputPath),
+	}
+
+	code := cmd.Run(args)
+	if exp := 0; code != exp {
+		t.Log(ui.ErrorWriter.String())
+		t.Fatalf("expected %d to be %d", code, exp)
+	}
+
+	content, err := ioutil.ReadFile(filepath.Join(outputPath, "index.json"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	index := &debugIndex{}
+	if err := json.Unmarshal(content, index); err != nil {
+		t.Fatal(err)
+	}
+	if len(index.Output) == 0 {
+		t.Fatalf("expected valid index file: got: %v", index)
+	}
+}
+
+func TestDebugCommand_TimingChecks(t *testing.T) {
+	t.Parallel()
+
+	testDir, err := ioutil.TempDir("", "vault-debug")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(testDir)
+
+	cases := []struct {
+		name            string
+		duration        string
+		interval        string
+		metricsInterval string
+	}{
+		{
+			"short-values-all",
+			"10ms",
+			"10ms",
+			"10ms",
+		},
+		{
+			"short-duration",
+			"10ms",
+			"",
+			"",
+		},
+		{
+			"short-interval",
+
debugMinInterval.String(), + "10ms", + "", + }, + { + "short-metrics-interval", + debugMinInterval.String(), + "", + "10ms", + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + // If we are past the minimum duration + some grace, trigger shutdown + // to prevent hanging + grace := 10 * time.Second + shutdownCh := make(chan struct{}) + go func() { + time.AfterFunc(grace, func() { + close(shutdownCh) + }) + }() + + ui, cmd := testDebugCommand(t) + cmd.client = client + cmd.ShutdownCh = shutdownCh + + basePath := tc.name + outputPath := filepath.Join(testDir, basePath) + // pprof requires a minimum interval of 1s + args := []string{ + "-target=server-status", + fmt.Sprintf("-output=%s", outputPath), + } + if tc.duration != "" { + args = append(args, fmt.Sprintf("-duration=%s", tc.duration)) + } + if tc.interval != "" { + args = append(args, fmt.Sprintf("-interval=%s", tc.interval)) + } + if tc.metricsInterval != "" { + args = append(args, fmt.Sprintf("-metrics-interval=%s", tc.metricsInterval)) + } + + code := cmd.Run(args) + if exp := 0; code != exp { + t.Log(ui.ErrorWriter.String()) + t.Fatalf("expected %d to be %d", code, exp) + } + + if !strings.Contains(ui.OutputWriter.String(), "Duration: 5s") { + t.Fatal("expected minimum duration value") + } + + if tc.interval != "" { + if !strings.Contains(ui.OutputWriter.String(), " Interval: 5s") { + t.Fatal("expected minimum interval value") + } + } + + if tc.metricsInterval != "" { + if !strings.Contains(ui.OutputWriter.String(), "Metrics Interval: 5s") { + t.Fatal("expected minimum metrics interval value") + } + } + }) + } +} + +func TestDebugCommand_NoConnection(t *testing.T) { + t.Parallel() + + client, err := api.NewClient(nil) + if err != nil { + t.Fatal(err) + } + + if err := client.SetAddress(""); err != nil { + t.Fatal(err) + } + + _, cmd := testDebugCommand(t) + cmd.client = client + cmd.skipTimingChecks = true + + args := []string{ + "-duration=1s", + "-target=server-status", + } + + code := cmd.Run(args) + if exp := 1; code != exp { + t.Fatalf("expected %d to be %d", code, exp) + } +} + +func TestDebugCommand_OutputExists(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + compress bool + outputFile string + expectedError string + }{ + { + "no-compress", + false, + "output-exists", + "output directory already exists", + }, + { + "compress", + true, + "output-exist.tar.gz", + "output file already exists", + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + testDir, err := ioutil.TempDir("", "vault-debug") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(testDir) + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testDebugCommand(t) + cmd.client = client + cmd.skipTimingChecks = true + + outputPath := filepath.Join(testDir, tc.outputFile) + + // Create a conflicting file/directory + if tc.compress { + _, err = os.Create(outputPath) + if err != nil { + t.Fatal(err) + } + } else { + err = os.Mkdir(outputPath, 0o700) + if err != nil { + t.Fatal(err) + } + } + + args := []string{ + fmt.Sprintf("-compress=%t", tc.compress), + "-duration=1s", + "-interval=1s", + "-metrics-interval=1s", + fmt.Sprintf("-output=%s", outputPath), + } + + code := cmd.Run(args) + if exp := 1; code != exp { + t.Log(ui.OutputWriter.String()) + t.Log(ui.ErrorWriter.String()) + t.Errorf("expected %d to be %d", code, exp) + } + + output := 
ui.ErrorWriter.String() + ui.OutputWriter.String() + if !strings.Contains(output, tc.expectedError) { + t.Fatalf("expected %s, got: %s", tc.expectedError, output) + } + }) + } +} + +func TestDebugCommand_PartialPermissions(t *testing.T) { + t.Parallel() + + testDir, err := ioutil.TempDir("", "vault-debug") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(testDir) + + client, closer := testVaultServer(t) + defer closer() + + // Create a new token with default policy + resp, err := client.Logical().Write("auth/token/create", map[string]interface{}{ + "policies": "default", + }) + if err != nil { + t.Fatal(err) + } + + client.SetToken(resp.Auth.ClientToken) + + ui, cmd := testDebugCommand(t) + cmd.client = client + cmd.skipTimingChecks = true + + basePath := "with-default-policy-token" + args := []string{ + "-duration=1s", + fmt.Sprintf("-output=%s/%s", testDir, basePath), + } + + code := cmd.Run(args) + if exp := 0; code != exp { + t.Log(ui.ErrorWriter.String()) + t.Fatalf("expected %d to be %d", code, exp) + } + + bundlePath := filepath.Join(testDir, basePath+debugCompressionExt) + _, err = os.Open(bundlePath) + if err != nil { + t.Fatalf("failed to open archive: %s", err) + } + + tgz := archiver.NewTarGz() + err = tgz.Walk(bundlePath, func(f archiver.File) error { + fh, ok := f.Header.(*tar.Header) + if !ok { + t.Fatalf("invalid file header: %#v", f.Header) + } + + // Ignore base directory and index file + if fh.Name == basePath+"/" { + return nil + } + + // Ignore directories, which still get created by pprof but should + // otherwise be empty. + if fh.FileInfo().IsDir() { + return nil + } + + switch { + case fh.Name == filepath.Join(basePath, "index.json"): + case fh.Name == filepath.Join(basePath, "replication_status.json"): + case fh.Name == filepath.Join(basePath, "server_status.json"): + case fh.Name == filepath.Join(basePath, "vault.log"): + default: + return fmt.Errorf("unexpected file: %s", fh.Name) + } + + return nil + }) + if err != nil { + t.Fatal(err) + } +} + +// set insecure umask to see if the files and directories get created with right permissions +func TestDebugCommand_InsecureUmask(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("test does not work in windows environment") + } + t.Parallel() + + cases := []struct { + name string + compress bool + outputFile string + expectError bool + }{ + { + "with-compress", + true, + "with-compress.tar.gz", + false, + }, + { + "no-compress", + false, + "no-compress", + false, + }, + } + + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + // set insecure umask + defer syscall.Umask(syscall.Umask(0)) + + testDir, err := ioutil.TempDir("", "vault-debug") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(testDir) + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testDebugCommand(t) + cmd.client = client + cmd.skipTimingChecks = true + + outputPath := filepath.Join(testDir, tc.outputFile) + + args := []string{ + fmt.Sprintf("-compress=%t", tc.compress), + "-duration=1s", + "-interval=1s", + "-metrics-interval=1s", + fmt.Sprintf("-output=%s", outputPath), + } + + code := cmd.Run(args) + if exp := 0; code != exp { + t.Log(ui.ErrorWriter.String()) + t.Fatalf("expected %d to be %d", code, exp) + } + // If we expect an error we're done here + if tc.expectError { + return + } + + bundlePath := filepath.Join(testDir, tc.outputFile) + fs, err := os.Stat(bundlePath) + if os.IsNotExist(err) { + t.Log(ui.OutputWriter.String()) + t.Fatal(err) + } + // check 
permissions of the parent debug directory
+			err = isValidFilePermissions(fs)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			// check permissions of the files within the parent directory
+			switch tc.compress {
+			case true:
+				tgz := archiver.NewTarGz()
+
+				err = tgz.Walk(bundlePath, func(f archiver.File) error {
+					fh, ok := f.Header.(*tar.Header)
+					if !ok {
+						return fmt.Errorf("invalid file header: %#v", f.Header)
+					}
+					err = isValidFilePermissions(fh.FileInfo())
+					if err != nil {
+						t.Fatal(err)
+					}
+					return nil
+				})
+
+			case false:
+				err = filepath.Walk(bundlePath, func(path string, info os.FileInfo, err error) error {
+					err = isValidFilePermissions(info)
+					if err != nil {
+						t.Fatal(err)
+					}
+					return nil
+				})
+			}
+
+			if err != nil {
+				t.Fatal(err)
+			}
+		})
+	}
+}
+
+func isValidFilePermissions(info os.FileInfo) (err error) {
+	mode := info.Mode()
+	// check group permissions
+	for i := 4; i < 7; i++ {
+		if string(mode.String()[i]) != "-" {
+			return fmt.Errorf("expected no permissions for group but got %s permissions for file %s", string(mode.String()[i]), info.Name())
+		}
+	}
+
+	// check others permissions
+	for i := 7; i < 10; i++ {
+		if string(mode.String()[i]) != "-" {
+			return fmt.Errorf("expected no permissions for others but got %s permissions for file %s", string(mode.String()[i]), info.Name())
+		}
+	}
+	return err
+}
diff --git a/command/delete.go b/command/delete.go
new file mode 100644
index 0000000..7da6dd2
--- /dev/null
+++ b/command/delete.go
@@ -0,0 +1,128 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"strings"
+
+	"github.com/mitchellh/cli"
+	"github.com/posener/complete"
+)
+
+var (
+	_ cli.Command             = (*DeleteCommand)(nil)
+	_ cli.CommandAutocomplete = (*DeleteCommand)(nil)
+)
+
+type DeleteCommand struct {
+	*BaseCommand
+
+	testStdin io.Reader // for tests
+}
+
+func (c *DeleteCommand) Synopsis() string {
+	return "Delete secrets and configuration"
+}
+
+func (c *DeleteCommand) Help() string {
+	helpText := `
+Usage: vault delete [options] PATH
+
+  Deletes secrets and configuration from Vault at the given path. The behavior
+  of "delete" is delegated to the backend corresponding to the given path.
+
+  Remove data in the static secret backend:
+
+      $ vault delete secret/my-secret
+
+  Uninstall an encryption key in the transit backend:
+
+      $ vault delete transit/keys/my-key
+
+  Delete an IAM role:
+
+      $ vault delete aws/roles/ops
+
+  For a full list of examples and paths, please see the documentation that
+  corresponds to the secret backend in use.
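+
+  Additional "key=value" pairs after the path are sent along with the delete
+  request, for backends whose delete endpoints accept parameters:
+
+      $ vault delete secret/my-secret foo=bar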
+ +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *DeleteCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) +} + +func (c *DeleteCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultFiles() +} + +func (c *DeleteCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *DeleteCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected at least 1, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + // Pull our fake stdin if needed + stdin := (io.Reader)(os.Stdin) + if c.testStdin != nil { + stdin = c.testStdin + } + + path := sanitizePath(args[0]) + + data, err := parseArgsDataStringLists(stdin, args[1:]) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to parse string list data: %s", err)) + return 1 + } + + secret, err := client.Logical().DeleteWithData(path, data) + if err != nil { + c.UI.Error(fmt.Sprintf("Error deleting %s: %s", path, err)) + if secret != nil { + OutputSecret(c.UI, secret) + } + return 2 + } + + if secret == nil { + // Don't output anything unless using the "table" format + if Format(c.UI) == "table" { + c.UI.Info(fmt.Sprintf("Success! Data deleted (if it existed) at: %s", path)) + } + return 0 + } + + // Handle single field output + if c.flagField != "" { + return PrintRawField(c.UI, secret, c.flagField) + } + + return OutputSecret(c.UI, secret) +} diff --git a/command/delete_test.go b/command/delete_test.go new file mode 100644 index 0000000..629be7a --- /dev/null +++ b/command/delete_test.go @@ -0,0 +1,144 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func testDeleteCommand(tb testing.TB) (*cli.MockUi, *DeleteCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &DeleteCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestDeleteCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "default", + []string{"secret/foo"}, + "", + 0, + }, + { + "optional_args", + []string{"secret/foo", "bar=baz"}, + "", + 0, + }, + { + "not_enough_args", + []string{}, + "Not enough arguments", + 1, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testDeleteCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("integration", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + if _, err := client.Logical().Write("secret/delete/foo", map[string]interface{}{ + "foo": "bar", + }); err != nil { + t.Fatal(err) + } + + ui, cmd := testDeleteCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "secret/delete/foo", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! Data deleted (if it existed) at: secret/delete/foo" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + secret, _ := client.Logical().Read("secret/delete/foo") + if secret != nil { + t.Errorf("expected deletion: %#v", secret) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testDeleteCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "secret/delete/foo", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error deleting secret/delete/foo: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testDeleteCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/events.go b/command/events.go new file mode 100644 index 0000000..353c979 --- /dev/null +++ b/command/events.go @@ -0,0 +1,127 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"os"
+	"strings"
+
+	"github.com/hashicorp/vault/api"
+	"github.com/mitchellh/cli"
+	"github.com/posener/complete"
+	"nhooyr.io/websocket"
+)
+
+var (
+	_ cli.Command             = (*EventsSubscribeCommands)(nil)
+	_ cli.CommandAutocomplete = (*EventsSubscribeCommands)(nil)
+)
+
+type EventsSubscribeCommands struct {
+	*BaseCommand
+}
+
+func (c *EventsSubscribeCommands) Synopsis() string {
+	return "Subscribe to events"
+}
+
+func (c *EventsSubscribeCommands) Help() string {
+	helpText := `
+Usage: vault events subscribe [-format=json] [-timeout=XYZs] eventType
+
+  Subscribe to events of the given event type (topic). The events will be
+  output to standard out.
+
+  The output will be a JSON object serialized using the default protobuf
+  JSON serialization format, with one line per event received.
+` + c.Flags().Help()
+	return strings.TrimSpace(helpText)
+}
+
+func (c *EventsSubscribeCommands) Flags() *FlagSets {
+	set := c.flagSet(FlagSetHTTP)
+
+	return set
+}
+
+func (c *EventsSubscribeCommands) AutocompleteArgs() complete.Predictor {
+	return nil
+}
+
+func (c *EventsSubscribeCommands) AutocompleteFlags() complete.Flags {
+	return c.Flags().Completions()
+}
+
+func (c *EventsSubscribeCommands) Run(args []string) int {
+	f := c.Flags()
+
+	if err := f.Parse(args); err != nil {
+		c.UI.Error(err.Error())
+		return 1
+	}
+
+	args = f.Args()
+	switch {
+	case len(args) < 1:
+		c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args)))
+		return 1
+	case len(args) > 1:
+		c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args)))
+		return 1
+	}
+
+	client, err := c.Client()
+	if err != nil {
+		c.UI.Error(err.Error())
+		return 2
+	}
+
+	err = c.subscribeRequest(client, "sys/events/subscribe/"+args[0])
+	if err != nil {
+		c.UI.Error(err.Error())
+		return 1
+	}
+	return 0
+}
+
+func (c *EventsSubscribeCommands) subscribeRequest(client *api.Client, path string) error {
+	r := client.NewRequest("GET", "/v1/"+path)
+	u := r.URL
+	if u.Scheme == "http" {
+		u.Scheme = "ws"
+	} else {
+		u.Scheme = "wss"
+	}
+	q := u.Query()
+	q.Set("json", "true")
+	u.RawQuery = q.Encode()
+	client.AddHeader("X-Vault-Token", client.Token())
+	client.AddHeader("X-Vault-Namespace", client.Namespace())
+	ctx := context.Background()
+	conn, resp, err := websocket.Dial(ctx, u.String(), &websocket.DialOptions{
+		HTTPClient: client.CloneConfig().HttpClient,
+		HTTPHeader: client.Headers(),
+	})
+	if err != nil {
+		if resp != nil && resp.StatusCode == http.StatusNotFound {
+			return fmt.Errorf("events endpoint not found; check `vault read sys/experiments` to see if an events experiment is available but disabled")
+		}
+		return err
+	}
+	defer conn.Close(websocket.StatusNormalClosure, "")
+
+	for {
+		_, message, err := conn.Read(ctx)
+		if err != nil {
+			return err
+		}
+		_, err = os.Stdout.Write(message)
+		if err != nil {
+			return err
+		}
+	}
+}
diff --git a/command/events_test.go b/command/events_test.go
new file mode 100644
index 0000000..bb2aef0
--- /dev/null
+++ b/command/events_test.go
@@ -0,0 +1,71 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func testEventsSubscribeCommand(tb testing.TB) (*cli.MockUi, *EventsSubscribeCommands) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &EventsSubscribeCommands{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +// TestEventsSubscribeCommand_Run tests that the command argument parsing is working as expected. +func TestEventsSubscribeCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + []string{}, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"foo", "bar"}, + "Too many arguments", + 1, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testEventsSubscribeCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } +} diff --git a/command/format.go b/command/format.go new file mode 100644 index 0000000..5e42d31 --- /dev/null +++ b/command/format.go @@ -0,0 +1,725 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "os" + "sort" + "strings" + "time" + + "github.com/ghodss/yaml" + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/ryanuber/columnize" +) + +const ( + // hopeDelim is the delimiter to use when splitting columns. We call it a + // hopeDelim because we hope that it's never contained in a secret. 
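+	// If a value ever did contain this rune, columnize would treat it as an
+	// extra column break and that row would render misaligned.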
+ hopeDelim = "♨" +) + +type FormatOptions struct { + Format string +} + +func OutputSecret(ui cli.Ui, secret *api.Secret) int { + return outputWithFormat(ui, secret, secret) +} + +func OutputList(ui cli.Ui, data interface{}) int { + switch data := data.(type) { + case *api.Secret: + secret := data + return outputWithFormat(ui, secret, secret.Data["keys"]) + default: + return outputWithFormat(ui, nil, data) + } +} + +func OutputData(ui cli.Ui, data interface{}) int { + return outputWithFormat(ui, nil, data) +} + +func outputWithFormat(ui cli.Ui, secret *api.Secret, data interface{}) int { + format := Format(ui) + formatter, ok := Formatters[format] + if !ok { + ui.Error(fmt.Sprintf("Invalid output format: %s", format)) + return 1 + } + + if err := formatter.Output(ui, secret, data); err != nil { + ui.Error(fmt.Sprintf("Could not parse output: %s", err.Error())) + return 1 + } + return 0 +} + +type Formatter interface { + Output(ui cli.Ui, secret *api.Secret, data interface{}) error + Format(data interface{}) ([]byte, error) +} + +var Formatters = map[string]Formatter{ + "json": JsonFormatter{}, + "table": TableFormatter{}, + "yaml": YamlFormatter{}, + "yml": YamlFormatter{}, + "pretty": PrettyFormatter{}, + "raw": RawFormatter{}, +} + +func Format(ui cli.Ui) string { + switch ui := ui.(type) { + case *VaultUI: + return ui.format + } + + format := os.Getenv(EnvVaultFormat) + if format == "" { + format = "table" + } + + return format +} + +func Detailed(ui cli.Ui) bool { + switch ui := ui.(type) { + case *VaultUI: + return ui.detailed + } + + return false +} + +// An output formatter for json output of an object +type JsonFormatter struct{} + +func (j JsonFormatter) Format(data interface{}) ([]byte, error) { + return json.MarshalIndent(data, "", " ") +} + +func (j JsonFormatter) Output(ui cli.Ui, secret *api.Secret, data interface{}) error { + b, err := j.Format(data) + if err != nil { + return err + } + + if secret != nil { + shouldListWithInfo := Detailed(ui) + + // Show the raw JSON of the LIST call, rather than only the + // list of keys. 
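+		// (Detailed list output is what the user requests with, for example,
+		// the CLI's -detailed flag.)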
+ if shouldListWithInfo { + b, err = j.Format(secret) + if err != nil { + return err + } + } + } + + ui.Output(string(b)) + return nil +} + +// An output formatter for raw output of the original request object +type RawFormatter struct{} + +func (r RawFormatter) Format(data interface{}) ([]byte, error) { + byte_data, ok := data.([]byte) + if !ok { + return nil, fmt.Errorf("This command does not support the -format=raw option; only `vault read` does.") + } + + return byte_data, nil +} + +func (r RawFormatter) Output(ui cli.Ui, secret *api.Secret, data interface{}) error { + b, err := r.Format(data) + if err != nil { + return err + } + ui.Output(string(b)) + return nil +} + +// An output formatter for yaml output format of an object +type YamlFormatter struct{} + +func (y YamlFormatter) Format(data interface{}) ([]byte, error) { + return yaml.Marshal(data) +} + +func (y YamlFormatter) Output(ui cli.Ui, secret *api.Secret, data interface{}) error { + b, err := y.Format(data) + if err == nil { + ui.Output(strings.TrimSpace(string(b))) + } + return err +} + +type PrettyFormatter struct{} + +func (p PrettyFormatter) Format(data interface{}) ([]byte, error) { + return nil, nil +} + +func (p PrettyFormatter) Output(ui cli.Ui, secret *api.Secret, data interface{}) error { + switch data.(type) { + case *api.AutopilotState: + p.OutputAutopilotState(ui, data) + default: + return errors.New("cannot use the pretty formatter for this type") + } + return nil +} + +func outputStringSlice(buffer *bytes.Buffer, indent string, values []string) { + for _, val := range values { + buffer.WriteString(fmt.Sprintf("%s%s\n", indent, val)) + } +} + +type mapOutput struct { + key string + value string +} + +func formatServer(srv *api.AutopilotServer) string { + var buffer bytes.Buffer + + buffer.WriteString(fmt.Sprintf(" %s\n", srv.ID)) + buffer.WriteString(fmt.Sprintf(" Name: %s\n", srv.Name)) + buffer.WriteString(fmt.Sprintf(" Address: %s\n", srv.Address)) + buffer.WriteString(fmt.Sprintf(" Status: %s\n", srv.Status)) + buffer.WriteString(fmt.Sprintf(" Node Status: %s\n", srv.NodeStatus)) + buffer.WriteString(fmt.Sprintf(" Healthy: %t\n", srv.Healthy)) + buffer.WriteString(fmt.Sprintf(" Last Contact: %s\n", srv.LastContact)) + buffer.WriteString(fmt.Sprintf(" Last Term: %d\n", srv.LastTerm)) + buffer.WriteString(fmt.Sprintf(" Last Index: %d\n", srv.LastIndex)) + buffer.WriteString(fmt.Sprintf(" Version: %s\n", srv.Version)) + + if srv.UpgradeVersion != "" { + buffer.WriteString(fmt.Sprintf(" Upgrade Version: %s\n", srv.UpgradeVersion)) + } + if srv.RedundancyZone != "" { + buffer.WriteString(fmt.Sprintf(" Redundancy Zone: %s\n", srv.RedundancyZone)) + } + if srv.NodeType != "" { + buffer.WriteString(fmt.Sprintf(" Node Type: %s\n", srv.NodeType)) + } + + return buffer.String() +} + +func (p PrettyFormatter) OutputAutopilotState(ui cli.Ui, data interface{}) { + state := data.(*api.AutopilotState) + + var buffer bytes.Buffer + buffer.WriteString(fmt.Sprintf("Healthy: %t\n", state.Healthy)) + buffer.WriteString(fmt.Sprintf("Failure Tolerance: %d\n", state.FailureTolerance)) + buffer.WriteString(fmt.Sprintf("Leader: %s\n", state.Leader)) + buffer.WriteString("Voters:\n") + outputStringSlice(&buffer, " ", state.Voters) + + if len(state.NonVoters) > 0 { + buffer.WriteString("Non Voters:\n") + outputStringSlice(&buffer, " ", state.NonVoters) + } + + if state.OptimisticFailureTolerance > 0 { + buffer.WriteString(fmt.Sprintf("Optimistic Failure Tolerance: %d\n", state.OptimisticFailureTolerance)) + } + + // Servers + 
buffer.WriteString("Servers:\n") + var outputs []mapOutput + for id, srv := range state.Servers { + outputs = append(outputs, mapOutput{key: id, value: formatServer(srv)}) + } + sort.Slice(outputs, func(i, j int) bool { + return outputs[i].key < outputs[j].key + }) + for _, output := range outputs { + buffer.WriteString(output.value) + } + + // Redundancy Zones + if len(state.RedundancyZones) > 0 { + buffer.WriteString("Redundancy Zones:\n") + zoneList := make([]string, 0, len(state.RedundancyZones)) + for z := range state.RedundancyZones { + zoneList = append(zoneList, z) + } + sort.Strings(zoneList) + for _, zoneName := range zoneList { + zone := state.RedundancyZones[zoneName] + servers := zone.Servers + voters := zone.Voters + sort.Strings(servers) + sort.Strings(voters) + buffer.WriteString(fmt.Sprintf(" %s\n", zoneName)) + buffer.WriteString(fmt.Sprintf(" Servers: %s\n", strings.Join(servers, ", "))) + buffer.WriteString(fmt.Sprintf(" Voters: %s\n", strings.Join(voters, ", "))) + buffer.WriteString(fmt.Sprintf(" Failure Tolerance: %d\n", zone.FailureTolerance)) + } + } + + // Upgrade Info + if state.Upgrade != nil { + buffer.WriteString("Upgrade Info:\n") + buffer.WriteString(fmt.Sprintf(" Status: %s\n", state.Upgrade.Status)) + buffer.WriteString(fmt.Sprintf(" Target Version: %s\n", state.Upgrade.TargetVersion)) + buffer.WriteString(fmt.Sprintf(" Target Version Voters: %s\n", strings.Join(state.Upgrade.TargetVersionVoters, ", "))) + buffer.WriteString(fmt.Sprintf(" Target Version Non-Voters: %s\n", strings.Join(state.Upgrade.TargetVersionNonVoters, ", "))) + buffer.WriteString(fmt.Sprintf(" Other Version Voters: %s\n", strings.Join(state.Upgrade.OtherVersionVoters, ", "))) + buffer.WriteString(fmt.Sprintf(" Other Version Non-Voters: %s\n", strings.Join(state.Upgrade.OtherVersionNonVoters, ", "))) + + if len(state.Upgrade.RedundancyZones) > 0 { + buffer.WriteString(" Redundancy Zones:\n") + for zoneName, zoneVersion := range state.Upgrade.RedundancyZones { + buffer.WriteString(fmt.Sprintf(" %s\n", zoneName)) + buffer.WriteString(fmt.Sprintf(" Target Version Voters: %s\n", strings.Join(zoneVersion.TargetVersionVoters, ", "))) + buffer.WriteString(fmt.Sprintf(" Target Version Non-Voters: %s\n", strings.Join(zoneVersion.TargetVersionNonVoters, ", "))) + buffer.WriteString(fmt.Sprintf(" Other Version Voters: %s\n", strings.Join(zoneVersion.OtherVersionVoters, ", "))) + buffer.WriteString(fmt.Sprintf(" Other Version Non-Voters: %s\n", strings.Join(zoneVersion.OtherVersionNonVoters, ", "))) + } + } + } + + ui.Output(buffer.String()) +} + +// An output formatter for table output of an object +type TableFormatter struct{} + +// We don't use this due to the TableFormatter introducing a bug when the -field flag is supplied: +// https://github.com/hashicorp/vault/commit/b24cf9a8af2190e96c614205b8cdf06d8c4b6718 . 
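+// Format is accordingly a stub that returns no bytes; all table rendering
+// happens in Output instead.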
+func (t TableFormatter) Format(data interface{}) ([]byte, error) {
+	return nil, nil
+}
+
+func (t TableFormatter) Output(ui cli.Ui, secret *api.Secret, data interface{}) error {
+	switch data := data.(type) {
+	case *api.Secret:
+		return t.OutputSecret(ui, secret)
+	case []interface{}:
+		return t.OutputList(ui, secret, data)
+	case []string:
+		return t.OutputList(ui, nil, data)
+	case map[string]interface{}:
+		return t.OutputMap(ui, data)
+	case SealStatusOutput:
+		return t.OutputSealStatusStruct(ui, nil, data)
+	default:
+		return errors.New("cannot use the table formatter for this type")
+	}
+}
+
+func (t TableFormatter) OutputSealStatusStruct(ui cli.Ui, secret *api.Secret, data interface{}) error {
+	var status SealStatusOutput = data.(SealStatusOutput)
+	var sealPrefix string
+	if status.RecoverySeal {
+		sealPrefix = "Recovery "
+	}
+
+	out := []string{}
+	out = append(out, "Key | Value")
+	out = append(out, fmt.Sprintf("%sSeal Type | %s", sealPrefix, status.Type))
+	out = append(out, fmt.Sprintf("Initialized | %t", status.Initialized))
+	out = append(out, fmt.Sprintf("Sealed | %t", status.Sealed))
+	out = append(out, fmt.Sprintf("Total %sShares | %d", sealPrefix, status.N))
+	out = append(out, fmt.Sprintf("Threshold | %d", status.T))
+
+	if status.Sealed {
+		out = append(out, fmt.Sprintf("Unseal Progress | %d/%d", status.Progress, status.T))
+		out = append(out, fmt.Sprintf("Unseal Nonce | %s", status.Nonce))
+	}
+
+	if status.Migration {
+		out = append(out, fmt.Sprintf("Seal Migration in Progress | %t", status.Migration))
+	}
+
+	out = append(out, fmt.Sprintf("Version | %s", status.Version))
+	out = append(out, fmt.Sprintf("Build Date | %s", status.BuildDate))
+	out = append(out, fmt.Sprintf("Storage Type | %s", status.StorageType))
+
+	if status.ClusterName != "" && status.ClusterID != "" {
+		out = append(out, fmt.Sprintf("Cluster Name | %s", status.ClusterName))
+		out = append(out, fmt.Sprintf("Cluster ID | %s", status.ClusterID))
+	}
+
+	// Output if HCP link is configured
+	if status.HCPLinkStatus != "" {
+		out = append(out, fmt.Sprintf("HCP Link Status | %s", status.HCPLinkStatus))
+		out = append(out, fmt.Sprintf("HCP Link Resource ID | %s", status.HCPLinkResourceID))
+	}
+
+	// Output if HA is enabled
+	out = append(out, fmt.Sprintf("HA Enabled | %t", status.HAEnabled))
+
+	if status.HAEnabled {
+		mode := "sealed"
+		if !status.Sealed {
+			out = append(out, fmt.Sprintf("HA Cluster | %s", status.LeaderClusterAddress))
+			mode = "standby"
+			showLeaderAddr := false
+			if status.IsSelf {
+				mode = "active"
+			} else {
+				if status.LeaderAddress == "" {
+					status.LeaderAddress = "<none>"
+				}
+				showLeaderAddr = true
+			}
+			out = append(out, fmt.Sprintf("HA Mode | %s", mode))
+
+			if status.IsSelf && !status.ActiveTime.IsZero() {
+				out = append(out, fmt.Sprintf("Active Since | %s", status.ActiveTime.Format(time.RFC3339Nano)))
+			}
+			// This is down here just to keep ordering consistent
+			if showLeaderAddr {
+				out = append(out, fmt.Sprintf("Active Node Address | %s", status.LeaderAddress))
+			}
+
+			if status.PerfStandby {
+				out = append(out, fmt.Sprintf("Performance Standby Node | %t", status.PerfStandby))
+				out = append(out, fmt.Sprintf("Performance Standby Last Remote WAL | %d", status.PerfStandbyLastRemoteWAL))
+			}
+		}
+	}
+
+	if status.RaftCommittedIndex > 0 {
+		out = append(out, fmt.Sprintf("Raft Committed Index | %d", status.RaftCommittedIndex))
+	}
+	if status.RaftAppliedIndex > 0 {
+		out = append(out, fmt.Sprintf("Raft Applied Index | %d", status.RaftAppliedIndex))
+	}
+	if status.LastWAL != 0 {
+		out = append(out, fmt.Sprintf("Last WAL | %d", status.LastWAL))
+	}
+	if len(status.Warnings) > 0 {
+		out = append(out, fmt.Sprintf("Warnings | %v", status.Warnings))
+	}
+
+	ui.Output(tableOutput(out, &columnize.Config{
+		Delim: "|",
+	}))
+	return nil
+}
+
+func (t TableFormatter) OutputList(ui cli.Ui, secret *api.Secret, data interface{}) error {
+	t.printWarnings(ui, secret)
+
+	// Determine if we have additional information from a ListResponseWithInfo endpoint.
+	var additionalInfo map[string]interface{}
+	if secret != nil {
+		shouldListWithInfo := Detailed(ui)
+		if additional, ok := secret.Data["key_info"]; shouldListWithInfo && ok && len(additional.(map[string]interface{})) > 0 {
+			additionalInfo = additional.(map[string]interface{})
+		}
+	}
+
+	switch data := data.(type) {
+	case []interface{}:
+	case []string:
+		ui.Output(tableOutput(data, nil))
+		return nil
+	default:
+		return errors.New("error: table formatter cannot output list for this data type")
+	}
+
+	list := data.([]interface{})
+
+	if len(list) > 0 {
+		keys := make([]string, len(list))
+		for i, v := range list {
+			typed, ok := v.(string)
+			if !ok {
+				return fmt.Errorf("%v is not a string", v)
+			}
+			keys[i] = typed
+		}
+		sort.Strings(keys)
+
+		// If we have a ListResponseWithInfo endpoint, we'll need to show
+		// additional headers. To satisfy the table outputter, we'll need
+		// to concat them with the delimiter.
+		var headers []string
+		header := "Keys"
+		if len(additionalInfo) > 0 {
+			seenHeaders := make(map[string]bool)
+			for key, rawValues := range additionalInfo {
+				// Most endpoints use the well-behaved ListResponseWithInfo.
+				// However, some use a hand-rolled equivalent, where the
+				// returned "keys" doesn't match the key of the "key_info"
+				// member (namely, /sys/policies/egp). We seek to exclude
+				// headers only visible from "non-visitable" key_info rows,
+				// to make table output less confusing. These non-visitable
+				// rows will still be visible in the JSON output.
+				index := sort.SearchStrings(keys, key)
+				if index < len(keys) && keys[index] != key {
+					continue
+				}
+
+				values := rawValues.(map[string]interface{})
+				for key := range values {
+					seenHeaders[key] = true
+				}
+			}
+
+			for key := range seenHeaders {
+				headers = append(headers, key)
+			}
+			sort.Strings(headers)
+
+			header = header + hopeDelim + strings.Join(headers, hopeDelim)
+		}
+
+		// Finally, if we have a ListResponseWithInfo, we'll need to update
+		// the returned rows to not just have the keys (in the sorted order),
+		// but also have the values for each header (in their sorted order).
+		rows := keys
+		if len(additionalInfo) > 0 && len(headers) > 0 {
+			for index, row := range rows {
+				formatted := []string{row}
+				if rawValues, ok := additionalInfo[row]; ok {
+					values := rawValues.(map[string]interface{})
+					for _, header := range headers {
+						if rawValue, ok := values[header]; ok {
+							if looksLikeDuration(header) {
+								rawValue = humanDurationInt(rawValue)
+							}
+
+							formatted = append(formatted, fmt.Sprintf("%v", rawValue))
+						} else {
+							// Show a default empty n/a when this field is
+							// missing from the additional information.
+							formatted = append(formatted, "n/a")
+						}
+					}
+				}
+
+				rows[index] = strings.Join(formatted, hopeDelim)
+			}
+		}
+
+		// Prepend the header to the formatted rows.
+		output := append([]string{header}, rows...)
+		ui.Output(tableOutput(output, &columnize.Config{
+			Delim: hopeDelim,
+		}))
+	}
+
+	return nil
+}
+
+// printWarnings prints any warnings in the secret.
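+// Each warning is prefixed with "*" and word-wrapped with padding so that
+// multi-line warnings remain readable.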
+func (t TableFormatter) printWarnings(ui cli.Ui, secret *api.Secret) { + if secret != nil && len(secret.Warnings) > 0 { + ui.Warn("WARNING! The following warnings were returned from Vault:\n") + for _, warning := range secret.Warnings { + ui.Warn(wrapAtLengthWithPadding(fmt.Sprintf("* %s", warning), 2)) + ui.Warn("") + } + } +} + +func (t TableFormatter) OutputSecret(ui cli.Ui, secret *api.Secret) error { + if secret == nil { + return nil + } + + t.printWarnings(ui, secret) + + out := make([]string, 0, 8) + if secret.LeaseDuration > 0 { + if secret.LeaseID != "" { + out = append(out, fmt.Sprintf("lease_id %s %s", hopeDelim, secret.LeaseID)) + out = append(out, fmt.Sprintf("lease_duration %s %v", hopeDelim, humanDurationInt(secret.LeaseDuration))) + out = append(out, fmt.Sprintf("lease_renewable %s %t", hopeDelim, secret.Renewable)) + } else { + // This is probably the generic secret backend which has leases, but we + // print them as refresh_interval to reduce confusion. + out = append(out, fmt.Sprintf("refresh_interval %s %v", hopeDelim, humanDurationInt(secret.LeaseDuration))) + } + } + + if secret.Auth != nil { + if secret.Auth.MFARequirement != nil { + out = append(out, fmt.Sprintf("mfa_request_id %s %s", hopeDelim, secret.Auth.MFARequirement.MFARequestID)) + + for k, constraintSet := range secret.Auth.MFARequirement.MFAConstraints { + for _, constraint := range constraintSet.Any { + out = append(out, fmt.Sprintf("mfa_constraint_%s_%s_id %s %s", k, constraint.Type, hopeDelim, constraint.ID)) + out = append(out, fmt.Sprintf("mfa_constraint_%s_%s_uses_passcode %s %t", k, constraint.Type, hopeDelim, constraint.UsesPasscode)) + if constraint.Name != "" { + out = append(out, fmt.Sprintf("mfa_constraint_%s_%s_name %s %s", k, constraint.Type, hopeDelim, constraint.Name)) + } + } + } + } else { // Token information only makes sense if no further MFA requirement (i.e. if we actually have a token) + out = append(out, fmt.Sprintf("token %s %s", hopeDelim, secret.Auth.ClientToken)) + out = append(out, fmt.Sprintf("token_accessor %s %s", hopeDelim, secret.Auth.Accessor)) + // If the lease duration is 0, it's likely a root token, so output the + // duration as "infinity" to clear things up. 
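+			// For example, a root token row would render as:
+			//   token_duration    ∞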
+ if secret.Auth.LeaseDuration == 0 { + out = append(out, fmt.Sprintf("token_duration %s %s", hopeDelim, "∞")) + } else { + out = append(out, fmt.Sprintf("token_duration %s %v", hopeDelim, humanDurationInt(secret.Auth.LeaseDuration))) + } + out = append(out, fmt.Sprintf("token_renewable %s %t", hopeDelim, secret.Auth.Renewable)) + out = append(out, fmt.Sprintf("token_policies %s %q", hopeDelim, secret.Auth.TokenPolicies)) + out = append(out, fmt.Sprintf("identity_policies %s %q", hopeDelim, secret.Auth.IdentityPolicies)) + out = append(out, fmt.Sprintf("policies %s %q", hopeDelim, secret.Auth.Policies)) + for k, v := range secret.Auth.Metadata { + out = append(out, fmt.Sprintf("token_meta_%s %s %v", k, hopeDelim, v)) + } + } + } + + if secret.WrapInfo != nil { + out = append(out, fmt.Sprintf("wrapping_token: %s %s", hopeDelim, secret.WrapInfo.Token)) + out = append(out, fmt.Sprintf("wrapping_accessor: %s %s", hopeDelim, secret.WrapInfo.Accessor)) + out = append(out, fmt.Sprintf("wrapping_token_ttl: %s %v", hopeDelim, humanDurationInt(secret.WrapInfo.TTL))) + out = append(out, fmt.Sprintf("wrapping_token_creation_time: %s %s", hopeDelim, secret.WrapInfo.CreationTime.String())) + out = append(out, fmt.Sprintf("wrapping_token_creation_path: %s %s", hopeDelim, secret.WrapInfo.CreationPath)) + if secret.WrapInfo.WrappedAccessor != "" { + out = append(out, fmt.Sprintf("wrapped_accessor: %s %s", hopeDelim, secret.WrapInfo.WrappedAccessor)) + } + } + + if len(secret.Data) > 0 { + keys := make([]string, 0, len(secret.Data)) + for k := range secret.Data { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + v := secret.Data[k] + + // If the field "looks" like a TTL, print it as a time duration instead. + if looksLikeDuration(k) { + v = humanDurationInt(v) + } + + out = append(out, fmt.Sprintf("%s %s %v", k, hopeDelim, v)) + } + } + + // If we got this far and still don't have any data, there's nothing to print, + // sorry. + if len(out) == 0 { + return nil + } + + // Prepend the header + out = append([]string{"Key" + hopeDelim + "Value"}, out...) + + ui.Output(tableOutput(out, &columnize.Config{ + Delim: hopeDelim, + })) + return nil +} + +func (t TableFormatter) OutputMap(ui cli.Ui, data map[string]interface{}) error { + out := make([]string, 0, len(data)+1) + if len(data) > 0 { + keys := make([]string, 0, len(data)) + for k := range data { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + v := data[k] + + // If the field "looks" like a TTL, print it as a time duration instead. + if looksLikeDuration(k) { + v = humanDurationInt(v) + } + + out = append(out, fmt.Sprintf("%s %s %v", k, hopeDelim, v)) + } + } + + // If we got this far and still don't have any data, there's nothing to print, + // sorry. + if len(out) == 0 { + return nil + } + + // Prepend the header + out = append([]string{"Key" + hopeDelim + "Value"}, out...) + + ui.Output(tableOutput(out, &columnize.Config{ + Delim: hopeDelim, + })) + return nil +} + +// OutputSealStatus will print *api.SealStatusResponse in the CLI according to the format provided +func OutputSealStatus(ui cli.Ui, client *api.Client, status *api.SealStatusResponse) int { + sealStatusOutput := SealStatusOutput{SealStatusResponse: *status} + + // Mask the 'Vault is sealed' error, since this means HA is enabled, but that + // we cannot query for the leader since we are sealed. 
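+	// In that case we synthesize a minimal LeaderResponse below so that the
+	// HA fields can still be rendered.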
+ leaderStatus, err := client.Sys().Leader() + if err != nil && strings.Contains(err.Error(), "Vault is sealed") { + leaderStatus = &api.LeaderResponse{HAEnabled: true} + err = nil + } + if err != nil { + ui.Error(fmt.Sprintf("Error checking leader status: %s", err)) + return 1 + } + + // copy leaderStatus fields into sealStatusOutput for display later + sealStatusOutput.HAEnabled = leaderStatus.HAEnabled + sealStatusOutput.IsSelf = leaderStatus.IsSelf + sealStatusOutput.ActiveTime = leaderStatus.ActiveTime + sealStatusOutput.LeaderAddress = leaderStatus.LeaderAddress + sealStatusOutput.LeaderClusterAddress = leaderStatus.LeaderClusterAddress + sealStatusOutput.PerfStandby = leaderStatus.PerfStandby + sealStatusOutput.PerfStandbyLastRemoteWAL = leaderStatus.PerfStandbyLastRemoteWAL + sealStatusOutput.LastWAL = leaderStatus.LastWAL + sealStatusOutput.RaftCommittedIndex = leaderStatus.RaftCommittedIndex + sealStatusOutput.RaftAppliedIndex = leaderStatus.RaftAppliedIndex + OutputData(ui, sealStatusOutput) + return 0 +} + +// looksLikeDuration checks if the given key "k" looks like a duration value. +// This is used to pretty-format duration values in responses, especially from +// plugins. +func looksLikeDuration(k string) bool { + return k == "period" || strings.HasSuffix(k, "_period") || + k == "ttl" || strings.HasSuffix(k, "_ttl") || + k == "duration" || strings.HasSuffix(k, "_duration") || + k == "lease_max" || k == "ttl_max" +} + +// This struct is responsible for capturing all the fields to be output by a +// vault status command, including fields that do not come from the status API. +// Currently we are adding the fields from api.LeaderResponse +type SealStatusOutput struct { + api.SealStatusResponse + HAEnabled bool `json:"ha_enabled"` + IsSelf bool `json:"is_self,omitempty"` + ActiveTime time.Time `json:"active_time,omitempty"` + LeaderAddress string `json:"leader_address,omitempty"` + LeaderClusterAddress string `json:"leader_cluster_address,omitempty"` + PerfStandby bool `json:"performance_standby,omitempty"` + PerfStandbyLastRemoteWAL uint64 `json:"performance_standby_last_remote_wal,omitempty"` + LastWAL uint64 `json:"last_wal,omitempty"` + RaftCommittedIndex uint64 `json:"raft_committed_index,omitempty"` + RaftAppliedIndex uint64 `json:"raft_applied_index,omitempty"` +} diff --git a/command/format_test.go b/command/format_test.go new file mode 100644 index 0000000..2bdc45e --- /dev/null +++ b/command/format_test.go @@ -0,0 +1,293 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "fmt" + "os" + "strings" + "testing" + "time" + + "github.com/ghodss/yaml" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/jsonutil" +) + +type mockUi struct { + t *testing.T + SampleData string + outputData *string +} + +func (m mockUi) Ask(_ string) (string, error) { + m.t.FailNow() + return "", nil +} + +func (m mockUi) AskSecret(_ string) (string, error) { + m.t.FailNow() + return "", nil +} +func (m mockUi) Output(s string) { *m.outputData = s } +func (m mockUi) Info(s string) { m.t.Log(s) } +func (m mockUi) Error(s string) { m.t.Log(s) } +func (m mockUi) Warn(s string) { m.t.Log(s) } + +func TestJsonFormatter(t *testing.T) { + os.Setenv(EnvVaultFormat, "json") + var output string + ui := mockUi{t: t, SampleData: "something", outputData: &output} + if err := outputWithFormat(ui, nil, ui); err != 0 { + t.Fatal(err) + } + var newUi mockUi + if err := jsonutil.DecodeJSON([]byte(output), &newUi); err != nil { + t.Fatal(err) + } + if newUi.SampleData != ui.SampleData { + t.Fatalf(`values not equal ("%s" != "%s")`, + newUi.SampleData, + ui.SampleData) + } +} + +func TestYamlFormatter(t *testing.T) { + os.Setenv(EnvVaultFormat, "yaml") + var output string + ui := mockUi{t: t, SampleData: "something", outputData: &output} + if err := outputWithFormat(ui, nil, ui); err != 0 { + t.Fatal(err) + } + var newUi mockUi + err := yaml.Unmarshal([]byte(output), &newUi) + if err != nil { + t.Fatal(err) + } + if newUi.SampleData != ui.SampleData { + t.Fatalf(`values not equal ("%s" != "%s")`, + newUi.SampleData, + ui.SampleData) + } +} + +func TestTableFormatter(t *testing.T) { + os.Setenv(EnvVaultFormat, "table") + var output string + ui := mockUi{t: t, outputData: &output} + + // Testing secret formatting + s := api.Secret{Data: map[string]interface{}{"k": "something"}} + if err := outputWithFormat(ui, &s, &s); err != 0 { + t.Fatal(err) + } + if !strings.Contains(output, "something") { + t.Fatal("did not find 'something'") + } +} + +// TestStatusFormat tests to verify that the embedded struct +// SealStatusOutput ignores omitEmpty fields and prints out +// fields in the embedded struct explicitly. It also checks the spacing, +// indentation, and delimiters of table formatting explicitly. +func TestStatusFormat(t *testing.T) { + var output string + ui := mockUi{t: t, outputData: &output} + os.Setenv(EnvVaultFormat, "table") + + statusHA := getMockStatusData(false) + statusOmitEmpty := getMockStatusData(true) + + // Testing that HA fields are formatted properly for table. + // All fields (including new HA fields) are expected + if err := outputWithFormat(ui, nil, statusHA); err != 0 { + t.Fatal(err) + } + + expectedOutputString := `Key Value +--- ----- +Recovery Seal Type type +Initialized true +Sealed true +Total Recovery Shares 2 +Threshold 1 +Unseal Progress 3/1 +Unseal Nonce nonce +Seal Migration in Progress true +Version version +Build Date build date +Storage Type storage type +Cluster Name cluster name +Cluster ID cluster id +HA Enabled true +Raft Committed Index 3 +Raft Applied Index 4 +Last WAL 2 +Warnings [warning]` + + if expectedOutputString != output { + fmt.Printf("%s\n%+v\n %s\n%+v\n", "output found was: ", output, "versus", expectedOutputString) + t.Fatal("format output for status does not match expected format. 
Check print statements above.") + } + + // Testing that omitEmpty fields are omitted from status + // no HA fields are expected, except HA Enabled + if err := outputWithFormat(ui, nil, statusOmitEmpty); err != 0 { + t.Fatal(err) + } + + expectedOutputString = `Key Value +--- ----- +Recovery Seal Type type +Initialized true +Sealed true +Total Recovery Shares 2 +Threshold 1 +Unseal Progress 3/1 +Unseal Nonce nonce +Seal Migration in Progress true +Version version +Build Date build date +Storage Type n/a +HA Enabled false` + + if expectedOutputString != output { + fmt.Printf("%s\n%+v\n %s\n%+v\n", "output found was: ", output, "versus", expectedOutputString) + t.Fatal("format output for status does not match expected format. Check print statements above.") + } +} + +// getMockStatusData outputs a SealStatusOutput struct from format.go to be used +// for testing. The emptyfields parameter specifies whether the struct will be +// initialized with all the omitempty fields as empty or not. +func getMockStatusData(emptyFields bool) SealStatusOutput { + var status SealStatusOutput + var sealStatusResponseMock api.SealStatusResponse + if !emptyFields { + sealStatusResponseMock = api.SealStatusResponse{ + Type: "type", + Initialized: true, + Sealed: true, + T: 1, + N: 2, + Progress: 3, + Nonce: "nonce", + Version: "version", + BuildDate: "build date", + Migration: true, + ClusterName: "cluster name", + ClusterID: "cluster id", + RecoverySeal: true, + StorageType: "storage type", + Warnings: []string{"warning"}, + } + + // must initialize this struct without explicit field names due to embedding + status = SealStatusOutput{ + sealStatusResponseMock, + true, // HAEnabled + true, // IsSelf + time.Time{}.UTC(), // ActiveTime + "leader address", // LeaderAddress + "leader cluster address", // LeaderClusterAddress + true, // PerfStandby + 1, // PerfStandbyLastRemoteWAL + 2, // LastWAL + 3, // RaftCommittedIndex + 4, // RaftAppliedIndex + } + } else { + sealStatusResponseMock = api.SealStatusResponse{ + Type: "type", + Initialized: true, + Sealed: true, + T: 1, + N: 2, + Progress: 3, + Nonce: "nonce", + Version: "version", + BuildDate: "build date", + Migration: true, + ClusterName: "", + ClusterID: "", + RecoverySeal: true, + StorageType: "", + } + + // must initialize this struct without explicit field names due to embedding + status = SealStatusOutput{ + sealStatusResponseMock, + false, // HAEnabled + false, // IsSelf + time.Time{}.UTC(), // ActiveTime + "", // LeaderAddress + "", // LeaderClusterAddress + false, // PerfStandby + 0, // PerfStandbyLastRemoteWAL + 0, // LastWAL + 0, // RaftCommittedIndex + 0, // RaftAppliedIndex + } + } + return status +} + +func Test_Format_Parsing(t *testing.T) { + defer func() { + os.Setenv(EnvVaultCLINoColor, "") + os.Setenv(EnvVaultFormat, "") + }() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "format", + []string{"token", "renew", "-format", "json"}, + "{", + 0, + }, + { + "format_bad", + []string{"token", "renew", "-format", "nope-not-real"}, + "Invalid output format", + 1, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + client, closer := testVaultServer(t) + defer closer() + + stdout := bytes.NewBuffer(nil) + stderr := bytes.NewBuffer(nil) + runOpts := &RunOptions{ + Stdout: stdout, + Stderr: stderr, + Client: client, + } + + // Login with the token so we can renew-self. 
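+			// ("token renew" with no token argument renews the client's own
+			// token, which is why we log in first.)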
+			token, _ := testTokenAndAccessor(t, client)
+			client.SetToken(token)
+
+			code := RunCustom(tc.args, runOpts)
+			if code != tc.code {
+				t.Errorf("expected %d to be %d", code, tc.code)
+			}
+
+			combined := stdout.String() + stderr.String()
+			if !strings.Contains(combined, tc.out) {
+				t.Errorf("expected %q to contain %q", combined, tc.out)
+			}
+		})
+	}
+}
diff --git a/command/healthcheck/healthcheck.go b/command/healthcheck/healthcheck.go
new file mode 100644
index 0000000..a6fb204
--- /dev/null
+++ b/command/healthcheck/healthcheck.go
@@ -0,0 +1,292 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+/*
+ * The healthcheck package attempts to allow generic checks of arbitrary
+ * engines, while providing a common framework with some performance
+ * efficiencies in mind.
+ *
+ * The core of this package is the Executor context; a caller would
+ * provision a set of checks, an API client, and a configuration,
+ * which the executor would use to decide which checks to execute
+ * and how.
+ *
+ * Checks are based around a series of remote paths that are fetched by
+ * the client; these are broken into two categories: static paths, which
+ * can always be fetched; and dynamic paths, which the check fetches based
+ * on earlier results.
+ *
+ * For instance, a basic PKI CA lifetime check will have a static fetch
+ * against the list of CAs, and a dynamic fetch, using that earlier list,
+ * to fetch the PEMs of all CAs.
+ *
+ * This allows health checks to share data: many PKI checks will need the
+ * issuer list and so repeatedly fetching this may result in a performance
+ * impact.
+ */
+
+package healthcheck
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+type Executor struct {
+	Client         *api.Client
+	Mount          string
+	DefaultEnabled bool
+
+	Config map[string]map[string]interface{}
+
+	Resources map[string]map[logical.Operation]*PathFetch
+
+	Checkers []Check
+}
+
+func NewExecutor(client *api.Client, mount string) *Executor {
+	return &Executor{
+		Client:         client,
+		DefaultEnabled: true,
+		Mount:          mount,
+		Config:         make(map[string]map[string]interface{}),
+		Resources:      make(map[string]map[logical.Operation]*PathFetch),
+	}
+}
+
+func (e *Executor) AddCheck(c Check) {
+	e.Checkers = append(e.Checkers, c)
+}
+
+func (e *Executor) BuildConfig(external map[string]interface{}) error {
+	merged := e.Config
+
+	for index, checker := range e.Checkers {
+		name := checker.Name()
+		if _, present := merged[name]; name == "" || present {
+			return fmt.Errorf("bad checker %v: name is empty or already present: %v", index, name)
+		}
+
+		// Fetch the default configuration; if the check returns enabled
+		// status, verify it matches our expectations (in the event it should
+		// be disabled by default), otherwise, add it in.
+		config := checker.DefaultConfig()
+		enabled, present := config["enabled"]
+		if !present {
+			config["enabled"] = e.DefaultEnabled
+		} else if enabled.(bool) && !e.DefaultEnabled {
+			config["enabled"] = e.DefaultEnabled
+		}
+
+		// Now apply any external config for this check.
+		if econfig, present := external[name]; present {
+			for param, evalue := range econfig.(map[string]interface{}) {
+				if _, ok := config[param]; !ok {
+					// Assumption: default configs have all possible
+					// configuration options. This external config has
+					// an unknown option, so we want to error out.
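+					// (For example, a hypothetical external entry such as
+					// {"enabled": true, "unknown_knob": 1} would be rejected
+					// here.)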
+ return fmt.Errorf("unknown configuration option for %v: %v", name, param) + } + + config[param] = evalue + } + } + + // Now apply it and save it. + if err := checker.LoadConfig(config); err != nil { + return fmt.Errorf("error saving merged config for %v: %w", name, err) + } + merged[name] = config + } + + return nil +} + +func (e *Executor) Execute() (map[string][]*Result, error) { + ret := make(map[string][]*Result) + for _, checker := range e.Checkers { + if !checker.IsEnabled() { + continue + } + + if err := checker.FetchResources(e); err != nil { + return nil, fmt.Errorf("failed to fetch resources %v: %w", checker.Name(), err) + } + + results, err := checker.Evaluate(e) + if err != nil { + return nil, fmt.Errorf("failed to evaluate %v: %w", checker.Name(), err) + } + + if results == nil { + results = []*Result{} + } + + for _, result := range results { + result.Endpoint = e.templatePath(result.Endpoint) + result.StatusDisplay = ResultStatusNameMap[result.Status] + } + + ret[checker.Name()] = results + } + + return ret, nil +} + +func (e *Executor) templatePath(path string) string { + return strings.ReplaceAll(path, "{{mount}}", e.Mount) +} + +func (e *Executor) FetchIfNotFetched(op logical.Operation, rawPath string) (*PathFetch, error) { + path := e.templatePath(rawPath) + + byOp, present := e.Resources[path] + if present && byOp != nil { + result, present := byOp[op] + if present && result != nil { + return result, result.FetchSurfaceError() + } + } + + // Must not exist in cache; create it. + if byOp == nil { + e.Resources[path] = make(map[logical.Operation]*PathFetch) + } + + ret := &PathFetch{ + Operation: op, + Path: path, + ParsedCache: make(map[string]interface{}), + } + + data := map[string][]string{} + if op == logical.ListOperation { + data["list"] = []string{"true"} + } else if op != logical.ReadOperation { + return nil, fmt.Errorf("unknown operation: %v on %v", op, path) + } + + // client.ReadRaw* methods require a manual timeout override + ctx, cancel := context.WithTimeout(context.Background(), e.Client.ClientTimeout()) + defer cancel() + + response, err := e.Client.Logical().ReadRawWithDataWithContext(ctx, path, data) + ret.Response = response + if err != nil { + ret.FetchError = err + } else { + // Not all secrets will parse correctly. Sometimes we really want + // to fetch a raw endpoint, sometimes we're run with a bad mount + // or missing permissions. 
+ secret, secretErr := e.Client.Logical().ParseRawResponseAndCloseBody(response, err) + if secretErr != nil { + ret.SecretParseError = secretErr + } else { + ret.Secret = secret + } + } + + e.Resources[path][op] = ret + return ret, ret.FetchSurfaceError() +} + +type PathFetch struct { + Operation logical.Operation + Path string + Response *api.Response + FetchError error + Secret *api.Secret + SecretParseError error + ParsedCache map[string]interface{} +} + +func (p *PathFetch) IsOK() bool { + return p.FetchError == nil && p.Response != nil +} + +func (p *PathFetch) IsSecretOK() bool { + return p.IsOK() && p.SecretParseError == nil && p.Secret != nil +} + +func (p *PathFetch) FetchSurfaceError() error { + if p.IsOK() || p.IsSecretPermissionsError() || p.IsUnsupportedPathError() || p.IsMissingResource() || p.Is404NotFound() { + return nil + } + + if strings.Contains(p.FetchError.Error(), "route entry not found") { + return fmt.Errorf("Error making API request: was a bad mount given?\n\nOperation: %v\nPath: %v\nOriginal Error:\n%w", p.Operation, p.Path, p.FetchError) + } + + return p.FetchError +} + +func (p *PathFetch) IsSecretPermissionsError() bool { + return !p.IsOK() && strings.Contains(p.FetchError.Error(), "permission denied") +} + +func (p *PathFetch) IsUnsupportedPathError() bool { + return !p.IsOK() && strings.Contains(p.FetchError.Error(), "unsupported path") +} + +func (p *PathFetch) IsMissingResource() bool { + return !p.IsOK() && strings.Contains(p.FetchError.Error(), "unable to find") +} + +func (p *PathFetch) Is404NotFound() bool { + return !p.IsOK() && strings.HasSuffix(strings.TrimSpace(p.FetchError.Error()), "Code: 404. Errors:") +} + +type Check interface { + Name() string + IsEnabled() bool + + DefaultConfig() map[string]interface{} + LoadConfig(config map[string]interface{}) error + + FetchResources(e *Executor) error + + Evaluate(e *Executor) ([]*Result, error) +} + +type ResultStatus int + +const ( + ResultNotApplicable ResultStatus = iota + ResultOK + ResultInformational + ResultWarning + ResultCritical + ResultInvalidVersion + ResultInsufficientPermissions +) + +var ResultStatusNameMap = map[ResultStatus]string{ + ResultNotApplicable: "not_applicable", + ResultOK: "ok", + ResultInformational: "informational", + ResultWarning: "warning", + ResultCritical: "critical", + ResultInvalidVersion: "invalid_version", + ResultInsufficientPermissions: "insufficient_permissions", +} + +var NameResultStatusMap = map[string]ResultStatus{ + "not_applicable": ResultNotApplicable, + "ok": ResultOK, + "informational": ResultInformational, + "warning": ResultWarning, + "critical": ResultCritical, + "invalid_version": ResultInvalidVersion, + "insufficient_permissions": ResultInsufficientPermissions, +} + +type Result struct { + Status ResultStatus `json:"status_code"` + StatusDisplay string `json:"status"` + Endpoint string `json:"endpoint,omitempty"` + Message string `json:"message,omitempty"` +} diff --git a/command/healthcheck/pki.go b/command/healthcheck/pki.go new file mode 100644 index 0000000..42f4fc4 --- /dev/null +++ b/command/healthcheck/pki.go @@ -0,0 +1,283 @@ +// Copyright (c) HashiCorp, Inc. 
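Before moving on to the PKI-specific fetch helpers, a minimal usage sketch may help tie the Executor, Check, and Result types above together. This block is editorial rather than part of the upstream diff; it assumes the package is importable at its in-repo path `github.com/hashicorp/vault/command/healthcheck`, a mount named "pki", and two check constructors (`NewCAValidityPeriodCheck`, `NewEnableAutoTidyCheck`) defined later in this commit.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/command/healthcheck"
)

func main() {
	// Reads VAULT_ADDR / VAULT_TOKEN from the environment.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	e := healthcheck.NewExecutor(client, "pki")
	e.AddCheck(healthcheck.NewCAValidityPeriodCheck())
	e.AddCheck(healthcheck.NewEnableAutoTidyCheck())

	// Merge each check's DefaultConfig with external overrides (none here).
	if err := e.BuildConfig(nil); err != nil {
		log.Fatal(err)
	}

	// Execute fetches shared resources once, evaluates each enabled check,
	// and templates {{mount}} into the reported endpoints.
	results, err := e.Execute()
	if err != nil {
		log.Fatal(err)
	}

	for check, rets := range results {
		for _, result := range rets {
			fmt.Printf("%s: [%s] %s\n", check, result.StatusDisplay, result.Message)
		}
	}
}
```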
+// SPDX-License-Identifier: MPL-2.0 + +package healthcheck + +import ( + "crypto/x509" + "encoding/pem" + "fmt" + + "github.com/hashicorp/vault/sdk/logical" +) + +func pkiFetchIssuersList(e *Executor, versionError func()) (bool, *PathFetch, []string, error) { + issuersRet, err := e.FetchIfNotFetched(logical.ListOperation, "/{{mount}}/issuers") + if err != nil { + return true, issuersRet, nil, err + } + + if !issuersRet.IsSecretOK() { + if issuersRet.IsUnsupportedPathError() { + versionError() + } + + if issuersRet.Is404NotFound() { + return true, issuersRet, nil, fmt.Errorf("this mount lacks any configured issuers, limiting health check usefulness") + } + + return true, issuersRet, nil, nil + } + + if len(issuersRet.ParsedCache) == 0 { + var issuers []string + for _, rawIssuerId := range issuersRet.Secret.Data["keys"].([]interface{}) { + issuers = append(issuers, rawIssuerId.(string)) + } + issuersRet.ParsedCache["issuers"] = issuers + } + + return false, issuersRet, issuersRet.ParsedCache["issuers"].([]string), nil +} + +func parsePEM(contents string) ([]byte, error) { + // Need to parse out the issuer from its PEM format. + pemBlock, _ := pem.Decode([]byte(contents)) + if pemBlock == nil { + return nil, fmt.Errorf("invalid PEM block") + } + + return pemBlock.Bytes, nil +} + +func ParsePEMCert(contents string) (*x509.Certificate, error) { + parsed, err := parsePEM(contents) + if err != nil { + return nil, err + } + + cert, err := x509.ParseCertificate(parsed) + if err != nil { + return nil, fmt.Errorf("invalid certificate: %w", err) + } + + return cert, nil +} + +func parsePEMCRL(contents string) (*x509.RevocationList, error) { + parsed, err := parsePEM(contents) + if err != nil { + return nil, err + } + + crl, err := x509.ParseRevocationList(parsed) + if err != nil { + return nil, fmt.Errorf("invalid CRL: %w", err) + } + + return crl, nil +} + +func pkiFetchIssuer(e *Executor, issuer string, versionError func()) (bool, *PathFetch, *x509.Certificate, error) { + issuerRet, err := e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/issuer/"+issuer+"/json") + if err != nil { + return true, issuerRet, nil, err + } + + if !issuerRet.IsSecretOK() { + if issuerRet.IsUnsupportedPathError() { + versionError() + } + return true, issuerRet, nil, nil + } + + if len(issuerRet.ParsedCache) == 0 { + cert, err := ParsePEMCert(issuerRet.Secret.Data["certificate"].(string)) + if err != nil { + return true, issuerRet, nil, fmt.Errorf("unable to parse issuer %v's certificate: %w", issuer, err) + } + + issuerRet.ParsedCache["certificate"] = cert + } + + return false, issuerRet, issuerRet.ParsedCache["certificate"].(*x509.Certificate), nil +} + +func pkiFetchIssuerEntry(e *Executor, issuer string, versionError func()) (bool, *PathFetch, map[string]interface{}, error) { + issuerRet, err := e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/issuer/"+issuer) + if err != nil { + return true, issuerRet, nil, err + } + + if !issuerRet.IsSecretOK() { + if issuerRet.IsUnsupportedPathError() { + versionError() + } + return true, issuerRet, nil, nil + } + + if len(issuerRet.ParsedCache) == 0 { + cert, err := ParsePEMCert(issuerRet.Secret.Data["certificate"].(string)) + if err != nil { + return true, issuerRet, nil, fmt.Errorf("unable to parse issuer %v's certificate: %w", issuer, err) + } + + issuerRet.ParsedCache["certificate"] = cert + } + + var data map[string]interface{} = nil + if issuerRet.Secret != nil && len(issuerRet.Secret.Data) > 0 { + data = issuerRet.Secret.Data + } + + return false, issuerRet, 
data, nil +} + +func pkiFetchIssuerCRL(e *Executor, issuer string, delta bool, versionError func()) (bool, *PathFetch, *x509.RevocationList, error) { + path := "/{{mount}}/issuer/" + issuer + "/crl" + name := "CRL" + if delta { + path += "/delta" + name = "Delta CRL" + } + + crlRet, err := e.FetchIfNotFetched(logical.ReadOperation, path) + if err != nil { + return true, crlRet, nil, err + } + + if !crlRet.IsSecretOK() { + if crlRet.IsUnsupportedPathError() { + versionError() + } + return true, crlRet, nil, nil + } + + if len(crlRet.ParsedCache) == 0 { + crl, err := parsePEMCRL(crlRet.Secret.Data["crl"].(string)) + if err != nil { + return true, crlRet, nil, fmt.Errorf("unable to parse issuer %v's %v: %w", issuer, name, err) + } + crlRet.ParsedCache["crl"] = crl + } + + return false, crlRet, crlRet.ParsedCache["crl"].(*x509.RevocationList), nil +} + +func pkiFetchKeyEntry(e *Executor, key string, versionError func()) (bool, *PathFetch, map[string]interface{}, error) { + keyRet, err := e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/key/"+key) + if err != nil { + return true, keyRet, nil, err + } + + if !keyRet.IsSecretOK() { + if keyRet.IsUnsupportedPathError() { + versionError() + } + return true, keyRet, nil, nil + } + + var data map[string]interface{} = nil + if keyRet.Secret != nil && len(keyRet.Secret.Data) > 0 { + data = keyRet.Secret.Data + } + + return false, keyRet, data, nil +} + +func pkiFetchLeavesList(e *Executor, versionError func()) (bool, *PathFetch, []string, error) { + leavesRet, err := e.FetchIfNotFetched(logical.ListOperation, "/{{mount}}/certs") + if err != nil { + return true, leavesRet, nil, err + } + + if !leavesRet.IsSecretOK() { + if leavesRet.IsUnsupportedPathError() { + versionError() + } + + return true, leavesRet, nil, nil + } + + if len(leavesRet.ParsedCache) == 0 { + var leaves []string + for _, rawSerial := range leavesRet.Secret.Data["keys"].([]interface{}) { + leaves = append(leaves, rawSerial.(string)) + } + leavesRet.ParsedCache["leaves"] = leaves + leavesRet.ParsedCache["count"] = len(leaves) + } + + return false, leavesRet, leavesRet.ParsedCache["leaves"].([]string), nil +} + +func pkiFetchLeaf(e *Executor, serial string, versionError func()) (bool, *PathFetch, *x509.Certificate, error) { + leafRet, err := e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/cert/"+serial) + if err != nil { + return true, leafRet, nil, err + } + + if !leafRet.IsSecretOK() { + if leafRet.IsUnsupportedPathError() { + versionError() + } + return true, leafRet, nil, nil + } + + if len(leafRet.ParsedCache) == 0 { + cert, err := ParsePEMCert(leafRet.Secret.Data["certificate"].(string)) + if err != nil { + return true, leafRet, nil, fmt.Errorf("unable to parse leaf %v's certificate: %w", serial, err) + } + + leafRet.ParsedCache["certificate"] = cert + } + + return false, leafRet, leafRet.ParsedCache["certificate"].(*x509.Certificate), nil +} + +func pkiFetchRolesList(e *Executor, versionError func()) (bool, *PathFetch, []string, error) { + rolesRet, err := e.FetchIfNotFetched(logical.ListOperation, "/{{mount}}/roles") + if err != nil { + return true, rolesRet, nil, err + } + + if !rolesRet.IsSecretOK() { + if rolesRet.IsUnsupportedPathError() { + versionError() + } + + return true, rolesRet, nil, nil + } + + if len(rolesRet.ParsedCache) == 0 { + var roles []string + for _, roleName := range rolesRet.Secret.Data["keys"].([]interface{}) { + roles = append(roles, roleName.(string)) + } + rolesRet.ParsedCache["roles"] = roles + } + + return false, rolesRet, 
rolesRet.ParsedCache["roles"].([]string), nil +} + +func pkiFetchRole(e *Executor, name string, versionError func()) (bool, *PathFetch, map[string]interface{}, error) { + roleRet, err := e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/roles/"+name) + if err != nil { + return true, roleRet, nil, err + } + + if !roleRet.IsSecretOK() { + if roleRet.IsUnsupportedPathError() { + versionError() + } + return true, roleRet, nil, nil + } + + var data map[string]interface{} = nil + if roleRet.Secret != nil && len(roleRet.Secret.Data) > 0 { + data = roleRet.Secret.Data + } + + return false, roleRet, data, nil +} diff --git a/command/healthcheck/pki_allow_acme_headers.go b/command/healthcheck/pki_allow_acme_headers.go new file mode 100644 index 0000000..2015ac2 --- /dev/null +++ b/command/healthcheck/pki_allow_acme_headers.go @@ -0,0 +1,155 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package healthcheck + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/logical" +) + +type AllowAcmeHeaders struct { + Enabled bool + UnsupportedVersion bool + + TuneFetcher *PathFetch + TuneData map[string]interface{} + + AcmeConfigFetcher *PathFetch +} + +func NewAllowAcmeHeaders() Check { + return &AllowAcmeHeaders{} +} + +func (h *AllowAcmeHeaders) Name() string { + return "allow_acme_headers" +} + +func (h *AllowAcmeHeaders) IsEnabled() bool { + return h.Enabled +} + +func (h *AllowAcmeHeaders) DefaultConfig() map[string]interface{} { + return map[string]interface{}{} +} + +func (h *AllowAcmeHeaders) LoadConfig(config map[string]interface{}) error { + enabled, err := parseutil.ParseBool(config["enabled"]) + if err != nil { + return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err) + } + h.Enabled = enabled + + return nil +} + +func (h *AllowAcmeHeaders) FetchResources(e *Executor) error { + var err error + h.AcmeConfigFetcher, err = e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/config/acme") + if err != nil { + return err + } + + if h.AcmeConfigFetcher.IsUnsupportedPathError() { + h.UnsupportedVersion = true + } + + _, h.TuneFetcher, h.TuneData, err = fetchMountTune(e, func() { + h.UnsupportedVersion = true + }) + if err != nil { + return err + } + + return nil +} + +func (h *AllowAcmeHeaders) Evaluate(e *Executor) ([]*Result, error) { + if h.UnsupportedVersion { + ret := Result{ + Status: ResultInvalidVersion, + Endpoint: h.AcmeConfigFetcher.Path, + Message: "This health check requires Vault 1.14+ but an earlier version of Vault Server was contacted, preventing this health check from running.", + } + return []*Result{&ret}, nil + } + + if h.AcmeConfigFetcher.IsSecretPermissionsError() { + msg := "Without read access to ACME configuration, this health check is unable to function." + return craftInsufficientPermissionResult(e, h.AcmeConfigFetcher.Path, msg), nil + } + + acmeEnabled, err := isAcmeEnabled(h.AcmeConfigFetcher) + if err != nil { + return nil, err + } + + if !acmeEnabled { + ret := Result{ + Status: ResultNotApplicable, + Endpoint: h.AcmeConfigFetcher.Path, + Message: "ACME is not enabled, no additional response headers required.", + } + return []*Result{&ret}, nil + } + + if h.TuneFetcher.IsSecretPermissionsError() { + msg := "Without access to mount tune information, this health check is unable to function." 
+ return craftInsufficientPermissionResult(e, h.TuneFetcher.Path, msg), nil + } + + resp, err := StringList(h.TuneData["allowed_response_headers"]) + if err != nil { + return nil, fmt.Errorf("unable to parse value from server for allowed_response_headers: %w", err) + } + + requiredResponseHeaders := []string{"Replay-Nonce", "Link", "Location"} + foundResponseHeaders := []string{} + for _, param := range resp { + for _, reqHeader := range requiredResponseHeaders { + if strings.EqualFold(param, reqHeader) { + foundResponseHeaders = append(foundResponseHeaders, reqHeader) + break + } + } + } + + foundAllHeaders := strutil.EquivalentSlices(requiredResponseHeaders, foundResponseHeaders) + + if !foundAllHeaders { + ret := Result{ + Status: ResultWarning, + Endpoint: "/sys/mounts/{{mount}}/tune", + Message: "Mount hasn't enabled 'Replay-Nonce', 'Link', 'Location' response headers, these are required for ACME to function.", + } + return []*Result{&ret}, nil + } + + ret := Result{ + Status: ResultOK, + Endpoint: "/sys/mounts/{{mount}}/tune", + Message: "Mount has enabled 'Replay-Nonce', 'Link', 'Location' response headers.", + } + return []*Result{&ret}, nil +} + +func craftInsufficientPermissionResult(e *Executor, path, errorMsg string) []*Result { + ret := Result{ + Status: ResultInsufficientPermissions, + Endpoint: path, + Message: errorMsg, + } + + if e.Client.Token() == "" { + ret.Message = "No token available so unable read the tune endpoint for this mount. " + ret.Message + } else { + ret.Message = "This token lacks permission to read the tune endpoint for this mount. " + ret.Message + } + + return []*Result{&ret} +} diff --git a/command/healthcheck/pki_allow_if_modified_since.go b/command/healthcheck/pki_allow_if_modified_since.go new file mode 100644 index 0000000..bb5306e --- /dev/null +++ b/command/healthcheck/pki_allow_if_modified_since.go @@ -0,0 +1,132 @@ +// Copyright (c) HashiCorp, Inc. 
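The allow_acme_headers check above only reports on the mount's `allowed_response_headers`; clearing the warning is a mount tune. A hedged editorial sketch using the Go API client follows (the mount name "pki" is an assumption, and note that tuning replaces any header lists already set on the mount). The allow_if_modified_since check below inspects the same tune output through `passthrough_request_headers` and `allowed_response_headers`, so both are set here.

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Allow the three response headers ACME requires, plus the caching
	// headers the If-Modified-Since check looks for.
	err = client.Sys().TuneMount("pki", api.MountConfigInput{
		AllowedResponseHeaders:    []string{"Replay-Nonce", "Link", "Location", "Last-Modified"},
		PassthroughRequestHeaders: []string{"If-Modified-Since"},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```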
+// SPDX-License-Identifier: MPL-2.0 + +package healthcheck + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-secure-stdlib/parseutil" +) + +type AllowIfModifiedSince struct { + Enabled bool + UnsupportedVersion bool + + TuneData map[string]interface{} + Fetcher *PathFetch +} + +func NewAllowIfModifiedSinceCheck() Check { + return &AllowIfModifiedSince{} +} + +func (h *AllowIfModifiedSince) Name() string { + return "allow_if_modified_since" +} + +func (h *AllowIfModifiedSince) IsEnabled() bool { + return h.Enabled +} + +func (h *AllowIfModifiedSince) DefaultConfig() map[string]interface{} { + return map[string]interface{}{} +} + +func (h *AllowIfModifiedSince) LoadConfig(config map[string]interface{}) error { + var err error + + h.Enabled, err = parseutil.ParseBool(config["enabled"]) + if err != nil { + return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err) + } + + return nil +} + +func (h *AllowIfModifiedSince) FetchResources(e *Executor) error { + var exit bool + var err error + + exit, h.Fetcher, h.TuneData, err = fetchMountTune(e, func() { + h.UnsupportedVersion = true + }) + + if exit || err != nil { + return err + } + return nil +} + +func (h *AllowIfModifiedSince) Evaluate(e *Executor) (results []*Result, err error) { + if h.UnsupportedVersion { + ret := Result{ + Status: ResultInvalidVersion, + Endpoint: "/sys/mounts/{{mount}}/tune", + Message: "This health check requires Vault 1.12+ but an earlier version of Vault Server was contacted, preventing this health check from running.", + } + return []*Result{&ret}, nil + } + + if h.Fetcher.IsSecretPermissionsError() { + ret := Result{ + Status: ResultInsufficientPermissions, + Endpoint: "/sys/mounts/{{mount}}/tune", + Message: "Without this information, this health check is unable to function.", + } + + if e.Client.Token() == "" { + ret.Message = "No token available so unable read the tune endpoint for this mount. " + ret.Message + } else { + ret.Message = "This token lacks permission to read the tune endpoint for this mount. 
" + ret.Message + } + + results = append(results, &ret) + return + } + + req, err := StringList(h.TuneData["passthrough_request_headers"]) + if err != nil { + return nil, fmt.Errorf("unable to parse value from server for passthrough_request_headers: %w", err) + } + + resp, err := StringList(h.TuneData["allowed_response_headers"]) + if err != nil { + return nil, fmt.Errorf("unable to parse value from server for allowed_response_headers: %w", err) + } + + foundIMS := false + for _, param := range req { + if strings.EqualFold(param, "If-Modified-Since") { + foundIMS = true + break + } + } + + foundLM := false + for _, param := range resp { + if strings.EqualFold(param, "Last-Modified") { + foundLM = true + break + } + } + + if !foundIMS || !foundLM { + ret := Result{ + Status: ResultInformational, + Endpoint: "/sys/mounts/{{mount}}/tune", + Message: "Mount hasn't enabled If-Modified-Since Request or Last-Modified Response headers; consider enabling these headers to allow clients to fetch CAs and CRLs only when they've changed, reducing total bandwidth.", + } + results = append(results, &ret) + } else { + ret := Result{ + Status: ResultOK, + Endpoint: "/sys/mounts/{{mount}}/tune", + Message: "Mount allows the If-Modified-Since request header and Last-Modified response header.", + } + results = append(results, &ret) + } + + return +} diff --git a/command/healthcheck/pki_audit_visibility.go b/command/healthcheck/pki_audit_visibility.go new file mode 100644 index 0000000..6b3834f --- /dev/null +++ b/command/healthcheck/pki_audit_visibility.go @@ -0,0 +1,216 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package healthcheck + +import ( + "fmt" + + "github.com/hashicorp/go-secure-stdlib/parseutil" +) + +var VisibleReqParams = []string{ + "csr", + "certificate", + "issuer_ref", + "common_name", + "alt_names", + "other_sans", + "ip_sans", + "uri_sans", + "ttl", + "not_after", + "serial_number", + "key_type", + "private_key_format", + "managed_key_name", + "managed_key_id", + "ou", + "organization", + "country", + "locality", + "province", + "street_address", + "postal_code", + "permitted_dns_domains", + "policy_identifiers", + "ext_key_usage_oids", +} + +var VisibleRespParams = []string{ + "certificate", + "issuing_ca", + "serial_number", + "error", + "ca_chain", +} + +var HiddenReqParams = []string{ + "private_key", + "pem_bundle", +} + +var HiddenRespParams = []string{ + "private_key", + "pem_bundle", +} + +type AuditVisibility struct { + Enabled bool + UnsupportedVersion bool + + IgnoredParameters map[string]bool + TuneData map[string]interface{} + Fetcher *PathFetch +} + +func NewAuditVisibilityCheck() Check { + return &AuditVisibility{ + IgnoredParameters: make(map[string]bool), + } +} + +func (h *AuditVisibility) Name() string { + return "audit_visibility" +} + +func (h *AuditVisibility) IsEnabled() bool { + return h.Enabled +} + +func (h *AuditVisibility) DefaultConfig() map[string]interface{} { + return map[string]interface{}{ + "ignored_parameters": []string{}, + } +} + +func (h *AuditVisibility) LoadConfig(config map[string]interface{}) error { + var err error + + coerced, err := StringList(config["ignored_parameters"]) + if err != nil { + return fmt.Errorf("error parsing %v.ignored_parameters: %v", h.Name(), err) + } + for _, ignored := range coerced { + h.IgnoredParameters[ignored] = true + } + + h.Enabled, err = parseutil.ParseBool(config["enabled"]) + if err != nil { + return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err) + } + + return nil +} + 
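The four parameter lists at the top of this file drive the evaluation further below: VisibleReqParams and VisibleRespParams are expected to appear in the mount's `audit_non_hmac_*` keys, while HiddenReqParams and HiddenRespParams should not. As a hedged editorial sketch (mount name "pki" assumed, exported lists reused from this package), the informational findings can be addressed with a mount tune:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/command/healthcheck"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Log the non-sensitive PKI parameters in cleartext in audit logs;
	// anything not listed (including private_key and pem_bundle) stays HMACed.
	err = client.Sys().TuneMount("pki", api.MountConfigInput{
		AuditNonHMACRequestKeys:  healthcheck.VisibleReqParams,
		AuditNonHMACResponseKeys: healthcheck.VisibleRespParams,
	})
	if err != nil {
		log.Fatal(err)
	}
}
```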
+func (h *AuditVisibility) FetchResources(e *Executor) error { + var exit bool + var err error + + exit, h.Fetcher, h.TuneData, err = fetchMountTune(e, func() { + h.UnsupportedVersion = true + }) + + if exit || err != nil { + return err + } + return nil +} + +func (h *AuditVisibility) Evaluate(e *Executor) (results []*Result, err error) { + if h.UnsupportedVersion { + ret := Result{ + Status: ResultInvalidVersion, + Endpoint: "/sys/mounts/{{mount}}/tune", + Message: "This health check requires Vault 1.9+ but an earlier version of Vault Server was contacted, preventing this health check from running.", + } + return []*Result{&ret}, nil + } + + if h.Fetcher.IsSecretPermissionsError() { + ret := Result{ + Status: ResultInsufficientPermissions, + Endpoint: "/sys/mounts/{{mount}}/tune", + Message: "Without this information, this health check is unable to function.", + } + + if e.Client.Token() == "" { + ret.Message = "No token available so unable read the tune endpoint for this mount. " + ret.Message + } else { + ret.Message = "This token lacks permission to read the tune endpoint for this mount. " + ret.Message + } + + results = append(results, &ret) + return + } + + sourceMap := map[string][]string{ + "audit_non_hmac_request_keys": VisibleReqParams, + "audit_non_hmac_response_keys": VisibleRespParams, + } + for source, visibleList := range sourceMap { + actual, err := StringList(h.TuneData[source]) + if err != nil { + return nil, fmt.Errorf("error parsing %v from server: %v", source, err) + } + + for _, param := range visibleList { + found := false + for _, tuned := range actual { + if param == tuned { + found = true + break + } + } + + if !found { + ret := Result{ + Status: ResultInformational, + Endpoint: "/sys/mounts/{{mount}}/tune", + Message: fmt.Sprintf("Mount currently HMACs %v because it is not in %v; as this is not a sensitive security parameter, it is encouraged to disable HMACing to allow better auditing of the PKI engine.", param, source), + } + results = append(results, &ret) + } + } + } + + sourceMap = map[string][]string{ + "audit_non_hmac_request_keys": HiddenReqParams, + "audit_non_hmac_response_keys": HiddenRespParams, + } + for source, hiddenList := range sourceMap { + actual, err := StringList(h.TuneData[source]) + if err != nil { + return nil, fmt.Errorf("error parsing %v from server: %v", source, err) + } + for _, param := range hiddenList { + found := false + for _, tuned := range actual { + if param == tuned { + found = true + break + } + } + + if found { + ret := Result{ + Status: ResultWarning, + Endpoint: "/sys/mounts/{{mount}}/tune", + Message: fmt.Sprintf("Mount currently doesn't HMAC %v because it is in %v; as this is a sensitive security parameter it is encouraged to HMAC it in the Audit logs.", param, source), + } + results = append(results, &ret) + } + } + } + + if len(results) == 0 { + ret := Result{ + Status: ResultOK, + Endpoint: "/sys/mounts/{{mount}}/tune", + Message: "Mount audit information is configured appropriately.", + } + results = append(results, &ret) + } + + return +} diff --git a/command/healthcheck/pki_ca_validity_period.go b/command/healthcheck/pki_ca_validity_period.go new file mode 100644 index 0000000..511de75 --- /dev/null +++ b/command/healthcheck/pki_ca_validity_period.go @@ -0,0 +1,169 @@ +// Copyright (c) HashiCorp, Inc. 
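The ca_validity_period check that follows is the first with a non-trivial DefaultConfig; its duration strings ("180d" and the like) are parsed by parseutil.ParseDurationSecond in LoadConfig. A hedged editorial sketch of overriding one threshold through Executor.BuildConfig (names are those defined in this commit; the mount name "pki" is an assumption):

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/command/healthcheck"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	e := healthcheck.NewExecutor(client, "pki")
	e.AddCheck(healthcheck.NewCAValidityPeriodCheck())

	// External overrides are keyed by the check's Name(); a key absent
	// from the check's DefaultConfig() makes BuildConfig return an error.
	err = e.BuildConfig(map[string]interface{}{
		"ca_validity_period": map[string]interface{}{
			"root_expiry_critical": "90d", // tighter than the 180d default
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```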
+// SPDX-License-Identifier: MPL-2.0 + +package healthcheck + +import ( + "bytes" + "crypto/x509" + "fmt" + "strings" + "time" + + "github.com/hashicorp/go-secure-stdlib/parseutil" +) + +type CAValidityPeriod struct { + Enabled bool + + RootExpiries map[ResultStatus]time.Duration + IntermediateExpieries map[ResultStatus]time.Duration + + UnsupportedVersion bool + + Issuers map[string]*x509.Certificate +} + +func NewCAValidityPeriodCheck() Check { + return &CAValidityPeriod{ + RootExpiries: make(map[ResultStatus]time.Duration, 3), + IntermediateExpieries: make(map[ResultStatus]time.Duration, 3), + Issuers: make(map[string]*x509.Certificate), + } +} + +func (h *CAValidityPeriod) Name() string { + return "ca_validity_period" +} + +func (h *CAValidityPeriod) IsEnabled() bool { + return h.Enabled +} + +func (h *CAValidityPeriod) DefaultConfig() map[string]interface{} { + return map[string]interface{}{ + "root_expiry_critical": "180d", + "intermediate_expiry_critical": "30d", + "root_expiry_warning": "365d", + "intermediate_expiry_warning": "60d", + "root_expiry_informational": "730d", + "intermediate_expiry_informational": "180d", + } +} + +func (h *CAValidityPeriod) LoadConfig(config map[string]interface{}) error { + parameters := []string{ + "root_expiry_critical", + "intermediate_expiry_critical", + "root_expiry_warning", + "intermediate_expiry_warning", + "root_expiry_informational", + "intermediate_expiry_informational", + } + for _, parameter := range parameters { + name_split := strings.Split(parameter, "_") + if len(name_split) != 3 || name_split[1] != "expiry" { + return fmt.Errorf("bad parameter: %v / %v / %v", parameter, len(name_split), name_split[1]) + } + + status, present := NameResultStatusMap[name_split[2]] + if !present { + return fmt.Errorf("bad parameter: %v's type %v isn't in name map", parameter, name_split[2]) + } + + value_raw, present := config[parameter] + if !present { + return fmt.Errorf("parameter not present in config; Executor should've handled this for us: %v", parameter) + } + + value, err := parseutil.ParseDurationSecond(value_raw) + if err != nil { + return fmt.Errorf("failed to parse parameter (%v=%v): %w", parameter, value_raw, err) + } + + if name_split[0] == "root" { + h.RootExpiries[status] = value + } else if name_split[0] == "intermediate" { + h.IntermediateExpieries[status] = value + } else { + return fmt.Errorf("bad parameter: %v's CA type isn't root/intermediate: %v", parameters, name_split[0]) + } + } + + enabled, err := parseutil.ParseBool(config["enabled"]) + if err != nil { + return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err) + } + h.Enabled = enabled + + return nil +} + +func (h *CAValidityPeriod) FetchResources(e *Executor) error { + exit, _, issuers, err := pkiFetchIssuersList(e, func() { + h.UnsupportedVersion = true + }) + if exit || err != nil { + return err + } + + for _, issuer := range issuers { + skip, _, cert, err := pkiFetchIssuer(e, issuer, func() { + h.UnsupportedVersion = true + }) + if skip || err != nil { + if err != nil { + return err + } + continue + } + + h.Issuers[issuer] = cert + } + + return nil +} + +func (h *CAValidityPeriod) Evaluate(e *Executor) (results []*Result, err error) { + if h.UnsupportedVersion { + ret := Result{ + Status: ResultInvalidVersion, + Endpoint: "/{{mount}}/issuers", + Message: "This health check requires Vault 1.11+ but an earlier version of Vault Server was contacted, preventing this health check from running.", + } + return []*Result{&ret}, nil + } + + now := time.Now() + + for name, 
cert := range h.Issuers { + var ret Result + ret.Status = ResultOK + ret.Endpoint = "/{{mount}}/issuer/" + name + ret.Message = fmt.Sprintf("Issuer's validity (%v) is OK", cert.NotAfter.Format("2006-01-02")) + + hasSelfReference := bytes.Equal(cert.RawSubject, cert.RawIssuer) + isSelfSigned := cert.CheckSignatureFrom(cert) == nil + isRoot := hasSelfReference && isSelfSigned + + for _, criticality := range []ResultStatus{ResultCritical, ResultWarning, ResultInformational} { + var d time.Duration + if isRoot { + d = h.RootExpiries[criticality] + } else { + d = h.IntermediateExpieries[criticality] + } + + windowExpiry := now.Add(d) + if cert.NotAfter.Before(windowExpiry) { + ret.Status = criticality + ret.Message = fmt.Sprintf("Issuer's validity is outside of the suggested rotation window: issuer is valid until %v but expires within %v (ending on %v). It is suggested to start rotating this issuer to new key material to avoid future downtime caused by this current issuer expiring.", cert.NotAfter.Format("2006-01-02"), FormatDuration(d), windowExpiry.Format("2006-01-02")) + break + } + } + + results = append(results, &ret) + } + + return +} diff --git a/command/healthcheck/pki_crl_validity_period.go b/command/healthcheck/pki_crl_validity_period.go new file mode 100644 index 0000000..8450a05 --- /dev/null +++ b/command/healthcheck/pki_crl_validity_period.go @@ -0,0 +1,208 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package healthcheck + +import ( + "crypto/x509" + "fmt" + "time" + + "github.com/hashicorp/vault/sdk/logical" + + "github.com/hashicorp/go-secure-stdlib/parseutil" +) + +type CRLValidityPeriod struct { + Enabled bool + + CRLExpiryPercentage int + DeltaCRLExpiryPercentage int + + UnsupportedVersion bool + NoDeltas bool + + CRLs map[string]*x509.RevocationList + DeltaCRLs map[string]*x509.RevocationList + + CRLConfig *PathFetch +} + +func NewCRLValidityPeriodCheck() Check { + return &CRLValidityPeriod{ + CRLs: make(map[string]*x509.RevocationList), + DeltaCRLs: make(map[string]*x509.RevocationList), + } +} + +func (h *CRLValidityPeriod) Name() string { + return "crl_validity_period" +} + +func (h *CRLValidityPeriod) IsEnabled() bool { + return h.Enabled +} + +func (h *CRLValidityPeriod) DefaultConfig() map[string]interface{} { + return map[string]interface{}{ + "crl_expiry_pct_critical": "95", + "delta_crl_expiry_pct_critical": "95", + } +} + +func (h *CRLValidityPeriod) LoadConfig(config map[string]interface{}) error { + value, err := parseutil.SafeParseIntRange(config["crl_expiry_pct_critical"], 1, 99) + if err != nil { + return fmt.Errorf("error parsing %v.crl_expiry_pct_critical=%v: %w", h.Name(), config["crl_expiry_pct_critical"], err) + } + h.CRLExpiryPercentage = int(value) + + value, err = parseutil.SafeParseIntRange(config["delta_crl_expiry_pct_critical"], 1, 99) + if err != nil { + return fmt.Errorf("error parsing %v.delta_crl_expiry_pct_critical=%v: %w", h.Name(), config["delta_crl_expiry_pct_critical"], err) + } + h.DeltaCRLExpiryPercentage = int(value) + + enabled, err := parseutil.ParseBool(config["enabled"]) + if err != nil { + return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err) + } + h.Enabled = enabled + + return nil +} + +func (h *CRLValidityPeriod) FetchResources(e *Executor) error { + exit, _, issuers, err := pkiFetchIssuersList(e, func() { + h.UnsupportedVersion = true + }) + if exit || err != nil { + return err + } + + for _, issuer := range issuers { + exit, _, crl, err := pkiFetchIssuerCRL(e, issuer, false, func() { + 
h.UnsupportedVersion = true + }) + if exit || err != nil { + if err != nil { + return err + } + continue + } + + h.CRLs[issuer] = crl + + exit, _, delta, err := pkiFetchIssuerCRL(e, issuer, true, func() { + h.NoDeltas = true + }) + if exit || err != nil { + if err != nil { + return err + } + continue + } + + h.DeltaCRLs[issuer] = delta + } + + // Check if the issuer is fetched yet. + configRet, err := e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/config/crl") + if err != nil { + return err + } + + h.CRLConfig = configRet + + return nil +} + +func (h *CRLValidityPeriod) Evaluate(e *Executor) (results []*Result, err error) { + if h.UnsupportedVersion { + ret := Result{ + Status: ResultInvalidVersion, + Endpoint: "/{{mount}}/issuers", + Message: "This health check requires Vault 1.11+ but an earlier version of Vault Server was contacted, preventing this health check from running.", + } + return []*Result{&ret}, nil + } + + now := time.Now() + crlDisabled := false + if h.CRLConfig != nil { + if h.CRLConfig.IsSecretPermissionsError() { + ret := Result{ + Status: ResultInsufficientPermissions, + Endpoint: "/{{mount}}/config/crl", + Message: "This prevents the health check from seeing if the CRL is disabled and dropping the severity of this check accordingly.", + } + + if e.Client.Token() == "" { + ret.Message = "No token available so unable read authenticated CRL configuration for this mount. " + ret.Message + } else { + ret.Message = "This token lacks permission to read the CRL configuration for this mount. " + ret.Message + } + + results = append(results, &ret) + } else if h.CRLConfig.Secret != nil && h.CRLConfig.Secret.Data["disabled"] != nil { + crlDisabled = h.CRLConfig.Secret.Data["disabled"].(bool) + } + } + + if h.NoDeltas && len(h.DeltaCRLs) == 0 { + ret := Result{ + Status: ResultInvalidVersion, + Endpoint: "/{{mount}}/issuer/*/crl/delta", + Message: "This health check validates Delta CRLs on Vault 1.12+, but an earlier version of Vault was used. No results about delta CRL validity will be returned.", + } + results = append(results, &ret) + } + + for name, crl := range h.CRLs { + var ret Result + ret.Status = ResultOK + ret.Endpoint = "/{{mount}}/issuer/" + name + "/crl" + ret.Message = fmt.Sprintf("CRL's validity (%v to %v) is OK.", crl.ThisUpdate.Format("2006-01-02"), crl.NextUpdate.Format("2006-01-02")) + + used := now.Sub(crl.ThisUpdate) + total := crl.NextUpdate.Sub(crl.ThisUpdate) + ratio := time.Duration((int64(total) * int64(h.CRLExpiryPercentage)) / int64(100)) + if used >= ratio { + expWhen := crl.ThisUpdate.Add(ratio) + ret.Status = ResultCritical + ret.Message = fmt.Sprintf("CRL's validity is outside of suggested rotation window: CRL's next update is expected at %v, but expires within %v%% of validity window (starting on %v and ending on %v). It is suggested to rotate this CRL and start propagating it to hosts to avoid any issues caused by stale CRLs.", crl.NextUpdate.Format("2006-01-02"), h.CRLExpiryPercentage, crl.ThisUpdate.Format("2006-01-02"), expWhen.Format("2006-01-02")) + + if crlDisabled { + ret.Status = ResultInformational + ret.Message += " Because the CRL is disabled, this is less of a concern." 
+ } + } + + results = append(results, &ret) + } + + for name, crl := range h.DeltaCRLs { + var ret Result + ret.Status = ResultOK + ret.Endpoint = "/{{mount}}/issuer/" + name + "/crl/delta" + ret.Message = fmt.Sprintf("Delta CRL's validity (%v to %v) is OK.", crl.ThisUpdate.Format("2006-01-02"), crl.NextUpdate.Format("2006-01-02")) + + used := now.Sub(crl.ThisUpdate) + total := crl.NextUpdate.Sub(crl.ThisUpdate) + ratio := time.Duration((int64(total) * int64(h.DeltaCRLExpiryPercentage)) / int64(100)) + if used >= ratio { + expWhen := crl.ThisUpdate.Add(ratio) + ret.Status = ResultCritical + ret.Message = fmt.Sprintf("Delta CRL's validity is outside of suggested rotation window: Delta CRL's next update is expected at %v, but expires within %v%% of validity window (starting on %v and ending on %v). It is suggested to rotate this Delta CRL and start propagating it to hosts to avoid any issues caused by stale CRLs.", crl.NextUpdate.Format("2006-01-02"), h.CRLExpiryPercentage, crl.ThisUpdate.Format("2006-01-02"), expWhen.Format("2006-01-02")) + + if crlDisabled { + ret.Status = ResultInformational + ret.Message += " Because the CRL is disabled, this is less of a concern." + } + } + + results = append(results, &ret) + } + + return +} diff --git a/command/healthcheck/pki_enable_acme_issuance.go b/command/healthcheck/pki_enable_acme_issuance.go new file mode 100644 index 0000000..986165d --- /dev/null +++ b/command/healthcheck/pki_enable_acme_issuance.go @@ -0,0 +1,237 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package healthcheck + +import ( + "bytes" + "context" + "crypto/tls" + "fmt" + "net/http" + "net/url" + + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/sdk/logical" + + "golang.org/x/crypto/acme" +) + +type EnableAcmeIssuance struct { + Enabled bool + UnsupportedVersion bool + + AcmeConfigFetcher *PathFetch + ClusterConfigFetcher *PathFetch + TotalIssuers int + RootIssuers int +} + +func NewEnableAcmeIssuance() Check { + return &EnableAcmeIssuance{} +} + +func (h *EnableAcmeIssuance) Name() string { + return "enable_acme_issuance" +} + +func (h *EnableAcmeIssuance) IsEnabled() bool { + return h.Enabled +} + +func (h *EnableAcmeIssuance) DefaultConfig() map[string]interface{} { + return map[string]interface{}{} +} + +func (h *EnableAcmeIssuance) LoadConfig(config map[string]interface{}) error { + enabled, err := parseutil.ParseBool(config["enabled"]) + if err != nil { + return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err) + } + h.Enabled = enabled + + return nil +} + +func (h *EnableAcmeIssuance) FetchResources(e *Executor) error { + var err error + h.AcmeConfigFetcher, err = e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/config/acme") + if err != nil { + return err + } + + if h.AcmeConfigFetcher.IsUnsupportedPathError() { + h.UnsupportedVersion = true + } + + h.ClusterConfigFetcher, err = e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/config/cluster") + if err != nil { + return err + } + + if h.ClusterConfigFetcher.IsUnsupportedPathError() { + h.UnsupportedVersion = true + } + + h.TotalIssuers, h.RootIssuers, err = doesMountContainOnlyRootIssuers(e) + + return nil +} + +func doesMountContainOnlyRootIssuers(e *Executor) (int, int, error) { + exit, _, issuers, err := pkiFetchIssuersList(e, func() {}) + if exit || err != nil { + return 0, 0, err + } + + totalIssuers := 0 + rootIssuers := 0 + + for _, issuer := range issuers { + skip, _, cert, err := pkiFetchIssuer(e, issuer, func() {}) + + if skip 
|| err != nil { + if err != nil { + return 0, 0, err + } + continue + } + totalIssuers++ + + if !bytes.Equal(cert.RawSubject, cert.RawIssuer) { + continue + } + if err := cert.CheckSignatureFrom(cert); err != nil { + continue + } + rootIssuers++ + } + + return totalIssuers, rootIssuers, nil +} + +func isAcmeEnabled(fetcher *PathFetch) (bool, error) { + isEnabledRaw, ok := fetcher.Secret.Data["enabled"] + if !ok { + return false, fmt.Errorf("enabled configuration field missing from acme config") + } + + parseBool, err := parseutil.ParseBool(isEnabledRaw) + if err != nil { + return false, fmt.Errorf("failed parsing 'enabled' field from ACME config: %w", err) + } + + return parseBool, nil +} + +func verifyLocalPathUrl(h *EnableAcmeIssuance) error { + localPathRaw, ok := h.ClusterConfigFetcher.Secret.Data["path"] + if !ok { + return fmt.Errorf("'path' field missing from config") + } + + localPath, err := parseutil.ParseString(localPathRaw) + if err != nil { + return fmt.Errorf("failed converting 'path' field from local config: %w", err) + } + + if localPath == "" { + return fmt.Errorf("'path' field not configured within /{{mount}}/config/cluster") + } + + parsedUrl, err := url.Parse(localPath) + if err != nil { + return fmt.Errorf("failed to parse URL from path config: %v: %w", localPathRaw, err) + } + + if parsedUrl.Scheme != "https" { + return fmt.Errorf("the configured 'path' field in /{{mount}}/config/cluster was not using an https scheme") + } + + // Avoid issues with SSL certificates for this check, we just want to validate that we would + // hit an ACME server with the path they specified in configuration + tr := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + client := &http.Client{Transport: tr} + acmeDirectoryUrl := parsedUrl.JoinPath("/acme/", "directory") + acmeClient := acme.Client{HTTPClient: client, DirectoryURL: acmeDirectoryUrl.String()} + _, err = acmeClient.Discover(context.Background()) + if err != nil { + return fmt.Errorf("using configured 'path' field ('%s') in /{{mount}}/config/cluster failed to reach the ACME"+ + " directory: %s: %w", parsedUrl.String(), acmeDirectoryUrl.String(), err) + } + + return nil +} + +func (h *EnableAcmeIssuance) Evaluate(e *Executor) (results []*Result, err error) { + if h.UnsupportedVersion { + ret := Result{ + Status: ResultInvalidVersion, + Endpoint: h.AcmeConfigFetcher.Path, + Message: "This health check requires Vault 1.14+ but an earlier version of Vault Server was contacted, preventing this health check from running.", + } + return []*Result{&ret}, nil + } + + if h.AcmeConfigFetcher.IsSecretPermissionsError() { + msg := "Without this information, this health check is unable to function." 
+ return craftInsufficientPermissionResult(e, h.AcmeConfigFetcher.Path, msg), nil + } + + acmeEnabled, err := isAcmeEnabled(h.AcmeConfigFetcher) + if err != nil { + return nil, err + } + + if !acmeEnabled { + if h.TotalIssuers == 0 { + ret := Result{ + Status: ResultNotApplicable, + Endpoint: h.AcmeConfigFetcher.Path, + Message: "No issuers in mount, ACME is not required.", + } + return []*Result{&ret}, nil + } + + if h.TotalIssuers == h.RootIssuers { + ret := Result{ + Status: ResultNotApplicable, + Endpoint: h.AcmeConfigFetcher.Path, + Message: "Mount contains only root issuers, ACME is not required.", + } + return []*Result{&ret}, nil + } + + ret := Result{ + Status: ResultInformational, + Endpoint: h.AcmeConfigFetcher.Path, + Message: "Consider enabling ACME support to support a self-rotating PKI infrastructure.", + } + return []*Result{&ret}, nil + } + + if h.ClusterConfigFetcher.IsSecretPermissionsError() { + msg := "Without this information, this health check is unable to function." + return craftInsufficientPermissionResult(e, h.ClusterConfigFetcher.Path, msg), nil + } + + localPathIssue := verifyLocalPathUrl(h) + + if localPathIssue != nil { + ret := Result{ + Status: ResultWarning, + Endpoint: h.ClusterConfigFetcher.Path, + Message: "ACME enabled in config but not functional: " + localPathIssue.Error(), + } + return []*Result{&ret}, nil + } + + ret := Result{ + Status: ResultOK, + Endpoint: h.ClusterConfigFetcher.Path, + Message: "ACME enabled and successfully connected to the ACME directory.", + } + return []*Result{&ret}, nil +} diff --git a/command/healthcheck/pki_enable_auto_tidy.go b/command/healthcheck/pki_enable_auto_tidy.go new file mode 100644 index 0000000..1734d1a --- /dev/null +++ b/command/healthcheck/pki_enable_auto_tidy.go @@ -0,0 +1,197 @@ +// Copyright (c) HashiCorp, Inc. 
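The enable_acme_issuance check above inspects two endpoints: config/cluster must carry an https `path`, and config/acme must have `enabled` set. A hedged editorial sketch of satisfying both follows; the mount name "pki" and the hostname are placeholders, and the field names are the ones the check reads back.

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// verifyLocalPathUrl requires an https URL here; ACME clients
	// discover {path}/acme/directory from it.
	_, err = client.Logical().Write("pki/config/cluster", map[string]interface{}{
		"path": "https://vault.example.com:8200/v1/pki",
	})
	if err != nil {
		log.Fatal(err)
	}

	_, err = client.Logical().Write("pki/config/acme", map[string]interface{}{
		"enabled": true,
	})
	if err != nil {
		log.Fatal(err)
	}
}
```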
+// SPDX-License-Identifier: MPL-2.0
+
+package healthcheck
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/hashicorp/vault/sdk/logical"
+
+ "github.com/hashicorp/go-secure-stdlib/parseutil"
+)
+
+type EnableAutoTidy struct {
+ Enabled bool
+ UnsupportedVersion bool
+
+ IntervalDurationCritical time.Duration
+ IntervalDurationWarning time.Duration
+ PauseDurationCritical time.Duration
+ PauseDurationWarning time.Duration
+
+ TidyConfig *PathFetch
+}
+
+func NewEnableAutoTidyCheck() Check {
+ return &EnableAutoTidy{}
+}
+
+func (h *EnableAutoTidy) Name() string {
+ return "enable_auto_tidy"
+}
+
+func (h *EnableAutoTidy) IsEnabled() bool {
+ return h.Enabled
+}
+
+func (h *EnableAutoTidy) DefaultConfig() map[string]interface{} {
+ return map[string]interface{}{
+ "interval_duration_critical": "7d",
+ "interval_duration_warning": "2d",
+ "pause_duration_critical": "1s",
+ "pause_duration_warning": "200ms",
+ }
+}
+
+func (h *EnableAutoTidy) fromConfig(config map[string]interface{}, param string) (time.Duration, error) {
+ value, err := parseutil.ParseDurationSecond(config[param])
+ if err != nil {
+ return time.Duration(0), fmt.Errorf("failed to parse parameter %v.%v=%v: %w", h.Name(), param, config[param], err)
+ }
+
+ return value, nil
+}
+
+func (h *EnableAutoTidy) LoadConfig(config map[string]interface{}) error {
+ var err error
+
+ h.IntervalDurationCritical, err = h.fromConfig(config, "interval_duration_critical")
+ if err != nil {
+ return err
+ }
+
+ h.IntervalDurationWarning, err = h.fromConfig(config, "interval_duration_warning")
+ if err != nil {
+ return err
+ }
+
+ h.PauseDurationCritical, err = h.fromConfig(config, "pause_duration_critical")
+ if err != nil {
+ return err
+ }
+
+ h.PauseDurationWarning, err = h.fromConfig(config, "pause_duration_warning")
+ if err != nil {
+ return err
+ }
+
+ enabled, err := parseutil.ParseBool(config["enabled"])
+ if err != nil {
+ return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err)
+ }
+ h.Enabled = enabled
+
+ return nil
+}
+
+func (h *EnableAutoTidy) FetchResources(e *Executor) error {
+ var err error
+ h.TidyConfig, err = e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/config/auto-tidy")
+ if err != nil {
+ return err
+ }
+
+ if h.TidyConfig.IsUnsupportedPathError() {
+ h.UnsupportedVersion = true
+ }
+
+ return nil
+}
+
+func (h *EnableAutoTidy) Evaluate(e *Executor) (results []*Result, err error) {
+ if h.UnsupportedVersion {
+ ret := Result{
+ Status: ResultInvalidVersion,
+ Endpoint: "/{{mount}}/config/auto-tidy",
+ Message: "This health check requires Vault 1.12+, but an earlier version of Vault Server was contacted, preventing this health check from running.",
+ }
+ return []*Result{&ret}, nil
+ }
+
+ if h.TidyConfig == nil {
+ return
+ }
+
+ if h.TidyConfig.IsSecretPermissionsError() {
+ ret := Result{
+ Status: ResultInsufficientPermissions,
+ Endpoint: "/{{mount}}/config/auto-tidy",
+ Message: "This prevents the health check from functioning at all, as it cannot read the mount's auto-tidy configuration.",
+ }
+
+ if e.Client.Token() == "" {
+ ret.Message = "No token available so unable to read the authenticated auto-tidy configuration for this mount. " + ret.Message
+ } else {
+ ret.Message = "This token lacks permission to read the auto-tidy configuration for this mount. 
" + ret.Message + } + + return []*Result{&ret}, nil + } + + isEnabled := h.TidyConfig.Secret.Data["enabled"].(bool) + intervalDuration, err := parseutil.ParseDurationSecond(h.TidyConfig.Secret.Data["interval_duration"]) + if err != nil { + return nil, fmt.Errorf("error parsing API response from server for interval_duration: %w", err) + } + + pauseDuration, err := parseutil.ParseDurationSecond(h.TidyConfig.Secret.Data["pause_duration"]) + if err != nil { + return nil, fmt.Errorf("error parsing API response from server for pause_duration: %w", err) + } + + if !isEnabled { + ret := Result{ + Status: ResultInformational, + Endpoint: "/{{mount}}/config/auto-tidy", + Message: "Auto-tidy is currently disabled; consider enabling auto-tidy to execute tidy operations periodically. This helps the health and performance of a mount.", + } + results = append(results, &ret) + } else { + baseMsg := "Auto-tidy is configured with too long of a value for %v (%v); this could impact performance as tidies run too infrequently or take too long to execute." + + if intervalDuration >= h.IntervalDurationCritical { + ret := Result{ + Status: ResultCritical, + Endpoint: "/{{mount}}/config/auto-tidy", + Message: fmt.Sprintf(baseMsg, "interval_duration", intervalDuration), + } + results = append(results, &ret) + } else if intervalDuration >= h.IntervalDurationWarning { + ret := Result{ + Status: ResultWarning, + Endpoint: "/{{mount}}/config/auto-tidy", + Message: fmt.Sprintf(baseMsg, "interval_duration", intervalDuration), + } + results = append(results, &ret) + } + + if pauseDuration >= h.PauseDurationCritical { + ret := Result{ + Status: ResultCritical, + Endpoint: "/{{mount}}/config/auto-tidy", + Message: fmt.Sprintf(baseMsg, "pause_duration", pauseDuration), + } + results = append(results, &ret) + } else if pauseDuration >= h.PauseDurationWarning { + ret := Result{ + Status: ResultWarning, + Endpoint: "/{{mount}}/config/auto-tidy", + Message: fmt.Sprintf(baseMsg, "pause_duration", pauseDuration), + } + results = append(results, &ret) + } + + if len(results) == 0 { + ret := Result{ + Status: ResultOK, + Endpoint: "/{{mount}}/config/auto-tidy", + Message: "Auto-tidy is enabled and configured appropriately.", + } + results = append(results, &ret) + } + } + + return +} diff --git a/command/healthcheck/pki_hardware_backed_root.go b/command/healthcheck/pki_hardware_backed_root.go new file mode 100644 index 0000000..2fdda6e --- /dev/null +++ b/command/healthcheck/pki_hardware_backed_root.go @@ -0,0 +1,158 @@ +// Copyright (c) HashiCorp, Inc. 
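The enable_auto_tidy check above reads back `enabled`, `interval_duration`, and `pause_duration` and compares them against the thresholds from its DefaultConfig (warning at 2d and 200ms, critical at 7d and 1s). A hedged editorial sketch of a configuration that would evaluate as OK follows; the mount name "pki" is an assumption, and config/auto-tidy may accept further fields not shown here.

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Both durations sit below the check's warning thresholds.
	_, err = client.Logical().Write("pki/config/auto-tidy", map[string]interface{}{
		"enabled":           true,
		"interval_duration": "12h",
		"pause_duration":    "0s",
	})
	if err != nil {
		log.Fatal(err)
	}
}
```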
+// SPDX-License-Identifier: MPL-2.0
+
+package healthcheck
+
+import (
+ "bytes"
+ "crypto/x509"
+ "fmt"
+
+ "github.com/hashicorp/go-secure-stdlib/parseutil"
+)
+
+type HardwareBackedRoot struct {
+ Enabled bool
+
+ UnsupportedVersion bool
+
+ FetchIssues map[string]*PathFetch
+ IssuerKeyMap map[string]string
+ KeyIsManaged map[string]string
+}
+
+func NewHardwareBackedRootCheck() Check {
+ return &HardwareBackedRoot{
+ FetchIssues: make(map[string]*PathFetch),
+ IssuerKeyMap: make(map[string]string),
+ KeyIsManaged: make(map[string]string),
+ }
+}
+
+func (h *HardwareBackedRoot) Name() string {
+ return "hardware_backed_root"
+}
+
+func (h *HardwareBackedRoot) IsEnabled() bool {
+ return h.Enabled
+}
+
+func (h *HardwareBackedRoot) DefaultConfig() map[string]interface{} {
+ return map[string]interface{}{
+ "enabled": false,
+ }
+}
+
+func (h *HardwareBackedRoot) LoadConfig(config map[string]interface{}) error {
+ enabled, err := parseutil.ParseBool(config["enabled"])
+ if err != nil {
+ return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err)
+ }
+ h.Enabled = enabled
+
+ return nil
+}
+
+func (h *HardwareBackedRoot) FetchResources(e *Executor) error {
+ exit, _, issuers, err := pkiFetchIssuersList(e, func() {
+ h.UnsupportedVersion = true
+ })
+ if exit || err != nil {
+ return err
+ }
+
+ for _, issuer := range issuers {
+ skip, ret, entry, err := pkiFetchIssuerEntry(e, issuer, func() {
+ h.UnsupportedVersion = true
+ })
+ if skip || err != nil || entry == nil {
+ if err != nil {
+ return err
+ }
+ h.FetchIssues[issuer] = ret
+ continue
+ }
+
+ // Ensure we only check Root CAs.
+ cert := ret.ParsedCache["certificate"].(*x509.Certificate)
+ if !bytes.Equal(cert.RawSubject, cert.RawIssuer) {
+ continue
+ }
+ if err := cert.CheckSignatureFrom(cert); err != nil {
+ continue
+ }
+
+ // Ensure we only check issuers with keys.
+ keyId, present := entry["key_id"].(string)
+ if !present || len(keyId) == 0 {
+ continue
+ }
+
+ h.IssuerKeyMap[issuer] = keyId
+ skip, ret, keyEntry, err := pkiFetchKeyEntry(e, keyId, func() {
+ h.UnsupportedVersion = true
+ })
+ if skip || err != nil || keyEntry == nil {
+ if err != nil {
+ return err
+ }
+
+ h.FetchIssues[issuer] = ret
+ continue
+ }
+
+ uuid, present := keyEntry["managed_key_id"].(string)
+ if present {
+ h.KeyIsManaged[keyId] = uuid
+ }
+ }
+
+ return nil
+}
+
+func (h *HardwareBackedRoot) Evaluate(e *Executor) (results []*Result, err error) {
+ if h.UnsupportedVersion {
+ ret := Result{
+ Status: ResultInvalidVersion,
+ Endpoint: "/{{mount}}/issuers",
+ Message: "This health check requires Vault 1.11+ but an earlier version of Vault Server was contacted, preventing this health check from running.",
+ }
+ return []*Result{&ret}, nil
+ }
+
+ for issuer, fetchPath := range h.FetchIssues {
+ if fetchPath != nil && fetchPath.IsSecretPermissionsError() {
+ delete(h.IssuerKeyMap, issuer)
+ ret := Result{
+ Status: ResultInsufficientPermissions,
+ Endpoint: fetchPath.Path,
+ Message: "Without this information, this health check is unable to function.",
+ }
+
+ if e.Client.Token() == "" {
+ ret.Message = "No token available so unable to read the endpoint for this mount. " + ret.Message
+ } else {
+ ret.Message = "This token lacks permission to read the endpoint for this mount. 
" + ret.Message
+ }
+
+ results = append(results, &ret)
+ }
+ }
+
+ for name, keyId := range h.IssuerKeyMap {
+ var ret Result
+ ret.Status = ResultInformational
+ ret.Endpoint = "/{{mount}}/issuer/" + name
+ ret.Message = "Root issuer was created using Vault-backed software keys; for added safety of long-lived, important root CAs, you may wish to consider using an HSM or KMS Managed Key to store key material for this issuer."
+
+ uuid, present := h.KeyIsManaged[keyId]
+ if present {
+ ret.Status = ResultOK
+ ret.Message = fmt.Sprintf("Root issuer was backed by an HSM or KMS Managed Key: %v.", uuid)
+ }
+
+ results = append(results, &ret)
+ }
+
+ return
+}
diff --git a/command/healthcheck/pki_role_allows_glob_wildcards.go b/command/healthcheck/pki_role_allows_glob_wildcards.go
new file mode 100644
index 0000000..83c55c2
--- /dev/null
+++ b/command/healthcheck/pki_role_allows_glob_wildcards.go
@@ -0,0 +1,186 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package healthcheck
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/go-secure-stdlib/parseutil"
+)
+
+type RoleAllowsGlobWildcards struct {
+ Enabled bool
+ UnsupportedVersion bool
+
+ RoleListFetchIssue *PathFetch
+ RoleFetchIssues map[string]*PathFetch
+ RoleEntryMap map[string]map[string]interface{}
+}
+
+func NewRoleAllowsGlobWildcardsCheck() Check {
+ return &RoleAllowsGlobWildcards{
+ RoleFetchIssues: make(map[string]*PathFetch),
+ RoleEntryMap: make(map[string]map[string]interface{}),
+ }
+}
+
+func (h *RoleAllowsGlobWildcards) Name() string {
+ return "role_allows_glob_wildcards"
+}
+
+func (h *RoleAllowsGlobWildcards) IsEnabled() bool {
+ return h.Enabled
+}
+
+func (h *RoleAllowsGlobWildcards) DefaultConfig() map[string]interface{} {
+ return map[string]interface{}{}
+}
+
+func (h *RoleAllowsGlobWildcards) LoadConfig(config map[string]interface{}) error {
+ enabled, err := parseutil.ParseBool(config["enabled"])
+ if err != nil {
+ return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err)
+ }
+ h.Enabled = enabled
+
+ return nil
+}
+
+func (h *RoleAllowsGlobWildcards) FetchResources(e *Executor) error {
+ exit, f, roles, err := pkiFetchRolesList(e, func() {
+ h.UnsupportedVersion = true
+ })
+ if exit || err != nil {
+ if f != nil && f.IsSecretPermissionsError() {
+ h.RoleListFetchIssue = f
+ }
+ return err
+ }
+
+ for _, role := range roles {
+ skip, f, entry, err := pkiFetchRole(e, role, func() {
+ h.UnsupportedVersion = true
+ })
+ if skip || err != nil || entry == nil {
+ if f != nil && f.IsSecretPermissionsError() {
+ h.RoleFetchIssues[role] = f
+ }
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ h.RoleEntryMap[role] = entry
+ }
+
+ return nil
+}
+
+func (h *RoleAllowsGlobWildcards) Evaluate(e *Executor) (results []*Result, err error) {
+ if h.UnsupportedVersion {
+ // Shouldn't happen; roles have been around forever.
+ ret := Result{
+ Status: ResultInvalidVersion,
+ Endpoint: "/{{mount}}/roles",
+ Message: "This health check requires Vault 1.11+ but an earlier version of Vault Server was contacted, preventing this health check from running.",
+ }
+ return []*Result{&ret}, nil
+ }
+ if h.RoleListFetchIssue != nil && h.RoleListFetchIssue.IsSecretPermissionsError() {
+ ret := Result{
+ Status: ResultInsufficientPermissions,
+ Endpoint: h.RoleListFetchIssue.Path,
+ Message: "lacks permission to list the roles. 
This restricts the ability to fully execute this health check.", + } + if e.Client.Token() == "" { + ret.Message = "No token available and so this health check " + ret.Message + } else { + ret.Message = "This token " + ret.Message + } + return []*Result{&ret}, nil + } + + for role, fetchPath := range h.RoleFetchIssues { + if fetchPath != nil && fetchPath.IsSecretPermissionsError() { + delete(h.RoleEntryMap, role) + ret := Result{ + Status: ResultInsufficientPermissions, + Endpoint: fetchPath.Path, + Message: "Without this information, this health check is unable to function.", + } + + if e.Client.Token() == "" { + ret.Message = "No token available so unable for the endpoint for this mount. " + ret.Message + } else { + ret.Message = "This token lacks permission the endpoint for this mount. " + ret.Message + } + + results = append(results, &ret) + } + } + + for role, entry := range h.RoleEntryMap { + allowsWildcards, present := entry["allow_wildcard_certificates"] + if !present { + ret := Result{ + Status: ResultInvalidVersion, + Endpoint: "/{{mount}}/roles", + Message: "This health check requires a version of Vault with allow_wildcard_certificates (Vault 1.8.9+, 1.9.4+, or 1.10.0+), but an earlier version of Vault Server was contacted, preventing this health check from running.", + } + return []*Result{&ret}, nil + } + if !allowsWildcards.(bool) { + continue + } + + allowsGlobs := entry["allow_glob_domains"].(bool) + if !allowsGlobs { + continue + } + + rawAllowedDomains := entry["allowed_domains"].([]interface{}) + var allowedDomains []string + for _, rawDomain := range rawAllowedDomains { + allowedDomains = append(allowedDomains, rawDomain.(string)) + } + + if len(allowedDomains) == 0 { + continue + } + + hasGlobbedDomain := false + for _, domain := range allowedDomains { + if strings.Contains(domain, "*") { + hasGlobbedDomain = true + break + } + } + + if !hasGlobbedDomain { + continue + } + + ret := Result{ + Status: ResultWarning, + Endpoint: "/{{mount}}/roles/" + role, + Message: fmt.Sprintf("Role currently allows wildcard issuance while allowing globs in allowed_domains (%v). Because globs can expand to one or more wildcard character, including wildcards under additional subdomains, these options are dangerous to enable together. If glob domains are required to be enabled, it is suggested to either disable wildcard issuance if not desired, or create two separate roles -- one with wildcard issuance for specified domains and one with glob matching enabled for concrete domain identifiers.", allowedDomains), + } + + results = append(results, &ret) + } + + if len(results) == 0 && len(h.RoleEntryMap) > 0 { + ret := Result{ + Status: ResultOK, + Endpoint: "/{{mount}}/roles", + Message: "Roles follow best practices regarding restricting wildcard certificate issuance in roles.", + } + + results = append(results, &ret) + } + + return +} diff --git a/command/healthcheck/pki_role_allows_localhost.go b/command/healthcheck/pki_role_allows_localhost.go new file mode 100644 index 0000000..0c9b780 --- /dev/null +++ b/command/healthcheck/pki_role_allows_localhost.go @@ -0,0 +1,161 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package healthcheck + +import ( + "fmt" + + "github.com/hashicorp/go-secure-stdlib/parseutil" +) + +type RoleAllowsLocalhost struct { + Enabled bool + UnsupportedVersion bool + + RoleListFetchIssue *PathFetch + RoleFetchIssues map[string]*PathFetch + RoleEntryMap map[string]map[string]interface{} +} + +func NewRoleAllowsLocalhostCheck() Check { + return &RoleAllowsLocalhost{ + RoleFetchIssues: make(map[string]*PathFetch), + RoleEntryMap: make(map[string]map[string]interface{}), + } +} + +func (h *RoleAllowsLocalhost) Name() string { + return "role_allows_localhost" +} + +func (h *RoleAllowsLocalhost) IsEnabled() bool { + return h.Enabled +} + +func (h *RoleAllowsLocalhost) DefaultConfig() map[string]interface{} { + return map[string]interface{}{} +} + +func (h *RoleAllowsLocalhost) LoadConfig(config map[string]interface{}) error { + enabled, err := parseutil.ParseBool(config["enabled"]) + if err != nil { + return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err) + } + h.Enabled = enabled + + return nil +} + +func (h *RoleAllowsLocalhost) FetchResources(e *Executor) error { + exit, f, roles, err := pkiFetchRolesList(e, func() { + h.UnsupportedVersion = true + }) + if exit || err != nil { + if f != nil && f.IsSecretPermissionsError() { + h.RoleListFetchIssue = f + } + return err + } + + for _, role := range roles { + skip, f, entry, err := pkiFetchRole(e, role, func() { + h.UnsupportedVersion = true + }) + if skip || err != nil || entry == nil { + if f != nil && f.IsSecretPermissionsError() { + h.RoleFetchIssues[role] = f + } + if err != nil { + return err + } + continue + } + + h.RoleEntryMap[role] = entry + } + + return nil +} + +func (h *RoleAllowsLocalhost) Evaluate(e *Executor) (results []*Result, err error) { + if h.UnsupportedVersion { + // Shouldn't happen; roles have been around forever. + ret := Result{ + Status: ResultInvalidVersion, + Endpoint: "/{{mount}}/roles", + Message: "This health check requires Vault 1.11+ but an earlier version of Vault Server was contacted, preventing this health check from running.", + } + return []*Result{&ret}, nil + } + + if h.RoleListFetchIssue != nil && h.RoleListFetchIssue.IsSecretPermissionsError() { + ret := Result{ + Status: ResultInsufficientPermissions, + Endpoint: h.RoleListFetchIssue.Path, + Message: "lacks permission either to list the roles. This restricts the ability to fully execute this health check.", + } + if e.Client.Token() == "" { + ret.Message = "No token available and so this health check " + ret.Message + } else { + ret.Message = "This token " + ret.Message + } + return []*Result{&ret}, nil + } + + for role, fetchPath := range h.RoleFetchIssues { + if fetchPath != nil && fetchPath.IsSecretPermissionsError() { + delete(h.RoleEntryMap, role) + ret := Result{ + Status: ResultInsufficientPermissions, + Endpoint: fetchPath.Path, + Message: "Without this information, this health check is unable to function.", + } + + if e.Client.Token() == "" { + ret.Message = "No token available so unable for the endpoint for this mount. " + ret.Message + } else { + ret.Message = "This token lacks permission the endpoint for this mount. 
" + ret.Message + } + + results = append(results, &ret) + } + } + + for role, entry := range h.RoleEntryMap { + allowsLocalhost := entry["allow_localhost"].(bool) + if !allowsLocalhost { + continue + } + + rawAllowedDomains := entry["allowed_domains"].([]interface{}) + var allowedDomains []string + for _, rawDomain := range rawAllowedDomains { + allowedDomains = append(allowedDomains, rawDomain.(string)) + } + + if len(allowedDomains) == 0 { + continue + } + + ret := Result{ + Status: ResultWarning, + Endpoint: "/{{mount}}/roles/" + role, + Message: fmt.Sprintf("Role currently allows localhost issuance with a non-empty allowed_domains (%v): this role is intended for issuing other hostnames and the allow_localhost=true option may be overlooked by operators. If this role is intended to issue certificates valid for localhost, consider setting allow_localhost=false and explicitly adding localhost to the list of allowed domains.", allowedDomains), + } + + results = append(results, &ret) + } + + if len(results) == 0 && len(h.RoleEntryMap) > 0 { + ret := Result{ + Status: ResultOK, + Endpoint: "/{{mount}}/roles", + Message: "Roles follow best practices regarding allowing issuance for localhost domains.", + } + + results = append(results, &ret) + } + + return +} diff --git a/command/healthcheck/pki_role_no_store_false.go b/command/healthcheck/pki_role_no_store_false.go new file mode 100644 index 0000000..882955e --- /dev/null +++ b/command/healthcheck/pki_role_no_store_false.go @@ -0,0 +1,200 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package healthcheck + +import ( + "fmt" + + "github.com/hashicorp/vault/sdk/logical" + + "github.com/hashicorp/go-secure-stdlib/parseutil" +) + +type RoleNoStoreFalse struct { + Enabled bool + UnsupportedVersion bool + + AllowedRoles map[string]bool + + RoleListFetchIssue *PathFetch + RoleFetchIssues map[string]*PathFetch + RoleEntryMap map[string]map[string]interface{} + CRLConfig *PathFetch +} + +func NewRoleNoStoreFalseCheck() Check { + return &RoleNoStoreFalse{ + RoleFetchIssues: make(map[string]*PathFetch), + AllowedRoles: make(map[string]bool), + RoleEntryMap: make(map[string]map[string]interface{}), + } +} + +func (h *RoleNoStoreFalse) Name() string { + return "role_no_store_false" +} + +func (h *RoleNoStoreFalse) IsEnabled() bool { + return h.Enabled +} + +func (h *RoleNoStoreFalse) DefaultConfig() map[string]interface{} { + return map[string]interface{}{ + "allowed_roles": []string{}, + } +} + +func (h *RoleNoStoreFalse) LoadConfig(config map[string]interface{}) error { + value, present := config["allowed_roles"].([]interface{}) + if present { + for _, rawValue := range value { + h.AllowedRoles[rawValue.(string)] = true + } + } + + enabled, err := parseutil.ParseBool(config["enabled"]) + if err != nil { + return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err) + } + h.Enabled = enabled + + return nil +} + +func (h *RoleNoStoreFalse) FetchResources(e *Executor) error { + exit, f, roles, err := pkiFetchRolesList(e, func() { + h.UnsupportedVersion = true + }) + if exit || err != nil { + if f != nil && f.IsSecretPermissionsError() { + h.RoleListFetchIssue = f + } + return err + } + + for _, role := range roles { + skip, f, entry, err := pkiFetchRole(e, role, func() { + h.UnsupportedVersion = true + }) + if skip || err != nil || entry == nil { + if f != nil && f.IsSecretPermissionsError() { + h.RoleFetchIssues[role] = f + } + if err != nil { + return err + } + continue + } + + h.RoleEntryMap[role] = entry + } + + // 
Fetch the mount's CRL configuration so we can check whether auto_rebuild is enabled.
+	configRet, err := e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/config/crl")
+	if err != nil {
+		return err
+	}
+
+	h.CRLConfig = configRet
+
+	return nil
+}
+
+func (h *RoleNoStoreFalse) Evaluate(e *Executor) (results []*Result, err error) {
+	if h.UnsupportedVersion {
+		// Shouldn't happen; roles have been around forever.
+		ret := Result{
+			Status:   ResultInvalidVersion,
+			Endpoint: "/{{mount}}/roles",
+			Message:  "This health check requires Vault 1.11+ but an earlier version of Vault Server was contacted, preventing this health check from running.",
+		}
+		return []*Result{&ret}, nil
+	}
+
+	if h.RoleListFetchIssue != nil && h.RoleListFetchIssue.IsSecretPermissionsError() {
+		ret := Result{
+			Status:   ResultInsufficientPermissions,
+			Endpoint: h.RoleListFetchIssue.Path,
+			Message:  "lacks permission to list the roles. This restricts the ability to fully execute this health check.",
+		}
+		if e.Client.Token() == "" {
+			ret.Message = "No token available and so this health check " + ret.Message
+		} else {
+			ret.Message = "This token " + ret.Message
+		}
+		return []*Result{&ret}, nil
+	}
+
+	for role, fetchPath := range h.RoleFetchIssues {
+		if fetchPath != nil && fetchPath.IsSecretPermissionsError() {
+			delete(h.RoleEntryMap, role)
+			ret := Result{
+				Status:   ResultInsufficientPermissions,
+				Endpoint: fetchPath.Path,
+				Message:  "Without this information, this health check is unable to function.",
+			}
+
+			if e.Client.Token() == "" {
+				ret.Message = "No token available, so this health check was unable to read the role endpoint for this mount. " + ret.Message
+			} else {
+				ret.Message = "This token lacks permission to read the role endpoint for this mount. " + ret.Message
+			}
+
+			results = append(results, &ret)
+		}
+	}
+
+	crlAutoRebuild := false
+	if h.CRLConfig != nil {
+		if h.CRLConfig.IsSecretPermissionsError() {
+			ret := Result{
+				Status:   ResultInsufficientPermissions,
+				Endpoint: "/{{mount}}/config/crl",
+				Message:  "This prevents the health check from seeing if the CRL is set to auto_rebuild=true and lowering the severity of check results appropriately.",
+			}
+
+			if e.Client.Token() == "" {
+				ret.Message = "No token available, so this health check was unable to read the authenticated CRL configuration for this mount. " + ret.Message
+			} else {
+				ret.Message = "This token lacks permission to read the CRL configuration for this mount. " + ret.Message
+			}
+
+			results = append(results, &ret)
+		} else if h.CRLConfig.Secret != nil && h.CRLConfig.Secret.Data["auto_rebuild"] != nil {
+			crlAutoRebuild = h.CRLConfig.Secret.Data["auto_rebuild"].(bool)
+		}
+	}
+
+	for role, entry := range h.RoleEntryMap {
+		noStore := entry["no_store"].(bool)
+		if noStore {
+			continue
+		}
+
+		ret := Result{
+			Status:   ResultWarning,
+			Endpoint: "/{{mount}}/roles/" + role,
+			Message:  "Role currently stores every issued certificate (no_store=false). Too many issued and/or revoked certificates can exceed Vault's storage limits and make operations slow. It is encouraged to enable auto-rebuild of CRLs to prevent every revocation from creating a new CRL, and to limit the number of certificates issued under roles with no_store=false: use shorter lifetimes and/or BYOC revocation instead.",
+		}
+
+		if crlAutoRebuild {
+			ret.Status = ResultInformational
+			ret.Message = "Role currently stores every issued certificate (no_store=false). With CRL auto-rebuild enabled, rebuilding the CRL has less performance impact, but note that too many issued and/or revoked certificates can still exceed Vault's storage limits and make operations slow. 
It is suggested to limit the number of certificates issued under roles with no_store=false: use shorter lifetimes to avoid revocation and/or BYOC revocation instead." + } + + results = append(results, &ret) + } + + if len(results) == 0 && len(h.RoleEntryMap) > 0 { + ret := Result{ + Status: ResultOK, + Endpoint: "/{{mount}}/roles", + Message: "Roles follow best practices regarding certificate storage.", + } + + results = append(results, &ret) + } + + return +} diff --git a/command/healthcheck/pki_root_issued_leaves.go b/command/healthcheck/pki_root_issued_leaves.go new file mode 100644 index 0000000..615684b --- /dev/null +++ b/command/healthcheck/pki_root_issued_leaves.go @@ -0,0 +1,205 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package healthcheck + +import ( + "bytes" + "crypto/x509" + "fmt" + + "github.com/hashicorp/go-secure-stdlib/parseutil" +) + +type RootIssuedLeaves struct { + Enabled bool + UnsupportedVersion bool + + CertsToFetch int + + FetchIssues map[string]*PathFetch + RootCertMap map[string]*x509.Certificate + LeafCertMap map[string]*x509.Certificate +} + +func NewRootIssuedLeavesCheck() Check { + return &RootIssuedLeaves{ + FetchIssues: make(map[string]*PathFetch), + RootCertMap: make(map[string]*x509.Certificate), + LeafCertMap: make(map[string]*x509.Certificate), + } +} + +func (h *RootIssuedLeaves) Name() string { + return "root_issued_leaves" +} + +func (h *RootIssuedLeaves) IsEnabled() bool { + return h.Enabled +} + +func (h *RootIssuedLeaves) DefaultConfig() map[string]interface{} { + return map[string]interface{}{ + "certs_to_fetch": 100, + } +} + +func (h *RootIssuedLeaves) LoadConfig(config map[string]interface{}) error { + count, err := parseutil.SafeParseIntRange(config["certs_to_fetch"], 1, 100000) + if err != nil { + return fmt.Errorf("error parsing %v.certs_to_fetch: %w", h.Name(), err) + } + h.CertsToFetch = int(count) + + enabled, err := parseutil.ParseBool(config["enabled"]) + if err != nil { + return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err) + } + h.Enabled = enabled + + return nil +} + +func (h *RootIssuedLeaves) FetchResources(e *Executor) error { + exit, _, issuers, err := pkiFetchIssuersList(e, func() { + h.UnsupportedVersion = true + }) + if exit || err != nil { + return err + } + + for _, issuer := range issuers { + skip, pathFetch, cert, err := pkiFetchIssuer(e, issuer, func() { + h.UnsupportedVersion = true + }) + h.FetchIssues[issuer] = pathFetch + if skip || err != nil { + if err != nil { + return err + } + continue + } + + // Ensure we only check Root CAs. + if !bytes.Equal(cert.RawSubject, cert.RawIssuer) { + continue + } + if err := cert.CheckSignatureFrom(cert); err != nil { + continue + } + + h.RootCertMap[issuer] = cert + } + + exit, f, leaves, err := pkiFetchLeavesList(e, func() { + h.UnsupportedVersion = true + }) + if exit || err != nil { + if f != nil && f.IsSecretPermissionsError() { + for _, issuer := range issuers { + h.FetchIssues[issuer] = f + } + } + return err + } + + var leafCount int + for _, serial := range leaves { + if leafCount >= h.CertsToFetch { + break + } + + skip, _, cert, err := pkiFetchLeaf(e, serial, func() { + h.UnsupportedVersion = true + }) + if skip || err != nil { + if err != nil { + return err + } + continue + } + + // Ignore other CAs. 
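+		// (Certificates that are themselves CAs, such as intermediates issued
+		// by this root, are expected and are skipped here, so that only true
+		// end-entity certificates are counted against the root.)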
+ if cert.BasicConstraintsValid && cert.IsCA { + continue + } + + leafCount += 1 + h.LeafCertMap[serial] = cert + } + + return nil +} + +func (h *RootIssuedLeaves) Evaluate(e *Executor) (results []*Result, err error) { + if h.UnsupportedVersion { + ret := Result{ + Status: ResultInvalidVersion, + Endpoint: "/{{mount}}/issuers", + Message: "This health check requires Vault 1.11+ but an earlier version of Vault Server was contacted, preventing this health check from running.", + } + return []*Result{&ret}, nil + } + + for issuer, fetchPath := range h.FetchIssues { + if fetchPath != nil && fetchPath.IsSecretPermissionsError() { + delete(h.RootCertMap, issuer) + ret := Result{ + Status: ResultInsufficientPermissions, + Endpoint: fetchPath.Path, + Message: "Without this information, this health check is unable to function.", + } + + if e.Client.Token() == "" { + ret.Message = "No token available so unable for the endpoint for this mount. " + ret.Message + } else { + ret.Message = "This token lacks permission for the endpoint for this mount. " + ret.Message + } + + results = append(results, &ret) + } + } + + issuerHasLeaf := make(map[string]bool) + for serial, leaf := range h.LeafCertMap { + if len(issuerHasLeaf) == len(h.RootCertMap) { + break + } + + for issuer, root := range h.RootCertMap { + if issuerHasLeaf[issuer] { + continue + } + + if !bytes.Equal(leaf.RawIssuer, root.RawSubject) { + continue + } + + if err := leaf.CheckSignatureFrom(root); err != nil { + continue + } + + ret := Result{ + Status: ResultWarning, + Endpoint: "/{{mount}}/issuer/" + issuer, + Message: fmt.Sprintf("Root issuer has directly issued non-CA leaf certificates (%v) instead of via an intermediate CA. This can make rotating the root CA harder as direct cross-signing of the roots must be used, rather than cross-signing of the intermediates. It is encouraged to set up and use an intermediate CA and tidy the mount when all directly issued leaves have expired.", serial), + } + + issuerHasLeaf[issuer] = true + + results = append(results, &ret) + } + } + + if len(results) == 0 && len(h.RootCertMap) > 0 { + ret := Result{ + Status: ResultOK, + Endpoint: "/{{mount}}/certs", + Message: "Root certificate(s) in this mount have not directly issued non-CA leaf certificates.", + } + + results = append(results, &ret) + } + + return +} diff --git a/command/healthcheck/pki_tidy_last_run.go b/command/healthcheck/pki_tidy_last_run.go new file mode 100644 index 0000000..9d07369 --- /dev/null +++ b/command/healthcheck/pki_tidy_last_run.go @@ -0,0 +1,147 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package healthcheck
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/hashicorp/vault/sdk/logical"
+
+	"github.com/hashicorp/go-secure-stdlib/parseutil"
+)
+
+type TidyLastRun struct {
+	Enabled            bool
+	UnsupportedVersion bool
+
+	LastRunCritical time.Duration
+	LastRunWarning  time.Duration
+
+	TidyStatus *PathFetch
+}
+
+func NewTidyLastRunCheck() Check {
+	return &TidyLastRun{}
+}
+
+func (h *TidyLastRun) Name() string {
+	return "tidy_last_run"
+}
+
+func (h *TidyLastRun) IsEnabled() bool {
+	return h.Enabled
+}
+
+func (h *TidyLastRun) DefaultConfig() map[string]interface{} {
+	return map[string]interface{}{
+		"last_run_critical": "7d",
+		"last_run_warning":  "2d",
+	}
+}
+
+func (h *TidyLastRun) LoadConfig(config map[string]interface{}) error {
+	var err error
+	h.LastRunCritical, err = parseutil.ParseDurationSecond(config["last_run_critical"])
+	if err != nil {
+		return fmt.Errorf("failed to parse parameter %v.%v=%v: %w", h.Name(), "last_run_critical", config["last_run_critical"], err)
+	}
+
+	h.LastRunWarning, err = parseutil.ParseDurationSecond(config["last_run_warning"])
+	if err != nil {
+		return fmt.Errorf("failed to parse parameter %v.%v=%v: %w", h.Name(), "last_run_warning", config["last_run_warning"], err)
+	}
+
+	enabled, err := parseutil.ParseBool(config["enabled"])
+	if err != nil {
+		return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err)
+	}
+	h.Enabled = enabled
+
+	return nil
+}
+
+func (h *TidyLastRun) FetchResources(e *Executor) error {
+	var err error
+
+	h.TidyStatus, err = e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/tidy-status")
+	if err != nil {
+		return fmt.Errorf("failed to fetch mount's tidy-status value: %v", err)
+	}
+
+	if h.TidyStatus.IsUnsupportedPathError() {
+		h.UnsupportedVersion = true
+	}
+
+	return nil
+}
+
+func (h *TidyLastRun) Evaluate(e *Executor) (results []*Result, err error) {
+	if h.UnsupportedVersion {
+		// Shouldn't happen; the tidy-status endpoint has been present since Vault 1.10.
+		ret := Result{
+			Status:   ResultInvalidVersion,
+			Endpoint: "/{{mount}}/tidy-status",
+			Message:  "This health check requires Vault 1.10+ but an earlier version of Vault Server was contacted, preventing this health check from running.",
+		}
+		return []*Result{&ret}, nil
+	}
+
+	if h.TidyStatus == nil {
+		return nil, nil
+	}
+
+	if h.TidyStatus.IsSecretPermissionsError() {
+		ret := Result{
+			Status:   ResultInsufficientPermissions,
+			Endpoint: "/{{mount}}/tidy-status",
+			Message:  "Without this information, this health check is unable to function.",
+		}
+
+		if e.Client.Token() == "" {
+			ret.Message = "No token available, so this health check was unable to read the tidy status endpoint for this mount. " + ret.Message
+		} else {
+			ret.Message = "This token lacks permission to read the tidy status endpoint for this mount. " + ret.Message
+		}
+
+		results = append(results, &ret)
+	}
+
+	baseMsg := "Tidy hasn't run in the last %v; this can point to problems with the mount's auto-tidy configuration or an external tidy executor; this can impact PKI's and Vault's performance if not run regularly."
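+	// Illustrative reading of the thresholds applied below: with the defaults
+	// of last_run_warning=2d and last_run_critical=7d, a tidy run that last
+	// finished 3 days ago produces ResultWarning, while one that finished 8
+	// days ago produces ResultCritical.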
+ + if h.TidyStatus.Secret != nil && h.TidyStatus.Secret.Data != nil { + ret := Result{ + Status: ResultOK, + Endpoint: "/{{mount}}/tidy-status", + Message: "Tidy has run recently on this mount.", + } + + when := h.TidyStatus.Secret.Data["time_finished"] + if when == nil { + ret.Status = ResultCritical + ret.Message = "Tidy hasn't run since this mount was created; this can point to problems with the mount's auto-tidy configuration or an external tidy executor; this can impact PKI's and Vault's performance if not run regularly. It is suggested to enable auto-tidy on this mount." + } else { + now := time.Now() + lastRunCritical := now.Add(-1 * h.LastRunCritical) + lastRunWarning := now.Add(-1 * h.LastRunWarning) + + whenT, err := parseutil.ParseAbsoluteTime(when) + if err != nil { + return nil, fmt.Errorf("error parsing time value (%v): %w", when, err) + } + + if whenT.Before(lastRunCritical) { + ret.Status = ResultCritical + ret.Message = fmt.Sprintf(baseMsg, h.LastRunCritical) + } else if whenT.Before(lastRunWarning) { + ret.Status = ResultWarning + ret.Message = fmt.Sprintf(baseMsg, h.LastRunWarning) + } + } + + results = append(results, &ret) + } + + return +} diff --git a/command/healthcheck/pki_too_many_certs.go b/command/healthcheck/pki_too_many_certs.go new file mode 100644 index 0000000..59722ab --- /dev/null +++ b/command/healthcheck/pki_too_many_certs.go @@ -0,0 +1,124 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package healthcheck + +import ( + "fmt" + + "github.com/hashicorp/go-secure-stdlib/parseutil" +) + +type TooManyCerts struct { + Enabled bool + UnsupportedVersion bool + + CountCritical int + CountWarning int + + CertCounts int + FetchIssue *PathFetch +} + +func NewTooManyCertsCheck() Check { + return &TooManyCerts{} +} + +func (h *TooManyCerts) Name() string { + return "too_many_certs" +} + +func (h *TooManyCerts) IsEnabled() bool { + return h.Enabled +} + +func (h *TooManyCerts) DefaultConfig() map[string]interface{} { + return map[string]interface{}{ + "count_critical": 250000, + "count_warning": 50000, + } +} + +func (h *TooManyCerts) LoadConfig(config map[string]interface{}) error { + value, err := parseutil.SafeParseIntRange(config["count_critical"], 1, 15000000) + if err != nil { + return fmt.Errorf("error parsing %v.count_critical: %w", h.Name(), err) + } + h.CountCritical = int(value) + + value, err = parseutil.SafeParseIntRange(config["count_warning"], 1, 15000000) + if err != nil { + return fmt.Errorf("error parsing %v.count_warning: %w", h.Name(), err) + } + h.CountWarning = int(value) + + h.Enabled, err = parseutil.ParseBool(config["enabled"]) + if err != nil { + return fmt.Errorf("error parsing %v.enabled: %w", h.Name(), err) + } + + return nil +} + +func (h *TooManyCerts) FetchResources(e *Executor) error { + exit, leavesRet, _, err := pkiFetchLeavesList(e, func() { + h.UnsupportedVersion = true + }) + h.FetchIssue = leavesRet + + if exit || err != nil { + return err + } + + h.CertCounts = leavesRet.ParsedCache["count"].(int) + + return nil +} + +func (h *TooManyCerts) Evaluate(e *Executor) (results []*Result, err error) { + if h.UnsupportedVersion { + // Shouldn't happen; /certs has been around forever. 
+ ret := Result{ + Status: ResultInvalidVersion, + Endpoint: "/{{mount}}/certs", + Message: "This health check requires Vault 1.11+ but an earlier version of Vault Server was contacted, preventing this health check from running.", + } + return []*Result{&ret}, nil + } + + if h.FetchIssue != nil && h.FetchIssue.IsSecretPermissionsError() { + ret := Result{ + Status: ResultInsufficientPermissions, + Endpoint: h.FetchIssue.Path, + Message: "Without this information, this health check is unable to function.", + } + + if e.Client.Token() == "" { + ret.Message = "No token available so unable to list the endpoint for this mount. " + ret.Message + } else { + ret.Message = "This token lacks permission to list the endpoint for this mount. " + ret.Message + } + + results = append(results, &ret) + return + } + + ret := Result{ + Status: ResultOK, + Endpoint: "/{{mount}}/certs", + Message: "This mount has an OK number of stored certificates.", + } + + baseMsg := "This PKI mount has %v outstanding stored certificates; consider using no_store=false on roles, running tidy operations periodically, and using shorter certificate lifetimes to reduce the storage pressure on this mount." + if h.CertCounts >= h.CountCritical { + ret.Status = ResultCritical + ret.Message = fmt.Sprintf(baseMsg, h.CertCounts) + } else if h.CertCounts >= h.CountWarning { + ret.Status = ResultWarning + ret.Message = fmt.Sprintf(baseMsg, h.CertCounts) + } + + results = append(results, &ret) + + return +} diff --git a/command/healthcheck/shared.go b/command/healthcheck/shared.go new file mode 100644 index 0000000..4097704 --- /dev/null +++ b/command/healthcheck/shared.go @@ -0,0 +1,58 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package healthcheck + +import ( + "fmt" + + "github.com/hashicorp/vault/sdk/logical" +) + +func StringList(source interface{}) ([]string, error) { + if source == nil { + return nil, nil + } + + if value, ok := source.([]string); ok { + return value, nil + } + + if rValues, ok := source.([]interface{}); ok { + var result []string + for index, rValue := range rValues { + value, ok := rValue.(string) + if !ok { + return nil, fmt.Errorf("unknown source type for []string coercion at index %v: %T", index, rValue) + } + + result = append(result, value) + } + + return result, nil + } + + return nil, fmt.Errorf("unknown source type for []string coercion: %T", source) +} + +func fetchMountTune(e *Executor, versionError func()) (bool, *PathFetch, map[string]interface{}, error) { + tuneRet, err := e.FetchIfNotFetched(logical.ReadOperation, "/sys/mounts/{{mount}}/tune") + if err != nil { + return true, nil, nil, fmt.Errorf("failed to fetch mount tune information: %w", err) + } + + if !tuneRet.IsSecretOK() { + if tuneRet.IsUnsupportedPathError() { + versionError() + } + + return true, tuneRet, nil, nil + } + + var data map[string]interface{} = nil + if len(tuneRet.Secret.Data) > 0 { + data = tuneRet.Secret.Data + } + + return false, tuneRet, data, nil +} diff --git a/command/healthcheck/util.go b/command/healthcheck/util.go new file mode 100644 index 0000000..dd5d66e --- /dev/null +++ b/command/healthcheck/util.go @@ -0,0 +1,42 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package healthcheck + +import ( + "fmt" + "time" +) + +var ( + oneDay = 24 * time.Hour + oneWeek = 7 * oneDay + oneMonth = 30 * oneDay + oneYear = 365 * oneDay +) + +var suffixDurationMap = map[string]time.Duration{ + "y": oneYear, + "mo": oneMonth, + "w": oneWeek, + "d": oneDay, +} +var orderedSuffixes = []string{"y", "mo", "w", "d"} + +func FormatDuration(d time.Duration) string { + var result string + for _, suffix := range orderedSuffixes { + unit := suffixDurationMap[suffix] + if d > unit { + quantity := int64(d / unit) + result = fmt.Sprintf("%v%v%v", quantity, suffix, result) + d = d - (time.Duration(quantity) * unit) + } + } + + if d > 0 { + result = d.String() + result + } + + return result +} diff --git a/command/kv.go b/command/kv.go new file mode 100644 index 0000000..b0834c7 --- /dev/null +++ b/command/kv.go @@ -0,0 +1,61 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + + "github.com/mitchellh/cli" +) + +var _ cli.Command = (*KVCommand)(nil) + +type KVCommand struct { + *BaseCommand +} + +func (c *KVCommand) Synopsis() string { + return "Interact with Vault's Key-Value storage" +} + +func (c *KVCommand) Help() string { + helpText := ` +Usage: vault kv [options] [args] + + This command has subcommands for interacting with Vault's key-value + store. Here are some simple examples, and more detailed examples are + available in the subcommands or the documentation. + + Create or update the key named "foo" in the "secret" mount with the value + "bar=baz": + + $ vault kv put -mount=secret foo bar=baz + + Read this value back: + + $ vault kv get -mount=secret foo + + Get metadata for the key: + + $ vault kv metadata get -mount=secret foo + + Get a specific version of the key: + + $ vault kv get -mount=secret -version=1 foo + + The deprecated path-like syntax can also be used, but this should be avoided + for KV v2, as the fact that it is not actually the full API path to + the secret (secret/data/foo) can cause confusion: + + $ vault kv get secret/foo + + Please see the individual subcommand help for detailed usage information. +` + + return strings.TrimSpace(helpText) +} + +func (c *KVCommand) Run(args []string) int { + return cli.RunResultHelp +} diff --git a/command/kv_delete.go b/command/kv_delete.go new file mode 100644 index 0000000..a365c89 --- /dev/null +++ b/command/kv_delete.go @@ -0,0 +1,209 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "path" + "strings" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*KVDeleteCommand)(nil) + _ cli.CommandAutocomplete = (*KVDeleteCommand)(nil) +) + +type KVDeleteCommand struct { + *BaseCommand + + flagVersions []string + flagMount string +} + +func (c *KVDeleteCommand) Synopsis() string { + return "Deletes versions in the KV store" +} + +func (c *KVDeleteCommand) Help() string { + helpText := ` +Usage: vault kv delete [options] PATH + + Deletes the data for the provided version and path in the key-value store. The + versioned data will not be fully removed, but marked as deleted and will no + longer be returned in normal get requests. 
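+  For KV v2 secrets, deleted versions can be restored with "vault kv undelete"
+  or removed permanently with "vault kv destroy".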
+ + To delete the latest version of the key "foo": + + $ vault kv delete -mount=secret foo + + The deprecated path-like syntax can also be used, but this should be avoided + for KV v2, as the fact that it is not actually the full API path to + the secret (secret/data/foo) can cause confusion: + + $ vault kv delete secret/foo + + To delete version 3 of key foo: + + $ vault kv delete -mount=secret -versions=3 foo + + To delete all versions and metadata, see the "vault kv metadata" subcommand. + + Additional flags and more advanced use cases are detailed below. + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *KVDeleteCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) + // Common Options + f := set.NewFlagSet("Common Options") + + f.StringSliceVar(&StringSliceVar{ + Name: "versions", + Target: &c.flagVersions, + Default: nil, + Usage: `Specifies the version numbers to delete.`, + }) + + f.StringVar(&StringVar{ + Name: "mount", + Target: &c.flagMount, + Default: "", // no default, because the handling of the next arg is determined by whether this flag has a value + Usage: `Specifies the path where the KV backend is mounted. If specified, + the next argument will be interpreted as the secret path. If this flag is + not specified, the next argument will be interpreted as the combined mount + path and secret path, with /data/ automatically appended between KV + v2 secrets.`, + }) + + return set +} + +func (c *KVDeleteCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultFiles() +} + +func (c *KVDeleteCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *KVDeleteCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + // If true, we're working with "-mount=secret foo" syntax. + // If false, we're using "secret/foo" syntax. + mountFlagSyntax := c.flagMount != "" + + var ( + mountPath string + partialPath string + v2 bool + ) + + // Parse the paths and grab the KV version + if mountFlagSyntax { + // In this case, this arg is the secret path (e.g. "foo"). + partialPath = sanitizePath(args[0]) + mountPath, v2, err = isKVv2(sanitizePath(c.flagMount), client) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + if v2 { + partialPath = path.Join(mountPath, partialPath) + } + } else { + // In this case, this arg is a path-like combination of mountPath/secretPath. + // (e.g. 
"secret/foo") + partialPath = sanitizePath(args[0]) + mountPath, v2, err = isKVv2(partialPath, client) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + } + + var secret *api.Secret + var fullPath string + if v2 { + secret, err = c.deleteV2(partialPath, mountPath, client) + fullPath = addPrefixToKVPath(partialPath, mountPath, "data", false) + } else { + // v1 + if mountFlagSyntax { + fullPath = path.Join(mountPath, partialPath) + } else { + fullPath = partialPath + } + secret, err = client.Logical().Delete(fullPath) + } + + if err != nil { + c.UI.Error(fmt.Sprintf("Error deleting %s: %s", fullPath, err)) + if secret != nil { + OutputSecret(c.UI, secret) + } + return 2 + } + + if secret == nil { + // Don't output anything unless using the "table" format + if Format(c.UI) == "table" { + c.UI.Info(fmt.Sprintf("Success! Data deleted (if it existed) at: %s", fullPath)) + } + return 0 + } + + if c.flagField != "" { + return PrintRawField(c.UI, secret, c.flagField) + } + + return OutputSecret(c.UI, secret) +} + +func (c *KVDeleteCommand) deleteV2(path, mountPath string, client *api.Client) (*api.Secret, error) { + var err error + var secret *api.Secret + switch { + case len(c.flagVersions) > 0: + path = addPrefixToKVPath(path, mountPath, "delete", false) + data := map[string]interface{}{ + "versions": kvParseVersionsFlags(c.flagVersions), + } + secret, err = client.Logical().Write(path, data) + default: + path = addPrefixToKVPath(path, mountPath, "data", false) + secret, err = client.Logical().Delete(path) + } + + return secret, err +} diff --git a/command/kv_destroy.go b/command/kv_destroy.go new file mode 100644 index 0000000..1167ec8 --- /dev/null +++ b/command/kv_destroy.go @@ -0,0 +1,185 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "path" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*KVDestroyCommand)(nil) + _ cli.CommandAutocomplete = (*KVDestroyCommand)(nil) +) + +type KVDestroyCommand struct { + *BaseCommand + + flagVersions []string + flagMount string +} + +func (c *KVDestroyCommand) Synopsis() string { + return "Permanently removes one or more versions in the KV store" +} + +func (c *KVDestroyCommand) Help() string { + helpText := ` +Usage: vault kv destroy [options] KEY + + Permanently removes the specified versions' data from the key-value store. If + no key exists at the path, no action is taken. + + To destroy version 3 of key foo: + + $ vault kv destroy -mount=secret -versions=3 foo + + The deprecated path-like syntax can also be used, but this should be avoided + for KV v2, as the fact that it is not actually the full API path to + the secret (secret/data/foo) can cause confusion: + + $ vault kv destroy -versions=3 secret/foo + + Additional flags and more advanced use cases are detailed below. + +` + c.Flags().Help() + return strings.TrimSpace(helpText) +} + +func (c *KVDestroyCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + // Common Options + f := set.NewFlagSet("Common Options") + + f.StringSliceVar(&StringSliceVar{ + Name: "versions", + Target: &c.flagVersions, + Default: nil, + Usage: `Specifies the version numbers to destroy.`, + }) + + f.StringVar(&StringVar{ + Name: "mount", + Target: &c.flagMount, + Default: "", // no default, because the handling of the next arg is determined by whether this flag has a value + Usage: `Specifies the path where the KV backend is mounted. 
If specified, + the next argument will be interpreted as the secret path. If this flag is + not specified, the next argument will be interpreted as the combined mount + path and secret path, with /data/ automatically appended between KV + v2 secrets.`, + }) + + return set +} + +func (c *KVDestroyCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *KVDestroyCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *KVDestroyCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + if len(c.flagVersions) == 0 { + c.UI.Error("No versions provided, use the \"-versions\" flag to specify the version to destroy.") + return 1 + } + + var err error + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + // If true, we're working with "-mount=secret foo" syntax. + // If false, we're using "secret/foo" syntax. + mountFlagSyntax := c.flagMount != "" + + var ( + mountPath string + partialPath string + v2 bool + ) + + // Parse the paths and grab the KV version + if mountFlagSyntax { + // In this case, this arg is the secret path (e.g. "foo"). + partialPath = sanitizePath(args[0]) + mountPath, v2, err = isKVv2(sanitizePath(c.flagMount), client) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + if v2 { + partialPath = path.Join(mountPath, partialPath) + } + } else { + // In this case, this arg is a path-like combination of mountPath/secretPath. + // (e.g. "secret/foo") + partialPath = sanitizePath(args[0]) + mountPath, v2, err = isKVv2(partialPath, client) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + } + + if !v2 { + c.UI.Error("Destroy not supported on KV Version 1") + return 1 + } + destroyPath := addPrefixToKVPath(partialPath, mountPath, "destroy", false) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + data := map[string]interface{}{ + "versions": kvParseVersionsFlags(c.flagVersions), + } + + secret, err := client.Logical().Write(destroyPath, data) + if err != nil { + c.UI.Error(fmt.Sprintf("Error writing data to %s: %s", destroyPath, err)) + if secret != nil { + OutputSecret(c.UI, secret) + } + return 2 + } + if secret == nil { + // Don't output anything unless using the "table" format + if Format(c.UI) == "table" { + c.UI.Info(fmt.Sprintf("Success! Data written to: %s", destroyPath)) + } + return 0 + } + + return OutputSecret(c.UI, secret) +} diff --git a/command/kv_enable_versioning.go b/command/kv_enable_versioning.go new file mode 100644 index 0000000..8282dd2 --- /dev/null +++ b/command/kv_enable_versioning.go @@ -0,0 +1,94 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*KVEnableVersioningCommand)(nil) + _ cli.CommandAutocomplete = (*KVEnableVersioningCommand)(nil) +) + +type KVEnableVersioningCommand struct { + *BaseCommand +} + +func (c *KVEnableVersioningCommand) Synopsis() string { + return "Turns on versioning for a KV store" +} + +func (c *KVEnableVersioningCommand) Help() string { + helpText := ` +Usage: vault kv enable-versioning [options] KEY + + This command turns on versioning for the backend at the provided path. + + $ vault kv enable-versioning secret + + Additional flags and more advanced use cases are detailed below. + +` + c.Flags().Help() + return strings.TrimSpace(helpText) +} + +func (c *KVEnableVersioningCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + return set +} + +func (c *KVEnableVersioningCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *KVEnableVersioningCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *KVEnableVersioningCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + // Append a trailing slash to indicate it's a path in output + mountPath := ensureTrailingSlash(sanitizePath(args[0])) + + if err := client.Sys().TuneMount(mountPath, api.MountConfigInput{ + Options: map[string]string{ + "version": "2", + }, + }); err != nil { + c.UI.Error(fmt.Sprintf("Error tuning secrets engine %s: %s", mountPath, err)) + return 2 + } + + c.UI.Output(fmt.Sprintf("Success! Tuned the secrets engine at: %s", mountPath)) + return 0 +} diff --git a/command/kv_get.go b/command/kv_get.go new file mode 100644 index 0000000..1d0e330 --- /dev/null +++ b/command/kv_get.go @@ -0,0 +1,243 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "path" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*KVGetCommand)(nil) + _ cli.CommandAutocomplete = (*KVGetCommand)(nil) +) + +type KVGetCommand struct { + *BaseCommand + + flagVersion int + flagMount string +} + +func (c *KVGetCommand) Synopsis() string { + return "Retrieves data from the KV store" +} + +func (c *KVGetCommand) Help() string { + helpText := ` +Usage: vault kv get [options] KEY + + Retrieves the value from Vault's key-value store at the given key name. If no + key exists with that name, an error is returned. If a key exists with that + name but has no data, nothing is returned. 
+ + $ vault kv get -mount=secret foo + + The deprecated path-like syntax can also be used, but this should be avoided + for KV v2, as the fact that it is not actually the full API path to + the secret (secret/data/foo) can cause confusion: + + $ vault kv get secret/foo + + To view the given key name at a specific version in time, specify the "-version" + flag: + + $ vault kv get -mount=secret -version=1 foo + + Additional flags and more advanced use cases are detailed below. + +` + c.Flags().Help() + return strings.TrimSpace(helpText) +} + +func (c *KVGetCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) + + // Common Options + f := set.NewFlagSet("Common Options") + + f.IntVar(&IntVar{ + Name: "version", + Target: &c.flagVersion, + Default: 0, + Usage: `If passed, the value at the version number will be returned.`, + }) + + f.StringVar(&StringVar{ + Name: "mount", + Target: &c.flagMount, + Default: "", // no default, because the handling of the next arg is determined by whether this flag has a value + Usage: `Specifies the path where the KV backend is mounted. If specified, + the next argument will be interpreted as the secret path. If this flag is + not specified, the next argument will be interpreted as the combined mount + path and secret path, with /data/ automatically appended between KV + v2 secrets.`, + }) + + return set +} + +func (c *KVGetCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultFiles() +} + +func (c *KVGetCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *KVGetCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + // If true, we're working with "-mount=secret foo" syntax. + // If false, we're using "secret/foo" syntax. + mountFlagSyntax := c.flagMount != "" + + var ( + mountPath string + v2 bool + ) + + // Ignore leading slash + partialPath := strings.TrimPrefix(args[0], "/") + + // Parse the paths and grab the KV version + if mountFlagSyntax { + // In this case, this arg is the secret path (e.g. "foo"). + mountPath, v2, err = isKVv2(sanitizePath(c.flagMount), client) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + if v2 { + partialPath = path.Join(mountPath, partialPath) + } + } else { + // In this case, this arg is a path-like combination of mountPath/secretPath. + // (e.g. 
"secret/foo") + mountPath, v2, err = isKVv2(partialPath, client) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + } + + var versionParam map[string]string + var fullPath string + // Add /data to v2 paths only + if v2 { + fullPath = addPrefixToKVPath(partialPath, mountPath, "data", false) + + if c.flagVersion > 0 { + versionParam = map[string]string{ + "version": fmt.Sprintf("%d", c.flagVersion), + } + } + } else { + // v1 + if mountFlagSyntax { + fullPath = path.Join(mountPath, partialPath) + } else { + fullPath = partialPath + } + } + + secret, err := kvReadRequest(client, fullPath, versionParam) + if err != nil { + c.UI.Error(fmt.Sprintf("Error reading %s: %s", fullPath, err)) + if secret != nil { + OutputSecret(c.UI, secret) + } + return 2 + } + if secret == nil { + c.UI.Error(fmt.Sprintf("No value found at %s", fullPath)) + return 2 + } + + if c.flagField != "" { + if v2 { + // This is a v2, pass in the data field + if data, ok := secret.Data["data"]; ok && data != nil { + // If they requested a literal "data" see if they meant actual + // value or the data block itself + if c.flagField == "data" { + if dataMap, ok := data.(map[string]interface{}); ok { + if _, ok := dataMap["data"]; ok { + return PrintRawField(c.UI, dataMap, c.flagField) + } + } + return PrintRawField(c.UI, secret, c.flagField) + } + return PrintRawField(c.UI, data, c.flagField) + } else { + c.UI.Error(fmt.Sprintf("No data found at %s", fullPath)) + return 2 + } + } else { + return PrintRawField(c.UI, secret, c.flagField) + } + } + + // If we have wrap info print the secret normally. + if secret.WrapInfo != nil || c.flagFormat != "table" { + return OutputSecret(c.UI, secret) + } + + if len(secret.Warnings) > 0 { + tf := TableFormatter{} + tf.printWarnings(c.UI, secret) + } + + if v2 { + outputPath(c.UI, fullPath, "Secret Path") + } + + if metadata, ok := secret.Data["metadata"]; ok && metadata != nil { + c.UI.Info(getHeaderForMap("Metadata", metadata.(map[string]interface{}))) + OutputData(c.UI, metadata) + c.UI.Info("") + } + + data := secret.Data + if v2 && data != nil { + data = nil + dataRaw := secret.Data["data"] + if dataRaw != nil { + data = dataRaw.(map[string]interface{}) + } + } + + if data != nil { + c.UI.Info(getHeaderForMap("Data", data)) + OutputData(c.UI, data) + } + + return 0 +} diff --git a/command/kv_helpers.go b/command/kv_helpers.go new file mode 100644 index 0000000..adf2ec3 --- /dev/null +++ b/command/kv_helpers.go @@ -0,0 +1,269 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "context" + "errors" + "fmt" + "io" + paths "path" + "sort" + "strings" + + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" +) + +func kvReadRequest(client *api.Client, path string, params map[string]string) (*api.Secret, error) { + r := client.NewRequest("GET", "/v1/"+path) + for k, v := range params { + r.Params.Set(k, v) + } + resp, err := client.RawRequest(r) + if resp != nil { + defer resp.Body.Close() + } + if resp != nil && resp.StatusCode == 404 { + secret, parseErr := api.ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, err + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, nil + } + return nil, nil + } + if err != nil { + return nil, err + } + + return api.ParseSecret(resp.Body) +} + +func kvPreflightVersionRequest(client *api.Client, path string) (string, int, error) { + // We don't want to use a wrapping call here so save any custom value and + // restore after + currentWrappingLookupFunc := client.CurrentWrappingLookupFunc() + client.SetWrappingLookupFunc(nil) + defer client.SetWrappingLookupFunc(currentWrappingLookupFunc) + currentOutputCurlString := client.OutputCurlString() + client.SetOutputCurlString(false) + defer client.SetOutputCurlString(currentOutputCurlString) + currentOutputPolicy := client.OutputPolicy() + client.SetOutputPolicy(false) + defer client.SetOutputPolicy(currentOutputPolicy) + + r := client.NewRequest("GET", "/v1/sys/internal/ui/mounts/"+path) + resp, err := client.RawRequest(r) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + // If we get a 404 we are using an older version of vault, default to + // version 1 + if resp != nil { + if resp.StatusCode == 404 { + return "", 1, nil + } + + // if the original request had the -output-curl-string or -output-policy flag, + if (currentOutputCurlString || currentOutputPolicy) && resp.StatusCode == 403 { + // we provide a more helpful error for the user, + // who may not understand why the flag isn't working. + err = fmt.Errorf( + `This output flag requires the success of a preflight request +to determine the version of a KV secrets engine. Please +re-run this command with a token with read access to %s. 
+Note that if the path you are trying to reach is a KV v2 path, your token's policy must +allow read access to that path in the format 'mount-path/data/foo', not just 'mount-path/foo'.`, path) + } + } + + return "", 0, err + } + + secret, err := api.ParseSecret(resp.Body) + if err != nil { + return "", 0, err + } + if secret == nil { + return "", 0, errors.New("nil response from pre-flight request") + } + var mountPath string + if mountPathRaw, ok := secret.Data["path"]; ok { + mountPath = mountPathRaw.(string) + } + options := secret.Data["options"] + if options == nil { + return mountPath, 1, nil + } + versionRaw := options.(map[string]interface{})["version"] + if versionRaw == nil { + return mountPath, 1, nil + } + version := versionRaw.(string) + switch version { + case "", "1": + return mountPath, 1, nil + case "2": + return mountPath, 2, nil + } + + return mountPath, 1, nil +} + +func isKVv2(path string, client *api.Client) (string, bool, error) { + mountPath, version, err := kvPreflightVersionRequest(client, path) + if err != nil { + return "", false, err + } + + return mountPath, version == 2, nil +} + +func addPrefixToKVPath(path, mountPath, apiPrefix string, skipIfExists bool) string { + if path == mountPath || path == strings.TrimSuffix(mountPath, "/") { + return paths.Join(mountPath, apiPrefix) + } + + pathSuffix := strings.TrimPrefix(path, mountPath) + for { + // If the entire mountPath is included in the path, we are done + if pathSuffix != path { + break + } + // Trim the parts of the mountPath that are not included in the + // path, for example, in cases where the mountPath contains + // namespaces which are not included in the path. + partialMountPath := strings.SplitN(mountPath, "/", 2) + if len(partialMountPath) <= 1 || partialMountPath[1] == "" { + break + } + mountPath = strings.TrimSuffix(partialMountPath[1], "/") + pathSuffix = strings.TrimPrefix(pathSuffix, mountPath) + } + + if skipIfExists { + if strings.HasPrefix(pathSuffix, apiPrefix) || strings.HasPrefix(pathSuffix, "/"+apiPrefix) { + return paths.Join(mountPath, pathSuffix) + } + } + + return paths.Join(mountPath, apiPrefix, pathSuffix) +} + +func getHeaderForMap(header string, data map[string]interface{}) string { + maxKey := 0 + for k := range data { + if len(k) > maxKey { + maxKey = len(k) + } + } + + // 4 for the column spaces and 5 for the len("value") + totalLen := maxKey + 4 + 5 + + return padEqualSigns(header, totalLen) +} + +func kvParseVersionsFlags(versions []string) []string { + versionsOut := make([]string, 0, len(versions)) + for _, v := range versions { + versionsOut = append(versionsOut, strutil.ParseStringSlice(v, ",")...) + } + + return versionsOut +} + +func outputPath(ui cli.Ui, path string, title string) { + ui.Info(padEqualSigns(title, len(path))) + ui.Info(path) + ui.Info("") +} + +// Pad the table header with equal signs on each side +func padEqualSigns(header string, totalLen int) string { + equalSigns := totalLen - (len(header) + 2) + + // If we have zero or fewer equal signs bump it back up to two on either + // side of the header. + if equalSigns <= 0 { + equalSigns = 4 + } + + // If the number of equal signs is not divisible by two add a sign. + if equalSigns%2 != 0 { + equalSigns = equalSigns + 1 + } + + return fmt.Sprintf("%s %s %s", strings.Repeat("=", equalSigns/2), header, strings.Repeat("=", equalSigns/2)) +} + +// walkSecretsTree dfs-traverses the secrets tree rooted at the given path +// and calls the `visit` functor for each of the directory and leaf paths. 
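+// An illustrative example, assuming an authenticated client and a KV v2
+// mount named "kv-v2", that prints every leaf secret path:
+//
+//	_ = walkSecretsTree(ctx, client, "kv-v2/metadata", func(path string, directory bool) error {
+//		if !directory {
+//			fmt.Println(path)
+//		}
+//		return nil
+//	})
+//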
+// Note: for kv-v2, a "metadata" path is expected and "metadata" paths will be +// returned in the visit functor. +func walkSecretsTree(ctx context.Context, client *api.Client, path string, visit func(path string, directory bool) error) error { + resp, err := client.Logical().ListWithContext(ctx, path) + if err != nil { + return fmt.Errorf("could not list %q path: %w", path, err) + } + + if resp == nil || resp.Data == nil { + return fmt.Errorf("no value found at %q: %w", path, err) + } + + keysRaw, ok := resp.Data["keys"] + if !ok { + return fmt.Errorf("unexpected list response at %q", path) + } + + keysRawSlice, ok := keysRaw.([]interface{}) + if !ok { + return fmt.Errorf("unexpected list response type %T at %q", keysRaw, path) + } + + keys := make([]string, 0, len(keysRawSlice)) + + for _, keyRaw := range keysRawSlice { + key, ok := keyRaw.(string) + if !ok { + return fmt.Errorf("unexpected key type %T at %q", keyRaw, path) + } + keys = append(keys, key) + } + + // sort the keys for a deterministic output + sort.Strings(keys) + + for _, key := range keys { + // the keys are relative to the current path: combine them + child := paths.Join(path, key) + + if strings.HasSuffix(key, "/") { + // visit the directory + if err := visit(child, true); err != nil { + return err + } + + // this is not a leaf node: we need to go deeper... + if err := walkSecretsTree(ctx, client, child, visit); err != nil { + return err + } + } else { + // this is a leaf node: add it to the list + if err := visit(child, false); err != nil { + return err + } + } + } + + return nil +} diff --git a/command/kv_helpers_test.go b/command/kv_helpers_test.go new file mode 100644 index 0000000..ca5f7e8 --- /dev/null +++ b/command/kv_helpers_test.go @@ -0,0 +1,272 @@ +package command + +import ( + "context" + "reflect" + "testing" + "time" + + "github.com/hashicorp/vault/api" +) + +// TestAddPrefixToKVPath tests the addPrefixToKVPath helper function +func TestAddPrefixToKVPath(t *testing.T) { + cases := map[string]struct { + path string + mountPath string + apiPrefix string + skipIfExists bool + expected string + }{ + "simple": { + path: "kv-v2/foo", + mountPath: "kv-v2/", + apiPrefix: "data", + skipIfExists: false, + expected: "kv-v2/data/foo", + }, + + "multi-part": { + path: "my/kv-v2/mount/path/foo/bar/baz", + mountPath: "my/kv-v2/mount/path", + apiPrefix: "metadata", + skipIfExists: false, + expected: "my/kv-v2/mount/path/metadata/foo/bar/baz", + }, + + "with-namespace": { + path: "my/kv-v2/mount/path/foo/bar/baz", + mountPath: "my/ns1/my/kv-v2/mount/path", + apiPrefix: "metadata", + skipIfExists: false, + expected: "my/kv-v2/mount/path/metadata/foo/bar/baz", + }, + + "skip-if-exists-true": { + path: "kv-v2/data/foo", + mountPath: "kv-v2/", + apiPrefix: "data", + skipIfExists: true, + expected: "kv-v2/data/foo", + }, + + "skip-if-exists-false": { + path: "kv-v2/data/foo", + mountPath: "kv-v2", + apiPrefix: "data", + skipIfExists: false, + expected: "kv-v2/data/data/foo", + }, + + "skip-if-exists-with-namespace": { + path: "my/kv-v2/mount/path/metadata/foo/bar/baz", + mountPath: "my/ns1/my/kv-v2/mount/path", + apiPrefix: "metadata", + skipIfExists: true, + expected: "my/kv-v2/mount/path/metadata/foo/bar/baz", + }, + } + + for name, tc := range cases { + name, tc := name, tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + actual := addPrefixToKVPath( + tc.path, + tc.mountPath, + tc.apiPrefix, + tc.skipIfExists, + ) + + if tc.expected != actual { + t.Fatalf("unexpected output; want: %v, got: %v", tc.expected, actual) + 
} + }) + } +} + +// TestWalkSecretsTree tests the walkSecretsTree helper function +func TestWalkSecretsTree(t *testing.T) { + // test setup + client, closer := testVaultServer(t) + defer closer() + + // enable kv-v1 backend + if err := client.Sys().Mount("kv-v1/", &api.MountInput{ + Type: "kv-v1", + }); err != nil { + t.Fatal(err) + } + time.Sleep(time.Second) + + // enable kv-v2 backend + if err := client.Sys().Mount("kv-v2/", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatal(err) + } + time.Sleep(time.Second) + + ctx, cancelContextFunc := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelContextFunc() + + // populate secrets + for _, path := range []string{ + "foo", + "app-1/foo", + "app-1/bar", + "app-1/nested/x/y/z", + "app-1/nested/x/y", + "app-1/nested/bar", + } { + if err := client.KVv1("kv-v1").Put(ctx, path, map[string]interface{}{ + "password": "Hashi123", + }); err != nil { + t.Fatal(err) + } + + if _, err := client.KVv2("kv-v2").Put(ctx, path, map[string]interface{}{ + "password": "Hashi123", + }); err != nil { + t.Fatal(err) + } + } + + type treePath struct { + path string + directory bool + } + + cases := map[string]struct { + path string + expected []treePath + expectedError bool + }{ + "kv-v1-simple": { + path: "kv-v1/app-1/nested/x/y", + expected: []treePath{ + {path: "kv-v1/app-1/nested/x/y/z", directory: false}, + }, + expectedError: false, + }, + + "kv-v2-simple": { + path: "kv-v2/metadata/app-1/nested/x/y", + expected: []treePath{ + {path: "kv-v2/metadata/app-1/nested/x/y/z", directory: false}, + }, + expectedError: false, + }, + + "kv-v1-nested": { + path: "kv-v1/app-1/nested/", + expected: []treePath{ + {path: "kv-v1/app-1/nested/bar", directory: false}, + {path: "kv-v1/app-1/nested/x", directory: true}, + {path: "kv-v1/app-1/nested/x/y", directory: false}, + {path: "kv-v1/app-1/nested/x/y", directory: true}, + {path: "kv-v1/app-1/nested/x/y/z", directory: false}, + }, + expectedError: false, + }, + + "kv-v2-nested": { + path: "kv-v2/metadata/app-1/nested/", + expected: []treePath{ + {path: "kv-v2/metadata/app-1/nested/bar", directory: false}, + {path: "kv-v2/metadata/app-1/nested/x", directory: true}, + {path: "kv-v2/metadata/app-1/nested/x/y", directory: false}, + {path: "kv-v2/metadata/app-1/nested/x/y", directory: true}, + {path: "kv-v2/metadata/app-1/nested/x/y/z", directory: false}, + }, + expectedError: false, + }, + + "kv-v1-all": { + path: "kv-v1", + expected: []treePath{ + {path: "kv-v1/app-1", directory: true}, + {path: "kv-v1/app-1/bar", directory: false}, + {path: "kv-v1/app-1/foo", directory: false}, + {path: "kv-v1/app-1/nested", directory: true}, + {path: "kv-v1/app-1/nested/bar", directory: false}, + {path: "kv-v1/app-1/nested/x", directory: true}, + {path: "kv-v1/app-1/nested/x/y", directory: false}, + {path: "kv-v1/app-1/nested/x/y", directory: true}, + {path: "kv-v1/app-1/nested/x/y/z", directory: false}, + {path: "kv-v1/foo", directory: false}, + }, + expectedError: false, + }, + + "kv-v2-all": { + path: "kv-v2/metadata", + expected: []treePath{ + {path: "kv-v2/metadata/app-1", directory: true}, + {path: "kv-v2/metadata/app-1/bar", directory: false}, + {path: "kv-v2/metadata/app-1/foo", directory: false}, + {path: "kv-v2/metadata/app-1/nested", directory: true}, + {path: "kv-v2/metadata/app-1/nested/bar", directory: false}, + {path: "kv-v2/metadata/app-1/nested/x", directory: true}, + {path: "kv-v2/metadata/app-1/nested/x/y", directory: false}, + {path: "kv-v2/metadata/app-1/nested/x/y", directory: true}, + 
{path: "kv-v2/metadata/app-1/nested/x/y/z", directory: false}, + {path: "kv-v2/metadata/foo", directory: false}, + }, + expectedError: false, + }, + + "kv-v1-not-found": { + path: "kv-v1/does/not/exist", + expected: nil, + expectedError: true, + }, + + "kv-v2-not-found": { + path: "kv-v2/metadata/does/not/exist", + expected: nil, + expectedError: true, + }, + + "kv-v1-not-listable-leaf-node": { + path: "kv-v1/foo", + expected: nil, + expectedError: true, + }, + + "kv-v2-not-listable-leaf-node": { + path: "kv-v2/metadata/foo", + expected: nil, + expectedError: true, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + var descendants []treePath + + err := walkSecretsTree(ctx, client, tc.path, func(path string, directory bool) error { + descendants = append(descendants, treePath{ + path: path, + directory: directory, + }) + return nil + }) + + if tc.expectedError { + if err == nil { + t.Fatal("an error was expected but the test succeeded") + } + } else { + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(tc.expected, descendants) { + t.Fatalf("unexpected list output; want: %v, got: %v", tc.expected, descendants) + } + } + }) + } +} diff --git a/command/kv_list.go b/command/kv_list.go new file mode 100644 index 0000000..25ad4d2 --- /dev/null +++ b/command/kv_list.go @@ -0,0 +1,179 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "path" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*KVListCommand)(nil) + _ cli.CommandAutocomplete = (*KVListCommand)(nil) +) + +type KVListCommand struct { + *BaseCommand + flagMount string +} + +func (c *KVListCommand) Synopsis() string { + return "List data or secrets" +} + +func (c *KVListCommand) Help() string { + helpText := ` + +Usage: vault kv list [options] PATH + + Lists data from Vault's key-value store at the given path. + + List values under the "my-app" folder of the key-value store: + + $ vault kv list secret/my-app/ + + Additional flags and more advanced use cases are detailed below. + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *KVListCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + // Common Options + f := set.NewFlagSet("Common Options") + + f.StringVar(&StringVar{ + Name: "mount", + Target: &c.flagMount, + Default: "", // no default, because the handling of the next arg is determined by whether this flag has a value + Usage: `Specifies the path where the KV backend is mounted. If specified, + the next argument will be interpreted as the secret path. 
If this flag is
+ not specified, the next argument will be interpreted as the combined mount
+ path and secret path, with /metadata/ automatically appended between KV
+ v2 secrets.`,
+ })
+
+ return set
+}
+
+func (c *KVListCommand) AutocompleteArgs() complete.Predictor {
+ return c.PredictVaultFolders()
+}
+
+func (c *KVListCommand) AutocompleteFlags() complete.Flags {
+ return c.Flags().Completions()
+}
+
+func (c *KVListCommand) Run(args []string) int {
+ f := c.Flags()
+
+ if err := f.Parse(args); err != nil {
+ c.UI.Error(err.Error())
+ return 1
+ }
+
+ args = f.Args()
+ switch {
+ case len(args) < 1:
+ if c.flagMount == "" {
+ c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args)))
+ return 1
+ }
+ args = []string{""}
+ case len(args) > 1:
+ c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args)))
+ return 1
+ }
+
+ client, err := c.Client()
+ if err != nil {
+ c.UI.Error(err.Error())
+ return 2
+ }
+
+ // If true, we're working with "-mount=secret foo" syntax.
+ // If false, we're using "secret/foo" syntax.
+ mountFlagSyntax := c.flagMount != ""
+
+ var (
+ mountPath string
+ partialPath string
+ v2 bool
+ )
+
+ // Parse the paths and grab the KV version
+ if mountFlagSyntax {
+ // In this case, this arg is the secret path (e.g. "foo").
+ partialPath = sanitizePath(args[0])
+ mountPath, v2, err = isKVv2(sanitizePath(c.flagMount), client)
+ if err != nil {
+ c.UI.Error(err.Error())
+ return 2
+ }
+
+ if v2 {
+ partialPath = path.Join(mountPath, partialPath)
+ }
+ } else {
+ // In this case, this arg is a path-like combination of mountPath/secretPath.
+ // (e.g. "secret/foo")
+ partialPath = sanitizePath(args[0])
+ mountPath, v2, err = isKVv2(partialPath, client)
+ if err != nil {
+ c.UI.Error(err.Error())
+ return 2
+ }
+ }
+
+ // Add /metadata to v2 paths only
+ var fullPath string
+ if v2 {
+ fullPath = addPrefixToKVPath(partialPath, mountPath, "metadata", false)
+ } else {
+ // v1
+ if mountFlagSyntax {
+ fullPath = path.Join(mountPath, partialPath)
+ } else {
+ fullPath = partialPath
+ }
+ }
+
+ secret, err := client.Logical().List(fullPath)
+ if err != nil {
+ c.UI.Error(fmt.Sprintf("Error listing %s: %s", fullPath, err))
+ return 2
+ }
+
+ // If the secret is wrapped, return the wrapped response.
+ if secret != nil && secret.WrapInfo != nil && secret.WrapInfo.TTL != 0 {
+ return OutputSecret(c.UI, secret)
+ }
+
+ _, ok := extractListData(secret)
+ if Format(c.UI) != "table" {
+ if secret == nil || secret.Data == nil || !ok {
+ OutputData(c.UI, map[string]interface{}{})
+ return 2
+ }
+ }
+
+ if secret == nil || secret.Data == nil {
+ c.UI.Error(fmt.Sprintf("No value found at %s", fullPath))
+ return 2
+ }
+
+ if !ok {
+ c.UI.Error(fmt.Sprintf("No entries found at %s", fullPath))
+ return 2
+ }
+
+ return OutputList(c.UI, secret)
+} diff --git a/command/kv_metadata.go b/command/kv_metadata.go new file mode 100644 index 0000000..4350311 --- /dev/null +++ b/command/kv_metadata.go @@ -0,0 +1,57 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+ "strings"
+
+ "github.com/mitchellh/cli"
+)
+
+var _ cli.Command = (*KVMetadataCommand)(nil)
+
+type KVMetadataCommand struct {
+ *BaseCommand
+}
+
+func (c *KVMetadataCommand) Synopsis() string {
+ return "Interact with Vault's Key-Value storage"
+}
+
+func (c *KVMetadataCommand) Help() string {
+ helpText := `
+Usage: vault kv metadata [options] [args]
+
+ This command has subcommands for interacting with the metadata endpoint in
+ Vault's key-value store. Here are some simple examples, and more detailed
+ examples are available in the subcommands or the documentation.
+
+ Create or update a metadata entry for a key:
+
+ $ vault kv metadata put -mount=secret -max-versions=5 -delete-version-after=3h25m19s foo
+
+ Get the metadata for a key; this provides information about each existing
+ version:
+
+ $ vault kv metadata get -mount=secret foo
+
+ Delete a key and all existing versions:
+
+ $ vault kv metadata delete -mount=secret foo
+
+ The deprecated path-like syntax can also be used, but this should be avoided
+ for KV v2, as the fact that it is not actually the full API path to
+ the secret (secret/metadata/foo) can cause confusion:
+
+ $ vault kv metadata get secret/foo
+
+ Please see the individual subcommand help for detailed usage information.
+`
+
+ return strings.TrimSpace(helpText)
+}
+
+func (c *KVMetadataCommand) Run(args []string) int {
+ return cli.RunResultHelp
+} diff --git a/command/kv_metadata_delete.go b/command/kv_metadata_delete.go new file mode 100644 index 0000000..6217506 --- /dev/null +++ b/command/kv_metadata_delete.go @@ -0,0 +1,152 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+ "fmt"
+ "path"
+ "strings"
+
+ "github.com/mitchellh/cli"
+ "github.com/posener/complete"
+)
+
+var (
+ _ cli.Command = (*KVMetadataDeleteCommand)(nil)
+ _ cli.CommandAutocomplete = (*KVMetadataDeleteCommand)(nil)
+)
+
+type KVMetadataDeleteCommand struct {
+ *BaseCommand
+ flagMount string
+}
+
+func (c *KVMetadataDeleteCommand) Synopsis() string {
+ return "Deletes all versions and metadata for a key in the KV store"
+}
+
+func (c *KVMetadataDeleteCommand) Help() string {
+ helpText := `
+Usage: vault kv metadata delete [options] PATH
+
+ Deletes all versions and metadata for the provided key.
+
+ $ vault kv metadata delete -mount=secret foo
+
+ The deprecated path-like syntax can also be used, but this should be avoided
+ for KV v2, as the fact that it is not actually the full API path to
+ the secret (secret/metadata/foo) can cause confusion:
+
+ $ vault kv metadata delete secret/foo
+
+ Additional flags and more advanced use cases are detailed below.
+
+` + c.Flags().Help()
+
+ return strings.TrimSpace(helpText)
+}
+
+func (c *KVMetadataDeleteCommand) Flags() *FlagSets {
+ set := c.flagSet(FlagSetHTTP)
+
+ // Common Options
+ f := set.NewFlagSet("Common Options")
+
+ f.StringVar(&StringVar{
+ Name: "mount",
+ Target: &c.flagMount,
+ Default: "", // no default, because the handling of the next arg is determined by whether this flag has a value
+ Usage: `Specifies the path where the KV backend is mounted. If specified,
+ the next argument will be interpreted as the secret path.
If this flag is + not specified, the next argument will be interpreted as the combined mount + path and secret path, with /metadata/ automatically appended between KV + v2 secrets.`, + }) + + return set +} + +func (c *KVMetadataDeleteCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultFiles() +} + +func (c *KVMetadataDeleteCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *KVMetadataDeleteCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + // If true, we're working with "-mount=secret foo" syntax. + // If false, we're using "secret/foo" syntax. + mountFlagSyntax := c.flagMount != "" + + var ( + mountPath string + partialPath string + v2 bool + ) + + // Parse the paths and grab the KV version + if mountFlagSyntax { + // In this case, this arg is the secret path (e.g. "foo"). + partialPath = sanitizePath(args[0]) + mountPath, v2, err = isKVv2(sanitizePath(c.flagMount), client) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + if v2 { + partialPath = path.Join(mountPath, partialPath) + } + } else { + // In this case, this arg is a path-like combination of mountPath/secretPath. + // (e.g. "secret/foo") + partialPath = sanitizePath(args[0]) + mountPath, v2, err = isKVv2(partialPath, client) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + } + + if !v2 { + c.UI.Error("Metadata not supported on KV Version 1") + return 1 + } + + fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata", false) + if secret, err := client.Logical().Delete(fullPath); err != nil { + c.UI.Error(fmt.Sprintf("Error deleting %s: %s", fullPath, err)) + if secret != nil { + OutputSecret(c.UI, secret) + } + return 2 + } + + c.UI.Info(fmt.Sprintf("Success! Data deleted (if it existed) at: %s", fullPath)) + return 0 +} diff --git a/command/kv_metadata_get.go b/command/kv_metadata_get.go new file mode 100644 index 0000000..8d17210 --- /dev/null +++ b/command/kv_metadata_get.go @@ -0,0 +1,197 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "path" + "sort" + "strconv" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*KVMetadataGetCommand)(nil) + _ cli.CommandAutocomplete = (*KVMetadataGetCommand)(nil) +) + +type KVMetadataGetCommand struct { + *BaseCommand + flagMount string +} + +func (c *KVMetadataGetCommand) Synopsis() string { + return "Retrieves key metadata from the KV store" +} + +func (c *KVMetadataGetCommand) Help() string { + helpText := ` +Usage: vault kv metadata get [options] KEY + + Retrieves the metadata from Vault's key-value store at the given key name. If no + key exists with that name, an error is returned. + + $ vault kv metadata get -mount=secret foo + + The deprecated path-like syntax can also be used, but this should be avoided + for KV v2, as the fact that it is not actually the full API path to + the secret (secret/metadata/foo) can cause confusion: + + $ vault kv metadata get secret/foo + + Additional flags and more advanced use cases are detailed below. 
+ +` + c.Flags().Help() + return strings.TrimSpace(helpText) +} + +func (c *KVMetadataGetCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + // Common Options + f := set.NewFlagSet("Common Options") + + f.StringVar(&StringVar{ + Name: "mount", + Target: &c.flagMount, + Default: "", // no default, because the handling of the next arg is determined by whether this flag has a value + Usage: `Specifies the path where the KV backend is mounted. If specified, + the next argument will be interpreted as the secret path. If this flag is + not specified, the next argument will be interpreted as the combined mount + path and secret path, with /metadata/ automatically appended between KV + v2 secrets.`, + }) + + return set +} + +func (c *KVMetadataGetCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *KVMetadataGetCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *KVMetadataGetCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + // If true, we're working with "-mount=secret foo" syntax. + // If false, we're using "secret/foo" syntax. + mountFlagSyntax := c.flagMount != "" + + var ( + mountPath string + partialPath string + v2 bool + ) + + // Parse the paths and grab the KV version + if mountFlagSyntax { + // In this case, this arg is the secret path (e.g. "foo"). + partialPath = sanitizePath(args[0]) + mountPath, v2, err = isKVv2(sanitizePath(c.flagMount), client) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + if v2 { + partialPath = path.Join(mountPath, partialPath) + } + } else { + // In this case, this arg is a path-like combination of mountPath/secretPath. + // (e.g. "secret/foo") + partialPath = sanitizePath(args[0]) + mountPath, v2, err = isKVv2(partialPath, client) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + } + + if !v2 { + c.UI.Error("Metadata not supported on KV Version 1") + return 1 + } + + fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata", false) + secret, err := client.Logical().Read(fullPath) + if err != nil { + c.UI.Error(fmt.Sprintf("Error reading %s: %s", fullPath, err)) + return 2 + } + if secret == nil { + c.UI.Error(fmt.Sprintf("No value found at %s", fullPath)) + return 2 + } + + if c.flagField != "" { + return PrintRawField(c.UI, secret, c.flagField) + } + + // If we have wrap info print the secret normally. 
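+ // The same applies when a non-table output format was requested: emit the
+ // raw secret as-is rather than the table-style metadata and version
+ // breakdown that is built below.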
+ if secret.WrapInfo != nil || c.flagFormat != "table" {
+ return OutputSecret(c.UI, secret)
+ }
+
+ versionsRaw, ok := secret.Data["versions"]
+ if !ok || versionsRaw == nil {
+ c.UI.Error(fmt.Sprintf("No value found at %s", fullPath))
+ OutputSecret(c.UI, secret)
+ return 2
+ }
+ versions := versionsRaw.(map[string]interface{})
+
+ delete(secret.Data, "versions")
+
+ outputPath(c.UI, fullPath, "Metadata Path")
+
+ c.UI.Info(getHeaderForMap("Metadata", secret.Data))
+ OutputSecret(c.UI, secret)
+
+ versionKeys := []int{}
+ for k := range versions {
+ i, err := strconv.Atoi(k)
+ if err != nil {
+ c.UI.Error(fmt.Sprintf("Error parsing version %s", k))
+ return 2
+ }
+
+ versionKeys = append(versionKeys, i)
+ }
+
+ sort.Ints(versionKeys)
+
+ for _, v := range versionKeys {
+ c.UI.Info("\n" + getHeaderForMap(fmt.Sprintf("Version %d", v), versions[strconv.Itoa(v)].(map[string]interface{})))
+ OutputData(c.UI, versions[strconv.Itoa(v)])
+ }
+
+ return 0
+} diff --git a/command/kv_metadata_patch.go b/command/kv_metadata_patch.go new file mode 100644 index 0000000..60d1a3e --- /dev/null +++ b/command/kv_metadata_patch.go @@ -0,0 +1,262 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "path"
+ "strings"
+ "time"
+
+ "github.com/mitchellh/cli"
+ "github.com/posener/complete"
+)
+
+var (
+ _ cli.Command = (*KVMetadataPatchCommand)(nil)
+ _ cli.CommandAutocomplete = (*KVMetadataPatchCommand)(nil)
+)
+
+type KVMetadataPatchCommand struct {
+ *BaseCommand
+
+ flagMaxVersions int
+ flagCASRequired BoolPtr
+ flagDeleteVersionAfter time.Duration
+ flagCustomMetadata map[string]string
+ flagRemoveCustomMetadata []string
+ flagMount string
+ testStdin io.Reader // for tests
+}
+
+func (c *KVMetadataPatchCommand) Synopsis() string {
+ return "Patches key settings in the KV store"
+}
+
+func (c *KVMetadataPatchCommand) Help() string {
+ helpText := `
+Usage: vault kv metadata patch [options] KEY
+
+ This command can be used to create a blank key in the key-value store or to
+ update key configuration for a specified key.
+
+ Create a key in the key-value store with no data:
+
+ $ vault kv metadata patch -mount=secret foo
+
+ The deprecated path-like syntax can also be used, but this should be avoided
+ for KV v2, as the fact that it is not actually the full API path to
+ the secret (secret/metadata/foo) can cause confusion:
+
+ $ vault kv metadata patch secret/foo
+
+ Set a max versions setting on the key:
+
+ $ vault kv metadata patch -mount=secret -max-versions=5 foo
+
+ Set delete-version-after on the key:
+
+ $ vault kv metadata patch -mount=secret -delete-version-after=3h25m19s foo
+
+ Require Check-and-Set for this key:
+
+ $ vault kv metadata patch -mount=secret -cas-required foo
+
+ Set custom metadata on the key:
+
+ $ vault kv metadata patch -mount=secret -custom-metadata=foo=abc -custom-metadata=bar=123 foo
+
+ To remove custom metadata from the corresponding path in the key-value store, kv metadata patch can be used.
+
+ $ vault kv metadata patch -mount=secret -remove-custom-metadata=bar foo
+
+ Additional flags and more advanced use cases are detailed below.
+ +` + c.Flags().Help() + return strings.TrimSpace(helpText) +} + +func (c *KVMetadataPatchCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + // Common Options + f := set.NewFlagSet("Common Options") + + f.IntVar(&IntVar{ + Name: "max-versions", + Target: &c.flagMaxVersions, + Default: -1, + Usage: `The number of versions to keep. If not set, the backend’s configured max version is used.`, + }) + + f.BoolPtrVar(&BoolPtrVar{ + Name: "cas-required", + Target: &c.flagCASRequired, + Usage: `If true the key will require the cas parameter to be set on all write requests. If false, the backend’s configuration will be used.`, + }) + + f.DurationVar(&DurationVar{ + Name: "delete-version-after", + Target: &c.flagDeleteVersionAfter, + Default: -1, + EnvVar: "", + Completion: complete.PredictAnything, + Usage: `Specifies the length of time before a version is deleted. + If not set, the backend's configured delete-version-after is used. Cannot be + greater than the backend's delete-version-after. The delete-version-after is + specified as a numeric string with a suffix like "30s" or + "3h25m19s".`, + }) + + f.StringMapVar(&StringMapVar{ + Name: "custom-metadata", + Target: &c.flagCustomMetadata, + Default: map[string]string{}, + Usage: `Specifies arbitrary version-agnostic key=value metadata meant to describe a secret. + This can be specified multiple times to add multiple pieces of metadata.`, + }) + + f.StringSliceVar(&StringSliceVar{ + Name: "remove-custom-metadata", + Target: &c.flagRemoveCustomMetadata, + Default: []string{}, + Usage: "Key to remove from custom metadata. To specify multiple values, specify this flag multiple times.", + }) + + f.StringVar(&StringVar{ + Name: "mount", + Target: &c.flagMount, + Default: "", // no default, because the handling of the next arg is determined by whether this flag has a value + Usage: `Specifies the path where the KV backend is mounted. If specified, + the next argument will be interpreted as the secret path. If this flag is + not specified, the next argument will be interpreted as the combined mount + path and secret path, with /metadata/ automatically appended between KV + v2 secrets.`, + }) + + return set +} + +func (c *KVMetadataPatchCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *KVMetadataPatchCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *KVMetadataPatchCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + // If true, we're working with "-mount=secret foo" syntax. + // If false, we're using "secret/foo" syntax. + mountFlagSyntax := c.flagMount != "" + + var ( + mountPath string + partialPath string + v2 bool + ) + + // Parse the paths and grab the KV version + if mountFlagSyntax { + // In this case, this arg is the secret path (e.g. "foo"). 
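+ // For example, "-mount=secret foo" against a KV v2 mount yields partialPath
+ // "secret/foo", which addPrefixToKVPath below expands to "secret/metadata/foo".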
+ partialPath = sanitizePath(args[0]) + mountPath, v2, err = isKVv2(sanitizePath(c.flagMount), client) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + if v2 { + partialPath = path.Join(mountPath, partialPath) + } + } else { + // In this case, this arg is a path-like combination of mountPath/secretPath. + // (e.g. "secret/foo") + partialPath = sanitizePath(args[0]) + mountPath, v2, err = isKVv2(partialPath, client) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + } + if !v2 { + c.UI.Error("Metadata not supported on KV Version 1") + return 1 + } + + fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata", false) + + data := make(map[string]interface{}, 0) + + if c.flagMaxVersions >= 0 { + data["max_versions"] = c.flagMaxVersions + } + + if c.flagCASRequired.IsSet() { + data["cas_required"] = c.flagCASRequired.Get() + } + + if c.flagDeleteVersionAfter >= 0 { + data["delete_version_after"] = c.flagDeleteVersionAfter.String() + } + + customMetadata := make(map[string]interface{}) + + for key, value := range c.flagCustomMetadata { + customMetadata[key] = value + } + + for _, key := range c.flagRemoveCustomMetadata { + // A null in a JSON merge patch payload will remove the associated key + customMetadata[key] = nil + } + + data["custom_metadata"] = customMetadata + + secret, err := client.Logical().JSONMergePatch(context.Background(), fullPath, data) + if err != nil { + c.UI.Error(fmt.Sprintf("Error writing data to %s: %s", fullPath, err)) + + if secret != nil { + OutputSecret(c.UI, secret) + } + return 2 + } + + if secret == nil { + // Don't output anything unless using the "table" format + if Format(c.UI) == "table" { + c.UI.Info(fmt.Sprintf("Success! Data written to: %s", fullPath)) + } + return 0 + } + + return OutputSecret(c.UI, secret) +} diff --git a/command/kv_metadata_patch_test.go b/command/kv_metadata_patch_test.go new file mode 100644 index 0000000..58f4f91 --- /dev/null +++ b/command/kv_metadata_patch_test.go @@ -0,0 +1,299 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "encoding/json" + "io" + "strings" + "testing" + + "github.com/go-test/deep" + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" +) + +func testKVMetadataPatchCommand(tb testing.TB) (*cli.MockUi, *KVMetadataPatchCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &KVMetadataPatchCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func kvMetadataPatchWithRetry(t *testing.T, client *api.Client, args []string, stdin *io.PipeReader) (int, string) { + t.Helper() + + return retryKVCommand(t, func() (int, string) { + ui, cmd := testKVMetadataPatchCommand(t) + cmd.client = client + + if stdin != nil { + cmd.testStdin = stdin + } + + code := cmd.Run(args) + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + + return code, combined + }) +} + +func kvMetadataPutWithRetry(t *testing.T, client *api.Client, args []string, stdin *io.PipeReader) (int, string) { + t.Helper() + + return retryKVCommand(t, func() (int, string) { + ui, cmd := testKVMetadataPutCommand(t) + cmd.client = client + + if stdin != nil { + cmd.testStdin = stdin + } + + code := cmd.Run(args) + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + + return code, combined + }) +} + +func TestKvMetadataPatchCommand_EmptyArgs(t *testing.T) { + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().Mount("kv/", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatalf("kv-v2 mount error: %#v", err) + } + + args := make([]string, 0) + code, combined := kvMetadataPatchWithRetry(t, client, args, nil) + + expectedCode := 1 + expectedOutput := "Not enough arguments" + + if code != expectedCode { + t.Fatalf("expected code to be %d but was %d for patch cmd with args %#v", expectedCode, code, args) + } + + if !strings.Contains(combined, expectedOutput) { + t.Fatalf("expected output to be %q but was %q for patch cmd with args %#v", expectedOutput, combined, args) + } +} + +func TestKvMetadataPatchCommand_Flags(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + expectedUpdates map[string]interface{} + }{ + { + "cas_required_success", + []string{"-cas-required=true"}, + "Success!", + 0, + map[string]interface{}{ + "cas_required": true, + }, + }, + { + "cas_required_invalid", + []string{"-cas-required=12345"}, + "invalid boolean value", + 1, + map[string]interface{}{}, + }, + { + "custom_metadata_success", + []string{"-custom-metadata=baz=ghi"}, + "Success!", + 0, + map[string]interface{}{ + "custom_metadata": map[string]interface{}{ + "foo": "abc", + "bar": "def", + "baz": "ghi", + }, + }, + }, + { + "remove-custom_metadata", + []string{"-custom-metadata=baz=ghi", "-remove-custom-metadata=foo"}, + "Success!", + 0, + map[string]interface{}{ + "custom_metadata": map[string]interface{}{ + "bar": "def", + "baz": "ghi", + }, + }, + }, + { + "remove-custom_metadata-multiple", + []string{"-custom-metadata=baz=ghi", "-remove-custom-metadata=foo", "-remove-custom-metadata=bar"}, + "Success!", + 0, + map[string]interface{}{ + "custom_metadata": map[string]interface{}{ + "baz": "ghi", + }, + }, + }, + { + "delete_version_after_success", + []string{"-delete-version-after=5s"}, + "Success!", + 0, + map[string]interface{}{ + "delete_version_after": "5s", + }, + }, + { + "delete_version_after_invalid", + []string{"-delete-version-after=false"}, + "invalid duration", + 1, + map[string]interface{}{}, + }, + { + "max_versions_success", + 
[]string{"-max-versions=10"}, + "Success!", + 0, + map[string]interface{}{ + "max_versions": json.Number("10"), + }, + }, + { + "max_versions_invalid", + []string{"-max-versions=false"}, + "invalid syntax", + 1, + map[string]interface{}{}, + }, + { + "multiple_flags_success", + []string{"-max-versions=20", "-custom-metadata=baz=123"}, + "Success!", + 0, + map[string]interface{}{ + "max_versions": json.Number("20"), + "custom_metadata": map[string]interface{}{ + "foo": "abc", + "bar": "def", + "baz": "123", + }, + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + client, closer := testVaultServer(t) + defer closer() + + basePath := t.Name() + "/" + secretPath := basePath + "my-secret" + metadataPath := basePath + "metadata/" + "my-secret" + + if err := client.Sys().Mount(basePath, &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatalf("kv-v2 mount error: %#v", err) + } + + putArgs := []string{"-cas-required=true", "-custom-metadata=foo=abc", "-custom-metadata=bar=def", secretPath} + code, combined := kvMetadataPutWithRetry(t, client, putArgs, nil) + + if code != 0 { + t.Fatalf("initial metadata put failed, code: %d, output: %s", code, combined) + } + + initialMetadata, err := client.Logical().Read(metadataPath) + if err != nil { + t.Fatalf("metadata read failed, err: %#v", err) + } + + patchArgs := append(tc.args, secretPath) + + code, combined = kvMetadataPatchWithRetry(t, client, patchArgs, nil) + + if !strings.Contains(combined, tc.out) { + t.Fatalf("expected output to be %q but was %q for patch cmd with args %#v", tc.out, combined, patchArgs) + } + if code != tc.code { + t.Fatalf("expected code to be %d but was %d for patch cmd with args %#v", tc.code, code, patchArgs) + } + + patchedMetadata, err := client.Logical().Read(metadataPath) + if err != nil { + t.Fatalf("metadata read failed, err: %#v", err) + } + + for k, v := range patchedMetadata.Data { + var expectedVal interface{} + + if inputVal, ok := tc.expectedUpdates[k]; ok { + expectedVal = inputVal + } else { + expectedVal = initialMetadata.Data[k] + } + + if diff := deep.Equal(expectedVal, v); len(diff) > 0 { + t.Fatalf("patched %q mismatch, diff: %#v", k, diff) + } + } + }) + } +} + +func TestKvMetadataPatchCommand_CasWarning(t *testing.T) { + client, closer := testVaultServer(t) + defer closer() + + basePath := "kv/" + if err := client.Sys().Mount(basePath, &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatalf("kv-v2 mount error: %#v", err) + } + + secretPath := basePath + "my-secret" + + args := []string{"-cas-required=true", secretPath} + code, combined := kvMetadataPutWithRetry(t, client, args, nil) + + if code != 0 { + t.Fatalf("metadata put failed, code: %d, output: %s", code, combined) + } + + casConfig := map[string]interface{}{ + "cas_required": true, + } + + _, err := client.Logical().Write(basePath+"config", casConfig) + if err != nil { + t.Fatalf("config write failed, err: #%v", err) + } + + args = []string{"-cas-required=false", secretPath} + code, combined = kvMetadataPatchWithRetry(t, client, args, nil) + + if code != 0 { + t.Fatalf("expected code to be 0 but was %d for patch cmd with args %#v", code, args) + } + + expectedOutput := "\"cas_required\" set to false, but is mandated by backend config" + if !strings.Contains(combined, expectedOutput) { + t.Fatalf("expected output to be %q but was %q for patch cmd with args %#v", expectedOutput, combined, args) + } +} diff --git a/command/kv_metadata_put.go b/command/kv_metadata_put.go new file mode 100644 index 
0000000..9ec43e0 --- /dev/null +++ b/command/kv_metadata_put.go @@ -0,0 +1,238 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "io" + "path" + "strings" + "time" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*KVMetadataPutCommand)(nil) + _ cli.CommandAutocomplete = (*KVMetadataPutCommand)(nil) +) + +type KVMetadataPutCommand struct { + *BaseCommand + + flagMaxVersions int + flagCASRequired BoolPtr + flagDeleteVersionAfter time.Duration + flagCustomMetadata map[string]string + flagMount string + testStdin io.Reader // for tests +} + +func (c *KVMetadataPutCommand) Synopsis() string { + return "Sets or updates key settings in the KV store" +} + +func (c *KVMetadataPutCommand) Help() string { + helpText := ` +Usage: vault kv metadata put [options] KEY + + This command can be used to create a blank key in the key-value store or to + update key configuration for a specified key. + + Create a key in the key-value store with no data: + + $ vault kv metadata put -mount=secret foo + + The deprecated path-like syntax can also be used, but this should be avoided + for KV v2, as the fact that it is not actually the full API path to + the secret (secret/metadata/foo) can cause confusion: + + $ vault kv metadata put secret/foo + + Set a max versions setting on the key: + + $ vault kv metadata put -mount=secret -max-versions=5 foo + + Set delete-version-after on the key: + + $ vault kv metadata put -mount=secret -delete-version-after=3h25m19s foo + + Require Check-and-Set for this key: + + $ vault kv metadata put -mount=secret -cas-required foo + + Set custom metadata on the key: + + $ vault kv metadata put -mount=secret -custom-metadata=foo=abc -custom-metadata=bar=123 foo + + Additional flags and more advanced use cases are detailed below. + +` + c.Flags().Help() + return strings.TrimSpace(helpText) +} + +func (c *KVMetadataPutCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + // Common Options + f := set.NewFlagSet("Common Options") + + f.IntVar(&IntVar{ + Name: "max-versions", + Target: &c.flagMaxVersions, + Default: -1, + Usage: `The number of versions to keep. If not set, the backend’s configured max version is used.`, + }) + + f.BoolPtrVar(&BoolPtrVar{ + Name: "cas-required", + Target: &c.flagCASRequired, + Usage: `If true the key will require the cas parameter to be set on all write requests. If false, the backend’s configuration will be used.`, + }) + + f.DurationVar(&DurationVar{ + Name: "delete-version-after", + Target: &c.flagDeleteVersionAfter, + Default: -1, + EnvVar: "", + Completion: complete.PredictAnything, + Usage: `Specifies the length of time before a version is deleted. + If not set, the backend's configured delete-version-after is used. Cannot be + greater than the backend's delete-version-after. The delete-version-after is + specified as a numeric string with a suffix like "30s" or + "3h25m19s".`, + }) + + f.StringMapVar(&StringMapVar{ + Name: "custom-metadata", + Target: &c.flagCustomMetadata, + Default: map[string]string{}, + Usage: "Specifies arbitrary version-agnostic key=value metadata meant to describe a secret." 
+ + "This can be specified multiple times to add multiple pieces of metadata.", + }) + + f.StringVar(&StringVar{ + Name: "mount", + Target: &c.flagMount, + Default: "", // no default, because the handling of the next arg is determined by whether this flag has a value + Usage: `Specifies the path where the KV backend is mounted. If specified, + the next argument will be interpreted as the secret path. If this flag is + not specified, the next argument will be interpreted as the combined mount + path and secret path, with /metadata/ automatically appended between KV + v2 secrets.`, + }) + + return set +} + +func (c *KVMetadataPutCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *KVMetadataPutCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *KVMetadataPutCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + // If true, we're working with "-mount=secret foo" syntax. + // If false, we're using "secret/foo" syntax. + mountFlagSyntax := c.flagMount != "" + + var ( + mountPath string + partialPath string + v2 bool + ) + + // Parse the paths and grab the KV version + if mountFlagSyntax { + // In this case, this arg is the secret path (e.g. "foo"). + partialPath = sanitizePath(args[0]) + mountPath, v2, err = isKVv2(sanitizePath(c.flagMount), client) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + if v2 { + partialPath = path.Join(mountPath, partialPath) + } + } else { + // In this case, this arg is a path-like combination of mountPath/secretPath. + // (e.g. "secret/foo") + partialPath = sanitizePath(args[0]) + mountPath, v2, err = isKVv2(partialPath, client) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + } + + if !v2 { + c.UI.Error("Metadata not supported on KV Version 1") + return 1 + } + + fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata", false) + data := map[string]interface{}{} + + if c.flagMaxVersions >= 0 { + data["max_versions"] = c.flagMaxVersions + } + + if c.flagDeleteVersionAfter >= 0 { + data["delete_version_after"] = c.flagDeleteVersionAfter.String() + } + + if c.flagCASRequired.IsSet() { + data["cas_required"] = c.flagCASRequired.Get() + } + + if len(c.flagCustomMetadata) > 0 { + data["custom_metadata"] = c.flagCustomMetadata + } + + secret, err := client.Logical().Write(fullPath, data) + if err != nil { + c.UI.Error(fmt.Sprintf("Error writing data to %s: %s", fullPath, err)) + if secret != nil { + OutputSecret(c.UI, secret) + } + return 2 + } + if secret == nil { + // Don't output anything unless using the "table" format + if Format(c.UI) == "table" { + c.UI.Info(fmt.Sprintf("Success! Data written to: %s", fullPath)) + } + return 0 + } + + return OutputSecret(c.UI, secret) +} diff --git a/command/kv_metadata_put_test.go b/command/kv_metadata_put_test.go new file mode 100644 index 0000000..008ded9 --- /dev/null +++ b/command/kv_metadata_put_test.go @@ -0,0 +1,204 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "encoding/json" + "strings" + "testing" + + "github.com/go-test/deep" + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" +) + +func testKVMetadataPutCommand(tb testing.TB) (*cli.MockUi, *KVMetadataPutCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &KVMetadataPutCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestKvMetadataPutCommand_DeleteVersionAfter(t *testing.T) { + client, closer := testVaultServer(t) + defer closer() + + basePath := t.Name() + "/" + if err := client.Sys().Mount(basePath, &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatal(err) + } + + ui, cmd := testKVMetadataPutCommand(t) + cmd.client = client + + // Set a limit of 1s first. + code := cmd.Run([]string{"-delete-version-after=1s", basePath + "secret/my-secret"}) + if code != 0 { + t.Fatalf("expected %d but received %d", 0, code) + } + + metaFullPath := basePath + "metadata/secret/my-secret" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + success := "Success! Data written to: " + metaFullPath + if !strings.Contains(combined, success) { + t.Fatalf("expected %q but received %q", success, combined) + } + + secret, err := client.Logical().Read(metaFullPath) + if err != nil { + t.Fatal(err) + } + if secret.Data["delete_version_after"] != "1s" { + t.Fatalf("expected 1s but received %q", secret.Data["delete_version_after"]) + } + + // Now verify that we can return it to 0s. + ui, cmd = testKVMetadataPutCommand(t) + cmd.client = client + + // Set a limit of 1s first. + code = cmd.Run([]string{"-delete-version-after=0", basePath + "secret/my-secret"}) + if code != 0 { + t.Errorf("expected %d but received %d", 0, code) + } + + combined = ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, success) { + t.Errorf("expected %q but received %q", success, combined) + } + + secret, err = client.Logical().Read(metaFullPath) + if err != nil { + t.Fatal(err) + } + if secret.Data["delete_version_after"] != "0s" { + t.Fatalf("expected 0s but received %q", secret.Data["delete_version_after"]) + } +} + +func TestKvMetadataPutCommand_CustomMetadata(t *testing.T) { + client, closer := testVaultServer(t) + defer closer() + + basePath := t.Name() + "/" + secretPath := basePath + "secret/my-secret" + + if err := client.Sys().Mount(basePath, &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatalf("kv-v2 mount error: %#v", err) + } + + ui, cmd := testKVMetadataPutCommand(t) + cmd.client = client + + exitStatus := cmd.Run([]string{"-custom-metadata=foo=abc", "-custom-metadata=bar=123", secretPath}) + + if exitStatus != 0 { + t.Fatalf("Expected 0 exit status but received %d", exitStatus) + } + + metaFullPath := basePath + "metadata/secret/my-secret" + commandOutput := ui.OutputWriter.String() + ui.ErrorWriter.String() + expectedOutput := "Success! 
Data written to: " + metaFullPath + + if !strings.Contains(commandOutput, expectedOutput) { + t.Fatalf("Expected command output %q but received %q", expectedOutput, commandOutput) + } + + metadata, err := client.Logical().Read(metaFullPath) + if err != nil { + t.Fatalf("Metadata read error: %#v", err) + } + + // JSON output from read decoded into map[string]interface{} + expectedCustomMetadata := map[string]interface{}{ + "foo": "abc", + "bar": "123", + } + + if diff := deep.Equal(metadata.Data["custom_metadata"], expectedCustomMetadata); len(diff) > 0 { + t.Fatal(diff) + } + + ui, cmd = testKVMetadataPutCommand(t) + cmd.client = client + + // Overwrite entire custom metadata with a single key + exitStatus = cmd.Run([]string{"-custom-metadata=baz=abc123", secretPath}) + + if exitStatus != 0 { + t.Fatalf("Expected 0 exit status but received %d", exitStatus) + } + + commandOutput = ui.OutputWriter.String() + ui.ErrorWriter.String() + + if !strings.Contains(commandOutput, expectedOutput) { + t.Fatalf("Expected command output %q but received %q", expectedOutput, commandOutput) + } + + metadata, err = client.Logical().Read(metaFullPath) + + if err != nil { + t.Fatalf("Metadata read error: %#v", err) + } + + expectedCustomMetadata = map[string]interface{}{ + "baz": "abc123", + } + + if diff := deep.Equal(metadata.Data["custom_metadata"], expectedCustomMetadata); len(diff) > 0 { + t.Fatal(diff) + } +} + +func TestKvMetadataPutCommand_UnprovidedFlags(t *testing.T) { + client, closer := testVaultServer(t) + defer closer() + + basePath := t.Name() + "/" + secretPath := basePath + "my-secret" + + if err := client.Sys().Mount(basePath, &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatalf("kv-v2 mount error: %#v", err) + } + + _, cmd := testKVMetadataPutCommand(t) + cmd.client = client + + args := []string{"-cas-required=true", "-max-versions=10", secretPath} + code, _ := kvMetadataPutWithRetry(t, client, args, nil) + + if code != 0 { + t.Fatalf("expected 0 exit status but received %d", code) + } + + args = []string{"-custom-metadata=foo=bar", secretPath} + code, _ = kvMetadataPutWithRetry(t, client, args, nil) + + if code != 0 { + t.Fatalf("expected 0 exit status but received %d", code) + } + + secret, err := client.Logical().Read(basePath + "metadata/" + "my-secret") + if err != nil { + t.Fatal(err) + } + + if secret.Data["cas_required"] != true { + t.Fatalf("expected cas_required to be true but received %#v", secret.Data["cas_required"]) + } + + if secret.Data["max_versions"] != json.Number("10") { + t.Fatalf("expected max_versions to be 10 but received %#v", secret.Data["max_versions"]) + } +} diff --git a/command/kv_patch.go b/command/kv_patch.go new file mode 100644 index 0000000..d7d287c --- /dev/null +++ b/command/kv_patch.go @@ -0,0 +1,409 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "context" + "fmt" + "io" + "os" + "path" + "strings" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*KVPatchCommand)(nil) + _ cli.CommandAutocomplete = (*KVPatchCommand)(nil) +) + +type KVPatchCommand struct { + *BaseCommand + + flagCAS int + flagMethod string + flagMount string + testStdin io.Reader // for tests + flagRemoveData []string +} + +func (c *KVPatchCommand) Synopsis() string { + return "Sets or updates data in the KV store without overwriting" +} + +func (c *KVPatchCommand) Help() string { + helpText := ` +Usage: vault kv patch [options] KEY [DATA] + + *NOTE*: This is only supported for KV v2 engine mounts. + + Writes the data to the corresponding path in the key-value store. The data can be of + any type. + + $ vault kv patch -mount=secret foo bar=baz + + The deprecated path-like syntax can also be used, but this should be avoided, + as the fact that it is not actually the full API path to + the secret (secret/data/foo) can cause confusion: + + $ vault kv patch secret/foo bar=baz + + The data can also be consumed from a file on disk by prefixing with the "@" + symbol. For example: + + $ vault kv patch -mount=secret foo @data.json + + Or it can be read from stdin using the "-" symbol: + + $ echo "abcd1234" | vault kv patch -mount=secret foo bar=- + + To perform a Check-And-Set operation, specify the -cas flag with the + appropriate version number corresponding to the key you want to perform + the CAS operation on: + + $ vault kv patch -mount=secret -cas=1 foo bar=baz + + By default, this operation will attempt an HTTP PATCH operation. If your + policy does not allow that, it will fall back to a read/local update/write approach. + If you wish to specify which method this command should use, you may do so + with the -method flag. When -method=patch is specified, only an HTTP PATCH + operation will be tried. If it fails, the entire command will fail. + + $ vault kv patch -mount=secret -method=patch foo bar=baz + + When -method=rw is specified, only a read/local update/write approach will be tried. + This was the default behavior previous to Vault 1.9. + + $ vault kv patch -mount=secret -method=rw foo bar=baz + + To remove data from the corresponding path in the key-value store, kv patch can be used. + + $ vault kv patch -mount=secret -remove-data=bar foo + + Additional flags and more advanced use cases are detailed below. + +` + c.Flags().Help() + return strings.TrimSpace(helpText) +} + +func (c *KVPatchCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) + + // Patch specific options + f := set.NewFlagSet("Common Options") + + f.IntVar(&IntVar{ + Name: "cas", + Target: &c.flagCAS, + Default: 0, + Usage: `Specifies to use a Check-And-Set operation. If set to 0 or not + set, the patch will be allowed. If the index is non-zero the patch will + only be allowed if the key’s current version matches the version + specified in the cas parameter.`, + }) + + f.StringVar(&StringVar{ + Name: "method", + Target: &c.flagMethod, + Usage: `Specifies which method of patching to use. If set to "patch", then + an HTTP PATCH request will be issued. 
If set to "rw", then a read will be + performed, then a local update, followed by a remote update.`, + }) + + f.StringVar(&StringVar{ + Name: "mount", + Target: &c.flagMount, + Default: "", // no default, because the handling of the next arg is determined by whether this flag has a value + Usage: `Specifies the path where the KV backend is mounted. If specified, + the next argument will be interpreted as the secret path. If this flag is + not specified, the next argument will be interpreted as the combined mount + path and secret path, with /data/ automatically appended between KV + v2 secrets.`, + }) + + f.StringSliceVar(&StringSliceVar{ + Name: "remove-data", + Target: &c.flagRemoveData, + Default: []string{}, + Usage: "Key to remove from data. To specify multiple values, specify this flag multiple times.", + }) + + return set +} + +func (c *KVPatchCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultFiles() +} + +func (c *KVPatchCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *KVPatchCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + // Pull our fake stdin if needed + stdin := (io.Reader)(os.Stdin) + if c.testStdin != nil { + stdin = c.testStdin + } + + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected >1, got %d)", len(args))) + return 1 + case len(c.flagRemoveData) == 0 && len(args) == 1: + c.UI.Error("Must supply data") + return 1 + } + + var err error + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + newData, err := parseArgsData(stdin, args[1:]) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to parse K=V data: %s", err)) + return 1 + } + + // If true, we're working with "-mount=secret foo" syntax. + // If false, we're using "secret/foo" syntax. + mountFlagSyntax := c.flagMount != "" + + var ( + mountPath string + partialPath string + v2 bool + ) + + // Parse the paths and grab the KV version + if mountFlagSyntax { + // In this case, this arg is the secret path (e.g. "foo"). + partialPath = sanitizePath(args[0]) + mountPath, v2, err = isKVv2(sanitizePath(c.flagMount), client) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + if v2 { + partialPath = path.Join(mountPath, partialPath) + } + } else { + // In this case, this arg is a path-like combination of mountPath/secretPath. + // (e.g. 
"secret/foo") + partialPath = sanitizePath(args[0]) + mountPath, v2, err = isKVv2(partialPath, client) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + } + + if !v2 { + c.UI.Error("K/V engine mount must be version 2 for patch support") + return 2 + } + + fullPath := addPrefixToKVPath(partialPath, mountPath, "data", false) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + // collecting data to be removed + if newData == nil { + newData = make(map[string]interface{}) + } + + for _, key := range c.flagRemoveData { + // A null in a JSON merge patch payload will remove the associated key + newData[key] = nil + } + + // Check the method and behave accordingly + var secret *api.Secret + var code int + + switch c.flagMethod { + case "rw": + secret, code = c.readThenWrite(client, fullPath, newData) + case "patch": + secret, code = c.mergePatch(client, fullPath, newData, false) + case "": + secret, code = c.mergePatch(client, fullPath, newData, true) + default: + c.UI.Error(fmt.Sprintf("Unsupported method provided to -method flag: %s", c.flagMethod)) + return 2 + } + + if code != 0 { + return code + } + if secret == nil { + // Don't output anything if there's no secret + return 0 + } + + if c.flagField != "" { + return PrintRawField(c.UI, secret, c.flagField) + } + + // If the secret is wrapped, return the wrapped response. + if secret.WrapInfo != nil && secret.WrapInfo.TTL != 0 { + return OutputSecret(c.UI, secret) + } + + if Format(c.UI) == "table" { + outputPath(c.UI, fullPath, "Secret Path") + metadata := secret.Data + c.UI.Info(getHeaderForMap("Metadata", metadata)) + return OutputData(c.UI, metadata) + } + + return OutputSecret(c.UI, secret) +} + +func (c *KVPatchCommand) readThenWrite(client *api.Client, path string, newData map[string]interface{}) (*api.Secret, int) { + // First, do a read. + // Note that we don't want to see curl output for the read request. 
+ curOutputCurl := client.OutputCurlString() + client.SetOutputCurlString(false) + outputPolicy := client.OutputPolicy() + client.SetOutputPolicy(false) + secret, err := kvReadRequest(client, path, nil) + if err != nil { + c.UI.Error(fmt.Sprintf("Error doing pre-read at %s: %s", path, err)) + return nil, 2 + } + client.SetOutputCurlString(curOutputCurl) + client.SetOutputPolicy(outputPolicy) + + // Make sure a value already exists + if secret == nil || secret.Data == nil { + c.UI.Error(fmt.Sprintf("No value found at %s", path)) + return nil, 2 + } + + // Verify metadata found + rawMeta, ok := secret.Data["metadata"] + if !ok || rawMeta == nil { + c.UI.Error(fmt.Sprintf("No metadata found at %s; patch only works on existing data", path)) + return nil, 2 + } + meta, ok := rawMeta.(map[string]interface{}) + if !ok { + c.UI.Error(fmt.Sprintf("Metadata found at %s is not the expected type (JSON object)", path)) + return nil, 2 + } + if meta == nil { + c.UI.Error(fmt.Sprintf("No metadata found at %s; patch only works on existing data", path)) + return nil, 2 + } + + // Verify old data found + rawData, ok := secret.Data["data"] + if !ok || rawData == nil { + c.UI.Error(fmt.Sprintf("No data found at %s; patch only works on existing data", path)) + return nil, 2 + } + data, ok := rawData.(map[string]interface{}) + if !ok { + c.UI.Error(fmt.Sprintf("Data found at %s is not the expected type (JSON object)", path)) + return nil, 2 + } + if data == nil { + c.UI.Error(fmt.Sprintf("No data found at %s; patch only works on existing data", path)) + return nil, 2 + } + + // Copy new data over + for k, v := range newData { + data[k] = v + } + + secret, err = client.Logical().Write(path, map[string]interface{}{ + "data": data, + "options": map[string]interface{}{ + "cas": meta["version"], + }, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error writing data to %s: %s", path, err)) + return nil, 2 + } + + if secret == nil { + // Don't output anything unless using the "table" format + if Format(c.UI) == "table" { + c.UI.Info(fmt.Sprintf("Success! Data written to: %s", path)) + } + return nil, 0 + } + + if c.flagField != "" { + return nil, PrintRawField(c.UI, secret, c.flagField) + } + + return secret, 0 +} + +func (c *KVPatchCommand) mergePatch(client *api.Client, path string, newData map[string]interface{}, rwFallback bool) (*api.Secret, int) { + data := map[string]interface{}{ + "data": newData, + "options": map[string]interface{}{}, + } + + if c.flagCAS > 0 { + data["options"].(map[string]interface{})["cas"] = c.flagCAS + } + + secret, err := client.Logical().JSONMergePatch(context.Background(), path, data) + if err != nil { + // If it's a 405, that probably means the server is running a pre-1.9 + // Vault version that doesn't support the HTTP PATCH method. + // Fall back to the old way of doing it if the user didn't specify a -method. + // If they did, and it was "patch", then just error. + if re, ok := err.(*api.ResponseError); ok && re.StatusCode == 405 && rwFallback { + return c.readThenWrite(client, path, newData) + } + // If it's a 403, that probably means they don't have the patch capability in their policy. Fall back to + // the old way of doing it if the user didn't specify a -method. If they did, and it was "patch", then just error. 
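+ // Like the 405 case above, this relies on *api.ResponseError exposing the
+ // HTTP status code: 403 usually means the token's policy lacks the "patch"
+ // capability, whereas 405 means the server predates HTTP PATCH support.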
+ if re, ok := err.(*api.ResponseError); ok && re.StatusCode == 403 && rwFallback { + c.UI.Warn(fmt.Sprintf("Data was written to %s but we recommend that you add the \"patch\" capability to your ACL policy in order to use HTTP PATCH in the future.", path)) + return c.readThenWrite(client, path, newData) + } + + c.UI.Error(fmt.Sprintf("Error writing data to %s: %s", path, err)) + return nil, 2 + } + + if secret == nil { + // Don't output anything unless using the "table" format + if Format(c.UI) == "table" { + c.UI.Info(fmt.Sprintf("Success! Data written to: %s", path)) + } + return nil, 0 + } + + if c.flagField != "" { + return nil, PrintRawField(c.UI, secret, c.flagField) + } + + return secret, 0 +} diff --git a/command/kv_put.go b/command/kv_put.go new file mode 100644 index 0000000..92d68f9 --- /dev/null +++ b/command/kv_put.go @@ -0,0 +1,235 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "io" + "os" + "path" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*KVPutCommand)(nil) + _ cli.CommandAutocomplete = (*KVPutCommand)(nil) +) + +type KVPutCommand struct { + *BaseCommand + + flagCAS int + flagMount string + testStdin io.Reader // for tests +} + +func (c *KVPutCommand) Synopsis() string { + return "Sets or updates data in the KV store" +} + +func (c *KVPutCommand) Help() string { + helpText := ` +Usage: vault kv put [options] KEY [DATA] + + Writes the data to the given path in the key-value store. The data can be of + any type. + + $ vault kv put -mount=secret foo bar=baz + + The deprecated path-like syntax can also be used, but this should be avoided + for KV v2, as the fact that it is not actually the full API path to + the secret (secret/data/foo) can cause confusion: + + $ vault kv put secret/foo bar=baz + + The data can also be consumed from a file on disk by prefixing with the "@" + symbol. For example: + + $ vault kv put -mount=secret foo @data.json + + Or it can be read from stdin using the "-" symbol: + + $ echo "abcd1234" | vault kv put -mount=secret foo bar=- + + To perform a Check-And-Set operation, specify the -cas flag with the + appropriate version number corresponding to the key you want to perform + the CAS operation on: + + $ vault kv put -mount=secret -cas=1 foo bar=baz + + Additional flags and more advanced use cases are detailed below. + +` + c.Flags().Help() + return strings.TrimSpace(helpText) +} + +func (c *KVPutCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) + + // Common Options + f := set.NewFlagSet("Common Options") + + f.IntVar(&IntVar{ + Name: "cas", + Target: &c.flagCAS, + Default: -1, + Usage: `Specifies to use a Check-And-Set operation. If not set the write + will be allowed. If set to 0 a write will only be allowed if the key + doesn’t exist. If the index is non-zero the write will only be allowed + if the key’s current version matches the version specified in the cas + parameter.`, + }) + + f.StringVar(&StringVar{ + Name: "mount", + Target: &c.flagMount, + Default: "", // no default, because the handling of the next arg is determined by whether this flag has a value + Usage: `Specifies the path where the KV backend is mounted. If specified, + the next argument will be interpreted as the secret path. 
If this flag is + not specified, the next argument will be interpreted as the combined mount + path and secret path, with /data/ automatically appended between KV + v2 secrets.`, + }) + + return set +} + +func (c *KVPutCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultFolders() +} + +func (c *KVPutCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *KVPutCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + // Pull our fake stdin if needed + stdin := (io.Reader)(os.Stdin) + if c.testStdin != nil { + stdin = c.testStdin + } + + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected >1, got %d)", len(args))) + return 1 + case len(args) == 1: + c.UI.Error("Must supply data") + return 1 + } + + var err error + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + data, err := parseArgsData(stdin, args[1:]) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to parse K=V data: %s", err)) + return 1 + } + + // If true, we're working with "-mount=secret foo" syntax. + // If false, we're using "secret/foo" syntax. + mountFlagSyntax := c.flagMount != "" + + var ( + mountPath string + partialPath string + v2 bool + ) + + // Parse the paths and grab the KV version + if mountFlagSyntax { + // In this case, this arg is the secret path (e.g. "foo"). + partialPath = sanitizePath(args[0]) + mountPath, v2, err = isKVv2(sanitizePath(c.flagMount), client) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + if v2 { + partialPath = path.Join(mountPath, partialPath) + } + } else { + // In this case, this arg is a path-like combination of mountPath/secretPath. + // (e.g. "secret/foo") + partialPath = sanitizePath(args[0]) + mountPath, v2, err = isKVv2(partialPath, client) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + } + + // Add /data to v2 paths only + var fullPath string + if v2 { + fullPath = addPrefixToKVPath(partialPath, mountPath, "data", false) + data = map[string]interface{}{ + "data": data, + "options": map[string]interface{}{}, + } + + if c.flagCAS > -1 { + data["options"].(map[string]interface{})["cas"] = c.flagCAS + } + } else { + // v1 + if mountFlagSyntax { + fullPath = path.Join(mountPath, partialPath) + } else { + fullPath = partialPath + } + } + + secret, err := client.Logical().Write(fullPath, data) + if err != nil { + c.UI.Error(fmt.Sprintf("Error writing data to %s: %s", fullPath, err)) + if secret != nil { + OutputSecret(c.UI, secret) + } + return 2 + } + if secret == nil { + // Don't output anything unless using the "table" format + if Format(c.UI) == "table" { + c.UI.Info(fmt.Sprintf("Success! Data written to: %s", fullPath)) + } + return 0 + } + + if c.flagField != "" { + return PrintRawField(c.UI, secret, c.flagField) + } + + // If the secret is wrapped, return the wrapped response. + if secret.WrapInfo != nil && secret.WrapInfo.TTL != 0 { + return OutputSecret(c.UI, secret) + } + + if Format(c.UI) == "table" { + outputPath(c.UI, fullPath, "Secret Path") + metadata := secret.Data + c.UI.Info(getHeaderForMap("Metadata", metadata)) + return OutputData(c.UI, metadata) + } + + return OutputSecret(c.UI, secret) +} diff --git a/command/kv_rollback.go b/command/kv_rollback.go new file mode 100644 index 0000000..c54d7bc --- /dev/null +++ b/command/kv_rollback.go @@ -0,0 +1,294 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "flag" + "fmt" + "path" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*KVRollbackCommand)(nil) + _ cli.CommandAutocomplete = (*KVRollbackCommand)(nil) +) + +type KVRollbackCommand struct { + *BaseCommand + + flagVersion int + flagMount string +} + +func (c *KVRollbackCommand) Synopsis() string { + return "Rolls back to a previous version of data" +} + +func (c *KVRollbackCommand) Help() string { + helpText := ` +Usage: vault kv rollback [options] KEY + + *NOTE*: This is only supported for KV v2 engine mounts. + + Restores a given previous version to the current version at the given path. + The value is written as a new version; for instance, if the current version + is 5 and the rollback version is 2, the data from version 2 will become + version 6. + + $ vault kv rollback -mount=secret -version=2 foo + + The deprecated path-like syntax can also be used, but this should be avoided, + as the fact that it is not actually the full API path to + the secret (secret/data/foo) can cause confusion: + + $ vault kv rollback -version=2 secret/foo + + Additional flags and more advanced use cases are detailed below. + +` + c.Flags().Help() + return strings.TrimSpace(helpText) +} + +func (c *KVRollbackCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + // Common Options + f := set.NewFlagSet("Common Options") + + f.IntVar(&IntVar{ + Name: "version", + Target: &c.flagVersion, + Usage: `Specifies the version number that should be made current again.`, + }) + + f.StringVar(&StringVar{ + Name: "mount", + Target: &c.flagMount, + Default: "", // no default, because the handling of the next arg is determined by whether this flag has a value + Usage: `Specifies the path where the KV backend is mounted. If specified, + the next argument will be interpreted as the secret path. If this flag is + not specified, the next argument will be interpreted as the combined mount + path and secret path, with /data/ automatically appended between KV + v2 secrets.`, + }) + + return set +} + +func (c *KVRollbackCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *KVRollbackCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *KVRollbackCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + var version *int + f.Visit(func(fl *flag.Flag) { + if fl.Name == "version" { + version = &c.flagVersion + } + }) + + args = f.Args() + + switch { + case len(args) != 1: + c.UI.Error(fmt.Sprintf("Invalid number of arguments (expected 1, got %d)", len(args))) + return 1 + case version == nil: + c.UI.Error("Version flag must be specified") + return 1 + case c.flagVersion <= 0: + c.UI.Error(fmt.Sprintf("Invalid value %d for the version flag", c.flagVersion)) + return 1 + } + + var err error + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + // If true, we're working with "-mount=secret foo" syntax. + // If false, we're using "secret/foo" syntax. + mountFlagSyntax := c.flagMount != "" + + var ( + mountPath string + partialPath string + v2 bool + ) + + // Parse the paths and grab the KV version + if mountFlagSyntax { + // In this case, this arg is the secret path (e.g. "foo"). 
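// (Editorial sketch.) The api.KVv2 helper offers this same operation as a
// single call that performs the version pre-reads and check-and-set write
// implemented below; the mount "secret", key "foo", and target version 2 are
// assumptions:
//
//	restored, err := client.KVv2("secret").Rollback(context.Background(), "foo", 2)
//	// If version 5 was current, restored.VersionMetadata.Version is now 6.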
+ partialPath = sanitizePath(args[0])
+ mountPath, v2, err = isKVv2(sanitizePath(c.flagMount), client)
+ if err != nil {
+ c.UI.Error(err.Error())
+ return 2
+ }
+
+ if v2 {
+ partialPath = path.Join(mountPath, partialPath)
+ }
+ } else {
+ // In this case, this arg is a path-like combination of mountPath/secretPath.
+ // (e.g. "secret/foo")
+ partialPath = sanitizePath(args[0])
+ mountPath, v2, err = isKVv2(partialPath, client)
+ if err != nil {
+ c.UI.Error(err.Error())
+ return 2
+ }
+ }
+
+ if !v2 {
+ c.UI.Error("K/V engine mount must be version 2 for rollback support")
+ return 2
+ }
+
+ fullPath := addPrefixToKVPath(partialPath, mountPath, "data", false)
+
+ // First, do a read to get the current version for check-and-set
+ var meta map[string]interface{}
+ {
+ secret, err := kvReadRequest(client, fullPath, nil)
+ if err != nil {
+ c.UI.Error(fmt.Sprintf("Error doing pre-read at %s: %s", fullPath, err))
+ return 2
+ }
+
+ // Make sure a value already exists
+ if secret == nil || secret.Data == nil {
+ c.UI.Error(fmt.Sprintf("No value found at %s", fullPath))
+ return 2
+ }
+
+ // Verify metadata found
+ rawMeta, ok := secret.Data["metadata"]
+ if !ok || rawMeta == nil {
+ c.UI.Error(fmt.Sprintf("No metadata found at %s; rollback only works on existing data", fullPath))
+ return 2
+ }
+ meta, ok = rawMeta.(map[string]interface{})
+ if !ok {
+ c.UI.Error(fmt.Sprintf("Metadata found at %s is not the expected type (JSON object)", fullPath))
+ return 2
+ }
+ if meta == nil {
+ c.UI.Error(fmt.Sprintf("No metadata found at %s; rollback only works on existing data", fullPath))
+ return 2
+ }
+ }
+
+ casVersion := meta["version"]
+
+ // Set the version parameter
+ versionParam := map[string]string{
+ "version": fmt.Sprintf("%d", c.flagVersion),
+ }
+
+ // Now run it again and read the version we want to roll back to
+ var data map[string]interface{}
+ {
+ secret, err := kvReadRequest(client, fullPath, versionParam)
+ if err != nil {
+ c.UI.Error(fmt.Sprintf("Error doing pre-read at %s: %s", fullPath, err))
+ return 2
+ }
+
+ // Make sure a value already exists
+ if secret == nil || secret.Data == nil {
+ c.UI.Error(fmt.Sprintf("No value found at %s", fullPath))
+ return 2
+ }
+
+ // Verify metadata found
+ rawMeta, ok := secret.Data["metadata"]
+ if !ok || rawMeta == nil {
+ c.UI.Error(fmt.Sprintf("No metadata found at %s; rollback only works on existing data", fullPath))
+ return 2
+ }
+ meta, ok := rawMeta.(map[string]interface{})
+ if !ok {
+ c.UI.Error(fmt.Sprintf("Metadata found at %s is not the expected type (JSON object)", fullPath))
+ return 2
+ }
+ if meta == nil {
+ c.UI.Error(fmt.Sprintf("No metadata found at %s; rollback only works on existing data", fullPath))
+ return 2
+ }
+
+ // Verify it hasn't been deleted
+ if meta["deletion_time"] != nil && meta["deletion_time"].(string) != "" {
+ c.UI.Error("Cannot roll back to a version that has been deleted")
+ return 2
+ }
+
+ if meta["destroyed"] != nil && meta["destroyed"].(bool) {
+ c.UI.Error("Cannot roll back to a version that has been destroyed")
+ return 2
+ }
+
+ // Verify old data found
+ rawData, ok := secret.Data["data"]
+ if !ok || rawData == nil {
+ c.UI.Error(fmt.Sprintf("No data found at %s; rollback only works on existing data", fullPath))
+ return 2
+ }
+ data, ok = rawData.(map[string]interface{})
+ if !ok {
+ c.UI.Error(fmt.Sprintf("Data found at %s is not the expected type (JSON object)", fullPath))
+ return 2
+ }
+ if data == nil {
+
c.UI.Error(fmt.Sprintf("No data found at %s; rollback only works on existing data", fullPath)) + return 2 + } + } + + secret, err := client.Logical().Write(fullPath, map[string]interface{}{ + "data": data, + "options": map[string]interface{}{ + "cas": casVersion, + }, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error writing data to %s: %s", fullPath, err)) + return 2 + } + if secret == nil { + // Don't output anything unless using the "table" format + if Format(c.UI) == "table" { + c.UI.Info(fmt.Sprintf("Success! Data written to: %s", fullPath)) + } + return 0 + } + + if c.flagField != "" { + return PrintRawField(c.UI, secret, c.flagField) + } + + return OutputSecret(c.UI, secret) +} diff --git a/command/kv_test.go b/command/kv_test.go new file mode 100644 index 0000000..6564208 --- /dev/null +++ b/command/kv_test.go @@ -0,0 +1,1546 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "context" + "fmt" + "io" + "strings" + "testing" + "time" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" +) + +func testKVPutCommand(tb testing.TB) (*cli.MockUi, *KVPutCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &KVPutCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func retryKVCommand(t *testing.T, cmdFunc func() (int, string)) (int, string) { + t.Helper() + + var code int + var combined string + + // Loop until return message does not indicate upgrade, or timeout. + timeout := time.After(20 * time.Second) + ticker := time.Tick(time.Second) + + for { + select { + case <-timeout: + t.Errorf("timeout expired waiting for upgrade: %q", combined) + return code, combined + case <-ticker: + code, combined = cmdFunc() + + // This is an error if a v1 mount, but test case doesn't + // currently contain the information to know the difference. 
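// (Editorial sketch.) Stepping back to the rollback command above: its two
// pre-reads and final write reduce to a pinned-version read plus a
// check-and-set write. With the raw Logical API, assuming mount "secret",
// target version 2, and current version 5:
//
//	old, err := client.Logical().ReadWithData("secret/data/foo",
//		map[string][]string{"version": {"2"}}) // read the version to restore
//	if err != nil || old == nil {
//		// handle the missing version
//	}
//	_, err = client.Logical().Write("secret/data/foo", map[string]interface{}{
//		"data":    old.Data["data"],
//		"options": map[string]interface{}{"cas": 5}, // fail if 5 is no longer current
//	})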
+ if !strings.Contains(combined, "Upgrading from non-versioned to versioned") { + return code, combined + } + } + } +} + +func kvPutWithRetry(t *testing.T, client *api.Client, args []string) (int, string) { + t.Helper() + + return retryKVCommand(t, func() (int, string) { + ui, cmd := testKVPutCommand(t) + cmd.client = client + + code := cmd.Run(args) + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + + return code, combined + }) +} + +func kvPatchWithRetry(t *testing.T, client *api.Client, args []string, stdin *io.PipeReader) (int, string) { + t.Helper() + + return retryKVCommand(t, func() (int, string) { + ui, cmd := testKVPatchCommand(t) + cmd.client = client + + if stdin != nil { + cmd.testStdin = stdin + } + + code := cmd.Run(args) + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + + return code, combined + }) +} + +func TestKVPutCommand(t *testing.T) { + t.Parallel() + + v2ExpectedFields := []string{"created_time", "custom_metadata", "deletion_time", "deletion_time", "version"} + + cases := []struct { + name string + args []string + outStrings []string + code int + }{ + { + "not_enough_args", + []string{}, + []string{"Not enough arguments"}, + 1, + }, + { + "empty_kvs", + []string{"secret/write/foo"}, + []string{"Must supply data"}, + 1, + }, + { + "kvs_no_value", + []string{"secret/write/foo", "foo"}, + []string{"Failed to parse K=V data"}, + 1, + }, + { + "single_value", + []string{"secret/write/foo", "foo=bar"}, + []string{"Success!"}, + 0, + }, + { + "multi_value", + []string{"secret/write/foo", "foo=bar", "zip=zap"}, + []string{"Success!"}, + 0, + }, + { + "v1_mount_flag_syntax", + []string{"-mount", "secret", "write/foo", "foo=bar"}, + []string{"Success!"}, + 0, + }, + { + "v1_mount_flag_syntax_key_same_as_mount", + []string{"-mount", "secret", "secret", "foo=bar"}, + []string{"Success!"}, + 0, + }, + { + "v2_single_value", + []string{"kv/write/foo", "foo=bar"}, + v2ExpectedFields, + 0, + }, + { + "v2_multi_value", + []string{"kv/write/foo", "foo=bar", "zip=zap"}, + v2ExpectedFields, + 0, + }, + { + "v2_secret_path", + []string{"kv/write/foo", "foo=bar"}, + []string{"== Secret Path ==", "kv/data/write/foo"}, + 0, + }, + { + "v2_mount_flag_syntax", + []string{"-mount", "kv", "write/foo", "foo=bar"}, + v2ExpectedFields, + 0, + }, + { + "v2_mount_flag_syntax_key_same_as_mount", + []string{"-mount", "kv", "kv", "foo=bar"}, + v2ExpectedFields, + 0, + }, + { + "v2_single_value_backslash", + []string{"kv/write/foo", "foo=\\"}, + []string{"== Secret Path ==", "kv/data/write/foo"}, + 0, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().Mount("kv/", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatal(err) + } + + code, combined := kvPutWithRetry(t, client, tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + for _, str := range tc.outStrings { + if !strings.Contains(combined, str) { + t.Errorf("expected %q to contain %q", combined, str) + } + } + }) + } + + t.Run("v2_cas", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().Mount("kv/", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatal(err) + } + + // Only have to potentially retry the first time. 
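// (Editorial sketch.) The check-and-set rules this test exercises, shown with
// the api.KVv2 helper; the mount and path names are assumptions:
//
//	ctx := context.Background()
//	data := map[string]interface{}{"bar": "baz"}
//	kv := client.KVv2("kv")
//	_, _ = kv.Put(ctx, "write/cas", data, api.WithCheckAndSet(0)) // succeeds only if the key doesn't exist
//	_, _ = kv.Put(ctx, "write/cas", data, api.WithCheckAndSet(1)) // succeeds only if version 1 is current
//	// Repeating cas=1 now fails: "check-and-set parameter did not match the current version"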
+ code, combined := kvPutWithRetry(t, client, []string{ + "-cas", "0", "kv/write/cas", "bar=baz", + }) + if code != 0 { + t.Fatalf("expected 0 to be %d", code) + } + + for _, str := range v2ExpectedFields { + if !strings.Contains(combined, str) { + t.Errorf("expected %q to contain %q", combined, str) + } + } + + ui, cmd := testKVPutCommand(t) + cmd.client = client + code = cmd.Run([]string{ + "-cas", "1", "kv/write/cas", "bar=baz", + }) + if code != 0 { + t.Fatalf("expected 0 to be %d", code) + } + combined = ui.OutputWriter.String() + ui.ErrorWriter.String() + + for _, str := range v2ExpectedFields { + if !strings.Contains(combined, str) { + t.Errorf("expected %q to contain %q", combined, str) + } + } + + ui, cmd = testKVPutCommand(t) + cmd.client = client + code = cmd.Run([]string{ + "-cas", "1", "kv/write/cas", "bar=baz", + }) + if code != 2 { + t.Fatalf("expected 2 to be %d", code) + } + combined = ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, "check-and-set parameter did not match the current version") { + t.Errorf("expected %q to contain %q", combined, "check-and-set parameter did not match the current version") + } + }) + + t.Run("v1_data", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testKVPutCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "secret/write/data", "bar=baz", + }) + if code != 0 { + t.Fatalf("expected 0 to be %d", code) + } + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, "Success!") { + t.Errorf("expected %q to contain %q", combined, "created_time") + } + + ui, rcmd := testReadCommand(t) + rcmd.client = client + code = rcmd.Run([]string{ + "secret/write/data", + }) + if code != 0 { + t.Fatalf("expected 0 to be %d", code) + } + combined = ui.OutputWriter.String() + ui.ErrorWriter.String() + if strings.Contains(combined, "data") { + t.Errorf("expected %q not to contain %q", combined, "data") + } + }) + + t.Run("stdin_full", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + stdinR, stdinW := io.Pipe() + go func() { + stdinW.Write([]byte(`{"foo":"bar"}`)) + stdinW.Close() + }() + + _, cmd := testKVPutCommand(t) + cmd.client = client + cmd.testStdin = stdinR + + code := cmd.Run([]string{ + "secret/write/stdin_full", "-", + }) + if code != 0 { + t.Fatalf("expected 0 to be %d", code) + } + + secret, err := client.Logical().Read("secret/write/stdin_full") + if err != nil { + t.Fatal(err) + } + if secret == nil || secret.Data == nil { + t.Fatal("expected secret to have data") + } + if exp, act := "bar", secret.Data["foo"].(string); exp != act { + t.Errorf("expected %q to be %q", act, exp) + } + }) + + t.Run("stdin_value", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + stdinR, stdinW := io.Pipe() + go func() { + stdinW.Write([]byte("bar")) + stdinW.Close() + }() + + _, cmd := testKVPutCommand(t) + cmd.client = client + cmd.testStdin = stdinR + + code := cmd.Run([]string{ + "secret/write/stdin_value", "foo=-", + }) + if code != 0 { + t.Fatalf("expected 0 to be %d", code) + } + + secret, err := client.Logical().Read("secret/write/stdin_value") + if err != nil { + t.Fatal(err) + } + if secret == nil || secret.Data == nil { + t.Fatal("expected secret to have data") + } + if exp, act := "bar", secret.Data["foo"].(string); exp != act { + t.Errorf("expected %q to be %q", act, exp) + } + }) + + t.Run("integration", func(t 
*testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + _, cmd := testKVPutCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "secret/write/integration", "foo=bar", "zip=zap", + }) + if code != 0 { + t.Fatalf("expected 0 to be %d", code) + } + + secret, err := client.Logical().Read("secret/write/integration") + if err != nil { + t.Fatal(err) + } + if secret == nil || secret.Data == nil { + t.Fatal("expected secret to have data") + } + if exp, act := "bar", secret.Data["foo"].(string); exp != act { + t.Errorf("expected %q to be %q", act, exp) + } + if exp, act := "zap", secret.Data["zip"].(string); exp != act { + t.Errorf("expected %q to be %q", act, exp) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testKVPutCommand(t) + assertNoTabs(t, cmd) + }) +} + +func testKVGetCommand(tb testing.TB) (*cli.MockUi, *KVGetCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &KVGetCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestKVGetCommand(t *testing.T) { + t.Parallel() + + baseV2ExpectedFields := []string{"created_time", "custom_metadata", "deletion_time", "deletion_time", "version"} + + cases := []struct { + name string + args []string + outStrings []string + code int + }{ + { + "not_enough_args", + []string{}, + []string{"Not enough arguments"}, + 1, + }, + { + "too_many_args", + []string{"foo", "bar"}, + []string{"Too many arguments"}, + 1, + }, + { + "not_found", + []string{"secret/nope/not/once/never"}, + []string{"No value found at secret/nope/not/once/never"}, + 2, + }, + { + "default", + []string{"secret/read/foo"}, + []string{"foo"}, + 0, + }, + { + "v1_field", + []string{"-field", "foo", "secret/read/foo"}, + []string{"bar"}, + 0, + }, + { + "v1_mount_flag_syntax", + []string{"-mount", "secret", "read/foo"}, + []string{"foo"}, + 0, + }, + { + "v2_field", + []string{"-field", "foo", "kv/read/foo"}, + []string{"bar"}, + 0, + }, + { + "v2_mount_flag_syntax", + []string{"-mount", "kv", "read/foo"}, + append(baseV2ExpectedFields, "foo"), + 0, + }, + { + "v2_mount_flag_syntax_leading_slash", + []string{"-mount", "kv", "/read/foo"}, + append(baseV2ExpectedFields, "foo"), + 0, + }, + { + "v1_mount_flag_syntax_key_same_as_mount", + []string{"-mount", "kv", "kv"}, + append(baseV2ExpectedFields, "foo"), + 0, + }, + { + "v2_mount_flag_syntax_key_same_as_mount", + []string{"-mount", "kv", "kv"}, + append(baseV2ExpectedFields, "foo"), + 0, + }, + { + "v2_not_found", + []string{"kv/nope/not/once/never"}, + []string{"No value found at kv/data/nope/not/once/never"}, + 2, + }, + { + "v2_read", + []string{"kv/read/foo"}, + append(baseV2ExpectedFields, "foo"), + 0, + }, + { + "v2_read_leading_slash", + []string{"/kv/read/foo"}, + append(baseV2ExpectedFields, "foo"), + 0, + }, + { + "v2_read_version", + []string{"--version", "1", "kv/read/foo"}, + append(baseV2ExpectedFields, "foo"), + 0, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + if err := client.Sys().Mount("kv/", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatal(err) + } + + // Give time for the upgrade code to run/finish + time.Sleep(time.Second) + + if _, err := client.Logical().Write("secret/read/foo", map[string]interface{}{ + "foo": "bar", + }); err != nil { + t.Fatal(err) + } + + if _, err := client.Logical().Write("kv/data/read/foo", 
map[string]interface{}{ + "data": map[string]interface{}{ + "foo": "bar", + }, + }); err != nil { + t.Fatal(err) + } + + // create KV entries to test -mount flag where secret key is same as mount path + if _, err := client.Logical().Write("secret/secret", map[string]interface{}{ + "foo": "bar", + }); err != nil { + t.Fatal(err) + } + + if _, err := client.Logical().Write("kv/data/kv", map[string]interface{}{ + "data": map[string]interface{}{ + "foo": "bar", + }, + }); err != nil { + t.Fatal(err) + } + + ui, cmd := testKVGetCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + + for _, str := range tc.outStrings { + if !strings.Contains(combined, str) { + t.Errorf("expected %q to contain %q", combined, str) + } + } + }) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testKVGetCommand(t) + assertNoTabs(t, cmd) + }) +} + +func testKVListCommand(tb testing.TB) (*cli.MockUi, *KVListCommand) { + tb.Helper() + ui := cli.NewMockUi() + cmd := &KVListCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } + + return ui, cmd +} + +// TestKVListCommand runs tests for `vault kv list` +func TestKVListCommand(t *testing.T) { + testCases := []struct { + name string + args []string + outStrings []string + code int + }{ + { + name: "default", + args: []string{"kv/my-prefix"}, + outStrings: []string{"secret-0", "secret-1", "secret-2"}, + code: 0, + }, + { + name: "not_enough_args", + args: []string{}, + outStrings: []string{"Not enough arguments"}, + code: 1, + }, + { + name: "v2_default_with_mount", + args: []string{"-mount", "kv", "my-prefix"}, + outStrings: []string{"secret-0", "secret-1", "secret-2"}, + code: 0, + }, + { + name: "v1_default_with_mount", + args: []string{"kv/my-prefix"}, + outStrings: []string{"secret-0", "secret-1", "secret-2"}, + code: 0, + }, + { + name: "v2_not_found", + args: []string{"kv/nope/not/once/never"}, + outStrings: []string{"No value found at kv/metadata/nope/not/once/never"}, + code: 2, + }, + { + name: "v1_mount_only", + args: []string{"kv"}, + outStrings: []string{"my-prefix"}, + code: 0, + }, + { + name: "v2_mount_only", + args: []string{"-mount", "kv"}, + outStrings: []string{"my-prefix"}, + code: 0, + }, + { + // this is behavior that should be tested + // `kv` here is an explicit mount + // `my-prefix` is not + // the current kv code will ignore `my-prefix` + name: "ignore_multi_part_mounts", + args: []string{"-mount", "kv/my-prefix"}, + outStrings: []string{"my-prefix"}, + code: 0, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, testCase := range testCases { + testCase := testCase + + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + // test setup + client, closer := testVaultServer(t) + defer closer() + + // enable kv-v2 backend + if err := client.Sys().Mount("kv/", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatal(err) + } + time.Sleep(time.Second) + + ctx := context.Background() + for i := 0; i < 3; i++ { + path := fmt.Sprintf("my-prefix/secret-%d", i) + _, err := client.KVv2("kv/").Put(ctx, path, map[string]interface{}{ + "foo": "bar", + }) + if err != nil { + t.Fatal(err) + } + } + + ui, cmd := testKVListCommand(t) + cmd.client = client + + code := cmd.Run(testCase.args) + if code != testCase.code { + t.Errorf("expected %d to be %d", code, testCase.code) + } + + combined := ui.OutputWriter.String() + 
ui.ErrorWriter.String() + for _, str := range testCase.outStrings { + if !strings.Contains(combined, str) { + t.Errorf("expected %q to contain %q", combined, str) + } + } + }) + } + }) +} + +func testKVMetadataGetCommand(tb testing.TB) (*cli.MockUi, *KVMetadataGetCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &KVMetadataGetCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestKVMetadataGetCommand(t *testing.T) { + t.Parallel() + + expectedTopLevelFields := []string{ + "cas_required", + "created_time", + "current_version", + "custom_metadata", + "delete_version_after", + "max_versions", + "oldest_version", + "updated_time", + } + + expectedVersionFields := []string{ + "created_time", // field is redundant + "deletion_time", + "destroyed", + } + + cases := []struct { + name string + args []string + outStrings []string + code int + }{ + { + "v1", + []string{"secret/foo"}, + []string{"Metadata not supported on KV Version 1"}, + 1, + }, + { + "metadata_exists", + []string{"kv/foo"}, + expectedTopLevelFields, + 0, + }, + // ensure that all top-level and version-level fields are output along with version num + { + "versions_exist", + []string{"kv/foo"}, + append(expectedTopLevelFields, expectedVersionFields[:]...), + 0, + }, + { + "mount_flag_syntax", + []string{"-mount", "kv", "foo"}, + expectedTopLevelFields, + 0, + }, + { + "mount_flag_syntax_key_same_as_mount", + []string{"-mount", "kv", "kv"}, + expectedTopLevelFields, + 0, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + if err := client.Sys().Mount("kv/", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatal(err) + } + + // Give time for the upgrade code to run/finish + time.Sleep(time.Second) + + if _, err := client.Logical().Write("kv/data/foo", map[string]interface{}{ + "data": map[string]interface{}{ + "foo": "bar", + }, + }); err != nil { + t.Fatal(err) + } + + // create KV entry to test -mount flag where secret key is same as mount path + if _, err := client.Logical().Write("kv/data/kv", map[string]interface{}{ + "data": map[string]interface{}{ + "foo": "bar", + }, + }); err != nil { + t.Fatal(err) + } + + ui, cmd := testKVMetadataGetCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + for _, str := range tc.outStrings { + if !strings.Contains(combined, str) { + t.Errorf("expected %q to contain %q", combined, str) + } + } + }) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testKVMetadataGetCommand(t) + assertNoTabs(t, cmd) + }) +} + +func testKVPatchCommand(tb testing.TB) (*cli.MockUi, *KVPatchCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &KVPatchCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestKVPatchCommand_ArgValidation(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + []string{}, + "Not enough arguments", + 1, + }, + { + "empty_kvs", + []string{"kv/patch/foo"}, + "Must supply data", + 1, + }, + { + "kvs_no_value", + []string{"kv/patch/foo", "foo"}, + "Failed to parse K=V data", + 1, + }, + { + "mount_flag_syntax", + []string{"-mount", "kv"}, + "Not enough arguments", + 1, + }, + } + + for _, tc := 
range cases { + tc := tc // capture range variable + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().Mount("kv/", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatalf("kv-v2 mount attempt failed - err: %#v\n", err) + } + + code, combined := kvPatchWithRetry(t, client, tc.args, nil) + + if code != tc.code { + t.Fatalf("expected code to be %d but was %d for patch cmd with args %#v\n", tc.code, code, tc.args) + } + + if !strings.Contains(combined, tc.out) { + t.Fatalf("expected output to be %q but was %q for patch cmd with args %#v\n", tc.out, combined, tc.args) + } + }) + } +} + +// expectedPatchFields produces a deterministic slice of +// expected fields for patch command output since const +// slices are not supported +func expectedPatchFields() []string { + return []string{ + "created_time", + "custom_metadata", + "deletion_time", + "destroyed", + "version", + } +} + +func TestKVPatchCommand_StdinFull(t *testing.T) { + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().Mount("kv/", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatalf("kv-v2 mount attempt failed - err: %#v\n", err) + } + + if _, err := client.Logical().Write("kv/data/patch/foo", map[string]interface{}{ + "data": map[string]interface{}{ + "foo": "a", + }, + }); err != nil { + t.Fatalf("write failed, err: %#v\n", err) + } + + cases := [][]string{ + {"kv/patch/foo", "-"}, + {"-mount", "kv", "patch/foo", "-"}, + } + for i, args := range cases { + stdinR, stdinW := io.Pipe() + go func() { + stdinW.Write([]byte(fmt.Sprintf(`{"foo%d":"bar%d"}`, i, i))) + stdinW.Close() + }() + code, combined := kvPatchWithRetry(t, client, args, stdinR) + + for _, str := range expectedPatchFields() { + if !strings.Contains(combined, str) { + t.Errorf("expected %q to contain %q", combined, str) + } + } + + if code != 0 { + t.Fatalf("expected code to be 0 but was %d for patch cmd with args %#v\n", code, args) + } + + secret, err := client.Logical().ReadWithContext(context.Background(), "kv/data/patch/foo") + if err != nil { + t.Fatalf("read failed, err: %#v\n", err) + } + + if secret == nil || secret.Data == nil { + t.Fatal("expected secret to have data") + } + + secretDataRaw, ok := secret.Data["data"] + + if !ok { + t.Fatalf("expected secret to have nested data key, data: %#v", secret.Data) + } + + secretData := secretDataRaw.(map[string]interface{}) + foo, ok := secretData[fmt.Sprintf("foo%d", i)].(string) + if !ok { + t.Fatal("expected foo to be a string but it wasn't") + } + + if exp, act := fmt.Sprintf("bar%d", i), foo; exp != act { + t.Fatalf("expected %q to be %q, data: %#v\n", act, exp, secret.Data) + } + } +} + +func TestKVPatchCommand_StdinValue(t *testing.T) { + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().Mount("kv/", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatalf("kv-v2 mount attempt failed - err: %#v\n", err) + } + + if _, err := client.Logical().Write("kv/data/patch/foo", map[string]interface{}{ + "data": map[string]interface{}{ + "foo": "a", + }, + }); err != nil { + t.Fatalf("write failed, err: %#v\n", err) + } + + cases := [][]string{ + {"kv/patch/foo", "foo=-"}, + {"-mount", "kv", "patch/foo", "foo=-"}, + } + + for i, args := range cases { + stdinR, stdinW := io.Pipe() + go func() { + stdinW.Write([]byte(fmt.Sprintf("bar%d", i))) + stdinW.Close() + }() + + code, combined := kvPatchWithRetry(t, client, args, stdinR) + if code != 0 { + 
t.Fatalf("expected code to be 0 but was %d for patch cmd with args %#v\n", code, args) + } + + for _, str := range expectedPatchFields() { + if !strings.Contains(combined, str) { + t.Errorf("expected %q to contain %q", combined, str) + } + } + + secret, err := client.Logical().ReadWithContext(context.Background(), "kv/data/patch/foo") + if err != nil { + t.Fatalf("read failed, err: %#v\n", err) + } + + if secret == nil || secret.Data == nil { + t.Fatal("expected secret to have data") + } + + secretDataRaw, ok := secret.Data["data"] + + if !ok { + t.Fatalf("expected secret to have nested data key, data: %#v\n", secret.Data) + } + + secretData := secretDataRaw.(map[string]interface{}) + + if exp, act := fmt.Sprintf("bar%d", i), secretData["foo"].(string); exp != act { + t.Fatalf("expected %q to be %q, data: %#v\n", act, exp, secret.Data) + } + } +} + +func TestKVPatchCommand_RWMethodNotExists(t *testing.T) { + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().Mount("kv/", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatalf("kv-v2 mount attempt failed - err: %#v\n", err) + } + + cases := [][]string{ + {"-method", "rw", "kv/patch/foo", "foo=a"}, + {"-method", "rw", "-mount", "kv", "patch/foo", "foo=a"}, + } + + for _, args := range cases { + code, combined := kvPatchWithRetry(t, client, args, nil) + + if code != 2 { + t.Fatalf("expected code to be 2 but was %d for patch cmd with args %#v\n", code, args) + } + + expectedOutputSubstr := "No value found" + if !strings.Contains(combined, expectedOutputSubstr) { + t.Fatalf("expected output %q to contain %q for patch cmd with args %#v\n", combined, expectedOutputSubstr, args) + } + } +} + +func TestKVPatchCommand_RWMethodSucceeds(t *testing.T) { + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().Mount("kv/", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatalf("kv-v2 mount attempt failed - err: %#v\n", err) + } + + if _, err := client.Logical().Write("kv/data/patch/foo", map[string]interface{}{ + "data": map[string]interface{}{ + "foo": "a", + "bar": "b", + }, + }); err != nil { + t.Fatalf("write failed, err: %#v\n", err) + } + + // Test single value + args := []string{"-method", "rw", "kv/patch/foo", "foo=aa"} + code, combined := kvPatchWithRetry(t, client, args, nil) + + if code != 0 { + t.Fatalf("expected code to be 0 but was %d for patch cmd with args %#v\n", code, args) + } + + for _, str := range expectedPatchFields() { + if !strings.Contains(combined, str) { + t.Errorf("expected %q to contain %q", combined, str) + } + } + + // Test that full path was output + for _, str := range []string{"== Secret Path ==", "kv/data/patch/foo"} { + if !strings.Contains(combined, str) { + t.Errorf("expected %q to contain %q", combined, str) + } + } + + // Test multi value + args = []string{"-method", "rw", "kv/patch/foo", "foo=aaa", "bar=bbb"} + code, combined = kvPatchWithRetry(t, client, args, nil) + + if code != 0 { + t.Fatalf("expected code to be 0 but was %d for patch cmd with args %#v\n", code, args) + } + + for _, str := range expectedPatchFields() { + if !strings.Contains(combined, str) { + t.Errorf("expected %q to contain %q", combined, str) + } + } +} + +func TestKVPatchCommand_CAS(t *testing.T) { + cases := []struct { + name string + key string + args []string + expected string + outStrings []string + code int + }{ + { + "right version", + "foo", + []string{"-cas", "1", "kv/foo", "bar=quux"}, + "quux", + expectedPatchFields(), + 0, + }, + { + "wrong version", + 
"foo", + []string{"-cas", "2", "kv/foo", "bar=wibble"}, + "baz", + []string{"check-and-set parameter did not match the current version"}, + 2, + }, + { + "mount_flag_syntax", + "foo", + []string{"-mount", "kv", "-cas", "1", "foo", "bar=quux"}, + "quux", + expectedPatchFields(), + 0, + }, + { + "v2_mount_flag_syntax_key_same_as_mount", + "kv", + []string{"-mount", "kv", "-cas", "1", "kv", "bar=quux"}, + "quux", + expectedPatchFields(), + 0, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().Mount("kv/", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatalf("kv-v2 mount attempt failed - err: %#v\n", err) + } + + // create a policy with patch capability + policy := `path "kv/*" { capabilities = ["create", "update", "read", "patch"] }` + secretAuth, err := createTokenForPolicy(t, client, policy) + if err != nil { + t.Fatalf("policy/token creation failed for policy %s, err: %#v\n", policy, err) + } + + kvClient, err := client.Clone() + if err != nil { + t.Fatal(err) + } + + kvClient.SetToken(secretAuth.ClientToken) + + data := map[string]interface{}{ + "bar": "baz", + } + + _, err = kvClient.Logical().Write("kv/data/"+tc.key, map[string]interface{}{"data": data}) + if err != nil { + t.Fatal(err) + } + + code, combined := kvPatchWithRetry(t, kvClient, tc.args, nil) + + if code != tc.code { + t.Fatalf("expected code to be %d but was %d", tc.code, code) + } + + for _, str := range tc.outStrings { + if !strings.Contains(combined, str) { + t.Errorf("expected %q to contain %q", combined, str) + } + } + + secret, err := kvClient.Logical().ReadWithContext(context.Background(), "kv/data/"+tc.key) + if err != nil { + t.Fatal(err) + } + bar := secret.Data["data"].(map[string]interface{})["bar"] + if bar != tc.expected { + t.Fatalf("expected bar to be %q but it was %q", tc.expected, bar) + } + }) + } +} + +func TestKVPatchCommand_Methods(t *testing.T) { + cases := []struct { + name string + args []string + expected string + code int + }{ + { + "rw", + []string{"-method", "rw", "kv/foo", "bar=quux"}, + "quux", + 0, + }, + { + "patch", + []string{"-method", "patch", "kv/foo", "bar=wibble"}, + "wibble", + 0, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().Mount("kv/", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatalf("kv-v2 mount attempt failed - err: %#v\n", err) + } + + // create a policy with patch capability + policy := `path "kv/*" { capabilities = ["create", "update", "read", "patch"] }` + secretAuth, err := createTokenForPolicy(t, client, policy) + if err != nil { + t.Fatalf("policy/token creation failed for policy %s, err: %#v\n", policy, err) + } + + kvClient, err := client.Clone() + if err != nil { + t.Fatal(err) + } + + kvClient.SetToken(secretAuth.ClientToken) + + _, err = kvClient.Logical().Write("kv/data/foo", map[string]interface{}{"data": map[string]interface{}{"bar": "baz"}}) + if err != nil { + t.Fatal(err) + } + + code, _ := kvPatchWithRetry(t, kvClient, tc.args, nil) + + if code != tc.code { + t.Fatalf("expected code to be %d but was %d", tc.code, code) + } + + secret, err := kvClient.Logical().ReadWithContext(context.Background(), "kv/data/foo") + if err != nil { + t.Fatal(err) + } + bar := secret.Data["data"].(map[string]interface{})["bar"] + if bar != tc.expected { + t.Fatalf("expected 
bar to be %q but it was %q", tc.expected, bar) + } + }) + } +} + +func TestKVPatchCommand_403Fallback(t *testing.T) { + cases := []struct { + name string + args []string + expected string + code int + }{ + // if no -method is specified, and patch fails, it should fall back to rw and succeed + { + "unspecified", + []string{"kv/foo", "bar=quux"}, + `add the "patch" capability to your ACL policy`, + 0, + }, + // if -method=patch is specified, and patch fails, it should not fall back, and just error + { + "specifying patch", + []string{"-method", "patch", "kv/foo", "bar=quux"}, + "permission denied", + 2, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().Mount("kv/", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatalf("kv-v2 mount attempt failed - err: %#v\n", err) + } + + // create a policy without patch capability + policy := `path "kv/*" { capabilities = ["create", "update", "read"] }` + secretAuth, err := createTokenForPolicy(t, client, policy) + if err != nil { + t.Fatalf("policy/token creation failed for policy %s, err: %#v\n", policy, err) + } + + kvClient, err := client.Clone() + if err != nil { + t.Fatal(err) + } + + kvClient.SetToken(secretAuth.ClientToken) + + // Write a value then attempt to patch it + _, err = kvClient.Logical().Write("kv/data/foo", map[string]interface{}{"data": map[string]interface{}{"bar": "baz"}}) + if err != nil { + t.Fatal(err) + } + + code, combined := kvPatchWithRetry(t, kvClient, tc.args, nil) + + if code != tc.code { + t.Fatalf("expected code to be %d but was %d", tc.code, code) + } + + if !strings.Contains(combined, tc.expected) { + t.Errorf("expected %q to contain %q", combined, tc.expected) + } + }) + } +} + +func TestKVPatchCommand_RWMethodPolicyVariations(t *testing.T) { + cases := []struct { + name string + args []string + policy string + expected string + code int + }{ + // if the policy doesn't have read capability and -method=rw is specified, it fails + { + "no read", + []string{"-method", "rw", "kv/foo", "bar=quux"}, + `path "kv/*" { capabilities = ["create", "update"] }`, + "permission denied", + 2, + }, + // if the policy doesn't have update capability and -method=rw is specified, it fails + { + "no update", + []string{"-method", "rw", "kv/foo", "bar=quux"}, + `path "kv/*" { capabilities = ["create", "read"] }`, + "permission denied", + 2, + }, + // if the policy has both read and update and -method=rw is specified, it succeeds + { + "read and update", + []string{"-method", "rw", "kv/foo", "bar=quux"}, + `path "kv/*" { capabilities = ["create", "read", "update"] }`, + "", + 0, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().Mount("kv/", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatalf("kv-v2 mount attempt failed - err: %#v\n", err) + } + + secretAuth, err := createTokenForPolicy(t, client, tc.policy) + if err != nil { + t.Fatalf("policy/token creation failed for policy %s, err: %#v\n", tc.policy, err) + } + + client.SetToken(secretAuth.ClientToken) + + putArgs := []string{"kv/foo", "foo=bar", "bar=baz"} + code, combined := kvPutWithRetry(t, client, putArgs) + if code != 0 { + t.Errorf("write failed, expected %d to be 0, output: %s", code, combined) + } + + code, combined = kvPatchWithRetry(t, client, tc.args, nil) + if code != tc.code { + 
t.Fatalf("expected code to be %d but was %d for patch cmd with args %#v\n", tc.code, code, tc.args) + } + + if code != 0 { + if !strings.Contains(combined, tc.expected) { + t.Fatalf("expected output %q to contain %q for patch cmd with args %#v\n", combined, tc.expected, tc.args) + } + } + }) + } +} + +func TestPadEqualSigns(t *testing.T) { + t.Parallel() + + header := "Test Header" + + cases := []struct { + name string + totalPathLen int + expectedCount int + }{ + { + name: "path with even length", + totalPathLen: 20, + expectedCount: 4, + }, + { + name: "path with odd length", + totalPathLen: 19, + expectedCount: 3, + }, + { + name: "smallest possible path", + totalPathLen: 8, + expectedCount: 2, + }, + } + + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + padded := padEqualSigns(header, tc.totalPathLen) + + signs := strings.Split(padded, fmt.Sprintf(" %s ", header)) + if len(signs[0]) != len(signs[1]) { + t.Fatalf("expected an equal number of equal signs on both sides") + } + for _, sign := range signs { + count := strings.Count(sign, "=") + if count != tc.expectedCount { + t.Fatalf("expected %d equal signs but there were %d", tc.expectedCount, count) + } + } + }) + } +} + +func createTokenForPolicy(t *testing.T, client *api.Client, policy string) (*api.SecretAuth, error) { + t.Helper() + + if err := client.Sys().PutPolicy("policy", policy); err != nil { + return nil, err + } + + secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{ + Policies: []string{"policy"}, + TTL: "30m", + }) + if err != nil { + return nil, err + } + + if secret == nil || secret.Auth == nil || secret.Auth.ClientToken == "" { + return nil, fmt.Errorf("missing auth data: %#v", secret) + } + + return secret.Auth, err +} diff --git a/command/kv_undelete.go b/command/kv_undelete.go new file mode 100644 index 0000000..25de588 --- /dev/null +++ b/command/kv_undelete.go @@ -0,0 +1,175 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*KVUndeleteCommand)(nil) + _ cli.CommandAutocomplete = (*KVUndeleteCommand)(nil) +) + +type KVUndeleteCommand struct { + *BaseCommand + + flagVersions []string + flagMount string +} + +func (c *KVUndeleteCommand) Synopsis() string { + return "Undeletes versions in the KV store" +} + +func (c *KVUndeleteCommand) Help() string { + helpText := ` +Usage: vault kv undelete [options] KEY + + Undeletes the data for the provided version and path in the key-value store. + This restores the data, allowing it to be returned on get requests. + + To undelete version 3 of key "foo": + + $ vault kv undelete -mount=secret -versions=3 foo + + The deprecated path-like syntax can also be used, but this should be avoided, + as the fact that it is not actually the full API path to + the secret (secret/data/foo) can cause confusion: + + $ vault kv undelete -versions=3 secret/foo + + Additional flags and more advanced use cases are detailed below. 
+ +` + c.Flags().Help() + return strings.TrimSpace(helpText) +} + +func (c *KVUndeleteCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + // Common Options + f := set.NewFlagSet("Common Options") + + f.StringSliceVar(&StringSliceVar{ + Name: "versions", + Target: &c.flagVersions, + Default: nil, + Usage: `Specifies the version numbers to undelete.`, + }) + + f.StringVar(&StringVar{ + Name: "mount", + Target: &c.flagMount, + Default: "", // no default, because the handling of the next arg is determined by whether this flag has a value + Usage: `Specifies the path where the KV backend is mounted. If specified, + the next argument will be interpreted as the secret path. If this flag is + not specified, the next argument will be interpreted as the combined mount + path and secret path, with /data/ automatically appended between KV + v2 secrets.`, + }) + + return set +} + +func (c *KVUndeleteCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *KVUndeleteCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *KVUndeleteCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + if len(c.flagVersions) == 0 { + c.UI.Error("No versions provided, use the \"-versions\" flag to specify the version to undelete.") + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + // If true, we're working with "-mount=secret foo" syntax. + // If false, we're using "secret/foo" syntax. + mountFlagSyntax := (c.flagMount != "") + + var ( + mountPath string + partialPath string + v2 bool + ) + + // Parse the paths and grab the KV version + if mountFlagSyntax { + // In this case, this arg is the secret path (e.g. "foo"). + partialPath = sanitizePath(args[0]) + mountPath = sanitizePath(c.flagMount) + _, v2, err = isKVv2(mountPath, client) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + } else { + // In this case, this arg is a path-like combination of mountPath/secretPath. + // (e.g. "secret/foo") + partialPath = sanitizePath(args[0]) + mountPath, v2, err = isKVv2(partialPath, client) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + } + + if !v2 { + c.UI.Error("Undelete not supported on KV Version 1") + return 1 + } + + undeletePath := addPrefixToKVPath(partialPath, mountPath, "undelete", false) + data := map[string]interface{}{ + "versions": kvParseVersionsFlags(c.flagVersions), + } + + secret, err := client.Logical().Write(undeletePath, data) + if err != nil { + c.UI.Error(fmt.Sprintf("Error writing data to %s: %s", undeletePath, err)) + if secret != nil { + OutputSecret(c.UI, secret) + } + return 2 + } + if secret == nil { + // Don't output anything unless using the "table" format + if Format(c.UI) == "table" { + c.UI.Info(fmt.Sprintf("Success! Data written to: %s", undeletePath)) + } + return 0 + } + + return OutputSecret(c.UI, secret) +} diff --git a/command/lease.go b/command/lease.go new file mode 100644 index 0000000..29ef79f --- /dev/null +++ b/command/lease.go @@ -0,0 +1,43 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + + "github.com/mitchellh/cli" +) + +var _ cli.Command = (*LeaseCommand)(nil) + +type LeaseCommand struct { + *BaseCommand +} + +func (c *LeaseCommand) Synopsis() string { + return "Interact with leases" +} + +func (c *LeaseCommand) Help() string { + helpText := ` +Usage: vault lease [options] [args] + + This command groups subcommands for interacting with leases. Users can revoke + or renew leases. + + Renew a lease: + + $ vault lease renew database/creds/readonly/2f6a614c... + + Revoke a lease: + + $ vault lease revoke database/creds/readonly/2f6a614c... +` + + return strings.TrimSpace(helpText) +} + +func (c *LeaseCommand) Run(args []string) int { + return cli.RunResultHelp +} diff --git a/command/lease_lookup.go b/command/lease_lookup.go new file mode 100644 index 0000000..ef53ce5 --- /dev/null +++ b/command/lease_lookup.go @@ -0,0 +1,94 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*LeaseLookupCommand)(nil) + _ cli.CommandAutocomplete = (*LeaseLookupCommand)(nil) +) + +type LeaseLookupCommand struct { + *BaseCommand +} + +func (c *LeaseLookupCommand) Synopsis() string { + return "Lookup the lease of a secret" +} + +func (c *LeaseLookupCommand) Help() string { + helpText := ` +Usage: vault lease lookup ID + + Lookup the lease information of a secret. + + Every secret in Vault has a lease associated with it. Users can look up + information on the lease by referencing the lease ID. + + Lookup lease of a secret: + + $ vault lease lookup database/creds/readonly/2f6a614c... + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *LeaseLookupCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + return set +} + +func (c *LeaseLookupCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictAnything +} + +func (c *LeaseLookupCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *LeaseLookupCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + leaseID := "" + + args = f.Args() + switch len(args) { + case 0: + c.UI.Error("Missing ID!") + return 1 + case 1: + leaseID = strings.TrimSpace(args[0]) + default: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + secret, err := client.Sys().Lookup(leaseID) + if err != nil { + c.UI.Error(fmt.Sprintf("error looking up lease id %s: %s", leaseID, err)) + return 2 + } + + return OutputSecret(c.UI, secret) +} diff --git a/command/lease_lookup_test.go b/command/lease_lookup_test.go new file mode 100644 index 0000000..536c29c --- /dev/null +++ b/command/lease_lookup_test.go @@ -0,0 +1,103 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" +) + +func testLeaseLookupCommand(tb testing.TB) (*cli.MockUi, *LeaseLookupCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &LeaseLookupCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +// testLeaseLookupCommandMountAndLease mounts a leased secret backend and returns +// the leaseID of an item. +func testLeaseLookupCommandMountAndLease(tb testing.TB, client *api.Client) string { + if err := client.Sys().Mount("testing", &api.MountInput{ + Type: "generic-leased", + }); err != nil { + tb.Fatal(err) + } + + if _, err := client.Logical().Write("testing/foo", map[string]interface{}{ + "key": "value", + "lease": "5m", + }); err != nil { + tb.Fatal(err) + } + + // Read the secret back to get the leaseID + secret, err := client.Logical().Read("testing/foo") + if err != nil { + tb.Fatal(err) + } + if secret == nil || secret.LeaseID == "" { + tb.Fatalf("missing secret or lease: %#v", secret) + } + + return secret.LeaseID +} + +// TestLeaseLookupCommand_Run tests basic lookup +func TestLeaseLookupCommand_Run(t *testing.T) { + t.Parallel() + + t.Run("empty", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + _ = testLeaseLookupCommandMountAndLease(t, client) + + ui, cmd := testLeaseLookupCommand(t) + cmd.client = client + + code := cmd.Run(nil) + if exp := 1; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + expectedMsg := "Missing ID!" + if !strings.Contains(combined, expectedMsg) { + t.Errorf("expected %q to contain %q", combined, expectedMsg) + } + }) + + t.Run("integration", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + leaseID := testLeaseLookupCommandMountAndLease(t, client) + + _, cmd := testLeaseLookupCommand(t) + cmd.client = client + + code := cmd.Run([]string{leaseID}) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testLeaseLookupCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/lease_renew.go b/command/lease_renew.go new file mode 100644 index 0000000..aad41d6 --- /dev/null +++ b/command/lease_renew.go @@ -0,0 +1,117 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + "time" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*LeaseRenewCommand)(nil) + _ cli.CommandAutocomplete = (*LeaseRenewCommand)(nil) +) + +type LeaseRenewCommand struct { + *BaseCommand + + flagIncrement time.Duration +} + +func (c *LeaseRenewCommand) Synopsis() string { + return "Renews the lease of a secret" +} + +func (c *LeaseRenewCommand) Help() string { + helpText := ` +Usage: vault lease renew [options] ID + + Renews the lease on a secret, extending the time that it can be used before + it is revoked by Vault. + + Every secret in Vault has a lease associated with it. If the owner of the + secret wants to use it longer than the lease, then it must be renewed. + Renewing the lease does not change the contents of the secret. The ID is the + full path lease ID. + + Renew a secret: + + $ vault lease renew database/creds/readonly/2f6a614c... 
+ + Lease renewal will fail if the secret is not renewable, the secret has already + been revoked, or if the secret has already reached its maximum TTL. + + For a full list of examples, please see the documentation. + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *LeaseRenewCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + f := set.NewFlagSet("Command Options") + + f.DurationVar(&DurationVar{ + Name: "increment", + Target: &c.flagIncrement, + Default: 0, + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "Request a specific increment in seconds. Vault is not required " + + "to honor this request.", + }) + + return set +} + +func (c *LeaseRenewCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictAnything +} + +func (c *LeaseRenewCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *LeaseRenewCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + leaseID := "" + increment := c.flagIncrement + + args = f.Args() + switch len(args) { + case 0: + c.UI.Error("Missing ID!") + return 1 + case 1: + leaseID = strings.TrimSpace(args[0]) + default: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1-2, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + secret, err := client.Sys().Renew(leaseID, truncateToSeconds(increment)) + if err != nil { + c.UI.Error(fmt.Sprintf("Error renewing %s: %s", leaseID, err)) + return 2 + } + + return OutputSecret(c.UI, secret) +} diff --git a/command/lease_renew_test.go b/command/lease_renew_test.go new file mode 100644 index 0000000..c24b812 --- /dev/null +++ b/command/lease_renew_test.go @@ -0,0 +1,161 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" +) + +func testLeaseRenewCommand(tb testing.TB) (*cli.MockUi, *LeaseRenewCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &LeaseRenewCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +// testLeaseRenewCommandMountAndLease mounts a leased secret backend and returns +// the leaseID of an item. 
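+//
+// (Editor's note: illustrative sketch, not part of the upstream source.) The
+// renewal exercised by these tests maps onto the API client like so, assuming
+// leaseID holds a valid lease ID:
+//
+//	renewed, err := client.Sys().Renew(leaseID, 60) // ask for ~60s more
+//	// on success, renewed.LeaseDuration is the TTL Vault actually granted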
+func testLeaseRenewCommandMountAndLease(tb testing.TB, client *api.Client) string { + if err := client.Sys().Mount("testing", &api.MountInput{ + Type: "generic-leased", + }); err != nil { + tb.Fatal(err) + } + + if _, err := client.Logical().Write("testing/foo", map[string]interface{}{ + "key": "value", + "lease": "5m", + }); err != nil { + tb.Fatal(err) + } + + // Read the secret back to get the leaseID + secret, err := client.Logical().Read("testing/foo") + if err != nil { + tb.Fatal(err) + } + if secret == nil || secret.LeaseID == "" { + tb.Fatalf("missing secret or lease: %#v", secret) + } + + return secret.LeaseID +} + +func TestLeaseRenewCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "empty", + nil, + "Missing ID!", + 1, + }, + { + "increment", + []string{"-increment", "60s"}, + "foo", + 0, + }, + { + "increment_no_suffix", + []string{"-increment", "60"}, + "foo", + 0, + }, + } + + t.Run("group", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + leaseID := testLeaseRenewCommandMountAndLease(t, client) + + ui, cmd := testLeaseRenewCommand(t) + cmd.client = client + + if tc.args != nil { + tc.args = append(tc.args, leaseID) + } + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("integration", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + leaseID := testLeaseRenewCommandMountAndLease(t, client) + + _, cmd := testLeaseRenewCommand(t) + cmd.client = client + + code := cmd.Run([]string{leaseID}) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testLeaseRenewCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "foo/bar", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error renewing foo/bar: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testLeaseRenewCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/lease_revoke.go b/command/lease_revoke.go new file mode 100644 index 0000000..5efd5ec --- /dev/null +++ b/command/lease_revoke.go @@ -0,0 +1,181 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*LeaseRevokeCommand)(nil) + _ cli.CommandAutocomplete = (*LeaseRevokeCommand)(nil) +) + +type LeaseRevokeCommand struct { + *BaseCommand + + flagForce bool + flagPrefix bool + flagSync bool +} + +func (c *LeaseRevokeCommand) Synopsis() string { + return "Revokes leases and secrets" +} + +func (c *LeaseRevokeCommand) Help() string { + helpText := ` +Usage: vault lease revoke [options] ID + + Revokes secrets by their lease ID. 
This command can revoke a single secret + or multiple secrets based on a path-matched prefix. + + The default behavior when not using -force is to revoke asynchronously; Vault + will queue the revocation and keep trying if it fails (including across + restarts). The -sync flag can be used to force a synchronous operation, but + it is then up to the caller to retry on failure. Force mode always operates + synchronously. + + Revoke a single lease: + + $ vault lease revoke database/creds/readonly/2f6a614c... + + Revoke all leases for a role: + + $ vault lease revoke -prefix aws/creds/deploy + + Force delete leases from Vault even if secret engine revocation fails: + + $ vault lease revoke -force -prefix consul/creds + + For a full list of examples and paths, please see the documentation that + corresponds to the secret engine in use. + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *LeaseRevokeCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP) + f := set.NewFlagSet("Command Options") + + f.BoolVar(&BoolVar{ + Name: "force", + Aliases: []string{"f"}, + Target: &c.flagForce, + Default: false, + Usage: "Delete the lease from Vault even if the secret engine revocation " + + "fails. This is meant for recovery situations where the secret " + + "in the target secret engine was manually removed. If this flag is " + + "specified, -prefix is also required.", + }) + + f.BoolVar(&BoolVar{ + Name: "prefix", + Target: &c.flagPrefix, + Default: false, + Usage: "Treat the ID as a prefix instead of an exact lease ID. This can " + + "revoke multiple leases simultaneously.", + }) + + f.BoolVar(&BoolVar{ + Name: "sync", + Target: &c.flagSync, + Default: false, + Usage: "Force a synchronous operation; on failure it is up to the client " + + "to retry.", + }) + + return set +} + +func (c *LeaseRevokeCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultFiles() +} + +func (c *LeaseRevokeCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *LeaseRevokeCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + if c.flagForce && !c.flagPrefix { + c.UI.Error("Specifying -force requires also specifying -prefix") + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + leaseID := strings.TrimSpace(args[0]) + + revokeOpts := &api.RevokeOptions{ + LeaseID: leaseID, + Force: c.flagForce, + Prefix: c.flagPrefix, + Sync: c.flagSync, + } + + if c.flagForce { + c.UI.Warn(wrapAtLength("Warning! Force-removing leases can cause Vault " + + "to become out of sync with secret engines!")) + } + + err = client.Sys().RevokeWithOptions(revokeOpts) + if err != nil { + switch { + case c.flagForce: + c.UI.Error(fmt.Sprintf("Error force revoking leases with prefix %s: %s", leaseID, err)) + return 2 + case c.flagPrefix: + c.UI.Error(fmt.Sprintf("Error revoking leases with prefix %s: %s", leaseID, err)) + return 2 + default: + c.UI.Error(fmt.Sprintf("Error revoking lease %s: %s", leaseID, err)) + return 2 + } + } + + if c.flagForce { + c.UI.Output(fmt.Sprintf("Success! 
Force revoked any leases with prefix: %s", leaseID)) + return 0 + } + + if c.flagSync { + if c.flagPrefix { + c.UI.Output(fmt.Sprintf("Success! Revoked any leases with prefix: %s", leaseID)) + return 0 + } + c.UI.Output(fmt.Sprintf("Success! Revoked lease: %s", leaseID)) + return 0 + } + + c.UI.Output("All revocation operations queued successfully!") + return 0 +} diff --git a/command/lease_revoke_test.go b/command/lease_revoke_test.go new file mode 100644 index 0000000..261041e --- /dev/null +++ b/command/lease_revoke_test.go @@ -0,0 +1,149 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" +) + +func testLeaseRevokeCommand(tb testing.TB) (*cli.MockUi, *LeaseRevokeCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &LeaseRevokeCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestLeaseRevokeCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "force_without_prefix", + []string{"-force"}, + "requires also specifying -prefix", + 1, + }, + { + "single", + nil, + "All revocation operations queued successfully", + 0, + }, + { + "single_sync", + []string{"-sync"}, + "Success", + 0, + }, + { + "force_prefix", + []string{"-force", "-prefix"}, + "Success", + 0, + }, + { + "prefix", + []string{"-prefix"}, + "All revocation operations queued successfully", + 0, + }, + { + "prefix_sync", + []string{"-prefix", "-sync"}, + "Success", + 0, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().Mount("secret-leased", &api.MountInput{ + Type: "generic-leased", + }); err != nil { + t.Fatal(err) + } + + path := "secret-leased/revoke/" + tc.name + data := map[string]interface{}{ + "key": "value", + "lease": "1m", + } + if _, err := client.Logical().Write(path, data); err != nil { + t.Fatal(err) + } + secret, err := client.Logical().Read(path) + if err != nil { + t.Fatal(err) + } + + ui, cmd := testLeaseRevokeCommand(t) + cmd.client = client + + tc.args = append(tc.args, secret.LeaseID) + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testLeaseRevokeCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "foo/bar", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error revoking lease foo/bar: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testLeaseRevokeCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/list.go b/command/list.go new file mode 100644 index 0000000..028f0d3 --- /dev/null +++ b/command/list.go @@ -0,0 +1,120 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/mitchellh/cli"
+	"github.com/posener/complete"
+)
+
+var (
+	_ cli.Command             = (*ListCommand)(nil)
+	_ cli.CommandAutocomplete = (*ListCommand)(nil)
+)
+
+type ListCommand struct {
+	*BaseCommand
+}
+
+func (c *ListCommand) Synopsis() string {
+	return "List data or secrets"
+}
+
+func (c *ListCommand) Help() string {
+	helpText := `
+Usage: vault list [options] PATH
+
+  Lists data from Vault at the given path. This can be used to list keys in a
+  given secret engine.
+
+  List values under the "my-app" folder of the generic secret engine:
+
+      $ vault list secret/my-app/
+
+  For a full list of examples and paths, please see the documentation that
+  corresponds to the secret engine in use. Not all engines support listing.
+
+` + c.Flags().Help()
+
+	return strings.TrimSpace(helpText)
+}
+
+func (c *ListCommand) Flags() *FlagSets {
+	set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat | FlagSetOutputDetailed)
+	return set
+}
+
+func (c *ListCommand) AutocompleteArgs() complete.Predictor {
+	return c.PredictVaultFolders()
+}
+
+func (c *ListCommand) AutocompleteFlags() complete.Flags {
+	return c.Flags().Completions()
+}
+
+func (c *ListCommand) Run(args []string) int {
+	f := c.Flags()
+
+	if err := f.Parse(args); err != nil {
+		c.UI.Error(err.Error())
+		return 1
+	}
+
+	args = f.Args()
+	switch {
+	case len(args) < 1:
+		c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args)))
+		return 1
+	case len(args) > 1:
+		c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args)))
+		return 1
+	}
+
+	client, err := c.Client()
+	if err != nil {
+		c.UI.Error(err.Error())
+		return 2
+	}
+
+	path := sanitizePath(args[0])
+	secret, err := client.Logical().List(path)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("Error listing %s: %s", path, err))
+		return 2
+	}
+
+	// If the secret is wrapped, return the wrapped response.
+	if secret != nil && secret.WrapInfo != nil && secret.WrapInfo.TTL != 0 {
+		return OutputSecret(c.UI, secret)
+	}
+
+	_, ok := extractListData(secret)
+	if Format(c.UI) != "table" {
+		if secret == nil || secret.Data == nil || !ok {
+			OutputData(c.UI, map[string]interface{}{})
+			return 2
+		}
+	}
+
+	if secret == nil {
+		c.UI.Error(fmt.Sprintf("No value found at %s", path))
+		return 2
+	}
+	if secret.Data == nil {
+		// If secret wasn't nil, we have warnings, so output them anyway. We
+		// may also have non-keys info.
+		return OutputSecret(c.UI, secret)
+	}
+
+	if !ok {
+		c.UI.Error(fmt.Sprintf("No entries found at %s", path))
+		return 2
+	}
+
+	return OutputList(c.UI, secret)
+}
diff --git a/command/list_test.go b/command/list_test.go
new file mode 100644
index 0000000..070184b
--- /dev/null
+++ b/command/list_test.go
@@ -0,0 +1,135 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func testListCommand(tb testing.TB) (*cli.MockUi, *ListCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &ListCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestListCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + []string{}, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"foo", "bar"}, + "Too many arguments", + 1, + }, + { + "not_found", + []string{"nope/not/once/never"}, + "", + 2, + }, + { + "default", + []string{"secret/list"}, + "bar\nbaz\nfoo", + 0, + }, + { + "default_slash", + []string{"secret/list/"}, + "bar\nbaz\nfoo", + 0, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + keys := []string{ + "secret/list/foo", + "secret/list/bar", + "secret/list/baz", + } + for _, k := range keys { + if _, err := client.Logical().Write(k, map[string]interface{}{ + "foo": "bar", + }); err != nil { + t.Fatal(err) + } + } + + ui, cmd := testListCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testListCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "secret/list", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error listing secret/list: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testListCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/log_flags.go b/command/log_flags.go new file mode 100644 index 0000000..5213d06 --- /dev/null +++ b/command/log_flags.go @@ -0,0 +1,178 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "flag" + "os" + "strconv" + + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/posener/complete" +) + +// logFlags are the 'log' related flags that can be shared across commands. +type logFlags struct { + flagCombineLogs bool + flagLogLevel string + flagLogFormat string + flagLogFile string + flagLogRotateBytes int + flagLogRotateDuration string + flagLogRotateMaxFiles int +} + +// valuesProvider has the intention of providing a way to supply a func with a +// way to retrieve values for flags and environment variables without having to +// directly call a specific implementation. +// The reasoning for its existence is to facilitate testing. +type valuesProvider struct { + flagProvider func(string) (flag.Value, bool) + envVarProvider func(string) (string, bool) +} + +// addLogFlags will add the set of 'log' related flags to a flag set. 
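+//
+// (Editor's note: illustrative sketch, not upstream source.) A command would
+// typically embed logFlags in its struct and wire this up from its Flags()
+// method, roughly as below; the logFlags field name is assumed:
+//
+//	f := set.NewFlagSet("Log Options")
+//	f.addLogFlags(&c.logFlags)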
+func (f *FlagSet) addLogFlags(l *logFlags) {
+	f.BoolVar(&BoolVar{
+		Name:    flagNameCombineLogs,
+		Target:  &l.flagCombineLogs,
+		Default: false,
+		Hidden:  true,
+	})
+
+	f.StringVar(&StringVar{
+		Name:       flagNameLogLevel,
+		Target:     &l.flagLogLevel,
+		Default:    notSetValue,
+		EnvVar:     EnvVaultLogLevel,
+		Completion: complete.PredictSet("trace", "debug", "info", "warn", "error"),
+		Usage: "Log verbosity level. Supported values (in order of detail) are " +
+			"\"trace\", \"debug\", \"info\", \"warn\", and \"error\".",
+	})
+
+	f.StringVar(&StringVar{
+		Name:       flagNameLogFormat,
+		Target:     &l.flagLogFormat,
+		Default:    notSetValue,
+		EnvVar:     EnvVaultLogFormat,
+		Completion: complete.PredictSet("standard", "json"),
+		Usage:      `Log format. Supported values are "standard" and "json".`,
+	})
+
+	f.StringVar(&StringVar{
+		Name:   flagNameLogFile,
+		Target: &l.flagLogFile,
+		Usage:  "Path to the log file that Vault should use for logging",
+	})
+
+	f.IntVar(&IntVar{
+		Name:   flagNameLogRotateBytes,
+		Target: &l.flagLogRotateBytes,
+		Usage: "Number of bytes that should be written to a log before it needs to be rotated. " +
+			"Unless specified, there is no limit to the number of bytes that can be written to a log file",
+	})
+
+	f.StringVar(&StringVar{
+		Name:   flagNameLogRotateDuration,
+		Target: &l.flagLogRotateDuration,
+		Usage: "The maximum duration a log should be written to before it needs to be rotated. " +
+			"Must be a duration value such as 30s",
+	})
+
+	f.IntVar(&IntVar{
+		Name:   flagNameLogRotateMaxFiles,
+		Target: &l.flagLogRotateMaxFiles,
+		Usage:  "The maximum number of older log file archives to keep",
+	})
+}
+
+// envVarValue attempts to get a named value from the environment variables.
+// The value will be returned as a string along with a boolean value indicating
+// to the caller whether the named env var existed.
+func envVarValue(key string) (string, bool) {
+	if key == "" {
+		return "", false
+	}
+	return os.LookupEnv(key)
+}
+
+// flagValue attempts to find the named flag in a set of FlagSets.
+// The flag.Value is returned if it was specified, and the boolean value indicates
+// to the caller if the flag was specified by the end user.
+func (f *FlagSets) flagValue(flagName string) (flag.Value, bool) {
+	var result flag.Value
+	var isFlagSpecified bool
+
+	if f != nil {
+		f.Visit(func(fl *flag.Flag) {
+			if fl.Name == flagName {
+				result = fl.Value
+				isFlagSpecified = true
+			}
+		})
+	}
+
+	return result, isFlagSpecified
+}
+
+// overrideValue uses the provided keys to check CLI flags and environment
+// variables for values that may be used to override any specified configuration.
+func (p *valuesProvider) overrideValue(flagKey, envVarKey string) (string, bool) {
+	var result string
+	found := true
+
+	flg, flgFound := p.flagProvider(flagKey)
+	env, envFound := p.envVarProvider(envVarKey)
+
+	switch {
+	case flgFound:
+		result = flg.String()
+	case envFound:
+		result = env
+	default:
+		found = false
+	}
+
+	return result, found
+}
+
+// applyLogConfigOverrides will accept a shared config and specifically attempt to update the 'log' related config keys.
+// For each 'log' key, we aggregate file config, env vars and CLI flags to select the one with the highest precedence.
+// This method mutates the config object passed into it.
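+//
+// (Editor's note: illustrative, with an assumed cfg value.) Given log_level =
+// "info" in the file config, VAULT_LOG_LEVEL=warn exported, and
+// -log-level=debug on the command line, the CLI flag wins:
+//
+//	flagSets.applyLogConfigOverrides(cfg.SharedConfig)
+//	// cfg.SharedConfig.LogLevel is now "debug"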
+func (f *FlagSets) applyLogConfigOverrides(config *configutil.SharedConfig) { + p := &valuesProvider{ + flagProvider: f.flagValue, + envVarProvider: envVarValue, + } + + // Update log level + if val, found := p.overrideValue(flagNameLogLevel, EnvVaultLogLevel); found { + config.LogLevel = val + } + + // Update log format + if val, found := p.overrideValue(flagNameLogFormat, EnvVaultLogFormat); found { + config.LogFormat = val + } + + // Update log file name + if val, found := p.overrideValue(flagNameLogFile, ""); found { + config.LogFile = val + } + + // Update log rotation duration + if val, found := p.overrideValue(flagNameLogRotateDuration, ""); found { + config.LogRotateDuration = val + } + + // Update log max files + if val, found := p.overrideValue(flagNameLogRotateMaxFiles, ""); found { + config.LogRotateMaxFiles, _ = strconv.Atoi(val) + } + + // Update log rotation max bytes + if val, found := p.overrideValue(flagNameLogRotateBytes, ""); found { + config.LogRotateBytes, _ = strconv.Atoi(val) + } +} diff --git a/command/log_flags_test.go b/command/log_flags_test.go new file mode 100644 index 0000000..38bfa52 --- /dev/null +++ b/command/log_flags_test.go @@ -0,0 +1,94 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "flag" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestLogFlags_ValuesProvider(t *testing.T) { + cases := map[string]struct { + flagKey string + envVarKey string + wantValue string + wantFound bool + }{ + "flag-missing": { + flagKey: "invalid", + envVarKey: "valid-env-var", + wantValue: "envVarValue", + wantFound: true, + }, + "envVar-missing": { + flagKey: "valid-flag", + envVarKey: "invalid", + wantValue: "flagValue", + wantFound: true, + }, + "all-present": { + flagKey: "valid-flag", + envVarKey: "valid-env-var", + wantValue: "flagValue", + wantFound: true, + }, + "all-missing": { + flagKey: "invalid", + envVarKey: "invalid", + wantValue: "", + wantFound: false, + }, + } + + // Sneaky little fake providers + flagFaker := func(key string) (flag.Value, bool) { + var result fakeFlag + var found bool + + if key == "valid-flag" { + result.Set("flagValue") + found = true + } + + return &result, found + } + + envFaker := func(key string) (string, bool) { + var found bool + var result string + + if key == "valid-env-var" { + result = "envVarValue" + found = true + } + + return result, found + } + + vp := valuesProvider{ + flagProvider: flagFaker, + envVarProvider: envFaker, + } + + for name, tc := range cases { + val, found := vp.overrideValue(tc.flagKey, tc.envVarKey) + assert.Equal(t, tc.wantFound, found, name) + assert.Equal(t, tc.wantValue, val, name) + } +} + +type fakeFlag struct { + value string +} + +func (v *fakeFlag) String() string { + return v.value +} + +func (v *fakeFlag) Set(raw string) error { + v.value = raw + return nil +} diff --git a/command/login.go b/command/login.go new file mode 100644 index 0000000..c8bc232 --- /dev/null +++ b/command/login.go @@ -0,0 +1,390 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/hashicorp/vault/api" + "github.com/posener/complete" +) + +// LoginHandler is the interface that any auth handlers must implement to enable +// auth via the CLI. 
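+//
+// (Editor's note: a minimal sketch of a conforming handler against a
+// hypothetical "dummy" auth mount; not part of the upstream source.)
+//
+//	type dummyHandler struct{}
+//
+//	func (h *dummyHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, error) {
+//		return c.Logical().Write("auth/dummy/login", map[string]interface{}{
+//			"user": m["user"],
+//		})
+//	}
+//
+//	func (h *dummyHandler) Help() string { return "Usage: vault login -method=dummy user=NAME" }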
+type LoginHandler interface {
+	Auth(*api.Client, map[string]string) (*api.Secret, error)
+	Help() string
+}
+
+type LoginCommand struct {
+	*BaseCommand
+
+	Handlers map[string]LoginHandler
+
+	flagMethod    string
+	flagPath      string
+	flagNoStore   bool
+	flagNoPrint   bool
+	flagTokenOnly bool
+
+	testStdin io.Reader // for tests
+}
+
+func (c *LoginCommand) Synopsis() string {
+	return "Authenticate locally"
+}
+
+func (c *LoginCommand) Help() string {
+	helpText := `
+Usage: vault login [options] [AUTH K=V...]
+
+  Authenticates users or machines to Vault using the provided arguments. A
+  successful authentication results in a Vault token - conceptually similar to
+  a session token on a website. By default, this token is cached on the local
+  machine for future requests.
+
+  The default auth method is "token". If not supplied via the CLI,
+  Vault will prompt for input. If the argument is "-", the values are read
+  from stdin.
+
+  The -method flag allows using other auth methods, such as userpass, github, or
+  cert. For these, additional "K=V" pairs may be required. For example, to
+  authenticate to the userpass auth method:
+
+      $ vault login -method=userpass username=my-username
+
+  For more information about the list of configuration parameters available for
+  a given auth method, use the "vault auth help TYPE" command. You can also use
+  "vault auth list" to see the list of enabled auth methods.
+
+  If an auth method is enabled at a non-standard path, the -method flag still
+  refers to the canonical type, but the -path flag refers to the enabled path.
+  If a github auth method was enabled at "github-prod", authenticate like this:
+
+      $ vault login -method=github -path=github-prod
+
+  If the authentication is requested with response wrapping (via -wrap-ttl),
+  the returned token is automatically unwrapped unless:
+
+    - The -token-only flag is used, in which case this command will output
+      the wrapping token.
+
+    - The -no-store flag is used, in which case this command will output the
+      details of the wrapping token.
+
+` + c.Flags().Help()
+
+	return strings.TrimSpace(helpText)
+}
+
+func (c *LoginCommand) Flags() *FlagSets {
+	set := c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat)
+
+	f := set.NewFlagSet("Command Options")
+
+	f.StringVar(&StringVar{
+		Name:       "method",
+		Target:     &c.flagMethod,
+		Default:    "token",
+		Completion: c.PredictVaultAvailableAuths(),
+		Usage: "Type of authentication to use such as \"userpass\" or " +
+			"\"ldap\". Note this corresponds to the TYPE, not the enabled path. " +
+			"Use -path to specify the path where the authentication is enabled.",
+	})
+
+	f.StringVar(&StringVar{
+		Name:       "path",
+		Target:     &c.flagPath,
+		Default:    "",
+		Completion: c.PredictVaultAuths(),
+		Usage: "Remote path in Vault where the auth method is enabled. " +
+			"This defaults to the TYPE of method (e.g. userpass -> userpass/).",
+	})
+
+	f.BoolVar(&BoolVar{
+		Name:    "no-store",
+		Target:  &c.flagNoStore,
+		Default: false,
+		Usage: "Do not persist the token to the token helper (usually the " +
+			"local filesystem) after authentication for use in future requests. " +
+			"The token will only be displayed in the command output.",
+	})
+
+	f.BoolVar(&BoolVar{
+		Name:    "no-print",
+		Target:  &c.flagNoPrint,
+		Default: false,
+		Usage: "Do not display the token. The token will still be stored to the " +
+			"configured token helper.",
+	})
+
+	f.BoolVar(&BoolVar{
+		Name:    "token-only",
+		Target:  &c.flagTokenOnly,
+		Default: false,
+		Usage: "Output only the token with no verification. This flag is a " +
+			"shortcut for \"-field=token -no-store\". Setting those flags to other " +
+			"values will have no effect.",
+	})
+
+	return set
+}
+
+func (c *LoginCommand) AutocompleteArgs() complete.Predictor {
+	return nil
+}
+
+func (c *LoginCommand) AutocompleteFlags() complete.Flags {
+	return c.Flags().Completions()
+}
+
+func (c *LoginCommand) Run(args []string) int {
+	f := c.Flags()
+
+	if err := f.Parse(args); err != nil {
+		c.UI.Error(err.Error())
+		return 1
+	}
+
+	args = f.Args()
+
+	// Set the right flags if the user requested token-only - this overrides
+	// any previously configured values, as documented.
+	if c.flagTokenOnly {
+		c.flagNoStore = true
+		c.flagField = "token"
+	}
+
+	if c.flagNoStore && c.flagNoPrint {
+		c.UI.Error(wrapAtLength(
+			"-no-store and -no-print cannot be used together"))
+		return 1
+	}
+
+	// Get the auth method
+	authMethod := sanitizePath(c.flagMethod)
+	if authMethod == "" {
+		authMethod = "token"
+	}
+
+	// If no path is specified, we default the path to the method type
+	// or use the plugin name if it's a plugin
+	authPath := c.flagPath
+	if authPath == "" {
+		authPath = ensureTrailingSlash(authMethod)
+	}
+
+	// Get the handler function
+	authHandler, ok := c.Handlers[authMethod]
+	if !ok {
+		c.UI.Error(wrapAtLength(fmt.Sprintf(
+			"Unknown auth method: %s. Use \"vault auth list\" to see the "+
+				"complete list of auth methods. Additionally, some "+
+				"auth methods are only available via the HTTP API.",
+			authMethod)))
+		return 1
+	}
+
+	// Pull our fake stdin if needed
+	stdin := (io.Reader)(os.Stdin)
+	if c.testStdin != nil {
+		stdin = c.testStdin
+	}
+
+	// If the user provided a token, pass it along to the auth provider.
+	if authMethod == "token" && len(args) > 0 && !strings.Contains(args[0], "=") {
+		args = append([]string{"token=" + args[0]}, args[1:]...)
+	}
+
+	config, err := parseArgsDataString(stdin, args)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("Error parsing configuration: %s", err))
+		return 1
+	}
+
+	// If the user did not specify a mount, default it to the computed auth path.
+	if config["mount"] == "" && authPath != "" {
+		config["mount"] = authPath
+	}
+
+	// Create the client
+	client, err := c.Client()
+	if err != nil {
+		c.UI.Error(err.Error())
+		return 2
+	}
+
+	// Evolving token formats across Vault versions have caused issues during CLI logins. Unless
+	// token auth is being used, omit any token picked up from TokenHelper.
+	if authMethod != "token" {
+		client.SetToken("")
+	}
+
+	// Delegate authentication to the auth handler
+	secret, err := authHandler.Auth(client, config)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("Error authenticating: %s", err))
+		return 2
+	}
+
+	// If there is only one MFA method configured and c.NonInteractive flag is
+	// unset, the login request is validated interactively.
+	//
+	// interactiveMethodInfo here means that `validateMFA` will complete the MFA
+	// by prompting for a password or directing you to a push notification. In
+	// this scenario, no external validation is needed.
+	interactiveMethodInfo := c.getInteractiveMFAMethodInfo(secret)
+	if interactiveMethodInfo != nil {
+		c.UI.Warn("Initiating Interactive MFA Validation...")
+		secret, err = c.validateMFA(secret.Auth.MFARequirement.MFARequestID, *interactiveMethodInfo)
+		if err != nil {
+			c.UI.Error(err.Error())
+			return 2
+		}
+	} else if c.getMFAValidationRequired(secret) {
+		// Warn about existing login token, but return here, since the secret
+		// won't have any token information if further validation is required.
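+		// (Editor's note, illustrative:) the follow-up validation can be done
+		// with the API client, mirroring the MFA tests further below, where
+		// reqID, methodID, and passcode are assumed values:
+		//
+		//	client.Logical().Write("sys/mfa/validate", map[string]interface{}{
+		//		"mfa_request_id": reqID,
+		//		"mfa_payload":    map[string][]string{methodID: {passcode}},
+		//	})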
+		c.checkForAndWarnAboutLoginToken()
+		c.UI.Warn(wrapAtLength("A login request was issued that is subject to "+
+			"MFA validation. Please make sure to validate the login by sending another "+
+			"request to the sys/mfa/validate endpoint.") + "\n")
+		return OutputSecret(c.UI, secret)
+	}
+
+	// Unset any previous token wrapping functionality. If the original request
+	// was for a wrapped token, we don't want future requests to be wrapped.
+	client.SetWrappingLookupFunc(func(string, string) string { return "" })
+
+	// Recursively extract the token, handling wrapping
+	unwrap := !c.flagTokenOnly && !c.flagNoStore
+	secret, isWrapped, err := c.extractToken(client, secret, unwrap)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("Error extracting token: %s", err))
+		return 2
+	}
+	if secret == nil {
+		c.UI.Error("Vault returned an empty secret")
+		return 2
+	}
+
+	// Handle special cases if the token was wrapped
+	if isWrapped {
+		if c.flagTokenOnly {
+			return PrintRawField(c.UI, secret, "wrapping_token")
+		}
+		if c.flagNoStore {
+			return OutputSecret(c.UI, secret)
+		}
+	}
+
+	// If we got this far, verify we have authentication data before continuing
+	if secret.Auth == nil {
+		c.UI.Error(wrapAtLength(
+			"Vault returned a secret, but the secret has no authentication " +
+				"information attached. This should never happen and is likely a " +
+				"bug."))
+		return 2
+	}
+
+	// Pull the token itself out, since we don't need the rest of the auth
+	// information anymore.
+	token := secret.Auth.ClientToken
+
+	if !c.flagNoStore {
+		// Grab the token helper so we can store
+		tokenHelper, err := c.TokenHelper()
+		if err != nil {
+			c.UI.Error(wrapAtLength(fmt.Sprintf(
+				"Error initializing token helper. Please verify that the token "+
+					"helper is available and properly configured for your system. The "+
+					"error was: %s", err)))
+			return 1
+		}
+
+		// Store the token in the local client
+		if err := tokenHelper.Store(token); err != nil {
+			c.UI.Error(fmt.Sprintf("Error storing token: %s", err))
+			c.UI.Error(wrapAtLength(
+				"Authentication was successful, but the token was not persisted. The "+
+					"resulting token is shown below for your records.") + "\n")
+			OutputSecret(c.UI, secret)
+			return 2
+		}
+
+		c.checkForAndWarnAboutLoginToken()
+	} else if !c.flagTokenOnly {
+		// If token-only the user knows it won't be stored, so don't warn
+		c.UI.Warn(wrapAtLength(
+			"The token was not stored in the token helper. Set the VAULT_TOKEN "+
+				"environment variable or pass the token below with each request to "+
+				"Vault.") + "\n")
+	}
+
+	if c.flagNoPrint {
+		return 0
+	}
+
+	// If the user requested a particular field, print that out now since we
+	// are likely piping to another process.
+	if c.flagField != "" {
+		return PrintRawField(c.UI, secret, c.flagField)
+	}
+
+	// Print some yay! text, but only in table mode.
+	if Format(c.UI) == "table" {
+		c.UI.Output(wrapAtLength(
+			"Success! You are now authenticated. The token information displayed "+
+				"below is already stored in the token helper. You do NOT need to run "+
+				"\"vault login\" again. Future Vault requests will automatically use "+
+				"this token.") + "\n")
+	}
+
+	return OutputSecret(c.UI, secret)
+}
+
+// extractToken extracts the token from the given secret, automatically
+// unwrapping responses and handling error conditions if unwrap is true. The
+// result also returns whether it was a wrapped response that was not unwrapped.
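+//
+// (Editor's note, illustrative:) for a wrapped login response with unwrap set
+// to true, the call recurses once:
+//
+//	secret, isWrapped, err := c.extractToken(client, secret, true)
+//	// pass 1: secret.WrapInfo != nil -> unwrap, then recurse
+//	// pass 2: secret.Auth != nil     -> return (secret, false, nil)
+//
+// so isWrapped is true only when unwrap is false and the response is left wrapped.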
+func (c *LoginCommand) extractToken(client *api.Client, secret *api.Secret, unwrap bool) (*api.Secret, bool, error) { + switch { + case secret == nil: + return nil, false, fmt.Errorf("empty response from auth helper") + + case secret.Auth != nil: + return secret, false, nil + + case secret.WrapInfo != nil: + if secret.WrapInfo.WrappedAccessor == "" { + return nil, false, fmt.Errorf("wrapped response does not contain a token") + } + + if !unwrap { + return secret, true, nil + } + + client.SetToken(secret.WrapInfo.Token) + secret, err := client.Logical().Unwrap("") + if err != nil { + return nil, false, err + } + return c.extractToken(client, secret, unwrap) + + default: + return nil, false, fmt.Errorf("no auth or wrapping info in response") + } +} + +// Warn if the VAULT_TOKEN environment variable is set, as that will take +// precedence. We output as a warning, so piping should still work since it +// will be on a different stream. +func (c *LoginCommand) checkForAndWarnAboutLoginToken() { + if os.Getenv("VAULT_TOKEN") != "" { + c.UI.Warn(wrapAtLength("WARNING! The VAULT_TOKEN environment variable "+ + "is set! The value of this variable will take precedence; if this is unwanted "+ + "please unset VAULT_TOKEN or update its value accordingly.") + "\n") + } +} diff --git a/command/login_test.go b/command/login_test.go new file mode 100644 index 0000000..3d41d8e --- /dev/null +++ b/command/login_test.go @@ -0,0 +1,616 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "context" + "regexp" + "strings" + "testing" + "time" + + "github.com/mitchellh/cli" + + "github.com/hashicorp/vault/api" + credToken "github.com/hashicorp/vault/builtin/credential/token" + credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" + "github.com/hashicorp/vault/command/token" + "github.com/hashicorp/vault/helper/testhelpers" + "github.com/hashicorp/vault/vault" +) + +// minTokenLengthExternal is the minimum size of SSC +// tokens we are currently handing out to end users, without any +// namespace information +const minTokenLengthExternal = 91 + +func testLoginCommand(tb testing.TB) (*cli.MockUi, *LoginCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &LoginCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + + // Override to our own token helper + tokenHelper: token.NewTestingTokenHelper(), + }, + Handlers: map[string]LoginHandler{ + "token": &credToken.CLIHandler{}, + "userpass": &credUserpass.CLIHandler{}, + }, + } +} + +func TestCustomPath(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().EnableAuth("my-auth", "userpass", ""); err != nil { + t.Fatal(err) + } + if _, err := client.Logical().Write("auth/my-auth/users/test", map[string]interface{}{ + "password": "test", + "policies": "default", + }); err != nil { + t.Fatal(err) + } + + ui, cmd := testLoginCommand(t) + cmd.client = client + + tokenHelper, err := cmd.TokenHelper() + if err != nil { + t.Fatal(err) + } + + // Emulate an unknown token format present in ~/.vault-token, for example + client.SetToken("a.a") + + code := cmd.Run([]string{ + "-method", "userpass", + "-path", "my-auth", + "username=test", + "password=test", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! You are now authenticated." 
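+	// (Editor's note:) cli.MockUi buffers stdout and stderr separately, so the
+	// test concatenates both streams before asserting on the success message.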
+ combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to be %q", combined, expected) + } + + storedToken, err := tokenHelper.Get() + if err != nil { + t.Fatal(err) + } + + if l, exp := len(storedToken), minTokenLengthExternal+vault.TokenPrefixLength; l < exp { + t.Errorf("expected token to be %d characters, was %d: %q", exp, l, storedToken) + } +} + +// Do not persist the token to the token helper +func TestNoStore(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{ + Policies: []string{"default"}, + TTL: "30m", + }) + if err != nil { + t.Fatal(err) + } + token := secret.Auth.ClientToken + + _, cmd := testLoginCommand(t) + cmd.client = client + + tokenHelper, err := cmd.TokenHelper() + if err != nil { + t.Fatal(err) + } + + // Ensure we have no token to start + if storedToken, err := tokenHelper.Get(); err != nil || storedToken != "" { + t.Errorf("expected token helper to be empty: %s: %q", err, storedToken) + } + + code := cmd.Run([]string{ + "-no-store", + token, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + storedToken, err := tokenHelper.Get() + if err != nil { + t.Fatal(err) + } + + if exp := ""; storedToken != exp { + t.Errorf("expected %q to be %q", storedToken, exp) + } +} + +func TestStores(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{ + Policies: []string{"default"}, + TTL: "30m", + }) + if err != nil { + t.Fatal(err) + } + token := secret.Auth.ClientToken + + _, cmd := testLoginCommand(t) + cmd.client = client + + tokenHelper, err := cmd.TokenHelper() + if err != nil { + t.Fatal(err) + } + + code := cmd.Run([]string{ + token, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + storedToken, err := tokenHelper.Get() + if err != nil { + t.Fatal(err) + } + + if storedToken != token { + t.Errorf("expected %q to be %q", storedToken, token) + } +} + +func TestTokenOnly(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().EnableAuth("userpass", "userpass", ""); err != nil { + t.Fatal(err) + } + if _, err := client.Logical().Write("auth/userpass/users/test", map[string]interface{}{ + "password": "test", + "policies": "default", + }); err != nil { + t.Fatal(err) + } + + ui, cmd := testLoginCommand(t) + cmd.client = client + + tokenHelper, err := cmd.TokenHelper() + if err != nil { + t.Fatal(err) + } + + code := cmd.Run([]string{ + "-token-only", + "-method", "userpass", + "username=test", + "password=test", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + // Verify only the token was printed + token := ui.OutputWriter.String() + if l, exp := len(token), minTokenLengthExternal+vault.TokenPrefixLength; l != exp { + t.Errorf("expected token to be %d characters, was %d: %q", exp, l, token) + } + + // Verify the token was not stored + if storedToken, err := tokenHelper.Get(); err != nil || storedToken != "" { + t.Fatalf("expected token to not be stored: %s: %q", err, storedToken) + } +} + +func TestFailureNoStore(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testLoginCommand(t) + cmd.client = client + + tokenHelper, err := cmd.TokenHelper() + if err != nil { + 
t.Fatal(err) + } + + code := cmd.Run([]string{ + "not-a-real-token", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error authenticating: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + if storedToken, err := tokenHelper.Get(); err != nil || storedToken != "" { + t.Fatalf("expected token to not be stored: %s: %q", err, storedToken) + } +} + +func TestWrapAutoUnwrap(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().EnableAuth("userpass", "userpass", ""); err != nil { + t.Fatal(err) + } + if _, err := client.Logical().Write("auth/userpass/users/test", map[string]interface{}{ + "password": "test", + "policies": "default", + }); err != nil { + t.Fatal(err) + } + + _, cmd := testLoginCommand(t) + cmd.client = client + + // Set the wrapping ttl to 5s. We can't set this via the flag because we + // override the client object before that particular flag is parsed. + client.SetWrappingLookupFunc(func(string, string) string { return "5m" }) + + code := cmd.Run([]string{ + "-method", "userpass", + "username=test", + "password=test", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + // Unset the wrapping + client.SetWrappingLookupFunc(func(string, string) string { return "" }) + + tokenHelper, err := cmd.TokenHelper() + if err != nil { + t.Fatal(err) + } + token, err := tokenHelper.Get() + if err != nil || token == "" { + t.Fatalf("expected token from helper: %s: %q", err, token) + } + client.SetToken(token) + + // Ensure the resulting token is unwrapped + secret, err := client.Auth().Token().LookupSelf() + if err != nil { + t.Error(err) + } + if secret == nil { + t.Fatal("secret was nil") + } + + if secret.WrapInfo != nil { + t.Errorf("expected to be unwrapped: %#v", secret) + } +} + +func TestWrapTokenOnly(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().EnableAuth("userpass", "userpass", ""); err != nil { + t.Fatal(err) + } + if _, err := client.Logical().Write("auth/userpass/users/test", map[string]interface{}{ + "password": "test", + "policies": "default", + }); err != nil { + t.Fatal(err) + } + + ui, cmd := testLoginCommand(t) + cmd.client = client + + // Set the wrapping ttl to 5s. We can't set this via the flag because we + // override the client object before that particular flag is parsed. + client.SetWrappingLookupFunc(func(string, string) string { return "5m" }) + + code := cmd.Run([]string{ + "-token-only", + "-method", "userpass", + "username=test", + "password=test", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + // Unset the wrapping + client.SetWrappingLookupFunc(func(string, string) string { return "" }) + + tokenHelper, err := cmd.TokenHelper() + if err != nil { + t.Fatal(err) + } + storedToken, err := tokenHelper.Get() + if err != nil || storedToken != "" { + t.Fatalf("expected token to not be stored: %s: %q", err, storedToken) + } + + token := strings.TrimSpace(ui.OutputWriter.String()) + if token == "" { + t.Errorf("expected %q to not be %q", token, "") + } + + // Ensure the resulting token is, in fact, still wrapped. 
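+	// (Editor's note:) Unwrap("") unwraps whatever token the client itself is
+	// authenticated with, i.e. the wrapping token set on the client below.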
+ client.SetToken(token) + secret, err := client.Logical().Unwrap("") + if err != nil { + t.Error(err) + } + if secret == nil || secret.Auth == nil || secret.Auth.ClientToken == "" { + t.Fatalf("expected secret to have auth: %#v", secret) + } +} + +func TestWrapNoStore(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().EnableAuth("userpass", "userpass", ""); err != nil { + t.Fatal(err) + } + if _, err := client.Logical().Write("auth/userpass/users/test", map[string]interface{}{ + "password": "test", + "policies": "default", + }); err != nil { + t.Fatal(err) + } + + ui, cmd := testLoginCommand(t) + cmd.client = client + + // Set the wrapping ttl to 5s. We can't set this via the flag because we + // override the client object before that particular flag is parsed. + client.SetWrappingLookupFunc(func(string, string) string { return "5m" }) + + code := cmd.Run([]string{ + "-no-store", + "-method", "userpass", + "username=test", + "password=test", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + // Unset the wrapping + client.SetWrappingLookupFunc(func(string, string) string { return "" }) + + tokenHelper, err := cmd.TokenHelper() + if err != nil { + t.Fatal(err) + } + storedToken, err := tokenHelper.Get() + if err != nil || storedToken != "" { + t.Fatalf("expected token to not be stored: %s: %q", err, storedToken) + } + + expected := "wrapping_token" + output := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(output, expected) { + t.Errorf("expected %q to contain %q", output, expected) + } +} + +func TestCommunicationFailure(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testLoginCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "token", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error authenticating: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } +} + +func TestNoTabs(t *testing.T) { + t.Parallel() + + _, cmd := testLoginCommand(t) + assertNoTabs(t, cmd) +} + +func TestLoginMFASinglePhase(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + methodName := "foo" + waitPeriod := 5 + userClient, entityID, methodID := testhelpers.SetupLoginMFATOTP(t, client, methodName, waitPeriod) + enginePath := testhelpers.RegisterEntityInTOTPEngine(t, client, entityID, methodID) + + runCommand := func(methodIdentifier string) { + // the time required for the totp engine to generate a new code + time.Sleep(time.Duration(waitPeriod) * time.Second) + totpCode := testhelpers.GetTOTPCodeFromEngine(t, client, enginePath) + ui, cmd := testLoginCommand(t) + cmd.client = userClient + // login command bails early for test clients, so we have to explicitly set this + cmd.client.SetMFACreds([]string{methodIdentifier + ":" + totpCode}) + code := cmd.Run([]string{ + "-method", "userpass", + "username=testuser1", + "password=testpassword", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + tokenHelper, err := cmd.TokenHelper() + if err != nil { + t.Fatal(err) + } + storedToken, err := tokenHelper.Get() + if err != nil { + t.Fatal(err) + } + if storedToken == "" { + t.Fatal("expected non-empty stored token") + } + output := ui.OutputWriter.String() + if 
!strings.Contains(output, storedToken) {
+			t.Fatalf("expected output to contain stored token %q, got: %q", storedToken, output)
+		}
+	}
+	runCommand(methodID)
+	runCommand(methodName)
+}
+
+func TestLoginMFATwoPhase(t *testing.T) {
+	t.Parallel()
+
+	client, closer := testVaultServer(t)
+	defer closer()
+
+	ui, cmd := testLoginCommand(t)
+
+	userclient, entityID, methodID := testhelpers.SetupLoginMFATOTP(t, client, "", 5)
+	cmd.client = userclient
+
+	_ = testhelpers.RegisterEntityInTOTPEngine(t, client, entityID, methodID)
+
+	// clear the MFA creds just to be sure
+	cmd.client.SetMFACreds([]string{})
+
+	code := cmd.Run([]string{
+		"-method", "userpass",
+		"username=testuser1",
+		"password=testpassword",
+	})
+	if exp := 0; code != exp {
+		t.Errorf("expected %d to be %d", code, exp)
+	}
+
+	expected := methodID
+	output := ui.OutputWriter.String()
+	if !strings.Contains(output, expected) {
+		t.Fatalf("expected output to contain %q, got: %q", expected, output)
+	}
+
+	tokenHelper, err := cmd.TokenHelper()
+	if err != nil {
+		t.Fatal(err)
+	}
+	storedToken, err := tokenHelper.Get()
+	if storedToken != "" {
+		t.Fatal("expected empty stored token")
+	}
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestLoginMFATwoPhaseNonInteractiveMethodName(t *testing.T) {
+	t.Parallel()
+
+	client, closer := testVaultServer(t)
+	defer closer()
+
+	ui, cmd := testLoginCommand(t)
+
+	methodName := "foo"
+	waitPeriod := 5
+	userclient, entityID, methodID := testhelpers.SetupLoginMFATOTP(t, client, methodName, waitPeriod)
+	cmd.client = userclient
+
+	engineName := testhelpers.RegisterEntityInTOTPEngine(t, client, entityID, methodID)
+
+	// clear the MFA creds just to be sure
+	cmd.client.SetMFACreds([]string{})
+
+	code := cmd.Run([]string{
+		"-method", "userpass",
+		"-non-interactive",
+		"username=testuser1",
+		"password=testpassword",
+	})
+	if exp := 0; code != exp {
+		t.Errorf("expected %d to be %d", code, exp)
+	}
+
+	output := ui.OutputWriter.String()
+
+	reqIdReg := regexp.MustCompile(`mfa_request_id\s+([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})\s+mfa_constraint`)
+	reqIDRaw := reqIdReg.FindAllStringSubmatch(output, -1)
+	if len(reqIDRaw) == 0 || len(reqIDRaw[0]) < 2 {
+		t.Fatal("failed to find MFA request ID in output")
+	}
+	mfaReqID := reqIDRaw[0][1]
+
+	validateFunc := func(methodIdentifier string) {
+		// the time required for the totp engine to generate a new code
+		time.Sleep(time.Duration(waitPeriod) * time.Second)
+		totpPasscode1 := "passcode=" + testhelpers.GetTOTPCodeFromEngine(t, client, engineName)
+
+		secret, err := cmd.client.Logical().WriteWithContext(context.Background(), "sys/mfa/validate", map[string]interface{}{
+			"mfa_request_id": mfaReqID,
+			"mfa_payload": map[string][]string{
+				methodIdentifier: {totpPasscode1},
+			},
+		})
+		if err != nil {
+			t.Fatalf("mfa validation failed: %v", err)
+		}
+
+		if secret.Auth == nil || secret.Auth.ClientToken == "" {
+			t.Fatalf("mfa validation did not return a client token")
+		}
+	}
+
+	validateFunc(methodName)
+}
diff --git a/command/main.go b/command/main.go
new file mode 100644
index 0000000..13fbe21
--- /dev/null
+++ b/command/main.go
@@ -0,0 +1,363 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"sort"
+	"strconv"
+	"strings"
+	"text/tabwriter"
+
+	"github.com/fatih/color"
+	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/command/token"
+	colorable "github.com/mattn/go-colorable"
+	"github.com/mitchellh/cli"
+)
+
+type VaultUI struct {
+	cli.Ui
+	format   string
+	detailed bool
+}
+
+const (
+	globalFlagOutputCurlString = "output-curl-string"
+	globalFlagOutputPolicy     = "output-policy"
+	globalFlagFormat           = "format"
+	globalFlagDetailed         = "detailed"
+)
+
+var globalFlags = []string{
+	globalFlagOutputCurlString, globalFlagOutputPolicy, globalFlagFormat, globalFlagDetailed,
+}
+
+// setupEnv parses args, possibly replacing them, and derives the format
+// options from CLI flags and environment variables.
+func setupEnv(args []string) (retArgs []string, format string, detailed bool, outputCurlString bool, outputPolicy bool) {
+	var err error
+	var nextArgFormat bool
+	var haveDetailed bool
+
+	for _, arg := range args {
+		if nextArgFormat {
+			nextArgFormat = false
+			format = arg
+			continue
+		}
+
+		if arg == "--" {
+			break
+		}
+
+		if len(args) == 1 && (arg == "-v" || arg == "-version" || arg == "--version") {
+			args = []string{"version"}
+			break
+		}
+
+		if isGlobalFlag(arg, globalFlagOutputCurlString) {
+			outputCurlString = true
+			continue
+		}
+
+		if isGlobalFlag(arg, globalFlagOutputPolicy) {
+			outputPolicy = true
+			continue
+		}
+
+		// Parse a given flag here, which overrides the env var
+		if isGlobalFlagWithValue(arg, globalFlagFormat) {
+			format = getGlobalFlagValue(arg)
+		}
+		// For backwards compat, it could be specified without an equal sign
+		if isGlobalFlag(arg, globalFlagFormat) {
+			nextArgFormat = true
+		}
+
+		// Parse a given flag here, which overrides the env var
+		if isGlobalFlagWithValue(arg, globalFlagDetailed) {
+			detailed, err = strconv.ParseBool(getGlobalFlagValue(arg))
+			if err != nil {
+				detailed = false
+			}
+			haveDetailed = true
+		}
+		// For backwards compat, it could be specified without an equal sign to enable
+		// detailed output.
+		if isGlobalFlag(arg, globalFlagDetailed) {
+			detailed = true
+			haveDetailed = true
+		}
+	}
+
+	envVaultFormat := os.Getenv(EnvVaultFormat)
+	// If we did not parse a value, fetch the env var
+	if format == "" && envVaultFormat != "" {
+		format = envVaultFormat
+	}
+	// Lowercase for consistency
+	format = strings.ToLower(format)
+	if format == "" {
+		format = "table"
+	}
+
+	envVaultDetailed := os.Getenv(EnvVaultDetailed)
+	// If we did not parse a value, fetch the env var
+	if !haveDetailed && envVaultDetailed != "" {
+		detailed, err = strconv.ParseBool(envVaultDetailed)
+		if err != nil {
+			detailed = false
+		}
+	}
+
+	return args, format, detailed, outputCurlString, outputPolicy
+}
+
+func isGlobalFlag(arg string, flag string) bool {
+	return arg == "-"+flag || arg == "--"+flag
+}
+
+func isGlobalFlagWithValue(arg string, flag string) bool {
+	return strings.HasPrefix(arg, "--"+flag+"=") || strings.HasPrefix(arg, "-"+flag+"=")
+}
+
+func getGlobalFlagValue(arg string) string {
+	_, value, _ := strings.Cut(arg, "=")
+
+	return value
+}
+
+type RunOptions struct {
+	TokenHelper token.TokenHelper
+	Stdout      io.Writer
+	Stderr      io.Writer
+	Address     string
+	Client      *api.Client
+}
+
+func Run(args []string) int {
+	return RunCustom(args, nil)
+}
+
+// RunCustom allows passing in a base command template to pass to other
+// commands. Currently, this is only used for setting a custom token helper.
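+//
+// (Editor's note: illustrative sketch; myHelper is an assumed implementation
+// of token.TokenHelper.) An embedding binary could route its arguments through
+// RunCustom like so:
+//
+//	os.Exit(RunCustom(os.Args[1:], &RunOptions{
+//		TokenHelper: myHelper,
+//	}))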
+func RunCustom(args []string, runOpts *RunOptions) int { + if runOpts == nil { + runOpts = &RunOptions{} + } + + var format string + var detailed bool + var outputCurlString bool + var outputPolicy bool + args, format, detailed, outputCurlString, outputPolicy = setupEnv(args) + + // Don't use color if disabled + useColor := true + if os.Getenv(EnvVaultCLINoColor) != "" || color.NoColor { + useColor = false + } + + if runOpts.Stdout == nil { + runOpts.Stdout = os.Stdout + } + if runOpts.Stderr == nil { + runOpts.Stderr = os.Stderr + } + + // Only use colored UI if stdout is a tty, and not disabled + if useColor && format == "table" { + if f, ok := runOpts.Stdout.(*os.File); ok { + runOpts.Stdout = colorable.NewColorable(f) + } + if f, ok := runOpts.Stderr.(*os.File); ok { + runOpts.Stderr = colorable.NewColorable(f) + } + } else { + runOpts.Stdout = colorable.NewNonColorable(runOpts.Stdout) + runOpts.Stderr = colorable.NewNonColorable(runOpts.Stderr) + } + + uiErrWriter := runOpts.Stderr + if outputCurlString || outputPolicy { + uiErrWriter = &bytes.Buffer{} + } + + ui := &VaultUI{ + Ui: &cli.ColoredUi{ + ErrorColor: cli.UiColorRed, + WarnColor: cli.UiColorYellow, + Ui: &cli.BasicUi{ + Reader: bufio.NewReader(os.Stdin), + Writer: runOpts.Stdout, + ErrorWriter: uiErrWriter, + }, + }, + format: format, + detailed: detailed, + } + + serverCmdUi := &VaultUI{ + Ui: &cli.ColoredUi{ + ErrorColor: cli.UiColorRed, + WarnColor: cli.UiColorYellow, + Ui: &cli.BasicUi{ + Reader: bufio.NewReader(os.Stdin), + Writer: runOpts.Stdout, + }, + }, + format: format, + } + + if _, ok := Formatters[format]; !ok { + ui.Error(fmt.Sprintf("Invalid output format: %s", format)) + return 1 + } + + commands := initCommands(ui, serverCmdUi, runOpts) + + hiddenCommands := []string{"version"} + + cli := &cli.CLI{ + Name: "vault", + Args: args, + Commands: commands, + HelpFunc: groupedHelpFunc( + cli.BasicHelpFunc("vault"), + ), + HelpWriter: runOpts.Stdout, + ErrorWriter: runOpts.Stderr, + HiddenCommands: hiddenCommands, + Autocomplete: true, + AutocompleteNoDefaultFlags: true, + } + + exitCode, err := cli.Run() + if outputCurlString { + return generateCurlString(exitCode, runOpts, uiErrWriter.(*bytes.Buffer)) + } else if outputPolicy { + return generatePolicy(exitCode, runOpts, uiErrWriter.(*bytes.Buffer)) + } else if err != nil { + fmt.Fprintf(runOpts.Stderr, "Error executing CLI: %s\n", err.Error()) + return 1 + } + + return exitCode +} + +var commonCommands = []string{ + "read", + "write", + "delete", + "list", + "login", + "agent", + "server", + "status", + "unwrap", +} + +func groupedHelpFunc(f cli.HelpFunc) cli.HelpFunc { + return func(commands map[string]cli.CommandFactory) string { + var b bytes.Buffer + tw := tabwriter.NewWriter(&b, 0, 2, 6, ' ', 0) + + fmt.Fprintf(tw, "Usage: vault [args]\n\n") + fmt.Fprintf(tw, "Common commands:\n") + for _, v := range commonCommands { + printCommand(tw, v, commands[v]) + } + + otherCommands := make([]string, 0, len(commands)) + for k := range commands { + found := false + for _, v := range commonCommands { + if k == v { + found = true + break + } + } + + if !found { + otherCommands = append(otherCommands, k) + } + } + sort.Strings(otherCommands) + + fmt.Fprintf(tw, "\n") + fmt.Fprintf(tw, "Other commands:\n") + for _, v := range otherCommands { + printCommand(tw, v, commands[v]) + } + + tw.Flush() + + return strings.TrimSpace(b.String()) + } +} + +func printCommand(w io.Writer, name string, cmdFn cli.CommandFactory) { + cmd, err := cmdFn() + if err != nil { + 
panic(fmt.Sprintf("failed to load %q command: %s", name, err)) + } + fmt.Fprintf(w, " %s\t%s\n", name, cmd.Synopsis()) +} + +func generateCurlString(exitCode int, runOpts *RunOptions, preParsingErrBuf *bytes.Buffer) int { + if exitCode == 0 { + fmt.Fprint(runOpts.Stderr, "Could not generate cURL command") + return 1 + } + + if api.LastOutputStringError == nil { + if exitCode == 127 { + // Usage, just pass it through + return exitCode + } + runOpts.Stderr.Write(preParsingErrBuf.Bytes()) + runOpts.Stderr.Write([]byte("Unable to generate cURL string from command\n")) + return exitCode + } + + cs, err := api.LastOutputStringError.CurlString() + if err != nil { + runOpts.Stderr.Write([]byte(fmt.Sprintf("Error creating request string: %s\n", err))) + return 1 + } + + runOpts.Stdout.Write([]byte(fmt.Sprintf("%s\n", cs))) + return 0 +} + +func generatePolicy(exitCode int, runOpts *RunOptions, preParsingErrBuf *bytes.Buffer) int { + if exitCode == 0 { + fmt.Fprint(runOpts.Stderr, "Could not generate policy") + return 1 + } + + if api.LastOutputPolicyError == nil { + if exitCode == 127 { + // Usage, just pass it through + return exitCode + } + runOpts.Stderr.Write(preParsingErrBuf.Bytes()) + runOpts.Stderr.Write([]byte("Unable to generate policy from command\n")) + return exitCode + } + + hcl, err := api.LastOutputPolicyError.HCLString() + if err != nil { + runOpts.Stderr.Write([]byte(fmt.Sprintf("Error assembling policy HCL: %s\n", err))) + return 1 + } + + runOpts.Stdout.Write([]byte(fmt.Sprintf("%s\n", hcl))) + return 0 +} diff --git a/command/monitor.go b/command/monitor.go new file mode 100644 index 0000000..7545b82 --- /dev/null +++ b/command/monitor.go @@ -0,0 +1,138 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*MonitorCommand)(nil) + _ cli.CommandAutocomplete = (*MonitorCommand)(nil) +) + +type MonitorCommand struct { + *BaseCommand + + logLevel string + logFormat string + + // ShutdownCh is used to capture interrupt signal and end streaming + ShutdownCh chan struct{} +} + +func (c *MonitorCommand) Synopsis() string { + return "Stream log messages from a Vault server" +} + +func (c *MonitorCommand) Help() string { + helpText := ` +Usage: vault monitor [options] + + Stream log messages of a Vault server. The monitor command lets you listen + for log levels that may be filtered out of the server logs. For example, + the server may be logging at the INFO level, but with the monitor command + you can set -log-level=DEBUG. + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *MonitorCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP) + + f := set.NewFlagSet("Monitor Options") + f.StringVar(&StringVar{ + Name: "log-level", + Target: &c.logLevel, + Default: "info", + Completion: complete.PredictSet("trace", "debug", "info", "warn", "error"), + Usage: "If passed, the log level to monitor logs. Supported values" + + "(in order of detail) are \"trace\", \"debug\", \"info\", \"warn\"" + + " and \"error\". These are not case sensitive.", + }) + f.StringVar(&StringVar{ + Name: "log-format", + Target: &c.logFormat, + Default: "standard", + Completion: complete.PredictSet("standard", "json"), + Usage: "Output format of logs. 
Supported values are \"standard\" and \"json\".", + }) + + return set +} + +func (c *MonitorCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *MonitorCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *MonitorCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + parsedArgs := f.Args() + if len(parsedArgs) > 0 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(parsedArgs))) + return 1 + } + + c.logLevel = strings.ToLower(c.logLevel) + validLevels := []string{"trace", "debug", "info", "warn", "error"} + if !strutil.StrListContains(validLevels, c.logLevel) { + c.UI.Error(fmt.Sprintf("%s is an unknown log level. Valid log levels are: %s", c.logLevel, validLevels)) + return 1 + } + + c.logFormat = strings.ToLower(c.logFormat) + validFormats := []string{"standard", "json"} + if !strutil.StrListContains(validFormats, c.logFormat) { + c.UI.Error(fmt.Sprintf("%s is an unknown log format. Valid log formats are: %s", c.logFormat, validFormats)) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + // Remove the default 60 second timeout so we can stream indefinitely + client.SetClientTimeout(0) + + var logCh chan string + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logCh, err = client.Sys().Monitor(ctx, c.logLevel, c.logFormat) + if err != nil { + c.UI.Error(fmt.Sprintf("Error starting monitor: %s", err)) + return 1 + } + + for { + select { + case log, ok := <-logCh: + if !ok { + return 0 + } + c.UI.Info(log) + case <-c.ShutdownCh: + return 0 + } + } +} diff --git a/command/monitor_test.go b/command/monitor_test.go new file mode 100644 index 0000000..0cc722c --- /dev/null +++ b/command/monitor_test.go @@ -0,0 +1,94 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/mitchellh/cli" +) + +func testMonitorCommand(tb testing.TB) (*cli.MockUi, *MonitorCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &MonitorCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestMonitorCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int64 + }{ + { + "valid", + []string{ + "-log-level=debug", + }, + "", + 0, + }, + { + "too_many_args", + []string{ + "-log-level=debug", + "foo", + }, + "Too many arguments", + 1, + }, + { + "unknown_log_level", + []string{ + "-log-level=haha", + }, + "haha is an unknown log level", + 1, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + client, closer := testVaultServer(t) + defer closer() + + var code int64 + shutdownCh := make(chan struct{}) + + ui, cmd := testMonitorCommand(t) + cmd.client = client + cmd.ShutdownCh = shutdownCh + + go func() { + atomic.StoreInt64(&code, int64(cmd.Run(tc.args))) + }() + + <-time.After(3 * time.Second) + close(shutdownCh) + + if atomic.LoadInt64(&code) != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Fatalf("expected %q to contain %q", combined, tc.out) + } + }) + } +} diff --git a/command/namespace.go b/command/namespace.go new file mode 100644 index 0000000..18bc6e9 --- /dev/null +++ b/command/namespace.go @@ -0,0 +1,66 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + + "github.com/mitchellh/cli" +) + +var _ cli.Command = (*NamespaceCommand)(nil) + +type NamespaceCommand struct { + *BaseCommand +} + +func (c *NamespaceCommand) Synopsis() string { + return "Interact with namespaces" +} + +func (c *NamespaceCommand) Help() string { + helpText := ` +Usage: vault namespace [options] [args] + + This command groups subcommands for interacting with Vault namespaces. + These subcommands operate in the context of the namespace that the + currently logged in token belongs to. + + List enabled child namespaces: + + $ vault namespace list + + Look up an existing namespace: + + $ vault namespace lookup + + Create a new namespace: + + $ vault namespace create + + Patch an existing namespace: + + $ vault namespace patch + + Delete an existing namespace: + + $ vault namespace delete + + Lock the API for an existing namespace: + + $ vault namespace lock + + Unlock the API for an existing namespace: + + $ vault namespace unlock + + Please see the individual subcommand help for detailed usage information. +` + + return strings.TrimSpace(helpText) +} + +func (c *NamespaceCommand) Run(args []string) int { + return cli.RunResultHelp +} diff --git a/command/namespace_api_lock.go b/command/namespace_api_lock.go new file mode 100644 index 0000000..57b1969 --- /dev/null +++ b/command/namespace_api_lock.go @@ -0,0 +1,90 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/vault/helper/namespace" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*NamespaceAPILockCommand)(nil) + _ cli.CommandAutocomplete = (*NamespaceAPILockCommand)(nil) +) + +type NamespaceAPILockCommand struct { + *BaseCommand +} + +func (c *NamespaceAPILockCommand) Synopsis() string { + return "Lock the API for particular namespaces" +} + +func (c *NamespaceAPILockCommand) Help() string { + helpText := ` +Usage: vault namespace lock PATH + + Lock the current namespace, and all descendants: + + $ vault namespace lock + + Lock a child namespace, and all of its descendants (e.g. ns1/ns2/): + + $ vault namespace lock ns1/ns2 + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *NamespaceAPILockCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP | FlagSetOutputFormat) +} + +func (c *NamespaceAPILockCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultNamespaces() +} + +func (c *NamespaceAPILockCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *NamespaceAPILockCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + if len(args) > 1 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0 or 1, got %d)", len(args))) + return 1 + } + + // current namespace is already encoded in the :client: + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + optionalChildNSPath := "" + if len(args) == 1 { + optionalChildNSPath = fmt.Sprintf("/%s", namespace.Canonicalize(args[0])) + } + + resp, err := client.Logical().Write(fmt.Sprintf("sys/namespaces/api-lock/lock%s", optionalChildNSPath), nil) + if err != nil { + c.UI.Error(fmt.Sprintf("Error locking namespace: %v", err)) + return 2 + } + + return OutputSecret(c.UI, resp) +} diff --git a/command/namespace_api_unlock.go b/command/namespace_api_unlock.go new file mode 100644 index 0000000..77e8291 --- /dev/null +++ b/command/namespace_api_unlock.go @@ -0,0 +1,96 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/vault/helper/namespace" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*NamespaceAPIUnlockCommand)(nil) + _ cli.CommandAutocomplete = (*NamespaceAPIUnlockCommand)(nil) +) + +type NamespaceAPIUnlockCommand struct { + *BaseCommand +} + +func (c *NamespaceAPIUnlockCommand) Synopsis() string { + return "Unlock the API for particular namespaces" +} + +func (c *NamespaceAPIUnlockCommand) Help() string { + helpText := ` +Usage: vault namespace unlock [options] PATH + + Unlock the current namespace, and all descendants, with unlock key: + + $ vault namespace unlock -unlock-key= + + Unlock the current namespace, and all descendants (from a root token): + + $ vault namespace unlock + + Unlock a child namespace, and all of its descendants (e.g. 
ns1/ns2/):
+
+      $ vault namespace unlock -unlock-key= ns1/ns2
+
+` + c.Flags().Help()
+
+	return strings.TrimSpace(helpText)
+}
+
+func (c *NamespaceAPIUnlockCommand) Flags() *FlagSets {
+	return c.flagSet(FlagSetHTTP | FlagSetOutputFormat)
+}
+
+func (c *NamespaceAPIUnlockCommand) AutocompleteArgs() complete.Predictor {
+	return c.PredictVaultNamespaces()
+}
+
+func (c *NamespaceAPIUnlockCommand) AutocompleteFlags() complete.Flags {
+	return c.Flags().Completions()
+}
+
+func (c *NamespaceAPIUnlockCommand) Run(args []string) int {
+	f := c.Flags()
+
+	if err := f.Parse(args); err != nil {
+		c.UI.Error(err.Error())
+		return 1
+	}
+
+	args = f.Args()
+	if len(args) > 1 {
+		c.UI.Error(fmt.Sprintf("Too many arguments (expected 0 or 1, got %d)", len(args)))
+		return 1
+	}
+
+	// current namespace is already encoded in the :client:
+	client, err := c.Client()
+	if err != nil {
+		c.UI.Error(err.Error())
+		return 2
+	}
+
+	optionalChildNSPath := ""
+	if len(args) == 1 {
+		optionalChildNSPath = fmt.Sprintf("/%s", namespace.Canonicalize(args[0]))
+	}
+
+	_, err = client.Logical().Write(fmt.Sprintf("sys/namespaces/api-lock/unlock%s", optionalChildNSPath), map[string]interface{}{
+		"unlock_key": c.flagUnlockKey,
+	})
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("Error unlocking namespace: %v", err))
+		return 2
+	}
+
+	return 0
+}
diff --git a/command/namespace_create.go b/command/namespace_create.go
new file mode 100644
index 0000000..60df834
--- /dev/null
+++ b/command/namespace_create.go
@@ -0,0 +1,115 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/mitchellh/cli"
+	"github.com/posener/complete"
+)
+
+var (
+	_ cli.Command             = (*NamespaceCreateCommand)(nil)
+	_ cli.CommandAutocomplete = (*NamespaceCreateCommand)(nil)
+)
+
+type NamespaceCreateCommand struct {
+	*BaseCommand
+
+	flagCustomMetadata map[string]string
+}
+
+func (c *NamespaceCreateCommand) Synopsis() string {
+	return "Create a new namespace"
+}
+
+func (c *NamespaceCreateCommand) Help() string {
+	helpText := `
+Usage: vault namespace create [options] PATH
+
+  Create a child namespace. The namespace created will be relative to the
+  namespace provided in either the VAULT_NAMESPACE environment variable or
+  -namespace CLI flag.
+
+  Create a child namespace (e.g. ns1/):
+
+      $ vault namespace create ns1
+
+  Create a child namespace from a parent namespace (e.g. ns1/ns2/):
+
+      $ vault namespace create -namespace=ns1 ns2
+
+` + c.Flags().Help()
+
+	return strings.TrimSpace(helpText)
+}
+
+func (c *NamespaceCreateCommand) Flags() *FlagSets {
+	set := c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat)
+
+	f := set.NewFlagSet("Command Options")
+	f.StringMapVar(&StringMapVar{
+		Name:    "custom-metadata",
+		Target:  &c.flagCustomMetadata,
+		Default: map[string]string{},
+		Usage: "Specifies arbitrary key=value metadata meant to describe a namespace." +
+			"This can be specified multiple times to add multiple pieces of metadata.",
+	})
+
+	return set
+}
+
+func (c *NamespaceCreateCommand) AutocompleteArgs() complete.Predictor {
+	return complete.PredictNothing
+}
+
+func (c *NamespaceCreateCommand) AutocompleteFlags() complete.Flags {
+	return c.Flags().Completions()
+}
+
+func (c *NamespaceCreateCommand) Run(args []string) int {
+	f := c.Flags()
+
+	if err := f.Parse(args); err != nil {
+		c.UI.Error(err.Error())
+		return 1
+	}
+
+	args = f.Args()
+	switch {
+	case len(args) < 1:
+		c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args)))
+		return 1
+	case len(args) > 1:
+		c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args)))
+		return 1
+	}
+
+	namespacePath := strings.TrimSpace(args[0])
+
+	client, err := c.Client()
+	if err != nil {
+		c.UI.Error(err.Error())
+		return 2
+	}
+
+	data := map[string]interface{}{
+		"custom_metadata": c.flagCustomMetadata,
+	}
+
+	secret, err := client.Logical().Write("sys/namespaces/"+namespacePath, data)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("Error creating namespace: %s", err))
+		return 2
+	}
+
+	// Handle single field output
+	if c.flagField != "" {
+		return PrintRawField(c.UI, secret, c.flagField)
+	}
+
+	return OutputSecret(c.UI, secret)
+}
diff --git a/command/namespace_delete.go b/command/namespace_delete.go
new file mode 100644
index 0000000..5c79c35
--- /dev/null
+++ b/command/namespace_delete.go
@@ -0,0 +1,103 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/mitchellh/cli"
+	"github.com/posener/complete"
+)
+
+var (
+	_ cli.Command             = (*NamespaceDeleteCommand)(nil)
+	_ cli.CommandAutocomplete = (*NamespaceDeleteCommand)(nil)
+)
+
+type NamespaceDeleteCommand struct {
+	*BaseCommand
+}
+
+func (c *NamespaceDeleteCommand) Synopsis() string {
+	return "Delete an existing namespace"
+}
+
+func (c *NamespaceDeleteCommand) Help() string {
+	helpText := `
+Usage: vault namespace delete [options] PATH
+
+  Delete an existing namespace. The namespace deleted will be relative to the
+  namespace provided in either the VAULT_NAMESPACE environment variable or
+  -namespace CLI flag.
+
+  Delete a namespace (e.g. ns1/):
+
+      $ vault namespace delete ns1
+
+  Delete a namespace from a parent namespace (e.g.
ns1/ns2/): + + $ vault namespace delete -namespace=ns1 ns2 + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *NamespaceDeleteCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP) +} + +func (c *NamespaceDeleteCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultNamespaces() +} + +func (c *NamespaceDeleteCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *NamespaceDeleteCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + namespacePath := strings.TrimSpace(args[0]) + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + secret, err := client.Logical().Delete("sys/namespaces/" + namespacePath) + if err != nil { + c.UI.Error(fmt.Sprintf("Error deleting namespace: %s", err)) + return 2 + } + + if secret != nil { + // Likely, we have warnings + return OutputSecret(c.UI, secret) + } + + if !strings.HasSuffix(namespacePath, "/") { + namespacePath = namespacePath + "/" + } + + c.UI.Output(fmt.Sprintf("Success! Namespace deleted at: %s", namespacePath)) + return 0 +} diff --git a/command/namespace_list.go b/command/namespace_list.go new file mode 100644 index 0000000..6394dae --- /dev/null +++ b/command/namespace_list.go @@ -0,0 +1,123 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*NamespaceListCommand)(nil) + _ cli.CommandAutocomplete = (*NamespaceListCommand)(nil) +) + +type NamespaceListCommand struct { + *BaseCommand +} + +func (c *NamespaceListCommand) Synopsis() string { + return "List child namespaces" +} + +func (c *NamespaceListCommand) Help() string { + helpText := ` +Usage: vault namespace list [options] + + Lists the enabled child namespaces. 
+ + List all enabled child namespaces: + + $ vault namespace list + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *NamespaceListCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.BoolVar(&BoolVar{ + Name: "detailed", + Target: &c.flagDetailed, + Default: false, + Usage: "Print detailed information such as namespace ID.", + }) + + return set +} + +func (c *NamespaceListCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *NamespaceListCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *NamespaceListCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + if len(args) > 0 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + secret, err := client.Logical().List("sys/namespaces") + if err != nil { + c.UI.Error(fmt.Sprintf("Error listing namespaces: %s", err)) + return 2 + } + + _, ok := extractListData(secret) + if Format(c.UI) != "table" { + if secret == nil || secret.Data == nil || !ok { + OutputData(c.UI, map[string]interface{}{}) + return 2 + } + } + + if secret == nil { + c.UI.Error("No namespaces found") + return 2 + } + + // There could be e.g. warnings + if secret.Data == nil { + return OutputSecret(c.UI, secret) + } + + if secret.WrapInfo != nil && secret.WrapInfo.TTL != 0 { + return OutputSecret(c.UI, secret) + } + + if !ok { + c.UI.Error("No entries found") + return 2 + } + + if c.flagDetailed && Format(c.UI) != "table" { + return OutputData(c.UI, secret.Data["key_info"]) + } + + return OutputList(c.UI, secret) +} diff --git a/command/namespace_lookup.go b/command/namespace_lookup.go new file mode 100644 index 0000000..ee18736 --- /dev/null +++ b/command/namespace_lookup.go @@ -0,0 +1,93 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*NamespaceLookupCommand)(nil) + _ cli.CommandAutocomplete = (*NamespaceLookupCommand)(nil) +) + +type NamespaceLookupCommand struct { + *BaseCommand +} + +func (c *NamespaceLookupCommand) Synopsis() string { + return "Look up an existing namespace" +} + +func (c *NamespaceLookupCommand) Help() string { + helpText := ` +Usage: vault namespace lookup [options] PATH + + Get information about the namespace of the locally authenticated token: + + $ vault namespace lookup + + Get information about the namespace of a particular child token (e.g. 
ns1/ns2/): + + $ vault namespace lookup -namespace=ns1 ns2 + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *NamespaceLookupCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP | FlagSetOutputFormat) +} + +func (c *NamespaceLookupCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultNamespaces() +} + +func (c *NamespaceLookupCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *NamespaceLookupCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + namespacePath := strings.TrimSpace(args[0]) + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + secret, err := client.Logical().Read("sys/namespaces/" + namespacePath) + if err != nil { + c.UI.Error(fmt.Sprintf("Error looking up namespace: %s", err)) + return 2 + } + if secret == nil { + c.UI.Error("Namespace not found") + return 2 + } + + return OutputSecret(c.UI, secret) +} diff --git a/command/namespace_patch.go b/command/namespace_patch.go new file mode 100644 index 0000000..2a4a6dc --- /dev/null +++ b/command/namespace_patch.go @@ -0,0 +1,141 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*NamespacePatchCommand)(nil) + _ cli.CommandAutocomplete = (*NamespacePatchCommand)(nil) +) + +type NamespacePatchCommand struct { + *BaseCommand + + flagCustomMetadata map[string]string + flagRemoveCustomMetadata []string +} + +func (c *NamespacePatchCommand) Synopsis() string { + return "Patch an existing namespace" +} + +func (c *NamespacePatchCommand) Help() string { + helpText := ` +Usage: vault namespace patch [options] PATH + + Patch an existing namespace. The namespace patched will be relative to the + namespace provided in either the VAULT_NAMESPACE environment variable or + -namespace CLI flag. + + Patch an existing child namespace by adding and removing custom-metadata (e.g. ns1/): + + $ vault namespace patch -custom-metadata=foo=abc -remove-custom-metadata=bar ns1 + + Patch an existing child namespace from a parent namespace (e.g. ns1/ns2/): + + $ vault namespace patch -namespace=ns1 -custom-metadata=foo=abc ns2 + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *NamespacePatchCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + f.StringMapVar(&StringMapVar{ + Name: "custom-metadata", + Target: &c.flagCustomMetadata, + Default: map[string]string{}, + Usage: "Specifies arbitrary key=value metadata meant to describe a namespace." + + "This can be specified multiple times to add multiple pieces of metadata.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: "remove-custom-metadata", + Target: &c.flagRemoveCustomMetadata, + Default: []string{}, + Usage: "Key to remove from custom metadata. 
To specify multiple values, specify this flag multiple times.", + }) + + return set +} + +func (c *NamespacePatchCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *NamespacePatchCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *NamespacePatchCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + namespacePath := strings.TrimSpace(args[0]) + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + data := make(map[string]interface{}) + customMetadata := make(map[string]interface{}) + + for key, value := range c.flagCustomMetadata { + customMetadata[key] = value + } + + for _, key := range c.flagRemoveCustomMetadata { + // A null in a JSON merge patch payload will remove the associated key + customMetadata[key] = nil + } + + data["custom_metadata"] = customMetadata + + secret, err := client.Logical().JSONMergePatch(context.Background(), "sys/namespaces/"+namespacePath, data) + if err != nil { + if re, ok := err.(*api.ResponseError); ok && re.StatusCode == http.StatusNotFound { + c.UI.Error("Namespace not found") + return 2 + } + + c.UI.Error(fmt.Sprintf("Error patching namespace: %s", err)) + return 2 + } + + // Handle single field output + if c.flagField != "" { + return PrintRawField(c.UI, secret, c.flagField) + } + + return OutputSecret(c.UI, secret) +} diff --git a/command/operator.go b/command/operator.go new file mode 100644 index 0000000..a79f7bf --- /dev/null +++ b/command/operator.go @@ -0,0 +1,50 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + + "github.com/mitchellh/cli" +) + +var _ cli.Command = (*OperatorCommand)(nil) + +type OperatorCommand struct { + *BaseCommand +} + +func (c *OperatorCommand) Synopsis() string { + return "Perform operator-specific tasks" +} + +func (c *OperatorCommand) Help() string { + helpText := ` +Usage: vault operator [options] [args] + + This command groups subcommands for operators interacting with Vault. Most + users will not need to interact with these commands. Here are a few examples + of the operator commands: + + Initialize a new Vault cluster: + + $ vault operator init + + Force a Vault to resign leadership in a cluster: + + $ vault operator step-down + + Rotate Vault's underlying encryption key: + + $ vault operator rotate + + Please see the individual subcommand help for detailed usage information. +` + + return strings.TrimSpace(helpText) +} + +func (c *OperatorCommand) Run(args []string) int { + return cli.RunResultHelp +} diff --git a/command/operator_diagnose.go b/command/operator_diagnose.go new file mode 100644 index 0000000..5abddb7 --- /dev/null +++ b/command/operator_diagnose.go @@ -0,0 +1,767 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "strings" + "sync" + "time" + + "golang.org/x/term" + + wrapping "github.com/hashicorp/go-kms-wrapping/v2" + + "github.com/hashicorp/consul/api" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-secure-stdlib/reloadutil" + uuid "github.com/hashicorp/go-uuid" + cserver "github.com/hashicorp/vault/command/server" + "github.com/hashicorp/vault/helper/constants" + "github.com/hashicorp/vault/helper/metricsutil" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/internalshared/listenerutil" + physconsul "github.com/hashicorp/vault/physical/consul" + "github.com/hashicorp/vault/physical/raft" + "github.com/hashicorp/vault/sdk/physical" + sr "github.com/hashicorp/vault/serviceregistration" + srconsul "github.com/hashicorp/vault/serviceregistration/consul" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/vault/diagnose" + "github.com/hashicorp/vault/vault/hcp_link" + "github.com/hashicorp/vault/version" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +const CoreConfigUninitializedErr = "Diagnose cannot attempt this step because core config could not be set." + +var ( + _ cli.Command = (*OperatorDiagnoseCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorDiagnoseCommand)(nil) +) + +type OperatorDiagnoseCommand struct { + *BaseCommand + diagnose *diagnose.Session + + flagDebug bool + flagSkips []string + flagConfigs []string + cleanupGuard sync.Once + + reloadFuncsLock *sync.RWMutex + reloadFuncs *map[string][]reloadutil.ReloadFunc + ServiceRegistrations map[string]sr.Factory + startedCh chan struct{} // for tests + reloadedCh chan struct{} // for tests + skipEndEnd bool // for tests +} + +func (c *OperatorDiagnoseCommand) Synopsis() string { + return "Troubleshoot problems starting Vault" +} + +func (c *OperatorDiagnoseCommand) Help() string { + helpText := ` +Usage: vault operator diagnose + + This command troubleshoots Vault startup issues, such as TLS configuration or + auto-unseal. It should be run using the same environment variables and configuration + files as the "vault server" command, so that startup problems can be accurately + reproduced. + + Start diagnose with a configuration file: + + $ vault operator diagnose -config=/etc/vault/config.hcl + + Perform a diagnostic check while Vault is still running: + + $ vault operator diagnose -config=/etc/vault/config.hcl -skip=listener + +` + c.Flags().Help() + return strings.TrimSpace(helpText) +} + +func (c *OperatorDiagnoseCommand) Flags() *FlagSets { + set := NewFlagSets(c.UI) + f := set.NewFlagSet("Command Options") + + f.StringSliceVar(&StringSliceVar{ + Name: "config", + Target: &c.flagConfigs, + Completion: complete.PredictOr( + complete.PredictFiles("*.hcl"), + complete.PredictFiles("*.json"), + complete.PredictDirs("*"), + ), + Usage: "Path to a Vault configuration file or directory of configuration " + + "files. This flag can be specified multiple times to load multiple " + + "configurations. If the path is a directory, all files which end in " + + ".hcl or .json are loaded.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: "skip", + Target: &c.flagSkips, + Usage: "Skip the health checks named as arguments. 
May be 'listener', 'storage', or 'autounseal'.", + }) + + f.BoolVar(&BoolVar{ + Name: "debug", + Target: &c.flagDebug, + Default: false, + Usage: "Dump all information collected by Diagnose.", + }) + + f.StringVar(&StringVar{ + Name: "format", + Target: &c.flagFormat, + Usage: "The output format", + }) + return set +} + +func (c *OperatorDiagnoseCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *OperatorDiagnoseCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +const ( + status_unknown = "[ ] " + status_ok = "\u001b[32m[ ok ]\u001b[0m " + status_failed = "\u001b[31m[failed]\u001b[0m " + status_warn = "\u001b[33m[ warn ]\u001b[0m " + same_line = "\u001b[F" +) + +func (c *OperatorDiagnoseCommand) Run(args []string) int { + f := c.Flags() + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 3 + } + return c.RunWithParsedFlags() +} + +func (c *OperatorDiagnoseCommand) RunWithParsedFlags() int { + if len(c.flagConfigs) == 0 { + c.UI.Error("Must specify a configuration file using -config.") + return 3 + } + + if c.diagnose == nil { + if c.flagFormat == "json" { + c.diagnose = diagnose.New(io.Discard) + } else { + c.UI.Output(version.GetVersion().FullVersionNumber(true)) + c.diagnose = diagnose.New(os.Stdout) + } + } + ctx := diagnose.Context(context.Background(), c.diagnose) + c.diagnose.SkipFilters = c.flagSkips + err := c.offlineDiagnostics(ctx) + + results := c.diagnose.Finalize(ctx) + if c.flagFormat == "json" { + resultsJS, err := json.MarshalIndent(results, "", " ") + if err != nil { + fmt.Fprintf(os.Stderr, "Error marshalling results: %v.", err) + return 4 + } + c.UI.Output(string(resultsJS)) + } else { + c.UI.Output("\nResults:") + w, _, err := term.GetSize(0) + if err == nil { + results.Write(os.Stdout, w) + } else { + results.Write(os.Stdout, 0) + } + } + + if err != nil { + return 4 + } + // Use a different return code + switch results.Status { + case diagnose.WarningStatus: + return 2 + case diagnose.ErrorStatus: + return 1 + } + return 0 +} + +func (c *OperatorDiagnoseCommand) offlineDiagnostics(ctx context.Context) error { + rloadFuncs := make(map[string][]reloadutil.ReloadFunc) + server := &ServerCommand{ + // TODO: set up a different one? + // In particular, a UI instance that won't output? + BaseCommand: c.BaseCommand, + + // TODO: refactor to a common place? + AuditBackends: auditBackends, + CredentialBackends: credentialBackends, + LogicalBackends: logicalBackends, + PhysicalBackends: physicalBackends, + ServiceRegistrations: serviceRegistrations, + + // TODO: other ServerCommand options? 
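+		//
+		// Note (sketch): diagnose drives the server's setup code paths directly
+		// rather than running a server, so the logger is silenced (log.Off) and
+		// reload funcs are collected into a local map.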
+
+		logger: log.NewInterceptLogger(&log.LoggerOptions{
+			Level: log.Off,
+		}),
+		allLoggers:      []log.Logger{},
+		reloadFuncs:     &rloadFuncs,
+		reloadFuncsLock: new(sync.RWMutex),
+	}
+
+	ctx, span := diagnose.StartSpan(ctx, "Vault Diagnose")
+	defer span.End()
+
+	// OS Specific checks
+	diagnose.OSChecks(ctx)
+
+	var config *cserver.Config
+
+	diagnose.Test(ctx, "Parse Configuration", func(ctx context.Context) (err error) {
+		server.flagConfigs = c.flagConfigs
+		var configErrors []configutil.ConfigError
+		config, configErrors, err = server.parseConfig()
+		if err != nil {
+			return fmt.Errorf("Could not parse configuration: %w.", err)
+		}
+		for _, ce := range configErrors {
+			diagnose.Warn(ctx, diagnose.CapitalizeFirstLetter(ce.String())+".")
+		}
+		diagnose.Success(ctx, "Vault configuration syntax is ok.")
+		return nil
+	})
+	if config == nil {
+		return fmt.Errorf("No vault server configuration found.")
+	}
+
+	diagnose.Test(ctx, "Check Telemetry", func(ctx context.Context) (err error) {
+		if config.Telemetry == nil {
+			diagnose.Warn(ctx, "Telemetry is using default configuration")
+			diagnose.Advise(ctx, "By default only Prometheus and JSON metrics are available. Ignore this warning if you are using telemetry or are using these metrics and are satisfied with the default retention time and gauge period.")
+		} else {
+			t := config.Telemetry
+			// If any Circonus setting is present but we're missing the basic fields...
+			if coalesce(t.CirconusAPIURL, t.CirconusAPIToken, t.CirconusCheckID, t.CirconusCheckTags, t.CirconusCheckSearchTag,
+				t.CirconusBrokerID, t.CirconusBrokerSelectTag, t.CirconusCheckForceMetricActivation, t.CirconusCheckInstanceID,
+				t.CirconusCheckSubmissionURL, t.CirconusCheckDisplayName) != nil {
+				if t.CirconusAPIURL == "" {
+					return errors.New("incomplete Circonus telemetry configuration, missing circonus_api_url")
+				} else if t.CirconusAPIToken == "" {
+					return errors.New("incomplete Circonus telemetry configuration, missing circonus_api_token")
+				}
+			}
+			if len(t.DogStatsDTags) > 0 && t.DogStatsDAddr == "" {
+				return errors.New("incomplete DogStatsD telemetry configuration, missing dogstatsd_addr, while dogstatsd_tags specified")
+			}
+
+			// If any Stackdriver setting is present but we're missing the basic fields...
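+			// coalesce (defined at the bottom of this file) returns the first
+			// value that is neither nil nor the empty string, so a non-nil result
+			// means at least one of the listed fields was set.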
+ if coalesce(t.StackdriverNamespace, t.StackdriverLocation, t.StackdriverDebugLogs, t.StackdriverNamespace) != nil { + if t.StackdriverProjectID == "" { + return errors.New("incomplete Stackdriver telemetry configuration, missing stackdriver_project_id") + } + if t.StackdriverLocation == "" { + return errors.New("incomplete Stackdriver telemetry configuration, missing stackdriver_location") + } + if t.StackdriverNamespace == "" { + return errors.New("incomplete Stackdriver telemetry configuration, missing stackdriver_namespace") + } + } + } + return nil + }) + + var metricSink *metricsutil.ClusterMetricSink + var metricsHelper *metricsutil.MetricsHelper + + var backend *physical.Backend + diagnose.Test(ctx, "Check Storage", func(ctx context.Context) error { + // Ensure that there is a storage stanza + if config.Storage == nil { + diagnose.Advise(ctx, "To learn how to specify a storage backend, see the Vault server configuration documentation.") + return fmt.Errorf("No storage stanza in Vault server configuration.") + } + + diagnose.Test(ctx, "Create Storage Backend", func(ctx context.Context) error { + b, err := server.setupStorage(config) + if err != nil { + return err + } + if b == nil { + diagnose.Advise(ctx, "To learn how to specify a storage backend, see the Vault server configuration documentation.") + return fmt.Errorf("Storage backend could not be initialized.") + } + backend = &b + return nil + }) + + if backend == nil { + diagnose.Fail(ctx, "Diagnose could not initialize storage backend.") + span.End() + return fmt.Errorf("Diagnose could not initialize storage backend.") + } + + // Check for raft quorum status + if config.Storage.Type == storageTypeRaft { + path := os.Getenv(raft.EnvVaultRaftPath) + if path == "" { + path, ok := config.Storage.Config["path"] + if !ok { + diagnose.SpotError(ctx, "Check Raft Folder Permissions", fmt.Errorf("Storage folder path is required.")) + } + diagnose.RaftFileChecks(ctx, path) + } + diagnose.RaftStorageQuorum(ctx, (*backend).(*raft.RaftBackend)) + } + + // Consul storage checks + if config.Storage != nil && config.Storage.Type == storageTypeConsul { + diagnose.Test(ctx, "Check Consul TLS", func(ctx context.Context) error { + err := physconsul.SetupSecureTLS(ctx, api.DefaultConfig(), config.Storage.Config, server.logger, true) + if err != nil { + return err + } + return nil + }) + + diagnose.Test(ctx, "Check Consul Direct Storage Access", func(ctx context.Context) error { + dirAccess := diagnose.ConsulDirectAccess(config.Storage.Config) + if dirAccess != "" { + diagnose.Warn(ctx, dirAccess) + } + if dirAccess == diagnose.DirAccessErr { + diagnose.Advise(ctx, diagnose.DirAccessAdvice) + } + return nil + }) + } + + // Attempt to use storage backend + if !c.skipEndEnd && config.Storage.Type != storageTypeRaft { + diagnose.Test(ctx, "Check Storage Access", diagnose.WithTimeout(30*time.Second, func(ctx context.Context) error { + maxDurationCrudOperation := "write" + maxDuration := time.Duration(0) + uuidSuffix, err := uuid.GenerateUUID() + if err != nil { + return err + } + uuid := "diagnose/latency/" + uuidSuffix + dur, err := diagnose.EndToEndLatencyCheckWrite(ctx, uuid, *backend) + if err != nil { + return err + } + maxDuration = dur + dur, err = diagnose.EndToEndLatencyCheckRead(ctx, uuid, *backend) + if err != nil { + return err + } + if dur > maxDuration { + maxDuration = dur + maxDurationCrudOperation = "read" + } + dur, err = diagnose.EndToEndLatencyCheckDelete(ctx, uuid, *backend) + if err != nil { + return err + } + if dur > maxDuration 
{ + maxDuration = dur + maxDurationCrudOperation = "delete" + } + + if maxDuration > time.Duration(0) { + diagnose.Warn(ctx, diagnose.LatencyWarning+fmt.Sprintf("duration: %s, operation: %s", maxDuration, maxDurationCrudOperation)) + } + return nil + })) + } + return nil + }) + + // Return from top-level span when backend is nil + if backend == nil { + return fmt.Errorf("Diagnose could not initialize storage backend.") + } + + var configSR sr.ServiceRegistration + diagnose.Test(ctx, "Check Service Discovery", func(ctx context.Context) error { + if config.ServiceRegistration == nil || config.ServiceRegistration.Config == nil { + diagnose.Skipped(ctx, "No service registration configured.") + return nil + } + srConfig := config.ServiceRegistration.Config + + diagnose.Test(ctx, "Check Consul Service Discovery TLS", func(ctx context.Context) error { + // SetupSecureTLS for service discovery uses the same cert and key to set up physical + // storage. See the consul package in physical for details. + err := srconsul.SetupSecureTLS(ctx, api.DefaultConfig(), srConfig, server.logger, true) + if err != nil { + return err + } + return nil + }) + + if config.ServiceRegistration != nil && config.ServiceRegistration.Type == "consul" { + diagnose.Test(ctx, "Check Consul Direct Service Discovery", func(ctx context.Context) error { + dirAccess := diagnose.ConsulDirectAccess(config.ServiceRegistration.Config) + if dirAccess != "" { + diagnose.Warn(ctx, dirAccess) + } + if dirAccess == diagnose.DirAccessErr { + diagnose.Advise(ctx, diagnose.DirAccessAdvice) + } + return nil + }) + } + return nil + }) + + sealcontext, sealspan := diagnose.StartSpan(ctx, "Create Vault Server Configuration Seals") + var seals []vault.Seal + var sealConfigError error + + barrierSeal, barrierWrapper, unwrapSeal, seals, sealConfigError, err := setSeal(server, config, make([]string, 0), make(map[string]string)) + // Check error here + if err != nil { + diagnose.Advise(ctx, "For assistance with the seal stanza, see the Vault configuration documentation.") + diagnose.Fail(sealcontext, fmt.Sprintf("Seal creation resulted in the following error: %s.", err.Error())) + goto SEALFAIL + } + if sealConfigError != nil { + diagnose.Fail(sealcontext, "Seal could not be configured: seals may already be initialized.") + goto SEALFAIL + } + + for _, seal := range seals { + // There is always one nil seal. We need to skip it so we don't start an empty Finalize-Seal-Shamir + // section. + if seal == nil { + continue + } + seal := seal // capture range variable + // Ensure that the seal finalizer is called, even if using verify-only + defer func(seal *vault.Seal) { + sealType := diagnose.CapitalizeFirstLetter((*seal).BarrierType().String()) + finalizeSealContext, finalizeSealSpan := diagnose.StartSpan(ctx, "Finalize "+sealType+" Seal") + err = (*seal).Finalize(finalizeSealContext) + if err != nil { + diagnose.Fail(finalizeSealContext, "Error finalizing seal.") + diagnose.Advise(finalizeSealContext, "This likely means that the barrier is still in use; therefore, finalizing the seal timed out.") + finalizeSealSpan.End() + } + finalizeSealSpan.End() + }(&seal) + } + + if barrierSeal == nil { + diagnose.Fail(sealcontext, "Could not create barrier seal. No error was generated, but it is likely that the seal stanza is misconfigured. 
For guidance, see Vault's configuration documentation on the seal stanza.") + } + +SEALFAIL: + sealspan.End() + + diagnose.Test(ctx, "Check Transit Seal TLS", func(ctx context.Context) error { + var checkSealTransit bool + for _, seal := range config.Seals { + if seal.Type == "transit" { + checkSealTransit = true + + tlsSkipVerify, _ := seal.Config["tls_skip_verify"] + if tlsSkipVerify == "true" { + diagnose.Warn(ctx, "TLS verification is skipped. This is highly discouraged and decreases the security of data transmissions to and from the Vault server.") + return nil + } + + // Checking tls_client_cert and tls_client_key + tlsClientCert, ok := seal.Config["tls_client_cert"] + if !ok { + diagnose.Warn(ctx, "Missing tls_client_cert in the seal configuration.") + return nil + } + tlsClientKey, ok := seal.Config["tls_client_key"] + if !ok { + diagnose.Warn(ctx, "Missing tls_client_key in the seal configuration.") + return nil + } + _, err := diagnose.TLSFileChecks(tlsClientCert, tlsClientKey) + if err != nil { + return fmt.Errorf("The TLS certificate and key configured through the tls_client_cert and tls_client_key fields of the transit seal configuration are invalid: %w.", err) + } + + // checking tls_ca_cert + tlsCACert, ok := seal.Config["tls_ca_cert"] + if !ok { + diagnose.Warn(ctx, "Missing tls_ca_cert in the seal configuration.") + return nil + } + warnings, err := diagnose.TLSCAFileCheck(tlsCACert) + if len(warnings) != 0 { + for _, warning := range warnings { + diagnose.Warn(ctx, warning) + } + } + if err != nil { + return fmt.Errorf("The TLS CA certificate configured through the tls_ca_cert field of the transit seal configuration is invalid: %w.", err) + } + } + } + if !checkSealTransit { + diagnose.Skipped(ctx, "No transit seal found in seal configuration.") + } + return nil + }) + + var coreConfig vault.CoreConfig + diagnose.Test(ctx, "Create Core Configuration", func(ctx context.Context) error { + var secureRandomReader io.Reader + // prepare a secure random reader for core + randReaderTestName := "Initialize Randomness for Core" + secureRandomReader, err = configutil.CreateSecureRandomReaderFunc(config.SharedConfig, barrierWrapper) + if err != nil { + return diagnose.SpotError(ctx, randReaderTestName, fmt.Errorf("Could not initialize randomness for core: %w.", err)) + } + diagnose.SpotOk(ctx, randReaderTestName, "") + coreConfig = createCoreConfig(server, config, *backend, configSR, barrierSeal, unwrapSeal, metricsHelper, metricSink, secureRandomReader) + return nil + }) + + var disableClustering bool + diagnose.Test(ctx, "HA Storage", func(ctx context.Context) error { + diagnose.Test(ctx, "Create HA Storage Backend", func(ctx context.Context) error { + // Initialize the separate HA storage backend, if it exists + disableClustering, err = initHaBackend(server, config, &coreConfig, *backend) + if err != nil { + return err + } + return nil + }) + + diagnose.Test(ctx, "Check HA Consul Direct Storage Access", func(ctx context.Context) error { + if config.HAStorage == nil { + diagnose.Skipped(ctx, "No HA storage stanza is configured.") + } else { + dirAccess := diagnose.ConsulDirectAccess(config.HAStorage.Config) + if dirAccess != "" { + diagnose.Warn(ctx, dirAccess) + } + if dirAccess == diagnose.DirAccessErr { + diagnose.Advise(ctx, diagnose.DirAccessAdvice) + } + } + return nil + }) + if config.HAStorage != nil && config.HAStorage.Type == storageTypeConsul { + diagnose.Test(ctx, "Check Consul TLS", func(ctx context.Context) error { + err = physconsul.SetupSecureTLS(ctx, 
api.DefaultConfig(), config.HAStorage.Config, server.logger, true) + if err != nil { + return err + } + return nil + }) + } + return nil + }) + + // Determine the redirect address from environment variables + err = determineRedirectAddr(server, &coreConfig, config) + if err != nil { + return diagnose.SpotError(ctx, "Determine Redirect Address", fmt.Errorf("Redirect Address could not be determined: %w.", err)) + } + diagnose.SpotOk(ctx, "Determine Redirect Address", "") + + err = findClusterAddress(server, &coreConfig, config, disableClustering) + if err != nil { + return diagnose.SpotError(ctx, "Check Cluster Address", fmt.Errorf("Cluster Address could not be determined or was invalid: %w.", err), + diagnose.Advice("Please check that the API and Cluster addresses are different, and that the API, Cluster and Redirect addresses have both a host and port.")) + } + diagnose.SpotOk(ctx, "Check Cluster Address", "Cluster address is logically valid and can be found.") + + var vaultCore *vault.Core + + // Run all the checks that are utilized when initializing a core object + // without actually calling core.Init. These are in the init-core section + // as they are runtime checks. + diagnose.Test(ctx, "Check Core Creation", func(ctx context.Context) error { + var newCoreError error + if coreConfig.RawConfig == nil { + return fmt.Errorf(CoreConfigUninitializedErr) + } + core, newCoreError := vault.CreateCore(&coreConfig) + if newCoreError != nil { + if vault.IsFatalError(newCoreError) { + return fmt.Errorf("Error initializing core: %s.", newCoreError) + } + diagnose.Warn(ctx, wrapAtLength( + "A non-fatal error occurred during initialization. Please check the logs for more information.")) + } else { + vaultCore = core + } + return nil + }) + + if vaultCore == nil { + return fmt.Errorf("Diagnose could not initialize the Vault core from the Vault server configuration.") + } + + licenseCtx, licenseSpan := diagnose.StartSpan(ctx, "Check For Autoloaded License") + // If we are not in enterprise, return from the check + if !constants.IsEnterprise { + diagnose.Skipped(licenseCtx, "License check will not run on OSS Vault.") + } else { + // Load License from environment variables. These take precedence over the + // configured license. 
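+		//
+		// For example (the license path shown is illustrative only):
+		//
+		//	VAULT_LICENSE_PATH=/etc/vault/license.hclic vault operator diagnose \
+		//	  -config=/etc/vault/config.hcl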
+ if envLicensePath := os.Getenv(EnvVaultLicensePath); envLicensePath != "" { + coreConfig.LicensePath = envLicensePath + } + if envLicense := os.Getenv(EnvVaultLicense); envLicense != "" { + coreConfig.License = envLicense + } + vault.DiagnoseCheckLicense(licenseCtx, vaultCore, coreConfig, false) + } + licenseSpan.End() + + var lns []listenerutil.Listener + diagnose.Test(ctx, "Start Listeners", func(ctx context.Context) error { + disableClustering := config.HAStorage != nil && config.HAStorage.DisableClustering + infoKeys := make([]string, 0, 10) + info := make(map[string]string) + var listeners []listenerutil.Listener + var status int + + diagnose.ListenerChecks(ctx, config.Listeners) + + diagnose.Test(ctx, "Create Listeners", func(ctx context.Context) error { + status, listeners, _, err = server.InitListeners(config, disableClustering, &infoKeys, &info) + if status != 0 { + return err + } + return nil + }) + + lns = listeners + + // Make sure we close all listeners from this point on + listenerCloseFunc := func() { + for _, ln := range lns { + ln.Listener.Close() + } + } + + c.cleanupGuard.Do(listenerCloseFunc) + + return nil + }) + + // TODO: Diagnose logging configuration + + // The unseal diagnose check will simply attempt to use the barrier to encrypt and + // decrypt a mock value. It will not call runUnseal. + diagnose.Test(ctx, "Check Autounseal Encryption", diagnose.WithTimeout(30*time.Second, func(ctx context.Context) error { + if barrierSeal == nil { + return fmt.Errorf("Diagnose could not create a barrier seal object.") + } + if barrierSeal.BarrierType() == wrapping.WrapperTypeShamir { + diagnose.Skipped(ctx, "Skipping barrier encryption test. Only supported for auto-unseal.") + return nil + } + barrierUUID, err := uuid.GenerateUUID() + if err != nil { + return fmt.Errorf("Diagnose could not create unique UUID for unsealing.") + } + barrierEncValue := "diagnose-" + barrierUUID + ciphertext, err := barrierWrapper.Encrypt(ctx, []byte(barrierEncValue), nil) + if err != nil { + return fmt.Errorf("Error encrypting with seal barrier: %w.", err) + } + plaintext, err := barrierWrapper.Decrypt(ctx, ciphertext, nil) + if err != nil { + return fmt.Errorf("Error decrypting with seal barrier: %w", err) + } + if string(plaintext) != barrierEncValue { + return fmt.Errorf("Barrier returned incorrect decrypted value for mock data.") + } + return nil + })) + + // The following block contains static checks that are run during the + // startHttpServers portion of server run. In other words, they are static + // checks during resource creation. Currently there is nothing important in this + // diagnose check. For now it is a placeholder for any checks that will be done + // before server run. + diagnose.Test(ctx, "Check Server Before Runtime", func(ctx context.Context) error { + for _, ln := range lns { + if ln.Config == nil { + return fmt.Errorf("Found no listener config after parsing the Vault configuration.") + } + } + return nil + }) + + // Checking HCP link to make sure Vault could connect to SCADA. 
+ // If it could not connect to SCADA in 5 seconds, diagnose reports an issue + if !constants.IsEnterprise { + diagnose.Skipped(ctx, "HCP link check will not run on OSS Vault.") + } else { + if config.HCPLinkConf != nil { + // we need to override API and Passthrough capabilities + // as they could not be initialized when Vault http handler + // is not fully initialized + config.HCPLinkConf.EnablePassThroughCapability = false + config.HCPLinkConf.EnableAPICapability = false + + diagnose.Test(ctx, "Check HCP Connection", func(ctx context.Context) error { + hcpLink, err := hcp_link.NewHCPLink(config.HCPLinkConf, vaultCore, server.logger) + if err != nil || hcpLink == nil { + return fmt.Errorf("failed to start HCP link, %w", err) + } + + // check if a SCADA session is established successfully + deadline := time.Now().Add(5 * time.Second) + linkSessionStatus := "disconnected" + for time.Now().Before(deadline) { + linkSessionStatus = hcpLink.GetConnectionStatusMessage(hcpLink.GetScadaSessionStatus()) + if linkSessionStatus == "connected" { + break + } + time.Sleep(500 * time.Millisecond) + } + if linkSessionStatus != "connected" { + return fmt.Errorf("failed to connect to HCP in 5 seconds. HCP session status is: %s", linkSessionStatus) + } + + err = hcpLink.Shutdown() + if err != nil { + return fmt.Errorf("failed to shutdown HCP link: %w", err) + } + + return nil + }) + } + } + + return nil +} + +func coalesce(values ...interface{}) interface{} { + for _, val := range values { + if val != nil && val != "" { + return val + } + } + return nil +} diff --git a/command/operator_diagnose_test.go b/command/operator_diagnose_test.go new file mode 100644 index 0000000..2c9a1a0 --- /dev/null +++ b/command/operator_diagnose_test.go @@ -0,0 +1,560 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +//go:build !race + +package command + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "strings" + "testing" + + "github.com/hashicorp/vault/vault/diagnose" + "github.com/mitchellh/cli" +) + +func testOperatorDiagnoseCommand(tb testing.TB) *OperatorDiagnoseCommand { + tb.Helper() + + ui := cli.NewMockUi() + return &OperatorDiagnoseCommand{ + diagnose: diagnose.New(ioutil.Discard), + BaseCommand: &BaseCommand{ + UI: ui, + }, + skipEndEnd: true, + } +} + +func TestOperatorDiagnoseCommand_Run(t *testing.T) { + t.Parallel() + cases := []struct { + name string + args []string + expected []*diagnose.Result + }{ + { + "diagnose_ok", + []string{ + "-config", "./server/test-fixtures/config_diagnose_ok.hcl", + }, + []*diagnose.Result{ + { + Name: "Parse Configuration", + Status: diagnose.OkStatus, + }, + { + Name: "Start Listeners", + Status: diagnose.WarningStatus, + Children: []*diagnose.Result{ + { + Name: "Create Listeners", + Status: diagnose.OkStatus, + }, + { + Name: "Check Listener TLS", + Status: diagnose.WarningStatus, + Warnings: []string{ + "TLS is disabled in a listener config stanza.", + }, + }, + }, + }, + { + Name: "Check Storage", + Status: diagnose.OkStatus, + Children: []*diagnose.Result{ + { + Name: "Create Storage Backend", + Status: diagnose.OkStatus, + }, + { + Name: "Check Consul TLS", + Status: diagnose.SkippedStatus, + }, + { + Name: "Check Consul Direct Storage Access", + Status: diagnose.OkStatus, + }, + }, + }, + { + Name: "Check Service Discovery", + Status: diagnose.OkStatus, + Children: []*diagnose.Result{ + { + Name: "Check Consul Service Discovery TLS", + Status: diagnose.SkippedStatus, + }, + { + Name: "Check Consul Direct Service Discovery", + Status: diagnose.OkStatus, + }, + }, + }, + { + Name: "Create Vault Server Configuration Seals", + Status: diagnose.OkStatus, + }, + { + Name: "Create Core Configuration", + Status: diagnose.OkStatus, + Children: []*diagnose.Result{ + { + Name: "Initialize Randomness for Core", + Status: diagnose.OkStatus, + }, + }, + }, + { + Name: "HA Storage", + Status: diagnose.OkStatus, + Children: []*diagnose.Result{ + { + Name: "Create HA Storage Backend", + Status: diagnose.OkStatus, + }, + { + Name: "Check HA Consul Direct Storage Access", + Status: diagnose.OkStatus, + }, + { + Name: "Check Consul TLS", + Status: diagnose.SkippedStatus, + }, + }, + }, + { + Name: "Determine Redirect Address", + Status: diagnose.OkStatus, + }, + { + Name: "Check Cluster Address", + Status: diagnose.OkStatus, + }, + { + Name: "Check Core Creation", + Status: diagnose.OkStatus, + }, + { + Name: "Start Listeners", + Status: diagnose.WarningStatus, + Children: []*diagnose.Result{ + { + Name: "Create Listeners", + Status: diagnose.OkStatus, + }, + { + Name: "Check Listener TLS", + Status: diagnose.WarningStatus, + Warnings: []string{ + "TLS is disabled in a listener config stanza.", + }, + }, + }, + }, + { + Name: "Check Autounseal Encryption", + Status: diagnose.SkippedStatus, + Message: "Skipping barrier encryption", + }, + { + Name: "Check Server Before Runtime", + Status: diagnose.OkStatus, + }, + { + Name: "Finalize Shamir Seal", + Status: diagnose.OkStatus, + }, + }, + }, + { + "diagnose_raft_problems", + []string{ + "-config", "./server/test-fixtures/config_raft.hcl", + }, + []*diagnose.Result{ + { + Name: "Check Storage", + Status: diagnose.WarningStatus, + Children: []*diagnose.Result{ + { + Name: "Create Storage Backend", + Status: diagnose.OkStatus, + }, + { + Name: "Check Raft Folder Permissions", + 
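// Editorial note: compareResult (defined after these cases) matches
// Message, Advice, and Warnings by substring, so a short phrase such as
// the "too many permissions" message just below is enough to pin this
// warning without reproducing the full diagnose output.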
Status: diagnose.WarningStatus, + Message: "too many permissions", + }, + { + Name: "Check For Raft Quorum", + Status: diagnose.WarningStatus, + Message: "0 voters found", + }, + }, + }, + }, + }, + { + "diagnose_invalid_storage", + []string{ + "-config", "./server/test-fixtures/nostore_config.hcl", + }, + []*diagnose.Result{ + { + Name: "Check Storage", + Status: diagnose.ErrorStatus, + Message: "No storage stanza in Vault server configuration.", + }, + }, + }, + { + "diagnose_listener_config_ok", + []string{ + "-config", "./server/test-fixtures/tls_config_ok.hcl", + }, + []*diagnose.Result{ + { + Name: "Start Listeners", + Status: diagnose.OkStatus, + Children: []*diagnose.Result{ + { + Name: "Create Listeners", + Status: diagnose.OkStatus, + }, + { + Name: "Check Listener TLS", + Status: diagnose.OkStatus, + }, + }, + }, + }, + }, + { + "diagnose_invalid_https_storage", + []string{ + "-config", "./server/test-fixtures/config_bad_https_storage.hcl", + }, + []*diagnose.Result{ + { + Name: "Check Storage", + Status: diagnose.ErrorStatus, + Children: []*diagnose.Result{ + { + Name: "Create Storage Backend", + Status: diagnose.OkStatus, + }, + { + Name: "Check Consul TLS", + Status: diagnose.ErrorStatus, + Message: "certificate has expired or is not yet valid", + Warnings: []string{ + "expired or near expiry", + }, + }, + { + Name: "Check Consul Direct Storage Access", + Status: diagnose.OkStatus, + }, + }, + }, + }, + }, + { + "diagnose_invalid_https_hastorage", + []string{ + "-config", "./server/test-fixtures/config_diagnose_hastorage_bad_https.hcl", + }, + []*diagnose.Result{ + { + Name: "Check Storage", + Status: diagnose.WarningStatus, + Children: []*diagnose.Result{ + { + Name: "Create Storage Backend", + Status: diagnose.OkStatus, + }, + { + Name: "Check Consul TLS", + Status: diagnose.SkippedStatus, + }, + { + Name: "Check Consul Direct Storage Access", + Status: diagnose.WarningStatus, + Advice: "We recommend connecting to a local agent.", + Warnings: []string{ + "Vault storage is directly connected to a Consul server.", + }, + }, + }, + }, + { + Name: "HA Storage", + Status: diagnose.ErrorStatus, + Children: []*diagnose.Result{ + { + Name: "Create HA Storage Backend", + Status: diagnose.OkStatus, + }, + { + Name: "Check HA Consul Direct Storage Access", + Status: diagnose.WarningStatus, + Advice: "We recommend connecting to a local agent.", + Warnings: []string{ + "Vault storage is directly connected to a Consul server.", + }, + }, + { + Name: "Check Consul TLS", + Status: diagnose.ErrorStatus, + Message: "certificate has expired or is not yet valid", + Warnings: []string{ + "expired or near expiry", + }, + }, + }, + }, + { + Name: "Check Cluster Address", + Status: diagnose.ErrorStatus, + }, + }, + }, + { + "diagnose_seal_transit_tls_check_fail", + []string{ + "-config", "./server/test-fixtures/diagnose_seal_transit_tls_check.hcl", + }, + []*diagnose.Result{ + { + Name: "Check Transit Seal TLS", + Status: diagnose.WarningStatus, + Warnings: []string{ + "Found at least one intermediate certificate in the CA certificate file.", + }, + }, + }, + }, + { + "diagnose_invalid_https_sr", + []string{ + "-config", "./server/test-fixtures/diagnose_bad_https_consul_sr.hcl", + }, + []*diagnose.Result{ + { + Name: "Check Service Discovery", + Status: diagnose.ErrorStatus, + Children: []*diagnose.Result{ + { + Name: "Check Consul Service Discovery TLS", + Status: diagnose.ErrorStatus, + Message: "certificate has expired or is not yet valid", + Warnings: []string{ + "expired or near expiry", + }, 
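// Editorial note: in these fixtures a section's Status mirrors its worst
// child, e.g. "Check Service Discovery" reports ErrorStatus here because
// this TLS child errors while its sibling only warns. compareResults matches
// expected children by name and ignores extras, so only the children that
// drive the rollup need to be spelled out.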
+ }, + { + Name: "Check Consul Direct Service Discovery", + Status: diagnose.WarningStatus, + Warnings: []string{ + diagnose.DirAccessErr, + }, + }, + }, + }, + }, + }, + { + "diagnose_direct_storage_access", + []string{ + "-config", "./server/test-fixtures/diagnose_ok_storage_direct_access.hcl", + }, + []*diagnose.Result{ + { + Name: "Check Storage", + Status: diagnose.WarningStatus, + Children: []*diagnose.Result{ + { + Name: "Create Storage Backend", + Status: diagnose.OkStatus, + }, + { + Name: "Check Consul TLS", + Status: diagnose.SkippedStatus, + }, + { + Name: "Check Consul Direct Storage Access", + Status: diagnose.WarningStatus, + Warnings: []string{ + diagnose.DirAccessErr, + }, + }, + }, + }, + }, + }, + { + "diagnose_raft_no_folder_backend", + []string{ + "-config", "./server/test-fixtures/diagnose_raft_no_bolt_folder.hcl", + }, + []*diagnose.Result{ + { + Name: "Check Storage", + Status: diagnose.ErrorStatus, + Message: "Diagnose could not initialize storage backend.", + Children: []*diagnose.Result{ + { + Name: "Create Storage Backend", + Status: diagnose.ErrorStatus, + Message: "no such file or directory", + }, + }, + }, + }, + }, + { + "diagnose_telemetry_partial_circonus", + []string{ + "-config", "./server/test-fixtures/diagnose_bad_telemetry1.hcl", + }, + []*diagnose.Result{ + { + Name: "Check Telemetry", + Status: diagnose.ErrorStatus, + Message: "incomplete Circonus telemetry configuration, missing circonus_api_url", + }, + }, + }, + { + "diagnose_telemetry_partial_dogstats", + []string{ + "-config", "./server/test-fixtures/diagnose_bad_telemetry2.hcl", + }, + []*diagnose.Result{ + { + Name: "Check Telemetry", + Status: diagnose.ErrorStatus, + Message: "incomplete DogStatsD telemetry configuration, missing dogstatsd_addr, while dogstatsd_tags specified", + }, + }, + }, + { + "diagnose_telemetry_partial_stackdriver", + []string{ + "-config", "./server/test-fixtures/diagnose_bad_telemetry3.hcl", + }, + []*diagnose.Result{ + { + Name: "Check Telemetry", + Status: diagnose.ErrorStatus, + Message: "incomplete Stackdriver telemetry configuration, missing stackdriver_project_id", + }, + }, + }, + { + "diagnose_telemetry_default", + []string{ + "-config", "./server/test-fixtures/config4.hcl", + }, + []*diagnose.Result{ + { + Name: "Check Telemetry", + Status: diagnose.WarningStatus, + Warnings: []string{"Telemetry is using default configuration"}, + }, + }, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + client, closer := testVaultServer(t) + defer closer() + cmd := testOperatorDiagnoseCommand(t) + cmd.client = client + + cmd.Run(tc.args) + result := cmd.diagnose.Finalize(context.Background()) + + if err := compareResults(tc.expected, result.Children); err != nil { + t.Fatalf("Did not find expected test results: %v", err) + } + }) + } + }) +} + +func compareResults(expected []*diagnose.Result, actual []*diagnose.Result) error { + for _, exp := range expected { + found := false + // Check them all so we don't have to be order specific + for _, act := range actual { + fmt.Printf("%+v", act) + if exp.Name == act.Name { + found = true + if err := compareResult(exp, act); err != nil { + return err + } + break + } + } + if !found { + return fmt.Errorf("could not find expected test result: %s", exp.Name) + } + } + return nil +} + +func compareResult(exp *diagnose.Result, act *diagnose.Result) error { + if exp.Name != act.Name { + return fmt.Errorf("names mismatch: 
%s vs %s", exp.Name, act.Name) + } + if exp.Status != act.Status { + if act.Status != diagnose.OkStatus { + return fmt.Errorf("section %s, status mismatch: %s vs %s, got error %s", exp.Name, exp.Status, act.Status, act.Message) + } + return fmt.Errorf("section %s, status mismatch: %s vs %s", exp.Name, exp.Status, act.Status) + } + if exp.Message != "" && exp.Message != act.Message && !strings.Contains(act.Message, exp.Message) { + return fmt.Errorf("section %s, message not found: %s in %s", exp.Name, exp.Message, act.Message) + } + if exp.Advice != "" && exp.Advice != act.Advice && !strings.Contains(act.Advice, exp.Advice) { + return fmt.Errorf("section %s, advice not found: %s in %s", exp.Name, exp.Advice, act.Advice) + } + if len(exp.Warnings) != len(act.Warnings) { + return fmt.Errorf("section %s, warning count mismatch: %d vs %d", exp.Name, len(exp.Warnings), len(act.Warnings)) + } + for j := range exp.Warnings { + if !strings.Contains(act.Warnings[j], exp.Warnings[j]) { + return fmt.Errorf("section %s, warning message not found: %s in %s", exp.Name, exp.Warnings[j], act.Warnings[j]) + } + } + if len(exp.Children) > len(act.Children) { + errStrings := []string{} + for _, c := range act.Children { + errStrings = append(errStrings, fmt.Sprintf("%+v", c)) + } + return fmt.Errorf(strings.Join(errStrings, ",")) + } + + if len(exp.Children) > 0 { + return compareResults(exp.Children, act.Children) + } + + // Remove raft file if it exists + os.Remove("./server/test-fixtures/vault.db") + os.RemoveAll("./server/test-fixtures/raft") + + return nil +} diff --git a/command/operator_generate_root.go b/command/operator_generate_root.go new file mode 100644 index 0000000..6665e8b --- /dev/null +++ b/command/operator_generate_root.go @@ -0,0 +1,569 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "fmt" + "io" + "os" + "strings" + + "github.com/hashicorp/go-secure-stdlib/password" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/pgpkeys" + "github.com/hashicorp/vault/sdk/helper/roottoken" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*OperatorGenerateRootCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorGenerateRootCommand)(nil) +) + +type generateRootKind int + +const ( + generateRootRegular generateRootKind = iota + generateRootDR + generateRootRecovery +) + +type OperatorGenerateRootCommand struct { + *BaseCommand + + flagInit bool + flagCancel bool + flagStatus bool + flagDecode string + flagOTP string + flagPGPKey string + flagNonce string + flagGenerateOTP bool + flagDRToken bool + flagRecoveryToken bool + + testStdin io.Reader // for tests +} + +func (c *OperatorGenerateRootCommand) Synopsis() string { + return "Generates a new root token" +} + +func (c *OperatorGenerateRootCommand) Help() string { + helpText := ` +Usage: vault operator generate-root [options] [KEY] + + Generates a new root token by combining a quorum of share holders. One of + the following must be provided to start the root token generation: + + - A base64-encoded one-time-password (OTP) provided via the "-otp" flag. + Use the "-generate-otp" flag to generate a usable value. The resulting + token is XORed with this value when it is returned. Use the "-decode" + flag to output the final value. + + - A file containing a PGP key or a keybase username in the "-pgp-key" + flag. The resulting token is encrypted with this public key. 
+ + An unseal key may be provided directly on the command line as an argument to + the command. If key is specified as "-", the command will read from stdin. If + a TTY is available, the command will prompt for text. + + Generate an OTP code for the final token: + + $ vault operator generate-root -generate-otp + + Start a root token generation: + + $ vault operator generate-root -init -otp="..." + $ vault operator generate-root -init -pgp-key="..." + + Enter an unseal key to progress root token generation: + + $ vault operator generate-root -otp="..." + +` + c.Flags().Help() + return strings.TrimSpace(helpText) +} + +func (c *OperatorGenerateRootCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.BoolVar(&BoolVar{ + Name: "init", + Target: &c.flagInit, + Default: false, + EnvVar: "", + Completion: complete.PredictNothing, + Usage: "Start a root token generation. This can only be done if " + + "there is not currently one in progress.", + }) + + f.BoolVar(&BoolVar{ + Name: "cancel", + Target: &c.flagCancel, + Default: false, + EnvVar: "", + Completion: complete.PredictNothing, + Usage: "Reset the root token generation progress. This will discard any " + + "submitted unseal keys or configuration.", + }) + + f.BoolVar(&BoolVar{ + Name: "status", + Target: &c.flagStatus, + Default: false, + EnvVar: "", + Completion: complete.PredictNothing, + Usage: "Print the status of the current attempt without providing an " + + "unseal key.", + }) + + f.StringVar(&StringVar{ + Name: "decode", + Target: &c.flagDecode, + Default: "", + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "The value to decode; setting this triggers a decode operation. " + + " If the value is \"-\" then read the encoded token from stdin.", + }) + + f.BoolVar(&BoolVar{ + Name: "generate-otp", + Target: &c.flagGenerateOTP, + Default: false, + EnvVar: "", + Completion: complete.PredictNothing, + Usage: "Generate and print a high-entropy one-time-password (OTP) " + + "suitable for use with the \"-init\" flag.", + }) + + f.BoolVar(&BoolVar{ + Name: "dr-token", + Target: &c.flagDRToken, + Default: false, + EnvVar: "", + Completion: complete.PredictNothing, + Usage: "Set this flag to do generate root operations on DR Operational " + + "tokens.", + }) + + f.BoolVar(&BoolVar{ + Name: "recovery-token", + Target: &c.flagRecoveryToken, + Default: false, + EnvVar: "", + Completion: complete.PredictNothing, + Usage: "Set this flag to do generate root operations on Recovery Operational " + + "tokens.", + }) + + f.StringVar(&StringVar{ + Name: "otp", + Target: &c.flagOTP, + Default: "", + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "OTP code to use with \"-decode\" or \"-init\".", + }) + + f.VarFlag(&VarFlag{ + Name: "pgp-key", + Value: (*pgpkeys.PubKeyFileFlag)(&c.flagPGPKey), + Default: "", + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "Path to a file on disk containing a binary or base64-encoded " + + "public PGP key. This can also be specified as a Keybase username " + + "using the format \"keybase:\". When supplied, the generated " + + "root token will be encrypted and base64-encoded with the given public " + + "key.", + }) + + f.StringVar(&StringVar{ + Name: "nonce", + Target: &c.flagNonce, + Default: "", + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "Nonce value provided at initialization. 
The same nonce value " + + "must be provided with each unseal key.", + }) + + return set +} + +func (c *OperatorGenerateRootCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *OperatorGenerateRootCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *OperatorGenerateRootCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + if len(args) > 1 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0-1, got %d)", len(args))) + return 1 + } + + if c.flagDRToken && c.flagRecoveryToken { + c.UI.Error("Both -recovery-token and -dr-token flags are set") + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + kind := generateRootRegular + switch { + case c.flagDRToken: + kind = generateRootDR + case c.flagRecoveryToken: + kind = generateRootRecovery + } + + switch { + case c.flagGenerateOTP: + otp, code := c.generateOTP(client, kind) + if code == 0 { + switch Format(c.UI) { + case "", "table": + return PrintRaw(c.UI, otp) + default: + status := map[string]interface{}{ + "otp": otp, + "otp_length": len(otp), + } + return OutputData(c.UI, status) + } + } + return code + case c.flagDecode != "": + return c.decode(client, c.flagDecode, c.flagOTP, kind) + case c.flagCancel: + return c.cancel(client, kind) + case c.flagInit: + return c.init(client, c.flagOTP, c.flagPGPKey, kind) + case c.flagStatus: + return c.status(client, kind) + default: + // If there are no other flags, prompt for an unseal key. + key := "" + if len(args) > 0 { + key = strings.TrimSpace(args[0]) + } + return c.provide(client, key, kind) + } +} + +// generateOTP generates a suitable OTP code for generating a root token. +func (c *OperatorGenerateRootCommand) generateOTP(client *api.Client, kind generateRootKind) (string, int) { + f := client.Sys().GenerateRootStatus + switch kind { + case generateRootDR: + f = client.Sys().GenerateDROperationTokenStatus + case generateRootRecovery: + f = client.Sys().GenerateRecoveryOperationTokenStatus + } + + status, err := f() + if err != nil { + c.UI.Error(fmt.Sprintf("Error getting root generation status: %s", err)) + return "", 2 + } + + otp, err := roottoken.GenerateOTP(status.OTPLength) + var retCode int + if err != nil { + retCode = 2 + c.UI.Error(err.Error()) + } else { + retCode = 0 + } + return otp, retCode +} + +// decode decodes the given value using the otp. +func (c *OperatorGenerateRootCommand) decode(client *api.Client, encoded, otp string, kind generateRootKind) int { + if encoded == "" { + c.UI.Error("Missing encoded value: use -decode= to supply it") + return 1 + } + if otp == "" { + c.UI.Error("Missing otp: use -otp to supply it") + return 1 + } + + if encoded == "-" { + // Pull our fake stdin if needed + stdin := (io.Reader)(os.Stdin) + if c.testStdin != nil { + stdin = c.testStdin + } + + var buf bytes.Buffer + if _, err := io.Copy(&buf, stdin); err != nil { + c.UI.Error(fmt.Sprintf("Failed to read from stdin: %s", err)) + return 1 + } + + encoded = buf.String() + + if encoded == "" { + c.UI.Error("Missing encoded value. 
When using -decode=\"-\" value must be passed via stdin.") + return 1 + } + } + + f := client.Sys().GenerateRootStatus + switch kind { + case generateRootDR: + f = client.Sys().GenerateDROperationTokenStatus + case generateRootRecovery: + f = client.Sys().GenerateRecoveryOperationTokenStatus + } + + status, err := f() + if err != nil { + c.UI.Error(fmt.Sprintf("Error getting root generation status: %s", err)) + return 2 + } + + token, err := roottoken.DecodeToken(encoded, otp, status.OTPLength) + if err != nil { + c.UI.Error(fmt.Sprintf("Error decoding root token: %s", err)) + return 1 + } + + switch Format(c.UI) { + case "", "table": + return PrintRaw(c.UI, token) + default: + tokenJSON := map[string]interface{}{ + "token": token, + } + return OutputData(c.UI, tokenJSON) + } +} + +// init is used to start the generation process +func (c *OperatorGenerateRootCommand) init(client *api.Client, otp, pgpKey string, kind generateRootKind) int { + // Validate incoming fields. Either OTP OR PGP keys must be supplied. + if otp != "" && pgpKey != "" { + c.UI.Error("Error initializing: cannot specify both -otp and -pgp-key") + return 1 + } + + // Start the root generation + f := client.Sys().GenerateRootInit + switch kind { + case generateRootDR: + f = client.Sys().GenerateDROperationTokenInit + case generateRootRecovery: + f = client.Sys().GenerateRecoveryOperationTokenInit + } + status, err := f(otp, pgpKey) + if err != nil { + c.UI.Error(fmt.Sprintf("Error initializing root generation: %s", err)) + return 2 + } + + switch Format(c.UI) { + case "table": + return c.printStatus(status) + default: + return OutputData(c.UI, status) + } +} + +// provide prompts the user for the seal key and posts it to the update root +// endpoint. If this is the last unseal, this function outputs it. +func (c *OperatorGenerateRootCommand) provide(client *api.Client, key string, kind generateRootKind) int { + f := client.Sys().GenerateRootStatus + switch kind { + case generateRootDR: + f = client.Sys().GenerateDROperationTokenStatus + case generateRootRecovery: + f = client.Sys().GenerateRecoveryOperationTokenStatus + } + status, err := f() + if err != nil { + c.UI.Error(fmt.Sprintf("Error getting root generation status: %s", err)) + return 2 + } + + // Verify a root token generation is in progress. If there is not one in + // progress, return an error instructing the user to start one. + if !status.Started { + c.UI.Error(wrapAtLength( + "No root generation is in progress. 
Start a root generation by " + + "running \"vault operator generate-root -init\".")) + c.UI.Warn(wrapAtLength(fmt.Sprintf( + "If starting root generation using the OTP method and generating "+ + "your own OTP, the length of the OTP string needs to be %d "+ + "characters in length.", status.OTPLength))) + return 1 + } + + var nonce string + + switch key { + case "-": // Read from stdin + nonce = c.flagNonce + + // Pull our fake stdin if needed + stdin := (io.Reader)(os.Stdin) + if c.testStdin != nil { + stdin = c.testStdin + } + + var buf bytes.Buffer + if _, err := io.Copy(&buf, stdin); err != nil { + c.UI.Error(fmt.Sprintf("Failed to read from stdin: %s", err)) + return 1 + } + + key = buf.String() + case "": // Prompt using the tty + // Nonce value is not required if we are prompting via the terminal + nonce = status.Nonce + + w := getWriterFromUI(c.UI) + fmt.Fprintf(w, "Operation nonce: %s\n", nonce) + fmt.Fprintf(w, "Unseal Key (will be hidden): ") + key, err = password.Read(os.Stdin) + fmt.Fprintf(w, "\n") + if err != nil { + if err == password.ErrInterrupted { + c.UI.Error("user canceled") + return 1 + } + + c.UI.Error(wrapAtLength(fmt.Sprintf("An error occurred attempting to "+ + "ask for the unseal key. The raw error message is shown below, but "+ + "usually this is because you attempted to pipe a value into the "+ + "command or you are executing outside of a terminal (tty). If you "+ + "want to pipe the value, pass \"-\" as the argument to read from "+ + "stdin. The raw error was: %s", err))) + return 1 + } + default: // Supplied directly as an arg + nonce = c.flagNonce + } + + // Trim any whitespace from they key, especially since we might have prompted + // the user for it. + key = strings.TrimSpace(key) + + // Verify we have a nonce value + if nonce == "" { + c.UI.Error("Missing nonce value: specify it via the -nonce flag") + return 1 + } + + // Provide the key, this may potentially complete the update + fUpd := client.Sys().GenerateRootUpdate + switch kind { + case generateRootDR: + fUpd = client.Sys().GenerateDROperationTokenUpdate + case generateRootRecovery: + fUpd = client.Sys().GenerateRecoveryOperationTokenUpdate + } + status, err = fUpd(key, nonce) + if err != nil { + c.UI.Error(fmt.Sprintf("Error posting unseal key: %s", err)) + return 2 + } + switch Format(c.UI) { + case "table": + return c.printStatus(status) + default: + return OutputData(c.UI, status) + } +} + +// cancel cancels the root token generation +func (c *OperatorGenerateRootCommand) cancel(client *api.Client, kind generateRootKind) int { + f := client.Sys().GenerateRootCancel + switch kind { + case generateRootDR: + f = client.Sys().GenerateDROperationTokenCancel + case generateRootRecovery: + f = client.Sys().GenerateRecoveryOperationTokenCancel + } + if err := f(); err != nil { + c.UI.Error(fmt.Sprintf("Error canceling root token generation: %s", err)) + return 2 + } + c.UI.Output("Success! 
Root token generation canceled (if it was started)") + return 0 +} + +// status is used just to fetch and dump the status +func (c *OperatorGenerateRootCommand) status(client *api.Client, kind generateRootKind) int { + f := client.Sys().GenerateRootStatus + switch kind { + case generateRootDR: + f = client.Sys().GenerateDROperationTokenStatus + case generateRootRecovery: + f = client.Sys().GenerateRecoveryOperationTokenStatus + } + + status, err := f() + if err != nil { + c.UI.Error(fmt.Sprintf("Error getting root generation status: %s", err)) + return 2 + } + switch Format(c.UI) { + case "table": + return c.printStatus(status) + default: + return OutputData(c.UI, status) + } +} + +// printStatus dumps the status to output +func (c *OperatorGenerateRootCommand) printStatus(status *api.GenerateRootStatusResponse) int { + out := []string{} + out = append(out, fmt.Sprintf("Nonce | %s", status.Nonce)) + out = append(out, fmt.Sprintf("Started | %t", status.Started)) + out = append(out, fmt.Sprintf("Progress | %d/%d", status.Progress, status.Required)) + out = append(out, fmt.Sprintf("Complete | %t", status.Complete)) + if status.PGPFingerprint != "" { + out = append(out, fmt.Sprintf("PGP Fingerprint | %s", status.PGPFingerprint)) + } + switch { + case status.EncodedToken != "": + out = append(out, fmt.Sprintf("Encoded Token | %s", status.EncodedToken)) + case status.EncodedRootToken != "": + out = append(out, fmt.Sprintf("Encoded Root Token | %s", status.EncodedRootToken)) + } + if status.OTP != "" { + c.UI.Warn(wrapAtLength("A One-Time-Password has been generated for you and is shown in the OTP field. You will need this value to decode the resulting root token, so keep it safe.")) + out = append(out, fmt.Sprintf("OTP | %s", status.OTP)) + } + if status.OTPLength != 0 { + out = append(out, fmt.Sprintf("OTP Length | %d", status.OTPLength)) + } + + output := columnOutput(out, nil) + c.UI.Output(output) + return 0 +} diff --git a/command/operator_generate_root_test.go b/command/operator_generate_root_test.go new file mode 100644 index 0000000..1436ab2 --- /dev/null +++ b/command/operator_generate_root_test.go @@ -0,0 +1,561 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +//go:build !race + +package command + +import ( + "encoding/base64" + "io" + "os" + "regexp" + "strings" + "testing" + + "github.com/hashicorp/vault/sdk/helper/xor" + "github.com/hashicorp/vault/vault" + "github.com/mitchellh/cli" +) + +func testOperatorGenerateRootCommand(tb testing.TB) (*cli.MockUi, *OperatorGenerateRootCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &OperatorGenerateRootCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestOperatorGenerateRootCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "init_invalid_otp", + []string{ + "-init", + "-otp", "not-a-valid-otp", + }, + "OTP string is wrong length", + 2, + }, + { + "init_pgp_multi", + []string{ + "-init", + "-pgp-key", "keybase:hashicorp", + "-pgp-key", "keybase:jefferai", + }, + "can only be specified once", + 1, + }, + { + "init_pgp_multi_inline", + []string{ + "-init", + "-pgp-key", "keybase:hashicorp,keybase:jefferai", + }, + "can only specify one pgp key", + 1, + }, + { + "init_pgp_otp", + []string{ + "-init", + "-pgp-key", "keybase:hashicorp", + "-otp", "abcd1234", + }, + "cannot specify both -otp and -pgp-key", + 1, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testOperatorGenerateRootCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("%s: expected %d to be %d", tc.name, code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("%s: expected %q to contain %q", tc.name, combined, tc.out) + } + }) + } + }) + + t.Run("generate_otp", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + _, cmd := testOperatorGenerateRootCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-generate-otp", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + }) + + t.Run("decode", func(t *testing.T) { + t.Parallel() + + encoded := "Bxg9JQQqOCNKBRICNwMIRzo2J3cWCBRi" + otp := "3JhHkONiyiaNYj14nnD9xZQS" + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testOperatorGenerateRootCommand(t) + cmd.client = client + + // Simulate piped output to print raw output + old := os.Stdout + _, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + os.Stdout = w + + code := cmd.Run([]string{ + "-decode", encoded, + "-otp", otp, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + w.Close() + os.Stdout = old + + expected := "4RUmoevJ3lsLni9sTXcNnRE1" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if combined != expected { + t.Errorf("expected %q to be %q", combined, expected) + } + }) + + t.Run("decode_from_stdin", func(t *testing.T) { + t.Parallel() + + encoded := "Bxg9JQQqOCNKBRICNwMIRzo2J3cWCBRi" + otp := "3JhHkONiyiaNYj14nnD9xZQS" + + client, closer := testVaultServer(t) + defer closer() + + stdinR, stdinW := io.Pipe() + go func() { + stdinW.Write([]byte(encoded)) + stdinW.Close() + }() + + ui, cmd := testOperatorGenerateRootCommand(t) + cmd.client = client + cmd.testStdin = stdinR + + // Simulate piped output to print raw output + old := os.Stdout + _, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + 
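		// Editorial note: swapping os.Stdout for the pipe's write end makes
		// stdout a non-TTY stream for the duration of Run (the "piped output"
		// being simulated); the read end is deliberately discarded since the
		// assertions below read the MockUi buffers, and the real stdout is
		// restored right after w.Close().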
os.Stdout = w + + code := cmd.Run([]string{ + "-decode", "-", // read from stdin + "-otp", otp, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + w.Close() + os.Stdout = old + + expected := "4RUmoevJ3lsLni9sTXcNnRE1" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if combined != expected { + t.Errorf("expected %q to be %q", combined, expected) + } + }) + + t.Run("decode_from_stdin_empty", func(t *testing.T) { + t.Parallel() + + encoded := "" + otp := "3JhHkONiyiaNYj14nnD9xZQS" + + client, closer := testVaultServer(t) + defer closer() + + stdinR, stdinW := io.Pipe() + go func() { + stdinW.Write([]byte(encoded)) + stdinW.Close() + }() + + ui, cmd := testOperatorGenerateRootCommand(t) + cmd.client = client + cmd.testStdin = stdinR + + // Simulate piped output to print raw output + old := os.Stdout + _, w, err := os.Pipe() + if err != nil { + t.Fatal(err) + } + os.Stdout = w + + code := cmd.Run([]string{ + "-decode", "-", // read from stdin + "-otp", otp, + }) + if exp := 1; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + w.Close() + os.Stdout = old + + expected := "Missing encoded value" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("cancel", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + // Initialize a generation + if _, err := client.Sys().GenerateRootInit("", ""); err != nil { + t.Fatal(err) + } + + ui, cmd := testOperatorGenerateRootCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-cancel", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! 
Root token generation canceled" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + status, err := client.Sys().GenerateRootStatus() + if err != nil { + t.Fatal(err) + } + + if status.Started { + t.Errorf("expected status to be canceled: %#v", status) + } + }) + + t.Run("init_otp", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testOperatorGenerateRootCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-init", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Nonce" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + status, err := client.Sys().GenerateRootStatus() + if err != nil { + t.Fatal(err) + } + + if !status.Started { + t.Errorf("expected status to be started: %#v", status) + } + }) + + t.Run("init_pgp", func(t *testing.T) { + t.Parallel() + + pgpKey := "keybase:hashicorp" + pgpFingerprint := "c874011f0ab405110d02105534365d9472d7468f" + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testOperatorGenerateRootCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-init", + "-pgp-key", pgpKey, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Nonce" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + status, err := client.Sys().GenerateRootStatus() + if err != nil { + t.Fatal(err) + } + + if !status.Started { + t.Errorf("expected status to be started: %#v", status) + } + if status.PGPFingerprint != pgpFingerprint { + t.Errorf("expected %q to be %q", status.PGPFingerprint, pgpFingerprint) + } + }) + + t.Run("status", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testOperatorGenerateRootCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-status", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Nonce" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("provide_arg", func(t *testing.T) { + t.Parallel() + + client, keys, closer := testVaultServerUnseal(t) + defer closer() + + // Initialize a generation + status, err := client.Sys().GenerateRootInit("", "") + if err != nil { + t.Fatal(err) + } + nonce := status.Nonce + otp := status.OTP + + // Supply the first n-1 unseal keys + for _, key := range keys[:len(keys)-1] { + _, cmd := testOperatorGenerateRootCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-nonce", nonce, + key, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + } + + ui, cmd := testOperatorGenerateRootCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-nonce", nonce, + keys[len(keys)-1], // the last unseal key + }) + if exp := 0; code != exp { + t.Fatalf("expected %d to be %d, out=%q, err=%q", code, exp, ui.OutputWriter, ui.ErrorWriter) + } + + reToken := regexp.MustCompile(`Encoded Token\s+(.+)`) + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + match 
:= reToken.FindAllStringSubmatch(combined, -1) + if len(match) < 1 || len(match[0]) < 2 { + t.Fatalf("no match: %#v", match) + } + + tokenBytes, err := base64.RawStdEncoding.DecodeString(match[0][1]) + if err != nil { + t.Fatal(err) + } + + token, err := xor.XORBytes(tokenBytes, []byte(otp)) + if err != nil { + t.Fatal(err) + } + + if l, exp := len(token), vault.TokenLength+vault.TokenPrefixLength; l != exp { + t.Errorf("expected %d to be %d: %s", l, exp, token) + } + }) + + t.Run("provide_stdin", func(t *testing.T) { + t.Parallel() + + client, keys, closer := testVaultServerUnseal(t) + defer closer() + + // Initialize a generation + status, err := client.Sys().GenerateRootInit("", "") + if err != nil { + t.Fatal(err) + } + nonce := status.Nonce + otp := status.OTP + + // Supply the first n-1 unseal keys + for _, key := range keys[:len(keys)-1] { + stdinR, stdinW := io.Pipe() + go func() { + stdinW.Write([]byte(key)) + stdinW.Close() + }() + + _, cmd := testOperatorGenerateRootCommand(t) + cmd.client = client + cmd.testStdin = stdinR + + code := cmd.Run([]string{ + "-nonce", nonce, + "-", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + } + + stdinR, stdinW := io.Pipe() + go func() { + stdinW.Write([]byte(keys[len(keys)-1])) // the last unseal key + stdinW.Close() + }() + + ui, cmd := testOperatorGenerateRootCommand(t) + cmd.client = client + cmd.testStdin = stdinR + + code := cmd.Run([]string{ + "-nonce", nonce, + "-", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + reToken := regexp.MustCompile(`Encoded Token\s+(.+)`) + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + match := reToken.FindAllStringSubmatch(combined, -1) + if len(match) < 1 || len(match[0]) < 2 { + t.Fatalf("no match: %#v", match) + } + + // encodedOTP := base64.RawStdEncoding.EncodeToString([]byte(otp)) + + // tokenBytes, err := xor.XORBase64(match[0][1], encodedOTP) + // if err != nil { + // t.Fatal(err) + // } + // token, err := uuid.FormatUUID(tokenBytes) + // if err != nil { + // t.Fatal(err) + // } + + tokenBytes, err := base64.RawStdEncoding.DecodeString(match[0][1]) + if err != nil { + t.Fatal(err) + } + + token, err := xor.XORBytes(tokenBytes, []byte(otp)) + if err != nil { + t.Fatal(err) + } + + if l, exp := len(token), vault.TokenLength+vault.TokenPrefixLength; l != exp { + t.Errorf("expected %d to be %d: %s", l, exp, token) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testOperatorGenerateRootCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "secret/foo", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error getting root generation status: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testOperatorGenerateRootCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/operator_init.go b/command/operator_init.go new file mode 100644 index 0000000..080f585 --- /dev/null +++ b/command/operator_init.go @@ -0,0 +1,595 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "net/url" + "runtime" + "strings" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/pgpkeys" + "github.com/mitchellh/cli" + "github.com/posener/complete" + + consulapi "github.com/hashicorp/consul/api" +) + +var ( + _ cli.Command = (*OperatorInitCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorInitCommand)(nil) +) + +type OperatorInitCommand struct { + *BaseCommand + + flagStatus bool + flagKeyShares int + flagKeyThreshold int + flagPGPKeys []string + flagRootTokenPGPKey string + + // Auto Unseal + flagRecoveryShares int + flagRecoveryThreshold int + flagRecoveryPGPKeys []string + flagStoredShares int + + // Consul + flagConsulAuto bool + flagConsulService string +} + +const ( + defKeyShares = 5 + defKeyThreshold = 3 + defRecoveryShares = 5 + defRecoveryThreshold = 3 +) + +func (c *OperatorInitCommand) Synopsis() string { + return "Initializes a server" +} + +func (c *OperatorInitCommand) Help() string { + helpText := ` +Usage: vault operator init [options] + + Initializes a Vault server. Initialization is the process by which Vault's + storage backend is prepared to receive data. Since Vault servers share the + same storage backend in HA mode, you only need to initialize one Vault to + initialize the storage backend. + + During initialization, Vault generates an in-memory root key and applies + Shamir's secret sharing algorithm to disassemble that root key into a + configuration number of key shares such that a configurable subset of those + key shares must come together to regenerate the root key. These keys are + often called "unseal keys" in Vault's documentation. + + This command cannot be run against an already-initialized Vault cluster. + + Start initialization with the default options: + + $ vault operator init + + Initialize, but encrypt the unseal keys with pgp keys: + + $ vault operator init \ + -key-shares=3 \ + -key-threshold=2 \ + -pgp-keys="keybase:hashicorp,keybase:jefferai,keybase:sethvargo" + + Encrypt the initial root token using a pgp key: + + $ vault operator init -root-token-pgp-key="keybase:hashicorp" + +` + c.Flags().Help() + return strings.TrimSpace(helpText) +} + +func (c *OperatorInitCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + // Common Options + f := set.NewFlagSet("Common Options") + + f.BoolVar(&BoolVar{ + Name: "status", + Target: &c.flagStatus, + Default: false, + Usage: "Print the current initialization status. An exit code of 0 means " + + "the Vault is already initialized. An exit code of 1 means an error " + + "occurred. An exit code of 2 means the Vault is not initialized.", + }) + + f.IntVar(&IntVar{ + Name: "key-shares", + Aliases: []string{"n"}, + Target: &c.flagKeyShares, + Completion: complete.PredictAnything, + Usage: "Number of key shares to split the generated root key into. " + + "This is the number of \"unseal keys\" to generate.", + }) + + f.IntVar(&IntVar{ + Name: "key-threshold", + Aliases: []string{"t"}, + Target: &c.flagKeyThreshold, + Completion: complete.PredictAnything, + Usage: "Number of key shares required to reconstruct the root key. 
" + + "This must be less than or equal to -key-shares.", + }) + + f.VarFlag(&VarFlag{ + Name: "pgp-keys", + Value: (*pgpkeys.PubKeyFilesFlag)(&c.flagPGPKeys), + Completion: complete.PredictAnything, + Usage: "Comma-separated list of paths to files on disk containing " + + "public PGP keys OR a comma-separated list of Keybase usernames using " + + "the format \"keybase:\". When supplied, the generated " + + "unseal keys will be encrypted and base64-encoded in the order " + + "specified in this list. The number of entries must match -key-shares, " + + "unless -stored-shares are used.", + }) + + f.VarFlag(&VarFlag{ + Name: "root-token-pgp-key", + Value: (*pgpkeys.PubKeyFileFlag)(&c.flagRootTokenPGPKey), + Completion: complete.PredictAnything, + Usage: "Path to a file on disk containing a binary or base64-encoded " + + "public PGP key. This can also be specified as a Keybase username " + + "using the format \"keybase:\". When supplied, the generated " + + "root token will be encrypted and base64-encoded with the given public " + + "key.", + }) + + f.IntVar(&IntVar{ + Name: "stored-shares", + Target: &c.flagStoredShares, + Default: -1, + Usage: "DEPRECATED: This flag does nothing. It will be removed in Vault 1.3.", + }) + + // Consul Options + f = set.NewFlagSet("Consul Options") + + f.BoolVar(&BoolVar{ + Name: "consul-auto", + Target: &c.flagConsulAuto, + Default: false, + Usage: "Perform automatic service discovery using Consul in HA mode. " + + "When all nodes in a Vault HA cluster are registered with Consul, " + + "enabling this option will trigger automatic service discovery based " + + "on the provided -consul-service value. When Consul is Vault's HA " + + "backend, this functionality is automatically enabled. Ensure the " + + "proper Consul environment variables are set (CONSUL_HTTP_ADDR, etc). " + + "When only one Vault server is discovered, it will be initialized " + + "automatically. When more than one Vault server is discovered, they " + + "will each be output for selection.", + }) + + f.StringVar(&StringVar{ + Name: "consul-service", + Target: &c.flagConsulService, + Default: "vault", + Completion: complete.PredictAnything, + Usage: "Name of the service in Consul under which the Vault servers are " + + "registered.", + }) + + // Auto Unseal Options + f = set.NewFlagSet("Auto Unseal Options") + + f.IntVar(&IntVar{ + Name: "recovery-shares", + Target: &c.flagRecoveryShares, + Completion: complete.PredictAnything, + Usage: "Number of key shares to split the recovery key into. " + + "This is only used in auto-unseal mode.", + }) + + f.IntVar(&IntVar{ + Name: "recovery-threshold", + Target: &c.flagRecoveryThreshold, + Completion: complete.PredictAnything, + Usage: "Number of key shares required to reconstruct the recovery key. " + + "This is only used in Auto Unseal mode.", + }) + + f.VarFlag(&VarFlag{ + Name: "recovery-pgp-keys", + Value: (*pgpkeys.PubKeyFilesFlag)(&c.flagRecoveryPGPKeys), + Completion: complete.PredictAnything, + Usage: "Behaves like -pgp-keys, but for the recovery key shares. 
This " + + "is only used in Auto Unseal mode.", + }) + + return set +} + +func (c *OperatorInitCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *OperatorInitCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *OperatorInitCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + if len(args) > 0 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(args))) + return 1 + } + + if c.flagStoredShares != -1 { + c.UI.Warn("-stored-shares has no effect and will be removed in Vault 1.3.\n") + } + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + // -output-curl string returns curl command for seal status + // setting this to false and then setting actual value after reading seal status + currentOutputCurlString := client.OutputCurlString() + client.SetOutputCurlString(false) + // -output-policy string returns minimum required policy HCL for seal status + // setting this to false and then setting actual value after reading seal status + outputPolicy := client.OutputPolicy() + client.SetOutputPolicy(false) + + // Set defaults based on use of auto unseal seal + sealInfo, err := client.Sys().SealStatus() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + client.SetOutputCurlString(currentOutputCurlString) + client.SetOutputPolicy(outputPolicy) + + switch sealInfo.RecoverySeal { + case true: + if c.flagRecoveryShares == 0 { + c.flagRecoveryShares = defRecoveryShares + } + if c.flagRecoveryThreshold == 0 { + c.flagRecoveryThreshold = defRecoveryThreshold + } + default: + if c.flagKeyShares == 0 { + c.flagKeyShares = defKeyShares + } + if c.flagKeyThreshold == 0 { + c.flagKeyThreshold = defKeyThreshold + } + } + + // Build the initial init request + initReq := &api.InitRequest{ + SecretShares: c.flagKeyShares, + SecretThreshold: c.flagKeyThreshold, + PGPKeys: c.flagPGPKeys, + RootTokenPGPKey: c.flagRootTokenPGPKey, + + RecoveryShares: c.flagRecoveryShares, + RecoveryThreshold: c.flagRecoveryThreshold, + RecoveryPGPKeys: c.flagRecoveryPGPKeys, + } + + // Check auto mode + switch { + case c.flagStatus: + return c.status(client) + case c.flagConsulAuto: + return c.consulAuto(client, initReq) + default: + return c.init(client, initReq) + } +} + +// consulAuto enables auto-joining via Consul. +func (c *OperatorInitCommand) consulAuto(client *api.Client, req *api.InitRequest) int { + // Capture the client original address and reset it + originalAddr := client.Address() + defer client.SetAddress(originalAddr) + + // Create a client to communicate with Consul + consulClient, err := consulapi.NewClient(consulapi.DefaultConfig()) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to create Consul client:%v", err)) + return 1 + } + + // Pull the scheme from the Vault client to determine if the Consul agent + // should talk via HTTP or HTTPS. 
+ addr := client.Address() + clientURL, err := url.Parse(addr) + if err != nil || clientURL == nil { + c.UI.Error(fmt.Sprintf("Failed to parse Vault address %s: %s", addr, err)) + return 1 + } + + var uninitedVaults []string + var initedVault string + + // Query the nodes belonging to the cluster + services, _, err := consulClient.Catalog().Service(c.flagConsulService, "", &consulapi.QueryOptions{ + AllowStale: true, + }) + if err == nil { + for _, service := range services { + // Set the address on the client temporarily + vaultAddr := (&url.URL{ + Scheme: clientURL.Scheme, + Host: fmt.Sprintf("%s:%d", service.ServiceAddress, service.ServicePort), + }).String() + client.SetAddress(vaultAddr) + + // Check the initialization status of the discovered node + inited, err := client.Sys().InitStatus() + if err != nil { + c.UI.Error(fmt.Sprintf("Error checking init status of %q: %s", vaultAddr, err)) + } + if inited { + initedVault = vaultAddr + break + } + + // If we got this far, we communicated successfully with Vault, but it + // was not initialized. + uninitedVaults = append(uninitedVaults, vaultAddr) + } + } + + // Get the correct export keywords and quotes for *nix vs Windows + export := "export" + quote := "\"" + if runtime.GOOS == "windows" { + export = "set" + quote = "" + } + + if initedVault != "" { + vaultURL, err := url.Parse(initedVault) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to parse Vault address %q: %s", initedVault, err)) + return 2 + } + vaultAddr := vaultURL.String() + + c.UI.Output(wrapAtLength(fmt.Sprintf( + "Discovered an initialized Vault node at %q with Consul service name "+ + "%q. Set the following environment variable to target the discovered "+ + "Vault server:", + vaultURL.String(), c.flagConsulService))) + c.UI.Output("") + c.UI.Output(fmt.Sprintf(" $ %s VAULT_ADDR=%s%s%s", export, quote, vaultAddr, quote)) + c.UI.Output("") + return 0 + } + + switch len(uninitedVaults) { + case 0: + c.UI.Error(fmt.Sprintf("No Vault nodes registered as %q in Consul", c.flagConsulService)) + return 2 + case 1: + // There was only one node found in the Vault cluster and it was + // uninitialized. + vaultURL, err := url.Parse(uninitedVaults[0]) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to parse Vault address %q: %s", initedVault, err)) + return 2 + } + vaultAddr := vaultURL.String() + + // Update the client to connect to this Vault server + client.SetAddress(vaultAddr) + + // Let the client know that initialization is performed on the + // discovered node. + c.UI.Output(wrapAtLength(fmt.Sprintf( + "Discovered an initialized Vault node at %q with Consul service name "+ + "%q. Set the following environment variable to target the discovered "+ + "Vault server:", + vaultURL.String(), c.flagConsulService))) + c.UI.Output("") + c.UI.Output(fmt.Sprintf(" $ %s VAULT_ADDR=%s%s%s", export, quote, vaultAddr, quote)) + c.UI.Output("") + c.UI.Output("Attempting to initialize it...") + c.UI.Output("") + + // Attempt to initialize it + return c.init(client, req) + default: + // If more than one Vault node were discovered, print out all of them, + // requiring the client to update VAULT_ADDR and to run init again. + c.UI.Output(wrapAtLength(fmt.Sprintf( + "Discovered %d uninitialized Vault servers with Consul service name "+ + "%q. 
To initialize these Vaults, set any one of the following "+ + "environment variables and run \"vault operator init\":", + len(uninitedVaults), c.flagConsulService))) + c.UI.Output("") + + // Print valid commands to make setting the variables easier + for _, node := range uninitedVaults { + vaultURL, err := url.Parse(node) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to parse Vault address %q: %s", initedVault, err)) + return 2 + } + vaultAddr := vaultURL.String() + + c.UI.Output(fmt.Sprintf(" $ %s VAULT_ADDR=%s%s%s", export, quote, vaultAddr, quote)) + } + + c.UI.Output("") + return 0 + } +} + +func (c *OperatorInitCommand) init(client *api.Client, req *api.InitRequest) int { + resp, err := client.Sys().Init(req) + if err != nil { + c.UI.Error(fmt.Sprintf("Error initializing: %s", err)) + return 2 + } + + switch Format(c.UI) { + case "table": + default: + return OutputData(c.UI, newMachineInit(req, resp)) + } + + for i, key := range resp.Keys { + if resp.KeysB64 != nil && len(resp.KeysB64) == len(resp.Keys) { + c.UI.Output(fmt.Sprintf("Unseal Key %d: %s", i+1, resp.KeysB64[i])) + } else { + c.UI.Output(fmt.Sprintf("Unseal Key %d: %s", i+1, key)) + } + } + for i, key := range resp.RecoveryKeys { + if resp.RecoveryKeysB64 != nil && len(resp.RecoveryKeysB64) == len(resp.RecoveryKeys) { + c.UI.Output(fmt.Sprintf("Recovery Key %d: %s", i+1, resp.RecoveryKeysB64[i])) + } else { + c.UI.Output(fmt.Sprintf("Recovery Key %d: %s", i+1, key)) + } + } + + c.UI.Output("") + c.UI.Output(fmt.Sprintf("Initial Root Token: %s", resp.RootToken)) + + if len(resp.Keys) > 0 { + c.UI.Output("") + c.UI.Output(wrapAtLength(fmt.Sprintf( + "Vault initialized with %d key shares and a key threshold of %d. Please "+ + "securely distribute the key shares printed above. When the Vault is "+ + "re-sealed, restarted, or stopped, you must supply at least %d of "+ + "these keys to unseal it before it can start servicing requests.", + req.SecretShares, + req.SecretThreshold, + req.SecretThreshold))) + + c.UI.Output("") + c.UI.Output(wrapAtLength(fmt.Sprintf( + "Vault does not store the generated root key. Without at least %d "+ + "keys to reconstruct the root key, Vault will remain permanently "+ + "sealed!", + req.SecretThreshold))) + + c.UI.Output("") + c.UI.Output(wrapAtLength( + "It is possible to generate new unseal keys, provided you have a quorum " + + "of existing unseal keys shares. See \"vault operator rekey\" for " + + "more information.")) + } else { + c.UI.Output("") + c.UI.Output("Success! Vault is initialized") + } + + if len(resp.RecoveryKeys) > 0 { + c.UI.Output("") + c.UI.Output(wrapAtLength(fmt.Sprintf( + "Recovery key initialized with %d key shares and a key threshold of %d. "+ + "Please securely distribute the key shares printed above.", + req.RecoveryShares, + req.RecoveryThreshold))) + } + + return 0 +} + +// status inspects the init status of vault and returns an appropriate error +// code and message. 
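// Editorial note: the return codes below follow the -status flag's
// documented contract (0 = initialized, 2 = not initialized, 1 = error);
// 1 doubles as the generic error code because 2 already carries meaning
// for this subcommand.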
+func (c *OperatorInitCommand) status(client *api.Client) int { + inited, err := client.Sys().InitStatus() + if err != nil { + c.UI.Error(fmt.Sprintf("Error checking init status: %s", err)) + return 1 // Normally we'd return 2, but 2 means something special here + } + + errorCode := 0 + + if !inited { + errorCode = 2 + } + + switch Format(c.UI) { + case "table": + if inited { + c.UI.Output("Vault is initialized") + } else { + c.UI.Output("Vault is not initialized") + } + default: + data := api.InitStatusResponse{Initialized: inited} + OutputData(c.UI, data) + } + + return errorCode +} + +// machineInit is used to output information about the init command. +type machineInit struct { + UnsealKeysB64 []string `json:"unseal_keys_b64"` + UnsealKeysHex []string `json:"unseal_keys_hex"` + UnsealShares int `json:"unseal_shares"` + UnsealThreshold int `json:"unseal_threshold"` + RecoveryKeysB64 []string `json:"recovery_keys_b64"` + RecoveryKeysHex []string `json:"recovery_keys_hex"` + RecoveryShares int `json:"recovery_keys_shares"` + RecoveryThreshold int `json:"recovery_keys_threshold"` + RootToken string `json:"root_token"` +} + +func newMachineInit(req *api.InitRequest, resp *api.InitResponse) *machineInit { + init := &machineInit{} + + init.UnsealKeysHex = make([]string, len(resp.Keys)) + for i, v := range resp.Keys { + init.UnsealKeysHex[i] = v + } + + init.UnsealKeysB64 = make([]string, len(resp.KeysB64)) + for i, v := range resp.KeysB64 { + init.UnsealKeysB64[i] = v + } + + // If we don't get a set of keys back, it means that we are storing the keys, + // so the key shares and threshold has been set to 1. + if len(resp.Keys) == 0 { + init.UnsealShares = 1 + init.UnsealThreshold = 1 + } else { + init.UnsealShares = req.SecretShares + init.UnsealThreshold = req.SecretThreshold + } + + init.RecoveryKeysHex = make([]string, len(resp.RecoveryKeys)) + for i, v := range resp.RecoveryKeys { + init.RecoveryKeysHex[i] = v + } + + init.RecoveryKeysB64 = make([]string, len(resp.RecoveryKeysB64)) + for i, v := range resp.RecoveryKeysB64 { + init.RecoveryKeysB64[i] = v + } + + init.RecoveryShares = req.RecoveryShares + init.RecoveryThreshold = req.RecoveryThreshold + + init.RootToken = resp.RootToken + + return init +} diff --git a/command/operator_init_test.go b/command/operator_init_test.go new file mode 100644 index 0000000..06647d7 --- /dev/null +++ b/command/operator_init_test.go @@ -0,0 +1,374 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +//go:build !race + +package command + +import ( + "fmt" + "os" + "regexp" + "strconv" + "strings" + "testing" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/pgpkeys" + "github.com/hashicorp/vault/vault" + "github.com/mitchellh/cli" +) + +func testOperatorInitCommand(tb testing.TB) (*cli.MockUi, *OperatorInitCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &OperatorInitCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestOperatorInitCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "too_many_args", + []string{"foo"}, + "Too many arguments", + 1, + }, + { + "pgp_keys_multi", + []string{ + "-pgp-keys", "keybase:hashicorp", + "-pgp-keys", "keybase:jefferai", + }, + "can only be specified once", + 1, + }, + { + "root_token_pgp_key_multi", + []string{ + "-root-token-pgp-key", "keybase:hashicorp", + "-root-token-pgp-key", "keybase:jefferai", + }, + "can only be specified once", + 1, + }, + { + "root_token_pgp_key_multi_inline", + []string{ + "-root-token-pgp-key", "keybase:hashicorp,keybase:jefferai", + }, + "can only specify one pgp key", + 1, + }, + { + "recovery_pgp_keys_multi", + []string{ + "-recovery-pgp-keys", "keybase:hashicorp", + "-recovery-pgp-keys", "keybase:jefferai", + }, + "can only be specified once", + 1, + }, + { + "key_shares_pgp_less", + []string{ + "-key-shares", "10", + "-pgp-keys", "keybase:jefferai,keybase:sethvargo", + }, + "incorrect number", + 2, + }, + { + "key_shares_pgp_more", + []string{ + "-key-shares", "1", + "-pgp-keys", "keybase:jefferai,keybase:sethvargo", + }, + "incorrect number", + 2, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testOperatorInitCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("status", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerUninit(t) + defer closer() + + ui, cmd := testOperatorInitCommand(t) + cmd.client = client + + // Verify the non-init response code + code := cmd.Run([]string{ + "-status", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) + } + + // Now init to verify the init response code + if _, err := client.Sys().Init(&api.InitRequest{ + SecretShares: 1, + SecretThreshold: 1, + }); err != nil { + t.Fatal(err) + } + + // Verify the init response code + ui, cmd = testOperatorInitCommand(t) + cmd.client = client + code = cmd.Run([]string{ + "-status", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) + } + }) + + t.Run("default", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerUninit(t) + defer closer() + + ui, cmd := testOperatorInitCommand(t) + cmd.client = client + + code := cmd.Run([]string{}) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) + } + + init, err := client.Sys().InitStatus() + if err != nil { + t.Fatal(err) + } + if !init { + 
t.Error("expected initialized") + } + + re := regexp.MustCompile(`Unseal Key \d+: (.+)`) + output := ui.OutputWriter.String() + match := re.FindAllStringSubmatch(output, -1) + if len(match) < 5 || len(match[0]) < 2 { + t.Fatalf("no match: %#v", match) + } + + keys := make([]string, len(match)) + for i := range match { + keys[i] = match[i][1] + } + + // Try unsealing with those keys - only use 3, which is the default + // threshold. + for i, key := range keys[:3] { + resp, err := client.Sys().Unseal(key) + if err != nil { + t.Fatal(err) + } + + exp := (i + 1) % 3 // 1, 2, 0 + if resp.Progress != exp { + t.Errorf("expected %d to be %d", resp.Progress, exp) + } + } + + status, err := client.Sys().SealStatus() + if err != nil { + t.Fatal(err) + } + if status.Sealed { + t.Errorf("expected vault to be unsealed: %#v", status) + } + }) + + t.Run("custom_shares_threshold", func(t *testing.T) { + t.Parallel() + + keyShares, keyThreshold := 20, 15 + + client, closer := testVaultServerUninit(t) + defer closer() + + ui, cmd := testOperatorInitCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-key-shares", strconv.Itoa(keyShares), + "-key-threshold", strconv.Itoa(keyThreshold), + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) + } + + init, err := client.Sys().InitStatus() + if err != nil { + t.Fatal(err) + } + if !init { + t.Error("expected initialized") + } + + re := regexp.MustCompile(`Unseal Key \d+: (.+)`) + output := ui.OutputWriter.String() + match := re.FindAllStringSubmatch(output, -1) + if len(match) < keyShares || len(match[0]) < 2 { + t.Fatalf("no match: %#v", match) + } + + keys := make([]string, len(match)) + for i := range match { + keys[i] = match[i][1] + } + + // Try unsealing with those keys - only use 3, which is the default + // threshold. 
+ for i, key := range keys[:keyThreshold] { + resp, err := client.Sys().Unseal(key) + if err != nil { + t.Fatal(err) + } + + exp := (i + 1) % keyThreshold + if resp.Progress != exp { + t.Errorf("expected %d to be %d", resp.Progress, exp) + } + } + + status, err := client.Sys().SealStatus() + if err != nil { + t.Fatal(err) + } + if status.Sealed { + t.Errorf("expected vault to be unsealed: %#v", status) + } + }) + + t.Run("pgp", func(t *testing.T) { + t.Parallel() + + tempDir, pubFiles, err := getPubKeyFiles(t) + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempDir) + + client, closer := testVaultServerUninit(t) + defer closer() + + ui, cmd := testOperatorInitCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-key-shares", "4", + "-key-threshold", "2", + "-pgp-keys", fmt.Sprintf("%s,@%s, %s, %s ", + pubFiles[0], pubFiles[1], pubFiles[2], pubFiles[3]), + "-root-token-pgp-key", pubFiles[0], + }) + if exp := 0; code != exp { + t.Fatalf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) + } + + re := regexp.MustCompile(`Unseal Key \d+: (.+)`) + output := ui.OutputWriter.String() + match := re.FindAllStringSubmatch(output, -1) + if len(match) < 4 || len(match[0]) < 2 { + t.Fatalf("no match: %#v", match) + } + + keys := make([]string, len(match)) + for i := range match { + keys[i] = match[i][1] + } + + // Try unsealing with one key + decryptedKey := testPGPDecrypt(t, pgpkeys.TestPrivKey1, keys[0]) + if _, err := client.Sys().Unseal(decryptedKey); err != nil { + t.Fatal(err) + } + + // Decrypt the root token + reToken := regexp.MustCompile(`Root Token: (.+)`) + match = reToken.FindAllStringSubmatch(output, -1) + if len(match) < 1 || len(match[0]) < 2 { + t.Fatalf("no match") + } + root := match[0][1] + decryptedRoot := testPGPDecrypt(t, pgpkeys.TestPrivKey1, root) + + if l, exp := len(decryptedRoot), vault.TokenLength+vault.TokenPrefixLength; l != exp { + t.Errorf("expected %d to be %d", l, exp) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testOperatorInitCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-key-shares=1", + "-key-threshold=1", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error making API request" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testOperatorInitCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/operator_key_status.go b/command/operator_key_status.go new file mode 100644 index 0000000..412a00c --- /dev/null +++ b/command/operator_key_status.go @@ -0,0 +1,84 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*OperatorKeyStatusCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorKeyStatusCommand)(nil) +) + +type OperatorKeyStatusCommand struct { + *BaseCommand +} + +func (c *OperatorKeyStatusCommand) Synopsis() string { + return "Provides information about the active encryption key" +} + +func (c *OperatorKeyStatusCommand) Help() string { + helpText := ` +Usage: vault operator key-status [options] + + Provides information about the active encryption key. 
Specifically, + the current key term and the key installation time. + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *OperatorKeyStatusCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP | FlagSetOutputFormat) +} + +func (c *OperatorKeyStatusCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *OperatorKeyStatusCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *OperatorKeyStatusCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + if len(args) > 0 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + status, err := client.Sys().KeyStatus() + if err != nil { + c.UI.Error(fmt.Sprintf("Error reading key status: %s", err)) + return 2 + } + + switch Format(c.UI) { + case "table": + c.UI.Output(printKeyStatus(status)) + return 0 + default: + return OutputData(c.UI, status) + } +} diff --git a/command/operator_key_status_test.go b/command/operator_key_status_test.go new file mode 100644 index 0000000..9f8fbb0 --- /dev/null +++ b/command/operator_key_status_test.go @@ -0,0 +1,117 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func testOperatorKeyStatusCommand(tb testing.TB) (*cli.MockUi, *OperatorKeyStatusCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &OperatorKeyStatusCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestOperatorKeyStatusCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "too_many_args", + []string{"foo", "bar"}, + "Too many arguments", + 1, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testOperatorKeyStatusCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("integration", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testOperatorKeyStatusCommand(t) + cmd.client = client + + code := cmd.Run([]string{}) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Key Term" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testOperatorKeyStatusCommand(t) + cmd.client = client + + code := cmd.Run([]string{}) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error reading key status: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", 
combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testOperatorKeyStatusCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/operator_members.go b/command/operator_members.go new file mode 100644 index 0000000..986313a --- /dev/null +++ b/command/operator_members.go @@ -0,0 +1,94 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + "time" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*OperatorMembersCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorMembersCommand)(nil) +) + +type OperatorMembersCommand struct { + *BaseCommand +} + +func (c *OperatorMembersCommand) Synopsis() string { + return "Returns the list of nodes in the cluster" +} + +func (c *OperatorMembersCommand) Help() string { + helpText := ` +Usage: vault operator members + + Provides the details of all the nodes in the cluster. + + $ vault operator members + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *OperatorMembersCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + return set +} + +func (c *OperatorMembersCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictAnything +} + +func (c *OperatorMembersCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *OperatorMembersCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + resp, err := client.Sys().HAStatus() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + switch Format(c.UI) { + case "table": + out := make([]string, 0) + cols := []string{"Host Name", "API Address", "Cluster Address", "Active Node", "Version", "Upgrade Version", "Redundancy Zone", "Last Echo"} + out = append(out, strings.Join(cols, " | ")) + for _, node := range resp.Nodes { + cols := []string{node.Hostname, node.APIAddress, node.ClusterAddress, fmt.Sprintf("%t", node.ActiveNode), node.Version, node.UpgradeVersion, node.RedundancyZone} + if node.LastEcho != nil { + cols = append(cols, node.LastEcho.Format(time.RFC3339)) + } else { + cols = append(cols, "") + } + out = append(out, strings.Join(cols, " | ")) + } + c.UI.Output(tableOutput(out, nil)) + return 0 + default: + return OutputData(c.UI, resp) + } +} diff --git a/command/operator_migrate.go b/command/operator_migrate.go new file mode 100644 index 0000000..01e8bbb --- /dev/null +++ b/command/operator_migrate.go @@ -0,0 +1,426 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "context" + "fmt" + "io/ioutil" + "math" + "net/url" + "os" + "sort" + "strings" + "time" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/vault/command/server" + "github.com/hashicorp/vault/physical/raft" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" + "github.com/hashicorp/vault/vault" + "github.com/mitchellh/cli" + "github.com/pkg/errors" + "github.com/posener/complete" + "golang.org/x/sync/errgroup" +) + +var ( + _ cli.Command = (*OperatorMigrateCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorMigrateCommand)(nil) +) + +var errAbort = errors.New("Migration aborted") + +type OperatorMigrateCommand struct { + *BaseCommand + + PhysicalBackends map[string]physical.Factory + flagConfig string + flagLogLevel string + flagStart string + flagReset bool + flagMaxParallel int + logger log.Logger + ShutdownCh chan struct{} +} + +type migratorConfig struct { + StorageSource *server.Storage `hcl:"-"` + StorageDestination *server.Storage `hcl:"-"` + ClusterAddr string `hcl:"cluster_addr"` +} + +func (c *OperatorMigrateCommand) Synopsis() string { + return "Migrates Vault data between storage backends" +} + +func (c *OperatorMigrateCommand) Help() string { + helpText := ` +Usage: vault operator migrate [options] + + This command starts a storage backend migration process to copy all data + from one backend to another. This operates directly on encrypted data and + does not require a Vault server, nor any unsealing. + + Start a migration with a configuration file: + + $ vault operator migrate -config=migrate.hcl + + For more information, please see the documentation. + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *OperatorMigrateCommand) Flags() *FlagSets { + set := NewFlagSets(c.UI) + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "config", + Target: &c.flagConfig, + Completion: complete.PredictOr( + complete.PredictFiles("*.hcl"), + ), + Usage: "Path to a configuration file. This configuration file should " + + "contain only migrator directives.", + }) + + f.StringVar(&StringVar{ + Name: "start", + Target: &c.flagStart, + Usage: "Only copy keys lexicographically at or after this value.", + }) + + f.BoolVar(&BoolVar{ + Name: "reset", + Target: &c.flagReset, + Usage: "Reset the migration lock. No migration will occur.", + }) + + f.IntVar(&IntVar{ + Name: "max-parallel", + Default: 10, + Target: &c.flagMaxParallel, + Usage: "Specifies the maximum number of parallel migration threads (goroutines) that may be used when migrating. " + + "This can speed up the migration process on slow backends but uses more resources.", + }) + + f.StringVar(&StringVar{ + Name: "log-level", + Target: &c.flagLogLevel, + Default: "info", + EnvVar: "VAULT_LOG_LEVEL", + Completion: complete.PredictSet("trace", "debug", "info", "warn", "error"), + Usage: "Log verbosity level. Supported values (in order of detail) are " + + "\"trace\", \"debug\", \"info\", \"warn\", and \"error\". 
These are not case sensitive.",
+	})
+
+	return set
+}
+
+func (c *OperatorMigrateCommand) AutocompleteArgs() complete.Predictor {
+	return nil
+}
+
+func (c *OperatorMigrateCommand) AutocompleteFlags() complete.Flags {
+	return c.Flags().Completions()
+}
+
+func (c *OperatorMigrateCommand) Run(args []string) int {
+	f := c.Flags()
+
+	if err := f.Parse(args); err != nil {
+		c.UI.Error(err.Error())
+		return 1
+	}
+	c.flagLogLevel = strings.ToLower(c.flagLogLevel)
+	validLevels := []string{"trace", "debug", "info", "warn", "error"}
+	if !strutil.StrListContains(validLevels, c.flagLogLevel) {
+		c.UI.Error(fmt.Sprintf("%s is an unknown log level. Valid log levels are: %s", c.flagLogLevel, validLevels))
+		return 1
+	}
+	c.logger = logging.NewVaultLogger(log.LevelFromString(c.flagLogLevel))
+
+	if c.flagMaxParallel < 1 {
+		c.UI.Error(fmt.Sprintf("Argument to flag -max-parallel must be between 1 and %d", math.MaxInt))
+		return 1
+	}
+
+	if c.flagConfig == "" {
+		c.UI.Error("Must specify exactly one config path using -config")
+		return 1
+	}
+
+	config, err := c.loadMigratorConfig(c.flagConfig)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("Error loading configuration from %s: %s", c.flagConfig, err))
+		return 1
+	}
+
+	if err := c.migrate(config); err != nil {
+		if err == errAbort {
+			return 0
+		}
+		c.UI.Error(fmt.Sprintf("Error migrating: %s", err))
+		return 2
+	}
+
+	if c.flagReset {
+		c.UI.Output("Success! Migration lock reset (if it was set).")
+	} else {
+		c.UI.Output("Success! All of the keys have been migrated.")
+	}
+
+	return 0
+}
+
+// migrate attempts to instantiate the source and destination backends, and
+// then invokes the migration at the root of the keyspace.
+func (c *OperatorMigrateCommand) migrate(config *migratorConfig) error {
+	from, err := c.newBackend(config.StorageSource.Type, config.StorageSource.Config)
+	if err != nil {
+		return fmt.Errorf("error mounting 'storage_source': %w", err)
+	}
+
+	if c.flagReset {
+		if err := SetStorageMigration(from, false); err != nil {
+			return fmt.Errorf("error resetting migration lock: %w", err)
+		}
+		return nil
+	}
+
+	to, err := c.createDestinationBackend(config.StorageDestination.Type, config.StorageDestination.Config, config)
+	if err != nil {
+		return fmt.Errorf("error mounting 'storage_destination': %w", err)
+	}
+
+	migrationStatus, err := CheckStorageMigration(from)
+	if err != nil {
+		return fmt.Errorf("error checking migration status: %w", err)
+	}
+
+	if migrationStatus != nil {
+		return fmt.Errorf("storage migration in progress (started: %s)", migrationStatus.Start.Format(time.RFC3339))
+	}
+
+	switch config.StorageSource.Type {
+	case "raft":
+		// Raft storage cannot be written to when shut down. Also, the boltDB
+		// file already uses file locking to ensure two processes are not
+		// accessing it.
+	default:
+		if err := SetStorageMigration(from, true); err != nil {
+			return fmt.Errorf("error setting migration lock: %w", err)
+		}
+
+		defer SetStorageMigration(from, false)
+	}
+
+	ctx, cancelFunc := context.WithCancel(context.Background())
+
+	doneCh := make(chan error)
+	go func() {
+		doneCh <- c.migrateAll(ctx, from, to, c.flagMaxParallel)
+	}()
+
+	select {
+	case err := <-doneCh:
+		cancelFunc()
+		return err
+	case <-c.ShutdownCh:
+		c.UI.Output("==> Migration shutdown triggered\n")
+		cancelFunc()
+		<-doneCh
+		return errAbort
+	}
+}
+
+// migrateAll copies all keys in lexicographic order.
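+// Keys that sort before the -start value, the storage migration lock, and
+// Vault's core lock path are skipped; remaining entries are read from the
+// source and written to the destination with up to maxParallel goroutines.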
+func (c *OperatorMigrateCommand) migrateAll(ctx context.Context, from physical.Backend, to physical.Backend, maxParallel int) error { + return dfsScan(ctx, from, maxParallel, func(ctx context.Context, path string) error { + if path < c.flagStart || path == storageMigrationLock || path == vault.CoreLockPath { + return nil + } + + entry, err := from.Get(ctx, path) + if err != nil { + return fmt.Errorf("error reading entry: %w", err) + } + + if entry == nil { + return nil + } + + if err := to.Put(ctx, entry); err != nil { + return fmt.Errorf("error writing entry: %w", err) + } + c.logger.Info("copied key", "path", path) + return nil + }) +} + +func (c *OperatorMigrateCommand) newBackend(kind string, conf map[string]string) (physical.Backend, error) { + factory, ok := c.PhysicalBackends[kind] + if !ok { + return nil, fmt.Errorf("no Vault storage backend named: %+q", kind) + } + + return factory(conf, c.logger) +} + +func (c *OperatorMigrateCommand) createDestinationBackend(kind string, conf map[string]string, config *migratorConfig) (physical.Backend, error) { + storage, err := c.newBackend(kind, conf) + if err != nil { + return nil, err + } + + switch kind { + case "raft": + if len(config.ClusterAddr) == 0 { + return nil, errors.New("cluster_addr config not set") + } + + raftStorage, ok := storage.(*raft.RaftBackend) + if !ok { + return nil, errors.New("wrong storage type for raft backend") + } + + parsedClusterAddr, err := url.Parse(config.ClusterAddr) + if err != nil { + return nil, fmt.Errorf("error parsing cluster address: %w", err) + } + if err := raftStorage.Bootstrap([]raft.Peer{ + { + ID: raftStorage.NodeID(), + Address: parsedClusterAddr.Host, + }, + }); err != nil { + return nil, fmt.Errorf("could not bootstrap clustered storage: %w", err) + } + + if err := raftStorage.SetupCluster(context.Background(), raft.SetupOpts{ + StartAsLeader: true, + }); err != nil { + return nil, fmt.Errorf("could not start clustered storage: %w", err) + } + } + + return storage, nil +} + +// loadMigratorConfig loads the configuration at the given path +func (c *OperatorMigrateCommand) loadMigratorConfig(path string) (*migratorConfig, error) { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + + if fi.IsDir() { + return nil, fmt.Errorf("location is a directory, not a file") + } + + d, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + + obj, err := hcl.ParseBytes(d) + if err != nil { + return nil, err + } + + var result migratorConfig + if err := hcl.DecodeObject(&result, obj); err != nil { + return nil, err + } + + list, ok := obj.Node.(*ast.ObjectList) + if !ok { + return nil, fmt.Errorf("error parsing: file doesn't contain a root object") + } + + // Look for storage_* stanzas + for _, stanza := range []string{"storage_source", "storage_destination"} { + o := list.Filter(stanza) + if len(o.Items) != 1 { + return nil, fmt.Errorf("exactly one %q block is required", stanza) + } + + if err := parseStorage(&result, o, stanza); err != nil { + return nil, fmt.Errorf("error parsing %q: %w", stanza, err) + } + } + return &result, nil +} + +// parseStorage reuses the existing storage parsing that's part of the main Vault +// config processing, but only keeps the storage result. 
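+//
+// For example, a migrator config pairs a "storage_source" stanza with a
+// "storage_destination" stanza (the backend types and paths below are
+// illustrative):
+//
+//	storage_source "file" {
+//	  path = "/var/vault/data"
+//	}
+//
+//	storage_destination "raft" {
+//	  path = "/var/vault/raft"
+//	}
+//
+// A "raft" destination additionally requires cluster_addr to be set at the
+// top level (see createDestinationBackend above).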
+func parseStorage(result *migratorConfig, list *ast.ObjectList, name string) error { + tmpConfig := new(server.Config) + + if err := server.ParseStorage(tmpConfig, list, name); err != nil { + return err + } + + switch name { + case "storage_source": + result.StorageSource = tmpConfig.Storage + case "storage_destination": + result.StorageDestination = tmpConfig.Storage + default: + return fmt.Errorf("unknown storage name: %s", name) + } + + return nil +} + +// dfsScan will invoke cb with every key from source. +// Keys will be traversed in lexicographic, depth-first order. +func dfsScan(ctx context.Context, source physical.Backend, maxParallel int, cb func(ctx context.Context, path string) error) error { + dfs := []string{""} + + eg, ctx := errgroup.WithContext(ctx) + eg.SetLimit(maxParallel) + + for l := len(dfs); l > 0; l = len(dfs) { + // Check for cancellation + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + key := dfs[len(dfs)-1] + if key == "" || strings.HasSuffix(key, "/") { + children, err := source.List(ctx, key) + if err != nil { + return fmt.Errorf("failed to scan for children: %w", err) + } + sort.Strings(children) + + // remove List-triggering key and add children in reverse order + dfs = dfs[:len(dfs)-1] + for i := len(children) - 1; i >= 0; i-- { + if children[i] != "" { + dfs = append(dfs, key+children[i]) + } + } + } else { + // Pooling + eg.Go(func() error { + return cb(ctx, key) + }) + + dfs = dfs[:len(dfs)-1] + } + } + + return eg.Wait() +} diff --git a/command/operator_migrate_test.go b/command/operator_migrate_test.go new file mode 100644 index 0000000..3d28430 --- /dev/null +++ b/command/operator_migrate_test.go @@ -0,0 +1,412 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "context" + "fmt" + "math/rand" + "os" + "path/filepath" + "reflect" + "sort" + "strings" + "sync" + "testing" + "time" + + "github.com/go-test/deep" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-secure-stdlib/base62" + "github.com/hashicorp/vault/command/server" + "github.com/hashicorp/vault/sdk/physical" + "github.com/hashicorp/vault/vault" +) + +const trailing_slash_key = "trailing_slash/" + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +func TestMigration(t *testing.T) { + t.Run("Default", func(t *testing.T) { + data := generateData() + + fromFactory := physicalBackends["file"] + + folder := t.TempDir() + + confFrom := map[string]string{ + "path": folder, + } + + from, err := fromFactory(confFrom, nil) + if err != nil { + t.Fatal(err) + } + if err := storeData(from, data); err != nil { + t.Fatal(err) + } + + toFactory := physicalBackends["inmem"] + confTo := map[string]string{} + to, err := toFactory(confTo, nil) + if err != nil { + t.Fatal(err) + } + cmd := OperatorMigrateCommand{ + logger: log.NewNullLogger(), + } + if err := cmd.migrateAll(context.Background(), from, to, 1); err != nil { + t.Fatal(err) + } + + if err := compareStoredData(to, data, ""); err != nil { + t.Fatal(err) + } + }) + + t.Run("Concurrent migration", func(t *testing.T) { + data := generateData() + + fromFactory := physicalBackends["file"] + + folder := t.TempDir() + + confFrom := map[string]string{ + "path": folder, + } + + from, err := fromFactory(confFrom, nil) + if err != nil { + t.Fatal(err) + } + if err := storeData(from, data); err != nil { + t.Fatal(err) + } + + toFactory := physicalBackends["inmem"] + confTo := map[string]string{} + to, err := toFactory(confTo, nil) + if err != nil { + 
t.Fatal(err)
+		}
+
+		cmd := OperatorMigrateCommand{
+			logger: log.NewNullLogger(),
+		}
+
+		if err := cmd.migrateAll(context.Background(), from, to, 10); err != nil {
+			t.Fatal(err)
+		}
+		if err := compareStoredData(to, data, ""); err != nil {
+			t.Fatal(err)
+		}
+	})
+
+	t.Run("Start option", func(t *testing.T) {
+		data := generateData()
+
+		fromFactory := physicalBackends["inmem"]
+		confFrom := map[string]string{}
+		from, err := fromFactory(confFrom, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if err := storeData(from, data); err != nil {
+			t.Fatal(err)
+		}
+
+		toFactory := physicalBackends["file"]
+		folder := t.TempDir()
+		confTo := map[string]string{
+			"path": folder,
+		}
+
+		to, err := toFactory(confTo, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		const start = "m"
+
+		cmd := OperatorMigrateCommand{
+			logger:    log.NewNullLogger(),
+			flagStart: start,
+		}
+		if err := cmd.migrateAll(context.Background(), from, to, 1); err != nil {
+			t.Fatal(err)
+		}
+
+		if err := compareStoredData(to, data, start); err != nil {
+			t.Fatal(err)
+		}
+	})
+
+	t.Run("Start option (parallel)", func(t *testing.T) {
+		data := generateData()
+
+		fromFactory := physicalBackends["inmem"]
+		confFrom := map[string]string{}
+		from, err := fromFactory(confFrom, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if err := storeData(from, data); err != nil {
+			t.Fatal(err)
+		}
+
+		toFactory := physicalBackends["file"]
+		folder := t.TempDir()
+		confTo := map[string]string{
+			"path": folder,
+		}
+
+		to, err := toFactory(confTo, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		const start = "m"
+
+		cmd := OperatorMigrateCommand{
+			logger:    log.NewNullLogger(),
+			flagStart: start,
+		}
+		if err := cmd.migrateAll(context.Background(), from, to, 10); err != nil {
+			t.Fatal(err)
+		}
+
+		if err := compareStoredData(to, data, start); err != nil {
+			t.Fatal(err)
+		}
+	})
+
+	t.Run("Config parsing", func(t *testing.T) {
+		cmd := new(OperatorMigrateCommand)
+		cfgName := filepath.Join(t.TempDir(), "migrator")
+		os.WriteFile(cfgName, []byte(`
+storage_source "src_type" {
+  path = "src_path"
+}
+
+storage_destination "dest_type" {
+  path = "dest_path"
+}`), 0o644)
+
+		expCfg := &migratorConfig{
+			StorageSource: &server.Storage{
+				Type: "src_type",
+				Config: map[string]string{
+					"path": "src_path",
+				},
+			},
+			StorageDestination: &server.Storage{
+				Type: "dest_type",
+				Config: map[string]string{
+					"path": "dest_path",
+				},
+			},
+		}
+		cfg, err := cmd.loadMigratorConfig(cfgName)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if diff := deep.Equal(cfg, expCfg); diff != nil {
+			t.Fatal(diff)
+		}
+
+		verifyBad := func(cfg string) {
+			os.WriteFile(cfgName, []byte(cfg), 0o644)
+			_, err := cmd.loadMigratorConfig(cfgName)
+			if err == nil {
+				t.Fatalf("expected error but none received from: %v", cfg)
+			}
+		}
+
+		// missing source
+		verifyBad(`
+storage_destination "dest_type" {
+  path = "dest_path"
+}`)
+
+		// missing destination
+		verifyBad(`
+storage_source "src_type" {
+  path = "src_path"
+}`)
+
+		// duplicate source
+		verifyBad(`
+storage_source "src_type" {
+  path = "src_path"
+}
+
+storage_source "src_type2" {
+  path = "src_path"
+}
+
+storage_destination "dest_type" {
+  path = "dest_path"
+}`)
+
+		// duplicate destination
+		verifyBad(`
+storage_source "src_type" {
+  path = "src_path"
+}
+
+storage_destination "dest_type" {
+  path = "dest_path"
+}
+
+storage_destination "dest_type2" {
+  path = "dest_path"
+}`)
+	})
+
+	t.Run("DFS Scan", func(t *testing.T) {
+		s, _ := physicalBackends["inmem"](map[string]string{}, nil)
+
+		data := generateData()
+		data["cc"] = 
[]byte{} + data["c/d/e/f"] = []byte{} + data["c/d/e/g"] = []byte{} + data["c"] = []byte{} + storeData(s, data) + + l := randomLister{s} + + type SafeAppend struct { + out []string + lock sync.Mutex + } + outKeys := SafeAppend{} + dfsScan(context.Background(), l, 10, func(ctx context.Context, path string) error { + outKeys.lock.Lock() + defer outKeys.lock.Unlock() + + outKeys.out = append(outKeys.out, path) + return nil + }) + + delete(data, trailing_slash_key) + delete(data, "") + + var keys []string + for key := range data { + keys = append(keys, key) + } + sort.Strings(keys) + outKeys.lock.Lock() + sort.Strings(outKeys.out) + outKeys.lock.Unlock() + if !reflect.DeepEqual(keys, outKeys.out) { + t.Fatalf("expected equal: %v, %v", keys, outKeys.out) + } + }) +} + +// randomLister wraps a physical backend, providing a List method +// that returns results in a random order. +type randomLister struct { + b physical.Backend +} + +func (l randomLister) List(ctx context.Context, path string) ([]string, error) { + result, err := l.b.List(ctx, path) + if err != nil { + return nil, err + } + rand.Shuffle(len(result), func(i, j int) { + result[i], result[j] = result[j], result[i] + }) + return result, err +} + +func (l randomLister) Get(ctx context.Context, path string) (*physical.Entry, error) { + return l.b.Get(ctx, path) +} + +func (l randomLister) Put(ctx context.Context, entry *physical.Entry) error { + return l.b.Put(ctx, entry) +} + +func (l randomLister) Delete(ctx context.Context, path string) error { + return l.b.Delete(ctx, path) +} + +// generateData creates a map of 500 random keys and values +func generateData() map[string][]byte { + result := make(map[string][]byte) + for i := 0; i < 500; i++ { + segments := make([]string, rand.Intn(8)+1) + for j := 0; j < len(segments); j++ { + s, _ := base62.Random(6) + segments[j] = s + } + data := make([]byte, 100) + rand.Read(data) + result[strings.Join(segments, "/")] = data + } + + // Add special keys that should be excluded from migration + result[storageMigrationLock] = []byte{} + result[vault.CoreLockPath] = []byte{} + + // Empty keys are now prevented in Vault, but older data sets + // might contain them. + result[""] = []byte{} + result[trailing_slash_key] = []byte{} + + return result +} + +func storeData(s physical.Backend, ref map[string][]byte) error { + for k, v := range ref { + entry := physical.Entry{ + Key: k, + Value: v, + } + + err := s.Put(context.Background(), &entry) + if err != nil { + return err + } + } + return nil +} + +func compareStoredData(s physical.Backend, ref map[string][]byte, start string) error { + for k, v := range ref { + entry, err := s.Get(context.Background(), k) + if err != nil { + return err + } + + if k == storageMigrationLock || k == vault.CoreLockPath || k == "" || strings.HasSuffix(k, "/") { + if entry == nil { + continue + } + return fmt.Errorf("key found that should have been excluded: %s", k) + } + + if k >= start { + if entry == nil { + return fmt.Errorf("key not found: %s", k) + } + if !bytes.Equal(v, entry.Value) { + return fmt.Errorf("values differ for key: %s", k) + } + } else { + if entry != nil { + return fmt.Errorf("found key the should have been skipped by start option: %s", k) + } + } + } + + return nil +} diff --git a/command/operator_raft.go b/command/operator_raft.go new file mode 100644 index 0000000..8720b78 --- /dev/null +++ b/command/operator_raft.go @@ -0,0 +1,54 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + + "github.com/mitchellh/cli" +) + +var _ cli.Command = (*OperatorRaftCommand)(nil) + +type OperatorRaftCommand struct { + *BaseCommand +} + +func (c *OperatorRaftCommand) Synopsis() string { + return "Interact with Vault's raft storage backend" +} + +func (c *OperatorRaftCommand) Help() string { + helpText := ` +Usage: vault operator raft [options] [args] + + This command groups subcommands for operators interacting with the Vault raft + storage backend. Most users will not need to interact with these commands. Here + are a few examples of the raft operator commands: + + Joins a node to the raft cluster: + + $ vault operator raft join https://127.0.0.1:8200 + + Returns the set of raft peers: + + $ vault operator raft list-peers + + Removes a node from the raft cluster: + + $ vault operator raft remove-peer + + Restores and saves snapshots from the raft cluster: + + $ vault operator raft snapshot save out.snap + + Please see the individual subcommand help for detailed usage information. +` + + return strings.TrimSpace(helpText) +} + +func (c *OperatorRaftCommand) Run(args []string) int { + return cli.RunResultHelp +} diff --git a/command/operator_raft_autopilot_get_config.go b/command/operator_raft_autopilot_get_config.go new file mode 100644 index 0000000..736469b --- /dev/null +++ b/command/operator_raft_autopilot_get_config.go @@ -0,0 +1,112 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*OperatorRaftAutopilotGetConfigCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorRaftAutopilotGetConfigCommand)(nil) +) + +type OperatorRaftAutopilotGetConfigCommand struct { + *BaseCommand + flagDRToken string +} + +func (c *OperatorRaftAutopilotGetConfigCommand) Synopsis() string { + return "Returns the configuration of the autopilot subsystem under integrated storage" +} + +func (c *OperatorRaftAutopilotGetConfigCommand) Help() string { + helpText := ` +Usage: vault operator raft autopilot get-config + + Returns the configuration of the autopilot subsystem under integrated storage. 
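+
+      $ vault operator raft autopilot get-config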
+` + c.Flags().Help()
+
+	return strings.TrimSpace(helpText)
+}
+
+func (c *OperatorRaftAutopilotGetConfigCommand) Flags() *FlagSets {
+	set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat)
+
+	f := set.NewFlagSet("Command Options")
+
+	f.StringVar(&StringVar{
+		Name:       "dr-token",
+		Target:     &c.flagDRToken,
+		Default:    "",
+		EnvVar:     "",
+		Completion: complete.PredictAnything,
+		Usage:      "DR operation token used to authorize this request (if a DR secondary node).",
+	})
+
+	return set
+}
+
+func (c *OperatorRaftAutopilotGetConfigCommand) AutocompleteArgs() complete.Predictor {
+	return complete.PredictAnything
+}
+
+func (c *OperatorRaftAutopilotGetConfigCommand) AutocompleteFlags() complete.Flags {
+	return c.Flags().Completions()
+}
+
+func (c *OperatorRaftAutopilotGetConfigCommand) Run(args []string) int {
+	f := c.Flags()
+
+	if err := f.Parse(args); err != nil {
+		c.UI.Error(err.Error())
+		return 1
+	}
+
+	args = f.Args()
+	switch len(args) {
+	case 0:
+	default:
+		c.UI.Error(fmt.Sprintf("Incorrect arguments (expected 0, got %d)", len(args)))
+		return 1
+	}
+
+	client, err := c.Client()
+	if err != nil {
+		c.UI.Error(err.Error())
+		return 2
+	}
+
+	var config *api.AutopilotConfig
+	switch {
+	case c.flagDRToken != "":
+		config, err = client.Sys().RaftAutopilotConfigurationWithDRToken(c.flagDRToken)
+	default:
+		config, err = client.Sys().RaftAutopilotConfiguration()
+	}
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("Error checking autopilot configuration: %s", err))
+		return 2
+	}
+
+	if config == nil {
+		return 0
+	}
+
+	if Format(c.UI) != "table" {
+		return OutputData(c.UI, config)
+	}
+
+	entries := []string{"Key | Value"}
+	entries = append(entries, fmt.Sprintf("%s | %t", "Cleanup Dead Servers", config.CleanupDeadServers))
+	entries = append(entries, fmt.Sprintf("%s | %s", "Last Contact Threshold", config.LastContactThreshold.String()))
+	entries = append(entries, fmt.Sprintf("%s | %s", "Dead Server Last Contact Threshold", config.DeadServerLastContactThreshold.String()))
+	entries = append(entries, fmt.Sprintf("%s | %s", "Server Stabilization Time", config.ServerStabilizationTime.String()))
+	entries = append(entries, fmt.Sprintf("%s | %d", "Min Quorum", config.MinQuorum))
+	entries = append(entries, fmt.Sprintf("%s | %d", "Max Trailing Logs", config.MaxTrailingLogs))
+	entries = append(entries, fmt.Sprintf("%s | %t", "Disable Upgrade Migration", config.DisableUpgradeMigration))
+
+	return OutputData(c.UI, entries)
+}
diff --git a/command/operator_raft_autopilot_set_config.go b/command/operator_raft_autopilot_set_config.go
new file mode 100644
index 0000000..3846670
--- /dev/null
+++ b/command/operator_raft_autopilot_set_config.go
@@ -0,0 +1,171 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + "time" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*OperatorRaftAutopilotSetConfigCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorRaftAutopilotSetConfigCommand)(nil) +) + +type OperatorRaftAutopilotSetConfigCommand struct { + *BaseCommand + flagCleanupDeadServers BoolPtr + flagLastContactThreshold time.Duration + flagDeadServerLastContactThreshold time.Duration + flagMaxTrailingLogs uint64 + flagMinQuorum uint + flagServerStabilizationTime time.Duration + flagDisableUpgradeMigration BoolPtr + flagDRToken string +} + +func (c *OperatorRaftAutopilotSetConfigCommand) Synopsis() string { + return "Modify the configuration of the autopilot subsystem under integrated storage" +} + +func (c *OperatorRaftAutopilotSetConfigCommand) Help() string { + helpText := ` +Usage: vault operator raft autopilot set-config [options] + + Modify the configuration of the autopilot subsystem under integrated storage. +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *OperatorRaftAutopilotSetConfigCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + f := set.NewFlagSet("Common Options") + + f.BoolPtrVar(&BoolPtrVar{ + Name: "cleanup-dead-servers", + Target: &c.flagCleanupDeadServers, + Usage: "Controls whether to remove dead servers from the Raft peer list periodically or when a new server joins.", + }) + + f.DurationVar(&DurationVar{ + Name: "last-contact-threshold", + Target: &c.flagLastContactThreshold, + Usage: "Limit on the amount of time a server can go without leader contact before being considered unhealthy.", + }) + + f.DurationVar(&DurationVar{ + Name: "dead-server-last-contact-threshold", + Target: &c.flagDeadServerLastContactThreshold, + Usage: "Limit on the amount of time a server can go without leader contact before being considered failed. This takes effect only when cleanup_dead_servers is set.", + }) + + f.Uint64Var(&Uint64Var{ + Name: "max-trailing-logs", + Target: &c.flagMaxTrailingLogs, + Usage: "Amount of entries in the Raft Log that a server can be behind before being considered unhealthy.", + }) + + f.UintVar(&UintVar{ + Name: "min-quorum", + Target: &c.flagMinQuorum, + Usage: "Minimum number of servers allowed in a cluster before autopilot can prune dead servers. 
This should at least be 3.", + }) + + f.DurationVar(&DurationVar{ + Name: "server-stabilization-time", + Target: &c.flagServerStabilizationTime, + Usage: "Minimum amount of time a server must be in a stable, healthy state before it can be added to the cluster.", + }) + + f.BoolPtrVar(&BoolPtrVar{ + Name: "disable-upgrade-migration", + Target: &c.flagDisableUpgradeMigration, + Usage: "Whether or not to perform automated version upgrades.", + }) + + f.StringVar(&StringVar{ + Name: "dr-token", + Target: &c.flagDRToken, + Default: "", + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "DR operation token used to authorize this request (if a DR secondary node).", + }) + + return set +} + +func (c *OperatorRaftAutopilotSetConfigCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictAnything +} + +func (c *OperatorRaftAutopilotSetConfigCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *OperatorRaftAutopilotSetConfigCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch len(args) { + case 0: + default: + c.UI.Error(fmt.Sprintf("Incorrect arguments (expected 0, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + data := make(map[string]interface{}) + if c.flagCleanupDeadServers.IsSet() { + data["cleanup_dead_servers"] = c.flagCleanupDeadServers.Get() + } + if c.flagMaxTrailingLogs > 0 { + data["max_trailing_logs"] = c.flagMaxTrailingLogs + } + if c.flagMinQuorum > 0 { + data["min_quorum"] = c.flagMinQuorum + } + if c.flagLastContactThreshold > 0 { + data["last_contact_threshold"] = c.flagLastContactThreshold.String() + } + if c.flagDeadServerLastContactThreshold > 0 { + data["dead_server_last_contact_threshold"] = c.flagDeadServerLastContactThreshold.String() + } + if c.flagServerStabilizationTime > 0 { + data["server_stabilization_time"] = c.flagServerStabilizationTime.String() + } + if c.flagDisableUpgradeMigration.IsSet() { + data["disable_upgrade_migration"] = c.flagDisableUpgradeMigration.Get() + } + if c.flagDRToken != "" { + data["dr_operation_token"] = c.flagDRToken + } + + secret, err := client.Logical().Write("sys/storage/raft/autopilot/configuration", data) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + if secret == nil { + return 0 + } + + return OutputSecret(c.UI, secret) +} diff --git a/command/operator_raft_autopilot_state.go b/command/operator_raft_autopilot_state.go new file mode 100644 index 0000000..8e7b670 --- /dev/null +++ b/command/operator_raft_autopilot_state.go @@ -0,0 +1,117 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "flag" + "fmt" + "strings" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*OperatorRaftAutopilotStateCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorRaftAutopilotStateCommand)(nil) +) + +type OperatorRaftAutopilotStateCommand struct { + *BaseCommand + flagDRToken string +} + +func (c *OperatorRaftAutopilotStateCommand) Synopsis() string { + return "Displays the state of the raft cluster under integrated storage as seen by autopilot" +} + +func (c *OperatorRaftAutopilotStateCommand) Help() string { + helpText := ` +Usage: vault operator raft autopilot state + + Displays the state of the raft cluster under integrated storage as seen by autopilot. +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *OperatorRaftAutopilotStateCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "dr-token", + Target: &c.flagDRToken, + Default: "", + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "DR operation token used to authorize this request (if a DR secondary node).", + }) + + // The output of the state endpoint contains nested values and is not fit for + // the default "table" display format. Override the default display format to + // "pretty", both in the flag and in the UI. + set.mainSet.VisitAll(func(fl *flag.Flag) { + if fl.Name == "format" { + fl.DefValue = "pretty" + } + }) + ui, ok := c.UI.(*VaultUI) + if ok && ui.format == "table" { + ui.format = "pretty" + } + return set +} + +func (c *OperatorRaftAutopilotStateCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictAnything +} + +func (c *OperatorRaftAutopilotStateCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *OperatorRaftAutopilotStateCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch len(args) { + case 0: + default: + c.UI.Error(fmt.Sprintf("Incorrect arguments (expected 0, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + var state *api.AutopilotState + switch { + case c.flagDRToken != "": + state, err = client.Sys().RaftAutopilotStateWithDRToken(c.flagDRToken) + default: + state, err = client.Sys().RaftAutopilotState() + } + + if err != nil { + c.UI.Error(fmt.Sprintf("Error checking autopilot state: %s", err)) + return 2 + } + + if state == nil { + return 0 + } + + return OutputData(c.UI, state) +} diff --git a/command/operator_raft_join.go b/command/operator_raft_join.go new file mode 100644 index 0000000..57e14a8 --- /dev/null +++ b/command/operator_raft_join.go @@ -0,0 +1,220 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*OperatorRaftJoinCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorRaftJoinCommand)(nil) +) + +type OperatorRaftJoinCommand struct { + flagRetry bool + flagNonVoter bool + flagLeaderCACert string + flagLeaderClientCert string + flagLeaderClientKey string + flagAutoJoinScheme string + flagAutoJoinPort uint + *BaseCommand +} + +func (c *OperatorRaftJoinCommand) Synopsis() string { + return "Joins a node to the Raft cluster" +} + +func (c *OperatorRaftJoinCommand) Help() string { + helpText := ` +Usage: vault operator raft join [options] + + Join the current node as a peer to the Raft cluster by providing the address + of the Raft leader node. + + $ vault operator raft join "http://127.0.0.2:8200" + + Join the current node as a peer to the Raft cluster by providing cloud auto-join + configuration. + + $ vault operator raft join "provider=aws region=eu-west-1 ..." + + Join the current node as a peer to the Raft cluster by providing cloud auto-join + configuration with an explicit URI scheme and port. + + $ vault operator raft join -auto-join-scheme="http" -auto-join-port=8201 \ + "provider=aws region=eu-west-1 ..." + + TLS certificate data can also be consumed from a file on disk by prefixing with + the "@" symbol. For example: + + $ vault operator raft join "http://127.0.0.2:8200" \ + -leader-ca-cert=@leader_ca.crt \ + -leader-client-cert=@leader_client.crt \ + -leader-client-key=@leader.key + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *OperatorRaftJoinCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "auto-join-scheme", + Target: &c.flagAutoJoinScheme, + Completion: complete.PredictNothing, + Default: "https", + Usage: "An optional URI protocol scheme used for addresses discovered via auto-join.", + }) + + f.UintVar(&UintVar{ + Name: "auto-join-port", + Target: &c.flagAutoJoinPort, + Completion: complete.PredictNothing, + Default: 8200, + Usage: "An optional port used for addresses discovered via auto-join.", + }) + + f.StringVar(&StringVar{ + Name: "leader-ca-cert", + Target: &c.flagLeaderCACert, + Completion: complete.PredictNothing, + Usage: "CA cert to use when verifying the Raft leader certificate.", + }) + + f.StringVar(&StringVar{ + Name: "leader-client-cert", + Target: &c.flagLeaderClientCert, + Completion: complete.PredictNothing, + Usage: "Client cert to use when authenticating with the Raft leader.", + }) + + f.StringVar(&StringVar{ + Name: "leader-client-key", + Target: &c.flagLeaderClientKey, + Completion: complete.PredictNothing, + Usage: "Client key to use when authenticating with the Raft leader.", + }) + + f.BoolVar(&BoolVar{ + Name: "retry", + Target: &c.flagRetry, + Default: false, + Usage: "Continuously retry joining the Raft cluster upon failures.", + }) + + f.BoolVar(&BoolVar{ + Name: "non-voter", + Target: &c.flagNonVoter, + Default: false, + Usage: "(Enterprise-only) This flag is used to make the server not participate in the Raft quorum, and have it only receive the data replication stream. 
This can be used to add read scalability to a cluster in cases where a high volume of reads to servers are needed.", + }) + + return set +} + +func (c *OperatorRaftJoinCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictAnything +} + +func (c *OperatorRaftJoinCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *OperatorRaftJoinCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + leaderInfo := "" + + args = f.Args() + switch len(args) { + case 0: + // No-op: This is acceptable if we're using raft for HA-only + case 1: + leaderInfo = strings.TrimSpace(args[0]) + default: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0-1, got %d)", len(args))) + return 1 + } + + leaderCACert, err := parseFlagFile(c.flagLeaderCACert) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to parse leader CA certificate: %s", err)) + return 1 + } + + leaderClientCert, err := parseFlagFile(c.flagLeaderClientCert) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to parse leader client certificate: %s", err)) + return 1 + } + + leaderClientKey, err := parseFlagFile(c.flagLeaderClientKey) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to parse leader client key: %s", err)) + return 1 + } + + if c.flagAutoJoinScheme != "" && (c.flagAutoJoinScheme != "http" && c.flagAutoJoinScheme != "https") { + c.UI.Error(fmt.Sprintf("invalid scheme %q; must either be http or https", c.flagAutoJoinScheme)) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + joinReq := &api.RaftJoinRequest{ + LeaderCACert: leaderCACert, + LeaderClientCert: leaderClientCert, + LeaderClientKey: leaderClientKey, + Retry: c.flagRetry, + NonVoter: c.flagNonVoter, + } + + if strings.Contains(leaderInfo, "provider=") { + joinReq.AutoJoin = leaderInfo + joinReq.AutoJoinScheme = c.flagAutoJoinScheme + joinReq.AutoJoinPort = c.flagAutoJoinPort + } else { + joinReq.LeaderAPIAddr = leaderInfo + } + + resp, err := client.Sys().RaftJoin(joinReq) + if err != nil { + c.UI.Error(fmt.Sprintf("Error joining the node to the Raft cluster: %s", err)) + return 2 + } + + switch Format(c.UI) { + case "table": + default: + return OutputData(c.UI, resp) + } + + out := []string{ + "Key | Value", + fmt.Sprintf("Joined | %t", resp.Joined), + } + c.UI.Output(tableOutput(out, nil)) + + return 0 +} diff --git a/command/operator_raft_listpeers.go b/command/operator_raft_listpeers.go new file mode 100644 index 0000000..4e82c15 --- /dev/null +++ b/command/operator_raft_listpeers.go @@ -0,0 +1,124 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*OperatorRaftListPeersCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorRaftListPeersCommand)(nil) +) + +type OperatorRaftListPeersCommand struct { + *BaseCommand + flagDRToken string +} + +func (c *OperatorRaftListPeersCommand) Synopsis() string { + return "Returns the Raft peer set" +} + +func (c *OperatorRaftListPeersCommand) Help() string { + helpText := ` +Usage: vault operator raft list-peers + + Provides the details of all the peers in the Raft cluster. + + $ vault operator raft list-peers + + Provides the details of all the peers in the Raft cluster of a DR secondary + cluster. 
This command should be invoked on the DR secondary nodes. + + $ vault operator raft list-peers -dr-token + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *OperatorRaftListPeersCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "dr-token", + Target: &c.flagDRToken, + Default: "", + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "DR operation token used to authorize this request (if a DR secondary node).", + }) + + return set +} + +func (c *OperatorRaftListPeersCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictAnything +} + +func (c *OperatorRaftListPeersCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *OperatorRaftListPeersCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + var secret *api.Secret + switch { + case c.flagDRToken != "": + secret, err = client.Logical().Write("sys/storage/raft/configuration", map[string]interface{}{ + "dr_operation_token": c.flagDRToken, + }) + default: + secret, err = client.Logical().Read("sys/storage/raft/configuration") + } + if err != nil { + c.UI.Error(fmt.Sprintf("Error reading the raft cluster configuration: %s", err)) + return 2 + } + if secret == nil { + c.UI.Error("No raft cluster configuration found") + return 2 + } + + if Format(c.UI) != "table" { + return OutputSecret(c.UI, secret) + } + + config := secret.Data["config"].(map[string]interface{}) + + servers := config["servers"].([]interface{}) + out := []string{"Node | Address | State | Voter"} + for _, serverRaw := range servers { + server := serverRaw.(map[string]interface{}) + state := "follower" + if server["leader"].(bool) { + state = "leader" + } + + out = append(out, fmt.Sprintf("%s | %s | %s | %t", server["node_id"].(string), server["address"].(string), state, server["voter"].(bool))) + } + + c.UI.Output(tableOutput(out, nil)) + return 0 +} diff --git a/command/operator_raft_remove_peer.go b/command/operator_raft_remove_peer.go new file mode 100644 index 0000000..84b516c --- /dev/null +++ b/command/operator_raft_remove_peer.go @@ -0,0 +1,108 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*OperatorRaftRemovePeerCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorRaftRemovePeerCommand)(nil) +) + +type OperatorRaftRemovePeerCommand struct { + *BaseCommand + + flagDRToken string +} + +func (c *OperatorRaftRemovePeerCommand) Synopsis() string { + return "Removes a node from the Raft cluster" +} + +func (c *OperatorRaftRemovePeerCommand) Help() string { + helpText := ` +Usage: vault operator raft remove-peer + + Removes a node from the Raft cluster. 
+ + $ vault operator raft remove-peer node1 + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *OperatorRaftRemovePeerCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "dr-token", + Target: &c.flagDRToken, + Default: "", + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "DR operation token used to authorize this request (if a DR secondary node).", + }) + + return set +} + +func (c *OperatorRaftRemovePeerCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictAnything +} + +func (c *OperatorRaftRemovePeerCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *OperatorRaftRemovePeerCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + serverID := "" + + args = f.Args() + switch len(args) { + case 1: + serverID = strings.TrimSpace(args[0]) + default: + c.UI.Error(fmt.Sprintf("Incorrect arguments (expected 1, got %d)", len(args))) + return 1 + } + + if len(serverID) == 0 { + c.UI.Error("Server id is required") + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + _, err = client.Logical().Write("sys/storage/raft/remove-peer", map[string]interface{}{ + "server_id": serverID, + "dr_operation_token": c.flagDRToken, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error removing the peer from raft cluster: %s", err)) + return 2 + } + + c.UI.Output("Peer removed successfully!") + + return 0 +} diff --git a/command/operator_raft_snapshot.go b/command/operator_raft_snapshot.go new file mode 100644 index 0000000..500b442 --- /dev/null +++ b/command/operator_raft_snapshot.go @@ -0,0 +1,50 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + + "github.com/mitchellh/cli" +) + +var _ cli.Command = (*OperatorRaftSnapshotCommand)(nil) + +type OperatorRaftSnapshotCommand struct { + *BaseCommand +} + +func (c *OperatorRaftSnapshotCommand) Synopsis() string { + return "Restores and saves snapshots from the Raft cluster" +} + +func (c *OperatorRaftSnapshotCommand) Help() string { + helpText := ` +Usage: vault operator raft snapshot [options] [args] + + This command groups subcommands for operators interacting with the snapshot + functionality of the integrated Raft storage backend. Here are a few examples of + the Raft snapshot operator commands: + + Installs the provided snapshot, returning the cluster to the state defined in it: + + $ vault operator raft snapshot restore raft.snap + + Saves a snapshot of the current state of the Raft cluster into a file: + + $ vault operator raft snapshot save raft.snap + + Inspects a snapshot based on a file: + + $ vault operator raft snapshot inspect raft.snap + + Please see the individual subcommand help for detailed usage information. +` + + return strings.TrimSpace(helpText) +} + +func (c *OperatorRaftSnapshotCommand) Run(args []string) int { + return cli.RunResultHelp +} diff --git a/command/operator_raft_snapshot_inspect.go b/command/operator_raft_snapshot_inspect.go new file mode 100644 index 0000000..a64c5ba --- /dev/null +++ b/command/operator_raft_snapshot_inspect.go @@ -0,0 +1,568 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/gzip" + "crypto/sha256" + "encoding/json" + "fmt" + "hash" + "io" + "math" + "os" + "sort" + "strconv" + "strings" + "text/tabwriter" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/raft" + protoio "github.com/hashicorp/vault/physical/raft" + "github.com/hashicorp/vault/sdk/plugin/pb" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*OperatorRaftSnapshotInspectCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorRaftSnapshotInspectCommand)(nil) +) + +type OperatorRaftSnapshotInspectCommand struct { + *BaseCommand + details bool + depth int + filter string +} + +func (c *OperatorRaftSnapshotInspectCommand) Synopsis() string { + return "Inspects raft snapshot" +} + +func (c *OperatorRaftSnapshotInspectCommand) Help() string { + helpText := ` + Usage: vault operator raft snapshot inspect + + Inspects a snapshot file. + + $ vault operator raft snapshot inspect raft.snap + + ` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *OperatorRaftSnapshotInspectCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + f := set.NewFlagSet("Command Options") + + f.BoolVar(&BoolVar{ + Name: "details", + Target: &c.details, + Default: true, + Usage: "Provides information about usage for data stored in the snapshot.", + }) + + f.IntVar(&IntVar{ + Name: "depth", + Target: &c.depth, + Default: 2, + Usage: "Can only be used with -details. The key prefix depth used to breakdown KV store data. If set to 0, all keys will be returned. Defaults to 2.", + }) + + f.StringVar(&StringVar{ + Name: "filter", + Target: &c.filter, + Default: "", + Usage: "Can only be used with -details. Limits the key breakdown using this prefix filter.", + }) + + return set +} + +func (c *OperatorRaftSnapshotInspectCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictAnything +} + +func (c *OperatorRaftSnapshotInspectCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +type OutputFormat struct { + Meta *MetadataInfo + StatsKV []typeStats + TotalCountKV int + TotalSizeKV int +} + +// SnapshotInfo is used for passing snapshot stat +// information between functions +type SnapshotInfo struct { + Meta MetadataInfo + StatsKV map[string]typeStats + TotalCountKV int + TotalSizeKV int +} + +type MetadataInfo struct { + ID string + Size int64 + Index uint64 + Term uint64 + Version raft.SnapshotVersion +} + +type typeStats struct { + Name string + Count int + Size int +} + +func (c *OperatorRaftSnapshotInspectCommand) Run(args []string) int { + flags := c.Flags() + + if err := flags.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + // Validate flags + if c.depth < 0 { + c.UI.Error("Depth must be equal to or greater than 0") + return 1 + } + + var file string + args = c.flags.Args() + + switch len(args) { + case 0: + c.UI.Error("Missing FILE argument") + return 1 + case 1: + file = args[0] + default: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + // Open the file. 
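+	// The handle is passed straight to c.Read below, which layers gzip and
+	// tar readers on top of it; the deferred Close releases it once
+	// inspection completes.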
+ f, err := os.Open(file) + if err != nil { + c.UI.Error(fmt.Sprintf("Error opening snapshot file: %s", err)) + return 1 + } + defer f.Close() + + // Extract metadata and snapshot info from snapshot file + var info *SnapshotInfo + var meta *raft.SnapshotMeta + info, meta, err = c.Read(hclog.New(nil), f) + if err != nil { + c.UI.Error(fmt.Sprintf("Error reading snapshot: %s", err)) + return 1 + } + + if info == nil { + c.UI.Error(fmt.Sprintf("Error calculating snapshot info: %s", err)) + return 1 + } + + // Generate structs for the formatter with information we read in + metaformat := &MetadataInfo{ + ID: meta.ID, + Size: meta.Size, + Index: meta.Index, + Term: meta.Term, + Version: meta.Version, + } + + formattedStatsKV := generateKVStats(*info) + + data := &OutputFormat{ + Meta: metaformat, + StatsKV: formattedStatsKV, + TotalCountKV: info.TotalCountKV, + TotalSizeKV: info.TotalSizeKV, + } + + if Format(c.UI) != "table" { + return OutputData(c.UI, data) + } + + tableData, err := formatTable(data) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + c.UI.Output(tableData) + + return 0 +} + +func (c *OperatorRaftSnapshotInspectCommand) kvEnhance(val *pb.StorageEntry, info *SnapshotInfo, read int) { + if !c.details { + return + } + + if val.Key == "" { + return + } + + // check for whether a filter is specified. if it is, skip + // any keys that don't match. + if len(c.filter) > 0 && !strings.HasPrefix(val.Key, c.filter) { + return + } + + split := strings.Split(val.Key, "/") + + // handle the situation where the key is shorter than + // the specified depth. + actualDepth := c.depth + if c.depth == 0 || c.depth > len(split) { + actualDepth = len(split) + } + + prefix := strings.Join(split[0:actualDepth], "/") + kvs := info.StatsKV[prefix] + if kvs.Name == "" { + kvs.Name = prefix + } + + kvs.Count++ + kvs.Size += read + info.TotalCountKV++ + info.TotalSizeKV += read + info.StatsKV[prefix] = kvs +} + +// Read from snapshot's state.bin and update the SnapshotInfo struct +func (c *OperatorRaftSnapshotInspectCommand) parseState(r io.Reader) (SnapshotInfo, error) { + info := SnapshotInfo{ + StatsKV: make(map[string]typeStats), + } + + protoReader := protoio.NewDelimitedReader(r, math.MaxInt32) + + for { + s := new(pb.StorageEntry) + if err := protoReader.ReadMsg(s); err != nil { + if err == io.EOF { + break + } + return info, err + } + size := protoReader.GetLastReadSize() + c.kvEnhance(s, &info, size) + } + + return info, nil +} + +// Read contents of snapshot. Parse metadata and snapshot info +// Also, verify validity of snapshot +func (c *OperatorRaftSnapshotInspectCommand) Read(logger hclog.Logger, in io.Reader) (*SnapshotInfo, *raft.SnapshotMeta, error) { + // Wrap the reader in a gzip decompressor. + decomp, err := gzip.NewReader(in) + if err != nil { + return nil, nil, fmt.Errorf("failed to decompress snapshot: %v", err) + } + + defer func() { + if decomp == nil { + return + } + + if err := decomp.Close(); err != nil { + logger.Error("Failed to close snapshot decompressor", "error", err) + } + }() + + // Read the archive. 
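+	// read walks the tar entries (meta.json, state.bin, SHA256SUMS) and
+	// checks their hashes; concludeGzipRead then verifies that the gzip
+	// stream was fully consumed and that its checksum was valid.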
+ snapshotInfo, metadata, err := c.read(decomp) + if err != nil { + return nil, nil, fmt.Errorf("failed to read snapshot file: %v", err) + } + + if err := concludeGzipRead(decomp); err != nil { + return nil, nil, err + } + + if err := decomp.Close(); err != nil { + return nil, nil, err + } + decomp = nil + return snapshotInfo, metadata, nil +} + +func formatTable(info *OutputFormat) (string, error) { + var b bytes.Buffer + tw := tabwriter.NewWriter(&b, 8, 8, 6, ' ', 0) + + fmt.Fprintf(tw, " ID\t%s", info.Meta.ID) + fmt.Fprintf(tw, "\n Size\t%d", info.Meta.Size) + fmt.Fprintf(tw, "\n Index\t%d", info.Meta.Index) + fmt.Fprintf(tw, "\n Term\t%d", info.Meta.Term) + fmt.Fprintf(tw, "\n Version\t%d", info.Meta.Version) + fmt.Fprintf(tw, "\n") + + if info.StatsKV != nil { + fmt.Fprintf(tw, "\n") + fmt.Fprintln(tw, "\n Key Name\tCount\tSize") + fmt.Fprintf(tw, " %s\t%s\t%s", "----", "----", "----") + + for _, s := range info.StatsKV { + fmt.Fprintf(tw, "\n %s\t%d\t%s", s.Name, s.Count, ByteSize(uint64(s.Size))) + } + + fmt.Fprintf(tw, "\n %s\t%s", "----", "----") + fmt.Fprintf(tw, "\n Total Size\t\t%s", ByteSize(uint64(info.TotalSizeKV))) + } + + if err := tw.Flush(); err != nil { + return b.String(), err + } + + return b.String(), nil +} + +const ( + BYTE = 1 << (10 * iota) + KILOBYTE + MEGABYTE + GIGABYTE + TERABYTE +) + +func ByteSize(bytes uint64) string { + unit := "" + value := float64(bytes) + + switch { + case bytes >= TERABYTE: + unit = "TB" + value = value / TERABYTE + case bytes >= GIGABYTE: + unit = "GB" + value = value / GIGABYTE + case bytes >= MEGABYTE: + unit = "MB" + value = value / MEGABYTE + case bytes >= KILOBYTE: + unit = "KB" + value = value / KILOBYTE + case bytes >= BYTE: + unit = "B" + case bytes == 0: + return "0" + } + + result := strconv.FormatFloat(value, 'f', 1, 64) + result = strings.TrimSuffix(result, ".0") + return result + unit +} + +// sortTypeStats sorts the stat slice by count and then +// alphabetically in the case the counts are equal +func sortTypeStats(stats []typeStats) []typeStats { + // sort alphabetically if size is equal + sort.Slice(stats, func(i, j int) bool { + // Sort alphabetically if count is equal + if stats[i].Count == stats[j].Count { + return stats[i].Name < stats[j].Name + } + return stats[i].Count > stats[j].Count + }) + + return stats +} + +// generateKVStats reformats the KV stats to work with +// the output struct that's used to produce the printed +// output the user sees. +func generateKVStats(info SnapshotInfo) []typeStats { + kvLen := len(info.StatsKV) + if kvLen > 0 { + ks := make([]typeStats, 0, kvLen) + + for _, s := range info.StatsKV { + ks = append(ks, s) + } + + ks = sortTypeStats(ks) + + return ks + } + + return nil +} + +// hashList manages a list of filenames and their hashes. +type hashList struct { + hashes map[string]hash.Hash +} + +// newHashList returns a new hashList. +func newHashList() *hashList { + return &hashList{ + hashes: make(map[string]hash.Hash), + } +} + +// Add creates a new hash for the given file. +func (hl *hashList) Add(file string) hash.Hash { + if existing, ok := hl.hashes[file]; ok { + return existing + } + + h := sha256.New() + hl.hashes[file] = h + return h +} + +// Encode takes the current sum of all the hashes and saves the hash list as a +// SHA256SUMS-style text file. 
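+// Each entry is written as "<hex-digest> <filename>" on its own line, for
+// example (digests shortened for illustration):
+//
+//	9f86d081... meta.json
+//	ab12cd34... state.bin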
+func (hl *hashList) Encode(w io.Writer) error {
+	for file, h := range hl.hashes {
+		if _, err := fmt.Fprintf(w, "%x %s\n", h.Sum([]byte{}), file); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// DecodeAndVerify reads a SHA256SUMS-style text file and checks the results
+// against the current sums for all the hashes.
+func (hl *hashList) DecodeAndVerify(r io.Reader) error {
+	// Read the file and make sure everything in there has a matching hash.
+	seen := make(map[string]struct{})
+	s := bufio.NewScanner(r)
+	for s.Scan() {
+		sha := make([]byte, sha256.Size)
+		var file string
+		if _, err := fmt.Sscanf(s.Text(), "%x %s", &sha, &file); err != nil {
+			return err
+		}
+
+		h, ok := hl.hashes[file]
+		if !ok {
+			return fmt.Errorf("list missing hash for %q", file)
+		}
+		if !bytes.Equal(sha, h.Sum([]byte{})) {
+			return fmt.Errorf("hash check failed for %q", file)
+		}
+		seen[file] = struct{}{}
+	}
+	if err := s.Err(); err != nil {
+		return err
+	}
+
+	// Make sure everything we had a hash for was seen.
+	for file := range hl.hashes {
+		if _, ok := seen[file]; !ok {
+			return fmt.Errorf("file missing for %q", file)
+		}
+	}
+
+	return nil
+}
+
+// read takes a reader and extracts the snapshot metadata and snapshot
+// info. It also checks the integrity of the snapshot data.
+func (c *OperatorRaftSnapshotInspectCommand) read(in io.Reader) (*SnapshotInfo, *raft.SnapshotMeta, error) {
+	// Start a new tar reader.
+	archive := tar.NewReader(in)
+
+	// Create a hash list that we will use to compare with the SHA256SUMS
+	// file in the archive.
+	hl := newHashList()
+
+	// Populate the hashes for all the files we expect to see. The check at
+	// the end will make sure these are all present in the SHA256SUMS file
+	// and that the hashes match.
+	metaHash := hl.Add("meta.json")
+	snapHash := hl.Add("state.bin")
+
+	// Look through the archive for the pieces we care about.
+	var shaBuffer bytes.Buffer
+	var snapshotInfo SnapshotInfo
+	var metadata raft.SnapshotMeta
+	for {
+		hdr, err := archive.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return nil, nil, fmt.Errorf("failed reading snapshot: %v", err)
+		}
+
+		switch hdr.Name {
+		case "meta.json":
+			// Previously we used json.Decode to decode the archive stream. There are
+			// edge cases in which it doesn't read all the bytes from the stream, even
+			// though the json object is still being parsed properly. Since we
+			// simultaneously fed everything to metaHash, our hash ended up being
+			// different than what we calculated when creating the snapshot, which in
+			// turn made the snapshot verification fail. By explicitly reading the
+			// whole thing first we ensure that we calculate the correct hash
+			// independent of how json.Decode works internally.
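+			// io.TeeReader mirrors every byte read from the archive into
+			// metaHash, so the metadata is hashed and buffered in a single
+			// pass.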
+ buf, err := io.ReadAll(io.TeeReader(archive, metaHash)) + if err != nil { + return nil, nil, fmt.Errorf("failed to read snapshot metadata: %v", err) + } + if err := json.Unmarshal(buf, &metadata); err != nil { + return nil, nil, fmt.Errorf("failed to decode snapshot metadata: %v", err) + } + case "state.bin": + // create reader that writes to snapHash what it reads from archive + wrappedReader := io.TeeReader(archive, snapHash) + var err error + snapshotInfo, err = c.parseState(wrappedReader) + if err != nil { + return nil, nil, fmt.Errorf("error parsing snapshot state: %v", err) + } + + case "SHA256SUMS": + if _, err := io.CopyN(&shaBuffer, archive, 10000); err != nil && err != io.EOF { + return nil, nil, fmt.Errorf("failed to read snapshot hashes: %v", err) + } + + case "SHA256SUMS.sealed": + // Add verification of sealed sum in future + continue + + default: + return nil, nil, fmt.Errorf("unexpected file %q in snapshot", hdr.Name) + } + } + + // Verify all the hashes. + if err := hl.DecodeAndVerify(&shaBuffer); err != nil { + return nil, nil, fmt.Errorf("failed checking integrity of snapshot: %v", err) + } + + return &snapshotInfo, &metadata, nil +} + +// concludeGzipRead should be invoked after you think you've consumed all of +// the data from the gzip stream. It will error if the stream was corrupt. +// +// The docs for gzip.Reader say: "Clients should treat data returned by Read as +// tentative until they receive the io.EOF marking the end of the data." +func concludeGzipRead(decomp *gzip.Reader) error { + extra, err := io.ReadAll(decomp) // ReadAll consumes the EOF + if err != nil { + return err + } + if len(extra) != 0 { + return fmt.Errorf("%d unread uncompressed bytes remain", len(extra)) + } + return nil +} diff --git a/command/operator_raft_snapshot_inspect_test.go b/command/operator_raft_snapshot_inspect_test.go new file mode 100644 index 0000000..de30659 --- /dev/null +++ b/command/operator_raft_snapshot_inspect_test.go @@ -0,0 +1,141 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "context" + "fmt" + "os" + "strings" + "testing" + + "github.com/hashicorp/vault/physical/raft" + "github.com/hashicorp/vault/sdk/physical" + "github.com/mitchellh/cli" +) + +func testOperatorRaftSnapshotInspectCommand(tb testing.TB) (*cli.MockUi, *OperatorRaftSnapshotInspectCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &OperatorRaftSnapshotInspectCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func createSnapshot(tb testing.TB) (*os.File, func(), error) { + // Create new raft backend + r, raftDir := raft.GetRaft(tb, true, false) + defer os.RemoveAll(raftDir) + + // Write some data + for i := 0; i < 100; i++ { + err := r.Put(context.Background(), &physical.Entry{ + Key: fmt.Sprintf("key-%d", i), + Value: []byte(fmt.Sprintf("value-%d", i)), + }) + if err != nil { + return nil, nil, fmt.Errorf("Error adding data to snapshot %s", err) + } + } + + // Create temporary file to save snapshot to + snap, err := os.CreateTemp("", "temp_snapshot.snap") + if err != nil { + return nil, nil, fmt.Errorf("Error creating temporary file %s", err) + } + + cleanup := func() { + err := os.RemoveAll(snap.Name()) + if err != nil { + tb.Errorf("Error deleting temporary snapshot %s", err) + } + } + + // Save snapshot + err = r.Snapshot(snap, nil) + if err != nil { + return nil, nil, fmt.Errorf("Error saving raft snapshot %s", err) + } + + return snap, cleanup, nil +} + +func TestOperatorRaftSnapshotInspectCommand_Run(t *testing.T) { + t.Parallel() + + file1, cleanup1, err := createSnapshot(t) + if err != nil { + t.Fatalf("Error creating snapshot %s", err) + } + + file2, cleanup2, err := createSnapshot(t) + if err != nil { + t.Fatalf("Error creating snapshot %s", err) + } + + cases := []struct { + name string + args []string + out string + code int + cleanup func() + }{ + { + "too_many_args", + []string{"test.snap", "test"}, + "Too many arguments", + 1, + nil, + }, + { + "default", + []string{file1.Name()}, + "ID bolt-snapshot", + 0, + cleanup1, + }, + { + "all_flags", + []string{"-details", "-depth", "10", "-filter", "key", file2.Name()}, + "Key Name", + 0, + cleanup2, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testOperatorRaftSnapshotInspectCommand(t) + + cmd.client = client + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + + if tc.cleanup != nil { + tc.cleanup() + } + }) + } + }) +} diff --git a/command/operator_raft_snapshot_restore.go b/command/operator_raft_snapshot_restore.go new file mode 100644 index 0000000..6067adc --- /dev/null +++ b/command/operator_raft_snapshot_restore.go @@ -0,0 +1,109 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/mitchellh/cli"
+	"github.com/posener/complete"
+)
+
+var (
+	_ cli.Command             = (*OperatorRaftSnapshotRestoreCommand)(nil)
+	_ cli.CommandAutocomplete = (*OperatorRaftSnapshotRestoreCommand)(nil)
+)
+
+type OperatorRaftSnapshotRestoreCommand struct {
+	flagForce bool
+	*BaseCommand
+}
+
+func (c *OperatorRaftSnapshotRestoreCommand) Synopsis() string {
+	return "Installs the provided snapshot, returning the cluster to the state defined in it"
+}
+
+func (c *OperatorRaftSnapshotRestoreCommand) Help() string {
+	helpText := `
+Usage: vault operator raft snapshot restore <snapshot_file>
+
+  Installs the provided snapshot, returning the cluster to the state defined in it.
+
+  $ vault operator raft snapshot restore raft.snap
+
+` + c.Flags().Help()
+
+	return strings.TrimSpace(helpText)
+}
+
+func (c *OperatorRaftSnapshotRestoreCommand) Flags() *FlagSets {
+	set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat)
+
+	f := set.NewFlagSet("Command Options")
+
+	f.BoolVar(&BoolVar{
+		Name:    "force",
+		Target:  &c.flagForce,
+		Default: false,
+		Usage:   "This bypasses checks ensuring the auto-unseal or Shamir keys are consistent with the snapshot data.",
+	})
+
+	return set
+}
+
+func (c *OperatorRaftSnapshotRestoreCommand) AutocompleteArgs() complete.Predictor {
+	return complete.PredictAnything
+}
+
+func (c *OperatorRaftSnapshotRestoreCommand) AutocompleteFlags() complete.Flags {
+	return c.Flags().Completions()
+}
+
+func (c *OperatorRaftSnapshotRestoreCommand) Run(args []string) int {
+	f := c.Flags()
+
+	if err := f.Parse(args); err != nil {
+		c.UI.Error(err.Error())
+		return 1
+	}
+
+	snapFile := ""
+
+	args = f.Args()
+	switch len(args) {
+	case 1:
+		snapFile = strings.TrimSpace(args[0])
+	default:
+		c.UI.Error(fmt.Sprintf("Incorrect arguments (expected 1, got %d)", len(args)))
+		return 1
+	}
+
+	if len(snapFile) == 0 {
+		c.UI.Error("Snapshot file name is required")
+		return 1
+	}
+
+	snapReader, err := os.Open(snapFile)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("Error opening snapshot file: %s", err))
+		return 2
+	}
+	defer snapReader.Close()
+
+	client, err := c.Client()
+	if err != nil {
+		c.UI.Error(err.Error())
+		return 2
+	}
+
+	err = client.Sys().RaftSnapshotRestore(snapReader, c.flagForce)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("Error installing the snapshot: %s", err))
+		return 2
+	}
+
+	return 0
+}
diff --git a/command/operator_raft_snapshot_save.go b/command/operator_raft_snapshot_save.go
new file mode 100644
index 0000000..2abbb0a
--- /dev/null
+++ b/command/operator_raft_snapshot_save.go
@@ -0,0 +1,129 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"strings"
+
+	"github.com/mitchellh/cli"
+	"github.com/posener/complete"
+)
+
+var (
+	_ cli.Command             = (*OperatorRaftSnapshotSaveCommand)(nil)
+	_ cli.CommandAutocomplete = (*OperatorRaftSnapshotSaveCommand)(nil)
+)
+
+type OperatorRaftSnapshotSaveCommand struct {
+	*BaseCommand
+}
+
+func (c *OperatorRaftSnapshotSaveCommand) Synopsis() string {
+	return "Saves a snapshot of the current state of the Raft cluster into a file"
+}
+
+func (c *OperatorRaftSnapshotSaveCommand) Help() string {
+	helpText := `
+Usage: vault operator raft snapshot save <snapshot_file>
+
+  Saves a snapshot of the current state of the Raft cluster into a file.
+ + $ vault operator raft snapshot save raft.snap + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *OperatorRaftSnapshotSaveCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + return set +} + +func (c *OperatorRaftSnapshotSaveCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictAnything +} + +func (c *OperatorRaftSnapshotSaveCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *OperatorRaftSnapshotSaveCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + path := "" + + args = f.Args() + switch len(args) { + case 1: + path = strings.TrimSpace(args[0]) + default: + c.UI.Error(fmt.Sprintf("Incorrect arguments (expected 1, got %d)", len(args))) + return 1 + } + + if len(path) == 0 { + c.UI.Error("Output file name is required") + return 1 + } + + w := &lazyOpenWriter{ + openFunc: func() (io.WriteCloser, error) { + return os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o600) + }, + } + + client, err := c.Client() + if err != nil { + w.Close() + c.UI.Error(err.Error()) + return 2 + } + + err = client.Sys().RaftSnapshot(w) + if err != nil { + w.Close() + c.UI.Error(fmt.Sprintf("Error taking the snapshot: %s", err)) + return 2 + } + + err = w.Close() + if err != nil { + c.UI.Error(fmt.Sprintf("Error taking the snapshot: %s", err)) + return 2 + } + return 0 +} + +type lazyOpenWriter struct { + openFunc func() (io.WriteCloser, error) + writer io.WriteCloser +} + +func (l *lazyOpenWriter) Write(p []byte) (n int, err error) { + if l.writer == nil { + var err error + l.writer, err = l.openFunc() + if err != nil { + return 0, err + } + } + return l.writer.Write(p) +} + +func (l *lazyOpenWriter) Close() error { + if l.writer != nil { + return l.writer.Close() + } + return nil +} diff --git a/command/operator_rekey.go b/command/operator_rekey.go new file mode 100644 index 0000000..dde0e58 --- /dev/null +++ b/command/operator_rekey.go @@ -0,0 +1,786 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "fmt" + "io" + "os" + "strings" + + "github.com/fatih/structs" + "github.com/hashicorp/go-secure-stdlib/password" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/pgpkeys" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*OperatorRekeyCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorRekeyCommand)(nil) +) + +const ( + keyTypeRecovery = "Recovery" + keyTypeUnseal = "Unseal" +) + +type OperatorRekeyCommand struct { + *BaseCommand + + flagCancel bool + flagInit bool + flagKeyShares int + flagKeyThreshold int + flagNonce string + flagPGPKeys []string + flagStatus bool + flagTarget string + flagVerify bool + + // Backup options + flagBackup bool + flagBackupDelete bool + flagBackupRetrieve bool + + testStdin io.Reader // for tests +} + +func (c *OperatorRekeyCommand) Synopsis() string { + return "Generates new unseal keys" +} + +func (c *OperatorRekeyCommand) Help() string { + helpText := ` +Usage: vault operator rekey [options] [KEY] + + Generates a new set of unseal keys. This can optionally change the total + number of key shares or the required threshold of those key shares to + reconstruct the root key. This operation is zero downtime, but it requires + the Vault is unsealed and a quorum of existing unseal keys are provided. 
+ + An unseal key may be provided directly on the command line as an argument to + the command. If key is specified as "-", the command will read from stdin. If + a TTY is available, the command will prompt for text. + + If the flag -target=recovery is supplied, then this operation will require a + quorum of recovery keys in order to generate a new set of recovery keys. + + Initialize a rekey: + + $ vault operator rekey \ + -init \ + -key-shares=15 \ + -key-threshold=9 + + Rekey and encrypt the resulting unseal keys with PGP: + + $ vault operator rekey \ + -init \ + -key-shares=3 \ + -key-threshold=2 \ + -pgp-keys="keybase:hashicorp,keybase:jefferai,keybase:sethvargo" + + Store encrypted PGP keys in Vault's core: + + $ vault operator rekey \ + -init \ + -pgp-keys="..." \ + -backup + + Retrieve backed-up unseal keys: + + $ vault operator rekey -backup-retrieve + + Delete backed-up unseal keys: + + $ vault operator rekey -backup-delete + +` + c.Flags().Help() + return strings.TrimSpace(helpText) +} + +func (c *OperatorRekeyCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + f := set.NewFlagSet("Common Options") + + f.BoolVar(&BoolVar{ + Name: "init", + Target: &c.flagInit, + Default: false, + Usage: "Initialize the rekeying operation. This can only be done if no " + + "rekeying operation is in progress. Customize the new number of key " + + "shares and key threshold using the -key-shares and -key-threshold " + + "flags.", + }) + + f.BoolVar(&BoolVar{ + Name: "cancel", + Target: &c.flagCancel, + Default: false, + Usage: "Reset the rekeying progress. This will discard any submitted " + + "unseal keys, recovery keys, or configuration.", + }) + + f.BoolVar(&BoolVar{ + Name: "status", + Target: &c.flagStatus, + Default: false, + Usage: "Print the status of the current attempt without providing an " + + "unseal or recovery key.", + }) + + f.IntVar(&IntVar{ + Name: "key-shares", + Aliases: []string{"n"}, + Target: &c.flagKeyShares, + Default: 5, + Completion: complete.PredictAnything, + Usage: "Number of key shares to split the generated root key into. " + + "This is the number of \"unseal keys\" or \"recovery keys\" to generate.", + }) + + f.IntVar(&IntVar{ + Name: "key-threshold", + Aliases: []string{"t"}, + Target: &c.flagKeyThreshold, + Default: 3, + Completion: complete.PredictAnything, + Usage: "Number of key shares required to reconstruct the root key. " + + "This must be less than or equal to -key-shares.", + }) + + f.StringVar(&StringVar{ + Name: "nonce", + Target: &c.flagNonce, + Default: "", + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "Nonce value provided at initialization. The same nonce value " + + "must be provided with each unseal or recovery key.", + }) + + f.StringVar(&StringVar{ + Name: "target", + Target: &c.flagTarget, + Default: "barrier", + EnvVar: "", + Completion: complete.PredictSet("barrier", "recovery"), + Usage: "Target for rekeying. 
\"recovery\" only applies when HSM support " + + "is enabled.", + }) + + f.BoolVar(&BoolVar{ + Name: "verify", + Target: &c.flagVerify, + Default: false, + Usage: "Indicates that the action (-status, -cancel, or providing a key " + + "share) will be affecting verification for the current rekey " + + "attempt.", + }) + + f.VarFlag(&VarFlag{ + Name: "pgp-keys", + Value: (*pgpkeys.PubKeyFilesFlag)(&c.flagPGPKeys), + Completion: complete.PredictAnything, + Usage: "Comma-separated list of paths to files on disk containing " + + "public PGP keys OR a comma-separated list of Keybase usernames using " + + "the format \"keybase:\". When supplied, the generated " + + "unseal or recovery keys will be encrypted and base64-encoded in the order " + + "specified in this list.", + }) + + f = set.NewFlagSet("Backup Options") + + f.BoolVar(&BoolVar{ + Name: "backup", + Target: &c.flagBackup, + Default: false, + Usage: "Store a backup of the current PGP encrypted unseal or recovery keys in " + + "Vault's core. The encrypted values can be recovered in the event of " + + "failure or discarded after success. See the -backup-delete and " + + "-backup-retrieve options for more information. This option only " + + "applies when the existing unseal or recovery keys were PGP encrypted.", + }) + + f.BoolVar(&BoolVar{ + Name: "backup-delete", + Target: &c.flagBackupDelete, + Default: false, + Usage: "Delete any stored backup unseal or recovery keys.", + }) + + f.BoolVar(&BoolVar{ + Name: "backup-retrieve", + Target: &c.flagBackupRetrieve, + Default: false, + Usage: "Retrieve the backed-up unseal or recovery keys. This option is only available " + + "if the PGP keys were provided and the backup has not been deleted.", + }) + + return set +} + +func (c *OperatorRekeyCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictAnything +} + +func (c *OperatorRekeyCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *OperatorRekeyCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + if len(args) > 1 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0-1, got %d)", len(args))) + return 1 + } + + // Create the client + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + switch { + case c.flagBackupDelete: + return c.backupDelete(client) + case c.flagBackupRetrieve: + return c.backupRetrieve(client) + case c.flagCancel: + return c.cancel(client) + case c.flagInit: + return c.init(client) + case c.flagStatus: + return c.status(client) + default: + // If there are no other flags, prompt for an unseal key. + key := "" + if len(args) > 0 { + key = strings.TrimSpace(args[0]) + } + return c.provide(client, key) + } +} + +// init starts the rekey process. 
+func (c *OperatorRekeyCommand) init(client *api.Client) int {
+	// Handle the different API requests
+	var fn func(*api.RekeyInitRequest) (*api.RekeyStatusResponse, error)
+	keyTypeRequired := keyTypeUnseal
+	switch strings.ToLower(strings.TrimSpace(c.flagTarget)) {
+	case "barrier":
+		fn = client.Sys().RekeyInit
+	case "recovery", "hsm":
+		keyTypeRequired = keyTypeRecovery
+		fn = client.Sys().RekeyRecoveryKeyInit
+	default:
+		c.UI.Error(fmt.Sprintf("Unknown target: %s", c.flagTarget))
+		return 1
+	}
+
+	// Make the request
+	status, err := fn(&api.RekeyInitRequest{
+		SecretShares:        c.flagKeyShares,
+		SecretThreshold:     c.flagKeyThreshold,
+		PGPKeys:             c.flagPGPKeys,
+		Backup:              c.flagBackup,
+		RequireVerification: c.flagVerify,
+	})
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("Error initializing rekey: %s", err))
+		return 2
+	}
+
+	// Print warnings about recovery, etc.
+	if len(c.flagPGPKeys) == 0 {
+		if Format(c.UI) == "table" {
+			c.UI.Warn(wrapAtLength(
+				fmt.Sprintf("WARNING! If you lose the keys after they are returned, there is no "+
+					"recovery. Consider canceling this operation and re-initializing "+
+					"with the -pgp-keys flag to protect the returned %s keys along "+
+					"with -backup to allow recovery of the encrypted keys in case of "+
+					"emergency. You can delete the stored keys later using the -backup-delete "+
+					"flag.", strings.ToLower(keyTypeRequired))))
+			c.UI.Output("")
+		}
+	}
+	if len(c.flagPGPKeys) > 0 && !c.flagBackup {
+		if Format(c.UI) == "table" {
+			c.UI.Warn(wrapAtLength(
+				fmt.Sprintf("WARNING! You are using PGP keys for encrypting the resulting %s "+
+					"keys, but you did not enable the option to back up the keys to "+
+					"Vault's core. If you lose the encrypted keys after they are "+
+					"returned, you will not be able to recover them. Consider canceling "+
+					"this operation and re-running with -backup to allow recovery of the "+
+					"encrypted unseal keys in case of emergency. You can delete the "+
+					"stored keys later using the -backup-delete flag.", strings.ToLower(keyTypeRequired))))
+			c.UI.Output("")
+		}
+	}
+
+	// Provide the current status
+	return c.printStatus(status)
+}
+
+// cancel is used to abort the rekey process.
+func (c *OperatorRekeyCommand) cancel(client *api.Client) int {
+	// Handle the different API requests
+	var fn func() error
+	switch strings.ToLower(strings.TrimSpace(c.flagTarget)) {
+	case "barrier":
+		fn = client.Sys().RekeyCancel
+		if c.flagVerify {
+			fn = client.Sys().RekeyVerificationCancel
+		}
+	case "recovery", "hsm":
+		fn = client.Sys().RekeyRecoveryKeyCancel
+		if c.flagVerify {
+			fn = client.Sys().RekeyRecoveryKeyVerificationCancel
+		}
+
+	default:
+		c.UI.Error(fmt.Sprintf("Unknown target: %s", c.flagTarget))
+		return 1
+	}
+
+	// Make the request
+	if err := fn(); err != nil {
+		c.UI.Error(fmt.Sprintf("Error canceling rekey: %s", err))
+		return 2
+	}
+
+	c.UI.Output("Success! Canceled rekeying (if it was started)")
+	return 0
+}
+
+// provide prompts the user for the seal key and posts it to the rekey
+// update endpoint. If this submission completes the rekey, the new keys are
+// printed.
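+// The key argument selects the input mode: "-" reads the key share from
+// stdin, an empty string prompts for it on the terminal (no -nonce value is
+// required in that case), and any other value is used directly as the key
+// share.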
+func (c *OperatorRekeyCommand) provide(client *api.Client, key string) int { + var statusFn func() (interface{}, error) + var updateFn func(string, string) (interface{}, error) + keyTypeRequired := keyTypeUnseal + switch strings.ToLower(strings.TrimSpace(c.flagTarget)) { + case "barrier": + statusFn = func() (interface{}, error) { + return client.Sys().RekeyStatus() + } + updateFn = func(s1 string, s2 string) (interface{}, error) { + return client.Sys().RekeyUpdate(s1, s2) + } + if c.flagVerify { + statusFn = func() (interface{}, error) { + return client.Sys().RekeyVerificationStatus() + } + updateFn = func(s1 string, s2 string) (interface{}, error) { + return client.Sys().RekeyVerificationUpdate(s1, s2) + } + } + case "recovery", "hsm": + keyTypeRequired = keyTypeRecovery + statusFn = func() (interface{}, error) { + return client.Sys().RekeyRecoveryKeyStatus() + } + updateFn = func(s1 string, s2 string) (interface{}, error) { + return client.Sys().RekeyRecoveryKeyUpdate(s1, s2) + } + if c.flagVerify { + statusFn = func() (interface{}, error) { + return client.Sys().RekeyRecoveryKeyVerificationStatus() + } + updateFn = func(s1 string, s2 string) (interface{}, error) { + return client.Sys().RekeyRecoveryKeyVerificationUpdate(s1, s2) + } + } + default: + c.UI.Error(fmt.Sprintf("Unknown target: %s", c.flagTarget)) + return 1 + } + + status, err := statusFn() + if err != nil { + c.UI.Error(fmt.Sprintf("Error getting rekey status: %s", err)) + return 2 + } + + var started bool + var nonce string + + switch status := status.(type) { + case *api.RekeyStatusResponse: + stat := status + started = stat.Started + nonce = stat.Nonce + case *api.RekeyVerificationStatusResponse: + stat := status + started = stat.Started + nonce = stat.Nonce + default: + c.UI.Error("Unknown status type") + return 1 + } + + // Verify a root token generation is in progress. If there is not one in + // progress, return an error instructing the user to start one. + if !started { + c.UI.Error(wrapAtLength( + "No rekey is in progress. Start a rekey process by running " + + "\"vault operator rekey -init\".")) + return 1 + } + + switch key { + case "-": // Read from stdin + nonce = c.flagNonce + + // Pull our fake stdin if needed + stdin := (io.Reader)(os.Stdin) + if c.testStdin != nil { + stdin = c.testStdin + } + + var buf bytes.Buffer + if _, err := io.Copy(&buf, stdin); err != nil { + c.UI.Error(fmt.Sprintf("Failed to read from stdin: %s", err)) + return 1 + } + + key = buf.String() + case "": // Prompt using the tty + // Nonce value is not required if we are prompting via the terminal + w := getWriterFromUI(c.UI) + fmt.Fprintf(w, "Rekey operation nonce: %s\n", nonce) + fmt.Fprintf(w, "%s Key (will be hidden): ", keyTypeRequired) + key, err = password.Read(os.Stdin) + fmt.Fprintf(w, "\n") + if err != nil { + if err == password.ErrInterrupted { + c.UI.Error("user canceled") + return 1 + } + + c.UI.Error(wrapAtLength(fmt.Sprintf("An error occurred attempting to "+ + "ask for the %s key. The raw error message is shown below, but "+ + "usually this is because you attempted to pipe a value into the "+ + "command or you are executing outside of a terminal (tty). If you "+ + "want to pipe the value, pass \"-\" as the argument to read from "+ + "stdin. The raw error was: %s", strings.ToLower(keyTypeRequired), err))) + return 1 + } + default: // Supplied directly as an arg + nonce = c.flagNonce + } + + // Trim any whitespace from they key, especially since we might have + // prompted the user for it. 
+ key = strings.TrimSpace(key) + + // Verify we have a nonce value + if nonce == "" { + c.UI.Error("Missing nonce value: specify it via the -nonce flag") + return 1 + } + + // Provide the key, this may potentially complete the update + resp, err := updateFn(key, nonce) + if err != nil { + c.UI.Error(fmt.Sprintf("Error posting unseal key: %s", err)) + return 2 + } + + var complete bool + var mightContainUnsealKeys bool + + switch resp := resp.(type) { + case *api.RekeyUpdateResponse: + complete = resp.Complete + mightContainUnsealKeys = true + case *api.RekeyVerificationUpdateResponse: + complete = resp.Complete + default: + c.UI.Error("Unknown update response type") + return 1 + } + + if !complete { + return c.status(client) + } + + if mightContainUnsealKeys { + return c.printUnsealKeys(client, status.(*api.RekeyStatusResponse), + resp.(*api.RekeyUpdateResponse)) + } + + c.UI.Output(wrapAtLength("Rekey verification successful. The rekey operation is complete and the new keys are now active.")) + return 0 +} + +// status is used just to fetch and dump the status. +func (c *OperatorRekeyCommand) status(client *api.Client) int { + // Handle the different API requests + var fn func() (interface{}, error) + switch strings.ToLower(strings.TrimSpace(c.flagTarget)) { + case "barrier": + fn = func() (interface{}, error) { + return client.Sys().RekeyStatus() + } + if c.flagVerify { + fn = func() (interface{}, error) { + return client.Sys().RekeyVerificationStatus() + } + } + case "recovery", "hsm": + fn = func() (interface{}, error) { + return client.Sys().RekeyRecoveryKeyStatus() + } + if c.flagVerify { + fn = func() (interface{}, error) { + return client.Sys().RekeyRecoveryKeyVerificationStatus() + } + } + default: + c.UI.Error(fmt.Sprintf("Unknown target: %s", c.flagTarget)) + return 1 + } + + // Make the request + status, err := fn() + if err != nil { + c.UI.Error(fmt.Sprintf("Error reading rekey status: %s", err)) + return 2 + } + + return c.printStatus(status) +} + +// backupRetrieve retrieves the stored backup keys. +func (c *OperatorRekeyCommand) backupRetrieve(client *api.Client) int { + // Handle the different API requests + var fn func() (*api.RekeyRetrieveResponse, error) + switch strings.ToLower(strings.TrimSpace(c.flagTarget)) { + case "barrier": + fn = client.Sys().RekeyRetrieveBackup + case "recovery", "hsm": + fn = client.Sys().RekeyRetrieveRecoveryBackup + default: + c.UI.Error(fmt.Sprintf("Unknown target: %s", c.flagTarget)) + return 1 + } + + // Make the request + storedKeys, err := fn() + if err != nil { + c.UI.Error(fmt.Sprintf("Error retrieving rekey stored keys: %s", err)) + return 2 + } + + secret := &api.Secret{ + Data: structs.New(storedKeys).Map(), + } + + return OutputSecret(c.UI, secret) +} + +// backupDelete deletes the stored backup keys. +func (c *OperatorRekeyCommand) backupDelete(client *api.Client) int { + // Handle the different API requests + var fn func() error + switch strings.ToLower(strings.TrimSpace(c.flagTarget)) { + case "barrier": + fn = client.Sys().RekeyDeleteBackup + case "recovery", "hsm": + fn = client.Sys().RekeyDeleteRecoveryBackup + default: + c.UI.Error(fmt.Sprintf("Unknown target: %s", c.flagTarget)) + return 1 + } + + // Make the request + if err := fn(); err != nil { + c.UI.Error(fmt.Sprintf("Error deleting rekey stored keys: %s", err)) + return 2 + } + + c.UI.Output("Success! 
Delete stored keys (if they existed)") + return 0 +} + +// printStatus dumps the status to output +func (c *OperatorRekeyCommand) printStatus(in interface{}) int { + out := []string{} + out = append(out, "Key | Value") + + switch in := in.(type) { + case *api.RekeyStatusResponse: + status := in + out = append(out, fmt.Sprintf("Nonce | %s", status.Nonce)) + out = append(out, fmt.Sprintf("Started | %t", status.Started)) + if status.Started { + if status.Progress == status.Required { + out = append(out, fmt.Sprintf("Rekey Progress | %d/%d (verification in progress)", status.Progress, status.Required)) + } else { + out = append(out, fmt.Sprintf("Rekey Progress | %d/%d", status.Progress, status.Required)) + } + out = append(out, fmt.Sprintf("New Shares | %d", status.N)) + out = append(out, fmt.Sprintf("New Threshold | %d", status.T)) + out = append(out, fmt.Sprintf("Verification Required | %t", status.VerificationRequired)) + if status.VerificationNonce != "" { + out = append(out, fmt.Sprintf("Verification Nonce | %s", status.VerificationNonce)) + } + } + if len(status.PGPFingerprints) > 0 { + out = append(out, fmt.Sprintf("PGP Fingerprints | %s", status.PGPFingerprints)) + out = append(out, fmt.Sprintf("Backup | %t", status.Backup)) + } + case *api.RekeyVerificationStatusResponse: + status := in + out = append(out, fmt.Sprintf("Started | %t", status.Started)) + out = append(out, fmt.Sprintf("New Shares | %d", status.N)) + out = append(out, fmt.Sprintf("New Threshold | %d", status.T)) + out = append(out, fmt.Sprintf("Verification Nonce | %s", status.Nonce)) + out = append(out, fmt.Sprintf("Verification Progress | %d/%d", status.Progress, status.T)) + default: + c.UI.Error("Unknown status type") + return 1 + } + + switch Format(c.UI) { + case "table": + c.UI.Output(tableOutput(out, nil)) + return 0 + default: + return OutputData(c.UI, in) + } +} + +func (c *OperatorRekeyCommand) printUnsealKeys(client *api.Client, status *api.RekeyStatusResponse, resp *api.RekeyUpdateResponse) int { + switch Format(c.UI) { + case "table": + default: + return OutputData(c.UI, resp) + } + + // Space between the key prompt, if any, and the output + c.UI.Output("") + + // Provide the keys + var haveB64 bool + if resp.KeysB64 != nil && len(resp.KeysB64) == len(resp.Keys) { + haveB64 = true + } + for i, key := range resp.Keys { + if len(resp.PGPFingerprints) > 0 { + if haveB64 { + c.UI.Output(fmt.Sprintf("Key %d fingerprint: %s; value: %s", i+1, resp.PGPFingerprints[i], resp.KeysB64[i])) + } else { + c.UI.Output(fmt.Sprintf("Key %d fingerprint: %s; value: %s", i+1, resp.PGPFingerprints[i], key)) + } + } else { + if haveB64 { + c.UI.Output(fmt.Sprintf("Key %d: %s", i+1, resp.KeysB64[i])) + } else { + c.UI.Output(fmt.Sprintf("Key %d: %s", i+1, key)) + } + } + } + + c.UI.Output("") + c.UI.Output(fmt.Sprintf("Operation nonce: %s", resp.Nonce)) + + if len(resp.PGPFingerprints) > 0 && resp.Backup { + c.UI.Output("") + switch strings.ToLower(strings.TrimSpace(c.flagTarget)) { + case "barrier": + c.UI.Output(wrapAtLength(fmt.Sprintf( + "The encrypted unseal keys are backed up to \"core/unseal-keys-backup\" " + + "in the storage backend. Remove these keys at any time using " + + "\"vault operator rekey -backup-delete\". Vault does not automatically " + + "remove these keys.", + ))) + case "recovery", "hsm": + c.UI.Output(wrapAtLength(fmt.Sprintf( + "The encrypted recovery keys are backed up to \"core/recovery-keys-backup\" " + + "in the storage backend. 
Remove these keys at any time using " + + "\"vault operator rekey -backup-delete -target=recovery\". Vault does not automatically " + + "remove these keys.", + ))) + } + } + + switch status.VerificationRequired { + case false: + c.UI.Output("") + switch strings.ToLower(strings.TrimSpace(c.flagTarget)) { + case "barrier": + c.UI.Output(wrapAtLength(fmt.Sprintf( + "Vault unseal keys rekeyed with %d key shares and a key threshold of %d. Please "+ + "securely distribute the key shares printed above. When Vault is "+ + "re-sealed, restarted, or stopped, you must supply at least %d of "+ + "these keys to unseal it before it can start servicing requests.", + status.N, + status.T, + status.T))) + case "recovery", "hsm": + c.UI.Output(wrapAtLength(fmt.Sprintf( + "Vault recovery keys rekeyed with %d key shares and a key threshold of %d. Please "+ + "securely distribute the key shares printed above.", + status.N, + status.T))) + } + + default: + c.UI.Output("") + var warningText string + switch strings.ToLower(strings.TrimSpace(c.flagTarget)) { + case "barrier": + c.UI.Output(wrapAtLength(fmt.Sprintf( + "Vault has created a new unseal key, split into %d key shares and a key threshold "+ + "of %d. These will not be active until after verification is complete. "+ + "Please securely distribute the key shares printed above. When Vault "+ + "is re-sealed, restarted, or stopped, you must supply at least %d of "+ + "these keys to unseal it before it can start servicing requests.", + status.N, + status.T, + status.T))) + warningText = "unseal" + case "recovery", "hsm": + c.UI.Output(wrapAtLength(fmt.Sprintf( + "Vault has created a new recovery key, split into %d key shares and a key threshold "+ + "of %d. These will not be active until after verification is complete. "+ + "Please securely distribute the key shares printed above.", + status.N, + status.T))) + warningText = "authenticate with" + + } + c.UI.Output("") + c.UI.Warn(wrapAtLength(fmt.Sprintf( + "Again, these key shares are _not_ valid until verification is performed. "+ + "Do not lose or discard your current key shares until after verification "+ + "is complete or you will be unable to %s Vault. If you cancel the "+ + "rekey process or seal Vault before verification is complete the new "+ + "shares will be discarded and the current shares will remain valid.", warningText))) + c.UI.Output("") + c.UI.Warn(wrapAtLength( + "The current verification status, including initial nonce, is shown below.", + )) + c.UI.Output("") + + c.flagVerify = true + return c.status(client) + } + + return 0 +} diff --git a/command/operator_rekey_test.go b/command/operator_rekey_test.go new file mode 100644 index 0000000..570cfe4 --- /dev/null +++ b/command/operator_rekey_test.go @@ -0,0 +1,687 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +//go:build !race + +package command + +import ( + "io" + "reflect" + "regexp" + "strings" + "testing" + + "github.com/hashicorp/vault/sdk/helper/roottoken" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" +) + +func testOperatorRekeyCommand(tb testing.TB) (*cli.MockUi, *OperatorRekeyCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &OperatorRekeyCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestOperatorRekeyCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "pgp_keys_multi", + []string{ + "-init", + "-pgp-keys", "keybase:hashicorp", + "-pgp-keys", "keybase:jefferai", + }, + "can only be specified once", + 1, + }, + { + "key_shares_pgp_less", + []string{ + "-init", + "-key-shares", "10", + "-pgp-keys", "keybase:jefferai,keybase:sethvargo", + }, + "incorrect number", + 2, + }, + { + "key_shares_pgp_more", + []string{ + "-init", + "-key-shares", "1", + "-pgp-keys", "keybase:jefferai,keybase:sethvargo", + }, + "incorrect number", + 2, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testOperatorRekeyCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("status", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testOperatorRekeyCommand(t) + cmd.client = client + + // Verify the non-init response + code := cmd.Run([]string{ + "-status", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) + } + + expected := "Nonce" + combined := ui.OutputWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + // Now init to verify the init response + if _, err := client.Sys().RekeyInit(&api.RekeyInitRequest{ + SecretShares: 1, + SecretThreshold: 1, + }); err != nil { + t.Fatal(err) + } + + // Verify the init response + ui, cmd = testOperatorRekeyCommand(t) + cmd.client = client + code = cmd.Run([]string{ + "-status", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) + } + + expected = "Progress" + combined = ui.OutputWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("cancel", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + // Initialize a rekey + if _, err := client.Sys().RekeyInit(&api.RekeyInitRequest{ + SecretShares: 1, + SecretThreshold: 1, + }); err != nil { + t.Fatal(err) + } + + ui, cmd := testOperatorRekeyCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-cancel", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! 
Canceled rekeying" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + status, err := client.Sys().GenerateRootStatus() + if err != nil { + t.Fatal(err) + } + + if status.Started { + t.Errorf("expected status to be canceled: %#v", status) + } + }) + + t.Run("init", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testOperatorRekeyCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-init", + "-key-shares", "1", + "-key-threshold", "1", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) + } + + expected := "Nonce" + combined := ui.OutputWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + status, err := client.Sys().RekeyStatus() + if err != nil { + t.Fatal(err) + } + if !status.Started { + t.Errorf("expected status to be started: %#v", status) + } + }) + + t.Run("init_pgp", func(t *testing.T) { + t.Parallel() + + pgpKey := "keybase:hashicorp" + pgpFingerprints := []string{"c874011f0ab405110d02105534365d9472d7468f"} + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testOperatorRekeyCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-init", + "-key-shares", "1", + "-key-threshold", "1", + "-pgp-keys", pgpKey, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) + } + + expected := "Nonce" + combined := ui.OutputWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + status, err := client.Sys().RekeyStatus() + if err != nil { + t.Fatal(err) + } + if !status.Started { + t.Errorf("expected status to be started: %#v", status) + } + if !reflect.DeepEqual(status.PGPFingerprints, pgpFingerprints) { + t.Errorf("expected %#v to be %#v", status.PGPFingerprints, pgpFingerprints) + } + }) + + t.Run("provide_arg_recovery_keys", func(t *testing.T) { + t.Parallel() + + client, keys, closer := testVaultServerAutoUnseal(t) + defer closer() + + // Initialize a rekey + status, err := client.Sys().RekeyRecoveryKeyInit(&api.RekeyInitRequest{ + SecretShares: 1, + SecretThreshold: 1, + }) + if err != nil { + t.Fatal(err) + } + nonce := status.Nonce + + // Supply the first n-1 recovery keys + for _, key := range keys[:len(keys)-1] { + ui, cmd := testOperatorRekeyCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-nonce", nonce, + "-target", "recovery", + key, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) + } + } + + ui, cmd := testOperatorRekeyCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-nonce", nonce, + "-target", "recovery", + keys[len(keys)-1], // the last recovery key + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) + } + + re := regexp.MustCompile(`Key 1: (.+)`) + output := ui.OutputWriter.String() + match := re.FindAllStringSubmatch(output, -1) + if len(match) < 1 || len(match[0]) < 2 { + t.Fatalf("bad match: %#v", match) + } + recoveryKey := match[0][1] + + if strings.Contains(strings.ToLower(output), "unseal key") { + t.Fatalf(`output %s shouldn't contain "unseal key"`, output) + } + + // verify that we can perform operations with the recovery key + // below 
we generate a root token using the recovery key + rootStatus, err := client.Sys().GenerateRootStatus() + if err != nil { + t.Fatal(err) + } + otp, err := roottoken.GenerateOTP(rootStatus.OTPLength) + if err != nil { + t.Fatal(err) + } + genRoot, err := client.Sys().GenerateRootInit(otp, "") + if err != nil { + t.Fatal(err) + } + r, err := client.Sys().GenerateRootUpdate(recoveryKey, genRoot.Nonce) + if err != nil { + t.Fatal(err) + } + if !r.Complete { + t.Fatal("expected root update to be complete") + } + }) + t.Run("provide_arg", func(t *testing.T) { + t.Parallel() + + client, keys, closer := testVaultServerUnseal(t) + defer closer() + + // Initialize a rekey + status, err := client.Sys().RekeyInit(&api.RekeyInitRequest{ + SecretShares: 1, + SecretThreshold: 1, + }) + if err != nil { + t.Fatal(err) + } + nonce := status.Nonce + + // Supply the first n-1 unseal keys + for _, key := range keys[:len(keys)-1] { + ui, cmd := testOperatorRekeyCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-nonce", nonce, + key, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) + } + } + + ui, cmd := testOperatorRekeyCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-nonce", nonce, + keys[len(keys)-1], // the last unseal key + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) + } + + re := regexp.MustCompile(`Key 1: (.+)`) + output := ui.OutputWriter.String() + match := re.FindAllStringSubmatch(output, -1) + if len(match) < 1 || len(match[0]) < 2 { + t.Fatalf("bad match: %#v", match) + } + + // Grab the unseal key and try to unseal + unsealKey := match[0][1] + if err := client.Sys().Seal(); err != nil { + t.Fatal(err) + } + sealStatus, err := client.Sys().Unseal(unsealKey) + if err != nil { + t.Fatal(err) + } + if sealStatus.Sealed { + t.Errorf("expected vault to be unsealed: %#v", sealStatus) + } + }) + + t.Run("provide_stdin", func(t *testing.T) { + t.Parallel() + + client, keys, closer := testVaultServerUnseal(t) + defer closer() + + // Initialize a rekey + status, err := client.Sys().RekeyInit(&api.RekeyInitRequest{ + SecretShares: 1, + SecretThreshold: 1, + }) + if err != nil { + t.Fatal(err) + } + nonce := status.Nonce + + // Supply the first n-1 unseal keys + for _, key := range keys[:len(keys)-1] { + stdinR, stdinW := io.Pipe() + go func() { + stdinW.Write([]byte(key)) + stdinW.Close() + }() + + ui, cmd := testOperatorRekeyCommand(t) + cmd.client = client + cmd.testStdin = stdinR + + code := cmd.Run([]string{ + "-nonce", nonce, + "-", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) + } + } + + stdinR, stdinW := io.Pipe() + go func() { + stdinW.Write([]byte(keys[len(keys)-1])) // the last unseal key + stdinW.Close() + }() + + ui, cmd := testOperatorRekeyCommand(t) + cmd.client = client + cmd.testStdin = stdinR + + code := cmd.Run([]string{ + "-nonce", nonce, + "-", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + re := regexp.MustCompile(`Key 1: (.+)`) + output := ui.OutputWriter.String() + match := re.FindAllStringSubmatch(output, -1) + if len(match) < 1 || len(match[0]) < 2 { + t.Fatalf("bad match: %#v", match) + } + + // Grab the unseal key and try to unseal + unsealKey := match[0][1] + if err := client.Sys().Seal(); err != nil { + t.Fatal(err) + } + sealStatus, err := client.Sys().Unseal(unsealKey) + if err != nil { + t.Fatal(err) + } + if 
sealStatus.Sealed { + t.Errorf("expected vault to be unsealed: %#v", sealStatus) + } + }) + + t.Run("provide_stdin_recovery_keys", func(t *testing.T) { + t.Parallel() + + client, keys, closer := testVaultServerAutoUnseal(t) + defer closer() + + // Initialize a rekey + status, err := client.Sys().RekeyRecoveryKeyInit(&api.RekeyInitRequest{ + SecretShares: 1, + SecretThreshold: 1, + }) + if err != nil { + t.Fatal(err) + } + nonce := status.Nonce + for _, key := range keys[:len(keys)-1] { + stdinR, stdinW := io.Pipe() + go func() { + _, _ = stdinW.Write([]byte(key)) + _ = stdinW.Close() + }() + + ui, cmd := testOperatorRekeyCommand(t) + cmd.client = client + cmd.testStdin = stdinR + + code := cmd.Run([]string{ + "-target", "recovery", + "-nonce", nonce, + "-", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) + } + } + + stdinR, stdinW := io.Pipe() + go func() { + _, _ = stdinW.Write([]byte(keys[len(keys)-1])) // the last recovery key + _ = stdinW.Close() + }() + + ui, cmd := testOperatorRekeyCommand(t) + cmd.client = client + cmd.testStdin = stdinR + + code := cmd.Run([]string{ + "-nonce", nonce, + "-target", "recovery", + "-", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) + } + + re := regexp.MustCompile(`Key 1: (.+)`) + output := ui.OutputWriter.String() + match := re.FindAllStringSubmatch(output, -1) + if len(match) < 1 || len(match[0]) < 2 { + t.Fatalf("bad match: %#v", match) + } + recoveryKey := match[0][1] + + if strings.Contains(strings.ToLower(output), "unseal key") { + t.Fatalf(`output %s shouldn't contain "unseal key"`, output) + } + // verify that we can perform operations with the recovery key + // below we generate a root token using the recovery key + rootStatus, err := client.Sys().GenerateRootStatus() + if err != nil { + t.Fatal(err) + } + otp, err := roottoken.GenerateOTP(rootStatus.OTPLength) + if err != nil { + t.Fatal(err) + } + genRoot, err := client.Sys().GenerateRootInit(otp, "") + if err != nil { + t.Fatal(err) + } + r, err := client.Sys().GenerateRootUpdate(recoveryKey, genRoot.Nonce) + if err != nil { + t.Fatal(err) + } + if !r.Complete { + t.Fatal("expected root update to be complete") + } + }) + t.Run("backup", func(t *testing.T) { + t.Parallel() + + pgpKey := "keybase:hashicorp" + // pgpFingerprints := []string{"c874011f0ab405110d02105534365d9472d7468f"} + + client, keys, closer := testVaultServerUnseal(t) + defer closer() + + ui, cmd := testOperatorRekeyCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-init", + "-key-shares", "1", + "-key-threshold", "1", + "-pgp-keys", pgpKey, + "-backup", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) + } + + // Get the status for the nonce + status, err := client.Sys().RekeyStatus() + if err != nil { + t.Fatal(err) + } + nonce := status.Nonce + + var combined string + // Supply the unseal keys + for _, key := range keys { + ui, cmd := testOperatorRekeyCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-nonce", nonce, + key, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) + } + + // Append to our output string + combined += ui.OutputWriter.String() + } + + re := regexp.MustCompile(`Key 1 fingerprint: (.+); value: (.+)`) + match := re.FindAllStringSubmatch(combined, -1) + if len(match) < 1 || len(match[0]) < 3 { + t.Fatalf("bad match: %#v", match) + } + + 
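+		// With -pgp-keys and -backup, each new unseal share is encrypted to one
+		// of the submitted PGP keys, and Vault keeps an encrypted copy that can
+		// be fetched again with -backup-retrieve. A retrieved share can be
+		// decrypted offline much like testPGPDecrypt in pgp_test.go does; a
+		// rough sketch (encShare and entityList are illustrative placeholders):
+		//
+		//	raw, _ := base64.StdEncoding.DecodeString(encShare)
+		//	md, _ := openpgp.ReadMessage(bytes.NewReader(raw), entityList, nil, nil)
+		//	share, _ := io.ReadAll(md.UnverifiedBody)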
// Grab the output fingerprint and encrypted key + fingerprint, encryptedKey := match[0][1], match[0][2] + + // Get the backup + ui, cmd = testOperatorRekeyCommand(t) + cmd.client = client + + code = cmd.Run([]string{ + "-backup-retrieve", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) + } + + output := ui.OutputWriter.String() + if !strings.Contains(output, fingerprint) { + t.Errorf("expected %q to contain %q", output, fingerprint) + } + if !strings.Contains(output, encryptedKey) { + t.Errorf("expected %q to contain %q", output, encryptedKey) + } + + // Delete the backup + ui, cmd = testOperatorRekeyCommand(t) + cmd.client = client + + code = cmd.Run([]string{ + "-backup-delete", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) + } + + secret, err := client.Sys().RekeyRetrieveBackup() + if err == nil { + t.Errorf("expected error: %#v", secret) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testOperatorRekeyCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "secret/foo", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error getting rekey status: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testOperatorRekeyCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/operator_seal.go b/command/operator_seal.go new file mode 100644 index 0000000..f52665f --- /dev/null +++ b/command/operator_seal.go @@ -0,0 +1,89 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*OperatorSealCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorSealCommand)(nil) +) + +type OperatorSealCommand struct { + *BaseCommand +} + +func (c *OperatorSealCommand) Synopsis() string { + return "Seals the Vault server" +} + +func (c *OperatorSealCommand) Help() string { + helpText := ` +Usage: vault operator seal [options] + + Seals the Vault server. Sealing tells the Vault server to stop responding + to any operations until it is unsealed. When sealed, the Vault server + discards its in-memory root key to unlock the data, so it is physically + blocked from responding to operations unsealed. + + If an unseal is in progress, sealing the Vault will reset the unsealing + process. Users will have to re-enter their portions of the root key again. + + This command does nothing if the Vault server is already sealed. 
+ + Seal the Vault server: + + $ vault operator seal + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *OperatorSealCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP) +} + +func (c *OperatorSealCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *OperatorSealCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *OperatorSealCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + if len(args) > 0 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + if err := client.Sys().Seal(); err != nil { + c.UI.Error(fmt.Sprintf("Error sealing: %s", err)) + return 2 + } + + c.UI.Output("Success! Vault is sealed.") + return 0 +} diff --git a/command/operator_seal_test.go b/command/operator_seal_test.go new file mode 100644 index 0000000..43e150f --- /dev/null +++ b/command/operator_seal_test.go @@ -0,0 +1,125 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func testOperatorSealCommand(tb testing.TB) (*cli.MockUi, *OperatorSealCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &OperatorSealCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestOperatorSealCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "args", + []string{"foo"}, + "Too many arguments", + 1, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testOperatorSealCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("integration", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testOperatorSealCommand(t) + cmd.client = client + + code := cmd.Run([]string{}) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! Vault is sealed." 
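+		// cli.MockUi buffers everything the command writes; check the
+		// concatenation since the message could land on either writer.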
+ combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + sealStatus, err := client.Sys().SealStatus() + if err != nil { + t.Fatal(err) + } + if !sealStatus.Sealed { + t.Errorf("expected to be sealed") + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testOperatorSealCommand(t) + cmd.client = client + + code := cmd.Run([]string{}) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error sealing: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testOperatorSealCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/operator_step_down.go b/command/operator_step_down.go new file mode 100644 index 0000000..bfa2d89 --- /dev/null +++ b/command/operator_step_down.go @@ -0,0 +1,85 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*OperatorStepDownCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorStepDownCommand)(nil) +) + +type OperatorStepDownCommand struct { + *BaseCommand +} + +func (c *OperatorStepDownCommand) Synopsis() string { + return "Forces Vault to resign active duty" +} + +func (c *OperatorStepDownCommand) Help() string { + helpText := ` +Usage: vault operator step-down [options] + + Forces the Vault server at the given address to step down from active duty. + While the affected node will have a delay before attempting to acquire the + leader lock again, if no other Vault nodes acquire the lock beforehand, it + is possible for the same node to re-acquire the lock and become active + again. + + Force Vault to step down as the leader: + + $ vault operator step-down + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *OperatorStepDownCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP) +} + +func (c *OperatorStepDownCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *OperatorStepDownCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *OperatorStepDownCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + if len(args) > 0 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + if err := client.Sys().StepDown(); err != nil { + c.UI.Error(fmt.Sprintf("Error stepping down: %s", err)) + return 2 + } + + c.UI.Output(fmt.Sprintf("Success! Stepped down: %s", client.Address())) + return 0 +} diff --git a/command/operator_step_down_test.go b/command/operator_step_down_test.go new file mode 100644 index 0000000..fbe0779 --- /dev/null +++ b/command/operator_step_down_test.go @@ -0,0 +1,102 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func testOperatorStepDownCommand(tb testing.TB) (*cli.MockUi, *OperatorStepDownCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &OperatorStepDownCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestOperatorStepDownCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "too_many_args", + []string{"foo"}, + "Too many arguments", + 1, + }, + { + "default", + nil, + "Success! Stepped down: ", + 0, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testOperatorStepDownCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testOperatorStepDownCommand(t) + cmd.client = client + + code := cmd.Run([]string{}) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error stepping down: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testOperatorStepDownCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/operator_unseal.go b/command/operator_unseal.go new file mode 100644 index 0000000..32d9140 --- /dev/null +++ b/command/operator_unseal.go @@ -0,0 +1,165 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/hashicorp/go-secure-stdlib/password" + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*OperatorUnsealCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorUnsealCommand)(nil) +) + +type OperatorUnsealCommand struct { + *BaseCommand + + flagReset bool + flagMigrate bool + + testOutput io.Writer // for tests +} + +func (c *OperatorUnsealCommand) Synopsis() string { + return "Unseals the Vault server" +} + +func (c *OperatorUnsealCommand) Help() string { + helpText := ` +Usage: vault operator unseal [options] [KEY] + + Provide a portion of the root key to unseal a Vault server. Vault starts + in a sealed state. It cannot perform operations until it is unsealed. This + command accepts a portion of the root key (an "unseal key"). 
+ + The unseal key can be supplied as an argument to the command, but this is + not recommended as the unseal key will be available in your history: + + $ vault operator unseal IXyR0OJnSFobekZMMCKCoVEpT7wI6l+USMzE3IcyDyo= + + Instead, run the command with no arguments and it will prompt for the key: + + $ vault operator unseal + Key (will be hidden): IXyR0OJnSFobekZMMCKCoVEpT7wI6l+USMzE3IcyDyo= + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *OperatorUnsealCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.BoolVar(&BoolVar{ + Name: "reset", + Aliases: []string{}, + Target: &c.flagReset, + Default: false, + EnvVar: "", + Completion: complete.PredictNothing, + Usage: "Discard any previously entered keys to the unseal process.", + }) + + f.BoolVar(&BoolVar{ + Name: "migrate", + Aliases: []string{}, + Target: &c.flagMigrate, + Default: false, + EnvVar: "", + Completion: complete.PredictNothing, + Usage: "Indicate that this share is provided with the intent that it is part of a seal migration process.", + }) + + return set +} + +func (c *OperatorUnsealCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictAnything +} + +func (c *OperatorUnsealCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *OperatorUnsealCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + unsealKey := "" + + args = f.Args() + switch len(args) { + case 0: + // We will prompt for the unseal key later + case 1: + unsealKey = strings.TrimSpace(args[0]) + default: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + if c.flagReset { + status, err := client.Sys().ResetUnsealProcess() + if err != nil { + c.UI.Error(fmt.Sprintf("Error resetting unseal process: %s", err)) + return 2 + } + return OutputSealStatus(c.UI, client, status) + } + + if unsealKey == "" { + // Override the output + writer := (io.Writer)(os.Stdout) + if c.testOutput != nil { + writer = c.testOutput + } + + fmt.Fprintf(writer, "Unseal Key (will be hidden): ") + value, err := password.Read(os.Stdin) + fmt.Fprintf(writer, "\n") + if err != nil { + c.UI.Error(wrapAtLength(fmt.Sprintf("An error occurred attempting to "+ + "ask for an unseal key. The raw error message is shown below, but "+ + "usually this is because you attempted to pipe a value into the "+ + "unseal command or you are executing outside of a terminal (tty). "+ + "You should run the unseal command from a terminal for maximum "+ + "security. If this is not an option, the unseal key can be provided "+ + "as the first argument to the unseal command. The raw error "+ + "was:\n\n%s", err))) + return 1 + } + unsealKey = strings.TrimSpace(value) + } + + status, err := client.Sys().UnsealWithOptions(&api.UnsealOpts{ + Key: unsealKey, + Migrate: c.flagMigrate, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error unsealing: %s", err)) + return 2 + } + + return OutputSealStatus(c.UI, client, status) +} diff --git a/command/operator_unseal_test.go b/command/operator_unseal_test.go new file mode 100644 index 0000000..cb4d196 --- /dev/null +++ b/command/operator_unseal_test.go @@ -0,0 +1,189 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "os" + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func testOperatorUnsealCommand(tb testing.TB) (*cli.MockUi, *OperatorUnsealCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &OperatorUnsealCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestOperatorUnsealCommand_Run(t *testing.T) { + t.Parallel() + + t.Run("error_non_terminal", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testOperatorUnsealCommand(t) + cmd.client = client + cmd.testOutput = ioutil.Discard + + code := cmd.Run(nil) + if exp := 1; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "is not a terminal" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("reset", func(t *testing.T) { + t.Parallel() + + client, keys, closer := testVaultServerUnseal(t) + defer closer() + + // Seal so we can unseal + if err := client.Sys().Seal(); err != nil { + t.Fatal(err) + } + + // Enter an unseal key + if _, err := client.Sys().Unseal(keys[0]); err != nil { + t.Fatal(err) + } + + ui, cmd := testOperatorUnsealCommand(t) + cmd.client = client + cmd.testOutput = ioutil.Discard + + // Reset and check output + code := cmd.Run([]string{ + "-reset", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + expected := "0/3" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("full", func(t *testing.T) { + t.Parallel() + + client, keys, closer := testVaultServerUnseal(t) + defer closer() + + // Seal so we can unseal + if err := client.Sys().Seal(); err != nil { + t.Fatal(err) + } + + for _, key := range keys { + ui, cmd := testOperatorUnsealCommand(t) + cmd.client = client + cmd.testOutput = ioutil.Discard + + // Reset and check output + code := cmd.Run([]string{ + key, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d: %s", code, exp, ui.ErrorWriter.String()) + } + } + + status, err := client.Sys().SealStatus() + if err != nil { + t.Fatal(err) + } + if status.Sealed { + t.Error("expected unsealed") + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testOperatorUnsealCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "abcd", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error unsealing: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testOperatorUnsealCommand(t) + assertNoTabs(t, cmd) + }) +} + +func TestOperatorUnsealCommand_Format(t *testing.T) { + defer func() { + os.Setenv(EnvVaultCLINoColor, "") + }() + + client, keys, closer := testVaultServerUnseal(t) + defer closer() + + // Seal so we can unseal + if err := client.Sys().Seal(); err != nil { + t.Fatal(err) + } + + stdout := bytes.NewBuffer(nil) + stderr := bytes.NewBuffer(nil) + runOpts := &RunOptions{ + Stdout: stdout, + Stderr: stderr, + Client: client, + } 
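+	// setupEnv mirrors the real CLI entrypoint: it peels global flags such as
+	// -format out of the argument list, and RunCustom then executes the full
+	// command tree against the injected client rather than a bare command
+	// struct, which is why this test builds RunOptions instead of a MockUi.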
+ + args, format, _, _, _ := setupEnv([]string{"operator", "unseal", "-format", "json"}) + if format != "json" { + t.Fatalf("expected %q, got %q", "json", format) + } + + // Unseal with one key + code := RunCustom(append(args, []string{ + keys[0], + }...), runOpts) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d: %s", code, exp, stderr.String()) + } + + if !json.Valid(stdout.Bytes()) { + t.Error("expected output to be valid JSON") + } +} diff --git a/command/operator_usage.go b/command/operator_usage.go new file mode 100644 index 0000000..8db538a --- /dev/null +++ b/command/operator_usage.go @@ -0,0 +1,327 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "encoding/json" + "errors" + "fmt" + "sort" + "strings" + "time" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" + "github.com/ryanuber/columnize" +) + +var ( + _ cli.Command = (*OperatorUsageCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorUsageCommand)(nil) +) + +type OperatorUsageCommand struct { + *BaseCommand + flagStartTime time.Time + flagEndTime time.Time +} + +func (c *OperatorUsageCommand) Synopsis() string { + return "Lists historical client counts" +} + +func (c *OperatorUsageCommand) Help() string { + helpText := ` +Usage: vault operator usage + + List the client counts for the default reporting period. + + $ vault operator usage + + List the client counts for a specific time period. + + $ vault operator usage -start-time=2020-10 -end-time=2020-11 + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *OperatorUsageCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.TimeVar(&TimeVar{ + Name: "start-time", + Usage: "Start of report period. Defaults to 'default_reporting_period' before end time.", + Target: &c.flagStartTime, + Completion: complete.PredictNothing, + Default: time.Time{}, + Formats: TimeVar_TimeOrDay | TimeVar_Month, + }) + f.TimeVar(&TimeVar{ + Name: "end-time", + Usage: "End of report period. Defaults to end of last month.", + Target: &c.flagEndTime, + Completion: complete.PredictNothing, + Default: time.Time{}, + Formats: TimeVar_TimeOrDay | TimeVar_Month, + }) + + return set +} + +func (c *OperatorUsageCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictAnything +} + +func (c *OperatorUsageCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *OperatorUsageCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + data := make(map[string][]string) + if !c.flagStartTime.IsZero() { + data["start_time"] = []string{c.flagStartTime.Format(time.RFC3339)} + } + if !c.flagEndTime.IsZero() { + data["end_time"] = []string{c.flagEndTime.Format(time.RFC3339)} + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + resp, err := client.Logical().ReadWithData("sys/internal/counters/activity", data) + if err != nil { + c.UI.Error(fmt.Sprintf("Error retrieving client counts: %v", err)) + return 2 + } + + if resp == nil || resp.Data == nil { + if c.noReportAvailable(client) { + c.UI.Warn("Vault does not have any usage data available. 
A report will be available\n" + + "after the first calendar month in which monitoring is enabled.") + } else { + c.UI.Warn("No data is available for the given time range.") + } + // No further output + return 0 + } + + switch Format(c.UI) { + case "table": + default: + // Handle JSON, YAML, etc. + return OutputData(c.UI, resp) + } + + // Show this before the headers + c.outputTimestamps(resp.Data) + + out := []string{ + "Namespace path | Distinct entities | Non-Entity tokens | Active clients", + } + + out = append(out, c.namespacesOutput(resp.Data)...) + out = append(out, c.totalOutput(resp.Data)...) + + colConfig := columnize.DefaultConfig() + colConfig.Empty = " " // Do not show n/a on intentional blank lines + colConfig.Glue = " " + c.UI.Output(tableOutput(out, colConfig)) + return 0 +} + +// noReportAvailable checks whether we can definitively say that no +// queries can be answered; if there's an error, just fall back to +// reporting that the response is empty. +func (c *OperatorUsageCommand) noReportAvailable(client *api.Client) bool { + if c.flagOutputCurlString || c.flagOutputPolicy { + // Don't mess up the original query string + return false + } + + resp, err := client.Logical().Read("sys/internal/counters/config") + if err != nil || resp == nil || resp.Data == nil { + c.UI.Warn("bad response from config") + return false + } + + qaRaw, ok := resp.Data["queries_available"] + if !ok { + c.UI.Warn("no queries_available key") + return false + } + + qa, ok := qaRaw.(bool) + if !ok { + c.UI.Warn("wrong type") + return false + } + + return !qa +} + +func (c *OperatorUsageCommand) outputTimestamps(data map[string]interface{}) { + c.UI.Output(fmt.Sprintf("Period start: %v\nPeriod end: %v\n", + data["start_time"].(string), + data["end_time"].(string))) +} + +type UsageCommandNamespace struct { + formattedLine string + sortOrder string + + // Sort order: + // -- root first + // -- namespaces in lexicographic order + // -- deleted namespace "xxxxx" last +} + +type UsageResponse struct { + namespacePath string + entityCount int64 + // As per 1.9, the tokenCount field will contain the distinct non-entity + // token clients instead of each individual token. + tokenCount int64 + + clientCount int64 +} + +func jsonNumberOK(m map[string]interface{}, key string) (int64, bool) { + val, ok := m[key].(json.Number) + if !ok { + return 0, false + } + intVal, err := val.Int64() + if err != nil { + return 0, false + } + return intVal, true +} + +// TODO: provide a function in the API module for doing this conversion? 
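+// parseNamespaceCount expects each by_namespace entry to look roughly like
+// the following (shape inferred from the field accesses below, not from a
+// published schema):
+//
+//	{
+//		"namespace_path": "ns1/",
+//		"counts": {"distinct_entities": 10, "non_entity_tokens": 5, "clients": 15}
+//	}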
+func (c *OperatorUsageCommand) parseNamespaceCount(rawVal interface{}) (UsageResponse, error) { + var ret UsageResponse + + val, ok := rawVal.(map[string]interface{}) + if !ok { + return ret, errors.New("value is not a map") + } + + ret.namespacePath, ok = val["namespace_path"].(string) + if !ok { + return ret, errors.New("bad namespace path") + } + + counts, ok := val["counts"].(map[string]interface{}) + if !ok { + return ret, errors.New("missing counts") + } + + ret.entityCount, ok = jsonNumberOK(counts, "distinct_entities") + if !ok { + return ret, errors.New("missing distinct_entities") + } + + ret.tokenCount, ok = jsonNumberOK(counts, "non_entity_tokens") + if !ok { + return ret, errors.New("missing non_entity_tokens") + } + + ret.clientCount, ok = jsonNumberOK(counts, "clients") + if !ok { + return ret, errors.New("missing clients") + } + + return ret, nil +} + +func (c *OperatorUsageCommand) namespacesOutput(data map[string]interface{}) []string { + byNs, ok := data["by_namespace"].([]interface{}) + if !ok { + c.UI.Error("missing namespace breakdown in response") + return nil + } + + nsOut := make([]UsageCommandNamespace, 0, len(byNs)) + + for _, rawVal := range byNs { + val, err := c.parseNamespaceCount(rawVal) + if err != nil { + c.UI.Error(fmt.Sprintf("malformed namespace in response: %v", err)) + continue + } + + sortOrder := "1" + val.namespacePath + if val.namespacePath == "" { + val.namespacePath = "[root]" + sortOrder = "0" + } else if strings.HasPrefix(val.namespacePath, "deleted namespace") { + sortOrder = "2" + val.namespacePath + } + + formattedLine := fmt.Sprintf("%s | %d | %d | %d", + val.namespacePath, val.entityCount, val.tokenCount, val.clientCount) + nsOut = append(nsOut, UsageCommandNamespace{ + formattedLine: formattedLine, + sortOrder: sortOrder, + }) + } + + sort.Slice(nsOut, func(i, j int) bool { + return nsOut[i].sortOrder < nsOut[j].sortOrder + }) + + out := make([]string, len(nsOut)) + for i := range nsOut { + out[i] = nsOut[i].formattedLine + } + + return out +} + +func (c *OperatorUsageCommand) totalOutput(data map[string]interface{}) []string { + // blank line separating it from namespaces + out := []string{" | | | "} + + total, ok := data["total"].(map[string]interface{}) + if !ok { + c.UI.Error("missing total in response") + return out + } + + entityCount, ok := jsonNumberOK(total, "distinct_entities") + if !ok { + c.UI.Error("missing distinct_entities in total") + return out + } + + tokenCount, ok := jsonNumberOK(total, "non_entity_tokens") + if !ok { + c.UI.Error("missing non_entity_tokens in total") + return out + } + clientCount, ok := jsonNumberOK(total, "clients") + if !ok { + c.UI.Error("missing clients in total") + return out + } + + out = append(out, fmt.Sprintf("Total | %d | %d | %d", + entityCount, tokenCount, clientCount)) + return out +} diff --git a/command/patch.go b/command/patch.go new file mode 100644 index 0000000..9a4cd58 --- /dev/null +++ b/command/patch.go @@ -0,0 +1,138 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "context" + "fmt" + "io" + "os" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*PatchCommand)(nil) + _ cli.CommandAutocomplete = (*PatchCommand)(nil) +) + +// PatchCommand is a Command that puts data into the Vault. 
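+// Unlike write, it sends the data as an HTTP PATCH with JSON Merge Patch
+// (RFC 7386) semantics via Logical().JSONMergePatch, so only the supplied
+// keys are modified and everything else at the path is left intact.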
+type PatchCommand struct { + *BaseCommand + + flagForce bool + + testStdin io.Reader // for tests +} + +func (c *PatchCommand) Synopsis() string { + return "Patch data, configuration, and secrets" +} + +func (c *PatchCommand) Help() string { + helpText := ` +Usage: vault patch [options] PATH [DATA K=V...] + + Patches data in Vault at the given path. The data can be credentials, secrets, + configuration, or arbitrary data. The specific behavior of this command is + determined at the thing mounted at the path. + + Data is specified as "key=value" pairs. If the value begins with an "@", then + it is loaded from a file. If the value is "-", Vault will read the value from + stdin. + + Unlike write, patch will only modify specified fields. + + Persist data in the generic secrets engine without modifying any other fields: + + $ vault patch pki/roles/example allow_localhost=false + + The data can also be consumed from a file on disk by prefixing with the "@" + symbol. For example: + + $ vault patch pki/roles/example @role.json + + Or it can be read from stdin using the "-" symbol: + + $ echo "example.com" | vault patch pki/roles/example allowed_domains=- + + For a full list of examples and paths, please see the documentation that + corresponds to the secret engines in use. + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *PatchCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) + f := set.NewFlagSet("Command Options") + + f.BoolVar(&BoolVar{ + Name: "force", + Aliases: []string{"f"}, + Target: &c.flagForce, + Default: false, + EnvVar: "", + Completion: complete.PredictNothing, + Usage: "Allow the operation to continue with no key=value pairs. This " + + "allows writing to keys that do not need or expect data.", + }) + + return set +} + +func (c *PatchCommand) AutocompleteArgs() complete.Predictor { + // Return an anything predictor here. Without a way to access help + // information, we don't know what paths we could patch. + return complete.PredictAnything +} + +func (c *PatchCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *PatchCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) == 1 && !c.flagForce: + c.UI.Error("Must supply data or use -force") + return 1 + } + + // Pull our fake stdin if needed + stdin := (io.Reader)(os.Stdin) + if c.testStdin != nil { + stdin = c.testStdin + } + + path := sanitizePath(args[0]) + + data, err := parseArgsData(stdin, args[1:]) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to parse K=V data: %s", err)) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + secret, err := client.Logical().JSONMergePatch(context.Background(), path, data) + return handleWriteSecretOutput(c.BaseCommand, path, secret, err) +} diff --git a/command/patch_test.go b/command/patch_test.go new file mode 100644 index 0000000..410e644 --- /dev/null +++ b/command/patch_test.go @@ -0,0 +1,205 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "io" + "strings" + "testing" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" +) + +func testPatchCommand(tb testing.TB) (*cli.MockUi, *PatchCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &PatchCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestPatchCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + []string{}, + "Not enough arguments", + 1, + }, + { + "empty_kvs", + []string{"secret/write/foo"}, + "Must supply data or use -force", + 1, + }, + { + "force_kvs", + []string{"-force", "pki/roles/example"}, + "allow_localhost", + 0, + }, + { + "force_f_kvs", + []string{"-f", "pki/roles/example"}, + "allow_localhost", + 0, + }, + { + "kvs_no_value", + []string{"pki/roles/example", "foo"}, + "Failed to parse K=V data", + 1, + }, + { + "single_value", + []string{"pki/roles/example", "allow_localhost=true"}, + "allow_localhost", + 0, + }, + { + "multi_value", + []string{"pki/roles/example", "allow_localhost=true", "allowed_domains=true"}, + "allow_localhost", + 0, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().Mount("pki", &api.MountInput{ + Type: "pki", + }); err != nil { + t.Fatalf("pki mount error: %#v", err) + } + + if _, err := client.Logical().Write("pki/roles/example", nil); err != nil { + t.Fatalf("failed to prime role: %v", err) + } + + if _, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "Root X1", + }); err != nil { + t.Fatalf("failed to prime CA: %v", err) + } + + ui, cmd := testPatchCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + + t.Run("stdin_full", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().Mount("pki", &api.MountInput{ + Type: "pki", + }); err != nil { + t.Fatalf("pki mount error: %#v", err) + } + + if _, err := client.Logical().Write("pki/roles/example", nil); err != nil { + t.Fatalf("failed to prime role: %v", err) + } + + if _, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "Root X1", + }); err != nil { + t.Fatalf("failed to prime CA: %v", err) + } + + stdinR, stdinW := io.Pipe() + go func() { + stdinW.Write([]byte(`{"allow_localhost":"false","allow_wildcard_certificates":"false"}`)) + stdinW.Close() + }() + + ui, cmd := testPatchCommand(t) + cmd.client = client + cmd.testStdin = stdinR + + code := cmd.Run([]string{ + "pki/roles/example", "-", + }) + if code != 0 { + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + t.Fatalf("expected retcode=%d to be 0\nOutput:\n%v", code, combined) + } + + secret, err := client.Logical().Read("pki/roles/example") + if err != nil { + t.Fatal(err) + } + if secret == nil || secret.Data == nil { + t.Fatal("expected secret to have data") + } + if exp, act := false, secret.Data["allow_localhost"].(bool); exp != act { + t.Errorf("expected allowed_localhost=%v to be %v", act, exp) + 
} + if exp, act := false, secret.Data["allow_wildcard_certificates"].(bool); exp != act { + t.Errorf("expected allow_wildcard_certificates=%v to be %v", act, exp) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testPatchCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "foo/bar", "a=b", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error writing data to foo/bar: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testPatchCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/path_help.go b/command/path_help.go new file mode 100644 index 0000000..41f3bce --- /dev/null +++ b/command/path_help.go @@ -0,0 +1,121 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*PathHelpCommand)(nil) + _ cli.CommandAutocomplete = (*PathHelpCommand)(nil) +) + +var pathHelpVaultSealedMessage = strings.TrimSpace(` +Error: Vault is sealed. + +The "path-help" command requires the Vault to be unsealed so that the mount +points of the secret engines are known. +`) + +type PathHelpCommand struct { + *BaseCommand +} + +func (c *PathHelpCommand) Synopsis() string { + return "Retrieve API help for paths" +} + +func (c *PathHelpCommand) Help() string { + helpText := ` +Usage: vault path-help [options] PATH + + Retrieves API help for paths. All endpoints in Vault provide built-in help + in markdown format. This includes system paths, secret engines, and auth + methods. + + Get help for the thing mounted at database/: + + $ vault path-help database/ + + The response object will return additional paths to retrieve help: + + $ vault path-help database/roles/ + + Each secret engine produces different help output. + + If -format is specified as JSON, the output will be in OpenAPI format. 
+ +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *PathHelpCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP | FlagSetOutputFormat) +} + +func (c *PathHelpCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictAnything // TODO: programatic way to invoke help +} + +func (c *PathHelpCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *PathHelpCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + path := sanitizePath(args[0]) + + help, err := client.Help(path) + if err != nil { + if strings.Contains(err.Error(), "Vault is sealed") { + c.UI.Error(pathHelpVaultSealedMessage) + } else { + c.UI.Error(fmt.Sprintf("Error retrieving help: %s", err)) + } + return 2 + } + + switch c.flagFormat { + case "json": + b, err := json.Marshal(help.OpenAPI) + if err != nil { + c.UI.Error(fmt.Sprintf("Error marshaling OpenAPI: %s", err)) + return 2 + } + c.UI.Output(string(b)) + default: + c.UI.Output(help.Help) + } + + return 0 +} diff --git a/command/path_help_test.go b/command/path_help_test.go new file mode 100644 index 0000000..eaf4fe8 --- /dev/null +++ b/command/path_help_test.go @@ -0,0 +1,118 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func testPathHelpCommand(tb testing.TB) (*cli.MockUi, *PathHelpCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &PathHelpCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestPathHelpCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + []string{}, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"foo", "bar"}, + "Too many arguments", + 1, + }, + { + "not_found", + []string{"nope/not/once/never"}, + "", + 2, + }, + { + "kv", + []string{"secret/"}, + "The kv backend", + 0, + }, + { + "sys", + []string{"sys/mounts"}, + "currently mounted backends", + 0, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPathHelpCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testPathHelpCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "sys/mounts", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error retrieving help: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } 
+ }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testPathHelpCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/pgp_test.go b/command/pgp_test.go new file mode 100644 index 0000000..f37e488 --- /dev/null +++ b/command/pgp_test.go @@ -0,0 +1,213 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "encoding/base64" + "encoding/hex" + "io/ioutil" + "reflect" + "regexp" + "sort" + "testing" + + "github.com/hashicorp/vault/helper/pgpkeys" + "github.com/hashicorp/vault/vault" + + "github.com/ProtonMail/go-crypto/openpgp" + "github.com/ProtonMail/go-crypto/openpgp/packet" +) + +func getPubKeyFiles(t *testing.T) (string, []string, error) { + tempDir, err := ioutil.TempDir("", "vault-test") + if err != nil { + t.Fatalf("Error creating temporary directory: %s", err) + } + + pubFiles := []string{ + tempDir + "/pubkey1", + tempDir + "/pubkey2", + tempDir + "/pubkey3", + tempDir + "/aapubkey1", + } + decoder := base64.StdEncoding + pub1Bytes, err := decoder.DecodeString(pgpkeys.TestPubKey1) + if err != nil { + t.Fatalf("Error decoding bytes for public key 1: %s", err) + } + err = ioutil.WriteFile(pubFiles[0], pub1Bytes, 0o755) + if err != nil { + t.Fatalf("Error writing pub key 1 to temp file: %s", err) + } + pub2Bytes, err := decoder.DecodeString(pgpkeys.TestPubKey2) + if err != nil { + t.Fatalf("Error decoding bytes for public key 2: %s", err) + } + err = ioutil.WriteFile(pubFiles[1], pub2Bytes, 0o755) + if err != nil { + t.Fatalf("Error writing pub key 2 to temp file: %s", err) + } + pub3Bytes, err := decoder.DecodeString(pgpkeys.TestPubKey3) + if err != nil { + t.Fatalf("Error decoding bytes for public key 3: %s", err) + } + err = ioutil.WriteFile(pubFiles[2], pub3Bytes, 0o755) + if err != nil { + t.Fatalf("Error writing pub key 3 to temp file: %s", err) + } + err = ioutil.WriteFile(pubFiles[3], []byte(pgpkeys.TestAAPubKey1), 0o755) + if err != nil { + t.Fatalf("Error writing aa pub key 1 to temp file: %s", err) + } + + return tempDir, pubFiles, nil +} + +func testPGPDecrypt(tb testing.TB, privKey, enc string) string { + tb.Helper() + + privKeyBytes, err := base64.StdEncoding.DecodeString(privKey) + if err != nil { + tb.Fatal(err) + } + + ptBuf := bytes.NewBuffer(nil) + entity, err := openpgp.ReadEntity(packet.NewReader(bytes.NewBuffer(privKeyBytes))) + if err != nil { + tb.Fatal(err) + } + + var rootBytes []byte + rootBytes, err = base64.StdEncoding.DecodeString(enc) + if err != nil { + tb.Fatal(err) + } + + entityList := &openpgp.EntityList{entity} + md, err := openpgp.ReadMessage(bytes.NewBuffer(rootBytes), entityList, nil, nil) + if err != nil { + tb.Fatal(err) + } + ptBuf.ReadFrom(md.UnverifiedBody) + return ptBuf.String() +} + +func parseDecryptAndTestUnsealKeys(t *testing.T, + input, rootToken string, + fingerprints bool, + backupKeys map[string][]string, + backupKeysB64 map[string][]string, + core *vault.Core, +) { + decoder := base64.StdEncoding + priv1Bytes, err := decoder.DecodeString(pgpkeys.TestPrivKey1) + if err != nil { + t.Fatalf("Error decoding bytes for private key 1: %s", err) + } + priv2Bytes, err := decoder.DecodeString(pgpkeys.TestPrivKey2) + if err != nil { + t.Fatalf("Error decoding bytes for private key 2: %s", err) + } + priv3Bytes, err := decoder.DecodeString(pgpkeys.TestPrivKey3) + if err != nil { + t.Fatalf("Error decoding bytes for private key 3: %s", err) + } + + privBytes := [][]byte{ + priv1Bytes, + priv2Bytes, + priv3Bytes, + } + + testFunc := func(bkeys 
map[string][]string) { + var re *regexp.Regexp + if fingerprints { + re, err = regexp.Compile(`\s*Key\s+\d+\s+fingerprint:\s+([0-9a-fA-F]+);\s+value:\s+(.*)`) + } else { + re, err = regexp.Compile(`\s*Key\s+\d+:\s+(.*)`) + } + if err != nil { + t.Fatalf("Error compiling regex: %s", err) + } + matches := re.FindAllStringSubmatch(input, -1) + if len(matches) != 4 { + t.Fatalf("Unexpected number of keys returned, got %d, matches was \n\n%#v\n\n, input was \n\n%s\n\n", len(matches), matches, input) + } + + encodedKeys := []string{} + matchedFingerprints := []string{} + for _, tuple := range matches { + if fingerprints { + if len(tuple) != 3 { + t.Fatalf("Key not found: %#v", tuple) + } + matchedFingerprints = append(matchedFingerprints, tuple[1]) + encodedKeys = append(encodedKeys, tuple[2]) + } else { + if len(tuple) != 2 { + t.Fatalf("Key not found: %#v", tuple) + } + encodedKeys = append(encodedKeys, tuple[1]) + } + } + + if bkeys != nil && len(matchedFingerprints) != 0 { + testMap := map[string][]string{} + for i, v := range matchedFingerprints { + testMap[v] = append(testMap[v], encodedKeys[i]) + sort.Strings(testMap[v]) + } + if !reflect.DeepEqual(testMap, bkeys) { + t.Fatalf("test map and backup map do not match, test map is\n%#v\nbackup map is\n%#v", testMap, bkeys) + } + } + + unsealKeys := []string{} + ptBuf := bytes.NewBuffer(nil) + for i, privKeyBytes := range privBytes { + if i > 2 { + break + } + ptBuf.Reset() + entity, err := openpgp.ReadEntity(packet.NewReader(bytes.NewBuffer(privKeyBytes))) + if err != nil { + t.Fatalf("Error parsing private key %d: %s", i, err) + } + var keyBytes []byte + keyBytes, err = base64.StdEncoding.DecodeString(encodedKeys[i]) + if err != nil { + t.Fatalf("Error decoding key %d: %s", i, err) + } + entityList := &openpgp.EntityList{entity} + md, err := openpgp.ReadMessage(bytes.NewBuffer(keyBytes), entityList, nil, nil) + if err != nil { + t.Fatalf("Error decrypting with key %d (%s): %s", i, encodedKeys[i], err) + } + ptBuf.ReadFrom(md.UnverifiedBody) + unsealKeys = append(unsealKeys, ptBuf.String()) + } + + err = core.Seal(rootToken) + if err != nil { + t.Fatalf("Error sealing vault with provided root token: %s", err) + } + + for i, unsealKey := range unsealKeys { + unsealBytes, err := hex.DecodeString(unsealKey) + if err != nil { + t.Fatalf("Error hex decoding unseal key %s: %s", unsealKey, err) + } + unsealed, err := core.Unseal(unsealBytes) + if err != nil { + t.Fatalf("Error using unseal key %s: %s", unsealKey, err) + } + if i >= 2 && !unsealed { + t.Fatalf("Error: Provided two unseal keys but core is not unsealed") + } + } + } + + testFunc(backupKeysB64) +} diff --git a/command/pki.go b/command/pki.go new file mode 100644 index 0000000..89770fa --- /dev/null +++ b/command/pki.go @@ -0,0 +1,42 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + + "github.com/mitchellh/cli" +) + +var _ cli.Command = (*PKICommand)(nil) + +type PKICommand struct { + *BaseCommand +} + +func (c *PKICommand) Synopsis() string { + return "Interact with Vault's PKI Secrets Engine" +} + +func (c *PKICommand) Help() string { + helpText := ` +Usage: vault pki [options] [args] + + This command has subcommands for interacting with Vault's PKI Secrets + Engine. Here are some simple examples, and more detailed examples are + available in the subcommands or the documentation. 
+
+  Check the health of a PKI mount, to the best of this token's abilities:
+
+      $ vault pki health-check pki
+
+  Please see the individual subcommand help for detailed usage information.
+`
+
+	return strings.TrimSpace(helpText)
+}
+
+func (c *PKICommand) Run(args []string) int {
+	return cli.RunResultHelp
+}
diff --git a/command/pki_health_check.go b/command/pki_health_check.go
new file mode 100644
index 0000000..188c95c
--- /dev/null
+++ b/command/pki_health_check.go
@@ -0,0 +1,384 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/hashicorp/vault/command/healthcheck"
+
+	"github.com/ghodss/yaml"
+	"github.com/mitchellh/cli"
+	"github.com/posener/complete"
+	"github.com/ryanuber/columnize"
+)
+
+const (
+	pkiRetOK int = iota
+	pkiRetUsage
+	pkiRetInformational
+	pkiRetWarning
+	pkiRetCritical
+	pkiRetInvalidVersion
+	pkiRetInsufficientPermissions
+)
+
+var (
+	_ cli.Command             = (*PKIHealthCheckCommand)(nil)
+	_ cli.CommandAutocomplete = (*PKIHealthCheckCommand)(nil)
+
+	// Ensure the above return codes match (outside of OK/Usage) the values in
+	// the healthcheck package.
+	_ = pkiRetInformational == int(healthcheck.ResultInformational)
+	_ = pkiRetWarning == int(healthcheck.ResultWarning)
+	_ = pkiRetCritical == int(healthcheck.ResultCritical)
+	_ = pkiRetInvalidVersion == int(healthcheck.ResultInvalidVersion)
+	_ = pkiRetInsufficientPermissions == int(healthcheck.ResultInsufficientPermissions)
+)
+
+type PKIHealthCheckCommand struct {
+	*BaseCommand
+
+	flagConfig          string
+	flagReturnIndicator string
+	flagDefaultDisabled bool
+	flagList            bool
+}
+
+func (c *PKIHealthCheckCommand) Synopsis() string {
+	return "Check a PKI Secrets Engine mount's health and operational status"
+}
+
+func (c *PKIHealthCheckCommand) Help() string {
+	helpText := `
+Usage: vault pki health-check [options] MOUNT
+
+  Reports status of the specified mount against best practices and pending
+  failures. This is an informative command and not all recommendations will
+  apply to all mounts; consider using a configuration file to tune the
+  executed health checks.
+
+  To check the pki-root mount with default configuration:
+
+      $ vault pki health-check pki-root
+
+  To specify a configuration:
+
+      $ vault pki health-check -health-config=mycorp-root.json /pki-root
+
+  Return codes indicate failure type:
+
+      0 - Everything is good.
+      1 - Usage error (check CLI parameters).
+      2 - Informational message from a health check.
+      3 - Warning message from a health check.
+      4 - Critical message from a health check.
+      5 - A version mismatch between health check and Vault Server occurred,
+          preventing one or more health checks from being run.
+      6 - A permission denied message was returned from Vault Server for
+          one or more health checks.
+
+For more detailed information, refer to the online documentation about the
+vault pki health-check command.
+ +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *PKIHealthCheckCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "health-config", + Target: &c.flagConfig, + Default: "", + EnvVar: "", + Usage: "Path to JSON configuration file to modify health check execution and parameters.", + }) + + f.StringVar(&StringVar{ + Name: "return-indicator", + Target: &c.flagReturnIndicator, + Default: "default", + EnvVar: "", + Completion: complete.PredictSet("default", "informational", "warning", "critical", "permission"), + Usage: `Behavior of the return value: + - permission, for exiting with a non-zero code when the tool lacks + permissions or has a version mismatch with the server; + - critical, for exiting with a non-zero code when a check returns a + critical status in addition to the above; + - warning, for exiting with a non-zero status when a check returns a + warning status in addition to the above; + - informational, for exiting with a non-zero status when a check returns + an informational status in addition to the above; + - default, for the default behavior based on severity of message and + only returning a zero exit status when all checks have passed + and no execution errors have occurred. + `, + }) + + f.BoolVar(&BoolVar{ + Name: "default-disabled", + Target: &c.flagDefaultDisabled, + Default: false, + EnvVar: "", + Usage: `When specified, results in all health checks being disabled by +default unless enabled by the configuration file explicitly.`, + }) + + f.BoolVar(&BoolVar{ + Name: "list", + Target: &c.flagList, + Default: false, + EnvVar: "", + Usage: `When specified, no health checks are run, but all known health +checks are printed.`, + }) + + return set +} + +func (c *PKIHealthCheckCommand) isValidRetIndicator() bool { + switch c.flagReturnIndicator { + case "", "default", "informational", "warning", "critical", "permission": + return true + default: + return false + } +} + +func (c *PKIHealthCheckCommand) AutocompleteArgs() complete.Predictor { + // Return an anything predictor here, similar to `vault write`. We + // don't know what values are valid for the mount path. + return complete.PredictAnything +} + +func (c *PKIHealthCheckCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *PKIHealthCheckCommand) Run(args []string) int { + // Parse and validate the arguments. + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return pkiRetUsage + } + + args = f.Args() + if !c.flagList && len(args) < 1 { + c.UI.Error("Not enough arguments (expected mount path, got nothing)") + return pkiRetUsage + } else if !c.flagList && len(args) > 1 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected only mount path, got %d arguments)", len(args))) + for _, arg := range args { + if strings.HasPrefix(arg, "-") { + c.UI.Warn(fmt.Sprintf("Options (%v) must be specified before positional arguments (%v)", arg, args[0])) + break + } + } + return pkiRetUsage + } + + if !c.isValidRetIndicator() { + c.UI.Error(fmt.Sprintf("Invalid flag -return-indicator=%v; known options are default, informational, warning, critical, and permission", c.flagReturnIndicator)) + return pkiRetUsage + } + + // Setup the client and the executor. 
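+	// Every check registered below exposes the same small surface used later
+	// in this file (Name, IsEnabled, DefaultConfig), so adding a new health
+	// check is just another executor.AddCheck call.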
+ client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return pkiRetUsage + } + + // When listing is enabled, we lack an argument here, but do not contact + // the server at all, so we're safe to use a hard-coded default here. + pkiPath := "" + if len(args) == 1 { + pkiPath = args[0] + } + + mount := sanitizePath(pkiPath) + executor := healthcheck.NewExecutor(client, mount) + executor.AddCheck(healthcheck.NewCAValidityPeriodCheck()) + executor.AddCheck(healthcheck.NewCRLValidityPeriodCheck()) + executor.AddCheck(healthcheck.NewHardwareBackedRootCheck()) + executor.AddCheck(healthcheck.NewRootIssuedLeavesCheck()) + executor.AddCheck(healthcheck.NewRoleAllowsLocalhostCheck()) + executor.AddCheck(healthcheck.NewRoleAllowsGlobWildcardsCheck()) + executor.AddCheck(healthcheck.NewRoleNoStoreFalseCheck()) + executor.AddCheck(healthcheck.NewAuditVisibilityCheck()) + executor.AddCheck(healthcheck.NewAllowIfModifiedSinceCheck()) + executor.AddCheck(healthcheck.NewEnableAutoTidyCheck()) + executor.AddCheck(healthcheck.NewTidyLastRunCheck()) + executor.AddCheck(healthcheck.NewTooManyCertsCheck()) + executor.AddCheck(healthcheck.NewEnableAcmeIssuance()) + executor.AddCheck(healthcheck.NewAllowAcmeHeaders()) + if c.flagDefaultDisabled { + executor.DefaultEnabled = false + } + + // Handle listing, if necessary. + if c.flagList { + uiFormat := Format(c.UI) + if uiFormat == "yaml" { + c.UI.Error("YAML output format is not supported by the --list command") + return pkiRetUsage + } + + if uiFormat != "json" { + c.UI.Output("Default health check config:") + } + config := map[string]map[string]interface{}{} + for _, checker := range executor.Checkers { + config[checker.Name()] = checker.DefaultConfig() + } + + marshaled, err := json.MarshalIndent(config, "", " ") + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to marshal default config for check: %v", err)) + return pkiRetUsage + } + + c.UI.Output(string(marshaled)) + return pkiRetOK + } + + // Handle config merging. + external_config := map[string]interface{}{} + if c.flagConfig != "" { + contents, err := os.Open(c.flagConfig) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to read configuration file %v: %v", c.flagConfig, err)) + return pkiRetUsage + } + + decoder := json.NewDecoder(contents) + decoder.UseNumber() // Use json.Number instead of float64 values as we are decoding to an interface{}. + + if err := decoder.Decode(&external_config); err != nil { + c.UI.Error(fmt.Sprintf("Failed to parse configuration file %v: %v", c.flagConfig, err)) + return pkiRetUsage + } + } + + if err := executor.BuildConfig(external_config); err != nil { + c.UI.Error(fmt.Sprintf("Failed to build health check configuration: %v", err)) + return pkiRetUsage + } + + // Run the health checks. + results, err := executor.Execute() + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to run health check: %v", err)) + return pkiRetUsage + } + + // Display the output. + if err := c.outputResults(executor, results); err != nil { + c.UI.Error(fmt.Sprintf("Failed to render results for display: %v", err)) + } + + // Select an appropriate return code. 
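+    // As a worked example of the mapping below: under the default
+    // -return-indicator, a run whose most severe finding is a warning exits
+    // with pkiRetWarning (3); under -return-indicator=critical the same run
+    // exits 0, since a warning falls below the critical cut-off.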
+ return c.selectRetCode(results) +} + +func (c *PKIHealthCheckCommand) outputResults(e *healthcheck.Executor, results map[string][]*healthcheck.Result) error { + switch Format(c.UI) { + case "", "table": + return c.outputResultsTable(e, results) + case "json": + return c.outputResultsJSON(results) + case "yaml": + return c.outputResultsYAML(results) + default: + return fmt.Errorf("unknown output format: %v", Format(c.UI)) + } +} + +func (c *PKIHealthCheckCommand) outputResultsTable(e *healthcheck.Executor, results map[string][]*healthcheck.Result) error { + // Iterate in checker order to ensure stable output. + for _, checker := range e.Checkers { + if !checker.IsEnabled() { + continue + } + + scanner := checker.Name() + findings := results[scanner] + + c.UI.Output(scanner) + c.UI.Output(strings.Repeat("-", len(scanner))) + data := []string{"status" + hopeDelim + "endpoint" + hopeDelim + "message"} + for _, finding := range findings { + row := []string{ + finding.StatusDisplay, + finding.Endpoint, + finding.Message, + } + data = append(data, strings.Join(row, hopeDelim)) + } + + c.UI.Output(tableOutput(data, &columnize.Config{ + Delim: hopeDelim, + })) + c.UI.Output("\n") + } + + return nil +} + +func (c *PKIHealthCheckCommand) outputResultsJSON(results map[string][]*healthcheck.Result) error { + bytes, err := json.MarshalIndent(results, "", " ") + if err != nil { + return err + } + + c.UI.Output(string(bytes)) + return nil +} + +func (c *PKIHealthCheckCommand) outputResultsYAML(results map[string][]*healthcheck.Result) error { + bytes, err := yaml.Marshal(results) + if err != nil { + return err + } + + c.UI.Output(string(bytes)) + return nil +} + +func (c *PKIHealthCheckCommand) selectRetCode(results map[string][]*healthcheck.Result) int { + var highestResult healthcheck.ResultStatus = healthcheck.ResultNotApplicable + for _, findings := range results { + for _, finding := range findings { + if finding.Status > highestResult { + highestResult = finding.Status + } + } + } + + cutOff := healthcheck.ResultInformational + switch c.flagReturnIndicator { + case "", "default", "informational": + case "permission": + cutOff = healthcheck.ResultInvalidVersion + case "critical": + cutOff = healthcheck.ResultCritical + case "warning": + cutOff = healthcheck.ResultWarning + } + + if highestResult >= cutOff { + return int(highestResult) + } + + return pkiRetOK +} diff --git a/command/pki_health_check_test.go b/command/pki_health_check_test.go new file mode 100644 index 0000000..93d2315 --- /dev/null +++ b/command/pki_health_check_test.go @@ -0,0 +1,694 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+    "bytes"
+    "encoding/json"
+    "fmt"
+    "net/url"
+    "strings"
+    "testing"
+    "time"
+
+    "github.com/hashicorp/vault/api"
+    "github.com/hashicorp/vault/command/healthcheck"
+
+    "github.com/mitchellh/cli"
+    "github.com/stretchr/testify/require"
+)
+
+func TestPKIHC_AllGood(t *testing.T) {
+    t.Parallel()
+
+    client, closer := testVaultServer(t)
+    defer closer()
+
+    if err := client.Sys().Mount("pki", &api.MountInput{
+        Type: "pki",
+        Config: api.MountConfigInput{
+            AuditNonHMACRequestKeys:   healthcheck.VisibleReqParams,
+            AuditNonHMACResponseKeys:  healthcheck.VisibleRespParams,
+            PassthroughRequestHeaders: []string{"If-Modified-Since"},
+            AllowedResponseHeaders:    []string{"Last-Modified", "Replay-Nonce", "Link", "Location"},
+            MaxLeaseTTL:               "36500d",
+        },
+    }); err != nil {
+        t.Fatalf("pki mount error: %#v", err)
+    }
+
+    if resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{
+        "key_type":    "ec",
+        "common_name": "Root X1",
+        "ttl":         "3650d",
+    }); err != nil || resp == nil {
+        t.Fatalf("failed to prime CA: %v", err)
+    }
+
+    if _, err := client.Logical().Read("pki/crl/rotate"); err != nil {
+        t.Fatalf("failed to rotate CRLs: %v", err)
+    }
+
+    if _, err := client.Logical().Write("pki/roles/testing", map[string]interface{}{
+        "allow_any_name": true,
+        "no_store":       true,
+    }); err != nil {
+        t.Fatalf("failed to write role: %v", err)
+    }
+
+    if _, err := client.Logical().Write("pki/config/auto-tidy", map[string]interface{}{
+        "enabled":         true,
+        "tidy_cert_store": true,
+    }); err != nil {
+        t.Fatalf("failed to write auto-tidy config: %v", err)
+    }
+
+    if _, err := client.Logical().Write("pki/tidy", map[string]interface{}{
+        "tidy_cert_store": true,
+    }); err != nil {
+        t.Fatalf("failed to run tidy: %v", err)
+    }
+
+    path, err := url.Parse(client.Address())
+    require.NoError(t, err, "failed parsing client address")
+
+    if _, err := client.Logical().Write("pki/config/cluster", map[string]interface{}{
+        "path": path.JoinPath("/v1/", "pki/").String(),
+    }); err != nil {
+        t.Fatalf("failed to update local cluster: %v", err)
+    }
+
+    if _, err := client.Logical().Write("pki/config/acme", map[string]interface{}{
+        "enabled": "true",
+    }); err != nil {
+        t.Fatalf("failed to update acme config: %v", err)
+    }
+
+    _, _, results := execPKIHC(t, client, true)
+
+    validateExpectedPKIHC(t, expectedAllGood, results)
+}
+
+func TestPKIHC_AllBad(t *testing.T) {
+    t.Parallel()
+
+    client, closer := testVaultServer(t)
+    defer closer()
+
+    if err := client.Sys().Mount("pki", &api.MountInput{
+        Type: "pki",
+    }); err != nil {
+        t.Fatalf("pki mount error: %#v", err)
+    }
+
+    if resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{
+        "key_type":    "ec",
+        "common_name": "Root X1",
+        "ttl":         "35d",
+    }); err != nil || resp == nil {
+        t.Fatalf("failed to prime CA: %v", err)
+    }
+
+    if _, err := client.Logical().Write("pki/config/crl", map[string]interface{}{
+        "expiry": "5s",
+    }); err != nil {
+        t.Fatalf("failed to set CRL config: %v", err)
+    }
+
+    if _, err := client.Logical().Read("pki/crl/rotate"); err != nil {
+        t.Fatalf("failed to rotate CRLs: %v", err)
+    }
+
+    time.Sleep(5 * time.Second)
+
+    if _, err := client.Logical().Write("pki/roles/testing", map[string]interface{}{
+        "allow_localhost":             true,
+        "allowed_domains":             "*.example.com",
+        "allow_glob_domains":          true,
+        "allow_wildcard_certificates": true,
+        "no_store":                    false,
+        "key_type":                    "ec",
+        "ttl":                         "30d",
+    }); err != nil {
+        t.Fatalf("failed to write role: %v", err)
+    }
+
+    if _, err := client.Logical().Write("pki/issue/testing", map[string]interface{}{
+        "common_name": "something.example.com",
+    }); err != nil {
+        t.Fatalf("failed to issue leaf cert: %v", err)
+    }
+
+    if _, err := client.Logical().Write("pki/config/auto-tidy", map[string]interface{}{
+        "enabled":         false,
+        "tidy_cert_store": false,
+    }); err != nil {
+        t.Fatalf("failed to write auto-tidy config: %v", err)
+    }
+
+    _, _, results := execPKIHC(t, client, true)
+
+    validateExpectedPKIHC(t, expectedAllBad, results)
+}
+
+func TestPKIHC_OnlyIssuer(t *testing.T) {
+    t.Parallel()
+
+    client, closer := testVaultServer(t)
+    defer closer()
+
+    if err := client.Sys().Mount("pki", &api.MountInput{
+        Type: "pki",
+    }); err != nil {
+        t.Fatalf("pki mount error: %#v", err)
+    }
+
+    if resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{
+        "key_type":    "ec",
+        "common_name": "Root X1",
+        "ttl":         "35d",
+    }); err != nil || resp == nil {
+        t.Fatalf("failed to prime CA: %v", err)
+    }
+
+    _, _, results := execPKIHC(t, client, true)
+    validateExpectedPKIHC(t, expectedEmptyWithIssuer, results)
+}
+
+func TestPKIHC_NoMount(t *testing.T) {
+    t.Parallel()
+
+    client, closer := testVaultServer(t)
+    defer closer()
+
+    code, message, _ := execPKIHC(t, client, false)
+    if code != 1 {
+        t.Fatalf("Expected return code 1 from invocation on non-existent mount, got %v\nOutput: %v", code, message)
+    }
+
+    if !strings.Contains(message, "route entry not found") {
+        t.Fatalf("Expected failure to talk about missing route entry, got exit code %v\nOutput: %v", code, message)
+    }
+}
+
+func TestPKIHC_ExpectedEmptyMount(t *testing.T) {
+    t.Parallel()
+
+    client, closer := testVaultServer(t)
+    defer closer()
+
+    if err := client.Sys().Mount("pki", &api.MountInput{
+        Type: "pki",
+    }); err != nil {
+        t.Fatalf("pki mount error: %#v", err)
+    }
+
+    code, message, _ := execPKIHC(t, client, false)
+    if code != 1 {
+        t.Fatalf("Expected return code 1 from invocation on empty mount, got %v\nOutput: %v", code, message)
+    }
+
+    if !strings.Contains(message, "lacks any configured issuers,") {
+        t.Fatalf("Expected failure to talk about no issuers, got exit code %v\nOutput: %v", code, message)
+    }
+}
+
+func TestPKIHC_NoPerm(t *testing.T) {
+    t.Parallel()
+
+    client, closer := testVaultServer(t)
+    defer closer()
+
+    if err := client.Sys().Mount("pki", &api.MountInput{
+        Type: "pki",
+    }); err != nil {
+        t.Fatalf("pki mount error: %#v", err)
+    }
+
+    if resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{
+        "key_type":    "ec",
+        "common_name": "Root X1",
+        "ttl":         "35d",
+    }); err != nil || resp == nil {
+        t.Fatalf("failed to prime CA: %v", err)
+    }
+
+    if _, err := client.Logical().Write("pki/config/crl", map[string]interface{}{
+        "expiry": "5s",
+    }); err != nil {
+        t.Fatalf("failed to set CRL config: %v", err)
+    }
+
+    if _, err := client.Logical().Read("pki/crl/rotate"); err != nil {
+        t.Fatalf("failed to rotate CRLs: %v", err)
+    }
+
+    time.Sleep(5 * time.Second)
+
+    if _, err := client.Logical().Write("pki/roles/testing", map[string]interface{}{
+        "allow_localhost":             true,
+        "allowed_domains":             "*.example.com",
+        "allow_glob_domains":          true,
+        "allow_wildcard_certificates": true,
+        "no_store":                    false,
+        "key_type":                    "ec",
+        "ttl":                         "30d",
+    }); err != nil {
+        t.Fatalf("failed to write role: %v", err)
+    }
+
+    if _, err := client.Logical().Write("pki/issue/testing", map[string]interface{}{
+        "common_name": "something.example.com",
+    }); err
!= nil { + t.Fatalf("failed to issue leaf cert: %v", err) + } + + if _, err := client.Logical().Write("pki/config/auto-tidy", map[string]interface{}{ + "enabled": false, + "tidy_cert_store": false, + }); err != nil { + t.Fatalf("failed to write auto-tidy config: %v", err) + } + + // Remove client token. + client.ClearToken() + + _, _, results := execPKIHC(t, client, true) + validateExpectedPKIHC(t, expectedNoPerm, results) +} + +func testPKIHealthCheckCommand(tb testing.TB) (*cli.MockUi, *PKIHealthCheckCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &PKIHealthCheckCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func execPKIHC(t *testing.T, client *api.Client, ok bool) (int, string, map[string][]map[string]interface{}) { + t.Helper() + + stdout := bytes.NewBuffer(nil) + stderr := bytes.NewBuffer(nil) + runOpts := &RunOptions{ + Stdout: stdout, + Stderr: stderr, + Client: client, + } + + code := RunCustom([]string{"pki", "health-check", "-format=json", "pki"}, runOpts) + combined := stdout.String() + stderr.String() + + var results map[string][]map[string]interface{} + if err := json.Unmarshal([]byte(combined), &results); err != nil { + if ok { + t.Fatalf("failed to decode json (ret %v): %v\njson:\n%v", code, err, combined) + } + } + + t.Log(combined) + + return code, combined, results +} + +func validateExpectedPKIHC(t *testing.T, expected, results map[string][]map[string]interface{}) { + t.Helper() + + for test, subtest := range expected { + actual, ok := results[test] + require.True(t, ok, fmt.Sprintf("expected top-level test %v to be present", test)) + + if subtest == nil { + continue + } + + require.NotNil(t, actual, fmt.Sprintf("expected top-level test %v to be non-empty; wanted wireframe format %v", test, subtest)) + require.Equal(t, len(subtest), len(actual), fmt.Sprintf("top-level test %v has different number of results %v in wireframe, %v in test output\nwireframe: %v\noutput: %v\n", test, len(subtest), len(actual), subtest, actual)) + + for index, subset := range subtest { + for key, value := range subset { + a_value, present := actual[index][key] + require.True(t, present) + if value != nil { + require.Equal(t, value, a_value, fmt.Sprintf("in test: %v / result %v - when validating key %v\nWanted: %v\nGot: %v", test, index, key, subset, actual[index])) + } + } + } + } + + for name := range results { + if _, present := expected[name]; !present { + t.Fatalf("got unexpected health check: %v\n%v", name, results[name]) + } + } +} + +var expectedAllGood = map[string][]map[string]interface{}{ + "ca_validity_period": { + { + "status": "ok", + }, + }, + "crl_validity_period": { + { + "status": "ok", + }, + { + "status": "ok", + }, + }, + "allow_acme_headers": { + { + "status": "ok", + }, + }, + "allow_if_modified_since": { + { + "status": "ok", + }, + }, + "audit_visibility": { + { + "status": "ok", + }, + }, + "enable_acme_issuance": { + { + "status": "ok", + }, + }, + "enable_auto_tidy": { + { + "status": "ok", + }, + }, + "role_allows_glob_wildcards": { + { + "status": "ok", + }, + }, + "role_allows_localhost": { + { + "status": "ok", + }, + }, + "role_no_store_false": { + { + "status": "ok", + }, + }, + "root_issued_leaves": { + { + "status": "ok", + }, + }, + "tidy_last_run": { + { + "status": "ok", + }, + }, + "too_many_certs": { + { + "status": "ok", + }, + }, +} + +var expectedAllBad = map[string][]map[string]interface{}{ + "ca_validity_period": { + { + "status": "critical", + }, + }, + "crl_validity_period": { + { + "status": "critical", + }, + { + 
"status": "critical", + }, + }, + "allow_acme_headers": { + { + "status": "not_applicable", + }, + }, + "allow_if_modified_since": { + { + "status": "informational", + }, + }, + "audit_visibility": { + { + "status": "informational", + }, + { + "status": "informational", + }, + { + "status": "informational", + }, + { + "status": "informational", + }, + { + "status": "informational", + }, + { + "status": "informational", + }, + { + "status": "informational", + }, + { + "status": "informational", + }, + { + "status": "informational", + }, + { + "status": "informational", + }, + { + "status": "informational", + }, + { + "status": "informational", + }, + { + "status": "informational", + }, + { + "status": "informational", + }, + { + "status": "informational", + }, + { + "status": "informational", + }, + { + "status": "informational", + }, + { + "status": "informational", + }, + { + "status": "informational", + }, + { + "status": "informational", + }, + { + "status": "informational", + }, + { + "status": "informational", + }, + { + "status": "informational", + }, + { + "status": "informational", + }, + { + "status": "informational", + }, + { + "status": "informational", + }, + { + "status": "informational", + }, + { + "status": "informational", + }, + { + "status": "informational", + }, + { + "status": "informational", + }, + }, + "enable_acme_issuance": { + { + "status": "not_applicable", + }, + }, + "enable_auto_tidy": { + { + "status": "informational", + }, + }, + "role_allows_glob_wildcards": { + { + "status": "warning", + }, + }, + "role_allows_localhost": { + { + "status": "warning", + }, + }, + "role_no_store_false": { + { + "status": "warning", + }, + }, + "root_issued_leaves": { + { + "status": "warning", + }, + }, + "tidy_last_run": { + { + "status": "critical", + }, + }, + "too_many_certs": { + { + "status": "ok", + }, + }, +} + +var expectedEmptyWithIssuer = map[string][]map[string]interface{}{ + "ca_validity_period": { + { + "status": "critical", + }, + }, + "crl_validity_period": { + { + "status": "ok", + }, + { + "status": "ok", + }, + }, + "allow_acme_headers": { + { + "status": "not_applicable", + }, + }, + "allow_if_modified_since": nil, + "audit_visibility": nil, + "enable_acme_issuance": { + { + "status": "not_applicable", + }, + }, + "enable_auto_tidy": { + { + "status": "informational", + }, + }, + "role_allows_glob_wildcards": nil, + "role_allows_localhost": nil, + "role_no_store_false": nil, + "root_issued_leaves": { + { + "status": "ok", + }, + }, + "tidy_last_run": { + { + "status": "critical", + }, + }, + "too_many_certs": { + { + "status": "ok", + }, + }, +} + +var expectedNoPerm = map[string][]map[string]interface{}{ + "ca_validity_period": { + { + "status": "critical", + }, + }, + "crl_validity_period": { + { + "status": "insufficient_permissions", + }, + { + "status": "critical", + }, + { + "status": "critical", + }, + }, + "allow_acme_headers": { + { + "status": "insufficient_permissions", + }, + }, + "allow_if_modified_since": nil, + "audit_visibility": nil, + "enable_acme_issuance": { + { + "status": "insufficient_permissions", + }, + }, + "enable_auto_tidy": { + { + "status": "insufficient_permissions", + }, + }, + "role_allows_glob_wildcards": { + { + "status": "insufficient_permissions", + }, + }, + "role_allows_localhost": { + { + "status": "insufficient_permissions", + }, + }, + "role_no_store_false": { + { + "status": "insufficient_permissions", + }, + }, + "root_issued_leaves": { + { + "status": "insufficient_permissions", + }, + }, + "tidy_last_run": { + 
{
+            "status": "insufficient_permissions",
+        },
+    },
+    "too_many_certs": {
+        {
+            "status": "insufficient_permissions",
+        },
+    },
+}
diff --git a/command/pki_issue_intermediate.go b/command/pki_issue_intermediate.go
new file mode 100644
index 0000000..fe16fda
--- /dev/null
+++ b/command/pki_issue_intermediate.go
@@ -0,0 +1,367 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+    "context"
+    "fmt"
+    "io"
+    "os"
+    paths "path"
+    "strings"
+
+    "github.com/hashicorp/vault/api"
+    "github.com/posener/complete"
+)
+
+type PKIIssueCACommand struct {
+    *BaseCommand
+
+    flagConfig          string
+    flagReturnIndicator string
+    flagDefaultDisabled bool
+    flagList            bool
+
+    flagKeyStorageSource string
+    flagNewIssuerName    string
+}
+
+func (c *PKIIssueCACommand) Synopsis() string {
+    return "Given a parent certificate and a list of generation parameters, creates an issuer on a specified mount"
+}
+
+func (c *PKIIssueCACommand) Help() string {
+    helpText := `
+Usage: vault pki issue PARENT CHILD_MOUNT options
+
+PARENT is the fully qualified path of the Certificate Authority in vault which will issue the new intermediate certificate.
+
+CHILD_MOUNT is the path of the mount in vault where the new issuer is saved.
+
+options are the superset of the options passed to generate/intermediate and sign-intermediate commands. At least one option must be set.
+
+This command creates an intermediate certificate authority certificate signed by the parent in the CHILD_MOUNT.
+
+` + c.Flags().Help()
+    return strings.TrimSpace(helpText)
+}
+
+func (c *PKIIssueCACommand) Flags() *FlagSets {
+    set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat)
+    f := set.NewFlagSet("Command Options")
+
+    f.StringVar(&StringVar{
+        Name:       "type",
+        Target:     &c.flagKeyStorageSource,
+        Default:    "internal",
+        EnvVar:     "",
+        Usage:      `Options are "existing" - to use an existing key inside vault, "internal" - to generate a new key inside vault, or "kms" - to link to an external key. Exported keys are not available through this API.`,
+        Completion: complete.PredictSet("internal", "existing", "kms"),
+    })
+
+    f.StringVar(&StringVar{
+        Name:    "issuer_name",
+        Target:  &c.flagNewIssuerName,
+        Default: "",
+        EnvVar:  "",
+        Usage:   `If present, the newly created issuer will be given this name.`,
+    })
+
+    return set
+}
+
+func (c *PKIIssueCACommand) Run(args []string) int {
+    // Parse Args
+    f := c.Flags()
+    if err := f.Parse(args); err != nil {
+        c.UI.Error(err.Error())
+        return 1
+    }
+    args = f.Args()
+
+    if len(args) < 3 {
+        c.UI.Error("Not enough arguments: expected parent issuer, child-mount location, and at least one key=value argument")
+        return 1
+    }
+
+    stdin := (io.Reader)(os.Stdin)
+    data, err := parseArgsData(stdin, args[2:])
+    if err != nil {
+        c.UI.Error(fmt.Sprintf("Failed to parse K=V data: %s", err))
+        return 1
+    }
+
+    parentMountIssuer := sanitizePath(args[0]) // /pki/issuer/default
+
+    intermediateMount := sanitizePath(args[1])
+
+    return pkiIssue(c.BaseCommand, parentMountIssuer, intermediateMount, c.flagNewIssuerName, c.flagKeyStorageSource, data)
+}
+
+func pkiIssue(c *BaseCommand, parentMountIssuer string, intermediateMount string, flagNewIssuerName string, flagKeyStorageSource string, data map[string]interface{}) int {
+    // Check We Have a Client
+    client, err := c.Client()
+    if err != nil {
+        c.UI.Error(fmt.Sprintf("Failed to obtain client: %v", err))
+        return 1
+    }
+
+    // Sanity Check the Parent Issuer
+    if !strings.Contains(parentMountIssuer, "/issuer/") {
+        c.UI.Error(fmt.Sprintf("Parent Issuer %v is Not a PKI Issuer Path of the format /mount/issuer/issuer-ref", parentMountIssuer))
+        return 1
+    }
+    _, err = readIssuer(client, parentMountIssuer)
+    if err != nil {
+        c.UI.Error(fmt.Sprintf("Unable to access parent issuer %v: %v", parentMountIssuer, err))
+        return 1
+    }
+
+    // Set-up Failure State (Immediately Before First Write Call)
+    failureState := inCaseOfFailure{
+        intermediateMount: intermediateMount,
+        parentMount:       strings.Split(parentMountIssuer, "/issuer/")[0],
+        parentIssuer:      parentMountIssuer,
+        newName:           flagNewIssuerName,
+    }
+
+    // Generate Certificate Signing Request
+    csrResp, err := client.Logical().Write(intermediateMount+"/intermediate/generate/"+flagKeyStorageSource, data)
+    if err != nil {
+        if strings.Contains(err.Error(), "no handler for route") { // Mount Given Does Not Exist
+            c.UI.Error(fmt.Sprintf("Given Intermediate Mount %v Does Not Exist: %v", intermediateMount, err))
+        } else if strings.Contains(err.Error(), "unsupported path") { // Expected if Not a PKI Mount
+            c.UI.Error(fmt.Sprintf("Given Intermediate Mount %v Is Not a PKI Mount: %v", intermediateMount, err))
+        } else {
+            c.UI.Error(fmt.Sprintf("Failed to Generate Intermediate CSR on %v: %v", intermediateMount, err))
+        }
+        return 1
+    }
+    // Parse CSR Response, Also Verifies that this is a PKI Mount
+    // (e.g. calling the above call on cubbyhole/ won't return an error response)
+    csrPemRaw, present := csrResp.Data["csr"]
+    if !present {
+        c.UI.Error(fmt.Sprintf("Failed to Generate Intermediate CSR on %v, got response: %v", intermediateMount, csrResp))
+        return 1
+    }
+    keyIdRaw, present := csrResp.Data["key_id"]
+    if !present && flagKeyStorageSource == "internal" {
+        c.UI.Error(fmt.Sprintf("Failed to Generate Key on %v, got response: %v", intermediateMount, csrResp))
+        return 1
+    }
+
+    // If that all Parses, then we've successfully generated a CSR! Save It (and the Key-ID)
+    failureState.csrGenerated = true
+    if flagKeyStorageSource == "internal" {
+        failureState.createdKeyId = keyIdRaw.(string)
+    }
+    csr := csrPemRaw.(string)
+    failureState.csr = csr
+    data["csr"] = csr
+
+    // Next, Sign the CSR
+    rootResp, err := client.Logical().Write(parentMountIssuer+"/sign-intermediate", data)
+    if err != nil {
+        c.UI.Error(failureState.generateFailureMessage())
+        c.UI.Error(fmt.Sprintf("Error Signing Intermediate: %v", err))
+        return 1
+    }
+    // Success! Save Our Progress (and Parse the Response)
+    failureState.csrSigned = true
+    serialNumber := rootResp.Data["serial_number"].(string)
+    failureState.certSerialNumber = serialNumber
+
+    caChain := rootResp.Data["ca_chain"].([]interface{})
+    caChainPemBundle := ""
+    for _, cert := range caChain {
+        caChainPemBundle += cert.(string) + "\n"
+    }
+    failureState.caChain = caChainPemBundle
+
+    // Next Import Certificate
+    certificate := rootResp.Data["certificate"].(string)
+    issuerId, err := importIssuerWithName(client, intermediateMount, certificate, flagNewIssuerName)
+    failureState.certIssuerId = issuerId
+    if err != nil {
+        if strings.Contains(err.Error(), "error naming issuer") {
+            failureState.certImported = true
+            c.UI.Error(failureState.generateFailureMessage())
+            c.UI.Error(fmt.Sprintf("Error Naming Newly Imported Issuer: %v", err))
+            return 1
+        } else {
+            c.UI.Error(failureState.generateFailureMessage())
+            c.UI.Error(fmt.Sprintf("Error Importing Into %v Newly Created Issuer %v: %v", intermediateMount, certificate, err))
+            return 1
+        }
+    }
+    failureState.certImported = true
+
+    // Then Import Issuing Certificate
+    issuingCa := rootResp.Data["issuing_ca"].(string)
+    _, parentIssuerName := paths.Split(parentMountIssuer)
+    _, err = importIssuerWithName(client, intermediateMount, issuingCa, parentIssuerName)
+    if err != nil {
+        if strings.Contains(err.Error(), "error naming issuer") {
+            c.UI.Warn(fmt.Sprintf("Unable to Set Name on Parent Cert from %v Imported Into %v with serial %v, err: %v", parentIssuerName, intermediateMount, serialNumber, err))
+        } else {
+            c.UI.Error(failureState.generateFailureMessage())
+            c.UI.Error(fmt.Sprintf("Error Importing Into %v Newly Created Issuer %v: %v", intermediateMount, certificate, err))
+            return 1
+        }
+    }
+
+    // Finally Import CA_Chain (just in case there's more information)
+    if len(caChain) > 2 { // We've already imported parent cert and newly issued cert above
+        importData := map[string]interface{}{
+            "pem_bundle": caChainPemBundle,
+        }
+        _, err := client.Logical().Write(intermediateMount+"/issuers/import/cert", importData)
+        if err != nil {
+            c.UI.Error(failureState.generateFailureMessage())
+            c.UI.Error(fmt.Sprintf("Error Importing CA Chain into %v: %v", intermediateMount, err))
+            return 1
+        }
+    }
+    failureState.caChainImported = true
+
+    // Finally we read our newly issued certificate in order to tell our caller about it
+    readAndOutputNewCertificate(client, intermediateMount, issuerId, c)
+
+    return 0
+}
+
+func readAndOutputNewCertificate(client *api.Client, intermediateMount string, issuerId string, c *BaseCommand) {
+    resp, err := client.Logical().Read(sanitizePath(intermediateMount + "/issuer/" + issuerId))
+    if err != nil || resp == nil {
+        c.UI.Error(fmt.Sprintf("Error Reading Fully Imported Certificate from %v: %v",
+            intermediateMount+"/issuer/"+issuerId, err))
+        return
+    }
+
+    OutputSecret(c.UI, resp)
+}
+
+func importIssuerWithName(client *api.Client, mount string, bundle string, name string) (issuerUUID string, err error) {
+    importData := map[string]interface{}{
+        "pem_bundle": bundle,
+    }
+    writeResp, err := client.Logical().Write(mount+"/issuers/import/cert", importData)
+    if err != nil {
+        return "", err
+    }
+    mapping := writeResp.Data["mapping"].(map[string]interface{})
+    if len(mapping) > 1 {
+        return "", fmt.Errorf("multiple issuers returned where one was expected, got %v", writeResp)
+    }
+    for issuerId := range mapping {
+        issuerUUID = issuerId
+    }
+    if name != "" && name != "default" {
+        nameReq := map[string]interface{}{
+            "issuer_name": name,
+        }
+        ctx := context.Background()
+        _, err = client.Logical().JSONMergePatch(ctx, mount+"/issuer/"+issuerUUID, nameReq)
+        if err != nil {
+            return issuerUUID, fmt.Errorf("error naming issuer %v to %v: %v", issuerUUID, name, err)
+        }
+    }
+    return issuerUUID, nil
+}
+
+type inCaseOfFailure struct {
+    csrGenerated    bool
+    csrSigned       bool
+    certImported    bool
+    certNamed       bool
+    caChainImported bool
+
+    intermediateMount string
+    createdKeyId      string
+    csr               string
+    caChain           string
+    parentMount       string
+    parentIssuer      string
+    certSerialNumber  string
+    certIssuerId      string
+    newName           string
+}
+
+func (state inCaseOfFailure) generateFailureMessage() string {
+    message := "A failure has occurred"
+
+    if state.csrGenerated {
+        message += fmt.Sprintf(" after \n a Certificate Signing Request was successfully generated on mount %v", state.intermediateMount)
+    }
+    if state.csrSigned {
+        message += fmt.Sprintf(" and after \n that Certificate Signing Request was successfully signed by mount %v", state.parentMount)
+    }
+    if state.certImported {
+        message += fmt.Sprintf(" and after \n the signed certificate was reimported into mount %v, with issuerID %v", state.intermediateMount, state.certIssuerId)
+    }
+
+    if state.csrGenerated {
+        message += "\n\nTO CONTINUE: \n" + state.toContinue()
+    }
+    if state.csrGenerated && !state.certImported {
+        message += "\n\nTO ABORT: \n" + state.toAbort()
+    }
+
+    message += "\n"
+
+    return message
+}
+
+func (state inCaseOfFailure) toContinue() string {
+    message := ""
+    if !state.csrSigned {
+        message += fmt.Sprintf("You can continue to work with this Certificate Signing Request (CSR) PEM by saving"+
+            " it as `pki_int.csr`: %v \n Then call `vault write %v/sign-intermediate csr=@pki_int.csr ...` adding the "+
+            "same key-value arguments as to `pki issue` (except key_type and issuer_name) to generate the certificate "+
+            "and ca_chain", state.csr, state.parentIssuer)
+    }
+    if !state.certImported {
+        if state.caChain != "" {
+            message += fmt.Sprintf("The certificate chain, signed by %v, for this new certificate is: %v", state.parentIssuer, state.caChain)
+        }
+        message += fmt.Sprintf("You can continue to work with this Certificate (and chain) by saving it as "+
+            "chain.pem and importing it as `vault write %v/issuers/import/cert pem_bundle=@chain.pem`",
+            state.intermediateMount)
+    }
+    if !state.certNamed {
+        issuerId := state.certIssuerId
+        if issuerId == "" {
+            message += fmt.Sprintf("The issuer_id is returned as the key in a key_value map from importing the " +
+                "certificate chain.")
+            issuerId = "<issuer_id>"
+        }
+        message += fmt.Sprintf("You can name the newly imported issuer by calling `vault patch %v/issuer/%v "+
+            "issuer_name=%v`", state.intermediateMount, issuerId, state.newName)
+    }
+    return message
+}
+
+func (state inCaseOfFailure) toAbort() string {
+    if !state.csrGenerated || (!state.csrSigned && state.createdKeyId == "") {
+        return "No state was created by running this command. Try rerunning this command after resolving the error."
+    }
+    message := ""
+    if state.csrGenerated && state.createdKeyId != "" {
+        message += fmt.Sprintf(" A key with key ID %v was created on mount %v as part of this command."+
+            " If you do not wish to use this key and corresponding CSR/cert, you can delete that information by calling"+
+            " `vault delete %v/key/%v`", state.createdKeyId, state.intermediateMount, state.intermediateMount, state.createdKeyId)
+    }
+    if state.csrSigned {
+        message += fmt.Sprintf("A certificate with serial number %v was signed by mount %v as part of this command."+
+            " If you do not want to use this certificate, consider revoking it by calling `vault write %v/revoke/%v`",
+            state.certSerialNumber, state.parentMount, state.parentMount, state.certSerialNumber)
+    }
+    //if state.certImported {
+    //	message += fmt.Sprintf("An issuer with UUID %v was created on mount %v as part of this command. " +
+    //		"If you do not wish to use this issuer, consider deleting it by calling `vault delete %v/issuer/%v`",
+    //		state.certIssuerId, state.intermediateMount, state.intermediateMount, state.certIssuerId)
+    //}
+
+    return message
+}
diff --git a/command/pki_issue_intermediate_test.go b/command/pki_issue_intermediate_test.go
new file mode 100644
index 0000000..58f9e62
--- /dev/null
+++ b/command/pki_issue_intermediate_test.go
@@ -0,0 +1,208 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+    "bytes"
+    "encoding/json"
+    "testing"
+
+    "github.com/hashicorp/vault/api"
+)
+
+func TestPKIIssueIntermediate(t *testing.T) {
+    t.Parallel()
+
+    client, closer := testVaultServer(t)
+    defer closer()
+
+    // Relationship Map to Create
+    //        pki-root          |      pki-newroot      | pki-empty
+    // RootX1   RootX2   RootX4        RootX3
+    //   |                               |
+    //   ----------------------------------------------
+    //   v                               v
+    // IntX1                           IntX2             pki-int
+    //   |                               |
+    //   v                               v
+    // IntX3 (-----------------------) IntX3
+    //
+    // Here X1,X2 have the same name (same mount)
+    // RootX4 uses the same key as RootX1 (but a different common_name/subject)
+    // RootX3 has the same name, and is on a different mount
+    // RootX1 has issued IntX1; RootX3 has issued IntX2
+    createComplicatedIssuerSetUpWithIssueIntermediate(t, client)
+
+    runPkiVerifySignTests(t, client)
+
+    runPkiListIntermediateTests(t, client)
+}
+
+func createComplicatedIssuerSetUpWithIssueIntermediate(t *testing.T, client *api.Client) {
+    // Relationship Map to Create
+    //        pki-root          |      pki-newroot      | pki-empty
+    // RootX1   RootX2   RootX4        RootX3
+    //   |                               |
+    //   ----------------------------------------------
+    //   v                               v
+    // IntX1                           IntX2             pki-int
+    //   |                               |
+    //   v                               v
+    // IntX3 (-----------------------) IntX3
+    //
+    // Here X1,X2 have the same name (same mount)
+    // RootX4 uses the same key as RootX1 (but a different common_name/subject)
+    // RootX3 has the same name, and is on a different mount
+    // RootX1 has issued IntX1; RootX3 has issued IntX2
+
+    if err := client.Sys().Mount("pki-root", &api.MountInput{
+        Type: "pki",
+        Config: api.MountConfigInput{
+            MaxLeaseTTL: "36500d",
+        },
+    }); err != nil {
+        t.Fatalf("pki mount error: %#v", err)
+    }
+
+    if err := client.Sys().Mount("pki-newroot", &api.MountInput{
+        Type: "pki",
+        Config: api.MountConfigInput{
+            MaxLeaseTTL: "36500d",
+        },
+    }); err != nil {
+        t.Fatalf("pki mount error: %#v", err)
+    }
+
+    if err := client.Sys().Mount("pki-int", &api.MountInput{
+        Type: "pki",
+        Config: api.MountConfigInput{
+            MaxLeaseTTL: "36500d",
+        },
+    }); err != nil {
+        t.Fatalf("pki mount error: %#v", err)
+    }
+
+    // Used to check handling empty list responses: Not Used for
Any Issuers / Certificates + if err := client.Sys().Mount("pki-empty", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{}, + }); err != nil { + t.Fatalf("pki mount error: %#v", err) + } + + resp, err := client.Logical().Write("pki-root/root/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "Root X", + "ttl": "3650d", + "issuer_name": "rootX1", + "key_name": "rootX1", + }) + if err != nil || resp == nil { + t.Fatalf("failed to prime CA: %v", err) + } + + resp, err = client.Logical().Write("pki-root/root/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "Root X", + "ttl": "3650d", + "issuer_name": "rootX2", + }) + if err != nil || resp == nil { + t.Fatalf("failed to prime CA: %v", err) + } + + if resp, err := client.Logical().Write("pki-newroot/root/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "Root X", + "ttl": "3650d", + "issuer_name": "rootX3", + }); err != nil || resp == nil { + t.Fatalf("failed to prime CA: %v", err) + } + + if resp, err := client.Logical().Write("pki-root/root/generate/existing", map[string]interface{}{ + "common_name": "Root X4", + "ttl": "3650d", + "issuer_name": "rootX4", + "key_ref": "rootX1", + }); err != nil || resp == nil { + t.Fatalf("failed to prime CA: %v", err) + } + + // Next we create the Intermediates Using the Issue Intermediate Command + stdout := bytes.NewBuffer(nil) + stderr := bytes.NewBuffer(nil) + runOpts := &RunOptions{ + Stdout: stdout, + Stderr: stderr, + Client: client, + } + + // Intermediate X1 + intX1CallArgs := []string{ + "pki", "issue", "-format=json", "-issuer_name=intX1", + "pki-root/issuer/rootX1", + "pki-int/", + "key_type=rsa", + "common_name=Int X1", + "ttl=3650d", + } + codeOut := RunCustom(intX1CallArgs, runOpts) + if codeOut != 0 { + t.Fatalf("error issuing intermediate X1, code: %d \n stdout: %v \n stderr: %v", codeOut, stdout, stderr) + } + + // Intermediate X2 + intX2CallArgs := []string{ + "pki", "issue", "-format=json", "-issuer_name=intX2", + "pki-newroot/issuer/rootX3", + "pki-int/", + "key_type=ec", + "common_name=Int X2", + "ttl=3650d", + } + codeOut = RunCustom(intX2CallArgs, runOpts) + if codeOut != 0 { + t.Fatalf("error issuing intermediate X2, code: %d \n stdout: %v \n stderr: %v", codeOut, stdout, stderr) + } + + // Intermediate X3 + // Clear Buffers so that we can unmarshall json of just this call + stdout = bytes.NewBuffer(nil) + stderr = bytes.NewBuffer(nil) + runOpts = &RunOptions{ + Stdout: stdout, + Stderr: stderr, + Client: client, + } + intX3OriginalCallArgs := []string{ + "pki", "issue", "-format=json", "-issuer_name=intX3", + "pki-int/issuer/intX1", + "pki-int/", + "key_type=rsa", + "common_name=Int X3", + "ttl=3650d", + } + codeOut = RunCustom(intX3OriginalCallArgs, runOpts) + if codeOut != 0 { + t.Fatalf("error issuing intermediate X3, code: %d \n stdout: %v \n stderr: %v", codeOut, stdout, stderr) + } + var intX3Resp map[string]interface{} + json.Unmarshal(stdout.Bytes(), &intX3Resp) + intX3Data := intX3Resp["data"].(map[string]interface{}) + keyId := intX3Data["key_id"].(string) + + intX3AdaptedCallArgs := []string{ + "pki", "issue", "-format=json", "-issuer_name=intX3also", "-type=existing", + "pki-int/issuer/intX2", + "pki-int/", + "key_ref=" + keyId, + "common_name=Int X3", + "ttl=3650d", + } + codeOut = RunCustom(intX3AdaptedCallArgs, runOpts) + if codeOut != 0 { + t.Fatalf("error issuing intermediate X3also, code: %d \n stdout: %v \n stderr: %v", codeOut, stdout, stderr) + } +} diff 
--git a/command/pki_list_intermediate.go b/command/pki_list_intermediate.go
new file mode 100644
index 0000000..c62c580
--- /dev/null
+++ b/command/pki_list_intermediate.go
@@ -0,0 +1,304 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+    "encoding/json"
+    "fmt"
+    "strconv"
+    "strings"
+
+    "github.com/hashicorp/vault/api"
+
+    "github.com/ghodss/yaml"
+    "github.com/ryanuber/columnize"
+)
+
+type PKIListIntermediateCommand struct {
+    *BaseCommand
+
+    flagConfig          string
+    flagReturnIndicator string
+    flagDefaultDisabled bool
+    flagList            bool
+
+    flagUseNames bool
+
+    flagSignatureMatch    bool
+    flagIndirectSignMatch bool
+    flagKeyIdMatch        bool
+    flagSubjectMatch      bool
+    flagPathMatch         bool
+}
+
+func (c *PKIListIntermediateCommand) Synopsis() string {
+    return "Determine which of a list of certificates were issued by a given parent certificate"
+}
+
+func (c *PKIListIntermediateCommand) Help() string {
+    helpText := `
+Usage: vault pki list-intermediates PARENT [CHILD] [CHILD] [CHILD] ...
+
+  Lists the set of intermediate CAs issued by this parent issuer.
+
+  PARENT is the candidate issuer certificate against which every CHILD is
+  verified.
+
+  CHILD is an optional list of paths to certificates to be compared to the
+  PARENT, or pki mounts to look for certificates on. If CHILD is omitted
+  entirely, the list will be constructed from all accessible pki mounts.
+
+  This returns a list of issuer certificates and whether each is a match.
+  By default, the type of match required is whether the PARENT has the
+  expected subject, key_id, and could have (directly) signed this issuer.
+  The match criteria can be updated by changing the corresponding flag.
+
+` + c.Flags().Help()
+    return strings.TrimSpace(helpText)
+}
+
+func (c *PKIListIntermediateCommand) Flags() *FlagSets {
+    set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat)
+    f := set.NewFlagSet("Command Options")
+
+    f.BoolVar(&BoolVar{
+        Name:    "subject_match",
+        Target:  &c.flagSubjectMatch,
+        Default: true,
+        EnvVar:  "",
+        Usage:   `Whether the subject name of the potential parent cert matches the issuer name of the child cert.`,
+    })
+
+    f.BoolVar(&BoolVar{
+        Name:    "key_id_match",
+        Target:  &c.flagKeyIdMatch,
+        Default: true,
+        EnvVar:  "",
+        Usage:   `Whether the subject key id (SKID) of the potential parent cert matches the authority key id (AKID) of the child cert.`,
+    })
+
+    f.BoolVar(&BoolVar{
+        Name:    "path_match",
+        Target:  &c.flagPathMatch,
+        Default: false,
+        EnvVar:  "",
+        Usage:   `Whether the potential parent appears in the certificate chain field (ca_chain) of the issued cert.`,
+    })
+
+    f.BoolVar(&BoolVar{
+        Name:    "direct_sign",
+        Target:  &c.flagSignatureMatch,
+        Default: true,
+        EnvVar:  "",
+        Usage:   `Whether the key of the potential parent directly signed this issued certificate.`,
+    })
+
+    f.BoolVar(&BoolVar{
+        Name:    "indirect_sign",
+        Target:  &c.flagIndirectSignMatch,
+        Default: true,
+        EnvVar:  "",
+        Usage:   `Whether trusting the parent certificate is sufficient to trust the child certificate.`,
+    })
+
+    f.BoolVar(&BoolVar{
+        Name:    "use_names",
+        Target:  &c.flagUseNames,
+        Default: false,
+        EnvVar:  "",
+        Usage:   `Whether the list of issuers returned is referred to by name (when it exists) rather than by uuid.`,
+    })
+
+    return set
+}
+
+func (c *PKIListIntermediateCommand) Run(args []string) int {
+    f := c.Flags()
+    if err := f.Parse(args); err != nil {
+        c.UI.Error(err.Error())
+        return 1
+    }
+
+    args = f.Args()
+
+    if len(args) < 1 {
+        c.UI.Error("Not enough arguments (expected potential parent, got nothing)")
+        return 1
+    } else if len(args) > 2 {
+        for _, arg := range args {
+            if strings.HasPrefix(arg, "-") {
+                c.UI.Warn(fmt.Sprintf("Options (%v) must be specified before positional arguments (%v)", arg, args[0]))
+                break
+            }
+        }
+    }
+
+    client, err := c.Client()
+    if err != nil {
+        c.UI.Error(fmt.Sprintf("Failed to obtain client: %s", err))
+        return 1
+    }
+
+    issuer := sanitizePath(args[0])
+    var issued []string
+    if len(args) > 1 {
+        for _, arg := range args[1:] {
+            cleanPath := sanitizePath(arg)
+            // Arg Might be a Fully Qualified Path
+            if strings.Contains(cleanPath, "/issuer/") ||
+                strings.Contains(cleanPath, "/certs/") ||
+                strings.Contains(cleanPath, "/revoked/") {
+                issued = append(issued, cleanPath)
+            } else { // Or Arg Might be a Mount
+                mountCaList, err := c.getIssuerListFromMount(client, arg)
+                if err != nil {
+                    c.UI.Error(err.Error())
+                    return 1
+                }
+                issued = append(issued, mountCaList...)
+            }
+        }
+    } else {
+        mountListRaw, err := client.Logical().Read("/sys/mounts/")
+        if err != nil {
+            c.UI.Error(fmt.Sprintf("Failed to Read List of Mounts With Potential Issuers: %v", err))
+            return 1
+        }
+        for path, rawValueMap := range mountListRaw.Data {
+            valueMap := rawValueMap.(map[string]interface{})
+            if valueMap["type"].(string) == "pki" {
+                mountCaList, err := c.getIssuerListFromMount(client, sanitizePath(path))
+                if err != nil {
+                    c.UI.Error(err.Error())
+                    return 1
+                }
+                issued = append(issued, mountCaList...)
+            }
+        }
+    }
+
+    childrenMatches := make(map[string]bool)
+
+    constraintMap := map[string]bool{
+        // Note: the written order here is immaterial; these entries form a set of required match criteria.
+        "subject_match":   c.flagSubjectMatch,
+        "path_match":      c.flagPathMatch,
+        "trust_match":     c.flagIndirectSignMatch,
+        "key_id_match":    c.flagKeyIdMatch,
+        "signature_match": c.flagSignatureMatch,
+    }
+
+    issuerResp, err := readIssuer(client, issuer)
+    if err != nil {
+        c.UI.Error(fmt.Sprintf("Failed to read parent issuer on path %s: %s", issuer, err.Error()))
+        return 1
+    }
+
+    for _, child := range issued {
+        path := sanitizePath(child)
+        if path != "" {
+            verifyResults, err := verifySignBetween(client, issuerResp, path)
+            if err != nil {
+                c.UI.Error(fmt.Sprintf("Failed to run verification on path %v: %v", path, err))
+                return 1
+            }
+            childrenMatches[path] = checkIfResultsMatchFilters(verifyResults, constraintMap)
+        }
+    }
+
+    err = c.outputResults(childrenMatches)
+    if err != nil {
+        c.UI.Error(err.Error())
+        return 1
+    }
+
+    return 0
+}
+
+func (c *PKIListIntermediateCommand) getIssuerListFromMount(client *api.Client, mountString string) ([]string, error) {
+    var issuerList []string
+    issuerListEndpoint := sanitizePath(mountString) + "/issuers"
+    rawIssuersResp, err := client.Logical().List(issuerListEndpoint)
+    if err != nil {
+        return issuerList, fmt.Errorf("failed to read list of issuers within mount %v: %v", mountString, err)
+    }
+    if rawIssuersResp == nil { // No Issuers (Empty Mount)
+        return issuerList, nil
+    }
+    issuersMap := rawIssuersResp.Data["keys"]
+    certList := issuersMap.([]interface{})
+    for _, certId := range certList {
+        identifier := certId.(string)
+        if c.flagUseNames {
+            issuerReadResp, err := client.Logical().Read(sanitizePath(mountString) + "/issuer/" + identifier)
+            if err != nil {
+                c.UI.Warn(fmt.Sprintf("Unable to Fetch Issuer to Recover Name at: %v", sanitizePath(mountString)+"/issuer/"+identifier))
+            }
+            if issuerReadResp != nil {
+                issuerName := issuerReadResp.Data["issuer_name"].(string)
+                if issuerName != "" {
+                    identifier =
issuerName + } + } + } + issuerList = append(issuerList, sanitizePath(mountString)+"/issuer/"+identifier) + } + return issuerList, nil +} + +func checkIfResultsMatchFilters(verifyResults, constraintMap map[string]bool) bool { + for key, required := range constraintMap { + if required && !verifyResults[key] { + return false + } + } + return true +} + +func (c *PKIListIntermediateCommand) outputResults(results map[string]bool) error { + switch Format(c.UI) { + case "", "table": + return c.outputResultsTable(results) + case "json": + return c.outputResultsJSON(results) + case "yaml": + return c.outputResultsYAML(results) + default: + return fmt.Errorf("unknown output format: %v", Format(c.UI)) + } +} + +func (c *PKIListIntermediateCommand) outputResultsTable(results map[string]bool) error { + data := []string{"intermediate" + hopeDelim + "match?"} + for field, finding := range results { + row := field + hopeDelim + strconv.FormatBool(finding) + data = append(data, row) + } + c.UI.Output(tableOutput(data, &columnize.Config{ + Delim: hopeDelim, + })) + c.UI.Output("\n") + + return nil +} + +func (c *PKIListIntermediateCommand) outputResultsJSON(results map[string]bool) error { + bytes, err := json.MarshalIndent(results, "", " ") + if err != nil { + return err + } + + c.UI.Output(string(bytes)) + return nil +} + +func (c *PKIListIntermediateCommand) outputResultsYAML(results map[string]bool) error { + bytes, err := yaml.Marshal(results) + if err != nil { + return err + } + + c.UI.Output(string(bytes)) + return nil +} diff --git a/command/pki_list_intermediate_test.go b/command/pki_list_intermediate_test.go new file mode 100644 index 0000000..d494c19 --- /dev/null +++ b/command/pki_list_intermediate_test.go @@ -0,0 +1,246 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/hashicorp/vault/api" +) + +func TestPKIListIntermediate(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + // Relationship Map to Create + // pki-root | pki-newroot | pki-empty + // RootX1 RootX2 RootX4 RootX3 + // | | + // ---------------------------------------------- + // v v + // IntX1 IntX2 pki-int + // | | + // v v + // IntX3 (-----------------------) IntX3(also) + // + // Here X1,X2 have the same name (same mount) + // RootX4 uses the same key as RootX1 (but a different common_name/subject) + // RootX3 has the same name, and is on a different mount + // RootX1 has issued IntX1; RootX3 has issued IntX2 + createComplicatedIssuerSetUp(t, client) + + runPkiListIntermediateTests(t, client) +} + +func runPkiListIntermediateTests(t *testing.T, client *api.Client) { + cases := []struct { + name string + args []string + expectedMatches map[string]bool + jsonOut bool + shouldError bool + expectErrorCont string + expectErrorNotCont string + nonJsonOutputCont string + }{ + { + "rootX1-match-everything-no-constraints", + []string{ + "pki", "list-intermediates", "-format=json", "-use_names=true", + "-subject_match=false", "-key_id_match=false", "-direct_sign=false", "-indirect_sign=false", "-path_match=false", + "pki-root/issuer/rootX1", + }, + map[string]bool{ + "pki-root/issuer/rootX1": true, + "pki-root/issuer/rootX2": true, + "pki-newroot/issuer/rootX3": true, + "pki-root/issuer/rootX4": true, + "pki-int/issuer/intX1": true, + "pki-int/issuer/intX2": true, + "pki-int/issuer/intX3": true, + "pki-int/issuer/intX3also": true, + "pki-int/issuer/rootX1": true, + "pki-int/issuer/rootX3": true, + }, + true, + false, + "", + "", + "", + }, + { + "rootX1-default-children", + []string{"pki", "list-intermediates", "-format=json", "-use_names=true", "pki-root/issuer/rootX1"}, + map[string]bool{ + "pki-root/issuer/rootX1": true, + "pki-root/issuer/rootX2": false, + "pki-newroot/issuer/rootX3": false, + "pki-root/issuer/rootX4": false, + "pki-int/issuer/intX1": true, + "pki-int/issuer/intX2": false, + "pki-int/issuer/intX3": false, + "pki-int/issuer/intX3also": false, + "pki-int/issuer/rootX1": true, + "pki-int/issuer/rootX3": false, + }, + true, + false, + "", + "", + "", + }, + { + "rootX1-subject-match-only", + []string{ + "pki", "list-intermediates", "-format=json", "-use_names=true", + "-key_id_match=false", "-direct_sign=false", "-indirect_sign=false", + "pki-root/issuer/rootX1", + }, + map[string]bool{ + "pki-root/issuer/rootX1": true, + "pki-root/issuer/rootX2": true, + "pki-newroot/issuer/rootX3": true, + "pki-root/issuer/rootX4": false, + "pki-int/issuer/intX1": true, + "pki-int/issuer/intX2": true, + "pki-int/issuer/intX3": false, + "pki-int/issuer/intX3also": false, + "pki-int/issuer/rootX1": true, + "pki-int/issuer/rootX3": true, + }, + true, + false, + "", + "", + "", + }, + { + "rootX1-in-path", + []string{ + "pki", "list-intermediates", "-format=json", "-use_names=true", + "-subject_match=false", "-key_id_match=false", "-direct_sign=false", "-indirect_sign=false", "-path_match=true", + "pki-root/issuer/rootX1", + }, + map[string]bool{ + "pki-root/issuer/rootX1": true, + "pki-root/issuer/rootX2": false, + "pki-newroot/issuer/rootX3": false, + "pki-root/issuer/rootX4": false, + "pki-int/issuer/intX1": true, + "pki-int/issuer/intX2": false, + "pki-int/issuer/intX3": true, + "pki-int/issuer/intX3also": false, + "pki-int/issuer/rootX1": true, + 
"pki-int/issuer/rootX3": false, + }, + true, + false, + "", + "", + "", + }, + { + "rootX1-only-int-mount", + []string{ + "pki", "list-intermediates", "-format=json", "-use_names=true", + "-subject_match=false", "-key_id_match=false", "-direct_sign=false", "-indirect_sign=false", "-path_match=true", + "pki-root/issuer/rootX1", "pki-int/", + }, + map[string]bool{ + "pki-int/issuer/intX1": true, + "pki-int/issuer/intX2": false, + "pki-int/issuer/intX3": true, + "pki-int/issuer/intX3also": false, + "pki-int/issuer/rootX1": true, + "pki-int/issuer/rootX3": false, + }, + true, + false, + "", + "", + "", + }, + { + "rootX1-subject-match-root-mounts-only", + []string{ + "pki", "list-intermediates", "-format=json", "-use_names=true", + "-key_id_match=false", "-direct_sign=false", "-indirect_sign=false", + "pki-root/issuer/rootX1", "pki-root/", "pki-newroot", "pki-empty", + }, + map[string]bool{ + "pki-root/issuer/rootX1": true, + "pki-root/issuer/rootX2": true, + "pki-newroot/issuer/rootX3": true, + "pki-root/issuer/rootX4": false, + }, + true, + false, + "", + "", + "", + }, + { + "rootX1-subject-match-these-certs-only", + []string{ + "pki", "list-intermediates", "-format=json", "-use_names=true", + "-key_id_match=false", "-direct_sign=false", "-indirect_sign=false", + "pki-root/issuer/rootX1", "pki-root/issuer/rootX2", "pki-newroot/issuer/rootX3", "pki-root/issuer/rootX4", + }, + map[string]bool{ + "pki-root/issuer/rootX2": true, + "pki-newroot/issuer/rootX3": true, + "pki-root/issuer/rootX4": false, + }, + true, + false, + "", + "", + "", + }, + } + for _, testCase := range cases { + var errString string + var results map[string]interface{} + var stdOut string + + if testCase.jsonOut { + results, errString = execPKIVerifyJson(t, client, false, testCase.shouldError, testCase.args) + } else { + stdOut, errString = execPKIVerifyNonJson(t, client, testCase.shouldError, testCase.args) + } + + // Verify Error Behavior + if testCase.shouldError { + if errString == "" { + t.Fatalf("Expected error in Testcase %s : no error produced, got results %s", testCase.name, results) + } + if testCase.expectErrorCont != "" && !strings.Contains(errString, testCase.expectErrorCont) { + t.Fatalf("Expected error in Testcase %s to contain %s, but got error %s", testCase.name, testCase.expectErrorCont, errString) + } + if testCase.expectErrorNotCont != "" && strings.Contains(errString, testCase.expectErrorNotCont) { + t.Fatalf("Expected error in Testcase %s to not contain %s, but got error %s", testCase.name, testCase.expectErrorNotCont, errString) + } + } else { + if errString != "" { + t.Fatalf("Error in Testcase %s : no error expected, but got error: %s", testCase.name, errString) + } + } + + // Verify Output + if testCase.jsonOut { + isMatch, errString := verifyExpectedJson(testCase.expectedMatches, results) + if !isMatch { + t.Fatalf("Expected Results for Testcase %s, do not match returned results %s", testCase.name, errString) + } + } else { + if !strings.Contains(stdOut, testCase.nonJsonOutputCont) { + t.Fatalf("Expected standard output for Testcase %s to contain %s, but got %s", testCase.name, testCase.nonJsonOutputCont, stdOut) + } + } + + } +} diff --git a/command/pki_reissue_intermediate.go b/command/pki_reissue_intermediate.go new file mode 100644 index 0000000..852c0c0 --- /dev/null +++ b/command/pki_reissue_intermediate.go @@ -0,0 +1,293 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+    "crypto/ecdsa"
+    "crypto/elliptic"
+    "crypto/rsa"
+    "crypto/x509"
+    "encoding/hex"
+    "fmt"
+    "io"
+    "net"
+    "net/url"
+    "os"
+    "strings"
+
+    "github.com/posener/complete"
+)
+
+type PKIReIssueCACommand struct {
+    *BaseCommand
+
+    flagConfig          string
+    flagReturnIndicator string
+    flagDefaultDisabled bool
+    flagList            bool
+
+    flagKeyStorageSource string
+    flagNewIssuerName    string
+}
+
+func (c *PKIReIssueCACommand) Synopsis() string {
+    return "Uses a parent certificate and a template certificate to create a new issuer on a child mount"
+}
+
+func (c *PKIReIssueCACommand) Help() string {
+    helpText := `
+Usage: vault pki reissue PARENT TEMPLATE CHILD_MOUNT options
+
+PARENT is the fully qualified path of the issuer in vault which will sign the new intermediate certificate.
+
+TEMPLATE is the fully qualified path of an existing issuer whose certificate fields are used as defaults for the new issuer.
+
+CHILD_MOUNT is the path of the mount in vault where the new issuer is saved.
+
+options are key=value arguments which override the values derived from TEMPLATE; they accept the same keys as the pki issue command.
+`
+    return strings.TrimSpace(helpText)
+}
+
+func (c *PKIReIssueCACommand) Flags() *FlagSets {
+    set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat)
+    f := set.NewFlagSet("Command Options")
+
+    f.StringVar(&StringVar{
+        Name:       "type",
+        Target:     &c.flagKeyStorageSource,
+        Default:    "internal",
+        EnvVar:     "",
+        Usage:      `Options are "existing" - to use an existing key inside vault, "internal" - to generate a new key inside vault, or "kms" - to link to an external key. Exported keys are not available through this API.`,
+        Completion: complete.PredictSet("internal", "existing", "kms"),
+    })
+
+    f.StringVar(&StringVar{
+        Name:    "issuer_name",
+        Target:  &c.flagNewIssuerName,
+        Default: "",
+        EnvVar:  "",
+        Usage:   `If present, the newly created issuer will be given this name.`,
+    })
+
+    return set
+}
+
+func (c *PKIReIssueCACommand) Run(args []string) int {
+    // Parse Args
+    f := c.Flags()
+    if err := f.Parse(args); err != nil {
+        c.UI.Error(err.Error())
+        return 1
+    }
+    args = f.Args()
+
+    if len(args) < 3 {
+        c.UI.Error("Not enough arguments: expected parent issuer, template issuer, and child-mount location")
+        return 1
+    }
+
+    stdin := (io.Reader)(os.Stdin)
+    userData, err := parseArgsData(stdin, args[3:])
+    if err != nil {
+        c.UI.Error(fmt.Sprintf("Failed to parse K=V data: %s", err))
+        return 1
+    }
+
+    // Check We Have a Client
+    client, err := c.Client()
+    if err != nil {
+        c.UI.Error(fmt.Sprintf("Failed to obtain client: %v", err))
+        return 1
+    }
+
+    parentIssuer := sanitizePath(args[0]) // /pki/issuer/default
+    templateIssuer := sanitizePath(args[1])
+    intermediateMount := sanitizePath(args[2])
+
+    templateIssuerBundle, err := readIssuer(client, templateIssuer)
+    if err != nil {
+        c.UI.Error(fmt.Sprintf("Error fetching template certificate %v: %v", templateIssuer, err))
+        return 1
+    }
+    certificate := templateIssuerBundle.certificate
+
+    useExistingKey := c.flagKeyStorageSource == "existing"
+    keyRef := ""
+    if useExistingKey {
+        keyRef = templateIssuerBundle.keyId
+
+        if keyRef == "" {
+            c.UI.Error(fmt.Sprintf("Template issuer %s did not have a key id field set in response which is required", templateIssuer))
+            return 1
+        }
+    }
+
+    templateData, err := parseTemplateCertificate(*certificate, useExistingKey, keyRef)
+    if err != nil {
+        c.UI.Error(fmt.Sprintf("Error generating template data from %v: %v", templateIssuer, err))
+        return 1
+    }
+    data := updateTemplateWithData(templateData, userData)
+
+    return pkiIssue(c.BaseCommand, parentIssuer, intermediateMount, c.flagNewIssuerName, c.flagKeyStorageSource, data)
+}
+
+func updateTemplateWithData(template map[string]interface{}, changes map[string]interface{}) map[string]interface{} {
+    data := map[string]interface{}{}
+
+    for key, value := range template {
+        data[key] = value
+    }
+
+    // ttl and not_after set the same thing.
Delete template ttl if using not_after: + if _, ok := changes["not_after"]; ok { + delete(data, "ttl") + } + + // If we are updating the key_type, do not set key_bits + if _, ok := changes["key_type"]; ok && changes["key_type"] != template["key_type"] { + delete(data, "key_bits") + } + + for key, value := range changes { + data[key] = value + } + + return data +} + +func parseTemplateCertificate(certificate x509.Certificate, useExistingKey bool, keyRef string) (templateData map[string]interface{}, err error) { + // Generate Certificate Signing Parameters + templateData = map[string]interface{}{ + "common_name": certificate.Subject.CommonName, + "alt_names": makeAltNamesCommaSeparatedString(certificate.DNSNames, certificate.EmailAddresses), + "ip_sans": makeIpAddressCommaSeparatedString(certificate.IPAddresses), + "uri_sans": makeUriCommaSeparatedString(certificate.URIs), + // other_sans (string: "") - Specifies custom OID/UTF8-string SANs. These must match values specified on the role in allowed_other_sans (see role creation for allowed_other_sans globbing rules). The format is the same as OpenSSL: ;: where the only current valid type is UTF8. This can be a comma-delimited list or a JSON string slice. + // Punting on Other_SANs, shouldn't really be on CAs + "signature_bits": findSignatureBits(certificate.SignatureAlgorithm), + "exclude_cn_from_sans": determineExcludeCnFromSans(certificate), + "ou": certificate.Subject.OrganizationalUnit, + "organization": certificate.Subject.Organization, + "country": certificate.Subject.Country, + "locality": certificate.Subject.Locality, + "province": certificate.Subject.Province, + "street_address": certificate.Subject.StreetAddress, + "postal_code": certificate.Subject.PostalCode, + "serial_number": certificate.Subject.SerialNumber, + "ttl": (certificate.NotAfter.Sub(certificate.NotBefore)).String(), + "max_path_length": certificate.MaxPathLen, + "permitted_dns_domains": strings.Join(certificate.PermittedDNSDomains, ","), + "use_pss": isPSS(certificate.SignatureAlgorithm), + } + + if useExistingKey { + templateData["skid"] = hex.EncodeToString(certificate.SubjectKeyId) // TODO: Double Check this with someone + if keyRef == "" { + return nil, fmt.Errorf("unable to create certificate template for existing key without a key_id") + } + templateData["key_ref"] = keyRef + } else { + templateData["key_type"] = getKeyType(certificate.PublicKeyAlgorithm.String()) + templateData["key_bits"] = findBitLength(certificate.PublicKey) + } + + return templateData, nil +} + +func isPSS(algorithm x509.SignatureAlgorithm) bool { + switch algorithm { + case x509.SHA384WithRSAPSS, x509.SHA512WithRSAPSS, x509.SHA256WithRSAPSS: + return true + default: + return false + } +} + +func makeAltNamesCommaSeparatedString(names []string, emails []string) string { + return strings.Join(names, ",") + "," + strings.Join(emails, ",") +} + +func makeUriCommaSeparatedString(uris []*url.URL) string { + stringAddresses := make([]string, len(uris)) + for i, uri := range uris { + stringAddresses[i] = uri.String() + } + return strings.Join(stringAddresses, ",") +} + +func makeIpAddressCommaSeparatedString(addresses []net.IP) string { + stringAddresses := make([]string, len(addresses)) + for i, address := range addresses { + stringAddresses[i] = address.String() + } + return strings.Join(stringAddresses, ",") +} + +func determineExcludeCnFromSans(certificate x509.Certificate) bool { + cn := certificate.Subject.CommonName + if cn == "" { + return false + } + + emails := certificate.EmailAddresses 
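+	// If the CN also appears among the email or DNS SANs, the original
+	// certificate replicated it there, i.e. exclude_cn_from_sans was false;
+	// only a CN absent from every SAN suggests it was excluded at issuance.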
+ for _, email := range emails { + if email == cn { + return false + } + } + + dnses := certificate.DNSNames + for _, dns := range dnses { + if dns == cn { + return false + } + } + + return true +} + +func findBitLength(publicKey any) int { + if publicKey == nil { + return 0 + } + switch pub := publicKey.(type) { + case *rsa.PublicKey: + return pub.N.BitLen() + case *ecdsa.PublicKey: + switch pub.Curve { + case elliptic.P224(): + return 224 + case elliptic.P256(): + return 256 + case elliptic.P384(): + return 384 + case elliptic.P521(): + return 521 + default: + return 0 + } + default: + return 0 + } +} + +func findSignatureBits(algo x509.SignatureAlgorithm) int { + switch algo { + case x509.MD2WithRSA, x509.MD5WithRSA, x509.SHA1WithRSA, x509.DSAWithSHA1, x509.ECDSAWithSHA1: + return -1 + case x509.SHA256WithRSA, x509.DSAWithSHA256, x509.ECDSAWithSHA256, x509.SHA256WithRSAPSS: + return 256 + case x509.SHA384WithRSA, x509.ECDSAWithSHA384, x509.SHA384WithRSAPSS: + return 384 + case x509.SHA512WithRSA, x509.SHA512WithRSAPSS, x509.ECDSAWithSHA512: + return 512 + case x509.PureEd25519: + return 0 + default: + return -1 + } +} + +func getKeyType(goKeyType string) string { + switch goKeyType { + case "RSA": + return "rsa" + case "ECDSA": + return "ec" + case "Ed25519": + return "ed25519" + default: + return "" + } +} diff --git a/command/pki_reissue_intermediate_test.go b/command/pki_reissue_intermediate_test.go new file mode 100644 index 0000000..e485f04 --- /dev/null +++ b/command/pki_reissue_intermediate_test.go @@ -0,0 +1,198 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "testing" + + "github.com/hashicorp/vault/api" +) + +// TestPKIReIssueIntermediate tests that the pki reissue command line tool accurately copies information from the +// template certificate to the newly issued certificate, by issuing and reissuing several certificates and seeing how +// they related to each other. 
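+//
+// For reference, the reissue invocation shape exercised below is (arguments
+// mirrored from createComplicatedIssuerSetUpWithReIssueIntermediate, so the
+// paths and key=value pairs here are illustrative, not exhaustive):
+//
+//	vault pki reissue -format=json -issuer_name=intX2 \
+//	    pki-newroot/issuer/rootX3 pki-int/issuer/intX1 pki-int/ \
+//	    key_type=ec common_name='Int X2'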
+func TestPKIReIssueIntermediate(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + // Relationship Map to Create + // pki-root | pki-newroot | pki-empty + // RootX1 RootX2 RootX4 RootX3 + // | | + // ---------------------------------------------- + // v v + // IntX1 IntX2 pki-int + // | | + // v v + // IntX3 (-----------------------) IntX3 + // + // Here X1,X2 have the same name (same mount) + // RootX4 uses the same key as RootX1 (but a different common_name/subject) + // RootX3 has the same name, and is on a different mount + // RootX1 has issued IntX1; RootX3 has issued IntX2 + createComplicatedIssuerSetUpWithReIssueIntermediate(t, client) + + runPkiVerifySignTests(t, client) + + runPkiListIntermediateTests(t, client) +} + +func createComplicatedIssuerSetUpWithReIssueIntermediate(t *testing.T, client *api.Client) { + // Relationship Map to Create + // pki-root | pki-newroot | pki-empty + // RootX1 RootX2 RootX4 RootX3 + // | | + // ---------------------------------------------- + // v v + // IntX1 IntX2 pki-int + // | | + // v v + // IntX3 (-----------------------) IntX3 + // + // Here X1,X2 have the same name (same mount) + // RootX4 uses the same key as RootX1 (but a different common_name/subject) + // RootX3 has the same name, and is on a different mount + // RootX1 has issued IntX1; RootX3 has issued IntX2 + + if err := client.Sys().Mount("pki-root", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + MaxLeaseTTL: "36500d", + }, + }); err != nil { + t.Fatalf("pki mount error: %#v", err) + } + + if err := client.Sys().Mount("pki-newroot", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + MaxLeaseTTL: "36500d", + }, + }); err != nil { + t.Fatalf("pki mount error: %#v", err) + } + + if err := client.Sys().Mount("pki-int", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + MaxLeaseTTL: "36500d", + }, + }); err != nil { + t.Fatalf("pki mount error: %#v", err) + } + + // Used to check handling empty list responses: Not Used for Any Issuers / Certificates + if err := client.Sys().Mount("pki-empty", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{}, + }); err != nil { + t.Fatalf("pki mount error: %#v", err) + } + + resp, err := client.Logical().Write("pki-root/root/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "Root X", + "ttl": "3650d", + "issuer_name": "rootX1", + "key_name": "rootX1", + }) + if err != nil || resp == nil { + t.Fatalf("failed to prime CA: %v", err) + } + + resp, err = client.Logical().Write("pki-root/root/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "Root X", + "ttl": "3650d", + "issuer_name": "rootX2", + }) + if err != nil || resp == nil { + t.Fatalf("failed to prime CA: %v", err) + } + + if resp, err := client.Logical().Write("pki-newroot/root/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "Root X", + "ttl": "3650d", + "issuer_name": "rootX3", + }); err != nil || resp == nil { + t.Fatalf("failed to prime CA: %v", err) + } + + if resp, err := client.Logical().Write("pki-root/root/generate/existing", map[string]interface{}{ + "common_name": "Root X4", + "ttl": "3650d", + "issuer_name": "rootX4", + "key_ref": "rootX1", + }); err != nil || resp == nil { + t.Fatalf("failed to prime CA: %v", err) + } + + stdout := bytes.NewBuffer(nil) + stderr := bytes.NewBuffer(nil) + runOpts := &RunOptions{ + Stdout: stdout, + Stderr: stderr, + Client: client, + } + + // Intermediate X1 
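+	// (issued with the plain `pki issue` subcommand so that it can act as the
+	// template for the intX2 reissue call below)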
+	intX1CallArgs := []string{
+		"pki", "issue", "-format=json", "-issuer_name=intX1",
+		"pki-root/issuer/rootX1",
+		"pki-int/",
+		"key_type=rsa",
+		"common_name=Int X1",
+		"ou=thing",
+		"ttl=3650d",
+	}
+	codeOut := RunCustom(intX1CallArgs, runOpts)
+	if codeOut != 0 {
+		t.Fatalf("error issuing intermediate X1, code: %d \n stdout: %v \n stderr: %v", codeOut, stdout, stderr)
+	}
+
+	// Intermediate X2 - using ReIssue
+	intX2CallArgs := []string{
+		"pki", "reissue", "-format=json", "-issuer_name=intX2",
+		"pki-newroot/issuer/rootX3",
+		"pki-int/issuer/intX1",
+		"pki-int/",
+		"key_type=ec",
+		"common_name=Int X2",
+	}
+	codeOut = RunCustom(intX2CallArgs, runOpts)
+	if codeOut != 0 {
+		t.Fatalf("error issuing intermediate X2, code: %d \n stdout: %v \n stderr: %v", codeOut, stdout, stderr)
+	}
+
+	// Intermediate X3
+	intX3OriginalCallArgs := []string{
+		"pki", "issue", "-format=json", "-issuer_name=intX3",
+		"pki-int/issuer/intX1",
+		"pki-int/",
+		"key_type=ec",
+		"use_pss=true", // This is meaningful because intX1, the signing issuer, is an RSA key
+		"signature_bits=512",
+		"common_name=Int X3",
+		"ttl=3650d",
+	}
+	codeOut = RunCustom(intX3OriginalCallArgs, runOpts)
+	if codeOut != 0 {
+		t.Fatalf("error issuing intermediate X3, code: %d \n stdout: %v \n stderr: %v", codeOut, stdout, stderr)
+	}
+
+	intX3AdaptedCallArgs := []string{
+		"pki", "reissue", "-format=json", "-issuer_name=intX3also", "-type=existing",
+		"pki-int/issuer/intX2", // This is an EC key
+		"pki-int/issuer/intX3", // This template includes use_pss = true which can't be accommodated
+		"pki-int/",
+	}
+	codeOut = RunCustom(intX3AdaptedCallArgs, runOpts)
+	if codeOut != 0 {
+		t.Fatalf("error issuing intermediate X3also, code: %d \n stdout: %v \n stderr: %v", codeOut, stdout, stderr)
+	}
+}
diff --git a/command/pki_verify_sign.go b/command/pki_verify_sign.go
new file mode 100644
index 0000000..b5a864f
--- /dev/null
+++ b/command/pki_verify_sign.go
@@ -0,0 +1,309 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"bytes"
+	"crypto/x509"
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/hashicorp/vault/command/healthcheck"
+
+	"github.com/ghodss/yaml"
+	"github.com/hashicorp/vault/api"
+	"github.com/ryanuber/columnize"
+)
+
+type PKIVerifySignCommand struct {
+	*BaseCommand
+
+	flagConfig          string
+	flagReturnIndicator string
+	flagDefaultDisabled bool
+	flagList            bool
+}
+
+func (c *PKIVerifySignCommand) Synopsis() string {
+	return "Check whether one certificate validates another specified certificate"
+}
+
+func (c *PKIVerifySignCommand) Help() string {
+	helpText := `
+Usage: vault pki verify-sign POSSIBLE-ISSUER POSSIBLE-ISSUED
+
+  Verifies whether the listed issuer has signed the listed issued certificate.
+
+  POSSIBLE-ISSUER and POSSIBLE-ISSUED are the fully name-spaced paths to
+  issuer certificates, for instance: 'ns1/mount1/issuer/issuerName/json'.
+
+  Returns five fields of information:
+
+  - signature_match: was the key of the issuer used to sign the issued.
+  - path_match: the possible issuer appears in the valid certificate chain
+    of the issued.
+  - key_id_match: does the key-id of the issuer match the authority key_id
+    of the issued certificate.
+  - subject_match: does the subject name of the issuer match the issuer
+    subject of the issued.
+  - trust_match: if someone trusted the parent issuer, is the chain
+    provided sufficient to trust the child issued.
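+
+  For example (mount and issuer names here are illustrative):
+
+    $ vault pki verify-sign pki-root/issuer/rootX1 pki-int/issuer/intX1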
+ +` + c.Flags().Help() + return strings.TrimSpace(helpText) +} + +func (c *PKIVerifySignCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + return set +} + +func (c *PKIVerifySignCommand) Run(args []string) int { + f := c.Flags() + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + + if len(args) < 2 { + if len(args) == 0 { + c.UI.Error("Not enough arguments (expected potential issuer and issued, got nothing)") + } else { + c.UI.Error("Not enough arguments (expected both potential issuer and issued, got only one)") + } + return 1 + } else if len(args) > 2 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected only potential issuer and issued, got %d arguments)", len(args))) + for _, arg := range args { + if strings.HasPrefix(arg, "-") { + c.UI.Warn(fmt.Sprintf("Options (%v) must be specified before positional arguments (%v)", arg, args[0])) + break + } + } + return 1 + } + + issuer := sanitizePath(args[0]) + issued := sanitizePath(args[1]) + + client, err := c.Client() + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to obtain client: %s", err)) + return 1 + } + + issuerResp, err := readIssuer(client, issuer) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to read issuer: %s: %s", issuer, err.Error())) + return 1 + } + + results, err := verifySignBetween(client, issuerResp, issued) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to run verification: %v", err)) + return pkiRetUsage + } + + c.outputResults(results, issuer, issued) + + return 0 +} + +func verifySignBetween(client *api.Client, issuerResp *issuerResponse, issuedPath string) (map[string]bool, error) { + // Note that this eats warnings + + issuerCert := issuerResp.certificate + issuerKeyId := issuerCert.SubjectKeyId + + // Fetch and Parse the Potential Issued Cert + issuedCertBundle, err := readIssuer(client, issuedPath) + if err != nil { + return nil, fmt.Errorf("error: unable to fetch issuer %v: %w", issuedPath, err) + } + parentKeyId := issuedCertBundle.certificate.AuthorityKeyId + + // Check the Chain-Match + rootCertPool := x509.NewCertPool() + rootCertPool.AddCert(issuerCert) + checkTrustPathOptions := x509.VerifyOptions{ + Roots: rootCertPool, + } + trust := false + trusts, err := issuedCertBundle.certificate.Verify(checkTrustPathOptions) + if err != nil && !strings.Contains(err.Error(), "certificate signed by unknown authority") { + return nil, err + } else if err == nil { + for _, chain := range trusts { + // Output of this Should Only Have One Trust with Chain of Length Two (Child followed by Parent) + for _, cert := range chain { + if issuedCertBundle.certificate.Equal(cert) { + trust = true + break + } + } + } + } + + pathMatch := false + for _, cert := range issuedCertBundle.caChain { + if bytes.Equal(cert.Raw, issuerCert.Raw) { + pathMatch = true + break + } + } + + signatureMatch := false + err = issuedCertBundle.certificate.CheckSignatureFrom(issuerCert) + if err == nil { + signatureMatch = true + } + + result := map[string]bool{ + // This comparison isn't strictly correct, despite a standard ordering these are sets + "subject_match": bytes.Equal(issuerCert.RawSubject, issuedCertBundle.certificate.RawIssuer), + "path_match": pathMatch, + "trust_match": trust, // TODO: Refactor into a reasonable function + "key_id_match": bytes.Equal(parentKeyId, issuerKeyId), + "signature_match": signatureMatch, + } + + return result, nil +} + +type issuerResponse struct { + keyId string + certificate *x509.Certificate + caChain 
[]*x509.Certificate +} + +func readIssuer(client *api.Client, issuerPath string) (*issuerResponse, error) { + issuerResp, err := client.Logical().Read(issuerPath) + if err != nil { + return nil, err + } + issuerCertPem, err := requireStrRespField(issuerResp, "certificate") + if err != nil { + return nil, err + } + issuerCert, err := healthcheck.ParsePEMCert(issuerCertPem) + if err != nil { + return nil, fmt.Errorf("unable to parse issuer %v's certificate: %w", issuerPath, err) + } + + caChainPem, err := requireStrListRespField(issuerResp, "ca_chain") + if err != nil { + return nil, fmt.Errorf("unable to parse issuer %v's CA chain: %w", issuerPath, err) + } + + var caChain []*x509.Certificate + for _, pem := range caChainPem { + trimmedPem := strings.TrimSpace(pem) + if trimmedPem == "" { + continue + } + cert, err := healthcheck.ParsePEMCert(trimmedPem) + if err != nil { + return nil, err + } + caChain = append(caChain, cert) + } + + keyId := optStrRespField(issuerResp, "key_id") + + return &issuerResponse{ + keyId: keyId, + certificate: issuerCert, + caChain: caChain, + }, nil +} + +func optStrRespField(resp *api.Secret, reqField string) string { + if resp == nil || resp.Data == nil { + return "" + } + if val, present := resp.Data[reqField]; !present { + return "" + } else if strVal, castOk := val.(string); !castOk || strVal == "" { + return "" + } else { + return strVal + } +} + +func requireStrRespField(resp *api.Secret, reqField string) (string, error) { + if resp == nil || resp.Data == nil { + return "", fmt.Errorf("nil response received, %s field unavailable", reqField) + } + if val, present := resp.Data[reqField]; !present { + return "", fmt.Errorf("response did not contain field: %s", reqField) + } else if strVal, castOk := val.(string); !castOk || strVal == "" { + return "", fmt.Errorf("field %s value was blank or not a string: %v", reqField, val) + } else { + return strVal, nil + } +} + +func requireStrListRespField(resp *api.Secret, reqField string) ([]string, error) { + if resp == nil || resp.Data == nil { + return nil, fmt.Errorf("nil response received, %s field unavailable", reqField) + } + if val, present := resp.Data[reqField]; !present { + return nil, fmt.Errorf("response did not contain field: %s", reqField) + } else { + return healthcheck.StringList(val) + } +} + +func (c *PKIVerifySignCommand) outputResults(results map[string]bool, potentialParent, potentialChild string) error { + switch Format(c.UI) { + case "", "table": + return c.outputResultsTable(results, potentialParent, potentialChild) + case "json": + return c.outputResultsJSON(results) + case "yaml": + return c.outputResultsYAML(results) + default: + return fmt.Errorf("unknown output format: %v", Format(c.UI)) + } +} + +func (c *PKIVerifySignCommand) outputResultsTable(results map[string]bool, potentialParent, potentialChild string) error { + c.UI.Output("issuer:" + potentialParent) + c.UI.Output("issued:" + potentialChild + "\n") + data := []string{"field" + hopeDelim + "value"} + for field, finding := range results { + row := field + hopeDelim + strconv.FormatBool(finding) + data = append(data, row) + } + c.UI.Output(tableOutput(data, &columnize.Config{ + Delim: hopeDelim, + })) + c.UI.Output("\n") + + return nil +} + +func (c *PKIVerifySignCommand) outputResultsJSON(results map[string]bool) error { + bytes, err := json.MarshalIndent(results, "", " ") + if err != nil { + return err + } + + c.UI.Output(string(bytes)) + return nil +} + +func (c *PKIVerifySignCommand) outputResultsYAML(results map[string]bool) 
error { + bytes, err := yaml.Marshal(results) + if err != nil { + return err + } + + c.UI.Output(string(bytes)) + return nil +} diff --git a/command/pki_verify_sign_test.go b/command/pki_verify_sign_test.go new file mode 100644 index 0000000..3f8986a --- /dev/null +++ b/command/pki_verify_sign_test.go @@ -0,0 +1,468 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/vault/api" +) + +func TestPKIVerifySign(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + // Relationship Map to Create + // pki-root | pki-newroot | pki-empty + // RootX1 RootX2 RootX4 RootX3 + // | | + // ---------------------------------------------- + // v v + // IntX1 IntX2 pki-int + // | | + // v v + // IntX3 (-----------------------) IntX3 + // + // Here X1,X2 have the same name (same mount) + // RootX4 uses the same key as RootX1 (but a different common_name/subject) + // RootX3 has the same name, and is on a different mount + // RootX1 has issued IntX1; RootX3 has issued IntX2 + createComplicatedIssuerSetUp(t, client) + + runPkiVerifySignTests(t, client) +} + +func runPkiVerifySignTests(t *testing.T, client *api.Client) { + cases := []struct { + name string + args []string + expectedMatches map[string]bool + jsonOut bool + shouldError bool + expectErrorCont string + expectErrorNotCont string + nonJsonOutputCont string + }{ + { + "rootX1-matches-rootX1", + []string{"pki", "verify-sign", "-format=json", "pki-root/issuer/rootX1", "pki-root/issuer/rootX1"}, + map[string]bool{ + "key_id_match": true, + "path_match": true, + "signature_match": true, + "subject_match": true, + "trust_match": true, + }, + true, + false, + "", + "", + "", + }, + { + "rootX1-on-rootX2-onlySameName", + []string{"pki", "verify-sign", "-format=json", "pki-root/issuer/rootX1", "pki-root/issuer/rootX2"}, + map[string]bool{ + "key_id_match": false, + "path_match": false, + "signature_match": false, + "subject_match": true, + "trust_match": false, + }, + true, + false, + "", + "", + "", + }, + } + for _, testCase := range cases { + var errString string + var results map[string]interface{} + var stdOut string + + if testCase.jsonOut { + results, errString = execPKIVerifyJson(t, client, false, testCase.shouldError, testCase.args) + } else { + stdOut, errString = execPKIVerifyNonJson(t, client, testCase.shouldError, testCase.args) + } + + // Verify Error Behavior + if testCase.shouldError { + if errString == "" { + t.Fatalf("Expected error in Testcase %s : no error produced, got results %s", testCase.name, results) + } + if testCase.expectErrorCont != "" && !strings.Contains(errString, testCase.expectErrorCont) { + t.Fatalf("Expected error in Testcase %s to contain %s, but got error %s", testCase.name, testCase.expectErrorCont, errString) + } + if testCase.expectErrorNotCont != "" && strings.Contains(errString, testCase.expectErrorNotCont) { + t.Fatalf("Expected error in Testcase %s to not contain %s, but got error %s", testCase.name, testCase.expectErrorNotCont, errString) + } + } else { + if errString != "" { + t.Fatalf("Error in Testcase %s : no error expected, but got error: %s", testCase.name, errString) + } + } + + // Verify Output + if testCase.jsonOut { + isMatch, errString := verifyExpectedJson(testCase.expectedMatches, results) + if !isMatch { + t.Fatalf("Expected Results for Testcase %s, do not match returned results %s", testCase.name, errString) + } + 
} else { + if !strings.Contains(stdOut, testCase.nonJsonOutputCont) { + t.Fatalf("Expected standard output for Testcase %s to contain %s, but got %s", testCase.name, testCase.nonJsonOutputCont, stdOut) + } + } + + } +} + +func execPKIVerifyJson(t *testing.T, client *api.Client, expectErrorUnmarshalling bool, expectErrorOut bool, callArgs []string) (map[string]interface{}, string) { + stdout, stderr := execPKIVerifyNonJson(t, client, expectErrorOut, callArgs) + + var results map[string]interface{} + if err := json.Unmarshal([]byte(stdout), &results); err != nil && !expectErrorUnmarshalling { + t.Fatalf("failed to decode json response : %v \n json: \n%v", err, stdout) + } + + return results, stderr +} + +func execPKIVerifyNonJson(t *testing.T, client *api.Client, expectErrorOut bool, callArgs []string) (string, string) { + stdout := bytes.NewBuffer(nil) + stderr := bytes.NewBuffer(nil) + runOpts := &RunOptions{ + Stdout: stdout, + Stderr: stderr, + Client: client, + } + + code := RunCustom(callArgs, runOpts) + if !expectErrorOut && code != 0 { + t.Fatalf("running command `%v` unsuccessful (ret %v)\nerr: %v", strings.Join(callArgs, " "), code, stderr.String()) + } + + t.Log(stdout.String() + stderr.String()) + + return stdout.String(), stderr.String() +} + +func convertListOfInterfaceToString(list []interface{}, sep string) string { + newList := make([]string, len(list)) + for i, interfa := range list { + newList[i] = interfa.(string) + } + return strings.Join(newList, sep) +} + +func createComplicatedIssuerSetUp(t *testing.T, client *api.Client) { + // Relationship Map to Create + // pki-root | pki-newroot | pki-empty + // RootX1 RootX2 RootX4 RootX3 + // | | + // ---------------------------------------------- + // v v + // IntX1 IntX2 pki-int + // | | + // v v + // IntX3 (-----------------------) IntX3 + // + // Here X1,X2 have the same name (same mount) + // RootX4 uses the same key as RootX1 (but a different common_name/subject) + // RootX3 has the same name, and is on a different mount + // RootX1 has issued IntX1; RootX3 has issued IntX2 + + if err := client.Sys().Mount("pki-root", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + MaxLeaseTTL: "36500d", + }, + }); err != nil { + t.Fatalf("pki mount error: %#v", err) + } + + if err := client.Sys().Mount("pki-newroot", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + MaxLeaseTTL: "36500d", + }, + }); err != nil { + t.Fatalf("pki mount error: %#v", err) + } + + if err := client.Sys().Mount("pki-int", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + MaxLeaseTTL: "36500d", + }, + }); err != nil { + t.Fatalf("pki mount error: %#v", err) + } + + // Used to check handling empty list responses: Not Used for Any Issuers / Certificates + if err := client.Sys().Mount("pki-empty", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{}, + }); err != nil { + t.Fatalf("pki mount error: %#v", err) + } + + resp, err := client.Logical().Write("pki-root/root/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "Root X", + "ttl": "3650d", + "issuer_name": "rootX1", + "key_name": "rootX1", + }) + if err != nil || resp == nil { + t.Fatalf("failed to prime CA: %v", err) + } + + resp, err = client.Logical().Write("pki-root/root/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "Root X", + "ttl": "3650d", + "issuer_name": "rootX2", + }) + if err != nil || resp == nil { + t.Fatalf("failed to prime CA: %v", err) + } + + if resp, err := 
client.Logical().Write("pki-newroot/root/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "Root X", + "ttl": "3650d", + "issuer_name": "rootX3", + }); err != nil || resp == nil { + t.Fatalf("failed to prime CA: %v", err) + } + + if resp, err := client.Logical().Write("pki-root/root/generate/existing", map[string]interface{}{ + "common_name": "Root X4", + "ttl": "3650d", + "issuer_name": "rootX4", + "key_ref": "rootX1", + }); err != nil || resp == nil { + t.Fatalf("failed to prime CA: %v", err) + } + + // Intermediate X1 + int1CsrResp, err := client.Logical().Write("pki-int/intermediate/generate/internal", map[string]interface{}{ + "key_type": "rsa", + "common_name": "Int X1", + "ttl": "3650d", + }) + if err != nil || int1CsrResp == nil { + t.Fatalf("failed to generate CSR: %v", err) + } + int1KeyId, ok := int1CsrResp.Data["key_id"] + if !ok { + t.Fatalf("no key_id produced when generating csr, response %v", int1CsrResp.Data) + } + int1CsrRaw, ok := int1CsrResp.Data["csr"] + if !ok { + t.Fatalf("no csr produced when generating intermediate, resp: %v", int1CsrResp) + } + int1Csr := int1CsrRaw.(string) + int1CertResp, err := client.Logical().Write("pki-root/issuer/rootX1/sign-intermediate", map[string]interface{}{ + "csr": int1Csr, + }) + if err != nil || int1CertResp == nil { + t.Fatalf("failed to sign CSR: %v", err) + } + int1CertChainRaw, ok := int1CertResp.Data["ca_chain"] + if !ok { + t.Fatalf("no ca_chain produced when signing intermediate, resp: %v", int1CertResp) + } + int1CertChain := convertListOfInterfaceToString(int1CertChainRaw.([]interface{}), "\n") + importInt1Resp, err := client.Logical().Write("pki-int/issuers/import/cert", map[string]interface{}{ + "pem_bundle": int1CertChain, + }) + if err != nil || importInt1Resp == nil { + t.Fatalf("failed to import certificate: %v", err) + } + importIssuerIdMap, ok := importInt1Resp.Data["mapping"] + if !ok { + t.Fatalf("no mapping data returned on issuer import: %v", importInt1Resp) + } + for key, value := range importIssuerIdMap.(map[string]interface{}) { + if value != nil && len(value.(string)) > 0 { + if value != int1KeyId { + t.Fatalf("Expected exactly one key_match to %v, got multiple: %v", int1KeyId, importIssuerIdMap) + } + if resp, err := client.Logical().JSONMergePatch(context.Background(), "pki-int/issuer/"+key, map[string]interface{}{ + "issuer_name": "intX1", + }); err != nil || resp == nil { + t.Fatalf("error naming issuer %v", err) + } + } else { + if resp, err := client.Logical().JSONMergePatch(context.Background(), "pki-int/issuer/"+key, map[string]interface{}{ + "issuer_name": "rootX1", + }); err != nil || resp == nil { + t.Fatalf("error naming issuer parent %v", err) + } + } + } + + // Intermediate X2 + int2CsrResp, err := client.Logical().Write("pki-int/intermediate/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "Int X2", + "ttl": "3650d", + }) + if err != nil || int2CsrResp == nil { + t.Fatalf("failed to generate CSR: %v", err) + } + int2KeyId, ok := int2CsrResp.Data["key_id"] + if !ok { + t.Fatalf("no key material returned from producing csr, resp: %v", int2CsrResp) + } + int2CsrRaw, ok := int2CsrResp.Data["csr"] + if !ok { + t.Fatalf("no csr produced when generating intermediate, resp: %v", int2CsrResp) + } + int2Csr := int2CsrRaw.(string) + int2CertResp, err := client.Logical().Write("pki-newroot/issuer/rootX3/sign-intermediate", map[string]interface{}{ + "csr": int2Csr, + }) + if err != nil || int2CertResp == nil { + t.Fatalf("failed to sign CSR: 
%v", err) + } + int2CertChainRaw, ok := int2CertResp.Data["ca_chain"] + if !ok { + t.Fatalf("no ca_chain produced when signing intermediate, resp: %v", int2CertResp) + } + int2CertChain := convertListOfInterfaceToString(int2CertChainRaw.([]interface{}), "\n") + importInt2Resp, err := client.Logical().Write("pki-int/issuers/import/cert", map[string]interface{}{ + "pem_bundle": int2CertChain, + }) + if err != nil || importInt2Resp == nil { + t.Fatalf("failed to import certificate: %v", err) + } + importIssuer2IdMap, ok := importInt2Resp.Data["mapping"] + if !ok { + t.Fatalf("no mapping data returned on issuer import: %v", importInt2Resp) + } + for key, value := range importIssuer2IdMap.(map[string]interface{}) { + if value != nil && len(value.(string)) > 0 { + if value != int2KeyId { + t.Fatalf("unexpected key_match with ca_chain, expected only %v, got %v", int2KeyId, importIssuer2IdMap) + } + if resp, err := client.Logical().JSONMergePatch(context.Background(), "pki-int/issuer/"+key, map[string]interface{}{ + "issuer_name": "intX2", + }); err != nil || resp == nil { + t.Fatalf("error naming issuer %v", err) + } + } else { + if resp, err := client.Logical().Write("pki-int/issuer/"+key, map[string]interface{}{ + "issuer_name": "rootX3", + }); err != nil || resp == nil { + t.Fatalf("error naming parent issuer %v", err) + } + } + } + + // Intermediate X3 + int3CsrResp, err := client.Logical().Write("pki-int/intermediate/generate/internal", map[string]interface{}{ + "key_type": "rsa", + "common_name": "Int X3", + "ttl": "3650d", + }) + if err != nil || int3CsrResp == nil { + t.Fatalf("failed to generate CSR: %v", err) + } + int3KeyId, ok := int3CsrResp.Data["key_id"] + int3CsrRaw, ok := int3CsrResp.Data["csr"] + if !ok { + t.Fatalf("no csr produced when generating intermediate, resp: %v", int3CsrResp) + } + int3Csr := int3CsrRaw.(string) + // sign by intX1 and import + int3CertResp1, err := client.Logical().Write("pki-int/issuer/intX1/sign-intermediate", map[string]interface{}{ + "csr": int3Csr, + }) + if err != nil || int3CertResp1 == nil { + t.Fatalf("failed to sign CSR: %v", err) + } + int3CertChainRaw1, ok := int3CertResp1.Data["ca_chain"] + if !ok { + t.Fatalf("no ca_chain produced when signing intermediate, resp: %v", int3CertResp1) + } + int3CertChain1 := convertListOfInterfaceToString(int3CertChainRaw1.([]interface{}), "\n") + importInt3Resp1, err := client.Logical().Write("pki-int/issuers/import/cert", map[string]interface{}{ + "pem_bundle": int3CertChain1, + }) + if err != nil || importInt3Resp1 == nil { + t.Fatalf("failed to import certificate: %v", err) + } + importIssuer3IdMap1, ok := importInt3Resp1.Data["mapping"] + if !ok { + t.Fatalf("no mapping data returned on issuer import: %v", importInt2Resp) + } + for key, value := range importIssuer3IdMap1.(map[string]interface{}) { + if value != nil && len(value.(string)) > 0 && value == int3KeyId { + if resp, err := client.Logical().JSONMergePatch(context.Background(), "pki-int/issuer/"+key, map[string]interface{}{ + "issuer_name": "intX3", + }); err != nil || resp == nil { + t.Fatalf("error naming issuer %v", err) + } + break + } + } + + // sign by intX2 and import + int3CertResp2, err := client.Logical().Write("pki-int/issuer/intX2/sign-intermediate", map[string]interface{}{ + "csr": int3Csr, + }) + if err != nil || int3CertResp2 == nil { + t.Fatalf("failed to sign CSR: %v", err) + } + int3CertChainRaw2, ok := int3CertResp2.Data["ca_chain"] + if !ok { + t.Fatalf("no ca_chain produced when signing intermediate, resp: %v", 
int3CertResp2) + } + int3CertChain2 := convertListOfInterfaceToString(int3CertChainRaw2.([]interface{}), "\n") + importInt3Resp2, err := client.Logical().Write("pki-int/issuers/import/cert", map[string]interface{}{ + "pem_bundle": int3CertChain2, + }) + if err != nil || importInt3Resp2 == nil { + t.Fatalf("failed to import certificate: %v", err) + } + importIssuer3IdMap2, ok := importInt3Resp2.Data["mapping"] + if !ok { + t.Fatalf("no mapping data returned on issuer import: %v", importInt2Resp) + } + for key, value := range importIssuer3IdMap2.(map[string]interface{}) { + if value != nil && len(value.(string)) > 0 && value == int3KeyId { + if resp, err := client.Logical().JSONMergePatch(context.Background(), "pki-int/issuer/"+key, map[string]interface{}{ + "issuer_name": "intX3also", + }); err != nil || resp == nil { + t.Fatalf("error naming issuer %v", err) + } + break // Parent Certs Already Named + } + } +} + +func verifyExpectedJson(expectedResults map[string]bool, results map[string]interface{}) (isMatch bool, error string) { + if len(expectedResults) != len(results) { + return false, fmt.Sprintf("Different Number of Keys in Expected Results (%d), than results (%d)", + len(expectedResults), len(results)) + } + for key, value := range expectedResults { + if results[key].(bool) != value { + return false, fmt.Sprintf("Different value for key %s : expected %t got %s", key, value, results[key]) + } + } + return true, "" +} diff --git a/command/plugin.go b/command/plugin.go new file mode 100644 index 0000000..ca55a4b --- /dev/null +++ b/command/plugin.go @@ -0,0 +1,51 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + + "github.com/mitchellh/cli" +) + +var _ cli.Command = (*PluginCommand)(nil) + +type PluginCommand struct { + *BaseCommand +} + +func (c *PluginCommand) Synopsis() string { + return "Interact with Vault plugins and catalog" +} + +func (c *PluginCommand) Help() string { + helpText := ` +Usage: vault plugin [options] [args] + + This command groups subcommands for interacting with Vault's plugins and the + plugin catalog. The plugin catalog is divided into three types: "auth", + "database", and "secret" plugins. A type must be specified on each call. Here + are a few examples of the plugin commands. + + List all available plugins in the catalog of a particular type: + + $ vault plugin list database + + Register a new plugin to the catalog as a particular type: + + $ vault plugin register -sha256=d3f0a8b... auth my-custom-plugin + + Get information about a plugin in the catalog listed under a particular type: + + $ vault plugin info auth my-custom-plugin + + Please see the individual subcommand help for detailed usage information. +` + + return strings.TrimSpace(helpText) +} + +func (c *PluginCommand) Run(args []string) int { + return cli.RunResultHelp +} diff --git a/command/plugin_deregister.go b/command/plugin_deregister.go new file mode 100644 index 0000000..86b329f --- /dev/null +++ b/command/plugin_deregister.go @@ -0,0 +1,132 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + semver "github.com/hashicorp/go-version" + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*PluginDeregisterCommand)(nil) + _ cli.CommandAutocomplete = (*PluginDeregisterCommand)(nil) +) + +type PluginDeregisterCommand struct { + *BaseCommand + + flagPluginVersion string +} + +func (c *PluginDeregisterCommand) Synopsis() string { + return "Deregister an existing plugin in the catalog" +} + +func (c *PluginDeregisterCommand) Help() string { + helpText := ` +Usage: vault plugin deregister [options] TYPE NAME + + Deregister an existing plugin in the catalog. If the plugin does not exist, + no action is taken (the command is idempotent). The TYPE argument + takes "auth", "database", or "secret". + + Deregister the unversioned auth plugin named my-custom-plugin: + + $ vault plugin deregister auth my-custom-plugin + + Deregister the auth plugin named my-custom-plugin, version 1.0.0: + + $ vault plugin deregister -version=v1.0.0 auth my-custom-plugin + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *PluginDeregisterCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP) + + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "version", + Target: &c.flagPluginVersion, + Completion: complete.PredictAnything, + Usage: "Semantic version of the plugin to deregister. If unset, " + + "only an unversioned plugin may be deregistered.", + }) + + return set +} + +func (c *PluginDeregisterCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultPlugins(api.PluginTypeUnknown) +} + +func (c *PluginDeregisterCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *PluginDeregisterCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + var pluginNameRaw, pluginTypeRaw string + args = f.Args() + switch len(args) { + case 0: + c.UI.Error("Not enough arguments (expected 1, or 2, got 0)") + return 1 + case 1: + pluginTypeRaw = "unknown" + pluginNameRaw = args[0] + case 2: + pluginTypeRaw = args[0] + pluginNameRaw = args[1] + default: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, or 2, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + pluginType, err := api.ParsePluginType(strings.TrimSpace(pluginTypeRaw)) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + pluginName := strings.TrimSpace(pluginNameRaw) + if c.flagPluginVersion != "" { + _, err := semver.NewSemver(c.flagPluginVersion) + if err != nil { + c.UI.Error(fmt.Sprintf("version %q is not a valid semantic version: %v", c.flagPluginVersion, err)) + return 2 + } + } + + if err := client.Sys().DeregisterPlugin(&api.DeregisterPluginInput{ + Name: pluginName, + Type: pluginType, + Version: c.flagPluginVersion, + }); err != nil { + c.UI.Error(fmt.Sprintf("Error deregistering plugin named %s: %s", pluginName, err)) + return 2 + } + + c.UI.Output(fmt.Sprintf("Success! Deregistered plugin (if it was registered): %s", pluginName)) + return 0 +} diff --git a/command/plugin_deregister_test.go b/command/plugin_deregister_test.go new file mode 100644 index 0000000..b05644e --- /dev/null +++ b/command/plugin_deregister_test.go @@ -0,0 +1,263 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/mitchellh/cli" +) + +func testPluginDeregisterCommand(tb testing.TB) (*cli.MockUi, *PluginDeregisterCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &PluginDeregisterCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestPluginDeregisterCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + nil, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"foo", "bar", "fizz"}, + "Too many arguments", + 1, + }, + { + "not_a_plugin", + []string{consts.PluginTypeCredential.String(), "nope_definitely_never_a_plugin_nope"}, + "", + 0, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPluginDeregisterCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + + t.Run("integration", func(t *testing.T) { + t.Parallel() + + pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) + defer cleanup(t) + + client, _, closer := testVaultServerPluginDir(t, pluginDir) + defer closer() + + pluginName := "my-plugin" + _, sha256Sum := testPluginCreateAndRegister(t, client, pluginDir, pluginName, api.PluginTypeCredential, "") + + ui, cmd := testPluginDeregisterCommand(t) + cmd.client = client + + if err := client.Sys().RegisterPlugin(&api.RegisterPluginInput{ + Name: pluginName, + Type: api.PluginTypeCredential, + Command: pluginName, + SHA256: sha256Sum, + }); err != nil { + t.Fatal(err) + } + + code := cmd.Run([]string{ + consts.PluginTypeCredential.String(), + pluginName, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! Deregistered plugin (if it was registered): " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + resp, err := client.Sys().ListPlugins(&api.ListPluginsInput{ + Type: api.PluginTypeCredential, + }) + if err != nil { + t.Fatal(err) + } + + found := false + for _, plugins := range resp.PluginsByType { + for _, p := range plugins { + if p == pluginName { + found = true + } + } + } + if found { + t.Errorf("expected %q to not be in %q", pluginName, resp.PluginsByType) + } + }) + + t.Run("integration with version", func(t *testing.T) { + t.Parallel() + + pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) + defer cleanup(t) + + client, _, closer := testVaultServerPluginDir(t, pluginDir) + defer closer() + + pluginName := "my-plugin" + _, _, version := testPluginCreateAndRegisterVersioned(t, client, pluginDir, pluginName, api.PluginTypeCredential) + + ui, cmd := testPluginDeregisterCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-version=" + version, + consts.PluginTypeCredential.String(), + pluginName, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! 
Deregistered plugin (if it was registered): " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + resp, err := client.Sys().ListPlugins(&api.ListPluginsInput{ + Type: api.PluginTypeUnknown, + }) + if err != nil { + t.Fatal(err) + } + + found := false + for _, p := range resp.Details { + if p.Name == pluginName { + found = true + } + } + if found { + t.Errorf("expected %q to not be in %#v", pluginName, resp.Details) + } + }) + + t.Run("integration with missing version", func(t *testing.T) { + t.Parallel() + + pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) + defer cleanup(t) + + client, _, closer := testVaultServerPluginDir(t, pluginDir) + defer closer() + + pluginName := "my-plugin" + testPluginCreateAndRegisterVersioned(t, client, pluginDir, pluginName, api.PluginTypeCredential) + + ui, cmd := testPluginDeregisterCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + consts.PluginTypeCredential.String(), + pluginName, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! Deregistered plugin (if it was registered): " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + resp, err := client.Sys().ListPlugins(&api.ListPluginsInput{ + Type: api.PluginTypeUnknown, + }) + if err != nil { + t.Fatal(err) + } + + found := false + for _, p := range resp.Details { + if p.Name == pluginName { + found = true + } + } + if !found { + t.Errorf("expected %q to be in %#v", pluginName, resp.Details) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testPluginDeregisterCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + consts.PluginTypeCredential.String(), + "my-plugin", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error deregistering plugin named my-plugin: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testPluginDeregisterCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/plugin_info.go b/command/plugin_info.go new file mode 100644 index 0000000..1fa9555 --- /dev/null +++ b/command/plugin_info.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*PluginInfoCommand)(nil) + _ cli.CommandAutocomplete = (*PluginInfoCommand)(nil) +) + +type PluginInfoCommand struct { + *BaseCommand + + flagVersion string +} + +func (c *PluginInfoCommand) Synopsis() string { + return "Read information about a plugin in the catalog" +} + +func (c *PluginInfoCommand) Help() string { + helpText := ` +Usage: vault plugin info [options] TYPE NAME + + Displays information about a plugin in the catalog with the given name. If + the plugin does not exist, an error is returned. The argument of type + takes "auth", "database", or "secret". 
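+
+  Get info about a specific version of a plugin:
+
+    $ vault plugin info -version=v1.0.0 auth my-custom-plugin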
+ + Get info about a plugin: + + $ vault plugin info database mysql-database-plugin + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *PluginInfoCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "version", + Target: &c.flagVersion, + Completion: complete.PredictAnything, + Usage: "Semantic version of the plugin. Optional.", + }) + + return set +} + +func (c *PluginInfoCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultPlugins(api.PluginTypeUnknown) +} + +func (c *PluginInfoCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *PluginInfoCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + var pluginNameRaw, pluginTypeRaw string + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1 or 2, got %d)", len(args))) + return 1 + case len(args) > 2: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1 or 2, got %d)", len(args))) + return 1 + + // These cases should come after invalid cases have been checked + case len(args) == 1: + pluginTypeRaw = "unknown" + pluginNameRaw = args[0] + case len(args) == 2: + pluginTypeRaw = args[0] + pluginNameRaw = args[1] + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + pluginType, err := api.ParsePluginType(strings.TrimSpace(pluginTypeRaw)) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + pluginName := strings.TrimSpace(pluginNameRaw) + + resp, err := client.Sys().GetPlugin(&api.GetPluginInput{ + Name: pluginName, + Type: pluginType, + Version: c.flagVersion, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error reading plugin named %s: %s", pluginName, err)) + return 2 + } + + if resp == nil { + c.UI.Error(fmt.Sprintf("No value found for plugin %q", pluginName)) + return 2 + } + + data := map[string]interface{}{ + "args": resp.Args, + "builtin": resp.Builtin, + "command": resp.Command, + "name": resp.Name, + "sha256": resp.SHA256, + "deprecation_status": resp.DeprecationStatus, + "version": resp.Version, + } + + if c.flagField != "" { + return PrintRawField(c.UI, data, c.flagField) + } + return OutputData(c.UI, data) +} diff --git a/command/plugin_info_test.go b/command/plugin_info_test.go new file mode 100644 index 0000000..921014c --- /dev/null +++ b/command/plugin_info_test.go @@ -0,0 +1,214 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/helper/versions" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/mitchellh/cli" +) + +func testPluginInfoCommand(tb testing.TB) (*cli.MockUi, *PluginInfoCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &PluginInfoCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestPluginInfoCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "too_many_args", + []string{"foo", "bar", "fizz"}, + "Too many arguments", + 1, + }, + { + "no_plugin_exist", + []string{api.PluginTypeCredential.String(), "not-a-real-plugin-like-ever"}, + "Error reading plugin", + 2, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPluginInfoCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("default", func(t *testing.T) { + t.Parallel() + + pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) + defer cleanup(t) + + client, _, closer := testVaultServerPluginDir(t, pluginDir) + defer closer() + + pluginName := "my-plugin" + _, sha256Sum := testPluginCreateAndRegister(t, client, pluginDir, pluginName, api.PluginTypeCredential, "") + + ui, cmd := testPluginInfoCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + api.PluginTypeCredential.String(), pluginName, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, pluginName) { + t.Errorf("expected %q to contain %q", combined, pluginName) + } + if !strings.Contains(combined, sha256Sum) { + t.Errorf("expected %q to contain %q", combined, sha256Sum) + } + }) + + t.Run("version flag", func(t *testing.T) { + t.Parallel() + + pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) + defer cleanup(t) + + client, _, closer := testVaultServerPluginDir(t, pluginDir) + defer closer() + + const pluginName = "azure" + _, sha256Sum := testPluginCreateAndRegister(t, client, pluginDir, pluginName, api.PluginTypeCredential, "v1.0.0") + + for name, tc := range map[string]struct { + version string + expectedSHA string + }{ + "versioned": {"v1.0.0", sha256Sum}, + "builtin version": {versions.GetBuiltinVersion(consts.PluginTypeSecrets, pluginName), ""}, + } { + t.Run(name, func(t *testing.T) { + ui, cmd := testPluginInfoCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-version=" + tc.version, + api.PluginTypeCredential.String(), pluginName, + }) + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if exp := 0; code != exp { + t.Errorf("expected %d to be %d: %s", code, exp, combined) + } + + if !strings.Contains(combined, pluginName) { + t.Errorf("expected %q to contain %q", combined, pluginName) + } + if !strings.Contains(combined, tc.expectedSHA) { + t.Errorf("expected %q to contain %q", combined, tc.expectedSHA) + } + if 
!strings.Contains(combined, tc.version) { + t.Errorf("expected %q to contain %q", combined, tc.version) + } + }) + } + }) + + t.Run("field", func(t *testing.T) { + t.Parallel() + + pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) + defer cleanup(t) + + client, _, closer := testVaultServerPluginDir(t, pluginDir) + defer closer() + + pluginName := "my-plugin" + testPluginCreateAndRegister(t, client, pluginDir, pluginName, api.PluginTypeCredential, "") + + ui, cmd := testPluginInfoCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-field", "builtin", + api.PluginTypeCredential.String(), pluginName, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if exp := "false"; combined != exp { + t.Errorf("expected %q to be %q", combined, exp) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testPluginInfoCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + api.PluginTypeCredential.String(), "my-plugin", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error reading plugin named my-plugin: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testPluginInfoCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/plugin_list.go b/command/plugin_list.go new file mode 100644 index 0000000..f1b0e5e --- /dev/null +++ b/command/plugin_list.go @@ -0,0 +1,169 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*PluginListCommand)(nil) + _ cli.CommandAutocomplete = (*PluginListCommand)(nil) +) + +type PluginListCommand struct { + *BaseCommand + + flagDetailed bool +} + +func (c *PluginListCommand) Synopsis() string { + return "Lists available plugins" +} + +func (c *PluginListCommand) Help() string { + helpText := ` +Usage: vault plugin list [options] [TYPE] + + Lists available plugins registered in the catalog. This does not list whether + plugins are in use, but rather just their availability. The last argument of + type takes "auth", "database", or "secret". + + List all available plugins in the catalog: + + $ vault plugin list + + List all available database plugins in the catalog: + + $ vault plugin list database + + List all available plugins with detailed output: + + $ vault plugin list -detailed + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *PluginListCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.BoolVar(&BoolVar{ + Name: "detailed", + Target: &c.flagDetailed, + Default: false, + Usage: "Print detailed plugin information such as plugin type, " + + "version, and deprecation status for each plugin. 
This option " + + "is only applicable to table-formatted output.", + }) + + return set +} + +func (c *PluginListCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *PluginListCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *PluginListCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0 or 1, got %d)", len(args))) + return 1 + } + + pluginType := api.PluginTypeUnknown + if len(args) > 0 { + pluginTypeStr := strings.TrimSpace(args[0]) + if pluginTypeStr != "" { + var err error + pluginType, err = api.ParsePluginType(pluginTypeStr) + if err != nil { + c.UI.Error(fmt.Sprintf("Error parsing type: %s", err)) + return 2 + } + } + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + resp, err := client.Sys().ListPlugins(&api.ListPluginsInput{ + Type: pluginType, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error listing available plugins: %s", err)) + return 2 + } + if resp == nil { + c.UI.Error("No response from server when listing plugins") + return 2 + } + + switch Format(c.UI) { + case "table": + if c.flagDetailed { + c.UI.Output(tableOutput(c.detailedResponse(resp), nil)) + return 0 + } + c.UI.Output(tableOutput(c.simpleResponse(resp, pluginType), nil)) + return 0 + default: + res := make(map[string]interface{}) + for k, v := range resp.PluginsByType { + res[k.String()] = v + } + res["details"] = resp.Details + return OutputData(c.UI, res) + } +} + +func (c *PluginListCommand) simpleResponse(plugins *api.ListPluginsResponse, pluginType api.PluginType) []string { + var out []string + switch pluginType { + case api.PluginTypeUnknown: + out = []string{"Name | Type | Version"} + for _, plugin := range plugins.Details { + out = append(out, fmt.Sprintf("%s | %s | %s", plugin.Name, plugin.Type, plugin.Version)) + } + default: + out = []string{"Name | Version"} + for _, plugin := range plugins.Details { + out = append(out, fmt.Sprintf("%s | %s", plugin.Name, plugin.Version)) + } + } + + return out +} + +func (c *PluginListCommand) detailedResponse(plugins *api.ListPluginsResponse) []string { + out := []string{"Name | Type | Version | Deprecation Status"} + for _, plugin := range plugins.Details { + out = append(out, fmt.Sprintf("%s | %s | %s | %s", plugin.Name, plugin.Type, plugin.Version, plugin.DeprecationStatus)) + } + + return out +} diff --git a/command/plugin_list_test.go b/command/plugin_list_test.go new file mode 100644 index 0000000..edae765 --- /dev/null +++ b/command/plugin_list_test.go @@ -0,0 +1,104 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "regexp" + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func testPluginListCommand(tb testing.TB) (*cli.MockUi, *PluginListCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &PluginListCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestPluginListCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "too_many_args", + []string{"foo", "fizz"}, + "Too many arguments", + 1, + }, + { + "lists", + nil, + "Name\\s+Type\\s+Version", + 0, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPluginListCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + matcher := regexp.MustCompile(tc.out) + if !matcher.MatchString(combined) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testPluginListCommand(t) + cmd.client = client + + code := cmd.Run([]string{"database"}) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error listing available plugins: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testPluginListCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/plugin_register.go b/command/plugin_register.go new file mode 100644 index 0000000..e9d2e5b --- /dev/null +++ b/command/plugin_register.go @@ -0,0 +1,166 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*PluginRegisterCommand)(nil) + _ cli.CommandAutocomplete = (*PluginRegisterCommand)(nil) +) + +type PluginRegisterCommand struct { + *BaseCommand + + flagArgs []string + flagCommand string + flagSHA256 string + flagVersion string +} + +func (c *PluginRegisterCommand) Synopsis() string { + return "Registers a new plugin in the catalog" +} + +func (c *PluginRegisterCommand) Help() string { + helpText := ` +Usage: vault plugin register [options] TYPE NAME + + Registers a new plugin in the catalog. The plugin binary must exist in Vault's + configured plugin directory. The argument of type takes "auth", "database", + or "secret". + + Register the plugin named my-custom-plugin: + + $ vault plugin register -sha256=d3f0a8b... -version=v1.0.0 auth my-custom-plugin + + Register a plugin with custom arguments: + + $ vault plugin register \ + -sha256=d3f0a8b... 
\ + -version=v1.0.0 \ + -args=--with-glibc,--with-cgo \ + auth my-custom-plugin + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *PluginRegisterCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP) + + f := set.NewFlagSet("Command Options") + + f.StringSliceVar(&StringSliceVar{ + Name: "args", + Target: &c.flagArgs, + Completion: complete.PredictAnything, + Usage: "Arguments to pass to the plugin when starting. Separate " + + "multiple arguments with a comma.", + }) + + f.StringVar(&StringVar{ + Name: "command", + Target: &c.flagCommand, + Completion: complete.PredictAnything, + Usage: "Command to spawn the plugin. This defaults to the name of the " + + "plugin if unspecified.", + }) + + f.StringVar(&StringVar{ + Name: "sha256", + Target: &c.flagSHA256, + Completion: complete.PredictAnything, + Usage: "SHA256 of the plugin binary. This is required for all plugins.", + }) + + f.StringVar(&StringVar{ + Name: "version", + Target: &c.flagVersion, + Completion: complete.PredictAnything, + Usage: "Semantic version of the plugin. Optional.", + }) + + return set +} + +func (c *PluginRegisterCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultPlugins(api.PluginTypeUnknown) +} + +func (c *PluginRegisterCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *PluginRegisterCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + var pluginNameRaw, pluginTypeRaw string + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1 or 2, got %d)", len(args))) + return 1 + case len(args) > 2: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1 or 2, got %d)", len(args))) + return 1 + case c.flagSHA256 == "": + c.UI.Error("SHA256 is required for all plugins, please provide -sha256") + return 1 + + // These cases should come after invalid cases have been checked + case len(args) == 1: + pluginTypeRaw = "unknown" + pluginNameRaw = args[0] + case len(args) == 2: + pluginTypeRaw = args[0] + pluginNameRaw = args[1] + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + pluginType, err := api.ParsePluginType(strings.TrimSpace(pluginTypeRaw)) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + pluginName := strings.TrimSpace(pluginNameRaw) + + command := c.flagCommand + if command == "" { + command = pluginName + } + + if err := client.Sys().RegisterPlugin(&api.RegisterPluginInput{ + Name: pluginName, + Type: pluginType, + Args: c.flagArgs, + Command: command, + SHA256: c.flagSHA256, + Version: c.flagVersion, + }); err != nil { + c.UI.Error(fmt.Sprintf("Error registering plugin %s: %s", pluginName, err)) + return 2 + } + + c.UI.Output(fmt.Sprintf("Success! Registered plugin: %s", pluginName)) + return 0 +} diff --git a/command/plugin_register_test.go b/command/plugin_register_test.go new file mode 100644 index 0000000..eccd5c1 --- /dev/null +++ b/command/plugin_register_test.go @@ -0,0 +1,231 @@ +// Copyright (c) HashiCorp, Inc. 
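+ //
+ // Registration requires the binary's SHA-256, computed here the same way
+ // the test helpers in this package compute it. A minimal sketch of
+ // registering a versioned auth plugin directly with the api client (the
+ // plugin name, path, and version are illustrative):
+ //
+ //     b, err := os.ReadFile("/etc/vault/plugins/my-plugin")
+ //     if err != nil {
+ //         log.Fatal(err)
+ //     }
+ //     err = client.Sys().RegisterPlugin(&api.RegisterPluginInput{
+ //         Name:    "my-plugin",
+ //         Type:    api.PluginTypeCredential,
+ //         Command: "my-plugin",
+ //         SHA256:  fmt.Sprintf("%x", sha256.Sum256(b)),
+ //         Version: "v1.0.0",
+ //     })
+ //     if err != nil {
+ //         log.Fatal(err)
+ //     }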
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "reflect" + "sort" + "strings" + "testing" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/mitchellh/cli" +) + +func testPluginRegisterCommand(tb testing.TB) (*cli.MockUi, *PluginRegisterCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &PluginRegisterCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestPluginRegisterCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + nil, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"foo", "bar", "fizz"}, + "Too many arguments", + 1, + }, + { + "not_a_plugin", + []string{consts.PluginTypeCredential.String(), "nope_definitely_never_a_plugin_nope"}, + "", + 2, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPluginRegisterCommand(t) + cmd.client = client + + args := append([]string{"-sha256", "abcd1234"}, tc.args...) + code := cmd.Run(args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + + t.Run("integration", func(t *testing.T) { + t.Parallel() + + pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) + defer cleanup(t) + + client, _, closer := testVaultServerPluginDir(t, pluginDir) + defer closer() + + pluginName := "my-plugin" + _, sha256Sum := testPluginCreate(t, pluginDir, pluginName) + + ui, cmd := testPluginRegisterCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-sha256", sha256Sum, + consts.PluginTypeCredential.String(), pluginName, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! Registered plugin: my-plugin" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + resp, err := client.Sys().ListPlugins(&api.ListPluginsInput{ + Type: api.PluginTypeCredential, + }) + if err != nil { + t.Fatal(err) + } + + found := false + for _, plugins := range resp.PluginsByType { + for _, p := range plugins { + if p == pluginName { + found = true + } + } + } + if !found { + t.Errorf("expected %q to be in %q", pluginName, resp.PluginsByType) + } + }) + + t.Run("integration with version", func(t *testing.T) { + t.Parallel() + + pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) + defer cleanup(t) + + client, _, closer := testVaultServerPluginDir(t, pluginDir) + defer closer() + + const pluginName = "my-plugin" + versions := []string{"v1.0.0", "v2.0.1"} + _, sha256Sum := testPluginCreate(t, pluginDir, pluginName) + types := []api.PluginType{api.PluginTypeCredential, api.PluginTypeDatabase, api.PluginTypeSecrets} + + for _, typ := range types { + for _, version := range versions { + ui, cmd := testPluginRegisterCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-version=" + version, + "-sha256=" + sha256Sum, + typ.String(), + pluginName, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! 
Registered plugin: my-plugin" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + } + } + + resp, err := client.Sys().ListPlugins(&api.ListPluginsInput{ + Type: api.PluginTypeUnknown, + }) + if err != nil { + t.Fatal(err) + } + + found := make(map[api.PluginType]int) + versionsFound := make(map[api.PluginType][]string) + for _, p := range resp.Details { + if p.Name == pluginName { + typ, err := api.ParsePluginType(p.Type) + if err != nil { + t.Fatal(err) + } + found[typ]++ + versionsFound[typ] = append(versionsFound[typ], p.Version) + } + } + + for _, typ := range types { + if found[typ] != 2 { + t.Fatalf("expected %q to be found 2 times, but found it %d times for %s type in %#v", pluginName, found[typ], typ.String(), resp.Details) + } + sort.Strings(versions) + sort.Strings(versionsFound[typ]) + if !reflect.DeepEqual(versions, versionsFound[typ]) { + t.Fatalf("expected %v versions but got %v", versions, versionsFound[typ]) + } + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testPluginRegisterCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-sha256", "abcd1234", + consts.PluginTypeCredential.String(), "my-plugin", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error registering plugin my-plugin:" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testPluginRegisterCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/plugin_reload.go b/command/plugin_reload.go new file mode 100644 index 0000000..2e95fdd --- /dev/null +++ b/command/plugin_reload.go @@ -0,0 +1,135 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*PluginReloadCommand)(nil) + _ cli.CommandAutocomplete = (*PluginReloadCommand)(nil) +) + +type PluginReloadCommand struct { + *BaseCommand + plugin string + mounts []string + scope string +} + +func (c *PluginReloadCommand) Synopsis() string { + return "Reload mounted plugin backend" +} + +func (c *PluginReloadCommand) Help() string { + helpText := ` +Usage: vault plugin reload [options] + + Reloads mounted plugins. Either the plugin name or the desired plugin + mount(s) must be provided, but not both. In case the plugin name is provided, + all of its corresponding mounted paths that use the plugin backend will be reloaded. 
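+
+ Reload the plugin backends mounted at "my-auth/" and "my-auth-2/":
+
+ $ vault plugin reload -mounts=my-auth,my-auth-2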
+ + Reload the plugin named "my-custom-plugin": + + $ vault plugin reload -plugin=my-custom-plugin + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *PluginReloadCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP) + + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "plugin", + Target: &c.plugin, + Completion: complete.PredictAnything, + Usage: "The name of the plugin to reload, as registered in the plugin catalog.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: "mounts", + Target: &c.mounts, + Completion: complete.PredictAnything, + Usage: "Array or comma-separated string mount paths of the plugin backends to reload.", + }) + + f.StringVar(&StringVar{ + Name: "scope", + Target: &c.scope, + Completion: complete.PredictAnything, + Usage: "The scope of the reload, omitted for local, 'global', for replicated reloads", + }) + + return set +} + +func (c *PluginReloadCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *PluginReloadCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *PluginReloadCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + switch { + case c.plugin == "" && len(c.mounts) == 0: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case c.plugin != "" && len(c.mounts) > 0: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + case c.scope != "" && c.scope != "global": + c.UI.Error(fmt.Sprintf("Invalid reload scope: %s", c.scope)) + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + rid, err := client.Sys().ReloadPlugin(&api.ReloadPluginInput{ + Plugin: c.plugin, + Mounts: c.mounts, + Scope: c.scope, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error reloading plugin/mounts: %s", err)) + return 2 + } + + if len(c.mounts) > 0 { + if rid != "" { + c.UI.Output(fmt.Sprintf("Success! Reloading mounts: %s, reload_id: %s", c.mounts, rid)) + } else { + c.UI.Output(fmt.Sprintf("Success! Reloaded mounts: %s", c.mounts)) + } + } else { + if rid != "" { + c.UI.Output(fmt.Sprintf("Success! Reloading plugin: %s, reload_id: %s", c.plugin, rid)) + } else { + c.UI.Output(fmt.Sprintf("Success! Reloaded plugin: %s", c.plugin)) + } + } + + return 0 +} diff --git a/command/plugin_reload_status.go b/command/plugin_reload_status.go new file mode 100644 index 0000000..e527a07 --- /dev/null +++ b/command/plugin_reload_status.go @@ -0,0 +1,96 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*PluginReloadCommand)(nil) + _ cli.CommandAutocomplete = (*PluginReloadCommand)(nil) +) + +type PluginReloadStatusCommand struct { + *BaseCommand +} + +func (c *PluginReloadStatusCommand) Synopsis() string { + return "Get the status of an active or recently completed global plugin reload" +} + +func (c *PluginReloadStatusCommand) Help() string { + helpText := ` +Usage: vault plugin reload-status RELOAD_ID + + Retrieves the status of a recent cluster plugin reload. The reload id must be provided. 
+ + $ vault plugin reload-status d60a3e83-a598-4f3a-879d-0ddd95f11d4e + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *PluginReloadStatusCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP) +} + +func (c *PluginReloadStatusCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *PluginReloadStatusCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *PluginReloadStatusCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + reloadId := strings.TrimSpace(args[0]) + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + r, err := client.Sys().ReloadPluginStatus(&api.ReloadPluginStatusInput{ + ReloadID: reloadId, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error retrieving plugin reload status: %s", err)) + return 2 + } + out := []string{"Time | Participant | Success | Message "} + for i, s := range r.Results { + out = append(out, fmt.Sprintf("%s | %s | %t | %s ", + s.Timestamp.Format("15:04:05"), + i, + s.Error == "", + s.Error)) + } + c.UI.Output(tableOutput(out, nil)) + return 0 +} diff --git a/command/plugin_reload_test.go b/command/plugin_reload_test.go new file mode 100644 index 0000000..646fda9 --- /dev/null +++ b/command/plugin_reload_test.go @@ -0,0 +1,165 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/mitchellh/cli" +) + +func testPluginReloadCommand(tb testing.TB) (*cli.MockUi, *PluginReloadCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &PluginReloadCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func testPluginReloadStatusCommand(tb testing.TB) (*cli.MockUi, *PluginReloadStatusCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &PluginReloadStatusCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestPluginReloadCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + nil, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"-plugin", "foo", "-mounts", "bar"}, + "Too many arguments", + 1, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPluginReloadCommand(t) + cmd.client = client + + args := append([]string{}, tc.args...) 
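+ // The reload and reload-status commands exercised below wrap two api
+ // calls; a minimal sketch of a global reload followed by a status check
+ // (the plugin name is illustrative):
+ //
+ //     rid, err := client.Sys().ReloadPlugin(&api.ReloadPluginInput{
+ //         Plugin: "my-plugin",
+ //         Scope:  "global",
+ //     })
+ //     if err != nil {
+ //         log.Fatal(err)
+ //     }
+ //     status, err := client.Sys().ReloadPluginStatus(&api.ReloadPluginStatusInput{
+ //         ReloadID: rid,
+ //     })
+ //     if err != nil {
+ //         log.Fatal(err)
+ //     }
+ //     fmt.Printf("%+v\n", status.Results)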
+ code := cmd.Run(args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + + t.Run("integration", func(t *testing.T) { + t.Parallel() + + pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) + defer cleanup(t) + + client, _, closer := testVaultServerPluginDir(t, pluginDir) + defer closer() + + pluginName := "my-plugin" + _, sha256Sum := testPluginCreateAndRegister(t, client, pluginDir, pluginName, api.PluginTypeCredential, "") + + ui, cmd := testPluginReloadCommand(t) + cmd.client = client + + if err := client.Sys().RegisterPlugin(&api.RegisterPluginInput{ + Name: pluginName, + Type: api.PluginTypeCredential, + Command: pluginName, + SHA256: sha256Sum, + }); err != nil { + t.Fatal(err) + } + + code := cmd.Run([]string{ + "-plugin", pluginName, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! Reloaded plugin: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) +} + +func TestPluginReloadStatusCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + nil, + "Not enough arguments", + 1, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPluginReloadCommand(t) + cmd.client = client + + args := append([]string{}, tc.args...) + code := cmd.Run(args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } +} diff --git a/command/plugin_test.go b/command/plugin_test.go new file mode 100644 index 0000000..08c350c --- /dev/null +++ b/command/plugin_test.go @@ -0,0 +1,78 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "crypto/sha256" + "fmt" + "io" + "io/ioutil" + "os" + "testing" + + "github.com/hashicorp/vault/api" +) + +// testPluginCreate creates a sample plugin in a tempdir and returns the shasum +// and filepath to the plugin. +func testPluginCreate(tb testing.TB, dir, name string) (string, string) { + tb.Helper() + + pth := dir + "/" + name + if err := ioutil.WriteFile(pth, nil, 0o755); err != nil { + tb.Fatal(err) + } + + f, err := os.Open(pth) + if err != nil { + tb.Fatal(err) + } + defer f.Close() + + h := sha256.New() + if _, err := io.Copy(h, f); err != nil { + tb.Fatal(err) + } + sha256Sum := fmt.Sprintf("%x", h.Sum(nil)) + + return pth, sha256Sum +} + +// testPluginCreateAndRegister creates a plugin and registers it in the catalog. 
+func testPluginCreateAndRegister(tb testing.TB, client *api.Client, dir, name string, pluginType api.PluginType, version string) (string, string) { + tb.Helper() + + pth, sha256Sum := testPluginCreate(tb, dir, name) + + if err := client.Sys().RegisterPlugin(&api.RegisterPluginInput{ + Name: name, + Type: pluginType, + Command: name, + SHA256: sha256Sum, + Version: version, + }); err != nil { + tb.Fatal(err) + } + + return pth, sha256Sum +} + +// testPluginCreateAndRegisterVersioned creates a versioned plugin and registers it in the catalog. +func testPluginCreateAndRegisterVersioned(tb testing.TB, client *api.Client, dir, name string, pluginType api.PluginType) (string, string, string) { + tb.Helper() + + pth, sha256Sum := testPluginCreate(tb, dir, name) + + if err := client.Sys().RegisterPlugin(&api.RegisterPluginInput{ + Name: name, + Type: pluginType, + Command: name, + SHA256: sha256Sum, + Version: "v1.0.0", + }); err != nil { + tb.Fatal(err) + } + + return pth, sha256Sum, "v1.0.0" +} diff --git a/command/policy.go b/command/policy.go new file mode 100644 index 0000000..289aae1 --- /dev/null +++ b/command/policy.go @@ -0,0 +1,50 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + + "github.com/mitchellh/cli" +) + +var _ cli.Command = (*PolicyCommand)(nil) + +// PolicyCommand is a Command that holds the audit commands +type PolicyCommand struct { + *BaseCommand +} + +func (c *PolicyCommand) Synopsis() string { + return "Interact with policies" +} + +func (c *PolicyCommand) Help() string { + helpText := ` +Usage: vault policy [options] [args] + + This command groups subcommands for interacting with policies. + Users can write, read, and list policies in Vault. + + List all enabled policies: + + $ vault policy list + + Create a policy named "my-policy" from contents on local disk: + + $ vault policy write my-policy ./my-policy.hcl + + Delete the policy named my-policy: + + $ vault policy delete my-policy + + Please see the individual subcommand help for detailed usage information. +` + + return strings.TrimSpace(helpText) +} + +func (c *PolicyCommand) Run(args []string) int { + return cli.RunResultHelp +} diff --git a/command/policy_delete.go b/command/policy_delete.go new file mode 100644 index 0000000..199fb74 --- /dev/null +++ b/command/policy_delete.go @@ -0,0 +1,90 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*PolicyDeleteCommand)(nil) + _ cli.CommandAutocomplete = (*PolicyDeleteCommand)(nil) +) + +type PolicyDeleteCommand struct { + *BaseCommand +} + +func (c *PolicyDeleteCommand) Synopsis() string { + return "Deletes a policy by name" +} + +func (c *PolicyDeleteCommand) Help() string { + helpText := ` +Usage: vault policy delete [options] NAME + + Deletes the policy named NAME in the Vault server. Once the policy is deleted, + all tokens associated with the policy are affected immediately. + + Delete the policy named "my-policy": + + $ vault policy delete my-policy + + Note that it is not possible to delete the "default" or "root" policies. + These are built-in policies. 
+ +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *PolicyDeleteCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP) +} + +func (c *PolicyDeleteCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultPolicies() +} + +func (c *PolicyDeleteCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *PolicyDeleteCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + name := strings.TrimSpace(strings.ToLower(args[0])) + if err := client.Sys().DeletePolicy(name); err != nil { + c.UI.Error(fmt.Sprintf("Error deleting %s: %s", name, err)) + return 2 + } + + c.UI.Output(fmt.Sprintf("Success! Deleted policy: %s", name)) + return 0 +} diff --git a/command/policy_delete_test.go b/command/policy_delete_test.go new file mode 100644 index 0000000..008cd59 --- /dev/null +++ b/command/policy_delete_test.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "reflect" + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func testPolicyDeleteCommand(tb testing.TB) (*cli.MockUi, *PolicyDeleteCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &PolicyDeleteCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestPolicyDeleteCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + nil, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"foo", "bar"}, + "Too many arguments", + 1, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPolicyDeleteCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("integration", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + policy := `path "secret/" {}` + if err := client.Sys().PutPolicy("my-policy", policy); err != nil { + t.Fatal(err) + } + + ui, cmd := testPolicyDeleteCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "my-policy", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! 
Deleted policy: my-policy" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + policies, err := client.Sys().ListPolicies() + if err != nil { + t.Fatal(err) + } + + list := []string{"default", "root"} + if !reflect.DeepEqual(policies, list) { + t.Errorf("expected %q to be %q", policies, list) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testPolicyDeleteCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "my-policy", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error deleting my-policy: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testPolicyDeleteCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/policy_fmt.go b/command/policy_fmt.go new file mode 100644 index 0000000..75a9179 --- /dev/null +++ b/command/policy_fmt.go @@ -0,0 +1,116 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "io/ioutil" + "strings" + + "github.com/hashicorp/hcl/hcl/printer" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/vault" + "github.com/mitchellh/cli" + homedir "github.com/mitchellh/go-homedir" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*PolicyFmtCommand)(nil) + _ cli.CommandAutocomplete = (*PolicyFmtCommand)(nil) +) + +type PolicyFmtCommand struct { + *BaseCommand +} + +func (c *PolicyFmtCommand) Synopsis() string { + return "Formats a policy on disk" +} + +func (c *PolicyFmtCommand) Help() string { + helpText := ` +Usage: vault policy fmt [options] PATH + + Formats a local policy file to the policy specification. This command will + overwrite the file at the given PATH with the properly-formatted policy + file contents. + + Format the local file "my-policy.hcl" as a policy file: + + $ vault policy fmt my-policy.hcl + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *PolicyFmtCommand) Flags() *FlagSets { + return c.flagSet(FlagSetNone) +} + +func (c *PolicyFmtCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictFiles("*.hcl") +} + +func (c *PolicyFmtCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *PolicyFmtCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + // Get the filepath, accounting for ~ and stuff + path, err := homedir.Expand(strings.TrimSpace(args[0])) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to expand path: %s", err)) + return 1 + } + + // Read the entire contents into memory - it would be nice if we could use + // a buffer, but hcl wants the full contents. + b, err := ioutil.ReadFile(path) + if err != nil { + c.UI.Error(fmt.Sprintf("Error reading source file: %s", err)) + return 1 + } + + // Actually parse the policy. 
We always use the root namespace here because + // we don't want to modify the results. + if _, err := vault.ParseACLPolicy(namespace.RootNamespace, string(b)); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + // Generate final contents + result, err := printer.Format(b) + if err != nil { + c.UI.Error(fmt.Sprintf("Error printing result: %s", err)) + return 1 + } + + // Write them back out + if err := ioutil.WriteFile(path, result, 0o644); err != nil { + c.UI.Error(fmt.Sprintf("Error writing result: %s", err)) + return 1 + } + + c.UI.Output(fmt.Sprintf("Success! Formatted policy: %s", path)) + return 0 +} diff --git a/command/policy_fmt_test.go b/command/policy_fmt_test.go new file mode 100644 index 0000000..89ed521 --- /dev/null +++ b/command/policy_fmt_test.go @@ -0,0 +1,236 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "io/ioutil" + "os" + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func testPolicyFmtCommand(tb testing.TB) (*cli.MockUi, *PolicyFmtCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &PolicyFmtCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestPolicyFmtCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + []string{}, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"foo", "bar"}, + "Too many arguments", + 1, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPolicyFmtCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("default", func(t *testing.T) { + t.Parallel() + + policy := strings.TrimSpace(` +path "secret" { + capabilities = ["create", "update","delete"] + +} +`) + + f, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(f.Name()) + if _, err := f.Write([]byte(policy)); err != nil { + t.Fatal(err) + } + f.Close() + + client, closer := testVaultServer(t) + defer closer() + + _, cmd := testPolicyFmtCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + f.Name(), + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := strings.TrimSpace(` +path "secret" { + capabilities = ["create", "update", "delete"] +} +`) + "\n" + + contents, err := ioutil.ReadFile(f.Name()) + if err != nil { + t.Fatal(err) + } + if string(contents) != expected { + t.Errorf("expected %q to be %q", string(contents), expected) + } + }) + + t.Run("bad_hcl", func(t *testing.T) { + t.Parallel() + + policy := `dafdaf` + + f, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(f.Name()) + if _, err := f.Write([]byte(policy)); err != nil { + t.Fatal(err) + } + f.Close() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPolicyFmtCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + f.Name(), + }) + if exp := 1; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + stderr := ui.ErrorWriter.String() + expected := "failed to parse policy" + 
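+ // The formatting step in policy fmt is the stock HCL printer, applied
+ // only after the policy parses; a minimal sketch of the same round trip
+ // (the filename is illustrative):
+ //
+ //     b, err := os.ReadFile("my-policy.hcl")
+ //     if err != nil {
+ //         log.Fatal(err)
+ //     }
+ //     out, err := printer.Format(b) // github.com/hashicorp/hcl/hcl/printer
+ //     if err != nil {
+ //         log.Fatal(err)
+ //     }
+ //     if err := os.WriteFile("my-policy.hcl", out, 0o644); err != nil {
+ //         log.Fatal(err)
+ //     }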
if !strings.Contains(stderr, expected) { + t.Errorf("expected %q to include %q", stderr, expected) + } + }) + + t.Run("bad_policy", func(t *testing.T) { + t.Parallel() + + policy := `banana "foo" {}` + + f, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(f.Name()) + if _, err := f.Write([]byte(policy)); err != nil { + t.Fatal(err) + } + f.Close() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPolicyFmtCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + f.Name(), + }) + if exp := 1; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + stderr := ui.ErrorWriter.String() + expected := "failed to parse policy" + if !strings.Contains(stderr, expected) { + t.Errorf("expected %q to include %q", stderr, expected) + } + }) + + t.Run("bad_policy", func(t *testing.T) { + t.Parallel() + + policy := `path "secret/" { capabilities = ["bogus"] }` + + f, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(f.Name()) + if _, err := f.Write([]byte(policy)); err != nil { + t.Fatal(err) + } + f.Close() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPolicyFmtCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + f.Name(), + }) + if exp := 1; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + stderr := ui.ErrorWriter.String() + expected := "failed to parse policy" + if !strings.Contains(stderr, expected) { + t.Errorf("expected %q to include %q", stderr, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testPolicyFmtCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/policy_list.go b/command/policy_list.go new file mode 100644 index 0000000..7b5bfc1 --- /dev/null +++ b/command/policy_list.go @@ -0,0 +1,86 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*PolicyListCommand)(nil) + _ cli.CommandAutocomplete = (*PolicyListCommand)(nil) +) + +type PolicyListCommand struct { + *BaseCommand +} + +func (c *PolicyListCommand) Synopsis() string { + return "Lists the installed policies" +} + +func (c *PolicyListCommand) Help() string { + helpText := ` +Usage: vault policy list [options] + + Lists the names of the policies that are installed on the Vault server. 
+ +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *PolicyListCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP | FlagSetOutputFormat) +} + +func (c *PolicyListCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *PolicyListCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *PolicyListCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) > 0: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + policies, err := client.Sys().ListPolicies() + if err != nil { + c.UI.Error(fmt.Sprintf("Error listing policies: %s", err)) + return 2 + } + + switch Format(c.UI) { + case "table": + for _, p := range policies { + c.UI.Output(p) + } + return 0 + default: + return OutputData(c.UI, policies) + } +} diff --git a/command/policy_list_test.go b/command/policy_list_test.go new file mode 100644 index 0000000..1976697 --- /dev/null +++ b/command/policy_list_test.go @@ -0,0 +1,117 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func testPolicyListCommand(tb testing.TB) (*cli.MockUi, *PolicyListCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &PolicyListCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestPolicyListCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "too_many_args", + []string{"foo"}, + "Too many arguments", + 1, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPolicyListCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("default", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPolicyListCommand(t) + cmd.client = client + + code := cmd.Run([]string{}) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "default\nroot" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testPolicyListCommand(t) + cmd.client = client + + code := cmd.Run([]string{}) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error listing policies: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testPolicyListCommand(t) + assertNoTabs(t, cmd) + }) 
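+
+ // A minimal sketch of the listing the command wraps, done directly with
+ // the api client; a fresh server returns just "default" and "root":
+ //
+ //     policies, err := client.Sys().ListPolicies()
+ //     if err != nil {
+ //         log.Fatal(err)
+ //     }
+ //     for _, p := range policies {
+ //         fmt.Println(p)
+ //     }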
+} diff --git a/command/policy_read.go b/command/policy_read.go new file mode 100644 index 0000000..4f22644 --- /dev/null +++ b/command/policy_read.go @@ -0,0 +1,100 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*PolicyReadCommand)(nil) + _ cli.CommandAutocomplete = (*PolicyReadCommand)(nil) +) + +type PolicyReadCommand struct { + *BaseCommand +} + +func (c *PolicyReadCommand) Synopsis() string { + return "Prints the contents of a policy" +} + +func (c *PolicyReadCommand) Help() string { + helpText := ` +Usage: vault policy read [options] [NAME] + + Prints the contents and metadata of the Vault policy named NAME. If the policy + does not exist, an error is returned. + + Read the policy named "my-policy": + + $ vault policy read my-policy + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *PolicyReadCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP | FlagSetOutputFormat) +} + +func (c *PolicyReadCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultPolicies() +} + +func (c *PolicyReadCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *PolicyReadCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + name := strings.ToLower(strings.TrimSpace(args[0])) + rules, err := client.Sys().GetPolicy(name) + if err != nil { + c.UI.Error(fmt.Sprintf("Error reading policy named %s: %s", name, err)) + return 2 + } + if rules == "" { + c.UI.Error(fmt.Sprintf("No policy named: %s", name)) + return 2 + } + + switch Format(c.UI) { + case "table": + c.UI.Output(strings.TrimSpace(rules)) + return 0 + default: + resp := map[string]string{ + "policy": rules, + } + return OutputData(c.UI, &resp) + } +} diff --git a/command/policy_read_test.go b/command/policy_read_test.go new file mode 100644 index 0000000..f091749 --- /dev/null +++ b/command/policy_read_test.go @@ -0,0 +1,131 @@ +// Copyright (c) HashiCorp, Inc. 
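+ //
+ // A minimal sketch of the read path the command wraps: GetPolicy returns
+ // the raw rules, and an empty string means the policy does not exist:
+ //
+ //     rules, err := client.Sys().GetPolicy("my-policy")
+ //     if err != nil {
+ //         log.Fatal(err)
+ //     }
+ //     if rules == "" {
+ //         log.Fatalf("no policy named %q", "my-policy")
+ //     }
+ //     fmt.Println(rules)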
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func testPolicyReadCommand(tb testing.TB) (*cli.MockUi, *PolicyReadCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &PolicyReadCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestPolicyReadCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "too_many_args", + []string{"foo", "bar"}, + "Too many arguments", + 1, + }, + { + "no_policy_exists", + []string{"not-a-real-policy"}, + "No policy named", + 2, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPolicyReadCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("default", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + policy := `path "secret/" {}` + if err := client.Sys().PutPolicy("my-policy", policy); err != nil { + t.Fatal(err) + } + + ui, cmd := testPolicyReadCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "my-policy", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, policy) { + t.Errorf("expected %q to contain %q", combined, policy) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testPolicyReadCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "my-policy", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error reading policy named my-policy: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testPolicyReadCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/policy_write.go b/command/policy_write.go new file mode 100644 index 0000000..81ff2b3 --- /dev/null +++ b/command/policy_write.go @@ -0,0 +1,137 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "fmt" + "io" + "os" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*PolicyWriteCommand)(nil) + _ cli.CommandAutocomplete = (*PolicyWriteCommand)(nil) +) + +type PolicyWriteCommand struct { + *BaseCommand + + testStdin io.Reader // for tests +} + +func (c *PolicyWriteCommand) Synopsis() string { + return "Uploads a named policy from a file" +} + +func (c *PolicyWriteCommand) Help() string { + helpText := ` +Usage: vault policy write [options] NAME PATH + + Uploads a policy with name NAME from the contents of a local file PATH or + stdin. If PATH is "-", the policy is read from stdin. Otherwise, it is + loaded from the file at the given path on the local disk. 
+ + Upload a policy named "my-policy" from "/tmp/policy.hcl" on the local disk: + + $ vault policy write my-policy /tmp/policy.hcl + + Upload a policy from stdin: + + $ cat my-policy.hcl | vault policy write my-policy - + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *PolicyWriteCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP) +} + +func (c *PolicyWriteCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictFunc(func(args complete.Args) []string { + // Predict the LAST argument hcl files - we don't want to predict the + // name argument as a filepath. + if len(args.All) == 3 { + return complete.PredictFiles("*.hcl").Predict(args) + } + return nil + }) +} + +func (c *PolicyWriteCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *PolicyWriteCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 2: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 2, got %d)", len(args))) + return 1 + case len(args) > 2: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 2, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + // Policies are normalized to lowercase + policyName := args[0] + formattedName := strings.TrimSpace(strings.ToLower(policyName)) + path := strings.TrimSpace(args[1]) + + // Get the policy contents, either from stdin of a file + var reader io.Reader + if path == "-" { + reader = os.Stdin + if c.testStdin != nil { + reader = c.testStdin + } + } else { + file, err := os.Open(path) + if err != nil { + c.UI.Error(fmt.Sprintf("Error opening policy file: %s", err)) + return 2 + } + defer file.Close() + reader = file + } + + // Read the policy + var buf bytes.Buffer + if _, err := io.Copy(&buf, reader); err != nil { + c.UI.Error(fmt.Sprintf("Error reading policy: %s", err)) + return 2 + } + rules := buf.String() + + if err := client.Sys().PutPolicy(formattedName, rules); err != nil { + c.UI.Error(fmt.Sprintf("Error uploading policy: %s", err)) + return 2 + } + + if policyName != formattedName { + c.UI.Warn(fmt.Sprintf("Policy name was converted from \"%s\" to \"%s\"", policyName, formattedName)) + } + + c.UI.Output(fmt.Sprintf("Success! Uploaded policy: %s", formattedName)) + return 0 +} diff --git a/command/policy_write_test.go b/command/policy_write_test.go new file mode 100644 index 0000000..8294ef1 --- /dev/null +++ b/command/policy_write_test.go @@ -0,0 +1,210 @@ +// Copyright (c) HashiCorp, Inc. 
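+ //
+ // A minimal sketch of the upload the command performs; like the command,
+ // callers should lowercase the name, since policy names are normalized
+ // (the policy contents are illustrative):
+ //
+ //     rules := `path "secret/*" { capabilities = ["read"] }`
+ //     if err := client.Sys().PutPolicy("my-policy", rules); err != nil {
+ //         log.Fatal(err)
+ //     }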
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func testPolicyWriteCommand(tb testing.TB) (*cli.MockUi, *PolicyWriteCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &PolicyWriteCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func testPolicyWritePolicyContents(tb testing.TB) []byte { + return bytes.TrimSpace([]byte(` +path "secret/" { + capabilities = ["read"] +} + `)) +} + +func TestPolicyWriteCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "too_many_args", + []string{"foo", "bar", "baz"}, + "Too many arguments", + 1, + }, + { + "not_enough_args", + []string{"foo"}, + "Not enough arguments", + 1, + }, + { + "bad_file", + []string{"my-policy", "/not/a/real/path.hcl"}, + "Error opening policy file", + 2, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPolicyWriteCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("file", func(t *testing.T) { + t.Parallel() + + policy := testPolicyWritePolicyContents(t) + f, err := ioutil.TempFile("", "vault-policy-write") + if err != nil { + t.Fatal(err) + } + if _, err := f.Write(policy); err != nil { + t.Fatal(err) + } + if err := f.Close(); err != nil { + t.Fatal(err) + } + defer os.Remove(f.Name()) + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPolicyWriteCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "my-policy", f.Name(), + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! Uploaded policy: my-policy" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + policies, err := client.Sys().ListPolicies() + if err != nil { + t.Fatal(err) + } + + list := []string{"default", "my-policy", "root"} + if !reflect.DeepEqual(policies, list) { + t.Errorf("expected %q to be %q", policies, list) + } + }) + + t.Run("stdin", func(t *testing.T) { + t.Parallel() + + stdinR, stdinW := io.Pipe() + go func() { + policy := testPolicyWritePolicyContents(t) + stdinW.Write(policy) + stdinW.Close() + }() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testPolicyWriteCommand(t) + cmd.client = client + cmd.testStdin = stdinR + + code := cmd.Run([]string{ + "my-policy", "-", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! 
Uploaded policy: my-policy" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + policies, err := client.Sys().ListPolicies() + if err != nil { + t.Fatal(err) + } + + list := []string{"default", "my-policy", "root"} + if !reflect.DeepEqual(policies, list) { + t.Errorf("expected %q to be %q", policies, list) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testPolicyWriteCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "my-policy", "-", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error uploading policy: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testPolicyWriteCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/print.go b/command/print.go new file mode 100644 index 0000000..19ac0a6 --- /dev/null +++ b/command/print.go @@ -0,0 +1,48 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*PrintCommand)(nil) + _ cli.CommandAutocomplete = (*PrintCommand)(nil) +) + +type PrintCommand struct { + *BaseCommand +} + +func (c *PrintCommand) Synopsis() string { + return "Prints runtime configurations" +} + +func (c *PrintCommand) Help() string { + helpText := ` +Usage: vault print + + This command groups subcommands for interacting with Vault's runtime values. + +Subcommands: + token Token currently in use +` + return strings.TrimSpace(helpText) +} + +func (c *PrintCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *PrintCommand) AutocompleteFlags() complete.Flags { + return nil +} + +func (c *PrintCommand) Run(args []string) int { + return cli.RunResultHelp +} diff --git a/command/print_token.go b/command/print_token.go new file mode 100644 index 0000000..862af23 --- /dev/null +++ b/command/print_token.go @@ -0,0 +1,56 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*PrintTokenCommand)(nil) + _ cli.CommandAutocomplete = (*PrintTokenCommand)(nil) +) + +type PrintTokenCommand struct { + *BaseCommand +} + +func (c *PrintTokenCommand) Synopsis() string { + return "Prints the vault token currently in use" +} + +func (c *PrintTokenCommand) Help() string { + helpText := ` +Usage: vault print token + + Prints the value of the Vault token that will be used for commands, after + taking into account the configured token-helper and the environment. 
+ + $ vault print token + +` + return strings.TrimSpace(helpText) +} + +func (c *PrintTokenCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *PrintTokenCommand) AutocompleteFlags() complete.Flags { + return nil +} + +func (c *PrintTokenCommand) Run(args []string) int { + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + c.UI.Output(client.Token()) + return 0 +} diff --git a/command/proxy.go b/command/proxy.go new file mode 100644 index 0000000..c6f707c --- /dev/null +++ b/command/proxy.go @@ -0,0 +1,1119 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "context" + "crypto/tls" + "flag" + "fmt" + "io" + "net" + "net/http" + "os" + "sort" + "strings" + "sync" + "time" + + systemd "github.com/coreos/go-systemd/daemon" + ctconfig "github.com/hashicorp/consul-template/config" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-secure-stdlib/gatedwriter" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-secure-stdlib/reloadutil" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/agentproxyshared" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/command/agentproxyshared/cache" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" + "github.com/hashicorp/vault/command/agentproxyshared/sink/inmem" + "github.com/hashicorp/vault/command/agentproxyshared/winsvc" + proxyConfig "github.com/hashicorp/vault/command/proxy/config" + "github.com/hashicorp/vault/helper/logging" + "github.com/hashicorp/vault/helper/metricsutil" + "github.com/hashicorp/vault/helper/useragent" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/internalshared/listenerutil" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/version" + "github.com/kr/pretty" + "github.com/mitchellh/cli" + "github.com/oklog/run" + "github.com/posener/complete" + "golang.org/x/text/cases" + "golang.org/x/text/language" + "google.golang.org/grpc/test/bufconn" +) + +var ( + _ cli.Command = (*ProxyCommand)(nil) + _ cli.CommandAutocomplete = (*ProxyCommand)(nil) +) + +const ( + // flagNameProxyExitAfterAuth is used as a Proxy specific flag to indicate + // that proxy should exit after a single successful auth + flagNameProxyExitAfterAuth = "exit-after-auth" + nameProxy = "proxy" +) + +type ProxyCommand struct { + *BaseCommand + logFlags logFlags + + config *proxyConfig.Config + + ShutdownCh chan struct{} + SighupCh chan struct{} + + tlsReloadFuncsLock sync.RWMutex + tlsReloadFuncs []reloadutil.ReloadFunc + + logWriter io.Writer + logGate *gatedwriter.Writer + logger log.Logger + + // Telemetry object + metricsHelper *metricsutil.MetricsHelper + + cleanupGuard sync.Once + + startedCh chan struct{} // for tests + reloadedCh chan struct{} // for tests + + flagConfigs []string + flagExitAfterAuth bool + flagTestVerifyOnly bool +} + +func (c *ProxyCommand) Synopsis() string { + return "Start a Vault Proxy" +} + +func (c *ProxyCommand) Help() string { + helpText := ` +Usage: vault proxy [options] + + This command starts a Vault Proxy that can perform automatic authentication + in certain environments. 
+
+  Start a proxy with a configuration file:
+
+      $ vault proxy -config=/etc/vault/config.hcl
+
+  For a full list of examples, please see the documentation.
+
+` + c.Flags().Help()
+	return strings.TrimSpace(helpText)
+}
+
+func (c *ProxyCommand) Flags() *FlagSets {
+	set := c.flagSet(FlagSetHTTP)
+
+	f := set.NewFlagSet("Command Options")
+
+	// Augment with the log flags
+	f.addLogFlags(&c.logFlags)
+
+	f.StringSliceVar(&StringSliceVar{
+		Name:   "config",
+		Target: &c.flagConfigs,
+		Completion: complete.PredictOr(
+			complete.PredictFiles("*.hcl"),
+			complete.PredictFiles("*.json"),
+		),
+		Usage: "Path to a configuration file. This configuration file should " +
+			"contain only proxy directives.",
+	})
+
+	f.BoolVar(&BoolVar{
+		Name:    flagNameProxyExitAfterAuth,
+		Target:  &c.flagExitAfterAuth,
+		Default: false,
+		Usage: "If set to true, the proxy will exit with code 0 after a single " +
+			"successful auth, where success means that a token was retrieved and " +
+			"all sinks successfully wrote it",
+	})
+
+	// Internal-only flags to follow.
+	//
+	// Why hello there little source code reader! Welcome to the Vault source
+	// code. The remaining options are intentionally undocumented and come with
+	// no warranty or backwards-compatibility promise. Do not use these flags
+	// in production. Do not build automation using these flags. Unless you are
+	// developing against Vault, you should not need any of these flags.
+	f.BoolVar(&BoolVar{
+		Name:    "test-verify-only",
+		Target:  &c.flagTestVerifyOnly,
+		Default: false,
+		Hidden:  true,
+	})
+
+	// End internal-only flags.
+
+	return set
+}
+
+func (c *ProxyCommand) AutocompleteArgs() complete.Predictor {
+	return complete.PredictNothing
+}
+
+func (c *ProxyCommand) AutocompleteFlags() complete.Flags {
+	return c.Flags().Completions()
+}
+
+func (c *ProxyCommand) Run(args []string) int {
+	f := c.Flags()
+
+	if err := f.Parse(args); err != nil {
+		c.UI.Error(err.Error())
+		return 1
+	}
+
+	// Create a logger. We wrap it in a gated writer so that it doesn't
+	// start logging too early.
+	c.logGate = gatedwriter.NewWriter(os.Stderr)
+	c.logWriter = c.logGate
+
+	if c.logFlags.flagCombineLogs {
+		c.logWriter = os.Stdout
+	}
+
+	// Validation
+	if len(c.flagConfigs) < 1 {
+		c.UI.Error("Must specify at least one config path using -config")
+		return 1
+	}
+
+	config, err := c.loadConfig(c.flagConfigs)
+	if err != nil {
+		c.outputErrors(err)
+		return 1
+	}
+
+	if config.AutoAuth == nil {
+		c.UI.Info("No auto_auth block found in config, the automatic authentication feature will not be started")
+	}
+
+	c.applyConfigOverrides(f, config) // This only needs to happen on start-up to aggregate config from flags and env vars
+	c.config = config
+
+	l, err := c.newLogger()
+	if err != nil {
+		c.outputErrors(err)
+		return 1
+	}
+	c.logger = l
+
+	infoKeys := make([]string, 0, 10)
+	info := make(map[string]string)
+	info["log level"] = config.LogLevel
+	infoKeys = append(infoKeys, "log level")
+
+	infoKeys = append(infoKeys, "version")
+	verInfo := version.GetVersion()
+	info["version"] = verInfo.FullVersionNumber(false)
+	if verInfo.Revision != "" {
+		info["version sha"] = strings.Trim(verInfo.Revision, "'")
+		infoKeys = append(infoKeys, "version sha")
+	}
+	infoKeys = append(infoKeys, "cgo")
+	info["cgo"] = "disabled"
+	if version.CgoEnabled {
+		info["cgo"] = "enabled"
+	}
+
+	// Tests might not want to start a vault server and just want to verify
+	// the configuration.
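Run above wraps its logger output in a gated writer so that nothing is emitted before the startup banner and configuration summary are printed. A minimal sketch of that idea, using a hypothetical bufferingWriter rather than the actual go-secure-stdlib gatedwriter type:

```go
package main

import (
	"bytes"
	"io"
	"os"
	"sync"
)

// bufferingWriter queues writes until Flush is called, then passes
// everything through to the underlying writer. This mirrors the role
// gatedwriter.Writer plays in ProxyCommand.Run: log lines are held
// back until the startup banner has been printed.
type bufferingWriter struct {
	mu      sync.Mutex
	dst     io.Writer
	buf     bytes.Buffer
	flushed bool
}

func (w *bufferingWriter) Write(p []byte) (int, error) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.flushed {
		return w.dst.Write(p)
	}
	return w.buf.Write(p)
}

func (w *bufferingWriter) Flush() error {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.flushed = true
	_, err := w.dst.Write(w.buf.Bytes())
	w.buf.Reset()
	return err
}

func main() {
	w := &bufferingWriter{dst: os.Stderr}
	io.WriteString(w, "early log line, held back\n")
	os.Stdout.WriteString("==> startup banner printed first\n")
	w.Flush() // the buffered log line appears only now
}
```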
+ if c.flagTestVerifyOnly { + if os.Getenv("VAULT_TEST_VERIFY_ONLY_DUMP_CONFIG") != "" { + c.UI.Output(fmt.Sprintf( + "\nConfiguration:\n%s\n", + pretty.Sprint(*c.config))) + } + return 0 + } + + // Ignore any setting of Agent/Proxy's address. This client is used by the Proxy + // to reach out to Vault. This should never loop back to the proxy. + c.flagAgentProxyAddress = "" + client, err := c.Client() + if err != nil { + c.UI.Error(fmt.Sprintf( + "Error fetching client: %v", + err)) + return 1 + } + + serverHealth, err := client.Sys().Health() + // We don't have any special behaviour if the error != nil, as this + // is not worth stopping the Proxy process over. + if err == nil { + // Note that we don't exit if the versions don't match, as this is a valid + // configuration, but we should still let the user know. + serverVersion := serverHealth.Version + proxyVersion := version.GetVersion().VersionNumber() + if serverVersion != proxyVersion { + c.UI.Info("==> Note: Vault Proxy version does not match Vault server version. " + + fmt.Sprintf("Vault Proxy version: %s, Vault server version: %s", proxyVersion, serverVersion)) + } + } + + // telemetry configuration + inmemMetrics, _, prometheusEnabled, err := configutil.SetupTelemetry(&configutil.SetupTelemetryOpts{ + Config: config.Telemetry, + Ui: c.UI, + ServiceName: "vault", + DisplayName: "Vault", + UserAgent: useragent.ProxyString(), + ClusterName: config.ClusterName, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error initializing telemetry: %s", err)) + return 1 + } + c.metricsHelper = metricsutil.NewMetricsHelper(inmemMetrics, prometheusEnabled) + + var method auth.AuthMethod + var sinks []*sink.SinkConfig + if config.AutoAuth != nil { + if client.Headers().Get(consts.NamespaceHeaderName) == "" && config.AutoAuth.Method.Namespace != "" { + client.SetNamespace(config.AutoAuth.Method.Namespace) + } + + sinkClient, err := client.CloneWithHeaders() + if err != nil { + c.UI.Error(fmt.Sprintf("Error cloning client for file sink: %v", err)) + return 1 + } + + if config.DisableIdleConnsAutoAuth { + sinkClient.SetMaxIdleConnections(-1) + } + + if config.DisableKeepAlivesAutoAuth { + sinkClient.SetDisableKeepAlives(true) + } + + for _, sc := range config.AutoAuth.Sinks { + switch sc.Type { + case "file": + config := &sink.SinkConfig{ + Logger: c.logger.Named("sink.file"), + Config: sc.Config, + Client: sinkClient, + WrapTTL: sc.WrapTTL, + DHType: sc.DHType, + DeriveKey: sc.DeriveKey, + DHPath: sc.DHPath, + AAD: sc.AAD, + } + s, err := file.NewFileSink(config) + if err != nil { + c.UI.Error(fmt.Errorf("error creating file sink: %w", err).Error()) + return 1 + } + config.Sink = s + sinks = append(sinks, config) + default: + c.UI.Error(fmt.Sprintf("Unknown sink type %q", sc.Type)) + return 1 + } + } + + authConfig := &auth.AuthConfig{ + Logger: c.logger.Named(fmt.Sprintf("auth.%s", config.AutoAuth.Method.Type)), + MountPath: config.AutoAuth.Method.MountPath, + Config: config.AutoAuth.Method.Config, + } + method, err = agentproxyshared.GetAutoAuthMethodFromConfig(config.AutoAuth.Method.Type, authConfig, config.Vault.Address) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating %s auth method: %v", config.AutoAuth.Method.Type, err)) + return 1 + } + } + + // We do this after auto-auth has been configured, because we don't want to + // confuse the issue of retries for auth failures which have their own + // config and are handled a bit differently. 
+ if os.Getenv(api.EnvVaultMaxRetries) == "" { + client.SetMaxRetries(ctconfig.DefaultRetryAttempts) + if config.Vault != nil { + if config.Vault.Retry != nil { + client.SetMaxRetries(config.Vault.Retry.NumRetries) + } + } + } + + enforceConsistency := cache.EnforceConsistencyNever + whenInconsistent := cache.WhenInconsistentFail + if config.APIProxy != nil { + switch config.APIProxy.EnforceConsistency { + case "always": + enforceConsistency = cache.EnforceConsistencyAlways + case "never", "": + default: + c.UI.Error(fmt.Sprintf("Unknown api_proxy setting for enforce_consistency: %q", config.APIProxy.EnforceConsistency)) + return 1 + } + + switch config.APIProxy.WhenInconsistent { + case "retry": + whenInconsistent = cache.WhenInconsistentRetry + case "forward": + whenInconsistent = cache.WhenInconsistentForward + case "fail", "": + default: + c.UI.Error(fmt.Sprintf("Unknown api_proxy setting for when_inconsistent: %q", config.APIProxy.WhenInconsistent)) + return 1 + } + } + + // Warn if cache _and_ cert auto-auth is enabled but certificates were not + // provided in the auto_auth.method["cert"].config stanza. + if config.Cache != nil && (config.AutoAuth != nil && config.AutoAuth.Method != nil && config.AutoAuth.Method.Type == "cert") { + _, okCertFile := config.AutoAuth.Method.Config["client_cert"] + _, okCertKey := config.AutoAuth.Method.Config["client_key"] + + // If neither of these exists in the cert stanza, proxy will use the + // certs from the vault stanza. + if !okCertFile && !okCertKey { + c.UI.Warn(wrapAtLength("WARNING! Cache is enabled and using the same certificates " + + "from the 'cert' auto-auth method specified in the 'vault' stanza. Consider " + + "specifying certificate information in the 'cert' auto-auth's config stanza.")) + } + + } + + // Output the header that the proxy has started + if !c.logFlags.flagCombineLogs { + c.UI.Output("==> Vault Proxy started! Log data will stream in below:\n") + } + + var leaseCache *cache.LeaseCache + var previousToken string + + proxyClient, err := client.CloneWithHeaders() + if err != nil { + c.UI.Error(fmt.Sprintf("Error cloning client for proxying: %v", err)) + return 1 + } + + if config.DisableIdleConnsAPIProxy { + proxyClient.SetMaxIdleConnections(-1) + } + + if config.DisableKeepAlivesAPIProxy { + proxyClient.SetDisableKeepAlives(true) + } + + apiProxyLogger := c.logger.Named("apiproxy") + + // The API proxy to be used, if listeners are configured + apiProxy, err := cache.NewAPIProxy(&cache.APIProxyConfig{ + Client: proxyClient, + Logger: apiProxyLogger, + EnforceConsistency: enforceConsistency, + WhenInconsistentAction: whenInconsistent, + UserAgentStringFunction: useragent.ProxyStringWithProxiedUserAgent, + UserAgentString: useragent.ProxyAPIProxyString(), + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating API proxy: %v", err)) + return 1 + } + + // ctx and cancelFunc are passed to the AuthHandler, SinkServer, + // and other subsystems, so that they can listen for ctx.Done() to + // fire and shut down accordingly. + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + + // Parse proxy cache configurations + if config.Cache != nil { + cacheLogger := c.logger.Named("cache") + + // Create the lease cache proxier and set its underlying proxier to + // the API proxier. 
+ leaseCache, err = cache.NewLeaseCache(&cache.LeaseCacheConfig{ + Client: proxyClient, + BaseContext: ctx, + Proxier: apiProxy, + Logger: cacheLogger.Named("leasecache"), + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating lease cache: %v", err)) + return 1 + } + + // Configure persistent storage and add to LeaseCache + if config.Cache.Persist != nil { + deferFunc, oldToken, err := agentproxyshared.AddPersistentStorageToLeaseCache(ctx, leaseCache, config.Cache.Persist, cacheLogger) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating persistent cache: %v", err)) + return 1 + } + previousToken = oldToken + if deferFunc != nil { + defer deferFunc() + } + } + } + + var listeners []net.Listener + + // Ensure we've added all the reload funcs for TLS before anyone triggers a reload. + c.tlsReloadFuncsLock.Lock() + + for i, lnConfig := range config.Listeners { + var ln net.Listener + var tlsCfg *tls.Config + + if lnConfig.Type == listenerutil.BufConnType { + inProcListener := bufconn.Listen(1024 * 1024) + if config.Cache != nil { + config.Cache.InProcDialer = listenerutil.NewBufConnWrapper(inProcListener) + } + ln = inProcListener + } else { + lnBundle, err := cache.StartListener(lnConfig) + if err != nil { + c.UI.Error(fmt.Sprintf("Error starting listener: %v", err)) + return 1 + } + + tlsCfg = lnBundle.TLSConfig + ln = lnBundle.Listener + + // Track the reload func, so we can reload later if needed. + c.tlsReloadFuncs = append(c.tlsReloadFuncs, lnBundle.TLSReloadFunc) + } + + listeners = append(listeners, ln) + + proxyVaultToken := true + var inmemSink sink.Sink + if config.APIProxy != nil { + if config.APIProxy.UseAutoAuthToken { + apiProxyLogger.Debug("auto-auth token is allowed to be used; configuring inmem sink") + inmemSink, err = inmem.New(&sink.SinkConfig{ + Logger: apiProxyLogger, + }, leaseCache) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating inmem sink for cache: %v", err)) + return 1 + } + sinks = append(sinks, &sink.SinkConfig{ + Logger: apiProxyLogger, + Sink: inmemSink, + }) + } + proxyVaultToken = !config.APIProxy.ForceAutoAuthToken + } + + var muxHandler http.Handler + if leaseCache != nil { + muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, leaseCache, inmemSink, proxyVaultToken) + } else { + muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, apiProxy, inmemSink, proxyVaultToken) + } + + // Parse 'require_request_header' listener config option, and wrap + // the request handler if necessary + if lnConfig.RequireRequestHeader && ("metrics_only" != lnConfig.Role) { + muxHandler = verifyRequestHeader(muxHandler) + } + + // Create a muxer and add paths relevant for the lease cache layer + mux := http.NewServeMux() + quitEnabled := lnConfig.ProxyAPI != nil && lnConfig.ProxyAPI.EnableQuit + + mux.Handle(consts.ProxyPathMetrics, c.handleMetrics()) + if "metrics_only" != lnConfig.Role { + mux.Handle(consts.ProxyPathCacheClear, leaseCache.HandleCacheClear(ctx)) + mux.Handle(consts.ProxyPathQuit, c.handleQuit(quitEnabled)) + mux.Handle("/", muxHandler) + } + + scheme := "https://" + if tlsCfg == nil { + scheme = "http://" + } + if ln.Addr().Network() == "unix" { + scheme = "unix://" + } + + infoKey := fmt.Sprintf("api address %d", i+1) + info[infoKey] = scheme + ln.Addr().String() + infoKeys = append(infoKeys, infoKey) + + server := &http.Server{ + Addr: ln.Addr().String(), + TLSConfig: tlsCfg, + Handler: mux, + ReadHeaderTimeout: 10 * time.Second, + ReadTimeout: 30 * time.Second, + IdleTimeout: 5 * time.Minute, + ErrorLog: 
apiProxyLogger.StandardLogger(nil), + } + + go server.Serve(ln) + } + + c.tlsReloadFuncsLock.Unlock() + + // Ensure that listeners are closed at all the exits + listenerCloseFunc := func() { + for _, ln := range listeners { + ln.Close() + } + } + defer c.cleanupGuard.Do(listenerCloseFunc) + + // Inform any tests that the server is ready + if c.startedCh != nil { + close(c.startedCh) + } + + var g run.Group + + g.Add(func() error { + for { + select { + case <-c.SighupCh: + c.UI.Output("==> Vault Proxy config reload triggered") + err := c.reloadConfig(c.flagConfigs) + if err != nil { + c.outputErrors(err) + } + // Send the 'reloaded' message on the relevant channel + select { + case c.reloadedCh <- struct{}{}: + default: + } + case <-ctx.Done(): + return nil + } + } + }, func(error) { + cancelFunc() + }) + + // This run group watches for signal termination + g.Add(func() error { + for { + select { + case <-c.ShutdownCh: + c.UI.Output("==> Vault Proxy shutdown triggered") + // Notify systemd that the server is shutting down + // Let the lease cache know this is a shutdown; no need to evict everything + if leaseCache != nil { + leaseCache.SetShuttingDown(true) + } + return nil + case <-ctx.Done(): + return nil + case <-winsvc.ShutdownChannel(): + return nil + } + } + }, func(error) {}) + + // Start auto-auth and sink servers + if method != nil { + // Auth Handler is going to set its own retry values, so we want to + // work on a copy of the client to not affect other subsystems. + ahClient, err := c.client.CloneWithHeaders() + if err != nil { + c.UI.Error(fmt.Sprintf("Error cloning client for auth handler: %v", err)) + return 1 + } + + if config.DisableIdleConnsAutoAuth { + ahClient.SetMaxIdleConnections(-1) + } + + if config.DisableKeepAlivesAutoAuth { + ahClient.SetDisableKeepAlives(true) + } + + ah := auth.NewAuthHandler(&auth.AuthHandlerConfig{ + Logger: c.logger.Named("auth.handler"), + Client: ahClient, + WrapTTL: config.AutoAuth.Method.WrapTTL, + MinBackoff: config.AutoAuth.Method.MinBackoff, + MaxBackoff: config.AutoAuth.Method.MaxBackoff, + EnableReauthOnNewCredentials: config.AutoAuth.EnableReauthOnNewCredentials, + Token: previousToken, + ExitOnError: config.AutoAuth.Method.ExitOnError, + UserAgent: useragent.ProxyAutoAuthString(), + MetricsSignifier: "proxy", + }) + + ss := sink.NewSinkServer(&sink.SinkServerConfig{ + Logger: c.logger.Named("sink.server"), + Client: ahClient, + ExitAfterAuth: config.ExitAfterAuth, + }) + + g.Add(func() error { + return ah.Run(ctx, method) + }, func(error) { + // Let the lease cache know this is a shutdown; no need to evict + // everything + if leaseCache != nil { + leaseCache.SetShuttingDown(true) + } + cancelFunc() + }) + + g.Add(func() error { + err := ss.Run(ctx, ah.OutputCh, sinks) + c.logger.Info("sinks finished, exiting") + + // Start goroutine to drain from ah.OutputCh from this point onward + // to prevent ah.Run from being blocked. 
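The subsystems above are registered as oklog/run actors: each g.Add pairs a blocking execute function with an interrupt function, and g.Run returns once the first actor exits, interrupting the rest (the drain goroutine that follows exists so ah.OutputCh cannot block once the sink server has exited). A minimal sketch of the same pattern with that library:

```go
package main

import (
	"fmt"
	"time"

	"github.com/oklog/run"
)

func main() {
	var g run.Group
	quit := make(chan struct{})

	// Actor 1: blocks until asked to quit.
	g.Add(func() error {
		<-quit
		return nil
	}, func(error) {
		close(quit) // interrupt: unblock the execute function
	})

	// Actor 2: finishes on its own; its exit interrupts actor 1.
	g.Add(func() error {
		time.Sleep(100 * time.Millisecond)
		return fmt.Errorf("done")
	}, func(error) {})

	// Run blocks until the first actor returns, then interrupts the rest.
	fmt.Println("run.Group exited:", g.Run())
}
```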
+ go func() { + for { + select { + case <-ctx.Done(): + return + case <-ah.OutputCh: + } + } + }() + + return err + }, func(error) { + // Let the lease cache know this is a shutdown; no need to evict + // everything + if leaseCache != nil { + leaseCache.SetShuttingDown(true) + } + cancelFunc() + }) + } + + // Server configuration output + padding := 24 + sort.Strings(infoKeys) + caser := cases.Title(language.English) + c.UI.Output("==> Vault Proxy configuration:\n") + for _, k := range infoKeys { + c.UI.Output(fmt.Sprintf( + "%s%s: %s", + strings.Repeat(" ", padding-len(k)), + caser.String(k), + info[k])) + } + c.UI.Output("") + + // Release the log gate. + c.logGate.Flush() + + // Write out the PID to the file now that server has successfully started + if err := c.storePidFile(config.PidFile); err != nil { + c.UI.Error(fmt.Sprintf("Error storing PID: %s", err)) + return 1 + } + + // Notify systemd that the server is ready (if applicable) + c.notifySystemd(systemd.SdNotifyReady) + + defer func() { + if err := c.removePidFile(config.PidFile); err != nil { + c.UI.Error(fmt.Sprintf("Error deleting the PID file: %s", err)) + } + }() + + var exitCode int + if err := g.Run(); err != nil { + c.logger.Error("runtime error encountered", "error", err) + c.UI.Error("Error encountered during run, refer to logs for more details.") + exitCode = 1 + } + c.notifySystemd(systemd.SdNotifyStopping) + return exitCode +} + +// applyConfigOverrides ensures that the config object accurately reflects the desired +// settings as configured by the user. It applies the relevant config setting based +// on the precedence (env var overrides file config, cli overrides env var). +// It mutates the config object supplied. +func (c *ProxyCommand) applyConfigOverrides(f *FlagSets, config *proxyConfig.Config) { + if config.Vault == nil { + config.Vault = &proxyConfig.Vault{} + } + + f.applyLogConfigOverrides(config.SharedConfig) + + f.Visit(func(fl *flag.Flag) { + if fl.Name == flagNameProxyExitAfterAuth { + config.ExitAfterAuth = c.flagExitAfterAuth + } + }) + + c.setStringFlag(f, config.Vault.Address, &StringVar{ + Name: flagNameAddress, + Target: &c.flagAddress, + Default: "https://127.0.0.1:8200", + EnvVar: api.EnvVaultAddress, + }) + config.Vault.Address = c.flagAddress + c.setStringFlag(f, config.Vault.CACert, &StringVar{ + Name: flagNameCACert, + Target: &c.flagCACert, + Default: "", + EnvVar: api.EnvVaultCACert, + }) + config.Vault.CACert = c.flagCACert + c.setStringFlag(f, config.Vault.CAPath, &StringVar{ + Name: flagNameCAPath, + Target: &c.flagCAPath, + Default: "", + EnvVar: api.EnvVaultCAPath, + }) + config.Vault.CAPath = c.flagCAPath + c.setStringFlag(f, config.Vault.ClientCert, &StringVar{ + Name: flagNameClientCert, + Target: &c.flagClientCert, + Default: "", + EnvVar: api.EnvVaultClientCert, + }) + config.Vault.ClientCert = c.flagClientCert + c.setStringFlag(f, config.Vault.ClientKey, &StringVar{ + Name: flagNameClientKey, + Target: &c.flagClientKey, + Default: "", + EnvVar: api.EnvVaultClientKey, + }) + config.Vault.ClientKey = c.flagClientKey + c.setBoolFlag(f, config.Vault.TLSSkipVerify, &BoolVar{ + Name: flagNameTLSSkipVerify, + Target: &c.flagTLSSkipVerify, + Default: false, + EnvVar: api.EnvVaultSkipVerify, + }) + config.Vault.TLSSkipVerify = c.flagTLSSkipVerify + c.setStringFlag(f, config.Vault.TLSServerName, &StringVar{ + Name: flagTLSServerName, + Target: &c.flagTLSServerName, + Default: "", + EnvVar: api.EnvVaultTLSServerName, + }) + config.Vault.TLSServerName = c.flagTLSServerName +} + +func (c 
*ProxyCommand) notifySystemd(status string) { + sent, err := systemd.SdNotify(false, status) + if err != nil { + c.logger.Error("error notifying systemd", "error", err) + } else { + if sent { + c.logger.Debug("sent systemd notification", "notification", status) + } else { + c.logger.Debug("would have sent systemd notification (systemd not present)", "notification", status) + } + } +} + +func (c *ProxyCommand) setStringFlag(f *FlagSets, configVal string, fVar *StringVar) { + var isFlagSet bool + f.Visit(func(f *flag.Flag) { + if f.Name == fVar.Name { + isFlagSet = true + } + }) + + flagEnvValue, flagEnvSet := os.LookupEnv(fVar.EnvVar) + switch { + case isFlagSet: + // Don't do anything as the flag is already set from the command line + case flagEnvSet: + // Use value from env var + *fVar.Target = flagEnvValue + case configVal != "": + // Use value from config + *fVar.Target = configVal + default: + // Use the default value + *fVar.Target = fVar.Default + } +} + +func (c *ProxyCommand) setBoolFlag(f *FlagSets, configVal bool, fVar *BoolVar) { + var isFlagSet bool + f.Visit(func(f *flag.Flag) { + if f.Name == fVar.Name { + isFlagSet = true + } + }) + + flagEnvValue, flagEnvSet := os.LookupEnv(fVar.EnvVar) + switch { + case isFlagSet: + // Don't do anything as the flag is already set from the command line + case flagEnvSet: + // Use value from env var + *fVar.Target = flagEnvValue != "" + case configVal: + // Use value from config + *fVar.Target = configVal + default: + // Use the default value + *fVar.Target = fVar.Default + } +} + +// storePidFile is used to write out our PID to a file if necessary +func (c *ProxyCommand) storePidFile(pidPath string) error { + // Quit fast if no pidfile + if pidPath == "" { + return nil + } + + // Open the PID file + pidFile, err := os.OpenFile(pidPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o600) + if err != nil { + return fmt.Errorf("could not open pid file: %w", err) + } + defer pidFile.Close() + + // Write out the PID + pid := os.Getpid() + _, err = pidFile.WriteString(fmt.Sprintf("%d", pid)) + if err != nil { + return fmt.Errorf("could not write to pid file: %w", err) + } + return nil +} + +// removePidFile is used to cleanup the PID file if necessary +func (c *ProxyCommand) removePidFile(pidPath string) error { + if pidPath == "" { + return nil + } + return os.Remove(pidPath) +} + +func (c *ProxyCommand) handleMetrics() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + logical.RespondError(w, http.StatusMethodNotAllowed, nil) + return + } + + if err := r.ParseForm(); err != nil { + logical.RespondError(w, http.StatusBadRequest, err) + return + } + + format := r.Form.Get("format") + if format == "" { + format = metricsutil.FormatFromRequest(&logical.Request{ + Headers: r.Header, + }) + } + + resp := c.metricsHelper.ResponseForFormat(format) + + status := resp.Data[logical.HTTPStatusCode].(int) + w.Header().Set("Content-Type", resp.Data[logical.HTTPContentType].(string)) + switch v := resp.Data[logical.HTTPRawBody].(type) { + case string: + w.WriteHeader(status) + w.Write([]byte(v)) + case []byte: + w.WriteHeader(status) + w.Write(v) + default: + logical.RespondError(w, http.StatusInternalServerError, fmt.Errorf("wrong response returned")) + } + }) +} + +func (c *ProxyCommand) handleQuit(enabled bool) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !enabled { + w.WriteHeader(http.StatusNotFound) + return + } + + switch r.Method { + case 
http.MethodPost: + default: + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + c.logger.Debug("received quit request") + close(c.ShutdownCh) + }) +} + +// newLogger creates a logger based on parsed config field on the Proxy Command struct. +func (c *ProxyCommand) newLogger() (log.InterceptLogger, error) { + if c.config == nil { + return nil, fmt.Errorf("cannot create logger, no config") + } + + var errors error + + // Parse all the log related config + logLevel, err := logging.ParseLogLevel(c.config.LogLevel) + if err != nil { + errors = multierror.Append(errors, err) + } + + logFormat, err := logging.ParseLogFormat(c.config.LogFormat) + if err != nil { + errors = multierror.Append(errors, err) + } + + logRotateDuration, err := parseutil.ParseDurationSecond(c.config.LogRotateDuration) + if err != nil { + errors = multierror.Append(errors, err) + } + + if errors != nil { + return nil, errors + } + + logCfg, err := logging.NewLogConfig(nameProxy) + if err != nil { + return nil, err + } + logCfg.Name = nameProxy + logCfg.LogLevel = logLevel + logCfg.LogFormat = logFormat + logCfg.LogFilePath = c.config.LogFile + logCfg.LogRotateDuration = logRotateDuration + logCfg.LogRotateBytes = c.config.LogRotateBytes + logCfg.LogRotateMaxFiles = c.config.LogRotateMaxFiles + + l, err := logging.Setup(logCfg, c.logWriter) + if err != nil { + return nil, err + } + + return l, nil +} + +// loadConfig attempts to generate a Proxy config from the file(s) specified. +func (c *ProxyCommand) loadConfig(paths []string) (*proxyConfig.Config, error) { + var errors error + cfg := proxyConfig.NewConfig() + + for _, configPath := range paths { + configFromPath, err := proxyConfig.LoadConfig(configPath) + if err != nil { + errors = multierror.Append(errors, fmt.Errorf("error loading configuration from %s: %w", configPath, err)) + } else { + cfg = cfg.Merge(configFromPath) + } + } + + if errors != nil { + return nil, errors + } + + if err := cfg.ValidateConfig(); err != nil { + return nil, fmt.Errorf("error validating configuration: %w", err) + } + + return cfg, nil +} + +// reloadConfig will attempt to reload the config from file(s) and adjust certain +// config values without requiring a restart of the Vault Proxy. +// If config is retrieved without error it is stored in the config field of the ProxyCommand. +// This operation is not atomic and could result in updated config but partially applied config settings. +// The error returned from this func may be a multierror. +// This function will most likely be called due to Vault Proxy receiving a SIGHUP signal. +// Currently only reloading the following are supported: +// * log level +// * TLS certs for listeners +func (c *ProxyCommand) reloadConfig(paths []string) error { + // Notify systemd that the server is reloading + c.notifySystemd(systemd.SdNotifyReloading) + defer c.notifySystemd(systemd.SdNotifyReady) + + var errors error + + // Reload the config + cfg, err := c.loadConfig(paths) + if err != nil { + // Returning single error as we won't continue with bad config and won't 'commit' it. + return err + } + c.config = cfg + + // Update the log level + err = c.reloadLogLevel() + if err != nil { + errors = multierror.Append(errors, err) + } + + // Update certs + err = c.reloadCerts() + if err != nil { + errors = multierror.Append(errors, err) + } + + return errors +} + +// reloadLogLevel will attempt to update the log level for the logger attached +// to the ProxyCommand struct using the value currently set in config. 
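reloadConfig above is driven by SighupCh, which Run selects on in its reload actor. A sketch of how such a channel is commonly wired up with os/signal (makeSighupCh below is an assumed shape for illustration, not the package's actual MakeSighupCh):

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

// makeSighupCh returns a channel that receives a value each time the
// process gets SIGHUP, the signal that triggers a proxy config reload.
func makeSighupCh() chan struct{} {
	resultCh := make(chan struct{})
	signalCh := make(chan os.Signal, 4)
	signal.Notify(signalCh, syscall.SIGHUP)
	go func() {
		for range signalCh {
			resultCh <- struct{}{}
		}
	}()
	return resultCh
}

func main() {
	sighup := makeSighupCh()
	fmt.Println("send SIGHUP to trigger a reload; Ctrl+C to exit")
	for range sighup {
		fmt.Println("==> reload triggered")
	}
}
```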
+func (c *ProxyCommand) reloadLogLevel() error { + logLevel, err := logging.ParseLogLevel(c.config.LogLevel) + if err != nil { + return err + } + + c.logger.SetLevel(logLevel) + + return nil +} + +// reloadCerts will attempt to reload certificates using a reload func which +// was provided when the listeners were configured, only funcs that were appended +// to the ProxyCommand slice will be invoked. +// This function returns a multierror type so that every func can report an error +// if it encounters one. +func (c *ProxyCommand) reloadCerts() error { + var errors error + + c.tlsReloadFuncsLock.RLock() + defer c.tlsReloadFuncsLock.RUnlock() + + for _, reloadFunc := range c.tlsReloadFuncs { + // Non-TLS listeners will have a nil reload func. + if reloadFunc != nil { + err := reloadFunc() + if err != nil { + errors = multierror.Append(errors, err) + } + } + } + + return errors +} + +// outputErrors will take an error or multierror and handle outputting each to the UI +func (c *ProxyCommand) outputErrors(err error) { + if err != nil { + if me, ok := err.(*multierror.Error); ok { + for _, err := range me.Errors { + c.UI.Error(err.Error()) + } + } else { + c.UI.Error(err.Error()) + } + } +} diff --git a/command/proxy/config/config.go b/command/proxy/config/config.go new file mode 100644 index 0000000..f760f1e --- /dev/null +++ b/command/proxy/config/config.go @@ -0,0 +1,832 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package config + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "os" + "path/filepath" + "strings" + "time" + + ctconfig "github.com/hashicorp/consul-template/config" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/vault/command/agentproxyshared" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/internalshared/configutil" +) + +// Config is the configuration for Vault Proxy. 
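The Config type below is what LoadConfig (defined later in this file) decodes HCL into, and ValidateConfig enforces the cross-block rules. A short usage sketch under assumed conditions; the path is illustrative:

```go
package main

import (
	"fmt"
	"os"

	proxyConfig "github.com/hashicorp/vault/command/proxy/config"
)

func main() {
	// LoadConfig accepts a file or a directory of *.hcl/*.json files.
	cfg, err := proxyConfig.LoadConfig("/etc/vault/proxy.hcl") // illustrative path
	if err != nil {
		fmt.Fprintln(os.Stderr, "load:", err)
		os.Exit(1)
	}
	// ValidateConfig enforces cross-block rules, e.g. cache/api_proxy
	// require a listener, and auto_auth needs a sink or use_auto_auth_token.
	if err := cfg.ValidateConfig(); err != nil {
		fmt.Fprintln(os.Stderr, "validate:", err)
		os.Exit(1)
	}
	fmt.Println("exit_after_auth:", cfg.ExitAfterAuth)
}
```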
+type Config struct {
+	*configutil.SharedConfig `hcl:"-"`
+
+	AutoAuth                  *AutoAuth `hcl:"auto_auth"`
+	ExitAfterAuth             bool      `hcl:"exit_after_auth"`
+	Cache                     *Cache    `hcl:"cache"`
+	APIProxy                  *APIProxy `hcl:"api_proxy"`
+	Vault                     *Vault    `hcl:"vault"`
+	DisableIdleConns          []string  `hcl:"disable_idle_connections"`
+	DisableIdleConnsAPIProxy  bool      `hcl:"-"`
+	DisableIdleConnsAutoAuth  bool      `hcl:"-"`
+	DisableKeepAlives         []string  `hcl:"disable_keep_alives"`
+	DisableKeepAlivesAPIProxy bool      `hcl:"-"`
+	DisableKeepAlivesAutoAuth bool      `hcl:"-"`
+}
+
+const (
+	DisableIdleConnsEnv  = "VAULT_PROXY_DISABLE_IDLE_CONNECTIONS"
+	DisableKeepAlivesEnv = "VAULT_PROXY_DISABLE_KEEP_ALIVES"
+)
+
+func (c *Config) Prune() {
+	for _, l := range c.Listeners {
+		l.RawConfig = nil
+		l.Profiling.UnusedKeys = nil
+		l.Telemetry.UnusedKeys = nil
+		l.CustomResponseHeaders = nil
+	}
+	c.FoundKeys = nil
+	c.UnusedKeys = nil
+	c.SharedConfig.FoundKeys = nil
+	c.SharedConfig.UnusedKeys = nil
+	if c.Telemetry != nil {
+		c.Telemetry.FoundKeys = nil
+		c.Telemetry.UnusedKeys = nil
+	}
+}
+
+type Retry struct {
+	NumRetries int `hcl:"num_retries"`
+}
+
+// Vault contains configuration for connecting to Vault servers
+type Vault struct {
+	Address          string      `hcl:"address"`
+	CACert           string      `hcl:"ca_cert"`
+	CAPath           string      `hcl:"ca_path"`
+	TLSSkipVerify    bool        `hcl:"-"`
+	TLSSkipVerifyRaw interface{} `hcl:"tls_skip_verify"`
+	ClientCert       string      `hcl:"client_cert"`
+	ClientKey        string      `hcl:"client_key"`
+	TLSServerName    string      `hcl:"tls_server_name"`
+	Retry            *Retry      `hcl:"retry"`
+}
+
+// transportDialer is an interface that allows passing a custom dialer function
+// to an HTTP client's transport config
+type transportDialer interface {
+	// Dial is intended to match https://pkg.go.dev/net#Dialer.Dial
+	Dial(network, address string) (net.Conn, error)
+
+	// DialContext is intended to match https://pkg.go.dev/net#Dialer.DialContext
+	DialContext(ctx context.Context, network, address string) (net.Conn, error)
+}
+
+// APIProxy contains any configuration needed for proxy mode
+type APIProxy struct {
+	UseAutoAuthTokenRaw interface{} `hcl:"use_auto_auth_token"`
+	UseAutoAuthToken    bool        `hcl:"-"`
+	ForceAutoAuthToken  bool        `hcl:"-"`
+	EnforceConsistency  string      `hcl:"enforce_consistency"`
+	WhenInconsistent    string      `hcl:"when_inconsistent"`
+}
+
+// Cache contains any configuration needed for Cache mode
+type Cache struct {
+	Persist      *agentproxyshared.PersistConfig `hcl:"persist"`
+	InProcDialer transportDialer                 `hcl:"-"`
+}
+
+// AutoAuth is the configured authentication method and sinks
+type AutoAuth struct {
+	Method *Method `hcl:"-"`
+	Sinks  []*Sink `hcl:"sinks"`
+
+	// NOTE: This is unsupported outside of testing and may disappear at any
+	// time.
+ EnableReauthOnNewCredentials bool `hcl:"enable_reauth_on_new_credentials"` +} + +// Method represents the configuration for the authentication backend +type Method struct { + Type string + MountPath string `hcl:"mount_path"` + WrapTTLRaw interface{} `hcl:"wrap_ttl"` + WrapTTL time.Duration `hcl:"-"` + MinBackoffRaw interface{} `hcl:"min_backoff"` + MinBackoff time.Duration `hcl:"-"` + MaxBackoffRaw interface{} `hcl:"max_backoff"` + MaxBackoff time.Duration `hcl:"-"` + Namespace string `hcl:"namespace"` + ExitOnError bool `hcl:"exit_on_err"` + Config map[string]interface{} +} + +// Sink defines a location to write the authenticated token +type Sink struct { + Type string + WrapTTLRaw interface{} `hcl:"wrap_ttl"` + WrapTTL time.Duration `hcl:"-"` + DHType string `hcl:"dh_type"` + DeriveKey bool `hcl:"derive_key"` + DHPath string `hcl:"dh_path"` + AAD string `hcl:"aad"` + AADEnvVar string `hcl:"aad_env_var"` + Config map[string]interface{} +} + +func NewConfig() *Config { + return &Config{ + SharedConfig: new(configutil.SharedConfig), + } +} + +// Merge merges two Proxy configurations. +func (c *Config) Merge(c2 *Config) *Config { + if c2 == nil { + return c + } + + result := NewConfig() + + result.SharedConfig = c.SharedConfig + if c2.SharedConfig != nil { + result.SharedConfig = c.SharedConfig.Merge(c2.SharedConfig) + } + + result.AutoAuth = c.AutoAuth + if c2.AutoAuth != nil { + result.AutoAuth = c2.AutoAuth + } + + result.Cache = c.Cache + if c2.Cache != nil { + result.Cache = c2.Cache + } + + result.APIProxy = c.APIProxy + if c2.APIProxy != nil { + result.APIProxy = c2.APIProxy + } + + result.DisableMlock = c.DisableMlock + if c2.DisableMlock { + result.DisableMlock = c2.DisableMlock + } + + // For these, ignore the non-specific one and overwrite them all + result.DisableIdleConnsAutoAuth = c.DisableIdleConnsAutoAuth + if c2.DisableIdleConnsAutoAuth { + result.DisableIdleConnsAutoAuth = c2.DisableIdleConnsAutoAuth + } + + result.DisableIdleConnsAPIProxy = c.DisableIdleConnsAPIProxy + if c2.DisableIdleConnsAPIProxy { + result.DisableIdleConnsAPIProxy = c2.DisableIdleConnsAPIProxy + } + + result.DisableKeepAlivesAutoAuth = c.DisableKeepAlivesAutoAuth + if c2.DisableKeepAlivesAutoAuth { + result.DisableKeepAlivesAutoAuth = c2.DisableKeepAlivesAutoAuth + } + + result.DisableKeepAlivesAPIProxy = c.DisableKeepAlivesAPIProxy + if c2.DisableKeepAlivesAPIProxy { + result.DisableKeepAlivesAPIProxy = c2.DisableKeepAlivesAPIProxy + } + + result.ExitAfterAuth = c.ExitAfterAuth + if c2.ExitAfterAuth { + result.ExitAfterAuth = c2.ExitAfterAuth + } + + result.Vault = c.Vault + if c2.Vault != nil { + result.Vault = c2.Vault + } + + result.PidFile = c.PidFile + if c2.PidFile != "" { + result.PidFile = c2.PidFile + } + + return result +} + +// ValidateConfig validates a Vault configuration after it has been fully merged together, to +// ensure that required combinations of configs are there +func (c *Config) ValidateConfig() error { + if c.Cache != nil { + if len(c.Listeners) < 1 { + return fmt.Errorf("enabling the cache requires at least 1 listener to be defined") + } + } + + if c.APIProxy != nil { + if len(c.Listeners) < 1 { + return fmt.Errorf("configuring the api_proxy requires at least 1 listener to be defined") + } + + if c.APIProxy.UseAutoAuthToken { + if c.AutoAuth == nil { + return fmt.Errorf("api_proxy.use_auto_auth_token is true but auto_auth not configured") + } + if c.AutoAuth != nil && c.AutoAuth.Method != nil && c.AutoAuth.Method.WrapTTL > 0 { + return 
fmt.Errorf("api_proxy.use_auto_auth_token is true and auto_auth uses wrapping") + } + } + } + + if c.AutoAuth != nil { + if len(c.AutoAuth.Sinks) == 0 && + (c.APIProxy == nil || !c.APIProxy.UseAutoAuthToken) { + return fmt.Errorf("auto_auth requires at least one sink or api_proxy.use_auto_auth_token=true") + } + } + + if c.AutoAuth == nil && c.Cache == nil && len(c.Listeners) == 0 { + return fmt.Errorf("no auto_auth, cache, or listener block found in config") + } + + return nil +} + +// LoadConfig loads the configuration at the given path, regardless if +// it's a file or directory. +func LoadConfig(path string) (*Config, error) { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + + if fi.IsDir() { + return LoadConfigDir(path) + } + return LoadConfigFile(path) +} + +// LoadConfigDir loads the configuration at the given path if it's a directory +func LoadConfigDir(dir string) (*Config, error) { + f, err := os.Open(dir) + if err != nil { + return nil, err + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + return nil, err + } + if !fi.IsDir() { + return nil, fmt.Errorf("configuration path must be a directory: %q", dir) + } + + var files []string + err = nil + for err != io.EOF { + var fis []os.FileInfo + fis, err = f.Readdir(128) + if err != nil && err != io.EOF { + return nil, err + } + + for _, fi := range fis { + // Ignore directories + if fi.IsDir() { + continue + } + + // Only care about files that are valid to load. + name := fi.Name() + skip := true + if strings.HasSuffix(name, ".hcl") { + skip = false + } else if strings.HasSuffix(name, ".json") { + skip = false + } + if skip || isTemporaryFile(name) { + continue + } + + path := filepath.Join(dir, name) + files = append(files, path) + } + } + + result := NewConfig() + for _, f := range files { + config, err := LoadConfigFile(f) + if err != nil { + return nil, fmt.Errorf("error loading %q: %w", f, err) + } + + if result == nil { + result = config + } else { + result = result.Merge(config) + } + } + + return result, nil +} + +// isTemporaryFile returns true or false depending on whether the +// provided file name is a temporary file for the following editors: +// emacs or vim. +func isTemporaryFile(name string) bool { + return strings.HasSuffix(name, "~") || // vim + strings.HasPrefix(name, ".#") || // emacs + (strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#")) // emacs +} + +// LoadConfigFile loads the configuration at the given path if it's a file +func LoadConfigFile(path string) (*Config, error) { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + + if fi.IsDir() { + return nil, fmt.Errorf("location is a directory, not a file") + } + + // Read the file + d, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + // Parse! 
+ obj, err := hcl.Parse(string(d)) + if err != nil { + return nil, err + } + + // Attribute + ast.Walk(obj, func(n ast.Node) (ast.Node, bool) { + if k, ok := n.(*ast.ObjectKey); ok { + k.Token.Pos.Filename = path + } + return n, true + }) + + // Start building the result + result := NewConfig() + if err := hcl.DecodeObject(result, obj); err != nil { + return nil, err + } + + sharedConfig, err := configutil.ParseConfig(string(d)) + if err != nil { + return nil, err + } + + // Pruning custom headers for Vault for now + for _, ln := range sharedConfig.Listeners { + ln.CustomResponseHeaders = nil + } + + result.SharedConfig = sharedConfig + + list, ok := obj.Node.(*ast.ObjectList) + if !ok { + return nil, fmt.Errorf("error parsing: file doesn't contain a root object") + } + + if err := parseAutoAuth(result, list); err != nil { + return nil, fmt.Errorf("error parsing 'auto_auth': %w", err) + } + + if err := parseCache(result, list); err != nil { + return nil, fmt.Errorf("error parsing 'cache':%w", err) + } + + if err := parseAPIProxy(result, list); err != nil { + return nil, fmt.Errorf("error parsing 'api_proxy':%w", err) + } + + err = parseVault(result, list) + if err != nil { + return nil, fmt.Errorf("error parsing 'vault':%w", err) + } + + if result.Vault != nil { + // Set defaults + if result.Vault.Retry == nil { + result.Vault.Retry = &Retry{} + } + switch result.Vault.Retry.NumRetries { + case 0: + result.Vault.Retry.NumRetries = ctconfig.DefaultRetryAttempts + case -1: + result.Vault.Retry.NumRetries = 0 + } + } + + if disableIdleConnsEnv := os.Getenv(DisableIdleConnsEnv); disableIdleConnsEnv != "" { + result.DisableIdleConns, err = parseutil.ParseCommaStringSlice(strings.ToLower(disableIdleConnsEnv)) + if err != nil { + return nil, fmt.Errorf("error parsing environment variable %s: %v", DisableIdleConnsEnv, err) + } + } + + for _, subsystem := range result.DisableIdleConns { + switch subsystem { + case "auto-auth": + result.DisableIdleConnsAutoAuth = true + case "caching", "proxying": + result.DisableIdleConnsAPIProxy = true + case "": + continue + default: + return nil, fmt.Errorf("unknown disable_idle_connections value: %s", subsystem) + } + } + + if disableKeepAlivesEnv := os.Getenv(DisableKeepAlivesEnv); disableKeepAlivesEnv != "" { + result.DisableKeepAlives, err = parseutil.ParseCommaStringSlice(strings.ToLower(disableKeepAlivesEnv)) + if err != nil { + return nil, fmt.Errorf("error parsing environment variable %s: %v", DisableKeepAlivesEnv, err) + } + } + + for _, subsystem := range result.DisableKeepAlives { + switch subsystem { + case "auto-auth": + result.DisableKeepAlivesAutoAuth = true + case "caching", "proxying": + result.DisableKeepAlivesAPIProxy = true + case "": + continue + default: + return nil, fmt.Errorf("unknown disable_keep_alives value: %s", subsystem) + } + } + + return result, nil +} + +func parseVault(result *Config, list *ast.ObjectList) error { + name := "vault" + + vaultList := list.Filter(name) + if len(vaultList.Items) == 0 { + return nil + } + + if len(vaultList.Items) > 1 { + return fmt.Errorf("one and only one %q block is required", name) + } + + item := vaultList.Items[0] + + var v Vault + err := hcl.DecodeObject(&v, item.Val) + if err != nil { + return err + } + + if v.TLSSkipVerifyRaw != nil { + v.TLSSkipVerify, err = parseutil.ParseBool(v.TLSSkipVerifyRaw) + if err != nil { + return err + } + } + + result.Vault = &v + + subs, ok := item.Val.(*ast.ObjectType) + if !ok { + return fmt.Errorf("could not parse %q as an object", name) + } + + if err := 
parseRetry(result, subs.List); err != nil { + return fmt.Errorf("error parsing 'retry': %w", err) + } + + return nil +} + +func parseRetry(result *Config, list *ast.ObjectList) error { + name := "retry" + + retryList := list.Filter(name) + if len(retryList.Items) == 0 { + return nil + } + + if len(retryList.Items) > 1 { + return fmt.Errorf("one and only one %q block is required", name) + } + + item := retryList.Items[0] + + var r Retry + err := hcl.DecodeObject(&r, item.Val) + if err != nil { + return err + } + + result.Vault.Retry = &r + + return nil +} + +func parseAPIProxy(result *Config, list *ast.ObjectList) error { + name := "api_proxy" + + apiProxyList := list.Filter(name) + if len(apiProxyList.Items) == 0 { + return nil + } + + if len(apiProxyList.Items) > 1 { + return fmt.Errorf("one and only one %q block is required", name) + } + + item := apiProxyList.Items[0] + + var apiProxy APIProxy + err := hcl.DecodeObject(&apiProxy, item.Val) + if err != nil { + return err + } + + if apiProxy.UseAutoAuthTokenRaw != nil { + apiProxy.UseAutoAuthToken, err = parseutil.ParseBool(apiProxy.UseAutoAuthTokenRaw) + if err != nil { + // Could be a value of "force" instead of "true"/"false" + switch apiProxy.UseAutoAuthTokenRaw.(type) { + case string: + v := apiProxy.UseAutoAuthTokenRaw.(string) + + if !strings.EqualFold(v, "force") { + return fmt.Errorf("value of 'use_auto_auth_token' can be either true/false/force, %q is an invalid option", apiProxy.UseAutoAuthTokenRaw) + } + apiProxy.UseAutoAuthToken = true + apiProxy.ForceAutoAuthToken = true + + default: + return err + } + } + } + result.APIProxy = &apiProxy + + return nil +} + +func parseCache(result *Config, list *ast.ObjectList) error { + name := "cache" + + cacheList := list.Filter(name) + if len(cacheList.Items) == 0 { + return nil + } + + if len(cacheList.Items) > 1 { + return fmt.Errorf("one and only one %q block is required", name) + } + + item := cacheList.Items[0] + + var c Cache + err := hcl.DecodeObject(&c, item.Val) + if err != nil { + return err + } + + result.Cache = &c + + subs, ok := item.Val.(*ast.ObjectType) + if !ok { + return fmt.Errorf("could not parse %q as an object", name) + } + subList := subs.List + if err := parsePersist(result, subList); err != nil { + return fmt.Errorf("error parsing persist: %w", err) + } + + return nil +} + +func parsePersist(result *Config, list *ast.ObjectList) error { + name := "persist" + + persistList := list.Filter(name) + if len(persistList.Items) == 0 { + return nil + } + + if len(persistList.Items) > 1 { + return fmt.Errorf("only one %q block is required", name) + } + + item := persistList.Items[0] + + var p agentproxyshared.PersistConfig + err := hcl.DecodeObject(&p, item.Val) + if err != nil { + return err + } + + if p.Type == "" { + if len(item.Keys) == 1 { + p.Type = strings.ToLower(item.Keys[0].Token.Value().(string)) + } + if p.Type == "" { + return errors.New("persist type must be specified") + } + } + + result.Cache.Persist = &p + + return nil +} + +func parseAutoAuth(result *Config, list *ast.ObjectList) error { + name := "auto_auth" + + autoAuthList := list.Filter(name) + if len(autoAuthList.Items) == 0 { + return nil + } + if len(autoAuthList.Items) > 1 { + return fmt.Errorf("at most one %q block is allowed", name) + } + + // Get our item + item := autoAuthList.Items[0] + + var a AutoAuth + if err := hcl.DecodeObject(&a, item.Val); err != nil { + return err + } + + result.AutoAuth = &a + + subs, ok := item.Val.(*ast.ObjectType) + if !ok { + return fmt.Errorf("could not parse %q 
as an object", name) + } + subList := subs.List + + if err := parseMethod(result, subList); err != nil { + return fmt.Errorf("error parsing 'method': %w", err) + } + if a.Method == nil { + return fmt.Errorf("no 'method' block found") + } + + if err := parseSinks(result, subList); err != nil { + return fmt.Errorf("error parsing 'sink' stanzas: %w", err) + } + + if result.AutoAuth.Method.WrapTTL > 0 { + if len(result.AutoAuth.Sinks) != 1 { + return fmt.Errorf("error parsing auto_auth: wrapping enabled on auth method and 0 or many sinks defined") + } + + if result.AutoAuth.Sinks[0].WrapTTL > 0 { + return fmt.Errorf("error parsing auto_auth: wrapping enabled both on auth method and sink") + } + } + + if result.AutoAuth.Method.MaxBackoffRaw != nil { + var err error + if result.AutoAuth.Method.MaxBackoff, err = parseutil.ParseDurationSecond(result.AutoAuth.Method.MaxBackoffRaw); err != nil { + return err + } + result.AutoAuth.Method.MaxBackoffRaw = nil + } + + if result.AutoAuth.Method.MinBackoffRaw != nil { + var err error + if result.AutoAuth.Method.MinBackoff, err = parseutil.ParseDurationSecond(result.AutoAuth.Method.MinBackoffRaw); err != nil { + return err + } + result.AutoAuth.Method.MinBackoffRaw = nil + } + + return nil +} + +func parseMethod(result *Config, list *ast.ObjectList) error { + name := "method" + + methodList := list.Filter(name) + if len(methodList.Items) != 1 { + return fmt.Errorf("one and only one %q block is required", name) + } + + // Get our item + item := methodList.Items[0] + + var m Method + if err := hcl.DecodeObject(&m, item.Val); err != nil { + return err + } + + if m.Type == "" { + if len(item.Keys) == 1 { + m.Type = strings.ToLower(item.Keys[0].Token.Value().(string)) + } + if m.Type == "" { + return errors.New("method type must be specified") + } + } + + // Default to Vault's default + if m.MountPath == "" { + m.MountPath = fmt.Sprintf("auth/%s", m.Type) + } + // Standardize on no trailing slash + m.MountPath = strings.TrimSuffix(m.MountPath, "/") + + if m.WrapTTLRaw != nil { + var err error + if m.WrapTTL, err = parseutil.ParseDurationSecond(m.WrapTTLRaw); err != nil { + return err + } + m.WrapTTLRaw = nil + } + + // Canonicalize namespace path if provided + m.Namespace = namespace.Canonicalize(m.Namespace) + + result.AutoAuth.Method = &m + return nil +} + +func parseSinks(result *Config, list *ast.ObjectList) error { + name := "sink" + + sinkList := list.Filter(name) + if len(sinkList.Items) < 1 { + return nil + } + + var ts []*Sink + + for _, item := range sinkList.Items { + var s Sink + if err := hcl.DecodeObject(&s, item.Val); err != nil { + return err + } + + if s.Type == "" { + if len(item.Keys) == 1 { + s.Type = strings.ToLower(item.Keys[0].Token.Value().(string)) + } + if s.Type == "" { + return errors.New("sink type must be specified") + } + } + + if s.WrapTTLRaw != nil { + var err error + if s.WrapTTL, err = parseutil.ParseDurationSecond(s.WrapTTLRaw); err != nil { + return multierror.Prefix(err, fmt.Sprintf("sink.%s", s.Type)) + } + s.WrapTTLRaw = nil + } + + switch s.DHType { + case "": + case "curve25519": + default: + return multierror.Prefix(errors.New("invalid value for 'dh_type'"), fmt.Sprintf("sink.%s", s.Type)) + } + + if s.AADEnvVar != "" { + s.AAD = os.Getenv(s.AADEnvVar) + s.AADEnvVar = "" + } + + switch { + case s.DHPath == "" && s.DHType == "": + if s.AAD != "" { + return multierror.Prefix(errors.New("specifying AAD data without 'dh_type' does not make sense"), fmt.Sprintf("sink.%s", s.Type)) + } + if s.DeriveKey { + return 
multierror.Prefix(errors.New("specifying 'derive_key' data without 'dh_type' does not make sense"), fmt.Sprintf("sink.%s", s.Type)) + } + case s.DHPath != "" && s.DHType != "": + default: + return multierror.Prefix(errors.New("'dh_type' and 'dh_path' must be specified together"), fmt.Sprintf("sink.%s", s.Type)) + } + + ts = append(ts, &s) + } + + result.AutoAuth.Sinks = ts + return nil +} diff --git a/command/proxy/config/config_test.go b/command/proxy/config/config_test.go new file mode 100644 index 0000000..612d7a6 --- /dev/null +++ b/command/proxy/config/config_test.go @@ -0,0 +1,119 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package config + +import ( + "testing" + + "github.com/go-test/deep" + "github.com/hashicorp/vault/command/agentproxyshared" + "github.com/hashicorp/vault/internalshared/configutil" +) + +// TestLoadConfigFile_ProxyCache tests loading a config file containing a cache +// as well as a valid proxy config. +func TestLoadConfigFile_ProxyCache(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config-cache.hcl") + if err != nil { + t.Fatal(err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + PidFile: "./pidfile", + Listeners: []*configutil.Listener{ + { + Type: "unix", + Address: "/path/to/socket", + TLSDisable: true, + SocketMode: "configmode", + SocketUser: "configuser", + SocketGroup: "configgroup", + }, + { + Type: "tcp", + Address: "127.0.0.1:8300", + TLSDisable: true, + }, + { + Type: "tcp", + Address: "127.0.0.1:3000", + Role: "metrics_only", + TLSDisable: true, + }, + { + Type: "tcp", + Role: "default", + Address: "127.0.0.1:8400", + TLSKeyFile: "/path/to/cakey.pem", + TLSCertFile: "/path/to/cacert.pem", + }, + }, + }, + AutoAuth: &AutoAuth{ + Method: &Method{ + Type: "aws", + MountPath: "auth/aws", + Config: map[string]interface{}{ + "role": "foobar", + }, + }, + Sinks: []*Sink{ + { + Type: "file", + DHType: "curve25519", + DHPath: "/tmp/file-foo-dhpath", + AAD: "foobar", + Config: map[string]interface{}{ + "path": "/tmp/file-foo", + }, + }, + }, + }, + APIProxy: &APIProxy{ + EnforceConsistency: "always", + WhenInconsistent: "retry", + UseAutoAuthTokenRaw: true, + UseAutoAuthToken: true, + ForceAutoAuthToken: false, + }, + Cache: &Cache{ + Persist: &agentproxyshared.PersistConfig{ + Type: "kubernetes", + Path: "/vault/agent-cache/", + KeepAfterImport: true, + ExitOnErr: true, + ServiceAccountTokenFile: "/tmp/serviceaccount/token", + }, + }, + Vault: &Vault{ + Address: "http://127.0.0.1:1111", + CACert: "config_ca_cert", + CAPath: "config_ca_path", + TLSSkipVerifyRaw: interface{}("true"), + TLSSkipVerify: true, + ClientCert: "config_client_cert", + ClientKey: "config_client_key", + Retry: &Retry{ + NumRetries: 12, + }, + }, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } + + config, err = LoadConfigFile("./test-fixtures/config-cache-embedded-type.hcl") + if err != nil { + t.Fatal(err) + } + expected.Vault.TLSSkipVerifyRaw = interface{}(true) + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} diff --git a/command/proxy/config/test-fixtures/config-cache-embedded-type.hcl b/command/proxy/config/test-fixtures/config-cache-embedded-type.hcl new file mode 100644 index 0000000..a7d8ef4 --- /dev/null +++ b/command/proxy/config/test-fixtures/config-cache-embedded-type.hcl @@ -0,0 +1,77 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +api_proxy { + use_auto_auth_token = true + enforce_consistency = "always" + when_inconsistent = "retry" +} + +cache { + persist "kubernetes" { + path = "/vault/agent-cache/" + keep_after_import = true + exit_on_err = true + service_account_token_file = "/tmp/serviceaccount/token" + } +} + +listener { + type = "unix" + address = "/path/to/socket" + tls_disable = true + socket_mode = "configmode" + socket_user = "configuser" + socket_group = "configgroup" +} + +listener { + type = "tcp" + address = "127.0.0.1:8300" + tls_disable = true +} + +listener { + type = "tcp" + address = "127.0.0.1:3000" + tls_disable = true + role = "metrics_only" +} + +listener { + type = "tcp" + role = "default" + address = "127.0.0.1:8400" + tls_key_file = "/path/to/cakey.pem" + tls_cert_file = "/path/to/cacert.pem" +} + +vault { + address = "http://127.0.0.1:1111" + ca_cert = "config_ca_cert" + ca_path = "config_ca_path" + tls_skip_verify = true + client_cert = "config_client_cert" + client_key = "config_client_key" +} diff --git a/command/proxy/config/test-fixtures/config-cache.hcl b/command/proxy/config/test-fixtures/config-cache.hcl new file mode 100644 index 0000000..d770391 --- /dev/null +++ b/command/proxy/config/test-fixtures/config-cache.hcl @@ -0,0 +1,75 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +pid_file = "./pidfile" + +auto_auth { + method { + type = "aws" + config = { + role = "foobar" + } + } + + sink { + type = "file" + config = { + path = "/tmp/file-foo" + } + aad = "foobar" + dh_type = "curve25519" + dh_path = "/tmp/file-foo-dhpath" + } +} + +api_proxy { + use_auto_auth_token = true + enforce_consistency = "always" + when_inconsistent = "retry" +} + +cache { + persist = { + type = "kubernetes" + path = "/vault/agent-cache/" + keep_after_import = true + exit_on_err = true + service_account_token_file = "/tmp/serviceaccount/token" + } +} + +listener "unix" { + address = "/path/to/socket" + tls_disable = true + socket_mode = "configmode" + socket_user = "configuser" + socket_group = "configgroup" +} + +listener "tcp" { + address = "127.0.0.1:8300" + tls_disable = true +} + +listener { + type = "tcp" + address = "127.0.0.1:3000" + tls_disable = true + role = "metrics_only" +} + +listener "tcp" { + role = "default" + address = "127.0.0.1:8400" + tls_key_file = "/path/to/cakey.pem" + tls_cert_file = "/path/to/cacert.pem" +} + +vault { + address = "http://127.0.0.1:1111" + ca_cert = "config_ca_cert" + ca_path = "config_ca_path" + tls_skip_verify = "true" + client_cert = "config_client_cert" + client_key = "config_client_key" +} diff --git a/command/proxy/test-fixtures/reload/reload_bar.key b/command/proxy/test-fixtures/reload/reload_bar.key new file mode 100644 index 0000000..10849fb --- /dev/null +++ b/command/proxy/test-fixtures/reload/reload_bar.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAwF7sRAyUiLcd6es6VeaTRUBOusFFGkmKJ5lU351waCJqXFju +Z6i/SQYNAAnnRgotXSTE1fIPjE2kZNH1hvqE5IpTGgAwy50xpjJrrBBI6e9lyKqj +7T8gLVNBvtC0cpQi+pGrszEI0ckDQCSZHqi/PAzcpmLUgh2KMrgagT+YlN35KHtl +/bQ/Fsn+kqykVqNw69n/CDKNKdDHn1qPwiX9q/fTMj3EG6g+3ntKrUOh8V/gHKPz +q8QGP/wIud2K+tTSorVXr/4zx7xgzlbJkCakzcQQiP6K+paPnDRlE8fK+1gRRyR7 
+XCzyp0irUl8G1NjYAR/tVWxiUhlk/jZutb8PpwIDAQABAoIBAEOzJELuindyujxQ +ZD9G3h1I/GwNCFyv9Mbq10u7BIwhUH0fbwdcA7WXQ4v38ERd4IkfH4aLoZ0m1ewF +V/sgvxQO+h/0YTfHImny5KGxOXfaoF92bipYROKuojydBmQsbgLwsRRm9UufCl3Q +g3KewG5JuH112oPQEYq379v8nZ4FxC3Ano1OFBTm9UhHIAX1Dn22kcHOIIw8jCsQ +zp7TZOW+nwtkS41cBwhvV4VIeL6yse2UgbOfRVRwI7B0OtswS5VgW3wysO2mTDKt +V/WCmeht1il/6ZogEHgi/mvDCKpj20wQ1EzGnPdFLdiFJFylf0oufQD/7N/uezbC +is0qJEECgYEA3AE7SeLpe3SZApj2RmE2lcD9/Saj1Y30PznxB7M7hK0sZ1yXEbtS +Qf894iDDD/Cn3ufA4xk/K52CXgAcqvH/h2geG4pWLYsT1mdWhGftprtOMCIvJvzU +8uWJzKdOGVMG7R59wNgEpPDZDpBISjexwQsFo3aw1L/H1/Sa8cdY3a0CgYEA39hB +1oLmGRyE32Q4GF/srG4FqKL1EsbISGDUEYTnaYg2XiM43gu3tC/ikfclk27Jwc2L +m7cA5FxxaEyfoOgfAizfU/uWTAbx9GoXgWsO0hWSN9+YNq61gc5WKoHyrJ/rfrti +y5d7k0OCeBxckLqGDuJqICQ0myiz0El6FU8h5SMCgYEAuhigmiNC9JbwRu40g9v/ +XDVfox9oPmBRVpogdC78DYKeqN/9OZaGQiUxp3GnDni2xyqqUm8srCwT9oeJuF/z +kgpUTV96/hNCuH25BU8UC5Es1jJUSFpdlwjqwx5SRcGhfjnojZMseojwUg1h2MW7 +qls0bc0cTxnaZaYW2qWRWhECgYBrT0cwyQv6GdvxJCBoPwQ9HXmFAKowWC+H0zOX +Onmd8/jsZEJM4J0uuo4Jn8vZxBDg4eL9wVuiHlcXwzP7dYv4BP8DSechh2rS21Ft +b59pQ4IXWw+jl1nYYsyYEDgAXaIN3VNder95N7ICVsZhc6n01MI/qlu1zmt1fOQT +9x2utQKBgHI9SbsfWfbGiu6oLS3+9V1t4dORhj8D8b7z3trvECrD6tPhxoZqtfrH +4apKr3OKRSXk3K+1K6pkMHJHunspucnA1ChXLhzfNF08BSRJkQDGYuaRLS6VGgab +JZTl54bGvO1GkszEBE/9QFcqNVtWGMWXnUPwNNv8t//yJT5rvQil +-----END RSA PRIVATE KEY----- diff --git a/command/proxy/test-fixtures/reload/reload_bar.pem b/command/proxy/test-fixtures/reload/reload_bar.pem new file mode 100644 index 0000000..a8217be --- /dev/null +++ b/command/proxy/test-fixtures/reload/reload_bar.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDQzCCAiugAwIBAgIULLCz3mZKmg2xy3rWCud0f1zcmBwwDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNjQ0WhcNMzYw +MzA1MDEzNzE0WjAaMRgwFgYDVQQDEw9iYXIuZXhhbXBsZS5jb20wggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAXuxEDJSItx3p6zpV5pNFQE66wUUaSYon +mVTfnXBoImpcWO5nqL9JBg0ACedGCi1dJMTV8g+MTaRk0fWG+oTkilMaADDLnTGm +MmusEEjp72XIqqPtPyAtU0G+0LRylCL6kauzMQjRyQNAJJkeqL88DNymYtSCHYoy +uBqBP5iU3fkoe2X9tD8Wyf6SrKRWo3Dr2f8IMo0p0MefWo/CJf2r99MyPcQbqD7e +e0qtQ6HxX+Aco/OrxAY//Ai53Yr61NKitVev/jPHvGDOVsmQJqTNxBCI/or6lo+c +NGUTx8r7WBFHJHtcLPKnSKtSXwbU2NgBH+1VbGJSGWT+Nm61vw+nAgMBAAGjgYQw +gYEwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBSVoF8F +7qbzSryIFrldurAG78LvSjAfBgNVHSMEGDAWgBRzDNvqF/Tq21OgWs13B5YydZjl +vzAgBgNVHREEGTAXgg9iYXIuZXhhbXBsZS5jb22HBH8AAAEwDQYJKoZIhvcNAQEL +BQADggEBAGmz2N282iT2IaEZvOmzIE4znHGkvoxZmrr/2byq5PskBg9ysyCHfUvw +SFA8U7jWjezKTnGRUu5blB+yZdjrMtB4AePWyEqtkJwVsZ2SPeP+9V2gNYK4iktP +UF3aIgBbAbw8rNuGIIB0T4D+6Zyo9Y3MCygs6/N4bRPZgLhewWn1ilklfnl3eqaC +a+JY1NBuTgCMa28NuC+Hy3mCveqhI8tFNiOthlLdgAEbuQaOuNutAG73utZ2aq6Q +W4pajFm3lEf5zt7Lo6ZCFtY/Q8jjURJ9e4O7VjXcqIhBM5bSMI6+fgQyOH0SLboj +RNanJ2bcyF1iPVyPBGzV3dF0ngYzxEY= +-----END CERTIFICATE----- diff --git a/command/proxy/test-fixtures/reload/reload_ca.pem b/command/proxy/test-fixtures/reload/reload_ca.pem new file mode 100644 index 0000000..72a7444 --- /dev/null +++ b/command/proxy/test-fixtures/reload/reload_ca.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDNTCCAh2gAwIBAgIUBeVo+Ce2BrdRT1cogKvJLtdOky8wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNTM4WhcNMzYw +MzA1MDIzNjA4WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAPTQGWPRIOECGeJB6tR/ftvvtioC9f84fY2QdJ5k +JBupXjPAGYKgS4MGzyT5bz9yY400tCtmh6h7p9tZwHl/TElTugtLQ/8ilMbJTiOM +SiyaMDPHiMJJYKTjm9bu6bKeU1qPZ0Cryes4rygbqs7w2XPgA2RxNmDh7JdX7/h+ +VB5onBmv8g4WFSayowGyDcJWWCbu5yv6ZdH1bqQjgRzQ5xp17WXNmvlzdp2vate/ 
+9UqPdA8sdJzW/91Gvmros0o/FnG7c2pULhk22wFqO8t2HRjKb3nuxALEJvqoPvad +KjpDTaq1L1ZzxcB7wvWyhy/lNLZL7jiNWy0mN1YB0UpSWdECAwEAAaN7MHkwDgYD +VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFHMM2+oX9Orb +U6BazXcHljJ1mOW/MB8GA1UdIwQYMBaAFHMM2+oX9OrbU6BazXcHljJ1mOW/MBYG +A1UdEQQPMA2CC2V4YW1wbGUuY29tMA0GCSqGSIb3DQEBCwUAA4IBAQAp17XsOaT9 +hculRqrFptn3+zkH3HrIckHm+28R5xYT8ASFXFcLFugGizJAXVL5lvsRVRIwCoOX +Nhi8XSNEFP640VbHcEl81I84bbRIIDS+Yheu6JDZGemTaDYLv1J3D5SHwgoM+nyf +oTRgotUCIXcwJHmTpWEUkZFKuqBxsoTGzk0jO8wOP6xoJkzxVVG5PvNxs924rxY8 +Y8iaLdDfMeT7Pi0XIliBa/aSp/iqSW8XKyJl5R5vXg9+DOgZUrVzIxObaF5RBl/a +mJOeklJBdNVzQm5+iMpO42lu0TA9eWtpP+YiUEXU17XDvFeQWOocFbQ1Peo0W895 +XRz2GCwCNyvW +-----END CERTIFICATE----- diff --git a/command/proxy/test-fixtures/reload/reload_foo.key b/command/proxy/test-fixtures/reload/reload_foo.key new file mode 100644 index 0000000..86e6cce --- /dev/null +++ b/command/proxy/test-fixtures/reload/reload_foo.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpgIBAAKCAQEAzNyVieSti9XBb5/celB5u8YKRJv3mQS9A4/X0mqY1ePznt1i +ilG7OmG0yM2VAk0ceIAQac3Bsn74jxn2cDlrrVniPXcNgYtMtW0kRqNEo4doo4EX +xZguS9vNBu29useHhif1TGX/pA3dgvaVycUCjzTEVk6qI8UEehMK6gEGZb7nOr0A +A9nipSqoeHpDLe3a4KVqj1vtlJKUvD2i1MuBuQ130cB1K9rufLCShGu7mEgzEosc +gr+K3Bf03IejbeVRyIfLtgj1zuvV1katec75UqRA/bsvt5G9JfJqiZ9mwFN0vp3g +Cr7pdQBSBQ2q4yf9s8CuY5c5w9fl3F8f5QFQoQIDAQABAoIBAQCbCb1qNFRa5ZSV +I8i6ELlwMDqJHfhOJ9XcIjpVljLAfNlcu3Ld92jYkCU/asaAjVckotbJG9yhd5Io +yp9E40/oS4P6vGTOS1vsWgMAKoPBtrKsOwCAm+E9q8UIn1fdSS/5ibgM74x+3bds +a62Em8KKGocUQkhk9a+jq1GxMsFisbHRxEHvClLmDMgGnW3FyGmWwT6yZLPSC0ey +szmmjt3ouP8cLAOmSjzcQBMmEZpQMCgR6Qckg6nrLQAGzZyTdCd875wbGA57DpWX +Lssn95+A5EFvr/6b7DkXeIFCrYBFFa+UQN3PWGEQ6Zjmiw4VgV2vO8yX2kCLlUhU +02bL393ZAoGBAPXPD/0yWINbKUPcRlx/WfWQxfz0bu50ytwIXzVK+pRoAMuNqehK +BJ6kNzTTBq40u+IZ4f5jbLDulymR+4zSkirLE7CyWFJOLNI/8K4Pf5DJUgNdrZjJ +LCtP9XRdxiPatQF0NGfdgHlSJh+/CiRJP4AgB17AnB/4z9/M0ZlJGVrzAoGBANVa +69P3Rp/WPBQv0wx6f0tWppJolWekAHKcDIdQ5HdOZE5CPAYSlTrTUW3uJuqMwU2L +M0Er2gIPKWIR5X+9r7Fvu9hQW6l2v3xLlcrGPiapp3STJvuMxzhRAmXmu3bZfVn1 +Vn7Vf1jPULHtTFSlNFEvYG5UJmygK9BeyyVO5KMbAoGBAMCyAibLQPg4jrDUDZSV +gUAwrgUO2ae1hxHWvkxY6vdMUNNByuB+pgB3W4/dnm8Sh/dHsxJpftt1Lqs39ar/ +p/ZEHLt4FCTxg9GOrm7FV4t5RwG8fko36phJpnIC0UFqQltRbYO+8OgqrhhU+u5X +PaCDe0OcWsf1lYAsYGN6GpZhAoGBAMJ5Ksa9+YEODRs1cIFKUyd/5ztC2xRqOAI/ +3WemQ2nAacuvsfizDZVeMzYpww0+maAuBt0btI719PmwaGmkpDXvK+EDdlmkpOwO +FY6MXvBs6fdnfjwCWUErDi2GQFAX9Jt/9oSL5JU1+08DhvUM1QA/V/2Y9KFE6kr3 +bOIn5F4LAoGBAKQzH/AThDGhT3hwr4ktmReF3qKxBgxzjVa8veXtkY5VWwyN09iT +jnTTt6N1CchZoK5WCETjdzNYP7cuBTcV4d3bPNRiJmxXaNVvx3Tlrk98OiffT8Qa +5DO/Wfb43rNHYXBjU6l0n2zWcQ4PUSSbu0P0bM2JTQPRCqSthXvSHw2P +-----END RSA PRIVATE KEY----- diff --git a/command/proxy/test-fixtures/reload/reload_foo.pem b/command/proxy/test-fixtures/reload/reload_foo.pem new file mode 100644 index 0000000..c8b868b --- /dev/null +++ b/command/proxy/test-fixtures/reload/reload_foo.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDQzCCAiugAwIBAgIUFVW6i/M+yJUsDrXWgRKO/Dnb+L4wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNjA1WhcNMzYw +MzA1MDEzNjM1WjAaMRgwFgYDVQQDEw9mb28uZXhhbXBsZS5jb20wggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDM3JWJ5K2L1cFvn9x6UHm7xgpEm/eZBL0D +j9fSapjV4/Oe3WKKUbs6YbTIzZUCTRx4gBBpzcGyfviPGfZwOWutWeI9dw2Bi0y1 +bSRGo0Sjh2ijgRfFmC5L280G7b26x4eGJ/VMZf+kDd2C9pXJxQKPNMRWTqojxQR6 +EwrqAQZlvuc6vQAD2eKlKqh4ekMt7drgpWqPW+2UkpS8PaLUy4G5DXfRwHUr2u58 +sJKEa7uYSDMSixyCv4rcF/Tch6Nt5VHIh8u2CPXO69XWRq15zvlSpED9uy+3kb0l +8mqJn2bAU3S+neAKvul1AFIFDarjJ/2zwK5jlznD1+XcXx/lAVChAgMBAAGjgYQw 
+gYEwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBRNJoOJ +dnazDiuqLhV6truQ4cRe9jAfBgNVHSMEGDAWgBRzDNvqF/Tq21OgWs13B5YydZjl +vzAgBgNVHREEGTAXgg9mb28uZXhhbXBsZS5jb22HBH8AAAEwDQYJKoZIhvcNAQEL +BQADggEBAHzv67mtbxMWcuMsxCFBN1PJNAyUDZVCB+1gWhk59EySbVg81hWJDCBy +fl3TKjz3i7wBGAv+C2iTxmwsSJbda22v8JQbuscXIfLFbNALsPzF+J0vxAgJs5Gc +sDbfJ7EQOIIOVKQhHLYnQoLnigSSPc1kd0JjYyHEBjgIaSuXgRRTBAeqLiBMx0yh +RKL1lQ+WoBU/9SXUZZkwokqWt5G7khi5qZkNxVXZCm8VGPg0iywf6gGyhI1SU5S2 +oR219S6kA4JY/stw1qne85/EmHmoImHGt08xex3GoU72jKAjsIpqRWopcD/+uene +Tc9nn3fTQW/Z9fsoJ5iF5OdJnDEswqE= +-----END CERTIFICATE----- diff --git a/command/proxy_test.go b/command/proxy_test.go new file mode 100644 index 0000000..9b30af9 --- /dev/null +++ b/command/proxy_test.go @@ -0,0 +1,1274 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "net/http" + "os" + "path/filepath" + "reflect" + "strings" + "sync" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + vaultjwt "github.com/hashicorp/vault-plugin-auth-jwt" + logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" + "github.com/hashicorp/vault/api" + credAppRole "github.com/hashicorp/vault/builtin/credential/approle" + "github.com/hashicorp/vault/command/agent" + proxyConfig "github.com/hashicorp/vault/command/proxy/config" + "github.com/hashicorp/vault/helper/useragent" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + "github.com/mitchellh/cli" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func testProxyCommand(tb testing.TB, logger hclog.Logger) (*cli.MockUi, *ProxyCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &ProxyCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + ShutdownCh: MakeShutdownCh(), + SighupCh: MakeSighupCh(), + logger: logger, + startedCh: make(chan struct{}, 5), + reloadedCh: make(chan struct{}, 5), + } +} + +// TestProxy_ExitAfterAuth tests the exit_after_auth flag, provided both +// as config and via -exit-after-auth. 
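+// As a rough sketch of the two modes: with viaFlag=false the rendered +// config below starts with `exit_after_auth = true`, while with viaFlag=true +// that line is omitted and -exit-after-auth is appended to the proxy's CLI +// arguments instead.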
+func TestProxy_ExitAfterAuth(t *testing.T) { + t.Run("via_config", func(t *testing.T) { + testProxyExitAfterAuth(t, false) + }) + + t.Run("via_flag", func(t *testing.T) { + testProxyExitAfterAuth(t, true) + }) +} + +func testProxyExitAfterAuth(t *testing.T, viaFlag bool) { + logger := logging.NewVaultLogger(hclog.Trace) + coreConfig := &vault.CoreConfig{ + Logger: logger, + CredentialBackends: map[string]logical.Factory{ + "jwt": vaultjwt.Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + client := cluster.Cores[0].Client + + // Setup Vault + err := client.Sys().EnableAuthWithOptions("jwt", &api.EnableAuthOptions{ + Type: "jwt", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Write("auth/jwt/config", map[string]interface{}{ + "bound_issuer": "https://team-vault.auth0.com/", + "jwt_validation_pubkeys": agent.TestECDSAPubKey, + "jwt_supported_algs": "ES256", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Write("auth/jwt/role/test", map[string]interface{}{ + "role_type": "jwt", + "bound_subject": "r3qXcK2bix9eFECzsU3Sbmh0K16fatW6@clients", + "bound_audiences": "https://vault.plugin.auth.jwt.test", + "user_claim": "https://vault/user", + "groups_claim": "https://vault/groups", + "policies": "test", + "period": "3s", + }) + if err != nil { + t.Fatal(err) + } + + dir := t.TempDir() + inf, err := os.CreateTemp(dir, "auth.jwt.test.") + if err != nil { + t.Fatal(err) + } + in := inf.Name() + inf.Close() + // We remove these files in this test since we don't need the files, we just need + // a non-conflicting file name for the config.
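+// (The same CreateTemp/Close/Remove pattern is repeated below for the two +// sink files and the config file.)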
+ os.Remove(in) + t.Logf("input: %s", in) + + sink1f, err := os.CreateTemp(dir, "sink1.jwt.test.") + if err != nil { + t.Fatal(err) + } + sink1 := sink1f.Name() + sink1f.Close() + os.Remove(sink1) + t.Logf("sink1: %s", sink1) + + sink2f, err := os.CreateTemp(dir, "sink2.jwt.test.") + if err != nil { + t.Fatal(err) + } + sink2 := sink2f.Name() + sink2f.Close() + os.Remove(sink2) + t.Logf("sink2: %s", sink2) + + conff, err := os.CreateTemp(dir, "conf.jwt.test.") + if err != nil { + t.Fatal(err) + } + conf := conff.Name() + conff.Close() + os.Remove(conf) + t.Logf("config: %s", conf) + + jwtToken, _ := agent.GetTestJWT(t) + if err := os.WriteFile(in, []byte(jwtToken), 0o600); err != nil { + t.Fatal(err) + } else { + logger.Trace("wrote test jwt", "path", in) + } + + exitAfterAuthTemplText := "exit_after_auth = true" + if viaFlag { + exitAfterAuthTemplText = "" + } + + config := ` +%s + +auto_auth { + method { + type = "jwt" + config = { + role = "test" + path = "%s" + } + } + + sink { + type = "file" + config = { + path = "%s" + } + } + + sink "file" { + config = { + path = "%s" + } + } +} +` + + config = fmt.Sprintf(config, exitAfterAuthTemplText, in, sink1, sink2) + if err := os.WriteFile(conf, []byte(config), 0o600); err != nil { + t.Fatal(err) + } else { + logger.Trace("wrote test config", "path", conf) + } + + doneCh := make(chan struct{}) + go func() { + ui, cmd := testProxyCommand(t, logger) + cmd.client = client + + args := []string{"-config", conf} + if viaFlag { + args = append(args, "-exit-after-auth") + } + + code := cmd.Run(args) + if code != 0 { + t.Errorf("expected %d to be %d", code, 0) + t.Logf("output from proxy:\n%s", ui.OutputWriter.String()) + t.Logf("error from proxy:\n%s", ui.ErrorWriter.String()) + } + close(doneCh) + }() + + select { + case <-doneCh: + break + case <-time.After(1 * time.Minute): + t.Fatal("timeout reached while waiting for proxy to exit") + } + + sink1Bytes, err := os.ReadFile(sink1) + if err != nil { + t.Fatal(err) + } + if len(sink1Bytes) == 0 { + t.Fatal("got no output from sink 1") + } + + sink2Bytes, err := os.ReadFile(sink2) + if err != nil { + t.Fatal(err) + } + if len(sink2Bytes) == 0 { + t.Fatal("got no output from sink 2") + } + + if string(sink1Bytes) != string(sink2Bytes) { + t.Fatal("sink 1/2 values don't match") + } +} + +// TestProxy_AutoAuth_UserAgent tests that the User-Agent sent +// to Vault by Vault Proxy is correct when performing Auto-Auth. +// Uses the custom handler userAgentHandler (defined above) so +// that Vault validates the User-Agent on requests sent by Proxy. 
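+// Roughly: the handler is expected to fail the test unless the PUT to +// auth/approle/login arrives with useragent.ProxyAutoAuthString() as its +// User-Agent header.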
+func TestProxy_AutoAuth_UserAgent(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + var h userAgentHandler + cluster := vault.NewTestCluster(t, &vault.CoreConfig{ + Logger: logger, + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + }, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.HandlerFunc( + func(properties *vault.HandlerProperties) http.Handler { + h.props = properties + h.userAgentToCheckFor = useragent.ProxyAutoAuthString() + h.requestMethodToCheck = "PUT" + h.pathToCheck = "auth/approle/login" + h.t = t + return &h + }), + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Enable the approle auth method + req := serverClient.NewRequest("POST", "/v1/sys/auth/approle") + req.BodyBytes = []byte(`{ + "type": "approle" + }`) + request(t, serverClient, req, 204) + + // Create a named role + req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role") + req.BodyBytes = []byte(`{ + "secret_id_num_uses": "10", + "secret_id_ttl": "1m", + "token_max_ttl": "1m", + "token_num_uses": "10", + "token_ttl": "1m", + "policies": "default" + }`) + request(t, serverClient, req, 204) + + // Fetch the RoleID of the named role + req = serverClient.NewRequest("GET", "/v1/auth/approle/role/test-role/role-id") + body := request(t, serverClient, req, 200) + data := body["data"].(map[string]interface{}) + roleID := data["role_id"].(string) + + // Get a SecretID issued against the named role + req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role/secret-id") + body = request(t, serverClient, req, 200) + data = body["data"].(map[string]interface{}) + secretID := data["secret_id"].(string) + + // Write the RoleID and SecretID to temp files + roleIDPath := makeTempFile(t, "role_id.txt", roleID+"\n") + secretIDPath := makeTempFile(t, "secret_id.txt", secretID+"\n") + defer os.Remove(roleIDPath) + defer os.Remove(secretIDPath) + + sinkf, err := os.CreateTemp("", "sink.test.") + if err != nil { + t.Fatal(err) + } + sink := sinkf.Name() + sinkf.Close() + os.Remove(sink) + + autoAuthConfig := fmt.Sprintf(` +auto_auth { + method "approle" { + mount_path = "auth/approle" + config = { + role_id_file_path = "%s" + secret_id_file_path = "%s" + } + } + + sink "file" { + config = { + path = "%s" + } + } +}`, roleIDPath, secretIDPath, sink) + + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} +api_proxy { + use_auto_auth_token = true +} +%s +%s +`, serverClient.Address(), listenConfig, autoAuthConfig) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Unset the environment variable so that proxy picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + // Start proxy + _, cmd := testProxyCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + // Validate that the auto-auth token has been correctly attained + // and works for LookupSelf + conf := api.DefaultConfig() + conf.Address = "http://" + listenAddr + proxyClient, err := api.NewClient(conf) + if 
err != nil { + t.Fatalf("err: %s", err) + } + + proxyClient.SetToken("") + err = proxyClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + + // Wait for the token to be sent to syncs and be available to be used + time.Sleep(5 * time.Second) + + req = proxyClient.NewRequest("GET", "/v1/auth/token/lookup-self") + body = request(t, proxyClient, req, 200) + + close(cmd.ShutdownCh) + wg.Wait() +} + +// TestProxy_APIProxyWithoutCache_UserAgent tests that the User-Agent sent +// to Vault by Vault Proxy is correct using the API proxy without +// the cache configured. Uses the custom handler +// userAgentHandler struct defined in this test package, so that Vault validates the +// User-Agent on requests sent by Proxy. +func TestProxy_APIProxyWithoutCache_UserAgent(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + userAgentForProxiedClient := "proxied-client" + var h userAgentHandler + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.HandlerFunc( + func(properties *vault.HandlerProperties) http.Handler { + h.props = properties + h.userAgentToCheckFor = useragent.ProxyStringWithProxiedUserAgent(userAgentForProxiedClient) + h.pathToCheck = "/v1/auth/token/lookup-self" + h.requestMethodToCheck = "GET" + h.t = t + return &h + }), + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that proxy picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} +%s +`, serverClient.Address(), listenConfig) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Start the proxy + _, cmd := testProxyCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + proxyClient, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + proxyClient.AddHeader("User-Agent", userAgentForProxiedClient) + proxyClient.SetToken(serverClient.Token()) + proxyClient.SetMaxRetries(0) + err = proxyClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + + _, err = proxyClient.Auth().Token().LookupSelf() + if err != nil { + t.Fatal(err) + } + + close(cmd.ShutdownCh) + wg.Wait() +} + +// TestProxy_APIProxyWithCache_UserAgent tests that the User-Agent sent +// to Vault by Vault Proxy is correct using the API proxy with +// the cache configured. Uses the custom handler +// userAgentHandler struct defined in this test package, so that Vault validates the +// User-Agent on requests sent by Proxy. 
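+// Here the proxied client sends its own User-Agent ("proxied-client"), and +// the handler checks for useragent.ProxyStringWithProxiedUserAgent(...), which +// presumably combines the proxy's own user agent with the client's.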
+func TestProxy_APIProxyWithCache_UserAgent(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + userAgentForProxiedClient := "proxied-client" + var h userAgentHandler + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.HandlerFunc( + func(properties *vault.HandlerProperties) http.Handler { + h.props = properties + h.userAgentToCheckFor = useragent.ProxyStringWithProxiedUserAgent(userAgentForProxiedClient) + h.pathToCheck = "/v1/auth/token/lookup-self" + h.requestMethodToCheck = "GET" + h.t = t + return &h + }), + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that proxy picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + cacheConfig := ` +cache { +}` + + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} +%s +%s +`, serverClient.Address(), listenConfig, cacheConfig) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Start the proxy + _, cmd := testProxyCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + proxyClient, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + proxyClient.AddHeader("User-Agent", userAgentForProxiedClient) + proxyClient.SetToken(serverClient.Token()) + proxyClient.SetMaxRetries(0) + err = proxyClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + + _, err = proxyClient.Auth().Token().LookupSelf() + if err != nil { + t.Fatal(err) + } + + close(cmd.ShutdownCh) + wg.Wait() +} + +// TestProxy_Cache_DynamicSecret Tests that the cache successfully caches a dynamic secret +// going through the Proxy, +func TestProxy_Cache_DynamicSecret(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that proxy picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + cacheConfig := ` +cache { +} +` + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} +%s +%s +`, serverClient.Address(), cacheConfig, listenConfig) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Start proxy + _, cmd := testProxyCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + proxyClient, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + 
proxyClient.SetToken(serverClient.Token()) + proxyClient.SetMaxRetries(0) + err = proxyClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + + renewable := true + tokenCreateRequest := &api.TokenCreateRequest{ + Policies: []string{"default"}, + TTL: "30m", + Renewable: &renewable, + } + + // Creating an orphan token is the simplest way found to trigger the caching + // behaviour: the create response returns Auth, is renewable, and the token + // isn't managed elsewhere (since it's an orphan) + secret, err := proxyClient.Auth().Token().CreateOrphan(tokenCreateRequest) + if err != nil { + t.Fatal(err) + } + if secret == nil || secret.Auth == nil { + t.Fatalf("secret not as expected: %v", secret) + } + + token := secret.Auth.ClientToken + + secret, err = proxyClient.Auth().Token().CreateOrphan(tokenCreateRequest) + if err != nil { + t.Fatal(err) + } + if secret == nil || secret.Auth == nil { + t.Fatalf("secret not as expected: %v", secret) + } + + token2 := secret.Auth.ClientToken + + if token != token2 { + t.Fatalf("token create response not cached when it should have been, as tokens differ") + } + + close(cmd.ShutdownCh) + wg.Wait() +} + +// TestProxy_ApiProxy_Retry Tests the retry functionality of Vault Proxy's API Proxy +func TestProxy_ApiProxy_Retry(t *testing.T) { + //---------------------------------------------------- + // Start the server and proxy + //---------------------------------------------------- + logger := logging.NewVaultLogger(hclog.Trace) + var h handler + cluster := vault.NewTestCluster(t, + &vault.CoreConfig{ + Logger: logger, + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "kv": logicalKv.Factory, + }, + }, + &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.HandlerFunc(func(properties *vault.HandlerProperties) http.Handler { + h.props = properties + h.t = t + return &h + }), + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that proxy picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + _, err := serverClient.Logical().Write("secret/foo", map[string]interface{}{ + "bar": "baz", + }) + if err != nil { + t.Fatal(err) + } + + intRef := func(i int) *int { + return &i + } + // start test cases here + testCases := map[string]struct { + retries *int + expectError bool + }{ + "none": { + retries: intRef(-1), + expectError: true, + }, + "one": { + retries: intRef(1), + expectError: true, + }, + "two": { + retries: intRef(2), + expectError: false, + }, + "missing": { + retries: nil, + expectError: false, + }, + "default": { + retries: intRef(0), + expectError: false, + }, + } + + for tcname, tc := range testCases { + t.Run(tcname, func(t *testing.T) { + h.failCount = 2 + + cacheConfig := ` +cache { +} +` + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + var retryConf string + if tc.retries != nil { + retryConf = fmt.Sprintf("retry { num_retries = %d }", *tc.retries) + } + + config := fmt.Sprintf(` +vault { + address = "%s" + %s + tls_skip_verify = true +} +%s +%s +`, serverClient.Address(), retryConf, cacheConfig, listenConfig) + configPath := 
makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + _, cmd := testProxyCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + client, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + client.SetToken(serverClient.Token()) + client.SetMaxRetries(0) + err = client.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + secret, err := client.Logical().Read("secret/foo") + switch { + case (err != nil || secret == nil) && tc.expectError: + case (err == nil || secret != nil) && !tc.expectError: + default: + t.Fatalf("%s expectError=%v error=%v secret=%v", tcname, tc.expectError, err, secret) + } + if secret != nil && secret.Data["foo"] != nil { + val := secret.Data["foo"].(map[string]interface{}) + if !reflect.DeepEqual(val, map[string]interface{}{"bar": "baz"}) { + t.Fatalf("expected key 'foo' to yield bar=baz, got: %v", val) + } + } + time.Sleep(time.Second) + + close(cmd.ShutdownCh) + wg.Wait() + }) + } +} + +// TestProxy_Metrics tests that metrics are being properly reported. +func TestProxy_Metrics(t *testing.T) { + // Start a vault server + logger := logging.NewVaultLogger(hclog.Trace) + cluster := vault.NewTestCluster(t, + &vault.CoreConfig{ + Logger: logger, + }, + &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + vault.TestWaitActive(t, cluster.Cores[0].Core) + serverClient := cluster.Cores[0].Client + + // Create a config file + listenAddr := generateListenerAddress(t) + config := fmt.Sprintf(` +cache {} + +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + ui, cmd := testProxyCommand(t, logger) + cmd.client = serverClient + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + code := cmd.Run([]string{"-config", configPath}) + if code != 0 { + t.Errorf("non-zero return code when running proxy: %d", code) + t.Logf("STDOUT from proxy:\n%s", ui.OutputWriter.String()) + t.Logf("STDERR from proxy:\n%s", ui.ErrorWriter.String()) + } + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + // defer proxy shutdown + defer func() { + cmd.ShutdownCh <- struct{}{} + wg.Wait() + }() + + conf := api.DefaultConfig() + conf.Address = "http://" + listenAddr + proxyClient, err := api.NewClient(conf) + if err != nil { + t.Fatalf("err: %s", err) + } + + req := proxyClient.NewRequest("GET", "/proxy/v1/metrics") + body := request(t, proxyClient, req, 200) + keys := []string{} + for k := range body { + keys = append(keys, k) + } + require.ElementsMatch(t, keys, []string{ + "Counters", + "Samples", + "Timestamp", + "Gauges", + "Points", + }) +} + +// TestProxy_QuitAPI Tests the /proxy/v1/quit API that can be enabled for the proxy. 
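+// A minimal sketch of the listener stanza that exposes the endpoint, as +// exercised below (the address is illustrative): +// +// listener "tcp" { +// address = "127.0.0.1:8201" +// tls_disable = true +// proxy_api { +// enable_quit = true +// } +// }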
+func TestProxy_QuitAPI(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Error) + cluster := vault.NewTestCluster(t, + &vault.CoreConfig{ + Logger: logger, + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "kv": logicalKv.Factory, + }, + }, + &vault.TestClusterOptions{ + NumCores: 1, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that proxy picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + err := os.Unsetenv(api.EnvVaultAddress) + if err != nil { + t.Fatal(err) + } + + listenAddr := generateListenerAddress(t) + listenAddr2 := generateListenerAddress(t) + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} + +listener "tcp" { + address = "%s" + tls_disable = true +} + +listener "tcp" { + address = "%s" + tls_disable = true + proxy_api { + enable_quit = true + } +} + +cache {} +`, serverClient.Address(), listenAddr, listenAddr2) + + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + _, cmd := testProxyCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + client, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + client.SetToken(serverClient.Token()) + client.SetMaxRetries(0) + err = client.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + + // First try on listener 1 where the API should be disabled. + resp, err := client.RawRequest(client.NewRequest(http.MethodPost, "/proxy/v1/quit")) + if err == nil { + t.Fatalf("expected error") + } + if resp != nil && resp.StatusCode != http.StatusNotFound { + t.Fatalf("expected %d but got: %d", http.StatusNotFound, resp.StatusCode) + } + + // Now try on listener 2 where the quit API should be enabled. + err = client.SetAddress("http://" + listenAddr2) + if err != nil { + t.Fatal(err) + } + + _, err = client.RawRequest(client.NewRequest(http.MethodPost, "/proxy/v1/quit")) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + select { + case <-cmd.ShutdownCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + wg.Wait() +} + +// TestProxy_LogFile_CliOverridesConfig tests that the CLI values +// override the config for log files +func TestProxy_LogFile_CliOverridesConfig(t *testing.T) { + // Create basic config + configFile := populateTempFile(t, "proxy-config.hcl", BasicHclConfig) + cfg, err := proxyConfig.LoadConfigFile(configFile.Name()) + if err != nil { + t.Fatal("Cannot load config to test update/merge", err) + } + + // Sanity check that the config value is the current value + assert.Equal(t, "TMPDIR/juan.log", cfg.LogFile) + + // Initialize the command and parse any flags + cmd := &ProxyCommand{BaseCommand: &BaseCommand{}} + f := cmd.Flags() + // Simulate the flag being specified + err = f.Parse([]string{"-log-file=/foo/bar/test.log"}) + if err != nil { + t.Fatal(err) + } + + // Update the config based on the inputs. 
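+// applyConfigOverrides is expected to let the parsed -log-file flag win over +// the value loaded from the config file; the assertions below pin that down.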
+ cmd.applyConfigOverrides(f, cfg) + + assert.NotEqual(t, "TMPDIR/juan.log", cfg.LogFile) + assert.NotEqual(t, "/squiggle/logs.txt", cfg.LogFile) + assert.Equal(t, "/foo/bar/test.log", cfg.LogFile) +} + +// TestProxy_LogFile_Config tests log file config when loaded from config +func TestProxy_LogFile_Config(t *testing.T) { + configFile := populateTempFile(t, "proxy-config.hcl", BasicHclConfig) + + cfg, err := proxyConfig.LoadConfigFile(configFile.Name()) + if err != nil { + t.Fatal("Cannot load config to test update/merge", err) + } + + // Sanity check that the config value is the current value + assert.Equal(t, "TMPDIR/juan.log", cfg.LogFile, "sanity check on log config failed") + assert.Equal(t, 2, cfg.LogRotateMaxFiles) + assert.Equal(t, 1048576, cfg.LogRotateBytes) + + // Parse the cli flags (but we pass in an empty slice) + cmd := &ProxyCommand{BaseCommand: &BaseCommand{}} + f := cmd.Flags() + err = f.Parse([]string{}) + if err != nil { + t.Fatal(err) + } + + // Should change nothing... + cmd.applyConfigOverrides(f, cfg) + + assert.Equal(t, "TMPDIR/juan.log", cfg.LogFile, "actual config check") + assert.Equal(t, 2, cfg.LogRotateMaxFiles) + assert.Equal(t, 1048576, cfg.LogRotateBytes) +} + +// TestProxy_Config_NewLogger_Default Tests defaults for log level and +// specifically cmd.newLogger() +func TestProxy_Config_NewLogger_Default(t *testing.T) { + cmd := &ProxyCommand{BaseCommand: &BaseCommand{}} + cmd.config = proxyConfig.NewConfig() + logger, err := cmd.newLogger() + + assert.NoError(t, err) + assert.NotNil(t, logger) + assert.Equal(t, hclog.Info.String(), logger.GetLevel().String()) +} + +// TestProxy_Config_ReloadLogLevel Tests reloading updates the log +// level as expected. +func TestProxy_Config_ReloadLogLevel(t *testing.T) { + cmd := &ProxyCommand{BaseCommand: &BaseCommand{}} + var err error + tempDir := t.TempDir() + + // Load an initial config + hcl := strings.ReplaceAll(BasicHclConfig, "TMPDIR", tempDir) + configFile := populateTempFile(t, "proxy-config.hcl", hcl) + cmd.config, err = proxyConfig.LoadConfigFile(configFile.Name()) + if err != nil { + t.Fatal("Cannot load config to test update/merge", err) + } + + // Tweak the loaded config to make sure we can put log files into a temp dir + // and systemd log attempts work fine, this would usually happen during Run. + cmd.logWriter = os.Stdout + cmd.logger, err = cmd.newLogger() + if err != nil { + t.Fatal("logger required for systemd log messages", err) + } + + // Sanity check + assert.Equal(t, "warn", cmd.config.LogLevel) + + // Load a new config + hcl = strings.ReplaceAll(BasicHclConfig2, "TMPDIR", tempDir) + configFile = populateTempFile(t, "proxy-config.hcl", hcl) + err = cmd.reloadConfig([]string{configFile.Name()}) + assert.NoError(t, err) + assert.Equal(t, "debug", cmd.config.LogLevel) +} + +// TestProxy_Config_ReloadTls Tests that the TLS certs for the listener are +// correctly reloaded. 
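+// Outline: copy reload_foo.pem/.key into place, dial the listener and expect +// CN foo.example.com, swap in reload_bar.pem/.key, trigger a reload via +// cmd.SighupCh, then expect CN bar.example.com.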
+func TestProxy_Config_ReloadTls(t *testing.T) { + var wg sync.WaitGroup + wd, err := os.Getwd() + if err != nil { + t.Fatal("unable to get current working directory") + } + workingDir := filepath.Join(wd, "/proxy/test-fixtures/reload") + fooCert := "reload_foo.pem" + fooKey := "reload_foo.key" + + barCert := "reload_bar.pem" + barKey := "reload_bar.key" + + reloadCert := "reload_cert.pem" + reloadKey := "reload_key.pem" + caPem := "reload_ca.pem" + + tempDir := t.TempDir() + + // Set up initial 'foo' certs + inBytes, err := os.ReadFile(filepath.Join(workingDir, fooCert)) + if err != nil { + t.Fatal("unable to read cert required for test", fooCert, err) + } + err = os.WriteFile(filepath.Join(tempDir, reloadCert), inBytes, 0o777) + if err != nil { + t.Fatal("unable to write temp cert required for test", reloadCert, err) + } + + inBytes, err = os.ReadFile(filepath.Join(workingDir, fooKey)) + if err != nil { + t.Fatal("unable to read cert key required for test", fooKey, err) + } + err = os.WriteFile(filepath.Join(tempDir, reloadKey), inBytes, 0o777) + if err != nil { + t.Fatal("unable to write temp cert key required for test", reloadKey, err) + } + + inBytes, err = os.ReadFile(filepath.Join(workingDir, caPem)) + if err != nil { + t.Fatal("unable to read CA pem required for test", caPem, err) + } + certPool := x509.NewCertPool() + ok := certPool.AppendCertsFromPEM(inBytes) + if !ok { + t.Fatal("not ok when appending CA cert") + } + + replacedHcl := strings.ReplaceAll(BasicHclConfig, "TMPDIR", tempDir) + configFile := populateTempFile(t, "proxy-config.hcl", replacedHcl) + + // Set up Proxy + logger := logging.NewVaultLogger(hclog.Trace) + ui, cmd := testProxyCommand(t, logger) + + var output string + var code int + wg.Add(1) + args := []string{"-config", configFile.Name()} + go func() { + if code = cmd.Run(args); code != 0 { + output = ui.ErrorWriter.String() + ui.OutputWriter.String() + } + wg.Done() + }() + + testCertificateName := func(cn string) error { + conn, err := tls.Dial("tcp", "127.0.0.1:8100", &tls.Config{ + RootCAs: certPool, + }) + if err != nil { + return err + } + defer conn.Close() + if err = conn.Handshake(); err != nil { + return err + } + servName := conn.ConnectionState().PeerCertificates[0].Subject.CommonName + if servName != cn { + return fmt.Errorf("expected %s, got %s", cn, servName) + } + return nil + } + + // Start + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Fatalf("timeout") + } + + if err := testCertificateName("foo.example.com"); err != nil { + t.Fatalf("certificate name didn't check out: %s", err) + } + + // Swap out certs + inBytes, err = os.ReadFile(filepath.Join(workingDir, barCert)) + if err != nil { + t.Fatal("unable to read cert required for test", barCert, err) + } + err = os.WriteFile(filepath.Join(tempDir, reloadCert), inBytes, 0o777) + if err != nil { + t.Fatal("unable to write temp cert required for test", reloadCert, err) + } + + inBytes, err = os.ReadFile(filepath.Join(workingDir, barKey)) + if err != nil { + t.Fatal("unable to read cert key required for test", barKey, err) + } + err = os.WriteFile(filepath.Join(tempDir, reloadKey), inBytes, 0o777) + if err != nil { + t.Fatal("unable to write temp cert key required for test", reloadKey, err) + } + + // Reload + cmd.SighupCh <- struct{}{} + select { + case <-cmd.reloadedCh: + case <-time.After(5 * time.Second): + t.Fatalf("timeout") + } + + if err := testCertificateName("bar.example.com"); err != nil { + t.Fatalf("certificate name didn't check out: %s", err) + } + + // 
Shut down + cmd.ShutdownCh <- struct{}{} + wg.Wait() + + if code != 0 { + t.Fatalf("got a non-zero exit status: %d, stdout/stderr: %s", code, output) + } +} diff --git a/command/read.go b/command/read.go new file mode 100644 index 0000000..17b8552 --- /dev/null +++ b/command/read.go @@ -0,0 +1,138 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "context" + "fmt" + "io" + "os" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*ReadCommand)(nil) + _ cli.CommandAutocomplete = (*ReadCommand)(nil) +) + +type ReadCommand struct { + *BaseCommand + + testStdin io.Reader // for tests +} + +func (c *ReadCommand) Synopsis() string { + return "Read data and retrieve secrets" +} + +func (c *ReadCommand) Help() string { + helpText := ` +Usage: vault read [options] PATH + + Reads data from Vault at the given path. This can be used to read secrets, + generate dynamic credentials, get configuration details, and more. + + Read a secret from the static secrets engine: + + $ vault read secret/my-secret + + For a full list of examples and paths, please see the documentation that + corresponds to the secrets engine in use. + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *ReadCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) +} + +func (c *ReadCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultFiles() +} + +func (c *ReadCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *ReadCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args, ParseOptionAllowRawFormat(true)); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + // client.ReadRaw* methods require a manual timeout override + ctx, cancel := context.WithTimeout(context.Background(), client.ClientTimeout()) + defer cancel() + + // Pull our fake stdin if needed + stdin := (io.Reader)(os.Stdin) + if c.testStdin != nil { + stdin = c.testStdin + } + + path := sanitizePath(args[0]) + + data, err := parseArgsDataStringLists(stdin, args[1:]) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to parse K=V data: %s", err)) + return 1 + } + + if Format(c.UI) != "raw" { + secret, err := client.Logical().ReadWithDataWithContext(ctx, path, data) + if err != nil { + c.UI.Error(fmt.Sprintf("Error reading %s: %s", path, err)) + return 2 + } + if secret == nil { + c.UI.Error(fmt.Sprintf("No value found at %s", path)) + return 2 + } + + if c.flagField != "" { + return PrintRawField(c.UI, secret, c.flagField) + } + + return OutputSecret(c.UI, secret) + } + + resp, err := client.Logical().ReadRawWithDataWithContext(ctx, path, data) + if err != nil { + c.UI.Error(fmt.Sprintf("Error reading: %s: %s", path, err)) + return 2 + } + if resp == nil || resp.Body == nil { + c.UI.Error(fmt.Sprintf("No value found at %s", path)) + return 2 + } + defer resp.Body.Close() + + contents, err := io.ReadAll(resp.Body) + if err != nil { + c.UI.Error(fmt.Sprintf("Error reading: %s: %s", path, err)) + return 2 + } + + return OutputData(c.UI, contents) +} diff --git a/command/read_test.go b/command/read_test.go new file mode 100644 index 0000000..fbe7ab4 --- /dev/null +++ 
b/command/read_test.go @@ -0,0 +1,167 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func testReadCommand(tb testing.TB) (*cli.MockUi, *ReadCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &ReadCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestReadCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + []string{}, + "Not enough arguments", + 1, + }, + { + "proper_args", + []string{"foo", "bar=baz"}, + "No value found at foo\n", + 2, + }, + { + "not_found", + []string{"nope/not/once/never"}, + "", + 2, + }, + { + "default", + []string{"secret/read/foo"}, + "foo", + 0, + }, + { + "field", + []string{ + "-field", "foo", + "secret/read/foo", + }, + "bar", + 0, + }, + { + "field_not_found", + []string{ + "-field", "not-a-real-field", + "secret/read/foo", + }, + "not present in secret", + 1, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + if _, err := client.Logical().Write("secret/read/foo", map[string]interface{}{ + "foo": "bar", + }); err != nil { + t.Fatal(err) + } + + ui, cmd := testReadCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("%s: expected %q to contain %q", tc.name, combined, tc.out) + } + }) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testReadCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "secret/foo", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error reading secret/foo: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_data_object_from_api_response", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testReadCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "sys/health", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + expected := []string{ + "cluster_id", "cluster_name", "initialized", "performance_standby", "replication_dr_mode", "replication_performance_mode", "sealed", + "server_time_utc", "standby", "version", + } + for _, expectedField := range expected { + if !strings.Contains(combined, expectedField) { + t.Errorf("expected %q to contain %q", combined, expected) + } + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testReadCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/rotate.go b/command/rotate.go new file mode 100644 index 0000000..7a174f3 --- /dev/null +++ b/command/rotate.go @@ -0,0 +1,106 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*OperatorRotateCommand)(nil) + _ cli.CommandAutocomplete = (*OperatorRotateCommand)(nil) +) + +type OperatorRotateCommand struct { + *BaseCommand +} + +func (c *OperatorRotateCommand) Synopsis() string { + return "Rotates the underlying encryption key" +} + +func (c *OperatorRotateCommand) Help() string { + helpText := ` +Usage: vault operator rotate [options] + + Rotates the underlying encryption key which is used to secure data written + to the storage backend. This installs a new key in the key ring. This new + key is used to encrypt new data, while older keys in the ring are used to + decrypt older data. + + This is an online operation and does not cause downtime. This command is run + per-cluster (not per-server), since Vault servers in HA mode share the same + storage backend. + + Rotate Vault's encryption key: + + $ vault operator rotate + + For a full list of examples, please see the documentation. + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *OperatorRotateCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP | FlagSetOutputFormat) +} + +func (c *OperatorRotateCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *OperatorRotateCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *OperatorRotateCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + if len(args) > 0 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + // Rotate the key + err = client.Sys().Rotate() + if err != nil { + c.UI.Error(fmt.Sprintf("Error rotating key: %s", err)) + return 2 + } + + // Print the key status + status, err := client.Sys().KeyStatus() + if err != nil { + c.UI.Error(fmt.Sprintf("Error reading key status: %s", err)) + return 2 + } + + switch Format(c.UI) { + case "table": + c.UI.Output("Success! Rotated key") + c.UI.Output("") + c.UI.Output(printKeyStatus(status)) + return 0 + default: + return OutputData(c.UI, status) + } +} diff --git a/command/rotate_test.go b/command/rotate_test.go new file mode 100644 index 0000000..bfd48f7 --- /dev/null +++ b/command/rotate_test.go @@ -0,0 +1,125 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func testOperatorRotateCommand(tb testing.TB) (*cli.MockUi, *OperatorRotateCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &OperatorRotateCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestOperatorRotateCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "too_many_args", + []string{"abcd1234"}, + "Too many arguments", + 1, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testOperatorRotateCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("default", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testOperatorRotateCommand(t) + cmd.client = client + + code := cmd.Run([]string{}) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! Rotated key" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + status, err := client.Sys().KeyStatus() + if err != nil { + t.Fatal(err) + } + if exp := 1; status.Term < exp { + t.Errorf("expected %d to be less than %d", status.Term, exp) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testOperatorRotateCommand(t) + cmd.client = client + + code := cmd.Run([]string{}) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error rotating key: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testOperatorRotateCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/secrets.go b/command/secrets.go new file mode 100644 index 0000000..3201672 --- /dev/null +++ b/command/secrets.go @@ -0,0 +1,46 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + + "github.com/mitchellh/cli" +) + +var _ cli.Command = (*SecretsCommand)(nil) + +type SecretsCommand struct { + *BaseCommand +} + +func (c *SecretsCommand) Synopsis() string { + return "Interact with secrets engines" +} + +func (c *SecretsCommand) Help() string { + helpText := ` +Usage: vault secrets [options] [args] + + This command groups subcommands for interacting with Vault's secrets engines. + Each secret engine behaves differently. Please see the documentation for + more information. + + List all enabled secrets engines: + + $ vault secrets list + + Enable a new secrets engine: + + $ vault secrets enable database + + Please see the individual subcommand help for detailed usage information. 
+` + + return strings.TrimSpace(helpText) +} + +func (c *SecretsCommand) Run(args []string) int { + return cli.RunResultHelp +} diff --git a/command/secrets_disable.go b/command/secrets_disable.go new file mode 100644 index 0000000..8d782a5 --- /dev/null +++ b/command/secrets_disable.go @@ -0,0 +1,89 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*SecretsDisableCommand)(nil) + _ cli.CommandAutocomplete = (*SecretsDisableCommand)(nil) +) + +type SecretsDisableCommand struct { + *BaseCommand +} + +func (c *SecretsDisableCommand) Synopsis() string { + return "Disable a secret engine" +} + +func (c *SecretsDisableCommand) Help() string { + helpText := ` +Usage: vault secrets disable [options] PATH + + Disables a secrets engine at the given PATH. The argument corresponds to + the enabled PATH of the engine, not the TYPE! All secrets created by this + engine are revoked and its Vault data is removed. + + Disable the secrets engine enabled at aws/: + + $ vault secrets disable aws/ + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *SecretsDisableCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP) +} + +func (c *SecretsDisableCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultMounts() +} + +func (c *SecretsDisableCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *SecretsDisableCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + path := ensureTrailingSlash(sanitizePath(args[0])) + + if err := client.Sys().Unmount(path); err != nil { + c.UI.Error(fmt.Sprintf("Error disabling secrets engine at %s: %s", path, err)) + return 2 + } + + c.UI.Output(fmt.Sprintf("Success! Disabled the secrets engine (if it existed) at: %s", path)) + return 0 +} diff --git a/command/secrets_disable_test.go b/command/secrets_disable_test.go new file mode 100644 index 0000000..d7c7da7 --- /dev/null +++ b/command/secrets_disable_test.go @@ -0,0 +1,155 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" +) + +func testSecretsDisableCommand(tb testing.TB) (*cli.MockUi, *SecretsDisableCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &SecretsDisableCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestSecretsDisableCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + []string{}, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"foo", "bar"}, + "Too many arguments", + 1, + }, + { + "not_real", + []string{"not_real"}, + "Success! Disabled the secrets engine (if it existed) at: not_real/", + 0, + }, + { + "default", + []string{"secret"}, + "Success! 
Disabled the secrets engine (if it existed) at: secret/", + 0, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testSecretsDisableCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("integration", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().Mount("my-secret/", &api.MountInput{ + Type: "generic", + }); err != nil { + t.Fatal(err) + } + + ui, cmd := testSecretsDisableCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "my-secret/", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! Disabled the secrets engine (if it existed) at: my-secret/" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + mounts, err := client.Sys().ListMounts() + if err != nil { + t.Fatal(err) + } + + if _, ok := mounts["my-secret/"]; ok { + t.Errorf("expected mount to not exist: %#v", mounts) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testSecretsDisableCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "pki/", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error disabling secrets engine at pki/: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testSecretsDisableCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/secrets_enable.go b/command/secrets_enable.go new file mode 100644 index 0000000..39ce3bf --- /dev/null +++ b/command/secrets_enable.go @@ -0,0 +1,353 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "flag" + "fmt" + "strconv" + "strings" + "time" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*SecretsEnableCommand)(nil) + _ cli.CommandAutocomplete = (*SecretsEnableCommand)(nil) +) + +type SecretsEnableCommand struct { + *BaseCommand + + flagDescription string + flagPath string + flagDefaultLeaseTTL time.Duration + flagMaxLeaseTTL time.Duration + flagAuditNonHMACRequestKeys []string + flagAuditNonHMACResponseKeys []string + flagListingVisibility string + flagPassthroughRequestHeaders []string + flagAllowedResponseHeaders []string + flagForceNoCache bool + flagPluginName string + flagPluginVersion string + flagOptions map[string]string + flagLocal bool + flagSealWrap bool + flagExternalEntropyAccess bool + flagVersion int + flagAllowedManagedKeys []string +} + +func (c *SecretsEnableCommand) Synopsis() string { + return "Enable a secrets engine" +} + +func (c *SecretsEnableCommand) Help() string { + helpText := ` +Usage: vault secrets enable [options] TYPE + + Enables a secrets engine. By default, secrets engines are enabled at the path + corresponding to their TYPE, but users can customize the path using the + -path option. + + Once enabled, Vault will route all requests which begin with the path to the + secrets engine. + + Enable the AWS secrets engine at aws/: + + $ vault secrets enable aws + + Enable the SSH secrets engine at ssh-prod/: + + $ vault secrets enable -path=ssh-prod ssh + + Enable the database secrets engine with an explicit maximum TTL of 30m: + + $ vault secrets enable -max-lease-ttl=30m database + + Enable a custom plugin (after it is registered in the plugin registry): + + $ vault secrets enable -path=my-secrets -plugin-name=my-plugin plugin + + OR (preferred way): + + $ vault secrets enable -path=my-secrets my-plugin + + For a full list of secrets engines and examples, please see the documentation. + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *SecretsEnableCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP) + + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "description", + Target: &c.flagDescription, + Completion: complete.PredictAnything, + Usage: "Human-friendly description for the purpose of this engine.", + }) + + f.StringVar(&StringVar{ + Name: "path", + Target: &c.flagPath, + Default: "", // The default is complex, so we have to manually document + Completion: complete.PredictAnything, + Usage: "Place where the secrets engine will be accessible. This must be " + + "unique across all secrets engines. This defaults to the \"type\" of the " + + "secrets engine.", + }) + + f.DurationVar(&DurationVar{ + Name: "default-lease-ttl", + Target: &c.flagDefaultLeaseTTL, + Completion: complete.PredictAnything, + Usage: "The default lease TTL for this secrets engine. If unspecified, " + + "this defaults to the Vault server's globally configured default lease " + + "TTL.", + }) + + f.DurationVar(&DurationVar{ + Name: "max-lease-ttl", + Target: &c.flagMaxLeaseTTL, + Completion: complete.PredictAnything, + Usage: "The maximum lease TTL for this secrets engine. 
If unspecified, " + + "this defaults to the Vault server's globally configured maximum lease " + + "TTL.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: flagNameAuditNonHMACRequestKeys, + Target: &c.flagAuditNonHMACRequestKeys, + Usage: "Key that will not be HMAC'd by audit devices in the request data object. " + + "To specify multiple values, specify this flag multiple times.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: flagNameAuditNonHMACResponseKeys, + Target: &c.flagAuditNonHMACResponseKeys, + Usage: "Key that will not be HMAC'd by audit devices in the response data object. " + + "To specify multiple values, specify this flag multiple times.", + }) + + f.StringVar(&StringVar{ + Name: flagNameListingVisibility, + Target: &c.flagListingVisibility, + Usage: "Determines the visibility of the mount in the UI-specific listing endpoint.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: flagNamePassthroughRequestHeaders, + Target: &c.flagPassthroughRequestHeaders, + Usage: "Request header value that will be sent to the plugins. To specify multiple " + + "values, specify this flag multiple times.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: flagNameAllowedResponseHeaders, + Target: &c.flagAllowedResponseHeaders, + Usage: "Response header value that plugins will be allowed to set. To specify multiple " + + "values, specify this flag multiple times.", + }) + + f.BoolVar(&BoolVar{ + Name: "force-no-cache", + Target: &c.flagForceNoCache, + Default: false, + Usage: "Force the secrets engine to disable caching. If unspecified, this " + + "defaults to the Vault server's globally configured cache settings. " + + "This does not affect caching of the underlying encrypted data storage.", + }) + + f.StringVar(&StringVar{ + Name: "plugin-name", + Target: &c.flagPluginName, + Completion: c.PredictVaultPlugins(api.PluginTypeSecrets, api.PluginTypeDatabase), + Usage: "Name of the secrets engine plugin. This plugin name must already " + + "exist in Vault's plugin catalog.", + }) + + f.StringVar(&StringVar{ + Name: flagNamePluginVersion, + Target: &c.flagPluginVersion, + Default: "", + Usage: "Select the semantic version of the plugin to enable.", + }) + + f.StringMapVar(&StringMapVar{ + Name: "options", + Target: &c.flagOptions, + Completion: complete.PredictAnything, + Usage: "Key-value pair provided as key=value for the mount options. " + + "This can be specified multiple times.", + }) + + f.BoolVar(&BoolVar{ + Name: "local", + Target: &c.flagLocal, + Default: false, + Usage: "Mark the secrets engine as local-only. Local engines are not " + + "replicated or removed by replication.", + }) + + f.BoolVar(&BoolVar{ + Name: "seal-wrap", + Target: &c.flagSealWrap, + Default: false, + Usage: "Enable seal wrapping of critical values in the secrets engine.", + }) + + f.BoolVar(&BoolVar{ + Name: "external-entropy-access", + Target: &c.flagExternalEntropyAccess, + Default: false, + Usage: "Enable secrets engine to access Vault's external entropy source.", + }) + + f.IntVar(&IntVar{ + Name: "version", + Target: &c.flagVersion, + Default: 0, + Usage: "Select the version of the engine to run. Not supported by all engines.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: flagNameAllowedManagedKeys, + Target: &c.flagAllowedManagedKeys, + Usage: "Managed key name(s) that the mount in question is allowed to access. 
" + + "Note that multiple keys may be specified by providing this option multiple times, " + + "each time with 1 key.", + }) + + return set +} + +func (c *SecretsEnableCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultAvailableMounts() +} + +func (c *SecretsEnableCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *SecretsEnableCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + // Get the engine type type (first arg) + engineType := strings.TrimSpace(args[0]) + if engineType == "plugin" { + engineType = c.flagPluginName + } + + // If no path is specified, we default the path to the backend type + // or use the plugin name if it's a plugin backend + mountPath := c.flagPath + if mountPath == "" { + if engineType == "plugin" { + mountPath = c.flagPluginName + } else { + mountPath = engineType + } + } + + if c.flagVersion > 0 { + if c.flagOptions == nil { + c.flagOptions = make(map[string]string) + } + c.flagOptions["version"] = strconv.Itoa(c.flagVersion) + } + + // Append a trailing slash to indicate it's a path in output + mountPath = ensureTrailingSlash(mountPath) + + // Build mount input + mountInput := &api.MountInput{ + Type: engineType, + Description: c.flagDescription, + Local: c.flagLocal, + SealWrap: c.flagSealWrap, + ExternalEntropyAccess: c.flagExternalEntropyAccess, + Config: api.MountConfigInput{ + DefaultLeaseTTL: c.flagDefaultLeaseTTL.String(), + MaxLeaseTTL: c.flagMaxLeaseTTL.String(), + ForceNoCache: c.flagForceNoCache, + }, + Options: c.flagOptions, + } + + // Set these values only if they are provided in the CLI + f.Visit(func(fl *flag.Flag) { + if fl.Name == flagNameAuditNonHMACRequestKeys { + mountInput.Config.AuditNonHMACRequestKeys = c.flagAuditNonHMACRequestKeys + } + + if fl.Name == flagNameAuditNonHMACResponseKeys { + mountInput.Config.AuditNonHMACResponseKeys = c.flagAuditNonHMACResponseKeys + } + + if fl.Name == flagNameListingVisibility { + mountInput.Config.ListingVisibility = c.flagListingVisibility + } + + if fl.Name == flagNamePassthroughRequestHeaders { + mountInput.Config.PassthroughRequestHeaders = c.flagPassthroughRequestHeaders + } + + if fl.Name == flagNameAllowedResponseHeaders { + mountInput.Config.AllowedResponseHeaders = c.flagAllowedResponseHeaders + } + + if fl.Name == flagNameAllowedManagedKeys { + mountInput.Config.AllowedManagedKeys = c.flagAllowedManagedKeys + } + + if fl.Name == flagNamePluginVersion { + mountInput.Config.PluginVersion = c.flagPluginVersion + } + }) + + if err := client.Sys().Mount(mountPath, mountInput); err != nil { + c.UI.Error(fmt.Sprintf("Error enabling: %s", err)) + return 2 + } + + thing := engineType + " secrets engine" + if engineType == "plugin" { + thing = c.flagPluginName + " plugin" + } + if c.flagPluginVersion != "" { + thing += " version " + c.flagPluginVersion + } + c.UI.Output(fmt.Sprintf("Success! 
Enabled the %s at: %s", thing, mountPath)) + return 0 +} diff --git a/command/secrets_enable_test.go b/command/secrets_enable_test.go new file mode 100644 index 0000000..93984b3 --- /dev/null +++ b/command/secrets_enable_test.go @@ -0,0 +1,275 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "errors" + "io/ioutil" + "os" + "strings" + "testing" + + "github.com/go-test/deep" + "github.com/hashicorp/vault/helper/builtinplugins" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/mitchellh/cli" +) + +// logicalBackendAdjustmentFactor is set to plus 1 for the database backend +// which is a plugin but not found in go.mod files, and minus 1 for the ldap +// and openldap secret backends which have the same underlying plugin. +var logicalBackendAdjustmentFactor = 1 - 1 + +func testSecretsEnableCommand(tb testing.TB) (*cli.MockUi, *SecretsEnableCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &SecretsEnableCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestSecretsEnableCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + []string{}, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"foo", "bar"}, + "Too many arguments", + 1, + }, + { + "not_a_valid_mount", + []string{"nope_definitely_not_a_valid_mount_like_ever"}, + "", + 2, + }, + { + "mount", + []string{"transit"}, + "Success! Enabled the transit secrets engine at: transit/", + 0, + }, + { + "mount_path", + []string{ + "-path", "transit_mount_point", + "transit", + }, + "Success! Enabled the transit secrets engine at: transit_mount_point/", + 0, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testSecretsEnableCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + + t.Run("integration", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testSecretsEnableCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-path", "mount_integration/", + "-description", "The best kind of test", + "-default-lease-ttl", "30m", + "-max-lease-ttl", "1h", + "-audit-non-hmac-request-keys", "foo,bar", + "-audit-non-hmac-response-keys", "foo,bar", + "-passthrough-request-headers", "authorization,authentication", + "-passthrough-request-headers", "www-authentication", + "-allowed-response-headers", "authorization", + "-allowed-managed-keys", "key1,key2", + "-force-no-cache", + "pki", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! 
Enabled the pki secrets engine at: mount_integration/" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + mounts, err := client.Sys().ListMounts() + if err != nil { + t.Fatal(err) + } + + mountInfo, ok := mounts["mount_integration/"] + if !ok { + t.Fatalf("expected mount to exist") + } + if exp := "pki"; mountInfo.Type != exp { + t.Errorf("expected %q to be %q", mountInfo.Type, exp) + } + if exp := "The best kind of test"; mountInfo.Description != exp { + t.Errorf("expected %q to be %q", mountInfo.Description, exp) + } + if exp := 1800; mountInfo.Config.DefaultLeaseTTL != exp { + t.Errorf("expected %d to be %d", mountInfo.Config.DefaultLeaseTTL, exp) + } + if exp := 3600; mountInfo.Config.MaxLeaseTTL != exp { + t.Errorf("expected %d to be %d", mountInfo.Config.MaxLeaseTTL, exp) + } + if exp := true; mountInfo.Config.ForceNoCache != exp { + t.Errorf("expected %t to be %t", mountInfo.Config.ForceNoCache, exp) + } + if diff := deep.Equal([]string{"authorization,authentication", "www-authentication"}, mountInfo.Config.PassthroughRequestHeaders); len(diff) > 0 { + t.Errorf("Failed to find expected values in PassthroughRequestHeaders. Difference is: %v", diff) + } + if diff := deep.Equal([]string{"authorization"}, mountInfo.Config.AllowedResponseHeaders); len(diff) > 0 { + t.Errorf("Failed to find expected values in AllowedResponseHeaders. Difference is: %v", diff) + } + if diff := deep.Equal([]string{"foo,bar"}, mountInfo.Config.AuditNonHMACRequestKeys); len(diff) > 0 { + t.Errorf("Failed to find expected values in AuditNonHMACRequestKeys. Difference is: %v", diff) + } + if diff := deep.Equal([]string{"foo,bar"}, mountInfo.Config.AuditNonHMACResponseKeys); len(diff) > 0 { + t.Errorf("Failed to find expected values in AuditNonHMACResponseKeys. Difference is: %v", diff) + } + if diff := deep.Equal([]string{"key1,key2"}, mountInfo.Config.AllowedManagedKeys); len(diff) > 0 { + t.Errorf("Failed to find expected values in AllowedManagedKeys. Difference is: %v", diff) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testSecretsEnableCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "pki", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error enabling: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testSecretsEnableCommand(t) + assertNoTabs(t, cmd) + }) + + t.Run("mount_all", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerAllBackends(t) + defer closer() + + files, err := ioutil.ReadDir("../builtin/logical") + if err != nil { + t.Fatal(err) + } + + var backends []string + for _, f := range files { + if f.IsDir() { + if f.Name() == "plugin" { + continue + } + if _, err := os.Stat("../builtin/logical/" + f.Name() + "/backend.go"); errors.Is(err, os.ErrNotExist) { + // Skip ext test packages (fake plugins without backends). 
+ continue + } + backends = append(backends, f.Name()) + } + } + + modFile, err := ioutil.ReadFile("../go.mod") + if err != nil { + t.Fatal(err) + } + modLines := strings.Split(string(modFile), "\n") + for _, p := range modLines { + splitLine := strings.Split(strings.TrimSpace(p), " ") + if len(splitLine) == 0 { + continue + } + potPlug := strings.TrimPrefix(splitLine[0], "github.com/hashicorp/") + if strings.HasPrefix(potPlug, "vault-plugin-secrets-") { + backends = append(backends, strings.TrimPrefix(potPlug, "vault-plugin-secrets-")) + } + } + + // backends are found by walking the directory, which includes the database backend, + // however, the plugins registry omits that one + if len(backends) != len(builtinplugins.Registry.Keys(consts.PluginTypeSecrets))+logicalBackendAdjustmentFactor { + t.Fatalf("expected %d logical backends, got %d", len(builtinplugins.Registry.Keys(consts.PluginTypeSecrets))+logicalBackendAdjustmentFactor, len(backends)) + } + + for _, b := range backends { + expectedResult := 0 + + ui, cmd := testSecretsEnableCommand(t) + cmd.client = client + + actualResult := cmd.Run([]string{ + b, + }) + + // Need to handle deprecated builtins specially + status, _ := builtinplugins.Registry.DeprecationStatus(b, consts.PluginTypeSecrets) + if status == consts.PendingRemoval || status == consts.Removed { + expectedResult = 2 + } + + if actualResult != expectedResult { + t.Errorf("type: %s - got: %d, expected: %d - %s", b, actualResult, expectedResult, ui.OutputWriter.String()+ui.ErrorWriter.String()) + } + } + }) +} diff --git a/command/secrets_list.go b/command/secrets_list.go new file mode 100644 index 0000000..90a8fe8 --- /dev/null +++ b/command/secrets_list.go @@ -0,0 +1,189 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "sort" + "strconv" + "strings" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*SecretsListCommand)(nil) + _ cli.CommandAutocomplete = (*SecretsListCommand)(nil) +) + +type SecretsListCommand struct { + *BaseCommand + + flagDetailed bool +} + +func (c *SecretsListCommand) Synopsis() string { + return "List enabled secrets engines" +} + +func (c *SecretsListCommand) Help() string { + helpText := ` +Usage: vault secrets list [options] + + Lists the enabled secret engines on the Vault server. This command also + outputs information about the enabled path including configured TTLs and + human-friendly descriptions. A TTL of "system" indicates that the system + default is in use. 
+ + List all enabled secrets engines: + + $ vault secrets list + + List all enabled secrets engines with detailed output: + + $ vault secrets list -detailed + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *SecretsListCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.BoolVar(&BoolVar{ + Name: "detailed", + Target: &c.flagDetailed, + Default: false, + Usage: "Print detailed information such as TTLs and replication status " + + "about each secrets engine.", + }) + + return set +} + +func (c *SecretsListCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultFiles() +} + +func (c *SecretsListCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *SecretsListCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + if len(args) > 0 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + mounts, err := client.Sys().ListMounts() + if err != nil { + c.UI.Error(fmt.Sprintf("Error listing secrets engines: %s", err)) + return 2 + } + + switch Format(c.UI) { + case "table": + if c.flagDetailed { + c.UI.Output(tableOutput(c.detailedMounts(mounts), nil)) + return 0 + } + c.UI.Output(tableOutput(c.simpleMounts(mounts), nil)) + return 0 + default: + return OutputData(c.UI, mounts) + } +} + +func (c *SecretsListCommand) simpleMounts(mounts map[string]*api.MountOutput) []string { + paths := make([]string, 0, len(mounts)) + for path := range mounts { + paths = append(paths, path) + } + sort.Strings(paths) + + out := []string{"Path | Type | Accessor | Description"} + for _, path := range paths { + mount := mounts[path] + out = append(out, fmt.Sprintf("%s | %s | %s | %s", path, mount.Type, mount.Accessor, mount.Description)) + } + + return out +} + +func (c *SecretsListCommand) detailedMounts(mounts map[string]*api.MountOutput) []string { + paths := make([]string, 0, len(mounts)) + for path := range mounts { + paths = append(paths, path) + } + sort.Strings(paths) + + calcTTL := func(typ string, ttl int) string { + switch { + case typ == "system", typ == "cubbyhole": + return "" + case ttl != 0: + return strconv.Itoa(ttl) + default: + return "system" + } + } + + out := []string{"Path | Plugin | Accessor | Default TTL | Max TTL | Force No Cache | Replication | Seal Wrap | External Entropy Access | Options | Description | UUID | Version | Running Version | Running SHA256 | Deprecation Status"} + for _, path := range paths { + mount := mounts[path] + + defaultTTL := calcTTL(mount.Type, mount.Config.DefaultLeaseTTL) + maxTTL := calcTTL(mount.Type, mount.Config.MaxLeaseTTL) + + replication := "replicated" + if mount.Local { + replication = "local" + } + + pluginName := mount.Type + if pluginName == "plugin" { + pluginName = mount.Config.PluginName + } + + out = append(out, fmt.Sprintf("%s | %s | %s | %s | %s | %t | %s | %t | %v | %s | %s | %s | %s | %s | %s | %s", + path, + pluginName, + mount.Accessor, + defaultTTL, + maxTTL, + mount.Config.ForceNoCache, + replication, + mount.SealWrap, + mount.ExternalEntropyAccess, + mount.Options, + mount.Description, + mount.UUID, + mount.PluginVersion, + mount.RunningVersion, + mount.RunningSha256, + mount.DeprecationStatus, + )) + } + + return out +} diff --git 
a/command/secrets_list_test.go b/command/secrets_list_test.go
new file mode 100644
index 0000000..95b60e3
--- /dev/null
+++ b/command/secrets_list_test.go
@@ -0,0 +1,108 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"strings"
+	"testing"
+
+	"github.com/mitchellh/cli"
+)
+
+func testSecretsListCommand(tb testing.TB) (*cli.MockUi, *SecretsListCommand) {
+	tb.Helper()
+
+	ui := cli.NewMockUi()
+	return ui, &SecretsListCommand{
+		BaseCommand: &BaseCommand{
+			UI: ui,
+		},
+	}
+}
+
+func TestSecretsListCommand_Run(t *testing.T) {
+	t.Parallel()
+
+	cases := []struct {
+		name string
+		args []string
+		out  string
+		code int
+	}{
+		{
+			"too_many_args",
+			[]string{"foo"},
+			"Too many arguments",
+			1,
+		},
+		{
+			"lists",
+			nil,
+			"Path",
+			0,
+		},
+		{
+			"detailed",
+			[]string{"-detailed"},
+			"Deprecation Status",
+			0,
+		},
+	}
+
+	t.Run("validations", func(t *testing.T) {
+		t.Parallel()
+
+		for _, tc := range cases {
+			tc := tc
+
+			t.Run(tc.name, func(t *testing.T) {
+				t.Parallel()
+
+				client, closer := testVaultServer(t)
+				defer closer()
+
+				ui, cmd := testSecretsListCommand(t)
+				cmd.client = client
+
+				code := cmd.Run(tc.args)
+				if code != tc.code {
+					t.Errorf("expected %d to be %d", code, tc.code)
+				}
+
+				combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
+				if !strings.Contains(combined, tc.out) {
+					t.Errorf("expected %q to contain %q", combined, tc.out)
+				}
+			})
+		}
+	})
+
+	t.Run("communication_failure", func(t *testing.T) {
+		t.Parallel()
+
+		client, closer := testVaultServerBad(t)
+		defer closer()
+
+		ui, cmd := testSecretsListCommand(t)
+		cmd.client = client
+
+		code := cmd.Run([]string{})
+		if exp := 2; code != exp {
+			t.Errorf("expected %d to be %d", code, exp)
+		}
+
+		expected := "Error listing secrets engines: "
+		combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
+		if !strings.Contains(combined, expected) {
+			t.Errorf("expected %q to contain %q", combined, expected)
+		}
+	})
+
+	t.Run("no_tabs", func(t *testing.T) {
+		t.Parallel()
+
+		_, cmd := testSecretsListCommand(t)
+		assertNoTabs(t, cmd)
+	})
+}
diff --git a/command/secrets_move.go b/command/secrets_move.go
new file mode 100644
index 0000000..b74adcd
--- /dev/null
+++ b/command/secrets_move.go
@@ -0,0 +1,131 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/mitchellh/cli"
+	"github.com/posener/complete"
+)
+
+var (
+	_ cli.Command             = (*SecretsMoveCommand)(nil)
+	_ cli.CommandAutocomplete = (*SecretsMoveCommand)(nil)
+)
+
+const (
+	MountMigrationStatusSuccess = "success"
+	MountMigrationStatusFailure = "failure"
+)
+
+type SecretsMoveCommand struct {
+	*BaseCommand
+}
+
+func (c *SecretsMoveCommand) Synopsis() string {
+	return "Move a secrets engine to a new path"
+}
+
+func (c *SecretsMoveCommand) Help() string {
+	helpText := `
+Usage: vault secrets move [options] SOURCE DESTINATION
+
+  Moves an existing secrets engine to a new path. Any leases from the old
+  secrets engine are revoked, but all configuration associated with the engine
+  is preserved. It initiates the migration and intermittently polls its status,
+  exiting once a final state is reached.
+
+  This command works within or across namespaces; both source and destination
+  paths can be prefixed with a namespace hierarchy relative to the current
+  namespace.
+
+  WARNING! Moving a secrets engine will revoke any leases from the
+  old engine. 
+ + Move the secrets engine at secret/ to generic/: + + $ vault secrets move secret/ generic/ + + Move the secrets engine at ns1/secret/ across namespaces to ns2/generic/, + where ns1 and ns2 are child namespaces of the current namespace: + + $ vault secrets move ns1/secret/ ns2/generic/ + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *SecretsMoveCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP) +} + +func (c *SecretsMoveCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultMounts() +} + +func (c *SecretsMoveCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *SecretsMoveCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 2: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 2, got %d)", len(args))) + return 1 + case len(args) > 2: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 2, got %d)", len(args))) + return 1 + } + + // Grab the source and destination + source := ensureTrailingSlash(args[0]) + destination := ensureTrailingSlash(args[1]) + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + remountResp, err := client.Sys().StartRemount(source, destination) + if err != nil { + c.UI.Error(fmt.Sprintf("Error moving secrets engine %s to %s: %s", source, destination, err)) + return 2 + } + + c.UI.Output(fmt.Sprintf("Started moving secrets engine %s to %s, with migration ID %s", source, destination, remountResp.MigrationID)) + + // Poll the status endpoint with the returned migration ID + // Exit if a terminal status is reached, else wait and retry + for { + remountStatusResp, err := client.Sys().RemountStatus(remountResp.MigrationID) + if err != nil { + c.UI.Error(fmt.Sprintf("Error checking migration status of secrets engine %s to %s: %s", source, destination, err)) + return 2 + } + if remountStatusResp.MigrationInfo.MigrationStatus == MountMigrationStatusSuccess { + c.UI.Output(fmt.Sprintf("Success! Finished moving secrets engine %s to %s, with migration ID %s", source, destination, remountResp.MigrationID)) + return 0 + } + if remountStatusResp.MigrationInfo.MigrationStatus == MountMigrationStatusFailure { + c.UI.Error(fmt.Sprintf("Failure! Error encountered moving secrets engine %s to %s, with migration ID %s", source, destination, remountResp.MigrationID)) + return 0 + } + c.UI.Output(fmt.Sprintf("Waiting for terminal status in migration of secrets engine %s to %s, with migration ID %s", source, destination, remountResp.MigrationID)) + time.Sleep(10 * time.Second) + } + + return 0 +} diff --git a/command/secrets_move_test.go b/command/secrets_move_test.go new file mode 100644 index 0000000..3aabaa1 --- /dev/null +++ b/command/secrets_move_test.go @@ -0,0 +1,142 @@ +// Copyright (c) HashiCorp, Inc. 
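SecretsMoveCommand above drives the remount workflow through client.Sys().StartRemount and client.Sys().RemountStatus. A condensed sketch of the same polling loop, assuming a live server reachable via the environment; the paths, the terminal status strings, and the 10-second interval mirror the command, while error handling is trimmed:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Kick off the migration; the response carries the migration ID to poll.
	resp, err := client.Sys().StartRemount("secret/", "generic/")
	if err != nil {
		log.Fatal(err)
	}

	// Poll until the migration reaches a terminal state, as the command does.
	for {
		status, err := client.Sys().RemountStatus(resp.MigrationID)
		if err != nil {
			log.Fatal(err)
		}
		if s := status.MigrationInfo.MigrationStatus; s == "success" || s == "failure" {
			fmt.Println("migration", resp.MigrationID, "finished with status:", s)
			return
		}
		time.Sleep(10 * time.Second)
	}
}
```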
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func testSecretsMoveCommand(tb testing.TB) (*cli.MockUi, *SecretsMoveCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &SecretsMoveCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestSecretsMoveCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + []string{}, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"foo", "bar", "baz"}, + "Too many arguments", + 1, + }, + { + "non_existent", + []string{"not_real", "over_here"}, + "Error moving secrets engine not_real/ to over_here/", + 2, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testSecretsMoveCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("integration", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testSecretsMoveCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "secret/", "generic/", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! Finished moving secrets engine secret/ to generic/" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + mounts, err := client.Sys().ListMounts() + if err != nil { + t.Fatal(err) + } + + if _, ok := mounts["generic/"]; !ok { + t.Errorf("expected mount at generic/: %#v", mounts) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testSecretsMoveCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "secret/", "generic/", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error moving secrets engine secret/ to generic/:" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testSecretsMoveCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/secrets_tune.go b/command/secrets_tune.go new file mode 100644 index 0000000..74753e2 --- /dev/null +++ b/command/secrets_tune.go @@ -0,0 +1,254 @@ +// Copyright (c) HashiCorp, Inc. 
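The secrets_tune.go file that begins here funnels every flag into a single client.Sys().TuneMount call. A minimal sketch of the equivalent of `vault secrets tune -default-lease-ttl=72h pki/`, assuming an engine is already mounted at pki/; fields left at their zero value are sent empty, which the server treats as "no change" (the same property the command's ttlToAPI helper relies on):

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Only DefaultLeaseTTL is set; everything else is left untouched.
	err = client.Sys().TuneMount("pki/", api.MountConfigInput{
		DefaultLeaseTTL: "72h",
	})
	if err != nil {
		log.Fatal(err)
	}
}
```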
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "flag" + "fmt" + "strconv" + "strings" + "time" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*SecretsTuneCommand)(nil) + _ cli.CommandAutocomplete = (*SecretsTuneCommand)(nil) +) + +type SecretsTuneCommand struct { + *BaseCommand + + flagAuditNonHMACRequestKeys []string + flagAuditNonHMACResponseKeys []string + flagDefaultLeaseTTL time.Duration + flagDescription string + flagListingVisibility string + flagMaxLeaseTTL time.Duration + flagPassthroughRequestHeaders []string + flagAllowedResponseHeaders []string + flagOptions map[string]string + flagVersion int + flagPluginVersion string + flagAllowedManagedKeys []string +} + +func (c *SecretsTuneCommand) Synopsis() string { + return "Tune a secrets engine configuration" +} + +func (c *SecretsTuneCommand) Help() string { + helpText := ` +Usage: vault secrets tune [options] PATH + + Tunes the configuration options for the secrets engine at the given PATH. + The argument corresponds to the PATH where the secrets engine is enabled, + not the TYPE! + + Tune the default lease for the PKI secrets engine: + + $ vault secrets tune -default-lease-ttl=72h pki/ + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *SecretsTuneCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP) + + f := set.NewFlagSet("Command Options") + + f.StringSliceVar(&StringSliceVar{ + Name: flagNameAuditNonHMACRequestKeys, + Target: &c.flagAuditNonHMACRequestKeys, + Usage: "Key that will not be HMAC'd by audit devices in the request data " + + "object. To specify multiple values, specify this flag multiple times.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: flagNameAuditNonHMACResponseKeys, + Target: &c.flagAuditNonHMACResponseKeys, + Usage: "Key that will not be HMAC'd by audit devices in the response data " + + "object. To specify multiple values, specify this flag multiple times.", + }) + + f.DurationVar(&DurationVar{ + Name: "default-lease-ttl", + Target: &c.flagDefaultLeaseTTL, + Default: 0, + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "The default lease TTL for this secrets engine. If unspecified, " + + "this defaults to the Vault server's globally configured default lease " + + "TTL, or a previously configured value for the secrets engine.", + }) + + f.StringVar(&StringVar{ + Name: flagNameDescription, + Target: &c.flagDescription, + Usage: "Human-friendly description of this secret engine. This overrides the " + + "current stored value, if any.", + }) + + f.StringVar(&StringVar{ + Name: flagNameListingVisibility, + Target: &c.flagListingVisibility, + Usage: "Determines the visibility of the mount in the UI-specific listing " + + "endpoint.", + }) + + f.DurationVar(&DurationVar{ + Name: "max-lease-ttl", + Target: &c.flagMaxLeaseTTL, + Default: 0, + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "The maximum lease TTL for this secrets engine. If unspecified, " + + "this defaults to the Vault server's globally configured maximum lease " + + "TTL, or a previously configured value for the secrets engine.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: flagNamePassthroughRequestHeaders, + Target: &c.flagPassthroughRequestHeaders, + Usage: "Request header value that will be sent to the plugin. 
To specify " + + "multiple values, specify this flag multiple times.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: flagNameAllowedResponseHeaders, + Target: &c.flagAllowedResponseHeaders, + Usage: "Response header value that plugins will be allowed to set. To " + + "specify multiple values, specify this flag multiple times.", + }) + + f.StringMapVar(&StringMapVar{ + Name: "options", + Target: &c.flagOptions, + Completion: complete.PredictAnything, + Usage: "Key-value pair provided as key=value for the mount options. " + + "This can be specified multiple times.", + }) + + f.IntVar(&IntVar{ + Name: "version", + Target: &c.flagVersion, + Default: 0, + Usage: "Select the version of the engine to run. Not supported by all engines.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: flagNameAllowedManagedKeys, + Target: &c.flagAllowedManagedKeys, + Usage: "Managed key name(s) that the mount in question is allowed to access. " + + "Note that multiple keys may be specified by providing this option multiple times, " + + "each time with 1 key.", + }) + + f.StringVar(&StringVar{ + Name: flagNamePluginVersion, + Target: &c.flagPluginVersion, + Default: "", + Usage: "Select the semantic version of the plugin to run. The new version must be registered in " + + "the plugin catalog, and will not start running until the plugin is reloaded.", + }) + + return set +} + +func (c *SecretsTuneCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultMounts() +} + +func (c *SecretsTuneCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *SecretsTuneCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + if c.flagVersion > 0 { + if c.flagOptions == nil { + c.flagOptions = make(map[string]string) + } + c.flagOptions["version"] = strconv.Itoa(c.flagVersion) + } + + // Append a trailing slash to indicate it's a path in output + mountPath := ensureTrailingSlash(sanitizePath(args[0])) + + mountConfigInput := api.MountConfigInput{ + DefaultLeaseTTL: ttlToAPI(c.flagDefaultLeaseTTL), + MaxLeaseTTL: ttlToAPI(c.flagMaxLeaseTTL), + Options: c.flagOptions, + } + + // Set these values only if they are provided in the CLI + f.Visit(func(fl *flag.Flag) { + if fl.Name == flagNameAuditNonHMACRequestKeys { + mountConfigInput.AuditNonHMACRequestKeys = c.flagAuditNonHMACRequestKeys + } + + if fl.Name == flagNameAuditNonHMACResponseKeys { + mountConfigInput.AuditNonHMACResponseKeys = c.flagAuditNonHMACResponseKeys + } + + if fl.Name == flagNameDescription { + mountConfigInput.Description = &c.flagDescription + } + + if fl.Name == flagNameListingVisibility { + mountConfigInput.ListingVisibility = c.flagListingVisibility + } + + if fl.Name == flagNamePassthroughRequestHeaders { + mountConfigInput.PassthroughRequestHeaders = c.flagPassthroughRequestHeaders + } + + if fl.Name == flagNameAllowedResponseHeaders { + mountConfigInput.AllowedResponseHeaders = c.flagAllowedResponseHeaders + } + + if fl.Name == flagNameAllowedManagedKeys { + mountConfigInput.AllowedManagedKeys = c.flagAllowedManagedKeys + } + + if fl.Name == flagNamePluginVersion { + 
mountConfigInput.PluginVersion = c.flagPluginVersion + } + }) + + if err := client.Sys().TuneMount(mountPath, mountConfigInput); err != nil { + c.UI.Error(fmt.Sprintf("Error tuning secrets engine %s: %s", mountPath, err)) + return 2 + } + + c.UI.Output(fmt.Sprintf("Success! Tuned the secrets engine at: %s", mountPath)) + return 0 +} diff --git a/command/secrets_tune_test.go b/command/secrets_tune_test.go new file mode 100644 index 0000000..25b8a7c --- /dev/null +++ b/command/secrets_tune_test.go @@ -0,0 +1,370 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/go-test/deep" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/mitchellh/cli" +) + +func testSecretsTuneCommand(tb testing.TB) (*cli.MockUi, *SecretsTuneCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &SecretsTuneCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestSecretsTuneCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + []string{}, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"foo", "bar"}, + "Too many arguments", + 1, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testSecretsTuneCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("protect_downgrade", func(t *testing.T) { + t.Parallel() + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testSecretsTuneCommand(t) + cmd.client = client + + // Mount + if err := client.Sys().Mount("kv", &api.MountInput{ + Type: "kv", + Options: map[string]string{ + "version": "2", + }, + }); err != nil { + t.Fatal(err) + } + + // confirm default max_versions + mounts, err := client.Sys().ListMounts() + if err != nil { + t.Fatal(err) + } + + mountInfo, ok := mounts["kv/"] + if !ok { + t.Fatalf("expected mount to exist") + } + if exp := "kv"; mountInfo.Type != exp { + t.Errorf("expected %q to be %q", mountInfo.Type, exp) + } + if exp := "2"; mountInfo.Options["version"] != exp { + t.Errorf("expected %q to be %q", mountInfo.Options["version"], exp) + } + + if exp := ""; mountInfo.Options["max_versions"] != exp { + t.Errorf("expected %s to be empty", mountInfo.Options["max_versions"]) + } + + // omitting the version should not cause a downgrade + code := cmd.Run([]string{ + "-options", "max_versions=2", + "kv/", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! 
Tuned the secrets engine at: kv/" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + mounts, err = client.Sys().ListMounts() + if err != nil { + t.Fatal(err) + } + + mountInfo, ok = mounts["kv/"] + if !ok { + t.Fatalf("expected mount to exist") + } + if exp := "2"; mountInfo.Options["version"] != exp { + t.Errorf("expected %q to be %q", mountInfo.Options["version"], exp) + } + if exp := "kv"; mountInfo.Type != exp { + t.Errorf("expected %q to be %q", mountInfo.Type, exp) + } + if exp := "2"; mountInfo.Options["max_versions"] != exp { + t.Errorf("expected %s to be %s", mountInfo.Options["max_versions"], exp) + } + }) + + t.Run("integration", func(t *testing.T) { + t.Run("flags_all", func(t *testing.T) { + t.Parallel() + pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) + defer cleanup(t) + + client, _, closer := testVaultServerPluginDir(t, pluginDir) + defer closer() + + ui, cmd := testSecretsTuneCommand(t) + cmd.client = client + + // Mount + if err := client.Sys().Mount("mount_tune_integration", &api.MountInput{ + Type: "pki", + }); err != nil { + t.Fatal(err) + } + + mounts, err := client.Sys().ListMounts() + if err != nil { + t.Fatal(err) + } + mountInfo, ok := mounts["mount_tune_integration/"] + if !ok { + t.Fatalf("expected mount to exist") + } + + if exp := ""; mountInfo.PluginVersion != exp { + t.Errorf("expected %q to be %q", mountInfo.PluginVersion, exp) + } + + _, _, version := testPluginCreateAndRegisterVersioned(t, client, pluginDir, "pki", api.PluginTypeSecrets) + + code := cmd.Run([]string{ + "-description", "new description", + "-default-lease-ttl", "30m", + "-max-lease-ttl", "1h", + "-audit-non-hmac-request-keys", "foo,bar", + "-audit-non-hmac-response-keys", "foo,bar", + "-passthrough-request-headers", "authorization", + "-passthrough-request-headers", "www-authentication", + "-allowed-response-headers", "authorization,www-authentication", + "-allowed-managed-keys", "key1,key2", + "-listing-visibility", "unauth", + "-plugin-version", version, + "mount_tune_integration/", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! Tuned the secrets engine at: mount_tune_integration/" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + mounts, err = client.Sys().ListMounts() + if err != nil { + t.Fatal(err) + } + + mountInfo, ok = mounts["mount_tune_integration/"] + if !ok { + t.Fatalf("expected mount to exist") + } + if exp := "new description"; mountInfo.Description != exp { + t.Errorf("expected %q to be %q", mountInfo.Description, exp) + } + if exp := "pki"; mountInfo.Type != exp { + t.Errorf("expected %q to be %q", mountInfo.Type, exp) + } + if exp := version; mountInfo.PluginVersion != exp { + t.Errorf("expected %q to be %q", mountInfo.PluginVersion, exp) + } + if exp := 1800; mountInfo.Config.DefaultLeaseTTL != exp { + t.Errorf("expected %d to be %d", mountInfo.Config.DefaultLeaseTTL, exp) + } + if exp := 3600; mountInfo.Config.MaxLeaseTTL != exp { + t.Errorf("expected %d to be %d", mountInfo.Config.MaxLeaseTTL, exp) + } + if diff := deep.Equal([]string{"authorization", "www-authentication"}, mountInfo.Config.PassthroughRequestHeaders); len(diff) > 0 { + t.Errorf("Failed to find expected values for PassthroughRequestHeaders. 
Difference is: %v", diff) + } + if diff := deep.Equal([]string{"authorization,www-authentication"}, mountInfo.Config.AllowedResponseHeaders); len(diff) > 0 { + t.Errorf("Failed to find expected values in AllowedResponseHeaders. Difference is: %v", diff) + } + if diff := deep.Equal([]string{"foo,bar"}, mountInfo.Config.AuditNonHMACRequestKeys); len(diff) > 0 { + t.Errorf("Failed to find expected values in AuditNonHMACRequestKeys. Difference is: %v", diff) + } + if diff := deep.Equal([]string{"foo,bar"}, mountInfo.Config.AuditNonHMACResponseKeys); len(diff) > 0 { + t.Errorf("Failed to find expected values in AuditNonHMACResponseKeys. Difference is: %v", diff) + } + if diff := deep.Equal([]string{"key1,key2"}, mountInfo.Config.AllowedManagedKeys); len(diff) > 0 { + t.Errorf("Failed to find expected values in AllowedManagedKeys. Difference is: %v", diff) + } + }) + + t.Run("flags_description", func(t *testing.T) { + t.Parallel() + t.Run("not_provided", func(t *testing.T) { + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testSecretsTuneCommand(t) + cmd.client = client + + // Mount + if err := client.Sys().Mount("mount_tune_integration", &api.MountInput{ + Type: "pki", + Description: "initial description", + }); err != nil { + t.Fatal(err) + } + + code := cmd.Run([]string{ + "-default-lease-ttl", "30m", + "mount_tune_integration/", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! Tuned the secrets engine at: mount_tune_integration/" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + mounts, err := client.Sys().ListMounts() + if err != nil { + t.Fatal(err) + } + + mountInfo, ok := mounts["mount_tune_integration/"] + if !ok { + t.Fatalf("expected mount to exist") + } + if exp := "initial description"; mountInfo.Description != exp { + t.Errorf("expected %q to be %q", mountInfo.Description, exp) + } + }) + + t.Run("provided_empty", func(t *testing.T) { + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testSecretsTuneCommand(t) + cmd.client = client + + // Mount + if err := client.Sys().Mount("mount_tune_integration", &api.MountInput{ + Type: "pki", + Description: "initial description", + }); err != nil { + t.Fatal(err) + } + + code := cmd.Run([]string{ + "-description", "", + "mount_tune_integration/", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! 
Tuned the secrets engine at: mount_tune_integration/" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + mounts, err := client.Sys().ListMounts() + if err != nil { + t.Fatal(err) + } + + mountInfo, ok := mounts["mount_tune_integration/"] + if !ok { + t.Fatalf("expected mount to exist") + } + if exp := ""; mountInfo.Description != exp { + t.Errorf("expected %q to be %q", mountInfo.Description, exp) + } + }) + }) + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testSecretsTuneCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "pki/", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error tuning secrets engine pki/: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testSecretsTuneCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/server.go b/command/server.go new file mode 100644 index 0000000..9a22156 --- /dev/null +++ b/command/server.go @@ -0,0 +1,3089 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "context" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "path/filepath" + "runtime" + "runtime/pprof" + "sort" + "strconv" + "strings" + "sync" + "time" + + systemd "github.com/coreos/go-systemd/daemon" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-hclog" + wrapping "github.com/hashicorp/go-kms-wrapping/v2" + aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-secure-stdlib/gatedwriter" + "github.com/hashicorp/go-secure-stdlib/mlock" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-secure-stdlib/reloadutil" + "github.com/hashicorp/vault/audit" + config2 "github.com/hashicorp/vault/command/config" + "github.com/hashicorp/vault/command/server" + "github.com/hashicorp/vault/helper/builtinplugins" + "github.com/hashicorp/vault/helper/constants" + "github.com/hashicorp/vault/helper/experiments" + loghelper "github.com/hashicorp/vault/helper/logging" + "github.com/hashicorp/vault/helper/metricsutil" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/helper/testhelpers/teststorage" + "github.com/hashicorp/vault/helper/useragent" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/internalshared/listenerutil" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/helper/testcluster" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/physical" + sr "github.com/hashicorp/vault/serviceregistration" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/vault/hcp_link" + vaultseal "github.com/hashicorp/vault/vault/seal" + "github.com/hashicorp/vault/version" + "github.com/mitchellh/cli" + "github.com/mitchellh/go-testing-interface" + "github.com/pkg/errors" + 
"github.com/posener/complete" + "github.com/sasha-s/go-deadlock" + "go.uber.org/atomic" + "golang.org/x/net/http/httpproxy" + "google.golang.org/grpc/grpclog" +) + +var ( + _ cli.Command = (*ServerCommand)(nil) + _ cli.CommandAutocomplete = (*ServerCommand)(nil) +) + +var memProfilerEnabled = false + +var enableFourClusterDev = func(c *ServerCommand, base *vault.CoreConfig, info map[string]string, infoKeys []string, devListenAddress, tempDir string) int { + c.logger.Error("-dev-four-cluster only supported in enterprise Vault") + return 1 +} + +const ( + storageMigrationLock = "core/migration" + + // Even though there are more types than the ones below, the following consts + // are declared internally for value comparison and reusability. + storageTypeRaft = "raft" + storageTypeConsul = "consul" +) + +type ServerCommand struct { + *BaseCommand + logFlags logFlags + + AuditBackends map[string]audit.Factory + CredentialBackends map[string]logical.Factory + LogicalBackends map[string]logical.Factory + PhysicalBackends map[string]physical.Factory + + ServiceRegistrations map[string]sr.Factory + + ShutdownCh chan struct{} + SighupCh chan struct{} + SigUSR2Ch chan struct{} + + WaitGroup *sync.WaitGroup + + logWriter io.Writer + logGate *gatedwriter.Writer + logger hclog.InterceptLogger + + cleanupGuard sync.Once + + reloadFuncsLock *sync.RWMutex + reloadFuncs *map[string][]reloadutil.ReloadFunc + startedCh chan (struct{}) // for tests + reloadedCh chan (struct{}) // for tests + licenseReloadedCh chan (error) // for tests + + allLoggers []hclog.Logger + + flagConfigs []string + flagRecovery bool + flagExperiments []string + flagDev bool + flagDevTLS bool + flagDevTLSCertDir string + flagDevRootTokenID string + flagDevListenAddr string + flagDevNoStoreToken bool + flagDevPluginDir string + flagDevPluginInit bool + flagDevHA bool + flagDevLatency int + flagDevLatencyJitter int + flagDevLeasedKV bool + flagDevKVV1 bool + flagDevSkipInit bool + flagDevThreeNode bool + flagDevFourCluster bool + flagDevTransactional bool + flagDevAutoSeal bool + flagDevClusterJson string + flagTestVerifyOnly bool + flagTestServerConfig bool + flagDevConsul bool + flagExitOnCoreShutdown bool +} + +func (c *ServerCommand) Synopsis() string { + return "Start a Vault server" +} + +func (c *ServerCommand) Help() string { + helpText := ` +Usage: vault server [options] + + This command starts a Vault server that responds to API requests. By default, + Vault will start in a "sealed" state. The Vault cluster must be initialized + before use, usually by the "vault operator init" command. Each Vault server must + also be unsealed using the "vault operator unseal" command or the API before the + server can respond to requests. + + Start a server with a configuration file: + + $ vault server -config=/etc/vault/config.hcl + + Run in "dev" mode: + + $ vault server -dev -dev-root-token-id="root" + + For a full list of examples, please see the documentation. + +` + c.Flags().Help() + return strings.TrimSpace(helpText) +} + +func (c *ServerCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP) + + f := set.NewFlagSet("Command Options") + + // Augment with the log flags + f.addLogFlags(&c.logFlags) + + f.StringSliceVar(&StringSliceVar{ + Name: "config", + Target: &c.flagConfigs, + Completion: complete.PredictOr( + complete.PredictFiles("*.hcl"), + complete.PredictFiles("*.json"), + complete.PredictDirs("*"), + ), + Usage: "Path to a configuration file or directory of configuration " + + "files. 
This flag can be specified multiple times to load multiple " + + "configurations. If the path is a directory, all files which end in " + + ".hcl or .json are loaded.", + }) + + f.BoolVar(&BoolVar{ + Name: "exit-on-core-shutdown", + Target: &c.flagExitOnCoreShutdown, + Default: false, + Usage: "Exit the vault server if the vault core is shutdown.", + }) + + f.BoolVar(&BoolVar{ + Name: "recovery", + Target: &c.flagRecovery, + Usage: "Enable recovery mode. In this mode, Vault is used to perform recovery actions." + + "Using a recovery operation token, \"sys/raw\" API can be used to manipulate the storage.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: "experiment", + Target: &c.flagExperiments, + Completion: complete.PredictSet(experiments.ValidExperiments()...), + Usage: "Name of an experiment to enable. Experiments should NOT be used in production, and " + + "the associated APIs may have backwards incompatible changes between releases. This " + + "flag can be specified multiple times to specify multiple experiments. This can also be " + + fmt.Sprintf("specified via the %s environment variable as a comma-separated list. ", EnvVaultExperiments) + + "Valid experiments are: " + strings.Join(experiments.ValidExperiments(), ", "), + }) + + f = set.NewFlagSet("Dev Options") + + f.BoolVar(&BoolVar{ + Name: "dev", + Target: &c.flagDev, + Usage: "Enable development mode. In this mode, Vault runs in-memory and " + + "starts unsealed. As the name implies, do not run \"dev\" mode in " + + "production.", + }) + + f.BoolVar(&BoolVar{ + Name: "dev-tls", + Target: &c.flagDevTLS, + Usage: "Enable TLS development mode. In this mode, Vault runs in-memory and " + + "starts unsealed, with a generated TLS CA, certificate and key. " + + "As the name implies, do not run \"dev-tls\" mode in " + + "production.", + }) + + f.StringVar(&StringVar{ + Name: "dev-tls-cert-dir", + Target: &c.flagDevTLSCertDir, + Default: "", + Usage: "Directory where generated TLS files are created if `-dev-tls` is " + + "specified. If left unset, files are generated in a temporary directory.", + }) + + f.StringVar(&StringVar{ + Name: "dev-root-token-id", + Target: &c.flagDevRootTokenID, + Default: "", + EnvVar: "VAULT_DEV_ROOT_TOKEN_ID", + Usage: "Initial root token. This only applies when running in \"dev\" " + + "mode.", + }) + + f.StringVar(&StringVar{ + Name: "dev-listen-address", + Target: &c.flagDevListenAddr, + Default: "127.0.0.1:8200", + EnvVar: "VAULT_DEV_LISTEN_ADDRESS", + Usage: "Address to bind to in \"dev\" mode.", + }) + f.BoolVar(&BoolVar{ + Name: "dev-no-store-token", + Target: &c.flagDevNoStoreToken, + Default: false, + Usage: "Do not persist the dev root token to the token helper " + + "(usually the local filesystem) for use in future requests. " + + "The token will only be displayed in the command output.", + }) + + // Internal-only flags to follow. + // + // Why hello there little source code reader! Welcome to the Vault source + // code. The remaining options are intentionally undocumented and come with + // no warranty or backwards-compatibility promise. Do not use these flags + // in production. Do not build automation using these flags. Unless you are + // developing against Vault, you should not need any of these flags. 
+ + f.StringVar(&StringVar{ + Name: "dev-plugin-dir", + Target: &c.flagDevPluginDir, + Default: "", + Completion: complete.PredictDirs("*"), + Hidden: true, + }) + + f.BoolVar(&BoolVar{ + Name: "dev-plugin-init", + Target: &c.flagDevPluginInit, + Default: true, + Hidden: true, + }) + + f.BoolVar(&BoolVar{ + Name: "dev-ha", + Target: &c.flagDevHA, + Default: false, + Hidden: true, + }) + + f.BoolVar(&BoolVar{ + Name: "dev-transactional", + Target: &c.flagDevTransactional, + Default: false, + Hidden: true, + }) + + f.IntVar(&IntVar{ + Name: "dev-latency", + Target: &c.flagDevLatency, + Hidden: true, + }) + + f.IntVar(&IntVar{ + Name: "dev-latency-jitter", + Target: &c.flagDevLatencyJitter, + Hidden: true, + }) + + f.BoolVar(&BoolVar{ + Name: "dev-leased-kv", + Target: &c.flagDevLeasedKV, + Default: false, + Hidden: true, + }) + + f.BoolVar(&BoolVar{ + Name: "dev-kv-v1", + Target: &c.flagDevKVV1, + Default: false, + Hidden: true, + }) + + f.BoolVar(&BoolVar{ + Name: "dev-auto-seal", + Target: &c.flagDevAutoSeal, + Default: false, + Hidden: true, + }) + + f.BoolVar(&BoolVar{ + Name: "dev-skip-init", + Target: &c.flagDevSkipInit, + Default: false, + Hidden: true, + }) + + f.BoolVar(&BoolVar{ + Name: "dev-three-node", + Target: &c.flagDevThreeNode, + Default: false, + Hidden: true, + }) + + f.BoolVar(&BoolVar{ + Name: "dev-four-cluster", + Target: &c.flagDevFourCluster, + Default: false, + Hidden: true, + }) + + f.BoolVar(&BoolVar{ + Name: "dev-consul", + Target: &c.flagDevConsul, + Default: false, + Hidden: true, + }) + + f.StringVar(&StringVar{ + Name: "dev-cluster-json", + Target: &c.flagDevClusterJson, + Usage: "File to write cluster definition to", + }) + + // TODO: should the below flags be public? + f.BoolVar(&BoolVar{ + Name: "test-verify-only", + Target: &c.flagTestVerifyOnly, + Default: false, + Hidden: true, + }) + + f.BoolVar(&BoolVar{ + Name: "test-server-config", + Target: &c.flagTestServerConfig, + Default: false, + Hidden: true, + }) + + // End internal-only flags. + + return set +} + +func (c *ServerCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *ServerCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *ServerCommand) flushLog() { + c.logger.(hclog.OutputResettable).ResetOutputWithFlush(&hclog.LoggerOptions{ + Output: c.logWriter, + }, c.logGate) +} + +func (c *ServerCommand) parseConfig() (*server.Config, []configutil.ConfigError, error) { + var configErrors []configutil.ConfigError + // Load the configuration + var config *server.Config + for _, path := range c.flagConfigs { + current, err := server.LoadConfig(path) + if err != nil { + return nil, nil, fmt.Errorf("error loading configuration from %s: %w", path, err) + } + + configErrors = append(configErrors, current.Validate(path)...) + + if config == nil { + config = current + } else { + config = config.Merge(current) + } + } + + if config != nil && config.Entropy != nil && config.Entropy.Mode == configutil.EntropyAugmentation && constants.IsFIPS() { + c.UI.Warn("WARNING: Entropy Augmentation is not supported in FIPS 140-2 Inside mode; disabling from server configuration!\n") + config.Entropy = nil + } + + return config, configErrors, nil +} + +func (c *ServerCommand) runRecoveryMode() int { + config, configErrors, err := c.parseConfig() + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + // Ensure at least one config was found. + if config == nil { + c.UI.Output(wrapAtLength( + "No configuration files found. 
Please provide configurations with the " + + "-config flag. If you are supplying the path to a directory, please " + + "ensure the directory contains files with the .hcl or .json " + + "extension.")) + return 1 + } + + // Update the 'log' related aspects of shared config based on config/env var/cli + c.flags.applyLogConfigOverrides(config.SharedConfig) + l, err := c.configureLogging(config) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + c.logger = l + c.allLoggers = append(c.allLoggers, l) + + // reporting Errors found in the config + for _, cErr := range configErrors { + c.logger.Warn(cErr.String()) + } + + // Ensure logging is flushed if initialization fails + defer c.flushLog() + + // create GRPC logger + namedGRPCLogFaker := c.logger.Named("grpclogfaker") + grpclog.SetLogger(&grpclogFaker{ + logger: namedGRPCLogFaker, + log: os.Getenv("VAULT_GRPC_LOGGING") != "", + }) + + if config.Storage == nil { + c.UI.Output("A storage backend must be specified") + return 1 + } + + if config.DefaultMaxRequestDuration != 0 { + vault.DefaultMaxRequestDuration = config.DefaultMaxRequestDuration + } + + logProxyEnvironmentVariables(c.logger) + + // Initialize the storage backend + factory, exists := c.PhysicalBackends[config.Storage.Type] + if !exists { + c.UI.Error(fmt.Sprintf("Unknown storage type %s", config.Storage.Type)) + return 1 + } + if config.Storage.Type == storageTypeRaft || (config.HAStorage != nil && config.HAStorage.Type == storageTypeRaft) { + if envCA := os.Getenv("VAULT_CLUSTER_ADDR"); envCA != "" { + config.ClusterAddr = envCA + } + + if len(config.ClusterAddr) == 0 { + c.UI.Error("Cluster address must be set when using raft storage") + return 1 + } + } + + namedStorageLogger := c.logger.Named("storage." + config.Storage.Type) + backend, err := factory(config.Storage.Config, namedStorageLogger) + if err != nil { + c.UI.Error(fmt.Sprintf("Error initializing storage of type %s: %s", config.Storage.Type, err)) + return 1 + } + + infoKeys := make([]string, 0, 10) + info := make(map[string]string) + info["log level"] = config.LogLevel + infoKeys = append(infoKeys, "log level") + + var barrierSeal vault.Seal + var sealConfigError error + var wrapper wrapping.Wrapper + + if len(config.Seals) == 0 { + config.Seals = append(config.Seals, &configutil.KMS{Type: wrapping.WrapperTypeShamir.String()}) + } + + if len(config.Seals) > 1 { + c.UI.Error("Only one seal block is accepted in recovery mode") + return 1 + } + + configSeal := config.Seals[0] + sealType := wrapping.WrapperTypeShamir.String() + if !configSeal.Disabled && os.Getenv("VAULT_SEAL_TYPE") != "" { + sealType = os.Getenv("VAULT_SEAL_TYPE") + configSeal.Type = sealType + } else { + sealType = configSeal.Type + } + + infoKeys = append(infoKeys, "Seal Type") + info["Seal Type"] = sealType + + var seal vault.Seal + defaultSeal := vault.NewDefaultSeal(vaultseal.NewAccess(aeadwrapper.NewShamirWrapper())) + sealLogger := c.logger.ResetNamed(fmt.Sprintf("seal.%s", sealType)) + wrapper, sealConfigError = configutil.ConfigureWrapper(configSeal, &infoKeys, &info, sealLogger) + if sealConfigError != nil { + if !errwrap.ContainsType(sealConfigError, new(logical.KeyNotFoundError)) { + c.UI.Error(fmt.Sprintf( + "Error parsing Seal configuration: %s", sealConfigError)) + return 1 + } + } + if wrapper == nil { + seal = defaultSeal + } else { + seal, err = vault.NewAutoSeal(vaultseal.NewAccess(wrapper)) + if err != nil { + c.UI.Error(fmt.Sprintf("error creating auto seal: %v", err)) + } + } + barrierSeal = seal + + // Ensure that the seal 
finalizer is called, even if using verify-only + defer func() { + err = seal.Finalize(context.Background()) + if err != nil { + c.UI.Error(fmt.Sprintf("Error finalizing seals: %v", err)) + } + }() + + coreConfig := &vault.CoreConfig{ + Physical: backend, + StorageType: config.Storage.Type, + Seal: barrierSeal, + LogLevel: config.LogLevel, + Logger: c.logger, + DisableMlock: config.DisableMlock, + RecoveryMode: c.flagRecovery, + ClusterAddr: config.ClusterAddr, + } + + core, newCoreError := vault.NewCore(coreConfig) + if newCoreError != nil { + if vault.IsFatalError(newCoreError) { + c.UI.Error(fmt.Sprintf("Error initializing core: %s", newCoreError)) + return 1 + } + } + + if err := core.InitializeRecovery(context.Background()); err != nil { + c.UI.Error(fmt.Sprintf("Error initializing core in recovery mode: %s", err)) + return 1 + } + + // Compile server information for output later + infoKeys = append(infoKeys, "storage") + info["storage"] = config.Storage.Type + + if coreConfig.ClusterAddr != "" { + info["cluster address"] = coreConfig.ClusterAddr + infoKeys = append(infoKeys, "cluster address") + } + + // Initialize the listeners + lns := make([]listenerutil.Listener, 0, len(config.Listeners)) + for _, lnConfig := range config.Listeners { + ln, _, _, err := server.NewListener(lnConfig, c.logGate, c.UI) + if err != nil { + c.UI.Error(fmt.Sprintf("Error initializing listener of type %s: %s", lnConfig.Type, err)) + return 1 + } + + lns = append(lns, listenerutil.Listener{ + Listener: ln, + Config: lnConfig, + }) + } + + listenerCloseFunc := func() { + for _, ln := range lns { + ln.Listener.Close() + } + } + + defer c.cleanupGuard.Do(listenerCloseFunc) + + infoKeys = append(infoKeys, "version") + verInfo := version.GetVersion() + info["version"] = verInfo.FullVersionNumber(false) + + if verInfo.Revision != "" { + info["version sha"] = strings.Trim(verInfo.Revision, "'") + infoKeys = append(infoKeys, "version sha") + } + + infoKeys = append(infoKeys, "recovery mode") + info["recovery mode"] = "true" + + infoKeys = append(infoKeys, "go version") + info["go version"] = runtime.Version() + + fipsStatus := getFIPSInfoKey() + if fipsStatus != "" { + infoKeys = append(infoKeys, "fips") + info["fips"] = fipsStatus + } + + // Server configuration output + padding := 24 + + sort.Strings(infoKeys) + c.UI.Output("==> Vault server configuration:\n") + + for _, k := range infoKeys { + c.UI.Output(fmt.Sprintf( + "%s%s: %s", + strings.Repeat(" ", padding-len(k)), + strings.Title(k), + info[k])) + } + + c.UI.Output("") + + // Tests might not want to start a vault server and just want to verify + // the configuration. 
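+ // (The hidden -test-verify-only flag drives this; the regular,
+ // non-recovery startup path in Run performs the same short-circuit.)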
+ if c.flagTestVerifyOnly { + return 0 + } + + for _, ln := range lns { + handler := vaulthttp.Handler.Handler(&vault.HandlerProperties{ + Core: core, + ListenerConfig: ln.Config, + DisablePrintableCheck: config.DisablePrintableCheck, + RecoveryMode: c.flagRecovery, + RecoveryToken: atomic.NewString(""), + }) + + server := &http.Server{ + Handler: handler, + ReadHeaderTimeout: 10 * time.Second, + ReadTimeout: 30 * time.Second, + IdleTimeout: 5 * time.Minute, + ErrorLog: c.logger.StandardLogger(nil), + } + + go server.Serve(ln.Listener) + } + + if sealConfigError != nil { + init, err := core.InitializedLocally(context.Background()) + if err != nil { + c.UI.Error(fmt.Sprintf("Error checking if core is initialized: %v", err)) + return 1 + } + if init { + c.UI.Error("Vault is initialized but no Seal key could be loaded") + return 1 + } + } + + if newCoreError != nil { + c.UI.Warn(wrapAtLength( + "WARNING! A non-fatal error occurred during initialization. Please " + + "check the logs for more information.")) + c.UI.Warn("") + } + + if !c.logFlags.flagCombineLogs { + c.UI.Output("==> Vault server started! Log data will stream in below:\n") + } + + c.flushLog() + + for { + select { + case <-c.ShutdownCh: + c.UI.Output("==> Vault shutdown triggered") + + c.cleanupGuard.Do(listenerCloseFunc) + + if err := core.Shutdown(); err != nil { + c.UI.Error(fmt.Sprintf("Error with core shutdown: %s", err)) + } + + return 0 + + case <-c.SigUSR2Ch: + buf := make([]byte, 32*1024*1024) + n := runtime.Stack(buf[:], true) + c.logger.Info("goroutine trace", "stack", string(buf[:n])) + } + } +} + +func logProxyEnvironmentVariables(logger hclog.Logger) { + proxyCfg := httpproxy.FromEnvironment() + cfgMap := map[string]string{ + "http_proxy": proxyCfg.HTTPProxy, + "https_proxy": proxyCfg.HTTPSProxy, + "no_proxy": proxyCfg.NoProxy, + } + for k, v := range cfgMap { + u, err := url.Parse(v) + if err != nil { + // Env vars may contain URLs or host:port values. We only care + // about the former. + continue + } + if _, ok := u.User.Password(); ok { + u.User = url.UserPassword("redacted-username", "redacted-password") + } else if user := u.User.Username(); user != "" { + u.User = url.User("redacted-username") + } + cfgMap[k] = u.String() + } + logger.Info("proxy environment", "http_proxy", cfgMap["http_proxy"], + "https_proxy", cfgMap["https_proxy"], "no_proxy", cfgMap["no_proxy"]) +} + +type quiescenceSink struct { + t *time.Timer +} + +func (q quiescenceSink) Accept(name string, level hclog.Level, msg string, args ...interface{}) { + q.t.Reset(100 * time.Millisecond) +} + +func (c *ServerCommand) setupStorage(config *server.Config) (physical.Backend, error) { + // Ensure that a backend is provided + if config.Storage == nil { + return nil, errors.New("A storage backend must be specified") + } + + // Initialize the backend + factory, exists := c.PhysicalBackends[config.Storage.Type] + if !exists { + return nil, fmt.Errorf("Unknown storage type %s", config.Storage.Type) + } + + // Do any custom configuration needed per backend + switch config.Storage.Type { + case storageTypeConsul: + if config.ServiceRegistration == nil { + // If Consul is configured for storage and service registration is unconfigured, + // use Consul for service registration without requiring additional configuration. + // This maintains backward-compatibility. 
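+ // For example, a config whose only Consul stanza is the following
+ // (a sketch; the address and path values are illustrative defaults):
+ //
+ //   storage "consul" {
+ //     address = "127.0.0.1:8500"
+ //     path    = "vault/"
+ //   }
+ //
+ // is treated as if an equivalent service_registration "consul" block
+ // had also been declared with the same settings.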
+ config.ServiceRegistration = &server.ServiceRegistration{ + Type: "consul", + Config: config.Storage.Config, + } + } + case storageTypeRaft: + if envCA := os.Getenv("VAULT_CLUSTER_ADDR"); envCA != "" { + config.ClusterAddr = envCA + } + if len(config.ClusterAddr) == 0 { + return nil, errors.New("Cluster address must be set when using raft storage") + } + } + + namedStorageLogger := c.logger.Named("storage." + config.Storage.Type) + c.allLoggers = append(c.allLoggers, namedStorageLogger) + backend, err := factory(config.Storage.Config, namedStorageLogger) + if err != nil { + return nil, fmt.Errorf("Error initializing storage of type %s: %w", config.Storage.Type, err) + } + + return backend, nil +} + +func beginServiceRegistration(c *ServerCommand, config *server.Config) (sr.ServiceRegistration, error) { + sdFactory, ok := c.ServiceRegistrations[config.ServiceRegistration.Type] + if !ok { + return nil, fmt.Errorf("Unknown service_registration type %s", config.ServiceRegistration.Type) + } + + namedSDLogger := c.logger.Named("service_registration." + config.ServiceRegistration.Type) + c.allLoggers = append(c.allLoggers, namedSDLogger) + + // Since we haven't even begun starting Vault's core yet, + // we know that Vault is in its pre-running state. + state := sr.State{ + VaultVersion: version.GetVersion().VersionNumber(), + IsInitialized: false, + IsSealed: true, + IsActive: false, + IsPerformanceStandby: false, + } + var err error + configSR, err := sdFactory(config.ServiceRegistration.Config, namedSDLogger, state) + if err != nil { + return nil, fmt.Errorf("Error initializing service_registration of type %s: %s", config.ServiceRegistration.Type, err) + } + + return configSR, nil +} + +// InitListeners returns a response code, error message, Listeners, and a TCP Address list. 
+func (c *ServerCommand) InitListeners(config *server.Config, disableClustering bool, infoKeys *[]string, info *map[string]string) (int, []listenerutil.Listener, []*net.TCPAddr, error) { + clusterAddrs := []*net.TCPAddr{} + + // Initialize the listeners + lns := make([]listenerutil.Listener, 0, len(config.Listeners)) + + c.reloadFuncsLock.Lock() + + defer c.reloadFuncsLock.Unlock() + + var errMsg error + for i, lnConfig := range config.Listeners { + ln, props, reloadFunc, err := server.NewListener(lnConfig, c.logGate, c.UI) + if err != nil { + errMsg = fmt.Errorf("Error initializing listener of type %s: %s", lnConfig.Type, err) + return 1, nil, nil, errMsg + } + + if reloadFunc != nil { + relSlice := (*c.reloadFuncs)["listener|"+lnConfig.Type] + relSlice = append(relSlice, reloadFunc) + (*c.reloadFuncs)["listener|"+lnConfig.Type] = relSlice + } + + if !disableClustering && lnConfig.Type == "tcp" { + addr := lnConfig.ClusterAddress + if addr != "" { + tcpAddr, err := net.ResolveTCPAddr("tcp", lnConfig.ClusterAddress) + if err != nil { + errMsg = fmt.Errorf("Error resolving cluster_address: %s", err) + return 1, nil, nil, errMsg + } + clusterAddrs = append(clusterAddrs, tcpAddr) + } else { + tcpAddr, ok := ln.Addr().(*net.TCPAddr) + if !ok { + errMsg = fmt.Errorf("Failed to parse tcp listener") + return 1, nil, nil, errMsg + } + clusterAddr := &net.TCPAddr{ + IP: tcpAddr.IP, + Port: tcpAddr.Port + 1, + } + clusterAddrs = append(clusterAddrs, clusterAddr) + addr = clusterAddr.String() + } + props["cluster address"] = addr + } + + if lnConfig.MaxRequestSize == 0 { + lnConfig.MaxRequestSize = vaulthttp.DefaultMaxRequestSize + } + props["max_request_size"] = fmt.Sprintf("%d", lnConfig.MaxRequestSize) + + if lnConfig.MaxRequestDuration == 0 { + lnConfig.MaxRequestDuration = vault.DefaultMaxRequestDuration + } + props["max_request_duration"] = lnConfig.MaxRequestDuration.String() + + lns = append(lns, listenerutil.Listener{ + Listener: ln, + Config: lnConfig, + }) + + // Store the listener props for output later + key := fmt.Sprintf("listener %d", i+1) + propsList := make([]string, 0, len(props)) + for k, v := range props { + propsList = append(propsList, fmt.Sprintf( + "%s: %q", k, v)) + } + sort.Strings(propsList) + *infoKeys = append(*infoKeys, key) + (*info)[key] = fmt.Sprintf( + "%s (%s)", lnConfig.Type, strings.Join(propsList, ", ")) + + } + if !disableClustering { + if c.logger.IsDebug() { + c.logger.Debug("cluster listener addresses synthesized", "cluster_addresses", clusterAddrs) + } + } + return 0, lns, clusterAddrs, nil +} + +func configureDevTLS(c *ServerCommand) (func(), *server.Config, string, error) { + var devStorageType string + + switch { + case c.flagDevConsul: + devStorageType = "consul" + case c.flagDevHA && c.flagDevTransactional: + devStorageType = "inmem_transactional_ha" + case !c.flagDevHA && c.flagDevTransactional: + devStorageType = "inmem_transactional" + case c.flagDevHA && !c.flagDevTransactional: + devStorageType = "inmem_ha" + default: + devStorageType = "inmem" + } + + var certDir string + var err error + var config *server.Config + var f func() + + if c.flagDevTLS { + if c.flagDevTLSCertDir != "" { + if _, err = os.Stat(c.flagDevTLSCertDir); err != nil { + return nil, nil, "", err + } + + certDir = c.flagDevTLSCertDir + } else { + if certDir, err = os.MkdirTemp("", "vault-tls"); err != nil { + return nil, nil, certDir, err + } + } + config, err = server.DevTLSConfig(devStorageType, certDir) + + f = func() { + if err := os.Remove(fmt.Sprintf("%s/%s", certDir, 
server.VaultDevCAFilename)); err != nil { + c.UI.Error(err.Error()) + } + + if err := os.Remove(fmt.Sprintf("%s/%s", certDir, server.VaultDevCertFilename)); err != nil { + c.UI.Error(err.Error()) + } + + if err := os.Remove(fmt.Sprintf("%s/%s", certDir, server.VaultDevKeyFilename)); err != nil { + c.UI.Error(err.Error()) + } + + // Only delete temp directories we made. + if c.flagDevTLSCertDir == "" { + if err := os.Remove(certDir); err != nil { + c.UI.Error(err.Error()) + } + } + } + + } else { + config, err = server.DevConfig(devStorageType) + } + + return f, config, certDir, err +} + +func (c *ServerCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + // Don't exit just because we saw a potential deadlock. + deadlock.Opts.OnPotentialDeadlock = func() {} + + c.logGate = gatedwriter.NewWriter(os.Stderr) + c.logWriter = c.logGate + + if c.logFlags.flagCombineLogs { + c.logWriter = os.Stdout + } + + if c.flagRecovery { + return c.runRecoveryMode() + } + + // Automatically enable dev mode if other dev flags are provided. + if c.flagDevConsul || c.flagDevHA || c.flagDevTransactional || c.flagDevLeasedKV || c.flagDevThreeNode || c.flagDevFourCluster || c.flagDevAutoSeal || c.flagDevKVV1 || c.flagDevTLS { + c.flagDev = true + } + + // Validation + if !c.flagDev { + switch { + case len(c.flagConfigs) == 0: + c.UI.Error("Must specify at least one config path using -config") + return 1 + case c.flagDevRootTokenID != "": + c.UI.Warn(wrapAtLength( + "You cannot specify a custom root token ID outside of \"dev\" mode. " + + "Your request has been ignored.")) + c.flagDevRootTokenID = "" + } + } + + // Load the configuration + var config *server.Config + var certDir string + if c.flagDev { + df, cfg, dir, err := configureDevTLS(c) + if df != nil { + defer df() + } + + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + config = cfg + certDir = dir + + if c.flagDevListenAddr != "" { + config.Listeners[0].Address = c.flagDevListenAddr + } + config.Listeners[0].Telemetry.UnauthenticatedMetricsAccess = true + } + + parsedConfig, configErrors, err := c.parseConfig() + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + if config == nil { + config = parsedConfig + } else { + config = config.Merge(parsedConfig) + } + + // Ensure at least one config was found. + if config == nil { + c.UI.Output(wrapAtLength( + "No configuration files found. Please provide configurations with the " + + "-config flag. 
If you are supplying the path to a directory, please " + + "ensure the directory contains files with the .hcl or .json " + + "extension.")) + return 1 + } + + f.applyLogConfigOverrides(config.SharedConfig) + + // Set 'trace' log level for the following 'dev' clusters + if c.flagDevThreeNode || c.flagDevFourCluster { + config.LogLevel = "trace" + } + + l, err := c.configureLogging(config) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + c.logger = l + c.allLoggers = append(c.allLoggers, l) + + // reporting Errors found in the config + for _, cErr := range configErrors { + c.logger.Warn(cErr.String()) + } + + // Ensure logging is flushed if initialization fails + defer c.flushLog() + + // create GRPC logger + namedGRPCLogFaker := c.logger.Named("grpclogfaker") + c.allLoggers = append(c.allLoggers, namedGRPCLogFaker) + grpclog.SetLogger(&grpclogFaker{ + logger: namedGRPCLogFaker, + log: os.Getenv("VAULT_GRPC_LOGGING") != "", + }) + + if memProfilerEnabled { + c.startMemProfiler() + } + + if config.DefaultMaxRequestDuration != 0 { + vault.DefaultMaxRequestDuration = config.DefaultMaxRequestDuration + } + + logProxyEnvironmentVariables(c.logger) + + if envMlock := os.Getenv("VAULT_DISABLE_MLOCK"); envMlock != "" { + var err error + config.DisableMlock, err = strconv.ParseBool(envMlock) + if err != nil { + c.UI.Output("Error parsing the environment variable VAULT_DISABLE_MLOCK") + return 1 + } + } + + if envLicensePath := os.Getenv(EnvVaultLicensePath); envLicensePath != "" { + config.LicensePath = envLicensePath + } + if envLicense := os.Getenv(EnvVaultLicense); envLicense != "" { + config.License = envLicense + } + + if err := server.ExperimentsFromEnvAndCLI(config, EnvVaultExperiments, c.flagExperiments); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + // If mlockall(2) isn't supported, show a warning. We disable this in dev + // because it is quite scary to see when first using Vault. We also disable + // this if the user has explicitly disabled mlock in configuration. + if !c.flagDev && !config.DisableMlock && !mlock.Supported() { + c.UI.Warn(wrapAtLength( + "WARNING! mlock is not supported on this system! An mlockall(2)-like " + + "syscall to prevent memory from being swapped to disk is not " + + "supported on this system. For better security, only run Vault on " + + "systems where this call is supported. 
If you are running Vault " + + "in a Docker container, provide the IPC_LOCK cap to the container.")) + } + + inmemMetrics, metricSink, prometheusEnabled, err := configutil.SetupTelemetry(&configutil.SetupTelemetryOpts{ + Config: config.Telemetry, + Ui: c.UI, + ServiceName: "vault", + DisplayName: "Vault", + UserAgent: useragent.String(), + ClusterName: config.ClusterName, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error initializing telemetry: %s", err)) + return 1 + } + metricsHelper := metricsutil.NewMetricsHelper(inmemMetrics, prometheusEnabled) + + // Initialize the storage backend + var backend physical.Backend + if !c.flagDev || config.Storage != nil { + backend, err = c.setupStorage(config) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + // Prevent server startup if migration is active + // TODO: Use OpenTelemetry to integrate this into Diagnose + if c.storageMigrationActive(backend) { + return 1 + } + } + + // Initialize the Service Discovery, if there is one + var configSR sr.ServiceRegistration + if config.ServiceRegistration != nil { + configSR, err = beginServiceRegistration(c, config) + if err != nil { + c.UI.Output(err.Error()) + return 1 + } + } + + infoKeys := make([]string, 0, 10) + info := make(map[string]string) + info["log level"] = config.LogLevel + infoKeys = append(infoKeys, "log level") + + // returns a slice of env vars formatted as "key=value" + envVars := os.Environ() + var envVarKeys []string + for _, v := range envVars { + splitEnvVars := strings.Split(v, "=") + envVarKeys = append(envVarKeys, splitEnvVars[0]) + } + + sort.Strings(envVarKeys) + + key := "environment variables" + info[key] = strings.Join(envVarKeys, ", ") + infoKeys = append(infoKeys, key) + + if len(config.Experiments) != 0 { + expKey := "experiments" + info[expKey] = strings.Join(config.Experiments, ", ") + infoKeys = append(infoKeys, expKey) + } + + barrierSeal, barrierWrapper, unwrapSeal, seals, sealConfigError, err := setSeal(c, config, infoKeys, info) + // Check error here + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + for _, seal := range seals { + // There is always one nil seal. We need to skip it so we don't start an empty Finalize-Seal-Shamir + // section. + if seal == nil { + continue + } + seal := seal // capture range variable + // Ensure that the seal finalizer is called, even if using verify-only + defer func(seal *vault.Seal) { + err = (*seal).Finalize(context.Background()) + if err != nil { + c.UI.Error(fmt.Sprintf("Error finalizing seals: %v", err)) + } + }(&seal) + } + + if barrierSeal == nil { + c.UI.Error("Could not create barrier seal! 
Most likely proper Seal configuration information was not set, but no error was generated.") + return 1 + } + + // prepare a secure random reader for core + secureRandomReader, err := configutil.CreateSecureRandomReaderFunc(config.SharedConfig, barrierWrapper) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + coreConfig := createCoreConfig(c, config, backend, configSR, barrierSeal, unwrapSeal, metricsHelper, metricSink, secureRandomReader) + if c.flagDevThreeNode { + return c.enableThreeNodeDevCluster(&coreConfig, info, infoKeys, c.flagDevListenAddr, os.Getenv("VAULT_DEV_TEMP_DIR")) + } + + if c.flagDevFourCluster { + return enableFourClusterDev(c, &coreConfig, info, infoKeys, c.flagDevListenAddr, os.Getenv("VAULT_DEV_TEMP_DIR")) + } + + if allowPendingRemoval := os.Getenv(consts.EnvVaultAllowPendingRemovalMounts); allowPendingRemoval != "" { + var err error + coreConfig.PendingRemovalMountsAllowed, err = strconv.ParseBool(allowPendingRemoval) + if err != nil { + c.UI.Warn(wrapAtLength("WARNING! failed to parse " + + consts.EnvVaultAllowPendingRemovalMounts + " env var: " + + "defaulting to false.")) + } + } + + // Initialize the separate HA storage backend, if it exists + disableClustering, err := initHaBackend(c, config, &coreConfig, backend) + if err != nil { + c.UI.Output(err.Error()) + return 1 + } + + // Determine the redirect address from environment variables + err = determineRedirectAddr(c, &coreConfig, config) + if err != nil { + c.UI.Output(err.Error()) + } + + // After the redirect bits are sorted out, if no cluster address was + // explicitly given, derive one from the redirect addr + err = findClusterAddress(c, &coreConfig, config, disableClustering) + if err != nil { + c.UI.Output(err.Error()) + return 1 + } + + // Override the UI enabling config by the environment variable + if enableUI := os.Getenv("VAULT_UI"); enableUI != "" { + var err error + coreConfig.EnableUI, err = strconv.ParseBool(enableUI) + if err != nil { + c.UI.Output("Error parsing the environment variable VAULT_UI") + return 1 + } + } + + // If ServiceRegistration is configured, then the backend must support HA + isBackendHA := coreConfig.HAPhysical != nil && coreConfig.HAPhysical.HAEnabled() + if !c.flagDev && (coreConfig.GetServiceRegistration() != nil) && !isBackendHA { + c.UI.Output("service_registration is configured, but storage does not support HA") + return 1 + } + + // Apply any enterprise configuration onto the coreConfig. + adjustCoreConfigForEnt(config, &coreConfig) + + if !storageSupportedForEnt(&coreConfig) { + c.UI.Warn("") + c.UI.Warn(wrapAtLength(fmt.Sprintf("WARNING: storage configured to use %q which is not supported for Vault Enterprise, must be \"raft\" or \"consul\"", coreConfig.StorageType))) + c.UI.Warn("") + } + + if !c.flagDev { + inMemStorageTypes := []string{ + "inmem", "inmem_ha", "inmem_transactional", "inmem_transactional_ha", + } + + if strutil.StrListContains(inMemStorageTypes, coreConfig.StorageType) { + c.UI.Warn("") + c.UI.Warn(wrapAtLength(fmt.Sprintf("WARNING: storage configured to use %q which should NOT be used in production", coreConfig.StorageType))) + c.UI.Warn("") + } + } + + // Initialize the core + core, newCoreError := vault.NewCore(&coreConfig) + if newCoreError != nil { + if vault.IsFatalError(newCoreError) { + c.UI.Error(fmt.Sprintf("Error initializing core: %s", newCoreError)) + return 1 + } + c.UI.Warn(wrapAtLength( + "WARNING! A non-fatal error occurred during initialization. 
Please " + + "check the logs for more information.")) + c.UI.Warn("") + + } + + // Copy the reload funcs pointers back + c.reloadFuncs = coreConfig.ReloadFuncs + c.reloadFuncsLock = coreConfig.ReloadFuncsLock + + // Compile server information for output later + info["storage"] = config.Storage.Type + info["mlock"] = fmt.Sprintf( + "supported: %v, enabled: %v", + mlock.Supported(), !config.DisableMlock && mlock.Supported()) + infoKeys = append(infoKeys, "mlock", "storage") + + if coreConfig.ClusterAddr != "" { + info["cluster address"] = coreConfig.ClusterAddr + infoKeys = append(infoKeys, "cluster address") + } + if coreConfig.RedirectAddr != "" { + info["api address"] = coreConfig.RedirectAddr + infoKeys = append(infoKeys, "api address") + } + + if config.HAStorage != nil { + info["HA storage"] = config.HAStorage.Type + infoKeys = append(infoKeys, "HA storage") + } else { + // If the storage supports HA, then note it + if coreConfig.HAPhysical != nil { + if coreConfig.HAPhysical.HAEnabled() { + info["storage"] += " (HA available)" + } else { + info["storage"] += " (HA disabled)" + } + } + } + + status, lns, clusterAddrs, errMsg := c.InitListeners(config, disableClustering, &infoKeys, &info) + + if status != 0 { + c.UI.Output("Error parsing listener configuration.") + c.UI.Error(errMsg.Error()) + return 1 + } + + // Make sure we close all listeners from this point on + listenerCloseFunc := func() { + for _, ln := range lns { + ln.Listener.Close() + } + } + + defer c.cleanupGuard.Do(listenerCloseFunc) + + infoKeys = append(infoKeys, "version") + verInfo := version.GetVersion() + info["version"] = verInfo.FullVersionNumber(false) + if verInfo.Revision != "" { + info["version sha"] = strings.Trim(verInfo.Revision, "'") + infoKeys = append(infoKeys, "version sha") + } + + infoKeys = append(infoKeys, "cgo") + info["cgo"] = "disabled" + if version.CgoEnabled { + info["cgo"] = "enabled" + } + + infoKeys = append(infoKeys, "recovery mode") + info["recovery mode"] = "false" + + infoKeys = append(infoKeys, "go version") + info["go version"] = runtime.Version() + + fipsStatus := getFIPSInfoKey() + if fipsStatus != "" { + infoKeys = append(infoKeys, "fips") + info["fips"] = fipsStatus + } + + if config.HCPLinkConf != nil { + infoKeys = append(infoKeys, "HCP organization") + info["HCP organization"] = config.HCPLinkConf.Resource.Organization + + infoKeys = append(infoKeys, "HCP project") + info["HCP project"] = config.HCPLinkConf.Resource.Project + + infoKeys = append(infoKeys, "HCP resource ID") + info["HCP resource ID"] = config.HCPLinkConf.Resource.ID + } + + infoKeys = append(infoKeys, "administrative namespace") + info["administrative namespace"] = config.AdministrativeNamespacePath + + sort.Strings(infoKeys) + c.UI.Output("==> Vault server configuration:\n") + + for _, k := range infoKeys { + c.UI.Output(fmt.Sprintf( + "%24s: %s", + strings.Title(k), + info[k])) + } + + c.UI.Output("") + + // Tests might not want to start a vault server and just want to verify + // the configuration. + if c.flagTestVerifyOnly { + return 0 + } + + // This needs to happen before we first unseal, so before we trigger dev + // mode if it's set + core.SetClusterListenerAddrs(clusterAddrs) + core.SetClusterHandler(vaulthttp.Handler.Handler(&vault.HandlerProperties{ + Core: core, + })) + + // Attempt unsealing in a background goroutine. This is needed for when a + // Vault cluster with multiple servers is configured with auto-unseal but is + // uninitialized. 
Once one server initializes the storage backend, this + // goroutine will pick up the unseal keys and unseal this instance. + if !core.IsInSealMigrationMode(true) { + go runUnseal(c, core, context.Background()) + } + + // When the underlying storage is raft, kick off retry join if it was specified + // in the configuration + // TODO: Should we also support retry_join for ha_storage? + if config.Storage.Type == storageTypeRaft { + if err := core.InitiateRetryJoin(context.Background()); err != nil { + c.UI.Error(fmt.Sprintf("Failed to initiate raft retry join, %q", err.Error())) + return 1 + } + } + + // Perform initialization of HTTP server after the verifyOnly check. + + // Instantiate the wait group + c.WaitGroup = &sync.WaitGroup{} + + // If service discovery is available, run service discovery + err = runListeners(c, &coreConfig, config, configSR) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + // If we're in Dev mode, then initialize the core + clusterJson := &testcluster.ClusterJson{} + err = initDevCore(c, &coreConfig, config, core, certDir, clusterJson) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + // Initialize the HTTP servers + err = startHttpServers(c, core, config, lns) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + hcpLogger := c.logger.Named("hcp-connectivity") + hcpLink, err := hcp_link.NewHCPLink(config.HCPLinkConf, core, hcpLogger) + if err != nil { + c.logger.Error("failed to establish HCP connection", "error", err) + } else if hcpLink != nil { + c.logger.Trace("established HCP connection") + } + + if c.flagTestServerConfig { + return 0 + } + + if sealConfigError != nil { + init, err := core.InitializedLocally(context.Background()) + if err != nil { + c.UI.Error(fmt.Sprintf("Error checking if core is initialized: %v", err)) + return 1 + } + if init { + c.UI.Error("Vault is initialized but no Seal key could be loaded") + return 1 + } + } + + // Output the header that the server has started + if !c.logFlags.flagCombineLogs { + c.UI.Output("==> Vault server started! Log data will stream in below:\n") + } + + // Inform any tests that the server is ready + select { + case c.startedCh <- struct{}{}: + default: + } + + // Release the log gate. 
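+ // Until this point all log output has been buffered by the gated
+ // writer so that the startup banner prints first; flushing replays the
+ // buffered entries and streams everything that follows directly.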
+ c.flushLog() + + // Write out the PID to the file now that server has successfully started + if err := c.storePidFile(config.PidFile); err != nil { + c.UI.Error(fmt.Sprintf("Error storing PID: %s", err)) + return 1 + } + + // Notify systemd that the server is ready (if applicable) + c.notifySystemd(systemd.SdNotifyReady) + + if c.flagDev { + protocol := "http://" + if c.flagDevTLS { + protocol = "https://" + } + clusterJson.Nodes = []testcluster.ClusterNode{ + { + APIAddress: protocol + config.Listeners[0].Address, + }, + } + if c.flagDevTLS { + clusterJson.CACertPath = fmt.Sprintf("%s/%s", certDir, server.VaultDevCAFilename) + } + + if c.flagDevClusterJson != "" && !c.flagDevThreeNode { + b, err := jsonutil.EncodeJSON(clusterJson) + if err != nil { + c.UI.Error(fmt.Sprintf("Error encoding cluster.json: %s", err)) + return 1 + } + err = os.WriteFile(c.flagDevClusterJson, b, 0o600) + if err != nil { + c.UI.Error(fmt.Sprintf("Error writing cluster.json %q: %s", c.flagDevClusterJson, err)) + return 1 + } + } + } + + defer func() { + if err := c.removePidFile(config.PidFile); err != nil { + c.UI.Error(fmt.Sprintf("Error deleting the PID file: %s", err)) + } + }() + + var coreShutdownDoneCh <-chan struct{} + if c.flagExitOnCoreShutdown { + coreShutdownDoneCh = core.ShutdownDone() + } + + // Wait for shutdown + shutdownTriggered := false + retCode := 0 + + for !shutdownTriggered { + select { + case <-coreShutdownDoneCh: + c.UI.Output("==> Vault core was shut down") + retCode = 1 + shutdownTriggered = true + case <-c.ShutdownCh: + c.UI.Output("==> Vault shutdown triggered") + shutdownTriggered = true + case <-c.SighupCh: + c.UI.Output("==> Vault reload triggered") + + // Notify systemd that the server is reloading config + c.notifySystemd(systemd.SdNotifyReloading) + + // Check for new log level + var config *server.Config + var configErrors []configutil.ConfigError + for _, path := range c.flagConfigs { + current, err := server.LoadConfig(path) + if err != nil { + c.logger.Error("could not reload config", "path", path, "error", err) + goto RUNRELOADFUNCS + } + + configErrors = append(configErrors, current.Validate(path)...) + + if config == nil { + config = current + } else { + config = config.Merge(current) + } + } + + // Ensure at least one config was found. 
+ if config == nil { + c.logger.Error("no config found at reload time") + goto RUNRELOADFUNCS + } + + // reporting Errors found in the config + for _, cErr := range configErrors { + c.logger.Warn(cErr.String()) + } + + core.SetConfig(config) + + // reloading custom response headers to make sure we have + // the most up to date headers after reloading the config file + if err = core.ReloadCustomResponseHeaders(); err != nil { + c.logger.Error(err.Error()) + } + + // Setting log request with the new value in the config after reload + core.ReloadLogRequestsLevel() + + // reloading HCP link + hcpLink, err = c.reloadHCPLink(hcpLink, config, core, hcpLogger) + if err != nil { + c.logger.Error(err.Error()) + } + + // Reload log level for loggers + if config.LogLevel != "" { + level, err := loghelper.ParseLogLevel(config.LogLevel) + if err != nil { + c.logger.Error("unknown log level found on reload", "level", config.LogLevel) + goto RUNRELOADFUNCS + } + core.SetLogLevel(level) + } + + RUNRELOADFUNCS: + if err := c.Reload(c.reloadFuncsLock, c.reloadFuncs, c.flagConfigs, core); err != nil { + c.UI.Error(fmt.Sprintf("Error(s) were encountered during reload: %s", err)) + } + + // Reload license file + if err = vault.LicenseReload(core); err != nil { + c.UI.Error(err.Error()) + } + + if err := core.ReloadCensus(); err != nil { + c.UI.Error(err.Error()) + } + select { + case c.licenseReloadedCh <- err: + default: + } + + // Let the managedKeyRegistry react to configuration changes (i.e. + // changes in kms_libraries) + core.ReloadManagedKeyRegistryConfig() + + // Notify systemd that the server has completed reloading config + c.notifySystemd(systemd.SdNotifyReady) + + case <-c.SigUSR2Ch: + logWriter := c.logger.StandardWriter(&hclog.StandardLoggerOptions{}) + pprof.Lookup("goroutine").WriteTo(logWriter, 2) + + if os.Getenv("VAULT_STACKTRACE_WRITE_TO_FILE") != "" { + c.logger.Info("Writing stacktrace to file") + + dir := "" + path := os.Getenv("VAULT_STACKTRACE_FILE_PATH") + if path != "" { + if _, err := os.Stat(path); err != nil { + c.logger.Error("Checking stacktrace path failed", "error", err) + continue + } + dir = path + } else { + dir, err = os.MkdirTemp("", "vault-stacktrace") + if err != nil { + c.logger.Error("Could not create temporary directory for stacktrace", "error", err) + continue + } + } + + f, err := os.CreateTemp(dir, "stacktrace") + if err != nil { + c.logger.Error("Could not create stacktrace file", "error", err) + continue + } + + if err := pprof.Lookup("goroutine").WriteTo(f, 2); err != nil { + f.Close() + c.logger.Error("Could not write stacktrace to file", "error", err) + continue + } + + c.logger.Info(fmt.Sprintf("Wrote stacktrace to: %s", f.Name())) + f.Close() + } + + // We can only get pprof outputs via the API but sometimes Vault can get + // into a state where it cannot process requests so we can get pprof outputs + // via SIGUSR2. 
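+ // For example (hypothetical operator session; the env var must be set
+ // on the server process itself):
+ //
+ //   $ VAULT_PPROF_WRITE_TO_FILE=1 vault server -config=/etc/vault.hcl &
+ //   $ kill -USR2 "$(pidof vault)"
+ //
+ // The goroutine, heap, allocs, and threadcreate profiles are written
+ // to a temporary directory, or to VAULT_PPROF_FILE_PATH if it is set.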
+ if os.Getenv("VAULT_PPROF_WRITE_TO_FILE") != "" { + dir := "" + path := os.Getenv("VAULT_PPROF_FILE_PATH") + if path != "" { + if _, err := os.Stat(path); err != nil { + c.logger.Error("Checking pprof path failed", "error", err) + continue + } + dir = path + } else { + dir, err = os.MkdirTemp("", "vault-pprof") + if err != nil { + c.logger.Error("Could not create temporary directory for pprof", "error", err) + continue + } + } + + dumps := []string{"goroutine", "heap", "allocs", "threadcreate"} + for _, dump := range dumps { + pFile, err := os.Create(filepath.Join(dir, dump)) + if err != nil { + c.logger.Error("error creating pprof file", "name", dump, "error", err) + break + } + + err = pprof.Lookup(dump).WriteTo(pFile, 0) + if err != nil { + c.logger.Error("error generating pprof data", "name", dump, "error", err) + pFile.Close() + break + } + pFile.Close() + } + + c.logger.Info(fmt.Sprintf("Wrote pprof files to: %s", dir)) + } + } + } + // Notify systemd that the server is shutting down + c.notifySystemd(systemd.SdNotifyStopping) + + // Stop the listeners so that we don't process further client requests. + c.cleanupGuard.Do(listenerCloseFunc) + + if hcpLink != nil { + if err := hcpLink.Shutdown(); err != nil { + c.UI.Error(fmt.Sprintf("Error with HCP Link shutdown: %v", err.Error())) + } + } + + // Finalize will wait until after Vault is sealed, which means the + // request forwarding listeners will also be closed (and also + // waited for). + if err := core.Shutdown(); err != nil { + c.UI.Error(fmt.Sprintf("Error with core shutdown: %s", err)) + } + + // Wait for dependent goroutines to complete + c.WaitGroup.Wait() + return retCode +} + +// configureLogging takes the configuration and attempts to parse config values into 'log' friendly configuration values +// If all goes to plan, a logger is created and setup. 
+func (c *ServerCommand) configureLogging(config *server.Config) (hclog.InterceptLogger, error) { + // Parse all the log related config + logLevel, err := loghelper.ParseLogLevel(config.LogLevel) + if err != nil { + return nil, err + } + + logFormat, err := loghelper.ParseLogFormat(config.LogFormat) + if err != nil { + return nil, err + } + + logRotateDuration, err := parseutil.ParseDurationSecond(config.LogRotateDuration) + if err != nil { + return nil, err + } + + logCfg, err := loghelper.NewLogConfig("vault") + if err != nil { + return nil, err + } + logCfg.LogLevel = logLevel + logCfg.LogFormat = logFormat + logCfg.LogFilePath = config.LogFile + logCfg.LogRotateDuration = logRotateDuration + logCfg.LogRotateBytes = config.LogRotateBytes + logCfg.LogRotateMaxFiles = config.LogRotateMaxFiles + + return loghelper.Setup(logCfg, c.logWriter) +} + +func (c *ServerCommand) reloadHCPLink(hcpLinkVault *hcp_link.HCPLinkVault, conf *server.Config, core *vault.Core, hcpLogger hclog.Logger) (*hcp_link.HCPLinkVault, error) { + // trigger a shutdown + if hcpLinkVault != nil { + err := hcpLinkVault.Shutdown() + if err != nil { + return nil, err + } + } + + if conf.HCPLinkConf == nil { + // if cloud stanza is not configured, we should not show anything + // in the seal-status related to HCP link + core.SetHCPLinkStatus("", "") + return nil, nil + } + + // starting HCP link + hcpLink, err := hcp_link.NewHCPLink(conf.HCPLinkConf, core, hcpLogger) + if err != nil { + return nil, fmt.Errorf("failed to restart HCP Link and it is no longer running, %w", err) + } + + return hcpLink, nil +} + +func (c *ServerCommand) notifySystemd(status string) { + sent, err := systemd.SdNotify(false, status) + if err != nil { + c.logger.Error("error notifying systemd", "error", err) + } else { + if sent { + c.logger.Debug("sent systemd notification", "notification", status) + } else { + c.logger.Debug("would have sent systemd notification (systemd not present)", "notification", status) + } + } +} + +func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig) (*vault.InitResult, error) { + ctx := namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace) + + var recoveryConfig *vault.SealConfig + barrierConfig := &vault.SealConfig{ + SecretShares: 1, + SecretThreshold: 1, + } + + if core.SealAccess().RecoveryKeySupported() { + recoveryConfig = &vault.SealConfig{ + SecretShares: 1, + SecretThreshold: 1, + } + } + + if core.SealAccess().StoredKeysSupported() != vaultseal.StoredKeysNotSupported { + barrierConfig.StoredShares = 1 + } + + // Initialize it with a basic single key + init, err := core.Initialize(ctx, &vault.InitParams{ + BarrierConfig: barrierConfig, + RecoveryConfig: recoveryConfig, + }) + if err != nil { + return nil, err + } + + // Handle unseal with stored keys + if core.SealAccess().StoredKeysSupported() == vaultseal.StoredKeysSupportedGeneric { + err := core.UnsealWithStoredKeys(ctx) + if err != nil { + return nil, err + } + } else { + // Copy the key so that it can be zeroed + key := make([]byte, len(init.SecretShares[0])) + copy(key, init.SecretShares[0]) + + // Unseal the core + unsealed, err := core.Unseal(key) + if err != nil { + return nil, err + } + if !unsealed { + return nil, fmt.Errorf("failed to unseal Vault for dev mode") + } + } + + isLeader, _, _, err := core.Leader() + if err != nil && err != vault.ErrHANotEnabled { + return nil, fmt.Errorf("failed to check active status: %w", err) + } + if err == nil { + leaderCount := 5 + for !isLeader { + if leaderCount 
== 0 { + buf := make([]byte, 1<<16) + runtime.Stack(buf, true) + return nil, fmt.Errorf("failed to get active status after five seconds; call stack is\n%s", buf) + } + time.Sleep(1 * time.Second) + isLeader, _, _, err = core.Leader() + if err != nil { + return nil, fmt.Errorf("failed to check active status: %w", err) + } + leaderCount-- + } + } + + // Generate a dev root token if one is provided in the flag + if coreConfig.DevToken != "" { + req := &logical.Request{ + ID: "dev-gen-root", + Operation: logical.UpdateOperation, + ClientToken: init.RootToken, + Path: "auth/token/create", + Data: map[string]interface{}{ + "id": coreConfig.DevToken, + "policies": []string{"root"}, + "no_parent": true, + "no_default_policy": true, + }, + } + resp, err := core.HandleRequest(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to create root token with ID %q: %w", coreConfig.DevToken, err) + } + if resp == nil { + return nil, fmt.Errorf("nil response when creating root token with ID %q", coreConfig.DevToken) + } + if resp.Auth == nil { + return nil, fmt.Errorf("nil auth when creating root token with ID %q", coreConfig.DevToken) + } + + init.RootToken = resp.Auth.ClientToken + + req.ID = "dev-revoke-init-root" + req.Path = "auth/token/revoke-self" + req.Data = nil + _, err = core.HandleRequest(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to revoke initial root token: %w", err) + } + } + + // Set the token + if !c.flagDevNoStoreToken { + tokenHelper, err := c.TokenHelper() + if err != nil { + return nil, err + } + if err := tokenHelper.Store(init.RootToken); err != nil { + return nil, err + } + } + + kvVer := "2" + if c.flagDevKVV1 || c.flagDevLeasedKV { + kvVer = "1" + } + req := &logical.Request{ + Operation: logical.UpdateOperation, + ClientToken: init.RootToken, + Path: "sys/mounts/secret", + Data: map[string]interface{}{ + "type": "kv", + "path": "secret/", + "description": "key/value secret storage", + "options": map[string]string{ + "version": kvVer, + }, + }, + } + resp, err := core.HandleRequest(ctx, req) + if err != nil { + return nil, fmt.Errorf("error creating default K/V store: %w", err) + } + if resp.IsError() { + return nil, fmt.Errorf("failed to create default K/V store: %w", resp.Error()) + } + + return init, nil +} + +func (c *ServerCommand) enableThreeNodeDevCluster(base *vault.CoreConfig, info map[string]string, infoKeys []string, devListenAddress, tempDir string) int { + conf, opts := teststorage.ClusterSetup(base, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + BaseListenAddress: c.flagDevListenAddr, + Logger: c.logger, + TempDir: tempDir, + DefaultHandlerProperties: vault.HandlerProperties{ + ListenerConfig: &configutil.Listener{ + Profiling: configutil.ListenerProfiling{ + UnauthenticatedPProfAccess: true, + }, + Telemetry: configutil.ListenerTelemetry{ + UnauthenticatedMetricsAccess: true, + }, + }, + }, + }, nil) + testCluster := vault.NewTestCluster(&testing.RuntimeT{}, conf, opts) + defer c.cleanupGuard.Do(testCluster.Cleanup) + + if constants.IsEnterprise { + err := testcluster.WaitForActiveNodeAndPerfStandbys(context.Background(), testCluster) + if err != nil { + c.UI.Error(fmt.Sprintf("perf standbys didn't become ready: %v", err)) + return 1 + } + } + + info["cluster parameters path"] = testCluster.TempDir + infoKeys = append(infoKeys, "cluster parameters path") + + for i, core := range testCluster.Cores { + info[fmt.Sprintf("node %d api address", i)] = fmt.Sprintf("https://%s", core.Listeners[0].Address.String()) + infoKeys = 
append(infoKeys, fmt.Sprintf("node %d api address", i)) + } + + infoKeys = append(infoKeys, "version") + verInfo := version.GetVersion() + info["version"] = verInfo.FullVersionNumber(false) + if verInfo.Revision != "" { + info["version sha"] = strings.Trim(verInfo.Revision, "'") + infoKeys = append(infoKeys, "version sha") + } + + infoKeys = append(infoKeys, "cgo") + info["cgo"] = "disabled" + if version.CgoEnabled { + info["cgo"] = "enabled" + } + + infoKeys = append(infoKeys, "go version") + info["go version"] = runtime.Version() + + fipsStatus := getFIPSInfoKey() + if fipsStatus != "" { + infoKeys = append(infoKeys, "fips") + info["fips"] = fipsStatus + } + + // Server configuration output + padding := 24 + + sort.Strings(infoKeys) + c.UI.Output("==> Vault server configuration:\n") + + for _, k := range infoKeys { + c.UI.Output(fmt.Sprintf( + "%s%s: %s", + strings.Repeat(" ", padding-len(k)), + strings.Title(k), + info[k])) + } + + c.UI.Output("") + + for _, core := range testCluster.Cores { + core.Server.Handler = vaulthttp.Handler.Handler(&vault.HandlerProperties{ + Core: core.Core, + }) + core.SetClusterHandler(core.Server.Handler) + } + + testCluster.Start() + + ctx := namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace) + + if base.DevToken != "" { + req := &logical.Request{ + ID: "dev-gen-root", + Operation: logical.UpdateOperation, + ClientToken: testCluster.RootToken, + Path: "auth/token/create", + Data: map[string]interface{}{ + "id": base.DevToken, + "policies": []string{"root"}, + "no_parent": true, + "no_default_policy": true, + }, + } + resp, err := testCluster.Cores[0].HandleRequest(ctx, req) + if err != nil { + c.UI.Error(fmt.Sprintf("failed to create root token with ID %s: %s", base.DevToken, err)) + return 1 + } + if resp == nil { + c.UI.Error(fmt.Sprintf("nil response when creating root token with ID %s", base.DevToken)) + return 1 + } + if resp.Auth == nil { + c.UI.Error(fmt.Sprintf("nil auth when creating root token with ID %s", base.DevToken)) + return 1 + } + + testCluster.RootToken = resp.Auth.ClientToken + + req.ID = "dev-revoke-init-root" + req.Path = "auth/token/revoke-self" + req.Data = nil + _, err = testCluster.Cores[0].HandleRequest(ctx, req) + if err != nil { + c.UI.Output(fmt.Sprintf("failed to revoke initial root token: %s", err)) + return 1 + } + } + + // Set the token + tokenHelper, err := c.TokenHelper() + if err != nil { + c.UI.Error(fmt.Sprintf("Error getting token helper: %s", err)) + return 1 + } + if err := tokenHelper.Store(testCluster.RootToken); err != nil { + c.UI.Error(fmt.Sprintf("Error storing in token helper: %s", err)) + return 1 + } + + if err := ioutil.WriteFile(filepath.Join(testCluster.TempDir, "root_token"), []byte(testCluster.RootToken), 0o600); err != nil { + c.UI.Error(fmt.Sprintf("Error writing token to tempfile: %s", err)) + return 1 + } + + c.UI.Output(fmt.Sprintf( + "==> Three node dev mode is enabled\n\n" + + "The unseal key and root token are reproduced below in case you\n" + + "want to seal/unseal the Vault or play with authentication.\n", + )) + + for i, key := range testCluster.BarrierKeys { + c.UI.Output(fmt.Sprintf( + "Unseal Key %d: %s", + i+1, base64.StdEncoding.EncodeToString(key), + )) + } + + c.UI.Output(fmt.Sprintf( + "\nRoot Token: %s\n", testCluster.RootToken, + )) + + c.UI.Output(fmt.Sprintf( + "\nUseful env vars:\n"+ + "VAULT_TOKEN=%s\n"+ + "VAULT_ADDR=%s\n"+ + "VAULT_CACERT=%s/ca_cert.pem\n", + testCluster.RootToken, + testCluster.Cores[0].Client.Address(), + testCluster.TempDir, + 
)) + + if c.flagDevClusterJson != "" { + clusterJson := testcluster.ClusterJson{ + Nodes: []testcluster.ClusterNode{}, + CACertPath: filepath.Join(testCluster.TempDir, "ca_cert.pem"), + RootToken: testCluster.RootToken, + } + for _, core := range testCluster.Cores { + clusterJson.Nodes = append(clusterJson.Nodes, testcluster.ClusterNode{ + APIAddress: core.Client.Address(), + }) + } + b, err := jsonutil.EncodeJSON(clusterJson) + if err != nil { + c.UI.Error(fmt.Sprintf("Error encoding cluster.json: %s", err)) + return 1 + } + err = os.WriteFile(c.flagDevClusterJson, b, 0o600) + if err != nil { + c.UI.Error(fmt.Sprintf("Error writing cluster.json %q: %s", c.flagDevClusterJson, err)) + return 1 + } + } + + // Output the header that the server has started + c.UI.Output("==> Vault server started! Log data will stream in below:\n") + + // Inform any tests that the server is ready + select { + case c.startedCh <- struct{}{}: + default: + } + + // Release the log gate. + c.flushLog() + + // Wait for shutdown + shutdownTriggered := false + + for !shutdownTriggered { + select { + case <-c.ShutdownCh: + c.UI.Output("==> Vault shutdown triggered") + + // Stop the listeners so that we don't process further client requests. + c.cleanupGuard.Do(testCluster.Cleanup) + + // Finalize will wait until after Vault is sealed, which means the + // request forwarding listeners will also be closed (and also + // waited for). + for _, core := range testCluster.Cores { + if err := core.Shutdown(); err != nil { + c.UI.Error(fmt.Sprintf("Error with core shutdown: %s", err)) + } + } + + shutdownTriggered = true + + case <-c.SighupCh: + c.UI.Output("==> Vault reload triggered") + for _, core := range testCluster.Cores { + if err := c.Reload(core.ReloadFuncsLock, core.ReloadFuncs, nil, core.Core); err != nil { + c.UI.Error(fmt.Sprintf("Error(s) were encountered during reload: %s", err)) + } + } + } + } + + return 0 +} + +// addPlugin adds any plugins to the catalog +func (c *ServerCommand) addPlugin(path, token string, core *vault.Core) error { + // Get the sha256 of the file at the given path. + pluginSum := func(p string) (string, error) { + hasher := sha256.New() + f, err := os.Open(p) + if err != nil { + return "", err + } + defer f.Close() + if _, err := io.Copy(hasher, f); err != nil { + return "", err + } + return hex.EncodeToString(hasher.Sum(nil)), nil + } + + // Mount any test plugins. We intentionally do this before informing tests + // that the server is completely booted.
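+ // Registering the plugin below is roughly the API equivalent of this
+ // CLI sketch (the binary name and digest are illustrative only):
+ //
+ //   vault write sys/plugins/catalog/my-plugin \
+ //       sha256=<hex digest of the binary> command=my-plugin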
+ sha256sum, err := pluginSum(path) + if err != nil { + return err + } + + // Default the name to the basename of the binary + name := filepath.Base(path) + + // File a request against core to enable the plugin + req := &logical.Request{ + Operation: logical.UpdateOperation, + ClientToken: token, + Path: fmt.Sprintf("sys/plugins/catalog/%s", name), + Data: map[string]interface{}{ + "sha256": sha256sum, + "command": name, + }, + } + ctx := namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace) + if _, err := core.HandleRequest(ctx, req); err != nil { + return err + } + + return nil +} + +// detectRedirect is used to attempt redirect address detection +func (c *ServerCommand) detectRedirect(detect physical.RedirectDetect, + config *server.Config, +) (string, error) { + // Get the hostname + host, err := detect.DetectHostAddr() + if err != nil { + return "", err + } + + // set [] for ipv6 addresses + if strings.Contains(host, ":") && !strings.Contains(host, "]") { + host = "[" + host + "]" + } + + // Default the port and scheme + scheme := "https" + port := 8200 + + // Attempt to detect overrides + for _, list := range config.Listeners { + // Only attempt TCP + if list.Type != "tcp" { + continue + } + + // Check if TLS is disabled + if list.TLSDisable { + scheme = "http" + } + + // Check for address override + addr := list.Address + if addr == "" { + addr = "127.0.0.1:8200" + } + + // Check for localhost + hostStr, portStr, err := net.SplitHostPort(addr) + if err != nil { + continue + } + if hostStr == "127.0.0.1" { + host = hostStr + } + + // Check for custom port + listPort, err := strconv.Atoi(portStr) + if err != nil { + continue + } + port = listPort + } + + // Build a URL + url := &url.URL{ + Scheme: scheme, + Host: fmt.Sprintf("%s:%d", host, port), + } + + // Return the URL string + return url.String(), nil +} + +func (c *ServerCommand) Reload(lock *sync.RWMutex, reloadFuncs *map[string][]reloadutil.ReloadFunc, configPath []string, core *vault.Core) error { + lock.RLock() + defer lock.RUnlock() + + var reloadErrors *multierror.Error + + for k, relFuncs := range *reloadFuncs { + switch { + case strings.HasPrefix(k, "listener|"): + for _, relFunc := range relFuncs { + if relFunc != nil { + if err := relFunc(); err != nil { + reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("error encountered reloading listener: %w", err)) + } + } + } + + case strings.HasPrefix(k, "audit_file|"): + for _, relFunc := range relFuncs { + if relFunc != nil { + if err := relFunc(); err != nil { + reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("error encountered reloading file audit device at path %q: %w", strings.TrimPrefix(k, "audit_file|"), err)) + } + } + } + } + } + + // Set Introspection Endpoint to enabled with new value in the config after reload + core.ReloadIntrospectionEndpointEnabled() + + // Send a message that we reloaded. This prevents "guessing" sleep times + // in tests. 
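+ // The send below is deliberately non-blocking: if no test is waiting
+ // on reloadedCh, the default case drops the notification rather than
+ // stalling the reload path.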
+ select { + case c.reloadedCh <- struct{}{}: + default: + } + + return reloadErrors.ErrorOrNil() +} + +// storePidFile is used to write out our PID to a file if necessary +func (c *ServerCommand) storePidFile(pidPath string) error { + // Quit fast if no pidfile + if pidPath == "" { + return nil + } + + // Open the PID file + pidFile, err := os.OpenFile(pidPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o600) + if err != nil { + return fmt.Errorf("could not open pid file: %w", err) + } + defer pidFile.Close() + + // Write out the PID + pid := os.Getpid() + _, err = pidFile.WriteString(fmt.Sprintf("%d", pid)) + if err != nil { + return fmt.Errorf("could not write to pid file: %w", err) + } + return nil +} + +// removePidFile is used to clean up the PID file if necessary +func (c *ServerCommand) removePidFile(pidPath string) error { + if pidPath == "" { + return nil + } + return os.Remove(pidPath) +} + +// storageMigrationActive checks and warns against in-progress storage migrations. +// This function will block until storage is available. +func (c *ServerCommand) storageMigrationActive(backend physical.Backend) bool { + first := true + + for { + migrationStatus, err := CheckStorageMigration(backend) + if err == nil { + if migrationStatus != nil { + startTime := migrationStatus.Start.Format(time.RFC3339) + c.UI.Error(wrapAtLength(fmt.Sprintf("ERROR! Storage migration in progress (started: %s). "+ + "Server startup is prevented until the migration completes. Use 'vault operator migrate -reset' "+ + "to force clear the migration lock.", startTime))) + return true + } + return false + } + if first { + first = false + c.UI.Warn("\nWARNING! Unable to read storage migration status.") + + // unexpected state, so stop buffering log messages + c.flushLog() + } + c.logger.Warn("storage migration check error", "error", err.Error()) + + timer := time.NewTimer(2 * time.Second) + select { + case <-timer.C: + case <-c.ShutdownCh: + timer.Stop() + return true + } + } +} + +type StorageMigrationStatus struct { + Start time.Time `json:"start"` +} + +func CheckStorageMigration(b physical.Backend) (*StorageMigrationStatus, error) { + entry, err := b.Get(context.Background(), storageMigrationLock) + if err != nil { + return nil, err + } + + if entry == nil { + return nil, nil + } + + var status StorageMigrationStatus + if err := jsonutil.DecodeJSON(entry.Value, &status); err != nil { + return nil, err + } + + return &status, nil +} + +// setSeal returns barrierSeal, barrierWrapper, unwrapSeal, and all the created seals from the configs so we can close them in Run. +// The two errors are the sealConfigError and the regular error. +func setSeal(c *ServerCommand, config *server.Config, infoKeys []string, info map[string]string) (vault.Seal, wrapping.Wrapper, vault.Seal, []vault.Seal, error, error) { + var barrierSeal vault.Seal + var unwrapSeal vault.Seal + + var sealConfigError error + var wrapper wrapping.Wrapper + var barrierWrapper wrapping.Wrapper + if c.flagDevAutoSeal { + var err error + access, _ := vaultseal.NewTestSeal(nil) + barrierSeal, err = vault.NewAutoSeal(access) + if err != nil { + return nil, nil, nil, nil, nil, err + } + return barrierSeal, nil, nil, nil, nil, nil + } + + // Handle the case where no seal is provided + switch len(config.Seals) { + case 0: + config.Seals = append(config.Seals, &configutil.KMS{Type: wrapping.WrapperTypeShamir.String()}) + case 1: + // If there's only one seal and it's disabled, assume they want to + // migrate to a shamir seal and simply didn't provide it + if
config.Seals[0].Disabled {
+			config.Seals = append(config.Seals, &configutil.KMS{Type: wrapping.WrapperTypeShamir.String()})
+		}
+	}
+	// Start empty with capacity so the appends below do not leave nil entries.
+	createdSeals := make([]vault.Seal, 0, len(config.Seals))
+	for _, configSeal := range config.Seals {
+		sealType := wrapping.WrapperTypeShamir.String()
+		if !configSeal.Disabled && os.Getenv("VAULT_SEAL_TYPE") != "" {
+			sealType = os.Getenv("VAULT_SEAL_TYPE")
+			configSeal.Type = sealType
+		} else {
+			sealType = configSeal.Type
+		}
+
+		var seal vault.Seal
+		sealLogger := c.logger.ResetNamed(fmt.Sprintf("seal.%s", sealType))
+		c.allLoggers = append(c.allLoggers, sealLogger)
+		defaultSeal := vault.NewDefaultSeal(vaultseal.NewAccess(aeadwrapper.NewShamirWrapper()))
+		var sealInfoKeys []string
+		sealInfoMap := map[string]string{}
+		wrapper, sealConfigError = configutil.ConfigureWrapper(configSeal, &sealInfoKeys, &sealInfoMap, sealLogger)
+		if sealConfigError != nil {
+			if !errwrap.ContainsType(sealConfigError, new(logical.KeyNotFoundError)) {
+				return barrierSeal, barrierWrapper, unwrapSeal, createdSeals, sealConfigError, fmt.Errorf(
+					"Error parsing Seal configuration: %s", sealConfigError)
+			}
+		}
+		if wrapper == nil {
+			seal = defaultSeal
+		} else {
+			var err error
+			seal, err = vault.NewAutoSeal(vaultseal.NewAccess(wrapper))
+			if err != nil {
+				return nil, nil, nil, nil, nil, err
+			}
+		}
+		infoPrefix := ""
+		if configSeal.Disabled {
+			unwrapSeal = seal
+			infoPrefix = "Old "
+		} else {
+			barrierSeal = seal
+			barrierWrapper = wrapper
+		}
+		for _, k := range sealInfoKeys {
+			infoKeys = append(infoKeys, infoPrefix+k)
+			info[infoPrefix+k] = sealInfoMap[k]
+		}
+		createdSeals = append(createdSeals, seal)
+	}
+	return barrierSeal, barrierWrapper, unwrapSeal, createdSeals, sealConfigError, nil
+}
+
+func initHaBackend(c *ServerCommand, config *server.Config, coreConfig *vault.CoreConfig, backend physical.Backend) (bool, error) {
+	// Initialize the separate HA storage backend, if it exists
+	var ok bool
+	if config.HAStorage != nil {
+		if config.Storage.Type == storageTypeRaft && config.HAStorage.Type == storageTypeRaft {
+			return false, fmt.Errorf("Raft cannot be set both as 'storage' and 'ha_storage'. Setting 'storage' to 'raft' will automatically set it up for HA operations as well")
+		}
+
+		if config.Storage.Type == storageTypeRaft {
+			return false, fmt.Errorf("HA storage cannot be declared when Raft is the storage type")
+		}
+
+		factory, exists := c.PhysicalBackends[config.HAStorage.Type]
+		if !exists {
+			return false, fmt.Errorf("Unknown HA storage type %s", config.HAStorage.Type)
+		}
+
+		namedHALogger := c.logger.Named("ha."
+ config.HAStorage.Type) + c.allLoggers = append(c.allLoggers, namedHALogger) + habackend, err := factory(config.HAStorage.Config, namedHALogger) + if err != nil { + return false, fmt.Errorf("Error initializing HA storage of type %s: %s", config.HAStorage.Type, err) + } + + if coreConfig.HAPhysical, ok = habackend.(physical.HABackend); !ok { + return false, fmt.Errorf("Specified HA storage does not support HA") + } + + if !coreConfig.HAPhysical.HAEnabled() { + return false, fmt.Errorf("Specified HA storage has HA support disabled; please consult documentation") + } + + coreConfig.RedirectAddr = config.HAStorage.RedirectAddr + disableClustering := config.HAStorage.DisableClustering + + if config.HAStorage.Type == storageTypeRaft && disableClustering { + return disableClustering, fmt.Errorf("Disable clustering cannot be set to true when Raft is the HA storage type") + } + + if !disableClustering { + coreConfig.ClusterAddr = config.HAStorage.ClusterAddr + } + } else { + if coreConfig.HAPhysical, ok = backend.(physical.HABackend); ok { + coreConfig.RedirectAddr = config.Storage.RedirectAddr + disableClustering := config.Storage.DisableClustering + + if (config.Storage.Type == storageTypeRaft) && disableClustering { + return disableClustering, fmt.Errorf("Disable clustering cannot be set to true when Raft is the storage type") + } + + if !disableClustering { + coreConfig.ClusterAddr = config.Storage.ClusterAddr + } + } + } + return config.DisableClustering, nil +} + +func determineRedirectAddr(c *ServerCommand, coreConfig *vault.CoreConfig, config *server.Config) error { + var retErr error + if envRA := os.Getenv("VAULT_API_ADDR"); envRA != "" { + coreConfig.RedirectAddr = envRA + } else if envRA := os.Getenv("VAULT_REDIRECT_ADDR"); envRA != "" { + coreConfig.RedirectAddr = envRA + } else if envAA := os.Getenv("VAULT_ADVERTISE_ADDR"); envAA != "" { + coreConfig.RedirectAddr = envAA + } + + // Attempt to detect the redirect address, if possible + if coreConfig.RedirectAddr == "" { + c.logger.Warn("no `api_addr` value specified in config or in VAULT_API_ADDR; falling back to detection if possible, but this value should be manually set") + } + + var ok bool + var detect physical.RedirectDetect + if coreConfig.HAPhysical != nil && coreConfig.HAPhysical.HAEnabled() { + detect, ok = coreConfig.HAPhysical.(physical.RedirectDetect) + } else { + detect, ok = coreConfig.Physical.(physical.RedirectDetect) + } + if ok && coreConfig.RedirectAddr == "" { + redirect, err := c.detectRedirect(detect, config) + // the following errors did not cause Run to return, so I'm not returning these + // as errors. 
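+	// A detection failure therefore only populates retErr; startup continues,
+	// and the dev-mode fallback below may still provide a redirect address.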
+ if err != nil { + retErr = fmt.Errorf("Error detecting api address: %s", err) + } else if redirect == "" { + retErr = fmt.Errorf("Failed to detect api address") + } else { + coreConfig.RedirectAddr = redirect + } + } + if coreConfig.RedirectAddr == "" && c.flagDev { + protocol := "http" + if c.flagDevTLS { + protocol = "https" + } + coreConfig.RedirectAddr = fmt.Sprintf("%s://%s", protocol, config.Listeners[0].Address) + } + return retErr +} + +func findClusterAddress(c *ServerCommand, coreConfig *vault.CoreConfig, config *server.Config, disableClustering bool) error { + if disableClustering { + coreConfig.ClusterAddr = "" + } else if envCA := os.Getenv("VAULT_CLUSTER_ADDR"); envCA != "" { + coreConfig.ClusterAddr = envCA + } else { + var addrToUse string + switch { + case coreConfig.ClusterAddr == "" && coreConfig.RedirectAddr != "": + addrToUse = coreConfig.RedirectAddr + case c.flagDev: + addrToUse = fmt.Sprintf("http://%s", config.Listeners[0].Address) + default: + goto CLUSTER_SYNTHESIS_COMPLETE + } + u, err := url.ParseRequestURI(addrToUse) + if err != nil { + return fmt.Errorf("Error parsing synthesized cluster address %s: %v", addrToUse, err) + } + host, port, err := net.SplitHostPort(u.Host) + if err != nil { + // This sucks, as it's a const in the function but not exported in the package + if strings.Contains(err.Error(), "missing port in address") { + host = u.Host + port = "443" + } else { + return fmt.Errorf("Error parsing api address: %v", err) + } + } + nPort, err := strconv.Atoi(port) + if err != nil { + return fmt.Errorf("Error parsing synthesized address; failed to convert %q to a numeric: %v", port, err) + } + u.Host = net.JoinHostPort(host, strconv.Itoa(nPort+1)) + // Will always be TLS-secured + u.Scheme = "https" + coreConfig.ClusterAddr = u.String() + } + +CLUSTER_SYNTHESIS_COMPLETE: + + if coreConfig.RedirectAddr == coreConfig.ClusterAddr && len(coreConfig.RedirectAddr) != 0 { + return fmt.Errorf("Address %q used for both API and cluster addresses", coreConfig.RedirectAddr) + } + + if coreConfig.ClusterAddr != "" { + rendered, err := configutil.ParseSingleIPTemplate(coreConfig.ClusterAddr) + if err != nil { + return fmt.Errorf("Error parsing cluster address %s: %v", coreConfig.ClusterAddr, err) + } + coreConfig.ClusterAddr = rendered + // Force https as we'll always be TLS-secured + u, err := url.ParseRequestURI(coreConfig.ClusterAddr) + if err != nil { + return fmt.Errorf("Error parsing cluster address %s: %v", coreConfig.ClusterAddr, err) + } + u.Scheme = "https" + coreConfig.ClusterAddr = u.String() + } + return nil +} + +func runUnseal(c *ServerCommand, core *vault.Core, ctx context.Context) { + for { + err := core.UnsealWithStoredKeys(ctx) + if err == nil { + return + } + + if vault.IsFatalError(err) { + c.logger.Error("error unsealing core", "error", err) + return + } + c.logger.Warn("failed to unseal core", "error", err) + + timer := time.NewTimer(5 * time.Second) + select { + case <-c.ShutdownCh: + timer.Stop() + return + case <-timer.C: + } + } +} + +func createCoreConfig(c *ServerCommand, config *server.Config, backend physical.Backend, configSR sr.ServiceRegistration, barrierSeal, unwrapSeal vault.Seal, + metricsHelper *metricsutil.MetricsHelper, metricSink *metricsutil.ClusterMetricSink, secureRandomReader io.Reader, +) vault.CoreConfig { + coreConfig := &vault.CoreConfig{ + RawConfig: config, + Physical: backend, + RedirectAddr: config.Storage.RedirectAddr, + StorageType: config.Storage.Type, + HAPhysical: nil, + ServiceRegistration: configSR, + Seal: 
barrierSeal, + UnwrapSeal: unwrapSeal, + AuditBackends: c.AuditBackends, + CredentialBackends: c.CredentialBackends, + LogicalBackends: c.LogicalBackends, + LogLevel: config.LogLevel, + Logger: c.logger, + DetectDeadlocks: config.DetectDeadlocks, + ImpreciseLeaseRoleTracking: config.ImpreciseLeaseRoleTracking, + DisableSentinelTrace: config.DisableSentinelTrace, + DisableCache: config.DisableCache, + DisableMlock: config.DisableMlock, + MaxLeaseTTL: config.MaxLeaseTTL, + DefaultLeaseTTL: config.DefaultLeaseTTL, + ClusterName: config.ClusterName, + CacheSize: config.CacheSize, + PluginDirectory: config.PluginDirectory, + PluginFileUid: config.PluginFileUid, + PluginFilePermissions: config.PluginFilePermissions, + EnableUI: config.EnableUI, + EnableRaw: config.EnableRawEndpoint, + EnableIntrospection: config.EnableIntrospectionEndpoint, + DisableSealWrap: config.DisableSealWrap, + DisablePerformanceStandby: config.DisablePerformanceStandby, + DisableIndexing: config.DisableIndexing, + AllLoggers: c.allLoggers, + BuiltinRegistry: builtinplugins.Registry, + DisableKeyEncodingChecks: config.DisablePrintableCheck, + MetricsHelper: metricsHelper, + MetricSink: metricSink, + SecureRandomReader: secureRandomReader, + EnableResponseHeaderHostname: config.EnableResponseHeaderHostname, + EnableResponseHeaderRaftNodeID: config.EnableResponseHeaderRaftNodeID, + License: config.License, + LicensePath: config.LicensePath, + DisableSSCTokens: config.DisableSSCTokens, + Experiments: config.Experiments, + AdministrativeNamespacePath: config.AdministrativeNamespacePath, + } + + if c.flagDev { + coreConfig.EnableRaw = true + coreConfig.EnableIntrospection = true + coreConfig.DevToken = c.flagDevRootTokenID + if c.flagDevLeasedKV { + coreConfig.LogicalBackends["kv"] = vault.LeasedPassthroughBackendFactory + } + if c.flagDevPluginDir != "" { + coreConfig.PluginDirectory = c.flagDevPluginDir + } + if c.flagDevLatency > 0 { + injectLatency := time.Duration(c.flagDevLatency) * time.Millisecond + if _, txnOK := backend.(physical.Transactional); txnOK { + coreConfig.Physical = physical.NewTransactionalLatencyInjector(backend, injectLatency, c.flagDevLatencyJitter, c.logger) + } else { + coreConfig.Physical = physical.NewLatencyInjector(backend, injectLatency, c.flagDevLatencyJitter, c.logger) + } + } + } + return *coreConfig +} + +func runListeners(c *ServerCommand, coreConfig *vault.CoreConfig, config *server.Config, configSR sr.ServiceRegistration) error { + if sd := coreConfig.GetServiceRegistration(); sd != nil { + if err := configSR.Run(c.ShutdownCh, c.WaitGroup, coreConfig.RedirectAddr); err != nil { + return fmt.Errorf("Error running service_registration of type %s: %s", config.ServiceRegistration.Type, err) + } + } + return nil +} + +func initDevCore(c *ServerCommand, coreConfig *vault.CoreConfig, config *server.Config, core *vault.Core, certDir string, clusterJSON *testcluster.ClusterJson) error { + if c.flagDev && !c.flagDevSkipInit { + + init, err := c.enableDev(core, coreConfig) + if err != nil { + return fmt.Errorf("Error initializing Dev mode: %s", err) + } + + if clusterJSON != nil { + clusterJSON.RootToken = init.RootToken + } + + var plugins, pluginsNotLoaded []string + if c.flagDevPluginDir != "" && c.flagDevPluginInit { + + f, err := os.Open(c.flagDevPluginDir) + if err != nil { + return fmt.Errorf("Error reading plugin dir: %s", err) + } + + list, err := f.Readdirnames(0) + f.Close() + if err != nil { + return fmt.Errorf("Error listing plugins: %s", err) + } + + for _, name := range list { + path 
:= filepath.Join(f.Name(), name) + if err := c.addPlugin(path, init.RootToken, core); err != nil { + if !errwrap.Contains(err, vault.ErrPluginBadType.Error()) { + return fmt.Errorf("Error enabling plugin %s: %s", name, err) + } + pluginsNotLoaded = append(pluginsNotLoaded, name) + continue + } + plugins = append(plugins, name) + } + + sort.Strings(plugins) + } + + var qw *quiescenceSink + var qwo sync.Once + qw = &quiescenceSink{ + t: time.AfterFunc(100*time.Millisecond, func() { + qwo.Do(func() { + c.logger.DeregisterSink(qw) + + // Print the big dev mode warning! + c.UI.Warn(wrapAtLength( + "WARNING! dev mode is enabled! In this mode, Vault runs entirely " + + "in-memory and starts unsealed with a single unseal key. The root " + + "token is already authenticated to the CLI, so you can immediately " + + "begin using Vault.")) + c.UI.Warn("") + c.UI.Warn("You may need to set the following environment variables:") + c.UI.Warn("") + + protocol := "http://" + if c.flagDevTLS { + protocol = "https://" + } + + endpointURL := protocol + config.Listeners[0].Address + if runtime.GOOS == "windows" { + c.UI.Warn("PowerShell:") + c.UI.Warn(fmt.Sprintf(" $env:VAULT_ADDR=\"%s\"", endpointURL)) + c.UI.Warn("cmd.exe:") + c.UI.Warn(fmt.Sprintf(" set VAULT_ADDR=%s", endpointURL)) + } else { + c.UI.Warn(fmt.Sprintf(" $ export VAULT_ADDR='%s'", endpointURL)) + } + + if c.flagDevTLS { + if runtime.GOOS == "windows" { + c.UI.Warn("PowerShell:") + c.UI.Warn(fmt.Sprintf(" $env:VAULT_CACERT=\"%s/vault-ca.pem\"", certDir)) + c.UI.Warn("cmd.exe:") + c.UI.Warn(fmt.Sprintf(" set VAULT_CACERT=%s/vault-ca.pem", certDir)) + } else { + c.UI.Warn(fmt.Sprintf(" $ export VAULT_CACERT='%s/vault-ca.pem'", certDir)) + } + c.UI.Warn("") + } + + // Unseal key is not returned if stored shares is supported + if len(init.SecretShares) > 0 { + c.UI.Warn("") + c.UI.Warn(wrapAtLength( + "The unseal key and root token are displayed below in case you want " + + "to seal/unseal the Vault or re-authenticate.")) + c.UI.Warn("") + c.UI.Warn(fmt.Sprintf("Unseal Key: %s", base64.StdEncoding.EncodeToString(init.SecretShares[0]))) + } + + if len(init.RecoveryShares) > 0 { + c.UI.Warn("") + c.UI.Warn(wrapAtLength( + "The recovery key and root token are displayed below in case you want " + + "to seal/unseal the Vault or re-authenticate.")) + c.UI.Warn("") + c.UI.Warn(fmt.Sprintf("Recovery Key: %s", base64.StdEncoding.EncodeToString(init.RecoveryShares[0]))) + } + + c.UI.Warn(fmt.Sprintf("Root Token: %s", init.RootToken)) + + if len(plugins) > 0 { + c.UI.Warn("") + c.UI.Warn(wrapAtLength( + "The following dev plugins are registered in the catalog:")) + for _, p := range plugins { + c.UI.Warn(fmt.Sprintf(" - %s", p)) + } + } + + if len(pluginsNotLoaded) > 0 { + c.UI.Warn("") + c.UI.Warn(wrapAtLength( + "The following dev plugins FAILED to be registered in the catalog due to unknown type:")) + for _, p := range pluginsNotLoaded { + c.UI.Warn(fmt.Sprintf(" - %s", p)) + } + } + + c.UI.Warn("") + c.UI.Warn(wrapAtLength( + "Development mode should NOT be used in production installations!")) + c.UI.Warn("") + }) + }), + } + c.logger.RegisterSink(qw) + } + return nil +} + +// Initialize the HTTP servers +func startHttpServers(c *ServerCommand, core *vault.Core, config *server.Config, lns []listenerutil.Listener) error { + for _, ln := range lns { + if ln.Config == nil { + return fmt.Errorf("Found nil listener config after parsing") + } + + if err := config2.IsValidListener(ln.Config); err != nil { + return err + } + + handler := 
vaulthttp.Handler.Handler(&vault.HandlerProperties{ + Core: core, + ListenerConfig: ln.Config, + DisablePrintableCheck: config.DisablePrintableCheck, + RecoveryMode: c.flagRecovery, + }) + + if len(ln.Config.XForwardedForAuthorizedAddrs) > 0 { + handler = vaulthttp.WrapForwardedForHandler(handler, ln.Config) + } + + // server defaults + server := &http.Server{ + Handler: handler, + ReadHeaderTimeout: 10 * time.Second, + ReadTimeout: 30 * time.Second, + IdleTimeout: 5 * time.Minute, + ErrorLog: c.logger.StandardLogger(nil), + } + + // override server defaults with config values for read/write/idle timeouts if configured + if ln.Config.HTTPReadHeaderTimeout > 0 { + server.ReadHeaderTimeout = ln.Config.HTTPReadHeaderTimeout + } + if ln.Config.HTTPReadTimeout > 0 { + server.ReadTimeout = ln.Config.HTTPReadTimeout + } + if ln.Config.HTTPWriteTimeout > 0 { + server.WriteTimeout = ln.Config.HTTPWriteTimeout + } + if ln.Config.HTTPIdleTimeout > 0 { + server.IdleTimeout = ln.Config.HTTPIdleTimeout + } + + // server config tests can exit now + if c.flagTestServerConfig { + continue + } + + go server.Serve(ln.Listener) + } + return nil +} + +func SetStorageMigration(b physical.Backend, active bool) error { + if !active { + return b.Delete(context.Background(), storageMigrationLock) + } + + status := StorageMigrationStatus{ + Start: time.Now(), + } + + enc, err := jsonutil.EncodeJSON(status) + if err != nil { + return err + } + + entry := &physical.Entry{ + Key: storageMigrationLock, + Value: enc, + } + + return b.Put(context.Background(), entry) +} + +type grpclogFaker struct { + logger hclog.Logger + log bool +} + +func (g *grpclogFaker) Fatal(args ...interface{}) { + g.logger.Error(fmt.Sprint(args...)) + os.Exit(1) +} + +func (g *grpclogFaker) Fatalf(format string, args ...interface{}) { + g.logger.Error(fmt.Sprintf(format, args...)) + os.Exit(1) +} + +func (g *grpclogFaker) Fatalln(args ...interface{}) { + g.logger.Error(fmt.Sprintln(args...)) + os.Exit(1) +} + +func (g *grpclogFaker) Print(args ...interface{}) { + if g.log && g.logger.IsDebug() { + g.logger.Debug(fmt.Sprint(args...)) + } +} + +func (g *grpclogFaker) Printf(format string, args ...interface{}) { + if g.log && g.logger.IsDebug() { + g.logger.Debug(fmt.Sprintf(format, args...)) + } +} + +func (g *grpclogFaker) Println(args ...interface{}) { + if g.log && g.logger.IsDebug() { + g.logger.Debug(fmt.Sprintln(args...)) + } +} diff --git a/command/server/config.go b/command/server/config.go new file mode 100644 index 0000000..2075b6e --- /dev/null +++ b/command/server/config.go @@ -0,0 +1,1234 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package server + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/vault/helper/experiments" + "github.com/hashicorp/vault/helper/osutil" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/helper/testcluster" + "github.com/mitchellh/mapstructure" +) + +const ( + VaultDevCAFilename = "vault-ca.pem" + VaultDevCertFilename = "vault-cert.pem" + VaultDevKeyFilename = "vault-key.pem" +) + +var ( + entConfigValidate = func(_ *Config, _ string) []configutil.ConfigError { + return nil + } + + // Modified internally for testing. + validExperiments = experiments.ValidExperiments() +) + +// Config is the configuration for the vault server. +type Config struct { + UnusedKeys configutil.UnusedKeyMap `hcl:",unusedKeyPositions"` + FoundKeys []string `hcl:",decodedFields"` + entConfig + + *configutil.SharedConfig `hcl:"-"` + + Storage *Storage `hcl:"-"` + HAStorage *Storage `hcl:"-"` + + ServiceRegistration *ServiceRegistration `hcl:"-"` + + Experiments []string `hcl:"experiments"` + + CacheSize int `hcl:"cache_size"` + DisableCache bool `hcl:"-"` + DisableCacheRaw interface{} `hcl:"disable_cache"` + DisablePrintableCheck bool `hcl:"-"` + DisablePrintableCheckRaw interface{} `hcl:"disable_printable_check"` + + EnableUI bool `hcl:"-"` + EnableUIRaw interface{} `hcl:"ui"` + + MaxLeaseTTL time.Duration `hcl:"-"` + MaxLeaseTTLRaw interface{} `hcl:"max_lease_ttl,alias:MaxLeaseTTL"` + DefaultLeaseTTL time.Duration `hcl:"-"` + DefaultLeaseTTLRaw interface{} `hcl:"default_lease_ttl,alias:DefaultLeaseTTL"` + + ClusterCipherSuites string `hcl:"cluster_cipher_suites"` + + PluginDirectory string `hcl:"plugin_directory"` + + PluginFileUid int `hcl:"plugin_file_uid"` + + PluginFilePermissions int `hcl:"-"` + PluginFilePermissionsRaw interface{} `hcl:"plugin_file_permissions,alias:PluginFilePermissions"` + + EnableIntrospectionEndpoint bool `hcl:"-"` + EnableIntrospectionEndpointRaw interface{} `hcl:"introspection_endpoint,alias:EnableIntrospectionEndpoint"` + + EnableRawEndpoint bool `hcl:"-"` + EnableRawEndpointRaw interface{} `hcl:"raw_storage_endpoint,alias:EnableRawEndpoint"` + + APIAddr string `hcl:"api_addr"` + ClusterAddr string `hcl:"cluster_addr"` + DisableClustering bool `hcl:"-"` + DisableClusteringRaw interface{} `hcl:"disable_clustering,alias:DisableClustering"` + + DisablePerformanceStandby bool `hcl:"-"` + DisablePerformanceStandbyRaw interface{} `hcl:"disable_performance_standby,alias:DisablePerformanceStandby"` + + DisableSealWrap bool `hcl:"-"` + DisableSealWrapRaw interface{} `hcl:"disable_sealwrap,alias:DisableSealWrap"` + + DisableIndexing bool `hcl:"-"` + DisableIndexingRaw interface{} `hcl:"disable_indexing,alias:DisableIndexing"` + + DisableSentinelTrace bool `hcl:"-"` + DisableSentinelTraceRaw interface{} `hcl:"disable_sentinel_trace,alias:DisableSentinelTrace"` + + EnableResponseHeaderHostname bool `hcl:"-"` + EnableResponseHeaderHostnameRaw interface{} `hcl:"enable_response_header_hostname"` + + LogRequestsLevel string `hcl:"-"` + LogRequestsLevelRaw interface{} `hcl:"log_requests_level"` + + DetectDeadlocks string `hcl:"detect_deadlocks"` + + 
ImpreciseLeaseRoleTracking bool `hcl:"imprecise_lease_role_tracking"` + + EnableResponseHeaderRaftNodeID bool `hcl:"-"` + EnableResponseHeaderRaftNodeIDRaw interface{} `hcl:"enable_response_header_raft_node_id"` + + License string `hcl:"-"` + LicensePath string `hcl:"license_path"` + DisableSSCTokens bool `hcl:"-"` +} + +const ( + sectionSeal = "Seal" +) + +func (c *Config) Validate(sourceFilePath string) []configutil.ConfigError { + results := configutil.ValidateUnusedFields(c.UnusedKeys, sourceFilePath) + if c.Telemetry != nil { + results = append(results, c.Telemetry.Validate(sourceFilePath)...) + } + if c.ServiceRegistration != nil { + results = append(results, c.ServiceRegistration.Validate(sourceFilePath)...) + } + for _, l := range c.Listeners { + results = append(results, l.Validate(sourceFilePath)...) + } + results = append(results, c.validateEnt(sourceFilePath)...) + return results +} + +func (c *Config) validateEnt(sourceFilePath string) []configutil.ConfigError { + return entConfigValidate(c, sourceFilePath) +} + +// DevConfig is a Config that is used for dev mode of Vault. +func DevConfig(storageType string) (*Config, error) { + hclStr := ` +disable_mlock = true + +listener "tcp" { + address = "127.0.0.1:8200" + tls_disable = true + proxy_protocol_behavior = "allow_authorized" + proxy_protocol_authorized_addrs = "127.0.0.1:8200" +} + +telemetry { + prometheus_retention_time = "24h" + disable_hostname = true +} +enable_raw_endpoint = true + +storage "%s" { +} + +ui = true +` + + hclStr = fmt.Sprintf(hclStr, storageType) + parsed, err := ParseConfig(hclStr, "") + if err != nil { + return nil, fmt.Errorf("error parsing dev config: %w", err) + } + return parsed, nil +} + +// DevTLSConfig is a Config that is used for dev tls mode of Vault. +func DevTLSConfig(storageType, certDir string) (*Config, error) { + ca, err := GenerateCA() + if err != nil { + return nil, err + } + + cert, key, err := GenerateCert(ca.Template, ca.Signer) + if err != nil { + return nil, err + } + + if err := os.WriteFile(fmt.Sprintf("%s/%s", certDir, VaultDevCAFilename), []byte(ca.PEM), 0o444); err != nil { + return nil, err + } + + if err := os.WriteFile(fmt.Sprintf("%s/%s", certDir, VaultDevCertFilename), []byte(cert), 0o400); err != nil { + return nil, err + } + + if err := os.WriteFile(fmt.Sprintf("%s/%s", certDir, VaultDevKeyFilename), []byte(key), 0o400); err != nil { + return nil, err + } + return parseDevTLSConfig(storageType, certDir) +} + +func parseDevTLSConfig(storageType, certDir string) (*Config, error) { + hclStr := ` +disable_mlock = true + +listener "tcp" { + address = "[::]:8200" + tls_cert_file = "%s/vault-cert.pem" + tls_key_file = "%s/vault-key.pem" + proxy_protocol_behavior = "allow_authorized" + proxy_protocol_authorized_addrs = "[::]:8200" +} + +telemetry { + prometheus_retention_time = "24h" + disable_hostname = true +} +enable_raw_endpoint = true + +storage "%s" { +} + +ui = true +` + certDirEscaped := strings.Replace(certDir, "\\", "\\\\", -1) + hclStr = fmt.Sprintf(hclStr, certDirEscaped, certDirEscaped, storageType) + parsed, err := ParseConfig(hclStr, "") + if err != nil { + return nil, err + } + + return parsed, nil +} + +// Storage is the underlying storage configuration for the server. +type Storage struct { + Type string + RedirectAddr string + ClusterAddr string + DisableClustering bool + Config map[string]string +} + +func (b *Storage) GoString() string { + return fmt.Sprintf("*%#v", *b) +} + +// ServiceRegistration is the optional service discovery for the server. 
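+//
+// An illustrative stanza (a sketch only; "consul" and the address shown are
+// one common provider configuration, not the only option):
+//
+//	service_registration "consul" {
+//	  address = "127.0.0.1:8500"
+//	}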
+type ServiceRegistration struct { + UnusedKeys configutil.UnusedKeyMap `hcl:",unusedKeyPositions"` + Type string + Config map[string]string +} + +func (b *ServiceRegistration) Validate(source string) []configutil.ConfigError { + return configutil.ValidateUnusedFields(b.UnusedKeys, source) +} + +func (b *ServiceRegistration) GoString() string { + return fmt.Sprintf("*%#v", *b) +} + +func NewConfig() *Config { + return &Config{ + SharedConfig: new(configutil.SharedConfig), + } +} + +// Merge merges two configurations. +func (c *Config) Merge(c2 *Config) *Config { + if c2 == nil { + return c + } + + result := NewConfig() + + result.SharedConfig = c.SharedConfig + if c2.SharedConfig != nil { + result.SharedConfig = c.SharedConfig.Merge(c2.SharedConfig) + } + + result.Storage = c.Storage + if c2.Storage != nil { + result.Storage = c2.Storage + } + + result.HAStorage = c.HAStorage + if c2.HAStorage != nil { + result.HAStorage = c2.HAStorage + } + + result.ServiceRegistration = c.ServiceRegistration + if c2.ServiceRegistration != nil { + result.ServiceRegistration = c2.ServiceRegistration + } + + result.CacheSize = c.CacheSize + if c2.CacheSize != 0 { + result.CacheSize = c2.CacheSize + } + + // merging these booleans via an OR operation + result.DisableCache = c.DisableCache + if c2.DisableCache { + result.DisableCache = c2.DisableCache + } + + result.DisableSentinelTrace = c.DisableSentinelTrace + if c2.DisableSentinelTrace { + result.DisableSentinelTrace = c2.DisableSentinelTrace + } + + result.DisablePrintableCheck = c.DisablePrintableCheck + if c2.DisablePrintableCheckRaw != nil { + result.DisablePrintableCheck = c2.DisablePrintableCheck + } + + // merge these integers via a MAX operation + result.MaxLeaseTTL = c.MaxLeaseTTL + if c2.MaxLeaseTTL > result.MaxLeaseTTL { + result.MaxLeaseTTL = c2.MaxLeaseTTL + } + + result.DefaultLeaseTTL = c.DefaultLeaseTTL + if c2.DefaultLeaseTTL > result.DefaultLeaseTTL { + result.DefaultLeaseTTL = c2.DefaultLeaseTTL + } + + result.ClusterCipherSuites = c.ClusterCipherSuites + if c2.ClusterCipherSuites != "" { + result.ClusterCipherSuites = c2.ClusterCipherSuites + } + + result.EnableUI = c.EnableUI + if c2.EnableUI { + result.EnableUI = c2.EnableUI + } + + result.EnableRawEndpoint = c.EnableRawEndpoint + if c2.EnableRawEndpoint { + result.EnableRawEndpoint = c2.EnableRawEndpoint + } + + result.EnableIntrospectionEndpoint = c.EnableIntrospectionEndpoint + if c2.EnableIntrospectionEndpoint { + result.EnableIntrospectionEndpoint = c2.EnableIntrospectionEndpoint + } + + result.APIAddr = c.APIAddr + if c2.APIAddr != "" { + result.APIAddr = c2.APIAddr + } + + result.ClusterAddr = c.ClusterAddr + if c2.ClusterAddr != "" { + result.ClusterAddr = c2.ClusterAddr + } + + // Retain raw value so that it can be assigned to storage objects + result.DisableClustering = c.DisableClustering + result.DisableClusteringRaw = c.DisableClusteringRaw + if c2.DisableClusteringRaw != nil { + result.DisableClustering = c2.DisableClustering + result.DisableClusteringRaw = c2.DisableClusteringRaw + } + + result.PluginDirectory = c.PluginDirectory + if c2.PluginDirectory != "" { + result.PluginDirectory = c2.PluginDirectory + } + + result.PluginFileUid = c.PluginFileUid + if c2.PluginFileUid != 0 { + result.PluginFileUid = c2.PluginFileUid + } + + result.PluginFilePermissions = c.PluginFilePermissions + if c2.PluginFilePermissionsRaw != nil { + result.PluginFilePermissions = c2.PluginFilePermissions + result.PluginFilePermissionsRaw = c2.PluginFilePermissionsRaw + } + + 
result.DisablePerformanceStandby = c.DisablePerformanceStandby + if c2.DisablePerformanceStandby { + result.DisablePerformanceStandby = c2.DisablePerformanceStandby + } + + result.DisableSealWrap = c.DisableSealWrap + if c2.DisableSealWrap { + result.DisableSealWrap = c2.DisableSealWrap + } + + result.DisableIndexing = c.DisableIndexing + if c2.DisableIndexing { + result.DisableIndexing = c2.DisableIndexing + } + + result.EnableResponseHeaderHostname = c.EnableResponseHeaderHostname + if c2.EnableResponseHeaderHostname { + result.EnableResponseHeaderHostname = c2.EnableResponseHeaderHostname + } + + result.LogRequestsLevel = c.LogRequestsLevel + if c2.LogRequestsLevel != "" { + result.LogRequestsLevel = c2.LogRequestsLevel + } + + result.DetectDeadlocks = c.DetectDeadlocks + if c2.DetectDeadlocks != "" { + result.DetectDeadlocks = c2.DetectDeadlocks + } + + result.ImpreciseLeaseRoleTracking = c.ImpreciseLeaseRoleTracking + if c2.ImpreciseLeaseRoleTracking { + result.ImpreciseLeaseRoleTracking = c2.ImpreciseLeaseRoleTracking + } + + result.EnableResponseHeaderRaftNodeID = c.EnableResponseHeaderRaftNodeID + if c2.EnableResponseHeaderRaftNodeID { + result.EnableResponseHeaderRaftNodeID = c2.EnableResponseHeaderRaftNodeID + } + + result.LicensePath = c.LicensePath + if c2.LicensePath != "" { + result.LicensePath = c2.LicensePath + } + + // Use values from top-level configuration for storage if set + if storage := result.Storage; storage != nil { + if result.APIAddr != "" { + storage.RedirectAddr = result.APIAddr + } + if result.ClusterAddr != "" { + storage.ClusterAddr = result.ClusterAddr + } + if result.DisableClusteringRaw != nil { + storage.DisableClustering = result.DisableClustering + } + } + + if haStorage := result.HAStorage; haStorage != nil { + if result.APIAddr != "" { + haStorage.RedirectAddr = result.APIAddr + } + if result.ClusterAddr != "" { + haStorage.ClusterAddr = result.ClusterAddr + } + if result.DisableClusteringRaw != nil { + haStorage.DisableClustering = result.DisableClustering + } + } + + result.AdministrativeNamespacePath = c.AdministrativeNamespacePath + if c2.AdministrativeNamespacePath != "" { + result.AdministrativeNamespacePath = c2.AdministrativeNamespacePath + } + + result.entConfig = c.entConfig.Merge(c2.entConfig) + + result.Experiments = mergeExperiments(c.Experiments, c2.Experiments) + + return result +} + +// LoadConfig loads the configuration at the given path, regardless if +// its a file or directory. 
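+//
+// An illustrative call (the path shown is hypothetical):
+//
+//	cfg, err := LoadConfig("/etc/vault/vault.hcl")
+//	if err != nil {
+//		// handle a missing, unreadable, or invalid configuration
+//	}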
+func LoadConfig(path string) (*Config, error) { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + + if fi.IsDir() { + // check permissions on the config directory + var enableFilePermissionsCheck bool + if enableFilePermissionsCheckEnv := os.Getenv(consts.VaultEnableFilePermissionsCheckEnv); enableFilePermissionsCheckEnv != "" { + var err error + enableFilePermissionsCheck, err = strconv.ParseBool(enableFilePermissionsCheckEnv) + if err != nil { + return nil, errors.New("Error parsing the environment variable VAULT_ENABLE_FILE_PERMISSIONS_CHECK") + } + } + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + if enableFilePermissionsCheck { + err = osutil.OwnerPermissionsMatchFile(f, 0, 0) + if err != nil { + return nil, err + } + } + return CheckConfig(LoadConfigDir(path)) + } + return CheckConfig(LoadConfigFile(path)) +} + +func CheckConfig(c *Config, e error) (*Config, error) { + if e != nil { + return c, e + } + + if len(c.Seals) == 2 { + switch { + case c.Seals[0].Disabled && c.Seals[1].Disabled: + return nil, errors.New("seals: two seals provided but both are disabled") + case !c.Seals[0].Disabled && !c.Seals[1].Disabled: + return nil, errors.New("seals: two seals provided but neither is disabled") + } + } + + return c, nil +} + +// LoadConfigFile loads the configuration from the given file. +func LoadConfigFile(path string) (*Config, error) { + // Open the file + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + // Read the file + d, err := io.ReadAll(f) + if err != nil { + return nil, err + } + + conf, err := ParseConfig(string(d), path) + if err != nil { + return nil, err + } + + var enableFilePermissionsCheck bool + if enableFilePermissionsCheckEnv := os.Getenv(consts.VaultEnableFilePermissionsCheckEnv); enableFilePermissionsCheckEnv != "" { + var err error + enableFilePermissionsCheck, err = strconv.ParseBool(enableFilePermissionsCheckEnv) + if err != nil { + return nil, errors.New("Error parsing the environment variable VAULT_ENABLE_FILE_PERMISSIONS_CHECK") + } + } + + if enableFilePermissionsCheck { + // check permissions of the config file + err = osutil.OwnerPermissionsMatchFile(f, 0, 0) + if err != nil { + return nil, err + } + // check permissions of the plugin directory + if conf.PluginDirectory != "" { + + err = osutil.OwnerPermissionsMatch(conf.PluginDirectory, conf.PluginFileUid, conf.PluginFilePermissions) + if err != nil { + return nil, err + } + } + } + return conf, nil +} + +func ParseConfig(d, source string) (*Config, error) { + // Parse! 
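+	// hcl.Parse builds the AST; DecodeObject maps it onto Config, and a second
+	// pass (configutil.ParseConfig below) fills in the shared configuration.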
+ obj, err := hcl.Parse(d) + if err != nil { + return nil, err + } + + // Start building the result + result := NewConfig() + if err := hcl.DecodeObject(result, obj); err != nil { + return nil, err + } + + if rendered, err := configutil.ParseSingleIPTemplate(result.APIAddr); err != nil { + return nil, err + } else { + result.APIAddr = rendered + } + if rendered, err := configutil.ParseSingleIPTemplate(result.ClusterAddr); err != nil { + return nil, err + } else { + result.ClusterAddr = rendered + } + + sharedConfig, err := configutil.ParseConfig(d) + if err != nil { + return nil, err + } + result.SharedConfig = sharedConfig + + if result.MaxLeaseTTLRaw != nil { + if result.MaxLeaseTTL, err = parseutil.ParseDurationSecond(result.MaxLeaseTTLRaw); err != nil { + return nil, err + } + } + if result.DefaultLeaseTTLRaw != nil { + if result.DefaultLeaseTTL, err = parseutil.ParseDurationSecond(result.DefaultLeaseTTLRaw); err != nil { + return nil, err + } + } + + if result.EnableUIRaw != nil { + if result.EnableUI, err = parseutil.ParseBool(result.EnableUIRaw); err != nil { + return nil, err + } + } + + if result.DisableCacheRaw != nil { + if result.DisableCache, err = parseutil.ParseBool(result.DisableCacheRaw); err != nil { + return nil, err + } + } + + if result.DisablePrintableCheckRaw != nil { + if result.DisablePrintableCheck, err = parseutil.ParseBool(result.DisablePrintableCheckRaw); err != nil { + return nil, err + } + } + + if result.EnableRawEndpointRaw != nil { + if result.EnableRawEndpoint, err = parseutil.ParseBool(result.EnableRawEndpointRaw); err != nil { + return nil, err + } + } + + if result.EnableIntrospectionEndpointRaw != nil { + if result.EnableIntrospectionEndpoint, err = parseutil.ParseBool(result.EnableIntrospectionEndpointRaw); err != nil { + return nil, err + } + } + + if result.DisableClusteringRaw != nil { + if result.DisableClustering, err = parseutil.ParseBool(result.DisableClusteringRaw); err != nil { + return nil, err + } + } + + if result.PluginFilePermissionsRaw != nil { + octalPermissionsString, err := parseutil.ParseString(result.PluginFilePermissionsRaw) + if err != nil { + return nil, err + } + pluginFilePermissions, err := strconv.ParseInt(octalPermissionsString, 8, 64) + if err != nil { + return nil, err + } + if pluginFilePermissions < math.MinInt || pluginFilePermissions > math.MaxInt { + return nil, fmt.Errorf("file permission value %v cannot be safely cast to int: exceeds bounds (%v, %v)", pluginFilePermissions, math.MinInt, math.MaxInt) + } + result.PluginFilePermissions = int(pluginFilePermissions) + } + + if result.DisableSentinelTraceRaw != nil { + if result.DisableSentinelTrace, err = parseutil.ParseBool(result.DisableSentinelTraceRaw); err != nil { + return nil, err + } + } + + if result.DisablePerformanceStandbyRaw != nil { + if result.DisablePerformanceStandby, err = parseutil.ParseBool(result.DisablePerformanceStandbyRaw); err != nil { + return nil, err + } + } + + if result.DisableSealWrapRaw != nil { + if result.DisableSealWrap, err = parseutil.ParseBool(result.DisableSealWrapRaw); err != nil { + return nil, err + } + } + + if result.DisableIndexingRaw != nil { + if result.DisableIndexing, err = parseutil.ParseBool(result.DisableIndexingRaw); err != nil { + return nil, err + } + } + + if result.EnableResponseHeaderHostnameRaw != nil { + if result.EnableResponseHeaderHostname, err = parseutil.ParseBool(result.EnableResponseHeaderHostnameRaw); err != nil { + return nil, err + } + } + + if result.LogRequestsLevelRaw != nil { + 
result.LogRequestsLevel = strings.ToLower(strings.TrimSpace(result.LogRequestsLevelRaw.(string))) + result.LogRequestsLevelRaw = "" + } + + if result.EnableResponseHeaderRaftNodeIDRaw != nil { + if result.EnableResponseHeaderRaftNodeID, err = parseutil.ParseBool(result.EnableResponseHeaderRaftNodeIDRaw); err != nil { + return nil, err + } + } + + list, ok := obj.Node.(*ast.ObjectList) + if !ok { + return nil, fmt.Errorf("error parsing: file doesn't contain a root object") + } + + // Look for storage but still support old backend + if o := list.Filter("storage"); len(o.Items) > 0 { + delete(result.UnusedKeys, "storage") + if err := ParseStorage(result, o, "storage"); err != nil { + return nil, fmt.Errorf("error parsing 'storage': %w", err) + } + result.found(result.Storage.Type, result.Storage.Type) + } else { + delete(result.UnusedKeys, "backend") + if o := list.Filter("backend"); len(o.Items) > 0 { + if err := ParseStorage(result, o, "backend"); err != nil { + return nil, fmt.Errorf("error parsing 'backend': %w", err) + } + } + } + + if o := list.Filter("ha_storage"); len(o.Items) > 0 { + delete(result.UnusedKeys, "ha_storage") + if err := parseHAStorage(result, o, "ha_storage"); err != nil { + return nil, fmt.Errorf("error parsing 'ha_storage': %w", err) + } + } else { + if o := list.Filter("ha_backend"); len(o.Items) > 0 { + delete(result.UnusedKeys, "ha_backend") + if err := parseHAStorage(result, o, "ha_backend"); err != nil { + return nil, fmt.Errorf("error parsing 'ha_backend': %w", err) + } + } + } + + // Parse service discovery + if o := list.Filter("service_registration"); len(o.Items) > 0 { + delete(result.UnusedKeys, "service_registration") + if err := parseServiceRegistration(result, o, "service_registration"); err != nil { + return nil, fmt.Errorf("error parsing 'service_registration': %w", err) + } + } + + if err := validateExperiments(result.Experiments); err != nil { + return nil, fmt.Errorf("error validating experiment(s) from config: %w", err) + } + + if err := result.parseConfig(list); err != nil { + return nil, fmt.Errorf("error parsing enterprise config: %w", err) + } + + // Remove all unused keys from Config that were satisfied by SharedConfig. + result.UnusedKeys = configutil.UnusedFieldDifference(result.UnusedKeys, nil, append(result.FoundKeys, sharedConfig.FoundKeys...)) + // Assign file info + for _, v := range result.UnusedKeys { + for i := range v { + v[i].Filename = source + } + } + + return result, nil +} + +func ExperimentsFromEnvAndCLI(config *Config, envKey string, flagExperiments []string) error { + if envExperimentsRaw := os.Getenv(envKey); envExperimentsRaw != "" { + envExperiments := strings.Split(envExperimentsRaw, ",") + err := validateExperiments(envExperiments) + if err != nil { + return fmt.Errorf("error validating experiment(s) from environment variable %q: %w", envKey, err) + } + + config.Experiments = mergeExperiments(config.Experiments, envExperiments) + } + + if len(flagExperiments) != 0 { + err := validateExperiments(flagExperiments) + if err != nil { + return fmt.Errorf("error validating experiment(s) from command line flag: %w", err) + } + + config.Experiments = mergeExperiments(config.Experiments, flagExperiments) + } + + return nil +} + +// Validate checks each experiment is a known experiment. 
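+//
+// For example, with validExperiments = ["foo", "bar"], input ["foo"] returns
+// nil, while ["foo", "baz"] returns an error naming "baz" as invalid.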
+func validateExperiments(experiments []string) error { + var invalid []string + + for _, experiment := range experiments { + if !strutil.StrListContains(validExperiments, experiment) { + invalid = append(invalid, experiment) + } + } + + if len(invalid) != 0 { + return fmt.Errorf("valid experiment(s) are %s, but received the following invalid experiment(s): %s", + strings.Join(validExperiments, ", "), + strings.Join(invalid, ", ")) + } + + return nil +} + +// mergeExperiments returns the logical OR of the two sets. +func mergeExperiments(left, right []string) []string { + processed := map[string]struct{}{} + var result []string + for _, l := range left { + if _, seen := processed[l]; !seen { + result = append(result, l) + } + processed[l] = struct{}{} + } + + for _, r := range right { + if _, seen := processed[r]; !seen { + result = append(result, r) + processed[r] = struct{}{} + } + } + + return result +} + +// LoadConfigDir loads all the configurations in the given directory +// in alphabetical order. +func LoadConfigDir(dir string) (*Config, error) { + f, err := os.Open(dir) + if err != nil { + return nil, err + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + return nil, err + } + if !fi.IsDir() { + return nil, fmt.Errorf("configuration path must be a directory: %q", dir) + } + + var files []string + err = nil + for err != io.EOF { + var fis []os.FileInfo + fis, err = f.Readdir(128) + if err != nil && err != io.EOF { + return nil, err + } + + for _, fi := range fis { + // Ignore directories + if fi.IsDir() { + continue + } + + // Only care about files that are valid to load. + name := fi.Name() + skip := true + if strings.HasSuffix(name, ".hcl") { + skip = false + } else if strings.HasSuffix(name, ".json") { + skip = false + } + if skip || isTemporaryFile(name) { + continue + } + + path := filepath.Join(dir, name) + files = append(files, path) + } + } + + result := NewConfig() + for _, f := range files { + config, err := LoadConfigFile(f) + if err != nil { + return nil, fmt.Errorf("error loading %q: %w", f, err) + } + + if result == nil { + result = config + } else { + result = result.Merge(config) + } + } + + return result, nil +} + +// isTemporaryFile returns true or false depending on whether the +// provided file name is a temporary file for the following editors: +// emacs or vim. 
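+//
+// For example: "config.hcl~" (vim), ".#config.hcl" and "#config.hcl#" (emacs)
+// all match and are skipped by LoadConfigDir.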
+func isTemporaryFile(name string) bool { + return strings.HasSuffix(name, "~") || // vim + strings.HasPrefix(name, ".#") || // emacs + (strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#")) // emacs +} + +func ParseStorage(result *Config, list *ast.ObjectList, name string) error { + if len(list.Items) > 1 { + return fmt.Errorf("only one %q block is permitted", name) + } + + // Get our item + item := list.Items[0] + + key := name + if len(item.Keys) > 0 { + key = item.Keys[0].Token.Value().(string) + } + + var config map[string]interface{} + if err := hcl.DecodeObject(&config, item.Val); err != nil { + return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key)) + } + + m := make(map[string]string) + for key, val := range config { + valStr, ok := val.(string) + if ok { + m[key] = valStr + continue + } + valBytes, err := json.Marshal(val) + if err != nil { + return err + } + m[key] = string(valBytes) + } + + // Pull out the redirect address since it's common to all backends + var redirectAddr string + if v, ok := m["redirect_addr"]; ok { + redirectAddr = v + delete(m, "redirect_addr") + } else if v, ok := m["advertise_addr"]; ok { + redirectAddr = v + delete(m, "advertise_addr") + } + + // Pull out the cluster address since it's common to all backends + var clusterAddr string + if v, ok := m["cluster_addr"]; ok { + clusterAddr = v + delete(m, "cluster_addr") + } + + var disableClustering bool + var err error + if v, ok := m["disable_clustering"]; ok { + disableClustering, err = strconv.ParseBool(v) + if err != nil { + return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key)) + } + delete(m, "disable_clustering") + } + + // Override with top-level values if they are set + if result.APIAddr != "" { + redirectAddr = result.APIAddr + } + + if result.ClusterAddr != "" { + clusterAddr = result.ClusterAddr + } + + if result.DisableClusteringRaw != nil { + disableClustering = result.DisableClustering + } + + result.Storage = &Storage{ + RedirectAddr: redirectAddr, + ClusterAddr: clusterAddr, + DisableClustering: disableClustering, + Type: strings.ToLower(key), + Config: m, + } + return nil +} + +func parseHAStorage(result *Config, list *ast.ObjectList, name string) error { + if len(list.Items) > 1 { + return fmt.Errorf("only one %q block is permitted", name) + } + + // Get our item + item := list.Items[0] + + key := name + if len(item.Keys) > 0 { + key = item.Keys[0].Token.Value().(string) + } + + var config map[string]interface{} + if err := hcl.DecodeObject(&config, item.Val); err != nil { + return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key)) + } + + m := make(map[string]string) + for key, val := range config { + valStr, ok := val.(string) + if ok { + m[key] = valStr + continue + } + valBytes, err := json.Marshal(val) + if err != nil { + return err + } + m[key] = string(valBytes) + } + + // Pull out the redirect address since it's common to all backends + var redirectAddr string + if v, ok := m["redirect_addr"]; ok { + redirectAddr = v + delete(m, "redirect_addr") + } else if v, ok := m["advertise_addr"]; ok { + redirectAddr = v + delete(m, "advertise_addr") + } + + // Pull out the cluster address since it's common to all backends + var clusterAddr string + if v, ok := m["cluster_addr"]; ok { + clusterAddr = v + delete(m, "cluster_addr") + } + + var disableClustering bool + var err error + if v, ok := m["disable_clustering"]; ok { + disableClustering, err = strconv.ParseBool(v) + if err != nil { + return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key)) + } + 
delete(m, "disable_clustering") + } + + // Override with top-level values if they are set + if result.APIAddr != "" { + redirectAddr = result.APIAddr + } + + if result.ClusterAddr != "" { + clusterAddr = result.ClusterAddr + } + + if result.DisableClusteringRaw != nil { + disableClustering = result.DisableClustering + } + + result.HAStorage = &Storage{ + RedirectAddr: redirectAddr, + ClusterAddr: clusterAddr, + DisableClustering: disableClustering, + Type: strings.ToLower(key), + Config: m, + } + return nil +} + +func parseServiceRegistration(result *Config, list *ast.ObjectList, name string) error { + if len(list.Items) > 1 { + return fmt.Errorf("only one %q block is permitted", name) + } + + // Get our item + item := list.Items[0] + key := name + if len(item.Keys) > 0 { + key = item.Keys[0].Token.Value().(string) + } + + var m map[string]string + if err := hcl.DecodeObject(&m, item.Val); err != nil { + return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key)) + } + + result.ServiceRegistration = &ServiceRegistration{ + Type: strings.ToLower(key), + Config: m, + } + return nil +} + +// Sanitized returns a copy of the config with all values that are considered +// sensitive stripped. It also strips all `*Raw` values that are mainly +// used for parsing. +// +// Specifically, the fields that this method strips are: +// - Storage.Config +// - HAStorage.Config +// - Seals.Config +// - Telemetry.CirconusAPIToken +func (c *Config) Sanitized() map[string]interface{} { + // Create shared config if it doesn't exist (e.g. in tests) so that map + // keys are actually populated + if c.SharedConfig == nil { + c.SharedConfig = new(configutil.SharedConfig) + } + sharedResult := c.SharedConfig.Sanitized() + result := map[string]interface{}{ + "cache_size": c.CacheSize, + "disable_sentinel_trace": c.DisableSentinelTrace, + "disable_cache": c.DisableCache, + "disable_printable_check": c.DisablePrintableCheck, + + "enable_ui": c.EnableUI, + + "max_lease_ttl": c.MaxLeaseTTL / time.Second, + "default_lease_ttl": c.DefaultLeaseTTL / time.Second, + + "cluster_cipher_suites": c.ClusterCipherSuites, + + "plugin_directory": c.PluginDirectory, + + "plugin_file_uid": c.PluginFileUid, + + "plugin_file_permissions": c.PluginFilePermissions, + + "raw_storage_endpoint": c.EnableRawEndpoint, + + "introspection_endpoint": c.EnableIntrospectionEndpoint, + + "api_addr": c.APIAddr, + "cluster_addr": c.ClusterAddr, + "disable_clustering": c.DisableClustering, + + "disable_performance_standby": c.DisablePerformanceStandby, + + "disable_sealwrap": c.DisableSealWrap, + + "disable_indexing": c.DisableIndexing, + + "enable_response_header_hostname": c.EnableResponseHeaderHostname, + + "enable_response_header_raft_node_id": c.EnableResponseHeaderRaftNodeID, + + "log_requests_level": c.LogRequestsLevel, + "experiments": c.Experiments, + + "detect_deadlocks": c.DetectDeadlocks, + + "imprecise_lease_role_tracking": c.ImpreciseLeaseRoleTracking, + } + for k, v := range sharedResult { + result[k] = v + } + + // Sanitize storage stanza + if c.Storage != nil { + storageType := c.Storage.Type + sanitizedStorage := map[string]interface{}{ + "type": storageType, + "redirect_addr": c.Storage.RedirectAddr, + "cluster_addr": c.Storage.ClusterAddr, + "disable_clustering": c.Storage.DisableClustering, + } + + if storageType == "raft" { + sanitizedStorage["raft"] = map[string]interface{}{ + "max_entry_size": c.Storage.Config["max_entry_size"], + } + } + + result["storage"] = sanitizedStorage + } + + // Sanitize HA storage stanza + if 
c.HAStorage != nil { + haStorageType := c.HAStorage.Type + sanitizedHAStorage := map[string]interface{}{ + "type": haStorageType, + "redirect_addr": c.HAStorage.RedirectAddr, + "cluster_addr": c.HAStorage.ClusterAddr, + "disable_clustering": c.HAStorage.DisableClustering, + } + + if haStorageType == "raft" { + sanitizedHAStorage["raft"] = map[string]interface{}{ + "max_entry_size": c.HAStorage.Config["max_entry_size"], + } + } + + result["ha_storage"] = sanitizedHAStorage + } + + // Sanitize service_registration stanza + if c.ServiceRegistration != nil { + sanitizedServiceRegistration := map[string]interface{}{ + "type": c.ServiceRegistration.Type, + } + result["service_registration"] = sanitizedServiceRegistration + } + + entConfigResult := c.entConfig.Sanitized() + for k, v := range entConfigResult { + result[k] = v + } + + return result +} + +func (c *Config) Prune() { + for _, l := range c.Listeners { + l.RawConfig = nil + l.UnusedKeys = nil + } + c.FoundKeys = nil + c.UnusedKeys = nil + c.SharedConfig.FoundKeys = nil + c.SharedConfig.UnusedKeys = nil + if c.Telemetry != nil { + c.Telemetry.FoundKeys = nil + c.Telemetry.UnusedKeys = nil + } +} + +func (c *Config) found(s, k string) { + delete(c.UnusedKeys, s) + c.FoundKeys = append(c.FoundKeys, k) +} + +func (c *Config) ToVaultNodeConfig() (*testcluster.VaultNodeConfig, error) { + var vnc testcluster.VaultNodeConfig + err := mapstructure.Decode(c, &vnc) + if err != nil { + return nil, err + } + return &vnc, nil +} diff --git a/command/server/config_custom_response_headers_test.go b/command/server/config_custom_response_headers_test.go new file mode 100644 index 0000000..11c4300 --- /dev/null +++ b/command/server/config_custom_response_headers_test.go @@ -0,0 +1,112 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package server + +import ( + "fmt" + "testing" + + "github.com/go-test/deep" +) + +var defaultCustomHeaders = map[string]string{ + "Strict-Transport-Security": "max-age=1; domains", + "Content-Security-Policy": "default-src 'others'", + "X-Vault-Ignored": "ignored", + "X-Custom-Header": "Custom header value default", +} + +var customHeaders307 = map[string]string{ + "X-Custom-Header": "Custom header value 307", +} + +var customHeader3xx = map[string]string{ + "X-Vault-Ignored-3xx": "Ignored 3xx", + "X-Custom-Header": "Custom header value 3xx", +} + +var customHeaders200 = map[string]string{ + "Someheader-200": "200", + "X-Custom-Header": "Custom header value 200", +} + +var customHeader2xx = map[string]string{ + "X-Custom-Header": "Custom header value 2xx", +} + +var customHeader400 = map[string]string{ + "Someheader-400": "400", +} + +var defaultCustomHeadersMultiListener = map[string]string{ + "Strict-Transport-Security": "max-age=31536000; includeSubDomains", + "Content-Security-Policy": "default-src 'others'", + "X-Vault-Ignored": "ignored", + "X-Custom-Header": "Custom header value default", +} + +var defaultSTS = map[string]string{ + "Strict-Transport-Security": "max-age=31536000; includeSubDomains", +} + +func TestCustomResponseHeadersConfigs(t *testing.T) { + expectedCustomResponseHeader := map[string]map[string]string{ + "default": defaultCustomHeaders, + "307": customHeaders307, + "3xx": customHeader3xx, + "200": customHeaders200, + "2xx": customHeader2xx, + "400": customHeader400, + } + + config, err := LoadConfigFile("./test-fixtures/config_custom_response_headers_1.hcl") + if err != nil { + t.Fatalf("Error encountered when loading config %+v", err) + } + if diff := deep.Equal(expectedCustomResponseHeader, config.Listeners[0].CustomResponseHeaders); diff != nil { + t.Fatalf(fmt.Sprintf("parsed custom headers do not match the expected ones, difference: %v", diff)) + } +} + +func TestCustomResponseHeadersConfigsMultipleListeners(t *testing.T) { + expectedCustomResponseHeader := map[string]map[string]string{ + "default": defaultCustomHeadersMultiListener, + "307": customHeaders307, + "3xx": customHeader3xx, + "200": customHeaders200, + "2xx": customHeader2xx, + "400": customHeader400, + } + + config, err := LoadConfigFile("./test-fixtures/config_custom_response_headers_multiple_listeners.hcl") + if err != nil { + t.Fatalf("Error encountered when loading config %+v", err) + } + if diff := deep.Equal(expectedCustomResponseHeader, config.Listeners[0].CustomResponseHeaders); diff != nil { + t.Fatalf(fmt.Sprintf("parsed custom headers do not match the expected ones, difference: %v", diff)) + } + + if diff := deep.Equal(expectedCustomResponseHeader, config.Listeners[1].CustomResponseHeaders); diff == nil { + t.Fatalf(fmt.Sprintf("parsed custom headers do not match the expected ones, difference: %v", diff)) + } + if diff := deep.Equal(expectedCustomResponseHeader["default"], config.Listeners[1].CustomResponseHeaders["default"]); diff != nil { + t.Fatalf(fmt.Sprintf("parsed custom headers do not match the expected ones, difference: %v", diff)) + } + + if diff := deep.Equal(expectedCustomResponseHeader, config.Listeners[2].CustomResponseHeaders); diff == nil { + t.Fatalf(fmt.Sprintf("parsed custom headers do not match the expected ones, difference: %v", diff)) + } + + if diff := deep.Equal(defaultSTS, config.Listeners[2].CustomResponseHeaders["default"]); diff != nil { + t.Fatalf(fmt.Sprintf("parsed custom headers do not match the expected ones, 
difference: %v", diff)) + } + + if diff := deep.Equal(expectedCustomResponseHeader, config.Listeners[3].CustomResponseHeaders); diff == nil { + t.Fatalf(fmt.Sprintf("parsed custom headers do not match the expected ones, difference: %v", diff)) + } + + if diff := deep.Equal(defaultSTS, config.Listeners[3].CustomResponseHeaders["default"]); diff != nil { + t.Fatalf(fmt.Sprintf("parsed custom headers do not match the expected ones, difference: %v", diff)) + } +} diff --git a/command/server/config_oss_test.go b/command/server/config_oss_test.go new file mode 100644 index 0000000..4a08ddf --- /dev/null +++ b/command/server/config_oss_test.go @@ -0,0 +1,22 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !enterprise + +package server + +import ( + "testing" +) + +func TestLoadConfigFile_topLevel(t *testing.T) { + testLoadConfigFile_topLevel(t, nil) +} + +func TestLoadConfigFile_json2(t *testing.T) { + testLoadConfigFile_json2(t, nil) +} + +func TestParseEntropy(t *testing.T) { + testParseEntropy(t, true) +} diff --git a/command/server/config_telemetry_test.go b/command/server/config_telemetry_test.go new file mode 100644 index 0000000..54245d0 --- /dev/null +++ b/command/server/config_telemetry_test.go @@ -0,0 +1,43 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package server + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMetricFilterConfigs(t *testing.T) { + t.Parallel() + cases := []struct { + configFile string + expectedFilterDefault *bool + expectedPrefixFilter []string + }{ + { + "./test-fixtures/telemetry/valid_prefix_filter.hcl", + nil, + []string{"-vault.expire", "-vault.audit", "+vault.expire.num_irrevocable_leases"}, + }, + { + "./test-fixtures/telemetry/filter_default_override.hcl", + boolPointer(false), + []string(nil), + }, + } + t.Run("validate metric filter configs", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + config, err := LoadConfigFile(tc.configFile) + if err != nil { + t.Fatalf("Error encountered when loading config %+v", err) + } + + assert.Equal(t, tc.expectedFilterDefault, config.SharedConfig.Telemetry.FilterDefault) + assert.Equal(t, tc.expectedPrefixFilter, config.SharedConfig.Telemetry.PrefixFilter) + } + }) +} diff --git a/command/server/config_test.go b/command/server/config_test.go new file mode 100644 index 0000000..c6ff965 --- /dev/null +++ b/command/server/config_test.go @@ -0,0 +1,222 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package server + +import ( + "fmt" + "reflect" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestLoadConfigFile(t *testing.T) { + testLoadConfigFile(t) +} + +func TestLoadConfigFile_json(t *testing.T) { + testLoadConfigFile_json(t) +} + +func TestLoadConfigFileIntegerAndBooleanValues(t *testing.T) { + testLoadConfigFileIntegerAndBooleanValues(t) +} + +func TestLoadConfigFileIntegerAndBooleanValuesJson(t *testing.T) { + testLoadConfigFileIntegerAndBooleanValuesJson(t) +} + +func TestLoadConfigFileWithLeaseMetricTelemetry(t *testing.T) { + testLoadConfigFileLeaseMetrics(t) +} + +func TestLoadConfigDir(t *testing.T) { + testLoadConfigDir(t) +} + +func TestConfig_Sanitized(t *testing.T) { + testConfig_Sanitized(t) +} + +func TestParseListeners(t *testing.T) { + testParseListeners(t) +} + +func TestParseUserLockouts(t *testing.T) { + testParseUserLockouts(t) +} + +func TestParseSockaddrTemplate(t *testing.T) { + testParseSockaddrTemplate(t) +} + +func TestConfigRaftRetryJoin(t *testing.T) { + testConfigRaftRetryJoin(t) +} + +func TestParseSeals(t *testing.T) { + testParseSeals(t) +} + +func TestParseStorage(t *testing.T) { + testParseStorageTemplate(t) +} + +// TestConfigWithAdministrativeNamespace tests that .hcl and .json configurations are correctly parsed when the administrative_namespace_path is present. +func TestConfigWithAdministrativeNamespace(t *testing.T) { + testConfigWithAdministrativeNamespaceHcl(t) + testConfigWithAdministrativeNamespaceJson(t) +} + +func TestUnknownFieldValidation(t *testing.T) { + testUnknownFieldValidation(t) +} + +func TestUnknownFieldValidationJson(t *testing.T) { + testUnknownFieldValidationJson(t) +} + +func TestUnknownFieldValidationHcl(t *testing.T) { + testUnknownFieldValidationHcl(t) +} + +func TestUnknownFieldValidationListenerAndStorage(t *testing.T) { + testUnknownFieldValidationStorageAndListener(t) +} + +func TestExperimentsConfigParsing(t *testing.T) { + const envKey = "VAULT_EXPERIMENTS" + originalValue := validExperiments + validExperiments = []string{"foo", "bar", "baz"} + t.Cleanup(func() { + validExperiments = originalValue + }) + + for name, tc := range map[string]struct { + fromConfig []string + fromEnv []string + fromCLI []string + expected []string + expectedError string + }{ + // Multiple sources. + "duplication": {[]string{"foo"}, []string{"foo"}, []string{"foo"}, []string{"foo"}, ""}, + "disjoint set": {[]string{"foo"}, []string{"bar"}, []string{"baz"}, []string{"foo", "bar", "baz"}, ""}, + + // Single source. + "config only": {[]string{"foo"}, nil, nil, []string{"foo"}, ""}, + "env only": {nil, []string{"foo"}, nil, []string{"foo"}, ""}, + "CLI only": {nil, nil, []string{"foo"}, []string{"foo"}, ""}, + + // Validation errors. 
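+		// An unknown experiment from any source must be rejected with an error
+		// that names the offending source (the config file, the VAULT_EXPERIMENTS
+		// environment variable, or the command line flag).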
+ "config invalid": {[]string{"invalid"}, nil, nil, nil, "from config"}, + "env invalid": {nil, []string{"invalid"}, nil, nil, "from environment variable"}, + "CLI invalid": {nil, nil, []string{"invalid"}, nil, "from command line flag"}, + } { + t.Run(name, func(t *testing.T) { + var configString string + t.Setenv(envKey, strings.Join(tc.fromEnv, ",")) + if len(tc.fromConfig) != 0 { + configString = fmt.Sprintf("experiments = [\"%s\"]", strings.Join(tc.fromConfig, "\", \"")) + } + config, err := ParseConfig(configString, "") + if err == nil { + err = ExperimentsFromEnvAndCLI(config, envKey, tc.fromCLI) + } + + switch tc.expectedError { + case "": + if err != nil { + t.Fatal(err) + } + + default: + if err == nil || !strings.Contains(err.Error(), tc.expectedError) { + t.Fatalf("Expected error to contain %q, but got: %s", tc.expectedError, err) + } + } + }) + } +} + +func TestValidate(t *testing.T) { + originalValue := validExperiments + for name, tc := range map[string]struct { + validSet []string + input []string + expectError bool + }{ + // Valid cases + "minimal valid": {[]string{"foo"}, []string{"foo"}, false}, + "valid subset": {[]string{"foo", "bar"}, []string{"bar"}, false}, + "repeated": {[]string{"foo"}, []string{"foo", "foo"}, false}, + + // Error cases + "partially valid": {[]string{"foo", "bar"}, []string{"foo", "baz"}, true}, + "empty": {[]string{"foo"}, []string{""}, true}, + "no valid experiments": {[]string{}, []string{"foo"}, true}, + } { + t.Run(name, func(t *testing.T) { + t.Cleanup(func() { + validExperiments = originalValue + }) + + validExperiments = tc.validSet + err := validateExperiments(tc.input) + if tc.expectError && err == nil { + t.Fatal("Expected error but got none") + } + if !tc.expectError && err != nil { + t.Fatal("Did not expect error but got", err) + } + }) + } +} + +func TestMerge(t *testing.T) { + for name, tc := range map[string]struct { + left []string + right []string + expected []string + }{ + "disjoint": {[]string{"foo"}, []string{"bar"}, []string{"foo", "bar"}}, + "empty left": {[]string{}, []string{"foo"}, []string{"foo"}}, + "empty right": {[]string{"foo"}, []string{}, []string{"foo"}}, + "overlapping": {[]string{"foo", "bar"}, []string{"foo", "baz"}, []string{"foo", "bar", "baz"}}, + } { + t.Run(name, func(t *testing.T) { + result := mergeExperiments(tc.left, tc.right) + if !reflect.DeepEqual(tc.expected, result) { + t.Fatalf("Expected %v but got %v", tc.expected, result) + } + }) + } +} + +// Test_parseDevTLSConfig verifies that both Windows and Unix directories are correctly escaped when creating a dev TLS +// configuration in HCL +func Test_parseDevTLSConfig(t *testing.T) { + tests := []struct { + name string + certDirectory string + }{ + { + name: "windows path", + certDirectory: `C:\Users\ADMINI~1\AppData\Local\Temp\2\vault-tls4169358130`, + }, + { + name: "unix path", + certDirectory: "/tmp/vault-tls4169358130", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg, err := parseDevTLSConfig("file", tt.certDirectory) + require.NoError(t, err) + require.Equal(t, fmt.Sprintf("%s/%s", tt.certDirectory, VaultDevCertFilename), cfg.Listeners[0].TLSCertFile) + require.Equal(t, fmt.Sprintf("%s/%s", tt.certDirectory, VaultDevKeyFilename), cfg.Listeners[0].TLSKeyFile) + }) + } +} diff --git a/command/server/config_test_helpers.go b/command/server/config_test_helpers.go new file mode 100644 index 0000000..ce33c09 --- /dev/null +++ b/command/server/config_test_helpers.go @@ -0,0 +1,1261 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package server + +import ( + "fmt" + "reflect" + "sort" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/go-test/deep" + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/token" + "github.com/hashicorp/vault/internalshared/configutil" +) + +var DefaultCustomHeaders = map[string]map[string]string{ + "default": { + "Strict-Transport-Security": configutil.StrictTransportSecurity, + }, +} + +func boolPointer(x bool) *bool { + return &x +} + +func testConfigRaftRetryJoin(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/raft_retry_join.hcl") + if err != nil { + t.Fatal(err) + } + retryJoinConfig := `[{"leader_api_addr":"http://127.0.0.1:8200"},{"leader_api_addr":"http://127.0.0.2:8200"},{"leader_api_addr":"http://127.0.0.3:8200"}]` + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + Listeners: []*configutil.Listener{ + { + Type: "tcp", + Address: "127.0.0.1:8200", + CustomResponseHeaders: DefaultCustomHeaders, + }, + }, + DisableMlock: true, + }, + + Storage: &Storage{ + Type: "raft", + Config: map[string]string{ + "path": "/storage/path/raft", + "node_id": "raft1", + "retry_join": retryJoinConfig, + }, + }, + } + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func testLoadConfigFile_topLevel(t *testing.T, entropy *configutil.Entropy) { + config, err := LoadConfigFile("./test-fixtures/config2.hcl") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + Listeners: []*configutil.Listener{ + { + Type: "tcp", + Address: "127.0.0.1:443", + CustomResponseHeaders: DefaultCustomHeaders, + }, + }, + + Telemetry: &configutil.Telemetry{ + StatsdAddr: "bar", + StatsiteAddr: "foo", + DisableHostname: false, + DogStatsDAddr: "127.0.0.1:7254", + DogStatsDTags: []string{"tag_1:val_1", "tag_2:val_2"}, + PrometheusRetentionTime: 30 * time.Second, + UsageGaugePeriod: 5 * time.Minute, + MaximumGaugeCardinality: 125, + LeaseMetricsEpsilon: time.Hour, + NumLeaseMetricsTimeBuckets: 168, + LeaseMetricsNameSpaceLabels: false, + }, + + DisableMlock: true, + + PidFile: "./pidfile", + + ClusterName: "testcluster", + + Seals: []*configutil.KMS{ + { + Type: "nopurpose", + }, + { + Type: "stringpurpose", + Purpose: []string{"foo"}, + }, + { + Type: "commastringpurpose", + Purpose: []string{"foo", "bar"}, + }, + { + Type: "slicepurpose", + Purpose: []string{"zip", "zap"}, + }, + }, + }, + + Storage: &Storage{ + Type: "consul", + RedirectAddr: "top_level_api_addr", + ClusterAddr: "top_level_cluster_addr", + Config: map[string]string{ + "foo": "bar", + }, + }, + + HAStorage: &Storage{ + Type: "consul", + RedirectAddr: "top_level_api_addr", + ClusterAddr: "top_level_cluster_addr", + Config: map[string]string{ + "bar": "baz", + }, + DisableClustering: true, + }, + + ServiceRegistration: &ServiceRegistration{ + Type: "consul", + Config: map[string]string{ + "foo": "bar", + }, + }, + + DisableCache: true, + DisableCacheRaw: true, + EnableUI: true, + EnableUIRaw: true, + + EnableRawEndpoint: true, + EnableRawEndpointRaw: true, + + DisableSealWrap: true, + DisableSealWrapRaw: true, + + MaxLeaseTTL: 10 * time.Hour, + MaxLeaseTTLRaw: "10h", + DefaultLeaseTTL: 10 * time.Hour, + DefaultLeaseTTLRaw: "10h", + + APIAddr: "top_level_api_addr", + ClusterAddr: "top_level_cluster_addr", + } + addExpectedEntConfig(expected, []string{}) + + if entropy != nil { + expected.Entropy 
= entropy + } + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func testLoadConfigFile_json2(t *testing.T, entropy *configutil.Entropy) { + config, err := LoadConfigFile("./test-fixtures/config2.hcl.json") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + Listeners: []*configutil.Listener{ + { + Type: "tcp", + Address: "127.0.0.1:443", + CustomResponseHeaders: DefaultCustomHeaders, + }, + { + Type: "tcp", + Address: "127.0.0.1:444", + CustomResponseHeaders: DefaultCustomHeaders, + }, + }, + + Telemetry: &configutil.Telemetry{ + StatsiteAddr: "foo", + StatsdAddr: "bar", + DisableHostname: true, + UsageGaugePeriod: 5 * time.Minute, + MaximumGaugeCardinality: 125, + CirconusAPIToken: "0", + CirconusAPIApp: "vault", + CirconusAPIURL: "http://api.circonus.com/v2", + CirconusSubmissionInterval: "10s", + CirconusCheckSubmissionURL: "https://someplace.com/metrics", + CirconusCheckID: "0", + CirconusCheckForceMetricActivation: "true", + CirconusCheckInstanceID: "node1:vault", + CirconusCheckSearchTag: "service:vault", + CirconusCheckDisplayName: "node1:vault", + CirconusCheckTags: "cat1:tag1,cat2:tag2", + CirconusBrokerID: "0", + CirconusBrokerSelectTag: "dc:sfo", + PrometheusRetentionTime: 30 * time.Second, + LeaseMetricsEpsilon: time.Hour, + NumLeaseMetricsTimeBuckets: 168, + LeaseMetricsNameSpaceLabels: false, + }, + }, + + Storage: &Storage{ + Type: "consul", + Config: map[string]string{ + "foo": "bar", + }, + }, + + HAStorage: &Storage{ + Type: "consul", + Config: map[string]string{ + "bar": "baz", + }, + DisableClustering: true, + }, + + ServiceRegistration: &ServiceRegistration{ + Type: "consul", + Config: map[string]string{ + "foo": "bar", + }, + }, + + CacheSize: 45678, + + EnableUI: true, + EnableUIRaw: true, + + EnableRawEndpoint: true, + EnableRawEndpointRaw: true, + + DisableSealWrap: true, + DisableSealWrapRaw: true, + } + addExpectedEntConfig(expected, []string{"http"}) + + if entropy != nil { + expected.Entropy = entropy + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func testParseEntropy(t *testing.T, oss bool) { + tests := []struct { + inConfig string + outErr error + outEntropy configutil.Entropy + }{ + { + inConfig: `entropy "seal" { + mode = "augmentation" + }`, + outErr: nil, + outEntropy: configutil.Entropy{Mode: configutil.EntropyAugmentation}, + }, + { + inConfig: `entropy "seal" { + mode = "a_mode_that_is_not_supported" + }`, + outErr: fmt.Errorf("the specified entropy mode %q is not supported", "a_mode_that_is_not_supported"), + }, + { + inConfig: `entropy "device_that_is_not_supported" { + mode = "augmentation" + }`, + outErr: fmt.Errorf("only the %q type of external entropy is supported", "seal"), + }, + { + inConfig: `entropy "seal" { + mode = "augmentation" + } + entropy "seal" { + mode = "augmentation" + }`, + outErr: fmt.Errorf("only one %q block is permitted", "entropy"), + }, + } + + config := Config{ + SharedConfig: &configutil.SharedConfig{}, + } + + for _, test := range tests { + obj, _ := hcl.Parse(strings.TrimSpace(test.inConfig)) + list, _ := obj.Node.(*ast.ObjectList) + objList := list.Filter("entropy") + err := configutil.ParseEntropy(config.SharedConfig, objList, "entropy") + // validate the error, both should be nil or have the same Error() + switch { + case oss: + if config.Entropy != nil { + t.Fatalf("parsing Entropy should not be possible in oss but got a non-nil 
config.Entropy: %#v", config.Entropy) + } + case err != nil && test.outErr != nil: + if err.Error() != test.outErr.Error() { + t.Fatalf("error mismatch: expected %#v got %#v", err, test.outErr) + } + case err != test.outErr: + t.Fatalf("error mismatch: expected %#v got %#v", err, test.outErr) + case err == nil && config.Entropy != nil && *config.Entropy != test.outEntropy: + t.Fatalf("entropy config mismatch: expected %#v got %#v", test.outEntropy, *config.Entropy) + } + } +} + +func testLoadConfigFileIntegerAndBooleanValues(t *testing.T) { + testLoadConfigFileIntegerAndBooleanValuesCommon(t, "./test-fixtures/config4.hcl") +} + +func testLoadConfigFileIntegerAndBooleanValuesJson(t *testing.T) { + testLoadConfigFileIntegerAndBooleanValuesCommon(t, "./test-fixtures/config4.hcl.json") +} + +func testLoadConfigFileIntegerAndBooleanValuesCommon(t *testing.T, path string) { + config, err := LoadConfigFile(path) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + Listeners: []*configutil.Listener{ + { + Type: "tcp", + Address: "127.0.0.1:8200", + CustomResponseHeaders: DefaultCustomHeaders, + }, + }, + DisableMlock: true, + }, + + Storage: &Storage{ + Type: "raft", + Config: map[string]string{ + "path": "/storage/path/raft", + "node_id": "raft1", + "performance_multiplier": "1", + "foo": "bar", + "baz": "true", + }, + ClusterAddr: "127.0.0.1:8201", + }, + + ClusterAddr: "127.0.0.1:8201", + + DisableCache: true, + DisableCacheRaw: true, + EnableUI: true, + EnableUIRaw: true, + } + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func testLoadConfigFile(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config.hcl") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + Listeners: []*configutil.Listener{ + { + Type: "tcp", + Address: "127.0.0.1:443", + CustomResponseHeaders: DefaultCustomHeaders, + }, + }, + + Telemetry: &configutil.Telemetry{ + StatsdAddr: "bar", + StatsiteAddr: "foo", + DisableHostname: false, + UsageGaugePeriod: 5 * time.Minute, + MaximumGaugeCardinality: 100, + DogStatsDAddr: "127.0.0.1:7254", + DogStatsDTags: []string{"tag_1:val_1", "tag_2:val_2"}, + PrometheusRetentionTime: configutil.PrometheusDefaultRetentionTime, + MetricsPrefix: "myprefix", + LeaseMetricsEpsilon: time.Hour, + NumLeaseMetricsTimeBuckets: 168, + LeaseMetricsNameSpaceLabels: false, + }, + + DisableMlock: true, + + Entropy: nil, + + PidFile: "./pidfile", + + ClusterName: "testcluster", + }, + + Storage: &Storage{ + Type: "consul", + RedirectAddr: "foo", + Config: map[string]string{ + "foo": "bar", + }, + }, + + HAStorage: &Storage{ + Type: "consul", + RedirectAddr: "snafu", + Config: map[string]string{ + "bar": "baz", + }, + DisableClustering: true, + }, + + ServiceRegistration: &ServiceRegistration{ + Type: "consul", + Config: map[string]string{ + "foo": "bar", + }, + }, + + DisableCache: true, + DisableCacheRaw: true, + DisablePrintableCheckRaw: true, + DisablePrintableCheck: true, + EnableUI: true, + EnableUIRaw: true, + + EnableRawEndpoint: true, + EnableRawEndpointRaw: true, + + EnableIntrospectionEndpoint: true, + EnableIntrospectionEndpointRaw: true, + + DisableSealWrap: true, + DisableSealWrapRaw: true, + + MaxLeaseTTL: 10 * time.Hour, + MaxLeaseTTLRaw: "10h", + DefaultLeaseTTL: 10 * time.Hour, + DefaultLeaseTTLRaw: "10h", + + EnableResponseHeaderHostname: true, + EnableResponseHeaderHostnameRaw: true, + 
EnableResponseHeaderRaftNodeID: true, + EnableResponseHeaderRaftNodeIDRaw: true, + + LicensePath: "/path/to/license", + } + + addExpectedEntConfig(expected, []string{}) + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func testUnknownFieldValidationStorageAndListener(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/storage-listener-config.json") + if err != nil { + t.Fatalf("err: %s", err) + } + if len(config.UnusedKeys) != 0 { + t.Fatalf("unused keys for valid config are %+v\n", config.UnusedKeys) + } +} + +func testUnknownFieldValidation(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config.hcl") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []configutil.ConfigError{ + { + Problem: "unknown or unsupported field bad_value found in configuration", + Position: token.Pos{ + Filename: "./test-fixtures/config.hcl", + Offset: 651, + Line: 37, + Column: 5, + }, + }, + } + errors := config.Validate("./test-fixtures/config.hcl") + + for _, er1 := range errors { + found := false + if strings.Contains(er1.String(), "sentinel") { + // This happens on OSS, and is fine + continue + } + for _, ex := range expected { + // TODO: Only test the string, pos may change + if ex.Problem == er1.Problem && reflect.DeepEqual(ex.Position, er1.Position) { + found = true + break + } + } + if !found { + t.Fatalf("found unexpected error: %v", er1.String()) + } + } + for _, ex := range expected { + found := false + for _, er1 := range errors { + if ex.Problem == er1.Problem && reflect.DeepEqual(ex.Position, er1.Position) { + found = true + } + } + if !found { + t.Fatalf("could not find expected error: %v", ex.String()) + } + } +} + +// testUnknownFieldValidationJson tests that this valid json config does not result in +// errors. Prior to VAULT-8519, it reported errors even with a valid config that was +// parsed properly. +func testUnknownFieldValidationJson(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config_small.json") + if err != nil { + t.Fatalf("err: %s", err) + } + + errors := config.Validate("./test-fixtures/config_small.json") + if errors != nil { + t.Fatal(errors) + } +} + +// testUnknownFieldValidationHcl tests that this valid hcl config does not result in +// errors. Prior to VAULT-8519, the json version of this config reported errors even +// with a valid config that was parsed properly. +// In short, this ensures the same for HCL as we test in testUnknownFieldValidationJson +func testUnknownFieldValidationHcl(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config_small.hcl") + if err != nil { + t.Fatalf("err: %s", err) + } + + errors := config.Validate("./test-fixtures/config_small.hcl") + if errors != nil { + t.Fatal(errors) + } +} + +// testConfigWithAdministrativeNamespaceJson tests that a config with a valid administrative namespace path is correctly validated and loaded. +func testConfigWithAdministrativeNamespaceJson(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config_with_valid_admin_ns.json") + require.NoError(t, err) + + configErrors := config.Validate("./test-fixtures/config_with_valid_admin_ns.json") + require.Empty(t, configErrors) + + require.NotEmpty(t, config.AdministrativeNamespacePath) +} + +// testConfigWithAdministrativeNamespaceHcl tests that a config with a valid administrative namespace path is correctly validated and loaded. 
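+// The fixture is assumed to carry a stanza along the lines of the sketch below
+// (the "admin/" value mirrors config3.hcl elsewhere in this change):
+//
+//	administrative_namespace_path = "admin/"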
+func testConfigWithAdministrativeNamespaceHcl(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config_with_valid_admin_ns.hcl") + require.NoError(t, err) + + configErrors := config.Validate("./test-fixtures/config_with_valid_admin_ns.hcl") + require.Empty(t, configErrors) + + require.NotEmpty(t, config.AdministrativeNamespacePath) +} + +func testLoadConfigFile_json(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config.hcl.json") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + Listeners: []*configutil.Listener{ + { + Type: "tcp", + Address: "127.0.0.1:443", + CustomResponseHeaders: DefaultCustomHeaders, + }, + }, + + Telemetry: &configutil.Telemetry{ + StatsiteAddr: "baz", + StatsdAddr: "", + DisableHostname: false, + UsageGaugePeriod: 5 * time.Minute, + MaximumGaugeCardinality: 100, + CirconusAPIToken: "", + CirconusAPIApp: "", + CirconusAPIURL: "", + CirconusSubmissionInterval: "", + CirconusCheckSubmissionURL: "", + CirconusCheckID: "", + CirconusCheckForceMetricActivation: "", + CirconusCheckInstanceID: "", + CirconusCheckSearchTag: "", + CirconusCheckDisplayName: "", + CirconusCheckTags: "", + CirconusBrokerID: "", + CirconusBrokerSelectTag: "", + PrometheusRetentionTime: configutil.PrometheusDefaultRetentionTime, + LeaseMetricsEpsilon: time.Hour, + NumLeaseMetricsTimeBuckets: 168, + LeaseMetricsNameSpaceLabels: false, + }, + + PidFile: "./pidfile", + Entropy: nil, + ClusterName: "testcluster", + }, + + Storage: &Storage{ + Type: "consul", + Config: map[string]string{ + "foo": "bar", + }, + DisableClustering: true, + }, + + ServiceRegistration: &ServiceRegistration{ + Type: "consul", + Config: map[string]string{ + "foo": "bar", + }, + }, + + ClusterCipherSuites: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + + MaxLeaseTTL: 10 * time.Hour, + MaxLeaseTTLRaw: "10h", + DefaultLeaseTTL: 10 * time.Hour, + DefaultLeaseTTLRaw: "10h", + DisableCacheRaw: interface{}(nil), + EnableUI: true, + EnableUIRaw: true, + EnableRawEndpoint: true, + EnableRawEndpointRaw: true, + DisableSealWrap: true, + DisableSealWrapRaw: true, + } + + addExpectedEntConfig(expected, []string{}) + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func testLoadConfigDir(t *testing.T) { + config, err := LoadConfigDir("./test-fixtures/config-dir") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + DisableMlock: true, + + Listeners: []*configutil.Listener{ + { + Type: "tcp", + Address: "127.0.0.1:443", + CustomResponseHeaders: DefaultCustomHeaders, + }, + }, + + Telemetry: &configutil.Telemetry{ + StatsiteAddr: "qux", + StatsdAddr: "baz", + DisableHostname: true, + UsageGaugePeriod: 5 * time.Minute, + MaximumGaugeCardinality: 100, + PrometheusRetentionTime: configutil.PrometheusDefaultRetentionTime, + LeaseMetricsEpsilon: time.Hour, + NumLeaseMetricsTimeBuckets: 168, + LeaseMetricsNameSpaceLabels: false, + }, + ClusterName: "testcluster", + }, + + DisableCache: true, + DisableClustering: false, + DisableClusteringRaw: false, + + APIAddr: "https://vault.local", + ClusterAddr: "https://127.0.0.1:444", + + Storage: &Storage{ + Type: "consul", + Config: map[string]string{ + "foo": "bar", + }, + RedirectAddr: "https://vault.local", + ClusterAddr: "https://127.0.0.1:444", + DisableClustering: false, + }, + + EnableUI: true, + + EnableRawEndpoint: true, + + MaxLeaseTTL: 10 * 
time.Hour, + DefaultLeaseTTL: 10 * time.Hour, + } + + addExpectedEntConfig(expected, []string{"http"}) + + config.Prune() + if diff := deep.Equal(config, expected); diff != nil { + t.Fatal(diff) + } +} + +func testConfig_Sanitized(t *testing.T) { + config, err := LoadConfigFile("./test-fixtures/config3.hcl") + if err != nil { + t.Fatalf("err: %s", err) + } + sanitizedConfig := config.Sanitized() + + expected := map[string]interface{}{ + "api_addr": "top_level_api_addr", + "cache_size": 0, + "cluster_addr": "top_level_cluster_addr", + "cluster_cipher_suites": "", + "cluster_name": "testcluster", + "default_lease_ttl": (365 * 24 * time.Hour) / time.Second, + "default_max_request_duration": 0 * time.Second, + "disable_cache": true, + "disable_clustering": false, + "disable_indexing": false, + "disable_mlock": true, + "disable_performance_standby": false, + "experiments": []string(nil), + "plugin_file_uid": 0, + "plugin_file_permissions": 0, + "disable_printable_check": false, + "disable_sealwrap": true, + "raw_storage_endpoint": true, + "introspection_endpoint": false, + "disable_sentinel_trace": true, + "detect_deadlocks": "", + "enable_ui": true, + "enable_response_header_hostname": false, + "enable_response_header_raft_node_id": false, + "log_requests_level": "basic", + "ha_storage": map[string]interface{}{ + "cluster_addr": "top_level_cluster_addr", + "disable_clustering": true, + "redirect_addr": "top_level_api_addr", + "type": "consul", + }, + "listeners": []interface{}{ + map[string]interface{}{ + "config": map[string]interface{}{ + "address": "127.0.0.1:443", + }, + "type": "tcp", + }, + }, + "log_format": "", + "log_level": "", + "max_lease_ttl": (30 * 24 * time.Hour) / time.Second, + "pid_file": "./pidfile", + "plugin_directory": "", + "seals": []interface{}{ + map[string]interface{}{ + "disabled": false, + "type": "awskms", + }, + }, + "storage": map[string]interface{}{ + "cluster_addr": "top_level_cluster_addr", + "disable_clustering": false, + "redirect_addr": "top_level_api_addr", + "type": "consul", + }, + "service_registration": map[string]interface{}{ + "type": "consul", + }, + "telemetry": map[string]interface{}{ + "usage_gauge_period": 5 * time.Minute, + "maximum_gauge_cardinality": 100, + "circonus_api_app": "", + "circonus_api_token": "", + "circonus_api_url": "", + "circonus_broker_id": "", + "circonus_broker_select_tag": "", + "circonus_check_display_name": "", + "circonus_check_force_metric_activation": "", + "circonus_check_id": "", + "circonus_check_instance_id": "", + "circonus_check_search_tag": "", + "circonus_submission_url": "", + "circonus_check_tags": "", + "circonus_submission_interval": "", + "disable_hostname": false, + "metrics_prefix": "pfx", + "dogstatsd_addr": "", + "dogstatsd_tags": []string(nil), + "prometheus_retention_time": 24 * time.Hour, + "stackdriver_location": "", + "stackdriver_namespace": "", + "stackdriver_project_id": "", + "stackdriver_debug_logs": false, + "statsd_address": "bar", + "statsite_address": "", + "lease_metrics_epsilon": time.Hour, + "num_lease_metrics_buckets": 168, + "add_lease_metrics_namespace_labels": false, + }, + "administrative_namespace_path": "admin/", + "imprecise_lease_role_tracking": false, + } + + addExpectedEntSanitizedConfig(expected, []string{"http"}) + + config.Prune() + if diff := deep.Equal(sanitizedConfig, expected); len(diff) > 0 { + t.Fatalf("bad, diff: %#v", diff) + } +} + +func testParseListeners(t *testing.T) { + obj, _ := hcl.Parse(strings.TrimSpace(` +listener "tcp" { + address = "127.0.0.1:443" + 
cluster_address = "127.0.0.1:8201" + tls_disable = false + tls_cert_file = "./certs/server.crt" + tls_key_file = "./certs/server.key" + tls_client_ca_file = "./certs/rootca.crt" + tls_min_version = "tls12" + tls_max_version = "tls13" + tls_require_and_verify_client_cert = true + tls_disable_client_certs = true + telemetry { + unauthenticated_metrics_access = true + } + profiling { + unauthenticated_pprof_access = true + } + agent_api { + enable_quit = true + } + proxy_api { + enable_quit = true + } +}`)) + + config := Config{ + SharedConfig: &configutil.SharedConfig{}, + } + list, _ := obj.Node.(*ast.ObjectList) + objList := list.Filter("listener") + configutil.ParseListeners(config.SharedConfig, objList) + listeners := config.Listeners + if len(listeners) == 0 { + t.Fatalf("expected at least one listener in the config") + } + listener := listeners[0] + if listener.Type != "tcp" { + t.Fatalf("expected tcp listener in the config") + } + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + Listeners: []*configutil.Listener{ + { + Type: "tcp", + Address: "127.0.0.1:443", + ClusterAddress: "127.0.0.1:8201", + TLSCertFile: "./certs/server.crt", + TLSKeyFile: "./certs/server.key", + TLSClientCAFile: "./certs/rootca.crt", + TLSMinVersion: "tls12", + TLSMaxVersion: "tls13", + TLSRequireAndVerifyClientCert: true, + TLSDisableClientCerts: true, + Telemetry: configutil.ListenerTelemetry{ + UnauthenticatedMetricsAccess: true, + }, + Profiling: configutil.ListenerProfiling{ + UnauthenticatedPProfAccess: true, + }, + AgentAPI: &configutil.AgentAPI{ + EnableQuit: true, + }, + ProxyAPI: &configutil.ProxyAPI{ + EnableQuit: true, + }, + CustomResponseHeaders: DefaultCustomHeaders, + }, + }, + }, + } + config.Prune() + if diff := deep.Equal(config, *expected); diff != nil { + t.Fatal(diff) + } +} + +func testParseUserLockouts(t *testing.T) { + obj, _ := hcl.Parse(strings.TrimSpace(` + user_lockout "all" { + lockout_duration = "40m" + lockout_counter_reset = "45m" + disable_lockout = "false" + } + user_lockout "userpass" { + lockout_threshold = "100" + lockout_duration = "20m" + } + user_lockout "ldap" { + disable_lockout = "true" + }`)) + + config := Config{ + SharedConfig: &configutil.SharedConfig{}, + } + list, _ := obj.Node.(*ast.ObjectList) + objList := list.Filter("user_lockout") + configutil.ParseUserLockouts(config.SharedConfig, objList) + + sort.Slice(config.SharedConfig.UserLockouts[:], func(i, j int) bool { + return config.SharedConfig.UserLockouts[i].Type < config.SharedConfig.UserLockouts[j].Type + }) + + expected := &Config{ + SharedConfig: &configutil.SharedConfig{ + UserLockouts: []*configutil.UserLockout{ + { + Type: "all", + LockoutThreshold: 5, + LockoutDuration: 2400000000000, + LockoutCounterReset: 2700000000000, + DisableLockout: false, + }, + { + Type: "userpass", + LockoutThreshold: 100, + LockoutDuration: 1200000000000, + LockoutCounterReset: 2700000000000, + DisableLockout: false, + }, + { + Type: "ldap", + LockoutThreshold: 5, + LockoutDuration: 2400000000000, + LockoutCounterReset: 2700000000000, + DisableLockout: true, + }, + }, + }, + } + + sort.Slice(expected.SharedConfig.UserLockouts[:], func(i, j int) bool { + return expected.SharedConfig.UserLockouts[i].Type < expected.SharedConfig.UserLockouts[j].Type + }) + config.Prune() + require.Equal(t, config, *expected) +} + +func testParseSockaddrTemplate(t *testing.T) { + config, err := ParseConfig(` +api_addr = < 0 { + props["x_forwarded_for_authorized_addrs"] = fmt.Sprintf("%v", l.XForwardedForAuthorizedAddrs) + } + 
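+	// Report the effective hop-skip count: the explicit value when set, or an
+	// implied "0" once authorized addresses are configured. These props mirror
+	// the listener's x_forwarded_for_hop_skips and
+	// x_forwarded_for_authorized_addrs options.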
+ if l.XForwardedForHopSkips > 0 { + props["x_forwarded_for_hop_skips"] = fmt.Sprintf("%d", l.XForwardedForHopSkips) + } else if len(l.XForwardedForAuthorizedAddrs) > 0 { + props["x_forwarded_for_hop_skips"] = "0" + } + + if len(l.XForwardedForAuthorizedAddrs) > 0 { + props["x_forwarded_for_reject_not_present"] = strconv.FormatBool(l.XForwardedForRejectNotPresent) + } + + if len(l.XForwardedForAuthorizedAddrs) > 0 { + props["x_forwarded_for_reject_not_authorized"] = strconv.FormatBool(l.XForwardedForRejectNotAuthorized) + } + } + + tlsConfig, reloadFunc, err := listenerutil.TLSConfig(l, props, ui) + if err != nil { + return nil, nil, nil, err + } + if tlsConfig != nil { + ln = tls.NewListener(ln, tlsConfig) + } + + return ln, props, reloadFunc, nil +} + +// TCPKeepAliveListener sets TCP keep-alive timeouts on accepted +// connections. It's used by ListenAndServe and ListenAndServeTLS so +// dead TCP connections (e.g. closing laptop mid-download) eventually +// go away. +// +// This is copied directly from the Go source code. +type TCPKeepAliveListener struct { + *net.TCPListener +} + +func (ln TCPKeepAliveListener) Accept() (c net.Conn, err error) { + tc, err := ln.AcceptTCP() + if err != nil { + return + } + tc.SetKeepAlive(true) + tc.SetKeepAlivePeriod(3 * time.Minute) + return tc, nil +} diff --git a/command/server/listener_tcp_test.go b/command/server/listener_tcp_test.go new file mode 100644 index 0000000..6d73cf2 --- /dev/null +++ b/command/server/listener_tcp_test.go @@ -0,0 +1,466 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package server + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "math/rand" + "net" + "os" + "testing" + "time" + + "github.com/hashicorp/go-sockaddr" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/mitchellh/cli" + "github.com/pires/go-proxyproto" +) + +func TestTCPListener(t *testing.T) { + ln, _, _, err := tcpListenerFactory(&configutil.Listener{ + Address: "127.0.0.1:0", + TLSDisable: true, + }, nil, cli.NewMockUi()) + if err != nil { + t.Fatalf("err: %s", err) + } + + connFn := func(lnReal net.Listener) (net.Conn, error) { + return net.Dial("tcp", ln.Addr().String()) + } + + testListenerImpl(t, ln, connFn, "", 0, "127.0.0.1", false) +} + +// TestTCPListener_tls tests TLS generally +func TestTCPListener_tls(t *testing.T) { + wd, _ := os.Getwd() + wd += "/test-fixtures/reload/" + + td, err := ioutil.TempDir("", fmt.Sprintf("vault-test-%d", rand.New(rand.NewSource(time.Now().Unix())).Int63())) + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(td) + + // Setup initial certs + inBytes, _ := ioutil.ReadFile(wd + "reload_ca.pem") + certPool := x509.NewCertPool() + ok := certPool.AppendCertsFromPEM(inBytes) + if !ok { + t.Fatal("not ok when appending CA cert") + } + + ln, _, _, err := tcpListenerFactory(&configutil.Listener{ + Address: "127.0.0.1:0", + TLSCertFile: wd + "reload_foo.pem", + TLSKeyFile: wd + "reload_foo.key", + TLSRequireAndVerifyClientCert: true, + TLSClientCAFile: wd + "reload_ca.pem", + }, nil, cli.NewMockUi()) + if err != nil { + t.Fatalf("err: %s", err) + } + cwd, _ := os.Getwd() + + clientCert, _ := tls.LoadX509KeyPair( + cwd+"/test-fixtures/reload/reload_foo.pem", + cwd+"/test-fixtures/reload/reload_foo.key") + + connFn := func(clientCerts bool) func(net.Listener) (net.Conn, error) { + return func(lnReal net.Listener) (net.Conn, error) { + conf := &tls.Config{ + RootCAs: certPool, + } + if clientCerts { + conf.Certificates = []tls.Certificate{clientCert} + } + 
conn, err := tls.Dial("tcp", ln.Addr().String(), conf) + if err != nil { + return nil, err + } + if err = conn.Handshake(); err != nil { + return nil, err + } + return conn, nil + } + } + + testListenerImpl(t, ln, connFn(true), "foo.example.com", 0, "127.0.0.1", false) + + ln, _, _, err = tcpListenerFactory(&configutil.Listener{ + Address: "127.0.0.1:0", + TLSCertFile: wd + "reload_foo.pem", + TLSKeyFile: wd + "reload_foo.key", + TLSRequireAndVerifyClientCert: true, + TLSDisableClientCerts: true, + TLSClientCAFile: wd + "reload_ca.pem", + }, nil, cli.NewMockUi()) + if err == nil { + t.Fatal("expected error due to mutually exclusive client cert options") + } + + ln, _, _, err = tcpListenerFactory(&configutil.Listener{ + Address: "127.0.0.1:0", + TLSCertFile: wd + "reload_foo.pem", + TLSKeyFile: wd + "reload_foo.key", + TLSDisableClientCerts: true, + TLSClientCAFile: wd + "reload_ca.pem", + }, nil, cli.NewMockUi()) + if err != nil { + t.Fatalf("err: %s", err) + } + + testListenerImpl(t, ln, connFn(false), "foo.example.com", 0, "127.0.0.1", false) +} + +func TestTCPListener_tls13(t *testing.T) { + wd, _ := os.Getwd() + wd += "/test-fixtures/reload/" + + td, err := ioutil.TempDir("", fmt.Sprintf("vault-test-%d", rand.New(rand.NewSource(time.Now().Unix())).Int63())) + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(td) + + // Setup initial certs + inBytes, _ := ioutil.ReadFile(wd + "reload_ca.pem") + certPool := x509.NewCertPool() + ok := certPool.AppendCertsFromPEM(inBytes) + if !ok { + t.Fatal("not ok when appending CA cert") + } + + ln, _, _, err := tcpListenerFactory(&configutil.Listener{ + Address: "127.0.0.1:0", + TLSCertFile: wd + "reload_foo.pem", + TLSKeyFile: wd + "reload_foo.key", + TLSRequireAndVerifyClientCert: true, + TLSClientCAFile: wd + "reload_ca.pem", + TLSMinVersion: "tls13", + }, nil, cli.NewMockUi()) + if err != nil { + t.Fatalf("err: %s", err) + } + cwd, _ := os.Getwd() + + clientCert, _ := tls.LoadX509KeyPair( + cwd+"/test-fixtures/reload/reload_foo.pem", + cwd+"/test-fixtures/reload/reload_foo.key") + + connFn := func(clientCerts bool) func(net.Listener) (net.Conn, error) { + return func(lnReal net.Listener) (net.Conn, error) { + conf := &tls.Config{ + RootCAs: certPool, + } + if clientCerts { + conf.Certificates = []tls.Certificate{clientCert} + } + conn, err := tls.Dial("tcp", ln.Addr().String(), conf) + if err != nil { + return nil, err + } + if err = conn.Handshake(); err != nil { + return nil, err + } + return conn, nil + } + } + + testListenerImpl(t, ln, connFn(true), "foo.example.com", tls.VersionTLS13, "127.0.0.1", false) + + ln, _, _, err = tcpListenerFactory(&configutil.Listener{ + Address: "127.0.0.1:0", + TLSCertFile: wd + "reload_foo.pem", + TLSKeyFile: wd + "reload_foo.key", + TLSRequireAndVerifyClientCert: true, + TLSDisableClientCerts: true, + TLSClientCAFile: wd + "reload_ca.pem", + TLSMinVersion: "tls13", + }, nil, cli.NewMockUi()) + if err == nil { + t.Fatal("expected error due to mutually exclusive client cert options") + } + + ln, _, _, err = tcpListenerFactory(&configutil.Listener{ + Address: "127.0.0.1:0", + TLSCertFile: wd + "reload_foo.pem", + TLSKeyFile: wd + "reload_foo.key", + TLSDisableClientCerts: true, + TLSClientCAFile: wd + "reload_ca.pem", + TLSMinVersion: "tls13", + }, nil, cli.NewMockUi()) + if err != nil { + t.Fatalf("err: %s", err) + } + + testListenerImpl(t, ln, connFn(false), "foo.example.com", tls.VersionTLS13, "127.0.0.1", false) + + ln, _, _, err = tcpListenerFactory(&configutil.Listener{ + Address: "127.0.0.1:0", + 
TLSCertFile: wd + "reload_foo.pem", + TLSKeyFile: wd + "reload_foo.key", + TLSDisableClientCerts: true, + TLSClientCAFile: wd + "reload_ca.pem", + TLSMaxVersion: "tls12", + }, nil, cli.NewMockUi()) + if err != nil { + t.Fatalf("err: %s", err) + } + + testListenerImpl(t, ln, connFn(false), "foo.example.com", tls.VersionTLS12, "127.0.0.1", false) +} + +func TestTCPListener_proxyProtocol(t *testing.T) { + for name, tc := range map[string]struct { + Behavior string + Header *proxyproto.Header + AuthorizedAddr string + ExpectedAddr string + ExpectError bool + }{ + "none-no-header": { + Behavior: "", + ExpectedAddr: "127.0.0.1", + Header: nil, + }, + "none-v1": { + Behavior: "", + ExpectedAddr: "127.0.0.1", + ExpectError: true, + Header: &proxyproto.Header{ + Version: 1, + Command: proxyproto.PROXY, + TransportProtocol: proxyproto.TCPv4, + SourceAddr: &net.TCPAddr{ + IP: net.ParseIP("10.1.1.1"), + Port: 1000, + }, + DestinationAddr: &net.TCPAddr{ + IP: net.ParseIP("20.2.2.2"), + Port: 2000, + }, + }, + }, + "none-v2": { + Behavior: "", + ExpectedAddr: "127.0.0.1", + ExpectError: true, + Header: &proxyproto.Header{ + Version: 2, + Command: proxyproto.PROXY, + TransportProtocol: proxyproto.TCPv4, + SourceAddr: &net.TCPAddr{ + IP: net.ParseIP("10.1.1.1"), + Port: 1000, + }, + DestinationAddr: &net.TCPAddr{ + IP: net.ParseIP("20.2.2.2"), + Port: 2000, + }, + }, + }, + + // use_always makes it possible to send the PROXY header but does not + // require it + "use_always-no-header": { + Behavior: "use_always", + ExpectedAddr: "127.0.0.1", + Header: nil, + }, + + "use_always-header-v1": { + Behavior: "use_always", + ExpectedAddr: "10.1.1.1", + Header: &proxyproto.Header{ + Version: 1, + Command: proxyproto.PROXY, + TransportProtocol: proxyproto.TCPv4, + SourceAddr: &net.TCPAddr{ + IP: net.ParseIP("10.1.1.1"), + Port: 1000, + }, + DestinationAddr: &net.TCPAddr{ + IP: net.ParseIP("20.2.2.2"), + Port: 2000, + }, + }, + }, + "use_always-header-v1-unknown": { + Behavior: "use_always", + ExpectedAddr: "127.0.0.1", + Header: &proxyproto.Header{ + Version: 1, + Command: proxyproto.PROXY, + TransportProtocol: proxyproto.UNSPEC, + }, + }, + "use_always-header-v2": { + Behavior: "use_always", + ExpectedAddr: "10.1.1.1", + Header: &proxyproto.Header{ + Version: 2, + Command: proxyproto.PROXY, + TransportProtocol: proxyproto.TCPv4, + SourceAddr: &net.TCPAddr{ + IP: net.ParseIP("10.1.1.1"), + Port: 1000, + }, + DestinationAddr: &net.TCPAddr{ + IP: net.ParseIP("20.2.2.2"), + Port: 2000, + }, + }, + }, + "use_always-header-v2-unknown": { + Behavior: "use_always", + ExpectedAddr: "127.0.0.1", + Header: &proxyproto.Header{ + Version: 2, + Command: proxyproto.LOCAL, + TransportProtocol: proxyproto.UNSPEC, + }, + }, + "allow_authorized-no-header-in": { + Behavior: "allow_authorized", + AuthorizedAddr: "127.0.0.1/32", + ExpectedAddr: "127.0.0.1", + }, + "allow_authorized-no-header-not-in": { + Behavior: "allow_authorized", + AuthorizedAddr: "10.0.0.1/32", + ExpectedAddr: "127.0.0.1", + }, + "allow_authorized-v1-in": { + Behavior: "allow_authorized", + AuthorizedAddr: "127.0.0.1/32", + ExpectedAddr: "10.1.1.1", + Header: &proxyproto.Header{ + Version: 1, + Command: proxyproto.PROXY, + TransportProtocol: proxyproto.TCPv4, + SourceAddr: &net.TCPAddr{ + IP: net.ParseIP("10.1.1.1"), + Port: 1000, + }, + DestinationAddr: &net.TCPAddr{ + IP: net.ParseIP("20.2.2.2"), + Port: 2000, + }, + }, + }, + + // allow_authorized still accepts the PROXY header when not in the + // authorized addresses but discards it silently + 
"allow_authorized-v1-not-in": { + Behavior: "allow_authorized", + AuthorizedAddr: "10.0.0.1/32", + ExpectedAddr: "127.0.0.1", + Header: &proxyproto.Header{ + Version: 1, + Command: proxyproto.PROXY, + TransportProtocol: proxyproto.TCPv4, + SourceAddr: &net.TCPAddr{ + IP: net.ParseIP("10.1.1.1"), + Port: 1000, + }, + DestinationAddr: &net.TCPAddr{ + IP: net.ParseIP("20.2.2.2"), + Port: 2000, + }, + }, + }, + + "deny_unauthorized-no-header-in": { + Behavior: "deny_unauthorized", + AuthorizedAddr: "127.0.0.1/32", + ExpectedAddr: "127.0.0.1", + }, + "deny_unauthorized-no-header-not-in": { + Behavior: "deny_unauthorized", + AuthorizedAddr: "10.0.0.1/32", + ExpectedAddr: "127.0.0.1", + ExpectError: true, + }, + "deny_unauthorized-v1-in": { + Behavior: "deny_unauthorized", + AuthorizedAddr: "127.0.0.1/32", + ExpectedAddr: "10.1.1.1", + Header: &proxyproto.Header{ + Version: 1, + Command: proxyproto.PROXY, + TransportProtocol: proxyproto.TCPv4, + SourceAddr: &net.TCPAddr{ + IP: net.ParseIP("10.1.1.1"), + Port: 1000, + }, + DestinationAddr: &net.TCPAddr{ + IP: net.ParseIP("20.2.2.2"), + Port: 2000, + }, + }, + }, + "deny_unauthorized-v1-not-in": { + Behavior: "deny_unauthorized", + AuthorizedAddr: "10.0.0.1/32", + ExpectedAddr: "127.0.0.1", + ExpectError: true, + Header: &proxyproto.Header{ + Version: 1, + Command: proxyproto.PROXY, + TransportProtocol: proxyproto.TCPv4, + SourceAddr: &net.TCPAddr{ + IP: net.ParseIP("10.1.1.1"), + Port: 1000, + }, + DestinationAddr: &net.TCPAddr{ + IP: net.ParseIP("20.2.2.2"), + Port: 2000, + }, + }, + }, + } { + t.Run(name, func(t *testing.T) { + proxyProtocolAuthorizedAddrs := []*sockaddr.SockAddrMarshaler{} + if tc.AuthorizedAddr != "" { + sockAddr, err := sockaddr.NewSockAddr(tc.AuthorizedAddr) + if err != nil { + t.Fatal(err) + } + proxyProtocolAuthorizedAddrs = append( + proxyProtocolAuthorizedAddrs, + &sockaddr.SockAddrMarshaler{SockAddr: sockAddr}, + ) + } + + ln, _, _, err := tcpListenerFactory(&configutil.Listener{ + Address: "127.0.0.1:0", + TLSDisable: true, + ProxyProtocolBehavior: tc.Behavior, + ProxyProtocolAuthorizedAddrs: proxyProtocolAuthorizedAddrs, + }, nil, cli.NewMockUi()) + if err != nil { + t.Fatalf("err: %s", err) + } + + connFn := func(lnReal net.Listener) (net.Conn, error) { + conn, err := net.Dial("tcp", ln.Addr().String()) + if err != nil { + return nil, err + } + + if tc.Header != nil { + _, err = tc.Header.WriteTo(conn) + } + return conn, err + } + + testListenerImpl(t, ln, connFn, "", 0, tc.ExpectedAddr, tc.ExpectError) + }) + } +} diff --git a/command/server/listener_test.go b/command/server/listener_test.go new file mode 100644 index 0000000..f4d555c --- /dev/null +++ b/command/server/listener_test.go @@ -0,0 +1,103 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package server
+
+import (
+	"bytes"
+	"crypto/tls"
+	"io"
+	"net"
+	"testing"
+)
+
+type testListenerConnFn func(net.Listener) (net.Conn, error)
+
+func testListenerImpl(t *testing.T, ln net.Listener, connFn testListenerConnFn, certName string, expectedVersion uint16, expectedAddr string, expectError bool) {
+	serverCh := make(chan net.Conn, 1)
+	go func() {
+		server, err := ln.Accept()
+		if err != nil {
+			if !expectError {
+				t.Errorf("err: %s", err)
+			}
+			close(serverCh)
+			return
+		}
+		if certName != "" {
+			tlsConn := server.(*tls.Conn)
+			tlsConn.Handshake() // a handshake failure will also surface on the client side
+		}
+		serverCh <- server
+		if expectedAddr == "" {
+			return
+		}
+		addr, _, err := net.SplitHostPort(server.RemoteAddr().String())
+		if err != nil {
+			t.Error(err)
+		}
+		if addr != expectedAddr {
+			t.Errorf("expected: %s, got: %s", expectedAddr, addr)
+		}
+	}()
+
+	client, err := connFn(ln)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	if certName != "" {
+		tlsConn := client.(*tls.Conn)
+		if expectedVersion != 0 && tlsConn.ConnectionState().Version != expectedVersion {
+			t.Fatalf("expected version %d, got %d", expectedVersion, tlsConn.ConnectionState().Version)
+		}
+		if len(tlsConn.ConnectionState().PeerCertificates) != 1 {
+			t.Fatalf("expected exactly one peer certificate, got %d", len(tlsConn.ConnectionState().PeerCertificates))
+		}
+		peerName := tlsConn.ConnectionState().PeerCertificates[0].Subject.CommonName
+		if peerName != certName {
+			t.Fatalf("err: bad cert name %s, expected %s", peerName, certName)
+		}
+	}
+
+	server := <-serverCh
+
+	if server == nil {
+		if !expectError {
+			// Something failed already so we abort the test early
+			t.Fatal("aborting test because the server did not accept the connection")
+		}
+		return
+	}
+
+	defer client.Close()
+	defer server.Close()
+
+	var buf bytes.Buffer
+	copyCh := make(chan struct{})
+	go func() {
+		io.Copy(&buf, server)
+		close(copyCh)
+	}()
+
+	if _, err := client.Write([]byte("foo")); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	client.Close()
+
+	<-copyCh
+	if (buf.String() != "foo" && !expectError) || (buf.String() == "foo" && expectError) {
+		t.Fatalf("bad: %q, expectError: %t", buf.String(), expectError)
+	}
+}
+
+func TestProfilingUnauthenticatedInFlightAccess(t *testing.T) {
+	config, err := LoadConfigFile("./test-fixtures/unauth_in_flight_access.hcl")
+	if err != nil {
+		t.Fatalf("Error encountered when loading config %+v", err)
+	}
+	if !config.Listeners[0].InFlightRequestLogging.UnauthenticatedInFlightAccess {
+		t.Fatalf("failed to read UnauthenticatedInFlightAccess")
+	}
+}
diff --git a/command/server/listener_unix.go b/command/server/listener_unix.go
new file mode 100644
index 0000000..d5ea772
--- /dev/null
+++ b/command/server/listener_unix.go
@@ -0,0 +1,39 @@
+// Copyright (c) HashiCorp, Inc.
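+// listener_unix.go wires "unix" listener blocks to a socket listener. The
+// address defaults to /run/vault.sock, and mode/user/group are only applied
+// when all three are configured.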
+// SPDX-License-Identifier: MPL-2.0 + +package server + +import ( + "io" + "net" + + "github.com/hashicorp/go-secure-stdlib/reloadutil" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/internalshared/listenerutil" + "github.com/mitchellh/cli" +) + +func unixListenerFactory(l *configutil.Listener, _ io.Writer, ui cli.Ui) (net.Listener, map[string]string, reloadutil.ReloadFunc, error) { + addr := l.Address + if addr == "" { + addr = "/run/vault.sock" + } + + var cfg *listenerutil.UnixSocketsConfig + if l.SocketMode != "" && + l.SocketUser != "" && + l.SocketGroup != "" { + cfg = &listenerutil.UnixSocketsConfig{ + Mode: l.SocketMode, + User: l.SocketUser, + Group: l.SocketGroup, + } + } + + ln, err := listenerutil.UnixSocketListener(addr, cfg) + if err != nil { + return nil, nil, nil, err + } + + return ln, map[string]string{}, nil, nil +} diff --git a/command/server/listener_unix_test.go b/command/server/listener_unix_test.go new file mode 100644 index 0000000..91eaf12 --- /dev/null +++ b/command/server/listener_unix_test.go @@ -0,0 +1,28 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package server + +import ( + "net" + "path/filepath" + "testing" + + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/mitchellh/cli" +) + +func TestUnixListener(t *testing.T) { + ln, _, _, err := unixListenerFactory(&configutil.Listener{ + Address: filepath.Join(t.TempDir(), "/vault.sock"), + }, nil, cli.NewMockUi()) + if err != nil { + t.Fatalf("err: %s", err) + } + + connFn := func(lnReal net.Listener) (net.Conn, error) { + return net.Dial("unix", ln.Addr().String()) + } + + testListenerImpl(t, ln, connFn, "", 0, "", false) +} diff --git a/command/server/server_seal_transit_acc_test.go b/command/server/server_seal_transit_acc_test.go new file mode 100644 index 0000000..074893f --- /dev/null +++ b/command/server/server_seal_transit_acc_test.go @@ -0,0 +1,184 @@ +// Copyright (c) HashiCorp, Inc. 
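+// These acceptance tests start a real Vault server in Docker via the sdk
+// docker helper, so they assume a reachable Docker daemon and access to the
+// hashicorp/vault image mirror.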
+// SPDX-License-Identifier: MPL-2.0 + +package server + +import ( + "context" + "fmt" + "net/url" + "path" + "reflect" + "testing" + "time" + + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/sdk/helper/docker" +) + +func TestTransitWrapper_Lifecycle(t *testing.T) { + cleanup, config := prepareTestContainer(t) + defer cleanup() + + wrapperConfig := map[string]string{ + "address": config.URL().String(), + "token": config.token, + "mount_path": config.mountPath, + "key_name": config.keyName, + } + + kms, _, err := configutil.GetTransitKMSFunc(&configutil.KMS{Config: wrapperConfig}) + if err != nil { + t.Fatalf("error setting wrapper config: %v", err) + } + + // Test Encrypt and Decrypt calls + input := []byte("foo") + swi, err := kms.Encrypt(context.Background(), input, nil) + if err != nil { + t.Fatalf("err: %s", err.Error()) + } + + pt, err := kms.Decrypt(context.Background(), swi, nil) + if err != nil { + t.Fatalf("err: %s", err.Error()) + } + + if !reflect.DeepEqual(input, pt) { + t.Fatalf("expected %s, got %s", input, pt) + } +} + +func TestTransitSeal_TokenRenewal(t *testing.T) { + cleanup, config := prepareTestContainer(t) + defer cleanup() + + remoteClient, err := api.NewClient(config.apiConfig()) + if err != nil { + t.Fatalf("err: %s", err) + } + remoteClient.SetToken(config.token) + + req := &api.TokenCreateRequest{ + Period: "5s", + } + rsp, err := remoteClient.Auth().Token().Create(req) + if err != nil { + t.Fatalf("err: %s", err) + } + + wrapperConfig := map[string]string{ + "address": config.URL().String(), + "token": rsp.Auth.ClientToken, + "mount_path": config.mountPath, + "key_name": config.keyName, + } + kms, _, err := configutil.GetTransitKMSFunc(&configutil.KMS{Config: wrapperConfig}) + if err != nil { + t.Fatalf("error setting wrapper config: %v", err) + } + + time.Sleep(7 * time.Second) + + // Test Encrypt and Decrypt calls + input := []byte("foo") + swi, err := kms.Encrypt(context.Background(), input, nil) + if err != nil { + t.Fatalf("err: %s", err.Error()) + } + + pt, err := kms.Decrypt(context.Background(), swi, nil) + if err != nil { + t.Fatalf("err: %s", err.Error()) + } + + if !reflect.DeepEqual(input, pt) { + t.Fatalf("expected %s, got %s", input, pt) + } +} + +type DockerVaultConfig struct { + docker.ServiceURL + token string + mountPath string + keyName string + tlsConfig *api.TLSConfig +} + +func (c *DockerVaultConfig) apiConfig() *api.Config { + vaultConfig := api.DefaultConfig() + vaultConfig.Address = c.URL().String() + if err := vaultConfig.ConfigureTLS(c.tlsConfig); err != nil { + panic("unable to configure TLS") + } + + return vaultConfig +} + +var _ docker.ServiceConfig = &DockerVaultConfig{} + +func prepareTestContainer(t *testing.T) (func(), *DockerVaultConfig) { + rootToken, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("err: %s", err) + } + testMountPath, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("err: %s", err) + } + testKeyName, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("err: %s", err) + } + + runner, err := docker.NewServiceRunner(docker.RunOptions{ + ContainerName: "vault", + ImageRepo: "docker.mirror.hashicorp.services/hashicorp/vault", + ImageTag: "latest", + Cmd: []string{ + "server", "-log-level=trace", "-dev", fmt.Sprintf("-dev-root-token-id=%s", rootToken), + "-dev-listen-address=0.0.0.0:8200", + }, + Ports: []string{"8200/tcp"}, + }) + if err != nil { + t.Fatalf("could not start docker vault: 
%s", err) + } + + svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { + c := &DockerVaultConfig{ + ServiceURL: *docker.NewServiceURL(url.URL{Scheme: "http", Host: fmt.Sprintf("%s:%d", host, port)}), + tlsConfig: &api.TLSConfig{ + Insecure: true, + }, + token: rootToken, + mountPath: testMountPath, + keyName: testKeyName, + } + vault, err := api.NewClient(c.apiConfig()) + if err != nil { + return nil, err + } + vault.SetToken(rootToken) + + // Set up transit + if err := vault.Sys().Mount(testMountPath, &api.MountInput{ + Type: "transit", + }); err != nil { + return nil, err + } + + // Create default aesgcm key + if _, err := vault.Logical().Write(path.Join(testMountPath, "keys", testKeyName), map[string]interface{}{}); err != nil { + return nil, err + } + + return c, nil + }) + if err != nil { + t.Fatalf("could not start docker vault: %s", err) + } + return svc.Cleanup, svc.Config.(*DockerVaultConfig) +} diff --git a/command/server/test-fixtures/config-dir/bar.json b/command/server/test-fixtures/config-dir/bar.json new file mode 100644 index 0000000..48e5606 --- /dev/null +++ b/command/server/test-fixtures/config-dir/bar.json @@ -0,0 +1,14 @@ +{ + "ui":false, + + "listener": { + "tcp": { + "address": "127.0.0.1:443" + } + }, + + "max_lease_ttl": "10h", + + "api_addr": "https://vault.local", + "cluster_addr": "https://127.0.0.1:444" +} diff --git a/command/server/test-fixtures/config-dir/baz.hcl b/command/server/test-fixtures/config-dir/baz.hcl new file mode 100644 index 0000000..171a07d --- /dev/null +++ b/command/server/test-fixtures/config-dir/baz.hcl @@ -0,0 +1,17 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +telemetry { + statsd_address = "baz" + statsite_address = "qux" + disable_hostname = true + usage_gauge_period = "5m" + maximum_gauge_cardinality = 100 +} +sentinel { + additional_enabled_modules = ["http"] +} +ui=true +raw_storage_endpoint=true +default_lease_ttl = "10h" +cluster_name = "testcluster" diff --git a/command/server/test-fixtures/config-dir/foo.hcl b/command/server/test-fixtures/config-dir/foo.hcl new file mode 100644 index 0000000..0ef439a --- /dev/null +++ b/command/server/test-fixtures/config-dir/foo.hcl @@ -0,0 +1,12 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +disable_cache = true +disable_mlock = true + +backend "consul" { + foo = "bar" + disable_clustering = "true" +} + +disable_clustering = false diff --git a/command/server/test-fixtures/config.hcl b/command/server/test-fixtures/config.hcl new file mode 100644 index 0000000..2a53289 --- /dev/null +++ b/command/server/test-fixtures/config.hcl @@ -0,0 +1,54 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +disable_cache = true +disable_mlock = true + +ui = true + +listener "tcp" { + address = "127.0.0.1:443" +} + +backend "consul" { + foo = "bar" + advertise_addr = "foo" +} + +ha_backend "consul" { + bar = "baz" + advertise_addr = "snafu" + disable_clustering = "true" +} + +service_registration "consul" { + foo = "bar" +} + +telemetry { + statsd_address = "bar" + usage_gauge_period = "5m" + maximum_gauge_cardinality = 100 + + statsite_address = "foo" + dogstatsd_addr = "127.0.0.1:7254" + dogstatsd_tags = ["tag_1:val_1", "tag_2:val_2"] + metrics_prefix = "myprefix" + bad_value = "shouldn't be here" +} + +sentinel { + additional_enabled_modules = [] +} + +max_lease_ttl = "10h" +default_lease_ttl = "10h" +cluster_name = "testcluster" +pid_file = "./pidfile" +raw_storage_endpoint = true +introspection_endpoint = true +disable_sealwrap = true +disable_printable_check = true +enable_response_header_hostname = true +enable_response_header_raft_node_id = true +license_path = "/path/to/license" \ No newline at end of file diff --git a/command/server/test-fixtures/config.hcl.json b/command/server/test-fixtures/config.hcl.json new file mode 100644 index 0000000..92c1986 --- /dev/null +++ b/command/server/test-fixtures/config.hcl.json @@ -0,0 +1,34 @@ +{ + "listener": [{ + "tcp": { + "address": "127.0.0.1:443" + } + }], + "cluster_cipher_suites": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + "storage": { + "consul": { + "foo": "bar", + "disable_clustering": "true" + } + }, + "service_registration": { + "consul": { + "foo": "bar" + } + }, + "telemetry": { + "statsite_address": "baz", + "usage_gauge_period": "5m", + "maximum_gauge_cardinality": 100 + }, + "sentinel": { + "additional_enabled_modules": [] + }, + "max_lease_ttl": "10h", + "default_lease_ttl": "10h", + "cluster_name":"testcluster", + "ui":true, + "pid_file":"./pidfile", + "raw_storage_endpoint":true, + "disable_sealwrap":true +} diff --git a/command/server/test-fixtures/config2.hcl b/command/server/test-fixtures/config2.hcl new file mode 100644 index 0000000..4d9cdf7 --- /dev/null +++ b/command/server/test-fixtures/config2.hcl @@ -0,0 +1,65 @@ +# Copyright (c) HashiCorp, Inc. 
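+# Fixture for testLoadConfigFile_topLevel; the kms/seal blocks cover the
+# no-purpose, single-string, comma-separated, and list purpose forms.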
+# SPDX-License-Identifier: MPL-2.0 + +disable_cache = true +disable_mlock = true + +ui = true + +api_addr = "top_level_api_addr" +cluster_addr = "top_level_cluster_addr" + +listener "tcp" { + address = "127.0.0.1:443" +} + +storage "consul" { + foo = "bar" + redirect_addr = "foo" +} + +ha_storage "consul" { + bar = "baz" + redirect_addr = "snafu" + disable_clustering = "true" +} + +service_registration "consul" { + foo = "bar" +} + +telemetry { + statsd_address = "bar" + usage_gauge_period = "5m" + maximum_gauge_cardinality = 125 + statsite_address = "foo" + dogstatsd_addr = "127.0.0.1:7254" + dogstatsd_tags = ["tag_1:val_1", "tag_2:val_2"] + prometheus_retention_time = "30s" +} + +entropy "seal" { + mode = "augmentation" +} + +sentinel { + additional_enabled_modules = [] +} +kms "commastringpurpose" { + purpose = "foo,bar" +} +kms "slicepurpose" { + purpose = ["zip", "zap"] +} +seal "nopurpose" { +} +seal "stringpurpose" { + purpose = "foo" +} + +max_lease_ttl = "10h" +default_lease_ttl = "10h" +cluster_name = "testcluster" +pid_file = "./pidfile" +raw_storage_endpoint = true +disable_sealwrap = true diff --git a/command/server/test-fixtures/config2.hcl.json b/command/server/test-fixtures/config2.hcl.json new file mode 100644 index 0000000..6010061 --- /dev/null +++ b/command/server/test-fixtures/config2.hcl.json @@ -0,0 +1,64 @@ +{ + "ui":true, + "raw_storage_endpoint":true, + "disable_sealwrap":true, + "listener":[ + { + "tcp":{ + "address":"127.0.0.1:443" + } + }, + { + "tcp":{ + "address":"127.0.0.1:444" + } + } + ], + "storage":{ + "consul":{ + "foo":"bar" + } + }, + "ha_storage":{ + "consul":{ + "bar":"baz", + "disable_clustering": "true" + } + }, + "service_registration":{ + "consul":{ + "foo":"bar" + } + }, + "cache_size": 45678, + "telemetry":{ + "statsd_address":"bar", + "statsite_address":"foo", + "disable_hostname":true, + "usage_gauge_period": "5m", + "maximum_gauge_cardinality": 125, + + "circonus_api_token": "0", + "circonus_api_app": "vault", + "circonus_api_url": "http://api.circonus.com/v2", + "circonus_submission_interval": "10s", + "circonus_submission_url": "https://someplace.com/metrics", + "circonus_check_id": "0", + "circonus_check_force_metric_activation": "true", + "circonus_check_instance_id": "node1:vault", + "circonus_check_search_tag": "service:vault", + "circonus_check_display_name": "node1:vault", + "circonus_check_tags": "cat1:tag1,cat2:tag2", + "circonus_broker_id": "0", + "circonus_broker_select_tag": "dc:sfo", + "prometheus_retention_time": "30s" + }, + "sentinel": { + "additional_enabled_modules": ["http"] + }, + "entropy": { + "seal": { + "mode": "augmentation" + } + } +} diff --git a/command/server/test-fixtures/config3.hcl b/command/server/test-fixtures/config3.hcl new file mode 100644 index 0000000..87adb96 --- /dev/null +++ b/command/server/test-fixtures/config3.hcl @@ -0,0 +1,58 @@ +# Copyright (c) HashiCorp, Inc. 
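+# Adds consul tokens, log_requests_level, an awskms seal with inline
+# test-only credentials, and a trailing administrative_namespace_path.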
+# SPDX-License-Identifier: MPL-2.0 + +disable_cache = true +disable_mlock = true +log_requests_level = "Basic" + +ui = true + +api_addr = "top_level_api_addr" +cluster_addr = "top_level_cluster_addr" + +listener "tcp" { + address = "127.0.0.1:443" +} + +backend "consul" { + advertise_addr = "foo" + token = "foo" +} + +ha_backend "consul" { + bar = "baz" + advertise_addr = "snafu" + disable_clustering = "true" + token = "foo" +} + +service_registration "consul" { + token = "foo" +} + +telemetry { + statsd_address = "bar" + circonus_api_token = "baz" + metrics_prefix = "pfx" + usage_gauge_period = "5m" + maximum_gauge_cardinality = 100 +} + +sentinel { + additional_enabled_modules = ["http"] +} + +seal "awskms" { + region = "us-east-1" + access_key = "AKIAIOSFODNN7EXAMPLE" + secret_key = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" +} + +max_lease_ttl = "30d" +default_lease_ttl = "365d" +cluster_name = "testcluster" +pid_file = "./pidfile" +raw_storage_endpoint = true +disable_sealwrap = true +disable_sentinel_trace = true +administrative_namespace_path = "admin/" diff --git a/command/server/test-fixtures/config4.hcl b/command/server/test-fixtures/config4.hcl new file mode 100644 index 0000000..be49453 --- /dev/null +++ b/command/server/test-fixtures/config4.hcl @@ -0,0 +1,20 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +disable_cache = true +disable_mlock = true +ui = true + +listener "tcp" { + address = "127.0.0.1:8200" +} + +storage "raft" { + path = "/storage/path/raft" + node_id = "raft1" + performance_multiplier = 1 + foo = "bar" + baz = true +} + +cluster_addr = "127.0.0.1:8201" diff --git a/command/server/test-fixtures/config4.hcl.json b/command/server/test-fixtures/config4.hcl.json new file mode 100644 index 0000000..e0ca7e8 --- /dev/null +++ b/command/server/test-fixtures/config4.hcl.json @@ -0,0 +1,20 @@ +{ + "disable_cache": true, + "disable_mlock": true, + "ui":true, + "listener": [{ + "tcp": { + "address": "127.0.0.1:8200" + } + }], + "storage": { + "raft": { + "path": "/storage/path/raft", + "node_id": "raft1", + "performance_multiplier": 1, + "foo": "bar", + "baz": true + } + }, + "cluster_addr": "127.0.0.1:8201" +} diff --git a/command/server/test-fixtures/config5.hcl b/command/server/test-fixtures/config5.hcl new file mode 100644 index 0000000..3f5b246 --- /dev/null +++ b/command/server/test-fixtures/config5.hcl @@ -0,0 +1,54 @@ +# Copyright (c) HashiCorp, Inc. 
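+# Like config.hcl but with the lease-metrics telemetry options and an
+# unrecognized listener key (allow_stuff), presumably to check how unknown
+# listener fields are handled.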
+# SPDX-License-Identifier: MPL-2.0 + +disable_cache = true + disable_mlock = true + + ui = true + + listener "tcp" { + address = "127.0.0.1:443" + allow_stuff = true + } + + backend "consul" { + foo = "bar" + advertise_addr = "foo" + } + + ha_backend "consul" { + bar = "baz" + advertise_addr = "snafu" + disable_clustering = "true" + } + + service_registration "consul" { + foo = "bar" + } + + telemetry { + statsd_address = "bar" + usage_gauge_period = "5m" + maximum_gauge_cardinality = 100 + + statsite_address = "foo" + dogstatsd_addr = "127.0.0.1:7254" + dogstatsd_tags = ["tag_1:val_1", "tag_2:val_2"] + metrics_prefix = "myprefix" + + lease_metrics_epsilon = "1h" + num_lease_metrics_buckets = 2 + add_lease_metrics_namespace_labels = true + } + + sentinel { + additional_enabled_modules = [] + } + + max_lease_ttl = "10h" + default_lease_ttl = "10h" + cluster_name = "testcluster" + pid_file = "./pidfile" + raw_storage_endpoint = true + disable_sealwrap = true + disable_printable_check = true \ No newline at end of file diff --git a/command/server/test-fixtures/config_bad_https_storage.hcl b/command/server/test-fixtures/config_bad_https_storage.hcl new file mode 100644 index 0000000..b53673c --- /dev/null +++ b/command/server/test-fixtures/config_bad_https_storage.hcl @@ -0,0 +1,55 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +disable_cache = true +disable_mlock = true + +ui = true + +listener "tcp" { + address = "127.0.0.1:1027" + tls_disable = true +} + +backend "consul" { + address = "127.0.0.1:8500" + foo = "bar" + advertise_addr = "foo" + scheme = "https" + tls_cert_file = "./../vault/diagnose/test-fixtures/expiredcert.pem" + tls_key_file = "./../vault/diagnose/test-fixtures/expiredprivatekey.pem" +} + +ha_backend "consul" { + address = "127.0.0.1:8500" + bar = "baz" + advertise_addr = "https://127.0.0.1:8500" + disable_clustering = "true" +} + +service_registration "consul" { + foo = "bar" +} + +telemetry { + statsd_address = "bar" + usage_gauge_period = "5m" + maximum_gauge_cardinality = 100 + + statsite_address = "foo" + dogstatsd_addr = "127.0.0.1:7254" + dogstatsd_tags = ["tag_1:val_1", "tag_2:val_2"] + metrics_prefix = "myprefix" +} + +sentinel { + additional_enabled_modules = [] +} + +max_lease_ttl = "10h" +default_lease_ttl = "10h" +cluster_name = "testcluster" +pid_file = "./pidfile" +raw_storage_endpoint = true +disable_sealwrap = true +disable_printable_check = true diff --git a/command/server/test-fixtures/config_custom_response_headers_1.hcl b/command/server/test-fixtures/config_custom_response_headers_1.hcl new file mode 100644 index 0000000..bc458da --- /dev/null +++ b/command/server/test-fixtures/config_custom_response_headers_1.hcl @@ -0,0 +1,34 @@ +# Copyright (c) HashiCorp, Inc. 
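+# Defines custom_response_headers at the "default", status-class ("2xx",
+# "3xx"), and exact-status ("200", "307", "400") levels, presumably to
+# exercise the documented precedence: exact status over class over default.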
+# SPDX-License-Identifier: MPL-2.0 + +storage "inmem" {} +listener "tcp" { + address = "127.0.0.1:8200" + tls_disable = true + custom_response_headers { + "default" = { + "Strict-Transport-Security" = ["max-age=1","domains"], + "Content-Security-Policy" = ["default-src 'others'"], + "X-Vault-Ignored" = ["ignored"], + "X-Custom-Header" = ["Custom header value default"], + } + "307" = { + "X-Custom-Header" = ["Custom header value 307"], + } + "3xx" = { + "X-Vault-Ignored-3xx" = ["Ignored 3xx"], + "X-Custom-Header" = ["Custom header value 3xx"] + } + "200" = { + "someheader-200" = ["200"], + "X-Custom-Header" = ["Custom header value 200"] + } + "2xx" = { + "X-Custom-Header" = ["Custom header value 2xx"] + } + "400" = { + "someheader-400" = ["400"] + } + } +} +disable_mlock = true diff --git a/command/server/test-fixtures/config_custom_response_headers_multiple_listeners.hcl b/command/server/test-fixtures/config_custom_response_headers_multiple_listeners.hcl new file mode 100644 index 0000000..9ae5b40 --- /dev/null +++ b/command/server/test-fixtures/config_custom_response_headers_multiple_listeners.hcl @@ -0,0 +1,59 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +storage "inmem" {} +listener "tcp" { + address = "127.0.0.1:8200" + tls_disable = true + custom_response_headers { + "default" = { + "Content-Security-Policy" = ["default-src 'others'"], + "X-Vault-Ignored" = ["ignored"], + "X-Custom-Header" = ["Custom header value default"], + } + "307" = { + "X-Custom-Header" = ["Custom header value 307"], + } + "3xx" = { + "X-Vault-Ignored-3xx" = ["Ignored 3xx"], + "X-Custom-Header" = ["Custom header value 3xx"] + } + "200" = { + "someheader-200" = ["200"], + "X-Custom-Header" = ["Custom header value 200"] + } + "2xx" = { + "X-Custom-Header" = ["Custom header value 2xx"] + } + "400" = { + "someheader-400" = ["400"] + } + } +} +listener "tcp" { + address = "127.0.0.2:8200" + tls_disable = true + custom_response_headers { + "default" = { + "Content-Security-Policy" = ["default-src 'others'"], + "X-Vault-Ignored" = ["ignored"], + "X-Custom-Header" = ["Custom header value default"], + } + } +} +listener "tcp" { + address = "127.0.0.3:8200" + tls_disable = true + custom_response_headers { + "2xx" = { + "X-Custom-Header" = ["Custom header value 2xx"] + } + } +} +listener "tcp" { + address = "127.0.0.4:8200" + tls_disable = true +} + + +disable_mlock = true diff --git a/command/server/test-fixtures/config_diagnose_hastorage_bad_https.hcl b/command/server/test-fixtures/config_diagnose_hastorage_bad_https.hcl new file mode 100644 index 0000000..9e1ac5e --- /dev/null +++ b/command/server/test-fixtures/config_diagnose_hastorage_bad_https.hcl @@ -0,0 +1,57 @@ +# Copyright (c) HashiCorp, Inc. 
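+# Points the ha_backend at an HTTPS Consul address using an expired
+# certificate/key pair, giving the diagnose command a TLS failure to report
+# on the HA storage check.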
+# SPDX-License-Identifier: MPL-2.0 + +disable_cache = true +disable_mlock = true + +ui = true + +listener "tcp" { + address = "127.0.0.1:1028" + tls_disable = true +} + +backend "consul" { + foo = "bar" + advertise_addr = "foo" + address = "http://remoteconsulserverIP:1028" +} + +ha_backend "consul" { + bar = "baz" + address = "https://remoteconsulserverIP:1028" + advertise_addr = "snafu" + disable_clustering = "true" + scheme = "https" + tls_cert_file = "./../vault/diagnose/test-fixtures/expiredcert.pem" + tls_key_file = "./../vault/diagnose/test-fixtures/expiredprivatekey.pem" +} + +service_registration "consul" { + foo = "bar" + address = "127.0.0.1:1028" + +} + +telemetry { + statsd_address = "bar" + usage_gauge_period = "5m" + maximum_gauge_cardinality = 100 + + statsite_address = "foo" + dogstatsd_addr = "127.0.0.1:7254" + dogstatsd_tags = ["tag_1:val_1", "tag_2:val_2"] + metrics_prefix = "myprefix" +} + +sentinel { + additional_enabled_modules = [] +} + +max_lease_ttl = "10h" +default_lease_ttl = "10h" +cluster_name = "testcluster" +pid_file = "./pidfile" +raw_storage_endpoint = true +disable_sealwrap = true +disable_printable_check = true diff --git a/command/server/test-fixtures/config_diagnose_ok.hcl b/command/server/test-fixtures/config_diagnose_ok.hcl new file mode 100644 index 0000000..0b903ee --- /dev/null +++ b/command/server/test-fixtures/config_diagnose_ok.hcl @@ -0,0 +1,47 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +disable_cache = true +disable_mlock = true + +ui = true + +listener "tcp" { + address = "127.0.0.1:1024" + tls_disable = true +} + +backend "consul" { + address = "127.0.0.1:1025" +} + +ha_backend "consul" { + address = "127.0.0.1:8500" + bar = "baz" + advertise_addr = "https://127.0.0.1:8500" + disable_clustering = "true" +} + +service_registration "consul" { + address = "127.0.0.1:8500" + foo = "bar" +} + +telemetry { + statsd_address = "bar" + usage_gauge_period = "5m" + maximum_gauge_cardinality = 100 + + statsite_address = "foo" + dogstatsd_addr = "127.0.0.1:7254" + dogstatsd_tags = ["tag_1:val_1", "tag_2:val_2"] + metrics_prefix = "myprefix" +} + +max_lease_ttl = "10h" +default_lease_ttl = "10h" +cluster_name = "testcluster" +pid_file = "./pidfile" +raw_storage_endpoint = true +disable_sealwrap = true +disable_printable_check = true diff --git a/command/server/test-fixtures/config_raft.hcl b/command/server/test-fixtures/config_raft.hcl new file mode 100644 index 0000000..7cb585b --- /dev/null +++ b/command/server/test-fixtures/config_raft.hcl @@ -0,0 +1,41 @@ +# Copyright (c) HashiCorp, Inc. 
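+# Minimal integrated-storage (raft) configuration; note the raft stanza is
+# paired with an explicit cluster_addr below.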
+# SPDX-License-Identifier: MPL-2.0 + +disable_cache = true +disable_mlock = true + +ui = true + +listener "tcp" { + address = "127.0.0.1:1024" + tls_disable = true +} + +storage "raft" { + path = "./server/test-fixtures/" + node_id = "raft_node_1" +} +cluster_addr = "http://127.0.0.1:8201" + +telemetry { + statsd_address = "bar" + usage_gauge_period = "5m" + maximum_gauge_cardinality = 100 + + statsite_address = "foo" + dogstatsd_addr = "127.0.0.1:7254" + dogstatsd_tags = ["tag_1:val_1", "tag_2:val_2"] + metrics_prefix = "myprefix" +} + +sentinel { + additional_enabled_modules = [] +} + +max_lease_ttl = "10h" +default_lease_ttl = "10h" +cluster_name = "testcluster" +pid_file = "./pidfile" +raw_storage_endpoint = true +disable_sealwrap = true +disable_printable_check = true diff --git a/command/server/test-fixtures/config_seals.hcl b/command/server/test-fixtures/config_seals.hcl new file mode 100644 index 0000000..6fdd133 --- /dev/null +++ b/command/server/test-fixtures/config_seals.hcl @@ -0,0 +1,37 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +listener "tcp" { + address = "127.0.0.1:443" +} + +backend "consul" { +} + +seal "pkcs11" { + purpose = "many,purposes" + lib = "/usr/lib/libcklog2.so" + slot = "0.0" + pin = "XXXXXXXX" + key_label = "HASHICORP" + mechanism = "0x1082" + hmac_mechanism = "0x0251" + hmac_key_label = "vault-hsm-hmac-key" + default_hmac_key_label = "vault-hsm-hmac-key" + generate_key = "true" +} + +seal "pkcs11" { + purpose = "single" + disabled = "true" + lib = "/usr/lib/libcklog2.so" + slot = "0.0" + pin = "XXXXXXXX" + key_label = "HASHICORP" + mechanism = 0x1082 + hmac_mechanism = 0x0251 + hmac_key_label = "vault-hsm-hmac-key" + default_hmac_key_label = "vault-hsm-hmac-key" + generate_key = "true" +} + diff --git a/command/server/test-fixtures/config_small.hcl b/command/server/test-fixtures/config_small.hcl new file mode 100644 index 0000000..a8e3c7a --- /dev/null +++ b/command/server/test-fixtures/config_small.hcl @@ -0,0 +1,18 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +storage "raft" { + path = "/path/to/raft" + node_id = "raft_node_1" +} +listener "tcp" { + address = "127.0.0.1:8200" + tls_cert_file = "/path/to/cert.pem" + tls_key_file = "/path/to/key.key" +} +seal "awskms" { + kms_key_id = "alias/kms-unseal-key" +} +service_registration "consul" { + address = "127.0.0.1:8500" +} diff --git a/command/server/test-fixtures/config_small.json b/command/server/test-fixtures/config_small.json new file mode 100644 index 0000000..b9366bd --- /dev/null +++ b/command/server/test-fixtures/config_small.json @@ -0,0 +1,31 @@ +{ + "listener": { + "tcp": { + "address": "0.0.0.0:8200", + "tls_cert_file": "/path/to/cert.pem", + "tls_key_file": "/path/to/key.key" + } + }, + + "seal": { + "awskms": { + "kms_key_id": "alias/kms-unseal-key" + } + }, + + "storage": { + "raft": { + "path": "/path/to/raft", + "node_id": "raft_node_1" + } + }, + + "cluster_addr": "http://127.0.0.1:8201", + "api_addr": "http://127.0.0.1:8200", + + "service_registration": { + "consul": { + "address": "127.0.0.1:8500" + } + } +} \ No newline at end of file diff --git a/command/server/test-fixtures/config_with_valid_admin_ns.hcl b/command/server/test-fixtures/config_with_valid_admin_ns.hcl new file mode 100644 index 0000000..312a42a --- /dev/null +++ b/command/server/test-fixtures/config_with_valid_admin_ns.hcl @@ -0,0 +1,19 @@ +# Copyright (c) HashiCorp, Inc. 
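+# Identical to config_small.hcl apart from the trailing
+# administrative_namespace_path, covering the valid-namespace parse path.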
+# SPDX-License-Identifier: MPL-2.0 + +storage "raft" { + path = "/path/to/raft" + node_id = "raft_node_1" +} +listener "tcp" { + address = "127.0.0.1:8200" + tls_cert_file = "/path/to/cert.pem" + tls_key_file = "/path/to/key.key" +} +seal "awskms" { + kms_key_id = "alias/kms-unseal-key" +} +service_registration "consul" { + address = "127.0.0.1:8500" +} +administrative_namespace_path = "admin/" \ No newline at end of file diff --git a/command/server/test-fixtures/config_with_valid_admin_ns.json b/command/server/test-fixtures/config_with_valid_admin_ns.json new file mode 100644 index 0000000..9f60413 --- /dev/null +++ b/command/server/test-fixtures/config_with_valid_admin_ns.json @@ -0,0 +1,28 @@ +{ + "listener": { + "tcp": { + "address": "0.0.0.0:8200", + "tls_cert_file": "/path/to/cert.pem", + "tls_key_file": "/path/to/key.key" + } + }, + "seal": { + "awskms": { + "kms_key_id": "alias/kms-unseal-key" + } + }, + "storage": { + "raft": { + "path": "/path/to/raft", + "node_id": "raft_node_1" + } + }, + "cluster_addr": "http://127.0.0.1:8201", + "api_addr": "http://127.0.0.1:8200", + "service_registration": { + "consul": { + "address": "127.0.0.1:8500" + } + }, + "administrative_namespace_path": "admin/" +} \ No newline at end of file diff --git a/command/server/test-fixtures/diagnose_bad_https_consul_sr.hcl b/command/server/test-fixtures/diagnose_bad_https_consul_sr.hcl new file mode 100644 index 0000000..bc5a71e --- /dev/null +++ b/command/server/test-fixtures/diagnose_bad_https_consul_sr.hcl @@ -0,0 +1,44 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +disable_cache = true +disable_mlock = true + +ui = true + +listener "tcp" { + address = "127.0.0.1:1029" + tls_disable = true +} + +backend "consul" { + foo = "bar" + advertise_addr = "foo" + address = "127.0.0.1:8500" +} + +ha_backend "consul" { + bar = "baz" + advertise_addr = "snafu" + disable_clustering = "true" + address = "127.0.0.1:8500" +} + +service_registration "consul" { + address = "https://consulserverIP:8500" + foo = "bar" + tls_cert_file = "./../vault/diagnose/test-fixtures/expiredcert.pem" + tls_key_file = "./../vault/diagnose/test-fixtures/expiredprivatekey.pem" +} + +sentinel { + additional_enabled_modules = [] +} + +max_lease_ttl = "10h" +default_lease_ttl = "10h" +cluster_name = "testcluster" +pid_file = "./pidfile" +raw_storage_endpoint = true +disable_sealwrap = true +disable_printable_check = true diff --git a/command/server/test-fixtures/diagnose_bad_telemetry1.hcl b/command/server/test-fixtures/diagnose_bad_telemetry1.hcl new file mode 100644 index 0000000..a634b16 --- /dev/null +++ b/command/server/test-fixtures/diagnose_bad_telemetry1.hcl @@ -0,0 +1,21 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +disable_cache = true +disable_mlock = true +ui = true + +listener "tcp" { + address = "127.0.0.1:8200" +} + +backend "consul" { + advertise_addr = "foo" + token = "foo" +} + +telemetry { + circonus_check_id = "bar" +} + +cluster_addr = "127.0.0.1:8201" diff --git a/command/server/test-fixtures/diagnose_bad_telemetry2.hcl b/command/server/test-fixtures/diagnose_bad_telemetry2.hcl new file mode 100644 index 0000000..afb195d --- /dev/null +++ b/command/server/test-fixtures/diagnose_bad_telemetry2.hcl @@ -0,0 +1,21 @@ +# Copyright (c) HashiCorp, Inc. 
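+# Sets dogstatsd_tags without a dogstatsd_addr, an inconsistent telemetry
+# combination the config/diagnose checks are expected to flag.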
+# SPDX-License-Identifier: MPL-2.0 + +disable_cache = true +disable_mlock = true +ui = true + +listener "tcp" { + address = "127.0.0.1:8200" +} + +backend "consul" { + advertise_addr = "foo" + token = "foo" +} + +telemetry { + dogstatsd_tags = ["bar"] +} + +cluster_addr = "127.0.0.1:8201" diff --git a/command/server/test-fixtures/diagnose_bad_telemetry3.hcl b/command/server/test-fixtures/diagnose_bad_telemetry3.hcl new file mode 100644 index 0000000..422351f --- /dev/null +++ b/command/server/test-fixtures/diagnose_bad_telemetry3.hcl @@ -0,0 +1,21 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +disable_cache = true +disable_mlock = true +ui = true + +listener "tcp" { + address = "127.0.0.1:8200" +} + +backend "consul" { + advertise_addr = "foo" + token = "foo" +} + +telemetry { + stackdriver_namespace = "bar" +} + +cluster_addr = "127.0.0.1:8201" diff --git a/command/server/test-fixtures/diagnose_ok_storage_direct_access.hcl b/command/server/test-fixtures/diagnose_ok_storage_direct_access.hcl new file mode 100644 index 0000000..cda9e2a --- /dev/null +++ b/command/server/test-fixtures/diagnose_ok_storage_direct_access.hcl @@ -0,0 +1,30 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +disable_cache = true +disable_mlock = true + +ui = true + +listener "tcp" { + address = "127.0.0.1:1024" + tls_disable = true +} + +backend "consul" { + address = "consulserver:8500" + foo = "bar" + advertise_addr = "foo" +} + +ha_backend "consul" { + address = "127.0.0.1:1024" + bar = "baz" + advertise_addr = "https://127.0.0.1:8500" + disable_clustering = "true" +} + +service_registration "consul" { + address = "127.0.0.1:8500" + foo = "bar" +} \ No newline at end of file diff --git a/command/server/test-fixtures/diagnose_raft_no_bolt_folder.hcl b/command/server/test-fixtures/diagnose_raft_no_bolt_folder.hcl new file mode 100644 index 0000000..e28c1bc --- /dev/null +++ b/command/server/test-fixtures/diagnose_raft_no_bolt_folder.hcl @@ -0,0 +1,11 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +storage "raft" { + path = "/path/to/raft/data" + node_id = "raft_node_1" +} + +api_addr = "http://127.0.0.1:8200" +cluster_addr = "https://127.0.0.1:8201" +ui = true diff --git a/command/server/test-fixtures/diagnose_seal_transit_tls_check.hcl b/command/server/test-fixtures/diagnose_seal_transit_tls_check.hcl new file mode 100644 index 0000000..0c0edcf --- /dev/null +++ b/command/server/test-fixtures/diagnose_seal_transit_tls_check.hcl @@ -0,0 +1,59 @@ +# Copyright (c) HashiCorp, Inc. 
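+# Configures a transit seal whose TLS client material points at the diagnose
+# test certificates (note the harmless doubled slash in the tls_client_key
+# path); the paths are resolved relative to the test's working directory.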
+# SPDX-License-Identifier: MPL-2.0 + +disable_cache = true +disable_mlock = true + +ui = true + +listener "tcp" { + address = "127.0.0.1:1024" + tls_disable = true +} + +backend "consul" { + address = "127.0.0.1:8500" + foo = "bar" + advertise_addr = "foo" +} + +seal "transit" { + + // TLS Configuration + tls_ca_cert = "./../vault/diagnose/test-fixtures/chain.crt.pem" + tls_client_cert = "./../vault/diagnose/test-fixtures/goodcertwithroot.pem" + tls_client_key = "./../vault/diagnose//test-fixtures/goodkey.pem" + tls_server_name = "vault" + tls_skip_verify = "false" +} + +ha_backend "consul" { + address = "127.0.0.1:8500" + bar = "baz" + advertise_addr = "https://127.0.0.1:8500" + disable_clustering = "true" +} + +service_registration "consul" { + address = "127.0.0.1:8500" + foo = "bar" +} + +telemetry { + statsd_address = "bar" + usage_gauge_period = "5m" + maximum_gauge_cardinality = 100 + + statsite_address = "foo" + dogstatsd_addr = "127.0.0.1:7254" + dogstatsd_tags = ["tag_1:val_1", "tag_2:val_2"] + metrics_prefix = "myprefix" +} + +max_lease_ttl = "10h" +default_lease_ttl = "10h" +cluster_name = "testcluster" +pid_file = "./pidfile" +raw_storage_endpoint = true +disable_sealwrap = true +disable_printable_check = true diff --git a/command/server/test-fixtures/hcp_link_config.hcl b/command/server/test-fixtures/hcp_link_config.hcl new file mode 100644 index 0000000..1a909e3 --- /dev/null +++ b/command/server/test-fixtures/hcp_link_config.hcl @@ -0,0 +1,14 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +storage "inmem" {} +listener "tcp" { + address = "127.0.0.1:8200" + tls_disable = true +} +cloud { + resource_id = "organization/bc58b3d0-2eab-4ab8-abf4-f61d3c9975ff/project/1c78e888-2142-4000-8918-f933bbbc7690/hashicorp.example.resource/example" + client_id = "J2TtcSYOyPUkPV2z0mSyDtvitxLVjJmu" + client_secret = "N9JtHZyOnHrIvJZs82pqa54vd4jnkyU3xCcqhFXuQKJZZuxqxxbP1xCfBZVB82vY" +} +disable_mlock = true \ No newline at end of file diff --git a/command/server/test-fixtures/nostore_config.hcl b/command/server/test-fixtures/nostore_config.hcl new file mode 100644 index 0000000..a80e385 --- /dev/null +++ b/command/server/test-fixtures/nostore_config.hcl @@ -0,0 +1,20 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +disable_cache = true +disable_mlock = true + +ui = true + +listener "tcp" { + address = "127.0.0.1:1024" + tls_disable = true +} + +ha_backend "consul" { + bar = "baz" + advertise_addr = "snafu" + disable_clustering = "true" +} + +// No backend stanza in config! diff --git a/command/server/test-fixtures/raft_retry_join.hcl b/command/server/test-fixtures/raft_retry_join.hcl new file mode 100644 index 0000000..6f7fe9e --- /dev/null +++ b/command/server/test-fixtures/raft_retry_join.hcl @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. 
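+# retry_join takes a list of objects, one leader_api_addr per potential
+# leader, as shown below.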
+# SPDX-License-Identifier: MPL-2.0 + +storage "raft" { + path = "/storage/path/raft" + node_id = "raft1" + retry_join = [ + { + "leader_api_addr" = "http://127.0.0.1:8200" + }, + { + "leader_api_addr" = "http://127.0.0.2:8200" + }, + { + "leader_api_addr" = "http://127.0.0.3:8200" + } + ] +} +listener "tcp" { + address = "127.0.0.1:8200" +} +disable_mlock = true diff --git a/command/server/test-fixtures/raft_storage_file.db b/command/server/test-fixtures/raft_storage_file.db new file mode 100644 index 0000000..e69de29 diff --git a/command/server/test-fixtures/reload/reload_bar.key b/command/server/test-fixtures/reload/reload_bar.key new file mode 100644 index 0000000..10849fb --- /dev/null +++ b/command/server/test-fixtures/reload/reload_bar.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAwF7sRAyUiLcd6es6VeaTRUBOusFFGkmKJ5lU351waCJqXFju +Z6i/SQYNAAnnRgotXSTE1fIPjE2kZNH1hvqE5IpTGgAwy50xpjJrrBBI6e9lyKqj +7T8gLVNBvtC0cpQi+pGrszEI0ckDQCSZHqi/PAzcpmLUgh2KMrgagT+YlN35KHtl +/bQ/Fsn+kqykVqNw69n/CDKNKdDHn1qPwiX9q/fTMj3EG6g+3ntKrUOh8V/gHKPz +q8QGP/wIud2K+tTSorVXr/4zx7xgzlbJkCakzcQQiP6K+paPnDRlE8fK+1gRRyR7 +XCzyp0irUl8G1NjYAR/tVWxiUhlk/jZutb8PpwIDAQABAoIBAEOzJELuindyujxQ +ZD9G3h1I/GwNCFyv9Mbq10u7BIwhUH0fbwdcA7WXQ4v38ERd4IkfH4aLoZ0m1ewF +V/sgvxQO+h/0YTfHImny5KGxOXfaoF92bipYROKuojydBmQsbgLwsRRm9UufCl3Q +g3KewG5JuH112oPQEYq379v8nZ4FxC3Ano1OFBTm9UhHIAX1Dn22kcHOIIw8jCsQ +zp7TZOW+nwtkS41cBwhvV4VIeL6yse2UgbOfRVRwI7B0OtswS5VgW3wysO2mTDKt +V/WCmeht1il/6ZogEHgi/mvDCKpj20wQ1EzGnPdFLdiFJFylf0oufQD/7N/uezbC +is0qJEECgYEA3AE7SeLpe3SZApj2RmE2lcD9/Saj1Y30PznxB7M7hK0sZ1yXEbtS +Qf894iDDD/Cn3ufA4xk/K52CXgAcqvH/h2geG4pWLYsT1mdWhGftprtOMCIvJvzU +8uWJzKdOGVMG7R59wNgEpPDZDpBISjexwQsFo3aw1L/H1/Sa8cdY3a0CgYEA39hB +1oLmGRyE32Q4GF/srG4FqKL1EsbISGDUEYTnaYg2XiM43gu3tC/ikfclk27Jwc2L +m7cA5FxxaEyfoOgfAizfU/uWTAbx9GoXgWsO0hWSN9+YNq61gc5WKoHyrJ/rfrti +y5d7k0OCeBxckLqGDuJqICQ0myiz0El6FU8h5SMCgYEAuhigmiNC9JbwRu40g9v/ +XDVfox9oPmBRVpogdC78DYKeqN/9OZaGQiUxp3GnDni2xyqqUm8srCwT9oeJuF/z +kgpUTV96/hNCuH25BU8UC5Es1jJUSFpdlwjqwx5SRcGhfjnojZMseojwUg1h2MW7 +qls0bc0cTxnaZaYW2qWRWhECgYBrT0cwyQv6GdvxJCBoPwQ9HXmFAKowWC+H0zOX +Onmd8/jsZEJM4J0uuo4Jn8vZxBDg4eL9wVuiHlcXwzP7dYv4BP8DSechh2rS21Ft +b59pQ4IXWw+jl1nYYsyYEDgAXaIN3VNder95N7ICVsZhc6n01MI/qlu1zmt1fOQT +9x2utQKBgHI9SbsfWfbGiu6oLS3+9V1t4dORhj8D8b7z3trvECrD6tPhxoZqtfrH +4apKr3OKRSXk3K+1K6pkMHJHunspucnA1ChXLhzfNF08BSRJkQDGYuaRLS6VGgab +JZTl54bGvO1GkszEBE/9QFcqNVtWGMWXnUPwNNv8t//yJT5rvQil +-----END RSA PRIVATE KEY----- diff --git a/command/server/test-fixtures/reload/reload_bar.pem b/command/server/test-fixtures/reload/reload_bar.pem new file mode 100644 index 0000000..a8217be --- /dev/null +++ b/command/server/test-fixtures/reload/reload_bar.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDQzCCAiugAwIBAgIULLCz3mZKmg2xy3rWCud0f1zcmBwwDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNjQ0WhcNMzYw +MzA1MDEzNzE0WjAaMRgwFgYDVQQDEw9iYXIuZXhhbXBsZS5jb20wggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAXuxEDJSItx3p6zpV5pNFQE66wUUaSYon +mVTfnXBoImpcWO5nqL9JBg0ACedGCi1dJMTV8g+MTaRk0fWG+oTkilMaADDLnTGm +MmusEEjp72XIqqPtPyAtU0G+0LRylCL6kauzMQjRyQNAJJkeqL88DNymYtSCHYoy +uBqBP5iU3fkoe2X9tD8Wyf6SrKRWo3Dr2f8IMo0p0MefWo/CJf2r99MyPcQbqD7e +e0qtQ6HxX+Aco/OrxAY//Ai53Yr61NKitVev/jPHvGDOVsmQJqTNxBCI/or6lo+c +NGUTx8r7WBFHJHtcLPKnSKtSXwbU2NgBH+1VbGJSGWT+Nm61vw+nAgMBAAGjgYQw +gYEwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBSVoF8F +7qbzSryIFrldurAG78LvSjAfBgNVHSMEGDAWgBRzDNvqF/Tq21OgWs13B5YydZjl +vzAgBgNVHREEGTAXgg9iYXIuZXhhbXBsZS5jb22HBH8AAAEwDQYJKoZIhvcNAQEL 
+BQADggEBAGmz2N282iT2IaEZvOmzIE4znHGkvoxZmrr/2byq5PskBg9ysyCHfUvw +SFA8U7jWjezKTnGRUu5blB+yZdjrMtB4AePWyEqtkJwVsZ2SPeP+9V2gNYK4iktP +UF3aIgBbAbw8rNuGIIB0T4D+6Zyo9Y3MCygs6/N4bRPZgLhewWn1ilklfnl3eqaC +a+JY1NBuTgCMa28NuC+Hy3mCveqhI8tFNiOthlLdgAEbuQaOuNutAG73utZ2aq6Q +W4pajFm3lEf5zt7Lo6ZCFtY/Q8jjURJ9e4O7VjXcqIhBM5bSMI6+fgQyOH0SLboj +RNanJ2bcyF1iPVyPBGzV3dF0ngYzxEY= +-----END CERTIFICATE----- diff --git a/command/server/test-fixtures/reload/reload_ca.pem b/command/server/test-fixtures/reload/reload_ca.pem new file mode 100644 index 0000000..72a7444 --- /dev/null +++ b/command/server/test-fixtures/reload/reload_ca.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDNTCCAh2gAwIBAgIUBeVo+Ce2BrdRT1cogKvJLtdOky8wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNTM4WhcNMzYw +MzA1MDIzNjA4WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAPTQGWPRIOECGeJB6tR/ftvvtioC9f84fY2QdJ5k +JBupXjPAGYKgS4MGzyT5bz9yY400tCtmh6h7p9tZwHl/TElTugtLQ/8ilMbJTiOM +SiyaMDPHiMJJYKTjm9bu6bKeU1qPZ0Cryes4rygbqs7w2XPgA2RxNmDh7JdX7/h+ +VB5onBmv8g4WFSayowGyDcJWWCbu5yv6ZdH1bqQjgRzQ5xp17WXNmvlzdp2vate/ +9UqPdA8sdJzW/91Gvmros0o/FnG7c2pULhk22wFqO8t2HRjKb3nuxALEJvqoPvad +KjpDTaq1L1ZzxcB7wvWyhy/lNLZL7jiNWy0mN1YB0UpSWdECAwEAAaN7MHkwDgYD +VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFHMM2+oX9Orb +U6BazXcHljJ1mOW/MB8GA1UdIwQYMBaAFHMM2+oX9OrbU6BazXcHljJ1mOW/MBYG +A1UdEQQPMA2CC2V4YW1wbGUuY29tMA0GCSqGSIb3DQEBCwUAA4IBAQAp17XsOaT9 +hculRqrFptn3+zkH3HrIckHm+28R5xYT8ASFXFcLFugGizJAXVL5lvsRVRIwCoOX +Nhi8XSNEFP640VbHcEl81I84bbRIIDS+Yheu6JDZGemTaDYLv1J3D5SHwgoM+nyf +oTRgotUCIXcwJHmTpWEUkZFKuqBxsoTGzk0jO8wOP6xoJkzxVVG5PvNxs924rxY8 +Y8iaLdDfMeT7Pi0XIliBa/aSp/iqSW8XKyJl5R5vXg9+DOgZUrVzIxObaF5RBl/a +mJOeklJBdNVzQm5+iMpO42lu0TA9eWtpP+YiUEXU17XDvFeQWOocFbQ1Peo0W895 +XRz2GCwCNyvW +-----END CERTIFICATE----- diff --git a/command/server/test-fixtures/reload/reload_foo.key b/command/server/test-fixtures/reload/reload_foo.key new file mode 100644 index 0000000..86e6cce --- /dev/null +++ b/command/server/test-fixtures/reload/reload_foo.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpgIBAAKCAQEAzNyVieSti9XBb5/celB5u8YKRJv3mQS9A4/X0mqY1ePznt1i +ilG7OmG0yM2VAk0ceIAQac3Bsn74jxn2cDlrrVniPXcNgYtMtW0kRqNEo4doo4EX +xZguS9vNBu29useHhif1TGX/pA3dgvaVycUCjzTEVk6qI8UEehMK6gEGZb7nOr0A +A9nipSqoeHpDLe3a4KVqj1vtlJKUvD2i1MuBuQ130cB1K9rufLCShGu7mEgzEosc +gr+K3Bf03IejbeVRyIfLtgj1zuvV1katec75UqRA/bsvt5G9JfJqiZ9mwFN0vp3g +Cr7pdQBSBQ2q4yf9s8CuY5c5w9fl3F8f5QFQoQIDAQABAoIBAQCbCb1qNFRa5ZSV +I8i6ELlwMDqJHfhOJ9XcIjpVljLAfNlcu3Ld92jYkCU/asaAjVckotbJG9yhd5Io +yp9E40/oS4P6vGTOS1vsWgMAKoPBtrKsOwCAm+E9q8UIn1fdSS/5ibgM74x+3bds +a62Em8KKGocUQkhk9a+jq1GxMsFisbHRxEHvClLmDMgGnW3FyGmWwT6yZLPSC0ey +szmmjt3ouP8cLAOmSjzcQBMmEZpQMCgR6Qckg6nrLQAGzZyTdCd875wbGA57DpWX +Lssn95+A5EFvr/6b7DkXeIFCrYBFFa+UQN3PWGEQ6Zjmiw4VgV2vO8yX2kCLlUhU +02bL393ZAoGBAPXPD/0yWINbKUPcRlx/WfWQxfz0bu50ytwIXzVK+pRoAMuNqehK +BJ6kNzTTBq40u+IZ4f5jbLDulymR+4zSkirLE7CyWFJOLNI/8K4Pf5DJUgNdrZjJ +LCtP9XRdxiPatQF0NGfdgHlSJh+/CiRJP4AgB17AnB/4z9/M0ZlJGVrzAoGBANVa +69P3Rp/WPBQv0wx6f0tWppJolWekAHKcDIdQ5HdOZE5CPAYSlTrTUW3uJuqMwU2L +M0Er2gIPKWIR5X+9r7Fvu9hQW6l2v3xLlcrGPiapp3STJvuMxzhRAmXmu3bZfVn1 +Vn7Vf1jPULHtTFSlNFEvYG5UJmygK9BeyyVO5KMbAoGBAMCyAibLQPg4jrDUDZSV +gUAwrgUO2ae1hxHWvkxY6vdMUNNByuB+pgB3W4/dnm8Sh/dHsxJpftt1Lqs39ar/ +p/ZEHLt4FCTxg9GOrm7FV4t5RwG8fko36phJpnIC0UFqQltRbYO+8OgqrhhU+u5X +PaCDe0OcWsf1lYAsYGN6GpZhAoGBAMJ5Ksa9+YEODRs1cIFKUyd/5ztC2xRqOAI/ +3WemQ2nAacuvsfizDZVeMzYpww0+maAuBt0btI719PmwaGmkpDXvK+EDdlmkpOwO 
+FY6MXvBs6fdnfjwCWUErDi2GQFAX9Jt/9oSL5JU1+08DhvUM1QA/V/2Y9KFE6kr3 +bOIn5F4LAoGBAKQzH/AThDGhT3hwr4ktmReF3qKxBgxzjVa8veXtkY5VWwyN09iT +jnTTt6N1CchZoK5WCETjdzNYP7cuBTcV4d3bPNRiJmxXaNVvx3Tlrk98OiffT8Qa +5DO/Wfb43rNHYXBjU6l0n2zWcQ4PUSSbu0P0bM2JTQPRCqSthXvSHw2P +-----END RSA PRIVATE KEY----- diff --git a/command/server/test-fixtures/reload/reload_foo.pem b/command/server/test-fixtures/reload/reload_foo.pem new file mode 100644 index 0000000..c8b868b --- /dev/null +++ b/command/server/test-fixtures/reload/reload_foo.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDQzCCAiugAwIBAgIUFVW6i/M+yJUsDrXWgRKO/Dnb+L4wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMzEwMDIzNjA1WhcNMzYw +MzA1MDEzNjM1WjAaMRgwFgYDVQQDEw9mb28uZXhhbXBsZS5jb20wggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDM3JWJ5K2L1cFvn9x6UHm7xgpEm/eZBL0D +j9fSapjV4/Oe3WKKUbs6YbTIzZUCTRx4gBBpzcGyfviPGfZwOWutWeI9dw2Bi0y1 +bSRGo0Sjh2ijgRfFmC5L280G7b26x4eGJ/VMZf+kDd2C9pXJxQKPNMRWTqojxQR6 +EwrqAQZlvuc6vQAD2eKlKqh4ekMt7drgpWqPW+2UkpS8PaLUy4G5DXfRwHUr2u58 +sJKEa7uYSDMSixyCv4rcF/Tch6Nt5VHIh8u2CPXO69XWRq15zvlSpED9uy+3kb0l +8mqJn2bAU3S+neAKvul1AFIFDarjJ/2zwK5jlznD1+XcXx/lAVChAgMBAAGjgYQw +gYEwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBRNJoOJ +dnazDiuqLhV6truQ4cRe9jAfBgNVHSMEGDAWgBRzDNvqF/Tq21OgWs13B5YydZjl +vzAgBgNVHREEGTAXgg9mb28uZXhhbXBsZS5jb22HBH8AAAEwDQYJKoZIhvcNAQEL +BQADggEBAHzv67mtbxMWcuMsxCFBN1PJNAyUDZVCB+1gWhk59EySbVg81hWJDCBy +fl3TKjz3i7wBGAv+C2iTxmwsSJbda22v8JQbuscXIfLFbNALsPzF+J0vxAgJs5Gc +sDbfJ7EQOIIOVKQhHLYnQoLnigSSPc1kd0JjYyHEBjgIaSuXgRRTBAeqLiBMx0yh +RKL1lQ+WoBU/9SXUZZkwokqWt5G7khi5qZkNxVXZCm8VGPg0iywf6gGyhI1SU5S2 +oR219S6kA4JY/stw1qne85/EmHmoImHGt08xex3GoU72jKAjsIpqRWopcD/+uene +Tc9nn3fTQW/Z9fsoJ5iF5OdJnDEswqE= +-----END CERTIFICATE----- diff --git a/command/server/test-fixtures/storage-listener-config.json b/command/server/test-fixtures/storage-listener-config.json new file mode 100644 index 0000000..505c3b0 --- /dev/null +++ b/command/server/test-fixtures/storage-listener-config.json @@ -0,0 +1,17 @@ +{ + "api_addr": "https://localhost:8200", + "default_lease_ttl": "6h", + "disable_mlock": true, + "listener": { + "tcp": { + "address": "0.0.0.0:8200" + } + }, + "log_level": "info", + "storage": { + "consul": { + "address": "127.0.0.1:8500" + } + }, + "ui": true +} \ No newline at end of file diff --git a/command/server/test-fixtures/telemetry/filter_default_override.hcl b/command/server/test-fixtures/telemetry/filter_default_override.hcl new file mode 100644 index 0000000..4fc70e9 --- /dev/null +++ b/command/server/test-fixtures/telemetry/filter_default_override.hcl @@ -0,0 +1,10 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +disable_mlock = true +ui = true + +telemetry { + statsd_address = "foo" + filter_default = false +} \ No newline at end of file diff --git a/command/server/test-fixtures/telemetry/valid_prefix_filter.hcl b/command/server/test-fixtures/telemetry/valid_prefix_filter.hcl new file mode 100644 index 0000000..055f122 --- /dev/null +++ b/command/server/test-fixtures/telemetry/valid_prefix_filter.hcl @@ -0,0 +1,10 @@ +# Copyright (c) HashiCorp, Inc. 
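+# prefix_filter entries are prefixed with "+" (include) or "-" (exclude);
+# here vault.expire is excluded as a whole while one of its sub-metrics is
+# re-included.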
+# SPDX-License-Identifier: MPL-2.0 + +disable_mlock = true +ui = true + +telemetry { + statsd_address = "foo" + prefix_filter = ["-vault.expire", "-vault.audit", "+vault.expire.num_irrevocable_leases"] +} \ No newline at end of file diff --git a/command/server/test-fixtures/tls_config_ok.hcl b/command/server/test-fixtures/tls_config_ok.hcl new file mode 100644 index 0000000..4cbd4fa --- /dev/null +++ b/command/server/test-fixtures/tls_config_ok.hcl @@ -0,0 +1,54 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +disable_cache = true +disable_mlock = true + +ui = true + +listener "tcp" { + address = "127.0.0.1:1025" + tls_cert_file = "./../api/test-fixtures/keys/cert.pem" + tls_key_file = "./../api/test-fixtures/keys/key.pem" +} + +backend "consul" { + foo = "bar" + advertise_addr = "foo" + address = "127.0.0.1:8500" +} + +ha_backend "consul" { + bar = "baz" + advertise_addr = "http://blah:8500" + disable_clustering = "true" + address = "127.0.0.1:8500" +} + +service_registration "consul" { + foo = "bar" + address = "127.0.0.1:8500" +} + +telemetry { + statsd_address = "bar" + usage_gauge_period = "5m" + maximum_gauge_cardinality = 100 + + statsite_address = "foo" + dogstatsd_addr = "127.0.0.1:7254" + dogstatsd_tags = ["tag_1:val_1", "tag_2:val_2"] + metrics_prefix = "myprefix" +} + +sentinel { + additional_enabled_modules = [] +} + +max_lease_ttl = "10h" +default_lease_ttl = "10h" +cluster_name = "testcluster" +pid_file = "./pidfile" +raw_storage_endpoint = true +disable_sealwrap = true +disable_printable_check = true diff --git a/command/server/test-fixtures/unauth_in_flight_access.hcl b/command/server/test-fixtures/unauth_in_flight_access.hcl new file mode 100644 index 0000000..c191f13 --- /dev/null +++ b/command/server/test-fixtures/unauth_in_flight_access.hcl @@ -0,0 +1,12 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +storage "inmem" {} +listener "tcp" { + address = "127.0.0.1:8200" + tls_disable = true + inflight_requests_logging { + unauthenticated_in_flight_requests_access = true + } +} +disable_mlock = true diff --git a/command/server/tls_util.go b/command/server/tls_util.go new file mode 100644 index 0000000..a038c2a --- /dev/null +++ b/command/server/tls_util.go @@ -0,0 +1,176 @@ +// Copyright (c) HashiCorp, Inc. 
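+// This file generates throwaway TLS material for Vault's dev-style
+// listeners. A minimal usage sketch (an assumed pairing, inferred from the
+// signatures below; certPEM/keyPEM are illustrative names):
+//
+//   ca, err := GenerateCA()
+//   if err != nil {
+//       // handle err
+//   }
+//   certPEM, keyPEM, err := GenerateCert(ca.Template, ca.Signer)
+//
+// GenerateCert signs a leaf certificate for localhost/127.0.0.1 with the
+// CA's signer, so a client that trusts ca.PEM can verify the listener.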
+// SPDX-License-Identifier: MPL-2.0 + +package server + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "net" + "os" + "time" + + "github.com/hashicorp/vault/sdk/helper/certutil" +) + +type CaCert struct { + PEM string + Template *x509.Certificate + Signer crypto.Signer +} + +// GenerateCert creates a new leaf cert from provided CA template and signer +func GenerateCert(caCertTemplate *x509.Certificate, caSigner crypto.Signer) (string, string, error) { + // Create the private key + signer, keyPEM, err := privateKey() + if err != nil { + return "", "", fmt.Errorf("error generating private key for server certificate: %v", err) + } + + // The serial number for the cert + sn, err := serialNumber() + if err != nil { + return "", "", fmt.Errorf("error generating serial number: %v", err) + } + + signerKeyId, err := certutil.GetSubjKeyID(signer) + if err != nil { + return "", "", fmt.Errorf("error getting subject key id from key: %v", err) + } + + hostname, err := os.Hostname() + if err != nil { + return "", "", fmt.Errorf("error getting hostname: %v", err) + } + + if hostname == "" { + hostname = "localhost" + } + + // Create the leaf cert + template := x509.Certificate{ + SerialNumber: sn, + Subject: pkix.Name{CommonName: hostname}, + KeyUsage: x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + NotAfter: time.Now().Add(365 * 24 * time.Hour), + NotBefore: time.Now().Add(-1 * time.Minute), + IPAddresses: []net.IP{net.ParseIP("127.0.0.1")}, + DNSNames: []string{"localhost", "localhost4", "localhost6", "localhost.localdomain"}, + AuthorityKeyId: caCertTemplate.AuthorityKeyId, + SubjectKeyId: signerKeyId, + } + + // Only add our hostname to SANs if it isn't found. + foundHostname := false + for _, value := range template.DNSNames { + if value == hostname { + foundHostname = true + break + } + } + if !foundHostname { + template.DNSNames = append(template.DNSNames, hostname) + } + + bs, err := x509.CreateCertificate( + rand.Reader, &template, caCertTemplate, signer.Public(), caSigner) + if err != nil { + return "", "", fmt.Errorf("error creating server certificate: %v", err) + } + var buf bytes.Buffer + err = pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: bs}) + if err != nil { + return "", "", fmt.Errorf("error encoding server certificate: %v", err) + } + + return buf.String(), keyPEM, nil +} + +// GenerateCA generates a new self-signed CA cert and returns a +// CaCert struct containing the PEM encoded cert, +// X509 Certificate Template, and crypto.Signer +func GenerateCA() (*CaCert, error) { + // Create the private key we'll use for this CA cert. 
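+ // The PEM encoding is intentionally discarded (blank identifier): only
+ // the in-memory crypto.Signer is needed later, when GenerateCert signs
+ // leaf certificates against this CA.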
+ signer, _, err := privateKey() + if err != nil { + return nil, fmt.Errorf("error generating private key for CA: %v", err) + } + + signerKeyId, err := certutil.GetSubjKeyID(signer) + if err != nil { + return nil, fmt.Errorf("error getting subject key id from key: %v", err) + } + + // The serial number for the cert + sn, err := serialNumber() + if err != nil { + return nil, fmt.Errorf("error generating serial number: %v", err) + } + + // Create the CA cert + template := x509.Certificate{ + SerialNumber: sn, + Subject: pkix.Name{CommonName: "Vault Dev CA"}, + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + IsCA: true, + NotAfter: time.Now().Add(10 * 365 * 24 * time.Hour), + NotBefore: time.Now().Add(-1 * time.Minute), + AuthorityKeyId: signerKeyId, + SubjectKeyId: signerKeyId, + } + + bs, err := x509.CreateCertificate( + rand.Reader, &template, &template, signer.Public(), signer) + if err != nil { + return nil, fmt.Errorf("error creating CA certificate: %v", err) + } + + var buf bytes.Buffer + err = pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: bs}) + if err != nil { + return nil, fmt.Errorf("error encoding CA certificate: %v", err) + } + return &CaCert{ + PEM: buf.String(), + Template: &template, + Signer: signer, + }, nil +} + +// privateKey returns a new ECDSA-based private key. Both a crypto.Signer +// and the key in PEM format are returned. +func privateKey() (crypto.Signer, string, error) { + pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return nil, "", err + } + + bs, err := x509.MarshalECPrivateKey(pk) + if err != nil { + return nil, "", err + } + + var buf bytes.Buffer + err = pem.Encode(&buf, &pem.Block{Type: "EC PRIVATE KEY", Bytes: bs}) + if err != nil { + return nil, "", err + } + + return pk, buf.String(), nil +} + +// serialNumber generates a new random serial number. +func serialNumber() (*big.Int, error) { + return rand.Int(rand.Reader, (&big.Int{}).Exp(big.NewInt(2), big.NewInt(159), nil)) +} diff --git a/command/server_noprofile.go b/command/server_noprofile.go new file mode 100644 index 0000000..1cf516a --- /dev/null +++ b/command/server_noprofile.go @@ -0,0 +1,9 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !memprofiler + +package command + +func (c *ServerCommand) startMemProfiler() { +} diff --git a/command/server_profile.go b/command/server_profile.go new file mode 100644 index 0000000..42f07d2 --- /dev/null +++ b/command/server_profile.go @@ -0,0 +1,43 @@ +// Copyright (c) HashiCorp, Inc. 
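+// This file is compiled only when the memprofiler build tag is set (see the
+// go:build constraint below); server_noprofile.go supplies the no-op
+// counterpart for regular builds.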
+// SPDX-License-Identifier: MPL-2.0 + +//go:build memprofiler + +package command + +import ( + "os" + "path/filepath" + "runtime" + "runtime/pprof" + "time" +) + +func init() { + memProfilerEnabled = true +} + +func (c *ServerCommand) startMemProfiler() { + profileDir := filepath.Join(os.TempDir(), "vaultprof") + if err := os.MkdirAll(profileDir, 0o700); err != nil { + c.logger.Debug("could not create profile directory", "error", err) + return + } + + go func() { + for { + filename := filepath.Join(profileDir, time.Now().UTC().Format("20060102_150405")) + ".pprof" + f, err := os.Create(filename) + if err != nil { + c.logger.Debug("could not create memory profile", "error", err) + } + runtime.GC() + if err := pprof.WriteHeapProfile(f); err != nil { + c.logger.Debug("could not write memory profile", "error", err) + } + f.Close() + c.logger.Debug("wrote memory profile", "filename", filename) + time.Sleep(5 * time.Minute) + } + }() +} diff --git a/command/server_test.go b/command/server_test.go new file mode 100644 index 0000000..bfe5b14 --- /dev/null +++ b/command/server_test.go @@ -0,0 +1,395 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !race && !hsm && !fips_140_3 + +// NOTE: we can't use this with HSM. We can't set testing mode on and it's not +// safe to use env vars since that provides an attack vector in the real world. +// +// The server tests have a go-metrics/exp manager race condition :(. + +package command + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "os" + "strings" + "sync" + "testing" + "time" + + "github.com/hashicorp/vault/sdk/physical" + physInmem "github.com/hashicorp/vault/sdk/physical/inmem" + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" +) + +func init() { + if signed := os.Getenv("VAULT_LICENSE_CI"); signed != "" { + os.Setenv(EnvVaultLicense, signed) + } +} + +func testBaseHCL(tb testing.TB, listenerExtras string) string { + tb.Helper() + + return strings.TrimSpace(fmt.Sprintf(` + disable_mlock = true + listener "tcp" { + address = "127.0.0.1:%d" + tls_disable = "true" + %s + } + `, 0, listenerExtras)) +} + +const ( + goodListenerTimeouts = `http_read_header_timeout = 12 + http_read_timeout = "34s" + http_write_timeout = "56m" + http_idle_timeout = "78h"` + + badListenerReadHeaderTimeout = `http_read_header_timeout = "12km"` + badListenerReadTimeout = `http_read_timeout = "34日"` + badListenerWriteTimeout = `http_write_timeout = "56lbs"` + badListenerIdleTimeout = `http_idle_timeout = "78gophers"` + + inmemHCL = ` +backend "inmem_ha" { + advertise_addr = "http://127.0.0.1:8200" +} +` + haInmemHCL = ` +ha_backend "inmem_ha" { + redirect_addr = "http://127.0.0.1:8200" +} +` + + badHAInmemHCL = ` +ha_backend "inmem" {} +` + + reloadHCL = ` +backend "inmem" {} +disable_mlock = true +listener "tcp" { + address = "127.0.0.1:8203" + tls_cert_file = "TMPDIR/reload_cert.pem" + tls_key_file = "TMPDIR/reload_key.pem" +} +` + cloudHCL = ` +cloud { + resource_id = "organization/bc58b3d0-2eab-4ab8-abf4-f61d3c9975ff/project/1c78e888-2142-4000-8918-f933bbbc7690/hashicorp.example.resource/example" + client_id = "J2TtcSYOyPUkPV2z0mSyDtvitxLVjJmu" + client_secret = "N9JtHZyOnHrIvJZs82pqa54vd4jnkyU3xCcqhFXuQKJZZuxqxxbP1xCfBZVB82vY" +} +` +) + +func testServerCommand(tb testing.TB) (*cli.MockUi, *ServerCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &ServerCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + ShutdownCh: MakeShutdownCh(), + SighupCh: MakeSighupCh(), + SigUSR2Ch: 
MakeSigUSR2Ch(), + PhysicalBackends: map[string]physical.Factory{ + "inmem": physInmem.NewInmem, + "inmem_ha": physInmem.NewInmemHA, + }, + + // These prevent us from random sleep guessing... + startedCh: make(chan struct{}, 5), + reloadedCh: make(chan struct{}, 5), + licenseReloadedCh: make(chan error), + } +} + +func TestServer_ReloadListener(t *testing.T) { + t.Parallel() + + wd, _ := os.Getwd() + wd += "/server/test-fixtures/reload/" + + td, err := ioutil.TempDir("", "vault-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(td) + + wg := &sync.WaitGroup{} + // Setup initial certs + inBytes, _ := ioutil.ReadFile(wd + "reload_foo.pem") + ioutil.WriteFile(td+"/reload_cert.pem", inBytes, 0o777) + inBytes, _ = ioutil.ReadFile(wd + "reload_foo.key") + ioutil.WriteFile(td+"/reload_key.pem", inBytes, 0o777) + + relhcl := strings.ReplaceAll(reloadHCL, "TMPDIR", td) + ioutil.WriteFile(td+"/reload.hcl", []byte(relhcl), 0o777) + + inBytes, _ = ioutil.ReadFile(wd + "reload_ca.pem") + certPool := x509.NewCertPool() + ok := certPool.AppendCertsFromPEM(inBytes) + if !ok { + t.Fatal("not ok when appending CA cert") + } + + ui, cmd := testServerCommand(t) + _ = ui + + wg.Add(1) + args := []string{"-config", td + "/reload.hcl"} + go func() { + if code := cmd.Run(args); code != 0 { + output := ui.ErrorWriter.String() + ui.OutputWriter.String() + t.Errorf("got a non-zero exit status: %s", output) + } + wg.Done() + }() + + testCertificateName := func(cn string) error { + conn, err := tls.Dial("tcp", "127.0.0.1:8203", &tls.Config{ + RootCAs: certPool, + }) + if err != nil { + return err + } + defer conn.Close() + if err = conn.Handshake(); err != nil { + return err + } + servName := conn.ConnectionState().PeerCertificates[0].Subject.CommonName + if servName != cn { + return fmt.Errorf("expected %s, got %s", cn, servName) + } + return nil + } + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Fatalf("timeout") + } + + if err := testCertificateName("foo.example.com"); err != nil { + t.Fatalf("certificate name didn't check out: %s", err) + } + + relhcl = strings.ReplaceAll(reloadHCL, "TMPDIR", td) + inBytes, _ = ioutil.ReadFile(wd + "reload_bar.pem") + ioutil.WriteFile(td+"/reload_cert.pem", inBytes, 0o777) + inBytes, _ = ioutil.ReadFile(wd + "reload_bar.key") + ioutil.WriteFile(td+"/reload_key.pem", inBytes, 0o777) + ioutil.WriteFile(td+"/reload.hcl", []byte(relhcl), 0o777) + + cmd.SighupCh <- struct{}{} + select { + case <-cmd.reloadedCh: + case <-time.After(5 * time.Second): + t.Fatalf("timeout") + } + + if err := testCertificateName("bar.example.com"); err != nil { + t.Fatalf("certificate name didn't check out: %s", err) + } + + cmd.ShutdownCh <- struct{}{} + + wg.Wait() +} + +func TestServer(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + contents string + exp string + code int + args []string + }{ + { + "common_ha", + testBaseHCL(t, "") + inmemHCL, + "(HA available)", + 0, + []string{"-test-verify-only"}, + }, + { + "separate_ha", + testBaseHCL(t, "") + inmemHCL + haInmemHCL, + "HA Storage:", + 0, + []string{"-test-verify-only"}, + }, + { + "bad_separate_ha", + testBaseHCL(t, "") + inmemHCL + badHAInmemHCL, + "Specified HA storage does not support HA", + 1, + []string{"-test-verify-only"}, + }, + { + "good_listener_timeout_config", + testBaseHCL(t, goodListenerTimeouts) + inmemHCL, + "", + 0, + []string{"-test-server-config"}, + }, + { + "bad_listener_read_header_timeout_config", + testBaseHCL(t, badListenerReadHeaderTimeout) + inmemHCL, + 
"unknown unit \"km\" in duration \"12km\"", + 1, + []string{"-test-server-config"}, + }, + { + "bad_listener_read_timeout_config", + testBaseHCL(t, badListenerReadTimeout) + inmemHCL, + "unknown unit \"\\xe6\\x97\\xa5\" in duration", + 1, + []string{"-test-server-config"}, + }, + { + "bad_listener_write_timeout_config", + testBaseHCL(t, badListenerWriteTimeout) + inmemHCL, + "unknown unit \"lbs\" in duration \"56lbs\"", + 1, + []string{"-test-server-config"}, + }, + { + "bad_listener_idle_timeout_config", + testBaseHCL(t, badListenerIdleTimeout) + inmemHCL, + "unknown unit \"gophers\" in duration \"78gophers\"", + 1, + []string{"-test-server-config"}, + }, + { + "environment_variables_logged", + testBaseHCL(t, "") + inmemHCL, + "Environment Variables", + 0, + []string{"-test-verify-only"}, + }, + { + "cloud_config", + testBaseHCL(t, "") + inmemHCL + cloudHCL, + "HCP Organization: bc58b3d0-2eab-4ab8-abf4-f61d3c9975ff", + 0, + []string{"-test-verify-only"}, + }, + { + "recovery_mode", + testBaseHCL(t, "") + inmemHCL, + "", + 0, + []string{"-test-verify-only", "-recovery"}, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + ui, cmd := testServerCommand(t) + + f, err := os.CreateTemp(t.TempDir(), "") + require.NoErrorf(t, err, "error creating temp dir: %v", err) + + _, err = f.WriteString(tc.contents) + require.NoErrorf(t, err, "cannot write temp file contents") + + err = f.Close() + require.NoErrorf(t, err, "unable to close temp file") + + args := append(tc.args, "-config", f.Name()) + code := cmd.Run(args) + output := ui.ErrorWriter.String() + ui.OutputWriter.String() + require.Equal(t, tc.code, code, "expected %d to be %d: %s", code, tc.code, output) + require.Contains(t, output, tc.exp, "expected %q to contain %q", output, tc.exp) + }) + } +} + +// TestServer_DevTLS verifies that a vault server starts up correctly with the -dev-tls flag +func TestServer_DevTLS(t *testing.T) { + ui, cmd := testServerCommand(t) + args := []string{"-dev-tls", "-dev-listen-address=127.0.0.1:0", "-test-server-config"} + retCode := cmd.Run(args) + output := ui.ErrorWriter.String() + ui.OutputWriter.String() + require.Equal(t, 0, retCode, output) + require.Contains(t, output, `tls: "enabled"`) +} + +// TestConfigureDevTLS verifies the various logic paths that flow through the +// configureDevTLS function. +func TestConfigureDevTLS(t *testing.T) { + testcases := []struct { + ServerCommand *ServerCommand + DeferFuncNotNil bool + ConfigNotNil bool + TLSDisable bool + CertPathEmpty bool + ErrNotNil bool + TestDescription string + }{ + { + ServerCommand: &ServerCommand{ + flagDevTLS: false, + }, + ConfigNotNil: true, + TLSDisable: true, + CertPathEmpty: true, + ErrNotNil: false, + TestDescription: "flagDev is false, nothing will be configured", + }, + { + ServerCommand: &ServerCommand{ + flagDevTLS: true, + flagDevTLSCertDir: "", + }, + DeferFuncNotNil: true, + ConfigNotNil: true, + ErrNotNil: false, + TestDescription: "flagDevTLSCertDir is empty", + }, + { + ServerCommand: &ServerCommand{ + flagDevTLS: true, + flagDevTLSCertDir: "@/#", + }, + CertPathEmpty: true, + ErrNotNil: true, + TestDescription: "flagDevTLSCertDir is set to something invalid", + }, + } + + for _, testcase := range testcases { + fun, cfg, certPath, err := configureDevTLS(testcase.ServerCommand) + if fun != nil { + // If a function is returned, call it right away to clean up + // files created in the temporary directory before anything else has + // a chance to fail this test. 
+ fun() + } + + require.Equal(t, testcase.DeferFuncNotNil, (fun != nil), "test description %s", testcase.TestDescription) + require.Equal(t, testcase.ConfigNotNil, cfg != nil, "test description %s", testcase.TestDescription) + if testcase.ConfigNotNil { + require.True(t, len(cfg.Listeners) > 0, "test description %s", testcase.TestDescription) + require.Equal(t, testcase.TLSDisable, cfg.Listeners[0].TLSDisable, "test description %s", testcase.TestDescription) + } + require.Equal(t, testcase.CertPathEmpty, len(certPath) == 0, "test description %s", testcase.TestDescription) + require.Equal(t, testcase.ErrNotNil, (err != nil), "test description %s", testcase.TestDescription) + } +} diff --git a/command/server_util.go b/command/server_util.go new file mode 100644 index 0000000..7bf3196 --- /dev/null +++ b/command/server_util.go @@ -0,0 +1,27 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "github.com/hashicorp/vault/command/server" + "github.com/hashicorp/vault/vault" +) + +var ( + adjustCoreConfigForEnt = adjustCoreConfigForEntNoop + storageSupportedForEnt = checkStorageTypeForEntNoop +) + +func adjustCoreConfigForEntNoop(config *server.Config, coreConfig *vault.CoreConfig) { +} + +var getFIPSInfoKey = getFIPSInfoKeyNoop + +func getFIPSInfoKeyNoop() string { + return "" +} + +func checkStorageTypeForEntNoop(coreConfig *vault.CoreConfig) bool { + return true +} diff --git a/command/ssh.go b/command/ssh.go new file mode 100644 index 0000000..90bb030 --- /dev/null +++ b/command/ssh.go @@ -0,0 +1,893 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "os" + "os/exec" + "os/user" + "strings" + "syscall" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/ssh" + "github.com/mitchellh/cli" + "github.com/mitchellh/mapstructure" + "github.com/pkg/errors" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*SSHCommand)(nil) + _ cli.CommandAutocomplete = (*SSHCommand)(nil) +) + +type SSHCommand struct { + *BaseCommand + + // Common SSH options + flagMode string + flagRole string + flagNoExec bool + flagMountPoint string + flagStrictHostKeyChecking string + flagSSHExecutable string + flagUserKnownHostsFile string + + // SSH CA Mode options + flagPublicKeyPath string + flagPrivateKeyPath string + flagHostKeyMountPoint string + flagHostKeyHostnames string + flagValidPrincipals string +} + +func (c *SSHCommand) Synopsis() string { + return "Initiate an SSH session" +} + +func (c *SSHCommand) Help() string { + helpText := ` +Usage: vault ssh [options] username@ip [ssh options] + + Establishes an SSH connection with the target machine. + + This command uses one of the SSH secrets engines to authenticate and + automatically establish an SSH connection to a host. This operation requires + that the SSH secrets engine is mounted and configured. + + SSH using the OTP mode (requires sshpass for full automation): + + $ vault ssh -mode=otp -role=my-role user@1.2.3.4 + + SSH using the CA mode: + + $ vault ssh -mode=ca -role=my-role user@1.2.3.4 + + SSH using CA mode with host key verification: + + $ vault ssh \ + -mode=ca \ + -role=my-role \ + -host-key-mount-point=host-signer \ + -host-key-hostnames=example.com \ + user@example.com + + For the full list of options and arguments, please see the documentation. 
+ +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *SSHCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) + + f := set.NewFlagSet("SSH Options") + + // TODO: doc field? + + // General + f.StringVar(&StringVar{ + Name: "mode", + Target: &c.flagMode, + Default: "", + EnvVar: "", + Completion: complete.PredictSet("ca", "dynamic", "otp"), + Usage: "Name of the authentication mode (ca, dynamic, otp).", + }) + + f.StringVar(&StringVar{ + Name: "role", + Target: &c.flagRole, + Default: "", + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "Name of the role to use to generate the key.", + }) + + f.BoolVar(&BoolVar{ + Name: "no-exec", + Target: &c.flagNoExec, + Default: false, + EnvVar: "", + Completion: complete.PredictNothing, + Usage: "Print the generated credentials, but do not establish a " + + "connection.", + }) + + f.StringVar(&StringVar{ + Name: "mount-point", + Target: &c.flagMountPoint, + Default: "ssh/", + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "Mount point to the SSH secrets engine.", + }) + + f.StringVar(&StringVar{ + Name: "strict-host-key-checking", + Target: &c.flagStrictHostKeyChecking, + Default: "ask", + EnvVar: "VAULT_SSH_STRICT_HOST_KEY_CHECKING", + Completion: complete.PredictSet("ask", "no", "yes"), + Usage: "Value to use for the SSH configuration option " + + "\"StrictHostKeyChecking\".", + }) + + f.StringVar(&StringVar{ + Name: "user-known-hosts-file", + Target: &c.flagUserKnownHostsFile, + Default: "", + EnvVar: "VAULT_SSH_USER_KNOWN_HOSTS_FILE", + Completion: complete.PredictFiles("*"), + Usage: "Value to use for the SSH configuration option " + + "\"UserKnownHostsFile\".", + }) + + // SSH CA + f = set.NewFlagSet("CA Mode Options") + + f.StringVar(&StringVar{ + Name: "public-key-path", + Target: &c.flagPublicKeyPath, + Default: "~/.ssh/id_rsa.pub", + EnvVar: "", + Completion: complete.PredictFiles("*"), + Usage: "Path to the SSH public key to send to Vault for signing.", + }) + + f.StringVar(&StringVar{ + Name: "private-key-path", + Target: &c.flagPrivateKeyPath, + Default: "~/.ssh/id_rsa", + EnvVar: "", + Completion: complete.PredictFiles("*"), + Usage: "Path to the SSH private key to use for authentication. This must " + + "be the corresponding private key to -public-key-path.", + }) + + f.StringVar(&StringVar{ + Name: "host-key-mount-point", + Target: &c.flagHostKeyMountPoint, + Default: "", + EnvVar: "VAULT_SSH_HOST_KEY_MOUNT_POINT", + Completion: complete.PredictAnything, + Usage: "Mount point to the SSH secrets engine where host keys are signed. " + + "When given a value, Vault will generate a custom \"known_hosts\" file " + + "with delegation to the CA at the provided mount point to verify the " + + "SSH connection's host keys against the provided CA. By default, host " + + "keys are validated against the user's local \"known_hosts\" file. " + + "This flag forces strict key host checking and ignores a custom user " + + "known hosts file.", + }) + + f.StringVar(&StringVar{ + Name: "host-key-hostnames", + Target: &c.flagHostKeyHostnames, + Default: "*", + EnvVar: "VAULT_SSH_HOST_KEY_HOSTNAMES", + Completion: complete.PredictAnything, + Usage: "List of hostnames to delegate for the CA. The default value " + + "allows all domains and IPs. 
This is specified as a comma-separated " +
+			"list of values.",
+	})
+
+	f.StringVar(&StringVar{
+		Name:       "valid-principals",
+		Target:     &c.flagValidPrincipals,
+		Default:    "",
+		EnvVar:     "",
+		Completion: complete.PredictAnything,
+		Usage: "List of valid principal names to include in the generated " +
+			"user certificate. This is specified as a comma-separated list of values.",
+	})
+
+	f.StringVar(&StringVar{
+		Name:       "ssh-executable",
+		Target:     &c.flagSSHExecutable,
+		Default:    "ssh",
+		EnvVar:     "VAULT_SSH_EXECUTABLE",
+		Completion: complete.PredictAnything,
+		Usage:      "Path to the SSH executable to use when connecting to the host.",
+	})
+
+	return set
+}
+
+func (c *SSHCommand) AutocompleteArgs() complete.Predictor {
+	return nil
+}
+
+func (c *SSHCommand) AutocompleteFlags() complete.Flags {
+	return c.Flags().Completions()
+}
+
+// SSHCredentialResp holds the fields returned when a credential is requested
+// from the SSH secrets engine.
+type SSHCredentialResp struct {
+	KeyType  string `mapstructure:"key_type"`
+	Key      string `mapstructure:"key"`
+	Username string `mapstructure:"username"`
+	IP       string `mapstructure:"ip"`
+	Port     string `mapstructure:"port"`
+}
+
+func (c *SSHCommand) Run(args []string) int {
+	f := c.Flags()
+
+	if err := f.Parse(args, DisableDisplayFlagWarning(true)); err != nil {
+		c.UI.Error(err.Error())
+		return 1
+	}
+
+	// Use homedir to expand any relative paths such as ~/.ssh
+	c.flagUserKnownHostsFile = expandPath(c.flagUserKnownHostsFile)
+	c.flagPublicKeyPath = expandPath(c.flagPublicKeyPath)
+	c.flagPrivateKeyPath = expandPath(c.flagPrivateKeyPath)
+
+	args = f.Args()
+	if len(args) < 1 {
+		c.UI.Error(fmt.Sprintf("Not enough arguments (expected at least 1, got %d)", len(args)))
+		return 1
+	}
+
+	// Extract the hostname, username and port from the ssh command
+	hostname, username, port, err := c.parseSSHCommand(args)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("Error parsing the ssh command: %q", err))
+		return 1
+	}
+
+	// Use the current user if no user was specified in the ssh command
+	if username == "" {
+		u, err := user.Current()
+		if err != nil {
+			c.UI.Error(fmt.Sprintf("Error getting the current user: %q", err))
+			return 1
+		}
+		username = u.Username
+	}
+
+	ip, err := c.resolveHostname(hostname)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("Error resolving the ssh hostname: %q", err))
+		return 1
+	}
+
+	// Set the client in the command
+	_, err = c.Client()
+	if err != nil {
+		c.UI.Error(err.Error())
+		return 1
+	}
+
+	// Credentials are generated only against a registered role. If the user
+	// does not specify a role with the SSH command, the lookup API is used to
+	// fetch all the roles with which this IP is associated. If there is only
+	// one role associated with it, use it to establish the connection.
+	//
+	// TODO: remove in 0.9.0, convert to validation error
+	if c.flagRole == "" {
+		c.UI.Warn(wrapAtLength(
+			"WARNING: No -role specified. Use -role to tell Vault which ssh role " +
+				"to use for authentication. In the future, you will need to tell " +
+				"Vault which role to use. For now, Vault will attempt to guess based " +
+				"on the API response. This will be removed in Vault 1.1."))
+
+		role, err := c.defaultRole(c.flagMountPoint, ip)
+		if err != nil {
+			c.UI.Error(fmt.Sprintf("Error choosing role: %v", err))
+			return 1
+		}
+		// Print the default role chosen so that the user knows the role name
+		// if something doesn't work. If the chosen role is not allowed to be
+		// used by the user (ACL enforcement), the user will see an error
+		// message accordingly.
+ c.UI.Output(fmt.Sprintf("Vault SSH: Role: %q", role)) + c.flagRole = role + } + + // If no mode was given, perform the old-school lookup. Keep this now for + // backwards-compatibility, but print a warning. + // + // TODO: remove in 0.9.0, convert to validation error + if c.flagMode == "" { + c.UI.Warn(wrapAtLength( + "WARNING: No -mode specified. Use -mode to tell Vault which ssh " + + "authentication mode to use. In the future, you will need to tell " + + "Vault which mode to use. For now, Vault will attempt to guess based " + + "on the API response. This guess involves creating a temporary " + + "credential, reading its type, and then revoking it. To reduce the " + + "number of API calls and surface area, specify -mode directly. This " + + "will be removed in Vault 1.1.")) + secret, cred, err := c.generateCredential(username, ip) + if err != nil { + // This is _very_ hacky, but is the only sane backwards-compatible way + // to do this. If the error is "key type unknown", we just assume the + // type is "ca". In the future, mode will be required as an option. + if strings.Contains(err.Error(), "key type unknown") { + c.flagMode = ssh.KeyTypeCA + } else { + c.UI.Error(fmt.Sprintf("Error getting credential: %s", err)) + return 1 + } + } else { + c.flagMode = cred.KeyType + } + + // Revoke the secret, since the child functions will generate their own + // credential. Users wishing to avoid this should specify -mode. + if secret != nil { + if err := c.client.Sys().Revoke(secret.LeaseID); err != nil { + c.UI.Warn(fmt.Sprintf("Failed to revoke temporary key: %s", err)) + } + } + } + + switch strings.ToLower(c.flagMode) { + case ssh.KeyTypeCA: + return c.handleTypeCA(username, ip, port, args) + case ssh.KeyTypeOTP: + return c.handleTypeOTP(username, ip, port, args) + case ssh.KeyTypeDynamic: + return c.handleTypeDynamic(username, ip, port, args) + default: + c.UI.Error(fmt.Sprintf("Unknown SSH mode: %s", c.flagMode)) + return 1 + } +} + +// handleTypeCA is used to handle SSH logins using the "CA" key type. +func (c *SSHCommand) handleTypeCA(username, ip, port string, sshArgs []string) int { + // Read the key from disk + publicKey, err := ioutil.ReadFile(c.flagPublicKeyPath) + if err != nil { + c.UI.Error(fmt.Sprintf("failed to read public key %s: %s", + c.flagPublicKeyPath, err)) + return 1 + } + + sshClient := c.client.SSHWithMountPoint(c.flagMountPoint) + + principals := username + if c.flagValidPrincipals != "" { + principals = c.flagValidPrincipals + } + + // Attempt to sign the public key + secret, err := sshClient.SignKey(c.flagRole, map[string]interface{}{ + // WARNING: publicKey is []byte, which is b64 encoded on JSON upload. We + // have to convert it to a string. SV lost many hours to this... + "public_key": string(publicKey), + "valid_principals": principals, + "cert_type": "user", + + // TODO: let the user configure these. In the interim, if users want to + // customize these values, they can produce the key themselves. 
+ "extensions": map[string]string{ + "permit-X11-forwarding": "", + "permit-agent-forwarding": "", + "permit-port-forwarding": "", + "permit-pty": "", + "permit-user-rc": "", + }, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("failed to sign public key %s: %s", + c.flagPublicKeyPath, err)) + return 2 + } + if secret == nil || secret.Data == nil { + c.UI.Error("missing signed key") + return 2 + } + + // Handle no-exec + if c.flagNoExec { + if c.flagField != "" { + return PrintRawField(c.UI, secret, c.flagField) + } + return OutputSecret(c.UI, secret) + } + + // Extract public key + key, ok := secret.Data["signed_key"].(string) + if !ok || key == "" { + c.UI.Error("signed key is empty") + return 2 + } + + // Capture the current value - this could be overwritten later if the user + // enabled host key signing verification. + userKnownHostsFile := c.flagUserKnownHostsFile + strictHostKeyChecking := c.flagStrictHostKeyChecking + + // Handle host key signing verification. If the user specified a mount point, + // download the public key, trust it with the given domains, and use that + // instead of the user's regular known_hosts file. + if c.flagHostKeyMountPoint != "" { + secret, err := c.client.Logical().Read(c.flagHostKeyMountPoint + "/config/ca") + if err != nil { + c.UI.Error(fmt.Sprintf("failed to get host signing key: %s", err)) + return 2 + } + if secret == nil || secret.Data == nil { + c.UI.Error("missing host signing key") + return 2 + } + publicKey, ok := secret.Data["public_key"].(string) + if !ok || publicKey == "" { + c.UI.Error("host signing key is empty") + return 2 + } + + // Write the known_hosts file + name := fmt.Sprintf("vault_ssh_ca_known_hosts_%s_%s", username, ip) + data := fmt.Sprintf("@cert-authority %s %s", c.flagHostKeyHostnames, publicKey) + knownHosts, err, closer := c.writeTemporaryFile(name, []byte(data), 0o644) + defer closer() + if err != nil { + c.UI.Error(fmt.Sprintf("failed to write host public key: %s", err)) + return 1 + } + + // Update the variables + userKnownHostsFile = knownHosts + strictHostKeyChecking = "yes" + } + + // Write the signed public key to disk + name := fmt.Sprintf("vault_ssh_ca_%s_%s", username, ip) + signedPublicKeyPath, err, closer := c.writeTemporaryKey(name, []byte(key)) + defer closer() + if err != nil { + c.UI.Error(fmt.Sprintf("failed to write signed public key: %s", err)) + return 2 + } + + args := []string{ + "-i", c.flagPrivateKeyPath, + "-i", signedPublicKeyPath, + "-o StrictHostKeyChecking=" + strictHostKeyChecking, + } + + if userKnownHostsFile != "" { + args = append(args, + "-o UserKnownHostsFile="+userKnownHostsFile, + ) + } + + // Add extra user defined ssh arguments + args = append(args, sshArgs...) + + cmd := exec.Command(c.flagSSHExecutable, args...) + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err = cmd.Run() + if err != nil { + exitCode := 2 + + if exitError, ok := err.(*exec.ExitError); ok { + if exitError.Success() { + return 0 + } + if ws, ok := exitError.Sys().(syscall.WaitStatus); ok { + exitCode = ws.ExitStatus() + } + } + + c.UI.Error(fmt.Sprintf("failed to run ssh command: %s", err)) + return exitCode + } + + // There is no secret to revoke, since it's a certificate signing + return 0 +} + +// handleTypeOTP is used to handle SSH logins using the "otp" key type. 
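+// When sshpass is present, the effective invocation looks roughly like the
+// following (an illustrative sketch only; the real argument list is assembled
+// below, and <otp>, <port>, <value>, and user@ip stand in for runtime values):
+//
+//	SSHPASS=<otp> sshpass -e ssh -p <port> \
+//	    -o StrictHostKeyChecking=<value> user@ip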
+func (c *SSHCommand) handleTypeOTP(username, ip, port string, sshArgs []string) int {
+	secret, cred, err := c.generateCredential(username, ip)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("failed to generate credential: %s", err))
+		return 2
+	}
+
+	// Handle no-exec
+	if c.flagNoExec {
+		if c.flagField != "" {
+			return PrintRawField(c.UI, secret, c.flagField)
+		}
+		return OutputSecret(c.UI, secret)
+	}
+
+	var cmd *exec.Cmd
+
+	// Check if the application 'sshpass' is installed on the client machine.
+	// If it is, use it to automate typing the OTP into the prompt.
+	// Unfortunately, it was not possible to automate this with only the Go
+	// libraries and no third-party application. Feel free to try and remove
+	// this dependency.
+	args := make([]string, 0)
+	env := os.Environ()
+	sshCmd := c.flagSSHExecutable
+
+	sshpassPath, err := exec.LookPath("sshpass")
+	if err != nil {
+		// No sshpass available, so use the normal ssh client
+		c.UI.Warn(wrapAtLength(
+			"Vault could not locate \"sshpass\". The OTP code for the session is " +
+				"displayed below. Enter this code in the SSH password prompt. If you " +
+				"install sshpass, Vault can automatically perform this step for you."))
+		c.UI.Output("OTP for the session is: " + cred.Key)
+	} else {
+		// sshpass is available, so let's use it instead
+		sshCmd = sshpassPath
+		args = append(args,
+			"-e", // Read the password from the SSHPASS environment variable
+			c.flagSSHExecutable,
+		)
+		env = append(env, fmt.Sprintf("SSHPASS=%s", cred.Key))
+	}
+
+	// Only hardcode the known hosts file path if it has been set
+	if c.flagUserKnownHostsFile != "" {
+		args = append(args,
+			"-o UserKnownHostsFile="+c.flagUserKnownHostsFile,
+		)
+	}
+
+	// If a port wasn't specified in the ssh arguments, let's use the port we
+	// got back from Vault
+	if port == "" {
+		args = append(args, "-p", cred.Port)
+	}
+
+	args = append(args,
+		"-o StrictHostKeyChecking="+c.flagStrictHostKeyChecking,
+	)
+
+	// Add the rest of the ssh args appended by the user
+	args = append(args, sshArgs...)
+
+	cmd = exec.Command(sshCmd, args...)
+	cmd.Env = env
+
+	cmd.Stdin = os.Stdin
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	err = cmd.Run()
+	if err != nil {
+		exitCode := 2
+
+		if exitError, ok := err.(*exec.ExitError); ok {
+			if exitError.Success() {
+				return 0
+			}
+			if ws, ok := exitError.Sys().(syscall.WaitStatus); ok {
+				exitCode = ws.ExitStatus()
+			}
+		}
+
+		c.UI.Error(fmt.Sprintf("failed to run ssh command: %s", err))
+		return exitCode
+	}
+
+	// Revoke the key so it does not outlive the session
+	if err := c.client.Sys().Revoke(secret.LeaseID); err != nil {
+		c.UI.Error(fmt.Sprintf("failed to revoke key: %s", err))
+		return 2
+	}
+
+	return 0
+}
+
+// handleTypeDynamic is used to handle SSH logins using the "dynamic" key type.
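+// The flow mirrors handleTypeOTP: generate a dynamic key, write it to a
+// temporary file, then exec ssh along these lines (an illustrative sketch;
+// the placeholders stand in for runtime values):
+//
+//	ssh -p <port> -i <temp-key-path> -o UserKnownHostsFile=<file> \
+//	    -o StrictHostKeyChecking=<value> user@ip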
+func (c *SSHCommand) handleTypeDynamic(username, ip, port string, sshArgs []string) int {
+	// Generate the credential
+	secret, cred, err := c.generateCredential(username, ip)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("failed to generate credential: %s", err))
+		return 2
+	}
+
+	// Handle no-exec
+	if c.flagNoExec {
+		if c.flagField != "" {
+			return PrintRawField(c.UI, secret, c.flagField)
+		}
+		return OutputSecret(c.UI, secret)
+	}
+
+	// Write the dynamic key to disk
+	name := fmt.Sprintf("vault_ssh_dynamic_%s_%s", username, ip)
+	keyPath, err, closer := c.writeTemporaryKey(name, []byte(cred.Key))
+	defer closer()
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("failed to write dynamic key: %s", err))
+		return 1
+	}
+
+	args := make([]string, 0)
+	// If a port wasn't specified in the ssh arguments, let's use the port we
+	// got back from Vault
+	if port == "" {
+		args = append(args, "-p", cred.Port)
+	}
+
+	args = append(args,
+		"-i", keyPath,
+		"-o UserKnownHostsFile="+c.flagUserKnownHostsFile,
+		"-o StrictHostKeyChecking="+c.flagStrictHostKeyChecking,
+	)
+
+	// Add extra user defined ssh arguments
+	args = append(args, sshArgs...)
+
+	cmd := exec.Command(c.flagSSHExecutable, args...)
+	cmd.Stdin = os.Stdin
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	err = cmd.Run()
+	if err != nil {
+		exitCode := 2
+
+		if exitError, ok := err.(*exec.ExitError); ok {
+			if exitError.Success() {
+				return 0
+			}
+			if ws, ok := exitError.Sys().(syscall.WaitStatus); ok {
+				exitCode = ws.ExitStatus()
+			}
+		}
+
+		c.UI.Error(fmt.Sprintf("failed to run ssh command: %s", err))
+		return exitCode
+	}
+
+	// Revoke the key so it does not outlive the session
+	if err := c.client.Sys().Revoke(secret.LeaseID); err != nil {
+		c.UI.Error(fmt.Sprintf("failed to revoke key: %s", err))
+		return 2
+	}
+
+	return 0
+}
+
+// generateCredential generates a credential for the given role and returns the
+// decoded secret data.
+func (c *SSHCommand) generateCredential(username, ip string) (*api.Secret, *SSHCredentialResp, error) {
+	sshClient := c.client.SSHWithMountPoint(c.flagMountPoint)
+
+	// Attempt to generate the credential.
+	secret, err := sshClient.Credential(c.flagRole, map[string]interface{}{
+		"username": username,
+		"ip":       ip,
+	})
+	if err != nil {
+		return nil, nil, errors.Wrap(err, "failed to get credentials")
+	}
+	if secret == nil || secret.Data == nil {
+		return nil, nil, fmt.Errorf("vault returned empty credentials")
+	}
+
+	// Port comes back as a json.Number, which mapstructure doesn't like, so
+	// convert it
+	if d, ok := secret.Data["port"].(json.Number); ok {
+		secret.Data["port"] = d.String()
+	}
+
+	// Use mapstructure to decode the response
+	var resp SSHCredentialResp
+	if err := mapstructure.Decode(secret.Data, &resp); err != nil {
+		return nil, nil, errors.Wrap(err, "failed to decode credential")
+	}
+
+	// Check for an empty key response
+	if len(resp.Key) == 0 {
+		return nil, nil, fmt.Errorf("vault returned an invalid key")
+	}
+
+	return secret, &resp, nil
+}
+
+// writeTemporaryFile writes a file to a temp location with the given data and
+// file permissions.
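+// The returned closer removes the temporary file. Callers in this file defer
+// it before checking the error, since a closer is returned even on failure:
+//
+//	path, err, closer := c.writeTemporaryFile(name, data, 0o600)
+//	defer closer()
+//	if err != nil { /* handle the error; closer still cleans up */ }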
+func (c *SSHCommand) writeTemporaryFile(name string, data []byte, perms os.FileMode) (string, error, func() error) {
+	// default closer so callers can always defer it without a nil check
+	closer := func() error { return nil }
+
+	f, err := ioutil.TempFile("", name)
+	if err != nil {
+		return "", errors.Wrap(err, "creating temporary file"), closer
+	}
+
+	closer = func() error { return os.Remove(f.Name()) }
+
+	if err := ioutil.WriteFile(f.Name(), data, perms); err != nil {
+		return "", errors.Wrap(err, "writing temporary file"), closer
+	}
+
+	if err := f.Close(); err != nil {
+		return "", errors.Wrap(err, "closing temporary file"), closer
+	}
+
+	return f.Name(), nil, closer
+}
+
+// writeTemporaryKey writes the key to a temporary file and returns the path.
+// The caller should defer the closer to clean up the key.
+func (c *SSHCommand) writeTemporaryKey(name string, data []byte) (string, error, func() error) {
+	return c.writeTemporaryFile(name, data, 0o600)
+}
+
+// If the user did not provide the role with which the SSH connection has to
+// be established, and if there is only one role associated with the IP, it
+// is used by default.
+func (c *SSHCommand) defaultRole(mountPoint, ip string) (string, error) {
+	data := map[string]interface{}{
+		"ip": ip,
+	}
+	secret, err := c.client.Logical().Write(mountPoint+"/lookup", data)
+	if err != nil {
+		return "", fmt.Errorf("error finding roles for IP %q: %w", ip, err)
+	}
+	if secret == nil || secret.Data == nil {
+		// err is nil in this branch, so don't wrap it; report the empty
+		// response directly.
+		return "", fmt.Errorf("empty response when finding roles for IP %q", ip)
+	}
+
+	if secret.Data["roles"] == nil {
+		return "", fmt.Errorf("no matching roles found for IP %q", ip)
+	}
+
+	if len(secret.Data["roles"].([]interface{})) == 1 {
+		return secret.Data["roles"].([]interface{})[0].(string), nil
+	} else {
+		var roleNames string
+		for _, item := range secret.Data["roles"].([]interface{}) {
+			roleNames += item.(string) + ", "
+		}
+		roleNames = strings.TrimRight(roleNames, ", ")
+		return "", fmt.Errorf("Roles: %q. "+`
+	Multiple roles are registered for this IP.
+	Select a role using '-role' option.
+	Note that all roles may not be permitted, based on ACLs.`, roleNames)
+	}
+}
+
+func (c *SSHCommand) isSingleSSHArg(arg string) bool {
+	// list of single SSH arguments is taken from
+	// https://github.com/openssh/openssh-portable/blob/28013759f09ed3ebf7e8335e83a62936bd7a7f47/ssh.c#L204
+	singleArgs := []string{
+		"4", "6", "A", "a", "C", "f", "G", "g", "K", "k", "M", "N", "n", "q",
+		"s", "T", "t", "V", "v", "X", "x", "Y", "y",
+	}
+
+	// We want to get the first character after the dash. This is so args
+	// like -vvv are picked up as just being -v.
+	flag := string(arg[1])
+
+	for _, a := range singleArgs {
+		if flag == a {
+			return true
+		}
+	}
+	return false
+}
+
+// Finds the hostname, username (optional) and port (optional) from any valid
+// ssh command. Supports username@hostname, but also specifying valid ssh
+// flags like -o User=username, -o Port=2222 and -p 2222 anywhere in the
+// command.
+func (c *SSHCommand) parseSSHCommand(args []string) (hostname string, username string, port string, err error) {
+	lastArg := ""
+	for _, i := range args {
+		arg := lastArg
+		lastArg = ""
+
+		// If -p has been specified then this is our ssh port
+		if arg == "-p" {
+			port = i
+			continue
+		}
+
+		// This is an ssh option; let's see if User or Port have been set and use them
+		if arg == "-o" {
+			split := strings.Split(i, "=")
+			key := split[0]
+			// In case the value contains = signs we want to keep all of them
+			value := strings.Join(split[1:], " ")
+
+			if key == "User" {
+				// Don't overwrite the user if it is already set by username@hostname.
+				// This matches the behaviour of regular ssh when both are specified
+				if username == "" {
+					username = value
+				}
+			}
+
+			if key == "Port" {
+				// Don't overwrite the port if it is already set by -p.
+				// This matches the behaviour of regular ssh when both are specified
+				if port == "" {
+					port = value
+				}
+			}
+			continue
+		}
+
+		// This isn't an ssh argument that we care about. Let's keep on parsing the command
+		if arg != "" {
+			continue
+		}
+
+		// If this is an ssh argument with a value, we want to look at it in the next loop
+		if strings.HasPrefix(i, "-") {
+			// If this isn't a single SSH arg, store the flag so we can look at the value next loop
+			if !c.isSingleSSHArg(i) {
+				lastArg = i
+			}
+			continue
+		}
+
+		// If we have gotten this far it means this is a bare argument.
+		// The first bare argument is the hostname.
+		// The second bare argument is the command to run on the remote host.
+
+		// If the hostname hasn't been set yet, then we have found the first bare argument
+		if hostname == "" {
+			if strings.Contains(i, "@") {
+				split := strings.Split(i, "@")
+				username = split[0]
+				hostname = split[1]
+			} else {
+				hostname = i
+			}
+			continue
+		} else {
+			// The second bare argument is the command to run on the remote host.
+			// We need to break out and stop parsing arguments now
+			break
+		}
+
+	}
+	if hostname == "" {
+		// err is always nil at this point, so wrapping it would yield a nil
+		// error; build a fresh error instead.
+		return "", "", "", fmt.Errorf(
+			"failed to find a hostname in ssh command %q", strings.Join(args, " "),
+		)
+	}
+	return hostname, username, port, nil
+}
+
+func (c *SSHCommand) resolveHostname(hostname string) (ip string, err error) {
+	// Resolving domain names to IP address on the client side.
+	// Vault only deals with IP addresses.
+	ipAddr, err := net.ResolveIPAddr("ip", hostname)
+	if err != nil {
+		return "", errors.Wrap(err, "failed to resolve IP address")
+	}
+	ip = ipAddr.String()
+	return ip, nil
+}
diff --git a/command/ssh_test.go b/command/ssh_test.go
new file mode 100644
index 0000000..137e541
--- /dev/null
+++ b/command/ssh_test.go
@@ -0,0 +1,235 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func testSSHCommand(tb testing.TB) (*cli.MockUi, *SSHCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &SSHCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestSSHCommand_Run(t *testing.T) { + t.Parallel() + t.Skip("Need a way to setup target infrastructure") +} + +func TestParseSSHCommand(t *testing.T) { + t.Parallel() + + _, cmd := testSSHCommand(t) + tests := []struct { + name string + args []string + hostname string + username string + port string + err error + }{ + { + "Parse just a hostname", + []string{ + "hostname", + }, + "hostname", + "", + "", + nil, + }, + { + "Parse the standard username@hostname", + []string{ + "username@hostname", + }, + "hostname", + "username", + "", + nil, + }, + { + "Parse the username out of -o User=username", + []string{ + "-o", "User=username", + "hostname", + }, + "hostname", + "username", + "", + nil, + }, + { + "If the username is specified with -o User=username and realname@hostname prefer realname@", + []string{ + "-o", "User=username", + "realname@hostname", + }, + "hostname", + "realname", + "", + nil, + }, + { + "Parse the port out of -o Port=2222", + []string{ + "-o", "Port=2222", + "hostname", + }, + "hostname", + "", + "2222", + nil, + }, + { + "Parse the port out of -p 2222", + []string{ + "-p", "2222", + "hostname", + }, + "hostname", + "", + "2222", + nil, + }, + { + "If port is defined with -o Port=2222 and -p 2244 prefer -p", + []string{ + "-p", "2244", + "-o", "Port=2222", + "hostname", + }, + "hostname", + "", + "2244", + nil, + }, + { + "Ssh args with a command", + []string{ + "hostname", + "command", + }, + "hostname", + "", + "", + nil, + }, + { + "Flags after the ssh command are not passed because they are part of the command", + []string{ + "username@hostname", + "command", + "-p 22", + }, + "hostname", + "username", + "", + nil, + }, + { + "Allow single args which don't have a value", + []string{ + "-v", + "hostname", + }, + "hostname", + "", + "", + nil, + }, + { + "Allow single args before and after the hostname and command", + []string{ + "-v", + "hostname", + "-v", + "command", + "-v", + }, + "hostname", + "", + "", + nil, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + hostname, username, port, err := cmd.parseSSHCommand(test.args) + if err != test.err { + t.Errorf("got error: %q want %q", err, test.err) + } + if hostname != test.hostname { + t.Errorf("got hostname: %q want %q", hostname, test.hostname) + } + if username != test.username { + t.Errorf("got username: %q want %q", username, test.username) + } + if port != test.port { + t.Errorf("got port: %q want %q", port, test.port) + } + }) + } +} + +func TestIsSingleSSHArg(t *testing.T) { + t.Parallel() + + _, cmd := testSSHCommand(t) + tests := []struct { + name string + arg string + want bool + }{ + { + "-v is a single ssh arg", + "-v", + true, + }, + { + "-o is NOT a single ssh arg", + "-o", + false, + }, + { + "Repeated args like -vvv is still a single ssh arg", + "-vvv", + true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := cmd.isSingleSSHArg(test.arg) + if got != test.want { + t.Errorf("arg %q got %v want %v", test.arg, got, test.want) + } + }) + } +} + +// TestSSHCommandOmitFlagWarning checks if flags warning messages are printed +// in the output of the CLI command or not. If so, it will fail. 
+func TestSSHCommandOmitFlagWarning(t *testing.T) { + t.Parallel() + + ui, cmd := testSSHCommand(t) + + _ = cmd.Run([]string{"-mode", "ca", "-role", "otp_key_role", "user@1.2.3.4", "-extraFlag", "bug"}) + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if strings.Contains(combined, "Command flags must be provided before positional arguments. The following arguments will not be parsed as flags") { + t.Fatalf("ssh command displayed flag warnings") + } +} diff --git a/command/status.go b/command/status.go new file mode 100644 index 0000000..0b7c619 --- /dev/null +++ b/command/status.go @@ -0,0 +1,99 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*StatusCommand)(nil) + _ cli.CommandAutocomplete = (*StatusCommand)(nil) +) + +type StatusCommand struct { + *BaseCommand +} + +func (c *StatusCommand) Synopsis() string { + return "Print seal and HA status" +} + +func (c *StatusCommand) Help() string { + helpText := ` +Usage: vault status [options] + + Prints the current state of Vault including whether it is sealed and if HA + mode is enabled. This command prints regardless of whether the Vault is + sealed. + + The exit code reflects the seal status: + + - 0 - unsealed + - 1 - error + - 2 - sealed + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *StatusCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP | FlagSetOutputFormat) +} + +func (c *StatusCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *StatusCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *StatusCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + if len(args) > 0 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + // We return 2 everywhere else, but 2 is reserved for "sealed" here + return 1 + } + + // Always query in the root namespace. + // Although seal-status is present in other namespaces, it will not + // be available until Vault is unsealed. + client.SetNamespace("") + + status, err := client.Sys().SealStatus() + if err != nil { + c.UI.Error(fmt.Sprintf("Error checking seal status: %s", err)) + return 1 + } + + // Do not return the int here yet, since we may want to return a custom error + // code depending on the seal status. + code := OutputSealStatus(c.UI, client, status) + + if status.Sealed { + return 2 + } + + return code +} diff --git a/command/status_test.go b/command/status_test.go new file mode 100644 index 0000000..5731945 --- /dev/null +++ b/command/status_test.go @@ -0,0 +1,118 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func testStatusCommand(tb testing.TB) (*cli.MockUi, *StatusCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &StatusCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestStatusCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + sealed bool + out string + code int + }{ + { + "unsealed", + nil, + false, + "Sealed false", + 0, + }, + { + "sealed", + nil, + true, + "Sealed true", + 2, + }, + { + "args", + []string{"foo"}, + false, + "Too many arguments", + 1, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + if tc.sealed { + if err := client.Sys().Seal(); err != nil { + t.Fatal(err) + } + } + + ui, cmd := testStatusCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testStatusCommand(t) + cmd.client = client + + code := cmd.Run([]string{}) + if exp := 1; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error checking seal status: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testStatusCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/test-backend/main.go b/command/test-backend/main.go new file mode 100644 index 0000000..90ffa4e --- /dev/null +++ b/command/test-backend/main.go @@ -0,0 +1 @@ +package test_backend diff --git a/command/test-fixtures/config.hcl b/command/test-fixtures/config.hcl new file mode 100644 index 0000000..164acd2 --- /dev/null +++ b/command/test-fixtures/config.hcl @@ -0,0 +1,4 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +token_helper = "foo" diff --git a/command/test-fixtures/policy.hcl b/command/test-fixtures/policy.hcl new file mode 100644 index 0000000..267fc5c --- /dev/null +++ b/command/test-fixtures/policy.hcl @@ -0,0 +1,10 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +path "secret/foo" { + policy = "write" +} + +path "secret/bar/*" { + capabilities = ["create", "read", "update"] +} diff --git a/command/token.go b/command/token.go new file mode 100644 index 0000000..7b15275 --- /dev/null +++ b/command/token.go @@ -0,0 +1,49 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + + "github.com/mitchellh/cli" +) + +var _ cli.Command = (*TokenCommand)(nil) + +type TokenCommand struct { + *BaseCommand +} + +func (c *TokenCommand) Synopsis() string { + return "Interact with tokens" +} + +func (c *TokenCommand) Help() string { + helpText := ` +Usage: vault token [options] [args] + + This command groups subcommands for interacting with tokens. Users can + create, lookup, renew, and revoke tokens. 
+ + Create a new token: + + $ vault token create + + Revoke a token: + + $ vault token revoke 96ddf4bc-d217-f3ba-f9bd-017055595017 + + Renew a token: + + $ vault token renew 96ddf4bc-d217-f3ba-f9bd-017055595017 + + Please see the individual subcommand help for detailed usage information. +` + + return strings.TrimSpace(helpText) +} + +func (c *TokenCommand) Run(args []string) int { + return cli.RunResultHelp +} diff --git a/command/token/helper.go b/command/token/helper.go new file mode 100644 index 0000000..c8ce763 --- /dev/null +++ b/command/token/helper.go @@ -0,0 +1,16 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package token + +// TokenHelper is an interface that contains basic operations that must be +// implemented by a token helper +type TokenHelper interface { + // Path displays a method-specific path; for the internal helper this + // is the location of the token stored on disk; for the external helper + // this is the location of the binary being invoked + Path() string + Erase() error + Get() (string, error) + Store(string) error +} diff --git a/command/token/helper_external.go b/command/token/helper_external.go new file mode 100644 index 0000000..12557a4 --- /dev/null +++ b/command/token/helper_external.go @@ -0,0 +1,136 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package token + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" +) + +// ExternalTokenHelperPath should only be used in dev mode. +// ExternalTokenHelperPath takes the configured path to a helper and expands it to +// a full absolute path that can be executed. As of 0.5, the default token +// helper is internal, to avoid problems running in dev mode (see GH-850 and +// GH-783), so special assumptions of prepending "vault token-" no longer +// apply. +// +// As an additional result, only absolute paths are now allowed. Looking in the +// path or a current directory for an arbitrary executable could allow someone +// to switch the expected binary for one further up the path (or in the current +// directory), potentially opening up execution of an arbitrary binary. +func ExternalTokenHelperPath(path string) (string, error) { + if !filepath.IsAbs(path) { + var err error + path, err = filepath.Abs(path) + if err != nil { + return "", err + } + } + + if _, err := os.Stat(path); err != nil { + return "", fmt.Errorf("unknown error getting the external helper path") + } + + return path, nil +} + +var _ TokenHelper = (*ExternalTokenHelper)(nil) + +// ExternalTokenHelper should only be used in a dev mode. For all other cases, +// InternalTokenHelper should be used. +// ExternalTokenHelper is the struct that has all the logic for storing and retrieving +// tokens from the token helper. The API for the helpers is simple: the +// BinaryPath is executed within a shell with environment Env. The last argument +// appended will be the operation, which is: +// +// - "get" - Read the value of the token and write it to stdout. +// - "store" - Store the value of the token which is on stdin. Output +// nothing. +// - "erase" - Erase the contents stored. Output nothing. +// +// Any errors can be written on stdout. If the helper exits with a non-zero +// exit code then the stderr will be made part of the error value. +type ExternalTokenHelper struct { + BinaryPath string + Env []string +} + +// Erase deletes the contents from the helper. 
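+// For example, a BinaryPath of "/usr/local/bin/vault-helper" (a hypothetical
+// path) is run via the cmd/ExecScript plumbing below as, roughly,
+// "/bin/sh -c '/usr/local/bin/vault-helper erase'" (or $SHELL if set, and
+// cmd /C on Windows).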
+func (h *ExternalTokenHelper) Erase() error { + cmd, err := h.cmd("erase") + if err != nil { + return err + } + if output, err := cmd.CombinedOutput(); err != nil { + return fmt.Errorf("%q: %w", string(output), err) + } + return nil +} + +// Get gets the token value from the helper. +func (h *ExternalTokenHelper) Get() (string, error) { + var buf, stderr bytes.Buffer + cmd, err := h.cmd("get") + if err != nil { + return "", err + } + cmd.Stdout = &buf + cmd.Stderr = &stderr + if err := cmd.Run(); err != nil { + return "", fmt.Errorf("%q: %w", stderr.String(), err) + } + + return buf.String(), nil +} + +// Store stores the token value into the helper. +func (h *ExternalTokenHelper) Store(v string) error { + buf := bytes.NewBufferString(v) + cmd, err := h.cmd("store") + if err != nil { + return err + } + cmd.Stdin = buf + if output, err := cmd.CombinedOutput(); err != nil { + return fmt.Errorf("%q: %w", string(output), err) + } + + return nil +} + +func (h *ExternalTokenHelper) Path() string { + return h.BinaryPath +} + +func (h *ExternalTokenHelper) cmd(op string) (*exec.Cmd, error) { + script := strings.ReplaceAll(h.BinaryPath, "\\", "\\\\") + " " + op + cmd, err := ExecScript(script) + if err != nil { + return nil, err + } + cmd.Env = h.Env + return cmd, nil +} + +// ExecScript returns a command to execute a script +func ExecScript(script string) (*exec.Cmd, error) { + var shell, flag string + if runtime.GOOS == "windows" { + shell = "cmd" + flag = "/C" + } else { + shell = "/bin/sh" + flag = "-c" + } + if other := os.Getenv("SHELL"); other != "" { + shell = other + } + cmd := exec.Command(shell, flag, script) + return cmd, nil +} diff --git a/command/token/helper_external_test.go b/command/token/helper_external_test.go new file mode 100644 index 0000000..d95c889 --- /dev/null +++ b/command/token/helper_external_test.go @@ -0,0 +1,141 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package token + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "strings" + "testing" +) + +func TestExternalTokenHelperPath(t *testing.T) { + cases := map[string]string{} + + unixCases := map[string]string{ + "/foo": "/foo", + } + windowsCases := map[string]string{ + "C:/foo": "C:/foo", + `C:\Program Files`: `C:\Program Files`, + } + + var runtimeCases map[string]string + if runtime.GOOS == "windows" { + runtimeCases = windowsCases + } else { + runtimeCases = unixCases + } + + for k, v := range runtimeCases { + cases[k] = v + } + + // We don't expect those to actually exist, so we expect an error. For now, + // I'm commenting out the rest of this code as we don't have real external + // helpers to test with and the os.Stat will fail with our fake test cases. + /* + for k, v := range cases { + actual, err := ExternalTokenHelperPath(k) + if err != nil { + t.Fatalf("error getting external helper path: %v", err) + } + if actual != v { + t.Fatalf( + "input: %s, expected: %s, got: %s", + k, v, actual) + } + } + */ +} + +func TestExternalTokenHelper(t *testing.T) { + Test(t, testExternalTokenHelper(t)) +} + +func testExternalTokenHelper(t *testing.T) *ExternalTokenHelper { + return &ExternalTokenHelper{BinaryPath: helperPath("helper"), Env: helperEnv()} +} + +func helperPath(s ...string) string { + cs := []string{"-test.run=TestExternalTokenHelperProcess", "--"} + cs = append(cs, s...) 
+ return fmt.Sprintf( + "%s %s", + os.Args[0], + strings.Join(cs, " ")) +} + +func helperEnv() []string { + var env []string + + tf, err := ioutil.TempFile("", "vault") + if err != nil { + panic(err) + } + tf.Close() + + env = append(env, "GO_HELPER_PATH="+tf.Name(), "GO_WANT_HELPER_PROCESS=1") + return env +} + +// This is not a real test. This is just a helper process kicked off by tests. +func TestExternalTokenHelperProcess(*testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + return + } + + defer os.Exit(0) + + args := os.Args + for len(args) > 0 { + if args[0] == "--" { + args = args[1:] + break + } + + args = args[1:] + } + + if len(args) == 0 { + fmt.Fprintf(os.Stderr, "No command\n") + os.Exit(2) + } + + cmd, args := args[0], args[1:] + switch cmd { + case "helper": + path := os.Getenv("GO_HELPER_PATH") + + switch args[0] { + case "erase": + os.Remove(path) + case "get": + f, err := os.Open(path) + if os.IsNotExist(err) { + return + } + if err != nil { + fmt.Fprintf(os.Stderr, "Err: %s\n", err) + os.Exit(1) + } + defer f.Close() + io.Copy(os.Stdout, f) + case "store": + f, err := os.Create(path) + if err != nil { + fmt.Fprintf(os.Stderr, "Err: %s\n", err) + os.Exit(1) + } + defer f.Close() + io.Copy(f, os.Stdin) + } + default: + fmt.Fprintf(os.Stderr, "Unknown command: %q\n", cmd) + os.Exit(2) + } +} diff --git a/command/token/helper_internal.go b/command/token/helper_internal.go new file mode 100644 index 0000000..aeb4faa --- /dev/null +++ b/command/token/helper_internal.go @@ -0,0 +1,104 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package token + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + homedir "github.com/mitchellh/go-homedir" + "github.com/natefinch/atomic" +) + +var _ TokenHelper = (*InternalTokenHelper)(nil) + +// InternalTokenHelper fulfills the TokenHelper interface when no external +// token-helper is configured, and avoids shelling out +type InternalTokenHelper struct { + tokenPath string + homeDir string +} + +func NewInternalTokenHelper() (*InternalTokenHelper, error) { + homeDir, err := homedir.Dir() + if err != nil { + panic(fmt.Sprintf("error getting user's home directory: %v", err)) + } + return &InternalTokenHelper{homeDir: homeDir}, err +} + +// populateTokenPath figures out the token path using homedir to get the user's +// home directory +func (i *InternalTokenHelper) populateTokenPath() { + i.tokenPath = filepath.Join(i.homeDir, ".vault-token") +} + +func (i *InternalTokenHelper) Path() string { + return i.tokenPath +} + +// Get gets the value of the stored token, if any +func (i *InternalTokenHelper) Get() (string, error) { + i.populateTokenPath() + f, err := os.Open(i.tokenPath) + if os.IsNotExist(err) { + return "", nil + } + if err != nil { + return "", err + } + defer f.Close() + + buf := bytes.NewBuffer(nil) + if _, err := io.Copy(buf, f); err != nil { + return "", err + } + + return strings.TrimSpace(buf.String()), nil +} + +// Store stores the value of the token to the file. We always overwrite any +// existing file atomically to ensure that ownership and permissions are set +// appropriately. 
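+// In outline, the write path is (assuming the default ~/.vault-token path):
+//
+//	create ~/.vault-token.tmp with mode 0600
+//	write the token and close the file
+//	atomically rename ~/.vault-token.tmp over ~/.vault-token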
+func (i *InternalTokenHelper) Store(input string) error { + i.populateTokenPath() + tmpFile := i.tokenPath + ".tmp" + f, err := os.OpenFile(tmpFile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0o600) + if err != nil { + return err + } + defer f.Close() + defer os.Remove(tmpFile) + + _, err = io.WriteString(f, input) + if err != nil { + return err + } + err = f.Close() + if err != nil { + return err + } + + // We don't care so much about atomic writes here. We're using this package + // because we don't have a portable way of verifying that the target file + // is owned by the correct user. The simplest way of ensuring that is + // to simply re-write it, and the simplest way to ensure that we don't + // damage an existing working file due to error is the write-rename pattern. + // os.Rename on Windows will return an error if the target already exists. + return atomic.ReplaceFile(tmpFile, i.tokenPath) +} + +// Erase erases the value of the token +func (i *InternalTokenHelper) Erase() error { + i.populateTokenPath() + if err := os.Remove(i.tokenPath); err != nil && !os.IsNotExist(err) { + return err + } + + return nil +} diff --git a/command/token/helper_internal_test.go b/command/token/helper_internal_test.go new file mode 100644 index 0000000..e68359c --- /dev/null +++ b/command/token/helper_internal_test.go @@ -0,0 +1,65 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package token + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +// TestCommand re-uses the existing Test function to ensure proper behavior of +// the internal token helper +func TestCommand(t *testing.T) { + helper, err := NewInternalTokenHelper() + if err != nil { + t.Fatal(err) + } + Test(t, helper) +} + +func TestInternalHelperFilePerms(t *testing.T) { + tmpDir, err := ioutil.TempDir("", t.Name()) + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + helper, err := NewInternalTokenHelper() + if err != nil { + t.Fatal(err) + } + helper.homeDir = tmpDir + + tmpFile := filepath.Join(tmpDir, ".vault-token") + f, err := os.Create(tmpFile) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + fi, err := os.Stat(tmpFile) + if err != nil { + t.Fatal(err) + } + + if fi.Mode().Perm()&0o04 != 0o04 { + t.Fatalf("expected world-readable/writable permission bits, got: %o", fi.Mode().Perm()) + } + + err = helper.Store("bogus_token") + if err != nil { + t.Fatal(err) + } + + fi, err = os.Stat(tmpFile) + if err != nil { + t.Fatal(err) + } + + if fi.Mode().Perm()&0o04 != 0 { + t.Fatalf("expected no world-readable/writable permission bits, got: %o", fi.Mode().Perm()) + } +} diff --git a/command/token/helper_testing.go b/command/token/helper_testing.go new file mode 100644 index 0000000..e95ff35 --- /dev/null +++ b/command/token/helper_testing.go @@ -0,0 +1,45 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package token + +import ( + "sync" +) + +var _ TokenHelper = (*TestingTokenHelper)(nil) + +// TestingTokenHelper implements token.TokenHelper which runs entirely +// in-memory. This should not be used outside of testing. 
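+// A minimal usage sketch in test code (the token value is arbitrary):
+//
+//	helper := NewTestingTokenHelper()
+//	_ = helper.Store("test-token")
+//	tok, _ := helper.Get() // tok == "test-token"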
+type TestingTokenHelper struct { + lock sync.RWMutex + token string +} + +func NewTestingTokenHelper() *TestingTokenHelper { + return &TestingTokenHelper{} +} + +func (t *TestingTokenHelper) Erase() error { + t.lock.Lock() + defer t.lock.Unlock() + t.token = "" + return nil +} + +func (t *TestingTokenHelper) Get() (string, error) { + t.lock.RLock() + defer t.lock.RUnlock() + return t.token, nil +} + +func (t *TestingTokenHelper) Path() string { + return "" +} + +func (t *TestingTokenHelper) Store(token string) error { + t.lock.Lock() + defer t.lock.Unlock() + t.token = token + return nil +} diff --git a/command/token/testing.go b/command/token/testing.go new file mode 100644 index 0000000..85da084 --- /dev/null +++ b/command/token/testing.go @@ -0,0 +1,81 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package token + +import ( + "fmt" + "os" + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +// Test is a public function that can be used in other tests to +// test that a helper is functioning properly. +func Test(t *testing.T, h TokenHelper) { + if err := h.Store("foo"); err != nil { + t.Fatalf("err: %s", err) + } + + v, err := h.Get() + if err != nil { + t.Fatalf("err: %s", err) + } + + if v != "foo" { + t.Fatalf("bad: %#v", v) + } + + if err := h.Erase(); err != nil { + t.Fatalf("err: %s", err) + } + + v, err = h.Get() + if err != nil { + t.Fatalf("err: %s", err) + } + + if v != "" { + t.Fatalf("bad: %#v", v) + } +} + +// TestProcess is used to re-execute this test in order to use it as the +// helper process. For this to work, the TestExternalTokenHelperProcess function must +// exist. +func TestProcess(t *testing.T, s ...string) { + h := &ExternalTokenHelper{BinaryPath: TestProcessPath(t, s...)} + Test(t, h) +} + +// TestProcessPath returns the path to the test process. +func TestProcessPath(t *testing.T, s ...string) string { + cs := []string{"-test.run=TestExternalTokenHelperProcess", "--", "GO_WANT_HELPER_PROCESS"} + cs = append(cs, s...) + return fmt.Sprintf( + "%s %s", + os.Args[0], + strings.Join(cs, " ")) +} + +// TestExternalTokenHelperProcessCLI can be called to implement TestExternalTokenHelperProcess +// for TestProcess that just executes a CLI command. +func TestExternalTokenHelperProcessCLI(t *testing.T, cmd cli.Command) { + args := os.Args + for len(args) > 0 { + if args[0] == "--" { + args = args[1:] + break + } + + args = args[1:] + } + if len(args) == 0 || args[0] != "GO_WANT_HELPER_PROCESS" { + return + } + args = args[1:] + + os.Exit(cmd.Run(args)) +} diff --git a/command/token_capabilities.go b/command/token_capabilities.go new file mode 100644 index 0000000..f7e359c --- /dev/null +++ b/command/token_capabilities.go @@ -0,0 +1,117 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "sort" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*TokenCapabilitiesCommand)(nil) + _ cli.CommandAutocomplete = (*TokenCapabilitiesCommand)(nil) +) + +type TokenCapabilitiesCommand struct { + *BaseCommand +} + +func (c *TokenCapabilitiesCommand) Synopsis() string { + return "Print capabilities of a token on a path" +} + +func (c *TokenCapabilitiesCommand) Help() string { + helpText := ` +Usage: vault token capabilities [options] [TOKEN] PATH + + Fetches the capabilities of a token for a given path. If a TOKEN is provided + as an argument, the "/sys/capabilities" endpoint and permission is used. 
If + no TOKEN is provided, the "/sys/capabilities-self" endpoint and permission + is used with the locally authenticated token. + + List capabilities for the local token on the "secret/foo" path: + + $ vault token capabilities secret/foo + + List capabilities for a token on the "cubbyhole/foo" path: + + $ vault token capabilities 96ddf4bc-d217-f3ba-f9bd-017055595017 cubbyhole/foo + + For a full list of examples, please see the documentation. + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *TokenCapabilitiesCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP | FlagSetOutputFormat) +} + +func (c *TokenCapabilitiesCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *TokenCapabilitiesCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *TokenCapabilitiesCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + token := "" + path := "" + args = f.Args() + switch len(args) { + case 0: + c.UI.Error("Not enough arguments (expected 1-2, got 0)") + return 1 + case 1: + path = args[0] + case 2: + token, path = args[0], args[1] + default: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1-2, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + var capabilities []string + if token == "" { + capabilities, err = client.Sys().CapabilitiesSelf(path) + } else { + capabilities, err = client.Sys().Capabilities(token, path) + } + if err != nil { + c.UI.Error(fmt.Sprintf("Error listing capabilities: %s", err)) + return 2 + } + if capabilities == nil { + c.UI.Error("No capabilities found") + return 1 + } + + switch Format(c.UI) { + case "table": + sort.Strings(capabilities) + c.UI.Output(strings.Join(capabilities, ", ")) + return 0 + default: + return OutputData(c.UI, capabilities) + } +} diff --git a/command/token_capabilities_test.go b/command/token_capabilities_test.go new file mode 100644 index 0000000..14b0f27 --- /dev/null +++ b/command/token_capabilities_test.go @@ -0,0 +1,194 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" +) + +func testTokenCapabilitiesCommand(tb testing.TB) (*cli.MockUi, *TokenCapabilitiesCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &TokenCapabilitiesCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestTokenCapabilitiesCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "too_many_args", + []string{"foo", "bar", "zip"}, + "Too many arguments", + 1, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testTokenCapabilitiesCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + + t.Run("token", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + policy := `path "secret/foo" { capabilities = ["read"] }` + if err := client.Sys().PutPolicy("policy", policy); err != nil { + t.Error(err) + } + + secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{ + Policies: []string{"policy"}, + TTL: "30m", + }) + if err != nil { + t.Fatal(err) + } + if secret == nil || secret.Auth == nil || secret.Auth.ClientToken == "" { + t.Fatalf("missing auth data: %#v", secret) + } + token := secret.Auth.ClientToken + + ui, cmd := testTokenCapabilitiesCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + token, "secret/foo", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "read" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("local", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + policy := `path "secret/foo" { capabilities = ["read"] }` + if err := client.Sys().PutPolicy("policy", policy); err != nil { + t.Error(err) + } + + secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{ + Policies: []string{"policy"}, + TTL: "30m", + }) + if err != nil { + t.Fatal(err) + } + if secret == nil || secret.Auth == nil || secret.Auth.ClientToken == "" { + t.Fatalf("missing auth data: %#v", secret) + } + token := secret.Auth.ClientToken + + client.SetToken(token) + + ui, cmd := testTokenCapabilitiesCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "secret/foo", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "read" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testTokenCapabilitiesCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "foo", "bar", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error listing capabilities: " + combined := 
ui.OutputWriter.String() + ui.ErrorWriter.String()
+		if !strings.Contains(combined, expected) {
+			t.Errorf("expected %q to contain %q", combined, expected)
+		}
+	})
+
+	t.Run("multiple_paths", func(t *testing.T) {
+		t.Parallel()
+
+		client, closer := testVaultServer(t)
+		defer closer()
+
+		_, cmd := testTokenCapabilitiesCommand(t)
+		cmd.client = client
+
+		code := cmd.Run([]string{
+			"secret/foo,secret/bar",
+		})
+		if exp := 1; code != exp {
+			t.Errorf("expected %d to be %d", code, exp)
+		}
+	})
+
+	t.Run("no_tabs", func(t *testing.T) {
+		t.Parallel()
+
+		_, cmd := testTokenCapabilitiesCommand(t)
+		assertNoTabs(t, cmd)
+	})
+}
diff --git a/command/token_create.go b/command/token_create.go
new file mode 100644
index 0000000..1efee5e
--- /dev/null
+++ b/command/token_create.go
@@ -0,0 +1,262 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/vault/api"
+	"github.com/mitchellh/cli"
+	"github.com/posener/complete"
+)
+
+var (
+	_ cli.Command             = (*TokenCreateCommand)(nil)
+	_ cli.CommandAutocomplete = (*TokenCreateCommand)(nil)
+)
+
+type TokenCreateCommand struct {
+	*BaseCommand
+
+	flagID              string
+	flagDisplayName     string
+	flagTTL             time.Duration
+	flagExplicitMaxTTL  time.Duration
+	flagPeriod          time.Duration
+	flagRenewable       bool
+	flagOrphan          bool
+	flagNoDefaultPolicy bool
+	flagUseLimit        int
+	flagRole            string
+	flagType            string
+	flagMetadata        map[string]string
+	flagPolicies        []string
+	flagEntityAlias     string
+}
+
+func (c *TokenCreateCommand) Synopsis() string {
+	return "Create a new token"
+}
+
+func (c *TokenCreateCommand) Help() string {
+	helpText := `
+Usage: vault token create [options]
+
+  Creates a new token that can be used for authentication. This token will be
+  created as a child of the currently authenticated token. The generated token
+  will inherit all policies and permissions of the currently authenticated
+  token unless you explicitly define a subset list of policies to assign to
+  the token.
+
+  A TTL can also be associated with the token. If a TTL is not associated
+  with the token, then it cannot be renewed. If a TTL is associated with
+  the token, it will expire after that amount of time unless it is renewed.
+
+  Metadata associated with the token (specified with "-metadata") is written
+  to the audit log when the token is used.
+
+  If a role is specified, the role may override parameters specified here.
+
+` + c.Flags().Help()
+
+	return strings.TrimSpace(helpText)
+}
+
+func (c *TokenCreateCommand) Flags() *FlagSets {
+	set := c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat)
+
+	f := set.NewFlagSet("Command Options")
+
+	f.StringVar(&StringVar{
+		Name:       "id",
+		Target:     &c.flagID,
+		Completion: complete.PredictAnything,
+		Usage: "Value for the token. By default, this is an auto-generated " +
+			"string. Specifying this value requires sudo permissions.",
+	})
+
+	f.StringVar(&StringVar{
+		Name:       "display-name",
+		Target:     &c.flagDisplayName,
+		Completion: complete.PredictAnything,
+		Usage: "Name to associate with this token. This is a non-sensitive value " +
+			"that can be used to help identify created secrets (e.g. prefixes).",
+	})
+
+	f.DurationVar(&DurationVar{
+		Name:       "ttl",
+		Target:     &c.flagTTL,
+		Completion: complete.PredictAnything,
+		Usage: "Initial TTL to associate with the token. Token renewals may be " +
+			"able to extend beyond this value, depending on the configured maximum " +
+			"TTLs. This is specified as a numeric string with suffix like \"30s\" " +
+			"or \"5m\".",
+	})
+
+	f.DurationVar(&DurationVar{
+		Name:       "explicit-max-ttl",
+		Target:     &c.flagExplicitMaxTTL,
+		Completion: complete.PredictAnything,
+		Usage: "Explicit maximum lifetime for the token. Unlike normal TTLs, the " +
+			"maximum TTL is a hard limit and cannot be exceeded. This is specified " +
+			"as a numeric string with suffix like \"30s\" or \"5m\".",
+	})
+
+	f.DurationVar(&DurationVar{
+		Name:       "period",
+		Target:     &c.flagPeriod,
+		Completion: complete.PredictAnything,
+		Usage: "If specified, every renewal will use the given period. Periodic " +
+			"tokens do not expire (unless -explicit-max-ttl is also provided). " +
+			"Setting this value requires sudo permissions. This is specified as a " +
+			"numeric string with suffix like \"30s\" or \"5m\".",
+	})
+
+	f.BoolVar(&BoolVar{
+		Name:    "renewable",
+		Target:  &c.flagRenewable,
+		Default: true,
+		Usage:   "Allow the token to be renewed up to its maximum TTL.",
+	})
+
+	f.BoolVar(&BoolVar{
+		Name:    "orphan",
+		Target:  &c.flagOrphan,
+		Default: false,
+		Usage: "Create the token with no parent. This prevents the token from " +
+			"being revoked when the token which created it expires. Setting this " +
+			"value requires root or sudo permissions.",
+	})
+
+	f.BoolVar(&BoolVar{
+		Name:    "no-default-policy",
+		Target:  &c.flagNoDefaultPolicy,
+		Default: false,
+		Usage: "Detach the \"default\" policy from the policy set for this " +
+			"token.",
+	})
+
+	f.IntVar(&IntVar{
+		Name:    "use-limit",
+		Target:  &c.flagUseLimit,
+		Default: 0,
+		Usage: "Number of times this token can be used. After the last use, the " +
+			"token is automatically revoked. By default, tokens can be used an " +
+			"unlimited number of times until their expiration.",
+	})
+
+	f.StringVar(&StringVar{
+		Name:    "role",
+		Target:  &c.flagRole,
+		Default: "",
+		Usage: "Name of the role to create the token against. Specifying -role " +
+			"may override other arguments. The locally authenticated Vault token " +
+			"must have permission for \"auth/token/create/\".",
+	})
+
+	f.StringVar(&StringVar{
+		Name:    "type",
+		Target:  &c.flagType,
+		Default: "service",
+		Usage:   `The type of token to create. Can be "service" or "batch".`,
+	})
+
+	f.StringMapVar(&StringMapVar{
+		Name:       "metadata",
+		Target:     &c.flagMetadata,
+		Completion: complete.PredictAnything,
+		Usage: "Arbitrary key=value metadata to associate with the token. " +
+			"This metadata will show in the audit log when the token is used. " +
+			"This can be specified multiple times to add multiple pieces of " +
+			"metadata.",
+	})
+
+	f.StringSliceVar(&StringSliceVar{
+		Name:       "policy",
+		Target:     &c.flagPolicies,
+		Completion: c.PredictVaultPolicies(),
+		Usage: "Name of a policy to associate with this token. This can be " +
+			"specified multiple times to attach multiple policies.",
+	})
+
+	f.StringVar(&StringVar{
+		Name:    "entity-alias",
+		Target:  &c.flagEntityAlias,
+		Default: "",
+		Usage: "Name of the entity alias to associate with during token creation. " +
+			"Only works in combination with the -role argument, and the entity alias " +
+			"used must be listed in allowed_entity_aliases. 
If this has been specified, " + + "the entity will not be inherited from the parent.", + }) + + return set +} + +func (c *TokenCreateCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *TokenCreateCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *TokenCreateCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + if len(args) > 0 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(args))) + return 1 + } + + if c.flagType == "batch" { + c.flagRenewable = false + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + tcr := &api.TokenCreateRequest{ + ID: c.flagID, + Policies: c.flagPolicies, + Metadata: c.flagMetadata, + TTL: c.flagTTL.String(), + NoParent: c.flagOrphan, + NoDefaultPolicy: c.flagNoDefaultPolicy, + DisplayName: c.flagDisplayName, + NumUses: c.flagUseLimit, + Renewable: &c.flagRenewable, + ExplicitMaxTTL: c.flagExplicitMaxTTL.String(), + Period: c.flagPeriod.String(), + Type: c.flagType, + EntityAlias: c.flagEntityAlias, + } + + var secret *api.Secret + if c.flagRole != "" { + secret, err = client.Auth().Token().CreateWithRole(tcr, c.flagRole) + } else { + secret, err = client.Auth().Token().Create(tcr) + } + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating token: %s", err)) + return 2 + } + + if c.flagField != "" { + return PrintRawField(c.UI, secret, c.flagField) + } + + return OutputSecret(c.UI, secret) +} diff --git a/command/token_create_test.go b/command/token_create_test.go new file mode 100644 index 0000000..31bbd24 --- /dev/null +++ b/command/token_create_test.go @@ -0,0 +1,236 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"reflect"
+	"strings"
+	"testing"
+
+	"github.com/mitchellh/cli"
+)
+
+func testTokenCreateCommand(tb testing.TB) (*cli.MockUi, *TokenCreateCommand) {
+	tb.Helper()
+
+	ui := cli.NewMockUi()
+	return ui, &TokenCreateCommand{
+		BaseCommand: &BaseCommand{
+			UI: ui,
+		},
+	}
+}
+
+func TestTokenCreateCommand_Run(t *testing.T) {
+	t.Parallel()
+
+	cases := []struct {
+		name string
+		args []string
+		out  string
+		code int
+	}{
+		{
+			"too_many_args",
+			[]string{"abcd1234"},
+			"Too many arguments",
+			1,
+		},
+		{
+			"default",
+			nil,
+			"token",
+			0,
+		},
+		{
+			"metadata",
+			[]string{"-metadata", "foo=bar", "-metadata", "zip=zap"},
+			"token",
+			0,
+		},
+		{
+			"policies",
+			[]string{"-policy", "foo", "-policy", "bar"},
+			"token",
+			0,
+		},
+		{
+			"field",
+			[]string{
+				"-field", "token_renewable",
+			},
+			"false",
+			0,
+		},
+		{
+			"field_not_found",
+			[]string{
+				"-field", "not-a-real-field",
+			},
+			"not present in secret",
+			1,
+		},
+		{
+			"ttl",
+			[]string{"-ttl", "1d", "-explicit-max-ttl", "2d"},
+			"token",
+			0,
+		},
+	}
+
+	t.Run("validations", func(t *testing.T) {
+		t.Parallel()
+
+		for _, tc := range cases {
+			tc := tc
+
+			t.Run(tc.name, func(t *testing.T) {
+				t.Parallel()
+
+				client, closer := testVaultServer(t)
+				defer closer()
+
+				ui, cmd := testTokenCreateCommand(t)
+				cmd.client = client
+
+				code := cmd.Run(tc.args)
+				if code != tc.code {
+					t.Errorf("expected %d to be %d", code, tc.code)
+				}
+
+				combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
+				if !strings.Contains(combined, tc.out) {
+					t.Errorf("expected %q to contain %q", combined, tc.out)
+				}
+			})
+		}
+	})
+
+	t.Run("default", func(t *testing.T) {
+		t.Parallel()
+
+		client, closer := testVaultServer(t)
+		defer closer()
+
+		ui, cmd := testTokenCreateCommand(t)
+		cmd.client = client
+
+		code := cmd.Run([]string{
+			"-field", "token",
+		})
+		if exp := 0; code != exp {
+			t.Errorf("expected %d to be %d", code, exp)
+		}
+
+		token := strings.TrimSpace(ui.OutputWriter.String())
+		secret, err := client.Auth().Token().Lookup(token)
+		if secret == nil || err != nil {
+			t.Fatal(err)
+		}
+	})
+
+	t.Run("metadata", func(t *testing.T) {
+		t.Parallel()
+
+		client, closer := testVaultServer(t)
+		defer closer()
+
+		ui, cmd := testTokenCreateCommand(t)
+		cmd.client = client
+
+		code := cmd.Run([]string{
+			"-metadata", "foo=bar",
+			"-metadata", "zip=zap",
+			"-field", "token",
+		})
+		if exp := 0; code != exp {
+			t.Errorf("expected %d to be %d", code, exp)
+		}
+
+		token := strings.TrimSpace(ui.OutputWriter.String())
+		secret, err := client.Auth().Token().Lookup(token)
+		if secret == nil || err != nil {
+			t.Fatal(err)
+		}
+
+		meta, ok := secret.Data["meta"].(map[string]interface{})
+		if !ok {
+			t.Fatalf("missing meta: %#v", secret)
+		}
+		if _, ok := meta["foo"]; !ok {
+			t.Errorf("missing meta.foo: %#v", meta)
+		}
+		if _, ok := meta["zip"]; !ok {
+			t.Errorf("missing meta.zip: %#v", meta)
+		}
+	})
+
+	t.Run("policies", func(t *testing.T) {
+		t.Parallel()
+
+		client, closer := testVaultServer(t)
+		defer closer()
+
+		ui, cmd := testTokenCreateCommand(t)
+		cmd.client = client
+
+		code := cmd.Run([]string{
+			"-policy", "foo",
+			"-policy", "bar",
+			"-field", "token",
+		})
+		if exp := 0; code != exp {
+			t.Errorf("expected %d to be %d", code, exp)
+		}
+
+		token := strings.TrimSpace(ui.OutputWriter.String())
+		secret, err := client.Auth().Token().Lookup(token)
+		if secret == nil || err != nil {
+			t.Fatal(err)
+		}
+
+		raw, ok := secret.Data["policies"].([]interface{})
+		if !ok {
t.Fatalf("missing policies: %#v", secret) + } + + policies := make([]string, len(raw)) + for i := range raw { + policies[i] = raw[i].(string) + } + + expected := []string{"bar", "default", "foo"} + if !reflect.DeepEqual(policies, expected) { + t.Errorf("expected %q to be %q", policies, expected) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testTokenCreateCommand(t) + cmd.client = client + + code := cmd.Run([]string{}) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error creating token: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testTokenCreateCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/token_lookup.go b/command/token_lookup.go new file mode 100644 index 0000000..2416139 --- /dev/null +++ b/command/token_lookup.go @@ -0,0 +1,133 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*TokenLookupCommand)(nil) + _ cli.CommandAutocomplete = (*TokenLookupCommand)(nil) +) + +type TokenLookupCommand struct { + *BaseCommand + + flagAccessor bool +} + +func (c *TokenLookupCommand) Synopsis() string { + return "Display information about a token" +} + +func (c *TokenLookupCommand) Help() string { + helpText := ` +Usage: vault token lookup [options] [TOKEN | ACCESSOR] + + Displays information about a token or accessor. If a TOKEN is not provided, + the locally authenticated token is used. + + Get information about the locally authenticated token (this uses the + /auth/token/lookup-self endpoint and permission): + + $ vault token lookup + + Get information about a particular token (this uses the /auth/token/lookup + endpoint and permission): + + $ vault token lookup 96ddf4bc-d217-f3ba-f9bd-017055595017 + + Get information about a token via its accessor: + + $ vault token lookup -accessor 9793c9b3-e04a-46f3-e7b8-748d7da248da + + For a full list of examples, please see the documentation. + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *TokenLookupCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.BoolVar(&BoolVar{ + Name: "accessor", + Target: &c.flagAccessor, + Default: false, + EnvVar: "", + Completion: complete.PredictNothing, + Usage: "Treat the argument as an accessor instead of a token. 
When " + + "this option is selected, the output will NOT include the token.", + }) + + return set +} + +func (c *TokenLookupCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultFiles() +} + +func (c *TokenLookupCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *TokenLookupCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + token := "" + + args = f.Args() + switch { + case c.flagAccessor && len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments with -accessor (expected 1, got %d)", len(args))) + return 1 + case c.flagAccessor && len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments with -accessor (expected 1, got %d)", len(args))) + return 1 + case len(args) == 0: + // Use the local token + case len(args) == 1: + token = strings.TrimSpace(args[0]) + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0-1, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + var secret *api.Secret + switch { + case token == "": + secret, err = client.Auth().Token().LookupSelf() + case c.flagAccessor: + secret, err = client.Auth().Token().LookupAccessor(token) + default: + secret, err = client.Auth().Token().Lookup(token) + } + + if err != nil { + c.UI.Error(fmt.Sprintf("Error looking up token: %s", err)) + return 2 + } + + return OutputSecret(c.UI, secret) +} diff --git a/command/token_lookup_test.go b/command/token_lookup_test.go new file mode 100644 index 0000000..3f56365 --- /dev/null +++ b/command/token_lookup_test.go @@ -0,0 +1,180 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func testTokenLookupCommand(tb testing.TB) (*cli.MockUi, *TokenLookupCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &TokenLookupCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestTokenLookupCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "accessor_no_args", + []string{"-accessor"}, + "Not enough arguments", + 1, + }, + { + "accessor_too_many_args", + []string{"-accessor", "abcd1234", "efgh5678"}, + "Too many arguments", + 1, + }, + { + "too_many_args", + []string{"abcd1234", "efgh5678"}, + "Too many arguments", + 1, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testTokenLookupCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("token", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + token, _ := testTokenAndAccessor(t, client) + + ui, cmd := testTokenLookupCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + token, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := token + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if 
!strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("self", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testTokenLookupCommand(t) + cmd.client = client + + code := cmd.Run([]string{}) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "display_name" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("accessor", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + _, accessor := testTokenAndAccessor(t, client) + + ui, cmd := testTokenLookupCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-accessor", + accessor, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := accessor + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testTokenLookupCommand(t) + cmd.client = client + + code := cmd.Run([]string{}) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error looking up token: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testTokenLookupCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/token_renew.go b/command/token_renew.go new file mode 100644 index 0000000..7a61487 --- /dev/null +++ b/command/token_renew.go @@ -0,0 +1,144 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + "time" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*TokenRenewCommand)(nil) + _ cli.CommandAutocomplete = (*TokenRenewCommand)(nil) +) + +type TokenRenewCommand struct { + *BaseCommand + + flagAccessor bool + flagIncrement time.Duration +} + +func (c *TokenRenewCommand) Synopsis() string { + return "Renew a token lease" +} + +func (c *TokenRenewCommand) Help() string { + helpText := ` +Usage: vault token renew [options] [TOKEN] + + Renews a token's lease, extending the amount of time it can be used. If a + TOKEN is not provided, the locally authenticated token is used. A token + accessor can be used as well. Lease renewal will fail if the token is not + renewable, the token has already been revoked, or if the token has already + reached its maximum TTL. + + Renew a token (this uses the /auth/token/renew endpoint and permission): + + $ vault token renew 96ddf4bc-d217-f3ba-f9bd-017055595017 + + Renew the currently authenticated token (this uses the /auth/token/renew-self + endpoint and permission): + + $ vault token renew + + Renew a token requesting a specific increment value: + + $ vault token renew -increment=30m 96ddf4bc-d217-f3ba-f9bd-017055595017 + + For a full list of examples, please see the documentation. 
+ +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *TokenRenewCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + f := set.NewFlagSet("Command Options") + + f.BoolVar(&BoolVar{ + Name: "accessor", + Target: &c.flagAccessor, + Default: false, + EnvVar: "", + Completion: complete.PredictNothing, + Usage: "Treat the argument as an accessor instead of a token. When " + + "this option is selected, the output will NOT include the token.", + }) + + f.DurationVar(&DurationVar{ + Name: "increment", + Aliases: []string{"i"}, + Target: &c.flagIncrement, + Default: 0, + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "Request a specific increment for renewal. This increment may " + + "not be honored, for instance in the case of periodic tokens. If not " + + "supplied, Vault will use the default TTL. This is specified as a " + + "numeric string with suffix like \"30s\" or \"5m\".", + }) + + return set +} + +func (c *TokenRenewCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultFiles() +} + +func (c *TokenRenewCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *TokenRenewCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + token := "" + increment := c.flagIncrement + + args = f.Args() + switch len(args) { + case 0: + // Use the local token + case 1: + token = strings.TrimSpace(args[0]) + default: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + var secret *api.Secret + inc := truncateToSeconds(increment) + switch { + case token == "": + secret, err = client.Auth().Token().RenewSelf(inc) + case c.flagAccessor: + secret, err = client.Auth().Token().RenewAccessor(token, inc) + default: + secret, err = client.Auth().Token().Renew(token, inc) + } + if err != nil { + c.UI.Error(fmt.Sprintf("Error renewing token: %s", err)) + return 2 + } + + return OutputSecret(c.UI, secret) +} diff --git a/command/token_renew_test.go b/command/token_renew_test.go new file mode 100644 index 0000000..29d9292 --- /dev/null +++ b/command/token_renew_test.go @@ -0,0 +1,230 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "encoding/json" + "strconv" + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func testTokenRenewCommand(tb testing.TB) (*cli.MockUi, *TokenRenewCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &TokenRenewCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestTokenRenewCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "too_many_args", + []string{"foo", "bar", "baz"}, + "Too many arguments", + 1, + }, + { + "default", + nil, + "", + 0, + }, + { + "increment", + []string{"-increment", "60s"}, + "", + 0, + }, + { + "increment_no_suffix", + []string{"-increment", "60"}, + "", + 0, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + // Login with the token so we can renew-self. 
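+				// (renew-self operates on whatever token the API client is
+				// currently authenticated with.)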
+ token, _ := testTokenAndAccessor(t, client) + client.SetToken(token) + + ui, cmd := testTokenRenewCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("token", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + token, _ := testTokenAndAccessor(t, client) + + _, cmd := testTokenRenewCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-increment", "30m", + token, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + secret, err := client.Auth().Token().Lookup(token) + if err != nil { + t.Fatal(err) + } + + str := string(secret.Data["ttl"].(json.Number)) + ttl, err := strconv.ParseInt(str, 10, 64) + if err != nil { + t.Fatalf("bad ttl: %#v", secret.Data["ttl"]) + } + if exp := int64(1800); ttl > exp { + t.Errorf("expected %d to be <= to %d", ttl, exp) + } + }) + + t.Run("accessor", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + token, accessor := testTokenAndAccessor(t, client) + + _, cmd := testTokenRenewCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-increment", "30m", + "-accessor", + accessor, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + secret, err := client.Auth().Token().Lookup(token) + if err != nil { + t.Fatal(err) + } + + str := string(secret.Data["ttl"].(json.Number)) + ttl, err := strconv.ParseInt(str, 10, 64) + if err != nil { + t.Fatalf("bad ttl: %#v", secret.Data["ttl"]) + } + if exp := int64(1800); ttl > exp { + t.Errorf("expected %d to be <= to %d", ttl, exp) + } + }) + + t.Run("self", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + token, _ := testTokenAndAccessor(t, client) + + // Get the old token and login as the new token. We need the old token + // to query after the lookup, but we need the new token on the client. 
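+		// (Lookup requires a privileged token, while RenewSelf acts on the
+		// client's own token.)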
+ oldToken := client.Token() + client.SetToken(token) + + _, cmd := testTokenRenewCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-increment", "30m", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + client.SetToken(oldToken) + secret, err := client.Auth().Token().Lookup(token) + if err != nil { + t.Fatal(err) + } + + str := string(secret.Data["ttl"].(json.Number)) + ttl, err := strconv.ParseInt(str, 10, 64) + if err != nil { + t.Fatalf("bad ttl: %#v", secret.Data["ttl"]) + } + if exp := int64(1800); ttl > exp { + t.Errorf("expected %d to be <= to %d", ttl, exp) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testTokenRenewCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "foo/bar", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error renewing token: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testTokenRenewCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/token_revoke.go b/command/token_revoke.go new file mode 100644 index 0000000..48ccc27 --- /dev/null +++ b/command/token_revoke.go @@ -0,0 +1,179 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*TokenRevokeCommand)(nil) + _ cli.CommandAutocomplete = (*TokenRevokeCommand)(nil) +) + +type TokenRevokeCommand struct { + *BaseCommand + + flagAccessor bool + flagSelf bool + flagMode string +} + +func (c *TokenRevokeCommand) Synopsis() string { + return "Revoke a token and its children" +} + +func (c *TokenRevokeCommand) Help() string { + helpText := ` +Usage: vault token revoke [options] [TOKEN | ACCESSOR] + + Revokes authentication tokens and their children. If a TOKEN is not provided, + the locally authenticated token is used. The "-mode" flag can be used to + control the behavior of the revocation. See the "-mode" flag documentation + for more information. + + Revoke a token and all the token's children: + + $ vault token revoke 96ddf4bc-d217-f3ba-f9bd-017055595017 + + Revoke a token leaving the token's children: + + $ vault token revoke -mode=orphan 96ddf4bc-d217-f3ba-f9bd-017055595017 + + Revoke a token by accessor: + + $ vault token revoke -accessor 9793c9b3-e04a-46f3-e7b8-748d7da248da + + For a full list of examples, please see the documentation. 
+ +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *TokenRevokeCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP) + + f := set.NewFlagSet("Command Options") + + f.BoolVar(&BoolVar{ + Name: "accessor", + Target: &c.flagAccessor, + Default: false, + EnvVar: "", + Completion: complete.PredictNothing, + Usage: "Treat the argument as an accessor instead of a token.", + }) + + f.BoolVar(&BoolVar{ + Name: "self", + Target: &c.flagSelf, + Default: false, + EnvVar: "", + Completion: complete.PredictNothing, + Usage: "Perform the revocation on the currently authenticated token.", + }) + + f.StringVar(&StringVar{ + Name: "mode", + Target: &c.flagMode, + Default: "", + EnvVar: "", + Completion: complete.PredictSet("orphan", "path"), + Usage: "Type of revocation to perform. If unspecified, Vault will revoke " + + "the token and all of the token's children. If \"orphan\", Vault will " + + "revoke only the token, leaving the children as orphans. If \"path\", " + + "tokens created from the given authentication path prefix are deleted " + + "along with their children.", + }) + + return set +} + +func (c *TokenRevokeCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *TokenRevokeCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *TokenRevokeCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + token := "" + if len(args) > 0 { + token = strings.TrimSpace(args[0]) + } + + switch c.flagMode { + case "", "orphan", "path": + default: + c.UI.Error(fmt.Sprintf("Invalid mode: %s", c.flagMode)) + return 1 + } + + switch { + case c.flagSelf && len(args) > 0: + c.UI.Error(fmt.Sprintf("Too many arguments with -self (expected 0, got %d)", len(args))) + return 1 + case !c.flagSelf && len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1 or -self, got %d)", len(args))) + return 1 + case !c.flagSelf && len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1 or -self, got %d)", len(args))) + return 1 + case c.flagSelf && c.flagAccessor: + c.UI.Error("Cannot use -self with -accessor!") + return 1 + case c.flagSelf && c.flagMode != "": + c.UI.Error("Cannot use -self with -mode!") + return 1 + case c.flagAccessor && c.flagMode == "orphan": + c.UI.Error("Cannot use -accessor with -mode=orphan!") + return 1 + case c.flagAccessor && c.flagMode == "path": + c.UI.Error("Cannot use -accessor with -mode=path!") + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + var revokeFn func(string) error + // Handle all 6 possible combinations + switch { + case !c.flagAccessor && c.flagSelf && c.flagMode == "": + revokeFn = client.Auth().Token().RevokeSelf + case !c.flagAccessor && !c.flagSelf && c.flagMode == "": + revokeFn = client.Auth().Token().RevokeTree + case !c.flagAccessor && !c.flagSelf && c.flagMode == "orphan": + revokeFn = client.Auth().Token().RevokeOrphan + case !c.flagAccessor && !c.flagSelf && c.flagMode == "path": + revokeFn = client.Sys().RevokePrefix + case c.flagAccessor && !c.flagSelf && c.flagMode == "": + revokeFn = client.Auth().Token().RevokeAccessor + } + + if err := revokeFn(token); err != nil { + c.UI.Error(fmt.Sprintf("Error revoking token: %s", err)) + return 2 + } + + c.UI.Output("Success! 
Revoked token (if it existed)") + return 0 +} diff --git a/command/token_revoke_test.go b/command/token_revoke_test.go new file mode 100644 index 0000000..6ff8898 --- /dev/null +++ b/command/token_revoke_test.go @@ -0,0 +1,229 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/mitchellh/cli" +) + +func testTokenRevokeCommand(tb testing.TB) (*cli.MockUi, *TokenRevokeCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &TokenRevokeCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestTokenRevokeCommand_Run(t *testing.T) { + t.Parallel() + + validations := []struct { + name string + args []string + out string + code int + }{ + { + "bad_mode", + []string{"-mode=banana"}, + "Invalid mode", + 1, + }, + { + "empty", + nil, + "Not enough arguments", + 1, + }, + { + "args_with_self", + []string{"-self", "abcd1234"}, + "Too many arguments", + 1, + }, + { + "too_many_args", + []string{"abcd1234", "efgh5678"}, + "Too many arguments", + 1, + }, + { + "self_and_accessor", + []string{"-self", "-accessor"}, + "Cannot use -self with -accessor", + 1, + }, + { + "self_and_mode", + []string{"-self", "-mode=orphan"}, + "Cannot use -self with -mode", + 1, + }, + { + "accessor_and_mode_orphan", + []string{"-accessor", "-mode=orphan", "abcd1234"}, + "Cannot use -accessor with -mode=orphan", + 1, + }, + { + "accessor_and_mode_path", + []string{"-accessor", "-mode=path", "abcd1234"}, + "Cannot use -accessor with -mode=path", + 1, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range validations { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testTokenRevokeCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("token", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + token, _ := testTokenAndAccessor(t, client) + + ui, cmd := testTokenRevokeCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + token, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! Revoked token" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + secret, err := client.Auth().Token().Lookup(token) + if secret != nil || err == nil { + t.Errorf("expected token to be revoked: %#v", secret) + } + }) + + t.Run("self", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testTokenRevokeCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-self", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! 
Revoked token" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + secret, err := client.Auth().Token().LookupSelf() + if secret != nil || err == nil { + t.Errorf("expected token to be revoked: %#v", secret) + } + }) + + t.Run("accessor", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + token, accessor := testTokenAndAccessor(t, client) + + ui, cmd := testTokenRevokeCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-accessor", + accessor, + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! Revoked token" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + secret, err := client.Auth().Token().Lookup(token) + if secret != nil || err == nil { + t.Errorf("expected token to be revoked: %#v", secret) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testTokenRevokeCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "abcd1234", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error revoking token: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testTokenRevokeCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/transform.go b/command/transform.go new file mode 100644 index 0000000..27345a4 --- /dev/null +++ b/command/transform.go @@ -0,0 +1,41 @@ +package command + +import ( + "strings" + + "github.com/mitchellh/cli" +) + +var _ cli.Command = (*TransformCommand)(nil) + +type TransformCommand struct { + *BaseCommand +} + +func (c *TransformCommand) Synopsis() string { + return "Interact with Vault's Transform Secrets Engine" +} + +func (c *TransformCommand) Help() string { + helpText := ` +Usage: vault transform [options] [args] + + This command has subcommands for interacting with Vault's Transform Secrets + Engine. Here are some simple examples, and more detailed examples are + available in the subcommands or the documentation. + + To import a key into a new FPE transformation: + + $ vault transform import transform/transformations/fpe/new-transformation @path/to/key \ + template=identifier \ + allowed_roles=physical-access + + Please see the individual subcommand help for detailed usage information. 
+` + + return strings.TrimSpace(helpText) +} + +func (c *TransformCommand) Run(args []string) int { + return cli.RunResultHelp +} diff --git a/command/transform_import_key.go b/command/transform_import_key.go new file mode 100644 index 0000000..e41fa62 --- /dev/null +++ b/command/transform_import_key.go @@ -0,0 +1,76 @@ +package command + +import ( + "errors" + "regexp" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*TransformImportCommand)(nil) + _ cli.CommandAutocomplete = (*TransformImportCommand)(nil) + transformKeyPath = regexp.MustCompile("^(.*)/transformations/(fpe|tokenization)/([^/]*)$") +) + +type TransformImportCommand struct { + *BaseCommand +} + +func (c *TransformImportCommand) Synopsis() string { + return "Import a key into the Transform secrets engines." +} + +func (c *TransformImportCommand) Help() string { + helpText := ` +Usage: vault transform import PATH KEY [options...] + + Using the Transform key wrapping system, imports key material from + the base64 encoded KEY (either directly on the CLI or via @path notation), + into a new FPE or tokenization transformation whose API path is PATH. + + To import a new key version into an existing tokenization transformation, + use import_version. + + The remaining options after KEY (key=value style) are passed on to + Create/Update FPE Transformation or Create/Update Tokenization Transformation + API endpoints. + + For example: + $ vault transform import transform/transformations/tokenization/application-form @path/to/key \ + allowed_roles=legacy-system +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *TransformImportCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP) +} + +func (c *TransformImportCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *TransformImportCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *TransformImportCommand) Run(args []string) int { + return ImportKey(c.BaseCommand, "import", transformImportKeyPath, c.Flags(), args) +} + +func transformImportKeyPath(s string, operation string) (path string, apiPath string, err error) { + parts := transformKeyPath.FindStringSubmatch(s) + if len(parts) != 4 { + return "", "", errors.New("expected transform path and key name in the form :path:/transformations/fpe|tokenization/:name:") + } + path = parts[1] + transformation := parts[2] + keyName := parts[3] + apiPath = path + "/transformations/" + transformation + "/" + keyName + "/" + operation + + return path, apiPath, nil +} diff --git a/command/transform_import_key_version.go b/command/transform_import_key_version.go new file mode 100644 index 0000000..6ed8cb1 --- /dev/null +++ b/command/transform_import_key_version.go @@ -0,0 +1,59 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*TransformImportVersionCommand)(nil) + _ cli.CommandAutocomplete = (*TransformImportVersionCommand)(nil) +) + +type TransformImportVersionCommand struct { + *BaseCommand +} + +func (c *TransformImportVersionCommand) Synopsis() string { + return "Import key material into a new key version in the Transform secrets engines." +} + +func (c *TransformImportVersionCommand) Help() string { + helpText := ` +Usage: vault transform import-version PATH KEY [...] 
+ + Using the Transform key wrapping system, imports new key material from + the base64 encoded KEY (either directly on the CLI or via @path notation), + into an existing tokenization transformation whose API path is PATH. + + The remaining options after KEY (key=value style) are passed on to + Create/Update Tokenization Transformation API endpoint. + + For example: + $ vault transform import-version transform/transformations/tokenization/application-form @path/to/new_version \ + allowed_roles=legacy-system +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *TransformImportVersionCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP) +} + +func (c *TransformImportVersionCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *TransformImportVersionCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *TransformImportVersionCommand) Run(args []string) int { + return ImportKey(c.BaseCommand, "import_version", transformImportKeyPath, c.Flags(), args) +} diff --git a/command/transit.go b/command/transit.go new file mode 100644 index 0000000..a48fef7 --- /dev/null +++ b/command/transit.go @@ -0,0 +1,42 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + + "github.com/mitchellh/cli" +) + +var _ cli.Command = (*TransitCommand)(nil) + +type TransitCommand struct { + *BaseCommand +} + +func (c *TransitCommand) Synopsis() string { + return "Interact with Vault's Transit Secrets Engine" +} + +func (c *TransitCommand) Help() string { + helpText := ` +Usage: vault transit [options] [args] + + This command has subcommands for interacting with Vault's Transit Secrets + Engine. Here are some simple examples, and more detailed examples are + available in the subcommands or the documentation. + + To import a key into the specified Transit mount: + + $ vault transit import transit/keys/newly-imported @path/to/key type=rsa-2048 + + Please see the individual subcommand help for detailed usage information. +` + + return strings.TrimSpace(helpText) +} + +func (c *TransitCommand) Run(args []string) int { + return cli.RunResultHelp +} diff --git a/command/transit_import_key.go b/command/transit_import_key.go new file mode 100644 index 0000000..3eea700 --- /dev/null +++ b/command/transit_import_key.go @@ -0,0 +1,211 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "os" + "regexp" + "strings" + + "github.com/hashicorp/vault/api" + + "github.com/google/tink/go/kwp/subtle" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*TransitImportCommand)(nil) + _ cli.CommandAutocomplete = (*TransitImportCommand)(nil) + keyPath = regexp.MustCompile("^(.*)/keys/([^/]*)$") +) + +type TransitImportCommand struct { + *BaseCommand +} + +func (c *TransitImportCommand) Synopsis() string { + return "Import a key into the Transit secrets engines." +} + +func (c *TransitImportCommand) Help() string { + helpText := ` +Usage: vault transit import PATH KEY [options...] + + Using the Transit key wrapping system, imports key material from + the base64 encoded KEY (either directly on the CLI or via @path notation), + into a new key whose API path is PATH. To import a new version into an + existing key, use import_version. 
The remaining options after KEY (key=value
+  style) are passed on to the Transit create key endpoint. If your
+  system or device natively supports the RSA AES key wrap mechanism (such as
+  the PKCS#11 mechanism CKM_RSA_AES_KEY_WRAP), you should use it directly
+  rather than this command.
+
+` + c.Flags().Help()
+
+	return strings.TrimSpace(helpText)
+}
+
+func (c *TransitImportCommand) Flags() *FlagSets {
+	return c.flagSet(FlagSetHTTP)
+}
+
+func (c *TransitImportCommand) AutocompleteArgs() complete.Predictor {
+	return nil
+}
+
+func (c *TransitImportCommand) AutocompleteFlags() complete.Flags {
+	return c.Flags().Completions()
+}
+
+func (c *TransitImportCommand) Run(args []string) int {
+	return ImportKey(c.BaseCommand, "import", transitImportKeyPath, c.Flags(), args)
+}
+
+func transitImportKeyPath(s string, operation string) (path string, apiPath string, err error) {
+	parts := keyPath.FindStringSubmatch(s)
+	if len(parts) != 3 {
+		return "", "", errors.New("expected transit path and key name in the form :path:/keys/:name:")
+	}
+	path = parts[1]
+	keyName := parts[2]
+	apiPath = path + "/keys/" + keyName + "/" + operation
+
+	return path, apiPath, nil
+}
+
+type ImportKeyFunc func(s string, operation string) (path string, apiPath string, err error)
+
+// error codes: 1: user error, 2: internal computation error, 3: remote api call error
+func ImportKey(c *BaseCommand, operation string, pathFunc ImportKeyFunc, flags *FlagSets, args []string) int {
+	// Parse and validate the arguments.
+	if err := flags.Parse(args); err != nil {
+		c.UI.Error(err.Error())
+		return 1
+	}
+
+	args = flags.Args()
+	if len(args) < 2 {
+		c.UI.Error(fmt.Sprintf("Incorrect argument count (expected 2+, got %d). Wanted PATH to import into and KEY material.", len(args)))
+		return 1
+	}
+
+	client, err := c.Client()
+	if err != nil {
+		c.UI.Error(err.Error())
+		return 2
+	}
+
+	ephemeralAESKey := make([]byte, 32)
+	_, err = rand.Read(ephemeralAESKey)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("failed to generate ephemeral key: %v", err))
+		return 2
+	}
+	path, apiPath, err := pathFunc(args[0], operation)
+	if err != nil {
+		c.UI.Error(err.Error())
+		return 1
+	}
+	keyMaterial := args[1]
+	if keyMaterial[0] == '@' {
+		keyMaterialBytes, err := os.ReadFile(keyMaterial[1:])
+		if err != nil {
+			c.UI.Error(fmt.Sprintf("error reading key material file: %v", err))
+			return 1
+		}
+
+		keyMaterial = string(keyMaterialBytes)
+	}
+
+	key, err := base64.StdEncoding.DecodeString(keyMaterial)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("error base64 decoding source key material: %v", err))
+		return 1
+	}
+	// Fetch the wrapping key
+	c.UI.Output("Retrieving wrapping key.")
+	wrappingKey, err := fetchWrappingKey(c, client, path)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("failed to fetch wrapping key: %v", err))
+		return 3
+	}
+	c.UI.Output("Wrapping source key with ephemeral key.")
+	wrapKWP, err := subtle.NewKWP(ephemeralAESKey)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("failure building key wrapping key: %v", err))
+		return 2
+	}
+	wrappedTargetKey, err := wrapKWP.Wrap(key)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("failure wrapping source key: %v", err))
+		return 2
+	}
+	c.UI.Output("Encrypting ephemeral key with wrapping key.")
+	wrappedAESKey, err := rsa.EncryptOAEP(
+		sha256.New(),
+		rand.Reader,
+		wrappingKey.(*rsa.PublicKey),
+		ephemeralAESKey,
+		[]byte{},
+	)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("failure encrypting wrapped key: %v", err))
+		return 2
+	}
+	combinedCiphertext := append(wrappedAESKey, wrappedTargetKey...)
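+	// The ciphertext submitted below is the concatenation of the RSA-OAEP-wrapped
+	// ephemeral AES key and the KWP-wrapped target key material.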
+	importCiphertext := base64.StdEncoding.EncodeToString(combinedCiphertext)
+
+	// Parse all the key options
+	data, err := parseArgsData(os.Stdin, args[2:])
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("Failed to parse extra K=V data: %s", err))
+		return 1
+	}
+	if data == nil {
+		data = make(map[string]interface{}, 1)
+	}
+
+	data["ciphertext"] = importCiphertext
+
+	c.UI.Output("Submitting wrapped key.")
+	// Finally, call import
+
+	_, err = client.Logical().Write(apiPath, data)
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("failed to call import: %v", err))
+		return 3
+	}
+
+	c.UI.Output("Success!")
+	return 0
+}
+
+func fetchWrappingKey(c *BaseCommand, client *api.Client, path string) (any, error) {
+	resp, err := client.Logical().Read(path + "/wrapping_key")
+	if err != nil {
+		return nil, fmt.Errorf("error fetching wrapping key: %w", err)
+	}
+	if resp == nil {
+		return nil, fmt.Errorf("no mount found at %s", path)
+	}
+	key, ok := resp.Data["public_key"]
+	if !ok {
+		return nil, fmt.Errorf("could not find wrapping key in response from %s/wrapping_key", path)
+	}
+	keyBlock, _ := pem.Decode([]byte(key.(string)))
+	if keyBlock == nil {
+		return nil, fmt.Errorf("failed to PEM decode wrapping key")
+	}
+	parsedKey, err := x509.ParsePKIXPublicKey(keyBlock.Bytes)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing wrapping key: %w", err)
+	}
+	return parsedKey, nil
+}
diff --git a/command/transit_import_key_test.go b/command/transit_import_key_test.go
new file mode 100644
index 0000000..e01c03f
--- /dev/null
+++ b/command/transit_import_key_test.go
@@ -0,0 +1,189 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"bytes"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/base64"
+	"testing"
+
+	"github.com/hashicorp/vault/api"
+
+	"github.com/stretchr/testify/require"
+)
+
+// Validate the `vault transit import` command works.
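+// Each case invokes the real CLI ("transit import" / "transit import-version")
+// against a shared transit mount; shouldFail marks combinations the server is
+// expected to reject, such as re-importing an existing key or declaring a type
+// that does not match the supplied key material.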
+func TestTransitImport(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().Mount("transit", &api.MountInput{ + Type: "transit", + }); err != nil { + t.Fatalf("transit mount error: %#v", err) + } + + rsa1, rsa2, aes128, aes256 := generateKeys(t) + + type testCase struct { + variant string + path string + key []byte + args []string + shouldFail bool + } + tests := []testCase{ + { + "import", + "transit/keys/rsa1", + rsa1, + []string{"type=rsa-2048"}, + false, /* first import */ + }, + { + "import", + "transit/keys/rsa1", + rsa2, + []string{"type=rsa-2048"}, + true, /* already exists */ + }, + { + "import-version", + "transit/keys/rsa1", + rsa2, + []string{"type=rsa-2048"}, + false, /* new version */ + }, + { + "import", + "transit/keys/rsa2", + rsa2, + []string{"type=rsa-4096"}, + true, /* wrong type */ + }, + { + "import", + "transit/keys/rsa2", + rsa2, + []string{"type=rsa-2048"}, + false, /* new name */ + }, + { + "import", + "transit/keys/aes1", + aes128, + []string{"type=aes128-gcm96"}, + false, /* first import */ + }, + { + "import", + "transit/keys/aes1", + aes256, + []string{"type=aes256-gcm96"}, + true, /* already exists */ + }, + { + "import-version", + "transit/keys/aes1", + aes256, + []string{"type=aes256-gcm96"}, + true, /* new version, different type */ + }, + { + "import-version", + "transit/keys/aes1", + aes128, + []string{"type=aes128-gcm96"}, + false, /* new version */ + }, + { + "import", + "transit/keys/aes2", + aes256, + []string{"type=aes128-gcm96"}, + true, /* wrong type */ + }, + { + "import", + "transit/keys/aes2", + aes256, + []string{"type=aes256-gcm96"}, + false, /* new name */ + }, + } + + for index, tc := range tests { + t.Logf("Running test case %d: %v", index, tc) + execTransitImport(t, client, tc.variant, tc.path, tc.key, tc.args, tc.shouldFail) + } +} + +func execTransitImport(t *testing.T, client *api.Client, method string, path string, key []byte, data []string, expectFailure bool) { + t.Helper() + + keyBase64 := base64.StdEncoding.EncodeToString(key) + + var args []string + args = append(args, "transit") + args = append(args, method) + args = append(args, path) + args = append(args, keyBase64) + args = append(args, data...) 
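+	// Run through the top-level CLI entrypoint rather than invoking the command
+	// struct directly so flag parsing and output handling are exercised as well.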
+ + stdout := bytes.NewBuffer(nil) + stderr := bytes.NewBuffer(nil) + runOpts := &RunOptions{ + Stdout: stdout, + Stderr: stderr, + Client: client, + } + + code := RunCustom(args, runOpts) + combined := stdout.String() + stderr.String() + + if code != 0 { + if !expectFailure { + t.Fatalf("Got unexpected failure from test (ret %d): %v", code, combined) + } + } else { + if expectFailure { + t.Fatalf("Expected failure, got success from test (ret %d): %v", code, combined) + } + } +} + +func generateKeys(t *testing.T) (rsa1 []byte, rsa2 []byte, aes128 []byte, aes256 []byte) { + t.Helper() + + priv1, err := rsa.GenerateKey(rand.Reader, 2048) + require.NotNil(t, priv1, "failed generating RSA 1 key") + require.NoError(t, err, "failed generating RSA 1 key") + + rsa1, err = x509.MarshalPKCS8PrivateKey(priv1) + require.NotNil(t, rsa1, "failed marshaling RSA 1 key") + require.NoError(t, err, "failed marshaling RSA 1 key") + + priv2, err := rsa.GenerateKey(rand.Reader, 2048) + require.NotNil(t, priv2, "failed generating RSA 2 key") + require.NoError(t, err, "failed generating RSA 2 key") + + rsa2, err = x509.MarshalPKCS8PrivateKey(priv2) + require.NotNil(t, rsa2, "failed marshaling RSA 2 key") + require.NoError(t, err, "failed marshaling RSA 2 key") + + aes128 = make([]byte, 128/8) + _, err = rand.Read(aes128) + require.NoError(t, err, "failed generating AES 128 key") + + aes256 = make([]byte, 256/8) + _, err = rand.Read(aes256) + require.NoError(t, err, "failed generating AES 256 key") + + return +} diff --git a/command/transit_import_key_version.go b/command/transit_import_key_version.go new file mode 100644 index 0000000..1a25078 --- /dev/null +++ b/command/transit_import_key_version.go @@ -0,0 +1,58 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*TransitImportVersionCommand)(nil) + _ cli.CommandAutocomplete = (*TransitImportVersionCommand)(nil) +) + +type TransitImportVersionCommand struct { + *BaseCommand +} + +func (c *TransitImportVersionCommand) Synopsis() string { + return "Import key material into a new key version in the Transit secrets engines." +} + +func (c *TransitImportVersionCommand) Help() string { + helpText := ` +Usage: vault transit import-version PATH KEY [...] + + Using the Transit key wrapping system, imports key material from + the base64 encoded KEY (either directly on the CLI or via @path notation), + into a new key whose API path is PATH. To import a new Transit + key, use the import command instead. The remaining options after KEY + (key=value style) are passed on to the Transit create key endpoint. + If your system or device natively supports the RSA AES key wrap mechanism + (such as the PKCS#11 mechanism CKM_RSA_AES_KEY_WRAP), you should use it + directly rather than this command. 
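+
+  For example (the mount path and key file shown are illustrative):
+
+      $ vault transit import-version transit/keys/existing-key @path/to/new_version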
+ +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *TransitImportVersionCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP) +} + +func (c *TransitImportVersionCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *TransitImportVersionCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *TransitImportVersionCommand) Run(args []string) int { + return ImportKey(c.BaseCommand, "import_version", transitImportKeyPath, c.Flags(), args) +} diff --git a/command/unwrap.go b/command/unwrap.go new file mode 100644 index 0000000..1f920e7 --- /dev/null +++ b/command/unwrap.go @@ -0,0 +1,113 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "strings" + + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*UnwrapCommand)(nil) + _ cli.CommandAutocomplete = (*UnwrapCommand)(nil) +) + +// UnwrapCommand is a Command that behaves like ReadCommand but specifically for +// unwrapping cubbyhole-wrapped secrets +type UnwrapCommand struct { + *BaseCommand +} + +func (c *UnwrapCommand) Synopsis() string { + return "Unwrap a wrapped secret" +} + +func (c *UnwrapCommand) Help() string { + helpText := ` +Usage: vault unwrap [options] [TOKEN] + + Unwraps a wrapped secret from Vault by the given token. The result is the + same as the "vault read" operation on the non-wrapped secret. If no token + is given, the data in the currently authenticated token is unwrapped. + + Unwrap the data in the cubbyhole secrets engine for a token: + + $ vault unwrap 3de9ece1-b347-e143-29b0-dc2dc31caafd + + Unwrap the data in the active token: + + $ vault login 848f9ccf-7176-098c-5e2b-75a0689d41cd + $ vault unwrap # unwraps 848f9ccf... + + For a full list of examples and paths, please see the online documentation. + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *UnwrapCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) +} + +func (c *UnwrapCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultFiles() +} + +func (c *UnwrapCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *UnwrapCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + token := "" + switch len(args) { + case 0: + // Leave token as "", that will use the local token + case 1: + token = strings.TrimSpace(args[0]) + default: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0-1, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + secret, err := client.Logical().Unwrap(token) + if err != nil { + c.UI.Error(fmt.Sprintf("Error unwrapping: %s", err)) + return 2 + } + if secret == nil { + if Format(c.UI) == "table" { + c.UI.Info("Successfully unwrapped. 
There was no data in the wrapped secret.") + } + return 0 + } + + // Handle single field output + if c.flagField != "" { + return PrintRawField(c.UI, secret, c.flagField) + } + + // Check if the original was a list response and format as a list + if _, ok := extractListData(secret); ok { + return OutputList(c.UI, secret) + } + return OutputSecret(c.UI, secret) +} diff --git a/command/unwrap_test.go b/command/unwrap_test.go new file mode 100644 index 0000000..608edff --- /dev/null +++ b/command/unwrap_test.go @@ -0,0 +1,163 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "strings" + "testing" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" +) + +func testUnwrapCommand(tb testing.TB) (*cli.MockUi, *UnwrapCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &UnwrapCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func testUnwrapWrappedToken(tb testing.TB, client *api.Client, data map[string]interface{}) string { + tb.Helper() + + wrapped, err := client.Logical().Write("sys/wrapping/wrap", data) + if err != nil { + tb.Fatal(err) + } + if wrapped == nil || wrapped.WrapInfo == nil || wrapped.WrapInfo.Token == "" { + tb.Fatalf("missing wrap info: %v", wrapped) + } + return wrapped.WrapInfo.Token +} + +func TestUnwrapCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "too_many_args", + []string{"foo", "bar"}, + "Too many arguments", + 1, + }, + { + "default", + nil, // Token comes in the test func + "bar", + 0, + }, + { + "field", + []string{"-field", "foo"}, + "bar", + 0, + }, + { + "field_not_found", + []string{"-field", "not-a-real-field"}, + "not present in secret", + 1, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + wrappedToken := testUnwrapWrappedToken(t, client, map[string]interface{}{ + "foo": "bar", + }) + + ui, cmd := testUnwrapCommand(t) + cmd.client = client + + tc.args = append(tc.args, wrappedToken) + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testUnwrapCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "foo", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error unwrapping: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + // This test needs its own client and server because it modifies the client + // to the wrapping token + t.Run("local_token", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + wrappedToken := testUnwrapWrappedToken(t, client, map[string]interface{}{ + "foo": "bar", + }) + + ui, cmd := testUnwrapCommand(t) + cmd.client = client + cmd.client.SetToken(wrappedToken) + + // Intentionally don't pass the token here - it should use the local token + code := cmd.Run([]string{}) + if 
exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, "bar") { + t.Errorf("expected %q to contain %q", combined, "bar") + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testUnwrapCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/command/util.go b/command/util.go new file mode 100644 index 0000000..e24d65d --- /dev/null +++ b/command/util.go @@ -0,0 +1,163 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "fmt" + "io" + "os" + "time" + + "github.com/fatih/color" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/config" + "github.com/hashicorp/vault/command/token" + "github.com/mitchellh/cli" +) + +// DefaultTokenHelper returns the token helper that is configured for Vault. +// This helper should only be used for non-server CLI commands. +func DefaultTokenHelper() (token.TokenHelper, error) { + return config.DefaultTokenHelper() +} + +// RawField extracts the raw field from the given data and returns it as a +// string for printing purposes. +func RawField(secret *api.Secret, field string) interface{} { + var val interface{} + switch { + case secret.Auth != nil: + switch field { + case "token": + val = secret.Auth.ClientToken + case "token_accessor": + val = secret.Auth.Accessor + case "token_duration": + val = secret.Auth.LeaseDuration + case "token_renewable": + val = secret.Auth.Renewable + case "token_policies": + val = secret.Auth.TokenPolicies + case "identity_policies": + val = secret.Auth.IdentityPolicies + case "policies": + val = secret.Auth.Policies + default: + val = secret.Data[field] + } + + case secret.WrapInfo != nil: + switch field { + case "wrapping_token": + val = secret.WrapInfo.Token + case "wrapping_accessor": + val = secret.WrapInfo.Accessor + case "wrapping_token_ttl": + val = secret.WrapInfo.TTL + case "wrapping_token_creation_time": + val = secret.WrapInfo.CreationTime.Format(time.RFC3339Nano) + case "wrapping_token_creation_path": + val = secret.WrapInfo.CreationPath + case "wrapped_accessor": + val = secret.WrapInfo.WrappedAccessor + default: + val = secret.Data[field] + } + + default: + switch field { + case "lease_duration": + val = secret.LeaseDuration + case "lease_id": + val = secret.LeaseID + case "request_id": + val = secret.RequestID + case "renewable": + val = secret.Renewable + case "refresh_interval": + val = secret.LeaseDuration + case "data": + var ok bool + val, ok = secret.Data["data"] + if !ok { + val = secret.Data + } + default: + val = secret.Data[field] + } + } + + return val +} + +// PrintRawField prints raw field from the secret. 
+func PrintRawField(ui cli.Ui, data interface{}, field string) int {
+	var val interface{}
+	switch data := data.(type) {
+	case *api.Secret:
+		val = RawField(data, field)
+	case map[string]interface{}:
+		val = data[field]
+	}
+
+	if val == nil {
+		ui.Error(fmt.Sprintf("Field %q not present in secret", field))
+		return 1
+	}
+
+	format := Format(ui)
+	if format == "" || format == "table" || format == "raw" {
+		return PrintRaw(ui, fmt.Sprintf("%v", val))
+	}
+
+	// Handle specific format flags as best as possible
+	formatter, ok := Formatters[format]
+	if !ok {
+		ui.Error(fmt.Sprintf("Invalid output format: %s", format))
+		return 1
+	}
+
+	b, err := formatter.Format(val)
+	if err != nil {
+		ui.Error(fmt.Sprintf("Error formatting output: %s", err))
+		return 1
+	}
+
+	return PrintRaw(ui, string(b))
+}
+
+// PrintRaw prints a raw value to the terminal. If the process is being "piped"
+// to something else, the "raw" value is printed without a newline character.
+// Otherwise the value is printed as normal.
+func PrintRaw(ui cli.Ui, str string) int {
+	if !color.NoColor {
+		ui.Output(str)
+	} else {
+		// The cli.Ui appends a trailing newline, which is not wanted since the user
+		// probably wants just the raw value.
+		w := getWriterFromUI(ui)
+		fmt.Fprint(w, str)
+	}
+	return 0
+}
+
+// getWriterFromUI accepts a cli.Ui and returns the underlying io.Writer by
+// unwrapping as many wrapped Uis as necessary. If there is an unknown UI
+// type, this falls back to os.Stdout.
+func getWriterFromUI(ui cli.Ui) io.Writer {
+	switch t := ui.(type) {
+	case *VaultUI:
+		return getWriterFromUI(t.Ui)
+	case *cli.BasicUi:
+		return t.Writer
+	case *cli.ColoredUi:
+		return getWriterFromUI(t.Ui)
+	case *cli.ConcurrentUi:
+		return getWriterFromUI(t.Ui)
+	case *cli.MockUi:
+		return t.OutputWriter
+	default:
+		return os.Stdout
+	}
+}
diff --git a/command/version.go b/command/version.go
new file mode 100644
index 0000000..e9b1722
--- /dev/null
+++ b/command/version.go
@@ -0,0 +1,66 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"strings"
+
+	"github.com/hashicorp/vault/version"
+	"github.com/mitchellh/cli"
+	"github.com/posener/complete"
+)
+
+var (
+	_ cli.Command             = (*VersionCommand)(nil)
+	_ cli.CommandAutocomplete = (*VersionCommand)(nil)
+)
+
+// VersionCommand is a Command implementation that prints the version.
+type VersionCommand struct {
+	*BaseCommand
+
+	VersionInfo *version.VersionInfo
+}
+
+func (c *VersionCommand) Synopsis() string {
+	return "Prints the Vault CLI version"
+}
+
+func (c *VersionCommand) Help() string {
+	helpText := `
+Usage: vault version
+
+  Prints the version of this Vault CLI. This does not print the target Vault
+  server version.
+
+  Print the version:
+
+      $ vault version
+
+  There are no arguments or flags to this command. Any additional arguments or
+  flags are ignored.
+`
+	return strings.TrimSpace(helpText)
+}
+
+func (c *VersionCommand) Flags() *FlagSets {
+	return nil
+}
+
+func (c *VersionCommand) AutocompleteArgs() complete.Predictor {
+	return nil
+}
+
+func (c *VersionCommand) AutocompleteFlags() complete.Flags {
+	return nil
+}
+
+func (c *VersionCommand) Run(_ []string) int {
+	out := c.VersionInfo.FullVersionNumber(true)
+	if version.CgoEnabled {
+		out += " (cgo)"
+	}
+	c.UI.Output(out)
+	return 0
+}
diff --git a/command/version_history.go b/command/version_history.go
new file mode 100644
index 0000000..b2dfbae
--- /dev/null
+++ b/command/version_history.go
@@ -0,0 +1,134 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/mitchellh/cli"
+	"github.com/posener/complete"
+	"github.com/ryanuber/columnize"
+)
+
+var (
+	_ cli.Command             = (*VersionHistoryCommand)(nil)
+	_ cli.CommandAutocomplete = (*VersionHistoryCommand)(nil)
+)
+
+// VersionHistoryCommand is a Command implementation that prints the version
+// history of the target Vault server.
+type VersionHistoryCommand struct {
+	*BaseCommand
+}
+
+func (c *VersionHistoryCommand) Synopsis() string {
+	return "Prints the version history of the target Vault server"
+}
+
+func (c *VersionHistoryCommand) Help() string {
+	helpText := `
+Usage: vault version-history
+
+  Prints the version history of the target Vault server.
+
+  Print the version history:
+
+      $ vault version-history
+` + c.Flags().Help()
+	return strings.TrimSpace(helpText)
+}
+
+func (c *VersionHistoryCommand) Flags() *FlagSets {
+	return c.flagSet(FlagSetOutputFormat)
+}
+
+func (c *VersionHistoryCommand) AutocompleteArgs() complete.Predictor {
+	return complete.PredictNothing
+}
+
+func (c *VersionHistoryCommand) AutocompleteFlags() complete.Flags {
+	return c.Flags().Completions()
+}
+
+const versionTrackingWarning = `Note:
+Use of this command requires a server running Vault 1.10.0 or greater.
+Version tracking was added in 1.9.0. Earlier versions have not been tracked.
+`
+
+func (c *VersionHistoryCommand) Run(args []string) int {
+	f := c.Flags()
+
+	if err := f.Parse(args); err != nil {
+		c.UI.Error(err.Error())
+		return 1
+	}
+
+	client, err := c.Client()
+	if err != nil {
+		c.UI.Error(err.Error())
+		return 2
+	}
+
+	resp, err := client.Logical().List("sys/version-history")
+	if err != nil {
+		c.UI.Error(fmt.Sprintf("Error reading version history: %s", err))
+		return 2
+	}
+
+	if resp == nil || resp.Data == nil {
+		c.UI.Error("Invalid response returned from Vault")
+		return 2
+	}
+
+	if c.flagFormat == "json" {
+		c.UI.Warn("")
+		c.UI.Warn(versionTrackingWarning)
+		c.UI.Warn("")
+
+		return OutputData(c.UI, resp)
+	}
+
+	var keyInfo map[string]interface{}
+
+	keys, ok := extractListData(resp)
+	if !ok {
+		c.UI.Error("Expected keys in response to be an array")
+		return 2
+	}
+
+	keyInfo, ok = resp.Data["key_info"].(map[string]interface{})
+	if !ok {
+		c.UI.Error("Expected key_info in response to be a map")
+		return 2
+	}
+
+	table := []string{"Version | Installation Time | Build Date"}
+	columnConfig := columnize.DefaultConfig()
+
+	for _, versionRaw := range keys {
+		version, ok := versionRaw.(string)
+
+		if !ok {
+			c.UI.Error("Expected version to be string")
+			return 2
+		}
+
+		versionInfoRaw := keyInfo[version]
+
+		versionInfo, ok := versionInfoRaw.(map[string]interface{})
+		if !ok {
+			c.UI.Error(fmt.Sprintf("Expected version info for %q to be map", version))
+			return 2
+		}
+
+		table = append(table, fmt.Sprintf("%s | %s | %s", version, versionInfo["timestamp_installed"], versionInfo["build_date"]))
+	}
+
+	c.UI.Warn("")
+	c.UI.Warn(versionTrackingWarning)
+	c.UI.Warn("")
+	c.UI.Output(tableOutput(table, columnConfig))
+
+	return 0
+}
diff --git a/command/version_history_test.go b/command/version_history_test.go
new file mode 100644
index 0000000..c011a4b
--- /dev/null
+++ b/command/version_history_test.go
@@ -0,0 +1,114 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "bytes" + "encoding/json" + "strings" + "testing" + + "github.com/hashicorp/vault/version" + "github.com/mitchellh/cli" +) + +func testVersionHistoryCommand(tb testing.TB) (*cli.MockUi, *VersionHistoryCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &VersionHistoryCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestVersionHistoryCommand_TableOutput(t *testing.T) { + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testVersionHistoryCommand(t) + cmd.client = client + + code := cmd.Run([]string{}) + + if expectedCode := 0; code != expectedCode { + t.Fatalf("expected %d to be %d: %s", code, expectedCode, ui.ErrorWriter.String()) + } + + if errorString := ui.ErrorWriter.String(); !strings.Contains(errorString, versionTrackingWarning) { + t.Errorf("expected %q to contain %q", errorString, versionTrackingWarning) + } + + output := ui.OutputWriter.String() + + if !strings.Contains(output, version.Version) { + t.Errorf("expected %q to contain version %q", output, version.Version) + } +} + +func TestVersionHistoryCommand_JsonOutput(t *testing.T) { + client, closer := testVaultServer(t) + defer closer() + + stdout := bytes.NewBuffer(nil) + stderr := bytes.NewBuffer(nil) + runOpts := &RunOptions{ + Stdout: stdout, + Stderr: stderr, + Client: client, + } + + args, format, _, _, _ := setupEnv([]string{"version-history", "-format", "json"}) + if format != "json" { + t.Fatalf("expected format to be %q, actual %q", "json", format) + } + + code := RunCustom(args, runOpts) + + if expectedCode := 0; code != expectedCode { + t.Fatalf("expected %d to be %d: %s", code, expectedCode, stderr.String()) + } + + if stderrString := stderr.String(); !strings.Contains(stderrString, versionTrackingWarning) { + t.Errorf("expected %q to contain %q", stderrString, versionTrackingWarning) + } + + stdoutBytes := stdout.Bytes() + + if !json.Valid(stdoutBytes) { + t.Fatalf("expected output %q to be valid JSON", stdoutBytes) + } + + var versionHistoryResp map[string]interface{} + err := json.Unmarshal(stdoutBytes, &versionHistoryResp) + if err != nil { + t.Fatalf("failed to unmarshal json from STDOUT, err: %s", err.Error()) + } + + var respData map[string]interface{} + var ok bool + var keys []interface{} + var keyInfo map[string]interface{} + + if respData, ok = versionHistoryResp["data"].(map[string]interface{}); !ok { + t.Fatalf("expected data key to be map, actual: %#v", versionHistoryResp["data"]) + } + + if keys, ok = respData["keys"].([]interface{}); !ok { + t.Fatalf("expected keys to be array, actual: %#v", respData["keys"]) + } + + if keyInfo, ok = respData["key_info"].(map[string]interface{}); !ok { + t.Fatalf("expected key_info to be map, actual: %#v", respData["key_info"]) + } + + if len(keys) != 1 { + t.Fatalf("expected single version history entry for %q", version.Version) + } + + if keyInfo[version.Version] == nil { + t.Fatalf("expected version %s to be present in key_info, actual: %#v", version.Version, keyInfo) + } +} diff --git a/command/version_test.go b/command/version_test.go new file mode 100644 index 0000000..ede21e6 --- /dev/null +++ b/command/version_test.go @@ -0,0 +1,56 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"strings"
+	"testing"
+
+	"github.com/hashicorp/vault/version"
+	"github.com/mitchellh/cli"
+)
+
+func testVersionCommand(tb testing.TB) (*cli.MockUi, *VersionCommand) {
+	tb.Helper()
+
+	ui := cli.NewMockUi()
+	return ui, &VersionCommand{
+		VersionInfo: &version.VersionInfo{},
+		BaseCommand: &BaseCommand{
+			UI: ui,
+		},
+	}
+}
+
+func TestVersionCommand_Run(t *testing.T) {
+	t.Parallel()
+
+	t.Run("output", func(t *testing.T) {
+		t.Parallel()
+
+		client, closer := testVaultServer(t)
+		defer closer()
+
+		ui, cmd := testVersionCommand(t)
+		cmd.client = client
+
+		code := cmd.Run(nil)
+		if exp := 0; code != exp {
+			t.Errorf("expected %d to be %d", code, exp)
+		}
+
+		expected := "Vault"
+		combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
+		if !strings.Contains(combined, expected) {
+			t.Errorf("expected %q to contain %q", combined, expected)
+		}
+	})
+
+	t.Run("no_tabs", func(t *testing.T) {
+		t.Parallel()
+
+		_, cmd := testVersionCommand(t)
+		assertNoTabs(t, cmd)
+	})
+}
diff --git a/command/write.go b/command/write.go
new file mode 100644
index 0000000..2cc93b8
--- /dev/null
+++ b/command/write.go
@@ -0,0 +1,185 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package command
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"strings"
+
+	"github.com/hashicorp/vault/api"
+	"github.com/mitchellh/cli"
+	"github.com/posener/complete"
+)
+
+var (
+	_ cli.Command             = (*WriteCommand)(nil)
+	_ cli.CommandAutocomplete = (*WriteCommand)(nil)
+)
+
+// MFAMethodInfo contains the information about an MFA method
+type MFAMethodInfo struct {
+	methodID    string
+	methodType  string
+	usePasscode bool
+}
+
+// WriteCommand is a Command that puts data into the Vault.
+type WriteCommand struct {
+	*BaseCommand
+
+	flagForce bool
+
+	testStdin io.Reader // for tests
+}
+
+func (c *WriteCommand) Synopsis() string {
+	return "Write data, configuration, and secrets"
+}
+
+func (c *WriteCommand) Help() string {
+	helpText := `
+Usage: vault write [options] PATH [DATA K=V...]
+
+  Writes data to Vault at the given path. The data can be credentials, secrets,
+  configuration, or arbitrary data. The specific behavior of this command is
+  determined by the thing mounted at the path.
+
+  Data is specified as "key=value" pairs. If the value begins with an "@", then
+  it is loaded from a file. If the value is "-", Vault will read the value from
+  stdin.
+
+  Persist data in the generic secrets engine:
+
+      $ vault write secret/my-secret foo=bar
+
+  Create a new encryption key in the transit secrets engine:
+
+      $ vault write -f transit/keys/my-key
+
+  Upload an AWS IAM policy from a file on disk:
+
+      $ vault write aws/roles/ops policy=@policy.json
+
+  Configure access to Consul by providing an access token:
+
+      $ echo $MY_TOKEN | vault write consul/config/access token=-
+
+  For a full list of examples and paths, please see the documentation that
+  corresponds to the secret engines in use.
+
+` + c.Flags().Help()
+
+	return strings.TrimSpace(helpText)
+}
+
+func (c *WriteCommand) Flags() *FlagSets {
+	set := c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat)
+	f := set.NewFlagSet("Command Options")
+
+	f.BoolVar(&BoolVar{
+		Name:       "force",
+		Aliases:    []string{"f"},
+		Target:     &c.flagForce,
+		Default:    false,
+		EnvVar:     "",
+		Completion: complete.PredictNothing,
+		Usage: "Allow the operation to continue with no key=value pairs.
This " + + "allows writing to keys that do not need or expect data.", + }) + + return set +} + +func (c *WriteCommand) AutocompleteArgs() complete.Predictor { + // Return an anything predictor here. Without a way to access help + // information, we don't know what paths we could write to. + return complete.PredictAnything +} + +func (c *WriteCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *WriteCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) == 1 && !c.flagForce: + c.UI.Error("Must supply data or use -force") + return 1 + } + + // Pull our fake stdin if needed + stdin := (io.Reader)(os.Stdin) + if c.testStdin != nil { + stdin = c.testStdin + } + + path := sanitizePath(args[0]) + + data, err := parseArgsData(stdin, args[1:]) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to parse K=V data: %s", err)) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + secret, err := client.Logical().Write(path, data) + return handleWriteSecretOutput(c.BaseCommand, path, secret, err) +} + +func handleWriteSecretOutput(c *BaseCommand, path string, secret *api.Secret, err error) int { + if err != nil { + c.UI.Error(fmt.Sprintf("Error writing data to %s: %s", path, err)) + if secret != nil { + OutputSecret(c.UI, secret) + } + return 2 + } + if secret == nil { + // Don't output anything unless using the "table" format + if Format(c.UI) == "table" { + c.UI.Info(fmt.Sprintf("Success! Data written to: %s", path)) + } + return 0 + } + + // Currently, if there is only one MFA method configured, the login + // request is validated interactively + methodInfo := c.getInteractiveMFAMethodInfo(secret) + if methodInfo != nil { + secret, err = c.validateMFA(secret.Auth.MFARequirement.MFARequestID, *methodInfo) + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + } else if c.getMFAValidationRequired(secret) { + c.UI.Warn(wrapAtLength("A login request was issued that is subject to "+ + "MFA validation. Please make sure to validate the login by sending another "+ + "request to sys/mfa/validate endpoint.") + "\n") + } + + // Handle single field output + if c.flagField != "" { + return PrintRawField(c.UI, secret, c.flagField) + } + + return OutputSecret(c.UI, secret) +} diff --git a/command/write_test.go b/command/write_test.go new file mode 100644 index 0000000..9b76d39 --- /dev/null +++ b/command/write_test.go @@ -0,0 +1,283 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package command + +import ( + "io" + "strings" + "testing" + + "github.com/hashicorp/vault/api" + "github.com/mitchellh/cli" +) + +func testWriteCommand(tb testing.TB) (*cli.MockUi, *WriteCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &WriteCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestWriteCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + []string{}, + "Not enough arguments", + 1, + }, + { + "empty_kvs", + []string{"secret/write/foo"}, + "Must supply data or use -force", + 1, + }, + { + "force_kvs", + []string{"-force", "auth/token/create"}, + "token", + 0, + }, + { + "force_f_kvs", + []string{"-f", "auth/token/create"}, + "token", + 0, + }, + { + "kvs_no_value", + []string{"secret/write/foo", "foo"}, + "Failed to parse K=V data", + 1, + }, + { + "single_value", + []string{"secret/write/foo", "foo=bar"}, + "Success!", + 0, + }, + { + "multi_value", + []string{"secret/write/foo", "foo=bar", "zip=zap"}, + "Success!", + 0, + }, + { + "field", + []string{ + "-field", "token_renewable", + "auth/token/create", "display_name=foo", + }, + "false", + 0, + }, + { + "field_not_found", + []string{ + "-field", "not-a-real-field", + "auth/token/create", "display_name=foo", + }, + "not present in secret", + 1, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testWriteCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + + t.Run("force", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().Mount("transit/", &api.MountInput{ + Type: "transit", + }); err != nil { + t.Fatal(err) + } + + ui, cmd := testWriteCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-force", + "transit/keys/my-key", + }) + if exp := 0; code != exp { + t.Fatalf("expected %d to be %d: %q", code, exp, ui.ErrorWriter.String()) + } + + secret, err := client.Logical().Read("transit/keys/my-key") + if err != nil { + t.Fatal(err) + } + if secret == nil || secret.Data == nil { + t.Fatal("expected secret to have data") + } + }) + + t.Run("stdin_full", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + stdinR, stdinW := io.Pipe() + go func() { + stdinW.Write([]byte(`{"foo":"bar"}`)) + stdinW.Close() + }() + + _, cmd := testWriteCommand(t) + cmd.client = client + cmd.testStdin = stdinR + + code := cmd.Run([]string{ + "secret/write/stdin_full", "-", + }) + if code != 0 { + t.Fatalf("expected 0 to be %d", code) + } + + secret, err := client.Logical().Read("secret/write/stdin_full") + if err != nil { + t.Fatal(err) + } + if secret == nil || secret.Data == nil { + t.Fatal("expected secret to have data") + } + if exp, act := "bar", secret.Data["foo"].(string); exp != act { + t.Errorf("expected %q to be %q", act, exp) + } + }) + + t.Run("stdin_value", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + stdinR, stdinW := io.Pipe() + go func() { + stdinW.Write([]byte("bar")) + stdinW.Close() + }() + + _, cmd 
:= testWriteCommand(t) + cmd.client = client + cmd.testStdin = stdinR + + code := cmd.Run([]string{ + "secret/write/stdin_value", "foo=-", + }) + if code != 0 { + t.Fatalf("expected 0 to be %d", code) + } + + secret, err := client.Logical().Read("secret/write/stdin_value") + if err != nil { + t.Fatal(err) + } + if secret == nil || secret.Data == nil { + t.Fatal("expected secret to have data") + } + if exp, act := "bar", secret.Data["foo"].(string); exp != act { + t.Errorf("expected %q to be %q", act, exp) + } + }) + + t.Run("integration", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + _, cmd := testWriteCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "secret/write/integration", "foo=bar", "zip=zap", + }) + if code != 0 { + t.Fatalf("expected 0 to be %d", code) + } + + secret, err := client.Logical().Read("secret/write/integration") + if err != nil { + t.Fatal(err) + } + if secret == nil || secret.Data == nil { + t.Fatal("expected secret to have data") + } + if exp, act := "bar", secret.Data["foo"].(string); exp != act { + t.Errorf("expected %q to be %q", act, exp) + } + if exp, act := "zap", secret.Data["zip"].(string); exp != act { + t.Errorf("expected %q to be %q", act, exp) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testWriteCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "foo/bar", "a=b", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error writing data to foo/bar: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testWriteCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/dependencies/2-25-21/deps-upgrade-output.txt b/dependencies/2-25-21/deps-upgrade-output.txt new file mode 100644 index 0000000..52eea40 --- /dev/null +++ b/dependencies/2-25-21/deps-upgrade-output.txt @@ -0,0 +1,17 @@ +hridoyroy@Hridoys-MBP vault % python3 deps_upgrade.py dep.txt +github.com/satori/go.uuid +golang.org/x/text +github.com/hashicorp/go-gcp-common +github.com/hashicorp/vault-plugin-secrets-azure +go.mongodb.org/mongo-driver +github.com/Microsoft/hcsshim +package github.com/Microsoft/hcsshim + imports github.com/Microsoft/go-winio/pkg/guid + imports golang.org/x/sys/windows: build constraints exclude all Go files in /Users/hridoyroy/go/pkg/mod/golang.org/x/sys@v0.0.0-20210124154548-22da62e12c0c/windows +golang.org/x/crypto +github.com/containerd/containerd +github.com/aws/aws-sdk-go +github.com/hashicorp/serf +github.com/miekg/dns +github.com/hashicorp/go-discover +github.com/hashicorp/serf \ No newline at end of file diff --git a/dependencies/2-25-21/deps-upgrade.txt b/dependencies/2-25-21/deps-upgrade.txt new file mode 100644 index 0000000..64c7dae --- /dev/null +++ b/dependencies/2-25-21/deps-upgrade.txt @@ -0,0 +1,12 @@ +golang.org/x/text +github.com/hashicorp/go-gcp-common +github.com/hashicorp/vault-plugin-secrets-azure +go.mongodb.org/mongo-driver +github.com/Microsoft/hcsshim +golang.org/x/crypto +github.com/containerd/containerd +github.com/aws/aws-sdk-go +github.com/hashicorp/serf +github.com/miekg/dns +github.com/hashicorp/go-discover +github.com/hashicorp/serf \ No newline at end of file diff --git a/enos/Makefile b/enos/Makefile new file mode 
100644
index 0000000..d1f9334
--- /dev/null
+++ b/enos/Makefile
@@ -0,0 +1,35 @@
+.PHONY: default
+default: check-fmt shellcheck
+
+.PHONY: check-fmt
+check-fmt: check-fmt-enos check-fmt-modules
+
+.PHONY: fmt
+fmt: fmt-enos fmt-modules
+
+.PHONY: check-fmt-enos
+check-fmt-enos:
+	enos fmt --check --diff .
+
+.PHONY: fmt-enos
+fmt-enos:
+	enos fmt .
+
+.PHONY: check-fmt-modules
+check-fmt-modules:
+	terraform fmt -check -diff -recursive ./modules
+
+.PHONY: fmt-modules
+fmt-modules:
+	terraform fmt -diff -recursive ./modules
+
+.PHONY: validate-enos
+validate-enos:
+	enos scenario validate --timeout 30m0s
+
+.PHONY: lint
+lint: check-fmt shellcheck validate-enos
+
+.PHONY: shellcheck
+shellcheck:
+	find ./modules/ -type f -name '*.sh' | xargs shellcheck
diff --git a/enos/README.md b/enos/README.md
new file mode 100644
index 0000000..a33f4ab
--- /dev/null
+++ b/enos/README.md
@@ -0,0 +1,279 @@
+# Enos
+
+Enos is a quality testing framework that allows composing and executing quality
+requirement scenarios as code. For Vault, it is currently used to perform
+infrastructure integration testing using the artifacts that are created as part
+of the `build` workflow. While intended to be executed via GitHub Actions using
+the results of the `build` workflow, scenarios are also executable from a developer
+machine that has the requisite dependencies and configuration.
+
+Refer to the [Enos documentation](https://github.com/hashicorp/Enos-Docs)
+for further information regarding installation, execution, or composing Enos scenarios.
+
+## When to use Enos
+Determining whether to use `vault.NewTestCluster()` or Enos for testing a feature
+or scenario is ultimately up to the author. Sometimes one, the other, or both
+might be appropriate depending on the requirements. Generally, `vault.NewTestCluster()`
+is going to give you faster feedback and execution time, whereas Enos is going
+to give you a real-world execution and validation of the requirement. Consider
+the following cases as examples of when one might opt for an Enos scenario:
+
+* The feature requires third-party integrations, whether that be networked
+  dependencies like a real Consul backend, a real KMS key to test awskms
+  auto-unseal, auto-join discovery using AWS tags, or cloud hardware KMSs.
+* The feature might behave differently under multiple configuration variants
+  and therefore should be tested with all combinations, e.g. auto-unseal and
+  manual Shamir unseal, or replication in HA mode with integrated storage or
+  Consul storage.
+* The scenario requires coordination between multiple targets. For example,
+  consider the complex lifecycle event of migrating the seal type or storage,
+  or manually triggering a raft disaster scenario by partitioning the network
+  between the leader and follower nodes. Or perhaps an autopilot upgrade between
+  a stable version of Vault and our candidate version.
+* The scenario has specific deployment strategy requirements. For example,
+  if we want to add a regression test for an issue that only arises when the
+  software is deployed in a certain manner.
+* The scenario needs to use actual build artifacts that will be promoted
+  through the pipeline.
+
+## Requirements
+* AWS access. HashiCorp Vault developers should use Doormat.
+* Terraform >= 1.2
+* Enos >= v0.0.10. You can [install it from a release channel](https://github.com/hashicorp/Enos-Docs/blob/main/installation.md).
+* Access to the QTI org in Terraform Cloud.
HashiCorp Vault developers can
+  access a shared token in 1Password or request their own in #team-quality on
+  Slack.
+* An SSH keypair in the AWS region in which you wish to run the scenario. You can use
+  Doormat to log in to the AWS console to create or upload an existing keypair.
+* A Vault artifact is downloaded from the GHA artifacts when using the `artifact_source:crt` variant, from Artifactory when using `artifact_source:artifactory`, and built locally from the current branch when using the `artifact_source:local` variant.
+
+## Scenario Variables
+In CI, each scenario is executed via GitHub Actions and has been configured using
+environment variable inputs that follow the `ENOS_VAR_varname` pattern.
+
+For local execution you can specify all the required variables using environment
+variables, or you can update `enos.vars.hcl` with values and uncomment the lines.
+
+Variables that are required:
+* `aws_ssh_keypair_name`
+* `aws_ssh_private_key_path`
+* `tfc_api_token`
+* `vault_bundle_path`
+* `vault_license_path` (only required for non-OSS editions)
+
+See [enos.vars.hcl](./enos.vars.hcl) or [enos-variables.hcl](./enos-variables.hcl)
+for further descriptions of the variables. A minimal example variables file is
+sketched after the Smoke section below.
+
+## Executing Scenarios
+From the `enos` directory:
+
+```bash
+# List all available scenarios
+enos scenario list
+# Run the smoke or upgrade scenario with an artifact that is built locally. Make sure
+# the local machine has been configured as detailed in the requirements
+# section. This will execute the scenario and clean up any resources if successful.
+enos scenario run smoke artifact_source:local
+enos scenario run upgrade artifact_source:local
+# To run the same scenario variants that are run in CI, refer to the scenarios listed
+# in the JSON files under the .github/enos-run-matrices directory,
+# adding `artifact_source:local` to run locally.
+enos scenario run smoke backend:consul consul_version:1.12.3 distro:ubuntu seal:awskms artifact_source:local arch:amd64 edition:oss
+# Launch an individual scenario but leave infrastructure up after execution
+enos scenario launch smoke artifact_source:local
+# Check an individual scenario for validity. This is useful during scenario
+# authoring and debugging.
+enos scenario validate smoke artifact_source:local
+# If you've run the tests and desire to see the outputs, such as the URL or
+# credentials, you can run the output command to see them. Please note that
+# after "run" or "destroy" there will be no outputs, as the infrastructure
+# will have been destroyed and its state cleared.
+enos scenario output smoke artifact_source:local
+# Explicitly destroy all existing infrastructure
+enos scenario destroy smoke artifact_source:local
+```
+
+Refer to the [Enos documentation](https://github.com/hashicorp/Enos-Docs)
+for further information regarding installation, execution, or composing scenarios.
+
+# Scenarios
+There are currently two scenarios: `smoke` and `upgrade`. Both begin by building Vault
+as specified by the selected `artifact_source` variant (see the Variants section below for more
+information).
+
+## Smoke
+The [`smoke` scenario](./enos-scenario-smoke.hcl) creates a Vault cluster using
+the version from the current branch (either in CI or locally), with the backend
+specified by the `backend` variant (`raft` or `consul`). Next, it unseals with the
+appropriate method (`awskms` or `shamir`) and performs different verifications
+depending on the backend and seal type.
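+
+As a concrete illustration of the required variables listed in the Scenario
+Variables section above, a minimal `enos.vars.hcl` for local execution might
+look like the following sketch. Every value below is a placeholder rather than
+a real default, and `vault_license_path` only applies to non-OSS editions:
+
+```hcl
+# Sketch of a local enos.vars.hcl; all values are placeholders.
+aws_ssh_keypair_name     = "my-enos-keypair"            # keypair in your target AWS region
+aws_ssh_private_key_path = "./support/private_key.pem"  # private half of that keypair
+tfc_api_token            = "XXXX.atlasv1.XXXX"          # Terraform Cloud (QTI org) token
+vault_bundle_path        = "./support/vault.zip"        # the Vault artifact under test
+vault_license_path       = "./support/vault.hclic"      # non-OSS editions only
+```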
+
+## Upgrade
+The [`upgrade` scenario](./enos-scenario-upgrade.hcl) creates a Vault cluster using
+the version specified in `vault_upgrade_initial_release`, with the backend specified
+by the `backend` variant (`raft` or `consul`). Next, it upgrades the Vault binary
+to the one determined by the `artifact_source` variant. After the upgrade, it verifies that
+the cluster is at the desired version, along with additional verifications.
+
+
+## Autopilot
+The [`autopilot` scenario](./enos-scenario-autopilot.hcl) creates a Vault cluster using
+the version specified in `vault_upgrade_initial_release`. It writes test data to the Vault cluster. Next, it creates additional nodes with the candidate version of Vault as determined by the `vault_product_version` variable.
+The module uses AWS auto-join to handle discovery and unseals with auto-unseal
+or Shamir depending on the `seal` variant. After the new nodes have joined and been
+unsealed, it verifies reading stored data on the new nodes. Autopilot upgrade verification checks that the upgrade status is "await-server-removal" and that the target version is set to the version of the upgraded nodes. This test also verifies the undo_logs status for Vault versions 1.13.x.
+
+## Replication
+The [`replication` scenario](./enos-scenario-replication.hcl) creates two 3-node Vault clusters and runs the following verification steps:
+
+  1. Writes data on the primary cluster
+  1. Enables performance replication
+  1. Verifies reading stored data from the secondary cluster
+  1. Verifies initial replication status between both clusters
+  1. Replaces the leader node and one standby node on the primary Vault cluster
+  1. Verifies updated replication status between both clusters
+
+  This scenario verifies that the performance replication status on both clusters reports a connection_status of "connected", and that the secondary cluster's known_primaries cluster addresses are updated to the IP addresses of the active nodes of the primary Vault cluster. This scenario currently works around issues VAULT-12311 and VAULT-12309. The scenario fails when the primary storage backend is Consul due to issue VAULT-12332.
+
+## UI Tests
+The [`ui` scenario](./enos-scenario-ui.hcl) creates a Vault cluster (deployed to AWS) using a version
+built from the current checkout of the project. Once the cluster is available, the UI acceptance tests
+are run in a headless browser.
+### Variables
+In addition to the required variables that must be set, as described in [Scenario Variables](#scenario-variables),
+the `ui` scenario has two optional variables:
+
+**ui_test_filter** - An optional test filter to limit the tests that are run, e.g. `'!enterprise'`.
+To set a filter, export the variable as follows:
+```shell
+> export ENOS_VAR_ui_test_filter="some filter"
+```
+**ui_run_tests** - An optional boolean variable that controls whether the tests are run. The default value is true.
+Setting this value to false is useful when you want to create a cluster but run the tests
+manually. The section [Running the Tests](#running-the-tests) describes the different ways to run the
+UI acceptance tests.
+
+### Running the Tests
+The UI tests can be run either fully automated or manually.
+#### Fully Automated
+The following will deploy the cluster, run the tests, and subsequently tear down the cluster:
+```shell
+> export ENOS_VAR_ui_test_filter="some filter" # <-- optional
+> cd enos
+> enos scenario ui run edition:oss
+```
+#### Manually
+The UI tests can be run manually as follows:
+```shell
+> export ENOS_VAR_ui_test_filter="some filter" # <-- optional
+> export ENOS_VAR_ui_run_tests=false
+> cd enos
+> enos scenario ui launch edition:oss
+# once complete the scenario will output a set of environment variables that must be exported. The
+# output will look as follows:
+export TEST_FILTER='<some filter>' \
+export VAULT_ADDR='http://:8200' \
+export VAULT_TOKEN='' \
+export VAULT_UNSEAL_KEYS='["","",""]'
+# copy and paste the above into the terminal to export the values
+> cd ../ui
+> yarn test:enos # run headless
+# or
+> yarn test:enos -s # run manually in a web browser
+# once testing is complete
+> cd ../enos
+> enos scenario ui destroy edition:oss
+```
+
+# Variants
+Both scenarios support a matrix of variants. In order to achieve broad coverage while
+keeping test run time reasonable, the variants executed by the `enos-run` GitHub
+Actions are tailored to maximize variant distribution per scenario.
+
+## `artifact_source:crt`
+This variant is designed for use in GitHub Actions. The `enos-run.yml` workflow
+downloads the artifact built by the `build.yml` workflow, unzips it, and sets the
+`vault_bundle_path` to the zip file and the `vault_local_binary_path` to the binary.
+
+## `artifact_source:local`
+This variant is for running the Enos scenario locally. It builds the Vault bundle
+from the current branch, placing the bundle at the `vault_bundle_path` and the
+unzipped Vault binary at the `vault_local_binary_path`.
+
+## `artifact_source:artifactory`
+This variant is for running the Enos scenario to test an artifact from Artifactory. It requires the following Enos variables to be set:
+* `artifactory_username`
+* `artifactory_token`
+* `aws_ssh_keypair_name`
+* `aws_ssh_private_key_path`
+* `tfc_api_token`
+* `vault_product_version`
+* `vault_revision`
+
+# CI Bootstrap
+In order to execute any of the scenarios in this repository, it is first necessary to bootstrap the
+CI AWS account with the required permissions, service quotas, and supporting AWS resources. There are
+two Terraform modules used for this purpose: [service-user-iam](./ci/service-user-iam) for
+the account permissions and service quotas, and [bootstrap](./ci/bootstrap) for the supporting resources.
+
+**Supported Regions** - Enos scenarios are supported in the following regions:
+`"us-east-1", "us-east-2", "us-west-1", "us-west-2"`
+
+## Bootstrap Process
+These steps should be followed to bootstrap this repo for Enos scenario execution:
+
+### Set up CI service user IAM role and Service Quotas
+The service user that is used when executing Enos scenarios from any GitHub Actions workflow must have
+a properly configured IAM role granting the access required to create resources in AWS. Additionally,
+service quotas need to be adjusted to ensure that normal use of the CI account does not cause any
+service quotas to be exceeded. The [service-user-iam](./ci/service-user-iam) module contains the IAM
+Policy and Role that grant this access, as well as the service quota increase requests to adjust
+the service quotas. This module should be updated whenever a new AWS resource type is required for a
+scenario or a service quota limit needs to be increased.
Since this is persistent and cannot be created
+and destroyed each time a scenario is run, the Terraform state will be managed by Terraform Cloud.
+Here are the steps to configure the GitHub Actions service user:
+
+#### Pre-requisites
+- Access to the `hashicorp-qti` organization in Terraform Cloud.
+- Full access to the CI AWS account is required.
+
+**Notes:**
+- For help with access to Terraform Cloud and the CI Account, contact the QT team on Slack (#team-quality)
+  for an invite. After receiving an invite to Terraform Cloud, a personal access token can be created
+  by clicking `User Settings` --> `Tokens` --> `Create an API token`.
+- Access to the AWS account can be done via Doormat, at: https://doormat.hashicorp.services/.
+  - For the vault repo the account is: `vault_ci` and for the vault-enterprise repo, the account is:
+    `vault-enterprise_ci`.
+  - Access can be requested by clicking: `Cloud Access` --> `AWS` --> `Request Account Access`.
+
+1. **Create the Terraform Cloud Workspace** - The name of the workspace to be created depends on the
+   repository for which it is being created, but the pattern is: `<repository>-ci-service-user-iam`,
+   e.g. `vault-ci-service-user-iam`. It is important that the execution mode for the workspace be set
+   to `local`. For help on setting up the workspace, contact the QT team on Slack (#team-quality).
+
+
+2. **Execute the Terraform module**
+```shell
+> cd ./enos/ci/service-user-iam
+> export TF_WORKSPACE=<repository>-ci-service-user-iam
+> export TF_TOKEN_app_terraform_io=<your Terraform Cloud API token>
+> export TF_VAR_repository=<repository>
+> terraform init
+> terraform plan
+> terraform apply -auto-approve
+```
+
+### Bootstrap the CI resources
+Bootstrapping of the resources in the CI account is accomplished via the GitHub Actions workflow:
+[enos-bootstrap-ci](../.github/workflows/enos-bootstrap-ci.yml). Before this workflow can be run, a
+workspace must be created as follows:
+
+1. **Create the Terraform Cloud Workspace** - The name of the workspace to be created depends on the repository
+   for which it is being created, but the pattern is: `<repository>-ci-bootstrap`, e.g.
+   `vault-ci-bootstrap`. It is important that the execution mode for the workspace be set to
+   `local`. For help on setting up the workspace, contact the QT team on Slack (#team-quality).
+
+Once the workspace has been created, changes to the bootstrap module will automatically be applied via
+the GitHub PR workflow. Each time a PR is created for changes to files within that module, the module
+will be planned via the workflow described above. If the plan is OK and the PR is merged, the module
+will automatically be applied via the same workflow.
diff --git a/enos/ci/aws-nuke.yml b/enos/ci/aws-nuke.yml
new file mode 100644
index 0000000..50a5677
--- /dev/null
+++ b/enos/ci/aws-nuke.yml
@@ -0,0 +1,398 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0 + +regions: +- eu-north-1 +- ap-south-1 +- eu-west-3 +- eu-west-2 +- eu-west-1 +- ap-northeast-3 +- ap-northeast-2 +- ap-northeast-1 +- sa-east-1 +- ca-central-1 +- ap-southeast-1 +- ap-southeast-2 +- eu-central-1 +- us-east-1 +- us-east-2 +- us-west-1 +- us-west-2 +- global + +account-blocklist: + - 1234567890 + +accounts: + # replaced in CI + ACCOUNT_NUM: + presets: + - default + - olderthan + - honeybee + - enos + +presets: + default: + # Ignores default VPC resources + filters: + EC2VPC: + - property: IsDefault + value: "true" + EC2RouteTable: + - property: DefaultVPC + value: "true" + EC2DHCPOption: + - property: DefaultVPC + value: "true" + EC2InternetGateway: + - property: DefaultVPC + value: "true" + EC2Subnet: + - property: DefaultVPC + value: "true" + EC2InternetGatewayAttachment: + - property: DefaultVPC + value: "true" + olderthan: + # Filters resources by age (when available) + # TIME_LIMIT replaced in CI + filters: + EC2Instance: + - property: LaunchTime + type: dateOlderThan + value: "TIME_LIMIT" + EC2NetworkACL: + EC2RouteTable: + EC2SecurityGroup: + EC2Subnet: + EC2Volume: + EC2VPC: + - property: tag:cloud-nuke-first-seen + type: dateOlderThan + value: "TIME_LIMIT" + ELBv2: + - property: tag:cloud-nuke-first-seen + type: dateOlderThan + value: "TIME_LIMIT" + ELBv2TargetGroup: + EC2NetworkInterface: + EC2InternetGateway: + EC2InternetGatewayAttachment: + RDSInstance: + - property: InstanceCreateTime + type: dateOlderThan + value: "TIME_LIMIT" + + honeybee: + # Cloudsec + filters: + IAMRole: + - property: tag:hc-config-as-code + value: "honeybee" + IAMRolePolicy: + - property: tag:role:hc-config-as-code + value: "honeybee" + IAMRolePolicyAttachment: + - property: tag:role:hc-config-as-code + value: "honeybee" + + enos: + # Existing CI to be cleaned up later + filters: + LambdaFunction: + - property: Name + value: "enos_cleanup" + IAMRole: + - property: Name + type: glob + value: "github_actions-*" + - property: Name + value: "rds-monitoring-role" + IAMRolePolicy: + - property: role:RoleName + type: glob + value: "github_actions*" + - property: role:RoleName + type: glob + value: "rds-*" + IAMRolePolicyAttachment: + - "rds-monitoring-role -> AmazonRDSEnhancedMonitoringRole" + IAMUserPolicy: + - "github_actions-vault_ci -> AssumeServiceUserRole" + - "github_actions-vault_enterprise_ci -> AssumeServiceUserRole" + +resource-types: + # Run against everything, excluding these: + excludes: + # Avoid cloudsec things + - IAMUser + - IAMPolicy + - IAMUserAccessKey + - S3Object + - S3Bucket + - EC2KeyPair + - CloudWatchEventsTarget + - CloudWatchEventsRule + - CloudWatchLogsLogGroup + - ConfigServiceConfigurationRecorder + - ConfigServiceConfigRule + - ConfigServiceDeliveryChannel + - CloudTrailTrail + - RDSSnapshot + - RDSClusterSnapshot + - WAFWebACL + - WAFv2WebACL + - WAFRegionalWebACL + - GuardDutyDetector + + # Unused services, filtering these speeds up runs and + # removes errors about things we don't have enabled + - ACMCertificate + - ACMPCACertificateAuthority + - ACMPCACertificateAuthorityState + - AMGWorkspace + - AMPWorkspace + - APIGatewayAPIKey + - APIGatewayClientCertificate + - APIGatewayDomainName + - APIGatewayRestAPI + - APIGatewayUsagePlan + - APIGatewayV2API + - APIGatewayV2VpcLink + - APIGatewayVpcLink + - AWS::AppFlow::ConnectorProfile + - AWS::AppFlow::Flow + - AWS::AppRunner::Service + - AWS::ApplicationInsights::Application + - AWS::Backup::Framework + - AWS::MWAA::Environment + - AWS::NetworkFirewall::Firewall + - 
AWS::NetworkFirewall::FirewallPolicy + - AWS::NetworkFirewall::RuleGroup + - AWS::Synthetics::Canary + - AWS::Timestream::Database + - AWS::Timestream::ScheduledQuery + - AWS::Timestream::Table + - AWS::Transfer::Workflow + - AWSBackupPlan + - AWSBackupRecoveryPoint + - AWSBackupSelection + - AWSBackupVault + - AWSBackupVaultAccessPolicy + - AccessAnalyzer + - AppMeshMesh + - AppMeshRoute + - AppMeshVirtualGateway + - AppMeshVirtualNode + - AppMeshVirtualRouter + - AppMeshVirtualService + - AppStreamDirectoryConfig + - AppStreamFleet + - AppStreamFleetState + - AppStreamImage + - AppStreamImageBuilder + - AppStreamImageBuilderWaiter + - AppStreamStack + - AppStreamStackFleetAttachment + - AppSyncGraphqlAPI + - ApplicationAutoScalingScalableTarget + - ArchiveRule + - AthenaNamedQuery + - AthenaWorkGroup + - BatchComputeEnvironment + - BatchComputeEnvironmentState + - BatchJobQueue + - BatchJobQueueState + - BillingCostandUsageReport + - Budget + - Cloud9Environment + - CloudDirectoryDirectory + - CloudDirectorySchema + - CodeArtifactDomain + - CodeArtifactRepository + - CodeBuildProject + - CodeCommitRepository + - CodeDeployApplication + - CodePipelinePipeline + - CodeStarConnection + - CodeStarNotificationRule + - CodeStarProject + - CognitoIdentityPool + - CognitoIdentityProvider + - CognitoUserPool + - CognitoUserPoolClient + - CognitoUserPoolDomain + - ComprehendDocumentClassifier + - ComprehendDominantLanguageDetectionJob + - ComprehendEndpoint + - ComprehendEntitiesDetectionJob + - ComprehendEntityRecognizer + - ComprehendKeyPhrasesDetectionJob + - ComprehendSentimentDetectionJob + - ConfigServiceConfigRule + - ConfigServiceConfigurationRecorder + - ConfigServiceDeliveryChannel + - DAXCluster + - DAXParameterGroup + - DAXSubnetGroup + - DataPipelinePipeline + - DatabaseMigrationServiceCertificate + - DatabaseMigrationServiceEndpoint + - DatabaseMigrationServiceEventSubscription + - DatabaseMigrationServiceReplicationInstance + - DatabaseMigrationServiceReplicationTask + - DatabaseMigrationServiceSubnetGroup + - DeviceFarmProject + - DirectoryServiceDirectory + - EC2ClientVpnEndpointAttachment + - EC2ClientVpnEndpoint + - EC2DefaultSecurityGroupRule + - FMSNotificationChannel + - FMSPolicy + - FSxBackup + - FSxFileSystem + - FirehoseDeliveryStream + - GlobalAccelerator + - GlobalAcceleratorEndpointGroup + - GlobalAcceleratorListener + - GlueClassifier + - GlueConnection + - GlueCrawler + - GlueDatabase + - GlueDevEndpoint + - GlueJob + - GlueTrigger + - Inspector2 + - InspectorAssessmentRun + - InspectorAssessmentTarget + - InspectorAssessmentTemplate + - IoTAuthorizer + - IoTCACertificate + - IoTCertificate + - IoTJob + - IoTOTAUpdate + - IoTPolicy + - IoTRoleAlias + - IoTStream + - IoTThing + - IoTThingGroup + - IoTThingType + - IoTThingTypeState + - IoTTopicRule + - KendraIndex + - KinesisAnalyticsApplication + - KinesisStream + - KinesisVideoProject + - LexBot + - LexIntent + - LexModelBuildingServiceBotAlias + - LexSlotType + - LifecycleHook + - LightsailDisk + - LightsailDomain + - LightsailInstance + - LightsailKeyPair + - LightsailLoadBalancer + - LightsailStaticIP + - MQBroker + - MSKCluster + - MSKConfiguration + - MachineLearningBranchPrediction + - MachineLearningDataSource + - MachineLearningEvaluation + - MachineLearningMLModel + - Macie + - MediaConvertJobTemplate + - MediaConvertPreset + - MediaConvertQueue + - MediaLiveChannel + - MediaLiveInput + - MediaLiveInputSecurityGroup + - MediaPackageChannel + - MediaPackageOriginEndpoint + - MediaStoreContainer + - 
MediaStoreDataItems + - MediaTailorConfiguration + - MobileProject + - NeptuneCluster + - NeptuneInstance + - NetpuneSnapshot + - OpsWorksApp + - OpsWorksCMBackup + - OpsWorksCMServer + - OpsWorksCMServerState + - OpsWorksInstance + - OpsWorksLayer + - OpsWorksUserProfile + - QLDBLedger + - RoboMakerRobotApplication + - RoboMakerSimulationApplication + - RoboMakerSimulationJob + - SESConfigurationSet + - SESIdentity + - SESReceiptFilter + - SESReceiptRuleSet + - SESTemplate + - SSMActivation + - SSMAssociation + - SSMDocument + - SSMMaintenanceWindow + - SSMParameter + - SSMPatchBaseline + - SSMResourceDataSync + - SageMakerApp + - SageMakerDomain + - SageMakerEndpoint + - SageMakerEndpointConfig + - SageMakerModel + - SageMakerNotebookInstance + - SageMakerNotebookInstanceLifecycleConfig + - SageMakerNotebookInstanceState + - SageMakerUserProfiles + - ServiceCatalogConstraintPortfolioAttachment + - ServiceCatalogPortfolio + - ServiceCatalogPortfolioProductAttachment + - ServiceCatalogPortfolioShareAttachment + - ServiceCatalogPrincipalPortfolioAttachment + - ServiceCatalogProduct + - ServiceCatalogProvisionedProduct + - ServiceCatalogTagOption + - ServiceCatalogTagOptionPortfolioAttachment + - ServiceDiscoveryInstance + - ServiceDiscoveryNamespace + - ServiceDiscoveryService + - SimpleDBDomain + - StorageGatewayFileShare + - StorageGatewayGateway + - StorageGatewayTape + - StorageGatewayVolume + - TransferServer + - TransferServerUser + - WAFRegionalByteMatchSet + - WAFRegionalByteMatchSetIP + - WAFRegionalIPSet + - WAFRegionalIPSetIP + - WAFRegionalRateBasedRule + - WAFRegionalRateBasedRulePredicate + - WAFRegionalRegexMatchSet + - WAFRegionalRegexMatchTuple + - WAFRegionalRegexPatternSet + - WAFRegionalRegexPatternString + - WAFRegionalRule + - WAFRegionalRuleGroup + - WAFRegionalRulePredicate + - WAFRegionalWebACL + - WAFRegionalWebACLRuleAttachment + - WAFRule + - WAFWebACL + - WAFWebACLRuleAttachment + - WAFv2IPSet + - WAFv2RegexPatternSet + - WAFv2RuleGroup + - WAFv2WebACL + - WorkLinkFleet + - WorkSpacesWorkspace + - XRayGroup + - XRaySamplingRule diff --git a/enos/ci/bootstrap/main.tf b/enos/ci/bootstrap/main.tf new file mode 100644 index 0000000..c5ce812 --- /dev/null +++ b/enos/ci/bootstrap/main.tf @@ -0,0 +1,69 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + } + } + + cloud { + hostname = "app.terraform.io" + organization = "hashicorp-qti" + // workspace must be exported in the environment as: TF_WORKSPACE=<repository>-ci-enos-bootstrap + } +} + +provider "aws" { + region = "us-east-1" + alias = "us_east_1" +} + +provider "aws" { + region = "us-east-2" + alias = "us_east_2" +} + +provider "aws" { + region = "us-west-1" + alias = "us_west_1" +} + +provider "aws" { + region = "us-west-2" + alias = "us_west_2" +} + + +locals { + key_name = "${var.repository}-ci-ssh-key" +} + +resource "aws_key_pair" "enos_ci_key_us_east_1" { + key_name = local.key_name + public_key = var.aws_ssh_public_key + + provider = aws.us_east_1 +} + +resource "aws_key_pair" "enos_ci_key_us_east_2" { + key_name = local.key_name + public_key = var.aws_ssh_public_key + + provider = aws.us_east_2 +} + +resource "aws_key_pair" "enos_ci_key_us_west_1" { + key_name = local.key_name + public_key = var.aws_ssh_public_key + + provider = aws.us_west_1 +} + +resource "aws_key_pair" "enos_ci_key_us_west_2" { + key_name = local.key_name + public_key = var.aws_ssh_public_key + + provider = aws.us_west_2 +} diff --git a/enos/ci/bootstrap/outputs.tf b/enos/ci/bootstrap/outputs.tf new file mode 100644 index 0000000..e6ff372 --- /dev/null +++ b/enos/ci/bootstrap/outputs.tf @@ -0,0 +1,23 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +output "keys" { + value = { + "us-east-1" = { + name = aws_key_pair.enos_ci_key_us_east_1.key_name + arn = aws_key_pair.enos_ci_key_us_east_1.arn + } + "us-east-2" = { + name = aws_key_pair.enos_ci_key_us_east_2.key_name + arn = aws_key_pair.enos_ci_key_us_east_2.arn + } + "us-west-1" = { + name = aws_key_pair.enos_ci_key_us_west_1.key_name + arn = aws_key_pair.enos_ci_key_us_west_1.arn + } + "us-west-2" = { + name = aws_key_pair.enos_ci_key_us_west_2.key_name + arn = aws_key_pair.enos_ci_key_us_west_2.arn + } + } +} diff --git a/enos/ci/bootstrap/variables.tf b/enos/ci/bootstrap/variables.tf new file mode 100644 index 0000000..3fb53bc --- /dev/null +++ b/enos/ci/bootstrap/variables.tf @@ -0,0 +1,16 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +variable "aws_ssh_public_key" { + description = "The public key to use for the SSH key" + type = string +} + +variable "repository" { + description = "The repository to bootstrap the CI for, either 'vault' or 'vault-enterprise'" + type = string + validation { + condition = contains(["vault", "vault-enterprise"], var.repository) + error_message = "Repository must be either 'vault' or 'vault-enterprise'" + } +} diff --git a/enos/ci/service-user-iam/main.tf b/enos/ci/service-user-iam/main.tf new file mode 100644 index 0000000..06a1576 --- /dev/null +++ b/enos/ci/service-user-iam/main.tf @@ -0,0 +1,225 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + } + } + + cloud { + hostname = "app.terraform.io" + organization = "hashicorp-qti" + // workspace must be exported in the environment as: TF_WORKSPACE=<repository>-ci-enos-service-user-iam + } +} + +locals { + enterprise_repositories = ["vault-enterprise"] + is_ent = contains(local.enterprise_repositories, var.repository) + ci_account_prefix = local.is_ent ? "vault_enterprise" : "vault" + service_user = "github_actions-${local.ci_account_prefix}_ci" + aws_account_id = local.is_ent ?
"505811019928" : "040730498200" +} + +resource "aws_iam_role" "role" { + provider = aws.us_east_1 + name = local.service_user + assume_role_policy = data.aws_iam_policy_document.assume_role_policy_document.json +} + +data "aws_iam_policy_document" "assume_role_policy_document" { + provider = aws.us_east_1 + + statement { + effect = "Allow" + actions = ["sts:AssumeRole"] + + principals { + type = "AWS" + identifiers = ["arn:aws:iam::${local.aws_account_id}:user/${local.service_user}"] + } + } +} + +resource "aws_iam_role_policy" "role_policy" { + provider = aws.us_east_1 + role = aws_iam_role.role.name + name = "${local.service_user}_policy" + policy = data.aws_iam_policy_document.role_policy.json +} + +data "aws_iam_policy_document" "role_policy" { + source_policy_documents = [ + data.aws_iam_policy_document.enos_scenario.json, + data.aws_iam_policy_document.aws_nuke.json, + ] +} + +data "aws_iam_policy_document" "aws_nuke" { + provider = aws.us_east_1 + + statement { + effect = "Allow" + actions = [ + "ec2:DescribeInternetGateways", + "ec2:DescribeNatGateways", + "ec2:DescribeRegions", + "ec2:DescribeVpnGateways", + "iam:DeleteAccessKey", + "iam:DeleteUser", + "iam:DeleteUserPolicy", + "iam:GetUser", + "iam:ListAccessKeys", + "iam:ListAccountAliases", + "iam:ListGroupsForUser", + "iam:ListUserPolicies", + "iam:ListUserTags", + "iam:ListUsers", + "iam:UntagUser", + "servicequotas:ListServiceQuotas" + ] + + resources = ["*"] + } +} + +data "aws_iam_policy_document" "enos_scenario" { + provider = aws.us_east_1 + + statement { + effect = "Allow" + actions = [ + "ec2:AssociateRouteTable", + "ec2:AttachInternetGateway", + "ec2:AuthorizeSecurityGroupEgress", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CancelSpotFleetRequests", + "ec2:CancelSpotInstanceRequests", + "ec2:CreateInternetGateway", + "ec2:CreateKeyPair", + "ec2:CreateFleet", + "ec2:CreateLaunchTemplate", + "ec2:CreateLaunchTemplateVersion", + "ec2:CreateRoute", + "ec2:CreateRouteTable", + "ec2:CreateSecurityGroup", + "ec2:CreateSpotDatafeedSubscription", + "ec2:CreateSubnet", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:CreateVPC", + "ec2:DeleteFleets", + "ec2:DeleteInternetGateway", + "ec2:DeleteLaunchTemplate", + "ec2:DeleteLaunchTemplateVersions", + "ec2:DeleteKeyPair", + "ec2:DeleteRoute", + "ec2:DeleteRouteTable", + "ec2:DeleteSecurityGroup", + "ec2:DeleteSpotDatafeedSubscription", + "ec2:DeleteSubnet", + "ec2:DeleteTags", + "ec2:DeleteVolume", + "ec2:DeleteVPC", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeFleets", + "ec2:DescribeFleetHistory", + "ec2:DescribeFleetInstances", + "ec2:DescribeImages", + "ec2:DescribeInstanceAttribute", + "ec2:DescribeInstanceCreditSpecifications", + "ec2:DescribeInstances", + "ec2:DescribeInstanceTypeOfferings", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInternetGateways", + "ec2:DescribeKeyPairs", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeLaunchTemplateVersions", + "ec2:DescribeNatGateways", + "ec2:DescribeNetworkAcls", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSpotDatafeedSubscription", + "ec2:DescribeSpotFleetInstances", + "ec2:DescribeSpotFleetInstanceRequests", + "ec2:DescribeSpotFleetRequests", + "ec2:DescribeSpotFleetRequestHistory", + "ec2:DescribeSpotInstanceRequests", + "ec2:DescribeSpotPriceHistory", + "ec2:DescribeSubnets", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcClassicLink", + 
"ec2:DescribeVpcClassicLinkDnsSupport", + "ec2:DescribeVpcs", + "ec2:DescribeVpnGateways", + "ec2:DetachInternetGateway", + "ec2:DisassociateRouteTable", + "ec2:GetLaunchTemplateData", + "ec2:GetSpotPlacementScores", + "ec2:ImportKeyPair", + "ec2:ModifyFleet", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyLaunchTemplate", + "ec2:ModifySpotFleetRequest", + "ec2:ModifySubnetAttribute", + "ec2:ModifyVPCAttribute", + "ec2:RequestSpotInstances", + "ec2:RequestSpotFleet", + "ec2:ResetInstanceAttribute", + "ec2:RevokeSecurityGroupEgress", + "ec2:RevokeSecurityGroupIngress", + "ec2:RunInstances", + "ec2:SendSpotInstanceInterruptions", + "ec2:TerminateInstances", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTargetGroups", + "iam:AddRoleToInstanceProfile", + "iam:AttachRolePolicy", + "iam:CreateInstanceProfile", + "iam:CreatePolicy", + "iam:CreateRole", + "iam:CreateServiceLinkedRole", + "iam:DeleteInstanceProfile", + "iam:DeletePolicy", + "iam:DeleteRole", + "iam:DeleteRolePolicy", + "iam:DetachRolePolicy", + "iam:GetInstanceProfile", + "iam:GetRole", + "iam:GetRolePolicy", + "iam:ListAccountAliases", + "iam:ListAttachedRolePolicies", + "iam:ListInstanceProfiles", + "iam:ListInstanceProfilesForRole", + "iam:ListPolicies", + "iam:ListRolePolicies", + "iam:ListRoles", + "iam:PassRole", + "iam:PutRolePolicy", + "iam:RemoveRoleFromInstanceProfile", + "kms:CreateAlias", + "kms:CreateKey", + "kms:Decrypt", + "kms:DeleteAlias", + "kms:DescribeKey", + "kms:Encrypt", + "kms:GetKeyPolicy", + "kms:GetKeyRotationStatus", + "kms:ListAliases", + "kms:ListKeys", + "kms:ListResourceTags", + "kms:ScheduleKeyDeletion", + "kms:TagResource", + "servicequotas:ListServiceQuotas" + ] + + resources = ["*"] + } +} diff --git a/enos/ci/service-user-iam/outputs.tf b/enos/ci/service-user-iam/outputs.tf new file mode 100644 index 0000000..ba980d5 --- /dev/null +++ b/enos/ci/service-user-iam/outputs.tf @@ -0,0 +1,16 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +output "ci_role" { + value = { + name = aws_iam_role.role.name + arn = aws_iam_role.role.arn + } +} + +output "ci_role_policy" { + value = { + name = aws_iam_role_policy.role_policy.name + policy = aws_iam_role_policy.role_policy.policy + } +} diff --git a/enos/ci/service-user-iam/providers.tf b/enos/ci/service-user-iam/providers.tf new file mode 100644 index 0000000..7baba33 --- /dev/null +++ b/enos/ci/service-user-iam/providers.tf @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + region = "us-east-1" + alias = "us_east_1" +} + +provider "aws" { + region = "us-east-2" + alias = "us_east_2" +} + +provider "aws" { + region = "us-west-1" + alias = "us_west_1" +} + +provider "aws" { + region = "us-west-2" + alias = "us_west_2" +} diff --git a/enos/ci/service-user-iam/service-quotas.tf b/enos/ci/service-user-iam/service-quotas.tf new file mode 100644 index 0000000..3044d41 --- /dev/null +++ b/enos/ci/service-user-iam/service-quotas.tf @@ -0,0 +1,65 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +locals { + // This is the code of the service quota to request a change for. Each adjustable limit has a + // unique code. 
See, https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/servicequotas_service_quota#quota_code + subnets_per_vpcs_quota = "L-F678F1CE" + standard_spot_instance_requests_quota = "L-34B43A08" +} + +resource "aws_servicequotas_service_quota" "vpcs_per_region_us_east_1" { + provider = aws.us_east_1 + quota_code = local.subnets_per_vpcs_quota + service_code = "vpc" + value = 100 +} + +resource "aws_servicequotas_service_quota" "vpcs_per_region_us_east_2" { + provider = aws.us_east_2 + quota_code = local.subnets_per_vpcs_quota + service_code = "vpc" + value = 100 +} + +resource "aws_servicequotas_service_quota" "vpcs_per_region_us_west_1" { + provider = aws.us_west_1 + quota_code = local.subnets_per_vpcs_quota + service_code = "vpc" + value = 100 +} + +resource "aws_servicequotas_service_quota" "vpcs_per_region_us_west_2" { + provider = aws.us_west_2 + quota_code = local.subnets_per_vpcs_quota + service_code = "vpc" + value = 100 +} + +resource "aws_servicequotas_service_quota" "spot_requests_per_region_us_east_1" { + provider = aws.us_east_1 + quota_code = local.standard_spot_instance_requests_quota + service_code = "ec2" + value = 640 +} + +resource "aws_servicequotas_service_quota" "spot_requests_per_region_us_east_2" { + provider = aws.us_east_2 + quota_code = local.standard_spot_instance_requests_quota + service_code = "ec2" + value = 640 +} + +resource "aws_servicequotas_service_quota" "spot_requests_per_region_us_west_1" { + provider = aws.us_west_1 + quota_code = local.standard_spot_instance_requests_quota + service_code = "ec2" + value = 640 +} + +resource "aws_servicequotas_service_quota" "spot_requests_per_region_us_west_2" { + provider = aws.us_west_2 + quota_code = local.standard_spot_instance_requests_quota + service_code = "ec2" + value = 640 +} diff --git a/enos/ci/service-user-iam/variables.tf b/enos/ci/service-user-iam/variables.tf new file mode 100644 index 0000000..a80d83c --- /dev/null +++ b/enos/ci/service-user-iam/variables.tf @@ -0,0 +1,11 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +variable "repository" { + description = "The GitHub repository, either vault or vault-enterprise" + type = string + validation { + condition = contains(["vault", "vault-enterprise"], var.repository) + error_message = "Invalid repository, only vault or vault-enterprise are supported" + } +} diff --git a/enos/enos-globals.hcl b/enos/enos-globals.hcl new file mode 100644 index 0000000..9abb0f4 --- /dev/null +++ b/enos/enos-globals.hcl @@ -0,0 +1,38 @@ +# Copyright (c) HashiCorp, Inc. 
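# Each aws_servicequotas_service_quota resource in service-quotas.tf above
# files a quota-increase request keyed by an opaque quota code. If you need the
# code for another adjustable limit, the AWS provider can resolve it from the
# human-readable quota name; an illustrative lookup (the quota name here is an
# assumption, verify it against the Service Quotas console):
#
#   data "aws_servicequotas_service_quota" "vpcs_per_region" {
#     quota_name   = "VPCs per Region"
#     service_code = "vpc"
#   }
#
#   output "vpcs_per_region_quota_code" {
#     value = data.aws_servicequotas_service_quota.vpcs_per_region.quota_code
#   }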
+# SPDX-License-Identifier: MPL-2.0 + +globals { + backend_tag_key = "VaultStorage" + build_tags = { + "ce" = ["ui"] + "ent" = ["ui", "enterprise", "ent"] + "ent.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.fips1402"] + "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] + "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] + } + distro_version = { + "rhel" = var.rhel_distro_version + "ubuntu" = var.ubuntu_distro_version + } + packages = ["jq"] + distro_packages = { + ubuntu = ["netcat"] + rhel = ["nc"] + } + sample_attributes = { + # NOTE(9/28/23): Temporarily use us-east-2 due to another networking issue in us-east-1 + # aws_region = ["us-east-1", "us-west-2"] + aws_region = ["us-east-2", "us-west-2"] + } + tags = merge({ + "Project Name" : var.project_name + "Project" : "Enos", + "Environment" : "ci" + }, var.tags) + vault_install_dir_packages = { + rhel = "/bin" + ubuntu = "/usr/bin" + } + vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) + vault_tag_key = "Type" // enos_vault_start expects Type as the tag key +} diff --git a/enos/enos-modules.hcl b/enos/enos-modules.hcl new file mode 100644 index 0000000..ff5e8bc --- /dev/null +++ b/enos/enos-modules.hcl @@ -0,0 +1,288 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +module "autopilot_upgrade_storageconfig" { + source = "./modules/autopilot_upgrade_storageconfig" +} + +module "backend_consul" { + source = "./modules/backend_consul" + + license = var.backend_license_path == null ? null : file(abspath(var.backend_license_path)) + log_level = var.backend_log_level +} + +module "backend_raft" { + source = "./modules/backend_raft" +} + +module "build_crt" { + source = "./modules/build_crt" +} + +module "build_local" { + source = "./modules/build_local" +} + +module "build_artifactory" { + source = "./modules/vault_artifactory_artifact" +} + +module "create_vpc" { + source = "./modules/create_vpc" + + environment = "ci" + common_tags = var.tags +} + +module "ec2_info" { + source = "./modules/ec2_info" +} + +module "get_local_metadata" { + source = "./modules/get_local_metadata" +} + +module "generate_secondary_token" { + source = "./modules/generate_secondary_token" + + vault_install_dir = var.vault_install_dir +} + +module "read_license" { + source = "./modules/read_license" +} + +module "replication_data" { + source = "./modules/replication_data" +} + +module "seal_key_awskms" { + source = "./modules/seal_key_awskms" + + common_tags = var.tags +} + +module "seal_key_shamir" { + source = "./modules/seal_key_shamir" + + common_tags = var.tags +} + +module "shutdown_node" { + source = "./modules/shutdown_node" +} + +module "shutdown_multiple_nodes" { + source = "./modules/shutdown_multiple_nodes" +} + +module "start_vault" { + source = "./modules/start_vault" + + install_dir = var.vault_install_dir + log_level = var.vault_log_level +} + +module "stop_vault" { + source = "./modules/stop_vault" +} + +# create target instances using ec2:CreateFleet +module "target_ec2_fleet" { + source = "./modules/target_ec2_fleet" + + common_tags = var.tags + project_name = var.project_name + ssh_keypair = var.aws_ssh_keypair_name +} + +# create target instances using ec2:RunInstances +module "target_ec2_instances" { + source = "./modules/target_ec2_instances" + + common_tags = var.tags + project_name = var.project_name + ssh_keypair = var.aws_ssh_keypair_name +} + +# don't
create instances but satisfy the module interface +module "target_ec2_shim" { + source = "./modules/target_ec2_shim" + + common_tags = var.tags + project_name = var.project_name + ssh_keypair = var.aws_ssh_keypair_name +} + +# create target instances using ec2:RequestSpotFleet +module "target_ec2_spot_fleet" { + source = "./modules/target_ec2_spot_fleet" + + common_tags = var.tags + project_name = var.project_name + ssh_keypair = var.aws_ssh_keypair_name +} + +module "vault_agent" { + source = "./modules/vault_agent" + + vault_install_dir = var.vault_install_dir + vault_instance_count = var.vault_instance_count +} + +module "vault_proxy" { + source = "./modules/vault_proxy" + + vault_install_dir = var.vault_install_dir + vault_instance_count = var.vault_instance_count +} + +module "vault_verify_agent_output" { + source = "./modules/vault_verify_agent_output" + + vault_instance_count = var.vault_instance_count +} + +module "vault_cluster" { + source = "./modules/vault_cluster" + + install_dir = var.vault_install_dir + consul_license = var.backend_license_path == null ? null : file(abspath(var.backend_license_path)) + log_level = var.vault_log_level +} + +module "vault_get_cluster_ips" { + source = "./modules/vault_get_cluster_ips" + + vault_install_dir = var.vault_install_dir + vault_instance_count = var.vault_instance_count +} + +module "vault_raft_remove_peer" { + source = "./modules/vault_raft_remove_peer" + vault_install_dir = var.vault_install_dir +} + +module "vault_setup_perf_secondary" { + source = "./modules/vault_setup_perf_secondary" + + vault_install_dir = var.vault_install_dir +} + +module "vault_test_ui" { + source = "./modules/vault_test_ui" + + ui_run_tests = var.ui_run_tests +} + +module "vault_unseal_nodes" { + source = "./modules/vault_unseal_nodes" + + vault_install_dir = var.vault_install_dir + vault_instance_count = var.vault_instance_count +} + +module "vault_upgrade" { + source = "./modules/vault_upgrade" + + vault_install_dir = var.vault_install_dir + vault_instance_count = var.vault_instance_count +} + + +module "vault_verify_autopilot" { + source = "./modules/vault_verify_autopilot" + + vault_autopilot_upgrade_status = "await-server-removal" + vault_install_dir = var.vault_install_dir + vault_instance_count = var.vault_instance_count +} + +module "vault_verify_raft_auto_join_voter" { + source = "./modules/vault_verify_raft_auto_join_voter" + + vault_install_dir = var.vault_install_dir + vault_instance_count = var.vault_instance_count +} + +module "vault_verify_undo_logs" { + source = "./modules/vault_verify_undo_logs" + + vault_install_dir = var.vault_install_dir + vault_instance_count = var.vault_instance_count +} + +module "vault_verify_replication" { + source = "./modules/vault_verify_replication" + + vault_install_dir = var.vault_install_dir + vault_instance_count = var.vault_instance_count +} + +module "vault_verify_ui" { + source = "./modules/vault_verify_ui" + + vault_instance_count = var.vault_instance_count +} + +module "vault_verify_unsealed" { + source = "./modules/vault_verify_unsealed" + + vault_install_dir = var.vault_install_dir + vault_instance_count = var.vault_instance_count +} + +module "vault_setup_perf_primary" { + source = "./modules/vault_setup_perf_primary" + + vault_install_dir = var.vault_install_dir +} + +module "vault_verify_read_data" { + source = "./modules/vault_verify_read_data" + + vault_install_dir = var.vault_install_dir + vault_instance_count = var.vault_instance_count +} + +module "vault_verify_performance_replication" { + 
source = "./modules/vault_verify_performance_replication" + + vault_install_dir = var.vault_install_dir +} + +module "vault_verify_version" { + source = "./modules/vault_verify_version" + + vault_install_dir = var.vault_install_dir + vault_instance_count = var.vault_instance_count +} + +module "vault_verify_write_data" { + source = "./modules/vault_verify_write_data" + + vault_install_dir = var.vault_install_dir + vault_instance_count = var.vault_instance_count +} + +module "vault_wait_for_leader" { + source = "./modules/vault_wait_for_leader" + + vault_install_dir = var.vault_install_dir + vault_instance_count = var.vault_instance_count +} + +module "vault_wait_for_seal_rewrap" { + source = "./modules/vault_wait_for_seal_rewrap" + + vault_install_dir = var.vault_install_dir + vault_instance_count = var.vault_instance_count +} + +module "verify_seal_type" { + source = "./modules/verify_seal_type" + + vault_install_dir = var.vault_install_dir + vault_instance_count = var.vault_instance_count +} diff --git a/enos/enos-providers.hcl b/enos/enos-providers.hcl new file mode 100644 index 0000000..f277c57 --- /dev/null +++ b/enos/enos-providers.hcl @@ -0,0 +1,24 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" "default" { + region = var.aws_region +} + +provider "enos" "rhel" { + transport = { + ssh = { + user = "ec2-user" + private_key_path = abspath(var.aws_ssh_private_key_path) + } + } +} + +provider "enos" "ubuntu" { + transport = { + ssh = { + user = "ubuntu" + private_key_path = abspath(var.aws_ssh_private_key_path) + } + } +} diff --git a/enos/enos-samples-ce-build.hcl b/enos/enos-samples-ce-build.hcl new file mode 100644 index 0000000..e67c7c8 --- /dev/null +++ b/enos/enos-samples-ce-build.hcl @@ -0,0 +1,264 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +sample "build_ce_linux_amd64_deb" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } +} + +sample "build_ce_linux_arm64_deb" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } +} + +sample "build_ce_linux_arm64_rpm" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } +} + +sample "build_ce_linux_amd64_rpm" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + + exclude { + // Don't test from these versions in the build pipeline because of known issues + // in those older versions. 
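// For readers new to Enos: an exclude block removes every matrix cell that
// matches all of the attribute values it lists (a logical AND across
// attributes), so a sketch like
//
//   exclude {
//     arch    = ["arm64"]
//     edition = ["ent.hsm"]
//   }
//
// would drop only arm64 + ent.hsm combinations while keeping amd64 + ent.hsm
// and other arm64 editions. The exclusion below keys on initial_version alone,
// so the listed releases are skipped across the whole subset.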
+ initial_version = ["1.8.12", "1.9.10", "1.10.11"] + } + } + } +} + +sample "build_ce_linux_amd64_zip" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["crt"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["crt"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["crt"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["crt"] + edition = ["ce"] + } + } +} + +sample "build_ce_linux_arm64_zip" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["bundle"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["bundle"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["bundle"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["bundle"] + edition = ["ce"] + } + } +} diff --git a/enos/enos-samples-ce-release.hcl b/enos/enos-samples-ce-release.hcl new file mode 100644 index 0000000..18f23bf --- /dev/null +++ b/enos/enos-samples-ce-release.hcl @@ -0,0 +1,258 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +sample "release_ce_linux_amd64_deb" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } +} + +sample "release_ce_linux_arm64_deb" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } +} + +sample "release_ce_linux_arm64_rpm" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["arm64"] + 
artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } +} + +sample "release_ce_linux_amd64_rpm" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } +} + +sample "release_ce_linux_amd64_zip" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["artifactory"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["artifactory"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["artifactory"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["artifactory"] + edition = ["ce"] + } + } +} + +sample "release_ce_linux_arm64_zip" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["bundle"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["bundle"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["bundle"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["bundle"] + edition = ["ce"] + } + } +} diff --git a/enos/enos-scenario-agent.hcl b/enos/enos-scenario-agent.hcl new file mode 100644 index 0000000..408c19d --- /dev/null +++ b/enos/enos-scenario-agent.hcl @@ -0,0 +1,457 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +scenario "agent" { + matrix { + arch = ["amd64", "arm64"] + artifact_source = ["local", "crt", "artifactory"] + artifact_type = ["bundle", "package"] + backend = ["consul", "raft"] + consul_version = ["1.12.9", "1.13.9", "1.14.9", "1.15.5", "1.16.1"] + distro = ["ubuntu", "rhel"] + edition = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + seal = ["awskms", "shamir"] + seal_ha_beta = ["true", "false"] + + # Our local builder always creates bundles + exclude { + artifact_source = ["local"] + artifact_type = ["package"] + } + + # HSM and FIPS 140-2 are only supported on amd64 + exclude { + arch = ["arm64"] + edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + } + } + + terraform_cli = terraform_cli.default + terraform = terraform.default + providers = [ + provider.aws.default, + provider.enos.ubuntu, + provider.enos.rhel + ] + + locals { + artifact_path = matrix.artifact_source != "artifactory" ? 
abspath(var.vault_artifact_path) : null + enos_provider = { + rhel = provider.enos.rhel + ubuntu = provider.enos.ubuntu + } + manage_service = matrix.artifact_type == "bundle" + vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro] + } + + step "get_local_metadata" { + skip_step = matrix.artifact_source != "local" + module = module.get_local_metadata + } + + step "build_vault" { + module = "build_${matrix.artifact_source}" + + variables { + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null + artifactory_username = matrix.artifact_source == "artifactory" ? var.artifactory_username : null + artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.artifact_source == "artifactory" ? matrix.arch : null + product_version = var.vault_product_version + artifact_type = matrix.artifact_type + distro = matrix.artifact_source == "artifactory" ? matrix.distro : null + edition = matrix.artifact_source == "artifactory" ? matrix.edition : null + revision = var.vault_revision + } + } + + step "ec2_info" { + module = module.ec2_info + } + + step "create_vpc" { + module = module.create_vpc + + variables { + common_tags = global.tags + } + } + + step "create_seal_key" { + module = "seal_key_${matrix.seal}" + + variables { + cluster_id = step.create_vpc.cluster_id + common_tags = global.tags + } + } + + // This step reads the contents of the backend license if we're using a Consul backend and + // the edition is "ent". + step "read_backend_license" { + skip_step = matrix.backend == "raft" || var.backend_edition == "ce" + module = module.read_license + + variables { + file_name = global.backend_license_path + } + } + + step "read_vault_license" { + skip_step = matrix.edition == "ce" + module = module.read_license + + variables { + file_name = global.vault_license_path + } + } + + step "create_vault_cluster_targets" { + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_vault_cluster_backend_targets" { + module = matrix.backend == "consul" ? 
module.target_ec2_instances : module.target_ec2_shim + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_backend_cluster" { + module = "backend_${matrix.backend}" + depends_on = [ + step.create_vault_cluster_backend_targets + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_name = step.create_vault_cluster_backend_targets.cluster_name + cluster_tag_key = global.backend_tag_key + license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + release = { + edition = var.backend_edition + version = matrix.consul_version + } + target_hosts = step.create_vault_cluster_backend_targets.hosts + } + } + + step "create_vault_cluster" { + module = module.vault_cluster + depends_on = [ + step.create_backend_cluster, + step.build_vault, + step.create_vault_cluster_targets + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + cluster_name = step.create_vault_cluster_targets.cluster_name + consul_license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + consul_release = matrix.backend == "consul" ? { + edition = var.backend_edition + version = matrix.consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? 
step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro]) + seal_ha_beta = matrix.seal_ha_beta + seal_key_name = step.create_seal_key.resource_name + seal_type = matrix.seal + storage_backend = matrix.backend + target_hosts = step.create_vault_cluster_targets.hosts + } + } + + // Wait for our cluster to elect a leader + step "wait_for_leader" { + module = module.vault_wait_for_leader + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + timeout = 120 # seconds + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "start_vault_agent" { + module = "vault_agent" + depends_on = [ + step.build_vault, + step.create_vault_cluster, + step.wait_for_leader, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + vault_root_token = step.create_vault_cluster.root_token + vault_agent_template_destination = "/tmp/agent_output.txt" + vault_agent_template_contents = "{{ with secret \\\"auth/token/lookup-self\\\" }}orphan={{ .Data.orphan }} display_name={{ .Data.display_name }}{{ end }}" + } + } + + step "verify_vault_agent_output" { + module = module.vault_verify_agent_output + depends_on = [ + step.create_vault_cluster, + step.start_vault_agent, + step.wait_for_leader, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_cluster_targets.hosts + vault_agent_template_destination = "/tmp/agent_output.txt" + vault_agent_expected_output = "orphan=true display_name=approle" + } + } + + step "get_vault_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_vault_version" { + module = module.vault_verify_version + depends_on = [step.wait_for_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_cluster_targets.hosts + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? 
step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_vault_unsealed" { + module = module.vault_verify_unsealed + depends_on = [step.wait_for_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + step "verify_write_test_data" { + module = module.vault_verify_write_data + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + leader_public_ip = step.get_vault_cluster_ips.leader_public_ip + leader_private_ip = step.get_vault_cluster_ips.leader_private_ip + vault_instances = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_raft_auto_join_voter" { + skip_step = matrix.backend != "raft" + module = module.vault_verify_raft_auto_join_voter + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_replication" { + module = module.vault_verify_replication + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + step "verify_read_test_data" { + module = module.vault_verify_read_data + depends_on = [ + step.verify_write_test_data, + step.verify_replication + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + node_public_ips = step.get_vault_cluster_ips.follower_public_ips + vault_install_dir = local.vault_install_dir + } + } + + step "verify_ui" { + module = module.vault_verify_ui + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path + } + + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name + } + + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.target_hosts + } + + output "private_ips" { + description = "The Vault cluster private IPs" + value = step.create_vault_cluster.private_ips + } + + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips + } + + output "root_token" { + description = "The Vault cluster root token" + value = step.create_vault_cluster.root_token + } + + output "recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.recovery_key_shares + } + + output "recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.recovery_keys_b64 + } + + output "recovery_keys_hex" { + 
description = "The Vault cluster recovery keys hex" + value = step.create_vault_cluster.recovery_keys_hex + } + + output "seal_key_name" { + description = "The name of the cluster seal key" + value = step.create_seal_key.resource_name + } + + output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = step.create_vault_cluster.unseal_keys_b64 + } + + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys hex" + value = step.create_vault_cluster.unseal_keys_hex + } +} diff --git a/enos/enos-scenario-autopilot.hcl b/enos/enos-scenario-autopilot.hcl new file mode 100644 index 0000000..09f8764 --- /dev/null +++ b/enos/enos-scenario-autopilot.hcl @@ -0,0 +1,587 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +scenario "autopilot" { + matrix { + arch = ["amd64", "arm64"] + artifact_source = ["local", "crt", "artifactory"] + artifact_type = ["bundle", "package"] + distro = ["ubuntu", "rhel"] + edition = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + // NOTE: when backporting, make sure that our initial versions are less than that + // release branch's version. + initial_version = ["1.11.12", "1.12.11", "1.13.6", "1.14.2"] + seal = ["awskms", "shamir"] + seal_ha_beta = ["true", "false"] + + # Our local builder always creates bundles + exclude { + artifact_source = ["local"] + artifact_type = ["package"] + } + + # HSM and FIPS 140-2 are only supported on amd64 + exclude { + arch = ["arm64"] + edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + } + } + + terraform_cli = terraform_cli.default + terraform = terraform.default + providers = [ + provider.aws.default, + provider.enos.ubuntu, + provider.enos.rhel + ] + + locals { + artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null + enos_provider = { + rhel = provider.enos.rhel + ubuntu = provider.enos.ubuntu + } + manage_service = matrix.artifact_type == "bundle" + vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro] + } + + step "build_vault" { + module = "build_${matrix.artifact_source}" + + variables { + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null + artifactory_username = matrix.artifact_source == "artifactory" ? var.artifactory_username : null + artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.artifact_source == "artifactory" ? matrix.arch : null + product_version = var.vault_product_version + artifact_type = matrix.artifact_type + distro = matrix.artifact_source == "artifactory" ? matrix.distro : null + edition = matrix.artifact_source == "artifactory" ? 
matrix.edition : null + revision = var.vault_revision + } + } + + step "ec2_info" { + module = module.ec2_info + } + + step "create_vpc" { + module = module.create_vpc + + variables { + common_tags = global.tags + } + } + + step "create_seal_key" { + module = "seal_key_${matrix.seal}" + + variables { + cluster_id = step.create_vpc.cluster_id + common_tags = global.tags + } + } + + step "read_license" { + module = module.read_license + + variables { + file_name = global.vault_license_path + } + } + + step "create_vault_cluster_targets" { + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_vault_cluster" { + module = module.vault_cluster + depends_on = [ + step.build_vault, + step.create_vault_cluster_targets + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + cluster_name = step.create_vault_cluster_targets.cluster_name + enable_audit_devices = var.vault_enable_audit_devices + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? step.read_license.license : null + packages = concat(global.packages, global.distro_packages[matrix.distro]) + release = { + edition = matrix.edition + version = matrix.initial_version + } + seal_ha_beta = matrix.seal_ha_beta + seal_key_name = step.create_seal_key.resource_name + seal_type = matrix.seal + storage_backend = "raft" + storage_backend_addl_config = { + autopilot_upgrade_version = matrix.initial_version + } + target_hosts = step.create_vault_cluster_targets.hosts + } + } + + step "get_local_metadata" { + skip_step = matrix.artifact_source != "local" + module = module.get_local_metadata + } + + step "get_vault_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster.target_hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_write_test_data" { + module = module.vault_verify_write_data + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + leader_public_ip = step.get_vault_cluster_ips.leader_public_ip + leader_private_ip = step.get_vault_cluster_ips.leader_private_ip + vault_instances = step.create_vault_cluster.target_hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "create_autopilot_upgrade_storageconfig" { + module = module.autopilot_upgrade_storageconfig + + variables { + vault_product_version = matrix.artifact_source == "local" ? 
step.get_local_metadata.version : var.vault_product_version + } + } + + step "create_vault_cluster_upgrade_targets" { + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + common_tags = global.tags + cluster_name = step.create_vault_cluster_targets.cluster_name + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "upgrade_vault_cluster_with_autopilot" { + module = module.vault_cluster + depends_on = [ + step.build_vault, + step.create_vault_cluster, + step.create_autopilot_upgrade_storageconfig, + step.verify_write_test_data + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + enable_audit_devices = var.vault_enable_audit_devices + cluster_name = step.create_vault_cluster_targets.cluster_name + log_level = var.vault_log_level + force_unseal = matrix.seal == "shamir" + initialize_cluster = false + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? step.read_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro]) + root_token = step.create_vault_cluster.root_token + seal_ha_beta = matrix.seal_ha_beta + seal_key_name = step.create_seal_key.resource_name + seal_type = matrix.seal + shamir_unseal_keys = matrix.seal == "shamir" ? step.create_vault_cluster.unseal_keys_hex : null + storage_backend = "raft" + storage_backend_addl_config = step.create_autopilot_upgrade_storageconfig.storage_addl_config + storage_node_prefix = "upgrade_node" + target_hosts = step.create_vault_cluster_upgrade_targets.hosts + } + } + + step "verify_vault_unsealed" { + module = module.vault_verify_unsealed + depends_on = [ + step.create_vault_cluster, + step.create_vault_cluster_upgrade_targets, + step.upgrade_vault_cluster_with_autopilot, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts + } + } + + step "verify_raft_auto_join_voter" { + module = module.vault_verify_raft_auto_join_voter + depends_on = [ + step.upgrade_vault_cluster_with_autopilot, + step.verify_vault_unsealed + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts + vault_root_token = step.upgrade_vault_cluster_with_autopilot.root_token + } + } + + step "verify_autopilot_await_server_removal_state" { + module = module.vault_verify_autopilot + depends_on = [ + step.create_vault_cluster_upgrade_targets, + step.upgrade_vault_cluster_with_autopilot, + step.verify_raft_auto_join_voter + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_autopilot_upgrade_version = matrix.artifact_source == "local" ? 
step.get_local_metadata.version : var.vault_product_version + vault_autopilot_upgrade_status = "await-server-removal" + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster.target_hosts + vault_root_token = step.upgrade_vault_cluster_with_autopilot.root_token + } + } + + step "wait_for_leader_in_upgrade_targets" { + module = module.vault_wait_for_leader + depends_on = [ + step.create_vault_cluster, + step.create_vault_cluster_upgrade_targets, + step.get_vault_cluster_ips, + step.upgrade_vault_cluster_with_autopilot + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + vault_hosts = step.upgrade_vault_cluster_with_autopilot.target_hosts + } + } + + step "get_updated_vault_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [ + step.create_vault_cluster, + step.create_vault_cluster_upgrade_targets, + step.get_vault_cluster_ips, + step.upgrade_vault_cluster_with_autopilot, + step.wait_for_leader_in_upgrade_targets, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.upgrade_vault_cluster_with_autopilot.target_hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_read_test_data" { + module = module.vault_verify_read_data + depends_on = [ + step.get_updated_vault_cluster_ips, + step.verify_write_test_data, + step.upgrade_vault_cluster_with_autopilot, + step.verify_raft_auto_join_voter + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + node_public_ips = step.get_updated_vault_cluster_ips.follower_public_ips + vault_instance_count = 6 + vault_install_dir = local.vault_install_dir + } + } + + step "raft_remove_peers" { + module = module.vault_raft_remove_peer + depends_on = [ + step.create_vault_cluster_upgrade_targets, + step.get_updated_vault_cluster_ips, + step.upgrade_vault_cluster_with_autopilot, + step.verify_autopilot_await_server_removal_state + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + operator_instance = step.get_updated_vault_cluster_ips.leader_public_ip + remove_vault_instances = step.create_vault_cluster.target_hosts + vault_install_dir = local.vault_install_dir + vault_instance_count = 3 + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "remove_old_nodes" { + module = module.shutdown_multiple_nodes + depends_on = [ + step.create_vault_cluster, + step.raft_remove_peers + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + old_vault_instances = step.create_vault_cluster.target_hosts + vault_instance_count = 3 + } + } + + step "verify_autopilot_idle_state" { + module = module.vault_verify_autopilot + depends_on = [ + step.create_vault_cluster_upgrade_targets, + step.upgrade_vault_cluster_with_autopilot, + step.verify_raft_auto_join_voter, + step.remove_old_nodes + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_autopilot_upgrade_version = matrix.artifact_source == "local" ? 
step.get_local_metadata.version : var.vault_product_version + vault_autopilot_upgrade_status = "idle" + vault_install_dir = local.vault_install_dir + vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_replication" { + module = module.vault_verify_replication + depends_on = [ + step.create_vault_cluster_upgrade_targets, + step.upgrade_vault_cluster_with_autopilot, + step.verify_raft_auto_join_voter, + step.remove_old_nodes + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts + } + } + + step "verify_vault_version" { + module = module.vault_verify_version + depends_on = [ + step.create_vault_cluster_upgrade_targets, + step.upgrade_vault_cluster_with_autopilot, + step.verify_raft_auto_join_voter, + step.remove_old_nodes + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_ui" { + module = module.vault_verify_ui + depends_on = [ + step.create_vault_cluster_upgrade_targets, + step.upgrade_vault_cluster_with_autopilot, + step.verify_raft_auto_join_voter, + step.remove_old_nodes + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts + } + } + + step "verify_undo_logs_status" { + skip_step = true + # NOTE: temporarily disable undo logs checking until it is fixed. 
See VAULT-20259 + # skip_step = semverconstraint(var.vault_product_version, "<1.13.0-0") + module = module.vault_verify_undo_logs + depends_on = [ + step.create_vault_cluster_upgrade_targets, + step.remove_old_nodes, + step.upgrade_vault_cluster_with_autopilot, + step.verify_autopilot_idle_state + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts + vault_root_token = step.create_vault_cluster.root_token + } + } + + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path + } + + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name + } + + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.target_hosts + } + + output "private_ips" { + description = "The Vault cluster private IPs" + value = step.create_vault_cluster.private_ips + } + + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips + } + + output "root_token" { + description = "The Vault cluster root token" + value = step.create_vault_cluster.root_token + } + + output "recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.recovery_key_shares + } + + output "recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.recovery_keys_b64 + } + + output "recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = step.create_vault_cluster.recovery_keys_hex + } + + output "seal_key_name" { + description = "The Vault cluster seal key name" + value = step.create_seal_key.resource_name + } + + output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = step.create_vault_cluster.unseal_keys_b64 + } + + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys hex" + value = step.create_vault_cluster.unseal_keys_hex + } + + output "upgrade_hosts" { + description = "The Vault cluster target hosts" + value = step.upgrade_vault_cluster_with_autopilot.target_hosts + } + + output "upgrade_private_ips" { + description = "The Vault cluster private IPs" + value = step.upgrade_vault_cluster_with_autopilot.private_ips + } + + output "upgrade_public_ips" { + description = "The Vault cluster public IPs" + value = step.upgrade_vault_cluster_with_autopilot.public_ips + } +} diff --git a/enos/enos-scenario-proxy.hcl b/enos/enos-scenario-proxy.hcl new file mode 100644 index 0000000..2bc4dd5 --- /dev/null +++ b/enos/enos-scenario-proxy.hcl @@ -0,0 +1,429 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +scenario "proxy" { + matrix { + arch = ["amd64", "arm64"] + artifact_source = ["local", "crt", "artifactory"] + artifact_type = ["bundle", "package"] + backend = ["consul", "raft"] + consul_version = ["1.12.9", "1.13.9", "1.14.9", "1.15.5", "1.16.1"] + distro = ["ubuntu", "rhel"] + edition = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + seal = ["awskms", "shamir"] + seal_ha_beta = ["true", "false"] + + # Our local builder always creates bundles + exclude { + artifact_source = ["local"] + artifact_type = ["package"] + } + + # HSM and FIPS 140-2 are only supported on amd64 + exclude { + arch = ["arm64"] + edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + } + } + + terraform_cli = terraform_cli.default + terraform = terraform.default + providers = [ + provider.aws.default, + provider.enos.ubuntu, + provider.enos.rhel + ] + + locals { + artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null + enos_provider = { + rhel = provider.enos.rhel + ubuntu = provider.enos.ubuntu + } + manage_service = matrix.artifact_type == "bundle" + vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro] + } + + step "get_local_metadata" { + skip_step = matrix.artifact_source != "local" + module = module.get_local_metadata + } + + step "build_vault" { + module = "build_${matrix.artifact_source}" + + variables { + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null + artifactory_username = matrix.artifact_source == "artifactory" ? var.artifactory_username : null + artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.artifact_source == "artifactory" ? matrix.arch : null + product_version = var.vault_product_version + artifact_type = matrix.artifact_type + distro = matrix.artifact_source == "artifactory" ? matrix.distro : null + edition = matrix.artifact_source == "artifactory" ? matrix.edition : null + revision = var.vault_revision + } + } + + step "ec2_info" { + module = module.ec2_info + } + + step "create_vpc" { + module = module.create_vpc + + variables { + common_tags = global.tags + } + } + + step "create_seal_key" { + module = "seal_key_${matrix.seal}" + + variables { + cluster_id = step.create_vpc.cluster_id + common_tags = global.tags + } + } + + // This step reads the contents of the backend license if we're using a Consul backend and + // the edition is "ent". 
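+  // For reference, the read_license module exposes the file contents as a
+  // `license` output, which later steps consume conditionally, e.g. in
+  // "create_backend_cluster" below:
+  //   license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null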
+ step "read_backend_license" { + skip_step = matrix.backend == "raft" || var.backend_edition == "ce" + module = module.read_license + + variables { + file_name = global.backend_license_path + } + } + + step "read_vault_license" { + skip_step = matrix.edition == "ce" + module = module.read_license + + variables { + file_name = global.vault_license_path + } + } + + step "create_vault_cluster_targets" { + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_vault_cluster_backend_targets" { + module = matrix.backend == "consul" ? module.target_ec2_instances : module.target_ec2_shim + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_backend_cluster" { + module = "backend_${matrix.backend}" + depends_on = [ + step.create_vault_cluster_backend_targets + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_name = step.create_vault_cluster_backend_targets.cluster_name + cluster_tag_key = global.backend_tag_key + license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + release = { + edition = var.backend_edition + version = matrix.consul_version + } + target_hosts = step.create_vault_cluster_backend_targets.hosts + } + } + + step "create_vault_cluster" { + module = module.vault_cluster + depends_on = [ + step.create_backend_cluster, + step.build_vault, + step.create_vault_cluster_targets + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + cluster_name = step.create_vault_cluster_targets.cluster_name + consul_license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + consul_release = matrix.backend == "consul" ? { + edition = var.backend_edition + version = matrix.consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? 
step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro]) + seal_ha_beta = matrix.seal_ha_beta + seal_key_name = step.create_seal_key.resource_name + seal_type = matrix.seal + storage_backend = matrix.backend + target_hosts = step.create_vault_cluster_targets.hosts + } + } + + // Wait for our cluster to elect a leader + step "wait_for_leader" { + module = module.vault_wait_for_leader + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + timeout = 120 # seconds + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "start_vault_proxy" { + module = "vault_proxy" + depends_on = [ + step.build_vault, + step.create_vault_cluster, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_vault_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_vault_version" { + module = module.vault_verify_version + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_cluster_targets.hosts + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? 
step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_vault_unsealed" { + module = module.vault_verify_unsealed + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + step "verify_write_test_data" { + module = module.vault_verify_write_data + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + leader_public_ip = step.get_vault_cluster_ips.leader_public_ip + leader_private_ip = step.get_vault_cluster_ips.leader_private_ip + vault_instances = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_raft_auto_join_voter" { + skip_step = matrix.backend != "raft" + module = module.vault_verify_raft_auto_join_voter + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_replication" { + module = module.vault_verify_replication + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + step "verify_read_test_data" { + module = module.vault_verify_read_data + depends_on = [ + step.verify_write_test_data, + step.verify_replication + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + node_public_ips = step.get_vault_cluster_ips.follower_public_ips + vault_install_dir = local.vault_install_dir + } + } + + step "verify_ui" { + module = module.vault_verify_ui + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path + } + + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name + } + + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.target_hosts + } + + output "private_ips" { + description = "The Vault cluster private IPs" + value = step.create_vault_cluster.private_ips + } + + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips + } + + output "root_token" { + description = "The Vault cluster root token" + value = step.create_vault_cluster.root_token + } + + output "recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.recovery_key_shares + } + + output "recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.recovery_keys_b64 + } + + output "recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = 
step.create_vault_cluster.recovery_keys_hex + } + + output "seal_key_name" { + description = "The Vault cluster seal key name" + value = step.create_seal_key.resource_name + } + + output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = step.create_vault_cluster.unseal_keys_b64 + } + + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys hex" + value = step.create_vault_cluster.unseal_keys_hex + } +} diff --git a/enos/enos-scenario-replication.hcl b/enos/enos-scenario-replication.hcl new file mode 100644 index 0000000..753bed5 --- /dev/null +++ b/enos/enos-scenario-replication.hcl @@ -0,0 +1,898 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +// The replication scenario configures performance replication between two Vault clusters and verifies +// known_primary_cluster_addrs are updated on secondary Vault cluster with the IP addresses of replaced +// nodes on primary Vault cluster +scenario "replication" { + matrix { + arch = ["amd64", "arm64"] + artifact_source = ["local", "crt", "artifactory"] + artifact_type = ["bundle", "package"] + consul_version = ["1.12.9", "1.13.9", "1.14.9", "1.15.5", "1.16.1"] + distro = ["ubuntu", "rhel"] + edition = ["ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + primary_backend = ["raft", "consul"] + primary_seal = ["awskms", "shamir"] + seal_ha_beta = ["true", "false"] + secondary_backend = ["raft", "consul"] + secondary_seal = ["awskms", "shamir"] + + # Our local builder always creates bundles + exclude { + artifact_source = ["local"] + artifact_type = ["package"] + } + + # HSM and FIPS 140-2 are only supported on amd64 + exclude { + arch = ["arm64"] + edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + } + } + + terraform_cli = terraform_cli.default + terraform = terraform.default + providers = [ + provider.aws.default, + provider.enos.ubuntu, + provider.enos.rhel + ] + + locals { + artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null + enos_provider = { + rhel = provider.enos.rhel + ubuntu = provider.enos.ubuntu + } + manage_service = matrix.artifact_type == "bundle" + vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro] + } + + step "get_local_metadata" { + skip_step = matrix.artifact_source != "local" + module = module.get_local_metadata + } + + step "build_vault" { + module = "build_${matrix.artifact_source}" + + variables { + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null + artifactory_username = matrix.artifact_source == "artifactory" ? var.artifactory_username : null + artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.artifact_source == "artifactory" ? matrix.arch : null + product_version = var.vault_product_version + artifact_type = matrix.artifact_type + distro = matrix.artifact_source == "artifactory" ? matrix.distro : null + edition = matrix.artifact_source == "artifactory" ? 
matrix.edition : null + revision = var.vault_revision + } + } + + step "ec2_info" { + module = module.ec2_info + } + + step "create_vpc" { + module = module.create_vpc + + variables { + common_tags = global.tags + } + } + + step "create_primary_seal_key" { + module = "seal_key_${matrix.primary_seal}" + + variables { + cluster_id = step.create_vpc.cluster_id + cluster_meta = "primary" + common_tags = global.tags + } + } + + step "create_secondary_seal_key" { + module = "seal_key_${matrix.secondary_seal}" + + variables { + cluster_id = step.create_vpc.cluster_id + cluster_meta = "secondary" + common_tags = global.tags + } + } + + // This step reads the contents of the backend license if we're using a Consul backend and + // the edition is "ent". + step "read_backend_license" { + skip_step = (matrix.primary_backend == "raft" && matrix.secondary_backend == "raft") || var.backend_edition == "ce" + module = module.read_license + + variables { + file_name = global.backend_license_path + } + } + + step "read_vault_license" { + module = module.read_license + + variables { + file_name = abspath(joinpath(path.root, "./support/vault.hclic")) + } + } + + # Create all of our instances for both primary and secondary clusters + step "create_primary_cluster_targets" { + module = module.target_ec2_instances + depends_on = [ + step.create_vpc, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_primary_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_primary_cluster_backend_targets" { + module = matrix.primary_backend == "consul" ? module.target_ec2_instances : module.target_ec2_shim + depends_on = [ + step.create_vpc, + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + seal_key_names = step.create_primary_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_primary_cluster_additional_targets" { + module = module.target_ec2_instances + depends_on = [ + step.create_vpc, + step.create_primary_cluster_targets, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_name = step.create_primary_cluster_targets.cluster_name + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_primary_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_secondary_cluster_targets" { + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_secondary_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_secondary_cluster_backend_targets" { + module = matrix.secondary_backend == "consul" ? 
module.target_ec2_instances : module.target_ec2_shim + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + seal_key_names = step.create_secondary_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_primary_backend_cluster" { + module = "backend_${matrix.primary_backend}" + depends_on = [ + step.create_primary_cluster_backend_targets, + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_name = step.create_primary_cluster_backend_targets.cluster_name + cluster_tag_key = global.backend_tag_key + license = (matrix.primary_backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + release = { + edition = var.backend_edition + version = matrix.consul_version + } + target_hosts = step.create_primary_cluster_backend_targets.hosts + } + } + + step "create_primary_cluster" { + module = module.vault_cluster + depends_on = [ + step.create_primary_backend_cluster, + step.build_vault, + step.create_primary_cluster_targets + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + backend_cluster_name = step.create_primary_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + consul_license = (matrix.primary_backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + cluster_name = step.create_primary_cluster_targets.cluster_name + consul_release = matrix.primary_backend == "consul" ? { + edition = var.backend_edition + version = matrix.consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro]) + seal_ha_beta = matrix.seal_ha_beta + seal_key_name = step.create_primary_seal_key.resource_name + seal_type = matrix.primary_seal + storage_backend = matrix.primary_backend + target_hosts = step.create_primary_cluster_targets.hosts + } + } + + step "create_secondary_backend_cluster" { + module = "backend_${matrix.secondary_backend}" + depends_on = [ + step.create_secondary_cluster_backend_targets + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_name = step.create_secondary_cluster_backend_targets.cluster_name + cluster_tag_key = global.backend_tag_key + license = (matrix.secondary_backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + release = { + edition = var.backend_edition + version = matrix.consul_version + } + target_hosts = step.create_secondary_cluster_backend_targets.hosts + } + } + + step "create_secondary_cluster" { + module = module.vault_cluster + depends_on = [ + step.create_secondary_backend_cluster, + step.build_vault, + step.create_secondary_cluster_targets + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + artifactory_release = matrix.artifact_source == "artifactory" ? 
step.build_vault.vault_artifactory_release : null + backend_cluster_name = step.create_secondary_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + consul_license = (matrix.secondary_backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + cluster_name = step.create_secondary_cluster_targets.cluster_name + consul_release = matrix.secondary_backend == "consul" ? { + edition = var.backend_edition + version = matrix.consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro]) + seal_ha_beta = matrix.seal_ha_beta + seal_key_name = step.create_secondary_seal_key.resource_name + seal_type = matrix.secondary_seal + storage_backend = matrix.secondary_backend + target_hosts = step.create_secondary_cluster_targets.hosts + } + } + + step "verify_that_vault_primary_cluster_is_unsealed" { + module = module.vault_verify_unsealed + depends_on = [ + step.create_primary_cluster + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_primary_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + } + } + + step "verify_that_vault_secondary_cluster_is_unsealed" { + module = module.vault_verify_unsealed + depends_on = [ + step.create_secondary_cluster + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_secondary_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + } + } + + step "verify_vault_version" { + module = module.vault_verify_version + depends_on = [ + step.create_primary_cluster + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_primary_cluster_targets.hosts + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? 
step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_primary_cluster.root_token + } + } + + step "verify_ui" { + module = module.vault_verify_ui + depends_on = [ + step.create_primary_cluster + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_primary_cluster_targets.hosts + } + } + + step "get_primary_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [ + step.verify_vault_version, + step.verify_ui, + step.verify_that_vault_primary_cluster_is_unsealed, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_primary_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_primary_cluster.root_token + } + } + + step "get_primary_cluster_replication_data" { + module = module.replication_data + depends_on = [step.get_primary_cluster_ips] + + variables { + follower_hosts = step.get_primary_cluster_ips.follower_hosts + } + } + + step "get_secondary_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [step.verify_that_vault_secondary_cluster_is_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_secondary_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_secondary_cluster.root_token + } + } + + step "write_test_data_on_primary" { + module = module.vault_verify_write_data + depends_on = [step.get_primary_cluster_ips] + + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + leader_public_ip = step.get_primary_cluster_ips.leader_public_ip + leader_private_ip = step.get_primary_cluster_ips.leader_private_ip + vault_instances = step.create_primary_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_primary_cluster.root_token + } + } + + step "configure_performance_replication_primary" { + module = module.vault_setup_perf_primary + depends_on = [ + step.get_primary_cluster_ips, + step.get_secondary_cluster_ips, + step.write_test_data_on_primary + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + primary_leader_public_ip = step.get_primary_cluster_ips.leader_public_ip + primary_leader_private_ip = step.get_primary_cluster_ips.leader_private_ip + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_primary_cluster.root_token + } + } + + step "generate_secondary_token" { + module = module.generate_secondary_token + depends_on = [step.configure_performance_replication_primary] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + primary_leader_public_ip = step.get_primary_cluster_ips.leader_public_ip + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_primary_cluster.root_token + } + } + + step "configure_performance_replication_secondary" { + module = module.vault_setup_perf_secondary + depends_on = [step.generate_secondary_token] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + secondary_leader_public_ip = step.get_secondary_cluster_ips.leader_public_ip + secondary_leader_private_ip = step.get_secondary_cluster_ips.leader_private_ip + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_secondary_cluster.root_token + wrapping_token = step.generate_secondary_token.secondary_token + } + } + + // 
After replication is enabled, the secondary cluster followers need to be unsealed + // Secondary unseal keys are passed using the guide https://developer.hashicorp.com/vault/docs/enterprise/replication#seals + step "unseal_secondary_followers" { + module = module.vault_unseal_nodes + depends_on = [ + step.create_primary_cluster, + step.create_secondary_cluster, + step.get_secondary_cluster_ips, + step.configure_performance_replication_secondary + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + follower_public_ips = step.get_secondary_cluster_ips.follower_public_ips + vault_install_dir = local.vault_install_dir + vault_unseal_keys = matrix.primary_seal == "shamir" ? step.create_primary_cluster.unseal_keys_hex : step.create_primary_cluster.recovery_keys_hex + vault_seal_type = matrix.primary_seal == "shamir" ? matrix.primary_seal : matrix.secondary_seal + } + } + + step "verify_secondary_cluster_is_unsealed_after_enabling_replication" { + module = module.vault_verify_unsealed + depends_on = [ + step.unseal_secondary_followers + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_secondary_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + } + } + + step "verify_performance_replication" { + module = module.vault_verify_performance_replication + depends_on = [step.verify_secondary_cluster_is_unsealed_after_enabling_replication] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + primary_leader_public_ip = step.get_primary_cluster_ips.leader_public_ip + primary_leader_private_ip = step.get_primary_cluster_ips.leader_private_ip + secondary_leader_public_ip = step.get_secondary_cluster_ips.leader_public_ip + secondary_leader_private_ip = step.get_secondary_cluster_ips.leader_private_ip + vault_install_dir = local.vault_install_dir + } + } + + step "verify_replicated_data" { + module = module.vault_verify_read_data + depends_on = [ + step.verify_performance_replication, + step.get_secondary_cluster_ips, + step.write_test_data_on_primary + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + node_public_ips = step.get_secondary_cluster_ips.follower_public_ips + vault_install_dir = local.vault_install_dir + } + } + + step "add_additional_nodes_to_primary_cluster" { + module = module.vault_cluster + depends_on = [ + step.create_vpc, + step.create_primary_backend_cluster, + step.create_primary_cluster, + step.verify_replicated_data, + step.create_primary_cluster_additional_targets + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + backend_cluster_name = step.create_primary_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + cluster_name = step.create_primary_cluster_targets.cluster_name + consul_license = (matrix.primary_backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + consul_release = matrix.primary_backend == "consul" ? { + edition = var.backend_edition + version = matrix.consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + force_unseal = matrix.primary_seal == "shamir" + initialize_cluster = false + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? 
step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro]) + root_token = step.create_primary_cluster.root_token + seal_ha_beta = matrix.seal_ha_beta + seal_key_name = step.create_primary_seal_key.resource_name + seal_type = matrix.primary_seal + shamir_unseal_keys = matrix.primary_seal == "shamir" ? step.create_primary_cluster.unseal_keys_hex : null + storage_backend = matrix.primary_backend + storage_node_prefix = "newprimary_node" + target_hosts = step.create_primary_cluster_additional_targets.hosts + } + } + + step "verify_additional_primary_nodes_are_unsealed" { + module = module.vault_verify_unsealed + depends_on = [step.add_additional_nodes_to_primary_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_primary_cluster_additional_targets.hosts + vault_install_dir = local.vault_install_dir + } + } + + step "verify_raft_auto_join_voter" { + skip_step = matrix.primary_backend != "raft" + module = module.vault_verify_raft_auto_join_voter + depends_on = [ + step.add_additional_nodes_to_primary_cluster, + step.create_primary_cluster, + step.verify_additional_primary_nodes_are_unsealed + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_primary_cluster_additional_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_primary_cluster.root_token + } + } + + step "remove_primary_follower_1" { + module = module.shutdown_node + depends_on = [ + step.get_primary_cluster_replication_data, + step.verify_additional_primary_nodes_are_unsealed + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + node_public_ip = step.get_primary_cluster_replication_data.follower_public_ip_1 + } + } + + step "remove_primary_leader" { + module = module.shutdown_node + depends_on = [ + step.get_primary_cluster_ips, + step.remove_primary_follower_1 + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + node_public_ip = step.get_primary_cluster_ips.leader_public_ip + } + } + + // After we've removed two nodes from the cluster we need to get an updated set of vault hosts + // to work with. + step "get_remaining_hosts_replication_data" { + module = module.replication_data + depends_on = [ + step.get_primary_cluster_ips, + step.remove_primary_leader, + ] + + variables { + added_hosts = step.create_primary_cluster_additional_targets.hosts + added_hosts_count = var.vault_instance_count + initial_hosts = step.create_primary_cluster_targets.hosts + initial_hosts_count = var.vault_instance_count + removed_follower_host = step.get_primary_cluster_replication_data.follower_host_1 + removed_primary_host = step.get_primary_cluster_ips.leader_host + } + } + + // Wait for the remaining hosts in our cluster to elect a new leader. + step "wait_for_leader_in_remaining_hosts" { + module = module.vault_wait_for_leader + depends_on = [ + step.remove_primary_leader, + step.get_remaining_hosts_replication_data, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + timeout = 120 # seconds + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_primary_cluster.root_token + vault_hosts = step.get_remaining_hosts_replication_data.remaining_hosts + } + } + + // Get our new leader and follower IP addresses. 
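+  // The refreshed addresses feed the final replication verification, e.g.:
+  //   primary_leader_public_ip = step.get_updated_primary_cluster_ips.leader_public_ip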
+ step "get_updated_primary_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [ + step.get_remaining_hosts_replication_data, + step.wait_for_leader_in_remaining_hosts, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.get_remaining_hosts_replication_data.remaining_hosts + vault_install_dir = local.vault_install_dir + vault_instance_count = step.get_remaining_hosts_replication_data.remaining_hosts_count + vault_root_token = step.create_primary_cluster.root_token + } + } + + // Make sure the cluster has the correct performance replication state after the new leader election. + step "verify_updated_performance_replication" { + module = module.vault_verify_performance_replication + depends_on = [ + step.get_remaining_hosts_replication_data, + step.wait_for_leader_in_remaining_hosts, + step.get_updated_primary_cluster_ips, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + primary_leader_public_ip = step.get_updated_primary_cluster_ips.leader_public_ip + primary_leader_private_ip = step.get_updated_primary_cluster_ips.leader_private_ip + secondary_leader_public_ip = step.get_secondary_cluster_ips.leader_public_ip + secondary_leader_private_ip = step.get_secondary_cluster_ips.leader_private_ip + vault_install_dir = local.vault_install_dir + } + } + + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_primary_cluster.audit_device_file_path + } + + output "primary_cluster_hosts" { + description = "The Vault primary cluster target hosts" + value = step.create_primary_cluster_targets.hosts + } + + output "primary_cluster_additional_hosts" { + description = "The Vault added new node on primary cluster target hosts" + value = step.create_primary_cluster_additional_targets.hosts + } + + output "primary_cluster_root_token" { + description = "The Vault primary cluster root token" + value = step.create_primary_cluster.root_token + } + + output "primary_cluster_unseal_keys_b64" { + description = "The Vault primary cluster unseal keys" + value = step.create_primary_cluster.unseal_keys_b64 + } + + output "primary_cluster_unseal_keys_hex" { + description = "The Vault primary cluster unseal keys hex" + value = step.create_primary_cluster.unseal_keys_hex + } + + output "primary_cluster_recovery_key_shares" { + description = "The Vault primary cluster recovery key shares" + value = step.create_primary_cluster.recovery_key_shares + } + + output "primary_cluster_recovery_keys_b64" { + description = "The Vault primary cluster recovery keys b64" + value = step.create_primary_cluster.recovery_keys_b64 + } + + output "primary_cluster_recovery_keys_hex" { + description = "The Vault primary cluster recovery keys hex" + value = step.create_primary_cluster.recovery_keys_hex + } + + output "secondary_cluster_hosts" { + description = "The Vault secondary cluster public IPs" + value = step.create_secondary_cluster_targets.hosts + } + + output "secondary_cluster_root_token" { + description = "The Vault secondary cluster root token" + value = step.create_secondary_cluster.root_token + } + + output "performance_secondary_token" { + description = "The performance secondary replication token" + value = step.generate_secondary_token.secondary_token + } + + output "remaining_hosts" { + description = "The Vault cluster primary hosts after removing the leader and follower" + value = step.get_remaining_hosts_replication_data.remaining_hosts + } + + 
output "initial_primary_replication_status" { + description = "The Vault primary cluster performance replication status" + value = step.verify_performance_replication.primary_replication_status + } + + output "initial_known_primary_cluster_addresses" { + description = "The Vault secondary cluster performance replication status" + value = step.verify_performance_replication.known_primary_cluster_addrs + } + + output "initial_secondary_performance_replication_status" { + description = "The Vault secondary cluster performance replication status" + value = step.verify_performance_replication.secondary_replication_status + } + + output "intial_primary_replication_data_secondaries" { + description = "The Vault primary cluster secondaries connection status" + value = step.verify_performance_replication.primary_replication_data_secondaries + } + + output "initial_secondary_replication_data_primaries" { + description = "The Vault secondary cluster primaries connection status" + value = step.verify_performance_replication.secondary_replication_data_primaries + } + + output "updated_primary_replication_status" { + description = "The Vault updated primary cluster performance replication status" + value = step.verify_updated_performance_replication.primary_replication_status + } + + output "updated_known_primary_cluster_addresses" { + description = "The Vault secondary cluster performance replication status" + value = step.verify_updated_performance_replication.known_primary_cluster_addrs + } + + output "updated_secondary_replication_status" { + description = "The Vault updated secondary cluster performance replication status" + value = step.verify_updated_performance_replication.secondary_replication_status + } + + output "updated_primary_replication_data_secondaries" { + description = "The Vault updated primary cluster secondaries connection status" + value = step.verify_updated_performance_replication.primary_replication_data_secondaries + } + + output "updated_secondary_replication_data_primaries" { + description = "The Vault updated secondary cluster primaries connection status" + value = step.verify_updated_performance_replication.secondary_replication_data_primaries + } +} diff --git a/enos/enos-scenario-seal-ha.hcl b/enos/enos-scenario-seal-ha.hcl new file mode 100644 index 0000000..7f9a989 --- /dev/null +++ b/enos/enos-scenario-seal-ha.hcl @@ -0,0 +1,713 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +scenario "seal_ha" { + matrix { + arch = ["amd64", "arm64"] + artifact_source = ["local", "crt", "artifactory"] + artifact_type = ["bundle", "package"] + backend = ["consul", "raft"] + consul_version = ["1.12.9", "1.13.9", "1.14.9", "1.15.5", "1.16.1"] + distro = ["ubuntu", "rhel"] + edition = ["ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + primary_seal = ["awskms"] + secondary_seal = ["awskms"] + + # Our local builder always creates bundles + exclude { + artifact_source = ["local"] + artifact_type = ["package"] + } + + # HSM and FIPS 140-2 are only supported on amd64 + exclude { + arch = ["arm64"] + edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + } + } + + terraform_cli = terraform_cli.default + terraform = terraform.default + providers = [ + provider.aws.default, + provider.enos.ubuntu, + provider.enos.rhel + ] + + locals { + artifact_path = matrix.artifact_source != "artifactory" ? 
abspath(var.vault_artifact_path) : null + enos_provider = { + rhel = provider.enos.rhel + ubuntu = provider.enos.ubuntu + } + manage_service = matrix.artifact_type == "bundle" + vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro] + } + + step "get_local_metadata" { + skip_step = matrix.artifact_source != "local" + module = module.get_local_metadata + } + + step "build_vault" { + module = "build_${matrix.artifact_source}" + + variables { + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null + artifactory_username = matrix.artifact_source == "artifactory" ? var.artifactory_username : null + artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.artifact_source == "artifactory" ? matrix.arch : null + product_version = var.vault_product_version + artifact_type = matrix.artifact_type + distro = matrix.artifact_source == "artifactory" ? matrix.distro : null + edition = matrix.artifact_source == "artifactory" ? matrix.edition : null + revision = var.vault_revision + } + } + + step "ec2_info" { + module = module.ec2_info + } + + step "create_vpc" { + module = module.create_vpc + + variables { + common_tags = global.tags + } + } + + step "create_primary_seal_key" { + module = "seal_key_${matrix.primary_seal}" + + variables { + cluster_id = step.create_vpc.cluster_id + cluster_meta = "primary" + common_tags = global.tags + } + } + + step "create_secondary_seal_key" { + module = "seal_key_${matrix.secondary_seal}" + + variables { + cluster_id = step.create_vpc.cluster_id + cluster_meta = "secondary" + common_tags = global.tags + other_resources = step.create_primary_seal_key.resource_names + } + } + + // This step reads the contents of the backend license if we're using a Consul backend and + // the edition is "ent". + step "read_backend_license" { + skip_step = matrix.backend == "raft" || var.backend_edition == "ce" + module = module.read_license + + variables { + file_name = global.backend_license_path + } + } + + step "read_vault_license" { + skip_step = matrix.edition == "ce" + module = module.read_license + + variables { + file_name = global.vault_license_path + } + } + + step "create_vault_cluster_targets" { + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_secondary_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_vault_cluster_backend_targets" { + module = matrix.backend == "consul" ? 
module.target_ec2_instances : module.target_ec2_shim + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] + seal_key_names = step.create_secondary_seal_key.resource_names + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + vpc_id = step.create_vpc.id + } + } + + step "create_backend_cluster" { + module = "backend_${matrix.backend}" + depends_on = [ + step.create_vault_cluster_backend_targets + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_name = step.create_vault_cluster_backend_targets.cluster_name + cluster_tag_key = global.backend_tag_key + license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + release = { + edition = var.backend_edition + version = matrix.consul_version + } + target_hosts = step.create_vault_cluster_backend_targets.hosts + } + } + + step "create_vault_cluster" { + module = module.vault_cluster + depends_on = [ + step.create_backend_cluster, + step.build_vault, + step.create_vault_cluster_targets + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + cluster_name = step.create_vault_cluster_targets.cluster_name + consul_license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + consul_release = matrix.backend == "consul" ? { + edition = var.backend_edition + version = matrix.consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? 
step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro]) + // Only configure our primary seal during our initial cluster setup + seal_type = matrix.primary_seal + seal_key_name = step.create_primary_seal_key.resource_name + storage_backend = matrix.backend + target_hosts = step.create_vault_cluster_targets.hosts + } + } + + // Wait for our cluster to elect a leader + step "wait_for_leader" { + module = module.vault_wait_for_leader + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + timeout = 120 # seconds + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_vault_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_vault_unsealed" { + module = module.vault_verify_unsealed + depends_on = [step.wait_for_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + // Write some test data before we create the new seal + step "verify_write_test_data" { + module = module.vault_verify_write_data + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips, + step.verify_vault_unsealed, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + leader_public_ip = step.get_vault_cluster_ips.leader_public_ip + leader_private_ip = step.get_vault_cluster_ips.leader_private_ip + vault_instances = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Wait for the initial seal rewrap to complete before we add our HA seal. + step "wait_for_initial_seal_rewrap" { + module = module.vault_wait_for_seal_rewrap + depends_on = [ + step.verify_write_test_data, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Stop the vault service on all nodes before we restart with new seal config + step "stop_vault" { + module = module.stop_vault + depends_on = [ + step.create_vault_cluster, + step.verify_write_test_data, + step.wait_for_initial_seal_rewrap, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + target_hosts = step.create_vault_cluster_targets.hosts + } + } + + // Add the secondary seal to the cluster + step "add_ha_seal_to_cluster" { + module = module.start_vault + depends_on = [step.stop_vault] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + cluster_name = step.create_vault_cluster_targets.cluster_name + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? 
step.read_vault_license.license : null + manage_service = local.manage_service + seal_type = matrix.primary_seal + seal_key_name = step.create_primary_seal_key.resource_name + seal_type_secondary = matrix.secondary_seal + seal_key_name_secondary = step.create_secondary_seal_key.resource_name + storage_backend = matrix.backend + target_hosts = step.create_vault_cluster_targets.hosts + } + } + + // Wait for our cluster to elect a leader + step "wait_for_new_leader" { + module = module.vault_wait_for_leader + depends_on = [step.add_ha_seal_to_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + timeout = 120 # seconds + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_updated_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_new_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_vault_unsealed_with_new_seal" { + module = module.vault_verify_unsealed + depends_on = [step.wait_for_new_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + // Wait for the seal rewrap to complete and verify that no entries failed + step "wait_for_seal_rewrap" { + module = module.vault_wait_for_seal_rewrap + depends_on = [ + step.add_ha_seal_to_cluster, + step.verify_vault_unsealed_with_new_seal, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Perform all of our standard verifications after we've enabled multiseal + step "verify_vault_version" { + module = module.vault_verify_version + depends_on = [step.wait_for_seal_rewrap] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_cluster_targets.hosts + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? 
step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_raft_auto_join_voter" { + skip_step = matrix.backend != "raft" + module = module.vault_verify_raft_auto_join_voter + depends_on = [step.wait_for_seal_rewrap] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_replication" { + module = module.vault_verify_replication + depends_on = [step.wait_for_seal_rewrap] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + // Make sure our data is still available + step "verify_read_test_data" { + module = module.vault_verify_read_data + depends_on = [step.wait_for_seal_rewrap] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + node_public_ips = step.get_updated_cluster_ips.follower_public_ips + vault_install_dir = local.vault_install_dir + } + } + + step "verify_ui" { + module = module.vault_verify_ui + depends_on = [step.wait_for_seal_rewrap] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + // Make sure we have a "multiseal" seal type + step "verify_seal_type" { + // Don't run this on versions less than 1.16.0-beta1 until VAULT-21053 is fixed on prior branches. + skip_step = semverconstraint(var.vault_product_version, "< 1.16.0-beta1") + module = module.verify_seal_type + depends_on = [step.wait_for_seal_rewrap] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_hosts = step.create_vault_cluster_targets.hosts + seal_type = "multiseal" + } + } + + // Now we'll migrate away from our initial seal to our secondary seal + + // Stop the vault service on all nodes before we restart with new seal config + step "stop_vault_for_migration" { + module = module.stop_vault + depends_on = [ + step.wait_for_seal_rewrap, + step.verify_read_test_data, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + target_hosts = step.create_vault_cluster_targets.hosts + } + } + + // Remove the "primary" seal from the cluster. Set our "secondary" seal to priority 1. We do this + // by restarting vault with the correct config. + step "remove_primary_seal" { + module = module.start_vault + depends_on = [step.stop_vault_for_migration] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + cluster_name = step.create_vault_cluster_targets.cluster_name + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? 
step.read_vault_license.license : null + manage_service = local.manage_service + seal_alias = "secondary" + seal_type = matrix.secondary_seal + seal_key_name = step.create_secondary_seal_key.resource_name + storage_backend = matrix.backend + target_hosts = step.create_vault_cluster_targets.hosts + } + } + + // Wait for our cluster to elect a leader after restarting vault with a new primary seal + step "wait_for_leader_after_migration" { + module = module.vault_wait_for_leader + depends_on = [step.remove_primary_seal] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + timeout = 120 # seconds + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Since we've restarted our cluster we might have a new leader and followers. Get the new IPs. + step "get_cluster_ips_after_migration" { + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_leader_after_migration] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Make sure we unsealed + step "verify_vault_unsealed_after_migration" { + module = module.vault_verify_unsealed + depends_on = [step.wait_for_leader_after_migration] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + // Wait for the seal rewrap to complete and verify that no entries failed + step "wait_for_seal_rewrap_after_migration" { + module = module.vault_wait_for_seal_rewrap + depends_on = [ + step.wait_for_leader_after_migration, + step.verify_vault_unsealed_after_migration, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + // Make sure our data is still available after migration + step "verify_read_test_data_after_migration" { + module = module.vault_verify_read_data + depends_on = [step.wait_for_seal_rewrap_after_migration] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + node_public_ips = step.get_cluster_ips_after_migration.follower_public_ips + vault_install_dir = local.vault_install_dir + } + } + + // Make sure we have our secondary seal type after migration + step "verify_seal_type_after_migration" { + // Don't run this on versions less than 1.16.0-beta1 until VAULT-21053 is fixed on prior branches. 
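+ // As a hedged sketch of how this semverconstraint gate behaves (the versions below are
+ // illustrative examples, not values used by this scenario):
+ //   semverconstraint("1.15.4", "< 1.16.0-beta1")       => true  (the step is skipped)
+ //   semverconstraint("1.16.0-beta1", "< 1.16.0-beta1") => false (the step runs)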
+ skip_step = semverconstraint(var.vault_product_version, "< 1.16.0-beta1")
+ module = module.verify_seal_type
+ depends_on = [step.wait_for_seal_rewrap_after_migration]
+
+ providers = {
+ enos = local.enos_provider[matrix.distro]
+ }
+
+ variables {
+ vault_install_dir = local.vault_install_dir
+ vault_hosts = step.create_vault_cluster_targets.hosts
+ seal_type = matrix.secondary_seal
+ }
+ }
+
+ output "audit_device_file_path" {
+ description = "The file path for the file audit device, if enabled"
+ value = step.create_vault_cluster.audit_device_file_path
+ }
+
+ output "cluster_name" {
+ description = "The Vault cluster name"
+ value = step.create_vault_cluster.cluster_name
+ }
+
+ output "hosts" {
+ description = "The Vault cluster target hosts"
+ value = step.create_vault_cluster.target_hosts
+ }
+
+ output "primary_seal_key_name" {
+ description = "The Vault cluster primary seal key name"
+ value = step.create_primary_seal_key.resource_name
+ }
+
+ output "private_ips" {
+ description = "The Vault cluster private IPs"
+ value = step.create_vault_cluster.private_ips
+ }
+
+ output "public_ips" {
+ description = "The Vault cluster public IPs"
+ value = step.create_vault_cluster.public_ips
+ }
+
+ output "root_token" {
+ description = "The Vault cluster root token"
+ value = step.create_vault_cluster.root_token
+ }
+
+ output "recovery_key_shares" {
+ description = "The Vault cluster recovery key shares"
+ value = step.create_vault_cluster.recovery_key_shares
+ }
+
+ output "recovery_keys_b64" {
+ description = "The Vault cluster recovery keys b64"
+ value = step.create_vault_cluster.recovery_keys_b64
+ }
+
+ output "recovery_keys_hex" {
+ description = "The Vault cluster recovery keys hex"
+ value = step.create_vault_cluster.recovery_keys_hex
+ }
+
+ output "secondary_seal_key_name" {
+ description = "The Vault cluster secondary seal key name"
+ value = step.create_secondary_seal_key.resource_name
+ }
+
+ output "unseal_keys_b64" {
+ description = "The Vault cluster unseal keys"
+ value = step.create_vault_cluster.unseal_keys_b64
+ }
+
+ output "unseal_keys_hex" {
+ description = "The Vault cluster unseal keys hex"
+ value = step.create_vault_cluster.unseal_keys_hex
+ }
+}
diff --git a/enos/enos-scenario-smoke.hcl b/enos/enos-scenario-smoke.hcl
new file mode 100644
index 0000000..4f68c92
--- /dev/null
+++ b/enos/enos-scenario-smoke.hcl
@@ -0,0 +1,420 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+scenario "smoke" {
+ matrix {
+ arch = ["amd64", "arm64"]
+ artifact_source = ["local", "crt", "artifactory"]
+ artifact_type = ["bundle", "package"]
+ backend = ["consul", "raft"]
+ consul_version = ["1.12.9", "1.13.9", "1.14.9", "1.15.5", "1.16.1"]
+ distro = ["ubuntu", "rhel"]
+ edition = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
+ seal = ["awskms", "shamir"]
+ seal_ha_beta = ["true", "false"]
+
+ # Our local builder always creates bundles
+ exclude {
+ artifact_source = ["local"]
+ artifact_type = ["package"]
+ }
+
+ # HSM and FIPS 140-2 are only supported on amd64
+ exclude {
+ arch = ["arm64"]
+ edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
+ }
+ }
+
+ terraform_cli = terraform_cli.default
+ terraform = terraform.default
+ providers = [
+ provider.aws.default,
+ provider.enos.ubuntu,
+ provider.enos.rhel
+ ]
+
+ locals {
+ artifact_path = matrix.artifact_source != "artifactory" ? 
abspath(var.vault_artifact_path) : null + enos_provider = { + rhel = provider.enos.rhel + ubuntu = provider.enos.ubuntu + } + manage_service = matrix.artifact_type == "bundle" + vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro] + } + + step "get_local_metadata" { + skip_step = matrix.artifact_source != "local" + module = module.get_local_metadata + } + + step "build_vault" { + module = "build_${matrix.artifact_source}" + + variables { + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] + artifact_path = local.artifact_path + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null + artifactory_username = matrix.artifact_source == "artifactory" ? var.artifactory_username : null + artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.artifact_source == "artifactory" ? matrix.arch : null + product_version = var.vault_product_version + artifact_type = matrix.artifact_type + distro = matrix.artifact_source == "artifactory" ? matrix.distro : null + edition = matrix.artifact_source == "artifactory" ? matrix.edition : null + revision = var.vault_revision + } + } + + step "ec2_info" { + module = module.ec2_info + } + + step "create_vpc" { + module = module.create_vpc + + variables { + common_tags = global.tags + } + } + + step "create_seal_key" { + module = "seal_key_${matrix.seal}" + + variables { + cluster_id = step.create_vpc.cluster_id + common_tags = global.tags + } + } + + // This step reads the contents of the backend license if we're using a Consul backend and + // the edition is "ent". + step "read_backend_license" { + skip_step = matrix.backend == "raft" || var.backend_edition == "ce" + module = module.read_license + + variables { + file_name = global.backend_license_path + } + } + + step "read_vault_license" { + skip_step = matrix.edition == "ce" + module = module.read_license + + variables { + file_name = global.vault_license_path + } + } + + step "create_vault_cluster_targets" { + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_vault_cluster_backend_targets" { + module = matrix.backend == "consul" ? 
module.target_ec2_instances : module.target_ec2_shim + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_backend_cluster" { + module = "backend_${matrix.backend}" + depends_on = [ + step.create_vault_cluster_backend_targets + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_name = step.create_vault_cluster_backend_targets.cluster_name + cluster_tag_key = global.backend_tag_key + license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + release = { + edition = var.backend_edition + version = matrix.consul_version + } + target_hosts = step.create_vault_cluster_backend_targets.hosts + } + } + + step "create_vault_cluster" { + module = module.vault_cluster + depends_on = [ + step.create_backend_cluster, + step.build_vault, + step.create_vault_cluster_targets + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + cluster_name = step.create_vault_cluster_targets.cluster_name + consul_license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + consul_release = matrix.backend == "consul" ? { + edition = var.backend_edition + version = matrix.consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro]) + seal_ha_beta = matrix.seal_ha_beta + seal_key_name = step.create_seal_key.resource_name + seal_type = matrix.seal + storage_backend = matrix.backend + target_hosts = step.create_vault_cluster_targets.hosts + } + } + + // Wait for our cluster to elect a leader + step "wait_for_leader" { + module = module.vault_wait_for_leader + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + timeout = 120 # seconds + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_vault_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_vault_version" { + module = module.vault_verify_version + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_cluster_targets.hosts + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_product_version = matrix.artifact_source == "local" ? 
step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_vault_unsealed" { + module = module.vault_verify_unsealed + depends_on = [step.wait_for_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + step "verify_write_test_data" { + module = module.vault_verify_write_data + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + leader_public_ip = step.get_vault_cluster_ips.leader_public_ip + leader_private_ip = step.get_vault_cluster_ips.leader_private_ip + vault_instances = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_raft_auto_join_voter" { + skip_step = matrix.backend != "raft" + module = module.vault_verify_raft_auto_join_voter + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_replication" { + module = module.vault_verify_replication + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + step "verify_read_test_data" { + module = module.vault_verify_read_data + depends_on = [ + step.verify_write_test_data, + step.verify_replication + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + node_public_ips = step.get_vault_cluster_ips.follower_public_ips + vault_install_dir = local.vault_install_dir + } + } + + step "verify_ui" { + module = module.vault_verify_ui + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path + } + + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name + } + + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.target_hosts + } + + output "private_ips" { + description = "The Vault cluster private IPs" + value = step.create_vault_cluster.private_ips + } + + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips + } + + output "root_token" { + description = "The Vault cluster root token" + value = step.create_vault_cluster.root_token + } + + output "recovery_key_shares" { + description = "The Vault cluster 
recovery key shares"
+ value = step.create_vault_cluster.recovery_key_shares
+ }
+
+ output "recovery_keys_b64" {
+ description = "The Vault cluster recovery keys b64"
+ value = step.create_vault_cluster.recovery_keys_b64
+ }
+
+ output "recovery_keys_hex" {
+ description = "The Vault cluster recovery keys hex"
+ value = step.create_vault_cluster.recovery_keys_hex
+ }
+
+ output "seal_key_name" {
+ description = "The Vault cluster seal key name"
+ value = step.create_seal_key.resource_name
+ }
+
+ output "unseal_keys_b64" {
+ description = "The Vault cluster unseal keys"
+ value = step.create_vault_cluster.unseal_keys_b64
+ }
+
+ output "unseal_keys_hex" {
+ description = "The Vault cluster unseal keys hex"
+ value = step.create_vault_cluster.unseal_keys_hex
+ }
+}
diff --git a/enos/enos-scenario-ui.hcl b/enos/enos-scenario-ui.hcl
new file mode 100644
index 0000000..a234ad7
--- /dev/null
+++ b/enos/enos-scenario-ui.hcl
@@ -0,0 +1,295 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+scenario "ui" {
+ matrix {
+ edition = ["ce", "ent"]
+ backend = ["consul", "raft"]
+ seal_ha_beta = ["true", "false"]
+ }
+
+ terraform_cli = terraform_cli.default
+ terraform = terraform.default
+ providers = [
+ provider.aws.default,
+ provider.enos.ubuntu
+ ]
+
+ locals {
+ arch = "amd64"
+ artifact_type = "bundle"
+ backend_license_path = abspath(var.backend_license_path != null ? var.backend_license_path : joinpath(path.root, "./support/consul.hclic"))
+ backend_tag_key = "VaultStorage"
+ build_tags = {
+ "ce" = ["ui"]
+ "ent" = ["ui", "enterprise", "ent"]
+ }
+ bundle_path = abspath(var.vault_artifact_path)
+ distro = "ubuntu"
+ consul_version = "1.16.1"
+ seal = "awskms"
+ tags = merge({
+ "Project Name" : var.project_name
+ "Project" : "Enos",
+ "Environment" : "ci"
+ }, var.tags)
+ vault_install_dir_packages = {
+ rhel = "/bin"
+ ubuntu = "/usr/bin"
+ }
+ vault_install_dir = var.vault_install_dir
+ vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic"))
+ vault_tag_key = "Type" // enos_vault_start expects Type as the tag key
+ ui_test_filter = var.ui_test_filter != null && try(trimspace(var.ui_test_filter), "") != "" ? var.ui_test_filter : (matrix.edition == "ce") ? "!enterprise" : null
+ }
+
+ step "build_vault" {
+ module = module.build_local
+
+ variables {
+ build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : local.build_tags[matrix.edition]
+ bundle_path = local.bundle_path
+ goarch = local.arch
+ goos = "linux"
+ product_version = var.vault_product_version
+ artifact_type = local.artifact_type
+ revision = var.vault_revision
+ }
+ }
+
+ step "ec2_info" {
+ module = module.ec2_info
+ }
+
+ step "create_vpc" {
+ module = module.create_vpc
+
+ variables {
+ common_tags = local.tags
+ }
+ }
+
+ step "create_seal_key" {
+ module = "seal_key_${local.seal}"
+
+ variables {
+ cluster_id = step.create_vpc.cluster_id
+ common_tags = global.tags
+ }
+ }
+
+ // This step reads the contents of the backend license if we're using a Consul backend and
+ // the edition is "ent". 
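+ // A minimal sketch of this conditional-read pattern (the block and variable names here
+ // are hypothetical, for illustration only):
+ //
+ //   step "read_some_license" {
+ //     skip_step = var.some_edition == "ce"
+ //     module    = module.read_license
+ //   }
+ //
+ // Consumers must guard the output with the same condition, since a skipped step
+ // produces no outputs, e.g.:
+ //
+ //   license = var.some_edition != "ce" ? step.read_some_license.license : null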
+ step "read_backend_license" { + skip_step = matrix.backend == "raft" || var.backend_edition == "ce" + module = module.read_license + + variables { + file_name = local.backend_license_path + } + } + + step "read_vault_license" { + skip_step = matrix.edition == "ce" + module = module.read_license + + variables { + file_name = local.vault_license_path + } + } + + step "create_vault_cluster_targets" { + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids[local.arch][local.distro][var.ubuntu_distro_version] + cluster_tag_key = local.vault_tag_key + common_tags = local.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_vault_cluster_backend_targets" { + module = matrix.backend == "consul" ? module.target_ec2_instances : module.target_ec2_shim + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] + cluster_tag_key = local.backend_tag_key + common_tags = local.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_backend_cluster" { + module = "backend_${matrix.backend}" + depends_on = [ + step.create_vault_cluster_backend_targets, + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_name = step.create_vault_cluster_backend_targets.cluster_name + cluster_tag_key = local.backend_tag_key + license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + release = { + edition = var.backend_edition + version = local.consul_version + } + target_hosts = step.create_vault_cluster_backend_targets.hosts + } + } + + step "create_vault_cluster" { + module = module.vault_cluster + depends_on = [ + step.create_backend_cluster, + step.build_vault, + step.create_vault_cluster_targets + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name + backend_cluster_tag_key = local.backend_tag_key + cluster_name = step.create_vault_cluster_targets.cluster_name + consul_license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + consul_release = matrix.backend == "consul" ? { + edition = var.backend_edition + version = local.consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? 
step.read_vault_license.license : null + local_artifact_path = local.bundle_path + packages = global.distro_packages["ubuntu"] + seal_ha_beta = matrix.seal_ha_beta + seal_key_name = step.create_seal_key.resource_name + seal_type = local.seal + storage_backend = matrix.backend + target_hosts = step.create_vault_cluster_targets.hosts + } + } + + // Wait for our cluster to elect a leader + step "wait_for_leader" { + module = module.vault_wait_for_leader + depends_on = [step.create_vault_cluster] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + timeout = 120 # seconds + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "test_ui" { + module = module.vault_test_ui + depends_on = [step.wait_for_leader] + + variables { + vault_addr = step.create_vault_cluster_targets.hosts[0].public_ip + vault_root_token = step.create_vault_cluster.root_token + vault_unseal_keys = step.create_vault_cluster.recovery_keys_b64 + vault_recovery_threshold = step.create_vault_cluster.recovery_threshold + ui_test_filter = local.ui_test_filter + } + } + + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path + } + + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name + } + + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.target_hosts + } + + output "private_ips" { + description = "The Vault cluster private IPs" + value = step.create_vault_cluster.private_ips + } + + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips + } + + output "recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.recovery_key_shares + } + + output "recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.recovery_keys_b64 + } + + output "recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = step.create_vault_cluster.recovery_keys_hex + } + + output "root_token" { + description = "The Vault cluster root token" + value = step.create_vault_cluster.root_token + } + + output "seal_key_name" { + description = "The Vault cluster seal key name" + value = step.create_seal_key.resource_name + } + + output "ui_test_environment" { + value = step.test_ui.ui_test_environment + description = "The environment variables that are required in order to run the test:enos yarn target" + } + + output "ui_test_stderr" { + description = "The stderr of the ui tests that ran" + value = step.test_ui.ui_test_stderr + } + + output "ui_test_stdout" { + description = "The stdout of the ui tests that ran" + value = step.test_ui.ui_test_stdout + } + + output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = step.create_vault_cluster.unseal_keys_b64 + } + + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys hex" + value = step.create_vault_cluster.unseal_keys_hex + } +} diff --git a/enos/enos-scenario-upgrade.hcl b/enos/enos-scenario-upgrade.hcl new file mode 100644 index 0000000..569264d --- /dev/null +++ b/enos/enos-scenario-upgrade.hcl @@ -0,0 +1,481 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0
+
+scenario "upgrade" {
+ matrix {
+ arch = ["amd64", "arm64"]
+ artifact_source = ["local", "crt", "artifactory"]
+ artifact_type = ["bundle", "package"]
+ backend = ["consul", "raft"]
+ consul_version = ["1.14.9", "1.15.5", "1.16.1"]
+ distro = ["ubuntu", "rhel"]
+ edition = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
+ // NOTE: when backporting the initial version make sure we don't include initial versions that
+ // are a higher minor version than our release candidate. Also, prior to 1.11.x the
+ // /v1/sys/seal-status API has known issues that could cause this scenario to fail when using
+ // those earlier versions.
+ initial_version = ["1.11.12", "1.12.11", "1.13.6", "1.14.2"]
+ seal = ["awskms", "shamir"]
+ seal_ha_beta = ["true", "false"]
+
+ # Our local builder always creates bundles
+ exclude {
+ artifact_source = ["local"]
+ artifact_type = ["package"]
+ }
+
+ # HSM and FIPS 140-2 are only supported on amd64
+ exclude {
+ arch = ["arm64"]
+ edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
+ }
+
+ # FIPS 140-2 editions began at 1.10
+ exclude {
+ edition = ["ent.fips1402", "ent.hsm.fips1402"]
+ initial_version = ["1.8.12", "1.9.10"]
+ }
+ }
+
+ terraform_cli = terraform_cli.default
+ terraform = terraform.default
+ providers = [
+ provider.aws.default,
+ provider.enos.ubuntu,
+ provider.enos.rhel
+ ]
+
+ locals {
+ artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null
+ enos_provider = {
+ rhel = provider.enos.rhel
+ ubuntu = provider.enos.ubuntu
+ }
+ manage_service = matrix.artifact_type == "bundle"
+ vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro]
+ }
+
+ step "get_local_metadata" {
+ skip_step = matrix.artifact_source != "local"
+ module = module.get_local_metadata
+ }
+
+ # This step gets/builds the upgrade artifact that we will upgrade to
+ step "build_vault" {
+ module = "build_${matrix.artifact_source}"
+
+ variables {
+ build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition]
+ artifact_path = local.artifact_path
+ goarch = matrix.arch
+ goos = "linux"
+ artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null
+ artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null
+ artifactory_username = matrix.artifact_source == "artifactory" ? var.artifactory_username : null
+ artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null
+ arch = matrix.artifact_source == "artifactory" ? matrix.arch : null
+ product_version = var.vault_product_version
+ artifact_type = matrix.artifact_type
+ distro = matrix.artifact_source == "artifactory" ? matrix.distro : null
+ edition = matrix.artifact_source == "artifactory" ? matrix.edition : null
+ revision = var.vault_revision
+ }
+ }
+
+ step "ec2_info" {
+ module = module.ec2_info
+ }
+
+ step "create_vpc" {
+ module = module.create_vpc
+
+ variables {
+ common_tags = global.tags
+ }
+ }
+
+ step "create_seal_key" {
+ module = "seal_key_${matrix.seal}"
+
+ variables {
+ cluster_id = step.create_vpc.cluster_id
+ common_tags = global.tags
+ }
+ }
+
+ // This step reads the contents of the backend license if we're using a Consul backend and
+ // the edition is "ent". 
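+ // For orientation: the matrix block above expands into the cartesian product of every
+ // variant value, and each exclude block prunes any combination matching all of its
+ // values. A hypothetical exclude (not part of this scenario) such as:
+ //
+ //   exclude {
+ //     distro          = ["rhel"]
+ //     initial_version = ["1.11.12"]
+ //   }
+ //
+ // would drop only the rhel + 1.11.12 combinations and keep everything else.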
+ step "read_backend_license" { + skip_step = matrix.backend == "raft" || var.backend_edition == "ce" + module = module.read_license + + variables { + file_name = global.backend_license_path + } + } + + step "read_vault_license" { + skip_step = matrix.edition == "ce" + module = module.read_license + + variables { + file_name = global.vault_license_path + } + } + + step "create_vault_cluster_targets" { + module = module.target_ec2_instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]] + cluster_tag_key = global.vault_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_vault_cluster_backend_targets" { + module = matrix.backend == "consul" ? module.target_ec2_instances : module.target_ec2_shim + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + seal_key_names = step.create_seal_key.resource_names + vpc_id = step.create_vpc.id + } + } + + step "create_backend_cluster" { + module = "backend_${matrix.backend}" + depends_on = [ + step.create_vault_cluster_backend_targets, + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_name = step.create_vault_cluster_backend_targets.cluster_name + cluster_tag_key = global.backend_tag_key + license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + release = { + edition = var.backend_edition + version = matrix.consul_version + } + target_hosts = step.create_vault_cluster_backend_targets.hosts + } + } + + step "create_vault_cluster" { + module = module.vault_cluster + depends_on = [ + step.create_backend_cluster, + step.build_vault, + step.create_vault_cluster_targets + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + consul_license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + cluster_name = step.create_vault_cluster_targets.cluster_name + consul_release = matrix.backend == "consul" ? { + edition = var.backend_edition + version = matrix.consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? 
step.read_vault_license.license : null + packages = concat(global.packages, global.distro_packages[matrix.distro]) + release = { + edition = matrix.edition + version = matrix.initial_version + } + seal_ha_beta = matrix.seal_ha_beta + seal_key_name = step.create_seal_key.resource_name + seal_type = matrix.seal + storage_backend = matrix.backend + target_hosts = step.create_vault_cluster_targets.hosts + } + } + + step "get_vault_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_write_test_data" { + module = module.vault_verify_write_data + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + leader_public_ip = step.get_vault_cluster_ips.leader_public_ip + leader_private_ip = step.get_vault_cluster_ips.leader_private_ip + vault_instances = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + # This step upgrades the Vault cluster to the var.vault_product_version + # by getting a bundle or package of that version from the matrix.artifact_source + step "upgrade_vault" { + module = module.vault_upgrade + depends_on = [ + step.create_vault_cluster, + step.verify_write_test_data, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_api_addr = "http://localhost:8200" + vault_instances = step.create_vault_cluster_targets.hosts + vault_local_artifact_path = local.artifact_path + vault_artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + vault_install_dir = local.vault_install_dir + vault_unseal_keys = matrix.seal == "shamir" ? step.create_vault_cluster.unseal_keys_hex : null + vault_seal_type = matrix.seal + } + } + + // Wait for our upgraded cluster to elect a leader + step "wait_for_leader_after_upgrade" { + module = module.vault_wait_for_leader + depends_on = [ + step.create_vault_cluster, + step.upgrade_vault, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + timeout = 120 # seconds + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_updated_vault_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [ + step.create_vault_cluster, + step.upgrade_vault, + step.wait_for_leader_after_upgrade, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_vault_version" { + module = module.vault_verify_version + depends_on = [ + step.get_updated_vault_cluster_ips, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_cluster_targets.hosts + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_product_version = matrix.artifact_source == "local" ? 
step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_vault_unsealed" { + module = module.vault_verify_unsealed + depends_on = [ + step.get_updated_vault_cluster_ips, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + } + } + + step "verify_read_test_data" { + module = module.vault_verify_read_data + depends_on = [ + step.get_updated_vault_cluster_ips, + step.verify_write_test_data, + step.verify_vault_unsealed + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + node_public_ips = step.get_updated_vault_cluster_ips.follower_public_ips + vault_install_dir = local.vault_install_dir + } + } + + step "verify_raft_auto_join_voter" { + skip_step = matrix.backend != "raft" + module = module.vault_verify_raft_auto_join_voter + depends_on = [ + step.get_updated_vault_cluster_ips, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_replication" { + module = module.vault_verify_replication + depends_on = [ + step.get_updated_vault_cluster_ips, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + step "verify_ui" { + module = module.vault_verify_ui + depends_on = [ + step.get_updated_vault_cluster_ips, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path + } + + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name + } + + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.target_hosts + } + + output "private_ips" { + description = "The Vault cluster private IPs" + value = step.create_vault_cluster.private_ips + } + + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips + } + + output "root_token" { + description = "The Vault cluster root token" + value = step.create_vault_cluster.root_token + } + + output "recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.recovery_key_shares + } + + output "recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.recovery_keys_b64 + } + + output "recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = step.create_vault_cluster.recovery_keys_hex + } + + output "seal_key_name" { + description = "The Vault cluster seal key name" + value = step.create_seal_key.resource_name + } + + output "unseal_keys_b64" { + 
description = "The Vault cluster unseal keys" + value = step.create_vault_cluster.unseal_keys_b64 + } + + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys hex" + value = step.create_vault_cluster.unseal_keys_hex + } +} diff --git a/enos/enos-terraform.hcl b/enos/enos-terraform.hcl new file mode 100644 index 0000000..e945f3f --- /dev/null +++ b/enos/enos-terraform.hcl @@ -0,0 +1,34 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +terraform_cli "default" { + plugin_cache_dir = var.terraform_plugin_cache_dir != null ? abspath(var.terraform_plugin_cache_dir) : null + + credentials "app.terraform.io" { + token = var.tfc_api_token + } + + /* + provider_installation { + dev_overrides = { + "app.terraform.io/hashicorp-qti/enos" = abspath("../../enos-provider/dist") + } + direct {} + } + */ +} + +terraform "default" { + required_version = ">= 1.2.0" + + required_providers { + aws = { + source = "hashicorp/aws" + } + + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + version = ">= 0.4.0" + } + } +} diff --git a/enos/enos-variables.hcl b/enos/enos-variables.hcl new file mode 100644 index 0000000..df5e62b --- /dev/null +++ b/enos/enos-variables.hcl @@ -0,0 +1,192 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +variable "artifactory_username" { + type = string + description = "The username to use when testing an artifact from artifactory" + default = null + sensitive = true +} + +variable "artifactory_token" { + type = string + description = "The token to use when authenticating to artifactory" + default = null + sensitive = true +} + +variable "artifactory_host" { + type = string + description = "The artifactory host to search for vault artifacts" + default = "https://artifactory.hashicorp.engineering/artifactory" +} + +variable "artifactory_repo" { + type = string + description = "The artifactory repo to search for vault artifacts" + default = "hashicorp-crt-stable-local*" +} + +variable "aws_region" { + description = "The AWS region where we'll create infrastructure" + type = string + default = "us-east-1" +} + +variable "aws_ssh_keypair_name" { + description = "The AWS keypair to use for SSH" + type = string + default = "enos-ci-ssh-key" +} + +variable "aws_ssh_private_key_path" { + description = "The path to the AWS keypair private key" + type = string + default = "./support/private_key.pem" +} + +variable "backend_edition" { + description = "The backend release edition if applicable" + type = string + default = "ce" // or "ent" +} + +variable "backend_instance_type" { + description = "The instance type to use for the Vault backend. Must be arm64/nitro compatible" + type = string + default = "t4g.small" +} + +variable "backend_license_path" { + description = "The license for the backend if applicable (Consul Enterprise)" + type = string + default = null +} + +variable "backend_log_level" { + description = "The server log level for the backend. 
Supported values include 'trace', 'debug', 'info', 'warn', 'error'"
+ type = string
+ default = "trace"
+}
+
+variable "project_name" {
+ description = "The name of the project"
+ type = string
+ default = "vault-enos-integration"
+}
+
+variable "rhel_distro_version" {
+ description = "The version of RHEL to use"
+ type = string
+ default = "9.1" // or "8.8"
+}
+
+variable "tags" {
+ description = "Tags that will be applied to infrastructure resources that support tagging"
+ type = map(string)
+ default = null
+}
+
+variable "terraform_plugin_cache_dir" {
+ description = "The directory to cache Terraform modules and providers"
+ type = string
+ default = null
+}
+
+variable "tfc_api_token" {
+ description = "The Terraform Cloud QTI Organization API token. This is used to download the enos Terraform provider."
+ type = string
+ sensitive = true
+}
+
+variable "ubuntu_distro_version" {
+ description = "The version of ubuntu to use"
+ type = string
+ default = "22.04" // or "20.04", "18.04"
+}
+
+variable "ui_test_filter" {
+ type = string
+ description = "A test filter to limit the ui tests to execute. Will be appended to the ember test command as '-f=\"<filter>\"'"
+ default = null
+}
+
+variable "ui_run_tests" {
+ type = bool
+ description = "Whether to run the UI tests or not. If set to false a cluster will be created but no tests will be run"
+ default = true
+}
+
+variable "vault_artifact_type" {
+ description = "The type of Vault artifact to use when installing Vault from artifactory. It should be 'package' for .deb or .rpm packages and 'bundle' for .zip bundles"
+ default = "bundle"
+}
+
+variable "vault_artifact_path" {
+ description = "Path to CRT generated or local vault.zip bundle"
+ type = string
+ default = "/tmp/vault.zip"
+}
+
+variable "vault_build_date" {
+ description = "The build date for Vault artifact"
+ type = string
+ default = ""
+}
+
+variable "vault_enable_audit_devices" {
+ description = "If true every audit device will be enabled"
+ type = bool
+ default = true
+}
+
+variable "vault_install_dir" {
+ type = string
+ description = "The directory where the Vault binary will be installed"
+ default = "/opt/vault/bin"
+}
+
+variable "vault_instance_count" {
+ description = "How many instances to create for the Vault cluster"
+ type = number
+ default = 3
+}
+
+variable "vault_license_path" {
+ description = "The path to a valid Vault enterprise edition license. This is only required for non-ce editions"
+ type = string
+ default = null
+}
+
+variable "vault_local_build_tags" {
+ description = "The build tags to pass to the Go compiler for builder:local variants"
+ type = list(string)
+ default = null
+}
+
+variable "vault_log_level" {
+ description = "The server log level for Vault logs. Supported values (in order of detail) are trace, debug, info, warn, and err."
+ type = string
+ default = "trace"
+}
+
+variable "vault_product_version" {
+ description = "The version of Vault we are testing"
+ type = string
+ default = null
+}
+
+variable "vault_revision" {
+ description = "The git sha of Vault artifact we are testing"
+ type = string
+ default = null
+}
+
+variable "vault_upgrade_initial_release" {
+ description = "The Vault release to deploy before upgrading"
+ default = {
+ edition = "ce"
+ // Vault 1.10.5 has a known issue with retry_join.
+ version = "1.10.4"
+ }
+}
diff --git a/enos/enos.vars.hcl b/enos/enos.vars.hcl
new file mode 100644
index 0000000..c7c06f9
--- /dev/null
+++ b/enos/enos.vars.hcl
@@ -0,0 +1,119 @@
+# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0
+
+# artifactory_username is the username to use when testing an artifact stored in artifactory.
+# artifactory_username = "yourname@hashicorp.com"
+
+# artifactory_token is the token to use when authenticating to artifactory.
+# artifactory_token = "yourtoken"
+
+# artifactory_host is the artifactory host to search for vault artifacts.
+# artifactory_host = "https://artifactory.hashicorp.engineering/artifactory"
+
+# artifactory_repo is the artifactory repo to search for vault artifacts.
+# artifactory_repo = "hashicorp-crt-stable-local*"
+
+# aws_region is the AWS region where we'll create infrastructure
+# for the smoke scenario
+# aws_region = "us-east-1"
+
+# aws_ssh_keypair_name is the AWS keypair to use for SSH
+# aws_ssh_keypair_name = "enos-ci-ssh-key"
+
+# aws_ssh_private_key_path is the path to the AWS keypair private key
+# aws_ssh_private_key_path = "./support/private_key.pem"
+
+# backend_edition is the backend (consul) release edition if applicable to the scenario.
+# backend_edition = "ce"
+
+# backend_license_path is the license for the backend if applicable (Consul Enterprise).
+# backend_license_path = "./support/consul.hclic"
+
+# backend_log_level is the server log level for the backend. Supported values include 'trace',
+# 'debug', 'info', 'warn', 'error'
+# backend_log_level = "trace"
+
+# backend_instance_type is the instance type to use for the Vault backend. Must support arm64
+# backend_instance_type = "t4g.small"
+
+# project_name is the name of the project. It will often be used to tag infrastructure
+# resources.
+# project_name = "vault-enos-integration"
+
+# rhel_distro_version is the version of RHEL to use for "distro:rhel" variants.
+# rhel_distro_version = "9.1" // or "8.8"
+
+# tags are a map of tags that will be applied to infrastructure resources that
+# support tagging.
+# tags = { "Project Name" : "Vault", "Something Cool" : "Value" }
+
+# terraform_plugin_cache_dir is the directory to cache Terraform modules and providers.
+# It must exist.
+# terraform_plugin_cache_dir = "/Users/<username>/.terraform/plugin-cache-dir"
+
+# tfc_api_token is the Terraform Cloud QTI Organization API token. We need this
+# to download the enos Terraform provider and the enos Terraform modules.
+# tfc_api_token = "XXXXX.atlasv1.XXXXX..."
+
+# ui_test_filter is the test filter to limit the ui tests to execute for the ui scenario. It will
+# be appended to the ember test command as '-f=\"<filter>\"'.
+# ui_test_filter = "sometest"
+
+# ui_run_tests sets whether to run the UI tests or not for the ui scenario. If set to false a
+# cluster will be created but no tests will be run.
+# ui_run_tests = true
+
+# ubuntu_distro_version is the version of ubuntu to use for "distro:ubuntu" variants
+# ubuntu_distro_version = "22.04" // or "20.04", "18.04"
+
+# vault_artifact_path is the path to CRT generated or local vault.zip bundle. When
+# using the "builder:local" variant a bundle will be built from the current branch.
+# In CI it will use the output of the build workflow.
+# vault_artifact_path = "./dist/vault.zip"
+
+# vault_artifact_type is the type of Vault artifact to use when installing Vault from artifactory.
+# It should be 'package' for .deb or .rpm packages and 'bundle' for .zip bundles.
+
+# vault_build_date is the build date for Vault artifact. 
Some validations will require the binary build
+# date to match.
+# vault_build_date = "2023-07-07T14:06:37Z" // make ci-get-date for example
+
+# vault_enable_audit_devices sets whether or not to enable every audit device. If true
+# a file audit device will be enabled at the path /var/log/vault_audit.log, the syslog
+# audit device will be enabled, and a socket audit device connecting to 127.0.0.1:9090
+# will be enabled. The netcat program is run in listening mode to provide an endpoint
+# that the socket audit device can connect to.
+# vault_enable_audit_devices = true
+
+# vault_install_dir is the directory where the vault binary will be installed on
+# the remote machines.
+# vault_install_dir = "/opt/vault/bin"
+
+# vault_local_binary_path is the path of the local binary that we're upgrading to.
+# vault_local_binary_path = "./support/vault"
+
+# vault_instance_type is the instance type to use for the Vault cluster instances
+# vault_instance_type = "t3.small"
+
+# vault_instance_count is how many instances to create for the Vault cluster.
+# vault_instance_count = 3
+
+# vault_license_path is the path to a valid Vault enterprise edition license.
+# This is only required for non-ce editions.
+# vault_license_path = "./support/vault.hclic"
+
+# vault_local_build_tags overrides the build tags we pass to the Go compiler for builder:local variants.
+# vault_local_build_tags = ["ui", "ent"]
+
+# vault_log_level is the server log level for Vault logs. Supported values (in order of detail) are
+# trace, debug, info, warn, and err.
+# vault_log_level = "trace"
+
+# vault_product_version is the version of Vault we are testing. Some validations will expect the vault
+# binary and cluster to report this version.
+# vault_product_version = "1.15.0"
+
+# vault_revision is the git sha of Vault artifact we are testing. Some validations will expect the vault
+# binary and cluster to report this revision.
+# vault_revision = "df733361af26f8bb29b63704168bbc5ab8d083de"
diff --git a/enos/k8s/enos-modules-k8s.hcl b/enos/k8s/enos-modules-k8s.hcl
new file mode 100644
index 0000000..7651840
--- /dev/null
+++ b/enos/k8s/enos-modules-k8s.hcl
@@ -0,0 +1,52 @@
+# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +module "create_kind_cluster" { + source = "../modules/local_kind_cluster" +} + +module "load_docker_image" { + source = "../modules/load_docker_image" +} + +module "k8s_deploy_vault" { + source = "../modules/k8s_deploy_vault" + + vault_instance_count = var.vault_instance_count +} + +module "k8s_verify_build_date" { + source = "../modules/k8s_vault_verify_build_date" + + vault_instance_count = var.vault_instance_count +} + +module "k8s_verify_replication" { + source = "../modules/k8s_vault_verify_replication" + + vault_instance_count = var.vault_instance_count +} + +module "k8s_verify_ui" { + source = "../modules/k8s_vault_verify_ui" + + vault_instance_count = var.vault_instance_count +} + +module "k8s_verify_version" { + source = "../modules/k8s_vault_verify_version" + + vault_instance_count = var.vault_instance_count + vault_product_version = var.vault_product_version + vault_product_revision = var.vault_product_revision +} + +module "k8s_verify_write_data" { + source = "../modules/k8s_vault_verify_write_data" + + vault_instance_count = var.vault_instance_count +} + +module "read_license" { + source = "../modules/read_license" +} diff --git a/enos/k8s/enos-providers-k8s.hcl b/enos/k8s/enos-providers-k8s.hcl new file mode 100644 index 0000000..7e3d7a7 --- /dev/null +++ b/enos/k8s/enos-providers-k8s.hcl @@ -0,0 +1,10 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "enos" "default" {} + +provider "helm" "default" { + kubernetes { + config_path = abspath(joinpath(path.root, "kubeconfig")) + } +} diff --git a/enos/k8s/enos-scenario-k8s.hcl b/enos/k8s/enos-scenario-k8s.hcl new file mode 100644 index 0000000..6dbd645 --- /dev/null +++ b/enos/k8s/enos-scenario-k8s.hcl @@ -0,0 +1,143 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +scenario "k8s" { + matrix { + edition = ["ce", "ent"] + } + + terraform_cli = terraform_cli.default + terraform = terraform.k8s + + providers = [ + provider.enos.default, + provider.helm.default, + ] + + locals { + image_path = abspath(var.vault_docker_image_archive) + + image_repo = var.vault_image_repository != null ? var.vault_image_repository : matrix.edition == "ce" ? "hashicorp/vault" : "hashicorp/vault-enterprise" + image_tag = replace(var.vault_product_version, "+ent", "-ent") + + // The additional '-0' is required in the constraint since without it, the semver function will + // only compare the non-pre-release parts (Major.Minor.Patch) of the version and the constraint, + // which can lead to unexpected results. 
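+ // A hedged illustration of that behavior (versions chosen for exposition only, not
+ // values used by this scenario):
+ //   semverconstraint("1.11.0-rc1", ">=1.11.0")   // false: the pre-release is excluded
+ //   semverconstraint("1.11.0-rc1", ">=1.11.0-0") // true: the '-0' admits pre-releases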
+ version_includes_build_date = semverconstraint(var.vault_product_version, ">=1.11.0-0") + } + + step "read_license" { + skip_step = matrix.edition == "ce" + module = module.read_license + + variables { + file_name = abspath(joinpath(path.root, "../support/vault.hclic")) + } + } + + step "create_kind_cluster" { + module = module.create_kind_cluster + + variables { + kubeconfig_path = abspath(joinpath(path.root, "kubeconfig")) + } + } + + step "load_docker_image" { + module = module.load_docker_image + + variables { + cluster_name = step.create_kind_cluster.cluster_name + image = local.image_repo + tag = local.image_tag + archive = var.vault_docker_image_archive + } + + depends_on = [step.create_kind_cluster] + } + + step "deploy_vault" { + module = module.k8s_deploy_vault + + variables { + image_tag = step.load_docker_image.tag + context_name = step.create_kind_cluster.context_name + image_repository = step.load_docker_image.repository + kubeconfig_base64 = step.create_kind_cluster.kubeconfig_base64 + vault_edition = matrix.edition + vault_log_level = var.vault_log_level + ent_license = matrix.edition != "ce" ? step.read_license.license : null + } + + depends_on = [step.load_docker_image, step.create_kind_cluster] + } + + step "verify_build_date" { + skip_step = !local.version_includes_build_date + module = module.k8s_verify_build_date + + variables { + vault_pods = step.deploy_vault.vault_pods + vault_root_token = step.deploy_vault.vault_root_token + kubeconfig_base64 = step.create_kind_cluster.kubeconfig_base64 + context_name = step.create_kind_cluster.context_name + } + + depends_on = [step.deploy_vault] + } + + step "verify_replication" { + module = module.k8s_verify_replication + + variables { + vault_pods = step.deploy_vault.vault_pods + vault_edition = matrix.edition + kubeconfig_base64 = step.create_kind_cluster.kubeconfig_base64 + context_name = step.create_kind_cluster.context_name + } + + depends_on = [step.deploy_vault] + } + + step "verify_ui" { + module = module.k8s_verify_ui + skip_step = matrix.edition == "ce" + + variables { + vault_pods = step.deploy_vault.vault_pods + kubeconfig_base64 = step.create_kind_cluster.kubeconfig_base64 + context_name = step.create_kind_cluster.context_name + } + + depends_on = [step.deploy_vault] + } + + step "verify_version" { + module = module.k8s_verify_version + + variables { + vault_pods = step.deploy_vault.vault_pods + vault_root_token = step.deploy_vault.vault_root_token + vault_edition = matrix.edition + kubeconfig_base64 = step.create_kind_cluster.kubeconfig_base64 + context_name = step.create_kind_cluster.context_name + check_build_date = local.version_includes_build_date + vault_build_date = var.vault_build_date + } + + depends_on = [step.deploy_vault] + } + + step "verify_write_data" { + module = module.k8s_verify_write_data + + variables { + vault_pods = step.deploy_vault.vault_pods + vault_root_token = step.deploy_vault.vault_root_token + kubeconfig_base64 = step.create_kind_cluster.kubeconfig_base64 + context_name = step.create_kind_cluster.context_name + } + + depends_on = [step.deploy_vault] + } +} diff --git a/enos/k8s/enos-terraform-k8s.hcl b/enos/k8s/enos-terraform-k8s.hcl new file mode 100644 index 0000000..5b68bca --- /dev/null +++ b/enos/k8s/enos-terraform-k8s.hcl @@ -0,0 +1,25 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +terraform "k8s" { + required_version = ">= 1.2.0" + + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + version = "< 0.4.0" + } + + helm = { + source = "hashicorp/helm" + } + } +} + +terraform_cli "default" { + plugin_cache_dir = var.terraform_plugin_cache_dir != null ? abspath(var.terraform_plugin_cache_dir) : null + + credentials "app.terraform.io" { + token = var.tfc_api_token + } +} diff --git a/enos/k8s/enos-variables-k8s.hcl b/enos/k8s/enos-variables-k8s.hcl new file mode 100644 index 0000000..86bf9d5 --- /dev/null +++ b/enos/k8s/enos-variables-k8s.hcl @@ -0,0 +1,55 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +variable "vault_image_repository" { + description = "The repository for the docker image to load, i.e. hashicorp/vault" + type = string + default = null +} + +variable "vault_log_level" { + description = "The server log level for Vault logs. Supported values (in order of detail) are trace, debug, info, warn, and err." + type = string + default = "info" +} + +variable "vault_product_version" { + description = "The vault product version to test" + type = string + default = null +} + +variable "vault_product_revision" { + type = string + description = "The vault product revision to test" + default = null +} + +variable "vault_docker_image_archive" { + description = "The path to the location of the docker image archive to test" + type = string + default = null +} + +variable "vault_instance_count" { + description = "How many instances to create for the Vault cluster" + type = number + default = 3 +} + +variable "terraform_plugin_cache_dir" { + description = "The directory to cache Terraform modules and providers" + type = string + default = null +} + +variable "tfc_api_token" { + description = "The Terraform Cloud QTI Organization API token." + type = string +} + +variable "vault_build_date" { + description = "The build date for the vault docker image" + type = string + default = "" +} diff --git a/enos/modules/autopilot_upgrade_storageconfig/main.tf b/enos/modules/autopilot_upgrade_storageconfig/main.tf new file mode 100644 index 0000000..68f47d1 --- /dev/null +++ b/enos/modules/autopilot_upgrade_storageconfig/main.tf @@ -0,0 +1,10 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +variable "vault_product_version" {} + +output "storage_addl_config" { + value = { + autopilot_upgrade_version = var.vault_product_version + } +} diff --git a/enos/modules/backend_consul/main.tf b/enos/modules/backend_consul/main.tf new file mode 100644 index 0000000..8092548 --- /dev/null +++ b/enos/modules/backend_consul/main.tf @@ -0,0 +1,56 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_version = ">= 1.2.0" + + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + version = ">= 0.4.4" + } + } +} + +locals { + bin_path = "${var.install_dir}/consul" +} + +resource "enos_bundle_install" "consul" { + for_each = var.target_hosts + + destination = var.install_dir + release = merge(var.release, { product = "consul" }) + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +resource "enos_consul_start" "consul" { + for_each = enos_bundle_install.consul + + bin_path = local.bin_path + data_dir = var.data_dir + config_dir = var.config_dir + config = { + data_dir = var.data_dir + datacenter = "dc1" + retry_join = ["provider=aws tag_key=${var.cluster_tag_key} tag_value=${var.cluster_name}"] + server = true + bootstrap_expect = length(var.target_hosts) + log_level = var.log_level + log_file = var.log_dir + } + license = var.license + unit_name = "consul" + username = "consul" + + transport = { + ssh = { + host = var.target_hosts[each.key].public_ip + } + } +} diff --git a/enos/modules/backend_consul/outputs.tf b/enos/modules/backend_consul/outputs.tf new file mode 100644 index 0000000..fc7a188 --- /dev/null +++ b/enos/modules/backend_consul/outputs.tf @@ -0,0 +1,18 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +output "private_ips" { + description = "Consul cluster target host private_ips" + value = [for host in var.target_hosts : host.private_ip] +} + +output "public_ips" { + description = "Consul cluster target host public_ips" + value = [for host in var.target_hosts : host.public_ip] +} + +output "target_hosts" { + description = "The Consul cluster instances that were created" + + value = var.target_hosts +} diff --git a/enos/modules/backend_consul/variables.tf b/enos/modules/backend_consul/variables.tf new file mode 100644 index 0000000..95e61a5 --- /dev/null +++ b/enos/modules/backend_consul/variables.tf @@ -0,0 +1,76 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +variable "cluster_name" { + type = string + description = "The name of the Consul cluster" + default = null +} + +variable "cluster_tag_key" { + type = string + description = "The tag key for searching for Consul nodes" + default = null +} + +variable "config_dir" { + type = string + description = "The directory where the consul will write config files" + default = "/etc/consul.d" +} + +variable "data_dir" { + type = string + description = "The directory where the consul will store data" + default = "/opt/consul/data" +} + +variable "install_dir" { + type = string + description = "The directory where the consul binary will be installed" + default = "/opt/consul/bin" +} + +variable "license" { + type = string + sensitive = true + description = "The consul enterprise license" + default = null +} + +variable "log_dir" { + type = string + description = "The directory where the consul will write log files" + default = "/var/log/consul.d" +} + +variable "log_level" { + type = string + description = "The consul service log level" + default = "info" + + validation { + condition = contains(["trace", "debug", "info", "warn", "error"], var.log_level) + error_message = "The log_level must be one of 'trace', 'debug', 'info', 'warn', or 'error'." 
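# For context: when this Consul backend is paired with a Vault cluster, the
# start_vault module later in this diff renders a storage stanza for each
# Vault node roughly like the following (sketch only):
#
#   storage "consul" {
#     address = "127.0.0.1:8500"
#     path    = "vault"
#   }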
+ } +} + +variable "release" { + type = object({ + version = string + edition = string + }) + description = "Consul release version and edition to install from releases.hashicorp.com" + default = { + version = "1.15.3" + edition = "ce" + } +} + +variable "target_hosts" { + description = "The target machines host addresses to use for the consul cluster" + type = map(object({ + private_ip = string + public_ip = string + })) +} diff --git a/enos/modules/backend_raft/main.tf b/enos/modules/backend_raft/main.tf new file mode 100644 index 0000000..0101dc5 --- /dev/null +++ b/enos/modules/backend_raft/main.tf @@ -0,0 +1,70 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +// Shim module to handle the fact that Vault doesn't actually need a backend module when we use raft. +terraform { + required_version = ">= 1.2.0" + + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + version = ">= 0.4.0" + } + } +} + +variable "cluster_name" { + default = null +} + +variable "cluster_tag_key" { + default = null +} + +variable "config_dir" { + default = null +} + +variable "consul_log_level" { + default = null +} + +variable "data_dir" { + default = null +} + +variable "install_dir" { + default = null +} + +variable "license" { + default = null +} + +variable "log_dir" { + default = null +} + +variable "log_level" { + default = null +} + +variable "release" { + default = null +} + +variable "target_hosts" { + default = null +} + +output "private_ips" { + value = [for host in var.target_hosts : host.private_ip] +} + +output "public_ips" { + value = [for host in var.target_hosts : host.public_ip] +} + +output "target_hosts" { + value = var.target_hosts +} diff --git a/enos/modules/build_crt/main.tf b/enos/modules/build_crt/main.tf new file mode 100644 index 0000000..de1fda2 --- /dev/null +++ b/enos/modules/build_crt/main.tf @@ -0,0 +1,49 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +# Shim module since CRT provided things will use the crt_bundle_path variable +variable "bundle_path" { + default = "/tmp/vault.zip" +} + +variable "build_tags" { + default = ["ui"] +} + +variable "goarch" { + type = string + description = "The Go architecture target" + default = "amd64" +} + +variable "goos" { + type = string + description = "The Go OS target" + default = "linux" +} + +variable "artifactory_host" { default = null } +variable "artifactory_repo" { default = null } +variable "artifactory_username" { default = null } +variable "artifactory_token" { default = null } +variable "arch" { + default = null +} +variable "artifact_path" { + default = null +} +variable "artifact_type" { + default = null +} +variable "distro" { + default = null +} +variable "edition" { + default = null +} +variable "revision" { + default = null +} +variable "product_version" { + default = null +} diff --git a/enos/modules/build_local/main.tf b/enos/modules/build_local/main.tf new file mode 100644 index 0000000..9b16863 --- /dev/null +++ b/enos/modules/build_local/main.tf @@ -0,0 +1,63 @@ +# Copyright (c) HashiCorp, Inc. 
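# backend_raft above is intentionally a no-op shim: it accepts the same
# variables as backend_consul and ignores them, so a scenario can pick its
# backend by matrix value without branching. A sketch of the calling
# pattern (step and variable wiring assumed, not part of this diff):
#
#   step "create_backend_cluster" {
#     module = "backend_${matrix.backend}"   # backend_raft or backend_consul
#
#     variables {
#       target_hosts = step.create_backend_cluster_targets.hosts
#     }
#   }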
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +variable "bundle_path" { + type = string + default = "/tmp/vault.zip" +} + +variable "build_tags" { + type = list(string) + description = "The build tags to pass to the Go compiler" +} + +variable "goarch" { + type = string + description = "The Go architecture target" + default = "amd64" +} + +variable "goos" { + type = string + description = "The Go OS target" + default = "linux" +} + +variable "artifactory_host" { default = null } +variable "artifactory_repo" { default = null } +variable "artifactory_username" { default = null } +variable "artifactory_token" { default = null } +variable "arch" { default = null } +variable "artifact_path" { default = null } +variable "artifact_type" { default = null } +variable "distro" { default = null } +variable "edition" { default = null } +variable "revision" { default = null } +variable "product_version" { default = null } + +module "local_metadata" { + source = "../get_local_metadata" +} + +resource "enos_local_exec" "build" { + scripts = [abspath("${path.module}/scripts/build.sh")] + + environment = { + BASE_VERSION = module.local_metadata.version_base + BIN_PATH = "dist" + BUNDLE_PATH = var.bundle_path, + GO_TAGS = join(" ", var.build_tags) + GOARCH = var.goarch + GOOS = var.goos + PRERELEASE_VERSION = module.local_metadata.version_pre + VERSION_METADATA = module.local_metadata.version_meta + } +} diff --git a/enos/modules/build_local/scripts/build.sh b/enos/modules/build_local/scripts/build.sh new file mode 100755 index 0000000..26bf9c1 --- /dev/null +++ b/enos/modules/build_local/scripts/build.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +set -eux -o pipefail + +# Install yarn so we can build the UI +npm install --global yarn || true + +export CGO_ENABLED=0 + +root_dir="$(git rev-parse --show-toplevel)" +pushd "$root_dir" > /dev/null +make ci-build-ui ci-build + +: "${BIN_PATH:="dist"}" +: "${BUNDLE_PATH:=$(git rev-parse --show-toplevel)/vault.zip}" +echo "--> Bundling $BIN_PATH/* to $BUNDLE_PATH" +zip -r -j "$BUNDLE_PATH" "$BIN_PATH/" + +popd > /dev/null diff --git a/enos/modules/create_vpc/main.tf b/enos/modules/create_vpc/main.tf new file mode 100644 index 0000000..f412d74 --- /dev/null +++ b/enos/modules/create_vpc/main.tf @@ -0,0 +1,91 @@ +# Copyright (c) HashiCorp, Inc. 
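# A note on scripts/build.sh above: lines like `: "${BIN_PATH:="dist"}"`
# use the POSIX default-assignment idiom. `:` is a no-op command and the
# ${VAR:=default} expansion assigns the default only when VAR is unset or
# empty, so values exported through the enos_local_exec environment always
# take precedence; e.g. after `: "${BIN_PATH:="dist"}"`, BIN_PATH is "dist"
# unless the caller had already set it.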
+# SPDX-License-Identifier: MPL-2.0 + +data "aws_availability_zones" "available" { + state = "available" + + filter { + name = "zone-name" + values = ["*"] + } +} + +resource "random_string" "cluster_id" { + length = 8 + lower = true + upper = false + numeric = false + special = false +} + +resource "aws_vpc" "vpc" { + cidr_block = var.cidr + enable_dns_hostnames = true + enable_dns_support = true + + tags = merge( + var.common_tags, + { + "Name" = var.name + }, + ) +} + +resource "aws_subnet" "subnet" { + count = length(data.aws_availability_zones.available.names) + vpc_id = aws_vpc.vpc.id + cidr_block = cidrsubnet(var.cidr, 8, count.index) + availability_zone = data.aws_availability_zones.available.names[count.index] + map_public_ip_on_launch = true + + tags = merge( + var.common_tags, + { + "Name" = "${var.name}-subnet-${data.aws_availability_zones.available.names[count.index]}" + }, + ) +} + +resource "aws_internet_gateway" "igw" { + vpc_id = aws_vpc.vpc.id + + tags = merge( + var.common_tags, + { + "Name" = "${var.name}-igw" + }, + ) +} + +resource "aws_route" "igw" { + route_table_id = aws_vpc.vpc.default_route_table_id + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.igw.id +} + +resource "aws_security_group" "default" { + vpc_id = aws_vpc.vpc.id + + ingress { + description = "allow_ingress_from_all" + from_port = 0 + to_port = 0 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + description = "allow_egress_from_all" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = merge( + var.common_tags, + { + "Name" = "${var.name}-default" + }, + ) +} diff --git a/enos/modules/create_vpc/outputs.tf b/enos/modules/create_vpc/outputs.tf new file mode 100644 index 0000000..1f98477 --- /dev/null +++ b/enos/modules/create_vpc/outputs.tf @@ -0,0 +1,17 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +output "id" { + description = "Created VPC ID" + value = aws_vpc.vpc.id +} + +output "cidr" { + description = "CIDR for whole VPC" + value = var.cidr +} + +output "cluster_id" { + description = "A unique string associated with the VPC" + value = random_string.cluster_id.result +} diff --git a/enos/modules/create_vpc/variables.tf b/enos/modules/create_vpc/variables.tf new file mode 100644 index 0000000..8b6acd3 --- /dev/null +++ b/enos/modules/create_vpc/variables.tf @@ -0,0 +1,26 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +variable "name" { + type = string + default = "vault-ci" + description = "The name of the VPC" +} + +variable "cidr" { + type = string + default = "10.13.0.0/16" + description = "CIDR block for the VPC" +} + +variable "environment" { + description = "Name of the environment." + type = string + default = "vault-ci" +} + +variable "common_tags" { + description = "Tags to set for all resources" + type = map(string) + default = { "Project" : "vault-ci" } +} diff --git a/enos/modules/ec2_info/main.tf b/enos/modules/ec2_info/main.tf new file mode 100644 index 0000000..5016102 --- /dev/null +++ b/enos/modules/ec2_info/main.tf @@ -0,0 +1,190 @@ +# Copyright (c) HashiCorp, Inc. 
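# The create_vpc module above carves one /24 per availability zone out of
# the VPC CIDR: cidrsubnet(var.cidr, 8, count.index) adds 8 bits to the
# /16 prefix. With the default 10.13.0.0/16 this yields, per AZ index:
#
#   cidrsubnet("10.13.0.0/16", 8, 0)  # => "10.13.0.0/24"
#   cidrsubnet("10.13.0.0/16", 8, 1)  # => "10.13.1.0/24"
#   cidrsubnet("10.13.0.0/16", 8, 2)  # => "10.13.2.0/24"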
+# SPDX-License-Identifier: MPL-2.0 + +locals { + architectures = toset(["arm64", "x86_64"]) + canonical_owner_id = "099720109477" + rhel_owner_id = "309956199498" + ids = { + "arm64" = { + "rhel" = { + "8.8" = data.aws_ami.rhel_88["arm64"].id + "9.1" = data.aws_ami.rhel_91["arm64"].id + } + "ubuntu" = { + "18.04" = data.aws_ami.ubuntu_1804["arm64"].id + "20.04" = data.aws_ami.ubuntu_2004["arm64"].id + "22.04" = data.aws_ami.ubuntu_2204["arm64"].id + } + } + "amd64" = { + "rhel" = { + "7.9" = data.aws_ami.rhel_79.id + "8.8" = data.aws_ami.rhel_88["x86_64"].id + "9.1" = data.aws_ami.rhel_91["x86_64"].id + } + "ubuntu" = { + "18.04" = data.aws_ami.ubuntu_1804["x86_64"].id + "20.04" = data.aws_ami.ubuntu_2004["x86_64"].id + "22.04" = data.aws_ami.ubuntu_2204["x86_64"].id + } + } + } +} + +data "aws_ami" "ubuntu_1804" { + most_recent = true + for_each = local.architectures + + filter { + name = "name" + values = ["ubuntu/images/hvm-ssd/ubuntu-*-18.04-*-server-*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + filter { + name = "architecture" + values = [each.value] + } + + owners = [local.canonical_owner_id] +} + +data "aws_ami" "ubuntu_2004" { + most_recent = true + for_each = local.architectures + + filter { + name = "name" + values = ["ubuntu/images/hvm-ssd/ubuntu-*-20.04-*-server-*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + filter { + name = "architecture" + values = [each.value] + } + + owners = [local.canonical_owner_id] +} + +data "aws_ami" "ubuntu_2204" { + most_recent = true + for_each = local.architectures + + filter { + name = "name" + values = ["ubuntu/images/hvm-ssd/ubuntu-*-22.04-*-server-*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + filter { + name = "architecture" + values = [each.value] + } + + owners = [local.canonical_owner_id] +} + +data "aws_ami" "rhel_79" { + most_recent = true + + # Currently latest latest point release-1 + filter { + name = "name" + values = ["RHEL-7.9*HVM-20*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + filter { + name = "architecture" + values = ["x86_64"] + } + + owners = [local.rhel_owner_id] +} + +data "aws_ami" "rhel_88" { + most_recent = true + for_each = local.architectures + + # Currently latest latest point release-1 + filter { + name = "name" + values = ["RHEL-8.8*HVM-20*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + filter { + name = "architecture" + values = [each.value] + } + + owners = [local.rhel_owner_id] +} + +data "aws_ami" "rhel_91" { + most_recent = true + for_each = local.architectures + + # Currently latest latest point release-1 + filter { + name = "name" + values = ["RHEL-9.1*HVM-20*"] + } + + filter { + name = "virtualization-type" + values = ["hvm"] + } + + filter { + name = "architecture" + values = [each.value] + } + + owners = [local.rhel_owner_id] +} + +data "aws_region" "current" {} + +data "aws_availability_zones" "available" { + state = "available" + + filter { + name = "zone-name" + values = ["*"] + } +} + +output "ami_ids" { + value = local.ids +} + +output "current_region" { + value = data.aws_region.current +} + +output "availability_zones" { + value = data.aws_availability_zones.available +} diff --git a/enos/modules/generate_secondary_token/main.tf b/enos/modules/generate_secondary_token/main.tf new file mode 100644 index 0000000..49a4a15 --- /dev/null +++ b/enos/modules/generate_secondary_token/main.tf @@ -0,0 +1,55 @@ +# Copyright (c) HashiCorp, Inc. 
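# ec2_info above exposes its AMI lookups as a nested map keyed by
# architecture, distro, and version. A consumer sketch (module label is
# illustrative):
#
#   module "ec2_info" {
#     source = "../ec2_info"
#   }
#
#   locals {
#     ami_id = module.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"]
#   }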
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + random = { + source = "hashicorp/random" + version = ">= 3.4.3" + } + } +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "primary_leader_public_ip" { + type = string + description = "Vault primary cluster leader Public IP address" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +locals { + token_id = random_uuid.token_id.id + secondary_token = enos_remote_exec.fetch_secondary_token.stdout +} +resource "random_uuid" "token_id" {} + +resource "enos_remote_exec" "fetch_secondary_token" { + depends_on = [random_uuid.token_id] + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_TOKEN = var.vault_root_token + } + + inline = ["${var.vault_install_dir}/vault write sys/replication/performance/primary/secondary-token id=${local.token_id} |sed -n '/^wrapping_token:/p' |awk '{print $2}'"] + + transport = { + ssh = { + host = var.primary_leader_public_ip + } + } +} + +output "secondary_token" { + value = local.secondary_token +} diff --git a/enos/modules/get_local_metadata/main.tf b/enos/modules/get_local_metadata/main.tf new file mode 100644 index 0000000..aec176c --- /dev/null +++ b/enos/modules/get_local_metadata/main.tf @@ -0,0 +1,58 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +resource "enos_local_exec" "get_build_date" { + scripts = [abspath("${path.module}/scripts/build_date.sh")] +} + +resource "enos_local_exec" "get_revision" { + inline = ["git rev-parse HEAD"] +} + +resource "enos_local_exec" "get_version" { + inline = ["${abspath("${path.module}/scripts/version.sh")} version"] +} + +resource "enos_local_exec" "get_version_base" { + inline = ["${abspath("${path.module}/scripts/version.sh")} version-base"] +} + +resource "enos_local_exec" "get_version_pre" { + inline = ["${abspath("${path.module}/scripts/version.sh")} version-pre"] +} + +resource "enos_local_exec" "get_version_meta" { + inline = ["${abspath("${path.module}/scripts/version.sh")} version-meta"] +} + +output "build_date" { + value = trimspace(enos_local_exec.get_build_date.stdout) +} + +output "revision" { + value = trimspace(enos_local_exec.get_revision.stdout) +} + +output "version" { + value = trimspace(enos_local_exec.get_version.stdout) +} + +output "version_base" { + value = trimspace(enos_local_exec.get_version_base.stdout) +} + +output "version_pre" { + value = trimspace(enos_local_exec.get_version_pre.stdout) +} + +output "version_meta" { + value = trimspace(enos_local_exec.get_version_meta.stdout) +} diff --git a/enos/modules/get_local_metadata/scripts/build_date.sh b/enos/modules/get_local_metadata/scripts/build_date.sh new file mode 100755 index 0000000..918159f --- /dev/null +++ b/enos/modules/get_local_metadata/scripts/build_date.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
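# A note on generate_secondary_token above: `vault write
# sys/replication/performance/primary/secondary-token id=...` prints a
# wrapped-response table, so the sed/awk pipeline keeps the row whose key
# is "wrapping_token:" and prints its second column. Rough shape of the
# table being parsed (values illustrative):
#
#   Key                           Value
#   ---                           -----
#   wrapping_token:               hvs.XXXXXXXXXXXX
#   wrapping_accessor:            XXXXXXXX
#   wrapping_token_ttl:           30m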
+# SPDX-License-Identifier: MPL-2.0 + +set -eu -o pipefail + +pushd "$(git rev-parse --show-toplevel)" > /dev/null +make ci-get-date +popd > /dev/null diff --git a/enos/modules/get_local_metadata/scripts/version.sh b/enos/modules/get_local_metadata/scripts/version.sh new file mode 100755 index 0000000..0b59b27 --- /dev/null +++ b/enos/modules/get_local_metadata/scripts/version.sh @@ -0,0 +1,97 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +set -euo pipefail + +# Get the full version information +# this is only needed for local enos builds in order to get the default version from version_base.go +# this should match the default version that the binary has been built with +# CRT release builds use the new static version from ./release/VERSION +function version() { + local version + local prerelease + local metadata + + version=$(version_base) + prerelease=$(version_pre) + metadata=$(version_metadata) + + if [ -n "$metadata" ] && [ -n "$prerelease" ]; then + echo "$version-$prerelease+$metadata" + elif [ -n "$metadata" ]; then + echo "$version+$metadata" + elif [ -n "$prerelease" ]; then + echo "$version-$prerelease" + else + echo "$version" + fi +} + +# Get the base version +function version_base() { + : "${VAULT_VERSION:=""}" + + if [ -n "$VAULT_VERSION" ]; then + echo "$VAULT_VERSION" + return + fi + + : "${VERSION_FILE:=$(repo_root)/version/VERSION}" + awk -F- '{ print $1 }' < "$VERSION_FILE" +} + +# Get the version pre-release +function version_pre() { + : "${VAULT_PRERELEASE:=""}" + + if [ -n "$VAULT_PRERELEASE" ]; then + echo "$VAULT_PRERELEASE" + return + fi + + : "${VERSION_FILE:=$(repo_root)/version/VERSION}" + awk -F- '{ print $2 }' < "$VERSION_FILE" +} + +# Get the version metadata, which is commonly the edition +function version_metadata() { + : "${VAULT_METADATA:=""}" + + if [ -n "$VAULT_METADATA" ]; then + echo "$VAULT_METADATA" + return + fi + + : "${VERSION_FILE:=$(repo_root)/version/version_base.go}" + awk '$1 == "VersionMetadata" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < "$VERSION_FILE" +} + +# Determine the root directory of the repository +function repo_root() { + git rev-parse --show-toplevel +} + +# Run Enos local +function main() { + case $1 in + version) + version + ;; + version-base) + version_base + ;; + version-pre) + version_pre + ;; + version-meta) + version_metadata + ;; + *) + echo "unknown sub-command" >&2 + exit 1 + ;; + esac +} + +main "$@" diff --git a/enos/modules/k8s_deploy_vault/main.tf b/enos/modules/k8s_deploy_vault/main.tf new file mode 100644 index 0000000..72f4f47 --- /dev/null +++ b/enos/modules/k8s_deploy_vault/main.tf @@ -0,0 +1,165 @@ +# Copyright (c) HashiCorp, Inc. 
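# A worked example of scripts/version.sh above, assuming version/VERSION
# contains "1.14.8-rc1" and VersionMetadata in version/version_base.go is
# "ent":
#
#   version-base -> 1.14.8
#   version-pre  -> rc1
#   version-meta -> ent
#   version      -> 1.14.8-rc1+ent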
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_version = ">= 1.0" + + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + + helm = { + source = "hashicorp/helm" + version = "2.6.0" + } + } +} + +locals { + helm_chart_settings = { + "server.ha.enabled" = "true" + "server.ha.replicas" = var.vault_instance_count + "server.ha.raft.enabled" = "true" + "server.affinity" = "" + "server.image.repository" = var.image_repository + "server.image.tag" = var.image_tag + "server.image.pullPolicy" = "Never" # Forces local image use + "server.resources.requests.cpu" = "50m" + "server.limits.memory" = "200m" + "server.limits.cpu" = "200m" + "server.ha.raft.config" = file("${abspath(path.module)}/raft-config.hcl") + "server.dataStorage.size" = "100m" + "server.logLevel" = var.vault_log_level + } + all_helm_chart_settings = var.ent_license == null ? local.helm_chart_settings : merge(local.helm_chart_settings, { + "server.extraEnvironmentVars.VAULT_LICENSE" = var.ent_license + }) + + vault_address = "http://127.0.0.1:8200" + + instance_indexes = [for idx in range(var.vault_instance_count) : tostring(idx)] + + leader_idx = local.instance_indexes[0] + followers_idx = toset(slice(local.instance_indexes, 1, var.vault_instance_count)) +} + +resource "helm_release" "vault" { + name = "vault" + + repository = "https://helm.releases.hashicorp.com" + chart = "vault" + + dynamic "set" { + for_each = local.all_helm_chart_settings + + content { + name = set.key + value = set.value + } + } +} + +data "enos_kubernetes_pods" "vault_pods" { + kubeconfig_base64 = var.kubeconfig_base64 + context_name = var.context_name + namespace = helm_release.vault.namespace + label_selectors = [ + "app.kubernetes.io/name=vault", + "component=server" + ] + + depends_on = [helm_release.vault] +} + +resource "enos_vault_init" "leader" { + bin_path = "/bin/vault" + vault_addr = local.vault_address + + key_shares = 5 + key_threshold = 3 + + transport = { + kubernetes = { + kubeconfig_base64 = var.kubeconfig_base64 + context_name = var.context_name + pod = data.enos_kubernetes_pods.vault_pods.pods[local.leader_idx].name + namespace = data.enos_kubernetes_pods.vault_pods.pods[local.leader_idx].namespace + } + } +} + +resource "enos_vault_unseal" "leader" { + bin_path = "/bin/vault" + vault_addr = local.vault_address + seal_type = "shamir" + unseal_keys = enos_vault_init.leader.unseal_keys_b64 + + transport = { + kubernetes = { + kubeconfig_base64 = var.kubeconfig_base64 + context_name = var.context_name + pod = data.enos_kubernetes_pods.vault_pods.pods[local.leader_idx].name + namespace = data.enos_kubernetes_pods.vault_pods.pods[local.leader_idx].namespace + } + } + + depends_on = [enos_vault_init.leader] +} + +// We need to manually join the followers since the join request must only happen after the leader +// has been initialized. We could use retry join, but in that case we'd need to restart the follower +// pods once the leader is setup. The default helm deployment configuration for an HA cluster as +// documented here: https://learn.hashicorp.com/tutorials/vault/kubernetes-raft-deployment-guide#configure-vault-helm-chart +// uses a liveness probe that automatically restarts nodes that are not healthy. This works well for +// clusters that are configured with auto-unseal as eventually the nodes would join and unseal. 
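// For reference, the retry-join alternative mentioned above would replace
// the manual joins below with a stanza in the raft storage config, at the
// cost of relying on pod restarts until the leader is ready (sketch, not
// part of this deployment):
//
//   storage "raft" {
//     path = "/vault/data"
//
//     retry_join {
//       leader_api_addr = "http://vault-0.vault-internal:8200"
//     }
//   }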
+resource "enos_remote_exec" "raft_join" { + for_each = local.followers_idx + + inline = [ + // asserts that vault is ready + "for i in 1 2 3 4 5; do vault status > /dev/null 2>&1 && break || sleep 5; done", + // joins the follower to the leader + "vault operator raft join http://vault-0.vault-internal:8200" + ] + + transport = { + kubernetes = { + kubeconfig_base64 = var.kubeconfig_base64 + context_name = var.context_name + pod = data.enos_kubernetes_pods.vault_pods.pods[each.key].name + namespace = data.enos_kubernetes_pods.vault_pods.pods[each.key].namespace + } + } + + depends_on = [enos_vault_unseal.leader] +} + + +resource "enos_vault_unseal" "followers" { + for_each = local.followers_idx + + bin_path = "/bin/vault" + vault_addr = local.vault_address + seal_type = "shamir" + unseal_keys = enos_vault_init.leader.unseal_keys_b64 + + transport = { + kubernetes = { + kubeconfig_base64 = var.kubeconfig_base64 + context_name = var.context_name + pod = data.enos_kubernetes_pods.vault_pods.pods[each.key].name + namespace = data.enos_kubernetes_pods.vault_pods.pods[each.key].namespace + } + } + + depends_on = [enos_remote_exec.raft_join] +} + +output "vault_root_token" { + value = enos_vault_init.leader.root_token +} + +output "vault_pods" { + value = data.enos_kubernetes_pods.vault_pods.pods +} diff --git a/enos/modules/k8s_deploy_vault/raft-config.hcl b/enos/modules/k8s_deploy_vault/raft-config.hcl new file mode 100644 index 0000000..423390b --- /dev/null +++ b/enos/modules/k8s_deploy_vault/raft-config.hcl @@ -0,0 +1,12 @@ +ui = true +listener "tcp" { + address = "[::]:8200" + cluster_address = "[::]:8201" + tls_disable = true +} + +storage "raft" { + path = "/vault/data" +} + +service_registration "kubernetes" {} diff --git a/enos/modules/k8s_deploy_vault/variables.tf b/enos/modules/k8s_deploy_vault/variables.tf new file mode 100644 index 0000000..55fa6f1 --- /dev/null +++ b/enos/modules/k8s_deploy_vault/variables.tf @@ -0,0 +1,42 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +variable "context_name" { + type = string + description = "The name of the k8s context for Vault" +} + +variable "ent_license" { + type = string + description = "The value of a valid Vault Enterprise license" +} + +variable "image_repository" { + type = string + description = "The name of the Vault repository, ie hashicorp/vault or hashicorp/vault-enterprise for the image to deploy" +} + +variable "image_tag" { + type = string + description = "The tag of the vault image to deploy" +} + +variable "kubeconfig_base64" { + type = string + description = "The base64 encoded version of the Kubernetes configuration file" +} + +variable "vault_edition" { + type = string + description = "The Vault product edition" +} + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "vault_log_level" { + description = "The server log level for Vault logs. Supported values (in order of detail) are trace, debug, info, warn, and err." + type = string +} diff --git a/enos/modules/k8s_vault_verify_build_date/main.tf b/enos/modules/k8s_vault_verify_build_date/main.tf new file mode 100644 index 0000000..366497d --- /dev/null +++ b/enos/modules/k8s_vault_verify_build_date/main.tf @@ -0,0 +1,61 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0
+
+
+terraform {
+  required_providers {
+    enos = {
+      source = "app.terraform.io/hashicorp-qti/enos"
+    }
+  }
+}
+
+locals {
+  vault_instances = toset([for idx in range(var.vault_instance_count) : tostring(idx)])
+}
+
+# Get the build date from the vault status command - status_date
+# Format that date with date(1) as ISO-8601 - formatted_date
+# Format that date with awk - awk_date
+# Compare the two formatted dates - date_comparison
+resource "enos_remote_exec" "status_date" {
+  for_each = local.vault_instances
+
+  transport = {
+    kubernetes = {
+      kubeconfig_base64 = var.kubeconfig_base64
+      context_name      = var.context_name
+      pod               = var.vault_pods[each.key].name
+      namespace         = var.vault_pods[each.key].namespace
+    }
+  }
+
+  inline = ["${var.vault_bin_path} status -format=json | grep build_date | cut -d \\\" -f 4"]
+}
+
+resource "enos_remote_exec" "formatted_date" {
+  for_each = local.vault_instances
+
+  transport = {
+    kubernetes = {
+      kubeconfig_base64 = var.kubeconfig_base64
+      context_name      = var.context_name
+      pod               = var.vault_pods[each.key].name
+      namespace         = var.vault_pods[each.key].namespace
+    }
+  }
+
+  inline = ["date -d \"${enos_remote_exec.status_date[each.key].stdout}\" -D '%Y-%m-%dT%H:%M:%SZ' -I"]
+}
+
+resource "enos_local_exec" "awk_date" {
+  for_each = local.vault_instances
+
+  inline = ["echo ${enos_remote_exec.status_date[each.key].stdout} | awk -F\"T\" '{printf $1}'"]
+}
+
+resource "enos_local_exec" "date_comparison" {
+  for_each = local.vault_instances
+
+  inline = ["[[ ${enos_local_exec.awk_date[each.key].stdout} == ${enos_remote_exec.formatted_date[each.key].stdout} ]] && echo \"Verification for build date format ${enos_remote_exec.status_date[each.key].stdout} succeeded\" || { echo \"invalid build_date, must be formatted as RFC 3339: ${enos_remote_exec.status_date[each.key].stdout}\" 1>&2; exit 1; }"]
+}
diff --git a/enos/modules/k8s_vault_verify_build_date/variables.tf b/enos/modules/k8s_vault_verify_build_date/variables.tf
new file mode 100644
index 0000000..d960b78
--- /dev/null
+++ b/enos/modules/k8s_vault_verify_build_date/variables.tf
@@ -0,0 +1,36 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+variable "vault_instance_count" {
+  type        = number
+  description = "How many vault instances are in the cluster"
+}
+
+variable "vault_pods" {
+  type = list(object({
+    name      = string
+    namespace = string
+  }))
+  description = "The vault instances for the cluster to verify"
+}
+
+variable "vault_bin_path" {
+  type        = string
+  description = "The path to the vault binary"
+  default     = "/bin/vault"
+}
+
+variable "vault_root_token" {
+  type        = string
+  description = "The vault root token"
+}
+
+variable "kubeconfig_base64" {
+  type        = string
+  description = "The base64 encoded version of the Kubernetes configuration file"
+}
+
+variable "context_name" {
+  type        = string
+  description = "The name of the k8s context for Vault"
+}
diff --git a/enos/modules/k8s_vault_verify_replication/main.tf b/enos/modules/k8s_vault_verify_replication/main.tf
new file mode 100644
index 0000000..27824dc
--- /dev/null
+++ b/enos/modules/k8s_vault_verify_replication/main.tf
@@ -0,0 +1,42 @@
+# Copyright (c) HashiCorp, Inc.
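# A worked example of the build-date check above: for a status build_date
# of "2023-12-04T17:45:23Z", the remote `date ... -I` path and the local
# awk '{printf $1}' path must both reduce it to "2023-12-04" for the
# comparison to succeed.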
+# SPDX-License-Identifier: MPL-2.0 + + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +locals { + instances = toset([for idx in range(var.vault_instance_count) : tostring(idx)]) +} + +resource "enos_remote_exec" "replication_status" { + for_each = local.instances + + inline = ["vault read -format=json sys/replication/status"] + + transport = { + kubernetes = { + kubeconfig_base64 = var.kubeconfig_base64 + context_name = var.context_name + pod = var.vault_pods[each.key].name + namespace = var.vault_pods[each.key].namespace + } + } +} + +resource "enos_local_exec" "verify_replication_status" { + + for_each = enos_remote_exec.replication_status + + environment = { + STATUS = each.value.stdout + VAULT_EDITION = var.vault_edition + } + + content = abspath("${path.module}/scripts/smoke-verify-replication.sh") +} diff --git a/enos/modules/k8s_vault_verify_replication/scripts/smoke-verify-replication.sh b/enos/modules/k8s_vault_verify_replication/scripts/smoke-verify-replication.sh new file mode 100755 index 0000000..babfabd --- /dev/null +++ b/enos/modules/k8s_vault_verify_replication/scripts/smoke-verify-replication.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +# The Vault replication smoke test, documented in +# https://docs.google.com/document/d/16sjIk3hzFDPyY5A9ncxTZV_9gnpYSF1_Vx6UA1iiwgI/edit#heading=h.kgrxf0f1et25 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +# Replication STATUS endpoint should have data.mode disabled for CE release +if [ "$VAULT_EDITION" == "ce" ]; then + if [ "$(echo "${STATUS}" | jq -r '.data.mode')" != "disabled" ]; then + fail "replication data mode is not disabled for CE release!" + fi +else + if [ "$(echo "${STATUS}" | jq -r '.data.dr')" == "" ]; then + fail "DR replication should be available for an ENT release!" + fi + if [ "$(echo "${STATUS}" | jq -r '.data.performance')" == "" ]; then + fail "Performance replication should be available for an ENT release!" + fi +fi diff --git a/enos/modules/k8s_vault_verify_replication/variables.tf b/enos/modules/k8s_vault_verify_replication/variables.tf new file mode 100644 index 0000000..7d43378 --- /dev/null +++ b/enos/modules/k8s_vault_verify_replication/variables.tf @@ -0,0 +1,30 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "vault_edition" { + type = string + description = "The vault product edition" +} + +variable "vault_pods" { + type = list(object({ + name = string + namespace = string + })) + description = "The vault instances for the cluster to verify" +} + +variable "kubeconfig_base64" { + type = string + description = "The base64 encoded version of the Kubernetes configuration file" +} + +variable "context_name" { + type = string + description = "The name of the k8s context for Vault" +} diff --git a/enos/modules/k8s_vault_verify_ui/main.tf b/enos/modules/k8s_vault_verify_ui/main.tf new file mode 100644 index 0000000..ce57960 --- /dev/null +++ b/enos/modules/k8s_vault_verify_ui/main.tf @@ -0,0 +1,45 @@ +# Copyright (c) HashiCorp, Inc. 
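# The replication check above inspects `vault read -format=json
# sys/replication/status`. Roughly (fields abridged), a CE build reports:
#
#   { "data": { "mode": "disabled" } }
#
# while an Enterprise build includes per-type blocks, e.g.:
#
#   { "data": { "dr": { "mode": "..." }, "performance": { "mode": "..." } } }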
+# SPDX-License-Identifier: MPL-2.0 + + +terraform { + required_providers { + enos = { + version = ">= 0.1.17" + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +locals { + instances = toset([for idx in range(var.vault_instance_count) : tostring(idx)]) +} + +resource "enos_remote_exec" "curl_ui" { + for_each = local.instances + + inline = [ + "curl -s -o /dev/null -w '%%{redirect_url}' http://localhost:8200/", + "curl -s -o /dev/null -Iw '%%{http_code}\n' http://localhost:8200/ui/" + ] + + transport = { + kubernetes = { + kubeconfig_base64 = var.kubeconfig_base64 + context_name = var.context_name + pod = var.vault_pods[each.key].name + namespace = var.vault_pods[each.key].namespace + } + } +} + +resource "enos_local_exec" "verify_ui" { + for_each = enos_remote_exec.curl_ui + + environment = { + REDIRECT_URL = split("\n", each.value.stdout)[0] + UI_URL_RESULT = split("\n", each.value.stdout)[1] + } + + scripts = [abspath("${path.module}/scripts/smoke-verify-ui.sh")] +} diff --git a/enos/modules/k8s_vault_verify_ui/scripts/smoke-verify-ui.sh b/enos/modules/k8s_vault_verify_ui/scripts/smoke-verify-ui.sh new file mode 100755 index 0000000..ce985e7 --- /dev/null +++ b/enos/modules/k8s_vault_verify_ui/scripts/smoke-verify-ui.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +if [ "${REDIRECT_URL}" != "http://localhost:8200/ui/" ]; then + fail "Port 8200 not redirecting to UI" +fi +if [ "${UI_URL_RESULT}" != "200" ]; then + fail "Vault UI is not available" +fi diff --git a/enos/modules/k8s_vault_verify_ui/variables.tf b/enos/modules/k8s_vault_verify_ui/variables.tf new file mode 100644 index 0000000..c39f24e --- /dev/null +++ b/enos/modules/k8s_vault_verify_ui/variables.tf @@ -0,0 +1,25 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "vault_pods" { + type = list(object({ + name = string + namespace = string + })) + description = "The vault instances for the cluster to verify" +} + +variable "kubeconfig_base64" { + type = string + description = "The base64 encoded version of the Kubernetes configuration file" +} + +variable "context_name" { + type = string + description = "The name of the k8s context for Vault" +} diff --git a/enos/modules/k8s_vault_verify_version/main.tf b/enos/modules/k8s_vault_verify_version/main.tf new file mode 100644 index 0000000..23ce8c8 --- /dev/null +++ b/enos/modules/k8s_vault_verify_version/main.tf @@ -0,0 +1,51 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +locals { + instances = toset([for idx in range(var.vault_instance_count) : tostring(idx)]) + expected_version = var.vault_edition == "ce" ? 
var.vault_product_version : "${var.vault_product_version}-ent" +} + +resource "enos_remote_exec" "release_info" { + for_each = local.instances + + environment = { + VAULT_BIN_PATH = var.vault_bin_path + } + + scripts = [abspath("${path.module}/scripts/get-status.sh")] + + transport = { + kubernetes = { + kubeconfig_base64 = var.kubeconfig_base64 + context_name = var.context_name + pod = var.vault_pods[each.key].name + namespace = var.vault_pods[each.key].namespace + } + } +} + +resource "enos_local_exec" "smoke-verify-version" { + for_each = enos_remote_exec.release_info + + environment = { + ACTUAL_VERSION = jsondecode(each.value.stdout).version + BUILD_DATE = var.vault_build_date + CHECK_BUILD_DATE = var.check_build_date + EXPECTED_VERSION = var.vault_product_version, + VAULT_EDITION = var.vault_edition, + VAULT_REVISION = var.vault_product_revision, + VAULT_STATUS = jsonencode(jsondecode(each.value.stdout).status) + } + + scripts = [abspath("${path.module}/scripts/smoke-verify-version.sh")] +} diff --git a/enos/modules/k8s_vault_verify_version/scripts/get-status.sh b/enos/modules/k8s_vault_verify_version/scripts/get-status.sh new file mode 100755 index 0000000..a799ebc --- /dev/null +++ b/enos/modules/k8s_vault_verify_version/scripts/get-status.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env sh +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +set -e + +status=$(${VAULT_BIN_PATH} status -format=json) +version=$(${VAULT_BIN_PATH} version) + +echo "{\"status\": ${status}, \"version\": \"${version}\"}" diff --git a/enos/modules/k8s_vault_verify_version/scripts/smoke-verify-version.sh b/enos/modules/k8s_vault_verify_version/scripts/smoke-verify-version.sh new file mode 100755 index 0000000..a1da7c8 --- /dev/null +++ b/enos/modules/k8s_vault_verify_version/scripts/smoke-verify-version.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +# The Vault smoke test to verify the Vault version installed + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +if [[ "${CHECK_BUILD_DATE}" == "false" ]]; then + expected_build_date="" +else + cfg_build_date="${BUILD_DATE}" + if [[ "${cfg_build_date}" == "" ]]; then + cfg_build_date=$(echo "${VAULT_STATUS}" | jq -Mr .build_date) + fi + expected_build_date=", built $cfg_build_date" +fi + +vault_expected_version="Vault v${EXPECTED_VERSION} (${VAULT_REVISION})" + +case "${VAULT_EDITION}" in + ce) version_expected="${vault_expected_version}${expected_build_date}";; + ent) version_expected="${vault_expected_version}${expected_build_date}";; + ent.hsm) version_expected="${vault_expected_version}${expected_build_date} (cgo)";; + ent.fips1402) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;; + ent.hsm.fips1402) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;; + *) fail "(${VAULT_EDITION}) does not match any known Vault editions" +esac + +version_expected_nosha=$(echo "$version_expected" | awk '!($3="")' | sed 's/ / /' | sed -e 's/[[:space:]]*$//') + +if [[ "${ACTUAL_VERSION}" == "$version_expected_nosha" ]] || [[ "${ACTUAL_VERSION}" == "$version_expected" ]]; then + echo "Version verification succeeded!" 
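# For example, with EXPECTED_VERSION=1.14.8, VAULT_REVISION=abcd1234, and a
# build date of 2023-12-04T17:45:23Z, an ent.hsm edition is expected to
# report:
#   Vault v1.14.8 (abcd1234), built 2023-12-04T17:45:23Z (cgo)
# and version_expected_nosha is the same string with the third field (the
# parenthesized revision) removed.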
+else + echo "Version checking enabled: ${CHECK_BUILD_DATE}" 1>&2 + echo "Given build date: ${BUILD_DATE}" 1>&2 + echo "Interpreted build date: ${cfg_build_date}" 1>&2 + + fail "expected Version=$version_expected or $version_expected_nosha, got: ${ACTUAL_VERSION}" +fi diff --git a/enos/modules/k8s_vault_verify_version/variables.tf b/enos/modules/k8s_vault_verify_version/variables.tf new file mode 100644 index 0000000..58940a8 --- /dev/null +++ b/enos/modules/k8s_vault_verify_version/variables.tf @@ -0,0 +1,62 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "vault_pods" { + type = list(object({ + name = string + namespace = string + })) + description = "The vault instances for the cluster to verify" +} + +variable "vault_bin_path" { + type = string + description = "The path to the vault binary" + default = "/bin/vault" +} + +variable "vault_product_version" { + type = string + description = "The vault product version" +} + +variable "vault_product_revision" { + type = string + description = "The vault product revision" +} + +variable "vault_edition" { + type = string + description = "The vault product edition" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +variable "kubeconfig_base64" { + type = string + description = "The base64 encoded version of the Kubernetes configuration file" +} + +variable "context_name" { + type = string + description = "The name of the k8s context for Vault" +} + +variable "check_build_date" { + type = bool + description = "Whether or not to verify that the version includes the build date" +} + +variable "vault_build_date" { + type = string + description = "The build date of the vault docker image to check" + default = "" +} diff --git a/enos/modules/k8s_vault_verify_write_data/main.tf b/enos/modules/k8s_vault_verify_write_data/main.tf new file mode 100644 index 0000000..5606b89 --- /dev/null +++ b/enos/modules/k8s_vault_verify_write_data/main.tf @@ -0,0 +1,53 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +locals { + instances = toset([for idx in range(var.vault_instance_count) : tostring(idx)]) +} + +resource "enos_remote_exec" "smoke-enable-secrets-kv" { + environment = { + VAULT_TOKEN = var.vault_root_token + } + + inline = ["${var.vault_bin_path} secrets enable -path=\"secret\" kv"] + + transport = { + kubernetes = { + kubeconfig_base64 = var.kubeconfig_base64 + context_name = var.context_name + pod = var.vault_pods[0].name + namespace = var.vault_pods[0].namespace + } + } +} + +# Verify that we can enable the k/v secrets engine and write data to it. 
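# (A read-back sketch, not part of this module: the writes below could be
# confirmed with the matching read, e.g.
#
#   inline = ["${var.vault_bin_path} kv get secret/test"]
#
# using the same transport and VAULT_TOKEN environment.)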
+resource "enos_remote_exec" "smoke-write-test-data" { + depends_on = [enos_remote_exec.smoke-enable-secrets-kv] + for_each = local.instances + + environment = { + VAULT_TOKEN = var.vault_root_token + } + + inline = ["${var.vault_bin_path} kv put secret/test smoke${each.key}=fire"] + + transport = { + kubernetes = { + kubeconfig_base64 = var.kubeconfig_base64 + context_name = var.context_name + pod = var.vault_pods[each.key].name + namespace = var.vault_pods[each.key].namespace + } + } +} diff --git a/enos/modules/k8s_vault_verify_write_data/variables.tf b/enos/modules/k8s_vault_verify_write_data/variables.tf new file mode 100644 index 0000000..d960b78 --- /dev/null +++ b/enos/modules/k8s_vault_verify_write_data/variables.tf @@ -0,0 +1,36 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "vault_pods" { + type = list(object({ + name = string + namespace = string + })) + description = "The vault instances for the cluster to verify" +} + +variable "vault_bin_path" { + type = string + description = "The path to the vault binary" + default = "/bin/vault" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +variable "kubeconfig_base64" { + type = string + description = "The base64 encoded version of the Kubernetes configuration file" +} + +variable "context_name" { + type = string + description = "The name of the k8s context for Vault" +} diff --git a/enos/modules/load_docker_image/main.tf b/enos/modules/load_docker_image/main.tf new file mode 100644 index 0000000..4e5f293 --- /dev/null +++ b/enos/modules/load_docker_image/main.tf @@ -0,0 +1,53 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +variable "cluster_name" { + type = string + description = "The name of the cluster to load the image into" +} + +variable "image" { + type = string + description = "The image name for the image to load, i.e. hashicorp/vault" +} + +variable "tag" { + type = string + description = "The tag for the image to load, i.e. 1.12.0-dev" +} + +variable "archive" { + type = string + description = "The path to the image archive to load" + default = null +} + +resource "enos_local_kind_load_image" "vault" { + cluster_name = var.cluster_name + image = var.image + tag = var.tag + archive = var.archive +} + +output "tag" { + value = var.tag + description = "The tag of the docker image to load without the tag, i.e. 1.10.0" +} + +output "image" { + value = var.image + description = "The tag of the docker image to load without the tag, i.e. vault" +} + +output "repository" { + value = enos_local_kind_load_image.vault.loaded_images.repository + description = "The name of the image's repository, i.e. hashicorp/vault" +} diff --git a/enos/modules/local_kind_cluster/main.tf b/enos/modules/local_kind_cluster/main.tf new file mode 100644 index 0000000..5a35167 --- /dev/null +++ b/enos/modules/local_kind_cluster/main.tf @@ -0,0 +1,53 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + random = { + source = "hashicorp/random" + version = ">= 3.4.3" + } + } +} + +resource "random_pet" "cluster_name" {} + +resource "enos_local_kind_cluster" "this" { + name = random_pet.cluster_name.id + kubeconfig_path = var.kubeconfig_path +} + +variable "kubeconfig_path" { + type = string +} + +output "cluster_name" { + value = random_pet.cluster_name.id +} + +output "kubeconfig_base64" { + value = enos_local_kind_cluster.this.kubeconfig_base64 +} + +output "context_name" { + value = enos_local_kind_cluster.this.context_name +} + +output "host" { + value = enos_local_kind_cluster.this.endpoint +} + +output "client_certificate" { + value = enos_local_kind_cluster.this.client_certificate +} + +output "client_key" { + value = enos_local_kind_cluster.this.client_key +} + +output "cluster_ca_certificate" { + value = enos_local_kind_cluster.this.cluster_ca_certificate +} diff --git a/enos/modules/read_license/main.tf b/enos/modules/read_license/main.tf new file mode 100644 index 0000000..a1358b3 --- /dev/null +++ b/enos/modules/read_license/main.tf @@ -0,0 +1,8 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +variable "file_name" {} + +output "license" { + value = file(var.file_name) +} diff --git a/enos/modules/replication_data/main.tf b/enos/modules/replication_data/main.tf new file mode 100644 index 0000000..448e708 --- /dev/null +++ b/enos/modules/replication_data/main.tf @@ -0,0 +1,104 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +// An arithmetic module for calculating inputs and outputs for various replication steps. + +// Get the first follower out of the hosts set +variable "follower_hosts" { + type = map(object({ + private_ip = string + public_ip = string + })) + default = {} +} + +output "follower_host_1" { + value = try(var.follower_hosts[0], null) +} + +output "follower_public_ip_1" { + value = try(var.follower_hosts[0].public_ip, null) +} + +output "follower_private_ip_1" { + value = try(var.follower_hosts[0].private_ip, null) +} + +output "follower_host_2" { + value = try(var.follower_hosts[1], null) +} + +output "follower_public_ip_2" { + value = try(var.follower_hosts[1].public_ip, null) +} + +output "follower_private_ip_2" { + value = try(var.follower_hosts[1].private_ip, null) +} + +// Calculate our remainder hosts after we've added and removed leader +variable "initial_hosts" { + type = map(object({ + private_ip = string + public_ip = string + })) + default = {} +} + +variable "initial_hosts_count" { + type = number + default = 0 +} + +variable "added_hosts" { + type = map(object({ + private_ip = string + public_ip = string + })) + default = {} +} + +variable "added_hosts_count" { + type = number + default = 0 +} + +variable "removed_primary_host" { + type = object({ + private_ip = string + public_ip = string + }) + default = null +} + +variable "removed_follower_host" { + type = object({ + private_ip = string + public_ip = string + }) + default = null +} + +locals { + remaining_hosts_count = max((var.initial_hosts_count + var.added_hosts_count - 2), 0) + indices = [for idx in range(local.remaining_hosts_count) : idx] + remaining_initial = setsubtract(values(var.initial_hosts), [var.removed_primary_host, var.removed_follower_host]) + remaining_hosts_list = tolist(setunion(values(var.added_hosts), local.remaining_initial)) + remaining_hosts = zipmap(local.indices, 
local.remaining_hosts_list) +} + +output "remaining_initial_count" { + value = length(local.remaining_initial) +} + +output "remaining_initial_hosts" { + value = local.remaining_initial +} + +output "remaining_hosts_count" { + value = local.remaining_hosts_count +} + +output "remaining_hosts" { + value = local.remaining_hosts +} diff --git a/enos/modules/seal_key_awskms/main.tf b/enos/modules/seal_key_awskms/main.tf new file mode 100644 index 0000000..d8ec65b --- /dev/null +++ b/enos/modules/seal_key_awskms/main.tf @@ -0,0 +1,56 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +variable "cluster_id" { + type = string +} + +variable "cluster_meta" { + type = string + default = null +} + +variable "common_tags" { + type = map(string) + default = null +} + +variable "other_resources" { + type = list(string) + default = [] +} + +locals { + cluster_name = var.cluster_meta == null ? var.cluster_id : "${var.cluster_id}-${var.cluster_meta}" +} + +resource "aws_kms_key" "key" { + description = "auto-unseal-key-${local.cluster_name}" + deletion_window_in_days = 7 // 7 is the shortest allowed window + tags = var.common_tags +} + +resource "aws_kms_alias" "alias" { + name = "alias/auto-unseal-key-${local.cluster_name}" + target_key_id = aws_kms_key.key.key_id +} + +output "alias" { + description = "The key alias name" + value = aws_kms_alias.alias.name +} + +output "id" { + description = "The key ID" + value = aws_kms_key.key.key_id +} + +output "resource_name" { + description = "The ARN" + value = aws_kms_key.key.arn +} + +output "resource_names" { + description = "The list of names" + value = compact(concat([aws_kms_key.key.arn], var.other_resources)) +} diff --git a/enos/modules/seal_key_shamir/main.tf b/enos/modules/seal_key_shamir/main.tf new file mode 100644 index 0000000..cdd2d9c --- /dev/null +++ b/enos/modules/seal_key_shamir/main.tf @@ -0,0 +1,17 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +# A shim unseal key module for shamir seal types + +variable "cluster_id" { default = null } +variable "cluster_meta" { default = null } +variable "common_tags" { default = null } +variable "names" { + type = list(string) + default = [] +} + +output "alias" { value = null } +output "id" { value = null } +output "resource_name" { value = null } +output "resource_names" { value = var.names } diff --git a/enos/modules/shutdown_multiple_nodes/main.tf b/enos/modules/shutdown_multiple_nodes/main.tf new file mode 100644 index 0000000..86045db --- /dev/null +++ b/enos/modules/shutdown_multiple_nodes/main.tf @@ -0,0 +1,43 @@ +# Copyright (c) HashiCorp, Inc. 
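# A worked example of the replication_data arithmetic above: starting from
# 3 initial hosts, adding 2 and removing one primary and one follower gives
# remaining_hosts_count = max(3 + 2 - 2, 0) = 3, and zipmap re-keys the
# surviving hosts as { 0 = ..., 1 = ..., 2 = ... }.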
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "old_vault_instances" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The vault cluster instances to be shutdown" +} + +locals { + public_ips = { + for idx in range(var.vault_instance_count) : idx => { + public_ip = values(var.old_vault_instances)[idx].public_ip + private_ip = values(var.old_vault_instances)[idx].private_ip + } + } +} + +resource "enos_remote_exec" "shutdown_multiple_nodes" { + for_each = local.public_ips + inline = ["sudo shutdown -H --no-wall; exit 0"] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/shutdown_node/main.tf b/enos/modules/shutdown_node/main.tf new file mode 100644 index 0000000..f27de68 --- /dev/null +++ b/enos/modules/shutdown_node/main.tf @@ -0,0 +1,25 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +variable "node_public_ip" { + type = string + description = "Node Public IP address" +} + +resource "enos_remote_exec" "shutdown_node" { + inline = ["sudo shutdown -H --no-wall; exit 0"] + + transport = { + ssh = { + host = var.node_public_ip + } + } +} diff --git a/enos/modules/start_vault/main.tf b/enos/modules/start_vault/main.tf new file mode 100644 index 0000000..c2999e8 --- /dev/null +++ b/enos/modules/start_vault/main.tf @@ -0,0 +1,169 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + # We need to specify the provider source in each module until we publish it + # to the public registry + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + version = ">= 0.4.7" + } + } +} + +data "enos_environment" "localhost" {} + +locals { + bin_path = "${var.install_dir}/vault" + environment = local.seal_secondary == null ? var.environment : merge( + var.environment, + { VAULT_ENABLE_SEAL_HA_BETA : tobool(var.seal_ha_beta) }, + ) + // In order to get Terraform to plan we have to use collections with keys + // that are known at plan time. In order for our module to work our var.target_hosts + // must be a map with known keys at plan time. Here we're creating locals + // that keep track of index values that point to our target hosts. + followers = toset(slice(local.instances, 1, length(local.instances))) + instances = [for idx in range(length(var.target_hosts)) : tostring(idx)] + key_shares = { + "awskms" = null + "shamir" = 5 + } + key_threshold = { + "awskms" = null + "shamir" = 3 + } + leader = toset(slice(local.instances, 0, 1)) + recovery_shares = { + "awskms" = 5 + "shamir" = null + } + recovery_threshold = { + "awskms" = 3 + "shamir" = null + } + seals = local.seal_secondary.type == "none" ? 
{ primary = local.seal_primary } : { + primary = local.seal_primary + secondary = local.seal_secondary + } + seals_primary = { + "awskms" = { + type = "awskms" + attributes = { + name = var.seal_alias + priority = var.seal_priority + kms_key_id = var.seal_key_name + } + } + "shamir" = { + type = "shamir" + attributes = null + } + } + seal_primary = local.seals_primary[var.seal_type] + seals_secondary = { + "awskms" = { + type = "awskms" + attributes = { + name = var.seal_alias_secondary + priority = var.seal_priority_secondary + kms_key_id = var.seal_key_name_secondary + } + } + "none" = { + type = "none" + attributes = null + } + } + seal_secondary = local.seals_secondary[var.seal_type_secondary] + storage_config = [for idx, host in var.target_hosts : (var.storage_backend == "raft" ? + merge( + { + node_id = "${var.storage_node_prefix}_${idx}" + }, + var.storage_backend_attrs + ) : + { + address = "127.0.0.1:8500" + path = "vault" + }) + ] +} + +resource "enos_vault_start" "leader" { + for_each = local.leader + + bin_path = local.bin_path + config_dir = var.config_dir + environment = local.environment + config = { + api_addr = "http://${var.target_hosts[each.value].private_ip}:8200" + cluster_addr = "http://${var.target_hosts[each.value].private_ip}:8201" + cluster_name = var.cluster_name + listener = { + type = "tcp" + attributes = { + address = "0.0.0.0:8200" + tls_disable = "true" + } + } + log_level = var.log_level + storage = { + type = var.storage_backend + attributes = ({ for key, value in local.storage_config[each.key] : key => value }) + } + seals = local.seals + ui = true + } + license = var.license + manage_service = var.manage_service + username = var.service_username + unit_name = "vault" + + transport = { + ssh = { + host = var.target_hosts[each.value].public_ip + } + } +} + +resource "enos_vault_start" "followers" { + depends_on = [ + enos_vault_start.leader, + ] + for_each = local.followers + + bin_path = local.bin_path + config_dir = var.config_dir + environment = local.environment + config = { + api_addr = "http://${var.target_hosts[each.value].private_ip}:8200" + cluster_addr = "http://${var.target_hosts[each.value].private_ip}:8201" + cluster_name = var.cluster_name + listener = { + type = "tcp" + attributes = { + address = "0.0.0.0:8200" + tls_disable = "true" + } + } + log_level = var.log_level + storage = { + type = var.storage_backend + attributes = { for key, value in local.storage_config[each.key] : key => value } + } + seals = local.seals + ui = true + } + license = var.license + manage_service = var.manage_service + username = var.service_username + unit_name = "vault" + + transport = { + ssh = { + host = var.target_hosts[each.value].public_ip + } + } +} diff --git a/enos/modules/start_vault/outputs.tf b/enos/modules/start_vault/outputs.tf new file mode 100644 index 0000000..8f82c2f --- /dev/null +++ b/enos/modules/start_vault/outputs.tf @@ -0,0 +1,33 @@ +# Copyright (c) HashiCorp, Inc. 
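# The seal locals above expand seal_type / seal_type_secondary into the
# seals map passed to enos_vault_start. A sketch of the Vault config this
# implies when both are "awskms" (key IDs illustrative; the name/priority
# seal HA fields assume Vault Enterprise >= 1.15):
#
#   seal "awskms" {
#     name       = "primary"
#     priority   = "1"
#     kms_key_id = "arn:aws:kms:..."
#   }
#
#   seal "awskms" {
#     name       = "secondary"
#     priority   = "2"
#     kms_key_id = "arn:aws:kms:..."
#   }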
+# SPDX-License-Identifier: MPL-2.0
+
+output "cluster_name" {
+  description = "The Vault cluster name"
+  value       = var.cluster_name
+}
+
+output "followers" {
+  description = "The follower enos_vault_start resources"
+  value       = enos_vault_start.followers
+}
+
+output "leader" {
+  description = "The leader enos_vault_start resource"
+  value       = enos_vault_start.leader
+}
+
+output "private_ips" {
+  description = "Vault cluster target host private_ips"
+  value       = [for host in var.target_hosts : host.private_ip]
+}
+
+output "public_ips" {
+  description = "Vault cluster target host public_ips"
+  value       = [for host in var.target_hosts : host.public_ip]
+}
+
+output "target_hosts" {
+  description = "The vault cluster instances that were created"
+
+  value = var.target_hosts
}
diff --git a/enos/modules/start_vault/variables.tf b/enos/modules/start_vault/variables.tf
new file mode 100644
index 0000000..6961b86
--- /dev/null
+++ b/enos/modules/start_vault/variables.tf
@@ -0,0 +1,149 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+variable "cluster_name" {
+  type        = string
+  description = "The Vault cluster name"
+}
+
+variable "config_dir" {
+  type        = string
+  description = "The directory to use for Vault configuration"
+  default     = "/etc/vault.d"
+}
+
+variable "environment" {
+  description = "Optional Vault configuration environment variables to set when starting Vault"
+  type        = map(string)
+  default     = null
+}
+
+variable "install_dir" {
+  type        = string
+  description = "The directory where the vault binary will be installed"
+  default     = "/opt/vault/bin"
+}
+
+variable "license" {
+  type        = string
+  sensitive   = true
+  description = "The value of the Vault license"
+  default     = null
+}
+
+variable "log_level" {
+  type        = string
+  description = "The vault service log level"
+  default     = "info"
+
+  validation {
+    condition     = contains(["trace", "debug", "info", "warn", "error"], var.log_level)
+    error_message = "The log_level must be one of 'trace', 'debug', 'info', 'warn', or 'error'."
+  }
+}
+
+variable "manage_service" {
+  type        = bool
+  description = "Manage the Vault service users and systemd unit. Disable this to use the configuration bundled in the RPM and Debian packages"
+  default     = true
+}
+
+variable "seal_ha_beta" {
+  description = "Enable Seal HA on clusters that meet the minimum version requirements and are enterprise editions"
+  default     = true
+}
+
+variable "seal_alias" {
+  type        = string
+  description = "The primary seal alias name"
+  default     = "primary"
+}
+
+variable "seal_alias_secondary" {
+  type        = string
+  description = "The secondary seal alias name"
+  default     = "secondary"
+}
+
+variable "seal_key_name" {
+  type        = string
+  description = "The primary auto-unseal key name"
+  default     = null
+}
+
+variable "seal_key_name_secondary" {
+  type        = string
+  description = "The secondary auto-unseal key name"
+  default     = null
+}
+
+variable "seal_priority" {
+  type        = string
+  description = "The primary seal priority"
+  default     = "1"
+}
+
+variable "seal_priority_secondary" {
+  type        = string
+  description = "The secondary seal priority"
+  default     = "2"
+}
+
+variable "seal_type" {
+  type        = string
+  description = "The method by which to unseal the Vault cluster"
+  default     = "awskms"
+
+  validation {
+    condition     = contains(["awskms", "shamir"], var.seal_type)
+    error_message = "The seal_type must be either awskms or shamir. No other unseal methods are supported."
+  }
+}
+
+variable "seal_type_secondary" {
+  type        = string
+  description = "A secondary HA seal method. Only supported in Vault Enterprise >= 1.15"
+  default     = "none"
+
+  validation {
+    condition     = contains(["awskms", "none"], var.seal_type_secondary)
+    error_message = "The seal_type_secondary must be 'awskms' or 'none'. No other secondary unseal methods are supported."
+  }
+}
+
+variable "service_username" {
+  type        = string
+  description = "The host username to own the vault service"
+  default     = "vault"
+}
+
+variable "storage_backend" {
+  type        = string
+  description = "The storage backend to use"
+  default     = "raft"
+
+  validation {
+    condition     = contains(["raft", "consul"], var.storage_backend)
+    error_message = "The storage_backend must be either raft or consul. No other storage backends are supported."
+  }
+}
+
+variable "storage_backend_attrs" {
+  type        = map(any)
+  description = "An optional set of key value pairs to inject into the storage block"
+  default     = {}
+}
+
+variable "storage_node_prefix" {
+  type        = string
+  description = "A prefix to use for each node in the Vault storage configuration"
+  default     = "node"
+}
+
+variable "target_hosts" {
+  description = "The target machines' host addresses to use for the Vault cluster"
+  type = map(object({
+    private_ip = string
+    public_ip  = string
+  }))
+}
diff --git a/enos/modules/stop_vault/main.tf b/enos/modules/stop_vault/main.tf
new file mode 100644
index 0000000..2d07787
--- /dev/null
+++ b/enos/modules/stop_vault/main.tf
@@ -0,0 +1,38 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+terraform {
+  required_providers {
+    # We need to specify the provider source in each module until we publish it
+    # to the public registry
+    enos = {
+      source  = "app.terraform.io/hashicorp-qti/enos"
+      version = ">= 0.4.0"
+    }
+  }
+}
+
+variable "service_name" {
+  type        = string
+  description = "The Vault systemd service name"
+  default     = "vault"
+}
+
+variable "target_hosts" {
+  description = "The target machines' host addresses to use for the Vault cluster"
+  type = map(object({
+    private_ip = string
+    public_ip  = string
+  }))
+}
+
+resource "enos_remote_exec" "shutdown_multiple_nodes" {
+  for_each = var.target_hosts
+  inline   = ["sudo systemctl stop ${var.service_name}.service; sleep 5"]
+
+  transport = {
+    ssh = {
+      host = each.value.public_ip
+    }
+  }
+}
diff --git a/enos/modules/target_ec2_fleet/main.tf b/enos/modules/target_ec2_fleet/main.tf
new file mode 100644
index 0000000..a8f0355
--- /dev/null
+++ b/enos/modules/target_ec2_fleet/main.tf
@@ -0,0 +1,338 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + # We need to specify the provider source in each module until we publish it + # to the public registry + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + version = ">= 0.3.24" + } + } +} + +data "aws_vpc" "vpc" { + id = var.vpc_id +} + +data "aws_subnets" "vpc" { + filter { + name = "vpc-id" + values = [var.vpc_id] + } +} + +data "aws_iam_policy_document" "target" { + statement { + resources = ["*"] + + actions = [ + "ec2:DescribeInstances", + "secretsmanager:*" + ] + } + + dynamic "statement" { + for_each = var.seal_key_names + + content { + resources = [statement.value] + + actions = [ + "kms:DescribeKey", + "kms:ListKeys", + "kms:Encrypt", + "kms:Decrypt", + "kms:GenerateDataKey" + ] + } + } +} + +data "aws_iam_policy_document" "target_role" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["ec2.amazonaws.com"] + } + } +} + +data "enos_environment" "localhost" {} + +resource "random_string" "random_cluster_name" { + length = 8 + lower = true + upper = false + numeric = false + special = false +} + +resource "random_string" "unique_id" { + length = 4 + lower = true + upper = false + numeric = false + special = false +} + +// ec2:CreateFleet only allows up to 4 InstanceRequirements overrides so we can only ever request +// a fleet across 4 or fewer subnets if we want to bid with InstanceRequirements instead of +// weighted instance types. +resource "random_shuffle" "subnets" { + input = data.aws_subnets.vpc.ids + result_count = 4 +} + +locals { + spot_allocation_strategy = "lowestPrice" + on_demand_allocation_strategy = "lowestPrice" + instances = toset([for idx in range(var.instance_count) : tostring(idx)]) + cluster_name = coalesce(var.cluster_name, random_string.random_cluster_name.result) + name_prefix = "${var.project_name}-${local.cluster_name}-${random_string.unique_id.result}" + fleet_tag = "${local.name_prefix}-spot-fleet-target" + fleet_tags = { + Name = "${local.name_prefix}-${var.cluster_tag_key}-target" + "${var.cluster_tag_key}" = local.cluster_name + Fleet = local.fleet_tag + } +} + +resource "aws_iam_role" "target" { + name = "${local.name_prefix}-target-role" + assume_role_policy = data.aws_iam_policy_document.target_role.json +} + +resource "aws_iam_instance_profile" "target" { + name = "${local.name_prefix}-target-profile" + role = aws_iam_role.target.name +} + +resource "aws_iam_role_policy" "target" { + name = "${local.name_prefix}-target-policy" + role = aws_iam_role.target.id + policy = data.aws_iam_policy_document.target.json +} + +resource "aws_security_group" "target" { + name = "${local.name_prefix}-target" + description = "Target instance security group" + vpc_id = var.vpc_id + + # SSH traffic + ingress { + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + # Vault traffic + ingress { + from_port = 8200 + to_port = 8201 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + formatlist("%s/32", var.ssh_allow_ips) + ]) + } + + # Consul traffic + ingress { + from_port = 8300 + to_port = 8302 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + 
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8301 + to_port = 8302 + protocol = "udp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8500 + to_port = 8503 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8600 + to_port = 8600 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8600 + to_port = 8600 + protocol = "udp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + # Internal traffic + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + self = true + } + + # External traffic + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = merge( + var.common_tags, + { + Name = "${local.name_prefix}-sg" + }, + ) +} + +resource "aws_launch_template" "target" { + name = "${local.name_prefix}-target" + image_id = var.ami_id + key_name = var.ssh_keypair + + iam_instance_profile { + name = aws_iam_instance_profile.target.name + } + + instance_requirements { + burstable_performance = "included" + + memory_mib { + min = var.instance_mem_min + max = var.instance_mem_max + } + + vcpu_count { + min = var.instance_cpu_min + max = var.instance_cpu_max + } + } + + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + security_groups = [aws_security_group.target.id] + } + + tag_specifications { + resource_type = "instance" + + tags = merge( + var.common_tags, + local.fleet_tags, + ) + } +} + +# There are three primary knobs we can turn to try and optimize our costs by +# using a spot fleet: our min and max instance requirements, our max bid +# price, and the allocation strategy to use when fulfilling the spot request. +# We've currently configured our instance requirements to allow for anywhere +# from 2-4 vCPUs and 4-16GB of RAM. We intentionally have a wide range +# to allow for a large instance size pool to be considered. Our next knob is our +# max bid price. As we're using spot fleets to save on instance cost, we never +# want to pay more for an instance than we were on-demand. We've set the max price +# to equal what we pay for t3.medium instances on-demand, which are the smallest +# reliable size for Vault scenarios. The final knob is the allocation strategy +# that AWS will use when looking for instances that meet our resource and cost +# requirements. We're using the "lowestPrice" strategy to get the absolute +# cheapest machines that will fit the requirements, but it comes with a slightly +# higher capacity risk than say, "capacityOptimized" or "priceCapacityOptimized". +# Unless we see capacity issues or instances being shut down then we ought to +# stick with that strategy. 
+resource "aws_ec2_fleet" "targets" { + replace_unhealthy_instances = false + terminate_instances = true // terminate instances when we "delete" the fleet + terminate_instances_with_expiration = false + tags = merge( + var.common_tags, + local.fleet_tags, + ) + type = "instant" // make a synchronous request for the entire fleet + + launch_template_config { + launch_template_specification { + launch_template_id = aws_launch_template.target.id + version = aws_launch_template.target.latest_version + } + + dynamic "override" { + for_each = random_shuffle.subnets.result + + content { + subnet_id = override.value + } + } + } + + on_demand_options { + allocation_strategy = local.on_demand_allocation_strategy + max_total_price = (var.max_price * var.instance_count) + min_target_capacity = var.capacity_type == "on-demand" ? var.instance_count : null + // One of these has to be set to enforce our on-demand target capacity minimum + single_availability_zone = false + single_instance_type = true + } + + spot_options { + allocation_strategy = local.spot_allocation_strategy + // The instance_pools_to_use_count is only valid for the allocation_strategy + // lowestPrice. When we are using that strategy we'll want to always set it + // to non-zero to avoid rebuilding the fleet on a re-run. For any other strategy + // set it to zero to avoid rebuilding the fleet on a re-run. + instance_pools_to_use_count = local.spot_allocation_strategy == "lowestPrice" ? 1 : null + } + + // Try and provision only spot instances and fall back to on-demand. + target_capacity_specification { + default_target_capacity_type = var.capacity_type + spot_target_capacity = var.capacity_type == "spot" ? var.instance_count : 0 + on_demand_target_capacity = var.capacity_type == "on-demand" ? var.instance_count : 0 + target_capacity_unit_type = "units" // units == instance count + total_target_capacity = var.instance_count + } +} + +data "aws_instance" "targets" { + depends_on = [ + aws_ec2_fleet.targets, + ] + for_each = local.instances + + instance_id = aws_ec2_fleet.targets.fleet_instance_set[0].instance_ids[each.key] +} diff --git a/enos/modules/target_ec2_fleet/outputs.tf b/enos/modules/target_ec2_fleet/outputs.tf new file mode 100644 index 0000000..15384bd --- /dev/null +++ b/enos/modules/target_ec2_fleet/outputs.tf @@ -0,0 +1,14 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +output "cluster_name" { + value = local.cluster_name +} + +output "hosts" { + description = "The ec2 fleet target hosts" + value = { for idx in range(var.instance_count) : idx => { + public_ip = data.aws_instance.targets[idx].public_ip + private_ip = data.aws_instance.targets[idx].private_ip + } } +} diff --git a/enos/modules/target_ec2_fleet/variables.tf b/enos/modules/target_ec2_fleet/variables.tf new file mode 100644 index 0000000..499105f --- /dev/null +++ b/enos/modules/target_ec2_fleet/variables.tf @@ -0,0 +1,101 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0
+
+variable "ami_id" {
+  description = "The machine image identifier"
+  type        = string
+}
+
+variable "cluster_name" {
+  type        = string
+  description = "A unique cluster identifier"
+  default     = null
+}
+
+variable "cluster_tag_key" {
+  type        = string
+  description = "The key name for the cluster tag"
+  default     = "TargetCluster"
+}
+
+variable "common_tags" {
+  description = "Common tags for cloud resources"
+  type        = map(string)
+  default = {
+    Project = "vault-ci"
+  }
+}
+
+variable "instance_mem_min" {
+  description = "The minimum amount of memory in mebibytes for each instance in the fleet. (1 MiB = 1024 KiB)"
+  type        = number
+  default     = 4096 // ~4 GB
+}
+
+variable "instance_mem_max" {
+  description = "The maximum amount of memory in mebibytes for each instance in the fleet. (1 MiB = 1024 KiB)"
+  type        = number
+  default     = 16385 // ~16 GB
+}
+
+variable "instance_cpu_min" {
+  description = "The minimum number of vCPUs for each instance in the fleet"
+  type        = number
+  default     = 2
+}
+
+variable "instance_cpu_max" {
+  description = "The maximum number of vCPUs for each instance in the fleet"
+  type        = number
+  default     = 8 // Unlikely we'll ever get that high due to spot price bid protection
+}
+
+variable "instance_count" {
+  description = "The number of target instances to create"
+  type        = number
+  default     = 3
+}
+
+variable "max_price" {
+  description = "The maximum hourly price to pay for each target instance"
+  type        = string
+  default     = "0.0416"
+}
+
+variable "project_name" {
+  description = "A unique project name"
+  type        = string
+}
+
+variable "seal_key_names" {
+  type        = list(string)
+  description = "The key management seal key names"
+  default     = null
+}
+
+variable "ssh_allow_ips" {
+  description = "Allowlisted IP addresses for SSH access to target nodes. The IP address of the machine running Enos will automatically be allowlisted"
+  type        = list(string)
+  default     = []
+}
+
+variable "ssh_keypair" {
+  description = "SSH keypair used to connect to EC2 instances"
+  type        = string
+}
+
+variable "capacity_type" {
+  description = "What capacity type to use for EC2 instances"
+  type        = string
+  default     = "on-demand"
+
+  validation {
+    condition     = contains(["on-demand", "spot"], var.capacity_type)
+    error_message = "The capacity_type must be either 'on-demand' or 'spot'."
+  }
+}
+
+variable "vpc_id" {
+  description = "The identifier of the VPC where the target instances will be created"
+  type        = string
+}
diff --git a/enos/modules/target_ec2_instances/main.tf b/enos/modules/target_ec2_instances/main.tf
new file mode 100644
index 0000000..a1ec2d3
--- /dev/null
+++ b/enos/modules/target_ec2_instances/main.tf
@@ -0,0 +1,259 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + # We need to specify the provider source in each module until we publish it + # to the public registry + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + version = ">= 0.3.24" + } + } +} + +data "aws_vpc" "vpc" { + id = var.vpc_id +} + +data "aws_ami" "ami" { + filter { + name = "image-id" + values = [var.ami_id] + } +} + +data "aws_ec2_instance_type_offerings" "instance" { + filter { + name = "instance-type" + values = [local.instance_type] + } + + location_type = "availability-zone" +} + +data "aws_availability_zones" "available" { + state = "available" + + filter { + name = "zone-name" + values = data.aws_ec2_instance_type_offerings.instance.locations + } +} + +data "aws_subnets" "vpc" { + filter { + name = "availability-zone" + values = data.aws_availability_zones.available.names + } + + filter { + name = "vpc-id" + values = [var.vpc_id] + } +} + +data "aws_iam_policy_document" "target" { + statement { + resources = ["*"] + + actions = [ + "ec2:DescribeInstances", + "secretsmanager:*" + ] + } + + dynamic "statement" { + for_each = var.seal_key_names + + content { + resources = [statement.value] + + actions = [ + "kms:DescribeKey", + "kms:ListKeys", + "kms:Encrypt", + "kms:Decrypt", + "kms:GenerateDataKey" + ] + } + } +} + +data "aws_iam_policy_document" "target_instance_role" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["ec2.amazonaws.com"] + } + } +} + +data "enos_environment" "localhost" {} + +locals { + cluster_name = coalesce(var.cluster_name, random_string.cluster_name.result) + instance_type = local.instance_types[data.aws_ami.ami.architecture] + instance_types = { + "arm64" = var.instance_types["arm64"] + "x86_64" = var.instance_types["amd64"] + } + instances = toset([for idx in range(var.instance_count) : tostring(idx)]) + name_prefix = "${var.project_name}-${local.cluster_name}-${random_string.unique_id.result}" +} + +resource "random_string" "cluster_name" { + length = 8 + lower = true + upper = false + numeric = false + special = false +} + +resource "random_string" "unique_id" { + length = 4 + lower = true + upper = false + numeric = false + special = false +} + +resource "aws_iam_role" "target_instance_role" { + name = "${local.name_prefix}-instance-role" + assume_role_policy = data.aws_iam_policy_document.target_instance_role.json +} + +resource "aws_iam_instance_profile" "target" { + name = "${local.name_prefix}-instance-profile" + role = aws_iam_role.target_instance_role.name +} + +resource "aws_iam_role_policy" "target" { + name = "${local.name_prefix}-role-policy" + role = aws_iam_role.target_instance_role.id + policy = data.aws_iam_policy_document.target.json +} + +resource "aws_security_group" "target" { + name = "${local.name_prefix}-sg" + description = "Target instance security group" + vpc_id = var.vpc_id + + # SSH traffic + ingress { + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + # Vault traffic + ingress { + from_port = 8200 + to_port = 8201 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + formatlist("%s/32", var.ssh_allow_ips) + ]) + } + + # Consul traffic + ingress { + from_port = 8300 + 
to_port = 8302 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8301 + to_port = 8302 + protocol = "udp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8500 + to_port = 8503 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8600 + to_port = 8600 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8600 + to_port = 8600 + protocol = "udp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + # Internal traffic + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + self = true + } + + # External traffic + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = merge( + var.common_tags, + { + Name = "${local.name_prefix}-sg" + }, + ) +} + +resource "aws_instance" "targets" { + for_each = local.instances + + ami = var.ami_id + iam_instance_profile = aws_iam_instance_profile.target.name + instance_type = local.instance_type + key_name = var.ssh_keypair + subnet_id = data.aws_subnets.vpc.ids[tonumber(each.key) % length(data.aws_subnets.vpc.ids)] + vpc_security_group_ids = [aws_security_group.target.id] + + tags = merge( + var.common_tags, + { + Name = "${local.name_prefix}-${var.cluster_tag_key}-instance-target" + "${var.cluster_tag_key}" = local.cluster_name + }, + ) +} diff --git a/enos/modules/target_ec2_instances/outputs.tf b/enos/modules/target_ec2_instances/outputs.tf new file mode 100644 index 0000000..89bb7b8 --- /dev/null +++ b/enos/modules/target_ec2_instances/outputs.tf @@ -0,0 +1,14 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +output "cluster_name" { + value = local.cluster_name +} + +output "hosts" { + description = "The ec2 instance target hosts" + value = { for idx in range(var.instance_count) : idx => { + public_ip = aws_instance.targets[idx].public_ip + private_ip = aws_instance.targets[idx].private_ip + } } +} diff --git a/enos/modules/target_ec2_instances/variables.tf b/enos/modules/target_ec2_instances/variables.tf new file mode 100644 index 0000000..e08f135 --- /dev/null +++ b/enos/modules/target_ec2_instances/variables.tf @@ -0,0 +1,70 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0
+
+variable "ami_id" {
+  description = "The machine image identifier"
+  type        = string
+}
+
+variable "cluster_name" {
+  type        = string
+  description = "A unique cluster identifier"
+  default     = null
+}
+
+variable "cluster_tag_key" {
+  type        = string
+  description = "The key name for the cluster tag"
+  default     = "TargetCluster"
+}
+
+variable "common_tags" {
+  description = "Common tags for cloud resources"
+  type        = map(string)
+  default     = { "Project" : "vault-ci" }
+}
+
+variable "instance_count" {
+  description = "The number of target instances to create"
+  type        = number
+  default     = 3
+}
+
+variable "instance_types" {
+  description = "The instance types to use depending on architecture"
+  type = object({
+    amd64 = string
+    arm64 = string
+  })
+  default = {
+    amd64 = "t3a.medium"
+    arm64 = "t4g.medium"
+  }
+}
+
+variable "project_name" {
+  description = "A unique project name"
+  type        = string
+}
+
+variable "seal_key_names" {
+  type        = list(string)
+  description = "The key management seal key names"
+  default     = null
+}
+
+variable "ssh_allow_ips" {
+  description = "Allowlisted IP addresses for SSH access to target nodes. The IP address of the machine running Enos will automatically be allowlisted"
+  type        = list(string)
+  default     = []
+}
+
+variable "ssh_keypair" {
+  description = "SSH keypair used to connect to EC2 instances"
+  type        = string
+}
+
+variable "vpc_id" {
+  description = "The identifier of the VPC where the target instances will be created"
+  type        = string
+}
diff --git a/enos/modules/target_ec2_shim/main.tf b/enos/modules/target_ec2_shim/main.tf
new file mode 100644
index 0000000..035a958
--- /dev/null
+++ b/enos/modules/target_ec2_shim/main.tf
@@ -0,0 +1,49 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+terraform {
+  required_providers {
+    # We need to specify the provider source in each module until we publish it
+    # to the public registry
+    enos = {
+      source  = "app.terraform.io/hashicorp-qti/enos"
+      version = ">= 0.3.24"
+    }
+  }
+}
+
+variable "ami_id" { default = null }
+variable "cluster_name" { default = null }
+variable "cluster_tag_key" { default = null }
+variable "common_tags" { default = null }
+variable "instance_count" { default = 3 }
+variable "instance_cpu_max" { default = null }
+variable "instance_cpu_min" { default = null }
+variable "instance_mem_max" { default = null }
+variable "instance_mem_min" { default = null }
+variable "instance_types" { default = null }
+variable "max_price" { default = null }
+variable "project_name" { default = null }
+variable "seal_key_names" { default = null }
+variable "ssh_allow_ips" { default = null }
+variable "ssh_keypair" { default = null }
+variable "vpc_id" { default = null }
+
+resource "random_string" "cluster_name" {
+  length  = 8
+  lower   = true
+  upper   = false
+  numeric = false
+  special = false
+}
+
+output "cluster_name" {
+  value = coalesce(var.cluster_name, random_string.cluster_name.result)
+}
+
+output "hosts" {
+  value = { for idx in range(var.instance_count) : idx => {
+    public_ip  = "null-public-${idx}"
+    private_ip = "null-private-${idx}"
+  } }
+}
diff --git a/enos/modules/target_ec2_spot_fleet/main.tf b/enos/modules/target_ec2_spot_fleet/main.tf
new file mode 100644
index 0000000..d3c0abf
--- /dev/null
+++ b/enos/modules/target_ec2_spot_fleet/main.tf
@@ -0,0 +1,456 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + # We need to specify the provider source in each module until we publish it + # to the public registry + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + version = ">= 0.3.24" + } + } +} + +data "aws_vpc" "vpc" { + id = var.vpc_id +} + +data "aws_subnets" "vpc" { + filter { + name = "vpc-id" + values = [var.vpc_id] + } +} + +data "aws_iam_policy_document" "target" { + statement { + resources = ["*"] + + actions = [ + "ec2:DescribeInstances", + "secretsmanager:*" + ] + } + + dynamic "statement" { + for_each = var.seal_key_names + + content { + resources = [statement.value] + + actions = [ + "kms:DescribeKey", + "kms:ListKeys", + "kms:Encrypt", + "kms:Decrypt", + "kms:GenerateDataKey" + ] + } + } +} + +data "aws_iam_policy_document" "target_role" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["ec2.amazonaws.com"] + } + } +} + +data "aws_iam_policy_document" "fleet" { + statement { + resources = ["*"] + + actions = [ + "ec2:DescribeImages", + "ec2:DescribeSubnets", + "ec2:RequestSpotInstances", + "ec2:TerminateInstances", + "ec2:DescribeInstanceStatus", + "ec2:CancelSpotFleetRequests", + "ec2:CreateTags", + "ec2:RunInstances", + "ec2:StartInstances", + "ec2:StopInstances", + ] + } + + statement { + effect = "Deny" + + resources = [ + "arn:aws:ec2:*:*:instance/*", + ] + + actions = [ + "ec2:RunInstances", + ] + + condition { + test = "StringNotEquals" + variable = "ec2:InstanceMarketType" + values = ["spot"] + } + } + + statement { + resources = ["*"] + + actions = [ + "iam:PassRole", + ] + + condition { + test = "StringEquals" + variable = "iam:PassedToService" + values = [ + "ec2.amazonaws.com", + ] + } + } + + statement { + resources = [ + "arn:aws:elasticloadbalancing:*:*:loadbalancer/*", + ] + + actions = [ + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + ] + } + + statement { + resources = [ + "arn:aws:elasticloadbalancing:*:*:*/*" + ] + + actions = [ + "elasticloadbalancing:RegisterTargets" + ] + } +} + +data "aws_iam_policy_document" "fleet_role" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["spotfleet.amazonaws.com"] + } + } +} + +data "enos_environment" "localhost" {} + +resource "random_string" "random_cluster_name" { + length = 8 + lower = true + upper = false + numeric = false + special = false +} + +resource "random_string" "unique_id" { + length = 4 + lower = true + upper = false + numeric = false + special = false +} + +// ec2:RequestSpotFleet only allows up to 4 InstanceRequirements overrides so we can only ever +// request a fleet across 4 or fewer subnets if we want to bid with InstanceRequirements instead of +// weighted instance types. 
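+//
+// As an illustration of the shape this produces (hypothetical subnet IDs,
+// not part of this module): shuffling six VPC subnets with result_count = 4
+// might yield ["subnet-0d4a", "subnet-0a1b", "subnet-0f9c", "subnet-0b2e"],
+// and each of those four IDs then becomes one override in the fleet request
+// below, which is the most ec2:RequestSpotFleet will accept when bidding
+// with InstanceRequirements.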
+resource "random_shuffle" "subnets" { + input = data.aws_subnets.vpc.ids + result_count = 4 +} + +locals { + allocation_strategy = "lowestPrice" + instances = toset([for idx in range(var.instance_count) : tostring(idx)]) + cluster_name = coalesce(var.cluster_name, random_string.random_cluster_name.result) + name_prefix = "${var.project_name}-${local.cluster_name}-${random_string.unique_id.result}" + fleet_tag = "${local.name_prefix}-spot-fleet-target" + fleet_tags = { + Name = "${local.name_prefix}-${var.cluster_tag_key}-target" + "${var.cluster_tag_key}" = local.cluster_name + Fleet = local.fleet_tag + } +} + +resource "aws_iam_role" "target" { + name = "${local.name_prefix}-target-role" + assume_role_policy = data.aws_iam_policy_document.target_role.json +} + +resource "aws_iam_instance_profile" "target" { + name = "${local.name_prefix}-target-profile" + role = aws_iam_role.target.name +} + +resource "aws_iam_role_policy" "target" { + name = "${local.name_prefix}-target-policy" + role = aws_iam_role.target.id + policy = data.aws_iam_policy_document.target.json +} + +resource "aws_iam_role" "fleet" { + name = "${local.name_prefix}-fleet-role" + assume_role_policy = data.aws_iam_policy_document.fleet_role.json +} + +resource "aws_iam_role_policy" "fleet" { + name = "${local.name_prefix}-fleet-policy" + role = aws_iam_role.fleet.id + policy = data.aws_iam_policy_document.fleet.json +} + +resource "aws_security_group" "target" { + name = "${local.name_prefix}-target" + description = "Target instance security group" + vpc_id = var.vpc_id + + # SSH traffic + ingress { + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + # Vault traffic + ingress { + from_port = 8200 + to_port = 8201 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + formatlist("%s/32", var.ssh_allow_ips) + ]) + } + + # Consul traffic + ingress { + from_port = 8300 + to_port = 8302 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8301 + to_port = 8302 + protocol = "udp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8500 + to_port = 8503 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8600 + to_port = 8600 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8600 + to_port = 8600 + protocol = "udp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + # Internal traffic + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + self = true + } + + # External traffic + egress { + from_port = 0 + 
to_port     = 0
+    protocol    = "-1"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  tags = merge(
+    var.common_tags,
+    {
+      Name = "${local.name_prefix}-sg"
+    },
+  )
+}
+
+resource "aws_launch_template" "target" {
+  name          = "${local.name_prefix}-target"
+  image_id      = var.ami_id
+  instance_type = null
+  key_name      = var.ssh_keypair
+
+  iam_instance_profile {
+    name = aws_iam_instance_profile.target.name
+  }
+
+  instance_requirements {
+    burstable_performance = "included"
+
+    memory_mib {
+      min = var.instance_mem_min
+      max = var.instance_mem_max
+    }
+
+    vcpu_count {
+      min = var.instance_cpu_min
+      max = var.instance_cpu_max
+    }
+  }
+
+  network_interfaces {
+    associate_public_ip_address = true
+    delete_on_termination       = true
+    security_groups             = [aws_security_group.target.id]
+  }
+
+  tag_specifications {
+    resource_type = "instance"
+
+    tags = merge(
+      var.common_tags,
+      local.fleet_tags,
+    )
+  }
+}
+
+# There are three primary knobs we can turn to try and optimize our costs by
+# using a spot fleet: our min and max instance requirements, our max bid
+# price, and the allocation strategy to use when fulfilling the spot request.
+# We've currently configured our instance requirements to allow for anywhere
+# from 2-8 vCPUs and 4-16GB of RAM. We intentionally have a wide range
+# to allow for a large instance size pool to be considered. Our next knob is
+# our max bid price. As we're using spot fleets to save on instance cost, we
+# never want to pay more for an instance than we would pay on-demand. We've
+# set the max price to equal what we pay for t3.medium instances on-demand,
+# which are the smallest reliable size for Vault scenarios. The final knob is
+# the allocation strategy that AWS will use when looking for instances that
+# meet our resource and cost requirements. We're using the "lowestPrice"
+# strategy to get the absolute cheapest machines that will fit the
+# requirements, but it comes with a slightly higher capacity risk than, say,
+# "capacityOptimized" or "priceCapacityOptimized". Unless we see capacity
+# issues or instances being shut down, we ought to stick with that strategy.
+resource "aws_spot_fleet_request" "targets" {
+  allocation_strategy = local.allocation_strategy
+  fleet_type          = "request"
+  iam_fleet_role      = aws_iam_role.fleet.arn
+  // The instance_pools_to_use_count is only valid for the allocation_strategy
+  // lowestPrice. When we use that strategy we always set it to 1; for any
+  // other strategy we set it to zero. Either way this avoids rebuilding the
+  // fleet on a re-run.
+  instance_pools_to_use_count   = local.allocation_strategy == "lowestPrice" ? 1 : 0
+  spot_price                    = var.max_price
+  target_capacity               = var.instance_count
+  terminate_instances_on_delete = true
+  wait_for_fulfillment          = true
+
+  launch_template_config {
+    launch_template_specification {
+      id      = aws_launch_template.target.id
+      version = aws_launch_template.target.latest_version
+    }
+
+    // We cannot currently use more than one subnet[0]. Until the bug has been resolved
+    // we'll choose a random subnet. It would be ideal to bid across all subnets to get
+    // the absolute cheapest available at the time of bidding.
+    //
+    // [0] https://github.com/hashicorp/terraform-provider-aws/issues/30505
+
+    /*
+    dynamic "overrides" {
+      for_each = random_shuffle.subnets.result
+
+      content {
+        subnet_id = overrides.value
+      }
+    }
+    */
+
+    overrides {
+      subnet_id = random_shuffle.subnets.result[0]
+    }
+  }
+
+  tags = merge(
+    var.common_tags,
+    local.fleet_tags,
+  )
+}
+
+resource "time_sleep" "wait_for_fulfillment" {
+  depends_on      = [aws_spot_fleet_request.targets]
+  create_duration = "2s"
+}
+
+data "aws_instances" "targets" {
+  depends_on = [
+    time_sleep.wait_for_fulfillment,
+    aws_spot_fleet_request.targets,
+  ]
+
+  instance_tags = local.fleet_tags
+  instance_state_names = [
+    "pending",
+    "running",
+  ]
+
+  filter {
+    name   = "image-id"
+    values = [var.ami_id]
+  }
+
+  filter {
+    name   = "iam-instance-profile.arn"
+    values = [aws_iam_instance_profile.target.arn]
+  }
+}
+
+data "aws_instance" "targets" {
+  depends_on = [
+    aws_spot_fleet_request.targets,
+    data.aws_instances.targets
+  ]
+  for_each = local.instances
+
+  instance_id = data.aws_instances.targets.ids[each.key]
+}
diff --git a/enos/modules/target_ec2_spot_fleet/outputs.tf b/enos/modules/target_ec2_spot_fleet/outputs.tf
new file mode 100644
index 0000000..15384bd
--- /dev/null
+++ b/enos/modules/target_ec2_spot_fleet/outputs.tf
@@ -0,0 +1,14 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+output "cluster_name" {
+  value = local.cluster_name
+}
+
+output "hosts" {
+  description = "The ec2 fleet target hosts"
+  value = { for idx in range(var.instance_count) : idx => {
+    public_ip  = data.aws_instance.targets[idx].public_ip
+    private_ip = data.aws_instance.targets[idx].private_ip
+  } }
+}
diff --git a/enos/modules/target_ec2_spot_fleet/variables.tf b/enos/modules/target_ec2_spot_fleet/variables.tf
new file mode 100644
index 0000000..3746be1
--- /dev/null
+++ b/enos/modules/target_ec2_spot_fleet/variables.tf
@@ -0,0 +1,90 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+variable "ami_id" {
+  description = "The machine image identifier"
+  type        = string
+}
+
+variable "cluster_name" {
+  type        = string
+  description = "A unique cluster identifier"
+  default     = null
+}
+
+variable "cluster_tag_key" {
+  type        = string
+  description = "The key name for the cluster tag"
+  default     = "TargetCluster"
+}
+
+variable "common_tags" {
+  description = "Common tags for cloud resources"
+  type        = map(string)
+  default = {
+    Project = "Vault"
+  }
+}
+
+variable "instance_mem_min" {
+  description = "The minimum amount of memory in mebibytes for each instance in the fleet. (1 MiB = 1024 KiB)"
+  type        = number
+  default     = 4096 // ~4 GB
+}
+
+variable "instance_mem_max" {
+  description = "The maximum amount of memory in mebibytes for each instance in the fleet. (1 MiB = 1024 KiB)"
+  type        = number
+  default     = 16385 // ~16 GB
+}
+
+variable "instance_cpu_min" {
+  description = "The minimum number of vCPUs for each instance in the fleet"
+  type        = number
+  default     = 2
+}
+
+variable "instance_cpu_max" {
+  description = "The maximum number of vCPUs for each instance in the fleet"
+  type        = number
+  default     = 8 // Unlikely we'll ever get that high due to spot price bid protection
+}
+
+variable "instance_count" {
+  description = "The number of target instances to create"
+  type        = number
+  default     = 3
+}
+
+variable "project_name" {
+  description = "A unique project name"
+  type        = string
+}
+
+variable "max_price" {
+  description = "The maximum hourly price to pay for each target instance"
+  type        = string
+  default     = "0.0416"
+}
+
+variable "seal_key_names" {
+  type        = list(string)
+  description = "The key management seal key names"
+  default     = null
+}
+
+variable "ssh_allow_ips" {
+  description = "Allowlisted IP addresses for SSH access to target nodes. The IP address of the machine running Enos will automatically be allowlisted"
+  type        = list(string)
+  default     = []
+}
+
+variable "ssh_keypair" {
+  description = "SSH keypair used to connect to EC2 instances"
+  type        = string
+}
+
+variable "vpc_id" {
+  description = "The identifier of the VPC where the target instances will be created"
+  type        = string
+}
diff --git a/enos/modules/vault_agent/main.tf b/enos/modules/vault_agent/main.tf
new file mode 100644
index 0000000..9b95ca0
--- /dev/null
+++ b/enos/modules/vault_agent/main.tf
@@ -0,0 +1,72 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+terraform {
+  required_providers {
+    aws = {
+      source = "hashicorp/aws"
+    }
+    enos = {
+      source = "app.terraform.io/hashicorp-qti/enos"
+    }
+  }
+}
+
+variable "vault_agent_template_destination" {
+  type        = string
+  description = "The destination of the template rendered by Agent"
+}
+
+variable "vault_agent_template_contents" {
+  type        = string
+  description = "The template contents to be rendered by Agent"
+}
+
+variable "vault_root_token" {
+  type        = string
+  description = "The Vault root token"
+}
+
+variable "vault_instances" {
+  type = map(object({
+    private_ip = string
+    public_ip  = string
+  }))
+  description = "The Vault cluster instances that were created"
+}
+
+variable "vault_instance_count" {
+  type        = number
+  description = "How many vault instances are in the cluster"
+}
+
+variable "vault_install_dir" {
+  type        = string
+  description = "The directory where the Vault binary will be installed"
+}
+
+locals {
+  vault_instances = {
+    for idx in range(var.vault_instance_count) : idx => {
+      public_ip  = values(var.vault_instances)[idx].public_ip
+      private_ip = values(var.vault_instances)[idx].private_ip
+    }
+  }
+}
+
+resource "enos_remote_exec" "set_up_approle_auth_and_agent" {
+  environment = {
+    VAULT_INSTALL_DIR                = var.vault_install_dir,
+    VAULT_TOKEN                      = var.vault_root_token,
+    VAULT_AGENT_TEMPLATE_DESTINATION = var.vault_agent_template_destination,
+    VAULT_AGENT_TEMPLATE_CONTENTS    = var.vault_agent_template_contents,
+  }
+
+  scripts = [abspath("${path.module}/scripts/set-up-approle-and-agent.sh")]
+
+  transport = {
+    ssh = {
+      host = local.vault_instances[0].public_ip
+    }
+  }
+}
diff --git a/enos/modules/vault_agent/scripts/set-up-approle-and-agent.sh b/enos/modules/vault_agent/scripts/set-up-approle-and-agent.sh
new file mode 100644
index 0000000..1264df8
--- /dev/null
+++ b/enos/modules/vault_agent/scripts/set-up-approle-and-agent.sh
@@ -0,0 +1,95 @@
+#!/usr/bin/env bash
+# Copyright (c)
HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +set -e + +binpath=${VAULT_INSTALL_DIR}/vault + +fail() { + echo "$1" 1>&2 + return 1 +} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_ADDR='http://127.0.0.1:8200' +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +# If approle was already enabled, disable it as we're about to re-enable it (the || true is so we don't fail if it doesn't already exist) +$binpath auth disable approle || true + +$binpath auth enable approle + +$binpath write auth/approle/role/agent-role secret_id_ttl=700h token_num_uses=1000 token_ttl=600h token_max_ttl=700h secret_id_num_uses=1000 + +ROLEID=$($binpath read --format=json auth/approle/role/agent-role/role-id | jq -r '.data.role_id') + +if [[ "$ROLEID" == '' ]]; then + fail "expected ROLEID to be nonempty, but it is empty" +fi + +SECRETID=$($binpath write -f --format=json auth/approle/role/agent-role/secret-id | jq -r '.data.secret_id') + +if [[ "$SECRETID" == '' ]]; then + fail "expected SECRETID to be nonempty, but it is empty" +fi + +echo "$ROLEID" > /tmp/role-id +echo "$SECRETID" > /tmp/secret-id + +cat > /tmp/vault-agent.hcl <<- EOM +pid_file = "/tmp/pidfile" + +vault { + address = "http://127.0.0.1:8200" + tls_skip_verify = true + retry { + num_retries = 10 + } +} + +cache { + enforce_consistency = "always" + use_auto_auth_token = true +} + +listener "tcp" { + address = "127.0.0.1:8100" + tls_disable = true +} + +template { + destination = "${VAULT_AGENT_TEMPLATE_DESTINATION}" + contents = "${VAULT_AGENT_TEMPLATE_CONTENTS}" + exec { + command = "pkill -F /tmp/pidfile" + } +} + +auto_auth { + method { + type = "approle" + config = { + role_id_file_path = "/tmp/role-id" + secret_id_file_path = "/tmp/secret-id" + } + } + sink { + type = "file" + config = { + path = "/tmp/token" + } + } +} +EOM + +# If Agent is still running from a previous run, kill it +pkill -F /tmp/pidfile || true + +# If the template file already exists, remove it +rm "${VAULT_AGENT_TEMPLATE_DESTINATION}" || true + +# Run agent (it will kill itself when it finishes rendering the template) +$binpath agent -config=/tmp/vault-agent.hcl > /tmp/agent-logs.txt 2>&1 diff --git a/enos/modules/vault_artifactory_artifact/locals.tf b/enos/modules/vault_artifactory_artifact/locals.tf new file mode 100644 index 0000000..6fde1b8 --- /dev/null +++ b/enos/modules/vault_artifactory_artifact/locals.tf @@ -0,0 +1,51 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +locals { + + // file name extensions for the install packages of vault for the various architectures, distributions and editions + package_extensions = { + amd64 = { + ubuntu = "-1_amd64.deb" + rhel = "-1.x86_64.rpm" + } + arm64 = { + ubuntu = "-1_arm64.deb" + rhel = "-1.aarch64.rpm" + } + } + + // product_version --> artifact_version + artifact_version = replace(var.product_version, var.edition, "ent") + + // file name prefixes for the install packages of vault for the various distributions and artifact types (package or bundle) + artifact_package_release_names = { + ubuntu = { + "ce" = "vault_" + "ent" = "vault-enterprise_", + "ent.fips1402" = "vault-enterprise-fips1402_", + "ent.hsm" = "vault-enterprise-hsm_", + "ent.hsm.fips1402" = "vault-enterprise-hsm-fips1402_", + }, + rhel = { + "ce" = "vault-" + "ent" = "vault-enterprise-", + "ent.fips1402" = "vault-enterprise-fips1402-", + "ent.hsm" = "vault-enterprise-hsm-", + "ent.hsm.fips1402" = "vault-enterprise-hsm-fips1402-", + } + } + + // edition --> artifact name edition + artifact_name_edition = { + "ce" = "" + "ent" = "" + "ent.hsm" = ".hsm" + "ent.fips1402" = ".fips1402" + "ent.hsm.fips1402" = ".hsm.fips1402" + } + + artifact_name_prefix = var.artifact_type == "package" ? local.artifact_package_release_names[var.distro][var.edition] : "vault_" + artifact_name_extension = var.artifact_type == "package" ? local.package_extensions[var.arch][var.distro] : "_linux_${var.arch}.zip" + artifact_name = var.artifact_type == "package" ? "${local.artifact_name_prefix}${replace(local.artifact_version, "-", "~")}${local.artifact_name_extension}" : "${local.artifact_name_prefix}${var.product_version}${local.artifact_name_extension}" +} diff --git a/enos/modules/vault_artifactory_artifact/main.tf b/enos/modules/vault_artifactory_artifact/main.tf new file mode 100644 index 0000000..b2c02e0 --- /dev/null +++ b/enos/modules/vault_artifactory_artifact/main.tf @@ -0,0 +1,25 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + version = ">= 0.2.3" + } + } +} + +data "enos_artifactory_item" "vault" { + username = var.artifactory_username + token = var.artifactory_token + name = local.artifact_name + host = var.artifactory_host + repo = var.artifactory_repo + path = var.edition == "ce" ? "vault/*" : "vault-enterprise/*" + properties = tomap({ + "commit" = var.revision + "product-name" = var.edition == "ce" ? "vault" : "vault-enterprise" + "product-version" = local.artifact_version + }) +} diff --git a/enos/modules/vault_artifactory_artifact/outputs.tf b/enos/modules/vault_artifactory_artifact/outputs.tf new file mode 100644 index 0000000..c100c45 --- /dev/null +++ b/enos/modules/vault_artifactory_artifact/outputs.tf @@ -0,0 +1,32 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0
+
+
+output "url" {
+  value       = data.enos_artifactory_item.vault.results[0].url
+  description = "The artifactory download url for the artifact"
+}
+
+output "sha256" {
+  value       = data.enos_artifactory_item.vault.results[0].sha256
+  description = "The sha256 checksum for the artifact"
+}
+
+output "size" {
+  value       = data.enos_artifactory_item.vault.results[0].size
+  description = "The size in bytes of the artifact"
+}
+
+output "name" {
+  value       = data.enos_artifactory_item.vault.results[0].name
+  description = "The name of the artifact"
+}
+
+output "vault_artifactory_release" {
+  value = {
+    url      = data.enos_artifactory_item.vault.results[0].url
+    sha256   = data.enos_artifactory_item.vault.results[0].sha256
+    username = var.artifactory_username
+    token    = var.artifactory_token
+  }
+}
diff --git a/enos/modules/vault_artifactory_artifact/variables.tf b/enos/modules/vault_artifactory_artifact/variables.tf
new file mode 100644
index 0000000..1ec4ae3
--- /dev/null
+++ b/enos/modules/vault_artifactory_artifact/variables.tf
@@ -0,0 +1,37 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+variable "artifactory_username" {
+  type        = string
+  description = "The username to use when connecting to artifactory"
+  default     = null
+}
+
+variable "artifactory_token" {
+  type        = string
+  description = "The token to use when connecting to artifactory"
+  default     = null
+  sensitive   = true
+}
+
+variable "artifactory_host" {
+  type        = string
+  description = "The artifactory host to search for vault artifacts"
+  default     = "https://artifactory.hashicorp.engineering/artifactory"
+}
+
+variable "artifactory_repo" {
+  type        = string
+  description = "The artifactory repo to search for vault artifacts"
+  default     = "hashicorp-crt-stable-local*"
+}
+variable "arch" {}
+variable "artifact_type" {}
+variable "artifact_path" {}
+variable "distro" {}
+variable "edition" {}
+variable "revision" {}
+variable "product_version" {}
+variable "build_tags" { default = null }
+variable "bundle_path" { default = null }
+variable "goarch" { default = null }
+variable "goos" { default = null }
diff --git a/enos/modules/vault_cluster/main.tf b/enos/modules/vault_cluster/main.tf
new file mode 100644
index 0000000..e376520
--- /dev/null
+++ b/enos/modules/vault_cluster/main.tf
@@ -0,0 +1,309 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+terraform {
+  required_providers {
+    # We need to specify the provider source in each module until we publish it
+    # to the public registry
+    enos = {
+      source  = "app.terraform.io/hashicorp-qti/enos"
+      version = ">= 0.4.0"
+    }
+  }
+}
+
+data "enos_environment" "localhost" {}
+
+locals {
+  audit_device_file_path = "/var/log/vault/vault_audit.log"
+  bin_path               = "${var.install_dir}/vault"
+  consul_bin_path        = "${var.consul_install_dir}/consul"
+  enable_audit_devices   = var.enable_audit_devices && var.initialize_cluster
+  // Terraform requires for_each collections whose keys are known at plan
+  // time, so var.target_hosts must be a map with plan-time-known keys. The
+  // locals below track index values that point at our target hosts.
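+  // As a worked example (hypothetical values): with three target hosts,
+  // local.instances is ["0", "1", "2"], local.leader is toset(["0"]) and
+  // local.followers is toset(["1", "2"]), so every for_each below iterates
+  // over string keys that are known at plan time even though the host IPs
+  // are only known after apply.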
+ followers = toset(slice(local.instances, 1, length(local.instances))) + instances = [for idx in range(length(var.target_hosts)) : tostring(idx)] + key_shares = { + "awskms" = null + "shamir" = 5 + } + key_threshold = { + "awskms" = null + "shamir" = 3 + } + leader = toset(slice(local.instances, 0, 1)) + recovery_shares = { + "awskms" = 5 + "shamir" = null + } + recovery_threshold = { + "awskms" = 3 + "shamir" = null + } + vault_service_user = "vault" +} + +resource "enos_bundle_install" "consul" { + for_each = { + for idx, host in var.target_hosts : idx => var.target_hosts[idx] + if var.storage_backend == "consul" + } + + destination = var.consul_install_dir + release = merge(var.consul_release, { product = "consul" }) + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +resource "enos_bundle_install" "vault" { + for_each = var.target_hosts + + destination = var.install_dir + release = var.release == null ? var.release : merge({ product = "vault" }, var.release) + artifactory = var.artifactory_release + path = var.local_artifact_path + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +resource "enos_remote_exec" "install_packages" { + depends_on = [ + enos_bundle_install.vault, // Don't race for the package manager locks with vault install + ] + for_each = { + for idx, host in var.target_hosts : idx => var.target_hosts[idx] + if length(var.packages) > 0 + } + + environment = { + PACKAGES = join(" ", var.packages) + } + + scripts = [abspath("${path.module}/scripts/install-packages.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +resource "enos_consul_start" "consul" { + for_each = enos_bundle_install.consul + + bin_path = local.consul_bin_path + data_dir = var.consul_data_dir + config = { + data_dir = var.consul_data_dir + datacenter = "dc1" + retry_join = ["provider=aws tag_key=${var.backend_cluster_tag_key} tag_value=${var.backend_cluster_name}"] + server = false + bootstrap_expect = 0 + license = var.consul_license + log_level = var.consul_log_level + log_file = var.consul_log_file + } + license = var.consul_license + unit_name = "consul" + username = "consul" + + transport = { + ssh = { + host = var.target_hosts[each.key].public_ip + } + } +} + +module "start_vault" { + source = "../start_vault" + + depends_on = [ + enos_consul_start.consul, + enos_bundle_install.vault, + ] + + cluster_name = var.cluster_name + config_dir = var.config_dir + install_dir = var.install_dir + license = var.license + log_level = var.log_level + manage_service = var.manage_service + seal_ha_beta = var.seal_ha_beta + seal_key_name = var.seal_key_name + seal_key_name_secondary = var.seal_key_name_secondary + seal_type = var.seal_type + seal_type_secondary = var.seal_type_secondary + service_username = local.vault_service_user + storage_backend = var.storage_backend + storage_backend_attrs = var.storage_backend_addl_config + storage_node_prefix = var.storage_node_prefix + target_hosts = var.target_hosts +} + +resource "enos_vault_init" "leader" { + depends_on = [ + module.start_vault, + ] + for_each = toset([ + for idx, leader in local.leader : leader + if var.initialize_cluster + ]) + + bin_path = local.bin_path + vault_addr = module.start_vault.leader[0].config.api_addr + + key_shares = local.key_shares[var.seal_type] + key_threshold = local.key_threshold[var.seal_type] + + recovery_shares = local.recovery_shares[var.seal_type] + recovery_threshold = local.recovery_threshold[var.seal_type] + + transport = { + ssh = { + host = 
var.target_hosts[each.value].public_ip
+    }
+  }
+}
+
+resource "enos_vault_unseal" "leader" {
+  depends_on = [
+    module.start_vault,
+    enos_vault_init.leader,
+  ]
+  for_each = enos_vault_init.leader // only unseal the leader if we initialized it
+
+  bin_path    = local.bin_path
+  vault_addr  = module.start_vault.leader[each.key].config.api_addr
+  seal_type   = var.seal_type
+  unseal_keys = var.seal_type != "shamir" ? null : coalesce(var.shamir_unseal_keys, enos_vault_init.leader[0].unseal_keys_hex)
+
+  transport = {
+    ssh = {
+      host = var.target_hosts[tolist(local.leader)[0]].public_ip
+    }
+  }
+}
+
+resource "enos_vault_unseal" "followers" {
+  depends_on = [
+    enos_vault_init.leader,
+    enos_vault_unseal.leader,
+  ]
+  // Only unseal followers if we're not using an auto-unseal method and we've
+  // initialized the cluster
+  for_each = toset([
+    for idx, follower in local.followers : follower
+    if var.seal_type == "shamir" && var.initialize_cluster
+  ])
+
+  bin_path    = local.bin_path
+  vault_addr  = module.start_vault.followers[each.key].config.api_addr
+  seal_type   = var.seal_type
+  unseal_keys = var.seal_type != "shamir" ? null : coalesce(var.shamir_unseal_keys, enos_vault_init.leader[0].unseal_keys_hex)
+
+  transport = {
+    ssh = {
+      host = var.target_hosts[each.value].public_ip
+    }
+  }
+}
+
+// Force unseal the cluster. This is used if the vault-cluster module is used
+// to add additional nodes to a cluster via autopilot, or some other means.
+// When that happens we'll want to set initialize_cluster to false and
+// force_unseal to true.
+resource "enos_vault_unseal" "maybe_force_unseal" {
+  depends_on = [
+    module.start_vault.followers,
+  ]
+  for_each = {
+    for idx, host in var.target_hosts : idx => host
+    if var.force_unseal && !var.initialize_cluster
+  }
+
+  bin_path   = local.bin_path
+  vault_addr = "http://localhost:8200"
+  seal_type  = var.seal_type
+  unseal_keys = coalesce(
+    var.shamir_unseal_keys,
+    try(enos_vault_init.leader[0].unseal_keys_hex, null),
+  )
+
+  transport = {
+    ssh = {
+      host = each.value.public_ip
+    }
+  }
+}
+
+# We need to ensure that the directory used for audit logs is present and
+# accessible to the vault user on all nodes: although only the active node
+# writes audit logs, any node can become the leader.
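+#
+# In shell terms the script invoked below amounts to (a sketch, assuming the
+# defaults above: audit log path /var/log/vault/vault_audit.log and service
+# user "vault"):
+#
+#   sudo mkdir -p /var/log/vault
+#   sudo chown -R vault:vault /var/log/vault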
+resource "enos_remote_exec" "create_audit_log_dir" { + depends_on = [ + module.start_vault, + enos_vault_unseal.leader, + enos_vault_unseal.followers, + enos_vault_unseal.maybe_force_unseal, + ] + for_each = toset([ + for idx, host in toset(local.instances) : idx + if var.enable_audit_devices + ]) + + environment = { + LOG_FILE_PATH = local.audit_device_file_path + SERVICE_USER = local.vault_service_user + } + + scripts = [abspath("${path.module}/scripts/create_audit_log_dir.sh")] + + transport = { + ssh = { + host = var.target_hosts[each.value].public_ip + } + } +} + +resource "enos_remote_exec" "enable_audit_devices" { + depends_on = [ + enos_remote_exec.create_audit_log_dir, + ] + for_each = toset([ + for idx in local.leader : idx + if local.enable_audit_devices + ]) + + environment = { + VAULT_TOKEN = enos_vault_init.leader[each.key].root_token + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_BIN_PATH = local.bin_path + LOG_FILE_PATH = local.audit_device_file_path + SERVICE_USER = local.vault_service_user + } + + scripts = [abspath("${path.module}/scripts/enable_audit_logging.sh")] + + transport = { + ssh = { + host = var.target_hosts[each.key].public_ip + } + } +} + +resource "enos_local_exec" "wait_for_install_packages" { + depends_on = [ + enos_remote_exec.install_packages, + ] + + inline = ["true"] +} diff --git a/enos/modules/vault_cluster/outputs.tf b/enos/modules/vault_cluster/outputs.tf new file mode 100644 index 0000000..e3f1045 --- /dev/null +++ b/enos/modules/vault_cluster/outputs.tf @@ -0,0 +1,64 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +output "audit_device_file_path" { + description = "The file path for the audit device, if enabled" + value = var.enable_audit_devices ? local.audit_device_file_path : "file audit device not enabled" +} + +output "cluster_name" { + description = "The Vault cluster name" + value = var.cluster_name +} + +output "private_ips" { + description = "Vault cluster target host private_ips" + value = [for host in var.target_hosts : host.private_ip] +} + +output "public_ips" { + description = "Vault cluster target host public_ips" + value = [for host in var.target_hosts : host.public_ip] +} + +output "recovery_keys_b64" { + value = try(enos_vault_init.leader[0].recovery_keys_b64, []) +} + +output "recovery_keys_hex" { + value = try(enos_vault_init.leader[0].recovery_keys_hex, []) +} + +output "recovery_key_shares" { + value = try(enos_vault_init.leader[0].recovery_keys_shares, -1) +} + +output "recovery_threshold" { + value = try(enos_vault_init.leader[0].recovery_keys_threshold, -1) +} + +output "root_token" { + value = coalesce(var.root_token, try(enos_vault_init.leader[0].root_token, null), "none") +} + +output "target_hosts" { + description = "The vault cluster instances that were created" + + value = var.target_hosts +} + +output "unseal_keys_b64" { + value = try(enos_vault_init.leader[0].unseal_keys_b64, []) +} + +output "unseal_keys_hex" { + value = try(enos_vault_init.leader[0].unseal_keys_hex, null) +} + +output "unseal_shares" { + value = try(enos_vault_init.leader[0].unseal_keys_shares, -1) +} + +output "unseal_threshold" { + value = try(enos_vault_init.leader[0].unseal_keys_threshold, -1) +} diff --git a/enos/modules/vault_cluster/scripts/create_audit_log_dir.sh b/enos/modules/vault_cluster/scripts/create_audit_log_dir.sh new file mode 100755 index 0000000..1af37d9 --- /dev/null +++ b/enos/modules/vault_cluster/scripts/create_audit_log_dir.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, 
Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+
+set -eux
+
+LOG_DIR=$(dirname "$LOG_FILE_PATH")
+
+function retry {
+  local retries=$1
+  shift
+  local count=0
+
+  until "$@"; do
+    exit=$?
+    wait=10
+    count=$((count + 1))
+
+    if [ "$count" -lt "$retries" ]; then
+      sleep "$wait"
+    else
+      return "$exit"
+    fi
+  done
+
+  return 0
+}
+
+retry 7 id -a "$SERVICE_USER"
+
+sudo mkdir -p "$LOG_DIR"
+sudo chown -R "$SERVICE_USER":"$SERVICE_USER" "$LOG_DIR"
diff --git a/enos/modules/vault_cluster/scripts/enable_audit_logging.sh b/enos/modules/vault_cluster/scripts/enable_audit_logging.sh
new file mode 100644
index 0000000..7bfe750
--- /dev/null
+++ b/enos/modules/vault_cluster/scripts/enable_audit_logging.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+set -exo pipefail
+
+# Run nc to listen on port 9090 for the socket audit device. We spawn nc
+# with nohup to ensure that the listener doesn't expect a SIGHUP and
+# thus block the SSH session from exiting or terminating on exit.
+# We immediately redirect STDIN from /dev/null to give nc an
+# immediate EOF so as to not block on expecting STDIN.
+nohup nc -kl 9090 &> /dev/null < /dev/null &
+
+# Wait for nc to be listening before we attempt to enable the socket audit device.
+attempts=3
+count=0
+until nc -zv 127.0.0.1 9090 &> /dev/null < /dev/null; do
+  wait=$((2 ** count))
+  count=$((count + 1))
+
+  if [ "$count" -le "$attempts" ]; then
+    sleep "$wait"
+    if ! pgrep -x nc; then
+      nohup nc -kl 9090 &> /dev/null < /dev/null &
+    fi
+  else
+    echo "Timed out waiting for nc to listen on 127.0.0.1:9090" 1>&2
+    exit 1
+  fi
+done
+
+sleep 1
+
+# Enable the audit devices.
+$VAULT_BIN_PATH audit enable file file_path="$LOG_FILE_PATH"
+$VAULT_BIN_PATH audit enable syslog tag="vault" facility="AUTH"
+$VAULT_BIN_PATH audit enable socket address="127.0.0.1:9090" || true
diff --git a/enos/modules/vault_cluster/scripts/install-packages.sh b/enos/modules/vault_cluster/scripts/install-packages.sh
new file mode 100755
index 0000000..2e9d642
--- /dev/null
+++ b/enos/modules/vault_cluster/scripts/install-packages.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+
+set -ex -o pipefail
+
+if [ "$PACKAGES" == "" ]
+then
+  echo "No dependencies to install."
+  exit 0
+fi
+
+function retry {
+  local retries=$1
+  shift
+  local count=0
+
+  until "$@"; do
+    exit=$?
+    wait=$((2 ** count))
+    count=$((count + 1))
+    if [ "$count" -lt "$retries" ]; then
+      sleep "$wait"
+    else
+      # Return rather than exit so that callers can use `|| true` to treat a
+      # timed-out retry as non-fatal.
+      return "$exit"
+    fi
+  done
+
+  return 0
+}
+
+echo "Installing Dependencies: $PACKAGES"
+if [ -f /etc/debian_version ]; then
+  # Do our best to make sure that we don't race with cloud-init. Wait a reasonable time until we
+  # see ec2 in the sources list. Very rarely cloud-init will take longer than we wait. In that case
+  # we'll just install our packages.
+  retry 7 grep ec2 /etc/apt/sources.list || true
+
+  cd /tmp
+  retry 5 sudo apt update
+  # shellcheck disable=2068
+  retry 5 sudo apt install -y ${PACKAGES[@]}
+else
+  cd /tmp
+  # shellcheck disable=2068
+  retry 7 sudo yum -y install ${PACKAGES[@]}
+fi
diff --git a/enos/modules/vault_cluster/scripts/vault-write-license.sh b/enos/modules/vault_cluster/scripts/vault-write-license.sh
new file mode 100755
index 0000000..6b92272
--- /dev/null
+++ b/enos/modules/vault_cluster/scripts/vault-write-license.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+
+fail() {
+  echo "$1" 1>&2
+  exit 1
+}
+
+if test "$LICENSE" = "none"; then
+  exit 0
+fi
+
+function retry {
+  local retries=$1
+  shift
+  local count=0
+
+  until "$@"; do
+    exit=$?
+    wait=$((2 ** count))
+    count=$((count + 1))
+
+    if [ "$count" -lt "$retries" ]; then
+      sleep "$wait"
+    else
+      return "$exit"
+    fi
+  done
+
+  return 0
+}
+
+export VAULT_ADDR=http://localhost:8200
+[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
+
+# Temporary hack until we can make the unseal resource handle legacy license
+# setting. If we're running 1.8 and above then we shouldn't try to set a license.
+ver=$(${BIN_PATH} version)
+if [[ "$(echo "$ver" |awk '{print $2}' |awk -F'.' '{print $2}')" -ge 8 ]]; then
+  exit 0
+fi
+
+retry 5 "${BIN_PATH}" write /sys/license text="$LICENSE"
diff --git a/enos/modules/vault_cluster/variables.tf b/enos/modules/vault_cluster/variables.tf
new file mode 100644
index 0000000..a1108c8
--- /dev/null
+++ b/enos/modules/vault_cluster/variables.tf
@@ -0,0 +1,242 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+variable "artifactory_release" {
+  type = object({
+    username = string
+    token    = string
+    url      = string
+    sha256   = string
+  })
+  description = "The Artifactory release information to install Vault artifacts from Artifactory"
+  default     = null
+}
+
+variable "backend_cluster_name" {
+  type        = string
+  description = "The name of the backend cluster"
+  default     = null
+}
+
+variable "backend_cluster_tag_key" {
+  type        = string
+  description = "The tag key for searching for backend nodes"
+  default     = null
+}
+
+variable "cluster_name" {
+  type        = string
+  description = "The Vault cluster name"
+  default     = null
+}
+
+variable "config_dir" {
+  type        = string
+  description = "The directory to use for Vault configuration"
+  default     = "/etc/vault.d"
+}
+
+variable "config_env_vars" {
+  description = "Optional Vault configuration environment variables to set when starting Vault"
+  type        = map(string)
+  default     = null
+}
+
+variable "consul_data_dir" {
+  type        = string
+  description = "The directory where Consul will store data"
+  default     = "/opt/consul/data"
+}
+
+variable "consul_install_dir" {
+  type        = string
+  description = "The directory where the Consul binary will be installed"
+  default     = "/opt/consul/bin"
+}
+
+variable "consul_license" {
+  type        = string
+  sensitive   = true
+  description = "The Consul Enterprise license"
+  default     = null
+}
+
+variable "consul_log_file" {
+  type        = string
+  description = "The file where Consul will write log output"
+  default     = "/var/log/consul.log"
+}
+
+variable "consul_log_level" {
+  type        = string
+  description = "The Consul service log level"
+  default     = "info"
+
+  validation {
+    condition     = contains(["trace", "debug", "info", "warn", "error"], var.consul_log_level)
+    error_message = "The consul_log_level must be one of 'trace', 'debug', 'info', 'warn', or 'error'."
+  }
+}
+
+variable "consul_release" {
+  type = object({
+    version = string
+    edition = string
+  })
+  description = "Consul release version and edition to install from releases.hashicorp.com"
+  default = {
+    version = "1.15.1"
+    edition = "ce"
+  }
+}
+
+variable "enable_audit_devices" {
+  description = "If true, every audit device will be enabled"
+  type        = bool
+  default     = true
+}
+
+variable "force_unseal" {
+  type        = bool
+  description = "Always unseal the Vault cluster even if we're not initializing it"
+  default     = false
+}
+
+variable "initialize_cluster" {
+  type        = bool
+  description = "Initialize the Vault cluster"
+  default     = true
+}
+
+variable "install_dir" {
+  type        = string
+  description = "The directory where the vault binary will be installed"
+  default     = "/opt/vault/bin"
+}
+
+variable "license" {
+  type        = string
+  sensitive   = true
+  description = "The value of the Vault license"
+  default     = null
+}
+
+variable "local_artifact_path" {
+  type        = string
+  description = "The path to a locally built vault artifact to install. It can be a zip archive, RPM, or Debian package"
+  default     = null
+}
+
+variable "log_level" {
+  type        = string
+  description = "The vault service log level"
+  default     = "info"
+
+  validation {
+    condition     = contains(["trace", "debug", "info", "warn", "error"], var.log_level)
+    error_message = "The log_level must be one of 'trace', 'debug', 'info', 'warn', or 'error'."
+  }
+}
+
+variable "manage_service" {
+  type        = bool
+  description = "Manage the Vault service users and systemd unit. Disable this to use configuration in RPM and Debian packages"
+  default     = true
+}
+
+variable "packages" {
+  type        = list(string)
+  description = "A list of packages to install via the target host package manager"
+  default     = []
+}
+
+variable "release" {
+  type = object({
+    version = string
+    edition = string
+  })
+  description = "Vault release version and edition to install from releases.hashicorp.com"
+  default     = null
+}
+
+variable "root_token" {
+  type        = string
+  description = "The Vault root token that we can use to initialize and configure the cluster"
+  default     = null
+}
+
+variable "seal_ha_beta" {
+  type        = bool
+  description = "Enable using Seal HA on clusters that meet minimum version requirements and are enterprise editions"
+  default     = true
+}
+
+variable "seal_key_name" {
+  type        = string
+  description = "The auto-unseal key name"
+  default     = null
+}
+
+variable "seal_key_name_secondary" {
+  type        = string
+  description = "The secondary auto-unseal key name"
+  default     = null
+}
+
+variable "seal_type" {
+  type        = string
+  description = "The method by which to unseal the Vault cluster"
+  default     = "awskms"
+
+  validation {
+    condition     = contains(["awskms", "shamir"], var.seal_type)
+    error_message = "The seal_type must be either awskms or shamir. No other unseal methods are supported."
+  }
+}
+
+variable "seal_type_secondary" {
+  type        = string
+  description = "A secondary HA seal method. Only supported in Vault Enterprise >= 1.15"
+  default     = "none"
+
+  validation {
+    condition     = contains(["awskms", "none"], var.seal_type_secondary)
+    error_message = "The seal_type_secondary must be 'awskms' or 'none'. No other secondary unseal methods are supported."
+  }
+}
+
+variable "shamir_unseal_keys" {
+  type        = list(string)
+  description = "Shamir unseal keys. Often only used when adding additional nodes to an already initialized cluster."
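+  # Hypothetical example, typically wired through from a previous cluster's
+  # unseal_keys_hex output: ["9f1a...", "84c2...", "1d7e..."]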
+  default     = null
+}
+
+variable "storage_backend" {
+  type        = string
+  description = "The storage backend to use"
+  default     = "raft"
+
+  validation {
+    condition     = contains(["raft", "consul"], var.storage_backend)
+    error_message = "The storage_backend must be either raft or consul. No other storage backends are supported."
+  }
+}
+
+variable "storage_backend_addl_config" {
+  type        = map(any)
+  description = "An optional set of key/value pairs to inject into the storage block"
+  default     = {}
+}
+
+variable "storage_node_prefix" {
+  type        = string
+  description = "A prefix to use for each node in the Vault storage configuration"
+  default     = "node"
+}
+
+variable "target_hosts" {
+  description = "The target machines' host addresses to use for the Vault cluster"
+  type = map(object({
+    private_ip = string
+    public_ip  = string
+  }))
+}
diff --git a/enos/modules/vault_get_cluster_ips/main.tf b/enos/modules/vault_get_cluster_ips/main.tf
new file mode 100644
index 0000000..d29dff3
--- /dev/null
+++ b/enos/modules/vault_get_cluster_ips/main.tf
@@ -0,0 +1,115 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+terraform {
+  required_providers {
+    enos = {
+      source = "app.terraform.io/hashicorp-qti/enos"
+    }
+  }
+}
+
+variable "vault_install_dir" {
+  type        = string
+  description = "The directory where the Vault binary will be installed"
+}
+
+variable "vault_root_token" {
+  type        = string
+  description = "The vault root token"
+}
+
+variable "vault_instance_count" {
+  type        = number
+  description = "The number of instances in the vault cluster"
+}
+
+variable "vault_hosts" {
+  type = map(object({
+    private_ip = string
+    public_ip  = string
+  }))
+  description = "The vault cluster hosts. These are required to map private IP addresses to public addresses."
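+  # Example shape (hypothetical addresses):
+  #   { "0" = { private_ip = "10.0.1.10", public_ip = "54.210.0.10" }, ... }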
+}
+
+locals {
+  follower_hosts_list = [for idx in range(var.vault_instance_count - 1) : {
+    private_ip = local.follower_private_ips[idx]
+    public_ip  = local.follower_public_ips[idx]
+    }
+  ]
+  follower_hosts = {
+    for idx in range(var.vault_instance_count - 1) : idx => try(local.follower_hosts_list[idx], null)
+  }
+  follower_private_ips = jsondecode(enos_remote_exec.get_follower_private_ips.stdout)
+  follower_public_ips = [for idx in range(var.vault_instance_count) : var.vault_hosts[idx].public_ip if contains(
+    local.follower_private_ips, var.vault_hosts[idx].private_ip)
+  ]
+  leader_host = {
+    private_ip = local.leader_private_ip
+    public_ip  = local.leader_public_ip
+  }
+  leader_private_ip = trimspace(enos_remote_exec.get_leader_private_ip.stdout)
+  leader_public_ip = element([
+    for idx in range(var.vault_instance_count) : var.vault_hosts[idx].public_ip if var.vault_hosts[idx].private_ip == local.leader_private_ip
+  ], 0)
+  private_ips = [for k, v in values(tomap(var.vault_hosts)) : tostring(v["private_ip"])]
+}
+
+resource "enos_remote_exec" "get_leader_private_ip" {
+  environment = {
+    VAULT_ADDR        = "http://127.0.0.1:8200"
+    VAULT_TOKEN       = var.vault_root_token
+    VAULT_INSTALL_DIR = var.vault_install_dir
+  }
+
+  scripts = [abspath("${path.module}/scripts/get-leader-private-ip.sh")]
+
+  transport = {
+    ssh = {
+      host = var.vault_hosts[0].public_ip
+    }
+  }
+}
+
+resource "enos_remote_exec" "get_follower_private_ips" {
+  environment = {
+    VAULT_ADDR                 = "http://127.0.0.1:8200"
+    VAULT_TOKEN                = var.vault_root_token
+    VAULT_LEADER_PRIVATE_IP    = local.leader_private_ip
+    VAULT_INSTANCE_PRIVATE_IPS = jsonencode(local.private_ips)
+    VAULT_INSTALL_DIR          = var.vault_install_dir
+  }
+
+  scripts = [abspath("${path.module}/scripts/get-follower-private-ips.sh")]
+
+  transport = {
+    ssh = {
+      host = var.vault_hosts[0].public_ip
+    }
+  }
+}
+
+output "follower_hosts" {
+  value = local.follower_hosts
}
+
+output "follower_private_ips" {
+  value = local.follower_private_ips
+}
+
+output "follower_public_ips" {
+  value = local.follower_public_ips
+}
+
+output "leader_host" {
+  value = local.leader_host
+}
+
+output "leader_private_ip" {
+  value = local.leader_private_ip
+}
+
+output "leader_public_ip" {
+  value = local.leader_public_ip
+}
diff --git a/enos/modules/vault_get_cluster_ips/scripts/get-follower-private-ips.sh b/enos/modules/vault_get_cluster_ips/scripts/get-follower-private-ips.sh
new file mode 100644
index 0000000..60553e7
--- /dev/null
+++ b/enos/modules/vault_get_cluster_ips/scripts/get-follower-private-ips.sh
@@ -0,0 +1,55 @@
+#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+
+set -e
+
+function fail() {
+  echo "$1" 1>&2
+  exit 1
+}
+
+[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
+[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
+[[ -z "$VAULT_INSTANCE_PRIVATE_IPS" ]] && fail "VAULT_INSTANCE_PRIVATE_IPS env variable has not been set"
+[[ -z "$VAULT_LEADER_PRIVATE_IP" ]] && fail "VAULT_LEADER_PRIVATE_IP env variable has not been set"
+[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
+
+binpath=${VAULT_INSTALL_DIR}/vault
+test -x "$binpath" || fail "Unable to locate vault binary at $binpath"
+
+count=0
+retries=10
+while :; do
+  # Vault >= 1.10.x has the 'operator members' subcommand. If we have it then we'll use it.
+  if $binpath operator -h 2>&1 | grep members &> /dev/null; then
+    # Get the followers that are part of our private IPs.
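+    # In the jq program below, $expected - ($expected - $followers) is a set
+    # intersection: it keeps only those expected private IPs that also appear
+    # in the followers' api_address list, preserving the order of $expected.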
+    if members=$($binpath operator members -format json); then
+      if followers=$(echo "$members" | jq --argjson expected "$VAULT_INSTANCE_PRIVATE_IPS" -c '.Nodes | map(select(any(.; .active_node==false)) | .api_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")) as $followers | $expected - ($expected - $followers)'); then
+        # Make sure that we got all the followers
+        if jq --argjson expected "$VAULT_INSTANCE_PRIVATE_IPS" --argjson followers "$followers" -ne '$expected | length as $el | $followers | length as $fl | $fl == $el-1' > /dev/null; then
+          echo "$followers"
+          exit 0
+        fi
+      fi
+    fi
+  else
+    # We're using an old version of vault so we'll just return IPs that don't match the leader.
+    # Get the private IP addresses of the followers
+    if followers=$(jq --arg ip "$VAULT_LEADER_PRIVATE_IP" -c '. | map(select(.!=$ip))' <<< "$VAULT_INSTANCE_PRIVATE_IPS"); then
+      if [[ -n "$followers" ]]; then
+        echo "$followers"
+        exit 0
+      fi
+    fi
+  fi
+
+  wait=$((2 ** count))
+  count=$((count + 1))
+  if [ "$count" -lt "$retries" ]; then
+    sleep "$wait"
+  else
+    fail "Timed out trying to obtain the cluster followers"
+  fi
+done
diff --git a/enos/modules/vault_get_cluster_ips/scripts/get-leader-private-ip.sh b/enos/modules/vault_get_cluster_ips/scripts/get-leader-private-ip.sh
new file mode 100644
index 0000000..bf8d27a
--- /dev/null
+++ b/enos/modules/vault_get_cluster_ips/scripts/get-leader-private-ip.sh
@@ -0,0 +1,46 @@
+#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+
+set -e
+
+function fail() {
+  echo "$1" 1>&2
+  exit 1
+}
+
+[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
+[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
+[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
+
+binpath=${VAULT_INSTALL_DIR}/vault
+test -x "$binpath" || fail "Unable to locate vault binary at $binpath"
+
+count=0
+retries=5
+while :; do
+  # Find the leader private IP address
+  if ip=$($binpath read sys/leader -format=json | jq -r '.data.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'); then
+    if [[ -n "$ip" ]]; then
+      echo "$ip"
+      exit 0
+    fi
+  fi
+
+  # Some older versions of vault don't support reading sys/leader. Try falling back to the CLI status.
+  if ip=$($binpath status -format json | jq -r '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'); then
+    if [[ -n "$ip" ]]; then
+      echo "$ip"
+      exit 0
+    fi
+  fi
+
+  wait=$((2 ** count))
+  count=$((count + 1))
+  if [ "$count" -lt "$retries" ]; then
+    sleep "$wait"
+  else
+    fail "Timed out trying to obtain the cluster leader"
+  fi
done
diff --git a/enos/modules/vault_proxy/main.tf b/enos/modules/vault_proxy/main.tf
new file mode 100644
index 0000000..71803b1
--- /dev/null
+++ b/enos/modules/vault_proxy/main.tf
@@ -0,0 +1,89 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + } + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +variable "vault_root_token" { + type = string + description = "The Vault root token" +} + +variable "vault_instances" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The Vault cluster instances that were created" +} + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_proxy_pidfile" { + type = string + description = "The filepath where the Vault Proxy pid file is kept" + default = "/tmp/pidfile" +} + +locals { + vault_instances = { + for idx in range(var.vault_instance_count) : idx => { + public_ip = values(var.vault_instances)[idx].public_ip + private_ip = values(var.vault_instances)[idx].private_ip + } + } + vault_proxy_address = "127.0.0.1:8100" +} + +resource "enos_remote_exec" "set_up_approle_auth_and_proxy" { + environment = { + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_TOKEN = var.vault_root_token + VAULT_PROXY_PIDFILE = var.vault_proxy_pidfile + VAULT_PROXY_ADDRESS = local.vault_proxy_address + } + + scripts = [abspath("${path.module}/scripts/set-up-approle-and-proxy.sh")] + + transport = { + ssh = { + host = local.vault_instances[0].public_ip + } + } +} + +resource "enos_remote_exec" "use_proxy" { + environment = { + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_PROXY_PIDFILE = var.vault_proxy_pidfile + VAULT_PROXY_ADDRESS = local.vault_proxy_address + } + + scripts = [abspath("${path.module}/scripts/use-proxy.sh")] + + transport = { + ssh = { + host = local.vault_instances[0].public_ip + } + } + + depends_on = [ + enos_remote_exec.set_up_approle_auth_and_proxy + ] +} diff --git a/enos/modules/vault_proxy/scripts/set-up-approle-and-proxy.sh b/enos/modules/vault_proxy/scripts/set-up-approle-and-proxy.sh new file mode 100644 index 0000000..2ec3ed1 --- /dev/null +++ b/enos/modules/vault_proxy/scripts/set-up-approle-and-proxy.sh @@ -0,0 +1,87 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + + +set -e + +binpath=${VAULT_INSTALL_DIR}/vault + +fail() { + echo "$1" 1>&2 + return 1 +} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_ADDR='http://127.0.0.1:8200' +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +# If approle was already enabled, disable it as we're about to re-enable it (the || true is so we don't fail if it doesn't already exist) +$binpath auth disable approle || true + +$binpath auth enable approle + +$binpath write auth/approle/role/proxy-role secret_id_ttl=700h token_num_uses=1000 token_ttl=600h token_max_ttl=700h secret_id_num_uses=1000 + +ROLEID=$($binpath read --format=json auth/approle/role/proxy-role/role-id | jq -r '.data.role_id') + +if [[ "$ROLEID" == '' ]]; then + fail "expected ROLEID to be nonempty, but it is empty" +fi + +SECRETID=$($binpath write -f --format=json auth/approle/role/proxy-role/secret-id | jq -r '.data.secret_id') + +if [[ "$SECRETID" == '' ]]; then + fail "expected SECRETID to be nonempty, but it is empty" +fi + +echo "$ROLEID" > /tmp/role-id +echo "$SECRETID" > /tmp/secret-id + +# Write the Vault Proxy's configuration to /tmp/vault-proxy.hcl +# The Proxy references the fixed Vault server address of http://127.0.0.1:8200 +# The Proxy itself listens at the address http://127.0.0.1:8100 +cat > /tmp/vault-proxy.hcl <<- EOM +pid_file = "${VAULT_PROXY_PIDFILE}" + +vault { + address = "http://127.0.0.1:8200" + tls_skip_verify = true + retry { + num_retries = 10 + } +} + +api_proxy { + enforce_consistency = "always" + use_auto_auth_token = true +} + +listener "tcp" { + address = "${VAULT_PROXY_ADDRESS}" + tls_disable = true +} + +auto_auth { + method { + type = "approle" + config = { + role_id_file_path = "/tmp/role-id" + secret_id_file_path = "/tmp/secret-id" + } + } + sink { + type = "file" + config = { + path = "/tmp/token" + } + } +} +EOM + +# If Proxy is still running from a previous run, kill it +pkill -F "${VAULT_PROXY_PIDFILE}" || true + +# Run proxy in the background +$binpath proxy -config=/tmp/vault-proxy.hcl > /tmp/proxy-logs.txt 2>&1 & diff --git a/enos/modules/vault_proxy/scripts/use-proxy.sh b/enos/modules/vault_proxy/scripts/use-proxy.sh new file mode 100644 index 0000000..0c14803 --- /dev/null +++ b/enos/modules/vault_proxy/scripts/use-proxy.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +set -e + +binpath=${VAULT_INSTALL_DIR}/vault + +fail() { + echo "$1" 1>&2 + return 1 +} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +# Will cause the Vault CLI to communicate with the Vault Proxy, since it +# is listening at port 8100. +export VAULT_ADDR="http://${VAULT_PROXY_ADDRESS}" + +# Explicitly unsetting VAULT_TOKEN to make sure that the Vault Proxy's token +# is used. +unset VAULT_TOKEN + +# Use the Vault CLI to communicate with the Vault Proxy (via the VAULT_ADDR env +# var) to lookup the details of the Proxy's token and make sure that the +# .data.path field contains 'auth/approle/login', thus confirming that the Proxy +# automatically authenticated itself. 
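+# (Under the set -e above, a non-matching grep -q exits non-zero and fails this
+# script, which should surface as a failure of the enos_remote_exec step.)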
+$binpath token lookup -format=json | jq -r '.data.path' | grep -q 'auth/approle/login'
+
+# Now that we're done, kill the proxy
+pkill -F "${VAULT_PROXY_PIDFILE}" || true
diff --git a/enos/modules/vault_raft_remove_peer/main.tf b/enos/modules/vault_raft_remove_peer/main.tf
new file mode 100644
index 0000000..622b922
--- /dev/null
+++ b/enos/modules/vault_raft_remove_peer/main.tf
@@ -0,0 +1,72 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+terraform {
+  required_providers {
+    enos = {
+      source = "app.terraform.io/hashicorp-qti/enos"
+    }
+  }
+}
+
+variable "vault_cluster_addr_port" {
+  description = "The Raft cluster address port"
+  type        = string
+  default     = "8201"
+}
+
+variable "vault_install_dir" {
+  type        = string
+  description = "The directory where the Vault binary will be installed"
+}
+
+variable "vault_instance_count" {
+  type        = number
+  description = "How many vault instances are in the cluster"
+}
+
+variable "operator_instance" {
+  type        = string
+  description = "The IP address of the operator (voter) node"
+}
+
+variable "remove_vault_instances" {
+  type = map(object({
+    private_ip = string
+    public_ip  = string
+  }))
+  description = "The old vault nodes to be removed"
+}
+
+variable "vault_root_token" {
+  type        = string
+  description = "The vault root token"
+}
+
+locals {
+  instances = {
+    for idx in range(var.vault_instance_count) : idx => {
+      public_ip  = values(var.remove_vault_instances)[idx].public_ip
+      private_ip = values(var.remove_vault_instances)[idx].private_ip
+    }
+  }
+}
+
+resource "enos_remote_exec" "vault_raft_remove_peer" {
+  for_each = local.instances
+
+  environment = {
+    REMOVE_VAULT_CLUSTER_ADDR = "${each.value.private_ip}:${var.vault_cluster_addr_port}"
+    VAULT_TOKEN               = var.vault_root_token
+    VAULT_ADDR                = "http://localhost:8200"
+    VAULT_INSTALL_DIR         = var.vault_install_dir
+  }
+
+  scripts = [abspath("${path.module}/scripts/raft-remove-peer.sh")]
+
+  transport = {
+    ssh = {
+      host = var.operator_instance
+    }
+  }
+}
diff --git a/enos/modules/vault_raft_remove_peer/scripts/raft-remove-peer.sh b/enos/modules/vault_raft_remove_peer/scripts/raft-remove-peer.sh
new file mode 100644
index 0000000..f0a6a5b
--- /dev/null
+++ b/enos/modules/vault_raft_remove_peer/scripts/raft-remove-peer.sh
@@ -0,0 +1,49 @@
+#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+
+set -e
+
+binpath=${VAULT_INSTALL_DIR}/vault
+node_addr=${REMOVE_VAULT_CLUSTER_ADDR}
+
+fail() {
+  echo "$1" 1>&2
+  return 1
+}
+
+[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
+
+retry() {
+  local retries=$1
+  shift
+  local count=0
+
+  until "$@"; do
+    exit=$?
+    wait=$((2 ** count))
+    count=$((count + 1))
+    if [ "$count" -lt "$retries" ]; then
+      sleep "$wait"
+      echo "retry $count"
+    else
+      return "$exit"
+    fi
+  done
+
+  return 0
+}
+
+remove_peer() {
+  if ! 
node_id=$("$binpath" operator raft list-peers -format json | jq -Mr --argjson expected "false" '.data.config.servers[] | select(.address=='\""$node_addr"\"') | select(.voter==$expected) | .node_id'); then + fail "failed to get node id of a non-voter node" + fi + + $binpath operator raft remove-peer "$node_id" +} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +# Retry a few times because it can take some time for things to settle after autopilot upgrade +retry 5 remove_peer diff --git a/enos/modules/vault_setup_perf_primary/main.tf b/enos/modules/vault_setup_perf_primary/main.tf new file mode 100644 index 0000000..20556d5 --- /dev/null +++ b/enos/modules/vault_setup_perf_primary/main.tf @@ -0,0 +1,52 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +variable "vault_cluster_addr_port" { + description = "The Raft cluster address port" + type = string + default = "8201" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "primary_leader_public_ip" { + type = string + description = "Vault primary cluster leader Public IP address" +} + +variable "primary_leader_private_ip" { + type = string + description = "Vault primary cluster leader Private IP address" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +resource "enos_remote_exec" "configure_pr_primary" { + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/configure-vault-pr-primary.sh")] + + transport = { + ssh = { + host = var.primary_leader_public_ip + } + } +} diff --git a/enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh b/enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh new file mode 100644 index 0000000..1629ac7 --- /dev/null +++ b/enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +set -e + +binpath=${VAULT_INSTALL_DIR}/vault + +fail() { + echo "$1" 1>&2 + return 1 +} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +# Create superuser policy +$binpath policy write superuser -< /dev/null + +echo "running test-ember-enos" +make test-ember-enos +popd > /dev/null diff --git a/enos/modules/vault_test_ui/variables.tf b/enos/modules/vault_test_ui/variables.tf new file mode 100644 index 0000000..c2db5c5 --- /dev/null +++ b/enos/modules/vault_test_ui/variables.tf @@ -0,0 +1,34 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +variable "vault_addr" { + description = "The host address for the vault instance to test" + type = string +} + +variable "vault_root_token" { + description = "The vault root token" + type = string +} + +variable "ui_test_filter" { + type = string + description = "A test filter to limit the ui tests to execute. 
Will be appended to the ember test command as '-f='"
+  default     = null
+}
+
+variable "vault_unseal_keys" {
+  description = "Base64 encoded recovery keys to use for the seal/unseal test"
+  type        = list(string)
+}
+
+variable "vault_recovery_threshold" {
+  description = "The number of recovery keys to require when unsealing Vault"
+  type        = string
+}
+
+variable "ui_run_tests" {
+  type        = bool
+  description = "Whether to run the UI tests or not. If set to false, a cluster will be created but no tests will be run"
+  default     = true
+}
diff --git a/enos/modules/vault_unseal_nodes/main.tf b/enos/modules/vault_unseal_nodes/main.tf
new file mode 100644
index 0000000..b8b86b3
--- /dev/null
+++ b/enos/modules/vault_unseal_nodes/main.tf
@@ -0,0 +1,127 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+# This module unseals the replication secondary follower nodes
+terraform {
+  required_providers {
+    enos = {
+      source = "app.terraform.io/hashicorp-qti/enos"
+    }
+  }
+}
+
+variable "vault_install_dir" {
+  type        = string
+  description = "The directory where the Vault binary will be installed"
+}
+
+variable "vault_instance_count" {
+  type        = number
+  description = "How many vault instances are in the cluster"
+}
+
+variable "follower_public_ips" {
+  type        = list(string)
+  description = "Vault cluster follower public IP addresses"
+}
+
+variable "vault_seal_type" {
+  type        = string
+  description = "The Vault seal type"
+}
+
+variable "vault_unseal_keys" {}
+
+locals {
+  followers      = toset([for idx in range(var.vault_instance_count - 1) : tostring(idx)])
+  vault_bin_path = "${var.vault_install_dir}/vault"
+}
+
+# After replication is enabled the secondary follower nodes are expected to be sealed,
+# so we wait for the secondary follower nodes to update the seal status
+resource "enos_remote_exec" "wait_until_sealed" {
+  for_each = {
+    for idx, follower in local.followers : idx => follower
+  }
+  environment = {
+    VAULT_ADDR        = "http://127.0.0.1:8200"
+    VAULT_INSTALL_DIR = var.vault_install_dir
+  }
+
+  scripts = [abspath("${path.module}/scripts/wait-until-sealed.sh")]
+
+  transport = {
+    ssh = {
+      host = element(var.follower_public_ips, each.key)
+    }
+  }
+}
+
+# The follower nodes on the secondary replication cluster incorrectly report
+# unseal progress 2/3 (Issue: https://hashicorp.atlassian.net/browse/VAULT-12309),
+# so we restart the followers to clear the status and to auto-unseal in the case
+# of the awskms seal type
+resource "enos_remote_exec" "restart_followers" {
+  depends_on = [enos_remote_exec.wait_until_sealed]
+  for_each = {
+    for idx, follower in local.followers : idx => follower
+  }
+
+  inline = ["sudo systemctl restart vault"]
+
+  transport = {
+    ssh = {
+      host = element(var.follower_public_ips, each.key)
+    }
+  }
+}
+
+# We cannot use the vault_unseal resource due to the known issue
+# (https://hashicorp.atlassian.net/browse/VAULT-12311). 
We use a custom
+# script to allow retries when unsealing the secondary followers
+resource "enos_remote_exec" "unseal_followers" {
+  depends_on = [enos_remote_exec.restart_followers]
+  # The unseal keys are required only for seal_type shamir
+  for_each = {
+    for idx, follower in local.followers : idx => follower
+    if var.vault_seal_type == "shamir"
+  }
+
+  environment = {
+    VAULT_ADDR        = "http://127.0.0.1:8200"
+    VAULT_INSTALL_DIR = var.vault_install_dir
+    UNSEAL_KEYS       = join(",", var.vault_unseal_keys)
+  }
+
+  scripts = [abspath("${path.module}/scripts/unseal-node.sh")]
+
+  transport = {
+    ssh = {
+      host = element(var.follower_public_ips, each.key)
+    }
+  }
+}
+
+# This is a second attempt needed to unseal the secondary followers
+# using a custom script to get past the known issue
+# (https://hashicorp.atlassian.net/browse/VAULT-12311)
+resource "enos_remote_exec" "unseal_followers_again" {
+  depends_on = [enos_remote_exec.unseal_followers]
+  for_each = {
+    for idx, follower in local.followers : idx => follower
+    if var.vault_seal_type == "shamir"
+  }
+
+  environment = {
+    VAULT_ADDR        = "http://127.0.0.1:8200"
+    VAULT_INSTALL_DIR = var.vault_install_dir
+    UNSEAL_KEYS       = join(",", var.vault_unseal_keys)
+  }
+
+  scripts = [abspath("${path.module}/scripts/unseal-node.sh")]
+
+  transport = {
+    ssh = {
+      host = element(var.follower_public_ips, each.key)
+    }
+  }
+}
diff --git a/enos/modules/vault_unseal_nodes/scripts/unseal-node.sh b/enos/modules/vault_unseal_nodes/scripts/unseal-node.sh
new file mode 100755
index 0000000..3e78138
--- /dev/null
+++ b/enos/modules/vault_unseal_nodes/scripts/unseal-node.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+
+binpath=${VAULT_INSTALL_DIR}/vault
+
+IFS="," read -r -a keys <<< "${UNSEAL_KEYS}"
+
+function fail() {
+  echo "$1" 1>&2
+  exit 1
+}
+
+count=0
+retries=5
+while :; do
+  for key in "${keys[@]}"; do
+
+    # Check the Vault seal status
+    seal_status=$($binpath status -format json | jq '.sealed')
+
+    if [[ "$seal_status" == "true" ]]; then
+      echo "running unseal with $key count $count with retry $retries" >> /tmp/unseal_script.out
+      "$binpath" operator unseal "$key" > /dev/null 2>&1
+    else
+      exit 0
+    fi
+  done
+
+  wait=$((1 ** count))
+  count=$((count + 1))
+  if [ "$count" -lt "$retries" ]; then
+    sleep "$wait"
+  else
+    fail "failed to unseal node"
+  fi
+done
diff --git a/enos/modules/vault_unseal_nodes/scripts/wait-until-sealed.sh b/enos/modules/vault_unseal_nodes/scripts/wait-until-sealed.sh
new file mode 100644
index 0000000..c612be2
--- /dev/null
+++ b/enos/modules/vault_unseal_nodes/scripts/wait-until-sealed.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+
+binpath=${VAULT_INSTALL_DIR}/vault
+
+function fail() {
+  echo "$1" 1>&2
+  exit 1
+}
+
+count=0
+retries=5
+while :; do
+  # Check the Vault seal status
+  seal_status=$($binpath status -format json | jq '.sealed')
+
+  if [[ "$seal_status" == "true" ]]; then
+    exit 0
+  fi
+
+  wait=$((3 ** count))
+  count=$((count + 1))
+  if [ "$count" -lt "$retries" ]; then
+    sleep "$wait"
+  else
+    fail "Expected node to be sealed"
+  fi
+done
diff --git a/enos/modules/vault_upgrade/main.tf b/enos/modules/vault_upgrade/main.tf
new file mode 100644
index 0000000..819f6f6
--- /dev/null
+++ b/enos/modules/vault_upgrade/main.tf
@@ -0,0 +1,183 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + } + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +variable "vault_api_addr" { + type = string + description = "The API address of the Vault cluster" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "vault_instances" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} + +variable "vault_local_artifact_path" { + type = string + description = "The path to a locally built vault artifact to install" + default = null +} + +variable "vault_artifactory_release" { + type = object({ + username = string + token = string + url = string + sha256 = string + }) + description = "Vault release version and edition to install from artifactory.hashicorp.engineering" + default = null +} + +variable "vault_seal_type" { + type = string + description = "The Vault seal type" +} + +variable "vault_unseal_keys" { + type = list(string) + description = "The keys to use to unseal Vault when not using auto-unseal" + default = null +} + +locals { + instances = { + for idx in range(var.vault_instance_count) : idx => { + public_ip = values(var.vault_instances)[idx].public_ip + private_ip = values(var.vault_instances)[idx].private_ip + } + } + followers = toset([for idx in range(var.vault_instance_count - 1) : tostring(idx)]) + follower_ips = compact(split(" ", enos_remote_exec.get_follower_public_ips.stdout)) + vault_bin_path = "${var.vault_install_dir}/vault" +} + +resource "enos_bundle_install" "upgrade_vault_binary" { + for_each = local.instances + + destination = var.vault_install_dir + artifactory = var.vault_artifactory_release + path = var.vault_local_artifact_path + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +resource "enos_remote_exec" "get_leader_public_ip" { + depends_on = [enos_bundle_install.upgrade_vault_binary] + + scripts = [abspath("${path.module}/scripts/get-leader-public-ip.sh")] + + environment = { + VAULT_INSTALL_DIR = var.vault_install_dir, + VAULT_INSTANCES = jsonencode(local.instances) + } + + transport = { + ssh = { + host = local.instances[0].public_ip + } + } +} + +resource "enos_remote_exec" "get_follower_public_ips" { + depends_on = [enos_bundle_install.upgrade_vault_binary] + + environment = { + VAULT_INSTALL_DIR = var.vault_install_dir, + VAULT_INSTANCES = jsonencode(local.instances) + } + + scripts = [abspath("${path.module}/scripts/get-follower-public-ips.sh")] + + transport = { + ssh = { + host = local.instances[0].public_ip + } + } +} + +resource "enos_remote_exec" "restart_followers" { + for_each = local.followers + depends_on = [enos_remote_exec.get_follower_public_ips] + + scripts = [abspath("${path.module}/scripts/restart-vault.sh")] + + transport = { + ssh = { + host = trimspace(local.follower_ips[tonumber(each.key)]) + } + } +} + +resource "enos_vault_unseal" "followers" { + depends_on = [enos_remote_exec.restart_followers] + for_each = { + for idx, follower in local.followers : idx => follower + if var.vault_seal_type == "shamir" + } + bin_path = local.vault_bin_path + vault_addr = var.vault_api_addr + seal_type = var.vault_seal_type + unseal_keys = var.vault_unseal_keys + + transport = { + ssh = { + host = 
trimspace(local.follower_ips[each.key]) + } + } +} + +resource "enos_remote_exec" "restart_leader" { + depends_on = [enos_vault_unseal.followers] + + scripts = [abspath("${path.module}/scripts/restart-vault.sh")] + + transport = { + ssh = { + host = trimspace(enos_remote_exec.get_leader_public_ip.stdout) + } + } +} + +resource "enos_vault_unseal" "leader" { + count = var.vault_seal_type == "shamir" ? 1 : 0 + depends_on = [enos_remote_exec.restart_leader] + + bin_path = local.vault_bin_path + vault_addr = var.vault_api_addr + seal_type = var.vault_seal_type + unseal_keys = var.vault_unseal_keys + + transport = { + ssh = { + host = trimspace(enos_remote_exec.get_leader_public_ip.stdout) + } + } +} diff --git a/enos/modules/vault_upgrade/scripts/get-follower-public-ips.sh b/enos/modules/vault_upgrade/scripts/get-follower-public-ips.sh new file mode 100644 index 0000000..6c4c508 --- /dev/null +++ b/enos/modules/vault_upgrade/scripts/get-follower-public-ips.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +set -e + +binpath=${VAULT_INSTALL_DIR}/vault +export VAULT_ADDR="http://localhost:8200" + +instances=${VAULT_INSTANCES} + +# Find the leader +leader_address=$($binpath status -format json | jq '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")') + +# Get the public ip addresses of the followers +follower_ips=$(jq ".[] | select(.private_ip!=$leader_address) | .public_ip" <<< "$instances") + +echo "$follower_ips" | sed 's/\"//g' | tr '\n' ' ' diff --git a/enos/modules/vault_upgrade/scripts/get-leader-public-ip.sh b/enos/modules/vault_upgrade/scripts/get-leader-public-ip.sh new file mode 100644 index 0000000..1e8f367 --- /dev/null +++ b/enos/modules/vault_upgrade/scripts/get-leader-public-ip.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +set -e + +binpath=${VAULT_INSTALL_DIR}/vault +export VAULT_ADDR="http://localhost:8200" + +instances=${VAULT_INSTANCES} + +# Find the leader +leader_address=$($binpath status -format json | jq '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")') + +# Get the public ip address of the leader +leader_public=$(jq ".[] | select(.private_ip==$leader_address) | .public_ip" <<< "$instances") +#shellcheck disable=SC2001 +echo "$leader_public" | sed 's/\"//g' diff --git a/enos/modules/vault_upgrade/scripts/restart-vault.sh b/enos/modules/vault_upgrade/scripts/restart-vault.sh new file mode 100644 index 0000000..fc6b007 --- /dev/null +++ b/enos/modules/vault_upgrade/scripts/restart-vault.sh @@ -0,0 +1,8 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +set -eux + +sudo systemctl restart vault diff --git a/enos/modules/vault_verify_agent_output/main.tf b/enos/modules/vault_verify_agent_output/main.tf new file mode 100644 index 0000000..7e105b3 --- /dev/null +++ b/enos/modules/vault_verify_agent_output/main.tf @@ -0,0 +1,58 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +variable "vault_agent_template_destination" { + type = string + description = "The destination of the template rendered by Agent" +} + +variable "vault_agent_expected_output" { + type = string + description = "The output that's expected in the rendered template at vault_agent_template_destination" +} + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "vault_instances" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} + +locals { + vault_instances = { + for idx in range(var.vault_instance_count) : idx => { + public_ip = values(var.vault_instances)[idx].public_ip + private_ip = values(var.vault_instances)[idx].private_ip + } + } +} + +resource "enos_remote_exec" "verify_vault_agent_output" { + environment = { + VAULT_AGENT_TEMPLATE_DESTINATION = var.vault_agent_template_destination + VAULT_AGENT_EXPECTED_OUTPUT = var.vault_agent_expected_output + VAULT_INSTANCES = jsonencode(local.vault_instances) + } + + scripts = [abspath("${path.module}/scripts/verify-vault-agent-output.sh")] + + transport = { + ssh = { + host = local.vault_instances[0].public_ip + } + } +} diff --git a/enos/modules/vault_verify_agent_output/scripts/verify-vault-agent-output.sh b/enos/modules/vault_verify_agent_output/scripts/verify-vault-agent-output.sh new file mode 100644 index 0000000..8d7c7b3 --- /dev/null +++ b/enos/modules/vault_verify_agent_output/scripts/verify-vault-agent-output.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +set -e + +fail() { + echo "$1" 1>&2 + return 1 +} + +actual_output=$(cat "${VAULT_AGENT_TEMPLATE_DESTINATION}") +if [[ "$actual_output" != "${VAULT_AGENT_EXPECTED_OUTPUT}" ]]; then + fail "expected '${VAULT_AGENT_EXPECTED_OUTPUT}' to be the Agent output, but got: '$actual_output'" +fi diff --git a/enos/modules/vault_verify_autopilot/main.tf b/enos/modules/vault_verify_autopilot/main.tf new file mode 100644 index 0000000..60ccf34 --- /dev/null +++ b/enos/modules/vault_verify_autopilot/main.tf @@ -0,0 +1,71 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0
+
+terraform {
+  required_providers {
+    enos = {
+      source = "app.terraform.io/hashicorp-qti/enos"
+    }
+  }
+}
+
+variable "vault_install_dir" {
+  type        = string
+  description = "The directory where the Vault binary will be installed"
+}
+
+variable "vault_instance_count" {
+  type        = number
+  description = "How many vault instances are in the cluster"
+}
+
+variable "vault_instances" {
+  type = map(object({
+    private_ip = string
+    public_ip  = string
+  }))
+  description = "The vault cluster instances that were created"
+}
+
+variable "vault_root_token" {
+  type        = string
+  description = "The vault root token"
+}
+
+variable "vault_autopilot_upgrade_version" {
+  type        = string
+  description = "The version Vault should be upgraded to"
+}
+
+variable "vault_autopilot_upgrade_status" {
+  type        = string
+  description = "The expected autopilot upgrade status"
+}
+
+locals {
+  public_ips = {
+    for idx in range(var.vault_instance_count) : idx => {
+      public_ip  = values(var.vault_instances)[idx].public_ip
+      private_ip = values(var.vault_instances)[idx].private_ip
+    }
+  }
+}
+
+resource "enos_remote_exec" "smoke-verify-autopilot" {
+  for_each = local.public_ips
+
+  environment = {
+    VAULT_INSTALL_DIR               = var.vault_install_dir,
+    VAULT_TOKEN                     = var.vault_root_token,
+    VAULT_AUTOPILOT_UPGRADE_STATUS  = var.vault_autopilot_upgrade_status,
+    VAULT_AUTOPILOT_UPGRADE_VERSION = var.vault_autopilot_upgrade_version,
+  }
+
+  scripts = [abspath("${path.module}/scripts/smoke-verify-autopilot.sh")]
+
+  transport = {
+    ssh = {
+      host = each.value.public_ip
+    }
+  }
+}
diff --git a/enos/modules/vault_verify_autopilot/scripts/smoke-verify-autopilot.sh b/enos/modules/vault_verify_autopilot/scripts/smoke-verify-autopilot.sh
new file mode 100755
index 0000000..9570c27
--- /dev/null
+++ b/enos/modules/vault_verify_autopilot/scripts/smoke-verify-autopilot.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0 + +fail() { + echo "$1" 1>&2 + exit 1 +} + +export VAULT_ADDR="http://localhost:8200" + +[[ -z "$VAULT_AUTOPILOT_UPGRADE_STATUS" ]] && fail "VAULT_AUTOPILOT_UPGRADE_STATUS env variable has not been set" +[[ -z "$VAULT_AUTOPILOT_UPGRADE_VERSION" ]] && fail "VAULT_AUTOPILOT_UPGRADE_VERSION env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +count=0 +retries=8 +while :; do + state=$($binpath read -format=json sys/storage/raft/autopilot/state) + status="$(jq -r '.data.upgrade_info.status' <<< "$state")" + target_version="$(jq -r '.data.upgrade_info.target_version' <<< "$state")" + + if [ "$status" = "$VAULT_AUTOPILOT_UPGRADE_STATUS" ] && [ "$target_version" = "$VAULT_AUTOPILOT_UPGRADE_VERSION" ]; then + exit 0 + fi + + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + echo "Expected autopilot status to be $VAULT_AUTOPILOT_UPGRADE_STATUS, got $status" + echo "Expected autopilot target_version to be $VAULT_AUTOPILOT_UPGRADE_VERSION, got $target_version" + sleep "$wait" + else + echo "$state" + echo "Expected autopilot status to be $VAULT_AUTOPILOT_UPGRADE_STATUS, got $status" + echo "Expected autopilot target_version to be $VAULT_AUTOPILOT_UPGRADE_VERSION, got $target_version" + fail "Autopilot did not get into the correct status" + fi +done diff --git a/enos/modules/vault_verify_performance_replication/main.tf b/enos/modules/vault_verify_performance_replication/main.tf new file mode 100644 index 0000000..6604f8c --- /dev/null +++ b/enos/modules/vault_verify_performance_replication/main.tf @@ -0,0 +1,106 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +variable "vault_cluster_addr_port" { + description = "The Raft cluster address port" + type = string + default = "8201" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "primary_leader_public_ip" { + type = string + description = "Vault primary cluster leader Public IP address" +} + +variable "primary_leader_private_ip" { + type = string + description = "Vault primary cluster leader Private IP address" +} + +variable "secondary_leader_public_ip" { + type = string + description = "Vault secondary cluster leader Public IP address" +} + +variable "secondary_leader_private_ip" { + type = string + description = "Vault secondary cluster leader Private IP address" +} + +variable "wrapping_token" { + type = string + description = "The wrapping token created on primary cluster" + default = null +} + +locals { + primary_replication_status = jsondecode(enos_remote_exec.verify_replication_status_on_primary.stdout) + secondary_replication_status = jsondecode(enos_remote_exec.verify_replication_status_on_secondary.stdout) +} + +resource "enos_remote_exec" "verify_replication_status_on_primary" { + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_INSTALL_DIR = var.vault_install_dir + PRIMARY_LEADER_PRIV_IP = var.primary_leader_private_ip + SECONDARY_LEADER_PRIV_IP = var.secondary_leader_private_ip + } + + scripts = [abspath("${path.module}/scripts/verify-replication-status.sh")] + + transport = { + ssh = { + host = var.primary_leader_public_ip + } + } +} + +resource "enos_remote_exec" "verify_replication_status_on_secondary" { + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_INSTALL_DIR = var.vault_install_dir + PRIMARY_LEADER_PRIV_IP = var.primary_leader_private_ip + SECONDARY_LEADER_PRIV_IP = var.secondary_leader_private_ip + } + + scripts = [abspath("${path.module}/scripts/verify-replication-status.sh")] + + transport = { + ssh = { + host = var.secondary_leader_public_ip + } + } +} + +output "primary_replication_status" { + value = local.primary_replication_status +} + +output "known_primary_cluster_addrs" { + value = local.secondary_replication_status.data.known_primary_cluster_addrs +} + +output "secondary_replication_status" { + value = local.secondary_replication_status +} + +output "primary_replication_data_secondaries" { + value = local.primary_replication_status.data.secondaries +} + +output "secondary_replication_data_primaries" { + value = local.secondary_replication_status.data.primaries +} diff --git a/enos/modules/vault_verify_performance_replication/scripts/verify-replication-status.sh b/enos/modules/vault_verify_performance_replication/scripts/verify-replication-status.sh new file mode 100644 index 0000000..138678e --- /dev/null +++ b/enos/modules/vault_verify_performance_replication/scripts/verify-replication-status.sh @@ -0,0 +1,85 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + + +# This script waits for the replication status to be established +# then verifies the performance replication between primary and +# secondary clusters + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$PRIMARY_LEADER_PRIV_IP" ]] && fail "PRIMARY_LEADER_PRIV_IP env variable has not been set" +[[ -z "$SECONDARY_LEADER_PRIV_IP" ]] && fail "SECONDARY_LEADER_PRIV_IP env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +retry() { + local retries=$1 + shift + local count=0 + + until "$@"; do + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + fail "$($binpath read -format=json sys/replication/performance/status)" + fi + done +} + +check_pr_status() { + pr_status=$($binpath read -format=json sys/replication/performance/status) + cluster_state=$(echo "$pr_status" | jq -r '.data.state') + connection_mode=$(echo "$pr_status" | jq -r '.data.mode') + + if [[ "$cluster_state" == 'idle' ]]; then + echo "replication cluster state is idle" 1>&2 + return 1 + fi + + if [[ "$connection_mode" == "primary" ]]; then + connection_status=$(echo "$pr_status" | jq -r '.data.secondaries[0].connection_status') + if [[ "$connection_status" == 'disconnected' ]]; then + echo ".data.secondaries[0].connection_status from primary node is 'disconnected'" 1>&2 + return 1 + fi + secondary_cluster_addr=$(echo "$pr_status" | jq -r '.data.secondaries[0].cluster_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")') + if [[ "$secondary_cluster_addr" != "$SECONDARY_LEADER_PRIV_IP" ]]; then + echo ".data.secondaries[0].cluster_address should have an IP address of $SECONDARY_LEADER_PRIV_IP, got: $secondary_cluster_addr" 1>&2 + return 1 + fi + else + connection_status=$(echo "$pr_status" | jq -r '.data.primaries[0].connection_status') + if [[ "$connection_status" == 'disconnected' ]]; then + echo ".data.primaries[0].connection_status from secondary node is 'disconnected'" 1>&2 + return 1 + fi + primary_cluster_addr=$(echo "$pr_status" | jq -r '.data.primaries[0].cluster_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")') + if [[ "$primary_cluster_addr" != "$PRIMARY_LEADER_PRIV_IP" ]]; then + echo ".data.primaries[0].cluster_address should have an IP address of $PRIMARY_LEADER_PRIV_IP, got: $primary_cluster_addr" 1>&2 + return 1 + fi + known_primary_cluster_addrs=$(echo "$pr_status" | jq -r '.data.known_primary_cluster_addrs') + if ! echo "$known_primary_cluster_addrs" | grep -q "$PRIMARY_LEADER_PRIV_IP"; then + echo "$PRIMARY_LEADER_PRIV_IP is not in .data.known_primary_cluster_addrs: $known_primary_cluster_addrs" 1>&2 + return 1 + fi + fi + + echo "$pr_status" + return 0 +} + +# Retry for a while because it can take some time for replication to sync +retry 10 check_pr_status diff --git a/enos/modules/vault_verify_raft_auto_join_voter/main.tf b/enos/modules/vault_verify_raft_auto_join_voter/main.tf new file mode 100644 index 0000000..50d3989 --- /dev/null +++ b/enos/modules/vault_verify_raft_auto_join_voter/main.tf @@ -0,0 +1,67 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +variable "vault_cluster_addr_port" { + description = "The Raft cluster address port" + type = string + default = "8201" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "vault_instances" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +locals { + instances = { + for idx in range(var.vault_instance_count) : idx => { + public_ip = values(var.vault_instances)[idx].public_ip + private_ip = values(var.vault_instances)[idx].private_ip + } + } +} + +resource "enos_remote_exec" "verify_raft_auto_join_voter" { + for_each = local.instances + + environment = { + VAULT_CLUSTER_ADDR = "${each.value.private_ip}:${var.vault_cluster_addr_port}" + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_LOCAL_BINARY_PATH = "${var.vault_install_dir}/vault" + VAULT_TOKEN = var.vault_root_token + } + + scripts = [abspath("${path.module}/scripts/verify-raft-auto-join-voter.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/vault_verify_raft_auto_join_voter/scripts/verify-raft-auto-join-voter.sh b/enos/modules/vault_verify_raft_auto_join_voter/scripts/verify-raft-auto-join-voter.sh new file mode 100644 index 0000000..6e5ba40 --- /dev/null +++ b/enos/modules/vault_verify_raft_auto_join_voter/scripts/verify-raft-auto-join-voter.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +set -e + +binpath=${VAULT_INSTALL_DIR}/vault + +fail() { + echo "$1" 1>&2 + return 1 +} + +retry() { + local retries=$1 + shift + local count=0 + + until "$@"; do + exit=$? + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + echo "retry $count" + else + return "$exit" + fi + done + + return 0 +} + +check_voter_status() { + voter_status=$($binpath operator raft list-peers -format json | jq -Mr --argjson expected "true" --arg ADDR "$VAULT_CLUSTER_ADDR" '.data.config.servers[] | select(.address==$ADDR) | .voter == $expected') + + if [[ "$voter_status" != 'true' ]]; then + fail "expected $VAULT_CLUSTER_ADDR to be raft voter, got raft status for node: $($binpath operator raft list-peers -format json | jq -Mr --arg ADDR "$VAULT_CLUSTER_ADDR" '.data.config.servers[] | select(.address==$ADDR)')" + fi +} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_ADDR='http://127.0.0.1:8200' +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +# Retry a few times because it can take some time for things to settle after +# all the nodes are unsealed +retry 5 check_voter_status diff --git a/enos/modules/vault_verify_read_data/main.tf b/enos/modules/vault_verify_read_data/main.tf new file mode 100644 index 0000000..8a4d7ea --- /dev/null +++ b/enos/modules/vault_verify_read_data/main.tf @@ -0,0 +1,48 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "node_public_ips" { + type = list(string) + description = "Vault cluster node Public IP address" +} + +locals { + followers = toset([for idx in range(var.vault_instance_count - 1) : tostring(idx)]) + vault_bin_path = "${var.vault_install_dir}/vault" +} + +resource "enos_remote_exec" "verify_kv_on_node" { + for_each = { + for idx, follower in local.followers : idx => follower + } + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/verify-data.sh")] + + transport = { + ssh = { + host = element(var.node_public_ips, each.key) + } + } +} diff --git a/enos/modules/vault_verify_read_data/scripts/verify-data.sh b/enos/modules/vault_verify_read_data/scripts/verify-data.sh new file mode 100644 index 0000000..7af6be3 --- /dev/null +++ b/enos/modules/vault_verify_read_data/scripts/verify-data.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +set -e + +function retry { + local retries=$1 + shift + local count=0 + + until "$@"; do + exit=$? + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + return "$exit" + fi + done + + return 0 +} + +fail() { + echo "$1" 1>&2 + return 1 +} + +binpath="${VAULT_INSTALL_DIR}/vault" + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +# To keep the authentication method and module verification consistent between all +# Enos scenarios we authenticate using testuser created by vault_verify_write_data module +retry 5 "$binpath" login -method=userpass username=testuser password=passuser1 +retry 5 "$binpath" kv get secret/test diff --git a/enos/modules/vault_verify_replication/main.tf b/enos/modules/vault_verify_replication/main.tf new file mode 100644 index 0000000..3bfb31b --- /dev/null +++ b/enos/modules/vault_verify_replication/main.tf @@ -0,0 +1,36 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +locals { + instances = { + for idx in range(var.vault_instance_count) : idx => { + public_ip = values(var.vault_instances)[idx].public_ip + private_ip = values(var.vault_instances)[idx].private_ip + } + } +} + +resource "enos_remote_exec" "smoke-verify-replication" { + for_each = local.instances + + environment = { + VAULT_EDITION = var.vault_edition + } + + scripts = [abspath("${path.module}/scripts/smoke-verify-replication.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/vault_verify_replication/scripts/smoke-verify-replication.sh b/enos/modules/vault_verify_replication/scripts/smoke-verify-replication.sh new file mode 100644 index 0000000..dfe2bef --- /dev/null +++ b/enos/modules/vault_verify_replication/scripts/smoke-verify-replication.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + + +# The Vault replication smoke test, documented in +# https://docs.google.com/document/d/16sjIk3hzFDPyY5A9ncxTZV_9gnpYSF1_Vx6UA1iiwgI/edit#heading=h.kgrxf0f1et25 + +set -e + +function fail() { + echo "$1" 1>&2 + exit 1 +} + +# Replication status endpoint should have data.mode disabled for CE release +status=$(curl -s http://localhost:8200/v1/sys/replication/status) +if [ "$VAULT_EDITION" == "ce" ]; then + if [ "$(jq -r '.data.mode' <<< "$status")" != "disabled" ]; then + fail "replication data mode is not disabled for CE release!" + fi +else + if [ "$(jq -r '.data.dr' <<< "$status")" == "" ]; then + fail "DR replication should be available for an ENT release!" + fi + if [ "$(jq -r '.data.performance' <<< "$status")" == "" ]; then + fail "Performance replication should be available for an ENT release!" + fi +fi diff --git a/enos/modules/vault_verify_replication/variables.tf b/enos/modules/vault_verify_replication/variables.tf new file mode 100644 index 0000000..26ac75c --- /dev/null +++ b/enos/modules/vault_verify_replication/variables.tf @@ -0,0 +1,27 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +variable "vault_edition" { + type = string + description = "The vault product edition" + default = null +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "vault_instances" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} diff --git a/enos/modules/vault_verify_ui/main.tf b/enos/modules/vault_verify_ui/main.tf new file mode 100644 index 0000000..a37b7cd --- /dev/null +++ b/enos/modules/vault_verify_ui/main.tf @@ -0,0 +1,36 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +locals { + instances = { + for idx in range(var.vault_instance_count) : idx => { + public_ip = values(var.vault_instances)[idx].public_ip + private_ip = values(var.vault_instances)[idx].private_ip + } + } +} + +resource "enos_remote_exec" "smoke-verify-ui" { + for_each = local.instances + + environment = { + VAULT_ADDR = var.vault_addr, + } + + scripts = [abspath("${path.module}/scripts/smoke-verify-ui.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/vault_verify_ui/scripts/smoke-verify-ui.sh b/enos/modules/vault_verify_ui/scripts/smoke-verify-ui.sh new file mode 100644 index 0000000..9642238 --- /dev/null +++ b/enos/modules/vault_verify_ui/scripts/smoke-verify-ui.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +url_effective=$(curl -w "%{url_effective}\n" -I -L -s -S "${VAULT_ADDR}" -o /dev/null) +expected="${VAULT_ADDR}/ui/" +if [ "${url_effective}" != "${expected}" ]; then + fail "Expecting Vault to redirect to UI. Expected: ${expected}, got: ${url_effective}" +fi + +if curl -s "${VAULT_ADDR}/ui/" | grep -q 'Vault UI is not available'; then + fail "Vault UI is not available" +fi diff --git a/enos/modules/vault_verify_ui/variables.tf b/enos/modules/vault_verify_ui/variables.tf new file mode 100644 index 0000000..f22fdf2 --- /dev/null +++ b/enos/modules/vault_verify_ui/variables.tf @@ -0,0 +1,21 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +variable "vault_addr" { + type = string + description = "The vault cluster address" + default = "http://localhost:8200" +} + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "vault_instances" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} diff --git a/enos/modules/vault_verify_undo_logs/main.tf b/enos/modules/vault_verify_undo_logs/main.tf new file mode 100644 index 0000000..4bd0349 --- /dev/null +++ b/enos/modules/vault_verify_undo_logs/main.tf @@ -0,0 +1,60 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "vault_instances" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +locals { + public_ips = { + for idx in range(var.vault_instance_count) : idx => { + public_ip = values(var.vault_instances)[idx].public_ip + private_ip = values(var.vault_instances)[idx].private_ip + } + } +} + +resource "enos_remote_exec" "smoke-verify-undo-logs" { + for_each = local.public_ips + + environment = { + VAULT_ADDR = "http://localhost:8200" + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_TOKEN = var.vault_root_token + } + + scripts = [abspath("${path.module}/scripts/smoke-verify-undo-logs.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh b/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh new file mode 100644 index 0000000..16c0b6f --- /dev/null +++ b/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +function fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +count=0 +retries=5 +while :; do + state=$($binpath read sys/metrics -format=json | jq -r '.data.Gauges[] | select(.Name == "vault.core.replication.write_undo_logs")') + target_undo_logs_status="$(jq -r '.Value' <<< "$state")" + + if [ "$target_undo_logs_status" == "1" ]; then + exit 0 + fi + + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + echo "Waiting for vault.core.replication.write_undo_logs to have Value:1" + echo "$state" + sleep "$wait" + else + fail "Timed out waiting for vault.core.replication.write_undo_logs to have Value:1" + fi +done diff --git a/enos/modules/vault_verify_unsealed/main.tf b/enos/modules/vault_verify_unsealed/main.tf new file mode 100644 index 0000000..4e4362f --- /dev/null +++ b/enos/modules/vault_verify_unsealed/main.tf @@ -0,0 +1,60 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +variable "vault_cluster_addr_port" { + description = "The Raft cluster address port" + type = string + default = "8201" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "vault_instances" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} + +locals { + instances = { + for idx in range(var.vault_instance_count) : idx => { + public_ip = values(var.vault_instances)[idx].public_ip + private_ip = values(var.vault_instances)[idx].private_ip + } + } +} + +resource "enos_remote_exec" "verify_node_unsealed" { + for_each = local.instances + + scripts = [abspath("${path.module}/scripts/verify-vault-node-unsealed.sh")] + + environment = { + VAULT_CLUSTER_ADDR = "${each.value.private_ip}:${var.vault_cluster_addr_port}" + VAULT_INSTALL_DIR = var.vault_install_dir + } + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/vault_verify_unsealed/scripts/verify-vault-node-unsealed.sh b/enos/modules/vault_verify_unsealed/scripts/verify-vault-node-unsealed.sh new file mode 100644 index 0000000..39dc20d --- /dev/null +++ b/enos/modules/vault_verify_unsealed/scripts/verify-vault-node-unsealed.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +set -e + +binpath=${VAULT_INSTALL_DIR}/vault + +fail() { + echo "$1" 1>&2 + exit 1 +} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_ADDR=http://localhost:8200 + +count=0 +retries=4 +while :; do + health_status=$(curl -s "${VAULT_CLUSTER_ADDR}/v1/sys/health" |jq '.') + unseal_status=$($binpath status -format json | jq -Mr --argjson expected "false" '.sealed == $expected') + if [[ "$unseal_status" == 'true' ]]; then + echo "$health_status" + exit 0 + fi + + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + fail "expected ${VAULT_CLUSTER_ADDR} to be unsealed, got unseal status: $unseal_status" + fi +done diff --git a/enos/modules/vault_verify_version/main.tf b/enos/modules/vault_verify_version/main.tf new file mode 100644 index 0000000..cefebfe --- /dev/null +++ b/enos/modules/vault_verify_version/main.tf @@ -0,0 +1,88 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +variable "vault_build_date" { + type = string + description = "The Vault artifact build date" + default = null +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_instance_count" { + type = number + description = "How many Vault instances are in the cluster" +} + +variable "vault_instances" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The Vault cluster instances that were created" +} + +variable "vault_product_version" { + type = string + description = "The Vault product version" + default = null +} + +variable "vault_edition" { + type = string + description = "The Vault product edition" + default = null +} + +variable "vault_revision" { + type = string + description = "The Vault product revision" + default = null +} + +variable "vault_root_token" { + type = string + description = "The Vault root token" + default = null +} + +locals { + instances = { + for idx in range(var.vault_instance_count) : idx => { + public_ip = values(var.vault_instances)[idx].public_ip + private_ip = values(var.vault_instances)[idx].private_ip + } + } +} + +resource "enos_remote_exec" "verify_all_nodes_have_updated_version" { + for_each = local.instances + + environment = { + VAULT_INSTALL_DIR = var.vault_install_dir, + VAULT_BUILD_DATE = var.vault_build_date, + VAULT_VERSION = var.vault_product_version, + VAULT_EDITION = var.vault_edition, + VAULT_REVISION = var.vault_revision, + VAULT_TOKEN = var.vault_root_token, + } + + scripts = [abspath("${path.module}/scripts/verify-cluster-version.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/vault_verify_version/scripts/verify-cluster-version.sh b/enos/modules/vault_verify_version/scripts/verify-cluster-version.sh new file mode 100644 index 0000000..97edd67 --- /dev/null +++ b/enos/modules/vault_verify_version/scripts/verify-cluster-version.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +# Verify the Vault "version" includes the correct base version, build date, +# revision SHA, and edition metadata. 
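+# +# Illustrative example (hypothetical values, not taken from a real build): with +# VAULT_VERSION=1.14.8, VAULT_REVISION=deadbeef, and VAULT_BUILD_DATE=2023-10-23T15:04:05Z, +# an ent.hsm edition would expect "Vault v1.14.8 (deadbeef), built 2023-10-23T15:04:05Z (cgo)", +# per the version_expected template assembled below. +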
+set -e + +binpath=${VAULT_INSTALL_DIR}/vault +edition=${VAULT_EDITION} +version=${VAULT_VERSION} +sha=${VAULT_REVISION} +build_date=${VAULT_BUILD_DATE} +# VAULT_TOKEN must also be set + +fail() { + echo "$1" 1>&2 + exit 1 +} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +export VAULT_ADDR='http://127.0.0.1:8200' +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +version_expected="Vault v$version ($sha), built $build_date" + +case "$edition" in + *ce) ;; + *ent) ;; + *ent.hsm) version_expected="$version_expected (cgo)";; + *ent.fips1402) version_expected="$version_expected (cgo)" ;; + *ent.hsm.fips1402) version_expected="$version_expected (cgo)" ;; + *) fail "Unknown Vault edition: ($edition)" ;; +esac + +version_expected_nosha=$(echo "$version_expected" | awk '!($3="")' | sed 's/  / /' | sed -e 's/[[:space:]]*$//') +version_output=$("$binpath" version) + +if [[ "$version_output" == "$version_expected_nosha" ]] || [[ "$version_output" == "$version_expected" ]]; then + echo "Version verification succeeded!" +else + fail "expected Version=$version_expected or $version_expected_nosha, got: $version_output" +fi diff --git a/enos/modules/vault_verify_write_data/main.tf b/enos/modules/vault_verify_write_data/main.tf new file mode 100644 index 0000000..2369e51 --- /dev/null +++ b/enos/modules/vault_verify_write_data/main.tf @@ -0,0 +1,95 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_instance_count" { + type = number + description = "How many Vault instances are in the cluster" +} + +variable "leader_public_ip" { + type = string + description = "Vault cluster leader Public IP address" +} + +variable "leader_private_ip" { + type = string + description = "Vault cluster leader Private IP address" +} + +variable "vault_instances" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The Vault cluster instances that were created" +} + +variable "vault_root_token" { + type = string + description = "The Vault root token" + default = null +} + +locals { + instances = { + for idx in range(var.vault_instance_count) : idx => { + public_ip = values(var.vault_instances)[idx].public_ip + private_ip = values(var.vault_instances)[idx].private_ip + } + } +} + +# We use this module to verify that we can write data in all Enos scenarios. Since we cannot use the +# Vault root token to authenticate to secondary clusters in replication scenarios, we add a regular user +# here to keep the authentication method and module verification consistent across all scenarios. +resource "enos_remote_exec" "smoke-enable-secrets-kv" { + # Only enable the secrets engine on the leader node + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/smoke-enable-secrets-kv.sh")] + + transport = { + ssh = { + host = var.leader_public_ip + } + } +} + +# Verify that we can enable the k/v secrets engine and write data to it. 
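+# Each instance writes its own key/value pair (TEST_KEY "smoke${each.key}", TEST_VALUE "fire"), +# so the write path is exercised on every node in the cluster rather than only on the leader.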
+resource "enos_remote_exec" "smoke-write-test-data" { + depends_on = [enos_remote_exec.smoke-enable-secrets-kv] + for_each = local.instances + + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + TEST_KEY = "smoke${each.key}" + TEST_VALUE = "fire" + } + + scripts = [abspath("${path.module}/scripts/smoke-write-test-data.sh")] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/vault_verify_write_data/scripts/smoke-enable-secrets-kv.sh b/enos/modules/vault_verify_write_data/scripts/smoke-enable-secrets-kv.sh new file mode 100644 index 0000000..042cb62 --- /dev/null +++ b/enos/modules/vault_verify_write_data/scripts/smoke-enable-secrets-kv.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +set -e + +retry() { + local retries=$1 + shift + local count=0 + + until "$@"; do + exit=$? + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + return "$exit" + fi + done + + return 0 +} + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +retry 5 "$binpath" status > /dev/null 2>&1 + +# Create user policy +retry 5 "$binpath" policy write reguser -< /dev/null 2>&1 + +# Create new user and attach reguser policy +retry 5 "$binpath" write auth/userpass/users/testuser password="passuser1" policies="reguser" + +retry 5 "$binpath" secrets enable -path="secret" kv diff --git a/enos/modules/vault_verify_write_data/scripts/smoke-write-test-data.sh b/enos/modules/vault_verify_write_data/scripts/smoke-write-test-data.sh new file mode 100644 index 0000000..7c2577a --- /dev/null +++ b/enos/modules/vault_verify_write_data/scripts/smoke-write-test-data.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +set -e + +retry() { + local retries=$1 + shift + local count=0 + + until "$@"; do + exit=$? + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + return "$exit" + fi + done + + return 0 +} + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$TEST_KEY" ]] && fail "TEST_KEY env variable has not been set" +[[ -z "$TEST_VALUE" ]] && fail "TEST_VALUE env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +retry 5 "$binpath" kv put secret/test "$TEST_KEY=$TEST_VALUE" diff --git a/enos/modules/vault_wait_for_leader/main.tf b/enos/modules/vault_wait_for_leader/main.tf new file mode 100644 index 0000000..1f02181 --- /dev/null +++ b/enos/modules/vault_wait_for_leader/main.tf @@ -0,0 +1,68 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +variable "vault_instance_count" { + type = number + description = "The number of instances in the vault cluster" +} + +variable "vault_hosts" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The vault cluster hosts that can be expected as a leader" +} + +variable "timeout" { + type = number + description = "The max number of seconds to wait before timing out" + default = 60 +} + +variable "retry_interval" { + type = number + description = "How many seconds to wait between each retry" + default = 2 +} + +locals { + private_ips = [for k, v in values(tomap(var.vault_hosts)) : tostring(v["private_ip"])] +} + +resource "enos_remote_exec" "wait_for_leader_in_vault_hosts" { + environment = { + RETRY_INTERVAL = var.retry_interval + TIMEOUT_SECONDS = var.timeout + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_TOKEN = var.vault_root_token + VAULT_INSTANCE_PRIVATE_IPS = jsonencode(local.private_ips) + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/wait-for-leader.sh")] + + transport = { + ssh = { + host = var.vault_hosts[0].public_ip + } + } +} diff --git a/enos/modules/vault_wait_for_leader/scripts/wait-for-leader.sh b/enos/modules/vault_wait_for_leader/scripts/wait-for-leader.sh new file mode 100644 index 0000000..43705cc --- /dev/null +++ b/enos/modules/vault_wait_for_leader/scripts/wait-for-leader.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_INSTANCE_PRIVATE_IPS" ]] && fail "VAULT_INSTANCE_PRIVATE_IPS env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +findLeaderInPrivateIPs() { + # Find the leader private IP address + local leader_private_ip + if ! leader_private_ip=$($binpath read sys/leader -format=json | jq -er '.data.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")') ; then + # Some older versions of vault don't support reading sys/leader. Fallback to the cli status. + if ! leader_private_ip=$($binpath status -format json | jq -er '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'); then + return 1 + fi + fi + + if isIn=$(jq -er --arg ip "$leader_private_ip" 'map(select(. == $ip)) | length == 1' <<< "$VAULT_INSTANCE_PRIVATE_IPS"); then + if [[ "$isIn" == "true" ]]; then + echo "$leader_private_ip" + return 0 + fi + fi + + return 1 +} + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "$end_time" ]; do + if findLeaderInPrivateIPs; then + exit 0 + fi + + sleep "$RETRY_INTERVAL" +done + +fail "Timed out waiting for one of $VAULT_INSTANCE_PRIVATE_IPS to be leader." 
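+ +# Note: with the module defaults (TIMEOUT_SECONDS=60, RETRY_INTERVAL=2) the loop +# above makes roughly 30 attempts to find an expected leader before timing out.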
diff --git a/enos/modules/vault_wait_for_seal_rewrap/main.tf b/enos/modules/vault_wait_for_seal_rewrap/main.tf new file mode 100644 index 0000000..ba6cbb3 --- /dev/null +++ b/enos/modules/vault_wait_for_seal_rewrap/main.tf @@ -0,0 +1,67 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +variable "vault_instance_count" { + type = number + description = "The number of instances in the vault cluster" +} + +variable "vault_hosts" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The vault cluster hosts that can be expected as a leader" +} + +variable "timeout" { + type = number + description = "The max number of seconds to wait before timing out" + default = 60 +} + +variable "retry_interval" { + type = number + description = "How many seconds to wait between each retry" + default = 2 +} + +locals { + private_ips = [for k, v in values(tomap(var.vault_hosts)) : tostring(v["private_ip"])] +} + +resource "enos_remote_exec" "wait_for_seal_rewrap_to_be_completed" { + environment = { + RETRY_INTERVAL = var.retry_interval + TIMEOUT_SECONDS = var.timeout + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/wait-for-seal-rewrap.sh")] + + transport = { + ssh = { + host = var.vault_hosts[0].public_ip + } + } +} diff --git a/enos/modules/vault_wait_for_seal_rewrap/scripts/wait-for-seal-rewrap.sh b/enos/modules/vault_wait_for_seal_rewrap/scripts/wait-for-seal-rewrap.sh new file mode 100644 index 0000000..0125b86 --- /dev/null +++ b/enos/modules/vault_wait_for_seal_rewrap/scripts/wait-for-seal-rewrap.sh @@ -0,0 +1,67 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +getRewrapData() { + $binpath read sys/sealwrap/rewrap -format=json | jq -eMc '.data' +} + +waitForRewrap() { + local data + if ! data=$(getRewrapData); then + echo "failed getting /v1/sys/sealwrap/rewrap data" 1>&2 + return 1 + fi + + if ! jq -e '.is_running == false' <<< "$data" &> /dev/null; then + echo "rewrap is running" 1>&2 + return 1 + fi + + if ! jq -e '.entries.failed == 0' <<< "$data" &> /dev/null; then + local entries + entries=$(jq -Mc '.entries.failed' <<< "$data") + echo "rewrap has $entries failed entries" 1>&2 + return 1 + fi + + if ! 
jq -e '.entries.processed == .entries.succeeded' <<< "$data" &> /dev/null; then + local processed + local succeeded + processed=$(jq -Mc '.entries.processed' <<< "$data") + succeeded=$(jq -Mc '.entries.succeeded' <<< "$data") + echo "the number of processed entries ($processed) does not equal the number of succeeded ($succeeded)" 1>&2 + return 1 + fi + + return 0 +} + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "$end_time" ]; do + if waitForRewrap; then + exit 0 + fi + + sleep "$RETRY_INTERVAL" +done + +fail "Timed out waiting for seal rewrap to be completed. Data: $(getRewrapData)" diff --git a/enos/modules/verify_seal_type/main.tf b/enos/modules/verify_seal_type/main.tf new file mode 100644 index 0000000..2a1aa9f --- /dev/null +++ b/enos/modules/verify_seal_type/main.tf @@ -0,0 +1,52 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_instance_count" { + type = number + description = "How many vault instances are in the cluster" +} + +variable "vault_hosts" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} + +variable "seal_type" { + type = string + description = "The expected seal type" + default = "shamir" +} + +resource "enos_remote_exec" "verify_seal_type" { + for_each = var.vault_hosts + + scripts = [abspath("${path.module}/scripts/verify-seal-type.sh")] + + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_INSTALL_DIR = var.vault_install_dir + EXPECTED_SEAL_TYPE = var.seal_type + } + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/verify_seal_type/scripts/verify-seal-type.sh b/enos/modules/verify_seal_type/scripts/verify-seal-type.sh new file mode 100644 index 0000000..adc8644 --- /dev/null +++ b/enos/modules/verify_seal_type/scripts/verify-seal-type.sh @@ -0,0 +1,37 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$EXPECTED_SEAL_TYPE" ]] && fail "EXPECTED_SEAL_TYPE env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +count=0 +retries=2 +while :; do + if seal_status=$($binpath read sys/seal-status -format=json); then + if jq -Mer --arg expected "$EXPECTED_SEAL_TYPE" '.data.type == $expected' <<< "$seal_status" &> /dev/null; then + exit 0 + fi + fi + + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + printf "Seal Status: %s\n" "$seal_status" + got=$(jq -Mer '.data.type' <<< "$seal_status") + fail "Expected seal type to be $EXPECTED_SEAL_TYPE, got: $got" + fi +done diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..0cdfea2 --- /dev/null +++ b/go.mod @@ -0,0 +1,519 @@ +module github.com/hashicorp/vault + +// The go version directive value isn't consulted when building our production binaries, +// and the vault module isn't intended to be imported into other projects. 
As such the +// impact of this setting is usually rather limited. Note however that in some cases the +// Go project introduces new semantics for handling of go.mod depending on the value. +// +// The general policy for updating it is: when the Go major version used on the branch is +// updated. If we choose not to do so at some point (e.g. because we don't want some new +// semantic related to Go module handling), this comment should be updated to explain that. +// +// Whenever this value gets updated, sdk/go.mod should be updated to the same value. +go 1.20 + +replace github.com/hashicorp/vault/api => ./api + +replace github.com/hashicorp/vault/api/auth/approle => ./api/auth/approle + +replace github.com/hashicorp/vault/api/auth/kubernetes => ./api/auth/kubernetes + +replace github.com/hashicorp/vault/api/auth/userpass => ./api/auth/userpass + +replace github.com/hashicorp/vault/sdk => ./sdk + +require ( + cloud.google.com/go/monitoring v1.15.1 + cloud.google.com/go/spanner v1.47.0 + cloud.google.com/go/storage v1.30.1 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 + github.com/Azure/azure-storage-blob-go v0.15.0 + github.com/Azure/go-autorest/autorest v0.11.29 + github.com/Azure/go-autorest/autorest/adal v0.9.22 + github.com/ProtonMail/go-crypto v0.0.0-20230626094100-7e9e0395ebec + github.com/SAP/go-hdb v0.14.1 + github.com/Sectorbob/mlab-ns2 v0.0.0-20171030222938-d3aa0c295a8a + github.com/aerospike/aerospike-client-go/v5 v5.6.0 + github.com/aliyun/alibaba-cloud-sdk-go v1.62.301 + github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5 + github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2 + github.com/armon/go-metrics v0.4.1 + github.com/armon/go-radix v1.0.0 + github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef + github.com/aws/aws-sdk-go v1.44.269 + github.com/aws/aws-sdk-go-v2/config v1.18.19 + github.com/axiomhq/hyperloglog v0.0.0-20220105174342-98591331716a + github.com/cenkalti/backoff/v3 v3.2.2 + github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0 + github.com/client9/misspell v0.3.4 + github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c + github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf + github.com/denisenkom/go-mssqldb v0.12.2 + github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74 + github.com/dustin/go-humanize v1.0.1 + github.com/fatih/color v1.15.0 + github.com/fatih/structs v1.1.0 + github.com/favadi/protoc-go-inject-tag v1.4.0 + github.com/gammazero/workerpool v1.1.3 + github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 + github.com/go-errors/errors v1.4.2 + github.com/go-jose/go-jose/v3 v3.0.1 + github.com/go-ldap/ldap/v3 v3.4.4 + github.com/go-sql-driver/mysql v1.6.0 + github.com/go-test/deep v1.1.0 + github.com/go-zookeeper/zk v1.0.3 + github.com/gocql/gocql v1.0.0 + github.com/golang-jwt/jwt/v4 v4.5.0 + github.com/golang/protobuf v1.5.3 + github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 + github.com/google/go-cmp v0.5.9 + github.com/google/go-github v17.0.0+incompatible + github.com/google/go-metrics-stackdriver v0.2.0 + github.com/google/tink/go v1.7.0 + github.com/hashicorp/cap v0.3.0 + github.com/hashicorp/consul-template v0.33.0 + github.com/hashicorp/consul/api v1.23.0 + github.com/hashicorp/errwrap v1.1.0 + github.com/hashicorp/eventlogger v0.2.1 + github.com/hashicorp/go-cleanhttp v0.5.2 + github.com/hashicorp/go-discover 
v0.0.0-20210818145131-c573d69da192 + github.com/hashicorp/go-gcp-common v0.8.0 + github.com/hashicorp/go-hclog v1.5.0 + github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0 + github.com/hashicorp/go-kms-wrapping/v2 v2.0.10 + github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.7-1 + github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.1 + github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.7 + github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.8-0.20230905162003-bfa3347a7c85 + github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.8 + github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.7 + github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.7 + github.com/hashicorp/go-memdb v1.3.4 + github.com/hashicorp/go-msgpack v1.1.5 + github.com/hashicorp/go-multierror v1.1.1 + github.com/hashicorp/go-plugin v1.4.9 + github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a + github.com/hashicorp/go-retryablehttp v0.7.2 + github.com/hashicorp/go-rootcerts v1.0.2 + github.com/hashicorp/go-secure-stdlib/awsutil v0.2.3 + github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 + github.com/hashicorp/go-secure-stdlib/gatedwriter v0.1.1 + github.com/hashicorp/go-secure-stdlib/kv-builder v0.1.2 + github.com/hashicorp/go-secure-stdlib/mlock v0.1.3 + github.com/hashicorp/go-secure-stdlib/nonceutil v0.1.0 + github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 + github.com/hashicorp/go-secure-stdlib/password v0.1.1 + github.com/hashicorp/go-secure-stdlib/reloadutil v0.1.1 + github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 + github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2 + github.com/hashicorp/go-sockaddr v1.0.2 + github.com/hashicorp/go-syslog v1.0.0 + github.com/hashicorp/go-uuid v1.0.3 + github.com/hashicorp/go-version v1.6.0 + github.com/hashicorp/golang-lru v0.5.4 + github.com/hashicorp/hcl v1.0.1-vault-5 + github.com/hashicorp/hcl/v2 v2.16.2 + github.com/hashicorp/hcp-link v0.1.0 + github.com/hashicorp/hcp-scada-provider v0.2.1 + github.com/hashicorp/hcp-sdk-go v0.23.0 + github.com/hashicorp/nomad/api v0.0.0-20230519153805-2275a83cbfdf + github.com/hashicorp/raft v1.3.10 + github.com/hashicorp/raft-autopilot v0.2.0 + github.com/hashicorp/raft-boltdb/v2 v2.0.0-20210421194847-a7e34179d62c + github.com/hashicorp/raft-snapshot v1.0.4 + github.com/hashicorp/vault-plugin-auth-alicloud v0.15.0 + github.com/hashicorp/vault-plugin-auth-azure v0.16.0 + github.com/hashicorp/vault-plugin-auth-centrify v0.15.1 + github.com/hashicorp/vault-plugin-auth-cf v0.15.0 + github.com/hashicorp/vault-plugin-auth-gcp v0.16.0 + github.com/hashicorp/vault-plugin-auth-jwt v0.16.0 + github.com/hashicorp/vault-plugin-auth-kerberos v0.10.0 + github.com/hashicorp/vault-plugin-auth-kubernetes v0.16.0 + github.com/hashicorp/vault-plugin-auth-oci v0.14.0 + github.com/hashicorp/vault-plugin-database-couchbase v0.9.2 + github.com/hashicorp/vault-plugin-database-elasticsearch v0.13.2 + github.com/hashicorp/vault-plugin-database-mongodbatlas v0.10.0 + github.com/hashicorp/vault-plugin-database-redis v0.2.1 + github.com/hashicorp/vault-plugin-database-redis-elasticache v0.2.1 + github.com/hashicorp/vault-plugin-database-snowflake v0.9.0 + github.com/hashicorp/vault-plugin-mock v0.16.1 + github.com/hashicorp/vault-plugin-secrets-ad v0.16.0 + github.com/hashicorp/vault-plugin-secrets-alicloud v0.15.0 + github.com/hashicorp/vault-plugin-secrets-azure v0.16.1 + github.com/hashicorp/vault-plugin-secrets-gcp v0.16.0 + 
github.com/hashicorp/vault-plugin-secrets-gcpkms v0.15.0 + github.com/hashicorp/vault-plugin-secrets-kubernetes v0.5.0 + github.com/hashicorp/vault-plugin-secrets-kv v0.15.0 + github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.10.2 + github.com/hashicorp/vault-plugin-secrets-openldap v0.11.1 + github.com/hashicorp/vault-plugin-secrets-terraform v0.7.1 + github.com/hashicorp/vault-testing-stepwise v0.1.3 + github.com/hashicorp/vault/api v1.9.2 + github.com/hashicorp/vault/api/auth/approle v0.1.0 + github.com/hashicorp/vault/api/auth/userpass v0.1.0 + github.com/hashicorp/vault/sdk v0.9.2 + github.com/hashicorp/vault/vault/hcp_link/proto v0.0.0-20230201201504-b741fa893d77 + github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab + github.com/jackc/pgx/v4 v4.18.1 + github.com/jcmturner/gokrb5/v8 v8.4.4 + github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f + github.com/jefferai/jsonx v1.0.0 + github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f + github.com/klauspost/compress v1.16.5 + github.com/kr/pretty v0.3.1 + github.com/kr/text v0.2.0 + github.com/mattn/go-colorable v0.1.13 + github.com/mattn/go-isatty v0.0.19 + github.com/mholt/archiver/v3 v3.5.1 + github.com/michaelklishin/rabbit-hole/v2 v2.12.0 + github.com/mikesmitty/edkey v0.0.0-20170222072505-3356ea4e686a + github.com/mitchellh/cli v1.1.5 + github.com/mitchellh/copystructure v1.2.0 + github.com/mitchellh/go-homedir v1.1.0 + github.com/mitchellh/go-testing-interface v1.14.1 + github.com/mitchellh/go-wordwrap v1.0.0 + github.com/mitchellh/mapstructure v1.5.0 + github.com/mitchellh/reflectwalk v1.0.2 + github.com/natefinch/atomic v0.0.0-20150920032501-a62ce929ffcc + github.com/ncw/swift v1.0.47 + github.com/oklog/run v1.1.0 + github.com/okta/okta-sdk-golang/v2 v2.12.1 + github.com/oracle/oci-go-sdk v24.3.0+incompatible + github.com/ory/dockertest v3.3.5+incompatible + github.com/ory/dockertest/v3 v3.10.0 + github.com/patrickmn/go-cache v2.1.0+incompatible + github.com/pires/go-proxyproto v0.6.1 + github.com/pkg/errors v0.9.1 + github.com/posener/complete v1.2.3 + github.com/pquerna/otp v1.2.1-0.20191009055518-468c2dd2b58d + github.com/prometheus/client_golang v1.14.0 + github.com/prometheus/common v0.37.0 + github.com/rboyer/safeio v0.2.1 + github.com/ryanuber/columnize v2.1.0+incompatible + github.com/ryanuber/go-glob v1.0.0 + github.com/sasha-s/go-deadlock v0.2.0 + github.com/sethvargo/go-limiter v0.7.1 + github.com/shirou/gopsutil/v3 v3.22.6 + github.com/stretchr/testify v1.8.4 + go.etcd.io/bbolt v1.3.7 + go.etcd.io/etcd/client/pkg/v3 v3.5.7 + go.etcd.io/etcd/client/v2 v2.305.5 + go.etcd.io/etcd/client/v3 v3.5.7 + go.mongodb.org/atlas v0.33.0 + go.mongodb.org/mongo-driver v1.11.6 + go.opentelemetry.io/otel v1.16.0 + go.opentelemetry.io/otel/sdk v1.14.0 + go.opentelemetry.io/otel/trace v1.16.0 + go.uber.org/atomic v1.11.0 + go.uber.org/goleak v1.2.1 + golang.org/x/crypto v0.15.0 + golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 + golang.org/x/net v0.17.0 + golang.org/x/oauth2 v0.10.0 + golang.org/x/sync v0.3.0 + golang.org/x/sys v0.14.0 + golang.org/x/term v0.14.0 + golang.org/x/text v0.14.0 + golang.org/x/tools v0.10.0 + google.golang.org/api v0.126.0 + google.golang.org/grpc v1.58.3 + google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 + google.golang.org/protobuf v1.31.0 + gopkg.in/ory-am/dockertest.v3 v3.3.4 + gotest.tools/gotestsum v1.10.0 + honnef.co/go/tools v0.4.3 + k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 + layeh.com/radius v0.0.0-20190322222518-890bc1058917 + 
mvdan.cc/gofumpt v0.3.1 + nhooyr.io/websocket v1.8.7 +) + +require ( + cloud.google.com/go v0.110.4 // indirect + cloud.google.com/go/compute v1.21.0 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + cloud.google.com/go/iam v1.1.1 // indirect + cloud.google.com/go/kms v1.12.1 // indirect + code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f // indirect + github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect + github.com/99designs/keyring v1.2.2 // indirect + github.com/Azure/azure-pipeline-go v0.2.3 // indirect + github.com/Azure/azure-sdk-for-go v67.2.0+incompatible // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi v1.1.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + github.com/Azure/go-autorest v14.2.0+incompatible // indirect + github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 // indirect + github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect + github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect + github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect + github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect + github.com/Azure/go-autorest/logger v0.2.1 // indirect + github.com/Azure/go-autorest/tracing v0.6.0 // indirect + github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect + github.com/BurntSushi/toml v1.3.2 // indirect + github.com/DataDog/datadog-go v3.2.0+incompatible // indirect + github.com/Jeffail/gabs v1.1.1 // indirect + github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver v1.5.0 // indirect + github.com/Masterminds/semver/v3 v3.2.1 // indirect + github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect + github.com/agext/levenshtein v1.2.1 // indirect + github.com/andybalholm/brotli v1.0.4 // indirect + github.com/apache/arrow/go/v12 v12.0.1 // indirect + github.com/apache/thrift v0.16.0 // indirect + github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect + github.com/aws/aws-sdk-go-v2 v1.17.7 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.13.18 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1 // indirect + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.23 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.26 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25 // indirect + 
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.0 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.12.6 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.18.7 // indirect + github.com/aws/smithy-go v1.13.5 // indirect + github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bgentry/speakeasy v0.1.0 // indirect + github.com/boombuler/barcode v1.0.1 // indirect + github.com/cenkalti/backoff v2.2.1+incompatible // indirect + github.com/cenkalti/backoff/v4 v4.2.0 // indirect + github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect + github.com/centrify/cloud-golang-sdk v0.0.0-20210923165758-a8c48d049166 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible // indirect + github.com/circonus-labs/circonusllhist v0.1.3 // indirect + github.com/cjlapao/common-go v0.0.39 // indirect + github.com/cloudflare/circl v1.3.3 // indirect + github.com/cloudfoundry-community/go-cfclient v0.0.0-20210823134051-721f0e559306 // indirect + github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe // indirect + github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 // indirect + github.com/containerd/containerd v1.7.0 // indirect + github.com/containerd/continuity v0.3.0 // indirect + github.com/coreos/go-oidc v2.2.1+incompatible // indirect + github.com/coreos/go-oidc/v3 v3.5.0 // indirect + github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/couchbase/gocb/v2 v2.6.3 // indirect + github.com/couchbase/gocbcore/v10 v10.2.3 // indirect + github.com/danieljoos/wincred v1.1.2 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba // indirect + github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect + github.com/digitalocean/godo v1.7.5 // indirect + github.com/dimchansky/utfbom v1.1.1 // indirect + github.com/dnephin/pflag v1.0.7 // indirect + github.com/docker/cli v20.10.20+incompatible // indirect + github.com/docker/distribution v2.8.2+incompatible // indirect + github.com/docker/docker v24.0.5+incompatible // indirect + github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect + github.com/dvsekhvalnov/jose2go v1.5.0 // indirect + github.com/emicklei/go-restful/v3 v3.10.1 // indirect + github.com/envoyproxy/go-control-plane v0.11.1 // indirect + github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect + github.com/evanphx/json-patch/v5 v5.6.0 // indirect + github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.2 // indirect + github.com/gammazero/deque v0.2.1 // indirect + github.com/go-asn1-ber/asn1-ber v1.5.4 // indirect + github.com/go-ldap/ldif v0.0.0-20200320164324-fd88d9b715b3 // indirect + github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-openapi/analysis v0.20.0 // indirect + github.com/go-openapi/errors v0.20.1 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // 
indirect + github.com/go-openapi/jsonreference v0.20.1 // indirect + github.com/go-openapi/loads v0.20.2 // indirect + github.com/go-openapi/runtime v0.19.24 // indirect + github.com/go-openapi/spec v0.20.3 // indirect + github.com/go-openapi/strfmt v0.20.0 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-openapi/validate v0.20.2 // indirect + github.com/go-ozzo/ozzo-validation v3.6.0+incompatible // indirect + github.com/goccy/go-json v0.10.0 // indirect + github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect + github.com/gofrs/uuid v4.3.0+incompatible // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect + github.com/golang-sql/sqlexp v0.1.0 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/flatbuffers v23.1.21+incompatible // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/google/go-querystring v1.1.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/s2a-go v0.1.4 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect + github.com/googleapis/gax-go/v2 v2.11.0 // indirect + github.com/gophercloud/gophercloud v0.1.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect + github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect + github.com/hashicorp/cronexpr v1.1.1 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-msgpack/v2 v2.0.0 // indirect + github.com/hashicorp/go-secure-stdlib/fileutil v0.1.0 // indirect + github.com/hashicorp/go-slug v0.11.1 // indirect + github.com/hashicorp/go-tfe v1.25.1 // indirect + github.com/hashicorp/jsonapi v0.0.0-20210826224640-ee7dae0fb22d // indirect + github.com/hashicorp/logutils v1.0.0 // indirect + github.com/hashicorp/mdns v1.0.4 // indirect + github.com/hashicorp/net-rpc-msgpackrpc/v2 v2.0.0 // indirect + github.com/hashicorp/serf v0.10.1 // indirect + github.com/hashicorp/vault/api/auth/kubernetes v0.4.1 // indirect + github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 // indirect + github.com/hashicorp/yamux v0.1.1 // indirect + github.com/huandu/xstrings v1.4.0 // indirect + github.com/imdario/mergo v0.3.15 // indirect + github.com/jackc/chunkreader/v2 v2.0.1 // indirect + github.com/jackc/pgconn v1.14.0 // indirect + github.com/jackc/pgio v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgproto3/v2 v2.3.2 // indirect + github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect + github.com/jackc/pgtype v1.14.0 // indirect + github.com/jackc/pgx v3.3.0+incompatible // indirect + github.com/jcmturner/aescts/v2 v2.0.0 // indirect + github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect + github.com/jcmturner/gofork v1.7.6 // indirect + github.com/jcmturner/goidentity/v6 v6.0.1 // indirect + github.com/jcmturner/rpc/v2 v2.0.3 // indirect + github.com/jeffchao/backoff v0.0.0-20140404060208-9d7fd7aa17f2 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/kelseyhightower/envconfig 
v1.4.0 // indirect + github.com/klauspost/asmfmt v1.3.2 // indirect + github.com/klauspost/cpuid/v2 v2.2.3 // indirect + github.com/klauspost/pgzip v1.2.5 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/lib/pq v1.10.9 // indirect + github.com/linode/linodego v0.7.1 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-ieproxy v0.0.1 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mediocregopher/radix/v4 v4.1.2 // indirect + github.com/microsoft/kiota-abstractions-go v1.1.0 // indirect + github.com/microsoft/kiota-authentication-azure-go v1.0.0 // indirect + github.com/microsoft/kiota-http-go v1.0.0 // indirect + github.com/microsoft/kiota-serialization-form-go v1.0.0 // indirect + github.com/microsoft/kiota-serialization-json-go v1.0.4 // indirect + github.com/microsoft/kiota-serialization-text-go v1.0.0 // indirect + github.com/microsoftgraph/msgraph-sdk-go v1.13.0 // indirect + github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0 // indirect + github.com/miekg/dns v1.1.43 // indirect + github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 // indirect + github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 // indirect + github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect + github.com/mitchellh/hashstructure v1.1.0 // indirect + github.com/mitchellh/pointerstructure v1.2.1 // indirect + github.com/moby/patternmatcher v0.5.0 // indirect + github.com/moby/sys/sequential v0.5.0 // indirect + github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mongodb-forks/digest v1.0.5 // indirect + github.com/montanaflynn/stats v0.7.0 // indirect + github.com/mtibben/percent v0.2.1 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 // indirect + github.com/nwaples/rardecode v1.1.2 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect + github.com/opencontainers/runc v1.1.6 // indirect + github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b // indirect + github.com/oracle/oci-go-sdk/v60 v60.0.0 // indirect + github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c // indirect + github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect + github.com/pierrec/lz4 v2.6.1+incompatible // indirect + github.com/pierrec/lz4/v4 v4.1.17 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/pquerna/cachecontrol v0.1.0 // indirect + github.com/prometheus/client_model v0.4.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect + github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 // indirect + github.com/rogpeppe/go-internal v1.10.0 // indirect + github.com/shopspring/decimal v1.3.1 // indirect + github.com/sirupsen/logrus v1.9.0 // indirect + github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect + github.com/snowflakedb/gosnowflake 
v1.6.24 // indirect + github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d // indirect + github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b // indirect + github.com/spf13/cast v1.5.1 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/stretchr/objx v0.5.0 // indirect + github.com/tencentcloud/tencentcloud-sdk-go v1.0.162 // indirect + github.com/tilinna/clock v1.1.0 // indirect + github.com/tklauser/go-sysconf v0.3.10 // indirect + github.com/tklauser/numcpus v0.4.0 // indirect + github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c // indirect + github.com/ulikunitz/xz v0.5.10 // indirect + github.com/vmware/govmomi v0.18.0 // indirect + github.com/xdg-go/pbkdf2 v1.0.0 // indirect + github.com/xdg-go/scram v1.1.1 // indirect + github.com/xdg-go/stringprep v1.0.3 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/xeipuuv/gojsonschema v1.2.0 // indirect + github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect + github.com/yosida95/uritemplate/v3 v3.0.2 // indirect + github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect + github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 // indirect + github.com/yusufpapurcu/wmi v1.2.2 // indirect + github.com/zclconf/go-cty v1.12.1 // indirect + github.com/zeebo/xxh3 v1.0.2 // indirect + go.etcd.io/etcd/api/v3 v3.5.7 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/otel/metric v1.16.0 // indirect + go.uber.org/multierr v1.7.0 // indirect + go.uber.org/zap v1.19.1 // indirect + golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a // indirect + golang.org/x/mod v0.11.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.66.2 // indirect + gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect + gopkg.in/resty.v1 v1.12.0 // indirect + gopkg.in/square/go-jose.v2 v2.6.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/api v0.27.2 // indirect + k8s.io/apimachinery v0.27.2 // indirect + k8s.io/client-go v0.27.2 // indirect + k8s.io/klog/v2 v2.90.1 // indirect + k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..0bb0615 --- /dev/null +++ b/go.sum @@ -0,0 +1,4048 @@ +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.39.0/go.mod 
h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go v0.110.4 h1:1JYyxKMN9hd5dR2MYTPWkGUgcoxVVhg0LKNKEo0qvmk= +cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod 
h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= +cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= +cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= +cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod 
h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= 
+cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.21.0 h1:JNBsyXVoOoNJtTQcnEY5uYpZIbeCTYIeDe0Xh1bySMk= +cloud.google.com/go/compute v1.21.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3 
h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= +cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +cloud.google.com/go/dataqna v0.5.0/go.mod 
h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= +cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/essentialcontacts 
v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod 
h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= +cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y= +cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= +cloud.google.com/go/kms v1.12.1 h1:xZmZuwy2cwzsocmKDOPu4BL7umg8QXagQx6fKVmf45U= +cloud.google.com/go/kms v1.12.1/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/longrunning v0.5.1 h1:Fr7TXftcqTudoyRJa113hyaqlGdiBQkp0Gq7tErFDWI= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +cloud.google.com/go/maps v0.1.0/go.mod 
h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= +cloud.google.com/go/monitoring v1.15.1 h1:65JhLMd+JiYnXr6j5Z63dUYCuOg770p8a/VC+gil/58= +cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod 
h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod 
h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/scheduler v1.8.0/go.mod 
h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= +cloud.google.com/go/spanner v1.47.0 h1:aqiMP8dhsEXgn9K5EZBWxPG7dxIiyM2VaikqeU4iteg= +cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= +cloud.google.com/go/speech v1.6.0/go.mod 
h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= +cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM= +cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/video 
v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk=
+cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU=
+cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4=
+cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M=
+cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU=
+cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU=
+cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0=
+cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo=
+cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo=
+cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY=
+cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E=
+cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY=
+cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE=
+cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g=
+cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc=
+cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208=
+cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8=
+cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w=
+cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8=
+cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes=
+cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE=
+cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg=
+cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc=
+cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A=
+cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg=
+cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo=
+cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ=
+cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng=
+cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0=
+cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M=
+cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M=
+cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA=
+cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw=
+code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f h1:UrKzEwTgeiff9vxdrfdqxibzpWjxLnuXDI5m6z3GJAk=
+code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f/go.mod h1:sk5LnIjB/nIEU7yP5sDQExVm62wu0pBh3yrElngUisI=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
+git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc=
+github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs=
+github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4=
+github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XBn0=
+github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20221206110420-d395f97c4830/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0=
+github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20221215162035-5330a85ea652/go.mod h1:OahwfttHWG6eJ0clwcfBAHoDI6X/LV/15hx/wlMZSrU=
+github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U=
+github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
+github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v44.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v56.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v67.2.0+incompatible h1:Uu/Ww6ernvPTrpq31kITVTIm/I5jlJ1wjtEH/bmSB2k=
+github.com/Azure/azure-sdk-for-go v67.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.3.0/go.mod h1:tZoQYdDZNOiIjdSn0dVWVfl0NEPGOJqVLzSrcFk4Is0=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0/go.mod h1:ON4tFdPTwRcgWEaVDrN3584Ef+b7GgSJaXxe5fW9t4M=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 h1:8q4SaHjFsClSvuVne0ID/5Ka8u3fcIHyqkLjcFpNRHQ=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U=
+github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 h1:UPeCRD+XY7QlaGQte2EVI2iOcWvUYA2XY8w5T/8v0NQ=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1/go.mod h1:oGV6NlB0cvi1ZbYRR2UN44QHxWFyGk+iylgD0qaMXjA=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2 h1:mLY+pNLjCUeKhgnAJWAKhEUQM+RJQo2H1fuGSw1Ky1E=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0 h1:pPvTJ1dY0sA35JOeFq6TsY2xj6Z85Yo23Pj4wCCvu4o=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi v1.1.0 h1:Q707jfTFqfunSnh73YkCBDXR3GQJKno3chPRxXw//ho=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/msi/armmsi v1.1.0/go.mod h1:vjoxsjVnPwhjHZw4PuuhpgYlcxWl5tyNedLHUl0ulFA=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.0.0 h1:nBy98uKOIfun5z6wx6jwWLrULcM0+cjBalBFZlEZ7CA=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 h1:7CBQ+Ei8SP2c6ydQTGCCrS35bDxgTMfoP2miAwK++OU=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1/go.mod h1:c/wcGeGx5FUPbM/JltUYHZcKmigwyVLJlDq+4HdtXaw=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 h1:u/LLAOFgsMv7HmNL4Qufg58y+qElGOt5qv0z1mURkRY=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag=
+github.com/Azure/azure-storage-blob-go v0.15.0 h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk=
+github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58=
+github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
+github.com/Azure/go-autorest/autorest v0.11.0/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
+github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
+github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
+github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=
+github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw=
+github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs=
+github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
+github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
+github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
+github.com/Azure/go-autorest/autorest/adal v0.9.22 h1:/GblQdIudfEM3AWWZ0mrYJQSd7JS4S/Mbzh6F0ov0Xc=
+github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.0/go.mod h1:QRTvSZQpxqm8mSErhnbI+tANIBAKP7B+UIE2z4ypUO0=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.12/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.0/go.mod h1:JljT387FplPzBA31vUcvsetLKF3pec5bdAxjVU4kI2s=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 h1:0W/yGmFdTIT77fvdlGZ0LMISoLHFJ7Tx4U0yeB+uFs4=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg=
+github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
+github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw=
+github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
+github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
+github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
+github.com/Azure/go-autorest/autorest/validation v0.3.0/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
+github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac=
+github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
+github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
+github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
+github.com/Azure/go-ntlmssp v0.0.0-20220621081337-cb9428e4ac1e/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
+github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
+github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
+github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
+github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4=
+github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/Jeffail/gabs v1.1.1 h1:V0uzR08Hj22EX8+8QMhyI9sX2hwRu+/RJhJUmnwda/E=
+github.com/Jeffail/gabs v1.1.1/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc=
+github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU=
+github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk=
+github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
+github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
+github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
+github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
+github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
+github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
+github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
+github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
+github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
+github.com/Masterminds/sprig/v3 v3.2.1/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk=
+github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA=
+github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM=
+github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
+github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
+github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
+github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
+github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
+github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
+github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
+github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
+github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
+github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
+github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
+github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
+github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
+github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
+github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
+github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim v0.9.3/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim v0.9.6/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim v0.10.0-rc.7 h1:HBytQPxcv8Oy4244zbQbe6hnOnx544eL5QPUqhJldz8=
+github.com/Microsoft/hcsshim v0.10.0-rc.7/go.mod h1:ILuwjA+kNW+MrN/w5un7n3mTqkwsFu4Bp05/okFUZlE=
+github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
+github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
+github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
+github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
+github.com/ProtonMail/go-crypto v0.0.0-20230626094100-7e9e0395ebec h1:vV3RryLxt42+ZIVOFbYJCH1jsZNTNmj2NYru5zfx+4E=
+github.com/ProtonMail/go-crypto v0.0.0-20230626094100-7e9e0395ebec/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
+github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/SAP/go-hdb v0.14.1 h1:hkw4ozGZ/i4eak7ZuGkY5e0hxiXFdNUBNhr4AvZVNFE=
+github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo=
+github.com/Sectorbob/mlab-ns2 v0.0.0-20171030222938-d3aa0c295a8a h1:KFHLI4QGttB0i7M3qOkAo8Zn/GSsxwwCnInFqBaYtkM=
+github.com/Sectorbob/mlab-ns2 v0.0.0-20171030222938-d3aa0c295a8a/go.mod h1:D73UAuEPckrDorYZdtlCu2ySOLuPB5W4rhIkmmc/XbI=
+github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
+github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af h1:DBNMBMuMiWYu0b+8KMJuWmfCkcxl09JwdlqwDZZ6U14=
+github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw=
+github.com/aerospike/aerospike-client-go/v5 v5.6.0 h1:tRxcUq0HY8fFPQEzF3EgrknF+w1xFO0YDfUb9Nm8yRI=
+github.com/aerospike/aerospike-client-go/v5 v5.6.0/go.mod h1:rJ/KpmClE7kiBPfvAPrGw9WuNOiz8v2uKbQaUyYPXtI=
+github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8=
+github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
+github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
+github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY=
+github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk=
+github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
+github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
+github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk=
+github.com/alexflint/go-filemutex v1.2.0/go.mod h1:mYyQSWvw9Tx2/H2n9qXPb52tTYfE0pZAWcBq5mK025c=
+github.com/aliyun/alibaba-cloud-sdk-go v1.62.301 h1:8mgvCpqsv3mQAcqZ/baAaMGUBj5J6MKMhxLd+K8L27Q=
+github.com/aliyun/alibaba-cloud-sdk-go v1.62.301/go.mod h1:Api2AkmMgGaSUAhmk76oaFObkoeCPc/bKAqcyplPODs=
+github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5 h1:nWDRPCyCltiTsANwC/n3QZH7Vww33Npq9MKqlwRzI/c=
+github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
+github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
+github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
+github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY=
+github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220418222510-f25a4f6275ed/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
+github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
+github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0=
+github.com/apache/arrow/go/v12 v12.0.1 h1:JsR2+hzYYjgSUkBSaahpqCetqZMr76djX80fF/DiJbg=
+github.com/apache/arrow/go/v12 v12.0.1/go.mod h1:weuTY7JvTG/HDPtMQxEUp7pU73vkLWMLpY67QwZ/WWw=
+github.com/apache/thrift v0.16.0 h1:qEy6UW60iVOlUy+b9ZR0d5WzUWYGOo4HfopoyBaNmoY=
+github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
+github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw=
+github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo=
+github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2 h1:VoHKYIXEQU5LWoambPBOvYxyLqZYHuj+rj5DVnMUc3k=
+github.com/apple/foundationdb/bindings/go v0.0.0-20190411004307-cd5c9d91fad2/go.mod h1:OMVSB21p9+xQUIqlGizHPZfjK+SHws1ht+ZytVDoz9U=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg=
+github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs=
+github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
+github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
+github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
+github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
+github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
+github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
+github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg=
+github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
+github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.34.0/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
+github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
+github.com/aws/aws-sdk-go v1.43.9/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/aws/aws-sdk-go v1.43.16/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/aws/aws-sdk-go v1.44.269 h1:NUNq++KMjhWUVVUIx7HYLgBpX16bWfTY1EdQRraLALo=
+github.com/aws/aws-sdk-go v1.44.269/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go-v2 v1.17.7 h1:CLSjnhJSTSogvqUGhIC6LqFKATMRexcxLZ0i/Nzk9Eg=
+github.com/aws/aws-sdk-go-v2 v1.17.7/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno=
+github.com/aws/aws-sdk-go-v2/config v1.18.19 h1:AqFK6zFNtq4i1EYu+eC7lcKHYnZagMn6SW171la0bGw=
+github.com/aws/aws-sdk-go-v2/config v1.18.19/go.mod h1:XvTmGMY8d52ougvakOv1RpiTLPz9dlG/OQHsKU/cMmY=
+github.com/aws/aws-sdk-go-v2/credentials v1.13.18 h1:EQMdtHwz0ILTW1hoP+EwuWhwCG1hD6l3+RWFQABET4c=
+github.com/aws/aws-sdk-go-v2/credentials v1.13.18/go.mod h1:vnwlwjIe+3XJPBYKu1et30ZPABG3VaXJYr8ryohpIyM=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1 h1:gt57MN3liKiyGopcqgNzJb2+d9MJaKT/q1OksHNXVE4=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1/go.mod h1:lfUx8puBRdM5lVVMQlwt2v+ofiG/X6Ms+dy0UkG/kXw=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59 h1:E3Y+OfzOK1+rmRo/K2G0ml8Vs+Xqk0kOnf4nS0kUtBc=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59/go.mod h1:1M4PLSBUVfBI0aP+C9XI7SM6kZPCGYyI6izWz0TGprE=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31 h1:sJLYcS+eZn5EeNINGHSCRAwUJMFVqklwkH36Vbyai7M=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31/go.mod h1:QT0BqUvX1Bh2ABdTGnjqEjvjzrCfIniM9Sc8zn9Yndo=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25 h1:1mnRASEKnkqsntcxHaysxwgVoUUp5dkiB+l3llKnqyg=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25/go.mod h1:zBHOPwhBc3FlQjQJE/D3IfPWiWaQmT06Vq9aNukDo0k=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32 h1:p5luUImdIqywn6JpQsW3tq5GNOxKmOnEpybzPx+d1lk=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32/go.mod h1:XGhIBZDEgfqmFIugclZ6FU7v75nHhBDtzuB4xB/tEi4=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.23 h1:DWYZIsyqagnWL00f8M/SOr9fN063OEQWn9LLTbdYXsk=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.23/go.mod h1:uIiFgURZbACBEQJfqTZPb/jxO7R+9LeoHUFudtIdeQI=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 h1:y2+VQzC6Zh2ojtV2LoC0MNwHWc6qXv/j2vrQtlftkdA=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11/go.mod h1:iV4q2hsqtNECrfmlXyord9u4zyuFEJX9eLgLpSPzWA8=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.26 h1:CeuSeq/8FnYpPtnuIeLQEEvDv9zUjneuYi8EghMBdwQ=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.26/go.mod h1:2UqAAwMUXKeRkAHIlDJqvMVgOWkUi/AUXPk/YIe+Dg4=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25 h1:5LHn8JQ0qvjD9L9JhMtylnkcw7j05GDZqM9Oin6hpr0=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25/go.mod h1:/95IA+0lMnzW6XzqYJRpjjsAbKEORVeO0anQqjd2CNU=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.0 h1:e2ooMhpYGhDnBfSvIyusvAwX7KexuZaHbQY2Dyei7VU=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.0/go.mod h1:bh2E0CXKZsQN+faiKVqC40vfNMAWheoULBCnEgO9K+8=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0 h1:B1G2pSPvbAtQjilPq+Y7jLIzCOwKzuVEl+aBBaNG0AQ=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0/go.mod h1:ncltU6n4Nof5uJttDtcNQ537uNuwYqsZZQcpkd2/GUQ=
+github.com/aws/aws-sdk-go-v2/service/sso v1.12.6 h1:5V7DWLBd7wTELVz5bPpwzYy/sikk0gsgZfj40X+l5OI=
+github.com/aws/aws-sdk-go-v2/service/sso v1.12.6/go.mod h1:Y1VOmit/Fn6Tz1uFAeCO6Q7M2fmfXSCLeL5INVYsLuY=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6 h1:B8cauxOH1W1v7rd8RdI/MWnoR4Ze0wIHWrb90qczxj4=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6/go.mod h1:Lh/bc9XUf8CfOY6Jp5aIkQtN+j1mc+nExc+KXj9jx2s=
+github.com/aws/aws-sdk-go-v2/service/sts v1.18.7 h1:bWNgNdRko2x6gqa0blfATqAZKZokPIeM1vfmQt2pnvM=
+github.com/aws/aws-sdk-go-v2/service/sts v1.18.7/go.mod h1:JuTnSoeePXmMVe9G8NcjjwgOKEfZ4cOjMuT2IBT/2eI=
+github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
+github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
+github.com/axiomhq/hyperloglog v0.0.0-20220105174342-98591331716a h1:eqjiAL3qooftPm8b9C1GsSSRcmlw7iOva8vdBTmV2PY=
+github.com/axiomhq/hyperloglog v0.0.0-20220105174342-98591331716a/go.mod h1:2stgcRjl6QmW+gU2h5E7BQXg4HU0gzxKWDuT5HviN9s=
+github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA=
+github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
+github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
+github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY=
+github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
+github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
+github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
+github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
+github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
+github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
+github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/boombuler/barcode v1.0.1 h1:NDBbPmhS+EqABEs5Kg3n/5ZNjy73Pz7SIV+KCeqyXcs=
+github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
+github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
+github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
+github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
+github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
+github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
+github.com/bytecodealliance/wasmtime-go v0.36.0/go.mod h1:q320gUxqyI8yB+ZqRuaJOEnGkAnHh6WtJjMaT2CW4wI=
+github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
+github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
+github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M=
+github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
+github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4=
+github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=
+github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
+github.com/centrify/cloud-golang-sdk v0.0.0-20210923165758-a8c48d049166 h1:jQ93fKqb/wRmK/KiHpa7Tk9rmHeKXhp4j+5Sg/tENiY=
+github.com/centrify/cloud-golang-sdk v0.0.0-20210923165758-a8c48d049166/go.mod h1:c/gmvyN8lq6lYtHvrqqoXrg2xyN65N0mBmbikxFWXNE=
+github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
+github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
+github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
+github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
+github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0 h1:CWU8piLyqoi9qXEUwzOh5KFKGgmSU5ZhktJyYcq6ryQ=
+github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0/go.mod h1:5d8DqS60xkj9k3aXfL3+mXBH0DPYO0FQjcKosxl+b/Q=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
+github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
+github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
+github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
+github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
+github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA=
+github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
+github.com/cjlapao/common-go v0.0.39 h1:bAAUrj2B9v0kMzbAOhzjSmiyDy+rd56r2sy7oEiQLlA=
+github.com/cjlapao/common-go v0.0.39/go.mod h1:M3dzazLjTjEtZJbbxoA5ZDiGCiHmpwqW9l4UWaddwOA=
+github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs=
+github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
+github.com/cloudfoundry-community/go-cfclient v0.0.0-20210823134051-721f0e559306 h1:k8q2Nsz7kNaUlysVCnWIFLMUSqiKXaGLdIf9P0GsX2Y=
+github.com/cloudfoundry-community/go-cfclient v0.0.0-20210823134051-721f0e559306/go.mod h1:0FdHblxw7g3M2PPICOw9i8YZOHP9dZTHbJUtoxL7Z/E=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk=
+github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
+github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
+github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
+github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c h1:2zRrJWIt/f9c9HhNHAgrRgq0San5gRRUJTBXLkchal0=
+github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk=
+github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
+github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
+github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
+github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q=
+github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
+github.com/container-orchestrated-devices/container-device-interface v0.5.4/go.mod h1:DjE95rfPiiSmG7uVXtg0z6MnPm/Lx4wxKCIts0ZE0vg=
+github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
+github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
+github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
+github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/btrfs/v2 v2.0.0/go.mod h1:swkD/7j9HApWpzl8OHfrHNxppPd9l44DFZdF94BUj9k=
+github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
+github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
+github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
+github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
+github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
+github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8=
+github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
+github.com/containerd/cgroups/v3 v3.0.1/go.mod h1:/vtwk1VXrtoa5AaZLkypuOJgA/6DyPMZHJPGQNtlHnw=
+github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
+github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
+github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
+github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
+github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
+github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
+github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
+github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
+github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
+github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
+github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s=
+github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE=
+github.com/containerd/containerd v1.6.6/go.mod h1:ZoP1geJldzCVY3Tonoz7b1IXk8rIX0Nltt5QE4OMNk0=
+github.com/containerd/containerd v1.6.8/go.mod h1:By6p5KqPK0/7/CgO/A6t/Gz+CUYUu2zf1hUaaymVXB0=
+github.com/containerd/containerd v1.6.9/go.mod h1:XVicUvkxOrftE2Q1YWUXgZwkkAxwQYNOFzYWvfVfEfQ=
+github.com/containerd/containerd v1.7.0 h1:G/ZQr3gMZs6ZT0qPUZ15znx5QSdQdASW11nXTLTM2Pg=
+github.com/containerd/containerd v1.7.0/go.mod h1:QfR7Efgb/6X2BDpTPJRvPTYDE9rsF0FsXX9J8sIs/sc=
+github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
+github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
+github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
+github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
+github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk=
+github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg=
+github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM=
+github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o=
+github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
+github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
+github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
+github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
+github.com/containerd/go-cni v1.1.6/go.mod h1:BWtoWl5ghVymxu6MBjg79W9NZrCRyHIdUtk4cauMe34=
+github.com/containerd/go-cni v1.1.9/go.mod h1:XYrZJ1d5W6E2VOvjffL3IZq0Dz6bsVlERHbekNK90PM=
+github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
+github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
+github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
+github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
+github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
+github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4=
+github.com/containerd/imgcrypt v1.1.4/go.mod h1:LorQnPtzL/T0IyCeftcsMEO7AqxUDbdO8j/tSUpgxvo=
+github.com/containerd/imgcrypt v1.1.7/go.mod h1:FD8gqIcX5aTotCtOmjeCsi3A1dHmTZpnMISGKSczt4k=
+github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
+github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/nri v0.3.0/go.mod h1:Zw9q2lP16sdg0zYybemZ9yTDy8g7fPCIB3KXOGlggXI=
+github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
+github.com/containerd/stargz-snapshotter/estargz v0.12.1/go.mod h1:12VUuCq3qPq4y8yUW+l5w3+oXV3cx2Po3KSe/SmPGqw=
+github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
+github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
+github.com/containerd/ttrpc v1.1.1-0.20220420014843-944ef4a40df3/go.mod h1:YYyNVhZrTMiaf51Vj6WhAJqJw+vl/nzABhj8pWrzle4=
+github.com/containerd/ttrpc v1.2.1/go.mod h1:sIT6l32Ph/H9cvnJsfXM5drIVzTr5A2flTf1G5tYZak=
+github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
+github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
+github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
+github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
+github.com/containerd/typeurl/v2 v2.1.0/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0=
+github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
+github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
+github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y=
+github.com/containernetworking/cni v1.1.1/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
+github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
+github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
+github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
+github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE=
+github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8=
+github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4=
+github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
+github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
+github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/containers/ocicrypt v1.1.3/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
+github.com/containers/ocicrypt v1.1.6/go.mod h1:WgjxPWdTJMqYMjf3M6cuIFFA1/MpyyhIM99YInA+Rvc=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
+github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
+github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk=
+github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
+github.com/coreos/go-oidc/v3 v3.5.0 h1:VxKtbccHZxs8juq7RdJntSqtXFtde9YpNpGn0yqgEHw=
+github.com/coreos/go-oidc/v3 v3.5.0/go.mod h1:ecXRtV4romGPeO6ieExAsUK9cb/3fp9hXNz1tlv8PIM=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
+github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/couchbase/gocb/v2 v2.6.3 h1:5RsMo+RRfK0mVxHLAfpBz3/tHlgXZb1WBNItLk9Ab+c=
+github.com/couchbase/gocb/v2 v2.6.3/go.mod h1:yF5F6BHTZ/ZowhEuZbySbXrlI4rHd1TIhm5azOaMbJU=
+github.com/couchbase/gocbcore/v10 v10.2.3 h1:PEkRSNSkKjUBXx82Ucr094+anoiCG5GleOOQZOHo6D4=
+github.com/couchbase/gocbcore/v10 v10.2.3/go.mod h1:lYQIIk+tzoMcwtwU5GzPbDdqEkwkH3isI2rkSpfL0oM=
+github.com/couchbaselabs/gocaves/client v0.0.0-20230307083111-cc3960c624b1/go.mod h1:AVekAZwIY2stsJOMWLAS/0uA/+qdp7pjO8EHnl61QkY=
+github.com/couchbaselabs/gocaves/client v0.0.0-20230404095311-05e3ba4f0259 h1:2TXy68EGEzIMHOx9UvczR5ApVecwCfQZ0LjkmwMI6g4=
+github.com/couchbaselabs/gocaves/client v0.0.0-20230404095311-05e3ba4f0259/go.mod h1:AVekAZwIY2stsJOMWLAS/0uA/+qdp7pjO8EHnl61QkY=
+github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
+github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
+github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI=
+github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
+github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
+github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
+github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
+github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
+github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg=
+github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0=
+github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.0-20210816181553-5444fa50b93d/go.mod h1:tmAIfUFEirG/Y8jhZ9M+h36obRZAk/1fcSpXwAVlfqE=
+github.com/denisenkom/go-mssqldb v0.12.2 h1:1OcPn5GBIobjWNd+8yjfHNIaFX14B1pWI3F9HZy5KXw=
+github.com/denisenkom/go-mssqldb v0.12.2/go.mod h1:lnIw1mZukFRZDJYQ0Pb833QS2IaC3l5HkEfra2LJ+sk=
+github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
+github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba h1:p6poVbjHDkKa+wtC8frBMwQtT3BmqGYBjzMwJ63tuR4=
+github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
+github.com/dgraph-io/badger/v3 v3.2103.2/go.mod h1:RHo4/GmYcKKh5Lxu63wLEMHJ70Pac2JqZRYGhlyAo2M=
+github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug=
+github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc h1:8WFBn63wegobsYAX0YjD+8suexZDga5CctH4CCTx2+8=
+github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/digitalocean/godo v1.7.5 h1:JOQbAO6QT1GGjor0doT0mXefX2FgUDPOpYh2RaXA+ko=
+github.com/digitalocean/godo v1.7.5/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU=
+github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
+github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
+github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
+github.com/distribution/distribution/v3 v3.0.0-20220526142353-ffbd94cbe269/go.mod h1:28YO/VJk9/64+sTGNuYaBjWxrXTPrj0C0XmgTIOjxX4=
+github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko=
+github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
+github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
+github.com/dnephin/pflag v1.0.7 h1:oxONGlWxhmUct0YzKTgrpQv9AUA1wtPBn7zuSjJqptk=
+github.com/dnephin/pflag v1.0.7/go.mod h1:uxE91IoWURlOiTUIA8Mq5ZZkAv3dPUfZNaT80Zm7OQE=
+github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v20.10.20+incompatible h1:lWQbHSHUFs7KraSN2jOJK7zbMS2jNCHI4mt4xUFUVQ4=
+github.com/docker/cli v20.10.20+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
+github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
+github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.20+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v23.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v24.0.5+incompatible h1:WmgcE4fxyI6EEXxBRxsHnZXrO1pQ3smi0k/jho4HLeY=
+github.com/docker/docker v24.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
+github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
+github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
+github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
+github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
+github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 h1:iFaUwBSo5Svw6L7HYpRu/0lE3e0BaElwnNO1qkNQxBY=
+github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s=
+github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
+github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74 h1:2MIhn2R6oXQbgW5yHfS+d6YqyMfXiu2L55rFZC4UD/M=
+github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74/go.mod h1:UqXY1lYT/ERa4OEAywUqdok1T4RCRdArkhic1Opuavo=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/dvsekhvalnov/jose2go v1.5.0 h1:3j8ya4Z4kMCwT5nXIKFSV84YS+HdqSSO0VsTQxaLAeM=
+github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ=
+github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
+github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34=
+github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM=
+github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
+github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w=
+github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
+github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
+github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch/v5 v5.6.0
h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= +github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/favadi/protoc-go-inject-tag v1.4.0 h1:K3KXxbgRw5WT4f43LbglARGz/8jVsDOS7uMjG4oNvXY= +github.com/favadi/protoc-go-inject-tag v1.4.0/go.mod h1:AZ+PK+QDKUOLlBRG0rYiKkUX5Hw7+7GTFzlU99GFSbQ= +github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/form3tech-oss/jwt-go v3.2.5+incompatible h1:/l4kBbb4/vGSsdtB5nUe8L7B9mImVMaBPw9L/0TBHU8= +github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/foxcpp/go-mockdns v0.0.0-20210729171921-fb145fc6f897/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= +github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/fxamacker/cbor/v2 v2.4.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= +github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= +github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= +github.com/gammazero/deque v0.2.1 h1:qSdsbG6pgp6nL7A0+K/B7s12mcCY/5l5SIUpMOl+dC0= +github.com/gammazero/deque v0.2.1/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU= 
+github.com/gammazero/workerpool v1.1.3 h1:WixN4xzukFoN0XSeXF6puqEqFTl2mECI9S6W44HWy9Q= +github.com/gammazero/workerpool v1.1.3/go.mod h1:wPjyBLDbyKnUn2XwwyD3EEwo9dHutia9/fwNmSHWACc= +github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= +github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= +github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14= +github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-asn1-ber/asn1-ber v1.4.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-asn1-ber/asn1-ber v1.5.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-asn1-ber/asn1-ber v1.5.4 h1:vXT6d/FNDiELJnLb6hGNa309LMsrCoYFvpwHDF0+Y1A= +github.com/go-asn1-ber/asn1-ber v1.5.4/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= +github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= +github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-ini/ini v1.66.6/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA= +github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod 
h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= +github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= +github.com/go-ldap/ldap/v3 v3.1.7/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= +github.com/go-ldap/ldap/v3 v3.4.1/go.mod h1:iYS1MdmrmceOJ1QOTnRXrIs7i3kloqtmGQjRvjKpyMg= +github.com/go-ldap/ldap/v3 v3.4.4 h1:qPjipEpt+qDa6SI/h1fzuGWoRUY+qqQ9sOZq67/PYUs= +github.com/go-ldap/ldap/v3 v3.4.4/go.mod h1:fe1MsuN5eJJ1FeLT/LEBVdWfNWKh459R7aXgXtJC+aI= +github.com/go-ldap/ldif v0.0.0-20200320164324-fd88d9b715b3 h1:sfz1YppV05y4sYaW7kXZtrocU/+vimnIWt4cxAYh7+o= +github.com/go-ldap/ldif v0.0.0-20200320164324-fd88d9b715b3/go.mod h1:ZXFhGda43Z2TVbfGZefXyMJzsDHhCh0go3bZUcwTx7o= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= +github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk= +github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= +github.com/go-openapi/analysis 
v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ= +github.com/go-openapi/analysis v0.19.16/go.mod h1:GLInF007N83Ad3m8a/CbQ5TPzdnGT7workfHwuVjNVk= +github.com/go-openapi/analysis v0.20.0 h1:UN09o0kNhleunxW7LR+KnltD0YrJ8FF03pSqvAN3Vro= +github.com/go-openapi/analysis v0.20.0/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.1 h1:j23mMDtRxMwIobkpId7sWh7Ddcx4ivaoqUbfXx5P+a8= +github.com/go-openapi/errors v0.20.1/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= +github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= +github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= +github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY= 
+github.com/go-openapi/loads v0.19.6/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= +github.com/go-openapi/loads v0.19.7/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= +github.com/go-openapi/loads v0.20.0/go.mod h1:2LhKquiE513rN5xC6Aan6lYOSddlL8Mp20AW9kpviM4= +github.com/go-openapi/loads v0.20.2 h1:z5p5Xf5wujMxS1y8aP+vxwW5qYT2zdJBbXKmQUG3lcc= +github.com/go-openapi/loads v0.20.2/go.mod h1:hTVUotJ+UonAMMZsvakEgmWKgtulweO9vYP2bQYKA/o= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo= +github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98= +github.com/go-openapi/runtime v0.19.24 h1:TqagMVlRAOTwllE/7hNKx6rQ10O6T8ZzeJdMjSTKaD4= +github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +github.com/go-openapi/spec v0.19.15/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= +github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= +github.com/go-openapi/spec v0.20.1/go.mod h1:93x7oh+d+FQsmsieroS4cmR3u0p/ywH649a3qwC9OsQ= +github.com/go-openapi/spec v0.20.3 h1:uH9RQ6vdyPSs2pSy9fL8QPspDF2AMIMPtmK5coSSjtQ= +github.com/go-openapi/spec v0.20.3/go.mod h1:gG4F8wdEDN+YPBMVnzE85Rbhf+Th2DTvA9nFPQ5AYEg= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= +github.com/go-openapi/strfmt v0.20.0 h1:l2omNtmNbMc39IGptl9BuXBEKcZfS8zjrTsPKTiJiDM= +github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2/go.mod 
h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= +github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= +github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M= +github.com/go-openapi/swag v0.19.13/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= +github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8= +github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0waH08tGe6kAQ4= +github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9GA7monOmWBbeCI= +github.com/go-openapi/validate v0.20.1/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0= +github.com/go-openapi/validate v0.20.2 h1:AhqDegYV3J3iQkMPJSXkvzymHKMTw0BST3RK3hTT4ts= +github.com/go-openapi/validate v0.20.2/go.mod h1:e7OJoKNgd0twXZwIn0A43tHbvIcr/rZIVCbJBpTUoY0= +github.com/go-ozzo/ozzo-validation v3.6.0+incompatible h1:msy24VGS42fKO9K1vLz82/GeYW1cILu7Nuuj1N3BBkE= +github.com/go-ozzo/ozzo-validation v3.6.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= +github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY= +github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.1.0 
h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= +github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= +github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= +github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= +github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= +github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= +github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= 
+github.com/goccy/go-json v0.10.0 h1:mXKd9Qw4NuzShiRlOXKews24ufknHO7gx30lsDyokKA= +github.com/goccy/go-json v0.10.0/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/gocql/gocql v1.0.0 h1:UnbTERpP72VZ/viKE1Q1gPtmLvyTZTvuAstvSRydw/c= +github.com/gocql/gocql v1.0.0/go.mod h1:3gM2c4D3AnkISwBxGnMMsS8Oy4y2lhbPRsH4xnJrHG8= +github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.3.0+incompatible h1:CaSVZxm5B+7o45rtab4jC2G37WGYX1zQfuU2i6DSvnc= +github.com/gofrs/uuid v4.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= +github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= +github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= +github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod 
h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= 
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 h1:DIPQnGy2Gv2FSA4B/hh8Q7xx3B7AIDk3DAMeHclH1vQ= +github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6/go.mod h1:0AKcRCkMoKvUvlf89F6O7H2LYdhr1zBh736mBItOdRs= +github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/cel-go v0.12.6/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw= +github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v23.1.21+incompatible h1:bUqzx/MXCDxuS0hRJL2EfjyZL3uQrPbMocUa8zGqsTA= +github.com/google/flatbuffers v23.1.21+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= +github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= +github.com/google/go-containerregistry v0.13.0/go.mod 
h1:J9FQ+eSS4a1aC2GNZxvNpbWhgp0487v+cgiilB4FqDo= +github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-metrics-stackdriver v0.2.0 h1:rbs2sxHAPn2OtUj9JdR/Gij1YKGl0BTVD0augB+HEjE= +github.com/google/go-metrics-stackdriver v0.2.0/go.mod h1:KLcPyp3dWJAFD+yHisGlJSZktIsTjb50eB72U2YZ9K0= +github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 
h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= +github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/tink/go v1.7.0 h1:6Eox8zONGebBFcCBqkVmt60LaWZa6xg1cl/DwAh/J1w= +github.com/google/tink/go v1.7.0/go.mod h1:GAUOd+QE3pgj9q8VKIGTCP33c/B7eb4NhxLcgTJZStM= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= +github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.4.1/go.mod 
h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gophercloud/gophercloud v0.1.0 h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/391+sT5o= +github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= +github.com/gsterjov/go-libsecret 
v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU=
+github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0=
+github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
+github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
+github.com/hashicorp/cap v0.3.0 h1:zFzVxuWy78lO6QRLHu/ONkjx/Jh0lpfvPgmpDGri43E=
+github.com/hashicorp/cap v0.3.0/go.mod h1:dHTmyMIVbzT981XxRoci5G//dfWmd/HhuNiCH6J5+IA=
+github.com/hashicorp/consul-template v0.33.0 h1:UNyf7V/nFeh8edh5X6pP8f+9LZVn+DG9uNLLcTpLsFc=
+github.com/hashicorp/consul-template v0.33.0/go.mod h1:3RayddSLvOGQwdifbbe4doVwamgJU4QvxTtf5DNeclw=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/api v1.23.0 h1:L6e4v1AfoumqAHq/Rrsmuulev+nd7vltM3k8H329tyI=
+github.com/hashicorp/consul/api v1.23.0/go.mod h1:SfvUIT74b0EplDuNgAJQ/FVqSO6KyK2ia80UI39/Ye8=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/consul/sdk v0.14.0 h1:Hly+BMNMssVzoWddbBnBFi3W+Fzytvm0haSkihhj3GU=
+github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c=
+github.com/hashicorp/cronexpr v1.1.1/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4=
+github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/eventlogger v0.2.1 h1:sjAOKO62BDDBn10516Uo7QDf5KEqzhU0LkUnbBptVUU=
+github.com/hashicorp/eventlogger v0.2.1/go.mod h1://CHt6/j+Q2lc0NlUB5af4aS2M0c0aVBg9/JfcpAyhM=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
+github.com/hashicorp/go-discover v0.0.0-20210818145131-c573d69da192 h1:eje2KOX8Sf7aYPiAsLnpWdAIrGRMcpFjN/Go/Exb7Zo=
+github.com/hashicorp/go-discover v0.0.0-20210818145131-c573d69da192/go.mod h1:3/4dzY4lR1Hzt9bBqMhBzG7lngZ0GKx/nL6G/ad62wE=
+github.com/hashicorp/go-gatedio v0.5.0 h1:Jm1X5yP4yCqqWj5L1TgW7iZwCVPGtVc+mro5r/XX7Tg=
+github.com/hashicorp/go-gcp-common v0.8.0 h1:/2vGAbCU1v+BZ3YHXTCzTvxqma9WOJHYtADTfhZixLo=
+github.com/hashicorp/go-gcp-common v0.8.0/go.mod h1:Q7zYRy9ue9SuaEN2s9YLIQs4SoKHdoRmKRcImY3SLgs=
+github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
+github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
+github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v1.4.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
+github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
+github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0 h1:pSjQfW3vPtrOTcasTUKgCTQT7OGPPTTMVRrOfU6FJD8=
+github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk=
+github.com/hashicorp/go-kms-wrapping/v2 v2.0.8/go.mod h1:qTCjxGig/kjuj3hk1z8pOUrzbse/GxB1tGfbrq8tGJg=
+github.com/hashicorp/go-kms-wrapping/v2 v2.0.10 h1:A51EguZ576URdtcQ0l8mT/tOD948oAtmP1soqIHIFfI=
+github.com/hashicorp/go-kms-wrapping/v2 v2.0.10/go.mod h1:NtMaPhqSlfQ72XWDD2g80o8HI8RKkowIB8/WZHMyPY4=
+github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.7-1 h1:ZV26VJYcITBom0QqYSUOIj4HOHCVPEFjLqjxyXV/AbA=
+github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.7-1/go.mod h1:b99cDSA+OzcyRoBZroSf174/ss/e6gUuS45wue9ZQfc=
+github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.1 h1:ydUCtmr8f9F+mHZ1iCsvzqFTXqNVpewX3s9zcYipMKI=
+github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.1/go.mod h1:Sl/ffzV57UAyjtSg1h5Km0rN5+dtzZJm1CUztkoCW2c=
+github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.7 h1:E3eEWpkofgPNrYyYznfS1+drq4/jFcqHQVNcL7WhUCo=
+github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.7/go.mod h1:j5vefRoguQUG7iM4reS/hKIZssU1lZRqNPM5Wow6UnM=
+github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.8-0.20230905162003-bfa3347a7c85 h1:yZqD2ZQ4kWyVI2reKGC8Hl78ywWBtl1iLz/Bb5GBvMA=
+github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.8-0.20230905162003-bfa3347a7c85/go.mod h1:0mKsr+G70TGABNbdS5dGiZTVoXe9qM/mhEIQL3lOQRc=
+github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.8 h1:16I8OqBEuxZIowwn3jiLvhlx+z+ia4dJc9stvz0yUBU=
+github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.8/go.mod h1:6QUMo5BrXAtbzSuZilqmx0A4px2u6PeFK7vfp2WIzeM=
+github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.7 h1:KeG3QGrbxbr2qAqCJdf3NR4ijAYwdcWLTmwSbR0yusM=
+github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.7/go.mod h1:rXxYzjjGw4HltEwxPp9zYSRIo6R+rBf1MSPk01bvodc=
+github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.7 h1:G25tZFw/LrAzJWxvS0/BFI7V1xAP/UsAIsgBwiE0mwo=
+github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.7/go.mod h1:hxNA5oTfAvwPacWVg1axtF/lvTafwlAa6a6K4uzWHhw=
+github.com/hashicorp/go-memdb v1.3.4 h1:XSL3NR682X/cVk2IeV0d70N4DZ9ljI885xAEU8IoK3c=
+github.com/hashicorp/go-memdb v1.3.4/go.mod h1:uBTr1oQbtuMgd1SSGoR8YV27eT3sBHbYiNm53bMpgSg=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs=
+github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4=
+github.com/hashicorp/go-msgpack/v2 v2.0.0 h1:c1fiLq1LNghmLOry1ipGhvLDi+/zEoaEP2JrE1oFJ9s=
+github.com/hashicorp/go-msgpack/v2 v2.0.0/go.mod h1:JIxYkkFJRDDRSoWQBSh7s9QAVThq+82iWmUpmE4jKak=
+github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ=
+github.com/hashicorp/go-plugin v1.4.8/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s=
+github.com/hashicorp/go-plugin v1.4.9 h1:ESiK220/qE0aGxWdzKIvRH69iLiuN/PjoLTm69RoWtU=
+github.com/hashicorp/go-plugin v1.4.9/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s=
+github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a h1:FmnBDwGwlTgugDGbVxwV8UavqSMACbGrUpfc98yFLR4=
+github.com/hashicorp/go-raftchunking v0.6.3-0.20191002164813-7e9e8525653a/go.mod h1:xbXnmKqX9/+RhPkJ4zrEx4738HacP72aaUPlT2RZ4sU=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
+github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
+github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0=
+github.com/hashicorp/go-retryablehttp v0.7.2/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-secure-stdlib/awsutil v0.2.3 h1:AAQ6Vmo/ncfrZYtbpjhO+g0Qt+iNpYtl3UWT1NLmbYY=
+github.com/hashicorp/go-secure-stdlib/awsutil v0.2.3/go.mod h1:oKHSQs4ivIfZ3fbXGQOop1XuDfdSb8RIsWTGaAanSfg=
+github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw=
+github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 h1:ET4pqyjiGmY09R5y+rSd70J2w45CtbWDNvGqWp/R3Ng=
+github.com/hashicorp/go-secure-stdlib/base62 v0.1.2/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw=
+github.com/hashicorp/go-secure-stdlib/fileutil v0.1.0 h1:f2mwVgMJjXuX/+eWD6ZW30+oIRgCofL+XMWknFkB1WM=
+github.com/hashicorp/go-secure-stdlib/fileutil v0.1.0/go.mod h1:uwcr2oga9pN5+OkHZyTN5MDk3+1YHOuMukhpnPaQAoI=
+github.com/hashicorp/go-secure-stdlib/gatedwriter v0.1.1 h1:9um9R8i0+HbRHS9d64kdvWR0/LJvo12sIonvR9zr1+U=
+github.com/hashicorp/go-secure-stdlib/gatedwriter v0.1.1/go.mod h1:6RoRTSMDK2H/rKh3P/JIsk1tK8aatKTt3JyvIopi3GQ=
+github.com/hashicorp/go-secure-stdlib/kv-builder v0.1.2 h1:NS6BHieb/pDfx3M9jDdaPpGyyVp+aD4A3DjX3dgRmzs=
+github.com/hashicorp/go-secure-stdlib/kv-builder v0.1.2/go.mod h1:rf5JPE13wi+NwjgsmGkbg4b2CgHq8v7Htn/F0nDe/hg=
+github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I=
+github.com/hashicorp/go-secure-stdlib/mlock v0.1.2/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I=
+github.com/hashicorp/go-secure-stdlib/mlock v0.1.3 h1:kH3Rhiht36xhAfhuHyWJDgdXXEx9IIZhDGRk24CDhzg=
+github.com/hashicorp/go-secure-stdlib/mlock v0.1.3/go.mod h1:ov1Q0oEDjC3+A4BwsG2YdKltrmEw8sf9Pau4V9JQ4Vo=
+github.com/hashicorp/go-secure-stdlib/nonceutil v0.1.0 h1:iJG9Q3iUme12yH+wzBMGYrw/Am4CfX3sDcA8m5OGfhQ=
+github.com/hashicorp/go-secure-stdlib/nonceutil v0.1.0/go.mod h1:s28ohJ0kU6tersf0it/WsBCyZSdziPlP+G1FRA3ar28=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs=
+github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
+github.com/hashicorp/go-secure-stdlib/password v0.1.1 h1:6JzmBqXprakgFEHwBgdchsjaA9x3GyjdI568bXKxa60=
+github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo=
+github.com/hashicorp/go-secure-stdlib/reloadutil v0.1.1 h1:SMGUnbpAcat8rIKHkBPjfv81yC46a8eCNZ2hsR2l1EI=
+github.com/hashicorp/go-secure-stdlib/reloadutil v0.1.1/go.mod h1:Ch/bf00Qnx77MZd49JRgHYqHQjtEmTgGU2faufpVZb0=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
+github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
+github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2 h1:phcbL8urUzF/kxA/Oj6awENaRwfWsjP59GW7u2qlDyY=
+github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs=
+github.com/hashicorp/go-slug v0.11.1 h1:c6lLdQnlhUWbS5I7hw8SvfymoFuy6EmiFDedy6ir994=
+github.com/hashicorp/go-slug v0.11.1/go.mod h1:Ib+IWBYfEfJGI1ZyXMGNbu2BU+aa3Dzu41RKLH301v4=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
+github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
+github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-tfe v1.25.1 h1:OxjDhY8Rj36n/uTSmhdFRLcnhXFfRTsopiovYSkJjak=
+github.com/hashicorp/go-tfe v1.25.1/go.mod h1:1Y6nsdMuJ14lYdc1VMLl/erlthvMzUsJn+WYWaAdSc4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
+github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
+github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM=
+github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM=
+github.com/hashicorp/hcl/v2 v2.16.2 h1:mpkHZh/Tv+xet3sy3F9Ld4FyI2tUpWe9x3XtPx9f1a0=
+github.com/hashicorp/hcl/v2 v2.16.2/go.mod h1:JRmR89jycNkrrqnMmvPDMd56n1rQJ2Q6KocSLCMCXng=
+github.com/hashicorp/hcp-link v0.1.0 h1:F6F1cpADc+o5EBI5CbJn5RX4qdFSLpuA4fN69eeE5lQ=
+github.com/hashicorp/hcp-link v0.1.0/go.mod h1:BWVDuJDHrKJtWc5qI07bX5xlLjSgWq6kYLQUeG1g5dM=
+github.com/hashicorp/hcp-scada-provider v0.2.1 h1:yr+Uxini7SWTZ2t49d3Xi+6+X/rbsSFx8gq6WVcC91c=
+github.com/hashicorp/hcp-scada-provider v0.2.1/go.mod h1:Q0WpS2RyhBKOPD4X/8oW7AJe7jA2HXB09EwDzwRTao0=
+github.com/hashicorp/hcp-sdk-go v0.23.0 h1:3WarkQSK0VzxJaH6psHIGQagag3ujL+NjWagZZHpiZM=
+github.com/hashicorp/hcp-sdk-go v0.23.0/go.mod h1:/9UoDY2FYYA8lFaKBb2HmM/jKYZGANmf65q9QRc/cVw=
+github.com/hashicorp/jsonapi v0.0.0-20210826224640-ee7dae0fb22d h1:9ARUJJ1VVynB176G1HCwleORqCaXm/Vx0uUi0dL26I0=
+github.com/hashicorp/jsonapi v0.0.0-20210826224640-ee7dae0fb22d/go.mod h1:Yog5+CPEM3c99L1CL2CFCYoSzgWm5vTU58idbRUaLik=
+github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
+github.com/hashicorp/mdns v1.0.4 h1:sY0CMhFmjIPDMlTB+HfymFHCaYLhgifZ0QhjaYKD/UQ=
+github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
+github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
+github.com/hashicorp/net-rpc-msgpackrpc/v2 v2.0.0 h1:kBpVVl1sl3MaSrs97e0+pDQhSrqJv9gVbSUrPpVfl1w=
+github.com/hashicorp/net-rpc-msgpackrpc/v2 v2.0.0/go.mod h1:6pdNz0vo0mF0GvhwDG56O3N18qBrAz/XRIcfINfTbwo=
+github.com/hashicorp/nomad/api v0.0.0-20230519153805-2275a83cbfdf h1:cKXVf1UJqwdkGiTF3idqCOLApAql0310OSmJxeiaMWg=
+github.com/hashicorp/nomad/api v0.0.0-20230519153805-2275a83cbfdf/go.mod h1:rb38DqjaaIfhJRiLeCAGgIt+wV7o78rB+liyFE3mVzE=
+github.com/hashicorp/raft v1.0.1/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI=
+github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM=
+github.com/hashicorp/raft v1.1.2-0.20191002163536-9c6bd3e3eb17/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8=
+github.com/hashicorp/raft v1.2.0/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8=
+github.com/hashicorp/raft v1.3.10 h1:LR5QZX1VQd0DFWZfeCwWawyeKfpS/Tm1yjnJIY5X4Tw=
+github.com/hashicorp/raft v1.3.10/go.mod h1:J8naEwc6XaaCfts7+28whSeRvCqTd6e20BlCU3LtEO4=
+github.com/hashicorp/raft-autopilot v0.2.0 h1:2/R2RPgamgRKgNWGQioULZvjeKXQZmDuw5Ty+6c+H7Y=
+github.com/hashicorp/raft-autopilot v0.2.0/go.mod h1:q6tZ8UAZ5xio2gv2JvjgmtOlh80M6ic8xQYBe2Egkg8=
+github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk=
+github.com/hashicorp/raft-boltdb/v2 v2.0.0-20210421194847-a7e34179d62c h1:oiKun9QlrOz5yQxMZJ3tf1kWtFYuKSJzxzEDxDPevj4=
+github.com/hashicorp/raft-boltdb/v2 v2.0.0-20210421194847-a7e34179d62c/go.mod h1:kiPs9g148eLShc2TYagUAyKDnD+dH9U+CQKsXzlY9xo=
+github.com/hashicorp/raft-snapshot v1.0.4 h1:EuDuayAJPdiDmVk1ygTDnG2zDzrs0/6/yBuma1IYSow=
+github.com/hashicorp/raft-snapshot v1.0.4/go.mod h1:5sL9eUn72lH5DzsFIJ9jaysITbHksSSszImWSOTC8Ic=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
+github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
+github.com/hashicorp/vault-plugin-auth-alicloud v0.15.0 h1:R2SVwOeVLG5DXzUx42UWhjfFqS0Z9+ncfebPu+gO9VA=
+github.com/hashicorp/vault-plugin-auth-alicloud v0.15.0/go.mod h1:YQXpa2s4rGYKm3Oa/Nkgh5SuGVfHFNEIUwDDYWyhloE=
+github.com/hashicorp/vault-plugin-auth-azure v0.16.0 h1:jyq9l/lrnfeg3KPhzyFwzYGdD828QVkAOKOu/TJfpUQ=
+github.com/hashicorp/vault-plugin-auth-azure v0.16.0/go.mod h1:7UROiYI5ICJBnmR4M6Qa95M94d/xVBI2eSlkh4E8D0U=
+github.com/hashicorp/vault-plugin-auth-centrify v0.15.1 h1:6StAr5tltpySNgyUwWC8czm9ZqkO7NIZfcRmxxtFwQ8=
+github.com/hashicorp/vault-plugin-auth-centrify v0.15.1/go.mod h1:xXs4I5yLxbQ5VHcpvSxkRhShCTXd8Zyrni8qnFrfQ4Y=
+github.com/hashicorp/vault-plugin-auth-cf v0.15.0 h1:zIVGlYXCRBY/ElucWdFC9xF27d2QMGMQPm9wSezGREI=
+github.com/hashicorp/vault-plugin-auth-cf v0.15.0/go.mod h1:FEIjQkYmzno4MfU36MAjFUG9/JUWeMPxvBG5DRTMYVM=
+github.com/hashicorp/vault-plugin-auth-gcp v0.16.0 h1:DA/ZDLCrUsbHS/7Xqkkw7l2SgbQE9rWEHLLWYTGu8rw=
+github.com/hashicorp/vault-plugin-auth-gcp v0.16.0/go.mod h1:R0z/qdyxn0uq6hkKgux8KwenjV/n/CCaEz+qOF9GdPg=
+github.com/hashicorp/vault-plugin-auth-jwt v0.16.0 h1:BUk03WDSGZuB+kEq3HTOQ7ecEH2Z1Idit42jfB5EnpE=
+github.com/hashicorp/vault-plugin-auth-jwt v0.16.0/go.mod h1:Ve3r228afZOShwNvp+MGEKtm+ROskv10GG7bMXZb5OA=
+github.com/hashicorp/vault-plugin-auth-kerberos v0.10.0 h1:YH2x9kIV0jKXk22tVkpydhmPeEgprC7IOfN8l0pjF6c=
+github.com/hashicorp/vault-plugin-auth-kerberos v0.10.0/go.mod h1:I6ulXug4oxx77DFYjqI1kVl+72TgXEo3Oju4tTOVfU4=
+github.com/hashicorp/vault-plugin-auth-kubernetes v0.16.0 h1:vuXNJvtMyoqQ01Sfwf2TNcJNkGcxP1vD3C7gpvuVkCU=
+github.com/hashicorp/vault-plugin-auth-kubernetes v0.16.0/go.mod h1:onx9W/rDwENQkN+1yEnJvS51PVkkGAPOBXasne7lnnk=
+github.com/hashicorp/vault-plugin-auth-oci v0.14.0 h1:B7uyigqgUAO3gebvi8mMmsq7l4QAG0bLEP6rAKyDVuw=
+github.com/hashicorp/vault-plugin-auth-oci v0.14.0/go.mod h1:SYdTtQhzMxqOCbdC0E0UOrkc4eGXXcJmXXbe1MHVPtE=
+github.com/hashicorp/vault-plugin-database-couchbase v0.9.2 h1:UWPWUADWUE08a3qeZixd/diIcNIm0NTqdPNTNbUljuQ=
+github.com/hashicorp/vault-plugin-database-couchbase v0.9.2/go.mod h1:BvyZMbDEhvT4chbb7lgnL8xsVy9rF+hbDWuJ/eKkgpI=
+github.com/hashicorp/vault-plugin-database-elasticsearch v0.13.2 h1:N81xJfdVjAo49dUu5Wo95C0fv5scpbYL9z4ykWeHxJg=
+github.com/hashicorp/vault-plugin-database-elasticsearch v0.13.2/go.mod h1:P4cUbvtXgvfWZassvJzyXC4nIGRUO72ds9rE5WpQnuw=
+github.com/hashicorp/vault-plugin-database-mongodbatlas v0.10.0 h1:fgsiuSq3AeFcYnbPkXOLSkKDrS2blaS/6MAmHEIAH28=
+github.com/hashicorp/vault-plugin-database-mongodbatlas v0.10.0/go.mod h1:jH0OvjQ3Otg0HoOR5NugTqC3JA1KJ+J5OL0NdAzgSb4=
+github.com/hashicorp/vault-plugin-database-redis v0.2.1 h1:E+UeZcpNtQO8nMfVebwE5ZS2sJpNjzbKwYJX1y8FFNk=
+github.com/hashicorp/vault-plugin-database-redis v0.2.1/go.mod h1:T0i639Xnh2DY5ij8ofS83ZauBh8N0drKzqXYDrH87tM=
+github.com/hashicorp/vault-plugin-database-redis-elasticache v0.2.1 h1:D8mdwkB6CyC37wkpdW9mgJNNrqral956bFoVj3AoQoE=
+github.com/hashicorp/vault-plugin-database-redis-elasticache v0.2.1/go.mod h1:1RdJ0uxD8Mquzx9DBfoFKkmHgeZrPTN5nZHGyDrVCuY=
+github.com/hashicorp/vault-plugin-database-snowflake v0.9.0 h1:kYr7DXkxuznPstyvt0HQ6HfMBv7M3agGjDcgiOJuxm4=
+github.com/hashicorp/vault-plugin-database-snowflake v0.9.0/go.mod h1:szELJH+NFTZPB4KbmIkcBD3E+BfVVdew7YCOFW7u2LY=
+github.com/hashicorp/vault-plugin-mock v0.16.1 h1:5QQvSUHxDjEEbrd2REOeacqyJnCLPD51IQzy71hx8P0=
+github.com/hashicorp/vault-plugin-mock v0.16.1/go.mod h1:83G4JKlOwUtxVourn5euQfze3ZWyXcUiLj2wqrKSDIM=
+github.com/hashicorp/vault-plugin-secrets-ad v0.16.0 h1:6RCpd2PbBvmi5xmxXhggE0Xv+/Gag896/NNZeMKH+8A=
+github.com/hashicorp/vault-plugin-secrets-ad v0.16.0/go.mod h1:6IeXly3xi+dVodzFSx6aVZjdhd3syboPyhxr1/WMcyo=
+github.com/hashicorp/vault-plugin-secrets-alicloud v0.15.0 h1:uVpcx2s3PwYXSOHmjA/Ai6+V0c3wgvSApELZez8b9mI=
+github.com/hashicorp/vault-plugin-secrets-alicloud v0.15.0/go.mod h1:wMTkhPGxDa2PCdSBqd6A8SMcRrltu3NRbwX8m8W1MCU=
+github.com/hashicorp/vault-plugin-secrets-azure v0.16.1 h1:eMU5qYPa5dQQALPP7B+UPB0QCSHzB6LKrqbNCcRr7Ts=
+github.com/hashicorp/vault-plugin-secrets-azure v0.16.1/go.mod h1:tNzshPyCxkuOL4PLF3cybN/XaSlWgvfl6dwEbCARybY=
+github.com/hashicorp/vault-plugin-secrets-gcp v0.16.0 h1:5ozLtt38Bw/DLt37dbccT8j56A+2T7CWFfYecKleGl4=
+github.com/hashicorp/vault-plugin-secrets-gcp v0.16.0/go.mod h1:Ax9/ALmpzyjU8mcqHVYR9lwjcyazdmimrShDYeK9CHc=
+github.com/hashicorp/vault-plugin-secrets-gcpkms v0.15.0 h1:CueteKXEuO52qGu1nUaDc/euSTSfQD9MONkXuvWdZQw=
+github.com/hashicorp/vault-plugin-secrets-gcpkms v0.15.0/go.mod h1:a0Z2DVGd2SPPwLb8edXeHyer3CXei/Y0cb7EFkiFMfA=
+github.com/hashicorp/vault-plugin-secrets-kubernetes v0.5.0 h1:g0W1ybHjO945jDtuDEFcqTINyW/s06wxZarE/7aLumc=
+github.com/hashicorp/vault-plugin-secrets-kubernetes v0.5.0/go.mod h1:2wobeIypBESGQYmhv12vuAorCvfETHpBoMyrb+6QTmQ=
+github.com/hashicorp/vault-plugin-secrets-kv v0.15.0 h1:S2d1t4m4ilDNJRdMUzNUimvyu/+ll8huq5QncVgYz+s=
+github.com/hashicorp/vault-plugin-secrets-kv v0.15.0/go.mod h1:xu/eiT+BB2b2Gh/AZFJ1xCS8E7S29gOQcuh9VMxros8=
+github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.10.2 h1:5eFlzhFXoSe+ntm26wromhtLbPjTCdXcdwpMv7wFeHk=
+github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.10.2/go.mod h1:OdXvez+GH0XBSRS7gxbS8B1rLUPb8bGk+bDVyEaAzI8=
+github.com/hashicorp/vault-plugin-secrets-openldap v0.11.1 h1:8TI1l3Dt1pdkPPDG/1SyoKbWB/PBc1kHJ/nSD+2jTR4=
+github.com/hashicorp/vault-plugin-secrets-openldap v0.11.1/go.mod h1:aeTnHAsh580Kml5O+kyn9g0ywE5f7EQXkXFeraMF08A=
+github.com/hashicorp/vault-plugin-secrets-terraform v0.7.1 h1:Icb3EDpNvb4ltnGff2Zrm3JVNDDdbbL2wdA2LouD2KQ=
+github.com/hashicorp/vault-plugin-secrets-terraform v0.7.1/go.mod h1:JHHo1nWOgYPsbTqE/PVwkTKRkLSlPSqo9RBqZ7NLKB8=
+github.com/hashicorp/vault-testing-stepwise v0.1.3 h1:GYvm98EB4nUKUntkBcLicnKsebeV89KPHmAGJUCPU/c=
+github.com/hashicorp/vault-testing-stepwise v0.1.3/go.mod h1:Ym1T/kMM2sT6qgCIIJ3an7uaSWCJ8O7ohsWB9UiB5tI=
+github.com/hashicorp/vault/vault/hcp_link/proto v0.0.0-20230201201504-b741fa893d77 h1:Y/+BtwxmRak3Us9jrByARvYW6uNeqZlEpMylIdXVIjY=
+github.com/hashicorp/vault/vault/hcp_link/proto v0.0.0-20230201201504-b741fa893d77/go.mod h1:a2crHoMWwY6aiL8GWT8hYj7vKD64uX0EdRPbnsHF5wU=
+github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 h1:O/pT5C1Q3mVXMyuqg7yuAWUg/jMZR1/0QTzTRdNR6Uw=
+github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443/go.mod h1:bEpDU35nTu0ey1EXjwNwPjI9xErAsoOCmcMb9GKvyxo=
+github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
+github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
+github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
+github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE=
+github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU=
+github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
+github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
+github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/influxdata/influxdb v1.7.6/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
+github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab h1:HqW4xhhynfjrtEiiSGcQUd6vrK23iMam1FO8rI7mwig=
+github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
+github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ=
+github.com/intel/goresctrl v0.3.0/go.mod h1:fdz3mD85cmP9sHD8JUlrNWAxvwM86CrbmVXltEKd7zk=
+github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
+github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw=
+github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
+github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
+github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc=
+github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
+github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
+github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
+github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
+github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
+github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=
+github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
+github.com/jackc/pgconn v1.14.0 h1:vrbA9Ud87g6JdFWkHTJXppVce58qPIdP7N8y0Ml/A7Q=
+github.com/jackc/pgconn v1.14.0/go.mod h1:9mBNlny0UvkgJdCDvdVHYSjI+8tD2rnKK69Wz8ti++E=
+github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
+github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
+github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
+github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c=
+github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc=
+github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak=
+github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
+github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.3.2 h1:7eY55bdBeCz1F2fTzSz69QC+pG46jYq9/jtSPiJ5nn0=
+github.com/jackc/pgproto3/v2 v2.3.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
+github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
+github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
+github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
+github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
+github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
+github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM=
+github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw=
+github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
+github.com/jackc/pgx v3.3.0+incompatible h1:Wa90/+qsITBAPkAZjiByeIGHFcj3Ztu+VzrrIpHjL90=
+github.com/jackc/pgx v3.3.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
+github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
+github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
+github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
+github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
+github.com/jackc/pgx/v4 v4.18.1 h1:YP7G1KABtKpB5IHrO9vYwSrCOhs7p3uqhvhhQBptya0=
+github.com/jackc/pgx/v4 v4.18.1/go.mod h1:FydWkUyadDmdNH/mHnGob881GawxeEm7TcMCzkb+qQE=
+github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4=
+github.com/jarcoal/httpmock v1.0.7 h1:d1a2VFpSdm5gtjhCPWsQHSnx8+5V3ms5431YwvmkuNk=
+github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
+github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
+github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
+github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
+github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg=
+github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo=
+github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o=
+github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
+github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8=
+github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs=
+github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
+github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
+github.com/jeffchao/backoff v0.0.0-20140404060208-9d7fd7aa17f2 h1:mex1izRBCD+7WjieGgRdy7e651vD/lvB1bD9vNE/3K4=
+github.com/jeffchao/backoff v0.0.0-20140404060208-9d7fd7aa17f2/go.mod h1:xkfESuHriIekR+4RoV+fu91j/CfnYM29Zi2tMFw5iD4=
+github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f h1:E87tDTVS5W65euzixn7clSzK66puSt1H4I5SC0EmHH4=
+github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f/go.mod h1:3J2qVK16Lq8V+wfiL2lPeDZ7UWMxk5LemerHa1p6N00=
+github.com/jefferai/jsonx v1.0.0 h1:Xoz0ZbmkpBvED5W9W1B5B/zc3Oiq7oXqiW7iRV3B6EI=
+github.com/jefferai/jsonx v1.0.0/go.mod h1:OGmqmi2tTeI/PS+qQfBDToLHHJIy/RMp24fPo8vFvoQ=
+github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE=
+github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=
+github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
+github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/joyent/triton-go v0.0.0-20180628001255-830d2b111e62/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA=
+github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f h1:ENpDacvnr8faw5ugQmEF1QYk+f/Y9lXFvuYmRxykago=
+github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f/go.mod h1:KDSfL7qe5ZfQqvlDMkVjCztbmcpp/c8M77vhQP8ZPvk=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
+github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
+github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8=
+github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4=
+github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
+github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
+github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4=
+github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI=
+github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
+github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU=
+github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
+github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
+github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
+github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
+github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
+github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y=
+github.com/lestrrat-go/blackmagic v1.0.0/go.mod h1:TNgH//0vYSs8VXDCfkZLgIrVTTXQELZffUV0tz3MtdQ=
+github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E=
+github.com/lestrrat-go/iter v1.0.1/go.mod h1:zIdgO1mRKhn8l9vrZJZz9TUMMFbQbLeTsbqPDrJ/OJc=
+github.com/lestrrat-go/jwx v1.2.25/go.mod h1:zoNuZymNl5lgdcu6P7K6ie2QRll5HVfF4xwxBBK1NxY=
+github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
+github.com/lib/pq v0.0.0-20180327071824-d34b9ff171c2/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/linode/linodego v0.7.1 h1:4WZmMpSA2NRwlPZcc0+4Gyn7rr99Evk9bnr0B3gXRKE=
+github.com/linode/linodego v0.7.1/go.mod h1:ga11n3ivecUrPCHN0rANxKmfWBJVkOXfLMZinAbj2sY=
+github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
+github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
+github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
+github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
+github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
+github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11 h1:YFh+sjyJTMQSYjKwM4dFKhJPJC/wfo98tPUc17HdoYw=
+github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11/go.mod h1:Ah2dBMoxZEqk118as2T4u4fjfXarE0pPnMJaArZQZsI=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI=
+github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
+github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
+github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
+github.com/mediocregopher/radix/v4 v4.1.2 h1:Pj7XnNK5WuzzFy63g98pnccainAePK+aZNQRvxSvj2I=
+github.com/mediocregopher/radix/v4 v4.1.2/go.mod h1:ajchozX/6ELmydxWeWM6xCFHVpZ4+67LXHOTOVR0nCE=
+github.com/mholt/archiver/v3 v3.5.1 h1:rDjOBX9JSF5BvoJGvjqK479aL70qh9DIpZCl+k7Clwo=
+github.com/mholt/archiver/v3 v3.5.1/go.mod h1:e3dqJ7H78uzsRSEACH1joayhuSyhnonssnDhppzS1L4=
+github.com/michaelklishin/rabbit-hole/v2 v2.12.0 h1:946p6jOYFcVJdtBBX8MwXvuBkpPjwm1Nm2Qg8oX+uFk=
+github.com/michaelklishin/rabbit-hole/v2 v2.12.0/go.mod h1:AN/3zyz7d++OHf+4WUo/LR0+Q5nlPHMaXasIsG/mPY0=
+github.com/microsoft/kiota-abstractions-go v1.1.0 h1:X1aKlsYCRs/0RSChr/fbq4j/+kxRzbSY5GeWhtHQNYI=
+github.com/microsoft/kiota-abstractions-go v1.1.0/go.mod h1:RkxyZ5x87Njik7iVeQY9M2wtrrL1MJZcXiI/BxD/82g=
+github.com/microsoft/kiota-authentication-azure-go v1.0.0 h1:29FNZZ/4nnCOwFcGWlB/sxPvWz487HA2bXH8jR5k2Rk=
+github.com/microsoft/kiota-authentication-azure-go v1.0.0/go.mod h1:rnx3PRlkGdXDcA/0lZQTbBwyYGmc+3POt7HpE/e4jGw=
+github.com/microsoft/kiota-http-go v1.0.0 h1:F1hd6gMlLeEgH2CkRB7z13ow7LxMKMWEmms/t0VfS+k=
+github.com/microsoft/kiota-http-go v1.0.0/go.mod h1:eujxJliqodotsYepIc6ihhK+vXMMt5Q8YiSNL7+7M7U=
+github.com/microsoft/kiota-serialization-form-go v1.0.0 h1:UNdrkMnLFqUCccQZerKjblsyVgifS11b3WCx+eFEsAI=
+github.com/microsoft/kiota-serialization-form-go v1.0.0/go.mod h1:h4mQOO6KVTNciMF6azi1J9QB19ujSw3ULKcSNyXXOMA=
+github.com/microsoft/kiota-serialization-json-go v1.0.4 h1:5TaISWwd2Me8clrK7SqNATo0tv9seOq59y4I5953egQ=
+github.com/microsoft/kiota-serialization-json-go v1.0.4/go.mod h1:rM4+FsAY+9AEpBsBzkFFis+b/LZLlNKKewuLwK9Q6Mg=
+github.com/microsoft/kiota-serialization-text-go v1.0.0 h1:XOaRhAXy+g8ZVpcq7x7a0jlETWnWrEum0RhmbYrTFnA=
+github.com/microsoft/kiota-serialization-text-go v1.0.0/go.mod h1:sM1/C6ecnQ7IquQOGUrUldaO5wj+9+v7G2W3sQ3fy6M=
+github.com/microsoftgraph/msgraph-sdk-go v1.13.0 h1:k+3FJJYCSBcnIueefLrO4Ofsd4UP4NzOQ9k3La8I2oU=
+github.com/microsoftgraph/msgraph-sdk-go v1.13.0/go.mod h1:ccLv84FJFtwdSzYWM/HlTes5FLzkzzBsYh9kg93/WS8=
+github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0 h1:7NWTfyXvOjoizW7PmxNp3+8wCKPgpODs/D1cUZ3fkAY=
+github.com/microsoftgraph/msgraph-sdk-go-core v1.0.0/go.mod h1:tQb4q3YMIj2dWhhXhQSJ4ELpol931ANKzHSYK5kX1qE=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
+github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
+github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
+github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg=
+github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
+github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/mikesmitty/edkey v0.0.0-20170222072505-3356ea4e686a h1:eU8j/ClY2Ty3qdHnn0TyW3ivFoPC/0F1gQZz8yTxbbE=
+github.com/mikesmitty/edkey v0.0.0-20170222072505-3356ea4e686a/go.mod h1:v8eSC2SMp9/7FTKUncp7fH9IwPfw+ysMObcEz5FWheQ=
+github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs=
+github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
+github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI=
+github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE=
+github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
+github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
+github.com/mitchellh/cli v1.1.5 h1:OxRIeJXpAMztws/XHlN2vu6imG5Dpq+j61AzAX5fLng=
+github.com/mitchellh/cli v1.1.5/go.mod h1:v8+iFts2sPIKUV1ltktPXMCC8fumSKFItNcD2cLtRR4=
+github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=
+github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw=
+github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
+github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
+github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
+github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
+github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=
+github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/hashstructure v1.1.0 h1:P6P1hdjqAAknpY/M1CGipelZgp+4y9ja9kmUZPXP+H0=
+github.com/mitchellh/hashstructure v1.1.0/go.mod h1:xUDAozZz0Wmdiufv0uyhnHkUTN6/6d8ulp4AwfLKrmA=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
+github.com/mitchellh/pointerstructure v1.2.1 h1:ZhBBeX8tSlRpu/FFhXH4RC4OJzFlqsQhoHZAz4x7TIw=
+github.com/mitchellh/pointerstructure v1.2.1/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
+github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
+github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs=
+github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
+github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo=
+github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
+github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
+github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
+github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
+github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
+github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
+github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
+github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
+github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs=
+github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
+github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
+github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
+github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
+github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA=
+github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
+github.com/mongodb-forks/digest v1.0.5 h1:EJu3wtLZcA0HCvsZpX5yuD193/sW9tHiNvrEM5apXMk=
+github.com/mongodb-forks/digest v1.0.5/go.mod h1:rb+EX8zotClD5Dj4NdgxnJXG9nwrlx3NWKJ8xttz1Dg=
+github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
+github.com/montanaflynn/stats v0.7.0 h1:r3y12KyNxj/Sb/iOE46ws+3mS1+MZca1wlHQFPsY/JU=
+github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
+github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
+github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
+github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs=
+github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/natefinch/atomic v0.0.0-20150920032501-a62ce929ffcc h1:7xGrl4tTpBQu5Zjll08WupHyq+Sp0Z/adtyf1cfk3Q8=
+github.com/natefinch/atomic v0.0.0-20150920032501-a62ce929ffcc/go.mod h1:1rLVY/DWf3U6vSZgH16S7pymfrhK2lcUlXjgGglw/lY=
+github.com/ncw/swift v1.0.47 h1:4DQRPj35Y41WogBxyhOXlrI37nzGlyEcsforeudyYPQ=
+github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/networkplumbing/go-nft v0.2.0/go.mod h1:HnnM+tYvlGAsMU7yoYwXEVLLiDW9gdMmb5HoGcwpuQs=
+github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 h1:BQ1HW7hr4IVovMwWg0E0PYcyW8CzqDcVmaew9cujU4s=
+github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2/go.mod h1:TLb2Sg7HQcgGdloNxkrmtgDNR9uVYF3lfdFIN4Ro6Sk=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nwaples/rardecode v1.1.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
+github.com/nwaples/rardecode v1.1.2 h1:Cj0yZY6T1Zx1R7AhTbyGSALm44/Mmq+BAPc4B/p/d3M=
+github.com/nwaples/rardecode v1.1.2/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
+github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
+github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/okta/okta-sdk-golang/v2 v2.12.1 h1:U+smE7trkHSZO8Mval3Ow85dbxawO+pMAr692VZq9gM=
+github.com/okta/okta-sdk-golang/v2 v2.12.1/go.mod h1:KRoAArk1H216oiRnQT77UN6JAhBOnOWkK27yA1SM7FQ=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/olekukonko/tablewriter v0.0.0-20180130162743-b8a9be070da4/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
+github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
+github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
+github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk=
+github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0=
+github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo=
+github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw=
+github.com/onsi/ginkgo/v2 v2.6.1/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo=
+github.com/onsi/ginkgo/v2 v2.9.1 h1:zie5Ly042PD3bsCvsSOPvRnFwyo3rKe64TJlD6nu0mk=
+github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
+github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
+github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
+github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc=
+github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM=
+github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
+github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
+github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM=
+github.com/onsi/gomega v1.24.2/go.mod h1:gs3J10IS7Z7r7eXRoNJIrNqU4ToQukCJhFtKrWgHWnk=
+github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E=
+github.com/open-policy-agent/opa v0.42.2/go.mod h1:MrmoTi/BsKWT58kXlVayBb+rYVeaMwuBm3nYAN3923s=
+github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=
+github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8=
+github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=
+github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
+github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
+github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
+github.com/opencontainers/runc v1.1.2/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
+github.com/opencontainers/runc v1.1.4/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
+github.com/opencontainers/runc v1.1.5/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
+github.com/opencontainers/runc v1.1.6 h1:XbhB8IfG/EsnhNvZtNdLB0GBw92GYEFvKlhaJk9jUgA=
+github.com/opencontainers/runc v1.1.6/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50=
+github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20220825212826-86290f6a00fb/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.1.0-rc.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
+github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
+github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626/go.mod h1:BRHJJd0E+cx42OybVYSgUvZmU0B8P9gZuRXlZUP7TKI=
+github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
+github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
+github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
+github.com/opencontainers/selinux v1.9.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
+github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
+github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
+github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go
v1.2.1-0.20220228012449-10b1cf09e00b h1:FfH+VrHHk6Lxt9HdVS0PXzSXFyS2NbZKXv33FYPol0A= +github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b/go.mod h1:AC62GU6hc0BrNm+9RK9VSiwa/EUe1bkIeFORAMcHvJU= +github.com/oracle/oci-go-sdk v24.3.0+incompatible h1:x4mcfb4agelf1O4/1/auGlZ1lr97jXRSSN5MxTgG/zU= +github.com/oracle/oci-go-sdk v24.3.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888= +github.com/oracle/oci-go-sdk/v60 v60.0.0 h1:EJAWjEi4SY5Raha6iUzq4LTQ0uM5YFw/wat/L1ehIEM= +github.com/oracle/oci-go-sdk/v60 v60.0.0/go.mod h1:krz+2gkSzlSL/L4PvP0Z9pZpag9HYLNtsMd1PmxlA2w= +github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= +github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= +github.com/ory/dockertest/v3 v3.10.0 h1:4K3z2VMe8Woe++invjaTB7VRyQXQy5UY+loujO4aNE4= +github.com/ory/dockertest/v3 v3.10.0/go.mod h1:nr57ZbRWMqfsdGdFNLHz5jjNdDb7VVFnzAeW1n5N1Lg= +github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2 h1:CXwSGu/LYmbjEab5aMCs5usQRVBGThelUKBNnoSOuso= +github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOThbttwfYRNFOWLLVXMhk5Lkio4GGOtw5UrxS0= +github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c h1:vwpFWvAO8DeIZfFeqASzZfsxuWPno9ncAebBEP0N3uE= +github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c/go.mod h1:otzZQXgoO96RTzDB/Hycg0qZcXZsWJGJRSXbmEIJ+4M= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/peterh/liner v0.0.0-20170211195444-bf27d3ba8e1d/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= +github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= +github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= 
+github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= +github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4/v4 v4.1.2/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc= +github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pires/go-proxyproto v0.6.1 h1:EBupykFmo22SDjv4fQVQd2J9NOoLPmyZA/15ldOGkPw= +github.com/pires/go-proxyproto v0.6.1/go.mod h1:Odh9VFOZJCf9G8cLW5o435Xf1J95Jw9Gw5rnCjcwzAY= +github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= +github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc= +github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= +github.com/pquerna/otp v1.2.1-0.20191009055518-468c2dd2b58d h1:PinQItctnaL2LtkaSM678+ZLLy5TajwOeXzWvYC7tII= +github.com/pquerna/otp v1.2.1-0.20191009055518-468c2dd2b58d/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod 
h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= +github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/procfs 
v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rboyer/safeio v0.2.1 h1:05xhhdRNAdS3apYm7JRjOqngf4xruaW959jmRxGDuSU= +github.com/rboyer/safeio v0.2.1/go.mod h1:Cq/cEPK+YXFn622lsQ0K4KsPZSPtaptHHEldsy7Fmig= +github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 h1:Wdi9nwnhFNAlseAOekn6B5G/+GMtks9UKbvRU/CMM/o= +github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03/go.mod h1:gRAiPF5C5Nd0eyyRdqIu9qTiFSoZzpTq727b5B8fkkU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.4.0/go.mod 
h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.0+incompatible h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/safchain/ethtool v0.2.0/go.mod h1:WkKB1DnNtvsMlDmQ50sgwowDJV/hGbJSOvJoEXs1AJQ= +github.com/sasha-s/go-deadlock v0.2.0 h1:lMqc+fUb7RrFS3gQLtoQsJ7/6TV/pAIFvBsqX73DK8Y= +github.com/sasha-s/go-deadlock v0.2.0/go.mod h1:StQn567HiB1fF2yJ44N9au7wOhrPS3iZqiDbRupzT10= +github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= +github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= +github.com/sean-/conswriter v0.0.0-20180208195008-f5ae3917a627/go.mod h1:7zjs06qF79/FKAJpBvFx3P8Ww4UTIMAe+lpNXDHziac= +github.com/sean-/pager v0.0.0-20180208200047-666be9bf53b5/go.mod h1:BeybITEsBEg6qbIiqJ6/Bqeq25bCLbL7YFmpaFfJDuM= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= +github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/sethvargo/go-limiter v0.7.1 h1:wWNhTj0pxjyJ7wuJHpRJpYwJn+bUnjYfw2a85eu5w9U= +github.com/sethvargo/go-limiter v0.7.1/go.mod h1:C0kbSFbiriE5k2FFOe18M1YZbAR2Fiwf72uGu0CXCcU= +github.com/shirou/gopsutil/v3 v3.22.6 
h1:FnHOFOh+cYAM0C30P+zysPISzlknLC5Z1G4EAElznfQ= +github.com/shirou/gopsutil/v3 v3.22.6/go.mod h1:EdIubSnZhbAvBS1yJ7Xi+AShB/hxwLHOMz4MCYz7yMs= +github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= +github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/snowflakedb/gosnowflake v1.6.24 h1:NiBh1WSstNtr12qywmdFMS1XHaYdF5iWWGnjIQb1cEY= +github.com/snowflakedb/gosnowflake v1.6.24/go.mod h1:KfO4F7bk+aXPUIvBqYxvPhxLlu2/w4TtSC8Rw/yr5Mg= +github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d h1:bVQRCxQvfjNUeRqaY/uT0tFuvuFY0ulgnczuR684Xic= +github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d/go.mod h1:Cw4GTlQccdRGSEf6KiMju767x0NEHE0YIVPJSaXjlsw= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b h1:br+bPNZsJWKicw/5rALEo67QHs5weyD5tf8WST+4sJ0= +github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0/go.mod 
h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= +github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= +github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/streadway/amqp v1.0.0 h1:kuuDrUJFZL1QYL9hUNuCxNObNzB0bV/ZG5jV3RWAQgo= +github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.3.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 
h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= +github.com/tencentcloud/tencentcloud-sdk-go v1.0.162 h1:8fDzz4GuVg4skjY2B0nMN7h6uN61EDVkuLyI2+qGHhI= +github.com/tencentcloud/tencentcloud-sdk-go v1.0.162/go.mod h1:asUz5BPXxgoPGaRgZaVm1iGcUAuHyYUo1nXqKa83cvI= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tilinna/clock v1.0.2/go.mod h1:ZsP7BcY7sEEz7ktc0IVy8Us6boDrK8VradlKRUGfOao= +github.com/tilinna/clock v1.1.0 h1:6IQQQCo6KoBxVudv6gwtY8o4eDfhHo8ojA5dP0MfhSs= +github.com/tilinna/clock v1.1.0/go.mod h1:ZsP7BcY7sEEz7ktc0IVy8Us6boDrK8VradlKRUGfOao= +github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw= +github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= +github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o= +github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod 
h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ= +github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= +github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= +github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= +github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= +github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.19.1/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8= +github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= +github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/vektah/gqlparser/v2 v2.4.5/go.mod h1:flJWIR04IMQPGz+BXLrORkrARBxv/rtyIAFvd/MceW0= +github.com/veraison/go-cose v1.0.0-rc.1/go.mod h1:7ziE85vSq4ScFTg6wyoMXjucIGOf4JkFEZi/an96Ct4= +github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns 
v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vmware/govmomi v0.18.0 h1:f7QxSmP7meCtoAmiKZogvVbLInT+CZx6Px6K5rYsJZo= +github.com/vmware/govmomi v0.18.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.1 h1:VOMT+81stJgXW3CpHyqHN3AXDYIMsx56mEFrB37Mb/E= +github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= +github.com/xdg-go/stringprep v1.0.3 h1:kdwGpVNwPFtjs98xCGkHjQtGKh86rDcRZN17QEMCOIs= +github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= +github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yashtewari/glob-intersection v0.1.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= +github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= +github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= +github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 h1:k/gmLsJDWwWqbLCur2yWnJzwQEKRcAHXo6seXGuSwWw= +github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= +github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= +github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +github.com/zclconf/go-cty v1.12.1 h1:PcupnljUm9EIvbgSHQnHhUr3fO6oFmkOrvs2BAFNXXY= +github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA= +github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= +github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= +go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/api/v3 v3.5.5/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8= +go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY= +go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/pkg/v3 v3.5.5/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ= +go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg= +go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/client/v2 v2.305.5 h1:DktRP60//JJpnPC0VBymAN/7V71GHMdjDCBt4ZPXDjI= +go.etcd.io/etcd/client/v2 v2.305.5/go.mod h1:zQjKllfqfBVyVStbt4FaosoX2iYd8fV/GRy/PbowgP4= +go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= 
+go.etcd.io/etcd/client/v3 v3.5.5/go.mod h1:aApjR4WGlSumpnJ2kloS75h6aHUmAyaPLjHMxpc7E7c= +go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4= +go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw= +go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= +go.etcd.io/etcd/pkg/v3 v3.5.5/go.mod h1:6ksYFxttiUGzC2uxyqiyOEvhAiD0tuIqSZkX3TyPdaE= +go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= +go.etcd.io/etcd/raft/v3 v3.5.5/go.mod h1:76TA48q03g1y1VpTue92jZLr9lIHKUNcYdZOOGyx8rI= +go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= +go.etcd.io/etcd/server/v3 v3.5.5/go.mod h1:rZ95vDw/jrvsbj9XpTqPrTAB9/kzchVdhRirySPkUBc= +go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= +go.mongodb.org/atlas v0.33.0 h1:qJhkEuJufh7sVDVHorTF/D7G7naQ1EJAzqf1aV29JWs= +go.mongodb.org/atlas v0.33.0/go.mod h1:L4BKwVx/OeEhOVjCSdgo90KJm4469iv7ZLzQms/EPTg= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.mongodb.org/mongo-driver v1.11.6 h1:XM7G6PjiGAO5betLF13BIa5TlLUUE3uJ/2Ox3Lz1K+o= +go.mongodb.org/mongo-driver v1.11.6/go.mod h1:G9TgswdsWjX4tmDA5zfs2+6AEPpYJwqblyjsfuh8oXY= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0/go.mod h1:E5NNboN0UqSAki0Atn9kVwaN7I+l25gGxDqBueo/74E= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.40.0/go.mod h1:UMklln0+MRhZC4e3PwmN3pCtq4DyIadWw4yikh6bNrw= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod 
h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.32.0/go.mod h1:5eCOqeGphOyz6TsY3ZDNjE33SM/TFAK3RGuCL2naTgY= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU= +go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= +go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= +go.opentelemetry.io/otel v1.8.0/go.mod h1:2pkj+iMj0o03Y+cW6/m8Y4WkRdYN3AvCXCnzRMp9yvM= +go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= +go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= +go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= +go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= +go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.7.0/go.mod h1:M1hVZHNxcbkAlcvrOMlpQ4YOO3Awf+4N2dxkZL3xm04= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0/go.mod h1:UFG7EBMRdXyFstOwH028U0sVf+AvukSGhF0g8+dmNG8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.7.0/go.mod h1:ceUgdyfNv4h4gLxHR0WNfDiiVmZFodZhZSbOLhpxqXE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0/go.mod h1:Krqnjl22jUJ0HgMzw5eveuCvFDXY4nSYb4F8t5gdrag= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0/go.mod h1:HrbCVv40OOLTABmOn1ZWty6CHXkU8DK/Urc43tHug70= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1/go.mod h1:xOvWoTOrQjxjW61xtOmD/WKGRYb/P4NzRo3bs65U6Rk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.7.0/go.mod h1:E+/KKhwOSw8yoPxSSuUHG6vKppkvhN+S1Jc7Nib3k3o= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0/go.mod h1:OfUCyyIiDvNXHWpcWgbF+MWvqPZiNa3YDEnivcnYsV0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0/go.mod h1:5w41DY6S9gZrbjuq6Y+753e96WfPha5IcsOSZTtullM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.14.0/go.mod h1:+N7zNjIJv4K+DeX67XXET0P+eIciESgaFDBqh+ZJFS4= +go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/metric v0.30.0/go.mod h1:/ShZ7+TS4dHzDFmfi1kSXMhMVubNoP0oIaBp70J6UXU= +go.opentelemetry.io/otel/metric v0.31.0/go.mod h1:ohmwj9KTSIeBnDBm/ZwH2PSZxZzoOaG2xZeekTRzL5A= +go.opentelemetry.io/otel/metric v0.37.0/go.mod h1:DmdaHfGt54iV6UKxsV9slj2bBRJcKC1B1uvDLIioc1s= +go.opentelemetry.io/otel/metric v1.16.0 
h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= +go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= +go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= +go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI= +go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= +go.opentelemetry.io/otel/sdk v1.7.0/go.mod h1:uTEOTwaqIVuTGiJN7ii13Ibp75wJmYUDe374q6cZwUU= +go.opentelemetry.io/otel/sdk v1.10.0/go.mod h1:vO06iKzD5baltJz1zarxMCNHFpUlUiOy4s65ECtn6kE= +go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY= +go.opentelemetry.io/otel/sdk v1.14.0/go.mod h1:bwIC5TjrNG6QDCHNWvW4HLHtUQ4I+VQDsnjhvyZCALM= +go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= +go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= +go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk= +go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= +go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= +go.opentelemetry.io/otel/trace v1.8.0/go.mod h1:0Bt3PXY8w+3pheS3hQUt+wow8b1ojPaTBoTCh2zIFI4= +go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= +go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8= +go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= +go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= +go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v0.16.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU= +go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.12/go.mod 
h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec= +go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220313003712-b769efc7c000/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.7.0/go.mod 
h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA= +golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc= +golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= +golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a h1:Jw5wfR+h9mnIYH+OtGT2im5wV1YGGDora5vTv/aa5bE= +golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod 
h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= +golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= 
+golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 
v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A= +golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk= +golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211031064116-611d5d643895/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220405210540-1e041c57c461/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8= +golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= 
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220411224347-583f2d630306/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190424220101-1e8e1cfdf96b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools 
v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 
+golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= +golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.10.0 h1:tvDr/iQoUqNdohiYm0LmmKcBk+q86lb9EprIUFhHHGg= +golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.11.0 h1:f1IJhK4Km5tBJmaiJXtk/PkL4cdVX6J+tGiM187uT5E= +gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod 
h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod 
h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= +google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o= +google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod 
h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod 
h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= +google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod 
h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 h1:Z0hjGZePRE0ZBWotvtrwxFNrNE9CUAGtplaDK5NNI/g= +google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98/go.mod h1:S7mY02OqCJTD0E1OiQy1F72PWFB4bZJ87cAtLPYgDR0= +google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 h1:FmF5cCW94Ij59cfpoLiwTgodWmm60eEV0CjlsVg2fuw= +google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc 
v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 h1:M1YKkFIboKNieVO5DLUEVzQfGwJD30Nv2jfUgzb5UcE= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= +gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= +gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/ory-am/dockertest.v3 v3.3.4 h1:oen8RiwxVNxtQ1pRoV4e4jqh6UjNsOuIZ1NXns6jdcw= +gopkg.in/ory-am/dockertest.v3 v3.3.4/go.mod h1:s9mmoLkaGeAh97qygnNj4xWkiN7e1SKekYC6CovU+ek= +gopkg.in/resty.v1 v1.12.0 h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= +gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/gotestsum v1.10.0 h1:lVO4uQJoxdsJb7jgmr1fg8QW7zGQ/tuqvsq5fHKyoHQ= +gotest.tools/gotestsum v1.10.0/go.mod h1:6JHCiN6TEjA7Kaz23q1bH0e2Dc3YJjDUZ0DmctFZf+w= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +gotest.tools/v3 v3.3.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A= +gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= +gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +honnef.co/go/tools v0.4.3 h1:o/n5/K5gXqk8Gozvs2cnL0F2S1/g1vcGCAx2vETjITw= +honnef.co/go/tools v0.4.3/go.mod h1:36ZgoUOrqOk1GxwHhyryEkq8FQWkUO2xGuSMhUCcdvA= +k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= +k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= +k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= +k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs= +k8s.io/api v0.26.2/go.mod h1:1kjMQsFE+QHPfskEcVNgL3+Hp88B80uj0QtSOlj8itU= +k8s.io/api v0.27.2 h1:+H17AJpUMvl+clT+BPnKf0E3ksMAzoBBg7CntpSuADo= +k8s.io/api v0.27.2/go.mod h1:ENmbocXfBT2ADujUXcBhHV55RIT31IIEvkntP6vZKS4= +k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= 
+k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= +k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U= +k8s.io/apimachinery v0.25.0/go.mod h1:qMx9eAk0sZQGsXGu86fab8tZdffHbwUfsvzqKn4mfB0= +k8s.io/apimachinery v0.26.2/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= +k8s.io/apimachinery v0.27.2 h1:vBjGaKKieaIreI+oQwELalVG4d8f3YAMNpWLzDXkxeg= +k8s.io/apimachinery v0.27.2/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= +k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= +k8s.io/apiserver v0.22.5/go.mod h1:s2WbtgZAkTKt679sYtSudEQrTGWUSQAPe6MupLnlmaQ= +k8s.io/apiserver v0.26.2/go.mod h1:GHcozwXgXsPuOJ28EnQ/jXEM9QeG6HT22YxSNmpYNh8= +k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU= +k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= +k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= +k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y= +k8s.io/client-go v0.26.2/go.mod h1:u5EjOuSyBa09yqqyY7m3abZeovO/7D/WehVVlZ2qcqU= +k8s.io/client-go v0.27.2 h1:vDLSeuYvCHKeoQRhCXjxXO45nHVv2Ip4Fe0MfioMrhE= +k8s.io/client-go v0.27.2/go.mod h1:tY0gVmUsHrAmjzHX9zs7eCjxcBsf8IiNe7KQ52biTcQ= +k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= +k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= +k8s.io/component-base v0.22.5/go.mod h1:VK3I+TjuF9eaa+Ln67dKxhGar5ynVbwnGrUiNF4MqCI= +k8s.io/component-base v0.26.2/go.mod h1:DxbuIe9M3IZPRxPIzhch2m1eT7uFrSBJUBuVCQEBivs= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= +k8s.io/cri-api v0.23.1/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4= +k8s.io/cri-api v0.25.0/go.mod h1:J1rAyQkSJ2Q6I+aBMOVgg2/cbbebso6FNa0UagiR0kc= +k8s.io/cri-api v0.25.3/go.mod h1:riC/P0yOGUf2K1735wW+CXs1aY2ctBgePtnnoFLd0dU= +k8s.io/cri-api v0.26.2/go.mod h1:Oo8O7MKFPNDxfDf2LmrF/3Hf30q1C6iliGuv3la3tIA= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= 
+k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= +k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kms v0.26.2/go.mod h1:69qGnf1NsFOQP07fBYqNLZklqEHSJF024JqYCaeVxHg= +k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= +k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= +k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= +k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 h1:kmDqav+P+/5e1i9tFfHq1qcF3sOrDp+YEkVDAHu7Jwk= +k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +layeh.com/radius v0.0.0-20190322222518-890bc1058917 h1:BDXFaFzUt5EIqe/4wrTc4AcYZWP6iC6Ult+jQWLh5eU= +layeh.com/radius v0.0.0-20190322222518-890bc1058917/go.mod h1:fywZKyu//X7iRzaxLgPWsvc0L26IUpVvE/aeIL2JtIQ= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.37.0/go.mod h1:vtL+3mdHx/wcj3iEGz84rQa8vEqR6XM84v5Lcvfph20= +modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0= +modernc.org/ccgo/v3 
v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= +modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.0.0-20220904174949-82d86e1b6d56/go.mod h1:YSXjPL62P2AMSxBphRHPn7IkzhVHqkvOnRKAKh+W6ZI= +modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= +modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= +modernc.org/ccgo/v3 v3.16.13-0.20221017192402-261537637ce8/go.mod h1:fUB3Vn0nVPReA+7IG7yZDfjv1TMWjhQP8gCxrFAtL5g= +modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= +modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= +modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= +modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= +modernc.org/libc v1.17.4/go.mod h1:WNg2ZH56rDEwdropAJeZPQkXmDwh+JCA1s/htl6r2fA= +modernc.org/libc v1.18.0/go.mod h1:vj6zehR5bfc98ipowQOM2nIDUZnVew/wNC/2tOGS+q0= +modernc.org/libc v1.20.3/go.mod h1:ZRfIaEkgrYgZDl6pa4W39HgN5G/yDW+NRmNKZBDFrk0= +modernc.org/libc v1.21.4/go.mod h1:przBsL5RDOZajTVslkugzLBj1evTue36jEomFQOoYuI= +modernc.org/libc v1.22.2/go.mod h1:uvQavJ1pZ0hIoC/jfqNoMLURIMhKzINIWypNM17puug= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/memory v1.3.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/memory v1.4.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= +modernc.org/sqlite v1.18.2/go.mod h1:kvrTLEWgxUcHa2GfHBQtanR1H9ht3hTJNtKpzH9k1u0= +modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= +modernc.org/tcl v1.13.2/go.mod h1:7CLiGIPo1M8Rv1Mitpv5akc2+8fxUd2y2UzC/MfMzy0= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/token v1.0.1/go.mod 
h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= +mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= +mvdan.cc/gofumpt v0.3.1 h1:avhhrOmv0IuvQVK7fvwV91oFSGAk5/6Po8GXTzICeu8= +mvdan.cc/gofumpt v0.3.1/go.mod h1:w3ymliuxvzVx8DAutBnVyDqYb1Niy/yCJt/lk821YCE= +nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= +nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +oras.land/oras-go v1.2.0/go.mod h1:pFNs7oHp2dYsYMSS82HaX5l4mpnGO7hbpPN6EWH2ltc= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35/go.mod h1:WxjusMwXlKzfAs4p9km6XJRndVt2FROgMVCE4cdohFo= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/helper/benchhelpers/benchhelpers.go b/helper/benchhelpers/benchhelpers.go new file mode 100644 index 0000000..06dcde6 --- /dev/null +++ b/helper/benchhelpers/benchhelpers.go @@ -0,0 +1,22 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package benchhelpers + +import ( + "testing" + + testinginterface "github.com/mitchellh/go-testing-interface" +) + +type tbWrapper struct { + testing.TB +} + +func (b tbWrapper) Parallel() { + // no-op +} + +func TBtoT(tb testing.TB) testinginterface.T { + return tbWrapper{tb} +} diff --git a/helper/builtinplugins/registry.go b/helper/builtinplugins/registry.go new file mode 100644 index 0000000..b4d3da5 --- /dev/null +++ b/helper/builtinplugins/registry.go @@ -0,0 +1,297 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package builtinplugins + +import ( + "context" + + credAliCloud "github.com/hashicorp/vault-plugin-auth-alicloud" + credAzure "github.com/hashicorp/vault-plugin-auth-azure" + credCentrify "github.com/hashicorp/vault-plugin-auth-centrify" + credCF "github.com/hashicorp/vault-plugin-auth-cf" + credGcp "github.com/hashicorp/vault-plugin-auth-gcp/plugin" + credJWT "github.com/hashicorp/vault-plugin-auth-jwt" + credKerb "github.com/hashicorp/vault-plugin-auth-kerberos" + credKube "github.com/hashicorp/vault-plugin-auth-kubernetes" + credOCI "github.com/hashicorp/vault-plugin-auth-oci" + dbCouchbase "github.com/hashicorp/vault-plugin-database-couchbase" + dbElastic "github.com/hashicorp/vault-plugin-database-elasticsearch" + dbMongoAtlas "github.com/hashicorp/vault-plugin-database-mongodbatlas" + dbRedis "github.com/hashicorp/vault-plugin-database-redis" + dbRedisElastiCache "github.com/hashicorp/vault-plugin-database-redis-elasticache" + dbSnowflake "github.com/hashicorp/vault-plugin-database-snowflake" + logicalAd "github.com/hashicorp/vault-plugin-secrets-ad/plugin" + logicalAlicloud "github.com/hashicorp/vault-plugin-secrets-alicloud" + logicalAzure "github.com/hashicorp/vault-plugin-secrets-azure" + logicalGcp "github.com/hashicorp/vault-plugin-secrets-gcp/plugin" + logicalGcpKms "github.com/hashicorp/vault-plugin-secrets-gcpkms" + logicalKube "github.com/hashicorp/vault-plugin-secrets-kubernetes" + logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" + logicalMongoAtlas "github.com/hashicorp/vault-plugin-secrets-mongodbatlas" + logicalLDAP "github.com/hashicorp/vault-plugin-secrets-openldap" + logicalTerraform "github.com/hashicorp/vault-plugin-secrets-terraform" + credAppRole "github.com/hashicorp/vault/builtin/credential/approle" + credAws "github.com/hashicorp/vault/builtin/credential/aws" + credCert "github.com/hashicorp/vault/builtin/credential/cert" + credGitHub "github.com/hashicorp/vault/builtin/credential/github" + credLdap "github.com/hashicorp/vault/builtin/credential/ldap" + credOkta "github.com/hashicorp/vault/builtin/credential/okta" + credRadius "github.com/hashicorp/vault/builtin/credential/radius" + credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" + logicalAws "github.com/hashicorp/vault/builtin/logical/aws" + logicalConsul "github.com/hashicorp/vault/builtin/logical/consul" + logicalNomad "github.com/hashicorp/vault/builtin/logical/nomad" + logicalPki "github.com/hashicorp/vault/builtin/logical/pki" + logicalRabbit "github.com/hashicorp/vault/builtin/logical/rabbitmq" + logicalSsh "github.com/hashicorp/vault/builtin/logical/ssh" + logicalTotp "github.com/hashicorp/vault/builtin/logical/totp" + logicalTransit "github.com/hashicorp/vault/builtin/logical/transit" + dbCass "github.com/hashicorp/vault/plugins/database/cassandra" + dbHana "github.com/hashicorp/vault/plugins/database/hana" + dbInflux "github.com/hashicorp/vault/plugins/database/influxdb" + dbMongo 
"github.com/hashicorp/vault/plugins/database/mongodb" + dbMssql "github.com/hashicorp/vault/plugins/database/mssql" + dbMysql "github.com/hashicorp/vault/plugins/database/mysql" + dbPostgres "github.com/hashicorp/vault/plugins/database/postgresql" + dbRedshift "github.com/hashicorp/vault/plugins/database/redshift" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" +) + +// Registry is inherently thread-safe because it's immutable. +// Thus, rather than creating multiple instances of it, we only need one. +var Registry = newRegistry() + +var addExternalPlugins = addExtPluginsImpl + +// BuiltinFactory is the func signature that should be returned by +// the plugin's New() func. +type BuiltinFactory func() (interface{}, error) + +// There are three forms of Backends which exist in the BuiltinRegistry. +type credentialBackend struct { + logical.Factory + consts.DeprecationStatus +} + +type databasePlugin struct { + Factory BuiltinFactory + consts.DeprecationStatus +} + +type logicalBackend struct { + logical.Factory + consts.DeprecationStatus +} + +type removedBackend struct { + *framework.Backend +} + +func removedFactory(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) { + removedBackend := &removedBackend{} + removedBackend.Backend = &framework.Backend{} + return removedBackend, nil +} + +func newRegistry() *registry { + reg := ®istry{ + credentialBackends: map[string]credentialBackend{ + "alicloud": {Factory: credAliCloud.Factory}, + "app-id": { + Factory: removedFactory, + DeprecationStatus: consts.Removed, + }, + "approle": {Factory: credAppRole.Factory}, + "aws": {Factory: credAws.Factory}, + "azure": {Factory: credAzure.Factory}, + "centrify": {Factory: credCentrify.Factory}, + "cert": {Factory: credCert.Factory}, + "cf": {Factory: credCF.Factory}, + "gcp": {Factory: credGcp.Factory}, + "github": {Factory: credGitHub.Factory}, + "jwt": {Factory: credJWT.Factory}, + "kerberos": {Factory: credKerb.Factory}, + "kubernetes": {Factory: credKube.Factory}, + "ldap": {Factory: credLdap.Factory}, + "oci": {Factory: credOCI.Factory}, + "oidc": {Factory: credJWT.Factory}, + "okta": {Factory: credOkta.Factory}, + "pcf": { + Factory: credCF.Factory, + DeprecationStatus: consts.Deprecated, + }, + "radius": {Factory: credRadius.Factory}, + "userpass": {Factory: credUserpass.Factory}, + }, + databasePlugins: map[string]databasePlugin{ + // These four plugins all use the same mysql implementation but with + // different username settings passed by the constructor. 
+ "mysql-database-plugin": {Factory: dbMysql.New(dbMysql.DefaultUserNameTemplate)}, + "mysql-aurora-database-plugin": {Factory: dbMysql.New(dbMysql.DefaultLegacyUserNameTemplate)}, + "mysql-rds-database-plugin": {Factory: dbMysql.New(dbMysql.DefaultLegacyUserNameTemplate)}, + "mysql-legacy-database-plugin": {Factory: dbMysql.New(dbMysql.DefaultLegacyUserNameTemplate)}, + + "cassandra-database-plugin": {Factory: dbCass.New}, + "couchbase-database-plugin": {Factory: dbCouchbase.New}, + "elasticsearch-database-plugin": {Factory: dbElastic.New}, + "hana-database-plugin": {Factory: dbHana.New}, + "influxdb-database-plugin": {Factory: dbInflux.New}, + "mongodb-database-plugin": {Factory: dbMongo.New}, + "mongodbatlas-database-plugin": {Factory: dbMongoAtlas.New}, + "mssql-database-plugin": {Factory: dbMssql.New}, + "postgresql-database-plugin": {Factory: dbPostgres.New}, + "redshift-database-plugin": {Factory: dbRedshift.New}, + "redis-database-plugin": {Factory: dbRedis.New}, + "redis-elasticache-database-plugin": {Factory: dbRedisElastiCache.New}, + "snowflake-database-plugin": {Factory: dbSnowflake.New}, + }, + logicalBackends: map[string]logicalBackend{ + "ad": { + Factory: logicalAd.Factory, + DeprecationStatus: consts.Deprecated, + }, + "alicloud": {Factory: logicalAlicloud.Factory}, + "aws": {Factory: logicalAws.Factory}, + "azure": {Factory: logicalAzure.Factory}, + "cassandra": { + Factory: removedFactory, + DeprecationStatus: consts.Removed, + }, + "consul": {Factory: logicalConsul.Factory}, + "gcp": {Factory: logicalGcp.Factory}, + "gcpkms": {Factory: logicalGcpKms.Factory}, + "kubernetes": {Factory: logicalKube.Factory}, + "kv": {Factory: logicalKv.Factory}, + "mongodb": { + Factory: removedFactory, + DeprecationStatus: consts.Removed, + }, + // The mongodbatlas secrets engine is not the same as the database plugin equivalent + // (`mongodbatlas-database-plugin`), and thus will not be deprecated at this time. + "mongodbatlas": {Factory: logicalMongoAtlas.Factory}, + "mssql": { + Factory: removedFactory, + DeprecationStatus: consts.Removed, + }, + "mysql": { + Factory: removedFactory, + DeprecationStatus: consts.Removed, + }, + "nomad": {Factory: logicalNomad.Factory}, + "openldap": {Factory: logicalLDAP.Factory}, + "ldap": {Factory: logicalLDAP.Factory}, + "pki": {Factory: logicalPki.Factory}, + "postgresql": { + Factory: removedFactory, + DeprecationStatus: consts.Removed, + }, + "rabbitmq": {Factory: logicalRabbit.Factory}, + "ssh": {Factory: logicalSsh.Factory}, + "terraform": {Factory: logicalTerraform.Factory}, + "totp": {Factory: logicalTotp.Factory}, + "transit": {Factory: logicalTransit.Factory}, + }, + } + + addExternalPlugins(reg) + + return reg +} + +func addExtPluginsImpl(r *registry) {} + +type registry struct { + credentialBackends map[string]credentialBackend + databasePlugins map[string]databasePlugin + logicalBackends map[string]logicalBackend +} + +// Get returns the Factory func for a particular backend plugin from the +// plugins map. 
+func (r *registry) Get(name string, pluginType consts.PluginType) (func() (interface{}, error), bool) {
+	switch pluginType {
+	case consts.PluginTypeCredential:
+		if f, ok := r.credentialBackends[name]; ok {
+			return toFunc(f.Factory), ok
+		}
+	case consts.PluginTypeSecrets:
+		if f, ok := r.logicalBackends[name]; ok {
+			return toFunc(f.Factory), ok
+		}
+	case consts.PluginTypeDatabase:
+		if f, ok := r.databasePlugins[name]; ok {
+			return f.Factory, ok
+		}
+	default:
+		return nil, false
+	}
+
+	return nil, false
+}
+
+// Keys returns the list of plugin names that are considered builtin plugins.
+func (r *registry) Keys(pluginType consts.PluginType) []string {
+	var keys []string
+	switch pluginType {
+	case consts.PluginTypeDatabase:
+		for key, backend := range r.databasePlugins {
+			keys = appendIfNotRemoved(keys, key, backend.DeprecationStatus)
+		}
+	case consts.PluginTypeCredential:
+		for key, backend := range r.credentialBackends {
+			keys = appendIfNotRemoved(keys, key, backend.DeprecationStatus)
+		}
+	case consts.PluginTypeSecrets:
+		for key, backend := range r.logicalBackends {
+			keys = appendIfNotRemoved(keys, key, backend.DeprecationStatus)
+		}
+	}
+	return keys
+}
+
+func (r *registry) Contains(name string, pluginType consts.PluginType) bool {
+	for _, key := range r.Keys(pluginType) {
+		if key == name {
+			return true
+		}
+	}
+	return false
+}
+
+// DeprecationStatus returns the Deprecation status for a builtin with type `pluginType`
+func (r *registry) DeprecationStatus(name string, pluginType consts.PluginType) (consts.DeprecationStatus, bool) {
+	switch pluginType {
+	case consts.PluginTypeCredential:
+		if f, ok := r.credentialBackends[name]; ok {
+			return f.DeprecationStatus, ok
+		}
+	case consts.PluginTypeSecrets:
+		if f, ok := r.logicalBackends[name]; ok {
+			return f.DeprecationStatus, ok
+		}
+	case consts.PluginTypeDatabase:
+		if f, ok := r.databasePlugins[name]; ok {
+			return f.DeprecationStatus, ok
+		}
+	default:
+		return consts.Unknown, false
+	}
+
+	return consts.Unknown, false
+}
+
+func toFunc(ifc interface{}) func() (interface{}, error) {
+	return func() (interface{}, error) {
+		return ifc, nil
+	}
+}
+
+func appendIfNotRemoved(keys []string, name string, status consts.DeprecationStatus) []string {
+	if status != consts.Removed {
+		return append(keys, name)
+	}
+	return keys
+}
diff --git a/helper/builtinplugins/registry_test.go b/helper/builtinplugins/registry_test.go
new file mode 100644
index 0000000..55acdd9
--- /dev/null
+++ b/helper/builtinplugins/registry_test.go
@@ -0,0 +1,334 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package builtinplugins
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"reflect"
+	"regexp"
+	"testing"
+
+	credUserpass "github.com/hashicorp/vault/builtin/credential/userpass"
+	dbMysql "github.com/hashicorp/vault/plugins/database/mysql"
+	"github.com/hashicorp/vault/sdk/helper/consts"
+
+	"golang.org/x/exp/slices"
+)
+
+// Test_RegistryGet exercises the (registry).Get functionality by comparing
+// factory types and ok response.
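+// Cases cover an unknown name, an invalid plugin type, and lookups of
+// credential and database builtins, including the removed "app-id" backend.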
+func Test_RegistryGet(t *testing.T) { + tests := []struct { + name string + builtin string + pluginType consts.PluginType + want BuiltinFactory + wantOk bool + }{ + { + name: "non-existent builtin", + builtin: "foo", + pluginType: consts.PluginTypeCredential, + want: nil, + wantOk: false, + }, + { + name: "bad plugin type", + builtin: "app-id", + pluginType: 9000, + want: nil, + wantOk: false, + }, + { + name: "known builtin lookup", + builtin: "userpass", + pluginType: consts.PluginTypeCredential, + want: toFunc(credUserpass.Factory), + wantOk: true, + }, + { + name: "removed builtin lookup", + builtin: "app-id", + pluginType: consts.PluginTypeCredential, + want: nil, + wantOk: true, + }, + { + name: "known builtin lookup", + builtin: "mysql-database-plugin", + pluginType: consts.PluginTypeDatabase, + want: dbMysql.New(dbMysql.DefaultUserNameTemplate), + wantOk: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var got BuiltinFactory + got, ok := Registry.Get(tt.builtin, tt.pluginType) + if ok { + if reflect.TypeOf(got) != reflect.TypeOf(tt.want) { + t.Fatalf("got type: %T, want type: %T", got, tt.want) + } + } + if tt.wantOk != ok { + t.Fatalf("error: got %v, want %v", ok, tt.wantOk) + } + }) + } +} + +// Test_RegistryKeyCounts is a light unit test used to check the builtin +// registry lists for each plugin type and make sure they match in length. +func Test_RegistryKeyCounts(t *testing.T) { + tests := []struct { + name string + pluginType consts.PluginType + want int // use slice length as test condition + wantOk bool + }{ + { + name: "bad plugin type", + pluginType: 9001, + want: 0, + }, + { + name: "number of auth plugins", + pluginType: consts.PluginTypeCredential, + want: 19, + }, + { + name: "number of database plugins", + pluginType: consts.PluginTypeDatabase, + want: 17, + }, + { + name: "number of secrets plugins", + pluginType: consts.PluginTypeSecrets, + want: 19, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + keys := Registry.Keys(tt.pluginType) + if len(keys) != tt.want { + t.Fatalf("got size: %d, want size: %d", len(keys), tt.want) + } + }) + } +} + +// Test_RegistryContains exercises the (registry).Contains functionality. +func Test_RegistryContains(t *testing.T) { + tests := []struct { + name string + builtin string + pluginType consts.PluginType + want bool + }{ + { + name: "non-existent builtin", + builtin: "foo", + pluginType: consts.PluginTypeCredential, + want: false, + }, + { + name: "bad plugin type", + builtin: "app-id", + pluginType: 9001, + want: false, + }, + { + name: "known builtin lookup", + builtin: "approle", + pluginType: consts.PluginTypeCredential, + want: true, + }, + { + name: "removed builtin lookup", + builtin: "app-id", + pluginType: consts.PluginTypeCredential, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := Registry.Contains(tt.builtin, tt.pluginType) + if got != tt.want { + t.Fatalf("error: got %v, wanted %v", got, tt.want) + } + }) + } +} + +// Test_RegistryStatus exercises the (registry).Status functionality. 
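+// (The method under test is (registry).DeprecationStatus; the cases cover the
+// unknown, supported, deprecated, and removed states.)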
+func Test_RegistryStatus(t *testing.T) { + tests := []struct { + name string + builtin string + pluginType consts.PluginType + want consts.DeprecationStatus + wantOk bool + }{ + { + name: "non-existent builtin and valid type", + builtin: "foo", + pluginType: consts.PluginTypeCredential, + want: consts.Unknown, + wantOk: false, + }, + { + name: "mismatch builtin and plugin type", + builtin: "app-id", + pluginType: consts.PluginTypeSecrets, + want: consts.Unknown, + wantOk: false, + }, + { + name: "existing builtin and invalid plugin type", + builtin: "app-id", + pluginType: 9000, + want: consts.Unknown, + wantOk: false, + }, + { + name: "supported builtin lookup", + builtin: "approle", + pluginType: consts.PluginTypeCredential, + want: consts.Supported, + wantOk: true, + }, + { + name: "deprecated builtin lookup", + builtin: "pcf", + pluginType: consts.PluginTypeCredential, + want: consts.Deprecated, + wantOk: true, + }, + { + name: "removed builtin lookup", + builtin: "app-id", + pluginType: consts.PluginTypeCredential, + want: consts.Removed, + wantOk: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, ok := Registry.DeprecationStatus(tt.builtin, tt.pluginType) + if got != tt.want { + t.Fatalf("got %+v, wanted %+v", got, tt.want) + } + if ok != tt.wantOk { + t.Fatalf("got ok: %t, want ok: %t", ok, tt.wantOk) + } + }) + } +} + +// Test_RegistryMatchesGenOpenapi ensures that the plugins mounted in gen_openapi.sh match registry.go +func Test_RegistryMatchesGenOpenapi(t *testing.T) { + const scriptPath = "../../scripts/gen_openapi.sh" + + // parseScript fetches the contents of gen_openapi.sh script & extract the relevant lines + parseScript := func(path string) ([]string, []string, error) { + f, err := os.Open(scriptPath) + if err != nil { + return nil, nil, fmt.Errorf("could not open gen_openapi.sh script: %w", err) + } + defer f.Close() + + var ( + credentialBackends []string + credentialBackendsRe = regexp.MustCompile(`^vault auth enable (?:-.+ )*(?:"([a-zA-Z]+)"|([a-zA-Z]+))$`) + + secretsBackends []string + secretsBackendsRe = regexp.MustCompile(`^vault secrets enable (?:-.+ )*(?:"([a-zA-Z]+)"|([a-zA-Z]+))$`) + ) + + scanner := bufio.NewScanner(f) + + for scanner.Scan() { + line := scanner.Text() + + if m := credentialBackendsRe.FindStringSubmatch(line); m != nil { + credentialBackends = append(credentialBackends, m[1]) + } + if m := secretsBackendsRe.FindStringSubmatch(line); m != nil { + secretsBackends = append(secretsBackends, m[1]) + } + } + + if err := scanner.Err(); err != nil { + return nil, nil, fmt.Errorf("error scanning gen_openapi.sh: %v", err) + } + + return credentialBackends, secretsBackends, nil + } + + // ensureInRegistry ensures that the given plugin is in registry and marked as "supported" + ensureInRegistry := func(t *testing.T, name string, pluginType consts.PluginType) { + t.Helper() + + // "database" will not be present in registry, it is represented as + // a list of database plugins instead + if name == "database" && pluginType == consts.PluginTypeSecrets { + return + } + + deprecationStatus, ok := Registry.DeprecationStatus(name, pluginType) + if !ok { + t.Fatalf("%q %s backend is missing from registry.go; please remove it from gen_openapi.sh", name, pluginType) + } + + if deprecationStatus == consts.Removed { + t.Fatalf("%q %s backend is marked 'removed' in registry.go; please remove it from gen_openapi.sh", name, pluginType) + } + } + + // ensureInScript ensures that the given plugin name in in gen_openapi.sh script + 
ensureInScript := func(t *testing.T, scriptBackends []string, name string) { + t.Helper() + + for _, excluded := range []string{ + "oidc", // alias for "jwt" + "openldap", // alias for "ldap" + } { + if name == excluded { + return + } + } + + if !slices.Contains(scriptBackends, name) { + t.Fatalf("%q backend could not be found in gen_openapi.sh, please add it there", name) + } + } + + // test starts here + scriptCredentialBackends, scriptSecretsBackends, err := parseScript(scriptPath) + if err != nil { + t.Fatal(err) + } + + for _, name := range scriptCredentialBackends { + ensureInRegistry(t, name, consts.PluginTypeCredential) + } + + for _, name := range scriptSecretsBackends { + ensureInRegistry(t, name, consts.PluginTypeSecrets) + } + + for name, backend := range Registry.credentialBackends { + if backend.DeprecationStatus == consts.Supported { + ensureInScript(t, scriptCredentialBackends, name) + } + } + + for name, backend := range Registry.logicalBackends { + if backend.DeprecationStatus == consts.Supported { + ensureInScript(t, scriptSecretsBackends, name) + } + } +} diff --git a/helper/builtinplugins/registry_util.go b/helper/builtinplugins/registry_util.go new file mode 100644 index 0000000..948092e --- /dev/null +++ b/helper/builtinplugins/registry_util.go @@ -0,0 +1,10 @@ +//go:build !enterprise + +package builtinplugins + +import "github.com/hashicorp/vault/sdk/helper/consts" + +// IsBuiltinEntPlugin checks whether the plugin is an enterprise only builtin plugin +func (r *registry) IsBuiltinEntPlugin(name string, pluginType consts.PluginType) bool { + return false +} diff --git a/helper/constants/constants_oss.go b/helper/constants/constants_oss.go new file mode 100644 index 0000000..8675f70 --- /dev/null +++ b/helper/constants/constants_oss.go @@ -0,0 +1,8 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !enterprise + +package constants + +var IsEnterprise = false diff --git a/helper/constants/fips.go b/helper/constants/fips.go new file mode 100644 index 0000000..9632d01 --- /dev/null +++ b/helper/constants/fips.go @@ -0,0 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !fips + +package constants + +// IsFIPS returns true if Vault is operating in a FIPS-140-{2,3} mode. +func IsFIPS() bool { + return false +} diff --git a/helper/constants/fips_build_check.go b/helper/constants/fips_build_check.go new file mode 100644 index 0000000..10e07e5 --- /dev/null +++ b/helper/constants/fips_build_check.go @@ -0,0 +1,41 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build (!fips && (fips_140_2 || fips_140_3)) || (fips && !fips_140_2 && !fips_140_3) || (fips_140_2 && fips_140_3) + +package constants + +import "C" + +// This function is the equivalent of an external (CGo) function definition, +// without implementation in any imported or built library. This results in +// a linker err if the above build constraints are satisfied: +// +// /home/cipherboy/GitHub/cipherboy/vault-enterprise/helper/constants/fips_build_check.go:10: undefined reference to `github.com/hashicorp/vault/helper/constants.VaultFIPSBuildRequiresVersionAgnosticTagAndOneVersionTag' +// +// This indicates that a build error has occurred due to mismatched tags. +// +// In particular, we use this to enforce the following restrictions on build +// tags: +// +// - If a versioned fips_140_* tag is specified, the unversioned tag must +// also be. 
+// - If the unversioned tag is specified, a versioned tag must be. +// - Both versioned flags cannot be specified at the same time. +// +// In the unlikely event that a FFI implementation for this function exists +// in the future, it should be renamed to a new function which does not +// exist. +// +// This approach was chosen above the other implementation in fips_cgo_check.go +// because this version does not break static analysis tools: most tools do not +// cross the CGo boundary and thus do not know that the below function is +// missing an implementation. However, in the other file, the function call is +// not marked as CGo (in part large because the lack of a cgo build tag +// prohibits us from using the same technique) and thus it must be a Go +// declaration, that is missing. +func VaultFIPSBuildRequiresVersionAgnosticTagAndOneVersionTag() + +func init() { + VaultFIPSBuildRequiresVersionAgnosticTagAndOneVersionTag() +} diff --git a/helper/constants/fips_cgo_check.go b/helper/constants/fips_cgo_check.go new file mode 100644 index 0000000..6de7d9f --- /dev/null +++ b/helper/constants/fips_cgo_check.go @@ -0,0 +1,21 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build (fips || fips_140_2 || fips_140_3) && !cgo + +package constants + +func init() { + // See note in fips_build_check.go. + // + // This function call is missing a declaration, causing the build to + // fail on improper tags (fips specified but cgo not specified). This + // ensures Vault fails to build if a FIPS build is requested but CGo + // support is not enabled. + // + // Note that this could confuse static analysis tools as this function + // should not ever be defined. If this function is defined in the future, + // the below reference should be renamed to a new name that is not + // defined to ensure we get a build failure. + VaultFIPSBuildTagMustEnableCGo() +} diff --git a/helper/dhutil/dhutil.go b/helper/dhutil/dhutil.go new file mode 100644 index 0000000..97552d4 --- /dev/null +++ b/helper/dhutil/dhutil.go @@ -0,0 +1,164 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package dhutil + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "crypto/sha256" + "errors" + "fmt" + "io" + + "golang.org/x/crypto/hkdf" + + "golang.org/x/crypto/curve25519" +) + +type PublicKeyInfo struct { + Curve25519PublicKey []byte `json:"curve25519_public_key"` +} + +type Envelope struct { + Curve25519PublicKey []byte `json:"curve25519_public_key"` + Nonce []byte `json:"nonce"` + EncryptedPayload []byte `json:"encrypted_payload"` +} + +// generatePublicPrivateKey uses curve25519 to generate a public and private key +// pair. +func GeneratePublicPrivateKey() ([]byte, []byte, error) { + var scalar, public [32]byte + + if _, err := io.ReadFull(rand.Reader, scalar[:]); err != nil { + return nil, nil, err + } + + curve25519.ScalarBaseMult(&public, &scalar) + return public[:], scalar[:], nil +} + +// GenerateSharedSecret uses the private key and the other party's public key to +// generate the shared secret. 
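+//
+// Illustrative sketch (variable names are hypothetical): because X25519 key
+// exchange is symmetric, both parties derive the same secret:
+//
+//	pubA, privA, _ := GeneratePublicPrivateKey()
+//	pubB, privB, _ := GeneratePublicPrivateKey()
+//	s1, _ := GenerateSharedSecret(privA, pubB)
+//	s2, _ := GenerateSharedSecret(privB, pubA)
+//	// bytes.Equal(s1, s2) == true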
+func GenerateSharedSecret(ourPrivate, theirPublic []byte) ([]byte, error) { + if len(ourPrivate) != 32 { + return nil, fmt.Errorf("invalid private key length: %d", len(ourPrivate)) + } + if len(theirPublic) != 32 { + return nil, fmt.Errorf("invalid public key length: %d", len(theirPublic)) + } + + return curve25519.X25519(ourPrivate, theirPublic) +} + +// DeriveSharedKey uses HKDF to derive a key from a shared secret and public keys +func DeriveSharedKey(secret, ourPublic, theirPublic []byte) ([]byte, error) { + // Derive the final key from the HKDF of the secret and public keys. + + /* + Internally, HKDF hashes the secret and two public keys. If Alice and Bob are doing DH key exchange, Alice calculates: + + HKDF(secret, A, B) since ourPublic is A. + + Bob calculates HKDF(secret, B, A), since Bob's ours is B. That produces a different value. Now we only care + that both public keys participate in the derivation, so simply sorting them so they are in a consistent + numerical order (either one would do) arrives at an agreed value. + */ + + var pub1 []byte + var pub2 []byte + switch bytes.Compare(ourPublic, theirPublic) { + case 0: + return nil, errors.New("same public key supplied for both participants") + case -1: + pub1 = ourPublic + pub2 = theirPublic + case 1: + pub1 = theirPublic + pub2 = ourPublic + } + + kio := hkdf.New(sha256.New, secret, pub1, pub2) + + var key [32]byte + n, err := io.ReadFull(kio, key[:]) + if err != nil { + // Don't return the key along with the error to prevent misuse + return nil, err + } + if n != 32 { + return nil, errors.New("short read from hkdf") + } + + return key[:], nil +} + +// Use AES256-GCM to encrypt some plaintext with a provided key. The returned values are +// the ciphertext, the nonce, and error respectively. +func EncryptAES(key, plaintext, aad []byte) ([]byte, []byte, error) { + // We enforce AES-256, so check explicitly for 32 bytes on the key + if len(key) != 32 { + return nil, nil, fmt.Errorf("invalid key length: %d", len(key)) + } + + if len(plaintext) == 0 { + return nil, nil, errors.New("empty plaintext provided") + } + + block, err := aes.NewCipher(key) + if err != nil { + return nil, nil, err + } + + // Never use more than 2^32 random nonces with a given key because of the risk of a repeat. + nonce := make([]byte, 12) + if _, err := io.ReadFull(rand.Reader, nonce); err != nil { + return nil, nil, err + } + + aesgcm, err := cipher.NewGCM(block) + if err != nil { + return nil, nil, err + } + + ciphertext := aesgcm.Seal(nil, nonce, plaintext, aad) + + return ciphertext, nonce, nil +} + +// Use AES256-GCM to decrypt some ciphertext with a provided key and nonce. The +// returned values are the plaintext and error respectively. 
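+//
+// Illustrative round-trip sketch (key, plaintext, and aad are assumed to be
+// defined elsewhere; key must be exactly 32 bytes):
+//
+//	ct, nonce, _ := EncryptAES(key, plaintext, aad)
+//	pt, _ := DecryptAES(key, ct, nonce, aad)
+//	// bytes.Equal(pt, plaintext) == true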
+func DecryptAES(key, ciphertext, nonce, aad []byte) ([]byte, error) { + // We enforce AES-256, so check explicitly for 32 bytes on the key + if len(key) != 32 { + return nil, fmt.Errorf("invalid key length: %d", len(key)) + } + + if len(ciphertext) == 0 { + return nil, errors.New("empty ciphertext provided") + } + + if len(nonce) == 0 { + return nil, errors.New("empty nonce provided") + } + + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + aesgcm, err := cipher.NewGCM(block) + if err != nil { + return nil, err + } + + plaintext, err := aesgcm.Open(nil, nonce, ciphertext, aad) + if err != nil { + return nil, err + } + + return plaintext, nil +} diff --git a/helper/dhutil/dhutil_test.go b/helper/dhutil/dhutil_test.go new file mode 100644 index 0000000..4b94f60 --- /dev/null +++ b/helper/dhutil/dhutil_test.go @@ -0,0 +1,4 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package dhutil diff --git a/helper/experiments/experiments.go b/helper/experiments/experiments.go new file mode 100644 index 0000000..538430e --- /dev/null +++ b/helper/experiments/experiments.go @@ -0,0 +1,19 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package experiments + +const VaultExperimentEventsAlpha1 = "events.alpha1" + +var validExperiments = []string{ + VaultExperimentEventsAlpha1, +} + +// ValidExperiments exposes the list without exposing a mutable global variable. +// Experiments can only be enabled when starting a server, and will typically +// enable pre-GA API functionality. +func ValidExperiments() []string { + result := make([]string, len(validExperiments)) + copy(result, validExperiments) + return result +} diff --git a/helper/fairshare/fairshare_testing_util.go b/helper/fairshare/fairshare_testing_util.go new file mode 100644 index 0000000..8061795 --- /dev/null +++ b/helper/fairshare/fairshare_testing_util.go @@ -0,0 +1,63 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package fairshare + +import ( + "fmt" + "testing" + + log "github.com/hashicorp/go-hclog" + uuid "github.com/hashicorp/go-uuid" +) + +type testJob struct { + id string + ex func(id string) error + onFail func(error) +} + +func (t *testJob) Execute() error { + return t.ex(t.id) +} + +func (t *testJob) OnFailure(err error) { + t.onFail(err) +} + +func newTestJob(t *testing.T, id string, ex func(string) error, onFail func(error)) testJob { + t.Helper() + if ex == nil { + t.Errorf("ex cannot be nil") + } + if onFail == nil { + t.Errorf("onFail cannot be nil") + } + + return testJob{ + id: id, + ex: ex, + onFail: onFail, + } +} + +func newDefaultTestJob(t *testing.T, id string) testJob { + ex := func(_ string) error { return nil } + onFail := func(_ error) {} + return newTestJob(t, id, ex, onFail) +} + +func newTestLogger(name string) log.Logger { + guid, err := uuid.GenerateUUID() + if err != nil { + guid = "no-guid" + } + return log.New(&log.LoggerOptions{ + Name: fmt.Sprintf("%s-%s", name, guid), + Level: log.LevelFromString("TRACE"), + }) +} + +func GetNumWorkers(j *JobManager) int { + return j.workerPool.numWorkers +} diff --git a/helper/fairshare/jobmanager.go b/helper/fairshare/jobmanager.go new file mode 100644 index 0000000..dc9a619 --- /dev/null +++ b/helper/fairshare/jobmanager.go @@ -0,0 +1,358 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package fairshare + +import ( + "container/list" + "fmt" + "io/ioutil" + "math" + "sync" + "time" + + "github.com/armon/go-metrics" + log "github.com/hashicorp/go-hclog" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/helper/metricsutil" + "github.com/hashicorp/vault/sdk/helper/logging" +) + +type JobManager struct { + name string + queues map[string]*list.List + + quit chan struct{} + newWork chan struct{} // must be buffered + + workerPool *dispatcher + workerCount map[string]int + + onceStart sync.Once + onceStop sync.Once + + logger log.Logger + + totalJobs int + metricSink *metricsutil.ClusterMetricSink + + // waitgroup for testing stop functionality + wg sync.WaitGroup + + // protects `queues`, `workerCount`, `queuesIndex`, `lastQueueAccessed` + l sync.RWMutex + + // track queues by index for round robin worker assignment + queuesIndex []string + lastQueueAccessed int +} + +// NewJobManager creates a job manager, with an optional name +func NewJobManager(name string, numWorkers int, l log.Logger, metricSink *metricsutil.ClusterMetricSink) *JobManager { + if l == nil { + l = logging.NewVaultLoggerWithWriter(ioutil.Discard, log.NoLevel) + } + if name == "" { + guid, err := uuid.GenerateUUID() + if err != nil { + l.Warn("uuid generator failed, using 'no-uuid'", "err", err) + guid = "no-uuid" + } + + name = fmt.Sprintf("jobmanager-%s", guid) + } + + wp := newDispatcher(fmt.Sprintf("%s-dispatcher", name), numWorkers, l) + + j := JobManager{ + name: name, + queues: make(map[string]*list.List), + quit: make(chan struct{}), + newWork: make(chan struct{}, 1), + workerPool: wp, + workerCount: make(map[string]int), + logger: l, + metricSink: metricSink, + queuesIndex: make([]string, 0), + lastQueueAccessed: -1, + } + + j.logger.Trace("created job manager", "name", name, "pool_size", numWorkers) + return &j +} + +// Start starts the job manager +// note: a given job manager cannot be restarted after it has been stopped +func (j *JobManager) Start() { + j.onceStart.Do(func() { + j.logger.Trace("starting job manager", "name", j.name) + j.workerPool.start() + j.assignWork() + }) +} + +// Stop stops the job manager asynchronously +func (j *JobManager) Stop() { + j.onceStop.Do(func() { + j.logger.Trace("terminating job manager...") + close(j.quit) + j.workerPool.stop() + }) +} + +// AddJob adds a job to the given queue, creating the queue if it doesn't exist +func (j *JobManager) AddJob(job Job, queueID string) { + j.l.Lock() + if len(j.queues) == 0 { + defer func() { + // newWork must be buffered to avoid deadlocks if work is added + // before the job manager is started + j.newWork <- struct{}{} + }() + } + defer j.l.Unlock() + + if _, ok := j.queues[queueID]; !ok { + j.addQueue(queueID) + } + + j.queues[queueID].PushBack(job) + j.totalJobs++ + + if j.metricSink != nil { + j.metricSink.AddSampleWithLabels([]string{j.name, "job_manager", "queue_length"}, float32(j.queues[queueID].Len()), []metrics.Label{{"queue_id", queueID}}) + j.metricSink.AddSample([]string{j.name, "job_manager", "total_jobs"}, float32(j.totalJobs)) + } +} + +// GetCurrentJobCount returns the total number of pending jobs in the job manager +func (j *JobManager) GetPendingJobCount() int { + j.l.RLock() + defer j.l.RUnlock() + + cnt := 0 + for _, q := range j.queues { + cnt += q.Len() + } + + return cnt +} + +// GetWorkerCounts() returns a map of queue ID to number of active workers +func (j *JobManager) GetWorkerCounts() map[string]int { + j.l.RLock() + defer j.l.RUnlock() 
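+	// note: the map below is returned without copying, so callers should
+	// treat it as read-only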
+ return j.workerCount +} + +// GetWorkQueueLengths() returns a map of queue ID to number of jobs in the queue +func (j *JobManager) GetWorkQueueLengths() map[string]int { + out := make(map[string]int) + + j.l.RLock() + defer j.l.RUnlock() + + for k, v := range j.queues { + out[k] = v.Len() + } + + return out +} + +// getNextJob pops the next job to be processed and prunes empty queues +// it also returns the ID of the queue the job is associated with +func (j *JobManager) getNextJob() (Job, string) { + j.l.Lock() + defer j.l.Unlock() + + if len(j.queues) == 0 { + return nil, "" + } + + queueID, canAssignWorker := j.getNextQueue() + if !canAssignWorker { + return nil, "" + } + + jobElement := j.queues[queueID].Front() + jobRaw := j.queues[queueID].Remove(jobElement) + + j.totalJobs-- + + if j.metricSink != nil { + j.metricSink.AddSampleWithLabels([]string{j.name, "job_manager", "queue_length"}, float32(j.queues[queueID].Len()), []metrics.Label{{"queue_id", queueID}}) + j.metricSink.AddSample([]string{j.name, "job_manager", "total_jobs"}, float32(j.totalJobs)) + } + + if j.queues[queueID].Len() == 0 { + // we remove the empty queue, but we don't remove the worker count + // in case we are still working on previous jobs from this queue. + // worker count cleanup is handled in j.decrementWorkerCount + j.removeLastQueueAccessed() + } + + return jobRaw.(Job), queueID +} + +// returns the next queue to assign work from, and a bool if there is a queue +// that can have a worker assigned. if there is work to be assigned, +// j.lastQueueAccessed will be updated to that queue. +// note: this must be called with j.l held +func (j *JobManager) getNextQueue() (string, bool) { + var nextQueue string + var canAssignWorker bool + + // ensure we loop through all existing queues until we find an eligible + // queue, if one exists. + queueIdx := j.nextQueueIndex(j.lastQueueAccessed) + for i := 0; i < len(j.queuesIndex); i++ { + potentialQueueID := j.queuesIndex[queueIdx] + + if !j.queueWorkersSaturated(potentialQueueID) { + nextQueue = potentialQueueID + canAssignWorker = true + j.lastQueueAccessed = queueIdx + break + } + + queueIdx = j.nextQueueIndex(queueIdx) + } + + return nextQueue, canAssignWorker +} + +// get the index of the next queue in round-robin order +// note: this must be called with j.l held +func (j *JobManager) nextQueueIndex(currentIdx int) int { + return (currentIdx + 1) % len(j.queuesIndex) +} + +// returns true if there are already too many workers on this queue +// note: this must be called with j.l held (at least for read). 
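+// note: a queue counts as saturated once it holds at least
+// ceil(0.9 * total workers / active queues) workers, so a single busy queue
+// cannot claim the entire worker pool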
+// note: we may want to eventually factor in queue length relative to num queues
+func (j *JobManager) queueWorkersSaturated(queueID string) bool {
+	numActiveQueues := float64(len(j.queues))
+	numTotalWorkers := float64(j.workerPool.numWorkers)
+	maxWorkersPerQueue := math.Ceil(0.9 * numTotalWorkers / numActiveQueues)
+
+	numWorkersPerQueue := j.workerCount
+
+	return numWorkersPerQueue[queueID] >= int(maxWorkersPerQueue)
+}
+
+// increment the worker count for this queue
+func (j *JobManager) incrementWorkerCount(queueID string) {
+	j.l.Lock()
+	defer j.l.Unlock()
+
+	j.workerCount[queueID]++
+}
+
+// decrement the worker count for this queue
+// this also removes worker tracking for this queue if needed
+func (j *JobManager) decrementWorkerCount(queueID string) {
+	j.l.Lock()
+	defer j.l.Unlock()
+
+	j.workerCount[queueID]--
+
+	_, queueExists := j.queues[queueID]
+	if !queueExists && j.workerCount[queueID] < 1 {
+		delete(j.workerCount, queueID)
+	}
+}
+
+// assignWork continually checks for new jobs and dispatches them to the
+// worker pool
+func (j *JobManager) assignWork() {
+	j.wg.Add(1)
+
+	go func() {
+		// a ticker is used instead of time.After in the for-select loop
+		// below, to avoid the memory cost of allocating a new timer on
+		// every iteration.
+		ticker := time.NewTicker(50 * time.Millisecond)
+		defer ticker.Stop()
+		for {
+			for {
+				// assign work while there are jobs to distribute
+				select {
+				case <-j.quit:
+					j.wg.Done()
+					return
+				case <-j.newWork:
+					// keep the channel empty since we're already processing work
+				default:
+				}
+
+				job, queueID := j.getNextJob()
+				if job != nil {
+					j.workerPool.dispatch(job,
+						func() {
+							j.incrementWorkerCount(queueID)
+						},
+						func() {
+							j.decrementWorkerCount(queueID)
+						})
+				} else {
+					break
+				}
+			}
+
+			ticker.Reset(50 * time.Millisecond)
+			select {
+			case <-j.quit:
+				j.wg.Done()
+				return
+			case <-j.newWork:
+				// listen for wake-up when an empty job manager has been given work
+			case <-ticker.C:
+				// periodically check if new workers can be assigned. with the
+				// fairsharing worker distribution it can be the case that there
+				// is work waiting, but no queues are eligible for another worker
+			}
+		}
+	}()
+}
+
+// addQueue generates a new queue if a queue for `queueID` doesn't exist
+// it also starts tracking workers on that queue, if not already tracked
+// note: this must be called with j.l held for write
+func (j *JobManager) addQueue(queueID string) {
+	if _, ok := j.queues[queueID]; !ok {
+		j.queues[queueID] = list.New()
+		j.queuesIndex = append(j.queuesIndex, queueID)
+	}
+
+	// it's possible the queue ran out of work and was pruned, but there were
+	// still workers operating on data formerly in that queue, which were still
+	// being tracked. if that is the case, we don't want to wipe out that worker
+	// count when the queue is re-initialized.
+	if _, ok := j.workerCount[queueID]; !ok {
+		j.workerCount[queueID] = 0
+	}
+}
+
+// removes the queue and index tracker for the last queue accessed.
+// it is to be used when the last queue accessed has emptied.
+// note: this must be called with j.l held.
+func (j *JobManager) removeLastQueueAccessed() {
+	if j.lastQueueAccessed == -1 || j.lastQueueAccessed > len(j.queuesIndex)-1 {
+		j.logger.Warn("call to remove queue out of bounds", "idx", j.lastQueueAccessed)
+		return
+	}
+
+	queueID := j.queuesIndex[j.lastQueueAccessed]
+
+	// remove the queue
+	delete(j.queues, queueID)
+
+	// remove the index for the queue
+	j.queuesIndex = append(j.queuesIndex[:j.lastQueueAccessed], j.queuesIndex[j.lastQueueAccessed+1:]...)
+ + // correct the last queue accessed for round robining + if j.lastQueueAccessed > 0 { + j.lastQueueAccessed-- + } else { + j.lastQueueAccessed = len(j.queuesIndex) - 1 + } +} diff --git a/helper/fairshare/jobmanager_test.go b/helper/fairshare/jobmanager_test.go new file mode 100644 index 0000000..3d6638a --- /dev/null +++ b/helper/fairshare/jobmanager_test.go @@ -0,0 +1,749 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package fairshare + +import ( + "fmt" + "reflect" + "sync" + "testing" + "time" +) + +func TestJobManager_NewJobManager(t *testing.T) { + testCases := []struct { + name string + numWorkers int + expectedNumWorkers int + }{ + { + name: "", + numWorkers: 0, + expectedNumWorkers: 1, + }, + { + name: "", + numWorkers: 5, + expectedNumWorkers: 5, + }, + { + name: "", + numWorkers: 5, + expectedNumWorkers: 5, + }, + { + name: "", + numWorkers: 5, + expectedNumWorkers: 5, + }, + { + name: "", + numWorkers: 5, + expectedNumWorkers: 5, + }, + } + + l := newTestLogger("jobmanager-test") + for tcNum, tc := range testCases { + j := NewJobManager(tc.name, tc.numWorkers, l, nil) + + if tc.name != "" && tc.name != j.name { + t.Errorf("tc %d: expected name %s, got %s", tcNum, tc.name, j.name) + } + if j.queues == nil { + t.Errorf("tc %d: queues not set up properly", tcNum) + } + if j.queuesIndex == nil { + t.Errorf("tc %d: queues index not set up properly", tcNum) + } + if j.quit == nil { + t.Errorf("tc %d: quit channel not set up properly", tcNum) + } + if j.workerPool.numWorkers != tc.expectedNumWorkers { + t.Errorf("tc %d: expected %d workers, got %d", tcNum, tc.expectedNumWorkers, j.workerPool.numWorkers) + } + if j.logger == nil { + t.Errorf("tc %d: logger not set up properly", tcNum) + } + } +} + +func TestJobManager_Start(t *testing.T) { + numJobs := 10 + j := NewJobManager("job-mgr-test", 3, newTestLogger("jobmanager-test"), nil) + + var wg sync.WaitGroup + wg.Add(numJobs) + j.Start() + defer j.Stop() + + doneCh := make(chan struct{}) + timeout := time.After(5 * time.Second) + go func() { + wg.Wait() + doneCh <- struct{}{} + }() + + ex := func(_ string) error { + wg.Done() + return nil + } + onFail := func(_ error) {} + + for i := 0; i < numJobs; i++ { + // distribute jobs between 3 queues in the job manager + job := newTestJob(t, fmt.Sprintf("test-job-%d", i), ex, onFail) + j.AddJob(&job, fmt.Sprintf("queue-%d", i%3)) + } + + select { + case <-doneCh: + break + case <-timeout: + t.Fatal("timed out") + } +} + +func TestJobManager_StartAndPause(t *testing.T) { + numJobs := 10 + j := NewJobManager("job-mgr-test", 3, newTestLogger("jobmanager-test"), nil) + + var wg sync.WaitGroup + wg.Add(numJobs) + j.Start() + defer j.Stop() + + doneCh := make(chan struct{}) + timeout := time.After(5 * time.Second) + go func() { + wg.Wait() + doneCh <- struct{}{} + }() + + ex := func(_ string) error { + wg.Done() + return nil + } + onFail := func(_ error) {} + + for i := 0; i < numJobs; i++ { + // distribute jobs between 3 queues in the job manager + job := newTestJob(t, fmt.Sprintf("test-job-%d", i), ex, onFail) + j.AddJob(&job, fmt.Sprintf("queue-%d", i%3)) + } + + select { + case <-doneCh: + break + case <-timeout: + t.Fatal("timed out") + } + + // now that the work queue is empty, let's add more jobs and make sure + // we pick up where we left off + + for i := 0; i < 5; i++ { + numAdditionalJobs := 5 + wg.Add(numAdditionalJobs) + + timeout = time.After(5 * time.Second) + go func() { + wg.Wait() + doneCh <- struct{}{} + }() + + for i := numJobs; i < 
numJobs+numAdditionalJobs; i++ { + // distribute jobs between 3 queues in the job manager + job := newTestJob(t, fmt.Sprintf("test-job-%d", i), ex, onFail) + j.AddJob(&job, fmt.Sprintf("queue-%d", i%3)) + } + + select { + case <-doneCh: + break + case <-timeout: + t.Fatal("timed out") + } + + numJobs += numAdditionalJobs + } +} + +func TestJobManager_Stop(t *testing.T) { + j := NewJobManager("job-mgr-test", 5, newTestLogger("jobmanager-test"), nil) + + j.Start() + + doneCh := make(chan struct{}) + timeout := time.After(5 * time.Second) + go func() { + j.Stop() + j.wg.Wait() + doneCh <- struct{}{} + }() + + select { + case <-doneCh: + break + case <-timeout: + t.Fatal("timed out") + } +} + +func TestFairshare_StopMultiple(t *testing.T) { + j := NewJobManager("job-mgr-test", 5, newTestLogger("jobmanager-test"), nil) + + j.Start() + + doneCh := make(chan struct{}) + timeout := time.After(5 * time.Second) + go func() { + j.Stop() + j.wg.Wait() + doneCh <- struct{}{} + }() + + select { + case <-doneCh: + break + case <-timeout: + t.Fatal("timed out") + } + + // essentially, we don't want to panic here + var r interface{} + go func() { + t.Helper() + + defer func() { + r = recover() + doneCh <- struct{}{} + }() + + j.Stop() + j.wg.Wait() + }() + + select { + case <-doneCh: + break + case <-timeout: + t.Fatal("timed out") + } + + if r != nil { + t.Fatalf("panic during second stop: %v", r) + } +} + +func TestJobManager_AddJob(t *testing.T) { + testCases := []struct { + name string + queueID string + }{ + { + name: "test1", + queueID: "q1", + }, + { + name: "test2", + queueID: "q1", + }, + { + name: "test3", + queueID: "q1", + }, + { + name: "test4", + queueID: "q2", + }, + { + name: "test5", + queueID: "q3", + }, + } + + j := NewJobManager("job-mgr-test", 3, newTestLogger("jobmanager-test"), nil) + + expectedCount := make(map[string]int) + for _, tc := range testCases { + if _, ok := expectedCount[tc.queueID]; !ok { + expectedCount[tc.queueID] = 1 + } else { + expectedCount[tc.queueID]++ + } + + job := newDefaultTestJob(t, tc.name) + j.AddJob(&job, tc.queueID) + } + + if len(expectedCount) != len(j.queues) { + t.Fatalf("expected %d queues, got %d", len(expectedCount), len(j.queues)) + } + + for k, v := range j.queues { + if v.Len() != expectedCount[k] { + t.Fatalf("queue %s has bad count. 
expected %d, got %d", k, expectedCount[k], v.Len()) + } + } +} + +func TestJobManager_GetPendingJobCount(t *testing.T) { + numJobs := 15 + j := NewJobManager("test-job-mgr", 3, newTestLogger("jobmanager-test"), nil) + + for i := 0; i < numJobs; i++ { + job := newDefaultTestJob(t, fmt.Sprintf("job-%d", i)) + j.AddJob(&job, fmt.Sprintf("queue-%d", i%4)) + } + + pendingJobs := j.GetPendingJobCount() + if pendingJobs != numJobs { + t.Errorf("expected %d jobs, got %d", numJobs, pendingJobs) + } +} + +func TestJobManager_GetWorkQueueLengths(t *testing.T) { + j := NewJobManager("test-job-mgr", 3, newTestLogger("jobmanager-test"), nil) + + expected := make(map[string]int) + for i := 0; i < 25; i++ { + queueID := fmt.Sprintf("queue-%d", i%4) + job := newDefaultTestJob(t, fmt.Sprintf("job-%d", i)) + + j.AddJob(&job, queueID) + + if _, ok := expected[queueID]; !ok { + expected[queueID] = 0 + } + + expected[queueID]++ + } + + pendingJobs := j.GetWorkQueueLengths() + if !reflect.DeepEqual(pendingJobs, expected) { + t.Errorf("expected %v job count, got %v", expected, pendingJobs) + } +} + +func TestJobManager_removeLastQueueAccessed(t *testing.T) { + j := NewJobManager("job-mgr-test", 1, newTestLogger("jobmanager-test"), nil) + + testCases := []struct { + lastQueueAccessed int + updatedLastQueueAccessed int + len int + expectedQueues []string + }{ + { + // remove with bad index (too low) + lastQueueAccessed: -1, + updatedLastQueueAccessed: -1, + len: 3, + expectedQueues: []string{"queue-0", "queue-1", "queue-2"}, + }, + { + // remove with bad index (too high) + lastQueueAccessed: 3, + updatedLastQueueAccessed: 3, + len: 3, + expectedQueues: []string{"queue-0", "queue-1", "queue-2"}, + }, + { + // remove queue-1 (index 1) + lastQueueAccessed: 1, + updatedLastQueueAccessed: 0, + len: 2, + expectedQueues: []string{"queue-0", "queue-2"}, + }, + { + // remove queue-0 (index 0) + lastQueueAccessed: 0, + updatedLastQueueAccessed: 0, + len: 1, + expectedQueues: []string{"queue-2"}, + }, + { + // remove queue-1 (index 1) + lastQueueAccessed: 0, + updatedLastQueueAccessed: -1, + len: 0, + expectedQueues: []string{}, + }, + } + + j.l.Lock() + defer j.l.Unlock() + + j.addQueue("queue-0") + j.addQueue("queue-1") + j.addQueue("queue-2") + + for _, tc := range testCases { + j.lastQueueAccessed = tc.lastQueueAccessed + j.removeLastQueueAccessed() + + if j.lastQueueAccessed != tc.updatedLastQueueAccessed { + t.Errorf("last queue access update failed. expected %d, got %d", tc.updatedLastQueueAccessed, j.lastQueueAccessed) + } + if len(j.queuesIndex) != tc.len { + t.Fatalf("queue index update failed. expected %d elements, found %v", tc.len, j.queues) + } + if len(j.queues) != len(tc.expectedQueues) { + t.Fatalf("bad amount of queues. expected %d, found %v", len(tc.expectedQueues), j.queues) + } + + for _, q := range tc.expectedQueues { + if _, ok := j.queues[q]; !ok { + t.Errorf("bad queue. expected %s in %v", q, j.queues) + } + } + } +} + +func TestJobManager_EndToEnd(t *testing.T) { + testCases := []struct { + name string + queueID string + }{ + { + name: "job-1", + queueID: "queue-1", + }, + { + name: "job-2", + queueID: "queue-2", + }, + { + name: "job-3", + queueID: "queue-1", + }, + { + name: "job-4", + queueID: "queue-3", + }, + { + name: "job-5", + queueID: "queue-3", + }, + } + + // we add the jobs before starting the workers, so we'd expect the round + // robin to pick the least-recently-added job from each queue, and cycle + // through queues in a round-robin fashion. 
jobs would appear on the queues + // as illustrated below, and we expect to round robin as: + // queue-1 -> queue-2 -> queue-3 -> queue-1 ... + // + // queue-1 [job-3, job-1] + // queue-2 [job-2] + // queue-3 [job-5, job-4] + + // ... where jobs are pushed to the left side and popped from the right side + + expectedOrder := []string{"job-1", "job-2", "job-4", "job-3", "job-5"} + + resultsCh := make(chan string) + defer close(resultsCh) + + var mu sync.Mutex + order := make([]string, 0) + + go func() { + for { + select { + case res, ok := <-resultsCh: + if !ok { + return + } + + mu.Lock() + order = append(order, res) + mu.Unlock() + } + } + }() + + var wg sync.WaitGroup + ex := func(name string) error { + resultsCh <- name + time.Sleep(50 * time.Millisecond) + wg.Done() + return nil + } + onFail := func(_ error) {} + + // use one worker to guarantee ordering + j := NewJobManager("test-job-mgr", 1, newTestLogger("jobmanager-test"), nil) + for _, tc := range testCases { + wg.Add(1) + job := newTestJob(t, tc.name, ex, onFail) + j.AddJob(&job, tc.queueID) + } + + j.Start() + defer j.Stop() + + doneCh := make(chan struct{}) + go func() { + wg.Wait() + doneCh <- struct{}{} + }() + + timeout := time.After(5 * time.Second) + select { + case <-doneCh: + break + case <-timeout: + t.Fatal("timed out") + } + + mu.Lock() + defer mu.Unlock() + if !reflect.DeepEqual(order, expectedOrder) { + t.Fatalf("results out of order. \nexpected: %v\ngot: %v", expectedOrder, order) + } +} + +func TestFairshare_StressTest(t *testing.T) { + var wg sync.WaitGroup + ex := func(name string) error { + wg.Done() + return nil + } + onFail := func(_ error) {} + + j := NewJobManager("test-job-mgr", 15, nil, nil) + j.Start() + defer j.Stop() + + for i := 0; i < 3000; i++ { + wg.Add(1) + job := newTestJob(t, fmt.Sprintf("a-job-%d", i), ex, onFail) + j.AddJob(&job, "a") + } + for i := 0; i < 4000; i++ { + wg.Add(1) + job := newTestJob(t, fmt.Sprintf("b-job-%d", i), ex, onFail) + j.AddJob(&job, "b") + } + for i := 0; i < 3000; i++ { + wg.Add(1) + job := newTestJob(t, fmt.Sprintf("c-job-%d", i), ex, onFail) + j.AddJob(&job, "c") + } + + doneCh := make(chan struct{}) + go func() { + wg.Wait() + doneCh <- struct{}{} + }() + + timeout := time.After(5 * time.Second) + select { + case <-doneCh: + break + case <-timeout: + t.Fatal("timed out") + } +} + +func TestFairshare_nilLoggerJobManager(t *testing.T) { + j := NewJobManager("test-job-mgr", 1, nil, nil) + if j.logger == nil { + t.Error("logger not set up properly") + } +} + +func TestFairshare_getNextQueue(t *testing.T) { + j := NewJobManager("test-job-mgr", 18, nil, nil) + + for i := 0; i < 10; i++ { + job := newDefaultTestJob(t, fmt.Sprintf("job-%d", i)) + j.AddJob(&job, "a") + j.AddJob(&job, "b") + j.AddJob(&job, "c") + } + + j.l.Lock() + defer j.l.Unlock() + + // fake out some number of workers with various remaining work scenario + // no queue can be assigned more than 6 workers + j.workerCount["a"] = 1 + j.workerCount["b"] = 2 + j.workerCount["c"] = 5 + + expectedOrder := []string{"a", "b", "c", "a", "b", "a", "b", "a", "b", "a"} + + for _, expectedQueueID := range expectedOrder { + queueID, canAssignWorker := j.getNextQueue() + + if !canAssignWorker { + t.Fatalf("expected have work true, got false for queue %q", queueID) + } + if queueID != expectedQueueID { + t.Errorf("expected queueID %q, got %q", expectedQueueID, queueID) + } + + // simulate a worker being added to that queue + j.workerCount[queueID]++ + } + + // queues are saturated with work, we shouldn't be able to find a 
queue + // eligible for a worker (and last accessed queue shouldn't update) + expectedLastQueueAccessed := j.lastQueueAccessed + queueID, canAssignWork := j.getNextQueue() + if canAssignWork { + t.Error("should not be able to assign work with all queues saturated") + } + if queueID != "" { + t.Errorf("expected no queueID, got %s", queueID) + } + if j.lastQueueAccessed != expectedLastQueueAccessed { + t.Errorf("expected no last queue accessed update. had %d, got %d", expectedLastQueueAccessed, j.lastQueueAccessed) + } +} + +func TestJobManager_pruneEmptyQueues(t *testing.T) { + j := NewJobManager("test-job-mgr", 18, nil, nil) + + // add a few jobs to test out queue pruning + // for test simplicity, we'll keep the number of workers per queue at 0 + testJob := newDefaultTestJob(t, "job-0") + j.AddJob(&testJob, "a") + j.AddJob(&testJob, "a") + j.AddJob(&testJob, "b") + + job, queueID := j.getNextJob() + if queueID != "a" || job == nil { + t.Fatalf("bad next job: queueID %s, job: %#v", queueID, job) + } + + j.l.RLock() + if _, ok := j.queues["a"]; !ok { + t.Error("expected queue 'a' to exist") + } + if _, ok := j.queues["b"]; !ok { + t.Error("expected queue 'b' to exist") + } + j.l.RUnlock() + + job, queueID = j.getNextJob() + if queueID != "b" || job == nil { + t.Fatalf("bad next job: queueID %s, job: %#v", queueID, job) + } + + j.l.RLock() + if _, ok := j.queues["a"]; !ok { + t.Error("expected queue 'a' to exist") + } + if _, ok := j.queues["b"]; ok { + t.Error("expected queue 'b' to be pruned") + } + j.l.RUnlock() + + job, queueID = j.getNextJob() + if queueID != "a" || job == nil { + t.Fatalf("bad next job: queueID %s, job: %#v", queueID, job) + } + + j.l.RLock() + if _, ok := j.queues["a"]; ok { + t.Error("expected queue 'a' to be pruned") + } + if _, ok := j.queues["b"]; ok { + t.Error("expected queue 'b' to be pruned") + } + j.l.RUnlock() + + job, queueID = j.getNextJob() + if job != nil { + t.Errorf("expected no more jobs (out of queues). 
queueID: %s, job: %#v", queueID, job) + } +} + +func TestFairshare_WorkerCount_IncrementAndDecrement(t *testing.T) { + j := NewJobManager("test-job-mgr", 18, nil, nil) + + job := newDefaultTestJob(t, "job-0") + j.AddJob(&job, "a") + j.AddJob(&job, "b") + j.AddJob(&job, "c") + + // test to make sure increment works + j.incrementWorkerCount("a") + workerCounts := j.GetWorkerCounts() + if workerCounts["a"] != 1 { + t.Fatalf("expected 1 worker on 'a', got %d", workerCounts["a"]) + } + if workerCounts["b"] != 0 { + t.Fatalf("expected 0 workers on 'b', got %d", workerCounts["b"]) + } + if workerCounts["c"] != 0 { + t.Fatalf("expected 0 workers on 'c', got %d", workerCounts["c"]) + } + + // test to make sure decrement works (when there is still work for the queue) + j.decrementWorkerCount("a") + workerCounts = j.GetWorkerCounts() + if workerCounts["a"] != 0 { + t.Fatalf("expected 0 workers on 'a', got %d", workerCounts["a"]) + } + + // add a worker to queue "a" and remove all work to ensure worker count gets + // cleared out for "a" + j.incrementWorkerCount("a") + j.l.Lock() + delete(j.queues, "a") + j.l.Unlock() + + j.decrementWorkerCount("a") + workerCounts = j.GetWorkerCounts() + if _, ok := workerCounts["a"]; ok { + t.Fatalf("expected no worker count for 'a', got %#v", workerCounts) + } +} + +func TestFairshare_queueWorkersSaturated(t *testing.T) { + j := NewJobManager("test-job-mgr", 20, nil, nil) + + job := newDefaultTestJob(t, "job-0") + j.AddJob(&job, "a") + j.AddJob(&job, "b") + + // no more than 9 workers can be assigned to a single queue in this example + for i := 0; i < 8; i++ { + j.incrementWorkerCount("a") + j.incrementWorkerCount("b") + + j.l.RLock() + if j.queueWorkersSaturated("a") { + j.l.RUnlock() + t.Fatalf("queue 'a' falsely saturated: %#v", j.GetWorkerCounts()) + } + if j.queueWorkersSaturated("b") { + j.l.RUnlock() + t.Fatalf("queue 'b' falsely saturated: %#v", j.GetWorkerCounts()) + } + j.l.RUnlock() + } + + // adding the 9th and 10th workers should saturate the number of workers we + // can have per queue + for i := 8; i < 10; i++ { + j.incrementWorkerCount("a") + j.incrementWorkerCount("b") + + j.l.RLock() + if !j.queueWorkersSaturated("a") { + j.l.RUnlock() + t.Fatalf("queue 'a' falsely unsaturated: %#v", j.GetWorkerCounts()) + } + if !j.queueWorkersSaturated("b") { + j.l.RUnlock() + t.Fatalf("queue 'b' falsely unsaturated: %#v", j.GetWorkerCounts()) + } + j.l.RUnlock() + } +} diff --git a/helper/fairshare/workerpool.go b/helper/fairshare/workerpool.go new file mode 100644 index 0000000..e655a90 --- /dev/null +++ b/helper/fairshare/workerpool.go @@ -0,0 +1,189 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package fairshare + +import ( + "fmt" + "io/ioutil" + "sync" + + log "github.com/hashicorp/go-hclog" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/sdk/helper/logging" +) + +// Job is an interface for jobs used with this job manager +type Job interface { + // Execute performs the work. + // It should be synchronous if a cleanupFn is provided. + Execute() error + + // OnFailure handles the error resulting from a failed Execute(). + // It should be synchronous if a cleanupFn is provided. 
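+	// The error passed in is the error returned by Execute.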
+ OnFailure(err error) +} + +type ( + initFn func() + cleanupFn func() +) + +type wrappedJob struct { + job Job + init initFn + cleanup cleanupFn +} + +// worker represents a single worker in a pool +type worker struct { + name string + jobCh <-chan wrappedJob + quit chan struct{} + logger log.Logger + + // waitgroup for testing stop functionality + wg *sync.WaitGroup +} + +// start starts the worker listening and working until the quit channel is closed +func (w *worker) start() { + w.wg.Add(1) + + go func() { + for { + select { + case <-w.quit: + w.wg.Done() + return + case wJob := <-w.jobCh: + if wJob.init != nil { + wJob.init() + } + + err := wJob.job.Execute() + if err != nil { + wJob.job.OnFailure(err) + } + + if wJob.cleanup != nil { + wJob.cleanup() + } + } + } + }() +} + +// dispatcher represents a worker pool +type dispatcher struct { + name string + numWorkers int + workers []worker + jobCh chan wrappedJob + onceStart sync.Once + onceStop sync.Once + quit chan struct{} + logger log.Logger + wg *sync.WaitGroup +} + +// newDispatcher generates a new worker dispatcher and populates it with workers +func newDispatcher(name string, numWorkers int, l log.Logger) *dispatcher { + d := createDispatcher(name, numWorkers, l) + + d.init() + return d +} + +// dispatch dispatches a job to the worker pool, with optional initialization +// and cleanup functions (useful for tracking job progress) +func (d *dispatcher) dispatch(job Job, init initFn, cleanup cleanupFn) { + wJob := wrappedJob{ + init: init, + job: job, + cleanup: cleanup, + } + + select { + case d.jobCh <- wJob: + case <-d.quit: + d.logger.Info("shutting down during dispatch") + } +} + +// start starts all the workers listening on the job channel +// this will only start the workers for this dispatch once +func (d *dispatcher) start() { + d.onceStart.Do(func() { + d.logger.Trace("starting dispatcher") + for _, w := range d.workers { + worker := w + worker.start() + } + }) +} + +// stop stops the worker pool asynchronously +func (d *dispatcher) stop() { + d.onceStop.Do(func() { + d.logger.Trace("terminating dispatcher") + close(d.quit) + }) +} + +// createDispatcher generates a new Dispatcher object, but does not initialize the +// worker pool +func createDispatcher(name string, numWorkers int, l log.Logger) *dispatcher { + if l == nil { + l = logging.NewVaultLoggerWithWriter(ioutil.Discard, log.NoLevel) + } + if numWorkers <= 0 { + numWorkers = 1 + l.Warn("must have 1 or more workers. 
setting number of workers to 1") + } + + if name == "" { + guid, err := uuid.GenerateUUID() + if err != nil { + l.Warn("uuid generator failed, using 'no-uuid'", "err", err) + guid = "no-uuid" + } + + name = fmt.Sprintf("dispatcher-%s", guid) + } + + var wg sync.WaitGroup + d := dispatcher{ + name: name, + numWorkers: numWorkers, + workers: make([]worker, 0), + jobCh: make(chan wrappedJob), + quit: make(chan struct{}), + logger: l, + wg: &wg, + } + + d.logger.Trace("created dispatcher", "name", d.name, "num_workers", d.numWorkers) + return &d +} + +func (d *dispatcher) init() { + for len(d.workers) < d.numWorkers { + d.initializeWorker() + } + + d.logger.Trace("initialized dispatcher", "num_workers", d.numWorkers) +} + +// initializeWorker initializes and adds a new worker, with an optional name +func (d *dispatcher) initializeWorker() { + w := worker{ + name: fmt.Sprint("worker-", len(d.workers)), + jobCh: d.jobCh, + quit: d.quit, + logger: d.logger, + wg: d.wg, + } + + d.workers = append(d.workers, w) +} diff --git a/helper/fairshare/workerpool_test.go b/helper/fairshare/workerpool_test.go new file mode 100644 index 0000000..eb56314 --- /dev/null +++ b/helper/fairshare/workerpool_test.go @@ -0,0 +1,398 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package fairshare + +import ( + "fmt" + "reflect" + "sync" + "testing" + "time" +) + +func TestFairshare_newDispatcher(t *testing.T) { + testCases := []struct { + name string + numWorkers int + expectedNumWorkers int + }{ + { + name: "", + numWorkers: 0, + expectedNumWorkers: 1, + }, + { + name: "", + numWorkers: 10, + expectedNumWorkers: 10, + }, + { + name: "test-dispatcher", + numWorkers: 10, + expectedNumWorkers: 10, + }, + } + + l := newTestLogger("workerpool-test") + for tcNum, tc := range testCases { + d := newDispatcher(tc.name, tc.numWorkers, l) + + if tc.name != "" && d.name != tc.name { + t.Errorf("tc %d: expected name %s, got %s", tcNum, tc.name, d.name) + } + if len(d.workers) != tc.expectedNumWorkers { + t.Errorf("tc %d: expected %d workers, got %d", tcNum, tc.expectedNumWorkers, len(d.workers)) + } + if d.jobCh == nil { + t.Errorf("tc %d: work channel not set up properly", tcNum) + } + } +} + +func TestFairshare_createDispatcher(t *testing.T) { + testCases := []struct { + name string + numWorkers int + expectedNumWorkers int + }{ + { + name: "", + numWorkers: -1, + expectedNumWorkers: 1, + }, + { + name: "", + numWorkers: 0, + expectedNumWorkers: 1, + }, + { + name: "", + numWorkers: 10, + expectedNumWorkers: 10, + }, + { + name: "", + numWorkers: 10, + expectedNumWorkers: 10, + }, + { + name: "test-dispatcher", + numWorkers: 10, + expectedNumWorkers: 10, + }, + } + + l := newTestLogger("workerpool-test") + for tcNum, tc := range testCases { + d := createDispatcher(tc.name, tc.numWorkers, l) + if d == nil { + t.Fatalf("tc %d: expected non-nil object", tcNum) + } + + if tc.name != "" && d.name != tc.name { + t.Errorf("tc %d: expected name %s, got %s", tcNum, tc.name, d.name) + } + if len(d.name) == 0 { + t.Errorf("tc %d: expected name to be set", tcNum) + } + if d.numWorkers != tc.expectedNumWorkers { + t.Errorf("tc %d: expected %d workers, got %d", tcNum, tc.expectedNumWorkers, d.numWorkers) + } + if d.workers == nil { + t.Errorf("tc %d: expected non-nil workers", tcNum) + } + if d.jobCh == nil { + t.Errorf("tc %d: work channel not set up properly", tcNum) + } + if d.quit == nil { + t.Errorf("tc %d: expected non-nil quit channel", tcNum) + } + if d.logger == nil { + t.Errorf("tc %d: expected non-nil 
logger", tcNum) + } + } +} + +func TestFairshare_initDispatcher(t *testing.T) { + testCases := []struct { + numWorkers int + }{ + { + numWorkers: 1, + }, + { + numWorkers: 10, + }, + { + numWorkers: 100, + }, + { + numWorkers: 1000, + }, + } + + l := newTestLogger("workerpool-test") + for tcNum, tc := range testCases { + d := createDispatcher("", tc.numWorkers, l) + + d.init() + if len(d.workers) != tc.numWorkers { + t.Fatalf("tc %d: expected %d workers, got %d", tcNum, tc.numWorkers, len(d.workers)) + } + } +} + +func TestFairshare_initializeWorker(t *testing.T) { + numWorkers := 3 + + d := createDispatcher("", numWorkers, newTestLogger("workerpool-test")) + + for workerNum := 0; workerNum < numWorkers; workerNum++ { + d.initializeWorker() + + w := d.workers[workerNum] + expectedName := fmt.Sprint("worker-", workerNum) + if w.name != expectedName { + t.Errorf("tc %d: expected name %s, got %s", workerNum, expectedName, w.name) + } + if w.jobCh != d.jobCh { + t.Errorf("tc %d: work channel not set up properly", workerNum) + } + if w.quit == nil || w.quit != d.quit { + t.Errorf("tc %d: quit channel not set up properly", workerNum) + } + if w.logger == nil || w.logger != d.logger { + t.Errorf("tc %d: logger not set up properly", workerNum) + } + } +} + +func TestFairshare_startWorker(t *testing.T) { + d := newDispatcher("", 1, newTestLogger("workerpool-test")) + + d.workers[0].start() + defer d.stop() + + var wg sync.WaitGroup + ex := func(_ string) error { + wg.Done() + return nil + } + onFail := func(_ error) {} + + job := newTestJob(t, "test job", ex, onFail) + + doneCh := make(chan struct{}) + timeout := time.After(5 * time.Second) + + wg.Add(1) + d.dispatch(&job, nil, nil) + go func() { + wg.Wait() + doneCh <- struct{}{} + }() + + select { + case <-doneCh: + break + case <-timeout: + t.Fatal("timed out") + } +} + +func TestFairshare_start(t *testing.T) { + numJobs := 10 + var wg sync.WaitGroup + ex := func(_ string) error { + wg.Done() + return nil + } + onFail := func(_ error) {} + + wg.Add(numJobs) + d := newDispatcher("", 3, newTestLogger("workerpool-test")) + + d.start() + defer d.stop() + + doneCh := make(chan struct{}) + timeout := time.After(5 * time.Second) + go func() { + wg.Wait() + doneCh <- struct{}{} + }() + + for i := 0; i < numJobs; i++ { + job := newTestJob(t, fmt.Sprintf("job-%d", i), ex, onFail) + d.dispatch(&job, nil, nil) + } + + select { + case <-doneCh: + break + case <-timeout: + t.Fatal("timed out") + } +} + +func TestFairshare_stop(t *testing.T) { + d := newDispatcher("", 5, newTestLogger("workerpool-test")) + + d.start() + + doneCh := make(chan struct{}) + timeout := time.After(5 * time.Second) + + go func() { + d.stop() + d.wg.Wait() + doneCh <- struct{}{} + }() + + select { + case <-doneCh: + break + case <-timeout: + t.Fatal("timed out") + } +} + +func TestFairshare_stopMultiple(t *testing.T) { + d := newDispatcher("", 5, newTestLogger("workerpool-test")) + + d.start() + + doneCh := make(chan struct{}) + timeout := time.After(5 * time.Second) + + go func() { + d.stop() + d.wg.Wait() + doneCh <- struct{}{} + }() + + select { + case <-doneCh: + break + case <-timeout: + t.Fatal("timed out") + } + + // essentially, we don't want to panic here + var r interface{} + go func() { + t.Helper() + + defer func() { + r = recover() + doneCh <- struct{}{} + }() + + d.stop() + d.wg.Wait() + }() + + select { + case <-doneCh: + break + case <-timeout: + t.Fatal("timed out") + } + + if r != nil { + t.Fatalf("panic during second stop: %v", r) + } +} + +func 
TestFairshare_dispatch(t *testing.T) { + d := newDispatcher("", 1, newTestLogger("workerpool-test")) + + var wg sync.WaitGroup + accumulatedIDs := make([]string, 0) + ex := func(id string) error { + accumulatedIDs = append(accumulatedIDs, id) + wg.Done() + return nil + } + onFail := func(_ error) {} + + expectedIDs := []string{"job-1", "job-2", "job-3", "job-4"} + go func() { + for _, id := range expectedIDs { + job := newTestJob(t, id, ex, onFail) + d.dispatch(&job, nil, nil) + } + }() + + wg.Add(len(expectedIDs)) + d.start() + defer d.stop() + + doneCh := make(chan struct{}) + go func() { + wg.Wait() + doneCh <- struct{}{} + }() + + timeout := time.After(5 * time.Second) + select { + case <-doneCh: + break + case <-timeout: + t.Fatal("timed out") + } + + if !reflect.DeepEqual(accumulatedIDs, expectedIDs) { + t.Fatalf("bad job ids. expected %v, got %v", expectedIDs, accumulatedIDs) + } +} + +func TestFairshare_jobFailure(t *testing.T) { + numJobs := 10 + testErr := fmt.Errorf("test error") + var wg sync.WaitGroup + + ex := func(_ string) error { + return testErr + } + onFail := func(err error) { + if err != testErr { + t.Errorf("got unexpected error. expected %v, got %v", testErr, err) + } + + wg.Done() + } + + wg.Add(numJobs) + d := newDispatcher("", 3, newTestLogger("workerpool-test")) + + d.start() + defer d.stop() + + doneCh := make(chan struct{}) + timeout := time.After(5 * time.Second) + go func() { + wg.Wait() + doneCh <- struct{}{} + }() + + for i := 0; i < numJobs; i++ { + job := newTestJob(t, fmt.Sprintf("job-%d", i), ex, onFail) + d.dispatch(&job, nil, nil) + } + + select { + case <-doneCh: + break + case <-timeout: + t.Fatal("timed out") + } +} + +func TestFairshare_nilLoggerDispatcher(t *testing.T) { + d := newDispatcher("test-job-mgr", 1, nil) + if d.logger == nil { + t.Error("logger not set up properly") + } +} diff --git a/helper/flag-kv/flag.go b/helper/flag-kv/flag.go new file mode 100644 index 0000000..a3b04ce --- /dev/null +++ b/helper/flag-kv/flag.go @@ -0,0 +1,32 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package kvFlag + +import ( + "fmt" + "strings" +) + +// Flag is a flag.Value implementation for parsing user variables +// from the command-line in the format of '-var key=value'. +type Flag map[string]string + +func (v *Flag) String() string { + return "" +} + +func (v *Flag) Set(raw string) error { + idx := strings.Index(raw, "=") + if idx == -1 { + return fmt.Errorf("no '=' value in arg: %q", raw) + } + + if *v == nil { + *v = make(map[string]string) + } + + key, value := raw[0:idx], raw[idx+1:] + (*v)[key] = value + return nil +} diff --git a/helper/flag-kv/flag_test.go b/helper/flag-kv/flag_test.go new file mode 100644 index 0000000..b083d52 --- /dev/null +++ b/helper/flag-kv/flag_test.go @@ -0,0 +1,59 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package kvFlag + +import ( + "flag" + "reflect" + "testing" +) + +func TestFlag_impl(t *testing.T) { + var _ flag.Value = new(Flag) +} + +func TestFlag(t *testing.T) { + cases := []struct { + Input string + Output map[string]string + Error bool + }{ + { + "key=value", + map[string]string{"key": "value"}, + false, + }, + + { + "key=", + map[string]string{"key": ""}, + false, + }, + + { + "key=foo=bar", + map[string]string{"key": "foo=bar"}, + false, + }, + + { + "key", + nil, + true, + }, + } + + for _, tc := range cases { + f := new(Flag) + err := f.Set(tc.Input) + if (err != nil) != tc.Error { + t.Fatalf("bad error. 
Input: %#v", tc.Input) + } + + actual := map[string]string(*f) + if !reflect.DeepEqual(actual, tc.Output) { + t.Fatalf("bad: %#v", actual) + } + } +} diff --git a/helper/flag-slice/flag.go b/helper/flag-slice/flag.go new file mode 100644 index 0000000..b823438 --- /dev/null +++ b/helper/flag-slice/flag.go @@ -0,0 +1,19 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package sliceflag + +import "strings" + +// StringFlag implements the flag.Value interface and allows multiple +// calls to the same variable to append a list. +type StringFlag []string + +func (s *StringFlag) String() string { + return strings.Join(*s, ",") +} + +func (s *StringFlag) Set(value string) error { + *s = append(*s, value) + return nil +} diff --git a/helper/flag-slice/flag_test.go b/helper/flag-slice/flag_test.go new file mode 100644 index 0000000..7973d57 --- /dev/null +++ b/helper/flag-slice/flag_test.go @@ -0,0 +1,36 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package sliceflag + +import ( + "flag" + "reflect" + "testing" +) + +func TestStringFlag_implements(t *testing.T) { + var raw interface{} + raw = new(StringFlag) + if _, ok := raw.(flag.Value); !ok { + t.Fatalf("StringFlag should be a Value") + } +} + +func TestStringFlagSet(t *testing.T) { + sv := new(StringFlag) + err := sv.Set("foo") + if err != nil { + t.Fatalf("err: %s", err) + } + + err = sv.Set("bar") + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []string{"foo", "bar"} + if !reflect.DeepEqual([]string(*sv), expected) { + t.Fatalf("Bad: %#v", sv) + } +} diff --git a/helper/forwarding/types.pb.go b/helper/forwarding/types.pb.go new file mode 100644 index 0000000..bf579d0 --- /dev/null +++ b/helper/forwarding/types.pb.go @@ -0,0 +1,519 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: helper/forwarding/types.proto + +package forwarding + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Not used right now but reserving in case it turns out that streaming + // makes things more economical on the gRPC side + // uint64 id = 1; + Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"` + Url *URL `protobuf:"bytes,3,opt,name=url,proto3" json:"url,omitempty"` + HeaderEntries map[string]*HeaderEntry `protobuf:"bytes,4,rep,name=header_entries,json=headerEntries,proto3" json:"header_entries,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Body []byte `protobuf:"bytes,5,opt,name=body,proto3" json:"body,omitempty"` + Host string `protobuf:"bytes,6,opt,name=host,proto3" json:"host,omitempty"` + RemoteAddr string `protobuf:"bytes,7,opt,name=remote_addr,json=remoteAddr,proto3" json:"remote_addr,omitempty"` + PeerCertificates [][]byte `protobuf:"bytes,8,rep,name=peer_certificates,json=peerCertificates,proto3" json:"peer_certificates,omitempty"` +} + +func (x *Request) Reset() { + *x = Request{} + if protoimpl.UnsafeEnabled { + mi := &file_helper_forwarding_types_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Request) ProtoMessage() {} + +func (x *Request) ProtoReflect() protoreflect.Message { + mi := &file_helper_forwarding_types_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Request.ProtoReflect.Descriptor instead. 
+func (*Request) Descriptor() ([]byte, []int) { + return file_helper_forwarding_types_proto_rawDescGZIP(), []int{0} +} + +func (x *Request) GetMethod() string { + if x != nil { + return x.Method + } + return "" +} + +func (x *Request) GetUrl() *URL { + if x != nil { + return x.Url + } + return nil +} + +func (x *Request) GetHeaderEntries() map[string]*HeaderEntry { + if x != nil { + return x.HeaderEntries + } + return nil +} + +func (x *Request) GetBody() []byte { + if x != nil { + return x.Body + } + return nil +} + +func (x *Request) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (x *Request) GetRemoteAddr() string { + if x != nil { + return x.RemoteAddr + } + return "" +} + +func (x *Request) GetPeerCertificates() [][]byte { + if x != nil { + return x.PeerCertificates + } + return nil +} + +type URL struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Scheme string `protobuf:"bytes,1,opt,name=scheme,proto3" json:"scheme,omitempty"` + Opaque string `protobuf:"bytes,2,opt,name=opaque,proto3" json:"opaque,omitempty"` + // This isn't needed now but might be in the future, so we'll skip the + // number to keep the ordering in net/url + // UserInfo user = 3; + Host string `protobuf:"bytes,4,opt,name=host,proto3" json:"host,omitempty"` + Path string `protobuf:"bytes,5,opt,name=path,proto3" json:"path,omitempty"` + RawPath string `protobuf:"bytes,6,opt,name=raw_path,json=rawPath,proto3" json:"raw_path,omitempty"` + // This also isn't needed right now, but we'll reserve the number + // bool force_query = 7; + RawQuery string `protobuf:"bytes,8,opt,name=raw_query,json=rawQuery,proto3" json:"raw_query,omitempty"` + Fragment string `protobuf:"bytes,9,opt,name=fragment,proto3" json:"fragment,omitempty"` +} + +func (x *URL) Reset() { + *x = URL{} + if protoimpl.UnsafeEnabled { + mi := &file_helper_forwarding_types_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *URL) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*URL) ProtoMessage() {} + +func (x *URL) ProtoReflect() protoreflect.Message { + mi := &file_helper_forwarding_types_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use URL.ProtoReflect.Descriptor instead. 
+func (*URL) Descriptor() ([]byte, []int) { + return file_helper_forwarding_types_proto_rawDescGZIP(), []int{1} +} + +func (x *URL) GetScheme() string { + if x != nil { + return x.Scheme + } + return "" +} + +func (x *URL) GetOpaque() string { + if x != nil { + return x.Opaque + } + return "" +} + +func (x *URL) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (x *URL) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *URL) GetRawPath() string { + if x != nil { + return x.RawPath + } + return "" +} + +func (x *URL) GetRawQuery() string { + if x != nil { + return x.RawQuery + } + return "" +} + +func (x *URL) GetFragment() string { + if x != nil { + return x.Fragment + } + return "" +} + +type HeaderEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Values []string `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +func (x *HeaderEntry) Reset() { + *x = HeaderEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_helper_forwarding_types_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeaderEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderEntry) ProtoMessage() {} + +func (x *HeaderEntry) ProtoReflect() protoreflect.Message { + mi := &file_helper_forwarding_types_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeaderEntry.ProtoReflect.Descriptor instead. +func (*HeaderEntry) Descriptor() ([]byte, []int) { + return file_helper_forwarding_types_proto_rawDescGZIP(), []int{2} +} + +func (x *HeaderEntry) GetValues() []string { + if x != nil { + return x.Values + } + return nil +} + +type Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Not used right now but reserving in case it turns out that streaming + // makes things more economical on the gRPC side + // uint64 id = 1; + StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` + Body []byte `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"` + // Added in 0.6.2 to ensure that the content-type is set appropriately, as + // well as any other information + HeaderEntries map[string]*HeaderEntry `protobuf:"bytes,4,rep,name=header_entries,json=headerEntries,proto3" json:"header_entries,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + LastRemoteWal uint64 `protobuf:"varint,5,opt,name=last_remote_wal,json=lastRemoteWal,proto3" json:"last_remote_wal,omitempty"` +} + +func (x *Response) Reset() { + *x = Response{} + if protoimpl.UnsafeEnabled { + mi := &file_helper_forwarding_types_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Response) ProtoMessage() {} + +func (x *Response) ProtoReflect() protoreflect.Message { + mi := &file_helper_forwarding_types_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return 
mi.MessageOf(x) +} + +// Deprecated: Use Response.ProtoReflect.Descriptor instead. +func (*Response) Descriptor() ([]byte, []int) { + return file_helper_forwarding_types_proto_rawDescGZIP(), []int{3} +} + +func (x *Response) GetStatusCode() uint32 { + if x != nil { + return x.StatusCode + } + return 0 +} + +func (x *Response) GetBody() []byte { + if x != nil { + return x.Body + } + return nil +} + +func (x *Response) GetHeaderEntries() map[string]*HeaderEntry { + if x != nil { + return x.HeaderEntries + } + return nil +} + +func (x *Response) GetLastRemoteWal() uint64 { + if x != nil { + return x.LastRemoteWal + } + return 0 +} + +var File_helper_forwarding_types_proto protoreflect.FileDescriptor + +var file_helper_forwarding_types_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x68, 0x65, 0x6c, 0x70, 0x65, 0x72, 0x2f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, + 0x69, 0x6e, 0x67, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x0a, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x22, 0xe4, 0x02, 0x0a, 0x07, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, + 0x21, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, + 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x2e, 0x55, 0x52, 0x4c, 0x52, 0x03, 0x75, + 0x72, 0x6c, 0x12, 0x4d, 0x0a, 0x0e, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x74, + 0x72, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x66, 0x6f, 0x72, + 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x0d, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, + 0x73, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x64, 0x64, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x65, + 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, + 0x08, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x1a, 0x59, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0xb1, 0x01, 0x0a, 0x03, 0x55, 0x52, 0x4c, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x70, 0x61, 0x71, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x6f, 0x70, 0x61, 
0x71, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, + 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x12, 0x19, 0x0a, 0x08, 0x72, 0x61, 0x77, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x72, 0x61, 0x77, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1b, 0x0a, + 0x09, 0x72, 0x61, 0x77, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x72, 0x61, 0x77, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x72, + 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x72, + 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x22, 0x25, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x92, 0x02, + 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x62, + 0x6f, 0x64, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, + 0x4e, 0x0a, 0x0e, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, + 0x64, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x0d, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, + 0x26, 0x0a, 0x0f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x77, + 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x52, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x57, 0x61, 0x6c, 0x1a, 0x59, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x2e, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, + 0x2f, 0x68, 0x65, 0x6c, 0x70, 0x65, 0x72, 0x2f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x69, + 0x6e, 0x67, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_helper_forwarding_types_proto_rawDescOnce sync.Once + file_helper_forwarding_types_proto_rawDescData = file_helper_forwarding_types_proto_rawDesc +) + +func file_helper_forwarding_types_proto_rawDescGZIP() []byte { + file_helper_forwarding_types_proto_rawDescOnce.Do(func() { + file_helper_forwarding_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_helper_forwarding_types_proto_rawDescData) + }) + return file_helper_forwarding_types_proto_rawDescData +} + +var 
file_helper_forwarding_types_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_helper_forwarding_types_proto_goTypes = []interface{}{ + (*Request)(nil), // 0: forwarding.Request + (*URL)(nil), // 1: forwarding.URL + (*HeaderEntry)(nil), // 2: forwarding.HeaderEntry + (*Response)(nil), // 3: forwarding.Response + nil, // 4: forwarding.Request.HeaderEntriesEntry + nil, // 5: forwarding.Response.HeaderEntriesEntry +} +var file_helper_forwarding_types_proto_depIdxs = []int32{ + 1, // 0: forwarding.Request.url:type_name -> forwarding.URL + 4, // 1: forwarding.Request.header_entries:type_name -> forwarding.Request.HeaderEntriesEntry + 5, // 2: forwarding.Response.header_entries:type_name -> forwarding.Response.HeaderEntriesEntry + 2, // 3: forwarding.Request.HeaderEntriesEntry.value:type_name -> forwarding.HeaderEntry + 2, // 4: forwarding.Response.HeaderEntriesEntry.value:type_name -> forwarding.HeaderEntry + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_helper_forwarding_types_proto_init() } +func file_helper_forwarding_types_proto_init() { + if File_helper_forwarding_types_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_helper_forwarding_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_helper_forwarding_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*URL); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_helper_forwarding_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeaderEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_helper_forwarding_types_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_helper_forwarding_types_proto_rawDesc, + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_helper_forwarding_types_proto_goTypes, + DependencyIndexes: file_helper_forwarding_types_proto_depIdxs, + MessageInfos: file_helper_forwarding_types_proto_msgTypes, + }.Build() + File_helper_forwarding_types_proto = out.File + file_helper_forwarding_types_proto_rawDesc = nil + file_helper_forwarding_types_proto_goTypes = nil + file_helper_forwarding_types_proto_depIdxs = nil +} diff --git a/helper/forwarding/types.proto b/helper/forwarding/types.proto new file mode 100644 index 0000000..7624257 --- /dev/null +++ b/helper/forwarding/types.proto @@ -0,0 +1,52 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+syntax = "proto3";
+
+option go_package = "github.com/hashicorp/vault/helper/forwarding";
+
+package forwarding;
+
+message Request {
+ // Not used right now but reserving in case it turns out that streaming
+ // makes things more economical on the gRPC side
+ //uint64 id = 1;
+ string method = 2;
+ URL url = 3;
+ map<string, HeaderEntry> header_entries = 4;
+ bytes body = 5;
+ string host = 6;
+ string remote_addr = 7;
+ repeated bytes peer_certificates = 8;
+}
+
+message URL {
+ string scheme = 1;
+ string opaque = 2;
+ // This isn't needed now but might be in the future, so we'll skip the
+ // number to keep the ordering in net/url
+ //UserInfo user = 3;
+ string host = 4;
+ string path = 5;
+ string raw_path = 6;
+ // This also isn't needed right now, but we'll reserve the number
+ //bool force_query = 7;
+ string raw_query = 8;
+ string fragment = 9;
+}
+
+message HeaderEntry {
+ repeated string values = 1;
+}
+
+message Response {
+ // Not used right now but reserving in case it turns out that streaming
+ // makes things more economical on the gRPC side
+ //uint64 id = 1;
+ uint32 status_code = 2;
+ bytes body = 3;
+ // Added in 0.6.2 to ensure that the content-type is set appropriately, as
+ // well as any other information
+ map<string, HeaderEntry> header_entries = 4;
+ uint64 last_remote_wal = 5;
+} diff --git a/helper/forwarding/util.go b/helper/forwarding/util.go new file mode 100644 index 0000000..0df733f --- /dev/null +++ b/helper/forwarding/util.go @@ -0,0 +1,207 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package forwarding
+
+import (
+ "bytes"
+ "crypto/tls"
+ "crypto/x509"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/hashicorp/vault/sdk/helper/compressutil"
+ "github.com/hashicorp/vault/sdk/helper/jsonutil"
+)
+
+type bufCloser struct {
+ *bytes.Buffer
+}
+
+func (b bufCloser) Close() error {
+ b.Reset()
+ return nil
+}
+
+// GenerateForwardedHTTPRequest generates a new http.Request that contains the
+// original request's information in the new request's body.
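+//
+// A minimal usage sketch (illustrative, not part of the upstream file; the
+// address, incoming request, and httpClient below are placeholders):
+//
+//	fwd, err := GenerateForwardedHTTPRequest(incoming, "https://active.example:8201")
+//	if err != nil {
+//		return err
+//	}
+//	// fwd is a POST whose body encodes the entire inbound request, using
+//	// proto3 unless VAULT_MESSAGE_TYPE selects a JSON encoding.
+//	resp, err := httpClient.Do(fwd)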
+func GenerateForwardedHTTPRequest(req *http.Request, addr string) (*http.Request, error) {
+ fq, err := GenerateForwardedRequest(req)
+ if err != nil {
+ return nil, err
+ }
+
+ var newBody []byte
+ switch os.Getenv("VAULT_MESSAGE_TYPE") {
+ case "json":
+ newBody, err = jsonutil.EncodeJSON(fq)
+ case "json_compress":
+ newBody, err = jsonutil.EncodeJSONAndCompress(fq, &compressutil.CompressionConfig{
+ Type: compressutil.CompressionTypeLZW,
+ })
+ case "proto3":
+ fallthrough
+ default:
+ newBody, err = proto.Marshal(fq)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ ret, err := http.NewRequest("POST", addr, bytes.NewBuffer(newBody))
+ if err != nil {
+ return nil, err
+ }
+
+ return ret, nil
+}
+
+func GenerateForwardedRequest(req *http.Request) (*Request, error) {
+ var reader io.Reader = req.Body
+ body, err := io.ReadAll(reader)
+ if err != nil {
+ return nil, err
+ }
+
+ fq := Request{
+ Method: req.Method,
+ HeaderEntries: make(map[string]*HeaderEntry, len(req.Header)),
+ Host: req.Host,
+ RemoteAddr: req.RemoteAddr,
+ Body: body,
+ }
+
+ reqURL := req.URL
+ fq.Url = &URL{
+ Scheme: reqURL.Scheme,
+ Opaque: reqURL.Opaque,
+ Host: reqURL.Host,
+ Path: reqURL.Path,
+ RawPath: reqURL.RawPath,
+ RawQuery: reqURL.RawQuery,
+ Fragment: reqURL.Fragment,
+ }
+
+ for k, v := range req.Header {
+ fq.HeaderEntries[k] = &HeaderEntry{
+ Values: v,
+ }
+ }
+
+ if req.TLS != nil && req.TLS.PeerCertificates != nil && len(req.TLS.PeerCertificates) > 0 {
+ fq.PeerCertificates = make([][]byte, len(req.TLS.PeerCertificates))
+ for i, cert := range req.TLS.PeerCertificates {
+ fq.PeerCertificates[i] = cert.Raw
+ }
+ }
+
+ return &fq, nil
+}
+
+// ParseForwardedHTTPRequest generates a new http.Request composed of the
+// values in the given request's body, assuming it correctly parses into a
+// ForwardedRequest.
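+//
+// Receiving-side sketch (illustrative; assumes req is the POST produced by
+// GenerateForwardedHTTPRequest on the sending node):
+//
+//	original, err := ParseForwardedHTTPRequest(req)
+//	if err != nil {
+//		return err
+//	}
+//	// original mirrors the forwarded method, URL, headers, body, and any
+//	// client TLS peer certificates, and can be served like a local request.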
+func ParseForwardedHTTPRequest(req *http.Request) (*http.Request, error) { + buf := bytes.NewBuffer(nil) + _, err := buf.ReadFrom(req.Body) + if err != nil { + return nil, err + } + + fq := new(Request) + switch os.Getenv("VAULT_MESSAGE_TYPE") { + case "json", "json_compress": + err = jsonutil.DecodeJSON(buf.Bytes(), fq) + default: + err = proto.Unmarshal(buf.Bytes(), fq) + } + if err != nil { + return nil, err + } + + return ParseForwardedRequest(fq) +} + +func ParseForwardedRequest(fq *Request) (*http.Request, error) { + buf := bufCloser{ + Buffer: bytes.NewBuffer(fq.Body), + } + + ret := &http.Request{ + Method: fq.Method, + Header: make(map[string][]string, len(fq.HeaderEntries)), + Body: buf, + Host: fq.Host, + RemoteAddr: fq.RemoteAddr, + } + + ret.URL = &url.URL{ + Scheme: fq.Url.Scheme, + Opaque: fq.Url.Opaque, + Host: fq.Url.Host, + Path: fq.Url.Path, + RawPath: fq.Url.RawPath, + RawQuery: fq.Url.RawQuery, + Fragment: fq.Url.Fragment, + } + + for k, v := range fq.HeaderEntries { + ret.Header[k] = v.Values + } + + if fq.PeerCertificates != nil && len(fq.PeerCertificates) > 0 { + ret.TLS = &tls.ConnectionState{ + PeerCertificates: make([]*x509.Certificate, len(fq.PeerCertificates)), + } + for i, certBytes := range fq.PeerCertificates { + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + ret.TLS.PeerCertificates[i] = cert + } + } + + return ret, nil +} + +type RPCResponseWriter struct { + statusCode int + header http.Header + body *bytes.Buffer +} + +// NewRPCResponseWriter returns an initialized RPCResponseWriter +func NewRPCResponseWriter() *RPCResponseWriter { + w := &RPCResponseWriter{ + header: make(http.Header), + body: new(bytes.Buffer), + statusCode: 200, + } + // w.header.Set("Content-Type", "application/octet-stream") + return w +} + +func (w *RPCResponseWriter) Header() http.Header { + return w.header +} + +func (w *RPCResponseWriter) Write(buf []byte) (int, error) { + w.body.Write(buf) + return len(buf), nil +} + +func (w *RPCResponseWriter) WriteHeader(code int) { + w.statusCode = code +} + +func (w *RPCResponseWriter) StatusCode() int { + return w.statusCode +} + +func (w *RPCResponseWriter) Body() *bytes.Buffer { + return w.body +} diff --git a/helper/forwarding/util_test.go b/helper/forwarding/util_test.go new file mode 100644 index 0000000..192646a --- /dev/null +++ b/helper/forwarding/util_test.go @@ -0,0 +1,129 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package forwarding + +import ( + "bufio" + "bytes" + "net/http" + "os" + "reflect" + "testing" +) + +func Test_ForwardedRequest_GenerateParse(t *testing.T) { + testForwardedRequestGenerateParse(t) +} + +func Benchmark_ForwardedRequest_GenerateParse_JSON(b *testing.B) { + os.Setenv("VAULT_MESSAGE_TYPE", "json") + var totalSize int64 + var numRuns int64 + for i := 0; i < b.N; i++ { + totalSize += testForwardedRequestGenerateParse(b) + numRuns++ + } + b.Logf("message size per op: %d", totalSize/numRuns) +} + +func Benchmark_ForwardedRequest_GenerateParse_JSON_Compressed(b *testing.B) { + os.Setenv("VAULT_MESSAGE_TYPE", "json_compress") + var totalSize int64 + var numRuns int64 + for i := 0; i < b.N; i++ { + totalSize += testForwardedRequestGenerateParse(b) + numRuns++ + } + b.Logf("message size per op: %d", totalSize/numRuns) +} + +func Benchmark_ForwardedRequest_GenerateParse_Proto3(b *testing.B) { + os.Setenv("VAULT_MESSAGE_TYPE", "proto3") + var totalSize int64 + var numRuns int64 + for i := 0; i < b.N; i++ { + totalSize += testForwardedRequestGenerateParse(b) + numRuns++ + } + b.Logf("message size per op: %d", totalSize/numRuns) +} + +func testForwardedRequestGenerateParse(t testing.TB) int64 { + bodBuf := bytes.NewReader([]byte(`{ "foo": "bar", "zip": { "argle": "bargle", neet: 0 } }`)) + req, err := http.NewRequest("FOOBAR", "https://pushit.real.good:9281/snicketysnack?furbleburble=bloopetybloop", bodBuf) + if err != nil { + t.Fatal(err) + } + + // We want to get the fields we would expect from an incoming request, so + // we write it out and then read it again + buf1 := bytes.NewBuffer(nil) + err = req.Write(buf1) + if err != nil { + t.Fatal(err) + } + + // Read it back in, parsing like a server + bufr1 := bufio.NewReader(buf1) + initialReq, err := http.ReadRequest(bufr1) + if err != nil { + t.Fatal(err) + } + + // Generate the request with the forwarded request in the body + req, err = GenerateForwardedHTTPRequest(initialReq, "https://bloopety.bloop:8201") + if err != nil { + t.Fatal(err) + } + + // Perform another "round trip" + buf2 := bytes.NewBuffer(nil) + err = req.Write(buf2) + if err != nil { + t.Fatal(err) + } + size := int64(buf2.Len()) + bufr2 := bufio.NewReader(buf2) + intreq, err := http.ReadRequest(bufr2) + if err != nil { + t.Fatal(err) + } + + // Now extract the forwarded request to generate a final request for processing + finalReq, err := ParseForwardedHTTPRequest(intreq) + if err != nil { + t.Fatal(err) + } + + switch { + case initialReq.Method != finalReq.Method: + t.Fatalf("bad method:\ninitialReq:\n%#v\nfinalReq:\n%#v\n", *initialReq, *finalReq) + case initialReq.RemoteAddr != finalReq.RemoteAddr: + t.Fatalf("bad remoteaddr:\ninitialReq:\n%#v\nfinalReq:\n%#v\n", *initialReq, *finalReq) + case initialReq.Host != finalReq.Host: + t.Fatalf("bad host:\ninitialReq:\n%#v\nfinalReq:\n%#v\n", *initialReq, *finalReq) + case !reflect.DeepEqual(initialReq.URL, finalReq.URL): + t.Fatalf("bad url:\ninitialReq:\n%#v\nfinalReq:\n%#v\n", *initialReq.URL, *finalReq.URL) + case !reflect.DeepEqual(initialReq.Header, finalReq.Header): + t.Fatalf("bad header:\ninitialReq:\n%#v\nfinalReq:\n%#v\n", *initialReq, *finalReq) + default: + // Compare bodies + bodBuf.Seek(0, 0) + initBuf := bytes.NewBuffer(nil) + _, err = initBuf.ReadFrom(bodBuf) + if err != nil { + t.Fatal(err) + } + finBuf := bytes.NewBuffer(nil) + _, err = finBuf.ReadFrom(finalReq.Body) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(initBuf.Bytes(), finBuf.Bytes()) { + 
t.Fatalf("badbody :\ninitialReq:\n%#v\nfinalReq:\n%#v\n", initBuf.Bytes(), finBuf.Bytes()) + } + } + + return size +} diff --git a/helper/hostutil/hostinfo.go b/helper/hostutil/hostinfo.go new file mode 100644 index 0000000..25c11e0 --- /dev/null +++ b/helper/hostutil/hostinfo.go @@ -0,0 +1,161 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !openbsd + +package hostutil + +import ( + "context" + "fmt" + "time" + + "github.com/hashicorp/go-multierror" + "github.com/shirou/gopsutil/v3/cpu" + "github.com/shirou/gopsutil/v3/disk" + "github.com/shirou/gopsutil/v3/host" + "github.com/shirou/gopsutil/v3/mem" +) + +// HostInfo holds all the information that gets captured on the host. The +// set of information captured depends on the host operating system. For more +// information, refer to: https://github.com/shirou/gopsutil#current-status +type HostInfo struct { + // Timestamp returns the timestamp in UTC on the collection time. + Timestamp time.Time `json:"timestamp"` + // CPU returns information about the CPU such as family, model, cores, etc. + CPU []cpu.InfoStat `json:"cpu"` + // CPUTimes returns statistics on CPU usage represented in Jiffies. + CPUTimes []cpu.TimesStat `json:"cpu_times"` + // Disk returns statitics on disk usage for all accessible partitions. + Disk []*disk.UsageStat `json:"disk"` + // Host returns general host information such as hostname, platform, uptime, + // kernel version, etc. + Host *HostInfoStat `json:"host"` + // Memory contains statistics about the memory such as total, available, and + // used memory in number of bytes. + Memory *VirtualMemoryStat `json:"memory"` +} + +// CollectHostInfo returns information on the host, which includes general +// host status, CPU, memory, and disk utilization. +// +// The function does a best-effort capture on the most information possible, +// continuing on capture errors encountered and appending them to a resulting +// multierror.Error that gets returned at the end. 
+func CollectHostInfo(ctx context.Context) (*HostInfo, error) { + var retErr *multierror.Error + info := &HostInfo{Timestamp: time.Now().UTC()} + + if h, err := CollectHostInfoStat(ctx); err != nil { + retErr = multierror.Append(retErr, &HostInfoError{"host", err}) + } else { + info.Host = h + } + + if v, err := CollectHostMemory(ctx); err != nil { + retErr = multierror.Append(retErr, &HostInfoError{"memory", err}) + } else { + info.Memory = v + } + + parts, err := disk.PartitionsWithContext(ctx, false) + if err != nil { + retErr = multierror.Append(retErr, &HostInfoError{"disk", err}) + } else { + var usage []*disk.UsageStat + for i, part := range parts { + u, err := disk.UsageWithContext(ctx, part.Mountpoint) + if err != nil { + retErr = multierror.Append(retErr, &HostInfoError{fmt.Sprintf("disk.%d", i), err}) + continue + } + usage = append(usage, u) + + } + info.Disk = usage + } + + if c, err := cpu.InfoWithContext(ctx); err != nil { + retErr = multierror.Append(retErr, &HostInfoError{"cpu", err}) + } else { + info.CPU = c + } + + t, err := cpu.TimesWithContext(ctx, true) + if err != nil { + retErr = multierror.Append(retErr, &HostInfoError{"cpu_times", err}) + } else { + info.CPUTimes = t + } + + return info, retErr.ErrorOrNil() +} + +func CollectHostMemory(ctx context.Context) (*VirtualMemoryStat, error) { + m, err := mem.VirtualMemoryWithContext(ctx) + if err != nil { + return nil, err + } + + return &VirtualMemoryStat{ + Total: m.Total, + Available: m.Available, + Used: m.Used, + UsedPercent: m.UsedPercent, + Free: m.Free, + Active: m.Active, + Inactive: m.Inactive, + Wired: m.Wired, + Laundry: m.Laundry, + Buffers: m.Buffers, + Cached: m.Cached, + Writeback: m.WriteBack, + Dirty: m.Dirty, + WritebackTmp: m.WriteBackTmp, + Shared: m.Shared, + Slab: m.Slab, + SReclaimable: m.Sreclaimable, + SUnreclaim: m.Sunreclaim, + PageTables: m.PageTables, + SwapCached: m.SwapCached, + CommitLimit: m.CommitLimit, + CommittedAS: m.CommittedAS, + HighTotal: m.HighTotal, + HighFree: m.HighFree, + LowTotal: m.LowTotal, + LowFree: m.LowFree, + SwapTotal: m.SwapTotal, + SwapFree: m.SwapFree, + Mapped: m.Mapped, + VMallocTotal: m.VmallocTotal, + VMallocUsed: m.VmallocUsed, + VMallocChunk: m.VmallocChunk, + HugePagesTotal: m.HugePagesTotal, + HugePagesFree: m.HugePagesFree, + HugePageSize: m.HugePageSize, + }, nil +} + +func CollectHostInfoStat(ctx context.Context) (*HostInfoStat, error) { + h, err := host.InfoWithContext(ctx) + if err != nil { + return nil, err + } + + return &HostInfoStat{ + Hostname: h.Hostname, + Uptime: h.Uptime, + BootTime: h.BootTime, + Procs: h.Procs, + OS: h.OS, + Platform: h.Platform, + PlatformFamily: h.PlatformFamily, + PlatformVersion: h.PlatformVersion, + KernelVersion: h.KernelVersion, + KernelArch: h.KernelArch, + VirtualizationSystem: h.VirtualizationSystem, + VirtualizationRole: h.VirtualizationRole, + HostID: h.HostID, + }, nil +} diff --git a/helper/hostutil/hostinfo_error.go b/helper/hostutil/hostinfo_error.go new file mode 100644 index 0000000..afbec28 --- /dev/null +++ b/helper/hostutil/hostinfo_error.go @@ -0,0 +1,20 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package hostutil + +import "fmt" + +// HostInfoError is a typed error for more convenient error checking. 
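+//
+// Sketch of pulling these out of a combined error, mirroring the approach
+// used in this package's tests (via hashicorp/errwrap):
+//
+//	if errwrap.ContainsType(err, new(HostInfoError)) {
+//		for _, e := range errwrap.GetAllType(err, new(HostInfoError)) {
+//			fmt.Println(e.(*HostInfoError).Type) // e.g. "cpu", "disk.0"
+//		}
+//	}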
+type HostInfoError struct {
+ Type string
+ Err error
+}
+
+func (e *HostInfoError) WrappedErrors() []error {
+ return []error{e.Err}
+}
+
+func (e *HostInfoError) Error() string {
+ return fmt.Sprintf("%s: %s", e.Type, e.Err.Error())
+} diff --git a/helper/hostutil/hostinfo_openbsd.go b/helper/hostutil/hostinfo_openbsd.go new file mode 100644 index 0000000..dbe1655 --- /dev/null +++ b/helper/hostutil/hostinfo_openbsd.go @@ -0,0 +1,29 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+//go:build openbsd
+
+package hostutil
+
+import (
+ "context"
+ "fmt"
+ "time"
+)
+
+type HostInfo struct {
+ Timestamp time.Time `json:"timestamp"`
+ CPU []interface{} `json:"cpu"`
+ CPUTimes []interface{} `json:"cpu_times"`
+ Disk []interface{} `json:"disk"`
+ Host interface{} `json:"host"`
+ Memory interface{} `json:"memory"`
+}
+
+func CollectHostInfo(ctx context.Context) (*HostInfo, error) {
+ return nil, fmt.Errorf("host info not supported on this platform")
+}
+
+func CollectHostMemory(ctx context.Context) (*VirtualMemoryStat, error) {
+ return nil, fmt.Errorf("host info not supported on this platform")
+} diff --git a/helper/hostutil/hostinfo_test.go b/helper/hostutil/hostinfo_test.go new file mode 100644 index 0000000..0f53744 --- /dev/null +++ b/helper/hostutil/hostinfo_test.go @@ -0,0 +1,63 @@ +// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package hostutil
+
+import (
+ "context"
+ "strings"
+ "testing"
+
+ "github.com/hashicorp/errwrap"
+)
+
+func TestCollectHostInfo(t *testing.T) {
+ info, err := CollectHostInfo(context.Background())
+ if err != nil && !errwrap.ContainsType(err, new(HostInfoError)) {
+ t.Fatal(err)
+ }
+
+ // Get all the possible HostInfoError errors and check for the resulting
+ // stat if the package is able to fetch it for the platform we're testing
+ // on.
+ errs := errwrap.GetAllType(err, new(HostInfoError))
+
+ if info.Timestamp.IsZero() {
+ t.Fatal("expected non-zero Timestamp")
+ }
+ if !checkErrTypeExists(errs, "cpu") && info.CPU == nil {
+ t.Fatal("expected non-nil CPU value")
+ }
+ if !checkErrTypeExists(errs, "cpu_times") && info.CPUTimes == nil {
+ t.Fatal("expected non-nil CPUTimes value")
+ }
+ if !checkErrTypeExists(errs, "disk") && info.Disk == nil {
+ t.Fatal("expected non-nil Disk value")
+ }
+ if !checkErrTypeExists(errs, "host") && info.Host == nil {
+ t.Fatal("expected non-nil Host value")
+ }
+ if !checkErrTypeExists(errs, "memory") && info.Memory == nil {
+ t.Fatal("expected non-nil Memory value")
+ }
+}
+
+// checkErrTypeExists is a helper that checks whether a particular
+// HostInfoError.Type exists within a set of errors.
+func checkErrTypeExists(errs []error, errType string) bool {
+ for _, e := range errs {
+ err, ok := e.(*HostInfoError)
+ if !ok {
+ return false
+ }
+
+ // This is mainly for disk errors, since the type string can contain an
+ // index for the disk.
+ parts := strings.SplitN(err.Type, ".", 2)
+
+ if parts[0] == errType {
+ return true
+ }
+ }
+ return false
+} diff --git a/helper/hostutil/hostinfo_util.go b/helper/hostutil/hostinfo_util.go new file mode 100644 index 0000000..8811746 --- /dev/null +++ b/helper/hostutil/hostinfo_util.go @@ -0,0 +1,124 @@ +// Copyright (c) 2014, WAKAYAMA Shirou
+// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of the gopsutil authors nor the names of its contributors +// may be used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Copied from https://github.com/shirou/gopsutil/blob/b49f37e9f30f49530cf2ad6038a4dac1b746c8f7/mem/mem.go#L15 +// Copied from https://github.com/shirou/gopsutil/blob/b49f37e9f30f49530cf2ad6038a4dac1b746c8f7/host/host.go#L17 + +package hostutil + +// VirtualMemoryStat holds commonly used memory measurements. We must have a +// local type here in order to avoid building the gopsutil library on certain +// arch types. +// +// This struct is copied to maintain backwards compatibility in the Vault host-info API. +// This is done because gopsutil changed JSON struct tags between its v2 and v3 releases. +// For details see https://github.com/shirou/gopsutil/tree/master/_tools/v3migration. +type VirtualMemoryStat struct { + // Total amount of RAM on this system + Total uint64 `json:"total"` + + // RAM available for programs to allocate + // + // This value is computed from the kernel specific values. + Available uint64 `json:"available"` + + // RAM used by programs + // + // This value is computed from the kernel specific values. + Used uint64 `json:"used"` + + // Percentage of RAM used by programs + // + // This value is computed from the kernel specific values. + UsedPercent float64 `json:"usedPercent"` + + // This is the kernel's notion of free memory; RAM chips whose bits nobody + // cares about the value of right now. For a human consumable number, + // Available is what you really want. 
+ Free uint64 `json:"free"` + + // OS X / BSD specific numbers: + // http://www.macyourself.com/2010/02/17/what-is-free-wired-active-and-inactive-system-memory-ram/ + Active uint64 `json:"active"` + Inactive uint64 `json:"inactive"` + Wired uint64 `json:"wired"` + + // FreeBSD specific numbers: + // https://reviews.freebsd.org/D8467 + Laundry uint64 `json:"laundry"` + + // Linux specific numbers + // https://www.centos.org/docs/5/html/5.1/Deployment_Guide/s2-proc-meminfo.html + // https://www.kernel.org/doc/Documentation/filesystems/proc.txt + // https://www.kernel.org/doc/Documentation/vm/overcommit-accounting + Buffers uint64 `json:"buffers"` + Cached uint64 `json:"cached"` + Writeback uint64 `json:"writeback"` + Dirty uint64 `json:"dirty"` + WritebackTmp uint64 `json:"writebacktmp"` + Shared uint64 `json:"shared"` + Slab uint64 `json:"slab"` + SReclaimable uint64 `json:"sreclaimable"` + SUnreclaim uint64 `json:"sunreclaim"` + PageTables uint64 `json:"pagetables"` + SwapCached uint64 `json:"swapcached"` + CommitLimit uint64 `json:"commitlimit"` + CommittedAS uint64 `json:"committedas"` + HighTotal uint64 `json:"hightotal"` + HighFree uint64 `json:"highfree"` + LowTotal uint64 `json:"lowtotal"` + LowFree uint64 `json:"lowfree"` + SwapTotal uint64 `json:"swaptotal"` + SwapFree uint64 `json:"swapfree"` + Mapped uint64 `json:"mapped"` + VMallocTotal uint64 `json:"vmalloctotal"` + VMallocUsed uint64 `json:"vmallocused"` + VMallocChunk uint64 `json:"vmallocchunk"` + HugePagesTotal uint64 `json:"hugepagestotal"` + HugePagesFree uint64 `json:"hugepagesfree"` + HugePageSize uint64 `json:"hugepagesize"` +} + +// HostInfoStat describes the host status. +// +// This struct is copied to maintain backwards compatibility in the Vault host-info API. +// This is done because gopsutil changed JSON struct tags between its v2 and v3 releases. +// For details see https://github.com/shirou/gopsutil/tree/master/_tools/v3migration. +type HostInfoStat struct { + Hostname string `json:"hostname"` + Uptime uint64 `json:"uptime"` + BootTime uint64 `json:"bootTime"` + Procs uint64 `json:"procs"` + OS string `json:"os"` + Platform string `json:"platform"` + PlatformFamily string `json:"platformFamily"` + PlatformVersion string `json:"platformVersion"` + KernelVersion string `json:"kernelVersion"` + KernelArch string `json:"kernelArch"` + VirtualizationSystem string `json:"virtualizationSystem"` + VirtualizationRole string `json:"virtualizationRole"` + HostID string `json:"hostid"` +} diff --git a/helper/identity/identity.go b/helper/identity/identity.go new file mode 100644 index 0000000..a7769f0 --- /dev/null +++ b/helper/identity/identity.go @@ -0,0 +1,159 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package identity + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + "github.com/hashicorp/vault/sdk/logical" +) + +func (g *Group) Clone() (*Group, error) { + if g == nil { + return nil, fmt.Errorf("nil group") + } + + marshaledGroup, err := proto.Marshal(g) + if err != nil { + return nil, fmt.Errorf("failed to marshal group: %w", err) + } + + var clonedGroup Group + err = proto.Unmarshal(marshaledGroup, &clonedGroup) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal group: %w", err) + } + + return &clonedGroup, nil +} + +func (e *Entity) Clone() (*Entity, error) { + if e == nil { + return nil, fmt.Errorf("nil entity") + } + + marshaledEntity, err := proto.Marshal(e) + if err != nil { + return nil, fmt.Errorf("failed to marshal entity: %w", err) + } + + var clonedEntity Entity + err = proto.Unmarshal(marshaledEntity, &clonedEntity) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal entity: %w", err) + } + + return &clonedEntity, nil +} + +func (e *Entity) UpsertAlias(alias *Alias) { + for i, item := range e.Aliases { + if item.ID == alias.ID { + e.Aliases[i] = alias + return + } + } + e.Aliases = append(e.Aliases, alias) +} + +func (p *Alias) Clone() (*Alias, error) { + if p == nil { + return nil, fmt.Errorf("nil alias") + } + + marshaledAlias, err := proto.Marshal(p) + if err != nil { + return nil, fmt.Errorf("failed to marshal alias: %w", err) + } + + var clonedAlias Alias + err = proto.Unmarshal(marshaledAlias, &clonedAlias) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal alias: %w", err) + } + + return &clonedAlias, nil +} + +// ToSDKAlias converts the provided alias to an SDK compatible alias. +func ToSDKAlias(a *Alias) *logical.Alias { + if a == nil { + return nil + } + metadata := make(map[string]string, len(a.Metadata)) + for k, v := range a.Metadata { + metadata[k] = v + } + + return &logical.Alias{ + Name: a.Name, + ID: a.ID, + MountAccessor: a.MountAccessor, + MountType: a.MountType, + Metadata: metadata, + NamespaceID: a.NamespaceID, + CustomMetadata: a.CustomMetadata, + } +} + +// ToSDKEntity converts the provided entity to an SDK compatible entity. +func ToSDKEntity(e *Entity) *logical.Entity { + if e == nil { + return nil + } + + aliases := make([]*logical.Alias, len(e.Aliases)) + + for i, a := range e.Aliases { + aliases[i] = ToSDKAlias(a) + } + + metadata := make(map[string]string, len(e.Metadata)) + for k, v := range e.Metadata { + metadata[k] = v + } + + return &logical.Entity{ + ID: e.ID, + Name: e.Name, + Disabled: e.Disabled, + Aliases: aliases, + Metadata: metadata, + NamespaceID: e.NamespaceID, + } +} + +// ToSDKGroup converts the provided group to an SDK compatible group. +func ToSDKGroup(g *Group) *logical.Group { + if g == nil { + return nil + } + + metadata := make(map[string]string, len(g.Metadata)) + for k, v := range g.Metadata { + metadata[k] = v + } + + return &logical.Group{ + ID: g.ID, + Name: g.Name, + Metadata: metadata, + NamespaceID: g.NamespaceID, + } +} + +// ToSDKGroups converts the provided group list to an SDK compatible group list. 
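+//
+// Illustrative only; a nil input yields nil, and each element is converted
+// via ToSDKGroup:
+//
+//	sdkGroups := ToSDKGroups(groups) // groups: []*identity.Group
+//	for _, g := range sdkGroups {
+//		fmt.Println(g.ID, g.Name)
+//	}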
+func ToSDKGroups(groups []*Group) []*logical.Group { + if groups == nil { + return nil + } + + ret := make([]*logical.Group, len(groups)) + + for i, g := range groups { + ret[i] = ToSDKGroup(g) + } + return ret +} diff --git a/helper/identity/mfa/mfa.go b/helper/identity/mfa/mfa.go new file mode 100644 index 0000000..1f8af4f --- /dev/null +++ b/helper/identity/mfa/mfa.go @@ -0,0 +1,48 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package mfa + +import ( + "fmt" + + "github.com/golang/protobuf/proto" +) + +func (c *Config) Clone() (*Config, error) { + if c == nil { + return nil, fmt.Errorf("nil config") + } + + marshaledConfig, err := proto.Marshal(c) + if err != nil { + return nil, fmt.Errorf("failed to marshal config: %w", err) + } + + var clonedConfig Config + err = proto.Unmarshal(marshaledConfig, &clonedConfig) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal config: %w", err) + } + + return &clonedConfig, nil +} + +func (c *MFAEnforcementConfig) Clone() (*MFAEnforcementConfig, error) { + if c == nil { + return nil, fmt.Errorf("nil config") + } + + marshaledConfig, err := proto.Marshal(c) + if err != nil { + return nil, fmt.Errorf("failed to marshal config: %w", err) + } + + var clonedConfig MFAEnforcementConfig + err = proto.Unmarshal(marshaledConfig, &clonedConfig) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal config: %w", err) + } + + return &clonedConfig, nil +} diff --git a/helper/identity/mfa/sentinel.go b/helper/identity/mfa/sentinel.go new file mode 100644 index 0000000..a587aa7 --- /dev/null +++ b/helper/identity/mfa/sentinel.go @@ -0,0 +1,28 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package mfa + +func (c *Config) SentinelGet(key string) (interface{}, error) { + if c == nil { + return nil, nil + } + switch key { + case "type": + return c.Type, nil + case "name": + return c.Name, nil + case "mount_accessor": + return c.MountAccessor, nil + } + + return nil, nil +} + +func (c *Config) SentinelKeys() []string { + return []string{ + "type", + "name", + "mount_accessor", + } +} diff --git a/helper/identity/mfa/types.pb.go b/helper/identity/mfa/types.pb.go new file mode 100644 index 0000000..57dbab0 --- /dev/null +++ b/helper/identity/mfa/types.pb.go @@ -0,0 +1,1180 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: helper/identity/mfa/types.proto + +package mfa + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Config represents the configuration information used *along with* the MFA +// secret tied to caller's identity, to verify the MFA credentials supplied. +// Configuration information differs by type. Handler of each type should know +// what to expect from the Config field. 
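+//
+// Since the type-specific settings live in a protobuf oneof, a handler
+// typically switches on the concrete wrapper type (sketch, not upstream
+// code; cfg is a placeholder for a previously loaded *Config):
+//
+//	switch conf := cfg.Config.(type) {
+//	case *Config_TOTPConfig:
+//		_ = conf.TOTPConfig.Period
+//	case *Config_DuoConfig:
+//		_ = conf.DuoConfig.APIHostname
+//	case *Config_OktaConfig, *Config_PingIDConfig:
+//		// handled analogously
+//	}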
+type Config struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // @inject_tag: sentinel:"-" + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + ID string `protobuf:"bytes,3,opt,name=id,proto3" json:"id,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + MountAccessor string `protobuf:"bytes,4,opt,name=mount_accessor,json=mountAccessor,proto3" json:"mount_accessor,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + UsernameFormat string `protobuf:"bytes,5,opt,name=username_format,json=usernameFormat,proto3" json:"username_format,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + // + // Types that are assignable to Config: + // + // *Config_TOTPConfig + // *Config_OktaConfig + // *Config_DuoConfig + // *Config_PingIDConfig + Config isConfig_Config `protobuf_oneof:"config" sentinel:"-"` + // @inject_tag: sentinel:"-" + NamespaceID string `protobuf:"bytes,10,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty" sentinel:"-"` +} + +func (x *Config) Reset() { + *x = Config{} + if protoimpl.UnsafeEnabled { + mi := &file_helper_identity_mfa_types_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Config) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Config) ProtoMessage() {} + +func (x *Config) ProtoReflect() protoreflect.Message { + mi := &file_helper_identity_mfa_types_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Config.ProtoReflect.Descriptor instead. 
+func (*Config) Descriptor() ([]byte, []int) { + return file_helper_identity_mfa_types_proto_rawDescGZIP(), []int{0} +} + +func (x *Config) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Config) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Config) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *Config) GetMountAccessor() string { + if x != nil { + return x.MountAccessor + } + return "" +} + +func (x *Config) GetUsernameFormat() string { + if x != nil { + return x.UsernameFormat + } + return "" +} + +func (m *Config) GetConfig() isConfig_Config { + if m != nil { + return m.Config + } + return nil +} + +func (x *Config) GetTOTPConfig() *TOTPConfig { + if x, ok := x.GetConfig().(*Config_TOTPConfig); ok { + return x.TOTPConfig + } + return nil +} + +func (x *Config) GetOktaConfig() *OktaConfig { + if x, ok := x.GetConfig().(*Config_OktaConfig); ok { + return x.OktaConfig + } + return nil +} + +func (x *Config) GetDuoConfig() *DuoConfig { + if x, ok := x.GetConfig().(*Config_DuoConfig); ok { + return x.DuoConfig + } + return nil +} + +func (x *Config) GetPingIDConfig() *PingIDConfig { + if x, ok := x.GetConfig().(*Config_PingIDConfig); ok { + return x.PingIDConfig + } + return nil +} + +func (x *Config) GetNamespaceID() string { + if x != nil { + return x.NamespaceID + } + return "" +} + +type isConfig_Config interface { + isConfig_Config() +} + +type Config_TOTPConfig struct { + TOTPConfig *TOTPConfig `protobuf:"bytes,6,opt,name=totp_config,json=totpConfig,proto3,oneof"` +} + +type Config_OktaConfig struct { + OktaConfig *OktaConfig `protobuf:"bytes,7,opt,name=okta_config,json=oktaConfig,proto3,oneof"` +} + +type Config_DuoConfig struct { + DuoConfig *DuoConfig `protobuf:"bytes,8,opt,name=duo_config,json=duoConfig,proto3,oneof"` +} + +type Config_PingIDConfig struct { + PingIDConfig *PingIDConfig `protobuf:"bytes,9,opt,name=pingid_config,json=pingidConfig,proto3,oneof"` +} + +func (*Config_TOTPConfig) isConfig_Config() {} + +func (*Config_OktaConfig) isConfig_Config() {} + +func (*Config_DuoConfig) isConfig_Config() {} + +func (*Config_PingIDConfig) isConfig_Config() {} + +// TOTPConfig represents the configuration information required to generate +// a TOTP key. The generated key will be stored in the entity along with these +// options. Validation of credentials supplied over the API will be validated +// by the information stored in the entity and not from the values in the +// configuration. 
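+//
+// A value of this shape might look like the following (all field values are
+// made-up placeholders, shown only to illustrate the structure):
+//
+//	cfg := &Config{
+//		Type: "totp",
+//		Name: "my-totp",
+//		Config: &Config_TOTPConfig{
+//			TOTPConfig: &TOTPConfig{
+//				Issuer: "Vault",
+//				Period: 30,
+//				Digits: 6,
+//				Skew:   1,
+//			},
+//		},
+//	}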
+type TOTPConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // @inject_tag: sentinel:"-" + Issuer string `protobuf:"bytes,1,opt,name=issuer,proto3" json:"issuer,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + Period uint32 `protobuf:"varint,2,opt,name=period,proto3" json:"period,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + Algorithm int32 `protobuf:"varint,3,opt,name=algorithm,proto3" json:"algorithm,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + Digits int32 `protobuf:"varint,4,opt,name=digits,proto3" json:"digits,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + Skew uint32 `protobuf:"varint,5,opt,name=skew,proto3" json:"skew,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + KeySize uint32 `protobuf:"varint,6,opt,name=key_size,json=keySize,proto3" json:"key_size,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + QRSize int32 `protobuf:"varint,7,opt,name=qr_size,json=qrSize,proto3" json:"qr_size,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + MaxValidationAttempts uint32 `protobuf:"varint,8,opt,name=max_validation_attempts,json=maxValidationAttempts,proto3" json:"max_validation_attempts,omitempty" sentinel:"-"` +} + +func (x *TOTPConfig) Reset() { + *x = TOTPConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_helper_identity_mfa_types_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TOTPConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TOTPConfig) ProtoMessage() {} + +func (x *TOTPConfig) ProtoReflect() protoreflect.Message { + mi := &file_helper_identity_mfa_types_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TOTPConfig.ProtoReflect.Descriptor instead. +func (*TOTPConfig) Descriptor() ([]byte, []int) { + return file_helper_identity_mfa_types_proto_rawDescGZIP(), []int{1} +} + +func (x *TOTPConfig) GetIssuer() string { + if x != nil { + return x.Issuer + } + return "" +} + +func (x *TOTPConfig) GetPeriod() uint32 { + if x != nil { + return x.Period + } + return 0 +} + +func (x *TOTPConfig) GetAlgorithm() int32 { + if x != nil { + return x.Algorithm + } + return 0 +} + +func (x *TOTPConfig) GetDigits() int32 { + if x != nil { + return x.Digits + } + return 0 +} + +func (x *TOTPConfig) GetSkew() uint32 { + if x != nil { + return x.Skew + } + return 0 +} + +func (x *TOTPConfig) GetKeySize() uint32 { + if x != nil { + return x.KeySize + } + return 0 +} + +func (x *TOTPConfig) GetQRSize() int32 { + if x != nil { + return x.QRSize + } + return 0 +} + +func (x *TOTPConfig) GetMaxValidationAttempts() uint32 { + if x != nil { + return x.MaxValidationAttempts + } + return 0 +} + +// DuoConfig represents the configuration information required to perform +// Duo authentication. 
+type DuoConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // @inject_tag: sentinel:"-" + IntegrationKey string `protobuf:"bytes,1,opt,name=integration_key,json=integrationKey,proto3" json:"integration_key,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + SecretKey string `protobuf:"bytes,2,opt,name=secret_key,json=secretKey,proto3" json:"secret_key,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + APIHostname string `protobuf:"bytes,3,opt,name=api_hostname,json=apiHostname,proto3" json:"api_hostname,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + PushInfo string `protobuf:"bytes,4,opt,name=push_info,json=pushInfo,proto3" json:"push_info,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + UsePasscode bool `protobuf:"varint,5,opt,name=use_passcode,json=usePasscode,proto3" json:"use_passcode,omitempty" sentinel:"-"` +} + +func (x *DuoConfig) Reset() { + *x = DuoConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_helper_identity_mfa_types_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DuoConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DuoConfig) ProtoMessage() {} + +func (x *DuoConfig) ProtoReflect() protoreflect.Message { + mi := &file_helper_identity_mfa_types_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DuoConfig.ProtoReflect.Descriptor instead. +func (*DuoConfig) Descriptor() ([]byte, []int) { + return file_helper_identity_mfa_types_proto_rawDescGZIP(), []int{2} +} + +func (x *DuoConfig) GetIntegrationKey() string { + if x != nil { + return x.IntegrationKey + } + return "" +} + +func (x *DuoConfig) GetSecretKey() string { + if x != nil { + return x.SecretKey + } + return "" +} + +func (x *DuoConfig) GetAPIHostname() string { + if x != nil { + return x.APIHostname + } + return "" +} + +func (x *DuoConfig) GetPushInfo() string { + if x != nil { + return x.PushInfo + } + return "" +} + +func (x *DuoConfig) GetUsePasscode() bool { + if x != nil { + return x.UsePasscode + } + return false +} + +// OktaConfig contains Okta configuration parameters required to perform Okta +// authentication. 
+type OktaConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // @inject_tag: sentinel:"-" + OrgName string `protobuf:"bytes,1,opt,name=org_name,json=orgName,proto3" json:"org_name,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + APIToken string `protobuf:"bytes,2,opt,name=api_token,json=apiToken,proto3" json:"api_token,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + Production bool `protobuf:"varint,3,opt,name=production,proto3" json:"production,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + BaseURL string `protobuf:"bytes,4,opt,name=base_url,json=baseUrl,proto3" json:"base_url,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + PrimaryEmail bool `protobuf:"varint,5,opt,name=primary_email,json=primaryEmail,proto3" json:"primary_email,omitempty" sentinel:"-"` +} + +func (x *OktaConfig) Reset() { + *x = OktaConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_helper_identity_mfa_types_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OktaConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OktaConfig) ProtoMessage() {} + +func (x *OktaConfig) ProtoReflect() protoreflect.Message { + mi := &file_helper_identity_mfa_types_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OktaConfig.ProtoReflect.Descriptor instead. +func (*OktaConfig) Descriptor() ([]byte, []int) { + return file_helper_identity_mfa_types_proto_rawDescGZIP(), []int{3} +} + +func (x *OktaConfig) GetOrgName() string { + if x != nil { + return x.OrgName + } + return "" +} + +func (x *OktaConfig) GetAPIToken() string { + if x != nil { + return x.APIToken + } + return "" +} + +func (x *OktaConfig) GetProduction() bool { + if x != nil { + return x.Production + } + return false +} + +func (x *OktaConfig) GetBaseURL() string { + if x != nil { + return x.BaseURL + } + return "" +} + +func (x *OktaConfig) GetPrimaryEmail() bool { + if x != nil { + return x.PrimaryEmail + } + return false +} + +// PingIDConfig contains PingID configuration information +type PingIDConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // @inject_tag: sentinel:"-" + UseBase64Key string `protobuf:"bytes,1,opt,name=use_base64_key,json=useBase64Key,proto3" json:"use_base64_key,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + UseSignature bool `protobuf:"varint,2,opt,name=use_signature,json=useSignature,proto3" json:"use_signature,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + Token string `protobuf:"bytes,3,opt,name=token,proto3" json:"token,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + IDPURL string `protobuf:"bytes,4,opt,name=idp_url,json=idpUrl,proto3" json:"idp_url,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + OrgAlias string `protobuf:"bytes,5,opt,name=org_alias,json=orgAlias,proto3" json:"org_alias,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + AdminURL string `protobuf:"bytes,6,opt,name=admin_url,json=adminUrl,proto3" json:"admin_url,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + AuthenticatorURL string `protobuf:"bytes,7,opt,name=authenticator_url,json=authenticatorUrl,proto3" json:"authenticator_url,omitempty" sentinel:"-"` 
+} + +func (x *PingIDConfig) Reset() { + *x = PingIDConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_helper_identity_mfa_types_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PingIDConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PingIDConfig) ProtoMessage() {} + +func (x *PingIDConfig) ProtoReflect() protoreflect.Message { + mi := &file_helper_identity_mfa_types_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PingIDConfig.ProtoReflect.Descriptor instead. +func (*PingIDConfig) Descriptor() ([]byte, []int) { + return file_helper_identity_mfa_types_proto_rawDescGZIP(), []int{4} +} + +func (x *PingIDConfig) GetUseBase64Key() string { + if x != nil { + return x.UseBase64Key + } + return "" +} + +func (x *PingIDConfig) GetUseSignature() bool { + if x != nil { + return x.UseSignature + } + return false +} + +func (x *PingIDConfig) GetToken() string { + if x != nil { + return x.Token + } + return "" +} + +func (x *PingIDConfig) GetIDPURL() string { + if x != nil { + return x.IDPURL + } + return "" +} + +func (x *PingIDConfig) GetOrgAlias() string { + if x != nil { + return x.OrgAlias + } + return "" +} + +func (x *PingIDConfig) GetAdminURL() string { + if x != nil { + return x.AdminURL + } + return "" +} + +func (x *PingIDConfig) GetAuthenticatorURL() string { + if x != nil { + return x.AuthenticatorURL + } + return "" +} + +// Secret represents all the types of secrets which the entity can hold. +// Each MFA type should add a secret type to the oneof block in this message. +type Secret struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // @inject_tag: sentinel:"-" + MethodName string `protobuf:"bytes,1,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty" sentinel:"-"` + // Types that are assignable to Value: + // + // *Secret_TOTPSecret + Value isSecret_Value `protobuf_oneof:"value"` +} + +func (x *Secret) Reset() { + *x = Secret{} + if protoimpl.UnsafeEnabled { + mi := &file_helper_identity_mfa_types_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Secret) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Secret) ProtoMessage() {} + +func (x *Secret) ProtoReflect() protoreflect.Message { + mi := &file_helper_identity_mfa_types_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Secret.ProtoReflect.Descriptor instead. 
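The `// @inject_tag:` markers throughout these files are consumed by a tag-injection pass that rewrites the generated code (protoc alone emits only the protobuf/json tags), which is presumably how the extra sentinel:"-" entries end up in the struct tags above. A small sketch, assuming nothing beyond the generated package itself, showing that the injected tag is an ordinary struct tag visible through reflection:

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/hashicorp/vault/helper/identity/mfa"
)

func main() {
	// Obtain the struct type without instantiating a message value.
	t := reflect.TypeOf((*mfa.Config)(nil)).Elem()

	f, ok := t.FieldByName("MountAccessor")
	if !ok {
		panic("field not found")
	}

	// The protoc-emitted tag and the injected sentinel tag sit side by side.
	fmt.Println(f.Tag.Get("protobuf")) // bytes,4,opt,name=mount_accessor,json=mountAccessor,proto3
	fmt.Println(f.Tag.Get("sentinel")) // -
}
```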
+func (*Secret) Descriptor() ([]byte, []int) { + return file_helper_identity_mfa_types_proto_rawDescGZIP(), []int{5} +} + +func (x *Secret) GetMethodName() string { + if x != nil { + return x.MethodName + } + return "" +} + +func (m *Secret) GetValue() isSecret_Value { + if m != nil { + return m.Value + } + return nil +} + +func (x *Secret) GetTOTPSecret() *TOTPSecret { + if x, ok := x.GetValue().(*Secret_TOTPSecret); ok { + return x.TOTPSecret + } + return nil +} + +type isSecret_Value interface { + isSecret_Value() +} + +type Secret_TOTPSecret struct { + // @inject_tag: sentinel:"-" + TOTPSecret *TOTPSecret `protobuf:"bytes,2,opt,name=totp_secret,json=totpSecret,proto3,oneof" sentinel:"-"` +} + +func (*Secret_TOTPSecret) isSecret_Value() {} + +// TOTPSecret represents the secret that gets stored in the entity about a +// particular MFA method. This information is used to validate the MFA +// credential supplied over the API during request time. +type TOTPSecret struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // @inject_tag: sentinel:"-" + Issuer string `protobuf:"bytes,1,opt,name=issuer,proto3" json:"issuer,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + Period uint32 `protobuf:"varint,2,opt,name=period,proto3" json:"period,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + Algorithm int32 `protobuf:"varint,3,opt,name=algorithm,proto3" json:"algorithm,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + Digits int32 `protobuf:"varint,4,opt,name=digits,proto3" json:"digits,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + Skew uint32 `protobuf:"varint,5,opt,name=skew,proto3" json:"skew,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + KeySize uint32 `protobuf:"varint,6,opt,name=key_size,json=keySize,proto3" json:"key_size,omitempty" sentinel:"-"` + // reserving 7 here just to keep parity with the config message above + // @inject_tag: sentinel:"-" + AccountName string `protobuf:"bytes,8,opt,name=account_name,json=accountName,proto3" json:"account_name,omitempty" sentinel:"-"` + // @inject_tag: sentinel:"-" + Key string `protobuf:"bytes,9,opt,name=key,proto3" json:"key,omitempty" sentinel:"-"` +} + +func (x *TOTPSecret) Reset() { + *x = TOTPSecret{} + if protoimpl.UnsafeEnabled { + mi := &file_helper_identity_mfa_types_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TOTPSecret) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TOTPSecret) ProtoMessage() {} + +func (x *TOTPSecret) ProtoReflect() protoreflect.Message { + mi := &file_helper_identity_mfa_types_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TOTPSecret.ProtoReflect.Descriptor instead. 
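Because Secret is a regular generated message, persisting it is a plain protobuf round-trip. A hedged sketch of the kind of encode/decode that happens when an entity's MFA secrets are written to storage and read back (the method name and key material below are invented placeholders):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/helper/identity/mfa"
	"google.golang.org/protobuf/proto"
)

func main() {
	in := &mfa.Secret{
		MethodName: "corp-totp",
		Value: &mfa.Secret_TOTPSecret{
			TOTPSecret: &mfa.TOTPSecret{
				Issuer:      "example-vault",
				Period:      30,
				Digits:      6,
				AccountName: "entity-1234",
				Key:         "JBSWY3DPEHPK3PXP", // placeholder base32 secret
			},
		},
	}

	// Wire round-trip: serialize the message, then decode into a fresh value.
	b, err := proto.Marshal(in)
	if err != nil {
		panic(err)
	}
	out := &mfa.Secret{}
	if err := proto.Unmarshal(b, out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetMethodName(), out.GetTOTPSecret().GetIssuer())
}
```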
+func (*TOTPSecret) Descriptor() ([]byte, []int) { + return file_helper_identity_mfa_types_proto_rawDescGZIP(), []int{6} +} + +func (x *TOTPSecret) GetIssuer() string { + if x != nil { + return x.Issuer + } + return "" +} + +func (x *TOTPSecret) GetPeriod() uint32 { + if x != nil { + return x.Period + } + return 0 +} + +func (x *TOTPSecret) GetAlgorithm() int32 { + if x != nil { + return x.Algorithm + } + return 0 +} + +func (x *TOTPSecret) GetDigits() int32 { + if x != nil { + return x.Digits + } + return 0 +} + +func (x *TOTPSecret) GetSkew() uint32 { + if x != nil { + return x.Skew + } + return 0 +} + +func (x *TOTPSecret) GetKeySize() uint32 { + if x != nil { + return x.KeySize + } + return 0 +} + +func (x *TOTPSecret) GetAccountName() string { + if x != nil { + return x.AccountName + } + return "" +} + +func (x *TOTPSecret) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +// MFAEnforcementConfig is what the user provides to the +// mfa/login_enforcement endpoint. +type MFAEnforcementConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + NamespaceID string `protobuf:"bytes,2,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty"` + MFAMethodIDs []string `protobuf:"bytes,3,rep,name=mfa_method_ids,json=mfaMethodIds,proto3" json:"mfa_method_ids,omitempty"` + AuthMethodAccessors []string `protobuf:"bytes,4,rep,name=auth_method_accessors,json=authMethodAccessors,proto3" json:"auth_method_accessors,omitempty"` + AuthMethodTypes []string `protobuf:"bytes,5,rep,name=auth_method_types,json=authMethodTypes,proto3" json:"auth_method_types,omitempty"` + IdentityGroupIds []string `protobuf:"bytes,6,rep,name=identity_group_ids,json=identityGroupIds,proto3" json:"identity_group_ids,omitempty"` + IdentityEntityIDs []string `protobuf:"bytes,7,rep,name=identity_entity_ids,json=identityEntityIds,proto3" json:"identity_entity_ids,omitempty"` + ID string `protobuf:"bytes,8,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *MFAEnforcementConfig) Reset() { + *x = MFAEnforcementConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_helper_identity_mfa_types_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MFAEnforcementConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MFAEnforcementConfig) ProtoMessage() {} + +func (x *MFAEnforcementConfig) ProtoReflect() protoreflect.Message { + mi := &file_helper_identity_mfa_types_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MFAEnforcementConfig.ProtoReflect.Descriptor instead. 
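MFAEnforcementConfig is the payload shape behind the mfa/login_enforcement endpoint, tying MFA method IDs to the login sources they gate. A brief, hedged sketch that builds one and prints its protojson rendering, which uses the camelCase json names recorded in the descriptor (mfa_method_ids becomes mfaMethodIds, and so on); all identifiers here are invented placeholders:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/helper/identity/mfa"
	"google.golang.org/protobuf/encoding/protojson"
)

func main() {
	// Require the referenced MFA method for the listed auth method types.
	enf := &mfa.MFAEnforcementConfig{
		Name:            "require-totp",
		NamespaceID:     "root",
		MFAMethodIDs:    []string{"11111111-2222-3333-4444-555555555555"},
		AuthMethodTypes: []string{"userpass", "ldap"},
	}

	b, err := protojson.Marshal(enf)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```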
+func (*MFAEnforcementConfig) Descriptor() ([]byte, []int) { + return file_helper_identity_mfa_types_proto_rawDescGZIP(), []int{7} +} + +func (x *MFAEnforcementConfig) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *MFAEnforcementConfig) GetNamespaceID() string { + if x != nil { + return x.NamespaceID + } + return "" +} + +func (x *MFAEnforcementConfig) GetMFAMethodIDs() []string { + if x != nil { + return x.MFAMethodIDs + } + return nil +} + +func (x *MFAEnforcementConfig) GetAuthMethodAccessors() []string { + if x != nil { + return x.AuthMethodAccessors + } + return nil +} + +func (x *MFAEnforcementConfig) GetAuthMethodTypes() []string { + if x != nil { + return x.AuthMethodTypes + } + return nil +} + +func (x *MFAEnforcementConfig) GetIdentityGroupIds() []string { + if x != nil { + return x.IdentityGroupIds + } + return nil +} + +func (x *MFAEnforcementConfig) GetIdentityEntityIDs() []string { + if x != nil { + return x.IdentityEntityIDs + } + return nil +} + +func (x *MFAEnforcementConfig) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +var File_helper_identity_mfa_types_proto protoreflect.FileDescriptor + +var file_helper_identity_mfa_types_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x68, 0x65, 0x6c, 0x70, 0x65, 0x72, 0x2f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x2f, 0x6d, 0x66, 0x61, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x03, 0x6d, 0x66, 0x61, 0x22, 0x90, 0x03, 0x0a, 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x6f, 0x75, + 0x6e, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0d, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, + 0x12, 0x27, 0x0a, 0x0f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x75, 0x73, 0x65, 0x72, 0x6e, + 0x61, 0x6d, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x32, 0x0a, 0x0b, 0x74, 0x6f, 0x74, + 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x6d, 0x66, 0x61, 0x2e, 0x54, 0x4f, 0x54, 0x50, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, + 0x00, 0x52, 0x0a, 0x74, 0x6f, 0x74, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x0a, + 0x0b, 0x6f, 0x6b, 0x74, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6d, 0x66, 0x61, 0x2e, 0x4f, 0x6b, 0x74, 0x61, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0a, 0x6f, 0x6b, 0x74, 0x61, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x2f, 0x0a, 0x0a, 0x64, 0x75, 0x6f, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6d, 0x66, 0x61, 0x2e, 0x44, 0x75, 0x6f, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x09, 0x64, 0x75, 0x6f, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x38, 0x0a, 0x0d, 0x70, 0x69, 0x6e, 0x67, 0x69, 0x64, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6d, 0x66, 0x61, 0x2e, + 0x50, 0x69, 0x6e, 0x67, 0x49, 0x44, 0x43, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0c, + 0x70, 0x69, 0x6e, 0x67, 0x69, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, + 0x08, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xf2, 0x01, 0x0a, 0x0a, 0x54, 0x4f, + 0x54, 0x50, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, 0x73, 0x75, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, + 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, + 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x61, 0x6c, 0x67, + 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x69, 0x74, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x64, 0x69, 0x67, 0x69, 0x74, 0x73, 0x12, 0x12, + 0x0a, 0x04, 0x73, 0x6b, 0x65, 0x77, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x73, 0x6b, + 0x65, 0x77, 0x12, 0x19, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x17, 0x0a, + 0x07, 0x71, 0x72, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, + 0x71, 0x72, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x36, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, + 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x22, 0xb6, + 0x01, 0x0a, 0x09, 0x44, 0x75, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0f, + 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, + 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x70, 0x69, 0x5f, 0x68, 0x6f, 0x73, 0x74, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x70, 0x69, 0x48, + 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x75, 0x73, 0x68, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x75, 0x73, 0x68, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x73, 0x65, 0x5f, 0x70, 0x61, 0x73, 0x73, + 0x63, 0x6f, 0x64, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x73, 0x65, 0x50, + 0x61, 0x73, 0x73, 0x63, 0x6f, 0x64, 0x65, 0x22, 0xa4, 0x01, 0x0a, 0x0a, 0x4f, 0x6b, 0x74, 0x61, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x72, 0x67, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x72, 0x67, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x70, 0x69, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x70, 0x69, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1e, + 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 
0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, + 0x0a, 0x08, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x62, 0x61, 0x73, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0c, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0xef, + 0x01, 0x0a, 0x0c, 0x50, 0x69, 0x6e, 0x67, 0x49, 0x44, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x24, 0x0a, 0x0e, 0x75, 0x73, 0x65, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x36, 0x34, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x75, 0x73, 0x65, 0x42, 0x61, 0x73, 0x65, + 0x36, 0x34, 0x4b, 0x65, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x65, 0x5f, 0x73, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x75, 0x73, + 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x12, 0x17, 0x0a, 0x07, 0x69, 0x64, 0x70, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x69, 0x64, 0x70, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x72, 0x67, + 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6f, 0x72, + 0x67, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x5f, + 0x75, 0x72, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x55, 0x72, 0x6c, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, + 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, + 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x55, 0x72, 0x6c, + 0x22, 0x66, 0x0a, 0x06, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x0b, 0x74, + 0x6f, 0x74, 0x70, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x6d, 0x66, 0x61, 0x2e, 0x54, 0x4f, 0x54, 0x50, 0x53, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x48, 0x00, 0x52, 0x0a, 0x74, 0x6f, 0x74, 0x70, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x42, + 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xd6, 0x01, 0x0a, 0x0a, 0x54, 0x4f, 0x54, + 0x50, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x12, + 0x16, 0x0a, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, + 0x69, 0x74, 0x68, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, + 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x69, 0x74, 0x73, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x64, 0x69, 0x67, 0x69, 0x74, 0x73, 0x12, 0x12, 0x0a, + 0x04, 0x73, 0x6b, 0x65, 0x77, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x73, 0x6b, 0x65, + 0x77, 0x12, 0x19, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, + 0x01, 
0x28, 0x0d, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x22, 0xc1, 0x02, 0x0a, 0x14, 0x4d, 0x46, 0x41, 0x45, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, + 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x66, 0x61, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, + 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6d, 0x66, 0x61, 0x4d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x49, 0x64, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x68, 0x5f, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x73, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x68, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x61, + 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x61, 0x75, 0x74, 0x68, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x06, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x47, 0x72, 0x6f, + 0x75, 0x70, 0x49, 0x64, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x5f, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x07, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x11, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x45, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x49, 0x64, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x64, 0x42, 0x30, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, + 0x75, 0x6c, 0x74, 0x2f, 0x68, 0x65, 0x6c, 0x70, 0x65, 0x72, 0x2f, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x2f, 0x6d, 0x66, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_helper_identity_mfa_types_proto_rawDescOnce sync.Once + file_helper_identity_mfa_types_proto_rawDescData = file_helper_identity_mfa_types_proto_rawDesc +) + +func file_helper_identity_mfa_types_proto_rawDescGZIP() []byte { + file_helper_identity_mfa_types_proto_rawDescOnce.Do(func() { + file_helper_identity_mfa_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_helper_identity_mfa_types_proto_rawDescData) + }) + return file_helper_identity_mfa_types_proto_rawDescData +} + +var file_helper_identity_mfa_types_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_helper_identity_mfa_types_proto_goTypes = []interface{}{ + (*Config)(nil), // 0: mfa.Config + (*TOTPConfig)(nil), // 1: mfa.TOTPConfig + (*DuoConfig)(nil), // 2: mfa.DuoConfig + (*OktaConfig)(nil), // 3: mfa.OktaConfig + 
(*PingIDConfig)(nil), // 4: mfa.PingIDConfig + (*Secret)(nil), // 5: mfa.Secret + (*TOTPSecret)(nil), // 6: mfa.TOTPSecret + (*MFAEnforcementConfig)(nil), // 7: mfa.MFAEnforcementConfig +} +var file_helper_identity_mfa_types_proto_depIDxs = []int32{ + 1, // 0: mfa.Config.totp_config:type_name -> mfa.TOTPConfig + 3, // 1: mfa.Config.okta_config:type_name -> mfa.OktaConfig + 2, // 2: mfa.Config.duo_config:type_name -> mfa.DuoConfig + 4, // 3: mfa.Config.pingid_config:type_name -> mfa.PingIDConfig + 6, // 4: mfa.Secret.totp_secret:type_name -> mfa.TOTPSecret + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_helper_identity_mfa_types_proto_init() } +func file_helper_identity_mfa_types_proto_init() { + if File_helper_identity_mfa_types_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_helper_identity_mfa_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Config); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_helper_identity_mfa_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TOTPConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_helper_identity_mfa_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DuoConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_helper_identity_mfa_types_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OktaConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_helper_identity_mfa_types_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PingIDConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_helper_identity_mfa_types_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Secret); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_helper_identity_mfa_types_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TOTPSecret); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_helper_identity_mfa_types_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MFAEnforcementConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_helper_identity_mfa_types_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Config_TOTPConfig)(nil), + (*Config_OktaConfig)(nil), + (*Config_DuoConfig)(nil), + (*Config_PingIDConfig)(nil), + } + file_helper_identity_mfa_types_proto_msgTypes[5].OneofWrappers = []interface{}{ + (*Secret_TOTPSecret)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: 
protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_helper_identity_mfa_types_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   8,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_helper_identity_mfa_types_proto_goTypes,
+		DependencyIndexes: file_helper_identity_mfa_types_proto_depIDxs,
+		MessageInfos:      file_helper_identity_mfa_types_proto_msgTypes,
+	}.Build()
+	File_helper_identity_mfa_types_proto = out.File
+	file_helper_identity_mfa_types_proto_rawDesc = nil
+	file_helper_identity_mfa_types_proto_goTypes = nil
+	file_helper_identity_mfa_types_proto_depIDxs = nil
+}
diff --git a/helper/identity/mfa/types.proto b/helper/identity/mfa/types.proto
new file mode 100644
index 0000000..65eb853
--- /dev/null
+++ b/helper/identity/mfa/types.proto
@@ -0,0 +1,153 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+syntax = "proto3";
+
+option go_package = "github.com/hashicorp/vault/helper/identity/mfa";
+
+package mfa;
+
+// Config represents the configuration information used *along with* the MFA
+// secret tied to caller's identity, to verify the MFA credentials supplied.
+// Configuration information differs by type. Handler of each type should know
+// what to expect from the Config field.
+message Config {
+  // @inject_tag: sentinel:"-"
+  string type = 1;
+  // @inject_tag: sentinel:"-"
+  string name = 2;
+  // @inject_tag: sentinel:"-"
+  string id = 3;
+  // @inject_tag: sentinel:"-"
+  string mount_accessor = 4;
+  // @inject_tag: sentinel:"-"
+  string username_format = 5;
+  // @inject_tag: sentinel:"-"
+  oneof config {
+    TOTPConfig totp_config = 6;
+    OktaConfig okta_config = 7;
+    DuoConfig duo_config = 8;
+    PingIDConfig pingid_config = 9;
+  }
+  // @inject_tag: sentinel:"-"
+  string namespace_id = 10;
+}
+
+// TOTPConfig represents the configuration information required to generate
+// a TOTP key. The generated key will be stored in the entity along with these
+// options. Credentials supplied over the API are validated against the
+// information stored in the entity, not against the values in this
+// configuration.
+message TOTPConfig {
+  // @inject_tag: sentinel:"-"
+  string issuer = 1;
+  // @inject_tag: sentinel:"-"
+  uint32 period = 2;
+  // @inject_tag: sentinel:"-"
+  int32 algorithm = 3;
+  // @inject_tag: sentinel:"-"
+  int32 digits = 4;
+  // @inject_tag: sentinel:"-"
+  uint32 skew = 5;
+  // @inject_tag: sentinel:"-"
+  uint32 key_size = 6;
+  // @inject_tag: sentinel:"-"
+  int32 qr_size = 7;
+  // @inject_tag: sentinel:"-"
+  uint32 max_validation_attempts = 8;
+}
+
+// DuoConfig represents the configuration information required to perform
+// Duo authentication.
+message DuoConfig {
+  // @inject_tag: sentinel:"-"
+  string integration_key = 1;
+  // @inject_tag: sentinel:"-"
+  string secret_key = 2;
+  // @inject_tag: sentinel:"-"
+  string api_hostname = 3;
+  // @inject_tag: sentinel:"-"
+  string push_info = 4;
+  // @inject_tag: sentinel:"-"
+  bool use_passcode = 5;
+}
+
+// OktaConfig contains Okta configuration parameters required to perform Okta
+// authentication.
+message OktaConfig { + // @inject_tag: sentinel:"-" + string org_name = 1; + // @inject_tag: sentinel:"-" + string api_token = 2; + // @inject_tag: sentinel:"-" + bool production = 3; + // @inject_tag: sentinel:"-" + string base_url = 4; + // @inject_tag: sentinel:"-" + bool primary_email = 5; +} + +// PingIDConfig contains PingID configuration information +message PingIDConfig { + // @inject_tag: sentinel:"-" + string use_base64_key = 1; + // @inject_tag: sentinel:"-" + bool use_signature = 2; + // @inject_tag: sentinel:"-" + string token = 3; + // @inject_tag: sentinel:"-" + string idp_url = 4; + // @inject_tag: sentinel:"-" + string org_alias = 5; + // @inject_tag: sentinel:"-" + string admin_url = 6; + // @inject_tag: sentinel:"-" + string authenticator_url = 7; +} + +// Secret represents all the types of secrets which the entity can hold. +// Each MFA type should add a secret type to the oneof block in this message. +message Secret { + // @inject_tag: sentinel:"-" + string method_name = 1; + oneof value { + // @inject_tag: sentinel:"-" + TOTPSecret totp_secret = 2; + } +} + +// TOTPSecret represents the secret that gets stored in the entity about a +// particular MFA method. This information is used to validate the MFA +// credential supplied over the API during request time. +message TOTPSecret { + // @inject_tag: sentinel:"-" + string issuer = 1; + // @inject_tag: sentinel:"-" + uint32 period = 2; + // @inject_tag: sentinel:"-" + int32 algorithm = 3; + // @inject_tag: sentinel:"-" + int32 digits = 4; + // @inject_tag: sentinel:"-" + uint32 skew = 5; + // @inject_tag: sentinel:"-" + uint32 key_size = 6; + // reserving 7 here just to keep parity with the config message above + // @inject_tag: sentinel:"-" + string account_name = 8; + // @inject_tag: sentinel:"-" + string key = 9; +} + +// MFAEnforcementConfig is what the user provides to the +// mfa/login_enforcement endpoint. +message MFAEnforcementConfig { + string name = 1; + string namespace_id = 2; + repeated string mfa_method_ids = 3; + repeated string auth_method_accessors = 4; + repeated string auth_method_types = 5; + repeated string identity_group_ids = 6; + repeated string identity_entity_ids = 7; + string id = 8; +} diff --git a/helper/identity/sentinel.go b/helper/identity/sentinel.go new file mode 100644 index 0000000..4f65e62 --- /dev/null +++ b/helper/identity/sentinel.go @@ -0,0 +1,129 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package identity + +import "github.com/golang/protobuf/ptypes" + +func (e *Entity) SentinelGet(key string) (interface{}, error) { + if e == nil { + return nil, nil + } + switch key { + case "aliases": + return e.Aliases, nil + case "id": + return e.ID, nil + case "meta", "metadata": + return e.Metadata, nil + case "name": + return e.Name, nil + case "creation_time": + return ptypes.TimestampString(e.CreationTime), nil + case "last_update_time": + return ptypes.TimestampString(e.LastUpdateTime), nil + case "merged_entity_ids": + return e.MergedEntityIDs, nil + case "policies": + return e.Policies, nil + } + + return nil, nil +} + +func (e *Entity) SentinelKeys() []string { + return []string{ + "id", + "aliases", + "metadata", + "meta", + "name", + "creation_time", + "last_update_time", + "merged_entity_ids", + "policies", + } +} + +func (p *Alias) SentinelGet(key string) (interface{}, error) { + if p == nil { + return nil, nil + } + switch key { + case "id": + return p.ID, nil + case "mount_type": + return p.MountType, nil + case "mount_accessor": + return p.MountAccessor, nil + case "mount_path": + return p.MountPath, nil + case "meta", "metadata": + return p.Metadata, nil + case "name": + return p.Name, nil + case "creation_time": + return ptypes.TimestampString(p.CreationTime), nil + case "last_update_time": + return ptypes.TimestampString(p.LastUpdateTime), nil + case "merged_from_entity_ids": + return p.MergedFromCanonicalIDs, nil + } + + return nil, nil +} + +func (a *Alias) SentinelKeys() []string { + return []string{ + "id", + "mount_type", + "mount_accessor", + "mount_path", + "meta", + "metadata", + "name", + "creation_time", + "last_update_time", + "merged_from_entity_ids", + } +} + +func (g *Group) SentinelGet(key string) (interface{}, error) { + if g == nil { + return nil, nil + } + switch key { + case "id": + return g.ID, nil + case "name": + return g.Name, nil + case "policies": + return g.Policies, nil + case "parent_group_ids": + return g.ParentGroupIDs, nil + case "member_entity_ids": + return g.MemberEntityIDs, nil + case "meta", "metadata": + return g.Metadata, nil + case "creation_time": + return ptypes.TimestampString(g.CreationTime), nil + case "last_update_time": + return ptypes.TimestampString(g.LastUpdateTime), nil + } + + return nil, nil +} + +func (g *Group) SentinelKeys() []string { + return []string{ + "id", + "name", + "policies", + "parent_group_ids", + "member_entity_ids", + "metadata", + "meta", + "creation_time", + "last_update_time", + } +} diff --git a/helper/identity/types.pb.go b/helper/identity/types.pb.go new file mode 100644 index 0000000..91b4c0f --- /dev/null +++ b/helper/identity/types.pb.go @@ -0,0 +1,1256 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: helper/identity/types.proto + +package identity + +import ( + mfa "github.com/hashicorp/vault/helper/identity/mfa" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Group represents an identity group.
+type Group struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// ID is the unique identifier for this group
+	// @inject_tag: sentinel:"-"
+	ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty" sentinel:"-"`
+	// Name is the unique name for this group
+	// @inject_tag: sentinel:"-"
+	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty" sentinel:"-"`
+	// Policies are the vault policies to be granted to members of this group
+	// @inject_tag: sentinel:"-"
+	Policies []string `protobuf:"bytes,3,rep,name=policies,proto3" json:"policies,omitempty" sentinel:"-"`
+	// ParentGroupIDs are the identifiers of the groups of which this group is
+	// a member. These will serve as references to the parent group in the
+	// hierarchy.
+	// @inject_tag: sentinel:"-"
+	ParentGroupIDs []string `protobuf:"bytes,4,rep,name=parent_group_ids,json=parentGroupIds,proto3" json:"parent_group_ids,omitempty" sentinel:"-"`
+	// MemberEntityIDs are the identifiers of entities which are members of this
+	// group
+	// @inject_tag: sentinel:"-"
+	MemberEntityIDs []string `protobuf:"bytes,5,rep,name=member_entity_ids,json=memberEntityIDs,proto3" json:"member_entity_ids,omitempty" sentinel:"-"`
+	// Metadata represents the custom data tied to this group
+	// @inject_tag: sentinel:"-"
+	Metadata map[string]string `protobuf:"bytes,6,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" sentinel:"-"`
+	// CreationTime is the time at which this group was created
+	// @inject_tag: sentinel:"-"
+	CreationTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty" sentinel:"-"`
+	// LastUpdateTime is the time at which this group was last modified
+	// @inject_tag: sentinel:"-"
+	LastUpdateTime *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty" sentinel:"-"`
+	// ModifyIndex tracks the number of updates to the group. It is useful to detect
+	// updates to the groups.
+	// @inject_tag: sentinel:"-"
+	ModifyIndex uint64 `protobuf:"varint,9,opt,name=modify_index,json=modifyIndex,proto3" json:"modify_index,omitempty" sentinel:"-"`
+	// BucketKey is the path of the storage packer key into which this group is
+	// stored.
+	// @inject_tag: sentinel:"-"
+	BucketKey string `protobuf:"bytes,10,opt,name=bucket_key,json=bucketKey,proto3" json:"bucket_key,omitempty" sentinel:"-"`
+	// Alias is used to mark this group as an internal mapping of a group that
+	// is external to the identity store. Alias can only be set if the 'type'
+	// is set to 'external'.
+	// @inject_tag: sentinel:"-"
+	Alias *Alias `protobuf:"bytes,11,opt,name=alias,proto3" json:"alias,omitempty" sentinel:"-"`
+	// Type indicates if this group is an internal group or an external group.
+	// Memberships of the internal groups can be managed over the API whereas
+	// the memberships on the external group --for which a corresponding alias
+	// will be set-- will be managed automatically.
+	// @inject_tag: sentinel:"-"
+	Type string `protobuf:"bytes,12,opt,name=type,proto3" json:"type,omitempty" sentinel:"-"`
+	// NamespaceID is the identifier of the namespace to which this group
+	// belongs.
Do not return this value over the API when reading the + // group. + // @inject_tag: sentinel:"-" + NamespaceID string `protobuf:"bytes,13,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty" sentinel:"-"` +} + +func (x *Group) Reset() { + *x = Group{} + if protoimpl.UnsafeEnabled { + mi := &file_helper_identity_types_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Group) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Group) ProtoMessage() {} + +func (x *Group) ProtoReflect() protoreflect.Message { + mi := &file_helper_identity_types_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Group.ProtoReflect.Descriptor instead. +func (*Group) Descriptor() ([]byte, []int) { + return file_helper_identity_types_proto_rawDescGZIP(), []int{0} +} + +func (x *Group) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *Group) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Group) GetPolicies() []string { + if x != nil { + return x.Policies + } + return nil +} + +func (x *Group) GetParentGroupIDs() []string { + if x != nil { + return x.ParentGroupIDs + } + return nil +} + +func (x *Group) GetMemberEntityIDs() []string { + if x != nil { + return x.MemberEntityIDs + } + return nil +} + +func (x *Group) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Group) GetCreationTime() *timestamppb.Timestamp { + if x != nil { + return x.CreationTime + } + return nil +} + +func (x *Group) GetLastUpdateTime() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdateTime + } + return nil +} + +func (x *Group) GetModifyIndex() uint64 { + if x != nil { + return x.ModifyIndex + } + return 0 +} + +func (x *Group) GetBucketKey() string { + if x != nil { + return x.BucketKey + } + return "" +} + +func (x *Group) GetAlias() *Alias { + if x != nil { + return x.Alias + } + return nil +} + +func (x *Group) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Group) GetNamespaceID() string { + if x != nil { + return x.NamespaceID + } + return "" +} + +// LocalAliases holds the aliases belonging to an entity that are local to the +// cluster. +type LocalAliases struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Aliases []*Alias `protobuf:"bytes,1,rep,name=aliases,proto3" json:"aliases,omitempty"` +} + +func (x *LocalAliases) Reset() { + *x = LocalAliases{} + if protoimpl.UnsafeEnabled { + mi := &file_helper_identity_types_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalAliases) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalAliases) ProtoMessage() {} + +func (x *LocalAliases) ProtoReflect() protoreflect.Message { + mi := &file_helper_identity_types_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalAliases.ProtoReflect.Descriptor instead. 
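The timestamp fields on these messages are google.protobuf.Timestamp values, so callers populate them through timestamppb rather than time.Time directly. A minimal, hedged sketch constructing a Group using the generated fields and getters above (all identifiers and values are invented placeholders):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/helper/identity"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// An internal group carrying policies and custom metadata.
	g := &identity.Group{
		ID:           "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee",
		Name:         "engineering",
		Type:         "internal",
		Policies:     []string{"default", "eng-read"},
		Metadata:     map[string]string{"team": "identity"},
		CreationTime: timestamppb.Now(),
		ModifyIndex:  1,
	}
	g.LastUpdateTime = g.CreationTime

	// AsTime converts the protobuf timestamp back into a time.Time.
	fmt.Println(g.GetName(), g.GetCreationTime().AsTime().UTC())
}
```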
+func (*LocalAliases) Descriptor() ([]byte, []int) {
+	return file_helper_identity_types_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *LocalAliases) GetAliases() []*Alias {
+	if x != nil {
+		return x.Aliases
+	}
+	return nil
+}
+
+// Entity represents an entity that gets persisted and indexed.
+// Entity is fundamentally composed of zero or many aliases.
+type Entity struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Aliases are the identities that this entity is made of. This can be
+	// empty, allowing the entity to be created first and aliases to be added
+	// incrementally.
+	// @inject_tag: sentinel:"-"
+	Aliases []*Alias `protobuf:"bytes,1,rep,name=aliases,proto3" json:"aliases,omitempty" sentinel:"-"`
+	// ID is the unique identifier of the entity, which will always be a UUID.
+	// This should never be allowed to be updated.
+	// @inject_tag: sentinel:"-"
+	ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty" sentinel:"-"`
+	// Name is a unique identifier of the entity which is intended to be
+	// human-friendly. The default name might not be human friendly since it
+	// gets suffixed by a UUID, but it can optionally be updated, unlike the ID
+	// field.
+	// @inject_tag: sentinel:"-"
+	Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty" sentinel:"-"`
+	// Metadata represents the explicit metadata which is set by the
+	// clients. This is useful for attaching information pertaining to the
+	// aliases. This is a non-unique field of the entity, meaning multiple
+	// entities can have the same metadata set. Entities will be indexed based
+	// on this explicit metadata. This enables virtual groupings of entities
+	// based on their metadata.
+	// @inject_tag: sentinel:"-"
+	Metadata map[string]string `protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" sentinel:"-"`
+	// CreationTime is the time at which this entity is first created.
+	// @inject_tag: sentinel:"-"
+	CreationTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty" sentinel:"-"`
+	// LastUpdateTime is the most recent time at which the properties of this
+	// entity got modified. This is helpful in filtering out entities based on
+	// their age and taking action on them, if desired.
+	// @inject_tag: sentinel:"-"
+	LastUpdateTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty" sentinel:"-"`
+	// MergedEntityIDs are the entities which got merged to this one. Entities
+	// will be indexed based on all the entities that got merged into it. This
+	// helps to apply the actions on this entity to the tokens that are tied
+	// to the merged entities. Merged entities will be deleted entirely and
+	// this is the only trackable trail of their earlier presence.
+	// @inject_tag: sentinel:"-"
+	MergedEntityIDs []string `protobuf:"bytes,7,rep,name=merged_entity_ids,json=mergedEntityIDs,proto3" json:"merged_entity_ids,omitempty" sentinel:"-"`
+	// Policies the entity is entitled to
+	// @inject_tag: sentinel:"-"
+	Policies []string `protobuf:"bytes,8,rep,name=policies,proto3" json:"policies,omitempty" sentinel:"-"`
+	// BucketKey is the path of the storage packer key into which this entity is
+	// stored.
+	// @inject_tag: sentinel:"-"
+	BucketKey string `protobuf:"bytes,9,opt,name=bucket_key,json=bucketKey,proto3" json:"bucket_key,omitempty" sentinel:"-"`
+	// MFASecrets holds the MFA secrets indexed by the identifier of the MFA
+	// method configuration.
+	// @inject_tag: sentinel:"-"
+	MFASecrets map[string]*mfa.Secret `protobuf:"bytes,10,rep,name=mfa_secrets,json=mfaSecrets,proto3" json:"mfa_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" sentinel:"-"`
+	// Disabled indicates whether tokens associated with the account are
+	// prevented from being used
+	// @inject_tag: sentinel:"-"
+	Disabled bool `protobuf:"varint,11,opt,name=disabled,proto3" json:"disabled,omitempty" sentinel:"-"`
+	// NamespaceID is the identifier of the namespace to which this entity
+	// belongs. Do not return this value over the API when reading the
+	// entity.
+	// @inject_tag: sentinel:"-"
+	NamespaceID string `protobuf:"bytes,12,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty" sentinel:"-"`
+}
+
+func (x *Entity) Reset() {
+	*x = Entity{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_helper_identity_types_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Entity) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Entity) ProtoMessage() {}
+
+func (x *Entity) ProtoReflect() protoreflect.Message {
+	mi := &file_helper_identity_types_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Entity.ProtoReflect.Descriptor instead.
+func (*Entity) Descriptor() ([]byte, []int) {
+	return file_helper_identity_types_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Entity) GetAliases() []*Alias {
+	if x != nil {
+		return x.Aliases
+	}
+	return nil
+}
+
+func (x *Entity) GetID() string {
+	if x != nil {
+		return x.ID
+	}
+	return ""
+}
+
+func (x *Entity) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *Entity) GetMetadata() map[string]string {
+	if x != nil {
+		return x.Metadata
+	}
+	return nil
+}
+
+func (x *Entity) GetCreationTime() *timestamppb.Timestamp {
+	if x != nil {
+		return x.CreationTime
+	}
+	return nil
+}
+
+func (x *Entity) GetLastUpdateTime() *timestamppb.Timestamp {
+	if x != nil {
+		return x.LastUpdateTime
+	}
+	return nil
+}
+
+func (x *Entity) GetMergedEntityIDs() []string {
+	if x != nil {
+		return x.MergedEntityIDs
+	}
+	return nil
+}
+
+func (x *Entity) GetPolicies() []string {
+	if x != nil {
+		return x.Policies
+	}
+	return nil
+}
+
+func (x *Entity) GetBucketKey() string {
+	if x != nil {
+		return x.BucketKey
+	}
+	return ""
+}
+
+func (x *Entity) GetMFASecrets() map[string]*mfa.Secret {
+	if x != nil {
+		return x.MFASecrets
+	}
+	return nil
+}
+
+func (x *Entity) GetDisabled() bool {
+	if x != nil {
+		return x.Disabled
+	}
+	return false
+}
+
+func (x *Entity) GetNamespaceID() string {
+	if x != nil {
+		return x.NamespaceID
+	}
+	return ""
+}
+
+// Alias represents the alias that gets stored inside the entity object in
+// storage and is also represented in an in-memory index of alias objects.
+type Alias struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// ID is the unique identifier that represents this alias
+	// @inject_tag: sentinel:"-"
+	ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty" sentinel:"-"`
+	// CanonicalID is the entity identifier to which this alias belongs
+	// @inject_tag: sentinel:"-"
+	CanonicalID string `protobuf:"bytes,2,opt,name=canonical_id,json=canonicalId,proto3" json:"canonical_id,omitempty" sentinel:"-"`
+	// MountType is the type of the backend mount to which this alias belongs.
+	// This enables categorically querying aliases of specific backend types.
+	// @inject_tag: sentinel:"-"
+	MountType string `protobuf:"bytes,3,opt,name=mount_type,json=mountType,proto3" json:"mount_type,omitempty" sentinel:"-"`
+	// MountAccessor is the accessor of the backend mount to which this alias
+	// belongs.
+	// @inject_tag: sentinel:"-"
+	MountAccessor string `protobuf:"bytes,4,opt,name=mount_accessor,json=mountAccessor,proto3" json:"mount_accessor,omitempty" sentinel:"-"`
+	// MountPath is the path of the backend mount to which MountAccessor
+	// belongs. This field is not used for any operational purposes. It is only
+	// returned when the alias is read, as a nicety.
+	// @inject_tag: sentinel:"-"
+	MountPath string `protobuf:"bytes,5,opt,name=mount_path,json=mountPath,proto3" json:"mount_path,omitempty" sentinel:"-"`
+	// Metadata is the explicit metadata that clients set against an entity
+	// which enables virtual grouping of aliases. Aliases will be indexed
+	// against their metadata.
+	// @inject_tag: sentinel:"-"
+	Metadata map[string]string `protobuf:"bytes,6,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" sentinel:"-"`
+	// Name is the identifier of this alias in its authentication source.
+	// This does not uniquely identify an alias in Vault. This, in conjunction
+	// with MountAccessor, forms the factors that represent an alias in a
+	// unique way. Aliases will be indexed based on this combined uniqueness
+	// factor.
+	// @inject_tag: sentinel:"-"
+	Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty" sentinel:"-"`
+	// CreationTime is the time at which this alias was first created
+	// @inject_tag: sentinel:"-"
+	CreationTime *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty" sentinel:"-"`
+	// LastUpdateTime is the most recent time at which the properties of this
+	// alias got modified. This is helpful in filtering out aliases based
+	// on their age and taking action on them, if desired.
+	// @inject_tag: sentinel:"-"
+	LastUpdateTime *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty" sentinel:"-"`
+	// MergedFromCanonicalIDs is the FIFO history of merging activity
+	// @inject_tag: sentinel:"-"
+	MergedFromCanonicalIDs []string `protobuf:"bytes,10,rep,name=merged_from_canonical_ids,json=mergedFromCanonicalIds,proto3" json:"merged_from_canonical_ids,omitempty" sentinel:"-"`
+	// NamespaceID is the identifier of the namespace to which this alias
+	// belongs.
+ // @inject_tag: sentinel:"-" + NamespaceID string `protobuf:"bytes,11,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty" sentinel:"-"` + // Custom Metadata represents the custom data tied to this alias + // @inject_tag: sentinel:"-" + CustomMetadata map[string]string `protobuf:"bytes,12,rep,name=custom_metadata,json=customMetadata,proto3" json:"custom_metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" sentinel:"-"` + // Local indicates if the alias only belongs to the cluster where it was + // created. If true, the alias will be stored in a location that is ignored by + // the performance replication subsystem. + // @inject_tag: sentinel:"-" + Local bool `protobuf:"varint,13,opt,name=local,proto3" json:"local,omitempty" sentinel:"-"` + // LocalBucketKey is the identifying element of the location where this alias + // is stored in the storage packer. This helps in querying local aliases + // during invalidation of local aliases in performance standbys. + // @inject_tag: sentinel:"-" + LocalBucketKey string `protobuf:"bytes,14,opt,name=local_bucket_key,json=localBucketKey,proto3" json:"local_bucket_key,omitempty" sentinel:"-"` +} + +func (x *Alias) Reset() { + *x = Alias{} + if protoimpl.UnsafeEnabled { + mi := &file_helper_identity_types_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Alias) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Alias) ProtoMessage() {} + +func (x *Alias) ProtoReflect() protoreflect.Message { + mi := &file_helper_identity_types_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Alias.ProtoReflect.Descriptor instead. 
+func (*Alias) Descriptor() ([]byte, []int) { + return file_helper_identity_types_proto_rawDescGZIP(), []int{3} +} + +func (x *Alias) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *Alias) GetCanonicalID() string { + if x != nil { + return x.CanonicalID + } + return "" +} + +func (x *Alias) GetMountType() string { + if x != nil { + return x.MountType + } + return "" +} + +func (x *Alias) GetMountAccessor() string { + if x != nil { + return x.MountAccessor + } + return "" +} + +func (x *Alias) GetMountPath() string { + if x != nil { + return x.MountPath + } + return "" +} + +func (x *Alias) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Alias) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Alias) GetCreationTime() *timestamppb.Timestamp { + if x != nil { + return x.CreationTime + } + return nil +} + +func (x *Alias) GetLastUpdateTime() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdateTime + } + return nil +} + +func (x *Alias) GetMergedFromCanonicalIDs() []string { + if x != nil { + return x.MergedFromCanonicalIDs + } + return nil +} + +func (x *Alias) GetNamespaceID() string { + if x != nil { + return x.NamespaceID + } + return "" +} + +func (x *Alias) GetCustomMetadata() map[string]string { + if x != nil { + return x.CustomMetadata + } + return nil +} + +func (x *Alias) GetLocal() bool { + if x != nil { + return x.Local + } + return false +} + +func (x *Alias) GetLocalBucketKey() string { + if x != nil { + return x.LocalBucketKey + } + return "" +} + +// Deprecated. Retained for backwards compatibility. +type EntityStorageEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Personas []*PersonaIndexEntry `protobuf:"bytes,1,rep,name=personas,proto3" json:"personas,omitempty"` + ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Metadata map[string]string `protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + CreationTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"` + LastUpdateTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"` + MergedEntityIDs []string `protobuf:"bytes,7,rep,name=merged_entity_ids,json=mergedEntityIDs,proto3" json:"merged_entity_ids,omitempty"` + Policies []string `protobuf:"bytes,8,rep,name=policies,proto3" json:"policies,omitempty"` + BucketKeyHash string `protobuf:"bytes,9,opt,name=bucket_key_hash,json=bucketKeyHash,proto3" json:"bucket_key_hash,omitempty"` + MFASecrets map[string]*mfa.Secret `protobuf:"bytes,10,rep,name=mfa_secrets,json=mfaSecrets,proto3" json:"mfa_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *EntityStorageEntry) Reset() { + *x = EntityStorageEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_helper_identity_types_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EntityStorageEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntityStorageEntry) ProtoMessage() {} + +func (x *EntityStorageEntry) ProtoReflect() 
protoreflect.Message { + mi := &file_helper_identity_types_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EntityStorageEntry.ProtoReflect.Descriptor instead. +func (*EntityStorageEntry) Descriptor() ([]byte, []int) { + return file_helper_identity_types_proto_rawDescGZIP(), []int{4} +} + +func (x *EntityStorageEntry) GetPersonas() []*PersonaIndexEntry { + if x != nil { + return x.Personas + } + return nil +} + +func (x *EntityStorageEntry) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *EntityStorageEntry) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *EntityStorageEntry) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *EntityStorageEntry) GetCreationTime() *timestamppb.Timestamp { + if x != nil { + return x.CreationTime + } + return nil +} + +func (x *EntityStorageEntry) GetLastUpdateTime() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdateTime + } + return nil +} + +func (x *EntityStorageEntry) GetMergedEntityIDs() []string { + if x != nil { + return x.MergedEntityIDs + } + return nil +} + +func (x *EntityStorageEntry) GetPolicies() []string { + if x != nil { + return x.Policies + } + return nil +} + +func (x *EntityStorageEntry) GetBucketKeyHash() string { + if x != nil { + return x.BucketKeyHash + } + return "" +} + +func (x *EntityStorageEntry) GetMFASecrets() map[string]*mfa.Secret { + if x != nil { + return x.MFASecrets + } + return nil +} + +// Deprecated. Retained for backwards compatibility. +type PersonaIndexEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + EntityID string `protobuf:"bytes,2,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"` + MountType string `protobuf:"bytes,3,opt,name=mount_type,json=mountType,proto3" json:"mount_type,omitempty"` + MountAccessor string `protobuf:"bytes,4,opt,name=mount_accessor,json=mountAccessor,proto3" json:"mount_accessor,omitempty"` + MountPath string `protobuf:"bytes,5,opt,name=mount_path,json=mountPath,proto3" json:"mount_path,omitempty"` + Metadata map[string]string `protobuf:"bytes,6,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"` + CreationTime *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"` + LastUpdateTime *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"` + MergedFromEntityIDs []string `protobuf:"bytes,10,rep,name=merged_from_entity_ids,json=mergedFromEntityIDs,proto3" json:"merged_from_entity_ids,omitempty"` +} + +func (x *PersonaIndexEntry) Reset() { + *x = PersonaIndexEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_helper_identity_types_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PersonaIndexEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PersonaIndexEntry) ProtoMessage() {} + +func (x *PersonaIndexEntry) 
ProtoReflect() protoreflect.Message { + mi := &file_helper_identity_types_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PersonaIndexEntry.ProtoReflect.Descriptor instead. +func (*PersonaIndexEntry) Descriptor() ([]byte, []int) { + return file_helper_identity_types_proto_rawDescGZIP(), []int{5} +} + +func (x *PersonaIndexEntry) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *PersonaIndexEntry) GetEntityID() string { + if x != nil { + return x.EntityID + } + return "" +} + +func (x *PersonaIndexEntry) GetMountType() string { + if x != nil { + return x.MountType + } + return "" +} + +func (x *PersonaIndexEntry) GetMountAccessor() string { + if x != nil { + return x.MountAccessor + } + return "" +} + +func (x *PersonaIndexEntry) GetMountPath() string { + if x != nil { + return x.MountPath + } + return "" +} + +func (x *PersonaIndexEntry) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *PersonaIndexEntry) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *PersonaIndexEntry) GetCreationTime() *timestamppb.Timestamp { + if x != nil { + return x.CreationTime + } + return nil +} + +func (x *PersonaIndexEntry) GetLastUpdateTime() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdateTime + } + return nil +} + +func (x *PersonaIndexEntry) GetMergedFromEntityIDs() []string { + if x != nil { + return x.MergedFromEntityIDs + } + return nil +} + +var File_helper_identity_types_proto protoreflect.FileDescriptor + +var file_helper_identity_types_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x68, 0x65, 0x6c, 0x70, 0x65, 0x72, 0x2f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x68, 0x65, 0x6c, 0x70, 0x65, 0x72, + 0x2f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2f, 0x6d, 0x66, 0x61, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbc, 0x04, 0x0a, 0x05, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x67, 0x72, + 0x6f, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x64, 0x73, 0x12, 0x2a, 0x0a, + 0x11, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, + 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, + 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x73, 0x12, 0x39, 0x0a, 0x08, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 
0x69, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x12, 0x3f, 0x0a, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0c, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x10, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x6c, 0x61, 0x73, + 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x6d, + 0x6f, 0x64, 0x69, 0x66, 0x79, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x0b, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1d, + 0x0a, 0x0a, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x25, 0x0a, + 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, + 0x6c, 0x69, 0x61, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x39, 0x0a, 0x0c, 0x4c, 0x6f, 0x63, 0x61, + 0x6c, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x07, 0x61, 0x6c, 0x69, 0x61, + 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x07, 0x61, 0x6c, 0x69, 0x61, + 0x73, 0x65, 0x73, 0x22, 0x8c, 0x05, 0x0a, 0x06, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x29, + 0x0a, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x0f, 0x2e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x52, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3a, 0x0a, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1e, 0x2e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x2e, 
0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3f, 0x0a, 0x0d, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0c, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x10, 0x6c, 0x61, + 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, + 0x12, 0x2a, 0x0a, 0x11, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x72, + 0x67, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x73, 0x12, 0x1a, 0x0a, 0x08, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x41, 0x0a, 0x0b, 0x6d, 0x66, 0x61, 0x5f, 0x73, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x4d, + 0x66, 0x61, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, + 0x6d, 0x66, 0x61, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, + 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, + 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4a, 0x0a, 0x0f, 0x4d, 0x66, 0x61, 0x53, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x21, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x6d, 0x66, 0x61, + 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0xe1, 0x05, 0x0a, 0x05, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c, + 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x63, 0x61, 
0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x12, + 0x1d, 0x0a, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, + 0x0a, 0x0e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x70, + 0x61, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, + 0x50, 0x61, 0x74, 0x68, 0x12, 0x39, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x2e, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0c, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x54, 0x69, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x10, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x19, 0x6d, 0x65, + 0x72, 0x67, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, + 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, 0x6d, + 0x65, 0x72, 0x67, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, + 0x61, 0x6c, 0x49, 0x64, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x4c, 0x0a, 0x0f, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0c, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18, + 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x12, 0x28, 0x0a, 0x10, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 
0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x1a, 0x41, 0x0a, 0x13, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x88, 0x05, 0x0a, 0x12, 0x45, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x37, 0x0a, + 0x08, 0x70, 0x65, 0x72, 0x73, 0x6f, 0x6e, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1b, 0x2e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x50, 0x65, 0x72, 0x73, 0x6f, + 0x6e, 0x61, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x70, 0x65, + 0x72, 0x73, 0x6f, 0x6e, 0x61, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x08, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x12, 0x3f, 0x0a, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0c, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x69, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x10, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x6d, 0x65, 0x72, + 0x67, 0x65, 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x07, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x49, 0x64, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, + 0x68, 0x61, 0x73, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x4b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x12, 0x4d, 0x0a, 0x0b, 0x6d, 0x66, 0x61, + 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, + 0x2e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x45, 0x6e, 0x74, 0x69, 
0x74, 0x79, + 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4d, 0x66, 0x61, + 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x6d, 0x66, + 0x61, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4a, 0x0a, 0x0f, 0x4d, 0x66, 0x61, 0x53, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x21, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x6d, 0x66, 0x61, 0x2e, + 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0xf9, 0x03, 0x0a, 0x11, 0x50, 0x65, 0x72, 0x73, 0x6f, 0x6e, 0x61, 0x49, 0x6e, 0x64, + 0x65, 0x78, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x61, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x6f, 0x75, + 0x6e, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x6f, + 0x75, 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x45, 0x0a, 0x08, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x69, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x50, 0x65, 0x72, 0x73, 0x6f, 0x6e, 0x61, 0x49, 0x6e, + 0x64, 0x65, 0x78, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0c, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x10, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x6c, 0x61, 0x73, + 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x33, 0x0a, 0x16, 0x6d, + 0x65, 0x72, 
0x67, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x6d, 0x65, 0x72, + 0x67, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x73, + 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x2c, 0x5a, + 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, + 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x68, 0x65, 0x6c, 0x70, + 0x65, 0x72, 0x2f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_helper_identity_types_proto_rawDescOnce sync.Once + file_helper_identity_types_proto_rawDescData = file_helper_identity_types_proto_rawDesc +) + +func file_helper_identity_types_proto_rawDescGZIP() []byte { + file_helper_identity_types_proto_rawDescOnce.Do(func() { + file_helper_identity_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_helper_identity_types_proto_rawDescData) + }) + return file_helper_identity_types_proto_rawDescData +} + +var file_helper_identity_types_proto_msgTypes = make([]protoimpl.MessageInfo, 14) +var file_helper_identity_types_proto_goTypes = []interface{}{ + (*Group)(nil), // 0: identity.Group + (*LocalAliases)(nil), // 1: identity.LocalAliases + (*Entity)(nil), // 2: identity.Entity + (*Alias)(nil), // 3: identity.Alias + (*EntityStorageEntry)(nil), // 4: identity.EntityStorageEntry + (*PersonaIndexEntry)(nil), // 5: identity.PersonaIndexEntry + nil, // 6: identity.Group.MetadataEntry + nil, // 7: identity.Entity.MetadataEntry + nil, // 8: identity.Entity.MFASecretsEntry + nil, // 9: identity.Alias.MetadataEntry + nil, // 10: identity.Alias.CustomMetadataEntry + nil, // 11: identity.EntityStorageEntry.MetadataEntry + nil, // 12: identity.EntityStorageEntry.MFASecretsEntry + nil, // 13: identity.PersonaIndexEntry.MetadataEntry + (*timestamppb.Timestamp)(nil), // 14: google.protobuf.Timestamp + (*mfa.Secret)(nil), // 15: mfa.Secret +} +var file_helper_identity_types_proto_depIDxs = []int32{ + 6, // 0: identity.Group.metadata:type_name -> identity.Group.MetadataEntry + 14, // 1: identity.Group.creation_time:type_name -> google.protobuf.Timestamp + 14, // 2: identity.Group.last_update_time:type_name -> google.protobuf.Timestamp + 3, // 3: identity.Group.alias:type_name -> identity.Alias + 3, // 4: identity.LocalAliases.aliases:type_name -> identity.Alias + 3, // 5: identity.Entity.aliases:type_name -> identity.Alias + 7, // 6: identity.Entity.metadata:type_name -> identity.Entity.MetadataEntry + 14, // 7: identity.Entity.creation_time:type_name -> google.protobuf.Timestamp + 14, // 8: identity.Entity.last_update_time:type_name -> google.protobuf.Timestamp + 8, // 9: identity.Entity.mfa_secrets:type_name -> identity.Entity.MFASecretsEntry + 9, // 10: identity.Alias.metadata:type_name -> identity.Alias.MetadataEntry + 14, // 11: identity.Alias.creation_time:type_name -> google.protobuf.Timestamp + 14, // 12: identity.Alias.last_update_time:type_name -> google.protobuf.Timestamp + 10, // 13: identity.Alias.custom_metadata:type_name -> identity.Alias.CustomMetadataEntry + 5, // 
14: identity.EntityStorageEntry.personas:type_name -> identity.PersonaIndexEntry + 11, // 15: identity.EntityStorageEntry.metadata:type_name -> identity.EntityStorageEntry.MetadataEntry + 14, // 16: identity.EntityStorageEntry.creation_time:type_name -> google.protobuf.Timestamp + 14, // 17: identity.EntityStorageEntry.last_update_time:type_name -> google.protobuf.Timestamp + 12, // 18: identity.EntityStorageEntry.mfa_secrets:type_name -> identity.EntityStorageEntry.MFASecretsEntry + 13, // 19: identity.PersonaIndexEntry.metadata:type_name -> identity.PersonaIndexEntry.MetadataEntry + 14, // 20: identity.PersonaIndexEntry.creation_time:type_name -> google.protobuf.Timestamp + 14, // 21: identity.PersonaIndexEntry.last_update_time:type_name -> google.protobuf.Timestamp + 15, // 22: identity.Entity.MFASecretsEntry.value:type_name -> mfa.Secret + 15, // 23: identity.EntityStorageEntry.MFASecretsEntry.value:type_name -> mfa.Secret + 24, // [24:24] is the sub-list for method output_type + 24, // [24:24] is the sub-list for method input_type + 24, // [24:24] is the sub-list for extension type_name + 24, // [24:24] is the sub-list for extension extendee + 0, // [0:24] is the sub-list for field type_name +} + +func init() { file_helper_identity_types_proto_init() } +func file_helper_identity_types_proto_init() { + if File_helper_identity_types_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_helper_identity_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Group); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_helper_identity_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalAliases); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_helper_identity_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Entity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_helper_identity_types_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Alias); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_helper_identity_types_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EntityStorageEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_helper_identity_types_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PersonaIndexEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_helper_identity_types_proto_rawDesc, + NumEnums: 0, + NumMessages: 14, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_helper_identity_types_proto_goTypes, + DependencyIndexes: file_helper_identity_types_proto_depIDxs, + MessageInfos: file_helper_identity_types_proto_msgTypes, + }.Build() + File_helper_identity_types_proto = out.File + file_helper_identity_types_proto_rawDesc = nil + 
file_helper_identity_types_proto_goTypes = nil
+	file_helper_identity_types_proto_depIDxs = nil
+}
diff --git a/helper/identity/types.proto b/helper/identity/types.proto
new file mode 100644
index 0000000..a34d715
--- /dev/null
+++ b/helper/identity/types.proto
@@ -0,0 +1,264 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+syntax = "proto3";
+
+option go_package = "github.com/hashicorp/vault/helper/identity";
+
+package identity;
+
+import "google/protobuf/timestamp.proto";
+import "helper/identity/mfa/types.proto";
+
+// Group represents an identity group.
+message Group {
+  // ID is the unique identifier for this group
+  // @inject_tag: sentinel:"-"
+  string id = 1;
+
+  // Name is the unique name for this group
+  // @inject_tag: sentinel:"-"
+  string name = 2;
+
+  // Policies are the vault policies to be granted to members of this group
+  // @inject_tag: sentinel:"-"
+  repeated string policies = 3;
+
+  // ParentGroupIDs are the identifiers of the groups of which this group is
+  // a member. These will serve as references to the parent groups in the
+  // hierarchy.
+  // @inject_tag: sentinel:"-"
+  repeated string parent_group_ids = 4;
+
+  // MemberEntityIDs are the identifiers of entities which are members of this
+  // group
+  // @inject_tag: sentinel:"-"
+  repeated string member_entity_ids = 5;
+
+  // Metadata represents the custom data tied with this group
+  // @inject_tag: sentinel:"-"
+  map<string, string> metadata = 6;
+
+  // CreationTime is the time at which this group was created
+  // @inject_tag: sentinel:"-"
+  google.protobuf.Timestamp creation_time = 7;
+
+  // LastUpdateTime is the time at which this group was last modified
+  // @inject_tag: sentinel:"-"
+  google.protobuf.Timestamp last_update_time = 8;
+
+  // ModifyIndex tracks the number of updates to the group. It is useful to
+  // detect updates to the groups.
+  // @inject_tag: sentinel:"-"
+  uint64 modify_index = 9;
+
+  // BucketKey is the path of the storage packer key into which this group is
+  // stored.
+  // @inject_tag: sentinel:"-"
+  string bucket_key = 10;
+
+  // Alias is used to mark this group as an internal mapping of a group that
+  // is external to the identity store. Alias can only be set if the 'type'
+  // is set to 'external'.
+  // @inject_tag: sentinel:"-"
+  Alias alias = 11;
+
+  // Type indicates if this group is an internal group or an external group.
+  // Memberships of the internal groups can be managed over the API, whereas
+  // the memberships of an external group --for which a corresponding alias
+  // will be set-- will be managed automatically.
+  // @inject_tag: sentinel:"-"
+  string type = 12;
+
+  // NamespaceID is the identifier of the namespace to which this group
+  // belongs. Do not return this value over the API when reading the
+  // group.
+  // @inject_tag: sentinel:"-"
+  string namespace_id = 13;
+}
+
+// LocalAliases holds the aliases belonging to an entity that are local to the
+// cluster.
+message LocalAliases {
+  repeated Alias aliases = 1;
+}
+
+// Entity represents an entity that gets persisted and indexed.
+// Entity is fundamentally composed of zero or many aliases.
+message Entity {
+  // Aliases are the identities that this entity is made of. This can be
+  // empty as well, to favor being able to create the entity first and then
+  // incrementally add aliases.
+  // @inject_tag: sentinel:"-"
+  repeated Alias aliases = 1;
+
+  // ID is the unique identifier of the entity, which will always be a UUID.
+  // This should never be allowed to be updated.
+  // @inject_tag: sentinel:"-"
+  string id = 2;
+
+  // Name is a unique identifier of the entity which is intended to be
+  // human-friendly. The default name might not be human-friendly since it
+  // gets suffixed by a UUID, but it can optionally be updated, unlike the ID
+  // field.
+  // @inject_tag: sentinel:"-"
+  string name = 3;
+
+  // Metadata represents the explicit metadata which is set by the
+  // clients. This is useful to tie any information pertaining to the
+  // aliases. This is a non-unique field of entity, meaning multiple
+  // entities can have the same metadata set. Entities will be indexed based
+  // on this explicit metadata. This enables virtual groupings of entities
+  // based on their metadata.
+  // @inject_tag: sentinel:"-"
+  map<string, string> metadata = 4;
+
+  // CreationTime is the time at which this entity was first created.
+  // @inject_tag: sentinel:"-"
+  google.protobuf.Timestamp creation_time = 5;
+
+  // LastUpdateTime is the most recent time at which the properties of this
+  // entity got modified. This is helpful in filtering out entities based on
+  // their age and taking action on them, if desired.
+  // @inject_tag: sentinel:"-"
+  google.protobuf.Timestamp last_update_time = 6;
+
+  // MergedEntityIDs are the entities which got merged into this one. Entities
+  // will be indexed based on all the entities that got merged into it. This
+  // helps to apply the actions on this entity to the tokens that are tied to
+  // the merged entities. Merged entities will be deleted entirely and
+  // this is the only trackable trail of their earlier presence.
+  // @inject_tag: sentinel:"-"
+  repeated string merged_entity_ids = 7;
+
+  // Policies are the policies the entity is entitled to
+  // @inject_tag: sentinel:"-"
+  repeated string policies = 8;
+
+  // BucketKey is the path of the storage packer key into which this entity is
+  // stored.
+  // @inject_tag: sentinel:"-"
+  string bucket_key = 9;
+
+  // MFASecrets holds the MFA secrets indexed by the identifier of the MFA
+  // method configuration.
+  // @inject_tag: sentinel:"-"
+  map<string, mfa.Secret> mfa_secrets = 10;
+
+  // Disabled indicates whether tokens associated with the account should not
+  // be able to be used
+  // @inject_tag: sentinel:"-"
+  bool disabled = 11;
+
+  // NamespaceID is the identifier of the namespace to which this entity
+  // belongs to. Do not return this value over the API when reading the
+  // entity.
+  // @inject_tag: sentinel:"-"
+  string namespace_id = 12;
+}
+
+// Alias represents the alias that gets stored inside of the entity object in
+// storage, and is also represented in an in-memory index of alias objects.
+message Alias {
+  // ID is the unique identifier that represents this alias
+  // @inject_tag: sentinel:"-"
+  string id = 1;
+
+  // CanonicalID is the entity identifier to which this alias belongs
+  // @inject_tag: sentinel:"-"
+  string canonical_id = 2;
+
+  // MountType is the type of the backend mount to which this alias belongs.
+  // This enables categorically querying aliases of specific backend types.
+  // @inject_tag: sentinel:"-"
+  string mount_type = 3;
+
+  // MountAccessor is the accessor of the backend mount to which this alias
+  // belongs.
+  // @inject_tag: sentinel:"-"
+  string mount_accessor = 4;
+
+  // MountPath is the path of the backend mount to which MountAccessor
+  // belongs. This field is not used for any operational purposes. It is only
+  // returned when the alias is read, as a nicety.
+  // @inject_tag: sentinel:"-"
+  string mount_path = 5;
+
+  // Metadata is the explicit metadata that clients set against an entity
+  // which enables virtual grouping of aliases. Aliases will be indexed
+  // against their metadata.
+  // @inject_tag: sentinel:"-"
+  map<string, string> metadata = 6;
+
+  // Name is the identifier of this alias in its authentication source.
+  // This does not uniquely identify an alias in Vault. This, in conjunction
+  // with MountAccessor, forms the factors that represent an alias in a
+  // unique way. Aliases will be indexed based on this combined uniqueness
+  // factor.
+  // @inject_tag: sentinel:"-"
+  string name = 7;
+
+  // CreationTime is the time at which this alias was first created
+  // @inject_tag: sentinel:"-"
+  google.protobuf.Timestamp creation_time = 8;
+
+  // LastUpdateTime is the most recent time at which the properties of this
+  // alias got modified. This is helpful in filtering out aliases based
+  // on their age and taking action on them, if desired.
+  // @inject_tag: sentinel:"-"
+  google.protobuf.Timestamp last_update_time = 9;
+
+  // MergedFromCanonicalIDs is the FIFO history of merging activity
+  // @inject_tag: sentinel:"-"
+  repeated string merged_from_canonical_ids = 10;
+
+  // NamespaceID is the identifier of the namespace to which this alias
+  // belongs.
+  // @inject_tag: sentinel:"-"
+  string namespace_id = 11;
+
+  // CustomMetadata represents the custom data tied to this alias
+  // @inject_tag: sentinel:"-"
+  map<string, string> custom_metadata = 12;
+
+  // Local indicates if the alias only belongs to the cluster where it was
+  // created. If true, the alias will be stored in a location that is ignored
+  // by the performance replication subsystem.
+  // @inject_tag: sentinel:"-"
+  bool local = 13;
+
+  // LocalBucketKey is the identifying element of the location where this alias
+  // is stored in the storage packer. This helps in querying local aliases
+  // during invalidation of local aliases in performance standbys.
+  // @inject_tag: sentinel:"-"
+  string local_bucket_key = 14;
+}
+
+// Deprecated. Retained for backwards compatibility.
+message EntityStorageEntry {
+  repeated PersonaIndexEntry personas = 1;
+  string id = 2;
+  string name = 3;
+  map<string, string> metadata = 4;
+  google.protobuf.Timestamp creation_time = 5;
+  google.protobuf.Timestamp last_update_time = 6;
+  repeated string merged_entity_ids = 7;
+  repeated string policies = 8;
+  string bucket_key_hash = 9;
+  map<string, mfa.Secret> mfa_secrets = 10;
+}
+
+// Deprecated. Retained for backwards compatibility.
+message PersonaIndexEntry {
+  string id = 1;
+  string entity_id = 2;
+  string mount_type = 3;
+  string mount_accessor = 4;
+  string mount_path = 5;
+  map<string, string> metadata = 6;
+  string name = 7;
+  google.protobuf.Timestamp creation_time = 8;
+  google.protobuf.Timestamp last_update_time = 9;
+  repeated string merged_from_entity_ids = 10;
+}
diff --git a/helper/locking/lock.go b/helper/locking/lock.go
new file mode 100644
index 0000000..a9bff4c
--- /dev/null
+++ b/helper/locking/lock.go
@@ -0,0 +1,49 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package locking
+
+import (
+	"sync"
+
+	"github.com/sasha-s/go-deadlock"
+)
+
+// Mutex is a common mutex interface that allows either the built-in or the
+// imported deadlock-detecting implementation to be used.
+type Mutex interface {
+	Lock()
+	Unlock()
+}
+
+// RWMutex is the common r/w mutex counterpart of Mutex.
+type RWMutex interface {
+	Lock()
+	RLock()
+	RLocker() sync.Locker
+	RUnlock()
+	Unlock()
+}
+
+// DeadlockMutex, used when requested via the config option
+// `detect_deadlocks`, behaves like a sync.Mutex but does periodic checking to
+// see if outstanding locks and requests look like a deadlock. If it finds a
+// deadlock candidate it will output it prefixed with "POTENTIAL DEADLOCK", as
+// described at https://github.com/sasha-s/go-deadlock
+type DeadlockMutex struct {
+	deadlock.Mutex
+}
+
+// DeadlockRWMutex is the RW version of DeadlockMutex.
+type DeadlockRWMutex struct {
+	deadlock.RWMutex
+}
+
+// SyncMutex is a regular sync.Mutex.
+type SyncMutex struct {
+	sync.Mutex
+}
+
+// SyncRWMutex is the RW version of SyncMutex.
+type SyncRWMutex struct {
+	sync.RWMutex
+}
diff --git a/helper/logging/logfile.go b/helper/logging/logfile.go
new file mode 100644
index 0000000..2ea9764
--- /dev/null
+++ b/helper/logging/logfile.go
@@ -0,0 +1,163 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package logging
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/hashicorp/go-multierror"
+)
+
+var now = time.Now
+
+type LogFile struct {
+	// fileName is the name of the log file
+	fileName string
+
+	// logPath is the path to the log file
+	logPath string
+
+	// duration is the time between file rotation operations
+	duration time.Duration
+
+	// lastCreated represents the creation time of the latest log
+	lastCreated time.Time
+
+	// fileInfo is the pointer to the current file being written to
+	fileInfo *os.File
+
+	// maxBytes is the maximum number of desired bytes for a log file
+	maxBytes int
+
+	// maxArchivedFiles is the maximum number of rotated files to keep before
+	// removing them
+	maxArchivedFiles int
+
+	// bytesWritten is the number of bytes written in the current log file
+	bytesWritten int64
+
+	// acquire is the mutex that guards writes and rotations against
+	// concurrent use
+	acquire sync.Mutex
+}
+
+// Write is used to implement io.Writer
+func (l *LogFile) Write(b []byte) (n int, err error) {
+	l.acquire.Lock()
+	defer l.acquire.Unlock()
+
+	// Create a new file if we have no file to write to
+	if l.fileInfo == nil {
+		if err := l.openNew(); err != nil {
+			return 0, err
+		}
+	}
+	if err := l.rotate(); err != nil { // Rotate first if the size or age threshold has been reached
+		return 0, err
+	}
+
+	bytesWritten, err := l.fileInfo.Write(b)
+
+	if bytesWritten > 0 {
+		l.bytesWritten += int64(bytesWritten)
+	}
+
+	return bytesWritten, err
+}
+
+func (l *LogFile) fileNamePattern() string {
+	// Extract the file extension
+	fileExt := filepath.Ext(l.fileName)
+	// If we have no file extension we append .log
+	if fileExt == "" {
+		fileExt = ".log"
+	}
+	// Remove the file extension from the filename
+	return strings.TrimSuffix(l.fileName, fileExt) + "-%s" + fileExt
+}
+
+func (l *LogFile) openNew() error {
+	newFileName := l.fileName
+	newFilePath := filepath.Join(l.logPath, newFileName)
+
+	// Try creating or opening the active log file. Since the active log file
+	// always has the same name, append log entries to prevent overwriting
+	// previous log data.
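+	// O_CREATE makes the first open create the file, O_APPEND makes every
+	// write land at the end even across reopens, and mode 0o640 restricts
+	// reads to the file's owner and group.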
+ filePointer, err := os.OpenFile(newFilePath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o640) + if err != nil { + return err + } + + // New file, new bytes tracker, new creation time :) + l.fileInfo = filePointer + l.lastCreated = now() + l.bytesWritten = 0 + return nil +} + +func (l *LogFile) rotate() error { + // Get the time from the last point of contact + timeElapsed := time.Since(l.lastCreated) + // Rotate if we hit the byte file limit or the time limit + if (l.bytesWritten >= int64(l.maxBytes) && (l.maxBytes > 0)) || timeElapsed >= l.duration { + if err := l.fileInfo.Close(); err != nil { + return err + } + if err := l.renameCurrentFile(); err != nil { + return err + } + if err := l.pruneFiles(); err != nil { + return err + } + return l.openNew() + } + return nil +} + +func (l *LogFile) pruneFiles() error { + if l.maxArchivedFiles == 0 { + return nil + } + + pattern := filepath.Join(l.logPath, fmt.Sprintf(l.fileNamePattern(), "*")) + matches, err := filepath.Glob(pattern) + if err != nil { + return err + } + + switch { + case l.maxArchivedFiles < 0: + return removeFiles(matches) + case len(matches) < l.maxArchivedFiles: + return nil + } + + sort.Strings(matches) + last := len(matches) - l.maxArchivedFiles + return removeFiles(matches[:last]) +} + +func removeFiles(files []string) (err error) { + for _, file := range files { + if fileError := os.Remove(file); fileError != nil { + err = multierror.Append(err, fmt.Errorf("error removing file %s: %v", file, fileError)) + } + } + return err +} + +func (l *LogFile) renameCurrentFile() error { + fileNamePattern := l.fileNamePattern() + createTime := now() + currentFilePath := filepath.Join(l.logPath, l.fileName) + oldFileName := fmt.Sprintf(fileNamePattern, strconv.FormatInt(createTime.UnixNano(), 10)) + oldFilePath := filepath.Join(l.logPath, oldFileName) + + return os.Rename(currentFilePath, oldFilePath) +} diff --git a/helper/logging/logfile_test.go b/helper/logging/logfile_test.go new file mode 100644 index 0000000..a0cae98 --- /dev/null +++ b/helper/logging/logfile_test.go @@ -0,0 +1,157 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package logging
+
+import (
+	"os"
+	"path/filepath"
+	"sort"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestLogFile_openNew(t *testing.T) {
+	logFile := &LogFile{
+		fileName: "vault.log",
+		logPath:  t.TempDir(),
+		duration: defaultRotateDuration,
+	}
+
+	err := logFile.openNew()
+	require.NoError(t, err)
+
+	msg := "[INFO] Something"
+	_, err = logFile.Write([]byte(msg))
+	require.NoError(t, err)
+
+	content, err := os.ReadFile(logFile.fileInfo.Name())
+	require.NoError(t, err)
+	require.Contains(t, string(content), msg)
+}
+
+func TestLogFile_Rotation_MaxDuration(t *testing.T) {
+	if testing.Short() {
+		t.Skip("too slow for testing.Short")
+	}
+
+	tempDir := t.TempDir()
+	logFile := LogFile{
+		fileName: "vault.log",
+		logPath:  tempDir,
+		duration: 50 * time.Millisecond,
+	}
+
+	_, err := logFile.Write([]byte("Hello World"))
+	assert.NoError(t, err, "error writing rotation max duration part 1")
+
+	time.Sleep(3 * logFile.duration)
+
+	_, err = logFile.Write([]byte("Second File"))
+	assert.NoError(t, err, "error writing rotation max duration part 2")
+
+	require.Len(t, listDir(t, tempDir), 2)
+}
+
+func TestLogFile_Rotation_MaxBytes(t *testing.T) {
+	tempDir := t.TempDir()
+	logFile := LogFile{
+		fileName: "somefile.log",
+		logPath:  tempDir,
+		maxBytes: 10,
+		duration: defaultRotateDuration,
+	}
+	_, err := logFile.Write([]byte("Hello World"))
+	assert.NoError(t, err, "error writing rotation max bytes part 1")
+
+	_, err = logFile.Write([]byte("Second File"))
+	assert.NoError(t, err, "error writing rotation max bytes part 2")
+
+	require.Len(t, listDir(t, tempDir), 2)
+}
+
+func TestLogFile_PruneFiles(t *testing.T) {
+	tempDir := t.TempDir()
+	logFile := LogFile{
+		fileName:         "vault.log",
+		logPath:          tempDir,
+		maxBytes:         10,
+		duration:         defaultRotateDuration,
+		maxArchivedFiles: 1,
+	}
+	_, err := logFile.Write([]byte("[INFO] Hello World"))
+	assert.NoError(t, err, "error writing during prune files test part 1")
+
+	_, err = logFile.Write([]byte("[INFO] Second File"))
+	assert.NoError(t, err, "error writing during prune files test part 2")
+
+	_, err = logFile.Write([]byte("[INFO] Third File"))
+	assert.NoError(t, err, "error writing during prune files test part 3")
+
+	logFiles := listDir(t, tempDir)
+	sort.Strings(logFiles)
+	require.Len(t, logFiles, 2)
+
+	content, err := os.ReadFile(filepath.Join(tempDir, logFiles[0]))
+	require.NoError(t, err)
+	require.Contains(t, string(content), "Second File")
+
+	content, err = os.ReadFile(filepath.Join(tempDir, logFiles[1]))
+	require.NoError(t, err)
+	require.Contains(t, string(content), "Third File")
+}
+
+func TestLogFile_PruneFiles_Disabled(t *testing.T) {
+	tempDir := t.TempDir()
+	logFile := LogFile{
+		fileName:         "somename.log",
+		logPath:          tempDir,
+		maxBytes:         10,
+		duration:         defaultRotateDuration,
+		maxArchivedFiles: 0,
+	}
+
+	_, err := logFile.Write([]byte("[INFO] Hello World"))
+	assert.NoError(t, err, "error writing during prune files - disabled test part 1")
+
+	_, err = logFile.Write([]byte("[INFO] Second File"))
+	assert.NoError(t, err, "error writing during prune files - disabled test part 2")
+
+	_, err = logFile.Write([]byte("[INFO] Third File"))
+	assert.NoError(t, err, "error writing during prune files - disabled test part 3")
+
+	require.Len(t, listDir(t, tempDir), 3)
+}
+
+func TestLogFile_FileRotation_Disabled(t *testing.T) {
+	tempDir := t.TempDir()
+	logFile := LogFile{
+		fileName:         "vault.log",
+		logPath:          tempDir,
+		maxBytes:         10,
+		maxArchivedFiles: -1,
+	}
+
+	_, err := logFile.Write([]byte("[INFO] Hello World"))
+	assert.NoError(t, err, "error writing during rotation disabled test part 1")
+
+	_, err = logFile.Write([]byte("[INFO] Second File"))
+	assert.NoError(t, err, "error writing during rotation disabled test part 2")
+
+	_, err = logFile.Write([]byte("[INFO] Third File"))
+	assert.NoError(t, err, "error writing during rotation disabled test part 3")
+
+	require.Len(t, listDir(t, tempDir), 1)
+}
+
+func listDir(t *testing.T, name string) []string {
+	t.Helper()
+	fh, err := os.Open(name)
+	require.NoError(t, err)
+	files, err := fh.Readdirnames(100)
+	require.NoError(t, err)
+	return files
+}
diff --git a/helper/logging/logger.go b/helper/logging/logger.go
new file mode 100644
index 0000000..8768ab5
--- /dev/null
+++ b/helper/logging/logger.go
@@ -0,0 +1,223 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package logging
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/go-multierror"
+)
+
+const (
+	UnspecifiedFormat LogFormat = iota
+	StandardFormat
+	JSONFormat
+)
+
+// defaultRotateDuration is the default interval the agent waits before rotating logs
+const defaultRotateDuration = 24 * time.Hour
+
+type LogFormat int
+
+// LogConfig should be used to supply configuration when creating a new Vault logger
+type LogConfig struct {
+	// Name is the name the returned logger will use to prefix log lines.
+	Name string
+
+	// LogLevel is the minimum level to be logged.
+	LogLevel hclog.Level
+
+	// LogFormat is the log format to use, supported formats are 'standard' and 'json'.
+	LogFormat LogFormat
+
+	// LogFilePath is the path of the user-specified file to write logs to.
+	LogFilePath string
+
+	// LogRotateDuration is the user specified time to rotate logs
+	LogRotateDuration time.Duration
+
+	// LogRotateBytes is the user specified byte limit to rotate logs
+	LogRotateBytes int
+
+	// LogRotateMaxFiles is the maximum number of past archived log files to keep
+	LogRotateMaxFiles int
+
+	// DefaultFileName should be set to the value to be used if the LogFilePath
+	// ends in a path separator such as '/var/log/'.
+	// Examples of the default name are as follows: 'vault', 'agent' or 'proxy'.
+	// The creator of this struct *must* ensure that it is assigned before doing
+	// anything with LogConfig!
+	DefaultFileName string
+}
+
+// NewLogConfig should be used to initialize the LogConfig struct.
+func NewLogConfig(defaultFileName string) (*LogConfig, error) {
+	defaultFileName = strings.TrimSpace(defaultFileName)
+	if defaultFileName == "" {
+		return nil, errors.New("default file name is required")
+	}
+
+	return &LogConfig{DefaultFileName: defaultFileName}, nil
+}
+
+func (c *LogConfig) isLevelInvalid() bool {
+	return c.LogLevel == hclog.NoLevel || c.LogLevel == hclog.Off || c.LogLevel.String() == "unknown"
+}
+
+func (c *LogConfig) isFormatJson() bool {
+	return c.LogFormat == JSONFormat
+}
+
+// Stringer implementation
+func (lf LogFormat) String() string {
+	switch lf {
+	case UnspecifiedFormat:
+		return "unspecified"
+	case StandardFormat:
+		return "standard"
+	case JSONFormat:
+		return "json"
+	}
+
+	// unreachable
+	return "unknown"
+}
+
+// noErrorWriter is a wrapper to suppress errors when writing to w.
+type noErrorWriter struct {
+	w io.Writer
+}
+
+func (w noErrorWriter) Write(p []byte) (n int, err error) {
+	_, _ = w.w.Write(p)
+	// We purposely return n == len(p) as if the write was successful
+	return len(p), nil
+}
+
+// parseFullPath takes a full path intended to be the location for log files and
+// breaks it down into a directory and a file name. It checks both of these for
+// the common globbing characters '*', '?' and '[', and returns an error if any
+// of them is present.
+func parseFullPath(fullPath string) (directory, fileName string, err error) {
+	directory, fileName = filepath.Split(fullPath)
+
+	globChars := "*?["
+	if strings.ContainsAny(directory, globChars) {
+		err = multierror.Append(err, fmt.Errorf("directory contains glob character"))
+	}
+	if fileName == "" {
+		fileName = "vault.log"
+	} else if strings.ContainsAny(fileName, globChars) {
+		err = multierror.Append(err, fmt.Errorf("file name contains globbing character"))
+	}
+
+	return directory, fileName, err
+}
+
+// Setup creates a new logger with the specified configuration and writer
+func Setup(config *LogConfig, w io.Writer) (hclog.InterceptLogger, error) {
+	// Validate the log level
+	if config.isLevelInvalid() {
+		return nil, fmt.Errorf("invalid log level: %v", config.LogLevel)
+	}
+
+	// If w is os.Stdout and Vault is being run as a Windows Service, writes will
+	// fail silently, which may inadvertently prevent writes to other writers.
+	// noErrorWriter is used as a wrapper to suppress any errors when writing to w.
+	writers := []io.Writer{noErrorWriter{w: w}}
+
+	// Create a file logger if the user has specified the path to the log file
+	if config.LogFilePath != "" {
+		dir, fileName, err := parseFullPath(config.LogFilePath)
+		if err != nil {
+			return nil, err
+		}
+		if fileName == "" {
+			fileName = fmt.Sprintf("%s.log", config.DefaultFileName)
+		}
+		if config.LogRotateDuration == 0 {
+			config.LogRotateDuration = defaultRotateDuration
+		}
+
+		logFile := &LogFile{
+			fileName:         fileName,
+			logPath:          dir,
+			duration:         config.LogRotateDuration,
+			maxBytes:         config.LogRotateBytes,
+			maxArchivedFiles: config.LogRotateMaxFiles,
+		}
+		if err := logFile.pruneFiles(); err != nil {
+			return nil, fmt.Errorf("failed to prune log files: %w", err)
+		}
+		if err := logFile.openNew(); err != nil {
+			return nil, fmt.Errorf("failed to setup logging: %w", err)
+		}
+		writers = append(writers, logFile)
+	}
+
+	logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{
+		Name:              config.Name,
+		Level:             config.LogLevel,
+		IndependentLevels: true,
+		Output:            io.MultiWriter(writers...),
+		JSONFormat:        config.isFormatJson(),
+	})
+
+	return logger, nil
+}
+
+// ParseLogFormat parses the log format from the provided string.
+func ParseLogFormat(format string) (LogFormat, error) {
+	switch strings.ToLower(strings.TrimSpace(format)) {
+	case "":
+		return UnspecifiedFormat, nil
+	case "standard":
+		return StandardFormat, nil
+	case "json":
+		return JSONFormat, nil
+	default:
+		return UnspecifiedFormat, fmt.Errorf("unknown log format: %s", format)
+	}
+}
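+
+// Editor's note: the sketch below is illustrative only and is not part of the
+// upstream change. It shows how the parse helpers in this file are typically
+// combined when turning raw configuration strings into a LogConfig; the
+// default file name "vault" is an arbitrary example.
+func exampleBuildLogConfig(rawLevel, rawFormat string) (*LogConfig, error) {
+	cfg, err := NewLogConfig("vault")
+	if err != nil {
+		return nil, err
+	}
+	// Both helpers trim whitespace and are case-insensitive.
+	if cfg.LogLevel, err = ParseLogLevel(rawLevel); err != nil {
+		return nil, err
+	}
+	if cfg.LogFormat, err = ParseLogFormat(rawFormat); err != nil {
+		return nil, err
+	}
+	return cfg, nil
+}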
+
+// ParseLogLevel returns the hclog.Level that corresponds with the provided level string.
+// This differs from hclog.LevelFromString in that it supports additional level strings.
+func ParseLogLevel(logLevel string) (hclog.Level, error) {
+	var result hclog.Level
+	logLevel = strings.ToLower(strings.TrimSpace(logLevel))
+
+	switch logLevel {
+	case "trace":
+		result = hclog.Trace
+	case "debug":
+		result = hclog.Debug
+	case "notice", "info", "":
+		result = hclog.Info
+	case "warn", "warning":
+		result = hclog.Warn
+	case "err", "error":
+		result = hclog.Error
+	default:
+		return -1, fmt.Errorf("unknown log level: %s", logLevel)
+	}
+
+	return result, nil
+}
+
+// TranslateLoggerLevel returns the string that corresponds with the logging level of the hclog.Logger.
+func TranslateLoggerLevel(logger hclog.Logger) (string, error) {
+	logLevel := logger.GetLevel()
+
+	switch logLevel {
+	case hclog.Trace, hclog.Debug, hclog.Info, hclog.Warn, hclog.Error:
+		return logLevel.String(), nil
+	default:
+		return "", fmt.Errorf("unknown log level")
+	}
+}
diff --git a/helper/logging/logger_test.go b/helper/logging/logger_test.go
new file mode 100644
index 0000000..323c761
--- /dev/null
+++ b/helper/logging/logger_test.go
@@ -0,0 +1,285 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package logging
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/hashicorp/go-hclog"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestLogger_SetupBasic(t *testing.T) {
+	cfg := newTestLogConfig(t)
+	cfg.LogLevel = hclog.Info
+
+	logger, err := Setup(cfg, nil)
+	require.NoError(t, err)
+	require.NotNil(t, logger)
+	require.Equal(t, logger.Name(), "test-system")
+	require.True(t, logger.IsInfo())
+}
+
+func TestLogger_SetupInvalidLogLevel(t *testing.T) {
+	cfg := newTestLogConfig(t)
+
+	_, err := Setup(cfg, nil)
+	assert.Containsf(t, err.Error(), "invalid log level", "expected error %s", err)
+}
+
+func TestLogger_SetupLoggerErrorLevel(t *testing.T) {
+	cfg := newTestLogConfig(t)
+	cfg.LogLevel = hclog.Error
+
+	var buf bytes.Buffer
+
+	logger, err := Setup(cfg, &buf)
+	require.NoError(t, err)
+	require.NotNil(t, logger)
+
+	logger.Error("test error msg")
+	logger.Info("test info msg")
+
+	output := buf.String()
+
+	require.Contains(t, output, "[ERROR] test-system: test error msg")
+	require.NotContains(t, output, "[INFO] test-system: test info msg")
+}
+
+func TestLogger_SetupLoggerDebugLevel(t *testing.T) {
+	cfg := newTestLogConfig(t)
+	cfg.LogLevel = hclog.Debug
+	var buf bytes.Buffer
+
+	logger, err := Setup(cfg, &buf)
+	require.NoError(t, err)
+	require.NotNil(t, logger)
+
+	logger.Info("test info msg")
+	logger.Debug("test debug msg")
+
+	output := buf.String()
+
+	require.Contains(t, output, "[INFO] test-system: test info msg")
+	require.Contains(t, output, "[DEBUG] test-system: test debug msg")
+}
+
+func TestLogger_SetupLoggerWithoutName(t *testing.T) {
+	cfg := newTestLogConfig(t)
+	cfg.Name = ""
+	cfg.LogLevel = hclog.Info
+	var buf bytes.Buffer
+
+	logger, err := Setup(cfg, &buf)
+	require.NoError(t, err)
+	require.NotNil(t, logger)
+
+	logger.Warn("test warn msg")
+
+	require.Contains(t, buf.String(), "[WARN] test warn msg")
+}
+
+func TestLogger_SetupLoggerWithJSON(t *testing.T) {
+	cfg := newTestLogConfig(t)
+	cfg.LogLevel = hclog.Debug
+	cfg.LogFormat = JSONFormat
+	var buf bytes.Buffer
+
+	logger, err := Setup(cfg, &buf)
+	require.NoError(t, err)
+	require.NotNil(t, logger)
+
+	logger.Warn("test warn msg")
+
+	var jsonOutput map[string]string
+	err = json.Unmarshal(buf.Bytes(), &jsonOutput)
+	require.NoError(t, err)
+	require.Contains(t, jsonOutput, "@level")
+	require.Equal(t, jsonOutput["@level"], "warn")
+	require.Contains(t, jsonOutput, "@message")
+	require.Equal(t, jsonOutput["@message"], "test warn msg")
+}
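+
+// Editor's note: the sketch below is illustrative only and is not part of the
+// upstream change. It shows the minimal wiring a caller needs to get a
+// rotating file logger out of Setup; the "vault" name, the 10 MiB limit and
+// the archive count of 3 are arbitrary examples.
+func exampleFileLoggerSetup(dir string) (hclog.InterceptLogger, error) {
+	cfg, err := NewLogConfig("vault")
+	if err != nil {
+		return nil, err
+	}
+	cfg.Name = "vault"
+	cfg.LogLevel = hclog.Info
+	cfg.LogFilePath = filepath.Join(dir, "vault.log")
+	cfg.LogRotateBytes = 10 * 1024 * 1024 // rotate after ~10 MiB is written
+	cfg.LogRotateMaxFiles = 3             // keep only the three newest archives
+
+	// Logs go both to stderr and to the rotating file.
+	return Setup(cfg, os.Stderr)
+}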
+
+func TestLogger_SetupLoggerWithValidLogPathMissingFileName(t *testing.T) {
+	tmpDir := t.TempDir()
+	cfg := newTestLogConfig(t)
+	cfg.LogLevel = hclog.Info
+	cfg.LogFilePath = tmpDir + "/" // add the trailing slash to the temp dir
+	var buf bytes.Buffer
+
+	logger, err := Setup(cfg, &buf)
+	require.NoError(t, err)
+	require.NotNil(t, logger)
+
+	logger.Info("juan?")
+
+	m, err := filepath.Glob(cfg.LogFilePath + "*")
+	require.NoError(t, err)
+	require.Truef(t, len(m) == 1, "no files were found")
+}
+
+func TestLogger_SetupLoggerWithValidLogPathFileName(t *testing.T) {
+	tmpDir := t.TempDir()
+	cfg := newTestLogConfig(t)
+	cfg.LogLevel = hclog.Info
+	cfg.LogFilePath = filepath.Join(tmpDir, "juan.log")
+	var buf bytes.Buffer
+
+	logger, err := Setup(cfg, &buf)
+	require.NoError(t, err)
+	require.NotNil(t, logger)
+
+	logger.Info("juan?")
+	f, err := os.Stat(cfg.LogFilePath)
+	require.NoError(t, err)
+	require.NotNil(t, f)
+}
+
+func TestLogger_SetupLoggerWithValidLogPathFileNameRotate(t *testing.T) {
+	tmpDir := t.TempDir()
+	cfg := newTestLogConfig(t)
+	cfg.LogLevel = hclog.Info
+	cfg.LogFilePath = filepath.Join(tmpDir, "juan.log")
+	cfg.LogRotateBytes = 1 // set a tiny number of bytes to force rotation
+	var buf bytes.Buffer
+
+	logger, err := Setup(cfg, &buf)
+	require.NoError(t, err)
+	require.NotNil(t, logger)
+
+	logger.Info("juan?")
+	logger.Info("john?")
+	f, err := os.Stat(cfg.LogFilePath)
+	require.NoError(t, err)
+	require.NotNil(t, f)
+	m, err := filepath.Glob(tmpDir + "/juan-*") // look for juan-{timestamp}.log
+	require.NoError(t, err)
+	require.Truef(t, len(m) == 1, "no files were found")
+}
+
+func TestLogger_SetupLoggerWithValidLogPath(t *testing.T) {
+	tmpDir := t.TempDir()
+	cfg := newTestLogConfig(t)
+	cfg.LogLevel = hclog.Info
+	cfg.LogFilePath = tmpDir + "/" // add the trailing slash to the temp dir
+	var buf bytes.Buffer
+
+	logger, err := Setup(cfg, &buf)
+	require.NoError(t, err)
+	require.NotNil(t, logger)
+}
+
+func TestLogger_SetupLoggerWithInValidLogPath(t *testing.T) {
+	cfg := newTestLogConfig(t)
+	cfg.LogLevel = hclog.Info
+	cfg.LogFilePath = "nonexistentdir/"
+	var buf bytes.Buffer
+
+	logger, err := Setup(cfg, &buf)
+	require.Error(t, err)
+	require.True(t, errors.Is(err, os.ErrNotExist))
+	require.Nil(t, logger)
+}
+
+func TestLogger_SetupLoggerWithInValidLogPathPermission(t *testing.T) {
+	tmpDir := "/tmp/" + t.Name()
+
+	err := os.Mkdir(tmpDir, 0o000)
+	assert.NoError(t, err, "unexpected error testing with invalid log path permission")
+	defer os.RemoveAll(tmpDir)
+
+	cfg := newTestLogConfig(t)
+	cfg.LogLevel = hclog.Info
+	cfg.LogFilePath = tmpDir + "/"
+	var buf bytes.Buffer
+
+	logger, err := Setup(cfg, &buf)
+	require.Error(t, err)
+	require.True(t, errors.Is(err, os.ErrPermission))
+	require.Nil(t, logger)
+}
+
+func TestLogger_SetupLoggerWithInvalidLogFilePath(t *testing.T) {
+	cases := map[string]struct {
+		path    string
+		message string
+	}{
+		"file name *": {
+			path:    "/this/isnt/ok/juan*.log",
+			message: "file name contains globbing character",
+		},
+		"file name ?": {
+			path:    "/this/isnt/ok/juan?.log",
+			message: "file name contains globbing character",
+		},
+		"file name [": {
+			path:    "/this/isnt/ok/[juan].log",
+			message: "file name contains globbing character",
+		},
+		"directory path *": {
+			path:    "/this/isnt/ok/*/qwerty.log",
+			message:
"directory contains glob character", + }, + "directory path ?": { + path: "/this/isnt/ok/?/qwerty.log", + message: "directory contains glob character", + }, + "directory path [": { + path: "/this/isnt/ok/[foo]/qwerty.log", + message: "directory contains glob character", + }, + } + + for name, tc := range cases { + name := name + tc := tc + cfg := newTestLogConfig(t) + cfg.LogLevel = hclog.Info + cfg.LogFilePath = tc.path + + _, err := Setup(cfg, &bytes.Buffer{}) + assert.Error(t, err, "%s: expected error due to *", name) + assert.Contains(t, err.Error(), tc.message, "%s: error message does not match: %s", name, err.Error()) + } +} + +func TestLogger_ChangeLogLevels(t *testing.T) { + cfg := newTestLogConfig(t) + cfg.LogLevel = hclog.Debug + var buf bytes.Buffer + + logger, err := Setup(cfg, &buf) + require.NoError(t, err) + require.NotNil(t, logger) + + assert.Equal(t, hclog.Debug, logger.GetLevel()) + + // Create new named loggers from the base logger and change the levels + logger2 := logger.Named("test2") + logger3 := logger.Named("test3") + + logger2.SetLevel(hclog.Info) + logger3.SetLevel(hclog.Error) + + assert.Equal(t, hclog.Debug, logger.GetLevel()) + assert.Equal(t, hclog.Info, logger2.GetLevel()) + assert.Equal(t, hclog.Error, logger3.GetLevel()) +} + +func newTestLogConfig(t *testing.T) *LogConfig { + t.Helper() + + cfg, err := NewLogConfig("test") + require.NoError(t, err) + cfg.Name = "test-system" + + return cfg +} diff --git a/helper/metricsutil/bucket.go b/helper/metricsutil/bucket.go new file mode 100644 index 0000000..0f602e2 --- /dev/null +++ b/helper/metricsutil/bucket.go @@ -0,0 +1,60 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package metricsutil + +import ( + "sort" + "time" +) + +var bucketBoundaries = []struct { + Value time.Duration + Label string +}{ + {1 * time.Minute, "1m"}, + {10 * time.Minute, "10m"}, + {20 * time.Minute, "20m"}, + {1 * time.Hour, "1h"}, + {2 * time.Hour, "2h"}, + {24 * time.Hour, "1d"}, + {2 * 24 * time.Hour, "2d"}, + {7 * 24 * time.Hour, "7d"}, + {30 * 24 * time.Hour, "30d"}, +} + +const OverflowBucket = "+Inf" + +// TTLBucket computes the label to apply for a token TTL. +func TTLBucket(ttl time.Duration) string { + upperBound := sort.Search( + len(bucketBoundaries), + func(i int) bool { + return ttl <= bucketBoundaries[i].Value + }, + ) + if upperBound >= len(bucketBoundaries) { + return OverflowBucket + } else { + return bucketBoundaries[upperBound].Label + } +} + +func ExpiryBucket(expiryTime time.Time, leaseEpsilon time.Duration, rollingWindow time.Time, labelNS string, useNS bool) *LeaseExpiryLabel { + if !useNS { + labelNS = "" + } + leaseExpiryLabel := LeaseExpiryLabel{LabelNS: labelNS} + + // calculate rolling window + if expiryTime.Before(rollingWindow) { + leaseExpiryLabel.LabelName = expiryTime.Round(leaseEpsilon).String() + return &leaseExpiryLabel + } + return nil +} + +type LeaseExpiryLabel = struct { + LabelName string + LabelNS string +} diff --git a/helper/metricsutil/bucket_test.go b/helper/metricsutil/bucket_test.go new file mode 100644 index 0000000..1179e4d --- /dev/null +++ b/helper/metricsutil/bucket_test.go @@ -0,0 +1,31 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package metricsutil
+
+import (
+	"testing"
+	"time"
+)
+
+func TestTTLBucket_Lookup(t *testing.T) {
+	testCases := []struct {
+		Input    time.Duration
+		Expected string
+	}{
+		{30 * time.Second, "1m"},
+		{0 * time.Second, "1m"},
+		{2 * time.Hour, "2h"},
+		{2*time.Hour - time.Second, "2h"},
+		{2*time.Hour + time.Second, "1d"},
+		{30 * 24 * time.Hour, "30d"},
+		{31 * 24 * time.Hour, "+Inf"},
+	}
+
+	for _, tc := range testCases {
+		bucket := TTLBucket(tc.Input)
+		if bucket != tc.Expected {
+			t.Errorf("Expected %q, got %q for duration %v.", tc.Expected, bucket, tc.Input)
+		}
+	}
+}
diff --git a/helper/metricsutil/gauge_process.go b/helper/metricsutil/gauge_process.go
new file mode 100644
index 0000000..c6fcd56
--- /dev/null
+++ b/helper/metricsutil/gauge_process.go
@@ -0,0 +1,283 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package metricsutil
+
+import (
+	"context"
+	"math/rand"
+	"sort"
+	"time"
+
+	"github.com/armon/go-metrics"
+	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/helper/timeutil"
+)
+
+// GaugeLabelValues is one gauge in a set of gauges that share a single key
+// and are measured in a batch.
+type GaugeLabelValues struct {
+	Labels []Label
+	Value  float32
+}
+
+// GaugeCollector is a callback function that returns an unfiltered
+// set of label-value pairs. It may be cancelled if it takes too long.
+type GaugeCollector = func(context.Context) ([]GaugeLabelValues, error)
+
+// collectionBound is a hard limit on how long a collection process
+// may take, as a fraction of the current interval.
+const collectionBound = 0.02
+
+// collectionTarget is a soft limit; if exceeded, the collection interval
+// will be doubled.
+const collectionTarget = 0.01
+
+// A GaugeCollectionProcess is responsible for one particular gauge metric.
+// It handles a delay on initial startup, limits the cardinality, and applies
+// exponential backoff to the requested interval.
+type GaugeCollectionProcess struct {
+	stop    chan struct{}
+	stopped chan struct{}
+
+	// gauge name
+	key []string
+	// labels to use when reporting
+	labels []Label
+
+	// callback function
+	collector GaugeCollector
+
+	// destination for metrics
+	sink   Metrics
+	logger log.Logger
+
+	// time between collections
+	originalInterval time.Duration
+	currentInterval  time.Duration
+	ticker           *time.Ticker
+
+	// used to help limit cardinality
+	maxGaugeCardinality int
+
+	// time source
+	clock timeutil.Clock
+}
+
+// NewGaugeCollectionProcess creates a new collection process for the callback
+// function given as an argument.
+// A label should be provided for metrics *about* this collection process.
+//
+// The Run() method must be called to start the process.
+func NewGaugeCollectionProcess(
+	key []string,
+	id []Label,
+	collector GaugeCollector,
+	m metrics.MetricSink,
+	gaugeInterval time.Duration,
+	maxGaugeCardinality int,
+	logger log.Logger,
+) (*GaugeCollectionProcess, error) {
+	return newGaugeCollectionProcessWithClock(
+		key,
+		id,
+		collector,
+		SinkWrapper{MetricSink: m},
+		gaugeInterval,
+		maxGaugeCardinality,
+		logger,
+		timeutil.DefaultClock{},
+	)
+}
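+
+// Editor's note: the sketch below is illustrative only and is not part of the
+// upstream change. It shows the shape of a collector callback and how a
+// process is typically created and driven; the key, label, and gauge value
+// are arbitrary examples.
+func exampleGaugeCollection(sink *ClusterMetricSink, logger log.Logger) (*GaugeCollectionProcess, error) {
+	collector := func(ctx context.Context) ([]GaugeLabelValues, error) {
+		// A real collector would inspect state here and honor ctx cancellation.
+		return []GaugeLabelValues{
+			{Labels: []Label{{Name: "namespace", Value: "root"}}, Value: 42},
+		}, nil
+	}
+	p, err := sink.NewGaugeCollectionProcess(
+		[]string{"example", "count"},
+		[]Label{{Name: "gauge", Value: "example"}},
+		collector,
+		logger,
+	)
+	if err != nil {
+		return nil, err
+	}
+	go p.Run() // Run blocks until Stop() shuts the process down.
+	return p, nil
+}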
+
+// NewGaugeCollectionProcess creates a new collection process for the callback
+// function given as an argument, using the sink's configured interval and
+// cardinality limit.
+// A label should be provided for metrics *about* this collection process.
+//
+// The Run() method must be called to start the process.
+func (m *ClusterMetricSink) NewGaugeCollectionProcess(
+	key []string,
+	id []Label,
+	collector GaugeCollector,
+	logger log.Logger,
+) (*GaugeCollectionProcess, error) {
+	return newGaugeCollectionProcessWithClock(
+		key,
+		id,
+		collector,
+		m,
+		m.GaugeInterval,
+		m.MaxGaugeCardinality,
+		logger,
+		timeutil.DefaultClock{},
+	)
+}
+
+// newGaugeCollectionProcessWithClock is the test version that allows an
+// alternative clock implementation to be injected.
+func newGaugeCollectionProcessWithClock(
+	key []string,
+	id []Label,
+	collector GaugeCollector,
+	sink Metrics,
+	gaugeInterval time.Duration,
+	maxGaugeCardinality int,
+	logger log.Logger,
+	clock timeutil.Clock,
+) (*GaugeCollectionProcess, error) {
+	process := &GaugeCollectionProcess{
+		stop:                make(chan struct{}, 1),
+		stopped:             make(chan struct{}, 1),
+		key:                 key,
+		labels:              id,
+		collector:           collector,
+		sink:                sink,
+		originalInterval:    gaugeInterval,
+		currentInterval:     gaugeInterval,
+		maxGaugeCardinality: maxGaugeCardinality,
+		logger:              logger,
+		clock:               clock,
+	}
+	return process, nil
+}
+
+// delayStart randomly delays by up to one extra interval
+// so that collection processes do not all run at the same time.
+// If we knew all the processes in advance, we could just schedule them
+// evenly, but a new one could be added per secret engine.
+func (p *GaugeCollectionProcess) delayStart() bool {
+	randomDelay := time.Duration(rand.Int63n(int64(p.currentInterval)))
+	// A Timer might be better, but then we'd have to simulate
+	// one of those too?
+	delayTick := p.clock.NewTicker(randomDelay)
+	defer delayTick.Stop()
+
+	select {
+	case <-p.stop:
+		return true
+	case <-delayTick.C:
+		break
+	}
+	return false
+}
+
+// resetTicker stops the old ticker and starts a new one at the current
+// interval setting.
+func (p *GaugeCollectionProcess) resetTicker() {
+	if p.ticker != nil {
+		p.ticker.Stop()
+	}
+	p.ticker = p.clock.NewTicker(p.currentInterval)
+}
+
+// collectAndFilterGauges executes the callback function,
+// limits the cardinality, and streams the results to the metrics sink.
+func (p *GaugeCollectionProcess) collectAndFilterGauges() {
+	// Run for only an allotted amount of time.
+	timeout := time.Duration(collectionBound * float64(p.currentInterval))
+	ctx, cancel := context.WithTimeout(context.Background(),
+		timeout)
+	defer cancel()
+
+	p.sink.AddDurationWithLabels([]string{"metrics", "collection", "interval"},
+		p.currentInterval,
+		p.labels)
+
+	start := p.clock.Now()
+	values, err := p.collector(ctx)
+	end := p.clock.Now()
+	duration := end.Sub(start)
+
+	// Report how long it took to perform the operation.
+	p.sink.AddDurationWithLabels([]string{"metrics", "collection"},
+		duration,
+		p.labels)
+
+	// If over threshold, back off by doubling the measurement interval.
+	// Currently a restart is the only way to bring it back down.
+	threshold := time.Duration(collectionTarget * float64(p.currentInterval))
+	if duration > threshold {
+		p.logger.Warn("gauge collection time exceeded target", "target", threshold, "actual", duration, "id", p.labels)
+		p.currentInterval *= 2
+		p.resetTicker()
+	}
+
+	if err != nil {
+		p.logger.Error("error collecting gauge", "id", p.labels, "error", err)
+		p.sink.IncrCounterWithLabels([]string{"metrics", "collection", "error"},
+			1,
+			p.labels)
+		return
+	}
+
+	// Filter to top N.
+	// This does not guarantee total cardinality is <= N, but it does slow things down
+	// a little if the cardinality *is* too high and the gauge needs to be disabled.
+ if len(values) > p.maxGaugeCardinality { + sort.Slice(values, func(a, b int) bool { + return values[a].Value > values[b].Value + }) + values = values[:p.maxGaugeCardinality] + } + + p.streamGaugesToSink(values) +} + +// batchSize is the number of metrics to be sent per tick duration. +const batchSize = 25 + +func (p *GaugeCollectionProcess) streamGaugesToSink(values []GaugeLabelValues) { + // Dumping 500 metrics in one big chunk is somewhat unfriendly to UDP-based + // transport, and to the rest of the metrics trying to get through. + // Let's smooth things out over the course of a second. + // 1 second / 500 = 2 ms each, so we can send 25 (batchSize) per 50 milliseconds. + // That should be one or two packets. + sendTick := p.clock.NewTicker(50 * time.Millisecond) + defer sendTick.Stop() + + for i, lv := range values { + if i > 0 && i%batchSize == 0 { + select { + case <-p.stop: + // because the channel is closed, + // the main loop will successfully + // read from p.stop too, and exit. + return + case <-sendTick.C: + break + } + } + p.sink.SetGaugeWithLabels(p.key, lv.Value, lv.Labels) + } +} + +// Run should be called as a goroutine. +func (p *GaugeCollectionProcess) Run() { + defer close(p.stopped) + + // Wait a random amount of time + stopReceived := p.delayStart() + if stopReceived { + return + } + + // Create a ticker to start each cycle + p.resetTicker() + + // Loop until we get a signal to stop + for { + select { + case <-p.ticker.C: + p.collectAndFilterGauges() + case <-p.stop: + // Can't use defer because this might + // not be the original ticker. + p.ticker.Stop() + return + } + } +} + +// Stop the collection process +func (p *GaugeCollectionProcess) Stop() { + close(p.stop) +} diff --git a/helper/metricsutil/gauge_process_test.go b/helper/metricsutil/gauge_process_test.go new file mode 100644 index 0000000..efd74e7 --- /dev/null +++ b/helper/metricsutil/gauge_process_test.go @@ -0,0 +1,579 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package metricsutil + +import ( + "context" + "errors" + "fmt" + "math/rand" + "reflect" + "sync/atomic" + "testing" + "time" + + "github.com/armon/go-metrics" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/timeutil" +) + +// SimulatedTime maintains a virtual clock so the test isn't +// dependent upon real time. +// Unfortunately there is no way to run these tests in parallel +// since they rely on the same global timeNow function. +type SimulatedTime struct { + now time.Time + tickerBarrier chan *SimulatedTicker + timeutil.DefaultClock +} + +var _ timeutil.Clock = &SimulatedTime{} + +type SimulatedTicker struct { + ticker *time.Ticker + duration time.Duration + sender chan time.Time +} + +func (s *SimulatedTime) Now() time.Time { + return s.now +} + +func (s *SimulatedTime) NewTicker(d time.Duration) *time.Ticker { + // Create a real ticker, but set its duration to an amount that will never fire for real. + // We'll inject times into the channel directly. + replacementChannel := make(chan time.Time) + t := time.NewTicker(1000 * time.Hour) + t.C = replacementChannel + s.tickerBarrier <- &SimulatedTicker{t, d, replacementChannel} + return t +} + +func (s *SimulatedTime) waitForTicker(t *testing.T) *SimulatedTicker { + t.Helper() + // System under test should create a ticker within 100ms, + // wait for it to show up or else fail the test. 
+	timeout := time.After(100 * time.Millisecond)
+	select {
+	case <-timeout:
+		t.Fatal("Timeout waiting for ticker creation.")
+		return nil
+	case st := <-s.tickerBarrier:
+		return st
+	}
+}
+
+func (s *SimulatedTime) allowTickers(n int) {
+	s.tickerBarrier = make(chan *SimulatedTicker, n)
+}
+
+func startSimulatedTime() *SimulatedTime {
+	s := &SimulatedTime{
+		now:           time.Now(),
+		tickerBarrier: make(chan *SimulatedTicker, 1),
+	}
+	return s
+}
+
+type SimulatedCollector struct {
+	numCalls    uint32
+	callBarrier chan uint32
+}
+
+func newSimulatedCollector() *SimulatedCollector {
+	return &SimulatedCollector{
+		numCalls:    0,
+		callBarrier: make(chan uint32, 1),
+	}
+}
+
+func (s *SimulatedCollector) waitForCall(t *testing.T) {
+	timeout := time.After(100 * time.Millisecond)
+	select {
+	case <-timeout:
+		t.Fatal("Timeout waiting for call to collection function.")
+		return
+	case <-s.callBarrier:
+		return
+	}
+}
+
+func (s *SimulatedCollector) EmptyCollectionFunction(ctx context.Context) ([]GaugeLabelValues, error) {
+	atomic.AddUint32(&s.numCalls, 1)
+	s.callBarrier <- s.numCalls
+	return []GaugeLabelValues{}, nil
+}
+
+func TestGauge_Creation(t *testing.T) {
+	c := newSimulatedCollector()
+	sink := BlackholeSink()
+	sink.GaugeInterval = 33 * time.Minute
+
+	key := []string{"example", "count"}
+	labels := []Label{{"gauge", "test"}}
+
+	p, err := sink.NewGaugeCollectionProcess(
+		key,
+		labels,
+		c.EmptyCollectionFunction,
+		log.Default(),
+	)
+	if err != nil {
+		t.Fatalf("Error creating collection process: %v", err)
+	}
+
+	if _, ok := p.clock.(timeutil.DefaultClock); !ok {
+		t.Error("Default clock not installed.")
+	}
+
+	if !reflect.DeepEqual(p.key, key) {
+		t.Errorf("Key not initialized, got %v but expected %v",
+			p.key, key)
+	}
+
+	if !reflect.DeepEqual(p.labels, labels) {
+		t.Errorf("Labels not initialized, got %v but expected %v",
+			p.labels, labels)
+	}
+
+	if p.originalInterval != sink.GaugeInterval || p.currentInterval != sink.GaugeInterval {
+		t.Errorf("Intervals not initialized, got %v and %v, expected %v",
+			p.originalInterval, p.currentInterval, sink.GaugeInterval)
+	}
+}
+
+func TestGauge_StartDelay(t *testing.T) {
+	// Work through an entire startup sequence, up to collecting
+	// the first batch of gauges.
+	s := startSimulatedTime()
+	c := newSimulatedCollector()
+
+	sink := BlackholeSink()
+	sink.GaugeInterval = 2 * time.Hour
+
+	p, err := newGaugeCollectionProcessWithClock(
+		[]string{"example", "count"},
+		[]Label{{"gauge", "test"}},
+		c.EmptyCollectionFunction,
+		sink,
+		sink.GaugeInterval,
+		sink.MaxGaugeCardinality,
+		log.Default(),
+		s,
+	)
+	if err != nil {
+		t.Fatalf("Error creating collection process: %v", err)
+	}
+	go p.Run()
+
+	delayTicker := s.waitForTicker(t)
+	if delayTicker.duration > sink.GaugeInterval {
+		t.Errorf("Delayed start %v is more than interval %v.",
+			delayTicker.duration, sink.GaugeInterval)
+	}
+	if c.numCalls > 0 {
+		t.Error("Collection function has been called")
+	}
+
+	// Signal the end of delay, then another ticker should start
+	delayTicker.sender <- time.Now()
+
+	intervalTicker := s.waitForTicker(t)
+	if intervalTicker.duration != sink.GaugeInterval {
+		t.Errorf("Ticker duration is %v, expected %v",
+			intervalTicker.duration, sink.GaugeInterval)
+	}
+	if c.numCalls > 0 {
+		t.Error("Collection function has been called")
+	}
+
+	// Time's up, ensure the collection function is executed.
+	intervalTicker.sender <- time.Now()
+	c.waitForCall(t)
+	if c.numCalls != 1 {
+		t.Errorf("Collection function called %v times, expected %v.", c.numCalls, 1)
+	}
+
+	p.Stop()
+}
+
+func waitForStopped(t *testing.T, p *GaugeCollectionProcess) {
+	t.Helper()
+	timeout := time.After(100 * time.Millisecond)
+	select {
+	case <-timeout:
+		t.Fatal("Timeout waiting for process to stop.")
+	case <-p.stopped:
+		return
+	}
+}
+
+func TestGauge_StoppedDuringInitialDelay(t *testing.T) {
+	// Stop the process before it gets into its main loop
+	s := startSimulatedTime()
+	c := newSimulatedCollector()
+
+	sink := BlackholeSink()
+	sink.GaugeInterval = 2 * time.Hour
+
+	p, err := newGaugeCollectionProcessWithClock(
+		[]string{"example", "count"},
+		[]Label{{"gauge", "test"}},
+		c.EmptyCollectionFunction,
+		sink,
+		sink.GaugeInterval,
+		sink.MaxGaugeCardinality,
+		log.Default(),
+		s,
+	)
+	if err != nil {
+		t.Fatalf("Error creating collection process: %v", err)
+	}
+	go p.Run()
+
+	// Stop during the initial delay, check that goroutine exits
+	s.waitForTicker(t)
+	p.Stop()
+	waitForStopped(t, p)
+}
+
+func TestGauge_StoppedAfterInitialDelay(t *testing.T) {
+	// Stop the process during its main loop
+	s := startSimulatedTime()
+	c := newSimulatedCollector()
+
+	sink := BlackholeSink()
+	sink.GaugeInterval = 2 * time.Hour
+
+	p, err := newGaugeCollectionProcessWithClock(
+		[]string{"example", "count"},
+		[]Label{{"gauge", "test"}},
+		c.EmptyCollectionFunction,
+		sink,
+		sink.GaugeInterval,
+		sink.MaxGaugeCardinality,
+		log.Default(),
+		s,
+	)
+	if err != nil {
+		t.Fatalf("Error creating collection process: %v", err)
+	}
+	go p.Run()
+
+	// Get through initial delay, wait for interval ticker
+	delayTicker := s.waitForTicker(t)
+	delayTicker.sender <- time.Now()
+
+	s.waitForTicker(t)
+	p.Stop()
+	waitForStopped(t, p)
+}
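+
+// Editor's note: the sketch below is illustrative only and is not part of the
+// upstream change. It works through the backoff arithmetic exercised by the
+// backoff test that follows: with a 2h interval, collection is cut off after
+// 2% (collectionBound) of the interval, and the interval doubles once a
+// collection takes more than 1% (collectionTarget) of it.
+func exampleBackoffThresholds() (bound, target time.Duration) {
+	interval := 2 * time.Hour
+	bound = time.Duration(collectionBound * float64(interval))   // 2.4 minutes
+	target = time.Duration(collectionTarget * float64(interval)) // 1.2 minutes
+	return bound, target
+}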
+
+func TestGauge_Backoff(t *testing.T) {
+	s := startSimulatedTime()
+	s.allowTickers(100)
+
+	c := newSimulatedCollector()
+
+	sink := BlackholeSink()
+	sink.GaugeInterval = 2 * time.Hour
+
+	threshold := sink.GaugeInterval / 100
+	f := func(ctx context.Context) ([]GaugeLabelValues, error) {
+		atomic.AddUint32(&c.numCalls, 1)
+		// Move time forward by more than 1% of the gauge interval
+		s.now = s.now.Add(threshold).Add(time.Second)
+		c.callBarrier <- c.numCalls
+		return []GaugeLabelValues{}, nil
+	}
+
+	p, err := newGaugeCollectionProcessWithClock(
+		[]string{"example", "count"},
+		[]Label{{"gauge", "test"}},
+		f,
+		sink,
+		sink.GaugeInterval,
+		sink.MaxGaugeCardinality,
+		log.Default(),
+		s,
+	)
+	if err != nil {
+		t.Fatalf("Error creating collection process: %v", err)
+	}
+	// Do not run; we're just going to call an internal function.
+	p.collectAndFilterGauges()
+
+	if p.currentInterval != 2*p.originalInterval {
+		t.Errorf("Current interval is %v, should be 2x%v.",
+			p.currentInterval,
+			p.originalInterval)
+	}
+}
+
+func TestGauge_RestartTimer(t *testing.T) {
+	s := startSimulatedTime()
+	c := newSimulatedCollector()
+	sink := BlackholeSink()
+	sink.GaugeInterval = 2 * time.Hour
+
+	p, err := newGaugeCollectionProcessWithClock(
+		[]string{"example", "count"},
+		[]Label{{"gauge", "test"}},
+		c.EmptyCollectionFunction,
+		sink,
+		sink.GaugeInterval,
+		sink.MaxGaugeCardinality,
+		log.Default(),
+		s,
+	)
+	if err != nil {
+		t.Fatalf("Error creating collection process: %v", err)
+	}
+
+	p.resetTicker()
+	t1 := s.waitForTicker(t)
+	if t1.duration != p.currentInterval {
+		t.Fatalf("Bad ticker interval, got %v expected %v",
+			t1.duration, p.currentInterval)
+	}
+
+	p.currentInterval = 4 * p.originalInterval
+	p.resetTicker()
+	t2 := s.waitForTicker(t)
+	if t2.duration != p.currentInterval {
+		t.Fatalf("Bad ticker interval, got %v expected %v",
+			t2.duration, p.currentInterval)
+	}
+}
+
+func waitForDone(t *testing.T,
+	tick chan<- time.Time,
+	done <-chan struct{},
+) int {
+	t.Helper()
+	timeout := time.After(500 * time.Millisecond)
+
+	numTicks := 0
+	for {
+		select {
+		case <-timeout:
+			t.Fatal("Timeout waiting for metrics to be sent.")
+		case tick <- time.Now():
+			numTicks += 1
+		case <-done:
+			return numTicks
+		}
+	}
+}
+
+func makeLabels(numLabels int) []GaugeLabelValues {
+	values := make([]GaugeLabelValues, numLabels)
+	for i := range values {
+		values[i].Labels = []Label{
+			{"test", "true"},
+			{"which", fmt.Sprintf("%v", i)},
+		}
+		values[i].Value = float32(i + 1)
+	}
+	return values
+}
+
+func TestGauge_InterruptedStreaming(t *testing.T) {
+	s := startSimulatedTime()
+	// Long bucket time == low chance of crossing interval
+	inmemSink := metrics.NewInmemSink(
+		1000000*time.Hour,
+		2000000*time.Hour)
+
+	sink := NewClusterMetricSink("test", inmemSink)
+	sink.MaxGaugeCardinality = 500
+	sink.GaugeInterval = 2 * time.Hour
+
+	p, err := newGaugeCollectionProcessWithClock(
+		[]string{"example", "count"},
+		[]Label{{"gauge", "test"}},
+		nil, // shouldn't be called
+		sink,
+		sink.GaugeInterval,
+		sink.MaxGaugeCardinality,
+		log.Default(),
+		s,
+	)
+	if err != nil {
+		t.Fatalf("Error creating collection process: %v", err)
+	}
+
+	// We'll queue up at least two batches; only one will be sent
+	// unless we give a ticker.
+	values := makeLabels(75)
+	done := make(chan struct{})
+	go func() {
+		p.streamGaugesToSink(values)
+		close(done)
+	}()
+
+	p.Stop()
+	// a nil channel is never writeable
+	waitForDone(t, nil, done)
+
+	// If we start close to the end of an interval, metrics will
+	// be split across two buckets.
+	intervals := inmemSink.Data()
+	if len(intervals) > 1 {
+		t.Skip("Detected interval crossing.")
+	}
+
+	if len(intervals[0].Gauges) == len(values) {
+		t.Errorf("Found %v gauges, expected fewer.",
+			len(intervals[0].Gauges))
+	}
+}
+
+// makeFunctionForValues is a helper that creates a closure usable as a GaugeCollector.
+func (c *SimulatedCollector) makeFunctionForValues(
+	values []GaugeLabelValues,
+	s *SimulatedTime,
+	advanceTime time.Duration,
+) GaugeCollector {
+	// A function that returns a static list
+	return func(ctx context.Context) ([]GaugeLabelValues, error) {
+		atomic.AddUint32(&c.numCalls, 1)
+		// TODO: this seems like a data race?
+ s.now = s.now.Add(advanceTime) + c.callBarrier <- c.numCalls + return values, nil + } +} + +func TestGauge_MaximumMeasurements(t *testing.T) { + s := startSimulatedTime() + c := newSimulatedCollector() + + // Long bucket time == low chance of crossing interval + inmemSink := metrics.NewInmemSink( + 1000000*time.Hour, + 2000000*time.Hour) + + sink := NewClusterMetricSink("test", inmemSink) + sink.MaxGaugeCardinality = 100 + sink.GaugeInterval = 2 * time.Hour + + // Create a report larger than the default limit + excessGauges := 20 + values := makeLabels(sink.MaxGaugeCardinality + excessGauges) + rand.Shuffle(len(values), func(i, j int) { + values[i], values[j] = values[j], values[i] + }) + + // Advance time by 0.5% of duration + advance := time.Duration(int(0.005 * float32(sink.GaugeInterval))) + p, err := newGaugeCollectionProcessWithClock( + []string{"example", "count"}, + []Label{{"gauge", "test"}}, + c.makeFunctionForValues(values, s, advance), + sink, + sink.GaugeInterval, + sink.MaxGaugeCardinality, + log.Default(), + s, + ) + if err != nil { + t.Fatalf("Error creating collection process: %v", err) + } + + // This needs a ticker in order to do its thing, + // so run it in the background and we'll send the ticks + // from here. + done := make(chan struct{}, 1) + go func() { + p.collectAndFilterGauges() + close(done) + }() + + sendTicker := s.waitForTicker(t) + numTicksSent := waitForDone(t, sendTicker.sender, done) + + // 100 items, one delay after each batchSize (25), means that + // 3 ticks are consumed, so 3 or 4 must be sent. + expectedTicks := sink.MaxGaugeCardinality/batchSize - 1 + if numTicksSent < expectedTicks || numTicksSent > expectedTicks+1 { + t.Errorf("Number of ticks = %v, expected %v.", numTicksSent, expectedTicks) + } + + // If we start close to the end of an interval, metrics will + // be split across two buckets. + intervals := inmemSink.Data() + if len(intervals) > 1 { + t.Skip("Detected interval crossing.") + } + + if len(intervals[0].Gauges) != sink.MaxGaugeCardinality { + t.Errorf("Found %v gauges, expected %v.", + len(intervals[0].Gauges), + sink.MaxGaugeCardinality) + } + + minVal := float32(excessGauges) + for _, v := range intervals[0].Gauges { + if v.Value < minVal { + t.Errorf("Gauge %v with value %v should not have been included.", v.Labels, v.Value) + break + } + } +} + +func TestGauge_MeasurementError(t *testing.T) { + s := startSimulatedTime() + c := newSimulatedCollector() + inmemSink := metrics.NewInmemSink( + 1000000*time.Hour, + 2000000*time.Hour) + sink := NewClusterMetricSink("test", inmemSink) + sink.MaxGaugeCardinality = 500 + sink.GaugeInterval = 2 * time.Hour + + // Create a small report so we don't have to deal with batching. 
+ numGauges := 10 + values := make([]GaugeLabelValues, numGauges) + for i := range values { + values[i].Labels = []Label{ + {"test", "true"}, + {"which", fmt.Sprintf("%v", i)}, + } + values[i].Value = float32(i + 1) + } + + f := func(ctx context.Context) ([]GaugeLabelValues, error) { + atomic.AddUint32(&c.numCalls, 1) + c.callBarrier <- c.numCalls + return values, errors.New("test error") + } + + p, err := newGaugeCollectionProcessWithClock( + []string{"example", "count"}, + []Label{{"gauge", "test"}}, + f, + sink, + sink.GaugeInterval, + sink.MaxGaugeCardinality, + log.Default(), + s, + ) + if err != nil { + t.Fatalf("Error creating collection process: %v", err) + } + + p.collectAndFilterGauges() + + // We should see no data in the sink + intervals := inmemSink.Data() + if len(intervals) > 1 { + t.Skip("Detected interval crossing.") + } + + if len(intervals[0].Gauges) != 0 { + t.Errorf("Found %v gauges, expected %v.", + len(intervals[0].Gauges), 0) + } +} diff --git a/helper/metricsutil/metricsutil.go b/helper/metricsutil/metricsutil.go new file mode 100644 index 0000000..cfc2e11 --- /dev/null +++ b/helper/metricsutil/metricsutil.go @@ -0,0 +1,176 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package metricsutil + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "strings" + "sync" + + "github.com/armon/go-metrics" + "github.com/hashicorp/vault/sdk/logical" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/expfmt" +) + +const ( + OpenMetricsMIMEType = "application/openmetrics-text" + + PrometheusSchemaMIMEType = "prometheus/telemetry" + + // ErrorContentType is the content type returned by an error response. + ErrorContentType = "text/plain" +) + +const ( + PrometheusMetricFormat = "prometheus" +) + +// PhysicalTableSizeName is a set of gauge metric keys for physical mount table sizes +var PhysicalTableSizeName []string = []string{"core", "mount_table", "size"} + +// LogicalTableSizeName is a set of gauge metric keys for logical mount table sizes +var LogicalTableSizeName []string = []string{"core", "mount_table", "num_entries"} + +type MetricsHelper struct { + inMemSink *metrics.InmemSink + PrometheusEnabled bool + LoopMetrics GaugeMetrics +} + +type GaugeMetrics struct { + // Metrics is a map from keys concatenated by "." to the metric. + // It is a map because although we do not care about distinguishing + // these loop metrics during emission, we must distinguish them + // when we update a metric. 
+ Metrics sync.Map +} + +type GaugeMetric struct { + Value float32 + Labels []Label + Key []string +} + +func NewMetricsHelper(inMem *metrics.InmemSink, enablePrometheus bool) *MetricsHelper { + return &MetricsHelper{inMem, enablePrometheus, GaugeMetrics{Metrics: sync.Map{}}} +} + +func FormatFromRequest(req *logical.Request) string { + acceptHeaders := req.Headers["Accept"] + if len(acceptHeaders) > 0 { + acceptHeader := acceptHeaders[0] + if strings.HasPrefix(acceptHeader, OpenMetricsMIMEType) { + return PrometheusMetricFormat + } + + // Look for prometheus accept header + for _, header := range acceptHeaders { + if strings.Contains(header, PrometheusSchemaMIMEType) { + return PrometheusMetricFormat + } + } + } + return "" +} + +func (m *MetricsHelper) AddGaugeLoopMetric(key []string, val float32, labels []Label) { + mapKey := m.CreateMetricsCacheKeyName(key, val, labels) + m.LoopMetrics.Metrics.Store(mapKey, + GaugeMetric{ + Key: key, + Value: val, + Labels: labels, + }) +} + +func (m *MetricsHelper) CreateMetricsCacheKeyName(key []string, val float32, labels []Label) string { + var keyJoin string = strings.Join(key, ".") + labelJoinStr := "" + for _, label := range labels { + labelJoinStr = labelJoinStr + label.Name + "|" + label.Value + "||" + } + keyJoin = keyJoin + "." + labelJoinStr + return keyJoin +} + +func (m *MetricsHelper) ResponseForFormat(format string) *logical.Response { + switch format { + case PrometheusMetricFormat: + return m.PrometheusResponse() + case "": + return m.GenericResponse() + default: + return &logical.Response{ + Data: map[string]interface{}{ + logical.HTTPContentType: ErrorContentType, + logical.HTTPRawBody: fmt.Sprintf("metric response format %q unknown", format), + logical.HTTPStatusCode: http.StatusBadRequest, + }, + } + } +} + +func (m *MetricsHelper) PrometheusResponse() *logical.Response { + resp := &logical.Response{ + Data: map[string]interface{}{ + logical.HTTPContentType: ErrorContentType, + logical.HTTPStatusCode: http.StatusBadRequest, + }, + } + + if !m.PrometheusEnabled { + resp.Data[logical.HTTPRawBody] = "prometheus is not enabled" + return resp + } + metricsFamilies, err := prometheus.DefaultGatherer.Gather() + if err != nil && len(metricsFamilies) == 0 { + resp.Data[logical.HTTPRawBody] = fmt.Sprintf("no prometheus metrics could be decoded: %s", err) + return resp + } + + // Initialize a byte buffer. 
+ buf := &bytes.Buffer{} + defer buf.Reset() + + e := expfmt.NewEncoder(buf, expfmt.FmtText) + for _, mf := range metricsFamilies { + err := e.Encode(mf) + if err != nil { + resp.Data[logical.HTTPRawBody] = fmt.Sprintf("error during the encoding of metrics: %s", err) + return resp + } + } + resp.Data[logical.HTTPContentType] = string(expfmt.FmtText) + resp.Data[logical.HTTPRawBody] = buf.Bytes() + resp.Data[logical.HTTPStatusCode] = http.StatusOK + return resp +} + +func (m *MetricsHelper) GenericResponse() *logical.Response { + resp := &logical.Response{ + Data: map[string]interface{}{ + logical.HTTPContentType: ErrorContentType, + logical.HTTPStatusCode: http.StatusBadRequest, + }, + } + + summary, err := m.inMemSink.DisplayMetrics(nil, nil) + if err != nil { + resp.Data[logical.HTTPRawBody] = fmt.Sprintf("error while fetching the in-memory metrics: %s", err) + return resp + } + content, err := json.Marshal(summary) + if err != nil { + resp.Data[logical.HTTPRawBody] = fmt.Sprintf("error while marshalling the in-memory metrics: %s", err) + return resp + } + resp.Data[logical.HTTPContentType] = "application/json" + resp.Data[logical.HTTPRawBody] = content + resp.Data[logical.HTTPStatusCode] = http.StatusOK + return resp +} diff --git a/helper/metricsutil/metricsutil_test.go b/helper/metricsutil/metricsutil_test.go new file mode 100644 index 0000000..f8f17fe --- /dev/null +++ b/helper/metricsutil/metricsutil_test.go @@ -0,0 +1,49 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package metricsutil + +import ( + "testing" + + "github.com/hashicorp/vault/sdk/logical" +) + +func TestFormatFromRequest(t *testing.T) { + testCases := []struct { + original *logical.Request + expected string + }{ + { + original: &logical.Request{Headers: map[string][]string{ + "Accept": { + "application/vnd.google.protobuf", + "schema=\"prometheus/telemetry\"", + }, + }}, + expected: "prometheus", + }, + { + original: &logical.Request{Headers: map[string][]string{ + "Accept": { + "schema=\"prometheus\"", + }, + }}, + expected: "", + }, + { + original: &logical.Request{Headers: map[string][]string{ + "Accept": { + "application/openmetrics-text", + }, + }}, + expected: "prometheus", + }, + } + + for _, tCase := range testCases { + if metricsType := FormatFromRequest(tCase.original); metricsType != tCase.expected { + t.Fatalf("expected %s but got %s", tCase.expected, metricsType) + } + } +} diff --git a/helper/metricsutil/wrapped_metrics.go b/helper/metricsutil/wrapped_metrics.go new file mode 100644 index 0000000..e3df058 --- /dev/null +++ b/helper/metricsutil/wrapped_metrics.go @@ -0,0 +1,159 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package metricsutil + +import ( + "strings" + "sync/atomic" + "time" + + "github.com/armon/go-metrics" + "github.com/hashicorp/vault/helper/namespace" +) + +// ClusterMetricSink serves as a shim around go-metrics +// and inserts a "cluster" label. +// +// It also provides a mechanism to limit the cardinality of the labels on a gauge +// (at each reporting interval, which isn't sufficient if there is variability in which +// labels are the top N) and a backoff mechanism for gauge computation. +type ClusterMetricSink struct { + // ClusterName is either the cluster ID, or a name provided + // in the telemetry configuration stanza. + // + // Because it may be set after the Core is initialized, we need + // to protect against concurrent access. 
+ ClusterName atomic.Value + + MaxGaugeCardinality int + GaugeInterval time.Duration + + // Sink is the go-metrics instance to send to. + Sink metrics.MetricSink + + // Constants that are helpful for metrics within the metrics sink + TelemetryConsts TelemetryConstConfig +} + +type TelemetryConstConfig struct { + LeaseMetricsEpsilon time.Duration + NumLeaseMetricsTimeBuckets int + LeaseMetricsNameSpaceLabels bool +} + +type Metrics interface { + SetGaugeWithLabels(key []string, val float32, labels []Label) + IncrCounterWithLabels(key []string, val float32, labels []Label) + AddSampleWithLabels(key []string, val float32, labels []Label) + AddDurationWithLabels(key []string, d time.Duration, labels []Label) + MeasureSinceWithLabels(key []string, start time.Time, labels []Label) +} + +var _ Metrics = &ClusterMetricSink{} + +// SinkWrapper implements `metricsutil.Metrics` using an instance of +// armon/go-metrics `MetricSink` as the underlying implementation. +type SinkWrapper struct { + metrics.MetricSink +} + +func (s SinkWrapper) AddDurationWithLabels(key []string, d time.Duration, labels []Label) { + val := float32(d) / float32(time.Millisecond) + s.MetricSink.AddSampleWithLabels(key, val, labels) +} + +func (s SinkWrapper) MeasureSinceWithLabels(key []string, start time.Time, labels []Label) { + elapsed := time.Now().Sub(start) + val := float32(elapsed) / float32(time.Millisecond) + s.MetricSink.AddSampleWithLabels(key, val, labels) +} + +var _ Metrics = SinkWrapper{} + +// Convenience alias +type Label = metrics.Label + +func (m *ClusterMetricSink) SetGauge(key []string, val float32) { + m.Sink.SetGaugeWithLabels(key, val, []Label{{"cluster", m.ClusterName.Load().(string)}}) +} + +func (m *ClusterMetricSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + m.Sink.SetGaugeWithLabels(key, val, + append(labels, Label{"cluster", m.ClusterName.Load().(string)})) +} + +func (m *ClusterMetricSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + m.Sink.IncrCounterWithLabels(key, val, + append(labels, Label{"cluster", m.ClusterName.Load().(string)})) +} + +func (m *ClusterMetricSink) AddSample(key []string, val float32) { + m.Sink.AddSampleWithLabels(key, val, []Label{{"cluster", m.ClusterName.Load().(string)}}) +} + +func (m *ClusterMetricSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + m.Sink.AddSampleWithLabels(key, val, + append(labels, Label{"cluster", m.ClusterName.Load().(string)})) +} + +func (m *ClusterMetricSink) AddDurationWithLabels(key []string, d time.Duration, labels []Label) { + val := float32(d) / float32(time.Millisecond) + m.AddSampleWithLabels(key, val, labels) +} + +func (m *ClusterMetricSink) MeasureSinceWithLabels(key []string, start time.Time, labels []Label) { + elapsed := time.Now().Sub(start) + val := float32(elapsed) / float32(time.Millisecond) + m.AddSampleWithLabels(key, val, labels) +} + +// BlackholeSink is a default suitable for use in unit tests. 
+func BlackholeSink() *ClusterMetricSink { + conf := metrics.DefaultConfig("") + conf.EnableRuntimeMetrics = false + sink, _ := metrics.New(conf, &metrics.BlackholeSink{}) + cms := &ClusterMetricSink{ + ClusterName: atomic.Value{}, + Sink: sink, + } + cms.ClusterName.Store("") + return cms +} + +func NewClusterMetricSink(clusterName string, sink metrics.MetricSink) *ClusterMetricSink { + cms := &ClusterMetricSink{ + ClusterName: atomic.Value{}, + Sink: sink, + TelemetryConsts: TelemetryConstConfig{}, + } + cms.ClusterName.Store(clusterName) + return cms +} + +// SetDefaultClusterName changes the cluster name from its default value, +// if it has not previously been configured. +func (m *ClusterMetricSink) SetDefaultClusterName(clusterName string) { + // This is not a true compare-and-swap, but it should be + // consistent enough for normal uses + if m.ClusterName.Load().(string) == "" { + m.ClusterName.Store(clusterName) + } +} + +// NamespaceLabel creates a metrics label for the given +// Namespace: root is "root"; others are path with the +// final '/' removed. +func NamespaceLabel(ns *namespace.Namespace) metrics.Label { + switch { + case ns == nil: + return metrics.Label{"namespace", "root"} + case ns.ID == namespace.RootNamespaceID: + return metrics.Label{"namespace", "root"} + default: + return metrics.Label{ + "namespace", + strings.Trim(ns.Path, "/"), + } + } +} diff --git a/helper/metricsutil/wrapped_metrics_test.go b/helper/metricsutil/wrapped_metrics_test.go new file mode 100644 index 0000000..b65809b --- /dev/null +++ b/helper/metricsutil/wrapped_metrics_test.go @@ -0,0 +1,113 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package metricsutil + +import ( + "testing" + "time" + + "github.com/armon/go-metrics" +) + +func isLabelPresent(toFind Label, ls []Label) bool { + for _, l := range ls { + if l == toFind { + return true + } + } + return false +} + +// We can use a sink directly, or wrap the top-level +// go-metrics implementation for testing purposes. +func defaultMetrics(sink metrics.MetricSink) *metrics.Metrics { + // No service name + config := metrics.DefaultConfig("") + + // No host name + config.EnableHostname = false + m, _ := metrics.New(config, sink) + return m +} + +func TestClusterLabelPresent(t *testing.T) { + testClusterName := "test-cluster" + + // Use a ridiculously long time to minimize the chance + // that we have to deal with more than one interval. + // InMemSink rounds down to an interval boundary rather than + // starting one at the time of initialization. + inmemSink := metrics.NewInmemSink( + 1000000*time.Hour, + 2000000*time.Hour) + clusterSink := NewClusterMetricSink(testClusterName, defaultMetrics(inmemSink)) + + key1 := []string{"aaa", "bbb"} + key2 := []string{"ccc", "ddd"} + key3 := []string{"eee", "fff"} + labels1 := []Label{{"dim1", "val1"}} + labels2 := []Label{{"dim2", "val2"}} + labels3 := []Label{{"dim3", "val3"}} + clusterLabel := Label{"cluster", testClusterName} + expectedKey1 := "aaa.bbb;dim1=val1;cluster=" + testClusterName + expectedKey2 := "ccc.ddd;dim2=val2;cluster=" + testClusterName + expectedKey3 := "eee.fff;dim3=val3;cluster=" + testClusterName + + clusterSink.SetGaugeWithLabels(key1, 1.0, labels1) + clusterSink.IncrCounterWithLabels(key2, 2.0, labels2) + clusterSink.AddSampleWithLabels(key3, 3.0, labels3) + + intervals := inmemSink.Data() + // If we start very close to the end of an interval, then our metrics might be + // split across two different buckets. 
+	// We won't write the code to try to handle that.
+	// 1000000 hours = at most one interval crossing every ~41,667 days
+	if len(intervals) > 1 {
+		t.Skip("Detected interval crossing.")
+	}
+
+	// Check Gauge
+	g, ok := intervals[0].Gauges[expectedKey1]
+	if !ok {
+		t.Fatal("Key", expectedKey1, "not found in map", intervals[0].Gauges)
+	}
+	if g.Value != 1.0 {
+		t.Error("Gauge value", g.Value, "does not match", 1.0)
+	}
+	if !isLabelPresent(labels1[0], g.Labels) {
+		t.Error("Gauge label", g.Labels, "does not include", labels1)
+	}
+	if !isLabelPresent(clusterLabel, g.Labels) {
+		t.Error("Gauge label", g.Labels, "does not include", clusterLabel)
+	}
+
+	// Check Counter
+	c, ok := intervals[0].Counters[expectedKey2]
+	if !ok {
+		t.Fatal("Key", expectedKey2, "not found in map", intervals[0].Counters)
+	}
+	if c.Sum != 2.0 {
+		t.Error("Counter value", c.Sum, "does not match", 2.0)
+	}
+	if !isLabelPresent(labels2[0], c.Labels) {
+		t.Error("Counter label", c.Labels, "does not include", labels2)
+	}
+	if !isLabelPresent(clusterLabel, c.Labels) {
+		t.Error("Counter label", c.Labels, "does not include", clusterLabel)
+	}
+
+	// Check Sample
+	s, ok := intervals[0].Samples[expectedKey3]
+	if !ok {
+		t.Fatal("Key", expectedKey3, "not found in map", intervals[0].Samples)
+	}
+	if s.Sum != 3.0 {
+		t.Error("Sample value", s.Sum, "does not match", 3.0)
+	}
+	if !isLabelPresent(labels3[0], s.Labels) {
+		t.Error("Sample label", s.Labels, "does not include", labels3)
+	}
+	if !isLabelPresent(clusterLabel, s.Labels) {
+		t.Error("Sample label", s.Labels, "does not include", clusterLabel)
+	}
+}
diff --git a/helper/monitor/monitor.go b/helper/monitor/monitor.go
new file mode 100644
index 0000000..28ecf0e
--- /dev/null
+++ b/helper/monitor/monitor.go
@@ -0,0 +1,169 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package monitor
+
+import (
+	"fmt"
+	"time"
+
+	log "github.com/hashicorp/go-hclog"
+	"go.uber.org/atomic"
+)
+
+// Monitor provides a mechanism to stream logs using go-hclog
+// InterceptLogger and SinkAdapter. It allows streaming of logs
+// at a different log level than what is set on the logger.
+type Monitor interface {
+	// Start returns a channel of log messages which are sent
+	// every time a log message occurs
+	Start() <-chan []byte
+
+	// Stop de-registers the sink from the InterceptLogger
+	// and closes the log channels
+	Stop()
+}
+
+// monitor implements the Monitor interface. Note that this
+// struct is not threadsafe.
+type monitor struct {
+	sink log.SinkAdapter
+
+	// logger is the logger we will be monitoring
+	logger log.InterceptLogger
+
+	// logCh is a buffered chan where we send logs when streaming
+	logCh chan []byte
+
+	// doneCh coordinates the shutdown of logCh
+	doneCh chan struct{}
+
+	// droppedCount is the current count of messages
+	// that were dropped from the logCh buffer.
+	droppedCount *atomic.Uint32
+	bufSize      int
+
+	// dropCheckInterval is the amount of time we should
+	// wait to check for dropped messages. Defaults
+	// to 3 seconds.
+	dropCheckInterval time.Duration
+
+	// started is whether the monitor has been started or not.
+	// This is to ensure that we don't start it again until
+	// it has been shut down.
+	started *atomic.Bool
+}
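+
+// Editor's note: the sketch below is illustrative only and is not part of the
+// upstream change. It shows the intended lifecycle: wrap an InterceptLogger,
+// Start the monitor, drain the returned channel, and Stop when done. The
+// buffer size of 512 is an arbitrary example.
+func exampleStreamLogs(logger log.InterceptLogger) error {
+	m, err := NewMonitor(512, logger, &log.LoggerOptions{Level: log.Debug})
+	if err != nil {
+		return err
+	}
+	logCh := m.Start()
+	defer m.Stop()
+
+	for i := 0; i < 3; i++ {
+		logger.Debug("example log line")
+		if msg, ok := <-logCh; ok {
+			fmt.Printf("streamed: %s", msg)
+		}
+	}
+	return nil
+}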
+func NewMonitor(buf int, logger log.InterceptLogger, opts *log.LoggerOptions) (Monitor, error) {
+	return newMonitor(buf, logger, opts)
+}
+
+func newMonitor(buf int, logger log.InterceptLogger, opts *log.LoggerOptions) (*monitor, error) {
+	if buf <= 0 {
+		return nil, fmt.Errorf("buf must be greater than zero")
+	}
+
+	sw := &monitor{
+		logger:            logger,
+		logCh:             make(chan []byte, buf),
+		doneCh:            make(chan struct{}),
+		bufSize:           buf,
+		dropCheckInterval: 3 * time.Second,
+		droppedCount:      atomic.NewUint32(0),
+		started:           atomic.NewBool(false),
+	}
+
+	opts.Output = sw
+	sink := log.NewSinkAdapter(opts)
+	sw.sink = sink
+
+	return sw, nil
+}
+
+// Stop deregisters the sink and stops the monitoring process
+func (d *monitor) Stop() {
+	d.logger.DeregisterSink(d.sink)
+	close(d.doneCh)
+	d.started.Store(false)
+}
+
+// Start registers a sink on the monitor's logger and starts sending
+// received log messages over the returned channel.
+func (d *monitor) Start() <-chan []byte {
+	// Check to see if this has already been started. If not, flag
+	// it and proceed. If so, bail out early.
+	if !d.started.CAS(false, true) {
+		return nil
+	}
+
+	// register our sink with the logger
+	d.logger.RegisterSink(d.sink)
+
+	streamCh := make(chan []byte, d.bufSize)
+
+	// Run a goroutine that listens for streamed
+	// log messages and sends them to streamCh.
+	//
+	// It also periodically checks for dropped
+	// messages and makes room on the logCh to add
+	// a dropped message count warning
+	go func() {
+		defer close(streamCh)
+
+		ticker := time.NewTicker(d.dropCheckInterval)
+		defer ticker.Stop()
+
+		var logMessage []byte
+		for {
+			logMessage = nil
+
+			select {
+			case <-ticker.C:
+				// Check if there have been any dropped messages.
+				dc := d.droppedCount.Load()
+
+				if dc > 0 {
+					logMessage = []byte(fmt.Sprintf("Monitor dropped %d logs during monitor request\n", dc))
+					d.droppedCount.Swap(0)
+				}
+			case logMessage = <-d.logCh:
+			case <-d.doneCh:
+				return
+			}
+
+			if len(logMessage) > 0 {
+				select {
+				case <-d.doneCh:
+					return
+				case streamCh <- logMessage:
+				}
+			}
+		}
+	}()
+
+	return streamCh
+}
+
+// Write attempts to send the latest log to logCh; it drops the log if the
+// channel is unavailable to receive it.
+func (d *monitor) Write(p []byte) (n int, err error) {
+	// ensure logCh is still open
+	select {
+	case <-d.doneCh:
+		return
+	default:
+	}
+
+	bytes := make([]byte, len(p))
+	copy(bytes, p)
+
+	select {
+	case d.logCh <- bytes:
+	default:
+		d.droppedCount.Add(1)
+	}
+
+	return len(p), nil
+}
diff --git a/helper/monitor/monitor_test.go b/helper/monitor/monitor_test.go
new file mode 100644
index 0000000..06e1e01
--- /dev/null
+++ b/helper/monitor/monitor_test.go
@@ -0,0 +1,140 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package monitor + +import ( + "encoding/json" + "fmt" + "strings" + "testing" + "time" + + log "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/require" +) + +func TestMonitor_Start(t *testing.T) { + t.Parallel() + + logger := log.NewInterceptLogger(&log.LoggerOptions{ + Level: log.Error, + }) + + m, _ := NewMonitor(512, logger, &log.LoggerOptions{ + Level: log.Debug, + }) + + logCh := m.Start() + defer m.Stop() + + go func() { + logger.Debug("test log") + time.Sleep(10 * time.Millisecond) + }() + + select { + case l := <-logCh: + require.Contains(t, string(l), "[DEBUG] test log") + return + case <-time.After(5 * time.Second): + t.Fatal("Expected to receive from log channel") + } +} + +func TestMonitor_JSONFormat(t *testing.T) { + t.Parallel() + + logger := log.NewInterceptLogger(&log.LoggerOptions{ + Level: log.Error, + }) + + m, _ := NewMonitor(512, logger, &log.LoggerOptions{ + Level: log.Debug, + JSONFormat: true, + }) + + type jsonlog struct { + Level string `json:"@level"` + Message string `json:"@message"` + TimeStamp string `json:"@timestamp"` + } + jsonLog := &jsonlog{} + + logCh := m.Start() + defer m.Stop() + + go func() { + logger.Debug("test json log") + time.Sleep(10 * time.Millisecond) + }() + + select { + case l := <-logCh: + err := json.Unmarshal(l, jsonLog) + if err != nil { + t.Fatal("Expected JSON log from channel") + } + require.Contains(t, jsonLog.Message, "test json log") + return + case <-time.After(5 * time.Second): + t.Fatal("Expected to receive from log channel") + } +} + +func TestMonitor_Start_Unbuffered(t *testing.T) { + t.Parallel() + + logger := log.NewInterceptLogger(&log.LoggerOptions{ + Level: log.Error, + }) + + _, err := NewMonitor(0, logger, &log.LoggerOptions{ + Level: log.Debug, + }) + + if err == nil { + t.Fatal("expected to get an error, but didn't") + } else { + if !strings.Contains(err.Error(), "greater than zero") { + t.Fatal("expected an error about buf being greater than zero") + } + } +} + +// Ensure number of dropped messages are logged +func TestMonitor_DroppedMessages(t *testing.T) { + t.Parallel() + + logger := log.NewInterceptLogger(&log.LoggerOptions{ + Level: log.Warn, + }) + + m, _ := newMonitor(5, logger, &log.LoggerOptions{ + Level: log.Debug, + }) + m.dropCheckInterval = 5 * time.Millisecond + + logCh := m.Start() + defer m.Stop() + + for i := 0; i <= 100; i++ { + logger.Debug(fmt.Sprintf("test message %d", i)) + } + + passed := make(chan struct{}) + go func() { + for recv := range logCh { + if strings.Contains(string(recv), "Monitor dropped") { + close(passed) + return + } + } + }() + + select { + case <-passed: + case <-time.After(2 * time.Second): + require.Fail(t, "expected to see warn dropped messages") + } +} diff --git a/helper/namespace/namespace.go b/helper/namespace/namespace.go new file mode 100644 index 0000000..04a5b79 --- /dev/null +++ b/helper/namespace/namespace.go @@ -0,0 +1,159 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package namespace + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/hashicorp/vault/sdk/helper/consts" +) + +type contextValues struct{} + +type Namespace struct { + ID string `json:"id" mapstructure:"id"` + Path string `json:"path" mapstructure:"path"` + CustomMetadata map[string]string `json:"custom_metadata" mapstructure:"custom_metadata"` +} + +func (n *Namespace) String() string { + return fmt.Sprintf("ID: %s. 
Path: %s", n.ID, n.Path) +} + +const ( + RootNamespaceID = "root" +) + +var ( + contextNamespace contextValues = struct{}{} + ErrNoNamespace error = errors.New("no namespace") + RootNamespace *Namespace = &Namespace{ + ID: RootNamespaceID, + Path: "", + CustomMetadata: make(map[string]string), + } +) + +func (n *Namespace) HasParent(possibleParent *Namespace) bool { + switch { + case possibleParent.Path == "": + return true + case n.Path == "": + return false + default: + return strings.HasPrefix(n.Path, possibleParent.Path) + } +} + +func (n *Namespace) TrimmedPath(path string) string { + return strings.TrimPrefix(path, n.Path) +} + +func ContextWithNamespace(ctx context.Context, ns *Namespace) context.Context { + return context.WithValue(ctx, contextNamespace, ns) +} + +func RootContext(ctx context.Context) context.Context { + if ctx == nil { + return ContextWithNamespace(context.Background(), RootNamespace) + } + return ContextWithNamespace(ctx, RootNamespace) +} + +// FromContext retrieves the namespace from a context, or an error +// if there is no namespace in the context. +func FromContext(ctx context.Context) (*Namespace, error) { + if ctx == nil { + return nil, errors.New("context was nil") + } + + nsRaw := ctx.Value(contextNamespace) + if nsRaw == nil { + return nil, ErrNoNamespace + } + + ns := nsRaw.(*Namespace) + if ns == nil { + return nil, ErrNoNamespace + } + + return ns, nil +} + +// Canonicalize trims any prefix '/' and adds a trailing '/' to the +// provided string +func Canonicalize(nsPath string) string { + if nsPath == "" { + return "" + } + + // Canonicalize the path to not have a '/' prefix + nsPath = strings.TrimPrefix(nsPath, "/") + + // Canonicalize the path to always having a '/' suffix + if !strings.HasSuffix(nsPath, "/") { + nsPath += "/" + } + + return nsPath +} + +func SplitIDFromString(input string) (string, string) { + prefix := "" + slashIdx := strings.LastIndex(input, "/") + + switch { + case strings.HasPrefix(input, consts.LegacyBatchTokenPrefix): + prefix = consts.LegacyBatchTokenPrefix + input = input[2:] + + case strings.HasPrefix(input, consts.LegacyServiceTokenPrefix): + prefix = consts.LegacyServiceTokenPrefix + input = input[2:] + case strings.HasPrefix(input, consts.BatchTokenPrefix): + prefix = consts.BatchTokenPrefix + input = input[4:] + case strings.HasPrefix(input, consts.ServiceTokenPrefix): + prefix = consts.ServiceTokenPrefix + input = input[4:] + + case slashIdx > 0: + // Leases will never have a b./s. 
to start + if slashIdx == len(input)-1 { + return input, "" + } + prefix = input[:slashIdx+1] + input = input[slashIdx+1:] + } + + idx := strings.LastIndex(input, ".") + if idx == -1 { + return prefix + input, "" + } + if idx == len(input)-1 { + return prefix + input, "" + } + + return prefix + input[:idx], input[idx+1:] +} + +// MountPathDetails contains the details of a mount's location, +// consisting of the namespace of the mount and the path of the +// mount within the namespace +type MountPathDetails struct { + Namespace *Namespace + MountPath string +} + +func (mpd *MountPathDetails) GetRelativePath(currNs *Namespace) string { + subNsPath := strings.TrimPrefix(mpd.Namespace.Path, currNs.Path) + return subNsPath + mpd.MountPath +} + +func (mpd *MountPathDetails) GetFullPath() string { + return mpd.Namespace.Path + mpd.MountPath +} diff --git a/helper/namespace/namespace_test.go b/helper/namespace/namespace_test.go new file mode 100644 index 0000000..fd4c4c2 --- /dev/null +++ b/helper/namespace/namespace_test.go @@ -0,0 +1,174 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package namespace + +import ( + "testing" +) + +func TestSplitIDFromString(t *testing.T) { + tcases := []struct { + input string + id string + prefix string + }{ + { + "foo", + "", + "foo", + }, + { + "foo.id", + "id", + "foo", + }, + { + "foo.foo.id", + "id", + "foo.foo", + }, + { + "foo.foo/foo.id", + "id", + "foo.foo/foo", + }, + { + "foo.foo/.id", + "id", + "foo.foo/", + }, + { + "foo.foo/foo", + "", + "foo.foo/foo", + }, + { + "foo.foo/f", + "", + "foo.foo/f", + }, + { + "foo.foo/", + "", + "foo.foo/", + }, + { + "b.foo", + "", + "b.foo", + }, + { + "s.foo", + "", + "s.foo", + }, + { + "t.foo", + "foo", + "t", + }, + } + + for _, c := range tcases { + pre, id := SplitIDFromString(c.input) + if pre != c.prefix || id != c.id { + t.Fatalf("bad test case: %s != %s, %s != %s", pre, c.prefix, id, c.id) + } + } +} + +func TestHasParent(t *testing.T) { + // Create ns1 + ns1 := &Namespace{ + ID: "id1", + Path: "ns1/", + } + + // Create ns1/ns2 + ns2 := &Namespace{ + ID: "id2", + Path: "ns1/ns2/", + } + + // Create ns1/ns2/ns3 + ns3 := &Namespace{ + ID: "id3", + Path: "ns1/ns2/ns3/", + } + + // Create ns4 + ns4 := &Namespace{ + ID: "id4", + Path: "ns4/", + } + + // Create ns4/ns5 + ns5 := &Namespace{ + ID: "id5", + Path: "ns4/ns5/", + } + + tests := []struct { + name string + parent *Namespace + ns *Namespace + expected bool + }{ + { + "is root an ancestor of ns1", + RootNamespace, + ns1, + true, + }, + { + "is ns1 an ancestor of ns2", + ns1, + ns2, + true, + }, + { + "is ns2 an ancestor of ns3", + ns2, + ns3, + true, + }, + { + "is ns1 an ancestor of ns3", + ns1, + ns3, + true, + }, + { + "is root an ancestor of ns3", + RootNamespace, + ns3, + true, + }, + { + "is ns4 an ancestor of ns3", + ns4, + ns3, + false, + }, + { + "is ns5 an ancestor of ns3", + ns5, + ns3, + false, + }, + { + "is ns1 an ancestor of ns5", + ns1, + ns5, + false, + }, + } + + for _, test := range tests { + actual := test.ns.HasParent(test.parent) + if actual != test.expected { + t.Fatalf("bad ancestor calculation; name: %q, actual: %t, expected: %t", test.name, actual, test.expected) + } + } +} diff --git a/helper/osutil/fileinfo.go b/helper/osutil/fileinfo.go new file mode 100644 index 0000000..f14db6b --- /dev/null +++ b/helper/osutil/fileinfo.go @@ -0,0 +1,83 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package osutil + +import ( + "fmt" + "io/fs" + "os" +) + +func IsWriteGroup(mode os.FileMode) bool { + return mode&0o20 != 0 +} + +func IsWriteOther(mode os.FileMode) bool { + return mode&0o02 != 0 +} + +func checkPathInfo(info fs.FileInfo, path string, uid int, permissions int) error { + err := FileUidMatch(info, path, uid) + if err != nil { + return err + } + err = FilePermissionsMatch(info, path, permissions) + if err != nil { + return err + } + return nil +} + +func FilePermissionsMatch(info fs.FileInfo, path string, permissions int) error { + if permissions != 0 && int(info.Mode().Perm()) != permissions { + return fmt.Errorf("path %q does not have permissions %o", path, permissions) + } + if permissions == 0 && (IsWriteOther(info.Mode()) || IsWriteGroup(info.Mode())) { + return fmt.Errorf("path %q has insecure permissions %o. Vault expects no write permissions for group or others", path, info.Mode().Perm()) + } + + return nil +} + +// OwnerPermissionsMatch checks if vault user is the owner and permissions are secure for input path +func OwnerPermissionsMatch(path string, uid int, permissions int) error { + if path == "" { + return fmt.Errorf("could not verify permissions for path. No path provided ") + } + + info, err := os.Stat(path) + if err != nil { + return fmt.Errorf("error stating %q: %w", path, err) + } + if info.Mode()&os.ModeSymlink != 0 { + symLinkInfo, err := os.Lstat(path) + if err != nil { + return fmt.Errorf("error stating %q: %w", path, err) + } + err = checkPathInfo(symLinkInfo, path, uid, permissions) + if err != nil { + return err + } + } + err = checkPathInfo(info, path, uid, permissions) + if err != nil { + return err + } + + return nil +} + +// OwnerPermissionsMatchFile checks if vault user is the owner and permissions are secure for the input file +func OwnerPermissionsMatchFile(file *os.File, uid int, permissions int) error { + info, err := file.Stat() + if err != nil { + return fmt.Errorf("file stat error on path %q: %w", file.Name(), err) + } + err = checkPathInfo(info, file.Name(), uid, permissions) + if err != nil { + return err + } + + return nil +} diff --git a/helper/osutil/fileinfo_test.go b/helper/osutil/fileinfo_test.go new file mode 100644 index 0000000..8c3316b --- /dev/null +++ b/helper/osutil/fileinfo_test.go @@ -0,0 +1,183 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package osutil + +import ( + "io/fs" + "os" + "os/user" + "path/filepath" + "runtime" + "strconv" + "testing" +) + +func TestCheckPathInfo(t *testing.T) { + currentUser, err := user.Current() + if err != nil { + t.Errorf("failed to get details of current process owner. The error is: %v", err) + } + uid, err := strconv.ParseInt(currentUser.Uid, 0, 64) + if err != nil { + t.Errorf("failed to convert uid to int64. The error is: %v", err) + } + uid2, err := strconv.ParseInt(currentUser.Uid+"1", 0, 64) + if err != nil { + t.Errorf("failed to convert uid to int64. 
The error is: %v", err) + } + + testCases := []struct { + uid int + filepermissions fs.FileMode + permissions int + expectError bool + }{ + { + uid: 0, + filepermissions: 0o700, + permissions: 0, + expectError: false, + }, + { + uid: int(uid2), + filepermissions: 0o700, + permissions: 0, + expectError: true, + }, + { + uid: int(uid), + filepermissions: 0o700, + permissions: 0, + expectError: false, + }, + { + uid: 0, + filepermissions: 0o777, + permissions: 744, + expectError: true, + }, + } + + for _, tc := range testCases { + err := os.Mkdir("testFile", tc.filepermissions) + if err != nil { + t.Fatal(err) + } + info, err := os.Stat("testFile") + if err != nil { + t.Errorf("error stating %q: %v", "testFile", err) + } + if tc.uid != 0 && runtime.GOOS == "windows" && tc.expectError == true { + t.Skip("Skipping test in windows environment as no error will be returned in this case") + } + + err = checkPathInfo(info, "testFile", tc.uid, int(tc.permissions)) + if tc.expectError && err == nil { + t.Errorf("invalid result. expected error") + } + if !tc.expectError && err != nil { + t.Errorf(err.Error()) + } + + err = os.RemoveAll("testFile") + if err != nil { + t.Fatal(err) + } + } +} + +// TestOwnerPermissionsMatchFile creates a file and verifies that the current user of the process is the owner of the +// file +func TestOwnerPermissionsMatchFile(t *testing.T) { + currentUser, err := user.Current() + if err != nil { + t.Fatal("failed to get current user", err) + } + uid, err := strconv.ParseInt(currentUser.Uid, 0, 64) + if err != nil { + t.Fatal("failed to convert uid", err) + } + dir := t.TempDir() + path := filepath.Join(dir, "foo") + f, err := os.Create(path) + if err != nil { + t.Fatal("failed to create test file", err) + } + defer f.Close() + + info, err := os.Stat(path) + if err != nil { + t.Fatal("failed to stat test file", err) + } + + if err := OwnerPermissionsMatchFile(f, int(uid), int(info.Mode())); err != nil { + t.Fatalf("expected no error but got %v", err) + } +} + +// TestOwnerPermissionsMatchFile_OtherUser creates a file using the user that started the current process and verifies +// that a different user is not the owner of the file +func TestOwnerPermissionsMatchFile_OtherUser(t *testing.T) { + currentUser, err := user.Current() + if err != nil { + t.Fatal("failed to get current user", err) + } + uid, err := strconv.ParseInt(currentUser.Uid, 0, 64) + if err != nil { + t.Fatal("failed to convert uid", err) + } + dir := t.TempDir() + path := filepath.Join(dir, "foo") + f, err := os.Create(path) + if err != nil { + t.Fatal("failed to create test file", err) + } + defer f.Close() + + info, err := os.Stat(path) + if err != nil { + t.Fatal("failed to stat test file", err) + } + + if err := OwnerPermissionsMatchFile(f, int(uid)+1, int(info.Mode())); err == nil { + t.Fatalf("expected error but none") + } +} + +// TestOwnerPermissionsMatchFile_Symlink creates a file and a symlink to that file. 
The test verifies that the current +// user of the process is the owner of the file +func TestOwnerPermissionsMatchFile_Symlink(t *testing.T) { + currentUser, err := user.Current() + if err != nil { + t.Fatal("failed to get current user", err) + } + uid, err := strconv.ParseInt(currentUser.Uid, 0, 64) + if err != nil { + t.Fatal("failed to convert uid", err) + } + dir := t.TempDir() + path := filepath.Join(dir, "foo") + f, err := os.Create(path) + if err != nil { + t.Fatal("failed to create test file", err) + } + defer f.Close() + + symlink := filepath.Join(dir, "symlink") + err = os.Symlink(path, symlink) + if err != nil { + t.Fatal("failed to symlink file", err) + } + symlinkedFile, err := os.Open(symlink) + if err != nil { + t.Fatal("failed to open file", err) + } + info, err := os.Stat(symlink) + if err != nil { + t.Fatal("failed to stat test file", err) + } + if err := OwnerPermissionsMatchFile(symlinkedFile, int(uid), int(info.Mode())); err != nil { + t.Fatalf("expected no error but got %v", err) + } +} diff --git a/helper/osutil/fileinfo_unix.go b/helper/osutil/fileinfo_unix.go new file mode 100644 index 0000000..bb60c49 --- /dev/null +++ b/helper/osutil/fileinfo_unix.go @@ -0,0 +1,61 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !windows + +package osutil + +import ( + "fmt" + "io/fs" + "os/user" + "strconv" + "syscall" +) + +func FileUIDEqual(info fs.FileInfo, uid int) bool { + if stat, ok := info.Sys().(*syscall.Stat_t); ok { + path_uid := int(stat.Uid) + if path_uid == uid { + return true + } + } + return false +} + +func FileGIDEqual(info fs.FileInfo, gid int) bool { + if stat, ok := info.Sys().(*syscall.Stat_t); ok { + path_gid := int(stat.Gid) + if path_gid == gid { + return true + } + } + return false +} + +func FileUidMatch(info fs.FileInfo, path string, uid int) (err error) { + currentUser, err := user.Current() + if err != nil { + return fmt.Errorf("failed to get details of current process owner. The error is: %w", err) + } + switch uid { + case 0: + currentUserUid, err := strconv.Atoi(currentUser.Uid) + if err != nil { + return fmt.Errorf("failed to convert uid %q to int. The error is: %w", currentUser.Uid, err) + } + if !FileUIDEqual(info, currentUserUid) { + return fmt.Errorf("path %q is not owned by my uid %s", path, currentUser.Uid) + } + default: + if !FileUIDEqual(info, uid) { + return fmt.Errorf("path %q is not owned by uid %d", path, uid) + } + } + return err +} + +// Sets new umask and returns old umask +func Umask(newmask int) int { + return syscall.Umask(newmask) +} diff --git a/helper/osutil/fileinfo_unix_test.go b/helper/osutil/fileinfo_unix_test.go new file mode 100644 index 0000000..302bd9e --- /dev/null +++ b/helper/osutil/fileinfo_unix_test.go @@ -0,0 +1,103 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !windows + +package osutil + +import ( + "os" + "os/user" + "strconv" + "testing" +) + +func TestFileUIDEqual(t *testing.T) { + currentUser, err := user.Current() + if err != nil { + t.Errorf("failed to get details of current process owner. The error is: %v", err) + } + uid, err := strconv.Atoi(currentUser.Uid) + if err != nil { + t.Errorf("failed to convert uid to int. 
The error is: %v", err) + } + + testCases := []struct { + uid int + expected bool + }{ + { + uid: uid, + expected: true, + }, + { + uid: uid + 1, + expected: false, + }, + } + + for _, tc := range testCases { + err := os.Mkdir("testFile", 0o777) + if err != nil { + t.Fatal(err) + } + info, err := os.Stat("testFile") + if err != nil { + t.Errorf("error stating %q: %v", "testFile", err) + } + + result := FileUIDEqual(info, tc.uid) + if result != tc.expected { + t.Errorf("invalid result. expected %t for uid %v", tc.expected, tc.uid) + } + err = os.RemoveAll("testFile") + if err != nil { + t.Fatal(err) + } + } +} + +func TestFileGIDEqual(t *testing.T) { + currentUser, err := user.Current() + if err != nil { + t.Errorf("failed to get details of current process owner. The error is: %v", err) + } + gid, err := strconv.Atoi(currentUser.Gid) + if err != nil { + t.Errorf("failed to convert gid to int. The error is: %v", err) + } + + testCases := []struct { + gid int + expected bool + }{ + { + gid: gid, + expected: true, + }, + { + gid: gid + 1, + expected: false, + }, + } + + for _, tc := range testCases { + err := os.Mkdir("testFile", 0o777) + if err != nil { + t.Fatal(err) + } + info, err := os.Stat("testFile") + if err != nil { + t.Errorf("error stating %q: %v", "testFile", err) + } + + result := FileGIDEqual(info, tc.gid) + if result != tc.expected { + t.Errorf("invalid result. expected %t for gid %v", tc.expected, tc.gid) + } + err = os.RemoveAll("testFile") + if err != nil { + t.Fatal(err) + } + } +} diff --git a/helper/osutil/fileinfo_windows.go b/helper/osutil/fileinfo_windows.go new file mode 100644 index 0000000..193fe3f --- /dev/null +++ b/helper/osutil/fileinfo_windows.go @@ -0,0 +1,19 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build windows + +package osutil + +import ( + "io/fs" +) + +func FileUidMatch(info fs.FileInfo, path string, uid int) error { + return nil +} + +// Umask does nothing for windows for now +func Umask(newmask int) int { + return 0 +} diff --git a/helper/parseip/parseip.go b/helper/parseip/parseip.go new file mode 100644 index 0000000..9557963 --- /dev/null +++ b/helper/parseip/parseip.go @@ -0,0 +1,76 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package parseip + +import ( + "strings" + + "k8s.io/utils/net" +) + +// In Go 1.17 the behaviour of net.ParseIP and net.ParseCIDR changed +// (https://golang.org/doc/go1.17#net) so that leading zeros in the input results +// in an error. This package contains helpers that strip leading zeroes so as +// to avoid those errors. + +// You should probably not be using anything here unless you've found a new place +// where IPs/CIDRs are read from storage and re-parsed. + +// trimLeadingZeroes returns its input trimmed of any leading zeroes. +func trimLeadingZeroes(s string) string { + for i, r := range s { + if r == '0' { + continue + } + return s[i:] + } + return "" +} + +// trimLeadingZeroesIPv4 takes an IPv4 string and returns the input +// trimmed of any excess leading zeroes in each octet. 
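+//
+// For example (cases mirrored in parseip_test.go):
+//
+//	trimLeadingZeroesIPv4("010.010.20.5")  // "10.10.20.5"
+//	trimLeadingZeroesIPv4("192.00.002.33") // "192.0.2.33"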
+func trimLeadingZeroesIPv4(s string) string { + if len(s) == 0 { + return s + } + + pieces := strings.Split(s, ".") + var sb strings.Builder + for i, piece := range pieces { + trimmed := trimLeadingZeroes(piece) + if trimmed == "" && len(piece) > 0 { + sb.WriteByte('0') + } else { + sb.WriteString(trimmed) + } + if i != len(pieces)-1 { + sb.WriteByte('.') + } + } + return sb.String() +} + +// trimLeadingZeroesIP does the same work as trimLeadingZeroesIPv4 but also accepts +// an IPv6 address that may contain an IPv4 address representation. Only decimal +// IPv4 addresses get zero-stripped. +func trimLeadingZeroesIP(s string) string { + for i := len(s) - 1; i >= 0; i-- { + if s[i] == ':' && net.ParseIPSloppy(s[i+1:]) != nil { + return s[:i+1] + trimLeadingZeroesIPv4(s[i+1:]) + } + } + return trimLeadingZeroesIPv4(s) +} + +// TrimLeadingZeroesCIDR does the same thing as trimLeadingZeroesIP but expects +// a CIDR address as input. If the input isn't a valid CIDR address, it is returned +// unchanged. +func TrimLeadingZeroesCIDR(s string) string { + pieces := strings.Split(s, "/") + if len(pieces) != 2 { + return s + } + pieces[0] = trimLeadingZeroesIP(pieces[0]) + return strings.Join(pieces, "/") +} diff --git a/helper/parseip/parseip_test.go b/helper/parseip/parseip_test.go new file mode 100644 index 0000000..e26c810 --- /dev/null +++ b/helper/parseip/parseip_test.go @@ -0,0 +1,38 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package parseip + +import ( + "testing" +) + +func Test_TrimLeadingZeroes(t *testing.T) { + tests := []struct { + in string + want string + }{ + {"127.0.0.1", "127.0.0.1"}, + {"010.010.20.5", "10.10.20.5"}, + {"1.1.1.010", "1.1.1.10"}, + {"64:ff9b::192.00.002.33", "64:ff9b::192.0.2.33"}, + {"2001:db8:122:344:c0:2:2100::", "2001:db8:122:344:c0:2:2100::"}, + {"2001:db8:122:344::192.0.2.033", "2001:db8:122:344::192.0.2.33"}, + } + for _, tt := range tests { + if got := trimLeadingZeroesIP(tt.in); got != tt.want { + t.Errorf("trimLeadingZeroesIP() = %v, want %v", got, tt.want) + } + } + + for _, tt := range tests { + // Non-CIDR addresses are ignored. + if got := TrimLeadingZeroesCIDR(tt.in); got != tt.in { + t.Errorf("TrimLeadingZeroesCIDR() = %v, want %v", got, tt.in) + } + want := tt.want + "/32" + if got := TrimLeadingZeroesCIDR(tt.in + "/32"); got != want { + t.Errorf("TrimLeadingZeroesCIDR() = %v, want %v", got, want) + } + } +} diff --git a/helper/pgpkeys/encrypt_decrypt.go b/helper/pgpkeys/encrypt_decrypt.go new file mode 100644 index 0000000..c7a8027 --- /dev/null +++ b/helper/pgpkeys/encrypt_decrypt.go @@ -0,0 +1,120 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pgpkeys + +import ( + "bytes" + "encoding/base64" + "fmt" + + "github.com/ProtonMail/go-crypto/openpgp" + "github.com/ProtonMail/go-crypto/openpgp/packet" +) + +// EncryptShares takes an ordered set of byte slices to encrypt and the +// corresponding base64-encoded public keys to encrypt them with, encrypts each +// byte slice with the corresponding public key. 
+//
+// Note: There is no corresponding test function; this functionality is
+// thoroughly tested in the init and rekey command unit tests.
+func EncryptShares(input [][]byte, pgpKeys []string) ([]string, [][]byte, error) {
+	if len(input) != len(pgpKeys) {
+		return nil, nil, fmt.Errorf("mismatch between number of items to encrypt and number of PGP keys")
+	}
+	encryptedShares := make([][]byte, 0, len(pgpKeys))
+	entities, err := GetEntities(pgpKeys)
+	if err != nil {
+		return nil, nil, err
+	}
+	for i, entity := range entities {
+		ctBuf := bytes.NewBuffer(nil)
+		pt, err := openpgp.Encrypt(ctBuf, []*openpgp.Entity{entity}, nil, nil, nil)
+		if err != nil {
+			return nil, nil, fmt.Errorf("error setting up encryption for PGP message: %w", err)
+		}
+		_, err = pt.Write(input[i])
+		if err != nil {
+			return nil, nil, fmt.Errorf("error encrypting PGP message: %w", err)
+		}
+		pt.Close()
+		encryptedShares = append(encryptedShares, ctBuf.Bytes())
+	}
+
+	fingerprints, err := GetFingerprints(nil, entities)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return fingerprints, encryptedShares, nil
+}
+
+// GetFingerprints takes in a list of openpgp Entities and returns the
+// fingerprints. If entities is nil, it will instead parse both entities and
+// fingerprints from the pgpKeys string slice.
+func GetFingerprints(pgpKeys []string, entities []*openpgp.Entity) ([]string, error) {
+	if entities == nil {
+		var err error
+		entities, err = GetEntities(pgpKeys)
+		if err != nil {
+			return nil, err
+		}
+	}
+	ret := make([]string, 0, len(entities))
+	for _, entity := range entities {
+		ret = append(ret, fmt.Sprintf("%x", entity.PrimaryKey.Fingerprint))
+	}
+	return ret, nil
+}
+
+// GetEntities takes in a string array of base64-encoded PGP keys and returns
+// the openpgp Entities
+func GetEntities(pgpKeys []string) ([]*openpgp.Entity, error) {
+	ret := make([]*openpgp.Entity, 0, len(pgpKeys))
+	for _, keystring := range pgpKeys {
+		data, err := base64.StdEncoding.DecodeString(keystring)
+		if err != nil {
+			return nil, fmt.Errorf("error decoding given PGP key: %w", err)
+		}
+		entity, err := openpgp.ReadEntity(packet.NewReader(bytes.NewBuffer(data)))
+		if err != nil {
+			return nil, fmt.Errorf("error parsing given PGP key: %w", err)
+		}
+		ret = append(ret, entity)
+	}
+	return ret, nil
+}
+
+// DecryptBytes takes in base64-encoded encrypted bytes and the base64-encoded
+// private key and decrypts the ciphertext. A bytes.Buffer is returned to allow
+// the caller to do useful things with it (get it as a []byte, get it as a
+// string, use it as an io.Reader, etc.), and also because this function
+// doesn't know if what comes out is binary data or a string, so let the
+// caller decide.
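+//
+// A minimal usage sketch (both arguments are hypothetical base64-encoded
+// strings; the returned buffer holds the plaintext):
+//
+//	ptBuf, err := DecryptBytes(encryptedB64, privKeyB64)
+//	if err != nil {
+//		return err
+//	}
+//	plaintext := ptBuf.String()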
+func DecryptBytes(encodedCrypt, privKey string) (*bytes.Buffer, error) { + privKeyBytes, err := base64.StdEncoding.DecodeString(privKey) + if err != nil { + return nil, fmt.Errorf("error decoding base64 private key: %w", err) + } + + cryptBytes, err := base64.StdEncoding.DecodeString(encodedCrypt) + if err != nil { + return nil, fmt.Errorf("error decoding base64 crypted bytes: %w", err) + } + + entity, err := openpgp.ReadEntity(packet.NewReader(bytes.NewBuffer(privKeyBytes))) + if err != nil { + return nil, fmt.Errorf("error parsing private key: %w", err) + } + + entityList := &openpgp.EntityList{entity} + md, err := openpgp.ReadMessage(bytes.NewBuffer(cryptBytes), entityList, nil, nil) + if err != nil { + return nil, fmt.Errorf("error decrypting the messages: %w", err) + } + + ptBuf := bytes.NewBuffer(nil) + ptBuf.ReadFrom(md.UnverifiedBody) + + return ptBuf, nil +} diff --git a/helper/pgpkeys/flag.go b/helper/pgpkeys/flag.go new file mode 100644 index 0000000..79d114b --- /dev/null +++ b/helper/pgpkeys/flag.go @@ -0,0 +1,144 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pgpkeys + +import ( + "bytes" + "encoding/base64" + "errors" + "fmt" + "os" + "strings" + + "github.com/ProtonMail/go-crypto/openpgp" +) + +// PubKeyFileFlag implements flag.Value and command.Example to receive exactly +// one PGP or keybase key via a flag. +type PubKeyFileFlag string + +func (p *PubKeyFileFlag) String() string { return string(*p) } + +func (p *PubKeyFileFlag) Set(val string) error { + if p != nil && *p != "" { + return errors.New("can only be specified once") + } + + keys, err := ParsePGPKeys(strings.Split(val, ",")) + if err != nil { + return err + } + + if len(keys) > 1 { + return errors.New("can only specify one pgp key") + } + + *p = PubKeyFileFlag(keys[0]) + return nil +} + +func (p *PubKeyFileFlag) Example() string { return "keybase:user" } + +// PGPPubKeyFiles implements the flag.Value interface and allows parsing and +// reading a list of PGP public key files. +type PubKeyFilesFlag []string + +func (p *PubKeyFilesFlag) String() string { + return fmt.Sprint(*p) +} + +func (p *PubKeyFilesFlag) Set(val string) error { + if len(*p) > 0 { + return errors.New("can only be specified once") + } + + keys, err := ParsePGPKeys(strings.Split(val, ",")) + if err != nil { + return err + } + + *p = PubKeyFilesFlag(keys) + return nil +} + +func (p *PubKeyFilesFlag) Example() string { return "keybase:user1, keybase:user2, ..." } + +// ParsePGPKeys takes a list of PGP keys and parses them either using keybase +// or reading them from disk and returns the "expanded" list of pgp keys in +// the same order. +func ParsePGPKeys(keyfiles []string) ([]string, error) { + keys := make([]string, len(keyfiles)) + + keybaseMap, err := FetchKeybasePubkeys(keyfiles) + if err != nil { + return nil, err + } + + for i, keyfile := range keyfiles { + keyfile = strings.TrimSpace(keyfile) + + if strings.HasPrefix(keyfile, kbPrefix) { + key, ok := keybaseMap[keyfile] + if !ok || key == "" { + return nil, fmt.Errorf("keybase user %q not found", strings.TrimPrefix(keyfile, kbPrefix)) + } + keys[i] = key + continue + } + + pgpStr, err := ReadPGPFile(keyfile) + if err != nil { + return nil, err + } + keys[i] = pgpStr + } + + return keys, nil +} + +// ReadPGPFile reads the given PGP file from disk. 
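+//
+// A usage sketch (the path is hypothetical; a leading '@' is accepted and
+// stripped, matching the flag syntax used by PubKeyFilesFlag):
+//
+//	keyB64, err := ReadPGPFile("@/tmp/pubkey.asc")
+//	if err != nil {
+//		return err
+//	}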
+func ReadPGPFile(path string) (string, error) { + if len(path) <= 0 { + return "", errors.New("empty path") + } + if path[0] == '@' { + path = path[1:] + } + f, err := os.Open(path) + if err != nil { + return "", err + } + defer f.Close() + buf := bytes.NewBuffer(nil) + _, err = buf.ReadFrom(f) + if err != nil { + return "", err + } + + // First parse as an armored keyring file, if that doesn't work, treat it as a straight binary/b64 string + keyReader := bytes.NewReader(buf.Bytes()) + entityList, err := openpgp.ReadArmoredKeyRing(keyReader) + if err == nil { + if len(entityList) != 1 { + return "", fmt.Errorf("more than one key found in file %q", path) + } + if entityList[0] == nil { + return "", fmt.Errorf("primary key was nil for file %q", path) + } + + serializedEntity := bytes.NewBuffer(nil) + err = entityList[0].Serialize(serializedEntity) + if err != nil { + return "", fmt.Errorf("error serializing entity for file %q: %w", path, err) + } + + return base64.StdEncoding.EncodeToString(serializedEntity.Bytes()), nil + } + + _, err = base64.StdEncoding.DecodeString(buf.String()) + if err == nil { + return buf.String(), nil + } + return base64.StdEncoding.EncodeToString(buf.Bytes()), nil +} diff --git a/helper/pgpkeys/flag_test.go b/helper/pgpkeys/flag_test.go new file mode 100644 index 0000000..9ea25d4 --- /dev/null +++ b/helper/pgpkeys/flag_test.go @@ -0,0 +1,242 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pgpkeys + +import ( + "bytes" + "encoding/base64" + "encoding/hex" + "flag" + "fmt" + "io/ioutil" + "os" + "reflect" + "strings" + "testing" + + "github.com/ProtonMail/go-crypto/openpgp" + "github.com/ProtonMail/go-crypto/openpgp/packet" +) + +func TestPubKeyFilesFlag_implements(t *testing.T) { + var raw interface{} + raw = new(PubKeyFilesFlag) + if _, ok := raw.(flag.Value); !ok { + t.Fatalf("PubKeysFilesFlag should be a Value") + } +} + +func TestPubKeyFilesFlagSetBinary(t *testing.T) { + tempDir, err := ioutil.TempDir("", "vault-test") + if err != nil { + t.Fatalf("Error creating temporary directory: %s", err) + } + defer os.RemoveAll(tempDir) + + decoder := base64.StdEncoding + pub1Bytes, err := decoder.DecodeString(pubKey1) + if err != nil { + t.Fatalf("Error decoding bytes for public key 1: %s", err) + } + err = ioutil.WriteFile(tempDir+"/pubkey1", pub1Bytes, 0o755) + if err != nil { + t.Fatalf("Error writing pub key 1 to temp file: %s", err) + } + pub2Bytes, err := decoder.DecodeString(pubKey2) + if err != nil { + t.Fatalf("Error decoding bytes for public key 2: %s", err) + } + err = ioutil.WriteFile(tempDir+"/pubkey2", pub2Bytes, 0o755) + if err != nil { + t.Fatalf("Error writing pub key 2 to temp file: %s", err) + } + pub3Bytes, err := decoder.DecodeString(pubKey3) + if err != nil { + t.Fatalf("Error decoding bytes for public key 3: %s", err) + } + err = ioutil.WriteFile(tempDir+"/pubkey3", pub3Bytes, 0o755) + if err != nil { + t.Fatalf("Error writing pub key 3 to temp file: %s", err) + } + + pkf := new(PubKeyFilesFlag) + err = pkf.Set(tempDir + "/pubkey1,@" + tempDir + "/pubkey2") + if err != nil { + t.Fatalf("err: %s", err) + } + + err = pkf.Set(tempDir + "/pubkey3") + if err == nil { + t.Fatalf("err: should not have been able to set a second value") + } + + expected := []string{strings.ReplaceAll(pubKey1, "\n", ""), strings.ReplaceAll(pubKey2, "\n", "")} + if !reflect.DeepEqual(pkf.String(), fmt.Sprint(expected)) { + t.Fatalf("Bad: %#v", pkf) + } +} + +func TestPubKeyFilesFlagSetB64(t *testing.T) { + tempDir, err := 
ioutil.TempDir("", "vault-test") + if err != nil { + t.Fatalf("Error creating temporary directory: %s", err) + } + defer os.RemoveAll(tempDir) + + err = ioutil.WriteFile(tempDir+"/pubkey1", []byte(pubKey1), 0o755) + if err != nil { + t.Fatalf("Error writing pub key 1 to temp file: %s", err) + } + err = ioutil.WriteFile(tempDir+"/pubkey2", []byte(pubKey2), 0o755) + if err != nil { + t.Fatalf("Error writing pub key 2 to temp file: %s", err) + } + err = ioutil.WriteFile(tempDir+"/pubkey3", []byte(pubKey3), 0o755) + if err != nil { + t.Fatalf("Error writing pub key 3 to temp file: %s", err) + } + + pkf := new(PubKeyFilesFlag) + err = pkf.Set(tempDir + "/pubkey1,@" + tempDir + "/pubkey2") + if err != nil { + t.Fatalf("err: %s", err) + } + + err = pkf.Set(tempDir + "/pubkey3") + if err == nil { + t.Fatalf("err: should not have been able to set a second value") + } + + expected := []string{pubKey1, pubKey2} + if !reflect.DeepEqual(pkf.String(), fmt.Sprint(expected)) { + t.Fatalf("bad: got %s, expected %s", pkf.String(), fmt.Sprint(expected)) + } +} + +func TestPubKeyFilesFlagSetKeybase(t *testing.T) { + tempDir, err := ioutil.TempDir("", "vault-test") + if err != nil { + t.Fatalf("Error creating temporary directory: %s", err) + } + defer os.RemoveAll(tempDir) + + err = ioutil.WriteFile(tempDir+"/pubkey2", []byte(pubKey2), 0o755) + if err != nil { + t.Fatalf("Error writing pub key 2 to temp file: %s", err) + } + + pkf := new(PubKeyFilesFlag) + err = pkf.Set("keybase:jefferai,@" + tempDir + "/pubkey2" + ",keybase:hashicorp") + if err != nil { + t.Fatalf("err: %s", err) + } + fingerprints := []string{} + for _, pubkey := range []string(*pkf) { + keyBytes, err := base64.StdEncoding.DecodeString(pubkey) + if err != nil { + t.Fatalf("bad: %v", err) + } + pubKeyBuf := bytes.NewBuffer(keyBytes) + reader := packet.NewReader(pubKeyBuf) + entity, err := openpgp.ReadEntity(reader) + if err != nil { + t.Fatalf("bad: %v", err) + } + if entity == nil { + t.Fatalf("nil entity encountered") + } + fingerprints = append(fingerprints, hex.EncodeToString(entity.PrimaryKey.Fingerprint[:])) + } + + exp := []string{ + "0f801f518ec853daff611e836528efcac6caa3db", + "cf3d4694c9f57b28cb4092c2eb832c67eb5e8957", + "c874011f0ab405110d02105534365d9472d7468f", + } + + if !reflect.DeepEqual(fingerprints, exp) { + t.Fatalf("bad: got \n%#v\nexpected\n%#v\n", fingerprints, exp) + } +} + +const pubKey1 = `mQENBFXbjPUBCADjNjCUQwfxKL+RR2GA6pv/1K+zJZ8UWIF9S0lk7cVIEfJiprzzwiMwBS5cD0da +rGin1FHvIWOZxujA7oW0O2TUuatqI3aAYDTfRYurh6iKLC+VS+F7H+/mhfFvKmgr0Y5kDCF1j0T/ +063QZ84IRGucR/X43IY7kAtmxGXH0dYOCzOe5UBX1fTn3mXGe2ImCDWBH7gOViynXmb6XNvXkP0f +sF5St9jhO7mbZU9EFkv9O3t3EaURfHopsCVDOlCkFCw5ArY+DUORHRzoMX0PnkyQb5OzibkChzpg +8hQssKeVGpuskTdz5Q7PtdW71jXd4fFVzoNH8fYwRpziD2xNvi6HABEBAAG0EFZhdWx0IFRlc3Qg +S2V5IDGJATgEEwECACIFAlXbjPUCGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOfLr44B +HbeTo+sH/i7bapIgPnZsJ81hmxPj4W12uvunksGJiC7d4hIHsG7kmJRTJfjECi+AuTGeDwBy84TD +cRaOB6e79fj65Fg6HgSahDUtKJbGxj/lWzmaBuTzlN3CEe8cMwIPqPT2kajJVdOyrvkyuFOdPFOE +A7bdCH0MqgIdM2SdF8t40k/ATfuD2K1ZmumJ508I3gF39jgTnPzD4C8quswrMQ3bzfvKC3klXRlB +C0yoArn+0QA3cf2B9T4zJ2qnvgotVbeK/b1OJRNj6Poeo+SsWNc/A5mw7lGScnDgL3yfwCm1gQXa +QKfOt5x+7GqhWDw10q+bJpJlI10FfzAnhMF9etSqSeURBRW5AQ0EVduM9QEIAL53hJ5bZJ7oEDCn +aY+SCzt9QsAfnFTAnZJQrvkvusJzrTQ088eUQmAjvxkfRqnv981fFwGnh2+I1Ktm698UAZS9Jt8y +jak9wWUICKQO5QUt5k8cHwldQXNXVXFa+TpQWQR5yW1a9okjh5o/3d4cBt1yZPUJJyLKY43Wvptb +6EuEsScO2DnRkh5wSMDQ7dTooddJCmaq3LTjOleRFQbu9ij386Do6jzK69mJU56TfdcydkxkWF5N 
+ZLGnED3lq+hQNbe+8UI5tD2oP/3r5tXKgMy1R/XPvR/zbfwvx4FAKFOP01awLq4P3d/2xOkMu4Lu +9p315E87DOleYwxk+FoTqXEAEQEAAYkCPgQYAQIACQUCVduM9QIbLgEpCRDny6+OAR23k8BdIAQZ +AQIABgUCVduM9QAKCRAID0JGyHtSGmqYB/4m4rJbbWa7dBJ8VqRU7ZKnNRDR9CVhEGipBmpDGRYu +lEimOPzLUX/ZXZmTZzgemeXLBaJJlWnopVUWuAsyjQuZAfdd8nHkGRHG0/DGum0l4sKTta3OPGHN +C1z1dAcQ1RCr9bTD3PxjLBczdGqhzw71trkQRBRdtPiUchltPMIyjUHqVJ0xmg0hPqFic0fICsr0 +YwKoz3h9+QEcZHvsjSZjgydKvfLYcm+4DDMCCqcHuJrbXJKUWmJcXR0y/+HQONGrGJ5xWdO+6eJi +oPn2jVMnXCm4EKc7fcLFrz/LKmJ8seXhxjM3EdFtylBGCrx3xdK0f+JDNQaC/rhUb5V2XuX6VwoH +/AtY+XsKVYRfNIupLOUcf/srsm3IXT4SXWVomOc9hjGQiJ3rraIbADsc+6bCAr4XNZS7moViAAcI +PXFv3m3WfUlnG/om78UjQqyVACRZqqAGmuPq+TSkRUCpt9h+A39LQWkojHqyob3cyLgy6z9Q557O +9uK3lQozbw2gH9zC0RqnePl+rsWIUU/ga16fH6pWc1uJiEBt8UZGypQ/E56/343epmYAe0a87sHx +8iDV+dNtDVKfPRENiLOOc19MmS+phmUyrbHqI91c0pmysYcJZCD3a502X1gpjFbPZcRtiTmGnUKd +OIu60YPNE4+h7u2CfYyFPu3AlUaGNMBlvy6PEpU=` + +const pubKey2 = `mQENBFXbkJEBCADKb1ZvlT14XrJa2rTOe5924LQr2PTZlRv+651TXy33yEhelZ+V4sMrELN8fKEG +Zy1kNixmbq3MCF/671k3LigHA7VrOaH9iiQgr6IIq2MeIkUYKZ27C992vQkYLjbYUG8+zl5h69S4 +0Ixm0yL0M54XOJ0gm+maEK1ZESKTUlDNkIS7l0jLZSYwfUeGXSEt6FWs8OgbyRTaHw4PDHrDEE9e +Q67K6IZ3YMhPOL4fVk4Jwrp5R/RwiklT+lNozWEyFVwPFH4MeQMs9nMbt+fWlTzEA7tI4acI9yDk +Cm1yN2R9rmY0UjODRiJw6z6sLV2T+Pf32n3MNSUOYczOjZa4VBwjABEBAAG0EFZhdWx0IFRlc3Qg +S2V5IDKJATgEEwECACIFAlXbkJECGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOuDLGfr +XolXqz4H/28IuoRxGKoJ064YHjPkkpoddW6zdzzNfHipZnNfEUiTEls4qF1IB81M2xqfiXIFRIdO +2kaLkRPFhO0hRxbtI6VuZYLgG3QCaXhxW6GyFa5zKABqhb5ojexdnAYRswaHV201ZCclj9rnJN1P +Ag0Rz6MdX/w1euEWktQxWKo42oZKyx8oT9p6lrv5KRmGkdrg8K8ARmRILjmwuBAgJM0eXBZHNGWX +elk4YmOgnAAcZA6ZAo1G+8Pg6pKKP61ETewuCg3/u7N0vDttB+ZXqF88W9jAYlvdgbTtajNF5IDY +DjTzWfeCaIB18F9gOzXq15SwWeDDI+CU9Nmq358IzXlxk4e5AQ0EVduQkQEIAOjZV5tbpfIh5Qef +pIp2dpGMVfpgPj4RNc15CyFnb8y6dhCrdybkY9GveXJe4F3GNYnSfB42cgxrfhizX3LakmZQ/SAg ++YO5KxfCIN7Q9LPNeTgPsZZT6h8lVuXUxOFKXfRaR3/tGF5xE3e5CoZRsHV/c92h3t1LdJNOnC5m +UKIPO4zDxiw/C2T2q3rP1kmIMaOH724kEH5A+xcp1cBHyt0tdHtIWuQv6joTJzujqViRhlCwQYzQ +SKpSBxwhBsorPvyinZI/ZXA4XXZc5RoMqV9rikedrb1rENO8JOuPu6tMS+znFu67skq2gFFZwCQW +IjdHm+2ukE+PE580WAWudyMAEQEAAYkCPgQYAQIACQUCVduQkQIbLgEpCRDrgyxn616JV8BdIAQZ +AQIABgUCVduQkQAKCRArYtevdF38xtzgB/4zVzozBpVOnagRkA7FDsHo36xX60Lik+ew0m28ueDD +hnV3bXQsCvn/6wiCVWqLOTDeYCPlyTTpEMyk8zwdCICW6MgSkVHWcEDOrRqIrqm86rirjTGjJSgQ +e3l4CqJvkn6jybShYoBk1OZZV6vVv9hPTXXv9E6dLKoEW5YZBrrF+VC0w1iOIvaAQ+QXph20eV4K +BIrp/bhG6PdnigKxuBZ79cdqDnXIzT9UiIa6LYpR0rbeg+7BmuZTTPS8t+41hIiKS+UZFdKa67eY +ENtyOmEMWOFCLLRJGxkleukchiMJ70rknloZXsvJIweXBzSZ6m7mJQBgaig/L/dXyjv6+j2pNB4H +/1trYUtJjXQKHmqlgCmpCkHt3g7JoxWvglnDNmE6q3hIWuVIYQpnzZy1g05+X9Egwc1WVpBB02H7 +PkUZTfpaP/L6DLneMmSKPhZE3I+lPIPjwrxqh6xy5uQezcWkJTNKvPWF4FJzrVvx7XTPjfGvOB0U +PEnjvtZTp5yOhTeZK7DgIEtb/Wcrqs+iRArQKboM930ORSZhwvGK3F9V/gMDpIrvge5vDFsTEYQd +w/2epIewH0L/FUb/6jBRcVEpGo9Ayg+Jnhq14GOGcd1y9oMZ48kYVLVBTA9tQ+82WE8Bch7uFPj4 +MFOMVRn1dc3qdXlg3mimA+iK7tABQfG0RJ9YzWs=` + +const pubKey3 = `mQENBFXbkiMBCACiHW4/VI2JkfvSEINddS7vE6wEu5e1leNQDaLUh6PrATQZS2a4Q6kRE6WlJumj +6wCeN753Cm93UGQl2Bi3USIEeArIZnPTcocrckOVXxtoLBNKXgqKvEsDXgfw8A+doSfXoDm/3Js4 +Wy3WsYKNR9LaPuJZHnpjsFAJhvRVyhH4UFD+1RTSSefq1mozPfDdMoZeZNEpfhwt3DuTJs7RqcTH +CgR2CqhEHnOOE5jJUljHKYLCglE2+8dth1bZlQi4xly/VHZzP3Bn7wKeolK/ROP6VZz/e0xq/BKy +resmxvlBWZ1zWwqGIrV9b0uwYvGrh2hOd5C5+5oGaA2MGcjxwaLBABEBAAG0EFZhdWx0IFRlc3Qg +S2V5IDOJATgEEwECACIFAlXbkiMCGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEPR5S1b8 +LcbdWjEH/2mhqC9a0Vk1IzOgxEoVxYVqVdvaxI0nTZOTfmcFYn4HQlQ+SLEoyNWe5jtkhx4k5uHi 
+pxwKHzOv02YM14NWC6bvKw2CQETLDPG4Cv8YMUmpho5tnMDdttIzp8HjyJRtHazU1uTes2/yuqh6 +LHCejVJI0uST3RibquwdG3QjPP8Umxu+YC9+FOW2Kit/AQ8JluFDJdq3/wSX8VfYZrGdgmreE7KY +MolhCkzGSPj7oFygw8LqKoJvt9tCuBKhZMBuMv1sB5CoJIWdPoqOZc4U7L1XdqfKvFZR/RhuXgN1 +lkI9MqrnLDpikL3Lk+ctLxWOjUCW8roqKoHZYBF7XPqdAfm5AQ0EVduSIwEIAOPcjd4QgbLlqIk3 +s6BPRRyVzglTgUdf+I0rUDybaDJfJobZd8U6e4hkPvRoQ8tJefnz/qnD/63watAbJYcVTme40I3V +KDOmVGcyaDxiKP1disKqcEJd7XQiI72oAiXmEH0y+5UwnOMks/lwaAGDMGVRjHEXI6fiRPFsfTr8 +7qvMJ3pW1OiOXVSezuBNTlmyJC7srQ1/nwxL337ev6D1zQZd3JuhcxLkHrUELLNwzhvcZ70vg645 +jAmz8EdmvvoqEPPoHqKgP5AeHACOsTm953KHhgx3NYuGPU/RoIvugKt4Iq5nw7TWFTjPHGVF3GTQ +ry5CZ/AzXiL57hVEhDvmuT8AEQEAAYkCPgQYAQIACQUCVduSIwIbLgEpCRD0eUtW/C3G3cBdIAQZ +AQIABgUCVduSIwAKCRAFI/9Nx3K5IPOFCACsZ/Z4s2LcEoA51TW+T5w+YevlIuq+332JtqNIpuGI +WpGxUxyDyPT0YQWr0SObBORYNr7RP8d/I2rbaFKyaDaKvRofYr+TwXy92phBo7pdEUamBpfrm/sr ++2BgAB2x3HWXp+IMdeVVhqQe8t4cnFm3c1fIdxADyiJuV5ge2Ml5gK5yNwqCQPh7U2RqC+lmVlMJ +GvWInIRn2mf6A7phDYNZfOz6dkar4yyh5r9rRgrZw88r/yIlrq/c6KRUIgnPMrFYEauggOceZ827 ++jKkbKWFEuHtiCxW7kRAN25UfnGsPaF+NSGM2q1vCG4HiFydx6lMoXM0Shf8+ZwyrV/5BzAqpWwI +AJ37tEwC58Fboynly6OGOzgPS0xKnzkXMOtquTo0qEH/1bEUsBknn795BmZOTf4oBC5blN6qRv7c +GSP00i+sxql1NhTjJcjJtfOPUzgfW+Af/+HR648z4c7c6MCjDFKnk8ZkoGLRU7ISjenkNFzvu2bj +lxJkil0uJDlLPbbX80ojzV1GS9g+ZxVPR+68N1QLl2FU6zsfg34upmLLHG8VG4vExzgyNkOwfTYv +dgyRNTjnuPue6H12fZZ9uCNeG52v7lR3eoQcCxBOniwgipB8UJ52RWXblwxzCtGtDi/EWB3zLTUn +puKcgucA0LotbihSMxhDylaARfVO1QV6csabM/g=` diff --git a/helper/pgpkeys/keybase.go b/helper/pgpkeys/keybase.go new file mode 100644 index 0000000..b24e4bf --- /dev/null +++ b/helper/pgpkeys/keybase.go @@ -0,0 +1,119 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pgpkeys + +import ( + "bytes" + "encoding/base64" + "fmt" + "strings" + + "github.com/ProtonMail/go-crypto/openpgp" + cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/vault/sdk/helper/jsonutil" +) + +const ( + kbPrefix = "keybase:" +) + +// FetchKeybasePubkeys fetches public keys from Keybase given a set of +// usernames, which are derived from correctly formatted input entries. It +// doesn't use their client code due to both the API and the fact that it is +// considered alpha and probably best not to rely on it. The keys are returned +// as base64-encoded strings. 
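+//
+// A usage sketch (usernames mirror those in keybase_test.go; entries
+// without the "keybase:" prefix are ignored):
+//
+//	keys, err := FetchKeybasePubkeys([]string{"keybase:jefferai", "keybase:hashicorp"})
+//	if err != nil {
+//		return err
+//	}
+//	bundleB64 := keys["keybase:jefferai"] // base64-encoded public key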
+func FetchKeybasePubkeys(input []string) (map[string]string, error) { + client := cleanhttp.DefaultClient() + if client == nil { + return nil, fmt.Errorf("unable to create an http client") + } + + if len(input) == 0 { + return nil, nil + } + + usernames := make([]string, 0, len(input)) + for _, v := range input { + if strings.HasPrefix(v, kbPrefix) { + usernames = append(usernames, strings.TrimPrefix(v, kbPrefix)) + } + } + + if len(usernames) == 0 { + return nil, nil + } + + ret := make(map[string]string, len(usernames)) + url := fmt.Sprintf("https://keybase.io/_/api/1.0/user/lookup.json?usernames=%s&fields=public_keys", strings.Join(usernames, ",")) + resp, err := client.Get(url) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + type PublicKeys struct { + Primary struct { + Bundle string + } + } + + type LThem struct { + PublicKeys `json:"public_keys"` + } + + type KbResp struct { + Status struct { + Name string + } + Them []LThem + } + + out := &KbResp{ + Them: []LThem{}, + } + + if err := jsonutil.DecodeJSONFromReader(resp.Body, out); err != nil { + return nil, err + } + + if out.Status.Name != "OK" { + return nil, fmt.Errorf("got non-OK response: %q", out.Status.Name) + } + + missingNames := make([]string, 0, len(usernames)) + var keyReader *bytes.Reader + serializedEntity := bytes.NewBuffer(nil) + for i, themVal := range out.Them { + if themVal.Primary.Bundle == "" { + missingNames = append(missingNames, usernames[i]) + continue + } + keyReader = bytes.NewReader([]byte(themVal.Primary.Bundle)) + entityList, err := openpgp.ReadArmoredKeyRing(keyReader) + if err != nil { + return nil, err + } + if len(entityList) != 1 { + return nil, fmt.Errorf("primary key could not be parsed for user %q", usernames[i]) + } + if entityList[0] == nil { + return nil, fmt.Errorf("primary key was nil for user %q", usernames[i]) + } + + serializedEntity.Reset() + err = entityList[0].Serialize(serializedEntity) + if err != nil { + return nil, fmt.Errorf("error serializing entity for user %q: %w", usernames[i], err) + } + + // The API returns values in the same ordering requested, so this should properly match + ret[kbPrefix+usernames[i]] = base64.StdEncoding.EncodeToString(serializedEntity.Bytes()) + } + + if len(missingNames) > 0 { + return nil, fmt.Errorf("unable to fetch keys for user(s) %q from keybase", strings.Join(missingNames, ",")) + } + + return ret, nil +} diff --git a/helper/pgpkeys/keybase_test.go b/helper/pgpkeys/keybase_test.go new file mode 100644 index 0000000..7d59899 --- /dev/null +++ b/helper/pgpkeys/keybase_test.go @@ -0,0 +1,45 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package pgpkeys + +import ( + "bytes" + "encoding/base64" + "encoding/hex" + "reflect" + "testing" + + "github.com/ProtonMail/go-crypto/openpgp" + "github.com/ProtonMail/go-crypto/openpgp/packet" +) + +func TestFetchKeybasePubkeys(t *testing.T) { + testset := []string{"keybase:jefferai", "keybase:hashicorp"} + ret, err := FetchKeybasePubkeys(testset) + if err != nil { + t.Fatalf("bad: %v", err) + } + + fingerprints := []string{} + for _, user := range testset { + data, err := base64.StdEncoding.DecodeString(ret[user]) + if err != nil { + t.Fatalf("error decoding key for user %s: %v", user, err) + } + entity, err := openpgp.ReadEntity(packet.NewReader(bytes.NewBuffer(data))) + if err != nil { + t.Fatalf("error parsing key for user %s: %v", user, err) + } + fingerprints = append(fingerprints, hex.EncodeToString(entity.PrimaryKey.Fingerprint[:])) + } + + exp := []string{ + "0f801f518ec853daff611e836528efcac6caa3db", + "c874011f0ab405110d02105534365d9472d7468f", + } + + if !reflect.DeepEqual(fingerprints, exp) { + t.Fatalf("fingerprints do not match; expected \n%#v\ngot\n%#v\n", exp, fingerprints) + } +} diff --git a/helper/pgpkeys/test_keys.go b/helper/pgpkeys/test_keys.go new file mode 100644 index 0000000..be97698 --- /dev/null +++ b/helper/pgpkeys/test_keys.go @@ -0,0 +1,274 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pgpkeys + +const ( + TestPrivKey1 = `lQOYBFXbjPUBCADjNjCUQwfxKL+RR2GA6pv/1K+zJZ8UWIF9S0lk7cVIEfJiprzzwiMwBS5cD0da +rGin1FHvIWOZxujA7oW0O2TUuatqI3aAYDTfRYurh6iKLC+VS+F7H+/mhfFvKmgr0Y5kDCF1j0T/ +063QZ84IRGucR/X43IY7kAtmxGXH0dYOCzOe5UBX1fTn3mXGe2ImCDWBH7gOViynXmb6XNvXkP0f +sF5St9jhO7mbZU9EFkv9O3t3EaURfHopsCVDOlCkFCw5ArY+DUORHRzoMX0PnkyQb5OzibkChzpg +8hQssKeVGpuskTdz5Q7PtdW71jXd4fFVzoNH8fYwRpziD2xNvi6HABEBAAEAB/wL+KX0mdeISEpX +oDgt766Key1Kthe8nbEs5dOXIsP7OR7ZPcnE2hy6gftgVFnBGEZnWVN70vmJd6Z5y9d1mI+GecXj +UL0EpI0EmohyYDJsHUnght/5ecRNFA+VeNmGPYNQGCeHJyZOiFunGGENpHU7BbubAht8delz37Mx +JQgvMyR6AKvg8HKBoQeqV1uMWNJE/vKwV/z1dh1sjK/GFxu05Qaq0GTfAjVLuFOyJTS95yq6gblD +jUdbHLp7tBeqIKo9voWCJF5mGOlq3973vVoWETy9b0YYPCE/M7fXmK9dJITHqkROLMW6TgcFeIw4 +yL5KOBCHk+QGPSvyQN7R7Fd5BADwuT1HZmvg7Y9GjarKXDjxdNemUiHtba2rUzfH6uNmKNQvwQek +nma5palNUJ4/dz1aPB21FUBXJF5yWwXEdApl+lIDU0J5m4UD26rqEVRq9Kx3GsX+yfcwObkrSzW6 +kmnQSB5KI0fIuegMTM+Jxo3pB/mIRwDTMmk+vfzIGyW+7QQA8aFwFLMdKdfLgSGbl5Z6etmOAVQ2 +Oe2ebegU9z/ewi/Rdt2s9yQiAdGVM8+q15Saz8a+kyS/l1CjNPzr3VpYx1OdZ3gb7i2xoy9GdMYR +ZpTq3TuST95kx/9DqA97JrP23G47U0vwF/cg8ixCYF8Fz5dG4DEsxgMwKqhGdW58wMMD/iytkfMk +Vk6Z958Rpy7lhlC6L3zpO38767bSeZ8gRRi/NMFVOSGYepKFarnfxcTiNa+EoSVA6hUo1N64nALE +sJBpyOoTfKIpz7WwTF1+WogkiYrfM6lHon1+3qlziAcRW0IohM3g2C1i3GWdON4Cl8/PDO3R0E52 +N6iG/ctNNeMiPe60EFZhdWx0IFRlc3QgS2V5IDGJATgEEwECACIFAlXbjPUCGy8GCwkIBwMCBhUI +AgkKCwQWAgMBAh4BAheAAAoJEOfLr44BHbeTo+sH/i7bapIgPnZsJ81hmxPj4W12uvunksGJiC7d +4hIHsG7kmJRTJfjECi+AuTGeDwBy84TDcRaOB6e79fj65Fg6HgSahDUtKJbGxj/lWzmaBuTzlN3C +Ee8cMwIPqPT2kajJVdOyrvkyuFOdPFOEA7bdCH0MqgIdM2SdF8t40k/ATfuD2K1ZmumJ508I3gF3 +9jgTnPzD4C8quswrMQ3bzfvKC3klXRlBC0yoArn+0QA3cf2B9T4zJ2qnvgotVbeK/b1OJRNj6Poe +o+SsWNc/A5mw7lGScnDgL3yfwCm1gQXaQKfOt5x+7GqhWDw10q+bJpJlI10FfzAnhMF9etSqSeUR +BRWdA5gEVduM9QEIAL53hJ5bZJ7oEDCnaY+SCzt9QsAfnFTAnZJQrvkvusJzrTQ088eUQmAjvxkf +Rqnv981fFwGnh2+I1Ktm698UAZS9Jt8yjak9wWUICKQO5QUt5k8cHwldQXNXVXFa+TpQWQR5yW1a +9okjh5o/3d4cBt1yZPUJJyLKY43Wvptb6EuEsScO2DnRkh5wSMDQ7dTooddJCmaq3LTjOleRFQbu +9ij386Do6jzK69mJU56TfdcydkxkWF5NZLGnED3lq+hQNbe+8UI5tD2oP/3r5tXKgMy1R/XPvR/z 
+bfwvx4FAKFOP01awLq4P3d/2xOkMu4Lu9p315E87DOleYwxk+FoTqXEAEQEAAQAH+wVyQXaNwnjQ +xfW+M8SJNo0C7e+0d7HsuBTA/d/eP4bj6+X8RaRFVwiMvSAoxsqBNCLJP00qzzKfRQWJseD1H35z +UjM7rNVUEL2k1yppyp61S0qj0TdhVUfJDYZqRYonVgRMvzfDTB1ryKrefKenQYL/jGd9VYMnKmWZ +6GVk4WWXXx61iOt2HNcmSXKetMM1Mg67woPZkA3fJaXZ+zW0zMu4lTSB7yl3+vLGIFYILkCFnREr +drQ+pmIMwozUAt+pBq8dylnkHh6g/FtRfWmLIMDqM1NlyuHRp3dyLDFdTA93osLG0QJblfX54W34 +byX7a4HASelGi3nPjjOAsTFDkuEEANV2viaWk1CV4ryDrXGmy4Xo32Md+laGPRcVfbJ0mjZjhQsO +gWC1tjMs1qZMPhcrKIBCjjdAcAIrGV9h3CXc0uGuez4XxLO+TPBKaS0B8rKhnKph1YZuf+HrOhzS +astDnOjNIT+qucCL/qSbdYpj9of3yY61S59WphPOBjoVM3BFBADka6ZCk81gx8jA2E1e9UqQDmdM +FZaVA1E7++kqVSFRDJGnq+5GrBTwCJ+sevi+Rvf8Nx4AXvpCdtMBPX9RogsUFcR0pMrKBrgRo/Vg +EpuodY2Ef1VtqXR24OxtRf1UwvHKydIsU05rzMAy5uGgQvTzRTXxZFLGUY31wjWqmo9VPQP+PnwA +K83EV2kk2bsXwZ9MXg05iXqGQYR4bEc/12v04BtaNaDS53hBDO4JIa3Bnz+5oUoYhb8FgezUKA9I +n6RdKTTP1BLAu8titeozpNF07V++dPiSE2wrIVsaNHL1pUwW0ql50titVwe+EglWiCKPtJBcCPUA +3oepSPchiDjPqrNCYIkCPgQYAQIACQUCVduM9QIbLgEpCRDny6+OAR23k8BdIAQZAQIABgUCVduM +9QAKCRAID0JGyHtSGmqYB/4m4rJbbWa7dBJ8VqRU7ZKnNRDR9CVhEGipBmpDGRYulEimOPzLUX/Z +XZmTZzgemeXLBaJJlWnopVUWuAsyjQuZAfdd8nHkGRHG0/DGum0l4sKTta3OPGHNC1z1dAcQ1RCr +9bTD3PxjLBczdGqhzw71trkQRBRdtPiUchltPMIyjUHqVJ0xmg0hPqFic0fICsr0YwKoz3h9+QEc +ZHvsjSZjgydKvfLYcm+4DDMCCqcHuJrbXJKUWmJcXR0y/+HQONGrGJ5xWdO+6eJioPn2jVMnXCm4 +EKc7fcLFrz/LKmJ8seXhxjM3EdFtylBGCrx3xdK0f+JDNQaC/rhUb5V2XuX6VwoH/AtY+XsKVYRf +NIupLOUcf/srsm3IXT4SXWVomOc9hjGQiJ3rraIbADsc+6bCAr4XNZS7moViAAcIPXFv3m3WfUln +G/om78UjQqyVACRZqqAGmuPq+TSkRUCpt9h+A39LQWkojHqyob3cyLgy6z9Q557O9uK3lQozbw2g +H9zC0RqnePl+rsWIUU/ga16fH6pWc1uJiEBt8UZGypQ/E56/343epmYAe0a87sHx8iDV+dNtDVKf +PRENiLOOc19MmS+phmUyrbHqI91c0pmysYcJZCD3a502X1gpjFbPZcRtiTmGnUKdOIu60YPNE4+h +7u2CfYyFPu3AlUaGNMBlvy6PEpU=` + + TestPrivKey2 = `lQOYBFXbkJEBCADKb1ZvlT14XrJa2rTOe5924LQr2PTZlRv+651TXy33yEhelZ+V4sMrELN8fKEG +Zy1kNixmbq3MCF/671k3LigHA7VrOaH9iiQgr6IIq2MeIkUYKZ27C992vQkYLjbYUG8+zl5h69S4 +0Ixm0yL0M54XOJ0gm+maEK1ZESKTUlDNkIS7l0jLZSYwfUeGXSEt6FWs8OgbyRTaHw4PDHrDEE9e +Q67K6IZ3YMhPOL4fVk4Jwrp5R/RwiklT+lNozWEyFVwPFH4MeQMs9nMbt+fWlTzEA7tI4acI9yDk +Cm1yN2R9rmY0UjODRiJw6z6sLV2T+Pf32n3MNSUOYczOjZa4VBwjABEBAAEAB/oCBqTIsxlUgLtz +HRpWW5MJ+93xvmVV0JHhRK/ygKghq+zpC6S+cn7dwrEj1JTPh+17lyemYQK+RMeiBEduoWNKuHUd +WX353w2411rrc/VuGTglzhd8Ir2BdJlPesCzw4JQnrWqcBqN52W+iwhnE7PWVhnvItWnx6APK5Se +q7dzFWy8Z8tNIHm0pBQbeyo6x2rHHSWkr2fs7V02qFQhii1ayFRMcgdOWSNX6CaZJuYhk/DyjApN +9pVhi3P1pNMpFeV0Pt8Gl1f/9o6/HpAYYEt/6vtVRhFUGgtNi95oc0oyzIJxliRvd6+Z236osigQ +QEBwj1ImRK8TKyWPlykiJWc5BADfldgOCA55o3Qz/z/oVE1mm+a3FmPPTQlHBXotNEsrWV2wmJHe +lNQPI6ZwMtLrBSg8PUpG2Rvao6XJ4ZBl/VcDwfcLgCnALPCcL0L0Z3vH3Sc9Ta/bQWJODG7uSaI1 +iVJ7ArKNtVzTqRQWK967mol9CCqh4A0jRrH0aVEFbrqQ/QQA58iEJaFhzFZjufjC9N8Isn3Ky7xu +h+dk001RNCb1GnNZcx4Ld2IB+uXyYjtg7dNaUhGgGuCBo9nax89bMsBzzUukx3SHq1pxopMg6Dm8 +ImBoIAicuQWgEkaP2T0rlwCozUalJZaG1gyrzkPhkeY7CglpJycHLHfY2MIb46c8+58D/iJ83Q5j +Y4x+sqW2QeUYFwqCcOW8Urg64UxEkgXZXiNMwTAJCaxp/Pz7cgeUDwgv+6CXEdnT1910+byzK9ha +V1Q/65+/JYuCeyHxcoAb4Wtpdl7GALGd/1G0UAmq47yrefEr/b00uS35i1qUUhOzo1NmEZch/bvF +kmJ+WtAHunZcOCu0EFZhdWx0IFRlc3QgS2V5IDKJATgEEwECACIFAlXbkJECGy8GCwkIBwMCBhUI +AgkKCwQWAgMBAh4BAheAAAoJEOuDLGfrXolXqz4H/28IuoRxGKoJ064YHjPkkpoddW6zdzzNfHip +ZnNfEUiTEls4qF1IB81M2xqfiXIFRIdO2kaLkRPFhO0hRxbtI6VuZYLgG3QCaXhxW6GyFa5zKABq +hb5ojexdnAYRswaHV201ZCclj9rnJN1PAg0Rz6MdX/w1euEWktQxWKo42oZKyx8oT9p6lrv5KRmG +kdrg8K8ARmRILjmwuBAgJM0eXBZHNGWXelk4YmOgnAAcZA6ZAo1G+8Pg6pKKP61ETewuCg3/u7N0 +vDttB+ZXqF88W9jAYlvdgbTtajNF5IDYDjTzWfeCaIB18F9gOzXq15SwWeDDI+CU9Nmq358IzXlx 
+k4edA5gEVduQkQEIAOjZV5tbpfIh5QefpIp2dpGMVfpgPj4RNc15CyFnb8y6dhCrdybkY9GveXJe +4F3GNYnSfB42cgxrfhizX3LakmZQ/SAg+YO5KxfCIN7Q9LPNeTgPsZZT6h8lVuXUxOFKXfRaR3/t +GF5xE3e5CoZRsHV/c92h3t1LdJNOnC5mUKIPO4zDxiw/C2T2q3rP1kmIMaOH724kEH5A+xcp1cBH +yt0tdHtIWuQv6joTJzujqViRhlCwQYzQSKpSBxwhBsorPvyinZI/ZXA4XXZc5RoMqV9rikedrb1r +ENO8JOuPu6tMS+znFu67skq2gFFZwCQWIjdHm+2ukE+PE580WAWudyMAEQEAAQAH/i7ndRPI+t0T +AdEu0dTIdyrrg3g7gd471kQtIVZZwTYSy2yhNY/Ciu72s3ab8QNCxY8dNL5bRk8FKjHslAoNSFdO +8iZSLiDgIHOZOcjYe6pqdgQaeTHodm1Otrn2SbB+K/3oX6W/y1xe18aSojGba/nHMj5PeJbIN9Pi +jmh0WMLD/0rmkTTxR7qQ5+kMV4O29xY4qjdYRD5O0adeZX0mNncmlmQ+rX9yxrtSgFROu1jwVtfP +hcNetifTTshJnTwND8hux5ECEadlIVBHypW28Hth9TRBXmddTmv7L7mdtUO6DybgkpWpw4k4LPsk +uZ6aY4wcGRp7EVfWGr9NHbq/n+0EAOlhDXIGdylkQsndjBMyhPsXZa5fFBmOyHjXj733195Jgr1v +ZjaIomrA9cvYrmN75oKrG1jJsMEl6HfC/ZPzEj6E51/p1PRdHP7CdUUA+DG8x4M3jn+e43psVuAR +a1XbN+8/bOa0ubt7ljVPjAEvWRSvU9dRaQz93w3fduAuM07dBAD/ayK3e0d6JMJMrU50lNOXQBgL +rFbg4rWzPO9BJQdhjOhmOZQiUa1Q+EV+s95yIUg1OAfaMP9KRIljr5RCdGNS6WoMNBAQOSrZpelf +jW4NpzphNfWDGVkUoPoskVtJz/nu9d860dGd3Al0kSmtUpMu5QKlo+sSxXUPbWLUn8V9/wP/ScCW +H+0gtL4R7SFazPeTIP+Cu5oR7A/DlFVLJKa3vo+atkhSvwxHGbg04vb/W4mKhGGVtMBtlhRmaWOe +PhUulU5FdaYsdlpN/Yd+hhgU6NHlyImPGVEHWD8c6CG8qoZfpR33j2sqshs4i/MtJZeBvl62vxPn +9bDN7KAjFNll9axAjIkCPgQYAQIACQUCVduQkQIbLgEpCRDrgyxn616JV8BdIAQZAQIABgUCVduQ +kQAKCRArYtevdF38xtzgB/4zVzozBpVOnagRkA7FDsHo36xX60Lik+ew0m28ueDDhnV3bXQsCvn/ +6wiCVWqLOTDeYCPlyTTpEMyk8zwdCICW6MgSkVHWcEDOrRqIrqm86rirjTGjJSgQe3l4CqJvkn6j +ybShYoBk1OZZV6vVv9hPTXXv9E6dLKoEW5YZBrrF+VC0w1iOIvaAQ+QXph20eV4KBIrp/bhG6Pdn +igKxuBZ79cdqDnXIzT9UiIa6LYpR0rbeg+7BmuZTTPS8t+41hIiKS+UZFdKa67eYENtyOmEMWOFC +LLRJGxkleukchiMJ70rknloZXsvJIweXBzSZ6m7mJQBgaig/L/dXyjv6+j2pNB4H/1trYUtJjXQK +HmqlgCmpCkHt3g7JoxWvglnDNmE6q3hIWuVIYQpnzZy1g05+X9Egwc1WVpBB02H7PkUZTfpaP/L6 +DLneMmSKPhZE3I+lPIPjwrxqh6xy5uQezcWkJTNKvPWF4FJzrVvx7XTPjfGvOB0UPEnjvtZTp5yO +hTeZK7DgIEtb/Wcrqs+iRArQKboM930ORSZhwvGK3F9V/gMDpIrvge5vDFsTEYQdw/2epIewH0L/ +FUb/6jBRcVEpGo9Ayg+Jnhq14GOGcd1y9oMZ48kYVLVBTA9tQ+82WE8Bch7uFPj4MFOMVRn1dc3q +dXlg3mimA+iK7tABQfG0RJ9YzWs=` + + TestPrivKey3 = `lQOXBFXbkiMBCACiHW4/VI2JkfvSEINddS7vE6wEu5e1leNQDaLUh6PrATQZS2a4Q6kRE6WlJumj +6wCeN753Cm93UGQl2Bi3USIEeArIZnPTcocrckOVXxtoLBNKXgqKvEsDXgfw8A+doSfXoDm/3Js4 +Wy3WsYKNR9LaPuJZHnpjsFAJhvRVyhH4UFD+1RTSSefq1mozPfDdMoZeZNEpfhwt3DuTJs7RqcTH +CgR2CqhEHnOOE5jJUljHKYLCglE2+8dth1bZlQi4xly/VHZzP3Bn7wKeolK/ROP6VZz/e0xq/BKy +resmxvlBWZ1zWwqGIrV9b0uwYvGrh2hOd5C5+5oGaA2MGcjxwaLBABEBAAEAB/dQbElFIa0VklZa +39ZLhtbBxACSWH3ql3EtRZaB2Mh4zSALbFyJDQfScOy8AZHmv66Ozxit9X9WsYr9OzcHujgl/2da +A3lybF6iLw1YDNaL11G6kuyn5sFP6lYGMRGOIWSik9oSVF6slo8m8ujRLdBsdMXVcElHKzCJiWmt +JZHEnUkl9X96fIPajMBfWjHHwcaeMOc77nvjwqy5wC4EY8TSVYzxeZHL7DADQ0EHBcThlmfizpCq +26LMVb6ju8STH7uDDFyKmhr/hC2vOkt+PKsvBCmW8/ESanO1zKPD9cvSsOWr2rZWNnkDRftqzOU5 +OCrI+3o9E74+toNb07bPntEEAMEStOzSvqZ6NKdh7EZYPA4mkkFC+EiHYIoinP1sd9V8O2Hq+dzx +yFHtWu0LmP6uWXk45vsP9y1UMJcEa33ew5JJa7zgucI772/BNvd/Oys/PqwIAl6uNIY8uYLgmn4L +1IPatp7vDiXzZSivPZd4yN4S4zCypZp9cnpO3qv8q7CtBADW87IA0TabdoxiN+m4XL7sYDRIfglr +MRPAlfrkAUaGDBx/t1xb6IaKk7giFdwHpTI6+g9XNkqKqogMe4Fp+nsd1xtfsNUBn6iKZavm5kXe +Lp9QgE+K6mvIreOTe2PKQqXqgPRG6+SRGatoKeY76fIpd8AxOJyWERxcq2lUHLn45QP/UXDTcYB7 +gzJtZrfpXN0GqQ0lYXMzbQfLnkUsu3mYzArfNy0otzEmKTkwmKclNY1/EJSzSdHfgmeA260a0nLK +64C0wPgSmOqw90qwi5odAYSjSFBapDbyGF86JpHrLxyEEpGoXanRPwWfbiWp19Nwg6nknA87AtaM +3+AHjbWzwCpHL7QQVmF1bHQgVGVzdCBLZXkgM4kBOAQTAQIAIgUCVduSIwIbLwYLCQgHAwIGFQgC +CQoLBBYCAwECHgECF4AACgkQ9HlLVvwtxt1aMQf/aaGoL1rRWTUjM6DEShXFhWpV29rEjSdNk5N+ 
+ZwVifgdCVD5IsSjI1Z7mO2SHHiTm4eKnHAofM6/TZgzXg1YLpu8rDYJARMsM8bgK/xgxSamGjm2c +wN220jOnwePIlG0drNTW5N6zb/K6qHoscJ6NUkjS5JPdGJuq7B0bdCM8/xSbG75gL34U5bYqK38B +DwmW4UMl2rf/BJfxV9hmsZ2Cat4TspgyiWEKTMZI+PugXKDDwuoqgm+320K4EqFkwG4y/WwHkKgk +hZ0+io5lzhTsvVd2p8q8VlH9GG5eA3WWQj0yqucsOmKQvcuT5y0vFY6NQJbyuioqgdlgEXtc+p0B ++Z0DmARV25IjAQgA49yN3hCBsuWoiTezoE9FHJXOCVOBR1/4jStQPJtoMl8mhtl3xTp7iGQ+9GhD +y0l5+fP+qcP/rfBq0BslhxVOZ7jQjdUoM6ZUZzJoPGIo/V2KwqpwQl3tdCIjvagCJeYQfTL7lTCc +4ySz+XBoAYMwZVGMcRcjp+JE8Wx9Ovzuq8wnelbU6I5dVJ7O4E1OWbIkLuytDX+fDEvfft6/oPXN +Bl3cm6FzEuQetQQss3DOG9xnvS+DrjmMCbPwR2a++ioQ8+geoqA/kB4cAI6xOb3ncoeGDHc1i4Y9 +T9Ggi+6Aq3girmfDtNYVOM8cZUXcZNCvLkJn8DNeIvnuFUSEO+a5PwARAQABAAf/TPd98CmRNdV/ +VUI8aYT9Kkervdi4DVzsfvrHcoFn88PSJrCkVTmI6qw526Kwa6VZD0YMmll7LszLt5nD1lorDrwN +rir3FmMzlVwge20IvXRwX4rkunYxtA2oFvL+LsEEhtXGx0ERbWRDapk+eGxQ15hxIO4Y/Cdg9E+a +CWfQUrTSnC6qMVfVYMGfnM1yNX3OWattEFfmxQas5XqQk/0FgjCZALixdanjN/r1tjp5/2MiSD8N +Wkemzsr6yPicnc3+BOZc5YOOnH8FqBvVHcDlSJI6pCOCEiO3Pq2QEk/1evONulbF116mLnQoGrpp +W77l+5O42VUpZfjROCPd5DYyMQQA492CFXZpDIJ2emB9/nK8X6IzdVRK3oof8btbSNnme5afIzhs +wR1ruX30O7ThfB+5ezpbgK1C988CWkr9SNSTy43omarafGig6/Y1RzdiITILuIGfbChoSpc70jXx +U0nzJ/1i9yZ/vDgP3EC2miRhlDcp5w0Bu0oMBlgG/1uhj0cEAP/+7aFGP0fo2MZPhyl5feHKWj4k +85XoAIpMBnzF6HTGU3ljAE56a+4sVw3bWB755DPhvpZvDkX60I9iIJxio8TK5ITdfjlLhxuskXyt +ycwWI/4J+soeq4meoxK9jxZJuDl/qvoGfyzNg1oy2OBehX8+6erW46kr6Z/MQutS3zJJBACmJHrK +VR40qD7a8KbvfuM3ruwlm5JqT/Ykq1gfKKxHjWDIUIeyBX/axGQvAGNYeuuQCzZ0+QsEWur3C4kN +U+Pb5K1WGyOKkhJzivSI56AG3d8TA/Q0JhqST6maY0fvUoahWSCcpd7MULa3n1zx5Wsvi8mkVtup +Js/IDi/kqneqM0XviQI+BBgBAgAJBQJV25IjAhsuASkJEPR5S1b8LcbdwF0gBBkBAgAGBQJV25Ij +AAoJEAUj/03Hcrkg84UIAKxn9nizYtwSgDnVNb5PnD5h6+Ui6r7ffYm2o0im4YhakbFTHIPI9PRh +BavRI5sE5Fg2vtE/x38jattoUrJoNoq9Gh9iv5PBfL3amEGjul0RRqYGl+ub+yv7YGAAHbHcdZen +4gx15VWGpB7y3hycWbdzV8h3EAPKIm5XmB7YyXmArnI3CoJA+HtTZGoL6WZWUwka9YichGfaZ/oD +umENg1l87Pp2RqvjLKHmv2tGCtnDzyv/IiWur9zopFQiCc8ysVgRq6CA5x5nzbv6MqRspYUS4e2I +LFbuREA3blR+caw9oX41IYzarW8IbgeIXJ3HqUyhczRKF/z5nDKtX/kHMCqlbAgAnfu0TALnwVuj +KeXLo4Y7OA9LTEqfORcw62q5OjSoQf/VsRSwGSefv3kGZk5N/igELluU3qpG/twZI/TSL6zGqXU2 +FOMlyMm1849TOB9b4B//4dHrjzPhztzowKMMUqeTxmSgYtFTshKN6eQ0XO+7ZuOXEmSKXS4kOUs9 +ttfzSiPNXUZL2D5nFU9H7rw3VAuXYVTrOx+Dfi6mYsscbxUbi8THODI2Q7B9Ni92DJE1OOe4+57o +fXZ9ln24I14bna/uVHd6hBwLEE6eLCCKkHxQnnZFZduXDHMK0a0OL8RYHfMtNSem4pyC5wDQui1u +KFIzGEPKVoBF9U7VBXpyxpsz+A==` + + TestPubKey1 = `mQENBFXbjPUBCADjNjCUQwfxKL+RR2GA6pv/1K+zJZ8UWIF9S0lk7cVIEfJiprzzwiMwBS5cD0da +rGin1FHvIWOZxujA7oW0O2TUuatqI3aAYDTfRYurh6iKLC+VS+F7H+/mhfFvKmgr0Y5kDCF1j0T/ +063QZ84IRGucR/X43IY7kAtmxGXH0dYOCzOe5UBX1fTn3mXGe2ImCDWBH7gOViynXmb6XNvXkP0f +sF5St9jhO7mbZU9EFkv9O3t3EaURfHopsCVDOlCkFCw5ArY+DUORHRzoMX0PnkyQb5OzibkChzpg +8hQssKeVGpuskTdz5Q7PtdW71jXd4fFVzoNH8fYwRpziD2xNvi6HABEBAAG0EFZhdWx0IFRlc3Qg +S2V5IDGJATgEEwECACIFAlXbjPUCGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOfLr44B +HbeTo+sH/i7bapIgPnZsJ81hmxPj4W12uvunksGJiC7d4hIHsG7kmJRTJfjECi+AuTGeDwBy84TD +cRaOB6e79fj65Fg6HgSahDUtKJbGxj/lWzmaBuTzlN3CEe8cMwIPqPT2kajJVdOyrvkyuFOdPFOE +A7bdCH0MqgIdM2SdF8t40k/ATfuD2K1ZmumJ508I3gF39jgTnPzD4C8quswrMQ3bzfvKC3klXRlB +C0yoArn+0QA3cf2B9T4zJ2qnvgotVbeK/b1OJRNj6Poeo+SsWNc/A5mw7lGScnDgL3yfwCm1gQXa +QKfOt5x+7GqhWDw10q+bJpJlI10FfzAnhMF9etSqSeURBRW5AQ0EVduM9QEIAL53hJ5bZJ7oEDCn +aY+SCzt9QsAfnFTAnZJQrvkvusJzrTQ088eUQmAjvxkfRqnv981fFwGnh2+I1Ktm698UAZS9Jt8y +jak9wWUICKQO5QUt5k8cHwldQXNXVXFa+TpQWQR5yW1a9okjh5o/3d4cBt1yZPUJJyLKY43Wvptb +6EuEsScO2DnRkh5wSMDQ7dTooddJCmaq3LTjOleRFQbu9ij386Do6jzK69mJU56TfdcydkxkWF5N 
+ZLGnED3lq+hQNbe+8UI5tD2oP/3r5tXKgMy1R/XPvR/zbfwvx4FAKFOP01awLq4P3d/2xOkMu4Lu +9p315E87DOleYwxk+FoTqXEAEQEAAYkCPgQYAQIACQUCVduM9QIbLgEpCRDny6+OAR23k8BdIAQZ +AQIABgUCVduM9QAKCRAID0JGyHtSGmqYB/4m4rJbbWa7dBJ8VqRU7ZKnNRDR9CVhEGipBmpDGRYu +lEimOPzLUX/ZXZmTZzgemeXLBaJJlWnopVUWuAsyjQuZAfdd8nHkGRHG0/DGum0l4sKTta3OPGHN +C1z1dAcQ1RCr9bTD3PxjLBczdGqhzw71trkQRBRdtPiUchltPMIyjUHqVJ0xmg0hPqFic0fICsr0 +YwKoz3h9+QEcZHvsjSZjgydKvfLYcm+4DDMCCqcHuJrbXJKUWmJcXR0y/+HQONGrGJ5xWdO+6eJi +oPn2jVMnXCm4EKc7fcLFrz/LKmJ8seXhxjM3EdFtylBGCrx3xdK0f+JDNQaC/rhUb5V2XuX6VwoH +/AtY+XsKVYRfNIupLOUcf/srsm3IXT4SXWVomOc9hjGQiJ3rraIbADsc+6bCAr4XNZS7moViAAcI +PXFv3m3WfUlnG/om78UjQqyVACRZqqAGmuPq+TSkRUCpt9h+A39LQWkojHqyob3cyLgy6z9Q557O +9uK3lQozbw2gH9zC0RqnePl+rsWIUU/ga16fH6pWc1uJiEBt8UZGypQ/E56/343epmYAe0a87sHx +8iDV+dNtDVKfPRENiLOOc19MmS+phmUyrbHqI91c0pmysYcJZCD3a502X1gpjFbPZcRtiTmGnUKd +OIu60YPNE4+h7u2CfYyFPu3AlUaGNMBlvy6PEpU=` + + TestPubKey2 = `mQENBFXbkJEBCADKb1ZvlT14XrJa2rTOe5924LQr2PTZlRv+651TXy33yEhelZ+V4sMrELN8fKEG +Zy1kNixmbq3MCF/671k3LigHA7VrOaH9iiQgr6IIq2MeIkUYKZ27C992vQkYLjbYUG8+zl5h69S4 +0Ixm0yL0M54XOJ0gm+maEK1ZESKTUlDNkIS7l0jLZSYwfUeGXSEt6FWs8OgbyRTaHw4PDHrDEE9e +Q67K6IZ3YMhPOL4fVk4Jwrp5R/RwiklT+lNozWEyFVwPFH4MeQMs9nMbt+fWlTzEA7tI4acI9yDk +Cm1yN2R9rmY0UjODRiJw6z6sLV2T+Pf32n3MNSUOYczOjZa4VBwjABEBAAG0EFZhdWx0IFRlc3Qg +S2V5IDKJATgEEwECACIFAlXbkJECGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOuDLGfr +XolXqz4H/28IuoRxGKoJ064YHjPkkpoddW6zdzzNfHipZnNfEUiTEls4qF1IB81M2xqfiXIFRIdO +2kaLkRPFhO0hRxbtI6VuZYLgG3QCaXhxW6GyFa5zKABqhb5ojexdnAYRswaHV201ZCclj9rnJN1P +Ag0Rz6MdX/w1euEWktQxWKo42oZKyx8oT9p6lrv5KRmGkdrg8K8ARmRILjmwuBAgJM0eXBZHNGWX +elk4YmOgnAAcZA6ZAo1G+8Pg6pKKP61ETewuCg3/u7N0vDttB+ZXqF88W9jAYlvdgbTtajNF5IDY +DjTzWfeCaIB18F9gOzXq15SwWeDDI+CU9Nmq358IzXlxk4e5AQ0EVduQkQEIAOjZV5tbpfIh5Qef +pIp2dpGMVfpgPj4RNc15CyFnb8y6dhCrdybkY9GveXJe4F3GNYnSfB42cgxrfhizX3LakmZQ/SAg ++YO5KxfCIN7Q9LPNeTgPsZZT6h8lVuXUxOFKXfRaR3/tGF5xE3e5CoZRsHV/c92h3t1LdJNOnC5m +UKIPO4zDxiw/C2T2q3rP1kmIMaOH724kEH5A+xcp1cBHyt0tdHtIWuQv6joTJzujqViRhlCwQYzQ +SKpSBxwhBsorPvyinZI/ZXA4XXZc5RoMqV9rikedrb1rENO8JOuPu6tMS+znFu67skq2gFFZwCQW +IjdHm+2ukE+PE580WAWudyMAEQEAAYkCPgQYAQIACQUCVduQkQIbLgEpCRDrgyxn616JV8BdIAQZ +AQIABgUCVduQkQAKCRArYtevdF38xtzgB/4zVzozBpVOnagRkA7FDsHo36xX60Lik+ew0m28ueDD +hnV3bXQsCvn/6wiCVWqLOTDeYCPlyTTpEMyk8zwdCICW6MgSkVHWcEDOrRqIrqm86rirjTGjJSgQ +e3l4CqJvkn6jybShYoBk1OZZV6vVv9hPTXXv9E6dLKoEW5YZBrrF+VC0w1iOIvaAQ+QXph20eV4K +BIrp/bhG6PdnigKxuBZ79cdqDnXIzT9UiIa6LYpR0rbeg+7BmuZTTPS8t+41hIiKS+UZFdKa67eY +ENtyOmEMWOFCLLRJGxkleukchiMJ70rknloZXsvJIweXBzSZ6m7mJQBgaig/L/dXyjv6+j2pNB4H +/1trYUtJjXQKHmqlgCmpCkHt3g7JoxWvglnDNmE6q3hIWuVIYQpnzZy1g05+X9Egwc1WVpBB02H7 +PkUZTfpaP/L6DLneMmSKPhZE3I+lPIPjwrxqh6xy5uQezcWkJTNKvPWF4FJzrVvx7XTPjfGvOB0U +PEnjvtZTp5yOhTeZK7DgIEtb/Wcrqs+iRArQKboM930ORSZhwvGK3F9V/gMDpIrvge5vDFsTEYQd +w/2epIewH0L/FUb/6jBRcVEpGo9Ayg+Jnhq14GOGcd1y9oMZ48kYVLVBTA9tQ+82WE8Bch7uFPj4 +MFOMVRn1dc3qdXlg3mimA+iK7tABQfG0RJ9YzWs=` + + TestPubKey3 = `mQENBFXbkiMBCACiHW4/VI2JkfvSEINddS7vE6wEu5e1leNQDaLUh6PrATQZS2a4Q6kRE6WlJumj +6wCeN753Cm93UGQl2Bi3USIEeArIZnPTcocrckOVXxtoLBNKXgqKvEsDXgfw8A+doSfXoDm/3Js4 +Wy3WsYKNR9LaPuJZHnpjsFAJhvRVyhH4UFD+1RTSSefq1mozPfDdMoZeZNEpfhwt3DuTJs7RqcTH +CgR2CqhEHnOOE5jJUljHKYLCglE2+8dth1bZlQi4xly/VHZzP3Bn7wKeolK/ROP6VZz/e0xq/BKy +resmxvlBWZ1zWwqGIrV9b0uwYvGrh2hOd5C5+5oGaA2MGcjxwaLBABEBAAG0EFZhdWx0IFRlc3Qg +S2V5IDOJATgEEwECACIFAlXbkiMCGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEPR5S1b8 +LcbdWjEH/2mhqC9a0Vk1IzOgxEoVxYVqVdvaxI0nTZOTfmcFYn4HQlQ+SLEoyNWe5jtkhx4k5uHi +pxwKHzOv02YM14NWC6bvKw2CQETLDPG4Cv8YMUmpho5tnMDdttIzp8HjyJRtHazU1uTes2/yuqh6 
+LHCejVJI0uST3RibquwdG3QjPP8Umxu+YC9+FOW2Kit/AQ8JluFDJdq3/wSX8VfYZrGdgmreE7KY +MolhCkzGSPj7oFygw8LqKoJvt9tCuBKhZMBuMv1sB5CoJIWdPoqOZc4U7L1XdqfKvFZR/RhuXgN1 +lkI9MqrnLDpikL3Lk+ctLxWOjUCW8roqKoHZYBF7XPqdAfm5AQ0EVduSIwEIAOPcjd4QgbLlqIk3 +s6BPRRyVzglTgUdf+I0rUDybaDJfJobZd8U6e4hkPvRoQ8tJefnz/qnD/63watAbJYcVTme40I3V +KDOmVGcyaDxiKP1disKqcEJd7XQiI72oAiXmEH0y+5UwnOMks/lwaAGDMGVRjHEXI6fiRPFsfTr8 +7qvMJ3pW1OiOXVSezuBNTlmyJC7srQ1/nwxL337ev6D1zQZd3JuhcxLkHrUELLNwzhvcZ70vg645 +jAmz8EdmvvoqEPPoHqKgP5AeHACOsTm953KHhgx3NYuGPU/RoIvugKt4Iq5nw7TWFTjPHGVF3GTQ +ry5CZ/AzXiL57hVEhDvmuT8AEQEAAYkCPgQYAQIACQUCVduSIwIbLgEpCRD0eUtW/C3G3cBdIAQZ +AQIABgUCVduSIwAKCRAFI/9Nx3K5IPOFCACsZ/Z4s2LcEoA51TW+T5w+YevlIuq+332JtqNIpuGI +WpGxUxyDyPT0YQWr0SObBORYNr7RP8d/I2rbaFKyaDaKvRofYr+TwXy92phBo7pdEUamBpfrm/sr ++2BgAB2x3HWXp+IMdeVVhqQe8t4cnFm3c1fIdxADyiJuV5ge2Ml5gK5yNwqCQPh7U2RqC+lmVlMJ +GvWInIRn2mf6A7phDYNZfOz6dkar4yyh5r9rRgrZw88r/yIlrq/c6KRUIgnPMrFYEauggOceZ827 ++jKkbKWFEuHtiCxW7kRAN25UfnGsPaF+NSGM2q1vCG4HiFydx6lMoXM0Shf8+ZwyrV/5BzAqpWwI +AJ37tEwC58Fboynly6OGOzgPS0xKnzkXMOtquTo0qEH/1bEUsBknn795BmZOTf4oBC5blN6qRv7c +GSP00i+sxql1NhTjJcjJtfOPUzgfW+Af/+HR648z4c7c6MCjDFKnk8ZkoGLRU7ISjenkNFzvu2bj +lxJkil0uJDlLPbbX80ojzV1GS9g+ZxVPR+68N1QLl2FU6zsfg34upmLLHG8VG4vExzgyNkOwfTYv +dgyRNTjnuPue6H12fZZ9uCNeG52v7lR3eoQcCxBOniwgipB8UJ52RWXblwxzCtGtDi/EWB3zLTUn +puKcgucA0LotbihSMxhDylaARfVO1QV6csabM/g=` + + TestAAPubKey1 = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1 + +mQENBFXbjPUBCADjNjCUQwfxKL+RR2GA6pv/1K+zJZ8UWIF9S0lk7cVIEfJiprzz +wiMwBS5cD0darGin1FHvIWOZxujA7oW0O2TUuatqI3aAYDTfRYurh6iKLC+VS+F7 +H+/mhfFvKmgr0Y5kDCF1j0T/063QZ84IRGucR/X43IY7kAtmxGXH0dYOCzOe5UBX +1fTn3mXGe2ImCDWBH7gOViynXmb6XNvXkP0fsF5St9jhO7mbZU9EFkv9O3t3EaUR +fHopsCVDOlCkFCw5ArY+DUORHRzoMX0PnkyQb5OzibkChzpg8hQssKeVGpuskTdz +5Q7PtdW71jXd4fFVzoNH8fYwRpziD2xNvi6HABEBAAG0EFZhdWx0IFRlc3QgS2V5 +IDGJATgEEwECACIFAlXbjPUCGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJ +EOfLr44BHbeTo+sH/i7bapIgPnZsJ81hmxPj4W12uvunksGJiC7d4hIHsG7kmJRT +JfjECi+AuTGeDwBy84TDcRaOB6e79fj65Fg6HgSahDUtKJbGxj/lWzmaBuTzlN3C +Ee8cMwIPqPT2kajJVdOyrvkyuFOdPFOEA7bdCH0MqgIdM2SdF8t40k/ATfuD2K1Z +mumJ508I3gF39jgTnPzD4C8quswrMQ3bzfvKC3klXRlBC0yoArn+0QA3cf2B9T4z +J2qnvgotVbeK/b1OJRNj6Poeo+SsWNc/A5mw7lGScnDgL3yfwCm1gQXaQKfOt5x+ +7GqhWDw10q+bJpJlI10FfzAnhMF9etSqSeURBRW5AQ0EVduM9QEIAL53hJ5bZJ7o +EDCnaY+SCzt9QsAfnFTAnZJQrvkvusJzrTQ088eUQmAjvxkfRqnv981fFwGnh2+I +1Ktm698UAZS9Jt8yjak9wWUICKQO5QUt5k8cHwldQXNXVXFa+TpQWQR5yW1a9okj +h5o/3d4cBt1yZPUJJyLKY43Wvptb6EuEsScO2DnRkh5wSMDQ7dTooddJCmaq3LTj +OleRFQbu9ij386Do6jzK69mJU56TfdcydkxkWF5NZLGnED3lq+hQNbe+8UI5tD2o +P/3r5tXKgMy1R/XPvR/zbfwvx4FAKFOP01awLq4P3d/2xOkMu4Lu9p315E87DOle +Ywxk+FoTqXEAEQEAAYkCPgQYAQIACQUCVduM9QIbLgEpCRDny6+OAR23k8BdIAQZ +AQIABgUCVduM9QAKCRAID0JGyHtSGmqYB/4m4rJbbWa7dBJ8VqRU7ZKnNRDR9CVh +EGipBmpDGRYulEimOPzLUX/ZXZmTZzgemeXLBaJJlWnopVUWuAsyjQuZAfdd8nHk +GRHG0/DGum0l4sKTta3OPGHNC1z1dAcQ1RCr9bTD3PxjLBczdGqhzw71trkQRBRd +tPiUchltPMIyjUHqVJ0xmg0hPqFic0fICsr0YwKoz3h9+QEcZHvsjSZjgydKvfLY +cm+4DDMCCqcHuJrbXJKUWmJcXR0y/+HQONGrGJ5xWdO+6eJioPn2jVMnXCm4EKc7 +fcLFrz/LKmJ8seXhxjM3EdFtylBGCrx3xdK0f+JDNQaC/rhUb5V2XuX6VwoH/AtY ++XsKVYRfNIupLOUcf/srsm3IXT4SXWVomOc9hjGQiJ3rraIbADsc+6bCAr4XNZS7 +moViAAcIPXFv3m3WfUlnG/om78UjQqyVACRZqqAGmuPq+TSkRUCpt9h+A39LQWko +jHqyob3cyLgy6z9Q557O9uK3lQozbw2gH9zC0RqnePl+rsWIUU/ga16fH6pWc1uJ +iEBt8UZGypQ/E56/343epmYAe0a87sHx8iDV+dNtDVKfPRENiLOOc19MmS+phmUy +rbHqI91c0pmysYcJZCD3a502X1gpjFbPZcRtiTmGnUKdOIu60YPNE4+h7u2CfYyF +Pu3AlUaGNMBlvy6PEpU= +=NUTS +-----END PGP PUBLIC KEY BLOCK-----` +) diff --git a/helper/policies/policies.go 
b/helper/policies/policies.go
new file mode 100644
index 0000000..2a34602
--- /dev/null
+++ b/helper/policies/policies.go
@@ -0,0 +1,60 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package policies
+
+import "sort"
+
+// EquivalentPolicies checks whether the given policy sets are equivalent, as in,
+// they contain the same values. The benefit of this method is that it leaves
+// the "default" policy out of its comparisons as it may be added later by core
+// after a set of policies has been saved by a backend.
+func EquivalentPolicies(a, b []string) bool {
+	if a == nil && b == nil {
+		return true
+	}
+
+	if a == nil || b == nil {
+		return false
+	}
+
+	// First we'll build maps to ensure unique values and filter default
+	mapA := map[string]bool{}
+	mapB := map[string]bool{}
+	for _, keyA := range a {
+		if keyA == "default" {
+			continue
+		}
+		mapA[keyA] = true
+	}
+	for _, keyB := range b {
+		if keyB == "default" {
+			continue
+		}
+		mapB[keyB] = true
+	}
+
+	// Now we'll build our checking slices
+	var sortedA, sortedB []string
+	for keyA := range mapA {
+		sortedA = append(sortedA, keyA)
+	}
+	for keyB := range mapB {
+		sortedB = append(sortedB, keyB)
+	}
+	sort.Strings(sortedA)
+	sort.Strings(sortedB)
+
+	// Finally, compare
+	if len(sortedA) != len(sortedB) {
+		return false
+	}
+
+	for i := range sortedA {
+		if sortedA[i] != sortedB[i] {
+			return false
+		}
+	}
+
+	return true
+}
diff --git a/helper/policies/policies_test.go b/helper/policies/policies_test.go
new file mode 100644
index 0000000..6356dee
--- /dev/null
+++ b/helper/policies/policies_test.go
@@ -0,0 +1,29 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package policies
+
+import "testing"
+
+func TestEquivalentPolicies(t *testing.T) {
+	a := []string{"foo", "bar"}
+	var b []string
+	if EquivalentPolicies(a, b) {
+		t.Fatal("bad")
+	}
+
+	b = []string{"foo"}
+	if EquivalentPolicies(a, b) {
+		t.Fatal("bad")
+	}
+
+	b = []string{"bar", "foo"}
+	if !EquivalentPolicies(a, b) {
+		t.Fatal("bad")
+	}
+
+	b = []string{"foo", "default", "bar"}
+	if !EquivalentPolicies(a, b) {
+		t.Fatal("bad")
+	}
+}
diff --git a/helper/proxyutil/proxyutil.go b/helper/proxyutil/proxyutil.go
new file mode 100644
index 0000000..b0f06d6
--- /dev/null
+++ b/helper/proxyutil/proxyutil.go
@@ -0,0 +1,83 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package proxyutil
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"sync"
+	"time"
+
+	"github.com/hashicorp/go-secure-stdlib/parseutil"
+	sockaddr "github.com/hashicorp/go-sockaddr"
+	proxyproto "github.com/pires/go-proxyproto"
+)
+
+// ProxyProtoConfig contains configuration for the PROXY protocol
+type ProxyProtoConfig struct {
+	sync.RWMutex
+	Behavior        string
+	AuthorizedAddrs []*sockaddr.SockAddrMarshaler `json:"authorized_addrs"`
+}
+
+func (p *ProxyProtoConfig) SetAuthorizedAddrs(addrs interface{}) error {
+	aa, err := parseutil.ParseAddrs(addrs)
+	if err != nil {
+		return err
+	}
+
+	p.AuthorizedAddrs = aa
+	return nil
+}
+
+// WrapInProxyProto wraps the given listener in the PROXY protocol. If behavior
+// is "allow_authorized" or "deny_unauthorized" it also configures a connection
+// policy check based on the given ProxyProtoConfig's AuthorizedAddrs. In an
+// error case it returns the original listener and the error.
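+//
+// A minimal usage sketch (illustrative only: the tcpLn listener variable and
+// the CIDR below are hypothetical):
+//
+//	cfg := &ProxyProtoConfig{Behavior: "allow_authorized"}
+//	if err := cfg.SetAuthorizedAddrs("10.0.0.0/8"); err != nil {
+//		return err
+//	}
+//	ln, err := WrapInProxyProto(tcpLn, cfg)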
+func WrapInProxyProto(listener net.Listener, config *ProxyProtoConfig) (net.Listener, error) {
+	config.Lock()
+	defer config.Unlock()
+
+	var newLn *proxyproto.Listener
+
+	switch config.Behavior {
+	case "use_always":
+		newLn = &proxyproto.Listener{
+			Listener:          listener,
+			ReadHeaderTimeout: 10 * time.Second,
+		}
+
+	case "allow_authorized", "deny_unauthorized":
+		newLn = &proxyproto.Listener{
+			Listener:          listener,
+			ReadHeaderTimeout: 10 * time.Second,
+			Policy: func(addr net.Addr) (proxyproto.Policy, error) {
+				config.RLock()
+				defer config.RUnlock()
+
+				sa, err := sockaddr.NewSockAddr(addr.String())
+				if err != nil {
+					return proxyproto.REJECT, fmt.Errorf("error parsing remote address: %w", err)
+				}
+
+				for _, authorizedAddr := range config.AuthorizedAddrs {
+					if authorizedAddr.Contains(sa) {
+						return proxyproto.USE, nil
+					}
+				}
+
+				if config.Behavior == "allow_authorized" {
+					return proxyproto.IGNORE, nil
+				}
+
+				return proxyproto.REJECT, errors.New(`upstream connection not trusted: proxy_protocol_behavior is "deny_unauthorized"`)
+			},
+		}
+	default:
+		return listener, fmt.Errorf("unknown behavior type for proxy proto config")
+	}
+
+	return newLn, nil
+}
diff --git a/helper/random/parser.go b/helper/random/parser.go
new file mode 100644
index 0000000..c5e82c8
--- /dev/null
+++ b/helper/random/parser.go
@@ -0,0 +1,158 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package random
+
+import (
+	"fmt"
+	"reflect"
+	"unicode/utf8"
+
+	"github.com/hashicorp/hcl"
+	"github.com/mitchellh/mapstructure"
+)
+
+// ParsePolicy is a convenience function for parsing HCL into a StringGenerator.
+// See PolicyParser.ParsePolicy for details.
+func ParsePolicy(raw string) (gen StringGenerator, err error) {
+	parser := PolicyParser{
+		RuleRegistry: Registry{
+			Rules: defaultRuleNameMapping,
+		},
+	}
+	return parser.ParsePolicy(raw)
+}
+
+// ParsePolicyBytes is a convenience function for parsing HCL into a StringGenerator.
+// See PolicyParser.ParsePolicy for details.
+func ParsePolicyBytes(raw []byte) (gen StringGenerator, err error) {
+	return ParsePolicy(string(raw))
+}
+
+// PolicyParser parses string generator configuration from HCL.
+type PolicyParser struct {
+	// RuleRegistry maps rule names in HCL to Rule constructors.
+	RuleRegistry Registry
+}
+
+// ParsePolicy parses the provided HCL into a StringGenerator.
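+//
+// For example, the following policy (the same policy appears in this
+// package's tests) produces a 20-character generator that requires at
+// least two characters from "abcde":
+//
+//	length = 20
+//	rule "charset" {
+//		charset = "abcde"
+//		min-chars = 2
+//	}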
+func (p PolicyParser) ParsePolicy(raw string) (sg StringGenerator, err error) { + rawData := map[string]interface{}{} + err = hcl.Decode(&rawData, raw) + if err != nil { + return sg, fmt.Errorf("unable to decode: %w", err) + } + + // Decode the top level items + gen := StringGenerator{} + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + Result: &gen, + DecodeHook: stringToRunesFunc, + }) + if err != nil { + return sg, fmt.Errorf("unable to decode configuration: %w", err) + } + + err = decoder.Decode(rawData) + if err != nil { + return sg, fmt.Errorf("failed to decode configuration: %w", err) + } + + // Decode & parse rules + rawRules, err := getMapSlice(rawData, "rule") + if err != nil { + return sg, fmt.Errorf("unable to retrieve rules: %w", err) + } + + rules, err := parseRules(p.RuleRegistry, rawRules) + if err != nil { + return sg, fmt.Errorf("unable to parse rules: %w", err) + } + + gen = StringGenerator{ + Length: gen.Length, + Rules: rules, + } + + err = gen.validateConfig() + if err != nil { + return sg, err + } + + return gen, nil +} + +func parseRules(registry Registry, rawRules []map[string]interface{}) (rules []Rule, err error) { + for _, rawRule := range rawRules { + info, err := getRuleInfo(rawRule) + if err != nil { + return nil, fmt.Errorf("unable to get rule info: %w", err) + } + + rule, err := registry.parseRule(info.ruleType, info.data) + if err != nil { + return nil, fmt.Errorf("unable to parse rule %s: %w", info.ruleType, err) + } + rules = append(rules, rule) + } + + return rules, nil +} + +// getMapSlice from the provided map. This will retrieve and type-assert a []map[string]interface{} from the map +// This will not error if the key does not exist +// This will return an error if the value at the provided key is not of type []map[string]interface{} +func getMapSlice(m map[string]interface{}, key string) (mapSlice []map[string]interface{}, err error) { + rawSlice, exists := m[key] + if !exists { + return nil, nil + } + + mapSlice = []map[string]interface{}{} + err = mapstructure.Decode(rawSlice, &mapSlice) + if err != nil { + return nil, err + } + return mapSlice, nil +} + +type ruleInfo struct { + ruleType string + data map[string]interface{} +} + +// getRuleInfo splits the provided HCL-decoded rule into its rule type along with the data associated with it +func getRuleInfo(rule map[string]interface{}) (data ruleInfo, err error) { + // There should only be one key, but it's a dynamic key yay! + for key := range rule { + slice, err := getMapSlice(rule, key) + if err != nil { + return data, fmt.Errorf("unable to get rule data: %w", err) + } + + if len(slice) == 0 { + return data, fmt.Errorf("rule info cannot be empty") + } + + data = ruleInfo{ + ruleType: key, + data: slice[0], + } + return data, nil + } + return data, fmt.Errorf("rule is empty") +} + +// stringToRunesFunc converts a string to a []rune for use in the mapstructure library +func stringToRunesFunc(from reflect.Kind, to reflect.Kind, data interface{}) (interface{}, error) { + if from != reflect.String || to != reflect.Slice { + return data, nil + } + + raw := data.(string) + + if !utf8.ValidString(raw) { + return nil, fmt.Errorf("invalid UTF8 string") + } + return []rune(raw), nil +} diff --git a/helper/random/parser_test.go b/helper/random/parser_test.go new file mode 100644 index 0000000..f8af5a5 --- /dev/null +++ b/helper/random/parser_test.go @@ -0,0 +1,590 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package random + +import ( + "encoding/json" + "reflect" + "testing" +) + +func TestParsePolicy(t *testing.T) { + type testCase struct { + rawConfig string + expected StringGenerator + expectErr bool + } + + tests := map[string]testCase{ + "unrecognized rule": { + rawConfig: ` + length = 20 + rule "testrule" { + string = "teststring" + int = 123 + }`, + expected: StringGenerator{}, + expectErr: true, + }, + + "charset restrictions": { + rawConfig: ` + length = 20 + rule "charset" { + charset = "abcde" + min-chars = 2 + }`, + expected: StringGenerator{ + Length: 20, + charset: []rune("abcde"), + Rules: []Rule{ + CharsetRule{ + Charset: []rune("abcde"), + MinChars: 2, + }, + }, + }, + expectErr: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + actual, err := ParsePolicy(test.rawConfig) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + if !reflect.DeepEqual(actual, test.expected) { + t.Fatalf("Actual: %#v\nExpected:%#v", actual, test.expected) + } + }) + } +} + +func TestParser_ParsePolicy(t *testing.T) { + type testCase struct { + registry map[string]ruleConstructor + + rawConfig string + expected StringGenerator + expectErr bool + } + + tests := map[string]testCase{ + "empty config": { + registry: defaultRuleNameMapping, + rawConfig: "", + expected: StringGenerator{}, + expectErr: true, + }, + "bogus config": { + registry: defaultRuleNameMapping, + rawConfig: "asdf", + expected: StringGenerator{}, + expectErr: true, + }, + "config with only length": { + registry: defaultRuleNameMapping, + rawConfig: ` + length = 20`, + expected: StringGenerator{}, + expectErr: true, + }, + "config with zero length": { + registry: defaultRuleNameMapping, + rawConfig: ` + length = 0 + rule "charset" { + charset = "abcde" + }`, + expected: StringGenerator{}, + expectErr: true, + }, + "config with negative length": { + registry: defaultRuleNameMapping, + rawConfig: ` + length = -2 + rule "charset" { + charset = "abcde" + }`, + expected: StringGenerator{}, + expectErr: true, + }, + "charset restrictions": { + registry: defaultRuleNameMapping, + rawConfig: ` + length = 20 + rule "charset" { + charset = "abcde" + min-chars = 2 + }`, + expected: StringGenerator{ + Length: 20, + charset: []rune("abcde"), + Rules: []Rule{ + CharsetRule{ + Charset: []rune("abcde"), + MinChars: 2, + }, + }, + }, + expectErr: false, + }, + "test rule": { + registry: map[string]ruleConstructor{ + "testrule": newTestRule, + }, + rawConfig: ` + length = 20 + rule "testrule" { + string = "teststring" + int = 123 + }`, + expected: StringGenerator{ + Length: 20, + charset: deduplicateRunes([]rune("teststring")), + Rules: []Rule{ + testCharsetRule{ + String: "teststring", + Integer: 123, + }, + }, + }, + expectErr: false, + }, + "test rule and charset restrictions": { + registry: map[string]ruleConstructor{ + "testrule": newTestRule, + "charset": ParseCharset, + }, + rawConfig: ` + length = 20 + rule "testrule" { + string = "teststring" + int = 123 + } + rule "charset" { + charset = "abcde" + min-chars = 2 + }`, + expected: StringGenerator{ + Length: 20, + charset: deduplicateRunes([]rune("abcdeteststring")), + Rules: []Rule{ + testCharsetRule{ + String: "teststring", + Integer: 123, + }, + CharsetRule{ + Charset: []rune("abcde"), + MinChars: 2, + }, + }, + }, + expectErr: false, + }, + "unrecognized rule": { + registry: defaultRuleNameMapping, + 
rawConfig: ` + length = 20 + rule "testrule" { + string = "teststring" + int = 123 + }`, + expected: StringGenerator{}, + expectErr: true, + }, + + // ///////////////////////////////////////////////// + // JSON data + "manually JSONified HCL": { + registry: map[string]ruleConstructor{ + "testrule": newTestRule, + "charset": ParseCharset, + }, + rawConfig: ` + { + "charset": "abcde", + "length": 20, + "rule": [ + { + "testrule": [ + { + "string": "teststring", + "int": 123 + } + ] + }, + { + "charset": [ + { + "charset": "abcde", + "min-chars": 2 + } + ] + } + ] + }`, + expected: StringGenerator{ + Length: 20, + charset: deduplicateRunes([]rune("abcdeteststring")), + Rules: []Rule{ + testCharsetRule{ + String: "teststring", + Integer: 123, + }, + CharsetRule{ + Charset: []rune("abcde"), + MinChars: 2, + }, + }, + }, + expectErr: false, + }, + "JSONified HCL": { + registry: map[string]ruleConstructor{ + "testrule": newTestRule, + "charset": ParseCharset, + }, + rawConfig: toJSON(t, StringGenerator{ + Length: 20, + Rules: []Rule{ + testCharsetRule{ + String: "teststring", + Integer: 123, + }, + CharsetRule{ + Charset: []rune("abcde"), + MinChars: 2, + }, + }, + }), + expected: StringGenerator{ + Length: 20, + charset: deduplicateRunes([]rune("abcdeteststring")), + Rules: []Rule{ + testCharsetRule{ + String: "teststring", + Integer: 123, + }, + CharsetRule{ + Charset: []rune("abcde"), + MinChars: 2, + }, + }, + }, + expectErr: false, + }, + "JSON unrecognized rule": { + registry: defaultRuleNameMapping, + rawConfig: ` + { + "charset": "abcde", + "length": 20, + "rule": [ + { + "testrule": [ + { + "string": "teststring", + "int": 123 + } + ], + } + ] + }`, + expected: StringGenerator{}, + expectErr: true, + }, + "config value with empty slice": { + registry: defaultRuleNameMapping, + rawConfig: ` + rule { + n = [] + }`, + expected: StringGenerator{}, + expectErr: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + parser := PolicyParser{ + RuleRegistry: Registry{ + Rules: test.registry, + }, + } + + actual, err := parser.ParsePolicy(test.rawConfig) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + if !reflect.DeepEqual(actual, test.expected) { + t.Fatalf("Actual: %#v\nExpected:%#v", actual, test.expected) + } + }) + } +} + +func TestParseRules(t *testing.T) { + type testCase struct { + registry map[string]ruleConstructor + + rawRules []map[string]interface{} + expectedRules []Rule + expectErr bool + } + + tests := map[string]testCase{ + "nil rule data": { + registry: defaultRuleNameMapping, + rawRules: nil, + expectedRules: nil, + expectErr: false, + }, + "empty rule data": { + registry: defaultRuleNameMapping, + rawRules: []map[string]interface{}{}, + expectedRules: nil, + expectErr: false, + }, + "invalid rule data": { + registry: defaultRuleNameMapping, + rawRules: []map[string]interface{}{ + { + "testrule": map[string]interface{}{ + "string": "teststring", + }, + }, + }, + expectedRules: nil, + expectErr: true, + }, + "unrecognized rule data": { + registry: defaultRuleNameMapping, + rawRules: []map[string]interface{}{ + { + "testrule": []map[string]interface{}{ + { + "string": "teststring", + "int": 123, + }, + }, + }, + }, + expectedRules: nil, + expectErr: true, + }, + "recognized rule": { + registry: map[string]ruleConstructor{ + "testrule": newTestRule, + }, + rawRules: []map[string]interface{}{ + { + "testrule": 
[]map[string]interface{}{ + { + "string": "teststring", + "int": 123, + }, + }, + }, + }, + expectedRules: []Rule{ + testCharsetRule{ + String: "teststring", + Integer: 123, + }, + }, + expectErr: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + registry := Registry{ + Rules: test.registry, + } + + actualRules, err := parseRules(registry, test.rawRules) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + if !reflect.DeepEqual(actualRules, test.expectedRules) { + t.Fatalf("Actual: %#v\nExpected:%#v", actualRules, test.expectedRules) + } + }) + } +} + +func TestGetMapSlice(t *testing.T) { + type testCase struct { + input map[string]interface{} + key string + expectedSlice []map[string]interface{} + expectErr bool + } + + tests := map[string]testCase{ + "nil map": { + input: nil, + key: "testkey", + expectedSlice: nil, + expectErr: false, + }, + "empty map": { + input: map[string]interface{}{}, + key: "testkey", + expectedSlice: nil, + expectErr: false, + }, + "ignored keys": { + input: map[string]interface{}{ + "foo": "bar", + }, + key: "testkey", + expectedSlice: nil, + expectErr: false, + }, + "key has wrong type": { + input: map[string]interface{}{ + "foo": "bar", + }, + key: "foo", + expectedSlice: nil, + expectErr: true, + }, + "good data": { + input: map[string]interface{}{ + "foo": []map[string]interface{}{ + { + "sub-foo": "bar", + }, + }, + }, + key: "foo", + expectedSlice: []map[string]interface{}{ + { + "sub-foo": "bar", + }, + }, + expectErr: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + actualSlice, err := getMapSlice(test.input, test.key) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + if !reflect.DeepEqual(actualSlice, test.expectedSlice) { + t.Fatalf("Actual: %#v\nExpected:%#v", actualSlice, test.expectedSlice) + } + }) + } +} + +func TestGetRuleInfo(t *testing.T) { + type testCase struct { + rule map[string]interface{} + expectedInfo ruleInfo + expectErr bool + } + + tests := map[string]testCase{ + "nil rule": { + rule: nil, + expectedInfo: ruleInfo{}, + expectErr: true, + }, + "empty rule": { + rule: map[string]interface{}{}, + expectedInfo: ruleInfo{}, + expectErr: true, + }, + "rule with invalid type": { + rule: map[string]interface{}{ + "TestRuleType": "wrong type", + }, + expectedInfo: ruleInfo{}, + expectErr: true, + }, + "rule with good data": { + rule: map[string]interface{}{ + "TestRuleType": []map[string]interface{}{ + { + "foo": "bar", + }, + }, + }, + expectedInfo: ruleInfo{ + ruleType: "TestRuleType", + data: map[string]interface{}{ + "foo": "bar", + }, + }, + expectErr: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + actualInfo, err := getRuleInfo(test.rule) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + if !reflect.DeepEqual(actualInfo, test.expectedInfo) { + t.Fatalf("Actual: %#v\nExpected:%#v", actualInfo, test.expectedInfo) + } + }) + } +} + +func BenchmarkParser_Parse(b *testing.B) { + config := `length = 20 + rule "charset" { + charset = "abcde" + min-chars = 2 + }` + + for i := 0; i < b.N; i++ { + parser := PolicyParser{ + RuleRegistry: Registry{ + Rules: defaultRuleNameMapping, + }, + 
		}
+		_, err := parser.ParsePolicy(config)
+		if err != nil {
+			b.Fatalf("Failed to parse: %s", err)
+		}
+	}
+}
+
+func toJSON(t *testing.T, val interface{}) string {
+	t.Helper()
+	b, err := json.Marshal(val)
+	if err != nil {
+		t.Fatalf("unable to marshal to JSON: %s", err)
+	}
+	return string(b)
+}
diff --git a/helper/random/random_api.go b/helper/random/random_api.go
new file mode 100644
index 0000000..5bb9316
--- /dev/null
+++ b/helper/random/random_api.go
@@ -0,0 +1,118 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package random
+
+import (
+	"crypto/rand"
+	"encoding/base64"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/helper/xor"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+const APIMaxBytes = 128 * 1024
+
+func HandleRandomAPI(d *framework.FieldData, additionalSource io.Reader) (*logical.Response, error) {
+	bytes := 0
+	// Parsing is convoluted here, but allows operators to ACL both source and byte count
+	maybeUrlBytes := d.Raw["urlbytes"]
+	maybeSource := d.Raw["source"]
+	source := "platform"
+	var err error
+	if maybeSource == "" {
+		bytes = d.Get("bytes").(int)
+	} else if maybeUrlBytes == "" && isValidSource(maybeSource.(string)) {
+		source = maybeSource.(string)
+		bytes = d.Get("bytes").(int)
+	} else if maybeUrlBytes == "" {
+		bytes, err = strconv.Atoi(maybeSource.(string))
+		if err != nil {
+			return logical.ErrorResponse(fmt.Sprintf("error parsing url-set byte count: %s", err)), nil
+		}
+	} else {
+		source = maybeSource.(string)
+		bytes, err = strconv.Atoi(maybeUrlBytes.(string))
+		if err != nil {
+			return logical.ErrorResponse(fmt.Sprintf("error parsing url-set byte count: %s", err)), nil
+		}
+	}
+	format := d.Get("format").(string)
+
+	if bytes < 1 {
+		return logical.ErrorResponse(`"bytes" cannot be less than 1`), nil
+	}
+
+	if bytes > APIMaxBytes {
+		return logical.ErrorResponse(`"bytes" cannot be more than %d`, APIMaxBytes), nil
+	}
+
+	switch format {
+	case "hex":
+	case "base64":
+	default:
+		return logical.ErrorResponse("unsupported encoding format %q; must be \"hex\" or \"base64\"", format), nil
+	}
+
+	var randBytes []byte
+	var warning string
+	switch source {
+	case "", "platform":
+		randBytes, err = uuid.GenerateRandomBytes(bytes)
+		if err != nil {
+			return nil, err
+		}
+	case "seal":
+		if rand.Reader == additionalSource {
+			warning = "no seal/entropy augmentation available, using platform entropy source"
+		}
+		randBytes, err = uuid.GenerateRandomBytesWithReader(bytes, additionalSource)
+	case "all":
+		randBytes, err = uuid.GenerateRandomBytes(bytes)
+		if err == nil && rand.Reader != additionalSource {
+			var sealBytes []byte
+			sealBytes, err = uuid.GenerateRandomBytesWithReader(bytes, additionalSource)
+			if err == nil {
+				randBytes, err = xor.XORBytes(sealBytes, randBytes)
+			}
+		}
+	default:
+		return logical.ErrorResponse("unsupported entropy source %q; must be \"platform\" or \"seal\", or \"all\"", source), nil
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	var retStr string
+	switch format {
+	case "hex":
+		retStr = hex.EncodeToString(randBytes)
+	case "base64":
+		retStr = base64.StdEncoding.EncodeToString(randBytes)
+	}
+
+	// Generate the response
+	resp := &logical.Response{
+		Data: map[string]interface{}{
+			"random_bytes": retStr,
+		},
+	}
+	if warning != "" {
+		resp.Warnings = []string{warning}
+	}
+	return resp, nil
+}
+
+func isValidSource(s string) bool {
+	switch s {
+	case "", "platform", "seal",
"all": + return true + } + return false +} diff --git a/helper/random/registry.go b/helper/random/registry.go new file mode 100644 index 0000000..334df73 --- /dev/null +++ b/helper/random/registry.go @@ -0,0 +1,38 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package random + +import ( + "fmt" +) + +type ruleConstructor func(map[string]interface{}) (Rule, error) + +var ( + // defaultRuleNameMapping is the default mapping of HCL rule names to the appropriate rule constructor. + // Add to this map when adding a new Rule type to be recognized in HCL. + defaultRuleNameMapping = map[string]ruleConstructor{ + "charset": ParseCharset, + } + + defaultRegistry = Registry{ + Rules: defaultRuleNameMapping, + } +) + +// Registry of HCL rule names to rule constructors. +type Registry struct { + // Rules maps names of rules to a constructor for the rule + Rules map[string]ruleConstructor +} + +func (r Registry) parseRule(ruleType string, ruleData map[string]interface{}) (rule Rule, err error) { + constructor, exists := r.Rules[ruleType] + if !exists { + return nil, fmt.Errorf("unrecognized rule type %s", ruleType) + } + + rule, err = constructor(ruleData) + return rule, err +} diff --git a/helper/random/registry_test.go b/helper/random/registry_test.go new file mode 100644 index 0000000..21297aa --- /dev/null +++ b/helper/random/registry_test.go @@ -0,0 +1,115 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package random + +import ( + "fmt" + "reflect" + "testing" + + "github.com/mitchellh/mapstructure" +) + +type testCharsetRule struct { + String string `mapstructure:"string" json:"string"` + Integer int `mapstructure:"int" json:"int"` + + // Default to passing + fail bool +} + +func newTestRule(data map[string]interface{}) (rule Rule, err error) { + tr := &testCharsetRule{} + err = mapstructure.Decode(data, tr) + if err != nil { + return nil, fmt.Errorf("unable to decode test rule") + } + return *tr, nil +} + +func (tr testCharsetRule) Pass([]rune) bool { return !tr.fail } +func (tr testCharsetRule) Type() string { return "testrule" } +func (tr testCharsetRule) Chars() []rune { return []rune(tr.String) } + +func TestParseRule(t *testing.T) { + type testCase struct { + rules map[string]ruleConstructor + + ruleType string + ruleData map[string]interface{} + + expectedRule Rule + expectErr bool + } + + tests := map[string]testCase{ + "missing rule": { + rules: map[string]ruleConstructor{}, + ruleType: "testrule", + ruleData: map[string]interface{}{ + "string": "teststring", + "int": 123, + }, + expectedRule: nil, + expectErr: true, + }, + "nil data": { + rules: map[string]ruleConstructor{ + "testrule": newTestRule, + }, + ruleType: "testrule", + ruleData: nil, + expectedRule: testCharsetRule{}, + expectErr: false, + }, + "good rule": { + rules: map[string]ruleConstructor{ + "testrule": newTestRule, + }, + ruleType: "testrule", + ruleData: map[string]interface{}{ + "string": "teststring", + "int": 123, + }, + expectedRule: testCharsetRule{ + String: "teststring", + Integer: 123, + }, + expectErr: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + reg := Registry{ + Rules: test.rules, + } + + actualRule, err := reg.parseRule(test.ruleType, test.ruleData) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + if !reflect.DeepEqual(actualRule, test.expectedRule) { + t.Fatalf("Actual: 
%#v\nExpected:%#v", actualRule, test.expectedRule)
+			}
+		})
+	}
+}
+
+// Ensure the mappings in the defaultRuleNameMapping are consistent between the keys
+// in the map and the Type() calls on the Rule values
+func TestDefaultRuleNameMapping(t *testing.T) {
+	for expectedType, constructor := range defaultRuleNameMapping {
+		// In this case, we don't care about the error since we're checking the types, not the contents
+		instance, _ := constructor(map[string]interface{}{})
+		actualType := instance.Type()
+		if actualType != expectedType {
+			t.Fatalf("Default registry mismatched types: Actual: %s Expected: %s", actualType, expectedType)
+		}
+	}
+}
diff --git a/helper/random/rules.go b/helper/random/rules.go
new file mode 100644
index 0000000..05cc800
--- /dev/null
+++ b/helper/random/rules.go
@@ -0,0 +1,94 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package random
+
+import (
+	"fmt"
+
+	"github.com/mitchellh/mapstructure"
+)
+
+// Rule to assert on string values.
+type Rule interface {
+	// Pass should return true if the provided value passes any assertions this Rule is making.
+	Pass(value []rune) bool
+
+	// Type returns the name of the rule as associated in the registry
+	Type() string
+}
+
+// CharsetRule requires a certain number of characters from the specified charset.
+type CharsetRule struct {
+	// Charset is the set of characters that candidate strings must contain a minimum number of.
+	Charset runes `mapstructure:"charset" json:"charset"`
+
+	// MinChars indicates the minimum (inclusive) number of characters from the charset that should appear in the string.
+	MinChars int `mapstructure:"min-chars" json:"min-chars"`
+}
+
+// ParseCharset from the provided data map. The data map is expected to be parsed from HCL.
+func ParseCharset(data map[string]interface{}) (rule Rule, err error) {
+	cr := &CharsetRule{}
+
+	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
+		Metadata:   nil,
+		Result:     cr,
+		DecodeHook: stringToRunesFunc,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("unable to decode charset restriction: %w", err)
+	}
+
+	err = decoder.Decode(data)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse charset restriction: %w", err)
+	}
+
+	return *cr, nil
+}
+
+func (c CharsetRule) Type() string {
+	return "charset"
+}
+
+// Chars returns the charset that this rule is looking for.
+func (c CharsetRule) Chars() []rune {
+	return c.Charset
+}
+
+func (c CharsetRule) MinLength() int {
+	return c.MinChars
+}
+
+// Pass returns true if the provided candidate string has a minimum number of chars in it.
+// This adheres to the Rule interface
+func (c CharsetRule) Pass(value []rune) bool {
+	if c.MinChars <= 0 {
+		return true
+	}
+
+	count := 0
+	for _, r := range value {
+		// charIn is sometimes faster than a map lookup because the data is so small
+		// This is being kept rather than converted to a map to keep the code cleaner,
+		// otherwise there would need to be additional parsing logic.
+		if charIn(r, c.Charset) {
+			count++
+			if count >= c.MinChars {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+func charIn(search rune, charset []rune) bool {
+	for _, r := range charset {
+		if search == r {
+			return true
+		}
+	}
+	return false
+}
diff --git a/helper/random/rules_test.go b/helper/random/rules_test.go
new file mode 100644
index 0000000..e85df50
--- /dev/null
+++ b/helper/random/rules_test.go
@@ -0,0 +1,93 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package random + +import ( + "testing" +) + +func TestCharset(t *testing.T) { + type testCase struct { + charset string + minChars int + input string + expected bool + } + + tests := map[string]testCase{ + "0 minimum, empty input": { + charset: LowercaseCharset, + minChars: 0, + input: "", + expected: true, + }, + "0 minimum, many matching": { + charset: LowercaseCharset, + minChars: 0, + input: LowercaseCharset, + expected: true, + }, + "0 minimum, no matching": { + charset: LowercaseCharset, + minChars: 0, + input: "0123456789", + expected: true, + }, + "1 minimum, empty input": { + charset: LowercaseCharset, + minChars: 1, + input: "", + expected: false, + }, + "1 minimum, no matching": { + charset: LowercaseCharset, + minChars: 1, + input: "0123456789", + expected: false, + }, + "1 minimum, exactly 1 matching": { + charset: LowercaseCharset, + minChars: 1, + input: "a", + expected: true, + }, + "1 minimum, many matching": { + charset: LowercaseCharset, + minChars: 1, + input: "abcdefhaaaa", + expected: true, + }, + "2 minimum, 1 matching": { + charset: LowercaseCharset, + minChars: 2, + input: "f", + expected: false, + }, + "2 minimum, 2 matching": { + charset: LowercaseCharset, + minChars: 2, + input: "fz", + expected: true, + }, + "2 minimum, many matching": { + charset: LowercaseCharset, + minChars: 2, + input: "joixnbonxd", + expected: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + cr := CharsetRule{ + Charset: []rune(test.charset), + MinChars: test.minChars, + } + actual := cr.Pass([]rune(test.input)) + if actual != test.expected { + t.FailNow() + } + }) + } +} diff --git a/helper/random/serializing.go b/helper/random/serializing.go new file mode 100644 index 0000000..5b68d32 --- /dev/null +++ b/helper/random/serializing.go @@ -0,0 +1,91 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package random + +import ( + "encoding/json" + "fmt" + + "github.com/mitchellh/mapstructure" +) + +// serializableRules is a slice of rules that can be marshalled to JSON in an HCL format +type serializableRules []Rule + +// MarshalJSON in an HCL-friendly way +func (r serializableRules) MarshalJSON() (b []byte, err error) { + // Example: + // [ + // { + // "testrule": [ + // { + // "string": "teststring", + // "int": 123 + // } + // ] + // }, + // { + // "charset": [ + // { + // "charset": "abcde", + // "min-chars": 2 + // } + // ] + // } + // ] + data := []map[string][]map[string]interface{}{} // Totally not confusing at all + for _, rule := range r { + ruleData := map[string]interface{}{} + err = mapstructure.Decode(rule, &ruleData) + if err != nil { + return nil, fmt.Errorf("unable to decode rule: %w", err) + } + + ruleMap := map[string][]map[string]interface{}{ + rule.Type(): { + ruleData, + }, + } + data = append(data, ruleMap) + } + + b, err = json.Marshal(data) + return b, err +} + +func (r *serializableRules) UnmarshalJSON(data []byte) (err error) { + mapData := []map[string]interface{}{} + err = json.Unmarshal(data, &mapData) + if err != nil { + return err + } + rules, err := parseRules(defaultRegistry, mapData) + if err != nil { + return err + } + *r = rules + return nil +} + +type runes []rune + +func (r runes) Len() int { return len(r) } +func (r runes) Less(i, j int) bool { return r[i] < r[j] } +func (r runes) Swap(i, j int) { r[i], r[j] = r[j], r[i] } + +// MarshalJSON converts the runes to a string for smaller JSON and easier readability +func (r runes) MarshalJSON() (b []byte, err error) { + return json.Marshal(string(r)) +} + +// UnmarshalJSON converts a string to []rune +func (r *runes) UnmarshalJSON(data []byte) (err error) { + var str string + err = json.Unmarshal(data, &str) + if err != nil { + return err + } + *r = []rune(str) + return nil +} diff --git a/helper/random/serializing_test.go b/helper/random/serializing_test.go new file mode 100644 index 0000000..bfa17ae --- /dev/null +++ b/helper/random/serializing_test.go @@ -0,0 +1,61 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package random
+
+import (
+	"encoding/json"
+	"reflect"
+	"testing"
+)
+
+func TestJSONMarshalling(t *testing.T) {
+	expected := serializableRules{
+		CharsetRule{
+			Charset:  LowercaseRuneset,
+			MinChars: 1,
+		},
+		CharsetRule{
+			Charset:  UppercaseRuneset,
+			MinChars: 1,
+		},
+		CharsetRule{
+			Charset:  NumericRuneset,
+			MinChars: 1,
+		},
+		CharsetRule{
+			Charset:  ShortSymbolRuneset,
+			MinChars: 1,
+		},
+	}
+
+	marshalled, err := json.Marshal(expected)
+	if err != nil {
+		t.Fatalf("no error expected, got: %s", err)
+	}
+
+	actual := serializableRules{}
+	err = json.Unmarshal(marshalled, &actual)
+	if err != nil {
+		t.Fatalf("no error expected, got: %s", err)
+	}
+
+	if !reflect.DeepEqual(actual, expected) {
+		t.Fatalf("Actual: %#v\nExpected: %#v", actual, expected)
+	}
+}
+
+func TestRunes_UnmarshalJSON(t *testing.T) {
+	data := []byte(`"noaw8hgfsdjlkfsj3"`)
+
+	expected := runes([]rune("noaw8hgfsdjlkfsj3"))
+	actual := runes{}
+	err := (&actual).UnmarshalJSON(data)
+	if err != nil {
+		t.Fatalf("no error expected, got: %s", err)
+	}
+
+	if !reflect.DeepEqual(actual, expected) {
+		t.Fatalf("Actual: %#v\nExpected: %#v", actual, expected)
+	}
+}
diff --git a/helper/random/string_generator.go b/helper/random/string_generator.go
new file mode 100644
index 0000000..48f08ed
--- /dev/null
+++ b/helper/random/string_generator.go
@@ -0,0 +1,312 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package random
+
+import (
+	"context"
+	"crypto/rand"
+	"fmt"
+	"io"
+	"sort"
+	"sync"
+	"time"
+	"unicode"
+
+	"github.com/hashicorp/go-multierror"
+)
+
+var (
+	LowercaseCharset   = sortCharset("abcdefghijklmnopqrstuvwxyz")
+	UppercaseCharset   = sortCharset("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
+	NumericCharset     = sortCharset("0123456789")
+	FullSymbolCharset  = sortCharset("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~")
+	ShortSymbolCharset = sortCharset("-")
+
+	AlphabeticCharset              = sortCharset(UppercaseCharset + LowercaseCharset)
+	AlphaNumericCharset            = sortCharset(AlphabeticCharset + NumericCharset)
+	AlphaNumericShortSymbolCharset = sortCharset(AlphaNumericCharset + ShortSymbolCharset)
+	AlphaNumericFullSymbolCharset  = sortCharset(AlphaNumericCharset + FullSymbolCharset)
+
+	LowercaseRuneset   = []rune(LowercaseCharset)
+	UppercaseRuneset   = []rune(UppercaseCharset)
+	NumericRuneset     = []rune(NumericCharset)
+	FullSymbolRuneset  = []rune(FullSymbolCharset)
+	ShortSymbolRuneset = []rune(ShortSymbolCharset)
+
+	AlphabeticRuneset              = []rune(AlphabeticCharset)
+	AlphaNumericRuneset            = []rune(AlphaNumericCharset)
+	AlphaNumericShortSymbolRuneset = []rune(AlphaNumericShortSymbolCharset)
+	AlphaNumericFullSymbolRuneset  = []rune(AlphaNumericFullSymbolCharset)
+
+	// DefaultStringGenerator has reasonable default rules for generating strings
+	DefaultStringGenerator = &StringGenerator{
+		Length: 20,
+		Rules: []Rule{
+			CharsetRule{
+				Charset:  LowercaseRuneset,
+				MinChars: 1,
+			},
+			CharsetRule{
+				Charset:  UppercaseRuneset,
+				MinChars: 1,
+			},
+			CharsetRule{
+				Charset:  NumericRuneset,
+				MinChars: 1,
+			},
+			CharsetRule{
+				Charset:  ShortSymbolRuneset,
+				MinChars: 1,
+			},
+		},
+	}
+)
+
+func sortCharset(chars string) string {
+	r := runes(chars)
+	sort.Sort(r)
+	return string(r)
+}
+
+// StringGenerator generates random strings from the provided charset & adhering to a set of rules. The set of rules
+// are things like CharsetRule which requires a certain number of characters from a sub-charset.
+type StringGenerator struct {
+	// Length of the string to generate.
+	Length int `mapstructure:"length" json:"length"`
+
+	// Rules the generated strings must adhere to.
+	Rules serializableRules `mapstructure:"-" json:"rule"` // This is "rule" in JSON so it matches the HCL property type
+
+	// charset to choose runes from. This is computed from the rules, not directly configurable.
+	charset     runes
+	charsetLock sync.RWMutex
+}
+
+// Generate a random string from the charset and adhering to the provided rules.
+// The io.Reader is optional. If not provided, it will default to the reader from crypto/rand.
+func (g *StringGenerator) Generate(ctx context.Context, rng io.Reader) (str string, err error) {
+	if _, hasTimeout := ctx.Deadline(); !hasTimeout {
+		var cancel func()
+		ctx, cancel = context.WithTimeout(ctx, 1*time.Second) // Ensure there's a timeout on the context
+		defer cancel()
+	}
+
+	// Ensure the generator is configured well since it may be manually created rather than parsed from HCL
+	err = g.validateConfig()
+	if err != nil {
+		return "", err
+	}
+
+LOOP:
+	for {
+		select {
+		case <-ctx.Done():
+			return "", fmt.Errorf("timed out generating string")
+		default:
+			str, err = g.generate(rng)
+			if err != nil {
+				return "", err
+			}
+			if str == "" {
+				continue LOOP
+			}
+			return str, err
+		}
+	}
+}
+
+func (g *StringGenerator) generate(rng io.Reader) (str string, err error) {
+	// If performance improvements need to be made, this can be changed to read a batch of
+	// potential strings at once rather than one at a time. This will significantly
+	// improve performance, but at the cost of added complexity.
+	g.charsetLock.RLock()
+	charset := g.charset
+	g.charsetLock.RUnlock()
+	candidate, err := randomRunes(rng, charset, g.Length)
+	if err != nil {
+		return "", fmt.Errorf("unable to generate random characters: %w", err)
+	}
+
+	for _, rule := range g.Rules {
+		if !rule.Pass(candidate) {
+			return "", nil
+		}
+	}
+
+	// Passed all rules
+	return string(candidate), nil
+}
+
+const (
+	// maxCharsetLen is the maximum length a charset is allowed to be when generating a candidate string.
+	// This is the total number of numbers available for selecting an index out of the charset slice.
+	maxCharsetLen = 256
+)
+
+// randomRunes creates a random string based on the provided charset. The charset is limited to 256 characters, but
+// could be expanded if needed. Expanding the maximum charset size will decrease performance because it will need to
+// combine bytes into a larger integer using the binary.BigEndian.Uint16() function.
+func randomRunes(rng io.Reader, charset []rune, length int) (candidate []rune, err error) {
+	if len(charset) == 0 {
+		return nil, fmt.Errorf("no charset specified")
+	}
+	if len(charset) > maxCharsetLen {
+		return nil, fmt.Errorf("charset is too long: limited to %d characters", maxCharsetLen)
+	}
+	if length <= 0 {
+		return nil, fmt.Errorf("unable to generate a zero or negative length runeset")
+	}
+
+	// This can't always select indexes from [0-maxCharsetLen) because it could introduce bias to the character selection.
+	// For instance, if the length of the charset is [a-zA-Z0-9-] (length of 63):
+	// RNG ranges: [0-62][63-125][126-188][189-251] will equally select from the entirety of the charset. However,
+	// the RNG values [252-255] will select the first 4 characters of the charset while ignoring the remaining 59.
+	// This results in a bias towards the front of the charset.
+	//
+	// To avoid this, we determine the largest integer multiplier of the charset length that is <= maxCharsetLen
+	// For instance, if the maxCharsetLen is 256 (the size of one byte) and the charset is length 63, the multiplier
+	// equals 4:
+	//   256/63 => 4.06
+	//   Trunc(4.06) => 4
+	// Multiply by the charset length
+	// Subtract 1 to account for 0-based counting and you get the max index value: 251
+	maxAllowedRNGValue := (maxCharsetLen/len(charset))*len(charset) - 1
+
+	// rngBufferMultiplier increases the size of the RNG buffer to account for lost
+	// indexes due to the maxAllowedRNGValue
+	rngBufferMultiplier := 1.0
+
+	// Don't set a multiplier if we are able to use the entire range of indexes
+	if maxAllowedRNGValue < maxCharsetLen {
+		// Anything more complicated than an arbitrary percentage appears to have little practical performance benefit
+		rngBufferMultiplier = 1.5
+	}
+
+	// Default to the standard crypto reader if one isn't provided
+	if rng == nil {
+		rng = rand.Reader
+	}
+
+	charsetLen := byte(len(charset))
+
+	runes := make([]rune, 0, length)
+
+	for len(runes) < length {
+		// Generate a bunch of indexes
+		data := make([]byte, int(float64(length)*rngBufferMultiplier))
+		numBytes, err := rng.Read(data)
+		if err != nil {
+			return nil, err
+		}
+
+		// Append characters until either we're out of indexes or the length is long enough
+		for i := 0; i < numBytes; i++ {
+			// Be careful to ensure that maxAllowedRNGValue isn't >= 256 as it will overflow and this
+			// comparison will prevent characters from being selected from the charset
+			if data[i] > byte(maxAllowedRNGValue) {
+				continue
+			}
+
+			index := data[i]
+			if len(charset) != maxCharsetLen {
+				index = index % charsetLen
+			}
+			r := charset[index]
+			runes = append(runes, r)
+
+			if len(runes) == length {
+				break
+			}
+		}
+	}
+
+	return runes, nil
+}
+
+// validateConfig of the generator to ensure that we can successfully generate a string.
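+//
+// For example (illustrative values only), this generator fails validation
+// because its rules demand at least 2 characters while Length is only 1:
+//
+//	g := &StringGenerator{
+//		Length: 1,
+//		Rules:  []Rule{CharsetRule{Charset: []rune("abcde"), MinChars: 2}},
+//	}
+//	err := g.validateConfig() // "specified rules require at least 2 characters but 1 is specified"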
+func (g *StringGenerator) validateConfig() (err error) {
+	merr := &multierror.Error{}
+
+	// Ensure the sum of minimum lengths in the rules doesn't exceed the length specified
+	minLen := getMinLength(g.Rules)
+	if g.Length <= 0 {
+		merr = multierror.Append(merr, fmt.Errorf("length must be > 0"))
+	} else if g.Length < minLen {
+		merr = multierror.Append(merr, fmt.Errorf("specified rules require at least %d characters but %d is specified", minLen, g.Length))
+	}
+
+	g.charsetLock.Lock()
+	defer g.charsetLock.Unlock()
+	// Ensure we have a charset & all characters are printable
+	if len(g.charset) == 0 {
+		// Yes this is mutating the generator but this is done so we don't have to compute this on every generation
+		g.charset = getChars(g.Rules)
+	}
+	if len(g.charset) == 0 {
+		merr = multierror.Append(merr, fmt.Errorf("no charset specified"))
+	} else {
+		for _, r := range g.charset {
+			if !unicode.IsPrint(r) {
+				merr = multierror.Append(merr, fmt.Errorf("non-printable character in charset"))
+				break
+			}
+		}
+	}
+	return merr.ErrorOrNil()
+}
+
+// getMinLength from the rules using the optional interface: `MinLength() int`
+func getMinLength(rules []Rule) (minLen int) {
+	type minLengthProvider interface {
+		MinLength() int
+	}
+
+	for _, rule := range rules {
+		mlp, ok := rule.(minLengthProvider)
+		if !ok {
+			continue
+		}
+		minLen += mlp.MinLength()
+	}
+	return minLen
+}
+
+// getChars from the rules using the optional interface: `Chars() []rune`
+func getChars(rules []Rule) (chars []rune) {
+	type charsetProvider interface {
+		Chars() []rune
+	}
+
+	for _, rule := range rules {
+		cp, ok := rule.(charsetProvider)
+		if !ok {
+			continue
+		}
+		chars = append(chars, cp.Chars()...)
+	}
+	return deduplicateRunes(chars)
+}
+
+// deduplicateRunes returns a new slice of sorted & de-duplicated runes
+func deduplicateRunes(original []rune) (deduped []rune) {
+	if len(original) == 0 {
+		return nil
+	}
+
+	m := map[rune]bool{}
+	dedupedRunes := []rune(nil)
+
+	for _, r := range original {
+		if m[r] {
+			continue
+		}
+		m[r] = true
+		dedupedRunes = append(dedupedRunes, r)
+	}
+
+	// They don't have to be sorted, but this is being done to make the charset easier to visualize
+	sort.Sort(runes(dedupedRunes))
+	return dedupedRunes
+}
diff --git a/helper/random/string_generator_test.go b/helper/random/string_generator_test.go
new file mode 100644
index 0000000..8307ff7
--- /dev/null
+++ b/helper/random/string_generator_test.go
@@ -0,0 +1,832 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package random + +import ( + "context" + "crypto/rand" + "encoding/json" + "fmt" + "io" + "math" + MRAND "math/rand" + "reflect" + "sort" + "testing" + "time" +) + +func TestStringGenerator_Generate_successful(t *testing.T) { + type testCase struct { + timeout time.Duration + generator *StringGenerator + } + + tests := map[string]testCase{ + "common rules": { + timeout: 1 * time.Second, + generator: &StringGenerator{ + Length: 20, + Rules: []Rule{ + CharsetRule{ + Charset: LowercaseRuneset, + MinChars: 1, + }, + CharsetRule{ + Charset: UppercaseRuneset, + MinChars: 1, + }, + CharsetRule{ + Charset: NumericRuneset, + MinChars: 1, + }, + CharsetRule{ + Charset: ShortSymbolRuneset, + MinChars: 1, + }, + }, + charset: AlphaNumericShortSymbolRuneset, + }, + }, + "charset not explicitly specified": { + timeout: 1 * time.Second, + generator: &StringGenerator{ + Length: 20, + Rules: []Rule{ + CharsetRule{ + Charset: LowercaseRuneset, + MinChars: 1, + }, + CharsetRule{ + Charset: UppercaseRuneset, + MinChars: 1, + }, + CharsetRule{ + Charset: NumericRuneset, + MinChars: 1, + }, + }, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + // One context to rule them all, one context to find them, one context to bring them all and in the darkness bind them. + ctx, cancel := context.WithTimeout(context.Background(), test.timeout) + defer cancel() + + runeset := map[rune]bool{} + runesFound := []rune{} + + for i := 0; i < 100; i++ { + actual, err := test.generator.Generate(ctx, nil) + if err != nil { + t.Fatalf("no error expected, but got: %s", err) + } + for _, r := range actual { + if runeset[r] { + continue + } + runeset[r] = true + runesFound = append(runesFound, r) + } + } + + sort.Sort(runes(runesFound)) + + expectedCharset := getChars(test.generator.Rules) + + if !reflect.DeepEqual(runesFound, expectedCharset) { + t.Fatalf("Didn't find all characters from the charset\nActual : [%s]\nExpected: [%s]", string(runesFound), string(expectedCharset)) + } + }) + } +} + +func TestStringGenerator_Generate_errors(t *testing.T) { + type testCase struct { + timeout time.Duration + generator *StringGenerator + rng io.Reader + } + + tests := map[string]testCase{ + "already timed out": { + timeout: 0, + generator: &StringGenerator{ + Length: 20, + Rules: []Rule{ + testCharsetRule{ + fail: false, + }, + }, + charset: AlphaNumericShortSymbolRuneset, + }, + rng: rand.Reader, + }, + "impossible rules": { + timeout: 10 * time.Millisecond, // Keep this short so the test doesn't take too long + generator: &StringGenerator{ + Length: 20, + Rules: []Rule{ + testCharsetRule{ + fail: true, + }, + }, + charset: AlphaNumericShortSymbolRuneset, + }, + rng: rand.Reader, + }, + "bad RNG reader": { + timeout: 10 * time.Millisecond, // Keep this short so the test doesn't take too long + generator: &StringGenerator{ + Length: 20, + Rules: []Rule{}, + charset: AlphaNumericShortSymbolRuneset, + }, + rng: badReader{}, + }, + "0 length": { + timeout: 10 * time.Millisecond, + generator: &StringGenerator{ + Length: 0, + Rules: []Rule{ + CharsetRule{ + Charset: []rune("abcde"), + MinChars: 0, + }, + }, + charset: []rune("abcde"), + }, + rng: rand.Reader, + }, + "-1 length": { + timeout: 10 * time.Millisecond, + generator: &StringGenerator{ + Length: -1, + Rules: []Rule{ + CharsetRule{ + Charset: []rune("abcde"), + MinChars: 0, + }, + }, + charset: []rune("abcde"), + }, + rng: rand.Reader, + }, + "no charset": { + timeout: 10 * time.Millisecond, + generator: 
&StringGenerator{ + Length: 20, + Rules: []Rule{}, + }, + rng: rand.Reader, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + // One context to rule them all, one context to find them, one context to bring them all and in the darkness bind them. + ctx, cancel := context.WithTimeout(context.Background(), test.timeout) + defer cancel() + + actual, err := test.generator.Generate(ctx, test.rng) + if err == nil { + t.Fatalf("Expected error but none found") + } + if actual != "" { + t.Fatalf("Random string returned: %s", actual) + } + }) + } +} + +func TestRandomRunes_deterministic(t *testing.T) { + // These tests are to ensure that the charset selection doesn't do anything weird like selecting the same character + // over and over again. The number of test cases here should be kept to a minimum since they are sensitive to changes + type testCase struct { + rngSeed int64 + charset string + length int + expected string + } + + tests := map[string]testCase{ + "small charset": { + rngSeed: 1585593298447807000, + charset: "abcde", + length: 20, + expected: "ddddddcdebbeebdbdbcd", + }, + "common charset": { + rngSeed: 1585593298447807001, + charset: AlphaNumericShortSymbolCharset, + length: 20, + expected: "ON6lVjnBs84zJbUBVEzb", + }, + "max size charset": { + rngSeed: 1585593298447807002, + charset: " !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_" + + "`abcdefghijklmnopqrstuvwxyz{|}~ĀāĂ㥹ĆćĈĉĊċČčĎďĐđĒēĔĕĖėĘęĚěĜĝĞğĠ" + + "ġĢģĤĥĦħĨĩĪīĬĭĮįİıIJijĴĵĶķĸĹĺĻļĽľĿŀŁłŃńŅņŇňʼnŊŋŌōŎŏŐőŒœŔŕŖŗŘřŚśŜŝŞşŠ" + + "šŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſ℀℁ℂ℃℄℅℆ℇ℈℉ℊℋℌℍℎℏℐℑℒℓ℔ℕ№℗℘ℙℚℛℜℝ℞℟℠", + length: 20, + expected: "tųŎ℄ņ℃Œ.@řHš-ℍ}ħGIJLℏ", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + rng := MRAND.New(MRAND.NewSource(test.rngSeed)) + runes, err := randomRunes(rng, []rune(test.charset), test.length) + if err != nil { + t.Fatalf("Expected no error, but found: %s", err) + } + + str := string(runes) + + if str != test.expected { + t.Fatalf("Actual: %s Expected: %s", str, test.expected) + } + }) + } +} + +func TestRandomRunes_successful(t *testing.T) { + type testCase struct { + charset []rune // Assumes no duplicate runes + length int + } + + tests := map[string]testCase{ + "small charset": { + charset: []rune("abcde"), + length: 20, + }, + "common charset": { + charset: AlphaNumericShortSymbolRuneset, + length: 20, + }, + "max size charset": { + charset: []rune( + " !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_" + + "`abcdefghijklmnopqrstuvwxyz{|}~ĀāĂ㥹ĆćĈĉĊċČčĎďĐđĒēĔĕĖėĘęĚěĜĝĞğĠ" + + "ġĢģĤĥĦħĨĩĪīĬĭĮįİıIJijĴĵĶķĸĹĺĻļĽľĿŀŁłŃńŅņŇňʼnŊŋŌōŎŏŐőŒœŔŕŖŗŘřŚśŜŝŞşŠ" + + "šŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſ℀℁ℂ℃℄℅℆ℇ℈℉ℊℋℌℍℎℏℐℑℒℓ℔ℕ№℗℘ℙℚℛℜℝ℞℟℠", + ), + length: 20, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + runeset := map[rune]bool{} + runesFound := []rune{} + + for i := 0; i < 10000; i++ { + actual, err := randomRunes(rand.Reader, test.charset, test.length) + if err != nil { + t.Fatalf("no error expected, but got: %s", err) + } + for _, r := range actual { + if runeset[r] { + continue + } + runeset[r] = true + runesFound = append(runesFound, r) + } + } + + sort.Sort(runes(runesFound)) + + // Sort the input too just to ensure that they can be compared + sort.Sort(runes(test.charset)) + + if !reflect.DeepEqual(runesFound, test.charset) { + t.Fatalf("Didn't find all characters from the charset\nActual : [%s]\nExpected: [%s]", string(runesFound), string(test.charset)) + } + }) + } +} + +func TestRandomRunes_errors(t 
*testing.T) { + type testCase struct { + charset []rune + length int + rng io.Reader + } + + tests := map[string]testCase{ + "nil charset": { + charset: nil, + length: 20, + rng: rand.Reader, + }, + "empty charset": { + charset: []rune{}, + length: 20, + rng: rand.Reader, + }, + "charset is too long": { + charset: []rune(" !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_" + + "`abcdefghijklmnopqrstuvwxyz{|}~ĀāĂ㥹ĆćĈĉĊċČčĎďĐđĒēĔĕĖėĘęĚěĜĝĞğĠ" + + "ġĢģĤĥĦħĨĩĪīĬĭĮįİıIJijĴĵĶķĸĹĺĻļĽľĿŀŁłŃńŅņŇňʼnŊŋŌōŎŏŐőŒœŔŕŖŗŘřŚśŜŝŞşŠ" + + "šŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſ℀℁ℂ℃℄℅℆ℇ℈℉ℊℋℌℍℎℏℐℑℒℓ℔ℕ№℗℘ℙℚℛℜℝ℞℟℠" + + "Σ", + ), + length: 20, + rng: rand.Reader, + }, + "length is zero": { + charset: []rune("abcde"), + length: 0, + rng: rand.Reader, + }, + "length is negative": { + charset: []rune("abcde"), + length: -3, + rng: rand.Reader, + }, + "reader failed": { + charset: []rune("abcde"), + length: 20, + rng: badReader{}, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + actual, err := randomRunes(test.rng, test.charset, test.length) + if err == nil { + t.Fatalf("Expected error but none found") + } + if actual != nil { + t.Fatalf("Expected no value, but found [%s]", string(actual)) + } + }) + } +} + +func BenchmarkStringGenerator_Generate(b *testing.B) { + lengths := []int{ + 8, 12, 16, 20, 24, 28, + } + + type testCase struct { + generator *StringGenerator + } + + benches := map[string]testCase{ + "no restrictions": { + generator: &StringGenerator{ + Rules: []Rule{ + CharsetRule{ + Charset: AlphaNumericFullSymbolRuneset, + }, + }, + }, + }, + "default generator": { + generator: DefaultStringGenerator, + }, + "large symbol set": { + generator: &StringGenerator{ + Rules: []Rule{ + CharsetRule{ + Charset: LowercaseRuneset, + MinChars: 1, + }, + CharsetRule{ + Charset: UppercaseRuneset, + MinChars: 1, + }, + CharsetRule{ + Charset: NumericRuneset, + MinChars: 1, + }, + CharsetRule{ + Charset: FullSymbolRuneset, + MinChars: 1, + }, + }, + }, + }, + "max symbol set": { + generator: &StringGenerator{ + Rules: []Rule{ + CharsetRule{ + Charset: []rune(" !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_" + + "`abcdefghijklmnopqrstuvwxyz{|}~ĀāĂ㥹ĆćĈĉĊċČčĎďĐđĒēĔĕĖėĘęĚěĜĝĞğĠ" + + "ġĢģĤĥĦħĨĩĪīĬĭĮįİıIJijĴĵĶķĸĹĺĻļĽľĿŀŁłŃńŅņŇňʼnŊŋŌōŎŏŐőŒœŔŕŖŗŘřŚśŜŝŞşŠ" + + "šŢţŤťŦŧŨũŪūŬŭŮůŰűŲųŴŵŶŷŸŹźŻżŽžſ℀℁ℂ℃℄℅℆ℇ℈℉ℊℋℌℍℎℏℐℑℒℓ℔ℕ№℗℘ℙℚℛℜℝ℞℟℠"), + }, + CharsetRule{ + Charset: LowercaseRuneset, + MinChars: 1, + }, + CharsetRule{ + Charset: UppercaseRuneset, + MinChars: 1, + }, + CharsetRule{ + Charset: []rune("ĩĪīĬĭĮįİıIJijĴĵĶķĸĹĺĻļĽľĿŀŁłŃńŅņŇňʼnŊŋŌōŎŏŐőŒ"), + MinChars: 1, + }, + }, + }, + }, + "restrictive charset rules": { + generator: &StringGenerator{ + Rules: []Rule{ + CharsetRule{ + Charset: AlphaNumericShortSymbolRuneset, + }, + CharsetRule{ + Charset: []rune("A"), + MinChars: 1, + }, + CharsetRule{ + Charset: []rune("1"), + MinChars: 1, + }, + CharsetRule{ + Charset: []rune("a"), + MinChars: 1, + }, + CharsetRule{ + Charset: []rune("-"), + MinChars: 1, + }, + }, + }, + }, + } + + for name, bench := range benches { + b.Run(name, func(b *testing.B) { + for _, length := range lengths { + bench.generator.Length = length + b.Run(fmt.Sprintf("length=%d", length), func(b *testing.B) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + str, err := bench.generator.Generate(ctx, nil) + if err != nil { + b.Fatalf("Failed to generate string: %s", err) + } + if str == "" { + b.Fatalf("Didn't error but didn't 
generate a string") + } + } + }) + } + }) + } + + // Mimic what the SQLCredentialsProducer is doing + b.Run("SQLCredentialsProducer", func(b *testing.B) { + sg := StringGenerator{ + Length: 16, // 16 because the SQLCredentialsProducer prepends 4 characters to a 20 character password + charset: AlphaNumericRuneset, + Rules: nil, + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + str, err := sg.Generate(ctx, nil) + if err != nil { + b.Fatalf("Failed to generate string: %s", err) + } + if str == "" { + b.Fatalf("Didn't error but didn't generate a string") + } + } + }) +} + +// Ensure the StringGenerator can be properly JSON-ified +func TestStringGenerator_JSON(t *testing.T) { + expected := StringGenerator{ + Length: 20, + charset: deduplicateRunes([]rune("teststring" + ShortSymbolCharset)), + Rules: []Rule{ + testCharsetRule{ + String: "teststring", + Integer: 123, + }, + CharsetRule{ + Charset: ShortSymbolRuneset, + MinChars: 1, + }, + }, + } + + b, err := json.Marshal(expected) + if err != nil { + t.Fatalf("Failed to marshal to JSON: %s", err) + } + + parser := PolicyParser{ + RuleRegistry: Registry{ + Rules: map[string]ruleConstructor{ + "testrule": newTestRule, + "charset": ParseCharset, + }, + }, + } + actual, err := parser.ParsePolicy(string(b)) + if err != nil { + t.Fatalf("Failed to parse JSON: %s", err) + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("Actual: %#v\nExpected: %#v", actual, expected) + } +} + +type badReader struct{} + +func (badReader) Read([]byte) (int, error) { + return 0, fmt.Errorf("test error") +} + +func TestValidate(t *testing.T) { + type testCase struct { + generator *StringGenerator + expectErr bool + } + + tests := map[string]testCase{ + "default generator": { + generator: DefaultStringGenerator, + expectErr: false, + }, + "length is 0": { + generator: &StringGenerator{ + Length: 0, + }, + expectErr: true, + }, + "length is negative": { + generator: &StringGenerator{ + Length: -2, + }, + expectErr: true, + }, + "nil charset, no rules": { + generator: &StringGenerator{ + Length: 5, + charset: nil, + }, + expectErr: true, + }, + "zero length charset, no rules": { + generator: &StringGenerator{ + Length: 5, + charset: []rune{}, + }, + expectErr: true, + }, + "rules require password longer than length": { + generator: &StringGenerator{ + Length: 5, + charset: []rune("abcde"), + Rules: []Rule{ + CharsetRule{ + Charset: []rune("abcde"), + MinChars: 6, + }, + }, + }, + expectErr: true, + }, + "charset has non-printable characters": { + generator: &StringGenerator{ + Length: 0, + charset: []rune{ + 'a', + 'b', + 0, // Null character + 'd', + 'e', + }, + }, + expectErr: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + err := test.generator.validateConfig() + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + }) + } +} + +type testNonCharsetRule struct { + String string `mapstructure:"string" json:"string"` +} + +func (tr testNonCharsetRule) Pass([]rune) bool { return true } +func (tr testNonCharsetRule) Type() string { return "testNonCharsetRule" } + +func TestGetChars(t *testing.T) { + type testCase struct { + rules []Rule + expected []rune + } + + tests := map[string]testCase{ + "nil rules": { + rules: nil, + expected: []rune(nil), + }, + "empty rules": { + rules: []Rule{}, + expected: []rune(nil), + }, + 
"rule without chars": { + rules: []Rule{ + testNonCharsetRule{ + String: "teststring", + }, + }, + expected: []rune(nil), + }, + "rule with chars": { + rules: []Rule{ + CharsetRule{ + Charset: []rune("abcdefghij"), + MinChars: 1, + }, + }, + expected: []rune("abcdefghij"), + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + actual := getChars(test.rules) + if !reflect.DeepEqual(actual, test.expected) { + t.Fatalf("Actual: %v\nExpected: %v", actual, test.expected) + } + }) + } +} + +func TestDeduplicateRunes(t *testing.T) { + type testCase struct { + input []rune + expected []rune + } + + tests := map[string]testCase{ + "empty string": { + input: []rune(""), + expected: []rune(nil), + }, + "no duplicates": { + input: []rune("abcde"), + expected: []rune("abcde"), + }, + "in order duplicates": { + input: []rune("aaaabbbbcccccccddddeeeee"), + expected: []rune("abcde"), + }, + "out of order duplicates": { + input: []rune("abcdeabcdeabcdeabcde"), + expected: []rune("abcde"), + }, + "unicode no duplicates": { + input: []rune("日本語"), + expected: []rune("日本語"), + }, + "unicode in order duplicates": { + input: []rune("日日日日本本本語語語語語"), + expected: []rune("日本語"), + }, + "unicode out of order duplicates": { + input: []rune("日本語日本語日本語日本語"), + expected: []rune("日本語"), + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + actual := deduplicateRunes(test.input) + if !reflect.DeepEqual(actual, test.expected) { + t.Fatalf("Actual: %#v\nExpected:%#v", actual, test.expected) + } + }) + } +} + +func TestRandomRunes_Bias(t *testing.T) { + type testCase struct { + charset []rune + maxStdDev float64 + } + + tests := map[string]testCase{ + "small charset": { + charset: []rune("abcde"), + maxStdDev: 2700, + }, + "lowercase characters": { + charset: LowercaseRuneset, + maxStdDev: 1000, + }, + "alphabetical characters": { + charset: AlphabeticRuneset, + maxStdDev: 800, + }, + "alphanumeric": { + charset: AlphaNumericRuneset, + maxStdDev: 800, + }, + "alphanumeric with symbol": { + charset: AlphaNumericShortSymbolRuneset, + maxStdDev: 800, + }, + "charset evenly divisible into 256": { + charset: append(AlphaNumericRuneset, '!', '@'), + maxStdDev: 800, + }, + "large charset": { + charset: FullSymbolRuneset, + maxStdDev: 800, + }, + "just under half size charset": { + charset: []rune(" !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_" + + "`abcdefghijklmnopqrstuvwxyz{|}~ĀāĂ㥹ĆćĈĉĊċČčĎďĐđĒēĔĕĖėĘęĚěĜĝĞğ"), + maxStdDev: 800, + }, + "half size charset": { + charset: []rune(" !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_" + + "`abcdefghijklmnopqrstuvwxyz{|}~ĀāĂ㥹ĆćĈĉĊċČčĎďĐđĒēĔĕĖėĘęĚěĜĝĞğĠ"), + maxStdDev: 800, + }, + } + + for name, test := range tests { + t.Run(fmt.Sprintf("%s (%d chars)", name, len(test.charset)), func(t *testing.T) { + runeCounts := map[rune]int{} + + generations := 50000 + length := 100 + for i := 0; i < generations; i++ { + str, err := randomRunes(nil, test.charset, length) + if err != nil { + t.Fatal(err) + } + for _, r := range str { + runeCounts[r]++ + } + } + + chars := charCounts{} + + var sum float64 + for r, count := range runeCounts { + chars = append(chars, charCount{r, count}) + sum += float64(count) + } + + mean := sum / float64(len(runeCounts)) + var stdDev float64 + for _, count := range runeCounts { + stdDev += math.Pow(float64(count)-mean, 2) + } + + stdDev = math.Sqrt(stdDev / float64(len(runeCounts))) + t.Logf("Mean : %10.4f", mean) + + if stdDev > test.maxStdDev { + t.Fatalf("Standard deviation 
is too large: %.2f > %.2f", stdDev, test.maxStdDev)
+			}
+		})
+	}
+}
+
+type charCount struct {
+	r     rune
+	count int
+}
+
+type charCounts []charCount
+
+func (s charCounts) Len() int           { return len(s) }
+func (s charCounts) Less(i, j int) bool { return s[i].r < s[j].r }
+func (s charCounts) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
diff --git a/helper/storagepacker/storagepacker.go b/helper/storagepacker/storagepacker.go new file mode 100644 index 0000000..2e69f3a --- /dev/null +++ b/helper/storagepacker/storagepacker.go @@ -0,0 +1,385 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package storagepacker
+
+import (
+	"context"
+	"crypto/md5"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/armon/go-metrics"
+	"github.com/golang/protobuf/proto"
+	"github.com/hashicorp/go-hclog"
+	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/sdk/helper/compressutil"
+	"github.com/hashicorp/vault/sdk/helper/locksutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+const (
+	bucketCount = 256
+	// StoragePackerBucketsPrefix is the default storage key prefix under which
+	// bucket data will be stored.
+	StoragePackerBucketsPrefix = "packer/buckets/"
+)
+
+// StoragePacker packs items into a fixed number of buckets by hashing each
+// item's identifier and indexing on the hash. Currently this supports only 256
+// bucket entries and hence relies on the first byte of the hash value for
+// indexing.
+type StoragePacker struct {
+	view         logical.Storage
+	logger       log.Logger
+	storageLocks []*locksutil.LockEntry
+	viewPrefix   string
+}
+
+// View returns the storage view configured to be used by the packer
+func (s *StoragePacker) View() logical.Storage {
+	return s.view
+}
+
+// GetBucket returns a bucket for a given key
+func (s *StoragePacker) GetBucket(ctx context.Context, key string) (*Bucket, error) {
+	if key == "" {
+		return nil, fmt.Errorf("missing bucket key")
+	}
+
+	lock := locksutil.LockForKey(s.storageLocks, key)
+	lock.RLock()
+	defer lock.RUnlock()
+
+	// Read from storage
+	storageEntry, err := s.view.Get(ctx, key)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read packed storage entry: %w", err)
+	}
+	if storageEntry == nil {
+		return nil, nil
+	}
+
+	uncompressedData, notCompressed, err := compressutil.Decompress(storageEntry.Value)
+	if err != nil {
+		return nil, fmt.Errorf("failed to decompress packed storage entry: %w", err)
+	}
+	if notCompressed {
+		uncompressedData = storageEntry.Value
+	}
+
+	var bucket Bucket
+	err = proto.Unmarshal(uncompressedData, &bucket)
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode packed storage entry: %w", err)
+	}
+
+	return &bucket, nil
+}
+
+// upsert either inserts a new item into the bucket or updates an existing one
+// if an item with a matching key is already present.
+func (s *Bucket) upsert(item *Item) error {
+	if s == nil {
+		return fmt.Errorf("nil storage bucket")
+	}
+
+	if item == nil {
+		return fmt.Errorf("nil item")
+	}
+
+	if item.ID == "" {
+		return fmt.Errorf("missing item ID")
+	}
+
+	// Look for an item with a matching key; don't modify the collection while
+	// iterating
+	foundIdx := -1
+	for itemIdx, bucketItems := range s.Items {
+		if bucketItems.ID == item.ID {
+			foundIdx = itemIdx
+			break
+		}
+	}
+
+	// If there is no match, append the item, otherwise update it
+	if foundIdx == -1 {
+		s.Items = append(s.Items, item)
+	} else {
+		s.Items[foundIdx] = item
+	}
+
+	return nil
+}
+
+// BucketKey returns the storage key of the bucket where the given item will be
+// stored.
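+// The key is the view prefix plus the decimal value of the first byte of the
+// MD5 hash of the item ID, giving bucketCount (256) possible buckets; for
+// example, an item whose ID hashes to a leading byte of 0x2a lands in
+// "packer/buckets/42" under the default prefix. The same mapping in sketch
+// form (the helper name is illustrative, not part of this package):
+//
+//	func bucketIndex(itemID string) uint8 {
+//		sum := md5.Sum([]byte(itemID)) // crypto/md5, as imported above
+//		return sum[0]
+//	}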
+func (s *StoragePacker) BucketKey(itemID string) string {
+	hf := md5.New()
+	input := []byte(itemID)
+	n, err := hf.Write(input)
+	// Make linter happy
+	if err != nil || n != len(input) {
+		return ""
+	}
+	index := uint8(hf.Sum(nil)[0])
+	return s.viewPrefix + strconv.Itoa(int(index))
+}
+
+// DeleteItem removes the item from the respective bucket
+func (s *StoragePacker) DeleteItem(ctx context.Context, itemID string) error {
+	return s.DeleteMultipleItems(ctx, nil, []string{itemID})
+}
+
+func (s *StoragePacker) DeleteMultipleItems(ctx context.Context, logger hclog.Logger, itemIDs []string) error {
+	defer metrics.MeasureSince([]string{"storage_packer", "delete_items"}, time.Now())
+	if len(itemIDs) == 0 {
+		return nil
+	}
+
+	if logger == nil {
+		logger = hclog.NewNullLogger()
+	}
+
+	// Group the IDs by the bucket they will be deleted from
+	lockKeys := make([]string, 0)
+	byBucket := make(map[string]map[string]struct{})
+	for _, id := range itemIDs {
+		bucketKey := s.BucketKey(id)
+		bucket, ok := byBucket[bucketKey]
+		if !ok {
+			bucket = make(map[string]struct{})
+			byBucket[bucketKey] = bucket
+
+			// Add the lock key once
+			lockKeys = append(lockKeys, bucketKey)
+		}
+
+		bucket[id] = struct{}{}
+	}
+
+	locks := locksutil.LocksForKeys(s.storageLocks, lockKeys)
+	for _, lock := range locks {
+		lock.Lock()
+		defer lock.Unlock()
+	}
+
+	logger.Debug("deleting multiple items from storagepacker; caching and deleting from buckets", "total_items", len(itemIDs))
+
+	// For each bucket, load from storage, remove the necessary items, and
+	// write the bucket back out to storage
+	pctDone := 0
+	idx := 0
+	for bucketKey, itemsToRemove := range byBucket {
+		// Read bucket from storage
+		storageEntry, err := s.view.Get(ctx, bucketKey)
+		if err != nil {
+			return fmt.Errorf("failed to read packed storage value: %w", err)
+		}
+		if storageEntry == nil {
+			logger.Warn("could not find bucket", "bucket", bucketKey)
+			continue
+		}
+
+		uncompressedData, notCompressed, err := compressutil.Decompress(storageEntry.Value)
+		if err != nil {
+			return fmt.Errorf("failed to decompress packed storage value: %w", err)
+		}
+		if notCompressed {
+			uncompressedData = storageEntry.Value
+		}
+
+		bucket := new(Bucket)
+		err = proto.Unmarshal(uncompressedData, bucket)
+		if err != nil {
+			return fmt.Errorf("failed decoding packed storage entry: %w", err)
+		}
+
+		// Look for matching storage entries and delete them from the list.
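+		// The removal loop below uses the "swap with the last element and
+		// truncate" idiom: each delete is O(1) and avoids shifting the tail
+		// of the slice, at the cost of reordering items, which does not
+		// matter within a bucket. In sketch form (an illustrative restatement
+		// of the loop that follows, not additional behavior):
+		//
+		//	items[i] = items[len(items)-1] // overwrite the removed slot
+		//	items = items[:len(items)-1]   // drop the now-duplicated tail
+		//	i--                            // revisit slot i; it holds a new element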
+		for i := 0; i < len(bucket.Items); i++ {
+			if _, ok := itemsToRemove[bucket.Items[i].ID]; ok {
+				bucket.Items[i] = bucket.Items[len(bucket.Items)-1]
+				bucket.Items = bucket.Items[:len(bucket.Items)-1]
+
+				// Since we just moved a value to position i, we need to
+				// decrement i so we replay this position
+				i--
+			}
+		}
+
+		// Fail if the context is canceled; the storage calls would fail anyway
+		if ctx.Err() != nil {
+			return ctx.Err()
+		}
+
+		err = s.putBucket(ctx, bucket)
+		if err != nil {
+			return err
+		}
+
+		newPctDone := idx * 100.0 / len(byBucket)
+		if int(newPctDone) > pctDone {
+			pctDone = int(newPctDone)
+			logger.Trace("bucket persistence progress", "percent", pctDone, "buckets_persisted", idx)
+		}
+
+		idx++
+	}
+
+	return nil
+}
+
+func (s *StoragePacker) putBucket(ctx context.Context, bucket *Bucket) error {
+	defer metrics.MeasureSince([]string{"storage_packer", "put_bucket"}, time.Now())
+	if bucket == nil {
+		return fmt.Errorf("nil bucket entry")
+	}
+
+	if bucket.Key == "" {
+		return fmt.Errorf("missing key")
+	}
+
+	if !strings.HasPrefix(bucket.Key, s.viewPrefix) {
+		return fmt.Errorf("incorrect prefix; bucket entry key should have %q prefix", s.viewPrefix)
+	}
+
+	marshaledBucket, err := proto.Marshal(bucket)
+	if err != nil {
+		return fmt.Errorf("failed to marshal bucket: %w", err)
+	}
+
+	compressedBucket, err := compressutil.Compress(marshaledBucket, &compressutil.CompressionConfig{
+		Type: compressutil.CompressionTypeSnappy,
+	})
+	if err != nil {
+		return fmt.Errorf("failed to compress packed bucket: %w", err)
+	}
+
+	// Store the compressed value
+	err = s.view.Put(ctx, &logical.StorageEntry{
+		Key:   bucket.Key,
+		Value: compressedBucket,
+	})
+	if err != nil {
+		return fmt.Errorf("failed to persist packed storage entry: %w", err)
+	}
+
+	return nil
+}
+
+// GetItem fetches the storage entry for a given key from its corresponding
+// bucket.
+func (s *StoragePacker) GetItem(itemID string) (*Item, error) {
+	defer metrics.MeasureSince([]string{"storage_packer", "get_item"}, time.Now())
+
+	if itemID == "" {
+		return nil, fmt.Errorf("empty item ID")
+	}
+
+	bucketKey := s.BucketKey(itemID)
+
+	// Fetch the bucket entry
+	bucket, err := s.GetBucket(context.Background(), bucketKey)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read packed storage item: %w", err)
+	}
+	if bucket == nil {
+		return nil, nil
+	}
+
+	// Look for a matching storage entry in the bucket items
+	for _, item := range bucket.Items {
+		if item.ID == itemID {
+			return item, nil
+		}
+	}
+
+	return nil, nil
+}
+
+// PutItem stores the given item in its respective bucket
+func (s *StoragePacker) PutItem(ctx context.Context, item *Item) error {
+	defer metrics.MeasureSince([]string{"storage_packer", "put_item"}, time.Now())
+
+	if item == nil {
+		return fmt.Errorf("nil item")
+	}
+
+	if item.ID == "" {
+		return fmt.Errorf("missing ID in item")
+	}
+
+	var err error
+	bucketKey := s.BucketKey(item.ID)
+
+	bucket := &Bucket{
+		Key: bucketKey,
+	}
+
+	// In this case, we persist the storage entry regardless of whether the
+	// storageEntry read below is nil or not. Hence, directly acquire the
+	// write lock even to read the entry.
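+	// Holding the write lock across the entire read-modify-write below also
+	// keeps concurrent PutItem calls on the same bucket from losing updates;
+	// taking only a read lock for the Get and upgrading afterwards would let
+	// another writer slip in between.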
+	lock := locksutil.LockForKey(s.storageLocks, bucketKey)
+	lock.Lock()
+	defer lock.Unlock()
+
+	// Check if there is an existing bucket for a given key
+	storageEntry, err := s.view.Get(ctx, bucketKey)
+	if err != nil {
+		return fmt.Errorf("failed to read packed storage bucket entry: %w", err)
+	}
+
+	if storageEntry == nil {
+		// If the bucket entry does not exist, this will be the only item in
+		// the bucket that is going to be persisted.
+		bucket.Items = []*Item{
+			item,
+		}
+	} else {
+		uncompressedData, notCompressed, err := compressutil.Decompress(storageEntry.Value)
+		if err != nil {
+			return fmt.Errorf("failed to decompress packed storage entry: %w", err)
+		}
+		if notCompressed {
+			uncompressedData = storageEntry.Value
+		}
+
+		err = proto.Unmarshal(uncompressedData, bucket)
+		if err != nil {
+			return fmt.Errorf("failed to decode packed storage entry: %w", err)
+		}
+
+		err = bucket.upsert(item)
+		if err != nil {
+			return fmt.Errorf("failed to update entry in packed storage entry: %w", err)
+		}
+	}
+
+	return s.putBucket(ctx, bucket)
+}
+
+// NewStoragePacker creates a new storage packer for a given view
+func NewStoragePacker(view logical.Storage, logger log.Logger, viewPrefix string) (*StoragePacker, error) {
+	if view == nil {
+		return nil, fmt.Errorf("nil view")
+	}
+
+	if viewPrefix == "" {
+		viewPrefix = StoragePackerBucketsPrefix
+	}
+
+	if !strings.HasSuffix(viewPrefix, "/") {
+		viewPrefix = viewPrefix + "/"
+	}
+
+	// Create a new packer object for the given view
+	packer := &StoragePacker{
+		view:         view,
+		viewPrefix:   viewPrefix,
+		logger:       logger,
+		storageLocks: locksutil.CreateLocks(),
+	}
+
+	return packer, nil
+}
diff --git a/helper/storagepacker/storagepacker_test.go b/helper/storagepacker/storagepacker_test.go new file mode 100644 index 0000000..ad76107 --- /dev/null +++ b/helper/storagepacker/storagepacker_test.go @@ -0,0 +1,295 @@
+// Copyright (c) HashiCorp, Inc.
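+//
+// A minimal end-to-end sketch of the packer API exercised by these tests,
+// using the in-memory storage and the logger construction the tests below
+// use (error handling elided):
+//
+//	packer, _ := NewStoragePacker(&logical.InmemStorage{}, log.New(&log.LoggerOptions{Name: "example"}), "")
+//	_ = packer.PutItem(context.Background(), &Item{ID: "item1"})
+//	item, _ := packer.GetItem("item1")                   // item.ID == "item1"
+//	_ = packer.DeleteItem(context.Background(), "item1") // a later GetItem returns nil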
+// SPDX-License-Identifier: MPL-2.0 + +package storagepacker + +import ( + "context" + "fmt" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + log "github.com/hashicorp/go-hclog" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/helper/identity" + "github.com/hashicorp/vault/sdk/logical" +) + +func BenchmarkStoragePacker(b *testing.B) { + storagePacker, err := NewStoragePacker(&logical.InmemStorage{}, log.New(&log.LoggerOptions{Name: "storagepackertest"}), "") + if err != nil { + b.Fatal(err) + } + + ctx := context.Background() + + for i := 0; i < b.N; i++ { + itemID, err := uuid.GenerateUUID() + if err != nil { + b.Fatal(err) + } + + item := &Item{ + ID: itemID, + } + + err = storagePacker.PutItem(ctx, item) + if err != nil { + b.Fatal(err) + } + + fetchedItem, err := storagePacker.GetItem(itemID) + if err != nil { + b.Fatal(err) + } + + if fetchedItem == nil { + b.Fatalf("failed to read stored item with ID: %q, iteration: %d", item.ID, i) + } + + if fetchedItem.ID != item.ID { + b.Fatalf("bad: item ID; expected: %q\n actual: %q", item.ID, fetchedItem.ID) + } + + err = storagePacker.DeleteItem(ctx, item.ID) + if err != nil { + b.Fatal(err) + } + + fetchedItem, err = storagePacker.GetItem(item.ID) + if err != nil { + b.Fatal(err) + } + if fetchedItem != nil { + b.Fatalf("failed to delete item") + } + } +} + +func TestStoragePacker(t *testing.T) { + storagePacker, err := NewStoragePacker(&logical.InmemStorage{}, log.New(&log.LoggerOptions{Name: "storagepackertest"}), "") + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + + // Persist a storage entry + item1 := &Item{ + ID: "item1", + } + + err = storagePacker.PutItem(ctx, item1) + if err != nil { + t.Fatal(err) + } + + // Verify that it can be read + fetchedItem, err := storagePacker.GetItem(item1.ID) + if err != nil { + t.Fatal(err) + } + if fetchedItem == nil { + t.Fatalf("failed to read the stored item") + } + + if item1.ID != fetchedItem.ID { + t.Fatalf("bad: item ID; expected: %q\n actual: %q\n", item1.ID, fetchedItem.ID) + } + + // Delete item1 + err = storagePacker.DeleteItem(ctx, item1.ID) + if err != nil { + t.Fatal(err) + } + + // Check that the deletion was successful + fetchedItem, err = storagePacker.GetItem(item1.ID) + if err != nil { + t.Fatal(err) + } + + if fetchedItem != nil { + t.Fatalf("failed to delete item") + } +} + +func TestStoragePacker_SerializeDeserializeComplexItem(t *testing.T) { + storagePacker, err := NewStoragePacker(&logical.InmemStorage{}, log.New(&log.LoggerOptions{Name: "storagepackertest"}), "") + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + + timeNow := ptypes.TimestampNow() + + alias1 := &identity.Alias{ + ID: "alias_id", + CanonicalID: "canonical_id", + MountType: "mount_type", + MountAccessor: "mount_accessor", + Metadata: map[string]string{ + "aliasmkey": "aliasmvalue", + }, + Name: "alias_name", + CreationTime: timeNow, + LastUpdateTime: timeNow, + MergedFromCanonicalIDs: []string{"merged_from_canonical_id"}, + } + + entity := &identity.Entity{ + Aliases: []*identity.Alias{alias1}, + ID: "entity_id", + Name: "entity_name", + Metadata: map[string]string{ + "testkey1": "testvalue1", + "testkey2": "testvalue2", + }, + CreationTime: timeNow, + LastUpdateTime: timeNow, + BucketKey: "entity_hash", + MergedEntityIDs: []string{"merged_entity_id1", "merged_entity_id2"}, + Policies: []string{"policy1", "policy2"}, + } + + marshaledEntity, err := ptypes.MarshalAny(entity) + if err != nil { + t.Fatal(err) + 
} + err = storagePacker.PutItem(ctx, &Item{ + ID: entity.ID, + Message: marshaledEntity, + }) + if err != nil { + t.Fatal(err) + } + + itemFetched, err := storagePacker.GetItem(entity.ID) + if err != nil { + t.Fatal(err) + } + + var itemDecoded identity.Entity + err = ptypes.UnmarshalAny(itemFetched.Message, &itemDecoded) + if err != nil { + t.Fatal(err) + } + + if !proto.Equal(&itemDecoded, entity) { + t.Fatalf("bad: expected: %#v\nactual: %#v\n", entity, itemDecoded) + } +} + +func TestStoragePacker_DeleteMultiple(t *testing.T) { + storagePacker, err := NewStoragePacker(&logical.InmemStorage{}, log.New(&log.LoggerOptions{Name: "storagepackertest"}), "") + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + + // Persist a storage entry + for i := 0; i < 100; i++ { + item := &Item{ + ID: fmt.Sprintf("item%d", i), + } + + err = storagePacker.PutItem(ctx, item) + if err != nil { + t.Fatal(err) + } + + // Verify that it can be read + fetchedItem, err := storagePacker.GetItem(item.ID) + if err != nil { + t.Fatal(err) + } + if fetchedItem == nil { + t.Fatalf("failed to read the stored item") + } + + if item.ID != fetchedItem.ID { + t.Fatalf("bad: item ID; expected: %q\n actual: %q\n", item.ID, fetchedItem.ID) + } + } + + itemsToDelete := make([]string, 0, 50) + for i := 1; i < 100; i += 2 { + itemsToDelete = append(itemsToDelete, fmt.Sprintf("item%d", i)) + } + + err = storagePacker.DeleteMultipleItems(ctx, nil, itemsToDelete) + if err != nil { + t.Fatal(err) + } + + // Check that the deletion was successful + for i := 0; i < 100; i++ { + fetchedItem, err := storagePacker.GetItem(fmt.Sprintf("item%d", i)) + if err != nil { + t.Fatal(err) + } + + if i%2 == 0 && fetchedItem == nil { + t.Fatal("expected item not found") + } + if i%2 == 1 && fetchedItem != nil { + t.Fatalf("failed to delete item") + } + } +} + +func TestStoragePacker_DeleteMultiple_ALL(t *testing.T) { + storagePacker, err := NewStoragePacker(&logical.InmemStorage{}, log.New(&log.LoggerOptions{Name: "storagepackertest"}), "") + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + + // Persist a storage entry + itemsToDelete := make([]string, 0, 10000) + for i := 0; i < 10000; i++ { + item := &Item{ + ID: fmt.Sprintf("item%d", i), + } + + err = storagePacker.PutItem(ctx, item) + if err != nil { + t.Fatal(err) + } + + // Verify that it can be read + fetchedItem, err := storagePacker.GetItem(item.ID) + if err != nil { + t.Fatal(err) + } + if fetchedItem == nil { + t.Fatalf("failed to read the stored item") + } + + if item.ID != fetchedItem.ID { + t.Fatalf("bad: item ID; expected: %q\n actual: %q\n", item.ID, fetchedItem.ID) + } + + itemsToDelete = append(itemsToDelete, fmt.Sprintf("item%d", i)) + } + + err = storagePacker.DeleteMultipleItems(ctx, nil, itemsToDelete) + if err != nil { + t.Fatal(err) + } + + // Check that the deletion was successful + for _, item := range itemsToDelete { + fetchedItem, err := storagePacker.GetItem(item) + if err != nil { + t.Fatal(err) + } + if fetchedItem != nil { + t.Fatal("item not deleted") + } + } +} diff --git a/helper/storagepacker/types.pb.go b/helper/storagepacker/types.pb.go new file mode 100644 index 0000000..6dd58b9 --- /dev/null +++ b/helper/storagepacker/types.pb.go @@ -0,0 +1,273 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.12
+// source: helper/storagepacker/types.proto
+
+package storagepacker
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	anypb "google.golang.org/protobuf/types/known/anypb"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Item represents an entry that gets inserted into the storage packer
+type Item struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// ID must be provided by the caller; the same value, if used with GetItem,
+	// can be used to fetch the item. However, when iterating through a bucket,
+	// this ID will be an internal ID. In other words, outside of the use-case
+	// described above, the caller *must not* rely on this value to be
+	// consistent with what they passed in.
+	ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	// message is the contents of the item
+	Message *anypb.Any `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+}
+
+func (x *Item) Reset() {
+	*x = Item{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_helper_storagepacker_types_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Item) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Item) ProtoMessage() {}
+
+func (x *Item) ProtoReflect() protoreflect.Message {
+	mi := &file_helper_storagepacker_types_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Item.ProtoReflect.Descriptor instead.
+func (*Item) Descriptor() ([]byte, []int) {
+	return file_helper_storagepacker_types_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Item) GetID() string {
+	if x != nil {
+		return x.ID
+	}
+	return ""
+}
+
+func (x *Item) GetMessage() *anypb.Any {
+	if x != nil {
+		return x.Message
+	}
+	return nil
+}
+
+// Bucket is a construct to hold multiple items within itself. This
+// abstraction contains multiple buckets of the same kind within itself and
+// shares among them the items that get inserted. When the bucket as a whole
+// gets too big to hold more items, the contained buckets get pushed out only
+// to become independent buckets. Hence, this can grow infinitely in terms of
+// storage space for items that get inserted.
+type Bucket struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Key is the storage path where the bucket gets stored
+	Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	// Items holds the items contained within this bucket. Used by v1.
+	Items []*Item `protobuf:"bytes,2,rep,name=items,proto3" json:"items,omitempty"`
+	// ItemMap stores a mapping of item ID to message. Used by v2.
+ ItemMap map[string]*anypb.Any `protobuf:"bytes,3,rep,name=item_map,json=itemMap,proto3" json:"item_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Bucket) Reset() { + *x = Bucket{} + if protoimpl.UnsafeEnabled { + mi := &file_helper_storagepacker_types_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket) ProtoMessage() {} + +func (x *Bucket) ProtoReflect() protoreflect.Message { + mi := &file_helper_storagepacker_types_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket.ProtoReflect.Descriptor instead. +func (*Bucket) Descriptor() ([]byte, []int) { + return file_helper_storagepacker_types_proto_rawDescGZIP(), []int{1} +} + +func (x *Bucket) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *Bucket) GetItems() []*Item { + if x != nil { + return x.Items + } + return nil +} + +func (x *Bucket) GetItemMap() map[string]*anypb.Any { + if x != nil { + return x.ItemMap + } + return nil +} + +var File_helper_storagepacker_types_proto protoreflect.FileDescriptor + +var file_helper_storagepacker_types_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x68, 0x65, 0x6c, 0x70, 0x65, 0x72, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x70, 0x61, 0x63, 0x6b, 0x65, 0x72, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x61, 0x63, 0x6b, 0x65, + 0x72, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x46, 0x0a, 0x04, + 0x49, 0x74, 0x65, 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x02, 0x69, 0x64, 0x12, 0x2e, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x22, 0xd6, 0x01, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x29, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x13, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x72, + 0x2e, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x3d, 0x0a, 0x08, + 0x69, 0x74, 0x65, 0x6d, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x72, 0x2e, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x74, 0x65, 0x6d, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x07, 0x69, 0x74, 0x65, 0x6d, 0x4d, 0x61, 0x70, 0x1a, 0x50, 0x0a, 0x0c, 0x49, + 0x74, 0x65, 0x6d, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, + 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, + 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x31, 0x5a, + 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, + 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x68, 0x65, 0x6c, 0x70, + 0x65, 0x72, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x72, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_helper_storagepacker_types_proto_rawDescOnce sync.Once + file_helper_storagepacker_types_proto_rawDescData = file_helper_storagepacker_types_proto_rawDesc +) + +func file_helper_storagepacker_types_proto_rawDescGZIP() []byte { + file_helper_storagepacker_types_proto_rawDescOnce.Do(func() { + file_helper_storagepacker_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_helper_storagepacker_types_proto_rawDescData) + }) + return file_helper_storagepacker_types_proto_rawDescData +} + +var file_helper_storagepacker_types_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_helper_storagepacker_types_proto_goTypes = []interface{}{ + (*Item)(nil), // 0: storagepacker.Item + (*Bucket)(nil), // 1: storagepacker.Bucket + nil, // 2: storagepacker.Bucket.ItemMapEntry + (*anypb.Any)(nil), // 3: google.protobuf.Any +} +var file_helper_storagepacker_types_proto_depIDxs = []int32{ + 3, // 0: storagepacker.Item.message:type_name -> google.protobuf.Any + 0, // 1: storagepacker.Bucket.items:type_name -> storagepacker.Item + 2, // 2: storagepacker.Bucket.item_map:type_name -> storagepacker.Bucket.ItemMapEntry + 3, // 3: storagepacker.Bucket.ItemMapEntry.value:type_name -> google.protobuf.Any + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_helper_storagepacker_types_proto_init() } +func file_helper_storagepacker_types_proto_init() { + if File_helper_storagepacker_types_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_helper_storagepacker_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Item); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_helper_storagepacker_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_helper_storagepacker_types_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_helper_storagepacker_types_proto_goTypes, + DependencyIndexes: file_helper_storagepacker_types_proto_depIDxs, + MessageInfos: file_helper_storagepacker_types_proto_msgTypes, + }.Build() + File_helper_storagepacker_types_proto = out.File + file_helper_storagepacker_types_proto_rawDesc = nil + file_helper_storagepacker_types_proto_goTypes = nil + file_helper_storagepacker_types_proto_depIDxs = nil +} diff --git a/helper/storagepacker/types.proto b/helper/storagepacker/types.proto new file mode 
100644 index 0000000..7efb0a1 --- /dev/null +++ b/helper/storagepacker/types.proto @@ -0,0 +1,37 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+syntax = "proto3";
+
+option go_package = "github.com/hashicorp/vault/helper/storagepacker";
+
+package storagepacker;
+
+import "google/protobuf/any.proto";
+
+// Item represents an entry that gets inserted into the storage packer
+message Item {
+  // ID must be provided by the caller; the same value, if used with GetItem,
+  // can be used to fetch the item. However, when iterating through a bucket,
+  // this ID will be an internal ID. In other words, outside of the use-case
+  // described above, the caller *must not* rely on this value to be
+  // consistent with what they passed in.
+  string id = 1;
+  // message is the contents of the item
+  google.protobuf.Any message = 2;
+}
+
+// Bucket is a construct to hold multiple items within itself. This
+// abstraction contains multiple buckets of the same kind within itself and
+// shares among them the items that get inserted. When the bucket as a whole
+// gets too big to hold more items, the contained buckets get pushed out only
+// to become independent buckets. Hence, this can grow infinitely in terms of
+// storage space for items that get inserted.
+message Bucket {
+  // Key is the storage path where the bucket gets stored
+  string key = 1;
+  // Items holds the items contained within this bucket. Used by v1.
+  repeated Item items = 2;
+  // ItemMap stores a mapping of item ID to message. Used by v2.
+  map<string, google.protobuf.Any> item_map = 3;
+}
diff --git a/helper/testhelpers/azurite/azurite.go b/helper/testhelpers/azurite/azurite.go new file mode 100644 index 0000000..a538e5c --- /dev/null +++ b/helper/testhelpers/azurite/azurite.go @@ -0,0 +1,109 @@
+// Copyright (c) HashiCorp, Inc.
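+//
+// A sketch of how a test typically consumes this helper (the test body is
+// hypothetical; PrepareTestContainer, Config, and ContainerURL are defined
+// below):
+//
+//	func TestWithAzurite(t *testing.T) {
+//		cleanup, svcConfig := PrepareTestContainer(t, "")
+//		defer cleanup()
+//
+//		cfg := svcConfig.(*Config)
+//		containerURL, err := cfg.ContainerURL(context.Background(), "testcontainer")
+//		// ... exercise blob storage via containerURL ...
+//	}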
+// SPDX-License-Identifier: MPL-2.0 + +package azurite + +import ( + "context" + "fmt" + "net/url" + "testing" + + "github.com/Azure/azure-storage-blob-go/azblob" + "github.com/hashicorp/vault/sdk/helper/docker" +) + +type Config struct { + Endpoint string + AccountName string + AccountKey string +} + +func (c Config) Address() string { + return c.Endpoint +} + +func (c Config) URL() *url.URL { + return &url.URL{Scheme: "http", Host: c.Endpoint, Path: "/" + accountName} +} + +//func (c Config) ConnectionString() string { +// elems := []string{ +// "DefaultEndpointsProtocol=http", +// "AccountName=" + accountName, +// "AccountKey=" + accountKey, +// "EndpointSuffix=" + c.Endpoint, +// } +// return strings.Join(elems, ";") +//} + +func (c Config) ContainerURL(ctx context.Context, container string) (*azblob.ContainerURL, error) { + credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) + if err != nil { + return nil, err + } + p := azblob.NewPipeline(credential, azblob.PipelineOptions{ + //Log: pipeline.LogOptions{ + // Log: func(level pipeline.LogLevel, message string) { + // log.Println(message) + // }, + // ShouldLog: func(level pipeline.LogLevel) bool { + // return true + // }, + //}, + }) + u := *c.URL() + u.Path += "/" + container + cu := azblob.NewContainerURL(u, p) + return &cu, nil +} + +var _ docker.ServiceConfig = &Config{} + +const ( + accountName = "testaccount" + accountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" +) + +func PrepareTestContainer(t *testing.T, version string) (func(), docker.ServiceConfig) { + if version == "" { + version = "latest" + } + runner, err := docker.NewServiceRunner(docker.RunOptions{ + ContainerName: "azurite", + ImageRepo: "mcr.microsoft.com/azure-storage/azurite", + ImageTag: version, + Cmd: []string{"azurite-blob", "--blobHost", "0.0.0.0", "--blobPort", "10000", "-d", "/dev/stderr"}, + Ports: []string{"10000/tcp"}, + Env: []string{fmt.Sprintf(`AZURITE_ACCOUNTS=%s:%s`, accountName, accountKey)}, + }) + if err != nil { + t.Fatalf("Could not start docker Azurite: %s", err) + } + + svc, err := runner.StartService(context.Background(), connectAzure) + if err != nil { + t.Fatalf("Could not start docker Azurite: %s", err) + } + + return svc.Cleanup, svc.Config +} + +func connectAzure(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { + cfg := &Config{ + Endpoint: fmt.Sprintf("%s:%d", host, port), + AccountName: accountName, + AccountKey: accountKey, + } + + containerURL, err := cfg.ContainerURL(ctx, "testcontainer") + if err != nil { + return nil, err + } + _, err = containerURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessContainer) + if err != nil { + return nil, err + } + + return cfg, nil +} diff --git a/helper/testhelpers/cassandra/cassandrahelper.go b/helper/testhelpers/cassandra/cassandrahelper.go new file mode 100644 index 0000000..64f9df1 --- /dev/null +++ b/helper/testhelpers/cassandra/cassandrahelper.go @@ -0,0 +1,178 @@ +// Copyright (c) HashiCorp, Inc. 
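+//
+// This helper uses functional options (ContainerOpt) so call sites override
+// only what they need. A usage sketch (hypothetical call site; the option
+// constructors and Host are defined below):
+//
+//	host, cleanup := PrepareTestContainer(t,
+//		Version("3.11"),
+//		Env("CASSANDRA_BROADCAST_ADDRESS=127.0.0.1"),
+//	)
+//	defer cleanup()
+//	connURL := host.ConnectionURL() // "host:port" via net.JoinHostPort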
+// SPDX-License-Identifier: MPL-2.0 + +package cassandra + +import ( + "context" + "fmt" + "net" + "os" + "path/filepath" + "testing" + "time" + + "github.com/gocql/gocql" + "github.com/hashicorp/vault/sdk/helper/docker" +) + +type containerConfig struct { + containerName string + imageName string + version string + copyFromTo map[string]string + env []string + + sslOpts *gocql.SslOptions +} + +type ContainerOpt func(*containerConfig) + +func ContainerName(name string) ContainerOpt { + return func(cfg *containerConfig) { + cfg.containerName = name + } +} + +func Image(imageName string, version string) ContainerOpt { + return func(cfg *containerConfig) { + cfg.imageName = imageName + cfg.version = version + + // Reset the environment because there's a very good chance the default environment doesn't apply to the + // non-default image being used + cfg.env = nil + } +} + +func Version(version string) ContainerOpt { + return func(cfg *containerConfig) { + cfg.version = version + } +} + +func CopyFromTo(copyFromTo map[string]string) ContainerOpt { + return func(cfg *containerConfig) { + cfg.copyFromTo = copyFromTo + } +} + +func Env(keyValue string) ContainerOpt { + return func(cfg *containerConfig) { + cfg.env = append(cfg.env, keyValue) + } +} + +func SslOpts(sslOpts *gocql.SslOptions) ContainerOpt { + return func(cfg *containerConfig) { + cfg.sslOpts = sslOpts + } +} + +type Host struct { + Name string + Port string +} + +func (h Host) ConnectionURL() string { + return net.JoinHostPort(h.Name, h.Port) +} + +func PrepareTestContainer(t *testing.T, opts ...ContainerOpt) (Host, func()) { + t.Helper() + if os.Getenv("CASSANDRA_HOSTS") != "" { + host, port, err := net.SplitHostPort(os.Getenv("CASSANDRA_HOSTS")) + if err != nil { + t.Fatalf("Failed to split host & port from CASSANDRA_HOSTS (%s): %s", os.Getenv("CASSANDRA_HOSTS"), err) + } + h := Host{ + Name: host, + Port: port, + } + return h, func() {} + } + + containerCfg := &containerConfig{ + imageName: "docker.mirror.hashicorp.services/library/cassandra", + containerName: "cassandra", + version: "3.11", + env: []string{"CASSANDRA_BROADCAST_ADDRESS=127.0.0.1"}, + } + + for _, opt := range opts { + opt(containerCfg) + } + + copyFromTo := map[string]string{} + for from, to := range containerCfg.copyFromTo { + absFrom, err := filepath.Abs(from) + if err != nil { + t.Fatalf("Unable to get absolute path for file %s", from) + } + copyFromTo[absFrom] = to + } + + runOpts := docker.RunOptions{ + ContainerName: containerCfg.containerName, + ImageRepo: containerCfg.imageName, + ImageTag: containerCfg.version, + Ports: []string{"9042/tcp"}, + CopyFromTo: copyFromTo, + Env: containerCfg.env, + } + runner, err := docker.NewServiceRunner(runOpts) + if err != nil { + t.Fatalf("Could not start docker cassandra: %s", err) + } + + svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { + cfg := docker.NewServiceHostPort(host, port) + clusterConfig := gocql.NewCluster(cfg.Address()) + clusterConfig.Authenticator = gocql.PasswordAuthenticator{ + Username: "cassandra", + Password: "cassandra", + } + clusterConfig.Timeout = 30 * time.Second + clusterConfig.ProtoVersion = 4 + clusterConfig.Port = port + + clusterConfig.SslOpts = containerCfg.sslOpts + + session, err := clusterConfig.CreateSession() + if err != nil { + return nil, fmt.Errorf("error creating session: %s", err) + } + defer session.Close() + + // Create keyspace + query := session.Query(`CREATE KEYSPACE "vault" WITH 
REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };`) + if err := query.Exec(); err != nil { + t.Fatalf("could not create cassandra keyspace: %v", err) + } + + // Create table + query = session.Query(`CREATE TABLE "vault"."entries" ( + bucket text, + key text, + value blob, + PRIMARY KEY (bucket, key) + ) WITH CLUSTERING ORDER BY (key ASC);`) + if err := query.Exec(); err != nil { + t.Fatalf("could not create cassandra table: %v", err) + } + return cfg, nil + }) + if err != nil { + t.Fatalf("Could not start docker cassandra: %s", err) + } + + host, port, err := net.SplitHostPort(svc.Config.Address()) + if err != nil { + t.Fatalf("Failed to split host & port from address (%s): %s", svc.Config.Address(), err) + } + h := Host{ + Name: host, + Port: port, + } + return h, svc.Cleanup +} diff --git a/helper/testhelpers/certhelpers/cert_helpers.go b/helper/testhelpers/certhelpers/cert_helpers.go new file mode 100644 index 0000000..42692d0 --- /dev/null +++ b/helper/testhelpers/certhelpers/cert_helpers.go @@ -0,0 +1,247 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package certhelpers + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "math/big" + "net" + "strings" + "testing" + "time" +) + +type CertBuilder struct { + tmpl *x509.Certificate + parentTmpl *x509.Certificate + + selfSign bool + parentKey *rsa.PrivateKey + + isCA bool +} + +type CertOpt func(*CertBuilder) error + +func CommonName(cn string) CertOpt { + return func(builder *CertBuilder) error { + builder.tmpl.Subject.CommonName = cn + return nil + } +} + +func Parent(parent Certificate) CertOpt { + return func(builder *CertBuilder) error { + builder.parentKey = parent.PrivKey.PrivKey + builder.parentTmpl = parent.Template + return nil + } +} + +func IsCA(isCA bool) CertOpt { + return func(builder *CertBuilder) error { + builder.isCA = isCA + return nil + } +} + +func SelfSign() CertOpt { + return func(builder *CertBuilder) error { + builder.selfSign = true + return nil + } +} + +func IP(ip ...string) CertOpt { + return func(builder *CertBuilder) error { + for _, addr := range ip { + if ipAddr := net.ParseIP(addr); ipAddr != nil { + builder.tmpl.IPAddresses = append(builder.tmpl.IPAddresses, ipAddr) + } + } + return nil + } +} + +func DNS(dns ...string) CertOpt { + return func(builder *CertBuilder) error { + builder.tmpl.DNSNames = dns + return nil + } +} + +func NewCert(t *testing.T, opts ...CertOpt) (cert Certificate) { + t.Helper() + + builder := CertBuilder{ + tmpl: &x509.Certificate{ + SerialNumber: makeSerial(t), + Subject: pkix.Name{ + CommonName: makeCommonName(), + }, + NotBefore: time.Now().Add(-1 * time.Hour), + NotAfter: time.Now().Add(1 * time.Hour), + IsCA: false, + KeyUsage: x509.KeyUsageDigitalSignature | + x509.KeyUsageKeyEncipherment | + x509.KeyUsageKeyAgreement, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + }, + } + + for _, opt := range opts { + err := opt(&builder) + if err != nil { + t.Fatalf("Failed to set up certificate builder: %s", err) + } + } + + key := NewPrivateKey(t) + + builder.tmpl.SubjectKeyId = getSubjKeyID(t, key.PrivKey) + + tmpl := builder.tmpl + parent := builder.parentTmpl + publicKey := key.PrivKey.Public() + signingKey := builder.parentKey + + if builder.selfSign { + parent = tmpl + signingKey = key.PrivKey + } + + if builder.isCA { + tmpl.IsCA = true + tmpl.KeyUsage = 
x509.KeyUsageCertSign | x509.KeyUsageCRLSign + tmpl.ExtKeyUsage = nil + } else { + tmpl.KeyUsage = x509.KeyUsageDigitalSignature | + x509.KeyUsageKeyEncipherment | + x509.KeyUsageKeyAgreement + tmpl.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth} + } + + certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, parent, publicKey, signingKey) + if err != nil { + t.Fatalf("Unable to generate certificate: %s", err) + } + certPem := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: certBytes, + }) + + tlsCert, err := tls.X509KeyPair(certPem, key.Pem) + if err != nil { + t.Fatalf("Unable to parse X509 key pair: %s", err) + } + + return Certificate{ + Template: tmpl, + PrivKey: key, + TLSCert: tlsCert, + RawCert: certBytes, + Pem: certPem, + IsCA: builder.isCA, + } +} + +// //////////////////////////////////////////////////////////////////////////// +// Private Key +// //////////////////////////////////////////////////////////////////////////// +type KeyWrapper struct { + PrivKey *rsa.PrivateKey + Pem []byte +} + +func NewPrivateKey(t *testing.T) (key KeyWrapper) { + t.Helper() + + privKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatalf("Unable to generate key for cert: %s", err) + } + + privKeyPem := pem.EncodeToMemory( + &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(privKey), + }, + ) + + key = KeyWrapper{ + PrivKey: privKey, + Pem: privKeyPem, + } + + return key +} + +// //////////////////////////////////////////////////////////////////////////// +// Certificate +// //////////////////////////////////////////////////////////////////////////// +type Certificate struct { + PrivKey KeyWrapper + Template *x509.Certificate + TLSCert tls.Certificate + RawCert []byte + Pem []byte + IsCA bool +} + +func (cert Certificate) CombinedPEM() []byte { + if cert.IsCA { + return cert.Pem + } + return bytes.Join([][]byte{cert.PrivKey.Pem, cert.Pem}, []byte{'\n'}) +} + +func (cert Certificate) PrivateKeyPEM() []byte { + return cert.PrivKey.Pem +} + +// //////////////////////////////////////////////////////////////////////////// +// Helpers +// //////////////////////////////////////////////////////////////////////////// +func makeSerial(t *testing.T) *big.Int { + t.Helper() + + v := &big.Int{} + serialNumberLimit := v.Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + t.Fatalf("Unable to generate serial number: %s", err) + } + return serialNumber +} + +// Pulled from sdk/helper/certutil & slightly modified for test usage +func getSubjKeyID(t *testing.T, privateKey crypto.Signer) []byte { + t.Helper() + + if privateKey == nil { + t.Fatalf("passed-in private key is nil") + } + + marshaledKey, err := x509.MarshalPKIXPublicKey(privateKey.Public()) + if err != nil { + t.Fatalf("error marshalling public key: %s", err) + } + + subjKeyID := sha1.Sum(marshaledKey) + + return subjKeyID[:] +} + +func makeCommonName() (cn string) { + return strings.ReplaceAll(time.Now().Format("20060102T150405.000"), ".", "") +} diff --git a/helper/testhelpers/consul/cluster_storage.go b/helper/testhelpers/consul/cluster_storage.go new file mode 100644 index 0000000..9ca1080 --- /dev/null +++ b/helper/testhelpers/consul/cluster_storage.go @@ -0,0 +1,70 @@ +// Copyright (c) HashiCorp, Inc. 
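+//
+// A sketch of wiring this storage into a test cluster (names besides the
+// hypothetical variables come from this file and the sdk testcluster
+// package):
+//
+//	storage := NewClusterStorage()
+//	storage.ConsulVersion = "1.15.3" // optional version pin
+//	if err := storage.Start(ctx, &testcluster.ClusterOptions{ClusterName: "my-test"}); err != nil {
+//		t.Fatal(err)
+//	}
+//	defer storage.Cleanup()
+//	opts := storage.Opts() // address/token/max_parallel for Vault's storage config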
+// SPDX-License-Identifier: BUSL-1.1
+
+package consul
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/hashicorp/vault/sdk/helper/testcluster"
+)
+
+type ClusterStorage struct {
+ // Set these after calling `NewClusterStorage` but before `Start` (or
+ // passing it to NewDockerCluster) to control the Consul version specifically
+ // in your test. Leave empty for latest OSS (defined in consulhelper.go).
+ ConsulVersion string
+ ConsulEnterprise bool
+
+ cleanup func()
+ config *Config
+}
+
+var _ testcluster.ClusterStorage = &ClusterStorage{}
+
+func NewClusterStorage() *ClusterStorage {
+ return &ClusterStorage{}
+}
+
+func (s *ClusterStorage) Start(ctx context.Context, opts *testcluster.ClusterOptions) error {
+ prefix := ""
+ if opts != nil && opts.ClusterName != "" {
+ prefix = fmt.Sprintf("%s-", opts.ClusterName)
+ }
+ cleanup, config, err := RunContainer(ctx, prefix, s.ConsulVersion, s.ConsulEnterprise, true)
+ if err != nil {
+ return err
+ }
+ s.cleanup = cleanup
+ s.config = config
+
+ return nil
+}
+
+func (s *ClusterStorage) Cleanup() error {
+ if s.cleanup != nil {
+ s.cleanup()
+ s.cleanup = nil
+ }
+ return nil
+}
+
+func (s *ClusterStorage) Opts() map[string]interface{} {
+ if s.config == nil {
+ return nil
+ }
+ return map[string]interface{}{
+ "address": s.config.ContainerHTTPAddr,
+ "token": s.config.Token,
+ "max_parallel": "32",
+ }
+}
+
+func (s *ClusterStorage) Type() string {
+ return "consul"
+}
+
+func (s *ClusterStorage) Config() *Config {
+ return s.config
+}
diff --git a/helper/testhelpers/consul/consulhelper.go b/helper/testhelpers/consul/consulhelper.go
new file mode 100644
index 0000000..9b3f488
--- /dev/null
+++ b/helper/testhelpers/consul/consulhelper.go
@@ -0,0 +1,292 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package consul
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+
+ consulapi "github.com/hashicorp/consul/api"
+ goversion "github.com/hashicorp/go-version"
+ "github.com/hashicorp/vault/sdk/helper/docker"
+)
+
+// LatestConsulVersion is the most recent version of Consul which is used unless
+// another version is specified in the test config or environment. This will
+// probably go stale as we don't always update it on every release but we rarely
+// rely on specific new Consul functionality so that's probably not a problem.
+const LatestConsulVersion = "1.15.3"
+
+type Config struct {
+ docker.ServiceHostPort
+ Token string
+ ContainerHTTPAddr string
+}
+
+func (c *Config) APIConfig() *consulapi.Config {
+ apiConfig := consulapi.DefaultConfig()
+ apiConfig.Address = c.Address()
+ apiConfig.Token = c.Token
+ return apiConfig
+}
+
+// PrepareTestContainer is a test helper that creates a Consul docker container
+// or fails the test if unsuccessful. See RunContainer for more details on the
+// configuration.
+func PrepareTestContainer(t *testing.T, version string, isEnterprise bool, doBootstrapSetup bool) (func(), *Config) {
+ t.Helper()
+
+ cleanup, config, err := RunContainer(context.Background(), "", version, isEnterprise, doBootstrapSetup)
+ if err != nil {
+ t.Fatalf("failed starting consul: %s", err)
+ }
+ return cleanup, config
+}
+
+// RunContainer runs Consul in a Docker container unless CONSUL_HTTP_ADDR is
+// already found in the environment. The Consul version is determined by the
+// version argument. If version is an empty string, the CONSUL_DOCKER_VERSION
+// environment variable is used, and if that is empty too, LatestConsulVersion
+// is used (defined above). 
If namePrefix is provided, we assume you have chosen a unique
+// enough prefix to avoid collision with other tests that may be running in
+// parallel and so _do not_ add an additional unique ID suffix. We will also
+// ensure previous instances are deleted and leave the container running for
+// debugging. This is useful for using Consul as part of a testcluster (i.e.
+// when Vault is in Docker too). If namePrefix is empty then a unique suffix is
+// added since many older tests rely on a unique instance of the container. This
+// is used by `PrepareTestContainer` which is typically used in tests that rely
+// on Consul but run tested code within the test process.
+func RunContainer(ctx context.Context, namePrefix, version string, isEnterprise bool, doBootstrapSetup bool) (func(), *Config, error) {
+ if retAddress := os.Getenv("CONSUL_HTTP_ADDR"); retAddress != "" {
+ shp, err := docker.NewServiceHostPortParse(retAddress)
+ if err != nil {
+ return nil, nil, err
+ }
+ return func() {}, &Config{ServiceHostPort: *shp, Token: os.Getenv("CONSUL_HTTP_TOKEN")}, nil
+ }
+
+ config := `acl { enabled = true default_policy = "deny" }`
+ if version == "" {
+ consulVersion := os.Getenv("CONSUL_DOCKER_VERSION")
+ if consulVersion != "" {
+ version = consulVersion
+ } else {
+ version = LatestConsulVersion
+ }
+ }
+ if strings.HasPrefix(version, "1.3") {
+ config = `datacenter = "test" acl_default_policy = "deny" acl_datacenter = "test" acl_master_token = "test"`
+ }
+
+ name := "consul"
+ repo := "docker.mirror.hashicorp.services/library/consul"
+ var envVars []string
+ // If running the enterprise container, set the appropriate values below.
+ if isEnterprise {
+ version += "-ent"
+ name = "consul-enterprise"
+ repo = "docker.mirror.hashicorp.services/hashicorp/consul-enterprise"
+ license, hasLicense := os.LookupEnv("CONSUL_LICENSE")
+ envVars = append(envVars, "CONSUL_LICENSE="+license)
+
+ if !hasLicense {
+ return nil, nil, fmt.Errorf("Failed to find enterprise license")
+ }
+ }
+ if namePrefix != "" {
+ name = namePrefix + name
+ }
+
+ if dockerRepo, hasEnvRepo := os.LookupEnv("CONSUL_DOCKER_REPO"); hasEnvRepo {
+ repo = dockerRepo
+ }
+
+ dockerOpts := docker.RunOptions{
+ ContainerName: name,
+ ImageRepo: repo,
+ ImageTag: version,
+ Env: envVars,
+ Cmd: []string{"agent", "-dev", "-client", "0.0.0.0", "-hcl", config},
+ Ports: []string{"8500/tcp"},
+ AuthUsername: os.Getenv("CONSUL_DOCKER_USERNAME"),
+ AuthPassword: os.Getenv("CONSUL_DOCKER_PASSWORD"),
+ }
+
+ // Add a unique suffix if there is no per-test prefix provided
+ addSuffix := true
+ if namePrefix != "" {
+ // Don't add a suffix if the caller already provided a prefix
+ addSuffix = false
+ // Also enable predelete and non-removal to make debugging easier for test
+ // cases with named containers. 
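+ // (Aside, per the comment above: PreDelete clears out any leftover
+ // container with the same name from a previous run, and DoNotAutoRemove
+ // leaves the container in place after the test so it can be inspected.)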
+ dockerOpts.PreDelete = true + dockerOpts.DoNotAutoRemove = true + } + + runner, err := docker.NewServiceRunner(dockerOpts) + if err != nil { + return nil, nil, fmt.Errorf("Could not start docker Consul: %s", err) + } + + svc, _, err := runner.StartNewService(ctx, addSuffix, false, func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { + shp := docker.NewServiceHostPort(host, port) + apiConfig := consulapi.DefaultNonPooledConfig() + apiConfig.Address = shp.Address() + consul, err := consulapi.NewClient(apiConfig) + if err != nil { + return nil, err + } + + // Make sure Consul is up + if _, err = consul.Status().Leader(); err != nil { + return nil, err + } + + // For version of Consul < 1.4 + if strings.HasPrefix(version, "1.3") { + consulToken := "test" + _, err = consul.KV().Put(&consulapi.KVPair{ + Key: "setuptest", + Value: []byte("setuptest"), + }, &consulapi.WriteOptions{ + Token: consulToken, + }) + if err != nil { + return nil, err + } + return &Config{ + ServiceHostPort: *shp, + Token: consulToken, + }, nil + } + + // New default behavior + var consulToken string + if doBootstrapSetup { + aclbootstrap, _, err := consul.ACL().Bootstrap() + if err != nil { + return nil, err + } + consulToken = aclbootstrap.SecretID + policy := &consulapi.ACLPolicy{ + Name: "test", + Description: "test", + Rules: `node_prefix "" { + policy = "write" + } + + service_prefix "" { + policy = "read" + }`, + } + q := &consulapi.WriteOptions{ + Token: consulToken, + } + _, _, err = consul.ACL().PolicyCreate(policy, q) + if err != nil { + return nil, err + } + + // Create a Consul role that contains the test policy, for Consul 1.5 and newer + currVersion, _ := goversion.NewVersion(version) + roleVersion, _ := goversion.NewVersion("1.5") + if currVersion.GreaterThanOrEqual(roleVersion) { + ACLList := []*consulapi.ACLTokenRoleLink{{Name: "test"}} + + role := &consulapi.ACLRole{ + Name: "role-test", + Description: "consul roles test", + Policies: ACLList, + } + + _, _, err = consul.ACL().RoleCreate(role, q) + if err != nil { + return nil, err + } + } + + // Configure a namespace and partition if testing enterprise Consul + if isEnterprise { + // Namespaces require Consul 1.7 or newer + namespaceVersion, _ := goversion.NewVersion("1.7") + if currVersion.GreaterThanOrEqual(namespaceVersion) { + namespace := &consulapi.Namespace{ + Name: "ns1", + Description: "ns1 test", + } + + _, _, err = consul.Namespaces().Create(namespace, q) + if err != nil { + return nil, err + } + + nsPolicy := &consulapi.ACLPolicy{ + Name: "ns-test", + Description: "namespace test", + Namespace: "ns1", + Rules: `service_prefix "" { + policy = "read" + }`, + } + _, _, err = consul.ACL().PolicyCreate(nsPolicy, q) + if err != nil { + return nil, err + } + } + + // Partitions require Consul 1.11 or newer + partitionVersion, _ := goversion.NewVersion("1.11") + if currVersion.GreaterThanOrEqual(partitionVersion) { + partition := &consulapi.Partition{ + Name: "part1", + Description: "part1 test", + } + + _, _, err = consul.Partitions().Create(ctx, partition, q) + if err != nil { + return nil, err + } + + partPolicy := &consulapi.ACLPolicy{ + Name: "part-test", + Description: "partition test", + Partition: "part1", + Rules: `service_prefix "" { + policy = "read" + }`, + } + _, _, err = consul.ACL().PolicyCreate(partPolicy, q) + if err != nil { + return nil, err + } + } + } + } + + return &Config{ + ServiceHostPort: *shp, + Token: consulToken, + }, nil + }) + if err != nil { + return nil, nil, err + } + + // Find the 
container network info. + if len(svc.Container.NetworkSettings.Networks) < 1 { + svc.Cleanup() + return nil, nil, fmt.Errorf("failed to find any network settings for container") + } + cfg := svc.Config.(*Config) + for _, eps := range svc.Container.NetworkSettings.Networks { + // Just pick the first network, we assume only one for now. + // Pull out the real container IP and set that up + cfg.ContainerHTTPAddr = fmt.Sprintf("http://%s:8500", eps.IPAddress) + break + } + return svc.Cleanup, cfg, nil +} diff --git a/helper/testhelpers/corehelpers/corehelpers.go b/helper/testhelpers/corehelpers/corehelpers.go new file mode 100644 index 0000000..1771489 --- /dev/null +++ b/helper/testhelpers/corehelpers/corehelpers.go @@ -0,0 +1,439 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package corehelpers contains testhelpers that don't depend on package vault, +// and thus can be used within vault (as well as elsewhere.) +package corehelpers + +import ( + "bytes" + "context" + "crypto/sha256" + "io" + "os" + "path/filepath" + "sync" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/builtin/credential/approle" + "github.com/hashicorp/vault/plugins/database/mysql" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/go-testing-interface" +) + +var externalPlugins = []string{"transform", "kmip", "keymgmt"} + +// RetryUntil runs f until it returns a nil result or the timeout is reached. +// If a nil result hasn't been obtained by timeout, calls t.Fatal. +func RetryUntil(t testing.T, timeout time.Duration, f func() error) { + t.Helper() + deadline := time.Now().Add(timeout) + var err error + for time.Now().Before(deadline) { + if err = f(); err == nil { + return + } + time.Sleep(100 * time.Millisecond) + } + t.Fatalf("did not complete before deadline, err: %v", err) +} + +// MakeTestPluginDir creates a temporary directory suitable for holding plugins. +// This helper also resolves symlinks to make tests happy on OS X. 
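+//
+// A minimal usage sketch (hypothetical caller):
+//
+// pluginDir, cleanup := MakeTestPluginDir(t)
+// defer cleanup(t)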
+func MakeTestPluginDir(t testing.T) (string, func(t testing.T)) {
+ if t != nil {
+ t.Helper()
+ }
+
+ dir, err := os.MkdirTemp("", "")
+ if err != nil {
+ if t == nil {
+ panic(err)
+ }
+ t.Fatal(err)
+ }
+
+ // macOS temp dirs are under /var, which is actually symlinked to /private/var
+ dir, err = filepath.EvalSymlinks(dir)
+ if err != nil {
+ if t == nil {
+ panic(err)
+ }
+ t.Fatal(err)
+ }
+
+ return dir, func(t testing.T) {
+ if err := os.RemoveAll(dir); err != nil {
+ if t == nil {
+ panic(err)
+ }
+ t.Fatal(err)
+ }
+ }
+}
+
+func NewMockBuiltinRegistry() *mockBuiltinRegistry {
+ return &mockBuiltinRegistry{
+ forTesting: map[string]mockBackend{
+ "mysql-database-plugin": {PluginType: consts.PluginTypeDatabase},
+ "postgresql-database-plugin": {PluginType: consts.PluginTypeDatabase},
+ "approle": {PluginType: consts.PluginTypeCredential},
+ "pending-removal-test-plugin": {
+ PluginType: consts.PluginTypeCredential,
+ DeprecationStatus: consts.PendingRemoval,
+ },
+ "aws": {PluginType: consts.PluginTypeCredential},
+ "consul": {PluginType: consts.PluginTypeSecrets},
+ },
+ }
+}
+
+type mockBackend struct {
+ consts.PluginType
+ consts.DeprecationStatus
+}
+
+type mockBuiltinRegistry struct {
+ forTesting map[string]mockBackend
+}
+
+func toFunc(f logical.Factory) func() (interface{}, error) {
+ return func() (interface{}, error) {
+ return f, nil
+ }
+}
+
+func (m *mockBuiltinRegistry) Get(name string, pluginType consts.PluginType) (func() (interface{}, error), bool) {
+ testBackend, ok := m.forTesting[name]
+ if !ok {
+ return nil, false
+ }
+ testPluginType := testBackend.PluginType
+ if pluginType != testPluginType {
+ return nil, false
+ }
+
+ switch name {
+ case "approle", "pending-removal-test-plugin":
+ return toFunc(approle.Factory), true
+ case "aws":
+ return toFunc(func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
+ b := new(framework.Backend)
+ b.Setup(ctx, config)
+ b.BackendType = logical.TypeCredential
+ return b, nil
+ }), true
+ case "postgresql-database-plugin":
+ return toFunc(func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
+ b := new(framework.Backend)
+ b.Setup(ctx, config)
+ b.BackendType = logical.TypeLogical
+ return b, nil
+ }), true
+ case "mysql-database-plugin":
+ return mysql.New(mysql.DefaultUserNameTemplate), true
+ case "consul":
+ return toFunc(func(ctx context.Context, config *logical.BackendConfig) (logical.Backend, error) {
+ b := new(framework.Backend)
+ b.Setup(ctx, config)
+ b.BackendType = logical.TypeLogical
+ return b, nil
+ }), true
+ default:
+ return nil, false
+ }
+}
+
+// Keys only supports returning a realistic list of keys for database plugins
+// and approle
+func (m *mockBuiltinRegistry) Keys(pluginType consts.PluginType) []string {
+ switch pluginType {
+ case consts.PluginTypeDatabase:
+ // This is a hard-coded reproduction of the db plugin keys in
+ // helper/builtinplugins/registry.go. The registry isn't directly used
+ // because it causes import cycles. 
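+ // (Note: Contains and DeprecationStatus below are driven by this list,
+ // so a key missing here is treated as not built in.)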
+ return []string{ + "mysql-database-plugin", + "mysql-aurora-database-plugin", + "mysql-rds-database-plugin", + "mysql-legacy-database-plugin", + + "cassandra-database-plugin", + "couchbase-database-plugin", + "elasticsearch-database-plugin", + "hana-database-plugin", + "influxdb-database-plugin", + "mongodb-database-plugin", + "mongodbatlas-database-plugin", + "mssql-database-plugin", + "postgresql-database-plugin", + "redis-elasticache-database-plugin", + "redshift-database-plugin", + "redis-database-plugin", + "snowflake-database-plugin", + } + case consts.PluginTypeCredential: + return []string{ + "pending-removal-test-plugin", + "approle", + } + + case consts.PluginTypeSecrets: + return append(externalPlugins, "kv") + } + + return []string{} +} + +func (r *mockBuiltinRegistry) IsBuiltinEntPlugin(name string, pluginType consts.PluginType) bool { + for _, i := range externalPlugins { + if i == name { + return true + } + } + return false +} + +func (m *mockBuiltinRegistry) Contains(name string, pluginType consts.PluginType) bool { + for _, key := range m.Keys(pluginType) { + if key == name { + return true + } + } + return false +} + +func (m *mockBuiltinRegistry) DeprecationStatus(name string, pluginType consts.PluginType) (consts.DeprecationStatus, bool) { + if m.Contains(name, pluginType) { + return m.forTesting[name].DeprecationStatus, true + } + + return consts.Unknown, false +} + +func TestNoopAudit(t testing.T, config map[string]string) *NoopAudit { + n, err := NewNoopAudit(config) + if err != nil { + t.Fatal(err) + } + return n +} + +func NewNoopAudit(config map[string]string) (*NoopAudit, error) { + view := &logical.InmemStorage{} + err := view.Put(context.Background(), &logical.StorageEntry{ + Key: "salt", + Value: []byte("foo"), + }) + if err != nil { + return nil, err + } + + n := &NoopAudit{ + Config: &audit.BackendConfig{ + SaltView: view, + SaltConfig: &salt.Config{ + HMAC: sha256.New, + HMACType: "hmac-sha256", + }, + Config: config, + }, + } + n.formatter.AuditFormatWriter = &audit.JSONFormatWriter{ + SaltFunc: n.Salt, + } + return n, nil +} + +func NoopAuditFactory(records **[][]byte) audit.Factory { + return func(_ context.Context, config *audit.BackendConfig) (audit.Backend, error) { + n, err := NewNoopAudit(config.Config) + if err != nil { + return nil, err + } + if records != nil { + *records = &n.records + } + return n, nil + } +} + +type NoopAudit struct { + Config *audit.BackendConfig + ReqErr error + ReqAuth []*logical.Auth + Req []*logical.Request + ReqHeaders []map[string][]string + ReqNonHMACKeys []string + ReqErrs []error + + RespErr error + RespAuth []*logical.Auth + RespReq []*logical.Request + Resp []*logical.Response + RespNonHMACKeys [][]string + RespReqNonHMACKeys [][]string + RespErrs []error + + formatter audit.AuditFormatter + records [][]byte + l sync.RWMutex + salt *salt.Salt + saltMutex sync.RWMutex +} + +func (n *NoopAudit) LogRequest(ctx context.Context, in *logical.LogInput) error { + n.l.Lock() + defer n.l.Unlock() + if n.formatter.AuditFormatWriter != nil { + var w bytes.Buffer + err := n.formatter.FormatRequest(ctx, &w, audit.FormatterConfig{}, in) + if err != nil { + return err + } + n.records = append(n.records, w.Bytes()) + } + + n.ReqAuth = append(n.ReqAuth, in.Auth) + n.Req = append(n.Req, in.Request) + n.ReqHeaders = append(n.ReqHeaders, in.Request.Headers) + n.ReqNonHMACKeys = in.NonHMACReqDataKeys + n.ReqErrs = append(n.ReqErrs, in.OuterErr) + + return n.ReqErr +} + +func (n *NoopAudit) LogResponse(ctx context.Context, in 
*logical.LogInput) error { + n.l.Lock() + defer n.l.Unlock() + + if n.formatter.AuditFormatWriter != nil { + var w bytes.Buffer + err := n.formatter.FormatResponse(ctx, &w, audit.FormatterConfig{}, in) + if err != nil { + return err + } + n.records = append(n.records, w.Bytes()) + } + + n.RespAuth = append(n.RespAuth, in.Auth) + n.RespReq = append(n.RespReq, in.Request) + n.Resp = append(n.Resp, in.Response) + n.RespErrs = append(n.RespErrs, in.OuterErr) + + if in.Response != nil { + n.RespNonHMACKeys = append(n.RespNonHMACKeys, in.NonHMACRespDataKeys) + n.RespReqNonHMACKeys = append(n.RespReqNonHMACKeys, in.NonHMACReqDataKeys) + } + + return n.RespErr +} + +func (n *NoopAudit) LogTestMessage(ctx context.Context, in *logical.LogInput, config map[string]string) error { + n.l.Lock() + defer n.l.Unlock() + var w bytes.Buffer + tempFormatter := audit.NewTemporaryFormatter(config["format"], config["prefix"]) + err := tempFormatter.FormatResponse(ctx, &w, audit.FormatterConfig{}, in) + if err != nil { + return err + } + n.records = append(n.records, w.Bytes()) + return nil +} + +func (n *NoopAudit) Salt(ctx context.Context) (*salt.Salt, error) { + n.saltMutex.RLock() + if n.salt != nil { + defer n.saltMutex.RUnlock() + return n.salt, nil + } + n.saltMutex.RUnlock() + n.saltMutex.Lock() + defer n.saltMutex.Unlock() + if n.salt != nil { + return n.salt, nil + } + salt, err := salt.NewSalt(ctx, n.Config.SaltView, n.Config.SaltConfig) + if err != nil { + return nil, err + } + n.salt = salt + return salt, nil +} + +func (n *NoopAudit) GetHash(ctx context.Context, data string) (string, error) { + salt, err := n.Salt(ctx) + if err != nil { + return "", err + } + return salt.GetIdentifiedHMAC(data), nil +} + +func (n *NoopAudit) Reload(ctx context.Context) error { + return nil +} + +func (n *NoopAudit) Invalidate(ctx context.Context) { + n.saltMutex.Lock() + defer n.saltMutex.Unlock() + n.salt = nil +} + +type TestLogger struct { + hclog.InterceptLogger + Path string + File *os.File + sink hclog.SinkAdapter +} + +func NewTestLogger(t testing.T) *TestLogger { + var logFile *os.File + var logPath string + output := os.Stderr + + logDir := os.Getenv("VAULT_TEST_LOG_DIR") + if logDir != "" { + logPath = filepath.Join(logDir, t.Name()+".log") + // t.Name may include slashes. + dir, _ := filepath.Split(logPath) + err := os.MkdirAll(dir, 0o755) + if err != nil { + t.Fatal(err) + } + logFile, err = os.Create(logPath) + if err != nil { + t.Fatal(err) + } + output = logFile + } + + // We send nothing on the regular logger, that way we can later deregister + // the sink to stop logging during cluster cleanup. + logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{ + Output: io.Discard, + IndependentLevels: true, + Name: t.Name(), + }) + sink := hclog.NewSinkAdapter(&hclog.LoggerOptions{ + Output: output, + Level: hclog.Trace, + IndependentLevels: true, + }) + logger.RegisterSink(sink) + return &TestLogger{ + Path: logPath, + File: logFile, + InterceptLogger: logger, + sink: sink, + } +} + +func (tl *TestLogger) StopLogging() { + tl.InterceptLogger.DeregisterSink(tl.sink) +} diff --git a/helper/testhelpers/etcd/etcdhelper.go b/helper/testhelpers/etcd/etcdhelper.go new file mode 100644 index 0000000..a5b2578 --- /dev/null +++ b/helper/testhelpers/etcd/etcdhelper.go @@ -0,0 +1,90 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package etcd
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/sdk/helper/docker"
+ clientv3 "go.etcd.io/etcd/client/v3"
+)
+
+type Config struct {
+ docker.ServiceURL
+}
+
+// PrepareTestContainer creates an etcd docker container. If the environment
+// variable ETCD_ADDR is set, the tests are executed against the specified
+// address and the etcd container is not launched.
+func PrepareTestContainer(t *testing.T, version string) (func(), *Config) {
+ if addr := os.Getenv("ETCD_ADDR"); addr != "" {
+ url, err := docker.NewServiceURLParse(addr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return func() {}, &Config{ServiceURL: *url}
+ }
+
+ // Check https://github.com/etcd-io/etcd/releases for latest releases.
+ runner, err := docker.NewServiceRunner(docker.RunOptions{
+ ContainerName: "etcd",
+ ImageRepo: "gcr.io/etcd-development/etcd",
+ ImageTag: version,
+ Cmd: []string{
+ "/usr/local/bin/etcd",
+ "--name", "s1",
+ "--listen-client-urls", "http://0.0.0.0:2379",
+ "--advertise-client-urls", "http://0.0.0.0:2379",
+ "--listen-peer-urls", "http://0.0.0.0:2380",
+ "--initial-advertise-peer-urls", "http://0.0.0.0:2380",
+ "--initial-cluster", "s1=http://0.0.0.0:2380",
+ "--initial-cluster-token", "tkn",
+ "--initial-cluster-state", "new",
+ "--log-level", "info",
+ "--logger", "zap",
+ "--log-outputs", "stderr",
+ },
+ Ports: []string{"2379/tcp"},
+ })
+ if err != nil {
+ t.Fatalf("Could not start docker etcd container: %s", err)
+ }
+
+ svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) {
+ address := fmt.Sprintf("%s:%d", host, port)
+ s := docker.NewServiceURL(url.URL{
+ Scheme: "http",
+ Host: address,
+ })
+
+ client, err := clientv3.New(clientv3.Config{
+ Endpoints: []string{address},
+ DialTimeout: 2 * time.Minute,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("could not connect to etcd container: %w", err)
+ }
+
+ // Enable authentication for the tests.
+ client.RoleAdd(ctx, "root")
+ client.UserAdd(ctx, "root", "insecure")
+ client.UserGrantRole(ctx, "root", "root")
+ client.AuthEnable(ctx)
+ client.Close()
+
+ return &Config{
+ ServiceURL: *s,
+ }, nil
+ })
+ if err != nil {
+ t.Fatalf("Could not start docker etcd container: %s", err)
+ }
+
+ return svc.Cleanup, svc.Config.(*Config)
+}
diff --git a/helper/testhelpers/fakegcsserver/fake-gcs-server.go b/helper/testhelpers/fakegcsserver/fake-gcs-server.go
new file mode 100644
index 0000000..503824e
--- /dev/null
+++ b/helper/testhelpers/fakegcsserver/fake-gcs-server.go
@@ -0,0 +1,77 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package fakegcsserver
+
+import (
+ "context"
+ "crypto/tls"
+ "fmt"
+ "net/http"
+ "net/url"
+ "testing"
+
+ "cloud.google.com/go/storage"
+ "github.com/hashicorp/vault/sdk/helper/docker"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+)
+
+// In principle we don't need docker for fake-gcs-server, we could run it in
+// memory instead. However, I had an error trying to use it:
+// go: finding module for package google.golang.org/grpc/naming
+// github.com/hashicorp/vault/vault imports
+// google.golang.org/grpc/naming: module google.golang.org/grpc@latest found (v1.32.0), but does not contain package google.golang.org/grpc/naming
+// so it seemed easiest to go this route. Vault already has too many deps anyway. 
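+//
+// A minimal usage sketch (hypothetical consuming test):
+//
+// cleanup, _ := PrepareTestContainer(t, "")
+// defer cleanup()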
+ +func PrepareTestContainer(t *testing.T, version string) (func(), docker.ServiceConfig) { + if version == "" { + version = "latest" + } + runner, err := docker.NewServiceRunner(docker.RunOptions{ + ContainerName: "fake-gcs-server", + ImageRepo: "docker.mirror.hashicorp.services/fsouza/fake-gcs-server", + ImageTag: version, + Cmd: []string{"-scheme", "http", "-public-host", "storage.gcs.127.0.0.1.nip.io:4443"}, + Ports: []string{"4443/tcp"}, + }) + if err != nil { + t.Fatalf("Could not start docker fake-gcs-server: %s", err) + } + + svc, err := runner.StartService(context.Background(), connectGCS) + if err != nil { + t.Fatalf("Could not start docker fake-gcs-server: %s", err) + } + + return svc.Cleanup, svc.Config +} + +func connectGCS(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { + u := url.URL{ + Scheme: "http", + Host: fmt.Sprintf("%s:%d", host, port), + Path: "storage/v1/b", + } + transCfg := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // ignore expired SSL certificates + } + httpClient := &http.Client{Transport: transCfg} + client, err := storage.NewClient(context.TODO(), option.WithEndpoint(u.String()), option.WithHTTPClient(httpClient)) + if err != nil { + return nil, err + } + + it := client.Buckets(ctx, "test") + for { + _, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, err + } + } + + return docker.NewServiceURL(u), nil +} diff --git a/helper/testhelpers/ldap/ldaphelper.go b/helper/testhelpers/ldap/ldaphelper.go new file mode 100644 index 0000000..b0da614 --- /dev/null +++ b/helper/testhelpers/ldap/ldaphelper.go @@ -0,0 +1,67 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ldap + +import ( + "context" + "fmt" + "testing" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/hashicorp/vault/sdk/helper/ldaputil" +) + +func PrepareTestContainer(t *testing.T, version string) (cleanup func(), cfg *ldaputil.ConfigEntry) { + runner, err := docker.NewServiceRunner(docker.RunOptions{ + // Currently set to "michelvocks" until https://github.com/rroemhild/docker-test-openldap/pull/14 + // has been merged. + ImageRepo: "docker.mirror.hashicorp.services/michelvocks/docker-test-openldap", + ImageTag: version, + ContainerName: "ldap", + Ports: []string{"389/tcp"}, + // Env: []string{"LDAP_DEBUG_LEVEL=384"}, + }) + if err != nil { + t.Fatalf("could not start local LDAP docker container: %s", err) + } + + cfg = new(ldaputil.ConfigEntry) + cfg.UserDN = "ou=people,dc=planetexpress,dc=com" + cfg.UserAttr = "cn" + cfg.UserFilter = "({{.UserAttr}}={{.Username}})" + cfg.BindDN = "cn=admin,dc=planetexpress,dc=com" + cfg.BindPassword = "GoodNewsEveryone" + cfg.GroupDN = "ou=people,dc=planetexpress,dc=com" + cfg.GroupAttr = "cn" + cfg.RequestTimeout = 60 + cfg.MaximumPageSize = 1000 + + svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { + connURL := fmt.Sprintf("ldap://%s:%d", host, port) + cfg.Url = connURL + logger := hclog.New(nil) + client := ldaputil.Client{ + LDAP: ldaputil.NewLDAP(), + Logger: logger, + } + + conn, err := client.DialLDAP(cfg) + if err != nil { + return nil, err + } + defer conn.Close() + + if _, err := client.GetUserBindDN(cfg, conn, "Philip J. 
Fry"); err != nil { + return nil, err + } + + return docker.NewServiceURLParse(connURL) + }) + if err != nil { + t.Fatalf("could not start local LDAP docker container: %s", err) + } + + return svc.Cleanup, cfg +} diff --git a/helper/testhelpers/logical/testing.go b/helper/testhelpers/logical/testing.go new file mode 100644 index 0000000..f634be2 --- /dev/null +++ b/helper/testhelpers/logical/testing.go @@ -0,0 +1,533 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package testing + +import ( + "context" + "crypto/tls" + "fmt" + "os" + "reflect" + "sort" + "testing" + + "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/physical/inmem" + "github.com/hashicorp/vault/vault" +) + +// TestEnvVar must be set to a non-empty value for acceptance tests to run. +const TestEnvVar = "VAULT_ACC" + +// TestCase is a single set of tests to run for a backend. A TestCase +// should generally map 1:1 to each test method for your acceptance +// tests. +type TestCase struct { + // Precheck, if non-nil, will be called once before the test case + // runs at all. This can be used for some validation prior to the + // test running. + PreCheck func() + + // LogicalBackend is the backend that will be mounted. + LogicalBackend logical.Backend + + // LogicalFactory can be used instead of LogicalBackend if the + // backend requires more construction + LogicalFactory logical.Factory + + // CredentialBackend is the backend that will be mounted. + CredentialBackend logical.Backend + + // CredentialFactory can be used instead of CredentialBackend if the + // backend requires more construction + CredentialFactory logical.Factory + + // Steps are the set of operations that are run for this test case. + Steps []TestStep + + // Teardown will be called before the test case is over regardless + // of if the test succeeded or failed. This should return an error + // in the case that the test can't guarantee all resources were + // properly cleaned up. + Teardown TestTeardownFunc + + // AcceptanceTest, if set, the test case will be run only if + // the environment variable VAULT_ACC is set. If not this test case + // will be run as a unit test. + AcceptanceTest bool +} + +// TestStep is a single step within a TestCase. +type TestStep struct { + // Operation is the operation to execute + Operation logical.Operation + + // Path is the request path. The mount prefix will be automatically added. + Path string + + // Arguments to pass in + Data map[string]interface{} + + // Check is called after this step is executed in order to test that + // the step executed successfully. If this is not set, then the next + // step will be called + Check TestCheckFunc + + // PreFlight is called directly before execution of the request, allowing + // modification of the request parameters (e.g. Path) with dynamic values. + PreFlight PreFlightFunc + + // ErrorOk, if true, will let erroneous responses through to the check + ErrorOk bool + + // Unauthenticated, if true, will make the request unauthenticated. + Unauthenticated bool + + // RemoteAddr, if set, will set the remote addr on the request. 
+ RemoteAddr string + + // ConnState, if set, will set the tls connection state + ConnState *tls.ConnectionState +} + +// TestCheckFunc is the callback used for Check in TestStep. +type TestCheckFunc func(*logical.Response) error + +// PreFlightFunc is used to modify request parameters directly before execution +// in each TestStep. +type PreFlightFunc func(*logical.Request) error + +// TestTeardownFunc is the callback used for Teardown in TestCase. +type TestTeardownFunc func() error + +// Test performs an acceptance test on a backend with the given test case. +// +// Tests are not run unless an environmental variable "VAULT_ACC" is +// set to some non-empty value. This is to avoid test cases surprising +// a user by creating real resources. +// +// Tests will fail unless the verbose flag (`go test -v`, or explicitly +// the "-test.v" flag) is set. Because some acceptance tests take quite +// long, we require the verbose flag so users are able to see progress +// output. +func Test(tt TestT, c TestCase) { + // We only run acceptance tests if an env var is set because they're + // slow and generally require some outside configuration. + if c.AcceptanceTest && os.Getenv(TestEnvVar) == "" { + tt.Skip(fmt.Sprintf( + "Acceptance tests skipped unless env %q set", + TestEnvVar)) + return + } + + // We require verbose mode so that the user knows what is going on. + if c.AcceptanceTest && !testTesting && !testing.Verbose() { + tt.Fatal("Acceptance tests must be run with the -v flag on tests") + return + } + + // Run the PreCheck if we have it + if c.PreCheck != nil { + c.PreCheck() + } + + // Defer on the teardown, regardless of pass/fail at this point + if c.Teardown != nil { + defer c.Teardown() + } + + // Check that something is provided + if c.LogicalBackend == nil && c.LogicalFactory == nil { + if c.CredentialBackend == nil && c.CredentialFactory == nil { + tt.Fatal("Must provide either Backend or Factory") + return + } + } + // We currently only support doing one logical OR one credential test at a time. 
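+ // (That is, a TestCase sets either LogicalBackend/LogicalFactory or
+ // CredentialBackend/CredentialFactory; the check below fails the test if
+ // both kinds are provided.)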
+ if (c.LogicalFactory != nil || c.LogicalBackend != nil) && (c.CredentialFactory != nil || c.CredentialBackend != nil) { + tt.Fatal("Must provide only one backend or factory") + return + } + + // Create an in-memory Vault core + logger := logging.NewVaultLogger(log.Trace) + + phys, err := inmem.NewInmem(nil, logger) + if err != nil { + tt.Fatal(err) + return + } + + config := &vault.CoreConfig{ + Physical: phys, + DisableMlock: true, + BuiltinRegistry: corehelpers.NewMockBuiltinRegistry(), + } + + if c.LogicalBackend != nil || c.LogicalFactory != nil { + config.LogicalBackends = map[string]logical.Factory{ + "test": func(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { + if c.LogicalBackend != nil { + return c.LogicalBackend, nil + } + return c.LogicalFactory(ctx, conf) + }, + } + } + if c.CredentialBackend != nil || c.CredentialFactory != nil { + config.CredentialBackends = map[string]logical.Factory{ + "test": func(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { + if c.CredentialBackend != nil { + return c.CredentialBackend, nil + } + return c.CredentialFactory(ctx, conf) + }, + } + } + + core, err := vault.NewCore(config) + if err != nil { + tt.Fatal("error initializing core: ", err) + return + } + + // Initialize the core + init, err := core.Initialize(context.Background(), &vault.InitParams{ + BarrierConfig: &vault.SealConfig{ + SecretShares: 1, + SecretThreshold: 1, + }, + RecoveryConfig: nil, + }) + if err != nil { + tt.Fatal("error initializing core: ", err) + return + } + + // Unseal the core + if unsealed, err := core.Unseal(init.SecretShares[0]); err != nil { + tt.Fatal("error unsealing core: ", err) + return + } else if !unsealed { + tt.Fatal("vault shouldn't be sealed") + return + } + + // Create an HTTP API server and client + ln, addr := http.TestServer(nil, core) + defer ln.Close() + clientConfig := api.DefaultConfig() + clientConfig.Address = addr + client, err := api.NewClient(clientConfig) + if err != nil { + tt.Fatal("error initializing HTTP client: ", err) + return + } + + // Set the token so we're authenticated + client.SetToken(init.RootToken) + + prefix := "mnt" + if c.LogicalBackend != nil || c.LogicalFactory != nil { + // Mount the backend + mountInfo := &api.MountInput{ + Type: "test", + Description: "acceptance test", + } + if err := client.Sys().Mount(prefix, mountInfo); err != nil { + tt.Fatal("error mounting backend: ", err) + return + } + } + + isAuthBackend := false + if c.CredentialBackend != nil || c.CredentialFactory != nil { + isAuthBackend = true + + // Enable the test auth method + opts := &api.EnableAuthOptions{ + Type: "test", + } + if err := client.Sys().EnableAuthWithOptions(prefix, opts); err != nil { + tt.Fatal("error enabling backend: ", err) + return + } + } + + tokenInfo, err := client.Auth().Token().LookupSelf() + if err != nil { + tt.Fatal("error looking up token: ", err) + return + } + var tokenPolicies []string + if tokenPoliciesRaw, ok := tokenInfo.Data["policies"]; ok { + if tokenPoliciesSliceRaw, ok := tokenPoliciesRaw.([]interface{}); ok { + for _, p := range tokenPoliciesSliceRaw { + tokenPolicies = append(tokenPolicies, p.(string)) + } + } + } + + // Make requests + var revoke []*logical.Request + for i, s := range c.Steps { + if logger.IsWarn() { + logger.Warn("Executing test step", "step_number", i+1) + } + + // Create the request + req := &logical.Request{ + Operation: s.Operation, + Path: s.Path, + Data: s.Data, + } + if !s.Unauthenticated { + req.ClientToken = 
client.Token() + req.SetTokenEntry(&logical.TokenEntry{ + ID: req.ClientToken, + NamespaceID: namespace.RootNamespaceID, + Policies: tokenPolicies, + DisplayName: tokenInfo.Data["display_name"].(string), + }) + } + req.Connection = &logical.Connection{RemoteAddr: s.RemoteAddr} + if s.ConnState != nil { + req.Connection.ConnState = s.ConnState + } + + if s.PreFlight != nil { + ct := req.ClientToken + req.ClientToken = "" + if err := s.PreFlight(req); err != nil { + tt.Error(fmt.Sprintf("Failed preflight for step %d: %s", i+1, err)) + break + } + req.ClientToken = ct + } + + // Make sure to prefix the path with where we mounted the thing + req.Path = fmt.Sprintf("%s/%s", prefix, req.Path) + + if isAuthBackend { + // Prepend the path with "auth" + req.Path = "auth/" + req.Path + } + + // Make the request + resp, err := core.HandleRequest(namespace.RootContext(nil), req) + if resp != nil && resp.Secret != nil { + // Revoke this secret later + revoke = append(revoke, &logical.Request{ + Operation: logical.UpdateOperation, + Path: "sys/revoke/" + resp.Secret.LeaseID, + }) + } + + // Test step returned an error. + if err != nil { + // But if an error is expected, do not fail the test step, + // regardless of whether the error is a 'logical.ErrorResponse' + // or not. Set the err to nil. If the error is a logical.ErrorResponse, + // it will be handled later. + if s.ErrorOk { + err = nil + } else { + // If the error is not expected, fail right away. + tt.Error(fmt.Sprintf("Failed step %d: %s", i+1, err)) + break + } + } + + // If the error is a 'logical.ErrorResponse' and if error was not expected, + // set the error so that this can be caught below. + if resp.IsError() && !s.ErrorOk { + err = fmt.Errorf("erroneous response:\n\n%#v", resp) + } + + // Either the 'err' was nil or if an error was expected, it was set to nil. + // Call the 'Check' function if there is one. + // + // TODO: This works perfectly for now, but it would be better if 'Check' + // function takes in both the response object and the error, and decide on + // the action on its own. + if err == nil && s.Check != nil { + // Call the test method + err = s.Check(resp) + } + + if err != nil { + tt.Error(fmt.Sprintf("Failed step %d: %s", i+1, err)) + break + } + } + + // Revoke any secrets we might have. + var failedRevokes []*logical.Secret + for _, req := range revoke { + if logger.IsWarn() { + logger.Warn("Revoking secret", "secret", fmt.Sprintf("%#v", req)) + } + req.ClientToken = client.Token() + resp, err := core.HandleRequest(namespace.RootContext(nil), req) + if err == nil && resp.IsError() { + err = fmt.Errorf("erroneous response:\n\n%#v", resp) + } + if err != nil { + failedRevokes = append(failedRevokes, req.Secret) + tt.Error(fmt.Sprintf("Revoke error: %s", err)) + } + } + + // Perform any rollbacks. This should no-op if there aren't any. + // We set the "immediate" flag here that any backend can pick up on + // to do all rollbacks immediately even if the WAL entries are new. 
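+ // (Backends that don't implement rollback return
+ // logical.ErrUnsupportedOperation, which is tolerated below rather than
+ // being reported as a test failure.)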
+ logger.Warn("Requesting RollbackOperation") + rollbackPath := prefix + "/" + if c.CredentialFactory != nil || c.CredentialBackend != nil { + rollbackPath = "auth/" + rollbackPath + } + req := logical.RollbackRequest(rollbackPath) + req.Data["immediate"] = true + req.ClientToken = client.Token() + resp, err := core.HandleRequest(namespace.RootContext(nil), req) + if err == nil && resp.IsError() { + err = fmt.Errorf("erroneous response:\n\n%#v", resp) + } + if err != nil { + if !errwrap.Contains(err, logical.ErrUnsupportedOperation.Error()) { + tt.Error(fmt.Sprintf("[ERR] Rollback error: %s", err)) + } + } + + // If we have any failed revokes, log it. + if len(failedRevokes) > 0 { + for _, s := range failedRevokes { + tt.Error(fmt.Sprintf( + "WARNING: Revoking the following secret failed. It may\n"+ + "still exist. Please verify:\n\n%#v", + s)) + } + } +} + +// TestCheckMulti is a helper to have multiple checks. +func TestCheckMulti(fs ...TestCheckFunc) TestCheckFunc { + return func(resp *logical.Response) error { + for _, f := range fs { + if err := f(resp); err != nil { + return err + } + } + + return nil + } +} + +// TestCheckAuth is a helper to check that a request generated an +// auth token with the proper policies. +func TestCheckAuth(policies []string) TestCheckFunc { + return func(resp *logical.Response) error { + if resp == nil || resp.Auth == nil { + return fmt.Errorf("no auth in response") + } + expected := make([]string, len(policies)) + copy(expected, policies) + sort.Strings(expected) + ret := make([]string, len(resp.Auth.Policies)) + copy(ret, resp.Auth.Policies) + sort.Strings(ret) + if !reflect.DeepEqual(ret, expected) { + return fmt.Errorf("invalid policies: expected %#v, got %#v", expected, ret) + } + + return nil + } +} + +// TestCheckAuthEntityId is a helper to check that a request generated an +// auth token with the expected entity_id. +func TestCheckAuthEntityId(entity_id *string) TestCheckFunc { + return func(resp *logical.Response) error { + if resp == nil || resp.Auth == nil { + return fmt.Errorf("no auth in response") + } + + if *entity_id == "" { + // If we don't know what the entity_id should be, just save it + *entity_id = resp.Auth.EntityID + } else if resp.Auth.EntityID != *entity_id { + return fmt.Errorf("entity_id %s does not match the expected value of %s", resp.Auth.EntityID, *entity_id) + } + + return nil + } +} + +// TestCheckAuthEntityAliasMetadataName is a helper to check that a request generated an +// auth token with the expected alias metadata. +func TestCheckAuthEntityAliasMetadataName(key string, value string) TestCheckFunc { + return func(resp *logical.Response) error { + if resp == nil || resp.Auth == nil { + return fmt.Errorf("no auth in response") + } + + if key == "" || value == "" { + return fmt.Errorf("alias metadata key and value required") + } + + name, ok := resp.Auth.Alias.Metadata[key] + if !ok { + return fmt.Errorf("metadata key %s does not exist, it should", key) + } + + if name != value { + return fmt.Errorf("expected map value %s, got %s", value, name) + } + return nil + } +} + +// TestCheckAuthDisplayName is a helper to check that a request generated a +// valid display name. 
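+// For example, with n == "foo" the check passes only if the display name is
+// exactly "mnt-foo" ("mnt" being the mount prefix used by Test above).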
+func TestCheckAuthDisplayName(n string) TestCheckFunc { + return func(resp *logical.Response) error { + if resp.Auth == nil { + return fmt.Errorf("no auth in response") + } + if n != "" && resp.Auth.DisplayName != "mnt-"+n { + return fmt.Errorf("invalid display name: %#v", resp.Auth.DisplayName) + } + + return nil + } +} + +// TestCheckError is a helper to check that a response is an error. +func TestCheckError() TestCheckFunc { + return func(resp *logical.Response) error { + if !resp.IsError() { + return fmt.Errorf("response should be error") + } + + return nil + } +} + +// TestT is the interface used to handle the test lifecycle of a test. +// +// Users should just use a *testing.T object, which implements this. +type TestT interface { + Error(args ...interface{}) + Fatal(args ...interface{}) + Skip(args ...interface{}) +} + +var testTesting = false diff --git a/helper/testhelpers/logical/testing_test.go b/helper/testhelpers/logical/testing_test.go new file mode 100644 index 0000000..9f2d74b --- /dev/null +++ b/helper/testhelpers/logical/testing_test.go @@ -0,0 +1,93 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package testing + +import ( + "os" + "testing" +) + +func init() { + testTesting = true + + if err := os.Setenv(TestEnvVar, "1"); err != nil { + panic(err) + } +} + +func TestTest_noEnv(t *testing.T) { + // Unset the variable + if err := os.Setenv(TestEnvVar, ""); err != nil { + t.Fatalf("err: %s", err) + } + defer os.Setenv(TestEnvVar, "1") + + mt := new(mockT) + Test(mt, TestCase{ + AcceptanceTest: true, + }) + + if !mt.SkipCalled { + t.Fatal("skip not called") + } +} + +func TestTest_preCheck(t *testing.T) { + called := false + + mt := new(mockT) + Test(mt, TestCase{ + PreCheck: func() { called = true }, + }) + + if !called { + t.Fatal("precheck should be called") + } +} + +// mockT implements TestT for testing +type mockT struct { + ErrorCalled bool + ErrorArgs []interface{} + FatalCalled bool + FatalArgs []interface{} + SkipCalled bool + SkipArgs []interface{} + + f bool +} + +func (t *mockT) Error(args ...interface{}) { + t.ErrorCalled = true + t.ErrorArgs = args + t.f = true +} + +func (t *mockT) Fatal(args ...interface{}) { + t.FatalCalled = true + t.FatalArgs = args + t.f = true +} + +func (t *mockT) Skip(args ...interface{}) { + t.SkipCalled = true + t.SkipArgs = args + t.f = true +} + +func (t *mockT) failed() bool { + return t.f +} + +func (t *mockT) failMessage() string { + if t.FatalCalled { + return t.FatalArgs[0].(string) + } else if t.ErrorCalled { + return t.ErrorArgs[0].(string) + } else if t.SkipCalled { + return t.SkipArgs[0].(string) + } + + return "unknown" +} diff --git a/helper/testhelpers/minio/miniohelper.go b/helper/testhelpers/minio/miniohelper.go new file mode 100644 index 0000000..5550a12 --- /dev/null +++ b/helper/testhelpers/minio/miniohelper.go @@ -0,0 +1,114 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package minio + +import ( + "context" + "fmt" + "net/url" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/hashicorp/vault/sdk/helper/docker" +) + +type Config struct { + Endpoint string + AccessKeyID string + SecretAccessKey string + Region string +} + +const ( + accessKeyID = "min-access-key" + secretKey = "min-secret-key" +) + +func PrepareTestContainer(t *testing.T, version string) (func(), *Config) { + if version == "" { + version = "latest" + } + runner, err := docker.NewServiceRunner(docker.RunOptions{ + ContainerName: "minio", + ImageRepo: "docker.mirror.hashicorp.services/minio/minio", + ImageTag: version, + Env: []string{ + "MINIO_ACCESS_KEY=" + accessKeyID, + "MINIO_SECRET_KEY=" + secretKey, + }, + Cmd: []string{"server", "/data"}, + Ports: []string{"9000/tcp"}, + }) + if err != nil { + t.Fatalf("Could not start docker Minio: %s", err) + } + + svc, err := runner.StartService(context.Background(), connectMinio) + if err != nil { + t.Fatalf("Could not start docker Minio: %s", err) + } + + return svc.Cleanup, &Config{ + Endpoint: svc.Config.URL().Host, + AccessKeyID: accessKeyID, + SecretAccessKey: secretKey, + Region: "us-east-1", + } +} + +func connectMinio(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { + u := url.URL{ + Scheme: "s3", + Host: fmt.Sprintf("%s:%d", host, port), + } + + c := &Config{ + Endpoint: u.Host, + AccessKeyID: accessKeyID, + SecretAccessKey: secretKey, + Region: "us-east-1", + } + s3conn, err := c.Conn() + if err != nil { + return nil, err + } + + _, err = s3conn.ListBuckets(&s3.ListBucketsInput{}) + if err != nil { + return nil, err + } + + return docker.NewServiceURL(u), nil +} + +func (c *Config) Conn() (*s3.S3, error) { + cfg := &aws.Config{ + DisableSSL: aws.Bool(true), + Region: aws.String("us-east-1"), + Endpoint: aws.String(c.Endpoint), + S3ForcePathStyle: aws.Bool(true), + Credentials: credentials.NewChainCredentials( + []credentials.Provider{ + &credentials.StaticProvider{ + Value: credentials.Value{ + AccessKeyID: accessKeyID, + SecretAccessKey: secretKey, + }, + }, + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{}, + defaults.RemoteCredProvider(*(defaults.Config()), defaults.Handlers()), + }), + } + + sess, err := session.NewSession(cfg) + if err != nil { + return nil, err + } + return s3.New(sess), nil +} diff --git a/helper/testhelpers/mongodb/mongodbhelper.go b/helper/testhelpers/mongodb/mongodbhelper.go new file mode 100644 index 0000000..1f7afe3 --- /dev/null +++ b/helper/testhelpers/mongodb/mongodbhelper.go @@ -0,0 +1,66 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package mongodb
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/hashicorp/vault/sdk/helper/docker"
+ "go.mongodb.org/mongo-driver/mongo"
+ "go.mongodb.org/mongo-driver/mongo/options"
+ "go.mongodb.org/mongo-driver/mongo/readpref"
+)
+
+// PrepareTestContainer calls PrepareTestContainerWithDatabase without a
+// database name value, which results in configuring a database named "test"
+func PrepareTestContainer(t *testing.T, version string) (cleanup func(), retURL string) {
+ return PrepareTestContainerWithDatabase(t, version, "")
+}
+
+// PrepareTestContainerWithDatabase configures a test container with a given
+// database name, to test non-test/admin database configurations
+func PrepareTestContainerWithDatabase(t *testing.T, version, dbName string) (func(), string) {
+ if os.Getenv("MONGODB_URL") != "" {
+ return func() {}, os.Getenv("MONGODB_URL")
+ }
+
+ runner, err := docker.NewServiceRunner(docker.RunOptions{
+ ContainerName: "mongo",
+ ImageRepo: "docker.mirror.hashicorp.services/library/mongo",
+ ImageTag: version,
+ Ports: []string{"27017/tcp"},
+ })
+ if err != nil {
+ t.Fatalf("could not start docker mongo: %s", err)
+ }
+
+ svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) {
+ connURL := fmt.Sprintf("mongodb://%s:%d", host, port)
+ if dbName != "" {
+ connURL = fmt.Sprintf("%s/%s", connURL, dbName)
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
+ defer cancel()
+ client, err := mongo.Connect(ctx, options.Client().ApplyURI(connURL))
+ if err != nil {
+ return nil, err
+ }
+
+ // Ping to verify the connection, then disconnect; surface either
+ // failure to the service runner so it can retry or fail the start.
+ if err = client.Ping(ctx, readpref.Primary()); err != nil {
+ return nil, err
+ }
+ if err = client.Disconnect(ctx); err != nil {
+ return nil, err
+ }
+
+ return docker.NewServiceURLParse(connURL)
+ })
+ if err != nil {
+ t.Fatalf("could not start docker mongo: %s", err)
+ }
+
+ return svc.Cleanup, svc.Config.URL().String()
+}
diff --git a/helper/testhelpers/mssql/mssqlhelper.go b/helper/testhelpers/mssql/mssqlhelper.go
new file mode 100644
index 0000000..e4a19e1
--- /dev/null
+++ b/helper/testhelpers/mssql/mssqlhelper.go
@@ -0,0 +1,78 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package mssqlhelper
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "net/url"
+ "os"
+ "testing"
+
+ "github.com/hashicorp/vault/sdk/helper/docker"
+)
+
+const mssqlPassword = "yourStrong(!)Password"
+
+// This constant is used in retrying the mssql container restart, since
+// intermittently the container starts but mssql within the container
+// is unreachable. 
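+// Each retry provisions a brand-new container through docker.NewServiceRunner
+// rather than reusing the one that failed.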
+const numRetries = 3
+
+func PrepareMSSQLTestContainer(t *testing.T) (cleanup func(), retURL string) {
+ if os.Getenv("MSSQL_URL") != "" {
+ return func() {}, os.Getenv("MSSQL_URL")
+ }
+
+ var err error
+ for i := 0; i < numRetries; i++ {
+ var svc *docker.Service
+ // Assign to the outer err (rather than shadowing it with :=) so the
+ // final t.Fatalf below reports the last failure.
+ var runner *docker.Runner
+ runner, err = docker.NewServiceRunner(docker.RunOptions{
+ ContainerName: "sqlserver",
+ ImageRepo: "mcr.microsoft.com/mssql/server",
+ ImageTag: "2017-latest-ubuntu",
+ Env: []string{"ACCEPT_EULA=Y", "SA_PASSWORD=" + mssqlPassword},
+ Ports: []string{"1433/tcp"},
+ LogConsumer: func(s string) {
+ if t.Failed() {
+ t.Logf("container logs: %s", s)
+ }
+ },
+ })
+ if err != nil {
+ t.Fatalf("Could not start docker MSSQL: %s", err)
+ }
+
+ svc, err = runner.StartService(context.Background(), connectMSSQL)
+ if err == nil {
+ return svc.Cleanup, svc.Config.URL().String()
+ }
+ }
+
+ t.Fatalf("Could not start docker MSSQL: %s", err)
+ return nil, ""
+}
+
+func connectMSSQL(ctx context.Context, host string, port int) (docker.ServiceConfig, error) {
+ u := url.URL{
+ Scheme: "sqlserver",
+ User: url.UserPassword("sa", mssqlPassword),
+ Host: fmt.Sprintf("%s:%d", host, port),
+ }
+ // Attempt to address connection flakiness within tests such as "Failed to initialize: error verifying connection ..."
+ // Note: u.Query() returns a copy, so the parameter must be written back
+ // through RawQuery for it to take effect.
+ q := u.Query()
+ q.Add("Connection Timeout", "30")
+ u.RawQuery = q.Encode()
+
+ db, err := sql.Open("mssql", u.String())
+ if err != nil {
+ return nil, err
+ }
+ defer db.Close()
+
+ err = db.Ping()
+ if err != nil {
+ return nil, err
+ }
+ return docker.NewServiceURL(u), nil
+}
diff --git a/helper/testhelpers/mysql/mysqlhelper.go b/helper/testhelpers/mysql/mysqlhelper.go
new file mode 100644
index 0000000..31490dc
--- /dev/null
+++ b/helper/testhelpers/mysql/mysqlhelper.go
@@ -0,0 +1,74 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package mysqlhelper
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/hashicorp/vault/sdk/helper/docker"
+)
+
+type Config struct {
+ docker.ServiceHostPort
+ ConnString string
+}
+
+var _ docker.ServiceConfig = &Config{}
+
+func PrepareTestContainer(t *testing.T, legacy bool, pw string) (func(), string) {
+ if os.Getenv("MYSQL_URL") != "" {
+ return func() {}, os.Getenv("MYSQL_URL")
+ }
+
+ imageVersion := "5.7"
+ if legacy {
+ imageVersion = "5.6"
+ }
+
+ runner, err := docker.NewServiceRunner(docker.RunOptions{
+ ContainerName: "mysql",
+ ImageRepo: "docker.mirror.hashicorp.services/library/mysql",
+ ImageTag: imageVersion,
+ Ports: []string{"3306/tcp"},
+ Env: []string{"MYSQL_ROOT_PASSWORD=" + pw},
+ })
+ if err != nil {
+ t.Fatalf("could not start docker mysql: %s", err)
+ }
+
+ svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) {
+ hostIP := docker.NewServiceHostPort(host, port)
+ connString := fmt.Sprintf("root:%s@tcp(%s)/mysql?parseTime=true", pw, hostIP.Address())
+ db, err := sql.Open("mysql", connString)
+ if err != nil {
+ return nil, err
+ }
+ defer db.Close()
+ err = db.Ping()
+ if err != nil {
+ return nil, err
+ }
+ return &Config{ServiceHostPort: *hostIP, ConnString: connString}, nil
+ })
+ if err != nil {
+ t.Fatalf("could not start docker mysql: %s", err)
+ }
+ return svc.Cleanup, svc.Config.(*Config).ConnString
+}
+
+func TestCredsExist(t testing.TB, connURL, username, password string) error {
+ // Log in with the new creds
+ connURL = strings.Replace(connURL, "root:secret", fmt.Sprintf("%s:%s", username, password), 1)
+ db, err := sql.Open("mysql", connURL)
+ if 
err != nil { + return err + } + defer db.Close() + return db.Ping() +} diff --git a/helper/testhelpers/pluginhelpers/pluginhelpers.go b/helper/testhelpers/pluginhelpers/pluginhelpers.go new file mode 100644 index 0000000..e9a0067 --- /dev/null +++ b/helper/testhelpers/pluginhelpers/pluginhelpers.go @@ -0,0 +1,146 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package pluginhelpers contains testhelpers that don't depend on package +// vault, and thus can be used within vault (as well as elsewhere.) +package pluginhelpers + +import ( + "crypto/sha256" + "fmt" + "os" + "os/exec" + "path" + "path/filepath" + "sync" + + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/mitchellh/go-testing-interface" +) + +var ( + testPluginCacheLock sync.Mutex + testPluginCache = map[string][]byte{} +) + +type TestPlugin struct { + Name string + Typ consts.PluginType + Version string + FileName string + Sha256 string +} + +func GetPlugin(t testing.T, typ consts.PluginType) (string, string, string, string) { + t.Helper() + var pluginName string + var pluginType string + var pluginMain string + var pluginVersionLocation string + + switch typ { + case consts.PluginTypeCredential: + pluginType = "approle" + pluginName = "vault-plugin-auth-" + pluginType + pluginMain = filepath.Join("builtin", "credential", pluginType, "cmd", pluginType, "main.go") + pluginVersionLocation = fmt.Sprintf("github.com/hashicorp/vault/builtin/credential/%s.ReportedVersion", pluginType) + case consts.PluginTypeSecrets: + pluginType = "consul" + pluginName = "vault-plugin-secrets-" + pluginType + pluginMain = filepath.Join("builtin", "logical", pluginType, "cmd", pluginType, "main.go") + pluginVersionLocation = fmt.Sprintf("github.com/hashicorp/vault/builtin/logical/%s.ReportedVersion", pluginType) + case consts.PluginTypeDatabase: + pluginType = "postgresql" + pluginName = "vault-plugin-database-" + pluginType + pluginMain = filepath.Join("plugins", "database", pluginType, fmt.Sprintf("%s-database-plugin", pluginType), "main.go") + pluginVersionLocation = fmt.Sprintf("github.com/hashicorp/vault/plugins/database/%s.ReportedVersion", pluginType) + default: + t.Fatal(typ.String()) + } + return pluginName, pluginType, pluginMain, pluginVersionLocation +} + +// to mount a plugin, we need a working binary plugin, so we compile one here. 
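+// Compiled binaries are cached in-process, keyed by plugin name, type, and
+// version, so repeated calls within one test run don't recompile.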
+// pluginVersion is used to override the plugin's self-reported version
+func CompilePlugin(t testing.T, typ consts.PluginType, pluginVersion string, pluginDir string) TestPlugin {
+	t.Helper()
+
+	pluginName, pluginType, pluginMain, pluginVersionLocation := GetPlugin(t, typ)
+
+	testPluginCacheLock.Lock()
+	defer testPluginCacheLock.Unlock()
+
+	var pluginBytes []byte
+
+	dir := ""
+	var err error
+	pluginRootDir := "builtin"
+	if typ == consts.PluginTypeDatabase {
+		pluginRootDir = "plugins"
+	}
+	for {
+		dir, err = os.Getwd()
+		if err != nil {
+			t.Fatal(err)
+		}
+		// detect if we are in a subdirectory or the root directory and compensate
+		if _, err := os.Stat(pluginRootDir); os.IsNotExist(err) {
+			err := os.Chdir("..")
+			if err != nil {
+				t.Fatal(err)
+			}
+		} else {
+			break
+		}
+	}
+
+	pluginPath := path.Join(pluginDir, pluginName)
+	if pluginVersion != "" {
+		pluginPath += "-" + pluginVersion
+	}
+
+	key := fmt.Sprintf("%s %s %s", pluginName, pluginType, pluginVersion)
+	// cache the compilation to only run once
+	var ok bool
+	pluginBytes, ok = testPluginCache[key]
+	if !ok {
+		// we need to compile
+		line := []string{"build"}
+		if pluginVersion != "" {
+			line = append(line, "-ldflags", fmt.Sprintf("-X %s=%s", pluginVersionLocation, pluginVersion))
+		}
+		line = append(line, "-o", pluginPath, pluginMain)
+		cmd := exec.Command("go", line...)
+		cmd.Dir = dir
+		output, err := cmd.CombinedOutput()
+		if err != nil {
+			t.Fatal(fmt.Errorf("error running go build %v output: %s", err, output))
+		}
+		testPluginCache[key], err = os.ReadFile(pluginPath)
+		if err != nil {
+			t.Fatal(err)
+		}
+		pluginBytes = testPluginCache[key]
+	}
+
+	// write the cached plugin if necessary; a distinct variable holds the
+	// Stat result so that any WriteFile error lands in the outer err and is
+	// caught below
+	if _, statErr := os.Stat(pluginPath); os.IsNotExist(statErr) {
+		err = os.WriteFile(pluginPath, pluginBytes, 0o755)
+	}
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	sha := sha256.New()
+	_, err = sha.Write(pluginBytes)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return TestPlugin{
+		Name:     pluginName,
+		Typ:      typ,
+		Version:  pluginVersion,
+		FileName: path.Base(pluginPath),
+		Sha256:   fmt.Sprintf("%x", sha.Sum(nil)),
+	}
+}
diff --git a/helper/testhelpers/postgresql/postgresqlhelper.go b/helper/testhelpers/postgresql/postgresqlhelper.go
new file mode 100644
index 0000000..4f90177
--- /dev/null
+++ b/helper/testhelpers/postgresql/postgresqlhelper.go
@@ -0,0 +1,138 @@
+// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package postgresql
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"net/url"
+	"os"
+	"testing"
+
+	"github.com/hashicorp/vault/sdk/helper/docker"
+)
+
+func PrepareTestContainer(t *testing.T, version string) (func(), string) {
+	env := []string{
+		"POSTGRES_PASSWORD=secret",
+		"POSTGRES_DB=database",
+	}
+
+	_, cleanup, url, _ := prepareTestContainer(t, "postgres", "docker.mirror.hashicorp.services/postgres", version, "secret", true, false, false, env)
+
+	return cleanup, url
+}
+
+// PrepareTestContainerWithVaultUser will setup a test container with a Vault
+// admin user configured so that we can safely call rotate-root without
+// rotating the root DB credentials
+func PrepareTestContainerWithVaultUser(t *testing.T, ctx context.Context, version string) (func(), string) {
+	env := []string{
+		"POSTGRES_PASSWORD=secret",
+		"POSTGRES_DB=database",
+	}
+
+	runner, cleanup, url, id := prepareTestContainer(t, "postgres", "docker.mirror.hashicorp.services/postgres", version, "secret", true, false, false, env)
+
+	cmd := []string{"psql", "-U", "postgres", "-c", "CREATE USER vaultadmin WITH LOGIN PASSWORD 'vaultpass' SUPERUSER"}
+	_, err := runner.RunCmdInBackground(ctx, id, cmd)
+	if err != nil {
+		t.Fatalf("Could not run command (%v) in container: %v", cmd, err)
+	}
+
+	return cleanup, url
+}
+
+func PrepareTestContainerWithPassword(t *testing.T, version, password string) (func(), string) {
+	env := []string{
+		"POSTGRES_PASSWORD=" + password,
+		"POSTGRES_DB=database",
+	}
+
+	_, cleanup, url, _ := prepareTestContainer(t, "postgres", "docker.mirror.hashicorp.services/postgres", version, password, true, false, false, env)
+
+	return cleanup, url
+}
+
+func PrepareTestContainerRepmgr(t *testing.T, name, version string, envVars []string) (*docker.Runner, func(), string, string) {
+	env := append(envVars,
+		"REPMGR_PARTNER_NODES=psql-repl-node-0,psql-repl-node-1",
+		"REPMGR_PRIMARY_HOST=psql-repl-node-0",
+		"REPMGR_PASSWORD=repmgrpass",
+		"POSTGRESQL_PASSWORD=secret")
+
+	return prepareTestContainer(t, name, "docker.mirror.hashicorp.services/bitnami/postgresql-repmgr", version, "secret", false, true, true, env)
+}
+
+func prepareTestContainer(t *testing.T, name, repo, version, password string,
+	addSuffix, forceLocalAddr, doNotAutoRemove bool, envVars []string,
+) (*docker.Runner, func(), string, string) {
+	if os.Getenv("PG_URL") != "" {
+		// PG_URL belongs in the URL slot (third return value), not the
+		// container ID slot, so that callers see the external URL.
+		return nil, func() {}, os.Getenv("PG_URL"), ""
+	}
+
+	if version == "" {
+		version = "11"
+	}
+
+	runOpts := docker.RunOptions{
+		ContainerName:   name,
+		ImageRepo:       repo,
+		ImageTag:        version,
+		Env:             envVars,
+		Ports:           []string{"5432/tcp"},
+		DoNotAutoRemove: doNotAutoRemove,
+	}
+	if repo == "bitnami/postgresql-repmgr" {
+		runOpts.NetworkID = os.Getenv("POSTGRES_MULTIHOST_NET")
+	}
+
+	runner, err := docker.NewServiceRunner(runOpts)
+	if err != nil {
+		t.Fatalf("Could not start docker Postgres: %s", err)
+	}
+
+	svc, containerID, err := runner.StartNewService(context.Background(), addSuffix, forceLocalAddr, connectPostgres(password, repo))
+	if err != nil {
+		t.Fatalf("Could not start docker Postgres: %s", err)
+	}
+
+	return runner, svc.Cleanup, svc.Config.URL().String(), containerID
+}
+
+func connectPostgres(password, repo string) docker.ServiceAdapter {
+	return func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) {
+		u := url.URL{
+			Scheme:   "postgres",
+			User:     url.UserPassword("postgres", password),
+			Host:     fmt.Sprintf("%s:%d", host, port),
+			Path:     "postgres",
+			RawQuery: 
"sslmode=disable", + } + + db, err := sql.Open("pgx", u.String()) + if err != nil { + return nil, err + } + defer db.Close() + + if err = db.Ping(); err != nil { + return nil, err + } + return docker.NewServiceURL(u), nil + } +} + +func StopContainer(t *testing.T, ctx context.Context, runner *docker.Runner, containerID string) { + if err := runner.Stop(ctx, containerID); err != nil { + t.Fatalf("Could not stop docker Postgres: %s", err) + } +} + +func RestartContainer(t *testing.T, ctx context.Context, runner *docker.Runner, containerID string) { + if err := runner.Restart(ctx, containerID); err != nil { + t.Fatalf("Could not restart docker Postgres: %s", err) + } +} diff --git a/helper/testhelpers/seal/sealhelper.go b/helper/testhelpers/seal/sealhelper.go new file mode 100644 index 0000000..b05401f --- /dev/null +++ b/helper/testhelpers/seal/sealhelper.go @@ -0,0 +1,78 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package sealhelper + +import ( + "path" + "strconv" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/transit" + "github.com/hashicorp/vault/helper/testhelpers/teststorage" + "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/vault/seal" + "github.com/mitchellh/go-testing-interface" +) + +type TransitSealServer struct { + *vault.TestCluster +} + +func NewTransitSealServer(t testing.T, idx int) *TransitSealServer { + conf := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "transit": transit.Factory, + }, + } + opts := &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: http.Handler, + Logger: logging.NewVaultLogger(hclog.Trace).Named(t.Name()).Named("transit-seal" + strconv.Itoa(idx)), + } + teststorage.InmemBackendSetup(conf, opts) + cluster := vault.NewTestCluster(t, conf, opts) + cluster.Start() + + if err := cluster.Cores[0].Client.Sys().Mount("transit", &api.MountInput{ + Type: "transit", + }); err != nil { + t.Fatal(err) + } + + return &TransitSealServer{cluster} +} + +func (tss *TransitSealServer) MakeKey(t testing.T, key string) { + client := tss.Cores[0].Client + if _, err := client.Logical().Write(path.Join("transit", "keys", key), nil); err != nil { + t.Fatal(err) + } + if _, err := client.Logical().Write(path.Join("transit", "keys", key, "config"), map[string]interface{}{ + "deletion_allowed": true, + }); err != nil { + t.Fatal(err) + } +} + +func (tss *TransitSealServer) MakeSeal(t testing.T, key string) (vault.Seal, error) { + client := tss.Cores[0].Client + wrapperConfig := map[string]string{ + "address": client.Address(), + "token": client.Token(), + "mount_path": "transit", + "key_name": key, + "tls_ca_cert": tss.CACertPEMFile, + } + transitSeal, _, err := configutil.GetTransitKMSFunc(&configutil.KMS{Config: wrapperConfig}) + if err != nil { + t.Fatalf("error setting wrapper config: %v", err) + } + + return vault.NewAutoSeal(seal.NewAccess(transitSeal)) +} diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go new file mode 100644 index 0000000..6ae5160 --- /dev/null +++ b/helper/testhelpers/testhelpers.go @@ -0,0 +1,1055 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package testhelpers + +import ( + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "math/rand" + "net/url" + "os" + "strings" + "sync/atomic" + "time" + + "github.com/armon/go-metrics" + raftlib "github.com/hashicorp/raft" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/metricsutil" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/physical/raft" + "github.com/hashicorp/vault/sdk/helper/xor" + "github.com/hashicorp/vault/vault" + "github.com/mitchellh/go-testing-interface" +) + +type GenerateRootKind int + +const ( + GenerateRootRegular GenerateRootKind = iota + GenerateRootDR + GenerateRecovery +) + +// GenerateRoot generates a root token on the target cluster. +func GenerateRoot(t testing.T, cluster *vault.TestCluster, kind GenerateRootKind) string { + t.Helper() + token, err := GenerateRootWithError(t, cluster, kind) + if err != nil { + t.Fatal(err) + } + return token +} + +func GenerateRootWithError(t testing.T, cluster *vault.TestCluster, kind GenerateRootKind) (string, error) { + t.Helper() + // If recovery keys supported, use those to perform root token generation instead + var keys [][]byte + if cluster.Cores[0].SealAccess().RecoveryKeySupported() { + keys = cluster.RecoveryKeys + } else { + keys = cluster.BarrierKeys + } + client := cluster.Cores[0].Client + oldNS := client.Namespace() + defer client.SetNamespace(oldNS) + client.ClearNamespace() + + var err error + var status *api.GenerateRootStatusResponse + switch kind { + case GenerateRootRegular: + status, err = client.Sys().GenerateRootInit("", "") + case GenerateRootDR: + status, err = client.Sys().GenerateDROperationTokenInit("", "") + case GenerateRecovery: + status, err = client.Sys().GenerateRecoveryOperationTokenInit("", "") + } + if err != nil { + return "", err + } + + if status.Required > len(keys) { + return "", fmt.Errorf("need more keys than have, need %d have %d", status.Required, len(keys)) + } + + otp := status.OTP + + for i, key := range keys { + if i >= status.Required { + break + } + + strKey := base64.StdEncoding.EncodeToString(key) + switch kind { + case GenerateRootRegular: + status, err = client.Sys().GenerateRootUpdate(strKey, status.Nonce) + case GenerateRootDR: + status, err = client.Sys().GenerateDROperationTokenUpdate(strKey, status.Nonce) + case GenerateRecovery: + status, err = client.Sys().GenerateRecoveryOperationTokenUpdate(strKey, status.Nonce) + } + if err != nil { + return "", err + } + } + if !status.Complete { + return "", errors.New("generate root operation did not end successfully") + } + + tokenBytes, err := base64.RawStdEncoding.DecodeString(status.EncodedToken) + if err != nil { + return "", err + } + tokenBytes, err = xor.XORBytes(tokenBytes, []byte(otp)) + if err != nil { + return "", err + } + return string(tokenBytes), nil +} + +// RandomWithPrefix is used to generate a unique name with a prefix, for +// randomizing names in acceptance tests +func RandomWithPrefix(name string) string { + return fmt.Sprintf("%s-%d", name, rand.New(rand.NewSource(time.Now().UnixNano())).Int()) +} + +func EnsureCoresSealed(t testing.T, c *vault.TestCluster) { + t.Helper() + for _, core := range c.Cores { + EnsureCoreSealed(t, core) + } +} + +func EnsureCoreSealed(t testing.T, core *vault.TestClusterCore) { + t.Helper() + core.Seal(t) + timeout := time.Now().Add(60 * time.Second) + for { + if time.Now().After(timeout) { + t.Fatal("timeout waiting for core to seal") + } + if 
core.Core.Sealed() { + break + } + time.Sleep(250 * time.Millisecond) + } +} + +func EnsureCoresUnsealed(t testing.T, c *vault.TestCluster) { + t.Helper() + for i, core := range c.Cores { + err := AttemptUnsealCore(c, core) + if err != nil { + t.Fatalf("failed to unseal core %d: %v", i, err) + } + } +} + +func EnsureCoreUnsealed(t testing.T, c *vault.TestCluster, core *vault.TestClusterCore) { + t.Helper() + err := AttemptUnsealCore(c, core) + if err != nil { + t.Fatalf("failed to unseal core: %v", err) + } +} + +func AttemptUnsealCores(c *vault.TestCluster) error { + for i, core := range c.Cores { + err := AttemptUnsealCore(c, core) + if err != nil { + return fmt.Errorf("failed to unseal core %d: %v", i, err) + } + } + return nil +} + +func AttemptUnsealCore(c *vault.TestCluster, core *vault.TestClusterCore) error { + if !core.Sealed() { + return nil + } + + core.SealAccess().ClearCaches(context.Background()) + if err := core.UnsealWithStoredKeys(context.Background()); err != nil { + return err + } + + client := core.Client + oldNS := client.Namespace() + defer client.SetNamespace(oldNS) + client.ClearNamespace() + + client.Sys().ResetUnsealProcess() + for j := 0; j < len(c.BarrierKeys); j++ { + statusResp, err := client.Sys().Unseal(base64.StdEncoding.EncodeToString(c.BarrierKeys[j])) + if err != nil { + // Sometimes when we get here it's already unsealed on its own + // and then this fails for DR secondaries so check again + if core.Sealed() { + return err + } else { + return nil + } + } + if statusResp == nil { + return fmt.Errorf("nil status response during unseal") + } + if !statusResp.Sealed { + break + } + } + if core.Sealed() { + return fmt.Errorf("core is still sealed") + } + return nil +} + +func EnsureStableActiveNode(t testing.T, cluster *vault.TestCluster) { + t.Helper() + deriveStableActiveCore(t, cluster) +} + +func DeriveStableActiveCore(t testing.T, cluster *vault.TestCluster) *vault.TestClusterCore { + t.Helper() + return deriveStableActiveCore(t, cluster) +} + +func deriveStableActiveCore(t testing.T, cluster *vault.TestCluster) *vault.TestClusterCore { + t.Helper() + activeCore := DeriveActiveCore(t, cluster) + minDuration := time.NewTimer(3 * time.Second) + + for i := 0; i < 60; i++ { + leaderResp, err := activeCore.Client.Sys().Leader() + if err != nil { + t.Fatal(err) + } + if !leaderResp.IsSelf { + minDuration.Reset(3 * time.Second) + } + time.Sleep(200 * time.Millisecond) + } + + select { + case <-minDuration.C: + default: + if stopped := minDuration.Stop(); stopped { + t.Fatal("unstable active node") + } + // Drain the value + <-minDuration.C + } + + return activeCore +} + +func DeriveActiveCore(t testing.T, cluster *vault.TestCluster) *vault.TestClusterCore { + t.Helper() + for i := 0; i < 60; i++ { + for _, core := range cluster.Cores { + oldNS := core.Client.Namespace() + core.Client.ClearNamespace() + leaderResp, err := core.Client.Sys().Leader() + core.Client.SetNamespace(oldNS) + if err != nil { + t.Fatal(err) + } + if leaderResp.IsSelf { + return core + } + } + time.Sleep(1 * time.Second) + } + t.Fatal("could not derive the active core") + return nil +} + +func DeriveStandbyCores(t testing.T, cluster *vault.TestCluster) []*vault.TestClusterCore { + t.Helper() + cores := make([]*vault.TestClusterCore, 0, 2) + for _, core := range cluster.Cores { + oldNS := core.Client.Namespace() + core.Client.ClearNamespace() + leaderResp, err := core.Client.Sys().Leader() + core.Client.SetNamespace(oldNS) + if err != nil { + t.Fatal(err) + } + if !leaderResp.IsSelf { + 
cores = append(cores, core) + } + } + + return cores +} + +func WaitForNCoresUnsealed(t testing.T, cluster *vault.TestCluster, n int) { + t.Helper() + for i := 0; i < 30; i++ { + unsealed := 0 + for _, core := range cluster.Cores { + if !core.Core.Sealed() { + unsealed++ + } + } + + if unsealed >= n { + return + } + time.Sleep(time.Second) + } + + t.Fatalf("%d cores were not unsealed", n) +} + +func SealCores(t testing.T, cluster *vault.TestCluster) { + t.Helper() + for _, core := range cluster.Cores { + if err := core.Shutdown(); err != nil { + t.Fatal(err) + } + timeout := time.Now().Add(3 * time.Second) + for { + if time.Now().After(timeout) { + t.Fatal("timeout waiting for core to seal") + } + if core.Sealed() { + break + } + time.Sleep(100 * time.Millisecond) + } + } +} + +func WaitForNCoresSealed(t testing.T, cluster *vault.TestCluster, n int) { + t.Helper() + for i := 0; i < 60; i++ { + sealed := 0 + for _, core := range cluster.Cores { + if core.Core.Sealed() { + sealed++ + } + } + + if sealed >= n { + return + } + time.Sleep(time.Second) + } + + t.Fatalf("%d cores were not sealed", n) +} + +func WaitForActiveNode(t testing.T, cluster *vault.TestCluster) *vault.TestClusterCore { + t.Helper() + for i := 0; i < 60; i++ { + for _, core := range cluster.Cores { + if standby, _ := core.Core.Standby(); !standby { + return core + } + } + + time.Sleep(time.Second) + } + + t.Fatalf("node did not become active") + return nil +} + +func WaitForStandbyNode(t testing.T, core *vault.TestClusterCore) { + t.Helper() + for i := 0; i < 30; i++ { + if isLeader, _, clusterAddr, _ := core.Core.Leader(); isLeader != true && clusterAddr != "" { + return + } + if core.Core.ActiveNodeReplicationState() == 0 { + return + } + + time.Sleep(time.Second) + } + + t.Fatalf("node did not become standby") +} + +func RekeyCluster(t testing.T, cluster *vault.TestCluster, recovery bool) [][]byte { + t.Helper() + cluster.Logger.Info("rekeying cluster", "recovery", recovery) + client := cluster.Cores[0].Client + + initFunc := client.Sys().RekeyInit + if recovery { + initFunc = client.Sys().RekeyRecoveryKeyInit + } + init, err := initFunc(&api.RekeyInitRequest{ + SecretShares: 5, + SecretThreshold: 3, + }) + if err != nil { + t.Fatal(err) + } + + var statusResp *api.RekeyUpdateResponse + keys := cluster.BarrierKeys + if cluster.Cores[0].Core.SealAccess().RecoveryKeySupported() { + keys = cluster.RecoveryKeys + } + + updateFunc := client.Sys().RekeyUpdate + if recovery { + updateFunc = client.Sys().RekeyRecoveryKeyUpdate + } + for j := 0; j < len(keys); j++ { + statusResp, err = updateFunc(base64.StdEncoding.EncodeToString(keys[j]), init.Nonce) + if err != nil { + t.Fatal(err) + } + if statusResp == nil { + t.Fatal("nil status response during unseal") + } + if statusResp.Complete { + break + } + } + cluster.Logger.Info("cluster rekeyed", "recovery", recovery) + + if cluster.Cores[0].Core.SealAccess().RecoveryKeySupported() && !recovery { + return nil + } + if len(statusResp.KeysB64) != 5 { + t.Fatal("wrong number of keys") + } + + newKeys := make([][]byte, 5) + for i, key := range statusResp.KeysB64 { + newKeys[i], err = base64.StdEncoding.DecodeString(key) + if err != nil { + t.Fatal(err) + } + } + return newKeys +} + +// TestRaftServerAddressProvider is a ServerAddressProvider that uses the +// ClusterAddr() of each node to provide raft addresses. 
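+//
+// Illustrative usage (editor's sketch, not part of the original change):
+//
+//	provider := &TestRaftServerAddressProvider{Cluster: cluster}
+//	core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(provider)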
+// +// Note that TestRaftServerAddressProvider should only be used in cases where +// cores that are part of a raft configuration have already had +// startClusterListener() called (via either unsealing or raft joining). +type TestRaftServerAddressProvider struct { + Cluster *vault.TestCluster +} + +func (p *TestRaftServerAddressProvider) ServerAddr(id raftlib.ServerID) (raftlib.ServerAddress, error) { + for _, core := range p.Cluster.Cores { + if core.NodeID == string(id) { + parsed, err := url.Parse(core.ClusterAddr()) + if err != nil { + return "", err + } + + return raftlib.ServerAddress(parsed.Host), nil + } + } + + return "", errors.New("could not find cluster addr") +} + +func RaftClusterJoinNodes(t testing.T, cluster *vault.TestCluster) { + addressProvider := &TestRaftServerAddressProvider{Cluster: cluster} + + atomic.StoreUint32(&vault.TestingUpdateClusterAddr, 1) + + leader := cluster.Cores[0] + + // Seal the leader so we can install an address provider + { + EnsureCoreSealed(t, leader) + leader.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) + cluster.UnsealCore(t, leader) + vault.TestWaitActive(t, leader.Core) + } + + leaderInfos := []*raft.LeaderJoinInfo{ + { + LeaderAPIAddr: leader.Client.Address(), + TLSConfig: leader.TLSConfig(), + }, + } + + // Join followers + for i := 1; i < len(cluster.Cores); i++ { + core := cluster.Cores[i] + core.UnderlyingRawStorage.(*raft.RaftBackend).SetServerAddressProvider(addressProvider) + _, err := core.JoinRaftCluster(namespace.RootContext(context.Background()), leaderInfos, false) + if err != nil { + t.Fatal(err) + } + + cluster.UnsealCore(t, core) + } + + WaitForNCoresUnsealed(t, cluster, len(cluster.Cores)) +} + +// HardcodedServerAddressProvider is a ServerAddressProvider that uses +// a hardcoded map of raft node addresses. +// +// It is useful in cases where the raft configuration is known ahead of time, +// but some of the cores have not yet had startClusterListener() called (via +// either unsealing or raft joining), and thus do not yet have a ClusterAddr() +// assigned. +type HardcodedServerAddressProvider struct { + Entries map[raftlib.ServerID]raftlib.ServerAddress +} + +func (p *HardcodedServerAddressProvider) ServerAddr(id raftlib.ServerID) (raftlib.ServerAddress, error) { + if addr, ok := p.Entries[id]; ok { + return addr, nil + } + return "", errors.New("could not find cluster addr") +} + +// NewHardcodedServerAddressProvider is a convenience function that makes a +// ServerAddressProvider from a given cluster address base port. +func NewHardcodedServerAddressProvider(numCores, baseClusterPort int) raftlib.ServerAddressProvider { + entries := make(map[raftlib.ServerID]raftlib.ServerAddress) + + for i := 0; i < numCores; i++ { + id := fmt.Sprintf("core-%d", i) + addr := fmt.Sprintf("127.0.0.1:%d", baseClusterPort+i) + entries[raftlib.ServerID(id)] = raftlib.ServerAddress(addr) + } + + return &HardcodedServerAddressProvider{ + entries, + } +} + +// VerifyRaftConfiguration checks that we have a valid raft configuration, i.e. +// the correct number of servers, having the correct NodeIDs, and exactly one +// leader. 
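+//
+// Editor's sketch of a typical polling call site, assuming a 3-node cluster:
+//
+//	RetryUntil(t, 15*time.Second, func() error {
+//		return VerifyRaftConfiguration(cluster.Cores[0], 3)
+//	})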
+func VerifyRaftConfiguration(core *vault.TestClusterCore, numCores int) error { + backend := core.UnderlyingRawStorage.(*raft.RaftBackend) + ctx := namespace.RootContext(context.Background()) + config, err := backend.GetConfiguration(ctx) + if err != nil { + return err + } + + servers := config.Servers + if len(servers) != numCores { + return fmt.Errorf("Found %d servers, not %d", len(servers), numCores) + } + + leaders := 0 + for i, s := range servers { + if s.NodeID != fmt.Sprintf("core-%d", i) { + return fmt.Errorf("Found unexpected node ID %q", s.NodeID) + } + if s.Leader { + leaders++ + } + } + + if leaders != 1 { + return fmt.Errorf("Found %d leaders", leaders) + } + + return nil +} + +func RaftAppliedIndex(core *vault.TestClusterCore) uint64 { + return core.UnderlyingRawStorage.(*raft.RaftBackend).AppliedIndex() +} + +func WaitForRaftApply(t testing.T, core *vault.TestClusterCore, index uint64) { + t.Helper() + + backend := core.UnderlyingRawStorage.(*raft.RaftBackend) + for i := 0; i < 30; i++ { + if backend.AppliedIndex() >= index { + return + } + + time.Sleep(time.Second) + } + + t.Fatalf("node did not apply index") +} + +// AwaitLeader waits for one of the cluster's nodes to become leader. +func AwaitLeader(t testing.T, cluster *vault.TestCluster) (int, error) { + timeout := time.Now().Add(60 * time.Second) + for { + if time.Now().After(timeout) { + break + } + + for i, core := range cluster.Cores { + if core.Core.Sealed() { + continue + } + + isLeader, _, _, _ := core.Leader() + if isLeader { + return i, nil + } + } + + time.Sleep(time.Second) + } + + return 0, fmt.Errorf("timeout waiting leader") +} + +func GenerateDebugLogs(t testing.T, client *api.Client) chan struct{} { + t.Helper() + + stopCh := make(chan struct{}) + + go func() { + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + for { + select { + case <-stopCh: + return + case <-ticker.C: + err := client.Sys().Mount("foo", &api.MountInput{ + Type: "kv", + Options: map[string]string{ + "version": "1", + }, + }) + if err != nil { + t.Fatal(err) + } + + err = client.Sys().Unmount("foo") + if err != nil { + t.Fatal(err) + } + } + } + }() + + return stopCh +} + +// VerifyRaftPeers verifies that the raft configuration contains a given set of peers. +// The `expected` contains a map of expected peers. Existing entries are deleted +// from the map by removing entries whose keys are in the raft configuration. +// Remaining entries result in an error return so that the caller can poll for +// an expected configuration. +func VerifyRaftPeers(t testing.T, client *api.Client, expected map[string]bool) error { + t.Helper() + + resp, err := client.Logical().Read("sys/storage/raft/configuration") + if err != nil { + t.Fatalf("error reading raft config: %v", err) + } + + if resp == nil || resp.Data == nil { + t.Fatal("missing response data") + } + + config, ok := resp.Data["config"].(map[string]interface{}) + if !ok { + t.Fatal("missing config in response data") + } + + servers, ok := config["servers"].([]interface{}) + if !ok { + t.Fatal("missing servers in response data config") + } + + // Iterate through the servers and remove the node found in the response + // from the expected collection + for _, s := range servers { + server := s.(map[string]interface{}) + delete(expected, server["node_id"].(string)) + } + + // If the collection is non-empty, it means that the peer was not found in + // the response. 
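+	//
+	// Editor's sketch: because found peers are deleted from the map, callers
+	// should build a fresh map on each polling attempt, e.g.
+	//
+	//	RetryUntil(t, 15*time.Second, func() error {
+	//		return VerifyRaftPeers(t, client, map[string]bool{"core-0": true, "core-1": true})
+	//	})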
+	if len(expected) != 0 {
+		return fmt.Errorf("failed to read configuration successfully, expected peers not found in configuration list: %v", expected)
+	}
+
+	return nil
+}
+
+func TestMetricSinkProvider(gaugeInterval time.Duration) func(string) (*metricsutil.ClusterMetricSink, *metricsutil.MetricsHelper) {
+	return func(clusterName string) (*metricsutil.ClusterMetricSink, *metricsutil.MetricsHelper) {
+		inm := metrics.NewInmemSink(1000000*time.Hour, 2000000*time.Hour)
+		clusterSink := metricsutil.NewClusterMetricSink(clusterName, inm)
+		clusterSink.GaugeInterval = gaugeInterval
+		return clusterSink, metricsutil.NewMetricsHelper(inm, false)
+	}
+}
+
+func SysMetricsReq(client *api.Client, cluster *vault.TestCluster, unauth bool) (*SysMetricsJSON, error) {
+	r := client.NewRequest("GET", "/v1/sys/metrics")
+	if !unauth {
+		r.Headers.Set("X-Vault-Token", cluster.RootToken)
+	}
+	var data SysMetricsJSON
+	resp, err := client.RawRequestWithContext(context.Background(), r)
+	if err != nil {
+		return nil, err
+	}
+	// Close the body on all paths, including a ReadAll failure.
+	defer resp.Body.Close()
+	bodyBytes, err := ioutil.ReadAll(resp.Response.Body)
+	if err != nil {
+		return nil, err
+	}
+	if err := json.Unmarshal(bodyBytes, &data); err != nil {
+		return nil, errors.New("failed to unmarshal: " + err.Error())
+	}
+	return &data, nil
+}
+
+type SysMetricsJSON struct {
+	Gauges   []gaugeJSON   `json:"Gauges"`
+	Counters []counterJSON `json:"Counters"`
+
+	// note: this is referred to as a "Summary" type in our telemetry docs, but
+	// the field name in the JSON is "Samples"
+	Summaries []summaryJSON `json:"Samples"`
+}
+
+type baseInfoJSON struct {
+	Name   string                 `json:"Name"`
+	Labels map[string]interface{} `json:"Labels"`
+}
+
+type gaugeJSON struct {
+	baseInfoJSON
+	Value int `json:"Value"`
+}
+
+type counterJSON struct {
+	baseInfoJSON
+	Count  int     `json:"Count"`
+	Rate   float64 `json:"Rate"`
+	Sum    int     `json:"Sum"`
+	Min    int     `json:"Min"`
+	Max    int     `json:"Max"`
+	Mean   float64 `json:"Mean"`
+	Stddev float64 `json:"Stddev"`
+}
+
+type summaryJSON struct {
+	baseInfoJSON
+	Count  int     `json:"Count"`
+	Rate   float64 `json:"Rate"`
+	Sum    float64 `json:"Sum"`
+	Min    float64 `json:"Min"`
+	Max    float64 `json:"Max"`
+	Mean   float64 `json:"Mean"`
+	Stddev float64 `json:"Stddev"`
+}
+
+// SetNonRootToken sets a token on :client: with a fairly generic policy.
+// This is useful if a test needs to examine differing behavior based on if a
+// root token is passed with the request.
+func SetNonRootToken(client *api.Client) error {
+	policy := `path "*" { capabilities = ["create", "update", "read"] }`
+	if err := client.Sys().PutPolicy("policy", policy); err != nil {
+		return fmt.Errorf("error putting policy: %v", err)
+	}
+
+	secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{
+		Policies: []string{"policy"},
+		TTL:      "30m",
+	})
+	if err != nil {
+		return fmt.Errorf("error creating token secret: %v", err)
+	}
+
+	if secret == nil || secret.Auth == nil || secret.Auth.ClientToken == "" {
+		return fmt.Errorf("missing token auth data")
+	}
+
+	client.SetToken(secret.Auth.ClientToken)
+	return nil
+}
+
+// RetryUntilAtCadence runs f until it returns a nil result or the timeout is reached.
+// If a nil result hasn't been obtained by timeout, calls t.Fatal. 
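+//
+// Editor's sketch: waiting up to a minute, checking every five seconds:
+//
+//	RetryUntilAtCadence(t, time.Minute, 5*time.Second, func() error {
+//		return VerifyRaftPeers(t, client, map[string]bool{"core-0": true})
+//	})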
+func RetryUntilAtCadence(t testing.T, timeout, sleepTime time.Duration, f func() error) { + t.Helper() + deadline := time.Now().Add(timeout) + var err error + for time.Now().Before(deadline) { + if err = f(); err == nil { + return + } + time.Sleep(sleepTime) + } + t.Fatalf("did not complete before deadline, err: %v", err) +} + +// RetryUntil runs f until it returns a nil result or the timeout is reached. +// If a nil result hasn't been obtained by timeout, calls t.Fatal. +func RetryUntil(t testing.T, timeout time.Duration, f func() error) { + t.Helper() + deadline := time.Now().Add(timeout) + var err error + for time.Now().Before(deadline) { + if err = f(); err == nil { + return + } + time.Sleep(100 * time.Millisecond) + } + t.Fatalf("did not complete before deadline, err: %v", err) +} + +// CreateEntityAndAlias clones an existing client and creates an entity/alias. +// It returns the cloned client, entityID, and aliasID. +func CreateEntityAndAlias(t testing.T, client *api.Client, mountAccessor, entityName, aliasName string) (*api.Client, string, string) { + t.Helper() + userClient, err := client.Clone() + if err != nil { + t.Fatalf("failed to clone the client:%v", err) + } + userClient.SetToken(client.Token()) + + resp, err := client.Logical().WriteWithContext(context.Background(), "identity/entity", map[string]interface{}{ + "name": entityName, + }) + if err != nil { + t.Fatalf("failed to create an entity:%v", err) + } + entityID := resp.Data["id"].(string) + + aliasResp, err := client.Logical().WriteWithContext(context.Background(), "identity/entity-alias", map[string]interface{}{ + "name": aliasName, + "canonical_id": entityID, + "mount_accessor": mountAccessor, + }) + if err != nil { + t.Fatalf("failed to create an entity alias:%v", err) + } + aliasID := aliasResp.Data["id"].(string) + if aliasID == "" { + t.Fatal("Alias ID not present in response") + } + _, err = client.Logical().WriteWithContext(context.Background(), fmt.Sprintf("auth/userpass/users/%s", aliasName), map[string]interface{}{ + "password": "testpassword", + }) + if err != nil { + t.Fatalf("failed to configure userpass backend: %v", err) + } + + return userClient, entityID, aliasID +} + +// SetupTOTPMount enables the totp secrets engine by mounting it. This requires +// that the test cluster has a totp backend available. +func SetupTOTPMount(t testing.T, client *api.Client) { + t.Helper() + // Mount the TOTP backend + mountInfo := &api.MountInput{ + Type: "totp", + } + if err := client.Sys().Mount("totp", mountInfo); err != nil { + t.Fatalf("failed to mount totp backend: %v", err) + } +} + +// SetupTOTPMethod configures the TOTP secrets engine with a provided config map. +func SetupTOTPMethod(t testing.T, client *api.Client, config map[string]interface{}) string { + t.Helper() + + resp1, err := client.Logical().Write("identity/mfa/method/totp", config) + + if err != nil || (resp1 == nil) { + t.Fatalf("bad: resp: %#v\n err: %v", resp1, err) + } + + methodID := resp1.Data["method_id"].(string) + if methodID == "" { + t.Fatalf("method ID is empty") + } + + return methodID +} + +// SetupMFALoginEnforcement configures a single enforcement method using the +// provided config map. "name" field is required in the config map. 
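+//
+// Editor's sketch of a minimal config, mirroring the caller in
+// SetupLoginMFATOTP below:
+//
+//	SetupMFALoginEnforcement(t, client, map[string]interface{}{
+//		"name":           methodID[0:4],
+//		"mfa_method_ids": []string{methodID},
+//	})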
+func SetupMFALoginEnforcement(t testing.T, client *api.Client, config map[string]interface{}) { + t.Helper() + enfName, ok := config["name"] + if !ok { + t.Fatalf("couldn't find name in login-enforcement config") + } + _, err := client.Logical().WriteWithContext(context.Background(), fmt.Sprintf("identity/mfa/login-enforcement/%s", enfName), config) + if err != nil { + t.Fatalf("failed to configure MFAEnforcementConfig: %v", err) + } +} + +// SetupUserpassMountAccessor sets up userpass auth and returns its mount +// accessor. This requires that the test cluster has a "userpass" auth method +// available. +func SetupUserpassMountAccessor(t testing.T, client *api.Client) string { + t.Helper() + // Enable Userpass authentication + err := client.Sys().EnableAuthWithOptions("userpass", &api.EnableAuthOptions{ + Type: "userpass", + }) + if err != nil { + t.Fatalf("failed to enable userpass auth: %v", err) + } + + auths, err := client.Sys().ListAuthWithContext(context.Background()) + if err != nil { + t.Fatalf("failed to list auth methods: %v", err) + } + if auths == nil || auths["userpass/"] == nil { + t.Fatalf("failed to get userpass mount accessor") + } + + return auths["userpass/"].Accessor +} + +// RegisterEntityInTOTPEngine registers an entity with a methodID and returns +// the generated name. +func RegisterEntityInTOTPEngine(t testing.T, client *api.Client, entityID, methodID string) string { + t.Helper() + totpGenName := fmt.Sprintf("%s-%s", entityID, methodID) + secret, err := client.Logical().WriteWithContext(context.Background(), "identity/mfa/method/totp/admin-generate", map[string]interface{}{ + "entity_id": entityID, + "method_id": methodID, + }) + if err != nil { + t.Fatalf("failed to generate a TOTP secret on an entity: %v", err) + } + totpURL := secret.Data["url"].(string) + if totpURL == "" { + t.Fatalf("failed to get TOTP url in secret response: %+v", secret) + } + _, err = client.Logical().WriteWithContext(context.Background(), fmt.Sprintf("totp/keys/%s", totpGenName), map[string]interface{}{ + "url": totpURL, + }) + if err != nil { + t.Fatalf("failed to register a TOTP URL: %v", err) + } + enfPath := fmt.Sprintf("identity/mfa/login-enforcement/%s", methodID[0:4]) + _, err = client.Logical().WriteWithContext(context.Background(), enfPath, map[string]interface{}{ + "name": methodID[0:4], + "identity_entity_ids": []string{entityID}, + "mfa_method_ids": []string{methodID}, + }) + if err != nil { + t.Fatalf("failed to create login enforcement") + } + + return totpGenName +} + +// GetTOTPCodeFromEngine requests a TOTP code from the specified enginePath. +func GetTOTPCodeFromEngine(t testing.T, client *api.Client, enginePath string) string { + t.Helper() + totpPath := fmt.Sprintf("totp/code/%s", enginePath) + secret, err := client.Logical().ReadWithContext(context.Background(), totpPath) + if err != nil { + t.Fatalf("failed to create totp passcode: %v", err) + } + if secret == nil || secret.Data == nil { + t.Fatalf("bad secret returned from %s", totpPath) + } + return secret.Data["code"].(string) +} + +// SetupLoginMFATOTP setups up a TOTP MFA using some basic configuration and +// returns all relevant information to the client. 
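+//
+// Editor's sketch of a typical call chain (method name and TOTP period are
+// arbitrary):
+//
+//	entityClient, entityID, methodID := SetupLoginMFATOTP(t, client, "totp-method", 30)
+//	totpGenName := RegisterEntityInTOTPEngine(t, client, entityID, methodID)
+//	passcode := GetTOTPCodeFromEngine(t, client, totpGenName)
+//	// passcode can then be supplied when validating the MFA requirement.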
+func SetupLoginMFATOTP(t testing.T, client *api.Client, methodName string, waitPeriod int) (*api.Client, string, string) { + t.Helper() + // Mount the totp secrets engine + SetupTOTPMount(t, client) + + // Create a mount accessor to associate with an entity + mountAccessor := SetupUserpassMountAccessor(t, client) + + // Create a test entity and alias + entityClient, entityID, _ := CreateEntityAndAlias(t, client, mountAccessor, "entity1", "testuser1") + + // Configure a default TOTP method + totpConfig := map[string]interface{}{ + "issuer": "yCorp", + "period": waitPeriod, + "algorithm": "SHA256", + "digits": 6, + "skew": 1, + "key_size": 20, + "qr_size": 200, + "max_validation_attempts": 5, + "method_name": methodName, + } + methodID := SetupTOTPMethod(t, client, totpConfig) + + // Configure a default login enforcement + enforcementConfig := map[string]interface{}{ + "auth_method_types": []string{"userpass"}, + "name": methodID[0:4], + "mfa_method_ids": []string{methodID}, + } + + SetupMFALoginEnforcement(t, client, enforcementConfig) + return entityClient, entityID, methodID +} + +func SkipUnlessEnvVarsSet(t testing.T, envVars []string) { + t.Helper() + + for _, i := range envVars { + if os.Getenv(i) == "" { + t.Skipf("%s must be set for this test to run", strings.Join(envVars, " ")) + } + } +} + +// WaitForNodesExcludingSelectedStandbys is variation on WaitForActiveNodeAndStandbys. +// It waits for the active node before waiting for standby nodes, however +// it will not wait for cores with indexes that match those specified as arguments. +// Whilst you could specify index 0 which is likely to be the leader node, the function +// checks for the leader first regardless of the indexes to skip, so it would be redundant to do so. +// The intention/use case for this function is to allow a cluster to start and become active with one +// or more nodes not joined, so that we can test scenarios where a node joins later. +// e.g. 4 nodes in the cluster, only 3 nodes in cluster 'active', 1 node can be joined later in tests. +func WaitForNodesExcludingSelectedStandbys(t testing.T, cluster *vault.TestCluster, indexesToSkip ...int) { + WaitForActiveNode(t, cluster) + + contains := func(elems []int, e int) bool { + for _, v := range elems { + if v == e { + return true + } + } + + return false + } + for i, core := range cluster.Cores { + if contains(indexesToSkip, i) { + continue + } + + if standby, _ := core.Core.Standby(); standby { + WaitForStandbyNode(t, core) + } + } +} + +// IsLocalOrRegressionTests returns true when the tests are running locally (not in CI), or when +// the regression test env var (VAULT_REGRESSION_TESTS) is provided. +func IsLocalOrRegressionTests() bool { + return os.Getenv("CI") == "" || os.Getenv("VAULT_REGRESSION_TESTS") == "true" +} diff --git a/helper/testhelpers/testhelpers_oss.go b/helper/testhelpers/testhelpers_oss.go new file mode 100644 index 0000000..fc55e9b --- /dev/null +++ b/helper/testhelpers/testhelpers_oss.go @@ -0,0 +1,22 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !enterprise + +package testhelpers + +import ( + "github.com/hashicorp/vault/vault" + "github.com/mitchellh/go-testing-interface" +) + +// WaitForActiveNodeAndStandbys does nothing more than wait for the active node +// on OSS. On enterprise it waits for perf standbys to be healthy too. 
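+//
+// Editor's sketch of a typical call site:
+//
+//	cluster := vault.NewTestCluster(t, conf, opts)
+//	cluster.Start()
+//	defer cluster.Cleanup()
+//	WaitForActiveNodeAndStandbys(t, cluster)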
+func WaitForActiveNodeAndStandbys(t testing.T, cluster *vault.TestCluster) {
+	WaitForActiveNode(t, cluster)
+	for _, core := range cluster.Cores {
+		if standby, _ := core.Core.Standby(); standby {
+			WaitForStandbyNode(t, core)
+		}
+	}
+}
diff --git a/helper/testhelpers/teststorage/consul/consul.go b/helper/testhelpers/teststorage/consul/consul.go
new file mode 100644
index 0000000..d16e4f5
--- /dev/null
+++ b/helper/testhelpers/teststorage/consul/consul.go
@@ -0,0 +1,125 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package consul
+
+import (
+	"sync"
+	realtesting "testing"
+
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/helper/testhelpers/consul"
+	physConsul "github.com/hashicorp/vault/physical/consul"
+	"github.com/hashicorp/vault/vault"
+	"github.com/mitchellh/go-testing-interface"
+)
+
+func MakeConsulBackend(t testing.T, logger hclog.Logger) *vault.PhysicalBackendBundle {
+	cleanup, config := consul.PrepareTestContainer(t.(*realtesting.T), "", false, true)
+
+	consulConf := map[string]string{
+		"address":      config.Address(),
+		"token":        config.Token,
+		"max_parallel": "32",
+	}
+	consulBackend, err := physConsul.NewConsulBackend(consulConf, logger)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return &vault.PhysicalBackendBundle{
+		Backend: consulBackend,
+		Cleanup: cleanup,
+	}
+}
+
+func ConsulBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) {
+	m := &consulContainerManager{}
+	opts.PhysicalFactory = m.Backend
+}
+
+// consulContainerManager exposes Backend, which matches the PhysicalFactory
+// func type. When called, it ensures that a separate Consul container is
+// started for each distinct vault cluster that calls it, and that each Vault
+// core gets a separate Consul backend instance, since that contains state
+// related to lock sessions. The test framework doesn't have a concept of
+// "cluster names" outside of the prefix attached to the logger; other backend
+// factories, mostly via SharedPhysicalFactory, currently implicitly rely on
+// being called in a sequence of core 0, 1, 2,... on one cluster and then
+// core 0, 1, 2... on the next, and so on. Refactoring lots of things to make
+// first-class cluster identifiers a thing seems like a heavy lift given that
+// we already rely on the sequence of calls everywhere else anyway, so we do
+// the same here - each time the Backend method is called with coreIdx == 0 we
+// create a whole new Consul and assume subsequent non-0 index cores are in
+// the same cluster. 
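+//
+// Editor's sketch of how a test wires this up:
+//
+//	conf, opts := teststorage.ClusterSetup(nil, nil, consul.ConsulBackendSetup)
+//	cluster := vault.NewTestCluster(t, conf, opts)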
+type consulContainerManager struct { + mu sync.Mutex + current *consulContainerBackendFactory +} + +func (m *consulContainerManager) Backend(t testing.T, coreIdx int, + logger hclog.Logger, conf map[string]interface{}, +) *vault.PhysicalBackendBundle { + m.mu.Lock() + if coreIdx == 0 || m.current == nil { + // Create a new consul container factory + m.current = &consulContainerBackendFactory{} + } + f := m.current + m.mu.Unlock() + + return f.Backend(t, coreIdx, logger, conf) +} + +type consulContainerBackendFactory struct { + mu sync.Mutex + refCount int + cleanupFn func() + config map[string]string +} + +func (f *consulContainerBackendFactory) Backend(t testing.T, coreIdx int, + logger hclog.Logger, conf map[string]interface{}, +) *vault.PhysicalBackendBundle { + f.mu.Lock() + defer f.mu.Unlock() + + if f.refCount == 0 { + f.startContainerLocked(t) + logger.Debug("started consul container", "clusterID", conf["cluster_id"], + "address", f.config["address"]) + } + + f.refCount++ + consulBackend, err := physConsul.NewConsulBackend(f.config, logger.Named("consul")) + if err != nil { + t.Fatal(err) + } + return &vault.PhysicalBackendBundle{ + Backend: consulBackend, + Cleanup: f.cleanup, + } +} + +func (f *consulContainerBackendFactory) startContainerLocked(t testing.T) { + cleanup, config := consul.PrepareTestContainer(t.(*realtesting.T), "", false, true) + f.config = map[string]string{ + "address": config.Address(), + "token": config.Token, + "max_parallel": "32", + } + f.cleanupFn = cleanup +} + +func (f *consulContainerBackendFactory) cleanup() { + f.mu.Lock() + defer f.mu.Unlock() + + if f.refCount < 1 || f.cleanupFn == nil { + return + } + f.refCount-- + if f.refCount == 0 { + f.cleanupFn() + f.cleanupFn = nil + } +} diff --git a/helper/testhelpers/teststorage/teststorage.go b/helper/testhelpers/teststorage/teststorage.go new file mode 100644 index 0000000..edbc26e --- /dev/null +++ b/helper/testhelpers/teststorage/teststorage.go @@ -0,0 +1,277 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package teststorage + +import ( + "fmt" + "io/ioutil" + "math/rand" + "os" + "time" + + "github.com/hashicorp/go-hclog" + logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" + "github.com/hashicorp/vault/audit" + auditFile "github.com/hashicorp/vault/builtin/audit/file" + auditSocket "github.com/hashicorp/vault/builtin/audit/socket" + auditSyslog "github.com/hashicorp/vault/builtin/audit/syslog" + logicalDb "github.com/hashicorp/vault/builtin/logical/database" + "github.com/hashicorp/vault/builtin/plugin" + "github.com/hashicorp/vault/helper/testhelpers" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/physical/raft" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/physical" + physFile "github.com/hashicorp/vault/sdk/physical/file" + "github.com/hashicorp/vault/sdk/physical/inmem" + "github.com/hashicorp/vault/vault" + "github.com/mitchellh/go-testing-interface" +) + +func MakeInmemBackend(t testing.T, logger hclog.Logger) *vault.PhysicalBackendBundle { + inm, err := inmem.NewTransactionalInmem(nil, logger) + if err != nil { + t.Fatal(err) + } + inmha, err := inmem.NewInmemHA(nil, logger) + if err != nil { + t.Fatal(err) + } + + return &vault.PhysicalBackendBundle{ + Backend: inm, + HABackend: inmha.(physical.HABackend), + } +} + +func MakeLatentInmemBackend(t testing.T, logger hclog.Logger) *vault.PhysicalBackendBundle { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + jitter := r.Intn(20) + latency := time.Duration(r.Intn(15)) * time.Millisecond + + pbb := MakeInmemBackend(t, logger) + latencyInjector := physical.NewTransactionalLatencyInjector(pbb.Backend, latency, jitter, logger) + pbb.Backend = latencyInjector + return pbb +} + +func MakeInmemNonTransactionalBackend(t testing.T, logger hclog.Logger) *vault.PhysicalBackendBundle { + inm, err := inmem.NewInmem(nil, logger) + if err != nil { + t.Fatal(err) + } + inmha, err := inmem.NewInmemHA(nil, logger) + if err != nil { + t.Fatal(err) + } + + return &vault.PhysicalBackendBundle{ + Backend: inm, + HABackend: inmha.(physical.HABackend), + } +} + +func MakeFileBackend(t testing.T, logger hclog.Logger) *vault.PhysicalBackendBundle { + path, err := ioutil.TempDir("", "vault-integ-file-") + if err != nil { + t.Fatal(err) + } + fileConf := map[string]string{ + "path": path, + } + fileBackend, err := physFile.NewTransactionalFileBackend(fileConf, logger) + if err != nil { + t.Fatal(err) + } + + inmha, err := inmem.NewInmemHA(nil, logger) + if err != nil { + t.Fatal(err) + } + + return &vault.PhysicalBackendBundle{ + Backend: fileBackend, + HABackend: inmha.(physical.HABackend), + Cleanup: func() { + err := os.RemoveAll(path) + if err != nil { + t.Fatal(err) + } + }, + } +} + +func MakeRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, extraConf map[string]interface{}) *vault.PhysicalBackendBundle { + nodeID := fmt.Sprintf("core-%d", coreIdx) + raftDir, err := ioutil.TempDir("", "vault-raft-") + if err != nil { + t.Fatal(err) + } + // t.Logf("raft dir: %s", raftDir) + cleanupFunc := func() { + os.RemoveAll(raftDir) + } + + logger.Info("raft dir", "dir", raftDir) + + conf := map[string]string{ + "path": raftDir, + "node_id": nodeID, + "performance_multiplier": "8", + } + for k, v := range extraConf { + val, ok := v.(string) + if ok { + conf[k] = val + } + } + + backend, err := raft.NewRaftBackend(conf, logger.Named("raft")) + if err != nil { + cleanupFunc() + 
t.Fatal(err) + } + + return &vault.PhysicalBackendBundle{ + Backend: backend, + Cleanup: cleanupFunc, + } +} + +// RaftHAFactory returns a PhysicalBackendBundle with raft set as the HABackend +// and the physical.Backend provided in PhysicalBackendBundler as the storage +// backend. +func RaftHAFactory(f PhysicalBackendBundler) func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { + return func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { + // Call the factory func to create the storage backend + physFactory := SharedPhysicalFactory(f) + bundle := physFactory(t, coreIdx, logger, nil) + + // This can happen if a shared physical backend is called on a non-0th core. + if bundle == nil { + bundle = new(vault.PhysicalBackendBundle) + } + + raftDir := makeRaftDir(t) + cleanupFunc := func() { + os.RemoveAll(raftDir) + } + + nodeID := fmt.Sprintf("core-%d", coreIdx) + backendConf := map[string]string{ + "path": raftDir, + "node_id": nodeID, + "performance_multiplier": "8", + "autopilot_reconcile_interval": "300ms", + "autopilot_update_interval": "100ms", + } + + // Create and set the HA Backend + raftBackend, err := raft.NewRaftBackend(backendConf, logger) + if err != nil { + bundle.Cleanup() + t.Fatal(err) + } + bundle.HABackend = raftBackend.(physical.HABackend) + + // Re-wrap the cleanup func + bundleCleanup := bundle.Cleanup + bundle.Cleanup = func() { + if bundleCleanup != nil { + bundleCleanup() + } + cleanupFunc() + } + + return bundle + } +} + +type PhysicalBackendBundler func(t testing.T, logger hclog.Logger) *vault.PhysicalBackendBundle + +func SharedPhysicalFactory(f PhysicalBackendBundler) func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { + return func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { + if coreIdx == 0 { + return f(t, logger) + } + return nil + } +} + +type ClusterSetupMutator func(conf *vault.CoreConfig, opts *vault.TestClusterOptions) + +func InmemBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { + opts.PhysicalFactory = SharedPhysicalFactory(MakeInmemBackend) +} + +func InmemLatentBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { + opts.PhysicalFactory = SharedPhysicalFactory(MakeLatentInmemBackend) +} + +func InmemNonTransactionalBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { + opts.PhysicalFactory = SharedPhysicalFactory(MakeInmemNonTransactionalBackend) +} + +func FileBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { + opts.PhysicalFactory = SharedPhysicalFactory(MakeFileBackend) +} + +func RaftBackendSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { + opts.KeepStandbysSealed = true + opts.PhysicalFactory = MakeRaftBackend + opts.SetupFunc = func(t testing.T, c *vault.TestCluster) { + if opts.NumCores != 1 { + testhelpers.RaftClusterJoinNodes(t, c) + time.Sleep(15 * time.Second) + } + } +} + +func RaftHASetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions, bundler PhysicalBackendBundler) { + opts.KeepStandbysSealed = true + opts.PhysicalFactory = RaftHAFactory(bundler) +} + +func ClusterSetup(conf *vault.CoreConfig, opts *vault.TestClusterOptions, setup ClusterSetupMutator) (*vault.CoreConfig, *vault.TestClusterOptions) { + var localConf vault.CoreConfig + localConf.DisableAutopilot = true + if conf != nil { + localConf = 
*conf + } + localOpts := vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + } + if opts != nil { + localOpts = *opts + } + if setup == nil { + setup = InmemBackendSetup + } + setup(&localConf, &localOpts) + if localConf.CredentialBackends == nil { + localConf.CredentialBackends = map[string]logical.Factory{ + "plugin": plugin.Factory, + } + } + if localConf.LogicalBackends == nil { + localConf.LogicalBackends = map[string]logical.Factory{ + "plugin": plugin.Factory, + "database": logicalDb.Factory, + // This is also available in the plugin catalog, but is here due to the need to + // automatically mount it. + "kv": logicalKv.Factory, + } + } + if localConf.AuditBackends == nil { + localConf.AuditBackends = map[string]audit.Factory{ + "file": auditFile.Factory, + "socket": auditSocket.Factory, + "syslog": auditSyslog.Factory, + "noop": corehelpers.NoopAuditFactory(nil), + } + } + + return &localConf, &localOpts +} diff --git a/helper/testhelpers/teststorage/teststorage_reusable.go b/helper/testhelpers/teststorage/teststorage_reusable.go new file mode 100644 index 0000000..ff9fd2b --- /dev/null +++ b/helper/testhelpers/teststorage/teststorage_reusable.go @@ -0,0 +1,198 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package teststorage + +import ( + "fmt" + "io/ioutil" + "os" + + hclog "github.com/hashicorp/go-hclog" + raftlib "github.com/hashicorp/raft" + "github.com/hashicorp/vault/physical/raft" + "github.com/hashicorp/vault/sdk/physical" + "github.com/hashicorp/vault/vault" + "github.com/mitchellh/go-testing-interface" +) + +// ReusableStorage is a physical backend that can be re-used across +// multiple test clusters in sequence. It is useful for testing things like +// seal migration, wherein a given physical backend must be re-used as several +// test clusters are sequentially created, tested, and discarded. +type ReusableStorage struct { + // IsRaft specifies whether the storage is using a raft backend. + IsRaft bool + + // Setup should be called just before a new TestCluster is created. + Setup ClusterSetupMutator + + // Cleanup should be called after a TestCluster is no longer + // needed -- generally in a defer, just before the call to + // cluster.Cleanup(). + Cleanup func(t testing.T, cluster *vault.TestCluster) +} + +// StorageCleanup is a function that should be called once -- at the very end +// of a given unit test, after each of the sequence of clusters have been +// created, tested, and discarded. +type StorageCleanup func() + +// MakeReusableStorage makes a physical backend that can be re-used across +// multiple test clusters in sequence. +func MakeReusableStorage(t testing.T, logger hclog.Logger, bundle *vault.PhysicalBackendBundle) (ReusableStorage, StorageCleanup) { + storage := ReusableStorage{ + IsRaft: false, + + Setup: func(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { + opts.PhysicalFactory = func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { + if coreIdx == 0 { + // We intentionally do not clone the backend's Cleanup func, + // because we don't want it to be run until the entire test has + // been completed. 
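+					// (Editor's note: that shared Cleanup is instead invoked once, via
+					// the StorageCleanup func returned alongside this ReusableStorage.)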
+ return &vault.PhysicalBackendBundle{ + Backend: bundle.Backend, + HABackend: bundle.HABackend, + } + } + return nil + } + }, + + // No-op + Cleanup: func(t testing.T, cluster *vault.TestCluster) {}, + } + + cleanup := func() { + if bundle.Cleanup != nil { + bundle.Cleanup() + } + } + + return storage, cleanup +} + +// MakeReusableRaftStorage makes a physical raft backend that can be re-used +// across multiple test clusters in sequence. +func MakeReusableRaftStorage(t testing.T, logger hclog.Logger, numCores int, addressProvider raftlib.ServerAddressProvider) (ReusableStorage, StorageCleanup) { + raftDirs := make([]string, numCores) + for i := 0; i < numCores; i++ { + raftDirs[i] = makeRaftDir(t) + } + + storage := ReusableStorage{ + IsRaft: true, + + Setup: func(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { + conf.DisablePerformanceStandby = true + opts.KeepStandbysSealed = true + opts.PhysicalFactory = func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { + return makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx], addressProvider, false) + } + }, + + // Close open files being used by raft. + Cleanup: func(t testing.T, cluster *vault.TestCluster) { + for i := 0; i < len(cluster.Cores); i++ { + CloseRaftStorage(t, cluster, i) + } + }, + } + + cleanup := func() { + for _, rd := range raftDirs { + os.RemoveAll(rd) + } + } + + return storage, cleanup +} + +// CloseRaftStorage closes open files being used by raft. +func CloseRaftStorage(t testing.T, cluster *vault.TestCluster, idx int) { + raftStorage := cluster.Cores[idx].UnderlyingRawStorage.(*raft.RaftBackend) + if err := raftStorage.Close(); err != nil { + t.Fatal(err) + } +} + +func MakeReusableRaftHAStorage(t testing.T, logger hclog.Logger, numCores int, bundle *vault.PhysicalBackendBundle) (ReusableStorage, StorageCleanup) { + raftDirs := make([]string, numCores) + for i := 0; i < numCores; i++ { + raftDirs[i] = makeRaftDir(t) + } + + storage := ReusableStorage{ + Setup: func(conf *vault.CoreConfig, opts *vault.TestClusterOptions) { + opts.KeepStandbysSealed = true + opts.PhysicalFactory = func(t testing.T, coreIdx int, logger hclog.Logger, conf map[string]interface{}) *vault.PhysicalBackendBundle { + haBundle := makeReusableRaftBackend(t, coreIdx, logger, raftDirs[coreIdx], nil, true) + + return &vault.PhysicalBackendBundle{ + Backend: bundle.Backend, + HABackend: haBundle.HABackend, + } + } + }, + + // Close open files being used by raft. 
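+		// (Editor's note: unlike MakeReusableRaftStorage above, raft serves as
+		// the HA backend here, so shutdown goes through UnderlyingHAStorage.)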
+		Cleanup: func(t testing.T, cluster *vault.TestCluster) {
+			for _, core := range cluster.Cores {
+				raftStorage := core.UnderlyingHAStorage.(*raft.RaftBackend)
+				if err := raftStorage.Close(); err != nil {
+					t.Fatal(err)
+				}
+			}
+		},
+	}
+
+	cleanup := func() {
+		if bundle.Cleanup != nil {
+			bundle.Cleanup()
+		}
+
+		for _, rd := range raftDirs {
+			os.RemoveAll(rd)
+		}
+	}
+
+	return storage, cleanup
+}
+
+func makeRaftDir(t testing.T) string {
+	raftDir, err := ioutil.TempDir("", "vault-raft-")
+	if err != nil {
+		t.Fatal(err)
+	}
+	// t.Logf("raft dir: %s", raftDir)
+	return raftDir
+}
+
+func makeReusableRaftBackend(t testing.T, coreIdx int, logger hclog.Logger, raftDir string, addressProvider raftlib.ServerAddressProvider, ha bool) *vault.PhysicalBackendBundle {
+	nodeID := fmt.Sprintf("core-%d", coreIdx)
+	conf := map[string]string{
+		"path":                         raftDir,
+		"node_id":                      nodeID,
+		"performance_multiplier":       "8",
+		"autopilot_reconcile_interval": "300ms",
+		"autopilot_update_interval":    "100ms",
+	}
+
+	backend, err := raft.NewRaftBackend(conf, logger)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if addressProvider != nil {
+		backend.(*raft.RaftBackend).SetServerAddressProvider(addressProvider)
+	}
+
+	bundle := new(vault.PhysicalBackendBundle)
+
+	if ha {
+		bundle.HABackend = backend.(physical.HABackend)
+	} else {
+		bundle.Backend = backend
+	}
+	return bundle
+}
diff --git a/helper/timeutil/timeutil.go b/helper/timeutil/timeutil.go
new file mode 100644
index 0000000..16f8343
--- /dev/null
+++ b/helper/timeutil/timeutil.go
@@ -0,0 +1,167 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package timeutil
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+)
+
+func StartOfPreviousMonth(t time.Time) time.Time {
+	year, month, _ := t.Date()
+	return time.Date(year, month, 1, 0, 0, 0, 0, t.Location()).AddDate(0, -1, 0)
+}
+
+func StartOfMonth(t time.Time) time.Time {
+	year, month, _ := t.Date()
+	return time.Date(year, month, 1, 0, 0, 0, 0, t.Location())
+}
+
+func StartOfNextMonth(t time.Time) time.Time {
+	year, month, _ := t.Date()
+	return time.Date(year, month, 1, 0, 0, 0, 0, t.Location()).AddDate(0, 1, 0)
+}
+
+// IsMonthStart checks if :t: is the start of the month
+func IsMonthStart(t time.Time) bool {
+	return t.Equal(StartOfMonth(t))
+}
+
+func EndOfMonth(t time.Time) time.Time {
+	year, month, _ := t.Date()
+	if month == time.December {
+		return time.Date(year, time.December, 31, 23, 59, 59, 0, t.Location())
+	} else {
+		eom := time.Date(year, month+1, 1, 23, 59, 59, 0, t.Location())
+		return eom.AddDate(0, 0, -1)
+	}
+}
+
+// IsPreviousMonth checks if :t: is in the month directly before :toCompare:
+func IsPreviousMonth(t, toCompare time.Time) bool {
+	thisMonthStart := StartOfMonth(toCompare)
+	previousMonthStart := StartOfMonth(thisMonthStart.AddDate(0, 0, -1))
+
+	if t.Equal(previousMonthStart) {
+		return true
+	}
+	return t.After(previousMonthStart) && t.Before(thisMonthStart)
+}
+
+// IsCurrentMonth checks if :t: is in the current month, as defined by :compare:
+// generally, pass in time.Now().UTC() as :compare:
+func IsCurrentMonth(t, compare time.Time) bool {
+	thisMonthStart := StartOfMonth(compare)
+	queryMonthStart := StartOfMonth(t)
+
+	return queryMonthStart.Equal(thisMonthStart)
+}
+
+// GetMostRecentContiguousMonths finds the start time of the most
+// recent set of contiguous months. 
+//
+// For example, if the most recent start time is Aug 15, then that range is just 1 month.
+// If the recent start times are Aug 1 and July 1 and June 15, then that range is
+// three months and we return June 15.
+//
+// note: return slice will be nil if :startTimes: is nil
+// :startTimes: must be sorted in decreasing order (see unit test for examples)
+func GetMostRecentContiguousMonths(startTimes []time.Time) []time.Time {
+	if len(startTimes) < 2 {
+		// no processing needed if 0 or 1 months worth of logs
+		return startTimes
+	}
+
+	out := []time.Time{startTimes[0]}
+	if !IsMonthStart(out[0]) {
+		// there is less than one contiguous month (most recent start time is after the start of this month)
+		return out
+	}
+
+	i := 1
+	for ; i < len(startTimes); i++ {
+		if !IsMonthStart(startTimes[i]) || !IsPreviousMonth(startTimes[i], startTimes[i-1]) {
+			break
+		}
+
+		out = append(out, startTimes[i])
+	}
+
+	// handle mid-month log starts
+	if i < len(startTimes) {
+		if IsPreviousMonth(StartOfMonth(startTimes[i]), startTimes[i-1]) {
+			// the earliest part of the segment is mid-month, but still valid for this segment
+			out = append(out, startTimes[i])
+		}
+	}
+
+	return out
+}
+
+func InRange(t, start, end time.Time) bool {
+	return (t.Equal(start) || t.After(start)) &&
+		(t.Equal(end) || t.Before(end))
+}
+
+// ParseTimeFromPath returns a UTC time from a path of the form '<timestamp>/',
+// where <timestamp> is a Unix timestamp
+func ParseTimeFromPath(path string) (time.Time, error) {
+	elems := strings.Split(path, "/")
+	if len(elems) == 1 {
+		// :path: is a directory that must have children
+		return time.Time{}, errors.New("Invalid path provided")
+	}
+
+	unixSeconds, err := strconv.ParseInt(elems[0], 10, 64)
+	if err != nil {
+		return time.Time{}, fmt.Errorf("could not convert time from path segment %q. error: %w", elems[0], err)
+	}
+
+	return time.Unix(unixSeconds, 0).UTC(), nil
+}
+
+// MonthsPreviousTo computes the N-month period before the given date.
+// For example, if it is currently April 2020, then 12 months is April 2019 through March 2020.
+func MonthsPreviousTo(months int, now time.Time) time.Time {
+	firstOfMonth := StartOfMonth(now.UTC())
+	return firstOfMonth.AddDate(0, -months, 0)
+}
+
+// SkipAtEndOfMonth skips this test if too close to the end of a month!
+func SkipAtEndOfMonth(t *testing.T) {
+	t.Helper()
+
+	thisMonth := StartOfMonth(time.Now().UTC())
+	endOfMonth := EndOfMonth(thisMonth)
+	if endOfMonth.Sub(time.Now()) < 10*time.Minute {
+		t.Skip("too close to end of month")
+	}
+}
+
+// Clock is an interface that allows unit tests to substitute in a simulated clock.
+type Clock interface {
+	Now() time.Time
+	NewTicker(time.Duration) *time.Ticker
+	NewTimer(time.Duration) *time.Timer
+}
+
+type DefaultClock struct{}
+
+var _ Clock = (*DefaultClock)(nil)
+
+func (_ DefaultClock) Now() time.Time {
+	return time.Now()
+}
+
+func (_ DefaultClock) NewTicker(d time.Duration) *time.Ticker {
+	return time.NewTicker(d)
+}
+
+func (_ DefaultClock) NewTimer(d time.Duration) *time.Timer {
+	return time.NewTimer(d)
+}
diff --git a/helper/timeutil/timeutil_test.go b/helper/timeutil/timeutil_test.go
new file mode 100644
index 0000000..b9ccdbd
--- /dev/null
+++ b/helper/timeutil/timeutil_test.go
@@ -0,0 +1,328 @@
+// Copyright (c) HashiCorp, Inc.
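+//
+// A minimal sketch of how the helpers above compose, assuming start times
+// sorted in decreasing order as GetMostRecentContiguousMonths requires:
+//
+//	starts := []time.Time{
+//		time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC),
+//		time.Date(2020, 3, 1, 0, 0, 0, 0, time.UTC),
+//		time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC), // gap: February is missing
+//	}
+//	recent := timeutil.GetMostRecentContiguousMonths(starts)
+//	// recent contains only April and March; the walk stops at the gap.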
+// SPDX-License-Identifier: MPL-2.0 + +package timeutil + +import ( + "reflect" + "testing" + "time" +) + +func TestTimeutil_StartOfPreviousMonth(t *testing.T) { + testCases := []struct { + Input time.Time + Expected time.Time + }{ + { + Input: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC), + Expected: time.Date(2019, 12, 1, 0, 0, 0, 0, time.UTC), + }, + { + Input: time.Date(2020, 1, 15, 0, 0, 0, 0, time.UTC), + Expected: time.Date(2019, 12, 1, 0, 0, 0, 0, time.UTC), + }, + { + Input: time.Date(2020, 3, 31, 23, 59, 59, 999999999, time.UTC), + Expected: time.Date(2020, 2, 1, 0, 0, 0, 0, time.UTC), + }, + } + + for _, tc := range testCases { + result := StartOfPreviousMonth(tc.Input) + if !result.Equal(tc.Expected) { + t.Errorf("start of month before %v is %v, got %v", tc.Input, tc.Expected, result) + } + } +} + +func TestTimeutil_StartOfMonth(t *testing.T) { + testCases := []struct { + Input time.Time + Expected time.Time + }{ + { + Input: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC), + Expected: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC), + }, + { + Input: time.Date(2020, 1, 1, 1, 0, 0, 0, time.UTC), + Expected: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC), + }, + { + Input: time.Date(2020, 1, 1, 0, 0, 0, 1, time.UTC), + Expected: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC), + }, + { + Input: time.Date(2020, 1, 31, 23, 59, 59, 999999999, time.UTC), + Expected: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC), + }, + { + Input: time.Date(2020, 2, 28, 1, 2, 3, 4, time.UTC), + Expected: time.Date(2020, 2, 1, 0, 0, 0, 0, time.UTC), + }, + } + + for _, tc := range testCases { + result := StartOfMonth(tc.Input) + if !result.Equal(tc.Expected) { + t.Errorf("start of %v is %v, expected %v", tc.Input, result, tc.Expected) + } + } +} + +func TestTimeutil_IsMonthStart(t *testing.T) { + testCases := []struct { + input time.Time + expected bool + }{ + { + input: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC), + expected: true, + }, + { + input: time.Date(2020, 1, 1, 0, 0, 0, 1, time.UTC), + expected: false, + }, + { + input: time.Date(2020, 4, 5, 0, 0, 0, 0, time.UTC), + expected: false, + }, + { + input: time.Date(2020, 1, 31, 23, 59, 59, 999999999, time.UTC), + expected: false, + }, + } + + for _, tc := range testCases { + result := IsMonthStart(tc.input) + if result != tc.expected { + t.Errorf("is %v the start of the month? expected %t, got %t", tc.input, tc.expected, result) + } + } +} + +func TestTimeutil_EndOfMonth(t *testing.T) { + testCases := []struct { + Input time.Time + Expected time.Time + }{ + { + // The current behavior does not use the nanoseconds + // because we didn't want to clutter the result of end-of-month reporting. 
+ Input: time.Date(2020, 1, 31, 23, 59, 59, 0, time.UTC), + Expected: time.Date(2020, 1, 31, 23, 59, 59, 0, time.UTC), + }, + { + Input: time.Date(2020, 1, 31, 23, 59, 59, 999999999, time.UTC), + Expected: time.Date(2020, 1, 31, 23, 59, 59, 0, time.UTC), + }, + { + Input: time.Date(2020, 1, 15, 1, 2, 3, 4, time.UTC), + Expected: time.Date(2020, 1, 31, 23, 59, 59, 0, time.UTC), + }, + { + // Leap year + Input: time.Date(2020, 2, 1, 0, 0, 0, 0, time.UTC), + Expected: time.Date(2020, 2, 29, 23, 59, 59, 0, time.UTC), + }, + { + // non-leap year + Input: time.Date(2100, 2, 1, 0, 0, 0, 0, time.UTC), + Expected: time.Date(2100, 2, 28, 23, 59, 59, 0, time.UTC), + }, + } + + for _, tc := range testCases { + result := EndOfMonth(tc.Input) + if !result.Equal(tc.Expected) { + t.Errorf("end of %v is %v, expected %v", tc.Input, result, tc.Expected) + } + } +} + +func TestTimeutil_IsPreviousMonth(t *testing.T) { + testCases := []struct { + tInput time.Time + compareInput time.Time + expected bool + }{ + { + tInput: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC), + compareInput: time.Date(2020, 1, 31, 0, 0, 0, 0, time.UTC), + expected: false, + }, + { + tInput: time.Date(2019, 12, 31, 0, 0, 0, 0, time.UTC), + compareInput: time.Date(2020, 1, 31, 0, 0, 0, 0, time.UTC), + expected: true, + }, + { + // leap year (false) + tInput: time.Date(2019, 12, 29, 10, 10, 10, 0, time.UTC), + compareInput: time.Date(2020, 2, 29, 10, 10, 10, 0, time.UTC), + expected: false, + }, + { + // leap year (true) + tInput: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC), + compareInput: time.Date(2020, 2, 29, 10, 10, 10, 0, time.UTC), + expected: true, + }, + { + tInput: time.Date(2018, 5, 5, 5, 0, 0, 0, time.UTC), + compareInput: time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC), + expected: false, + }, + { + // test normalization. want to make subtracting 1 month from 3/30/2020 doesn't yield 2/30/2020, normalized + // to 3/1/2020 + tInput: time.Date(2020, 2, 1, 0, 0, 0, 0, time.UTC), + compareInput: time.Date(2020, 3, 30, 0, 0, 0, 0, time.UTC), + expected: true, + }, + } + + for _, tc := range testCases { + result := IsPreviousMonth(tc.tInput, tc.compareInput) + if result != tc.expected { + t.Errorf("%v in previous month to %v? expected %t, got %t", tc.tInput, tc.compareInput, tc.expected, result) + } + } +} + +func TestTimeutil_IsCurrentMonth(t *testing.T) { + now := time.Now() + testCases := []struct { + input time.Time + expected bool + }{ + { + input: now, + expected: true, + }, + { + input: StartOfMonth(now).AddDate(0, 0, -1), + expected: false, + }, + { + input: EndOfMonth(now).AddDate(0, 0, -1), + expected: true, + }, + { + input: StartOfMonth(now).AddDate(-1, 0, 0), + expected: false, + }, + } + + for _, tc := range testCases { + result := IsCurrentMonth(tc.input, now) + if result != tc.expected { + t.Errorf("invalid result. 
expected %t for %v", tc.expected, tc.input) + } + } +} + +func TestTimeUtil_ContiguousMonths(t *testing.T) { + testCases := []struct { + input []time.Time + expected []time.Time + }{ + { + input: []time.Time{ + time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC), + time.Date(2020, 3, 1, 0, 0, 0, 0, time.UTC), + time.Date(2020, 2, 5, 0, 0, 0, 0, time.UTC), + time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC), + }, + expected: []time.Time{ + time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC), + time.Date(2020, 3, 1, 0, 0, 0, 0, time.UTC), + time.Date(2020, 2, 5, 0, 0, 0, 0, time.UTC), + }, + }, + { + input: []time.Time{ + time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC), + time.Date(2020, 3, 1, 0, 0, 0, 0, time.UTC), + time.Date(2020, 2, 1, 0, 0, 0, 0, time.UTC), + time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC), + }, + expected: []time.Time{ + time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC), + time.Date(2020, 3, 1, 0, 0, 0, 0, time.UTC), + time.Date(2020, 2, 1, 0, 0, 0, 0, time.UTC), + time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC), + }, + }, + { + input: []time.Time{ + time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC), + }, + expected: []time.Time{ + time.Date(2020, 4, 1, 0, 0, 0, 0, time.UTC), + }, + }, + { + input: []time.Time{}, + expected: []time.Time{}, + }, + { + input: nil, + expected: nil, + }, + { + input: []time.Time{ + time.Date(2020, 2, 2, 0, 0, 0, 0, time.UTC), + time.Date(2020, 1, 15, 0, 0, 0, 0, time.UTC), + }, + expected: []time.Time{ + time.Date(2020, 2, 2, 0, 0, 0, 0, time.UTC), + }, + }, + } + + for _, tc := range testCases { + result := GetMostRecentContiguousMonths(tc.input) + + if !reflect.DeepEqual(tc.expected, result) { + t.Errorf("invalid contiguous segment returned. expected %v, got %v", tc.expected, result) + } + } +} + +func TestTimeUtil_ParseTimeFromPath(t *testing.T) { + testCases := []struct { + input string + expectedOut time.Time + expectError bool + }{ + { + input: "719020800/1", + expectedOut: time.Unix(719020800, 0).UTC(), + expectError: false, + }, + { + input: "1601415205/3", + expectedOut: time.Unix(1601415205, 0).UTC(), + expectError: false, + }, + { + input: "baddata/3", + expectedOut: time.Time{}, + expectError: true, + }, + } + + for _, tc := range testCases { + result, err := ParseTimeFromPath(tc.input) + gotError := err != nil + + if result != tc.expectedOut { + t.Errorf("bad timestamp on input %q. expected: %v got: %v", tc.input, tc.expectedOut, result) + } + if gotError != tc.expectError { + t.Errorf("bad error status on input %q. expected error: %t, got error: %t", tc.input, tc.expectError, gotError) + } + } +} diff --git a/helper/useragent/useragent.go b/helper/useragent/useragent.go new file mode 100644 index 0000000..a16b871 --- /dev/null +++ b/helper/useragent/useragent.go @@ -0,0 +1,110 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package useragent + +import ( + "fmt" + "runtime" + + "github.com/hashicorp/vault/version" +) + +var ( + // projectURL is the project URL. + projectURL = "https://www.vaultproject.io/" + + // rt is the runtime - variable for tests. + rt = runtime.Version() + + // versionFunc is the func that returns the current version. This is a + // function to take into account the different build processes and distinguish + // between enterprise and oss builds. + versionFunc = func() string { + return version.GetVersion().VersionNumber() + } +) + +// String returns the consistent user-agent string for Vault. +// +// e.g. 
Vault/0.10.4 (+https://www.vaultproject.io/; go1.10.1)
+func String() string {
+	return fmt.Sprintf("Vault/%s (+%s; %s)",
+		versionFunc(), projectURL, rt)
+}
+
+// AgentString returns the consistent user-agent string for Vault Agent.
+//
+// e.g. Vault Agent/0.10.4 (+https://www.vaultproject.io/; go1.10.1)
+func AgentString() string {
+	return fmt.Sprintf("Vault Agent/%s (+%s; %s)",
+		versionFunc(), projectURL, rt)
+}
+
+// AgentTemplatingString returns the consistent user-agent string for Vault Agent Templating.
+//
+// e.g. Vault Agent Templating/0.10.4 (+https://www.vaultproject.io/; go1.10.1)
+func AgentTemplatingString() string {
+	return fmt.Sprintf("Vault Agent Templating/%s (+%s; %s)",
+		versionFunc(), projectURL, rt)
+}
+
+// AgentProxyString returns the consistent user-agent string for Vault Agent API Proxying.
+//
+// e.g. Vault Agent API Proxy/0.10.4 (+https://www.vaultproject.io/; go1.10.1)
+func AgentProxyString() string {
+	return fmt.Sprintf("Vault Agent API Proxy/%s (+%s; %s)",
+		versionFunc(), projectURL, rt)
+}
+
+// AgentProxyStringWithProxiedUserAgent returns the consistent user-agent
+// string for Vault Agent API Proxying, keeping the User-Agent of the proxied
+// client as an extension to this UserAgent.
+//
+// e.g. Vault Agent API Proxy/0.10.4 (+https://www.vaultproject.io/; go1.10.1); proxiedUserAgent
+func AgentProxyStringWithProxiedUserAgent(proxiedUserAgent string) string {
+	return fmt.Sprintf("Vault Agent API Proxy/%s (+%s; %s); %s",
+		versionFunc(), projectURL, rt, proxiedUserAgent)
+}
+
+// AgentAutoAuthString returns the consistent user-agent string for Vault Agent Auto-Auth.
+//
+// e.g. Vault Agent Auto-Auth/0.10.4 (+https://www.vaultproject.io/; go1.10.1)
+func AgentAutoAuthString() string {
+	return fmt.Sprintf("Vault Agent Auto-Auth/%s (+%s; %s)",
+		versionFunc(), projectURL, rt)
+}
+
+// ProxyString returns the consistent user-agent string for Vault Proxy.
+//
+// e.g. Vault Proxy/0.10.4 (+https://www.vaultproject.io/; go1.10.1)
+func ProxyString() string {
+	return fmt.Sprintf("Vault Proxy/%s (+%s; %s)",
+		versionFunc(), projectURL, rt)
+}
+
+// ProxyAPIProxyString returns the consistent user-agent string for Vault Proxy API Proxying.
+//
+// e.g. Vault Proxy API Proxy/0.10.4 (+https://www.vaultproject.io/; go1.10.1)
+func ProxyAPIProxyString() string {
+	return fmt.Sprintf("Vault Proxy API Proxy/%s (+%s; %s)",
+		versionFunc(), projectURL, rt)
+}
+
+// ProxyStringWithProxiedUserAgent returns the consistent user-agent
+// string for Vault Proxy API Proxying, keeping the User-Agent of the proxied
+// client as an extension to this UserAgent.
+//
+// e.g. Vault Proxy API Proxy/0.10.4 (+https://www.vaultproject.io/; go1.10.1); proxiedUserAgent
+func ProxyStringWithProxiedUserAgent(proxiedUserAgent string) string {
+	return fmt.Sprintf("Vault Proxy API Proxy/%s (+%s; %s); %s",
+		versionFunc(), projectURL, rt, proxiedUserAgent)
+}
+
+// ProxyAutoAuthString returns the consistent user-agent string for Vault Proxy Auto-Auth.
+//
+// e.g. Vault Proxy Auto-Auth/0.10.4 (+https://www.vaultproject.io/; go1.10.1)
+func ProxyAutoAuthString() string {
+	return fmt.Sprintf("Vault Proxy Auto-Auth/%s (+%s; %s)",
+		versionFunc(), projectURL, rt)
+}
diff --git a/helper/useragent/useragent_test.go b/helper/useragent/useragent_test.go
new file mode 100644
index 0000000..af5e2f0
--- /dev/null
+++ b/helper/useragent/useragent_test.go
@@ -0,0 +1,140 @@
+// Copyright (c) HashiCorp, Inc.
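+//
+// A minimal sketch of a call site for the package above, assuming a plain
+// net/http request (the hostname is illustrative only):
+//
+//	req, _ := http.NewRequest(http.MethodGet, "https://vault.example.com/v1/sys/health", nil)
+//	req.Header.Set("User-Agent", useragent.String())
+//	// e.g. "Vault/1.14.8 (+https://www.vaultproject.io/; go1.20)"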
+// SPDX-License-Identifier: MPL-2.0 + +package useragent + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestUserAgent(t *testing.T) { + projectURL = "https://vault-test.com" + rt = "go5.0" + versionFunc = func() string { return "1.2.3" } + + act := String() + + exp := "Vault/1.2.3 (+https://vault-test.com; go5.0)" + require.Equal(t, exp, act) +} + +// TestUserAgent_VaultAgent tests the AgentString() function works +// as expected +func TestUserAgent_VaultAgent(t *testing.T) { + projectURL = "https://vault-test.com" + rt = "go5.0" + versionFunc = func() string { return "1.2.3" } + + act := AgentString() + + exp := "Vault Agent/1.2.3 (+https://vault-test.com; go5.0)" + require.Equal(t, exp, act) +} + +// TestUserAgent_VaultAgentTemplating tests the AgentTemplatingString() function works +// as expected +func TestUserAgent_VaultAgentTemplating(t *testing.T) { + projectURL = "https://vault-test.com" + rt = "go5.0" + versionFunc = func() string { return "1.2.3" } + + act := AgentTemplatingString() + + exp := "Vault Agent Templating/1.2.3 (+https://vault-test.com; go5.0)" + require.Equal(t, exp, act) +} + +// TestUserAgent_VaultAgentProxy tests the AgentProxyString() function works +// as expected +func TestUserAgent_VaultAgentProxy(t *testing.T) { + projectURL = "https://vault-test.com" + rt = "go5.0" + versionFunc = func() string { return "1.2.3" } + + act := AgentProxyString() + + exp := "Vault Agent API Proxy/1.2.3 (+https://vault-test.com; go5.0)" + require.Equal(t, exp, act) +} + +// TestUserAgent_VaultAgentProxyWithProxiedUserAgent tests the AgentProxyStringWithProxiedUserAgent() +// function works as expected +func TestUserAgent_VaultAgentProxyWithProxiedUserAgent(t *testing.T) { + projectURL = "https://vault-test.com" + rt = "go5.0" + versionFunc = func() string { return "1.2.3" } + userAgent := "my-user-agent" + + act := AgentProxyStringWithProxiedUserAgent(userAgent) + + exp := "Vault Agent API Proxy/1.2.3 (+https://vault-test.com; go5.0); my-user-agent" + require.Equal(t, exp, act) +} + +// TestUserAgent_VaultAgentAutoAuth tests the AgentAutoAuthString() function works +// as expected +func TestUserAgent_VaultAgentAutoAuth(t *testing.T) { + projectURL = "https://vault-test.com" + rt = "go5.0" + versionFunc = func() string { return "1.2.3" } + + act := AgentAutoAuthString() + + exp := "Vault Agent Auto-Auth/1.2.3 (+https://vault-test.com; go5.0)" + require.Equal(t, exp, act) +} + +// TestUserAgent_VaultProxy tests the ProxyString() function works +// as expected +func TestUserAgent_VaultProxy(t *testing.T) { + projectURL = "https://vault-test.com" + rt = "go5.0" + versionFunc = func() string { return "1.2.3" } + + act := ProxyString() + + exp := "Vault Proxy/1.2.3 (+https://vault-test.com; go5.0)" + require.Equal(t, exp, act) +} + +// TestUserAgent_VaultProxyAPIProxy tests the ProxyAPIProxyString() function works +// as expected +func TestUserAgent_VaultProxyAPIProxy(t *testing.T) { + projectURL = "https://vault-test.com" + rt = "go5.0" + versionFunc = func() string { return "1.2.3" } + + act := ProxyAPIProxyString() + + exp := "Vault Proxy API Proxy/1.2.3 (+https://vault-test.com; go5.0)" + require.Equal(t, exp, act) +} + +// TestUserAgent_VaultProxyWithProxiedUserAgent tests the ProxyStringWithProxiedUserAgent() +// function works as expected +func TestUserAgent_VaultProxyWithProxiedUserAgent(t *testing.T) { + projectURL = "https://vault-test.com" + rt = "go5.0" + versionFunc = func() string { return "1.2.3" } + userAgent := "my-user-agent" + + act := 
ProxyStringWithProxiedUserAgent(userAgent)
+
+	exp := "Vault Proxy API Proxy/1.2.3 (+https://vault-test.com; go5.0); my-user-agent"
+	require.Equal(t, exp, act)
+}
+
+// TestUserAgent_VaultProxyAutoAuth tests the ProxyAutoAuthString() function works
+// as expected
+func TestUserAgent_VaultProxyAutoAuth(t *testing.T) {
+	projectURL = "https://vault-test.com"
+	rt = "go5.0"
+	versionFunc = func() string { return "1.2.3" }
+
+	act := ProxyAutoAuthString()
+
+	exp := "Vault Proxy Auto-Auth/1.2.3 (+https://vault-test.com; go5.0)"
+	require.Equal(t, exp, act)
+}
diff --git a/helper/versions/version.go b/helper/versions/version.go
new file mode 100644
index 0000000..9eb8077
--- /dev/null
+++ b/helper/versions/version.go
@@ -0,0 +1,81 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package versions
+
+import (
+	"fmt"
+	"runtime/debug"
+	"strings"
+	"sync"
+
+	semver "github.com/hashicorp/go-version"
+	"github.com/hashicorp/vault/sdk/helper/consts"
+	"github.com/hashicorp/vault/version"
+)
+
+const (
+	BuiltinMetadata = "builtin"
+)
+
+var (
+	buildInfoOnce         sync.Once // once is used to ensure we only parse build info once.
+	buildInfo             *debug.BuildInfo
+	DefaultBuiltinVersion = fmt.Sprintf("v%s+%s.vault", version.GetVersion().Version, BuiltinMetadata)
+)
+
+func GetBuiltinVersion(pluginType consts.PluginType, pluginName string) string {
+	buildInfoOnce.Do(func() {
+		buildInfo, _ = debug.ReadBuildInfo()
+	})
+
+	// Should never happen, means the binary was built without Go modules.
+	// Fall back to just the Vault version.
+	if buildInfo == nil {
+		return DefaultBuiltinVersion
+	}
+
+	// Vault builtin plugins are all either:
+	// a) An external repo within the hashicorp org - return external repo version with +builtin
+	// b) Within the Vault repo itself - return Vault version with +builtin.vault
+	//
+	// The repo names are predictable, but follow slightly different patterns
+	// for each plugin type.
+	t := pluginType.String()
+	switch pluginType {
+	case consts.PluginTypeDatabase:
+		// Database plugin built-ins are registered as e.g. "postgresql-database-plugin"
+		pluginName = strings.TrimSuffix(pluginName, "-database-plugin")
+	case consts.PluginTypeSecrets:
+		// Repos use "secrets", pluginType.String() is "secret".
+		t = "secrets"
+	}
+	pluginModulePath := fmt.Sprintf("github.com/hashicorp/vault-plugin-%s-%s", t, pluginName)
+
+	for _, dep := range buildInfo.Deps {
+		if dep.Path == pluginModulePath {
+			return dep.Version + "+" + BuiltinMetadata
+		}
+	}
+
+	return DefaultBuiltinVersion
+}
+
+// IsBuiltinVersion checks for the "builtin" metadata identifier in a plugin's
+// semantic version. Vault rejects any plugin registration requests with this
+// identifier, so we can be certain it's a builtin plugin if it's present.
+func IsBuiltinVersion(v string) bool {
+	semanticVersion, err := semver.NewSemver(v)
+	if err != nil {
+		return false
+	}
+
+	metadataIdentifiers := strings.Split(semanticVersion.Metadata(), ".")
+	for _, identifier := range metadataIdentifiers {
+		if identifier == BuiltinMetadata {
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/helper/versions/version_test.go b/helper/versions/version_test.go
new file mode 100644
index 0000000..85b46cd
--- /dev/null
+++ b/helper/versions/version_test.go
@@ -0,0 +1,26 @@
+// Copyright (c) HashiCorp, Inc.
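+//
+// A minimal sketch of the version shapes produced above ("kv" is an
+// illustrative plugin name):
+//
+//	versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv")
+//	// e.g. "v0.15.0+builtin" for an external hashicorp repo, or
+//	// DefaultBuiltinVersion ("v<vault version>+builtin.vault") when in-tree
+//	versions.IsBuiltinVersion("v1.0.0+builtin.vault") // true
+//	versions.IsBuiltinVersion("v1.0.0+builtinbutnot") // false: identifiers must match exactly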
+// SPDX-License-Identifier: MPL-2.0 + +package versions + +import "testing" + +func TestIsBuiltinVersion(t *testing.T) { + for _, tc := range []struct { + version string + builtin bool + }{ + {"v1.0.0+builtin", true}, + {"v2.3.4+builtin.vault", true}, + {"1.0.0+builtin.anythingelse", true}, + {"v1.0.0+other.builtin", true}, + {"v1.0.0+builtinbutnot", false}, + {"v1.0.0", false}, + {"not-a-semver", false}, + } { + builtin := IsBuiltinVersion(tc.version) + if builtin != tc.builtin { + t.Fatalf("%s should give: %v, but got %v", tc.version, tc.builtin, builtin) + } + } +} diff --git a/http/assets.go b/http/assets.go new file mode 100644 index 0000000..b60a594 --- /dev/null +++ b/http/assets.go @@ -0,0 +1,28 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build ui + +package http + +import ( + "embed" + "io/fs" + "net/http" +) + +// content is our static web server content. +// +//go:embed web_ui/* +var content embed.FS + +// assetFS is a http Filesystem that serves the generated web UI from the +// "ember-dist" make step +func assetFS() http.FileSystem { + // sub out to web_ui, where the generated content lives + f, err := fs.Sub(content, "web_ui") + if err != nil { + panic(err) + } + return http.FS(f) +} diff --git a/http/assets_stub.go b/http/assets_stub.go new file mode 100644 index 0000000..e1b4daf --- /dev/null +++ b/http/assets_stub.go @@ -0,0 +1,21 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !ui + +package http + +import ( + "net/http" +) + +func init() { + // set uiBuiltIn to false to indicate the ui is not built in. See + // http/handler.go + uiBuiltIn = false +} + +// assetFS is a stub for building Vault without a UI. +func assetFS() http.FileSystem { + return nil +} diff --git a/http/auth_token_test.go b/http/auth_token_test.go new file mode 100644 index 0000000..d96e183 --- /dev/null +++ b/http/auth_token_test.go @@ -0,0 +1,209 @@ +// Copyright (c) HashiCorp, Inc. 
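+//
+// A minimal sketch of how a filesystem like assetFS() is typically mounted,
+// assuming a *http.ServeMux named mux (the real wiring lives in
+// http/handler.go):
+//
+//	if fs := assetFS(); fs != nil { // nil when built without the "ui" tag
+//		mux.Handle("/ui/", http.StripPrefix("/ui/", http.FileServer(fs)))
+//	}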
+// SPDX-License-Identifier: MPL-2.0
+
+package http
+
+import (
+	"strings"
+	"testing"
+
+	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/vault"
+)
+
+func TestAuthTokenCreate(t *testing.T) {
+	core, _, token := vault.TestCoreUnsealed(t)
+	ln, addr := TestServer(t, core)
+	defer ln.Close()
+
+	config := api.DefaultConfig()
+	config.Address = addr
+
+	client, err := api.NewClient(config)
+	if err != nil {
+		t.Fatal(err)
+	}
+	client.SetToken(token)
+
+	secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{
+		Lease: "1h",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if secret.Auth.LeaseDuration != 3600 {
+		t.Errorf("expected 1h, got %d", secret.Auth.LeaseDuration)
+	}
+
+	renewCreateRequest := &api.TokenCreateRequest{
+		TTL:       "1h",
+		Renewable: new(bool),
+	}
+
+	secret, err = client.Auth().Token().Create(renewCreateRequest)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if secret.Auth.LeaseDuration != 3600 {
+		t.Errorf("expected 1h, got %d", secret.Auth.LeaseDuration)
+	}
+	if secret.Auth.Renewable {
+		t.Errorf("expected non-renewable token")
+	}
+
+	*renewCreateRequest.Renewable = true
+	secret, err = client.Auth().Token().Create(renewCreateRequest)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if secret.Auth.LeaseDuration != 3600 {
+		t.Errorf("expected 1h, got %d", secret.Auth.LeaseDuration)
+	}
+	if !secret.Auth.Renewable {
+		t.Errorf("expected renewable token")
+	}
+
+	explicitMaxCreateRequest := &api.TokenCreateRequest{
+		TTL:            "1h",
+		ExplicitMaxTTL: "1800s",
+	}
+
+	secret, err = client.Auth().Token().Create(explicitMaxCreateRequest)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if secret.Auth.LeaseDuration != 1800 {
+		t.Errorf("expected 1800 seconds, got %d", secret.Auth.LeaseDuration)
+	}
+
+	explicitMaxCreateRequest.ExplicitMaxTTL = "2h"
+	secret, err = client.Auth().Token().Create(explicitMaxCreateRequest)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if secret.Auth.LeaseDuration != 3600 {
+		t.Errorf("expected 3600 seconds, got %d", secret.Auth.LeaseDuration)
+	}
+}
+
+func TestAuthTokenLookup(t *testing.T) {
+	core, _, token := vault.TestCoreUnsealed(t)
+	ln, addr := TestServer(t, core)
+	defer ln.Close()
+
+	config := api.DefaultConfig()
+	config.Address = addr
+
+	client, err := api.NewClient(config)
+	if err != nil {
+		t.Fatal(err)
+	}
+	client.SetToken(token)
+
+	// Create a new token ...
+ secret2, err := client.Auth().Token().Create(&api.TokenCreateRequest{ + Lease: "1h", + }) + if err != nil { + t.Fatal(err) + } + + // lookup details of this token + secret, err := client.Auth().Token().Lookup(secret2.Auth.ClientToken) + if err != nil { + t.Fatalf("unable to lookup details of token, err = %v", err) + } + + if secret.Data["id"] != secret2.Auth.ClientToken { + t.Errorf("Did not get back details about our provided token, id returned=%s", secret.Data["id"]) + } +} + +func TestAuthTokenLookupSelf(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + + config := api.DefaultConfig() + config.Address = addr + + client, err := api.NewClient(config) + if err != nil { + t.Fatal(err) + } + client.SetToken(token) + + // you should be able to lookup your own token + secret, err := client.Auth().Token().LookupSelf() + if err != nil { + t.Fatalf("should be allowed to lookup self, err = %v", err) + } + + if secret.Data["id"] != token { + t.Errorf("Did not get back details about our own (self) token, id returned=%s", secret.Data["id"]) + } + if secret.Data["display_name"] != "root" { + t.Errorf("Did not get back details about our own (self) token, display_name returned=%s", secret.Data["display_name"]) + } +} + +func TestAuthTokenRenew(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + + config := api.DefaultConfig() + config.Address = addr + + client, err := api.NewClient(config) + if err != nil { + t.Fatal(err) + } + client.SetToken(token) + + // The default root token is not renewable, so this should not work + _, err = client.Auth().Token().Renew(token, 0) + if err == nil { + t.Fatal("should not be allowed to renew root token") + } + if !strings.Contains(err.Error(), "invalid lease ID") { + t.Fatalf("wrong error; got %v", err) + } + + // Create a new token that should be renewable + secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{ + Lease: "1h", + }) + if err != nil { + t.Fatal(err) + } + client.SetToken(secret.Auth.ClientToken) + + // Now attempt a renew with the new token + secret, err = client.Auth().Token().Renew(secret.Auth.ClientToken, 3600) + if err != nil { + t.Fatal(err) + } + + if secret.Auth.LeaseDuration != 3600 { + t.Errorf("expected 1h, got %v", secret.Auth.LeaseDuration) + } + + if secret.Auth.Renewable != true { + t.Error("expected lease to be renewable") + } + + // Do the same thing with the self variant + secret, err = client.Auth().Token().RenewSelf(3600) + if err != nil { + t.Fatal(err) + } + + if secret.Auth.LeaseDuration != 3600 { + t.Errorf("expected 1h, got %v", secret.Auth.LeaseDuration) + } + + if secret.Auth.Renewable != true { + t.Error("expected lease to be renewable") + } +} diff --git a/http/cors.go b/http/cors.go new file mode 100644 index 0000000..7e8c311 --- /dev/null +++ b/http/cors.go @@ -0,0 +1,70 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/vault" +) + +var allowedMethods = []string{ + http.MethodDelete, + http.MethodGet, + http.MethodOptions, + http.MethodPost, + http.MethodPut, + "LIST", // LIST is not an official HTTP method, but Vault supports it. 
+} + +func wrapCORSHandler(h http.Handler, core *vault.Core) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + corsConf := core.CORSConfig() + + // If CORS is not enabled or if no Origin header is present (i.e. the request + // is from the Vault CLI. A browser will always send an Origin header), then + // just return a 204. + if !corsConf.IsEnabled() { + h.ServeHTTP(w, req) + return + } + + origin := req.Header.Get("Origin") + requestMethod := req.Header.Get("Access-Control-Request-Method") + + if origin == "" { + h.ServeHTTP(w, req) + return + } + + // Return a 403 if the origin is not allowed to make cross-origin requests. + if !corsConf.IsValidOrigin(origin) { + respondError(w, http.StatusForbidden, fmt.Errorf("origin not allowed")) + return + } + + if req.Method == http.MethodOptions && !strutil.StrListContains(allowedMethods, requestMethod) { + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + w.Header().Set("Access-Control-Allow-Origin", origin) + w.Header().Set("Vary", "Origin") + + // apply headers for preflight requests + if req.Method == http.MethodOptions { + w.Header().Set("Access-Control-Allow-Methods", strings.Join(allowedMethods, ",")) + w.Header().Set("Access-Control-Allow-Headers", strings.Join(corsConf.AllowedHeaders, ",")) + w.Header().Set("Access-Control-Max-Age", "300") + + return + } + + h.ServeHTTP(w, req) + return + }) +} diff --git a/http/custom_header_test.go b/http/custom_header_test.go new file mode 100644 index 0000000..289379a --- /dev/null +++ b/http/custom_header_test.go @@ -0,0 +1,130 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "testing" + + "github.com/hashicorp/vault/vault" +) + +var defaultCustomHeaders = map[string]string{ + "Strict-Transport-Security": "max-age=1; domains", + "Content-Security-Policy": "default-src 'others'", + "X-Custom-Header": "Custom header value default", + "X-Frame-Options": "Deny", + "X-Content-Type-Options": "nosniff", + "Content-Type": "application/json", + "X-XSS-Protection": "1; mode=block", +} + +var customHeader2xx = map[string]string{ + "X-Custom-Header": "Custom header value 2xx", +} + +var customHeader200 = map[string]string{ + "Someheader-200": "200", + "X-Custom-Header": "Custom header value 200", +} + +var customHeader4xx = map[string]string{ + "Someheader-4xx": "4xx", +} + +var customHeader400 = map[string]string{ + "Someheader-400": "400", +} + +var customHeader405 = map[string]string{ + "Someheader-405": "405", +} + +var CustomResponseHeaders = map[string]map[string]string{ + "default": defaultCustomHeaders, + "307": {"X-Custom-Header": "Custom header value 307"}, + "3xx": { + "X-Custom-Header": "Custom header value 3xx", + "X-Vault-Ignored-3xx": "Ignored 3xx", + }, + "200": customHeader200, + "2xx": customHeader2xx, + "400": customHeader400, + "405": customHeader405, + "4xx": customHeader4xx, +} + +func TestCustomResponseHeaders(t *testing.T) { + core, _, token := vault.TestCoreWithCustomResponseHeaderAndUI(t, CustomResponseHeaders, true) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpGet(t, token, addr+"/v1/sys/raw/") + testResponseStatus(t, resp, 404) + testResponseHeader(t, resp, defaultCustomHeaders) + testResponseHeader(t, resp, customHeader4xx) + + resp = testHttpGet(t, token, addr+"/v1/sys/seal") + testResponseStatus(t, resp, 405) + testResponseHeader(t, resp, defaultCustomHeaders) + testResponseHeader(t, resp, customHeader4xx) + 
testResponseHeader(t, resp, customHeader405) + + resp = testHttpGet(t, token, addr+"/v1/sys/leader") + testResponseStatus(t, resp, 200) + testResponseHeader(t, resp, customHeader200) + + resp = testHttpGet(t, token, addr+"/v1/sys/health") + testResponseStatus(t, resp, 200) + testResponseHeader(t, resp, customHeader200) + + resp = testHttpGet(t, token, addr+"/v1/sys/generate-root/attempt") + testResponseStatus(t, resp, 200) + testResponseHeader(t, resp, customHeader200) + + resp = testHttpGet(t, token, addr+"/v1/sys/generate-root/update") + testResponseStatus(t, resp, 400) + testResponseHeader(t, resp, defaultCustomHeaders) + testResponseHeader(t, resp, customHeader4xx) + testResponseHeader(t, resp, customHeader400) + + resp = testHttpGet(t, token, addr+"/v1/sys/") + testResponseStatus(t, resp, 404) + testResponseHeader(t, resp, defaultCustomHeaders) + testResponseHeader(t, resp, customHeader4xx) + + resp = testHttpGet(t, token, addr+"/v1/sys") + testResponseStatus(t, resp, 404) + testResponseHeader(t, resp, defaultCustomHeaders) + testResponseHeader(t, resp, customHeader4xx) + + resp = testHttpGet(t, token, addr+"/v1/") + testResponseStatus(t, resp, 404) + testResponseHeader(t, resp, defaultCustomHeaders) + testResponseHeader(t, resp, customHeader4xx) + + resp = testHttpGet(t, token, addr+"/v1") + testResponseStatus(t, resp, 404) + testResponseHeader(t, resp, defaultCustomHeaders) + testResponseHeader(t, resp, customHeader4xx) + + resp = testHttpGet(t, token, addr+"/") + testResponseStatus(t, resp, 200) + testResponseHeader(t, resp, customHeader200) + + resp = testHttpGet(t, token, addr+"/ui") + testResponseStatus(t, resp, 200) + testResponseHeader(t, resp, customHeader200) + + resp = testHttpGet(t, token, addr+"/ui/") + testResponseStatus(t, resp, 200) + testResponseHeader(t, resp, customHeader200) + + resp = testHttpPost(t, token, addr+"/v1/sys/auth/foo", map[string]interface{}{ + "type": "noop", + "description": "foo", + }) + testResponseStatus(t, resp, 204) + testResponseHeader(t, resp, customHeader2xx) +} diff --git a/http/events.go b/http/events.go new file mode 100644 index 0000000..072fcd6 --- /dev/null +++ b/http/events.go @@ -0,0 +1,165 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "context" + "errors" + "fmt" + "net/http" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/vault/eventbus" + "nhooyr.io/websocket" +) + +type eventSubscribeArgs struct { + ctx context.Context + logger hclog.Logger + events *eventbus.EventBus + ns *namespace.Namespace + pattern string + conn *websocket.Conn + json bool +} + +// handleEventsSubscribeWebsocket runs forever, returning a websocket error code and reason +// only if the connection closes or there was an error. 
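+// A zero status code alongside a non-nil error signals the caller to derive
+// the close status from the error itself, falling back to
+// websocket.StatusInternalError.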
+func handleEventsSubscribeWebsocket(args eventSubscribeArgs) (websocket.StatusCode, string, error) { + ctx := args.ctx + logger := args.logger + ch, cancel, err := args.events.Subscribe(ctx, args.ns, args.pattern) + if err != nil { + logger.Info("Error subscribing", "error", err) + return websocket.StatusUnsupportedData, "Error subscribing", nil + } + defer cancel() + + for { + select { + case <-ctx.Done(): + logger.Info("Websocket context is done, closing the connection") + return websocket.StatusNormalClosure, "", nil + case message := <-ch: + logger.Debug("Sending message to websocket", "message", message.Payload) + var messageBytes []byte + var messageType websocket.MessageType + if args.json { + var ok bool + messageBytes, ok = message.Format("cloudevents-json") + if !ok { + logger.Warn("Could not get cloudevents JSON format") + return 0, "", errors.New("could not get cloudevents JSON format") + } + messageType = websocket.MessageText + } else { + messageBytes, err = proto.Marshal(message.Payload.(*logical.EventReceived)) + messageType = websocket.MessageBinary + } + if err != nil { + logger.Warn("Could not serialize websocket event", "error", err) + return 0, "", err + } + err = args.conn.Write(ctx, messageType, messageBytes) + if err != nil { + return 0, "", err + } + } + } +} + +func handleEventsSubscribe(core *vault.Core, req *logical.Request) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + logger := core.Logger().Named("events-subscribe") + logger.Debug("Got request to", "url", r.URL, "version", r.Proto) + + ctx := r.Context() + + // ACL check + _, _, err := core.CheckToken(ctx, req, false) + if err != nil { + if errors.Is(err, logical.ErrPermissionDenied) { + respondError(w, http.StatusForbidden, logical.ErrPermissionDenied) + return + } + logger.Debug("Error validating token", "error", err) + respondError(w, http.StatusInternalServerError, fmt.Errorf("error validating token")) + return + } + + ns, err := namespace.FromContext(ctx) + if err != nil { + logger.Info("Could not find namespace", "error", err) + respondError(w, http.StatusInternalServerError, fmt.Errorf("could not find namespace")) + return + } + + prefix := "/v1/sys/events/subscribe/" + if ns.ID != namespace.RootNamespaceID { + prefix = fmt.Sprintf("/v1/%ssys/events/subscribe/", ns.Path) + } + pattern := strings.TrimSpace(strings.TrimPrefix(r.URL.Path, prefix)) + if pattern == "" { + respondError(w, http.StatusBadRequest, fmt.Errorf("did not specify eventType to subscribe to")) + return + } + + json := false + jsonRaw := r.URL.Query().Get("json") + if jsonRaw != "" { + var err error + json, err = strconv.ParseBool(jsonRaw) + if err != nil { + respondError(w, http.StatusBadRequest, fmt.Errorf("invalid parameter for JSON: %v", jsonRaw)) + return + } + } + + conn, err := websocket.Accept(w, r, nil) + if err != nil { + logger.Info("Could not accept as websocket", "error", err) + respondError(w, http.StatusInternalServerError, fmt.Errorf("could not accept as websocket")) + return + } + + // we don't expect any incoming messages + ctx = conn.CloseRead(ctx) + // start the pinger + go func() { + for { + time.Sleep(30 * time.Second) // not too aggressive, but keep the HTTP connection alive + err := conn.Ping(ctx) + if err != nil { + return + } + } + }() + + closeStatus, closeReason, err := handleEventsSubscribeWebsocket(eventSubscribeArgs{ctx, logger, core.Events(), ns, pattern, conn, json}) + if err != nil { + closeStatus = websocket.CloseStatus(err) + if closeStatus == -1 { + 
closeStatus = websocket.StatusInternalError
+			}
+			closeReason = fmt.Sprintf("Internal error: %v", err)
+			logger.Debug("Error from websocket handler", "error", err)
+		}
+		// Close() will panic if the reason is greater than this length
+		if len(closeReason) > 123 {
+			logger.Debug("Truncated close reason", "closeReason", closeReason)
+			closeReason = closeReason[:123]
+		}
+		err = conn.Close(closeStatus, closeReason)
+		if err != nil {
+			logger.Debug("Error closing websocket", "error", err)
+		}
+	})
+}
diff --git a/http/events_test.go b/http/events_test.go
new file mode 100644
index 0000000..b5ce0a1
--- /dev/null
+++ b/http/events_test.go
@@ -0,0 +1,199 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package http
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strings"
+	"sync/atomic"
+	"testing"
+	"time"
+
+	"github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/helper/namespace"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/hashicorp/vault/vault"
+	"nhooyr.io/websocket"
+)
+
+// TestEventsSubscribe tests the websocket endpoint for subscribing to events
+// by generating some events.
+func TestEventsSubscribe(t *testing.T) {
+	core := vault.TestCore(t)
+	ln, addr := TestServer(t, core)
+	defer ln.Close()
+
+	// unseal the core
+	keys, token := vault.TestCoreInit(t, core)
+	for _, key := range keys {
+		_, err := core.Unseal(key)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	stop := atomic.Bool{}
+
+	const eventType = "abc"
+
+	// send some events
+	go func() {
+		for !stop.Load() {
+			id, err := uuid.GenerateUUID()
+			if err != nil {
+				core.Logger().Info("Error generating UUID, exiting sender", "error", err)
+			}
+			pluginInfo := &logical.EventPluginInfo{
+				MountPath: "secret",
+			}
+			err = core.Events().SendInternal(namespace.RootContext(context.Background()), namespace.RootNamespace, pluginInfo, logical.EventType(eventType), &logical.EventData{
+				Id:        id,
+				Metadata:  nil,
+				EntityIds: nil,
+				Note:      "testing",
+			})
+			if err != nil {
+				core.Logger().Info("Error sending event, exiting sender", "error", err)
+			}
+			time.Sleep(100 * time.Millisecond)
+		}
+	}()
+
+	t.Cleanup(func() {
+		stop.Store(true)
+	})
+
+	ctx := context.Background()
+	wsAddr := strings.Replace(addr, "http", "ws", 1)
+
+	testCases := []struct {
+		json bool
+	}{{true}, {false}}
+
+	for _, testCase := range testCases {
+		url := fmt.Sprintf("%s/v1/sys/events/subscribe/%s?json=%v", wsAddr, eventType, testCase.json)
+		conn, _, err := websocket.Dial(ctx, url, &websocket.DialOptions{
+			HTTPHeader: http.Header{"x-vault-token": []string{token}},
+		})
+		if err != nil {
+			t.Fatal(err)
+		}
+		t.Cleanup(func() {
+			conn.Close(websocket.StatusNormalClosure, "")
+		})
+
+		_, msg, err := conn.Read(ctx)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if testCase.json {
+			event := map[string]interface{}{}
+			err = json.Unmarshal(msg, &event)
+			if err != nil {
+				t.Fatal(err)
+			}
+			t.Log(string(msg))
+			data := event["data"].(map[string]interface{})
+			if actualType := data["event_type"].(string); actualType != eventType {
+				t.Fatalf("Expected event type %s, got %s", eventType, actualType)
+			}
+			pluginInfo, ok := data["plugin_info"].(map[string]interface{})
+			if !ok || pluginInfo == nil {
+				t.Fatalf("No plugin_info object: %v", data)
+			}
+			mountPath, ok := pluginInfo["mount_path"].(string)
+			if !ok || mountPath != "secret" {
+				t.Fatalf("Wrong mount_path: %v", data)
+			}
+			innerEvent := data["event"].(map[string]interface{})
+			if innerEvent["id"].(string) !=
event["id"].(string) { + t.Fatalf("IDs don't match, expected %s, got %s", innerEvent["id"].(string), event["id"].(string)) + } + if innerEvent["note"].(string) != "testing" { + t.Fatalf("Expected 'testing', got %s", innerEvent["note"].(string)) + } + + checkRequiredCloudEventsFields(t, event) + } + } +} + +func checkRequiredCloudEventsFields(t *testing.T, event map[string]interface{}) { + t.Helper() + for _, attr := range []string{"id", "source", "specversion", "type"} { + if v, ok := event[attr]; !ok { + t.Errorf("Missing attribute %s", attr) + } else if str, ok := v.(string); !ok { + t.Errorf("Expected %s to be string but got %T", attr, v) + } else if str == "" { + t.Errorf("%s was empty string", attr) + } + } +} + +// TestEventsSubscribeAuth tests that unauthenticated and unauthorized subscriptions +// fail correctly. +func TestEventsSubscribeAuth(t *testing.T) { + core := vault.TestCore(t) + ln, addr := TestServer(t, core) + defer ln.Close() + + // unseal the core + keys, root := vault.TestCoreInit(t, core) + for _, key := range keys { + _, err := core.Unseal(key) + if err != nil { + t.Fatal(err) + } + } + + var nonPrivilegedToken string + // Fetch a valid non privileged token. + { + config := api.DefaultConfig() + config.Address = addr + + client, err := api.NewClient(config) + if err != nil { + t.Fatal(err) + } + client.SetToken(root) + + secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{Policies: []string{"default"}}) + if err != nil { + t.Fatal(err) + } + if secret.Auth.ClientToken == "" { + t.Fatal("Failed to fetch a non privileged token") + } + nonPrivilegedToken = secret.Auth.ClientToken + } + + ctx := context.Background() + wsAddr := strings.Replace(addr, "http", "ws", 1) + + // Get a 403 with no token. + _, resp, err := websocket.Dial(ctx, wsAddr+"/v1/sys/events/subscribe/abc", nil) + if err == nil { + t.Error("Expected websocket error but got none") + } + if resp == nil || resp.StatusCode != http.StatusForbidden { + t.Errorf("Expected 403 but got %+v", resp) + } + + // Get a 403 with a non privileged token. + _, resp, err = websocket.Dial(ctx, wsAddr+"/v1/sys/events/subscribe/abc", &websocket.DialOptions{ + HTTPHeader: http.Header{"x-vault-token": []string{nonPrivilegedToken}}, + }) + if err == nil { + t.Error("Expected websocket error but got none") + } + if resp == nil || resp.StatusCode != http.StatusForbidden { + t.Errorf("Expected 403 but got %+v", resp) + } +} diff --git a/http/forwarded_for_test.go b/http/forwarded_for_test.go new file mode 100644 index 0000000..89bc62a --- /dev/null +++ b/http/forwarded_for_test.go @@ -0,0 +1,258 @@ +// Copyright (c) HashiCorp, Inc. 
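+//
+// A minimal sketch of the listener knobs exercised by the tests below
+// (values are illustrative; the fields come from configutil.Listener):
+//
+//	listenerConfig := getListenerConfigForMarshalerTest(goodAddr)
+//	listenerConfig.XForwardedForRejectNotPresent = true    // error when the header is absent
+//	listenerConfig.XForwardedForRejectNotAuthorized = true // reject untrusted client IPs
+//	listenerConfig.XForwardedForHopSkips = 1               // trust the entry one hop in from the end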
+// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "bytes" + "net/http" + "strings" + "testing" + + sockaddr "github.com/hashicorp/go-sockaddr" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/vault" +) + +func getListenerConfigForMarshalerTest(addr sockaddr.IPAddr) *configutil.Listener { + return &configutil.Listener{ + XForwardedForAuthorizedAddrs: []*sockaddr.SockAddrMarshaler{ + { + SockAddr: addr, + }, + }, + } +} + +func TestHandler_XForwardedFor(t *testing.T) { + goodAddr, err := sockaddr.NewIPAddr("127.0.0.1") + if err != nil { + t.Fatal(err) + } + + badAddr, err := sockaddr.NewIPAddr("1.2.3.4") + if err != nil { + t.Fatal(err) + } + + // First: test reject not present + t.Run("reject_not_present", func(t *testing.T) { + t.Parallel() + testHandler := func(props *vault.HandlerProperties) http.Handler { + origHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(r.RemoteAddr)) + }) + listenerConfig := getListenerConfigForMarshalerTest(goodAddr) + listenerConfig.XForwardedForRejectNotPresent = true + return WrapForwardedForHandler(origHandler, listenerConfig) + } + + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: HandlerFunc(testHandler), + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + + req := client.NewRequest("GET", "/") + _, err := client.RawRequest(req) + if err == nil { + t.Fatal("expected error") + } + if !strings.Contains(err.Error(), "missing x-forwarded-for") { + t.Fatalf("bad error message: %v", err) + } + req = client.NewRequest("GET", "/") + req.Headers = make(http.Header) + req.Headers.Set("x-forwarded-for", "1.2.3.4") + resp, err := client.RawRequest(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + buf := bytes.NewBuffer(nil) + buf.ReadFrom(resp.Body) + if !strings.HasPrefix(buf.String(), "1.2.3.4:") { + t.Fatalf("bad body: %s", buf.String()) + } + }) + + // Next: test allow unauth + t.Run("allow_unauth", func(t *testing.T) { + t.Parallel() + testHandler := func(props *vault.HandlerProperties) http.Handler { + origHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(r.RemoteAddr)) + }) + listenerConfig := getListenerConfigForMarshalerTest(badAddr) + listenerConfig.XForwardedForRejectNotPresent = true + return WrapForwardedForHandler(origHandler, listenerConfig) + } + + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: HandlerFunc(testHandler), + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + + req := client.NewRequest("GET", "/") + req.Headers = make(http.Header) + req.Headers.Set("x-forwarded-for", "5.6.7.8") + resp, err := client.RawRequest(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + buf := bytes.NewBuffer(nil) + buf.ReadFrom(resp.Body) + if !strings.HasPrefix(buf.String(), "127.0.0.1:") { + t.Fatalf("bad body: %s", buf.String()) + } + }) + + // Next: test fail unauth + t.Run("fail_unauth", func(t *testing.T) { + t.Parallel() + testHandler := func(props *vault.HandlerProperties) http.Handler { + origHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(r.RemoteAddr)) + }) + listenerConfig := getListenerConfigForMarshalerTest(badAddr) + listenerConfig.XForwardedForRejectNotPresent = true + 
listenerConfig.XForwardedForRejectNotAuthorized = true + return WrapForwardedForHandler(origHandler, listenerConfig) + } + + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: HandlerFunc(testHandler), + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + + req := client.NewRequest("GET", "/") + req.Headers = make(http.Header) + req.Headers.Set("x-forwarded-for", "5.6.7.8") + _, err := client.RawRequest(req) + if err == nil { + t.Fatal("expected error") + } + if !strings.Contains(err.Error(), "not authorized for x-forwarded-for") { + t.Fatalf("bad error message: %v", err) + } + }) + + // Next: test bad hops (too many) + t.Run("too_many_hops", func(t *testing.T) { + t.Parallel() + testHandler := func(props *vault.HandlerProperties) http.Handler { + origHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(r.RemoteAddr)) + }) + listenerConfig := getListenerConfigForMarshalerTest(goodAddr) + listenerConfig.XForwardedForRejectNotPresent = true + listenerConfig.XForwardedForRejectNotAuthorized = true + listenerConfig.XForwardedForHopSkips = 4 + return WrapForwardedForHandler(origHandler, listenerConfig) + } + + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: HandlerFunc(testHandler), + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + + req := client.NewRequest("GET", "/") + req.Headers = make(http.Header) + req.Headers.Set("x-forwarded-for", "2.3.4.5,3.4.5.6") + _, err := client.RawRequest(req) + if err == nil { + t.Fatal("expected error") + } + if !strings.Contains(err.Error(), "would skip before earliest") { + t.Fatalf("bad error message: %v", err) + } + }) + + // Next: test picking correct value + t.Run("correct_hop_skipping", func(t *testing.T) { + t.Parallel() + testHandler := func(props *vault.HandlerProperties) http.Handler { + origHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(r.RemoteAddr)) + }) + listenerConfig := getListenerConfigForMarshalerTest(goodAddr) + listenerConfig.XForwardedForRejectNotPresent = true + listenerConfig.XForwardedForRejectNotAuthorized = true + listenerConfig.XForwardedForHopSkips = 1 + return WrapForwardedForHandler(origHandler, listenerConfig) + } + + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: HandlerFunc(testHandler), + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + + req := client.NewRequest("GET", "/") + req.Headers = make(http.Header) + req.Headers.Set("x-forwarded-for", "2.3.4.5,3.4.5.6,4.5.6.7,5.6.7.8") + resp, err := client.RawRequest(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + buf := bytes.NewBuffer(nil) + buf.ReadFrom(resp.Body) + if !strings.HasPrefix(buf.String(), "4.5.6.7:") { + t.Fatalf("bad body: %s", buf.String()) + } + }) + + // Next: multi-header approach + t.Run("correct_hop_skipping_multi_header", func(t *testing.T) { + t.Parallel() + testHandler := func(props *vault.HandlerProperties) http.Handler { + origHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(r.RemoteAddr)) + }) + listenerConfig := getListenerConfigForMarshalerTest(goodAddr) + listenerConfig.XForwardedForRejectNotPresent = true + listenerConfig.XForwardedForRejectNotAuthorized = true + listenerConfig.XForwardedForHopSkips = 1 + 
return WrapForwardedForHandler(origHandler, listenerConfig) + } + + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: HandlerFunc(testHandler), + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + + req := client.NewRequest("GET", "/") + req.Headers = make(http.Header) + req.Headers.Add("x-forwarded-for", "2.3.4.5") + req.Headers.Add("x-forwarded-for", "3.4.5.6,4.5.6.7") + req.Headers.Add("x-forwarded-for", "5.6.7.8") + resp, err := client.RawRequest(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + buf := bytes.NewBuffer(nil) + buf.ReadFrom(resp.Body) + if !strings.HasPrefix(buf.String(), "4.5.6.7:") { + t.Fatalf("bad body: %s", buf.String()) + } + }) +} diff --git a/http/forwarding_bench_test.go b/http/forwarding_bench_test.go new file mode 100644 index 0000000..0c3f5e2 --- /dev/null +++ b/http/forwarding_bench_test.go @@ -0,0 +1,106 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + "testing" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/builtin/logical/transit" + "github.com/hashicorp/vault/helper/benchhelpers" + "github.com/hashicorp/vault/helper/forwarding" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + "golang.org/x/net/http2" +) + +func BenchmarkHTTP_Forwarding_Stress(b *testing.B) { + testPlaintextB64 := "dGhlIHF1aWNrIGJyb3duIGZveA==" + + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "transit": transit.Factory, + }, + } + + cluster := vault.NewTestCluster(benchhelpers.TBtoT(b), coreConfig, &vault.TestClusterOptions{ + HandlerFunc: Handler, + Logger: logging.NewVaultLoggerWithWriter(ioutil.Discard, log.Error), + }) + cluster.Start() + defer cluster.Cleanup() + cores := cluster.Cores + + // make it easy to get access to the active + core := cores[0].Core + vault.TestWaitActive(benchhelpers.TBtoT(b), core) + + handler := cores[0].Handler + host := fmt.Sprintf("https://127.0.0.1:%d/v1/transit/", cores[0].Listeners[0].Address.Port) + + transport := &http.Transport{ + TLSClientConfig: cores[0].TLSConfig(), + } + if err := http2.ConfigureTransport(transport); err != nil { + b.Fatal(err) + } + + client := &http.Client{ + Transport: transport, + } + + req, err := http.NewRequest("POST", fmt.Sprintf("https://127.0.0.1:%d/v1/sys/mounts/transit", cores[0].Listeners[0].Address.Port), + bytes.NewBuffer([]byte("{\"type\": \"transit\"}"))) + if err != nil { + b.Fatal(err) + } + req.Header.Set(consts.AuthHeaderName, cluster.RootToken) + _, err = client.Do(req) + if err != nil { + b.Fatal(err) + } + + var numOps uint32 + + doReq := func(b *testing.B, method, url string, body io.Reader) { + req, err := http.NewRequest(method, url, body) + if err != nil { + b.Fatal(err) + } + req.Header.Set(consts.AuthHeaderName, cluster.RootToken) + w := forwarding.NewRPCResponseWriter() + handler.ServeHTTP(w, req) + switch w.StatusCode() { + case 200: + case 204: + if !strings.Contains(url, "keys") { + b.Fatal("got 204") + } + default: + b.Fatalf("bad status code: %d, resp: %s", w.StatusCode(), w.Body().String()) + } + // b.Log(w.Body().String()) + numOps++ + } + + doReq(b, "POST", host+"keys/test1", bytes.NewBuffer([]byte("{}"))) + keyUrl := host + "encrypt/test1" + reqBuf := []byte(fmt.Sprintf("{\"plaintext\": \"%s\"}", 
testPlaintextB64)) + + b.Run("doreq", func(b *testing.B) { + for i := 0; i < b.N; i++ { + doReq(b, "POST", keyUrl, bytes.NewReader(reqBuf)) + } + }) + + b.Logf("total ops: %d", numOps) +} diff --git a/http/forwarding_test.go b/http/forwarding_test.go new file mode 100644 index 0000000..51cc2c0 --- /dev/null +++ b/http/forwarding_test.go @@ -0,0 +1,608 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "math/rand" + "net/http" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/http2" + + cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/vault/api" + credCert "github.com/hashicorp/vault/builtin/credential/cert" + "github.com/hashicorp/vault/builtin/logical/transit" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/keysutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +func TestHTTP_Fallback_Bad_Address(t *testing.T) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "transit": transit.Factory, + }, + ClusterAddr: "https://127.3.4.1:8382", + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: Handler, + }) + cluster.Start() + defer cluster.Cleanup() + cores := cluster.Cores + + // make it easy to get access to the active + core := cores[0].Core + vault.TestWaitActive(t, core) + + addrs := []string{ + fmt.Sprintf("https://127.0.0.1:%d", cores[1].Listeners[0].Address.Port), + fmt.Sprintf("https://127.0.0.1:%d", cores[2].Listeners[0].Address.Port), + } + + for _, addr := range addrs { + config := api.DefaultConfig() + config.Address = addr + config.HttpClient.Transport.(*http.Transport).TLSClientConfig = cores[0].TLSConfig() + + client, err := api.NewClient(config) + if err != nil { + t.Fatal(err) + } + client.SetToken(cluster.RootToken) + + secret, err := client.Auth().Token().LookupSelf() + if err != nil { + t.Fatal(err) + } + if secret == nil { + t.Fatal("secret is nil") + } + if secret.Data["id"].(string) != cluster.RootToken { + t.Fatal("token mismatch") + } + } +} + +func TestHTTP_Fallback_Disabled(t *testing.T) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "transit": transit.Factory, + }, + ClusterAddr: "empty", + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: Handler, + }) + cluster.Start() + defer cluster.Cleanup() + cores := cluster.Cores + + // make it easy to get access to the active + core := cores[0].Core + vault.TestWaitActive(t, core) + + addrs := []string{ + fmt.Sprintf("https://127.0.0.1:%d", cores[1].Listeners[0].Address.Port), + fmt.Sprintf("https://127.0.0.1:%d", cores[2].Listeners[0].Address.Port), + } + + for _, addr := range addrs { + config := api.DefaultConfig() + config.Address = addr + config.HttpClient.Transport.(*http.Transport).TLSClientConfig = cores[0].TLSConfig() + + client, err := api.NewClient(config) + if err != nil { + t.Fatal(err) + } + client.SetToken(cluster.RootToken) + + secret, err := client.Auth().Token().LookupSelf() + if err != nil { + t.Fatal(err) + } + if secret == nil { + t.Fatal("secret is nil") + } + if secret.Data["id"].(string) != cluster.RootToken { + t.Fatal("token mismatch") + } + } +} + +// This function recreates the fuzzy testing from transit to pipe a large +// number of requests from the standbys to the active node. 
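+//
+// Each worker below repeatedly issues encrypt, decrypt, rotate, and
+// min-decryption-version operations against the standby listeners; every
+// request must be transparently forwarded to the active node, and the test
+// fails unless all attempted operations succeed.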
+func TestHTTP_Forwarding_Stress(t *testing.T) { + testHTTP_Forwarding_Stress_Common(t, false, 50) + testHTTP_Forwarding_Stress_Common(t, true, 50) +} + +func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint32) { + testPlaintext := "the quick brown fox" + testPlaintextB64 := "dGhlIHF1aWNrIGJyb3duIGZveA==" + + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "transit": transit.Factory, + }, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: Handler, + }) + cluster.Start() + defer cluster.Cleanup() + cores := cluster.Cores + + // make it easy to get access to the active + core := cores[0].Core + vault.TestWaitActive(t, core) + + wg := sync.WaitGroup{} + + funcs := []string{"encrypt", "decrypt", "rotate", "change_min_version"} + keys := []string{"test1", "test2", "test3"} + + hosts := []string{ + fmt.Sprintf("https://127.0.0.1:%d/v1/transit/", cores[1].Listeners[0].Address.Port), + fmt.Sprintf("https://127.0.0.1:%d/v1/transit/", cores[2].Listeners[0].Address.Port), + } + + transport := &http.Transport{ + TLSClientConfig: cores[0].TLSConfig(), + } + if err := http2.ConfigureTransport(transport); err != nil { + t.Fatal(err) + } + + client := &http.Client{ + Transport: transport, + CheckRedirect: func(*http.Request, []*http.Request) error { + return fmt.Errorf("redirects not allowed in this test") + }, + } + + // core.Logger().Printf("[TRACE] mounting transit") + req, err := http.NewRequest("POST", fmt.Sprintf("https://127.0.0.1:%d/v1/sys/mounts/transit", cores[0].Listeners[0].Address.Port), + bytes.NewBuffer([]byte("{\"type\": \"transit\"}"))) + if err != nil { + t.Fatal(err) + } + req.Header.Set(consts.AuthHeaderName, cluster.RootToken) + _, err = client.Do(req) + if err != nil { + t.Fatal(err) + } + // core.Logger().Printf("[TRACE] done mounting transit") + + var totalOps *uint32 = new(uint32) + var successfulOps *uint32 = new(uint32) + var key1ver *int32 = new(int32) + *key1ver = 1 + var key2ver *int32 = new(int32) + *key2ver = 1 + var key3ver *int32 = new(int32) + *key3ver = 1 + var numWorkers *uint32 = new(uint32) + *numWorkers = 50 + var numWorkersStarted *uint32 = new(uint32) + var waitLock sync.Mutex + waitCond := sync.NewCond(&waitLock) + + // This is the goroutine loop + doFuzzy := func(id int, parallel bool) { + var myTotalOps uint32 + var mySuccessfulOps uint32 + var keyVer int32 = 1 + // Check for panics, otherwise notify we're done + defer func() { + if err := recover(); err != nil { + core.Logger().Error("got a panic", "error", err) + t.Fail() + } + atomic.AddUint32(totalOps, myTotalOps) + atomic.AddUint32(successfulOps, mySuccessfulOps) + wg.Done() + }() + + // Holds the latest encrypted value for each key + latestEncryptedText := map[string]string{} + + client := &http.Client{ + Transport: transport, + } + + var chosenFunc, chosenKey, chosenHost string + + myRand := rand.New(rand.NewSource(int64(id) * 400)) + + doReq := func(method, url string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest(method, url, body) + if err != nil { + return nil, err + } + req.Header.Set(consts.AuthHeaderName, cluster.RootToken) + resp, err := client.Do(req) + if err != nil { + return nil, err + } + return resp, nil + } + + doResp := func(resp *http.Response) (*api.Secret, error) { + if resp == nil { + return nil, fmt.Errorf("nil response") + } + defer resp.Body.Close() + + // Make sure we weren't redirected + if resp.StatusCode > 300 && resp.StatusCode < 400 { + return nil, 
fmt.Errorf("got status code %d, resp was %#v", resp.StatusCode, *resp) + } + + result := &api.Response{Response: resp} + err := result.Error() + if err != nil { + return nil, err + } + + secret, err := api.ParseSecret(result.Body) + if err != nil { + return nil, err + } + + return secret, nil + } + + for _, chosenHost := range hosts { + for _, chosenKey := range keys { + // Try to write the key to make sure it exists + _, err := doReq("POST", chosenHost+"keys/"+fmt.Sprintf("%s-%t", chosenKey, parallel), bytes.NewBuffer([]byte("{}"))) + if err != nil { + panic(err) + } + } + } + + if !parallel { + chosenHost = hosts[id%len(hosts)] + chosenKey = fmt.Sprintf("key-%t-%d", parallel, id) + + _, err := doReq("POST", chosenHost+"keys/"+chosenKey, bytes.NewBuffer([]byte("{}"))) + if err != nil { + panic(err) + } + } + + atomic.AddUint32(numWorkersStarted, 1) + + waitCond.L.Lock() + for atomic.LoadUint32(numWorkersStarted) != atomic.LoadUint32(numWorkers) { + waitCond.Wait() + } + waitCond.L.Unlock() + waitCond.Broadcast() + + core.Logger().Debug("Starting goroutine", "id", id) + + startTime := time.Now() + for { + // Stop after 10 seconds + if time.Now().Sub(startTime) > 10*time.Second { + return + } + + myTotalOps++ + + // Pick a function and a key + chosenFunc = funcs[myRand.Int()%len(funcs)] + if parallel { + chosenKey = fmt.Sprintf("%s-%t", keys[myRand.Int()%len(keys)], parallel) + chosenHost = hosts[myRand.Int()%len(hosts)] + } + + switch chosenFunc { + // Encrypt our plaintext and store the result + case "encrypt": + // core.Logger().Printf("[TRACE] %s, %s, %d", chosenFunc, chosenKey, id) + resp, err := doReq("POST", chosenHost+"encrypt/"+chosenKey, bytes.NewBuffer([]byte(fmt.Sprintf("{\"plaintext\": \"%s\"}", testPlaintextB64)))) + if err != nil { + panic(err) + } + + secret, err := doResp(resp) + if err != nil { + panic(err) + } + + latest := secret.Data["ciphertext"].(string) + if latest == "" { + panic(fmt.Errorf("bad ciphertext")) + } + latestEncryptedText[chosenKey] = secret.Data["ciphertext"].(string) + + mySuccessfulOps++ + + // Decrypt the ciphertext and compare the result + case "decrypt": + ct := latestEncryptedText[chosenKey] + if ct == "" { + mySuccessfulOps++ + continue + } + + // core.Logger().Printf("[TRACE] %s, %s, %d", chosenFunc, chosenKey, id) + resp, err := doReq("POST", chosenHost+"decrypt/"+chosenKey, bytes.NewBuffer([]byte(fmt.Sprintf("{\"ciphertext\": \"%s\"}", ct)))) + if err != nil { + panic(err) + } + + secret, err := doResp(resp) + if err != nil { + // This could well happen since the min version is jumping around + if strings.Contains(err.Error(), keysutil.ErrTooOld) { + mySuccessfulOps++ + continue + } + panic(err) + } + + ptb64 := secret.Data["plaintext"].(string) + pt, err := base64.StdEncoding.DecodeString(ptb64) + if err != nil { + panic(fmt.Errorf("got an error decoding base64 plaintext: %v", err)) + } + if string(pt) != testPlaintext { + panic(fmt.Errorf("got bad plaintext back: %s", pt)) + } + + mySuccessfulOps++ + + // Rotate to a new key version + case "rotate": + // core.Logger().Printf("[TRACE] %s, %s, %d", chosenFunc, chosenKey, id) + _, err := doReq("POST", chosenHost+"keys/"+chosenKey+"/rotate", bytes.NewBuffer([]byte("{}"))) + if err != nil { + panic(err) + } + if parallel { + switch chosenKey { + case "test1": + atomic.AddInt32(key1ver, 1) + case "test2": + atomic.AddInt32(key2ver, 1) + case "test3": + atomic.AddInt32(key3ver, 1) + } + } else { + keyVer++ + } + + mySuccessfulOps++ + + // Change the min version, which also tests the archive 
functionality + case "change_min_version": + var latestVersion int32 = keyVer + if parallel { + switch chosenKey { + case "test1": + latestVersion = atomic.LoadInt32(key1ver) + case "test2": + latestVersion = atomic.LoadInt32(key2ver) + case "test3": + latestVersion = atomic.LoadInt32(key3ver) + } + } + + setVersion := (myRand.Int31() % latestVersion) + 1 + + // core.Logger().Printf("[TRACE] %s, %s, %d, new min version %d", chosenFunc, chosenKey, id, setVersion) + + _, err := doReq("POST", chosenHost+"keys/"+chosenKey+"/config", bytes.NewBuffer([]byte(fmt.Sprintf("{\"min_decryption_version\": %d}", setVersion)))) + if err != nil { + panic(err) + } + + mySuccessfulOps++ + } + } + } + + atomic.StoreUint32(numWorkers, num) + + // Spawn some of these workers for 10 seconds + for i := 0; i < int(atomic.LoadUint32(numWorkers)); i++ { + wg.Add(1) + // core.Logger().Printf("[TRACE] spawning %d", i) + go doFuzzy(i+1, parallel) + } + + // Wait for them all to finish + wg.Wait() + + if *totalOps == 0 || *totalOps != *successfulOps { + t.Fatalf("total/successful ops zero or mismatch: %d/%d; parallel: %t, num %d", *totalOps, *successfulOps, parallel, num) + } + t.Logf("total operations tried: %d, total successful: %d; parallel: %t, num %d", *totalOps, *successfulOps, parallel, num) +} + +// This tests TLS connection state forwarding by ensuring that we can use a +// client TLS to authenticate against the cert backend +func TestHTTP_Forwarding_ClientTLS(t *testing.T) { + coreConfig := &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "cert": credCert.Factory, + }, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: Handler, + }) + cluster.Start() + defer cluster.Cleanup() + cores := cluster.Cores + + // make it easy to get access to the active + core := cores[0].Core + vault.TestWaitActive(t, core) + + transport := cleanhttp.DefaultTransport() + transport.TLSClientConfig = cores[0].TLSConfig() + if err := http2.ConfigureTransport(transport); err != nil { + t.Fatal(err) + } + + client := &http.Client{ + Transport: transport, + } + + req, err := http.NewRequest("POST", fmt.Sprintf("https://127.0.0.1:%d/v1/sys/auth/cert", cores[0].Listeners[0].Address.Port), + bytes.NewBuffer([]byte("{\"type\": \"cert\"}"))) + if err != nil { + t.Fatal(err) + } + req.Header.Set(consts.AuthHeaderName, cluster.RootToken) + _, err = client.Do(req) + if err != nil { + t.Fatal(err) + } + + type certConfig struct { + Certificate string `json:"certificate"` + Policies string `json:"policies"` + } + encodedCertConfig, err := json.Marshal(&certConfig{ + Certificate: string(cluster.CACertPEM), + Policies: "default", + }) + if err != nil { + t.Fatal(err) + } + req, err = http.NewRequest("POST", fmt.Sprintf("https://127.0.0.1:%d/v1/auth/cert/certs/test", cores[0].Listeners[0].Address.Port), + bytes.NewBuffer(encodedCertConfig)) + if err != nil { + t.Fatal(err) + } + req.Header.Set(consts.AuthHeaderName, cluster.RootToken) + _, err = client.Do(req) + if err != nil { + t.Fatal(err) + } + + addrs := []string{ + fmt.Sprintf("https://127.0.0.1:%d", cores[1].Listeners[0].Address.Port), + fmt.Sprintf("https://127.0.0.1:%d", cores[2].Listeners[0].Address.Port), + } + + for i, addr := range addrs { + // Ensure we can't possibly use lingering connections even though it should + // be to a different address + transport = cleanhttp.DefaultTransport() + // i starts at zero but cores in addrs start at 1 + transport.TLSClientConfig = cores[i+1].TLSConfig() + if err := 
http2.ConfigureTransport(transport); err != nil { + t.Fatal(err) + } + httpClient := &http.Client{ + Transport: transport, + CheckRedirect: func(*http.Request, []*http.Request) error { + return fmt.Errorf("redirects not allowed in this test") + }, + } + client, err := api.NewClient(&api.Config{ + Address: addr, + HttpClient: httpClient, + }) + if err != nil { + t.Fatal(err) + } + + secret, err := client.Logical().Write("auth/cert/login", nil) + if err != nil { + t.Fatal(err) + } + if secret == nil { + t.Fatal("secret is nil") + } + if secret.Auth == nil { + t.Fatal("auth is nil") + } + if secret.Auth.Policies == nil || len(secret.Auth.Policies) == 0 || secret.Auth.Policies[0] != "default" { + t.Fatalf("bad policies: %#v", secret.Auth.Policies) + } + if secret.Auth.ClientToken == "" { + t.Fatalf("bad client token: %#v", *secret.Auth) + } + client.SetToken(secret.Auth.ClientToken) + secret, err = client.Auth().Token().LookupSelf() + if err != nil { + t.Fatal(err) + } + if secret == nil { + t.Fatal("secret is nil") + } + if secret.Data == nil || len(secret.Data) == 0 { + t.Fatal("secret data was empty") + } + } +} + +func TestHTTP_Forwarding_HelpOperation(t *testing.T) { + cluster := vault.NewTestCluster(t, &vault.CoreConfig{}, &vault.TestClusterOptions{ + HandlerFunc: Handler, + }) + cluster.Start() + defer cluster.Cleanup() + cores := cluster.Cores + + vault.TestWaitActive(t, cores[0].Core) + + testHelp := func(client *api.Client) { + help, err := client.Help("auth/token") + if err != nil { + t.Fatal(err) + } + if help == nil { + t.Fatal("help was nil") + } + } + + testHelp(cores[0].Client) + testHelp(cores[1].Client) +} + +func TestHTTP_Forwarding_LocalOnly(t *testing.T) { + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: Handler, + }) + cluster.Start() + defer cluster.Cleanup() + cores := cluster.Cores + + vault.TestWaitActive(t, cores[0].Core) + + testLocalOnly := func(client *api.Client) { + _, err := client.Logical().Read("sys/config/state/sanitized") + if err == nil { + t.Fatal("expected error") + } + } + + testLocalOnly(cores[1].Client) + testLocalOnly(cores[2].Client) +} diff --git a/http/handler.go b/http/handler.go new file mode 100644 index 0000000..6aaf92c --- /dev/null +++ b/http/handler.go @@ -0,0 +1,1228 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/fs" + "io/ioutil" + "mime" + "net" + "net/http" + "net/http/pprof" + "net/textproto" + "net/url" + "os" + "regexp" + "strings" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-sockaddr" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/pathmanager" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + gziphandler "github.com/klauspost/compress/gzhttp" +) + +const ( + // WrapTTLHeaderName is the name of the header containing a directive to + // wrap the response + WrapTTLHeaderName = "X-Vault-Wrap-TTL" + + // WrapFormatHeaderName is the name of the header containing the format to + // wrap in; has no effect if the wrap TTL is not set + WrapFormatHeaderName = "X-Vault-Wrap-Format" + + // NoRequestForwardingHeaderName is the name of the header telling Vault + // not to use request forwarding + NoRequestForwardingHeaderName = "X-Vault-No-Request-Forwarding" + + // MFAHeaderName represents the HTTP header which carries the credentials + // required to perform MFA on any path. + MFAHeaderName = "X-Vault-MFA" + + // canonicalMFAHeaderName is the MFA header value's format in the request + // headers. Do not alter the casing of this string. + canonicalMFAHeaderName = "X-Vault-Mfa" + + // PolicyOverrideHeaderName is the header set to request overriding + // soft-mandatory Sentinel policies. + PolicyOverrideHeaderName = "X-Vault-Policy-Override" + + VaultIndexHeaderName = "X-Vault-Index" + VaultInconsistentHeaderName = "X-Vault-Inconsistent" + VaultForwardHeaderName = "X-Vault-Forward" + VaultInconsistentForward = "forward-active-node" + VaultInconsistentFail = "fail" + + // DefaultMaxRequestSize is the default maximum accepted request size. This + // is to prevent a denial of service attack where no Content-Length is + // provided and the server is fed ever more data until it exhausts memory. + // Can be overridden per listener. 
+ DefaultMaxRequestSize = 32 * 1024 * 1024 +) + +var ( + // Set to false by stub_asset if the ui build tag isn't enabled + uiBuiltIn = true + + // perfStandbyAlwaysForwardPaths is used to check a requested path against + // the always forward list + perfStandbyAlwaysForwardPaths = pathmanager.New() + alwaysRedirectPaths = pathmanager.New() + + injectDataIntoTopRoutes = []string{ + "/v1/sys/audit", + "/v1/sys/audit/", + "/v1/sys/audit-hash/", + "/v1/sys/auth", + "/v1/sys/auth/", + "/v1/sys/config/cors", + "/v1/sys/config/auditing/request-headers/", + "/v1/sys/config/auditing/request-headers", + "/v1/sys/capabilities", + "/v1/sys/capabilities-accessor", + "/v1/sys/capabilities-self", + "/v1/sys/ha-status", + "/v1/sys/key-status", + "/v1/sys/mounts", + "/v1/sys/mounts/", + "/v1/sys/policy", + "/v1/sys/policy/", + "/v1/sys/rekey/backup", + "/v1/sys/rekey/recovery-key-backup", + "/v1/sys/remount", + "/v1/sys/rotate", + "/v1/sys/wrapping/wrap", + } + + oidcProtectedPathRegex = regexp.MustCompile(`^identity/oidc/provider/\w(([\w-.]+)?\w)?/userinfo$`) +) + +func init() { + alwaysRedirectPaths.AddPaths([]string{ + "sys/storage/raft/snapshot", + "sys/storage/raft/snapshot-force", + "!sys/storage/raft/snapshot-auto/config", + }) +} + +type HandlerAnchor struct{} + +func (h HandlerAnchor) Handler(props *vault.HandlerProperties) http.Handler { + return handler(props) +} + +var Handler vault.HandlerHandler = HandlerAnchor{} + +type HandlerFunc func(props *vault.HandlerProperties) http.Handler + +func (h HandlerFunc) Handler(props *vault.HandlerProperties) http.Handler { + return h(props) +} + +var _ vault.HandlerHandler = HandlerFunc(func(props *vault.HandlerProperties) http.Handler { return nil }) + +// handler returns an http.Handler for the API. This can be used on +// its own to mount the Vault API within another web server. 
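+//
+// A minimal sketch of such an embedding (illustrative only, assuming an
+// already-configured *vault.Core named core):
+//
+//	srv := &http.Server{
+//		Addr:    ":8200",
+//		Handler: handler(&vault.HandlerProperties{Core: core}),
+//	}
+//	_ = srv.ListenAndServe()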
+func handler(props *vault.HandlerProperties) http.Handler { + core := props.Core + + // Create the muxer to handle the actual endpoints + mux := http.NewServeMux() + + switch { + case props.RecoveryMode: + raw := vault.NewRawBackend(core) + strategy := vault.GenerateRecoveryTokenStrategy(props.RecoveryToken) + mux.Handle("/v1/sys/raw/", handleLogicalRecovery(raw, props.RecoveryToken)) + mux.Handle("/v1/sys/generate-recovery-token/attempt", handleSysGenerateRootAttempt(core, strategy)) + mux.Handle("/v1/sys/generate-recovery-token/update", handleSysGenerateRootUpdate(core, strategy)) + default: + // Handle non-forwarded paths + mux.Handle("/v1/sys/config/state/", handleLogicalNoForward(core)) + mux.Handle("/v1/sys/host-info", handleLogicalNoForward(core)) + + mux.Handle("/v1/sys/init", handleSysInit(core)) + mux.Handle("/v1/sys/seal-status", handleSysSealStatus(core)) + mux.Handle("/v1/sys/seal", handleSysSeal(core)) + mux.Handle("/v1/sys/step-down", handleRequestForwarding(core, handleSysStepDown(core))) + mux.Handle("/v1/sys/unseal", handleSysUnseal(core)) + mux.Handle("/v1/sys/leader", handleSysLeader(core)) + mux.Handle("/v1/sys/health", handleSysHealth(core)) + mux.Handle("/v1/sys/monitor", handleLogicalNoForward(core)) + mux.Handle("/v1/sys/generate-root/attempt", handleRequestForwarding(core, + handleAuditNonLogical(core, handleSysGenerateRootAttempt(core, vault.GenerateStandardRootTokenStrategy)))) + mux.Handle("/v1/sys/generate-root/update", handleRequestForwarding(core, + handleAuditNonLogical(core, handleSysGenerateRootUpdate(core, vault.GenerateStandardRootTokenStrategy)))) + mux.Handle("/v1/sys/rekey/init", handleRequestForwarding(core, handleSysRekeyInit(core, false))) + mux.Handle("/v1/sys/rekey/update", handleRequestForwarding(core, handleSysRekeyUpdate(core, false))) + mux.Handle("/v1/sys/rekey/verify", handleRequestForwarding(core, handleSysRekeyVerify(core, false))) + mux.Handle("/v1/sys/rekey-recovery-key/init", handleRequestForwarding(core, handleSysRekeyInit(core, true))) + mux.Handle("/v1/sys/rekey-recovery-key/update", handleRequestForwarding(core, handleSysRekeyUpdate(core, true))) + mux.Handle("/v1/sys/rekey-recovery-key/verify", handleRequestForwarding(core, handleSysRekeyVerify(core, true))) + mux.Handle("/v1/sys/storage/raft/bootstrap", handleSysRaftBootstrap(core)) + mux.Handle("/v1/sys/storage/raft/join", handleSysRaftJoin(core)) + mux.Handle("/v1/sys/internal/ui/feature-flags", handleSysInternalFeatureFlags(core)) + + for _, path := range injectDataIntoTopRoutes { + mux.Handle(path, handleRequestForwarding(core, handleLogicalWithInjector(core))) + } + mux.Handle("/v1/sys/", handleRequestForwarding(core, handleLogical(core))) + mux.Handle("/v1/", handleRequestForwarding(core, handleLogical(core))) + if core.UIEnabled() { + if uiBuiltIn { + mux.Handle("/ui/", http.StripPrefix("/ui/", gziphandler.GzipHandler(handleUIHeaders(core, handleUI(http.FileServer(&UIAssetWrapper{FileSystem: assetFS()})))))) + mux.Handle("/robots.txt", gziphandler.GzipHandler(handleUIHeaders(core, handleUI(http.FileServer(&UIAssetWrapper{FileSystem: assetFS()}))))) + } else { + mux.Handle("/ui/", handleUIHeaders(core, handleUIStub())) + } + mux.Handle("/ui", handleUIRedirect()) + mux.Handle("/", handleUIRedirect()) + + } + + // Register metrics path without authentication if enabled + if props.ListenerConfig != nil && props.ListenerConfig.Telemetry.UnauthenticatedMetricsAccess { + mux.Handle("/v1/sys/metrics", handleMetricsUnauthenticated(core)) + } else { + mux.Handle("/v1/sys/metrics", 
handleLogicalNoForward(core))
+		}
+
+		if props.ListenerConfig != nil && props.ListenerConfig.Profiling.UnauthenticatedPProfAccess {
+			for _, name := range []string{"goroutine", "threadcreate", "heap", "allocs", "block", "mutex"} {
+				mux.Handle("/v1/sys/pprof/"+name, pprof.Handler(name))
+			}
+			mux.Handle("/v1/sys/pprof/", http.HandlerFunc(pprof.Index))
+			mux.Handle("/v1/sys/pprof/cmdline", http.HandlerFunc(pprof.Cmdline))
+			mux.Handle("/v1/sys/pprof/profile", http.HandlerFunc(pprof.Profile))
+			mux.Handle("/v1/sys/pprof/symbol", http.HandlerFunc(pprof.Symbol))
+			mux.Handle("/v1/sys/pprof/trace", http.HandlerFunc(pprof.Trace))
+		} else {
+			mux.Handle("/v1/sys/pprof/", handleLogicalNoForward(core))
+		}
+
+		if props.ListenerConfig != nil && props.ListenerConfig.InFlightRequestLogging.UnauthenticatedInFlightAccess {
+			mux.Handle("/v1/sys/in-flight-req", handleUnAuthenticatedInFlightRequest(core))
+		} else {
+			mux.Handle("/v1/sys/in-flight-req", handleLogicalNoForward(core))
+		}
+		additionalRoutes(mux, core)
+	}
+
+	// Wrap the handler in another handler to trigger all help paths.
+	helpWrappedHandler := wrapHelpHandler(mux, core)
+	corsWrappedHandler := wrapCORSHandler(helpWrappedHandler, core)
+	quotaWrappedHandler := rateLimitQuotaWrapping(corsWrappedHandler, core)
+	genericWrappedHandler := genericWrapping(core, quotaWrappedHandler, props)
+	wrappedHandler := wrapMaxRequestSizeHandler(genericWrappedHandler, props)
+
+	// Wrap the handler with PrintablePathCheckHandler to check for non-printable
+	// characters in the request path.
+	printablePathCheckHandler := wrappedHandler
+	if !props.DisablePrintableCheck {
+		printablePathCheckHandler = cleanhttp.PrintablePathCheckHandler(wrappedHandler, nil)
+	}
+
+	return printablePathCheckHandler
+}
+
+type copyResponseWriter struct {
+	wrapped    http.ResponseWriter
+	statusCode int
+	body       *bytes.Buffer
+}
+
+// newCopyResponseWriter returns an initialized copyResponseWriter
+func newCopyResponseWriter(wrapped http.ResponseWriter) *copyResponseWriter {
+	w := &copyResponseWriter{
+		wrapped:    wrapped,
+		body:       new(bytes.Buffer),
+		statusCode: 200,
+	}
+	return w
+}
+
+func (w *copyResponseWriter) Header() http.Header {
+	return w.wrapped.Header()
+}
+
+func (w *copyResponseWriter) Write(buf []byte) (int, error) {
+	w.body.Write(buf)
+	return w.wrapped.Write(buf)
+}
+
+func (w *copyResponseWriter) WriteHeader(code int) {
+	w.statusCode = code
+	w.wrapped.WriteHeader(code)
+}
+
+func handleAuditNonLogical(core *vault.Core, h http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		origBody := new(bytes.Buffer)
+		reader := ioutil.NopCloser(io.TeeReader(r.Body, origBody))
+		r.Body = reader
+		req, _, status, err := buildLogicalRequestNoAuth(core.PerfStandby(), w, r)
+		if err != nil || status != 0 {
+			respondError(w, status, err)
+			return
+		}
+		if origBody != nil {
+			r.Body = ioutil.NopCloser(origBody)
+		}
+		input := &logical.LogInput{
+			Request: req,
+		}
+		err = core.AuditLogger().AuditRequest(r.Context(), input)
+		if err != nil {
+			respondError(w, status, err)
+			return
+		}
+		cw := newCopyResponseWriter(w)
+		h.ServeHTTP(cw, r)
+		data := make(map[string]interface{})
+		err = jsonutil.DecodeJSON(cw.body.Bytes(), &data)
+		if err != nil {
+			// best effort, ignore
+		}
+		httpResp := &logical.HTTPResponse{Data: data, Headers: cw.Header()}
+		input.Response = logical.HTTPResponseToLogicalResponse(httpResp)
+		err = core.AuditLogger().AuditResponse(r.Context(), input)
+		if err != nil {
+			respondError(w, status, err)
+		}
+		return
+ }) +} + +// wrapGenericHandler wraps the handler with an extra layer of handler where +// tasks that should be commonly handled for all the requests and/or responses +// are performed. +func wrapGenericHandler(core *vault.Core, h http.Handler, props *vault.HandlerProperties) http.Handler { + var maxRequestDuration time.Duration + if props.ListenerConfig != nil { + maxRequestDuration = props.ListenerConfig.MaxRequestDuration + } + if maxRequestDuration == 0 { + maxRequestDuration = vault.DefaultMaxRequestDuration + } + // Swallow this error since we don't want to pollute the logs and we also don't want to + // return an HTTP error here. This information is best effort. + hostname, _ := os.Hostname() + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // This block needs to be here so that upon sending SIGHUP, custom response + // headers are also reloaded into the handlers. + var customHeaders map[string][]*logical.CustomHeader + if props.ListenerConfig != nil { + la := props.ListenerConfig.Address + listenerCustomHeaders := core.GetListenerCustomResponseHeaders(la) + if listenerCustomHeaders != nil { + customHeaders = listenerCustomHeaders.StatusCodeHeaderMap + } + } + // saving start time for the in-flight requests + inFlightReqStartTime := time.Now() + + nw := logical.NewStatusHeaderResponseWriter(w, customHeaders) + + // Set the Cache-Control header for all the responses returned + // by Vault + nw.Header().Set("Cache-Control", "no-store") + + // Start with the request context + ctx := r.Context() + var cancelFunc context.CancelFunc + // Add our timeout, but not for the monitor or events endpoints, as they are streaming + if strings.HasSuffix(r.URL.Path, "sys/monitor") || strings.Contains(r.URL.Path, "sys/events") { + ctx, cancelFunc = context.WithCancel(ctx) + } else { + ctx, cancelFunc = context.WithTimeout(ctx, maxRequestDuration) + } + ctx = context.WithValue(ctx, "original_request_path", r.URL.Path) + r = r.WithContext(ctx) + r = r.WithContext(namespace.ContextWithNamespace(r.Context(), namespace.RootNamespace)) + + // Set some response headers with raft node id (if applicable) and hostname, if available + if core.RaftNodeIDHeaderEnabled() { + nodeID := core.GetRaftNodeID() + if nodeID != "" { + nw.Header().Set("X-Vault-Raft-Node-ID", nodeID) + } + } + + if core.HostnameHeaderEnabled() && hostname != "" { + nw.Header().Set("X-Vault-Hostname", hostname) + } + + switch { + case strings.HasPrefix(r.URL.Path, "/v1/"): + newR, status := adjustRequest(core, r) + if status != 0 { + respondError(nw, status, nil) + cancelFunc() + return + } + r = newR + + case strings.HasPrefix(r.URL.Path, "/ui"), r.URL.Path == "/robots.txt", r.URL.Path == "/": + default: + respondError(nw, http.StatusNotFound, nil) + cancelFunc() + return + } + + // The uuid for the request is going to be generated when a logical + // request is generated. 
But here we generate one to be able to track
+		// in-flight requests and use it to update the request data with the
+		// client ID.
+		inFlightReqID, err := uuid.GenerateUUID()
+		if err != nil {
+			respondError(nw, http.StatusInternalServerError, fmt.Errorf("failed to generate an identifier for the in-flight request"))
+		}
+		// adding an entry to the context to enable updating in-flight
+		// data with ClientID in the logical layer
+		r = r.WithContext(context.WithValue(r.Context(), logical.CtxKeyInFlightRequestID{}, inFlightReqID))
+
+		// extracting the client address to be included in the in-flight request
+		var clientAddr string
+		headers := r.Header[textproto.CanonicalMIMEHeaderKey("X-Forwarded-For")]
+		if len(headers) == 0 {
+			clientAddr = r.RemoteAddr
+		} else {
+			clientAddr = headers[0]
+		}
+
+		// getting the request method
+		requestMethod := r.Method
+
+		// Storing the in-flight requests. Path should include namespace as well
+		core.StoreInFlightReqData(
+			inFlightReqID,
+			vault.InFlightReqData{
+				StartTime:        inFlightReqStartTime,
+				ReqPath:          r.URL.Path,
+				ClientRemoteAddr: clientAddr,
+				Method:           requestMethod,
+			})
+		defer func() {
+			// Not expecting this to fail, so skipping the assertion check
+			core.FinalizeInFlightReqData(inFlightReqID, nw.StatusCode)
+		}()
+
+		// Setting the namespace in the header to be included in the error message
+		ns := r.Header.Get(consts.NamespaceHeaderName)
+		if ns != "" {
+			nw.Header().Set(consts.NamespaceHeaderName, ns)
+		}
+
+		h.ServeHTTP(nw, r)
+
+		cancelFunc()
+		return
+	})
+}
+
+func WrapForwardedForHandler(h http.Handler, l *configutil.Listener) http.Handler {
+	rejectNotPresent := l.XForwardedForRejectNotPresent
+	hopSkips := l.XForwardedForHopSkips
+	authorizedAddrs := l.XForwardedForAuthorizedAddrs
+	rejectNotAuthz := l.XForwardedForRejectNotAuthorized
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		headers, headersOK := r.Header[textproto.CanonicalMIMEHeaderKey("X-Forwarded-For")]
+		if !headersOK || len(headers) == 0 {
+			if !rejectNotPresent {
+				h.ServeHTTP(w, r)
+				return
+			}
+			respondError(w, http.StatusBadRequest, fmt.Errorf("missing x-forwarded-for header and configured to reject when not present"))
+			return
+		}
+
+		host, port, err := net.SplitHostPort(r.RemoteAddr)
+		if err != nil {
+			// If not rejecting, treat it like we just don't have a valid
+			// header because we can't do a comparison against an address we
+			// can't understand
+			if !rejectNotPresent {
+				h.ServeHTTP(w, r)
+				return
+			}
+			respondError(w, http.StatusBadRequest, fmt.Errorf("error parsing client hostport: %w", err))
+			return
+		}
+
+		addr, err := sockaddr.NewIPAddr(host)
+		if err != nil {
+			// We treat this the same as the case above
+			if !rejectNotPresent {
+				h.ServeHTTP(w, r)
+				return
+			}
+			respondError(w, http.StatusBadRequest, fmt.Errorf("error parsing client address: %w", err))
+			return
+		}
+
+		var found bool
+		for _, authz := range authorizedAddrs {
+			if authz.Contains(addr) {
+				found = true
+				break
+			}
+		}
+		if !found {
+			// If we didn't find it and aren't configured to reject, simply
+			// don't trust it
+			if !rejectNotAuthz {
+				h.ServeHTTP(w, r)
+				return
+			}
+			respondError(w, http.StatusBadRequest, fmt.Errorf("client address not authorized for x-forwarded-for and configured to reject connection"))
+			return
+		}
+
+		// At this point we have at least one value and it's authorized
+
+		// Split comma-separated values, which are common. This brings them
+		// in line with the multiple-header case.
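+		// For example, headers ["2.3.4.5", "3.4.5.6,4.5.6.7"] flatten to
+		// acc = ["2.3.4.5", "3.4.5.6", "4.5.6.7"], and hop skipping then
+		// indexes backward from the final entry.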
+		var acc []string
+		for _, header := range headers {
+			vals := strings.Split(header, ",")
+			for _, v := range vals {
+				acc = append(acc, strings.TrimSpace(v))
+			}
+		}
+
+		indexToUse := int64(len(acc)) - 1 - hopSkips
+		if indexToUse < 0 {
+			// This is likely an error in either configuration or other
+			// infrastructure. We could either deny the request, or we
+			// could simply not trust the value. Denying the request is
+			// "safer" since if this logic is configured at all there may
+			// be an assumption it can always be trusted. Given that we can
+			// deny accepting the request at all if it's not from an
+			// authorized address, if we're at this point the address is
+			// authorized (or we've turned off explicit rejection) and we
+			// should assume that what comes in should be properly
+			// formatted.
+			respondError(w, http.StatusBadRequest, fmt.Errorf("malformed x-forwarded-for configuration or request, hops to skip (%d) would skip before earliest chain link (chain length %d)", hopSkips, len(headers)))
+			return
+		}
+
+		r.RemoteAddr = net.JoinHostPort(acc[indexToUse], port)
+		h.ServeHTTP(w, r)
+		return
+	})
+}
+
+// stripPrefix is a helper to strip a prefix from the path. It will
+// return false from the second return value if the prefix doesn't exist.
+func stripPrefix(prefix, path string) (string, bool) {
+	if !strings.HasPrefix(path, prefix) {
+		return "", false
+	}
+
+	path = path[len(prefix):]
+	if path == "" {
+		return "", false
+	}
+
+	return path, true
+}
+
+func handleUIHeaders(core *vault.Core, h http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		header := w.Header()
+
+		userHeaders, err := core.UIHeaders()
+		if err != nil {
+			respondError(w, http.StatusInternalServerError, err)
+			return
+		}
+		if userHeaders != nil {
+			for k := range userHeaders {
+				v := userHeaders.Get(k)
+				header.Set(k, v)
+			}
+		}
+		h.ServeHTTP(w, req)
+	})
+}
+
+func handleUI(h http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		// The fileserver handler strips trailing slashes and does a redirect.
+		// We don't want the redirect to happen so we preemptively trim the slash
+		// here.
+		req.URL.Path = strings.TrimSuffix(req.URL.Path, "/")
+		h.ServeHTTP(w, req)
+		return
+	})
+}
+
+func handleUIStub() http.Handler {
+	stubHTML := `
+	Vault UI is not available in this binary.
+
+	To get Vault UI do one of the following:
+ + ` + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.Write([]byte(stubHTML)) + }) +} + +func handleUIRedirect() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + http.Redirect(w, req, "/ui/", 307) + return + }) +} + +type UIAssetWrapper struct { + FileSystem http.FileSystem +} + +func (fsw *UIAssetWrapper) Open(name string) (http.File, error) { + file, err := fsw.FileSystem.Open(name) + if err == nil { + return file, nil + } + // serve index.html instead of 404ing + if errors.Is(err, fs.ErrNotExist) { + file, err := fsw.FileSystem.Open("index.html") + return file, err + } + return nil, err +} + +func parseQuery(values url.Values) map[string]interface{} { + data := map[string]interface{}{} + for k, v := range values { + // Skip the help key as this is a reserved parameter + if k == "help" { + continue + } + + switch { + case len(v) == 0: + case len(v) == 1: + data[k] = v[0] + default: + data[k] = v + } + } + + if len(data) > 0 { + return data + } + return nil +} + +func parseJSONRequest(perfStandby bool, r *http.Request, w http.ResponseWriter, out interface{}) (io.ReadCloser, error) { + // Limit the maximum number of bytes to MaxRequestSize to protect + // against an indefinite amount of data being read. + reader := r.Body + var origBody io.ReadWriter + if perfStandby { + // Since we're checking PerfStandby here we key on origBody being nil + // or not later, so we need to always allocate so it's non-nil + origBody = new(bytes.Buffer) + reader = ioutil.NopCloser(io.TeeReader(reader, origBody)) + } + err := jsonutil.DecodeJSONFromReader(reader, out) + if err != nil && err != io.EOF { + return nil, fmt.Errorf("failed to parse JSON input: %w", err) + } + if origBody != nil { + return ioutil.NopCloser(origBody), err + } + return nil, err +} + +// parseFormRequest parses values from a form POST. +// +// A nil map will be returned if the format is empty or invalid. +func parseFormRequest(r *http.Request) (map[string]interface{}, error) { + if err := r.ParseForm(); err != nil { + return nil, err + } + + var data map[string]interface{} + + if len(r.PostForm) != 0 { + data = make(map[string]interface{}, len(r.PostForm)) + for k, v := range r.PostForm { + switch len(v) { + case 0: + case 1: + data[k] = v[0] + default: + // Almost anywhere taking in a string list can take in comma + // separated values, and really this is super niche anyways + data[k] = strings.Join(v, ",") + } + } + } + + return data, nil +} + +// forwardBasedOnHeaders returns true if the request headers specify that +// we should forward to the active node - either unconditionally or because +// a specified state isn't present locally. 
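+//
+// Concretely, "X-Vault-Forward: active-node" forces forwarding, while
+// "X-Vault-Inconsistent: forward-active-node" together with one or more
+// "X-Vault-Index" values forwards only when this node has not yet applied
+// the state those index values describe; both header behaviors must be
+// allowed by the configuration (see core.AllowForwardingViaHeader).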
+func forwardBasedOnHeaders(core *vault.Core, r *http.Request) (bool, error) { + rawForward := r.Header.Get(VaultForwardHeaderName) + if rawForward != "" { + if !core.AllowForwardingViaHeader() { + return false, fmt.Errorf("forwarding via header %s disabled in configuration", VaultForwardHeaderName) + } + if rawForward == "active-node" { + return true, nil + } + return false, nil + } + + rawInconsistent := r.Header.Get(VaultInconsistentHeaderName) + if rawInconsistent == "" { + return false, nil + } + + switch rawInconsistent { + case VaultInconsistentForward: + if !core.AllowForwardingViaHeader() { + return false, fmt.Errorf("forwarding via header %s=%s disabled in configuration", + VaultInconsistentHeaderName, VaultInconsistentForward) + } + default: + return false, nil + } + + return core.MissingRequiredState(r.Header.Values(VaultIndexHeaderName), core.PerfStandby()), nil +} + +// handleRequestForwarding determines whether to forward a request or not, +// falling back on the older behavior of redirecting the client +func handleRequestForwarding(core *vault.Core, handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Note if the client requested forwarding + shouldForward, err := forwardBasedOnHeaders(core, r) + if err != nil { + respondError(w, http.StatusBadRequest, err) + return + } + + // If we are a performance standby we can maybe handle the request. + if core.PerfStandby() && !shouldForward { + ns, err := namespace.FromContext(r.Context()) + if err != nil { + respondError(w, http.StatusBadRequest, err) + return + } + path := ns.TrimmedPath(r.URL.Path[len("/v1/"):]) + if !perfStandbyAlwaysForwardPaths.HasPath(path) && !alwaysRedirectPaths.HasPath(path) { + handler.ServeHTTP(w, r) + return + } + } + + // Note: in an HA setup, this call will also ensure that connections to + // the leader are set up, as that happens once the advertised cluster + // values are read during this function + isLeader, leaderAddr, _, err := core.Leader() + if err != nil { + if err == vault.ErrHANotEnabled { + // Standalone node, serve request normally + handler.ServeHTTP(w, r) + return + } + // Some internal error occurred + respondError(w, http.StatusInternalServerError, err) + return + } + if isLeader { + // No forwarding needed, we're leader + handler.ServeHTTP(w, r) + return + } + if leaderAddr == "" { + respondError(w, http.StatusInternalServerError, fmt.Errorf("local node not active but active cluster node not found")) + return + } + + forwardRequest(core, w, r) + return + }) +} + +func forwardRequest(core *vault.Core, w http.ResponseWriter, r *http.Request) { + if r.Header.Get(vault.IntNoForwardingHeaderName) != "" { + respondStandby(core, w, r.URL) + return + } + + if r.Header.Get(NoRequestForwardingHeaderName) != "" { + // Forwarding explicitly disabled, fall back to previous behavior + core.Logger().Debug("handleRequestForwarding: forwarding disabled by client request") + respondStandby(core, w, r.URL) + return + } + + ns, err := namespace.FromContext(r.Context()) + if err != nil { + respondError(w, http.StatusBadRequest, err) + return + } + path := ns.TrimmedPath(r.URL.Path[len("/v1/"):]) + if alwaysRedirectPaths.HasPath(path) { + respondStandby(core, w, r.URL) + return + } + + // Attempt forwarding the request. 
If we cannot forward -- perhaps it's + // been disabled on the active node -- this will return with an + // ErrCannotForward and we simply fall back + statusCode, header, retBytes, err := core.ForwardRequest(r) + if err != nil { + if err == vault.ErrCannotForward { + core.Logger().Debug("cannot forward request (possibly disabled on active node), falling back") + } else { + core.Logger().Error("forward request error", "error", err) + } + + // Fall back to redirection + respondStandby(core, w, r.URL) + return + } + + if header != nil { + for k, v := range header { + w.Header()[k] = v + } + } + + w.WriteHeader(statusCode) + w.Write(retBytes) +} + +// request is a helper to perform a request and properly exit in the +// case of an error. +func request(core *vault.Core, w http.ResponseWriter, rawReq *http.Request, r *logical.Request) (*logical.Response, bool, bool) { + resp, err := core.HandleRequest(rawReq.Context(), r) + if r.LastRemoteWAL() > 0 && !vault.WaitUntilWALShipped(rawReq.Context(), core, r.LastRemoteWAL()) { + if resp == nil { + resp = &logical.Response{} + } + resp.AddWarning("Timeout hit while waiting for local replicated cluster to apply primary's write; this client may encounter stale reads of values written during this operation.") + } + if errwrap.Contains(err, consts.ErrStandby.Error()) { + respondStandby(core, w, rawReq.URL) + return resp, false, false + } + if err != nil && errwrap.Contains(err, logical.ErrPerfStandbyPleaseForward.Error()) { + return nil, false, true + } + + if resp != nil && len(resp.Headers) > 0 { + // Set this here so it will take effect regardless of any other type of + // response processing + header := w.Header() + for k, v := range resp.Headers { + for _, h := range v { + header.Add(k, h) + } + } + + switch { + case resp.Secret != nil, + resp.Auth != nil, + len(resp.Data) > 0, + resp.Redirect != "", + len(resp.Warnings) > 0, + resp.WrapInfo != nil: + // Nothing, resp has data + + default: + // We have an otherwise totally empty response except for headers, + // so nil out the response now that the headers are written out + resp = nil + } + } + + // If vault's core has already written to the response writer do not add any + // additional output. Headers have already been sent. 
If the response writer
+	// is set but has not been written to, it likely means there was some
+	// kind of error.
+	if r.ResponseWriter != nil && r.ResponseWriter.Written() {
+		return nil, true, false
+	}
+
+	if respondErrorCommon(w, r, resp, err) {
+		return resp, false, false
+	}
+
+	return resp, true, false
+}
+
+// respondStandby is used to trigger a redirect in the case that this Vault is currently a hot standby
+func respondStandby(core *vault.Core, w http.ResponseWriter, reqURL *url.URL) {
+	// Request the leader address
+	_, redirectAddr, _, err := core.Leader()
+	if err != nil {
+		if err == vault.ErrHANotEnabled {
+			// Standalone node, serve 503
+			err = errors.New("node is not active")
+			respondError(w, http.StatusServiceUnavailable, err)
+			return
+		}
+
+		respondError(w, http.StatusInternalServerError, err)
+		return
+	}
+
+	// If there is no leader, generate a 503 error
+	if redirectAddr == "" {
+		err = errors.New("no active Vault instance found")
+		respondError(w, http.StatusServiceUnavailable, err)
+		return
+	}
+
+	// Parse the redirect location
+	redirectURL, err := url.Parse(redirectAddr)
+	if err != nil {
+		respondError(w, http.StatusInternalServerError, err)
+		return
+	}
+
+	// Generate a redirect URL
+	finalURL := url.URL{
+		Scheme:   redirectURL.Scheme,
+		Host:     redirectURL.Host,
+		Path:     reqURL.Path,
+		RawQuery: reqURL.RawQuery,
+	}
+
+	// Ensure there is a scheme, default to https
+	if finalURL.Scheme == "" {
+		finalURL.Scheme = "https"
+	}
+
+	// If we have an address, redirect! We use a 307 code
+	// because we don't actually know if it's permanent and
+	// the request method should be preserved.
+	w.Header().Set("Location", finalURL.String())
+	w.WriteHeader(307)
+}
+
+// getTokenFromReq parses the headers of the incoming request to extract the
+// token if present. It accepts the Authorization Bearer (RFC 6750) and
+// X-Vault-Token headers. Returns true if the token was sourced from a
+// Bearer header.
+func getTokenFromReq(r *http.Request) (string, bool) {
+	if token := r.Header.Get(consts.AuthHeaderName); token != "" {
+		return token, false
+	}
+	if headers, ok := r.Header["Authorization"]; ok {
+		// Reference for Authorization header format: https://tools.ietf.org/html/rfc7236#section-3
+
+		// If the string does not start with 'Bearer ', it is not one we would
+		// use, but might be used by plugins
+		for _, v := range headers {
+			if !strings.HasPrefix(v, "Bearer ") {
+				continue
+			}
+			return strings.TrimSpace(v[7:]), true
+		}
+	}
+	return "", false
+}
+
+// requestAuth adds the token to the logical.Request if it exists.
+func requestAuth(r *http.Request, req *logical.Request) {
+	// Attach the header value if we have it
+	token, fromAuthzHeader := getTokenFromReq(r)
+	if token != "" {
+		req.ClientToken = token
+		req.ClientTokenSource = logical.ClientTokenFromVaultHeader
+		if fromAuthzHeader {
+			req.ClientTokenSource = logical.ClientTokenFromAuthzHeader
+		}
+	}
+}
+
+func requestPolicyOverride(r *http.Request, req *logical.Request) error {
+	raw := r.Header.Get(PolicyOverrideHeaderName)
+	if raw == "" {
+		return nil
+	}
+
+	override, err := parseutil.ParseBool(raw)
+	if err != nil {
+		return err
+	}
+
+	req.PolicyOverride = override
+	return nil
+}
+
+// requestWrapInfo adds the WrapInfo value to the logical.Request if wrap info exists
+func requestWrapInfo(r *http.Request, req *logical.Request) (*logical.Request, error) {
+	// First try for the header value
+	wrapTTL := r.Header.Get(WrapTTLHeaderName)
+	if wrapTTL == "" {
+		return req, nil
+	}
+
+	// If it has an allowed suffix, parse as a duration string
+	dur, err := parseutil.ParseDurationSecond(wrapTTL)
+	if err != nil {
+		return req, err
+	}
+	if int64(dur) < 0 {
+		return req, fmt.Errorf("requested wrap ttl cannot be negative")
+	}
+
+	req.WrapInfo = &logical.RequestWrapInfo{
+		TTL: dur,
+	}
+
+	wrapFormat := r.Header.Get(WrapFormatHeaderName)
+	switch wrapFormat {
+	case "jwt":
+		req.WrapInfo.Format = "jwt"
+	}
+
+	return req, nil
+}
+
+// parseMFAHeader parses the MFAHeaderName in the request headers and organizes
+// them with MFA method name as the index.
+func parseMFAHeader(req *logical.Request) error {
+	if req == nil {
+		return fmt.Errorf("request is nil")
+	}
+
+	if req.Headers == nil {
+		return nil
+	}
+
+	// Reset and initialize the credentials in the request
+	req.MFACreds = make(map[string][]string)
+
+	for _, mfaHeaderValue := range req.Headers[canonicalMFAHeaderName] {
+		// Skip the header with no value in it
+		if mfaHeaderValue == "" {
+			continue
+		}
+
+		// Handle the case where only method name is mentioned and no value
+		// is supplied
+		if !strings.Contains(mfaHeaderValue, ":") {
+			// Mark the presence of method name, but set an empty set to it
+			// indicating that there were no values supplied for the method
+			if req.MFACreds[mfaHeaderValue] == nil {
+				req.MFACreds[mfaHeaderValue] = []string{}
+			}
+			continue
+		}
+
+		shardSplits := strings.SplitN(mfaHeaderValue, ":", 2)
+		if shardSplits[0] == "" {
+			return fmt.Errorf("invalid data in header %q; missing method name or ID", MFAHeaderName)
+		}
+
+		if shardSplits[1] == "" {
+			return fmt.Errorf("invalid data in header %q; missing method value", MFAHeaderName)
+		}
+
+		req.MFACreds[shardSplits[0]] = append(req.MFACreds[shardSplits[0]], shardSplits[1])
+	}
+
+	return nil
+}
+
+// isForm tries to determine whether the request should be
+// processed as a form or as JSON.
+//
+// Virtually all existing use cases have assumed processing as JSON,
+// and there has not been a Content-Type requirement in the API. In order to
+// maintain backwards compatibility, this will err on the side of JSON.
+// The request will be considered a form only if:
+//
+// 1. The content type is "application/x-www-form-urlencoded"
+// 2. The start of the request doesn't look like JSON. For this test we
+//    expect the body to begin with { or [, ignoring leading whitespace.
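+//
+// For example, a body of {"a":"b"} is treated as JSON even when the
+// Content-Type is application/x-www-form-urlencoded, while a body of a=b
+// is treated as a form.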
+func isForm(head []byte, contentType string) bool { + contentType, _, err := mime.ParseMediaType(contentType) + + if err != nil || contentType != "application/x-www-form-urlencoded" { + return false + } + + // Look for the start of JSON or not-JSON, skipping any insignificant + // whitespace (per https://tools.ietf.org/html/rfc7159#section-2). + for _, c := range head { + switch c { + case ' ', '\t', '\n', '\r': + continue + case '[', '{': // JSON + return false + default: // not JSON + return true + } + } + + return true +} + +func respondError(w http.ResponseWriter, status int, err error) { + logical.RespondError(w, status, err) +} + +func respondErrorAndData(w http.ResponseWriter, status int, data interface{}, err error) { + logical.RespondErrorAndData(w, status, data, err) +} + +func respondErrorCommon(w http.ResponseWriter, req *logical.Request, resp *logical.Response, err error) bool { + statusCode, newErr := logical.RespondErrorCommon(req, resp, err) + if newErr == nil && statusCode == 0 { + return false + } + + // If ErrPermissionDenied occurs for OIDC protected resources (e.g., userinfo), + // then respond with a JSON error format that complies with the specification. + // This prevents the JSON error format from changing to a Vault-y format (i.e., + // the format that results from respondError) after an OIDC access token expires. + if oidcPermissionDenied(req.Path, err) { + respondOIDCPermissionDenied(w) + return true + } + + if resp != nil { + if data := resp.Data["data"]; data != nil { + respondErrorAndData(w, statusCode, data, newErr) + return true + } + } + respondError(w, statusCode, newErr) + return true +} + +func respondOk(w http.ResponseWriter, body interface{}) { + w.Header().Set("Content-Type", "application/json") + + if body == nil { + w.WriteHeader(http.StatusNoContent) + } else { + w.WriteHeader(http.StatusOK) + enc := json.NewEncoder(w) + enc.Encode(body) + } +} + +// oidcPermissionDenied returns true if the given path matches the +// UserInfo Endpoint published by Vault OIDC providers and the given +// error is a logical.ErrPermissionDenied. +func oidcPermissionDenied(path string, err error) bool { + return errwrap.Contains(err, logical.ErrPermissionDenied.Error()) && + oidcProtectedPathRegex.MatchString(path) +} + +// respondOIDCPermissionDenied writes a response to the given w for +// permission denied errors (expired token) on resources protected +// by OIDC access tokens. Currently, the UserInfo Endpoint is the only +// protected resource. See the following specifications for details: +// - https://openid.net/specs/openid-connect-core-1_0.html#UserInfoError +// - https://datatracker.ietf.org/doc/html/rfc6750#section-3.1 +func respondOIDCPermissionDenied(w http.ResponseWriter) { + errorCode := "invalid_token" + errorDescription := logical.ErrPermissionDenied.Error() + + w.Header().Set("Content-Type", "application/json") + w.Header().Set("WWW-Authenticate", fmt.Sprintf("Bearer error=%q,error_description=%q", + errorCode, errorDescription)) + w.WriteHeader(http.StatusUnauthorized) + + var oidcResponse struct { + Error string `json:"error"` + ErrorDescription string `json:"error_description"` + } + oidcResponse.Error = errorCode + oidcResponse.ErrorDescription = errorDescription + + enc := json.NewEncoder(w) + enc.Encode(oidcResponse) +} diff --git a/http/handler_test.go b/http/handler_test.go new file mode 100644 index 0000000..0a15b22 --- /dev/null +++ b/http/handler_test.go @@ -0,0 +1,949 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "errors" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/textproto" + "net/url" + "reflect" + "runtime" + "strings" + "testing" + + "github.com/go-test/deep" + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/helper/versions" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + "github.com/stretchr/testify/require" +) + +func TestHandler_parseMFAHandler(t *testing.T) { + var err error + var expectedMFACreds logical.MFACreds + req := &logical.Request{ + Headers: make(map[string][]string), + } + + headerName := textproto.CanonicalMIMEHeaderKey(MFAHeaderName) + + // Set TOTP passcode in the MFA header + req.Headers[headerName] = []string{ + "my_totp:123456", + "my_totp:111111", + "my_second_mfa:hi=hello", + "my_third_mfa", + } + err = parseMFAHeader(req) + if err != nil { + t.Fatal(err) + } + + // Verify that it is being parsed properly + expectedMFACreds = logical.MFACreds{ + "my_totp": []string{ + "123456", + "111111", + }, + "my_second_mfa": []string{ + "hi=hello", + }, + "my_third_mfa": []string{}, + } + if !reflect.DeepEqual(expectedMFACreds, req.MFACreds) { + t.Fatalf("bad: parsed MFACreds; expected: %#v\n actual: %#v\n", expectedMFACreds, req.MFACreds) + } + + // Split the creds of a method type in different headers and check if they + // all get merged together + req.Headers[headerName] = []string{ + "my_mfa:passcode=123456", + "my_mfa:month=july", + "my_mfa:day=tuesday", + } + err = parseMFAHeader(req) + if err != nil { + t.Fatal(err) + } + + expectedMFACreds = logical.MFACreds{ + "my_mfa": []string{ + "passcode=123456", + "month=july", + "day=tuesday", + }, + } + if !reflect.DeepEqual(expectedMFACreds, req.MFACreds) { + t.Fatalf("bad: parsed MFACreds; expected: %#v\n actual: %#v\n", expectedMFACreds, req.MFACreds) + } + + // Header without method name should error out + req.Headers[headerName] = []string{ + ":passcode=123456", + } + err = parseMFAHeader(req) + if err == nil { + t.Fatalf("expected an error; actual: %#v\n", req.MFACreds) + } + + // Header without method name and method value should error out + req.Headers[headerName] = []string{ + ":", + } + err = parseMFAHeader(req) + if err == nil { + t.Fatalf("expected an error; actual: %#v\n", req.MFACreds) + } + + // Header without method name and method value should error out + req.Headers[headerName] = []string{ + "my_totp:", + } + err = parseMFAHeader(req) + if err == nil { + t.Fatalf("expected an error; actual: %#v\n", req.MFACreds) + } +} + +func TestHandler_cors(t *testing.T) { + core, _, _ := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + + // Enable CORS and allow from any origin for testing. + corsConfig := core.CORSConfig() + err := corsConfig.Enable(context.Background(), []string{addr}, nil) + if err != nil { + t.Fatalf("Error enabling CORS: %s", err) + } + + req, err := http.NewRequest(http.MethodOptions, addr+"/v1/sys/seal-status", nil) + if err != nil { + t.Fatalf("err: %s", err) + } + req.Header.Set("Origin", "BAD ORIGIN") + + // Requests from unacceptable origins will be rejected with a 403. 
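+	// ("BAD ORIGIN" above is deliberately absent from the allowed-origins
+	// list, which was configured to contain only addr.)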
+ client := cleanhttp.DefaultClient() + resp, err := client.Do(req) + if err != nil { + t.Fatalf("err: %s", err) + } + + if resp.StatusCode != http.StatusForbidden { + t.Fatalf("Bad status:\nexpected: 403 Forbidden\nactual: %s", resp.Status) + } + + // + // Test preflight requests + // + + // Set a valid origin + req.Header.Set("Origin", addr) + + // Server should NOT accept arbitrary methods. + req.Header.Set("Access-Control-Request-Method", "FOO") + + client = cleanhttp.DefaultClient() + resp, err = client.Do(req) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Fail if an arbitrary method is accepted. + if resp.StatusCode != http.StatusMethodNotAllowed { + t.Fatalf("Bad status:\nexpected: 405 Method Not Allowed\nactual: %s", resp.Status) + } + + // Server SHOULD accept acceptable methods. + req.Header.Set("Access-Control-Request-Method", http.MethodPost) + + client = cleanhttp.DefaultClient() + resp, err = client.Do(req) + if err != nil { + t.Fatalf("err: %s", err) + } + + // + // Test that the CORS headers are applied correctly. + // + expHeaders := map[string]string{ + "Access-Control-Allow-Origin": addr, + "Access-Control-Allow-Headers": strings.Join(vault.StdAllowedHeaders, ","), + "Access-Control-Max-Age": "300", + "Vary": "Origin", + } + + for expHeader, expected := range expHeaders { + actual := resp.Header.Get(expHeader) + if actual == "" { + t.Fatalf("bad:\nHeader: %#v was not on response.", expHeader) + } + + if actual != expected { + t.Fatalf("bad:\nExpected: %#v\nActual: %#v\n", expected, actual) + } + } +} + +func TestHandler_HostnameHeader(t *testing.T) { + t.Parallel() + testCases := []struct { + description string + config *vault.CoreConfig + headerPresent bool + }{ + { + description: "with no header configured", + config: nil, + headerPresent: false, + }, + { + description: "with header configured", + config: &vault.CoreConfig{ + EnableResponseHeaderHostname: true, + }, + headerPresent: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + var core *vault.Core + + if tc.config == nil { + core, _, _ = vault.TestCoreUnsealed(t) + } else { + core, _, _ = vault.TestCoreUnsealedWithConfig(t, tc.config) + } + + ln, addr := TestServer(t, core) + defer ln.Close() + + req, err := http.NewRequest("GET", addr+"/v1/sys/seal-status", nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + client := cleanhttp.DefaultClient() + resp, err := client.Do(req) + if err != nil { + t.Fatalf("err: %s", err) + } + + if resp == nil { + t.Fatal("nil response") + } + + hnHeader := resp.Header.Get("X-Vault-Hostname") + if tc.headerPresent && hnHeader == "" { + t.Logf("header configured = %t", core.HostnameHeaderEnabled()) + t.Fatal("missing 'X-Vault-Hostname' header entry in response") + } + if !tc.headerPresent && hnHeader != "" { + t.Fatal("didn't expect 'X-Vault-Hostname' header but it was present anyway") + } + + rniHeader := resp.Header.Get("X-Vault-Raft-Node-ID") + if rniHeader != "" { + t.Fatalf("no raft node ID header was expected, since we're not running a raft cluster. 
instead, got %s", rniHeader) + } + }) + } +} + +func TestHandler_CacheControlNoStore(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + + req, err := http.NewRequest("GET", addr+"/v1/sys/mounts", nil) + if err != nil { + t.Fatalf("err: %s", err) + } + req.Header.Set(consts.AuthHeaderName, token) + req.Header.Set(WrapTTLHeaderName, "60s") + + client := cleanhttp.DefaultClient() + resp, err := client.Do(req) + if err != nil { + t.Fatalf("err: %s", err) + } + + if resp == nil { + t.Fatalf("nil response") + } + + actual := resp.Header.Get("Cache-Control") + + if actual == "" { + t.Fatalf("missing 'Cache-Control' header entry in response writer") + } + + if actual != "no-store" { + t.Fatalf("bad: Cache-Control. Expected: 'no-store', Actual: %q", actual) + } +} + +func TestHandler_InFlightRequest(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + req, err := http.NewRequest("GET", addr+"/v1/sys/in-flight-req", nil) + if err != nil { + t.Fatalf("err: %s", err) + } + req.Header.Set(consts.AuthHeaderName, token) + + client := cleanhttp.DefaultClient() + resp, err := client.Do(req) + if err != nil { + t.Fatalf("err: %s", err) + } + + if resp == nil { + t.Fatalf("nil response") + } + + var actual map[string]interface{} + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + if actual == nil || len(actual) == 0 { + t.Fatal("expected to get at least one in-flight request, got nil or zero length map") + } + for _, v := range actual { + reqInfo, ok := v.(map[string]interface{}) + if !ok { + t.Fatal("failed to read in-flight request") + } + if reqInfo["request_path"] != "/v1/sys/in-flight-req" { + t.Fatalf("expected /v1/sys/in-flight-req in-flight request path, got %s", actual["request_path"]) + } + } +} + +// TestHandler_MissingToken tests the response / error code if a request comes +// in with a missing client token. 
See +// https://github.com/hashicorp/vault/issues/8377 +func TestHandler_MissingToken(t *testing.T) { + // core, _, token := vault.TestCoreUnsealed(t) + core, _, _ := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + + req, err := http.NewRequest("GET", addr+"/v1/sys/internal/ui/mounts/cubbyhole", nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + req.Header.Set(WrapTTLHeaderName, "60s") + + client := cleanhttp.DefaultClient() + resp, err := client.Do(req) + if err != nil { + t.Fatal(err) + } + if resp.StatusCode != 403 { + t.Fatalf("expected code 403, got: %d", resp.StatusCode) + } +} + +func TestHandler_Accepted(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + + req, err := http.NewRequest("POST", addr+"/v1/auth/token/tidy", nil) + if err != nil { + t.Fatalf("err: %s", err) + } + req.Header.Set(consts.AuthHeaderName, token) + + client := cleanhttp.DefaultClient() + resp, err := client.Do(req) + if err != nil { + t.Fatalf("err: %s", err) + } + + testResponseStatus(t, resp, 202) +} + +// We use this test to verify header auth +func TestSysMounts_headerAuth(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + + req, err := http.NewRequest("GET", addr+"/v1/sys/mounts", nil) + if err != nil { + t.Fatalf("err: %s", err) + } + req.Header.Set(consts.AuthHeaderName, token) + + client := cleanhttp.DefaultClient() + resp, err := client.Do(req) + if err != nil { + t.Fatalf("err: %s", err) + } + + var actual map[string]interface{} + expected := map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "secret/": map[string]interface{}{ + "description": "key/value secret storage", + "type": "kv", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + }, + "sys/": map[string]interface{}{ + "description": "system endpoints used for control, policy and debugging", + "type": "system", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + "passthrough_request_headers": []interface{}{"Accept"}, + }, + "local": false, + "seal_wrap": true, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.DefaultBuiltinVersion, + }, + "cubbyhole/": map[string]interface{}{ + "description": "per-token private secret storage", + "type": "cubbyhole", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": true, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "cubbyhole"), + }, + "identity/": map[string]interface{}{ + "description": "identity store", + "type": "identity", + "external_entropy_access": false, + "config": 
map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + "passthrough_request_headers": []interface{}{"Authorization"}, + }, + "local": false, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "identity"), + }, + }, + "secret/": map[string]interface{}{ + "description": "key/value secret storage", + "type": "kv", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + }, + "sys/": map[string]interface{}{ + "description": "system endpoints used for control, policy and debugging", + "type": "system", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + "passthrough_request_headers": []interface{}{"Accept"}, + }, + "local": false, + "seal_wrap": true, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.DefaultBuiltinVersion, + }, + "cubbyhole/": map[string]interface{}{ + "description": "per-token private secret storage", + "type": "cubbyhole", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": true, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "cubbyhole"), + }, + "identity/": map[string]interface{}{ + "description": "identity store", + "type": "identity", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + "passthrough_request_headers": []interface{}{"Authorization"}, + }, + "local": false, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "identity"), + }, + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + + expected["request_id"] = actual["request_id"] + for k, v := range actual["data"].(map[string]interface{}) { + if v.(map[string]interface{})["accessor"] == "" { + t.Fatalf("no accessor from %s", k) + } + if v.(map[string]interface{})["uuid"] == "" { + t.Fatalf("no uuid from %s", k) + } + + expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected[k].(map[string]interface{})["uuid"] = v.(map[string]interface{})["uuid"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["uuid"] = v.(map[string]interface{})["uuid"] + } + + if diff := deep.Equal(actual, expected); len(diff) > 0 { + t.Fatalf("bad, diff: %#v", diff) + } +} + +// We use this test to verify header auth wrapping +func TestSysMounts_headerAuth_Wrapped(t 
*testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + + req, err := http.NewRequest("GET", addr+"/v1/sys/mounts", nil) + if err != nil { + t.Fatalf("err: %s", err) + } + req.Header.Set(consts.AuthHeaderName, token) + req.Header.Set(WrapTTLHeaderName, "60s") + + client := cleanhttp.DefaultClient() + resp, err := client.Do(req) + if err != nil { + t.Fatalf("err: %s", err) + } + + var actual map[string]interface{} + expected := map[string]interface{}{ + "request_id": "", + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "data": nil, + "wrap_info": map[string]interface{}{ + "ttl": json.Number("60"), + }, + "warnings": nil, + "auth": nil, + } + + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + + actualToken, ok := actual["wrap_info"].(map[string]interface{})["token"] + if !ok || actualToken == "" { + t.Fatal("token missing in wrap info") + } + expected["wrap_info"].(map[string]interface{})["token"] = actualToken + + actualCreationTime, ok := actual["wrap_info"].(map[string]interface{})["creation_time"] + if !ok || actualCreationTime == "" { + t.Fatal("creation_time missing in wrap info") + } + expected["wrap_info"].(map[string]interface{})["creation_time"] = actualCreationTime + + actualCreationPath, ok := actual["wrap_info"].(map[string]interface{})["creation_path"] + if !ok || actualCreationPath == "" { + t.Fatal("creation_path missing in wrap info") + } + expected["wrap_info"].(map[string]interface{})["creation_path"] = actualCreationPath + + actualAccessor, ok := actual["wrap_info"].(map[string]interface{})["accessor"] + if !ok || actualAccessor == "" { + t.Fatal("accessor missing in wrap info") + } + expected["wrap_info"].(map[string]interface{})["accessor"] = actualAccessor + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad:\nExpected: %#v\nActual: %#v\n%T %T", expected, actual, actual["warnings"], actual["data"]) + } +} + +func TestHandler_sealed(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + + core.Seal(token) + + resp, err := http.Get(addr + "/v1/secret/foo") + if err != nil { + t.Fatalf("err: %s", err) + } + testResponseStatus(t, resp, 503) +} + +func TestHandler_ui_default(t *testing.T) { + core := vault.TestCoreUI(t, false) + ln, addr := TestServer(t, core) + defer ln.Close() + + resp, err := http.Get(addr + "/ui/") + if err != nil { + t.Fatalf("err: %s", err) + } + testResponseStatus(t, resp, 404) +} + +func TestHandler_ui_enabled(t *testing.T) { + core := vault.TestCoreUI(t, true) + ln, addr := TestServer(t, core) + defer ln.Close() + + resp, err := http.Get(addr + "/ui/") + if err != nil { + t.Fatalf("err: %s", err) + } + testResponseStatus(t, resp, 200) +} + +func TestHandler_error(t *testing.T) { + w := httptest.NewRecorder() + + respondError(w, 500, errors.New("test Error")) + + if w.Code != 500 { + t.Fatalf("expected 500, got %d", w.Code) + } + + // The code inside of the error should override + // the argument to respondError + w2 := httptest.NewRecorder() + e := logical.CodedError(403, "error text") + + respondError(w2, 500, e) + + if w2.Code != 403 { + t.Fatalf("expected 403, got %d", w2.Code) + } + + // vault.ErrSealed is a special case + w3 := httptest.NewRecorder() + + respondError(w3, 400, consts.ErrSealed) + + if w3.Code != 503 { + t.Fatalf("expected 503, got %d", w3.Code) + } +} + +func TestHandler_requestAuth(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + + 
rootCtx := namespace.RootContext(nil) + te, err := core.LookupToken(rootCtx, token) + if err != nil { + t.Fatalf("err: %s", err) + } + + rWithAuthorization, err := http.NewRequest("GET", "v1/test/path", nil) + if err != nil { + t.Fatalf("err: %s", err) + } + rWithAuthorization.Header.Set("Authorization", "Bearer "+token) + + rWithVault, err := http.NewRequest("GET", "v1/test/path", nil) + if err != nil { + t.Fatalf("err: %s", err) + } + rWithVault.Header.Set(consts.AuthHeaderName, token) + + for _, r := range []*http.Request{rWithVault, rWithAuthorization} { + req := logical.TestRequest(t, logical.ReadOperation, "test/path") + r = r.WithContext(rootCtx) + requestAuth(r, req) + err = core.PopulateTokenEntry(rootCtx, req) + if err != nil { + t.Fatalf("err: %s", err) + } + + if req.ClientToken != token { + t.Fatalf("client token should be filled with %s, got %s", token, req.ClientToken) + } + if req.TokenEntry() == nil { + t.Fatal("token entry should not be nil") + } + if !reflect.DeepEqual(req.TokenEntry(), te) { + t.Fatalf("token entry should be the same as the core") + } + if req.ClientTokenAccessor == "" { + t.Fatal("token accessor should not be empty") + } + } + + rNothing, err := http.NewRequest("GET", "v1/test/path", nil) + if err != nil { + t.Fatalf("err: %s", err) + } + req := logical.TestRequest(t, logical.ReadOperation, "test/path") + + requestAuth(rNothing, req) + err = core.PopulateTokenEntry(rootCtx, req) + if err != nil { + t.Fatalf("expected no error, got %s", err) + } + if req.ClientToken != "" { + t.Fatalf("client token should not be filled, got %s", req.ClientToken) + } +} + +func TestHandler_getTokenFromReq(t *testing.T) { + r := http.Request{Header: http.Header{}} + + tok, _ := getTokenFromReq(&r) + if tok != "" { + t.Fatalf("expected '' as result, got '%s'", tok) + } + + r.Header.Set("Authorization", "Bearer TOKEN NOT_GOOD_TOKEN") + token, fromHeader := getTokenFromReq(&r) + if !fromHeader { + t.Fatal("expected from header") + } else if token != "TOKEN NOT_GOOD_TOKEN" { + t.Fatal("did not get expected token value") + } else if r.Header.Get("Authorization") == "" { + t.Fatal("expected value to be passed through") + } + + r.Header.Set(consts.AuthHeaderName, "NEWTOKEN") + tok, _ = getTokenFromReq(&r) + if tok == "TOKEN" { + t.Fatalf("%s header should be prioritized", consts.AuthHeaderName) + } else if tok != "NEWTOKEN" { + t.Fatalf("expected 'NEWTOKEN' as result, got '%s'", tok) + } + + r.Header = http.Header{} + r.Header.Set("Authorization", "Basic TOKEN") + tok, fromHeader = getTokenFromReq(&r) + if tok != "" { + t.Fatalf("expected '' as result, got '%s'", tok) + } else if fromHeader { + t.Fatal("expected not from header") + } +} + +func TestHandler_nonPrintableChars(t *testing.T) { + testNonPrintable(t, false) + testNonPrintable(t, true) +} + +func testNonPrintable(t *testing.T, disable bool) { + core, _, token := vault.TestCoreUnsealedWithConfig(t, &vault.CoreConfig{ + DisableKeyEncodingChecks: disable, + }) + ln, addr := TestListener(t) + props := &vault.HandlerProperties{ + Core: core, + DisablePrintableCheck: disable, + } + TestServerWithListenerAndProperties(t, ln, addr, core, props) + defer ln.Close() + + req, err := http.NewRequest("PUT", addr+"/v1/cubbyhole/foo\u2028bar", strings.NewReader(`{"zip": "zap"}`)) + if err != nil { + t.Fatalf("err: %s", err) + } + req.Header.Set(consts.AuthHeaderName, token) + + client := cleanhttp.DefaultClient() + resp, err := client.Do(req) + if err != nil { + t.Fatalf("err: %s", err) + } + + if disable { + testResponseStatus(t, 
resp, 204) + } else { + testResponseStatus(t, resp, 400) + } +} + +func TestHandler_Parse_Form(t *testing.T) { + cluster := vault.NewTestCluster(t, &vault.CoreConfig{}, &vault.TestClusterOptions{ + HandlerFunc: Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + + core := cores[0].Core + vault.TestWaitActive(t, core) + + c := cleanhttp.DefaultClient() + c.Transport = &http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: cluster.RootCAs, + }, + } + + values := url.Values{ + "zip": []string{"zap"}, + "abc": []string{"xyz"}, + "multi": []string{"first", "second"}, + "empty": []string{}, + } + req, err := http.NewRequest("POST", cores[0].Client.Address()+"/v1/secret/foo", nil) + if err != nil { + t.Fatal(err) + } + req.Body = ioutil.NopCloser(strings.NewReader(values.Encode())) + req.Header.Set("x-vault-token", cluster.RootToken) + req.Header.Set("content-type", "application/x-www-form-urlencoded") + resp, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + + if resp.StatusCode != 204 { + t.Fatalf("bad response: %#v\nrequest was: %#v\nurl was: %#v", *resp, *req, req.URL) + } + + client := cores[0].Client + client.SetToken(cluster.RootToken) + + apiResp, err := client.Logical().Read("secret/foo") + if err != nil { + t.Fatal(err) + } + if apiResp == nil { + t.Fatal("api resp is nil") + } + expected := map[string]interface{}{ + "zip": "zap", + "abc": "xyz", + "multi": "first,second", + } + if diff := deep.Equal(expected, apiResp.Data); diff != nil { + t.Fatal(diff) + } +} + +// TestHandler_MaxRequestSize verifies that a request larger than the +// MaxRequestSize fails +func TestHandler_MaxRequestSize(t *testing.T) { + t.Parallel() + cluster := vault.NewTestCluster(t, &vault.CoreConfig{}, &vault.TestClusterOptions{ + DefaultHandlerProperties: vault.HandlerProperties{ + ListenerConfig: &configutil.Listener{ + MaxRequestSize: 1024, + }, + }, + HandlerFunc: Handler, + NumCores: 1, + }) + cluster.Start() + defer cluster.Cleanup() + + client := cluster.Cores[0].Client + _, err := client.KVv2("secret").Put(context.Background(), "foo", map[string]interface{}{ + "bar": strings.Repeat("a", 1025), + }) + + require.ErrorContains(t, err, "error parsing JSON") +} + +// TestHandler_MaxRequestSize_Memory sets the max request size to 1024 bytes, +// and creates a 1MB request. The test verifies that less than 1MB of memory is +// allocated when the request is sent. This test shouldn't be run in parallel, +// because it modifies GOMAXPROCS +func TestHandler_MaxRequestSize_Memory(t *testing.T) { + ln, addr := TestListener(t) + core, _, token := vault.TestCoreUnsealed(t) + TestServerWithListenerAndProperties(t, ln, addr, core, &vault.HandlerProperties{ + Core: core, + ListenerConfig: &configutil.Listener{ + Address: addr, + MaxRequestSize: 1024, + }, + }) + defer ln.Close() + + data := bytes.Repeat([]byte{0x1}, 1024*1024) + + req, err := http.NewRequest("POST", addr+"/v1/sys/unseal", bytes.NewReader(data)) + require.NoError(t, err) + req.Header.Set(consts.AuthHeaderName, token) + + client := cleanhttp.DefaultClient() + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1)) + var start, end runtime.MemStats + runtime.GC() + runtime.ReadMemStats(&start) + client.Do(req) + runtime.ReadMemStats(&end) + require.Less(t, end.TotalAlloc-start.TotalAlloc, uint64(1024*1024)) +} diff --git a/http/help.go b/http/help.go new file mode 100644 index 0000000..64085f1 --- /dev/null +++ b/http/help.go @@ -0,0 +1,59 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "errors" + "net/http" + "strings" + + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +func wrapHelpHandler(h http.Handler, core *vault.Core) http.Handler { + return http.HandlerFunc(func(writer http.ResponseWriter, req *http.Request) { + // If the help parameter is not blank, then show the help. We request + // forward because standby nodes do not have mounts and other state. + if v := req.URL.Query().Get("help"); v != "" || req.Method == "HELP" { + handleRequestForwarding(core, + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + handleHelp(core, w, r) + })).ServeHTTP(writer, req) + return + } + + h.ServeHTTP(writer, req) + return + }) +} + +func handleHelp(core *vault.Core, w http.ResponseWriter, r *http.Request) { + ns, err := namespace.FromContext(r.Context()) + if err != nil { + respondError(w, http.StatusBadRequest, nil) + return + } + if !strings.HasPrefix(r.URL.Path, "/v1/") { + respondError(w, http.StatusNotFound, errors.New("Missing /v1/ prefix in path. Use vault path-help command to retrieve API help for paths")) + return + } + path := ns.TrimmedPath(r.URL.Path[len("/v1/"):]) + + req := &logical.Request{ + Operation: logical.HelpOperation, + Path: path, + Connection: getConnection(r), + } + requestAuth(r, req) + + resp, err := core.HandleRequest(r.Context(), req) + if err != nil { + respondErrorCommon(w, req, resp, err) + return + } + + respondOk(w, resp.Data) +} diff --git a/http/help_test.go b/http/help_test.go new file mode 100644 index 0000000..d02c26a --- /dev/null +++ b/http/help_test.go @@ -0,0 +1,36 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "net/http" + "testing" + + "github.com/hashicorp/vault/vault" +) + +func TestHelp(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + // request without /v1/ prefix + resp := testHttpGet(t, token, addr+"/?help=1") + testResponseStatus(t, resp, 404) + + resp = testHttpGet(t, "", addr+"/v1/sys/mounts?help=1") + if resp.StatusCode != http.StatusForbidden { + t.Fatal("expected permission denied with no token") + } + + resp = testHttpGet(t, token, addr+"/v1/sys/mounts?help=1") + + var actual map[string]interface{} + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + if _, ok := actual["help"]; !ok { + t.Fatalf("bad: %#v", actual) + } +} diff --git a/http/http_test.go b/http/http_test.go new file mode 100644 index 0000000..5e51ce7 --- /dev/null +++ b/http/http_test.go @@ -0,0 +1,147 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package http
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"regexp"
+	"strings"
+	"testing"
+	"time"
+
+	cleanhttp "github.com/hashicorp/go-cleanhttp"
+	"github.com/hashicorp/vault/sdk/helper/consts"
+	"github.com/hashicorp/vault/sdk/helper/jsonutil"
+)
+
+func testHttpGet(t *testing.T, token string, addr string) *http.Response {
+	loggedToken := token
+	if len(token) == 0 {
+		loggedToken = "<empty>"
+	}
+	t.Logf("Token is %s", loggedToken)
+	return testHttpData(t, "GET", token, addr, nil, false, 0)
+}
+
+func testHttpDelete(t *testing.T, token string, addr string) *http.Response {
+	return testHttpData(t, "DELETE", token, addr, nil, false, 0)
+}
+
+// Go 1.8+ clients redirect automatically, which breaks our 307 standby testing
+func testHttpDeleteDisableRedirect(t *testing.T, token string, addr string) *http.Response {
+	return testHttpData(t, "DELETE", token, addr, nil, true, 0)
+}
+
+func testHttpPostWrapped(t *testing.T, token string, addr string, body interface{}, wrapTTL time.Duration) *http.Response {
+	return testHttpData(t, "POST", token, addr, body, false, wrapTTL)
+}
+
+func testHttpPost(t *testing.T, token string, addr string, body interface{}) *http.Response {
+	return testHttpData(t, "POST", token, addr, body, false, 0)
+}
+
+func testHttpPut(t *testing.T, token string, addr string, body interface{}) *http.Response {
+	return testHttpData(t, "PUT", token, addr, body, false, 0)
+}
+
+// Go 1.8+ clients redirect automatically, which breaks our 307 standby testing
+func testHttpPutDisableRedirect(t *testing.T, token string, addr string, body interface{}) *http.Response {
+	return testHttpData(t, "PUT", token, addr, body, true, 0)
+}
+
+func testHttpData(t *testing.T, method string, token string, addr string, body interface{}, disableRedirect bool, wrapTTL time.Duration) *http.Response {
+	bodyReader := new(bytes.Buffer)
+	if body != nil {
+		enc := json.NewEncoder(bodyReader)
+		if err := enc.Encode(body); err != nil {
+			t.Fatalf("err: %s", err)
+		}
+	}
+
+	req, err := http.NewRequest(method, addr, bodyReader)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	// Get the address of the local listener in order to attach it to an Origin header.
+	// This will allow for the testing of requests that require CORS, without using a browser.
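+	// For illustration: given an addr such as "https://127.0.0.1:8201/v1/sys/seal-status",
+	// the pattern below matches the scheme://host:port portion
+	// ("https://127.0.0.1:8201"), which is what gets sent as the Origin value.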
+ hostURLRegexp, _ := regexp.Compile("http[s]?://.+:[0-9]+") + req.Header.Set("Origin", hostURLRegexp.FindString(addr)) + + req.Header.Set("Content-Type", "application/json") + + if wrapTTL > 0 { + req.Header.Set("X-Vault-Wrap-TTL", wrapTTL.String()) + } + + if len(token) != 0 { + req.Header.Set(consts.AuthHeaderName, token) + } + + client := cleanhttp.DefaultClient() + client.Timeout = 60 * time.Second + + // From https://github.com/michiwend/gomusicbrainz/pull/4/files + defaultRedirectLimit := 30 + + client.CheckRedirect = func(req *http.Request, via []*http.Request) error { + if disableRedirect { + return fmt.Errorf("checkRedirect disabled for test") + } + if len(via) > defaultRedirectLimit { + return fmt.Errorf("%d consecutive requests(redirects)", len(via)) + } + if len(via) == 0 { + // No redirects + return nil + } + // mutate the subsequent redirect requests with the first Header + if token := via[0].Header.Get(consts.AuthHeaderName); len(token) != 0 { + req.Header.Set(consts.AuthHeaderName, token) + } + return nil + } + + resp, err := client.Do(req) + if err != nil && !strings.Contains(err.Error(), "checkRedirect disabled for test") { + t.Fatalf("err: %s", err) + } + + return resp +} + +func testResponseStatus(t *testing.T, resp *http.Response, code int) { + t.Helper() + if resp.StatusCode != code { + body := new(bytes.Buffer) + io.Copy(body, resp.Body) + resp.Body.Close() + + t.Fatalf( + "Expected status %d, got %d. Body:\n\n%s", + code, resp.StatusCode, body.String()) + } +} + +func testResponseHeader(t *testing.T, resp *http.Response, expectedHeaders map[string]string) { + t.Helper() + for k, v := range expectedHeaders { + hv := resp.Header.Get(k) + if v != hv { + t.Fatalf("expected header value %v=%v, got %v=%v", k, v, k, hv) + } + } +} + +func testResponseBody(t *testing.T, resp *http.Response, out interface{}) { + defer resp.Body.Close() + + if err := jsonutil.DecodeJSONFromReader(resp.Body, out); err != nil { + t.Fatalf("err: %s", err) + } +} diff --git a/http/logical.go b/http/logical.go new file mode 100644 index 0000000..8a681d9 --- /dev/null +++ b/http/logical.go @@ -0,0 +1,588 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "bufio" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "mime" + "net" + "net/http" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/helper/experiments" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + "go.uber.org/atomic" +) + +// bufferedReader can be used to replace a request body with a buffered +// version. The Close method invokes the original Closer. 
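+// For example, buildLogicalRequestNoAuth in this package wraps the request
+// body so that it can peek at the leading bytes without consuming them (a
+// sketch of the actual usage below):
+//
+//	bufferedBody := newBufferedReader(r.Body)
+//	r.Body = bufferedBody
+//	head, _ := bufferedBody.Peek(512) // classify as form vs. JSON; body still intact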
+type bufferedReader struct { + *bufio.Reader + rOrig io.ReadCloser +} + +func newBufferedReader(r io.ReadCloser) *bufferedReader { + return &bufferedReader{ + Reader: bufio.NewReader(r), + rOrig: r, + } +} + +func (b *bufferedReader) Close() error { + return b.rOrig.Close() +} + +const MergePatchContentTypeHeader = "application/merge-patch+json" + +func buildLogicalRequestNoAuth(perfStandby bool, w http.ResponseWriter, r *http.Request) (*logical.Request, io.ReadCloser, int, error) { + ns, err := namespace.FromContext(r.Context()) + if err != nil { + return nil, nil, http.StatusBadRequest, nil + } + path := ns.TrimmedPath(r.URL.Path[len("/v1/"):]) + + var data map[string]interface{} + var origBody io.ReadCloser + var passHTTPReq bool + var responseWriter http.ResponseWriter + + // Determine the operation + var op logical.Operation + switch r.Method { + case "DELETE": + op = logical.DeleteOperation + data = parseQuery(r.URL.Query()) + case "GET": + op = logical.ReadOperation + queryVals := r.URL.Query() + var list bool + var err error + listStr := queryVals.Get("list") + if listStr != "" { + list, err = strconv.ParseBool(listStr) + if err != nil { + return nil, nil, http.StatusBadRequest, nil + } + if list { + queryVals.Del("list") + op = logical.ListOperation + if !strings.HasSuffix(path, "/") { + path += "/" + } + } + } + + data = parseQuery(queryVals) + + switch { + case strings.HasPrefix(path, "sys/pprof/"): + passHTTPReq = true + responseWriter = w + case path == "sys/storage/raft/snapshot": + responseWriter = w + case path == "sys/internal/counters/activity/export": + responseWriter = w + case path == "sys/monitor": + passHTTPReq = true + responseWriter = w + } + + case "POST", "PUT": + op = logical.UpdateOperation + + // Buffer the request body in order to allow us to peek at the beginning + // without consuming it. This approach involves no copying. + bufferedBody := newBufferedReader(r.Body) + r.Body = bufferedBody + + // If we are uploading a snapshot or receiving an ocsp-request (which + // is der encoded) we don't want to parse it. Instead, we will simply + // add the HTTP request to the logical request object for later consumption. + contentType := r.Header.Get("Content-Type") + if path == "sys/storage/raft/snapshot" || path == "sys/storage/raft/snapshot-force" || isOcspRequest(contentType) { + passHTTPReq = true + origBody = r.Body + } else { + // Sample the first bytes to determine whether this should be parsed as + // a form or as JSON. The amount to look ahead (512 bytes) is arbitrary + // but extremely tolerant (i.e. allowing 511 bytes of leading whitespace + // and an incorrect content-type). 
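+			// For illustration, given the form content type the heuristic
+			// classifies bodies like these (see isForm and its tests):
+			//
+			//	`  {"zip":"zap"}` -> JSON (first significant byte is '{')
+			//	`[42]`            -> JSON (first significant byte is '[')
+			//	`zip=zap&abc=xyz` -> form data (anything else)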
+ head, err := bufferedBody.Peek(512) + if err != nil && err != bufio.ErrBufferFull && err != io.EOF { + status := http.StatusBadRequest + logical.AdjustErrorStatusCode(&status, err) + return nil, nil, status, fmt.Errorf("error reading data") + } + + if isForm(head, contentType) { + formData, err := parseFormRequest(r) + if err != nil { + status := http.StatusBadRequest + logical.AdjustErrorStatusCode(&status, err) + return nil, nil, status, fmt.Errorf("error parsing form data") + } + + data = formData + } else { + origBody, err = parseJSONRequest(perfStandby, r, w, &data) + if err == io.EOF { + data = nil + err = nil + } + if err != nil { + status := http.StatusBadRequest + logical.AdjustErrorStatusCode(&status, err) + return nil, nil, status, fmt.Errorf("error parsing JSON") + } + } + } + + case "PATCH": + op = logical.PatchOperation + + contentTypeHeader := r.Header.Get("Content-Type") + contentType, _, err := mime.ParseMediaType(contentTypeHeader) + if err != nil { + status := http.StatusBadRequest + logical.AdjustErrorStatusCode(&status, err) + return nil, nil, status, err + } + + if contentType != MergePatchContentTypeHeader { + return nil, nil, http.StatusUnsupportedMediaType, fmt.Errorf("PATCH requires Content-Type of %s, provided %s", MergePatchContentTypeHeader, contentType) + } + + origBody, err = parseJSONRequest(perfStandby, r, w, &data) + + if err == io.EOF { + data = nil + err = nil + } + + if err != nil { + status := http.StatusBadRequest + logical.AdjustErrorStatusCode(&status, err) + return nil, nil, status, fmt.Errorf("error parsing JSON") + } + + case "LIST": + op = logical.ListOperation + if !strings.HasSuffix(path, "/") { + path += "/" + } + + data = parseQuery(r.URL.Query()) + case "HEAD": + op = logical.HeaderOperation + data = parseQuery(r.URL.Query()) + case "OPTIONS": + default: + return nil, nil, http.StatusMethodNotAllowed, nil + } + + requestId, err := uuid.GenerateUUID() + if err != nil { + return nil, nil, http.StatusInternalServerError, fmt.Errorf("failed to generate identifier for the request: %w", err) + } + + req := &logical.Request{ + ID: requestId, + Operation: op, + Path: path, + Data: data, + Connection: getConnection(r), + Headers: r.Header, + } + + if passHTTPReq { + req.HTTPRequest = r + } + if responseWriter != nil { + req.ResponseWriter = logical.NewHTTPResponseWriter(responseWriter) + } + + return req, origBody, 0, nil +} + +func isOcspRequest(contentType string) bool { + contentType, _, err := mime.ParseMediaType(contentType) + if err != nil { + return false + } + + return contentType == "application/ocsp-request" +} + +func buildLogicalPath(r *http.Request) (string, int, error) { + ns, err := namespace.FromContext(r.Context()) + if err != nil { + return "", http.StatusBadRequest, nil + } + + path := ns.TrimmedPath(strings.TrimPrefix(r.URL.Path, "/v1/")) + + switch r.Method { + case "GET": + var ( + list bool + err error + ) + + queryVals := r.URL.Query() + + listStr := queryVals.Get("list") + if listStr != "" { + list, err = strconv.ParseBool(listStr) + if err != nil { + return "", http.StatusBadRequest, nil + } + if list { + if !strings.HasSuffix(path, "/") { + path += "/" + } + } + } + + case "LIST": + if !strings.HasSuffix(path, "/") { + path += "/" + } + } + + return path, 0, nil +} + +func buildLogicalRequest(core *vault.Core, w http.ResponseWriter, r *http.Request) (*logical.Request, io.ReadCloser, int, error) { + req, origBody, status, err := buildLogicalRequestNoAuth(core.PerfStandby(), w, r) + if err != nil || status != 0 { + return 
nil, nil, status, err
+	}
+
+	req.SetRequiredState(r.Header.Values(VaultIndexHeaderName))
+	requestAuth(r, req)
+
+	req, err = requestWrapInfo(r, req)
+	if err != nil {
+		return nil, nil, http.StatusBadRequest, fmt.Errorf("error parsing X-Vault-Wrap-TTL header: %w", err)
+	}
+
+	err = parseMFAHeader(req)
+	if err != nil {
+		return nil, nil, http.StatusBadRequest, fmt.Errorf("failed to parse X-Vault-MFA header: %w", err)
+	}
+
+	err = requestPolicyOverride(r, req)
+	if err != nil {
+		return nil, nil, http.StatusBadRequest, fmt.Errorf("failed to parse %s header: %w", PolicyOverrideHeaderName, err)
+	}
+
+	return req, origBody, 0, nil
+}
+
+// handleLogical returns a handler for processing logical requests. These
+// requests may end up being forwarded under certain scenarios if the node is
+// a performance standby. Some of these cases include:
+// - Perf standby and token with limited use count.
+// - Perf standby and token re-validation needed (e.g. due to invalid token).
+// - Perf standby and control group error.
+func handleLogical(core *vault.Core) http.Handler {
+	return handleLogicalInternal(core, false, false)
+}
+
+// handleLogicalWithInjector returns a handler for processing logical requests
+// that also have their logical response data injected into the top-level
+// payload. All forwarding behavior remains the same as `handleLogical`.
+func handleLogicalWithInjector(core *vault.Core) http.Handler {
+	return handleLogicalInternal(core, true, false)
+}
+
+// handleLogicalNoForward returns a handler for processing logical local-only
+// requests. These types of requests are never forwarded, and return a
+// `vault.ErrCannotForwardLocalOnly` error if forwarding is attempted.
+func handleLogicalNoForward(core *vault.Core) http.Handler {
+	return handleLogicalInternal(core, false, true)
+}
+
+func handleLogicalRecovery(raw *vault.RawBackend, token *atomic.String) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		req, _, statusCode, err := buildLogicalRequestNoAuth(false, w, r)
+		if err != nil || statusCode != 0 {
+			respondError(w, statusCode, err)
+			return
+		}
+		reqToken := r.Header.Get(consts.AuthHeaderName)
+		if reqToken == "" || token.Load() == "" || reqToken != token.Load() {
+			respondError(w, http.StatusForbidden, nil)
+			return
+		}
+
+		resp, err := raw.HandleRequest(r.Context(), req)
+		if respondErrorCommon(w, req, resp, err) {
+			return
+		}
+
+		var httpResp *logical.HTTPResponse
+		if resp != nil {
+			httpResp = logical.LogicalResponseToHTTPResponse(resp)
+			httpResp.RequestID = req.ID
+		}
+		respondOk(w, httpResp)
+	})
+}
+
+// handleLogicalInternal is a common helper that returns a handler for
+// processing logical requests. The behavior depends on the various boolean
+// toggles; refer to the wrapping functions above for the possible behaviors.
+func handleLogicalInternal(core *vault.Core, injectDataIntoTopLevel bool, noForward bool) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		req, origBody, statusCode, err := buildLogicalRequest(core, w, r)
+		if err != nil || statusCode != 0 {
+			respondError(w, statusCode, err)
+			return
+		}
+
+		// WebSockets need to be handled at the HTTP layer instead of as logical requests.
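+		// (Paths under "/v1/sys/events/subscribe/" are dispatched here
+		// directly; a websocket upgrade needs the raw connection, which the
+		// logical request path does not expose.)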
+		if core.IsExperimentEnabled(experiments.VaultExperimentEventsAlpha1) {
+			ns, err := namespace.FromContext(r.Context())
+			if err != nil {
+				respondError(w, http.StatusInternalServerError, err)
+				return
+			}
+			nsPath := ns.Path
+			if ns.ID == namespace.RootNamespaceID {
+				nsPath = ""
+			}
+			if strings.HasPrefix(r.URL.Path, fmt.Sprintf("/v1/%ssys/events/subscribe/", nsPath)) {
+				handler := handleEventsSubscribe(core, req)
+				handler.ServeHTTP(w, r)
+				return
+			}
+		}
+
+		// Make the internal request. We attach the connection info
+		// as well in case this is an authentication request that requires
+		// it. Vault core handles stripping this if we need to. This also
+		// handles all error cases; if we hit respondLogical, the request is a
+		// success.
+		resp, ok, needsForward := request(core, w, r, req)
+		switch {
+		case needsForward && noForward:
+			respondError(w, http.StatusBadRequest, vault.ErrCannotForwardLocalOnly)
+			return
+		case needsForward && !noForward:
+			if origBody != nil {
+				r.Body = origBody
+			}
+			forwardRequest(core, w, r)
+			return
+		case !ok:
+			// If not ok, we simply return. The call on request should have
+			// taken care of setting the appropriate response code and payload
+			// in this case.
+			return
+		default:
+			// Build and return the proper response if everything is fine.
+			respondLogical(core, w, r, req, resp, injectDataIntoTopLevel)
+			return
+		}
+	})
+}
+
+func respondLogical(core *vault.Core, w http.ResponseWriter, r *http.Request, req *logical.Request, resp *logical.Response, injectDataIntoTopLevel bool) {
+	var httpResp *logical.HTTPResponse
+	var ret interface{}
+
+	// If Vault's core has already written to the response writer, do not add
+	// any additional output. Headers have already been sent.
+	if req != nil && req.ResponseWriter != nil && req.ResponseWriter.Written() {
+		return
+	}
+
+	if resp != nil {
+		if resp.Redirect != "" {
+			// If we have a redirect, redirect! We use a 307 code
+			// because we don't actually know if it's permanent.
+			http.Redirect(w, r, resp.Redirect, 307)
+			return
+		}
+
+		// Check if this is a raw response
+		if _, ok := resp.Data[logical.HTTPStatusCode]; ok {
+			respondRaw(w, r, resp)
+			return
+		}
+
+		if resp.WrapInfo != nil && resp.WrapInfo.Token != "" {
+			httpResp = &logical.HTTPResponse{
+				WrapInfo: &logical.HTTPWrapInfo{
+					Token:           resp.WrapInfo.Token,
+					Accessor:        resp.WrapInfo.Accessor,
+					TTL:             int(resp.WrapInfo.TTL.Seconds()),
+					CreationTime:    resp.WrapInfo.CreationTime.Format(time.RFC3339Nano),
+					CreationPath:    resp.WrapInfo.CreationPath,
+					WrappedAccessor: resp.WrapInfo.WrappedAccessor,
+				},
+			}
+		} else {
+			httpResp = logical.LogicalResponseToHTTPResponse(resp)
+			httpResp.RequestID = req.ID
+		}
+
+		ret = httpResp
+
+		if injectDataIntoTopLevel {
+			injector := logical.HTTPSysInjector{
+				Response: httpResp,
+			}
+			ret = injector
+		}
+	}
+
+	adjustResponse(core, w, req)
+
+	// Respond
+	respondOk(w, ret)
+	return
+}
+
+// respondRaw is used when the response is using HTTPContentType and HTTPRawBody
+// to change the default response handling. This is only used for specific things like
+// returning the CRL information on the PKI backends.
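+//
+// A backend producing such a response sets the relevant keys in resp.Data,
+// along these lines (an illustrative sketch; the values are hypothetical):
+//
+//	resp.Data[logical.HTTPStatusCode] = 200
+//	resp.Data[logical.HTTPContentType] = "application/pkix-crl"
+//	resp.Data[logical.HTTPRawBody] = derBytes // []byte, or a base64 string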
+func respondRaw(w http.ResponseWriter, r *http.Request, resp *logical.Response) { + retErr := func(w http.ResponseWriter, err string) { + w.Header().Set("X-Vault-Raw-Error", err) + w.WriteHeader(http.StatusInternalServerError) + w.Write(nil) + } + + // Ensure this is never a secret or auth response + if resp.Secret != nil || resp.Auth != nil { + retErr(w, "raw responses cannot contain secrets or auth") + return + } + + // Get the status code + statusRaw, ok := resp.Data[logical.HTTPStatusCode] + if !ok { + retErr(w, "no status code given") + return + } + + var status int + switch statusRaw.(type) { + case int: + status = statusRaw.(int) + case float64: + status = int(statusRaw.(float64)) + case json.Number: + s64, err := statusRaw.(json.Number).Float64() + if err != nil { + retErr(w, "cannot decode status code") + return + } + status = int(s64) + default: + retErr(w, "cannot decode status code") + return + } + + nonEmpty := status != http.StatusNoContent + + var contentType string + var body []byte + + // Get the content type header; don't require it if the body is empty + contentTypeRaw, ok := resp.Data[logical.HTTPContentType] + if !ok && nonEmpty { + retErr(w, "no content type given") + return + } + if ok { + contentType, ok = contentTypeRaw.(string) + if !ok { + retErr(w, "cannot decode content type") + return + } + } + + if nonEmpty { + // Get the body + bodyRaw, ok := resp.Data[logical.HTTPRawBody] + if !ok { + goto WRITE_RESPONSE + } + + switch bodyRaw.(type) { + case string: + // This is best effort. The value may already be base64-decoded so + // if it doesn't work we just use as-is + bodyDec, err := base64.StdEncoding.DecodeString(bodyRaw.(string)) + if err == nil { + body = bodyDec + } else { + body = []byte(bodyRaw.(string)) + } + case []byte: + body = bodyRaw.([]byte) + default: + retErr(w, "cannot decode body") + return + } + } + +WRITE_RESPONSE: + // Write the response + if contentType != "" { + w.Header().Set("Content-Type", contentType) + } + + if cacheControl, ok := resp.Data[logical.HTTPCacheControlHeader].(string); ok { + w.Header().Set("Cache-Control", cacheControl) + } + + if pragma, ok := resp.Data[logical.HTTPPragmaHeader].(string); ok { + w.Header().Set("Pragma", pragma) + } + + if wwwAuthn, ok := resp.Data[logical.HTTPWWWAuthenticateHeader].(string); ok { + w.Header().Set("WWW-Authenticate", wwwAuthn) + } + + w.WriteHeader(status) + w.Write(body) +} + +// getConnection is used to format the connection information for +// attaching to a logical request +func getConnection(r *http.Request) (connection *logical.Connection) { + var remoteAddr string + var remotePort int + + remoteAddr, port, err := net.SplitHostPort(r.RemoteAddr) + if err != nil { + remoteAddr = "" + } else { + remotePort, err = strconv.Atoi(port) + if err != nil { + remotePort = 0 + } + } + + connection = &logical.Connection{ + RemoteAddr: remoteAddr, + RemotePort: remotePort, + ConnState: r.TLS, + } + return +} diff --git a/http/logical_test.go b/http/logical_test.go new file mode 100644 index 0000000..a9ccdff --- /dev/null +++ b/http/logical_test.go @@ -0,0 +1,940 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "bytes" + "context" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "reflect" + "strconv" + "strings" + "testing" + "time" + + kv "github.com/hashicorp/vault-plugin-secrets-kv" + "github.com/hashicorp/vault/api" + auditFile "github.com/hashicorp/vault/builtin/audit/file" + credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/physical" + "github.com/hashicorp/vault/sdk/physical/inmem" + + "github.com/go-test/deep" + log "github.com/hashicorp/go-hclog" + + "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/vault" +) + +func TestLogical(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + // WRITE + resp := testHttpPut(t, token, addr+"/v1/secret/foo", map[string]interface{}{ + "data": "bar", + }) + testResponseStatus(t, resp, 204) + + // READ + // Bad token should return a 403 + resp = testHttpGet(t, token+"bad", addr+"/v1/secret/foo") + testResponseStatus(t, resp, 403) + + resp = testHttpGet(t, token, addr+"/v1/secret/foo") + var actual map[string]interface{} + var nilWarnings interface{} + expected := map[string]interface{}{ + "renewable": false, + "lease_duration": json.Number(strconv.Itoa(int((32 * 24 * time.Hour) / time.Second))), + "data": map[string]interface{}{ + "data": "bar", + }, + "auth": nil, + "wrap_info": nil, + "warnings": nilWarnings, + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + delete(actual, "lease_id") + expected["request_id"] = actual["request_id"] + if diff := deep.Equal(actual, expected); diff != nil { + t.Fatal(diff) + } + + // DELETE + resp = testHttpDelete(t, token, addr+"/v1/secret/foo") + testResponseStatus(t, resp, 204) + + resp = testHttpGet(t, token, addr+"/v1/secret/foo") + testResponseStatus(t, resp, 404) +} + +func TestLogical_noExist(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpGet(t, token, addr+"/v1/secret/foo") + testResponseStatus(t, resp, 404) +} + +func TestLogical_StandbyRedirect(t *testing.T) { + ln1, addr1 := TestListener(t) + defer ln1.Close() + ln2, addr2 := TestListener(t) + defer ln2.Close() + + // Create an HA Vault + logger := logging.NewVaultLogger(log.Debug) + + inmha, err := inmem.NewInmemHA(nil, logger) + if err != nil { + t.Fatal(err) + } + conf := &vault.CoreConfig{ + Physical: inmha, + HAPhysical: inmha.(physical.HABackend), + RedirectAddr: addr1, + DisableMlock: true, + } + core1, err := vault.NewCore(conf) + if err != nil { + t.Fatalf("err: %v", err) + } + defer core1.Shutdown() + keys, root := vault.TestCoreInit(t, core1) + for _, key := range keys { + if _, err := core1.Unseal(vault.TestKeyCopy(key)); err != nil { + t.Fatalf("unseal err: %s", err) + } + } + + // Attempt to fix raciness in this test by giving the first core a chance + // to grab the lock + time.Sleep(2 * time.Second) + + // Create a second HA Vault + conf2 := &vault.CoreConfig{ + Physical: inmha, + HAPhysical: inmha.(physical.HABackend), + 
RedirectAddr: addr2, + DisableMlock: true, + } + core2, err := vault.NewCore(conf2) + if err != nil { + t.Fatalf("err: %v", err) + } + defer core2.Shutdown() + for _, key := range keys { + if _, err := core2.Unseal(vault.TestKeyCopy(key)); err != nil { + t.Fatalf("unseal err: %s", err) + } + } + + TestServerWithListener(t, ln1, addr1, core1) + TestServerWithListener(t, ln2, addr2, core2) + TestServerAuth(t, addr1, root) + + // WRITE to STANDBY + resp := testHttpPutDisableRedirect(t, root, addr2+"/v1/secret/foo", map[string]interface{}{ + "data": "bar", + }) + logger.Debug("307 test one starting") + testResponseStatus(t, resp, 307) + logger.Debug("307 test one stopping") + + //// READ to standby + resp = testHttpGet(t, root, addr2+"/v1/auth/token/lookup-self") + var actual map[string]interface{} + var nilWarnings interface{} + expected := map[string]interface{}{ + "renewable": false, + "lease_duration": json.Number("0"), + "data": map[string]interface{}{ + "meta": nil, + "num_uses": json.Number("0"), + "path": "auth/token/root", + "policies": []interface{}{"root"}, + "display_name": "root", + "orphan": true, + "id": root, + "ttl": json.Number("0"), + "creation_ttl": json.Number("0"), + "explicit_max_ttl": json.Number("0"), + "expire_time": nil, + "entity_id": "", + "type": "service", + }, + "warnings": nilWarnings, + "wrap_info": nil, + "auth": nil, + } + + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + actualDataMap := actual["data"].(map[string]interface{}) + delete(actualDataMap, "creation_time") + delete(actualDataMap, "accessor") + actual["data"] = actualDataMap + expected["request_id"] = actual["request_id"] + delete(actual, "lease_id") + if diff := deep.Equal(actual, expected); diff != nil { + t.Fatal(diff) + } + + //// DELETE to standby + resp = testHttpDeleteDisableRedirect(t, root, addr2+"/v1/secret/foo") + logger.Debug("307 test two starting") + testResponseStatus(t, resp, 307) + logger.Debug("307 test two stopping") +} + +func TestLogical_CreateToken(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + // WRITE + resp := testHttpPut(t, token, addr+"/v1/auth/token/create", map[string]interface{}{ + "data": "bar", + }) + + var actual map[string]interface{} + expected := map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "data": nil, + "wrap_info": nil, + "auth": map[string]interface{}{ + "policies": []interface{}{"root"}, + "token_policies": []interface{}{"root"}, + "metadata": nil, + "lease_duration": json.Number("0"), + "renewable": false, + "entity_id": "", + "token_type": "service", + "orphan": false, + "mfa_requirement": nil, + "num_uses": json.Number("0"), + }, + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + delete(actual["auth"].(map[string]interface{}), "client_token") + delete(actual["auth"].(map[string]interface{}), "accessor") + delete(actual, "warnings") + expected["request_id"] = actual["request_id"] + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad:\nexpected:\n%#v\nactual:\n%#v", expected, actual) + } +} + +func TestLogical_RawHTTP(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo", map[string]interface{}{ + "type": "http", + }) + testResponseStatus(t, resp, 204) + + // Get the raw response + resp 
= testHttpGet(t, token, addr+"/v1/foo/raw") + testResponseStatus(t, resp, 200) + + // Test the headers + if resp.Header.Get("Content-Type") != "plain/text" { + t.Fatalf("Bad: %#v", resp.Header) + } + + // Get the body + body := new(bytes.Buffer) + io.Copy(body, resp.Body) + if string(body.Bytes()) != "hello world" { + t.Fatalf("Bad: %s", body.Bytes()) + } +} + +func TestLogical_RequestSizeLimit(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + // Write a very large object, should fail. This test works because Go will + // convert the byte slice to base64, which makes it significantly larger + // than the default max request size. + resp := testHttpPut(t, token, addr+"/v1/secret/foo", map[string]interface{}{ + "data": make([]byte, DefaultMaxRequestSize), + }) + testResponseStatus(t, resp, http.StatusRequestEntityTooLarge) +} + +func TestLogical_RequestSizeDisableLimit(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestListener(t) + props := &vault.HandlerProperties{ + Core: core, + ListenerConfig: &configutil.Listener{ + MaxRequestSize: -1, + Address: "127.0.0.1", + TLSDisable: true, + }, + } + TestServerWithListenerAndProperties(t, ln, addr, core, props) + + defer ln.Close() + TestServerAuth(t, addr, token) + + // Write a very large object, should pass as MaxRequestSize set to -1/Negative value + + resp := testHttpPut(t, token, addr+"/v1/secret/foo", map[string]interface{}{ + "data": make([]byte, DefaultMaxRequestSize), + }) + testResponseStatus(t, resp, http.StatusNoContent) +} + +func TestLogical_ListSuffix(t *testing.T) { + core, _, rootToken := vault.TestCoreUnsealed(t) + req, _ := http.NewRequest("GET", "http://127.0.0.1:8200/v1/secret/foo", nil) + req = req.WithContext(namespace.RootContext(nil)) + req.Header.Add(consts.AuthHeaderName, rootToken) + + lreq, _, status, err := buildLogicalRequest(core, nil, req) + if err != nil { + t.Fatal(err) + } + if status != 0 { + t.Fatalf("got status %d", status) + } + if strings.HasSuffix(lreq.Path, "/") { + t.Fatal("trailing slash found on path") + } + + req, _ = http.NewRequest("GET", "http://127.0.0.1:8200/v1/secret/foo?list=true", nil) + req = req.WithContext(namespace.RootContext(nil)) + req.Header.Add(consts.AuthHeaderName, rootToken) + + lreq, _, status, err = buildLogicalRequest(core, nil, req) + if err != nil { + t.Fatal(err) + } + if status != 0 { + t.Fatalf("got status %d", status) + } + if !strings.HasSuffix(lreq.Path, "/") { + t.Fatal("trailing slash not found on path") + } + + req, _ = http.NewRequest("LIST", "http://127.0.0.1:8200/v1/secret/foo", nil) + req = req.WithContext(namespace.RootContext(nil)) + req.Header.Add(consts.AuthHeaderName, rootToken) + + _, _, status, err = buildLogicalRequestNoAuth(core.PerfStandby(), nil, req) + if err != nil || status != 0 { + t.Fatal(err) + } + + lreq, _, status, err = buildLogicalRequest(core, nil, req) + if err != nil { + t.Fatal(err) + } + if status != 0 { + t.Fatalf("got status %d", status) + } + if !strings.HasSuffix(lreq.Path, "/") { + t.Fatal("trailing slash not found on path") + } +} + +func TestLogical_ListWithQueryParameters(t *testing.T) { + core, _, rootToken := vault.TestCoreUnsealed(t) + + tests := []struct { + name string + requestMethod string + url string + expectedData map[string]interface{} + }{ + { + name: "LIST request method parses query parameter", + requestMethod: "LIST", + url: "http://127.0.0.1:8200/v1/secret/foo?key1=value1", + 
expectedData: map[string]interface{}{ + "key1": "value1", + }, + }, + { + name: "LIST request method parses query multiple parameters", + requestMethod: "LIST", + url: "http://127.0.0.1:8200/v1/secret/foo?key1=value1&key2=value2", + expectedData: map[string]interface{}{ + "key1": "value1", + "key2": "value2", + }, + }, + { + name: "GET request method with list=true parses query parameter", + requestMethod: "GET", + url: "http://127.0.0.1:8200/v1/secret/foo?list=true&key1=value1", + expectedData: map[string]interface{}{ + "key1": "value1", + }, + }, + { + name: "GET request method with list=true parses multiple query parameters", + requestMethod: "GET", + url: "http://127.0.0.1:8200/v1/secret/foo?list=true&key1=value1&key2=value2", + expectedData: map[string]interface{}{ + "key1": "value1", + "key2": "value2", + }, + }, + { + name: "GET request method with alternate order list=true parses multiple query parameters", + requestMethod: "GET", + url: "http://127.0.0.1:8200/v1/secret/foo?key1=value1&list=true&key2=value2", + expectedData: map[string]interface{}{ + "key1": "value1", + "key2": "value2", + }, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + req, _ := http.NewRequest(tc.requestMethod, tc.url, nil) + req = req.WithContext(namespace.RootContext(nil)) + req.Header.Add(consts.AuthHeaderName, rootToken) + + lreq, _, status, err := buildLogicalRequest(core, nil, req) + if err != nil { + t.Fatal(err) + } + if status != 0 { + t.Fatalf("got status %d", status) + } + if !strings.HasSuffix(lreq.Path, "/") { + t.Fatal("trailing slash not found on path") + } + if lreq.Operation != logical.ListOperation { + t.Fatalf("expected logical.ListOperation, got %v", lreq.Operation) + } + if !reflect.DeepEqual(tc.expectedData, lreq.Data) { + t.Fatalf("expected query parameter data %v, got %v", tc.expectedData, lreq.Data) + } + }) + } +} + +func TestLogical_RespondWithStatusCode(t *testing.T) { + resp := &logical.Response{ + Data: map[string]interface{}{ + "test-data": "foo", + }, + } + + resp404, err := logical.RespondWithStatusCode(resp, &logical.Request{ID: "id"}, http.StatusNotFound) + if err != nil { + t.Fatal(err) + } + + w := httptest.NewRecorder() + respondLogical(nil, w, nil, nil, resp404, false) + + if w.Code != 404 { + t.Fatalf("Bad Status code: %d", w.Code) + } + + bodyRaw, err := ioutil.ReadAll(w.Body) + if err != nil { + t.Fatal(err) + } + + expected := `{"request_id":"id","lease_id":"","renewable":false,"lease_duration":0,"data":{"test-data":"foo"},"wrap_info":null,"warnings":null,"auth":null}` + + if string(bodyRaw[:]) != strings.Trim(expected, "\n") { + t.Fatalf("bad response: %s", string(bodyRaw[:])) + } +} + +func TestLogical_Audit_invalidWrappingToken(t *testing.T) { + // Create a noop audit backend + noop := corehelpers.TestNoopAudit(t, nil) + c, _, root := vault.TestCoreUnsealedWithConfig(t, &vault.CoreConfig{ + AuditBackends: map[string]audit.Factory{ + "noop": func(ctx context.Context, config *audit.BackendConfig) (audit.Backend, error) { + return noop, nil + }, + }, + }) + ln, addr := TestServer(t, c) + defer ln.Close() + + // Enable the audit backend + + resp := testHttpPost(t, root, addr+"/v1/sys/audit/noop", map[string]interface{}{ + "type": "noop", + }) + testResponseStatus(t, resp, 204) + + { + // Make a wrapping/unwrap request with an invalid token + resp := testHttpPost(t, root, addr+"/v1/sys/wrapping/unwrap", map[string]interface{}{ + "token": "foo", + }) + testResponseStatus(t, resp, 400) + body := map[string][]string{} + testResponseBody(t, 
resp, &body) + if body["errors"][0] != "wrapping token is not valid or does not exist" { + t.Fatal(body) + } + + // Check the audit trail on request and response + if len(noop.ReqAuth) != 1 { + t.Fatalf("bad: %#v", noop) + } + auth := noop.ReqAuth[0] + if auth.ClientToken != root { + t.Fatalf("bad client token: %#v", auth) + } + if len(noop.Req) != 1 || noop.Req[0].Path != "sys/wrapping/unwrap" { + t.Fatalf("bad:\ngot:\n%#v", noop.Req[0]) + } + + if len(noop.ReqErrs) != 1 { + t.Fatalf("bad: %#v", noop.ReqErrs) + } + if noop.ReqErrs[0] != consts.ErrInvalidWrappingToken { + t.Fatalf("bad: %#v", noop.ReqErrs) + } + } + + { + resp := testHttpPostWrapped(t, root, addr+"/v1/auth/token/create", nil, 10*time.Second) + testResponseStatus(t, resp, 200) + body := map[string]interface{}{} + testResponseBody(t, resp, &body) + + wrapToken := body["wrap_info"].(map[string]interface{})["token"].(string) + + // Make a wrapping/unwrap request with the valid wrapping token + resp = testHttpPost(t, root, addr+"/v1/sys/wrapping/unwrap", map[string]interface{}{ + "token": wrapToken, + }) + testResponseStatus(t, resp, 200) + + // Check the audit trail on request and response + if len(noop.ReqAuth) != 3 { + t.Fatalf("bad: %#v", noop) + } + auth := noop.ReqAuth[2] + if auth.ClientToken != root { + t.Fatalf("bad client token: %#v", auth) + } + if len(noop.Req) != 3 || noop.Req[2].Path != "sys/wrapping/unwrap" { + t.Fatalf("bad:\ngot:\n%#v", noop.Req[2]) + } + + // Make sure there is only one error in the logs + if noop.ReqErrs[1] != nil || noop.ReqErrs[2] != nil { + t.Fatalf("bad: %#v", noop.ReqErrs) + } + } +} + +func TestLogical_ShouldParseForm(t *testing.T) { + const formCT = "application/x-www-form-urlencoded" + + tests := map[string]struct { + prefix string + contentType string + isForm bool + }{ + "JSON": {`{"a":42}`, formCT, false}, + "JSON 2": {`[42]`, formCT, false}, + "JSON w/leading space": {" \n\n\r\t [42] ", formCT, false}, + "Form": {"a=42&b=dog", formCT, true}, + "Form w/wrong CT": {"a=42&b=dog", "application/json", false}, + } + + for name, test := range tests { + isForm := isForm([]byte(test.prefix), test.contentType) + + if isForm != test.isForm { + t.Fatalf("%s fail: expected isForm %t, got %t", name, test.isForm, isForm) + } + } +} + +func TestLogical_AuditPort(t *testing.T) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": kv.VersionedKVFactory, + }, + AuditBackends: map[string]audit.Factory{ + "file": auditFile.Factory, + }, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: Handler, + }) + + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + + core := cores[0].Core + c := cluster.Cores[0].Client + vault.TestWaitActive(t, core) + + if err := c.Sys().Mount("kv/", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatalf("kv-v2 mount attempt failed - err: %#v\n", err) + } + + auditLogFile, err := ioutil.TempFile("", "auditport") + if err != nil { + t.Fatal(err) + } + + err = c.Sys().EnableAuditWithOptions("file", &api.EnableAuditOptions{ + Type: "file", + Options: map[string]string{ + "file_path": auditLogFile.Name(), + }, + }) + if err != nil { + t.Fatalf("failed to enable audit file, err: %#v\n", err) + } + + writeData := map[string]interface{}{ + "data": map[string]interface{}{ + "bar": "a", + }, + } + + // workaround kv-v2 initialization upgrade errors + numFailures := 0 + corehelpers.RetryUntil(t, 10*time.Second, func() error { + resp, err := c.Logical().Write("kv/data/foo", 
writeData) + if err != nil { + if strings.Contains(err.Error(), "Upgrading from non-versioned to versioned data") { + t.Logf("Retrying fetch KV data due to upgrade error") + time.Sleep(100 * time.Millisecond) + numFailures += 1 + return err + } + + t.Fatalf("write request failed, err: %#v, resp: %#v\n", err, resp) + } + + return nil + }) + + decoder := json.NewDecoder(auditLogFile) + + var auditRecord map[string]interface{} + count := 0 + for decoder.Decode(&auditRecord) == nil { + count += 1 + + // Skip the first line + if count == 1 { + continue + } + + auditRequest := map[string]interface{}{} + + if req, ok := auditRecord["request"]; ok { + auditRequest = req.(map[string]interface{}) + } + + if _, ok := auditRequest["remote_address"].(string); !ok { + t.Fatalf("remote_address should be a string, not %T", auditRequest["remote_address"]) + } + + if _, ok := auditRequest["remote_port"].(float64); !ok { + t.Fatalf("remote_port should be a number, not %T", auditRequest["remote_port"]) + } + } + + // We expect the following items in the audit log: + // audit log header + an entry for updating sys/audit/file + // + request/response per failure (if any) + request/response for creating kv + numExpectedEntries := (numFailures * 2) + 4 + if count != numExpectedEntries { + t.Fatalf("wrong number of audit entries expected: %d got: %d", numExpectedEntries, count) + } +} + +func TestLogical_ErrRelativePath(t *testing.T) { + coreConfig := &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "userpass": credUserpass.Factory, + }, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: Handler, + }) + + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + + core := cores[0].Core + c := cluster.Cores[0].Client + vault.TestWaitActive(t, core) + + err := c.Sys().EnableAuthWithOptions("userpass", &api.EnableAuthOptions{ + Type: "userpass", + }) + if err != nil { + t.Fatalf("failed to enable userpass, err: %v", err) + } + + resp, err := c.Logical().Read("auth/userpass/users/user..aaa") + + if err == nil || resp != nil { + t.Fatalf("expected read request to fail, resp: %#v, err: %v", resp, err) + } + + respErr, ok := err.(*api.ResponseError) + + if !ok { + t.Fatalf("unexpected error type, err: %#v", err) + } + + if respErr.StatusCode != 400 { + t.Errorf("expected 400 response for read, actual: %d", respErr.StatusCode) + } + + if !strings.Contains(respErr.Error(), logical.ErrRelativePath.Error()) { + t.Errorf("expected response for read to include %q", logical.ErrRelativePath.Error()) + } + + data := map[string]interface{}{ + "password": "abc123", + } + + resp, err = c.Logical().Write("auth/userpass/users/user..aaa", data) + + if err == nil || resp != nil { + t.Fatalf("expected write request to fail, resp: %#v, err: %v", resp, err) + } + + respErr, ok = err.(*api.ResponseError) + + if !ok { + t.Fatalf("unexpected error type, err: %#v", err) + } + + if respErr.StatusCode != 400 { + t.Errorf("expected 400 response for write, actual: %d", respErr.StatusCode) + } + + if !strings.Contains(respErr.Error(), logical.ErrRelativePath.Error()) { + t.Errorf("expected response for write to include %q", logical.ErrRelativePath.Error()) + } +} + +func testBuiltinPluginMetadataAuditLog(t *testing.T, log map[string]interface{}, expectedMountClass string) { + if mountClass, ok := log["mount_class"].(string); !ok { + t.Fatalf("mount_class should be a string, not %T", log["mount_class"]) + } else if mountClass != expectedMountClass { + t.Fatalf("bad: 
mount_class should be %s, not %s", expectedMountClass, mountClass) + } + + if _, ok := log["mount_running_version"].(string); !ok { + t.Fatalf("mount_running_version should be a string, not %T", log["mount_running_version"]) + } + + if _, ok := log["mount_running_sha256"].(string); ok { + t.Fatalf("mount_running_sha256 should be nil, not %T", log["mount_running_sha256"]) + } + + if mountIsExternalPlugin, ok := log["mount_is_external_plugin"].(bool); ok && mountIsExternalPlugin { + t.Fatalf("mount_is_external_plugin should be nil or false, not %T", log["mount_is_external_plugin"]) + } +} + +// TestLogical_AuditEnabled_ShouldLogPluginMetadata_Auth tests that the audit log contains plugin metadata +// for a builtin auth plugin when auditing is enabled +func TestLogical_AuditEnabled_ShouldLogPluginMetadata_Auth(t *testing.T) { + coreConfig := &vault.CoreConfig{ + AuditBackends: map[string]audit.Factory{ + "file": auditFile.Factory, + }, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: Handler, + }) + + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + + core := cores[0].Core + c := cluster.Cores[0].Client + vault.TestWaitActive(t, core) + + // Enable the audit backend + tempDir := t.TempDir() + auditLogFile, err := os.CreateTemp(tempDir, "") + if err != nil { + t.Fatal(err) + } + + err = c.Sys().EnableAuditWithOptions("file", &api.EnableAuditOptions{ + Type: "file", + Options: map[string]string{ + "file_path": auditLogFile.Name(), + }, + }) + if err != nil { + t.Fatal(err) + } + + _, err = c.Logical().Write("auth/token/create", map[string]interface{}{ + "ttl": "10s", + }) + if err != nil { + t.Fatal(err) + } + + // Check the audit trail on request and response + decoder := json.NewDecoder(auditLogFile) + var auditRecord map[string]interface{} + for decoder.Decode(&auditRecord) == nil { + auditRequest := map[string]interface{}{} + if req, ok := auditRecord["request"]; ok { + auditRequest = req.(map[string]interface{}) + if auditRequest["path"] != "auth/token/create" { + continue + } + } + testBuiltinPluginMetadataAuditLog(t, auditRequest, consts.PluginTypeCredential.String()) + + auditResponse := map[string]interface{}{} + if resp, ok := auditRecord["response"]; ok { + auditResponse = resp.(map[string]interface{}) + if auditResponse["path"] != "auth/token/create" { + continue + } + } + testBuiltinPluginMetadataAuditLog(t, auditResponse, consts.PluginTypeCredential.String()) + } +} + +// TestLogical_AuditEnabled_ShouldLogPluginMetadata_Secret tests that the audit log contains plugin metadata +// for a builtin secrets plugin when auditing is enabled +func TestLogical_AuditEnabled_ShouldLogPluginMetadata_Secret(t *testing.T) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": kv.VersionedKVFactory, + }, + AuditBackends: map[string]audit.Factory{ + "file": auditFile.Factory, + }, + } + + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: Handler, + }) + + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + + core := cores[0].Core + c := cluster.Cores[0].Client + vault.TestWaitActive(t, core) + + if err := c.Sys().Mount("kv/", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + t.Fatalf("kv-v2 mount attempt failed - err: %#v\n", err) + } + + // Enable the audit backend + tempDir := t.TempDir() + auditLogFile, err := os.CreateTemp(tempDir, "") + if err != nil { + t.Fatal(err) + } + + err = c.Sys().EnableAuditWithOptions("file", &api.EnableAuditOptions{ +
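+ // audit entries land in the temp file so the JSON decoder below can replay them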
Type: "file", + Options: map[string]string{ + "file_path": auditLogFile.Name(), + }, + }) + if err != nil { + t.Fatal(err) + } + + { + writeData := map[string]interface{}{ + "data": map[string]interface{}{ + "bar": "a", + }, + } + corehelpers.RetryUntil(t, 10*time.Second, func() error { + resp, err := c.Logical().Write("kv/data/foo", writeData) + if err != nil { + t.Fatalf("write request failed, err: %#v, resp: %#v\n", err, resp) + } + return nil + }) + } + + // Check the audit trail on request and response + decoder := json.NewDecoder(auditLogFile) + var auditRecord map[string]interface{} + for decoder.Decode(&auditRecord) == nil { + auditRequest := map[string]interface{}{} + if req, ok := auditRecord["request"]; ok { + auditRequest = req.(map[string]interface{}) + if auditRequest["path"] != "kv/data/foo" { + continue + } + } + testBuiltinPluginMetadataAuditLog(t, auditRequest, consts.PluginTypeSecrets.String()) + + auditResponse := map[string]interface{}{} + if resp, ok := auditRecord["response"]; ok { + auditResponse = resp.(map[string]interface{}) + if auditResponse["path"] != "kv/data/foo" { + continue + } + } + testBuiltinPluginMetadataAuditLog(t, auditResponse, consts.PluginTypeSecrets.String()) + } +} diff --git a/http/plugin_test.go b/http/plugin_test.go new file mode 100644 index 0000000..b0d85be --- /dev/null +++ b/http/plugin_test.go @@ -0,0 +1,196 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "encoding/json" + "io/ioutil" + "os" + "reflect" + "sync" + "testing" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" + bplugin "github.com/hashicorp/vault/builtin/plugin" + "github.com/hashicorp/vault/helper/benchhelpers" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/physical" + "github.com/hashicorp/vault/sdk/physical/inmem" + "github.com/hashicorp/vault/sdk/plugin" + "github.com/hashicorp/vault/sdk/plugin/mock" + "github.com/hashicorp/vault/vault" +) + +func getPluginClusterAndCore(t testing.TB, logger log.Logger) (*vault.TestCluster, *vault.TestClusterCore) { + inm, err := inmem.NewTransactionalInmem(nil, logger) + if err != nil { + t.Fatal(err) + } + inmha, err := inmem.NewInmemHA(nil, logger) + if err != nil { + t.Fatal(err) + } + + coreConfig := &vault.CoreConfig{ + Physical: inm, + HAPhysical: inmha.(physical.HABackend), + LogicalBackends: map[string]logical.Factory{ + "plugin": bplugin.Factory, + }, + } + + cluster := vault.NewTestCluster(benchhelpers.TBtoT(t), coreConfig, &vault.TestClusterOptions{ + HandlerFunc: Handler, + Logger: logger.Named("testclusteroptions"), + }) + cluster.Start() + + cores := cluster.Cores + core := cores[0] + + os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile) + + vault.TestWaitActive(benchhelpers.TBtoT(t), core.Core) + vault.TestAddTestPlugin(benchhelpers.TBtoT(t), core.Core, "mock-plugin", consts.PluginTypeSecrets, "", "TestPlugin_PluginMain", []string{}, "") + + // Mount the mock plugin + err = core.Client.Sys().Mount("mock", &api.MountInput{ + Type: "mock-plugin", + }) + if err != nil { + t.Fatal(err) + } + + return cluster, core +} + +func TestPlugin_PluginMain(t *testing.T) { + if os.Getenv(pluginutil.PluginVaultVersionEnv) == "" { + return + } + + caPEM := os.Getenv(pluginutil.PluginCACertPEMEnv) + if caPEM == "" { + t.Fatal("CA cert not passed in") + } + + args := []string{"--ca-cert=" + caPEM} + + apiClientMeta := 
&api.PluginAPIClientMeta{} + flags := apiClientMeta.FlagSet() + flags.Parse(args) + + factoryFunc := mock.FactoryType(logical.TypeLogical) + + err := plugin.Serve(&plugin.ServeOpts{ + BackendFactoryFunc: factoryFunc, + }) + if err != nil { + t.Fatal(err) + } + t.Fatal("Why are we here") +} + +func TestPlugin_MockList(t *testing.T) { + logger := log.New(&log.LoggerOptions{ + Mutex: &sync.Mutex{}, + }) + cluster, core := getPluginClusterAndCore(t, logger) + defer cluster.Cleanup() + + _, err := core.Client.Logical().Write("mock/kv/foo", map[string]interface{}{ + "value": "baz", + }) + if err != nil { + t.Fatal(err) + } + + keys, err := core.Client.Logical().List("mock/kv/") + if err != nil { + t.Fatal(err) + } + if keys.Data["keys"].([]interface{})[0].(string) != "foo" { + t.Fatal(keys) + } + + _, err = core.Client.Logical().Write("mock/kv/zoo", map[string]interface{}{ + "value": "baz", + }) + if err != nil { + t.Fatal(err) + } + + keys, err = core.Client.Logical().List("mock/kv/") + if err != nil { + t.Fatal(err) + } + if keys.Data["keys"].([]interface{})[0].(string) != "foo" || keys.Data["keys"].([]interface{})[1].(string) != "zoo" { + t.Fatal(keys) + } +} + +func TestPlugin_MockRawResponse(t *testing.T) { + logger := log.New(&log.LoggerOptions{ + Mutex: &sync.Mutex{}, + }) + cluster, core := getPluginClusterAndCore(t, logger) + defer cluster.Cleanup() + + resp, err := core.Client.RawRequest(core.Client.NewRequest("GET", "/v1/mock/raw")) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + if string(body[:]) != "Response" { + t.Fatal("bad body") + } + + if resp.StatusCode != 200 { + t.Fatal("bad status") + } +} + +func TestPlugin_GetParams(t *testing.T) { + logger := log.New(&log.LoggerOptions{ + Mutex: &sync.Mutex{}, + }) + cluster, core := getPluginClusterAndCore(t, logger) + defer cluster.Cleanup() + + _, err := core.Client.Logical().Write("mock/kv/foo", map[string]interface{}{ + "value": "baz", + }) + if err != nil { + t.Fatal(err) + } + + r := core.Client.NewRequest("GET", "/v1/mock/kv/foo") + r.Params.Add("version", "12") + resp, err := core.Client.RawRequest(r) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + + secret, err := api.ParseSecret(resp.Body) + if err != nil { + t.Fatal(err) + } + + expected := map[string]interface{}{ + "value": "baz", + "version": json.Number("12"), + } + + if !reflect.DeepEqual(secret.Data, expected) { + t.Fatal(secret.Data) + } +} diff --git a/http/sys_audit_test.go b/http/sys_audit_test.go new file mode 100644 index 0000000..2ec4ffc --- /dev/null +++ b/http/sys_audit_test.go @@ -0,0 +1,135 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "encoding/json" + "reflect" + "testing" + + "github.com/hashicorp/vault/vault" +) + +func TestSysAudit(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPost(t, token, addr+"/v1/sys/audit/noop", map[string]interface{}{ + "type": "noop", + }) + testResponseStatus(t, resp, 204) + + resp = testHttpGet(t, token, addr+"/v1/sys/audit") + + var actual map[string]interface{} + expected := map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "noop/": map[string]interface{}{ + "path": "noop/", + "type": "noop", + "description": "", + "options": map[string]interface{}{}, + "local": false, + }, + }, + "noop/": map[string]interface{}{ + "path": "noop/", + "type": "noop", + "description": "", + "options": map[string]interface{}{}, + "local": false, + }, + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + + expected["request_id"] = actual["request_id"] + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: expected:\n%#v actual:\n%#v\n", expected, actual) + } +} + +func TestSysDisableAudit(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPost(t, token, addr+"/v1/sys/audit/foo", map[string]interface{}{ + "type": "noop", + }) + testResponseStatus(t, resp, 204) + + resp = testHttpDelete(t, token, addr+"/v1/sys/audit/foo") + testResponseStatus(t, resp, 204) + + resp = testHttpGet(t, token, addr+"/v1/sys/audit") + + var actual map[string]interface{} + expected := map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{}, + } + + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + + expected["request_id"] = actual["request_id"] + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad:\nactual: %#v\nexpected: %#v\n", actual, expected) + } +} + +func TestSysAuditHash(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPost(t, token, addr+"/v1/sys/audit/noop", map[string]interface{}{ + "type": "noop", + }) + testResponseStatus(t, resp, 204) + + resp = testHttpPost(t, token, addr+"/v1/sys/audit-hash/noop", map[string]interface{}{ + "input": "bar", + }) + + var actual map[string]interface{} + expected := map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "hash": "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317", + }, + "hash": "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317", + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + + expected["request_id"] = actual["request_id"] + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: expected:\n%#v\n, got:\n%#v\n", expected, actual) + } +} diff --git a/http/sys_auth_test.go b/http/sys_auth_test.go new file mode 100644 index 0000000..3bd0a00 --- /dev/null +++ b/http/sys_auth_test.go @@ -0,0 +1,608 @@ +// 
Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + "time" + + "github.com/go-test/deep" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/helper/versions" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/vault" +) + +func TestSysAuth(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpGet(t, token, addr+"/v1/sys/auth") + + var actual map[string]interface{} + expected := map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "token/": map[string]interface{}{ + "description": "token based credentials", + "type": "token", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "token_type": "default-service", + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeCredential, "token"), + }, + }, + "token/": map[string]interface{}{ + "description": "token based credentials", + "type": "token", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "token_type": "default-service", + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeCredential, "token"), + }, + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + + expected["request_id"] = actual["request_id"] + for k, v := range actual["data"].(map[string]interface{}) { + if v.(map[string]interface{})["accessor"] == "" { + t.Fatalf("no accessor from %s", k) + } + if v.(map[string]interface{})["uuid"] == "" { + t.Fatalf("no uuid from %s", k) + } + + expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected[k].(map[string]interface{})["uuid"] = v.(map[string]interface{})["uuid"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["uuid"] = v.(map[string]interface{})["uuid"] + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual) + } +} + +func TestSysEnableAuth(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPost(t, token, addr+"/v1/sys/auth/foo", map[string]interface{}{ + "type": "approle", + "description": "foo", + }) + testResponseStatus(t, resp, 204) + + resp = testHttpGet(t, token, addr+"/v1/sys/auth") + + var actual map[string]interface{} + expected := map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "foo/": map[string]interface{}{ + "description": "foo", + "type": "approle", + 
"external_entropy_access": false, + "deprecation_status": "supported", + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "token_type": "default-service", + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{}, + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeCredential, "approle"), + }, + "token/": map[string]interface{}{ + "description": "token based credentials", + "type": "token", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + "token_type": "default-service", + }, + "local": false, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeCredential, "token"), + }, + }, + "foo/": map[string]interface{}{ + "description": "foo", + "type": "approle", + "external_entropy_access": false, + "deprecation_status": "supported", + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "token_type": "default-service", + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{}, + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeCredential, "approle"), + }, + "token/": map[string]interface{}{ + "description": "token based credentials", + "type": "token", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "token_type": "default-service", + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeCredential, "token"), + }, + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + + expected["request_id"] = actual["request_id"] + for k, v := range actual["data"].(map[string]interface{}) { + if v.(map[string]interface{})["accessor"] == "" { + t.Fatalf("no accessor from %s", k) + } + if v.(map[string]interface{})["uuid"] == "" { + t.Fatalf("no uuid from %s", k) + } + + expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected[k].(map[string]interface{})["uuid"] = v.(map[string]interface{})["uuid"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["uuid"] = v.(map[string]interface{})["uuid"] + } + + if diff := deep.Equal(actual, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestSysDisableAuth(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPost(t, token, addr+"/v1/sys/auth/foo", map[string]interface{}{ + "type": "noop", + "description": "foo", + }) + testResponseStatus(t, resp, 204) + + resp = testHttpDelete(t, token, addr+"/v1/sys/auth/foo") + testResponseStatus(t, resp, 204) + + resp = testHttpGet(t, token, addr+"/v1/sys/auth") + + var actual map[string]interface{} + expected := map[string]interface{}{ + 
"lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "token/": map[string]interface{}{ + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "token_type": "default-service", + "force_no_cache": false, + }, + "description": "token based credentials", + "type": "token", + "external_entropy_access": false, + "local": false, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeCredential, "token"), + }, + }, + "token/": map[string]interface{}{ + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "token_type": "default-service", + "force_no_cache": false, + }, + "description": "token based credentials", + "type": "token", + "external_entropy_access": false, + "local": false, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeCredential, "token"), + }, + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + + expected["request_id"] = actual["request_id"] + for k, v := range actual["data"].(map[string]interface{}) { + if v.(map[string]interface{})["accessor"] == "" { + t.Fatalf("no accessor from %s", k) + } + if v.(map[string]interface{})["uuid"] == "" { + t.Fatalf("no uuid from %s", k) + } + + expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected[k].(map[string]interface{})["uuid"] = v.(map[string]interface{})["uuid"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["uuid"] = v.(map[string]interface{})["uuid"] + } + + if diff := deep.Equal(actual, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestSysTuneAuth_nonHMACKeys(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + // Mount-tune the audit_non_hmac_request_keys + resp := testHttpPost(t, token, addr+"/v1/sys/auth/token/tune", map[string]interface{}{ + "audit_non_hmac_request_keys": "foo", + }) + testResponseStatus(t, resp, 204) + + // Mount-tune the audit_non_hmac_response_keys + resp = testHttpPost(t, token, addr+"/v1/sys/auth/token/tune", map[string]interface{}{ + "audit_non_hmac_response_keys": "bar", + }) + testResponseStatus(t, resp, 204) + + // Check results + resp = testHttpGet(t, token, addr+"/v1/sys/auth/token/tune") + testResponseStatus(t, resp, 200) + + actual := map[string]interface{}{} + expected := map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "description": "token based credentials", + "default_lease_ttl": json.Number("2764800"), + "max_lease_ttl": json.Number("2764800"), + "force_no_cache": false, + "audit_non_hmac_request_keys": []interface{}{"foo"}, + "audit_non_hmac_response_keys": []interface{}{"bar"}, + "token_type": "default-service", + }, + "description": "token based credentials", + "default_lease_ttl": json.Number("2764800"), + "max_lease_ttl": json.Number("2764800"), + 
"force_no_cache": false, + "audit_non_hmac_request_keys": []interface{}{"foo"}, + "audit_non_hmac_response_keys": []interface{}{"bar"}, + "token_type": "default-service", + } + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad:\nExpected: %#v\nActual:%#v", expected, actual) + } + + // Unset those mount tune values + resp = testHttpPost(t, token, addr+"/v1/sys/auth/token/tune", map[string]interface{}{ + "audit_non_hmac_request_keys": "", + }) + testResponseStatus(t, resp, 204) + + resp = testHttpPost(t, token, addr+"/v1/sys/auth/token/tune", map[string]interface{}{ + "audit_non_hmac_response_keys": "", + }) + + // Check results + resp = testHttpGet(t, token, addr+"/v1/sys/auth/token/tune") + testResponseStatus(t, resp, 200) + + actual = map[string]interface{}{} + expected = map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "description": "token based credentials", + "default_lease_ttl": json.Number("2764800"), + "max_lease_ttl": json.Number("2764800"), + "force_no_cache": false, + "token_type": "default-service", + }, + "description": "token based credentials", + "default_lease_ttl": json.Number("2764800"), + "max_lease_ttl": json.Number("2764800"), + "force_no_cache": false, + "token_type": "default-service", + } + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad:\nExpected: %#v\nActual:%#v", expected, actual) + } +} + +func TestSysTuneAuth_showUIMount(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + // Get original tune values, ensure that listing_visibility is not set + resp := testHttpGet(t, token, addr+"/v1/sys/auth/token/tune") + testResponseStatus(t, resp, 200) + + actual := map[string]interface{}{} + expected := map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "description": "token based credentials", + "default_lease_ttl": json.Number("2764800"), + "max_lease_ttl": json.Number("2764800"), + "force_no_cache": false, + "token_type": "default-service", + }, + "description": "token based credentials", + "default_lease_ttl": json.Number("2764800"), + "max_lease_ttl": json.Number("2764800"), + "force_no_cache": false, + "token_type": "default-service", + } + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad:\nExpected: %#v\nActual:%#v", expected, actual) + } + + // Mount-tune the listing_visibility + resp = testHttpPost(t, token, addr+"/v1/sys/auth/token/tune", map[string]interface{}{ + "listing_visibility": "unauth", + }) + testResponseStatus(t, resp, 204) + + // Check results + resp = testHttpGet(t, token, addr+"/v1/sys/auth/token/tune") + testResponseStatus(t, resp, 200) + + actual = map[string]interface{}{} + expected = map[string]interface{}{ + "description": "token based credentials", + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "description": "token based credentials", + "default_lease_ttl": 
json.Number("2764800"), + "max_lease_ttl": json.Number("2764800"), + "force_no_cache": false, + "listing_visibility": "unauth", + "token_type": "default-service", + }, + "default_lease_ttl": json.Number("2764800"), + "max_lease_ttl": json.Number("2764800"), + "force_no_cache": false, + "listing_visibility": "unauth", + "token_type": "default-service", + } + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad:\nExpected: %#v\nActual:%#v", expected, actual) + } +} + +func TestSysRemountAuth(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPost(t, token, addr+"/v1/sys/auth/foo", map[string]interface{}{ + "type": "noop", + "description": "foo", + }) + testResponseStatus(t, resp, 204) + + resp = testHttpPost(t, token, addr+"/v1/sys/remount", map[string]interface{}{ + "from": "auth/foo", + "to": "auth/bar", + }) + testResponseStatus(t, resp, 200) + + // Poll until the remount succeeds + var remountResp map[string]interface{} + testResponseBody(t, resp, &remountResp) + corehelpers.RetryUntil(t, 5*time.Second, func() error { + resp = testHttpGet(t, token, addr+"/v1/sys/remount/status/"+remountResp["migration_id"].(string)) + testResponseStatus(t, resp, 200) + + var remountStatusResp map[string]interface{} + testResponseBody(t, resp, &remountStatusResp) + + status := remountStatusResp["data"].(map[string]interface{})["migration_info"].(map[string]interface{})["status"] + if status != "success" { + return fmt.Errorf("Expected migration status to be successful, got %q", status) + } + return nil + }) + + resp = testHttpGet(t, token, addr+"/v1/sys/auth") + + var actual map[string]interface{} + expected := map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "bar/": map[string]interface{}{ + "description": "foo", + "type": "noop", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "token_type": "default-service", + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{}, + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + }, + "token/": map[string]interface{}{ + "description": "token based credentials", + "type": "token", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + "token_type": "default-service", + }, + "local": false, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeCredential, "token"), + }, + }, + "bar/": map[string]interface{}{ + "description": "foo", + "type": "noop", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "token_type": "default-service", + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{}, + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": 
versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + }, + "token/": map[string]interface{}{ + "description": "token based credentials", + "type": "token", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "token_type": "default-service", + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeCredential, "token"), + }, + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + + expected["request_id"] = actual["request_id"] + for k, v := range actual["data"].(map[string]interface{}) { + if v.(map[string]interface{})["accessor"] == "" { + t.Fatalf("no accessor from %s", k) + } + if v.(map[string]interface{})["uuid"] == "" { + t.Fatalf("no uuid from %s", k) + } + + expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected[k].(map[string]interface{})["uuid"] = v.(map[string]interface{})["uuid"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["uuid"] = v.(map[string]interface{})["uuid"] + } + + if diff := deep.Equal(actual, expected); diff != nil { + t.Fatal(diff) + } +} diff --git a/http/sys_config_cors_test.go b/http/sys_config_cors_test.go new file mode 100644 index 0000000..2f4a29a --- /dev/null +++ b/http/sys_config_cors_test.go @@ -0,0 +1,80 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "encoding/json" + "net/http" + "reflect" + "testing" + + "github.com/hashicorp/vault/vault" +) + +func TestSysConfigCors(t *testing.T) { + var resp *http.Response + + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + corsConf := core.CORSConfig() + + // Try to enable CORS without providing a value for allowed_origins + resp = testHttpPut(t, token, addr+"/v1/sys/config/cors", map[string]interface{}{ + "allowed_headers": "X-Custom-Header", + }) + + testResponseStatus(t, resp, 500) + + // Enable CORS, but provide an origin this time. 
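+ // allowed_origins is required, so this update should succeed with a 204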
+ resp = testHttpPut(t, token, addr+"/v1/sys/config/cors", map[string]interface{}{ + "allowed_origins": addr, + "allowed_headers": "X-Custom-Header", + }) + + testResponseStatus(t, resp, 204) + + // Read the CORS configuration + resp = testHttpGet(t, token, addr+"/v1/sys/config/cors") + testResponseStatus(t, resp, 200) + + var actual map[string]interface{} + var expected map[string]interface{} + + lenStdHeaders := len(corsConf.AllowedHeaders) + + expectedHeaders := make([]interface{}, lenStdHeaders) + + for i := range corsConf.AllowedHeaders { + expectedHeaders[i] = corsConf.AllowedHeaders[i] + } + + expected = map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "enabled": true, + "allowed_origins": []interface{}{addr}, + "allowed_headers": expectedHeaders, + }, + "enabled": true, + "allowed_origins": []interface{}{addr}, + "allowed_headers": expectedHeaders, + } + + testResponseStatus(t, resp, 200) + + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual) + } +} diff --git a/http/sys_config_state_test.go b/http/sys_config_state_test.go new file mode 100644 index 0000000..5dfdf27 --- /dev/null +++ b/http/sys_config_state_test.go @@ -0,0 +1,203 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "encoding/json" + "net/http" + "testing" + + "github.com/go-test/deep" + "github.com/hashicorp/vault/command/server" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/vault" +) + +func TestSysConfigState_Sanitized(t *testing.T) { + cases := []struct { + name string + storageConfig *server.Storage + haStorageConfig *server.Storage + expectedStorageOutput map[string]interface{} + expectedHAStorageOutput map[string]interface{} + }{ + { + name: "raft storage", + storageConfig: &server.Storage{ + Type: "raft", + RedirectAddr: "http://127.0.0.1:8200", + ClusterAddr: "http://127.0.0.1:8201", + DisableClustering: false, + Config: map[string]string{ + "path": "/storage/path/raft", + "node_id": "raft1", + "max_entry_size": "2097152", + }, + }, + haStorageConfig: nil, + expectedStorageOutput: map[string]interface{}{ + "type": "raft", + "redirect_addr": "http://127.0.0.1:8200", + "cluster_addr": "http://127.0.0.1:8201", + "disable_clustering": false, + "raft": map[string]interface{}{ + "max_entry_size": "2097152", + }, + }, + expectedHAStorageOutput: nil, + }, + { + name: "inmem storage, no HA storage", + storageConfig: &server.Storage{ + Type: "inmem", + RedirectAddr: "http://127.0.0.1:8200", + ClusterAddr: "http://127.0.0.1:8201", + DisableClustering: false, + }, + haStorageConfig: nil, + expectedStorageOutput: map[string]interface{}{ + "type": "inmem", + "redirect_addr": "http://127.0.0.1:8200", + "cluster_addr": "http://127.0.0.1:8201", + "disable_clustering": false, + }, + expectedHAStorageOutput: nil, + }, + { + name: "inmem storage, raft HA storage", + storageConfig: &server.Storage{ + Type: "inmem", + RedirectAddr: "http://127.0.0.1:8200", + ClusterAddr: "http://127.0.0.1:8201", + DisableClustering: false, + }, + haStorageConfig: &server.Storage{ + Type: "raft", + RedirectAddr: "http://127.0.0.1:8200", + ClusterAddr: "http://127.0.0.1:8201", + DisableClustering: false, + Config: map[string]string{ + "path": "/storage/path/raft", + "node_id": 
"raft1", + "max_entry_size": "2097152", + }, + }, + expectedStorageOutput: map[string]interface{}{ + "type": "inmem", + "redirect_addr": "http://127.0.0.1:8200", + "cluster_addr": "http://127.0.0.1:8201", + "disable_clustering": false, + }, + expectedHAStorageOutput: map[string]interface{}{ + "type": "raft", + "redirect_addr": "http://127.0.0.1:8200", + "cluster_addr": "http://127.0.0.1:8201", + "disable_clustering": false, + "raft": map[string]interface{}{ + "max_entry_size": "2097152", + }, + }, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + var resp *http.Response + confRaw := &server.Config{ + Storage: tc.storageConfig, + HAStorage: tc.haStorageConfig, + SharedConfig: &configutil.SharedConfig{ + Listeners: []*configutil.Listener{ + { + Type: "tcp", + Address: "127.0.0.1", + }, + }, + }, + } + + conf := &vault.CoreConfig{ + RawConfig: confRaw, + } + + core, _, token := vault.TestCoreUnsealedWithConfig(t, conf) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp = testHttpGet(t, token, addr+"/v1/sys/config/state/sanitized") + testResponseStatus(t, resp, 200) + + var actual map[string]interface{} + var expected map[string]interface{} + + configResp := map[string]interface{}{ + "api_addr": "", + "cache_size": json.Number("0"), + "cluster_addr": "", + "cluster_cipher_suites": "", + "cluster_name": "", + "default_lease_ttl": json.Number("0"), + "default_max_request_duration": json.Number("0"), + "disable_cache": false, + "disable_clustering": false, + "disable_indexing": false, + "disable_mlock": false, + "disable_performance_standby": false, + "disable_printable_check": false, + "disable_sealwrap": false, + "experiments": nil, + "raw_storage_endpoint": false, + "detect_deadlocks": "", + "introspection_endpoint": false, + "disable_sentinel_trace": false, + "enable_ui": false, + "log_format": "", + "log_level": "", + "max_lease_ttl": json.Number("0"), + "pid_file": "", + "plugin_directory": "", + "plugin_file_uid": json.Number("0"), + "plugin_file_permissions": json.Number("0"), + "enable_response_header_hostname": false, + "enable_response_header_raft_node_id": false, + "log_requests_level": "", + "listeners": []interface{}{ + map[string]interface{}{ + "config": nil, + "type": "tcp", + }, + }, + "storage": tc.expectedStorageOutput, + "administrative_namespace_path": "", + "imprecise_lease_role_tracking": false, + } + + if tc.expectedHAStorageOutput != nil { + configResp["ha_storage"] = tc.expectedHAStorageOutput + } + + expected = map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": configResp, + } + + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] + + if diff := deep.Equal(actual, expected); len(diff) > 0 { + t.Fatalf("bad mismatch response body: diff: %v", diff) + } + }) + } +} diff --git a/http/sys_feature_flags.go b/http/sys_feature_flags.go new file mode 100644 index 0000000..9f654b7 --- /dev/null +++ b/http/sys_feature_flags.go @@ -0,0 +1,55 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "encoding/json" + "net/http" + "os" + + "github.com/hashicorp/vault/vault" +) + +type FeatureFlagsResponse struct { + FeatureFlags []string `json:"feature_flags"` +} + +var FeatureFlag_EnvVariables = [...]string{ + "VAULT_CLOUD_ADMIN_NAMESPACE", +} + +func featureFlagIsSet(name string) bool { + switch os.Getenv(name) { + case "", "0": + return false + default: + return true + } +} + +func handleSysInternalFeatureFlags(core *vault.Core) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case "GET": + break + default: + respondError(w, http.StatusMethodNotAllowed, nil) + return + } + + response := &FeatureFlagsResponse{} + + for _, f := range FeatureFlag_EnvVariables { + if featureFlagIsSet(f) { + response.FeatureFlags = append(response.FeatureFlags, f) + } + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + // Generate the response + enc := json.NewEncoder(w) + enc.Encode(response) + }) +} diff --git a/http/sys_generate_root.go b/http/sys_generate_root.go new file mode 100644 index 0000000..7f953e4 --- /dev/null +++ b/http/sys_generate_root.go @@ -0,0 +1,224 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "io" + "net/http" + + "github.com/hashicorp/go-secure-stdlib/base62" + "github.com/hashicorp/vault/vault" +) + +func handleSysGenerateRootAttempt(core *vault.Core, generateStrategy vault.GenerateRootStrategy) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case "GET": + handleSysGenerateRootAttemptGet(core, w, r, "") + case "POST", "PUT": + handleSysGenerateRootAttemptPut(core, w, r, generateStrategy) + case "DELETE": + handleSysGenerateRootAttemptDelete(core, w, r) + default: + respondError(w, http.StatusMethodNotAllowed, nil) + } + }) +} + +func handleSysGenerateRootAttemptGet(core *vault.Core, w http.ResponseWriter, r *http.Request, otp string) { + ctx, cancel := core.GetContext() + defer cancel() + + // Get the current seal configuration + barrierConfig, err := core.SealAccess().BarrierConfig(ctx) + if err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + if barrierConfig == nil { + respondError(w, http.StatusBadRequest, fmt.Errorf("server is not yet initialized")) + return + } + + sealConfig := barrierConfig + if core.SealAccess().RecoveryKeySupported() { + sealConfig, err = core.SealAccess().RecoveryConfig(ctx) + if err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + } + + // Get the generation configuration + generationConfig, err := core.GenerateRootConfiguration() + if err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + + // Get the progress + progress, err := core.GenerateRootProgress() + if err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + var otpLength int + if core.DisableSSCTokens() { + otpLength = vault.TokenLength + vault.OldTokenPrefixLength + } else { + otpLength = vault.TokenLength + vault.TokenPrefixLength + } + + // Format the status + status := &GenerateRootStatusResponse{ + Started: false, + Progress: progress, + Required: sealConfig.SecretThreshold, + Complete: false, + OTPLength: otpLength, + OTP: otp, + } + if generationConfig != nil { + status.Nonce = generationConfig.Nonce + status.Started = true +
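+ // surface the PGP fingerprint from the init request so callers can verify the wrapping key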
status.PGPFingerprint = generationConfig.PGPFingerprint + } + + respondOk(w, status) +} + +func handleSysGenerateRootAttemptPut(core *vault.Core, w http.ResponseWriter, r *http.Request, generateStrategy vault.GenerateRootStrategy) { + // Parse the request + var req GenerateRootInitRequest + if _, err := parseJSONRequest(core.PerfStandby(), r, w, &req); err != nil && err != io.EOF { + respondError(w, http.StatusBadRequest, err) + return + } + + var err error + var genned bool + + switch { + case len(req.PGPKey) > 0, len(req.OTP) > 0: + default: + genned = true + if core.DisableSSCTokens() { + req.OTP, err = base62.Random(vault.TokenLength + vault.OldTokenPrefixLength) + } else { + req.OTP, err = base62.Random(vault.TokenLength + vault.TokenPrefixLength) + } + if err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + } + + // Initialize the generation attempt + if err := core.GenerateRootInit(req.OTP, req.PGPKey, generateStrategy); err != nil { + respondError(w, http.StatusBadRequest, err) + return + } + + if genned { + handleSysGenerateRootAttemptGet(core, w, r, req.OTP) + return + } + + handleSysGenerateRootAttemptGet(core, w, r, "") +} + +func handleSysGenerateRootAttemptDelete(core *vault.Core, w http.ResponseWriter, r *http.Request) { + err := core.GenerateRootCancel() + if err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + respondOk(w, nil) +} + +func handleSysGenerateRootUpdate(core *vault.Core, generateStrategy vault.GenerateRootStrategy) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Parse the request + var req GenerateRootUpdateRequest + if _, err := parseJSONRequest(core.PerfStandby(), r, w, &req); err != nil { + respondError(w, http.StatusBadRequest, err) + return + } + if req.Key == "" { + respondError( + w, http.StatusBadRequest, + errors.New("'key' must be specified in request body as JSON")) + return + } + + // Decode the key, which is base64 or hex encoded + min, max := core.BarrierKeyLength() + key, err := hex.DecodeString(req.Key) + // We check min and max here to ensure that a string that is base64 + // encoded but also valid hex will not be valid and we instead base64 + // decode it + if err != nil || len(key) < min || len(key) > max { + key, err = base64.StdEncoding.DecodeString(req.Key) + if err != nil { + respondError( + w, http.StatusBadRequest, + errors.New("'key' must be a valid hex or base64 string")) + return + } + } + + ctx, cancel := core.GetContext() + defer cancel() + + // Use the key to make progress on root generation + result, err := core.GenerateRootUpdate(ctx, key, req.Nonce, generateStrategy) + if err != nil { + respondError(w, http.StatusBadRequest, err) + return + } + + resp := &GenerateRootStatusResponse{ + Complete: result.Progress == result.Required, + Nonce: req.Nonce, + Progress: result.Progress, + Required: result.Required, + Started: true, + EncodedToken: result.EncodedToken, + PGPFingerprint: result.PGPFingerprint, + } + + if generateStrategy == vault.GenerateStandardRootTokenStrategy { + resp.EncodedRootToken = result.EncodedToken + } + + respondOk(w, resp) + }) +} + +type GenerateRootInitRequest struct { + OTP string `json:"otp"` + PGPKey string `json:"pgp_key"` +} + +type GenerateRootStatusResponse struct { + Nonce string `json:"nonce"` + Started bool `json:"started"` + Progress int `json:"progress"` + Required int `json:"required"` + Complete bool `json:"complete"` + EncodedToken string `json:"encoded_token"` + EncodedRootToken string 
`json:"encoded_root_token"` + PGPFingerprint string `json:"pgp_fingerprint"` + OTP string `json:"otp"` + OTPLength int `json:"otp_length"` +} + +type GenerateRootUpdateRequest struct { + Nonce string + Key string +} diff --git a/http/sys_generate_root_test.go b/http/sys_generate_root_test.go new file mode 100644 index 0000000..dbd7796 --- /dev/null +++ b/http/sys_generate_root_test.go @@ -0,0 +1,477 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "context" + "encoding/base64" + "encoding/hex" + "encoding/json" + "fmt" + "net" + "net/http" + "reflect" + "testing" + + "github.com/go-test/deep" + "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/helper/pgpkeys" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/sdk/helper/xor" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +var tokenLength string = fmt.Sprintf("%d", vault.TokenLength+vault.TokenPrefixLength) + +func TestSysGenerateRootAttempt_Status(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp, err := http.Get(addr + "/v1/sys/generate-root/attempt") + if err != nil { + t.Fatalf("err: %s", err) + } + + var actual map[string]interface{} + expected := map[string]interface{}{ + "started": false, + "progress": json.Number("0"), + "required": json.Number("3"), + "complete": false, + "encoded_token": "", + "encoded_root_token": "", + "pgp_fingerprint": "", + "nonce": "", + "otp_length": json.Number(tokenLength), + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + expected["otp"] = actual["otp"] + if diff := deep.Equal(actual, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestSysGenerateRootAttempt_Setup_OTP(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", nil) + testResponseStatus(t, resp, 200) + + var actual map[string]interface{} + expected := map[string]interface{}{ + "started": true, + "progress": json.Number("0"), + "required": json.Number("3"), + "complete": false, + "encoded_token": "", + "encoded_root_token": "", + "pgp_fingerprint": "", + "otp_length": json.Number(tokenLength), + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + if actual["nonce"].(string) == "" { + t.Fatalf("nonce was empty") + } + expected["nonce"] = actual["nonce"] + expected["otp"] = actual["otp"] + if diff := deep.Equal(actual, expected); diff != nil { + t.Fatal(diff) + } + + resp = testHttpGet(t, token, addr+"/v1/sys/generate-root/attempt") + + actual = map[string]interface{}{} + expected = map[string]interface{}{ + "started": true, + "progress": json.Number("0"), + "required": json.Number("3"), + "complete": false, + "encoded_token": "", + "encoded_root_token": "", + "pgp_fingerprint": "", + "otp": "", + "otp_length": json.Number(tokenLength), + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + if actual["nonce"].(string) == "" { + t.Fatalf("nonce was empty") + } + expected["nonce"] = actual["nonce"] + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual) + } +} + +func TestSysGenerateRootAttempt_Setup_PGP(t *testing.T) { + core, _, token := 
vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", map[string]interface{}{ + "pgp_key": pgpkeys.TestPubKey1, + }) + testResponseStatus(t, resp, 200) + + resp = testHttpGet(t, token, addr+"/v1/sys/generate-root/attempt") + + var actual map[string]interface{} + expected := map[string]interface{}{ + "started": true, + "progress": json.Number("0"), + "required": json.Number("3"), + "complete": false, + "encoded_token": "", + "encoded_root_token": "", + "pgp_fingerprint": "816938b8a29146fbe245dd29e7cbaf8e011db793", + "otp": "", + "otp_length": json.Number(tokenLength), + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + if actual["nonce"].(string) == "" { + t.Fatalf("nonce was empty") + } + expected["nonce"] = actual["nonce"] + if diff := deep.Equal(actual, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestSysGenerateRootAttempt_Cancel(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", nil) + + var actual map[string]interface{} + expected := map[string]interface{}{ + "started": true, + "progress": json.Number("0"), + "required": json.Number("3"), + "complete": false, + "encoded_token": "", + "encoded_root_token": "", + "pgp_fingerprint": "", + "otp_length": json.Number(tokenLength), + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + if actual["nonce"].(string) == "" { + t.Fatalf("nonce was empty") + } + expected["nonce"] = actual["nonce"] + expected["otp"] = actual["otp"] + if diff := deep.Equal(actual, expected); diff != nil { + t.Fatal(diff) + } + + resp = testHttpDelete(t, token, addr+"/v1/sys/generate-root/attempt") + testResponseStatus(t, resp, 204) + + resp, err := http.Get(addr + "/v1/sys/generate-root/attempt") + if err != nil { + t.Fatalf("err: %s", err) + } + + actual = map[string]interface{}{} + expected = map[string]interface{}{ + "started": false, + "progress": json.Number("0"), + "required": json.Number("3"), + "complete": false, + "encoded_token": "", + "encoded_root_token": "", + "pgp_fingerprint": "", + "nonce": "", + "otp": "", + "otp_length": json.Number(tokenLength), + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual) + } +} + +func enableNoopAudit(t *testing.T, token string, core *vault.Core) { + t.Helper() + auditReq := &logical.Request{ + Operation: logical.UpdateOperation, + ClientToken: token, + Path: "sys/audit/noop", + Data: map[string]interface{}{ + "type": "noop", + }, + } + resp, err := core.HandleRequest(namespace.RootContext(context.Background()), auditReq) + if err != nil { + t.Fatal(err) + } + + if resp.IsError() { + t.Fatal(err) + } +} + +func testCoreUnsealedWithAudit(t *testing.T, records **[][]byte) (*vault.Core, [][]byte, string) { + conf := &vault.CoreConfig{ + BuiltinRegistry: corehelpers.NewMockBuiltinRegistry(), + AuditBackends: map[string]audit.Factory{ + "noop": corehelpers.NoopAuditFactory(records), + }, + } + core, keys, token := vault.TestCoreUnsealedWithConfig(t, conf) + return core, keys, token +} + +func testServerWithAudit(t *testing.T, records **[][]byte) (net.Listener, string, string, [][]byte) { + core, keys, token := testCoreUnsealedWithAudit(t, records) 
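+	// Wire the unsealed core to a test HTTP listener, set up auth for the
+	// token, and enable the noop audit device so requests land in *records.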
+	ln, addr := TestServer(t, core)
+	TestServerAuth(t, addr, token)
+	enableNoopAudit(t, token, core)
+	return ln, addr, token, keys
+}
+
+func TestSysGenerateRoot_badKey(t *testing.T) {
+	var records *[][]byte
+	ln, addr, token, _ := testServerWithAudit(t, &records)
+	defer ln.Close()
+
+	resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/update", map[string]interface{}{
+		"key": "0123",
+	})
+	testResponseStatus(t, resp, 400)
+
+	if len(*records) < 3 {
+		// One record for enabling the noop audit device, two (request and
+		// response) for the generate-root update
+		t.Fatalf("expected at least 3 audit records, got %d", len(*records))
+	}
+	t.Log(string((*records)[2]))
+}
+
+func TestSysGenerateRoot_ReAttemptUpdate(t *testing.T) {
+	core, _, token := vault.TestCoreUnsealed(t)
+	ln, addr := TestServer(t, core)
+	defer ln.Close()
+	TestServerAuth(t, addr, token)
+
+	resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", nil)
+	testResponseStatus(t, resp, 200)
+
+	resp = testHttpDelete(t, token, addr+"/v1/sys/generate-root/attempt")
+	testResponseStatus(t, resp, 204)
+
+	resp = testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", map[string]interface{}{
+		"pgp_key": pgpkeys.TestPubKey1,
+	})
+
+	testResponseStatus(t, resp, 200)
+}
+
+func TestSysGenerateRoot_Update_OTP(t *testing.T) {
+	var records *[][]byte
+	ln, addr, token, keys := testServerWithAudit(t, &records)
+	defer ln.Close()
+
+	resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", map[string]interface{}{})
+	var rootGenerationStatus map[string]interface{}
+	testResponseStatus(t, resp, 200)
+	testResponseBody(t, resp, &rootGenerationStatus)
+	otp := rootGenerationStatus["otp"].(string)
+
+	var actual map[string]interface{}
+	var expected map[string]interface{}
+	for i, key := range keys {
+		resp = testHttpPut(t, token, addr+"/v1/sys/generate-root/update", map[string]interface{}{
+			"nonce": rootGenerationStatus["nonce"].(string),
+			"key":   hex.EncodeToString(key),
+		})
+
+		actual = map[string]interface{}{}
+		expected = map[string]interface{}{
+			"complete":        false,
+			"nonce":           rootGenerationStatus["nonce"].(string),
+			"progress":        json.Number(fmt.Sprintf("%d", i+1)),
+			"required":        json.Number(fmt.Sprintf("%d", len(keys))),
+			"started":         true,
+			"pgp_fingerprint": "",
+			"otp":             "",
+			"otp_length":      json.Number("0"),
+		}
+		if i+1 == len(keys) {
+			expected["complete"] = true
+		}
+		testResponseStatus(t, resp, 200)
+		testResponseBody(t, resp, &actual)
+	}
+
+	if actual["encoded_token"] == nil || actual["encoded_token"] == "" {
+		t.Fatalf("no encoded token found in response")
+	}
+	if actual["encoded_root_token"] == nil || actual["encoded_root_token"] == "" {
+		t.Fatalf("no encoded root token found in response")
+	}
+	expected["encoded_token"] = actual["encoded_token"]
+	expected["encoded_root_token"] = actual["encoded_root_token"]
+
+	if !reflect.DeepEqual(actual, expected) {
+		t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
+	}
+
+	tokenBytes, err := base64.RawStdEncoding.DecodeString(expected["encoded_token"].(string))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	tokenBytes, err = xor.XORBytes(tokenBytes, []byte(otp))
+	if err != nil {
+		t.Fatal(err)
+	}
+	newRootToken := string(tokenBytes)
+
+	actual = map[string]interface{}{}
+	expected = map[string]interface{}{
+		"id":               newRootToken,
+		"display_name":     "root",
+		"meta":             interface{}(nil),
+		"num_uses":         json.Number("0"),
+		"policies":         []interface{}{"root"},
+		"orphan":           true,
+		"creation_ttl":     json.Number("0"),
+		"ttl":              json.Number("0"),
json.Number("0"), + "path": "auth/token/root", + "explicit_max_ttl": json.Number("0"), + "expire_time": nil, + "entity_id": "", + "type": "service", + } + + resp = testHttpGet(t, newRootToken, addr+"/v1/auth/token/lookup-self") + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + + expected["creation_time"] = actual["data"].(map[string]interface{})["creation_time"] + expected["accessor"] = actual["data"].(map[string]interface{})["accessor"] + + if !reflect.DeepEqual(actual["data"], expected) { + t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual["data"]) + } + + for _, r := range *records { + t.Log(string(r)) + } +} + +func TestSysGenerateRoot_Update_PGP(t *testing.T) { + core, keys, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", map[string]interface{}{ + "pgp_key": pgpkeys.TestPubKey1, + }) + testResponseStatus(t, resp, 200) + + // We need to get the nonce first before we update + resp, err := http.Get(addr + "/v1/sys/generate-root/attempt") + if err != nil { + t.Fatalf("err: %s", err) + } + var rootGenerationStatus map[string]interface{} + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &rootGenerationStatus) + + var actual map[string]interface{} + var expected map[string]interface{} + for i, key := range keys { + resp = testHttpPut(t, token, addr+"/v1/sys/generate-root/update", map[string]interface{}{ + "nonce": rootGenerationStatus["nonce"].(string), + "key": hex.EncodeToString(key), + }) + + actual = map[string]interface{}{} + expected = map[string]interface{}{ + "complete": false, + "nonce": rootGenerationStatus["nonce"].(string), + "progress": json.Number(fmt.Sprintf("%d", i+1)), + "required": json.Number(fmt.Sprintf("%d", len(keys))), + "started": true, + "pgp_fingerprint": "816938b8a29146fbe245dd29e7cbaf8e011db793", + "otp": "", + "otp_length": json.Number("0"), + } + if i+1 == len(keys) { + expected["complete"] = true + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + } + + if actual["encoded_token"] == nil || actual["encoded_token"] == "" { + t.Fatalf("no encoded token found in response") + } + if actual["encoded_root_token"] == nil || actual["encoded_root-token"] == "" { + t.Fatalf("no encoded root token found in response") + } + expected["encoded_token"] = actual["encoded_token"] + expected["encoded_root_token"] = actual["encoded_root_token"] + expected["encoded_token"] = actual["encoded_token"] + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual) + } + + decodedTokenBuf, err := pgpkeys.DecryptBytes(actual["encoded_token"].(string), pgpkeys.TestPrivKey1) + if err != nil { + t.Fatal(err) + } + if decodedTokenBuf == nil { + t.Fatal("decoded root token buffer is nil") + } + + newRootToken := decodedTokenBuf.String() + + actual = map[string]interface{}{} + expected = map[string]interface{}{ + "id": newRootToken, + "display_name": "root", + "meta": interface{}(nil), + "num_uses": json.Number("0"), + "policies": []interface{}{"root"}, + "orphan": true, + "creation_ttl": json.Number("0"), + "ttl": json.Number("0"), + "path": "auth/token/root", + "explicit_max_ttl": json.Number("0"), + "expire_time": nil, + "entity_id": "", + "type": "service", + } + + resp = testHttpGet(t, newRootToken, addr+"/v1/auth/token/lookup-self") + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + + expected["creation_time"] 
= actual["data"].(map[string]interface{})["creation_time"] + expected["accessor"] = actual["data"].(map[string]interface{})["accessor"] + + if diff := deep.Equal(actual["data"], expected); diff != nil { + t.Fatal(diff) + } +} diff --git a/http/sys_health.go b/http/sys_health.go new file mode 100644 index 0000000..b3f29d4 --- /dev/null +++ b/http/sys_health.go @@ -0,0 +1,242 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "strconv" + "time" + + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/version" +) + +func handleSysHealth(core *vault.Core) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case "GET": + handleSysHealthGet(core, w, r) + case "HEAD": + handleSysHealthHead(core, w, r) + default: + respondError(w, http.StatusMethodNotAllowed, nil) + } + }) +} + +func fetchStatusCode(r *http.Request, field string) (int, bool, bool) { + var err error + statusCode := http.StatusOK + if statusCodeStr, statusCodeOk := r.URL.Query()[field]; statusCodeOk { + statusCode, err = strconv.Atoi(statusCodeStr[0]) + if err != nil || len(statusCodeStr) < 1 { + return http.StatusBadRequest, false, false + } + return statusCode, true, true + } + return statusCode, false, true +} + +func handleSysHealthGet(core *vault.Core, w http.ResponseWriter, r *http.Request) { + code, body, err := getSysHealth(core, r) + if err != nil { + core.Logger().Error("error checking health", "error", err) + respondError(w, code, nil) + return + } + + if body == nil { + respondError(w, code, nil) + return + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + + // Generate the response + enc := json.NewEncoder(w) + enc.Encode(body) +} + +func handleSysHealthHead(core *vault.Core, w http.ResponseWriter, r *http.Request) { + code, body, _ := getSysHealth(core, r) + + if body != nil { + w.Header().Set("Content-Type", "application/json") + } + w.WriteHeader(code) +} + +func getSysHealth(core *vault.Core, r *http.Request) (int, *HealthResponse, error) { + var err error + + // Check if being a standby is allowed for the purpose of a 200 OK + standbyOKStr, standbyOK := r.URL.Query()["standbyok"] + if standbyOK { + standbyOK, err = parseutil.ParseBool(standbyOKStr[0]) + if err != nil { + return http.StatusBadRequest, nil, fmt.Errorf("bad value for standbyok parameter: %w", err) + } + } + perfStandbyOKStr, perfStandbyOK := r.URL.Query()["perfstandbyok"] + if perfStandbyOK { + perfStandbyOK, err = parseutil.ParseBool(perfStandbyOKStr[0]) + if err != nil { + return http.StatusBadRequest, nil, fmt.Errorf("bad value for perfstandbyok parameter: %w", err) + } + } + + uninitCode := http.StatusNotImplemented + if code, found, ok := fetchStatusCode(r, "uninitcode"); !ok { + return http.StatusBadRequest, nil, nil + } else if found { + uninitCode = code + } + + sealedCode := http.StatusServiceUnavailable + if code, found, ok := fetchStatusCode(r, "sealedcode"); !ok { + return http.StatusBadRequest, nil, nil + } else if found { + sealedCode = code + } + + standbyCode := http.StatusTooManyRequests // Consul warning code + if code, found, ok := fetchStatusCode(r, "standbycode"); !ok { + return http.StatusBadRequest, nil, nil + } else if found { + standbyCode = code + } + + activeCode := http.StatusOK + if code, found, ok := fetchStatusCode(r, 
"activecode"); !ok { + return http.StatusBadRequest, nil, nil + } else if found { + activeCode = code + } + + drSecondaryCode := 472 // unofficial 4xx status code + if code, found, ok := fetchStatusCode(r, "drsecondarycode"); !ok { + return http.StatusBadRequest, nil, nil + } else if found { + drSecondaryCode = code + } + + perfStandbyCode := 473 // unofficial 4xx status code + if code, found, ok := fetchStatusCode(r, "performancestandbycode"); !ok { + return http.StatusBadRequest, nil, nil + } else if found { + perfStandbyCode = code + } + + ctx := context.Background() + + // Check system status + sealed := core.Sealed() + standby, perfStandby := core.StandbyStates() + var replicationState consts.ReplicationState + if standby { + replicationState = core.ActiveNodeReplicationState() + } else { + replicationState = core.ReplicationState() + } + + init, err := core.Initialized(ctx) + if err != nil { + return http.StatusInternalServerError, nil, err + } + + // Determine the status code + code := activeCode + switch { + case !init: + code = uninitCode + case sealed: + code = sealedCode + case replicationState.HasState(consts.ReplicationDRSecondary): + code = drSecondaryCode + case perfStandby: + if !perfStandbyOK { + code = perfStandbyCode + } + case standby: + if !standbyOK { + code = standbyCode + } + } + + // Fetch the local cluster name and identifier + var clusterName, clusterID string + if !sealed { + cluster, err := core.Cluster(ctx) + if err != nil { + return http.StatusInternalServerError, nil, err + } + if cluster == nil { + return http.StatusInternalServerError, nil, fmt.Errorf("failed to fetch cluster details") + } + clusterName = cluster.Name + clusterID = cluster.ID + } + + // Format the body + body := &HealthResponse{ + Initialized: init, + Sealed: sealed, + Standby: standby, + PerformanceStandby: perfStandby, + ReplicationPerformanceMode: replicationState.GetPerformanceString(), + ReplicationDRMode: replicationState.GetDRString(), + ServerTimeUTC: time.Now().UTC().Unix(), + Version: version.GetVersion().VersionNumber(), + ClusterName: clusterName, + ClusterID: clusterID, + } + + licenseState, err := vault.LicenseSummary(core) + if err != nil { + return http.StatusInternalServerError, nil, err + } + + if licenseState != nil { + body.License = &HealthResponseLicense{ + State: licenseState.State, + Terminated: licenseState.Terminated, + } + if !licenseState.ExpiryTime.IsZero() { + body.License.ExpiryTime = licenseState.ExpiryTime.Format(time.RFC3339) + } + } + + if init && !sealed && !standby { + body.LastWAL = vault.LastWAL(core) + } + + return code, body, nil +} + +type HealthResponseLicense struct { + State string `json:"state"` + ExpiryTime string `json:"expiry_time"` + Terminated bool `json:"terminated"` +} + +type HealthResponse struct { + Initialized bool `json:"initialized"` + Sealed bool `json:"sealed"` + Standby bool `json:"standby"` + PerformanceStandby bool `json:"performance_standby"` + ReplicationPerformanceMode string `json:"replication_performance_mode"` + ReplicationDRMode string `json:"replication_dr_mode"` + ServerTimeUTC int64 `json:"server_time_utc"` + Version string `json:"version"` + ClusterName string `json:"cluster_name,omitempty"` + ClusterID string `json:"cluster_id,omitempty"` + LastWAL uint64 `json:"last_wal,omitempty"` + License *HealthResponseLicense `json:"license,omitempty"` +} diff --git a/http/sys_health_test.go b/http/sys_health_test.go new file mode 100644 index 0000000..9761ec1 --- /dev/null +++ b/http/sys_health_test.go @@ -0,0 +1,281 @@ +// 
Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "io/ioutil" + "net/http" + "net/url" + "reflect" + "testing" + + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/vault" +) + +func TestSysHealth_get(t *testing.T) { + core := vault.TestCore(t) + ln, addr := TestServer(t, core) + defer ln.Close() + + resp, err := http.Get(addr + "/v1/sys/health") + if err != nil { + t.Fatalf("err: %s", err) + } + + var actual map[string]interface{} + expected := map[string]interface{}{ + "replication_performance_mode": consts.ReplicationUnknown.GetPerformanceString(), + "replication_dr_mode": consts.ReplicationUnknown.GetDRString(), + "initialized": false, + "sealed": true, + "standby": true, + "performance_standby": false, + } + testResponseStatus(t, resp, 501) + testResponseBody(t, resp, &actual) + expected["server_time_utc"] = actual["server_time_utc"] + expected["version"] = actual["version"] + if actual["cluster_name"] == nil { + delete(expected, "cluster_name") + } else { + expected["cluster_name"] = actual["cluster_name"] + } + if actual["cluster_id"] == nil { + delete(expected, "cluster_id") + } else { + expected["cluster_id"] = actual["cluster_id"] + } + delete(actual, "license") + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual) + } + + keys, _ := vault.TestCoreInit(t, core) + resp, err = http.Get(addr + "/v1/sys/health") + if err != nil { + t.Fatalf("err: %s", err) + } + + actual = map[string]interface{}{} + expected = map[string]interface{}{ + "replication_performance_mode": consts.ReplicationUnknown.GetPerformanceString(), + "replication_dr_mode": consts.ReplicationUnknown.GetDRString(), + "initialized": true, + "sealed": true, + "standby": true, + "performance_standby": false, + } + testResponseStatus(t, resp, 503) + testResponseBody(t, resp, &actual) + expected["server_time_utc"] = actual["server_time_utc"] + expected["version"] = actual["version"] + if actual["cluster_name"] == nil { + delete(expected, "cluster_name") + } else { + expected["cluster_name"] = actual["cluster_name"] + } + if actual["cluster_id"] == nil { + delete(expected, "cluster_id") + } else { + expected["cluster_id"] = actual["cluster_id"] + } + delete(actual, "license") + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual) + } + + for _, key := range keys { + if _, err := vault.TestCoreUnseal(core, vault.TestKeyCopy(key)); err != nil { + t.Fatalf("unseal err: %s", err) + } + } + resp, err = http.Get(addr + "/v1/sys/health") + if err != nil { + t.Fatalf("err: %s", err) + } + + actual = map[string]interface{}{} + expected = map[string]interface{}{ + "replication_performance_mode": consts.ReplicationPerformanceDisabled.GetPerformanceString(), + "replication_dr_mode": consts.ReplicationDRDisabled.GetDRString(), + "initialized": true, + "sealed": false, + "standby": false, + "performance_standby": false, + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + expected["server_time_utc"] = actual["server_time_utc"] + expected["version"] = actual["version"] + if actual["cluster_name"] == nil { + delete(expected, "cluster_name") + } else { + expected["cluster_name"] = actual["cluster_name"] + } + if actual["cluster_id"] == nil { + delete(expected, "cluster_id") + } else { + expected["cluster_id"] = actual["cluster_id"] + } + delete(actual, "license") + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: 
expected:%#v\nactual:%#v", expected, actual) + } +} + +func TestSysHealth_customcodes(t *testing.T) { + core := vault.TestCore(t) + ln, addr := TestServer(t, core) + defer ln.Close() + + queryurl, err := url.Parse(addr + "/v1/sys/health?uninitcode=581&sealedcode=523&activecode=202") + if err != nil { + t.Fatalf("err: %s", err) + } + resp, err := http.Get(queryurl.String()) + if err != nil { + t.Fatalf("err: %s", err) + } + + var actual map[string]interface{} + expected := map[string]interface{}{ + "replication_performance_mode": consts.ReplicationUnknown.GetPerformanceString(), + "replication_dr_mode": consts.ReplicationUnknown.GetDRString(), + "initialized": false, + "sealed": true, + "standby": true, + "performance_standby": false, + } + testResponseStatus(t, resp, 581) + testResponseBody(t, resp, &actual) + + expected["server_time_utc"] = actual["server_time_utc"] + expected["version"] = actual["version"] + if actual["cluster_name"] == nil { + delete(expected, "cluster_name") + } else { + expected["cluster_name"] = actual["cluster_name"] + } + if actual["cluster_id"] == nil { + delete(expected, "cluster_id") + } else { + expected["cluster_id"] = actual["cluster_id"] + } + delete(actual, "license") + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual) + } + + keys, _ := vault.TestCoreInit(t, core) + resp, err = http.Get(queryurl.String()) + if err != nil { + t.Fatalf("err: %s", err) + } + + actual = map[string]interface{}{} + expected = map[string]interface{}{ + "replication_performance_mode": consts.ReplicationUnknown.GetPerformanceString(), + "replication_dr_mode": consts.ReplicationUnknown.GetDRString(), + "initialized": true, + "sealed": true, + "standby": true, + "performance_standby": false, + } + testResponseStatus(t, resp, 523) + testResponseBody(t, resp, &actual) + + expected["server_time_utc"] = actual["server_time_utc"] + expected["version"] = actual["version"] + if actual["cluster_name"] == nil { + delete(expected, "cluster_name") + } else { + expected["cluster_name"] = actual["cluster_name"] + } + if actual["cluster_id"] == nil { + delete(expected, "cluster_id") + } else { + expected["cluster_id"] = actual["cluster_id"] + } + delete(actual, "license") + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual) + } + + for _, key := range keys { + if _, err := vault.TestCoreUnseal(core, vault.TestKeyCopy(key)); err != nil { + t.Fatalf("unseal err: %s", err) + } + } + resp, err = http.Get(queryurl.String()) + if err != nil { + t.Fatalf("err: %s", err) + } + + actual = map[string]interface{}{} + expected = map[string]interface{}{ + "replication_performance_mode": consts.ReplicationPerformanceDisabled.GetPerformanceString(), + "replication_dr_mode": consts.ReplicationDRDisabled.GetDRString(), + "initialized": true, + "sealed": false, + "standby": false, + "performance_standby": false, + } + testResponseStatus(t, resp, 202) + testResponseBody(t, resp, &actual) + expected["server_time_utc"] = actual["server_time_utc"] + expected["version"] = actual["version"] + if actual["cluster_name"] == nil { + delete(expected, "cluster_name") + } else { + expected["cluster_name"] = actual["cluster_name"] + } + if actual["cluster_id"] == nil { + delete(expected, "cluster_id") + } else { + expected["cluster_id"] = actual["cluster_id"] + } + delete(actual, "license") + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual) + } +} + +func 
TestSysHealth_head(t *testing.T) { + core, _, _ := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + + testData := []struct { + uri string + code int + }{ + {"", 200}, + {"?activecode=503", 503}, + {"?activecode=notacode", 400}, + } + + for _, tt := range testData { + queryurl, err := url.Parse(addr + "/v1/sys/health" + tt.uri) + if err != nil { + t.Fatalf("err on %v: %s", queryurl, err) + } + resp, err := http.Head(queryurl.String()) + if err != nil { + t.Fatalf("err on %v: %s", queryurl, err) + } + + if resp.StatusCode != tt.code { + t.Fatalf("HEAD %v expected code %d, got %d.", queryurl, tt.code, resp.StatusCode) + } + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("err on %v: %s", queryurl, err) + } + if len(data) > 0 { + t.Fatalf("HEAD %v expected no body, received \"%v\".", queryurl, data) + } + } +} diff --git a/http/sys_hostinfo_test.go b/http/sys_hostinfo_test.go new file mode 100644 index 0000000..756841e --- /dev/null +++ b/http/sys_hostinfo_test.go @@ -0,0 +1,64 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "encoding/json" + "testing" + + "github.com/hashicorp/vault/helper/hostutil" + "github.com/hashicorp/vault/vault" +) + +func TestSysHostInfo(t *testing.T) { + cluster := vault.NewTestCluster(t, &vault.CoreConfig{}, &vault.TestClusterOptions{ + HandlerFunc: Handler, + }) + cluster.Start() + defer cluster.Cleanup() + cores := cluster.Cores + + vault.TestWaitActive(t, cores[0].Core) + + // Query against the active node, should get host information back + secret, err := cores[0].Client.Logical().Read("sys/host-info") + if err != nil { + t.Fatal(err) + } + if secret == nil || secret.Data == nil { + t.Fatal("expected data in the response") + } + + dataBytes, err := json.Marshal(secret.Data) + if err != nil { + t.Fatal(err) + } + + var info hostutil.HostInfo + if err := json.Unmarshal(dataBytes, &info); err != nil { + t.Fatal(err) + } + + if info.Timestamp.IsZero() { + t.Fatal("expected non-zero Timestamp") + } + if info.CPU == nil { + t.Fatal("expected non-nil CPU value") + } + if info.Disk == nil { + t.Fatal("expected disk info") + } + if info.Host == nil { + t.Fatal("expected host info") + } + if info.Memory == nil { + t.Fatal("expected memory info") + } + + // Query against a standby, should error + secret, err = cores[1].Client.Logical().Read("sys/host-info") + if err == nil || secret != nil { + t.Fatalf("expected error on standby node, HostInfo: %v", secret) + } +} diff --git a/http/sys_in_flight_requests.go b/http/sys_in_flight_requests.go new file mode 100644 index 0000000..bdf3eba --- /dev/null +++ b/http/sys_in_flight_requests.go @@ -0,0 +1,25 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "net/http" + + "github.com/hashicorp/vault/vault" +) + +func handleUnAuthenticatedInFlightRequest(core *vault.Core) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case "GET": + default: + respondError(w, http.StatusMethodNotAllowed, nil) + return + } + + currentInFlightReqMap := core.LoadInFlightReqData() + + respondOk(w, currentInFlightReqMap) + }) +} diff --git a/http/sys_in_flight_requests_test.go b/http/sys_in_flight_requests_test.go new file mode 100644 index 0000000..93c92c5 --- /dev/null +++ b/http/sys_in_flight_requests_test.go @@ -0,0 +1,49 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "testing" + + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/vault" +) + +func TestInFlightRequestUnauthenticated(t *testing.T) { + conf := &vault.CoreConfig{} + core, _, token := vault.TestCoreUnsealedWithConfig(t, conf) + ln, addr := TestServer(t, core) + TestServerAuth(t, addr, token) + + // Default: Only authenticated access + resp := testHttpGet(t, "", addr+"/v1/sys/in-flight-req") + testResponseStatus(t, resp, 403) + resp = testHttpGet(t, token, addr+"/v1/sys/in-flight-req") + testResponseStatus(t, resp, 200) + + // Close listener + ln.Close() + + // Setup new custom listener with unauthenticated metrics access + ln, addr = TestListener(t) + props := &vault.HandlerProperties{ + Core: core, + ListenerConfig: &configutil.Listener{ + InFlightRequestLogging: configutil.ListenerInFlightRequestLogging{ + UnauthenticatedInFlightAccess: true, + }, + }, + } + TestServerWithListenerAndProperties(t, ln, addr, core, props) + defer ln.Close() + TestServerAuth(t, addr, token) + + // Test without token + resp = testHttpGet(t, "", addr+"/v1/sys/in-flight-req") + testResponseStatus(t, resp, 200) + + // Should also work with token + resp = testHttpGet(t, token, addr+"/v1/sys/in-flight-req") + testResponseStatus(t, resp, 200) +} diff --git a/http/sys_init.go b/http/sys_init.go new file mode 100644 index 0000000..905916b --- /dev/null +++ b/http/sys_init.go @@ -0,0 +1,179 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "context" + "encoding/base64" + "encoding/hex" + "fmt" + "net/http" + "strings" + + "github.com/hashicorp/vault/vault" +) + +func handleSysInit(core *vault.Core) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case "GET": + handleSysInitGet(core, w, r) + case "PUT", "POST": + handleSysInitPut(core, w, r) + default: + respondError(w, http.StatusMethodNotAllowed, nil) + } + }) +} + +func handleSysInitGet(core *vault.Core, w http.ResponseWriter, r *http.Request) { + init, err := core.Initialized(context.Background()) + if err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + + respondOk(w, &InitStatusResponse{ + Initialized: init, + }) +} + +func handleSysInitPut(core *vault.Core, w http.ResponseWriter, r *http.Request) { + ctx := context.Background() + + // Parse the request + var req InitRequest + if _, err := parseJSONRequest(core.PerfStandby(), r, w, &req); err != nil { + respondError(w, http.StatusBadRequest, err) + return + } + + // Validate init request parameters + if err := validateInitParameters(core, req); err != nil { + respondError(w, http.StatusBadRequest, err) + return + } + + // Initialize + barrierConfig := &vault.SealConfig{ + SecretShares: req.SecretShares, + SecretThreshold: req.SecretThreshold, + StoredShares: req.StoredShares, + PGPKeys: req.PGPKeys, + } + + recoveryConfig := &vault.SealConfig{ + SecretShares: req.RecoveryShares, + SecretThreshold: req.RecoveryThreshold, + PGPKeys: req.RecoveryPGPKeys, + } + + initParams := &vault.InitParams{ + BarrierConfig: barrierConfig, + RecoveryConfig: recoveryConfig, + RootTokenPGPKey: req.RootTokenPGPKey, + } + + result, initErr := core.Initialize(ctx, initParams) + if initErr != nil { + if vault.IsFatalError(initErr) { + respondError(w, http.StatusBadRequest, initErr) + return + } else { + // Add a warnings field? The error will be logged in the vault log + // already. 
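+			// For now the non-fatal error is dropped here; the handler falls
+			// through and builds the response from the returned result.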
+ } + } + + // Encode the keys + keys := make([]string, 0, len(result.SecretShares)) + keysB64 := make([]string, 0, len(result.SecretShares)) + for _, k := range result.SecretShares { + keys = append(keys, hex.EncodeToString(k)) + keysB64 = append(keysB64, base64.StdEncoding.EncodeToString(k)) + } + + resp := &InitResponse{ + Keys: keys, + KeysB64: keysB64, + RootToken: result.RootToken, + } + + if len(result.RecoveryShares) > 0 { + resp.RecoveryKeys = make([]string, 0, len(result.RecoveryShares)) + resp.RecoveryKeysB64 = make([]string, 0, len(result.RecoveryShares)) + for _, k := range result.RecoveryShares { + resp.RecoveryKeys = append(resp.RecoveryKeys, hex.EncodeToString(k)) + resp.RecoveryKeysB64 = append(resp.RecoveryKeysB64, base64.StdEncoding.EncodeToString(k)) + } + } + + if err := core.UnsealWithStoredKeys(ctx); err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + + respondOk(w, resp) +} + +type InitRequest struct { + SecretShares int `json:"secret_shares"` + SecretThreshold int `json:"secret_threshold"` + StoredShares int `json:"stored_shares"` + PGPKeys []string `json:"pgp_keys"` + RecoveryShares int `json:"recovery_shares"` + RecoveryThreshold int `json:"recovery_threshold"` + RecoveryPGPKeys []string `json:"recovery_pgp_keys"` + RootTokenPGPKey string `json:"root_token_pgp_key"` +} + +type InitResponse struct { + Keys []string `json:"keys"` + KeysB64 []string `json:"keys_base64"` + RecoveryKeys []string `json:"recovery_keys,omitempty"` + RecoveryKeysB64 []string `json:"recovery_keys_base64,omitempty"` + RootToken string `json:"root_token"` +} + +type InitStatusResponse struct { + Initialized bool `json:"initialized"` +} + +// Validates if the right parameters are used based on AutoUnseal +func validateInitParameters(core *vault.Core, req InitRequest) error { + recoveryFlags := make([]string, 0) + barrierFlags := make([]string, 0) + + if req.SecretShares != 0 { + barrierFlags = append(barrierFlags, "secret_shares") + } + if req.SecretThreshold != 0 { + barrierFlags = append(barrierFlags, "secret_threshold") + } + if len(req.PGPKeys) != 0 { + barrierFlags = append(barrierFlags, "pgp_keys") + } + if req.RecoveryShares != 0 { + recoveryFlags = append(recoveryFlags, "recovery_shares") + } + if req.RecoveryThreshold != 0 { + recoveryFlags = append(recoveryFlags, "recovery_threshold") + } + if len(req.RecoveryPGPKeys) != 0 { + recoveryFlags = append(recoveryFlags, "recovery_pgp_keys") + } + + switch core.SealAccess().RecoveryKeySupported() { + case true: + if len(barrierFlags) > 0 { + return fmt.Errorf("parameters %s not applicable to seal type %s", strings.Join(barrierFlags, ","), core.SealAccess().BarrierType()) + } + default: + if len(recoveryFlags) > 0 { + return fmt.Errorf("parameters %s not applicable to seal type %s", strings.Join(recoveryFlags, ","), core.SealAccess().BarrierType()) + } + + } + return nil +} diff --git a/http/sys_init_test.go b/http/sys_init_test.go new file mode 100644 index 0000000..79dd275 --- /dev/null +++ b/http/sys_init_test.go @@ -0,0 +1,196 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "encoding/hex" + "net/http" + "reflect" + "strconv" + "testing" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/builtin/logical/transit" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/vault/seal" +) + +func TestSysInit_get(t *testing.T) { + core := vault.TestCore(t) + ln, addr := TestServer(t, core) + defer ln.Close() + + { + // Pre-init + resp, err := http.Get(addr + "/v1/sys/init") + if err != nil { + t.Fatalf("err: %s", err) + } + + var actual map[string]interface{} + expected := map[string]interface{}{ + "initialized": false, + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } + } + + vault.TestCoreInit(t, core) + + { + // Post-init + resp, err := http.Get(addr + "/v1/sys/init") + if err != nil { + t.Fatalf("err: %s", err) + } + + var actual map[string]interface{} + expected := map[string]interface{}{ + "initialized": true, + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } + } +} + +// Test to check if the API errors out when wrong number of PGP keys are +// supplied +func TestSysInit_pgpKeysEntries(t *testing.T) { + core := vault.TestCore(t) + ln, addr := TestServer(t, core) + defer ln.Close() + + resp := testHttpPut(t, "", addr+"/v1/sys/init", map[string]interface{}{ + "secret_shares": 5, + "secret_threshold": 3, + "pgp_keys": []string{"pgpkey1"}, + }) + testResponseStatus(t, resp, 400) +} + +// Test to check if the API errors out when wrong number of PGP keys are +// supplied for recovery config +func TestSysInit_pgpKeysEntriesForRecovery(t *testing.T) { + core := vault.TestCoreNewSeal(t) + ln, addr := TestServer(t, core) + defer ln.Close() + + resp := testHttpPut(t, "", addr+"/v1/sys/init", map[string]interface{}{ + "secret_shares": 1, + "secret_threshold": 1, + "stored_shares": 1, + "recovery_shares": 5, + "recovery_threshold": 3, + "recovery_pgp_keys": []string{"pgpkey1"}, + }) + testResponseStatus(t, resp, 400) +} + +func TestSysInit_put(t *testing.T) { + core := vault.TestCore(t) + ln, addr := TestServer(t, core) + defer ln.Close() + + resp := testHttpPut(t, "", addr+"/v1/sys/init", map[string]interface{}{ + "secret_shares": 5, + "secret_threshold": 3, + }) + + var actual map[string]interface{} + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + keysRaw, ok := actual["keys"] + if !ok { + t.Fatalf("no keys: %#v", actual) + } + + if _, ok := actual["root_token"]; !ok { + t.Fatal("no root token") + } + + for _, key := range keysRaw.([]interface{}) { + keySlice, err := hex.DecodeString(key.(string)) + if err != nil { + t.Fatalf("bad: %s", err) + } + + if _, err := core.Unseal(keySlice); err != nil { + t.Fatalf("bad: %s", err) + } + } + + if core.Sealed() { + t.Fatal("should not be sealed") + } +} + +func TestSysInit_Put_ValidateParams(t *testing.T) { + core := vault.TestCore(t) + ln, addr := TestServer(t, core) + defer ln.Close() + + resp := testHttpPut(t, "", addr+"/v1/sys/init", map[string]interface{}{ + "secret_shares": 5, + "secret_threshold": 3, + "recovery_shares": 5, + "recovery_threshold": 3, + }) + testResponseStatus(t, resp, http.StatusBadRequest) + body := map[string][]string{} + testResponseBody(t, resp, &body) + if body["errors"][0] != "parameters 
recovery_shares,recovery_threshold not applicable to seal type shamir" { + t.Fatal(body) + } +} + +func TestSysInit_Put_ValidateParams_AutoUnseal(t *testing.T) { + testSeal, _ := seal.NewTestSeal(&seal.TestSealOpts{Name: "transit"}) + autoSeal, err := vault.NewAutoSeal(testSeal) + if err != nil { + t.Fatal(err) + } + + // Create the transit server. + conf := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "transit": transit.Factory, + }, + Seal: autoSeal, + } + opts := &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: Handler, + Logger: logging.NewVaultLogger(hclog.Trace).Named(t.Name()).Named("transit-seal" + strconv.Itoa(0)), + } + cluster := vault.NewTestCluster(t, conf, opts) + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + core := cores[0].Core + + ln, addr := TestServer(t, core) + defer ln.Close() + + resp := testHttpPut(t, "", addr+"/v1/sys/init", map[string]interface{}{ + "secret_shares": 5, + "secret_threshold": 3, + "recovery_shares": 5, + "recovery_threshold": 3, + }) + testResponseStatus(t, resp, http.StatusBadRequest) + body := map[string][]string{} + testResponseBody(t, resp, &body) + if body["errors"][0] != "parameters secret_shares,secret_threshold not applicable to seal type transit" { + t.Fatal(body) + } +} diff --git a/http/sys_internal_test.go b/http/sys_internal_test.go new file mode 100644 index 0000000..11d9376 --- /dev/null +++ b/http/sys_internal_test.go @@ -0,0 +1,88 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "encoding/json" + "reflect" + "testing" + + "github.com/hashicorp/vault/vault" +) + +func TestSysInternal_UIMounts(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + // Get original tune values, ensure that listing_visibility is not set + resp := testHttpGet(t, "", addr+"/v1/sys/internal/ui/mounts") + testResponseStatus(t, resp, 200) + + actual := map[string]interface{}{} + expected := map[string]interface{}{ + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "data": map[string]interface{}{ + "auth": map[string]interface{}{}, + "secret": map[string]interface{}{}, + }, + } + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad:\nExpected: %#v\nActual:%#v", expected, actual) + } + + // Mount-tune the listing_visibility + resp = testHttpPost(t, token, addr+"/v1/sys/mounts/secret/tune", map[string]interface{}{ + "listing_visibility": "unauth", + }) + testResponseStatus(t, resp, 204) + + resp = testHttpPost(t, token, addr+"/v1/sys/auth/token/tune", map[string]interface{}{ + "listing_visibility": "unauth", + }) + testResponseStatus(t, resp, 204) + + // Check results + resp = testHttpGet(t, "", addr+"/v1/sys/internal/ui/mounts") + testResponseStatus(t, resp, 200) + + actual = map[string]interface{}{} + expected = map[string]interface{}{ + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "data": map[string]interface{}{ + "secret": map[string]interface{}{ + "secret/": map[string]interface{}{ + "type": "kv", + "description": "key/value secret storage", + "options": map[string]interface{}{"version": "1"}, + }, + }, + "auth": map[string]interface{}{ + "token/": map[string]interface{}{ + "type": "token", + 
"description": "token based credentials", + "options": interface{}(nil), + }, + }, + }, + } + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad:\nExpected: %#v\nActual:%#v", expected, actual) + } +} diff --git a/http/sys_leader.go b/http/sys_leader.go new file mode 100644 index 0000000..6b39c44 --- /dev/null +++ b/http/sys_leader.go @@ -0,0 +1,32 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "net/http" + + "github.com/hashicorp/vault/vault" +) + +// This endpoint is needed to answer queries before Vault unseals +// or becomes the leader. +func handleSysLeader(core *vault.Core) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case "GET": + handleSysLeaderGet(core, w, r) + default: + respondError(w, http.StatusMethodNotAllowed, nil) + } + }) +} + +func handleSysLeaderGet(core *vault.Core, w http.ResponseWriter, r *http.Request) { + resp, err := core.GetLeaderStatus() + if err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + respondOk(w, resp) +} diff --git a/http/sys_leader_test.go b/http/sys_leader_test.go new file mode 100644 index 0000000..3292b7f --- /dev/null +++ b/http/sys_leader_test.go @@ -0,0 +1,41 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "encoding/json" + "net/http" + "reflect" + "testing" + "time" + + "github.com/hashicorp/vault/vault" +) + +func TestSysLeader_get(t *testing.T) { + core, _, _ := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + + resp, err := http.Get(addr + "/v1/sys/leader") + if err != nil { + t.Fatalf("err: %s", err) + } + + var actual map[string]interface{} + expected := map[string]interface{}{ + "ha_enabled": false, + "is_self": false, + "leader_address": "", + "leader_cluster_address": "", + "performance_standby": false, + "performance_standby_last_remote_wal": json.Number("0"), + "active_time": time.Time{}.UTC().Format(time.RFC3339), + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v \n%#v", actual, expected) + } +} diff --git a/http/sys_lease_test.go b/http/sys_lease_test.go new file mode 100644 index 0000000..6b069ca --- /dev/null +++ b/http/sys_lease_test.go @@ -0,0 +1,76 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "testing" + + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/vault" +) + +func TestSysRenew(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + // write secret + resp := testHttpPut(t, token, addr+"/v1/secret/foo", map[string]interface{}{ + "data": "bar", + "lease": "1h", + }) + testResponseStatus(t, resp, 204) + + // read secret + resp = testHttpGet(t, token, addr+"/v1/secret/foo") + var result struct { + LeaseID string `json:"lease_id"` + } + if err := jsonutil.DecodeJSONFromReader(resp.Body, &result); err != nil { + t.Fatalf("bad: %s", err) + } + + var renewResult struct { + LeaseID string `json:"lease_id"` + Data map[string]interface{} `json:"data"` + } + resp = testHttpPut(t, token, addr+"/v1/sys/renew/"+result.LeaseID, nil) + testResponseStatus(t, resp, 200) + if err := jsonutil.DecodeJSONFromReader(resp.Body, &renewResult); err != nil { + t.Fatal(err) + } + if result.LeaseID != renewResult.LeaseID { + t.Fatal("lease id changed in renew request") + } + + resp = testHttpPut(t, token, addr+"/v1/sys/leases/renew/"+result.LeaseID, nil) + testResponseStatus(t, resp, 200) + if err := jsonutil.DecodeJSONFromReader(resp.Body, &renewResult); err != nil { + t.Fatal(err) + } + if result.LeaseID != renewResult.LeaseID { + t.Fatal("lease id changed in renew request") + } +} + +func TestSysRevoke(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPut(t, token, addr+"/v1/sys/revoke/secret/foo/1234", nil) + testResponseStatus(t, resp, 204) +} + +func TestSysRevokePrefix(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPut(t, token, addr+"/v1/sys/revoke-prefix/secret/foo/1234", nil) + testResponseStatus(t, resp, 204) +} diff --git a/http/sys_metrics.go b/http/sys_metrics.go new file mode 100644 index 0000000..2bb819b --- /dev/null +++ b/http/sys_metrics.go @@ -0,0 +1,54 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "fmt" + "net/http" + + "github.com/hashicorp/vault/helper/metricsutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +func handleMetricsUnauthenticated(core *vault.Core) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + req := &logical.Request{Headers: r.Header} + + switch r.Method { + case "GET": + default: + respondError(w, http.StatusMethodNotAllowed, nil) + return + } + + // Parse form + if err := r.ParseForm(); err != nil { + respondError(w, http.StatusBadRequest, err) + return + } + + format := r.Form.Get("format") + if format == "" { + format = metricsutil.FormatFromRequest(req) + } + + // Define response + resp := core.MetricsHelper().ResponseForFormat(format) + + // Manually extract the logical response and send back the information + status := resp.Data[logical.HTTPStatusCode].(int) + w.Header().Set("Content-Type", resp.Data[logical.HTTPContentType].(string)) + switch v := resp.Data[logical.HTTPRawBody].(type) { + case string: + w.WriteHeader(status) + w.Write([]byte(v)) + case []byte: + w.WriteHeader(status) + w.Write(v) + default: + respondError(w, http.StatusInternalServerError, fmt.Errorf("wrong response returned")) + } + }) +} diff --git a/http/sys_metrics_test.go b/http/sys_metrics_test.go new file mode 100644 index 0000000..167347b --- /dev/null +++ b/http/sys_metrics_test.go @@ -0,0 +1,101 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "testing" + "time" + + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + + "github.com/armon/go-metrics" + "github.com/hashicorp/vault/helper/metricsutil" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/vault" +) + +func TestSysMetricsUnauthenticated(t *testing.T) { + inm := metrics.NewInmemSink(10*time.Second, time.Minute) + metrics.DefaultInmemSignal(inm) + conf := &vault.CoreConfig{ + BuiltinRegistry: corehelpers.NewMockBuiltinRegistry(), + MetricsHelper: metricsutil.NewMetricsHelper(inm, true), + } + core, _, token := vault.TestCoreUnsealedWithConfig(t, conf) + ln, addr := TestServer(t, core) + TestServerAuth(t, addr, token) + + // Default: Only authenticated access + resp := testHttpGet(t, "", addr+"/v1/sys/metrics") + testResponseStatus(t, resp, 403) + resp = testHttpGet(t, token, addr+"/v1/sys/metrics") + testResponseStatus(t, resp, 200) + + // Close listener + ln.Close() + + // Setup new custom listener with unauthenticated metrics access + ln, addr = TestListener(t) + props := &vault.HandlerProperties{ + Core: core, + ListenerConfig: &configutil.Listener{ + Telemetry: configutil.ListenerTelemetry{ + UnauthenticatedMetricsAccess: true, + }, + }, + } + TestServerWithListenerAndProperties(t, ln, addr, core, props) + defer ln.Close() + TestServerAuth(t, addr, token) + + // Test without token + resp = testHttpGet(t, "", addr+"/v1/sys/metrics") + testResponseStatus(t, resp, 200) + + // Should also work with token + resp = testHttpGet(t, token, addr+"/v1/sys/metrics") + testResponseStatus(t, resp, 200) + + // Test if prometheus response is correct + resp = testHttpGet(t, "", addr+"/v1/sys/metrics?format=prometheus") + testResponseStatus(t, resp, 200) +} + +func TestSysPProfUnauthenticated(t *testing.T) { + conf := &vault.CoreConfig{} + core, _, token := vault.TestCoreUnsealedWithConfig(t, conf) + ln, addr := TestServer(t, core) + TestServerAuth(t, addr, token) + + // Default: Only 
authenticated access + resp := testHttpGet(t, "", addr+"/v1/sys/pprof/cmdline") + testResponseStatus(t, resp, 403) + resp = testHttpGet(t, token, addr+"/v1/sys/pprof/cmdline") + testResponseStatus(t, resp, 200) + + // Close listener + ln.Close() + + // Setup new custom listener with unauthenticated metrics access + ln, addr = TestListener(t) + props := &vault.HandlerProperties{ + Core: core, + ListenerConfig: &configutil.Listener{ + Profiling: configutil.ListenerProfiling{ + UnauthenticatedPProfAccess: true, + }, + }, + } + TestServerWithListenerAndProperties(t, ln, addr, core, props) + defer ln.Close() + TestServerAuth(t, addr, token) + + // Test without token + resp = testHttpGet(t, "", addr+"/v1/sys/pprof/cmdline") + testResponseStatus(t, resp, 200) + + // Should also work with token + resp = testHttpGet(t, token, addr+"/v1/sys/pprof/cmdline") + testResponseStatus(t, resp, 200) +} diff --git a/http/sys_monitor_test.go b/http/sys_monitor_test.go new file mode 100644 index 0000000..5d428c4 --- /dev/null +++ b/http/sys_monitor_test.go @@ -0,0 +1,127 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "context" + "encoding/json" + "strings" + "testing" + "time" + + "github.com/hashicorp/vault/helper/testhelpers" + "github.com/hashicorp/vault/vault" +) + +func TestSysMonitorUnknownLogLevel(t *testing.T) { + t.Parallel() + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: Handler, + NumCores: 1, + }) + defer cluster.Cleanup() + + client := cluster.Cores[0].Client + request := client.NewRequest("GET", "/v1/sys/monitor") + request.Params.Add("log_level", "haha") + _, err := client.RawRequest(request) + + if err == nil { + t.Fatal("expected to get an error, but didn't") + } else { + if !strings.Contains(err.Error(), "Code: 400") { + t.Fatalf("expected to receive a 400 error, but got %s instead", err) + } + + if !strings.Contains(err.Error(), "unknown log level") { + t.Fatalf("expected to receive a message indicating an unknown log level, but got %s instead", err) + } + } +} + +func TestSysMonitorUnknownLogFormat(t *testing.T) { + t.Parallel() + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: Handler, + NumCores: 1, + }) + defer cluster.Cleanup() + + client := cluster.Cores[0].Client + request := client.NewRequest("GET", "/v1/sys/monitor") + request.Params.Add("log_format", "haha") + _, err := client.RawRequest(request) + + if err == nil { + t.Fatal("expected to get an error, but didn't") + } else { + if !strings.Contains(err.Error(), "Code: 400") { + t.Fatalf("expected to receive a 400 error, but got %s instead", err) + } + + if !strings.Contains(err.Error(), "unknown log format") { + t.Fatalf("expected to receive a message indicating an unknown log format, but got %s instead", err) + } + } +} + +func TestSysMonitorStreamingLogs(t *testing.T) { + t.Parallel() + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: Handler, + NumCores: 1, + }) + defer cluster.Cleanup() + + client := cluster.Cores[0].Client + stopCh := testhelpers.GenerateDebugLogs(t, client) + defer close(stopCh) + + for _, lf := range []string{"standard", "json"} { + t.Run(lf, func(t *testing.T) { + debugCount := 0 + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logCh, err := client.Sys().Monitor(ctx, "DEBUG", lf) + if err != nil { + t.Fatal(err) + } + + type jsonlog struct { + Level string `json:"@level"` + Message string `json:"@message"` + TimeStamp 
string `json:"@timestamp"` + } + jsonLog := &jsonlog{} + + timeCh := time.After(5 * time.Second) + + for { + select { + case log := <-logCh: + if lf == "json" { + err := json.Unmarshal([]byte(log), jsonLog) + if err != nil { + t.Fatal("Expected JSON log from channel") + } + if strings.Contains(jsonLog.Level, "debug") { + debugCount++ + } + } else if strings.Contains(log, "[DEBUG]") { + debugCount++ + } + if debugCount > 3 { + // If we've seen multiple lines that match what we want, + // it's probably safe to assume streaming is working + return + } + case <-timeCh: + t.Fatal("Failed to get a DEBUG message after 5 seconds") + } + } + }) + } +} diff --git a/http/sys_mount_test.go b/http/sys_mount_test.go new file mode 100644 index 0000000..384f5bf --- /dev/null +++ b/http/sys_mount_test.go @@ -0,0 +1,1891 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + "time" + + "github.com/fatih/structs" + "github.com/go-test/deep" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/helper/versions" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/vault" +) + +func TestSysMounts(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpGet(t, token, addr+"/v1/sys/mounts") + + var actual map[string]interface{} + expected := map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "secret/": map[string]interface{}{ + "description": "key/value secret storage", + "type": "kv", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + }, + "sys/": map[string]interface{}{ + "description": "system endpoints used for control, policy and debugging", + "type": "system", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + "passthrough_request_headers": []interface{}{"Accept"}, + }, + "local": false, + "seal_wrap": true, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.DefaultBuiltinVersion, + }, + "cubbyhole/": map[string]interface{}{ + "description": "per-token private secret storage", + "type": "cubbyhole", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": true, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "cubbyhole"), + }, + "identity/": map[string]interface{}{ + "description": "identity store", + "type": "identity", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": 
false, + "passthrough_request_headers": []interface{}{"Authorization"}, + }, + "local": false, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "identity"), + }, + }, + "secret/": map[string]interface{}{ + "description": "key/value secret storage", + "type": "kv", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + }, + "sys/": map[string]interface{}{ + "description": "system endpoints used for control, policy and debugging", + "type": "system", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + "passthrough_request_headers": []interface{}{"Accept"}, + }, + "local": false, + "seal_wrap": true, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.DefaultBuiltinVersion, + }, + "cubbyhole/": map[string]interface{}{ + "description": "per-token private secret storage", + "type": "cubbyhole", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": true, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "cubbyhole"), + }, + "identity/": map[string]interface{}{ + "description": "identity store", + "type": "identity", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + "passthrough_request_headers": []interface{}{"Authorization"}, + }, + "local": false, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "identity"), + }, + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] + for k, v := range actual["data"].(map[string]interface{}) { + if v.(map[string]interface{})["accessor"] == "" { + t.Fatalf("no accessor from %s", k) + } + if v.(map[string]interface{})["uuid"] == "" { + t.Fatalf("no uuid from %s", k) + } + expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected[k].(map[string]interface{})["uuid"] = v.(map[string]interface{})["uuid"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["uuid"] = v.(map[string]interface{})["uuid"] + } + + if diff := deep.Equal(actual, expected); len(diff) > 0 { + t.Fatalf("bad, diff: %#v", diff) + } +} + +func TestSysMount(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPost(t, token, 
addr+"/v1/sys/mounts/foo", map[string]interface{}{ + "type": "kv", + "description": "foo", + "options": map[string]string{ + "version": "1", + }, + }) + testResponseStatus(t, resp, 204) + + resp = testHttpGet(t, token, addr+"/v1/sys/mounts") + + var actual map[string]interface{} + expected := map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "foo/": map[string]interface{}{ + "description": "foo", + "type": "kv", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + }, + "secret/": map[string]interface{}{ + "description": "key/value secret storage", + "type": "kv", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + }, + "sys/": map[string]interface{}{ + "description": "system endpoints used for control, policy and debugging", + "type": "system", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + "passthrough_request_headers": []interface{}{"Accept"}, + }, + "local": false, + "seal_wrap": true, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.DefaultBuiltinVersion, + }, + "cubbyhole/": map[string]interface{}{ + "description": "per-token private secret storage", + "type": "cubbyhole", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": true, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "cubbyhole"), + }, + "identity/": map[string]interface{}{ + "description": "identity store", + "type": "identity", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + "passthrough_request_headers": []interface{}{"Authorization"}, + }, + "local": false, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "identity"), + }, + }, + "foo/": map[string]interface{}{ + "description": "foo", + "type": "kv", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": 
versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"),
+		},
+		"secret/": map[string]interface{}{
+			"description":             "key/value secret storage",
+			"type":                    "kv",
+			"external_entropy_access": false,
+			"config": map[string]interface{}{
+				"default_lease_ttl": json.Number("0"),
+				"max_lease_ttl":     json.Number("0"),
+				"force_no_cache":    false,
+			},
+			"local":                  false,
+			"seal_wrap":              false,
+			"options":                map[string]interface{}{"version": "1"},
+			"plugin_version":         "",
+			"running_sha256":         "",
+			"running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"),
+		},
+		"sys/": map[string]interface{}{
+			"description":             "system endpoints used for control, policy and debugging",
+			"type":                    "system",
+			"external_entropy_access": false,
+			"config": map[string]interface{}{
+				"default_lease_ttl":           json.Number("0"),
+				"max_lease_ttl":               json.Number("0"),
+				"force_no_cache":              false,
+				"passthrough_request_headers": []interface{}{"Accept"},
+			},
+			"local":                  false,
+			"seal_wrap":              true,
+			"options":                interface{}(nil),
+			"plugin_version":         "",
+			"running_sha256":         "",
+			"running_plugin_version": versions.DefaultBuiltinVersion,
+		},
+		"cubbyhole/": map[string]interface{}{
+			"description":             "per-token private secret storage",
+			"type":                    "cubbyhole",
+			"external_entropy_access": false,
+			"config": map[string]interface{}{
+				"default_lease_ttl": json.Number("0"),
+				"max_lease_ttl":     json.Number("0"),
+				"force_no_cache":    false,
+			},
+			"local":                  true,
+			"seal_wrap":              false,
+			"options":                interface{}(nil),
+			"plugin_version":         "",
+			"running_sha256":         "",
+			"running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "cubbyhole"),
+		},
+		"identity/": map[string]interface{}{
+			"description":             "identity store",
+			"type":                    "identity",
+			"external_entropy_access": false,
+			"config": map[string]interface{}{
+				"default_lease_ttl":           json.Number("0"),
+				"max_lease_ttl":               json.Number("0"),
+				"force_no_cache":              false,
+				"passthrough_request_headers": []interface{}{"Authorization"},
+			},
+			"local":                  false,
+			"seal_wrap":              false,
+			"options":                interface{}(nil),
+			"plugin_version":         "",
+			"running_sha256":         "",
+			"running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "identity"),
+		},
+	}
+	testResponseStatus(t, resp, 200)
+	testResponseBody(t, resp, &actual)
+	expected["request_id"] = actual["request_id"]
+	for k, v := range actual["data"].(map[string]interface{}) {
+		if v.(map[string]interface{})["accessor"] == "" {
+			t.Fatalf("no accessor from %s", k)
+		}
+		if v.(map[string]interface{})["uuid"] == "" {
+			t.Fatalf("no uuid from %s", k)
+		}
+		expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
+		expected[k].(map[string]interface{})["uuid"] = v.(map[string]interface{})["uuid"]
+		expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"]
+		expected["data"].(map[string]interface{})[k].(map[string]interface{})["uuid"] = v.(map[string]interface{})["uuid"]
+	}
+
+	if diff := deep.Equal(actual, expected); len(diff) > 0 {
+		t.Fatalf("bad, diff: %#v", diff)
+	}
+}
+
+func TestSysMount_put(t *testing.T) {
+	core, _, token := vault.TestCoreUnsealed(t)
+	ln, addr := TestServer(t, core)
+	defer ln.Close()
+	TestServerAuth(t, addr, token)
+
+	resp := testHttpPut(t, token, addr+"/v1/sys/mounts/foo", map[string]interface{}{
+		"type":        "kv",
+		"description": "foo",
+	})
+	testResponseStatus(t, resp, 204)
+
+	// The TestSysMount test verifies that the mount is actually created. See
+	// that test for more info.
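+	// (Vault's HTTP layer treats PUT and POST to sys endpoints identically,
+	// so the 204 above is sufficient coverage for the PUT path.)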
+}
+
+// TestSysRemountSpacesFrom ensures we succeed in a remount where the 'from' mount has spaces in the name
+func TestSysRemountSpacesFrom(t *testing.T) {
+	core, _, token := vault.TestCoreUnsealed(t)
+	ln, addr := TestServer(t, core)
+	defer ln.Close()
+	TestServerAuth(t, addr, token)
+
+	resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo%20bar", map[string]interface{}{
+		"type":        "kv",
+		"description": "foo",
+	})
+	testResponseStatus(t, resp, 204)
+
+	resp = testHttpPost(t, token, addr+"/v1/sys/remount", map[string]interface{}{
+		"from": "foo bar",
+		"to":   "baz",
+	})
+	testResponseStatus(t, resp, 200)
+}
+
+// TestSysRemountSpacesTo ensures we succeed in a remount where the 'to' mount has spaces in the name
+func TestSysRemountSpacesTo(t *testing.T) {
+	core, _, token := vault.TestCoreUnsealed(t)
+	ln, addr := TestServer(t, core)
+	defer ln.Close()
+	TestServerAuth(t, addr, token)
+
+	resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo%20bar", map[string]interface{}{
+		"type":        "kv",
+		"description": "foo",
+	})
+	testResponseStatus(t, resp, 204)
+
+	resp = testHttpPost(t, token, addr+"/v1/sys/remount", map[string]interface{}{
+		"from": "foo bar",
+		"to":   "bar baz",
+	})
+	testResponseStatus(t, resp, 200)
+}
+
+// TestSysRemountTrailingSpaces ensures we fail on trailing spaces
+func TestSysRemountTrailingSpaces(t *testing.T) {
+	core, _, token := vault.TestCoreUnsealed(t)
+	ln, addr := TestServer(t, core)
+	defer ln.Close()
+	TestServerAuth(t, addr, token)
+
+	resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo%20bar", map[string]interface{}{
+		"type":        "kv",
+		"description": "foo",
+	})
+	testResponseStatus(t, resp, 204)
+
+	resp = testHttpPost(t, token, addr+"/v1/sys/remount", map[string]interface{}{
+		"from": "foo bar",
+		"to":   " baz ",
+	})
+	testResponseStatus(t, resp, 400)
+
+	resp = testHttpPost(t, token, addr+"/v1/sys/remount", map[string]interface{}{
+		"from": " foo bar ",
+		"to":   "baz",
+	})
+	testResponseStatus(t, resp, 400)
+}
+
+func TestSysRemount(t *testing.T) {
+	core, _, token := vault.TestCoreUnsealed(t)
+	ln, addr := TestServer(t, core)
+	defer ln.Close()
+	TestServerAuth(t, addr, token)
+
+	resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo", map[string]interface{}{
+		"type":        "kv",
+		"description": "foo",
+	})
+	testResponseStatus(t, resp, 204)
+
+	resp = testHttpPost(t, token, addr+"/v1/sys/remount", map[string]interface{}{
+		"from": "foo",
+		"to":   "bar",
+	})
+	testResponseStatus(t, resp, 200)
+
+	// Poll until the remount succeeds
+	var remountResp map[string]interface{}
+	testResponseBody(t, resp, &remountResp)
+	corehelpers.RetryUntil(t, 5*time.Second, func() error {
+		resp = testHttpGet(t, token, addr+"/v1/sys/remount/status/"+remountResp["migration_id"].(string))
+		testResponseStatus(t, resp, 200)
+
+		var remountStatusResp map[string]interface{}
+		testResponseBody(t, resp, &remountStatusResp)
+
+		status := remountStatusResp["data"].(map[string]interface{})["migration_info"].(map[string]interface{})["status"]
+		if status != "success" {
+			return fmt.Errorf("expected migration status to be successful, got %q", status)
+		}
+		return nil
+	})
+	resp = testHttpGet(t, token, addr+"/v1/sys/mounts")
+
+	var actual map[string]interface{}
+	expected := map[string]interface{}{
+		"lease_id":       "",
+		"renewable":      false,
+		"lease_duration": json.Number("0"),
+		"wrap_info":      nil,
+		"warnings":       nil,
+		"auth":           nil,
+		"data": map[string]interface{}{
+			"bar/": map[string]interface{}{
+				"description":             "foo",
+				"type":                    "kv",
+				"external_entropy_access": false,
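+				// Note: the remounted mount keeps its original description ("foo")
+				// under its new path "bar/".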
"config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{}, + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + }, + "secret/": map[string]interface{}{ + "description": "key/value secret storage", + "type": "kv", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + }, + "sys/": map[string]interface{}{ + "description": "system endpoints used for control, policy and debugging", + "type": "system", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + "passthrough_request_headers": []interface{}{"Accept"}, + }, + "local": false, + "seal_wrap": true, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.DefaultBuiltinVersion, + }, + "cubbyhole/": map[string]interface{}{ + "description": "per-token private secret storage", + "type": "cubbyhole", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": true, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "cubbyhole"), + }, + "identity/": map[string]interface{}{ + "description": "identity store", + "type": "identity", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + "passthrough_request_headers": []interface{}{"Authorization"}, + }, + "local": false, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "identity"), + }, + }, + "bar/": map[string]interface{}{ + "description": "foo", + "type": "kv", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{}, + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + }, + "secret/": map[string]interface{}{ + "description": "key/value secret storage", + "type": "kv", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + }, + "sys/": map[string]interface{}{ + "description": 
"system endpoints used for control, policy and debugging", + "type": "system", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + "passthrough_request_headers": []interface{}{"Accept"}, + }, + "local": false, + "seal_wrap": true, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.DefaultBuiltinVersion, + }, + "cubbyhole/": map[string]interface{}{ + "description": "per-token private secret storage", + "type": "cubbyhole", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": true, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "cubbyhole"), + }, + "identity/": map[string]interface{}{ + "description": "identity store", + "type": "identity", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + "passthrough_request_headers": []interface{}{"Authorization"}, + }, + "local": false, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "identity"), + }, + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] + for k, v := range actual["data"].(map[string]interface{}) { + if v.(map[string]interface{})["accessor"] == "" { + t.Fatalf("no accessor from %s", k) + } + if v.(map[string]interface{})["uuid"] == "" { + t.Fatalf("no uuid from %s", k) + } + expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected[k].(map[string]interface{})["uuid"] = v.(map[string]interface{})["uuid"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["uuid"] = v.(map[string]interface{})["uuid"] + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad:\nExpected: %#v\nActual: %#v\n", expected, actual) + } +} + +func TestSysUnmount(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo", map[string]interface{}{ + "type": "kv", + "description": "foo", + }) + testResponseStatus(t, resp, 204) + + resp = testHttpDelete(t, token, addr+"/v1/sys/mounts/foo") + testResponseStatus(t, resp, 204) + + resp = testHttpGet(t, token, addr+"/v1/sys/mounts") + + var actual map[string]interface{} + expected := map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "secret/": map[string]interface{}{ + "description": "key/value secret storage", + "type": "kv", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": 
map[string]interface{}{"version": "1"}, + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + }, + "sys/": map[string]interface{}{ + "description": "system endpoints used for control, policy and debugging", + "type": "system", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + "passthrough_request_headers": []interface{}{"Accept"}, + }, + "local": false, + "seal_wrap": true, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.DefaultBuiltinVersion, + }, + "cubbyhole/": map[string]interface{}{ + "description": "per-token private secret storage", + "type": "cubbyhole", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": true, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "cubbyhole"), + }, + "identity/": map[string]interface{}{ + "description": "identity store", + "type": "identity", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + "passthrough_request_headers": []interface{}{"Authorization"}, + }, + "local": false, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "identity"), + }, + }, + "secret/": map[string]interface{}{ + "description": "key/value secret storage", + "type": "kv", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + }, + "sys/": map[string]interface{}{ + "description": "system endpoints used for control, policy and debugging", + "type": "system", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + "passthrough_request_headers": []interface{}{"Accept"}, + }, + "local": false, + "seal_wrap": true, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.DefaultBuiltinVersion, + }, + "cubbyhole/": map[string]interface{}{ + "description": "per-token private secret storage", + "type": "cubbyhole", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": true, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "cubbyhole"), + }, + "identity/": map[string]interface{}{ + "description": "identity store", + "type": "identity", + "external_entropy_access": false, + "config": 
map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + "passthrough_request_headers": []interface{}{"Authorization"}, + }, + "local": false, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "identity"), + }, + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] + for k, v := range actual["data"].(map[string]interface{}) { + if v.(map[string]interface{})["accessor"] == "" { + t.Fatalf("no accessor from %s", k) + } + if v.(map[string]interface{})["uuid"] == "" { + t.Fatalf("no uuid from %s", k) + } + expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected[k].(map[string]interface{})["uuid"] = v.(map[string]interface{})["uuid"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["uuid"] = v.(map[string]interface{})["uuid"] + } + + if diff := deep.Equal(actual, expected); len(diff) > 0 { + t.Fatalf("bad, diff: %#v", diff) + } +} + +func TestSysTuneMount_Options(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo", map[string]interface{}{ + "type": "kv", + "description": "foo", + }) + + testResponseStatus(t, resp, 204) + // Mount-tune the options + resp = testHttpPost(t, token, addr+"/v1/sys/mounts/foo/tune", map[string]interface{}{ + "options": map[string]string{ + "test": "true", + }, + }) + testResponseStatus(t, resp, 204) + + // Check results + resp = testHttpGet(t, token, addr+"/v1/sys/mounts/foo/tune") + testResponseStatus(t, resp, 200) + + actual := map[string]interface{}{} + expected := map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "description": "foo", + "default_lease_ttl": json.Number("2764800"), + "max_lease_ttl": json.Number("2764800"), + "force_no_cache": false, + "options": map[string]interface{}{"test": "true"}, + }, + "description": "foo", + "default_lease_ttl": json.Number("2764800"), + "max_lease_ttl": json.Number("2764800"), + "force_no_cache": false, + "options": map[string]interface{}{"test": "true"}, + } + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad:\nExpected: %#v\nActual:%#v", expected, actual) + } + + // Check that we're not allowed to unset the options map once that's set + resp = testHttpPost(t, token, addr+"/v1/sys/mounts/foo/tune", map[string]interface{}{ + "options": map[string]string{}, + }) + testResponseStatus(t, resp, 204) + + // Check results + resp = testHttpGet(t, token, addr+"/v1/sys/mounts/foo/tune") + testResponseStatus(t, resp, 200) + + actual = map[string]interface{}{} + expected = map[string]interface{}{ + "description": "foo", + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "description": "foo", + "default_lease_ttl": json.Number("2764800"), + "max_lease_ttl": 
json.Number("2764800"), + "force_no_cache": false, + "options": map[string]interface{}{"test": "true"}, + }, + "default_lease_ttl": json.Number("2764800"), + "max_lease_ttl": json.Number("2764800"), + "force_no_cache": false, + "options": map[string]interface{}{"test": "true"}, + } + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad:\nExpected: %#v\nActual:%#v", expected, actual) + } +} + +func TestSysTuneMount(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo", map[string]interface{}{ + "type": "kv", + "description": "foo", + }) + testResponseStatus(t, resp, 204) + + resp = testHttpGet(t, token, addr+"/v1/sys/mounts") + + var actual map[string]interface{} + expected := map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "foo/": map[string]interface{}{ + "description": "foo", + "type": "kv", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{}, + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + }, + "secret/": map[string]interface{}{ + "description": "key/value secret storage", + "type": "kv", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + }, + "sys/": map[string]interface{}{ + "description": "system endpoints used for control, policy and debugging", + "type": "system", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + "passthrough_request_headers": []interface{}{"Accept"}, + }, + "local": false, + "seal_wrap": true, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.DefaultBuiltinVersion, + }, + "cubbyhole/": map[string]interface{}{ + "description": "per-token private secret storage", + "type": "cubbyhole", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": true, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "cubbyhole"), + }, + "identity/": map[string]interface{}{ + "description": "identity store", + "type": "identity", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + "passthrough_request_headers": []interface{}{"Authorization"}, + }, + "local": false, + "seal_wrap": 
false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "identity"), + }, + }, + "foo/": map[string]interface{}{ + "description": "foo", + "type": "kv", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{}, + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + }, + "secret/": map[string]interface{}{ + "description": "key/value secret storage", + "type": "kv", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + }, + "sys/": map[string]interface{}{ + "description": "system endpoints used for control, policy and debugging", + "type": "system", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + "passthrough_request_headers": []interface{}{"Accept"}, + }, + "local": false, + "seal_wrap": true, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.DefaultBuiltinVersion, + }, + "cubbyhole/": map[string]interface{}{ + "description": "per-token private secret storage", + "type": "cubbyhole", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": true, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "cubbyhole"), + }, + "identity/": map[string]interface{}{ + "description": "identity store", + "type": "identity", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + "passthrough_request_headers": []interface{}{"Authorization"}, + }, + "local": false, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "identity"), + }, + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] + for k, v := range actual["data"].(map[string]interface{}) { + if v.(map[string]interface{})["accessor"] == "" { + t.Fatalf("no accessor from %s", k) + } + if v.(map[string]interface{})["uuid"] == "" { + t.Fatalf("no uuid from %s", k) + } + expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected[k].(map[string]interface{})["uuid"] = v.(map[string]interface{})["uuid"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["uuid"] 
= v.(map[string]interface{})["uuid"] + } + + if diff := deep.Equal(actual, expected); len(diff) > 0 { + t.Fatalf("bad, diff: %#v", diff) + } + + // Shorter than system default + resp = testHttpPost(t, token, addr+"/v1/sys/mounts/foo/tune", map[string]interface{}{ + "default_lease_ttl": "72h", + }) + testResponseStatus(t, resp, 204) + + // Longer than system max + resp = testHttpPost(t, token, addr+"/v1/sys/mounts/foo/tune", map[string]interface{}{ + "default_lease_ttl": "72000h", + }) + testResponseStatus(t, resp, 204) + + // Longer than system default + resp = testHttpPost(t, token, addr+"/v1/sys/mounts/foo/tune", map[string]interface{}{ + "max_lease_ttl": "72000h", + }) + testResponseStatus(t, resp, 204) + + // Longer than backend max + resp = testHttpPost(t, token, addr+"/v1/sys/mounts/foo/tune", map[string]interface{}{ + "default_lease_ttl": "72001h", + }) + testResponseStatus(t, resp, 400) + + // Shorter than backend default + resp = testHttpPost(t, token, addr+"/v1/sys/mounts/foo/tune", map[string]interface{}{ + "max_lease_ttl": "1h", + }) + testResponseStatus(t, resp, 400) + + // Shorter than backend max, longer than system max + resp = testHttpPost(t, token, addr+"/v1/sys/mounts/foo/tune", map[string]interface{}{ + "default_lease_ttl": "71999h", + }) + testResponseStatus(t, resp, 204) + + // mark as versioned + resp = testHttpPost(t, token, addr+"/v1/sys/mounts/foo/tune", map[string]interface{}{ + "options": map[string]string{ + "version": "1", + }, + }) + testResponseStatus(t, resp, 200) + + resp = testHttpGet(t, token, addr+"/v1/sys/mounts") + expected = map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "foo/": map[string]interface{}{ + "description": "foo", + "type": "kv", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("259196400"), + "max_lease_ttl": json.Number("259200000"), + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + }, + "secret/": map[string]interface{}{ + "description": "key/value secret storage", + "type": "kv", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + }, + "sys/": map[string]interface{}{ + "description": "system endpoints used for control, policy and debugging", + "type": "system", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + "passthrough_request_headers": []interface{}{"Accept"}, + }, + "local": false, + "seal_wrap": true, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.DefaultBuiltinVersion, + }, + "cubbyhole/": map[string]interface{}{ + "description": "per-token private secret storage", + "type": "cubbyhole", + "external_entropy_access": false, + "config": map[string]interface{}{ + 
"default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": true, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "cubbyhole"), + }, + "identity/": map[string]interface{}{ + "description": "identity store", + "type": "identity", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + "passthrough_request_headers": []interface{}{"Authorization"}, + }, + "local": false, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "identity"), + }, + }, + "foo/": map[string]interface{}{ + "description": "foo", + "type": "kv", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("259196400"), + "max_lease_ttl": json.Number("259200000"), + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + }, + "secret/": map[string]interface{}{ + "description": "key/value secret storage", + "type": "kv", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": false, + "seal_wrap": false, + "options": map[string]interface{}{"version": "1"}, + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + }, + "sys/": map[string]interface{}{ + "description": "system endpoints used for control, policy and debugging", + "type": "system", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + "passthrough_request_headers": []interface{}{"Accept"}, + }, + "local": false, + "seal_wrap": true, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "kv"), + }, + "cubbyhole/": map[string]interface{}{ + "description": "per-token private secret storage", + "type": "cubbyhole", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + }, + "local": true, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, "cubbyhole"), + }, + "identity/": map[string]interface{}{ + "description": "identity store", + "type": "identity", + "external_entropy_access": false, + "config": map[string]interface{}{ + "default_lease_ttl": json.Number("0"), + "max_lease_ttl": json.Number("0"), + "force_no_cache": false, + "passthrough_request_headers": []interface{}{"Authorization"}, + }, + "local": false, + "seal_wrap": false, + "options": interface{}(nil), + "plugin_version": "", + "running_sha256": "", + "running_plugin_version": versions.GetBuiltinVersion(consts.PluginTypeSecrets, 
"identity"), + }, + } + + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] + for k, v := range actual["data"].(map[string]interface{}) { + if v.(map[string]interface{})["accessor"] == "" { + t.Fatalf("no accessor from %s", k) + } + if v.(map[string]interface{})["uuid"] == "" { + t.Fatalf("no uuid from %s", k) + } + expected[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected[k].(map[string]interface{})["uuid"] = v.(map[string]interface{})["uuid"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["accessor"] = v.(map[string]interface{})["accessor"] + expected["data"].(map[string]interface{})[k].(map[string]interface{})["uuid"] = v.(map[string]interface{})["uuid"] + } + + if diff := deep.Equal(actual, expected); len(diff) > 0 { + t.Fatalf("bad, diff: %#v", diff) + } + + // Check simple configuration endpoint + resp = testHttpGet(t, token, addr+"/v1/sys/mounts/foo/tune") + actual = map[string]interface{}{} + expected = map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "description": "foo", + "default_lease_ttl": json.Number("259196400"), + "max_lease_ttl": json.Number("259200000"), + "force_no_cache": false, + "options": map[string]interface{}{"version": "1"}, + }, + "description": "foo", + "default_lease_ttl": json.Number("259196400"), + "max_lease_ttl": json.Number("259200000"), + "force_no_cache": false, + "options": map[string]interface{}{"version": "1"}, + } + + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad:\nExpected: %#v\nActual:%#v", expected, actual) + } + + // Set a low max + resp = testHttpPost(t, token, addr+"/v1/sys/mounts/secret/tune", map[string]interface{}{ + "description": "foobar", + "default_lease_ttl": "40s", + "max_lease_ttl": "80s", + }) + testResponseStatus(t, resp, 204) + + resp = testHttpGet(t, token, addr+"/v1/sys/mounts/secret/tune") + actual = map[string]interface{}{} + expected = map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "description": "foobar", + "default_lease_ttl": json.Number("40"), + "max_lease_ttl": json.Number("80"), + "force_no_cache": false, + "options": map[string]interface{}{"version": "1"}, + }, + "description": "foobar", + "default_lease_ttl": json.Number("40"), + "max_lease_ttl": json.Number("80"), + "force_no_cache": false, + "options": map[string]interface{}{"version": "1"}, + } + + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad:\nExpected: %#v\nActual:%#v", expected, actual) + } + + // First try with lease above backend max + resp = testHttpPut(t, token, addr+"/v1/secret/foo", map[string]interface{}{ + "data": "bar", + "ttl": "28347h", + }) + testResponseStatus(t, resp, 204) + + // read secret + resp = testHttpGet(t, token, addr+"/v1/secret/foo") + var result struct { + LeaseID string `json:"lease_id" structs:"lease_id"` + LeaseDuration int `json:"lease_duration" structs:"lease_duration"` + } + + testResponseBody(t, resp, &result) + + expected = map[string]interface{}{ + 
"lease_duration": int(80), + "lease_id": result.LeaseID, + } + + if !reflect.DeepEqual(structs.Map(result), expected) { + t.Fatalf("bad:\nExpected: %#v\nActual:%#v", expected, structs.Map(result)) + } + + // Now with lease TTL unspecified + resp = testHttpPut(t, token, addr+"/v1/secret/foo", map[string]interface{}{ + "data": "bar", + }) + testResponseStatus(t, resp, 204) + + // read secret + resp = testHttpGet(t, token, addr+"/v1/secret/foo") + + testResponseBody(t, resp, &result) + + expected = map[string]interface{}{ + "lease_duration": int(40), + "lease_id": result.LeaseID, + } + + if !reflect.DeepEqual(structs.Map(result), expected) { + t.Fatalf("bad:\nExpected: %#v\nActual:%#v", expected, structs.Map(result)) + } +} + +func TestSysTuneMount_nonHMACKeys(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + // Mount-tune the audit_non_hmac_request_keys + resp := testHttpPost(t, token, addr+"/v1/sys/mounts/secret/tune", map[string]interface{}{ + "audit_non_hmac_request_keys": "foo", + }) + testResponseStatus(t, resp, 204) + + // Mount-tune the audit_non_hmac_response_keys + resp = testHttpPost(t, token, addr+"/v1/sys/mounts/secret/tune", map[string]interface{}{ + "audit_non_hmac_response_keys": "bar", + }) + testResponseStatus(t, resp, 204) + + // Check results + resp = testHttpGet(t, token, addr+"/v1/sys/mounts/secret/tune") + testResponseStatus(t, resp, 200) + + actual := map[string]interface{}{} + expected := map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "description": "key/value secret storage", + "default_lease_ttl": json.Number("2764800"), + "max_lease_ttl": json.Number("2764800"), + "force_no_cache": false, + "audit_non_hmac_request_keys": []interface{}{"foo"}, + "audit_non_hmac_response_keys": []interface{}{"bar"}, + "options": map[string]interface{}{"version": "1"}, + }, + "description": "key/value secret storage", + "default_lease_ttl": json.Number("2764800"), + "max_lease_ttl": json.Number("2764800"), + "force_no_cache": false, + "audit_non_hmac_request_keys": []interface{}{"foo"}, + "audit_non_hmac_response_keys": []interface{}{"bar"}, + "options": map[string]interface{}{"version": "1"}, + } + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad:\nExpected: %#v\nActual:%#v", expected, actual) + } + + // Unset those mount tune values + resp = testHttpPost(t, token, addr+"/v1/sys/mounts/secret/tune", map[string]interface{}{ + "audit_non_hmac_request_keys": "", + }) + testResponseStatus(t, resp, 204) + + resp = testHttpPost(t, token, addr+"/v1/sys/mounts/secret/tune", map[string]interface{}{ + "audit_non_hmac_response_keys": "", + }) + testResponseStatus(t, resp, 204) + + // Check results + resp = testHttpGet(t, token, addr+"/v1/sys/mounts/secret/tune") + testResponseStatus(t, resp, 200) + + actual = map[string]interface{}{} + expected = map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "description": "key/value secret storage", + "default_lease_ttl": json.Number("2764800"), + "max_lease_ttl": json.Number("2764800"), + "force_no_cache": false, + "options": map[string]interface{}{"version": "1"}, + }, + 
"description": "key/value secret storage", + "default_lease_ttl": json.Number("2764800"), + "max_lease_ttl": json.Number("2764800"), + "force_no_cache": false, + "options": map[string]interface{}{"version": "1"}, + } + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad:\nExpected: %#v\nActual:%#v", expected, actual) + } +} + +func TestSysTuneMount_listingVisibility(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + // Get original tune values, ensure that listing_visibility is not set + resp := testHttpGet(t, token, addr+"/v1/sys/mounts/secret/tune") + testResponseStatus(t, resp, 200) + + actual := map[string]interface{}{} + expected := map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "description": "key/value secret storage", + "default_lease_ttl": json.Number("2764800"), + "max_lease_ttl": json.Number("2764800"), + "force_no_cache": false, + "options": map[string]interface{}{"version": "1"}, + }, + "description": "key/value secret storage", + "default_lease_ttl": json.Number("2764800"), + "max_lease_ttl": json.Number("2764800"), + "force_no_cache": false, + "options": map[string]interface{}{"version": "1"}, + } + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad:\nExpected: %#v\nActual:%#v", expected, actual) + } + + // Mount-tune the listing_visibility + resp = testHttpPost(t, token, addr+"/v1/sys/mounts/secret/tune", map[string]interface{}{ + "listing_visibility": "unauth", + }) + testResponseStatus(t, resp, 204) + + // Check results + resp = testHttpGet(t, token, addr+"/v1/sys/mounts/secret/tune") + testResponseStatus(t, resp, 200) + + actual = map[string]interface{}{} + expected = map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "description": "key/value secret storage", + "default_lease_ttl": json.Number("2764800"), + "max_lease_ttl": json.Number("2764800"), + "force_no_cache": false, + "listing_visibility": "unauth", + "options": map[string]interface{}{"version": "1"}, + }, + "description": "key/value secret storage", + "default_lease_ttl": json.Number("2764800"), + "max_lease_ttl": json.Number("2764800"), + "force_no_cache": false, + "listing_visibility": "unauth", + "options": map[string]interface{}{"version": "1"}, + } + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad:\nExpected: %#v\nActual:%#v", expected, actual) + } +} + +func TestSysTuneMount_passthroughRequestHeaders(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + // Mount-tune the audit_non_hmac_request_keys + resp := testHttpPost(t, token, addr+"/v1/sys/mounts/secret/tune", map[string]interface{}{ + "passthrough_request_headers": "X-Vault-Foo", + }) + testResponseStatus(t, resp, 204) + + // Check results + resp = testHttpGet(t, token, addr+"/v1/sys/mounts/secret/tune") + testResponseStatus(t, resp, 200) + + actual := map[string]interface{}{} + expected := 
map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "description": "key/value secret storage", + "default_lease_ttl": json.Number("2764800"), + "max_lease_ttl": json.Number("2764800"), + "options": map[string]interface{}{"version": "1"}, + "force_no_cache": false, + "passthrough_request_headers": []interface{}{"X-Vault-Foo"}, + }, + "description": "key/value secret storage", + "default_lease_ttl": json.Number("2764800"), + "max_lease_ttl": json.Number("2764800"), + "options": map[string]interface{}{"version": "1"}, + "force_no_cache": false, + "passthrough_request_headers": []interface{}{"X-Vault-Foo"}, + } + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad:\nExpected: %#v\nActual:%#v", expected, actual) + } + + // Unset the mount tune value + resp = testHttpPost(t, token, addr+"/v1/sys/mounts/secret/tune", map[string]interface{}{ + "passthrough_request_headers": "", + }) + testResponseStatus(t, resp, 204) + + // Check results + resp = testHttpGet(t, token, addr+"/v1/sys/mounts/secret/tune") + testResponseStatus(t, resp, 200) + + actual = map[string]interface{}{} + expected = map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "description": "key/value secret storage", + "default_lease_ttl": json.Number("2764800"), + "max_lease_ttl": json.Number("2764800"), + "force_no_cache": false, + "options": map[string]interface{}{"version": "1"}, + }, + "description": "key/value secret storage", + "default_lease_ttl": json.Number("2764800"), + "max_lease_ttl": json.Number("2764800"), + "force_no_cache": false, + "options": map[string]interface{}{"version": "1"}, + } + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad:\nExpected: %#v\nActual:%#v", expected, actual) + } +} + +func TestSysTuneMount_allowedManagedKeys(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + // Mount-tune the allowed_managed_keys + resp := testHttpPost(t, token, addr+"/v1/sys/mounts/secret/tune", map[string]interface{}{ + "allowed_managed_keys": "test_key", + }) + testResponseStatus(t, resp, 204) + + // Check results + resp = testHttpGet(t, token, addr+"/v1/sys/mounts/secret/tune") + testResponseStatus(t, resp, 200) + + actual := map[string]interface{}{} + expected := map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "description": "key/value secret storage", + "default_lease_ttl": json.Number("2764800"), + "max_lease_ttl": json.Number("2764800"), + "options": map[string]interface{}{"version": "1"}, + "force_no_cache": false, + "allowed_managed_keys": []interface{}{"test_key"}, + }, + "description": "key/value secret storage", + "default_lease_ttl": json.Number("2764800"), + "max_lease_ttl": json.Number("2764800"), + "options": map[string]interface{}{"version": "1"}, + "force_no_cache": false, + "allowed_managed_keys": []interface{}{"test_key"}, + } + testResponseBody(t, resp, &actual) + expected["request_id"] = 
actual["request_id"] + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad:\nExpected: %#v\nActual:%#v", expected, actual) + } + + // Unset the mount tune value + resp = testHttpPost(t, token, addr+"/v1/sys/mounts/secret/tune", map[string]interface{}{ + "allowed_managed_keys": "", + }) + testResponseStatus(t, resp, 204) + + // Check results + resp = testHttpGet(t, token, addr+"/v1/sys/mounts/secret/tune") + testResponseStatus(t, resp, 200) + + actual = map[string]interface{}{} + expected = map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "description": "key/value secret storage", + "default_lease_ttl": json.Number("2764800"), + "max_lease_ttl": json.Number("2764800"), + "force_no_cache": false, + "options": map[string]interface{}{"version": "1"}, + }, + "description": "key/value secret storage", + "default_lease_ttl": json.Number("2764800"), + "max_lease_ttl": json.Number("2764800"), + "force_no_cache": false, + "options": map[string]interface{}{"version": "1"}, + } + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad:\nExpected: %#v\nActual:%#v", expected, actual) + } +} diff --git a/http/sys_mounts_test.go b/http/sys_mounts_test.go new file mode 100644 index 0000000..5f22185 --- /dev/null +++ b/http/sys_mounts_test.go @@ -0,0 +1,68 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "fmt" + "math/rand" + "testing" + "time" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/vault" +) + +func TestSysMountConfig(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + + config := api.DefaultConfig() + config.Address = addr + + client, err := api.NewClient(config) + if err != nil { + t.Fatal(err) + } + client.SetToken(token) + + // Set up a test mount + path, err := testMount(client) + if err != nil { + t.Fatal(err) + } + defer client.Sys().Unmount(path) + + // Get config info for this mount + mountConfig, err := client.Sys().MountConfig(path) + if err != nil { + t.Fatal(err) + } + + expectedDefaultTTL := 2764800 + if mountConfig.DefaultLeaseTTL != expectedDefaultTTL { + t.Fatalf("Expected default lease TTL: %d, got %d", + expectedDefaultTTL, mountConfig.DefaultLeaseTTL) + } + + expectedMaxTTL := 2764800 + if mountConfig.MaxLeaseTTL != expectedMaxTTL { + t.Fatalf("Expected default lease TTL: %d, got %d", + expectedMaxTTL, mountConfig.MaxLeaseTTL) + } + + if mountConfig.ForceNoCache { + t.Fatal("did not expect force cache") + } +} + +// testMount sets up a test mount of a kv backend w/ a random path; caller +// is responsible for unmounting +func testMount(client *api.Client) (string, error) { + rand.Seed(time.Now().UTC().UnixNano()) + randInt := rand.New(rand.NewSource(time.Now().UnixNano())).Int() + path := fmt.Sprintf("testmount-%d", randInt) + err := client.Sys().Mount(path, &api.MountInput{Type: "kv"}) + return path, err +} diff --git a/http/sys_policy_test.go b/http/sys_policy_test.go new file mode 100644 index 0000000..1ab1e85 --- /dev/null +++ b/http/sys_policy_test.go @@ -0,0 +1,159 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "encoding/json" + "reflect" + "testing" + + "github.com/hashicorp/vault/vault" +) + +func TestSysPolicies(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpGet(t, token, addr+"/v1/sys/policy") + + var actual map[string]interface{} + expected := map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "policies": []interface{}{"default", "root"}, + "keys": []interface{}{"default", "root"}, + }, + "policies": []interface{}{"default", "root"}, + "keys": []interface{}{"default", "root"}, + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: got\n%#v\nexpected\n%#v\n", actual, expected) + } +} + +func TestSysReadPolicy(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpGet(t, token, addr+"/v1/sys/policy/root") + + var actual map[string]interface{} + expected := map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "name": "root", + "rules": "", + }, + "name": "root", + "rules": "", + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: got\n%#v\nexpected\n%#v\n", actual, expected) + } +} + +func TestSysWritePolicy(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPost(t, token, addr+"/v1/sys/policy/foo", map[string]interface{}{ + "rules": `path "*" { capabilities = ["read"] }`, + }) + testResponseStatus(t, resp, 200) + + resp = testHttpGet(t, token, addr+"/v1/sys/policy") + + var actual map[string]interface{} + expected := map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "policies": []interface{}{"default", "foo", "root"}, + "keys": []interface{}{"default", "foo", "root"}, + }, + "policies": []interface{}{"default", "foo", "root"}, + "keys": []interface{}{"default", "foo", "root"}, + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: got\n%#v\nexpected\n%#v\n", actual, expected) + } + + resp = testHttpPost(t, token, addr+"/v1/sys/policy/response-wrapping", map[string]interface{}{ + "rules": ``, + }) + testResponseStatus(t, resp, 400) +} + +func TestSysDeletePolicy(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPost(t, token, addr+"/v1/sys/policy/foo", map[string]interface{}{ + "rules": `path "*" { capabilities = ["read"] }`, + }) + testResponseStatus(t, resp, 200) + + resp = testHttpDelete(t, token, addr+"/v1/sys/policy/foo") + testResponseStatus(t, resp, 204) + + // 
Also attempt to delete these since they should not be allowed (ignore
+	// responses, if they exist later that's sufficient)
+	resp = testHttpDelete(t, token, addr+"/v1/sys/policy/default")
+	resp = testHttpDelete(t, token, addr+"/v1/sys/policy/response-wrapping")
+
+	resp = testHttpGet(t, token, addr+"/v1/sys/policy")
+
+	var actual map[string]interface{}
+	expected := map[string]interface{}{
+		"lease_id": "",
+		"renewable": false,
+		"lease_duration": json.Number("0"),
+		"wrap_info": nil,
+		"warnings": nil,
+		"auth": nil,
+		"data": map[string]interface{}{
+			"policies": []interface{}{"default", "root"},
+			"keys": []interface{}{"default", "root"},
+		},
+		"policies": []interface{}{"default", "root"},
+		"keys": []interface{}{"default", "root"},
+	}
+	testResponseStatus(t, resp, 200)
+	testResponseBody(t, resp, &actual)
+	expected["request_id"] = actual["request_id"]
+	if !reflect.DeepEqual(actual, expected) {
+		t.Fatalf("bad: got\n%#v\nexpected\n%#v\n", actual, expected)
+	}
+}
diff --git a/http/sys_raft.go b/http/sys_raft.go
new file mode 100644
index 0000000..1e00ebe
--- /dev/null
+++ b/http/sys_raft.go
@@ -0,0 +1,117 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package http
+
+import (
+	"context"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+
+	"github.com/hashicorp/go-secure-stdlib/tlsutil"
+	"github.com/hashicorp/vault/physical/raft"
+	"github.com/hashicorp/vault/vault"
+)
+
+func handleSysRaftBootstrap(core *vault.Core) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		switch r.Method {
+		case "POST", "PUT":
+			if core.Sealed() {
+				respondError(w, http.StatusBadRequest, errors.New("node must be unsealed to bootstrap"))
+				return
+			}
+
+			if err := core.RaftBootstrap(context.Background(), false); err != nil {
+				respondError(w, http.StatusInternalServerError, err)
+				return
+			}
+
+		default:
+			respondError(w, http.StatusBadRequest, nil)
+		}
+	})
+}
+
+func handleSysRaftJoin(core *vault.Core) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		switch r.Method {
+		case "POST", "PUT":
+			handleSysRaftJoinPost(core, w, r)
+		default:
+			respondError(w, http.StatusMethodNotAllowed, nil)
+		}
+	})
+}
+
+func handleSysRaftJoinPost(core *vault.Core, w http.ResponseWriter, r *http.Request) {
+	// Parse the request
+	var req JoinRequest
+	if _, err := parseJSONRequest(core.PerfStandby(), r, w, &req); err != nil && err != io.EOF {
+		respondError(w, http.StatusBadRequest, err)
+		return
+	}
+
+	if req.NonVoter && !nonVotersAllowed {
+		respondError(w, http.StatusBadRequest, errors.New("non-voting nodes not allowed"))
+		return
+	}
+
+	var tlsConfig *tls.Config
+	var err error
+	if len(req.LeaderCACert) != 0 || len(req.LeaderClientCert) != 0 || len(req.LeaderClientKey) != 0 {
+		tlsConfig, err = tlsutil.ClientTLSConfig([]byte(req.LeaderCACert), []byte(req.LeaderClientCert), []byte(req.LeaderClientKey))
+		if err != nil {
+			respondError(w, http.StatusBadRequest, err)
+			return
+		}
+		tlsConfig.ServerName = req.LeaderTLSServerName
+	}
+
+	if req.AutoJoinScheme != "" && (req.AutoJoinScheme != "http" && req.AutoJoinScheme != "https") {
+		respondError(w, http.StatusBadRequest, fmt.Errorf("invalid scheme %q; must either be http or https", req.AutoJoinScheme))
+		return
+	}
+
+	leaderInfos := []*raft.LeaderJoinInfo{
+		{
+			AutoJoin: req.AutoJoin,
+			AutoJoinScheme: req.AutoJoinScheme,
+			AutoJoinPort: req.AutoJoinPort,
+			LeaderAPIAddr: req.LeaderAPIAddr,
+			TLSConfig: tlsConfig,
+			Retry: req.Retry,
+		},
+	}
+
+	joined, 
err := core.JoinRaftCluster(context.Background(), leaderInfos, req.NonVoter) + if err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + + resp := JoinResponse{ + Joined: joined, + } + respondOk(w, resp) +} + +type JoinResponse struct { + Joined bool `json:"joined"` +} + +type JoinRequest struct { + AutoJoin string `json:"auto_join"` + AutoJoinScheme string `json:"auto_join_scheme"` + AutoJoinPort uint `json:"auto_join_port"` + LeaderAPIAddr string `json:"leader_api_addr"` + LeaderCACert string `json:"leader_ca_cert"` + LeaderClientCert string `json:"leader_client_cert"` + LeaderClientKey string `json:"leader_client_key"` + LeaderTLSServerName string `json:"leader_tls_servername"` + Retry bool `json:"retry"` + NonVoter bool `json:"non_voter"` +} diff --git a/http/sys_rekey.go b/http/sys_rekey.go new file mode 100644 index 0000000..c05dc83 --- /dev/null +++ b/http/sys_rekey.go @@ -0,0 +1,414 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "context" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "net/http" + + "github.com/hashicorp/vault/helper/pgpkeys" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/vault" +) + +func handleSysRekeyInit(core *vault.Core, recovery bool) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + standby, _ := core.Standby() + if standby { + respondStandby(core, w, r.URL) + return + } + + repState := core.ReplicationState() + if repState.HasState(consts.ReplicationPerformanceSecondary) { + respondError(w, http.StatusBadRequest, + fmt.Errorf("rekeying can only be performed on the primary cluster when replication is activated")) + return + } + + ctx, cancel := core.GetContext() + defer cancel() + + switch { + case recovery && !core.SealAccess().RecoveryKeySupported(): + respondError(w, http.StatusBadRequest, fmt.Errorf("recovery rekeying not supported")) + case r.Method == "GET": + handleSysRekeyInitGet(ctx, core, recovery, w, r) + case r.Method == "POST" || r.Method == "PUT": + handleSysRekeyInitPut(ctx, core, recovery, w, r) + case r.Method == "DELETE": + handleSysRekeyInitDelete(ctx, core, recovery, w, r) + default: + respondError(w, http.StatusMethodNotAllowed, nil) + } + }) +} + +func handleSysRekeyInitGet(ctx context.Context, core *vault.Core, recovery bool, w http.ResponseWriter, r *http.Request) { + barrierConfig, barrierConfErr := core.SealAccess().BarrierConfig(ctx) + if barrierConfErr != nil { + respondError(w, http.StatusInternalServerError, barrierConfErr) + return + } + if barrierConfig == nil { + respondError(w, http.StatusBadRequest, fmt.Errorf("server is not yet initialized")) + return + } + + // Get the rekey configuration + rekeyConf, err := core.RekeyConfig(recovery) + if err != nil { + respondError(w, err.Code(), err) + return + } + + sealThreshold, err := core.RekeyThreshold(ctx, recovery) + if err != nil { + respondError(w, err.Code(), err) + return + } + + // Format the status + status := &RekeyStatusResponse{ + Started: false, + T: 0, + N: 0, + Required: sealThreshold, + } + if rekeyConf != nil { + // Get the progress + started, progress, err := core.RekeyProgress(recovery, false) + if err != nil { + respondError(w, err.Code(), err) + return + } + + status.Nonce = rekeyConf.Nonce + status.Started = started + status.T = rekeyConf.SecretThreshold + status.N = rekeyConf.SecretShares + status.Progress = progress + status.VerificationRequired = rekeyConf.VerificationRequired + 
status.VerificationNonce = rekeyConf.VerificationNonce + if rekeyConf.PGPKeys != nil && len(rekeyConf.PGPKeys) != 0 { + pgpFingerprints, err := pgpkeys.GetFingerprints(rekeyConf.PGPKeys, nil) + if err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + status.PGPFingerprints = pgpFingerprints + status.Backup = rekeyConf.Backup + } + } + respondOk(w, status) +} + +func handleSysRekeyInitPut(ctx context.Context, core *vault.Core, recovery bool, w http.ResponseWriter, r *http.Request) { + // Parse the request + var req RekeyRequest + if _, err := parseJSONRequest(core.PerfStandby(), r, w, &req); err != nil { + respondError(w, http.StatusBadRequest, err) + return + } + + if req.Backup && len(req.PGPKeys) == 0 { + respondError(w, http.StatusBadRequest, fmt.Errorf("cannot request a backup of the new keys without providing PGP keys for encryption")) + return + } + + if len(req.PGPKeys) > 0 && len(req.PGPKeys) != req.SecretShares { + respondError(w, http.StatusBadRequest, fmt.Errorf("incorrect number of PGP keys for rekey")) + return + } + + // Initialize the rekey + err := core.RekeyInit(&vault.SealConfig{ + SecretShares: req.SecretShares, + SecretThreshold: req.SecretThreshold, + StoredShares: req.StoredShares, + PGPKeys: req.PGPKeys, + Backup: req.Backup, + VerificationRequired: req.RequireVerification, + }, recovery) + if err != nil { + respondError(w, err.Code(), err) + return + } + + handleSysRekeyInitGet(ctx, core, recovery, w, r) +} + +func handleSysRekeyInitDelete(ctx context.Context, core *vault.Core, recovery bool, w http.ResponseWriter, r *http.Request) { + if err := core.RekeyCancel(recovery); err != nil { + respondError(w, err.Code(), err) + return + } + respondOk(w, nil) +} + +func handleSysRekeyUpdate(core *vault.Core, recovery bool) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + standby, _ := core.Standby() + if standby { + respondStandby(core, w, r.URL) + return + } + + // Parse the request + var req RekeyUpdateRequest + if _, err := parseJSONRequest(core.PerfStandby(), r, w, &req); err != nil { + respondError(w, http.StatusBadRequest, err) + return + } + if req.Key == "" { + respondError( + w, http.StatusBadRequest, + errors.New("'key' must be specified in request body as JSON")) + return + } + + // Decode the key, which is base64 or hex encoded + min, max := core.BarrierKeyLength() + key, err := hex.DecodeString(req.Key) + // We check min and max here to ensure that a string that is base64 + // encoded but also valid hex will not be valid and we instead base64 + // decode it + if err != nil || len(key) < min || len(key) > max { + key, err = base64.StdEncoding.DecodeString(req.Key) + if err != nil { + respondError( + w, http.StatusBadRequest, + errors.New("'key' must be a valid hex or base64 string")) + return + } + } + + ctx, cancel := core.GetContext() + defer cancel() + + // Use the key to make progress on rekey + result, rekeyErr := core.RekeyUpdate(ctx, key, req.Nonce, recovery) + if rekeyErr != nil { + respondError(w, rekeyErr.Code(), rekeyErr) + return + } + + // Format the response + resp := &RekeyUpdateResponse{} + if result != nil { + resp.Complete = true + resp.Nonce = req.Nonce + resp.Backup = result.Backup + resp.PGPFingerprints = result.PGPFingerprints + resp.VerificationRequired = result.VerificationRequired + resp.VerificationNonce = result.VerificationNonce + + // Encode the keys + keys := make([]string, 0, len(result.SecretShares)) + keysB64 := make([]string, 0, len(result.SecretShares)) + 
for _, k := range result.SecretShares { + keys = append(keys, hex.EncodeToString(k)) + keysB64 = append(keysB64, base64.StdEncoding.EncodeToString(k)) + } + resp.Keys = keys + resp.KeysB64 = keysB64 + respondOk(w, resp) + } else { + handleSysRekeyInitGet(ctx, core, recovery, w, r) + } + }) +} + +func handleSysRekeyVerify(core *vault.Core, recovery bool) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + standby, _ := core.Standby() + if standby { + respondStandby(core, w, r.URL) + return + } + + repState := core.ReplicationState() + if repState.HasState(consts.ReplicationPerformanceSecondary) { + respondError(w, http.StatusBadRequest, + fmt.Errorf("rekeying can only be performed on the primary cluster when replication is activated")) + return + } + + ctx, cancel := core.GetContext() + defer cancel() + + switch { + case recovery && !core.SealAccess().RecoveryKeySupported(): + respondError(w, http.StatusBadRequest, fmt.Errorf("recovery rekeying not supported")) + case r.Method == "GET": + handleSysRekeyVerifyGet(ctx, core, recovery, w, r) + case r.Method == "POST" || r.Method == "PUT": + handleSysRekeyVerifyPut(ctx, core, recovery, w, r) + case r.Method == "DELETE": + handleSysRekeyVerifyDelete(ctx, core, recovery, w, r) + default: + respondError(w, http.StatusMethodNotAllowed, nil) + } + }) +} + +func handleSysRekeyVerifyGet(ctx context.Context, core *vault.Core, recovery bool, w http.ResponseWriter, r *http.Request) { + barrierConfig, barrierConfErr := core.SealAccess().BarrierConfig(ctx) + if barrierConfErr != nil { + respondError(w, http.StatusInternalServerError, barrierConfErr) + return + } + if barrierConfig == nil { + respondError(w, http.StatusBadRequest, fmt.Errorf("server is not yet initialized")) + return + } + + // Get the rekey configuration + rekeyConf, err := core.RekeyConfig(recovery) + if err != nil { + respondError(w, err.Code(), err) + return + } + if rekeyConf == nil { + respondError(w, http.StatusBadRequest, errors.New("no rekey configuration found")) + return + } + + // Get the progress + started, progress, err := core.RekeyProgress(recovery, true) + if err != nil { + respondError(w, err.Code(), err) + return + } + + // Format the status + status := &RekeyVerificationStatusResponse{ + Started: started, + Nonce: rekeyConf.VerificationNonce, + T: rekeyConf.SecretThreshold, + N: rekeyConf.SecretShares, + Progress: progress, + } + respondOk(w, status) +} + +func handleSysRekeyVerifyDelete(ctx context.Context, core *vault.Core, recovery bool, w http.ResponseWriter, r *http.Request) { + if err := core.RekeyVerifyRestart(recovery); err != nil { + respondError(w, err.Code(), err) + return + } + + handleSysRekeyVerifyGet(ctx, core, recovery, w, r) +} + +func handleSysRekeyVerifyPut(ctx context.Context, core *vault.Core, recovery bool, w http.ResponseWriter, r *http.Request) { + // Parse the request + var req RekeyVerificationUpdateRequest + if _, err := parseJSONRequest(core.PerfStandby(), r, w, &req); err != nil { + respondError(w, http.StatusBadRequest, err) + return + } + if req.Key == "" { + respondError( + w, http.StatusBadRequest, + errors.New("'key' must be specified in request body as JSON")) + return + } + + // Decode the key, which is base64 or hex encoded + min, max := core.BarrierKeyLength() + key, err := hex.DecodeString(req.Key) + // We check min and max here to ensure that a string that is base64 + // encoded but also valid hex will not be valid and we instead base64 + // decode it + if err != nil || len(key) < min || 
len(key) > max { + key, err = base64.StdEncoding.DecodeString(req.Key) + if err != nil { + respondError( + w, http.StatusBadRequest, + errors.New("'key' must be a valid hex or base64 string")) + return + } + } + + ctx, cancel := core.GetContext() + defer cancel() + + // Use the key to make progress on rekey + result, rekeyErr := core.RekeyVerify(ctx, key, req.Nonce, recovery) + if rekeyErr != nil { + respondError(w, rekeyErr.Code(), rekeyErr) + return + } + + // Format the response + resp := &RekeyVerificationUpdateResponse{} + if result != nil { + resp.Complete = true + resp.Nonce = result.Nonce + respondOk(w, resp) + } else { + handleSysRekeyVerifyGet(ctx, core, recovery, w, r) + } +} + +type RekeyRequest struct { + SecretShares int `json:"secret_shares"` + SecretThreshold int `json:"secret_threshold"` + StoredShares int `json:"stored_shares"` + PGPKeys []string `json:"pgp_keys"` + Backup bool `json:"backup"` + RequireVerification bool `json:"require_verification"` +} + +type RekeyStatusResponse struct { + Nonce string `json:"nonce"` + Started bool `json:"started"` + T int `json:"t"` + N int `json:"n"` + Progress int `json:"progress"` + Required int `json:"required"` + PGPFingerprints []string `json:"pgp_fingerprints"` + Backup bool `json:"backup"` + VerificationRequired bool `json:"verification_required"` + VerificationNonce string `json:"verification_nonce,omitempty"` +} + +type RekeyUpdateRequest struct { + Nonce string + Key string +} + +type RekeyUpdateResponse struct { + Nonce string `json:"nonce"` + Complete bool `json:"complete"` + Keys []string `json:"keys"` + KeysB64 []string `json:"keys_base64"` + PGPFingerprints []string `json:"pgp_fingerprints"` + Backup bool `json:"backup"` + VerificationRequired bool `json:"verification_required"` + VerificationNonce string `json:"verification_nonce,omitempty"` +} + +type RekeyVerificationUpdateRequest struct { + Nonce string `json:"nonce"` + Key string `json:"key"` +} + +type RekeyVerificationStatusResponse struct { + Nonce string `json:"nonce"` + Started bool `json:"started"` + T int `json:"t"` + N int `json:"n"` + Progress int `json:"progress"` +} + +type RekeyVerificationUpdateResponse struct { + Nonce string `json:"nonce"` + Complete bool `json:"complete"` +} diff --git a/http/sys_rekey_test.go b/http/sys_rekey_test.go new file mode 100644 index 0000000..eaef4dd --- /dev/null +++ b/http/sys_rekey_test.go @@ -0,0 +1,296 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package http
+
+import (
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"testing"
+
+	"github.com/go-test/deep"
+	"github.com/hashicorp/vault/sdk/helper/testhelpers/schema"
+	"github.com/hashicorp/vault/vault"
+)
+
+// Test to check if the API errors out when wrong number of PGP keys are
+// supplied for rekey
+func TestSysRekey_Init_pgpKeysEntriesForRekey(t *testing.T) {
+	cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{
+		HandlerFunc: Handler,
+		RequestResponseCallback: schema.ResponseValidatingCallback(t),
+	})
+	cluster.Start()
+	defer cluster.Cleanup()
+	cl := cluster.Cores[0].Client
+
+	_, err := cl.Logical().Write("sys/rekey/init", map[string]interface{}{
+		"secret_shares": 5,
+		"secret_threshold": 3,
+		"pgp_keys": []string{"pgpkey1"},
+	})
+	if err == nil {
+		t.Fatal("should have failed to write pgp key entry due to mismatched keys")
+	}
+}
+
+func TestSysRekey_Init_Status(t *testing.T) {
+	t.Run("status-barrier-default", func(t *testing.T) {
+		cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{
+			HandlerFunc: Handler,
+			RequestResponseCallback: schema.ResponseValidatingCallback(t),
+		})
+		cluster.Start()
+		defer cluster.Cleanup()
+		cl := cluster.Cores[0].Client
+
+		resp, err := cl.Logical().Read("sys/rekey/init")
+		if err != nil {
+			t.Fatalf("err: %s", err)
+		}
+
+		actual := resp.Data
+		expected := map[string]interface{}{
+			"started": false,
+			"t": json.Number("0"),
+			"n": json.Number("0"),
+			"progress": json.Number("0"),
+			"required": json.Number("3"),
+			"pgp_fingerprints": interface{}(nil),
+			"backup": false,
+			"nonce": "",
+			"verification_required": false,
+		}
+
+		if !reflect.DeepEqual(actual, expected) {
+			t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
+		}
+	})
+}
+
+func TestSysRekey_Init_Setup(t *testing.T) {
+	t.Run("init-barrier-barrier-key", func(t *testing.T) {
+		cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{
+			HandlerFunc: Handler,
+			RequestResponseCallback: schema.ResponseValidatingCallback(t),
+		})
+		cluster.Start()
+		defer cluster.Cleanup()
+		cl := cluster.Cores[0].Client
+
+		// Start rekey
+		resp, err := cl.Logical().Write("sys/rekey/init", map[string]interface{}{
+			"secret_shares": 5,
+			"secret_threshold": 3,
+		})
+		if err != nil {
+			t.Fatalf("err: %s", err)
+		}
+
+		actual := resp.Data
+		expected := map[string]interface{}{
+			"started": true,
+			"t": json.Number("3"),
+			"n": json.Number("5"),
+			"progress": json.Number("0"),
+			"required": json.Number("3"),
+			"pgp_fingerprints": interface{}(nil),
+			"backup": false,
+			"verification_required": false,
+		}
+
+		if actual["nonce"].(string) == "" {
+			t.Fatalf("nonce was empty")
+		}
+		expected["nonce"] = actual["nonce"]
+		if diff := deep.Equal(actual, expected); diff != nil {
+			t.Fatal(diff)
+		}
+
+		// Get rekey status
+		resp, err = cl.Logical().Read("sys/rekey/init")
+		if err != nil {
+			t.Fatalf("err: %s", err)
+		}
+
+		actual = resp.Data
+		expected = map[string]interface{}{
+			"started": true,
+			"t": json.Number("3"),
+			"n": json.Number("5"),
+			"progress": json.Number("0"),
+			"required": json.Number("3"),
+			"pgp_fingerprints": interface{}(nil),
+			"backup": false,
+			"verification_required": false,
+		}
+		if actual["nonce"].(string) == "" {
+			t.Fatalf("nonce was empty")
+		}
+		expected["nonce"] = actual["nonce"]
+		if !reflect.DeepEqual(actual, expected) {
+			t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
+		}
+	})
+}
+
+func 
TestSysRekey_Init_Cancel(t *testing.T) { + t.Run("cancel-barrier-barrier-key", func(t *testing.T) { + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: Handler, + RequestResponseCallback: schema.ResponseValidatingCallback(t), + }) + cluster.Start() + defer cluster.Cleanup() + cl := cluster.Cores[0].Client + + _, err := cl.Logical().Write("sys/rekey/init", map[string]interface{}{ + "secret_shares": 5, + "secret_threshold": 3, + }) + if err != nil { + t.Fatalf("err: %s", err) + } + + _, err = cl.Logical().Delete("sys/rekey/init") + if err != nil { + t.Fatalf("err: %s", err) + } + + resp, err := cl.Logical().Read("sys/rekey/init") + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := resp.Data + expected := map[string]interface{}{ + "started": false, + "t": json.Number("0"), + "n": json.Number("0"), + "progress": json.Number("0"), + "required": json.Number("3"), + "pgp_fingerprints": interface{}(nil), + "backup": false, + "nonce": "", + "verification_required": false, + } + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual) + } + }) +} + +func TestSysRekey_badKey(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPut(t, token, addr+"/v1/sys/rekey/update", map[string]interface{}{ + "key": "0123", + }) + testResponseStatus(t, resp, 400) +} + +func TestSysRekey_Update(t *testing.T) { + t.Run("rekey-barrier-barrier-key", func(t *testing.T) { + core, keys, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPut(t, token, addr+"/v1/sys/rekey/init", map[string]interface{}{ + "secret_shares": 5, + "secret_threshold": 3, + }) + var rekeyStatus map[string]interface{} + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &rekeyStatus) + + var actual map[string]interface{} + var expected map[string]interface{} + + for i, key := range keys { + resp = testHttpPut(t, token, addr+"/v1/sys/rekey/update", map[string]interface{}{ + "nonce": rekeyStatus["nonce"].(string), + "key": hex.EncodeToString(key), + }) + + actual = map[string]interface{}{} + expected = map[string]interface{}{ + "started": true, + "nonce": rekeyStatus["nonce"].(string), + "backup": false, + "pgp_fingerprints": interface{}(nil), + "required": json.Number("3"), + "t": json.Number("3"), + "n": json.Number("5"), + "progress": json.Number(fmt.Sprintf("%d", i+1)), + "verification_required": false, + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + + if i+1 == len(keys) { + delete(expected, "started") + delete(expected, "required") + delete(expected, "t") + delete(expected, "n") + delete(expected, "progress") + expected["complete"] = true + expected["keys"] = actual["keys"] + expected["keys_base64"] = actual["keys_base64"] + } + + if i+1 < len(keys) && (actual["nonce"] == nil || actual["nonce"].(string) == "") { + t.Fatalf("expected a nonce, i is %d, actual is %#v", i, actual) + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("\nexpected: \n%#v\nactual: \n%#v", expected, actual) + } + } + + retKeys := actual["keys"].([]interface{}) + if len(retKeys) != 5 { + t.Fatalf("bad: %#v", retKeys) + } + keysB64 := actual["keys_base64"].([]interface{}) + if len(keysB64) != 5 { + t.Fatalf("bad: %#v", keysB64) + } + }) +} + +func TestSysRekey_ReInitUpdate(t *testing.T) { + core, keys, token := vault.TestCoreUnsealed(t) 
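+	// Re-initializing the rekey below rotates the nonce, so the final update,
+	// which deliberately omits a nonce, should be rejected with a 400.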
+ ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPut(t, token, addr+"/v1/sys/rekey/init", map[string]interface{}{ + "secret_shares": 5, + "secret_threshold": 3, + }) + testResponseStatus(t, resp, 200) + + resp = testHttpDelete(t, token, addr+"/v1/sys/rekey/init") + testResponseStatus(t, resp, 204) + + resp = testHttpPut(t, token, addr+"/v1/sys/rekey/init", map[string]interface{}{ + "secret_shares": 5, + "secret_threshold": 3, + }) + testResponseStatus(t, resp, 200) + + resp = testHttpPut(t, token, addr+"/v1/sys/rekey/update", map[string]interface{}{ + "key": hex.EncodeToString(keys[0]), + }) + + testResponseStatus(t, resp, 400) +} diff --git a/http/sys_rotate_test.go b/http/sys_rotate_test.go new file mode 100644 index 0000000..dfc28a2 --- /dev/null +++ b/http/sys_rotate_test.go @@ -0,0 +1,55 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "encoding/json" + "testing" + + "github.com/go-test/deep" + "github.com/hashicorp/vault/vault" +) + +func TestSysRotate(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPost(t, token, addr+"/v1/sys/rotate", map[string]interface{}{}) + testResponseStatus(t, resp, 204) + + resp = testHttpGet(t, token, addr+"/v1/sys/key-status") + + var actual map[string]interface{} + expected := map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": map[string]interface{}{ + "term": json.Number("2"), + }, + "term": json.Number("2"), + } + + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + + for _, field := range []string{"install_time", "encryptions"} { + actualVal, ok := actual["data"].(map[string]interface{})[field] + if !ok || actualVal == "" { + t.Fatal(field, " missing in data") + } + expected["data"].(map[string]interface{})[field] = actualVal + expected[field] = actualVal + } + + expected["request_id"] = actual["request_id"] + if diff := deep.Equal(actual, expected); diff != nil { + t.Fatal(diff) + } +} diff --git a/http/sys_seal.go b/http/sys_seal.go new file mode 100644 index 0000000..c4f4c4f --- /dev/null +++ b/http/sys_seal.go @@ -0,0 +1,184 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package http
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/hex"
+	"errors"
+	"net/http"
+
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/vault/sdk/helper/consts"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/hashicorp/vault/vault"
+)
+
+func handleSysSeal(core *vault.Core) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		req, _, statusCode, err := buildLogicalRequest(core, w, r)
+		if err != nil || statusCode != 0 {
+			respondError(w, statusCode, err)
+			return
+		}
+
+		switch req.Operation {
+		case logical.UpdateOperation:
+		default:
+			respondError(w, http.StatusMethodNotAllowed, nil)
+			return
+		}
+
+		// Seal with the token above, using the context from the
+		// incoming request
+		if err := core.SealWithRequest(r.Context(), req); err != nil {
+			if errwrap.Contains(err, logical.ErrPermissionDenied.Error()) {
+				respondError(w, http.StatusForbidden, err)
+				return
+			}
+			respondError(w, http.StatusInternalServerError, err)
+			return
+		}
+
+		respondOk(w, nil)
+	})
+}
+
+func handleSysStepDown(core *vault.Core) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		req, _, statusCode, err := buildLogicalRequest(core, w, r)
+		if err != nil || statusCode != 0 {
+			respondError(w, statusCode, err)
+			return
+		}
+
+		switch req.Operation {
+		case logical.UpdateOperation:
+		default:
+			respondError(w, http.StatusMethodNotAllowed, nil)
+			return
+		}
+
+		// Step down with the token above
+		if err := core.StepDown(r.Context(), req); err != nil {
+			if errwrap.Contains(err, logical.ErrPermissionDenied.Error()) {
+				respondError(w, http.StatusForbidden, err)
+				return
+			}
+			respondError(w, http.StatusInternalServerError, err)
+			return
+		}
+
+		respondOk(w, nil)
+	})
+}
+
+func handleSysUnseal(core *vault.Core) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		switch r.Method {
+		case "PUT":
+		case "POST":
+		default:
+			respondError(w, http.StatusMethodNotAllowed, nil)
+			return
+		}
+
+		// Parse the request
+		var req UnsealRequest
+		if _, err := parseJSONRequest(core.PerfStandby(), r, w, &req); err != nil {
+			respondError(w, http.StatusBadRequest, err)
+			return
+		}
+
+		if req.Reset {
+			if !core.Sealed() {
+				respondError(w, http.StatusBadRequest, errors.New("vault is unsealed"))
+				return
+			}
+			core.ResetUnsealProcess()
+			handleSysSealStatusRaw(core, w, r)
+			return
+		}
+
+		if req.Key == "" {
+			respondError(
+				w, http.StatusBadRequest,
+				errors.New("'key' must be specified in request body as JSON, or 'reset' set to true"))
+			return
+		}
+
+		// Decode the key, which is base64 or hex encoded
+		min, max := core.BarrierKeyLength()
+		key, err := hex.DecodeString(req.Key)
+		// We check min and max here to ensure that a string that is base64
+		// encoded but also valid hex will not be valid and we instead base64
+		// decode it
+		if err != nil || len(key) < min || len(key) > max {
+			key, err = base64.StdEncoding.DecodeString(req.Key)
+			if err != nil {
+				respondError(
+					w, http.StatusBadRequest,
+					errors.New("'key' must be a valid hex or base64 string"))
+				return
+			}
+		}
+
+		// Attempt the unseal. If migrate was specified, the key should correspond
+		// to the old seal.
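+		// A client would typically drive this through the api package; a
+		// minimal sketch, assuming the UnsealOpts helper from
+		// github.com/hashicorp/vault/api:
+		//
+		//   client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: share, Migrate: true})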
+ if req.Migrate { + _, err = core.UnsealMigrate(key) + } else { + _, err = core.Unseal(key) + } + if err != nil { + switch { + case errwrap.ContainsType(err, new(vault.ErrInvalidKey)): + case errwrap.Contains(err, vault.ErrBarrierInvalidKey.Error()): + case errwrap.Contains(err, vault.ErrBarrierNotInit.Error()): + case errwrap.Contains(err, vault.ErrBarrierSealed.Error()): + case errwrap.Contains(err, consts.ErrStandby.Error()): + default: + respondError(w, http.StatusInternalServerError, err) + return + } + respondError(w, http.StatusBadRequest, err) + return + } + + // Return the seal status + handleSysSealStatusRaw(core, w, r) + }) +} + +func handleSysSealStatus(core *vault.Core) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + respondError(w, http.StatusMethodNotAllowed, nil) + return + } + + handleSysSealStatusRaw(core, w, r) + }) +} + +func handleSysSealStatusRaw(core *vault.Core, w http.ResponseWriter, r *http.Request) { + ctx := context.Background() + status, err := core.GetSealStatus(ctx, true) + if err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + + respondOk(w, status) +} + +// Note: because we didn't provide explicit tagging in the past we can't do it +// now because if it then no longer accepts capitalized versions it could break +// clients +type UnsealRequest struct { + Key string + Reset bool + Migrate bool +} diff --git a/http/sys_seal_test.go b/http/sys_seal_test.go new file mode 100644 index 0000000..ef5922d --- /dev/null +++ b/http/sys_seal_test.go @@ -0,0 +1,558 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "context" + "encoding/base64" + "encoding/hex" + "encoding/json" + "fmt" + "net/http" + "strconv" + "strings" + "testing" + + "github.com/go-test/deep" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/vault/seal" + "github.com/hashicorp/vault/version" +) + +func TestSysSealStatus(t *testing.T) { + core := vault.TestCore(t) + vault.TestCoreInit(t, core) + ln, addr := TestServer(t, core) + defer ln.Close() + + resp, err := http.Get(addr + "/v1/sys/seal-status") + if err != nil { + t.Fatalf("err: %s", err) + } + + var actual map[string]interface{} + expected := map[string]interface{}{ + "sealed": true, + "t": json.Number("3"), + "n": json.Number("3"), + "progress": json.Number("0"), + "nonce": "", + "type": "shamir", + "recovery_seal": false, + "initialized": true, + "migration": false, + "build_date": version.BuildDate, + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + if actual["version"] == nil { + t.Fatalf("expected version information") + } + expected["version"] = actual["version"] + if actual["cluster_name"] == nil { + delete(expected, "cluster_name") + } else { + expected["cluster_name"] = actual["cluster_name"] + } + if actual["cluster_id"] == nil { + delete(expected, "cluster_id") + } else { + expected["cluster_id"] = actual["cluster_id"] + } + if diff := deep.Equal(actual, expected); diff != nil { + t.Fatal(diff) + } +} + +func TestSysSealStatus_uninit(t *testing.T) { + core := vault.TestCore(t) + ln, addr := TestServer(t, core) + defer ln.Close() + + resp, err := http.Get(addr + "/v1/sys/seal-status") + if err != nil { + t.Fatalf("err: %s", err) + } + testResponseStatus(t, resp, 200) +} + +func TestSysSeal(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + 
ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPut(t, token, addr+"/v1/sys/seal", nil) + testResponseStatus(t, resp, 204) + + if !core.Sealed() { + t.Fatal("should be sealed") + } +} + +func TestSysSeal_unsealed(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPut(t, token, addr+"/v1/sys/seal", nil) + testResponseStatus(t, resp, 204) + + if !core.Sealed() { + t.Fatal("should be sealed") + } +} + +func TestSysUnseal(t *testing.T) { + core := vault.TestCore(t) + keys, _ := vault.TestCoreInit(t, core) + ln, addr := TestServer(t, core) + defer ln.Close() + + for i, key := range keys { + resp := testHttpPut(t, "", addr+"/v1/sys/unseal", map[string]interface{}{ + "key": hex.EncodeToString(key), + }) + + var actual map[string]interface{} + expected := map[string]interface{}{ + "sealed": true, + "t": json.Number("3"), + "n": json.Number("3"), + "progress": json.Number(fmt.Sprintf("%d", i+1)), + "nonce": "", + "type": "shamir", + "recovery_seal": false, + "initialized": true, + "migration": false, + "build_date": version.BuildDate, + } + if i == len(keys)-1 { + expected["sealed"] = false + expected["progress"] = json.Number("0") + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + if i < len(keys)-1 && (actual["nonce"] == nil || actual["nonce"].(string) == "") { + t.Fatalf("got nil nonce, actual is %#v", actual) + } else { + expected["nonce"] = actual["nonce"] + } + if actual["version"] == nil { + t.Fatalf("expected version information") + } + expected["version"] = actual["version"] + if actual["cluster_name"] == nil { + delete(expected, "cluster_name") + } else { + expected["cluster_name"] = actual["cluster_name"] + } + if actual["cluster_id"] == nil { + delete(expected, "cluster_id") + } else { + expected["cluster_id"] = actual["cluster_id"] + } + if diff := deep.Equal(actual, expected); diff != nil { + t.Fatal(diff) + } + } +} + +func subtestBadSingleKey(t *testing.T, seal vault.Seal) { + core := vault.TestCoreWithSeal(t, seal, false) + _, err := core.Initialize(context.Background(), &vault.InitParams{ + BarrierConfig: &vault.SealConfig{ + SecretShares: 1, + SecretThreshold: 1, + }, + RecoveryConfig: &vault.SealConfig{ + SecretShares: 1, + SecretThreshold: 1, + }, + }) + if err != nil { + t.Fatalf("err: %s", err) + } + + ln, addr := TestServer(t, core) + defer ln.Close() + + testCases := []struct { + description string + key string + }{ + // hex key tests + // hexadecimal strings have 2 symbols per byte; size(0xAA) == 1 byte + { + "short hex key", + strings.Repeat("AA", 8), + }, + { + "long hex key", + strings.Repeat("AA", 34), + }, + { + "uneven hex key byte length", + strings.Repeat("AA", 33), + }, + { + "valid hex key but wrong cluster", + "4482691dd3a710723c4f77c4920ee21b96c226bf4829fa6eb8e8262c180ae933", + }, + + // base64 key tests + // base64 strings have min. 
1 character per byte; size("m") == 1 byte + { + "short b64 key", + base64.StdEncoding.EncodeToString([]byte(strings.Repeat("m", 8))), + }, + { + "long b64 key", + base64.StdEncoding.EncodeToString([]byte(strings.Repeat("m", 34))), + }, + { + "uneven b64 key byte length", + base64.StdEncoding.EncodeToString([]byte(strings.Repeat("m", 33))), + }, + { + "valid b64 key but wrong cluster", + "RIJpHdOnEHI8T3fEkg7iG5bCJr9IKfpuuOgmLBgK6TM=", + }, + + // other key tests + { + "empty key", + "", + }, + { + "key with bad format", + "ThisKeyIsNeitherB64NorHex", + }, + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + resp := testHttpPut(t, "", addr+"/v1/sys/unseal", map[string]interface{}{ + "key": tc.key, + }) + + testResponseStatus(t, resp, 400) + }) + } +} + +func subtestBadMultiKey(t *testing.T, seal vault.Seal) { + numKeys := 3 + + core := vault.TestCoreWithSeal(t, seal, false) + _, err := core.Initialize(context.Background(), &vault.InitParams{ + BarrierConfig: &vault.SealConfig{ + SecretShares: numKeys, + SecretThreshold: numKeys, + }, + RecoveryConfig: &vault.SealConfig{ + SecretShares: numKeys, + SecretThreshold: numKeys, + }, + }) + if err != nil { + t.Fatalf("err: %s", err) + } + ln, addr := TestServer(t, core) + defer ln.Close() + + testCases := []struct { + description string + keys []string + }{ + { + "all unseal keys from another cluster", + []string{ + "b189d98fdec3a15bed9b1cce5088f82b92896696b788c07bdf03c73da08279a5e8", + "0fa98232f034177d8d9c2824899a2ac1e55dc6799348533e10510b856aef99f61a", + "5344f5caa852f9ba1967d9623ed286a45ea7c4a529522d25f05d29ff44f17930ac", + }, + }, + { + "mixing unseal keys from different cluster, different share config", + []string{ + "b189d98fdec3a15bed9b1cce5088f82b92896696b788c07bdf03c73da08279a5e8", + "0fa98232f034177d8d9c2824899a2ac1e55dc6799348533e10510b856aef99f61a", + "e04ea3020838c2050c4a169d7ba4d30e034eec8e83e8bed9461bf2646ee412c0", + }, + }, + { + "mixing unseal keys from different clusters, similar share config", + []string{ + "b189d98fdec3a15bed9b1cce5088f82b92896696b788c07bdf03c73da08279a5e8", + "0fa98232f034177d8d9c2824899a2ac1e55dc6799348533e10510b856aef99f61a", + "413f80521b393aa6c4e42e9a3a3ab7f00c2002b2c3bf1e273fc6f363f35f2a378b", + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.description, func(t *testing.T) { + for i, key := range tc.keys { + resp := testHttpPut(t, "", addr+"/v1/sys/unseal", map[string]interface{}{ + "key": key, + }) + + if i == numKeys-1 { + // last key + testResponseStatus(t, resp, 400) + } else { + // unseal in progress + testResponseStatus(t, resp, 200) + } + + } + }) + } +} + +func TestSysUnseal_BadKeyNewShamir(t *testing.T) { + seal := vault.NewTestSeal(t, + &seal.TestSealOpts{StoredKeys: seal.StoredKeysSupportedShamirRoot}) + + subtestBadSingleKey(t, seal) + subtestBadMultiKey(t, seal) +} + +func TestSysUnseal_BadKeyAutoUnseal(t *testing.T) { + seal := vault.NewTestSeal(t, + &seal.TestSealOpts{StoredKeys: seal.StoredKeysSupportedGeneric}) + + subtestBadSingleKey(t, seal) + subtestBadMultiKey(t, seal) +} + +func TestSysUnseal_Reset(t *testing.T) { + core := vault.TestCore(t) + ln, addr := TestServer(t, core) + defer ln.Close() + + thresh := 3 + resp := testHttpPut(t, "", addr+"/v1/sys/init", map[string]interface{}{ + "secret_shares": 5, + "secret_threshold": thresh, + }) + + var actual map[string]interface{} + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + keysRaw, ok := actual["keys"] + if !ok { + t.Fatalf("no keys: %#v", actual) + } + for i, key 
:= range keysRaw.([]interface{}) { + if i > thresh-2 { + break + } + + resp := testHttpPut(t, "", addr+"/v1/sys/unseal", map[string]interface{}{ + "key": key.(string), + }) + + var actual map[string]interface{} + expected := map[string]interface{}{ + "sealed": true, + "t": json.Number("3"), + "n": json.Number("5"), + "progress": json.Number(strconv.Itoa(i + 1)), + "type": "shamir", + "recovery_seal": false, + "initialized": true, + "migration": false, + "build_date": version.BuildDate, + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + if actual["version"] == nil { + t.Fatalf("expected version information") + } + expected["version"] = actual["version"] + if actual["nonce"] == "" && expected["sealed"].(bool) { + t.Fatalf("expected a nonce") + } + expected["nonce"] = actual["nonce"] + if actual["cluster_name"] == nil { + delete(expected, "cluster_name") + } else { + expected["cluster_name"] = actual["cluster_name"] + } + if actual["cluster_id"] == nil { + delete(expected, "cluster_id") + } else { + expected["cluster_id"] = actual["cluster_id"] + } + if diff := deep.Equal(actual, expected); diff != nil { + t.Fatal(diff) + } + } + + resp = testHttpPut(t, "", addr+"/v1/sys/unseal", map[string]interface{}{ + "reset": true, + }) + + actual = map[string]interface{}{} + expected := map[string]interface{}{ + "sealed": true, + "t": json.Number("3"), + "n": json.Number("5"), + "progress": json.Number("0"), + "type": "shamir", + "recovery_seal": false, + "initialized": true, + "build_date": version.BuildDate, + "migration": false, + } + testResponseStatus(t, resp, 200) + testResponseBody(t, resp, &actual) + if actual["version"] == nil { + t.Fatalf("expected version information") + } + expected["version"] = actual["version"] + expected["nonce"] = actual["nonce"] + if actual["cluster_name"] == nil { + delete(expected, "cluster_name") + } else { + expected["cluster_name"] = actual["cluster_name"] + } + if actual["cluster_id"] == nil { + delete(expected, "cluster_id") + } else { + expected["cluster_id"] = actual["cluster_id"] + } + if diff := deep.Equal(actual, expected); diff != nil { + t.Fatal(diff) + } +} + +// Test Seal's permissions logic, which is slightly different than normal code +// paths in that it queries the ACL rather than having checkToken do it. This +// is because it was abusing RootPaths in logical_system, but that caused some +// haywire with code paths that expected there to be an actual corresponding +// logical.Path for it. This way is less hacky, but this test ensures that we +// have not opened up a permissions hole. 
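+// In short, sealing via sys/seal requires both the "update" and "sudo"
+// capabilities on the path; the policy progression below (read-only, then
+// update-only, then sudo-only, then update+sudo) checks that each partial
+// grant is rejected with a 403 before the full grant succeeds with a 204.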
+func TestSysSeal_Permissions(t *testing.T) { + core, _, root := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, root) + + // Set the 'test' policy object to permit write access to sys/seal + req := &logical.Request{ + Operation: logical.UpdateOperation, + Path: "sys/policy/test", + Data: map[string]interface{}{ + "rules": `path "sys/seal" { capabilities = ["read"] }`, + }, + ClientToken: root, + } + resp, err := core.HandleRequest(namespace.RootContext(nil), req) + if err != nil { + t.Fatalf("err: %v", err) + } + if resp == nil || resp.IsError() { + t.Fatalf("bad: %#v", resp) + } + + // Create a non-root token with access to that policy + req.Path = "auth/token/create" + req.Data = map[string]interface{}{ + "id": "child", + "policies": []string{"test"}, + } + + resp, err = core.HandleRequest(namespace.RootContext(nil), req) + if err != nil { + t.Fatalf("err: %v %v", err, resp) + } + if resp.Auth.ClientToken != "child" { + t.Fatalf("bad: %#v", resp) + } + + // We must go through the HTTP interface since seal doesn't go through HandleRequest + + // We expect this to fail since it needs update and sudo + httpResp := testHttpPut(t, "child", addr+"/v1/sys/seal", nil) + testResponseStatus(t, httpResp, 403) + + // Now modify to add update capability + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "sys/policy/test", + Data: map[string]interface{}{ + "rules": `path "sys/seal" { capabilities = ["update"] }`, + }, + ClientToken: root, + } + resp, err = core.HandleRequest(namespace.RootContext(nil), req) + if err != nil { + t.Fatalf("err: %v", err) + } + if resp == nil || resp.IsError() { + t.Fatalf("bad: %#v", resp) + } + + // We expect this to fail since it needs sudo + httpResp = testHttpPut(t, "child", addr+"/v1/sys/seal", nil) + testResponseStatus(t, httpResp, 403) + + // Now modify to just sudo capability + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "sys/policy/test", + Data: map[string]interface{}{ + "rules": `path "sys/seal" { capabilities = ["sudo"] }`, + }, + ClientToken: root, + } + resp, err = core.HandleRequest(namespace.RootContext(nil), req) + if err != nil { + t.Fatalf("err: %v", err) + } + if resp == nil || resp.IsError() { + t.Fatalf("bad: %#v", resp) + } + + // We expect this to fail since it needs update + httpResp = testHttpPut(t, "child", addr+"/v1/sys/seal", nil) + testResponseStatus(t, httpResp, 403) + + // Now modify to add all needed capabilities + req = &logical.Request{ + Operation: logical.UpdateOperation, + Path: "sys/policy/test", + Data: map[string]interface{}{ + "rules": `path "sys/seal" { capabilities = ["update", "sudo"] }`, + }, + ClientToken: root, + } + resp, err = core.HandleRequest(namespace.RootContext(nil), req) + if err != nil { + t.Fatalf("err: %v", err) + } + if resp == nil || resp.IsError() { + t.Fatalf("bad: %#v", resp) + } + + // We expect this to work + httpResp = testHttpPut(t, "child", addr+"/v1/sys/seal", nil) + testResponseStatus(t, httpResp, 204) +} + +func TestSysStepDown(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPut(t, token, addr+"/v1/sys/step-down", nil) + testResponseStatus(t, resp, 204) +} diff --git a/http/sys_wrapping_test.go b/http/sys_wrapping_test.go new file mode 100644 index 0000000..c991bd2 --- /dev/null +++ b/http/sys_wrapping_test.go @@ -0,0 +1,389 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "encoding/json" + "errors" + "reflect" + "testing" + "time" + + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/vault" +) + +// Test wrapping functionality +func TestHTTP_Wrapping(t *testing.T) { + cluster := vault.NewTestCluster(t, &vault.CoreConfig{}, &vault.TestClusterOptions{ + HandlerFunc: Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + cores := cluster.Cores + + // make it easy to get access to the active + core := cores[0].Core + vault.TestWaitActive(t, core) + + client := cores[0].Client + client.SetToken(cluster.RootToken) + + // Write a value that we will use with wrapping for lookup + _, err := client.Logical().Write("secret/foo", map[string]interface{}{ + "zip": "zap", + }) + if err != nil { + t.Fatal(err) + } + + // Set a wrapping lookup function for reads on that path + client.SetWrappingLookupFunc(func(operation, path string) string { + if operation == "GET" && path == "secret/foo" { + return "5m" + } + + return api.DefaultWrappingLookupFunc(operation, path) + }) + + // First test: basic things that should fail, lookup edition + // Root token isn't a wrapping token + _, err = client.Logical().Write("sys/wrapping/lookup", nil) + if err == nil { + t.Fatal("expected error") + } + // Not supplied + _, err = client.Logical().Write("sys/wrapping/lookup", map[string]interface{}{ + "foo": "bar", + }) + if err == nil { + t.Fatal("expected error") + } + // Nonexistent token isn't a wrapping token + _, err = client.Logical().Write("sys/wrapping/lookup", map[string]interface{}{ + "token": "bar", + }) + if err == nil { + t.Fatal("expected error") + } + + // Second: basic things that should fail, unwrap edition + // Root token isn't a wrapping token + _, err = client.Logical().Unwrap(cluster.RootToken) + if err == nil { + t.Fatal("expected error") + } + // Root token isn't a wrapping token + _, err = client.Logical().Write("sys/wrapping/unwrap", nil) + if err == nil { + t.Fatal("expected error") + } + // Not supplied + _, err = client.Logical().Write("sys/wrapping/unwrap", map[string]interface{}{ + "foo": "bar", + }) + if err == nil { + t.Fatal("expected error") + } + // Nonexistent token isn't a wrapping token + _, err = client.Logical().Write("sys/wrapping/unwrap", map[string]interface{}{ + "token": "bar", + }) + if err == nil { + t.Fatal("expected error") + } + + // + // Test lookup + // + + // Create a wrapping token + secret, err := client.Logical().Read("secret/foo") + if err != nil { + t.Fatal(err) + } + if secret == nil || secret.WrapInfo == nil { + t.Fatal("secret or wrap info is nil") + } + wrapInfo := secret.WrapInfo + + // Test this twice to ensure no ill effect to the wrapping token as a result of the lookup + for i := 0; i < 2; i++ { + secret, err = client.Logical().Write("sys/wrapping/lookup", map[string]interface{}{ + "token": wrapInfo.Token, + }) + if err != nil { + t.Fatal(err) + } + if secret == nil || secret.Data == nil { + t.Fatal("secret or secret data is nil") + } + creationTTL, _ := secret.Data["creation_ttl"].(json.Number).Int64() + if int(creationTTL) != wrapInfo.TTL { + t.Fatalf("mismatched ttls: %d vs %d", creationTTL, wrapInfo.TTL) + } + if secret.Data["creation_time"].(string) != wrapInfo.CreationTime.Format(time.RFC3339Nano) { + t.Fatalf("mismatched creation times: %q vs %q", secret.Data["creation_time"].(string), wrapInfo.CreationTime.Format(time.RFC3339Nano)) + } + } + + // + // Test unwrap + // + + // Create a 
wrapping token
+	secret, err = client.Logical().Read("secret/foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if secret == nil || secret.WrapInfo == nil {
+		t.Fatal("secret or wrap info is nil")
+	}
+	wrapInfo = secret.WrapInfo
+
+	// Test unwrap via the client token
+	client.SetToken(wrapInfo.Token)
+	secret, err = client.Logical().Write("sys/wrapping/unwrap", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if secret == nil || secret.Data == nil {
+		t.Fatal("secret or secret data is nil")
+	}
+	if secret.Warnings != nil {
+		t.Fatalf("Warnings found: %v", secret.Warnings)
+	}
+	ret1 := secret
+	// Should be expired and fail
+	_, err = client.Logical().Write("sys/wrapping/unwrap", nil)
+	if err == nil {
+		t.Fatal("expected err")
+	}
+
+	// Create a wrapping token
+	client.SetToken(cluster.RootToken)
+	secret, err = client.Logical().Read("secret/foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if secret == nil || secret.WrapInfo == nil {
+		t.Fatal("secret or wrap info is nil")
+	}
+	wrapInfo = secret.WrapInfo
+
+	// Test as a separate token
+	secret, err = client.Logical().Write("sys/wrapping/unwrap", map[string]interface{}{
+		"token": wrapInfo.Token,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	ret2 := secret
+	// Should be expired and fail
+	_, err = client.Logical().Write("sys/wrapping/unwrap", map[string]interface{}{
+		"token": wrapInfo.Token,
+	})
+	if err == nil {
+		t.Fatal("expected err")
+	}
+
+	// Create a wrapping token
+	secret, err = client.Logical().Read("secret/foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if secret == nil || secret.WrapInfo == nil {
+		t.Fatal("secret or wrap info is nil")
+	}
+	wrapInfo = secret.WrapInfo
+
+	// Read response directly
+	client.SetToken(wrapInfo.Token)
+	secret, err = client.Logical().Read("cubbyhole/response")
+	if err != nil {
+		t.Fatal(err)
+	}
+	ret3 := secret
+	// Should be expired and fail
+	_, err = client.Logical().Write("cubbyhole/response", nil)
+	if err == nil {
+		t.Fatal("expected err")
+	}
+
+	// Create a wrapping token
+	client.SetToken(cluster.RootToken)
+	secret, err = client.Logical().Read("secret/foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if secret == nil || secret.WrapInfo == nil {
+		t.Fatal("secret or wrap info is nil")
+	}
+	wrapInfo = secret.WrapInfo
+
+	// Read via Unwrap method
+	secret, err = client.Logical().Unwrap(wrapInfo.Token)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if secret.Warnings != nil {
+		t.Fatalf("Warnings found: %v", secret.Warnings)
+	}
+	ret4 := secret
+	// Should be expired and fail
+	_, err = client.Logical().Unwrap(wrapInfo.Token)
+	if err == nil {
+		t.Fatal("expected err")
+	}
+
+	if !reflect.DeepEqual(ret1.Data, map[string]interface{}{
+		"zip": "zap",
+	}) {
+		t.Fatalf("ret1 data did not match expected: %#v", ret1.Data)
+	}
+	if !reflect.DeepEqual(ret2.Data, map[string]interface{}{
+		"zip": "zap",
+	}) {
+		t.Fatalf("ret2 data did not match expected: %#v", ret2.Data)
+	}
+	var ret3Secret api.Secret
+	err = jsonutil.DecodeJSON([]byte(ret3.Data["response"].(string)), &ret3Secret)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !reflect.DeepEqual(ret3Secret.Data, map[string]interface{}{
+		"zip": "zap",
+	}) {
+		t.Fatalf("ret3 data did not match expected: %#v", ret3Secret.Data)
+	}
+	if !reflect.DeepEqual(ret4.Data, map[string]interface{}{
+		"zip": "zap",
+	}) {
+		t.Fatalf("ret4 data did not match expected: %#v", ret4.Data)
+	}
+
+	//
+	// Custom wrapping
+	//
+
+	client.SetToken(cluster.RootToken)
+	data := map[string]interface{}{
+		"zip": "zap",
+		"three": json.Number("2"),
+	}
+
+	// Don't set a request TTL on that path, 
+	client.SetWrappingLookupFunc(func(operation, path string) string {
+		return ""
+	})
+	secret, err = client.Logical().Write("sys/wrapping/wrap", data)
+	if err == nil {
+		t.Fatal("expected error")
+	}
+
+	// Re-set the lookup function
+	client.SetWrappingLookupFunc(func(operation, path string) string {
+		if operation == "GET" && path == "secret/foo" {
+			return "5m"
+		}
+
+		return api.DefaultWrappingLookupFunc(operation, path)
+	})
+	secret, err = client.Logical().Write("sys/wrapping/wrap", data)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if secret.Warnings != nil {
+		t.Fatalf("Warnings found: %v", secret.Warnings)
+	}
+	secret, err = client.Logical().Unwrap(secret.WrapInfo.Token)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if secret.Warnings != nil {
+		t.Fatalf("Warnings found: %v", secret.Warnings)
+	}
+	if !reflect.DeepEqual(data, secret.Data) {
+		t.Fatalf("custom wrap did not match expected: %#v", secret.Data)
+	}
+
+	//
+	// Test rewrap
+	//
+
+	// Create a wrapping token
+	secret, err = client.Logical().Read("secret/foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if secret == nil || secret.WrapInfo == nil {
+		t.Fatal("secret or wrap info is nil")
+	}
+	wrapInfo = secret.WrapInfo
+
+	// Check for correct CreationPath before rewrap
+	if wrapInfo.CreationPath != "secret/foo" {
+		t.Fatalf("error on wrapInfo.CreationPath: expected: secret/foo, got: %s", wrapInfo.CreationPath)
+	}
+
+	// Test rewrapping
+	secret, err = client.Logical().Write("sys/wrapping/rewrap", map[string]interface{}{
+		"token": wrapInfo.Token,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if secret.Warnings != nil {
+		t.Fatalf("Warnings found: %v", secret.Warnings)
+	}
+
+	// Check for correct CreationPath after rewrap; the new wrap info comes
+	// back on the rewrap response, not on the original wrapInfo
+	if secret.WrapInfo.CreationPath != "secret/foo" {
+		t.Fatalf("error on CreationPath after rewrap: expected: secret/foo, got: %s", secret.WrapInfo.CreationPath)
+	}
+
+	// Should be expired and fail
+	_, err = client.Logical().Write("sys/wrapping/unwrap", map[string]interface{}{
+		"token": wrapInfo.Token,
+	})
+	if err == nil {
+		t.Fatal("expected err")
+	}
+
+	// Attempt unwrapping the rewrapped token
+	wrapToken := secret.WrapInfo.Token
+	secret, err = client.Logical().Unwrap(wrapToken)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Should be expired and fail
+	_, err = client.Logical().Unwrap(wrapToken)
+	if err == nil {
+		t.Fatal("expected err")
+	}
+
+	if !reflect.DeepEqual(secret.Data, map[string]interface{}{
+		"zip": "zap",
+	}) {
+		t.Fatalf("secret data did not match expected: %#v", secret.Data)
+	}
+
+	// Ensure that wrapping lookup without a client token responds correctly
+	client.ClearToken()
+	secret, err = client.Logical().Read("sys/wrapping/lookup")
+	if secret != nil {
+		t.Fatalf("expected no response: %#v", secret)
+	}
+
+	if err == nil {
+		t.Fatal("expected error")
+	}
+
+	var respError *api.ResponseError
+	if !errors.As(err, &respError) {
+		t.Fatalf("unexpected error type: %v", err)
+	}
+	if respError.StatusCode != 403 {
+		t.Fatalf("expected 403 response, actual: %d", respError.StatusCode)
+	}
+}
diff --git a/http/testing.go b/http/testing.go
new file mode 100644
index 0000000..9515399
--- /dev/null
+++ b/http/testing.go
@@ -0,0 +1,76 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "fmt" + "net" + "net/http" + "testing" + + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/vault" +) + +func TestListener(tb testing.TB) (net.Listener, string) { + fail := func(format string, args ...interface{}) { + panic(fmt.Sprintf(format, args...)) + } + if tb != nil { + fail = tb.Fatalf + } + + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + fail("err: %s", err) + } + addr := "http://" + ln.Addr().String() + return ln, addr +} + +func TestServerWithListenerAndProperties(tb testing.TB, ln net.Listener, addr string, core *vault.Core, props *vault.HandlerProperties) { + // Create a muxer to handle our requests so that we can authenticate + // for tests. + mux := http.NewServeMux() + mux.Handle("/_test/auth", http.HandlerFunc(testHandleAuth)) + mux.Handle("/", Handler.Handler(props)) + + server := &http.Server{ + Addr: ln.Addr().String(), + Handler: mux, + ErrorLog: core.Logger().StandardLogger(nil), + } + go server.Serve(ln) +} + +func TestServerWithListener(tb testing.TB, ln net.Listener, addr string, core *vault.Core) { + ip, _, _ := net.SplitHostPort(ln.Addr().String()) + + // Create a muxer to handle our requests so that we can authenticate + // for tests. + props := &vault.HandlerProperties{ + Core: core, + // This is needed for testing custom response headers + ListenerConfig: &configutil.Listener{ + Address: ip, + }, + } + TestServerWithListenerAndProperties(tb, ln, addr, core, props) +} + +func TestServer(tb testing.TB, core *vault.Core) (net.Listener, string) { + ln, addr := TestListener(tb) + TestServerWithListener(tb, ln, addr, core) + return ln, addr +} + +func TestServerAuth(tb testing.TB, addr string, token string) { + if _, err := http.Get(addr + "/_test/auth?token=" + token); err != nil { + tb.Fatalf("error authenticating: %s", err) + } +} + +func testHandleAuth(w http.ResponseWriter, req *http.Request) { + respondOk(w, nil) +} diff --git a/http/unwrapping_raw_body_test.go b/http/unwrapping_raw_body_test.go new file mode 100644 index 0000000..e1ad0df --- /dev/null +++ b/http/unwrapping_raw_body_test.go @@ -0,0 +1,65 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "testing" + + kv "github.com/hashicorp/vault-plugin-secrets-kv" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" +) + +func TestUnwrapping_Raw_Body(t *testing.T) { + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "kv": kv.Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + core := cluster.Cores[0].Core + vault.TestWaitActive(t, core) + client := cluster.Cores[0].Client + + // Mount a k/v backend, version 2 + err := client.Sys().Mount("kv", &api.MountInput{ + Type: "kv", + Options: map[string]string{"version": "2"}, + }) + if err != nil { + t.Fatal(err) + } + + client.SetWrappingLookupFunc(func(operation, path string) string { + return "5m" + }) + secret, err := client.Logical().Write("kv/foo/bar", map[string]interface{}{ + "a": "b", + }) + if err != nil { + t.Fatal(err) + } + if secret == nil { + t.Fatal("nil secret") + } + if secret.WrapInfo == nil { + t.Fatal("nil wrap info") + } + wrapToken := secret.WrapInfo.Token + + client.SetWrappingLookupFunc(nil) + secret, err = client.Logical().Unwrap(wrapToken) + if err != nil { + t.Fatal(err) + } + if len(secret.Warnings) != 1 { + t.Fatal("expected 1 warning") + } +} diff --git a/http/util.go b/http/util.go new file mode 100644 index 0000000..87b593b --- /dev/null +++ b/http/util.go @@ -0,0 +1,190 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package http + +import ( + "bytes" + "context" + "fmt" + "io" + "net" + "net/http" + "strings" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/vault/sdk/logical" + + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/vault/quotas" +) + +var ( + adjustRequest = func(c *vault.Core, r *http.Request) (*http.Request, int) { + return r, 0 + } + + genericWrapping = func(core *vault.Core, in http.Handler, props *vault.HandlerProperties) http.Handler { + // Wrap the help wrapped handler with another layer with a generic + // handler + return wrapGenericHandler(core, in, props) + } + + additionalRoutes = func(mux *http.ServeMux, core *vault.Core) {} + + nonVotersAllowed = false + + adjustResponse = func(core *vault.Core, w http.ResponseWriter, req *logical.Request) {} +) + +func wrapMaxRequestSizeHandler(handler http.Handler, props *vault.HandlerProperties) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var maxRequestSize int64 + if props.ListenerConfig != nil { + maxRequestSize = props.ListenerConfig.MaxRequestSize + } + if maxRequestSize == 0 { + maxRequestSize = DefaultMaxRequestSize + } + ctx := r.Context() + originalBody := r.Body + if maxRequestSize > 0 { + r.Body = http.MaxBytesReader(w, r.Body, maxRequestSize) + } + ctx = logical.CreateContextOriginalBody(ctx, originalBody) + r = r.WithContext(ctx) + + handler.ServeHTTP(w, r) + }) +} + +func rateLimitQuotaWrapping(handler http.Handler, core *vault.Core) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ns, err := namespace.FromContext(r.Context()) + if err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + + // We don't want to do buildLogicalRequestNoAuth here because, if the + // request gets allowed by the quota, the same function will get called + // 
again, which is not desired.
+		path, status, err := buildLogicalPath(r)
+		if err != nil || status != 0 {
+			respondError(w, status, err)
+			return
+		}
+		mountPath := strings.TrimPrefix(core.MatchingMount(r.Context(), path), ns.Path)
+
+		quotaReq := &quotas.Request{
+			Type:          quotas.TypeRateLimit,
+			Path:          path,
+			MountPath:     mountPath,
+			NamespacePath: ns.Path,
+			ClientAddress: parseRemoteIPAddress(r),
+		}
+
+		// This checks if any role based quota is required (LCQ or RLQ).
+		requiresResolveRole, err := core.ResolveRoleForQuotas(r.Context(), quotaReq)
+		if err != nil {
+			core.Logger().Error("failed to lookup quotas", "path", path, "error", err)
+			respondError(w, http.StatusInternalServerError, err)
+			return
+		}
+
+		// If any role-based quotas are enabled for this namespace/mount, just
+		// do the role resolution once here.
+		if requiresResolveRole {
+			buf := bytes.Buffer{}
+			teeReader := io.TeeReader(r.Body, &buf)
+			role := core.DetermineRoleFromLoginRequestFromReader(r.Context(), mountPath, teeReader)
+
+			// Reset the body if it was read
+			if buf.Len() > 0 {
+				r.Body = io.NopCloser(&buf)
+				originalBody, ok := logical.ContextOriginalBodyValue(r.Context())
+				if ok {
+					r = r.WithContext(logical.CreateContextOriginalBody(r.Context(), newMultiReaderCloser(&buf, originalBody)))
+				}
+			}
+			// add an entry to the context to prevent recalculating request role unnecessarily
+			r = r.WithContext(context.WithValue(r.Context(), logical.CtxKeyRequestRole{}, role))
+			quotaReq.Role = role
+		}
+
+		quotaResp, err := core.ApplyRateLimitQuota(r.Context(), quotaReq)
+		if err != nil {
+			core.Logger().Error("failed to apply quota", "path", path, "error", err)
+			respondError(w, http.StatusInternalServerError, err)
+			return
+		}
+
+		if core.RateLimitResponseHeadersEnabled() {
+			for h, v := range quotaResp.Headers {
+				w.Header().Set(h, v)
+			}
+		}
+
+		if !quotaResp.Allowed {
+			quotaErr := fmt.Errorf("request path %q: %w", path, quotas.ErrRateLimitQuotaExceeded)
+			respondError(w, http.StatusTooManyRequests, quotaErr)
+
+			if core.Logger().IsTrace() {
+				core.Logger().Trace("request rejected due to rate limit quota violation", "request_path", path)
+			}
+
+			if core.RateLimitAuditLoggingEnabled() {
+				req, _, status, err := buildLogicalRequestNoAuth(core.PerfStandby(), w, r)
+				if err != nil || status != 0 {
+					respondError(w, status, err)
+					return
+				}
+
+				err = core.AuditLogger().AuditRequest(r.Context(), &logical.LogInput{
+					Request:  req,
+					OuterErr: quotaErr,
+				})
+				if err != nil {
+					core.Logger().Warn("failed to audit log request rejection caused by rate limit quota violation", "error", err)
+				}
+			}
+
+			return
+		}
+
+		handler.ServeHTTP(w, r)
+		return
+	})
+}
+
+func parseRemoteIPAddress(r *http.Request) string {
+	ip, _, err := net.SplitHostPort(r.RemoteAddr)
+	if err != nil {
+		return ""
+	}
+
+	return ip
+}
+
+type multiReaderCloser struct {
+	readers []io.Reader
+	io.Reader
+}
+
+func newMultiReaderCloser(readers ...io.Reader) *multiReaderCloser {
+	return &multiReaderCloser{
+		readers: readers,
+		Reader:  io.MultiReader(readers...),
+	}
+}
+
+func (m *multiReaderCloser) Close() error {
+	var err error
+	for _, r := range m.readers {
+		if c, ok := r.(io.Closer); ok {
+			err = multierror.Append(err, c.Close())
+		}
+	}
+	return err
+}
diff --git a/http/web_ui/.gitkeep b/http/web_ui/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/internal/go118_sha1_patch.go b/internal/go118_sha1_patch.go
new file mode 100644
index 0000000..fc2ccf2
--- /dev/null
+++ b/internal/go118_sha1_patch.go
@@ -0,0 +1,59 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package internal
+
+import (
+	"fmt"
+	"os"
+	"sync"
+	_ "unsafe" // for go:linkname
+
+	goversion "github.com/hashicorp/go-version"
+	"github.com/hashicorp/vault/version"
+)
+
+const sha1PatchVersionsBefore = "1.12.0"
+
+var patchSha1 sync.Once
+
+//go:linkname debugAllowSHA1 crypto/x509.debugAllowSHA1
+var debugAllowSHA1 bool
+
+// PatchSha1 patches Go 1.18+ to allow certificates with signatures containing SHA-1 hashes.
+// It is safe to call this function multiple times.
+// This is necessary to allow Vault 1.10 and 1.11 to work with Go 1.18+ without breaking backwards compatibility
+// with these certificates. See https://go.dev/doc/go1.18#sha1 and
+// https://developer.hashicorp.com/vault/docs/deprecation/faq#q-what-is-the-impact-of-removing-support-for-x-509-certificates-with-signatures-that-use-sha-1
+// for more details.
+// TODO: remove when Vault <=1.11 is no longer supported
+func PatchSha1() {
+	patchSha1.Do(func() {
+		// for Go 1.19.4 and later
+		godebug := os.Getenv("GODEBUG")
+		if godebug != "" {
+			godebug += ","
+		}
+		godebug += "x509sha1=1"
+		os.Setenv("GODEBUG", godebug)
+
+		// for Go 1.19.3 and earlier, patch the variable
+		patchBefore, err := goversion.NewSemver(sha1PatchVersionsBefore)
+		if err != nil {
+			panic(err)
+		}
+
+		patch := false
+		v, err := goversion.NewSemver(version.GetVersion().Version)
+		if err == nil {
+			patch = v.LessThan(patchBefore)
+		} else {
+			fmt.Fprintf(os.Stderr, "Cannot parse version %s; going to apply SHA-1 deprecation patch workaround\n", version.GetVersion().Version)
+			patch = true
+		}
+
+		if patch {
+			debugAllowSHA1 = true
+		}
+	})
+}
diff --git a/internalshared/README.md b/internalshared/README.md
new file mode 100644
index 0000000..dd2ee8a
--- /dev/null
+++ b/internalshared/README.md
@@ -0,0 +1,14 @@
+HashiCorp-internal libs
+=================
+
+Do not use these unless you know what you're doing.
+
+These libraries are used by other HashiCorp software to reduce code duplication
+and increase consistency. They are not libraries needed by Vault plugins --
+those are in the sdk/ module.
+
+There are no compatibility guarantees. Things in here may change or move or
+disappear at any time.
+
+If you are a Vault plugin author and think you need a library in here in your
+plugin, please open an issue for discussion.
diff --git a/internalshared/configutil/config.go b/internalshared/configutil/config.go
new file mode 100644
index 0000000..04f94a8
--- /dev/null
+++ b/internalshared/configutil/config.go
@@ -0,0 +1,279 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package configutil
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/hashicorp/go-secure-stdlib/parseutil"
+	"github.com/hashicorp/hcl"
+	"github.com/hashicorp/hcl/hcl/ast"
+	"github.com/hashicorp/hcl/hcl/token"
+)
+
+// SharedConfig contains some shared values
+type SharedConfig struct {
+	FoundKeys  []string     `hcl:",decodedFields"`
+	UnusedKeys UnusedKeyMap `hcl:",unusedKeyPositions"`
+	Sections   map[string][]token.Pos
+
+	EntSharedConfig
+
+	Listeners []*Listener `hcl:"-"`
+
+	UserLockouts []*UserLockout `hcl:"-"`
+
+	Seals   []*KMS   `hcl:"-"`
+	Entropy *Entropy `hcl:"-"`
+
+	DisableMlock    bool        `hcl:"-"`
+	DisableMlockRaw interface{} `hcl:"disable_mlock"`
+
+	Telemetry *Telemetry `hcl:"telemetry"`
+
+	HCPLinkConf *HCPLinkConfig `hcl:"cloud"`
+
+	DefaultMaxRequestDuration    time.Duration `hcl:"-"`
+	DefaultMaxRequestDurationRaw interface{}   `hcl:"default_max_request_duration"`
+
+	// LogFormat specifies the log format. Valid values are "standard" and
+	// "json". The values are case-insensitive. If no log format is specified,
+	// then standard format will be used.
+	LogFile              string      `hcl:"log_file"`
+	LogFormat            string      `hcl:"log_format"`
+	LogLevel             string      `hcl:"log_level"`
+	LogRotateBytes       int         `hcl:"log_rotate_bytes"`
+	LogRotateBytesRaw    interface{} `hcl:"log_rotate_bytes"`
+	LogRotateDuration    string      `hcl:"log_rotate_duration"`
+	LogRotateMaxFiles    int         `hcl:"log_rotate_max_files"`
+	LogRotateMaxFilesRaw interface{} `hcl:"log_rotate_max_files"`
+
+	PidFile string `hcl:"pid_file"`
+
+	ClusterName string `hcl:"cluster_name"`
+
+	AdministrativeNamespacePath string `hcl:"administrative_namespace_path"`
+}
+
+func ParseConfig(d string) (*SharedConfig, error) {
+	// Parse!
+	obj, err := hcl.Parse(d)
+	if err != nil {
+		return nil, err
+	}
+
+	// Start building the result
+	var result SharedConfig
+
+	if err := hcl.DecodeObject(&result, obj); err != nil {
+		return nil, err
+	}
+
+	if result.DefaultMaxRequestDurationRaw != nil {
+		if result.DefaultMaxRequestDuration, err = parseutil.ParseDurationSecond(result.DefaultMaxRequestDurationRaw); err != nil {
+			return nil, err
+		}
+		result.FoundKeys = append(result.FoundKeys, "DefaultMaxRequestDuration")
+		result.DefaultMaxRequestDurationRaw = nil
+	}
+
+	if result.DisableMlockRaw != nil {
+		if result.DisableMlock, err = parseutil.ParseBool(result.DisableMlockRaw); err != nil {
+			return nil, err
+		}
+		result.FoundKeys = append(result.FoundKeys, "DisableMlock")
+		result.DisableMlockRaw = nil
+	}
+
+	list, ok := obj.Node.(*ast.ObjectList)
+	if !ok {
+		return nil, fmt.Errorf("error parsing: file doesn't contain a root object")
+	}
+
+	if o := list.Filter("hsm"); len(o.Items) > 0 {
+		result.found("hsm", "hsm")
+		if err := parseKMS(&result.Seals, o, "hsm", 2); err != nil {
+			return nil, fmt.Errorf("error parsing 'hsm': %w", err)
+		}
+	}
+
+	if o := list.Filter("seal"); len(o.Items) > 0 {
+		result.found("seal", "Seal")
+		if err := parseKMS(&result.Seals, o, "seal", 3); err != nil {
+			return nil, fmt.Errorf("error parsing 'seal': %w", err)
+		}
+	}
+
+	if o := list.Filter("kms"); len(o.Items) > 0 {
+		result.found("kms", "Seal")
+		if err := parseKMS(&result.Seals, o, "kms", 3); err != nil {
+			return nil, fmt.Errorf("error parsing 'kms': %w", err)
+		}
+	}
+
+	if o := list.Filter("entropy"); len(o.Items) > 0 {
+		result.found("entropy", "Entropy")
+		if err := ParseEntropy(&result, o, "entropy"); err != nil {
+			return nil, fmt.Errorf("error parsing 'entropy': %w", err)
+		}
+	}
+
+	if o := list.Filter("listener"); len(o.Items) > 0 {
+		
result.found("listener", "Listener") + if err := ParseListeners(&result, o); err != nil { + return nil, fmt.Errorf("error parsing 'listener': %w", err) + } + } + + if o := list.Filter("user_lockout"); len(o.Items) > 0 { + result.found("user_lockout", "UserLockout") + if err := ParseUserLockouts(&result, o); err != nil { + return nil, fmt.Errorf("error parsing 'user_lockout': %w", err) + } + } + + if o := list.Filter("telemetry"); len(o.Items) > 0 { + result.found("telemetry", "Telemetry") + if err := parseTelemetry(&result, o); err != nil { + return nil, fmt.Errorf("error parsing 'telemetry': %w", err) + } + } + + if o := list.Filter("cloud"); len(o.Items) > 0 { + result.found("cloud", "Cloud") + if err := parseCloud(&result, o); err != nil { + return nil, fmt.Errorf("error parsing 'cloud': %w", err) + } + } + + entConfig := &(result.EntSharedConfig) + if err := entConfig.ParseConfig(list); err != nil { + return nil, fmt.Errorf("error parsing enterprise config: %w", err) + } + + return &result, nil +} + +// Sanitized returns a copy of the config with all values that are considered +// sensitive stripped. It also strips all `*Raw` values that are mainly +// used for parsing. +// +// Specifically, the fields that this method strips are: +// - KMS.Config +// - Telemetry.CirconusAPIToken +func (c *SharedConfig) Sanitized() map[string]interface{} { + if c == nil { + return nil + } + + result := map[string]interface{}{ + "default_max_request_duration": c.DefaultMaxRequestDuration, + "disable_mlock": c.DisableMlock, + "log_level": c.LogLevel, + "log_format": c.LogFormat, + "pid_file": c.PidFile, + "cluster_name": c.ClusterName, + "administrative_namespace_path": c.AdministrativeNamespacePath, + } + + // Optional log related settings + if c.LogFile != "" { + result["log_file"] = c.LogFile + } + if c.LogRotateBytes != 0 { + result["log_rotate_bytes"] = c.LogRotateBytes + } + if c.LogRotateDuration != "" { + result["log_rotate_duration"] = c.LogRotateDuration + } + if c.LogRotateMaxFiles != 0 { + result["log_rotate_max_files"] = c.LogRotateMaxFiles + } + + // Sanitize listeners + if len(c.Listeners) != 0 { + var sanitizedListeners []interface{} + for _, ln := range c.Listeners { + cleanLn := map[string]interface{}{ + "type": ln.Type, + "config": ln.RawConfig, + } + sanitizedListeners = append(sanitizedListeners, cleanLn) + } + result["listeners"] = sanitizedListeners + } + + // Sanitize user lockout stanza + if len(c.UserLockouts) != 0 { + var sanitizedUserLockouts []interface{} + for _, userlockout := range c.UserLockouts { + cleanUserLockout := map[string]interface{}{ + "type": userlockout.Type, + "lockout_threshold": userlockout.LockoutThreshold, + "lockout_duration": userlockout.LockoutDuration, + "lockout_counter_reset": userlockout.LockoutCounterReset, + "disable_lockout": userlockout.DisableLockout, + } + sanitizedUserLockouts = append(sanitizedUserLockouts, cleanUserLockout) + } + result["user_lockout_configs"] = sanitizedUserLockouts + } + + // Sanitize seals stanza + if len(c.Seals) != 0 { + var sanitizedSeals []interface{} + for _, s := range c.Seals { + cleanSeal := map[string]interface{}{ + "type": s.Type, + "disabled": s.Disabled, + } + sanitizedSeals = append(sanitizedSeals, cleanSeal) + } + result["seals"] = sanitizedSeals + } + + // Sanitize telemetry stanza + if c.Telemetry != nil { + sanitizedTelemetry := map[string]interface{}{ + "statsite_address": c.Telemetry.StatsiteAddr, + "statsd_address": c.Telemetry.StatsdAddr, + "disable_hostname": c.Telemetry.DisableHostname, + 
"metrics_prefix": c.Telemetry.MetricsPrefix, + "usage_gauge_period": c.Telemetry.UsageGaugePeriod, + "maximum_gauge_cardinality": c.Telemetry.MaximumGaugeCardinality, + "circonus_api_token": "", + "circonus_api_app": c.Telemetry.CirconusAPIApp, + "circonus_api_url": c.Telemetry.CirconusAPIURL, + "circonus_submission_interval": c.Telemetry.CirconusSubmissionInterval, + "circonus_submission_url": c.Telemetry.CirconusCheckSubmissionURL, + "circonus_check_id": c.Telemetry.CirconusCheckID, + "circonus_check_force_metric_activation": c.Telemetry.CirconusCheckForceMetricActivation, + "circonus_check_instance_id": c.Telemetry.CirconusCheckInstanceID, + "circonus_check_search_tag": c.Telemetry.CirconusCheckSearchTag, + "circonus_check_tags": c.Telemetry.CirconusCheckTags, + "circonus_check_display_name": c.Telemetry.CirconusCheckDisplayName, + "circonus_broker_id": c.Telemetry.CirconusBrokerID, + "circonus_broker_select_tag": c.Telemetry.CirconusBrokerSelectTag, + "dogstatsd_addr": c.Telemetry.DogStatsDAddr, + "dogstatsd_tags": c.Telemetry.DogStatsDTags, + "prometheus_retention_time": c.Telemetry.PrometheusRetentionTime, + "stackdriver_project_id": c.Telemetry.StackdriverProjectID, + "stackdriver_location": c.Telemetry.StackdriverLocation, + "stackdriver_namespace": c.Telemetry.StackdriverNamespace, + "stackdriver_debug_logs": c.Telemetry.StackdriverDebugLogs, + "lease_metrics_epsilon": c.Telemetry.LeaseMetricsEpsilon, + "num_lease_metrics_buckets": c.Telemetry.NumLeaseMetricsTimeBuckets, + "add_lease_metrics_namespace_labels": c.Telemetry.LeaseMetricsNameSpaceLabels, + } + result["telemetry"] = sanitizedTelemetry + } + + return result +} + +func (c *SharedConfig) found(s, k string) { + delete(c.UnusedKeys, s) + c.FoundKeys = append(c.FoundKeys, k) +} diff --git a/internalshared/configutil/config_test.go b/internalshared/configutil/config_test.go new file mode 100644 index 0000000..e44bf7f --- /dev/null +++ b/internalshared/configutil/config_test.go @@ -0,0 +1,157 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configutil + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +type mapValue[T any] struct { + Value T + IsFound bool +} + +type expectedLogFields struct { + File mapValue[string] + Format mapValue[string] + Level mapValue[string] + RotateBytes mapValue[int] + RotateDuration mapValue[string] + RotateMaxFiles mapValue[int] +} + +// TestSharedConfig_Sanitized_LogFields ensures that 'log related' shared config +// is sanitized as expected. 
+func TestSharedConfig_Sanitized_LogFields(t *testing.T) { + tests := map[string]struct { + Value *SharedConfig + IsNil bool + Expected expectedLogFields + }{ + "nil": { + Value: nil, + IsNil: true, + }, + "empty": { + Value: &SharedConfig{}, + IsNil: false, + Expected: expectedLogFields{ + Format: mapValue[string]{IsFound: true, Value: ""}, + Level: mapValue[string]{IsFound: true, Value: ""}, + }, + }, + "only-log-level-and-format": { + Value: &SharedConfig{ + LogFormat: "json", + LogLevel: "warn", + }, + IsNil: false, + Expected: expectedLogFields{ + Format: mapValue[string]{IsFound: true, Value: "json"}, + Level: mapValue[string]{IsFound: true, Value: "warn"}, + }, + }, + "valid-log-fields": { + Value: &SharedConfig{ + LogFile: "vault.log", + LogFormat: "json", + LogLevel: "warn", + LogRotateBytes: 1024, + LogRotateDuration: "30m", + LogRotateMaxFiles: -1, + }, + IsNil: false, + Expected: expectedLogFields{ + File: mapValue[string]{IsFound: true, Value: "vault.log"}, + Format: mapValue[string]{IsFound: true, Value: "json"}, + Level: mapValue[string]{IsFound: true, Value: "warn"}, + RotateBytes: mapValue[int]{IsFound: true, Value: 1024}, + RotateDuration: mapValue[string]{IsFound: true, Value: "30m"}, + RotateMaxFiles: mapValue[int]{IsFound: true, Value: -1}, + }, + }, + } + + for name, tc := range tests { + name := name + tc := tc + t.Run(name, func(t *testing.T) { + cfg := tc.Value.Sanitized() + switch { + case tc.IsNil: + require.Nil(t, cfg) + default: + require.NotNil(t, cfg) + + // Log file + val, found := cfg["log_file"] + switch { + case tc.Expected.File.IsFound: + require.True(t, found) + require.NotNil(t, val) + require.Equal(t, tc.Expected.File.Value, val) + default: + require.Nil(t, val) + } + + // Log format + val, found = cfg["log_format"] + switch { + case tc.Expected.Format.IsFound: + require.True(t, found) + require.NotNil(t, val) + require.Equal(t, tc.Expected.Format.Value, val) + default: + require.Nil(t, val) + } + + // Log level + val, found = cfg["log_level"] + switch { + case tc.Expected.Level.IsFound: + require.True(t, found) + require.NotNil(t, val) + require.Equal(t, tc.Expected.Level.Value, val) + default: + require.Nil(t, val) + } + + // Log rotate bytes + val, found = cfg["log_rotate_bytes"] + switch { + case tc.Expected.RotateBytes.IsFound: + require.True(t, found) + require.NotNil(t, val) + require.Equal(t, tc.Expected.RotateBytes.Value, val) + default: + require.Nil(t, val) + } + + // Log rotate duration + val, found = cfg["log_rotate_duration"] + switch { + case tc.Expected.RotateDuration.IsFound: + require.True(t, found) + require.NotNil(t, val) + require.Equal(t, tc.Expected.RotateDuration.Value, val) + default: + require.Nil(t, val) + } + + // Log rotate max files + val, found = cfg["log_rotate_max_files"] + switch { + case tc.Expected.RotateMaxFiles.IsFound: + require.True(t, found) + require.NotNil(t, val) + require.Equal(t, tc.Expected.RotateMaxFiles.Value, val) + default: + require.Nil(t, val) + } + } + }) + } +} diff --git a/internalshared/configutil/config_util.go b/internalshared/configutil/config_util.go new file mode 100644 index 0000000..3fd4bb9 --- /dev/null +++ b/internalshared/configutil/config_util.go @@ -0,0 +1,20 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +//go:build !enterprise + +package configutil + +import ( + "github.com/hashicorp/hcl/hcl/ast" +) + +type EntSharedConfig struct{} + +func (ec *EntSharedConfig) ParseConfig(list *ast.ObjectList) error { + return nil +} + +func ParseEntropy(result *SharedConfig, list *ast.ObjectList, blockName string) error { + return nil +} diff --git a/internalshared/configutil/encrypt_decrypt.go b/internalshared/configutil/encrypt_decrypt.go new file mode 100644 index 0000000..f0e5fcc --- /dev/null +++ b/internalshared/configutil/encrypt_decrypt.go @@ -0,0 +1,95 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configutil + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "fmt" + "regexp" + + wrapping "github.com/hashicorp/go-kms-wrapping/v2" + "google.golang.org/protobuf/proto" +) + +var ( + encryptRegex = regexp.MustCompile(`{{encrypt\(.*\)}}`) + decryptRegex = regexp.MustCompile(`{{decrypt\(.*\)}}`) +) + +func EncryptDecrypt(rawStr string, decrypt, strip bool, wrapper wrapping.Wrapper) (string, error) { + var locs [][]int + raw := []byte(rawStr) + searchVal := "{{encrypt(" + replaceVal := "{{decrypt(" + suffixVal := ")}}" + if decrypt { + searchVal = "{{decrypt(" + replaceVal = "{{encrypt(" + locs = decryptRegex.FindAllIndex(raw, -1) + } else { + locs = encryptRegex.FindAllIndex(raw, -1) + } + if strip { + replaceVal = "" + suffixVal = "" + } + + out := make([]byte, 0, len(rawStr)*2) + var prevMaxLoc int + for _, match := range locs { + if len(match) != 2 { + return "", fmt.Errorf("expected two values for match, got %d", len(match)) + } + + // Append everything from the end of the last match to the beginning of this one + out = append(out, raw[prevMaxLoc:match[0]]...) + + // Transform. First pull off the suffix/prefix + matchBytes := raw[match[0]:match[1]] + matchBytes = bytes.TrimSuffix(bytes.TrimPrefix(matchBytes, []byte(searchVal)), []byte(")}}")) + var finalVal string + + // Now encrypt or decrypt + switch decrypt { + case false: + outBlob, err := wrapper.Encrypt(context.Background(), matchBytes, nil) + if err != nil { + return "", fmt.Errorf("error encrypting parameter: %w", err) + } + if outBlob == nil { + return "", errors.New("nil value returned from encrypting parameter") + } + outMsg, err := proto.Marshal(outBlob) + if err != nil { + return "", fmt.Errorf("error marshaling encrypted parameter: %w", err) + } + finalVal = base64.RawURLEncoding.EncodeToString(outMsg) + + default: + inMsg, err := base64.RawURLEncoding.DecodeString(string(matchBytes)) + if err != nil { + return "", fmt.Errorf("error decoding encrypted parameter: %w", err) + } + inBlob := new(wrapping.BlobInfo) + if err := proto.Unmarshal(inMsg, inBlob); err != nil { + return "", fmt.Errorf("error unmarshaling encrypted parameter: %w", err) + } + dec, err := wrapper.Decrypt(context.Background(), inBlob, nil) + if err != nil { + return "", fmt.Errorf("error decrypting encrypted parameter: %w", err) + } + finalVal = string(dec) + } + + // Append new value + out = append(out, []byte(fmt.Sprintf("%s%s%s", replaceVal, finalVal, suffixVal))...) + prevMaxLoc = match[1] + } + // At the end, append the rest + out = append(out, raw[prevMaxLoc:]...) + return string(out), nil +} diff --git a/internalshared/configutil/encrypt_decrypt_test.go b/internalshared/configutil/encrypt_decrypt_test.go new file mode 100644 index 0000000..19bf685 --- /dev/null +++ b/internalshared/configutil/encrypt_decrypt_test.go @@ -0,0 +1,122 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package configutil + +import ( + "bytes" + "context" + "encoding/base64" + "testing" + + wrapping "github.com/hashicorp/go-kms-wrapping/v2" + "google.golang.org/protobuf/proto" +) + +func getAEADTestKMS(t *testing.T) { +} + +func TestEncryptParams(t *testing.T) { + rawStr := ` +storage "consul" { + api_key = "{{encrypt(foobar)}}" +} + +telemetry { + some_param = "something" + circonus_api_key = "{{encrypt(barfoo)}}" +} +` + + finalStr := ` +storage "consul" { + api_key = "foobar" +} + +telemetry { + some_param = "something" + circonus_api_key = "barfoo" +} +` + + reverser := new(reversingWrapper) + out, err := EncryptDecrypt(rawStr, false, false, reverser) + if err != nil { + t.Fatal(err) + } + + first := true + locs := decryptRegex.FindAllIndex([]byte(out), -1) + for _, match := range locs { + matchBytes := []byte(out)[match[0]:match[1]] + matchBytes = bytes.TrimSuffix(bytes.TrimPrefix(matchBytes, []byte("{{decrypt(")), []byte(")}}")) + inMsg, err := base64.RawURLEncoding.DecodeString(string(matchBytes)) + if err != nil { + t.Fatal(err) + } + inBlob := new(wrapping.BlobInfo) + if err := proto.Unmarshal(inMsg, inBlob); err != nil { + t.Fatal(err) + } + ct := string(inBlob.Ciphertext) + if first { + if ct != "raboof" { + t.Fatal(ct) + } + first = false + } else { + if ct != "oofrab" { + t.Fatal(ct) + } + } + } + + decOut, err := EncryptDecrypt(out, true, false, reverser) + if err != nil { + t.Fatal(err) + } + + if decOut != rawStr { + t.Fatal(decOut) + } + + decOut, err = EncryptDecrypt(out, true, true, reverser) + if err != nil { + t.Fatal(err) + } + + if decOut != finalStr { + t.Fatal(decOut) + } +} + +type reversingWrapper struct{} + +func (r *reversingWrapper) Type(_ context.Context) (wrapping.WrapperType, error) { + return "reverser", nil +} +func (r *reversingWrapper) KeyId(_ context.Context) (string, error) { return "reverser", nil } +func (r *reversingWrapper) HMACKeyID() string { return "" } +func (r *reversingWrapper) Init(_ context.Context) error { return nil } +func (r *reversingWrapper) Finalize(_ context.Context) error { return nil } +func (r *reversingWrapper) SetConfig(_ context.Context, opts ...wrapping.Option) (*wrapping.WrapperConfig, error) { + return &wrapping.WrapperConfig{}, nil +} + +func (r *reversingWrapper) Encrypt(_ context.Context, input []byte, _ ...wrapping.Option) (*wrapping.BlobInfo, error) { + return &wrapping.BlobInfo{ + Ciphertext: r.reverse(input), + }, nil +} + +func (r *reversingWrapper) Decrypt(_ context.Context, input *wrapping.BlobInfo, _ ...wrapping.Option) ([]byte, error) { + return r.reverse(input.Ciphertext), nil +} + +func (r *reversingWrapper) reverse(input []byte) []byte { + output := make([]byte, len(input)) + for i, j := 0, len(input)-1; i < j; i, j = i+1, j-1 { + output[i], output[j] = input[j], input[i] + } + return output +} diff --git a/internalshared/configutil/hcp_link.go b/internalshared/configutil/hcp_link.go new file mode 100644 index 0000000..fd8d6b6 --- /dev/null +++ b/internalshared/configutil/hcp_link.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configutil + +import ( + "fmt" + "os" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" + sdkResource "github.com/hashicorp/hcp-sdk-go/resource" +) + +// HCPLinkConfig is the HCP Link configuration for the server. 
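+//
+// A sketch of the HCL stanza this struct decodes (values are illustrative
+// placeholders only):
+//
+//	cloud {
+//	  resource_id   = "<HCP resource ID>"
+//	  client_id     = "<HCP client ID>"
+//	  client_secret = "<HCP client secret>"
+//	}
+//
+// client_id and client_secret may instead be supplied via the HCP_CLIENT_ID
+// and HCP_CLIENT_SECRET environment variables, which take precedence over
+// the config values (see parseCloud below).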
+type HCPLinkConfig struct { + UnusedKeys UnusedKeyMap `hcl:",unusedKeyPositions"` + + ResourceIDRaw string `hcl:"resource_id"` + Resource *sdkResource.Resource `hcl:"-"` + EnableAPICapability bool `hcl:"enable_api_capability"` + EnablePassThroughCapability bool `hcl:"enable_passthrough_capability"` + ClientID string `hcl:"client_id"` + ClientSecret string `hcl:"client_secret"` +} + +func parseCloud(result *SharedConfig, list *ast.ObjectList) error { + if len(list.Items) > 1 { + return fmt.Errorf("only one 'cloud' block is permitted") + } + + // Get our one item + item := list.Items[0] + + if result.HCPLinkConf == nil { + result.HCPLinkConf = &HCPLinkConfig{} + } + + if err := hcl.DecodeObject(&result.HCPLinkConf, item.Val); err != nil { + return multierror.Prefix(err, "cloud:") + } + + // let's check if the Client ID and Secret are set in the environment + if envClientID := os.Getenv("HCP_CLIENT_ID"); envClientID != "" { + result.HCPLinkConf.ClientID = envClientID + } + if envClientSecret := os.Getenv("HCP_CLIENT_SECRET"); envClientSecret != "" { + result.HCPLinkConf.ClientSecret = envClientSecret + } + + // three pieces are necessary if the cloud stanza is configured + if result.HCPLinkConf.ResourceIDRaw == "" || result.HCPLinkConf.ClientID == "" || result.HCPLinkConf.ClientSecret == "" { + return multierror.Prefix(fmt.Errorf("failed to find the required cloud stanza configurations. all resource ID, client ID and client secret are required"), "cloud:") + } + + res, err := sdkResource.FromString(result.HCPLinkConf.ResourceIDRaw) + if err != nil { + return multierror.Prefix(fmt.Errorf("failed to parse resource_id for HCP Link"), "cloud:") + } + result.HCPLinkConf.Resource = &res + + // ENV var takes precedence over the config value + if apiCapEnv := os.Getenv("HCP_LINK_ENABLE_API_CAPABILITY"); apiCapEnv != "" { + result.HCPLinkConf.EnableAPICapability = true + } + + if passthroughCapEnv := os.Getenv("HCP_LINK_ENABLE_PASSTHROUGH_CAPABILITY"); passthroughCapEnv != "" { + result.HCPLinkConf.EnablePassThroughCapability = true + } + + return nil +} diff --git a/internalshared/configutil/http_response_headers.go b/internalshared/configutil/http_response_headers.go new file mode 100644 index 0000000..b808f9e --- /dev/null +++ b/internalshared/configutil/http_response_headers.go @@ -0,0 +1,132 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configutil + +import ( + "fmt" + "net/textproto" + "strconv" + "strings" + + "github.com/hashicorp/go-secure-stdlib/strutil" +) + +var ValidCustomStatusCodeCollection = []string{ + "default", + "1xx", + "2xx", + "3xx", + "4xx", + "5xx", +} + +const StrictTransportSecurity = "max-age=31536000; includeSubDomains" + +// ParseCustomResponseHeaders takes a raw config values for the +// "custom_response_headers". It makes sure the config entry is passed in +// as a map of status code to a map of header name and header values. It +// verifies the validity of the status codes, and header values. It also +// adds the default headers values. 
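+//
+// As a sketch of the expected shape (values are hypothetical), a stanza such
+// as
+//
+//	custom_response_headers {
+//	  "default" = {
+//	    "X-Custom-Header" = ["a", "b"]
+//	  }
+//	  "2xx" = {
+//	    "X-Ok-Header" = ["yes"]
+//	  }
+//	}
+//
+// arrives here as a []map[string]interface{} that maps a status code, a
+// status code group ("1xx".."5xx"), or "default" to a single-element slice
+// whose map pairs each header name with a list of string values.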
+func ParseCustomResponseHeaders(responseHeaders interface{}) (map[string]map[string]string, error) {
+	h := make(map[string]map[string]string)
+	// if r is nil, we still should set the default custom headers
+	if responseHeaders == nil {
+		h["default"] = map[string]string{"Strict-Transport-Security": StrictTransportSecurity}
+		return h, nil
+	}
+
+	customResponseHeader, ok := responseHeaders.([]map[string]interface{})
+	if !ok {
+		return nil, fmt.Errorf("response headers were not configured correctly. please make sure they're in a slice of maps")
+	}
+
+	for _, crh := range customResponseHeader {
+		for statusCode, responseHeader := range crh {
+			headerValList, ok := responseHeader.([]map[string]interface{})
+			if !ok {
+				return nil, fmt.Errorf("response headers were not configured correctly. please make sure they're in a slice of maps")
+			}
+
+			if !IsValidStatusCode(statusCode) {
+				return nil, fmt.Errorf("invalid status code found in the server configuration: %v", statusCode)
+			}
+
+			if len(headerValList) != 1 {
+				return nil, fmt.Errorf("invalid number of response headers exist")
+			}
+			headerValMap := headerValList[0]
+			headerVal, err := parseHeaders(headerValMap)
+			if err != nil {
+				return nil, err
+			}
+
+			h[statusCode] = headerVal
+		}
+	}
+
+	// setting Strict-Transport-Security as a default header
+	if h["default"] == nil {
+		h["default"] = make(map[string]string)
+	}
+	if _, ok := h["default"]["Strict-Transport-Security"]; !ok {
+		h["default"]["Strict-Transport-Security"] = StrictTransportSecurity
+	}
+
+	return h, nil
+}
+
+// IsValidStatusCode checks that a status code is either one of the valid
+// status code groups ("default", "1xx" .. "5xx") or an integer within the
+// [100, 600) boundary.
+func IsValidStatusCode(sc string) bool {
+	if strutil.StrListContains(ValidCustomStatusCodeCollection, sc) {
+		return true
+	}
+
+	i, err := strconv.Atoi(sc)
+	if err != nil {
+		return false
+	}
+
+	if i >= 600 || i < 100 {
+		return false
+	}
+
+	return true
+}
+
+func parseHeaders(in map[string]interface{}) (map[string]string, error) {
+	hvMap := make(map[string]string)
+	for k, v := range in {
+		// parsing header name
+		headerName := textproto.CanonicalMIMEHeaderKey(k)
+		// parsing header values
+		s, err := parseHeaderValues(v)
+		if err != nil {
+			return nil, err
+		}
+		hvMap[headerName] = s
+	}
+	return hvMap, nil
+}
+
+func parseHeaderValues(header interface{}) (string, error) {
+	var sl []string
+	if _, ok := header.([]interface{}); !ok {
+		return "", fmt.Errorf("headers must be given in a list of strings")
+	}
+	headerValList := header.([]interface{})
+	for _, vh := range headerValList {
+		if _, ok := vh.(string); !ok {
+			return "", fmt.Errorf("found a non-string header value: %v", vh)
+		}
+		headerVal := strings.TrimSpace(vh.(string))
+		if headerVal == "" {
+			continue
+		}
+		sl = append(sl, headerVal)
+	}
+	s := strings.Join(sl, "; ")
+
+	return s, nil
+}
diff --git a/internalshared/configutil/kms.go b/internalshared/configutil/kms.go
new file mode 100644
index 0000000..0250181
--- /dev/null
+++ b/internalshared/configutil/kms.go
@@ -0,0 +1,352 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package configutil
+
+import (
+	"context"
+	"crypto/rand"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/go-hclog"
+	wrapping "github.com/hashicorp/go-kms-wrapping/v2"
+	aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2"
+	"github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2"
+	"github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2"
+	"github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2"
+	"github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2"
+	"github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2"
+	"github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2"
+	"github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/go-secure-stdlib/parseutil"
+	"github.com/hashicorp/hcl"
+	"github.com/hashicorp/hcl/hcl/ast"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+var (
+	ConfigureWrapper             = configureWrapper
+	CreateSecureRandomReaderFunc = createSecureRandomReader
+)
+
+// EntropyMode is the mode in which entropy augmentation operates
+type EntropyMode int
+
+const (
+	EntropyUnknown EntropyMode = iota
+	EntropyAugmentation
+)
+
+// Entropy contains Entropy configuration for the server
+type Entropy struct {
+	Mode EntropyMode
+}
+
+// KMS contains KMS configuration for the server
+type KMS struct {
+	UnusedKeys []string `hcl:",unusedKeys"`
+	Type       string
+	// Purpose can be used to allow a string-based specification of what this
+	// KMS is designated for, in situations where we want to allow more than
+	// one KMS to be specified
+	Purpose []string `hcl:"-"`
+
+	Disabled bool
+	Config   map[string]string
+}
+
+func (k *KMS) GoString() string {
+	return fmt.Sprintf("*%#v", *k)
+}
+
+func parseKMS(result *[]*KMS, list *ast.ObjectList, blockName string, maxKMS int) error {
+	if len(list.Items) > maxKMS {
+		return fmt.Errorf("at most %d %q blocks are permitted", maxKMS, blockName)
+	}
+
+	seals := make([]*KMS, 0, len(list.Items))
+	for _, item := range list.Items {
+		key := blockName
+		if len(item.Keys) > 0 {
+			key = item.Keys[0].Token.Value().(string)
+		}
+
+		// We first decode into a map[string]interface{} because purpose isn't
+		// necessarily a string. Then we migrate everything else over to
+		// map[string]string and error if it doesn't work.
+		var m map[string]interface{}
+		if err := hcl.DecodeObject(&m, item.Val); err != nil {
+			return multierror.Prefix(err, fmt.Sprintf("%s.%s:", blockName, key))
+		}
+
+		var purpose []string
+		var err error
+		if v, ok := m["purpose"]; ok {
+			if purpose, err = parseutil.ParseCommaStringSlice(v); err != nil {
+				return multierror.Prefix(fmt.Errorf("unable to parse 'purpose' in kms type %q: %w", key, err), fmt.Sprintf("%s.%s:", blockName, key))
+			}
+			for i, p := range purpose {
+				purpose[i] = strings.ToLower(p)
+			}
+			delete(m, "purpose")
+		}
+
+		var disabled bool
+		if v, ok := m["disabled"]; ok {
+			disabled, err = parseutil.ParseBool(v)
+			if err != nil {
+				return multierror.Prefix(err, fmt.Sprintf("%s.%s:", blockName, key))
+			}
+			delete(m, "disabled")
+		}
+
+		strMap := make(map[string]string, len(m))
+		for k, v := range m {
+			s, err := parseutil.ParseString(v)
+			if err != nil {
+				return multierror.Prefix(err, fmt.Sprintf("%s.%s:", blockName, key))
+			}
+			strMap[k] = s
+		}
+
+		seal := &KMS{
+			Type:     strings.ToLower(key),
+			Purpose:  purpose,
+			Disabled: disabled,
+		}
+		if len(strMap) > 0 {
+			seal.Config = strMap
+		}
+		seals = append(seals, seal)
+	}
+
+	*result = append(*result, seals...)
+
+	return nil
+}
+
+func ParseKMSes(d string) ([]*KMS, error) {
+	// Parse!
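+	//
+	// For illustration, an input such as the following (hypothetical values)
+	// yields a single KMS entry whose Type is "awskms", taken from the block
+	// label:
+	//
+	//	seal "awskms" {
+	//	  kms_key_id = "..."
+	//	}
+	//
+	// Both "seal" and "kms" blocks are accepted, at most three of each.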
+ obj, err := hcl.Parse(d) + if err != nil { + return nil, err + } + + // Start building the result + var result struct { + Seals []*KMS `hcl:"-"` + } + + if err := hcl.DecodeObject(&result, obj); err != nil { + return nil, err + } + + list, ok := obj.Node.(*ast.ObjectList) + if !ok { + return nil, fmt.Errorf("error parsing: file doesn't contain a root object") + } + + if o := list.Filter("seal"); len(o.Items) > 0 { + if err := parseKMS(&result.Seals, o, "seal", 3); err != nil { + return nil, fmt.Errorf("error parsing 'seal': %w", err) + } + } + + if o := list.Filter("kms"); len(o.Items) > 0 { + if err := parseKMS(&result.Seals, o, "kms", 3); err != nil { + return nil, fmt.Errorf("error parsing 'kms': %w", err) + } + } + + return result.Seals, nil +} + +func configureWrapper(configKMS *KMS, infoKeys *[]string, info *map[string]string, logger hclog.Logger, opts ...wrapping.Option) (wrapping.Wrapper, error) { + var wrapper wrapping.Wrapper + var kmsInfo map[string]string + var err error + + switch wrapping.WrapperType(configKMS.Type) { + case wrapping.WrapperTypeShamir: + return nil, nil + + case wrapping.WrapperTypeAead: + wrapper, kmsInfo, err = GetAEADKMSFunc(configKMS, opts...) + + case wrapping.WrapperTypeAliCloudKms: + wrapper, kmsInfo, err = GetAliCloudKMSFunc(configKMS, opts...) + + case wrapping.WrapperTypeAwsKms: + wrapper, kmsInfo, err = GetAWSKMSFunc(configKMS, opts...) + + case wrapping.WrapperTypeAzureKeyVault: + wrapper, kmsInfo, err = GetAzureKeyVaultKMSFunc(configKMS, opts...) + + case wrapping.WrapperTypeGcpCkms: + wrapper, kmsInfo, err = GetGCPCKMSKMSFunc(configKMS, opts...) + + case wrapping.WrapperTypeOciKms: + if keyId, ok := configKMS.Config["key_id"]; ok { + opts = append(opts, wrapping.WithKeyId(keyId)) + } + wrapper, kmsInfo, err = GetOCIKMSKMSFunc(configKMS, opts...) + case wrapping.WrapperTypeTransit: + wrapper, kmsInfo, err = GetTransitKMSFunc(configKMS, opts...) + + case wrapping.WrapperTypePkcs11: + return nil, fmt.Errorf("KMS type 'pkcs11' requires the Vault Enterprise HSM binary") + + default: + return nil, fmt.Errorf("Unknown KMS type %q", configKMS.Type) + } + + if err != nil { + return nil, err + } + + if infoKeys != nil && info != nil { + for k, v := range kmsInfo { + *infoKeys = append(*infoKeys, k) + (*info)[k] = v + } + } + + return wrapper, nil +} + +func GetAEADKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) { + wrapper := aeadwrapper.NewWrapper() + wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithConfigMap(kms.Config))...) + if err != nil { + return nil, nil, err + } + info := make(map[string]string) + if wrapperInfo != nil { + str := "AEAD Type" + if len(kms.Purpose) > 0 { + str = fmt.Sprintf("%v %s", kms.Purpose, str) + } + info[str] = wrapperInfo.Metadata["aead_type"] + } + return wrapper, info, nil +} + +func GetAliCloudKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) { + wrapper := alicloudkms.NewWrapper() + wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithConfigMap(kms.Config))...) 
+ if err != nil { + // If the error is any other than logical.KeyNotFoundError, return the error + if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) { + return nil, nil, err + } + } + info := make(map[string]string) + if wrapperInfo != nil { + info["AliCloud KMS Region"] = wrapperInfo.Metadata["region"] + info["AliCloud KMS KeyID"] = wrapperInfo.Metadata["kms_key_id"] + if domain, ok := wrapperInfo.Metadata["domain"]; ok { + info["AliCloud KMS Domain"] = domain + } + } + return wrapper, info, nil +} + +var GetAWSKMSFunc = func(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) { + wrapper := awskms.NewWrapper() + wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithConfigMap(kms.Config))...) + if err != nil { + // If the error is any other than logical.KeyNotFoundError, return the error + if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) { + return nil, nil, err + } + } + info := make(map[string]string) + if wrapperInfo != nil { + info["AWS KMS Region"] = wrapperInfo.Metadata["region"] + info["AWS KMS KeyID"] = wrapperInfo.Metadata["kms_key_id"] + if endpoint, ok := wrapperInfo.Metadata["endpoint"]; ok { + info["AWS KMS Endpoint"] = endpoint + } + } + return wrapper, info, nil +} + +func GetAzureKeyVaultKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) { + wrapper := azurekeyvault.NewWrapper() + wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithConfigMap(kms.Config))...) + if err != nil { + // If the error is any other than logical.KeyNotFoundError, return the error + if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) { + return nil, nil, err + } + } + info := make(map[string]string) + if wrapperInfo != nil { + info["Azure Environment"] = wrapperInfo.Metadata["environment"] + info["Azure Vault Name"] = wrapperInfo.Metadata["vault_name"] + info["Azure Key Name"] = wrapperInfo.Metadata["key_name"] + } + return wrapper, info, nil +} + +func GetGCPCKMSKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) { + wrapper := gcpckms.NewWrapper() + wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithConfigMap(kms.Config))...) + if err != nil { + // If the error is any other than logical.KeyNotFoundError, return the error + if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) { + return nil, nil, err + } + } + info := make(map[string]string) + if wrapperInfo != nil { + info["GCP KMS Project"] = wrapperInfo.Metadata["project"] + info["GCP KMS Region"] = wrapperInfo.Metadata["region"] + info["GCP KMS Key Ring"] = wrapperInfo.Metadata["key_ring"] + info["GCP KMS Crypto Key"] = wrapperInfo.Metadata["crypto_key"] + } + return wrapper, info, nil +} + +func GetOCIKMSKMSFunc(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) { + wrapper := ocikms.NewWrapper() + wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithConfigMap(kms.Config))...) 
+ if err != nil { + return nil, nil, err + } + info := make(map[string]string) + if wrapperInfo != nil { + info["OCI KMS KeyID"] = wrapperInfo.Metadata[ocikms.KmsConfigKeyId] + info["OCI KMS Crypto Endpoint"] = wrapperInfo.Metadata[ocikms.KmsConfigCryptoEndpoint] + info["OCI KMS Management Endpoint"] = wrapperInfo.Metadata[ocikms.KmsConfigManagementEndpoint] + info["OCI KMS Principal Type"] = wrapperInfo.Metadata["principal_type"] + } + return wrapper, info, nil +} + +var GetTransitKMSFunc = func(kms *KMS, opts ...wrapping.Option) (wrapping.Wrapper, map[string]string, error) { + wrapper := transit.NewWrapper() + wrapperInfo, err := wrapper.SetConfig(context.Background(), append(opts, wrapping.WithConfigMap(kms.Config))...) + if err != nil { + // If the error is any other than logical.KeyNotFoundError, return the error + if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) { + return nil, nil, err + } + } + info := make(map[string]string) + if wrapperInfo != nil { + info["Transit Address"] = wrapperInfo.Metadata["address"] + info["Transit Mount Path"] = wrapperInfo.Metadata["mount_path"] + info["Transit Key Name"] = wrapperInfo.Metadata["key_name"] + if namespace, ok := wrapperInfo.Metadata["namespace"]; ok { + info["Transit Namespace"] = namespace + } + } + return wrapper, info, nil +} + +func createSecureRandomReader(conf *SharedConfig, wrapper wrapping.Wrapper) (io.Reader, error) { + return rand.Reader, nil +} diff --git a/internalshared/configutil/lint.go b/internalshared/configutil/lint.go new file mode 100644 index 0000000..24b968e --- /dev/null +++ b/internalshared/configutil/lint.go @@ -0,0 +1,62 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configutil + +import ( + "fmt" + + "github.com/asaskevich/govalidator" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/hcl/hcl/token" +) + +type UnusedKeyMap map[string][]token.Pos + +type ConfigError struct { + Problem string + Position token.Pos +} + +func (c *ConfigError) String() string { + return fmt.Sprintf("%s at %s", c.Problem, c.Position.String()) +} + +type ValidatableConfig interface { + Validate() []ConfigError +} + +// Creates the ConfigErrors for unused fields, which occur in various structs +func ValidateUnusedFields(unusedKeyPositions UnusedKeyMap, sourceFilePath string) []ConfigError { + if unusedKeyPositions == nil { + return nil + } + var errors []ConfigError + for field, positions := range unusedKeyPositions { + problem := fmt.Sprintf("unknown or unsupported field %s found in configuration", field) + for _, pos := range positions { + if pos.Filename == "" && sourceFilePath != "" { + pos.Filename = sourceFilePath + } + errors = append(errors, ConfigError{ + Problem: problem, + Position: pos, + }) + } + } + return errors +} + +// UnusedFieldDifference returns all the keys in map a that are not present in map b, and also not present in foundKeys. +func UnusedFieldDifference(a, b UnusedKeyMap, foundKeys []string) UnusedKeyMap { + if a == nil { + return nil + } + res := make(UnusedKeyMap) + for k, v := range a { + if _, ok := b[k]; !ok && !strutil.StrListContainsCaseInsensitive(foundKeys, govalidator.UnderscoreToCamelCase(k)) { + res[k] = v + } + } + return res +} diff --git a/internalshared/configutil/listener.go b/internalshared/configutil/listener.go new file mode 100644 index 0000000..5e9373a --- /dev/null +++ b/internalshared/configutil/listener.go @@ -0,0 +1,455 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package configutil + +import ( + "errors" + "fmt" + "net/textproto" + "regexp" + "strings" + "time" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/go-secure-stdlib/tlsutil" + "github.com/hashicorp/go-sockaddr" + "github.com/hashicorp/go-sockaddr/template" + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" +) + +type ListenerTelemetry struct { + UnusedKeys UnusedKeyMap `hcl:",unusedKeyPositions"` + UnauthenticatedMetricsAccess bool `hcl:"-"` + UnauthenticatedMetricsAccessRaw interface{} `hcl:"unauthenticated_metrics_access,alias:UnauthenticatedMetricsAccess"` +} + +type ListenerProfiling struct { + UnusedKeys UnusedKeyMap `hcl:",unusedKeyPositions"` + UnauthenticatedPProfAccess bool `hcl:"-"` + UnauthenticatedPProfAccessRaw interface{} `hcl:"unauthenticated_pprof_access,alias:UnauthenticatedPProfAccessRaw"` +} + +type ListenerInFlightRequestLogging struct { + UnusedKeys UnusedKeyMap `hcl:",unusedKeyPositions"` + UnauthenticatedInFlightAccess bool `hcl:"-"` + UnauthenticatedInFlightAccessRaw interface{} `hcl:"unauthenticated_in_flight_requests_access,alias:unauthenticatedInFlightAccessRaw"` +} + +// Listener is the listener configuration for the server. +type Listener struct { + UnusedKeys UnusedKeyMap `hcl:",unusedKeyPositions"` + RawConfig map[string]interface{} + + Type string + Purpose []string `hcl:"-"` + PurposeRaw interface{} `hcl:"purpose"` + Role string `hcl:"role"` + + Address string `hcl:"address"` + ClusterAddress string `hcl:"cluster_address"` + MaxRequestSize int64 `hcl:"-"` + MaxRequestSizeRaw interface{} `hcl:"max_request_size"` + MaxRequestDuration time.Duration `hcl:"-"` + MaxRequestDurationRaw interface{} `hcl:"max_request_duration"` + RequireRequestHeader bool `hcl:"-"` + RequireRequestHeaderRaw interface{} `hcl:"require_request_header"` + + TLSDisable bool `hcl:"-"` + TLSDisableRaw interface{} `hcl:"tls_disable"` + TLSCertFile string `hcl:"tls_cert_file"` + TLSKeyFile string `hcl:"tls_key_file"` + TLSMinVersion string `hcl:"tls_min_version"` + TLSMaxVersion string `hcl:"tls_max_version"` + TLSCipherSuites []uint16 `hcl:"-"` + TLSCipherSuitesRaw string `hcl:"tls_cipher_suites"` + TLSRequireAndVerifyClientCert bool `hcl:"-"` + TLSRequireAndVerifyClientCertRaw interface{} `hcl:"tls_require_and_verify_client_cert"` + TLSClientCAFile string `hcl:"tls_client_ca_file"` + TLSDisableClientCerts bool `hcl:"-"` + TLSDisableClientCertsRaw interface{} `hcl:"tls_disable_client_certs"` + + HTTPReadTimeout time.Duration `hcl:"-"` + HTTPReadTimeoutRaw interface{} `hcl:"http_read_timeout"` + HTTPReadHeaderTimeout time.Duration `hcl:"-"` + HTTPReadHeaderTimeoutRaw interface{} `hcl:"http_read_header_timeout"` + HTTPWriteTimeout time.Duration `hcl:"-"` + HTTPWriteTimeoutRaw interface{} `hcl:"http_write_timeout"` + HTTPIdleTimeout time.Duration `hcl:"-"` + HTTPIdleTimeoutRaw interface{} `hcl:"http_idle_timeout"` + + ProxyProtocolBehavior string `hcl:"proxy_protocol_behavior"` + ProxyProtocolAuthorizedAddrs []*sockaddr.SockAddrMarshaler `hcl:"-"` + ProxyProtocolAuthorizedAddrsRaw interface{} `hcl:"proxy_protocol_authorized_addrs,alias:ProxyProtocolAuthorizedAddrs"` + + XForwardedForAuthorizedAddrs []*sockaddr.SockAddrMarshaler `hcl:"-"` + XForwardedForAuthorizedAddrsRaw interface{} `hcl:"x_forwarded_for_authorized_addrs,alias:XForwardedForAuthorizedAddrs"` + XForwardedForHopSkips int64 `hcl:"-"` + XForwardedForHopSkipsRaw 
interface{} `hcl:"x_forwarded_for_hop_skips,alias:XForwardedForHopSkips"` + XForwardedForRejectNotPresent bool `hcl:"-"` + XForwardedForRejectNotPresentRaw interface{} `hcl:"x_forwarded_for_reject_not_present,alias:XForwardedForRejectNotPresent"` + XForwardedForRejectNotAuthorized bool `hcl:"-"` + XForwardedForRejectNotAuthorizedRaw interface{} `hcl:"x_forwarded_for_reject_not_authorized,alias:XForwardedForRejectNotAuthorized"` + + SocketMode string `hcl:"socket_mode"` + SocketUser string `hcl:"socket_user"` + SocketGroup string `hcl:"socket_group"` + + AgentAPI *AgentAPI `hcl:"agent_api"` + + ProxyAPI *ProxyAPI `hcl:"proxy_api"` + + Telemetry ListenerTelemetry `hcl:"telemetry"` + Profiling ListenerProfiling `hcl:"profiling"` + InFlightRequestLogging ListenerInFlightRequestLogging `hcl:"inflight_requests_logging"` + + // RandomPort is used only for some testing purposes + RandomPort bool `hcl:"-"` + + CorsEnabledRaw interface{} `hcl:"cors_enabled"` + CorsEnabled bool `hcl:"-"` + CorsAllowedOrigins []string `hcl:"cors_allowed_origins"` + CorsAllowedHeaders []string `hcl:"-"` + CorsAllowedHeadersRaw []string `hcl:"cors_allowed_headers,alias:cors_allowed_headers"` + + // Custom Http response headers + CustomResponseHeaders map[string]map[string]string `hcl:"-"` + CustomResponseHeadersRaw interface{} `hcl:"custom_response_headers"` +} + +// AgentAPI allows users to select which parts of the Agent API they want enabled. +type AgentAPI struct { + EnableQuit bool `hcl:"enable_quit"` +} + +// ProxyAPI allows users to select which parts of the Vault Proxy API they want enabled. +type ProxyAPI struct { + EnableQuit bool `hcl:"enable_quit"` +} + +func (l *Listener) GoString() string { + return fmt.Sprintf("*%#v", *l) +} + +func (l *Listener) Validate(path string) []ConfigError { + results := append(ValidateUnusedFields(l.UnusedKeys, path), ValidateUnusedFields(l.Telemetry.UnusedKeys, path)...) + return append(results, ValidateUnusedFields(l.Profiling.UnusedKeys, path)...) 
+} + +func ParseListeners(result *SharedConfig, list *ast.ObjectList) error { + var err error + result.Listeners = make([]*Listener, 0, len(list.Items)) + for i, item := range list.Items { + var l Listener + if err := hcl.DecodeObject(&l, item.Val); err != nil { + return multierror.Prefix(err, fmt.Sprintf("listeners.%d:", i)) + } + if rendered, err := ParseSingleIPTemplate(l.Address); err != nil { + return multierror.Prefix(err, fmt.Sprintf("listeners.%d:", i)) + } else { + l.Address = rendered + } + if rendered, err := ParseSingleIPTemplate(l.ClusterAddress); err != nil { + return multierror.Prefix(err, fmt.Sprintf("listeners.%d:", i)) + } else { + l.ClusterAddress = rendered + } + + // Hacky way, for now, to get the values we want for sanitizing + var m map[string]interface{} + if err := hcl.DecodeObject(&m, item.Val); err != nil { + return multierror.Prefix(err, fmt.Sprintf("listeners.%d:", i)) + } + l.RawConfig = m + + // Base values + { + switch { + case l.Type != "": + case len(item.Keys) == 1: + l.Type = strings.ToLower(item.Keys[0].Token.Value().(string)) + default: + return multierror.Prefix(errors.New("listener type must be specified"), fmt.Sprintf("listeners.%d:", i)) + } + + l.Type = strings.ToLower(l.Type) + switch l.Type { + case "tcp", "unix": + result.found(l.Type, l.Type) + default: + return multierror.Prefix(fmt.Errorf("unsupported listener type %q", l.Type), fmt.Sprintf("listeners.%d:", i)) + } + + if l.PurposeRaw != nil { + if l.Purpose, err = parseutil.ParseCommaStringSlice(l.PurposeRaw); err != nil { + return multierror.Prefix(fmt.Errorf("unable to parse 'purpose' in listener type %q: %w", l.Type, err), fmt.Sprintf("listeners.%d:", i)) + } + for i, v := range l.Purpose { + l.Purpose[i] = strings.ToLower(v) + } + + l.PurposeRaw = nil + } + + switch l.Role { + case "default", "metrics_only", "": + result.found(l.Type, l.Type) + default: + return multierror.Prefix(fmt.Errorf("unsupported listener role %q", l.Role), fmt.Sprintf("listeners.%d:", i)) + } + } + + // Request Parameters + { + if l.MaxRequestSizeRaw != nil { + if l.MaxRequestSize, err = parseutil.ParseInt(l.MaxRequestSizeRaw); err != nil { + return multierror.Prefix(fmt.Errorf("error parsing max_request_size: %w", err), fmt.Sprintf("listeners.%d", i)) + } + + l.MaxRequestSizeRaw = nil + } + + if l.MaxRequestDurationRaw != nil { + if l.MaxRequestDuration, err = parseutil.ParseDurationSecond(l.MaxRequestDurationRaw); err != nil { + return multierror.Prefix(fmt.Errorf("error parsing max_request_duration: %w", err), fmt.Sprintf("listeners.%d", i)) + } + if l.MaxRequestDuration < 0 { + return multierror.Prefix(errors.New("max_request_duration cannot be negative"), fmt.Sprintf("listeners.%d", i)) + } + + l.MaxRequestDurationRaw = nil + } + + if l.RequireRequestHeaderRaw != nil { + if l.RequireRequestHeader, err = parseutil.ParseBool(l.RequireRequestHeaderRaw); err != nil { + return multierror.Prefix(fmt.Errorf("invalid value for require_request_header: %w", err), fmt.Sprintf("listeners.%d", i)) + } + + l.RequireRequestHeaderRaw = nil + } + } + + // TLS Parameters + { + if l.TLSDisableRaw != nil { + if l.TLSDisable, err = parseutil.ParseBool(l.TLSDisableRaw); err != nil { + return multierror.Prefix(fmt.Errorf("invalid value for tls_disable: %w", err), fmt.Sprintf("listeners.%d", i)) + } + + l.TLSDisableRaw = nil + } + + if l.TLSCipherSuitesRaw != "" { + if l.TLSCipherSuites, err = tlsutil.ParseCiphers(l.TLSCipherSuitesRaw); err != nil { + return multierror.Prefix(fmt.Errorf("invalid value for tls_cipher_suites: %w", 
err), fmt.Sprintf("listeners.%d", i)) + } + } + + if l.TLSRequireAndVerifyClientCertRaw != nil { + if l.TLSRequireAndVerifyClientCert, err = parseutil.ParseBool(l.TLSRequireAndVerifyClientCertRaw); err != nil { + return multierror.Prefix(fmt.Errorf("invalid value for tls_require_and_verify_client_cert: %w", err), fmt.Sprintf("listeners.%d", i)) + } + + l.TLSRequireAndVerifyClientCertRaw = nil + } + + if l.TLSDisableClientCertsRaw != nil { + if l.TLSDisableClientCerts, err = parseutil.ParseBool(l.TLSDisableClientCertsRaw); err != nil { + return multierror.Prefix(fmt.Errorf("invalid value for tls_disable_client_certs: %w", err), fmt.Sprintf("listeners.%d", i)) + } + + l.TLSDisableClientCertsRaw = nil + } + } + + // HTTP timeouts + { + if l.HTTPReadTimeoutRaw != nil { + if l.HTTPReadTimeout, err = parseutil.ParseDurationSecond(l.HTTPReadTimeoutRaw); err != nil { + return multierror.Prefix(fmt.Errorf("error parsing http_read_timeout: %w", err), fmt.Sprintf("listeners.%d", i)) + } + + l.HTTPReadTimeoutRaw = nil + } + + if l.HTTPReadHeaderTimeoutRaw != nil { + if l.HTTPReadHeaderTimeout, err = parseutil.ParseDurationSecond(l.HTTPReadHeaderTimeoutRaw); err != nil { + return multierror.Prefix(fmt.Errorf("error parsing http_read_header_timeout: %w", err), fmt.Sprintf("listeners.%d", i)) + } + + l.HTTPReadHeaderTimeoutRaw = nil + } + + if l.HTTPWriteTimeoutRaw != nil { + if l.HTTPWriteTimeout, err = parseutil.ParseDurationSecond(l.HTTPWriteTimeoutRaw); err != nil { + return multierror.Prefix(fmt.Errorf("error parsing http_write_timeout: %w", err), fmt.Sprintf("listeners.%d", i)) + } + + l.HTTPWriteTimeoutRaw = nil + } + + if l.HTTPIdleTimeoutRaw != nil { + if l.HTTPIdleTimeout, err = parseutil.ParseDurationSecond(l.HTTPIdleTimeoutRaw); err != nil { + return multierror.Prefix(fmt.Errorf("error parsing http_idle_timeout: %w", err), fmt.Sprintf("listeners.%d", i)) + } + + l.HTTPIdleTimeoutRaw = nil + } + } + + // Proxy Protocol config + { + if l.ProxyProtocolAuthorizedAddrsRaw != nil { + if l.ProxyProtocolAuthorizedAddrs, err = parseutil.ParseAddrs(l.ProxyProtocolAuthorizedAddrsRaw); err != nil { + return multierror.Prefix(fmt.Errorf("error parsing proxy_protocol_authorized_addrs: %w", err), fmt.Sprintf("listeners.%d", i)) + } + + switch l.ProxyProtocolBehavior { + case "allow_authorized", "deny_authorized": + if len(l.ProxyProtocolAuthorizedAddrs) == 0 { + return multierror.Prefix(errors.New("proxy_protocol_behavior set to allow or deny only authorized addresses but no proxy_protocol_authorized_addrs value"), fmt.Sprintf("listeners.%d", i)) + } + } + + l.ProxyProtocolAuthorizedAddrsRaw = nil + } + } + + // X-Forwarded-For config + { + if l.XForwardedForAuthorizedAddrsRaw != nil { + if l.XForwardedForAuthorizedAddrs, err = parseutil.ParseAddrs(l.XForwardedForAuthorizedAddrsRaw); err != nil { + return multierror.Prefix(fmt.Errorf("error parsing x_forwarded_for_authorized_addrs: %w", err), fmt.Sprintf("listeners.%d", i)) + } + + l.XForwardedForAuthorizedAddrsRaw = nil + } + + if l.XForwardedForHopSkipsRaw != nil { + if l.XForwardedForHopSkips, err = parseutil.ParseInt(l.XForwardedForHopSkipsRaw); err != nil { + return multierror.Prefix(fmt.Errorf("error parsing x_forwarded_for_hop_skips: %w", err), fmt.Sprintf("listeners.%d", i)) + } + + if l.XForwardedForHopSkips < 0 { + return multierror.Prefix(fmt.Errorf("x_forwarded_for_hop_skips cannot be negative but set to %d", l.XForwardedForHopSkips), fmt.Sprintf("listeners.%d", i)) + } + + l.XForwardedForHopSkipsRaw = nil + } + + if 
l.XForwardedForRejectNotAuthorizedRaw != nil { + if l.XForwardedForRejectNotAuthorized, err = parseutil.ParseBool(l.XForwardedForRejectNotAuthorizedRaw); err != nil { + return multierror.Prefix(fmt.Errorf("invalid value for x_forwarded_for_reject_not_authorized: %w", err), fmt.Sprintf("listeners.%d", i)) + } + + l.XForwardedForRejectNotAuthorizedRaw = nil + } + + if l.XForwardedForRejectNotPresentRaw != nil { + if l.XForwardedForRejectNotPresent, err = parseutil.ParseBool(l.XForwardedForRejectNotPresentRaw); err != nil { + return multierror.Prefix(fmt.Errorf("invalid value for x_forwarded_for_reject_not_present: %w", err), fmt.Sprintf("listeners.%d", i)) + } + + l.XForwardedForRejectNotPresentRaw = nil + } + } + + // Telemetry + { + if l.Telemetry.UnauthenticatedMetricsAccessRaw != nil { + if l.Telemetry.UnauthenticatedMetricsAccess, err = parseutil.ParseBool(l.Telemetry.UnauthenticatedMetricsAccessRaw); err != nil { + return multierror.Prefix(fmt.Errorf("invalid value for telemetry.unauthenticated_metrics_access: %w", err), fmt.Sprintf("listeners.%d", i)) + } + + l.Telemetry.UnauthenticatedMetricsAccessRaw = nil + } + } + + // Profiling + { + if l.Profiling.UnauthenticatedPProfAccessRaw != nil { + if l.Profiling.UnauthenticatedPProfAccess, err = parseutil.ParseBool(l.Profiling.UnauthenticatedPProfAccessRaw); err != nil { + return multierror.Prefix(fmt.Errorf("invalid value for profiling.unauthenticated_pprof_access: %w", err), fmt.Sprintf("listeners.%d", i)) + } + + l.Profiling.UnauthenticatedPProfAccessRaw = nil + } + } + + // InFlight Request logging + { + if l.InFlightRequestLogging.UnauthenticatedInFlightAccessRaw != nil { + if l.InFlightRequestLogging.UnauthenticatedInFlightAccess, err = parseutil.ParseBool(l.InFlightRequestLogging.UnauthenticatedInFlightAccessRaw); err != nil { + return multierror.Prefix(fmt.Errorf("invalid value for inflight_requests_logging.unauthenticated_in_flight_requests_access: %w", err), fmt.Sprintf("listeners.%d", i)) + } + + l.InFlightRequestLogging.UnauthenticatedInFlightAccessRaw = "" + } + } + + // CORS + { + if l.CorsEnabledRaw != nil { + if l.CorsEnabled, err = parseutil.ParseBool(l.CorsEnabledRaw); err != nil { + return multierror.Prefix(fmt.Errorf("invalid value for cors_enabled: %w", err), fmt.Sprintf("listeners.%d", i)) + } + + l.CorsEnabledRaw = nil + } + + if strutil.StrListContains(l.CorsAllowedOrigins, "*") && len(l.CorsAllowedOrigins) > 1 { + return multierror.Prefix(errors.New("cors_allowed_origins must only contain a wildcard or only non-wildcard values"), fmt.Sprintf("listeners.%d", i)) + } + + if len(l.CorsAllowedHeadersRaw) > 0 { + for _, header := range l.CorsAllowedHeadersRaw { + l.CorsAllowedHeaders = append(l.CorsAllowedHeaders, textproto.CanonicalMIMEHeaderKey(header)) + } + } + } + + // HTTP Headers + { + // if CustomResponseHeadersRaw is nil, we still need to set the default headers + customHeadersMap, err := ParseCustomResponseHeaders(l.CustomResponseHeadersRaw) + if err != nil { + return multierror.Prefix(fmt.Errorf("failed to parse custom_response_headers: %w", err), fmt.Sprintf("listeners.%d", i)) + } + l.CustomResponseHeaders = customHeadersMap + l.CustomResponseHeadersRaw = nil + } + + result.Listeners = append(result.Listeners, &l) + } + + return nil +} + +// ParseSingleIPTemplate is used as a helper function to parse out a single IP +// address from a config parameter. +// If the input doesn't appear to contain the 'template' format, +// it will return the specified input unchanged. 
+func ParseSingleIPTemplate(ipTmpl string) (string, error) { + r := regexp.MustCompile("{{.*?}}") + if !r.MatchString(ipTmpl) { + return ipTmpl, nil + } + + out, err := template.Parse(ipTmpl) + if err != nil { + return "", fmt.Errorf("unable to parse address template %q: %v", ipTmpl, err) + } + + ips := strings.Split(out, " ") + switch len(ips) { + case 0: + return "", errors.New("no addresses found, please configure one") + case 1: + return strings.TrimSpace(ips[0]), nil + default: + return "", fmt.Errorf("multiple addresses found (%q), please configure one", out) + } +} diff --git a/internalshared/configutil/listener_test.go b/internalshared/configutil/listener_test.go new file mode 100644 index 0000000..da7d765 --- /dev/null +++ b/internalshared/configutil/listener_test.go @@ -0,0 +1,52 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configutil + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseSingleIPTemplate(t *testing.T) { + type args struct { + ipTmpl string + } + tests := []struct { + name string + arg string + want string + wantErr assert.ErrorAssertionFunc + }{ + { + name: "test https addr", + arg: "https://vaultproject.io:8200", + want: "https://vaultproject.io:8200", + wantErr: assert.NoError, + }, + { + name: "test invalid template func", + arg: "{{FooBar}}", + want: "", + wantErr: assert.Error, + }, + { + name: "test partial template", + arg: "{{FooBar", + want: "{{FooBar", + wantErr: assert.NoError, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ParseSingleIPTemplate(tt.arg) + if !tt.wantErr(t, err, fmt.Sprintf("ParseSingleIPTemplate(%v)", tt.arg)) { + return + } + + assert.Equalf(t, tt.want, got, "ParseSingleIPTemplate(%v)", tt.arg) + }) + } +} diff --git a/internalshared/configutil/merge.go b/internalshared/configutil/merge.go new file mode 100644 index 0000000..940e8bf --- /dev/null +++ b/internalshared/configutil/merge.go @@ -0,0 +1,102 @@ +// Copyright (c) HashiCorp, Inc. 
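For readers following the listener parsing above: ParseSingleIPTemplate leaves plain addresses untouched and only invokes go-sockaddr templating when it sees a `{{ ... }}` marker, and the rendered template must resolve to exactly one address. Below is a minimal sketch of both paths; the template expression is illustrative only and is not taken from this diff:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/internalshared/configutil"
)

func main() {
	// No "{{ ... }}" present: the input is returned unchanged.
	addr, err := configutil.ParseSingleIPTemplate("10.0.0.5:8200")
	fmt.Println(addr, err) // 10.0.0.5:8200 <nil>

	// A go-sockaddr template is rendered; it must resolve to exactly one
	// address, hence the "limit 1". (Illustrative template expression.)
	addr, err = configutil.ParseSingleIPTemplate(
		`{{ GetAllInterfaces | include "flags" "loopback" | limit 1 | attr "address" }}`)
	fmt.Println(addr, err)
}
```

If the template yields more than one address (for example, without the `limit 1`), the function returns the "multiple addresses found" error shown above rather than picking one arbitrarily.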
+// SPDX-License-Identifier: MPL-2.0 + +package configutil + +func (c *SharedConfig) Merge(c2 *SharedConfig) *SharedConfig { + if c2 == nil { + return c + } + + result := new(SharedConfig) + + for _, l := range c.Listeners { + result.Listeners = append(result.Listeners, l) + } + for _, l := range c2.Listeners { + result.Listeners = append(result.Listeners, l) + } + + for _, userlockout := range c.UserLockouts { + result.UserLockouts = append(result.UserLockouts, userlockout) + } + for _, userlockout := range c2.UserLockouts { + result.UserLockouts = append(result.UserLockouts, userlockout) + } + + result.HCPLinkConf = c.HCPLinkConf + if c2.HCPLinkConf != nil { + result.HCPLinkConf = c2.HCPLinkConf + } + + result.Entropy = c.Entropy + if c2.Entropy != nil { + result.Entropy = c2.Entropy + } + + for _, s := range c.Seals { + result.Seals = append(result.Seals, s) + } + for _, s := range c2.Seals { + result.Seals = append(result.Seals, s) + } + + result.Telemetry = c.Telemetry + if c2.Telemetry != nil { + result.Telemetry = c2.Telemetry + } + + result.DisableMlock = c.DisableMlock + if c2.DisableMlock { + result.DisableMlock = c2.DisableMlock + } + + result.DefaultMaxRequestDuration = c.DefaultMaxRequestDuration + if c2.DefaultMaxRequestDuration > result.DefaultMaxRequestDuration { + result.DefaultMaxRequestDuration = c2.DefaultMaxRequestDuration + } + + result.LogLevel = c.LogLevel + if c2.LogLevel != "" { + result.LogLevel = c2.LogLevel + } + + result.LogFormat = c.LogFormat + if c2.LogFormat != "" { + result.LogFormat = c2.LogFormat + } + + result.LogFile = c.LogFile + if c2.LogFile != "" { + result.LogFile = c2.LogFile + } + + result.LogRotateBytes = c.LogRotateBytes + if c2.LogRotateBytesRaw != nil { + result.LogRotateBytes = c2.LogRotateBytes + result.LogRotateBytesRaw = c2.LogRotateBytesRaw + } + + result.LogRotateMaxFiles = c.LogRotateMaxFiles + if c2.LogRotateMaxFilesRaw != nil { + result.LogRotateMaxFiles = c2.LogRotateMaxFiles + result.LogRotateMaxFilesRaw = c2.LogRotateMaxFilesRaw + } + + result.LogRotateDuration = c.LogRotateDuration + if c2.LogRotateDuration != "" { + result.LogRotateDuration = c2.LogRotateDuration + } + + result.PidFile = c.PidFile + if c2.PidFile != "" { + result.PidFile = c2.PidFile + } + + result.ClusterName = c.ClusterName + if c2.ClusterName != "" { + result.ClusterName = c2.ClusterName + } + + return result +} diff --git a/internalshared/configutil/telemetry.go b/internalshared/configutil/telemetry.go new file mode 100644 index 0000000..270eb49 --- /dev/null +++ b/internalshared/configutil/telemetry.go @@ -0,0 +1,433 @@ +// Copyright (c) HashiCorp, Inc. 
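A note on the Merge method just above: list-valued fields (listeners, user lockouts, seals) from both configs are concatenated, scalar fields from c2 win whenever they are set, and DefaultMaxRequestDuration takes the larger of the two values. A minimal sketch of that precedence, with illustrative values:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/internalshared/configutil"
)

func main() {
	base := &configutil.SharedConfig{LogLevel: "info", ClusterName: "primary"}
	override := &configutil.SharedConfig{LogLevel: "debug"}

	merged := base.Merge(override)
	fmt.Println(merged.LogLevel)    // "debug": c2 wins when it is non-empty
	fmt.Println(merged.ClusterName) // "primary": c2 is empty, so c is kept
}
```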
+// SPDX-License-Identifier: MPL-2.0 + +package configutil + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/hashicorp/go-secure-stdlib/parseutil" + + monitoring "cloud.google.com/go/monitoring/apiv3" + "github.com/armon/go-metrics" + "github.com/armon/go-metrics/circonus" + "github.com/armon/go-metrics/datadog" + "github.com/armon/go-metrics/prometheus" + stackdriver "github.com/google/go-metrics-stackdriver" + stackdrivervault "github.com/google/go-metrics-stackdriver/vault" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/vault/helper/metricsutil" + "github.com/mitchellh/cli" + "google.golang.org/api/option" +) + +const ( + PrometheusDefaultRetentionTime = 24 * time.Hour + UsageGaugeDefaultPeriod = 10 * time.Minute + MaximumGaugeCardinalityDefault = 500 + LeaseMetricsEpsilonDefault = time.Hour + NumLeaseMetricsTimeBucketsDefault = 168 +) + +// Telemetry is the telemetry configuration for the server +type Telemetry struct { + FoundKeys []string `hcl:",decodedFields"` + UnusedKeys UnusedKeyMap `hcl:",unusedKeyPositions"` + StatsiteAddr string `hcl:"statsite_address"` + StatsdAddr string `hcl:"statsd_address"` + + DisableHostname bool `hcl:"disable_hostname"` + EnableHostnameLabel bool `hcl:"enable_hostname_label"` + MetricsPrefix string `hcl:"metrics_prefix"` + UsageGaugePeriod time.Duration + UsageGaugePeriodRaw interface{} `hcl:"usage_gauge_period,alias:UsageGaugePeriod"` + + MaximumGaugeCardinality int `hcl:"maximum_gauge_cardinality"` + + // Circonus: see https://github.com/circonus-labs/circonus-gometrics + // for more details on the various configuration options. + // Valid configuration combinations: + // - CirconusAPIToken + // metric management enabled (search for existing check or create a new one) + // - CirconusSubmissionUrl + // metric management disabled (use check with specified submission_url, + // broker must be using a public SSL certificate) + // - CirconusAPIToken + CirconusCheckSubmissionURL + // metric management enabled (use check with specified submission_url) + // - CirconusAPIToken + CirconusCheckID + // metric management enabled (use check with specified id) + + // CirconusAPIToken is a valid API Token used to create/manage check. If provided, + // metric management is enabled. + // Default: none + CirconusAPIToken string `hcl:"circonus_api_token"` + // CirconusAPIApp is an app name associated with API token. + // Default: "consul" + CirconusAPIApp string `hcl:"circonus_api_app"` + // CirconusAPIURL is the base URL to use for contacting the Circonus API. + // Default: "https://api.circonus.com/v2" + CirconusAPIURL string `hcl:"circonus_api_url"` + // CirconusSubmissionInterval is the interval at which metrics are submitted to Circonus. + // Default: 10s + CirconusSubmissionInterval string `hcl:"circonus_submission_interval"` + // CirconusCheckSubmissionURL is the check.config.submission_url field from a + // previously created HTTPTRAP check. + // Default: none + CirconusCheckSubmissionURL string `hcl:"circonus_submission_url"` + // CirconusCheckID is the check id (not check bundle id) from a previously created + // HTTPTRAP check. The numeric portion of the check._cid field. + // Default: none + CirconusCheckID string `hcl:"circonus_check_id"` + // CirconusCheckForceMetricActivation will force enabling metrics, as they are encountered, + // if the metric already exists and is NOT active. 
If check management is enabled, the default + // behavior is to add new metrics as they are encountered. If the metric already exists in the + // check, it will *NOT* be activated. This setting overrides that behavior. + // Default: "false" + CirconusCheckForceMetricActivation string `hcl:"circonus_check_force_metric_activation"` + // CirconusCheckInstanceID serves to uniquely identify the metrics coming from this "instance". + // It can be used to maintain metric continuity with transient or ephemeral instances as + // they move around within an infrastructure. + // Default: hostname:app + CirconusCheckInstanceID string `hcl:"circonus_check_instance_id"` + // CirconusCheckSearchTag is a special tag which, when coupled with the instance id, helps to + // narrow down the search results when neither a Submission URL or Check ID is provided. + // Default: service:app (e.g. service:consul) + CirconusCheckSearchTag string `hcl:"circonus_check_search_tag"` + // CirconusCheckTags is a comma separated list of tags to apply to the check. Note that + // the value of CirconusCheckSearchTag will always be added to the check. + // Default: none + CirconusCheckTags string `hcl:"circonus_check_tags"` + // CirconusCheckDisplayName is the name for the check which will be displayed in the Circonus UI. + // Default: value of CirconusCheckInstanceID + CirconusCheckDisplayName string `hcl:"circonus_check_display_name"` + // CirconusBrokerID is an explicit broker to use when creating a new check. The numeric portion + // of broker._cid. If metric management is enabled and neither a Submission URL nor Check ID + // is provided, an attempt will be made to search for an existing check using Instance ID and + // Search Tag. If one is not found, a new HTTPTRAP check will be created. + // Default: use Select Tag if provided, otherwise, a random Enterprise Broker associated + // with the specified API token or the default Circonus Broker. + // Default: none + CirconusBrokerID string `hcl:"circonus_broker_id"` + // CirconusBrokerSelectTag is a special tag which will be used to select a broker when + // a Broker ID is not provided. The best use of this is to as a hint for which broker + // should be used based on *where* this particular instance is running. + // (e.g. a specific geo location or datacenter, dc:sfo) + // Default: none + CirconusBrokerSelectTag string `hcl:"circonus_broker_select_tag"` + + // Dogstats: + // DogStatsdAddr is the address of a dogstatsd instance. If provided, + // metrics will be sent to that instance + DogStatsDAddr string `hcl:"dogstatsd_addr"` + + // DogStatsdTags are the global tags that should be sent with each packet to dogstatsd + // It is a list of strings, where each string looks like "my_tag_name:my_tag_value" + DogStatsDTags []string `hcl:"dogstatsd_tags"` + + // Prometheus: + // PrometheusRetentionTime is the retention time for prometheus metrics if greater than 0. + // Default: 24h + PrometheusRetentionTime time.Duration `hcl:"-"` + PrometheusRetentionTimeRaw interface{} `hcl:"prometheus_retention_time"` + + // Stackdriver: + // StackdriverProjectID is the project to publish stackdriver metrics to. + StackdriverProjectID string `hcl:"stackdriver_project_id"` + // StackdriverLocation is the GCP or AWS region of the monitored resource. + StackdriverLocation string `hcl:"stackdriver_location"` + // StackdriverNamespace is the namespace identifier, such as a cluster name. 
+ StackdriverNamespace string `hcl:"stackdriver_namespace"` + // StackdriverDebugLogs will write additional stackdriver related debug logs to stderr. + StackdriverDebugLogs bool `hcl:"stackdriver_debug_logs"` + + // How often metrics for lease expiry will be aggregated + LeaseMetricsEpsilon time.Duration + LeaseMetricsEpsilonRaw interface{} `hcl:"lease_metrics_epsilon"` + + // Number of buckets by time that will be used in lease aggregation + NumLeaseMetricsTimeBuckets int `hcl:"num_lease_metrics_buckets"` + + // Whether or not telemetry should add labels for namespaces + LeaseMetricsNameSpaceLabels bool `hcl:"add_lease_metrics_namespace_labels"` + + // FilterDefault is the default for whether to allow a metric that's not + // covered by the prefix filter. + FilterDefault *bool `hcl:"filter_default"` + + // PrefixFilter is a list of filter rules to apply for allowing + // or blocking metrics by prefix. + PrefixFilter []string `hcl:"prefix_filter"` +} + +func (t *Telemetry) Validate(source string) []ConfigError { + return ValidateUnusedFields(t.UnusedKeys, source) +} + +func (t *Telemetry) GoString() string { + return fmt.Sprintf("*%#v", *t) +} + +func parseTelemetry(result *SharedConfig, list *ast.ObjectList) error { + if len(list.Items) > 1 { + return fmt.Errorf("only one 'telemetry' block is permitted") + } + + // Get our one item + item := list.Items[0] + + if result.Telemetry == nil { + result.Telemetry = &Telemetry{} + } + + if err := hcl.DecodeObject(&result.Telemetry, item.Val); err != nil { + return multierror.Prefix(err, "telemetry:") + } + + if result.Telemetry.PrometheusRetentionTimeRaw != nil { + var err error + if result.Telemetry.PrometheusRetentionTime, err = parseutil.ParseDurationSecond(result.Telemetry.PrometheusRetentionTimeRaw); err != nil { + return err + } + result.Telemetry.PrometheusRetentionTimeRaw = nil + } else { + result.Telemetry.PrometheusRetentionTime = PrometheusDefaultRetentionTime + } + + if result.Telemetry.UsageGaugePeriodRaw != nil { + if result.Telemetry.UsageGaugePeriodRaw == "none" { + result.Telemetry.UsageGaugePeriod = 0 + } else { + var err error + if result.Telemetry.UsageGaugePeriod, err = parseutil.ParseDurationSecond(result.Telemetry.UsageGaugePeriodRaw); err != nil { + return err + } + result.Telemetry.UsageGaugePeriodRaw = nil + } + } else { + result.Telemetry.UsageGaugePeriod = UsageGaugeDefaultPeriod + } + + if result.Telemetry.MaximumGaugeCardinality == 0 { + result.Telemetry.MaximumGaugeCardinality = MaximumGaugeCardinalityDefault + } + + if result.Telemetry.LeaseMetricsEpsilonRaw != nil { + if result.Telemetry.LeaseMetricsEpsilonRaw == "none" { + result.Telemetry.LeaseMetricsEpsilonRaw = 0 + } else { + var err error + if result.Telemetry.LeaseMetricsEpsilon, err = parseutil.ParseDurationSecond(result.Telemetry.LeaseMetricsEpsilonRaw); err != nil { + return err + } + result.Telemetry.LeaseMetricsEpsilonRaw = nil + } + } else { + result.Telemetry.LeaseMetricsEpsilon = LeaseMetricsEpsilonDefault + } + + if result.Telemetry.NumLeaseMetricsTimeBuckets == 0 { + result.Telemetry.NumLeaseMetricsTimeBuckets = NumLeaseMetricsTimeBucketsDefault + } + + return nil +} + +type SetupTelemetryOpts struct { + Config *Telemetry + Ui cli.Ui + ServiceName string + DisplayName string + UserAgent string + ClusterName string +} + +// SetupTelemetry is used to setup the telemetry sub-systems and returns the +// in-memory sink to be used in http configuration +func SetupTelemetry(opts *SetupTelemetryOpts) (*metrics.InmemSink, *metricsutil.ClusterMetricSink, 
bool, error) { + if opts == nil { + return nil, nil, false, errors.New("nil opts passed into SetupTelemetry") + } + + if opts.Config == nil { + opts.Config = &Telemetry{} + } + + /* Setup telemetry + Aggregate on 10 second intervals for 1 minute. Expose the + metrics over stderr when there is a SIGUSR1 received. + */ + inm := metrics.NewInmemSink(10*time.Second, time.Minute) + metrics.DefaultInmemSignal(inm) + + if opts.Config.MetricsPrefix != "" { + opts.ServiceName = opts.Config.MetricsPrefix + } + + metricsConf := metrics.DefaultConfig(opts.ServiceName) + metricsConf.EnableHostname = !opts.Config.DisableHostname + metricsConf.EnableHostnameLabel = opts.Config.EnableHostnameLabel + if opts.Config.FilterDefault != nil { + metricsConf.FilterDefault = *opts.Config.FilterDefault + } + + // Configure the statsite sink + var fanout metrics.FanoutSink + var prometheusEnabled bool + + // Configure the Prometheus sink + if opts.Config.PrometheusRetentionTime != 0 { + prometheusEnabled = true + prometheusOpts := prometheus.PrometheusOpts{ + Expiration: opts.Config.PrometheusRetentionTime, + } + + sink, err := prometheus.NewPrometheusSinkFrom(prometheusOpts) + if err != nil { + return nil, nil, false, err + } + fanout = append(fanout, sink) + } + + if opts.Config.StatsiteAddr != "" { + sink, err := metrics.NewStatsiteSink(opts.Config.StatsiteAddr) + if err != nil { + return nil, nil, false, err + } + fanout = append(fanout, sink) + } + + // Configure the statsd sink + if opts.Config.StatsdAddr != "" { + sink, err := metrics.NewStatsdSink(opts.Config.StatsdAddr) + if err != nil { + return nil, nil, false, err + } + fanout = append(fanout, sink) + } + + // Configure the Circonus sink + if opts.Config.CirconusAPIToken != "" || opts.Config.CirconusCheckSubmissionURL != "" { + cfg := &circonus.Config{} + cfg.Interval = opts.Config.CirconusSubmissionInterval + cfg.CheckManager.API.TokenKey = opts.Config.CirconusAPIToken + cfg.CheckManager.API.TokenApp = opts.Config.CirconusAPIApp + cfg.CheckManager.API.URL = opts.Config.CirconusAPIURL + cfg.CheckManager.Check.SubmissionURL = opts.Config.CirconusCheckSubmissionURL + cfg.CheckManager.Check.ID = opts.Config.CirconusCheckID + cfg.CheckManager.Check.ForceMetricActivation = opts.Config.CirconusCheckForceMetricActivation + cfg.CheckManager.Check.InstanceID = opts.Config.CirconusCheckInstanceID + cfg.CheckManager.Check.SearchTag = opts.Config.CirconusCheckSearchTag + cfg.CheckManager.Check.DisplayName = opts.Config.CirconusCheckDisplayName + cfg.CheckManager.Check.Tags = opts.Config.CirconusCheckTags + cfg.CheckManager.Broker.ID = opts.Config.CirconusBrokerID + cfg.CheckManager.Broker.SelectTag = opts.Config.CirconusBrokerSelectTag + + if cfg.CheckManager.API.TokenApp == "" { + cfg.CheckManager.API.TokenApp = opts.ServiceName + } + + if cfg.CheckManager.Check.DisplayName == "" { + cfg.CheckManager.Check.DisplayName = opts.DisplayName + } + + if cfg.CheckManager.Check.SearchTag == "" { + cfg.CheckManager.Check.SearchTag = fmt.Sprintf("service:%s", opts.ServiceName) + } + + sink, err := circonus.NewCirconusSink(cfg) + if err != nil { + return nil, nil, false, err + } + sink.Start() + fanout = append(fanout, sink) + } + + if opts.Config.DogStatsDAddr != "" { + var tags []string + + if opts.Config.DogStatsDTags != nil { + tags = opts.Config.DogStatsDTags + } + + sink, err := datadog.NewDogStatsdSink(opts.Config.DogStatsDAddr, metricsConf.HostName) + if err != nil { + return nil, nil, false, fmt.Errorf("failed to start DogStatsD sink: %w", err) + } + 
sink.SetTags(tags)
+		fanout = append(fanout, sink)
+	}
+
+	// Configure the stackdriver sink
+	if opts.Config.StackdriverProjectID != "" {
+		client, err := monitoring.NewMetricClient(context.Background(), option.WithUserAgent(opts.UserAgent))
+		if err != nil {
+			return nil, nil, false, fmt.Errorf("failed to create stackdriver client: %v", err)
+		}
+		sink := stackdriver.NewSink(client, &stackdriver.Config{
+			LabelExtractor: stackdrivervault.Extractor,
+			Bucketer:       stackdrivervault.Bucketer,
+			ProjectID:      opts.Config.StackdriverProjectID,
+			Location:       opts.Config.StackdriverLocation,
+			Namespace:      opts.Config.StackdriverNamespace,
+			DebugLogs:      opts.Config.StackdriverDebugLogs,
+		})
+		fanout = append(fanout, sink)
+	}
+
+	// Initialize the global sink
+	if len(fanout) > 1 {
+		// Leaving hostname prefixes enabled produces poorly named metrics for Prometheus
+		if !opts.Config.DisableHostname {
+			opts.Ui.Warn("telemetry.disable_hostname has been set to false. Recommended setting is true for Prometheus to avoid poorly named metrics.")
+		}
+	} else {
+		metricsConf.EnableHostname = false
+	}
+	fanout = append(fanout, inm)
+	globalMetrics, err := metrics.NewGlobal(metricsConf, fanout)
+	if err != nil {
+		return nil, nil, false, err
+	}
+
+	// Initialize a wrapper around the global sink; this will be passed to Core
+	// and to any backend.
+	wrapper := metricsutil.NewClusterMetricSink(opts.ClusterName, globalMetrics)
+	wrapper.MaxGaugeCardinality = opts.Config.MaximumGaugeCardinality
+	wrapper.GaugeInterval = opts.Config.UsageGaugePeriod
+	wrapper.TelemetryConsts.LeaseMetricsEpsilon = opts.Config.LeaseMetricsEpsilon
+	wrapper.TelemetryConsts.LeaseMetricsNameSpaceLabels = opts.Config.LeaseMetricsNameSpaceLabels
+	wrapper.TelemetryConsts.NumLeaseMetricsTimeBuckets = opts.Config.NumLeaseMetricsTimeBuckets
+
+	// Parse the metric filters
+	telemetryAllowedPrefixes, telemetryBlockedPrefixes, err := parsePrefixFilter(opts.Config.PrefixFilter)
+	if err != nil {
+		return nil, nil, false, err
+	}
+
+	metrics.UpdateFilter(telemetryAllowedPrefixes, telemetryBlockedPrefixes)
+	return inm, wrapper, prometheusEnabled, nil
+}
+
+func parsePrefixFilter(prefixFilters []string) ([]string, []string, error) {
+	var telemetryAllowedPrefixes, telemetryBlockedPrefixes []string
+
+	for _, rule := range prefixFilters {
+		if rule == "" {
+			return nil, nil, fmt.Errorf("Cannot have empty filter rule in prefix_filter")
+		}
+		switch rule[0] {
+		case '+':
+			telemetryAllowedPrefixes = append(telemetryAllowedPrefixes, rule[1:])
+		case '-':
+			telemetryBlockedPrefixes = append(telemetryBlockedPrefixes, rule[1:])
+		default:
+			return nil, nil, fmt.Errorf("Filter rule must begin with either '+' or '-': %q", rule)
+		}
+	}
+	return telemetryAllowedPrefixes, telemetryBlockedPrefixes, nil
+}
diff --git a/internalshared/configutil/telemetry_test.go b/internalshared/configutil/telemetry_test.go
new file mode 100644
index 0000000..aaeb808
--- /dev/null
+++ b/internalshared/configutil/telemetry_test.go
@@ -0,0 +1,56 @@
+// Copyright (c) HashiCorp, Inc.
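To make the flow above concrete, here is a minimal sketch of wiring up SetupTelemetry outside of Vault's own server startup. The service, cluster, and metric-prefix values are illustrative placeholders, and only the Prometheus sink is enabled (via a non-zero retention time):

```go
package main

import (
	"log"
	"os"
	"time"

	"github.com/hashicorp/vault/internalshared/configutil"
	"github.com/mitchellh/cli"
)

func main() {
	opts := &configutil.SetupTelemetryOpts{
		Config: &configutil.Telemetry{
			PrometheusRetentionTime: 24 * time.Hour, // non-zero: enables the Prometheus sink
			UsageGaugePeriod:        10 * time.Minute,
			MaximumGaugeCardinality: 500,
			// "+" allows a metric prefix, "-" blocks it (see parsePrefixFilter).
			PrefixFilter: []string{"+vault.token", "-vault.expire"},
		},
		Ui:          &cli.BasicUi{Writer: os.Stderr, ErrorWriter: os.Stderr},
		ServiceName: "vault",           // placeholder
		DisplayName: "Vault",           // placeholder
		UserAgent:   "vault-example",   // placeholder
		ClusterName: "example-cluster", // placeholder
	}

	inmem, clusterSink, prometheusEnabled, err := configutil.SetupTelemetry(opts)
	if err != nil {
		log.Fatal(err)
	}
	_ = inmem             // in-memory sink; dumped to stderr on SIGUSR1
	_ = clusterSink       // wrapper that Vault hands to Core and backends
	_ = prometheusEnabled // true here, since the retention time is non-zero
}
```

Note that a non-empty MetricsPrefix would override ServiceName, mirroring the assignment near the top of SetupTelemetry.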
+// SPDX-License-Identifier: MPL-2.0 + +package configutil + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParsePrefixFilters(t *testing.T) { + t.Parallel() + cases := []struct { + inputFilters []string + expectedErrStr string + expectedAllowedPrefixes []string + expectedBlockedPrefixes []string + }{ + { + []string{""}, + "Cannot have empty filter rule in prefix_filter", + []string(nil), + []string(nil), + }, + { + []string{"vault.abc"}, + "Filter rule must begin with either '+' or '-': \"vault.abc\"", + []string(nil), + []string(nil), + }, + { + []string{"+vault.abc", "-vault.bcd"}, + "", + []string{"vault.abc"}, + []string{"vault.bcd"}, + }, + } + t.Run("validate metric filter configs", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + + allowedPrefixes, blockedPrefixes, err := parsePrefixFilter(tc.inputFilters) + + if err != nil { + assert.EqualError(t, err, tc.expectedErrStr) + } else { + assert.Equal(t, "", tc.expectedErrStr) + assert.Equal(t, tc.expectedAllowedPrefixes, allowedPrefixes) + + assert.Equal(t, tc.expectedBlockedPrefixes, blockedPrefixes) + } + } + }) +} diff --git a/internalshared/configutil/userlockout.go b/internalshared/configutil/userlockout.go new file mode 100644 index 0000000..df76308 --- /dev/null +++ b/internalshared/configutil/userlockout.go @@ -0,0 +1,193 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package configutil + +import ( + "errors" + "fmt" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" +) + +const ( + UserLockoutThresholdDefault = 5 + UserLockoutDurationDefault = 15 * time.Minute + UserLockoutCounterResetDefault = 15 * time.Minute + DisableUserLockoutDefault = false +) + +type UserLockout struct { + Type string + LockoutThreshold uint64 `hcl:"-"` + LockoutThresholdRaw interface{} `hcl:"lockout_threshold"` + LockoutDuration time.Duration `hcl:"-"` + LockoutDurationRaw interface{} `hcl:"lockout_duration"` + LockoutCounterReset time.Duration `hcl:"-"` + LockoutCounterResetRaw interface{} `hcl:"lockout_counter_reset"` + DisableLockout bool `hcl:"-"` + DisableLockoutRaw interface{} `hcl:"disable_lockout"` +} + +func GetSupportedUserLockoutsAuthMethods() []string { + return []string{"userpass", "approle", "ldap"} +} + +func ParseUserLockouts(result *SharedConfig, list *ast.ObjectList) error { + var err error + result.UserLockouts = make([]*UserLockout, 0, len(list.Items)) + userLockoutsMap := make(map[string]*UserLockout) + for i, item := range list.Items { + var userLockoutConfig UserLockout + if err := hcl.DecodeObject(&userLockoutConfig, item.Val); err != nil { + return multierror.Prefix(err, fmt.Sprintf("userLockouts.%d:", i)) + } + + // Base values + { + switch { + case userLockoutConfig.Type != "": + case len(item.Keys) == 1: + userLockoutConfig.Type = strings.ToLower(item.Keys[0].Token.Value().(string)) + default: + return multierror.Prefix(errors.New("auth type for user lockout must be specified, if it applies to all auth methods specify \"all\" "), fmt.Sprintf("user_lockouts.%d:", i)) + } + + userLockoutConfig.Type = strings.ToLower(userLockoutConfig.Type) + // Supported auth methods for user lockout configuration: ldap, approle, userpass + // "all" is used to apply the configuration to all supported auth methods + switch userLockoutConfig.Type { + case "all", "ldap", "approle", "userpass": + 
result.found(userLockoutConfig.Type, userLockoutConfig.Type)
+			default:
+				return multierror.Prefix(fmt.Errorf("unsupported auth type %q", userLockoutConfig.Type), fmt.Sprintf("user_lockouts.%d:", i))
+			}
+		}
+
+		// Lockout Parameters
+
+		// The raw entries are deliberately not set to nil as soon as they are
+		// parsed, because they are needed later to fill in any missing user
+		// lockout configuration values.
+		{
+			if userLockoutConfig.LockoutThresholdRaw != nil {
+				userLockoutThresholdString := fmt.Sprintf("%v", userLockoutConfig.LockoutThresholdRaw)
+				if userLockoutConfig.LockoutThreshold, err = strconv.ParseUint(userLockoutThresholdString, 10, 64); err != nil {
+					return multierror.Prefix(fmt.Errorf("error parsing lockout_threshold: %w", err), fmt.Sprintf("user_lockouts.%d", i))
+				}
+			}
+
+			if userLockoutConfig.LockoutDurationRaw != nil {
+				if userLockoutConfig.LockoutDuration, err = parseutil.ParseDurationSecond(userLockoutConfig.LockoutDurationRaw); err != nil {
+					return multierror.Prefix(fmt.Errorf("error parsing lockout_duration: %w", err), fmt.Sprintf("user_lockouts.%d", i))
+				}
+				if userLockoutConfig.LockoutDuration < 0 {
+					return multierror.Prefix(errors.New("lockout_duration cannot be negative"), fmt.Sprintf("user_lockouts.%d", i))
+				}
+			}
+
+			if userLockoutConfig.LockoutCounterResetRaw != nil {
+				if userLockoutConfig.LockoutCounterReset, err = parseutil.ParseDurationSecond(userLockoutConfig.LockoutCounterResetRaw); err != nil {
+					return multierror.Prefix(fmt.Errorf("error parsing lockout_counter_reset: %w", err), fmt.Sprintf("user_lockouts.%d", i))
+				}
+				if userLockoutConfig.LockoutCounterReset < 0 {
+					return multierror.Prefix(errors.New("lockout_counter_reset cannot be negative"), fmt.Sprintf("user_lockouts.%d", i))
+				}
+			}
+
+			if userLockoutConfig.DisableLockoutRaw != nil {
+				if userLockoutConfig.DisableLockout, err = parseutil.ParseBool(userLockoutConfig.DisableLockoutRaw); err != nil {
+					return multierror.Prefix(fmt.Errorf("invalid value for disable_lockout: %w", err), fmt.Sprintf("user_lockouts.%d", i))
+				}
+			}
+		}
+		userLockoutsMap[userLockoutConfig.Type] = &userLockoutConfig
+	}
+
+	// Use the raw entries to fill in any user lockout configuration fields
+	// that were not set in the config file. A non-nil raw entry means the
+	// field was explicitly configured; missing fields are given default
+	// values. The parsed (non-raw) entries alone are not enough for this:
+	// for lockout_threshold and disable_lockout we cannot tell whether the
+	// user explicitly configured the zero values (0 and false) or simply did
+	// not configure the fields in the config file at all.
+ // The raw fields are set to nil after setting missing values in setNilValuesForRawUserLockoutFields function + userLockoutsMap = setMissingUserLockoutValuesInMap(userLockoutsMap) + for _, userLockoutValues := range userLockoutsMap { + result.UserLockouts = append(result.UserLockouts, userLockoutValues) + } + return nil +} + +// setUserLockoutValueAllInMap sets default user lockout values for key "all" (all auth methods) +// for user lockout fields that are not configured using config file +func setUserLockoutValueAllInMap(userLockoutAll *UserLockout) *UserLockout { + if userLockoutAll.Type == "" { + userLockoutAll.Type = "all" + } + if userLockoutAll.LockoutThresholdRaw == nil { + userLockoutAll.LockoutThreshold = UserLockoutThresholdDefault + } + if userLockoutAll.LockoutDurationRaw == nil { + userLockoutAll.LockoutDuration = UserLockoutDurationDefault + } + if userLockoutAll.LockoutCounterResetRaw == nil { + userLockoutAll.LockoutCounterReset = UserLockoutCounterResetDefault + } + if userLockoutAll.DisableLockoutRaw == nil { + userLockoutAll.DisableLockout = DisableUserLockoutDefault + } + return setNilValuesForRawUserLockoutFields(userLockoutAll) +} + +// setDefaultUserLockoutValuesInMap sets missing user lockout fields for auth methods +// with default values (from key "all") that are not configured using config file +func setMissingUserLockoutValuesInMap(userLockoutsMap map[string]*UserLockout) map[string]*UserLockout { + // set values for "all" key with default values for "all" user lockout fields that are not configured + // the "all" key values will be used as default values for other auth methods + userLockoutAll, ok := userLockoutsMap["all"] + switch ok { + case true: + userLockoutsMap["all"] = setUserLockoutValueAllInMap(userLockoutAll) + default: + userLockoutsMap["all"] = setUserLockoutValueAllInMap(&UserLockout{}) + } + + for _, userLockoutAuth := range userLockoutsMap { + if userLockoutAuth.Type == "all" { + continue + } + // set missing values + if userLockoutAuth.LockoutThresholdRaw == nil { + userLockoutAuth.LockoutThreshold = userLockoutsMap["all"].LockoutThreshold + } + if userLockoutAuth.LockoutDurationRaw == nil { + userLockoutAuth.LockoutDuration = userLockoutsMap["all"].LockoutDuration + } + if userLockoutAuth.LockoutCounterResetRaw == nil { + userLockoutAuth.LockoutCounterReset = userLockoutsMap["all"].LockoutCounterReset + } + if userLockoutAuth.DisableLockoutRaw == nil { + userLockoutAuth.DisableLockout = userLockoutsMap["all"].DisableLockout + } + userLockoutAuth = setNilValuesForRawUserLockoutFields(userLockoutAuth) + userLockoutsMap[userLockoutAuth.Type] = userLockoutAuth + } + return userLockoutsMap +} + +// setNilValuesForRawUserLockoutFields sets nil values for user lockout Raw fields +func setNilValuesForRawUserLockoutFields(userLockout *UserLockout) *UserLockout { + userLockout.LockoutThresholdRaw = nil + userLockout.LockoutDurationRaw = nil + userLockout.LockoutCounterResetRaw = nil + userLockout.DisableLockoutRaw = nil + return userLockout +} diff --git a/internalshared/configutil/userlockout_test.go b/internalshared/configutil/userlockout_test.go new file mode 100644 index 0000000..db05441 --- /dev/null +++ b/internalshared/configutil/userlockout_test.go @@ -0,0 +1,72 @@ +// Copyright (c) HashiCorp, Inc. 
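Since setMissingUserLockoutValuesInMap and its helpers are unexported, the sketch below assumes it lives in package configutil, the same way the tests that follow do; the ldap values are illustrative. It shows how an auth-specific block keeps what it sets and inherits the rest from the synthesized "all" entry:

```go
package configutil

import (
	"fmt"
	"time"
)

// Example_lockoutDefaults: a hypothetical walk-through of default inheritance.
func Example_lockoutDefaults() {
	in := map[string]*UserLockout{
		// lockout_threshold was configured explicitly (raw entry non-nil);
		// all other fields are left to inherit from "all".
		"ldap": {Type: "ldap", LockoutThreshold: 10, LockoutThresholdRaw: "10"},
	}

	out := setMissingUserLockoutValuesInMap(in)

	fmt.Println(out["ldap"].LockoutThreshold)                     // explicit value kept
	fmt.Println(out["ldap"].LockoutDuration == 15*time.Minute)    // inherited default
	fmt.Println(out["all"].LockoutCounterReset == 15*time.Minute) // synthesized "all" entry
	// Output:
	// 10
	// true
	// true
}
```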
+// SPDX-License-Identifier: MPL-2.0 + +package configutil + +import ( + "reflect" + "testing" + "time" +) + +func TestParseUserLockout(t *testing.T) { + t.Parallel() + t.Run("Missing user lockout block in config file", func(t *testing.T) { + t.Parallel() + inputConfig := make(map[string]*UserLockout) + expectedConfig := make(map[string]*UserLockout) + expectedConfigall := &UserLockout{} + expectedConfigall.Type = "all" + expectedConfigall.LockoutThreshold = UserLockoutThresholdDefault + expectedConfigall.LockoutDuration = UserLockoutDurationDefault + expectedConfigall.LockoutCounterReset = UserLockoutCounterResetDefault + expectedConfigall.DisableLockout = DisableUserLockoutDefault + expectedConfig["all"] = expectedConfigall + + outputConfig := setMissingUserLockoutValuesInMap(inputConfig) + if !reflect.DeepEqual(expectedConfig["all"], outputConfig["all"]) { + t.Errorf("user lockout config: expected %#v\nactual %#v", expectedConfig["all"], outputConfig["all"]) + } + }) + t.Run("setting default lockout counter reset and lockout duration for userpass in config ", func(t *testing.T) { + t.Parallel() + // input user lockout in config file + inputConfig := make(map[string]*UserLockout) + configAll := &UserLockout{} + configAll.Type = "all" + configAll.LockoutCounterReset = 20 * time.Minute + configAll.LockoutCounterResetRaw = "1200000000000" + inputConfig["all"] = configAll + configUserpass := &UserLockout{} + configUserpass.Type = "userpass" + configUserpass.LockoutDuration = 10 * time.Minute + configUserpass.LockoutDurationRaw = "600000000000" + inputConfig["userpass"] = configUserpass + + expectedConfig := make(map[string]*UserLockout) + expectedConfigall := &UserLockout{} + expectedConfigUserpass := &UserLockout{} + // expected default values + expectedConfigall.Type = "all" + expectedConfigall.LockoutThreshold = UserLockoutThresholdDefault + expectedConfigall.LockoutDuration = UserLockoutDurationDefault + expectedConfigall.LockoutCounterReset = 20 * time.Minute + expectedConfigall.DisableLockout = DisableUserLockoutDefault + // expected values for userpass + expectedConfigUserpass.Type = "userpass" + expectedConfigUserpass.LockoutThreshold = UserLockoutThresholdDefault + expectedConfigUserpass.LockoutDuration = 10 * time.Minute + expectedConfigUserpass.LockoutCounterReset = 20 * time.Minute + expectedConfigUserpass.DisableLockout = DisableUserLockoutDefault + expectedConfig["all"] = expectedConfigall + expectedConfig["userpass"] = expectedConfigUserpass + + outputConfig := setMissingUserLockoutValuesInMap(inputConfig) + if !reflect.DeepEqual(expectedConfig["all"], outputConfig["all"]) { + t.Errorf("user lockout config: expected %#v\nactual %#v", expectedConfig["all"], outputConfig["all"]) + } + if !reflect.DeepEqual(expectedConfig["userpass"], outputConfig["userpass"]) { + t.Errorf("user lockout config: expected %#v\nactual %#v", expectedConfig["userpass"], outputConfig["userpass"]) + } + }) +} diff --git a/internalshared/listenerutil/bufconn.go b/internalshared/listenerutil/bufconn.go new file mode 100644 index 0000000..54af0a7 --- /dev/null +++ b/internalshared/listenerutil/bufconn.go @@ -0,0 +1,41 @@ +// Copyright (c) HashiCorp, Inc. 
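The BufConnWrapper introduced in the bufconn.go diff that follows gives consul-template a TransportDialer for Vault's in-memory listener, so no real socket is involved. A hedged sketch of a round trip over a bufconn (the buffer size and greeting are illustrative):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/internalshared/listenerutil"
	"google.golang.org/grpc/test/bufconn"
)

func main() {
	ln := bufconn.Listen(1 << 20) // 1 MiB buffer, illustrative

	// Server side: accept one connection and write a greeting.
	go func() {
		conn, err := ln.Accept()
		if err != nil {
			return
		}
		defer conn.Close()
		fmt.Fprintln(conn, "hello from the in-memory listener")
	}()

	// Client side: both Dial arguments are ignored, since there is no
	// real network address behind the bufconn.
	dialer := listenerutil.NewBufConnWrapper(ln)
	conn, err := dialer.Dial("tcp", "ignored:0")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	buf := make([]byte, 64)
	n, _ := conn.Read(buf)
	fmt.Print(string(buf[:n]))
}
```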
+// SPDX-License-Identifier: MPL-2.0 + +package listenerutil + +import ( + "context" + "net" + + "google.golang.org/grpc/test/bufconn" +) + +const BufConnType = "bufconn" + +// BufConnWrapper implements consul-template's TransportDialer using a +// bufconn listener, to provide a way to Dial the in-memory listener +type BufConnWrapper struct { + listener *bufconn.Listener +} + +// NewBufConnWrapper returns a new BufConnWrapper using an +// existing bufconn.Listener +func NewBufConnWrapper(bcl *bufconn.Listener) *BufConnWrapper { + return &BufConnWrapper{ + listener: bcl, + } +} + +// Dial connects to the listening end of the bufconn (satisfies +// consul-template's TransportDialer interface). This is essentially the client +// side of the bufconn connection. +func (bcl *BufConnWrapper) Dial(_, _ string) (net.Conn, error) { + return bcl.listener.Dial() +} + +// DialContext connects to the listening end of the bufconn (satisfies +// consul-template's TransportDialer interface). This is essentially the client +// side of the bufconn connection. +func (bcl *BufConnWrapper) DialContext(ctx context.Context, _, _ string) (net.Conn, error) { + return bcl.listener.DialContext(ctx) +} diff --git a/internalshared/listenerutil/listener.go b/internalshared/listenerutil/listener.go new file mode 100644 index 0000000..9a4edb4 --- /dev/null +++ b/internalshared/listenerutil/listener.go @@ -0,0 +1,249 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package listenerutil + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net" + "os" + osuser "os/user" + "strconv" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-secure-stdlib/reloadutil" + "github.com/hashicorp/go-secure-stdlib/tlsutil" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/jefferai/isbadcipher" + "github.com/mitchellh/cli" +) + +type Listener struct { + net.Listener + Config *configutil.Listener +} + +type UnixSocketsConfig struct { + User string `hcl:"user"` + Mode string `hcl:"mode"` + Group string `hcl:"group"` +} + +// rmListener is an implementation of net.Listener that forwards most +// calls to the listener but also removes a file as part of the close. We +// use this to cleanup the unix domain socket on close. +type rmListener struct { + net.Listener + Path string +} + +func (l *rmListener) Close() error { + // Close the listener itself + if err := l.Listener.Close(); err != nil { + return err + } + + // Remove the file + return os.Remove(l.Path) +} + +func UnixSocketListener(path string, unixSocketsConfig *UnixSocketsConfig) (net.Listener, error) { + if err := os.Remove(path); err != nil && !os.IsNotExist(err) { + return nil, fmt.Errorf("failed to remove socket file: %v", err) + } + + ln, err := net.Listen("unix", path) + if err != nil { + return nil, err + } + + if unixSocketsConfig != nil { + err = setFilePermissions(path, unixSocketsConfig.User, unixSocketsConfig.Group, unixSocketsConfig.Mode) + if err != nil { + return nil, fmt.Errorf("failed to set file system permissions on the socket file: %s", err) + } + } + + // Wrap the listener in rmListener so that the Unix domain socket file is + // removed on close. 
+ return &rmListener{ + Listener: ln, + Path: path, + }, nil +} + +func TLSConfig( + l *configutil.Listener, + props map[string]string, + ui cli.Ui, +) (*tls.Config, reloadutil.ReloadFunc, error) { + props["tls"] = "disabled" + + if l.TLSDisable { + return nil, nil, nil + } + + cg := reloadutil.NewCertificateGetter(l.TLSCertFile, l.TLSKeyFile, "") + if err := cg.Reload(); err != nil { + // We try the key without a passphrase first and if we get an incorrect + // passphrase response, try again after prompting for a passphrase + if errwrap.Contains(err, x509.IncorrectPasswordError.Error()) { + var passphrase string + passphrase, err = ui.AskSecret(fmt.Sprintf("Enter passphrase for %s:", l.TLSKeyFile)) + if err == nil { + cg = reloadutil.NewCertificateGetter(l.TLSCertFile, l.TLSKeyFile, passphrase) + if err = cg.Reload(); err == nil { + goto PASSPHRASECORRECT + } + } + } + return nil, nil, fmt.Errorf("error loading TLS cert: %w", err) + } + +PASSPHRASECORRECT: + tlsConf := &tls.Config{ + GetCertificate: cg.GetCertificate, + NextProtos: []string{"h2", "http/1.1"}, + ClientAuth: tls.RequestClientCert, + } + + if l.TLSMinVersion == "" { + l.TLSMinVersion = "tls12" + } + + if l.TLSMaxVersion == "" { + l.TLSMaxVersion = "tls13" + } + + var ok bool + tlsConf.MinVersion, ok = tlsutil.TLSLookup[l.TLSMinVersion] + if !ok { + return nil, nil, fmt.Errorf("'tls_min_version' value %q not supported, please specify one of [tls10,tls11,tls12,tls13]", l.TLSMinVersion) + } + + tlsConf.MaxVersion, ok = tlsutil.TLSLookup[l.TLSMaxVersion] + if !ok { + return nil, nil, fmt.Errorf("'tls_max_version' value %q not supported, please specify one of [tls10,tls11,tls12,tls13]", l.TLSMaxVersion) + } + + if tlsConf.MaxVersion < tlsConf.MinVersion { + return nil, nil, fmt.Errorf("'tls_max_version' must be greater than or equal to 'tls_min_version'") + } + + if len(l.TLSCipherSuites) > 0 { + // HTTP/2 with TLS 1.2 blacklists several cipher suites. + // https://tools.ietf.org/html/rfc7540#appendix-A + // + // Since the CLI (net/http) automatically uses HTTP/2 with TLS 1.2, + // we check here if all or some specified cipher suites are blacklisted. + badCiphers := []string{} + for _, cipher := range l.TLSCipherSuites { + if isbadcipher.IsBadCipher(cipher) { + // Get the name of the current cipher. + cipherStr, err := tlsutil.GetCipherName(cipher) + if err != nil { + return nil, nil, fmt.Errorf("invalid value for 'tls_cipher_suites': %w", err) + } + badCiphers = append(badCiphers, cipherStr) + } + } + if len(badCiphers) == len(l.TLSCipherSuites) { + ui.Warn(`WARNING! All cipher suites defined by 'tls_cipher_suites' are blacklisted by the +HTTP/2 specification. HTTP/2 communication with TLS 1.2 will not work as intended +and Vault will be unavailable via the CLI. +Please see https://tools.ietf.org/html/rfc7540#appendix-A for further information.`) + } else if len(badCiphers) > 0 { + ui.Warn(fmt.Sprintf(`WARNING! 
The following cipher suites defined by 'tls_cipher_suites' are
+blacklisted by the HTTP/2 specification:
+%v
+Please see https://tools.ietf.org/html/rfc7540#appendix-A for further information.`, badCiphers))
+		}
+		tlsConf.CipherSuites = l.TLSCipherSuites
+	}
+
+	if l.TLSRequireAndVerifyClientCert {
+		tlsConf.ClientAuth = tls.RequireAndVerifyClientCert
+		if l.TLSClientCAFile != "" {
+			caPool := x509.NewCertPool()
+			data, err := ioutil.ReadFile(l.TLSClientCAFile)
+			if err != nil {
+				return nil, nil, fmt.Errorf("failed to read tls_client_ca_file: %w", err)
+			}
+
+			if !caPool.AppendCertsFromPEM(data) {
+				return nil, nil, fmt.Errorf("failed to parse CA certificate in tls_client_ca_file")
+			}
+			tlsConf.ClientCAs = caPool
+		}
+	}
+
+	if l.TLSDisableClientCerts {
+		if l.TLSRequireAndVerifyClientCert {
+			return nil, nil, fmt.Errorf("'tls_disable_client_certs' and 'tls_require_and_verify_client_cert' are mutually exclusive")
+		}
+		tlsConf.ClientAuth = tls.NoClientCert
+	}
+
+	props["tls"] = "enabled"
+	return tlsConf, cg.Reload, nil
+}
+
+// setFilePermissions handles configuring ownership and permissions
+// settings on a given file. All permission/ownership settings are
+// optional. If no user or group is specified, the current user/group
+// will be used. Mode is optional, and has no default (the operation is
+// not performed if absent). User and group may each be specified by
+// name or ID.
+func setFilePermissions(path string, user, group, mode string) error {
+	var err error
+	uid, gid := os.Getuid(), os.Getgid()
+
+	if user != "" {
+		if uid, err = strconv.Atoi(user); err == nil {
+			goto GROUP
+		}
+
+		// Try looking up the user by name
+		u, err := osuser.Lookup(user)
+		if err != nil {
+			return fmt.Errorf("failed to look up user %q: %v", user, err)
+		}
+		uid, _ = strconv.Atoi(u.Uid)
+	}
+
+GROUP:
+	if group != "" {
+		if gid, err = strconv.Atoi(group); err == nil {
+			goto OWN
+		}
+
+		// Try looking up the group by name
+		g, err := osuser.LookupGroup(group)
+		if err != nil {
+			return fmt.Errorf("failed to look up group %q: %v", group, err)
+		}
+		gid, _ = strconv.Atoi(g.Gid)
+	}
+
+OWN:
+	if err := os.Chown(path, uid, gid); err != nil {
+		return fmt.Errorf("failed setting ownership to %d:%d on %q: %v",
+			uid, gid, path, err)
+	}
+
+	if mode != "" {
+		mode, err := strconv.ParseUint(mode, 8, 32)
+		if err != nil {
+			return fmt.Errorf("invalid mode specified: %v", err)
+		}
+		if err := os.Chmod(path, os.FileMode(mode)); err != nil {
+			return fmt.Errorf("failed setting permissions to %d on %q: %v",
+				mode, path, err)
+		}
+	}
+
+	return nil
+}
diff --git a/internalshared/listenerutil/listener_test.go b/internalshared/listenerutil/listener_test.go
new file mode 100644
index 0000000..6219727
--- /dev/null
+++ b/internalshared/listenerutil/listener_test.go
@@ -0,0 +1,90 @@
+// Copyright (c) HashiCorp, Inc.
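For orientation, a minimal sketch of how a caller might consume TLSConfig above, assuming placeholder certificate paths; the returned reload function is what gets invoked on config-reload signals to re-read the certificate pair:

```go
package main

import (
	"log"
	"os"

	"github.com/hashicorp/vault/internalshared/configutil"
	"github.com/hashicorp/vault/internalshared/listenerutil"
	"github.com/mitchellh/cli"
)

func main() {
	l := &configutil.Listener{
		Address:       "127.0.0.1:8200",
		TLSCertFile:   "/etc/vault/tls/server.crt", // placeholder path
		TLSKeyFile:    "/etc/vault/tls/server.key", // placeholder path
		TLSMinVersion: "tls12",
	}

	props := map[string]string{}
	ui := &cli.BasicUi{Writer: os.Stderr, ErrorWriter: os.Stderr}

	tlsConf, reloadFn, err := listenerutil.TLSConfig(l, props, ui)
	if err != nil {
		log.Fatal(err)
	}
	_ = tlsConf  // hand this to the HTTP server / wrapped net.Listener
	_ = reloadFn // call on reload to re-read the certificate pair
	log.Printf("tls: %s", props["tls"]) // "enabled" unless tls_disable is set
}
```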
+// SPDX-License-Identifier: MPL-2.0 + +package listenerutil + +import ( + "io/ioutil" + "os" + osuser "os/user" + "strconv" + "testing" +) + +func TestUnixSocketListener(t *testing.T) { + t.Run("ids", func(t *testing.T) { + socket, err := ioutil.TempFile("", "socket") + if err != nil { + t.Fatal(err) + } + defer os.Remove(socket.Name()) + + uid, gid := os.Getuid(), os.Getgid() + + u, err := osuser.LookupId(strconv.Itoa(uid)) + if err != nil { + t.Fatal(err) + } + user := u.Username + + g, err := osuser.LookupGroupId(strconv.Itoa(gid)) + if err != nil { + t.Fatal(err) + } + group := g.Name + + l, err := UnixSocketListener(socket.Name(), &UnixSocketsConfig{ + User: user, + Group: group, + Mode: "644", + }) + if err != nil { + t.Fatal(err) + } + defer l.Close() + + fi, err := os.Stat(socket.Name()) + if err != nil { + t.Fatal(err) + } + + mode, err := strconv.ParseUint("644", 8, 32) + if err != nil { + t.Fatal(err) + } + if fi.Mode().Perm() != os.FileMode(mode) { + t.Fatalf("failed to set permissions on the socket file") + } + }) + t.Run("names", func(t *testing.T) { + socket, err := ioutil.TempFile("", "socket") + if err != nil { + t.Fatal(err) + } + defer os.Remove(socket.Name()) + + uid, gid := os.Getuid(), os.Getgid() + l, err := UnixSocketListener(socket.Name(), &UnixSocketsConfig{ + User: strconv.Itoa(uid), + Group: strconv.Itoa(gid), + Mode: "644", + }) + if err != nil { + t.Fatal(err) + } + defer l.Close() + + fi, err := os.Stat(socket.Name()) + if err != nil { + t.Fatal(err) + } + + mode, err := strconv.ParseUint("644", 8, 32) + if err != nil { + t.Fatal(err) + } + if fi.Mode().Perm() != os.FileMode(mode) { + t.Fatalf("failed to set permissions on the socket file") + } + }) +} diff --git a/main.go b/main.go new file mode 100644 index 0000000..0417bd9 --- /dev/null +++ b/main.go @@ -0,0 +1,20 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package main // import "github.com/hashicorp/vault" + +import ( + "os" + + "github.com/hashicorp/vault/command" + "github.com/hashicorp/vault/internal" +) + +func init() { + // this is a good place to patch SHA-1 support back into x509 + internal.PatchSha1() +} + +func main() { + os.Exit(command.Run(os.Args[1:])) +} diff --git a/main_test.go b/main_test.go new file mode 100644 index 0000000..3639833 --- /dev/null +++ b/main_test.go @@ -0,0 +1,7 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package main // import "github.com/hashicorp/vault" + +// This file is intentionally empty to force early versions of Go +// to test compilation for tests. diff --git a/make.bat b/make.bat new file mode 100644 index 0000000..ca3238b --- /dev/null +++ b/make.bat @@ -0,0 +1,113 @@ +@echo off +setlocal + +set _EXITCODE=0 + +REM If no target is provided, default to test. +if [%1]==[] goto test + +set _TARGETS=bin,bootstrap,dev,generate,test,testacc,testrace,vet +set _EXTERNAL_TOOLS=github.com/kardianos/govendor + +REM Run target. +for %%a in (%_TARGETS%) do (if x%1==x%%a goto %%a) +goto usage + +REM bin generates the releasable binaries for Vault +:bin + call :generate + call .\scripts\windows\build.bat "%CD%" + goto :eof + +REM bootstrap downloads required build tools +:bootstrap + for %%t in (%_EXTERNAL_TOOLS%) do (go get -u -v %%t) + goto :eof + +REM dev creates binaries for testing Vault locally. 
These are put
+REM into ./bin/ as well as %GOPATH%/bin
+:dev
+    call :generate
+    call .\scripts\windows\build.bat "%CD%" VAULT_DEV
+    goto :eof
+
+REM generate runs `go generate` to build the dynamically generated
+REM source files.
+:generate
+    for /F "usebackq" %%f in (`go list ./... ^| findstr /v vendor`) do @go generate %%f
+    goto :eof
+
+REM test runs the unit tests and vets the code.
+:test
+    call :testsetup
+    go test %_TEST% %TESTARGS% -timeout=30s -parallel=4
+    call :setMaxExitCode %ERRORLEVEL%
+    echo.
+    goto vet
+
+REM testacc runs acceptance tests.
+:testacc
+    call :testsetup
+    if x%_TEST% == x./... goto testacc_fail
+    if x%_TEST% == x.\... goto testacc_fail
+    set VAULT_ACC=1
+    go test %_TEST% -v %TESTARGS% -timeout 45m
+    exit /b %ERRORLEVEL%
+:testacc_fail
+    echo ERROR: Set %%TEST%% to a specific package.
+    exit /b 1
+
+REM testrace runs the race checker.
+:testrace
+    call :testsetup
+    go test -race %_TEST% %TESTARGS%
+    exit /b %ERRORLEVEL%
+
+REM testsetup calls `go generate` and defines the variables VAULT_ACC
+REM and _TEST. VAULT_ACC is always cleared. _TEST defaults to the value
+REM of the TEST environment variable, provided that TEST is defined,
+REM otherwise _TEST is set to "./...".
+:testsetup
+    call :generate
+    set VAULT_ACC=
+    set _TEST=./...
+    if defined TEST set _TEST=%TEST%
+    goto :eof
+
+REM vet runs the Go source code static analysis tool `vet` to find
+REM any common errors.
+:vet
+    set _VETARGS=-asmdecl -atomic -bool -buildtags -copylocks -methods -nilfunc -printf -rangeloops -shift -structtags -unsafeptr
+    if defined VETARGS set _VETARGS=%VETARGS%
+
+    go tool vet 2>nul
+    if %ERRORLEVEL% equ 3 go get golang.org/x/tools/cmd/vet
+
+    set _vetExitCode=0
+    set _VAULT_PKG_DIRS=%TEMP%\vault-pkg-dirs.txt
+
+    go list -f {{.Dir}} ./... | findstr /v vendor >"%_VAULT_PKG_DIRS%"
+    REM Skip the first row, which is the main vault package (.*github.com/hashicorp/vault$)
+    for /f "usebackq skip=1 delims=" %%d in ("%_VAULT_PKG_DIRS%") do (
+        go tool vet %_VETARGS% "%%d"
+        if ERRORLEVEL 1 set _vetExitCode=1
+        call :setMaxExitCode %_vetExitCode%
+    )
+    del /f "%_VAULT_PKG_DIRS%" 2>NUL
+    if %_vetExitCode% equ 0 exit /b %_EXITCODE%
+    echo.
+    echo Vet found suspicious constructs. Please check the reported constructs
+    echo and fix them if necessary before submitting the code for review.
+    exit /b %_EXITCODE%
+
+:setMaxExitCode
+    if %1 gtr %_EXITCODE% set _EXITCODE=%1
+    goto :eof
+
+:usage
+    echo usage: make [target]
+    echo.
+    echo target is in {%_TARGETS%}.
+    echo target defaults to test if none is provided.
+    exit /b 2
+    goto :eof
diff --git a/physical/aerospike/aerospike.go b/physical/aerospike/aerospike.go
new file mode 100644
index 0000000..81aab22
--- /dev/null
+++ b/physical/aerospike/aerospike.go
@@ -0,0 +1,254 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package aerospike
+
+import (
+	"context"
+	"crypto/sha256"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	aero "github.com/aerospike/aerospike-client-go/v5"
+	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/go-secure-stdlib/strutil"
+	"github.com/hashicorp/vault/sdk/physical"
+)
+
+const (
+	keyBin   = "keyBin"
+	valueBin = "valueBin"
+
+	defaultNamespace = "test"
+
+	defaultHostname = "127.0.0.1"
+	defaultPort     = 3000
+
+	keyNotFoundError = "Key not found"
+)
+
+// AerospikeBackend is a physical backend that stores data in Aerospike.
+type AerospikeBackend struct { + client *aero.Client + namespace string + set string + logger log.Logger +} + +// Verify AerospikeBackend satisfies the correct interface. +var _ physical.Backend = (*AerospikeBackend)(nil) + +// NewAerospikeBackend constructs an AerospikeBackend backend. +func NewAerospikeBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + namespace, ok := conf["namespace"] + if !ok { + namespace = defaultNamespace + } + set := conf["set"] + + policy, err := buildClientPolicy(conf) + if err != nil { + return nil, err + } + + client, err := buildAerospikeClient(conf, policy) + if err != nil { + return nil, err + } + + return &AerospikeBackend{ + client: client, + namespace: namespace, + set: set, + logger: logger, + }, nil +} + +func buildAerospikeClient(conf map[string]string, policy *aero.ClientPolicy) (*aero.Client, error) { + hostListString, ok := conf["hostlist"] + if !ok || hostListString == "" { + hostname, ok := conf["hostname"] + if !ok || hostname == "" { + hostname = defaultHostname + } + + portString, ok := conf["port"] + if !ok || portString == "" { + portString = strconv.Itoa(defaultPort) + } + + port, err := strconv.Atoi(portString) + if err != nil { + return nil, err + } + + return aero.NewClientWithPolicy(policy, hostname, port) + } + + hostList, err := parseHostList(hostListString) + if err != nil { + return nil, err + } + + return aero.NewClientWithPolicyAndHost(policy, hostList...) +} + +func buildClientPolicy(conf map[string]string) (*aero.ClientPolicy, error) { + policy := aero.NewClientPolicy() + + policy.User = conf["username"] + policy.Password = conf["password"] + + authMode := aero.AuthModeInternal + if mode, ok := conf["auth_mode"]; ok { + switch strings.ToUpper(mode) { + case "EXTERNAL": + authMode = aero.AuthModeExternal + case "INTERNAL": + authMode = aero.AuthModeInternal + default: + return nil, fmt.Errorf("'auth_mode' must be one of {INTERNAL, EXTERNAL}") + } + } + policy.AuthMode = authMode + policy.ClusterName = conf["cluster_name"] + + if timeoutString, ok := conf["timeout"]; ok { + timeout, err := strconv.Atoi(timeoutString) + if err != nil { + return nil, err + } + policy.Timeout = time.Duration(timeout) * time.Millisecond + } + + if idleTimeoutString, ok := conf["idle_timeout"]; ok { + idleTimeout, err := strconv.Atoi(idleTimeoutString) + if err != nil { + return nil, err + } + policy.IdleTimeout = time.Duration(idleTimeout) * time.Millisecond + } + + return policy, nil +} + +func (a *AerospikeBackend) key(userKey string) (*aero.Key, error) { + return aero.NewKey(a.namespace, a.set, hash(userKey)) +} + +// Put is used to insert or update an entry. +func (a *AerospikeBackend) Put(_ context.Context, entry *physical.Entry) error { + aeroKey, err := a.key(entry.Key) + if err != nil { + return err + } + + // replace the Aerospike record if exists + writePolicy := aero.NewWritePolicy(0, 0) + writePolicy.RecordExistsAction = aero.REPLACE + + binMap := make(aero.BinMap, 2) + binMap[keyBin] = entry.Key + binMap[valueBin] = entry.Value + + return a.client.Put(writePolicy, aeroKey, binMap) +} + +// Get is used to fetch an entry. 
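Concretely, each entry is one Aerospike record: the user key is the SHA-256 hex digest of the Vault key (see the hash helper further down), and the original key and value live in the keyBin and valueBin bins. A minimal sketch of the equivalent direct read using the same client library; the host, namespace, and set values are illustrative placeholders:

```go
package main

import (
	"crypto/sha256"
	"fmt"

	aero "github.com/aerospike/aerospike-client-go/v5"
)

func main() {
	// Illustrative connection details; adjust for a real cluster.
	client, err := aero.NewClient("127.0.0.1", 3000)
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// Mirror the backend's key derivation: SHA-256 hex of the Vault key.
	sum := sha256.Sum256([]byte("secret/foo"))
	key, err := aero.NewKey("test", "vault", fmt.Sprintf("%x", sum[:]))
	if err != nil {
		panic(err)
	}

	// The stored value lives in the "valueBin" bin.
	record, err := client.Get(nil, key)
	if err != nil {
		panic(err)
	}
	fmt.Printf("value: %s\n", record.Bins["valueBin"])
}
```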
+func (a *AerospikeBackend) Get(_ context.Context, key string) (*physical.Entry, error) { + aeroKey, err := a.key(key) + if err != nil { + return nil, err + } + + record, err := a.client.Get(nil, aeroKey) + if err != nil { + if strings.Contains(err.Error(), keyNotFoundError) { + return nil, nil + } + return nil, err + } + + value, ok := record.Bins[valueBin] + if !ok { + return nil, fmt.Errorf("Value bin was not found in the record") + } + + return &physical.Entry{ + Key: key, + Value: value.([]byte), + }, nil +} + +// Delete is used to permanently delete an entry. +func (a *AerospikeBackend) Delete(_ context.Context, key string) error { + aeroKey, err := a.key(key) + if err != nil { + return err + } + + _, err = a.client.Delete(nil, aeroKey) + return err +} + +// List is used to list all the keys under a given +// prefix, up to the next prefix. +func (a *AerospikeBackend) List(_ context.Context, prefix string) ([]string, error) { + recordSet, err := a.client.ScanAll(nil, a.namespace, a.set) + if err != nil { + return nil, err + } + + var keyList []string + for res := range recordSet.Results() { + if res.Err != nil { + return nil, res.Err + } + recordKey := res.Record.Bins[keyBin].(string) + if strings.HasPrefix(recordKey, prefix) { + trimPrefix := strings.TrimPrefix(recordKey, prefix) + keys := strings.Split(trimPrefix, "/") + if len(keys) == 1 { + keyList = append(keyList, keys[0]) + } else { + withSlash := keys[0] + "/" + if !strutil.StrListContains(keyList, withSlash) { + keyList = append(keyList, withSlash) + } + } + } + } + + return keyList, nil +} + +func parseHostList(list string) ([]*aero.Host, error) { + hosts := strings.Split(list, ",") + var hostList []*aero.Host + for _, host := range hosts { + if host == "" { + continue + } + split := strings.Split(host, ":") + switch len(split) { + case 1: + hostList = append(hostList, aero.NewHost(split[0], defaultPort)) + case 2: + port, err := strconv.Atoi(split[1]) + if err != nil { + return nil, err + } + hostList = append(hostList, aero.NewHost(split[0], port)) + default: + return nil, fmt.Errorf("Invalid 'hostlist' configuration") + } + } + return hostList, nil +} + +func hash(s string) string { + hash := sha256.Sum256([]byte(s)) + return fmt.Sprintf("%x", hash[:]) +} diff --git a/physical/aerospike/aerospike_test.go b/physical/aerospike/aerospike_test.go new file mode 100644 index 0000000..6887610 --- /dev/null +++ b/physical/aerospike/aerospike_test.go @@ -0,0 +1,93 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package aerospike + +import ( + "context" + "math/bits" + "testing" + "time" + + aero "github.com/aerospike/aerospike-client-go/v5" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" +) + +func TestAerospikeBackend(t *testing.T) { + if bits.UintSize == 32 { + t.Skip("Aerospike storage is only supported on 64-bit architectures") + } + cleanup, config := prepareAerospikeContainer(t) + defer cleanup() + + logger := logging.NewVaultLogger(log.Debug) + + b, err := NewAerospikeBackend(map[string]string{ + "hostname": config.hostname, + "port": config.port, + "namespace": config.namespace, + "set": config.set, + }, logger) + if err != nil { + t.Fatalf("err: %s", err) + } + + physical.ExerciseBackend(t, b) + physical.ExerciseBackend_ListPrefix(t, b) +} + +type aerospikeConfig struct { + hostname string + port string + namespace string + set string +} + +func prepareAerospikeContainer(t *testing.T) (func(), *aerospikeConfig) { + runner, err := docker.NewServiceRunner(docker.RunOptions{ + ImageRepo: "docker.mirror.hashicorp.services/aerospike/aerospike-server", + ContainerName: "aerospikedb", + ImageTag: "5.6.0.5", + Ports: []string{"3000/tcp", "3001/tcp", "3002/tcp", "3003/tcp"}, + }) + if err != nil { + t.Fatalf("Could not start local Aerospike: %s", err) + } + + svc, err := runner.StartService(context.Background(), + func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { + cfg := docker.NewServiceHostPort(host, port) + + time.Sleep(time.Second) + client, err := aero.NewClient(host, port) + if err != nil { + return nil, err + } + + node, err := client.Cluster().GetRandomNode() + if err != nil { + return nil, err + } + + _, err = node.RequestInfo(aero.NewInfoPolicy(), "namespaces") + if err != nil { + return nil, err + } + + return cfg, nil + }, + ) + if err != nil { + t.Fatalf("Could not start local Aerospike: %s", err) + } + + return svc.Cleanup, &aerospikeConfig{ + hostname: svc.Config.URL().Hostname(), + port: svc.Config.URL().Port(), + namespace: "test", + set: "vault", + } +} diff --git a/physical/alicloudoss/alicloudoss.go b/physical/alicloudoss/alicloudoss.go new file mode 100644 index 0000000..d32d14b --- /dev/null +++ b/physical/alicloudoss/alicloudoss.go @@ -0,0 +1,232 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package alicloudoss + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "os" + "sort" + "strconv" + "strings" + "time" + + "github.com/aliyun/aliyun-oss-go-sdk/oss" + "github.com/armon/go-metrics" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/physical" +) + +const ( + AlibabaMetricKey = "alibaba" + + AlibabaCloudOSSEndpointEnv = "ALICLOUD_OSS_ENDPOINT" + AlibabaCloudOSSBucketEnv = "ALICLOUD_OSS_BUCKET" + AlibabaCloudAccessKeyEnv = "ALICLOUD_ACCESS_KEY" + AlibabaCloudSecretKeyEnv = "ALICLOUD_SECRET_KEY" +) + +// Verify AliCloudOSSBackend satisfies the correct interfaces +var _ physical.Backend = (*AliCloudOSSBackend)(nil) + +// AliCloudOSSBackend is a physical backend that stores data +// within an Alibaba OSS bucket. +type AliCloudOSSBackend struct { + bucket string + client *oss.Client + logger log.Logger + permitPool *physical.PermitPool +} + +// NewAliCloudOSSBackend constructs an OSS backend using a pre-existing +// bucket. Credentials can be provided to the backend, sourced +// from the environment. 
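For each setting, the environment variable takes precedence over the conf map, which keeps credentials out of config files. A minimal construction sketch under that assumption; the endpoint, bucket, and credential values are placeholders (the endpoint format follows the example in the test below):

```go
package main

import (
	"os"

	log "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/vault/physical/alicloudoss"
	"github.com/hashicorp/vault/sdk/helper/logging"
)

func main() {
	// Credentials sourced from the environment (placeholder values).
	os.Setenv("ALICLOUD_ACCESS_KEY", "my-access-key")
	os.Setenv("ALICLOUD_SECRET_KEY", "my-secret-key")

	// Endpoint and bucket come from the conf map when the
	// corresponding environment variables are unset.
	backend, err := alicloudoss.NewAliCloudOSSBackend(map[string]string{
		"endpoint": "http://oss-us-east-1.aliyuncs.com",
		"bucket":   "my-vault-bucket",
	}, logging.NewVaultLogger(log.Debug))
	if err != nil {
		panic(err)
	}
	_ = backend
}
```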
+func NewAliCloudOSSBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + endpoint := os.Getenv(AlibabaCloudOSSEndpointEnv) + if endpoint == "" { + endpoint = conf["endpoint"] + if endpoint == "" { + return nil, fmt.Errorf("'endpoint' must be set") + } + } + + bucket := os.Getenv(AlibabaCloudOSSBucketEnv) + if bucket == "" { + bucket = conf["bucket"] + if bucket == "" { + return nil, fmt.Errorf("'bucket' must be set") + } + } + + accessKeyID := os.Getenv(AlibabaCloudAccessKeyEnv) + if accessKeyID == "" { + accessKeyID = conf["access_key"] + if accessKeyID == "" { + return nil, fmt.Errorf("'access_key' must be set") + } + } + + accessKeySecret := os.Getenv(AlibabaCloudSecretKeyEnv) + if accessKeySecret == "" { + accessKeySecret = conf["secret_key"] + if accessKeySecret == "" { + return nil, fmt.Errorf("'secret_key' must be set") + } + } + + options := func(c *oss.Client) { + c.Config.Timeout = 30 + } + + client, err := oss.New(endpoint, accessKeyID, accessKeySecret, options) + if err != nil { + return nil, err + } + + bucketObj, err := client.Bucket(bucket) + if err != nil { + return nil, err + } + + _, err = bucketObj.ListObjects() + if err != nil { + return nil, fmt.Errorf("unable to access bucket %q at endpoint %q: %w", bucket, endpoint, err) + } + + maxParStr, ok := conf["max_parallel"] + var maxParInt int + if ok { + maxParInt, err = strconv.Atoi(maxParStr) + if err != nil { + return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err) + } + if logger.IsDebug() { + logger.Debug("max_parallel set", "max_parallel", maxParInt) + } + } + + a := &AliCloudOSSBackend{ + client: client, + bucket: bucket, + logger: logger, + permitPool: physical.NewPermitPool(maxParInt), + } + return a, nil +} + +// Put is used to insert or update an entry +func (a *AliCloudOSSBackend) Put(ctx context.Context, entry *physical.Entry) error { + defer metrics.MeasureSince([]string{AlibabaMetricKey, "put"}, time.Now()) + + a.permitPool.Acquire() + defer a.permitPool.Release() + + bucket, err := a.client.Bucket(a.bucket) + if err != nil { + return err + } + + return bucket.PutObject(entry.Key, bytes.NewReader(entry.Value)) +} + +// Get is used to fetch an entry +func (a *AliCloudOSSBackend) Get(ctx context.Context, key string) (*physical.Entry, error) { + defer metrics.MeasureSince([]string{AlibabaMetricKey, "get"}, time.Now()) + + a.permitPool.Acquire() + defer a.permitPool.Release() + + bucket, err := a.client.Bucket(a.bucket) + if err != nil { + return nil, err + } + + object, err := bucket.GetObject(key) + if err != nil { + switch err := err.(type) { + case oss.ServiceError: + if err.StatusCode == http.StatusNotFound && err.Code == "NoSuchKey" { + return nil, nil + } + } + return nil, err + } + + data := bytes.NewBuffer(nil) + _, err = io.Copy(data, object) + if err != nil { + return nil, err + } + + ent := &physical.Entry{ + Key: key, + Value: data.Bytes(), + } + + return ent, nil +} + +// Delete is used to permanently delete an entry +func (a *AliCloudOSSBackend) Delete(ctx context.Context, key string) error { + defer metrics.MeasureSince([]string{AlibabaMetricKey, "delete"}, time.Now()) + + a.permitPool.Acquire() + defer a.permitPool.Release() + + bucket, err := a.client.Bucket(a.bucket) + if err != nil { + return err + } + + return bucket.DeleteObject(key) +} + +// List is used to list all the keys under a given +// prefix, up to the next prefix. 
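The delimiter-based listing implements the same fold-to-next-prefix contract as every other physical backend here: keys nested more than one level below the prefix collapse into a single entry with a trailing slash. A backend-agnostic sketch of that contract:

```go
package example

import (
	"context"
	"fmt"

	"github.com/hashicorp/vault/sdk/physical"
)

// ListNextLevel illustrates the List contract shared by these backends:
// with stored keys "foo/bar", "foo/baz/one", and "foo/baz/two", listing
// the prefix "foo/" yields []string{"bar", "baz/"} -- everything under
// "baz/" folds into a single entry with a trailing slash.
func ListNextLevel(ctx context.Context, b physical.Backend) ([]string, error) {
	keys, err := b.List(ctx, "foo/")
	if err != nil {
		return nil, err
	}
	fmt.Println(keys) // e.g. [bar baz/]
	return keys, nil
}
```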
+func (a *AliCloudOSSBackend) List(ctx context.Context, prefix string) ([]string, error) { + defer metrics.MeasureSince([]string{AlibabaMetricKey, "list"}, time.Now()) + + a.permitPool.Acquire() + defer a.permitPool.Release() + + keys := []string{} + + bucket, err := a.client.Bucket(a.bucket) + if err != nil { + return nil, err + } + + marker := oss.Marker("") + + for { + result, err := bucket.ListObjects(oss.Prefix(prefix), oss.Delimiter("/"), marker) + if err != nil { + return nil, err + } + + for _, commonPrefix := range result.CommonPrefixes { + commonPrefix := strings.TrimPrefix(commonPrefix, prefix) + keys = append(keys, commonPrefix) + } + + for _, object := range result.Objects { + // Add objects only from the current 'folder' + key := strings.TrimPrefix(object.Key, prefix) + keys = append(keys, key) + } + + if !result.IsTruncated { + break + } + + marker = oss.Marker(result.NextMarker) + } + + sort.Strings(keys) + + return keys, nil +} diff --git a/physical/alicloudoss/alicloudoss_test.go b/physical/alicloudoss/alicloudoss_test.go new file mode 100644 index 0000000..1b098bd --- /dev/null +++ b/physical/alicloudoss/alicloudoss_test.go @@ -0,0 +1,84 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package alicloudoss + +import ( + "fmt" + "math/rand" + "os" + "testing" + "time" + + "github.com/aliyun/aliyun-oss-go-sdk/oss" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" +) + +func TestAliCloudOSSBackend(t *testing.T) { + // ex. http://oss-us-east-1.aliyuncs.com + endpoint := os.Getenv(AlibabaCloudOSSEndpointEnv) + accessKeyID := os.Getenv(AlibabaCloudAccessKeyEnv) + accessKeySecret := os.Getenv(AlibabaCloudSecretKeyEnv) + + if endpoint == "" || accessKeyID == "" || accessKeySecret == "" { + t.SkipNow() + } + + conn, err := oss.New(endpoint, accessKeyID, accessKeySecret) + if err != nil { + t.Fatalf("unable to create test client: %s", err) + } + + randInt := rand.New(rand.NewSource(time.Now().UnixNano())).Int() + bucket := fmt.Sprintf("vault-alibaba-testacc-%d", randInt) + + err = conn.CreateBucket(bucket) + if err != nil { + t.Fatalf("unable to create test bucket: %s", err) + } + + defer func() { + // Gotta list all the objects and delete them + // before being able to delete the bucket + b, err := conn.Bucket(bucket) + if err != nil { + t.Fatalf("err: %s", err) + } + + listResp, err := b.ListObjects() + if err != nil { + t.Fatalf("err: %s", err) + } + + objects := []string{} + for _, object := range listResp.Objects { + objects = append(objects, object.Key) + } + + _, err = b.DeleteObjects(objects) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = conn.DeleteBucket(bucket) + if err != nil { + t.Fatalf("err: %s", err) + } + }() + + logger := logging.NewVaultLogger(log.Debug) + + // This uses the same logic to find the Alibaba credentials as we did at the beginning of the test + b, err := NewAliCloudOSSBackend( + map[string]string{"bucket": bucket}, + logger, + ) + if err != nil { + t.Fatalf("err: %s", err) + } + + physical.ExerciseBackend(t, b) + physical.ExerciseBackend_ListPrefix(t, b) +} diff --git a/physical/azure/azure.go b/physical/azure/azure.go new file mode 100644 index 0000000..fe88449 --- /dev/null +++ b/physical/azure/azure.go @@ -0,0 +1,330 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package azure + +import ( + "context" + "errors" + "fmt" + "io/ioutil" + "net/url" + "os" + "sort" + "strconv" + "strings" + "time" + + "github.com/Azure/azure-storage-blob-go/azblob" + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/armon/go-metrics" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/physical" +) + +const ( + // MaxBlobSize at this time + MaxBlobSize = 1024 * 1024 * 4 + // MaxListResults is the current default value, setting explicitly + MaxListResults = 5000 +) + +// AzureBackend is a physical backend that stores data +// within an Azure blob container. +type AzureBackend struct { + container *azblob.ContainerURL + logger log.Logger + permitPool *physical.PermitPool +} + +// Verify AzureBackend satisfies the correct interfaces +var _ physical.Backend = (*AzureBackend)(nil) + +// NewAzureBackend constructs an Azure backend using a pre-existing +// bucket. Credentials can be provided to the backend, sourced +// from the environment, via HCL or by using managed identities. +func NewAzureBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + name := os.Getenv("AZURE_BLOB_CONTAINER") + useMSI := false + + if name == "" { + name = conf["container"] + if name == "" { + return nil, fmt.Errorf("'container' must be set") + } + } + + accountName := os.Getenv("AZURE_ACCOUNT_NAME") + if accountName == "" { + accountName = conf["accountName"] + if accountName == "" { + return nil, fmt.Errorf("'accountName' must be set") + } + } + + accountKey := os.Getenv("AZURE_ACCOUNT_KEY") + if accountKey == "" { + accountKey = conf["accountKey"] + if accountKey == "" { + logger.Info("accountKey not set, using managed identity auth") + useMSI = true + } + } + + environmentName := os.Getenv("AZURE_ENVIRONMENT") + if environmentName == "" { + environmentName = conf["environment"] + if environmentName == "" { + environmentName = "AzurePublicCloud" + } + } + + environmentURL := os.Getenv("AZURE_ARM_ENDPOINT") + if environmentURL == "" { + environmentURL = conf["arm_endpoint"] + } + + var environment azure.Environment + var URL *url.URL + var err error + + testHost := conf["testHost"] + switch { + case testHost != "": + URL = &url.URL{Scheme: "http", Host: testHost, Path: fmt.Sprintf("/%s/%s", accountName, name)} + default: + if environmentURL != "" { + environment, err = azure.EnvironmentFromURL(environmentURL) + if err != nil { + return nil, fmt.Errorf("failed to look up Azure environment descriptor for URL %q: %w", environmentURL, err) + } + } else { + environment, err = azure.EnvironmentFromName(environmentName) + if err != nil { + return nil, fmt.Errorf("failed to look up Azure environment descriptor for name %q: %w", environmentName, err) + } + } + URL, err = url.Parse( + fmt.Sprintf("https://%s.blob.%s/%s", accountName, environment.StorageEndpointSuffix, name)) + if err != nil { + return nil, fmt.Errorf("failed to create Azure client: %w", err) + } + } + + var credential azblob.Credential + if useMSI { + authToken, err := getAuthTokenFromIMDS(environment.ResourceIdentifiers.Storage) + if err != nil { + return nil, fmt.Errorf("failed to obtain auth token from IMDS %q: %w", environmentName, err) + } + + credential = azblob.NewTokenCredential(authToken.OAuthToken(), func(c azblob.TokenCredential) time.Duration { + err = authToken.Refresh() + if err != nil { + logger.Error("couldn't refresh token 
credential", "error", err) + return 0 + } + + expIn, err := authToken.Token().ExpiresIn.Int64() + if err != nil { + logger.Error("couldn't retrieve jwt claim for 'expiresIn' from refreshed token", "error", err) + return 0 + } + + logger.Debug("token refreshed, new token expires in", "access_token_expiry", expIn) + c.SetToken(authToken.OAuthToken()) + + // tokens are valid for 23h59m (86399s) by default, refresh after ~21h + return time.Duration(int(float64(expIn)*0.9)) * time.Second + }) + } else { + credential, err = azblob.NewSharedKeyCredential(accountName, accountKey) + if err != nil { + return nil, fmt.Errorf("failed to create Azure client: %w", err) + } + } + + p := azblob.NewPipeline(credential, azblob.PipelineOptions{}) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + containerURL := azblob.NewContainerURL(*URL, p) + _, err = containerURL.GetProperties(ctx, azblob.LeaseAccessConditions{}) + if err != nil { + var e azblob.StorageError + if errors.As(err, &e) { + switch e.ServiceCode() { + case azblob.ServiceCodeContainerNotFound: + _, err := containerURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone) + if err != nil { + return nil, fmt.Errorf("failed to create %q container: %w", name, err) + } + default: + return nil, fmt.Errorf("failed to get properties for container %q: %w", name, err) + } + } + } + + maxParStr, ok := conf["max_parallel"] + var maxParInt int + if ok { + maxParInt, err = strconv.Atoi(maxParStr) + if err != nil { + return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err) + } + if logger.IsDebug() { + logger.Debug("max_parallel set", "max_parallel", maxParInt) + } + } + + a := &AzureBackend{ + container: &containerURL, + logger: logger, + permitPool: physical.NewPermitPool(maxParInt), + } + return a, nil +} + +// Put is used to insert or update an entry +func (a *AzureBackend) Put(ctx context.Context, entry *physical.Entry) error { + defer metrics.MeasureSince([]string{"azure", "put"}, time.Now()) + + if len(entry.Value) >= MaxBlobSize { + return fmt.Errorf("value is bigger than the current supported limit of 4MBytes") + } + + a.permitPool.Acquire() + defer a.permitPool.Release() + + blobURL := a.container.NewBlockBlobURL(entry.Key) + _, err := azblob.UploadBufferToBlockBlob(ctx, entry.Value, blobURL, azblob.UploadToBlockBlobOptions{ + BlockSize: MaxBlobSize, + }) + + return err +} + +// Get is used to fetch an entry +func (a *AzureBackend) Get(ctx context.Context, key string) (*physical.Entry, error) { + defer metrics.MeasureSince([]string{"azure", "get"}, time.Now()) + + a.permitPool.Acquire() + defer a.permitPool.Release() + + blobURL := a.container.NewBlockBlobURL(key) + clientOptions := azblob.ClientProvidedKeyOptions{} + + res, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, clientOptions) + if err != nil { + var e azblob.StorageError + if errors.As(err, &e) { + switch e.ServiceCode() { + case azblob.ServiceCodeBlobNotFound: + return nil, nil + default: + return nil, fmt.Errorf("failed to download blob %q: %w", key, err) + } + } + return nil, err + } + + reader := res.Body(azblob.RetryReaderOptions{}) + defer reader.Close() + + data, err := ioutil.ReadAll(reader) + + ent := &physical.Entry{ + Key: key, + Value: data, + } + + return ent, err +} + +// Delete is used to permanently delete an entry +func (a *AzureBackend) Delete(ctx context.Context, key string) error { + defer metrics.MeasureSince([]string{"azure", "delete"}, time.Now()) + + 
a.permitPool.Acquire()
+	defer a.permitPool.Release()
+
+	blobURL := a.container.NewBlockBlobURL(key)
+	_, err := blobURL.Delete(ctx, azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{})
+	if err != nil {
+		var e azblob.StorageError
+		if errors.As(err, &e) {
+			switch e.ServiceCode() {
+			case azblob.ServiceCodeBlobNotFound:
+				return nil
+			default:
+				return fmt.Errorf("failed to delete blob %q: %w", key, err)
+			}
+		}
+	}
+
+	return err
+}
+
+// List is used to list all the keys under a given
+// prefix, up to the next prefix.
+func (a *AzureBackend) List(ctx context.Context, prefix string) ([]string, error) {
+	defer metrics.MeasureSince([]string{"azure", "list"}, time.Now())
+
+	a.permitPool.Acquire()
+	defer a.permitPool.Release()
+
+	var keys []string
+	for marker := (azblob.Marker{}); marker.NotDone(); {
+		listBlob, err := a.container.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{
+			Prefix:     prefix,
+			MaxResults: MaxListResults,
+		})
+		if err != nil {
+			return nil, err
+		}
+
+		for _, blobInfo := range listBlob.Segment.BlobItems {
+			key := strings.TrimPrefix(blobInfo.Name, prefix)
+			if i := strings.Index(key, "/"); i == -1 {
+				// file
+				keys = append(keys, key)
+			} else {
+				// subdirectory
+				keys = strutil.AppendIfMissing(keys, key[:i+1])
+			}
+		}
+
+		marker = listBlob.NextMarker
+	}
+
+	sort.Strings(keys)
+	return keys, nil
+}
+
+// getAuthTokenFromIMDS uses the Azure Instance Metadata Service to retrieve
+// a short-lived credential using OAuth. More information:
+// https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview
+func getAuthTokenFromIMDS(resource string) (*adal.ServicePrincipalToken, error) {
+	msiEndpoint, err := adal.GetMSIVMEndpoint()
+	if err != nil {
+		return nil, err
+	}
+
+	spt, err := adal.NewServicePrincipalTokenFromMSI(msiEndpoint, resource)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := spt.Refresh(); err != nil {
+		return nil, err
+	}
+
+	token := spt.Token()
+	if token.IsZero() {
+		return nil, fmt.Errorf("received an empty token from IMDS")
+	}
+
+	return spt, nil
+}
diff --git a/physical/azure/azure_test.go b/physical/azure/azure_test.go
new file mode 100644
index 0000000..a004c83
--- /dev/null
+++ b/physical/azure/azure_test.go
@@ -0,0 +1,131 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package azure
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"os"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/hashicorp/vault/helper/testhelpers/azurite"
+
+	"github.com/Azure/azure-storage-blob-go/azblob"
+	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/sdk/helper/logging"
+	"github.com/hashicorp/vault/sdk/physical"
+)
+
+/// These tests run against an Azurite docker container, unless AZURE_ACCOUNT_NAME is given.
+/// Authentication options:
+///   - Use a static access key via AZURE_ACCOUNT_KEY
+///   - Use managed identities (leave AZURE_ACCOUNT_KEY empty)
+///
+/// To run the tests using managed identities, the following pre-requisites have to be met:
+/// 1. Access to the Azure Instance Metadata Service (IMDS) is required (e.g. run it on an Azure VM)
+/// 2. A system-assigned or user-assigned identity attached to the host running the test
+/// 3.
A role assignment for a storage account with "Storage Blob Data Contributor" permissions + +func testFixture(t *testing.T) (*AzureBackend, func()) { + t.Helper() + + ts := time.Now().UnixNano() + name := fmt.Sprintf("vault-test-%d", ts) + _ = os.Setenv("AZURE_BLOB_CONTAINER", name) + + cleanup := func() {} + backendConf := map[string]string{ + "container": name, + } + + if os.Getenv("AZURE_ACCOUNT_NAME") == "" { + dockerCleanup, conf := azurite.PrepareTestContainer(t, "") + cfgaz := conf.(*azurite.Config) + backendConf["accountName"] = cfgaz.AccountName + backendConf["accountKey"] = cfgaz.AccountKey + backendConf["testHost"] = cfgaz.Endpoint + cleanup = dockerCleanup + } else { + accountKey := os.Getenv("AZURE_ACCOUNT_KEY") + if accountKey != "" { + t.Log("using account key provided to authenticate against storage account") + } else { + t.Log("using managed identity to authenticate against storage account") + if !isIMDSReachable(t) { + t.Log("running managed identity test requires access to the Azure IMDS with a valid identity for a storage account attached to it, skipping") + t.SkipNow() + } + } + } + + backend, err := NewAzureBackend(backendConf, logging.NewVaultLogger(log.Debug)) + if err != nil { + defer cleanup() + t.Fatalf("err: %s", err) + } + + azBackend := backend.(*AzureBackend) + + return azBackend, func() { + blobService, err := azBackend.container.GetProperties(context.Background(), azblob.LeaseAccessConditions{}) + if err != nil { + t.Logf("failed to retrieve blob container info: %v", err) + return + } + + if blobService.StatusCode() == 200 { + _, err := azBackend.container.Delete(context.Background(), azblob.ContainerAccessConditions{}) + if err != nil { + t.Logf("clean up failed: %v", err) + } + } + cleanup() + } +} + +func TestAzureBackend(t *testing.T) { + backend, cleanup := testFixture(t) + defer cleanup() + + physical.ExerciseBackend(t, backend) + physical.ExerciseBackend_ListPrefix(t, backend) +} + +func TestAzureBackend_ListPaging(t *testing.T) { + backend, cleanup := testFixture(t) + defer cleanup() + + // by default, azure returns 5000 results in a page, load up more than that + for i := 0; i < MaxListResults+100; i++ { + if err := backend.Put(context.Background(), &physical.Entry{ + Key: "foo" + strconv.Itoa(i), + Value: []byte(strconv.Itoa(i)), + }); err != nil { + t.Fatalf("err: %s", err) + } + } + + results, err := backend.List(context.Background(), "") + if err != nil { + t.Fatalf("err: %s", err) + } + + if len(results) != MaxListResults+100 { + t.Fatalf("expected %d, got %d, %v", MaxListResults+100, len(results), results) + } +} + +func isIMDSReachable(t *testing.T) bool { + t.Helper() + + _, err := net.DialTimeout("tcp", "169.254.169.254:80", time.Second*3) + if err != nil { + return false + } + + return true +} diff --git a/physical/cassandra/cassandra.go b/physical/cassandra/cassandra.go new file mode 100644 index 0000000..fc9261a --- /dev/null +++ b/physical/cassandra/cassandra.go @@ -0,0 +1,366 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cassandra + +import ( + "context" + "crypto/tls" + "fmt" + "io/ioutil" + "net" + "strconv" + "strings" + "time" + + metrics "github.com/armon/go-metrics" + "github.com/gocql/gocql" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/physical" +) + +// CassandraBackend is a physical backend that stores data in Cassandra. 
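The keyspace and table must already exist; nothing below creates them. The INSERT/SELECT/DELETE statements in this file imply a table shaped roughly like the following. This CQL is inferred from those statements, not copied from upstream documentation:

```go
package example

// Hypothetical CQL for the pre-existing table, inferred from the
// INSERT/SELECT/DELETE statements in this file: rows are addressed by
// (bucket, key), and keys sort within a bucket, which the List
// deduplication below relies on.
const exampleEntriesSchema = `
CREATE TABLE IF NOT EXISTS vault.entries (
    bucket text,
    key    text,
    value  blob,
    PRIMARY KEY (bucket, key)
);`
```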
+type CassandraBackend struct { + sess *gocql.Session + table string + + logger log.Logger +} + +// Verify CassandraBackend satisfies the correct interfaces +var _ physical.Backend = (*CassandraBackend)(nil) + +// NewCassandraBackend constructs a Cassandra backend using a pre-existing +// keyspace and table. +func NewCassandraBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + splitArray := func(v string) []string { + return strings.FieldsFunc(v, func(r rune) bool { + return r == ',' + }) + } + + var ( + hosts = splitArray(conf["hosts"]) + port = 9042 + explicitPort = false + keyspace = conf["keyspace"] + table = conf["table"] + consistency = gocql.LocalQuorum + ) + + if len(hosts) == 0 { + hosts = []string{"localhost"} + } + for i, hp := range hosts { + h, ps, err := net.SplitHostPort(hp) + if err != nil { + continue + } + p, err := strconv.Atoi(ps) + if err != nil { + return nil, err + } + + if explicitPort && p != port { + return nil, fmt.Errorf("all hosts must have the same port") + } + hosts[i], port = h, p + explicitPort = true + } + + if keyspace == "" { + keyspace = "vault" + } + if table == "" { + table = "entries" + } + if cs, ok := conf["consistency"]; ok { + switch cs { + case "ANY": + consistency = gocql.Any + case "ONE": + consistency = gocql.One + case "TWO": + consistency = gocql.Two + case "THREE": + consistency = gocql.Three + case "QUORUM": + consistency = gocql.Quorum + case "ALL": + consistency = gocql.All + case "LOCAL_QUORUM": + consistency = gocql.LocalQuorum + case "EACH_QUORUM": + consistency = gocql.EachQuorum + case "LOCAL_ONE": + consistency = gocql.LocalOne + default: + return nil, fmt.Errorf("'consistency' must be one of {ANY, ONE, TWO, THREE, QUORUM, ALL, LOCAL_QUORUM, EACH_QUORUM, LOCAL_ONE}") + } + } + + connectStart := time.Now() + cluster := gocql.NewCluster(hosts...) 
+ cluster.Port = port + cluster.Keyspace = keyspace + + if retryCountStr, ok := conf["simple_retry_policy_retries"]; ok { + retryCount, err := strconv.Atoi(retryCountStr) + if err != nil || retryCount <= 0 { + return nil, fmt.Errorf("'simple_retry_policy_retries' must be a positive integer") + } + cluster.RetryPolicy = &gocql.SimpleRetryPolicy{NumRetries: retryCount} + } + + cluster.ProtoVersion = 2 + if protoVersionStr, ok := conf["protocol_version"]; ok { + protoVersion, err := strconv.Atoi(protoVersionStr) + if err != nil { + return nil, fmt.Errorf("'protocol_version' must be an integer") + } + cluster.ProtoVersion = protoVersion + } + + if username, ok := conf["username"]; ok { + if cluster.ProtoVersion < 2 { + return nil, fmt.Errorf("authentication is not supported with protocol version < 2") + } + authenticator := gocql.PasswordAuthenticator{Username: username} + if password, ok := conf["password"]; ok { + authenticator.Password = password + } + cluster.Authenticator = authenticator + } + + if initialConnectionTimeoutStr, ok := conf["initial_connection_timeout"]; ok { + initialConnectionTimeout, err := strconv.Atoi(initialConnectionTimeoutStr) + if err != nil || initialConnectionTimeout <= 0 { + return nil, fmt.Errorf("'initial_connection_timeout' must be a positive integer") + } + cluster.ConnectTimeout = time.Duration(initialConnectionTimeout) * time.Second + } + + if connTimeoutStr, ok := conf["connection_timeout"]; ok { + connectionTimeout, err := strconv.Atoi(connTimeoutStr) + if err != nil || connectionTimeout <= 0 { + return nil, fmt.Errorf("'connection_timeout' must be a positive integer") + } + cluster.Timeout = time.Duration(connectionTimeout) * time.Second + } + + if err := setupCassandraTLS(conf, cluster); err != nil { + return nil, err + } + + sess, err := cluster.CreateSession() + if err != nil { + return nil, err + } + metrics.MeasureSince([]string{"cassandra", "connect"}, connectStart) + sess.SetConsistency(consistency) + + impl := &CassandraBackend{ + sess: sess, + table: table, + logger: logger, + } + return impl, nil +} + +func setupCassandraTLS(conf map[string]string, cluster *gocql.ClusterConfig) error { + tlsOnStr, ok := conf["tls"] + if !ok { + return nil + } + + tlsOn, err := strconv.Atoi(tlsOnStr) + if err != nil { + return fmt.Errorf("'tls' must be an integer (0 or 1)") + } + + if tlsOn == 0 { + return nil + } + + tlsConfig := &tls.Config{} + if pemBundlePath, ok := conf["pem_bundle_file"]; ok { + pemBundleData, err := ioutil.ReadFile(pemBundlePath) + if err != nil { + return fmt.Errorf("error reading pem bundle from %q: %w", pemBundlePath, err) + } + pemBundle, err := certutil.ParsePEMBundle(string(pemBundleData)) + if err != nil { + return fmt.Errorf("error parsing 'pem_bundle': %w", err) + } + tlsConfig, err = pemBundle.GetTLSConfig(certutil.TLSClient) + if err != nil { + return err + } + } else if pemJSONPath, ok := conf["pem_json_file"]; ok { + pemJSONData, err := ioutil.ReadFile(pemJSONPath) + if err != nil { + return fmt.Errorf("error reading json bundle from %q: %w", pemJSONPath, err) + } + pemJSON, err := certutil.ParsePKIJSON([]byte(pemJSONData)) + if err != nil { + return err + } + tlsConfig, err = pemJSON.GetTLSConfig(certutil.TLSClient) + if err != nil { + return err + } + } + + if tlsSkipVerifyStr, ok := conf["tls_skip_verify"]; ok { + tlsSkipVerify, err := strconv.Atoi(tlsSkipVerifyStr) + if err != nil { + return fmt.Errorf("'tls_skip_verify' must be an integer (0 or 1)") + } + if tlsSkipVerify == 0 { + tlsConfig.InsecureSkipVerify = false + } 
else {
+			tlsConfig.InsecureSkipVerify = true
+		}
+	}
+
+	if tlsMinVersion, ok := conf["tls_min_version"]; ok {
+		switch tlsMinVersion {
+		case "tls10":
+			tlsConfig.MinVersion = tls.VersionTLS10
+		case "tls11":
+			tlsConfig.MinVersion = tls.VersionTLS11
+		case "tls12":
+			tlsConfig.MinVersion = tls.VersionTLS12
+		case "tls13":
+			tlsConfig.MinVersion = tls.VersionTLS13
+		default:
+			return fmt.Errorf("'tls_min_version' must be one of `tls10`, `tls11`, `tls12` or `tls13`")
+		}
+	}
+
+	cluster.SslOpts = &gocql.SslOptions{
+		Config:                 tlsConfig,
+		EnableHostVerification: !tlsConfig.InsecureSkipVerify,
+	}
+	return nil
+}
+
+// bucketName sanitises a bucket name for Cassandra
+func (c *CassandraBackend) bucketName(name string) string {
+	if name == "" {
+		name = "."
+	}
+	return strings.TrimRight(name, "/")
+}
+
+// buckets returns all the prefix buckets the key should be stored at
+func (c *CassandraBackend) buckets(key string) []string {
+	vals := append([]string{""}, physical.Prefixes(key)...)
+	for i, v := range vals {
+		vals[i] = c.bucketName(v)
+	}
+	return vals
+}
+
+// bucket returns the most specific bucket for the key
+func (c *CassandraBackend) bucket(key string) string {
+	bs := c.buckets(key)
+	return bs[len(bs)-1]
+}
+
+// Put is used to insert or update an entry
+func (c *CassandraBackend) Put(ctx context.Context, entry *physical.Entry) error {
+	defer metrics.MeasureSince([]string{"cassandra", "put"}, time.Now())
+
+	// Execute inserts to each key prefix simultaneously
+	stmt := fmt.Sprintf(`INSERT INTO "%s" (bucket, key, value) VALUES (?, ?, ?)`, c.table)
+	buckets := c.buckets(entry.Key)
+	results := make(chan error, len(buckets))
+	for i, _bucket := range buckets {
+		go func(i int, bucket string) {
+			var value []byte
+			if i == len(buckets)-1 {
+				// Only store the full value if this is the leaf bucket where the entry will actually be read,
+				// otherwise this write is just to allow for list operations
+				value = entry.Value
+			}
+			results <- c.sess.Query(stmt, bucket, entry.Key, value).Exec()
+		}(i, _bucket)
+	}
+	for i := 0; i < len(buckets); i++ {
+		if err := <-results; err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Get is used to fetch an entry
+func (c *CassandraBackend) Get(ctx context.Context, key string) (*physical.Entry, error) {
+	defer metrics.MeasureSince([]string{"cassandra", "get"}, time.Now())
+
+	v := []byte(nil)
+	stmt := fmt.Sprintf(`SELECT value FROM "%s" WHERE bucket = ? AND key = ? LIMIT 1`, c.table)
+	q := c.sess.Query(stmt, c.bucket(key), key)
+	if err := q.Scan(&v); err != nil {
+		if err == gocql.ErrNotFound {
+			return nil, nil
+		}
+		return nil, err
+	}
+
+	return &physical.Entry{
+		Key:   key,
+		Value: v,
+	}, nil
+}
+
+// Delete is used to permanently delete an entry
+func (c *CassandraBackend) Delete(ctx context.Context, key string) error {
+	defer metrics.MeasureSince([]string{"cassandra", "delete"}, time.Now())
+
+	stmt := fmt.Sprintf(`DELETE FROM "%s" WHERE bucket = ? AND key = ?`, c.table)
+	buckets := c.buckets(key)
+	results := make(chan error, len(buckets))
+
+	for _, bucket := range buckets {
+		go func(bucket string) {
+			results <- c.sess.Query(stmt, bucket, key).Exec()
+		}(bucket)
+	}
+
+	for i := 0; i < len(buckets); i++ {
+		if err := <-results; err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// List is used to list all the keys under a given
+// prefix, up to the next prefix.
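Listing stays cheap because Put fans each key out to every one of its prefix buckets, so the rows in the bucket named by the prefix are exactly the keys beneath it. A small sketch of the fan-out, mirroring the buckets helper above (see also TestCassandraBackendBuckets below):

```go
package example

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/physical"
)

// bucketFanOut mirrors the buckets helper: a key like "a/b/c" is written
// into the root bucket "." plus one bucket per parent prefix ("a" and
// "a/b"), with the value stored only in the leaf bucket. List("a/") then
// only has to scan the single bucket "a".
func bucketFanOut() {
	fmt.Println(physical.Prefixes("a/b/c"))
}
```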
+func (c *CassandraBackend) List(ctx context.Context, prefix string) ([]string, error) { + defer metrics.MeasureSince([]string{"cassandra", "list"}, time.Now()) + + stmt := fmt.Sprintf(`SELECT key FROM "%s" WHERE bucket = ?`, c.table) + q := c.sess.Query(stmt, c.bucketName(prefix)) + iter := q.Iter() + k, keys := "", []string{} + for iter.Scan(&k) { + // Only return the next "component" (with a trailing slash if it has children) + k = strings.TrimPrefix(k, prefix) + if parts := strings.SplitN(k, "/", 2); len(parts) > 1 { + k = parts[0] + "/" + } else { + k = parts[0] + } + + // Deduplicate; this works because the keys are sorted + if len(keys) > 0 && keys[len(keys)-1] == k { + continue + } + keys = append(keys, k) + } + return keys, iter.Close() +} diff --git a/physical/cassandra/cassandra_test.go b/physical/cassandra/cassandra_test.go new file mode 100644 index 0000000..9466d0a --- /dev/null +++ b/physical/cassandra/cassandra_test.go @@ -0,0 +1,60 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cassandra + +import ( + "os" + "reflect" + "testing" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/testhelpers/cassandra" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" +) + +func TestCassandraBackend(t *testing.T) { + if testing.Short() { + t.Skipf("skipping in short mode") + } + if os.Getenv("VAULT_CI_GO_TEST_RACE") != "" { + t.Skip("skipping race test in CI pending https://github.com/gocql/gocql/pull/1474") + } + + host, cleanup := cassandra.PrepareTestContainer(t) + defer cleanup() + + // Run vault tests + logger := logging.NewVaultLogger(log.Debug) + b, err := NewCassandraBackend(map[string]string{ + "hosts": host.ConnectionURL(), + "protocol_version": "3", + "connection_timeout": "5", + "initial_connection_timeout": "5", + "simple_retry_policy_retries": "3", + }, logger) + if err != nil { + t.Fatalf("Failed to create new backend: %v", err) + } + + physical.ExerciseBackend(t, b) + physical.ExerciseBackend_ListPrefix(t, b) +} + +func TestCassandraBackendBuckets(t *testing.T) { + expectations := map[string][]string{ + "": {"."}, + "a": {"."}, + "a/b": {".", "a"}, + "a/b/c/d/e": {".", "a", "a/b", "a/b/c", "a/b/c/d"}, + } + + b := &CassandraBackend{} + for input, expected := range expectations { + actual := b.buckets(input) + if !reflect.DeepEqual(actual, expected) { + t.Errorf("bad: %v expected: %v", actual, expected) + } + } +} diff --git a/physical/cockroachdb/cockroachdb.go b/physical/cockroachdb/cockroachdb.go new file mode 100644 index 0000000..38f935c --- /dev/null +++ b/physical/cockroachdb/cockroachdb.go @@ -0,0 +1,356 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package cockroachdb
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+	"unicode"
+
+	metrics "github.com/armon/go-metrics"
+	"github.com/cockroachdb/cockroach-go/crdb"
+	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/go-secure-stdlib/strutil"
+	"github.com/hashicorp/vault/sdk/physical"
+
+	// CockroachDB uses the Postgres SQL driver
+	_ "github.com/jackc/pgx/v4/stdlib"
+)
+
+// Verify CockroachDBBackend satisfies the correct interfaces
+var (
+	_ physical.Backend       = (*CockroachDBBackend)(nil)
+	_ physical.Transactional = (*CockroachDBBackend)(nil)
+)
+
+const (
+	defaultTableName   = "vault_kv_store"
+	defaultHATableName = "vault_ha_locks"
+)
+
+// CockroachDBBackend is a physical backend that stores data
+// within a CockroachDB database.
+type CockroachDBBackend struct {
+	table           string
+	haTable         string
+	client          *sql.DB
+	rawStatements   map[string]string
+	statements      map[string]*sql.Stmt
+	rawHAStatements map[string]string
+	haStatements    map[string]*sql.Stmt
+	logger          log.Logger
+	permitPool      *physical.PermitPool
+	haEnabled       bool
+}
+
+// NewCockroachDBBackend constructs a CockroachDB backend using the
+// given connection URL and table configuration.
+func NewCockroachDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
+	// Get the CockroachDB connection URL used to perform read/write operations.
+	connURL, ok := conf["connection_url"]
+	if !ok || connURL == "" {
+		return nil, fmt.Errorf("missing connection_url")
+	}
+
+	haEnabled := conf["ha_enabled"] == "true"
+
+	dbTable := conf["table"]
+	if dbTable == "" {
+		dbTable = defaultTableName
+	}
+
+	err := validateDBTable(dbTable)
+	if err != nil {
+		return nil, fmt.Errorf("invalid table: %w", err)
+	}
+
+	dbHATable, ok := conf["ha_table"]
+	if !ok {
+		dbHATable = defaultHATableName
+	}
+
+	err = validateDBTable(dbHATable)
+	if err != nil {
+		return nil, fmt.Errorf("invalid HA table: %w", err)
+	}
+
+	maxParStr, ok := conf["max_parallel"]
+	var maxParInt int
+	if ok {
+		maxParInt, err = strconv.Atoi(maxParStr)
+		if err != nil {
+			return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err)
+		}
+		if logger.IsDebug() {
+			logger.Debug("max_parallel set", "max_parallel", maxParInt)
+		}
+	}
+
+	// Create CockroachDB handle for the database.
+	db, err := sql.Open("pgx", connURL)
+	if err != nil {
+		return nil, fmt.Errorf("failed to connect to cockroachdb: %w", err)
+	}
+
+	// Create the required tables if they don't exist.
+	createQuery := "CREATE TABLE IF NOT EXISTS " + dbTable +
+		" (path STRING, value BYTES, PRIMARY KEY (path))"
+	if _, err := db.Exec(createQuery); err != nil {
+		return nil, fmt.Errorf("failed to create CockroachDB table: %w", err)
+	}
+	if haEnabled {
+		createHATableQuery := "CREATE TABLE IF NOT EXISTS " + dbHATable +
+			"(ha_key TEXT NOT NULL, " +
+			" ha_identity TEXT NOT NULL, " +
+			" ha_value TEXT, " +
+			" valid_until TIMESTAMP WITH TIME ZONE NOT NULL, " +
+			" CONSTRAINT ha_key PRIMARY KEY (ha_key) " +
+			");"
+		if _, err := db.Exec(createHATableQuery); err != nil {
+			return nil, fmt.Errorf("failed to create CockroachDB HA table: %w", err)
+		}
+	}
+
+	// Setup the backend
+	c := &CockroachDBBackend{
+		table:   dbTable,
+		haTable: dbHATable,
+		client:  db,
+		rawStatements: map[string]string{
+			"put": "INSERT INTO " + dbTable + " VALUES($1, $2)" +
+				" ON CONFLICT (path) DO " +
+				" UPDATE SET (path, value) = ($1, $2)",
+			"get":    "SELECT value FROM " + dbTable + " WHERE path = $1",
+			"delete": "DELETE FROM " + dbTable + " WHERE path = $1",
+			"list":   "SELECT path FROM " + dbTable + " WHERE path LIKE $1",
+		},
+		statements: make(map[string]*sql.Stmt),
+		rawHAStatements: map[string]string{
+			"get": "SELECT ha_value FROM " + dbHATable + " WHERE NOW() <= valid_until AND ha_key = $1",
+			"upsert": "INSERT INTO " + dbHATable + " as t (ha_identity, ha_key, ha_value, valid_until)" +
+				" VALUES ($1, $2, $3, NOW() + $4) " +
+				" ON CONFLICT (ha_key) DO " +
+				" UPDATE SET (ha_identity, ha_key, ha_value, valid_until) = ($1, $2, $3, NOW() + $4) " +
+				" WHERE (t.valid_until < NOW() AND t.ha_key = $2) OR " +
+				" (t.ha_identity = $1 AND t.ha_key = $2) ",
+			"delete": "DELETE FROM " + dbHATable + " WHERE ha_key = $1",
+		},
+		haStatements: make(map[string]*sql.Stmt),
+		logger:       logger,
+		permitPool:   physical.NewPermitPool(maxParInt),
+		haEnabled:    haEnabled,
+	}
+
+	// Prepare all the statements required
+	for name, query := range c.rawStatements {
+		if err := c.prepare(c.statements, name, query); err != nil {
+			return nil, err
+		}
+	}
+	if haEnabled {
+		for name, query := range c.rawHAStatements {
+			if err := c.prepare(c.haStatements, name, query); err != nil {
+				return nil, err
+			}
+		}
+	}
+	return c, nil
+}
+
+// prepare is a helper to prepare a query for future execution.
+func (c *CockroachDBBackend) prepare(statementMap map[string]*sql.Stmt, name, query string) error {
+	stmt, err := c.client.Prepare(query)
+	if err != nil {
+		return fmt.Errorf("failed to prepare %q: %w", name, err)
+	}
+	statementMap[name] = stmt
+	return nil
+}
+
+// Put is used to insert or update an entry.
+func (c *CockroachDBBackend) Put(ctx context.Context, entry *physical.Entry) error {
+	defer metrics.MeasureSince([]string{"cockroachdb", "put"}, time.Now())
+
+	c.permitPool.Acquire()
+	defer c.permitPool.Release()
+
+	_, err := c.statements["put"].Exec(entry.Key, entry.Value)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// Get is used to fetch an entry.
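Because the put statement is an ON CONFLICT upsert, writing the same path twice simply replaces the value. A round-trip sketch under the assumption of a reachable cluster; the connection URL is a placeholder:

```go
package main

import (
	"context"
	"fmt"

	log "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/vault/physical/cockroachdb"
	"github.com/hashicorp/vault/sdk/helper/logging"
	"github.com/hashicorp/vault/sdk/physical"
)

func main() {
	// Placeholder connection URL; point this at a real cluster.
	b, err := cockroachdb.NewCockroachDBBackend(map[string]string{
		"connection_url": "postgresql://root@localhost:26257/vault?sslmode=disable",
	}, logging.NewVaultLogger(log.Debug))
	if err != nil {
		panic(err)
	}

	ctx := context.Background()
	// Two puts to the same path: the upsert means the second wins.
	if err := b.Put(ctx, &physical.Entry{Key: "foo", Value: []byte("v1")}); err != nil {
		panic(err)
	}
	if err := b.Put(ctx, &physical.Entry{Key: "foo", Value: []byte("v2")}); err != nil {
		panic(err)
	}

	entry, err := b.Get(ctx, "foo")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(entry.Value)) // v2
}
```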
+func (c *CockroachDBBackend) Get(ctx context.Context, key string) (*physical.Entry, error) {
+	defer metrics.MeasureSince([]string{"cockroachdb", "get"}, time.Now())
+
+	c.permitPool.Acquire()
+	defer c.permitPool.Release()
+
+	var result []byte
+	err := c.statements["get"].QueryRow(key).Scan(&result)
+	if err == sql.ErrNoRows {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	ent := &physical.Entry{
+		Key:   key,
+		Value: result,
+	}
+	return ent, nil
+}
+
+// Delete is used to permanently delete an entry
+func (c *CockroachDBBackend) Delete(ctx context.Context, key string) error {
+	defer metrics.MeasureSince([]string{"cockroachdb", "delete"}, time.Now())
+
+	c.permitPool.Acquire()
+	defer c.permitPool.Release()
+
+	_, err := c.statements["delete"].Exec(key)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// List is used to list all the keys under a given
+// prefix, up to the next prefix.
+func (c *CockroachDBBackend) List(ctx context.Context, prefix string) ([]string, error) {
+	defer metrics.MeasureSince([]string{"cockroachdb", "list"}, time.Now())
+
+	c.permitPool.Acquire()
+	defer c.permitPool.Release()
+
+	likePrefix := prefix + "%"
+	rows, err := c.statements["list"].Query(likePrefix)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	var keys []string
+	for rows.Next() {
+		var key string
+		err = rows.Scan(&key)
+		if err != nil {
+			return nil, fmt.Errorf("failed to scan rows: %w", err)
+		}
+
+		key = strings.TrimPrefix(key, prefix)
+		if i := strings.Index(key, "/"); i == -1 {
+			// Add objects only from the current 'folder'
+			keys = append(keys, key)
+		} else {
+			// Add truncated 'folder' paths
+			keys = strutil.AppendIfMissing(keys, string(key[:i+1]))
+		}
+	}
+
+	sort.Strings(keys)
+	return keys, nil
+}
+
+// Transaction is used to run multiple entries via a transaction
+func (c *CockroachDBBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error {
+	defer metrics.MeasureSince([]string{"cockroachdb", "transaction"}, time.Now())
+	if len(txns) == 0 {
+		return nil
+	}
+
+	c.permitPool.Acquire()
+	defer c.permitPool.Release()
+
+	return crdb.ExecuteTx(context.Background(), c.client, nil, func(tx *sql.Tx) error {
+		return c.transaction(tx, txns)
+	})
+}
+
+func (c *CockroachDBBackend) transaction(tx *sql.Tx, txns []*physical.TxnEntry) error {
+	deleteStmt, err := tx.Prepare(c.rawStatements["delete"])
+	if err != nil {
+		return err
+	}
+	putStmt, err := tx.Prepare(c.rawStatements["put"])
+	if err != nil {
+		return err
+	}
+
+	for _, op := range txns {
+		switch op.Operation {
+		case physical.DeleteOperation:
+			_, err = deleteStmt.Exec(op.Entry.Key)
+		case physical.PutOperation:
+			_, err = putStmt.Exec(op.Entry.Key, op.Entry.Value)
+		default:
+			return fmt.Errorf("%q is not a supported transaction operation", op.Operation)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// validateDBTable against the CockroachDB rules for table names:
+// https://www.cockroachlabs.com/docs/stable/keywords-and-identifiers.html#identifiers
+//
+// - All values that accept an identifier must:
+//   - Begin with a Unicode letter or an underscore (_). Subsequent characters can be letters,
+//     underscores, digits (0-9), or dollar signs ($).
+//   - Not equal any SQL keyword unless the keyword is accepted by the element's syntax. For example,
+//     name accepts Unreserved or Column Name keywords.
+//
+// The docs do state that we can bypass these rules with double quotes,
+// but it is safer to just require these rules across the board.
+func validateDBTable(dbTable string) (err error) {
+	// Check if this is 'database.table' formatted. If so, split it apart
+	// and validate the two parts independently.
+	split := strings.SplitN(dbTable, ".", 2)
+	if len(split) == 2 {
+		merr := &multierror.Error{}
+		merr = multierror.Append(merr, wrapErr("invalid database: %w", validateDBTable(split[0])))
+		merr = multierror.Append(merr, wrapErr("invalid table name: %w", validateDBTable(split[1])))
+		return merr.ErrorOrNil()
+	}
+
+	// Disallow SQL keywords as the table name
+	if sqlKeywords[strings.ToUpper(dbTable)] {
+		return fmt.Errorf("name must not be a SQL keyword")
+	}
+
+	runes := []rune(dbTable)
+	for i, r := range runes {
+		if i == 0 && !unicode.IsLetter(r) && r != '_' {
+			return fmt.Errorf("must use a letter or an underscore as the first character")
+		}
+
+		if !unicode.IsLetter(r) && r != '_' && !unicode.IsDigit(r) && r != '$' {
+			return fmt.Errorf("must only contain letters, underscores, digits, and dollar signs")
+		}
+
+		if r == '`' || r == '\'' || r == '"' {
+			return fmt.Errorf("cannot contain backticks, single quotes, or double quotes")
+		}
+	}
+
+	return nil
+}
+
+func wrapErr(message string, err error) error {
+	if err == nil {
+		return nil
+	}
+	return fmt.Errorf(message, err)
+}
diff --git a/physical/cockroachdb/cockroachdb_ha.go b/physical/cockroachdb/cockroachdb_ha.go
new file mode 100644
index 0000000..39b6175
--- /dev/null
+++ b/physical/cockroachdb/cockroachdb_ha.go
@@ -0,0 +1,204 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package cockroachdb
+
+import (
+	"database/sql"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/vault/sdk/physical"
+)
+
+const (
+	// The lock TTL matches the default that Consul API uses, 15 seconds.
+	// Used as part of SQL commands to set/extend lock expiry time relative to
+	// database clock.
+	CockroachDBLockTTLSeconds = 15
+
+	// The amount of time to wait between the lock renewals
+	CockroachDBLockRenewInterval = 5 * time.Second
+
+	// CockroachDBLockRetryInterval is the amount of time to wait
+	// if a lock fails before trying again.
+	CockroachDBLockRetryInterval = time.Second
+)
+
+// Verify backend satisfies the correct interfaces.
+var (
+	_ physical.HABackend = (*CockroachDBBackend)(nil)
+	_ physical.Lock      = (*CockroachDBLock)(nil)
+)
+
+type CockroachDBLock struct {
+	backend  *CockroachDBBackend
+	key      string
+	value    string
+	identity string
+	lock     sync.Mutex
+
+	renewTicker *time.Ticker
+
+	// ttlSeconds is how long a lock is valid for.
+	ttlSeconds int
+
+	// renewInterval is how much time to wait between lock renewals. Must be << TTL.
+	renewInterval time.Duration
+
+	// retryInterval is how much time to wait between attempts to grab the lock.
+	retryInterval time.Duration
+}
+
+func (c *CockroachDBBackend) HAEnabled() bool {
+	return c.haEnabled
+}
+
+func (c *CockroachDBBackend) LockWith(key, value string) (physical.Lock, error) {
+	identity, err := uuid.GenerateUUID()
+	if err != nil {
+		return nil, err
+	}
+	return &CockroachDBLock{
+		backend:       c,
+		key:           key,
+		value:         value,
+		identity:      identity,
+		ttlSeconds:    CockroachDBLockTTLSeconds,
+		renewInterval: CockroachDBLockRenewInterval,
+		retryInterval: CockroachDBLockRetryInterval,
+	}, nil
+}
+
+// Lock tries to acquire the lock by repeatedly trying to create a record in the
+// CockroachDB table.
It will block until either the stop channel is closed or +// the lock could be acquired successfully. The returned channel will be closed +// once the lock in the CockroachDB table cannot be renewed, either due to an +// error speaking to CockroachDB or because someone else has taken it. +func (l *CockroachDBLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { + l.lock.Lock() + defer l.lock.Unlock() + + var ( + success = make(chan struct{}) + errors = make(chan error, 1) + leader = make(chan struct{}) + ) + go l.tryToLock(stopCh, success, errors) + + select { + case <-success: + // After acquiring it successfully, we must renew the lock periodically. + l.renewTicker = time.NewTicker(l.renewInterval) + go l.periodicallyRenewLock(leader) + case err := <-errors: + return nil, err + case <-stopCh: + return nil, nil + } + + return leader, nil +} + +// Unlock releases the lock by deleting the lock record from the +// CockroachDB table. +func (l *CockroachDBLock) Unlock() error { + c := l.backend + c.permitPool.Acquire() + defer c.permitPool.Release() + + if l.renewTicker != nil { + l.renewTicker.Stop() + } + + _, err := c.haStatements["delete"].Exec(l.key) + return err +} + +// Value checks whether or not the lock is held by any instance of CockroachDBLock, +// including this one, and returns the current value. +func (l *CockroachDBLock) Value() (bool, string, error) { + c := l.backend + c.permitPool.Acquire() + defer c.permitPool.Release() + var result string + err := c.haStatements["get"].QueryRow(l.key).Scan(&result) + + switch err { + case nil: + return true, result, nil + case sql.ErrNoRows: + return false, "", nil + default: + return false, "", err + + } +} + +// tryToLock tries to create a new item in CockroachDB every `retryInterval`. +// As long as the item cannot be created (because it already exists), it will +// be retried. If the operation fails due to an error, it is sent to the errors +// channel. When the lock could be acquired successfully, the success channel +// is closed. +func (l *CockroachDBLock) tryToLock(stop <-chan struct{}, success chan struct{}, errors chan error) { + ticker := time.NewTicker(l.retryInterval) + defer ticker.Stop() + + for { + select { + case <-stop: + return + case <-ticker.C: + gotlock, err := l.writeItem() + switch { + case err != nil: + // Send to the error channel and don't block if full. + select { + case errors <- err: + default: + } + return + case gotlock: + close(success) + return + } + } + } +} + +func (l *CockroachDBLock) periodicallyRenewLock(done chan struct{}) { + for range l.renewTicker.C { + gotlock, err := l.writeItem() + if err != nil || !gotlock { + close(done) + l.renewTicker.Stop() + return + } + } +} + +// Attempts to put/update the CockroachDB item using condition expressions to +// evaluate the TTL. Returns true if the lock was obtained, false if not. +// If false error may be nil or non-nil: nil indicates simply that someone +// else has the lock, whereas non-nil means that something unexpected happened. 
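+//
+// With the defaults above (15 second TTL, 5 second renew interval) a healthy
+// holder refreshes the expiry about three times per TTL window, so the lock
+// record only expires after renewals have failed for up to 15 seconds.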
+func (l *CockroachDBLock) writeItem() (bool, error) { + c := l.backend + c.permitPool.Acquire() + defer c.permitPool.Release() + + sqlResult, err := c.haStatements["upsert"].Exec(l.identity, l.key, l.value, fmt.Sprintf("%d seconds", l.ttlSeconds)) + if err != nil { + return false, err + } + if sqlResult == nil { + return false, fmt.Errorf("empty SQL response received") + } + + ar, err := sqlResult.RowsAffected() + if err != nil { + return false, err + } + return ar == 1, nil +} diff --git a/physical/cockroachdb/cockroachdb_test.go b/physical/cockroachdb/cockroachdb_test.go new file mode 100644 index 0000000..da1da96 --- /dev/null +++ b/physical/cockroachdb/cockroachdb_test.go @@ -0,0 +1,214 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cockroachdb + +import ( + "context" + "database/sql" + "fmt" + "net/url" + "os" + "testing" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" +) + +type Config struct { + docker.ServiceURL + TableName string + HATableName string +} + +var _ docker.ServiceConfig = &Config{} + +func prepareCockroachDBTestContainer(t *testing.T) (func(), *Config) { + if retURL := os.Getenv("CR_URL"); retURL != "" { + s, err := docker.NewServiceURLParse(retURL) + if err != nil { + t.Fatal(err) + } + return func() {}, &Config{ + ServiceURL: *s, + TableName: "vault." + defaultTableName, + HATableName: "vault." + defaultHATableName, + } + } + + runner, err := docker.NewServiceRunner(docker.RunOptions{ + ImageRepo: "docker.mirror.hashicorp.services/cockroachdb/cockroach", + ImageTag: "release-1.0", + ContainerName: "cockroachdb", + Cmd: []string{"start", "--insecure"}, + Ports: []string{"26257/tcp"}, + }) + if err != nil { + t.Fatalf("Could not start docker CockroachDB: %s", err) + } + svc, err := runner.StartService(context.Background(), connectCockroachDB) + if err != nil { + t.Fatalf("Could not start docker CockroachDB: %s", err) + } + + return svc.Cleanup, svc.Config.(*Config) +} + +func connectCockroachDB(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { + u := url.URL{ + Scheme: "postgresql", + User: url.UserPassword("root", ""), + Host: fmt.Sprintf("%s:%d", host, port), + RawQuery: "sslmode=disable", + } + + db, err := sql.Open("pgx", u.String()) + if err != nil { + return nil, err + } + defer db.Close() + + database := "vault" + _, err = db.Exec(fmt.Sprintf("CREATE DATABASE %s", database)) + if err != nil { + return nil, err + } + + return &Config{ + ServiceURL: *docker.NewServiceURL(u), + TableName: database + "." + defaultTableName, + HATableName: database + "." 
+ defaultHATableName, + }, nil +} + +func TestCockroachDBBackend(t *testing.T) { + cleanup, config := prepareCockroachDBTestContainer(t) + defer cleanup() + + hae := os.Getenv("CR_HA_ENABLED") + if hae == "" { + hae = "true" + } + + // Run vault tests + logger := logging.NewVaultLogger(log.Debug) + + b1, err := NewCockroachDBBackend(map[string]string{ + "connection_url": config.URL().String(), + "table": config.TableName, + "ha_table": config.HATableName, + "ha_enabled": hae, + }, logger) + if err != nil { + t.Fatalf("Failed to create new backend: %v", err) + } + + b2, err := NewCockroachDBBackend(map[string]string{ + "connection_url": config.URL().String(), + "table": config.TableName, + "ha_table": config.HATableName, + "ha_enabled": hae, + }, logger) + if err != nil { + t.Fatalf("Failed to create new backend: %v", err) + } + + defer func() { + truncate(t, b1) + truncate(t, b2) + }() + + physical.ExerciseBackend(t, b1) + truncate(t, b1) + physical.ExerciseBackend_ListPrefix(t, b1) + truncate(t, b1) + physical.ExerciseTransactionalBackend(t, b1) + truncate(t, b1) + + ha1, ok1 := b1.(physical.HABackend) + ha2, ok2 := b2.(physical.HABackend) + if !ok1 || !ok2 { + t.Fatalf("CockroachDB does not implement HABackend") + } + + if ha1.HAEnabled() && ha2.HAEnabled() { + logger.Info("Running ha backend tests") + physical.ExerciseHABackend(t, ha1, ha2) + } +} + +func truncate(t *testing.T, b physical.Backend) { + crdb := b.(*CockroachDBBackend) + _, err := crdb.client.Exec("TRUNCATE TABLE " + crdb.table) + if err != nil { + t.Fatalf("Failed to drop table: %v", err) + } + if crdb.haEnabled { + _, err = crdb.client.Exec("TRUNCATE TABLE " + crdb.haTable) + if err != nil { + t.Fatalf("Failed to drop table: %v", err) + } + } +} + +func TestValidateDBTable(t *testing.T) { + type testCase struct { + table string + expectErr bool + } + + tests := map[string]testCase{ + "first character is letter": {"abcdef", false}, + "first character is underscore": {"_bcdef", false}, + "exclamation point": {"ab!def", true}, + "at symbol": {"ab@def", true}, + "hash": {"ab#def", true}, + "percent": {"ab%def", true}, + "carrot": {"ab^def", true}, + "ampersand": {"ab&def", true}, + "star": {"ab*def", true}, + "left paren": {"ab(def", true}, + "right paren": {"ab)def", true}, + "dash": {"ab-def", true}, + "digit": {"a123ef", false}, + "dollar end": {"abcde$", false}, + "dollar middle": {"ab$def", false}, + "dollar start": {"$bcdef", true}, + "backtick prefix": {"`bcdef", true}, + "backtick middle": {"ab`def", true}, + "backtick suffix": {"abcde`", true}, + "single quote prefix": {"'bcdef", true}, + "single quote middle": {"ab'def", true}, + "single quote suffix": {"abcde'", true}, + "double quote prefix": {`"bcdef`, true}, + "double quote middle": {`ab"def`, true}, + "double quote suffix": {`abcde"`, true}, + "underscore with all runes": {"_bcd123__a__$", false}, + "all runes": {"abcd123__a__$", false}, + "default table name": {defaultTableName, false}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + err := validateDBTable(test.table) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + }) + t.Run(fmt.Sprintf("database: %s", name), func(t *testing.T) { + dbTable := fmt.Sprintf("%s.%s", test.table, test.table) + err := validateDBTable(dbTable) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, 
got: %s", err) + } + }) + } +} diff --git a/physical/cockroachdb/keywords.go b/physical/cockroachdb/keywords.go new file mode 100644 index 0000000..f44089f --- /dev/null +++ b/physical/cockroachdb/keywords.go @@ -0,0 +1,441 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cockroachdb + +// sqlKeywords is a reference of all of the keywords that we do not allow for use as the table name +// Referenced from: +// https://www.cockroachlabs.com/docs/stable/keywords-and-identifiers.html#identifiers +// -> https://www.cockroachlabs.com/docs/stable/keywords-and-identifiers.html#keywords +// -> https://www.cockroachlabs.com/docs/stable/sql-grammar.html +var sqlKeywords = map[string]bool{ + // reserved_keyword + // https://www.cockroachlabs.com/docs/stable/sql-grammar.html#reserved_keyword + "ALL": true, + "ANALYSE": true, + "ANALYZE": true, + "AND": true, + "ANY": true, + "ARRAY": true, + "AS": true, + "ASC": true, + "ASYMMETRIC": true, + "BOTH": true, + "CASE": true, + "CAST": true, + "CHECK": true, + "COLLATE": true, + "COLUMN": true, + "CONCURRENTLY": true, + "CONSTRAINT": true, + "CREATE": true, + "CURRENT_CATALOG": true, + "CURRENT_DATE": true, + "CURRENT_ROLE": true, + "CURRENT_SCHEMA": true, + "CURRENT_TIME": true, + "CURRENT_TIMESTAMP": true, + "CURRENT_USER": true, + "DEFAULT": true, + "DEFERRABLE": true, + "DESC": true, + "DISTINCT": true, + "DO": true, + "ELSE": true, + "END": true, + "EXCEPT": true, + "FALSE": true, + "FETCH": true, + "FOR": true, + "FOREIGN": true, + "FROM": true, + "GRANT": true, + "GROUP": true, + "HAVING": true, + "IN": true, + "INITIALLY": true, + "INTERSECT": true, + "INTO": true, + "LATERAL": true, + "LEADING": true, + "LIMIT": true, + "LOCALTIME": true, + "LOCALTIMESTAMP": true, + "NOT": true, + "NULL": true, + "OFFSET": true, + "ON": true, + "ONLY": true, + "OR": true, + "ORDER": true, + "PLACING": true, + "PRIMARY": true, + "REFERENCES": true, + "RETURNING": true, + "SELECT": true, + "SESSION_USER": true, + "SOME": true, + "SYMMETRIC": true, + "TABLE": true, + "THEN": true, + "TO": true, + "TRAILING": true, + "TRUE": true, + "UNION": true, + "UNIQUE": true, + "USER": true, + "USING": true, + "VARIADIC": true, + "WHEN": true, + "WHERE": true, + "WINDOW": true, + "WITH": true, + + // cockroachdb_extra_reserved_keyword + // https://www.cockroachlabs.com/docs/stable/sql-grammar.html#cockroachdb_extra_reserved_keyword + "INDEX": true, + "NOTHING": true, + + // type_func_name_keyword + // https://www.cockroachlabs.com/docs/stable/sql-grammar.html#type_func_name_keyword + "COLLATION": true, + "CROSS": true, + "FULL": true, + "INNER": true, + "ILIKE": true, + "IS": true, + "ISNULL": true, + "JOIN": true, + "LEFT": true, + "LIKE": true, + "NATURAL": true, + "NONE": true, + "NOTNULL": true, + "OUTER": true, + "OVERLAPS": true, + "RIGHT": true, + "SIMILAR": true, + "FAMILY": true, + + // col_name_keyword + // https://www.cockroachlabs.com/docs/stable/sql-grammar.html#col_name_keyword + "ANNOTATE_TYPE": true, + "BETWEEN": true, + "BIGINT": true, + "BIT": true, + "BOOLEAN": true, + "CHAR": true, + "CHARACTER": true, + "CHARACTERISTICS": true, + "COALESCE": true, + "DEC": true, + "DECIMAL": true, + "EXISTS": true, + "EXTRACT": true, + "EXTRACT_DURATION": true, + "FLOAT": true, + "GREATEST": true, + "GROUPING": true, + "IF": true, + "IFERROR": true, + "IFNULL": true, + "INT": true, + "INTEGER": true, + "INTERVAL": true, + "ISERROR": true, + "LEAST": true, + "NULLIF": true, + "NUMERIC": true, + "OUT": true, + "OVERLAY": true, + "POSITION": 
true, + "PRECISION": true, + "REAL": true, + "ROW": true, + "SMALLINT": true, + "SUBSTRING": true, + "TIME": true, + "TIMETZ": true, + "TIMESTAMP": true, + "TIMESTAMPTZ": true, + "TREAT": true, + "TRIM": true, + "VALUES": true, + "VARBIT": true, + "VARCHAR": true, + "VIRTUAL": true, + "WORK": true, + + // unreserved_keyword + // https://www.cockroachlabs.com/docs/stable/sql-grammar.html#unreserved_keyword + "ABORT": true, + "ACTION": true, + "ADD": true, + "ADMIN": true, + "AGGREGATE": true, + "ALTER": true, + "AT": true, + "AUTOMATIC": true, + "AUTHORIZATION": true, + "BACKUP": true, + "BEGIN": true, + "BIGSERIAL": true, + "BLOB": true, + "BOOL": true, + "BUCKET_COUNT": true, + "BUNDLE": true, + "BY": true, + "BYTEA": true, + "BYTES": true, + "CACHE": true, + "CANCEL": true, + "CASCADE": true, + "CHANGEFEED": true, + "CLUSTER": true, + "COLUMNS": true, + "COMMENT": true, + "COMMIT": true, + "COMMITTED": true, + "COMPACT": true, + "COMPLETE": true, + "CONFLICT": true, + "CONFIGURATION": true, + "CONFIGURATIONS": true, + "CONFIGURE": true, + "CONSTRAINTS": true, + "CONVERSION": true, + "COPY": true, + "COVERING": true, + "CREATEROLE": true, + "CUBE": true, + "CURRENT": true, + "CYCLE": true, + "DATA": true, + "DATABASE": true, + "DATABASES": true, + "DATE": true, + "DAY": true, + "DEALLOCATE": true, + "DELETE": true, + "DEFERRED": true, + "DISCARD": true, + "DOMAIN": true, + "DOUBLE": true, + "DROP": true, + "ENCODING": true, + "ENUM": true, + "ESCAPE": true, + "EXCLUDE": true, + "EXECUTE": true, + "EXPERIMENTAL": true, + "EXPERIMENTAL_AUDIT": true, + "EXPERIMENTAL_FINGERPRINTS": true, + "EXPERIMENTAL_RELOCATE": true, + "EXPERIMENTAL_REPLICA": true, + "EXPIRATION": true, + "EXPLAIN": true, + "EXPORT": true, + "EXTENSION": true, + "FILES": true, + "FILTER": true, + "FIRST": true, + "FLOAT4": true, + "FLOAT8": true, + "FOLLOWING": true, + "FORCE_INDEX": true, + "FUNCTION": true, + "GLOBAL": true, + "GRANTS": true, + "GROUPS": true, + "HASH": true, + "HIGH": true, + "HISTOGRAM": true, + "HOUR": true, + "IMMEDIATE": true, + "IMPORT": true, + "INCLUDE": true, + "INCREMENT": true, + "INCREMENTAL": true, + "INDEXES": true, + "INET": true, + "INJECT": true, + "INSERT": true, + "INT2": true, + "INT2VECTOR": true, + "INT4": true, + "INT8": true, + "INT64": true, + "INTERLEAVE": true, + "INVERTED": true, + "ISOLATION": true, + "JOB": true, + "JOBS": true, + "JSON": true, + "JSONB": true, + "KEY": true, + "KEYS": true, + "KV": true, + "LANGUAGE": true, + "LAST": true, + "LC_COLLATE": true, + "LC_CTYPE": true, + "LEASE": true, + "LESS": true, + "LEVEL": true, + "LIST": true, + "LOCAL": true, + "LOCKED": true, + "LOGIN": true, + "LOOKUP": true, + "LOW": true, + "MATCH": true, + "MATERIALIZED": true, + "MAXVALUE": true, + "MERGE": true, + "MINUTE": true, + "MINVALUE": true, + "MONTH": true, + "NAMES": true, + "NAN": true, + "NAME": true, + "NEXT": true, + "NO": true, + "NORMAL": true, + "NO_INDEX_JOIN": true, + "NOCREATEROLE": true, + "NOLOGIN": true, + "NOWAIT": true, + "NULLS": true, + "IGNORE_FOREIGN_KEYS": true, + "OF": true, + "OFF": true, + "OID": true, + "OIDS": true, + "OIDVECTOR": true, + "OPERATOR": true, + "OPT": true, + "OPTION": true, + "OPTIONS": true, + "ORDINALITY": true, + "OTHERS": true, + "OVER": true, + "OWNED": true, + "PARENT": true, + "PARTIAL": true, + "PARTITION": true, + "PARTITIONS": true, + "PASSWORD": true, + "PAUSE": true, + "PHYSICAL": true, + "PLAN": true, + "PLANS": true, + "PRECEDING": true, + "PREPARE": true, + "PRESERVE": true, + "PRIORITY": true, + "PUBLIC": true, + 
"PUBLICATION": true, + "QUERIES": true, + "QUERY": true, + "RANGE": true, + "RANGES": true, + "READ": true, + "RECURSIVE": true, + "REF": true, + "REGCLASS": true, + "REGPROC": true, + "REGPROCEDURE": true, + "REGNAMESPACE": true, + "REGTYPE": true, + "REINDEX": true, + "RELEASE": true, + "RENAME": true, + "REPEATABLE": true, + "REPLACE": true, + "RESET": true, + "RESTORE": true, + "RESTRICT": true, + "RESUME": true, + "REVOKE": true, + "ROLE": true, + "ROLES": true, + "ROLLBACK": true, + "ROLLUP": true, + "ROWS": true, + "RULE": true, + "SETTING": true, + "SETTINGS": true, + "STATUS": true, + "SAVEPOINT": true, + "SCATTER": true, + "SCHEMA": true, + "SCHEMAS": true, + "SCRUB": true, + "SEARCH": true, + "SECOND": true, + "SERIAL": true, + "SERIALIZABLE": true, + "SERIAL2": true, + "SERIAL4": true, + "SERIAL8": true, + "SEQUENCE": true, + "SEQUENCES": true, + "SERVER": true, + "SESSION": true, + "SESSIONS": true, + "SET": true, + "SHARE": true, + "SHOW": true, + "SIMPLE": true, + "SKIP": true, + "SMALLSERIAL": true, + "SNAPSHOT": true, + "SPLIT": true, + "SQL": true, + "START": true, + "STATISTICS": true, + "STDIN": true, + "STORE": true, + "STORED": true, + "STORING": true, + "STRICT": true, + "STRING": true, + "SUBSCRIPTION": true, + "SYNTAX": true, + "SYSTEM": true, + "TABLES": true, + "TEMP": true, + "TEMPLATE": true, + "TEMPORARY": true, + "TESTING_RELOCATE": true, + "TEXT": true, + "TIES": true, + "TRACE": true, + "TRANSACTION": true, + "TRIGGER": true, + "TRUNCATE": true, + "TRUSTED": true, + "TYPE": true, + "THROTTLING": true, + "UNBOUNDED": true, + "UNCOMMITTED": true, + "UNKNOWN": true, + "UNLOGGED": true, + "UNSPLIT": true, + "UNTIL": true, + "UPDATE": true, + "UPSERT": true, + "UUID": true, + "USE": true, + "USERS": true, + "VALID": true, + "VALIDATE": true, + "VALUE": true, + "VARYING": true, + "VIEW": true, + "WITHIN": true, + "WITHOUT": true, + "WRITE": true, + "YEAR": true, + "ZONE": true, +} diff --git a/physical/consul/consul.go b/physical/consul/consul.go new file mode 100644 index 0000000..d26c729 --- /dev/null +++ b/physical/consul/consul.go @@ -0,0 +1,784 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package consul + +import ( + "context" + "errors" + "fmt" + "net/http" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/armon/go-metrics" + "github.com/hashicorp/consul/api" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-secure-stdlib/tlsutil" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/physical" + "github.com/hashicorp/vault/vault/diagnose" + "golang.org/x/net/http2" +) + +const ( + // consistencyModeDefault is the configuration value used to tell + // consul to use default consistency. + consistencyModeDefault = "default" + + // consistencyModeStrong is the configuration value used to tell + // consul to use strong consistency. 
+	consistencyModeStrong = "strong"
+
+	// nonExistentKey is used as part of a capabilities check against Consul
+	nonExistentKey = "F35C28E1-7035-40BB-B865-6BED9E3A1B28"
+)
+
+// Verify ConsulBackend satisfies the correct interfaces
+var (
+	_ physical.Backend = (*ConsulBackend)(nil)
+	_ physical.FencingHABackend = (*ConsulBackend)(nil)
+	_ physical.Lock = (*ConsulLock)(nil)
+	_ physical.Transactional = (*ConsulBackend)(nil)
+
+	GetInTxnDisabledError = errors.New("get operations inside transactions are disabled in consul backend")
+)
+
+// ConsulBackend is a physical backend that stores data at a specific
+// prefix within Consul. It is used for most production situations as
+// it allows Vault to run on multiple machines in a highly-available manner.
+// failGetInTxn is only used in tests.
+type ConsulBackend struct {
+	logger log.Logger
+	client *api.Client
+	path string
+	kv *api.KV
+	txn *api.Txn
+	permitPool *physical.PermitPool
+	consistencyMode string
+	sessionTTL string
+	lockWaitTime time.Duration
+	failGetInTxn *uint32
+	activeNodeLock atomic.Pointer[ConsulLock]
+}
+
+// NewConsulBackend constructs a Consul backend using the given API client
+// and the prefix in the KV store.
+func NewConsulBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
+	// Get the path in Consul
+	path, ok := conf["path"]
+	if !ok {
+		path = "vault/"
+	}
+	if logger.IsDebug() {
+		logger.Debug("config path set", "path", path)
+	}
+
+	// Ensure path is suffixed but not prefixed
+	if !strings.HasSuffix(path, "/") {
+		logger.Warn("appending trailing forward slash to path")
+		path += "/"
+	}
+	if strings.HasPrefix(path, "/") {
+		logger.Warn("trimming path of its forward slash")
+		path = strings.TrimPrefix(path, "/")
+	}
+
+	sessionTTL := api.DefaultLockSessionTTL
+	sessionTTLStr, ok := conf["session_ttl"]
+	if ok {
+		_, err := parseutil.ParseDurationSecond(sessionTTLStr)
+		if err != nil {
+			return nil, fmt.Errorf("invalid session_ttl: %w", err)
+		}
+		sessionTTL = sessionTTLStr
+		if logger.IsDebug() {
+			logger.Debug("config session_ttl set", "session_ttl", sessionTTL)
+		}
+	}
+
+	lockWaitTime := api.DefaultLockWaitTime
+	lockWaitTimeRaw, ok := conf["lock_wait_time"]
+	if ok {
+		d, err := parseutil.ParseDurationSecond(lockWaitTimeRaw)
+		if err != nil {
+			return nil, fmt.Errorf("invalid lock_wait_time: %w", err)
+		}
+		lockWaitTime = d
+		if logger.IsDebug() {
+			logger.Debug("config lock_wait_time set", "lock_wait_time", d)
+		}
+	}
+
+	maxParStr, ok := conf["max_parallel"]
+	var maxParInt int
+	if ok {
+		var err error
+		maxParInt, err = strconv.Atoi(maxParStr)
+		if err != nil {
+			return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err)
+		}
+		if logger.IsDebug() {
+			logger.Debug("max_parallel set", "max_parallel", maxParInt)
+		}
+	}
+
+	consistencyMode, ok := conf["consistency_mode"]
+	if ok {
+		switch consistencyMode {
+		case consistencyModeDefault, consistencyModeStrong:
+		default:
+			return nil, fmt.Errorf("invalid consistency_mode value: %q", consistencyMode)
+		}
+	} else {
+		consistencyMode = consistencyModeDefault
+	}
+
+	// Configure the client
+	consulConf := api.DefaultConfig()
+	// Set MaxIdleConnsPerHost to the number of processes used in expiration.Restore
+	consulConf.Transport.MaxIdleConnsPerHost = consts.ExpirationRestoreWorkerCount
+
+	if err := SetupSecureTLS(context.Background(), consulConf, conf, logger, false); err != nil {
+		return nil, fmt.Errorf("client setup failed: %w", err)
+	}
+
+	consulConf.HttpClient = &http.Client{Transport: consulConf.Transport}
+	client, err := api.NewClient(consulConf)
+	if err != nil {
+		return nil, fmt.Errorf("client setup failed: %w", err)
+	}
+
+	// Set up the backend
+	c := &ConsulBackend{
+		logger: logger,
+		path: path,
+		client: client,
+		kv: client.KV(),
+		txn: client.Txn(),
+		permitPool: physical.NewPermitPool(maxParInt),
+		consistencyMode: consistencyMode,
+		sessionTTL: sessionTTL,
+		lockWaitTime: lockWaitTime,
+		failGetInTxn: new(uint32),
+	}
+
+	return c, nil
+}
+
+func SetupSecureTLS(ctx context.Context, consulConf *api.Config, conf map[string]string, logger log.Logger, isDiagnose bool) error {
+	if addr, ok := conf["address"]; ok {
+		consulConf.Address = addr
+		if logger.IsDebug() {
+			logger.Debug("config address set", "address", addr)
+		}
+
+		// Copied from the Consul API module; set the Scheme based on
+		// the protocol field if address looks like a URL.
+		// This can enable the TLS configuration below.
+		parts := strings.SplitN(addr, "://", 2)
+		if len(parts) == 2 {
+			if parts[0] == "http" || parts[0] == "https" {
+				consulConf.Scheme = parts[0]
+				consulConf.Address = parts[1]
+				if logger.IsDebug() {
+					logger.Debug("config address parsed", "scheme", parts[0])
+					logger.Debug("config scheme parsed", "address", parts[1])
+				}
+			} // allow "unix:" or whatever else consul supports in the future
+		}
+	}
+	if scheme, ok := conf["scheme"]; ok {
+		consulConf.Scheme = scheme
+		if logger.IsDebug() {
+			logger.Debug("config scheme set", "scheme", scheme)
+		}
+	}
+	if token, ok := conf["token"]; ok {
+		consulConf.Token = token
+		logger.Debug("config token set")
+	}
+
+	if consulConf.Scheme == "https" {
+		if isDiagnose {
+			certPath, okCert := conf["tls_cert_file"]
+			keyPath, okKey := conf["tls_key_file"]
+			if okCert && okKey {
+				warnings, err := diagnose.TLSFileChecks(certPath, keyPath)
+				for _, warning := range warnings {
+					diagnose.Warn(ctx, warning)
+				}
+				if err != nil {
+					return err
+				}
+				return nil
+			}
+			return fmt.Errorf("key or cert path: %s, %s, cannot be loaded from consul config file", certPath, keyPath)
+		}
+
+		// Use the parsed Address instead of the raw conf['address']
+		tlsClientConfig, err := tlsutil.SetupTLSConfig(conf, consulConf.Address)
+		if err != nil {
+			return err
+		}
+
+		consulConf.Transport.TLSClientConfig = tlsClientConfig
+		if err := http2.ConfigureTransport(consulConf.Transport); err != nil {
+			return err
+		}
+		logger.Debug("configured TLS")
+	} else {
+		if isDiagnose {
+			diagnose.Skipped(ctx, "HTTPS is not used, skipping TLS verification.")
+		}
+	}
+	return nil
+}
+
+// ExpandedCapabilitiesAvailable tests to see if Consul has KVGetOrEmpty and 128 entries per transaction available
+func (c *ConsulBackend) ExpandedCapabilitiesAvailable(ctx context.Context) bool {
+	available := false
+
+	maxEntries := 128
+	ops := make([]*api.TxnOp, maxEntries)
+	for i := 0; i < maxEntries; i++ {
+		ops[i] = &api.TxnOp{KV: &api.KVTxnOp{
+			Key: c.path + nonExistentKey,
+			Verb: api.KVGetOrEmpty,
+		}}
+	}
+
+	c.permitPool.Acquire()
+	defer c.permitPool.Release()
+
+	queryOpts := &api.QueryOptions{}
+	queryOpts = queryOpts.WithContext(ctx)
+
+	ok, resp, _, err := c.txn.Txn(ops, queryOpts)
+	if ok && len(resp.Errors) == 0 && err == nil {
+		available = true
+	}
+
+	return available
+}
+
+func (c *ConsulBackend) writeTxnOps(ctx context.Context, len int) ([]*api.TxnOp, string) {
+	if len < 1 {
+		len = 1
+	}
+	ops := make([]*api.TxnOp, 0, len+1)
+
+	// If we don't have a lock yet, return a transaction with no session check. 
We + // need to do this to allow writes during cluster initialization before there + // is an active node. + lock := c.activeNodeLock.Load() + if lock == nil { + return ops, "" + } + + lockKey, lockSession := lock.Info() + if lockKey == "" || lockSession == "" { + return ops, "" + } + + // If the context used to write has been marked as a special case write that + // happens outside of a lock then don't add the session check. + if physical.IsUnfencedWrite(ctx) { + return ops, "" + } + + // Insert the session check operation at index 0. This will allow us later to + // work out easily if a write failure is because of the session check. + ops = append(ops, &api.TxnOp{ + KV: &api.KVTxnOp{ + Verb: api.KVCheckSession, + Key: lockKey, + Session: lockSession, + }, + }) + return ops, lockSession +} + +// Transaction is used to run multiple entries via a transaction. +func (c *ConsulBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error { + return c.txnInternal(ctx, txns, "transaction") +} + +func (c *ConsulBackend) txnInternal(ctx context.Context, txns []*physical.TxnEntry, apiOpName string) error { + if len(txns) == 0 { + return nil + } + defer metrics.MeasureSince([]string{"consul", apiOpName}, time.Now()) + + failGetInTxn := atomic.LoadUint32(c.failGetInTxn) + for _, t := range txns { + if t.Operation == physical.GetOperation && failGetInTxn != 0 { + return GetInTxnDisabledError + } + } + + ops, sessionID := c.writeTxnOps(ctx, len(txns)) + for _, t := range txns { + o, err := c.makeApiTxn(t) + if err != nil { + return fmt.Errorf("error converting physical transactions into api transactions: %w", err) + } + + ops = append(ops, o) + } + + c.permitPool.Acquire() + defer c.permitPool.Release() + + var retErr *multierror.Error + kvMap := make(map[string][]byte, 0) + + queryOpts := &api.QueryOptions{} + queryOpts = queryOpts.WithContext(ctx) + + ok, resp, _, err := c.txn.Txn(ops, queryOpts) + if err != nil { + if strings.Contains(err.Error(), "is too large") { + return fmt.Errorf("%s: %w", physical.ErrValueTooLarge, err) + } + return err + } + if ok && len(resp.Errors) == 0 { + // Loop over results and cache them in a map. Note that we're only caching + // the first time we see a key, which _should_ correspond to a Get + // operation, since we expect those come first in our txns slice (though + // after check-session). + for _, txnr := range resp.Results { + if len(txnr.KV.Value) > 0 { + // We need to trim the Consul kv path (typically "vault/") from the key + // otherwise it won't match the transaction entries we have. + key := strings.TrimPrefix(txnr.KV.Key, c.path) + if _, found := kvMap[key]; !found { + kvMap[key] = txnr.KV.Value + } + } + } + } + + if len(resp.Errors) > 0 { + for _, res := range resp.Errors { + retErr = multierror.Append(retErr, errors.New(res.What)) + if res.OpIndex == 0 && sessionID != "" { + // We added a session check (sessionID not empty) so an error at OpIndex + // 0 means that we failed that session check. We don't attempt to string + // match because Consul can return at least three different errors here + // with no common string. In all cases though failing this check means + // we no longer hold the lock because it was released, modified or + // deleted. Rather than just continuing to try writing until the + // blocking query manages to notice we're no longer the lock holder + // (which can take 10s of seconds even in good network conditions in my + // testing) we can now Unlock directly here. 
Our ConsulLock now has a + // shortcut that will cause the lock to close the leaderCh immediately + // when we call without waiting for the blocking query to return (unlike + // Consul's current Lock implementation). But before we unlock, we + // should re-load the lock and ensure it's still the same instance we + // just tried to write with in case this goroutine is somehow really + // delayed and we actually acquired a whole new lock in the meantime! + lock := c.activeNodeLock.Load() + if lock != nil { + _, lockSessionID := lock.Info() + if sessionID == lockSessionID { + c.logger.Warn("session check failed on write, we lost active node lock, stepping down", "err", res.What) + lock.Unlock() + } + } + } + } + } + + if retErr != nil { + return retErr + } + + // Loop over our get transactions and populate any values found in our map cache. + for _, t := range txns { + if val, ok := kvMap[t.Entry.Key]; ok && t.Operation == physical.GetOperation { + newVal := make([]byte, len(val)) + copy(newVal, val) + t.Entry.Value = newVal + } + } + + return nil +} + +func (c *ConsulBackend) makeApiTxn(txn *physical.TxnEntry) (*api.TxnOp, error) { + op := &api.KVTxnOp{ + Key: c.path + txn.Entry.Key, + } + switch txn.Operation { + case physical.GetOperation: + op.Verb = api.KVGetOrEmpty + case physical.DeleteOperation: + op.Verb = api.KVDelete + case physical.PutOperation: + op.Verb = api.KVSet + op.Value = txn.Entry.Value + default: + return nil, fmt.Errorf("%q is not a supported transaction operation", txn.Operation) + } + + return &api.TxnOp{KV: op}, nil +} + +// Put is used to insert or update an entry +func (c *ConsulBackend) Put(ctx context.Context, entry *physical.Entry) error { + txns := []*physical.TxnEntry{ + { + Operation: physical.PutOperation, + Entry: entry, + }, + } + return c.txnInternal(ctx, txns, "put") +} + +// Get is used to fetch an entry +func (c *ConsulBackend) Get(ctx context.Context, key string) (*physical.Entry, error) { + defer metrics.MeasureSince([]string{"consul", "get"}, time.Now()) + + c.permitPool.Acquire() + defer c.permitPool.Release() + + queryOpts := &api.QueryOptions{} + queryOpts = queryOpts.WithContext(ctx) + + if c.consistencyMode == consistencyModeStrong { + queryOpts.RequireConsistent = true + } + + pair, _, err := c.kv.Get(c.path+key, queryOpts) + if err != nil { + return nil, err + } + if pair == nil { + return nil, nil + } + ent := &physical.Entry{ + Key: key, + Value: pair.Value, + } + return ent, nil +} + +// Delete is used to permanently delete an entry +func (c *ConsulBackend) Delete(ctx context.Context, key string) error { + txns := []*physical.TxnEntry{ + { + Operation: physical.DeleteOperation, + Entry: &physical.Entry{ + Key: key, + }, + }, + } + return c.txnInternal(ctx, txns, "delete") +} + +// List is used to list all the keys under a given +// prefix, up to the next prefix. +func (c *ConsulBackend) List(ctx context.Context, prefix string) ([]string, error) { + defer metrics.MeasureSince([]string{"consul", "list"}, time.Now()) + scan := c.path + prefix + + // The TrimPrefix call below will not work correctly if we have "//" at the + // end. This can happen in cases where you are e.g. 
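+	//
+	// For example (illustrative values): with path "vault/", List("/") yields
+	// scan == "vault//"; dropping one trailing slash restores "vault/" so the
+	// TrimPrefix below still strips cleanly. Such a call happens when
+	// 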
listing the root of a + // prefix in a logical backend via "/" instead of "" + if strings.HasSuffix(scan, "//") { + scan = scan[:len(scan)-1] + } + + c.permitPool.Acquire() + defer c.permitPool.Release() + + queryOpts := &api.QueryOptions{} + queryOpts = queryOpts.WithContext(ctx) + + out, _, err := c.kv.Keys(scan, "/", queryOpts) + for idx, val := range out { + out[idx] = strings.TrimPrefix(val, scan) + } + + return out, err +} + +func (c *ConsulBackend) FailGetInTxn(fail bool) { + var val uint32 + if fail { + val = 1 + } + atomic.StoreUint32(c.failGetInTxn, val) +} + +// LockWith is used for mutual exclusion based on the given key. +func (c *ConsulBackend) LockWith(key, value string) (physical.Lock, error) { + cl := &ConsulLock{ + logger: c.logger, + client: c.client, + key: c.path + key, + value: value, + consistencyMode: c.consistencyMode, + sessionTTL: c.sessionTTL, + lockWaitTime: c.lockWaitTime, + } + return cl, nil +} + +// HAEnabled indicates whether the HA functionality should be exposed. +// Currently always returns true. +func (c *ConsulBackend) HAEnabled() bool { + return true +} + +// DetectHostAddr is used to detect the host address by asking the Consul agent +func (c *ConsulBackend) DetectHostAddr() (string, error) { + agent := c.client.Agent() + self, err := agent.Self() + if err != nil { + return "", err + } + addr, ok := self["Member"]["Addr"].(string) + if !ok { + return "", fmt.Errorf("unable to convert an address to string") + } + return addr, nil +} + +// RegisterActiveNodeLock is called after active node lock is obtained to allow +// us to fence future writes. +func (c *ConsulBackend) RegisterActiveNodeLock(l physical.Lock) error { + cl, ok := l.(*ConsulLock) + if !ok { + return fmt.Errorf("invalid Lock type") + } + c.activeNodeLock.Store(cl) + key, sessionID := cl.Info() + c.logger.Info("registered active node lock", "key", key, "sessionID", sessionID) + return nil +} + +// ConsulLock is used to provide the Lock interface backed by Consul. We work +// around some limitations of Consuls api.Lock noted in +// https://github.com/hashicorp/consul/issues/18271 by creating and managing the +// session ourselves, while using Consul's Lock to do the heavy lifting. +type ConsulLock struct { + logger log.Logger + client *api.Client + key string + value string + consistencyMode string + sessionTTL string + lockWaitTime time.Duration + + mu sync.Mutex // protects session state + session *lockSession + // sessionID is a copy of the value from session.id. We use a separate field + // because `Info` needs to keep returning the same sessionID after Unlock has + // cleaned up the session state so that we continue to fence any writes still + // in flight after the lock is Unlocked. It's easier to reason about that as a + // separate field rather than keeping an already-terminated session object + // around. Once Lock is called again this will be replaced (while mu is + // locked) with the new session ID. Must hold mu to read or write this. 
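+	//
+	// For example (a sketch mirroring the HA tests later in this diff):
+	//
+	//	leaderCh, _ := lock.Lock(stopCh)
+	//	_, sid1 := lock.Info()
+	//	_ = lock.Unlock()
+	//	_, sid2 := lock.Info() // sid2 == sid1: in-flight writes stay fenced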
+ sessionID string +} + +type lockSession struct { + // id is immutable after the session is created so does not need mu held + id string + + // mu protects the lock and unlockCh to ensure they are only cleaned up once + mu sync.Mutex + lock *api.Lock + unlockCh chan struct{} +} + +func (s *lockSession) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { + s.mu.Lock() + defer s.mu.Unlock() + + lockHeld := false + defer func() { + if !lockHeld { + s.cleanupLocked() + } + }() + + consulLeaderCh, err := s.lock.Lock(stopCh) + if err != nil { + return nil, err + } + if consulLeaderCh == nil { + // If both leaderCh and err are nil from Consul's Lock then it means we + // waited for the lockWait without grabbing it. + return nil, nil + } + // We got the Lock, monitor it! + lockHeld = true + leaderCh := make(chan struct{}) + go s.monitorLock(leaderCh, s.unlockCh, consulLeaderCh) + return leaderCh, nil +} + +// monitorLock waits for either unlockCh or consulLeaderCh to close and then +// closes leaderCh. It's designed to be run in a separate goroutine. Note that +// we pass unlockCh rather than accessing it via the member variable because it +// is mutated under the lock during Unlock so reading it from c could be racy. +// We just need the chan created at the call site here so we pass it instead of +// locking and unlocking in here. +func (s *lockSession) monitorLock(leaderCh chan struct{}, unlockCh, consulLeaderCh <-chan struct{}) { + select { + case <-unlockCh: + case <-consulLeaderCh: + } + // We lost the lock. Close the leaderCh + close(leaderCh) + + // Whichever chan closed, cleanup to unwind all the state. If we were + // triggered by a cleanup call this will be a no-op, but if not it ensures all + // state is cleaned up correctly. + s.cleanup() +} + +func (s *lockSession) cleanup() { + s.mu.Lock() + defer s.mu.Unlock() + + s.cleanupLocked() +} + +func (s *lockSession) cleanupLocked() { + if s.lock != nil { + s.lock.Unlock() + s.lock = nil + } + if s.unlockCh != nil { + close(s.unlockCh) + s.unlockCh = nil + } + // Don't bother destroying sessions as they will be destroyed after TTL + // anyway. +} + +func (c *ConsulLock) createSession() (*lockSession, error) { + se := &api.SessionEntry{ + Name: "Vault Lock", + TTL: c.sessionTTL, + // We use Consul's default LockDelay of 15s by not specifying it + } + session, _, err := c.client.Session().Create(se, nil) + if err != nil { + return nil, err + } + + opts := &api.LockOptions{ + Key: c.key, + Value: []byte(c.value), + Session: session, + MonitorRetries: 5, + LockWaitTime: c.lockWaitTime, + SessionTTL: c.sessionTTL, + } + lock, err := c.client.LockOpts(opts) + if err != nil { + // Don't bother destroying sessions as they will be destroyed after TTL + // anyway. + return nil, fmt.Errorf("failed to create lock: %w", err) + } + + unlockCh := make(chan struct{}) + + s := &lockSession{ + id: session, + lock: lock, + unlockCh: unlockCh, + } + + // Start renewals of the session + go func() { + // Note we capture unlockCh here rather than s.unlockCh because s.unlockCh + // is mutated on cleanup which is racy since we don't hold a lock here. + // unlockCh will never be mutated though. + err := c.client.Session().RenewPeriodic(c.sessionTTL, session, nil, unlockCh) + if err != nil { + c.logger.Error("failed to renew consul session for more than the TTL, lock lost", "err", err) + } + // release other resources for this session only i.e. don't c.Unlock as that + // might now be locked under a different session). 
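+		// RenewPeriodic returns nil once unlockCh closes (a normal Unlock) and
+		// returns an error if the session could not be kept alive; either way,
+		// tear down this session's local resources below.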
+ s.cleanup() + }() + return s, nil +} + +func (c *ConsulLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { + c.mu.Lock() + defer c.mu.Unlock() + + if c.session != nil { + return nil, fmt.Errorf("lock instance already locked") + } + + session, err := c.createSession() + if err != nil { + return nil, err + } + leaderCh, err := session.Lock(stopCh) + if leaderCh != nil && err == nil { + // We hold the lock, store the session + c.session = session + c.sessionID = session.id + } + return leaderCh, err +} + +func (c *ConsulLock) Unlock() error { + c.mu.Lock() + defer c.mu.Unlock() + + if c.session != nil { + c.session.cleanup() + c.session = nil + // Don't clear c.sessionID since we rely on returning the same old ID after + // Unlock until the next Lock. + } + return nil +} + +func (c *ConsulLock) Value() (bool, string, error) { + kv := c.client.KV() + + var queryOptions *api.QueryOptions + if c.consistencyMode == consistencyModeStrong { + queryOptions = &api.QueryOptions{ + RequireConsistent: true, + } + } + + pair, _, err := kv.Get(c.key, queryOptions) + if err != nil { + return false, "", err + } + if pair == nil { + return false, "", nil + } + // Note that held is expected to mean "does _any_ node hold the lock" not + // "does this current instance hold the lock" so although we know what our own + // session ID is, we don't check it matches here only that there is _some_ + // session in Consul holding the lock right now. + held := pair.Session != "" + value := string(pair.Value) + return held, value, nil +} + +func (c *ConsulLock) Info() (key, sessionid string) { + c.mu.Lock() + defer c.mu.Unlock() + + return c.key, c.sessionID +} diff --git a/physical/consul/consul_test.go b/physical/consul/consul_test.go new file mode 100644 index 0000000..7de69b6 --- /dev/null +++ b/physical/consul/consul_test.go @@ -0,0 +1,528 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package consul + +import ( + "bytes" + "context" + "encoding/hex" + "fmt" + "math/rand" + "reflect" + "strings" + "testing" + "time" + + "github.com/hashicorp/consul/api" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/testhelpers/consul" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" + "github.com/stretchr/testify/require" +) + +func TestConsul_newConsulBackend(t *testing.T) { + tests := []struct { + name string + consulConfig map[string]string + fail bool + redirectAddr string + checkTimeout time.Duration + path string + service string + address string + scheme string + token string + max_parallel int + disableReg bool + consistencyMode string + }{ + { + name: "Valid default config", + consulConfig: map[string]string{}, + checkTimeout: 5 * time.Second, + redirectAddr: "http://127.0.0.1:8200", + path: "vault/", + service: "vault", + address: "127.0.0.1:8500", + scheme: "http", + token: "", + max_parallel: 4, + disableReg: false, + consistencyMode: "default", + }, + { + name: "Valid modified config", + consulConfig: map[string]string{ + "path": "seaTech/", + "service": "astronomy", + "redirect_addr": "http://127.0.0.2:8200", + "check_timeout": "6s", + "address": "127.0.0.2", + "scheme": "https", + "token": "deadbeef-cafeefac-deadc0de-feedface", + "max_parallel": "4", + "disable_registration": "false", + "consistency_mode": "strong", + }, + checkTimeout: 6 * time.Second, + path: "seaTech/", + service: "astronomy", + redirectAddr: "http://127.0.0.2:8200", + address: "127.0.0.2", + scheme: "https", + token: "deadbeef-cafeefac-deadc0de-feedface", + max_parallel: 4, + consistencyMode: "strong", + }, + { + name: "Unix socket", + consulConfig: map[string]string{ + "address": "unix:///tmp/.consul.http.sock", + }, + address: "/tmp/.consul.http.sock", + scheme: "http", // Default, not overridden? + + // Defaults + checkTimeout: 5 * time.Second, + redirectAddr: "http://127.0.0.1:8200", + path: "vault/", + service: "vault", + token: "", + max_parallel: 4, + disableReg: false, + consistencyMode: "default", + }, + { + name: "Scheme in address", + consulConfig: map[string]string{ + "address": "https://127.0.0.2:5000", + }, + address: "127.0.0.2:5000", + scheme: "https", + + // Defaults + checkTimeout: 5 * time.Second, + redirectAddr: "http://127.0.0.1:8200", + path: "vault/", + service: "vault", + token: "", + max_parallel: 4, + disableReg: false, + consistencyMode: "default", + }, + } + + for _, test := range tests { + logger := logging.NewVaultLogger(log.Debug) + + be, err := NewConsulBackend(test.consulConfig, logger) + if test.fail { + if err == nil { + t.Fatalf(`Expected config "%s" to fail`, test.name) + } else { + continue + } + } else if !test.fail && err != nil { + t.Fatalf("Expected config %s to not fail: %v", test.name, err) + } + + c, ok := be.(*ConsulBackend) + if !ok { + t.Fatalf("Expected ConsulBackend: %s", test.name) + } + + if test.path != c.path { + t.Errorf("bad: %s %v != %v", test.name, test.path, c.path) + } + + if test.consistencyMode != c.consistencyMode { + t.Errorf("bad consistency_mode value: %v != %v", test.consistencyMode, c.consistencyMode) + } + + // The configuration stored in the Consul "client" object is not exported, so + // we either have to skip validating it, or add a method to export it, or use reflection. 
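+	// Note: FieldByName on the unexported "config" field yields a read-only
+	// reflect.Value, but calling .String() on its string-kinded fields still
+	// works, which is all these assertions need.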
+ consulConfig := reflect.Indirect(reflect.ValueOf(c.client)).FieldByName("config") + consulConfigScheme := consulConfig.FieldByName("Scheme").String() + consulConfigAddress := consulConfig.FieldByName("Address").String() + + if test.scheme != consulConfigScheme { + t.Errorf("bad scheme value: %v != %v", test.scheme, consulConfigScheme) + } + + if test.address != consulConfigAddress { + t.Errorf("bad address value: %v != %v", test.address, consulConfigAddress) + } + + // FIXME(sean@): Unable to test max_parallel + // if test.max_parallel != cap(c.permitPool) { + // t.Errorf("bad: %v != %v", test.max_parallel, cap(c.permitPool)) + // } + } +} + +func TestConsulBackend(t *testing.T) { + cleanup, config := consul.PrepareTestContainer(t, "1.4.4", false, true) + defer cleanup() + + client, err := api.NewClient(config.APIConfig()) + if err != nil { + t.Fatalf("err: %v", err) + } + + randPath := fmt.Sprintf("vault-%d/", time.Now().Unix()) + defer func() { + client.KV().DeleteTree(randPath, nil) + }() + + logger := logging.NewVaultLogger(log.Debug) + + b, err := NewConsulBackend(map[string]string{ + "address": config.Address(), + "token": config.Token, + "path": randPath, + "max_parallel": "256", + }, logger) + if err != nil { + t.Fatalf("err: %s", err) + } + + physical.ExerciseBackend(t, b) + physical.ExerciseBackend_ListPrefix(t, b) +} + +func TestConsul_TooLarge(t *testing.T) { + cleanup, config := consul.PrepareTestContainer(t, "1.4.4", false, true) + defer cleanup() + + client, err := api.NewClient(config.APIConfig()) + if err != nil { + t.Fatalf("err: %v", err) + } + + randPath := fmt.Sprintf("vault-%d/", time.Now().Unix()) + defer func() { + client.KV().DeleteTree(randPath, nil) + }() + + logger := logging.NewVaultLogger(log.Debug) + + b, err := NewConsulBackend(map[string]string{ + "address": config.Address(), + "token": config.Token, + "path": randPath, + "max_parallel": "256", + }, logger) + if err != nil { + t.Fatalf("err: %s", err) + } + + zeros := make([]byte, 600000) + n, err := rand.Read(zeros) + if n != 600000 { + t.Fatalf("expected 500k zeros, read %d", n) + } + if err != nil { + t.Fatal(err) + } + + err = b.Put(context.Background(), &physical.Entry{ + Key: "foo", + Value: zeros, + }) + if err == nil { + t.Fatal("expected error") + } + if !strings.Contains(err.Error(), physical.ErrValueTooLarge) { + t.Fatalf("expected value too large error, got %v", err) + } + + err = b.(physical.Transactional).Transaction(context.Background(), []*physical.TxnEntry{ + { + Operation: physical.PutOperation, + Entry: &physical.Entry{ + Key: "foo", + Value: zeros, + }, + }, + }) + if err == nil { + t.Fatal("expected error") + } + if !strings.Contains(err.Error(), physical.ErrValueTooLarge) { + t.Fatalf("expected value too large error, got %v", err) + } +} + +func TestConsul_ExpandedCapabilitiesAvailable(t *testing.T) { + testCases := map[string]bool{ + "1.13.5": false, + "1.14.3": true, + } + + for version, shouldBeAvailable := range testCases { + t.Run(version, func(t *testing.T) { + cleanup, config := consul.PrepareTestContainer(t, version, false, true) + defer cleanup() + + logger := logging.NewVaultLogger(log.Debug) + backendConfig := map[string]string{ + "address": config.Address(), + "token": config.Token, + "path": "vault/", + "max_parallel": "-1", + } + + be, err := NewConsulBackend(backendConfig, logger) + if err != nil { + t.Fatal(err) + } + b := be.(*ConsulBackend) + + isAvailable := b.ExpandedCapabilitiesAvailable(context.Background()) + if isAvailable != shouldBeAvailable { + t.Errorf("%t 
!= %t, version %s\n", isAvailable, shouldBeAvailable, version) + } + }) + } +} + +func TestConsul_TransactionalBackend_GetTransactionsForNonExistentValues(t *testing.T) { + cleanup, config := consul.PrepareTestContainer(t, "1.14.2", false, true) + defer cleanup() + + client, err := api.NewClient(config.APIConfig()) + if err != nil { + t.Fatal(err) + } + + txns := make([]*physical.TxnEntry, 0) + ctx := context.Background() + logger := logging.NewVaultLogger(log.Debug) + backendConfig := map[string]string{ + "address": config.Address(), + "token": config.Token, + "path": "vault/", + "max_parallel": "-1", + } + + be, err := NewConsulBackend(backendConfig, logger) + if err != nil { + t.Fatal(err) + } + b := be.(*ConsulBackend) + + defer func() { + _, _ = client.KV().DeleteTree("foo/", nil) + }() + + txns = append(txns, &physical.TxnEntry{ + Operation: physical.GetOperation, + Entry: &physical.Entry{ + Key: "foo/bar", + }, + }) + txns = append(txns, &physical.TxnEntry{ + Operation: physical.PutOperation, + Entry: &physical.Entry{ + Key: "foo/bar", + Value: []byte("baz"), + }, + }) + + err = b.Transaction(ctx, txns) + if err != nil { + t.Fatal(err) + } + + // This should return nil, because the key foo/bar didn't exist when we ran that transaction, so the get + // should return nil, and the put always returns nil + for _, txn := range txns { + if txn.Operation == physical.GetOperation { + if txn.Entry.Value != nil { + t.Fatalf("expected txn.entry.value to be nil but it was %q", string(txn.Entry.Value)) + } + } + } +} + +// TestConsul_TransactionalBackend_GetTransactions tests that passing a slice of transactions to the +// consul backend will populate values for any transactions that are Get operations. +func TestConsul_TransactionalBackend_GetTransactions(t *testing.T) { + cleanup, config := consul.PrepareTestContainer(t, "1.14.2", false, true) + defer cleanup() + + client, err := api.NewClient(config.APIConfig()) + if err != nil { + t.Fatal(err) + } + + txns := make([]*physical.TxnEntry, 0) + ctx := context.Background() + logger := logging.NewVaultLogger(log.Debug) + backendConfig := map[string]string{ + "address": config.Address(), + "token": config.Token, + "path": "vault/", + "max_parallel": "-1", + } + + be, err := NewConsulBackend(backendConfig, logger) + if err != nil { + t.Fatal(err) + } + b := be.(*ConsulBackend) + + defer func() { + _, _ = client.KV().DeleteTree("foo/", nil) + }() + + // Add some seed values to consul, and prepare our slice of transactions at the same time + for i := 0; i < 64; i++ { + key := fmt.Sprintf("foo/lol-%d", i) + err := b.Put(ctx, &physical.Entry{Key: key, Value: []byte(fmt.Sprintf("value-%d", i))}) + if err != nil { + t.Fatal(err) + } + + txns = append(txns, &physical.TxnEntry{ + Operation: physical.GetOperation, + Entry: &physical.Entry{ + Key: key, + }, + }) + } + + for i := 0; i < 64; i++ { + key := fmt.Sprintf("foo/lol-%d", i) + if i%2 == 0 { + txns = append(txns, &physical.TxnEntry{ + Operation: physical.PutOperation, + Entry: &physical.Entry{ + Key: key, + Value: []byte("lmao"), + }, + }) + } else { + txns = append(txns, &physical.TxnEntry{ + Operation: physical.DeleteOperation, + Entry: &physical.Entry{ + Key: key, + }, + }) + } + } + + if len(txns) != 128 { + t.Fatal("wrong number of transactions") + } + + err = b.Transaction(ctx, txns) + if err != nil { + t.Fatal(err) + } + + // Check that our Get operations were populated with their values + for i, txn := range txns { + if txn.Operation == physical.GetOperation { + val := 
[]byte(fmt.Sprintf("value-%d", i)) + if !bytes.Equal(val, txn.Entry.Value) { + t.Fatalf("expected %s to equal %s but it didn't", hex.EncodeToString(val), hex.EncodeToString(txn.Entry.Value)) + } + } + } +} + +func TestConsulHABackend(t *testing.T) { + cleanup, config := consul.PrepareTestContainer(t, "1.4.4", false, true) + defer cleanup() + + client, err := api.NewClient(config.APIConfig()) + if err != nil { + t.Fatalf("err: %v", err) + } + + // We used to use a timestamp here but then if you run multiple instances in + // parallel with one Consul they end up conflicting. + randPath := fmt.Sprintf("vault-%d/", rand.Int()) + defer func() { + client.KV().DeleteTree(randPath, nil) + }() + + logger := logging.NewVaultLogger(log.Debug) + backendConfig := map[string]string{ + "address": config.Address(), + "token": config.Token, + "path": randPath, + "max_parallel": "-1", + // We have to wait this out as part of the test so shorten it a little from + // the default 15 seconds helps with test run times, especially when running + // this in a loop to detect flakes! + "lock_wait_time": "3s", + } + + b, err := NewConsulBackend(backendConfig, logger) + if err != nil { + t.Fatalf("err: %s", err) + } + + b2, err := NewConsulBackend(backendConfig, logger) + if err != nil { + t.Fatalf("err: %s", err) + } + + physical.ExerciseHABackend(t, b.(physical.HABackend), b2.(physical.HABackend)) + + detect, ok := b.(physical.RedirectDetect) + if !ok { + t.Fatalf("consul does not implement RedirectDetect") + } + host, err := detect.DetectHostAddr() + if err != nil { + t.Fatalf("err: %s", err) + } + if host == "" { + t.Fatalf("bad addr: %v", host) + } + + // Calling `Info` on a Lock that has been unlocked must still return the old + // sessionID (until it is locked again) otherwise we will fail to fence writes + // that are still in flight from before (e.g. queued WAL or Merkle flushes) as + // soon as the first one unlocks the session allowing corruption again. + l, err := b.(physical.HABackend).LockWith("test-lock-session-info", "bar") + require.NoError(t, err) + + expectKey := randPath + "test-lock-session-info" + + cl := l.(*ConsulLock) + + stopCh := make(chan struct{}) + time.AfterFunc(5*time.Second, func() { + close(stopCh) + }) + leaderCh, err := cl.Lock(stopCh) + require.NoError(t, err) + require.NotNil(t, leaderCh) + + key, sid := cl.Info() + require.Equal(t, expectKey, key) + require.NotEmpty(t, sid) + + // Now Unlock the lock, sessionID should be reset to empty string + err = cl.Unlock() + require.NoError(t, err) + key2, sid2 := cl.Info() + require.Equal(t, key, key2) + require.Equal(t, sid, sid2) + + // Lock it again, this should cause a new session to be created so SID should + // change. + leaderCh, err = cl.Lock(stopCh) + require.NoError(t, err) + require.NotNil(t, leaderCh) + + key3, sid3 := cl.Info() + require.Equal(t, key, key3) + require.NotEqual(t, sid, sid3) +} diff --git a/physical/consul/helpers.go b/physical/consul/helpers.go new file mode 100644 index 0000000..ce7c47f --- /dev/null +++ b/physical/consul/helpers.go @@ -0,0 +1,43 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package consul + +import ( + "math/rand" + "time" +) + +// DurationMinusBuffer returns a duration, minus a buffer and jitter +// subtracted from the duration. This function is used primarily for +// servicing Consul TTL Checks in advance of the TTL. 
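+//
+// For example (illustrative values): intv=15s, buffer=2s and jitter=16 leave
+// d=13s minus a random stagger of up to 13s/16 (~812ms), so the check fires
+// roughly 12.2s-13s into a 15s TTL window.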
+func DurationMinusBuffer(intv time.Duration, buffer time.Duration, jitter int64) time.Duration { + d := intv - buffer + if jitter == 0 { + d -= RandomStagger(d) + } else { + d -= RandomStagger(time.Duration(int64(d) / jitter)) + } + return d +} + +// DurationMinusBufferDomain returns the domain of valid durations from a +// call to DurationMinusBuffer. This function is used to check user +// specified input values to DurationMinusBuffer. +func DurationMinusBufferDomain(intv time.Duration, buffer time.Duration, jitter int64) (min time.Duration, max time.Duration) { + max = intv - buffer + if jitter == 0 { + min = max + } else { + min = max - time.Duration(int64(max)/jitter) + } + return min, max +} + +// RandomStagger returns an interval between 0 and the duration +func RandomStagger(intv time.Duration) time.Duration { + if intv == 0 { + return 0 + } + return time.Duration(uint64(rand.Int63()) % uint64(intv)) +} diff --git a/physical/couchdb/couchdb.go b/physical/couchdb/couchdb.go new file mode 100644 index 0000000..9c54b04 --- /dev/null +++ b/physical/couchdb/couchdb.go @@ -0,0 +1,317 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package couchdb + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + metrics "github.com/armon/go-metrics" + cleanhttp "github.com/hashicorp/go-cleanhttp" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/physical" +) + +// CouchDBBackend allows the management of couchdb users +type CouchDBBackend struct { + logger log.Logger + client *couchDBClient + permitPool *physical.PermitPool +} + +// Verify CouchDBBackend satisfies the correct interfaces +var ( + _ physical.Backend = (*CouchDBBackend)(nil) + _ physical.PseudoTransactional = (*CouchDBBackend)(nil) + _ physical.PseudoTransactional = (*TransactionalCouchDBBackend)(nil) +) + +type couchDBClient struct { + endpoint string + username string + password string + *http.Client +} + +type couchDBListItem struct { + ID string `json:"id"` + Key string `json:"key"` + Value struct { + Revision string + } `json:"value"` +} + +type couchDBList struct { + TotalRows int `json:"total_rows"` + Offset int `json:"offset"` + Rows []couchDBListItem `json:"rows"` +} + +func (m *couchDBClient) rev(key string) (string, error) { + req, err := http.NewRequest("HEAD", fmt.Sprintf("%s/%s", m.endpoint, key), nil) + if err != nil { + return "", err + } + req.SetBasicAuth(m.username, m.password) + + resp, err := m.Client.Do(req) + if err != nil { + return "", err + } + resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return "", nil + } + etag := resp.Header.Get("Etag") + if len(etag) < 2 { + return "", nil + } + return etag[1 : len(etag)-1], nil +} + +func (m *couchDBClient) put(e couchDBEntry) error { + bs, err := json.Marshal(e) + if err != nil { + return err + } + + req, err := http.NewRequest("PUT", fmt.Sprintf("%s/%s", m.endpoint, e.ID), bytes.NewReader(bs)) + if err != nil { + return err + } + req.SetBasicAuth(m.username, m.password) + resp, err := m.Client.Do(req) + if err == nil { + resp.Body.Close() + } + + return err +} + +func (m *couchDBClient) get(key string) (*physical.Entry, error) { + req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s", m.endpoint, url.PathEscape(key)), nil) + if err != nil { + return nil, err + } + req.SetBasicAuth(m.username, m.password) + resp, err := m.Client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if 
resp.StatusCode == http.StatusNotFound { + return nil, nil + } else if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("GET returned %q", resp.Status) + } + bs, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + entry := couchDBEntry{} + if err := json.Unmarshal(bs, &entry); err != nil { + return nil, err + } + return entry.Entry, nil +} + +func (m *couchDBClient) list(prefix string) ([]couchDBListItem, error) { + req, _ := http.NewRequest("GET", fmt.Sprintf("%s/_all_docs", m.endpoint), nil) + req.SetBasicAuth(m.username, m.password) + values := req.URL.Query() + values.Set("skip", "0") + values.Set("include_docs", "false") + if prefix != "" { + values.Set("startkey", fmt.Sprintf("%q", prefix)) + values.Set("endkey", fmt.Sprintf("%q", prefix+"{}")) + } + req.URL.RawQuery = values.Encode() + + resp, err := m.Client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + results := couchDBList{} + if err := json.Unmarshal(data, &results); err != nil { + return nil, err + } + + return results.Rows, nil +} + +func buildCouchDBBackend(conf map[string]string, logger log.Logger) (*CouchDBBackend, error) { + endpoint := os.Getenv("COUCHDB_ENDPOINT") + if endpoint == "" { + endpoint = conf["endpoint"] + } + if endpoint == "" { + return nil, fmt.Errorf("missing endpoint") + } + + username := os.Getenv("COUCHDB_USERNAME") + if username == "" { + username = conf["username"] + } + + password := os.Getenv("COUCHDB_PASSWORD") + if password == "" { + password = conf["password"] + } + + maxParStr, ok := conf["max_parallel"] + var maxParInt int + var err error + if ok { + maxParInt, err = strconv.Atoi(maxParStr) + if err != nil { + return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err) + } + if logger.IsDebug() { + logger.Debug("max_parallel set", "max_parallel", maxParInt) + } + } + + return &CouchDBBackend{ + client: &couchDBClient{ + endpoint: endpoint, + username: username, + password: password, + Client: cleanhttp.DefaultPooledClient(), + }, + logger: logger, + permitPool: physical.NewPermitPool(maxParInt), + }, nil +} + +func NewCouchDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + return buildCouchDBBackend(conf, logger) +} + +type couchDBEntry struct { + Entry *physical.Entry `json:"entry"` + Rev string `json:"_rev,omitempty"` + ID string `json:"_id"` + Deleted *bool `json:"_deleted,omitempty"` +} + +// Put is used to insert or update an entry +func (m *CouchDBBackend) Put(ctx context.Context, entry *physical.Entry) error { + m.permitPool.Acquire() + defer m.permitPool.Release() + + return m.PutInternal(ctx, entry) +} + +// Get is used to fetch an entry +func (m *CouchDBBackend) Get(ctx context.Context, key string) (*physical.Entry, error) { + m.permitPool.Acquire() + defer m.permitPool.Release() + + return m.GetInternal(ctx, key) +} + +// Delete is used to permanently delete an entry +func (m *CouchDBBackend) Delete(ctx context.Context, key string) error { + m.permitPool.Acquire() + defer m.permitPool.Release() + + return m.DeleteInternal(ctx, key) +} + +// List is used to list all the keys under a given prefix +func (m *CouchDBBackend) List(ctx context.Context, prefix string) ([]string, error) { + defer metrics.MeasureSince([]string{"couchdb", "list"}, time.Now()) + + m.permitPool.Acquire() + defer m.permitPool.Release() + + items, err := m.client.list(prefix) + if err != nil { + return nil, err + } + + var 
out []string + seen := make(map[string]interface{}) + for _, result := range items { + trimmed := strings.TrimPrefix(result.ID, prefix) + sep := strings.Index(trimmed, "/") + if sep == -1 { + out = append(out, trimmed) + } else { + trimmed = trimmed[:sep+1] + if _, ok := seen[trimmed]; !ok { + out = append(out, trimmed) + seen[trimmed] = struct{}{} + } + } + } + return out, nil +} + +// TransactionalCouchDBBackend creates a couchdb backend that forces all operations to happen +// in serial +type TransactionalCouchDBBackend struct { + CouchDBBackend +} + +func NewTransactionalCouchDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + backend, err := buildCouchDBBackend(conf, logger) + if err != nil { + return nil, err + } + backend.permitPool = physical.NewPermitPool(1) + + return &TransactionalCouchDBBackend{ + CouchDBBackend: *backend, + }, nil +} + +// GetInternal is used to fetch an entry +func (m *CouchDBBackend) GetInternal(ctx context.Context, key string) (*physical.Entry, error) { + defer metrics.MeasureSince([]string{"couchdb", "get"}, time.Now()) + + return m.client.get(key) +} + +// PutInternal is used to insert or update an entry +func (m *CouchDBBackend) PutInternal(ctx context.Context, entry *physical.Entry) error { + defer metrics.MeasureSince([]string{"couchdb", "put"}, time.Now()) + + revision, _ := m.client.rev(url.PathEscape(entry.Key)) + + return m.client.put(couchDBEntry{ + Entry: entry, + Rev: revision, + ID: url.PathEscape(entry.Key), + }) +} + +// DeleteInternal is used to permanently delete an entry +func (m *CouchDBBackend) DeleteInternal(ctx context.Context, key string) error { + defer metrics.MeasureSince([]string{"couchdb", "delete"}, time.Now()) + + revision, _ := m.client.rev(url.PathEscape(key)) + deleted := true + return m.client.put(couchDBEntry{ + ID: url.PathEscape(key), + Rev: revision, + Deleted: &deleted, + }) +} diff --git a/physical/couchdb/couchdb_test.go b/physical/couchdb/couchdb_test.go new file mode 100644 index 0000000..359b205 --- /dev/null +++ b/physical/couchdb/couchdb_test.go @@ -0,0 +1,165 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package couchdb
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"os"
+	"strings"
+	"testing"
+	"time"
+
+	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/sdk/helper/docker"
+	"github.com/hashicorp/vault/sdk/helper/logging"
+	"github.com/hashicorp/vault/sdk/physical"
+)
+
+func TestCouchDBBackend(t *testing.T) {
+	cleanup, config := prepareCouchdbDBTestContainer(t)
+	defer cleanup()
+
+	logger := logging.NewVaultLogger(log.Debug)
+
+	b, err := NewCouchDBBackend(map[string]string{
+		"endpoint": config.URL().String(),
+		"username": config.username,
+		"password": config.password,
+	}, logger)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	physical.ExerciseBackend(t, b)
+	physical.ExerciseBackend_ListPrefix(t, b)
+}
+
+func TestTransactionalCouchDBBackend(t *testing.T) {
+	cleanup, config := prepareCouchdbDBTestContainer(t)
+	defer cleanup()
+
+	logger := logging.NewVaultLogger(log.Debug)
+
+	b, err := NewTransactionalCouchDBBackend(map[string]string{
+		"endpoint": config.URL().String(),
+		"username": config.username,
+		"password": config.password,
+	}, logger)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	physical.ExerciseBackend(t, b)
+	physical.ExerciseBackend_ListPrefix(t, b)
+}
+
+type couchDB struct {
+	baseURL  url.URL
+	dbname   string
+	username string
+	password string
+}
+
+func (c couchDB) Address() string {
+	return c.baseURL.Host
+}
+
+func (c couchDB) URL() *url.URL {
+	u := c.baseURL
+	u.Path = c.dbname
+	return &u
+}
+
+var _ docker.ServiceConfig = &couchDB{}
+
+func prepareCouchdbDBTestContainer(t *testing.T) (func(), *couchDB) {
+	// If the environment variable is set, assume the caller wants to target a
+	// real CouchDB.
+	if os.Getenv("COUCHDB_ENDPOINT") != "" {
+		return func() {}, &couchDB{
+			baseURL:  url.URL{Host: os.Getenv("COUCHDB_ENDPOINT")},
+			username: os.Getenv("COUCHDB_USERNAME"),
+			password: os.Getenv("COUCHDB_PASSWORD"),
+		}
+	}
+
+	runner, err := docker.NewServiceRunner(docker.RunOptions{
+		ContainerName:   "couchdb",
+		ImageRepo:       "docker.mirror.hashicorp.services/library/couchdb",
+		ImageTag:        "1.6",
+		Ports:           []string{"5984/tcp"},
+		DoNotAutoRemove: true,
+	})
+	if err != nil {
+		t.Fatalf("Could not start local CouchDB: %s", err)
+	}
+
+	svc, err := runner.StartService(context.Background(), setupCouchDB)
+	if err != nil {
+		t.Fatalf("Could not start local CouchDB: %s", err)
+	}
+
+	return svc.Cleanup, svc.Config.(*couchDB)
+}
+
+func setupCouchDB(ctx context.Context, host string, port int) (docker.ServiceConfig, error) {
+	c := &couchDB{
+		baseURL:  url.URL{Scheme: "http", Host: fmt.Sprintf("%s:%d", host, port)},
+		dbname:   fmt.Sprintf("vault-test-%d", time.Now().Unix()),
+		username: "admin",
+		password: "admin",
+	}
+
+	{
+		resp, err := http.Get(c.baseURL.String())
+		if err != nil {
+			return nil, err
+		}
+		if resp.StatusCode != http.StatusOK {
+			return nil, fmt.Errorf("expected couchdb to return status code 200, got (%s) instead", resp.Status)
+		}
+	}
+
+	{
+		req, err := http.NewRequest("PUT", c.URL().String(), nil)
+		if err != nil {
+			return nil, fmt.Errorf("could not create database request: %q", err)
+		}
+
+		resp, err := http.DefaultClient.Do(req)
+		if err != nil {
+			return nil, fmt.Errorf("could not create database: %q", err)
+		}
+		defer resp.Body.Close()
+		if resp.StatusCode != http.StatusCreated {
+			bs, _ := ioutil.ReadAll(resp.Body)
+			return nil, fmt.Errorf("failed to create database: %s %s\n", resp.Status, string(bs))
+		}
+	}
+
+	{
+		u := c.baseURL
+		u.Path = 
fmt.Sprintf("_config/admins/%s", c.username) + req, err := http.NewRequest("PUT", u.String(), strings.NewReader(fmt.Sprintf(`"%s"`, c.password))) + if err != nil { + return nil, fmt.Errorf("Could not create admin user request: %q", err) + } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, fmt.Errorf("Could not create admin user: %q", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + bs, _ := ioutil.ReadAll(resp.Body) + return nil, fmt.Errorf("Failed to create admin user: %s %s\n", resp.Status, string(bs)) + } + } + + return c, nil +} diff --git a/physical/dynamodb/dynamodb.go b/physical/dynamodb/dynamodb.go new file mode 100644 index 0000000..591c65c --- /dev/null +++ b/physical/dynamodb/dynamodb.go @@ -0,0 +1,911 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package dynamodb + +import ( + "context" + "errors" + "fmt" + "math" + "net/http" + "os" + pkgPath "path" + "sort" + "strconv" + "strings" + "sync" + "time" + + log "github.com/hashicorp/go-hclog" + + metrics "github.com/armon/go-metrics" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" + cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-secure-stdlib/awsutil" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/physical" + + "github.com/cenkalti/backoff/v3" +) + +const ( + // DefaultDynamoDBRegion is used when no region is configured + // explicitly. + DefaultDynamoDBRegion = "us-east-1" + // DefaultDynamoDBTableName is used when no table name + // is configured explicitly. + DefaultDynamoDBTableName = "vault-dynamodb-backend" + + // DefaultDynamoDBReadCapacity is the default read capacity + // that is used when none is configured explicitly. + DefaultDynamoDBReadCapacity = 5 + // DefaultDynamoDBWriteCapacity is the default write capacity + // that is used when none is configured explicitly. + DefaultDynamoDBWriteCapacity = 5 + + // DynamoDBEmptyPath is the string that is used instead of + // empty strings when stored in DynamoDB. + DynamoDBEmptyPath = " " + // DynamoDBLockPrefix is the prefix used to mark DynamoDB records + // as locks. This prefix causes them not to be returned by + // List operations. + DynamoDBLockPrefix = "_" + + // The lock TTL matches the default that Consul API uses, 15 seconds. + DynamoDBLockTTL = 15 * time.Second + + // The amount of time to wait between the lock renewals + DynamoDBLockRenewInterval = 5 * time.Second + + // DynamoDBLockRetryInterval is the amount of time to wait + // if a lock fails before trying again. + DynamoDBLockRetryInterval = time.Second + // DynamoDBWatchRetryMax is the number of times to re-try a + // failed watch before signaling that leadership is lost. + DynamoDBWatchRetryMax = 5 + // DynamoDBWatchRetryInterval is the amount of time to wait + // if a watch fails before trying again. + DynamoDBWatchRetryInterval = 5 * time.Second +) + +// Verify DynamoDBBackend satisfies the correct interfaces +var ( + _ physical.Backend = (*DynamoDBBackend)(nil) + _ physical.HABackend = (*DynamoDBBackend)(nil) + _ physical.Lock = (*DynamoDBLock)(nil) +) + +// DynamoDBBackend is a physical backend that stores data in +// a DynamoDB table. It can be run in high-availability mode +// as DynamoDB has locking capabilities. 
+type DynamoDBBackend struct {
+	table      string
+	client     *dynamodb.DynamoDB
+	logger     log.Logger
+	haEnabled  bool
+	permitPool *physical.PermitPool
+}
+
+// DynamoDBRecord is the representation of a vault entry in
+// DynamoDB. The vault key is split up into two components
+// (Path and Key) in order to allow more efficient listings.
+type DynamoDBRecord struct {
+	Path  string
+	Key   string
+	Value []byte
+}
+
+// DynamoDBLock implements a lock using a DynamoDB client.
+type DynamoDBLock struct {
+	backend  *DynamoDBBackend
+	value, key string
+	identity string
+	held     bool
+	lock     sync.Mutex
+	// Allow modifying the Lock durations for ease of unit testing.
+	renewInterval      time.Duration
+	ttl                time.Duration
+	watchRetryInterval time.Duration
+}
+
+type DynamoDBLockRecord struct {
+	Path     string
+	Key      string
+	Value    []byte
+	Identity []byte
+	Expires  int64
+}
+
+// NewDynamoDBBackend constructs a DynamoDB backend. If the
+// configured DynamoDB table does not exist, it creates it.
+func NewDynamoDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
+	table := os.Getenv("AWS_DYNAMODB_TABLE")
+	if table == "" {
+		table = conf["table"]
+		if table == "" {
+			table = DefaultDynamoDBTableName
+		}
+	}
+	readCapacityString := os.Getenv("AWS_DYNAMODB_READ_CAPACITY")
+	if readCapacityString == "" {
+		readCapacityString = conf["read_capacity"]
+		if readCapacityString == "" {
+			readCapacityString = "0"
+		}
+	}
+	readCapacity, err := strconv.Atoi(readCapacityString)
+	if err != nil {
+		return nil, fmt.Errorf("invalid read capacity: %q", readCapacityString)
+	}
+	if readCapacity == 0 {
+		readCapacity = DefaultDynamoDBReadCapacity
+	}
+
+	writeCapacityString := os.Getenv("AWS_DYNAMODB_WRITE_CAPACITY")
+	if writeCapacityString == "" {
+		writeCapacityString = conf["write_capacity"]
+		if writeCapacityString == "" {
+			writeCapacityString = "0"
+		}
+	}
+	writeCapacity, err := strconv.Atoi(writeCapacityString)
+	if err != nil {
+		return nil, fmt.Errorf("invalid write capacity: %q", writeCapacityString)
+	}
+	if writeCapacity == 0 {
+		writeCapacity = DefaultDynamoDBWriteCapacity
+	}
+
+	endpoint := os.Getenv("AWS_DYNAMODB_ENDPOINT")
+	if endpoint == "" {
+		endpoint = conf["endpoint"]
+	}
+	region := os.Getenv("AWS_DYNAMODB_REGION")
+	if region == "" {
+		region = os.Getenv("AWS_REGION")
+		if region == "" {
+			region = os.Getenv("AWS_DEFAULT_REGION")
+			if region == "" {
+				region = conf["region"]
+				if region == "" {
+					region = DefaultDynamoDBRegion
+				}
+			}
+		}
+	}
+
+	dynamodbMaxRetryString := os.Getenv("AWS_DYNAMODB_MAX_RETRIES")
+	if dynamodbMaxRetryString == "" {
+		dynamodbMaxRetryString = conf["dynamodb_max_retries"]
+	}
+	dynamodbMaxRetry := aws.UseServiceDefaultRetries
+	if dynamodbMaxRetryString != "" {
+		var err error
+		dynamodbMaxRetry, err = strconv.Atoi(dynamodbMaxRetryString)
+		if err != nil {
+			return nil, fmt.Errorf("invalid max retry: %q", dynamodbMaxRetryString)
+		}
+	}
+
+	credsConfig := &awsutil.CredentialsConfig{
+		AccessKey:    conf["access_key"],
+		SecretKey:    conf["secret_key"],
+		SessionToken: conf["session_token"],
+		Logger:       logger,
+	}
+	creds, err := credsConfig.GenerateCredentialChain()
+	if err != nil {
+		return nil, err
+	}
+
+	pooledTransport := cleanhttp.DefaultPooledTransport()
+	pooledTransport.MaxIdleConnsPerHost = consts.ExpirationRestoreWorkerCount
+
+	awsConf := aws.NewConfig().
+		WithCredentials(creds).
+		WithRegion(region).
+		WithEndpoint(endpoint).
+		WithHTTPClient(&http.Client{
+			Transport: pooledTransport,
+		}).
+ WithMaxRetries(dynamodbMaxRetry) + + awsSession, err := session.NewSession(awsConf) + if err != nil { + return nil, fmt.Errorf("Could not establish AWS session: %w", err) + } + + client := dynamodb.New(awsSession) + + if err := ensureTableExists(client, table, readCapacity, writeCapacity); err != nil { + return nil, err + } + + haEnabled := os.Getenv("DYNAMODB_HA_ENABLED") + if haEnabled == "" { + haEnabled = conf["ha_enabled"] + } + haEnabledBool, _ := strconv.ParseBool(haEnabled) + + maxParStr, ok := conf["max_parallel"] + var maxParInt int + if ok { + maxParInt, err = strconv.Atoi(maxParStr) + if err != nil { + return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err) + } + if logger.IsDebug() { + logger.Debug("max_parallel set", "max_parallel", maxParInt) + } + } + + return &DynamoDBBackend{ + table: table, + client: client, + permitPool: physical.NewPermitPool(maxParInt), + haEnabled: haEnabledBool, + logger: logger, + }, nil +} + +// Put is used to insert or update an entry +func (d *DynamoDBBackend) Put(ctx context.Context, entry *physical.Entry) error { + defer metrics.MeasureSince([]string{"dynamodb", "put"}, time.Now()) + + record := DynamoDBRecord{ + Path: recordPathForVaultKey(entry.Key), + Key: recordKeyForVaultKey(entry.Key), + Value: entry.Value, + } + item, err := dynamodbattribute.MarshalMap(record) + if err != nil { + return fmt.Errorf("could not convert prefix record to DynamoDB item: %w", err) + } + requests := []*dynamodb.WriteRequest{{ + PutRequest: &dynamodb.PutRequest{ + Item: item, + }, + }} + + for _, prefix := range physical.Prefixes(entry.Key) { + record = DynamoDBRecord{ + Path: recordPathForVaultKey(prefix), + Key: fmt.Sprintf("%s/", recordKeyForVaultKey(prefix)), + } + item, err := dynamodbattribute.MarshalMap(record) + if err != nil { + return fmt.Errorf("could not convert prefix record to DynamoDB item: %w", err) + } + requests = append(requests, &dynamodb.WriteRequest{ + PutRequest: &dynamodb.PutRequest{ + Item: item, + }, + }) + } + + return d.batchWriteRequests(requests) +} + +// Get is used to fetch an entry +func (d *DynamoDBBackend) Get(ctx context.Context, key string) (*physical.Entry, error) { + defer metrics.MeasureSince([]string{"dynamodb", "get"}, time.Now()) + + d.permitPool.Acquire() + defer d.permitPool.Release() + + resp, err := d.client.GetItem(&dynamodb.GetItemInput{ + TableName: aws.String(d.table), + ConsistentRead: aws.Bool(true), + Key: map[string]*dynamodb.AttributeValue{ + "Path": {S: aws.String(recordPathForVaultKey(key))}, + "Key": {S: aws.String(recordKeyForVaultKey(key))}, + }, + }) + if err != nil { + return nil, err + } + if resp.Item == nil { + return nil, nil + } + + record := &DynamoDBRecord{} + if err := dynamodbattribute.UnmarshalMap(resp.Item, record); err != nil { + return nil, err + } + + return &physical.Entry{ + Key: vaultKey(record), + Value: record.Value, + }, nil +} + +// Delete is used to permanently delete an entry +func (d *DynamoDBBackend) Delete(ctx context.Context, key string) error { + defer metrics.MeasureSince([]string{"dynamodb", "delete"}, time.Now()) + + requests := []*dynamodb.WriteRequest{{ + DeleteRequest: &dynamodb.DeleteRequest{ + Key: map[string]*dynamodb.AttributeValue{ + "Path": {S: aws.String(recordPathForVaultKey(key))}, + "Key": {S: aws.String(recordKeyForVaultKey(key))}, + }, + }, + }} + + // Clean up empty "folders" by looping through all levels of the path to the item being deleted looking for + // children. 
Loop from deepest path to shallowest, and only consider items children if they are not going to be + // deleted by our batch delete request. If a path has no valid children, then it should be considered an empty + // "folder" and be deleted along with the original item in our batch job. Because we loop from deepest path to + // shallowest, once we find a path level that contains valid children we can stop the cleanup operation. + prefixes := physical.Prefixes(key) + sort.Sort(sort.Reverse(sort.StringSlice(prefixes))) + for index, prefix := range prefixes { + // Because delete batches its requests, we need to pass keys we know are going to be deleted into + // hasChildren so it can exclude those when it determines if there WILL be any children left after + // the delete operations have completed. + var excluded []string + if index == 0 { + // This is the value we know for sure is being deleted + excluded = append(excluded, recordKeyForVaultKey(key)) + } else { + // The previous path doesn't count as a child, since if we're still looping, we've found no children + excluded = append(excluded, recordKeyForVaultKey(prefixes[index-1])) + } + + hasChildren, err := d.hasChildren(prefix, excluded) + if err != nil { + return err + } + + if !hasChildren { + // If there are no children other than ones we know are being deleted then cleanup empty "folder" pointers + requests = append(requests, &dynamodb.WriteRequest{ + DeleteRequest: &dynamodb.DeleteRequest{ + Key: map[string]*dynamodb.AttributeValue{ + "Path": {S: aws.String(recordPathForVaultKey(prefix))}, + "Key": {S: aws.String(fmt.Sprintf("%s/", recordKeyForVaultKey(prefix)))}, + }, + }, + }) + } else { + // This loop starts at the deepest path and works backwards looking for children + // once a deeper level of the path has been found to have children there is no + // more cleanup that needs to happen, otherwise we might remove folder pointers + // to that deeper path making it "undiscoverable" with the list operation + break + } + } + + return d.batchWriteRequests(requests) +} + +// List is used to list all the keys under a given +// prefix, up to the next prefix. +func (d *DynamoDBBackend) List(ctx context.Context, prefix string) ([]string, error) { + defer metrics.MeasureSince([]string{"dynamodb", "list"}, time.Now()) + + prefix = strings.TrimSuffix(prefix, "/") + + keys := []string{} + prefix = escapeEmptyPath(prefix) + queryInput := &dynamodb.QueryInput{ + TableName: aws.String(d.table), + ConsistentRead: aws.Bool(true), + KeyConditions: map[string]*dynamodb.Condition{ + "Path": { + ComparisonOperator: aws.String("EQ"), + AttributeValueList: []*dynamodb.AttributeValue{{ + S: aws.String(prefix), + }}, + }, + }, + } + + d.permitPool.Acquire() + defer d.permitPool.Release() + + err := d.client.QueryPages(queryInput, func(out *dynamodb.QueryOutput, lastPage bool) bool { + var record DynamoDBRecord + for _, item := range out.Items { + dynamodbattribute.UnmarshalMap(item, &record) + if !strings.HasPrefix(record.Key, DynamoDBLockPrefix) { + keys = append(keys, record.Key) + } + } + return !lastPage + }) + if err != nil { + return nil, err + } + + return keys, nil +} + +// hasChildren returns true if there exist items below a certain path prefix. +// To do so, the method fetches such items from DynamoDB. This method is primarily +// used by Delete. Because DynamoDB requests are batched this method is being called +// before any deletes take place. 
To account for that hasChildren accepts a slice of
+// strings representing values we expect to find that should NOT be counted as children
+// because they are going to be deleted.
+func (d *DynamoDBBackend) hasChildren(prefix string, exclude []string) (bool, error) {
+	prefix = strings.TrimSuffix(prefix, "/")
+	prefix = escapeEmptyPath(prefix)
+
+	queryInput := &dynamodb.QueryInput{
+		TableName:      aws.String(d.table),
+		ConsistentRead: aws.Bool(true),
+		KeyConditions: map[string]*dynamodb.Condition{
+			"Path": {
+				ComparisonOperator: aws.String("EQ"),
+				AttributeValueList: []*dynamodb.AttributeValue{{
+					S: aws.String(prefix),
+				}},
+			},
+		},
+		// Avoid fetching too many items from DynamoDB for performance reasons.
+		// We want to know if there are any children we don't expect to see.
+		// Answering that question requires fetching a minimum of one more item
+		// than the number we expect. In most cases this value will be 2
+		Limit: aws.Int64(int64(len(exclude) + 1)),
+	}
+
+	d.permitPool.Acquire()
+	defer d.permitPool.Release()
+
+	out, err := d.client.Query(queryInput)
+	if err != nil {
+		return false, err
+	}
+	var childrenExist bool
+	for _, item := range out.Items {
+		for _, excluded := range exclude {
+			// Check if we've found an item we didn't expect to. Look for "folder" pointer keys (trailing slash)
+			// and regular value keys (no trailing slash)
+			if *item["Key"].S != excluded && *item["Key"].S != fmt.Sprintf("%s/", excluded) {
+				childrenExist = true
+				break
+			}
+		}
+		if childrenExist {
+			// We only need to find ONE child we didn't expect to.
+			break
+		}
+	}
+
+	return childrenExist, nil
+}
+
+// LockWith is used for mutual exclusion based on the given key.
+func (d *DynamoDBBackend) LockWith(key, value string) (physical.Lock, error) {
+	identity, err := uuid.GenerateUUID()
+	if err != nil {
+		return nil, err
+	}
+	return &DynamoDBLock{
+		backend:            d,
+		key:                pkgPath.Join(pkgPath.Dir(key), DynamoDBLockPrefix+pkgPath.Base(key)),
+		value:              value,
+		identity:           identity,
+		renewInterval:      DynamoDBLockRenewInterval,
+		ttl:                DynamoDBLockTTL,
+		watchRetryInterval: DynamoDBWatchRetryInterval,
+	}, nil
+}
+
+func (d *DynamoDBBackend) HAEnabled() bool {
+	return d.haEnabled
+}
+
+// batchWriteRequests takes a list of write requests and executes them in batches
+// with a maximum size of 25 (which is the limit of BatchWriteItem requests).
+func (d *DynamoDBBackend) batchWriteRequests(requests []*dynamodb.WriteRequest) error {
+	for len(requests) > 0 {
+		batchSize := int(math.Min(float64(len(requests)), 25))
+		batch := map[string][]*dynamodb.WriteRequest{d.table: requests[:batchSize]}
+		requests = requests[batchSize:]
+
+		var err error
+
+		d.permitPool.Acquire()
+
+		boff := backoff.NewExponentialBackOff()
+		boff.MaxElapsedTime = 600 * time.Second
+
+		for len(batch) > 0 {
+			var output *dynamodb.BatchWriteItemOutput
+			output, err = d.client.BatchWriteItem(&dynamodb.BatchWriteItemInput{
+				RequestItems: batch,
+			})
+
+			if err != nil {
+				break
+			}
+
+			if len(output.UnprocessedItems) == 0 {
+				break
+			} else {
+				duration := boff.NextBackOff()
+				if duration != backoff.Stop {
+					batch = output.UnprocessedItems
+					time.Sleep(duration)
+				} else {
+					err = errors.New("dynamodb: timeout handling UnprocessedItems")
+					break
+				}
+			}
+		}
+
+		d.permitPool.Release()
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Lock tries to acquire the lock by repeatedly trying to create
+// a record in the DynamoDB table. It will block until either the
+// stop channel is closed or the lock is acquired successfully.
+// The returned channel will be closed once the lock is deleted or
+// changed in the DynamoDB table.
+func (l *DynamoDBLock) Lock(stopCh <-chan struct{}) (doneCh <-chan struct{}, retErr error) {
+	l.lock.Lock()
+	defer l.lock.Unlock()
+	if l.held {
+		return nil, fmt.Errorf("lock already held")
+	}
+
+	done := make(chan struct{})
+	// close the done channel if we bail out with an error
+	defer func() {
+		if retErr != nil {
+			close(done)
+		}
+	}()
+
+	var (
+		stop    = make(chan struct{})
+		success = make(chan struct{})
+		errors  = make(chan error)
+		leader  = make(chan struct{})
+	)
+	// try to acquire the lock asynchronously
+	go l.tryToLock(stop, success, errors)
+
+	select {
+	case <-success:
+		l.held = true
+		// after acquiring it successfully, we must renew the lock periodically,
+		// and watch the lock in order to close the leader channel
+		// once it is lost.
+		go l.periodicallyRenewLock(leader)
+		go l.watch(leader)
+	case retErr = <-errors:
+		close(stop)
+		return nil, retErr
+	case <-stopCh:
+		close(stop)
+		return nil, nil
+	}
+
+	return leader, retErr
+}
+
+// Unlock releases the lock by deleting the lock record from the
+// DynamoDB table.
+func (l *DynamoDBLock) Unlock() error {
+	l.lock.Lock()
+	defer l.lock.Unlock()
+	if !l.held {
+		return nil
+	}
+
+	l.held = false
+
+	// Conditionally delete, only after checking that the key actually belongs
+	// to this Vault instance and has not already been claimed by another leader
+	condition := "#identity = :identity"
+	deleteMyLock := &dynamodb.DeleteItemInput{
+		TableName:           &l.backend.table,
+		ConditionExpression: &condition,
+		Key: map[string]*dynamodb.AttributeValue{
+			"Path": {S: aws.String(recordPathForVaultKey(l.key))},
+			"Key":  {S: aws.String(recordKeyForVaultKey(l.key))},
+		},
+		ExpressionAttributeNames: map[string]*string{
+			"#identity": aws.String("Identity"),
+		},
+		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
+			":identity": {B: []byte(l.identity)},
+		},
+	}
+
+	_, err := l.backend.client.DeleteItem(deleteMyLock)
+	if isConditionCheckFailed(err) {
+		err = nil
+	}
+
+	return err
+}
+
+// Value checks whether or not the lock is held by any instance of DynamoDBLock,
+// including this one, and returns the current value.
+func (l *DynamoDBLock) Value() (bool, string, error) {
+	entry, err := l.backend.Get(context.Background(), l.key)
+	if err != nil {
+		return false, "", err
+	}
+	if entry == nil {
+		return false, "", nil
+	}
+
+	return true, string(entry.Value), nil
+}
+
+// tryToLock tries to create a new item in DynamoDB
+// every `DynamoDBLockRetryInterval`. As long as the item
+// cannot be created (because it already exists), it will
+// be retried. If the operation fails due to an error, it
+// is sent to the errors channel.
+// Once the lock is acquired successfully, the success
+// channel is closed.
+func (l *DynamoDBLock) tryToLock(stop, success chan struct{}, errors chan error) {
+	ticker := time.NewTicker(DynamoDBLockRetryInterval)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-stop:
+			return
+		case <-ticker.C:
+			err := l.updateItem(true)
+			if err != nil {
+				if err, ok := err.(awserr.Error); ok {
+					// Don't report a condition check failure, this means that the lock
+					// is already being held.
+					if !isConditionCheckFailed(err) {
+						errors <- err
+					}
+				} else {
+					// It's not an AWS error, and is probably not transient; bail out.
+ errors <- err + return + } + } else { + close(success) + return + } + } + } +} + +func (l *DynamoDBLock) periodicallyRenewLock(done chan struct{}) { + ticker := time.NewTicker(l.renewInterval) + for { + select { + case <-ticker.C: + // This should not renew the lock if the lock was deleted from under you. + err := l.updateItem(false) + if err != nil { + if !isConditionCheckFailed(err) { + l.backend.logger.Error("error renewing leadership lock", "error", err) + } + } + case <-done: + ticker.Stop() + return + } + } +} + +// Attempts to put/update the dynamodb item using condition expressions to +// evaluate the TTL. +func (l *DynamoDBLock) updateItem(createIfMissing bool) error { + now := time.Now() + + conditionExpression := "" + if createIfMissing { + conditionExpression += "attribute_not_exists(#path) or " + + "attribute_not_exists(#key) or " + } else { + conditionExpression += "attribute_exists(#path) and " + + "attribute_exists(#key) and " + } + + // To work when upgrading from older versions that did not include the + // Identity attribute, we first check if the attr doesn't exist, and if + // it does, then we check if the identity is equal to our own. + // We also write if the lock expired. + conditionExpression += "(attribute_not_exists(#identity) or #identity = :identity or #expires <= :now)" + + _, err := l.backend.client.UpdateItem(&dynamodb.UpdateItemInput{ + TableName: aws.String(l.backend.table), + Key: map[string]*dynamodb.AttributeValue{ + "Path": {S: aws.String(recordPathForVaultKey(l.key))}, + "Key": {S: aws.String(recordKeyForVaultKey(l.key))}, + }, + UpdateExpression: aws.String("SET #value=:value, #identity=:identity, #expires=:expires"), + // If both key and path already exist, we can only write if + // A. identity is equal to our identity (or the identity doesn't exist) + // or + // B. The ttl on the item is <= to the current time + ConditionExpression: aws.String(conditionExpression), + ExpressionAttributeNames: map[string]*string{ + "#path": aws.String("Path"), + "#key": aws.String("Key"), + "#identity": aws.String("Identity"), + "#expires": aws.String("Expires"), + "#value": aws.String("Value"), + }, + ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{ + ":identity": {B: []byte(l.identity)}, + ":value": {B: []byte(l.value)}, + ":now": {N: aws.String(strconv.FormatInt(now.UnixNano(), 10))}, + ":expires": {N: aws.String(strconv.FormatInt(now.Add(l.ttl).UnixNano(), 10))}, + }, + }) + + return err +} + +// watch checks whether the lock has changed in the +// DynamoDB table and closes the leader channel if so. +// The interval is set by `DynamoDBWatchRetryInterval`. +// If an error occurs during the check, watch will retry +// the operation for `DynamoDBWatchRetryMax` times and +// close the leader channel if it can't succeed. 
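+// With the package defaults (a 5 second watch interval and 5 retries), that
+// means leadership is treated as lost after roughly 25 seconds of
+// consecutive failed checks.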
+func (l *DynamoDBLock) watch(lost chan struct{}) {
+	retries := DynamoDBWatchRetryMax
+
+	ticker := time.NewTicker(l.watchRetryInterval)
+WatchLoop:
+	for {
+		select {
+		case <-ticker.C:
+			resp, err := l.backend.client.GetItem(&dynamodb.GetItemInput{
+				TableName:      aws.String(l.backend.table),
+				ConsistentRead: aws.Bool(true),
+				Key: map[string]*dynamodb.AttributeValue{
+					"Path": {S: aws.String(recordPathForVaultKey(l.key))},
+					"Key":  {S: aws.String(recordKeyForVaultKey(l.key))},
+				},
+			})
+			if err != nil {
+				retries--
+				if retries == 0 {
+					break WatchLoop
+				}
+				continue
+			}
+
+			if resp == nil {
+				break WatchLoop
+			}
+			record := &DynamoDBLockRecord{}
+			err = dynamodbattribute.UnmarshalMap(resp.Item, record)
+			if err != nil || string(record.Identity) != l.identity {
+				break WatchLoop
+			}
+		}
+		retries = DynamoDBWatchRetryMax
+	}
+
+	close(lost)
+}
+
+// ensureTableExists creates a DynamoDB table with a given
+// DynamoDB client. If the table already exists, it is not
+// reconfigured.
+func ensureTableExists(client *dynamodb.DynamoDB, table string, readCapacity, writeCapacity int) error {
+	_, err := client.DescribeTable(&dynamodb.DescribeTableInput{
+		TableName: aws.String(table),
+	})
+	if err != nil {
+		if awsError, ok := err.(awserr.Error); ok {
+			if awsError.Code() == "ResourceNotFoundException" {
+				_, err := client.CreateTable(&dynamodb.CreateTableInput{
+					TableName: aws.String(table),
+					ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
+						ReadCapacityUnits:  aws.Int64(int64(readCapacity)),
+						WriteCapacityUnits: aws.Int64(int64(writeCapacity)),
+					},
+					KeySchema: []*dynamodb.KeySchemaElement{{
+						AttributeName: aws.String("Path"),
+						KeyType:       aws.String("HASH"),
+					}, {
+						AttributeName: aws.String("Key"),
+						KeyType:       aws.String("RANGE"),
+					}},
+					AttributeDefinitions: []*dynamodb.AttributeDefinition{{
+						AttributeName: aws.String("Path"),
+						AttributeType: aws.String("S"),
+					}, {
+						AttributeName: aws.String("Key"),
+						AttributeType: aws.String("S"),
+					}},
+				})
+				if err != nil {
+					return err
+				}
+
+				err = client.WaitUntilTableExists(&dynamodb.DescribeTableInput{
+					TableName: aws.String(table),
+				})
+				if err != nil {
+					return err
+				}
+				// table created successfully
+				return nil
+			}
+		}
+		return err
+	}
+
+	return nil
+}
+
+// recordPathForVaultKey transforms a vault key into
+// a value suitable for the `DynamoDBRecord`'s `Path`
+// property. This path equals the vault key without
+// its last component.
+func recordPathForVaultKey(key string) string {
+	if strings.Contains(key, "/") {
+		return pkgPath.Dir(key)
+	}
+	return DynamoDBEmptyPath
+}
+
+// recordKeyForVaultKey transforms a vault key into
+// a value suitable for the `DynamoDBRecord`'s `Key`
+// property. This key equals the vault key's
+// last component.
+func recordKeyForVaultKey(key string) string {
+	return pkgPath.Base(key)
+}
+
+// vaultKey returns the vault key for a given record
+// from the DynamoDB table. This is the combination of
+// the record's Path and Key.
+func vaultKey(record *DynamoDBRecord) string {
+	path := unescapeEmptyPath(record.Path)
+	if path == "" {
+		return record.Key
+	}
+	return pkgPath.Join(record.Path, record.Key)
+}
+
+// escapeEmptyPath is used to escape the root key's path
+// with a value that can be stored in DynamoDB. DynamoDB
+// does not allow values to be empty strings.
+func escapeEmptyPath(s string) string {
+	if s == "" {
+		return DynamoDBEmptyPath
+	}
+	return s
+}
+
+// unescapeEmptyPath is the opposite of `escapeEmptyPath`.
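+// For example, escapeEmptyPath("") yields DynamoDBEmptyPath (" "), and
+// unescapeEmptyPath(DynamoDBEmptyPath) yields "" again.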
+func unescapeEmptyPath(s string) string { + if s == DynamoDBEmptyPath { + return "" + } + return s +} + +// isConditionCheckFailed tests whether err is an ErrCodeConditionalCheckFailedException +// from the AWS SDK. +func isConditionCheckFailed(err error) bool { + if err != nil { + if err, ok := err.(awserr.Error); ok { + return err.Code() == dynamodb.ErrCodeConditionalCheckFailedException + } + } + + return false +} diff --git a/physical/dynamodb/dynamodb_test.go b/physical/dynamodb/dynamodb_test.go new file mode 100644 index 0000000..a0b6046 --- /dev/null +++ b/physical/dynamodb/dynamodb_test.go @@ -0,0 +1,421 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package dynamodb + +import ( + "context" + "fmt" + "math/rand" + "net/http" + "net/url" + "os" + "testing" + "time" + + "github.com/go-test/deep" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/docker" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" +) + +func TestDynamoDBBackend(t *testing.T) { + cleanup, svccfg := prepareDynamoDBTestContainer(t) + defer cleanup() + + creds, err := svccfg.Credentials.Get() + if err != nil { + t.Fatalf("err: %v", err) + } + + region := os.Getenv("AWS_DEFAULT_REGION") + if region == "" { + region = "us-east-1" + } + + awsSession, err := session.NewSession(&aws.Config{ + Credentials: svccfg.Credentials, + Endpoint: aws.String(svccfg.URL().String()), + Region: aws.String(region), + }) + if err != nil { + t.Fatalf("err: %v", err) + } + + conn := dynamodb.New(awsSession) + + randInt := rand.New(rand.NewSource(time.Now().UnixNano())).Int() + table := fmt.Sprintf("vault-dynamodb-testacc-%d", randInt) + + defer func() { + conn.DeleteTable(&dynamodb.DeleteTableInput{ + TableName: aws.String(table), + }) + }() + + logger := logging.NewVaultLogger(log.Debug) + + b, err := NewDynamoDBBackend(map[string]string{ + "access_key": creds.AccessKeyID, + "secret_key": creds.SecretAccessKey, + "session_token": creds.SessionToken, + "table": table, + "region": region, + "endpoint": svccfg.URL().String(), + }, logger) + if err != nil { + t.Fatalf("err: %s", err) + } + + physical.ExerciseBackend(t, b) + physical.ExerciseBackend_ListPrefix(t, b) + + t.Run("Marshalling upgrade", func(t *testing.T) { + path := "test_key" + + // Manually write to DynamoDB using the old ConvertTo function + // for marshalling data + inputEntry := &physical.Entry{ + Key: path, + Value: []byte{0x0f, 0xcf, 0x4a, 0x0f, 0xba, 0x2b, 0x15, 0xf0, 0xaa, 0x75, 0x09}, + } + + record := DynamoDBRecord{ + Path: recordPathForVaultKey(inputEntry.Key), + Key: recordKeyForVaultKey(inputEntry.Key), + Value: inputEntry.Value, + } + + item, err := dynamodbattribute.ConvertToMap(record) + if err != nil { + t.Fatalf("err: %s", err) + } + + request := &dynamodb.PutItemInput{ + Item: item, + TableName: &table, + } + conn.PutItem(request) + + // Read back the data using the normal interface which should + // handle the old marshalling format gracefully + entry, err := b.Get(context.Background(), path) + if err != nil { + t.Fatalf("err: %s", err) + } + if diff := deep.Equal(inputEntry, entry); diff != nil { + t.Fatal(diff) + } + }) +} + +func TestDynamoDBHABackend(t *testing.T) { + cleanup, svccfg := prepareDynamoDBTestContainer(t) + 
defer cleanup() + + creds, err := svccfg.Credentials.Get() + if err != nil { + t.Fatalf("err: %v", err) + } + + region := os.Getenv("AWS_DEFAULT_REGION") + if region == "" { + region = "us-east-1" + } + + awsSession, err := session.NewSession(&aws.Config{ + Credentials: svccfg.Credentials, + Endpoint: aws.String(svccfg.URL().String()), + Region: aws.String(region), + }) + if err != nil { + t.Fatalf("err: %v", err) + } + + conn := dynamodb.New(awsSession) + + randInt := rand.New(rand.NewSource(time.Now().UnixNano())).Int() + table := fmt.Sprintf("vault-dynamodb-testacc-%d", randInt) + + defer func() { + conn.DeleteTable(&dynamodb.DeleteTableInput{ + TableName: aws.String(table), + }) + }() + + logger := logging.NewVaultLogger(log.Debug) + config := map[string]string{ + "access_key": creds.AccessKeyID, + "secret_key": creds.SecretAccessKey, + "session_token": creds.SessionToken, + "table": table, + "region": region, + "endpoint": svccfg.URL().String(), + } + + b, err := NewDynamoDBBackend(config, logger) + if err != nil { + t.Fatalf("err: %s", err) + } + + b2, err := NewDynamoDBBackend(config, logger) + if err != nil { + t.Fatalf("err: %s", err) + } + + physical.ExerciseHABackend(t, b.(physical.HABackend), b2.(physical.HABackend)) + testDynamoDBLockTTL(t, b.(physical.HABackend)) + testDynamoDBLockRenewal(t, b.(physical.HABackend)) +} + +// Similar to testHABackend, but using internal implementation details to +// trigger the lock failure scenario by setting the lock renew period for one +// of the locks to a higher value than the lock TTL. +func testDynamoDBLockTTL(t *testing.T, ha physical.HABackend) { + // Set much smaller lock times to speed up the test. + lockTTL := time.Second * 3 + renewInterval := time.Second * 1 + watchInterval := time.Second * 1 + + // Get the lock + origLock, err := ha.LockWith("dynamodbttl", "bar") + if err != nil { + t.Fatalf("err: %v", err) + } + // set the first lock renew period to double the expected TTL. + lock := origLock.(*DynamoDBLock) + lock.renewInterval = lockTTL * 2 + lock.ttl = lockTTL + lock.watchRetryInterval = watchInterval + + // Attempt to lock + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("failed to get leader ch") + } + + // Check the value + held, val, err := lock.Value() + if err != nil { + t.Fatalf("err: %v", err) + } + if !held { + t.Fatalf("should be held") + } + if val != "bar" { + t.Fatalf("bad value: %v", err) + } + + // Second acquisition should succeed because the first lock should + // not renew within the 3 sec TTL. 
+ origLock2, err := ha.LockWith("dynamodbttl", "baz") + if err != nil { + t.Fatalf("err: %v", err) + } + + lock2 := origLock2.(*DynamoDBLock) + lock2.renewInterval = renewInterval + lock2.ttl = lockTTL + lock2.watchRetryInterval = watchInterval + + // Cancel attempt eventually so as not to block unit tests forever + stopCh := make(chan struct{}) + time.AfterFunc(lockTTL*10, func() { + close(stopCh) + }) + + // Attempt to lock should work + leaderCh2, err := lock2.Lock(stopCh) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh2 == nil { + t.Fatalf("should get leader ch") + } + + // Check the value + held, val, err = lock2.Value() + if err != nil { + t.Fatalf("err: %v", err) + } + if !held { + t.Fatalf("should be held") + } + if val != "baz" { + t.Fatalf("bad value: %v", err) + } + + // The first lock should have lost the leader channel + leaderChClosed := false + blocking := make(chan struct{}) + // Attempt to read from the leader or the blocking channel, which ever one + // happens first. + go func() { + select { + case <-time.After(watchInterval * 3): + return + case <-leaderCh: + leaderChClosed = true + close(blocking) + case <-blocking: + return + } + }() + + <-blocking + if !leaderChClosed { + t.Fatalf("original lock did not have its leader channel closed.") + } + + // Cleanup + lock2.Unlock() +} + +// Similar to testHABackend, but using internal implementation details to +// trigger a renewal before a "watch" check, which has been a source of +// race conditions. +func testDynamoDBLockRenewal(t *testing.T, ha physical.HABackend) { + renewInterval := time.Second * 1 + watchInterval := time.Second * 5 + + // Get the lock + origLock, err := ha.LockWith("dynamodbrenewal", "bar") + if err != nil { + t.Fatalf("err: %v", err) + } + + // customize the renewal and watch intervals + lock := origLock.(*DynamoDBLock) + lock.renewInterval = renewInterval + lock.watchRetryInterval = watchInterval + + // Attempt to lock + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("failed to get leader ch") + } + + // Check the value + held, val, err := lock.Value() + if err != nil { + t.Fatalf("err: %v", err) + } + if !held { + t.Fatalf("should be held") + } + if val != "bar" { + t.Fatalf("bad value: %v", err) + } + + // Release the lock, which will delete the stored item + if err := lock.Unlock(); err != nil { + t.Fatalf("err: %v", err) + } + + // Wait longer than the renewal time, but less than the watch time + time.Sleep(1500 * time.Millisecond) + + // Attempt to lock with new lock + newLock, err := ha.LockWith("dynamodbrenewal", "baz") + if err != nil { + t.Fatalf("err: %v", err) + } + + // Cancel attempt in 6 sec so as not to block unit tests forever + stopCh := make(chan struct{}) + time.AfterFunc(6*time.Second, func() { + close(stopCh) + }) + + // Attempt to lock should work + leaderCh2, err := newLock.Lock(stopCh) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh2 == nil { + t.Fatalf("should get leader ch") + } + + // Check the value + held, val, err = newLock.Value() + if err != nil { + t.Fatalf("err: %v", err) + } + if !held { + t.Fatalf("should be held") + } + if val != "baz" { + t.Fatalf("bad value: %v", err) + } + + // Cleanup + newLock.Unlock() +} + +type Config struct { + docker.ServiceURL + Credentials *credentials.Credentials +} + +var _ docker.ServiceConfig = &Config{} + +func prepareDynamoDBTestContainer(t *testing.T) (func(), *Config) { + // If environment variable is set, assume caller wants to 
target a real
+	// DynamoDB.
+	if endpoint := os.Getenv("AWS_DYNAMODB_ENDPOINT"); endpoint != "" {
+		s, err := docker.NewServiceURLParse(endpoint)
+		if err != nil {
+			t.Fatal(err)
+		}
+		return func() {}, &Config{*s, credentials.NewEnvCredentials()}
+	}
+
+	runner, err := docker.NewServiceRunner(docker.RunOptions{
+		ImageRepo:     "docker.mirror.hashicorp.services/cnadiminti/dynamodb-local",
+		ImageTag:      "latest",
+		ContainerName: "dynamodb",
+		Ports:         []string{"8000/tcp"},
+	})
+	if err != nil {
+		t.Fatalf("Could not start local DynamoDB: %s", err)
+	}
+
+	svc, err := runner.StartService(context.Background(), connectDynamoDB)
+	if err != nil {
+		t.Fatalf("Could not start local DynamoDB: %s", err)
+	}
+
+	return svc.Cleanup, svc.Config.(*Config)
+}
+
+func connectDynamoDB(ctx context.Context, host string, port int) (docker.ServiceConfig, error) {
+	u := url.URL{
+		Scheme: "http",
+		Host:   fmt.Sprintf("%s:%d", host, port),
+	}
+	resp, err := http.Get(u.String())
+	if err != nil {
+		return nil, err
+	}
+	// The local DynamoDB container answers a plain GET with HTTP 400 once it
+	// is accepting connections; anything else means it isn't ready yet.
+	if resp.StatusCode != 400 {
+		return nil, fmt.Errorf("expected status 400 from local DynamoDB, got %s", resp.Status)
+	}
+
+	return &Config{
+		ServiceURL:  *docker.NewServiceURL(u),
+		Credentials: credentials.NewStaticCredentials("fake", "fake", ""),
+	}, nil
+}
diff --git a/physical/etcd/etcd.go b/physical/etcd/etcd.go
new file mode 100644
index 0000000..f17a552
--- /dev/null
+++ b/physical/etcd/etcd.go
@@ -0,0 +1,92 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package etcd
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"os"
+	"strings"
+
+	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/sdk/physical"
+	"go.etcd.io/etcd/client/v2"
+)
+
+var (
+	EtcdMultipleBootstrapError = errors.New("client setup failed: multiple discovery or bootstrap flags specified, use either \"address\" or \"discovery_srv\"")
+	EtcdAddressError           = errors.New("client setup failed: address must be valid URL (ex. 'scheme://host:port')")
+	EtcdLockHeldError          = errors.New("lock already held")
+	EtcdLockNotHeldError       = errors.New("lock not held")
+	EtcdVersionUnknown         = errors.New("etcd: unknown API version")
+)
+
+// NewEtcdBackend constructs an etcd backend using a given machine address.
+func NewEtcdBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
+	var (
+		apiVersion string
+		ok         bool
+	)
+
+	if apiVersion, ok = conf["etcd_api"]; !ok {
+		apiVersion = os.Getenv("ETCD_API")
+	}
+
+	if apiVersion == "" {
+		apiVersion = "v3"
+	}
+
+	switch apiVersion {
+	case "3", "etcd3", "v3":
+		return newEtcd3Backend(conf, logger)
+	default:
+		return nil, EtcdVersionUnknown
+	}
+}
+
+// getEtcdOption retrieves the config option in order of priority:
+// 1. The named environment variable, if it exists
+// 2.
The key in the config map
+func getEtcdOption(conf map[string]string, confKey, envVar string) (string, bool) {
+	confVal, inConf := conf[confKey]
+	envVal, inEnv := os.LookupEnv(envVar)
+	if inEnv {
+		return envVal, true
+	}
+	return confVal, inConf
+}
+
+func getEtcdEndpoints(conf map[string]string) ([]string, error) {
+	address, staticBootstrap := getEtcdOption(conf, "address", "ETCD_ADDR")
+	domain, useSrv := getEtcdOption(conf, "discovery_srv", "ETCD_DISCOVERY_SRV")
+	if useSrv && staticBootstrap {
+		return nil, EtcdMultipleBootstrapError
+	}
+
+	if staticBootstrap {
+		endpoints := strings.Split(address, ",")
+		// Verify that the machines are valid URLs
+		for _, e := range endpoints {
+			u, urlErr := url.Parse(e)
+			if urlErr != nil || u.Scheme == "" {
+				return nil, EtcdAddressError
+			}
+		}
+		return endpoints, nil
+	}
+
+	if useSrv {
+		srvName, _ := getEtcdOption(conf, "discovery_srv_name", "ETCD_DISCOVERY_SRV_NAME")
+		discoverer := client.NewSRVDiscover()
+		endpoints, err := discoverer.Discover(domain, srvName)
+		if err != nil {
+			return nil, fmt.Errorf("failed to discover etcd endpoints through SRV discovery: %w", err)
+		}
+		return endpoints, nil
+	}
+
+	// Set a default endpoints list if no option was set
+	return []string{"http://127.0.0.1:2379"}, nil
+}
diff --git a/physical/etcd/etcd3.go b/physical/etcd/etcd3.go
new file mode 100644
index 0000000..8501a8b
--- /dev/null
+++ b/physical/etcd/etcd3.go
@@ -0,0 +1,383 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package etcd
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"os"
+	"path"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/armon/go-metrics"
+	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/go-secure-stdlib/parseutil"
+	"github.com/hashicorp/go-secure-stdlib/strutil"
+	"github.com/hashicorp/vault/sdk/physical"
+	"go.etcd.io/etcd/client/pkg/v3/transport"
+	clientv3 "go.etcd.io/etcd/client/v3"
+	"go.etcd.io/etcd/client/v3/concurrency"
+)
+
+// EtcdBackend is a physical backend that stores data at a specific
+// prefix within etcd. It is used for most production situations as
+// it allows Vault to run on multiple machines in a highly-available manner.
+type EtcdBackend struct {
+	logger         log.Logger
+	path           string
+	haEnabled      bool
+	lockTimeout    time.Duration
+	requestTimeout time.Duration
+
+	permitPool *physical.PermitPool
+
+	etcd *clientv3.Client
+}
+
+// Verify EtcdBackend satisfies the correct interfaces
+var (
+	_ physical.Backend   = (*EtcdBackend)(nil)
+	_ physical.HABackend = (*EtcdBackend)(nil)
+	_ physical.Lock      = (*EtcdLock)(nil)
+)
+
+// newEtcd3Backend constructs an etcd3 backend.
+func newEtcd3Backend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
+	// Get the etcd path from the configuration.
+	path, ok := conf["path"]
+	if !ok {
+		path = "/vault"
+	}
+
+	// Ensure path is prefixed.
+ if !strings.HasPrefix(path, "/") { + path = "/" + path + } + + endpoints, err := getEtcdEndpoints(conf) + if err != nil { + return nil, err + } + + cfg := clientv3.Config{ + Endpoints: endpoints, + } + + haEnabled := os.Getenv("ETCD_HA_ENABLED") + if haEnabled == "" { + haEnabled = conf["ha_enabled"] + } + if haEnabled == "" { + haEnabled = "false" + } + haEnabledBool, err := strconv.ParseBool(haEnabled) + if err != nil { + return nil, fmt.Errorf("value [%v] of 'ha_enabled' could not be understood", haEnabled) + } + + cert, hasCert := conf["tls_cert_file"] + key, hasKey := conf["tls_key_file"] + ca, hasCa := conf["tls_ca_file"] + if (hasCert && hasKey) || hasCa { + tls := transport.TLSInfo{ + TrustedCAFile: ca, + CertFile: cert, + KeyFile: key, + } + + tlscfg, err := tls.ClientConfig() + if err != nil { + return nil, err + } + cfg.TLS = tlscfg + } + + // Set credentials. + username := os.Getenv("ETCD_USERNAME") + if username == "" { + username, _ = conf["username"] + } + + password := os.Getenv("ETCD_PASSWORD") + if password == "" { + password, _ = conf["password"] + } + + if username != "" && password != "" { + cfg.Username = username + cfg.Password = password + } + + if maxReceive, ok := conf["max_receive_size"]; ok { + // grpc converts this to uint32 internally, so parse as that to avoid passing invalid values + val, err := strconv.ParseUint(maxReceive, 10, 32) + if err != nil { + return nil, fmt.Errorf("value of 'max_receive_size' (%v) could not be understood: %w", maxReceive, err) + } + cfg.MaxCallRecvMsgSize = int(val) + } + + etcd, err := clientv3.New(cfg) + if err != nil { + return nil, err + } + + sReqTimeout := conf["request_timeout"] + if sReqTimeout == "" { + // etcd3 default request timeout is set to 5s. It should be long enough + // for most cases, even with internal retry. + sReqTimeout = "5s" + } + reqTimeout, err := parseutil.ParseDurationSecond(sReqTimeout) + if err != nil { + return nil, fmt.Errorf("value [%v] of 'request_timeout' could not be understood: %w", sReqTimeout, err) + } + + ssync, ok := conf["sync"] + if !ok { + ssync = "true" + } + sync, err := strconv.ParseBool(ssync) + if err != nil { + return nil, fmt.Errorf("value of 'sync' (%v) could not be understood: %w", ssync, err) + } + + if sync { + ctx, cancel := context.WithTimeout(context.Background(), reqTimeout) + err := etcd.Sync(ctx) + cancel() + if err != nil { + return nil, err + } + } + + sLock := conf["lock_timeout"] + if sLock == "" { + // etcd3 default lease duration is 60s. set to 15s for faster recovery. 
+		sLock = "15s"
+	}
+	lock, err := parseutil.ParseDurationSecond(sLock)
+	if err != nil {
+		return nil, fmt.Errorf("value [%v] of 'lock_timeout' could not be understood: %w", sLock, err)
+	}
+
+	return &EtcdBackend{
+		path:           path,
+		etcd:           etcd,
+		permitPool:     physical.NewPermitPool(physical.DefaultParallelOperations),
+		logger:         logger,
+		haEnabled:      haEnabledBool,
+		lockTimeout:    lock,
+		requestTimeout: reqTimeout,
+	}, nil
+}
+
+func (c *EtcdBackend) Put(ctx context.Context, entry *physical.Entry) error {
+	defer metrics.MeasureSince([]string{"etcd", "put"}, time.Now())
+
+	c.permitPool.Acquire()
+	defer c.permitPool.Release()
+
+	ctx, cancel := context.WithTimeout(context.Background(), c.requestTimeout)
+	defer cancel()
+	_, err := c.etcd.Put(ctx, path.Join(c.path, entry.Key), string(entry.Value))
+	return err
+}
+
+func (c *EtcdBackend) Get(ctx context.Context, key string) (*physical.Entry, error) {
+	defer metrics.MeasureSince([]string{"etcd", "get"}, time.Now())
+
+	c.permitPool.Acquire()
+	defer c.permitPool.Release()
+
+	ctx, cancel := context.WithTimeout(context.Background(), c.requestTimeout)
+	defer cancel()
+	resp, err := c.etcd.Get(ctx, path.Join(c.path, key))
+	if err != nil {
+		return nil, err
+	}
+
+	if len(resp.Kvs) == 0 {
+		return nil, nil
+	}
+	if len(resp.Kvs) > 1 {
+		return nil, errors.New("unexpected number of keys from a get request")
+	}
+	return &physical.Entry{
+		Key:   key,
+		Value: resp.Kvs[0].Value,
+	}, nil
+}
+
+func (c *EtcdBackend) Delete(ctx context.Context, key string) error {
+	defer metrics.MeasureSince([]string{"etcd", "delete"}, time.Now())
+
+	c.permitPool.Acquire()
+	defer c.permitPool.Release()
+
+	ctx, cancel := context.WithTimeout(context.Background(), c.requestTimeout)
+	defer cancel()
+	_, err := c.etcd.Delete(ctx, path.Join(c.path, key))
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (c *EtcdBackend) List(ctx context.Context, prefix string) ([]string, error) {
+	defer metrics.MeasureSince([]string{"etcd", "list"}, time.Now())
+
+	c.permitPool.Acquire()
+	defer c.permitPool.Release()
+
+	ctx, cancel := context.WithTimeout(context.Background(), c.requestTimeout)
+	defer cancel()
+	prefix = path.Join(c.path, prefix) + "/"
+	resp, err := c.etcd.Get(ctx, prefix, clientv3.WithPrefix(), clientv3.WithKeysOnly())
+	if err != nil {
+		return nil, err
+	}
+
+	keys := []string{}
+	for _, kv := range resp.Kvs {
+		key := strings.TrimPrefix(string(kv.Key), prefix)
+		key = strings.TrimPrefix(key, "/")
+
+		if len(key) == 0 {
+			continue
+		}
+
+		if i := strings.Index(key, "/"); i == -1 {
+			keys = append(keys, key)
+		} else {
+			keys = strutil.AppendIfMissing(keys, key[:i+1])
+		}
+	}
+	return keys, nil
+}
+
+func (e *EtcdBackend) HAEnabled() bool {
+	return e.haEnabled
+}
+
+// EtcdLock implements a lock using an etcd backend.
+type EtcdLock struct {
+	lock           sync.Mutex
+	held           bool
+	timeout        time.Duration
+	requestTimeout time.Duration
+
+	etcdSession *concurrency.Session
+	etcdMu      *concurrency.Mutex
+
+	prefix string
+	value  string
+
+	etcd *clientv3.Client
+}
+
+// LockWith is used for mutual exclusion based on the given key.
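+//
+// A usage sketch (names illustrative, error handling elided):
+//
+//	lock, _ := backend.LockWith("leader", "node-1")
+//	stopCh := make(chan struct{})
+//	lostCh, _ := lock.Lock(stopCh) // blocks until acquired or stopCh closes
+//	if lostCh != nil {
+//		<-lostCh // closed when the lock (etcd session) is lost
+//		_ = lock.Unlock()
+//	}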
+func (c *EtcdBackend) LockWith(key, value string) (physical.Lock, error) { + p := path.Join(c.path, key) + return &EtcdLock{ + prefix: p, + value: value, + etcd: c.etcd, + timeout: c.lockTimeout, + requestTimeout: c.requestTimeout, + }, nil +} + +func (c *EtcdLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { + c.lock.Lock() + defer c.lock.Unlock() + + if c.etcdMu == nil { + if err := c.initMu(); err != nil { + return nil, err + } + } + + if c.held { + return nil, EtcdLockHeldError + } + + select { + case _, ok := <-c.etcdSession.Done(): + if !ok { + // The session's done channel is closed, so the session is over, + // and we need a new lock with a new session. + if err := c.initMu(); err != nil { + return nil, err + } + } + default: + } + + ctx, cancel := context.WithCancel(context.Background()) + go func() { + <-stopCh + cancel() + }() + if err := c.etcdMu.Lock(ctx); err != nil { + if err == context.Canceled { + return nil, nil + } + return nil, err + } + + pctx, cancel := context.WithTimeout(context.Background(), c.requestTimeout) + defer cancel() + if _, err := c.etcd.Put(pctx, c.etcdMu.Key(), c.value, clientv3.WithLease(c.etcdSession.Lease())); err != nil { + return nil, err + } + + c.held = true + + return c.etcdSession.Done(), nil +} + +func (c *EtcdLock) Unlock() error { + c.lock.Lock() + defer c.lock.Unlock() + + if !c.held { + return EtcdLockNotHeldError + } + + ctx, cancel := context.WithTimeout(context.Background(), c.requestTimeout) + defer cancel() + return c.etcdMu.Unlock(ctx) +} + +func (c *EtcdLock) Value() (bool, string, error) { + ctx, cancel := context.WithTimeout(context.Background(), c.requestTimeout) + defer cancel() + + resp, err := c.etcd.Get(ctx, + c.prefix, clientv3.WithPrefix(), + clientv3.WithSort(clientv3.SortByCreateRevision, clientv3.SortAscend)) + if err != nil { + return false, "", err + } + if len(resp.Kvs) == 0 { + return false, "", nil + } + + return true, string(resp.Kvs[0].Value), nil +} + +func (c *EtcdLock) initMu() error { + session, err := concurrency.NewSession(c.etcd, concurrency.WithTTL(int(c.timeout.Seconds()))) + if err != nil { + return err + } + c.etcdSession = session + c.etcdMu = concurrency.NewMutex(session, c.prefix) + return nil +} diff --git a/physical/etcd/etcd3_test.go b/physical/etcd/etcd3_test.go new file mode 100644 index 0000000..9156fc1 --- /dev/null +++ b/physical/etcd/etcd3_test.go @@ -0,0 +1,46 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package etcd + +import ( + "fmt" + "testing" + "time" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/testhelpers/etcd" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" +) + +func TestEtcd3Backend(t *testing.T) { + cleanup, config := etcd.PrepareTestContainer(t, "v3.5.0") + defer cleanup() + + logger := logging.NewVaultLogger(log.Debug) + configMap := map[string]string{ + "address": config.URL().String(), + "path": fmt.Sprintf("/vault-%d", time.Now().Unix()), + "etcd_api": "3", + "username": "root", + "password": "insecure", + + // Syncing advertised client urls should be disabled since docker port mapping confuses the client. 
+ "sync": "false", + } + + b, err := NewEtcdBackend(configMap, logger) + if err != nil { + t.Fatalf("err: %s", err) + } + + b2, err := NewEtcdBackend(configMap, logger) + if err != nil { + t.Fatalf("err: %s", err) + } + + physical.ExerciseBackend(t, b) + physical.ExerciseBackend_ListPrefix(t, b) + physical.ExerciseHABackend(t, b.(physical.HABackend), b2.(physical.HABackend)) +} diff --git a/physical/foundationdb/README.md b/physical/foundationdb/README.md new file mode 100644 index 0000000..ee56e38 --- /dev/null +++ b/physical/foundationdb/README.md @@ -0,0 +1,47 @@ +# FoundationDB storage backend + +Extra steps are required to produce a Vault build containing the FoundationDB +backend; attempts to use the backend on a build produced without following +this procedure will fail with a descriptive error message at runtime. + +## Installing the Go bindings + +### Picking a version + +The version of the Go bindings and the FoundationDB client library used to +build them must match. + +This version will determine the minimum API version that can be used, hence +it should be no higher than the version of FoundationDB used in your cluster, +and must also satisfy the requirements of the backend code. + +The minimum required API version for the FoundationDB backend is 520. + +### Installation + +Make sure you have Mono installed (core is enough), then install the +Go bindings using the `fdb-go-install.sh` script: + +``` +$ physical/foundationdb/fdb-go-install.sh install --fdbver x.y.z +``` + +By default, if `--fdbver x.y.z` is not specified, version 5.2.4 will be used. + +## Building Vault + +To build Vault the FoundationDB backend, add FDB_ENABLED=1 when invoking +`make`, e.g. + +``` +$ make dev FDB_ENABLED=1 +``` + +## Running tests + +Similarly, add FDB_ENABLED=1 to your `make` invocation when running tests, +e.g. + +``` +$ make test TEST=./physical/foundationdb FDB_ENABLED=1 +``` diff --git a/physical/foundationdb/fdb-go-install.sh b/physical/foundationdb/fdb-go-install.sh new file mode 100755 index 0000000..4b2c125 --- /dev/null +++ b/physical/foundationdb/fdb-go-install.sh @@ -0,0 +1,333 @@ +#!/bin/bash -eu +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +# +# fdb-go-install.sh +# +# Installs the FoundationDB Go bindings for a client. This will download +# the repository from the remote repo either into the go directory +# with the appropriate semantic version. It will then build a few +# generated files that need to be present for the go build to work. +# At the end, it has some advice for flags to modify within your +# go environment so that other packages may successfully use this +# library. 
+#
+
+DESTDIR="${DESTDIR:-}"
+FDBVER="${FDBVER:-5.2.4}"
+REMOTE="${REMOTE:-github.com}"
+FDBREPO="${FDBREPO:-apple/foundationdb}"
+
+status=0
+
+platform=$(uname)
+if [[ "${platform}" == "Darwin" ]] ; then
+ FDBLIBDIR="${FDBLIBDIR:-/usr/local/lib}"
+ libfdbc="libfdb_c.dylib"
+elif [[ "${platform}" == "Linux" ]] ; then
+ libfdbc="libfdb_c.so"
+ custom_libdir="${FDBLIBDIR:-}"
+ FDBLIBDIR=""
+
+ if [[ -z "${custom_libdir}" ]]; then
+ search_libdirs=( '/usr/lib' '/usr/lib64' )
+ else
+ search_libdirs=( "${custom_libdir}" )
+ fi
+
+ for libdir in "${search_libdirs[@]}" ; do
+ if [[ -e "${libdir}/${libfdbc}" ]]; then
+ FDBLIBDIR="${libdir}"
+ break
+ fi
+ done
+
+ if [[ -z "${FDBLIBDIR}" ]]; then
+ echo "The FoundationDB C library could not be found in any of:"
+ for libdir in "${search_libdirs[@]}" ; do
+ echo " ${libdir}"
+ done
+ echo "Your installation may be incomplete, or you need to set a custom FDBLIBDIR."
+ let status="${status} + 1"
+ fi
+
+else
+ echo "Unsupported platform ${platform}."
+ echo "At the moment, only macOS and Linux are supported by this script."
+ let status="${status} + 1"
+fi
+
+filedir=$(cd `dirname "${BASH_SOURCE[0]}"` && pwd)
+destdir=""
+
+function printUsage() {
+ echo "Usage: fdb-go-install.sh <cmd>"
+ echo
+ echo "cmd: One of the commands to run. The options are:"
+ echo "   install        Download the FDB go bindings and install them"
+ echo "   localinstall   Install a local copy of the repo into the go path"
+ echo "   download       Download but do not prepare the FoundationDB bindings"
+ echo "   help           Print this help message and then quit"
+ echo
+ echo "Command Line Options:"
+ echo "   --fdbver <version>       FoundationDB semantic version (default is ${FDBVER})"
+ echo "   -d/--dest-dir <dest-dir> Local location for the repo (default is to place in go path)"
+ echo
+ echo "Environment Variable Options:"
+ echo "   REMOTE     Remote repository to download from (currently ${REMOTE})"
+ echo "   FDBREPO    Repository of FoundationDB library to download (currently ${FDBREPO})"
+ echo "   FDBLIBDIR  Directory within which should be the FoundationDB c library (currently ${FDBLIBDIR})"
+}
+
+function parseArgs() {
+ local status=0
+
+ if [[ "${#}" -lt 1 ]] ; then
+ printUsage
+ let status="${status} + 1"
+ else
+ operation="${1}"
+ shift
+ if [[ "${operation}" != "install" ]] && [[ "${operation}" != "localinstall" ]] && [[ "${operation}" != "download" ]] && [[ "${operation}" != "help" ]] ; then
+ echo "Unknown command: ${operation}"
+ printUsage
+ let status="${status} + 1"
+ fi
+ fi
+
+ while [[ "${#}" -gt 0 ]] && [[ "${status}" -eq 0 ]] ; do
+ local key="${1}"
+ case "${key}" in
+ --fdbver)
+ if [[ "${#}" -lt 2 ]] ; then
+ echo "No version specified with --fdbver flag"
+ printUsage
+ let status="${status} + 1"
+ else
+ FDBVER="${2}"
+ fi
+ shift
+ ;;
+
+ -d|--dest-dir)
+ if [[ "${#}" -lt 2 ]] ; then
+ echo "No destination specified with ${key} flag"
+ printUsage
+ let status="${status} + 1"
+ else
+ destdir="${2}"
+ fi
+ shift
+ ;;
+
+ *)
+ echo "Unrecognized argument ${key}"
+ printUsage
+ let status="${status} + 1"
+ esac
+ shift
+ done
+
+ return "${status}"
+}
+
+function checkBin() {
+ if [[ "${#}" -lt 1 ]] ; then
+ echo "Usage: checkBin <binary>"
+ return 1
+ else
+ if [[ -n $(which "${1}") ]] ; then
+ return 0
+ else
+ return 1
+ fi
+ fi
+}
+
+if [[ "${status}" -gt 0 ]] ; then
+ # We have already failed.
+ :
+elif [[ "${#}" -lt 1 ]] ; then
+ printUsage
+else
+ required_bins=( 'go' 'git' 'make' 'mono' )
+
+ missing_bins=()
+ for bin in "${required_bins[@]}" ; do
+ if !
checkBin "${bin}" ; then + missing_bins+=("${bin}") + let status="${status} + 1" + fi + done + + if [[ "${status}" -gt 0 ]] ; then + echo "Missing binaries: ${missing_bins[*]}" + elif ! parseArgs ${@} ; then + let status="${status} + 1" + elif [[ "${operation}" == "help" ]] ; then + printUsage + else + # Add go-specific environment variables. + eval $(go env) + + golibdir=$(dirname "${GOPATH}/src/${REMOTE}/${FDBREPO}") + if [[ -z "${destdir}" ]] ; then + if [[ "${operation}" == "localinstall" ]] ; then + # Assume its the local directory. + destdir=$(cd "${filedir}/../../.." && pwd) + else + destdir="${golibdir}" + fi + fi + + if [[ ! -d "${destdir}" ]] ; then + cmd=( 'mkdir' '-p' "${destdir}" ) + echo "${cmd[*]}" + if ! "${cmd[@]}" ; then + let status="${status} + 1" + echo "Could not create destination directory ${destdir}." + fi + fi + + # Step 1: Make sure repository is present. + + if [[ "${status}" -eq 0 ]] ; then + destdir=$( cd "${destdir}" && pwd ) # Get absolute path of destination dir. + fdbdir="${destdir}/foundationdb" + + if [[ ! -d "${destdir}" ]] ; then + cmd=("mkdir" "-p" "${destdir}") + echo "${cmd[*]}" + if ! "${cmd[@]}" ; then + echo "Could not create destination directory ${destdir}." + let status="${status} + 1" + fi + fi + fi + + if [[ "${operation}" == "localinstall" ]] ; then + # No download occurs in this case. + : + else + if [[ -d "${fdbdir}" ]] ; then + echo "Directory ${fdbdir} already exists ; checking out appropriate tag" + cmd1=( 'git' '-C' "${fdbdir}" 'fetch' 'origin' ) + cmd2=( 'git' '-C' "${fdbdir}" 'checkout' "${FDBVER}" ) + + if ! echo "${cmd1[*]}" || ! "${cmd1[@]}" ; then + let status="${status} + 1" + echo "Could not pull latest changes from origin" + elif ! echo "${cmd2[*]}" || ! "${cmd2[@]}" ; then + let status="${status} + 1" + echo "Could not checkout tag ${FDBVER}." + fi + else + echo "Downloading foundation repository into ${destdir}:" + cmd=( 'git' '-C' "${destdir}" 'clone' '--branch' "${FDBVER}" "https://${REMOTE}/${FDBREPO}.git" ) + + echo "${cmd[*]}" + if ! "${cmd[@]}" ; then + let status="${status} + 1" + echo "Could not download repository." + fi + fi + fi + + # Step 2: Build generated things. + + if [[ "${operation}" == "download" ]] ; then + # The generated files are not created under a strict download. + : + elif [[ "${status}" -eq 0 ]] ; then + echo "Building generated files." + # FoundationDB starting with 6.0 can figure that out on its own + if [ -e '/usr/bin/mcs' ]; then + MCS_BIN=/usr/bin/mcs + else + MCS_BIN=/usr/bin/dmcs + fi + cmd=( 'make' '-C' "${fdbdir}" 'bindings/c/foundationdb/fdb_c_options.g.h' "MCS=$MCS_BIN" ) + + echo "${cmd[*]}" + if ! "${cmd[@]}" ; then + let status="${status} + 1" + echo "Could not generate required c header" + else + infile="${fdbdir}/fdbclient/vexillographer/fdb.options" + outfile="${fdbdir}/bindings/go/src/fdb/generated.go" + cmd=( 'go' 'run' "${fdbdir}/bindings/go/src/_util/translate_fdb_options.go" ) + echo "${cmd[*]} < ${infile} > ${outfile}" + if ! "${cmd[@]}" < "${infile}" > "${outfile}" ; then + let status="${status} + 1" + echo "Could not generate generated go file." + fi + fi + fi + + # Step 3: Add to go path. + + if [[ "${operation}" == "download" ]] ; then + # The files are not moved under a strict download. + : + elif [[ "${status}" -eq 0 ]] ; then + linkpath="${GOPATH}/src/${REMOTE}/${FDBREPO}" + if [[ "${linkpath}" == "${fdbdir}" ]] ; then + # Downloaded directly into go path. Skip making the link. 
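+ # (':' is the shell no-op; this branch intentionally does nothing.)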
+ : + elif [[ -e "${linkpath}" ]] ; then + echo "Warning: link path (${linkpath}) already exists. Leaving in place." + else + dirpath=$(dirname "${linkpath}") + if [[ ! -d "${dirpath}" ]] ; then + cmd=( 'mkdir' '-p' "${dirpath}" ) + echo "${cmd[*]}" + if ! "${cmd[@]}" ; then + let status="${status} + 1" + echo "Could not create directory for link." + fi + fi + + if [[ "${status}" -eq 0 ]] ; then + cmd=( 'ln' '-s' "${fdbdir}" "${linkpath}" ) + echo "${cmd[*]}" + if ! "${cmd[@]}" ; then + let status="${status} + 1" + echo "Could not create link within go path." + fi + fi + fi + fi + + # Step 4: Build the binaries. + + if [[ "${operation}" == "download" ]] ; then + # Do not install if only downloading + : + elif [[ "${status}" -eq 0 ]] ; then + cgo_cppflags="-I${linkpath}/bindings/c" + cgo_cflags="-g -O2" + cgo_ldflags="-L${FDBLIBDIR}" + fdb_go_path="${REMOTE}/${FDBREPO}/bindings/go/src" + + if ! CGO_CPPFLAGS="${cgo_cppflags}" CGO_CFLAGS="${cgo_cflags}" CGO_LDFLAGS="${cgo_ldflags}" go install "${fdb_go_path}/fdb" "${fdb_go_path}/fdb/tuple" "${fdb_go_path}/fdb/subspace" "${fdb_go_path}/fdb/directory" ; then + let status="${status} + 1" + echo "Could not build FoundationDB go libraries." + fi + fi + + # Step 5: Explain CGO flags. + + if [[ "${status}" -eq 0 && ("${operation}" == "localinstall" || "${operation}" == "install" ) ]] ; then + echo + echo "The FoundationDB go bindings were successfully installed." + echo "To build packages which use the go bindings, you will need to" + echo "set the following environment variables:" + echo " CGO_CPPFLAGS=\"${cgo_cppflags}\"" + echo " CGO_CFLAGS=\"${cgo_cflags}\"" + echo " CGO_LDFLAGS=\"${cgo_ldflags}\"" + fi + fi +fi + +exit "${status}" diff --git a/physical/foundationdb/foundationdb.go b/physical/foundationdb/foundationdb.go new file mode 100644 index 0000000..03f984f --- /dev/null +++ b/physical/foundationdb/foundationdb.go @@ -0,0 +1,888 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +//go:build foundationdb + +package foundationdb + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "strconv" + "strings" + "sync" + "time" + + log "github.com/hashicorp/go-hclog" + uuid "github.com/hashicorp/go-uuid" + + "github.com/apple/foundationdb/bindings/go/src/fdb" + "github.com/apple/foundationdb/bindings/go/src/fdb/directory" + "github.com/apple/foundationdb/bindings/go/src/fdb/subspace" + "github.com/apple/foundationdb/bindings/go/src/fdb/tuple" + + metrics "github.com/armon/go-metrics" + "github.com/hashicorp/vault/sdk/physical" +) + +const ( + // The minimum acceptable API version + minAPIVersion = 520 + + // The namespace under our top directory containing keys only for list operations + metaKeysNamespace = "_meta-keys" + + // The namespace under our top directory containing the actual data + dataNamespace = "_data" + + // The namespace under our top directory containing locks + lockNamespace = "_lock" + + // Path hierarchy markers + // - an entry in a directory (included in list) + dirEntryMarker = "/\x01" + // - a path component (excluded from list) + dirPathMarker = "/\x02" +) + +var ( + // 64bit 1 and -1 for FDB atomic Add() + atomicArgOne = []byte{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + atomicArgMinusOne = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} +) + +// Verify FDBBackend satisfies the correct interfaces +var ( + _ physical.Backend = (*FDBBackend)(nil) + _ physical.Transactional = (*FDBBackend)(nil) + _ physical.HABackend = (*FDBBackend)(nil) + _ physical.Lock = (*FDBBackendLock)(nil) +) + +// FDBBackend is a physical backend that stores data at a specific +// prefix within FoundationDB. +type FDBBackend struct { + logger log.Logger + haEnabled bool + db fdb.Database + metaKeysSpace subspace.Subspace + dataSpace subspace.Subspace + lockSpace subspace.Subspace + instanceUUID string +} + +func concat(a []byte, b ...byte) []byte { + r := make([]byte, len(a)+len(b)) + + copy(r, a) + copy(r[len(a):], b) + + return r +} + +func decoratePrefix(prefix string) ([]byte, error) { + pathElements := strings.Split(prefix, "/") + decoratedPrefix := strings.Join(pathElements[:len(pathElements)-1], dirPathMarker) + + return []byte(decoratedPrefix + dirEntryMarker), nil +} + +// Turn a path string into a decorated byte array to be used as (part of) a key +// foo /\x01foo +// foo/ /\x01foo/ +// foo/bar /\x02foo/\x01bar +// foo/bar/ /\x02foo/\x01bar/ +// foo/bar/baz /\x02foo/\x02bar/\x01baz +// foo/bar/baz/ /\x02foo/\x02bar/\x01baz/ +// foo/bar/baz/quux /\x02foo/\x02bar/\x02baz/\x01quux +// This allows for range queries to retrieve the "directory" listing. The +// decoratePrefix() function builds the path leading up to the leaf. 
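+//
+// The decoration is reversible: undecoratePath(decoratePath(p)) yields p
+// again, e.g. (illustrative):
+//
+//	d, _ := decoratePath("foo/bar") // "/\x02foo/\x01bar"
+//	undecoratePath(d)               // "foo/bar"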
+func decoratePath(path string) ([]byte, error) { + if path == "" { + return nil, fmt.Errorf("Invalid empty path") + } + + path = "/" + path + + isDir := strings.HasSuffix(path, "/") + path = strings.TrimRight(path, "/") + + lastSlash := strings.LastIndexByte(path, '/') + decoratedPrefix, err := decoratePrefix(path[:lastSlash+1]) + if err != nil { + return nil, err + } + + leaf := path[lastSlash+1:] + if isDir { + leaf += "/" + } + + return concat(decoratedPrefix, []byte(leaf)...), nil +} + +// Turn a decorated byte array back into a path string +func undecoratePath(decoratedPath []byte) string { + ret := strings.ReplaceAll(string(decoratedPath), dirPathMarker, "/") + ret = strings.ReplaceAll(ret, dirEntryMarker, "/") + + return strings.TrimLeft(ret, "/") +} + +// NewFDBBackend constructs a FoundationDB backend storing keys in the +// top-level directory designated by path +func NewFDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + // Get the top-level directory name + path, ok := conf["path"] + if !ok { + path = "vault" + } + logger.Debug("config path set", "path", path) + + dirPath := strings.Split(strings.Trim(path, "/"), "/") + + // TLS support + tlsCertFile, hasCertFile := conf["tls_cert_file"] + tlsKeyFile, hasKeyFile := conf["tls_key_file"] + tlsCAFile, hasCAFile := conf["tls_ca_file"] + + tlsEnabled := hasCertFile && hasKeyFile && hasCAFile + + if (hasCertFile || hasKeyFile || hasCAFile) && !tlsEnabled { + return nil, fmt.Errorf("FoundationDB TLS requires all 3 of tls_cert_file, tls_key_file, and tls_ca_file") + } + + tlsVerifyPeers, ok := conf["tls_verify_peers"] + if !ok && tlsEnabled { + return nil, fmt.Errorf("Required option tls_verify_peers not set in configuration") + } + + // FoundationDB API version + fdbApiVersionStr, ok := conf["api_version"] + if !ok { + return nil, fmt.Errorf("FoundationDB API version not specified") + } + + fdbApiVersionInt, err := strconv.Atoi(fdbApiVersionStr) + if err != nil { + return nil, fmt.Errorf("failed to parse fdb_api_version parameter: %w", err) + } + + // Check requested FDB API version against minimum required API version + if fdbApiVersionInt < minAPIVersion { + return nil, fmt.Errorf("Configured FoundationDB API version lower than minimum required version: %d < %d", fdbApiVersionInt, minAPIVersion) + } + + logger.Debug("FoundationDB API version set", "fdb_api_version", fdbApiVersionInt) + + // FoundationDB cluster file + fdbClusterFile, ok := conf["cluster_file"] + if !ok { + return nil, fmt.Errorf("FoundationDB cluster file not specified") + } + + haEnabled := false + haEnabledStr, ok := conf["ha_enabled"] + if ok { + haEnabled, err = strconv.ParseBool(haEnabledStr) + if err != nil { + return nil, fmt.Errorf("failed to parse ha_enabled parameter: %w", err) + } + } + + instanceUUID, err := uuid.GenerateUUID() + if err != nil { + return nil, fmt.Errorf("could not generate instance UUID: %w", err) + } + logger.Debug("Instance UUID", "uuid", instanceUUID) + + if err := fdb.APIVersion(fdbApiVersionInt); err != nil { + return nil, fmt.Errorf("failed to set FDB API version: %w", err) + } + + if tlsEnabled { + opts := fdb.Options() + + tlsPassword, ok := conf["tls_password"] + if ok { + err := opts.SetTLSPassword(tlsPassword) + if err != nil { + return nil, fmt.Errorf("failed to set TLS password: %w", err) + } + } + + err := opts.SetTLSCaPath(tlsCAFile) + if err != nil { + return nil, fmt.Errorf("failed to set TLS CA bundle path: %w", err) + } + + err = opts.SetTLSCertPath(tlsCertFile) + if err != nil { + 
return nil, fmt.Errorf("failed to set TLS certificate path: %w", err) + } + + err = opts.SetTLSKeyPath(tlsKeyFile) + if err != nil { + return nil, fmt.Errorf("failed to set TLS key path: %w", err) + } + + err = opts.SetTLSVerifyPeers([]byte(tlsVerifyPeers)) + if err != nil { + return nil, fmt.Errorf("failed to set TLS peer verification criteria: %w", err) + } + } + + db, err := fdb.Open(fdbClusterFile, []byte("DB")) + if err != nil { + return nil, fmt.Errorf("failed to open database with cluster file %q: %w", fdbClusterFile, err) + } + + topDir, err := directory.CreateOrOpen(db, dirPath, nil) + if err != nil { + return nil, fmt.Errorf("failed to create/open top-level directory %q: %w", path, err) + } + + // Setup the backend + f := &FDBBackend{ + logger: logger, + haEnabled: haEnabled, + db: db, + metaKeysSpace: topDir.Sub(metaKeysNamespace), + dataSpace: topDir.Sub(dataNamespace), + lockSpace: topDir.Sub(lockNamespace), + instanceUUID: instanceUUID, + } + return f, nil +} + +// Increase refcount on directories in the path, from the bottom -> up +func (f *FDBBackend) incDirsRefcount(tr fdb.Transaction, path string) error { + pathElements := strings.Split(strings.TrimRight(path, "/"), "/") + + for i := len(pathElements) - 1; i != 0; i-- { + dPath, err := decoratePath(strings.Join(pathElements[:i], "/") + "/") + if err != nil { + return fmt.Errorf("error incrementing directories refcount: %w", err) + } + + // Atomic +1 + tr.Add(fdb.Key(concat(f.metaKeysSpace.Bytes(), dPath...)), atomicArgOne) + tr.Add(fdb.Key(concat(f.dataSpace.Bytes(), dPath...)), atomicArgOne) + } + + return nil +} + +type DirsDecTodo struct { + fkey fdb.Key + future fdb.FutureByteSlice +} + +// Decrease refcount on directories in the path, from the bottom -> up, and remove empty ones +func (f *FDBBackend) decDirsRefcount(tr fdb.Transaction, path string) error { + pathElements := strings.Split(strings.TrimRight(path, "/"), "/") + + dirsTodo := make([]DirsDecTodo, 0, len(pathElements)*2) + + for i := len(pathElements) - 1; i != 0; i-- { + dPath, err := decoratePath(strings.Join(pathElements[:i], "/") + "/") + if err != nil { + return fmt.Errorf("error decrementing directories refcount: %w", err) + } + + metaFKey := fdb.Key(concat(f.metaKeysSpace.Bytes(), dPath...)) + dirsTodo = append(dirsTodo, DirsDecTodo{ + fkey: metaFKey, + future: tr.Get(metaFKey), + }) + + dataFKey := fdb.Key(concat(f.dataSpace.Bytes(), dPath...)) + dirsTodo = append(dirsTodo, DirsDecTodo{ + fkey: dataFKey, + future: tr.Get(dataFKey), + }) + } + + for _, todo := range dirsTodo { + value, err := todo.future.Get() + if err != nil { + return fmt.Errorf("error getting directory refcount while decrementing: %w", err) + } + + // The directory entry does not exist; this is not expected + if value == nil { + return fmt.Errorf("non-existent directory while decrementing directory refcount") + } + + var count int64 + err = binary.Read(bytes.NewReader(value), binary.LittleEndian, &count) + if err != nil { + return fmt.Errorf("error reading directory refcount while decrementing: %w", err) + } + + if count > 1 { + // Atomic -1 + tr.Add(todo.fkey, atomicArgMinusOne) + } else { + // Directory is empty, remove it + tr.Clear(todo.fkey) + } + } + + return nil +} + +func (f *FDBBackend) internalPut(tr fdb.Transaction, decoratedPath []byte, path string, value []byte) error { + // Check that the meta key exists before blindly increasing the refcounts + // in the directory hierarchy; this protects against commit_unknown_result + // and other similar cases where a previous 
transaction may have gone + // through without us knowing for sure. + + metaFKey := fdb.Key(concat(f.metaKeysSpace.Bytes(), decoratedPath...)) + metaFuture := tr.Get(metaFKey) + + dataFKey := fdb.Key(concat(f.dataSpace.Bytes(), decoratedPath...)) + tr.Set(dataFKey, value) + + value, err := metaFuture.Get() + if err != nil { + return fmt.Errorf("Put error while getting meta key: %w", err) + } + + if value == nil { + tr.Set(metaFKey, []byte{}) + return f.incDirsRefcount(tr, path) + } + + return nil +} + +func (f *FDBBackend) internalClear(tr fdb.Transaction, decoratedPath []byte, path string) error { + // Same as above - check existence of the meta key before taking any + // action, to protect against a possible previous commit_unknown_result + // error. + + metaFKey := fdb.Key(concat(f.metaKeysSpace.Bytes(), decoratedPath...)) + + value, err := tr.Get(metaFKey).Get() + if err != nil { + return fmt.Errorf("Delete error while getting meta key: %w", err) + } + + if value != nil { + dataFKey := fdb.Key(concat(f.dataSpace.Bytes(), decoratedPath...)) + tr.Clear(dataFKey) + tr.Clear(metaFKey) + return f.decDirsRefcount(tr, path) + } + + return nil +} + +type TxnTodo struct { + decoratedPath []byte + op *physical.TxnEntry +} + +// Used to run multiple entries via a transaction +func (f *FDBBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error { + if len(txns) == 0 { + return nil + } + + todo := make([]*TxnTodo, len(txns)) + + for i, op := range txns { + if op.Operation != physical.DeleteOperation && op.Operation != physical.PutOperation { + return fmt.Errorf("%q is not a supported transaction operation", op.Operation) + } + + decoratedPath, err := decoratePath(op.Entry.Key) + if err != nil { + return fmt.Errorf("could not build decorated path for transaction item %s: %w", op.Entry.Key, err) + } + + todo[i] = &TxnTodo{ + decoratedPath: decoratedPath, + op: op, + } + } + + _, err := f.db.Transact(func(tr fdb.Transaction) (interface{}, error) { + for _, txnTodo := range todo { + var err error + switch txnTodo.op.Operation { + case physical.DeleteOperation: + err = f.internalClear(tr, txnTodo.decoratedPath, txnTodo.op.Entry.Key) + case physical.PutOperation: + err = f.internalPut(tr, txnTodo.decoratedPath, txnTodo.op.Entry.Key, txnTodo.op.Entry.Value) + } + + if err != nil { + return nil, fmt.Errorf("operation %s failed for transaction item %s: %w", txnTodo.op.Operation, txnTodo.op.Entry.Key, err) + } + } + + return nil, nil + }) + if err != nil { + return fmt.Errorf("transaction failed: %w", err) + } + + return nil +} + +// Put is used to insert or update an entry +func (f *FDBBackend) Put(ctx context.Context, entry *physical.Entry) error { + defer metrics.MeasureSince([]string{"foundationdb", "put"}, time.Now()) + + decoratedPath, err := decoratePath(entry.Key) + if err != nil { + return fmt.Errorf("could not build decorated path to put item %s: %w", entry.Key, err) + } + + _, err = f.db.Transact(func(tr fdb.Transaction) (interface{}, error) { + err := f.internalPut(tr, decoratedPath, entry.Key, entry.Value) + if err != nil { + return nil, err + } + + return nil, nil + }) + + if err != nil { + return fmt.Errorf("put failed for item %s: %w", entry.Key, err) + } + + return nil +} + +// Get is used to fetch an entry +// Return nil for non-existent keys +func (f *FDBBackend) Get(ctx context.Context, key string) (*physical.Entry, error) { + defer metrics.MeasureSince([]string{"foundationdb", "get"}, time.Now()) + + decoratedPath, err := decoratePath(key) + if err != nil { + return 
nil, fmt.Errorf("could not build decorated path to get item %s: %w", key, err) + } + + fkey := fdb.Key(concat(f.dataSpace.Bytes(), decoratedPath...)) + + value, err := f.db.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) { + value, err := rtr.Get(fkey).Get() + if err != nil { + return nil, err + } + + return value, nil + }) + if err != nil { + return nil, fmt.Errorf("get failed for item %s: %w", key, err) + } + if value.([]byte) == nil { + return nil, nil + } + + return &physical.Entry{ + Key: key, + Value: value.([]byte), + }, nil +} + +// Delete is used to permanently delete an entry +func (f *FDBBackend) Delete(ctx context.Context, key string) error { + defer metrics.MeasureSince([]string{"foundationdb", "delete"}, time.Now()) + + decoratedPath, err := decoratePath(key) + if err != nil { + return fmt.Errorf("could not build decorated path to delete item %s: %w", key, err) + } + + _, err = f.db.Transact(func(tr fdb.Transaction) (interface{}, error) { + err := f.internalClear(tr, decoratedPath, key) + if err != nil { + return nil, err + } + + return nil, nil + }) + + if err != nil { + return fmt.Errorf("delete failed for item %s: %w", key, err) + } + + return nil +} + +// List is used to list all the keys under a given +// prefix, up to the next prefix. +// Return empty string slice for non-existent directories +func (f *FDBBackend) List(ctx context.Context, prefix string) ([]string, error) { + defer metrics.MeasureSince([]string{"foundationdb", "list"}, time.Now()) + + prefix = strings.TrimRight("/"+prefix, "/") + "/" + + decoratedPrefix, err := decoratePrefix(prefix) + if err != nil { + return nil, fmt.Errorf("could not build decorated path to list prefix %s: %w", prefix, err) + } + + // The beginning of the range is /\x02foo/\x02bar/\x01 (the decorated prefix) to list foo/bar/ + rangeBegin := fdb.Key(concat(f.metaKeysSpace.Bytes(), decoratedPrefix...)) + rangeEnd := fdb.Key(concat(rangeBegin, 0xff)) + pathRange := fdb.KeyRange{rangeBegin, rangeEnd} + keyPrefixLen := len(rangeBegin) + + content, err := f.db.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) { + dirList := make([]string, 0, 0) + + ri := rtr.GetRange(pathRange, fdb.RangeOptions{Mode: fdb.StreamingModeWantAll}).Iterator() + + for ri.Advance() { + kv := ri.MustGet() + + // Strip length of the rangeBegin key off the FDB key, yielding + // the part of the key we're interested in, which does not need + // to be undecorated, by construction. + dirList = append(dirList, string(kv.Key[keyPrefixLen:])) + } + + return dirList, nil + }) + if err != nil { + return nil, fmt.Errorf("could not list prefix %s: %w", prefix, err) + } + + return content.([]string), nil +} + +type FDBBackendLock struct { + f *FDBBackend + key string + value string + fkey fdb.Key + lock sync.Mutex +} + +// LockWith is used for mutual exclusion based on the given key. 
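+// Lock state is stored as a packed FDB tuple of (value, owner UUID, expiry
+// timestamp); see packLock/unpackLock below. An expired lock is treated as
+// free, so recovery after a crashed leader needs no session mechanism, only
+// the TTL.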
+func (f *FDBBackend) LockWith(key, value string) (physical.Lock, error) { + return &FDBBackendLock{ + f: f, + key: key, + value: value, + fkey: f.lockSpace.Pack(tuple.Tuple{key}), + }, nil +} + +func (f *FDBBackend) HAEnabled() bool { + return f.haEnabled +} + +const ( + // Position of elements in the lock content tuple + lockContentValueIdx = 0 + lockContentOwnerIdx = 1 + lockContentExpiresIdx = 2 + + // Number of elements in the lock content tuple + lockTupleContentElts = 3 + + lockTTL = 15 * time.Second + lockRenewInterval = 5 * time.Second + lockAcquireRetryInterval = 5 * time.Second +) + +type FDBBackendLockContent struct { + value string + ownerUUID string + expires time.Time +} + +func packLock(content *FDBBackendLockContent) []byte { + t := tuple.Tuple{content.value, content.ownerUUID, content.expires.UnixNano()} + + return t.Pack() +} + +func unpackLock(tupleContent []byte) (*FDBBackendLockContent, error) { + t, err := tuple.Unpack(tupleContent) + if err != nil { + return nil, err + } + + if len(t) != lockTupleContentElts { + return nil, fmt.Errorf("unexpected lock content, len %d != %d", len(t), lockTupleContentElts) + } + + return &FDBBackendLockContent{ + value: t[lockContentValueIdx].(string), + ownerUUID: t[lockContentOwnerIdx].(string), + expires: time.Unix(0, t[lockContentExpiresIdx].(int64)), + }, nil +} + +func (fl *FDBBackendLock) getLockContent(tr fdb.Transaction) (*FDBBackendLockContent, error) { + tupleContent, err := tr.Get(fl.fkey).Get() + if err != nil { + return nil, err + } + + // Lock doesn't exist + if tupleContent == nil { + return nil, fmt.Errorf("non-existent lock %s", fl.key) + } + + content, err := unpackLock(tupleContent) + if err != nil { + return nil, fmt.Errorf("failed to unpack lock %s: %w", fl.key, err) + } + + return content, nil +} + +func (fl *FDBBackendLock) setLockContent(tr fdb.Transaction, content *FDBBackendLockContent) { + tr.Set(fl.fkey, packLock(content)) +} + +func (fl *FDBBackendLock) isOwned(content *FDBBackendLockContent) bool { + return content.ownerUUID == fl.f.instanceUUID +} + +func (fl *FDBBackendLock) isExpired(content *FDBBackendLockContent) bool { + return time.Now().After(content.expires) +} + +func (fl *FDBBackendLock) acquireTryLock(acquired chan struct{}, errors chan error) (bool, error) { + wonTheRace, err := fl.f.db.Transact(func(tr fdb.Transaction) (interface{}, error) { + tupleContent, err := tr.Get(fl.fkey).Get() + if err != nil { + return nil, fmt.Errorf("could not read lock: %w", err) + } + + // Lock exists + if tupleContent != nil { + content, err := unpackLock(tupleContent) + if err != nil { + return nil, fmt.Errorf("failed to unpack lock %s: %w", fl.key, err) + } + + if fl.isOwned(content) { + return nil, fmt.Errorf("lock %s already held", fl.key) + } + + // The lock already exists, is not owned by us, and is not expired + if !fl.isExpired(content) { + return false, nil + } + } + + // Lock doesn't exist, or exists but is expired, we can go ahead + content := &FDBBackendLockContent{ + value: fl.value, + ownerUUID: fl.f.instanceUUID, + expires: time.Now().Add(lockTTL), + } + + fl.setLockContent(tr, content) + + return true, nil + }) + if err != nil { + errors <- err + return false, err + } + + if wonTheRace.(bool) { + close(acquired) + } + + return wonTheRace.(bool), nil +} + +func (fl *FDBBackendLock) acquireLock(abandon chan struct{}, acquired chan struct{}, errors chan error) { + ticker := time.NewTicker(lockAcquireRetryInterval) + defer ticker.Stop() + + lockAcquired, err := fl.acquireTryLock(acquired, errors) 
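+ // acquireTryLock closes the acquired channel on success and reports
+ // failures on the errors channel; either a successful grab or an error
+ // ends this acquisition attempt.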
+ if lockAcquired || err != nil { + return + } + + for { + select { + case <-abandon: + return + case <-ticker.C: + lockAcquired, err := fl.acquireTryLock(acquired, errors) + if lockAcquired || err != nil { + return + } + } + } +} + +func (fl *FDBBackendLock) maintainLock(lost <-chan struct{}) { + ticker := time.NewTicker(lockRenewInterval) + for { + select { + case <-ticker.C: + _, err := fl.f.db.Transact(func(tr fdb.Transaction) (interface{}, error) { + content, err := fl.getLockContent(tr) + if err != nil { + return nil, err + } + + // We don't own the lock + if !fl.isOwned(content) { + return nil, fmt.Errorf("lost lock %s", fl.key) + } + + // The lock is expired + if fl.isExpired(content) { + return nil, fmt.Errorf("lock %s expired", fl.key) + } + + content.expires = time.Now().Add(lockTTL) + + fl.setLockContent(tr, content) + + return nil, nil + }) + if err != nil { + fl.f.logger.Error("lock maintain", "error", err) + } + + // Failure to renew the lock will cause another node to take over + // and the watch to fire. DB errors will also be caught by the watch. + case <-lost: + ticker.Stop() + return + } + } +} + +func (fl *FDBBackendLock) watchLock(lost chan struct{}) { + for { + watch, err := fl.f.db.Transact(func(tr fdb.Transaction) (interface{}, error) { + content, err := fl.getLockContent(tr) + if err != nil { + return nil, err + } + + // We don't own the lock + if !fl.isOwned(content) { + return nil, fmt.Errorf("lost lock %s", fl.key) + } + + // The lock is expired + if fl.isExpired(content) { + return nil, fmt.Errorf("lock %s expired", fl.key) + } + + // Set FDB watch on the lock + future := tr.Watch(fl.fkey) + + return future, nil + }) + if err != nil { + fl.f.logger.Error("lock watch", "error", err) + break + } + + // Wait for the watch to fire, and go again + watch.(fdb.FutureNil).Get() + } + + close(lost) +} + +func (fl *FDBBackendLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { + fl.lock.Lock() + defer fl.lock.Unlock() + + var ( + // Inform the lock owner that we lost the lock + lost = make(chan struct{}) + + // Tell our watch and renewal routines the lock has been abandoned + abandon = make(chan struct{}) + + // Feedback from lock acquisition routine + acquired = make(chan struct{}) + errors = make(chan error) + ) + + // try to acquire the lock asynchronously + go fl.acquireLock(abandon, acquired, errors) + + select { + case <-acquired: + // Maintain the lock after initial acquisition + go fl.maintainLock(lost) + // Watch the lock for changes + go fl.watchLock(lost) + case err := <-errors: + // Initial acquisition failed + close(abandon) + return nil, err + case <-stopCh: + // Prospective lock owner cancelling lock acquisition + close(abandon) + return nil, nil + } + + return lost, nil +} + +func (fl *FDBBackendLock) Unlock() error { + fl.lock.Lock() + defer fl.lock.Unlock() + + _, err := fl.f.db.Transact(func(tr fdb.Transaction) (interface{}, error) { + content, err := fl.getLockContent(tr) + if err != nil { + return nil, fmt.Errorf("could not get lock content: %w", err) + } + + // We don't own the lock + if !fl.isOwned(content) { + return nil, nil + } + + tr.Clear(fl.fkey) + + return nil, nil + }) + if err != nil { + return fmt.Errorf("unlock failed: %w", err) + } + + return nil +} + +func (fl *FDBBackendLock) Value() (bool, string, error) { + tupleContent, err := fl.f.db.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) { + tupleContent, err := rtr.Get(fl.fkey).Get() + if err != nil { + return nil, fmt.Errorf("could not read lock: %w", err) + } 
+ + return tupleContent, nil + }) + if err != nil { + return false, "", fmt.Errorf("get lock value failed for lock %s: %w", fl.key, err) + } + if tupleContent.([]byte) == nil { + return false, "", nil + } + + content, err := unpackLock(tupleContent.([]byte)) + if err != nil { + return false, "", fmt.Errorf("get lock value failed to unpack lock %s: %w", fl.key, err) + } + + return true, content.value, nil +} diff --git a/physical/foundationdb/foundationdb_test.go b/physical/foundationdb/foundationdb_test.go new file mode 100644 index 0000000..ecd6aa8 --- /dev/null +++ b/physical/foundationdb/foundationdb_test.go @@ -0,0 +1,199 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build foundationdb + +package foundationdb + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "testing" + "time" + + log "github.com/hashicorp/go-hclog" + uuid "github.com/hashicorp/go-uuid" + + "github.com/apple/foundationdb/bindings/go/src/fdb" + "github.com/apple/foundationdb/bindings/go/src/fdb/directory" + + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" + + dockertest "gopkg.in/ory-am/dockertest.v3" +) + +func connectToFoundationDB(clusterFile string) (*fdb.Database, error) { + if err := fdb.APIVersion(520); err != nil { + return nil, fmt.Errorf("failed to set FDB API version: %w", err) + } + + db, err := fdb.Open(clusterFile, []byte("DB")) + if err != nil { + return nil, fmt.Errorf("failed to open database: %w", err) + } + + return &db, nil +} + +func cleanupTopDir(clusterFile, topDir string) error { + db, err := connectToFoundationDB(clusterFile) + if err != nil { + return fmt.Errorf("could not connect to FDB for cleanup: %w", err) + } + + if _, err := directory.Root().Remove(db, []string{topDir}); err != nil { + return fmt.Errorf("could not remove directory: %w", err) + } + + return nil +} + +func TestFoundationDBPathDecoration(t *testing.T) { + cases := map[string][]byte{ + "foo": []byte("/\x01foo"), + "foo/": []byte("/\x01foo/"), + "foo/bar": []byte("/\x02foo/\x01bar"), + "foo/bar/": []byte("/\x02foo/\x01bar/"), + "foo/bar/baz": []byte("/\x02foo/\x02bar/\x01baz"), + "foo/bar/baz/": []byte("/\x02foo/\x02bar/\x01baz/"), + "foo/bar/baz/quux": []byte("/\x02foo/\x02bar/\x02baz/\x01quux"), + } + + for path, expected := range cases { + decorated, err := decoratePath(path) + if err != nil { + t.Fatalf("path %s error: %s", path, err) + } + + if !bytes.Equal(expected, decorated) { + t.Fatalf("path %s expected %v got %v", path, expected, decorated) + } + + undecorated := undecoratePath(decorated) + if undecorated != path { + t.Fatalf("expected %s got %s", path, undecorated) + } + } +} + +func TestFoundationDBBackend(t *testing.T) { + if testing.Short() { + t.Skipf("skipping in short mode") + } + + testUUID, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("foundationdb: could not generate UUID to top-level directory: %s", err) + } + + topDir := fmt.Sprintf("vault-test-%s", testUUID) + + var clusterFile string + clusterFile = os.Getenv("FOUNDATIONDB_CLUSTER_FILE") + if clusterFile == "" { + var cleanup func() + cleanup, clusterFile = prepareFoundationDBTestDirectory(t, topDir) + defer cleanup() + } + + // Remove the test data once done + defer func() { + if err := cleanupTopDir(clusterFile, topDir); err != nil { + t.Fatalf("foundationdb: could not cleanup test data at end of test: %s", err) + } + }() + + // Remove any leftover test data before starting + if err := cleanupTopDir(clusterFile, topDir); err != nil { + 
t.Fatalf("foundationdb: could not cleanup test data before starting test: %s", err)
+ }
+
+ // Run vault tests
+ logger := logging.NewVaultLogger(log.Debug)
+ config := map[string]string{
+ "path": topDir,
+ "api_version": "520",
+ "cluster_file": clusterFile,
+ }
+
+ b, err := NewFDBBackend(config, logger)
+ if err != nil {
+ t.Fatalf("foundationdb: failed to create new backend: %s", err)
+ }
+
+ b2, err := NewFDBBackend(config, logger)
+ if err != nil {
+ t.Fatalf("foundationdb: failed to create new backend: %s", err)
+ }
+
+ physical.ExerciseBackend(t, b)
+ physical.ExerciseBackend_ListPrefix(t, b)
+ physical.ExerciseTransactionalBackend(t, b)
+ physical.ExerciseHABackend(t, b.(physical.HABackend), b2.(physical.HABackend))
+}
+
+func prepareFoundationDBTestDirectory(t *testing.T, topDir string) (func(), string) {
+ pool, err := dockertest.NewPool("")
+ if err != nil {
+ t.Fatalf("foundationdb: failed to connect to docker: %s", err)
+ }
+
+ resource, err := pool.Run("foundationdb", "5.1.7", nil)
+ if err != nil {
+ t.Fatalf("foundationdb: could not start container: %s", err)
+ }
+
+ tmpFile, err := ioutil.TempFile("", topDir)
+ if err != nil {
+ t.Fatalf("foundationdb: could not create temporary file for cluster file: %s", err)
+ }
+
+ clusterFile := tmpFile.Name()
+
+ cleanup := func() {
+ var err error
+ for i := 0; i < 10; i++ {
+ err = pool.Purge(resource)
+ if err == nil {
+ break
+ }
+ time.Sleep(1 * time.Second)
+ }
+ os.Remove(clusterFile)
+ if err != nil {
+ t.Fatalf("Failed to cleanup local container: %s", err)
+ }
+ }
+
+ setup := func() error {
+ connectString := fmt.Sprintf("foundationdb:foundationdb@127.0.0.1:%s", resource.GetPort("4500/tcp"))
+
+ if err := tmpFile.Truncate(0); err != nil {
+ return fmt.Errorf("could not truncate cluster file: %w", err)
+ }
+
+ _, err := tmpFile.WriteAt([]byte(connectString), 0)
+ if err != nil {
+ return fmt.Errorf("could not write cluster file: %w", err)
+ }
+
+ if _, err := connectToFoundationDB(clusterFile); err != nil {
+ return fmt.Errorf("could not connect to FoundationDB after starting container: %w", err)
+ }
+
+ return nil
+ }
+
+ if err := pool.Retry(setup); err != nil {
+ cleanup()
+
+ t.Fatalf("foundationdb: could not setup container: %s", err)
+ }
+
+ tmpFile.Close()
+
+ return cleanup, clusterFile
+}
diff --git a/physical/foundationdb/foundationdbstub.go b/physical/foundationdb/foundationdbstub.go
new file mode 100644
index 0000000..283ca09
--- /dev/null
+++ b/physical/foundationdb/foundationdbstub.go
@@ -0,0 +1,18 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+//go:build !foundationdb
+
+package foundationdb
+
+import (
+ "fmt"
+
+ log "github.com/hashicorp/go-hclog"
+
+ "github.com/hashicorp/vault/sdk/physical"
+)
+
+func NewFDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
+ return nil, fmt.Errorf("FoundationDB backend not available in this Vault build")
+}
diff --git a/physical/gcs/gcs.go b/physical/gcs/gcs.go
new file mode 100644
index 0000000..4a3f5bd
--- /dev/null
+++ b/physical/gcs/gcs.go
@@ -0,0 +1,312 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package gcs
+
+import (
+ "context"
+ "crypto/md5"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "cloud.google.com/go/storage"
+ metrics "github.com/armon/go-metrics"
+ log "github.com/hashicorp/go-hclog"
+ multierror "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/vault/helper/useragent"
+ "github.com/hashicorp/vault/sdk/physical"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+)
+
+// Verify Backend satisfies the correct interfaces
+var _ physical.Backend = (*Backend)(nil)
+
+const (
+ // envBucket is the name of the environment variable to search for the
+ // storage bucket name.
+ envBucket = "GOOGLE_STORAGE_BUCKET"
+
+ // envChunkSize is the environment variable to search for the chunk size for
+ // requests.
+ envChunkSize = "GOOGLE_STORAGE_CHUNK_SIZE"
+
+ // envHAEnabled is the name of the environment variable to search for the
+ // boolean indicating if HA is enabled.
+ envHAEnabled = "GOOGLE_STORAGE_HA_ENABLED"
+
+ // defaultChunkSize is the default number of kilobytes the writer will
+ // attempt to write in a single request; it is converted to bytes in
+ // NewBackend below.
+ defaultChunkSize = "8192"
+
+ // objectDelimiter is the string to use to delimit objects.
+ objectDelimiter = "/"
+)
+
+var (
+ // metricDelete is the key for the metric for measuring a Delete call.
+ metricDelete = []string{"gcs", "delete"}
+
+ // metricGet is the key for the metric for measuring a Get call.
+ metricGet = []string{"gcs", "get"}
+
+ // metricList is the key for the metric for measuring a List call.
+ metricList = []string{"gcs", "list"}
+
+ // metricPut is the key for the metric for measuring a Put call.
+ metricPut = []string{"gcs", "put"}
+)
+
+// Backend implements physical.Backend and describes the steps necessary to
+// persist data in Google Cloud Storage.
+type Backend struct {
+ // bucket is the name of the bucket to use for data storage and retrieval.
+ bucket string
+
+ // chunkSize is the chunk size to use for requests.
+ chunkSize int
+
+ // client is the API client and permitPool is the allowed concurrent uses of
+ // the client.
+ client *storage.Client
+ permitPool *physical.PermitPool
+
+ // haEnabled indicates if HA is enabled.
+ haEnabled bool
+
+ // haClient is the API client. This is managed separately from the main client
+ // because a flood of requests should not block refreshing the TTLs on the
+ // lock.
+ //
+ // This value will be nil if haEnabled is false.
+ haClient *storage.Client
+
+ // logger is an internal logger.
+ logger log.Logger
+}
+
+// NewBackend constructs a Google Cloud Storage backend with the given
+// configuration. It uses the official Google Cloud Go SDK and therefore
+// supports specifying credentials via environment variables, a credentials
+// file, or a service account file.
+func NewBackend(c map[string]string, logger log.Logger) (physical.Backend, error) {
+ logger.Debug("configuring backend")
+
+ // Bucket name
+ bucket := os.Getenv(envBucket)
+ if bucket == "" {
+ bucket = c["bucket"]
+ }
+ if bucket == "" {
+ return nil, errors.New("missing bucket name")
+ }
+
+ // Chunk size
+ chunkSizeStr := os.Getenv(envChunkSize)
+ if chunkSizeStr == "" {
+ chunkSizeStr = c["chunk_size"]
+ }
+ if chunkSizeStr == "" {
+ chunkSizeStr = defaultChunkSize
+ }
+ chunkSize, err := strconv.Atoi(chunkSizeStr)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse chunk_size: %w", err)
+ }
+
+ // Values are specified as kb, but the API expects them as bytes.
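+ // For example, the default chunk_size of "8192" becomes
+ // 8192 * 1024 = 8 MiB per request.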
+ chunkSize = chunkSize * 1024 + + // HA configuration + haClient := (*storage.Client)(nil) + haEnabled := false + haEnabledStr := os.Getenv(envHAEnabled) + if haEnabledStr == "" { + haEnabledStr = c["ha_enabled"] + } + if haEnabledStr != "" { + var err error + haEnabled, err = strconv.ParseBool(haEnabledStr) + if err != nil { + return nil, fmt.Errorf("failed to parse HA enabled: %w", err) + } + } + if haEnabled { + logger.Debug("creating client") + var err error + ctx := context.Background() + haClient, err = storage.NewClient(ctx, option.WithUserAgent(useragent.String())) + if err != nil { + return nil, fmt.Errorf("failed to create HA storage client: %w", err) + } + } + + // Max parallel + maxParallel, err := extractInt(c["max_parallel"]) + if err != nil { + return nil, fmt.Errorf("failed to parse max_parallel: %w", err) + } + + logger.Debug("configuration", + "bucket", bucket, + "chunk_size", chunkSize, + "ha_enabled", haEnabled, + "max_parallel", maxParallel, + ) + + logger.Debug("creating client") + ctx := context.Background() + client, err := storage.NewClient(ctx, option.WithUserAgent(useragent.String())) + if err != nil { + return nil, fmt.Errorf("failed to create storage client: %w", err) + } + + return &Backend{ + bucket: bucket, + chunkSize: chunkSize, + client: client, + permitPool: physical.NewPermitPool(maxParallel), + + haEnabled: haEnabled, + haClient: haClient, + + logger: logger, + }, nil +} + +// Put is used to insert or update an entry +func (b *Backend) Put(ctx context.Context, entry *physical.Entry) (retErr error) { + defer metrics.MeasureSince(metricPut, time.Now()) + + // Pooling + b.permitPool.Acquire() + defer b.permitPool.Release() + + // Insert + w := b.client.Bucket(b.bucket).Object(entry.Key).NewWriter(ctx) + w.ChunkSize = b.chunkSize + md5Array := md5.Sum(entry.Value) + w.MD5 = md5Array[:] + defer func() { + closeErr := w.Close() + if closeErr != nil { + retErr = multierror.Append(retErr, fmt.Errorf("error closing connection: %w", closeErr)) + } + }() + + if _, err := w.Write(entry.Value); err != nil { + return fmt.Errorf("failed to put data: %w", err) + } + return nil +} + +// Get fetches an entry. If no entry exists, this function returns nil. 
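+// Callers must therefore distinguish "no such key" (nil entry, nil error)
+// from a real failure, e.g. (illustrative only):
+//
+//	entry, err := b.Get(ctx, "core/seal-config")
+//	if err != nil {
+//		// transport or API failure
+//	}
+//	if entry == nil {
+//		// key does not exist
+//	}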
+func (b *Backend) Get(ctx context.Context, key string) (retEntry *physical.Entry, retErr error) { + defer metrics.MeasureSince(metricGet, time.Now()) + + // Pooling + b.permitPool.Acquire() + defer b.permitPool.Release() + + // Read + r, err := b.client.Bucket(b.bucket).Object(key).NewReader(ctx) + if err == storage.ErrObjectNotExist { + return nil, nil + } + if err != nil { + return nil, fmt.Errorf("failed to read value for %q: %w", key, err) + } + + defer func() { + closeErr := r.Close() + if closeErr != nil { + retErr = multierror.Append(retErr, fmt.Errorf("error closing connection: %w", closeErr)) + } + }() + + value, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to read value into a string: %w", err) + } + + return &physical.Entry{ + Key: key, + Value: value, + }, nil +} + +// Delete deletes an entry with the given key +func (b *Backend) Delete(ctx context.Context, key string) error { + defer metrics.MeasureSince(metricDelete, time.Now()) + + // Pooling + b.permitPool.Acquire() + defer b.permitPool.Release() + + // Delete + err := b.client.Bucket(b.bucket).Object(key).Delete(ctx) + if err != nil && err != storage.ErrObjectNotExist { + return fmt.Errorf("failed to delete key %q: %w", key, err) + } + return nil +} + +// List is used to list all the keys under a given +// prefix, up to the next prefix. +func (b *Backend) List(ctx context.Context, prefix string) ([]string, error) { + defer metrics.MeasureSince(metricList, time.Now()) + + // Pooling + b.permitPool.Acquire() + defer b.permitPool.Release() + + iter := b.client.Bucket(b.bucket).Objects(ctx, &storage.Query{ + Prefix: prefix, + Delimiter: objectDelimiter, + Versions: false, + }) + + keys := []string{} + + for { + objAttrs, err := iter.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, fmt.Errorf("failed to read object: %w", err) + } + + var path string + if objAttrs.Prefix != "" { + // "subdirectory" + path = objAttrs.Prefix + } else { + // file + path = objAttrs.Name + } + + // get relative file/dir just like "basename" + key := strings.TrimPrefix(path, prefix) + keys = append(keys, key) + } + + sort.Strings(keys) + + return keys, nil +} + +// extractInt is a helper function that takes a string and converts that string +// to an int, but accounts for the empty string. +func extractInt(s string) (int, error) { + if s == "" { + return 0, nil + } + return strconv.Atoi(s) +} diff --git a/physical/gcs/gcs_ha.go b/physical/gcs/gcs_ha.go new file mode 100644 index 0000000..2e4e762 --- /dev/null +++ b/physical/gcs/gcs_ha.go @@ -0,0 +1,416 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package gcs + +import ( + "context" + "encoding/json" + "fmt" + "sync" + "time" + + "cloud.google.com/go/storage" + metrics "github.com/armon/go-metrics" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/sdk/physical" + "github.com/pkg/errors" + "google.golang.org/api/googleapi" +) + +// Verify Backend satisfies the correct interfaces +var ( + _ physical.HABackend = (*Backend)(nil) + _ physical.Lock = (*Lock)(nil) +) + +const ( + // LockRenewInterval is the time to wait between lock renewals. + LockRenewInterval = 5 * time.Second + + // LockRetryInterval is the amount of time to wait if the lock fails before + // trying again. + LockRetryInterval = 5 * time.Second + + // LockTTL is the default lock TTL. + LockTTL = 15 * time.Second + + // LockWatchRetryInterval is the amount of time to wait if a watch fails + // before trying again. 
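+ // Combined with LockWatchRetryMax below, a persistently failing watch is
+ // abandoned after a few of these intervals (roughly 20 seconds), at which
+ // point leadership is surrendered.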
+ LockWatchRetryInterval = 5 * time.Second + + // LockWatchRetryMax is the number of times to retry a failed watch before + // signaling that leadership is lost. + LockWatchRetryMax = 5 +) + +var ( + // metricLockUnlock is the metric to register for a lock delete. + metricLockUnlock = []string{"gcs", "lock", "unlock"} + + // metricLockLock is the metric to register for a lock get. + metricLockLock = []string{"gcs", "lock", "lock"} + + // metricLockValue is the metric to register for a lock create/update. + metricLockValue = []string{"gcs", "lock", "value"} +) + +// Lock is the HA lock. +type Lock struct { + // backend is the underlying physical backend. + backend *Backend + + // key is the name of the key. value is the value of the key. + key, value string + + // held is a boolean indicating if the lock is currently held. + held bool + + // identity is the internal identity of this key (unique to this server + // instance). + identity string + + // lock is an internal lock + lock sync.Mutex + + // stopCh is the channel that stops all operations. It may be closed in the + // event of a leader loss or graceful shutdown. stopped is a boolean + // indicating if we are stopped - it exists to prevent double closing the + // channel. stopLock is a mutex around the locks. + stopCh chan struct{} + stopped bool + stopLock sync.Mutex + + // Allow modifying the Lock durations for ease of unit testing. + renewInterval time.Duration + retryInterval time.Duration + ttl time.Duration + watchRetryInterval time.Duration + watchRetryMax int +} + +// LockRecord is the struct that corresponds to a lock. +type LockRecord struct { + Key string `json:"key"` + Value string `json:"value"` + Identity string `json:"identity"` + Timestamp time.Time `json:"timestamp"` + + // attrs are the internal object attributes. This is stored internally, for + // internal consumption only. + attrs *storage.ObjectAttrs +} + +// HAEnabled implements HABackend and indicates that this backend supports high +// availability. +func (b *Backend) HAEnabled() bool { + return b.haEnabled +} + +// LockWith acquires a mutual exclusion based on the given key. +func (b *Backend) LockWith(key, value string) (physical.Lock, error) { + identity, err := uuid.GenerateUUID() + if err != nil { + return nil, fmt.Errorf("lock with: %w", err) + } + return &Lock{ + backend: b, + key: key, + value: value, + identity: identity, + stopped: true, + + renewInterval: LockRenewInterval, + retryInterval: LockRetryInterval, + ttl: LockTTL, + watchRetryInterval: LockWatchRetryInterval, + watchRetryMax: LockWatchRetryMax, + }, nil +} + +// Lock acquires the given lock. The stopCh is optional. If closed, it +// interrupts the lock acquisition attempt. The returned channel should be +// closed when leadership is lost. +func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { + defer metrics.MeasureSince(metricLockLock, time.Now()) + + l.lock.Lock() + defer l.lock.Unlock() + if l.held { + return nil, errors.New("lock already held") + } + + // Attempt to lock - this function blocks until a lock is acquired or an error + // occurs. 
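+ // attemptLock polls writeLock every retryInterval rather than watching the
+ // object, so the worst-case delay between a stale lock's TTL expiring and
+ // takeover is one extra retryInterval (5s by default).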
+ acquired, err := l.attemptLock(stopCh) + if err != nil { + return nil, fmt.Errorf("lock: %w", err) + } + if !acquired { + return nil, nil + } + + // We have the lock now + l.held = true + + // Build the locks + l.stopLock.Lock() + l.stopCh = make(chan struct{}) + l.stopped = false + l.stopLock.Unlock() + + // Periodically renew and watch the lock + go l.renewLock() + go l.watchLock() + + return l.stopCh, nil +} + +// Unlock releases the lock. +func (l *Lock) Unlock() error { + defer metrics.MeasureSince(metricLockUnlock, time.Now()) + + l.lock.Lock() + defer l.lock.Unlock() + if !l.held { + return nil + } + + // Stop any existing locking or renewal attempts + l.stopLock.Lock() + if !l.stopped { + l.stopped = true + close(l.stopCh) + } + l.stopLock.Unlock() + + // Read the record value before deleting. This needs to be a CAS operation or + // else we might be deleting someone else's lock. + ctx := context.Background() + r, err := l.get(ctx) + if err != nil { + return fmt.Errorf("failed to read lock for deletion: %w", err) + } + if r != nil && r.Identity == l.identity { + ctx := context.Background() + conds := storage.Conditions{ + GenerationMatch: r.attrs.Generation, + MetagenerationMatch: r.attrs.Metageneration, + } + + obj := l.backend.haClient.Bucket(l.backend.bucket).Object(l.key) + if err := obj.If(conds).Delete(ctx); err != nil { + // If the pre-condition failed, it means that someone else has already + // acquired the lock and we don't want to delete it. + if terr, ok := err.(*googleapi.Error); ok && terr.Code == 412 { + l.backend.logger.Debug("unlock: preconditions failed (lock already taken by someone else?)") + } else { + return fmt.Errorf("failed to delete lock: %w", err) + } + } + } + + // We are no longer holding the lock + l.held = false + + return nil +} + +// Value returns the value of the lock and if it is held. +func (l *Lock) Value() (bool, string, error) { + defer metrics.MeasureSince(metricLockValue, time.Now()) + + r, err := l.get(context.Background()) + if err != nil { + return false, "", err + } + if r == nil { + return false, "", err + } + return true, string(r.Value), nil +} + +// attemptLock attempts to acquire a lock. If the given channel is closed, the +// acquisition attempt stops. This function returns when a lock is acquired or +// an error occurs. +func (l *Lock) attemptLock(stopCh <-chan struct{}) (bool, error) { + ticker := time.NewTicker(l.retryInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + acquired, err := l.writeLock() + if err != nil { + return false, fmt.Errorf("attempt lock: %w", err) + } + if !acquired { + continue + } + + return true, nil + case <-stopCh: + return false, nil + } + } +} + +// renewLock renews the given lock until the channel is closed. +func (l *Lock) renewLock() { + ticker := time.NewTicker(l.renewInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + l.writeLock() + case <-l.stopCh: + return + } + } +} + +// watchLock checks whether the lock has changed in the table and closes the +// leader channel accordingly. If an error occurs during the check, watchLock +// will retry the operation and then close the leader channel if it can't +// succeed after retries. 
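+// Only read errors consume the retry budget: a successful read that returns
+// no record, or a record with a different identity, ends the watch at once
+// because the lock is provably no longer ours.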
+func (l *Lock) watchLock() { + retries := 0 + ticker := time.NewTicker(l.watchRetryInterval) + +OUTER: + for { + // Check if the channel is already closed + select { + case <-l.stopCh: + break OUTER + default: + } + + // Check if we've exceeded retries + if retries >= l.watchRetryMax-1 { + break OUTER + } + + // Wait for the timer + select { + case <-ticker.C: + case <-l.stopCh: + break OUTER + } + + // Attempt to read the key + r, err := l.get(context.Background()) + if err != nil { + retries++ + continue + } + + // Verify the identity is the same + if r == nil || r.Identity != l.identity { + break OUTER + } + } + + l.stopLock.Lock() + defer l.stopLock.Unlock() + if !l.stopped { + l.stopped = true + close(l.stopCh) + } +} + +// writeLock writes the given lock using the following algorithm: +// +// - lock does not exist +// - write the lock +// +// - lock exists +// - if key is empty or identity is the same or timestamp exceeds TTL +// - update the lock to self +func (l *Lock) writeLock() (bool, error) { + // Create a transaction to read and the update (maybe) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // The operation may be retried, so we need to stop it if we lose leadership. + go func() { + select { + case <-l.stopCh: + cancel() + case <-ctx.Done(): + } + }() + + // Build up the list of conditions + var conds storage.Conditions + + // Read the record + r, err := l.get(ctx) + if err != nil { + return false, fmt.Errorf("write lock: %w", err) + } + if r != nil { + // If the key is empty or the identity is ours or the ttl expired, we can + // write. Otherwise, return now because we cannot. + if r.Key != "" && r.Identity != l.identity && time.Now().UTC().Sub(r.Timestamp) < l.ttl { + return false, nil + } + + // CAS operation + conds.GenerationMatch = r.attrs.Generation + conds.MetagenerationMatch = r.attrs.Metageneration + } else { + // Ensure no one created while we were working + conds.DoesNotExist = true + } + + // Update the lock to now + lockData, err := json.Marshal(&LockRecord{ + Key: l.key, + Value: l.value, + Identity: l.identity, + Timestamp: time.Now().UTC(), + }) + if err != nil { + return false, fmt.Errorf("write lock: failed to encode JSON: %w", err) + } + + // Write the object + obj := l.backend.haClient.Bucket(l.backend.bucket).Object(l.key) + w := obj.If(conds).NewWriter(ctx) + w.ObjectAttrs.CacheControl = "no-cache; no-store; max-age=0" + w.ObjectAttrs.Metadata = map[string]string{ + "lock": string(lockData), + } + if err := w.Close(); err != nil { + // If the pre-condition failed, it means that we already have a lock. + if terr, ok := err.(*googleapi.Error); ok && terr.Code == 412 { + return false, nil + } + return false, err + } + + return true, nil +} + +// get retrieves the value for the lock. +func (l *Lock) get(ctx context.Context) (*LockRecord, error) { + // Read + attrs, err := l.backend.haClient.Bucket(l.backend.bucket).Object(l.key).Attrs(ctx) + if err == storage.ErrObjectNotExist { + return nil, nil + } + if err != nil { + return nil, fmt.Errorf("failed to read attrs for %q: %w", l.key, err) + } + + // If we got this far, we have attributes, meaning the lockfile exists. 
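+ // The payload is the JSON-encoded LockRecord that writeLock stored in the
+ // object's "lock" metadata entry, along the lines of (illustrative values):
+ //
+ //   {"key":"...","value":"...","identity":"<uuid>","timestamp":"2024-04-20T12:00:00Z"}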
+ var r LockRecord + r.attrs = attrs + lockData := []byte(attrs.Metadata["lock"]) + if err := json.Unmarshal(lockData, &r); err != nil { + return nil, fmt.Errorf("failed to decode lock: %w", err) + } + return &r, nil +} diff --git a/physical/gcs/gcs_ha_test.go b/physical/gcs/gcs_ha_test.go new file mode 100644 index 0000000..cdd59e7 --- /dev/null +++ b/physical/gcs/gcs_ha_test.go @@ -0,0 +1,62 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package gcs + +import ( + "context" + "fmt" + "math/rand" + "os" + "testing" + "time" + + "cloud.google.com/go/storage" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" +) + +func TestHABackend(t *testing.T) { + projectID := os.Getenv("GOOGLE_PROJECT_ID") + if projectID == "" { + t.Skip("GOOGLE_PROJECT_ID not set") + } + + r := rand.New(rand.NewSource(time.Now().UnixNano())).Int() + bucket := fmt.Sprintf("vault-gcs-testacc-%d", r) + + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + t.Fatal(err) + } + + testCleanup(t, client, bucket) + defer testCleanup(t, client, bucket) + + bh := client.Bucket(bucket) + if err := bh.Create(context.Background(), projectID, nil); err != nil { + t.Fatal(err) + } + + logger := logging.NewVaultLogger(log.Trace) + config := map[string]string{ + "bucket": bucket, + "ha_enabled": "true", + } + + b, err := NewBackend(config, logger) + if err != nil { + t.Fatal(err) + } + + b2, err := NewBackend(config, logger) + if err != nil { + t.Fatal(err) + } + + physical.ExerciseBackend(t, b) + physical.ExerciseBackend_ListPrefix(t, b) + physical.ExerciseHABackend(t, b.(physical.HABackend), b2.(physical.HABackend)) +} diff --git a/physical/gcs/gcs_test.go b/physical/gcs/gcs_test.go new file mode 100644 index 0000000..332ba35 --- /dev/null +++ b/physical/gcs/gcs_test.go @@ -0,0 +1,77 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package gcs + +import ( + "context" + "fmt" + "math/rand" + "os" + "strconv" + "testing" + "time" + + "cloud.google.com/go/storage" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" + "google.golang.org/api/googleapi" +) + +func testCleanup(t testing.TB, client *storage.Client, bucket string) { + t.Helper() + + ctx := context.Background() + if err := client.Bucket(bucket).Delete(ctx); err != nil { + if terr, ok := err.(*googleapi.Error); !ok || terr.Code != 404 { + t.Fatal(err) + } + } +} + +func TestBackend(t *testing.T) { + projectID := os.Getenv("GOOGLE_PROJECT_ID") + if projectID == "" { + t.Skip("GOOGLE_PROJECT_ID not set") + } + + r := rand.New(rand.NewSource(time.Now().UnixNano())).Int() + bucket := fmt.Sprintf("vault-gcs-testacc-%d", r) + + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + t.Fatal(err) + } + + testCleanup(t, client, bucket) + defer testCleanup(t, client, bucket) + + b := client.Bucket(bucket) + if err := b.Create(context.Background(), projectID, nil); err != nil { + t.Fatal(err) + } + + backend, err := NewBackend(map[string]string{ + "bucket": bucket, + "ha_enabled": "false", + }, logging.NewVaultLogger(log.Trace)) + if err != nil { + t.Fatal(err) + } + + // Verify chunkSize is set correctly on the Backend + be := backend.(*Backend) + expectedChunkSize, err := strconv.Atoi(defaultChunkSize) + if err != nil { + t.Fatalf("failed to convert defaultChunkSize to int: %s", err) + } + expectedChunkSize = expectedChunkSize * 1024 + if be.chunkSize != expectedChunkSize { + t.Fatalf("expected chunkSize to be %d. got=%d", expectedChunkSize, be.chunkSize) + } + + physical.ExerciseBackend(t, backend) + physical.ExerciseBackend_ListPrefix(t, backend) +} diff --git a/physical/manta/manta.go b/physical/manta/manta.go new file mode 100644 index 0000000..cfb0770 --- /dev/null +++ b/physical/manta/manta.go @@ -0,0 +1,266 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package manta + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "path" + "sort" + "strconv" + "strings" + "time" + + metrics "github.com/armon/go-metrics" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/physical" + triton "github.com/joyent/triton-go" + "github.com/joyent/triton-go/authentication" + "github.com/joyent/triton-go/errors" + "github.com/joyent/triton-go/storage" +) + +const mantaDefaultRootStore = "/stor" + +type MantaBackend struct { + logger log.Logger + permitPool *physical.PermitPool + client *storage.StorageClient + directory string +} + +func NewMantaBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + user := os.Getenv("MANTA_USER") + if user == "" { + user = conf["user"] + } + + keyId := os.Getenv("MANTA_KEY_ID") + if keyId == "" { + keyId = conf["key_id"] + } + + url := os.Getenv("MANTA_URL") + if url == "" { + url = conf["url"] + } else { + url = "https://us-east.manta.joyent.com" + } + + subuser := os.Getenv("MANTA_SUBUSER") + if subuser == "" { + if confUser, ok := conf["subuser"]; ok { + subuser = confUser + } + } + + input := authentication.SSHAgentSignerInput{ + KeyID: keyId, + AccountName: user, + Username: subuser, + } + signer, err := authentication.NewSSHAgentSigner(input) + if err != nil { + return nil, fmt.Errorf("Error Creating SSH Agent Signer: %w", err) + } + + maxParStr, ok := conf["max_parallel"] + var maxParInt int + if ok { + maxParInt, err = strconv.Atoi(maxParStr) + if err != nil { + return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err) + } + if logger.IsDebug() { + logger.Debug("max_parallel set", "max_parallel", maxParInt) + } + } + + config := &triton.ClientConfig{ + MantaURL: url, + AccountName: user, + Signers: []authentication.Signer{signer}, + } + + client, err := storage.NewClient(config) + if err != nil { + return nil, fmt.Errorf("failed initialising Storage client: %w", err) + } + + return &MantaBackend{ + client: client, + directory: conf["directory"], + logger: logger, + permitPool: physical.NewPermitPool(maxParInt), + }, nil +} + +// Put is used to insert or update an entry +func (m *MantaBackend) Put(ctx context.Context, entry *physical.Entry) error { + defer metrics.MeasureSince([]string{"manta", "put"}, time.Now()) + + m.permitPool.Acquire() + defer m.permitPool.Release() + + r := bytes.NewReader(entry.Value) + r.Seek(0, 0) + + return m.client.Objects().Put(ctx, &storage.PutObjectInput{ + ObjectPath: path.Join(mantaDefaultRootStore, m.directory, entry.Key, ".vault_value"), + ObjectReader: r, + ContentLength: uint64(len(entry.Value)), + ForceInsert: true, + }) +} + +// Get is used to fetch an entry +func (m *MantaBackend) Get(ctx context.Context, key string) (*physical.Entry, error) { + defer metrics.MeasureSince([]string{"manta", "get"}, time.Now()) + + m.permitPool.Acquire() + defer m.permitPool.Release() + + output, err := m.client.Objects().Get(ctx, &storage.GetObjectInput{ + ObjectPath: path.Join(mantaDefaultRootStore, m.directory, key, ".vault_value"), + }) + if err != nil { + if strings.Contains(err.Error(), "ResourceNotFound") { + return nil, nil + } + return nil, err + } + + defer output.ObjectReader.Close() + + data := make([]byte, output.ContentLength) + _, err = io.ReadFull(output.ObjectReader, data) + if err != nil { + return nil, err + } + + ent := &physical.Entry{ + Key: key, + Value: data, + } + + return ent, nil +} + +// Delete is used to permanently delete an entry +func (m *MantaBackend) 
Delete(ctx context.Context, key string) error { + defer metrics.MeasureSince([]string{"manta", "delete"}, time.Now()) + + m.permitPool.Acquire() + defer m.permitPool.Release() + + if strings.HasSuffix(key, "/") { + err := m.client.Dir().Delete(ctx, &storage.DeleteDirectoryInput{ + DirectoryName: path.Join(mantaDefaultRootStore, m.directory, key), + ForceDelete: true, + }) + if err != nil { + return err + } + } else { + err := m.client.Objects().Delete(ctx, &storage.DeleteObjectInput{ + ObjectPath: path.Join(mantaDefaultRootStore, m.directory, key, ".vault_value"), + }) + if err != nil { + if errors.IsResourceNotFound(err) { + return nil + } + return err + } + + return tryDeleteDirectory(ctx, m, path.Join(mantaDefaultRootStore, m.directory, key)) + } + + return nil +} + +func tryDeleteDirectory(ctx context.Context, m *MantaBackend, directoryPath string) error { + objs, err := m.client.Dir().List(ctx, &storage.ListDirectoryInput{ + DirectoryName: directoryPath, + }) + if err != nil { + if errors.IsResourceNotFound(err) { + return nil + } + return err + } + if objs != nil && len(objs.Entries) == 0 { + err := m.client.Dir().Delete(ctx, &storage.DeleteDirectoryInput{ + DirectoryName: directoryPath, + }) + if err != nil { + return err + } + + return tryDeleteDirectory(ctx, m, path.Dir(directoryPath)) + } + return nil +} + +// List is used to list all the keys under a given +// prefix, up to the next prefix. +func (m *MantaBackend) List(ctx context.Context, prefix string) ([]string, error) { + defer metrics.MeasureSince([]string{"manta", "list"}, time.Now()) + + m.permitPool.Acquire() + defer m.permitPool.Release() + + objs, err := m.client.Dir().List(ctx, &storage.ListDirectoryInput{ + DirectoryName: path.Join(mantaDefaultRootStore, m.directory, prefix), + }) + if err != nil { + if errors.IsResourceNotFound(err) { + return []string{}, nil + } + return nil, err + } + + keys := []string{} + for _, obj := range objs.Entries { + if obj.Type == "directory" { + objs, err := m.client.Dir().List(ctx, &storage.ListDirectoryInput{ + DirectoryName: path.Join(mantaDefaultRootStore, m.directory, prefix, obj.Name), + }) + if err != nil { + if !errors.IsResourceNotFound(err) { + return nil, err + } + } + + // We need to check to see if there is something more than just the `value` file + // if the length of the children is: + // > 1 and includes the value `index` then we need to add foo and foo/ + // = 1 and the value is `index` then we need to add foo + // = 1 and the value is not `index` then we need to add foo/ + if len(objs.Entries) == 1 { + if objs.Entries[0].Name != ".vault_value" { + keys = append(keys, fmt.Sprintf("%s/", obj.Name)) + } else { + keys = append(keys, obj.Name) + } + } else if len(objs.Entries) > 1 { + for _, childObj := range objs.Entries { + if childObj.Name == ".vault_value" { + keys = append(keys, obj.Name) + } else { + keys = append(keys, fmt.Sprintf("%s/", obj.Name)) + } + } + } else { + keys = append(keys, obj.Name) + } + } + } + + sort.Strings(keys) + + return keys, nil +} diff --git a/physical/manta/manta_test.go b/physical/manta/manta_test.go new file mode 100644 index 0000000..67d50fe --- /dev/null +++ b/physical/manta/manta_test.go @@ -0,0 +1,89 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package manta + +import ( + "context" + "fmt" + "math/rand" + "os" + "path" + "testing" + "time" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" + triton "github.com/joyent/triton-go" + "github.com/joyent/triton-go/authentication" + tt "github.com/joyent/triton-go/errors" + "github.com/joyent/triton-go/storage" +) + +func TestMantaBackend(t *testing.T) { + user := os.Getenv("MANTA_USER") + keyId := os.Getenv("MANTA_KEY_ID") + url := "https://us-east.manta.joyent.com" + testHarnessBucket := fmt.Sprintf("test-bucket-%d", randInt()) + + if user == "" || keyId == "" { + t.SkipNow() + } + + input := authentication.SSHAgentSignerInput{ + KeyID: keyId, + AccountName: user, + Username: "", + } + signer, err := authentication.NewSSHAgentSigner(input) + if err != nil { + t.Fatalf("Error Creating SSH Agent Signer: %s", err.Error()) + } + + config := &triton.ClientConfig{ + MantaURL: url, + AccountName: user, + Signers: []authentication.Signer{signer}, + } + + client, err := storage.NewClient(config) + if err != nil { + t.Fatalf("failed initialising Storage client: %s", err.Error()) + } + + logger := logging.NewVaultLogger(log.Debug) + mb := &MantaBackend{ + client: client, + directory: testHarnessBucket, + logger: logger.Named("storage.mantabackend"), + permitPool: physical.NewPermitPool(128), + } + + err = mb.client.Dir().Put(context.Background(), &storage.PutDirectoryInput{ + DirectoryName: path.Join(mantaDefaultRootStore), + }) + if err != nil { + t.Fatal("Error creating test harness directory") + } + + defer func() { + err = mb.client.Dir().Delete(context.Background(), &storage.DeleteDirectoryInput{ + DirectoryName: path.Join(mantaDefaultRootStore, testHarnessBucket), + ForceDelete: true, + }) + if err != nil { + if !tt.IsResourceNotFoundError(err) { + t.Fatal("failed to delete test harness directory") + } + } + }() + + physical.ExerciseBackend(t, mb) + physical.ExerciseBackend_ListPrefix(t, mb) +} + +func randInt() int { + rand.Seed(time.Now().UTC().UnixNano()) + return rand.New(rand.NewSource(time.Now().UnixNano())).Int() +} diff --git a/physical/mssql/mssql.go b/physical/mssql/mssql.go new file mode 100644 index 0000000..2859a65 --- /dev/null +++ b/physical/mssql/mssql.go @@ -0,0 +1,285 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package mssql + +import ( + "context" + "database/sql" + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "time" + + metrics "github.com/armon/go-metrics" + _ "github.com/denisenkom/go-mssqldb" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/physical" +) + +// Verify MSSQLBackend satisfies the correct interfaces +var _ physical.Backend = (*MSSQLBackend)(nil) +var identifierRegex = regexp.MustCompile(`^[\p{L}_][\p{L}\p{Nd}@#$_]*$`) + +type MSSQLBackend struct { + dbTable string + client *sql.DB + statements map[string]*sql.Stmt + logger log.Logger + permitPool *physical.PermitPool +} + +func isInvalidIdentifier(name string) bool { + if !identifierRegex.MatchString(name) { + return true + } + return false +} + +func NewMSSQLBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + username, ok := conf["username"] + if !ok { + username = "" + } + + password, ok := conf["password"] + if !ok { + password = "" + } + + server, ok := conf["server"] + if !ok || server == "" { + return nil, fmt.Errorf("missing server") + } + + port, ok := conf["port"] + if !ok { + port = "" + } + + maxParStr, ok := conf["max_parallel"] + var maxParInt int + var err error + if ok { + maxParInt, err = strconv.Atoi(maxParStr) + if err != nil { + return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err) + } + if logger.IsDebug() { + logger.Debug("max_parallel set", "max_parallel", maxParInt) + } + } else { + maxParInt = physical.DefaultParallelOperations + } + + database, ok := conf["database"] + if !ok { + database = "Vault" + } + + if isInvalidIdentifier(database) { + return nil, fmt.Errorf("invalid database name") + } + + table, ok := conf["table"] + if !ok { + table = "Vault" + } + + if isInvalidIdentifier(table) { + return nil, fmt.Errorf("invalid table name") + } + + appname, ok := conf["appname"] + if !ok { + appname = "Vault" + } + + connectionTimeout, ok := conf["connectiontimeout"] + if !ok { + connectionTimeout = "30" + } + + logLevel, ok := conf["loglevel"] + if !ok { + logLevel = "0" + } + + schema, ok := conf["schema"] + if !ok || schema == "" { + schema = "dbo" + } + + if isInvalidIdentifier(schema) { + return nil, fmt.Errorf("invalid schema name") + } + + connectionString := fmt.Sprintf("server=%s;app name=%s;connection timeout=%s;log=%s", server, appname, connectionTimeout, logLevel) + if username != "" { + connectionString += ";user id=" + username + } + + if password != "" { + connectionString += ";password=" + password + } + + if port != "" { + connectionString += ";port=" + port + } + + db, err := sql.Open("mssql", connectionString) + if err != nil { + return nil, fmt.Errorf("failed to connect to mssql: %w", err) + } + + db.SetMaxOpenConns(maxParInt) + + if _, err := db.Exec("IF NOT EXISTS(SELECT * FROM sys.databases WHERE name = ?) CREATE DATABASE "+database, database); err != nil { + return nil, fmt.Errorf("failed to create mssql database: %w", err) + } + + dbTable := database + "." + schema + "." + table + createQuery := "IF NOT EXISTS(SELECT 1 FROM " + database + ".INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE='BASE TABLE' AND TABLE_NAME=? AND TABLE_SCHEMA=?) 
CREATE TABLE " + dbTable + " (Path VARCHAR(512) PRIMARY KEY, Value VARBINARY(MAX))" + + if schema != "dbo" { + + var num int + err = db.QueryRow("SELECT 1 FROM "+database+".sys.schemas WHERE name = ?", schema).Scan(&num) + + switch { + case err == sql.ErrNoRows: + if _, err := db.Exec("USE " + database + "; EXEC ('CREATE SCHEMA " + schema + "')"); err != nil { + return nil, fmt.Errorf("failed to create mssql schema: %w", err) + } + + case err != nil: + return nil, fmt.Errorf("failed to check if mssql schema exists: %w", err) + } + } + + if _, err := db.Exec(createQuery, table, schema); err != nil { + return nil, fmt.Errorf("failed to create mssql table: %w", err) + } + + m := &MSSQLBackend{ + dbTable: dbTable, + client: db, + statements: make(map[string]*sql.Stmt), + logger: logger, + permitPool: physical.NewPermitPool(maxParInt), + } + + statements := map[string]string{ + "put": "IF EXISTS(SELECT 1 FROM " + dbTable + " WHERE Path = ?) UPDATE " + dbTable + " SET Value = ? WHERE Path = ?" + + " ELSE INSERT INTO " + dbTable + " VALUES(?, ?)", + "get": "SELECT Value FROM " + dbTable + " WHERE Path = ?", + "delete": "DELETE FROM " + dbTable + " WHERE Path = ?", + "list": "SELECT Path FROM " + dbTable + " WHERE Path LIKE ?", + } + + for name, query := range statements { + if err := m.prepare(name, query); err != nil { + return nil, err + } + } + + return m, nil +} + +func (m *MSSQLBackend) prepare(name, query string) error { + stmt, err := m.client.Prepare(query) + if err != nil { + return fmt.Errorf("failed to prepare %q: %w", name, err) + } + + m.statements[name] = stmt + + return nil +} + +func (m *MSSQLBackend) Put(ctx context.Context, entry *physical.Entry) error { + defer metrics.MeasureSince([]string{"mssql", "put"}, time.Now()) + + m.permitPool.Acquire() + defer m.permitPool.Release() + + _, err := m.statements["put"].Exec(entry.Key, entry.Value, entry.Key, entry.Key, entry.Value) + if err != nil { + return err + } + + return nil +} + +func (m *MSSQLBackend) Get(ctx context.Context, key string) (*physical.Entry, error) { + defer metrics.MeasureSince([]string{"mssql", "get"}, time.Now()) + + m.permitPool.Acquire() + defer m.permitPool.Release() + + var result []byte + err := m.statements["get"].QueryRow(key).Scan(&result) + if err == sql.ErrNoRows { + return nil, nil + } + + if err != nil { + return nil, err + } + + ent := &physical.Entry{ + Key: key, + Value: result, + } + + return ent, nil +} + +func (m *MSSQLBackend) Delete(ctx context.Context, key string) error { + defer metrics.MeasureSince([]string{"mssql", "delete"}, time.Now()) + + m.permitPool.Acquire() + defer m.permitPool.Release() + + _, err := m.statements["delete"].Exec(key) + if err != nil { + return err + } + + return nil +} + +func (m *MSSQLBackend) List(ctx context.Context, prefix string) ([]string, error) { + defer metrics.MeasureSince([]string{"mssql", "list"}, time.Now()) + + m.permitPool.Acquire() + defer m.permitPool.Release() + + likePrefix := prefix + "%" + rows, err := m.statements["list"].Query(likePrefix) + if err != nil { + return nil, err + } + var keys []string + for rows.Next() { + var key string + err = rows.Scan(&key) + if err != nil { + return nil, fmt.Errorf("failed to scan rows: %w", err) + } + + key = strings.TrimPrefix(key, prefix) + if i := strings.Index(key, "/"); i == -1 { + keys = append(keys, key) + } else if i != -1 { + keys = strutil.AppendIfMissing(keys, string(key[:i+1])) + } + } + + sort.Strings(keys) + + return keys, nil +} diff --git a/physical/mssql/mssql_test.go 
b/physical/mssql/mssql_test.go new file mode 100644 index 0000000..2324ff5 --- /dev/null +++ b/physical/mssql/mssql_test.go @@ -0,0 +1,157 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package mssql + +import ( + "os" + "testing" + + _ "github.com/denisenkom/go-mssqldb" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" +) + +// TestInvalidIdentifier checks validity of an identifier +func TestInvalidIdentifier(t *testing.T) { + testcases := map[string]bool{ + "name": true, + "_name": true, + "Name": true, + "#name": false, + "?Name": false, + "9name": false, + "@name": false, + "$name": false, + " name": false, + "n ame": false, + "n4444444": true, + "_4321098765": true, + "_##$$@@__": true, + "_123name#@": true, + "name!": false, + "name%": false, + "name^": false, + "name&": false, + "name*": false, + "name(": false, + "name)": false, + "nåame": true, + "åname": true, + "name'": false, + "nam`e": false, + "пример": true, + "_#Āā@#$_ĂĄąćĈĉĊċ": true, + "ÛÜÝÞßàáâ": true, + "豈更滑a23$#@": true, + } + + for i, expected := range testcases { + if !isInvalidIdentifier(i) != expected { + t.Fatalf("unexpected identifier %s: expected validity %v", i, expected) + } + } +} + +func TestMSSQLBackend(t *testing.T) { + server := os.Getenv("MSSQL_SERVER") + if server == "" { + t.SkipNow() + } + + database := os.Getenv("MSSQL_DB") + if database == "" { + database = "test" + } + + table := os.Getenv("MSSQL_TABLE") + if table == "" { + table = "test" + } + + schema := os.Getenv("MSSQL_SCHEMA") + if schema == "" { + schema = "test" + } + + username := os.Getenv("MSSQL_USERNAME") + password := os.Getenv("MSSQL_PASSWORD") + + // Run vault tests + logger := logging.NewVaultLogger(log.Debug) + + b, err := NewMSSQLBackend(map[string]string{ + "server": server, + "database": database, + "table": table, + "schema": schema, + "username": username, + "password": password, + }, logger) + if err != nil { + t.Fatalf("Failed to create new backend: %v", err) + } + + defer func() { + mssql := b.(*MSSQLBackend) + _, err := mssql.client.Exec("DROP TABLE " + mssql.dbTable) + if err != nil { + t.Fatalf("Failed to drop table: %v", err) + } + }() + + physical.ExerciseBackend(t, b) + physical.ExerciseBackend_ListPrefix(t, b) +} + +func TestMSSQLBackend_schema(t *testing.T) { + server := os.Getenv("MSSQL_SERVER") + if server == "" { + t.SkipNow() + } + + database := os.Getenv("MSSQL_DB") + if database == "" { + database = "test" + } + + table := os.Getenv("MSSQL_TABLE") + if table == "" { + table = "test" + } + + schema := os.Getenv("MSSQL_SCHEMA") + if schema == "" { + schema = "test" + } + + username := os.Getenv("MSSQL_USERNAME") + password := os.Getenv("MSSQL_PASSWORD") + + // Run vault tests + logger := logging.NewVaultLogger(log.Debug) + + b, err := NewMSSQLBackend(map[string]string{ + "server": server, + "database": database, + "schema": schema, + "table": table, + "username": username, + "password": password, + }, logger) + if err != nil { + t.Fatalf("Failed to create new backend: %v", err) + } + + defer func() { + mssql := b.(*MSSQLBackend) + _, err := mssql.client.Exec("DROP TABLE " + mssql.dbTable) + if err != nil { + t.Fatalf("Failed to drop table: %v", err) + } + }() + + physical.ExerciseBackend(t, b) + physical.ExerciseBackend_ListPrefix(t, b) +} diff --git a/physical/mysql/mysql.go b/physical/mysql/mysql.go new file mode 100644 index 0000000..225882f --- /dev/null +++ b/physical/mysql/mysql.go @@ -0,0 +1,779 
@@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package mysql + +import ( + "context" + "crypto/tls" + "crypto/x509" + "database/sql" + "errors" + "fmt" + "io/ioutil" + "math" + "net/url" + "sort" + "strconv" + "strings" + "sync" + "time" + "unicode" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + + metrics "github.com/armon/go-metrics" + mysql "github.com/go-sql-driver/mysql" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/physical" +) + +// Verify MySQLBackend satisfies the correct interfaces +var ( + _ physical.Backend = (*MySQLBackend)(nil) + _ physical.HABackend = (*MySQLBackend)(nil) + _ physical.Lock = (*MySQLHALock)(nil) +) + +// Unreserved tls key +// Reserved values are "true", "false", "skip-verify" +const mysqlTLSKey = "default" + +// MySQLBackend is a physical backend that stores data +// within MySQL database. +type MySQLBackend struct { + dbTable string + dbLockTable string + client *sql.DB + statements map[string]*sql.Stmt + logger log.Logger + permitPool *physical.PermitPool + conf map[string]string + redirectHost string + redirectPort int64 + haEnabled bool +} + +// NewMySQLBackend constructs a MySQL backend using the given API client and +// server address and credential for accessing mysql database. +func NewMySQLBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + var err error + + db, err := NewMySQLClient(conf, logger) + if err != nil { + return nil, err + } + + database := conf["database"] + if database == "" { + database = "vault" + } + table := conf["table"] + if table == "" { + table = "vault" + } + + err = validateDBTable(database, table) + if err != nil { + return nil, err + } + + dbTable := fmt.Sprintf("`%s`.`%s`", database, table) + + maxParStr, ok := conf["max_parallel"] + var maxParInt int + if ok { + maxParInt, err = strconv.Atoi(maxParStr) + if err != nil { + return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err) + } + if logger.IsDebug() { + logger.Debug("max_parallel set", "max_parallel", maxParInt) + } + } else { + maxParInt = physical.DefaultParallelOperations + } + + // Check schema exists + var schemaExist bool + schemaRows, err := db.Query("SELECT SCHEMA_NAME FROM information_schema.SCHEMATA WHERE SCHEMA_NAME = ?", database) + if err != nil { + return nil, fmt.Errorf("failed to check mysql schema exist: %w", err) + } + defer schemaRows.Close() + schemaExist = schemaRows.Next() + + // Check table exists + var tableExist bool + tableRows, err := db.Query("SELECT TABLE_NAME FROM information_schema.TABLES WHERE TABLE_NAME = ? AND TABLE_SCHEMA = ?", table, database) + if err != nil { + return nil, fmt.Errorf("failed to check mysql table exist: %w", err) + } + defer tableRows.Close() + tableExist = tableRows.Next() + + // Create the required database if it doesn't exists. + if !schemaExist { + if _, err := db.Exec("CREATE DATABASE IF NOT EXISTS `" + database + "`"); err != nil { + return nil, fmt.Errorf("failed to create mysql database: %w", err) + } + } + + // Create the required table if it doesn't exists. 
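+ // vault_key is a varbinary(3072) primary key (3072 bytes is InnoDB's
+ // maximum index key size) and vault_value is a mediumblob, which caps a
+ // stored value at just under 16 MiB.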
+ if !tableExist { + create_query := "CREATE TABLE IF NOT EXISTS " + dbTable + + " (vault_key varbinary(3072), vault_value mediumblob, PRIMARY KEY (vault_key))" + if _, err := db.Exec(create_query); err != nil { + return nil, fmt.Errorf("failed to create mysql table: %w", err) + } + } + + // Default value for ha_enabled + haEnabledStr, ok := conf["ha_enabled"] + if !ok { + haEnabledStr = "false" + } + haEnabled, err := strconv.ParseBool(haEnabledStr) + if err != nil { + return nil, fmt.Errorf("value [%v] of 'ha_enabled' could not be understood", haEnabledStr) + } + + locktable, ok := conf["lock_table"] + if !ok { + locktable = table + "_lock" + } + + dbLockTable := "`" + database + "`.`" + locktable + "`" + + // Only create lock table if ha_enabled is true + if haEnabled { + // Check table exists + var lockTableExist bool + lockTableRows, err := db.Query("SELECT TABLE_NAME FROM information_schema.TABLES WHERE TABLE_NAME = ? AND TABLE_SCHEMA = ?", locktable, database) + if err != nil { + return nil, fmt.Errorf("failed to check mysql table exist: %w", err) + } + defer lockTableRows.Close() + lockTableExist = lockTableRows.Next() + + // Create the required table if it doesn't exists. + if !lockTableExist { + create_query := "CREATE TABLE IF NOT EXISTS " + dbLockTable + + " (node_job varbinary(512), current_leader varbinary(512), PRIMARY KEY (node_job))" + if _, err := db.Exec(create_query); err != nil { + return nil, fmt.Errorf("failed to create mysql table: %w", err) + } + } + } + + // Setup the backend. + m := &MySQLBackend{ + dbTable: dbTable, + dbLockTable: dbLockTable, + client: db, + statements: make(map[string]*sql.Stmt), + logger: logger, + permitPool: physical.NewPermitPool(maxParInt), + conf: conf, + haEnabled: haEnabled, + } + + // Prepare all the statements required + statements := map[string]string{ + "put": "INSERT INTO " + dbTable + + " VALUES( ?, ? ) ON DUPLICATE KEY UPDATE vault_value=VALUES(vault_value)", + "get": "SELECT vault_value FROM " + dbTable + " WHERE vault_key = ?", + "delete": "DELETE FROM " + dbTable + " WHERE vault_key = ?", + "list": "SELECT vault_key FROM " + dbTable + " WHERE vault_key LIKE ?", + } + + // Only prepare ha-related statements if we need them + if haEnabled { + statements["get_lock"] = "SELECT current_leader FROM " + dbLockTable + " WHERE node_job = ?" + statements["used_lock"] = "SELECT IS_USED_LOCK(?)" + } + + for name, query := range statements { + if err := m.prepare(name, query); err != nil { + return nil, err + } + } + + return m, nil +} + +// validateDBTable to prevent SQL injection attacks. This ensures that the database and table names only have valid +// characters in them. MySQL allows for more characters that this will allow, but there isn't an easy way of +// representing the full Unicode Basic Multilingual Plane to check against. +// https://dev.mysql.com/doc/refman/5.7/en/identifiers.html +func validateDBTable(db, table string) (err error) { + merr := &multierror.Error{} + merr = multierror.Append(merr, wrapErr("invalid database: %w", validate(db))) + merr = multierror.Append(merr, wrapErr("invalid table: %w", validate(table))) + return merr.ErrorOrNil() +} + +func validate(name string) (err error) { + if name == "" { + return fmt.Errorf("missing name") + } + // From: https://dev.mysql.com/doc/refman/5.7/en/identifiers.html + // - Permitted characters in quoted identifiers include the full Unicode Basic Multilingual Plane (BMP), except U+0000: + // ASCII: U+0001 .. U+007F + // Extended: U+0080 .. 
U+FFFF + // - ASCII NUL (U+0000) and supplementary characters (U+10000 and higher) are not permitted in quoted or unquoted identifiers. + // - Identifiers may begin with a digit but unless quoted may not consist solely of digits. + // - Database, table, and column names cannot end with space characters. + // + // We are explicitly excluding all space characters (it's easier to deal with) + // The name will be quoted, so the all-digit requirement doesn't apply + runes := []rune(name) + validationErr := fmt.Errorf("invalid character found: can only include printable, non-space characters between [0x0001-0xFFFF]") + for _, r := range runes { + // U+0000 Explicitly disallowed + if r == 0x0000 { + return fmt.Errorf("invalid character: cannot include 0x0000") + } + // Cannot be above 0xFFFF + if r > 0xFFFF { + return fmt.Errorf("invalid character: cannot include any characters above 0xFFFF") + } + if r == '`' { + return fmt.Errorf("invalid character: cannot include '`' character") + } + if r == '\'' || r == '"' { + return fmt.Errorf("invalid character: cannot include quotes") + } + // We are excluding non-printable characters (not mentioned in the docs) + if !unicode.IsPrint(r) { + return validationErr + } + // We are excluding space characters (not mentioned in the docs) + if unicode.IsSpace(r) { + return validationErr + } + } + return nil +} + +func wrapErr(message string, err error) error { + if err == nil { + return nil + } + return fmt.Errorf(message, err) +} + +func NewMySQLClient(conf map[string]string, logger log.Logger) (*sql.DB, error) { + var err error + + // Get the MySQL credentials to perform read/write operations. + username, ok := conf["username"] + if !ok || username == "" { + return nil, fmt.Errorf("missing username") + } + password, ok := conf["password"] + if !ok || password == "" { + return nil, fmt.Errorf("missing password") + } + + // Get or set MySQL server address. 
Defaults to localhost and default port(3306) + address, ok := conf["address"] + if !ok { + address = "127.0.0.1:3306" + } + + maxIdleConnStr, ok := conf["max_idle_connections"] + var maxIdleConnInt int + if ok { + maxIdleConnInt, err = strconv.Atoi(maxIdleConnStr) + if err != nil { + return nil, fmt.Errorf("failed parsing max_idle_connections parameter: %w", err) + } + if logger.IsDebug() { + logger.Debug("max_idle_connections set", "max_idle_connections", maxIdleConnInt) + } + } + + maxConnLifeStr, ok := conf["max_connection_lifetime"] + var maxConnLifeInt int + if ok { + maxConnLifeInt, err = strconv.Atoi(maxConnLifeStr) + if err != nil { + return nil, fmt.Errorf("failed parsing max_connection_lifetime parameter: %w", err) + } + if logger.IsDebug() { + logger.Debug("max_connection_lifetime set", "max_connection_lifetime", maxConnLifeInt) + } + } + + maxParStr, ok := conf["max_parallel"] + var maxParInt int + if ok { + maxParInt, err = strconv.Atoi(maxParStr) + if err != nil { + return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err) + } + if logger.IsDebug() { + logger.Debug("max_parallel set", "max_parallel", maxParInt) + } + } else { + maxParInt = physical.DefaultParallelOperations + } + + dsnParams := url.Values{} + tlsCaFile, tlsOk := conf["tls_ca_file"] + if tlsOk { + if err := setupMySQLTLSConfig(tlsCaFile); err != nil { + return nil, fmt.Errorf("failed register TLS config: %w", err) + } + + dsnParams.Add("tls", mysqlTLSKey) + } + ptAllowed, ptOk := conf["plaintext_connection_allowed"] + if !(ptOk && strings.ToLower(ptAllowed) == "true") && !tlsOk { + logger.Warn("No TLS specified, credentials will be sent in plaintext. To mute this warning add 'plaintext_connection_allowed' with a true value to your MySQL configuration in your config file.") + } + + // Create MySQL handle for the database. + dsn := username + ":" + password + "@tcp(" + address + ")/?" + dsnParams.Encode() + db, err := sql.Open("mysql", dsn) + if err != nil { + return nil, fmt.Errorf("failed to connect to mysql: %w", err) + } + db.SetMaxOpenConns(maxParInt) + if maxIdleConnInt != 0 { + db.SetMaxIdleConns(maxIdleConnInt) + } + if maxConnLifeInt != 0 { + db.SetConnMaxLifetime(time.Duration(maxConnLifeInt) * time.Second) + } + + return db, err +} + +// prepare is a helper to prepare a query for future execution +func (m *MySQLBackend) prepare(name, query string) error { + stmt, err := m.client.Prepare(query) + if err != nil { + return fmt.Errorf("failed to prepare %q: %w", name, err) + } + m.statements[name] = stmt + return nil +} + +// Put is used to insert or update an entry. +func (m *MySQLBackend) Put(ctx context.Context, entry *physical.Entry) error { + defer metrics.MeasureSince([]string{"mysql", "put"}, time.Now()) + + m.permitPool.Acquire() + defer m.permitPool.Release() + + _, err := m.statements["put"].Exec(entry.Key, entry.Value) + if err != nil { + return err + } + return nil +} + +// Get is used to fetch an entry. 
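+// A missing key is not an error: sql.ErrNoRows is mapped to a nil entry and
+// a nil error, matching the convention used by the other physical backends
+// in this change (see the GCS and MSSQL Get implementations above).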
+func (m *MySQLBackend) Get(ctx context.Context, key string) (*physical.Entry, error) { + defer metrics.MeasureSince([]string{"mysql", "get"}, time.Now()) + + m.permitPool.Acquire() + defer m.permitPool.Release() + + var result []byte + err := m.statements["get"].QueryRow(key).Scan(&result) + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, err + } + + ent := &physical.Entry{ + Key: key, + Value: result, + } + return ent, nil +} + +// Delete is used to permanently delete an entry +func (m *MySQLBackend) Delete(ctx context.Context, key string) error { + defer metrics.MeasureSince([]string{"mysql", "delete"}, time.Now()) + + m.permitPool.Acquire() + defer m.permitPool.Release() + + _, err := m.statements["delete"].Exec(key) + if err != nil { + return err + } + return nil +} + +// List is used to list all the keys under a given +// prefix, up to the next prefix. +func (m *MySQLBackend) List(ctx context.Context, prefix string) ([]string, error) { + defer metrics.MeasureSince([]string{"mysql", "list"}, time.Now()) + + m.permitPool.Acquire() + defer m.permitPool.Release() + + // Add the % wildcard to the prefix to do the prefix search + likePrefix := prefix + "%" + rows, err := m.statements["list"].Query(likePrefix) + if err != nil { + return nil, fmt.Errorf("failed to execute statement: %w", err) + } + + var keys []string + for rows.Next() { + var key string + err = rows.Scan(&key) + if err != nil { + return nil, fmt.Errorf("failed to scan rows: %w", err) + } + + key = strings.TrimPrefix(key, prefix) + if i := strings.Index(key, "/"); i == -1 { + // Add objects only from the current 'folder' + keys = append(keys, key) + } else if i != -1 { + // Add truncated 'folder' paths + keys = strutil.AppendIfMissing(keys, string(key[:i+1])) + } + } + + sort.Strings(keys) + return keys, nil +} + +// LockWith is used for mutual exclusion based on the given key. 
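+// The returned lock is backed by MySQL's GET_LOCK()/IS_USED_LOCK() named
+// locks rather than by rows in dbLockTable; the lock table only records the
+// current leader value so standbys can answer Value(). A minimal usage
+// sketch (key and value shown here are illustrative):
+//
+//	lock, _ := m.LockWith("leader-election", "https://active.node:8200")
+//	leaderCh, err := lock.Lock(stopCh) // blocks until acquired or stopCh closes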
+func (m *MySQLBackend) LockWith(key, value string) (physical.Lock, error) { + l := &MySQLHALock{ + in: m, + key: key, + value: value, + logger: m.logger, + } + return l, nil +} + +func (m *MySQLBackend) HAEnabled() bool { + return m.haEnabled +} + +// MySQLHALock is a MySQL Lock implementation for the HABackend +type MySQLHALock struct { + in *MySQLBackend + key string + value string + logger log.Logger + + held bool + localLock sync.Mutex + leaderCh chan struct{} + stopCh <-chan struct{} + lock *MySQLLock +} + +func (i *MySQLHALock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { + i.localLock.Lock() + defer i.localLock.Unlock() + if i.held { + return nil, fmt.Errorf("lock already held") + } + + // Attempt an async acquisition + didLock := make(chan struct{}) + failLock := make(chan error, 1) + releaseCh := make(chan bool, 1) + go i.attemptLock(i.key, i.value, didLock, failLock, releaseCh) + + // Wait for lock acquisition, failure, or shutdown + select { + case <-didLock: + releaseCh <- false + case err := <-failLock: + return nil, err + case <-stopCh: + releaseCh <- true + return nil, nil + } + + // Create the leader channel + i.held = true + i.leaderCh = make(chan struct{}) + + go i.monitorLock(i.leaderCh) + + i.stopCh = stopCh + + return i.leaderCh, nil +} + +func (i *MySQLHALock) attemptLock(key, value string, didLock chan struct{}, failLock chan error, releaseCh chan bool) { + lock, err := NewMySQLLock(i.in, i.logger, key, value) + if err != nil { + failLock <- err + return + } + + // Set node value + i.lock = lock + + err = lock.Lock() + if err != nil { + failLock <- err + return + } + + // Signal that lock is held + close(didLock) + + // Handle an early abort + release := <-releaseCh + if release { + lock.Unlock() + } +} + +func (i *MySQLHALock) monitorLock(leaderCh chan struct{}) { + for { + // The only way to lose this lock is if someone is + // logging into the DB and altering system tables or you lose a connection in + // which case you will lose the lock anyway. + err := i.hasLock(i.key) + if err != nil { + // Somehow we lost the lock.... likely because the connection holding + // the lock was closed or someone was playing around with the locks in the DB. + close(leaderCh) + return + } + + time.Sleep(5 * time.Second) + } +} + +func (i *MySQLHALock) Unlock() error { + i.localLock.Lock() + defer i.localLock.Unlock() + if !i.held { + return nil + } + + err := i.lock.Unlock() + + if err == nil { + i.held = false + return nil + } + + return err +} + +// hasLock will check if a lock is held by checking the current lock id against our known ID. +func (i *MySQLHALock) hasLock(key string) error { + var result sql.NullInt64 + err := i.in.statements["used_lock"].QueryRow(key).Scan(&result) + if err == sql.ErrNoRows || !result.Valid { + // This is not an error to us since it just means the lock isn't held + return nil + } + + if err != nil { + return err + } + + // IS_USED_LOCK will return the ID of the connection that created the lock. 
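+ // If that ID no longer matches the connection ID we recorded when taking
+ // the lock, another session owns it now and we must give up leadership.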
+ if result.Int64 != GlobalLockID { + return ErrLockHeld + } + + return nil +} + +func (i *MySQLHALock) GetLeader() (string, error) { + defer metrics.MeasureSince([]string{"mysql", "lock_get"}, time.Now()) + var result string + err := i.in.statements["get_lock"].QueryRow("leader").Scan(&result) + if err == sql.ErrNoRows { + return "", err + } + + return result, nil +} + +func (i *MySQLHALock) Value() (bool, string, error) { + leaderkey, err := i.GetLeader() + if err != nil { + return false, "", err + } + + return true, leaderkey, err +} + +// MySQLLock provides an easy way to grab and release mysql +// locks using the built in GET_LOCK function. Note that these +// locks are released when you lose connection to the server. +type MySQLLock struct { + parentConn *MySQLBackend + in *sql.DB + logger log.Logger + statements map[string]*sql.Stmt + key string + value string +} + +// Errors specific to trying to grab a lock in MySQL +var ( + // This is the GlobalLockID for checking if the lock we got is still the current lock + GlobalLockID int64 + // ErrLockHeld is returned when another vault instance already has a lock held for the given key. + ErrLockHeld = errors.New("mysql: lock already held") + // ErrUnlockFailed + ErrUnlockFailed = errors.New("mysql: unable to release lock, already released or not held by this session") + // You were unable to update that you are the new leader in the DB + ErrClaimFailed = errors.New("mysql: unable to update DB with new leader information") + // Error to throw if between getting the lock and checking the ID of it we lost it. + ErrSettingGlobalID = errors.New("mysql: getting global lock id failed") +) + +// NewMySQLLock helper function +func NewMySQLLock(in *MySQLBackend, l log.Logger, key, value string) (*MySQLLock, error) { + // Create a new MySQL connection so we can close this and have no effect on + // the rest of the MySQL backend and any cleanup that might need to be done. + conn, _ := NewMySQLClient(in.conf, in.logger) + + m := &MySQLLock{ + parentConn: in, + in: conn, + logger: l, + statements: make(map[string]*sql.Stmt), + key: key, + value: value, + } + + statements := map[string]string{ + "put": "INSERT INTO " + in.dbLockTable + + " VALUES( ?, ? ) ON DUPLICATE KEY UPDATE current_leader=VALUES(current_leader)", + } + + for name, query := range statements { + if err := m.prepare(name, query); err != nil { + return nil, err + } + } + + return m, nil +} + +// prepare is a helper to prepare a query for future execution +func (m *MySQLLock) prepare(name, query string) error { + stmt, err := m.in.Prepare(query) + if err != nil { + return fmt.Errorf("failed to prepare %q: %w", name, err) + } + m.statements[name] = stmt + return nil +} + +// update the current cluster leader in the DB. This is used so +// we can tell the servers in standby who the active leader is. +func (i *MySQLLock) becomeLeader() error { + _, err := i.statements["put"].Exec("leader", i.value) + if err != nil { + return err + } + + return nil +} + +// Lock will try to get a lock for an indefinite amount of time +// based on the given key that has been requested. +func (i *MySQLLock) Lock() error { + defer metrics.MeasureSince([]string{"mysql", "get_lock"}, time.Now()) + + // Lock timeout math.MaxInt32 instead of -1 solves compatibility issues with + // different MySQL flavours i.e. 
MariaDB + rows, err := i.in.Query("SELECT GET_LOCK(?, ?), IS_USED_LOCK(?)", i.key, math.MaxInt32, i.key) + if err != nil { + return err + } + + defer rows.Close() + rows.Next() + var lock sql.NullInt64 + var connectionID sql.NullInt64 + rows.Scan(&lock, &connectionID) + + if rows.Err() != nil { + return rows.Err() + } + + // 1 is returned from GET_LOCK if it was able to get the lock + // 0 if it failed and NULL if some strange error happened. + // https://dev.mysql.com/doc/refman/8.0/en/miscellaneous-functions.html#function_get-lock + if !lock.Valid || lock.Int64 != 1 { + return ErrLockHeld + } + + // Since we have the lock alert the rest of the cluster + // that we are now the active leader. + err = i.becomeLeader() + if err != nil { + return ErrLockHeld + } + + // This will return the connection ID of NULL if an error happens + // https://dev.mysql.com/doc/refman/8.0/en/miscellaneous-functions.html#function_is-used-lock + if !connectionID.Valid { + return ErrSettingGlobalID + } + + GlobalLockID = connectionID.Int64 + + return nil +} + +// Unlock just closes the connection. This is because closing the MySQL connection +// is a 100% reliable way to close the lock. If you just release the lock you must +// do it from the same mysql connection_id that you originally created it from. This +// is a huge hastle and I actually couldn't find a clean way to do this although one +// likely does exist. Closing the connection however ensures we don't ever get into a +// state where we try to release the lock and it hangs it is also much less code. +func (i *MySQLLock) Unlock() error { + err := i.in.Close() + if err != nil { + return ErrUnlockFailed + } + + return nil +} + +// Establish a TLS connection with a given CA certificate +// Register a tsl.Config associated with the same key as the dns param from sql.Open +// foo:bar@tcp(127.0.0.1:3306)/dbname?tls=default +func setupMySQLTLSConfig(tlsCaFile string) error { + rootCertPool := x509.NewCertPool() + + pem, err := ioutil.ReadFile(tlsCaFile) + if err != nil { + return err + } + + if ok := rootCertPool.AppendCertsFromPEM(pem); !ok { + return err + } + + err = mysql.RegisterTLSConfig(mysqlTLSKey, &tls.Config{ + RootCAs: rootCertPool, + }) + if err != nil { + return err + } + + return nil +} diff --git a/physical/mysql/mysql_test.go b/physical/mysql/mysql_test.go new file mode 100644 index 0000000..b13c7e4 --- /dev/null +++ b/physical/mysql/mysql_test.go @@ -0,0 +1,346 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package mysql + +import ( + "bytes" + "os" + "strings" + "testing" + "time" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" + + _ "github.com/go-sql-driver/mysql" + mysql "github.com/go-sql-driver/mysql" + + mysqlhelper "github.com/hashicorp/vault/helper/testhelpers/mysql" +) + +func TestMySQLPlaintextCatch(t *testing.T) { + address := os.Getenv("MYSQL_ADDR") + if address == "" { + t.SkipNow() + } + + database := os.Getenv("MYSQL_DB") + if database == "" { + database = "test" + } + + table := os.Getenv("MYSQL_TABLE") + if table == "" { + table = "test" + } + + username := os.Getenv("MYSQL_USERNAME") + password := os.Getenv("MYSQL_PASSWORD") + + // Run vault tests + var buf bytes.Buffer + log.DefaultOutput = &buf + + logger := logging.NewVaultLogger(log.Debug) + + NewMySQLBackend(map[string]string{ + "address": address, + "database": database, + "table": table, + "username": username, + "password": password, + "plaintext_connection_allowed": "false", + }, logger) + + str := buf.String() + dataIdx := strings.IndexByte(str, ' ') + rest := str[dataIdx+1:] + + if !strings.Contains(rest, "credentials will be sent in plaintext") { + t.Fatalf("No warning of plaintext credentials occurred") + } +} + +func TestMySQLBackend(t *testing.T) { + address := os.Getenv("MYSQL_ADDR") + if address == "" { + t.SkipNow() + } + + database := os.Getenv("MYSQL_DB") + if database == "" { + database = "test" + } + + table := os.Getenv("MYSQL_TABLE") + if table == "" { + table = "test" + } + + username := os.Getenv("MYSQL_USERNAME") + password := os.Getenv("MYSQL_PASSWORD") + + // Run vault tests + logger := logging.NewVaultLogger(log.Debug) + + b, err := NewMySQLBackend(map[string]string{ + "address": address, + "database": database, + "table": table, + "username": username, + "password": password, + "plaintext_connection_allowed": "true", + "max_connection_lifetime": "1", + }, logger) + if err != nil { + t.Fatalf("Failed to create new backend: %v", err) + } + + defer func() { + mysql := b.(*MySQLBackend) + _, err := mysql.client.Exec("DROP TABLE IF EXISTS " + mysql.dbTable + " ," + mysql.dbLockTable) + if err != nil { + t.Fatalf("Failed to drop table: %v", err) + } + }() + + physical.ExerciseBackend(t, b) + physical.ExerciseBackend_ListPrefix(t, b) +} + +func TestMySQLHABackend(t *testing.T) { + address := os.Getenv("MYSQL_ADDR") + if address == "" { + t.SkipNow() + } + + database := os.Getenv("MYSQL_DB") + if database == "" { + database = "test" + } + + table := os.Getenv("MYSQL_TABLE") + if table == "" { + table = "test" + } + + username := os.Getenv("MYSQL_USERNAME") + password := os.Getenv("MYSQL_PASSWORD") + + // Run vault tests + logger := logging.NewVaultLogger(log.Debug) + config := map[string]string{ + "address": address, + "database": database, + "table": table, + "username": username, + "password": password, + "ha_enabled": "true", + "plaintext_connection_allowed": "true", + } + + b, err := NewMySQLBackend(config, logger) + if err != nil { + t.Fatalf("Failed to create new backend: %v", err) + } + + defer func() { + mysql := b.(*MySQLBackend) + _, err := mysql.client.Exec("DROP TABLE IF EXISTS " + mysql.dbTable + " ," + mysql.dbLockTable) + if err != nil { + t.Fatalf("Failed to drop table: %v", err) + } + }() + + b2, err := NewMySQLBackend(config, logger) + if err != nil { + t.Fatalf("Failed to create new backend: %v", err) + } + + physical.ExerciseHABackend(t, 
b.(physical.HABackend), b2.(physical.HABackend)) +} + +// TestMySQLHABackend_LockFailPanic is a regression test for the panic shown in +// https://github.com/hashicorp/vault/issues/8203 and patched in +// https://github.com/hashicorp/vault/pull/8229 +func TestMySQLHABackend_LockFailPanic(t *testing.T) { + cleanup, connURL := mysqlhelper.PrepareTestContainer(t, false, "secret") + + cfg, err := mysql.ParseDSN(connURL) + if err != nil { + t.Fatal(err) + } + + if err := mysqlhelper.TestCredsExist(t, connURL, cfg.User, cfg.Passwd); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } + + table := "test" + logger := logging.NewVaultLogger(log.Debug) + config := map[string]string{ + "address": cfg.Addr, + "database": cfg.DBName, + "table": table, + "username": cfg.User, + "password": cfg.Passwd, + "ha_enabled": "true", + "plaintext_connection_allowed": "true", + } + + b, err := NewMySQLBackend(config, logger) + if err != nil { + t.Fatalf("Failed to create new backend: %v", err) + } + + b2, err := NewMySQLBackend(config, logger) + if err != nil { + t.Fatalf("Failed to create new backend: %v", err) + } + + b1ha := b.(physical.HABackend) + b2ha := b2.(physical.HABackend) + + // Copied from ExerciseHABackend - ensuring things are normal at this point + // Get the lock + lock, err := b1ha.LockWith("foo", "bar") + if err != nil { + t.Fatalf("initial lock: %v", err) + } + + // Attempt to lock + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("lock attempt 1: %v", err) + } + if leaderCh == nil { + t.Fatalf("missing leaderCh") + } + + // Check the value + held, val, err := lock.Value() + if err != nil { + t.Fatalf("err: %v", err) + } + if !held { + t.Errorf("should be held") + } + if val != "bar" { + t.Errorf("expected value bar: %v", err) + } + + // Second acquisition should fail + lock2, err := b2ha.LockWith("foo", "baz") + if err != nil { + t.Fatalf("lock 2: %v", err) + } + + stopCh := make(chan struct{}) + time.AfterFunc(10*time.Second, func() { + close(stopCh) + }) + + // Attempt to lock - can't lock because lock1 is held - this is normal + leaderCh2, err := lock2.Lock(stopCh) + if err != nil { + t.Fatalf("stop lock 2: %v", err) + } + if leaderCh2 != nil { + t.Errorf("should not have gotten leaderCh: %v", leaderCh2) + } + // end normal + + // Clean up the database. When Lock() is called, a new connection is created + // using the configuration. If that connection cannot be created, there was a + // panic due to not returning with the connection error. 
Here we intentionally + // break the config for b2, so a new connection can't be made, which would + // trigger the panic shown in https://github.com/hashicorp/vault/issues/8203 + cleanup() + + stopCh2 := make(chan struct{}) + time.AfterFunc(10*time.Second, func() { + close(stopCh2) + }) + leaderCh2, err = lock2.Lock(stopCh2) + if err == nil { + t.Fatalf("expected error, got none, leaderCh2=%v", leaderCh2) + } +} + +func TestValidateDBTable(t *testing.T) { + type testCase struct { + database string + table string + expectErr bool + } + + tests := map[string]testCase{ + "empty database & table": {"", "", true}, + "empty database": {"", "a", true}, + "empty table": {"a", "", true}, + "ascii database": {"abcde", "a", false}, + "ascii table": {"a", "abcde", false}, + "ascii database & table": {"abcde", "abcde", false}, + "only whitespace db": {" ", "a", true}, + "only whitespace table": {"a", " ", true}, + "whitespace prefix db": {" bcde", "a", true}, + "whitespace middle db": {"ab de", "a", true}, + "whitespace suffix db": {"abcd ", "a", true}, + "whitespace prefix table": {"a", " bcde", true}, + "whitespace middle table": {"a", "ab de", true}, + "whitespace suffix table": {"a", "abcd ", true}, + "backtick prefix db": {"`bcde", "a", true}, + "backtick middle db": {"ab`de", "a", true}, + "backtick suffix db": {"abcd`", "a", true}, + "backtick prefix table": {"a", "`bcde", true}, + "backtick middle table": {"a", "ab`de", true}, + "backtick suffix table": {"a", "abcd`", true}, + "single quote prefix db": {"'bcde", "a", true}, + "single quote middle db": {"ab'de", "a", true}, + "single quote suffix db": {"abcd'", "a", true}, + "single quote prefix table": {"a", "'bcde", true}, + "single quote middle table": {"a", "ab'de", true}, + "single quote suffix table": {"a", "abcd'", true}, + "double quote prefix db": {`"bcde`, "a", true}, + "double quote middle db": {`ab"de`, "a", true}, + "double quote suffix db": {`abcd"`, "a", true}, + "double quote prefix table": {"a", `"bcde`, true}, + "double quote middle table": {"a", `ab"de`, true}, + "double quote suffix table": {"a", `abcd"`, true}, + "0x0000 prefix db": {str(0x0000, 'b', 'c'), "a", true}, + "0x0000 middle db": {str('a', 0x0000, 'c'), "a", true}, + "0x0000 suffix db": {str('a', 'b', 0x0000), "a", true}, + "0x0000 prefix table": {"a", str(0x0000, 'b', 'c'), true}, + "0x0000 middle table": {"a", str('a', 0x0000, 'c'), true}, + "0x0000 suffix table": {"a", str('a', 'b', 0x0000), true}, + "unicode > 0xFFFF prefix db": {str(0x10000, 'b', 'c'), "a", true}, + "unicode > 0xFFFF middle db": {str('a', 0x10000, 'c'), "a", true}, + "unicode > 0xFFFF suffix db": {str('a', 'b', 0x10000), "a", true}, + "unicode > 0xFFFF prefix table": {"a", str(0x10000, 'b', 'c'), true}, + "unicode > 0xFFFF middle table": {"a", str('a', 0x10000, 'c'), true}, + "unicode > 0xFFFF suffix table": {"a", str('a', 'b', 0x10000), true}, + "non-printable prefix db": {str(0x0001, 'b', 'c'), "a", true}, + "non-printable middle db": {str('a', 0x0001, 'c'), "a", true}, + "non-printable suffix db": {str('a', 'b', 0x0001), "a", true}, + "non-printable prefix table": {"a", str(0x0001, 'b', 'c'), true}, + "non-printable middle table": {"a", str('a', 0x0001, 'c'), true}, + "non-printable suffix table": {"a", str('a', 'b', 0x0001), true}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + err := validateDBTable(test.database, test.table) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error 
expected, got: %s", err)
+			}
+		})
+	}
+}
+
+func str(r ...rune) string {
+	return string(r)
+}
diff --git a/physical/oci/oci.go b/physical/oci/oci.go
new file mode 100644
index 0000000..3665813
--- /dev/null
+++ b/physical/oci/oci.go
@@ -0,0 +1,384 @@
+// Copyright © 2019, Oracle and/or its affiliates.
+package oci
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/armon/go-metrics"
+	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/go-secure-stdlib/strutil"
+	"github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/vault/sdk/physical"
+	"github.com/oracle/oci-go-sdk/common"
+	"github.com/oracle/oci-go-sdk/common/auth"
+	"github.com/oracle/oci-go-sdk/objectstorage"
+	"golang.org/x/net/context"
+)
+
+// Verify Backend satisfies the correct interfaces
+var _ physical.Backend = (*Backend)(nil)
+
+const (
+	// Limits maximum outstanding requests
+	MaxNumberOfPermits = 256
+)
+
+var (
+	metricDelete     = []string{"oci", "delete"}
+	metricGet        = []string{"oci", "get"}
+	metricList       = []string{"oci", "list"}
+	metricPut        = []string{"oci", "put"}
+	metricDeleteFull = []string{"oci", "deleteFull"}
+	metricGetFull    = []string{"oci", "getFull"}
+	metricListFull   = []string{"oci", "listFull"}
+	metricPutFull    = []string{"oci", "putFull"}
+
+	metricDeleteHa = []string{"oci", "deleteHa"}
+	metricGetHa    = []string{"oci", "getHa"}
+	metricPutHa    = []string{"oci", "putHa"}
+
+	metricDeleteAcquirePool = []string{"oci", "deleteAcquirePool"}
+	metricGetAcquirePool    = []string{"oci", "getAcquirePool"}
+	metricListAcquirePool   = []string{"oci", "listAcquirePool"}
+	metricPutAcquirePool    = []string{"oci", "putAcquirePool"}
+
+	metricDeleteFailed         = []string{"oci", "deleteFailed"}
+	metricGetFailed            = []string{"oci", "getFailed"}
+	metricListFailed           = []string{"oci", "listFailed"}
+	metricPutFailed            = []string{"oci", "putFailed"}
+	metricHaWatchLockRetriable = []string{"oci", "haWatchLockRetriable"}
+	metricPermitsUsed          = []string{"oci", "permitsUsed"}
+
+	metric5xx = []string{"oci", "5xx"}
+)
+
+type Backend struct {
+	client         *objectstorage.ObjectStorageClient
+	bucketName     string
+	logger         log.Logger
+	permitPool     *physical.PermitPool
+	namespaceName  string
+	haEnabled      bool
+	lockBucketName string
+}
+
+func NewBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
+	bucketName := conf["bucket_name"]
+	if bucketName == "" {
+		return nil, errors.New("missing bucket name")
+	}
+
+	namespaceName := conf["namespace_name"]
+	if namespaceName == "" {
+		return nil, errors.New("missing namespace name")
+	}
+
+	lockBucketName := ""
+	haEnabled := false
+	var err error
+	haEnabledStr := conf["ha_enabled"]
+	if haEnabledStr != "" {
+		haEnabled, err = strconv.ParseBool(haEnabledStr)
+		if err != nil {
+			return nil, fmt.Errorf("failed to parse HA enabled: %w", err)
+		}
+
+		if haEnabled {
+			lockBucketName = conf["lock_bucket_name"]
+			if lockBucketName == "" {
+				return nil, errors.New("missing lock bucket name")
+			}
+		}
+	}
+
+	authTypeAPIKeyBool := false
+	authTypeAPIKeyStr := conf["auth_type_api_key"]
+	if authTypeAPIKeyStr != "" {
+		authTypeAPIKeyBool, err = strconv.ParseBool(authTypeAPIKeyStr)
+		if err != nil {
+			return nil, fmt.Errorf("failed parsing auth_type_api_key parameter: %w", err)
+		}
+	}
+
+	var cp common.ConfigurationProvider
+	if authTypeAPIKeyBool {
+		cp = common.DefaultConfigProvider()
+	} else {
+		cp, err = auth.InstancePrincipalConfigurationProvider()
+		if err != nil {
+			return nil, fmt.Errorf("failed creating 
InstancePrincipalConfigurationProvider: %w", err) + } + } + + objectStorageClient, err := objectstorage.NewObjectStorageClientWithConfigurationProvider(cp) + if err != nil { + return nil, fmt.Errorf("failed creating NewObjectStorageClientWithConfigurationProvider: %w", err) + } + + region := conf["region"] + if region != "" { + objectStorageClient.SetRegion(region) + } + + logger.Debug("configuration", + "bucket_name", bucketName, + "region", region, + "namespace_name", namespaceName, + "ha_enabled", haEnabled, + "lock_bucket_name", lockBucketName, + "auth_type_api_key", authTypeAPIKeyBool, + ) + + return &Backend{ + client: &objectStorageClient, + bucketName: bucketName, + logger: logger, + permitPool: physical.NewPermitPool(MaxNumberOfPermits), + namespaceName: namespaceName, + haEnabled: haEnabled, + lockBucketName: lockBucketName, + }, nil +} + +func (o *Backend) Put(ctx context.Context, entry *physical.Entry) error { + o.logger.Debug("PUT started") + defer metrics.MeasureSince(metricPutFull, time.Now()) + startAcquirePool := time.Now() + metrics.SetGauge(metricPermitsUsed, float32(o.permitPool.CurrentPermits())) + o.permitPool.Acquire() + defer o.permitPool.Release() + metrics.MeasureSince(metricPutAcquirePool, startAcquirePool) + + defer metrics.MeasureSince(metricPut, time.Now()) + size := int64(len(entry.Value)) + opcClientRequestId, err := uuid.GenerateUUID() + if err != nil { + metrics.IncrCounter(metricPutFailed, 1) + o.logger.Error("failed to generate UUID") + return fmt.Errorf("failed to generate UUID: %w", err) + } + + o.logger.Debug("PUT", "opc-client-request-id", opcClientRequestId) + request := objectstorage.PutObjectRequest{ + NamespaceName: &o.namespaceName, + BucketName: &o.bucketName, + ObjectName: &entry.Key, + ContentLength: &size, + PutObjectBody: ioutil.NopCloser(bytes.NewReader(entry.Value)), + OpcMeta: nil, + OpcClientRequestId: &opcClientRequestId, + } + + resp, err := o.client.PutObject(ctx, request) + if resp.RawResponse != nil && resp.RawResponse.Body != nil { + defer resp.RawResponse.Body.Close() + } + + if err != nil { + metrics.IncrCounter(metricPutFailed, 1) + return fmt.Errorf("failed to put data: %w", err) + } + + o.logRequest("PUT", resp.RawResponse, resp.OpcClientRequestId, resp.OpcRequestId, err) + o.logger.Debug("PUT completed") + + return nil +} + +func (o *Backend) Get(ctx context.Context, key string) (*physical.Entry, error) { + o.logger.Debug("GET started") + defer metrics.MeasureSince(metricGetFull, time.Now()) + metrics.SetGauge(metricPermitsUsed, float32(o.permitPool.CurrentPermits())) + startAcquirePool := time.Now() + o.permitPool.Acquire() + defer o.permitPool.Release() + metrics.MeasureSince(metricGetAcquirePool, startAcquirePool) + + defer metrics.MeasureSince(metricGet, time.Now()) + opcClientRequestId, err := uuid.GenerateUUID() + if err != nil { + o.logger.Error("failed to generate UUID") + return nil, fmt.Errorf("failed to generate UUID: %w", err) + } + o.logger.Debug("GET", "opc-client-request-id", opcClientRequestId) + request := objectstorage.GetObjectRequest{ + NamespaceName: &o.namespaceName, + BucketName: &o.bucketName, + ObjectName: &key, + OpcClientRequestId: &opcClientRequestId, + } + + resp, err := o.client.GetObject(ctx, request) + if resp.RawResponse != nil && resp.RawResponse.Body != nil { + defer resp.RawResponse.Body.Close() + } + o.logRequest("GET", resp.RawResponse, resp.OpcClientRequestId, resp.OpcRequestId, err) + + if err != nil { + if resp.RawResponse != nil && resp.RawResponse.StatusCode == http.StatusNotFound { 
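+			// A 404 from Object Storage means the key simply does not exist;
+			// the physical.Backend contract expects (nil, nil) in that case
+			// rather than an error, so callers can tell a missing key apart
+			// from a failed read.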
+ return nil, nil + } + metrics.IncrCounter(metricGetFailed, 1) + return nil, fmt.Errorf("failed to read Value: %w", err) + } + + body, err := ioutil.ReadAll(resp.Content) + if err != nil { + metrics.IncrCounter(metricGetFailed, 1) + return nil, fmt.Errorf("failed to decode Value into bytes: %w", err) + } + + o.logger.Debug("GET completed") + + return &physical.Entry{ + Key: key, + Value: body, + }, nil +} + +func (o *Backend) Delete(ctx context.Context, key string) error { + o.logger.Debug("DELETE started") + defer metrics.MeasureSince(metricDeleteFull, time.Now()) + metrics.SetGauge(metricPermitsUsed, float32(o.permitPool.CurrentPermits())) + startAcquirePool := time.Now() + o.permitPool.Acquire() + defer o.permitPool.Release() + metrics.MeasureSince(metricDeleteAcquirePool, startAcquirePool) + + defer metrics.MeasureSince(metricDelete, time.Now()) + opcClientRequestId, err := uuid.GenerateUUID() + if err != nil { + o.logger.Error("Delete: error generating UUID") + return fmt.Errorf("failed to generate UUID: %w", err) + } + o.logger.Debug("Delete", "opc-client-request-id", opcClientRequestId) + request := objectstorage.DeleteObjectRequest{ + NamespaceName: &o.namespaceName, + BucketName: &o.bucketName, + ObjectName: &key, + OpcClientRequestId: &opcClientRequestId, + } + + resp, err := o.client.DeleteObject(ctx, request) + if resp.RawResponse != nil && resp.RawResponse.Body != nil { + defer resp.RawResponse.Body.Close() + } + + o.logRequest("DELETE", resp.RawResponse, resp.OpcClientRequestId, resp.OpcRequestId, err) + + if err != nil { + if resp.RawResponse != nil && resp.RawResponse.StatusCode == http.StatusNotFound { + return nil + } + metrics.IncrCounter(metricDeleteFailed, 1) + return fmt.Errorf("failed to delete Key: %w", err) + } + o.logger.Debug("DELETE completed") + + return nil +} + +func (o *Backend) List(ctx context.Context, prefix string) ([]string, error) { + o.logger.Debug("LIST started") + defer metrics.MeasureSince(metricListFull, time.Now()) + metrics.SetGauge(metricPermitsUsed, float32(o.permitPool.CurrentPermits())) + startAcquirePool := time.Now() + o.permitPool.Acquire() + defer o.permitPool.Release() + + metrics.MeasureSince(metricListAcquirePool, startAcquirePool) + defer metrics.MeasureSince(metricList, time.Now()) + var keys []string + delimiter := "/" + var start *string + + for { + opcClientRequestId, err := uuid.GenerateUUID() + if err != nil { + o.logger.Error("List: error generating UUID") + return nil, fmt.Errorf("failed to generate UUID %w", err) + } + o.logger.Debug("LIST", "opc-client-request-id", opcClientRequestId) + request := objectstorage.ListObjectsRequest{ + NamespaceName: &o.namespaceName, + BucketName: &o.bucketName, + Prefix: &prefix, + Delimiter: &delimiter, + Start: start, + OpcClientRequestId: &opcClientRequestId, + } + + resp, err := o.client.ListObjects(ctx, request) + o.logRequest("LIST", resp.RawResponse, resp.OpcClientRequestId, resp.OpcRequestId, err) + + if err != nil { + metrics.IncrCounter(metricListFailed, 1) + return nil, fmt.Errorf("failed to list using prefix: %w", err) + } + + for _, commonPrefix := range resp.Prefixes { + commonPrefix := strings.TrimPrefix(commonPrefix, prefix) + keys = append(keys, commonPrefix) + } + + for _, object := range resp.Objects { + key := strings.TrimPrefix(*object.Name, prefix) + keys = append(keys, key) + } + + // Duplicate keys are not expected + keys = strutil.RemoveDuplicates(keys, false) + + if resp.NextStartWith == nil { + resp.RawResponse.Body.Close() + break + } + + start = 
resp.NextStartWith + resp.RawResponse.Body.Close() + } + + sort.Strings(keys) + o.logger.Debug("LIST completed") + return keys, nil +} + +func (o *Backend) logRequest(operation string, response *http.Response, clientOpcRequestIdPtr *string, opcRequestIdPtr *string, err error) { + statusCode := 0 + clientOpcRequestId := " " + opcRequestId := " " + + if response != nil { + statusCode = response.StatusCode + if statusCode/100 == 5 { + metrics.IncrCounter(metric5xx, 1) + } + } + + if clientOpcRequestIdPtr != nil { + clientOpcRequestId = *clientOpcRequestIdPtr + } + + if opcRequestIdPtr != nil { + opcRequestId = *opcRequestIdPtr + } + + statusCodeStr := "No response" + if statusCode != 0 { + statusCodeStr = strconv.Itoa(statusCode) + } + + logLine := fmt.Sprintf("%s client:opc-request-id %s opc-request-id: %s status-code: %s", + operation, clientOpcRequestId, opcRequestId, statusCodeStr) + if err != nil && statusCode/100 == 5 { + o.logger.Error(logLine, "error", err) + } +} diff --git a/physical/oci/oci_ha.go b/physical/oci/oci_ha.go new file mode 100644 index 0000000..a4c6ad5 --- /dev/null +++ b/physical/oci/oci_ha.go @@ -0,0 +1,551 @@ +// Copyright © 2019, Oracle and/or its affiliates. +package oci + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "sync" + "sync/atomic" + "time" + + "github.com/armon/go-metrics" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/sdk/physical" + "github.com/oracle/oci-go-sdk/objectstorage" +) + +// The lock implementation below prioritizes ensuring that there are not 2 primary at any given point in time +// over high availability of the primary instance + +// Verify Backend satisfies the correct interfaces +var ( + _ physical.HABackend = (*Backend)(nil) + _ physical.Lock = (*Lock)(nil) +) + +const ( + // LockRenewInterval is the time to wait between lock renewals. + LockRenewInterval = 3 * time.Second + + // LockRetryInterval is the amount of time to wait if the lock fails before trying again. + LockRetryInterval = 5 * time.Second + + // LockWatchRetryInterval is the amount of time to wait if a watch fails before trying again. + LockWatchRetryInterval = 2 * time.Second + + // LockTTL is the default lock TTL. + LockTTL = 15 * time.Second + + // LockWatchRetryMax is the number of times to retry a failed watch before signaling that leadership is lost. + LockWatchRetryMax = 4 + + // LockCacheMinAcceptableAge is minimum cache age in seconds to determine that its safe for a secondary instance + // to acquire lock. + LockCacheMinAcceptableAge = 45 * time.Second + + // LockWriteRetriesOnFailures is the number of retries that are made on write 5xx failures. + LockWriteRetriesOnFailures = 4 + + ObjectStorageCallsReadTimeout = 3 * time.Second + + ObjectStorageCallsWriteTimeout = 3 * time.Second +) + +type LockCache struct { + // ETag values are unique identifiers generated by the OCI service and changed every time the object is modified. + etag string + lastUpdate time.Time + lockRecord *LockRecord +} + +type Lock struct { + // backend is the underlying physical backend. + backend *Backend + + // Key is the name of the Key. Value is the Value of the Key. + key, value string + + // held is a boolean indicating if the lock is currently held. + held bool + + // Identity is the internal Identity of this Key (unique to this server instance). + identity string + + internalLock sync.Mutex + + // stopCh is the channel that stops all operations. It may be closed in the + // event of a leader loss or graceful shutdown. 
stopped is a boolean + // indicating if we are stopped - it exists to prevent double closing the + // channel. stopLock is a mutex around the locks. + stopCh chan struct{} + stopped bool + stopLock sync.Mutex + + lockRecordCache atomic.Value + + // Allow modifying the Lock durations for ease of unit testing. + renewInterval time.Duration + retryInterval time.Duration + ttl time.Duration + watchRetryInterval time.Duration + watchRetryMax int +} + +type LockRecord struct { + Key string + Value string + Identity string +} + +var ( + metricLockUnlock = []string{"oci", "lock", "unlock"} + metricLockLock = []string{"oci", "lock", "lock"} + metricLockValue = []string{"oci", "lock", "Value"} + metricLeaderValue = []string{"oci", "leader", "Value"} +) + +func (b *Backend) HAEnabled() bool { + return b.haEnabled +} + +// LockWith acquires a mutual exclusion based on the given Key. +func (b *Backend) LockWith(key, value string) (physical.Lock, error) { + identity, err := uuid.GenerateUUID() + if err != nil { + return nil, fmt.Errorf("Lock with: %w", err) + } + return &Lock{ + backend: b, + key: key, + value: value, + identity: identity, + stopped: true, + + renewInterval: LockRenewInterval, + retryInterval: LockRetryInterval, + ttl: LockTTL, + watchRetryInterval: LockWatchRetryInterval, + watchRetryMax: LockWatchRetryMax, + }, nil +} + +func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { + l.backend.logger.Debug("Lock() called") + defer metrics.MeasureSince(metricLockLock, time.Now().UTC()) + l.internalLock.Lock() + defer l.internalLock.Unlock() + if l.held { + return nil, errors.New("lock already held") + } + + // Attempt to lock - this function blocks until a lock is acquired or an error + // occurs. + acquired, err := l.attemptLock(stopCh) + if err != nil { + return nil, fmt.Errorf("lock: %w", err) + } + if !acquired { + return nil, nil + } + + // We have the lock now + l.held = true + + // Build the locks + l.stopLock.Lock() + l.stopCh = make(chan struct{}) + l.stopped = false + l.stopLock.Unlock() + + // Periodically renew and watch the lock + go l.renewLock() + go l.watchLock() + + return l.stopCh, nil +} + +// attemptLock attempts to acquire a lock. If the given channel is closed, the +// acquisition attempt stops. This function returns when a lock is acquired or +// an error occurs. +func (l *Lock) attemptLock(stopCh <-chan struct{}) (bool, error) { + l.backend.logger.Debug("AttemptLock() called") + ticker := time.NewTicker(l.retryInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + acquired, err := l.writeLock() + if err != nil { + return false, fmt.Errorf("attempt lock: %w", err) + } + if !acquired { + continue + } + + return true, nil + case <-stopCh: + return false, nil + } + } +} + +// renewLock renews the given lock until the channel is closed. +func (l *Lock) renewLock() { + l.backend.logger.Debug("RenewLock() called") + ticker := time.NewTicker(l.renewInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + l.writeLock() + case <-l.stopCh: + return + } + } +} + +func loadLockRecordCache(l *Lock) *LockCache { + lockRecordCache := l.lockRecordCache.Load() + if lockRecordCache == nil { + return nil + } + return lockRecordCache.(*LockCache) +} + +// watchLock checks whether the lock has changed in the table and closes the +// leader channel accordingly. If an error occurs during the check, watchLock +// will retry the operation and then close the leader channel if it can't +// succeed after retries. 
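+//
+// In outline, each iteration waits for watchRetryInterval, confirms that the
+// cached lock record still belongs to this instance and is fresher than the
+// TTL, and then re-reads the record from Object Storage. Any staleness,
+// identity mismatch, or exhausted retry budget falls through to the common
+// exit path, which closes stopCh to surrender leadership.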
+func (l *Lock) watchLock() { + l.backend.logger.Debug("WatchLock() called") + retries := 0 + ticker := time.NewTicker(l.watchRetryInterval) + defer ticker.Stop() + +OUTER: + for { + // Check if the channel is already closed + select { + case <-l.stopCh: + l.backend.logger.Debug("WatchLock():Stop lock signaled/closed.") + break OUTER + default: + } + + // Check if we've exceeded retries + if retries >= l.watchRetryMax-1 { + l.backend.logger.Debug("WatchLock: Failed to get lock data from object storage. Giving up the lease after max retries") + break OUTER + } + + // Wait for the timer + select { + case <-ticker.C: + case <-l.stopCh: + break OUTER + } + + lockRecordCache := loadLockRecordCache(l) + if (lockRecordCache == nil) || + (lockRecordCache.lockRecord == nil) || + (lockRecordCache.lockRecord.Identity != l.identity) || + (time.Now().Sub(lockRecordCache.lastUpdate) > l.ttl) { + l.backend.logger.Debug("WatchLock: Lock record cache is nil, stale or does not belong to self.") + break OUTER + } + + lockRecord, _, err := l.get(context.Background()) + if err != nil { + retries++ + l.backend.logger.Debug("WatchLock: Failed to get lock data from object storage. Retrying..") + metrics.SetGauge(metricHaWatchLockRetriable, 1) + continue + } + + if (lockRecord == nil) || (lockRecord.Identity != l.identity) { + l.backend.logger.Debug("WatchLock: Lock record cache is nil or does not belong to self.") + break OUTER + } + + // reset retries counter on success + retries = 0 + l.backend.logger.Debug("WatchLock() successful") + metrics.SetGauge(metricHaWatchLockRetriable, 0) + } + + l.stopLock.Lock() + defer l.stopLock.Unlock() + if !l.stopped { + l.stopped = true + l.backend.logger.Debug("Closing the stop channel to give up leadership.") + close(l.stopCh) + } +} + +func (l *Lock) Unlock() error { + l.backend.logger.Debug("Unlock() called") + defer metrics.MeasureSince(metricLockUnlock, time.Now().UTC()) + + l.internalLock.Lock() + defer l.internalLock.Unlock() + if !l.held { + return nil + } + + // Stop any existing locking or renewal attempts + l.stopLock.Lock() + if !l.stopped { + l.stopped = true + close(l.stopCh) + } + l.stopLock.Unlock() + + // We are no longer holding the lock + l.held = false + + // Get current lock record + currentLockRecord, etag, err := l.get(context.Background()) + if err != nil { + return fmt.Errorf("error reading lock record: %w", err) + } + + if currentLockRecord != nil && currentLockRecord.Identity == l.identity { + + defer metrics.MeasureSince(metricDeleteHa, time.Now()) + opcClientRequestId, err := uuid.GenerateUUID() + if err != nil { + l.backend.logger.Debug("Unlock: error generating UUID") + return fmt.Errorf("failed to generate UUID: %w", err) + } + l.backend.logger.Debug("Unlock", "opc-client-request-id", opcClientRequestId) + request := objectstorage.DeleteObjectRequest{ + NamespaceName: &l.backend.namespaceName, + BucketName: &l.backend.lockBucketName, + ObjectName: &l.key, + IfMatch: &etag, + OpcClientRequestId: &opcClientRequestId, + } + + response, err := l.backend.client.DeleteObject(context.Background(), request) + l.backend.logRequest("deleteHA", response.RawResponse, response.OpcClientRequestId, response.OpcRequestId, err) + + if err != nil { + metrics.IncrCounter(metricDeleteFailed, 1) + return fmt.Errorf("write lock: %w", err) + } + } + + return nil +} + +func (l *Lock) Value() (bool, string, error) { + l.backend.logger.Debug("Value() called") + defer metrics.MeasureSince(metricLockValue, time.Now().UTC()) + + lockRecord, _, err := 
l.get(context.Background()) + if err != nil { + return false, "", err + } + if lockRecord == nil { + return false, "", err + } + return true, lockRecord.Value, nil +} + +// get retrieves the Value for the lock. +func (l *Lock) get(ctx context.Context) (*LockRecord, string, error) { + l.backend.logger.Debug("Called getLockRecord()") + + // Read lock Key + + defer metrics.MeasureSince(metricGetHa, time.Now()) + opcClientRequestId, err := uuid.GenerateUUID() + if err != nil { + l.backend.logger.Error("getHa: error generating UUID") + return nil, "", fmt.Errorf("failed to generate UUID: %w", err) + } + l.backend.logger.Debug("getHa", "opc-client-request-id", opcClientRequestId) + + request := objectstorage.GetObjectRequest{ + NamespaceName: &l.backend.namespaceName, + BucketName: &l.backend.lockBucketName, + ObjectName: &l.key, + OpcClientRequestId: &opcClientRequestId, + } + + ctx, cancel := context.WithTimeout(ctx, ObjectStorageCallsReadTimeout) + defer cancel() + + response, err := l.backend.client.GetObject(ctx, request) + l.backend.logRequest("getHA", response.RawResponse, response.OpcClientRequestId, response.OpcRequestId, err) + + if err != nil { + if response.RawResponse != nil && response.RawResponse.StatusCode == http.StatusNotFound { + return nil, "", nil + } + + metrics.IncrCounter(metricGetFailed, 1) + l.backend.logger.Error("Error calling GET", "err", err) + return nil, "", fmt.Errorf("failed to read Value for %q: %w", l.key, err) + } + + defer response.RawResponse.Body.Close() + + body, err := ioutil.ReadAll(response.Content) + if err != nil { + metrics.IncrCounter(metricGetFailed, 1) + l.backend.logger.Error("Error reading content", "err", err) + return nil, "", fmt.Errorf("failed to decode Value into bytes: %w", err) + } + + var lockRecord LockRecord + err = json.Unmarshal(body, &lockRecord) + if err != nil { + metrics.IncrCounter(metricGetFailed, 1) + l.backend.logger.Error("Error un-marshalling content", "err", err) + return nil, "", fmt.Errorf("failed to read Value for %q: %w", l.key, err) + } + + return &lockRecord, *response.ETag, nil +} + +func (l *Lock) writeLock() (bool, error) { + l.backend.logger.Debug("WriteLock() called") + + // Create a transaction to read and the update (maybe) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // The transaction will be retried, and it could sit in a queue behind, say, + // the delete operation. To stop the transaction, we close the context when + // the associated stopCh is received. + go func() { + select { + case <-l.stopCh: + cancel() + case <-ctx.Done(): + } + }() + + lockRecordCache := loadLockRecordCache(l) + if (lockRecordCache == nil) || lockRecordCache.lockRecord == nil || + lockRecordCache.lockRecord.Identity != l.identity || + time.Now().Sub(lockRecordCache.lastUpdate) > l.ttl { + // case secondary + currentLockRecord, currentEtag, err := l.get(ctx) + if err != nil { + return false, fmt.Errorf("error reading lock record: %w", err) + } + + if (lockRecordCache == nil) || lockRecordCache.etag != currentEtag { + // update cached lock record + l.lockRecordCache.Store(&LockCache{ + etag: currentEtag, + lastUpdate: time.Now().UTC(), + lockRecord: currentLockRecord, + }) + + lockRecordCache = loadLockRecordCache(l) + } + + // Current lock record being null implies that there is no leader. In this case we want to try acquiring lock. 
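+		// Conversely, if a leader record exists and our cached view of it is
+		// younger than LockCacheMinAcceptableAge, back off: only a record
+		// whose ETag has sat unchanged in the cache for at least that long
+		// is treated as abandoned and therefore safe to try to steal below.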
+ if currentLockRecord != nil && time.Now().Sub(lockRecordCache.lastUpdate) < LockCacheMinAcceptableAge { + return false, nil + } + // cache is old enough and current, try acquiring lock as secondary + } + + newLockRecord := &LockRecord{ + Key: l.key, + Value: l.value, + Identity: l.identity, + } + + newLockRecordJson, err := json.Marshal(newLockRecord) + if err != nil { + return false, fmt.Errorf("error reading lock record: %w", err) + } + + defer metrics.MeasureSince(metricPutHa, time.Now()) + + opcClientRequestId, err := uuid.GenerateUUID() + if err != nil { + l.backend.logger.Error("putHa: error generating UUID") + return false, fmt.Errorf("failed to generate UUID: %w", err) + } + l.backend.logger.Debug("putHa", "opc-client-request-id", opcClientRequestId) + size := int64(len(newLockRecordJson)) + putRequest := objectstorage.PutObjectRequest{ + NamespaceName: &l.backend.namespaceName, + BucketName: &l.backend.lockBucketName, + ObjectName: &l.key, + ContentLength: &size, + PutObjectBody: ioutil.NopCloser(bytes.NewReader(newLockRecordJson)), + OpcMeta: nil, + OpcClientRequestId: &opcClientRequestId, + } + + if lockRecordCache.etag == "" { + noneMatch := "*" + putRequest.IfNoneMatch = &noneMatch + } else { + putRequest.IfMatch = &lockRecordCache.etag + } + + newtEtag := "" + for i := 1; i <= LockWriteRetriesOnFailures; i++ { + writeCtx, writeCancel := context.WithTimeout(ctx, ObjectStorageCallsWriteTimeout) + defer writeCancel() + + putObjectResponse, putObjectError := l.backend.client.PutObject(writeCtx, putRequest) + l.backend.logRequest("putHA", putObjectResponse.RawResponse, putObjectResponse.OpcClientRequestId, putObjectResponse.OpcRequestId, putObjectError) + + if putObjectError == nil { + newtEtag = *putObjectResponse.ETag + putObjectResponse.RawResponse.Body.Close() + break + } + + err = putObjectError + + if putObjectResponse.RawResponse == nil { + metrics.IncrCounter(metricPutFailed, 1) + l.backend.logger.Error("PUT", "err", err) + break + } + + putObjectResponse.RawResponse.Body.Close() + + // Retry if the return code is 5xx + if (putObjectResponse.RawResponse.StatusCode / 100) == 5 { + metrics.IncrCounter(metricPutFailed, 1) + l.backend.logger.Warn("PUT. Retrying..", "err", err) + time.Sleep(time.Duration(100*i) * time.Millisecond) + } else { + l.backend.logger.Error("PUT", "err", err) + break + } + } + + if err != nil { + return false, fmt.Errorf("write lock: %w", err) + } + + l.backend.logger.Debug("Lock written", string(newLockRecordJson)) + + l.lockRecordCache.Store(&LockCache{ + etag: newtEtag, + lastUpdate: time.Now().UTC(), + lockRecord: newLockRecord, + }) + + metrics.SetGauge(metricLeaderValue, 1) + return true, nil +} diff --git a/physical/oci/oci_ha_test.go b/physical/oci/oci_ha_test.go new file mode 100644 index 0000000..b7213b9 --- /dev/null +++ b/physical/oci/oci_ha_test.go @@ -0,0 +1,39 @@ +// Copyright © 2019, Oracle and/or its affiliates. +package oci + +import ( + "os" + "testing" + + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/sdk/physical" + "github.com/oracle/oci-go-sdk/common" + "github.com/oracle/oci-go-sdk/objectstorage" +) + +func TestOCIHABackend(t *testing.T) { + // Skip tests if we are not running acceptance tests + if os.Getenv("VAULT_ACC") == "" { + t.SkipNow() + } + + if !hasOCICredentials() { + t.Skip("Skipping because OCI credentials could not be resolved. 
See https://pkg.go.dev/github.com/oracle/oci-go-sdk/common#DefaultConfigProvider for information on how to set up OCI credentials.") + } + + bucketName, _ := uuid.GenerateUUID() + configProvider := common.DefaultConfigProvider() + objectStorageClient, _ := objectstorage.NewObjectStorageClientWithConfigurationProvider(configProvider) + namespaceName := getNamespaceName(objectStorageClient, t) + + createBucket(bucketName, getTenancyOcid(configProvider, t), namespaceName, objectStorageClient, t) + defer deleteBucket(namespaceName, bucketName, objectStorageClient, t) + + backend := createBackend(bucketName, namespaceName, "true", bucketName, t) + ha, ok := backend.(physical.HABackend) + if !ok { + t.Fatalf("does not implement") + } + + physical.ExerciseHABackend(t, ha, ha) +} diff --git a/physical/oci/oci_test.go b/physical/oci/oci_test.go new file mode 100644 index 0000000..e20b808 --- /dev/null +++ b/physical/oci/oci_test.go @@ -0,0 +1,105 @@ +// Copyright © 2019, Oracle and/or its affiliates. +package oci + +import ( + "os" + "testing" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" + "github.com/oracle/oci-go-sdk/common" + "github.com/oracle/oci-go-sdk/objectstorage" + "golang.org/x/net/context" +) + +func TestOCIBackend(t *testing.T) { + // Skip tests if we are not running acceptance tests + if os.Getenv("VAULT_ACC") == "" { + t.SkipNow() + } + + if !hasOCICredentials() { + t.Skip("Skipping because OCI credentials could not be resolved. See https://pkg.go.dev/github.com/oracle/oci-go-sdk/common#DefaultConfigProvider for information on how to set up OCI credentials.") + } + + bucketName, _ := uuid.GenerateUUID() + configProvider := common.DefaultConfigProvider() + objectStorageClient, _ := objectstorage.NewObjectStorageClientWithConfigurationProvider(configProvider) + namespaceName := getNamespaceName(objectStorageClient, t) + + createBucket(bucketName, getTenancyOcid(configProvider, t), namespaceName, objectStorageClient, t) + defer deleteBucket(namespaceName, bucketName, objectStorageClient, t) + + backend := createBackend(bucketName, namespaceName, "false", "", t) + physical.ExerciseBackend(t, backend) + physical.ExerciseBackend_ListPrefix(t, backend) +} + +func createBucket(bucketName string, tenancyOcid string, namespaceName string, objectStorageClient objectstorage.ObjectStorageClient, t *testing.T) { + createBucketRequest := objectstorage.CreateBucketRequest{ + NamespaceName: &namespaceName, + } + createBucketRequest.CompartmentId = &tenancyOcid + createBucketRequest.Name = &bucketName + createBucketRequest.Metadata = make(map[string]string) + createBucketRequest.PublicAccessType = objectstorage.CreateBucketDetailsPublicAccessTypeNopublicaccess + _, err := objectStorageClient.CreateBucket(context.Background(), createBucketRequest) + if err != nil { + t.Fatalf("Failed to create bucket: %v", err) + } +} + +func deleteBucket(nameSpaceName string, bucketName string, objectStorageClient objectstorage.ObjectStorageClient, t *testing.T) { + request := objectstorage.DeleteBucketRequest{ + NamespaceName: &nameSpaceName, + BucketName: &bucketName, + } + _, err := objectStorageClient.DeleteBucket(context.Background(), request) + if err != nil { + t.Fatalf("Failed to delete bucket: %v", err) + } +} + +func getTenancyOcid(configProvider common.ConfigurationProvider, t *testing.T) (tenancyOcid string) { + tenancyOcid, err := configProvider.TenancyOCID() + if err != nil { + 
t.Fatalf("Failed to get tenancy ocid: %v", err) + } + return tenancyOcid +} + +func createBackend(bucketName string, namespaceName string, haEnabledStr string, lockBucketName string, t *testing.T) physical.Backend { + backend, err := NewBackend(map[string]string{ + "auth_type_api_key": "true", + "bucket_name": bucketName, + "namespace_name": namespaceName, + "ha_enabled": haEnabledStr, + "lock_bucket_name": lockBucketName, + }, logging.NewVaultLogger(log.Trace)) + if err != nil { + t.Fatalf("Failed to create new backend: %v", err) + } + return backend +} + +func getNamespaceName(objectStorageClient objectstorage.ObjectStorageClient, t *testing.T) string { + response, err := objectStorageClient.GetNamespace(context.Background(), objectstorage.GetNamespaceRequest{}) + if err != nil { + t.Fatalf("Failed to get namespaceName: %v", err) + } + nameSpaceName := *response.Value + return nameSpaceName +} + +func hasOCICredentials() bool { + configProvider := common.DefaultConfigProvider() + + _, err := configProvider.KeyID() + if err != nil { + return false + } + + return true +} diff --git a/physical/postgresql/postgresql.go b/physical/postgresql/postgresql.go new file mode 100644 index 0000000..a701330 --- /dev/null +++ b/physical/postgresql/postgresql.go @@ -0,0 +1,474 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package postgresql + +import ( + "context" + "database/sql" + "fmt" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/armon/go-metrics" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/sdk/database/helper/dbutil" + "github.com/hashicorp/vault/sdk/physical" + _ "github.com/jackc/pgx/v4/stdlib" +) + +const ( + + // The lock TTL matches the default that Consul API uses, 15 seconds. + // Used as part of SQL commands to set/extend lock expiry time relative to + // database clock. + PostgreSQLLockTTLSeconds = 15 + + // The amount of time to wait between the lock renewals + PostgreSQLLockRenewInterval = 5 * time.Second + + // PostgreSQLLockRetryInterval is the amount of time to wait + // if a lock fails before trying again. + PostgreSQLLockRetryInterval = time.Second +) + +// Verify PostgreSQLBackend satisfies the correct interfaces +var _ physical.Backend = (*PostgreSQLBackend)(nil) + +// HA backend was implemented based on the DynamoDB backend pattern +// With distinction using central postgres clock, hereby avoiding +// possible issues with multiple clocks +var ( + _ physical.HABackend = (*PostgreSQLBackend)(nil) + _ physical.Lock = (*PostgreSQLLock)(nil) +) + +// PostgreSQL Backend is a physical backend that stores data +// within a PostgreSQL database. +type PostgreSQLBackend struct { + table string + client *sql.DB + put_query string + get_query string + delete_query string + list_query string + + ha_table string + haGetLockValueQuery string + haUpsertLockIdentityExec string + haDeleteLockExec string + + haEnabled bool + logger log.Logger + permitPool *physical.PermitPool +} + +// PostgreSQLLock implements a lock using an PostgreSQL client. +type PostgreSQLLock struct { + backend *PostgreSQLBackend + value, key string + identity string + lock sync.Mutex + + renewTicker *time.Ticker + + // ttlSeconds is how long a lock is valid for + ttlSeconds int + + // renewInterval is how much time to wait between lock renewals. 
must be << ttl + renewInterval time.Duration + + // retryInterval is how much time to wait between attempts to grab the lock + retryInterval time.Duration +} + +// NewPostgreSQLBackend constructs a PostgreSQL backend using the given +// API client, server address, credentials, and database. +func NewPostgreSQLBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + // Get the PostgreSQL credentials to perform read/write operations. + connURL := connectionURL(conf) + if connURL == "" { + return nil, fmt.Errorf("missing connection_url") + } + + unquoted_table, ok := conf["table"] + if !ok { + unquoted_table = "vault_kv_store" + } + quoted_table := dbutil.QuoteIdentifier(unquoted_table) + + maxParStr, ok := conf["max_parallel"] + var maxParInt int + var err error + if ok { + maxParInt, err = strconv.Atoi(maxParStr) + if err != nil { + return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err) + } + if logger.IsDebug() { + logger.Debug("max_parallel set", "max_parallel", maxParInt) + } + } else { + maxParInt = physical.DefaultParallelOperations + } + + maxIdleConnsStr, maxIdleConnsIsSet := conf["max_idle_connections"] + var maxIdleConns int + if maxIdleConnsIsSet { + maxIdleConns, err = strconv.Atoi(maxIdleConnsStr) + if err != nil { + return nil, fmt.Errorf("failed parsing max_idle_connections parameter: %w", err) + } + if logger.IsDebug() { + logger.Debug("max_idle_connections set", "max_idle_connections", maxIdleConnsStr) + } + } + + // Create PostgreSQL handle for the database. + db, err := sql.Open("pgx", connURL) + if err != nil { + return nil, fmt.Errorf("failed to connect to postgres: %w", err) + } + db.SetMaxOpenConns(maxParInt) + + if maxIdleConnsIsSet { + db.SetMaxIdleConns(maxIdleConns) + } + + // Determine if we should use a function to work around lack of upsert (versions < 9.5) + var upsertAvailable bool + upsertAvailableQuery := "SELECT current_setting('server_version_num')::int >= 90500" + if err := db.QueryRow(upsertAvailableQuery).Scan(&upsertAvailable); err != nil { + return nil, fmt.Errorf("failed to check for native upsert: %w", err) + } + + if !upsertAvailable && conf["ha_enabled"] == "true" { + return nil, fmt.Errorf("ha_enabled=true in config but PG version doesn't support HA, must be at least 9.5") + } + + // Setup our put strategy based on the presence or absence of a native + // upsert. + var put_query string + if !upsertAvailable { + put_query = "SELECT vault_kv_put($1, $2, $3, $4)" + } else { + put_query = "INSERT INTO " + quoted_table + " VALUES($1, $2, $3, $4)" + + " ON CONFLICT (path, key) DO " + + " UPDATE SET (parent_path, path, key, value) = ($1, $2, $3, $4)" + } + + unquoted_ha_table, ok := conf["ha_table"] + if !ok { + unquoted_ha_table = "vault_ha_locks" + } + quoted_ha_table := dbutil.QuoteIdentifier(unquoted_ha_table) + + // Setup the backend. 
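+	// As a worked example of list_query below (values illustrative):
+	// List("foo/") binds $1 = "/foo/" and returns the keys stored directly
+	// at that path plus one "<child>/" entry per deeper prefix, which is
+	// the one-level view that physical.Backend's List contract expects.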
+ m := &PostgreSQLBackend{ + table: quoted_table, + client: db, + put_query: put_query, + get_query: "SELECT value FROM " + quoted_table + " WHERE path = $1 AND key = $2", + delete_query: "DELETE FROM " + quoted_table + " WHERE path = $1 AND key = $2", + list_query: "SELECT key FROM " + quoted_table + " WHERE path = $1" + + " UNION ALL SELECT DISTINCT substring(substr(path, length($1)+1) from '^.*?/') FROM " + quoted_table + + " WHERE parent_path LIKE $1 || '%'", + haGetLockValueQuery: + // only read non expired data + " SELECT ha_value FROM " + quoted_ha_table + " WHERE NOW() <= valid_until AND ha_key = $1 ", + haUpsertLockIdentityExec: + // $1=identity $2=ha_key $3=ha_value $4=TTL in seconds + // update either steal expired lock OR update expiry for lock owned by me + " INSERT INTO " + quoted_ha_table + " as t (ha_identity, ha_key, ha_value, valid_until) VALUES ($1, $2, $3, NOW() + $4 * INTERVAL '1 seconds' ) " + + " ON CONFLICT (ha_key) DO " + + " UPDATE SET (ha_identity, ha_key, ha_value, valid_until) = ($1, $2, $3, NOW() + $4 * INTERVAL '1 seconds') " + + " WHERE (t.valid_until < NOW() AND t.ha_key = $2) OR " + + " (t.ha_identity = $1 AND t.ha_key = $2) ", + haDeleteLockExec: + // $1=ha_identity $2=ha_key + " DELETE FROM " + quoted_ha_table + " WHERE ha_identity=$1 AND ha_key=$2 ", + logger: logger, + permitPool: physical.NewPermitPool(maxParInt), + haEnabled: conf["ha_enabled"] == "true", + } + + return m, nil +} + +// connectionURL first check the environment variables for a connection URL. If +// no connection URL exists in the environment variable, the Vault config file is +// checked. If neither the environment variables or the config file set the connection +// URL for the Postgres backend, because it is a required field, an error is returned. +func connectionURL(conf map[string]string) string { + connURL := conf["connection_url"] + if envURL := os.Getenv("VAULT_PG_CONNECTION_URL"); envURL != "" { + connURL = envURL + } + + return connURL +} + +// splitKey is a helper to split a full path key into individual +// parts: parentPath, path, key +func (m *PostgreSQLBackend) splitKey(fullPath string) (string, string, string) { + var parentPath string + var path string + + pieces := strings.Split(fullPath, "/") + depth := len(pieces) + key := pieces[depth-1] + + if depth == 1 { + parentPath = "" + path = "/" + } else if depth == 2 { + parentPath = "/" + path = "/" + pieces[0] + "/" + } else { + parentPath = "/" + strings.Join(pieces[:depth-2], "/") + "/" + path = "/" + strings.Join(pieces[:depth-1], "/") + "/" + } + + return parentPath, path, key +} + +// Put is used to insert or update an entry. +func (m *PostgreSQLBackend) Put(ctx context.Context, entry *physical.Entry) error { + defer metrics.MeasureSince([]string{"postgres", "put"}, time.Now()) + + m.permitPool.Acquire() + defer m.permitPool.Release() + + parentPath, path, key := m.splitKey(entry.Key) + + _, err := m.client.ExecContext(ctx, m.put_query, parentPath, path, key, entry.Value) + if err != nil { + return err + } + return nil +} + +// Get is used to fetch and entry. 
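+// For example (values illustrative), Get("foo/bar/baz") is decomposed by
+// splitKey into path "/foo/bar/" and key "baz", which bind to $1 and $2 of
+// get_query above.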
+func (m *PostgreSQLBackend) Get(ctx context.Context, fullPath string) (*physical.Entry, error) { + defer metrics.MeasureSince([]string{"postgres", "get"}, time.Now()) + + m.permitPool.Acquire() + defer m.permitPool.Release() + + _, path, key := m.splitKey(fullPath) + + var result []byte + err := m.client.QueryRowContext(ctx, m.get_query, path, key).Scan(&result) + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, err + } + + ent := &physical.Entry{ + Key: fullPath, + Value: result, + } + return ent, nil +} + +// Delete is used to permanently delete an entry +func (m *PostgreSQLBackend) Delete(ctx context.Context, fullPath string) error { + defer metrics.MeasureSince([]string{"postgres", "delete"}, time.Now()) + + m.permitPool.Acquire() + defer m.permitPool.Release() + + _, path, key := m.splitKey(fullPath) + + _, err := m.client.ExecContext(ctx, m.delete_query, path, key) + if err != nil { + return err + } + return nil +} + +// List is used to list all the keys under a given +// prefix, up to the next prefix. +func (m *PostgreSQLBackend) List(ctx context.Context, prefix string) ([]string, error) { + defer metrics.MeasureSince([]string{"postgres", "list"}, time.Now()) + + m.permitPool.Acquire() + defer m.permitPool.Release() + + rows, err := m.client.QueryContext(ctx, m.list_query, "/"+prefix) + if err != nil { + return nil, err + } + defer rows.Close() + + var keys []string + for rows.Next() { + var key string + err = rows.Scan(&key) + if err != nil { + return nil, fmt.Errorf("failed to scan rows: %w", err) + } + + keys = append(keys, key) + } + + return keys, nil +} + +// LockWith is used for mutual exclusion based on the given key. +func (p *PostgreSQLBackend) LockWith(key, value string) (physical.Lock, error) { + identity, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + return &PostgreSQLLock{ + backend: p, + key: key, + value: value, + identity: identity, + ttlSeconds: PostgreSQLLockTTLSeconds, + renewInterval: PostgreSQLLockRenewInterval, + retryInterval: PostgreSQLLockRetryInterval, + }, nil +} + +func (p *PostgreSQLBackend) HAEnabled() bool { + return p.haEnabled +} + +// Lock tries to acquire the lock by repeatedly trying to create a record in the +// PostgreSQL table. It will block until either the stop channel is closed or +// the lock could be acquired successfully. The returned channel will be closed +// once the lock in the PostgreSQL table cannot be renewed, either due to an +// error speaking to PostgreSQL or because someone else has taken it. +func (l *PostgreSQLLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { + l.lock.Lock() + defer l.lock.Unlock() + + var ( + success = make(chan struct{}) + errors = make(chan error) + leader = make(chan struct{}) + ) + // try to acquire the lock asynchronously + go l.tryToLock(stopCh, success, errors) + + select { + case <-success: + // after acquiring it successfully, we must renew the lock periodically + l.renewTicker = time.NewTicker(l.renewInterval) + go l.periodicallyRenewLock(leader) + case err := <-errors: + return nil, err + case <-stopCh: + return nil, nil + } + + return leader, nil +} + +// Unlock releases the lock by deleting the lock record from the +// PostgreSQL table. 
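+// The DELETE is scoped to this lock's identity as well as its key, so calling
+// Unlock after the lock has already been stolen by another instance is a
+// harmless no-op.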
+func (l *PostgreSQLLock) Unlock() error { + pg := l.backend + pg.permitPool.Acquire() + defer pg.permitPool.Release() + + if l.renewTicker != nil { + l.renewTicker.Stop() + } + + // Delete lock owned by me + _, err := pg.client.Exec(pg.haDeleteLockExec, l.identity, l.key) + return err +} + +// Value checks whether or not the lock is held by any instance of PostgreSQLLock, +// including this one, and returns the current value. +func (l *PostgreSQLLock) Value() (bool, string, error) { + pg := l.backend + pg.permitPool.Acquire() + defer pg.permitPool.Release() + var result string + err := pg.client.QueryRow(pg.haGetLockValueQuery, l.key).Scan(&result) + + switch err { + case nil: + return true, result, nil + case sql.ErrNoRows: + return false, "", nil + default: + return false, "", err + + } +} + +// tryToLock tries to create a new item in PostgreSQL every `retryInterval`. +// As long as the item cannot be created (because it already exists), it will +// be retried. If the operation fails due to an error, it is sent to the errors +// channel. When the lock could be acquired successfully, the success channel +// is closed. +func (l *PostgreSQLLock) tryToLock(stop <-chan struct{}, success chan struct{}, errors chan error) { + ticker := time.NewTicker(l.retryInterval) + defer ticker.Stop() + + for { + select { + case <-stop: + return + case <-ticker.C: + gotlock, err := l.writeItem() + switch { + case err != nil: + errors <- err + return + case gotlock: + close(success) + return + } + } + } +} + +func (l *PostgreSQLLock) periodicallyRenewLock(done chan struct{}) { + for range l.renewTicker.C { + gotlock, err := l.writeItem() + if err != nil || !gotlock { + close(done) + l.renewTicker.Stop() + return + } + } +} + +// Attempts to put/update the PostgreSQL item using condition expressions to +// evaluate the TTL. Returns true if the lock was obtained, false if not. +// If false error may be nil or non-nil: nil indicates simply that someone +// else has the lock, whereas non-nil means that something unexpected happened. +func (l *PostgreSQLLock) writeItem() (bool, error) { + pg := l.backend + pg.permitPool.Acquire() + defer pg.permitPool.Release() + + // Try steal lock or update expiry on my lock + + sqlResult, err := pg.client.Exec(pg.haUpsertLockIdentityExec, l.identity, l.key, l.value, l.ttlSeconds) + if err != nil { + return false, err + } + if sqlResult == nil { + return false, fmt.Errorf("empty SQL response received") + } + + ar, err := sqlResult.RowsAffected() + if err != nil { + return false, err + } + return ar == 1, nil +} diff --git a/physical/postgresql/postgresql_test.go b/physical/postgresql/postgresql_test.go new file mode 100644 index 0000000..5dec40a --- /dev/null +++ b/physical/postgresql/postgresql_test.go @@ -0,0 +1,426 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package postgresql + +import ( + "fmt" + "os" + "testing" + "time" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/testhelpers/postgresql" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" + _ "github.com/jackc/pgx/v4/stdlib" +) + +func TestPostgreSQLBackend(t *testing.T) { + logger := logging.NewVaultLogger(log.Debug) + + // Use docker as pg backend if no url is provided via environment variables + connURL := os.Getenv("PGURL") + if connURL == "" { + cleanup, u := postgresql.PrepareTestContainer(t, "11.1") + defer cleanup() + connURL = u + } + + table := os.Getenv("PGTABLE") + if table == "" { + table = "vault_kv_store" + } + + hae := os.Getenv("PGHAENABLED") + if hae == "" { + hae = "true" + } + + // Run vault tests + logger.Info(fmt.Sprintf("Connection URL: %v", connURL)) + + b1, err := NewPostgreSQLBackend(map[string]string{ + "connection_url": connURL, + "table": table, + "ha_enabled": hae, + }, logger) + if err != nil { + t.Fatalf("Failed to create new backend: %v", err) + } + + b2, err := NewPostgreSQLBackend(map[string]string{ + "connection_url": connURL, + "table": table, + "ha_enabled": hae, + }, logger) + if err != nil { + t.Fatalf("Failed to create new backend: %v", err) + } + pg := b1.(*PostgreSQLBackend) + + // Read postgres version to test basic connects works + var pgversion string + if err = pg.client.QueryRow("SELECT current_setting('server_version_num')").Scan(&pgversion); err != nil { + t.Fatalf("Failed to check for Postgres version: %v", err) + } + logger.Info(fmt.Sprintf("Postgres Version: %v", pgversion)) + + setupDatabaseObjects(t, logger, pg) + + defer func() { + pg := b1.(*PostgreSQLBackend) + _, err := pg.client.Exec(fmt.Sprintf(" TRUNCATE TABLE %v ", pg.table)) + if err != nil { + t.Fatalf("Failed to truncate table: %v", err) + } + }() + + logger.Info("Running basic backend tests") + physical.ExerciseBackend(t, b1) + logger.Info("Running list prefix backend tests") + physical.ExerciseBackend_ListPrefix(t, b1) + + ha1, ok := b1.(physical.HABackend) + if !ok { + t.Fatalf("PostgreSQLDB does not implement HABackend") + } + + ha2, ok := b2.(physical.HABackend) + if !ok { + t.Fatalf("PostgreSQLDB does not implement HABackend") + } + + if ha1.HAEnabled() && ha2.HAEnabled() { + logger.Info("Running ha backend tests") + physical.ExerciseHABackend(t, ha1, ha2) + testPostgresSQLLockTTL(t, ha1) + testPostgresSQLLockRenewal(t, ha1) + } +} + +func TestPostgreSQLBackendMaxIdleConnectionsParameter(t *testing.T) { + _, err := NewPostgreSQLBackend(map[string]string{ + "connection_url": "some connection url", + "max_idle_connections": "bad param", + }, logging.NewVaultLogger(log.Debug)) + if err == nil { + t.Error("Expected invalid max_idle_connections param to return error") + } + expectedErrStr := "failed parsing max_idle_connections parameter: strconv.Atoi: parsing \"bad param\": invalid syntax" + if err.Error() != expectedErrStr { + t.Errorf("Expected: %q but found %q", expectedErrStr, err.Error()) + } +} + +func TestConnectionURL(t *testing.T) { + type input struct { + envar string + conf map[string]string + } + + cases := map[string]struct { + want string + input input + }{ + "environment_variable_not_set_use_config_value": { + want: "abc", + input: input{ + envar: "", + conf: map[string]string{"connection_url": "abc"}, + }, + }, + + "no_value_connection_url_set_key_exists": { + want: "", + input: input{ + envar: "", + conf: map[string]string{"connection_url": 
""}, + }, + }, + + "no_value_connection_url_set_key_doesnt_exist": { + want: "", + input: input{ + envar: "", + conf: map[string]string{}, + }, + }, + + "environment_variable_set": { + want: "abc", + input: input{ + envar: "abc", + conf: map[string]string{"connection_url": "def"}, + }, + }, + } + + for name, tt := range cases { + t.Run(name, func(t *testing.T) { + // This is necessary to avoid always testing the branch where the env is set. + // As long the the env is set --- even if the value is "" --- `ok` returns true. + if tt.input.envar != "" { + os.Setenv("VAULT_PG_CONNECTION_URL", tt.input.envar) + defer os.Unsetenv("VAULT_PG_CONNECTION_URL") + } + + got := connectionURL(tt.input.conf) + + if got != tt.want { + t.Errorf("connectionURL(%s): want %q, got %q", tt.input, tt.want, got) + } + }) + } +} + +// Similar to testHABackend, but using internal implementation details to +// trigger the lock failure scenario by setting the lock renew period for one +// of the locks to a higher value than the lock TTL. +const maxTries = 3 + +func testPostgresSQLLockTTL(t *testing.T, ha physical.HABackend) { + t.Log("Skipping testPostgresSQLLockTTL portion of test.") + return + + for tries := 1; tries <= maxTries; tries++ { + // Try this several times. If the test environment is too slow the lock can naturally lapse + if attemptLockTTLTest(t, ha, tries) { + break + } + } +} + +func attemptLockTTLTest(t *testing.T, ha physical.HABackend, tries int) bool { + // Set much smaller lock times to speed up the test. + lockTTL := 3 + renewInterval := time.Second * 1 + retryInterval := time.Second * 1 + longRenewInterval := time.Duration(lockTTL*2) * time.Second + lockkey := "postgresttl" + + var leaderCh <-chan struct{} + + // Get the lock + origLock, err := ha.LockWith(lockkey, "bar") + if err != nil { + t.Fatalf("err: %v", err) + } + { + // set the first lock renew period to double the expected TTL. + lock := origLock.(*PostgreSQLLock) + lock.renewInterval = longRenewInterval + lock.ttlSeconds = lockTTL + + // Attempt to lock + lockTime := time.Now() + leaderCh, err = lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("failed to get leader ch") + } + + if tries == 1 { + time.Sleep(3 * time.Second) + } + // Check the value + held, val, err := lock.Value() + if err != nil { + t.Fatalf("err: %v", err) + } + if !held { + if tries < maxTries && time.Since(lockTime) > (time.Second*time.Duration(lockTTL)) { + // Our test environment is slow enough that we failed this, retry + return false + } + t.Fatalf("should be held") + } + if val != "bar" { + t.Fatalf("bad value: %v", val) + } + } + + // Second acquisition should succeed because the first lock should + // not renew within the 3 sec TTL. 
+ origLock2, err := ha.LockWith(lockkey, "baz") + if err != nil { + t.Fatalf("err: %v", err) + } + { + lock2 := origLock2.(*PostgreSQLLock) + lock2.renewInterval = renewInterval + lock2.ttlSeconds = lockTTL + lock2.retryInterval = retryInterval + + // Cancel attempt in 6 sec so as not to block unit tests forever + stopCh := make(chan struct{}) + time.AfterFunc(time.Duration(lockTTL*2)*time.Second, func() { + close(stopCh) + }) + + // Attempt to lock should work + lockTime := time.Now() + leaderCh2, err := lock2.Lock(stopCh) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh2 == nil { + t.Fatalf("should get leader ch") + } + defer lock2.Unlock() + + // Check the value + held, val, err := lock2.Value() + if err != nil { + t.Fatalf("err: %v", err) + } + if !held { + if tries < maxTries && time.Since(lockTime) > (time.Second*time.Duration(lockTTL)) { + // Our test environment is slow enough that we failed this, retry + return false + } + t.Fatalf("should be held") + } + if val != "baz" { + t.Fatalf("bad value: %v", val) + } + } + // The first lock should have lost the leader channel + select { + case <-time.After(longRenewInterval * 2): + t.Fatalf("original lock did not have its leader channel closed.") + case <-leaderCh: + } + return true +} + +// Verify that once Unlock is called, we don't keep trying to renew the original +// lock. +func testPostgresSQLLockRenewal(t *testing.T, ha physical.HABackend) { + // Get the lock + origLock, err := ha.LockWith("pgrenewal", "bar") + if err != nil { + t.Fatalf("err: %v", err) + } + + // customize the renewal and watch intervals + lock := origLock.(*PostgreSQLLock) + // lock.renewInterval = time.Second * 1 + + // Attempt to lock + leaderCh, err := lock.Lock(nil) + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh == nil { + t.Fatalf("failed to get leader ch") + } + + // Check the value + held, val, err := lock.Value() + if err != nil { + t.Fatalf("err: %v", err) + } + if !held { + t.Fatalf("should be held") + } + if val != "bar" { + t.Fatalf("bad value: %v", val) + } + + // Release the lock, which will delete the stored item + if err := lock.Unlock(); err != nil { + t.Fatalf("err: %v", err) + } + + // Wait longer than the renewal time + time.Sleep(1500 * time.Millisecond) + + // Attempt to lock with new lock + newLock, err := ha.LockWith("pgrenewal", "baz") + if err != nil { + t.Fatalf("err: %v", err) + } + + stopCh := make(chan struct{}) + timeout := time.Duration(lock.ttlSeconds)*time.Second + lock.retryInterval + time.Second + + var leaderCh2 <-chan struct{} + newlockch := make(chan struct{}) + go func() { + leaderCh2, err = newLock.Lock(stopCh) + close(newlockch) + }() + + // Cancel attempt after lock ttl + 1s so as not to block unit tests forever + select { + case <-time.After(timeout): + t.Logf("giving up on lock attempt after %v", timeout) + close(stopCh) + case <-newlockch: + // pass through + } + + // Attempt to lock should work + if err != nil { + t.Fatalf("err: %v", err) + } + if leaderCh2 == nil { + t.Fatalf("should get leader ch") + } + + // Check the value + held, val, err = newLock.Value() + if err != nil { + t.Fatalf("err: %v", err) + } + if !held { + t.Fatalf("should be held") + } + if val != "baz" { + t.Fatalf("bad value: %v", val) + } + + // Cleanup + newLock.Unlock() +} + +func setupDatabaseObjects(t *testing.T, logger log.Logger, pg *PostgreSQLBackend) { + var err error + // Setup tables and indexes if not exists. 
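+	// The schema below mirrors what NewPostgreSQLBackend's queries assume:
+	// a (path, key) primary key backing the upsert's ON CONFLICT clause,
+	// and a parent_path index to keep list_query's LIKE scan cheap.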
+ createTableSQL := fmt.Sprintf( + " CREATE TABLE IF NOT EXISTS %v ( "+ + " parent_path TEXT COLLATE \"C\" NOT NULL, "+ + " path TEXT COLLATE \"C\", "+ + " key TEXT COLLATE \"C\", "+ + " value BYTEA, "+ + " CONSTRAINT pkey PRIMARY KEY (path, key) "+ + " ); ", pg.table) + + _, err = pg.client.Exec(createTableSQL) + if err != nil { + t.Fatalf("Failed to create table: %v", err) + } + + createIndexSQL := fmt.Sprintf(" CREATE INDEX IF NOT EXISTS parent_path_idx ON %v (parent_path); ", pg.table) + + _, err = pg.client.Exec(createIndexSQL) + if err != nil { + t.Fatalf("Failed to create index: %v", err) + } + + createHaTableSQL := " CREATE TABLE IF NOT EXISTS vault_ha_locks ( " + + " ha_key TEXT COLLATE \"C\" NOT NULL, " + + " ha_identity TEXT COLLATE \"C\" NOT NULL, " + + " ha_value TEXT COLLATE \"C\", " + + " valid_until TIMESTAMP WITH TIME ZONE NOT NULL, " + + " CONSTRAINT ha_key PRIMARY KEY (ha_key) " + + " ); " + + _, err = pg.client.Exec(createHaTableSQL) + if err != nil { + t.Fatalf("Failed to create hatable: %v", err) + } +} diff --git a/physical/raft/bolt_32bit_test.go b/physical/raft/bolt_32bit_test.go new file mode 100644 index 0000000..7694d82 --- /dev/null +++ b/physical/raft/bolt_32bit_test.go @@ -0,0 +1,45 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build 386 || arm + +package raft + +import ( + "os" + "strconv" + "testing" +) + +func Test_BoltOptions(t *testing.T) { + t.Parallel() + key := "VAULT_RAFT_INITIAL_MMAP_SIZE" + + testCases := []struct { + name string + env string + expectedSize int + }{ + {"none", "", 0}, + {"5MB", strconv.Itoa(5 * 1024 * 1024), 5 * 1024 * 1024}, + {"negative", "-1", 0}, + } + + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + if tc.env != "" { + current := os.Getenv(key) + defer os.Setenv(key, current) + os.Setenv(key, tc.env) + } + + o := boltOptions("") + + if o.InitialMmapSize != tc.expectedSize { + t.Errorf("expected InitialMmapSize to be %d but it was %d", tc.expectedSize, o.InitialMmapSize) + } + }) + } +} diff --git a/physical/raft/bolt_64bit_test.go b/physical/raft/bolt_64bit_test.go new file mode 100644 index 0000000..c4b89b8 --- /dev/null +++ b/physical/raft/bolt_64bit_test.go @@ -0,0 +1,45 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !386 && !arm + +package raft + +import ( + "os" + "strconv" + "testing" +) + +func Test_BoltOptions(t *testing.T) { + t.Parallel() + key := "VAULT_RAFT_INITIAL_MMAP_SIZE" + + testCases := []struct { + name string + env string + expectedSize int + }{ + {"none", "", 100 * 1024 * 1024 * 1024}, + {"5MB", strconv.Itoa(5 * 1024 * 1024), 5 * 1024 * 1024}, + {"negative", "-1", 0}, + } + + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + if tc.env != "" { + current := os.Getenv(key) + defer os.Setenv(key, current) + os.Setenv(key, tc.env) + } + + o := boltOptions("") + + if o.InitialMmapSize != tc.expectedSize { + t.Errorf("expected InitialMmapSize to be %d but it was %d", tc.expectedSize, o.InitialMmapSize) + } + }) + } +} diff --git a/physical/raft/bolt_linux.go b/physical/raft/bolt_linux.go new file mode 100644 index 0000000..b7774c6 --- /dev/null +++ b/physical/raft/bolt_linux.go @@ -0,0 +1,39 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package raft
+
+import (
+	"context"
+	"os"
+
+	"github.com/shirou/gopsutil/v3/mem"
+	"golang.org/x/sys/unix"
+)
+
+func init() {
+	getMmapFlags = getMmapFlagsLinux
+}
+
+func getMmapFlagsLinux(dbPath string) int {
+	if os.Getenv("VAULT_RAFT_DISABLE_MAP_POPULATE") != "" {
+		return 0
+	}
+	stat, err := os.Stat(dbPath)
+	if err != nil {
+		return 0
+	}
+	size := stat.Size()
+
+	v, err := mem.VirtualMemoryWithContext(context.Background())
+	if err != nil {
+		return 0
+	}
+
+	// We won't worry about swap, since we already tell people not to use it.
+	if v.Total > uint64(size) {
+		return unix.MAP_POPULATE
+	}
+
+	return 0
+}
diff --git a/physical/raft/chunking_test.go b/physical/raft/chunking_test.go
new file mode 100644
index 0000000..64f83e6
--- /dev/null
+++ b/physical/raft/chunking_test.go
@@ -0,0 +1,238 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package raft
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"os"
+	"testing"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/hashicorp/go-raftchunking"
+	raftchunkingtypes "github.com/hashicorp/go-raftchunking/types"
+	"github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/raft"
+	"github.com/hashicorp/raft-boltdb/v2"
+	"github.com/hashicorp/vault/sdk/physical"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// This test chunks encoded data and then performs out-of-order applies of half
+// the logs. It then snapshots, restores to a new FSM, and applies the rest.
+// The goal is to verify that chunking and snapshotting work together as
+// expected.
+func TestRaft_Chunking_Lifecycle(t *testing.T) {
+	t.Parallel()
+	require := require.New(t)
+	assert := assert.New(t)
+
+	b, dir := GetRaft(t, true, false)
+	defer os.RemoveAll(dir)
+
+	t.Log("applying configuration")
+
+	b.applyConfigSettings(raft.DefaultConfig())
+
+	t.Log("chunking")
+
+	buf := []byte("let's see how this goes, shall we?")
+	logData := &LogData{
+		Operations: []*LogOperation{
+			{
+				OpType: putOp,
+				Key:    "foobar",
+				Value:  buf,
+			},
+		},
+	}
+	cmdBytes, err := proto.Marshal(logData)
+	require.NoError(err)
+
+	var logs []*raft.Log
+	for i, b := range cmdBytes {
+		// Stage multiple operations so we can test restoring across multiple opnums
+		for j := 0; j < 10; j++ {
+			chunkInfo := &raftchunkingtypes.ChunkInfo{
+				OpNum:       uint64(32 + j),
+				SequenceNum: uint32(i),
+				NumChunks:   uint32(len(cmdBytes)),
+			}
+			chunkBytes, err := proto.Marshal(chunkInfo)
+			require.NoError(err)
+
+			logs = append(logs, &raft.Log{
+				Data:       []byte{b},
+				Extensions: chunkBytes,
+			})
+		}
+	}
+
+	t.Log("applying half of the logs")
+
+	// The reason for the skipping is to test out-of-order applies which are
+	// theoretically possible. Some of these will actually finish though!
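+	// (Ten logs were staged per data byte above, so log i*10+j carries chunk i
+	// of op 32+j; the stride-2 loop below therefore applies every chunk of the
+	// even-j ops, and those complete during this first pass. The rest finish
+	// after the restart.)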
+ for i := 0; i < len(logs); i += 2 { + resp := b.fsm.chunker.Apply(logs[i]) + if resp != nil { + _, ok := resp.(raftchunking.ChunkingSuccess) + assert.True(ok) + } + } + + t.Log("tearing down cluster") + require.NoError(b.TeardownCluster(nil)) + require.NoError(b.fsm.getDB().Close()) + require.NoError(b.stableStore.(*raftboltdb.BoltStore).Close()) + + t.Log("starting new backend") + backendRaw, err := NewRaftBackend(b.conf, b.logger) + require.NoError(err) + b = backendRaw.(*RaftBackend) + + t.Log("applying rest of the logs") + + // Apply the rest of the logs + var resp interface{} + for i := 1; i < len(logs); i += 2 { + resp = b.fsm.chunker.Apply(logs[i]) + if resp != nil { + _, ok := resp.(raftchunking.ChunkingSuccess) + assert.True(ok) + } + } + + assert.NotNil(resp) + _, ok := resp.(raftchunking.ChunkingSuccess) + assert.True(ok) +} + +func TestFSM_Chunking_TermChange(t *testing.T) { + t.Parallel() + require := require.New(t) + assert := assert.New(t) + + b, dir := GetRaft(t, true, false) + defer os.RemoveAll(dir) + + t.Log("applying configuration") + + b.applyConfigSettings(raft.DefaultConfig()) + + t.Log("chunking") + + buf := []byte("let's see how this goes, shall we?") + logData := &LogData{ + Operations: []*LogOperation{ + { + OpType: putOp, + Key: "foobar", + Value: buf, + }, + }, + } + cmdBytes, err := proto.Marshal(logData) + require.NoError(err) + + // Only need two chunks to test this + chunks := [][]byte{ + cmdBytes[0:2], + cmdBytes[2:], + } + var logs []*raft.Log + for i, b := range chunks { + chunkInfo := &raftchunkingtypes.ChunkInfo{ + OpNum: uint64(32), + SequenceNum: uint32(i), + NumChunks: uint32(len(chunks)), + } + chunkBytes, err := proto.Marshal(chunkInfo) + if err != nil { + t.Fatal(err) + } + logs = append(logs, &raft.Log{ + Term: uint64(i), + Data: b, + Extensions: chunkBytes, + }) + } + + // We should see nil for both + for _, log := range logs { + resp := b.fsm.chunker.Apply(log) + assert.Nil(resp) + } + + // Now verify the other baseline, that when the term doesn't change we see + // non-nil. First make the chunker have a clean state, then set the terms + // to be the same. 
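+	// (RestoreState(nil) discards any chunk state buffered by the applies
+	// above, giving the second pass a clean chunker.)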
+ b.fsm.chunker.RestoreState(nil) + logs[1].Term = uint64(0) + + // We should see nil only for the first one + for i, log := range logs { + resp := b.fsm.chunker.Apply(log) + if i == 0 { + assert.Nil(resp) + } + if i == 1 { + assert.NotNil(resp) + _, ok := resp.(raftchunking.ChunkingSuccess) + assert.True(ok) + } + } +} + +func TestRaft_Chunking_AppliedIndex(t *testing.T) { + t.Parallel() + + raft, dir := GetRaft(t, true, false) + defer os.RemoveAll(dir) + + // Lower the size for tests + raftchunking.ChunkSize = 1024 + val, err := uuid.GenerateRandomBytes(3 * raftchunking.ChunkSize) + if err != nil { + t.Fatal(err) + } + + // Write a value to fastforward the index + err = raft.Put(context.Background(), &physical.Entry{ + Key: "key", + Value: []byte("test"), + }) + if err != nil { + t.Fatal(err) + } + + currentIndex := raft.AppliedIndex() + // Write some data + for i := 0; i < 10; i++ { + err := raft.Put(context.Background(), &physical.Entry{ + Key: fmt.Sprintf("key-%d", i), + Value: val, + }) + if err != nil { + t.Fatal(err) + } + } + + newIndex := raft.AppliedIndex() + + // Each put should generate 4 chunks + if newIndex-currentIndex != 10*4 { + t.Fatalf("Did not apply chunks as expected, applied index = %d - %d = %d", newIndex, currentIndex, newIndex-currentIndex) + } + + for i := 0; i < 10; i++ { + entry, err := raft.Get(context.Background(), fmt.Sprintf("key-%d", i)) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(entry.Value, val) { + t.Fatal("value is corrupt") + } + } +} diff --git a/physical/raft/fsm.go b/physical/raft/fsm.go new file mode 100644 index 0000000..a888281 --- /dev/null +++ b/physical/raft/fsm.go @@ -0,0 +1,1120 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package raft + +import ( + "bytes" + "context" + "encoding/hex" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/armon/go-metrics" + "github.com/golang/protobuf/proto" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-raftchunking" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/raft" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/physical" + "github.com/hashicorp/vault/sdk/plugin/pb" + bolt "go.etcd.io/bbolt" +) + +const ( + deleteOp uint32 = 1 << iota + putOp + restoreCallbackOp + getOp + + chunkingPrefix = "raftchunking/" + databaseFilename = "vault.db" +) + +var ( + // dataBucketName is the value we use for the bucket + dataBucketName = []byte("data") + configBucketName = []byte("config") + latestIndexKey = []byte("latest_indexes") + latestConfigKey = []byte("latest_config") + localNodeConfigKey = []byte("local_node_config") +) + +// Verify FSM satisfies the correct interfaces +var ( + _ physical.Backend = (*FSM)(nil) + _ physical.Transactional = (*FSM)(nil) + _ raft.FSM = (*FSM)(nil) + _ raft.BatchingFSM = (*FSM)(nil) +) + +type restoreCallback func(context.Context) error + +type FSMEntry struct { + Key string + Value []byte +} + +func (f *FSMEntry) String() string { + return fmt.Sprintf("Key: %s. Value: %s", f.Key, hex.EncodeToString(f.Value)) +} + +// FSMApplyResponse is returned from an FSM apply. It indicates if the apply was +// successful or not. EntryMap contains the keys/values from the Get operations. +type FSMApplyResponse struct { + Success bool + EntrySlice []*FSMEntry +} + +// FSM is Vault's primary state storage. 
It writes updates to a bolt db file
+// that lives on local disk. FSM implements raft.FSM and physical.Backend
+// interfaces.
+type FSM struct {
+	// latestIndex and latestTerm must stay at the top of this struct to be
+	// properly 64-bit aligned.
+
+	// latestIndex and latestTerm are the term and index of the last log we
+	// received
+	latestIndex *uint64
+	latestTerm  *uint64
+	// latestConfig is the latest server configuration we've seen
+	latestConfig atomic.Value
+
+	l           sync.RWMutex
+	path        string
+	logger      log.Logger
+	noopRestore bool
+
+	// applyCallback is used to control the pace of applies in tests
+	applyCallback func()
+
+	db *bolt.DB
+
+	// restoreCb is called after we've restored a snapshot
+	restoreCb restoreCallback
+
+	chunker *raftchunking.ChunkingBatchingFSM
+
+	localID         string
+	desiredSuffrage string
+	unknownOpTypes  sync.Map
+}
+
+// NewFSM constructs an FSM using the given directory
+func NewFSM(path string, localID string, logger log.Logger) (*FSM, error) {
+	// Initialize the latest term, index, and config values
+	latestTerm := new(uint64)
+	latestIndex := new(uint64)
+	latestConfig := atomic.Value{}
+	atomic.StoreUint64(latestTerm, 0)
+	atomic.StoreUint64(latestIndex, 0)
+	latestConfig.Store((*ConfigurationValue)(nil))
+
+	f := &FSM{
+		path:   path,
+		logger: logger,

+		latestTerm:   latestTerm,
+		latestIndex:  latestIndex,
+		latestConfig: latestConfig,
+		// Assume that the default intent is to join as a voter. This will be updated
+		// when this node joins a cluster with a different suffrage, or during cluster
+		// setup if this is already part of a cluster with a desired suffrage.
+		desiredSuffrage: "voter",
+		localID:         localID,
+	}
+
+	f.chunker = raftchunking.NewChunkingBatchingFSM(f, &FSMChunkStorage{
+		f:   f,
+		ctx: context.Background(),
+	})
+
+	dbPath := filepath.Join(path, databaseFilename)
+	f.l.Lock()
+	defer f.l.Unlock()
+	if err := f.openDBFile(dbPath); err != nil {
+		return nil, fmt.Errorf("failed to open bolt file: %w", err)
+	}
+
+	return f, nil
+}
+
+func (f *FSM) getDB() *bolt.DB {
+	f.l.RLock()
+	defer f.l.RUnlock()
+
+	return f.db
+}
+
+// SetFSMDelay adds a delay to the FSM apply. This is used in tests to simulate
+// a slow apply.
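+// For example, a test might call r.SetFSMDelay(50*time.Millisecond) before
+// issuing writes to widen timing windows deterministically.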
+func (r *RaftBackend) SetFSMDelay(delay time.Duration) { + r.SetFSMApplyCallback(func() { time.Sleep(delay) }) +} + +func (r *RaftBackend) SetFSMApplyCallback(f func()) { + r.fsm.l.Lock() + r.fsm.applyCallback = f + r.fsm.l.Unlock() +} + +func (f *FSM) openDBFile(dbPath string) error { + if len(dbPath) == 0 { + return errors.New("can not open empty filename") + } + + st, err := os.Stat(dbPath) + switch { + case err != nil && os.IsNotExist(err): + case err != nil: + return fmt.Errorf("error checking raft FSM db file %q: %v", dbPath, err) + default: + perms := st.Mode() & os.ModePerm + if perms&0o077 != 0 { + f.logger.Warn("raft FSM db file has wider permissions than needed", + "needed", os.FileMode(0o600), "existing", perms) + } + } + + opts := boltOptions(dbPath) + start := time.Now() + boltDB, err := bolt.Open(dbPath, 0o600, opts) + if err != nil { + return err + } + elapsed := time.Now().Sub(start) + f.logger.Debug("time to open database", "elapsed", elapsed, "path", dbPath) + metrics.MeasureSince([]string{"raft_storage", "fsm", "open_db_file"}, start) + + err = boltDB.Update(func(tx *bolt.Tx) error { + // make sure we have the necessary buckets created + _, err := tx.CreateBucketIfNotExists(dataBucketName) + if err != nil { + return fmt.Errorf("failed to create bucket: %v", err) + } + b, err := tx.CreateBucketIfNotExists(configBucketName) + if err != nil { + return fmt.Errorf("failed to create bucket: %v", err) + } + + // Read in our latest index and term and populate it inmemory + val := b.Get(latestIndexKey) + if val != nil { + var latest IndexValue + err := proto.Unmarshal(val, &latest) + if err != nil { + return err + } + + atomic.StoreUint64(f.latestTerm, latest.Term) + atomic.StoreUint64(f.latestIndex, latest.Index) + } + + // Read in our latest config and populate it inmemory + val = b.Get(latestConfigKey) + if val != nil { + var latest ConfigurationValue + err := proto.Unmarshal(val, &latest) + if err != nil { + return err + } + + f.latestConfig.Store(&latest) + } + return nil + }) + if err != nil { + return err + } + + f.db = boltDB + return nil +} + +func (f *FSM) Stats() bolt.Stats { + f.l.RLock() + defer f.l.RUnlock() + + return f.db.Stats() +} + +func (f *FSM) Close() error { + f.l.RLock() + defer f.l.RUnlock() + + return f.db.Close() +} + +func writeSnapshotMetaToDB(metadata *raft.SnapshotMeta, db *bolt.DB) error { + latestIndex := &IndexValue{ + Term: metadata.Term, + Index: metadata.Index, + } + indexBytes, err := proto.Marshal(latestIndex) + if err != nil { + return err + } + + protoConfig := raftConfigurationToProtoConfiguration(metadata.ConfigurationIndex, metadata.Configuration) + configBytes, err := proto.Marshal(protoConfig) + if err != nil { + return err + } + + err = db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists(configBucketName) + if err != nil { + return err + } + + err = b.Put(latestConfigKey, configBytes) + if err != nil { + return err + } + + err = b.Put(latestIndexKey, indexBytes) + if err != nil { + return err + } + + return nil + }) + if err != nil { + return err + } + + return nil +} + +func (f *FSM) localNodeConfig() (*LocalNodeConfigValue, error) { + var configBytes []byte + if err := f.db.View(func(tx *bolt.Tx) error { + value := tx.Bucket(configBucketName).Get(localNodeConfigKey) + if value != nil { + configBytes = make([]byte, len(value)) + copy(configBytes, value) + } + return nil + }); err != nil { + return nil, err + } + if configBytes == nil { + return nil, nil + } + + var lnConfig LocalNodeConfigValue + if 
configBytes != nil { + err := proto.Unmarshal(configBytes, &lnConfig) + if err != nil { + return nil, err + } + f.desiredSuffrage = lnConfig.DesiredSuffrage + return &lnConfig, nil + } + + return nil, nil +} + +func (f *FSM) DesiredSuffrage() string { + f.l.RLock() + defer f.l.RUnlock() + + return f.desiredSuffrage +} + +func (f *FSM) upgradeLocalNodeConfig() error { + f.l.Lock() + defer f.l.Unlock() + + // Read the local node config + lnConfig, err := f.localNodeConfig() + if err != nil { + return err + } + + // Entry is already present. Get the suffrage value. + if lnConfig != nil { + f.desiredSuffrage = lnConfig.DesiredSuffrage + return nil + } + + // + // This is the upgrade case where there is no entry + // + + lnConfig = &LocalNodeConfigValue{} + + // Refer to the persisted latest raft config + config := f.latestConfig.Load().(*ConfigurationValue) + + // If there is no config, then this is a fresh node coming up. This could end up + // being a voter or non-voter. But by default assume that this is a voter. It + // will be changed if this node joins the cluster as a non-voter. + if config == nil { + f.desiredSuffrage = "voter" + lnConfig.DesiredSuffrage = f.desiredSuffrage + return f.persistDesiredSuffrage(lnConfig) + } + + // Get the last known suffrage of the node and assume that it is the desired + // suffrage. There is no better alternative here. + for _, srv := range config.Servers { + if srv.Id == f.localID { + switch srv.Suffrage { + case int32(raft.Nonvoter): + lnConfig.DesiredSuffrage = "non-voter" + default: + lnConfig.DesiredSuffrage = "voter" + } + // Bring the intent to the fsm instance. + f.desiredSuffrage = lnConfig.DesiredSuffrage + break + } + } + + return f.persistDesiredSuffrage(lnConfig) +} + +// recordSuffrage is called when a node successfully joins the cluster. This +// intent should land in the stored configuration. If the config isn't available +// yet, we still go ahead and store the intent in the fsm. During the next +// update to the configuration, this intent will be persisted. +func (f *FSM) recordSuffrage(desiredSuffrage string) error { + f.l.Lock() + defer f.l.Unlock() + + if err := f.persistDesiredSuffrage(&LocalNodeConfigValue{ + DesiredSuffrage: desiredSuffrage, + }); err != nil { + return err + } + + f.desiredSuffrage = desiredSuffrage + return nil +} + +func (f *FSM) persistDesiredSuffrage(lnconfig *LocalNodeConfigValue) error { + dsBytes, err := proto.Marshal(lnconfig) + if err != nil { + return err + } + + return f.db.Update(func(tx *bolt.Tx) error { + return tx.Bucket(configBucketName).Put(localNodeConfigKey, dsBytes) + }) +} + +func (f *FSM) witnessSnapshot(metadata *raft.SnapshotMeta) error { + f.l.RLock() + defer f.l.RUnlock() + + err := writeSnapshotMetaToDB(metadata, f.db) + if err != nil { + return err + } + + atomic.StoreUint64(f.latestIndex, metadata.Index) + atomic.StoreUint64(f.latestTerm, metadata.Term) + f.latestConfig.Store(raftConfigurationToProtoConfiguration(metadata.ConfigurationIndex, metadata.Configuration)) + + return nil +} + +// LatestState returns the latest index and configuration values we have seen on +// this FSM. +func (f *FSM) LatestState() (*IndexValue, *ConfigurationValue) { + return &IndexValue{ + Term: atomic.LoadUint64(f.latestTerm), + Index: atomic.LoadUint64(f.latestIndex), + }, f.latestConfig.Load().(*ConfigurationValue) +} + +// Delete deletes the given key from the bolt file. 
+func (f *FSM) Delete(ctx context.Context, path string) error {
+	defer metrics.MeasureSince([]string{"raft_storage", "fsm", "delete"}, time.Now())
+
+	f.l.RLock()
+	defer f.l.RUnlock()
+
+	return f.db.Update(func(tx *bolt.Tx) error {
+		return tx.Bucket(dataBucketName).Delete([]byte(path))
+	})
+}
+
+// DeletePrefix deletes all keys with the given prefix from the bolt file.
+func (f *FSM) DeletePrefix(ctx context.Context, prefix string) error {
+	defer metrics.MeasureSince([]string{"raft_storage", "fsm", "delete_prefix"}, time.Now())
+
+	f.l.RLock()
+	defer f.l.RUnlock()
+
+	err := f.db.Update(func(tx *bolt.Tx) error {
+		// Assume bucket exists and has keys
+		c := tx.Bucket(dataBucketName).Cursor()
+
+		prefixBytes := []byte(prefix)
+		for k, _ := c.Seek(prefixBytes); k != nil && bytes.HasPrefix(k, prefixBytes); k, _ = c.Next() {
+			if err := c.Delete(); err != nil {
+				return err
+			}
+		}
+
+		return nil
+	})
+
+	return err
+}
+
+// Get retrieves the value at the given path from the bolt file.
+func (f *FSM) Get(ctx context.Context, path string) (*physical.Entry, error) {
+	// TODO: Remove this outdated metric name in a future release
+	defer metrics.MeasureSince([]string{"raft", "get"}, time.Now())
+	defer metrics.MeasureSince([]string{"raft_storage", "fsm", "get"}, time.Now())
+
+	f.l.RLock()
+	defer f.l.RUnlock()
+
+	var valCopy []byte
+	var found bool
+
+	err := f.db.View(func(tx *bolt.Tx) error {
+		value := tx.Bucket(dataBucketName).Get([]byte(path))
+		if value != nil {
+			found = true
+			valCopy = make([]byte, len(value))
+			copy(valCopy, value)
+		}
+
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	if !found {
+		return nil, nil
+	}
+
+	return &physical.Entry{
+		Key:   path,
+		Value: valCopy,
+	}, nil
+}
+
+// Put writes the given entry to the bolt file.
+func (f *FSM) Put(ctx context.Context, entry *physical.Entry) error {
+	defer metrics.MeasureSince([]string{"raft_storage", "fsm", "put"}, time.Now())
+
+	f.l.RLock()
+	defer f.l.RUnlock()
+
+	// Start a write transaction.
+	return f.db.Update(func(tx *bolt.Tx) error {
+		return tx.Bucket(dataBucketName).Put([]byte(entry.Key), entry.Value)
+	})
+}
+
+// List retrieves the set of keys with the given prefix from the bolt file.
+func (f *FSM) List(ctx context.Context, prefix string) ([]string, error) {
+	// TODO: Remove this outdated metric name in a future release
+	defer metrics.MeasureSince([]string{"raft", "list"}, time.Now())
+	defer metrics.MeasureSince([]string{"raft_storage", "fsm", "list"}, time.Now())
+
+	f.l.RLock()
+	defer f.l.RUnlock()
+
+	var keys []string
+
+	err := f.db.View(func(tx *bolt.Tx) error {
+		// Assume bucket exists and has keys
+		c := tx.Bucket(dataBucketName).Cursor()
+
+		prefixBytes := []byte(prefix)
+		for k, _ := c.Seek(prefixBytes); k != nil && bytes.HasPrefix(k, prefixBytes); k, _ = c.Next() {
+			key := string(k)
+			key = strings.TrimPrefix(key, prefix)
+			if i := strings.Index(key, "/"); i == -1 {
+				// Add objects only from the current 'folder'
+				keys = append(keys, key)
+			} else {
+				// Add truncated 'folder' paths
+				if len(keys) == 0 || keys[len(keys)-1] != key[:i+1] {
+					keys = append(keys, string(key[:i+1]))
+				}
+			}
+		}
+
+		return nil
+	})
+
+	return keys, err
+}
+
+// Transaction writes all the operations in the provided transaction to the bolt
+// file.
+func (f *FSM) Transaction(ctx context.Context, txns []*physical.TxnEntry) error {
+	f.l.RLock()
+	defer f.l.RUnlock()
+
+	// Start a write transaction.
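+	// All operations in txns share this single bolt transaction, so an error
+	// on any entry rolls back the entire batch.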
+ err := f.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket(dataBucketName) + for _, txn := range txns { + var err error + switch txn.Operation { + case physical.PutOperation: + err = b.Put([]byte(txn.Entry.Key), txn.Entry.Value) + case physical.DeleteOperation: + err = b.Delete([]byte(txn.Entry.Key)) + default: + return fmt.Errorf("%q is not a supported transaction operation", txn.Operation) + } + if err != nil { + return err + } + } + + return nil + }) + + return err +} + +// ApplyBatch will apply a set of logs to the FSM. This is called from the raft +// library. +func (f *FSM) ApplyBatch(logs []*raft.Log) []interface{} { + numLogs := len(logs) + + if numLogs == 0 { + return []interface{}{} + } + + // We will construct one slice per log, each slice containing another slice of results from our get ops + entrySlices := make([][]*FSMEntry, 0, numLogs) + + // Do the unmarshalling first so we don't hold locks + var latestConfiguration *ConfigurationValue + commands := make([]interface{}, 0, numLogs) + for _, l := range logs { + switch l.Type { + case raft.LogCommand: + command := &LogData{} + err := proto.Unmarshal(l.Data, command) + if err != nil { + f.logger.Error("error proto unmarshaling log data", "error", err) + panic("error proto unmarshaling log data") + } + commands = append(commands, command) + case raft.LogConfiguration: + configuration := raft.DecodeConfiguration(l.Data) + config := raftConfigurationToProtoConfiguration(l.Index, configuration) + + commands = append(commands, config) + + // Update the latest configuration the fsm has received; we will + // store this after it has been committed to storage. + latestConfiguration = config + + default: + panic(fmt.Sprintf("got unexpected log type: %d", l.Type)) + } + } + + // Only advance latest pointer if this log has a higher index value than + // what we have seen in the past. 
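+	// If this batch is not ahead of the stored marker (e.g. after a snapshot
+	// restore fast-forwarded it), logIndex stays nil and the persisted index
+	// is left untouched below.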
+	var logIndex []byte
+	var err error
+	latestIndex, _ := f.LatestState()
+	lastLog := logs[numLogs-1]
+	if latestIndex.Index < lastLog.Index {
+		logIndex, err = proto.Marshal(&IndexValue{
+			Term:  lastLog.Term,
+			Index: lastLog.Index,
+		})
+		if err != nil {
+			f.logger.Error("unable to marshal latest index", "error", err)
+			panic("unable to marshal latest index")
+		}
+	}
+
+	f.l.RLock()
+	defer f.l.RUnlock()
+
+	if f.applyCallback != nil {
+		f.applyCallback()
+	}
+
+	err = f.db.Update(func(tx *bolt.Tx) error {
+		b := tx.Bucket(dataBucketName)
+		for _, commandRaw := range commands {
+			entrySlice := make([]*FSMEntry, 0)
+			switch command := commandRaw.(type) {
+			case *LogData:
+				for _, op := range command.Operations {
+					var err error
+					switch op.OpType {
+					case putOp:
+						err = b.Put([]byte(op.Key), op.Value)
+					case deleteOp:
+						err = b.Delete([]byte(op.Key))
+					case getOp:
+						fsmEntry := &FSMEntry{
+							Key: op.Key,
+						}
+						val := b.Get([]byte(op.Key))
+						if len(val) > 0 {
+							newVal := make([]byte, len(val))
+							copy(newVal, val)
+							fsmEntry.Value = newVal
+						}
+						entrySlice = append(entrySlice, fsmEntry)
+					case restoreCallbackOp:
+						if f.restoreCb != nil {
+							// Kick off the restore callback function in a goroutine
+							go f.restoreCb(context.Background())
+						}
+					default:
+						if _, ok := f.unknownOpTypes.Load(op.OpType); !ok {
+							f.logger.Error("unsupported transaction operation", "op", op.OpType)
+							f.unknownOpTypes.Store(op.OpType, struct{}{})
+						}
+					}
+					if err != nil {
+						return err
+					}
+				}
+
+			case *ConfigurationValue:
+				b := tx.Bucket(configBucketName)
+				configBytes, err := proto.Marshal(command)
+				if err != nil {
+					return err
+				}
+				if err := b.Put(latestConfigKey, configBytes); err != nil {
+					return err
+				}
+			}
+
+			entrySlices = append(entrySlices, entrySlice)
+		}
+
+		if len(logIndex) > 0 {
+			b := tx.Bucket(configBucketName)
+			err = b.Put(latestIndexKey, logIndex)
+			if err != nil {
+				return err
+			}
+		}
+
+		return nil
+	})
+	if err != nil {
+		f.logger.Error("failed to store data", "error", err)
+		panic("failed to store data")
+	}
+
+	// If we advanced the latest value, update the in-memory representation too.
+	if len(logIndex) > 0 {
+		atomic.StoreUint64(f.latestTerm, lastLog.Term)
+		atomic.StoreUint64(f.latestIndex, lastLog.Index)
+	}
+
+	// If one or more configuration changes were processed, store the latest one.
+	if latestConfiguration != nil {
+		f.latestConfig.Store(latestConfiguration)
+	}
+
+	// Build the responses. The logs array is used here to ensure we reply to
+	// all command values, even if they are not of the types we expect. This
+	// should futureproof this function from more log types being provided.
+	resp := make([]interface{}, numLogs)
+	for i := range logs {
+		resp[i] = &FSMApplyResponse{
+			Success:    true,
+			EntrySlice: entrySlices[i],
+		}
+	}
+
+	return resp
+}
+
+// Apply will apply a log value to the FSM. This is called from the raft
+// library.
+func (f *FSM) Apply(log *raft.Log) interface{} {
+	return f.ApplyBatch([]*raft.Log{log})[0]
+}
+
+type writeErrorCloser interface {
+	io.WriteCloser
+	CloseWithError(error) error
+}
+
+// writeTo will copy the FSM's content to a remote sink. The data is written
+// twice, once for use in determining various metadata attributes of the dataset
+// (size, checksum, etc) and a second time for the sink of the data. We also use
+// a proto delimited writer so we can stream proto messages to the sink.
+func (f *FSM) writeTo(ctx context.Context, metaSink writeErrorCloser, sink writeErrorCloser) { + defer metrics.MeasureSince([]string{"raft_storage", "fsm", "write_snapshot"}, time.Now()) + + protoWriter := NewDelimitedWriter(sink) + metadataProtoWriter := NewDelimitedWriter(metaSink) + + f.l.RLock() + defer f.l.RUnlock() + + err := f.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket(dataBucketName) + + c := b.Cursor() + + // Do the first scan of the data for metadata purposes. + for k, v := c.First(); k != nil; k, v = c.Next() { + err := metadataProtoWriter.WriteMsg(&pb.StorageEntry{ + Key: string(k), + Value: v, + }) + if err != nil { + metaSink.CloseWithError(err) + return err + } + } + metaSink.Close() + + // Do the second scan for copy purposes. + for k, v := c.First(); k != nil; k, v = c.Next() { + err := protoWriter.WriteMsg(&pb.StorageEntry{ + Key: string(k), + Value: v, + }) + if err != nil { + return err + } + } + + return nil + }) + sink.CloseWithError(err) +} + +// Snapshot implements the FSM interface. It returns a noop snapshot object. +func (f *FSM) Snapshot() (raft.FSMSnapshot, error) { + return &noopSnapshotter{ + fsm: f, + }, nil +} + +// SetNoopRestore is used to disable restore operations on raft startup. Because +// we are using persistent storage in our FSM we do not need to issue a restore +// on startup. +func (f *FSM) SetNoopRestore(enabled bool) { + f.l.Lock() + f.noopRestore = enabled + f.l.Unlock() +} + +// Restore installs a new snapshot from the provided reader. It does an atomic +// rename of the snapshot file into the database filepath. While a restore is +// happening the FSM is locked and no writes or reads can be performed. +func (f *FSM) Restore(r io.ReadCloser) error { + defer metrics.MeasureSince([]string{"raft_storage", "fsm", "restore_snapshot"}, time.Now()) + + if f.noopRestore { + return nil + } + + snapshotInstaller, ok := r.(*boltSnapshotInstaller) + if !ok { + wrapper, ok := r.(raft.ReadCloserWrapper) + if !ok { + return fmt.Errorf("expected ReadCloserWrapper object, got: %T", r) + } + snapshotInstallerRaw := wrapper.WrappedReadCloser() + snapshotInstaller, ok = snapshotInstallerRaw.(*boltSnapshotInstaller) + if !ok { + return fmt.Errorf("expected snapshot installer object, got: %T", snapshotInstallerRaw) + } + } + + f.l.Lock() + defer f.l.Unlock() + + // Cache the local node config before closing the db file + lnConfig, err := f.localNodeConfig() + if err != nil { + return err + } + + // Close the db file + if err := f.db.Close(); err != nil { + f.logger.Error("failed to close database file", "error", err) + return err + } + + dbPath := filepath.Join(f.path, databaseFilename) + + f.logger.Info("installing snapshot to FSM") + + // Install the new boltdb file + var retErr *multierror.Error + if err := snapshotInstaller.Install(dbPath); err != nil { + f.logger.Error("failed to install snapshot", "error", err) + retErr = multierror.Append(retErr, fmt.Errorf("failed to install snapshot database: %w", err)) + } else { + f.logger.Info("snapshot installed") + } + + // Open the db file. We want to do this regardless of if the above install + // worked. If the install failed we should try to open the old DB file. + if err := f.openDBFile(dbPath); err != nil { + f.logger.Error("failed to open new database file", "error", err) + retErr = multierror.Append(retErr, fmt.Errorf("failed to open new bolt file: %w", err)) + } + + // Handle local node config restore. lnConfig should not be nil here, but + // adding the nil check anyways for safety. 
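+	// (lnConfig was read from the old database just before it was closed
+	// above; it is only nil if the pre-restore db held no local node config
+	// entry.)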
+ if lnConfig != nil { + // Persist the local node config on the restored fsm. + if err := f.persistDesiredSuffrage(lnConfig); err != nil { + f.logger.Error("failed to persist local node config from before the restore", "error", err) + retErr = multierror.Append(retErr, fmt.Errorf("failed to persist local node config from before the restore: %w", err)) + } + } + + return retErr.ErrorOrNil() +} + +// noopSnapshotter implements the fsm.Snapshot interface. It doesn't do anything +// since our SnapshotStore reads data out of the FSM on Open(). +type noopSnapshotter struct { + fsm *FSM +} + +// Persist implements the fsm.Snapshot interface. It doesn't need to persist any +// state data, but it does persist the raft metadata. This is necessary so we +// can be sure to capture indexes for operation types that are not sent to the +// FSM. +func (s *noopSnapshotter) Persist(sink raft.SnapshotSink) error { + boltSnapshotSink := sink.(*BoltSnapshotSink) + + // We are processing a snapshot, fastforward the index, term, and + // configuration to the latest seen by the raft system. + if err := s.fsm.witnessSnapshot(&boltSnapshotSink.meta); err != nil { + return err + } + + return nil +} + +// Release doesn't do anything. +func (s *noopSnapshotter) Release() {} + +// raftConfigurationToProtoConfiguration converts a raft configuration object to +// a proto value. +func raftConfigurationToProtoConfiguration(index uint64, configuration raft.Configuration) *ConfigurationValue { + servers := make([]*Server, len(configuration.Servers)) + for i, s := range configuration.Servers { + servers[i] = &Server{ + Suffrage: int32(s.Suffrage), + Id: string(s.ID), + Address: string(s.Address), + } + } + return &ConfigurationValue{ + Index: index, + Servers: servers, + } +} + +// protoConfigurationToRaftConfiguration converts a proto configuration object +// to a raft object. +func protoConfigurationToRaftConfiguration(configuration *ConfigurationValue) (uint64, raft.Configuration) { + servers := make([]raft.Server, len(configuration.Servers)) + for i, s := range configuration.Servers { + servers[i] = raft.Server{ + Suffrage: raft.ServerSuffrage(s.Suffrage), + ID: raft.ServerID(s.Id), + Address: raft.ServerAddress(s.Address), + } + } + return configuration.Index, raft.Configuration{ + Servers: servers, + } +} + +type FSMChunkStorage struct { + f *FSM + ctx context.Context +} + +// chunkPaths returns a disk prefix and key given chunkinfo +func (f *FSMChunkStorage) chunkPaths(chunk *raftchunking.ChunkInfo) (string, string) { + prefix := fmt.Sprintf("%s%d/", chunkingPrefix, chunk.OpNum) + key := fmt.Sprintf("%s%d", prefix, chunk.SequenceNum) + return prefix, key +} + +func (f *FSMChunkStorage) StoreChunk(chunk *raftchunking.ChunkInfo) (bool, error) { + b, err := jsonutil.EncodeJSON(chunk) + if err != nil { + return false, fmt.Errorf("error encoding chunk info: %w", err) + } + + prefix, key := f.chunkPaths(chunk) + + entry := &physical.Entry{ + Key: key, + Value: b, + } + + f.f.l.RLock() + defer f.f.l.RUnlock() + + // Start a write transaction. 
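+	// Writing the chunk and counting how many chunks of the op are present
+	// happen in the same bolt transaction, so the "done" result below is
+	// consistent with this write.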
+ done := new(bool) + if err := f.f.db.Update(func(tx *bolt.Tx) error { + if err := tx.Bucket(dataBucketName).Put([]byte(entry.Key), entry.Value); err != nil { + return fmt.Errorf("error storing chunk info: %w", err) + } + + // Assume bucket exists and has keys + c := tx.Bucket(dataBucketName).Cursor() + + var keys []string + prefixBytes := []byte(prefix) + for k, _ := c.Seek(prefixBytes); k != nil && bytes.HasPrefix(k, prefixBytes); k, _ = c.Next() { + key := string(k) + key = strings.TrimPrefix(key, prefix) + if i := strings.Index(key, "/"); i == -1 { + // Add objects only from the current 'folder' + keys = append(keys, key) + } else { + // Add truncated 'folder' paths + keys = strutil.AppendIfMissing(keys, string(key[:i+1])) + } + } + + *done = uint32(len(keys)) == chunk.NumChunks + + return nil + }); err != nil { + return false, err + } + + return *done, nil +} + +func (f *FSMChunkStorage) FinalizeOp(opNum uint64) ([]*raftchunking.ChunkInfo, error) { + ret, err := f.chunksForOpNum(opNum) + if err != nil { + return nil, fmt.Errorf("error getting chunks for op keys: %w", err) + } + + prefix, _ := f.chunkPaths(&raftchunking.ChunkInfo{OpNum: opNum}) + if err := f.f.DeletePrefix(f.ctx, prefix); err != nil { + return nil, fmt.Errorf("error deleting prefix after op finalization: %w", err) + } + + return ret, nil +} + +func (f *FSMChunkStorage) chunksForOpNum(opNum uint64) ([]*raftchunking.ChunkInfo, error) { + prefix, _ := f.chunkPaths(&raftchunking.ChunkInfo{OpNum: opNum}) + + opChunkKeys, err := f.f.List(f.ctx, prefix) + if err != nil { + return nil, fmt.Errorf("error fetching op chunk keys: %w", err) + } + + if len(opChunkKeys) == 0 { + return nil, nil + } + + var ret []*raftchunking.ChunkInfo + + for _, v := range opChunkKeys { + seqNum, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return nil, fmt.Errorf("error converting seqnum to integer: %w", err) + } + + entry, err := f.f.Get(f.ctx, prefix+v) + if err != nil { + return nil, fmt.Errorf("error fetching chunkinfo: %w", err) + } + + var ci raftchunking.ChunkInfo + if err := jsonutil.DecodeJSON(entry.Value, &ci); err != nil { + return nil, fmt.Errorf("error decoding chunkinfo json: %w", err) + } + + if ret == nil { + ret = make([]*raftchunking.ChunkInfo, ci.NumChunks) + } + + ret[seqNum] = &ci + } + + return ret, nil +} + +func (f *FSMChunkStorage) GetChunks() (raftchunking.ChunkMap, error) { + opNums, err := f.f.List(f.ctx, chunkingPrefix) + if err != nil { + return nil, fmt.Errorf("error doing recursive list for chunk saving: %w", err) + } + + if len(opNums) == 0 { + return nil, nil + } + + ret := make(raftchunking.ChunkMap, len(opNums)) + for _, opNumStr := range opNums { + opNum, err := strconv.ParseInt(opNumStr, 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing op num during chunk saving: %w", err) + } + + opChunks, err := f.chunksForOpNum(uint64(opNum)) + if err != nil { + return nil, fmt.Errorf("error getting chunks for op keys during chunk saving: %w", err) + } + + ret[uint64(opNum)] = opChunks + } + + return ret, nil +} + +func (f *FSMChunkStorage) RestoreChunks(chunks raftchunking.ChunkMap) error { + if err := f.f.DeletePrefix(f.ctx, chunkingPrefix); err != nil { + return fmt.Errorf("error deleting prefix for chunk restoration: %w", err) + } + if len(chunks) == 0 { + return nil + } + + for opNum, opChunks := range chunks { + for _, chunk := range opChunks { + if chunk == nil { + continue + } + if chunk.OpNum != opNum { + return errors.New("unexpected op number in chunk") + } + if _, err := 
f.StoreChunk(chunk); err != nil { + return fmt.Errorf("error storing chunk during restoration: %w", err) + } + } + } + + return nil +} diff --git a/physical/raft/fsm_test.go b/physical/raft/fsm_test.go new file mode 100644 index 0000000..ba0e382 --- /dev/null +++ b/physical/raft/fsm_test.go @@ -0,0 +1,164 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package raft + +import ( + "context" + "fmt" + "io/ioutil" + "math/rand" + "os" + "sort" + "testing" + + "github.com/go-test/deep" + "github.com/golang/protobuf/proto" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/raft" + "github.com/hashicorp/vault/sdk/physical" +) + +func getFSM(t testing.TB) (*FSM, string) { + raftDir, err := ioutil.TempDir("", "vault-raft-") + if err != nil { + t.Fatal(err) + } + t.Logf("raft dir: %s", raftDir) + + logger := hclog.New(&hclog.LoggerOptions{ + Name: "raft", + Level: hclog.Trace, + }) + + fsm, err := NewFSM(raftDir, "", logger) + if err != nil { + t.Fatal(err) + } + + return fsm, raftDir +} + +func TestFSM_Batching(t *testing.T) { + fsm, dir := getFSM(t) + defer func() { _ = os.RemoveAll(dir) }() + + var index uint64 + var term uint64 = 1 + + getLog := func(i uint64) (int, *raft.Log) { + if rand.Intn(10) >= 8 { + term += 1 + return 0, &raft.Log{ + Index: i, + Term: term, + Type: raft.LogConfiguration, + Data: raft.EncodeConfiguration(raft.Configuration{ + Servers: []raft.Server{ + { + Address: "test", + ID: "test", + }, + }, + }), + } + } + + command := &LogData{ + Operations: make([]*LogOperation, rand.Intn(10)), + } + + for j := range command.Operations { + command.Operations[j] = &LogOperation{ + OpType: putOp, + Key: fmt.Sprintf("key-%d-%d", i, j), + Value: []byte(fmt.Sprintf("value-%d-%d", i, j)), + } + } + commandBytes, err := proto.Marshal(command) + if err != nil { + t.Fatal(err) + } + return len(command.Operations), &raft.Log{ + Index: i, + Term: term, + Type: raft.LogCommand, + Data: commandBytes, + } + } + + totalKeys := 0 + for i := 0; i < 100; i++ { + batchSize := rand.Intn(64) + batch := make([]*raft.Log, batchSize) + for j := 0; j < batchSize; j++ { + var keys int + index++ + keys, batch[j] = getLog(index) + totalKeys += keys + } + + resp := fsm.ApplyBatch(batch) + if len(resp) != batchSize { + t.Fatalf("incorrect response length: got %d expected %d", len(resp), batchSize) + } + + for _, r := range resp { + if _, ok := r.(*FSMApplyResponse); !ok { + t.Fatal("bad response type") + } + } + } + + keys, err := fsm.List(context.Background(), "") + if err != nil { + t.Fatal(err) + } + + if len(keys) != totalKeys { + t.Fatalf("incorrect number of keys: got %d expected %d", len(keys), totalKeys) + } + + latestIndex, latestConfig := fsm.LatestState() + if latestIndex.Index != index { + t.Fatalf("bad latest index: got %d expected %d", latestIndex.Index, index) + } + if latestIndex.Term != term { + t.Fatalf("bad latest term: got %d expected %d", latestIndex.Term, term) + } + + if latestConfig == nil && term > 1 { + t.Fatal("config wasn't updated") + } +} + +func TestFSM_List(t *testing.T) { + fsm, dir := getFSM(t) + defer func() { _ = os.RemoveAll(dir) }() + + ctx := context.Background() + count := 100 + keys := rand.Perm(count) + var sorted []string + for _, k := range keys { + err := fsm.Put(ctx, &physical.Entry{Key: fmt.Sprintf("foo/%d/bar", k)}) + if err != nil { + t.Fatal(err) + } + err = fsm.Put(ctx, &physical.Entry{Key: fmt.Sprintf("foo/%d/baz", k)}) + if err != nil { + t.Fatal(err) + } + sorted = append(sorted, fmt.Sprintf("%d/", k)) + } + 
sort.Strings(sorted) + + got, err := fsm.List(ctx, "foo/") + if err != nil { + t.Fatal(err) + } + sort.Strings(got) + if diff := deep.Equal(sorted, got); len(diff) > 0 { + t.Fatal(diff) + } +} diff --git a/physical/raft/io.go b/physical/raft/io.go new file mode 100644 index 0000000..98f96bc --- /dev/null +++ b/physical/raft/io.go @@ -0,0 +1,54 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package raft + +import ( + "io" + + "github.com/golang/protobuf/proto" +) + +type Writer interface { + WriteMsg(proto.Message) error +} + +type WriteCloser interface { + Writer + io.Closer +} + +type Reader interface { + ReadMsg(msg proto.Message) error + GetLastReadSize() int +} + +type ReadCloser interface { + Reader + io.Closer +} diff --git a/physical/raft/msgpack.go b/physical/raft/msgpack.go new file mode 100644 index 0000000..88ac74d --- /dev/null +++ b/physical/raft/msgpack.go @@ -0,0 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package raft + +// If we downgrade msgpack from v1.1.5 to v0.5.5, everything will still +// work, but any pre-existing raft clusters will break on upgrade. +// This file exists so that the Vault project has an explicit dependency +// on the library, which allows us to pin the version in go.mod. + +import ( + _ "github.com/hashicorp/go-msgpack/codec" +) diff --git a/physical/raft/raft.go b/physical/raft/raft.go new file mode 100644 index 0000000..bfb4139 --- /dev/null +++ b/physical/raft/raft.go @@ -0,0 +1,1970 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package raft + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/armon/go-metrics" + "github.com/golang/protobuf/proto" + log "github.com/hashicorp/go-hclog" + wrapping "github.com/hashicorp/go-kms-wrapping/v2" + "github.com/hashicorp/go-raftchunking" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-secure-stdlib/tlsutil" + "github.com/hashicorp/go-uuid" + goversion "github.com/hashicorp/go-version" + "github.com/hashicorp/raft" + autopilot "github.com/hashicorp/raft-autopilot" + raftboltdb "github.com/hashicorp/raft-boltdb/v2" + snapshot "github.com/hashicorp/raft-snapshot" + "github.com/hashicorp/vault/helper/metricsutil" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/physical" + "github.com/hashicorp/vault/vault/cluster" + "github.com/hashicorp/vault/vault/seal" + "github.com/hashicorp/vault/version" + bolt "go.etcd.io/bbolt" +) + +const ( + // EnvVaultRaftNodeID is used to fetch the Raft node ID from the environment. + EnvVaultRaftNodeID = "VAULT_RAFT_NODE_ID" + + // EnvVaultRaftPath is used to fetch the path where Raft data is stored from the environment. + EnvVaultRaftPath = "VAULT_RAFT_PATH" + + // EnvVaultRaftNonVoter is used to override the non_voter config option, telling Vault to join as a non-voter (i.e. read replica). + EnvVaultRaftNonVoter = "VAULT_RAFT_RETRY_JOIN_AS_NON_VOTER" + raftNonVoterConfigKey = "retry_join_as_non_voter" +) + +var getMmapFlags = func(string) int { return 0 } + +// Verify RaftBackend satisfies the correct interfaces +var ( + _ physical.Backend = (*RaftBackend)(nil) + _ physical.Transactional = (*RaftBackend)(nil) + _ physical.HABackend = (*RaftBackend)(nil) + _ physical.Lock = (*RaftLock)(nil) +) + +var ( + // raftLogCacheSize is the maximum number of logs to cache in-memory. + // This is used to reduce disk I/O for the recently committed entries. + raftLogCacheSize = 512 + + raftState = "raft/" + peersFileName = "peers.json" + restoreOpDelayDuration = 5 * time.Second + defaultMaxEntrySize = uint64(2 * raftchunking.ChunkSize) + + GetInTxnDisabledError = errors.New("get operations inside transactions are disabled in raft backend") +) + +// RaftBackend implements the backend interfaces and uses the raft protocol to +// persist writes to the FSM. +type RaftBackend struct { + logger log.Logger + conf map[string]string + l sync.RWMutex + + // fsm is the state store for vault's data + fsm *FSM + + // raft is the instance of raft we will operate on. + raft *raft.Raft + + // raftInitCh is used to block during HA lock acquisition if raft + // has not been initialized yet, which can occur if raft is being + // used for HA-only. + raftInitCh chan struct{} + + // raftNotifyCh is used to receive updates about leadership changes + // regarding this node. + raftNotifyCh chan bool + + // streamLayer is the network layer used to connect the nodes in the raft + // cluster. + streamLayer *raftLayer + + // raftTransport is the transport layer that the raft library uses for RPC + // communication. + raftTransport raft.Transport + + // snapStore is our snapshot mechanism. + snapStore raft.SnapshotStore + + // logStore is used by the raft library to store the raft logs in durable + // storage. 
logStore raft.LogStore
+
+	// stableStore is used by the raft library to store additional metadata in
+	// durable storage.
+	stableStore raft.StableStore
+
+	// bootstrapConfig is only set when this node needs to be bootstrapped upon
+	// startup.
+	bootstrapConfig *raft.Configuration
+
+	// dataDir is the location on the local filesystem where raft and FSM data
+	// will be stored.
+	dataDir string
+
+	// localID is the ID for this node. This can either be configured in the
+	// config file, via a file on disk, or is otherwise randomly generated.
+	localID string
+
+	// serverAddressProvider is used to map server IDs to addresses.
+	serverAddressProvider raft.ServerAddressProvider
+
+	// permitPool is used to limit the number of concurrent storage calls.
+	permitPool *physical.PermitPool
+
+	// maxEntrySize imposes a size limit (in bytes) on a raft entry (put or transaction).
+	// It is suggested to use a value of 2x the Raft chunking size for optimal
+	// performance.
+	maxEntrySize uint64
+
+	// autopilot is the instance of the raft-autopilot library implementation of
+	// the autopilot features. This will be instantiated on both the leader and
+	// followers. However, only the active node will have a "running" autopilot.
+	autopilot *autopilot.Autopilot
+
+	// autopilotConfig represents the configuration required to instantiate autopilot.
+	autopilotConfig *AutopilotConfig
+
+	// followerStates represents the information about all the peers of the raft
+	// leader. This is used to track some state of the peers, as well as to see
+	// if the peers are "alive" using the heartbeats received from them.
+	followerStates *FollowerStates
+
+	// followerHeartbeatTicker is used to compute dead servers using follower
+	// state heartbeats.
+	followerHeartbeatTicker *time.Ticker
+
+	// disableAutopilot if set will not put the autopilot implementation to use.
+	// The fallback will be to interact with the raft instance directly. This can
+	// only be set during startup via the environment variable
+	// VAULT_RAFT_AUTOPILOT_DISABLE and can't be updated once the node is up and
+	// running.
+	disableAutopilot bool
+
+	// autopilotReconcileInterval is how long between rounds of performing promotions, demotions
+	// and leadership transfers.
+	autopilotReconcileInterval time.Duration
+
+	// autopilotUpdateInterval is the time between the periodic state updates. These periodic
+	// state updates take in known servers from the delegate, request Raft stats be
+	// fetched and pull in other inputs such as the Raft configuration to create
+	// an updated view of the Autopilot State.
+	autopilotUpdateInterval time.Duration
+
+	// upgradeVersion is used to override the Vault SDK version when performing an autopilot automated upgrade.
+	upgradeVersion string
+
+	// redundancyZone specifies a redundancy zone for autopilot.
+	redundancyZone string
+
+	// nonVoter specifies whether the node should join the cluster as a non-voter. Non-voters get
+	// replicated to and can serve reads, but do not take part in leader elections.
+	nonVoter bool
+
+	effectiveSDKVersion string
+	failGetInTxn        *uint32
+}
+
+// LeaderJoinInfo contains information required by a node to join itself as a
+// follower to an existing raft cluster
+type LeaderJoinInfo struct {
+	// AutoJoin defines any cloud auto-join metadata. If supplied, Vault will
+	// attempt to automatically discover peers in addition to what can be provided
+	// via 'leader_api_addr'.
+	AutoJoin string `json:"auto_join"`
+
+	// AutoJoinScheme defines the optional URI protocol scheme for addresses
+	// discovered via auto-join.
+	AutoJoinScheme string `json:"auto_join_scheme"`
+
+	// AutoJoinPort defines the optional port used for addresses discovered via
+	// auto-join.
+	AutoJoinPort uint `json:"auto_join_port"`
+
+	// LeaderAPIAddr is the address of the leader node to connect to
+	LeaderAPIAddr string `json:"leader_api_addr"`
+
+	// LeaderCACert is the CA cert of the leader node
+	LeaderCACert string `json:"leader_ca_cert"`
+
+	// LeaderClientCert is the client certificate for the follower node to
+	// establish client authentication during TLS
+	LeaderClientCert string `json:"leader_client_cert"`
+
+	// LeaderClientKey is the client key for the follower node to establish
+	// client authentication during TLS.
+	LeaderClientKey string `json:"leader_client_key"`
+
+	// LeaderCACertFile is the path on disk to the CA cert file of the
+	// leader node. This should only be provided via Vault's configuration file.
+	LeaderCACertFile string `json:"leader_ca_cert_file"`
+
+	// LeaderClientCertFile is the path on disk to the client certificate file
+	// for the follower node to establish client authentication during TLS. This
+	// should only be provided via Vault's configuration file.
+	LeaderClientCertFile string `json:"leader_client_cert_file"`
+
+	// LeaderClientKeyFile is the path on disk to the client key file for the
+	// follower node to establish client authentication during TLS. This should
+	// only be provided via Vault's configuration file.
+	LeaderClientKeyFile string `json:"leader_client_key_file"`
+
+	// LeaderTLSServerName is the optional ServerName to expect in the leader's
+	// certificate, instead of the host/IP we're actually connecting to.
+	LeaderTLSServerName string `json:"leader_tls_servername"`
+
+	// Retry indicates if the join process should automatically be retried
+	Retry bool `json:"-"`
+
+	// TLSConfig for the API client to use when communicating with the leader node
+	TLSConfig *tls.Config `json:"-"`
+}
+
+// JoinConfig returns a list of information about possible leader nodes that
+// this node can join as a follower
+func (b *RaftBackend) JoinConfig() ([]*LeaderJoinInfo, error) {
+	config := b.conf["retry_join"]
+	if config == "" {
+		return nil, nil
+	}
+
+	var leaderInfos []*LeaderJoinInfo
+	err := jsonutil.DecodeJSON([]byte(config), &leaderInfos)
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode retry_join config: %w", err)
+	}
+
+	if len(leaderInfos) == 0 {
+		return nil, errors.New("invalid retry_join config")
+	}
+
+	for i, info := range leaderInfos {
+		if len(info.AutoJoin) != 0 && len(info.LeaderAPIAddr) != 0 {
+			return nil, errors.New("cannot provide both a leader_api_addr and auto_join")
+		}
+
+		if info.AutoJoinScheme != "" && (info.AutoJoinScheme != "http" && info.AutoJoinScheme != "https") {
+			return nil, fmt.Errorf("invalid scheme %q; must either be http or https", info.AutoJoinScheme)
+		}
+
+		info.Retry = true
+		info.TLSConfig, err = parseTLSInfo(info)
+		if err != nil {
+			return nil, fmt.Errorf("failed to create tls config to communicate with leader node (retry_join index: %d): %w", i, err)
+		}
+	}
+
+	return leaderInfos, nil
+}
+
+// parseTLSInfo is a helper that parses the TLS information, preferring file
+// paths over raw certificate content.
+func parseTLSInfo(leaderInfo *LeaderJoinInfo) (*tls.Config, error) { + var tlsConfig *tls.Config + var err error + if len(leaderInfo.LeaderCACertFile) != 0 || len(leaderInfo.LeaderClientCertFile) != 0 || len(leaderInfo.LeaderClientKeyFile) != 0 { + tlsConfig, err = tlsutil.LoadClientTLSConfig(leaderInfo.LeaderCACertFile, leaderInfo.LeaderClientCertFile, leaderInfo.LeaderClientKeyFile) + if err != nil { + return nil, err + } + } else if len(leaderInfo.LeaderCACert) != 0 || len(leaderInfo.LeaderClientCert) != 0 || len(leaderInfo.LeaderClientKey) != 0 { + tlsConfig, err = tlsutil.ClientTLSConfig([]byte(leaderInfo.LeaderCACert), []byte(leaderInfo.LeaderClientCert), []byte(leaderInfo.LeaderClientKey)) + if err != nil { + return nil, err + } + } + if tlsConfig != nil { + tlsConfig.ServerName = leaderInfo.LeaderTLSServerName + } + + return tlsConfig, nil +} + +// EnsurePath is used to make sure a path exists +func EnsurePath(path string, dir bool) error { + if !dir { + path = filepath.Dir(path) + } + return os.MkdirAll(path, 0o700) +} + +// NewRaftBackend constructs a RaftBackend using the given directory +func NewRaftBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + path := os.Getenv(EnvVaultRaftPath) + if path == "" { + pathFromConfig, ok := conf["path"] + if !ok { + return nil, fmt.Errorf("'path' must be set") + } + path = pathFromConfig + } + + var localID string + { + // Determine the local node ID from the environment. + if raftNodeID := os.Getenv(EnvVaultRaftNodeID); raftNodeID != "" { + localID = raftNodeID + } + + // If not set in the environment check the configuration file. + if len(localID) == 0 { + localID = conf["node_id"] + } + + // If not set in the config check the "node-id" file. + if len(localID) == 0 { + localIDRaw, err := ioutil.ReadFile(filepath.Join(path, "node-id")) + switch { + case err == nil: + if len(localIDRaw) > 0 { + localID = string(localIDRaw) + } + case os.IsNotExist(err): + default: + return nil, err + } + } + + // If all of the above fails generate a UUID and persist it to the + // "node-id" file. + if len(localID) == 0 { + id, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + + if err := ioutil.WriteFile(filepath.Join(path, "node-id"), []byte(id), 0o600); err != nil { + return nil, err + } + + localID = id + } + } + + // Create the FSM. + fsm, err := NewFSM(path, localID, logger.Named("fsm")) + if err != nil { + return nil, fmt.Errorf("failed to create fsm: %v", err) + } + + if delayRaw, ok := conf["apply_delay"]; ok { + delay, err := parseutil.ParseDurationSecond(delayRaw) + if err != nil { + return nil, fmt.Errorf("apply_delay does not parse as a duration: %w", err) + } + fsm.applyCallback = func() { + time.Sleep(delay) + } + } + + // Build an all in-memory setup for dev mode, otherwise prepare a full + // disk-based setup. + var log raft.LogStore + var stable raft.StableStore + var snap raft.SnapshotStore + + var devMode bool + if devMode { + store := raft.NewInmemStore() + stable = store + log = store + snap = raft.NewInmemSnapshotStore() + } else { + // Create the base raft path. + path := filepath.Join(path, raftState) + if err := EnsurePath(path, true); err != nil { + return nil, err + } + + // Create the backend raft store for logs and stable storage. 
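+		// Note that raft.db holds the raft log and stable store; it is separate
+		// from the FSM's vault.db opened earlier.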
+ dbPath := filepath.Join(path, "raft.db") + opts := boltOptions(dbPath) + raftOptions := raftboltdb.Options{ + Path: dbPath, + BoltOptions: opts, + } + store, err := raftboltdb.New(raftOptions) + if err != nil { + return nil, err + } + stable = store + + // Wrap the store in a LogCache to improve performance. + cacheStore, err := raft.NewLogCache(raftLogCacheSize, store) + if err != nil { + return nil, err + } + log = cacheStore + + // Create the snapshot store. + snapshots, err := NewBoltSnapshotStore(path, logger.Named("snapshot"), fsm) + if err != nil { + return nil, err + } + snap = snapshots + } + + if delayRaw, ok := conf["snapshot_delay"]; ok { + delay, err := parseutil.ParseDurationSecond(delayRaw) + if err != nil { + return nil, fmt.Errorf("snapshot_delay does not parse as a duration: %w", err) + } + snap = newSnapshotStoreDelay(snap, delay, logger) + } + + maxEntrySize := defaultMaxEntrySize + if maxEntrySizeCfg := conf["max_entry_size"]; len(maxEntrySizeCfg) != 0 { + i, err := strconv.Atoi(maxEntrySizeCfg) + if err != nil { + return nil, fmt.Errorf("failed to parse 'max_entry_size': %w", err) + } + + maxEntrySize = uint64(i) + } + + var reconcileInterval time.Duration + if interval := conf["autopilot_reconcile_interval"]; interval != "" { + interval, err := parseutil.ParseDurationSecond(interval) + if err != nil { + return nil, fmt.Errorf("autopilot_reconcile_interval does not parse as a duration: %w", err) + } + reconcileInterval = interval + } + + var updateInterval time.Duration + if interval := conf["autopilot_update_interval"]; interval != "" { + interval, err := parseutil.ParseDurationSecond(interval) + if err != nil { + return nil, fmt.Errorf("autopilot_update_interval does not parse as a duration: %w", err) + } + updateInterval = interval + } + + effectiveReconcileInterval := autopilot.DefaultReconcileInterval + effectiveUpdateInterval := autopilot.DefaultUpdateInterval + + if reconcileInterval != 0 { + effectiveReconcileInterval = reconcileInterval + } + if updateInterval != 0 { + effectiveUpdateInterval = updateInterval + } + + if effectiveReconcileInterval < effectiveUpdateInterval { + return nil, fmt.Errorf("autopilot_reconcile_interval (%v) should be larger than autopilot_update_interval (%v)", effectiveReconcileInterval, effectiveUpdateInterval) + } + + var upgradeVersion string + if uv, ok := conf["autopilot_upgrade_version"]; ok && uv != "" { + upgradeVersion = uv + _, err := goversion.NewVersion(upgradeVersion) + if err != nil { + return nil, fmt.Errorf("autopilot_upgrade_version does not parse as a semantic version: %w", err) + } + } + + var nonVoter bool + if v := os.Getenv(EnvVaultRaftNonVoter); v != "" { + // Consistent with handling of other raft boolean env vars + // VAULT_RAFT_AUTOPILOT_DISABLE and VAULT_RAFT_FREELIST_SYNC + nonVoter = true + } else if v, ok := conf[raftNonVoterConfigKey]; ok { + nonVoter, err = strconv.ParseBool(v) + if err != nil { + return nil, fmt.Errorf("failed to parse %s config value %q as a boolean: %w", raftNonVoterConfigKey, v, err) + } + } + + if nonVoter && conf["retry_join"] == "" { + return nil, fmt.Errorf("setting %s to true is only valid if at least one retry_join stanza is specified", raftNonVoterConfigKey) + } + + return &RaftBackend{ + logger: logger, + fsm: fsm, + raftInitCh: make(chan struct{}), + conf: conf, + logStore: log, + stableStore: stable, + snapStore: snap, + dataDir: path, + localID: localID, + permitPool: physical.NewPermitPool(physical.DefaultParallelOperations), + maxEntrySize: maxEntrySize, + 
followerHeartbeatTicker: time.NewTicker(time.Second), + autopilotReconcileInterval: reconcileInterval, + autopilotUpdateInterval: updateInterval, + redundancyZone: conf["autopilot_redundancy_zone"], + nonVoter: nonVoter, + upgradeVersion: upgradeVersion, + failGetInTxn: new(uint32), + }, nil +} + +type snapshotStoreDelay struct { + logger log.Logger + wrapped raft.SnapshotStore + delay time.Duration +} + +func (s snapshotStoreDelay) Create(version raft.SnapshotVersion, index, term uint64, configuration raft.Configuration, configurationIndex uint64, trans raft.Transport) (raft.SnapshotSink, error) { + s.logger.Trace("delaying before creating snapshot", "delay", s.delay) + time.Sleep(s.delay) + return s.wrapped.Create(version, index, term, configuration, configurationIndex, trans) +} + +func (s snapshotStoreDelay) List() ([]*raft.SnapshotMeta, error) { + return s.wrapped.List() +} + +func (s snapshotStoreDelay) Open(id string) (*raft.SnapshotMeta, io.ReadCloser, error) { + return s.wrapped.Open(id) +} + +var _ raft.SnapshotStore = &snapshotStoreDelay{} + +func newSnapshotStoreDelay(snap raft.SnapshotStore, delay time.Duration, logger log.Logger) *snapshotStoreDelay { + return &snapshotStoreDelay{ + logger: logger, + wrapped: snap, + delay: delay, + } +} + +// Close is used to gracefully close all file resources. N.B. This method +// should only be called if you are sure the RaftBackend will never be used +// again. +func (b *RaftBackend) Close() error { + b.l.Lock() + defer b.l.Unlock() + + if err := b.fsm.Close(); err != nil { + return err + } + + if err := b.stableStore.(*raftboltdb.BoltStore).Close(); err != nil { + return err + } + + return nil +} + +func (b *RaftBackend) FailGetInTxn(fail bool) { + var val uint32 + if fail { + val = 1 + } + atomic.StoreUint32(b.failGetInTxn, val) +} + +func (b *RaftBackend) SetEffectiveSDKVersion(sdkVersion string) { + b.l.Lock() + b.effectiveSDKVersion = sdkVersion + b.l.Unlock() +} + +func (b *RaftBackend) RedundancyZone() string { + b.l.RLock() + defer b.l.RUnlock() + + return b.redundancyZone +} + +func (b *RaftBackend) NonVoter() bool { + b.l.RLock() + defer b.l.RUnlock() + + return b.nonVoter +} + +func (b *RaftBackend) EffectiveVersion() string { + b.l.RLock() + defer b.l.RUnlock() + + if b.upgradeVersion != "" { + return b.upgradeVersion + } + + return version.GetVersion().Version +} + +// DisableUpgradeMigration returns the state of the DisableUpgradeMigration config flag and whether it was set or not +func (b *RaftBackend) DisableUpgradeMigration() (bool, bool) { + b.l.RLock() + defer b.l.RUnlock() + + if b.autopilotConfig == nil { + return false, false + } + + return b.autopilotConfig.DisableUpgradeMigration, true +} + +func (b *RaftBackend) CollectMetrics(sink *metricsutil.ClusterMetricSink) { + var stats map[string]string + b.l.RLock() + logstoreStats := b.stableStore.(*raftboltdb.BoltStore).Stats() + fsmStats := b.fsm.Stats() + if b.raft != nil { + stats = b.raft.Stats() + } + b.l.RUnlock() + b.collectMetricsWithStats(logstoreStats, sink, "logstore") + b.collectMetricsWithStats(fsmStats, sink, "fsm") + labels := []metrics.Label{ + { + Name: "peer_id", + Value: b.localID, + }, + } + if stats != nil { + for _, key := range []string{"term", "commit_index", "applied_index", "fsm_pending"} { + n, err := strconv.ParseUint(stats[key], 10, 64) + if err == nil { + sink.SetGaugeWithLabels([]string{"raft_storage", "stats", key}, float32(n), labels) + } + } + } +} + +func (b *RaftBackend) collectMetricsWithStats(stats bolt.Stats, sink 
*metricsutil.ClusterMetricSink, database string) { + txstats := stats.TxStats + labels := []metricsutil.Label{{"database", database}} + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "freelist", "free_pages"}, float32(stats.FreePageN), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "freelist", "pending_pages"}, float32(stats.PendingPageN), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "freelist", "allocated_bytes"}, float32(stats.FreeAlloc), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "freelist", "used_bytes"}, float32(stats.FreelistInuse), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "transaction", "started_read_transactions"}, float32(stats.TxN), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "transaction", "currently_open_read_transactions"}, float32(stats.OpenTxN), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "page", "count"}, float32(txstats.GetPageCount()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "page", "bytes_allocated"}, float32(txstats.GetPageAlloc()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "cursor", "count"}, float32(txstats.GetCursorCount()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "node", "count"}, float32(txstats.GetNodeCount()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "node", "dereferences"}, float32(txstats.GetNodeDeref()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "rebalance", "count"}, float32(txstats.GetRebalance()), labels) + sink.AddSampleWithLabels([]string{"raft_storage", "bolt", "rebalance", "time"}, float32(txstats.GetRebalanceTime().Milliseconds()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "split", "count"}, float32(txstats.GetSplit()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "spill", "count"}, float32(txstats.GetSpill()), labels) + sink.AddSampleWithLabels([]string{"raft_storage", "bolt", "spill", "time"}, float32(txstats.GetSpillTime().Milliseconds()), labels) + sink.SetGaugeWithLabels([]string{"raft_storage", "bolt", "write", "count"}, float32(txstats.GetWrite()), labels) + sink.IncrCounterWithLabels([]string{"raft_storage", "bolt", "write", "time"}, float32(txstats.GetWriteTime().Milliseconds()), labels) +} + +// RaftServer has information about a server in the Raft configuration +type RaftServer struct { + // NodeID is the name of the server + NodeID string `json:"node_id"` + + // Address is the IP:port of the server, used for Raft communications + Address string `json:"address"` + + // Leader is true if this server is the current cluster leader + Leader bool `json:"leader"` + + // Protocol version is the raft protocol version used by the server + ProtocolVersion string `json:"protocol_version"` + + // Voter is true if this server has a vote in the cluster. This might + // be false if the server is staging and still coming online. + Voter bool `json:"voter"` +} + +// RaftConfigurationResponse is returned when querying for the current Raft +// configuration. +type RaftConfigurationResponse struct { + // Servers has the list of servers in the Raft configuration. + Servers []*RaftServer `json:"servers"` + + // Index has the Raft index of this configuration. + Index uint64 `json:"index"` +} + +// Peer defines the ID and Address for a given member of the raft cluster. 
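+// Suffrage carries the integer value of a raft.ServerSuffrage (in
+// hashicorp/raft: 0 = Voter, 1 = Nonvoter, 2 = Staging).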
+type Peer struct {
+	ID       string `json:"id"`
+	Address  string `json:"address"`
+	Suffrage int    `json:"suffrage"`
+}
+
+// NodeID returns the identifier of the node
+func (b *RaftBackend) NodeID() string {
+	return b.localID
+}
+
+// Initialized tells if raft is running or not
+func (b *RaftBackend) Initialized() bool {
+	b.l.RLock()
+	init := b.raft != nil
+	b.l.RUnlock()
+	return init
+}
+
+// SetTLSKeyring is used to install a new keyring. If the active key has changed
+// it will also close any network connections or streams forcing a reconnect
+// with the new key.
+func (b *RaftBackend) SetTLSKeyring(keyring *TLSKeyring) error {
+	b.l.RLock()
+	err := b.streamLayer.setTLSKeyring(keyring)
+	b.l.RUnlock()
+
+	return err
+}
+
+// SetServerAddressProvider sets the address provider for determining the raft
+// node addresses. This is currently only used in tests.
+func (b *RaftBackend) SetServerAddressProvider(provider raft.ServerAddressProvider) {
+	b.l.Lock()
+	b.serverAddressProvider = provider
+	b.l.Unlock()
+}
+
+// Bootstrap prepares the given peers to be part of the raft cluster
+func (b *RaftBackend) Bootstrap(peers []Peer) error {
+	b.l.Lock()
+	defer b.l.Unlock()
+
+	hasState, err := raft.HasExistingState(b.logStore, b.stableStore, b.snapStore)
+	if err != nil {
+		return err
+	}
+
+	if hasState {
+		return errors.New("error bootstrapping cluster: cluster already has state")
+	}
+
+	raftConfig := &raft.Configuration{
+		Servers: make([]raft.Server, len(peers)),
+	}
+
+	for i, p := range peers {
+		raftConfig.Servers[i] = raft.Server{
+			ID:       raft.ServerID(p.ID),
+			Address:  raft.ServerAddress(p.Address),
+			Suffrage: raft.ServerSuffrage(p.Suffrage),
+		}
+	}
+
+	// Store the config for later use
+	b.bootstrapConfig = raftConfig
+	return nil
+}
+
+// SetRestoreCallback sets the callback to be used when a restoreCallbackOp is
+// processed through the FSM.
+func (b *RaftBackend) SetRestoreCallback(restoreCb restoreCallback) {
+	b.fsm.l.Lock()
+	b.fsm.restoreCb = restoreCb
+	b.fsm.l.Unlock()
+}
+
+func (b *RaftBackend) applyConfigSettings(config *raft.Config) error {
+	config.Logger = b.logger
+	multiplierRaw, ok := b.conf["performance_multiplier"]
+	multiplier := 5
+	if ok {
+		var err error
+		multiplier, err = strconv.Atoi(multiplierRaw)
+		if err != nil {
+			return err
+		}
+	}
+	config.ElectionTimeout *= time.Duration(multiplier)
+	config.HeartbeatTimeout *= time.Duration(multiplier)
+	config.LeaderLeaseTimeout *= time.Duration(multiplier)
+
+	snapThresholdRaw, ok := b.conf["snapshot_threshold"]
+	if ok {
+		var err error
+		snapThreshold, err := strconv.Atoi(snapThresholdRaw)
+		if err != nil {
+			return err
+		}
+		config.SnapshotThreshold = uint64(snapThreshold)
+	}
+
+	trailingLogsRaw, ok := b.conf["trailing_logs"]
+	if ok {
+		var err error
+		trailingLogs, err := strconv.Atoi(trailingLogsRaw)
+		if err != nil {
+			return err
+		}
+		config.TrailingLogs = uint64(trailingLogs)
+	}
+	snapshotIntervalRaw, ok := b.conf["snapshot_interval"]
+	if ok {
+		var err error
+		snapshotInterval, err := parseutil.ParseDurationSecond(snapshotIntervalRaw)
+		if err != nil {
+			return err
+		}
+		config.SnapshotInterval = snapshotInterval
+	}
+
+	config.NoSnapshotRestoreOnStart = true
+	config.MaxAppendEntries = 64
+
+	// Setting BatchApplyCh allows the raft library to enqueue up to
+	// MaxAppendEntries into each raft apply rather than relying on the
+	// scheduler.
+ config.BatchApplyCh = true + + b.logger.Trace("applying raft config", "inputs", b.conf) + return nil +} + +// SetupOpts are used to pass options to the raft setup function. +type SetupOpts struct { + // TLSKeyring is the keyring to use for the cluster traffic. + TLSKeyring *TLSKeyring + + // ClusterListener is the cluster hook used to register the raft handler and + // client with core's cluster listeners. + ClusterListener cluster.ClusterHook + + // StartAsLeader is used to specify this node should start as leader and + // bypass the leader election. This should be used with caution. + StartAsLeader bool + + // RecoveryModeConfig is the configuration for the raft cluster in recovery + // mode. + RecoveryModeConfig *raft.Configuration +} + +func (b *RaftBackend) StartRecoveryCluster(ctx context.Context, peer Peer) error { + recoveryModeConfig := &raft.Configuration{ + Servers: []raft.Server{ + { + ID: raft.ServerID(peer.ID), + Address: raft.ServerAddress(peer.Address), + }, + }, + } + + return b.SetupCluster(context.Background(), SetupOpts{ + StartAsLeader: true, + RecoveryModeConfig: recoveryModeConfig, + }) +} + +func (b *RaftBackend) HasState() (bool, error) { + b.l.RLock() + defer b.l.RUnlock() + + return raft.HasExistingState(b.logStore, b.stableStore, b.snapStore) +} + +// SetupCluster starts the raft cluster and enables the networking needed for +// the raft nodes to communicate. +func (b *RaftBackend) SetupCluster(ctx context.Context, opts SetupOpts) error { + b.logger.Trace("setting up raft cluster") + + b.l.Lock() + defer b.l.Unlock() + + // We are already unsealed + if b.raft != nil { + b.logger.Debug("raft already started, not setting up cluster") + return nil + } + + if len(b.localID) == 0 { + return errors.New("no local node id configured") + } + + // Setup the raft config + raftConfig := raft.DefaultConfig() + if err := b.applyConfigSettings(raftConfig); err != nil { + return err + } + + listenerIsNil := func(cl cluster.ClusterHook) bool { + switch { + case opts.ClusterListener == nil: + return true + default: + // Concrete type checks + switch cl.(type) { + case *cluster.Listener: + return cl.(*cluster.Listener) == nil + } + } + return false + } + + var initialTimeoutMultiplier time.Duration + switch { + case opts.TLSKeyring == nil && listenerIsNil(opts.ClusterListener): + // If we don't have a provided network we use an in-memory one. + // This allows us to bootstrap a node without bringing up a cluster + // network. This will be true during bootstrap, tests and dev modes. + _, b.raftTransport = raft.NewInmemTransportWithTimeout(raft.ServerAddress(b.localID), time.Second) + case opts.TLSKeyring == nil: + return errors.New("no keyring provided") + case listenerIsNil(opts.ClusterListener): + return errors.New("no cluster listener provided") + default: + initialTimeoutMultiplier = 3 + if !opts.StartAsLeader { + electionTimeout, heartbeatTimeout := raftConfig.ElectionTimeout, raftConfig.HeartbeatTimeout + // Use bigger values for first election + raftConfig.ElectionTimeout *= initialTimeoutMultiplier + raftConfig.HeartbeatTimeout *= initialTimeoutMultiplier + b.logger.Trace("using larger timeouts for raft at startup", + "initial_election_timeout", raftConfig.ElectionTimeout, + "initial_heartbeat_timeout", raftConfig.HeartbeatTimeout, + "normal_election_timeout", electionTimeout, + "normal_heartbeat_timeout", heartbeatTimeout) + } + + // Set the local address and localID in the streaming layer and the raft config. 
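+		// The stream layer below multiplexes raft traffic over Vault's
+		// existing cluster listener, authenticated with the TLS keyring,
+		// rather than opening a dedicated raft port.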
+ streamLayer, err := NewRaftLayer(b.logger.Named("stream"), opts.TLSKeyring, opts.ClusterListener) + if err != nil { + return err + } + transConfig := &raft.NetworkTransportConfig{ + Stream: streamLayer, + MaxPool: 3, + Timeout: 10 * time.Second, + ServerAddressProvider: b.serverAddressProvider, + Logger: b.logger.Named("raft-net"), + } + transport := raft.NewNetworkTransportWithConfig(transConfig) + + b.streamLayer = streamLayer + b.raftTransport = transport + } + + raftConfig.LocalID = raft.ServerID(b.localID) + + // Set up a channel for reliable leader notifications. + raftNotifyCh := make(chan bool, 10) + raftConfig.NotifyCh = raftNotifyCh + + // If we have a bootstrapConfig set we should bootstrap now. + if b.bootstrapConfig != nil { + bootstrapConfig := b.bootstrapConfig + // Unset the bootstrap config + b.bootstrapConfig = nil + + // Bootstrap raft with our known cluster members. + if err := raft.BootstrapCluster(raftConfig, b.logStore, b.stableStore, b.snapStore, b.raftTransport, *bootstrapConfig); err != nil { + return err + } + } + + // Setup the Raft store. + b.fsm.SetNoopRestore(true) + + raftPath := filepath.Join(b.dataDir, raftState) + peersFile := filepath.Join(raftPath, peersFileName) + _, err := os.Stat(peersFile) + if err == nil { + b.logger.Info("raft recovery initiated", "recovery_file", peersFileName) + + recoveryConfig, err := raft.ReadConfigJSON(peersFile) + if err != nil { + return fmt.Errorf("raft recovery failed to parse peers.json: %w", err) + } + + // Non-voting servers are only allowed in enterprise. If Suffrage is disabled, + // error out to indicate that it isn't allowed. + for idx := range recoveryConfig.Servers { + if !nonVotersAllowed && recoveryConfig.Servers[idx].Suffrage == raft.Nonvoter { + return fmt.Errorf("raft recovery failed to parse configuration for node %q: setting `non_voter` is only supported in enterprise", recoveryConfig.Servers[idx].ID) + } + } + + b.logger.Info("raft recovery found new config", "config", recoveryConfig) + + err = raft.RecoverCluster(raftConfig, b.fsm, b.logStore, b.stableStore, b.snapStore, b.raftTransport, recoveryConfig) + if err != nil { + return fmt.Errorf("raft recovery failed: %w", err) + } + + err = os.Remove(peersFile) + if err != nil { + return fmt.Errorf("raft recovery failed to delete peers.json; please delete manually: %w", err) + } + b.logger.Info("raft recovery deleted peers.json") + } + + if opts.RecoveryModeConfig != nil { + err = raft.RecoverCluster(raftConfig, b.fsm, b.logStore, b.stableStore, b.snapStore, b.raftTransport, *opts.RecoveryModeConfig) + if err != nil { + return fmt.Errorf("recovering raft cluster failed: %w", err) + } + } + + b.logger.Info("creating Raft", "config", fmt.Sprintf("%#v", raftConfig)) + raftObj, err := raft.NewRaft(raftConfig, b.fsm.chunker, b.logStore, b.stableStore, b.snapStore, b.raftTransport) + b.fsm.SetNoopRestore(false) + if err != nil { + return err + } + + // If we are expecting to start as leader wait until we win the election. + // This should happen quickly since there is only one node in the cluster. + // StartAsLeader is only set during init, recovery mode, storage migration, + // and tests. + if opts.StartAsLeader { + // ticker is used to prevent memory leak of using time.After in + // for - select pattern. 
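+		// (time.After would allocate a fresh timer on every loop iteration
+		// that is not reclaimed until it fires; a single ticker that is
+		// reset each pass avoids that buildup.)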
+ ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + for { + if raftObj.State() == raft.Leader { + break + } + + ticker.Reset(10 * time.Millisecond) + select { + case <-ctx.Done(): + future := raftObj.Shutdown() + if future.Error() != nil { + return fmt.Errorf("shutdown while waiting for leadership: %w", future.Error()) + } + + return errors.New("shutdown while waiting for leadership") + case <-ticker.C: + } + } + } + + b.raft = raftObj + b.raftNotifyCh = raftNotifyCh + + if err := b.fsm.upgradeLocalNodeConfig(); err != nil { + b.logger.Error("failed to upgrade local node configuration") + return err + } + + if b.streamLayer != nil { + // Add Handler to the cluster. + opts.ClusterListener.AddHandler(consts.RaftStorageALPN, b.streamLayer) + + // Add Client to the cluster. + opts.ClusterListener.AddClient(consts.RaftStorageALPN, b.streamLayer) + } + + // Close the init channel to signal setup has been completed + close(b.raftInitCh) + + reloadConfig := func() { + newCfg := raft.ReloadableConfig{ + TrailingLogs: raftConfig.TrailingLogs, + SnapshotInterval: raftConfig.SnapshotInterval, + SnapshotThreshold: raftConfig.SnapshotThreshold, + HeartbeatTimeout: raftConfig.HeartbeatTimeout / initialTimeoutMultiplier, + ElectionTimeout: raftConfig.ElectionTimeout / initialTimeoutMultiplier, + } + err := raftObj.ReloadConfig(newCfg) + if err != nil { + b.logger.Error("failed to reload raft config to set lower timeouts", "error", err) + } else { + b.logger.Trace("reloaded raft config to set lower timeouts", "config", fmt.Sprintf("%#v", newCfg)) + } + } + confFuture := raftObj.GetConfiguration() + numServers := 0 + if err := confFuture.Error(); err != nil { + // This should probably never happen, but just in case we'll log the error. + // We'll default in this case to the multi-node behaviour. + b.logger.Error("failed to read raft configuration", "error", err) + } else { + clusterConf := confFuture.Configuration() + numServers = len(clusterConf.Servers) + } + if initialTimeoutMultiplier != 0 { + if numServers == 1 { + reloadConfig() + } else { + go func() { + ticker := time.NewTicker(50 * time.Millisecond) + // Emulate the random timeout used in Raft lib, to ensure that + // if all nodes are brought up simultaneously, they don't all + // call for an election at once. + extra := time.Duration(rand.Int63()) % raftConfig.HeartbeatTimeout + timeout := time.NewTimer(raftConfig.HeartbeatTimeout + extra) + for { + select { + case <-ticker.C: + switch raftObj.State() { + case raft.Candidate, raft.Leader: + b.logger.Trace("triggering raft config reload due to being candidate or leader") + reloadConfig() + return + case raft.Shutdown: + return + } + case <-timeout.C: + b.logger.Trace("triggering raft config reload due to initial timeout") + reloadConfig() + return + } + } + }() + } + } + + b.logger.Trace("finished setting up raft cluster") + return nil +} + +// TeardownCluster shuts down the raft cluster +func (b *RaftBackend) TeardownCluster(clusterListener cluster.ClusterHook) error { + if clusterListener != nil { + clusterListener.StopHandler(consts.RaftStorageALPN) + clusterListener.RemoveClient(consts.RaftStorageALPN) + } + + b.l.Lock() + + // Perform shutdown only if the raft object is non-nil. The object could be nil + // if the node is unsealed but has not joined the peer set. 
+	var future raft.Future
+	if b.raft != nil {
+		future = b.raft.Shutdown()
+	}
+
+	b.raft = nil
+
+	// If we're tearing down, then we need to recreate the raftInitCh
+	b.raftInitCh = make(chan struct{})
+	b.l.Unlock()
+
+	if future != nil {
+		return future.Error()
+	}
+
+	return nil
+}
+
+// CommittedIndex returns the latest index committed to stable storage
+func (b *RaftBackend) CommittedIndex() uint64 {
+	b.l.RLock()
+	defer b.l.RUnlock()
+
+	if b.raft == nil {
+		return 0
+	}
+
+	return b.raft.LastIndex()
+}
+
+// AppliedIndex returns the latest index applied to the FSM
+func (b *RaftBackend) AppliedIndex() uint64 {
+	b.l.RLock()
+	defer b.l.RUnlock()
+
+	if b.fsm == nil {
+		return 0
+	}
+
+	// We use the latest index that the FSM has seen here, which may be behind
+	// raft.AppliedIndex() due to the async nature of the raft library.
+	indexState, _ := b.fsm.LatestState()
+	return indexState.Index
+}
+
+// Term returns the raft term of this node.
+func (b *RaftBackend) Term() uint64 {
+	b.l.RLock()
+	defer b.l.RUnlock()
+
+	if b.fsm == nil {
+		return 0
+	}
+
+	// We use the latest term that the FSM has seen here, which may be behind
+	// what raft reports due to the async nature of the raft library.
+	indexState, _ := b.fsm.LatestState()
+	return indexState.Term
+}
+
+// RemovePeer removes the given peer ID from the raft cluster. If the node is
+// ourselves we will give up leadership.
+func (b *RaftBackend) RemovePeer(ctx context.Context, peerID string) error {
+	b.l.RLock()
+	defer b.l.RUnlock()
+
+	if err := ctx.Err(); err != nil {
+		return err
+	}
+
+	if b.disableAutopilot {
+		if b.raft == nil {
+			return errors.New("raft storage is not initialized")
+		}
+		b.logger.Trace("removing server from raft", "id", peerID)
+		future := b.raft.RemoveServer(raft.ServerID(peerID), 0, 0)
+		return future.Error()
+	}
+
+	if b.autopilot == nil {
+		return errors.New("raft storage autopilot is not initialized")
+	}
+
+	b.logger.Trace("removing server from raft via autopilot", "id", peerID)
+	return b.autopilot.RemoveServer(raft.ServerID(peerID))
+}
+
+// GetConfigurationOffline is used to read the stale, last known raft
+// configuration of this node. It accesses the last state written into the
+// FSM. When a server is online use GetConfiguration instead.
+func (b *RaftBackend) GetConfigurationOffline() (*RaftConfigurationResponse, error) {
+	b.l.RLock()
+	defer b.l.RUnlock()
+
+	if b.raft != nil {
+		return nil, errors.New("raft storage is initialized, use GetConfiguration instead")
+	}
+
+	if b.fsm == nil {
+		return nil, nil
+	}
+
+	state, configuration := b.fsm.LatestState()
+	config := &RaftConfigurationResponse{
+		Index: state.Index,
+	}
+
+	if configuration == nil || configuration.Servers == nil {
+		return config, nil
+	}
+
+	for _, server := range configuration.Servers {
+		entry := &RaftServer{
+			NodeID:  server.Id,
+			Address: server.Address,
+			// Since we are offline no node is the leader.
+			Leader: false,
+			Voter:  raft.ServerSuffrage(server.Suffrage) == raft.Voter,
+		}
+		config.Servers = append(config.Servers, entry)
+	}
+
+	return config, nil
+}
+
+func (b *RaftBackend) GetConfiguration(ctx context.Context) (*RaftConfigurationResponse, error) {
+	if err := ctx.Err(); err != nil {
+		return nil, err
+	}
+
+	b.l.RLock()
+	defer b.l.RUnlock()
+
+	if b.raft == nil {
+		return nil, errors.New("raft storage is not initialized")
+	}
+
+	future := b.raft.GetConfiguration()
+	if err := future.Error(); err != nil {
+		return nil, err
+	}
+
+	config := &RaftConfigurationResponse{
+		Index: future.Index(),
+	}
+
+	for _, server := range future.Configuration().Servers {
+		entry := &RaftServer{
+			NodeID:  string(server.ID),
+			Address: string(server.Address),
+			// Since we only service this request on the active node our node ID
+			// denotes the raft leader.
+			Leader:          string(server.ID) == b.NodeID(),
+			Voter:           server.Suffrage == raft.Voter,
+			ProtocolVersion: strconv.Itoa(raft.ProtocolVersionMax),
+		}
+
+		config.Servers = append(config.Servers, entry)
+	}
+
+	return config, nil
+}
+
+// AddPeer adds a new server to the raft cluster
+func (b *RaftBackend) AddPeer(ctx context.Context, peerID, clusterAddr string) error {
+	if err := ctx.Err(); err != nil {
+		return err
+	}
+
+	b.l.RLock()
+	defer b.l.RUnlock()
+
+	if b.disableAutopilot {
+		if b.raft == nil {
+			return errors.New("raft storage is not initialized")
+		}
+		b.logger.Trace("adding server to raft", "id", peerID)
+		future := b.raft.AddVoter(raft.ServerID(peerID), raft.ServerAddress(clusterAddr), 0, 0)
+		return future.Error()
+	}
+
+	if b.autopilot == nil {
+		return errors.New("raft storage autopilot is not initialized")
+	}
+
+	b.logger.Trace("adding server to raft via autopilot", "id", peerID)
+	return b.autopilot.AddServer(&autopilot.Server{
+		ID:          raft.ServerID(peerID),
+		Name:        peerID,
+		Address:     raft.ServerAddress(clusterAddr),
+		RaftVersion: raft.ProtocolVersionMax,
+		NodeType:    autopilot.NodeVoter,
+	})
+}
+
+// Peers returns all the servers present in the raft cluster
+func (b *RaftBackend) Peers(ctx context.Context) ([]Peer, error) {
+	if err := ctx.Err(); err != nil {
+		return nil, err
+	}
+
+	b.l.RLock()
+	defer b.l.RUnlock()
+
+	if b.raft == nil {
+		return nil, errors.New("raft storage is not initialized")
+	}
+
+	future := b.raft.GetConfiguration()
+	if err := future.Error(); err != nil {
+		return nil, err
+	}
+
+	ret := make([]Peer, len(future.Configuration().Servers))
+	for i, s := range future.Configuration().Servers {
+		ret[i] = Peer{
+			ID:       string(s.ID),
+			Address:  string(s.Address),
+			Suffrage: int(s.Suffrage),
+		}
+	}
+
+	return ret, nil
+}
+
+// SnapshotHTTP is a wrapper for Snapshot that sends the snapshot as an HTTP
+// response.
+func (b *RaftBackend) SnapshotHTTP(out *logical.HTTPResponseWriter, access seal.Access) error {
+	out.Header().Add("Content-Disposition", "attachment")
+	out.Header().Add("Content-Type", "application/gzip")
+
+	return b.Snapshot(out, access)
+}
+
+// Snapshot takes a raft snapshot, packages it into an archive file and writes
+// it to the provided writer. Seal access is used to encrypt the SHASUM file so
+// we can validate whether the snapshot was taken using the same root keys.
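+// The result is a gzipped archive; a rough sketch of consuming it (with a
+// hypothetical file destination) would be:
+//
+//	f, _ := os.Create("vault-raft.snap")
+//	defer f.Close()
+//	err := b.Snapshot(f, sealAccess)
+//
+// where sealAccess is the seal.Access of the running cluster.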
+func (b *RaftBackend) Snapshot(out io.Writer, access seal.Access) error { + b.l.RLock() + defer b.l.RUnlock() + + if b.raft == nil { + return errors.New("raft storage is sealed") + } + + // If we have access to the seal create a sealer object + var s snapshot.Sealer + if access != nil { + s = &sealer{ + access: access, + } + } + + return snapshot.Write(b.logger.Named("snapshot"), b.raft, s, out) +} + +// WriteSnapshotToTemp reads a snapshot archive off the provided reader, +// extracts the data and writes the snapshot to a temporary file. The seal +// access is used to decrypt the SHASUM file in the archive to ensure this +// snapshot has the same root key as the running instance. If the provided +// access is nil then it will skip that validation. +func (b *RaftBackend) WriteSnapshotToTemp(in io.ReadCloser, access seal.Access) (*os.File, func(), raft.SnapshotMeta, error) { + b.l.RLock() + defer b.l.RUnlock() + + var metadata raft.SnapshotMeta + if b.raft == nil { + return nil, nil, metadata, errors.New("raft storage is sealed") + } + + // If we have access to the seal create a sealer object + var s snapshot.Sealer + if access != nil { + s = &sealer{ + access: access, + } + } + + snap, cleanup, err := snapshot.WriteToTempFileWithSealer(b.logger.Named("snapshot"), in, &metadata, s) + return snap, cleanup, metadata, err +} + +// RestoreSnapshot applies the provided snapshot metadata and snapshot data to +// raft. +func (b *RaftBackend) RestoreSnapshot(ctx context.Context, metadata raft.SnapshotMeta, snap io.Reader) error { + if err := ctx.Err(); err != nil { + return err + } + + b.l.RLock() + defer b.l.RUnlock() + + if b.raft == nil { + return errors.New("raft storage is not initialized") + } + + if err := b.raft.Restore(&metadata, snap, 0); err != nil { + b.logger.Named("snapshot").Error("failed to restore snapshot", "error", err) + return err + } + + // Apply a log that tells the follower nodes to run the restore callback + // function. This is done after the restore call so we can be sure the + // snapshot applied to a quorum of nodes. + command := &LogData{ + Operations: []*LogOperation{ + { + OpType: restoreCallbackOp, + }, + }, + } + + err := b.applyLog(ctx, command) + + // Do a best-effort attempt to let the standbys apply the restoreCallbackOp + // before we continue. 
+ time.Sleep(restoreOpDelayDuration) + return err +} + +// Delete inserts an entry in the log to delete the given path +func (b *RaftBackend) Delete(ctx context.Context, path string) error { + defer metrics.MeasureSince([]string{"raft-storage", "delete"}, time.Now()) + + if err := ctx.Err(); err != nil { + return err + } + + command := &LogData{ + Operations: []*LogOperation{ + { + OpType: deleteOp, + Key: path, + }, + }, + } + b.permitPool.Acquire() + defer b.permitPool.Release() + + b.l.RLock() + err := b.applyLog(ctx, command) + b.l.RUnlock() + return err +} + +// Get returns the value corresponding to the given path from the fsm +func (b *RaftBackend) Get(ctx context.Context, path string) (*physical.Entry, error) { + defer metrics.MeasureSince([]string{"raft-storage", "get"}, time.Now()) + if b.fsm == nil { + return nil, errors.New("raft: fsm not configured") + } + + if err := ctx.Err(); err != nil { + return nil, err + } + + b.permitPool.Acquire() + defer b.permitPool.Release() + + if err := ctx.Err(); err != nil { + return nil, err + } + + entry, err := b.fsm.Get(ctx, path) + if entry != nil { + valueLen := len(entry.Value) + if uint64(valueLen) > b.maxEntrySize { + b.logger.Warn("retrieved entry value is too large, has raft's max_entry_size been reduced?", + "size", valueLen, "max_entry_size", b.maxEntrySize) + } + } + + return entry, err +} + +// Put inserts an entry in the log for the put operation. It will return an +// error if the resulting entry encoding exceeds the configured max_entry_size +// or if the call to applyLog fails. +func (b *RaftBackend) Put(ctx context.Context, entry *physical.Entry) error { + defer metrics.MeasureSince([]string{"raft-storage", "put"}, time.Now()) + if len(entry.Key) > bolt.MaxKeySize { + return fmt.Errorf("%s, max key size for integrated storage is %d", physical.ErrKeyTooLarge, bolt.MaxKeySize) + } + + if err := ctx.Err(); err != nil { + return err + } + + command := &LogData{ + Operations: []*LogOperation{ + { + OpType: putOp, + Key: entry.Key, + Value: entry.Value, + }, + }, + } + + b.permitPool.Acquire() + defer b.permitPool.Release() + + b.l.RLock() + err := b.applyLog(ctx, command) + b.l.RUnlock() + return err +} + +// List enumerates all the items under the prefix from the fsm +func (b *RaftBackend) List(ctx context.Context, prefix string) ([]string, error) { + defer metrics.MeasureSince([]string{"raft-storage", "list"}, time.Now()) + if b.fsm == nil { + return nil, errors.New("raft: fsm not configured") + } + + if err := ctx.Err(); err != nil { + return nil, err + } + + b.permitPool.Acquire() + defer b.permitPool.Release() + + if err := ctx.Err(); err != nil { + return nil, err + } + + return b.fsm.List(ctx, prefix) +} + +// Transaction applies all the given operations into a single log and +// applies it. 
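+// Put, delete and get operations are batched into one raft log entry, so the
+// whole set commits or fails together; values read by get operations are
+// copied back into the caller's TxnEntry slice once the log has been applied.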
+func (b *RaftBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error { + defer metrics.MeasureSince([]string{"raft-storage", "transaction"}, time.Now()) + + if err := ctx.Err(); err != nil { + return err + } + + failGetInTxn := atomic.LoadUint32(b.failGetInTxn) + for _, t := range txns { + if t.Operation == physical.GetOperation && failGetInTxn != 0 { + return GetInTxnDisabledError + } + } + + txnMap := make(map[string]*physical.TxnEntry) + + command := &LogData{ + Operations: make([]*LogOperation, len(txns)), + } + for i, txn := range txns { + op := &LogOperation{} + switch txn.Operation { + case physical.PutOperation: + if len(txn.Entry.Key) > bolt.MaxKeySize { + return fmt.Errorf("%s, max key size for integrated storage is %d", physical.ErrKeyTooLarge, bolt.MaxKeySize) + } + op.OpType = putOp + op.Key = txn.Entry.Key + op.Value = txn.Entry.Value + case physical.DeleteOperation: + op.OpType = deleteOp + op.Key = txn.Entry.Key + case physical.GetOperation: + op.OpType = getOp + op.Key = txn.Entry.Key + txnMap[op.Key] = txn + default: + return fmt.Errorf("%q is not a supported transaction operation", txn.Operation) + } + + command.Operations[i] = op + } + + b.permitPool.Acquire() + defer b.permitPool.Release() + + b.l.RLock() + err := b.applyLog(ctx, command) + b.l.RUnlock() + + // loop over results and update pointers to get operations + for _, logOp := range command.Operations { + if logOp.OpType == getOp { + if txn, found := txnMap[logOp.Key]; found { + txn.Entry.Value = logOp.Value + } + } + } + + return err +} + +// applyLog will take a given log command and apply it to the raft log. applyLog +// doesn't return until the log has been applied to a quorum of servers and is +// persisted to the local FSM. Caller should hold the backend's read lock. +func (b *RaftBackend) applyLog(ctx context.Context, command *LogData) error { + if b.raft == nil { + return errors.New("raft storage is not initialized") + } + if err := ctx.Err(); err != nil { + return err + } + + commandBytes, err := proto.Marshal(command) + if err != nil { + return err + } + + cmdSize := len(commandBytes) + if uint64(cmdSize) > b.maxEntrySize { + return fmt.Errorf("%s; got %d bytes, max: %d bytes", physical.ErrValueTooLarge, cmdSize, b.maxEntrySize) + } + + defer metrics.AddSample([]string{"raft-storage", "entry_size"}, float32(cmdSize)) + + var chunked bool + var applyFuture raft.ApplyFuture + switch { + case len(commandBytes) <= raftchunking.ChunkSize: + applyFuture = b.raft.Apply(commandBytes, 0) + default: + chunked = true + applyFuture = raftchunking.ChunkingApply(commandBytes, nil, 0, b.raft.ApplyLog) + } + + if err := applyFuture.Error(); err != nil { + return err + } + + resp := applyFuture.Response() + + if chunked { + // In this case we didn't apply all chunks successfully, possibly due + // to a term change + if resp == nil { + // This returns the error in the interface because the raft library + // returns errors from the FSM via the future, not via err from the + // apply function. Downstream client code expects to see any error + // from the FSM (as opposed to the apply itself) and decide whether + // it can retry in the future's response. 
+ return errors.New("applying chunking failed, please retry") + } + + // We expect that this conversion should always work + chunkedSuccess, ok := resp.(raftchunking.ChunkingSuccess) + if !ok { + return errors.New("unknown type of response back from chunking FSM") + } + + // Replace the reply with the inner wrapped version + resp = chunkedSuccess.Response + } + + fsmar, ok := resp.(*FSMApplyResponse) + if !ok || !fsmar.Success { + return errors.New("could not apply data") + } + + // populate command with our results + if fsmar.EntrySlice == nil { + return errors.New("entries on FSM response were empty") + } + + for i, logOp := range command.Operations { + if logOp.OpType == getOp { + fsmEntry := fsmar.EntrySlice[i] + + // this should always be true because the entries in the slice were created in the same order as + // the command operations. + if logOp.Key == fsmEntry.Key { + if len(fsmEntry.Value) > 0 { + logOp.Value = fsmEntry.Value + } + } else { + // this shouldn't happen + return errors.New("entries in FSM response were out of order") + } + } + } + + return nil +} + +// HAEnabled is the implementation of the HABackend interface +func (b *RaftBackend) HAEnabled() bool { return true } + +// HAEnabled is the implementation of the HABackend interface +func (b *RaftBackend) LockWith(key, value string) (physical.Lock, error) { + return &RaftLock{ + key: key, + value: []byte(value), + b: b, + }, nil +} + +// SetDesiredSuffrage sets a field in the fsm indicating the suffrage intent for +// this node. +func (b *RaftBackend) SetDesiredSuffrage(nonVoter bool) error { + b.l.Lock() + defer b.l.Unlock() + + var desiredSuffrage string + switch nonVoter { + case true: + desiredSuffrage = "non-voter" + default: + desiredSuffrage = "voter" + } + + err := b.fsm.recordSuffrage(desiredSuffrage) + if err != nil { + return err + } + + return nil +} + +func (b *RaftBackend) DesiredSuffrage() string { + return b.fsm.DesiredSuffrage() +} + +// RaftLock implements the physical Lock interface and enables HA for this +// backend. The Lock uses the raftNotifyCh for receiving leadership edge +// triggers. Vault's active duty matches raft's leadership. +type RaftLock struct { + key string + value []byte + + b *RaftBackend +} + +// monitorLeadership waits until we receive an update on the raftNotifyCh and +// closes the leaderLost channel. +func (l *RaftLock) monitorLeadership(stopCh <-chan struct{}, leaderNotifyCh <-chan bool) <-chan struct{} { + leaderLost := make(chan struct{}) + go func() { + for { + select { + case isLeader := <-leaderNotifyCh: + // leaderNotifyCh may deliver a true value initially if this + // server is already the leader prior to RaftLock.Lock call + // (the true message was already queued). The next message is + // always going to be false. The for loop should loop at most + // twice. + if !isLeader { + close(leaderLost) + return + } + case <-stopCh: + return + } + } + }() + return leaderLost +} + +// Lock blocks until we become leader or are shutdown. It returns a channel that +// is closed when we detect a loss of leadership. 
+func (l *RaftLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { + // If not initialized, block until it is + if !l.b.Initialized() { + select { + case <-l.b.raftInitCh: + case <-stopCh: + return nil, nil + } + } + + l.b.l.RLock() + + // Ensure that we still have a raft instance after grabbing the read lock + if l.b.raft == nil { + l.b.l.RUnlock() + return nil, errors.New("attempted to grab a lock on a nil raft backend") + } + + // Cache the notifyCh locally + leaderNotifyCh := l.b.raftNotifyCh + + // Check to see if we are already leader. + if l.b.raft.State() == raft.Leader { + err := l.b.applyLog(context.Background(), &LogData{ + Operations: []*LogOperation{ + { + OpType: putOp, + Key: l.key, + Value: l.value, + }, + }, + }) + l.b.l.RUnlock() + if err != nil { + return nil, err + } + + return l.monitorLeadership(stopCh, leaderNotifyCh), nil + } + l.b.l.RUnlock() + + for { + select { + case isLeader := <-leaderNotifyCh: + if isLeader { + // We are leader, set the key + l.b.l.RLock() + err := l.b.applyLog(context.Background(), &LogData{ + Operations: []*LogOperation{ + { + OpType: putOp, + Key: l.key, + Value: l.value, + }, + }, + }) + l.b.l.RUnlock() + if err != nil { + return nil, err + } + + return l.monitorLeadership(stopCh, leaderNotifyCh), nil + } + case <-stopCh: + return nil, nil + } + } +} + +// Unlock gives up leadership. +func (l *RaftLock) Unlock() error { + if l.b.raft == nil { + return nil + } + + return l.b.raft.LeadershipTransfer().Error() +} + +// Value reads the value of the lock. This informs us who is currently leader. +func (l *RaftLock) Value() (bool, string, error) { + e, err := l.b.Get(context.Background(), l.key) + if err != nil { + return false, "", err + } + if e == nil { + return false, "", nil + } + + value := string(e.Value) + // TODO: how to tell if held? + return true, value, nil +} + +// sealer implements the snapshot.Sealer interface and is used in the snapshot +// process for encrypting/decrypting the SHASUM file in snapshot archives. +type sealer struct { + access seal.Access +} + +// Seal encrypts the data with using the seal access object. +func (s sealer) Seal(ctx context.Context, pt []byte) ([]byte, error) { + if s.access == nil { + return nil, errors.New("no seal access available") + } + eblob, err := s.access.Encrypt(ctx, pt, nil) + if err != nil { + return nil, err + } + + return proto.Marshal(eblob) +} + +// Open decrypts the data using the seal access object. +func (s sealer) Open(ctx context.Context, ct []byte) ([]byte, error) { + if s.access == nil { + return nil, errors.New("no seal access available") + } + + var eblob wrapping.BlobInfo + err := proto.Unmarshal(ct, &eblob) + if err != nil { + return nil, err + } + + return s.access.Decrypt(ctx, &eblob, nil) +} + +// boltOptions returns a bolt.Options struct, suitable for passing to +// bolt.Open(), pre-configured with all of our preferred defaults. +func boltOptions(path string) *bolt.Options { + o := &bolt.Options{ + Timeout: 1 * time.Second, + FreelistType: bolt.FreelistMapType, + NoFreelistSync: true, + MmapFlags: getMmapFlags(path), + } + + if os.Getenv("VAULT_RAFT_FREELIST_TYPE") == "array" { + o.FreelistType = bolt.FreelistArrayType + } + + if os.Getenv("VAULT_RAFT_FREELIST_SYNC") != "" { + o.NoFreelistSync = false + } + + // By default, we want to set InitialMmapSize to 100GB, but only on 64bit platforms. + // Otherwise, we set it to whatever the value of VAULT_RAFT_INITIAL_MMAP_SIZE + // is, assuming it can be parsed as an int. 
Bolt itself sets this to 0 by default, + // so if users are wanting to turn this off, they can also set it to 0. Setting it + // to a negative value is the same as not setting it at all. + if os.Getenv("VAULT_RAFT_INITIAL_MMAP_SIZE") == "" { + o.InitialMmapSize = initialMmapSize + } else { + imms, err := strconv.Atoi(os.Getenv("VAULT_RAFT_INITIAL_MMAP_SIZE")) + + // If there's an error here, it means they passed something that's not convertible to + // a number. Rather than fail startup, just ignore it. + if err == nil && imms > 0 { + o.InitialMmapSize = imms + } + } + + return o +} diff --git a/physical/raft/raft_autopilot.go b/physical/raft/raft_autopilot.go new file mode 100644 index 0000000..3d8878a --- /dev/null +++ b/physical/raft/raft_autopilot.go @@ -0,0 +1,852 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package raft + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "math" + "os" + "strconv" + "sync" + "time" + + "github.com/armon/go-metrics" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/raft" + autopilot "github.com/hashicorp/raft-autopilot" + "github.com/mitchellh/mapstructure" + "go.uber.org/atomic" +) + +type CleanupDeadServersValue int + +const ( + CleanupDeadServersUnset CleanupDeadServersValue = 0 + CleanupDeadServersTrue CleanupDeadServersValue = 1 + CleanupDeadServersFalse CleanupDeadServersValue = 2 + AutopilotUpgradeVersionTag string = "upgrade_version" + AutopilotRedundancyZoneTag string = "redundancy_zone" +) + +func (c CleanupDeadServersValue) Value() bool { + switch c { + case CleanupDeadServersTrue: + return true + default: + return false + } +} + +// AutopilotConfig is used for querying/setting the Autopilot configuration. +type AutopilotConfig struct { + // CleanupDeadServers controls whether to remove dead servers from the Raft + // peer list periodically or when a new server joins. + CleanupDeadServers bool `mapstructure:"cleanup_dead_servers"` + + // CleanupDeadServersValue is used to shadow the CleanupDeadServers field in + // storage. Having it as an int helps in knowing if the value was set explicitly + // using the API or not. + CleanupDeadServersValue CleanupDeadServersValue `mapstructure:"cleanup_dead_servers_value"` + + // LastContactThreshold is the limit on the amount of time a server can go + // without leader contact before being considered unhealthy. + LastContactThreshold time.Duration `mapstructure:"-"` + + // DeadServerLastContactThreshold is the limit on the amount of time a server + // can go without leader contact before being considered failed. This takes + // effect only when CleanupDeadServers is set. + DeadServerLastContactThreshold time.Duration `mapstructure:"-"` + + // MaxTrailingLogs is the amount of entries in the Raft Log that a server can + // be behind before being considered unhealthy. + MaxTrailingLogs uint64 `mapstructure:"max_trailing_logs"` + + // MinQuorum sets the minimum number of servers allowed in a cluster before + // autopilot can prune dead servers. + MinQuorum uint `mapstructure:"min_quorum"` + + // ServerStabilizationTime is the minimum amount of time a server must be in a + // stable, healthy state before it can be added to the cluster. Only applicable + // with Raft protocol version 3 or higher. 
+	ServerStabilizationTime time.Duration `mapstructure:"-"`
+
+	// (Enterprise-only) DisableUpgradeMigration will disable Autopilot's upgrade migration
+	// strategy of waiting until enough newer-versioned servers have been added to the
+	// cluster before promoting them to voters.
+	DisableUpgradeMigration bool `mapstructure:"disable_upgrade_migration"`
+
+	// (Enterprise-only) RedundancyZoneTag is the node tag to use for separating
+	// servers into zones for redundancy. If left blank, this feature will be disabled.
+	RedundancyZoneTag string `mapstructure:"redundancy_zone_tag"`
+
+	// (Enterprise-only) UpgradeVersionTag is the node tag to use for version info when
+	// performing upgrade migrations. If left blank, the Vault version will be used.
+	UpgradeVersionTag string `mapstructure:"upgrade_version_tag"`
+}
+
+// Merge combines the supplied config with the receiver. Supplied values take
+// priority.
+func (to *AutopilotConfig) Merge(from *AutopilotConfig) {
+	if from == nil {
+		return
+	}
+	if from.CleanupDeadServersValue != CleanupDeadServersUnset {
+		to.CleanupDeadServers = from.CleanupDeadServersValue.Value()
+	}
+	if from.MinQuorum != 0 {
+		to.MinQuorum = from.MinQuorum
+	}
+	if from.LastContactThreshold != 0 {
+		to.LastContactThreshold = from.LastContactThreshold
+	}
+	if from.DeadServerLastContactThreshold != 0 {
+		to.DeadServerLastContactThreshold = from.DeadServerLastContactThreshold
+	}
+	if from.MaxTrailingLogs != 0 {
+		to.MaxTrailingLogs = from.MaxTrailingLogs
+	}
+	if from.ServerStabilizationTime != 0 {
+		to.ServerStabilizationTime = from.ServerStabilizationTime
+	}
+
+	// UpgradeVersionTag and RedundancyZoneTag are purposely not included here since those values aren't user
+	// controllable and should never change.
+	to.DisableUpgradeMigration = from.DisableUpgradeMigration
+}
+
+// Clone returns a duplicate instance of AutopilotConfig with the exact same values.
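+// Note that the CleanupDeadServersValue shadow field is not copied and is
+// left at its zero (unset) value in the returned copy.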
+func (ac *AutopilotConfig) Clone() *AutopilotConfig {
+	if ac == nil {
+		return nil
+	}
+	return &AutopilotConfig{
+		CleanupDeadServers:             ac.CleanupDeadServers,
+		LastContactThreshold:           ac.LastContactThreshold,
+		DeadServerLastContactThreshold: ac.DeadServerLastContactThreshold,
+		MaxTrailingLogs:                ac.MaxTrailingLogs,
+		MinQuorum:                      ac.MinQuorum,
+		ServerStabilizationTime:        ac.ServerStabilizationTime,
+		UpgradeVersionTag:              ac.UpgradeVersionTag,
+		RedundancyZoneTag:              ac.RedundancyZoneTag,
+		DisableUpgradeMigration:        ac.DisableUpgradeMigration,
+	}
+}
+
+// MarshalJSON makes the autopilot config fields JSON compatible
+func (ac *AutopilotConfig) MarshalJSON() ([]byte, error) {
+	return json.Marshal(map[string]interface{}{
+		"cleanup_dead_servers":               ac.CleanupDeadServers,
+		"cleanup_dead_servers_value":         ac.CleanupDeadServersValue,
+		"last_contact_threshold":             ac.LastContactThreshold.String(),
+		"dead_server_last_contact_threshold": ac.DeadServerLastContactThreshold.String(),
+		"max_trailing_logs":                  ac.MaxTrailingLogs,
+		"min_quorum":                         ac.MinQuorum,
+		"server_stabilization_time":          ac.ServerStabilizationTime.String(),
+		"upgrade_version_tag":                ac.UpgradeVersionTag,
+		"redundancy_zone_tag":                ac.RedundancyZoneTag,
+		"disable_upgrade_migration":          ac.DisableUpgradeMigration,
+	})
+}
+
+// UnmarshalJSON parses the autopilot config JSON blob
+func (ac *AutopilotConfig) UnmarshalJSON(b []byte) error {
+	var data interface{}
+	err := json.Unmarshal(b, &data)
+	if err != nil {
+		return err
+	}
+
+	conf := data.(map[string]interface{})
+	if err = mapstructure.WeakDecode(conf, ac); err != nil {
+		return err
+	}
+	if ac.LastContactThreshold, err = parseutil.ParseDurationSecond(conf["last_contact_threshold"]); err != nil {
+		return err
+	}
+	if ac.DeadServerLastContactThreshold, err = parseutil.ParseDurationSecond(conf["dead_server_last_contact_threshold"]); err != nil {
+		return err
+	}
+	if ac.ServerStabilizationTime, err = parseutil.ParseDurationSecond(conf["server_stabilization_time"]); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// FollowerState represents the information about a peer that the leader
+// tracks.
+type FollowerState struct {
+	AppliedIndex    uint64
+	LastHeartbeat   time.Time
+	LastTerm        uint64
+	IsDead          *atomic.Bool
+	DesiredSuffrage string
+	Version         string
+	UpgradeVersion  string
+	RedundancyZone  string
+}
+
+// EchoRequestUpdate is here to avoid 1) the list of arguments to Update()
+// getting huge and 2) an import cycle on the vault package
+type EchoRequestUpdate struct {
+	NodeID          string
+	AppliedIndex    uint64
+	Term            uint64
+	DesiredSuffrage string
+	UpgradeVersion  string
+	SDKVersion      string
+	RedundancyZone  string
+}
+
+// FollowerStates holds information about all the followers in the raft cluster
+// tracked by the leader.
+type FollowerStates struct {
+	l         sync.RWMutex
+	followers map[string]*FollowerState
+}
+
+// NewFollowerStates creates a new FollowerStates object
+func NewFollowerStates() *FollowerStates {
+	return &FollowerStates{
+		followers: make(map[string]*FollowerState),
+	}
+}
+
+// Update the peer information in the follower states. Note that this function
+// runs on the active node. Returns true if a new entry was added, as opposed
+// to modifying one already present.
+func (s *FollowerStates) Update(req *EchoRequestUpdate) bool { + s.l.Lock() + defer s.l.Unlock() + + state, present := s.followers[req.NodeID] + if !present { + state = &FollowerState{ + IsDead: atomic.NewBool(false), + } + s.followers[req.NodeID] = state + } + + state.IsDead.Store(false) + state.AppliedIndex = req.AppliedIndex + state.LastTerm = req.Term + state.DesiredSuffrage = req.DesiredSuffrage + state.LastHeartbeat = time.Now() + state.Version = req.SDKVersion + state.UpgradeVersion = req.UpgradeVersion + state.RedundancyZone = req.RedundancyZone + + return !present +} + +// Clear wipes all the information regarding peers in the follower states. +func (s *FollowerStates) Clear() { + s.l.Lock() + for i := range s.followers { + delete(s.followers, i) + } + s.l.Unlock() +} + +// Delete the entry of a peer represented by the nodeID from follower states. +func (s *FollowerStates) Delete(nodeID string) { + s.l.Lock() + delete(s.followers, nodeID) + s.l.Unlock() +} + +// MinIndex returns the minimum raft index applied in the raft cluster. +func (s *FollowerStates) MinIndex() uint64 { + var min uint64 = math.MaxUint64 + minFunc := func(a, b uint64) uint64 { + if a > b { + return b + } + return a + } + + s.l.RLock() + for _, state := range s.followers { + min = minFunc(min, state.AppliedIndex) + } + s.l.RUnlock() + + if min == math.MaxUint64 { + return 0 + } + + return min +} + +// Ensure that the Delegate implements the ApplicationIntegration interface +var _ autopilot.ApplicationIntegration = (*Delegate)(nil) + +// Delegate is an implementation of autopilot.ApplicationIntegration interface. +// This is used by the autopilot library to retrieve information and to have +// application specific tasks performed. +type Delegate struct { + *RaftBackend + + // dl is a lock dedicated for guarding delegate's fields + dl sync.RWMutex + inflightRemovals map[raft.ServerID]bool + emptyVersionLogs map[raft.ServerID]struct{} +} + +func NewDelegate(b *RaftBackend) *Delegate { + return &Delegate{ + RaftBackend: b, + inflightRemovals: make(map[raft.ServerID]bool), + emptyVersionLogs: make(map[raft.ServerID]struct{}), + } +} + +// AutopilotConfig is called by the autopilot library to know the desired +// autopilot configuration. +func (d *Delegate) AutopilotConfig() *autopilot.Config { + d.l.RLock() + config := &autopilot.Config{ + CleanupDeadServers: d.autopilotConfig.CleanupDeadServers, + LastContactThreshold: d.autopilotConfig.LastContactThreshold, + MaxTrailingLogs: d.autopilotConfig.MaxTrailingLogs, + MinQuorum: d.autopilotConfig.MinQuorum, + ServerStabilizationTime: d.autopilotConfig.ServerStabilizationTime, + Ext: d.autopilotConfigExt(), + } + d.l.RUnlock() + return config +} + +// NotifyState is called by the autopilot library whenever there is a state +// change. We update a few metrics when this happens. 
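+// Gauges emitted while this node is the raft leader: autopilot.failure_tolerance,
+// autopilot.healthy, and a per-server autopilot.node.healthy labelled with
+// node_id.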
+func (d *Delegate) NotifyState(state *autopilot.State) { + if d.raft.State() == raft.Leader { + metrics.SetGauge([]string{"autopilot", "failure_tolerance"}, float32(state.FailureTolerance)) + if state.Healthy { + metrics.SetGauge([]string{"autopilot", "healthy"}, 1) + } else { + metrics.SetGauge([]string{"autopilot", "healthy"}, 0) + } + + for id, state := range state.Servers { + labels := []metrics.Label{ + { + Name: "node_id", + Value: string(id), + }, + } + if state.Health.Healthy { + metrics.SetGaugeWithLabels([]string{"autopilot", "node", "healthy"}, 1, labels) + } else { + metrics.SetGaugeWithLabels([]string{"autopilot", "node", "healthy"}, 0, labels) + } + } + } +} + +// FetchServerStats is called by the autopilot library to retrieve information +// about all the nodes in the raft cluster. +func (d *Delegate) FetchServerStats(ctx context.Context, servers map[raft.ServerID]*autopilot.Server) map[raft.ServerID]*autopilot.ServerStats { + ret := make(map[raft.ServerID]*autopilot.ServerStats) + + d.l.RLock() + followerStates := d.followerStates + d.l.RUnlock() + + followerStates.l.RLock() + defer followerStates.l.RUnlock() + + now := time.Now() + for id, followerState := range followerStates.followers { + ret[raft.ServerID(id)] = &autopilot.ServerStats{ + LastContact: now.Sub(followerState.LastHeartbeat), + LastTerm: followerState.LastTerm, + LastIndex: followerState.AppliedIndex, + } + } + + leaderState, _ := d.fsm.LatestState() + ret[raft.ServerID(d.localID)] = &autopilot.ServerStats{ + LastTerm: leaderState.Term, + LastIndex: leaderState.Index, + } + + return ret +} + +// KnownServers is called by the autopilot library to know the status of each +// node in the raft cluster. If the application thinks that certain nodes left, +// it is here that we let the autopilot library know of the same. +func (d *Delegate) KnownServers() map[raft.ServerID]*autopilot.Server { + d.l.RLock() + defer d.l.RUnlock() + future := d.raft.GetConfiguration() + if err := future.Error(); err != nil { + d.logger.Error("failed to get raft configuration when computing known servers", "error", err) + return nil + } + + apServerStates := d.autopilot.GetState().Servers + servers := future.Configuration().Servers + serverIDs := make([]string, 0, len(servers)) + for _, server := range servers { + serverIDs = append(serverIDs, string(server.ID)) + } + + d.followerStates.l.RLock() + defer d.followerStates.l.RUnlock() + + ret := make(map[raft.ServerID]*autopilot.Server) + for id, state := range d.followerStates.followers { + // If the server is not in raft configuration, even if we received a follower + // heartbeat, it shouldn't be a known server for autopilot. + if !strutil.StrListContains(serverIDs, id) { + continue + } + + // If version isn't found in the state, fake it using the version from the leader so that autopilot + // doesn't demote the node to a non-voter, just because of a missed heartbeat. + currentServerID := raft.ServerID(id) + followerVersion := state.Version + leaderVersion := d.effectiveSDKVersion + d.dl.Lock() + if followerVersion == "" { + if _, ok := d.emptyVersionLogs[currentServerID]; !ok { + d.logger.Trace("received empty Vault version in heartbeat state. 
faking it with the leader version for now", "id", id, "leader version", leaderVersion) + d.emptyVersionLogs[currentServerID] = struct{}{} + } + followerVersion = leaderVersion + } else { + delete(d.emptyVersionLogs, currentServerID) + } + d.dl.Unlock() + + server := &autopilot.Server{ + ID: currentServerID, + Name: id, + RaftVersion: raft.ProtocolVersionMax, + Meta: d.meta(state), + Version: followerVersion, + Ext: d.autopilotServerExt(state), + } + + // As KnownServers is a delegate called by autopilot let's check if we already + // had this data in the correct format and use it. If we don't (which sounds a + // bit sad, unless this ISN'T a voter) then as a fail-safe, let's try what we've + // done elsewhere in code to check the desired suffrage and manually set NodeType + // based on whether that's a voter or not. If we don't do either of these + // things, NodeType isn't set which means technically it's not a voter. + // It shouldn't be a voter and end up in this state. + if apServerState, found := apServerStates[raft.ServerID(id)]; found && apServerState.Server.NodeType != "" { + server.NodeType = apServerState.Server.NodeType + } else if state.DesiredSuffrage == "voter" { + server.NodeType = autopilot.NodeVoter + } + + switch state.IsDead.Load() { + case true: + d.logger.Debug("informing autopilot that the node left", "id", id) + server.NodeStatus = autopilot.NodeLeft + default: + server.NodeStatus = autopilot.NodeAlive + } + + ret[raft.ServerID(id)] = server + } + + // Add the leader + ret[raft.ServerID(d.localID)] = &autopilot.Server{ + ID: raft.ServerID(d.localID), + Name: d.localID, + RaftVersion: raft.ProtocolVersionMax, + NodeStatus: autopilot.NodeAlive, + NodeType: autopilot.NodeVoter, // The leader must be a voter + Meta: d.meta(&FollowerState{ + UpgradeVersion: d.EffectiveVersion(), + RedundancyZone: d.RedundancyZone(), + }), + Version: d.effectiveSDKVersion, + Ext: d.autopilotServerExt(nil), + IsLeader: true, + } + + return ret +} + +// RemoveFailedServer is called by the autopilot library when it desires a node +// to be removed from the raft configuration. This function removes the node +// from the raft cluster and stops tracking its information in follower states. +// This function needs to return quickly. Hence removal is performed in a +// goroutine. +func (d *Delegate) RemoveFailedServer(server *autopilot.Server) { + go func() { + added := false + defer func() { + if added { + d.dl.Lock() + delete(d.inflightRemovals, server.ID) + d.dl.Unlock() + } + }() + + d.dl.Lock() + _, ok := d.inflightRemovals[server.ID] + if ok { + d.logger.Info("removal of dead server is already initiated", "id", server.ID) + d.dl.Unlock() + return + } + + added = true + d.inflightRemovals[server.ID] = true + d.dl.Unlock() + + d.logger.Info("removing dead server from raft configuration", "id", server.ID) + if future := d.raft.RemoveServer(server.ID, 0, 0); future.Error() != nil { + d.logger.Error("failed to remove server", "server_id", server.ID, "server_address", server.Address, "server_name", server.Name, "error", future.Error()) + return + } + + d.followerStates.Delete(string(server.ID)) + }() +} + +// SetFollowerStates sets the followerStates field in the backend to track peers +// in the raft cluster. +func (b *RaftBackend) SetFollowerStates(states *FollowerStates) { + b.l.Lock() + b.followerStates = states + b.l.Unlock() +} + +// SetAutopilotConfig updates the autopilot configuration in the backend. 
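+// A caller-side sketch (field values here are illustrative, not the
+// defaults; see defaultAutopilotConfig below for the actual defaults):
+//
+//	b.SetAutopilotConfig(&AutopilotConfig{
+//		CleanupDeadServers:             true,
+//		LastContactThreshold:           10 * time.Second,
+//		DeadServerLastContactThreshold: 24 * time.Hour,
+//		ServerStabilizationTime:        10 * time.Second,
+//	})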
+func (b *RaftBackend) SetAutopilotConfig(config *AutopilotConfig) { + b.l.Lock() + b.autopilotConfig = config + b.logger.Info("updated autopilot configuration", "config", b.autopilotConfig) + b.l.Unlock() +} + +// AutopilotConfig returns the autopilot configuration in the backend. +func (b *RaftBackend) AutopilotConfig() *AutopilotConfig { + b.l.RLock() + defer b.l.RUnlock() + return b.autopilotConfig.Clone() +} + +func (b *RaftBackend) defaultAutopilotConfig() *AutopilotConfig { + return &AutopilotConfig{ + CleanupDeadServers: false, + LastContactThreshold: 10 * time.Second, + DeadServerLastContactThreshold: 24 * time.Hour, + MaxTrailingLogs: 1000, + ServerStabilizationTime: 10 * time.Second, + DisableUpgradeMigration: false, + UpgradeVersionTag: AutopilotUpgradeVersionTag, + RedundancyZoneTag: AutopilotRedundancyZoneTag, + } +} + +func (b *RaftBackend) AutopilotDisabled() bool { + b.l.RLock() + disabled := b.disableAutopilot + b.l.RUnlock() + return disabled +} + +func (b *RaftBackend) startFollowerHeartbeatTracker() { + b.l.RLock() + tickerCh := b.followerHeartbeatTicker.C + b.l.RUnlock() + + followerGauge := func(peerID string, suffix string, value float32) { + labels := []metrics.Label{ + { + Name: "peer_id", + Value: peerID, + }, + } + metrics.SetGaugeWithLabels([]string{"raft_storage", "follower", suffix}, value, labels) + } + for range tickerCh { + b.l.RLock() + if b.raft == nil { + // We could be racing with teardown, which will stop the ticker + // but that doesn't guarantee that we won't reach this line with a nil + // b.raft. + b.l.RUnlock() + return + } + b.followerStates.l.RLock() + myAppliedIndex := b.raft.AppliedIndex() + for peerID, state := range b.followerStates.followers { + timeSinceLastHeartbeat := time.Now().Sub(state.LastHeartbeat) / time.Millisecond + followerGauge(peerID, "last_heartbeat_ms", float32(timeSinceLastHeartbeat)) + followerGauge(peerID, "applied_index_delta", float32(myAppliedIndex-state.AppliedIndex)) + + if b.autopilotConfig.CleanupDeadServers && b.autopilotConfig.DeadServerLastContactThreshold != 0 { + if state.LastHeartbeat.IsZero() || state.IsDead.Load() { + continue + } + now := time.Now() + if now.After(state.LastHeartbeat.Add(b.autopilotConfig.DeadServerLastContactThreshold)) { + state.IsDead.Store(true) + } + } + } + b.followerStates.l.RUnlock() + b.l.RUnlock() + } +} + +// StopAutopilot stops a running autopilot instance. This should only be called +// on the active node. +func (b *RaftBackend) StopAutopilot() { + b.l.Lock() + defer b.l.Unlock() + + if b.autopilot == nil { + return + } + b.autopilot.Stop() + b.autopilot = nil + b.followerHeartbeatTicker.Stop() +} + +// AutopilotState represents the health information retrieved from autopilot. 
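+// Serialized for the API it looks roughly like the following (abbreviated,
+// with hypothetical node names; optional fields are omitted when empty):
+//
+//	{
+//	  "healthy": true,
+//	  "failure_tolerance": 1,
+//	  "leader": "node-a",
+//	  "voters": ["node-a", "node-b", "node-c"],
+//	  "servers": {
+//	    "node-a": {"id": "node-a", "node_status": "alive", "healthy": true}
+//	  }
+//	}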
+type AutopilotState struct { + Healthy bool `json:"healthy" mapstructure:"healthy"` + FailureTolerance int `json:"failure_tolerance" mapstructure:"failure_tolerance"` + Servers map[string]*AutopilotServer `json:"servers" mapstructure:"servers"` + Leader string `json:"leader" mapstructure:"leader"` + Voters []string `json:"voters" mapstructure:"voters"` + NonVoters []string `json:"non_voters,omitempty" mapstructure:"non_voters,omitempty"` + RedundancyZones map[string]AutopilotZone `json:"redundancy_zones,omitempty" mapstructure:"redundancy_zones,omitempty"` + Upgrade *AutopilotUpgrade `json:"upgrade_info,omitempty" mapstructure:"upgrade_info,omitempty"` + OptimisticFailureTolerance int `json:"optimistic_failure_tolerance,omitempty" mapstructure:"optimistic_failure_tolerance,omitempty"` +} + +// AutopilotServer represents the health information of individual server node +// retrieved from autopilot. +type AutopilotServer struct { + ID string `json:"id" mapstructure:"id"` + Name string `json:"name" mapstructure:"name"` + Address string `json:"address" mapstructure:"address"` + NodeStatus string `json:"node_status" mapstructure:"node_status"` + LastContact *ReadableDuration `json:"last_contact" mapstructure:"last_contact"` + LastTerm uint64 `json:"last_term" mapstructure:"last_term"` + LastIndex uint64 `json:"last_index" mapstructure:"last_index"` + Healthy bool `json:"healthy" mapstructure:"healthy"` + StableSince time.Time `json:"stable_since" mapstructure:"stable_since"` + Status string `json:"status" mapstructure:"status"` + Version string `json:"version" mapstructure:"version"` + RedundancyZone string `json:"redundancy_zone,omitempty" mapstructure:"redundancy_zone,omitempty"` + UpgradeVersion string `json:"upgrade_version,omitempty" mapstructure:"upgrade_version,omitempty"` + ReadReplica bool `json:"read_replica,omitempty" mapstructure:"read_replica,omitempty"` + NodeType string `json:"node_type,omitempty" mapstructure:"node_type,omitempty"` +} + +type AutopilotZone struct { + Servers []string `json:"servers,omitempty" mapstructure:"servers,omitempty"` + Voters []string `json:"voters,omitempty" mapstructure:"voters,omitempty"` + FailureTolerance int `json:"failure_tolerance,omitempty" mapstructure:"failure_tolerance,omitempty"` +} + +type AutopilotUpgrade struct { + Status string `json:"status" mapstructure:"status"` + TargetVersion string `json:"target_version,omitempty" mapstructure:"target_version,omitempty"` + TargetVersionVoters []string `json:"target_version_voters,omitempty" mapstructure:"target_version_voters,omitempty"` + TargetVersionNonVoters []string `json:"target_version_non_voters,omitempty" mapstructure:"target_version_non_voters,omitempty"` + TargetVersionReadReplicas []string `json:"target_version_read_replicas,omitempty" mapstructure:"target_version_read_replicas,omitempty"` + OtherVersionVoters []string `json:"other_version_voters,omitempty" mapstructure:"other_version_voters,omitempty"` + OtherVersionNonVoters []string `json:"other_version_non_voters,omitempty" mapstructure:"other_version_non_voters,omitempty"` + OtherVersionReadReplicas []string `json:"other_version_read_replicas,omitempty" mapstructure:"other_version_read_replicas,omitempty"` + RedundancyZones map[string]AutopilotZoneUpgradeVersions `json:"redundancy_zones,omitempty" mapstructure:"redundancy_zones,omitempty"` +} + +type AutopilotZoneUpgradeVersions struct { + TargetVersionVoters []string `json:"target_version_voters,omitempty" mapstructure:"target_version_voters,omitempty"` + TargetVersionNonVoters 
[]string `json:"target_version_non_voters,omitempty" mapstructure:"target_version_non_voters,omitempty"` + OtherVersionVoters []string `json:"other_version_voters,omitempty" mapstructure:"other_version_voters,omitempty"` + OtherVersionNonVoters []string `json:"other_version_non_voters,omitempty" mapstructure:"other_version_non_voters,omitempty"` +} + +// ReadableDuration is a duration type that is serialized to JSON in human readable format. +type ReadableDuration time.Duration + +func NewReadableDuration(dur time.Duration) *ReadableDuration { + d := ReadableDuration(dur) + return &d +} + +func (d *ReadableDuration) String() string { + return d.Duration().String() +} + +func (d *ReadableDuration) Duration() time.Duration { + if d == nil { + return time.Duration(0) + } + return time.Duration(*d) +} + +func (d *ReadableDuration) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%s"`, d.Duration().String())), nil +} + +func (d *ReadableDuration) UnmarshalJSON(raw []byte) (err error) { + if d == nil { + return fmt.Errorf("cannot unmarshal to nil pointer") + } + + var dur time.Duration + str := string(raw) + if len(str) >= 2 && str[0] == '"' && str[len(str)-1] == '"' { + // quoted string + dur, err = parseutil.ParseDurationSecond(str[1 : len(str)-1]) + if err != nil { + return err + } + } else { + // no quotes, not a string + v, err := strconv.ParseFloat(str, 64) + if err != nil { + return err + } + dur = time.Duration(v) + } + + *d = ReadableDuration(dur) + return nil +} + +func stringIDs(ids []raft.ServerID) []string { + out := make([]string, len(ids)) + for i, id := range ids { + out[i] = string(id) + } + return out +} + +func autopilotToAPIState(state *autopilot.State) (*AutopilotState, error) { + out := &AutopilotState{ + Healthy: state.Healthy, + FailureTolerance: state.FailureTolerance, + Leader: string(state.Leader), + Voters: stringIDs(state.Voters), + Servers: make(map[string]*AutopilotServer), + } + + for id, srv := range state.Servers { + aps, err := autopilotToAPIServer(srv) + if err != nil { + return nil, err + } + out.Servers[string(id)] = aps + } + + err := autopilotToAPIStateEnterprise(state, out) + if err != nil { + return nil, err + } + + return out, nil +} + +func autopilotToAPIServer(srv *autopilot.ServerState) (*AutopilotServer, error) { + apiSrv := &AutopilotServer{ + ID: string(srv.Server.ID), + Name: srv.Server.Name, + Address: string(srv.Server.Address), + NodeStatus: string(srv.Server.NodeStatus), + LastContact: NewReadableDuration(srv.Stats.LastContact), + LastTerm: srv.Stats.LastTerm, + LastIndex: srv.Stats.LastIndex, + Healthy: srv.Health.Healthy, + StableSince: srv.Health.StableSince, + Status: string(srv.State), + Version: srv.Server.Version, + NodeType: string(srv.Server.NodeType), + } + + err := autopilotToAPIServerEnterprise(&srv.Server, apiSrv) + if err != nil { + return nil, err + } + + return apiSrv, nil +} + +// GetAutopilotServerState retrieves raft cluster state from autopilot to +// return over the API. 
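+// Note that it returns an error when raft storage is not initialized but
+// (nil, nil) when autopilot is disabled or has produced no state yet, so
+// callers need to handle a nil state alongside a nil error; sketched:
+//
+//	state, err := b.GetAutopilotServerState(ctx)
+//	if err != nil {
+//		// raft storage is not initialized
+//	}
+//	if state == nil {
+//		// autopilot disabled, or no state computed yet
+//	}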
+func (b *RaftBackend) GetAutopilotServerState(ctx context.Context) (*AutopilotState, error) { + b.l.RLock() + defer b.l.RUnlock() + + if b.raft == nil { + return nil, errors.New("raft storage is not initialized") + } + + if b.autopilot == nil { + return nil, nil + } + + apState := b.autopilot.GetState() + if apState == nil { + return nil, nil + } + + return autopilotToAPIState(apState) +} + +func (b *RaftBackend) DisableAutopilot() { + b.l.Lock() + b.disableAutopilot = true + b.l.Unlock() +} + +// SetupAutopilot gathers information required to configure autopilot and starts +// it. If autopilot is disabled, this function does nothing. +func (b *RaftBackend) SetupAutopilot(ctx context.Context, storageConfig *AutopilotConfig, followerStates *FollowerStates, disable bool) { + b.l.Lock() + if disable || os.Getenv("VAULT_RAFT_AUTOPILOT_DISABLE") != "" { + b.disableAutopilot = true + } + + if b.disableAutopilot { + b.logger.Info("disabling autopilot") + b.l.Unlock() + return + } + + // Start with a default config + b.autopilotConfig = b.defaultAutopilotConfig() + + // Merge the setting provided over the API + b.autopilotConfig.Merge(storageConfig) + + // Create the autopilot instance + options := []autopilot.Option{ + autopilot.WithLogger(b.logger), + autopilot.WithPromoter(b.autopilotPromoter()), + } + if b.autopilotReconcileInterval != 0 { + options = append(options, autopilot.WithReconcileInterval(b.autopilotReconcileInterval)) + } + if b.autopilotUpdateInterval != 0 { + options = append(options, autopilot.WithUpdateInterval(b.autopilotUpdateInterval)) + } + b.autopilot = autopilot.New(b.raft, NewDelegate(b), options...) + b.followerStates = followerStates + b.followerHeartbeatTicker = time.NewTicker(1 * time.Second) + + b.l.Unlock() + + b.logger.Info("starting autopilot", "config", b.autopilotConfig, "reconcile_interval", b.autopilotReconcileInterval) + b.autopilot.Start(ctx) + + go b.startFollowerHeartbeatTracker() +} diff --git a/physical/raft/raft_test.go b/physical/raft/raft_test.go new file mode 100644 index 0000000..73d0ce3 --- /dev/null +++ b/physical/raft/raft_test.go @@ -0,0 +1,765 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package raft + +import ( + "bytes" + "context" + "crypto/md5" + "encoding/base64" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/go-test/deep" + "github.com/golang/protobuf/proto" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-secure-stdlib/base62" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/raft" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/physical" + bolt "go.etcd.io/bbolt" +) + +func connectPeers(nodes ...*RaftBackend) { + for _, node := range nodes { + for _, peer := range nodes { + if node == peer { + continue + } + + node.raftTransport.(*raft.InmemTransport).Connect(raft.ServerAddress(peer.NodeID()), peer.raftTransport) + peer.raftTransport.(*raft.InmemTransport).Connect(raft.ServerAddress(node.NodeID()), node.raftTransport) + } + } +} + +func stepDownLeader(t *testing.T, node *RaftBackend) { + t.Helper() + + if err := node.raft.LeadershipTransfer().Error(); err != nil { + t.Fatal(err) + } + + timeout := time.Now().Add(time.Second * 10) + for !time.Now().After(timeout) { + if err := node.raft.VerifyLeader().Error(); err != nil { + return + } + time.Sleep(100 * time.Millisecond) + } + + t.Fatal("still leader") +} + +func waitForLeader(t *testing.T, nodes ...*RaftBackend) *RaftBackend { + t.Helper() + timeout := time.Now().Add(time.Second * 10) + for !time.Now().After(timeout) { + for _, node := range nodes { + if node.raft.Leader() == raft.ServerAddress(node.NodeID()) { + return node + } + } + time.Sleep(100 * time.Millisecond) + } + + t.Fatal("no leader") + return nil +} + +func compareFSMs(t *testing.T, fsm1, fsm2 *FSM) { + t.Helper() + if err := compareFSMsWithErr(t, fsm1, fsm2); err != nil { + t.Fatal(err) + } +} + +func compareFSMsWithErr(t *testing.T, fsm1, fsm2 *FSM) error { + t.Helper() + index1, config1 := fsm1.LatestState() + index2, config2 := fsm2.LatestState() + + if !proto.Equal(index1, index2) { + return fmt.Errorf("indexes did not match: %+v != %+v", index1, index2) + } + if !proto.Equal(config1, config2) { + return fmt.Errorf("configs did not match: %+v != %+v", config1, config2) + } + + return compareDBs(t, fsm1.getDB(), fsm2.getDB(), false) +} + +func compareDBs(t *testing.T, boltDB1, boltDB2 *bolt.DB, dataOnly bool) error { + t.Helper() + db1 := make(map[string]string) + db2 := make(map[string]string) + + err := boltDB1.View(func(tx *bolt.Tx) error { + c := tx.Cursor() + for bucketName, _ := c.First(); bucketName != nil; bucketName, _ = c.Next() { + if dataOnly && !bytes.Equal(bucketName, dataBucketName) { + continue + } + + b := tx.Bucket(bucketName) + + cBucket := b.Cursor() + + for k, v := cBucket.First(); k != nil; k, v = cBucket.Next() { + db1[string(k)] = base64.StdEncoding.EncodeToString(v) + } + } + + return nil + }) + if err != nil { + t.Fatal(err) + } + + err = boltDB2.View(func(tx *bolt.Tx) error { + c := tx.Cursor() + for bucketName, _ := c.First(); bucketName != nil; bucketName, _ = c.Next() { + if dataOnly && !bytes.Equal(bucketName, dataBucketName) { + continue + } + b := tx.Bucket(bucketName) + + c := b.Cursor() + + for k, v := c.First(); k != nil; k, v = c.Next() { + db2[string(k)] = base64.StdEncoding.EncodeToString(v) + } + } + + return nil + }) + + if err != nil { + t.Fatal(err) + } + + if diff := deep.Equal(db1, db2); diff != nil { + return fmt.Errorf("%+v", diff) + } + + return nil +} + +func TestRaft_Backend(t *testing.T) { + b, dir := GetRaft(t, 
true, true) + defer os.RemoveAll(dir) + + physical.ExerciseBackend(t, b) +} + +func TestRaft_ParseAutopilotUpgradeVersion(t *testing.T) { + raftDir, err := ioutil.TempDir("", "vault-raft-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(raftDir) + + conf := map[string]string{ + "path": raftDir, + "node_id": "abc123", + "autopilot_upgrade_version": "hahano", + } + + _, err = NewRaftBackend(conf, hclog.NewNullLogger()) + if err == nil { + t.Fatal("expected an error but got none") + } + + if !strings.Contains(err.Error(), "does not parse") { + t.Fatal("expected an error about unparseable versions but got none") + } +} + +func TestRaft_ParseNonVoter(t *testing.T) { + p := func(s string) *string { + return &s + } + + for _, retryJoinConf := range []string{"", "not-empty"} { + t.Run(retryJoinConf, func(t *testing.T) { + for name, tc := range map[string]struct { + envValue *string + configValue *string + expectNonVoter bool + invalidNonVoterValue bool + }{ + "valid false": {nil, p("false"), false, false}, + "valid true": {nil, p("true"), true, false}, + "invalid empty": {nil, p(""), false, true}, + "invalid truthy": {nil, p("no"), false, true}, + "invalid": {nil, p("totallywrong"), false, true}, + "valid env false": {p("false"), nil, true, false}, + "valid env true": {p("true"), nil, true, false}, + "valid env not boolean": {p("anything"), nil, true, false}, + "valid env empty": {p(""), nil, false, false}, + "neither set, default false": {nil, nil, false, false}, + "both set, env preferred": {p("true"), p("false"), true, false}, + } { + t.Run(name, func(t *testing.T) { + if tc.envValue != nil { + t.Setenv(EnvVaultRaftNonVoter, *tc.envValue) + } + raftDir, err := ioutil.TempDir("", "vault-raft-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(raftDir) + + conf := map[string]string{ + "path": raftDir, + "node_id": "abc123", + "retry_join": retryJoinConf, + } + if tc.configValue != nil { + conf[raftNonVoterConfigKey] = *tc.configValue + } + + backend, err := NewRaftBackend(conf, hclog.NewNullLogger()) + switch { + case tc.invalidNonVoterValue || (retryJoinConf == "" && tc.expectNonVoter): + if err == nil { + t.Fatal("expected an error but got none") + } + default: + if err != nil { + t.Fatalf("expected no error but got: %s", err) + } + + raftBackend := backend.(*RaftBackend) + if tc.expectNonVoter != raftBackend.NonVoter() { + t.Fatalf("expected %s %v but got %v", raftNonVoterConfigKey, tc.expectNonVoter, raftBackend.NonVoter()) + } + } + }) + } + }) + } +} + +func TestRaft_Backend_LargeKey(t *testing.T) { + b, dir := GetRaft(t, true, true) + defer os.RemoveAll(dir) + + key, err := base62.Random(bolt.MaxKeySize + 1) + if err != nil { + t.Fatal(err) + } + entry := &physical.Entry{Key: key, Value: []byte(key)} + + err = b.Put(context.Background(), entry) + if err == nil { + t.Fatal("expected error for put entry") + } + + if !strings.Contains(err.Error(), physical.ErrKeyTooLarge) { + t.Fatalf("expected %q, got %v", physical.ErrKeyTooLarge, err) + } + + out, err := b.Get(context.Background(), entry.Key) + if err != nil { + t.Fatalf("unexpected error after failed put: %v", err) + } + if out != nil { + t.Fatal("expected response entry to be nil after a failed put") + } +} + +func TestRaft_Backend_LargeValue(t *testing.T) { + b, dir := GetRaft(t, true, true) + defer os.RemoveAll(dir) + + value := make([]byte, defaultMaxEntrySize+1) + rand.Read(value) + entry := &physical.Entry{Key: "foo", Value: value} + + err := b.Put(context.Background(), entry) + if err == nil { + t.Fatal("expected 
error for put entry")
+	}
+
+	if !strings.Contains(err.Error(), physical.ErrValueTooLarge) {
+		t.Fatalf("expected %q, got %v", physical.ErrValueTooLarge, err)
+	}
+
+	out, err := b.Get(context.Background(), entry.Key)
+	if err != nil {
+		t.Fatalf("unexpected error after failed put: %v", err)
+	}
+	if out != nil {
+		t.Fatal("expected response entry to be nil after a failed put")
+	}
+}
+
+// TestRaft_TransactionalBackend_GetTransactions tests that passing a slice of transactions to the
+// raft backend will populate values for any transactions that are Get operations.
+func TestRaft_TransactionalBackend_GetTransactions(t *testing.T) {
+	b, dir := GetRaft(t, true, true)
+	defer os.RemoveAll(dir)
+
+	ctx := context.Background()
+	txns := make([]*physical.TxnEntry, 0)
+
+	// Add some seed values to our FSM, and prepare our slice of transactions at the same time
+	for i := 0; i < 5; i++ {
+		key := fmt.Sprintf("foo/%d", i)
+		err := b.fsm.Put(ctx, &physical.Entry{Key: key, Value: []byte(fmt.Sprintf("value-%d", i))})
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		txns = append(txns, &physical.TxnEntry{
+			Operation: physical.GetOperation,
+			Entry: &physical.Entry{
+				Key: key,
+			},
+		})
+	}
+
+	// Add some additional transactions, so we have a mix of operations
+	for i := 0; i < 10; i++ {
+		txnEntry := &physical.TxnEntry{
+			Entry: &physical.Entry{
+				Key: fmt.Sprintf("lol-%d", i),
+			},
+		}
+
+		if i%2 == 0 {
+			txnEntry.Operation = physical.PutOperation
+			txnEntry.Entry.Value = []byte("lol")
+		} else {
+			txnEntry.Operation = physical.DeleteOperation
+		}
+
+		txns = append(txns, txnEntry)
+	}
+
+	err := b.Transaction(ctx, txns)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Check that our Get operations were populated with their values
+	for i, txn := range txns {
+		if txn.Operation == physical.GetOperation {
+			val := []byte(fmt.Sprintf("value-%d", i))
+			if !bytes.Equal(val, txn.Entry.Value) {
+				t.Fatalf("expected %s to equal %s but it didn't", hex.EncodeToString(val), hex.EncodeToString(txn.Entry.Value))
+			}
+		}
+	}
+}
+
+func TestRaft_TransactionalBackend_LargeKey(t *testing.T) {
+	b, dir := GetRaft(t, true, true)
+	defer os.RemoveAll(dir)
+
+	value := make([]byte, defaultMaxEntrySize+1)
+	rand.Read(value)
+
+	key, err := base62.Random(bolt.MaxKeySize + 1)
+	if err != nil {
+		t.Fatal(err)
+	}
+	txns := []*physical.TxnEntry{
+		{
+			Operation: physical.PutOperation,
+			Entry: &physical.Entry{
+				Key:   key,
+				Value: []byte(key),
+			},
+		},
+	}
+
+	err = b.Transaction(context.Background(), txns)
+	if err == nil {
+		t.Fatal("expected error for transactions")
+	}
+
+	if !strings.Contains(err.Error(), physical.ErrKeyTooLarge) {
+		t.Fatalf("expected %q, got %v", physical.ErrKeyTooLarge, err)
+	}
+
+	out, err := b.Get(context.Background(), txns[0].Entry.Key)
+	if err != nil {
+		t.Fatalf("unexpected error after failed put: %v", err)
+	}
+	if out != nil {
+		t.Fatal("expected response entry to be nil after a failed put")
+	}
+}
+
+func TestRaft_TransactionalBackend_LargeValue(t *testing.T) {
+	b, dir := GetRaft(t, true, true)
+	defer os.RemoveAll(dir)
+
+	value := make([]byte, defaultMaxEntrySize+1)
+	rand.Read(value)
+
+	txns := []*physical.TxnEntry{
+		{
+			Operation: physical.PutOperation,
+			Entry: &physical.Entry{
+				Key:   "foo",
+				Value: value,
+			},
+		},
+	}
+
+	err := b.Transaction(context.Background(), txns)
+	if err == nil {
+		t.Fatal("expected error for transactions")
+	}
+
+	if !strings.Contains(err.Error(), physical.ErrValueTooLarge) {
+		t.Fatalf("expected %q, got %v", physical.ErrValueTooLarge, err)
+	}
+
+	out, err
:= b.Get(context.Background(), txns[0].Entry.Key) + if err != nil { + t.Fatalf("unexpected error after failed put: %v", err) + } + if out != nil { + t.Fatal("expected response entry to be nil after a failed put") + } +} + +func TestRaft_Backend_ListPrefix(t *testing.T) { + b, dir := GetRaft(t, true, true) + defer os.RemoveAll(dir) + + physical.ExerciseBackend_ListPrefix(t, b) +} + +func TestRaft_TransactionalBackend(t *testing.T) { + b, dir := GetRaft(t, true, true) + defer os.RemoveAll(dir) + + physical.ExerciseTransactionalBackend(t, b) +} + +func TestRaft_HABackend(t *testing.T) { + t.Skip() + raft, dir := GetRaft(t, true, true) + defer os.RemoveAll(dir) + raft2, dir2 := GetRaft(t, false, true) + defer os.RemoveAll(dir2) + + // Add raft2 to the cluster + addPeer(t, raft, raft2) + + physical.ExerciseHABackend(t, raft, raft2) +} + +func TestRaft_Backend_ThreeNode(t *testing.T) { + raft1, dir := GetRaft(t, true, true) + raft2, dir2 := GetRaft(t, false, true) + raft3, dir3 := GetRaft(t, false, true) + defer os.RemoveAll(dir) + defer os.RemoveAll(dir2) + defer os.RemoveAll(dir3) + + // Add raft2 to the cluster + addPeer(t, raft1, raft2) + + // Add raft3 to the cluster + addPeer(t, raft1, raft3) + + physical.ExerciseBackend(t, raft1) + + time.Sleep(10 * time.Second) + // Make sure all stores are the same + compareFSMs(t, raft1.fsm, raft2.fsm) + compareFSMs(t, raft1.fsm, raft3.fsm) +} + +func TestRaft_GetOfflineConfig(t *testing.T) { + // Create 3 raft nodes + raft1, dir1 := GetRaft(t, true, true) + raft2, dir2 := GetRaft(t, false, true) + raft3, dir3 := GetRaft(t, false, true) + defer os.RemoveAll(dir1) + defer os.RemoveAll(dir2) + defer os.RemoveAll(dir3) + + // Add them all to the cluster + addPeer(t, raft1, raft2) + addPeer(t, raft1, raft3) + + // Add some data into the FSM + physical.ExerciseBackend(t, raft1) + + time.Sleep(10 * time.Second) + + // Spin down the raft cluster and check that GetConfigurationOffline + // returns 3 voters + raft3.TeardownCluster(nil) + raft2.TeardownCluster(nil) + raft1.TeardownCluster(nil) + + conf, err := raft1.GetConfigurationOffline() + if err != nil { + t.Fatal(err) + } + if len(conf.Servers) != 3 { + t.Fatalf("three raft nodes existed but we only see %d", len(conf.Servers)) + } + for _, s := range conf.Servers { + if s.Voter != true { + t.Fatalf("one of the nodes is not a voter") + } + } +} + +func TestRaft_Recovery(t *testing.T) { + // Create 4 raft nodes + raft1, dir1 := GetRaft(t, true, true) + raft2, dir2 := GetRaft(t, false, true) + raft3, dir3 := GetRaft(t, false, true) + raft4, dir4 := GetRaft(t, false, true) + defer os.RemoveAll(dir1) + defer os.RemoveAll(dir2) + defer os.RemoveAll(dir3) + defer os.RemoveAll(dir4) + + // Add them all to the cluster + addPeer(t, raft1, raft2) + addPeer(t, raft1, raft3) + addPeer(t, raft1, raft4) + + // Add some data into the FSM + physical.ExerciseBackend(t, raft1) + + time.Sleep(10 * time.Second) + + // Bring down all nodes + raft1.TeardownCluster(nil) + raft2.TeardownCluster(nil) + raft3.TeardownCluster(nil) + raft4.TeardownCluster(nil) + + // Prepare peers.json + type RecoveryPeer struct { + ID string `json:"id"` + Address string `json:"address"` + NonVoter bool `json:"non_voter"` + } + + // Leave out node 1 during recovery + peersList := make([]*RecoveryPeer, 0, 3) + peersList = append(peersList, &RecoveryPeer{ + ID: raft1.NodeID(), + Address: raft1.NodeID(), + NonVoter: false, + }) + peersList = append(peersList, &RecoveryPeer{ + ID: raft2.NodeID(), + Address: raft2.NodeID(), + NonVoter: false, + }) + 
peersList = append(peersList, &RecoveryPeer{ + ID: raft4.NodeID(), + Address: raft4.NodeID(), + NonVoter: false, + }) + + peersJSONBytes, err := jsonutil.EncodeJSON(peersList) + if err != nil { + t.Fatal(err) + } + err = ioutil.WriteFile(filepath.Join(filepath.Join(dir1, raftState), "peers.json"), peersJSONBytes, 0o644) + if err != nil { + t.Fatal(err) + } + err = ioutil.WriteFile(filepath.Join(filepath.Join(dir2, raftState), "peers.json"), peersJSONBytes, 0o644) + if err != nil { + t.Fatal(err) + } + err = ioutil.WriteFile(filepath.Join(filepath.Join(dir4, raftState), "peers.json"), peersJSONBytes, 0o644) + if err != nil { + t.Fatal(err) + } + + // Bring up the nodes again + raft1.SetupCluster(context.Background(), SetupOpts{}) + raft2.SetupCluster(context.Background(), SetupOpts{}) + raft4.SetupCluster(context.Background(), SetupOpts{}) + + peers, err := raft1.Peers(context.Background()) + if err != nil { + t.Fatal(err) + } + if len(peers) != 3 { + t.Fatalf("failed to recover the cluster") + } + + time.Sleep(10 * time.Second) + + compareFSMs(t, raft1.fsm, raft2.fsm) + compareFSMs(t, raft1.fsm, raft4.fsm) +} + +func TestRaft_TransactionalBackend_ThreeNode(t *testing.T) { + raft1, dir := GetRaft(t, true, true) + raft2, dir2 := GetRaft(t, false, true) + raft3, dir3 := GetRaft(t, false, true) + defer os.RemoveAll(dir) + defer os.RemoveAll(dir2) + defer os.RemoveAll(dir3) + + // Add raft2 to the cluster + addPeer(t, raft1, raft2) + + // Add raft3 to the cluster + addPeer(t, raft1, raft3) + + physical.ExerciseTransactionalBackend(t, raft1) + + time.Sleep(10 * time.Second) + // Make sure all stores are the same + compareFSMs(t, raft1.fsm, raft2.fsm) + compareFSMs(t, raft1.fsm, raft3.fsm) +} + +func TestRaft_Backend_Performance(t *testing.T) { + b, dir := GetRaft(t, true, false) + defer os.RemoveAll(dir) + + defaultConfig := raft.DefaultConfig() + + localConfig := raft.DefaultConfig() + b.applyConfigSettings(localConfig) + + if localConfig.ElectionTimeout != defaultConfig.ElectionTimeout*5 { + t.Fatalf("bad config: %v", localConfig) + } + if localConfig.HeartbeatTimeout != defaultConfig.HeartbeatTimeout*5 { + t.Fatalf("bad config: %v", localConfig) + } + if localConfig.LeaderLeaseTimeout != defaultConfig.LeaderLeaseTimeout*5 { + t.Fatalf("bad config: %v", localConfig) + } + + b.conf = map[string]string{ + "path": dir, + "performance_multiplier": "5", + } + + localConfig = raft.DefaultConfig() + b.applyConfigSettings(localConfig) + + if localConfig.ElectionTimeout != defaultConfig.ElectionTimeout*5 { + t.Fatalf("bad config: %v", localConfig) + } + if localConfig.HeartbeatTimeout != defaultConfig.HeartbeatTimeout*5 { + t.Fatalf("bad config: %v", localConfig) + } + if localConfig.LeaderLeaseTimeout != defaultConfig.LeaderLeaseTimeout*5 { + t.Fatalf("bad config: %v", localConfig) + } + + b.conf = map[string]string{ + "path": dir, + "performance_multiplier": "1", + } + + localConfig = raft.DefaultConfig() + b.applyConfigSettings(localConfig) + + if localConfig.ElectionTimeout != defaultConfig.ElectionTimeout { + t.Fatalf("bad config: %v", localConfig) + } + if localConfig.HeartbeatTimeout != defaultConfig.HeartbeatTimeout { + t.Fatalf("bad config: %v", localConfig) + } + if localConfig.LeaderLeaseTimeout != defaultConfig.LeaderLeaseTimeout { + t.Fatalf("bad config: %v", localConfig) + } +} + +func BenchmarkDB_Puts(b *testing.B) { + raft, dir := GetRaft(b, true, false) + defer os.RemoveAll(dir) + raft2, dir2 := GetRaft(b, true, false) + defer os.RemoveAll(dir2) + + bench := func(b *testing.B, s 
physical.Backend, dataSize int) { + data, err := uuid.GenerateRandomBytes(dataSize) + if err != nil { + b.Fatal(err) + } + + ctx := context.Background() + pe := &physical.Entry{ + Value: data, + } + testName := b.Name() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + pe.Key = fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%s-%d", testName, i)))) + err := s.Put(ctx, pe) + if err != nil { + b.Fatal(err) + } + } + } + + b.Run("256b", func(b *testing.B) { bench(b, raft, 256) }) + b.Run("256kb", func(b *testing.B) { bench(b, raft2, 256*1024) }) +} + +func BenchmarkDB_Snapshot(b *testing.B) { + raft, dir := GetRaft(b, true, false) + defer os.RemoveAll(dir) + + data, err := uuid.GenerateRandomBytes(256 * 1024) + if err != nil { + b.Fatal(err) + } + + ctx := context.Background() + pe := &physical.Entry{ + Value: data, + } + testName := b.Name() + + for i := 0; i < 100; i++ { + pe.Key = fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%s-%d", testName, i)))) + err = raft.Put(ctx, pe) + if err != nil { + b.Fatal(err) + } + } + + bench := func(b *testing.B, s *FSM) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + pe.Key = fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%s-%d", testName, i)))) + s.writeTo(ctx, discardCloser{Writer: ioutil.Discard}, discardCloser{Writer: ioutil.Discard}) + } + } + + b.Run("256kb", func(b *testing.B) { bench(b, raft.fsm) }) +} + +type discardCloser struct { + io.Writer +} + +func (d discardCloser) Close() error { return nil } +func (d discardCloser) CloseWithError(error) error { return nil } diff --git a/physical/raft/raft_util.go b/physical/raft/raft_util.go new file mode 100644 index 0000000..bd496df --- /dev/null +++ b/physical/raft/raft_util.go @@ -0,0 +1,44 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !enterprise + +package raft + +import ( + "context" + "errors" + + autopilot "github.com/hashicorp/raft-autopilot" +) + +const nonVotersAllowed = false + +func (b *RaftBackend) autopilotPromoter() autopilot.Promoter { + return autopilot.DefaultPromoter() +} + +// AddNonVotingPeer adds a new server to the raft cluster +func (b *RaftBackend) AddNonVotingPeer(ctx context.Context, peerID, clusterAddr string) error { + return errors.New("adding non voting peer is not allowed") +} + +func autopilotToAPIServerEnterprise(_ *autopilot.Server, _ *AutopilotServer) error { + return nil +} + +func autopilotToAPIStateEnterprise(_ *autopilot.State, _ *AutopilotState) error { + return nil +} + +func (d *Delegate) autopilotConfigExt() interface{} { + return nil +} + +func (d *Delegate) autopilotServerExt(_ *FollowerState) interface{} { + return nil +} + +func (d *Delegate) meta(_ *FollowerState) map[string]string { + return nil +} diff --git a/physical/raft/snapshot.go b/physical/raft/snapshot.go new file mode 100644 index 0000000..68d9c95 --- /dev/null +++ b/physical/raft/snapshot.go @@ -0,0 +1,538 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package raft + +import ( + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "math" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "github.com/golang/protobuf/proto" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/plugin/pb" + "github.com/rboyer/safeio" + bolt "go.etcd.io/bbolt" + "go.uber.org/atomic" + + "github.com/hashicorp/raft" +) + +const ( + // boltSnapshotID is the stable ID for any boltDB snapshot. 
Keeping the ID + // stable means there is only ever one bolt snapshot in the system + boltSnapshotID = "bolt-snapshot" + tmpSuffix = ".tmp" + snapPath = "snapshots" +) + +// BoltSnapshotStore implements the SnapshotStore interface and allows snapshots +// to be stored in BoltDB files on local disk. Since we always have an up to +// date FSM we use a special snapshot ID to indicate that the snapshot can be +// pulled from the BoltDB file that is currently backing the FSM. This allows us +// to provide just-in-time snapshots without doing incremental data dumps. +// +// When a snapshot is being installed on the node we will Create and Write data +// to it. This will cause the snapshot store to create a new BoltDB file and +// write the snapshot data to it. Then, we can simply rename the snapshot to the +// FSM's filename. This allows us to atomically install the snapshot and +// reduces the amount of disk i/o. Older snapshots are reaped on startup and +// before each subsequent snapshot write. This ensures we only have one snapshot +// on disk at a time. +type BoltSnapshotStore struct { + // path is the directory in which to store file based snapshots + path string + + // We hold a copy of the FSM so we can stream snapshots straight out of the + // database. + fsm *FSM + + logger log.Logger +} + +// BoltSnapshotSink implements SnapshotSink optionally choosing to write to a +// file. +type BoltSnapshotSink struct { + store *BoltSnapshotStore + logger log.Logger + meta raft.SnapshotMeta + trans raft.Transport + + // These fields will be used if we are writing a snapshot (vs. reading + // one) + written atomic.Bool + writer io.WriteCloser + writeError error + dir string + parentDir string + doneWritingCh chan struct{} + + l sync.Mutex + closed bool +} + +// NewBoltSnapshotStore creates a new BoltSnapshotStore based +// on a base directory. +func NewBoltSnapshotStore(base string, logger log.Logger, fsm *FSM) (*BoltSnapshotStore, error) { + if logger == nil { + return nil, fmt.Errorf("no logger provided") + } + + // Ensure our path exists + path := filepath.Join(base, snapPath) + if err := os.MkdirAll(path, 0o700); err != nil && !os.IsExist(err) { + return nil, fmt.Errorf("snapshot path not accessible: %v", err) + } + + // Setup the store + store := &BoltSnapshotStore{ + logger: logger, + fsm: fsm, + path: path, + } + + // Cleanup any old or failed snapshots on startup. + if err := store.ReapSnapshots(); err != nil { + return nil, err + } + + return store, nil +} + +// Create is used to start a new snapshot +func (f *BoltSnapshotStore) Create(version raft.SnapshotVersion, index, term uint64, configuration raft.Configuration, configurationIndex uint64, trans raft.Transport) (raft.SnapshotSink, error) { + // We only support version 1 snapshots at this time. + if version != 1 { + return nil, fmt.Errorf("unsupported snapshot version %d", version) + } + + // Create the sink + sink := &BoltSnapshotSink{ + store: f, + logger: f.logger, + meta: raft.SnapshotMeta{ + Version: version, + ID: boltSnapshotID, + Index: index, + Term: term, + Configuration: configuration, + ConfigurationIndex: configurationIndex, + }, + trans: trans, + } + + return sink, nil +} + +// List returns available snapshots in the store. It only returns bolt +// snapshots. No snapshot will be returned if there are no indexes in the +// FSM. 
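+// The result is therefore either empty or a single entry whose ID is the
+// stable boltSnapshotID; a sketch of the two expected shapes:
+//
+//	snaps, _ := store.List()
+//	// len(snaps) == 0  -> the FSM has not applied any indexes yet
+//	// len(snaps) == 1  -> snaps[0].ID == boltSnapshotID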
+func (f *BoltSnapshotStore) List() ([]*raft.SnapshotMeta, error) {
+	meta, err := f.getMetaFromFSM()
+	if err != nil {
+		return nil, err
+	}
+
+	// If we haven't seen any data yet do not return a snapshot
+	if meta.Index == 0 {
+		return nil, nil
+	}
+
+	return []*raft.SnapshotMeta{meta}, nil
+}
+
+// getMetaFromFSM returns the fsm's latest state and configuration.
+func (f *BoltSnapshotStore) getMetaFromFSM() (*raft.SnapshotMeta, error) {
+	latestIndex, latestConfig := f.fsm.LatestState()
+	meta := &raft.SnapshotMeta{
+		Version: 1,
+		ID:      boltSnapshotID,
+		Index:   latestIndex.Index,
+		Term:    latestIndex.Term,
+	}
+
+	if latestConfig != nil {
+		meta.ConfigurationIndex, meta.Configuration = protoConfigurationToRaftConfiguration(latestConfig)
+	}
+
+	return meta, nil
+}
+
+// Open takes a snapshot ID and returns a ReadCloser for that snapshot.
+func (f *BoltSnapshotStore) Open(id string) (*raft.SnapshotMeta, io.ReadCloser, error) {
+	if id == boltSnapshotID {
+		return f.openFromFSM()
+	}
+
+	return f.openFromFile(id)
+}
+
+func (f *BoltSnapshotStore) openFromFSM() (*raft.SnapshotMeta, io.ReadCloser, error) {
+	meta, err := f.getMetaFromFSM()
+	if err != nil {
+		return nil, nil, err
+	}
+	// If we don't have any data return an error
+	if meta.Index == 0 {
+		return nil, nil, errors.New("no snapshot data")
+	}
+
+	// Stream data out of the FSM to calculate the size
+	readCloser, writeCloser := io.Pipe()
+	metaReadCloser, metaWriteCloser := io.Pipe()
+	go func() {
+		f.fsm.writeTo(context.Background(), metaWriteCloser, writeCloser)
+	}()
+
+	// Compute the size
+	n, err := io.Copy(ioutil.Discard, metaReadCloser)
+	if err != nil {
+		f.logger.Error("failed to read state file", "error", err)
+		metaReadCloser.Close()
+		readCloser.Close()
+		return nil, nil, err
+	}
+
+	meta.Size = n
+	metaReadCloser.Close()
+
+	return meta, readCloser, nil
+}
+
+func (f *BoltSnapshotStore) getMetaFromDB(id string) (*raft.SnapshotMeta, error) {
+	if len(id) == 0 {
+		return nil, errors.New("cannot open empty snapshot ID")
+	}
+
+	filename := filepath.Join(f.path, id, databaseFilename)
+	boltDB, err := bolt.Open(filename, 0o600, &bolt.Options{Timeout: 1 * time.Second})
+	if err != nil {
+		return nil, err
+	}
+	defer boltDB.Close()
+
+	meta := &raft.SnapshotMeta{
+		Version: 1,
+		ID:      id,
+	}
+
+	err = boltDB.View(func(tx *bolt.Tx) error {
+		b := tx.Bucket(configBucketName)
+		val := b.Get(latestIndexKey)
+		if val != nil {
+			var snapshotIndexes IndexValue
+			err := proto.Unmarshal(val, &snapshotIndexes)
+			if err != nil {
+				return err
+			}
+
+			meta.Index = snapshotIndexes.Index
+			meta.Term = snapshotIndexes.Term
+		}
+
+		// Read in our latest config and populate it in memory
+		val = b.Get(latestConfigKey)
+		if val != nil {
+			var config ConfigurationValue
+			err := proto.Unmarshal(val, &config)
+			if err != nil {
+				return err
+			}
+
+			meta.ConfigurationIndex, meta.Configuration = protoConfigurationToRaftConfiguration(&config)
+		}
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return meta, nil
+}
+
+func (f *BoltSnapshotStore) openFromFile(id string) (*raft.SnapshotMeta, io.ReadCloser, error) {
+	meta, err := f.getMetaFromDB(id)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	filename := filepath.Join(f.path, id, databaseFilename)
+	installer := &boltSnapshotInstaller{
+		meta:       meta,
+		ReadCloser: ioutil.NopCloser(strings.NewReader(filename)),
+		filename:   filename,
+	}
+
+	return meta, installer, nil
+}
+
+// ReapSnapshots reaps all snapshots.
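+// It removes every directory under the snapshot path, both finalized
+// snapshots and *.tmp directories left behind by failed attempts (plain
+// files are skipped). The layout it operates on, roughly (directory names
+// come from snapshotName, i.e. <term>-<index>-<millis>; the names below are
+// hypothetical):
+//
+//	<path>/snapshots/2-103-1700000000000/        // reaped
+//	<path>/snapshots/2-104-1700000000001.tmp/    // reaped, with a warning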
+func (f *BoltSnapshotStore) ReapSnapshots() error {
+	snapshots, err := ioutil.ReadDir(f.path)
+	switch {
+	case err == nil:
+	case os.IsNotExist(err):
+		return nil
+	default:
+		f.logger.Error("failed to scan snapshot directory", "error", err)
+		return err
+	}
+
+	for _, snap := range snapshots {
+		// Ignore any files
+		if !snap.IsDir() {
+			continue
+		}
+
+		// Warn about temporary snapshots; this indicates a previously failed
+		// snapshot attempt. We still want to clean these up.
+		dirName := snap.Name()
+		if strings.HasSuffix(dirName, tmpSuffix) {
+			f.logger.Warn("found temporary snapshot", "name", dirName)
+		}
+
+		path := filepath.Join(f.path, dirName)
+		f.logger.Info("reaping snapshot", "path", path)
+		if err := os.RemoveAll(path); err != nil {
+			f.logger.Error("failed to reap snapshot", "path", snap.Name(), "error", err)
+			return err
+		}
+	}
+
+	return nil
+}
+
+// ID returns the ID of the snapshot; it can be used with Open()
+// after the snapshot is finalized.
+func (s *BoltSnapshotSink) ID() string {
+	s.l.Lock()
+	defer s.l.Unlock()
+
+	return s.meta.ID
+}
+
+func (s *BoltSnapshotSink) writeBoltDBFile() error {
+	// Create a new path
+	name := snapshotName(s.meta.Term, s.meta.Index)
+	path := filepath.Join(s.store.path, name+tmpSuffix)
+	s.logger.Info("creating new snapshot", "path", path)
+
+	// Make the directory
+	if err := os.MkdirAll(path, 0o700); err != nil {
+		s.logger.Error("failed to make snapshot directory", "error", err)
+		return err
+	}
+
+	// Create the BoltDB file
+	dbPath := filepath.Join(path, databaseFilename)
+	boltDB, err := bolt.Open(dbPath, 0o600, &bolt.Options{Timeout: 1 * time.Second})
+	if err != nil {
+		return err
+	}
+
+	// Write the snapshot metadata
+	if err := writeSnapshotMetaToDB(&s.meta, boltDB); err != nil {
+		return err
+	}
+
+	// Set the snapshot ID to the generated name.
+	s.meta.ID = name
+
+	// Create the done channel
+	s.doneWritingCh = make(chan struct{})
+
+	// Store the directories so we can commit the changes on success or abort
+	// them on failure.
+	s.dir = path
+	s.parentDir = s.store.path
+
+	// Create a pipe so we pipe writes into the goroutine below.
+	reader, writer := io.Pipe()
+	s.writer = writer
+
+	// Start a goroutine in charge of piping data from the snapshot's Write
+	// call to the delimited reader and the BoltDB file.
+	go func() {
+		defer close(s.doneWritingCh)
+		defer boltDB.Close()
+
+		// The delimited reader will parse full proto messages from the snapshot
+		// data.
+		protoReader := NewDelimitedReader(reader, math.MaxInt32)
+		defer protoReader.Close()
+
+		var done bool
+		var keys int
+		entry := new(pb.StorageEntry)
+		for !done {
+			err := boltDB.Update(func(tx *bolt.Tx) error {
+				b, err := tx.CreateBucketIfNotExists(dataBucketName)
+				if err != nil {
+					return err
+				}
+
+				// Commit in batches of 50k. Bolt holds all the data in memory and
+				// doesn't split the pages until commit so we do incremental writes.
+				for i := 0; i < 50000; i++ {
+					err := protoReader.ReadMsg(entry)
+					if err != nil {
+						if err == io.EOF {
+							done = true
+							return nil
+						}
+						return err
+					}
+
+					err = b.Put([]byte(entry.Key), entry.Value)
+					if err != nil {
+						return err
+					}
+					keys += 1
+				}
+
+				return nil
+			})
+			if err != nil {
+				s.logger.Error("snapshot write: failed to write transaction", "error", err)
+				s.writeError = err
+				return
+			}
+
+			s.logger.Trace("snapshot write: writing keys", "num_written", keys)
+		}
+	}()
+
+	return nil
+}
+
+// Write is used to append to the bolt file. The first call to write ensures we
+// have the file created.
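+// The expected lifecycle of a sink, sketched for the success path (Close and
+// Cancel below handle the commit or abort of the .tmp directory):
+//
+//	sink, _ := store.Create(...)  // no file on disk yet
+//	sink.Write(chunk)             // first call: reap old snapshots, create the
+//	                              // .tmp BoltDB, start the writer goroutine
+//	sink.Write(chunk)             // later calls: stream through the pipe
+//	sink.Close()                  // wait for the writer, then rename .tmp
+//	                              // into its final location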
+func (s *BoltSnapshotSink) Write(b []byte) (int, error) { + s.l.Lock() + defer s.l.Unlock() + + // If this is the first call to Write we need to setup the boltDB file and + // kickoff the pipeline write + if previouslyWritten := s.written.Swap(true); !previouslyWritten { + // Reap any old snapshots + if err := s.store.ReapSnapshots(); err != nil { + return 0, err + } + + if err := s.writeBoltDBFile(); err != nil { + return 0, err + } + } + + return s.writer.Write(b) +} + +// Close is used to indicate a successful end. +func (s *BoltSnapshotSink) Close() error { + s.l.Lock() + defer s.l.Unlock() + + // Make sure close is idempotent + if s.closed { + return nil + } + s.closed = true + + if s.writer != nil { + s.writer.Close() + <-s.doneWritingCh + + if s.writeError != nil { + // If we encountered an error while writing then we should remove + // the directory and return the error + _ = os.RemoveAll(s.dir) + return s.writeError + } + + // Move the directory into place + newPath := strings.TrimSuffix(s.dir, tmpSuffix) + + var err error + if runtime.GOOS != "windows" { + err = safeio.Rename(s.dir, newPath) + } else { + err = os.Rename(s.dir, newPath) + } + + if err != nil { + s.logger.Error("failed to move snapshot into place", "error", err) + return err + } + } + + return nil +} + +// Cancel is used to indicate an unsuccessful end. +func (s *BoltSnapshotSink) Cancel() error { + s.l.Lock() + defer s.l.Unlock() + + // Make sure close is idempotent + if s.closed { + return nil + } + s.closed = true + + if s.writer != nil { + s.writer.Close() + <-s.doneWritingCh + + // Attempt to remove all artifacts + return os.RemoveAll(s.dir) + } + + return nil +} + +type boltSnapshotInstaller struct { + io.ReadCloser + meta *raft.SnapshotMeta + filename string +} + +func (i *boltSnapshotInstaller) Filename() string { + return i.filename +} + +func (i *boltSnapshotInstaller) Metadata() *raft.SnapshotMeta { + return i.meta +} + +func (i *boltSnapshotInstaller) Install(filename string) error { + if len(i.filename) == 0 { + return errors.New("snapshot filename empty") + } + + if len(filename) == 0 { + return errors.New("fsm filename empty") + } + + // Rename the snapshot to the FSM location + if runtime.GOOS != "windows" { + return safeio.Rename(i.filename, filename) + } else { + return os.Rename(i.filename, filename) + } +} + +// snapshotName generates a name for the snapshot. +func snapshotName(term, index uint64) string { + now := time.Now() + msec := now.UnixNano() / int64(time.Millisecond) + return fmt.Sprintf("%d-%d-%d", term, index, msec) +} diff --git a/physical/raft/snapshot_test.go b/physical/raft/snapshot_test.go new file mode 100644 index 0000000..3472c8d --- /dev/null +++ b/physical/raft/snapshot_test.go @@ -0,0 +1,960 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package raft
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"hash/crc64"
+	"io"
+	"io/ioutil"
+	"net/http/httptest"
+	"os"
+	"path/filepath"
+	"reflect"
+	"runtime"
+	"testing"
+	"time"
+
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/raft"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/hashicorp/vault/sdk/physical"
+	"github.com/hashicorp/vault/sdk/plugin/pb"
+)
+
+type idAddr struct {
+	id string
+}
+
+func (a *idAddr) Network() string { return "inmem" }
+func (a *idAddr) String() string  { return a.id }
+
+func addPeer(t *testing.T, leader, follower *RaftBackend) {
+	t.Helper()
+	if err := leader.AddPeer(context.Background(), follower.NodeID(), follower.NodeID()); err != nil {
+		t.Fatal(err)
+	}
+
+	peers, err := leader.Peers(context.Background())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = follower.Bootstrap(peers)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = follower.SetupCluster(context.Background(), SetupOpts{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	leader.raftTransport.(*raft.InmemTransport).Connect(raft.ServerAddress(follower.NodeID()), follower.raftTransport)
+	follower.raftTransport.(*raft.InmemTransport).Connect(raft.ServerAddress(leader.NodeID()), leader.raftTransport)
+}
+
+func TestRaft_Snapshot_Loading(t *testing.T) {
+	raft, dir := GetRaft(t, true, false)
+	defer os.RemoveAll(dir)
+
+	// Write some data
+	for i := 0; i < 1000; i++ {
+		err := raft.Put(context.Background(), &physical.Entry{
+			Key:   fmt.Sprintf("key-%d", i),
+			Value: []byte(fmt.Sprintf("value-%d", i)),
+		})
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	readCloser, writeCloser := io.Pipe()
+	metaReadCloser, metaWriteCloser := io.Pipe()
+
+	go func() {
+		raft.fsm.writeTo(context.Background(), metaWriteCloser, writeCloser)
+	}()
+
+	// Create a CRC64 hash
+	stateHash := crc64.New(crc64.MakeTable(crc64.ECMA))
+
+	// Compute the hash
+	size1, err := io.Copy(stateHash, metaReadCloser)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	computed1 := stateHash.Sum(nil)
+
+	// Create a CRC64 hash
+	stateHash = crc64.New(crc64.MakeTable(crc64.ECMA))
+
+	// Compute the hash
+	size2, err := io.Copy(stateHash, readCloser)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	computed2 := stateHash.Sum(nil)
+
+	if size1 != size2 {
+		t.Fatal("sizes did not match")
+	}
+
+	if !bytes.Equal(computed1, computed2) {
+		t.Fatal("hashes did not match")
+	}
+
+	snapFuture := raft.raft.Snapshot()
+	if err := snapFuture.Error(); err != nil {
+		t.Fatal(err)
+	}
+
+	meta, reader, err := snapFuture.Open()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if meta.Size != size1 {
+		t.Fatal("meta size did not match expected")
+	}
+
+	// Create a CRC64 hash
+	stateHash = crc64.New(crc64.MakeTable(crc64.ECMA))
+
+	// Compute the hash
+	size3, err := io.Copy(stateHash, reader)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	computed3 := stateHash.Sum(nil)
+	if size1 != size3 {
+		t.Fatal("sizes did not match")
+	}
+
+	if !bytes.Equal(computed1, computed3) {
+		t.Fatal("hashes did not match")
+	}
+}
+
+func TestRaft_Snapshot_Index(t *testing.T) {
+	raft, dir := GetRaft(t, true, false)
+	defer os.RemoveAll(dir)
+
+	err := raft.Put(context.Background(), &physical.Entry{
+		Key:   "key",
+		Value: []byte("value"),
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Get index
+	index, _ := raft.fsm.LatestState()
+	if index.Term != 2 {
+		t.Fatalf("unexpected term, got %d expected 2", index.Term)
+	}
+	if index.Index != 3 {
+		t.Fatalf("unexpected index, got %d expected 3", index.Index)
+	}
+
+	// Write some data
+	for i := 0; i < 100; i++
{
+		err := raft.Put(context.Background(), &physical.Entry{
+			Key:   fmt.Sprintf("key-%d", i),
+			Value: []byte(fmt.Sprintf("value-%d", i)),
+		})
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	// Get index
+	index, _ = raft.fsm.LatestState()
+	if index.Term != 2 {
+		t.Fatalf("unexpected term, got %d expected 2", index.Term)
+	}
+	if index.Index != 103 {
+		t.Fatalf("unexpected index, got %d expected 103", index.Index)
+	}
+
+	// Take a snapshot
+	snapFuture := raft.raft.Snapshot()
+	if err := snapFuture.Error(); err != nil {
+		t.Fatal(err)
+	}
+
+	meta, reader, err := snapFuture.Open()
+	if err != nil {
+		t.Fatal(err)
+	}
+	io.Copy(ioutil.Discard, reader)
+
+	if meta.Index != index.Index {
+		t.Fatalf("indexes did not match, got %d expected %d", meta.Index, index.Index)
+	}
+	if meta.Term != index.Term {
+		t.Fatalf("term did not match, got %d expected %d", meta.Term, index.Term)
+	}
+
+	// Write some more data
+	for i := 0; i < 100; i++ {
+		err := raft.Put(context.Background(), &physical.Entry{
+			Key:   fmt.Sprintf("key-%d", i),
+			Value: []byte(fmt.Sprintf("value-%d", i)),
+		})
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	// Open the same snapshot again
+	meta, reader, err = raft.snapStore.Open(meta.ID)
+	if err != nil {
+		t.Fatal(err)
+	}
+	io.Copy(ioutil.Discard, reader)
+
+	// Make sure the meta data has updated to the new values
+	if meta.Index != 203 {
+		t.Fatalf("unexpected snapshot index %d", meta.Index)
+	}
+	if meta.Term != 2 {
+		t.Fatalf("unexpected snapshot term %d", meta.Term)
+	}
+}
+
+func TestRaft_Snapshot_Peers(t *testing.T) {
+	raft1, dir := GetRaft(t, true, false)
+	raft2, dir2 := GetRaft(t, false, false)
+	raft3, dir3 := GetRaft(t, false, false)
+	defer os.RemoveAll(dir)
+	defer os.RemoveAll(dir2)
+	defer os.RemoveAll(dir3)
+
+	// Write some data
+	for i := 0; i < 1000; i++ {
+		err := raft1.Put(context.Background(), &physical.Entry{
+			Key:   fmt.Sprintf("key-%d", i),
+			Value: []byte(fmt.Sprintf("value-%d", i)),
+		})
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	// Force a snapshot
+	snapFuture := raft1.raft.Snapshot()
+	if err := snapFuture.Error(); err != nil {
+		t.Fatal(err)
+	}
+
+	commitIdx := raft1.CommittedIndex()
+
+	// Add raft2 to the cluster
+	addPeer(t, raft1, raft2)
+
+	ensureCommitApplied(t, commitIdx, raft2)
+
+	// Make sure the snapshot was applied correctly on the follower
+	if err := compareDBs(t, raft1.fsm.getDB(), raft2.fsm.getDB(), false); err != nil {
+		t.Fatal(err)
+	}
+
+	// Write some more data
+	for i := 1000; i < 2000; i++ {
+		err := raft1.Put(context.Background(), &physical.Entry{
+			Key:   fmt.Sprintf("key-%d", i),
+			Value: []byte(fmt.Sprintf("value-%d", i)),
+		})
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	snapFuture = raft1.raft.Snapshot()
+	if err := snapFuture.Error(); err != nil {
+		t.Fatal(err)
+	}
+
+	commitIdx = raft1.CommittedIndex()
+
+	// Add raft3 to the cluster
+	addPeer(t, raft1, raft3)
+
+	ensureCommitApplied(t, commitIdx, raft2)
+	ensureCommitApplied(t, commitIdx, raft3)
+
+	// Make sure all stores are the same
+	compareFSMs(t, raft1.fsm, raft2.fsm)
+	compareFSMs(t, raft1.fsm, raft3.fsm)
+}
+
+func ensureCommitApplied(t *testing.T, leaderCommitIdx uint64, backend *RaftBackend) {
+	t.Helper()
+
+	timeout := time.Now().Add(10 * time.Second)
+	for {
+		if time.Now().After(timeout) {
+			t.Fatal("timeout reached while verifying applied index on raft backend")
+		}
+
+		if backend.AppliedIndex() >= leaderCommitIdx {
+			break
+		}
+
+		time.Sleep(1 * time.Second)
+	}
+}
+
+func TestRaft_Snapshot_Restart(t *testing.T) {
+	raft1, dir := GetRaft(t, true,
false) + defer os.RemoveAll(dir) + raft2, dir2 := GetRaft(t, false, false) + defer os.RemoveAll(dir2) + + // Write some data + for i := 0; i < 100; i++ { + err := raft1.Put(context.Background(), &physical.Entry{ + Key: fmt.Sprintf("key-%d", i), + Value: []byte(fmt.Sprintf("value-%d", i)), + }) + if err != nil { + t.Fatal(err) + } + } + + // Take a snapshot + snapFuture := raft1.raft.Snapshot() + if err := snapFuture.Error(); err != nil { + t.Fatal(err) + } + // Advance FSM's index past configuration change + raft1.Put(context.Background(), &physical.Entry{ + Key: "key", + Value: []byte("value"), + }) + + // Add raft2 to the cluster + addPeer(t, raft1, raft2) + + time.Sleep(2 * time.Second) + + peers, err := raft2.Peers(context.Background()) + if err != nil { + t.Fatal(err) + } + if len(peers) != 2 { + t.Fatal(peers) + } + + // Finalize raft1 + if err := raft1.TeardownCluster(nil); err != nil { + t.Fatal(err) + } + + // Start Raft + err = raft1.SetupCluster(context.Background(), SetupOpts{}) + if err != nil { + t.Fatal(err) + } + + peers, err = raft1.Peers(context.Background()) + if err != nil { + t.Fatal(err) + } + if len(peers) != 2 { + t.Fatal(peers) + } + + compareFSMs(t, raft1.fsm, raft2.fsm) +} + +/* +func TestRaft_Snapshot_ErrorRecovery(t *testing.T) { + raft1, dir := GetRaft(t, true, false) + raft2, dir2 := GetRaft(t, false, false) + raft3, dir3 := GetRaft(t, false, false) + defer os.RemoveAll(dir) + defer os.RemoveAll(dir2) + defer os.RemoveAll(dir3) + + // Add raft2 to the cluster + addPeer(t, raft1, raft2) + + // Write some data + for i := 0; i < 100; i++ { + err := raft1.Put(context.Background(), &physical.Entry{ + Key: fmt.Sprintf("key-%d", i), + Value: []byte(fmt.Sprintf("value-%d", i)), + }) + if err != nil { + t.Fatal(err) + } + } + + // Take a snapshot on each node to ensure we no longer have older logs + snapFuture := raft1.raft.Snapshot() + if err := snapFuture.Error(); err != nil { + t.Fatal(err) + } + + stepDownLeader(t, raft1) + leader := waitForLeader(t, raft1, raft2) + + snapFuture = leader.raft.Snapshot() + if err := snapFuture.Error(); err != nil { + t.Fatal(err) + } + + // Advance FSM's index past snapshot index + leader.Put(context.Background(), &physical.Entry{ + Key: "key", + Value: []byte("value"), + }) + + // Error on snapshot restore + raft3.fsm.testSnapshotRestoreError = true + + // Add raft3 to the cluster + addPeer(t, leader, raft3) + + time.Sleep(2 * time.Second) + + // Restart the failing node to make sure fresh state does not have invalid + // values. + if err := raft3.TeardownCluster(nil); err != nil { + t.Fatal(err) + } + + // Ensure the databases are not equal + if err := compareFSMsWithErr(t, leader.fsm, raft3.fsm); err == nil { + t.Fatal("nil error") + } + + // Remove error and make sure we can reconcile state + raft3.fsm.testSnapshotRestoreError = false + + // Step down leader node + stepDownLeader(t, leader) + leader = waitForLeader(t, raft1, raft2) + + // Start Raft3 + if err := raft3.SetupCluster(context.Background(), SetupOpts{}); err != nil { + t.Fatal(err) + } + + connectPeers(raft1, raft2, raft3) + waitForLeader(t, raft1, raft2) + + time.Sleep(5 * time.Second) + + // Make sure state gets re-replicated. 
+ compareFSMs(t, raft1.fsm, raft3.fsm) +}*/ + +func TestRaft_Snapshot_Take_Restore(t *testing.T) { + raft1, dir := GetRaft(t, true, false) + defer os.RemoveAll(dir) + raft2, dir2 := GetRaft(t, false, false) + defer os.RemoveAll(dir2) + + addPeer(t, raft1, raft2) + + // Write some data + for i := 0; i < 100; i++ { + err := raft1.Put(context.Background(), &physical.Entry{ + Key: fmt.Sprintf("key-%d", i), + Value: []byte(fmt.Sprintf("value-%d", i)), + }) + if err != nil { + t.Fatal(err) + } + } + + recorder := httptest.NewRecorder() + snap := logical.NewHTTPResponseWriter(recorder) + + err := raft1.Snapshot(snap, nil) + if err != nil { + t.Fatal(err) + } + + // Write some more data + for i := 100; i < 200; i++ { + err := raft1.Put(context.Background(), &physical.Entry{ + Key: fmt.Sprintf("key-%d", i), + Value: []byte(fmt.Sprintf("value-%d", i)), + }) + if err != nil { + t.Fatal(err) + } + } + + snapFile, cleanup, metadata, err := raft1.WriteSnapshotToTemp(ioutil.NopCloser(recorder.Body), nil) + if err != nil { + t.Fatal(err) + } + defer cleanup() + + err = raft1.RestoreSnapshot(context.Background(), metadata, snapFile) + if err != nil { + t.Fatal(err) + } + + // make sure we don't have the second batch of writes + for i := 100; i < 200; i++ { + { + value, err := raft1.Get(context.Background(), fmt.Sprintf("key-%d", i)) + if err != nil { + t.Fatal(err) + } + if value != nil { + t.Fatal("didn't remove data") + } + } + { + value, err := raft2.Get(context.Background(), fmt.Sprintf("key-%d", i)) + if err != nil { + t.Fatal(err) + } + if value != nil { + t.Fatal("didn't remove data") + } + } + } + + time.Sleep(10 * time.Second) + compareFSMs(t, raft1.fsm, raft2.fsm) +} + +func TestBoltSnapshotStore_CreateSnapshotMissingParentDir(t *testing.T) { + parent, err := ioutil.TempDir("", "raft") + if err != nil { + t.Fatalf("err: %v ", err) + } + defer os.RemoveAll(parent) + + dir, err := ioutil.TempDir(parent, "raft") + if err != nil { + t.Fatalf("err: %v ", err) + } + + logger := hclog.New(&hclog.LoggerOptions{ + Name: "raft", + Level: hclog.Trace, + }) + + snap, err := NewBoltSnapshotStore(dir, logger, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + os.RemoveAll(parent) + _, trans := raft.NewInmemTransport(raft.NewInmemAddr()) + sink, err := snap.Create(raft.SnapshotVersionMax, 10, 3, raft.Configuration{}, 0, trans) + if err != nil { + t.Fatal(err) + } + defer sink.Cancel() + + _, err = sink.Write([]byte("test")) + if err != nil { + t.Fatalf("should not fail when using non existing parent: %s", err) + } + + // Ensure the snapshot file exists + _, err = os.Stat(filepath.Join(snap.path, sink.ID()+tmpSuffix, databaseFilename)) + if err != nil { + t.Fatal(err) + } +} + +func TestBoltSnapshotStore_Listing(t *testing.T) { + // Create a test dir + parent, err := ioutil.TempDir("", "raft") + if err != nil { + t.Fatalf("err: %v ", err) + } + defer os.RemoveAll(parent) + + dir, err := ioutil.TempDir(parent, "raft") + if err != nil { + t.Fatalf("err: %v ", err) + } + + logger := hclog.New(&hclog.LoggerOptions{ + Name: "raft", + Level: hclog.Trace, + }) + + fsm, err := NewFSM(parent, "", logger) + if err != nil { + t.Fatal(err) + } + + snap, err := NewBoltSnapshotStore(dir, logger, fsm) + if err != nil { + t.Fatalf("err: %v", err) + } + + // FSM has no data, should have empty snapshot list + snaps, err := snap.List() + if err != nil { + t.Fatalf("err: %v", err) + } + if len(snaps) != 0 { + t.Fatalf("expect 0 snapshots: %v", snaps) + } + + // Move the fsm forward + err = 
fsm.witnessSnapshot(&raft.SnapshotMeta{ + Index: 100, + Term: 20, + Configuration: raft.Configuration{}, + ConfigurationIndex: 0, + }) + if err != nil { + t.Fatal(err) + } + + snaps, err = snap.List() + if err != nil { + t.Fatal(err) + } + if len(snaps) != 1 { + t.Fatalf("expect 1 snapshots: %v", snaps) + } + + if snaps[0].Index != 100 || snaps[0].Term != 20 { + t.Fatalf("bad snapshot: %+v", snaps[0]) + } + + if snaps[0].ID != boltSnapshotID { + t.Fatalf("bad snapshot: %+v", snaps[0]) + } +} + +func TestBoltSnapshotStore_CreateInstallSnapshot(t *testing.T) { + // Create a test dir + parent, err := ioutil.TempDir("", "raft") + if err != nil { + t.Fatalf("err: %v ", err) + } + defer os.RemoveAll(parent) + + dir, err := ioutil.TempDir(parent, "raft") + if err != nil { + t.Fatalf("err: %v ", err) + } + + logger := hclog.New(&hclog.LoggerOptions{ + Name: "raft", + Level: hclog.Trace, + }) + + fsm, err := NewFSM(parent, "", logger) + if err != nil { + t.Fatal(err) + } + defer fsm.Close() + + snap, err := NewBoltSnapshotStore(dir, logger, fsm) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Check no snapshots + snaps, err := snap.List() + if err != nil { + t.Fatalf("err: %v", err) + } + if len(snaps) != 0 { + t.Fatalf("did not expect any snapshots: %v", snaps) + } + + // Create a new sink + var configuration raft.Configuration + configuration.Servers = append(configuration.Servers, raft.Server{ + Suffrage: raft.Voter, + ID: raft.ServerID("my id"), + Address: raft.ServerAddress("over here"), + }) + _, trans := raft.NewInmemTransport(raft.NewInmemAddr()) + sink, err := snap.Create(raft.SnapshotVersionMax, 10, 3, configuration, 2, trans) + if err != nil { + t.Fatalf("err: %v", err) + } + + protoWriter := NewDelimitedWriter(sink) + + err = fsm.Put(context.Background(), &physical.Entry{ + Key: "test-key", + Value: []byte("test-value"), + }) + if err != nil { + t.Fatal(err) + } + + err = fsm.Put(context.Background(), &physical.Entry{ + Key: "test-key1", + Value: []byte("test-value1"), + }) + if err != nil { + t.Fatal(err) + } + + // Write to the sink + err = protoWriter.WriteMsg(&pb.StorageEntry{ + Key: "test-key", + Value: []byte("test-value"), + }) + if err != nil { + t.Fatalf("err: %v", err) + } + err = protoWriter.WriteMsg(&pb.StorageEntry{ + Key: "test-key1", + Value: []byte("test-value1"), + }) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Done! 
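+	// Editor's note: Close finalizes the sink; once closed, the snapshot
+	// becomes readable by ID via snap.Open(sink.ID()), as exercised below.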
+ err = sink.Close() + if err != nil { + t.Fatalf("err: %v", err) + } + + // Read the snapshot + meta, r, err := snap.Open(sink.ID()) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Check the latest + if meta.Index != 10 { + t.Fatalf("bad snapshot: %+v", meta) + } + if meta.Term != 3 { + t.Fatalf("bad snapshot: %+v", meta) + } + if !reflect.DeepEqual(meta.Configuration, configuration) { + t.Fatalf("bad snapshot: %+v", meta) + } + if meta.ConfigurationIndex != 2 { + t.Fatalf("bad snapshot: %+v", meta) + } + + installer, ok := r.(*boltSnapshotInstaller) + if !ok { + t.Fatal("expected snapshot installer object") + } + + newFSM, err := NewFSM(filepath.Dir(installer.Filename()), "", logger) + if err != nil { + t.Fatal(err) + } + + err = compareDBs(t, fsm.getDB(), newFSM.getDB(), true) + if err != nil { + t.Fatal(err) + } + + // Make sure config data is different + err = compareDBs(t, fsm.getDB(), newFSM.getDB(), false) + if err == nil { + t.Fatal("expected error") + } + + if err := newFSM.Close(); err != nil { + t.Fatal(err) + } + + err = fsm.Restore(installer) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 2; i++ { + latestIndex, latestConfigRaw := fsm.LatestState() + latestConfigIndex, latestConfig := protoConfigurationToRaftConfiguration(latestConfigRaw) + if latestIndex.Index != 10 { + t.Fatalf("bad install: %+v", latestIndex) + } + if latestIndex.Term != 3 { + t.Fatalf("bad install: %+v", latestIndex) + } + if !reflect.DeepEqual(latestConfig, configuration) { + t.Fatalf("bad install: %+v", latestConfig) + } + if latestConfigIndex != 2 { + t.Fatalf("bad install: %+v", latestConfigIndex) + } + + v, err := fsm.Get(context.Background(), "test-key") + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(v.Value, []byte("test-value")) { + t.Fatalf("bad: %+v", v) + } + + v, err = fsm.Get(context.Background(), "test-key1") + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(v.Value, []byte("test-value1")) { + t.Fatalf("bad: %+v", v) + } + + // Close/Reopen the db and make sure we still match + fsm.Close() + fsm, err = NewFSM(parent, "", logger) + if err != nil { + t.Fatal(err) + } + } +} + +func TestBoltSnapshotStore_CancelSnapshot(t *testing.T) { + // Create a test dir + dir, err := ioutil.TempDir("", "raft") + if err != nil { + t.Fatalf("err: %v ", err) + } + defer os.RemoveAll(dir) + + logger := hclog.New(&hclog.LoggerOptions{ + Name: "raft", + Level: hclog.Trace, + }) + + snap, err := NewBoltSnapshotStore(dir, logger, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + _, trans := raft.NewInmemTransport(raft.NewInmemAddr()) + sink, err := snap.Create(raft.SnapshotVersionMax, 10, 3, raft.Configuration{}, 0, trans) + if err != nil { + t.Fatal(err) + } + _, err = sink.Write([]byte("test")) + if err != nil { + t.Fatalf("should not fail when using non existing parent: %s", err) + } + + // Ensure the snapshot file exists + _, err = os.Stat(filepath.Join(snap.path, sink.ID()+tmpSuffix, databaseFilename)) + if err != nil { + t.Fatal(err) + } + + // Cancel the snapshot! 
Should delete
+	err = sink.Cancel()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Ensure the snapshot file does not exist
+	_, err = os.Stat(filepath.Join(snap.path, sink.ID()+tmpSuffix, databaseFilename))
+	if !os.IsNotExist(err) {
+		t.Fatal(err)
+	}
+
+	// Make sure future writes fail
+	_, err = sink.Write([]byte("test"))
+	if err == nil {
+		t.Fatal("expected write to fail")
+	}
+}
+
+func TestBoltSnapshotStore_BadPerm(t *testing.T) {
+	var err error
+	if runtime.GOOS == "windows" {
+		t.Skip("skipping file permission test on windows")
+	}
+
+	// Create a temp dir
+	var dir1 string
+	dir1, err = ioutil.TempDir("", "raft")
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	defer os.RemoveAll(dir1)
+
+	// Create a sub dir and remove all permissions
+	var dir2 string
+	dir2, err = ioutil.TempDir(dir1, "badperm")
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	if err = os.Chmod(dir2, 0o00); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	defer os.Chmod(dir2, 0o777) // Set perms back for delete (octal, not decimal 777)
+
+	logger := hclog.New(&hclog.LoggerOptions{
+		Name:  "raft",
+		Level: hclog.Trace,
+	})
+
+	_, err = NewBoltSnapshotStore(dir2, logger, nil)
+	if err == nil {
+		t.Fatalf("should fail to use dir with bad perms")
+	}
+}
+
+func TestBoltSnapshotStore_CloseFailure(t *testing.T) {
+	// Create a test dir
+	dir, err := ioutil.TempDir("", "raft")
+	if err != nil {
+		t.Fatalf("err: %v ", err)
+	}
+	defer os.RemoveAll(dir)
+
+	logger := hclog.New(&hclog.LoggerOptions{
+		Name:  "raft",
+		Level: hclog.Trace,
+	})
+
+	snap, err := NewBoltSnapshotStore(dir, logger, nil)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	_, trans := raft.NewInmemTransport(raft.NewInmemAddr())
+	sink, err := snap.Create(raft.SnapshotVersionMax, 10, 3, raft.Configuration{}, 0, trans)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// This should stash an error value
+	_, err = sink.Write([]byte("test"))
+	if err != nil {
+		t.Fatalf("should not fail when using non existing parent: %s", err)
+	}
+
+	// Close the snapshot. The stashed write error should surface here.
+	err = sink.Close()
+	if err == nil {
+		t.Fatalf("expected error")
+	}
+
+	// Ensure the snapshot file does not exist
+	_, err = os.Stat(filepath.Join(snap.path, sink.ID()+tmpSuffix, databaseFilename))
+	if !os.IsNotExist(err) {
+		t.Fatal(err)
+	}
+
+	// Make sure future writes fail
+	_, err = sink.Write([]byte("test"))
+	if err == nil {
+		t.Fatal("expected write to fail")
+	}
+}
diff --git a/physical/raft/streamlayer.go b/physical/raft/streamlayer.go
new file mode 100644
index 0000000..90d8e49
--- /dev/null
+++ b/physical/raft/streamlayer.go
@@ -0,0 +1,384 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package raft
+
+import (
+	"bytes"
+	"context"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/tls"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"errors"
+	fmt "fmt"
+	"io"
+	"math/big"
+	mathrand "math/rand"
+	"net"
+	"net/url"
+	"sync"
+	"time"
+
+	log "github.com/hashicorp/go-hclog"
+	uuid "github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/raft"
+	"github.com/hashicorp/vault/sdk/helper/certutil"
+	"github.com/hashicorp/vault/sdk/helper/consts"
+	"github.com/hashicorp/vault/vault/cluster"
+)
+
+// TLSKey is a single TLS keypair in the Keyring
+type TLSKey struct {
+	// ID is a unique identifier for this Key
+	ID string `json:"id"`
+
+	// KeyType defines the algorithm used to generate the private keys
+	KeyType string `json:"key_type"`
+
+	// AppliedIndex is the earliest known raft index that safely contains this
+	// key.
+ AppliedIndex uint64 `json:"applied_index"` + + // CertBytes is the marshaled certificate. + CertBytes []byte `json:"cluster_cert"` + + // KeyParams is the marshaled private key. + KeyParams *certutil.ClusterKeyParams `json:"cluster_key_params"` + + // CreatedTime is the time this key was generated. This value is useful in + // determining when the next rotation should be. + CreatedTime time.Time `json:"created_time"` + + parsedCert *x509.Certificate + parsedKey *ecdsa.PrivateKey +} + +// TLSKeyring is the set of keys that raft uses for network communication. +// Only one key is used to dial at a time but both keys will be used to accept +// connections. +type TLSKeyring struct { + // Keys is the set of available key pairs + Keys []*TLSKey `json:"keys"` + + // AppliedIndex is the earliest known raft index that safely contains the + // latest key in the keyring. + AppliedIndex uint64 `json:"applied_index"` + + // Term is an incrementing identifier value used to quickly determine if two + // states of the keyring are different. + Term uint64 `json:"term"` + + // ActiveKeyID is the key ID to track the active key in the keyring. Only + // the active key is used for dialing. + ActiveKeyID string `json:"active_key_id"` +} + +// GetActive returns the active key. +func (k *TLSKeyring) GetActive() *TLSKey { + if k.ActiveKeyID == "" { + return nil + } + + for _, key := range k.Keys { + if key.ID == k.ActiveKeyID { + return key + } + } + return nil +} + +func GenerateTLSKey(reader io.Reader) (*TLSKey, error) { + key, err := ecdsa.GenerateKey(elliptic.P521(), reader) + if err != nil { + return nil, err + } + + host, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + host = fmt.Sprintf("raft-%s", host) + template := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + }, + DNSNames: []string{host}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement | x509.KeyUsageCertSign, + SerialNumber: big.NewInt(mathrand.Int63()), + NotBefore: time.Now().Add(-30 * time.Second), + // 30 years ought to be enough for anybody + NotAfter: time.Now().Add(262980 * time.Hour), + BasicConstraintsValid: true, + IsCA: true, + } + + certBytes, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key) + if err != nil { + return nil, fmt.Errorf("unable to generate local cluster certificate: %w", err) + } + + return &TLSKey{ + ID: host, + KeyType: certutil.PrivateKeyTypeP521, + CertBytes: certBytes, + KeyParams: &certutil.ClusterKeyParams{ + Type: certutil.PrivateKeyTypeP521, + X: key.PublicKey.X, + Y: key.PublicKey.Y, + D: key.D, + }, + CreatedTime: time.Now(), + }, nil +} + +var ( + // Make sure raftLayer satisfies the raft.StreamLayer interface + _ raft.StreamLayer = (*raftLayer)(nil) + + // Make sure raftLayer satisfies the cluster.Handler and cluster.Client + // interfaces + _ cluster.Handler = (*raftLayer)(nil) + _ cluster.Client = (*raftLayer)(nil) +) + +// RaftLayer implements the raft.StreamLayer interface, +// so that we can use a single RPC layer for Raft and Vault +type raftLayer struct { + // Addr is the listener address to return + addr net.Addr + + // connCh is used to accept connections + connCh chan net.Conn + + // Tracks if we are closed + closed bool + closeCh chan struct{} + closeLock sync.Mutex + + logger log.Logger + + dialerFunc func(string, time.Duration) (net.Conn, error) + + // TLS config + keyring 
*TLSKeyring + clusterListener cluster.ClusterHook +} + +// NewRaftLayer creates a new raftLayer object. It parses the TLS information +// from the network config. +func NewRaftLayer(logger log.Logger, raftTLSKeyring *TLSKeyring, clusterListener cluster.ClusterHook) (*raftLayer, error) { + clusterAddr := clusterListener.Addr() + if clusterAddr == nil { + return nil, errors.New("no raft addr found") + } + + { + // Test the advertised address to make sure it's not an unspecified IP + u := url.URL{ + Host: clusterAddr.String(), + } + ip := net.ParseIP(u.Hostname()) + if ip != nil && ip.IsUnspecified() { + return nil, fmt.Errorf("cannot use unspecified IP with raft storage: %s", clusterAddr.String()) + } + } + + layer := &raftLayer{ + addr: clusterAddr, + connCh: make(chan net.Conn), + closeCh: make(chan struct{}), + logger: logger, + clusterListener: clusterListener, + } + + if err := layer.setTLSKeyring(raftTLSKeyring); err != nil { + return nil, err + } + + return layer, nil +} + +func (l *raftLayer) setTLSKeyring(keyring *TLSKeyring) error { + // Fast path a noop update + if l.keyring != nil && l.keyring.Term == keyring.Term { + return nil + } + + for _, key := range keyring.Keys { + switch { + case key.KeyParams == nil: + return errors.New("no raft cluster key params found") + + case key.KeyParams.X == nil, key.KeyParams.Y == nil, key.KeyParams.D == nil: + return errors.New("failed to parse raft cluster key") + + case key.KeyParams.Type != certutil.PrivateKeyTypeP521: + return errors.New("failed to find valid raft cluster key type") + + case len(key.CertBytes) == 0: + return errors.New("no cluster cert found") + } + + parsedCert, err := x509.ParseCertificate(key.CertBytes) + if err != nil { + return fmt.Errorf("error parsing raft cluster certificate: %w", err) + } + + key.parsedCert = parsedCert + key.parsedKey = &ecdsa.PrivateKey{ + PublicKey: ecdsa.PublicKey{ + Curve: elliptic.P521(), + X: key.KeyParams.X, + Y: key.KeyParams.Y, + }, + D: key.KeyParams.D, + } + } + + if keyring.GetActive() == nil { + return errors.New("expected one active key to be present in the keyring") + } + + l.keyring = keyring + + return nil +} + +func (l *raftLayer) ServerName() string { + key := l.keyring.GetActive() + if key == nil { + return "" + } + + return key.parsedCert.Subject.CommonName +} + +func (l *raftLayer) CACert(ctx context.Context) *x509.Certificate { + key := l.keyring.GetActive() + if key == nil { + return nil + } + + return key.parsedCert +} + +func (l *raftLayer) ClientLookup(ctx context.Context, requestInfo *tls.CertificateRequestInfo) (*tls.Certificate, error) { + for _, subj := range requestInfo.AcceptableCAs { + for _, key := range l.keyring.Keys { + if bytes.Equal(subj, key.parsedCert.RawIssuer) { + localCert := make([]byte, len(key.CertBytes)) + copy(localCert, key.CertBytes) + + return &tls.Certificate{ + Certificate: [][]byte{localCert}, + PrivateKey: key.parsedKey, + Leaf: key.parsedCert, + }, nil + } + } + } + + return nil, nil +} + +func (l *raftLayer) ServerLookup(ctx context.Context, clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { + if l.keyring == nil { + return nil, errors.New("got raft connection but no local cert") + } + + for _, key := range l.keyring.Keys { + if clientHello.ServerName == key.ID { + localCert := make([]byte, len(key.CertBytes)) + copy(localCert, key.CertBytes) + + return &tls.Certificate{ + Certificate: [][]byte{localCert}, + PrivateKey: key.parsedKey, + Leaf: key.parsedCert, + }, nil + } + } + + return nil, nil +} + +// CALookup returns the CA 
to use when validating this connection.
+func (l *raftLayer) CALookup(context.Context) ([]*x509.Certificate, error) {
+	ret := make([]*x509.Certificate, len(l.keyring.Keys))
+	for i, key := range l.keyring.Keys {
+		ret[i] = key.parsedCert
+	}
+	return ret, nil
+}
+
+// Stop shuts down the raft layer.
+func (l *raftLayer) Stop() error {
+	l.Close()
+	return nil
+}
+
+// Handoff is used to hand off a connection to the
+// RaftLayer. This allows it to be Accept()'ed
+func (l *raftLayer) Handoff(ctx context.Context, wg *sync.WaitGroup, quit chan struct{}, conn *tls.Conn) error {
+	l.closeLock.Lock()
+	closed := l.closed
+	l.closeLock.Unlock()
+
+	if closed {
+		return errors.New("raft is shutdown")
+	}
+
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		select {
+		case l.connCh <- conn:
+		case <-l.closeCh:
+		case <-ctx.Done():
+		case <-quit:
+		}
+	}()
+
+	return nil
+}
+
+// Accept is used to return connections that were
+// dialed to be used with the Raft layer
+func (l *raftLayer) Accept() (net.Conn, error) {
+	select {
+	case conn := <-l.connCh:
+		return conn, nil
+	case <-l.closeCh:
+		return nil, fmt.Errorf("Raft RPC layer closed")
+	}
+}
+
+// Close is used to stop listening for Raft connections
+func (l *raftLayer) Close() error {
+	l.closeLock.Lock()
+	defer l.closeLock.Unlock()
+
+	if !l.closed {
+		l.closed = true
+		close(l.closeCh)
+	}
+	return nil
+}
+
+// Addr is used to return the address of the listener
+func (l *raftLayer) Addr() net.Addr {
+	return l.addr
+}
+
+// Dial is used to create a new outgoing connection
+func (l *raftLayer) Dial(address raft.ServerAddress, timeout time.Duration) (net.Conn, error) {
+	dialFunc := l.clusterListener.GetDialerFunc(context.Background(), consts.RaftStorageALPN)
+	return dialFunc(string(address), timeout)
+}
diff --git a/physical/raft/streamlayer_test.go b/physical/raft/streamlayer_test.go
new file mode 100644
index 0000000..d826eaa
--- /dev/null
+++ b/physical/raft/streamlayer_test.go
@@ -0,0 +1,73 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package raft + +import ( + "context" + "crypto/rand" + "crypto/tls" + "net" + "testing" + "time" + + "github.com/hashicorp/vault/vault/cluster" +) + +type mockClusterHook struct { + address net.Addr +} + +func (*mockClusterHook) AddClient(alpn string, client cluster.Client) {} +func (*mockClusterHook) RemoveClient(alpn string) {} +func (*mockClusterHook) AddHandler(alpn string, handler cluster.Handler) {} +func (*mockClusterHook) StopHandler(alpn string) {} +func (*mockClusterHook) TLSConfig(ctx context.Context) (*tls.Config, error) { return nil, nil } +func (m *mockClusterHook) Addr() net.Addr { return m.address } +func (*mockClusterHook) GetDialerFunc(ctx context.Context, alpnProto string) func(string, time.Duration) (net.Conn, error) { + return func(string, time.Duration) (net.Conn, error) { + return nil, nil + } +} + +func TestStreamLayer_UnspecifiedIP(t *testing.T) { + m := &mockClusterHook{ + address: &cluster.NetAddr{ + Host: "0.0.0.0:8200", + }, + } + + raftTLSKey, err := GenerateTLSKey(rand.Reader) + if err != nil { + t.Fatal(err) + } + + raftTLS := &TLSKeyring{ + Keys: []*TLSKey{raftTLSKey}, + ActiveKeyID: raftTLSKey.ID, + } + + layer, err := NewRaftLayer(nil, raftTLS, m) + if err == nil { + t.Fatal("expected error") + } + + if err.Error() != "cannot use unspecified IP with raft storage: 0.0.0.0:8200" { + t.Fatalf("unexpected error: %s", err.Error()) + } + + if layer != nil { + t.Fatal("expected nil layer") + } + + m.address.(*cluster.NetAddr).Host = "10.0.0.1:8200" + + layer, err = NewRaftLayer(nil, raftTLS, m) + if err != nil { + t.Fatal(err) + } + + if layer == nil { + t.Fatal("nil layer") + } +} diff --git a/physical/raft/testing.go b/physical/raft/testing.go new file mode 100644 index 0000000..ea68479 --- /dev/null +++ b/physical/raft/testing.go @@ -0,0 +1,81 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package raft + +import ( + "context" + "fmt" + "io/ioutil" + "testing" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-uuid" +) + +func GetRaft(t testing.TB, bootstrap bool, noStoreState bool) (*RaftBackend, string) { + raftDir, err := ioutil.TempDir("", "vault-raft-") + if err != nil { + t.Fatal(err) + } + t.Logf("raft dir: %s", raftDir) + + return getRaftWithDir(t, bootstrap, noStoreState, raftDir) +} + +func getRaftWithDir(t testing.TB, bootstrap bool, noStoreState bool, raftDir string) (*RaftBackend, string) { + id, err := uuid.GenerateUUID() + if err != nil { + t.Fatal(err) + } + + logger := hclog.New(&hclog.LoggerOptions{ + Name: fmt.Sprintf("raft-%s", id), + Level: hclog.Trace, + }) + logger.Info("raft dir", "dir", raftDir) + + conf := map[string]string{ + "path": raftDir, + "trailing_logs": "100", + "node_id": id, + } + + if noStoreState { + conf["doNotStoreLatestState"] = "" + } + + backendRaw, err := NewRaftBackend(conf, logger) + if err != nil { + t.Fatal(err) + } + backend := backendRaw.(*RaftBackend) + + if bootstrap { + err = backend.Bootstrap([]Peer{ + { + ID: backend.NodeID(), + Address: backend.NodeID(), + }, + }) + if err != nil { + t.Fatal(err) + } + + err = backend.SetupCluster(context.Background(), SetupOpts{}) + if err != nil { + t.Fatal(err) + } + + for { + if backend.raft.AppliedIndex() >= 2 { + break + } + } + + } + + backend.DisableAutopilot() + + return backend, raftDir +} diff --git a/physical/raft/types.pb.go b/physical/raft/types.pb.go new file mode 100644 index 0000000..2835e1f --- /dev/null +++ b/physical/raft/types.pb.go @@ -0,0 +1,535 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: physical/raft/types.proto + +package raft + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type LogOperation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // OpType is the Operation type + OpType uint32 `protobuf:"varint,1,opt,name=op_type,json=opType,proto3" json:"op_type,omitempty"` + // Flags is an opaque value, currently unused. Reserved. 
+ Flags uint64 `protobuf:"varint,2,opt,name=flags,proto3" json:"flags,omitempty"` + // Key that is being affected + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` + // Value is optional, corresponds to the key + Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *LogOperation) Reset() { + *x = LogOperation{} + if protoimpl.UnsafeEnabled { + mi := &file_physical_raft_types_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogOperation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogOperation) ProtoMessage() {} + +func (x *LogOperation) ProtoReflect() protoreflect.Message { + mi := &file_physical_raft_types_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogOperation.ProtoReflect.Descriptor instead. +func (*LogOperation) Descriptor() ([]byte, []int) { + return file_physical_raft_types_proto_rawDescGZIP(), []int{0} +} + +func (x *LogOperation) GetOpType() uint32 { + if x != nil { + return x.OpType + } + return 0 +} + +func (x *LogOperation) GetFlags() uint64 { + if x != nil { + return x.Flags + } + return 0 +} + +func (x *LogOperation) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *LogOperation) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +type LogData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Operations []*LogOperation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"` +} + +func (x *LogData) Reset() { + *x = LogData{} + if protoimpl.UnsafeEnabled { + mi := &file_physical_raft_types_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LogData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LogData) ProtoMessage() {} + +func (x *LogData) ProtoReflect() protoreflect.Message { + mi := &file_physical_raft_types_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LogData.ProtoReflect.Descriptor instead. 
+func (*LogData) Descriptor() ([]byte, []int) { + return file_physical_raft_types_proto_rawDescGZIP(), []int{1} +} + +func (x *LogData) GetOperations() []*LogOperation { + if x != nil { + return x.Operations + } + return nil +} + +type IndexValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Term uint64 `protobuf:"varint,1,opt,name=term,proto3" json:"term,omitempty"` + Index uint64 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` +} + +func (x *IndexValue) Reset() { + *x = IndexValue{} + if protoimpl.UnsafeEnabled { + mi := &file_physical_raft_types_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IndexValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IndexValue) ProtoMessage() {} + +func (x *IndexValue) ProtoReflect() protoreflect.Message { + mi := &file_physical_raft_types_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IndexValue.ProtoReflect.Descriptor instead. +func (*IndexValue) Descriptor() ([]byte, []int) { + return file_physical_raft_types_proto_rawDescGZIP(), []int{2} +} + +func (x *IndexValue) GetTerm() uint64 { + if x != nil { + return x.Term + } + return 0 +} + +func (x *IndexValue) GetIndex() uint64 { + if x != nil { + return x.Index + } + return 0 +} + +type Server struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Suffrage int32 `protobuf:"varint,1,opt,name=suffrage,proto3" json:"suffrage,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` +} + +func (x *Server) Reset() { + *x = Server{} + if protoimpl.UnsafeEnabled { + mi := &file_physical_raft_types_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Server) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Server) ProtoMessage() {} + +func (x *Server) ProtoReflect() protoreflect.Message { + mi := &file_physical_raft_types_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Server.ProtoReflect.Descriptor instead. 
+func (*Server) Descriptor() ([]byte, []int) { + return file_physical_raft_types_proto_rawDescGZIP(), []int{3} +} + +func (x *Server) GetSuffrage() int32 { + if x != nil { + return x.Suffrage + } + return 0 +} + +func (x *Server) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *Server) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +type ConfigurationValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Index uint64 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` + Servers []*Server `protobuf:"bytes,2,rep,name=servers,proto3" json:"servers,omitempty"` +} + +func (x *ConfigurationValue) Reset() { + *x = ConfigurationValue{} + if protoimpl.UnsafeEnabled { + mi := &file_physical_raft_types_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConfigurationValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConfigurationValue) ProtoMessage() {} + +func (x *ConfigurationValue) ProtoReflect() protoreflect.Message { + mi := &file_physical_raft_types_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConfigurationValue.ProtoReflect.Descriptor instead. +func (*ConfigurationValue) Descriptor() ([]byte, []int) { + return file_physical_raft_types_proto_rawDescGZIP(), []int{4} +} + +func (x *ConfigurationValue) GetIndex() uint64 { + if x != nil { + return x.Index + } + return 0 +} + +func (x *ConfigurationValue) GetServers() []*Server { + if x != nil { + return x.Servers + } + return nil +} + +type LocalNodeConfigValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DesiredSuffrage string `protobuf:"bytes,1,opt,name=desired_suffrage,json=desiredSuffrage,proto3" json:"desired_suffrage,omitempty"` +} + +func (x *LocalNodeConfigValue) Reset() { + *x = LocalNodeConfigValue{} + if protoimpl.UnsafeEnabled { + mi := &file_physical_raft_types_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalNodeConfigValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalNodeConfigValue) ProtoMessage() {} + +func (x *LocalNodeConfigValue) ProtoReflect() protoreflect.Message { + mi := &file_physical_raft_types_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalNodeConfigValue.ProtoReflect.Descriptor instead. 
+func (*LocalNodeConfigValue) Descriptor() ([]byte, []int) { + return file_physical_raft_types_proto_rawDescGZIP(), []int{5} +} + +func (x *LocalNodeConfigValue) GetDesiredSuffrage() string { + if x != nil { + return x.DesiredSuffrage + } + return "" +} + +var File_physical_raft_types_proto protoreflect.FileDescriptor + +var file_physical_raft_types_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x70, 0x68, 0x79, 0x73, 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x72, 0x61, 0x66, 0x74, 0x2f, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x72, 0x61, 0x66, + 0x74, 0x22, 0x65, 0x0a, 0x0c, 0x4c, 0x6f, 0x67, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x6f, 0x70, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x06, 0x6f, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, + 0x61, 0x67, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x3d, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x44, + 0x61, 0x74, 0x61, 0x12, 0x32, 0x0a, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x72, 0x61, 0x66, 0x74, 0x2e, 0x4c, + 0x6f, 0x67, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x6f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x36, 0x0a, 0x0a, 0x49, 0x6e, 0x64, 0x65, 0x78, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, + 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x22, + 0x4e, 0x0a, 0x06, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x75, 0x66, + 0x66, 0x72, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x73, 0x75, 0x66, + 0x66, 0x72, 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, + 0x52, 0x0a, 0x12, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x26, 0x0a, 0x07, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x72, + 0x61, 0x66, 0x74, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x73, 0x22, 0x41, 0x0a, 0x14, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4e, 0x6f, 0x64, 0x65, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x64, + 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x72, 0x61, 0x67, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x53, 0x75, + 0x66, 0x66, 0x72, 0x61, 0x67, 0x65, 0x42, 0x2a, 0x5a, 0x28, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, + 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x70, 
0x68, 0x79, 0x73, 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x72, 0x61, + 0x66, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_physical_raft_types_proto_rawDescOnce sync.Once + file_physical_raft_types_proto_rawDescData = file_physical_raft_types_proto_rawDesc +) + +func file_physical_raft_types_proto_rawDescGZIP() []byte { + file_physical_raft_types_proto_rawDescOnce.Do(func() { + file_physical_raft_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_physical_raft_types_proto_rawDescData) + }) + return file_physical_raft_types_proto_rawDescData +} + +var file_physical_raft_types_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_physical_raft_types_proto_goTypes = []interface{}{ + (*LogOperation)(nil), // 0: raft.LogOperation + (*LogData)(nil), // 1: raft.LogData + (*IndexValue)(nil), // 2: raft.IndexValue + (*Server)(nil), // 3: raft.Server + (*ConfigurationValue)(nil), // 4: raft.ConfigurationValue + (*LocalNodeConfigValue)(nil), // 5: raft.LocalNodeConfigValue +} +var file_physical_raft_types_proto_depIdxs = []int32{ + 0, // 0: raft.LogData.operations:type_name -> raft.LogOperation + 3, // 1: raft.ConfigurationValue.servers:type_name -> raft.Server + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_physical_raft_types_proto_init() } +func file_physical_raft_types_proto_init() { + if File_physical_raft_types_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_physical_raft_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogOperation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_physical_raft_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LogData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_physical_raft_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*IndexValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_physical_raft_types_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Server); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_physical_raft_types_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConfigurationValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_physical_raft_types_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalNodeConfigValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_physical_raft_types_proto_rawDesc, + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_physical_raft_types_proto_goTypes, + 
DependencyIndexes: file_physical_raft_types_proto_depIdxs, + MessageInfos: file_physical_raft_types_proto_msgTypes, + }.Build() + File_physical_raft_types_proto = out.File + file_physical_raft_types_proto_rawDesc = nil + file_physical_raft_types_proto_goTypes = nil + file_physical_raft_types_proto_depIdxs = nil +} diff --git a/physical/raft/types.proto b/physical/raft/types.proto new file mode 100644 index 0000000..bb3d136 --- /dev/null +++ b/physical/raft/types.proto @@ -0,0 +1,46 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +syntax = "proto3"; + +option go_package = "github.com/hashicorp/vault/physical/raft"; + +package raft; + +message LogOperation { + // OpType is the Operation type + uint32 op_type = 1; + + // Flags is an opaque value, currently unused. Reserved. + uint64 flags = 2; + + // Key that is being affected + string key = 3; + + // Value is optional, corresponds to the key + bytes value = 4; +} + +message LogData { + repeated LogOperation operations = 1; +} + +message IndexValue { + uint64 term = 1; + uint64 index = 2; +} + +message Server { + int32 suffrage = 1; + string id = 2; + string address = 3; +} + +message ConfigurationValue { + uint64 index = 1; + repeated Server servers = 2; +} + +message LocalNodeConfigValue{ + string desired_suffrage = 1; +} diff --git a/physical/raft/varint.go b/physical/raft/varint.go new file mode 100644 index 0000000..87f59ea --- /dev/null +++ b/physical/raft/varint.go @@ -0,0 +1,123 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
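+
+// Editor's note (illustrative, not part of the upstream change): the
+// writer below length-prefixes each marshaled protobuf message with a
+// uvarint, and the reader reverses the process. A 300-byte message, for
+// example, is framed as the two-byte uvarint 0xAC 0x02 followed by the
+// 300 payload bytes:
+//
+//	var buf bytes.Buffer
+//	w := NewDelimitedWriter(&buf)        // buf implements io.Writer
+//	_ = w.WriteMsg(msg)                  // writes uvarint(len) + payload
+//	r := NewDelimitedReader(&buf, 1<<20) // cap message size at 1 MiB
+//	_ = r.ReadMsg(out)                   // reads uvarint(len), then len bytes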
+ +package raft + +import ( + "bufio" + "encoding/binary" + "errors" + "io" + + "github.com/golang/protobuf/proto" +) + +var ( + errSmallBuffer = errors.New("Buffer Too Small") + errLargeValue = errors.New("Value is Larger than 64 bits") +) + +func NewDelimitedWriter(w io.Writer) WriteCloser { + return &varintWriter{w, make([]byte, binary.MaxVarintLen64), nil} +} + +type varintWriter struct { + w io.Writer + lenBuf []byte + buffer []byte +} + +func (this *varintWriter) WriteMsg(msg proto.Message) (err error) { + data, err := proto.Marshal(msg) + if err != nil { + return err + } + length := uint64(len(data)) + n := binary.PutUvarint(this.lenBuf, length) + _, err = this.w.Write(this.lenBuf[:n]) + if err != nil { + return err + } + _, err = this.w.Write(data) + return err +} + +func (this *varintWriter) Close() error { + if closer, ok := this.w.(io.Closer); ok { + return closer.Close() + } + return nil +} + +func NewDelimitedReader(r io.Reader, maxSize int) ReadCloser { + var closer io.Closer + if c, ok := r.(io.Closer); ok { + closer = c + } + return &varintReader{bufio.NewReader(r), nil, maxSize, closer, 0} +} + +type varintReader struct { + r *bufio.Reader + buf []byte + maxSize int + closer io.Closer + lastReadSize int +} + +func (this *varintReader) GetLastReadSize() int { + return this.lastReadSize +} + +func (this *varintReader) ReadMsg(msg proto.Message) error { + length64, err := binary.ReadUvarint(this.r) + if err != nil { + return err + } + length := int(length64) + if length < 0 || length > this.maxSize { + return io.ErrShortBuffer + } + if len(this.buf) < length { + this.buf = make([]byte, length) + } + buf := this.buf[:length] + size, err := io.ReadFull(this.r, buf) + if err != nil { + return err + } + this.lastReadSize = size + return proto.Unmarshal(buf, msg) +} + +func (this *varintReader) Close() error { + if this.closer != nil { + return this.closer.Close() + } + return nil +} diff --git a/physical/raft/vars_32bit.go b/physical/raft/vars_32bit.go new file mode 100644 index 0000000..6e5c51f --- /dev/null +++ b/physical/raft/vars_32bit.go @@ -0,0 +1,8 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build 386 || arm || windows + +package raft + +const initialMmapSize = 0 diff --git a/physical/raft/vars_64bit.go b/physical/raft/vars_64bit.go new file mode 100644 index 0000000..a1eea0f --- /dev/null +++ b/physical/raft/vars_64bit.go @@ -0,0 +1,8 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build !386 && !arm && !windows + +package raft + +const initialMmapSize = 100 * 1024 * 1024 * 1024 // 100GB diff --git a/physical/s3/s3.go b/physical/s3/s3.go new file mode 100644 index 0000000..0cb8e0a --- /dev/null +++ b/physical/s3/s3.go @@ -0,0 +1,325 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package s3 + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "os" + "path" + "sort" + "strconv" + "strings" + "time" + + "github.com/armon/go-metrics" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/hashicorp/go-cleanhttp" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-secure-stdlib/awsutil" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/physical" +) + +// Verify S3Backend satisfies the correct interfaces +var _ physical.Backend = (*S3Backend)(nil) + +// S3Backend is a physical backend that stores data +// within an S3 bucket. +type S3Backend struct { + bucket string + path string + kmsKeyId string + client *s3.S3 + logger log.Logger + permitPool *physical.PermitPool +} + +// NewS3Backend constructs a S3 backend using a pre-existing +// bucket. Credentials can be provided to the backend, sourced +// from the environment, AWS credential files or by IAM role. +func NewS3Backend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + bucket := os.Getenv("AWS_S3_BUCKET") + if bucket == "" { + bucket = conf["bucket"] + if bucket == "" { + return nil, fmt.Errorf("'bucket' must be set") + } + } + + path := conf["path"] + + accessKey, ok := conf["access_key"] + if !ok { + accessKey = "" + } + secretKey, ok := conf["secret_key"] + if !ok { + secretKey = "" + } + sessionToken, ok := conf["session_token"] + if !ok { + sessionToken = "" + } + endpoint := os.Getenv("AWS_S3_ENDPOINT") + if endpoint == "" { + endpoint = conf["endpoint"] + } + region := os.Getenv("AWS_REGION") + if region == "" { + region = os.Getenv("AWS_DEFAULT_REGION") + if region == "" { + region = conf["region"] + if region == "" { + region = "us-east-1" + } + } + } + s3ForcePathStyleStr, ok := conf["s3_force_path_style"] + if !ok { + s3ForcePathStyleStr = "false" + } + s3ForcePathStyleBool, err := parseutil.ParseBool(s3ForcePathStyleStr) + if err != nil { + return nil, fmt.Errorf("invalid boolean set for s3_force_path_style: %q", s3ForcePathStyleStr) + } + disableSSLStr, ok := conf["disable_ssl"] + if !ok { + disableSSLStr = "false" + } + disableSSLBool, err := parseutil.ParseBool(disableSSLStr) + if err != nil { + return nil, fmt.Errorf("invalid boolean set for disable_ssl: %q", disableSSLStr) + } + + credsConfig := &awsutil.CredentialsConfig{ + AccessKey: accessKey, + SecretKey: secretKey, + SessionToken: sessionToken, + Logger: logger, + } + creds, err := credsConfig.GenerateCredentialChain() + if err != nil { + return nil, err + } + + pooledTransport := cleanhttp.DefaultPooledTransport() + pooledTransport.MaxIdleConnsPerHost = consts.ExpirationRestoreWorkerCount + + sess, err := session.NewSession(&aws.Config{ + Credentials: creds, + HTTPClient: &http.Client{ + Transport: pooledTransport, + }, + Endpoint: aws.String(endpoint), + Region: aws.String(region), + S3ForcePathStyle: aws.Bool(s3ForcePathStyleBool), + DisableSSL: aws.Bool(disableSSLBool), + }) + if err != nil { + return nil, err + } + s3conn := s3.New(sess) + + _, err = s3conn.ListObjects(&s3.ListObjectsInput{Bucket: &bucket}) + if err != nil { + return nil, fmt.Errorf("unable to access bucket %q in region %q: %w", bucket, region, err) + } + + maxParStr, ok := conf["max_parallel"] + var maxParInt int + if ok { + maxParInt, err = strconv.Atoi(maxParStr) + if err != nil { + 
return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err) + } + if logger.IsDebug() { + logger.Debug("max_parallel set", "max_parallel", maxParInt) + } + } + + kmsKeyId, ok := conf["kms_key_id"] + if !ok { + kmsKeyId = "" + } + + s := &S3Backend{ + client: s3conn, + bucket: bucket, + path: path, + kmsKeyId: kmsKeyId, + logger: logger, + permitPool: physical.NewPermitPool(maxParInt), + } + return s, nil +} + +// Put is used to insert or update an entry +func (s *S3Backend) Put(ctx context.Context, entry *physical.Entry) error { + defer metrics.MeasureSince([]string{"s3", "put"}, time.Now()) + + s.permitPool.Acquire() + defer s.permitPool.Release() + + // Setup key + key := path.Join(s.path, entry.Key) + + putObjectInput := &s3.PutObjectInput{ + Bucket: aws.String(s.bucket), + Key: aws.String(key), + Body: bytes.NewReader(entry.Value), + } + + if s.kmsKeyId != "" { + putObjectInput.ServerSideEncryption = aws.String("aws:kms") + putObjectInput.SSEKMSKeyId = aws.String(s.kmsKeyId) + } + + _, err := s.client.PutObject(putObjectInput) + if err != nil { + return err + } + + return nil +} + +// Get is used to fetch an entry +func (s *S3Backend) Get(ctx context.Context, key string) (*physical.Entry, error) { + defer metrics.MeasureSince([]string{"s3", "get"}, time.Now()) + + s.permitPool.Acquire() + defer s.permitPool.Release() + + // Setup key + key = path.Join(s.path, key) + + resp, err := s.client.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(s.bucket), + Key: aws.String(key), + }) + if resp != nil && resp.Body != nil { + defer resp.Body.Close() + } + if awsErr, ok := err.(awserr.RequestFailure); ok { + // Return nil on 404s, error on anything else + if awsErr.StatusCode() == 404 { + return nil, nil + } + return nil, err + } + if err != nil { + return nil, err + } + if resp == nil { + return nil, fmt.Errorf("got nil response from S3 but no error") + } + + data := bytes.NewBuffer(nil) + if resp.ContentLength != nil { + data = bytes.NewBuffer(make([]byte, 0, *resp.ContentLength)) + } + _, err = io.Copy(data, resp.Body) + if err != nil { + return nil, err + } + + // Strip path prefix + if s.path != "" { + key = strings.TrimPrefix(key, s.path+"/") + } + + ent := &physical.Entry{ + Key: key, + Value: data.Bytes(), + } + + return ent, nil +} + +// Delete is used to permanently delete an entry +func (s *S3Backend) Delete(ctx context.Context, key string) error { + defer metrics.MeasureSince([]string{"s3", "delete"}, time.Now()) + + s.permitPool.Acquire() + defer s.permitPool.Release() + + // Setup key + key = path.Join(s.path, key) + + _, err := s.client.DeleteObject(&s3.DeleteObjectInput{ + Bucket: aws.String(s.bucket), + Key: aws.String(key), + }) + if err != nil { + return err + } + + return nil +} + +// List is used to list all the keys under a given +// prefix, up to the next prefix. 
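+//
+// Illustrative example (editor's note): with stored keys "foo/bar" and
+// "foo/baz/qux", List(ctx, "foo/") returns ["bar", "baz/"] -- objects in
+// the current "folder" plus truncated sub-folder prefixes, both relative
+// to the queried prefix.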
+func (s *S3Backend) List(ctx context.Context, prefix string) ([]string, error) { + defer metrics.MeasureSince([]string{"s3", "list"}, time.Now()) + + s.permitPool.Acquire() + defer s.permitPool.Release() + + // Setup prefix + prefix = path.Join(s.path, prefix) + + // Validate prefix (if present) is ending with a "/" + if prefix != "" && !strings.HasSuffix(prefix, "/") { + prefix += "/" + } + + params := &s3.ListObjectsV2Input{ + Bucket: aws.String(s.bucket), + Prefix: aws.String(prefix), + Delimiter: aws.String("/"), + } + + keys := []string{} + + err := s.client.ListObjectsV2Pages(params, + func(page *s3.ListObjectsV2Output, lastPage bool) bool { + if page != nil { + // Add truncated 'folder' paths + for _, commonPrefix := range page.CommonPrefixes { + // Avoid panic + if commonPrefix == nil { + continue + } + + commonPrefix := strings.TrimPrefix(*commonPrefix.Prefix, prefix) + keys = append(keys, commonPrefix) + } + // Add objects only from the current 'folder' + for _, key := range page.Contents { + // Avoid panic + if key == nil { + continue + } + + key := strings.TrimPrefix(*key.Key, prefix) + keys = append(keys, key) + } + } + return true + }) + if err != nil { + return nil, err + } + + sort.Strings(keys) + + return keys, nil +} diff --git a/physical/s3/s3_test.go b/physical/s3/s3_test.go new file mode 100644 index 0000000..139e41d --- /dev/null +++ b/physical/s3/s3_test.go @@ -0,0 +1,137 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package s3 + +import ( + "context" + "fmt" + "math/rand" + "os" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-secure-stdlib/awsutil" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" +) + +func TestDefaultS3Backend(t *testing.T) { + DoS3BackendTest(t, "") +} + +func TestS3BackendSseKms(t *testing.T) { + DoS3BackendTest(t, "alias/aws/s3") +} + +func DoS3BackendTest(t *testing.T, kmsKeyId string) { + if enabled := os.Getenv("VAULT_ACC"); enabled == "" { + t.Skip() + } + + if !hasAWSCredentials() { + t.Skip("Skipping because AWS credentials could not be resolved. 
See https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials for information on how to set up AWS credentials.")
+	}
+
+	logger := logging.NewVaultLogger(log.Debug)
+
+	credsConfig := &awsutil.CredentialsConfig{Logger: logger}
+
+	credsChain, err := credsConfig.GenerateCredentialChain()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = credsChain.Get()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// If the variable is empty or doesn't exist, the default
+	// AWS endpoints will be used
+	endpoint := os.Getenv("AWS_S3_ENDPOINT")
+
+	region := os.Getenv("AWS_DEFAULT_REGION")
+	if region == "" {
+		region = "us-east-1"
+	}
+
+	sess, err := session.NewSession(&aws.Config{
+		Credentials: credsChain,
+		Endpoint:    aws.String(endpoint),
+		Region:      aws.String(region),
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	s3conn := s3.New(sess)
+
+	randInt := rand.New(rand.NewSource(time.Now().UnixNano())).Int()
+	bucket := fmt.Sprintf("vault-s3-testacc-%d", randInt)
+
+	_, err = s3conn.CreateBucket(&s3.CreateBucketInput{
+		Bucket: aws.String(bucket),
+	})
+	if err != nil {
+		t.Fatalf("unable to create test bucket: %s", err)
+	}
+
+	defer func() {
+		// Gotta list all the objects and delete them
+		// before being able to delete the bucket
+		listResp, _ := s3conn.ListObjects(&s3.ListObjectsInput{
+			Bucket: aws.String(bucket),
+		})
+
+		objects := &s3.Delete{}
+		for _, key := range listResp.Contents {
+			oi := &s3.ObjectIdentifier{Key: key.Key}
+			objects.Objects = append(objects.Objects, oi)
+		}
+
+		s3conn.DeleteObjects(&s3.DeleteObjectsInput{
+			Bucket: aws.String(bucket),
+			Delete: objects,
+		})
+
+		_, err := s3conn.DeleteBucket(&s3.DeleteBucketInput{Bucket: aws.String(bucket)})
+		if err != nil {
+			t.Fatalf("err: %s", err)
+		}
+	}()
+
+	// This uses the same logic to find the AWS credentials as we did at the beginning of the test
+	b, err := NewS3Backend(map[string]string{
+		"bucket":     bucket,
+		"kms_key_id": kmsKeyId, // NewS3Backend reads conf["kms_key_id"]; the original "kmsKeyId" key was silently ignored
+		"path":       "test/vault",
+	}, logger)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	physical.ExerciseBackend(t, b)
+	physical.ExerciseBackend_ListPrefix(t, b)
+}
+
+func hasAWSCredentials() bool {
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	cfg, err := config.LoadDefaultConfig(ctx)
+	if err != nil {
+		return false
+	}
+
+	creds, err := cfg.Credentials.Retrieve(ctx)
+	if err != nil {
+		return false
+	}
+
+	return creds.HasKeys()
+}
diff --git a/physical/spanner/spanner.go b/physical/spanner/spanner.go
new file mode 100644
index 0000000..b84e0d4
--- /dev/null
+++ b/physical/spanner/spanner.go
@@ -0,0 +1,377 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package spanner
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"cloud.google.com/go/spanner"
+	metrics "github.com/armon/go-metrics"
+	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/go-secure-stdlib/strutil"
+	"github.com/hashicorp/vault/helper/useragent"
+	"github.com/hashicorp/vault/sdk/physical"
+	"github.com/pkg/errors"
+	"google.golang.org/api/iterator"
+	"google.golang.org/api/option"
+	"google.golang.org/grpc/codes"
+)
+
+// Verify Backend satisfies the correct interfaces
+var (
+	_ physical.Backend       = (*Backend)(nil)
+	_ physical.Transactional = (*Backend)(nil)
+)
+
+const (
+	// envDatabase is the name of the environment variable to search for the
+	// database name.
+ envDatabase = "GOOGLE_SPANNER_DATABASE" + + // envHAEnabled is the name of the environment variable to search for the + // boolean indicating if HA is enabled. + envHAEnabled = "GOOGLE_SPANNER_HA_ENABLED" + + // envHATable is the name of the environment variable to search for the table + // name to use for HA. + envHATable = "GOOGLE_SPANNER_HA_TABLE" + + // envTable is the name of the environment variable to search for the table + // name. + envTable = "GOOGLE_SPANNER_TABLE" + + // defaultTable is the default table name if none is specified. + defaultTable = "Vault" + + // defaultHASuffix is the default suffix to apply to the table name if no + // HA table is provided. + defaultHASuffix = "HA" +) + +var ( + // metricDelete is the key for the metric for measuring a Delete call. + metricDelete = []string{"spanner", "delete"} + + // metricGet is the key for the metric for measuring a Get call. + metricGet = []string{"spanner", "get"} + + // metricList is the key for the metric for measuring a List call. + metricList = []string{"spanner", "list"} + + // metricPut is the key for the metric for measuring a Put call. + metricPut = []string{"spanner", "put"} + + // metricTxn is the key for the metric for measuring a Transaction call. + metricTxn = []string{"spanner", "txn"} +) + +// Backend implements physical.Backend and describes the steps necessary to +// persist data using Google Cloud Spanner. +type Backend struct { + // database is the name of the database to use for data storage and retrieval. + // This is supplied as part of user configuration. + database string + + // table is the name of the table in the database. + table string + + // client is the API client and permitPool is the allowed concurrent uses of + // the client. + client *spanner.Client + permitPool *physical.PermitPool + + // haTable is the name of the table to use for HA in the database. + haTable string + + // haEnabled indicates if high availability is enabled. Default: true. + haEnabled bool + + // haClient is the API client. This is managed separately from the main client + // because a flood of requests should not block refreshing the TTLs on the + // lock. + // + // This value will be nil if haEnabled is false. + haClient *spanner.Client + + // logger is the internal logger. + logger log.Logger +} + +// NewBackend creates a new Google Spanner storage backend with the given +// configuration. This uses the official Golang Cloud SDK and therefore supports +// specifying credentials via envvars, credential files, etc. 
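Before the constructor, a caller-side sketch may help orient the configuration handling that follows. This is a minimal sketch with hypothetical project, instance, and database names; every key shown can instead be supplied through its corresponding GOOGLE_SPANNER_* environment variable, which takes precedence over the config map:

```go
package main

import (
	log "github.com/hashicorp/go-hclog"

	"github.com/hashicorp/vault/physical/spanner"
)

func main() {
	logger := log.Default()

	// The database path uses the fully-qualified Cloud Spanner form; the data
	// and HA tables must already exist with the schema this backend reads and
	// writes (a string Key primary key, a bytes Value column, and the
	// LockRecord columns on the HA table).
	backend, err := spanner.NewBackend(map[string]string{
		"database":   "projects/my-project/instances/my-instance/databases/my-database",
		"table":      "Vault",
		"ha_enabled": "true",
		"ha_table":   "VaultHA",
	}, logger)
	if err != nil {
		logger.Error("failed to configure spanner backend", "error", err)
		return
	}
	_ = backend
}
```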
+func NewBackend(c map[string]string, logger log.Logger) (physical.Backend, error) { + logger.Debug("configuring backend") + + // Database name + database := os.Getenv(envDatabase) + if database == "" { + database = c["database"] + } + if database == "" { + return nil, errors.New("missing database name") + } + + // Table name + table := os.Getenv(envTable) + if table == "" { + table = c["table"] + } + if table == "" { + table = defaultTable + } + + // HA table name + haTable := os.Getenv(envHATable) + if haTable == "" { + haTable = c["ha_table"] + } + if haTable == "" { + haTable = table + defaultHASuffix + } + + // HA configuration + haClient := (*spanner.Client)(nil) + haEnabled := false + haEnabledStr := os.Getenv(envHAEnabled) + if haEnabledStr == "" { + haEnabledStr = c["ha_enabled"] + } + if haEnabledStr != "" { + var err error + haEnabled, err = strconv.ParseBool(haEnabledStr) + if err != nil { + return nil, fmt.Errorf("failed to parse HA enabled: %w", err) + } + } + if haEnabled { + logger.Debug("creating HA client") + var err error + ctx := context.Background() + haClient, err = spanner.NewClient(ctx, database, + option.WithUserAgent(useragent.String()), + ) + if err != nil { + return nil, fmt.Errorf("failed to create HA client: %w", err) + } + } + + // Max parallel + maxParallel, err := extractInt(c["max_parallel"]) + if err != nil { + return nil, fmt.Errorf("failed to parse max_parallel: %w", err) + } + + logger.Debug("configuration", + "database", database, + "table", table, + "haEnabled", haEnabled, + "haTable", haTable, + "maxParallel", maxParallel, + ) + + logger.Debug("creating client") + ctx := context.Background() + client, err := spanner.NewClient(ctx, database, + option.WithUserAgent(useragent.String()), + ) + if err != nil { + return nil, fmt.Errorf("failed to create spanner client: %w", err) + } + + return &Backend{ + database: database, + table: table, + client: client, + permitPool: physical.NewPermitPool(maxParallel), + + haEnabled: haEnabled, + haTable: haTable, + haClient: haClient, + + logger: logger, + }, nil +} + +// Put creates or updates an entry. +func (b *Backend) Put(ctx context.Context, entry *physical.Entry) error { + defer metrics.MeasureSince(metricPut, time.Now()) + + // Pooling + b.permitPool.Acquire() + defer b.permitPool.Release() + + // Insert + m := spanner.InsertOrUpdateMap(b.table, map[string]interface{}{ + "Key": entry.Key, + "Value": entry.Value, + }) + if _, err := b.client.Apply(ctx, []*spanner.Mutation{m}); err != nil { + return fmt.Errorf("failed to put data: %w", err) + } + return nil +} + +// Get fetches an entry. If there is no entry, this function returns nil. +func (b *Backend) Get(ctx context.Context, key string) (*physical.Entry, error) { + defer metrics.MeasureSince(metricGet, time.Now()) + + // Pooling + b.permitPool.Acquire() + defer b.permitPool.Release() + + // Read + row, err := b.client.Single().ReadRow(ctx, b.table, spanner.Key{key}, []string{"Value"}) + if spanner.ErrCode(err) == codes.NotFound { + return nil, nil + } + if err != nil { + return nil, fmt.Errorf("failed to read value for %q: %w", key, err) + } + + var value []byte + if err := row.Column(0, &value); err != nil { + return nil, fmt.Errorf("failed to decode value into bytes: %w", err) + } + + return &physical.Entry{ + Key: key, + Value: value, + }, nil +} + +// Delete deletes an entry with the given key. 
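The Put and Get pair above is the whole CRUD surface most callers touch. Before the Delete implementation, a small self-contained sketch of the round trip (the helper and its key are hypothetical; it works against any physical.Backend, not just this one):

```go
package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/vault/sdk/physical"
)

// roundTrip is a hypothetical helper showing Put/Get semantics: Get returns
// a nil entry together with a nil error when the key simply does not exist.
func roundTrip(ctx context.Context, b physical.Backend) error {
	if err := b.Put(ctx, &physical.Entry{Key: "foo/bar", Value: []byte("baz")}); err != nil {
		return fmt.Errorf("put: %w", err)
	}

	entry, err := b.Get(ctx, "foo/bar")
	if err != nil {
		return fmt.Errorf("get: %w", err)
	}
	if entry == nil {
		return fmt.Errorf("expected foo/bar to exist")
	}
	return nil
}
```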
+func (b *Backend) Delete(ctx context.Context, key string) error { + defer metrics.MeasureSince(metricDelete, time.Now()) + + // Pooling + b.permitPool.Acquire() + defer b.permitPool.Release() + + // Delete + m := spanner.Delete(b.table, spanner.Key{key}) + if _, err := b.client.Apply(ctx, []*spanner.Mutation{m}); err != nil { + return fmt.Errorf("failed to delete key: %w", err) + } + + return nil +} + +// List enumerates all keys with the given prefix. +func (b *Backend) List(ctx context.Context, prefix string) ([]string, error) { + defer metrics.MeasureSince(metricList, time.Now()) + + // Pooling + b.permitPool.Acquire() + defer b.permitPool.Release() + + // Sanitize + safeTable := sanitizeTable(b.table) + + // List + iter := b.client.Single().Query(ctx, spanner.Statement{ + SQL: "SELECT Key FROM " + safeTable + " WHERE STARTS_WITH(Key, @prefix)", + Params: map[string]interface{}{ + "prefix": prefix, + }, + }) + defer iter.Stop() + + var keys []string + + for { + row, err := iter.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, fmt.Errorf("failed to read row: %w", err) + } + + var key string + if err := row.Column(0, &key); err != nil { + return nil, fmt.Errorf("failed to decode key into string: %w", err) + } + + // The results will include the full prefix (folder) and any deeply-nested + // prefixes (subfolders). Vault expects only the top-most things to be + // included. + key = strings.TrimPrefix(key, prefix) + if i := strings.Index(key, "/"); i == -1 { + // Add objects only from the current 'folder' + keys = append(keys, key) + } else { + // Add truncated 'folder' paths + keys = strutil.AppendIfMissing(keys, string(key[:i+1])) + } + } + + // Sort because the resulting order is not predictable + sort.Strings(keys) + + return keys, nil +} + +// Transaction runs multiple entries via a single transaction. +func (b *Backend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error { + defer metrics.MeasureSince(metricTxn, time.Now()) + + // Quit early if we can + if len(txns) == 0 { + return nil + } + + // Build all the ops before taking out the pool + ms := make([]*spanner.Mutation, len(txns)) + for i, tx := range txns { + op, key, value := tx.Operation, tx.Entry.Key, tx.Entry.Value + + switch op { + case physical.DeleteOperation: + ms[i] = spanner.Delete(b.table, spanner.Key{key}) + case physical.PutOperation: + ms[i] = spanner.InsertOrUpdateMap(b.table, map[string]interface{}{ + "Key": key, + "Value": value, + }) + default: + return fmt.Errorf("unsupported transaction operation: %q", op) + } + } + + // Pooling + b.permitPool.Acquire() + defer b.permitPool.Release() + + // Transactivate! + if _, err := b.client.Apply(ctx, ms); err != nil { + return fmt.Errorf("failed to commit transaction: %w", err) + } + + return nil +} + +// extractInt is a helper function that takes a string and converts that string +// to an int, but accounts for the empty string. +func extractInt(s string) (int, error) { + if s == "" { + return 0, nil + } + return strconv.Atoi(s) +} + +// sanitizeTable attempts to sanitize the table name. +func sanitizeTable(s string) string { + end := strings.IndexRune(s, 0) + if end > -1 { + s = s[:end] + } + return strings.ReplaceAll(s, `"`, `""`) +} diff --git a/physical/spanner/spanner_ha.go b/physical/spanner/spanner_ha.go new file mode 100644 index 0000000..d116be0 --- /dev/null +++ b/physical/spanner/spanner_ha.go @@ -0,0 +1,410 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package spanner
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"time"
+
+	"cloud.google.com/go/spanner"
+	metrics "github.com/armon/go-metrics"
+	uuid "github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/vault/sdk/physical"
+	"github.com/pkg/errors"
+	"google.golang.org/grpc/codes"
+)
+
+// Verify Backend satisfies the correct interfaces
+var (
+	_ physical.HABackend = (*Backend)(nil)
+	_ physical.Lock      = (*Lock)(nil)
+)
+
+const (
+	// LockRenewInterval is the time to wait between lock renewals.
+	LockRenewInterval = 5 * time.Second
+
+	// LockRetryInterval is the amount of time to wait if the lock fails before
+	// trying again.
+	LockRetryInterval = 5 * time.Second
+
+	// LockTTL is the default lock TTL.
+	LockTTL = 15 * time.Second
+
+	// LockWatchRetryInterval is the amount of time to wait if a watch fails
+	// before trying again.
+	LockWatchRetryInterval = 5 * time.Second
+
+	// LockWatchRetryMax is the number of times to retry a failed watch before
+	// signaling that leadership is lost.
+	LockWatchRetryMax = 5
+)
+
+var (
+	// metricLockUnlock is the metric to register for a lock delete.
+	metricLockUnlock = []string{"spanner", "lock", "unlock"}
+
+	// metricLockLock is the metric to register for a lock acquisition.
+	metricLockLock = []string{"spanner", "lock", "lock"}
+
+	// metricLockValue is the metric to register for a lock create/update.
+	metricLockValue = []string{"spanner", "lock", "value"}
+)
+
+// Lock is the HA lock.
+type Lock struct {
+	// backend is the underlying physical backend.
+	backend *Backend
+
+	// key is the name of the key. value is the value of the key.
+	key, value string
+
+	// held is a boolean indicating if the lock is currently held.
+	held bool
+
+	// identity is the internal identity of this key (unique to this server
+	// instance).
+	identity string
+
+	// lock is an internal lock
+	lock sync.Mutex
+
+	// stopCh is the channel that stops all operations. It may be closed in the
+	// event of a leader loss or graceful shutdown. stopped is a boolean
+	// indicating if we are stopped - it exists to prevent double closing the
+	// channel. stopLock is a mutex around the locks.
+	stopCh   chan struct{}
+	stopped  bool
+	stopLock sync.Mutex
+
+	// Allow modifying the Lock durations for ease of unit testing.
+	renewInterval      time.Duration
+	retryInterval      time.Duration
+	ttl                time.Duration
+	watchRetryInterval time.Duration
+	watchRetryMax      int
+}
+
+// LockRecord is the struct that corresponds to a lock.
+type LockRecord struct {
+	Key       string
+	Value     string
+	Identity  string
+	Timestamp time.Time
+}
+
+// HAEnabled implements HABackend and indicates that this backend supports high
+// availability.
+func (b *Backend) HAEnabled() bool {
+	return b.haEnabled
+}
+
+// LockWith acquires a mutual exclusion based on the given key.
+func (b *Backend) LockWith(key, value string) (physical.Lock, error) {
+	identity, err := uuid.GenerateUUID()
+	if err != nil {
+		return nil, fmt.Errorf("lock with: %w", err)
+	}
+	return &Lock{
+		backend:  b,
+		key:      key,
+		value:    value,
+		identity: identity,
+		stopped:  true,
+
+		renewInterval:      LockRenewInterval,
+		retryInterval:      LockRetryInterval,
+		ttl:                LockTTL,
+		watchRetryInterval: LockWatchRetryInterval,
+		watchRetryMax:      LockWatchRetryMax,
+	}, nil
+}
+
+// Lock acquires the given lock. The stopCh is optional. If closed, it
+// interrupts the lock acquisition attempt. The returned channel should be
+// closed when leadership is lost.
+func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
+	defer metrics.MeasureSince(metricLockLock, time.Now())
+
+	l.lock.Lock()
+	defer l.lock.Unlock()
+	if l.held {
+		return nil, errors.New("lock already held")
+	}
+
+	// Attempt to lock - this function blocks until a lock is acquired or an error
+	// occurs.
+	acquired, err := l.attemptLock(stopCh)
+	if err != nil {
+		return nil, fmt.Errorf("lock: %w", err)
+	}
+	if !acquired {
+		return nil, nil
+	}
+
+	// We have the lock now
+	l.held = true
+
+	// Build the locks
+	l.stopLock.Lock()
+	l.stopCh = make(chan struct{})
+	l.stopped = false
+	l.stopLock.Unlock()
+
+	// Periodically renew and watch the lock
+	go l.renewLock()
+	go l.watchLock()
+
+	return l.stopCh, nil
+}
+
+// Unlock releases the lock.
+func (l *Lock) Unlock() error {
+	defer metrics.MeasureSince(metricLockUnlock, time.Now())
+
+	l.lock.Lock()
+	defer l.lock.Unlock()
+	if !l.held {
+		return nil
+	}
+
+	// Stop any existing locking or renewal attempts
+	l.stopLock.Lock()
+	if !l.stopped {
+		l.stopped = true
+		close(l.stopCh)
+	}
+	l.stopLock.Unlock()
+
+	// Delete
+	ctx := context.Background()
+	if _, err := l.backend.haClient.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error {
+		row, err := txn.ReadRow(ctx, l.backend.haTable, spanner.Key{l.key}, []string{"Identity"})
+		if err != nil {
+			// A missing row means the lock is already gone; any other error
+			// is a real failure.
+			if spanner.ErrCode(err) == codes.NotFound {
+				return nil
+			}
+			return err
+		}
+
+		var r LockRecord
+		if derr := row.ToStruct(&r); derr != nil {
+			return fmt.Errorf("failed to decode to struct: %w", derr)
+		}
+
+		// If the identity is different, then between the time we stopped
+		// acquisition and now, the TTL expired and someone else grabbed the
+		// lock. We do not want to delete a lock that is not our own.
+		if r.Identity != l.identity {
+			return nil
+		}
+
+		return txn.BufferWrite([]*spanner.Mutation{
+			spanner.Delete(l.backend.haTable, spanner.Key{l.key}),
+		})
+	}); err != nil {
+		return fmt.Errorf("unlock: %w", err)
+	}
+
+	// We are no longer holding the lock
+	l.held = false
+
+	return nil
+}
+
+// Value returns the value of the lock and if it is held.
+func (l *Lock) Value() (bool, string, error) {
+	defer metrics.MeasureSince(metricLockValue, time.Now())
+
+	r, err := l.get(context.Background())
+	if err != nil {
+		return false, "", err
+	}
+	if r == nil {
+		return false, "", nil
+	}
+	return true, string(r.Value), nil
+}
+
+// attemptLock attempts to acquire a lock. If the given channel is closed, the
+// acquisition attempt stops. This function returns when a lock is acquired or
+// an error occurs.
+func (l *Lock) attemptLock(stopCh <-chan struct{}) (bool, error) {
+	ticker := time.NewTicker(l.retryInterval)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			acquired, err := l.writeLock()
+			if err != nil {
+				return false, fmt.Errorf("attempt lock: %w", err)
+			}
+			if !acquired {
+				continue
+			}
+
+			return true, nil
+		case <-stopCh:
+			return false, nil
+		}
+	}
+}
+
+// renewLock renews the given lock until the channel is closed.
+func (l *Lock) renewLock() {
+	ticker := time.NewTicker(l.renewInterval)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			l.writeLock()
+		case <-l.stopCh:
+			return
+		}
+	}
+}
+
+// watchLock checks whether the lock has changed in the table and closes the
+// leader channel accordingly. If an error occurs during the check, watchLock
+// will retry the operation and then close the leader channel if it can't
+// succeed after retries.
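Stepping out of the internals for a moment: the watch loop below is one half of a lifecycle that callers drive through the physical.HABackend interface. A minimal caller-side sketch, with a hypothetical key and node value:

```go
package main

import "github.com/hashicorp/vault/sdk/physical"

// runForLeadership sketches the caller side of the Lock implemented above.
func runForLeadership(ha physical.HABackend, stopCh <-chan struct{}) error {
	lock, err := ha.LockWith("core/lock", "node-a")
	if err != nil {
		return err
	}

	// Lock blocks until acquired; a nil channel with a nil error means the
	// attempt was interrupted via stopCh before acquisition.
	leaderCh, err := lock.Lock(stopCh)
	if err != nil || leaderCh == nil {
		return err
	}

	// renewLock keeps the record's TTL fresh in the background; watchLock
	// closes leaderCh if the lock changes hands or repeatedly cannot be read.
	<-leaderCh

	// Best-effort release so another node can take over immediately rather
	// than waiting out the TTL.
	return lock.Unlock()
}
```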
+func (l *Lock) watchLock() { + retries := 0 + ticker := time.NewTicker(l.watchRetryInterval) + +OUTER: + for { + // Check if the channel is already closed + select { + case <-l.stopCh: + break OUTER + default: + } + + // Check if we've exceeded retries + if retries >= l.watchRetryMax-1 { + break OUTER + } + + // Wait for the timer + select { + case <-ticker.C: + case <-l.stopCh: + break OUTER + } + + // Attempt to read the key + r, err := l.get(context.Background()) + if err != nil { + retries++ + continue + } + + // Verify the identity is the same + if r == nil || r.Identity != l.identity { + break OUTER + } + } + + l.stopLock.Lock() + defer l.stopLock.Unlock() + if !l.stopped { + l.stopped = true + close(l.stopCh) + } +} + +// writeLock writes the given lock using the following algorithm: +// +// - lock does not exist +// - write the lock +// +// - lock exists +// - if key is empty or identity is the same or timestamp exceeds TTL +// - update the lock to self +func (l *Lock) writeLock() (bool, error) { + // Keep track of whether the lock was written + lockWritten := false + + // Create a transaction to read and the update (maybe) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // The transaction will be retried, and it could sit in a queue behind, say, + // the delete operation. To stop the transaction, we close the context when + // the associated stopCh is received. + go func() { + select { + case <-l.stopCh: + cancel() + case <-ctx.Done(): + } + }() + + _, err := l.backend.haClient.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error { + row, err := txn.ReadRow(ctx, l.backend.haTable, spanner.Key{l.key}, []string{"Key", "Identity", "Timestamp"}) + if err != nil && spanner.ErrCode(err) != codes.NotFound { + return err + } + + // If there was a record, verify that the record is still trustable. + if row != nil { + var r LockRecord + if derr := row.ToStruct(&r); derr != nil { + return fmt.Errorf("failed to decode to struct: %w", derr) + } + + // If the key is empty or the identity is ours or the ttl expired, we can + // write. Otherwise, return now because we cannot. + if r.Key != "" && r.Identity != l.identity && time.Now().UTC().Sub(r.Timestamp) < l.ttl { + return nil + } + } + + m, err := spanner.InsertOrUpdateStruct(l.backend.haTable, &LockRecord{ + Key: l.key, + Value: l.value, + Identity: l.identity, + Timestamp: time.Now().UTC(), + }) + if err != nil { + return fmt.Errorf("failed to generate struct: %w", err) + } + if err := txn.BufferWrite([]*spanner.Mutation{m}); err != nil { + return fmt.Errorf("failed to write: %w", err) + } + + // Mark that the lock was acquired + lockWritten = true + + return nil + }) + if err != nil { + return false, fmt.Errorf("write lock: %w", err) + } + + return lockWritten, nil +} + +// get retrieves the value for the lock. 
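The guard inside writeLock packs the whole take-over policy into one negated conditional. Restated positively as a standalone helper (not part of the backend, shown only to make the policy explicit):

```go
package main

import "time"

// lockRecord mirrors the fields writeLock's guard actually consults.
type lockRecord struct {
	Key       string
	Identity  string
	Timestamp time.Time
}

// canTakeLock is the positive restatement of the guard: a writer may
// (re)write the lock row when the row is effectively empty, already its own,
// or stale past the TTL.
func canTakeLock(r lockRecord, identity string, ttl time.Duration) bool {
	return r.Key == "" ||
		r.Identity == identity ||
		time.Now().UTC().Sub(r.Timestamp) >= ttl
}
```

With the defaults above (5-second renewals against a 15-second TTL), a healthy holder refreshes the record roughly three times per TTL window, so a standby can only take over after about fifteen seconds of missed renewals.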
+func (l *Lock) get(ctx context.Context) (*LockRecord, error) { + // Read + row, err := l.backend.haClient.Single().ReadRow(ctx, l.backend.haTable, spanner.Key{l.key}, []string{"Key", "Value", "Timestamp", "Identity"}) + if spanner.ErrCode(err) == codes.NotFound { + return nil, nil + } + if err != nil { + return nil, fmt.Errorf("failed to read value for %q: %w", l.key, err) + } + + var r LockRecord + if err := row.ToStruct(&r); err != nil { + return nil, fmt.Errorf("failed to decode lock: %w", err) + } + return &r, nil +} diff --git a/physical/spanner/spanner_ha_test.go b/physical/spanner/spanner_ha_test.go new file mode 100644 index 0000000..dad39ad --- /dev/null +++ b/physical/spanner/spanner_ha_test.go @@ -0,0 +1,63 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package spanner + +import ( + "context" + "os" + "testing" + + "cloud.google.com/go/spanner" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" +) + +func TestHABackend(t *testing.T) { + database := os.Getenv("GOOGLE_SPANNER_DATABASE") + if database == "" { + t.Skip("GOOGLE_SPANNER_DATABASE not set") + } + + table := os.Getenv("GOOGLE_SPANNER_TABLE") + if table == "" { + t.Skip("GOOGLE_SPANNER_TABLE not set") + } + + haTable := os.Getenv("GOOGLE_SPANNER_HA_TABLE") + if haTable == "" { + t.Skip("GOOGLE_SPANNER_HA_TABLE not set") + } + + ctx := context.Background() + client, err := spanner.NewClient(ctx, database) + if err != nil { + t.Fatal(err) + } + + testCleanup(t, client, table) + defer testCleanup(t, client, table) + testCleanup(t, client, haTable) + defer testCleanup(t, client, haTable) + + logger := logging.NewVaultLogger(log.Debug) + config := map[string]string{ + "database": database, + "table": table, + "ha_table": haTable, + "ha_enabled": "true", + } + + b, err := NewBackend(config, logger) + if err != nil { + t.Fatal(err) + } + + b2, err := NewBackend(config, logger) + if err != nil { + t.Fatal(err) + } + + physical.ExerciseHABackend(t, b.(physical.HABackend), b2.(physical.HABackend)) +} diff --git a/physical/spanner/spanner_test.go b/physical/spanner/spanner_test.go new file mode 100644 index 0000000..4b7c1c4 --- /dev/null +++ b/physical/spanner/spanner_test.go @@ -0,0 +1,60 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package spanner + +import ( + "context" + "os" + "testing" + + "cloud.google.com/go/spanner" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" +) + +func testCleanup(t testing.TB, client *spanner.Client, table string) { + t.Helper() + + // Delete all data in the table + ctx := context.Background() + m := spanner.Delete(table, spanner.AllKeys()) + if _, err := client.Apply(ctx, []*spanner.Mutation{m}); err != nil { + t.Fatal(err) + } +} + +func TestBackend(t *testing.T) { + database := os.Getenv("GOOGLE_SPANNER_DATABASE") + if database == "" { + t.Skip("GOOGLE_SPANNER_DATABASE not set") + } + + table := os.Getenv("GOOGLE_SPANNER_TABLE") + if table == "" { + t.Skip("GOOGLE_SPANNER_TABLE not set") + } + + ctx := context.Background() + client, err := spanner.NewClient(ctx, database) + if err != nil { + t.Fatal(err) + } + + testCleanup(t, client, table) + defer testCleanup(t, client, table) + + backend, err := NewBackend(map[string]string{ + "database": database, + "table": table, + "ha_enabled": "false", + }, logging.NewVaultLogger(log.Debug)) + if err != nil { + t.Fatal(err) + } + + physical.ExerciseBackend(t, backend) + physical.ExerciseBackend_ListPrefix(t, backend) + physical.ExerciseTransactionalBackend(t, backend) +} diff --git a/physical/swift/swift.go b/physical/swift/swift.go new file mode 100644 index 0000000..d616bfe --- /dev/null +++ b/physical/swift/swift.go @@ -0,0 +1,249 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package swift + +import ( + "context" + "fmt" + "os" + "sort" + "strconv" + "strings" + "time" + + log "github.com/hashicorp/go-hclog" + + metrics "github.com/armon/go-metrics" + cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/physical" + "github.com/ncw/swift" +) + +// Verify SwiftBackend satisfies the correct interfaces +var _ physical.Backend = (*SwiftBackend)(nil) + +// SwiftBackend is a physical backend that stores data +// within an OpenStack Swift container. +type SwiftBackend struct { + container string + client *swift.Connection + logger log.Logger + permitPool *physical.PermitPool +} + +// NewSwiftBackend constructs a Swift backend using a pre-existing +// container. Credentials can be provided to the backend, sourced +// from the environment. 
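A hypothetical wiring of the constructor that follows; every key shown is also resolvable from its OS_* environment variable, and the container must exist beforehand, since the constructor only verifies access to it rather than creating it:

```go
package main

import (
	log "github.com/hashicorp/go-hclog"

	"github.com/hashicorp/vault/physical/swift"
)

func main() {
	logger := log.Default()

	// Endpoint, credentials, and container name are placeholders.
	backend, err := swift.NewSwiftBackend(map[string]string{
		"username":  "vault",
		"password":  "s3cr3t",
		"auth_url":  "https://keystone.example.com/v3",
		"project":   "vault-project",
		"container": "vault-storage",
	}, logger)
	if err != nil {
		logger.Error("failed to configure swift backend", "error", err)
		return
	}
	_ = backend
}
```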
+func NewSwiftBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + var ok bool + + username := os.Getenv("OS_USERNAME") + if username == "" { + username = conf["username"] + if username == "" { + return nil, fmt.Errorf("missing username") + } + } + password := os.Getenv("OS_PASSWORD") + if password == "" { + password = conf["password"] + if password == "" { + return nil, fmt.Errorf("missing password") + } + } + authUrl := os.Getenv("OS_AUTH_URL") + if authUrl == "" { + authUrl = conf["auth_url"] + if authUrl == "" { + return nil, fmt.Errorf("missing auth_url") + } + } + container := os.Getenv("OS_CONTAINER") + if container == "" { + container = conf["container"] + if container == "" { + return nil, fmt.Errorf("missing container") + } + } + project := os.Getenv("OS_PROJECT_NAME") + if project == "" { + if project, ok = conf["project"]; !ok { + // Check for KeyStone naming prior to V3 + project = os.Getenv("OS_TENANT_NAME") + if project == "" { + project = conf["tenant"] + } + } + } + + domain := os.Getenv("OS_USER_DOMAIN_NAME") + if domain == "" { + domain = conf["domain"] + } + projectDomain := os.Getenv("OS_PROJECT_DOMAIN_NAME") + if projectDomain == "" { + projectDomain = conf["project-domain"] + } + + region := os.Getenv("OS_REGION_NAME") + if region == "" { + region = conf["region"] + } + tenantID := os.Getenv("OS_TENANT_ID") + if tenantID == "" { + tenantID = conf["tenant_id"] + } + trustID := os.Getenv("OS_TRUST_ID") + if trustID == "" { + trustID = conf["trust_id"] + } + storageUrl := os.Getenv("OS_STORAGE_URL") + if storageUrl == "" { + storageUrl = conf["storage_url"] + } + authToken := os.Getenv("OS_AUTH_TOKEN") + if authToken == "" { + authToken = conf["auth_token"] + } + + c := swift.Connection{ + Domain: domain, + UserName: username, + ApiKey: password, + AuthUrl: authUrl, + Tenant: project, + TenantDomain: projectDomain, + Region: region, + TenantId: tenantID, + TrustId: trustID, + StorageUrl: storageUrl, + AuthToken: authToken, + Transport: cleanhttp.DefaultPooledTransport(), + } + + err := c.Authenticate() + if err != nil { + return nil, err + } + + _, _, err = c.Container(container) + if err != nil { + return nil, fmt.Errorf("Unable to access container %q: %w", container, err) + } + + maxParStr, ok := conf["max_parallel"] + var maxParInt int + if ok { + maxParInt, err = strconv.Atoi(maxParStr) + if err != nil { + return nil, fmt.Errorf("failed parsing max_parallel parameter: %w", err) + } + if logger.IsDebug() { + logger.Debug("max_parallel set", "max_parallel", maxParInt) + } + } + + s := &SwiftBackend{ + client: &c, + container: container, + logger: logger, + permitPool: physical.NewPermitPool(maxParInt), + } + return s, nil +} + +// Put is used to insert or update an entry +func (s *SwiftBackend) Put(ctx context.Context, entry *physical.Entry) error { + defer metrics.MeasureSince([]string{"swift", "put"}, time.Now()) + + s.permitPool.Acquire() + defer s.permitPool.Release() + + err := s.client.ObjectPutBytes(s.container, entry.Key, entry.Value, "") + if err != nil { + return err + } + + return nil +} + +// Get is used to fetch an entry +func (s *SwiftBackend) Get(ctx context.Context, key string) (*physical.Entry, error) { + defer metrics.MeasureSince([]string{"swift", "get"}, time.Now()) + + s.permitPool.Acquire() + defer s.permitPool.Release() + + // Do a list of names with the key first since eventual consistency means + // it might be deleted, but a node might return a read of bytes which fails + // the physical test + list, err := 
s.client.ObjectNames(s.container, &swift.ObjectsOpts{Prefix: key}) + if err != nil { + return nil, err + } + if 0 == len(list) { + return nil, nil + } + data, err := s.client.ObjectGetBytes(s.container, key) + if err == swift.ObjectNotFound { + return nil, nil + } + if err != nil { + return nil, err + } + ent := &physical.Entry{ + Key: key, + Value: data, + } + + return ent, nil +} + +// Delete is used to permanently delete an entry +func (s *SwiftBackend) Delete(ctx context.Context, key string) error { + defer metrics.MeasureSince([]string{"swift", "delete"}, time.Now()) + + s.permitPool.Acquire() + defer s.permitPool.Release() + + err := s.client.ObjectDelete(s.container, key) + + if err != nil && err != swift.ObjectNotFound { + return err + } + + return nil +} + +// List is used to list all the keys under a given +// prefix, up to the next prefix. +func (s *SwiftBackend) List(ctx context.Context, prefix string) ([]string, error) { + defer metrics.MeasureSince([]string{"swift", "list"}, time.Now()) + + s.permitPool.Acquire() + defer s.permitPool.Release() + + list, err := s.client.ObjectNamesAll(s.container, &swift.ObjectsOpts{Prefix: prefix}) + if nil != err { + return nil, err + } + + keys := []string{} + for _, key := range list { + key := strings.TrimPrefix(key, prefix) + + if i := strings.Index(key, "/"); i == -1 { + // Add objects only from the current 'folder' + keys = append(keys, key) + } else if i != -1 { + // Add truncated 'folder' paths + keys = strutil.AppendIfMissing(keys, key[:i+1]) + } + } + + sort.Strings(keys) + + return keys, nil +} diff --git a/physical/swift/swift_test.go b/physical/swift/swift_test.go new file mode 100644 index 0000000..8f8af16 --- /dev/null +++ b/physical/swift/swift_test.go @@ -0,0 +1,94 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package swift + +import ( + "fmt" + "os" + "testing" + "time" + + log "github.com/hashicorp/go-hclog" + + cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" + "github.com/ncw/swift" +) + +func TestSwiftBackend(t *testing.T) { + if os.Getenv("OS_USERNAME") == "" || os.Getenv("OS_PASSWORD") == "" || + os.Getenv("OS_AUTH_URL") == "" { + t.SkipNow() + } + username := os.Getenv("OS_USERNAME") + password := os.Getenv("OS_PASSWORD") + authUrl := os.Getenv("OS_AUTH_URL") + project := os.Getenv("OS_PROJECT_NAME") + domain := os.Getenv("OS_USER_DOMAIN_NAME") + projectDomain := os.Getenv("OS_PROJECT_DOMAIN_NAME") + region := os.Getenv("OS_REGION_NAME") + tenantID := os.Getenv("OS_TENANT_ID") + + ts := time.Now().UnixNano() + container := fmt.Sprintf("vault-test-%d", ts) + + cleaner := swift.Connection{ + Domain: domain, + UserName: username, + ApiKey: password, + AuthUrl: authUrl, + Tenant: project, + TenantDomain: projectDomain, + Region: region, + TenantId: tenantID, + Transport: cleanhttp.DefaultPooledTransport(), + } + + err := cleaner.Authenticate() + if err != nil { + t.Fatalf("err: %s", err) + } + + err = cleaner.ContainerCreate(container, nil) + if nil != err { + t.Fatalf("Unable to create test container %q: %v", container, err) + } + defer func() { + newObjects, err := cleaner.ObjectNamesAll(container, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + for _, o := range newObjects { + err := cleaner.ObjectDelete(container, o) + if err != nil { + t.Fatalf("err: %s", err) + } + } + err = cleaner.ContainerDelete(container) + if err != nil { + t.Fatalf("err: %s", err) + } + }() + + logger := logging.NewVaultLogger(log.Debug) + + b, err := NewSwiftBackend(map[string]string{ + "username": username, + "password": password, + "container": container, + "auth_url": authUrl, + "project": project, + "domain": domain, + "project-domain": projectDomain, + "tenant_id": tenantID, + "region": region, + }, logger) + if err != nil { + t.Fatalf("err: %s", err) + } + + physical.ExerciseBackend(t, b) + physical.ExerciseBackend_ListPrefix(t, b) +} diff --git a/physical/zookeeper/zookeeper.go b/physical/zookeeper/zookeeper.go new file mode 100644 index 0000000..e52ac9b --- /dev/null +++ b/physical/zookeeper/zookeeper.go @@ -0,0 +1,671 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package zookeeper + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net" + "path/filepath" + "sort" + "strings" + "sync" + "time" + + metrics "github.com/armon/go-metrics" + "github.com/go-zookeeper/zk" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-secure-stdlib/tlsutil" + "github.com/hashicorp/vault/sdk/physical" +) + +const ( + // ZKNodeFilePrefix is prefixed to any "files" in ZooKeeper, + // so that they do not collide with directory entries. Otherwise, + // we cannot delete a file if the path is a full-prefix of another + // key. + ZKNodeFilePrefix = "_" +) + +// Verify ZooKeeperBackend satisfies the correct interfaces +var ( + _ physical.Backend = (*ZooKeeperBackend)(nil) + _ physical.HABackend = (*ZooKeeperBackend)(nil) + _ physical.Lock = (*ZooKeeperHALock)(nil) +) + +// ZooKeeperBackend is a physical backend that stores data at specific +// prefix within ZooKeeper. 
It is used in production situations as +// it allows Vault to run on multiple machines in a highly-available manner. +type ZooKeeperBackend struct { + path string + client *zk.Conn + acl []zk.ACL + logger log.Logger +} + +// NewZooKeeperBackend constructs a ZooKeeper backend using the given API client +// and the prefix in the KV store. +func NewZooKeeperBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + // Get the path in ZooKeeper + path, ok := conf["path"] + if !ok { + path = "vault/" + } + + // Ensure path is suffixed and prefixed (zk requires prefix /) + if !strings.HasSuffix(path, "/") { + path += "/" + } + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + + // Configure the client, default to localhost instance + var machines string + machines, ok = conf["address"] + if !ok { + machines = "localhost:2181" + } + + // zNode owner and schema. + var owner string + var schema string + var schemaAndOwner string + schemaAndOwner, ok = conf["znode_owner"] + if !ok { + owner = "anyone" + schema = "world" + } else { + parsedSchemaAndOwner := strings.SplitN(schemaAndOwner, ":", 2) + if len(parsedSchemaAndOwner) != 2 { + return nil, fmt.Errorf("znode_owner expected format is 'schema:owner'") + } else { + schema = parsedSchemaAndOwner[0] + owner = parsedSchemaAndOwner[1] + + // znode_owner is in config and structured correctly - but does it make any sense? + // Either 'owner' or 'schema' was set but not both - this seems like a failed attempt + // (e.g. ':MyUser' which omit the schema, or ':' omitting both) + if owner == "" || schema == "" { + return nil, fmt.Errorf("znode_owner expected format is 'schema:auth'") + } + } + } + + acl := []zk.ACL{ + { + Perms: zk.PermAll, + Scheme: schema, + ID: owner, + }, + } + + // Authentication info + var schemaAndUser string + var useAddAuth bool + schemaAndUser, useAddAuth = conf["auth_info"] + if useAddAuth { + parsedSchemaAndUser := strings.SplitN(schemaAndUser, ":", 2) + if len(parsedSchemaAndUser) != 2 { + return nil, fmt.Errorf("auth_info expected format is 'schema:auth'") + } else { + schema = parsedSchemaAndUser[0] + owner = parsedSchemaAndUser[1] + + // auth_info is in config and structured correctly - but does it make any sense? + // Either 'owner' or 'schema' was set but not both - this seems like a failed attempt + // (e.g. ':MyUser' which omit the schema, or ':' omitting both) + if owner == "" || schema == "" { + return nil, fmt.Errorf("auth_info expected format is 'schema:auth'") + } + } + } + + // We have all of the configuration in hand - let's try and connect to ZK + client, _, err := createClient(conf, machines, time.Second) + if err != nil { + return nil, fmt.Errorf("client setup failed: %w", err) + } + + // ZK AddAuth API if the user asked for it + if useAddAuth { + err = client.AddAuth(schema, []byte(owner)) + if err != nil { + return nil, fmt.Errorf("ZooKeeper rejected authentication information provided at auth_info: %w", err) + } + } + + // Setup the backend + c := &ZooKeeperBackend{ + path: path, + client: client, + acl: acl, + logger: logger, + } + return c, nil +} + +func caseInsenstiveContains(superset, val string) bool { + return strings.Contains(strings.ToUpper(superset), strings.ToUpper(val)) +} + +// Returns a client for ZK connection. Config value 'tls_enabled' determines if TLS is enabled or not. 
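Before the connection helpers, a configuration sketch that exercises the ACL plumbing above. The credentials and digest are placeholders: with the 'digest' scheme, znode_owner carries user:base64(sha1(user:password)), while auth_info carries the plaintext user:password that is handed to AddAuth:

```go
package main

import (
	log "github.com/hashicorp/go-hclog"

	"github.com/hashicorp/vault/physical/zookeeper"
)

func main() {
	logger := log.Default()

	backend, err := zookeeper.NewZooKeeperBackend(map[string]string{
		"address": "zk1.example.com:2181,zk2.example.com:2181",
		"path":    "vault/",

		// Owner of created znodes, in schema:owner form. The base64 value
		// here is a hypothetical SHA-1 digest, not a real credential.
		"znode_owner": "digest:vaultuser:UGFzc3dvcmRIYXNoSGVyZQ==",

		// Credentials passed to AddAuth, in schema:auth form.
		"auth_info": "digest:vaultuser:vaultpassword",
	}, logger)
	if err != nil {
		logger.Error("failed to configure zookeeper backend", "error", err)
		return
	}
	_ = backend
}
```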
+func createClient(conf map[string]string, machines string, timeout time.Duration) (*zk.Conn, <-chan zk.Event, error) { + // 'tls_enabled' defaults to false + isTlsEnabled := false + isTlsEnabledStr, ok := conf["tls_enabled"] + + if ok && isTlsEnabledStr != "" { + parsedBoolval, err := parseutil.ParseBool(isTlsEnabledStr) + if err != nil { + return nil, nil, fmt.Errorf("failed parsing tls_enabled parameter: %w", err) + } + isTlsEnabled = parsedBoolval + } + + if isTlsEnabled { + // Create a custom Dialer with cert configuration for TLS handshake. + tlsDialer := customTLSDial(conf, machines) + options := zk.WithDialer(tlsDialer) + return zk.Connect(strings.Split(machines, ","), timeout, options) + } else { + return zk.Connect(strings.Split(machines, ","), timeout) + } +} + +// Vault config file properties: +// 1. tls_skip_verify: skip host name verification. +// 2. tls_min_version: minimum supported/acceptable tls version +// 3. tls_cert_file: Cert file Absolute path +// 4. tls_key_file: Key file Absolute path +// 5. tls_ca_file: ca file absolute path +// 6. tls_verify_ip: If set to true, server's IP is verified in certificate if tls_skip_verify is false. +func customTLSDial(conf map[string]string, machines string) zk.Dialer { + return func(network, addr string, timeout time.Duration) (net.Conn, error) { + // Sets the serverName. *Note* the addr field comes in as an IP address + serverName, _, sParseErr := net.SplitHostPort(addr) + if sParseErr != nil { + // If the address is only missing port, assign the full address anyway + if strings.Contains(sParseErr.Error(), "missing port") { + serverName = addr + } else { + return nil, fmt.Errorf("failed parsing the server address for 'serverName' setting %w", sParseErr) + } + } + + insecureSkipVerify := false + tlsSkipVerify, ok := conf["tls_skip_verify"] + + if ok && tlsSkipVerify != "" { + b, err := parseutil.ParseBool(tlsSkipVerify) + if err != nil { + return nil, fmt.Errorf("failed parsing tls_skip_verify parameter: %w", err) + } + insecureSkipVerify = b + } + + if !insecureSkipVerify { + // If tls_verify_ip is set to false, Server's DNS name is verified in the CN/SAN of the certificate. + // if tls_verify_ip is true, Server's IP is verified in the CN/SAN of the certificate. + // These checks happen only when tls_skip_verify is set to false. + // This value defaults to false + ipSanCheck := false + configVal, lookupOk := conf["tls_verify_ip"] + + if lookupOk && configVal != "" { + parsedIpSanCheck, ipSanErr := parseutil.ParseBool(configVal) + if ipSanErr != nil { + return nil, fmt.Errorf("failed parsing tls_verify_ip parameter: %w", ipSanErr) + } + ipSanCheck = parsedIpSanCheck + } + // The addr/serverName parameter to this method comes in as an IP address. + // Here we lookup the DNS name and assign it to serverName if ipSanCheck is set to false + if !ipSanCheck { + lookupAddressMany, lookupErr := net.LookupAddr(serverName) + if lookupErr == nil { + for _, lookupAddress := range lookupAddressMany { + // strip the trailing '.' from lookupAddr + if lookupAddress[len(lookupAddress)-1] == '.' { + lookupAddress = lookupAddress[:len(lookupAddress)-1] + } + // Allow serverName to be replaced only if the lookupname is part of the + // supplied machine names + // If there is no match, the serverName will continue to be an IP value. 
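+					// Note that net.LookupAddr can return several PTR names for one
+					// address; the first name that also appears in the configured
+					// machine list (matched case-insensitively below) wins.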
+ if caseInsenstiveContains(machines, lookupAddress) { + serverName = lookupAddress + break + } + } + } + } + + } + + tlsMinVersionStr, ok := conf["tls_min_version"] + if !ok { + // Set the default value + tlsMinVersionStr = "tls12" + } + + tlsMinVersion, ok := tlsutil.TLSLookup[tlsMinVersionStr] + if !ok { + return nil, fmt.Errorf("invalid 'tls_min_version'") + } + + tlsClientConfig := &tls.Config{ + MinVersion: tlsMinVersion, + InsecureSkipVerify: insecureSkipVerify, + ServerName: serverName, + } + + _, okCert := conf["tls_cert_file"] + _, okKey := conf["tls_key_file"] + + if okCert && okKey { + tlsCert, err := tls.LoadX509KeyPair(conf["tls_cert_file"], conf["tls_key_file"]) + if err != nil { + return nil, fmt.Errorf("client tls setup failed for ZK: %w", err) + } + + tlsClientConfig.Certificates = []tls.Certificate{tlsCert} + } + + if tlsCaFile, ok := conf["tls_ca_file"]; ok { + caPool := x509.NewCertPool() + + data, err := ioutil.ReadFile(tlsCaFile) + if err != nil { + return nil, fmt.Errorf("failed to read ZK CA file: %w", err) + } + + if !caPool.AppendCertsFromPEM(data) { + return nil, fmt.Errorf("failed to parse ZK CA certificate") + } + tlsClientConfig.RootCAs = caPool + } + + if network != "tcp" { + return nil, fmt.Errorf("unsupported network %q", network) + } + + tcpConn, err := net.DialTimeout("tcp", addr, timeout) + if err != nil { + return nil, err + } + conn := tls.Client(tcpConn, tlsClientConfig) + if err := conn.Handshake(); err != nil { + return nil, fmt.Errorf("Handshake failed with Zookeeper : %v", err) + } + return conn, nil + } +} + +// ensurePath is used to create each node in the path hierarchy. +// We avoid calling this optimistically, and invoke it when we get +// an error during an operation +func (c *ZooKeeperBackend) ensurePath(path string, value []byte) error { + nodes := strings.Split(path, "/") + fullPath := "" + for index, node := range nodes { + if strings.TrimSpace(node) != "" { + fullPath += "/" + node + isLastNode := index+1 == len(nodes) + + // set parent nodes to nil, leaf to value + // this block reduces round trips by being smart on the leaf create/set + if exists, _, _ := c.client.Exists(fullPath); !isLastNode && !exists { + if _, err := c.client.Create(fullPath, nil, int32(0), c.acl); err != nil { + return err + } + } else if isLastNode && !exists { + if _, err := c.client.Create(fullPath, value, int32(0), c.acl); err != nil { + return err + } + } else if isLastNode && exists { + if _, err := c.client.Set(fullPath, value, int32(-1)); err != nil { + return err + } + } + } + } + return nil +} + +// cleanupLogicalPath is used to remove all empty nodes, beginning with deepest one, +// aborting on first non-empty one, up to top-level node. +func (c *ZooKeeperBackend) cleanupLogicalPath(path string) error { + nodes := strings.Split(path, "/") + for i := len(nodes) - 1; i > 0; i-- { + fullPath := c.path + strings.Join(nodes[:i], "/") + + _, stat, err := c.client.Exists(fullPath) + if err != nil { + return fmt.Errorf("failed to acquire node data: %w", err) + } + + if stat.DataLength > 0 && stat.NumChildren > 0 { + panic(fmt.Sprintf("node %q is both of data and leaf type", fullPath)) + } else if stat.DataLength > 0 { + panic(fmt.Sprintf("node %q is a data node, this is either a bug or backend data is corrupted", fullPath)) + } else if stat.NumChildren > 0 { + return nil + } else { + // Empty node, lets clean it up! 
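+			// Concurrent cleanups may be deleting the same empty parents, so an
+			// ErrNoNode result from the delete below is treated as success.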
+			if err := c.client.Delete(fullPath, -1); err != nil && err != zk.ErrNoNode {
+				return fmt.Errorf("removal of node %q failed: %w", fullPath, err)
+			}
+		}
+	}
+	return nil
+}
+
+// nodePath returns a zk path based on the given key.
+func (c *ZooKeeperBackend) nodePath(key string) string {
+	return filepath.Join(c.path, filepath.Dir(key), ZKNodeFilePrefix+filepath.Base(key))
+}
+
+// Put is used to insert or update an entry
+func (c *ZooKeeperBackend) Put(ctx context.Context, entry *physical.Entry) error {
+	defer metrics.MeasureSince([]string{"zookeeper", "put"}, time.Now())
+
+	// Attempt to set the full path
+	fullPath := c.nodePath(entry.Key)
+	_, err := c.client.Set(fullPath, entry.Value, -1)
+
+	// If we get ErrNoNode, we need to construct the path hierarchy
+	if err == zk.ErrNoNode {
+		return c.ensurePath(fullPath, entry.Value)
+	}
+	return err
+}
+
+// Get is used to fetch an entry
+func (c *ZooKeeperBackend) Get(ctx context.Context, key string) (*physical.Entry, error) {
+	defer metrics.MeasureSince([]string{"zookeeper", "get"}, time.Now())
+
+	// Attempt to read the full path
+	fullPath := c.nodePath(key)
+	value, _, err := c.client.Get(fullPath)
+
+	// Ignore if the node does not exist
+	if err == zk.ErrNoNode {
+		err = nil
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	// Handle a non-existing value
+	if value == nil {
+		return nil, nil
+	}
+	ent := &physical.Entry{
+		Key:   key,
+		Value: value,
+	}
+	return ent, nil
+}
+
+// Delete is used to permanently delete an entry
+func (c *ZooKeeperBackend) Delete(ctx context.Context, key string) error {
+	defer metrics.MeasureSince([]string{"zookeeper", "delete"}, time.Now())
+
+	if key == "" {
+		return nil
+	}
+
+	// Delete the full path
+	fullPath := c.nodePath(key)
+	err := c.client.Delete(fullPath, -1)
+
+	// Mask if the node does not exist
+	if err != nil && err != zk.ErrNoNode {
+		return fmt.Errorf("failed to remove %q: %w", fullPath, err)
+	}
+
+	err = c.cleanupLogicalPath(key)
+
+	return err
+}
+
+// List is used to list all the keys under a given
+// prefix, up to the next prefix.
+func (c *ZooKeeperBackend) List(ctx context.Context, prefix string) ([]string, error) {
+	defer metrics.MeasureSince([]string{"zookeeper", "list"}, time.Now())
+
+	// Query the children at the full path
+	fullPath := strings.TrimSuffix(c.path+prefix, "/")
+	result, _, err := c.client.Children(fullPath)
+
+	// If the path nodes are missing, no children!
+	if err == zk.ErrNoNode {
+		return []string{}, nil
+	} else if err != nil {
+		return []string{}, err
+	}
+
+	children := []string{}
+	for _, key := range result {
+		childPath := fullPath + "/" + key
+		_, stat, err := c.client.Exists(childPath)
+		if err != nil {
+			// The node ought to exist, so the error must be something else
+			return []string{}, err
+		}
+
+		// Check if this entry is a leaf of a node,
+		// and append the slash which is what Vault depends on
+		// for iteration
+		if stat.DataLength > 0 && stat.NumChildren > 0 {
+			if childPath == c.nodePath("core/lock") {
+				// go-zookeeper Lock() breaks Vault semantics and creates a directory
+				// under the lock file; just treat it like the file Vault expects
+				children = append(children, key[1:])
+			} else {
+				panic(fmt.Sprintf("node %q is both of data and leaf type", childPath))
+			}
+		} else if stat.DataLength == 0 {
+			// We cannot differentiate on the number of children here, as a
+			// node can have all of its leaves removed and still be a node.
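+			// e.g. an intermediate node "foo" is surfaced as "foo/", while a
+			// leaf znode named "_bar" (see ZKNodeFilePrefix) is surfaced as
+			// "bar" in the final branch.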
+ children = append(children, key+"/") + } else { + children = append(children, key[1:]) + } + } + sort.Strings(children) + return children, nil +} + +// LockWith is used for mutual exclusion based on the given key. +func (c *ZooKeeperBackend) LockWith(key, value string) (physical.Lock, error) { + l := &ZooKeeperHALock{ + in: c, + key: key, + value: value, + logger: c.logger, + } + return l, nil +} + +// HAEnabled indicates whether the HA functionality should be exposed. +// Currently always returns true. +func (c *ZooKeeperBackend) HAEnabled() bool { + return true +} + +// ZooKeeperHALock is a ZooKeeper Lock implementation for the HABackend +type ZooKeeperHALock struct { + in *ZooKeeperBackend + key string + value string + logger log.Logger + + held bool + localLock sync.Mutex + leaderCh chan struct{} + stopCh <-chan struct{} + zkLock *zk.Lock +} + +func (i *ZooKeeperHALock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { + i.localLock.Lock() + defer i.localLock.Unlock() + if i.held { + return nil, fmt.Errorf("lock already held") + } + + // Attempt an async acquisition + didLock := make(chan struct{}) + failLock := make(chan error, 1) + releaseCh := make(chan bool, 1) + lockpath := i.in.nodePath(i.key) + go i.attemptLock(lockpath, didLock, failLock, releaseCh) + + // Wait for lock acquisition, failure, or shutdown + select { + case <-didLock: + releaseCh <- false + case err := <-failLock: + return nil, err + case <-stopCh: + releaseCh <- true + return nil, nil + } + + // Create the leader channel + i.held = true + i.leaderCh = make(chan struct{}) + + // Watch for Events which could result in loss of our zkLock and close(i.leaderCh) + currentVal, _, lockeventCh, err := i.in.client.GetW(lockpath) + if err != nil { + return nil, fmt.Errorf("unable to watch HA lock: %w", err) + } + if i.value != string(currentVal) { + return nil, fmt.Errorf("lost HA lock immediately before watch") + } + go i.monitorLock(lockeventCh, i.leaderCh) + + i.stopCh = stopCh + + return i.leaderCh, nil +} + +func (i *ZooKeeperHALock) attemptLock(lockpath string, didLock chan struct{}, failLock chan error, releaseCh chan bool) { + // Wait to acquire the lock in ZK + lock := zk.NewLock(i.in.client, lockpath, i.in.acl) + err := lock.Lock() + if err != nil { + failLock <- err + return + } + // Set node value + data := []byte(i.value) + err = i.in.ensurePath(lockpath, data) + if err != nil { + failLock <- err + lock.Unlock() + return + } + i.zkLock = lock + + // Signal that lock is held + close(didLock) + + // Handle an early abort + release := <-releaseCh + if release { + lock.Unlock() + } +} + +func (i *ZooKeeperHALock) monitorLock(lockeventCh <-chan zk.Event, leaderCh chan struct{}) { + for { + select { + case event := <-lockeventCh: + // Lost connection? + switch event.State { + case zk.StateConnected: + case zk.StateHasSession: + default: + close(leaderCh) + return + } + + // Lost lock? 
+			switch event.Type {
+			case zk.EventNodeChildrenChanged:
+			case zk.EventSession:
+			default:
+				close(leaderCh)
+				return
+			}
+		}
+	}
+}
+
+func (i *ZooKeeperHALock) unlockInternal() error {
+	i.localLock.Lock()
+	defer i.localLock.Unlock()
+	if !i.held {
+		return nil
+	}
+
+	err := i.zkLock.Unlock()
+
+	if err == nil {
+		i.held = false
+		return nil
+	}
+
+	return err
+}
+
+func (i *ZooKeeperHALock) Unlock() error {
+	var err error
+
+	if err = i.unlockInternal(); err != nil {
+		i.logger.Error("failed to release distributed lock", "error", err)
+
+		go func(i *ZooKeeperHALock) {
+			attempts := 0
+			i.logger.Info("launching automated distributed lock release")
+
+			for {
+				if err := i.unlockInternal(); err == nil {
+					i.logger.Info("distributed lock released")
+					return
+				}
+
+				timer := time.NewTimer(time.Second)
+				select {
+				case <-timer.C:
+					// Increment the outer counter; declaring a new variable
+					// with := here would shadow it and the retry limit below
+					// would never be reached.
+					attempts++
+					if attempts >= 10 {
+						i.logger.Error("release lock max attempts reached. Lock may not be released", "error", err)
+						return
+					}
+					continue
+				case <-i.stopCh:
+					timer.Stop()
+					return
+				}
+			}
+		}(i)
+	}
+
+	return err
+}
+
+func (i *ZooKeeperHALock) Value() (bool, string, error) {
+	lockpath := i.in.nodePath(i.key)
+	value, _, err := i.in.client.Get(lockpath)
+	return (value != nil), string(value), err
+}
diff --git a/physical/zookeeper/zookeeper_test.go b/physical/zookeeper/zookeeper_test.go
new file mode 100644
index 0000000..e4448bf
--- /dev/null
+++ b/physical/zookeeper/zookeeper_test.go
@@ -0,0 +1,104 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package zookeeper
+
+import (
+	"fmt"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/go-zookeeper/zk"
+	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/sdk/helper/logging"
+	"github.com/hashicorp/vault/sdk/physical"
+)
+
+func TestZooKeeperBackend(t *testing.T) {
+	addr := os.Getenv("ZOOKEEPER_ADDR")
+	if addr == "" {
+		t.SkipNow()
+	}
+
+	client, _, err := zk.Connect([]string{addr}, time.Second)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	randPath := fmt.Sprintf("/vault-%d", time.Now().Unix())
+	acl := zk.WorldACL(zk.PermAll)
+	_, err = client.Create(randPath, []byte("hi"), int32(0), acl)
+
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	defer func() {
+		client.Delete(randPath+"/foo/nested1/nested2/nested3", -1)
+		client.Delete(randPath+"/foo/nested1/nested2", -1)
+		client.Delete(randPath+"/foo/nested1", -1)
+		client.Delete(randPath+"/foo/bar/baz", -1)
+		client.Delete(randPath+"/foo/bar", -1)
+		client.Delete(randPath+"/foo", -1)
+		client.Delete(randPath, -1)
+		client.Close()
+	}()
+
+	logger := logging.NewVaultLogger(log.Debug)
+
+	b, err := NewZooKeeperBackend(map[string]string{
+		"address": addr + "," + addr,
+		"path":    randPath,
+	}, logger)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	physical.ExerciseBackend(t, b)
+	physical.ExerciseBackend_ListPrefix(t, b)
+}
+
+func TestZooKeeperHABackend(t *testing.T) {
+	addr := os.Getenv("ZOOKEEPER_ADDR")
+	if addr == "" {
+		t.SkipNow()
+	}
+
+	client, _, err := zk.Connect([]string{addr}, time.Second)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	randPath := fmt.Sprintf("/vault-ha-%d", time.Now().Unix())
+	acl := zk.WorldACL(zk.PermAll)
+	_, err = client.Create(randPath, []byte("hi"), int32(0), acl)
+
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	defer func() {
+		client.Delete(randPath+"/foo", -1)
+		client.Delete(randPath, -1)
+		client.Close()
+	}()
+
+	logger := logging.NewVaultLogger(log.Debug)
+	config := map[string]string{
+		"address": addr + "," + addr,
"path": randPath, + } + + b, err := NewZooKeeperBackend(config, logger) + if err != nil { + t.Fatalf("err: %s", err) + } + + b2, err := NewZooKeeperBackend(config, logger) + if err != nil { + t.Fatalf("err: %s", err) + } + + physical.ExerciseHABackend(t, b.(physical.HABackend), b2.(physical.HABackend)) +} diff --git a/plugins/database/cassandra/cassandra-database-plugin/main.go b/plugins/database/cassandra/cassandra-database-plugin/main.go new file mode 100644 index 0000000..8a91d1b --- /dev/null +++ b/plugins/database/cassandra/cassandra-database-plugin/main.go @@ -0,0 +1,27 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "log" + "os" + + "github.com/hashicorp/vault/plugins/database/cassandra" + "github.com/hashicorp/vault/sdk/database/dbplugin/v5" +) + +func main() { + err := Run() + if err != nil { + log.Println(err) + os.Exit(1) + } +} + +// Run instantiates a Cassandra object, and runs the RPC server for the plugin +func Run() error { + dbplugin.ServeMultiplex(cassandra.New) + + return nil +} diff --git a/plugins/database/cassandra/cassandra.go b/plugins/database/cassandra/cassandra.go new file mode 100644 index 0000000..8118fa0 --- /dev/null +++ b/plugins/database/cassandra/cassandra.go @@ -0,0 +1,263 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cassandra + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/vault/sdk/helper/template" + + "github.com/gocql/gocql" + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-secure-stdlib/strutil" + dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/hashicorp/vault/sdk/database/helper/dbutil" +) + +const ( + defaultUserCreationCQL = `CREATE USER '{{username}}' WITH PASSWORD '{{password}}' NOSUPERUSER;` + defaultUserDeletionCQL = `DROP USER '{{username}}';` + defaultChangePasswordCQL = `ALTER USER '{{username}}' WITH PASSWORD '{{password}}';` + cassandraTypeName = "cassandra" + + defaultUserNameTemplate = `{{ printf "v_%s_%s_%s_%s" (.DisplayName | truncate 15) (.RoleName | truncate 15) (random 20) (unix_time) | truncate 100 | replace "-" "_" | lowercase }}` +) + +var _ dbplugin.Database = &Cassandra{} + +// Cassandra is an implementation of Database interface +type Cassandra struct { + *cassandraConnectionProducer + + usernameProducer template.StringTemplate +} + +// New returns a new Cassandra instance +func New() (interface{}, error) { + db := new() + dbType := dbplugin.NewDatabaseErrorSanitizerMiddleware(db, db.secretValues) + + return dbType, nil +} + +func new() *Cassandra { + connProducer := &cassandraConnectionProducer{} + connProducer.Type = cassandraTypeName + + return &Cassandra{ + cassandraConnectionProducer: connProducer, + } +} + +// Type returns the TypeName for this backend +func (c *Cassandra) Type() (string, error) { + return cassandraTypeName, nil +} + +func (c *Cassandra) getConnection(ctx context.Context) (*gocql.Session, error) { + session, err := c.Connection(ctx) + if err != nil { + return nil, err + } + + return session.(*gocql.Session), nil +} + +func (c *Cassandra) Initialize(ctx context.Context, req dbplugin.InitializeRequest) (dbplugin.InitializeResponse, error) { + usernameTemplate, err := strutil.GetString(req.Config, "username_template") + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("failed to retrieve username_template: %w", err) + } + if usernameTemplate == "" { + usernameTemplate = defaultUserNameTemplate + } + + up, err := 
template.NewTemplate(template.Template(usernameTemplate)) + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("unable to initialize username template: %w", err) + } + c.usernameProducer = up + + _, err = c.usernameProducer.Generate(dbplugin.UsernameMetadata{}) + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("invalid username template: %w", err) + } + + err = c.cassandraConnectionProducer.Initialize(ctx, req) + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("failed to initialize: %w", err) + } + + resp := dbplugin.InitializeResponse{ + Config: req.Config, + } + return resp, nil +} + +// NewUser generates the username/password on the underlying Cassandra secret backend as instructed by +// the statements provided. +func (c *Cassandra) NewUser(ctx context.Context, req dbplugin.NewUserRequest) (dbplugin.NewUserResponse, error) { + c.Lock() + defer c.Unlock() + + session, err := c.getConnection(ctx) + if err != nil { + return dbplugin.NewUserResponse{}, err + } + + creationCQL := req.Statements.Commands + if len(creationCQL) == 0 { + creationCQL = []string{defaultUserCreationCQL} + } + + rollbackCQL := req.RollbackStatements.Commands + if len(rollbackCQL) == 0 { + rollbackCQL = []string{defaultUserDeletionCQL} + } + + username, err := c.usernameProducer.Generate(req.UsernameConfig) + if err != nil { + return dbplugin.NewUserResponse{}, err + } + + for _, stmt := range creationCQL { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "username": username, + "password": req.Password, + } + err = session. + Query(dbutil.QueryHelper(query, m)). + WithContext(ctx). + Exec() + if err != nil { + rollbackErr := rollbackUser(ctx, session, username, rollbackCQL) + if rollbackErr != nil { + err = multierror.Append(err, rollbackErr) + } + return dbplugin.NewUserResponse{}, err + } + } + } + + resp := dbplugin.NewUserResponse{ + Username: username, + } + return resp, nil +} + +func rollbackUser(ctx context.Context, session *gocql.Session, username string, rollbackCQL []string) error { + for _, stmt := range rollbackCQL { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "username": username, + } + err := session. + Query(dbutil.QueryHelper(query, m)). + WithContext(ctx). 
+ Exec() + if err != nil { + return fmt.Errorf("failed to roll back user %s: %w", username, err) + } + } + } + return nil +} + +func (c *Cassandra) UpdateUser(ctx context.Context, req dbplugin.UpdateUserRequest) (dbplugin.UpdateUserResponse, error) { + if req.Password == nil && req.Expiration == nil { + return dbplugin.UpdateUserResponse{}, fmt.Errorf("no changes requested") + } + + if req.Password != nil { + err := c.changeUserPassword(ctx, req.Username, req.Password) + return dbplugin.UpdateUserResponse{}, err + } + // Expiration is no-op + return dbplugin.UpdateUserResponse{}, nil +} + +func (c *Cassandra) changeUserPassword(ctx context.Context, username string, changePass *dbplugin.ChangePassword) error { + session, err := c.getConnection(ctx) + if err != nil { + return err + } + + rotateCQL := changePass.Statements.Commands + if len(rotateCQL) == 0 { + rotateCQL = []string{defaultChangePasswordCQL} + } + + var result *multierror.Error + for _, stmt := range rotateCQL { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "username": username, + "password": changePass.NewPassword, + } + err := session. + Query(dbutil.QueryHelper(query, m)). + WithContext(ctx). + Exec() + result = multierror.Append(result, err) + } + } + + return result.ErrorOrNil() +} + +// DeleteUser attempts to drop the specified user. +func (c *Cassandra) DeleteUser(ctx context.Context, req dbplugin.DeleteUserRequest) (dbplugin.DeleteUserResponse, error) { + c.Lock() + defer c.Unlock() + + session, err := c.getConnection(ctx) + if err != nil { + return dbplugin.DeleteUserResponse{}, err + } + + revocationCQL := req.Statements.Commands + if len(revocationCQL) == 0 { + revocationCQL = []string{defaultUserDeletionCQL} + } + + var result *multierror.Error + for _, stmt := range revocationCQL { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "username": req.Username, + } + err := session. + Query(dbutil.QueryHelper(query, m)). + WithContext(ctx). + Exec() + + result = multierror.Append(result, err) + } + } + + return dbplugin.DeleteUserResponse{}, result.ErrorOrNil() +} diff --git a/plugins/database/cassandra/cassandra_test.go b/plugins/database/cassandra/cassandra_test.go new file mode 100644 index 0000000..7a32609 --- /dev/null +++ b/plugins/database/cassandra/cassandra_test.go @@ -0,0 +1,308 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package cassandra
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+
+	backoff "github.com/cenkalti/backoff/v3"
+	"github.com/gocql/gocql"
+	"github.com/hashicorp/vault/helper/testhelpers/cassandra"
+	dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
+	dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing"
+)
+
+func getCassandra(t *testing.T, protocolVersion interface{}) (*Cassandra, func()) {
+	host, cleanup := cassandra.PrepareTestContainer(t,
+		cassandra.Version("3.11"),
+		cassandra.CopyFromTo(insecureFileMounts),
+	)
+
+	db := new()
+	initReq := dbplugin.InitializeRequest{
+		Config: map[string]interface{}{
+			"hosts":            host.ConnectionURL(),
+			"port":             host.Port,
+			"username":         "cassandra",
+			"password":         "cassandra",
+			"protocol_version": protocolVersion,
+			"connect_timeout":  "20s",
+		},
+		VerifyConnection: true,
+	}
+
+	expectedConfig := map[string]interface{}{
+		"hosts":            host.ConnectionURL(),
+		"port":             host.Port,
+		"username":         "cassandra",
+		"password":         "cassandra",
+		"protocol_version": protocolVersion,
+		"connect_timeout":  "20s",
+	}
+
+	initResp := dbtesting.AssertInitialize(t, db, initReq)
+	if !reflect.DeepEqual(initResp.Config, expectedConfig) {
+		t.Fatalf("Initialize response config actual: %#v\nExpected: %#v", initResp.Config, expectedConfig)
+	}
+
+	if !db.Initialized {
+		t.Fatal("Database should be initialized")
+	}
+	return db, cleanup
+}
+
+func TestInitialize(t *testing.T) {
+	t.Run("integer protocol version", func(t *testing.T) {
+		// getCassandra performs an Initialize call
+		db, cleanup := getCassandra(t, 4)
+		t.Cleanup(cleanup)
+
+		err := db.Close()
+		if err != nil {
+			t.Fatalf("err: %s", err)
+		}
+	})
+
+	t.Run("string protocol version", func(t *testing.T) {
+		// getCassandra performs an Initialize call
+		db, cleanup := getCassandra(t, "4")
+		t.Cleanup(cleanup)
+
+		err := db.Close()
+		if err != nil {
+			t.Fatalf("err: %s", err)
+		}
+	})
+}
+
+func TestCreateUser(t *testing.T) {
+	type testCase struct {
+		// Config will have the hosts & port added to it during the test
+		config                map[string]interface{}
+		newUserReq            dbplugin.NewUserRequest
+		expectErr             bool
+		expectedUsernameRegex string
+		assertCreds           func(t testing.TB, address string, port int, username, password string, sslOpts *gocql.SslOptions, timeout time.Duration)
+	}
+
+	tests := map[string]testCase{
+		"default username_template": {
+			config: map[string]interface{}{
+				"username":         "cassandra",
+				"password":         "cassandra",
+				"protocol_version": "4",
+				"connect_timeout":  "20s",
+			},
+			newUserReq: dbplugin.NewUserRequest{
+				UsernameConfig: dbplugin.UsernameMetadata{
+					DisplayName: "token",
+					RoleName:    "mylongrolenamewithmanycharacters",
+				},
+				Statements: dbplugin.Statements{
+					Commands: []string{createUserStatements},
+				},
+				Password:   "bfn985wjAHIh6t",
+				Expiration: time.Now().Add(1 * time.Minute),
+			},
+			expectErr:             false,
+			expectedUsernameRegex: `^v_token_mylongrolenamew_[a-z0-9]{20}_[0-9]{10}$`,
+			assertCreds:           assertCreds,
+		},
+		"custom username_template": {
+			config: map[string]interface{}{
+				"username":          "cassandra",
+				"password":          "cassandra",
+				"protocol_version":  "4",
+				"connect_timeout":   "20s",
+				"username_template": `foo_{{random 20}}_{{.RoleName | replace "e" "3"}}_{{unix_time}}`,
+			},
+			newUserReq: dbplugin.NewUserRequest{
+				UsernameConfig: dbplugin.UsernameMetadata{
+					DisplayName: "token",
+					RoleName:    "mylongrolenamewithmanycharacters",
+				},
+				Statements: dbplugin.Statements{
+					Commands: []string{createUserStatements},
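+					// createUserStatements (defined at the bottom of this
+					// file) uses the same {{username}} and {{password}}
+					// placeholders that NewUser substitutes via
+					// dbutil.QueryHelper before executing each statement.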
+				},
+				Password:   "bfn985wjAHIh6t",
+				Expiration: time.Now().Add(1 * time.Minute),
+			},
+			expectErr:             false,
+			expectedUsernameRegex: `^foo_[a-zA-Z0-9]{20}_mylongrol3nam3withmanycharact3rs_[0-9]{10}$`,
+			assertCreds:           assertCreds,
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			host, cleanup := cassandra.PrepareTestContainer(t,
+				cassandra.Version("3.11"),
+				cassandra.CopyFromTo(insecureFileMounts),
+			)
+			defer cleanup()
+
+			db := new()
+
+			config := test.config
+			config["hosts"] = host.ConnectionURL()
+			config["port"] = host.Port
+
+			initReq := dbplugin.InitializeRequest{
+				Config:           config,
+				VerifyConnection: true,
+			}
+
+			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+			defer cancel()
+			dbtesting.AssertInitialize(t, db, initReq)
+
+			require.True(t, db.Initialized, "Database is not initialized")
+
+			ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
+			defer cancel()
+			newUserResp, err := db.NewUser(ctx, test.newUserReq)
+			if test.expectErr && err == nil {
+				t.Fatalf("err expected, got nil")
+			}
+			if !test.expectErr && err != nil {
+				t.Fatalf("no error expected, got: %s", err)
+			}
+			require.Regexp(t, test.expectedUsernameRegex, newUserResp.Username)
+			test.assertCreds(t, db.Hosts, db.Port, newUserResp.Username, test.newUserReq.Password, nil, 5*time.Second)
+		})
+	}
+}
+
+func TestUpdateUserPassword(t *testing.T) {
+	db, cleanup := getCassandra(t, 4)
+	defer cleanup()
+
+	password := "myreallysecurepassword"
+	createReq := dbplugin.NewUserRequest{
+		UsernameConfig: dbplugin.UsernameMetadata{
+			DisplayName: "test",
+			RoleName:    "test",
+		},
+		Statements: dbplugin.Statements{
+			Commands: []string{createUserStatements},
+		},
+		Password:   password,
+		Expiration: time.Now().Add(1 * time.Minute),
+	}
+
+	createResp := dbtesting.AssertNewUser(t, db, createReq)
+
+	assertCreds(t, db.Hosts, db.Port, createResp.Username, password, nil, 5*time.Second)
+
+	newPassword := "somenewpassword"
+	updateReq := dbplugin.UpdateUserRequest{
+		Username: createResp.Username,
+		Password: &dbplugin.ChangePassword{
+			NewPassword: newPassword,
+			Statements:  dbplugin.Statements{},
+		},
+		Expiration: nil,
+	}
+
+	dbtesting.AssertUpdateUser(t, db, updateReq)
+
+	assertCreds(t, db.Hosts, db.Port, createResp.Username, newPassword, nil, 5*time.Second)
+}
+
+func TestDeleteUser(t *testing.T) {
+	db, cleanup := getCassandra(t, 4)
+	defer cleanup()
+
+	password := "myreallysecurepassword"
+	createReq := dbplugin.NewUserRequest{
+		UsernameConfig: dbplugin.UsernameMetadata{
+			DisplayName: "test",
+			RoleName:    "test",
+		},
+		Statements: dbplugin.Statements{
+			Commands: []string{createUserStatements},
+		},
+		Password:   password,
+		Expiration: time.Now().Add(1 * time.Minute),
+	}
+
+	createResp := dbtesting.AssertNewUser(t, db, createReq)
+
+	assertCreds(t, db.Hosts, db.Port, createResp.Username, password, nil, 5*time.Second)
+
+	deleteReq := dbplugin.DeleteUserRequest{
+		Username: createResp.Username,
+	}
+
+	dbtesting.AssertDeleteUser(t, db, deleteReq)
+
+	assertNoCreds(t, db.Hosts, db.Port, createResp.Username, password, nil, 5*time.Second)
+}
+
+func assertCreds(t testing.TB, address string, port int, username, password string, sslOpts *gocql.SslOptions, timeout time.Duration) {
+	t.Helper()
+	op := func() error {
+		return connect(t, address, port, username, password, sslOpts)
+	}
+	// With MaxInterval pinned to InitialInterval and jitter disabled, the
+	// exponential backoff degenerates to a fixed 500ms retry interval until
+	// MaxElapsedTime is hit.
+	bo := backoff.NewExponentialBackOff()
+	bo.MaxElapsedTime = timeout
+	bo.InitialInterval = 500 * time.Millisecond
+	bo.MaxInterval = bo.InitialInterval
+	bo.RandomizationFactor = 0.0
+
+	err := backoff.Retry(op, bo)
+	if err != nil {
+		t.Fatalf("failed to connect after %s: %s", timeout, err)
+	}
+}
+
+func connect(t testing.TB, address string, port int, username, password string, sslOpts *gocql.SslOptions) error {
+	t.Helper()
+	clusterConfig := gocql.NewCluster(address)
+	clusterConfig.Authenticator = gocql.PasswordAuthenticator{
+		Username: username,
+		Password: password,
+	}
+	clusterConfig.ProtoVersion = 4
+	clusterConfig.Port = port
+	clusterConfig.SslOpts = sslOpts
+
+	session, err := clusterConfig.CreateSession()
+	if err != nil {
+		return err
+	}
+	defer session.Close()
+	return nil
+}
+
+func assertNoCreds(t testing.TB, address string, port int, username, password string, sslOpts *gocql.SslOptions, timeout time.Duration) {
+	t.Helper()
+
+	op := func() error {
+		// "Invert" the error so the backoff logic sees a failure to connect
+		// as a success
+		err := connect(t, address, port, username, password, sslOpts)
+		if err != nil {
+			return nil
+		}
+		return fmt.Errorf("successfully connected when credentials should have been revoked")
+	}
+	bo := backoff.NewExponentialBackOff()
+	bo.MaxElapsedTime = timeout
+	bo.InitialInterval = 500 * time.Millisecond
+	bo.MaxInterval = bo.InitialInterval
+	bo.RandomizationFactor = 0.0
+
+	err := backoff.Retry(op, bo)
+	if err != nil {
+		t.Fatalf("successfully connected after %s when it shouldn't", timeout)
+	}
+}
+
+const createUserStatements = `CREATE USER '{{username}}' WITH PASSWORD '{{password}}' NOSUPERUSER;
+GRANT ALL PERMISSIONS ON ALL KEYSPACES TO '{{username}}';`
diff --git a/plugins/database/cassandra/connection_producer.go b/plugins/database/cassandra/connection_producer.go
new file mode 100644
index 0000000..a63ed27
--- /dev/null
+++ b/plugins/database/cassandra/connection_producer.go
@@ -0,0 +1,244 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package cassandra
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/gocql/gocql"
+	"github.com/hashicorp/go-secure-stdlib/parseutil"
+	"github.com/hashicorp/go-secure-stdlib/tlsutil"
+	dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
+	"github.com/hashicorp/vault/sdk/database/helper/connutil"
+	"github.com/hashicorp/vault/sdk/database/helper/dbutil"
+	"github.com/mitchellh/mapstructure"
+)
+
+// cassandraConnectionProducer implements ConnectionProducer and provides an
+// interface for cassandra databases to make connections.
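+//
+// A minimal sketch (values illustrative, keys taken from the mapstructure
+// tags below) of the configuration map Initialize expects:
+//
+//	config := map[string]interface{}{
+//		"hosts":            "127.0.0.1",
+//		"port":             9042,
+//		"username":         "cassandra",
+//		"password":         "cassandra",
+//		"protocol_version": 4,
+//		"connect_timeout":  "5s",
+//	}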
+type cassandraConnectionProducer struct {
+	Hosts              string      `json:"hosts" structs:"hosts" mapstructure:"hosts"`
+	Port               int         `json:"port" structs:"port" mapstructure:"port"`
+	Username           string      `json:"username" structs:"username" mapstructure:"username"`
+	Password           string      `json:"password" structs:"password" mapstructure:"password"`
+	TLS                bool        `json:"tls" structs:"tls" mapstructure:"tls"`
+	InsecureTLS        bool        `json:"insecure_tls" structs:"insecure_tls" mapstructure:"insecure_tls"`
+	TLSServerName      string      `json:"tls_server_name" structs:"tls_server_name" mapstructure:"tls_server_name"`
+	ProtocolVersion    int         `json:"protocol_version" structs:"protocol_version" mapstructure:"protocol_version"`
+	ConnectTimeoutRaw  interface{} `json:"connect_timeout" structs:"connect_timeout" mapstructure:"connect_timeout"`
+	SocketKeepAliveRaw interface{} `json:"socket_keep_alive" structs:"socket_keep_alive" mapstructure:"socket_keep_alive"`
+	TLSMinVersion      string      `json:"tls_min_version" structs:"tls_min_version" mapstructure:"tls_min_version"`
+	Consistency        string      `json:"consistency" structs:"consistency" mapstructure:"consistency"`
+	LocalDatacenter    string      `json:"local_datacenter" structs:"local_datacenter" mapstructure:"local_datacenter"`
+	PemBundle          string      `json:"pem_bundle" structs:"pem_bundle" mapstructure:"pem_bundle"`
+	PemJSON            string      `json:"pem_json" structs:"pem_json" mapstructure:"pem_json"`
+	SkipVerification   bool        `json:"skip_verification" structs:"skip_verification" mapstructure:"skip_verification"`
+
+	connectTimeout  time.Duration
+	socketKeepAlive time.Duration
+	sslOpts         *gocql.SslOptions
+	rawConfig       map[string]interface{}
+
+	Initialized bool
+	Type        string
+	session     *gocql.Session
+	sync.Mutex
+}
+
+func (c *cassandraConnectionProducer) Initialize(ctx context.Context, req dbplugin.InitializeRequest) error {
+	c.Lock()
+	defer c.Unlock()
+
+	c.rawConfig = req.Config
+
+	err := mapstructure.WeakDecode(req.Config, c)
+	if err != nil {
+		return err
+	}
+
+	if c.ConnectTimeoutRaw == nil {
+		c.ConnectTimeoutRaw = "5s"
+	}
+	c.connectTimeout, err = parseutil.ParseDurationSecond(c.ConnectTimeoutRaw)
+	if err != nil {
+		return fmt.Errorf("invalid connect_timeout: %w", err)
+	}
+
+	if c.SocketKeepAliveRaw == nil {
+		c.SocketKeepAliveRaw = "0s"
+	}
+	c.socketKeepAlive, err = parseutil.ParseDurationSecond(c.SocketKeepAliveRaw)
+	if err != nil {
+		return fmt.Errorf("invalid socket_keep_alive: %w", err)
+	}
+
+	switch {
+	case len(c.Hosts) == 0:
+		return fmt.Errorf("hosts cannot be empty")
+	case len(c.Username) == 0:
+		return fmt.Errorf("username cannot be empty")
+	case len(c.Password) == 0:
+		return fmt.Errorf("password cannot be empty")
+	case len(c.PemJSON) > 0 && len(c.PemBundle) > 0:
+		return fmt.Errorf("cannot specify both pem_json and pem_bundle")
+	}
+
+	var tlsMinVersion uint16 = tls.VersionTLS12
+	if c.TLSMinVersion != "" {
+		ver, exists := tlsutil.TLSLookup[c.TLSMinVersion]
+		if !exists {
+			return fmt.Errorf("unrecognized TLS version [%s]", c.TLSMinVersion)
+		}
+		tlsMinVersion = ver
+	}
+
+	switch {
+	case len(c.PemJSON) != 0:
+		cfg, err := jsonBundleToTLSConfig(c.PemJSON, tlsMinVersion, c.TLSServerName, c.InsecureTLS)
+		if err != nil {
+			return fmt.Errorf("failed to parse pem_json: %w", err)
+		}
+		c.sslOpts = &gocql.SslOptions{
+			Config:                 cfg,
+			EnableHostVerification: !cfg.InsecureSkipVerify,
+		}
+		c.TLS = true
+
+	case len(c.PemBundle) != 0:
+		cfg, err := pemBundleToTLSConfig(c.PemBundle, tlsMinVersion, c.TLSServerName, c.InsecureTLS)
+		if err != nil {
+			return fmt.Errorf("failed to parse pem_bundle: %w", err)
+		}
+		c.sslOpts = &gocql.SslOptions{
+			Config:                 cfg,
+			EnableHostVerification: !cfg.InsecureSkipVerify,
+		}
+		c.TLS = true
+
+	case c.InsecureTLS:
+		c.sslOpts = &gocql.SslOptions{
+			EnableHostVerification: !c.InsecureTLS,
+		}
+	}
+
+	// Set initialized to true at this point since all fields are set,
+	// and the connection can be established at a later time.
+	c.Initialized = true
+
+	if req.VerifyConnection {
+		if _, err := c.Connection(ctx); err != nil {
+			return fmt.Errorf("error verifying connection: %w", err)
+		}
+	}
+
+	return nil
+}
+
+func (c *cassandraConnectionProducer) Connection(ctx context.Context) (interface{}, error) {
+	if !c.Initialized {
+		return nil, connutil.ErrNotInitialized
+	}
+
+	// If we already have a live session, return it
+	if c.session != nil && !c.session.Closed() {
+		return c.session, nil
+	}
+
+	session, err := c.createSession(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	// Store the session in backend for reuse
+	c.session = session
+
+	return session, nil
+}
+
+func (c *cassandraConnectionProducer) Close() error {
+	c.Lock()
+	defer c.Unlock()
+
+	if c.session != nil {
+		c.session.Close()
+	}
+
+	c.session = nil
+
+	return nil
+}
+
+func (c *cassandraConnectionProducer) createSession(ctx context.Context) (*gocql.Session, error) {
+	hosts := strings.Split(c.Hosts, ",")
+	clusterConfig := gocql.NewCluster(hosts...)
+	clusterConfig.Authenticator = gocql.PasswordAuthenticator{
+		Username: c.Username,
+		Password: c.Password,
+	}
+
+	if c.Port != 0 {
+		clusterConfig.Port = c.Port
+	}
+
+	clusterConfig.ProtoVersion = c.ProtocolVersion
+	if clusterConfig.ProtoVersion == 0 {
+		clusterConfig.ProtoVersion = 2
+	}
+
+	clusterConfig.Timeout = c.connectTimeout
+	clusterConfig.ConnectTimeout = c.connectTimeout
+	clusterConfig.SocketKeepalive = c.socketKeepAlive
+	clusterConfig.SslOpts = c.sslOpts
+
+	if c.LocalDatacenter != "" {
+		clusterConfig.PoolConfig.HostSelectionPolicy = gocql.DCAwareRoundRobinPolicy(c.LocalDatacenter)
+	}
+
+	session, err := clusterConfig.CreateSession()
+	if err != nil {
+		return nil, fmt.Errorf("error creating session: %w", err)
+	}
+
+	if c.Consistency != "" {
+		consistencyValue, err := gocql.ParseConsistencyWrapper(c.Consistency)
+		if err != nil {
+			session.Close()
+			return nil, err
+		}
+
+		session.SetConsistency(consistencyValue)
+	}
+
+	if !c.SkipVerification {
+		err = session.Query(`LIST ALL`).WithContext(ctx).Exec()
+		if err != nil && len(c.Username) != 0 && strings.Contains(err.Error(), "not authorized") {
+			rowNum := session.Query(dbutil.QueryHelper(`LIST CREATE ON ALL ROLES OF '{{username}}';`, map[string]string{
+				"username": c.Username,
+			})).Iter().NumRows()
+
+			if rowNum < 1 {
+				session.Close()
+				return nil, fmt.Errorf("error validating connection info: no role create permissions found, previous error: %w", err)
+			}
+		} else if err != nil {
+			session.Close()
+			return nil, fmt.Errorf("error validating connection info: %w", err)
+		}
+	}
+
+	return session, nil
+}
+
+func (c *cassandraConnectionProducer) secretValues() map[string]string {
+	return map[string]string{
+		c.Password:  "[password]",
+		c.PemBundle: "[pem_bundle]",
+		c.PemJSON:   "[pem_json]",
+	}
+}
diff --git a/plugins/database/cassandra/connection_producer_test.go b/plugins/database/cassandra/connection_producer_test.go
new file mode 100644
index 0000000..e2f4ba0
--- /dev/null
+++ b/plugins/database/cassandra/connection_producer_test.go
@@ -0,0 +1,233 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package cassandra
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/json"
+	"io/ioutil"
+	"testing"
+	"time"
+
+	"github.com/gocql/gocql"
+	"github.com/hashicorp/vault/helper/testhelpers/cassandra"
+	"github.com/hashicorp/vault/sdk/database/dbplugin/v5"
+	dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing"
+	"github.com/hashicorp/vault/sdk/helper/certutil"
+	"github.com/stretchr/testify/require"
+)
+
+var insecureFileMounts = map[string]string{
+	"test-fixtures/no_tls/cassandra.yaml": "/etc/cassandra/cassandra.yaml",
+}
+
+func TestSelfSignedCA(t *testing.T) {
+	copyFromTo := map[string]string{
+		"test-fixtures/with_tls/stores":  "/bitnami/cassandra/secrets/",
+		"test-fixtures/with_tls/cqlshrc": "/.cassandra/cqlshrc",
+	}
+
+	tlsConfig := loadServerCA(t, "test-fixtures/with_tls/ca.pem")
+	// Note about CI behavior: when running these tests locally, they seem to pass without issue. However, if the
+	// ServerName is not set, the tests fail within CI. It's not entirely clear to me why they are failing in CI
+	// however by manually setting the ServerName we can get around the hostname/DNS issue and get them passing.
+	// Setting the ServerName isn't the ideal solution, but it was the only reliable one I was able to find
+	tlsConfig.ServerName = "cassandra"
+	sslOpts := &gocql.SslOptions{
+		Config:                 tlsConfig,
+		EnableHostVerification: true,
+	}
+
+	host, cleanup := cassandra.PrepareTestContainer(t,
+		cassandra.ContainerName("cassandra"),
+		cassandra.Image("bitnami/cassandra", "3.11.11"),
+		cassandra.CopyFromTo(copyFromTo),
+		cassandra.SslOpts(sslOpts),
+		cassandra.Env("CASSANDRA_KEYSTORE_PASSWORD=cassandra"),
+		cassandra.Env("CASSANDRA_TRUSTSTORE_PASSWORD=cassandra"),
+		cassandra.Env("CASSANDRA_INTERNODE_ENCRYPTION=none"),
+		cassandra.Env("CASSANDRA_CLIENT_ENCRYPTION=true"),
+	)
+	t.Cleanup(cleanup)
+
+	type testCase struct {
+		config    map[string]interface{}
+		expectErr bool
+	}
+
+	caPEM := loadFile(t, "test-fixtures/with_tls/ca.pem")
+	badCAPEM := loadFile(t, "test-fixtures/with_tls/bad_ca.pem")
+
+	tests := map[string]testCase{
+		// ///////////////////////
+		// pem_json tests
+		"pem_json/ca only": {
+			config: map[string]interface{}{
+				"pem_json": toJSON(t, certutil.CertBundle{
+					CAChain: []string{caPEM},
+				}),
+			},
+			expectErr: false,
+		},
+		"pem_json/bad ca": {
+			config: map[string]interface{}{
+				"pem_json": toJSON(t, certutil.CertBundle{
+					CAChain: []string{badCAPEM},
+				}),
+			},
+			expectErr: true,
+		},
+		"pem_json/missing ca": {
+			config: map[string]interface{}{
+				"pem_json": "",
+			},
+			expectErr: true,
+		},
+
+		// ///////////////////////
+		// pem_bundle tests
+		"pem_bundle/ca only": {
+			config: map[string]interface{}{
+				"pem_bundle": caPEM,
+			},
+			expectErr: false,
+		},
+		"pem_bundle/unrecognized CA": {
+			config: map[string]interface{}{
+				"pem_bundle": badCAPEM,
+			},
+			expectErr: true,
+		},
+		"pem_bundle/missing ca": {
+			config: map[string]interface{}{
+				"pem_bundle": "",
+			},
+			expectErr: true,
+		},
+
+		// ///////////////////////
+		// no cert data provided
+		"no cert data/tls=true": {
+			config: map[string]interface{}{
+				"tls": "true",
+			},
+			expectErr: true,
+		},
+		"no cert data/tls=false": {
+			config: map[string]interface{}{
+				"tls": "false",
+			},
+			expectErr: true,
+		},
+		"no cert data/insecure_tls": {
+			config: map[string]interface{}{
+				"insecure_tls": "true",
+			},
+			expectErr: false,
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			// Set values that we don't know until the cassandra container is started
+			config := map[string]interface{}{
+				"hosts":            host.Name,
+				"port":             host.Port,
+				"username":         "cassandra",
+				"password":         "cassandra",
+				"protocol_version": "4",
+				"connect_timeout":  "30s",
+				"tls":              "true",
+
+				// Note about CI behavior: when running these tests locally, they seem to pass without issue. However, if the
+				// tls_server_name is not set, the tests fail within CI. It's not entirely clear to me why they are failing in CI
+				// however by manually setting the tls_server_name we can get around the hostname/DNS issue and get them passing.
+				// Setting the tls_server_name isn't the ideal solution, but it was the only reliable one I was able to find
+				"tls_server_name": "cassandra",
+			}
+
+			// Apply the generated & common fields to the config to be sent to the DB
+			for k, v := range test.config {
+				config[k] = v
+			}
+
+			db := new()
+			initReq := dbplugin.InitializeRequest{
+				Config:           config,
+				VerifyConnection: true,
+			}
+
+			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+			defer cancel()
+
+			_, err := db.Initialize(ctx, initReq)
+			if test.expectErr && err == nil {
+				t.Fatalf("err expected, got nil")
+			}
+			if !test.expectErr && err != nil {
+				t.Fatalf("no error expected, got: %s", err)
+			}
+
+			// If no error expected, run a NewUser query to make sure the connection
+			// actually works in case Initialize doesn't catch it
+			if !test.expectErr {
+				assertNewUser(t, db, sslOpts)
+			}
+		})
+	}
+}
+
+func assertNewUser(t *testing.T, db *Cassandra, sslOpts *gocql.SslOptions) {
+	newUserReq := dbplugin.NewUserRequest{
+		UsernameConfig: dbplugin.UsernameMetadata{
+			DisplayName: "dispname",
+			RoleName:    "rolename",
+		},
+		Statements: dbplugin.Statements{
+			Commands: []string{
+				"create user '{{username}}' with password '{{password}}'",
+			},
+		},
+		RollbackStatements: dbplugin.Statements{},
+		Password:           "gh8eruajASDFAsgy89svn",
+		Expiration:         time.Now().Add(5 * time.Second),
+	}
+
+	newUserResp := dbtesting.AssertNewUser(t, db, newUserReq)
+	t.Logf("Username: %s", newUserResp.Username)
+
+	assertCreds(t, db.Hosts, db.Port, newUserResp.Username, newUserReq.Password, sslOpts, 5*time.Second)
+}
+
+func loadServerCA(t *testing.T, file string) *tls.Config {
+	t.Helper()
+
+	pemData, err := ioutil.ReadFile(file)
+	require.NoError(t, err)
+
+	pool := x509.NewCertPool()
+	pool.AppendCertsFromPEM(pemData)
+
+	config := &tls.Config{
+		RootCAs: pool,
+	}
+	return config
+}
+
+func loadFile(t *testing.T, filename string) string {
+	t.Helper()
+
+	contents, err := ioutil.ReadFile(filename)
+	require.NoError(t, err)
+	return string(contents)
+}
+
+func toJSON(t *testing.T, val interface{}) string {
+	t.Helper()
+	b, err := json.Marshal(val)
+	require.NoError(t, err)
+	return string(b)
+}
diff --git a/plugins/database/cassandra/test-fixtures/no_tls/cassandra.yaml b/plugins/database/cassandra/test-fixtures/no_tls/cassandra.yaml
new file mode 100644
index 0000000..4819969
--- /dev/null
+++ b/plugins/database/cassandra/test-fixtures/no_tls/cassandra.yaml
@@ -0,0 +1,1149 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+# Cassandra storage config YAML
+
+# NOTE:
+# See http://wiki.apache.org/cassandra/StorageConfiguration for
+# full explanations of configuration directives
+# /NOTE
+
+# The name of the cluster. This is mainly used to prevent machines in
+# one logical cluster from joining another.
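+#
+# For the Vault plugin tests that mount this fixture, the relevant settings
+# are authenticator: PasswordAuthenticator and authorizer: CassandraAuthorizer
+# below, which enable the username/password management the plugin exercises.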
+cluster_name: 'Test Cluster' + +# This defines the number of tokens randomly assigned to this node on the ring +# The more tokens, relative to other nodes, the larger the proportion of data +# that this node will store. You probably want all nodes to have the same number +# of tokens assuming they have equal hardware capability. +# +# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, +# and will use the initial_token as described below. +# +# Specifying initial_token will override this setting on the node's initial start, +# on subsequent starts, this setting will apply even if initial token is set. +# +# If you already have a cluster with 1 token per node, and wish to migrate to +# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations +num_tokens: 256 + +# Triggers automatic allocation of num_tokens tokens for this node. The allocation +# algorithm attempts to choose tokens in a way that optimizes replicated load over +# the nodes in the datacenter for the replication strategy used by the specified +# keyspace. +# +# The load assigned to each node will be close to proportional to its number of +# vnodes. +# +# Only supported with the Murmur3Partitioner. +# allocate_tokens_for_keyspace: KEYSPACE + +# initial_token allows you to specify tokens manually. While you can use it with +# vnodes (num_tokens > 1, above) -- in which case you should provide a +# comma-separated list -- it's primarily used when adding nodes to legacy clusters +# that do not have vnodes enabled. +# initial_token: + +# See http://wiki.apache.org/cassandra/HintedHandoff +# May either be "true" or "false" to enable globally +hinted_handoff_enabled: true + +# When hinted_handoff_enabled is true, a black list of data centers that will not +# perform hinted handoff +# hinted_handoff_disabled_datacenters: +# - DC1 +# - DC2 + +# this defines the maximum amount of time a dead host will have hints +# generated. After it has been dead this long, new hints for it will not be +# created until it has been seen alive and gone down again. +max_hint_window_in_ms: 10800000 # 3 hours + +# Maximum throttle in KBs per second, per delivery thread. This will be +# reduced proportionally to the number of nodes in the cluster. (If there +# are two nodes in the cluster, each delivery thread will use the maximum +# rate; if there are three, each will throttle to half of the maximum, +# since we expect two nodes to be delivering hints simultaneously.) +hinted_handoff_throttle_in_kb: 1024 + +# Number of threads with which to deliver hints; +# Consider increasing this number when you have multi-dc deployments, since +# cross-dc handoff tends to be slower +max_hints_delivery_threads: 2 + +# Directory where Cassandra should store hints. +# If not set, the default directory is $CASSANDRA_HOME/data/hints. +# hints_directory: /var/lib/cassandra/hints + +# How often hints should be flushed from the internal buffers to disk. +# Will *not* trigger fsync. +hints_flush_period_in_ms: 10000 + +# Maximum size for a single hints file, in megabytes. +max_hints_file_size_in_mb: 128 + +# Compression to apply to the hint files. If omitted, hints files +# will be written uncompressed. LZ4, Snappy, and Deflate compressors +# are supported. +#hints_compression: +# - class_name: LZ4Compressor +# parameters: +# - + +# Maximum throttle in KBs per second, total. This will be +# reduced proportionally to the number of nodes in the cluster. 
+batchlog_replay_throttle_in_kb: 1024 + +# Authentication backend, implementing IAuthenticator; used to identify users +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, +# PasswordAuthenticator}. +# +# - AllowAllAuthenticator performs no checks - set it to disable authentication. +# - PasswordAuthenticator relies on username/password pairs to authenticate +# users. It keeps usernames and hashed passwords in system_auth.credentials table. +# Please increase system_auth keyspace replication factor if you use this authenticator. +# If using PasswordAuthenticator, CassandraRoleManager must also be used (see below) +authenticator: PasswordAuthenticator + +# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, +# CassandraAuthorizer}. +# +# - AllowAllAuthorizer allows any action to any user - set it to disable authorization. +# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please +# increase system_auth keyspace replication factor if you use this authorizer. +authorizer: CassandraAuthorizer + +# Part of the Authentication & Authorization backend, implementing IRoleManager; used +# to maintain grants and memberships between roles. +# Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager, +# which stores role information in the system_auth keyspace. Most functions of the +# IRoleManager require an authenticated login, so unless the configured IAuthenticator +# actually implements authentication, most of this functionality will be unavailable. +# +# - CassandraRoleManager stores role data in the system_auth keyspace. Please +# increase system_auth keyspace replication factor if you use this role manager. +role_manager: CassandraRoleManager + +# Validity period for roles cache (fetching granted roles can be an expensive +# operation depending on the role manager, CassandraRoleManager is one example) +# Granted roles are cached for authenticated sessions in AuthenticatedUser and +# after the period specified here, become eligible for (async) reload. +# Defaults to 2000, set to 0 to disable caching entirely. +# Will be disabled automatically for AllowAllAuthenticator. +roles_validity_in_ms: 2000 + +# Refresh interval for roles cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If roles_validity_in_ms is non-zero, then this must be +# also. +# Defaults to the same value as roles_validity_in_ms. +# roles_update_interval_in_ms: 2000 + +# Validity period for permissions cache (fetching permissions can be an +# expensive operation depending on the authorizer, CassandraAuthorizer is +# one example). Defaults to 2000, set to 0 to disable. +# Will be disabled automatically for AllowAllAuthorizer. +permissions_validity_in_ms: 2000 + +# Refresh interval for permissions cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If permissions_validity_in_ms is non-zero, then this must be +# also. +# Defaults to the same value as permissions_validity_in_ms. +# permissions_update_interval_in_ms: 2000 + +# Validity period for credentials cache. 
This cache is tightly coupled to +# the provided PasswordAuthenticator implementation of IAuthenticator. If +# another IAuthenticator implementation is configured, this cache will not +# be automatically used and so the following settings will have no effect. +# Please note, credentials are cached in their encrypted form, so while +# activating this cache may reduce the number of queries made to the +# underlying table, it may not bring a significant reduction in the +# latency of individual authentication attempts. +# Defaults to 2000, set to 0 to disable credentials caching. +credentials_validity_in_ms: 2000 + +# Refresh interval for credentials cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If credentials_validity_in_ms is non-zero, then this must be +# also. +# Defaults to the same value as credentials_validity_in_ms. +# credentials_update_interval_in_ms: 2000 + +# The partitioner is responsible for distributing groups of rows (by +# partition key) across nodes in the cluster. You should leave this +# alone for new clusters. The partitioner can NOT be changed without +# reloading all data, so when upgrading you should set this to the +# same partitioner you were already using. +# +# Besides Murmur3Partitioner, partitioners included for backwards +# compatibility include RandomPartitioner, ByteOrderedPartitioner, and +# OrderPreservingPartitioner. +# +partitioner: org.apache.cassandra.dht.Murmur3Partitioner + +# Directories where Cassandra should store data on disk. Cassandra +# will spread data evenly across them, subject to the granularity of +# the configured compaction strategy. +# If not set, the default directory is $CASSANDRA_HOME/data/data. +data_file_directories: + - /var/lib/cassandra/data + +# commit log. when running on magnetic HDD, this should be a +# separate spindle than the data directories. +# If not set, the default directory is $CASSANDRA_HOME/data/commitlog. +commitlog_directory: /var/lib/cassandra/commitlog + +# Enable / disable CDC functionality on a per-node basis. This modifies the logic used +# for write path allocation rejection (standard: never reject. cdc: reject Mutation +# containing a CDC-enabled table if at space limit in cdc_raw_directory). +cdc_enabled: false + +# CommitLogSegments are moved to this directory on flush if cdc_enabled: true and the +# segment contains mutations for a CDC-enabled table. This should be placed on a +# separate spindle than the data directories. If not set, the default directory is +# $CASSANDRA_HOME/data/cdc_raw. +# cdc_raw_directory: /var/lib/cassandra/cdc_raw + +# Policy for data disk failures: +# +# die +# shut down gossip and client transports and kill the JVM for any fs errors or +# single-sstable errors, so the node can be replaced. +# +# stop_paranoid +# shut down gossip and client transports even for single-sstable errors, +# kill the JVM for errors during startup. +# +# stop +# shut down gossip and client transports, leaving the node effectively dead, but +# can still be inspected via JMX, kill the JVM for errors during startup. +# +# best_effort +# stop using the failed disk and respond to requests based on +# remaining available sstables. This means you WILL see obsolete +# data at CL.ONE! 
+# +# ignore +# ignore fatal errors and let requests fail, as in pre-1.2 Cassandra +disk_failure_policy: stop + +# Policy for commit disk failures: +# +# die +# shut down gossip and Thrift and kill the JVM, so the node can be replaced. +# +# stop +# shut down gossip and Thrift, leaving the node effectively dead, but +# can still be inspected via JMX. +# +# stop_commit +# shutdown the commit log, letting writes collect but +# continuing to service reads, as in pre-2.0.5 Cassandra +# +# ignore +# ignore fatal errors and let the batches fail +commit_failure_policy: stop + +# Maximum size of the native protocol prepared statement cache +# +# Valid values are either "auto" (omitting the value) or a value greater 0. +# +# Note that specifying a too large value will result in long running GCs and possibly +# out-of-memory errors. Keep the value at a small fraction of the heap. +# +# If you constantly see "prepared statements discarded in the last minute because +# cache limit reached" messages, the first step is to investigate the root cause +# of these messages and check whether prepared statements are used correctly - +# i.e. use bind markers for variable parts. +# +# Do only change the default value, if you really have more prepared statements than +# fit in the cache. In most cases it is not necessary to change this value. +# Constantly re-preparing statements is a performance penalty. +# +# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater +prepared_statements_cache_size_mb: + +# Maximum size of the Thrift prepared statement cache +# +# If you do not use Thrift at all, it is safe to leave this value at "auto". +# +# See description of 'prepared_statements_cache_size_mb' above for more information. +# +# Default value ("auto") is 1/256th of the heap or 10MB, whichever is greater +thrift_prepared_statements_cache_size_mb: + +# Maximum size of the key cache in memory. +# +# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the +# minimum, sometimes more. The key cache is fairly tiny for the amount of +# time it saves, so it's worthwhile to use it at large numbers. +# The row cache saves even more time, but must contain the entire row, +# so it is extremely space-intensive. It's best to only use the +# row cache if you have hot rows or static rows. +# +# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. +# +# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. +key_cache_size_in_mb: + +# Duration in seconds after which Cassandra should +# save the key cache. Caches are saved to saved_caches_directory as +# specified in this configuration file. +# +# Saved caches greatly improve cold-start speeds, and is relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 14400 or 4 hours. +key_cache_save_period: 14400 + +# Number of keys from the key cache to save +# Disabled by default, meaning all keys are going to be saved +# key_cache_keys_to_save: 100 + +# Row cache implementation class name. Available implementations: +# +# org.apache.cassandra.cache.OHCProvider +# Fully off-heap row cache implementation (default). +# +# org.apache.cassandra.cache.SerializingCacheProvider +# This is the row cache implementation availabile +# in previous releases of Cassandra. +# row_cache_class_name: org.apache.cassandra.cache.OHCProvider + +# Maximum size of the row cache in memory. 
+# Please note that OHC cache implementation requires some additional off-heap memory to manage +# the map structures and some in-flight memory during operations before/after cache entries can be +# accounted against the cache capacity. This overhead is usually small compared to the whole capacity. +# Do not specify more memory that the system can afford in the worst usual situation and leave some +# headroom for OS block level cache. Do never allow your system to swap. +# +# Default value is 0, to disable row caching. +row_cache_size_in_mb: 0 + +# Duration in seconds after which Cassandra should save the row cache. +# Caches are saved to saved_caches_directory as specified in this configuration file. +# +# Saved caches greatly improve cold-start speeds, and is relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 0 to disable saving the row cache. +row_cache_save_period: 0 + +# Number of keys from the row cache to save. +# Specify 0 (which is the default), meaning all keys are going to be saved +# row_cache_keys_to_save: 100 + +# Maximum size of the counter cache in memory. +# +# Counter cache helps to reduce counter locks' contention for hot counter cells. +# In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before +# write entirely. With RF > 1 a counter cache hit will still help to reduce the duration +# of the lock hold, helping with hot counter cell updates, but will not allow skipping +# the read entirely. Only the local (clock, count) tuple of a counter cell is kept +# in memory, not the whole counter, so it's relatively cheap. +# +# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. +# +# Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache. +# NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache. +counter_cache_size_in_mb: + +# Duration in seconds after which Cassandra should +# save the counter cache (keys only). Caches are saved to saved_caches_directory as +# specified in this configuration file. +# +# Default is 7200 or 2 hours. +counter_cache_save_period: 7200 + +# Number of keys from the counter cache to save +# Disabled by default, meaning all keys are going to be saved +# counter_cache_keys_to_save: 100 + +# saved caches +# If not set, the default directory is $CASSANDRA_HOME/data/saved_caches. +saved_caches_directory: /var/lib/cassandra/saved_caches + +# commitlog_sync may be either "periodic" or "batch." +# +# When in batch mode, Cassandra won't ack writes until the commit log +# has been fsynced to disk. It will wait +# commitlog_sync_batch_window_in_ms milliseconds between fsyncs. +# This window should be kept short because the writer threads will +# be unable to do extra work while waiting. (You may need to increase +# concurrent_writes for the same reason.) +# +# commitlog_sync: batch +# commitlog_sync_batch_window_in_ms: 2 +# +# the other option is "periodic" where writes may be acked immediately +# and the CommitLog is simply synced every commitlog_sync_period_in_ms +# milliseconds. +commitlog_sync: periodic +commitlog_sync_period_in_ms: 10000 + +# The size of the individual commitlog file segments. A commitlog +# segment may be archived, deleted, or recycled once all the data +# in it (potentially from each columnfamily in the system) has been +# flushed to sstables. 
+# +# The default size is 32, which is almost always fine, but if you are +# archiving commitlog segments (see commitlog_archiving.properties), +# then you probably want a finer granularity of archiving; 8 or 16 MB +# is reasonable. +# Max mutation size is also configurable via max_mutation_size_in_kb setting in +# cassandra.yaml. The default is half the size commitlog_segment_size_in_mb * 1024. +# +# NOTE: If max_mutation_size_in_kb is set explicitly then commitlog_segment_size_in_mb must +# be set to at least twice the size of max_mutation_size_in_kb / 1024 +# +commitlog_segment_size_in_mb: 32 + +# Compression to apply to the commit log. If omitted, the commit log +# will be written uncompressed. LZ4, Snappy, and Deflate compressors +# are supported. +# commitlog_compression: +# - class_name: LZ4Compressor +# parameters: +# - + +# any class that implements the SeedProvider interface and has a +# constructor that takes a Map of parameters will do. +seed_provider: + # Addresses of hosts that are deemed contact points. + # Cassandra nodes use this list of hosts to find each other and learn + # the topology of the ring. You must change this if you are running + # multiple nodes! + - class_name: org.apache.cassandra.locator.SimpleSeedProvider + parameters: + # seeds is actually a comma-delimited list of addresses. + # Ex: ",," + - seeds: "127.0.0.1" + +# For workloads with more data than can fit in memory, Cassandra's +# bottleneck will be reads that need to fetch data from +# disk. "concurrent_reads" should be set to (16 * number_of_drives) in +# order to allow the operations to enqueue low enough in the stack +# that the OS and drives can reorder them. Same applies to +# "concurrent_counter_writes", since counter writes read the current +# values before incrementing and writing them back. +# +# On the other hand, since writes are almost never IO bound, the ideal +# number of "concurrent_writes" is dependent on the number of cores in +# your system; (8 * number_of_cores) is a good rule of thumb. +concurrent_reads: 32 +concurrent_writes: 32 +concurrent_counter_writes: 32 + +# For materialized view writes, as there is a read involved, so this should +# be limited by the less of concurrent reads or concurrent writes. +concurrent_materialized_view_writes: 32 + +# Maximum memory to use for sstable chunk cache and buffer pooling. +# 32MB of this are reserved for pooling buffers, the rest is used as an +# cache that holds uncompressed sstable chunks. +# Defaults to the smaller of 1/4 of heap or 512MB. This pool is allocated off-heap, +# so is in addition to the memory allocated for heap. The cache also has on-heap +# overhead which is roughly 128 bytes per chunk (i.e. 0.2% of the reserved size +# if the default 64k chunk size is used). +# Memory is only allocated when needed. +# file_cache_size_in_mb: 512 + +# Flag indicating whether to allocate on or off heap when the sstable buffer +# pool is exhausted, that is when it has exceeded the maximum memory +# file_cache_size_in_mb, beyond which it will not cache buffers but allocate on request. + +# buffer_pool_use_heap_if_exhausted: true + +# The strategy for optimizing disk read +# Possible values are: +# ssd (for solid state disks, the default) +# spinning (for spinning disks) +# disk_optimization_strategy: ssd + +# Total permitted memory to use for memtables. 
Cassandra will stop +# accepting writes when the limit is exceeded until a flush completes, +# and will trigger a flush based on memtable_cleanup_threshold +# If omitted, Cassandra will set both to 1/4 the size of the heap. +# memtable_heap_space_in_mb: 2048 +# memtable_offheap_space_in_mb: 2048 + +# Ratio of occupied non-flushing memtable size to total permitted size +# that will trigger a flush of the largest memtable. Larger mct will +# mean larger flushes and hence less compaction, but also less concurrent +# flush activity which can make it difficult to keep your disks fed +# under heavy write load. +# +# memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1) +# memtable_cleanup_threshold: 0.11 + +# Specify the way Cassandra allocates and manages memtable memory. +# Options are: +# +# heap_buffers +# on heap nio buffers +# +# offheap_buffers +# off heap (direct) nio buffers +# +# offheap_objects +# off heap objects +memtable_allocation_type: heap_buffers + +# Total space to use for commit logs on disk. +# +# If space gets above this value, Cassandra will flush every dirty CF +# in the oldest segment and remove it. So a small total commitlog space +# will tend to cause more flush activity on less-active columnfamilies. +# +# The default value is the smaller of 8192, and 1/4 of the total space +# of the commitlog volume. +# +# commitlog_total_space_in_mb: 8192 + +# This sets the amount of memtable flush writer threads. These will +# be blocked by disk io, and each one will hold a memtable in memory +# while blocked. +# +# memtable_flush_writers defaults to one per data_file_directory. +# +# If your data directories are backed by SSD, you can increase this, but +# avoid having memtable_flush_writers * data_file_directories > number of cores +#memtable_flush_writers: 1 + +# Total space to use for change-data-capture logs on disk. +# +# If space gets above this value, Cassandra will throw WriteTimeoutException +# on Mutations including tables with CDC enabled. A CDCCompactor is responsible +# for parsing the raw CDC logs and deleting them when parsing is completed. +# +# The default value is the min of 4096 mb and 1/8th of the total space +# of the drive where cdc_raw_directory resides. +# cdc_total_space_in_mb: 4096 + +# When we hit our cdc_raw limit and the CDCCompactor is either running behind +# or experiencing backpressure, we check at the following interval to see if any +# new space for cdc-tracked tables has been made available. Default to 250ms +# cdc_free_space_check_interval_ms: 250 + +# A fixed memory pool size in MB for for SSTable index summaries. If left +# empty, this will default to 5% of the heap size. If the memory usage of +# all index summaries exceeds this limit, SSTables with low read rates will +# shrink their index summaries in order to meet this limit. However, this +# is a best-effort process. In extreme conditions Cassandra may need to use +# more than this amount of memory. +index_summary_capacity_in_mb: + +# How frequently index summaries should be resampled. This is done +# periodically to redistribute memory from the fixed-size pool to sstables +# proportional their recent read rates. Setting to -1 will disable this +# process, leaving existing index summaries at their current sampling level. +index_summary_resize_interval_in_minutes: 60 + +# Whether to, when doing sequential writing, fsync() at intervals in +# order to force the operating system to flush the dirty +# buffers. 
Enable this to avoid sudden dirty buffer flushing from +# impacting read latencies. Almost always a good idea on SSDs; not +# necessarily on platters. +trickle_fsync: false +trickle_fsync_interval_in_kb: 10240 + +# TCP port, for commands and data +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +storage_port: 7000 + +# SSL port, for encrypted communication. Unused unless enabled in +# encryption_options +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +ssl_storage_port: 7001 + +# Address or interface to bind to and tell other Cassandra nodes to connect to. +# You _must_ change this if you want multiple nodes to be able to communicate! +# +# Set listen_address OR listen_interface, not both. +# +# Leaving it blank leaves it up to InetAddress.getLocalHost(). This +# will always do the Right Thing _if_ the node is properly configured +# (hostname, name resolution, etc), and the Right Thing is to use the +# address associated with the hostname (it might not be). +# +# Setting listen_address to 0.0.0.0 is always wrong. +# +listen_address: 172.17.0.3 + +# Set listen_address OR listen_interface, not both. Interfaces must correspond +# to a single address, IP aliasing is not supported. +# listen_interface: eth0 + +# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address +# you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4 +# address will be used. If true the first ipv6 address will be used. Defaults to false preferring +# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. +# listen_interface_prefer_ipv6: false + +# Address to broadcast to other Cassandra nodes +# Leaving this blank will set it to the same value as listen_address +broadcast_address: 127.0.0.1 + +# When using multiple physical network interfaces, set this +# to true to listen on broadcast_address in addition to +# the listen_address, allowing nodes to communicate in both +# interfaces. +# Ignore this property if the network configuration automatically +# routes between the public and private networks such as EC2. +# listen_on_broadcast_address: false + +# Internode authentication backend, implementing IInternodeAuthenticator; +# used to allow/disallow connections from peer nodes. +# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator + +# Whether to start the native transport server. +# Please note that the address on which the native transport is bound is the +# same as the rpc_address. The port however is different and specified below. +start_native_transport: true +# port for the CQL native transport to listen for clients on +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +native_transport_port: 9042 +# Enabling native transport encryption in client_encryption_options allows you to either use +# encryption for the standard port or to use a dedicated, additional port along with the unencrypted +# standard native_transport_port. +# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption +# for native_transport_port. Setting native_transport_port_ssl to a different value +# from native_transport_port will use encryption for native_transport_port_ssl while +# keeping native_transport_port unencrypted. 
+# native_transport_port_ssl: 9142 +# The maximum threads for handling requests when the native transport is used. +# This is similar to rpc_max_threads though the default differs slightly (and +# there is no native_transport_min_threads, idle threads will always be stopped +# after 30 seconds). +# native_transport_max_threads: 128 +# +# The maximum size of allowed frame. Frame (requests) larger than this will +# be rejected as invalid. The default is 256MB. If you're changing this parameter, +# you may want to adjust max_value_size_in_mb accordingly. +# native_transport_max_frame_size_in_mb: 256 + +# The maximum number of concurrent client connections. +# The default is -1, which means unlimited. +# native_transport_max_concurrent_connections: -1 + +# The maximum number of concurrent client connections per source ip. +# The default is -1, which means unlimited. +# native_transport_max_concurrent_connections_per_ip: -1 + +# Whether to start the thrift rpc server. +start_rpc: false + +# The address or interface to bind the Thrift RPC service and native transport +# server to. +# +# Set rpc_address OR rpc_interface, not both. +# +# Leaving rpc_address blank has the same effect as on listen_address +# (i.e. it will be based on the configured hostname of the node). +# +# Note that unlike listen_address, you can specify 0.0.0.0, but you must also +# set broadcast_rpc_address to a value other than 0.0.0.0. +# +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +rpc_address: 0.0.0.0 + +# Set rpc_address OR rpc_interface, not both. Interfaces must correspond +# to a single address, IP aliasing is not supported. +# rpc_interface: eth1 + +# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address +# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 +# address will be used. If true the first ipv6 address will be used. Defaults to false preferring +# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. +# rpc_interface_prefer_ipv6: false + +# port for Thrift to listen for clients on +rpc_port: 9160 + +# RPC address to broadcast to drivers and other Cassandra nodes. This cannot +# be set to 0.0.0.0. If left blank, this will be set to the value of +# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must +# be set. +broadcast_rpc_address: 127.0.0.1 + +# enable or disable keepalive on rpc/native connections +rpc_keepalive: true + +# Cassandra provides two out-of-the-box options for the RPC Server: +# +# sync +# One thread per thrift connection. For a very large number of clients, memory +# will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size +# per thread, and that will correspond to your use of virtual memory (but physical memory +# may be limited depending on use of stack space). +# +# hsha +# Stands for "half synchronous, half asynchronous." All thrift clients are handled +# asynchronously using a small number of threads that does not vary with the amount +# of thrift clients (and thus scales well to many clients). The rpc requests are still +# synchronous (one thread per active request). If hsha is selected then it is essential +# that rpc_max_threads is changed from the default value of unlimited. +# +# The default is sync because on Windows hsha is about 30% slower. On Linux, +# sync/hsha performance is about the same, with hsha of course using less memory. 
+# +# Alternatively, you can provide your own RPC server by providing the fully-qualified class name +# of an o.a.c.t.TServerFactory that can create an instance of it. +rpc_server_type: sync + +# Uncomment rpc_min|max_threads to set request pool size limits. +# +# Regardless of your choice of RPC server (see above), the number of maximum requests in the +# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync +# RPC server, it also dictates the number of clients that can be connected at all). +# +# The default is unlimited and thus provides no protection against clients overwhelming the server. You are +# encouraged to set a maximum that makes sense for you in production, but do keep in mind that +# rpc_max_threads represents the maximum number of client requests this server may execute concurrently. +# +# rpc_min_threads: 16 +# rpc_max_threads: 2048 + +# uncomment to set socket buffer sizes on rpc connections +# rpc_send_buff_size_in_bytes: +# rpc_recv_buff_size_in_bytes: + +# Uncomment to set socket buffer size for internode communication +# Note that when setting this, the buffer size is limited by net.core.wmem_max +# and when not setting it, it is defined by net.ipv4.tcp_wmem +# See also: +# /proc/sys/net/core/wmem_max +# /proc/sys/net/core/rmem_max +# /proc/sys/net/ipv4/tcp_wmem +# /proc/sys/net/ipv4/tcp_rmem +# and 'man tcp' +# internode_send_buff_size_in_bytes: + +# Uncomment to set socket buffer size for internode communication +# Note that when setting this, the buffer size is limited by net.core.wmem_max +# and when not setting it, it is defined by net.ipv4.tcp_wmem +# internode_recv_buff_size_in_bytes: + +# Frame size for thrift (maximum message length). +thrift_framed_transport_size_in_mb: 15 + +# Set to true to have Cassandra create a hard link to each sstable +# flushed or streamed locally in a backups/ subdirectory of the +# keyspace data. Removing these links is the operator's +# responsibility. +incremental_backups: false + +# Whether or not to take a snapshot before each compaction. Be +# careful using this option, since Cassandra won't clean up the +# snapshots for you. Mostly useful if you're paranoid when there +# is a data format change. +snapshot_before_compaction: false + +# Whether or not a snapshot is taken of the data before keyspace truncation +# or dropping of column families. The STRONGLY advised default of true +# should be used to provide data safety. If you set this flag to false, you will +# lose data on truncation or drop. +auto_snapshot: true + +# Granularity of the collation index of rows within a partition. +# Increase if your rows are large, or if you have a very large +# number of rows per partition. The competing goals are these: +# +# - a smaller granularity means more index entries are generated +# and looking up rows within the partition by collation column +# is faster +# - but, Cassandra will keep the collation index in memory for hot +# rows (as part of the key cache), so a larger granularity means +# you can cache more hot rows +column_index_size_in_kb: 64 + +# Per sstable indexed key cache entries (the collation index in memory +# mentioned above) exceeding this size will not be held on heap. +# This means that only partition information is held on heap and the +# index entries are read from disk. +# +# Note that this size refers to the size of the +# serialized index information and not the size of the partition.
+column_index_cache_size_in_kb: 2 + +# Number of simultaneous compactions to allow, NOT including +# validation "compactions" for anti-entropy repair. Simultaneous +# compactions can help preserve read performance in a mixed read/write +# workload, by mitigating the tendency of small sstables to accumulate +# during a single long-running compaction. The default is usually +# fine, and if you experience problems with compaction running too +# slowly or too fast, you should look at +# compaction_throughput_mb_per_sec first. +# +# concurrent_compactors defaults to the smaller of (number of disks, +# number of cores), with a minimum of 2 and a maximum of 8. +# +# If your data directories are backed by SSD, you should increase this +# to the number of cores. +#concurrent_compactors: 1 + +# Throttles compaction to the given total throughput across the entire +# system. The faster you insert data, the faster you need to compact in +# order to keep the sstable count down, but in general, setting this to +# 16 to 32 times the rate you are inserting data is more than sufficient. +# Setting this to 0 disables throttling. Note that this accounts for all types +# of compaction, including validation compaction. +compaction_throughput_mb_per_sec: 16 + +# When compacting, the replacement sstable(s) can be opened before they +# are completely written, and used in place of the prior sstables for +# any range that has been written. This helps to smoothly transfer reads +# between the sstables, reducing page cache churn and keeping hot rows hot +sstable_preemptive_open_interval_in_mb: 50 + +# Throttles all outbound streaming file transfers on this node to the +# given total throughput in Mbps. This is necessary because Cassandra does +# mostly sequential IO when streaming data during bootstrap or repair, which +# can lead to saturating the network connection and degrading rpc performance. +# When unset, the default is 200 Mbps or 25 MB/s. +# stream_throughput_outbound_megabits_per_sec: 200 + +# Throttles all streaming file transfers between the datacenters; +# this setting allows users to throttle inter dc stream throughput in addition +# to throttling all network stream traffic as configured with +# stream_throughput_outbound_megabits_per_sec +# When unset, the default is 200 Mbps or 25 MB/s +# inter_dc_stream_throughput_outbound_megabits_per_sec: 200 + +# How long the coordinator should wait for read operations to complete +read_request_timeout_in_ms: 5000 +# How long the coordinator should wait for seq or index scans to complete +range_request_timeout_in_ms: 10000 +# How long the coordinator should wait for writes to complete +write_request_timeout_in_ms: 2000 +# How long the coordinator should wait for counter writes to complete +counter_write_request_timeout_in_ms: 5000 +# How long a coordinator should continue to retry a CAS operation +# that contends with other proposals for the same row +cas_contention_timeout_in_ms: 1000 +# How long the coordinator should wait for truncates to complete +# (This can be much longer, because unless auto_snapshot is disabled +# we need to flush first so we can snapshot before removing the data.) +truncate_request_timeout_in_ms: 60000 +# The default timeout for other, miscellaneous operations +request_timeout_in_ms: 10000 + +# Enable operation timeout information exchange between nodes to accurately +# measure request timeouts.
If disabled, replicas will assume that requests +# were forwarded to them instantly by the coordinator, which means that +# under overload conditions we will waste that much extra time processing +# already-timed-out requests. +# +# Warning: before enabling this property make sure ntp is installed +# and the times are synchronized between the nodes. +cross_node_timeout: false + +# Set socket timeout for streaming operations. +# The stream session fails if no data/ack is received by any of the participants +# within that period, which means this should also be sufficient to stream a large +# sstable or rebuild table indexes. +# Default value is 86400000ms, which means stale streams time out after 24 hours. +# A value of zero means stream sockets should never time out. +# streaming_socket_timeout_in_ms: 86400000 + +# phi value that must be reached for a host to be marked down. +# Most users should never need to adjust this. +# phi_convict_threshold: 8 + +# endpoint_snitch -- Set this to a class that implements +# IEndpointSnitch. The snitch has two functions: +# +# - it teaches Cassandra enough about your network topology to route +# requests efficiently +# - it allows Cassandra to spread replicas around your cluster to avoid +# correlated failures. It does this by grouping machines into +# "datacenters" and "racks." Cassandra will do its best not to have +# more than one replica on the same "rack" (which may not actually +# be a physical location) +# +# CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH +# ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss. +# This means that if you start with the default SimpleSnitch, which +# locates every node on "rack1" in "datacenter1", your only options +# if you need to add another datacenter are GossipingPropertyFileSnitch +# (and the older PFS). From there, if you want to migrate to an +# incompatible snitch like Ec2Snitch you can do it by adding new nodes +# under Ec2Snitch (which will locate them in a new "datacenter") and +# decommissioning the old ones. +# +# Out of the box, Cassandra provides: +# +# SimpleSnitch: +# Treats Strategy order as proximity. This can improve cache +# locality when disabling read repair. Only appropriate for +# single-datacenter deployments. +# +# GossipingPropertyFileSnitch: +# This should be your go-to snitch for production use. The rack +# and datacenter for the local node are defined in +# cassandra-rackdc.properties and propagated to other nodes via +# gossip. If cassandra-topology.properties exists, it is used as a +# fallback, allowing migration from the PropertyFileSnitch. +# +# PropertyFileSnitch: +# Proximity is determined by rack and data center, which are +# explicitly configured in cassandra-topology.properties. +# +# Ec2Snitch: +# Appropriate for EC2 deployments in a single Region. Loads Region +# and Availability Zone information from the EC2 API. The Region is +# treated as the datacenter, and the Availability Zone as the rack. +# Only private IPs are used, so this will not work across multiple +# Regions. +# +# Ec2MultiRegionSnitch: +# Uses public IPs as broadcast_address to allow cross-region +# connectivity. (Thus, you should set seed addresses to the public +# IP as well.) You will need to open the storage_port or +# ssl_storage_port on the public IP firewall. (For intra-Region +# traffic, Cassandra will switch to the private IP after +# establishing a connection.)
+# +# RackInferringSnitch: +# Proximity is determined by rack and data center, which are +# assumed to correspond to the 3rd and 2nd octet of each node's IP +# address, respectively. Unless this happens to match your +# deployment conventions, this is best used as an example of +# writing a custom Snitch class and is provided in that spirit. +# +# You can use a custom Snitch by setting this to the full class name +# of the snitch, which will be assumed to be on your classpath. +endpoint_snitch: SimpleSnitch + +# controls how often to perform the more expensive part of host score +# calculation +dynamic_snitch_update_interval_in_ms: 100 +# controls how often to reset all host scores, allowing a bad host to +# possibly recover +dynamic_snitch_reset_interval_in_ms: 600000 +# if set greater than zero and read_repair_chance is < 1.0, this will allow +# 'pinning' of replicas to hosts in order to increase cache capacity. +# The badness threshold will control how much worse the pinned host has to be +# before the dynamic snitch will prefer other replicas over it. This is +# expressed as a double which represents a percentage. Thus, a value of +# 0.2 means Cassandra would continue to prefer the static snitch values +# until the pinned host was 20% worse than the fastest. +dynamic_snitch_badness_threshold: 0.1 + +# request_scheduler -- Set this to a class that implements +# RequestScheduler, which will schedule incoming client requests +# according to the specific policy. This is useful for multi-tenancy +# with a single Cassandra cluster. +# NOTE: This is specifically for requests from the client and does +# not affect inter node communication. +# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place +# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of +# client requests to a node with a separate queue for each +# request_scheduler_id. The scheduler is further customized by +# request_scheduler_options as described below. +request_scheduler: org.apache.cassandra.scheduler.NoScheduler + +# Scheduler Options vary based on the type of scheduler +# +# NoScheduler +# Has no options +# +# RoundRobin +# throttle_limit +# The throttle_limit is the number of in-flight +# requests per client. Requests beyond +# that limit are queued up until +# running requests can complete. +# The value of 80 here is twice the number of +# concurrent_reads + concurrent_writes. +# default_weight +# default_weight is optional and allows for +# overriding the default which is 1. +# weights +# Weights are optional and will default to 1 or the +# overridden default_weight. The weight translates into how +# many requests are handled during each turn of the +# RoundRobin, based on the scheduler id. +# +# request_scheduler_options: +# throttle_limit: 80 +# default_weight: 5 +# weights: +# Keyspace1: 1 +# Keyspace2: 5 + +# request_scheduler_id -- An identifier based on which to perform +# the request scheduling. Currently the only valid option is keyspace. +# request_scheduler_id: keyspace + +# Enable or disable inter-node encryption +# JVM defaults for supported SSL socket protocols and cipher suites can +# be replaced using custom encryption options. This is not recommended +# unless you have policies in place that dictate certain settings, or +# need to disable vulnerable ciphers or protocols in case the JVM cannot +# be updated. 
+# FIPS compliant settings can be configured at JVM level and should not +# involve changing encryption settings here: +# https://docs.oracle.com/javase/8/docs/technotes/guides/security/jsse/FIPS.html +# *NOTE* No custom encryption options are enabled at the moment +# The available internode options are: all, none, dc, rack +# +# If set to dc, Cassandra will encrypt the traffic between the DCs +# If set to rack, Cassandra will encrypt the traffic between the racks +# +# The passwords used in these options must match the passwords used when generating +# the keystore and truststore. For instructions on generating these files, see: +# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore +# +server_encryption_options: + internode_encryption: none + keystore: conf/.keystore + keystore_password: cassandra + truststore: conf/.truststore + truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] + # require_client_auth: false + # require_endpoint_verification: false + +# enable or disable client/server encryption. +client_encryption_options: + enabled: false + # If enabled and optional is set to true, both encrypted and unencrypted connections are handled. + optional: false + keystore: conf/.keystore + keystore_password: cassandra + # require_client_auth: false + # Set truststore and truststore_password if require_client_auth is true + # truststore: conf/.truststore + # truststore_password: cassandra + # More advanced defaults below: + # protocol: TLS + # algorithm: SunX509 + # store_type: JKS + # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] + +# internode_compression controls whether traffic between nodes is +# compressed. +# Can be: +# +# all +# all traffic is compressed +# +# dc +# traffic between different datacenters is compressed +# +# none +# nothing is compressed. +internode_compression: dc + +# Enable or disable tcp_nodelay for inter-dc communication. +# Disabling it will result in larger (but fewer) network packets being sent, +# reducing overhead from the TCP protocol itself, at the cost of increasing +# latency if you block for cross-datacenter responses. +inter_dc_tcp_nodelay: false + +# TTL for different trace types used during logging of the repair process. +tracetype_query_ttl: 86400 +tracetype_repair_ttl: 604800 + +# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level +# This threshold can be adjusted to minimize logging if necessary +# gc_log_threshold_in_ms: 200 + +# If unset, all GC Pauses greater than gc_log_threshold_in_ms will log at +# INFO level +# UDFs (user defined functions) are disabled by default. +# As of Cassandra 3.0 there is a sandbox in place that should prevent execution of evil code. +enable_user_defined_functions: false + +# Enables scripted UDFs (JavaScript UDFs). +# Java UDFs are always enabled if enable_user_defined_functions is true. +# Enable this option to be able to use UDFs with "language javascript" or any custom JSR-223 provider. +# This option has no effect if enable_user_defined_functions is false.
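The client_encryption_options stanza above is what the with_tls test fixtures later in this diff exercise. When enabled is true, a client must trust the CA that signed the server's keystore certificate. Below is a minimal sketch, again assuming the gocql driver; the CA path mirrors the ca.pem fixture added by this change, and the host and port are illustrative:

```go
package main

import (
	"log"

	"github.com/gocql/gocql" // assumed driver; not part of this diff
)

func main() {
	cluster := gocql.NewCluster("172.17.0.3")
	cluster.Port = 9042

	// Trust the CA that signed the server's keystore certificate.
	// EnableHostVerification stays off here because the test certificates
	// do not carry the node's hostname.
	cluster.SslOpts = &gocql.SslOptions{
		CaPath:                 "plugins/database/cassandra/test-fixtures/with_tls/ca.pem", // illustrative path
		EnableHostVerification: false,
	}

	session, err := cluster.CreateSession()
	if err != nil {
		log.Fatalf("tls connect: %v", err)
	}
	defer session.Close()
}
```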
+enable_scripted_user_defined_functions: false + +# The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation. +# Lowering this value on Windows can provide much tighter latency and better throughput; however, +# some virtualized environments may see a negative performance impact from changing this setting +# below their system default. The sysinternals 'clockres' tool can confirm your system's default +# setting. +windows_timer_interval: 1 + + +# Enables encrypting data at-rest (on disk). Different key providers can be plugged in, but the default reads from +# a JCE-style keystore. A single keystore can hold multiple keys, but the one referenced by +# the "key_alias" is the only key that will be used for encrypt operations; previously used keys +# can still (and should!) be in the keystore and will be used on decrypt operations +# (to handle the case of key rotation). +# +# It is strongly recommended to download and install Java Cryptography Extension (JCE) +# Unlimited Strength Jurisdiction Policy Files for your version of the JDK. +# (current link: http://www.oracle.com/technetwork/java/javase/downloads/jce8-download-2133166.html) +# +# Currently, only the following file types are supported for transparent data encryption, although +# more are coming in future cassandra releases: commitlog, hints +transparent_data_encryption_options: + enabled: false + chunk_length_kb: 64 + cipher: AES/CBC/PKCS5Padding + key_alias: testing:1 + # CBC IV length for AES needs to be 16 bytes (which is also the default size) + # iv_length: 16 + key_provider: + - class_name: org.apache.cassandra.security.JKSKeyProvider + parameters: + - keystore: conf/.keystore + keystore_password: cassandra + store_type: JCEKS + key_password: cassandra + + +##################### +# SAFETY THRESHOLDS # +##################### + +# When executing a scan, within or across a partition, we need to keep the +# tombstones seen in memory so we can return them to the coordinator, which +# will use them to make sure other replicas also know about the deleted rows. +# With workloads that generate a lot of tombstones, this can cause performance +# problems and even exhaust the server heap. +# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) +# Adjust the thresholds here if you understand the dangers and want to +# scan more tombstones anyway. These thresholds may also be adjusted at runtime +# using the StorageService mbean. +tombstone_warn_threshold: 1000 +tombstone_failure_threshold: 100000 + +# Log WARN on any batch size exceeding this value. 5kb per batch by default. +# Caution should be taken when increasing the size of this threshold as it can lead to node instability. +batch_size_warn_threshold_in_kb: 5 + +# Fail any batch exceeding this value. 50kb (10x warn threshold) by default. +batch_size_fail_threshold_in_kb: 50 + +# Log WARN on any batches not of type LOGGED that span across more partitions than this limit +unlogged_batch_across_partitions_warn_threshold: 10 + +# Log a warning when compacting partitions larger than this value +compaction_large_partition_warning_threshold_mb: 100 + +# GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level +# Adjust the threshold based on your application throughput requirement +# By default, Cassandra logs GC Pauses greater than 200 ms at INFO level +gc_warn_threshold_in_ms: 1000 + +# Maximum size of any value in SSTables. Safety measure to detect SSTable corruption +# early.
Any value size larger than this threshold will result into marking an SSTable +# as corrupted. +# max_value_size_in_mb: 256 diff --git a/plugins/database/cassandra/test-fixtures/with_tls/bad_ca.pem b/plugins/database/cassandra/test-fixtures/with_tls/bad_ca.pem new file mode 100644 index 0000000..6674fa7 --- /dev/null +++ b/plugins/database/cassandra/test-fixtures/with_tls/bad_ca.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEFjCCAv6gAwIBAgIUHNknw0iUWaMC5UCpiribG8DQhZYwDQYJKoZIhvcNAQEL +BQAwgaIxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH +Ew1TYW4gRnJhbmNpc2NvMRIwEAYDVQQKEwlIYXNoaUNvcnAxIzAhBgNVBAsTGlRl +c3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MS0wKwYDVQQDEyRQcm90b3R5cGUgVGVz +dCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMjEwNjE0MjAyNDAwWhcNMjYwNjEz +MjAyNDAwWjCBojELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAU +BgNVBAcTDVNhbiBGcmFuY2lzY28xEjAQBgNVBAoTCUhhc2hpQ29ycDEjMCEGA1UE +CxMaVGVzdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxLTArBgNVBAMTJFByb3RvdHlw +ZSBUZXN0IENlcnRpZmljYXRlIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBANc0MEZOJ7xm4JrCceerX0kWcdPIczXFIIZTJYdTB7YPHTiL +PFSZ9ugu8W6R7wOMLUazcD7Ugw0hjt+JkiRIY1AOvuZRX7DR3Q0sGy9qFb1y2kOk +lTSAFOV96FxxAg9Fn23mcvjV1TDO1dlxvOuAo0NMjk82TzHk7LVuYOKuJ/Sc9i8a +Ba4vndbiwkSGpytymCu0X4T4ZEARLUZ4feGhr5RbYRehq2Nb8kw/KNLZZyzlzJbr +8OkVizW796bkVJwRfCFubZPl8EvRslxZ2+sMFSozoofoFlB1FsGAvlnEfkxqTJJo +WafmsYnOVnbNfwOogDP0+bp8WAZrAxJqTAWm/LMCAwEAAaNCMEAwDgYDVR0PAQH/ +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFHyfBUnvAULGlcFSljTI +DegUVLB5MA0GCSqGSIb3DQEBCwUAA4IBAQBOdVqZpMCKq+X2TBi3nJmz6kjePVBh +ocHUG02nRkL533x+PUxRpDG3AMzWF3niPxtMuVIZDfpi27zlm2QCh9b3sQi83w+9 +UX1/j3dUoUyiVi/U0iZeZmuDY3ne59DNFdOgGY9p3FvJ+b9WfPg8+v2w26rGoSMz +21XKNZcRFcjOJ5LJ3i9+liaCkpXLfErA+AtqNeraHOorJ5UO4mA7OlFowV8adOQq +SinFIoXCExBTxqMv0lVzEhGN6Wd261CmKY5e4QLqASCO+s7zwGhHyzwjdA0pCNtI +PmHIk13m0p56G8hpz+M/5hBQFb0MIIR3Je6QVzfRty2ipUO91E9Ydm7C +-----END CERTIFICATE----- diff --git a/plugins/database/cassandra/test-fixtures/with_tls/ca.pem b/plugins/database/cassandra/test-fixtures/with_tls/ca.pem new file mode 100644 index 0000000..fdcfb23 --- /dev/null +++ b/plugins/database/cassandra/test-fixtures/with_tls/ca.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEFjCCAv6gAwIBAgIUWd8FZSev3ygjhWE7O8orqHPQ4IEwDQYJKoZIhvcNAQEL +BQAwgaIxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH +Ew1TYW4gRnJhbmNpc2NvMRIwEAYDVQQKEwlIYXNoaUNvcnAxIzAhBgNVBAsTGlRl +c3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MS0wKwYDVQQDEyRQcm90b3R5cGUgVGVz +dCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMjEwNjEwMjAwNDAwWhcNMjYwNjA5 +MjAwNDAwWjCBojELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAU +BgNVBAcTDVNhbiBGcmFuY2lzY28xEjAQBgNVBAoTCUhhc2hpQ29ycDEjMCEGA1UE +CxMaVGVzdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxLTArBgNVBAMTJFByb3RvdHlw +ZSBUZXN0IENlcnRpZmljYXRlIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAMXTnIDpOXXiHuKyI9EZxv7qg81DmelOB+iAzhvRsigMSuka +qZH29Aaf4PBvKLlSVN6sVP16cXRvk48qa0C78tP0kTPKWdEyE1xQUZb270SZ6Tm3 +T7sNRTRwWTsgeC1n6SHlBUn3MviQgA1dZM1CbZIXQpBxtuPg+p9eu3YP/CZJFJjT +LYVKT6kRumBQEX/UUesNfUnUpVIOxxOwbVeF6a/wGxeLY6/fOQ+TJhVUjSy/pvaI +6NnycrwD/4ck6gusV5HKakidCID9MwV610Vc7AFi070VGYCjKfiv6EYMMnjycYqi +KHz623Ca4rO4qtWWvT1K/+GkryDKXeI3KHuEsdsCAwEAAaNCMEAwDgYDVR0PAQH/ +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFIy8cvyabFclVWwcZ4rl +ADoLEdyAMA0GCSqGSIb3DQEBCwUAA4IBAQCzn9QbsOpBuvhhgdH/Jk0q7H0kmpVS +rbLhcQyWv9xiyopYbbUfh0Hud15rnqAkyT9nd2Kvo8T/X9rc1OXa6oDO6aoXjIm1 +aKOFikET8fc/81rT81E7TVPO7TZW5s9Cej30zCOJQWZ+ibHNyequuyihtImNacXF ++1pAAldj/JMu+Ky1YFrs2iccGOpGCGbsWfLQt+wYKwya7dpSz1ceqigKavIJSOMV 
+CNsyC59UtFbvdk139FyEvCmecsCbWuo0JVg3do5n6upwqrgvLRNP8EHzm17DWu5T +aNtsBbv85uUgMmF7kzxr+t6VdtG9u+q0HCmW1/1VVK3ZsA+UTB7UBddD +-----END CERTIFICATE----- diff --git a/plugins/database/cassandra/test-fixtures/with_tls/ca.pem.json b/plugins/database/cassandra/test-fixtures/with_tls/ca.pem.json new file mode 100644 index 0000000..a28e2ed --- /dev/null +++ b/plugins/database/cassandra/test-fixtures/with_tls/ca.pem.json @@ -0,0 +1,3 @@ +{ + "ca_chain": ["-----BEGIN CERTIFICATE-----\nMIIEFjCCAv6gAwIBAgIUWd8FZSev3ygjhWE7O8orqHPQ4IEwDQYJKoZIhvcNAQEL\nBQAwgaIxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH\nEw1TYW4gRnJhbmNpc2NvMRIwEAYDVQQKEwlIYXNoaUNvcnAxIzAhBgNVBAsTGlRl\nc3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MS0wKwYDVQQDEyRQcm90b3R5cGUgVGVz\ndCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMjEwNjEwMjAwNDAwWhcNMjYwNjA5\nMjAwNDAwWjCBojELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAU\nBgNVBAcTDVNhbiBGcmFuY2lzY28xEjAQBgNVBAoTCUhhc2hpQ29ycDEjMCEGA1UE\nCxMaVGVzdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxLTArBgNVBAMTJFByb3RvdHlw\nZSBUZXN0IENlcnRpZmljYXRlIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQAD\nggEPADCCAQoCggEBAMXTnIDpOXXiHuKyI9EZxv7qg81DmelOB+iAzhvRsigMSuka\nqZH29Aaf4PBvKLlSVN6sVP16cXRvk48qa0C78tP0kTPKWdEyE1xQUZb270SZ6Tm3\nT7sNRTRwWTsgeC1n6SHlBUn3MviQgA1dZM1CbZIXQpBxtuPg+p9eu3YP/CZJFJjT\nLYVKT6kRumBQEX/UUesNfUnUpVIOxxOwbVeF6a/wGxeLY6/fOQ+TJhVUjSy/pvaI\n6NnycrwD/4ck6gusV5HKakidCID9MwV610Vc7AFi070VGYCjKfiv6EYMMnjycYqi\nKHz623Ca4rO4qtWWvT1K/+GkryDKXeI3KHuEsdsCAwEAAaNCMEAwDgYDVR0PAQH/\nBAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFIy8cvyabFclVWwcZ4rl\nADoLEdyAMA0GCSqGSIb3DQEBCwUAA4IBAQCzn9QbsOpBuvhhgdH/Jk0q7H0kmpVS\nrbLhcQyWv9xiyopYbbUfh0Hud15rnqAkyT9nd2Kvo8T/X9rc1OXa6oDO6aoXjIm1\naKOFikET8fc/81rT81E7TVPO7TZW5s9Cej30zCOJQWZ+ibHNyequuyihtImNacXF\n+1pAAldj/JMu+Ky1YFrs2iccGOpGCGbsWfLQt+wYKwya7dpSz1ceqigKavIJSOMV\nCNsyC59UtFbvdk139FyEvCmecsCbWuo0JVg3do5n6upwqrgvLRNP8EHzm17DWu5T\naNtsBbv85uUgMmF7kzxr+t6VdtG9u+q0HCmW1/1VVK3ZsA+UTB7UBddD\n-----END CERTIFICATE-----\n"] +} diff --git a/plugins/database/cassandra/test-fixtures/with_tls/cqlshrc b/plugins/database/cassandra/test-fixtures/with_tls/cqlshrc new file mode 100644 index 0000000..6a226e4 --- /dev/null +++ b/plugins/database/cassandra/test-fixtures/with_tls/cqlshrc @@ -0,0 +1,3 @@ +[ssl] +validate = false +version = SSLv23 diff --git a/plugins/database/cassandra/test-fixtures/with_tls/stores/keystore b/plugins/database/cassandra/test-fixtures/with_tls/stores/keystore new file mode 100644 index 0000000..fce8a77 Binary files /dev/null and b/plugins/database/cassandra/test-fixtures/with_tls/stores/keystore differ diff --git a/plugins/database/cassandra/test-fixtures/with_tls/stores/server.p12 b/plugins/database/cassandra/test-fixtures/with_tls/stores/server.p12 new file mode 100644 index 0000000..c775b54 Binary files /dev/null and b/plugins/database/cassandra/test-fixtures/with_tls/stores/server.p12 differ diff --git a/plugins/database/cassandra/test-fixtures/with_tls/stores/truststore b/plugins/database/cassandra/test-fixtures/with_tls/stores/truststore new file mode 100644 index 0000000..e29c3fe Binary files /dev/null and b/plugins/database/cassandra/test-fixtures/with_tls/stores/truststore differ diff --git a/plugins/database/cassandra/tls.go b/plugins/database/cassandra/tls.go new file mode 100644 index 0000000..17e148d --- /dev/null +++ b/plugins/database/cassandra/tls.go @@ -0,0 +1,120 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package cassandra + +import ( + "crypto/tls" + "crypto/x509" + "encoding/json" + "encoding/pem" + "fmt" + + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/helper/errutil" +) + +func jsonBundleToTLSConfig(rawJSON string, tlsMinVersion uint16, serverName string, insecureSkipVerify bool) (*tls.Config, error) { + var certBundle certutil.CertBundle + err := json.Unmarshal([]byte(rawJSON), &certBundle) + if err != nil { + return nil, fmt.Errorf("failed to parse JSON: %w", err) + } + + if certBundle.IssuingCA != "" && len(certBundle.CAChain) > 0 { + return nil, fmt.Errorf("issuing_ca and ca_chain cannot both be specified") + } + if certBundle.IssuingCA != "" { + certBundle.CAChain = []string{certBundle.IssuingCA} + certBundle.IssuingCA = "" + } + + return toClientTLSConfig(certBundle.Certificate, certBundle.PrivateKey, certBundle.CAChain, tlsMinVersion, serverName, insecureSkipVerify) +} + +func pemBundleToTLSConfig(pemBundle string, tlsMinVersion uint16, serverName string, insecureSkipVerify bool) (*tls.Config, error) { + if len(pemBundle) == 0 { + return nil, errutil.UserError{Err: "empty pem bundle"} + } + + pemBytes := []byte(pemBundle) + var pemBlock *pem.Block + + certificate := "" + privateKey := "" + caChain := []string{} + + for len(pemBytes) > 0 { + pemBlock, pemBytes = pem.Decode(pemBytes) + if pemBlock == nil { + return nil, errutil.UserError{Err: "no data found in PEM block"} + } + blockBytes := pem.EncodeToMemory(pemBlock) + + switch pemBlock.Type { + case "CERTIFICATE": + // Parse the cert so we know if it's a CA or not + cert, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %w", err) + } + if cert.IsCA { + caChain = append(caChain, string(blockBytes)) + continue + } + + // Only one leaf certificate supported + if certificate != "" { + return nil, errutil.UserError{Err: "multiple leaf certificates not supported"} + } + certificate = string(blockBytes) + + case "RSA PRIVATE KEY", "EC PRIVATE KEY", "PRIVATE KEY": + if privateKey != "" { + return nil, errutil.UserError{Err: "multiple private keys not supported"} + } + privateKey = string(blockBytes) + default: + return nil, fmt.Errorf("unsupported PEM block type [%s]", pemBlock.Type) + } + } + + return toClientTLSConfig(certificate, privateKey, caChain, tlsMinVersion, serverName, insecureSkipVerify) +} + +func toClientTLSConfig(certificatePEM string, privateKeyPEM string, caChainPEMs []string, tlsMinVersion uint16, serverName string, insecureSkipVerify bool) (*tls.Config, error) { + if certificatePEM != "" && privateKeyPEM == "" { + return nil, fmt.Errorf("found certificate for client-side TLS authentication but no private key") + } else if certificatePEM == "" && privateKeyPEM != "" { + return nil, fmt.Errorf("found private key for client-side TLS authentication but no certificate") + } + + var certificates []tls.Certificate + if certificatePEM != "" { + certificate, err := tls.X509KeyPair([]byte(certificatePEM), []byte(privateKeyPEM)) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate and private key pair: %w", err) + } + certificates = append(certificates, certificate) + } + + var rootCAs *x509.CertPool + if len(caChainPEMs) > 0 { + rootCAs = x509.NewCertPool() + for _, caBlock := range caChainPEMs { + ok := rootCAs.AppendCertsFromPEM([]byte(caBlock)) + if !ok { + return nil, fmt.Errorf("failed to add CA certificate to certificate pool: it may be malformed or empty") 
+ } + } + } + + config := &tls.Config{ + Certificates: certificates, + RootCAs: rootCAs, + ServerName: serverName, + InsecureSkipVerify: insecureSkipVerify, + MinVersion: tlsMinVersion, + } + return config, nil +} diff --git a/plugins/database/hana/hana-database-plugin/main.go b/plugins/database/hana/hana-database-plugin/main.go new file mode 100644 index 0000000..9ec568b --- /dev/null +++ b/plugins/database/hana/hana-database-plugin/main.go @@ -0,0 +1,27 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "log" + "os" + + "github.com/hashicorp/vault/plugins/database/hana" + "github.com/hashicorp/vault/sdk/database/dbplugin/v5" +) + +func main() { + err := Run() + if err != nil { + log.Println(err) + os.Exit(1) + } +} + +// Run instantiates a HANA object, and runs the RPC server for the plugin +func Run() error { + dbplugin.ServeMultiplex(hana.New) + + return nil +} diff --git a/plugins/database/hana/hana.go b/plugins/database/hana/hana.go new file mode 100644 index 0000000..987cc1a --- /dev/null +++ b/plugins/database/hana/hana.go @@ -0,0 +1,374 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package hana + +import ( + "context" + "database/sql" + "fmt" + "strings" + + _ "github.com/SAP/go-hdb/driver" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/hashicorp/vault/sdk/database/helper/connutil" + "github.com/hashicorp/vault/sdk/database/helper/dbutil" + "github.com/hashicorp/vault/sdk/helper/dbtxn" + "github.com/hashicorp/vault/sdk/helper/template" +) + +const ( + hanaTypeName = "hdb" + + defaultUserNameTemplate = `{{ printf "v_%s_%s_%s_%s" (.DisplayName | truncate 32) (.RoleName | truncate 20) (random 20) (unix_time) | truncate 127 | replace "-" "_" | uppercase }}` +) + +// HANA is an implementation of Database interface +type HANA struct { + *connutil.SQLConnectionProducer + + usernameProducer template.StringTemplate +} + +var _ dbplugin.Database = (*HANA)(nil) + +// New implements builtinplugins.BuiltinFactory +func New() (interface{}, error) { + db := new() + // Wrap the plugin with middleware to sanitize errors + dbType := dbplugin.NewDatabaseErrorSanitizerMiddleware(db, db.secretValues) + + return dbType, nil +} + +func new() *HANA { + connProducer := &connutil.SQLConnectionProducer{} + connProducer.Type = hanaTypeName + + return &HANA{ + SQLConnectionProducer: connProducer, + } +} + +func (h *HANA) secretValues() map[string]string { + return map[string]string{ + h.Password: "[password]", + } +} + +func (h *HANA) Initialize(ctx context.Context, req dbplugin.InitializeRequest) (dbplugin.InitializeResponse, error) { + conf, err := h.Init(ctx, req.Config, req.VerifyConnection) + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("error initializing db: %w", err) + } + + usernameTemplate, err := strutil.GetString(req.Config, "username_template") + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("failed to retrieve username_template: %w", err) + } + if usernameTemplate == "" { + usernameTemplate = defaultUserNameTemplate + } + + up, err := template.NewTemplate(template.Template(usernameTemplate)) + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("unable to initialize username template: %w", err) + } + h.usernameProducer = up + + _, err = h.usernameProducer.Generate(dbplugin.UsernameMetadata{}) + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("invalid username 
template: %w", err) + } + + return dbplugin.InitializeResponse{ + Config: conf, + }, nil +} + +// Type returns the TypeName for this backend +func (h *HANA) Type() (string, error) { + return hanaTypeName, nil +} + +func (h *HANA) getConnection(ctx context.Context) (*sql.DB, error) { + db, err := h.Connection(ctx) + if err != nil { + return nil, err + } + + return db.(*sql.DB), nil +} + +// NewUser generates the username/password on the underlying HANA secret backend +// as instructed by the CreationStatement provided. +func (h *HANA) NewUser(ctx context.Context, req dbplugin.NewUserRequest) (response dbplugin.NewUserResponse, err error) { + // Grab the lock + h.Lock() + defer h.Unlock() + + // Get the connection + db, err := h.getConnection(ctx) + if err != nil { + return dbplugin.NewUserResponse{}, err + } + + if len(req.Statements.Commands) == 0 { + return dbplugin.NewUserResponse{}, dbutil.ErrEmptyCreationStatement + } + + // Generate username + username, err := h.usernameProducer.Generate(req.UsernameConfig) + if err != nil { + return dbplugin.NewUserResponse{}, err + } + + // HANA does not allow hyphens in usernames, and highly prefers capital letters + username = strings.ReplaceAll(username, "-", "_") + username = strings.ToUpper(username) + + // If expiration is in the role SQL, HANA will deactivate the user when time is up, + // regardless of whether vault is alive to revoke lease + expirationStr := req.Expiration.UTC().Format("2006-01-02 15:04:05") + + // Start a transaction + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return dbplugin.NewUserResponse{}, err + } + defer tx.Rollback() + + // Execute each query + for _, stmt := range req.Statements.Commands { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "name": username, + "password": req.Password, + "expiration": expirationStr, + } + + if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil { + return dbplugin.NewUserResponse{}, err + } + } + } + + // Commit the transaction + if err := tx.Commit(); err != nil { + return dbplugin.NewUserResponse{}, err + } + + resp := dbplugin.NewUserResponse{ + Username: username, + } + + return resp, nil +} + +// UpdateUser allows for updating the expiration or password of the user mentioned in +// the UpdateUserRequest +func (h *HANA) UpdateUser(ctx context.Context, req dbplugin.UpdateUserRequest) (dbplugin.UpdateUserResponse, error) { + h.Lock() + defer h.Unlock() + + // No change requested + if req.Password == nil && req.Expiration == nil { + return dbplugin.UpdateUserResponse{}, nil + } + + // Get connection + db, err := h.getConnection(ctx) + if err != nil { + return dbplugin.UpdateUserResponse{}, err + } + + // Start a transaction + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return dbplugin.UpdateUserResponse{}, err + } + defer tx.Rollback() + + if req.Password != nil { + err = h.updateUserPassword(ctx, tx, req.Username, req.Password) + if err != nil { + return dbplugin.UpdateUserResponse{}, err + } + } + + if req.Expiration != nil { + err = h.updateUserExpiration(ctx, tx, req.Username, req.Expiration) + if err != nil { + return dbplugin.UpdateUserResponse{}, err + } + } + + // Commit the transaction + if err := tx.Commit(); err != nil { + return dbplugin.UpdateUserResponse{}, err + } + + return dbplugin.UpdateUserResponse{}, nil +} + +func (h *HANA) updateUserPassword(ctx context.Context, tx *sql.Tx, username string, req 
*dbplugin.ChangePassword) error { + password := req.NewPassword + + if username == "" || password == "" { + return fmt.Errorf("must provide both username and password") + } + + stmts := req.Statements.Commands + if len(stmts) == 0 { + stmts = []string{"ALTER USER {{username}} PASSWORD \"{{password}}\""} + } + + for _, stmt := range stmts { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "name": username, + "username": username, + "password": password, + } + + if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil { + return fmt.Errorf("failed to execute query: %w", err) + } + } + } + + return nil +} + +func (h *HANA) updateUserExpiration(ctx context.Context, tx *sql.Tx, username string, req *dbplugin.ChangeExpiration) error { + // If expiration is in the role SQL, HANA will deactivate the user when time is up, + // regardless of whether vault is alive to revoke lease + expirationStr := req.NewExpiration.String() + + if username == "" || expirationStr == "" { + return fmt.Errorf("must provide both username and expiration") + } + + stmts := req.Statements.Commands + if len(stmts) == 0 { + stmts = []string{"ALTER USER {{username}} VALID UNTIL '{{expiration}}'"} + } + + for _, stmt := range stmts { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "name": username, + "username": username, + "expiration": expirationStr, + } + + if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil { + return fmt.Errorf("failed to execute query: %w", err) + } + } + } + + return nil +} + +// Revoking hana user will deactivate user and try to perform a soft drop +func (h *HANA) DeleteUser(ctx context.Context, req dbplugin.DeleteUserRequest) (dbplugin.DeleteUserResponse, error) { + h.Lock() + defer h.Unlock() + + // default revoke will be a soft drop on user + if len(req.Statements.Commands) == 0 { + return h.revokeUserDefault(ctx, req) + } + + // Get connection + db, err := h.getConnection(ctx) + if err != nil { + return dbplugin.DeleteUserResponse{}, err + } + + // Start a transaction + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return dbplugin.DeleteUserResponse{}, err + } + defer tx.Rollback() + + // Execute each query + for _, stmt := range req.Statements.Commands { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "name": req.Username, + } + if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil { + return dbplugin.DeleteUserResponse{}, err + } + } + } + + return dbplugin.DeleteUserResponse{}, tx.Commit() +} + +func (h *HANA) revokeUserDefault(ctx context.Context, req dbplugin.DeleteUserRequest) (dbplugin.DeleteUserResponse, error) { + // Get connection + db, err := h.getConnection(ctx) + if err != nil { + return dbplugin.DeleteUserResponse{}, err + } + + // Start a transaction + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return dbplugin.DeleteUserResponse{}, err + } + defer tx.Rollback() + + // Disable server login for user + disableStmt, err := tx.PrepareContext(ctx, fmt.Sprintf("ALTER USER %s DEACTIVATE USER NOW", req.Username)) + if err != nil { + return dbplugin.DeleteUserResponse{}, err + } + defer disableStmt.Close() + if _, err := disableStmt.ExecContext(ctx); err != nil { + 
return dbplugin.DeleteUserResponse{}, err + } + + // Invalidates current sessions and performs soft drop (drop if no dependencies) + // if hard drop is desired, custom revoke statements should be written for role + dropStmt, err := tx.PrepareContext(ctx, fmt.Sprintf("DROP USER %s RESTRICT", req.Username)) + if err != nil { + return dbplugin.DeleteUserResponse{}, err + } + defer dropStmt.Close() + if _, err := dropStmt.ExecContext(ctx); err != nil { + return dbplugin.DeleteUserResponse{}, err + } + + // Commit transaction + if err := tx.Commit(); err != nil { + return dbplugin.DeleteUserResponse{}, err + } + + return dbplugin.DeleteUserResponse{}, nil +} diff --git a/plugins/database/hana/hana_test.go b/plugins/database/hana/hana_test.go new file mode 100644 index 0000000..6a3c1db --- /dev/null +++ b/plugins/database/hana/hana_test.go @@ -0,0 +1,393 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package hana + +import ( + "context" + "database/sql" + "fmt" + "os" + "reflect" + "strings" + "testing" + "time" + + "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing" + "github.com/stretchr/testify/require" +) + +func TestHANA_Initialize(t *testing.T) { + if os.Getenv("HANA_URL") == "" || os.Getenv("VAULT_ACC") != "1" { + t.SkipNow() + } + connURL := os.Getenv("HANA_URL") + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + expectedConfig := copyConfig(connectionDetails) + + initReq := dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + } + + db := new() + initResp := dbtesting.AssertInitialize(t, db, initReq) + defer dbtesting.AssertClose(t, db) + + if !reflect.DeepEqual(initResp.Config, expectedConfig) { + t.Fatalf("Actual config: %#v\nExpected config: %#v", initResp.Config, expectedConfig) + } +} + +// this test will leave a lingering user on the system +func TestHANA_NewUser(t *testing.T) { + if os.Getenv("HANA_URL") == "" || os.Getenv("VAULT_ACC") != "1" { + t.SkipNow() + } + + connURL := os.Getenv("HANA_URL") + + type testCase struct { + commands []string + expectErr bool + assertUser func(t testing.TB, connURL, username, password string) + } + + tests := map[string]testCase{ + "no creation statements": { + commands: []string{}, + expectErr: true, + assertUser: assertCredsDoNotExist, + }, + "with creation statements": { + commands: []string{testHANARole}, + expectErr: false, + assertUser: assertCredsExist, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + initReq := dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + } + + db := new() + dbtesting.AssertInitialize(t, db, initReq) + defer dbtesting.AssertClose(t, db) + + req := dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "test-test", + RoleName: "test-test", + }, + Statements: dbplugin.Statements{ + Commands: test.commands, + }, + Password: "AG4qagho_dsvZ", + Expiration: time.Now().Add(1 * time.Second), + } + + createResp, err := db.NewUser(context.Background(), req) + if test.expectErr && err == nil { + t.Fatalf("err expected, received nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + test.assertUser(t, connURL, createResp.Username, req.Password) + }) + } +} + +func TestHANA_UpdateUser(t *testing.T) { + if os.Getenv("HANA_URL") == "" || 
os.Getenv("VAULT_ACC") != "1" { + t.SkipNow() + } + connURL := os.Getenv("HANA_URL") + + type testCase struct { + commands []string + expectErrOnLogin bool + expectedErrMsg string + } + + tests := map[string]testCase{ + "no update statements": { + commands: []string{}, + expectErrOnLogin: true, + expectedErrMsg: "user is forced to change password", + }, + "with custom update statements": { + commands: []string{testHANAUpdate}, + expectErrOnLogin: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + initReq := dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + } + + db := new() + dbtesting.AssertInitialize(t, db, initReq) + defer dbtesting.AssertClose(t, db) + + password := "this_is_Thirty_2_characters_wow_" + newReq := dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "test-test", + RoleName: "test-test", + }, + Password: password, + Statements: dbplugin.Statements{ + Commands: []string{testHANARole}, + }, + Expiration: time.Now().Add(time.Hour), + } + + userResp := dbtesting.AssertNewUser(t, db, newReq) + assertCredsExist(t, connURL, userResp.Username, password) + + req := dbplugin.UpdateUserRequest{ + Username: userResp.Username, + Password: &dbplugin.ChangePassword{ + NewPassword: "this_is_ALSO_Thirty_2_characters_", + Statements: dbplugin.Statements{ + Commands: test.commands, + }, + }, + } + + dbtesting.AssertUpdateUser(t, db, req) + err := testCredsExist(t, connURL, userResp.Username, req.Password.NewPassword) + if test.expectErrOnLogin { + if err == nil { + t.Fatalf("Able to login with new creds when expecting an issue") + } else if test.expectedErrMsg != "" && !strings.Contains(err.Error(), test.expectedErrMsg) { + t.Fatalf("Expected error message to contain %q, received: %s", test.expectedErrMsg, err) + } + } + if !test.expectErrOnLogin && err != nil { + t.Fatalf("Unable to login: %s", err) + } + }) + } +} + +func TestHANA_DeleteUser(t *testing.T) { + if os.Getenv("HANA_URL") == "" || os.Getenv("VAULT_ACC") != "1" { + t.SkipNow() + } + connURL := os.Getenv("HANA_URL") + + type testCase struct { + commands []string + } + + tests := map[string]testCase{ + "no update statements": { + commands: []string{}, + }, + "with custom update statements": { + commands: []string{testHANADrop}, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + initReq := dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + } + + db := new() + dbtesting.AssertInitialize(t, db, initReq) + defer dbtesting.AssertClose(t, db) + + password := "this_is_Thirty_2_characters_wow_" + + newReq := dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "test-test", + RoleName: "test-test", + }, + Password: password, + Statements: dbplugin.Statements{ + Commands: []string{testHANARole}, + }, + Expiration: time.Now().Add(time.Hour), + } + + userResp := dbtesting.AssertNewUser(t, db, newReq) + assertCredsExist(t, connURL, userResp.Username, password) + + req := dbplugin.DeleteUserRequest{ + Username: userResp.Username, + Statements: dbplugin.Statements{ + Commands: test.commands, + }, + } + + dbtesting.AssertDeleteUser(t, db, req) + assertCredsDoNotExist(t, connURL, userResp.Username, password) + }) + } +} + +func testCredsExist(t testing.TB, connURL, username, password 
string) error { + // Log in with the new creds + parts := strings.Split(connURL, "@") + connURL = fmt.Sprintf("hdb://%s:%s@%s", username, password, parts[1]) + db, err := sql.Open("hdb", connURL) + if err != nil { + return err + } + defer db.Close() + return db.Ping() +} + +func assertCredsExist(t testing.TB, connURL, username, password string) { + t.Helper() + err := testCredsExist(t, connURL, username, password) + if err != nil { + t.Fatalf("Unable to log in as %q: %s", username, err) + } +} + +func assertCredsDoNotExist(t testing.TB, connURL, username, password string) { + t.Helper() + err := testCredsExist(t, connURL, username, password) + if err == nil { + t.Fatalf("Able to log in when we should not be able to") + } +} + +func copyConfig(config map[string]interface{}) map[string]interface{} { + newConfig := map[string]interface{}{} + for k, v := range config { + newConfig[k] = v + } + return newConfig +} + +func TestHANA_DefaultUsernameTemplate(t *testing.T) { + if os.Getenv("HANA_URL") == "" || os.Getenv("VAULT_ACC") != "1" { + t.SkipNow() + } + connURL := os.Getenv("HANA_URL") + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + initReq := dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + } + + db := new() + dbtesting.AssertInitialize(t, db, initReq) + + usernameConfig := dbplugin.UsernameMetadata{ + DisplayName: "test", + RoleName: "test", + } + + const password = "SuperSecurePa55w0rd!" + resp := dbtesting.AssertNewUser(t, db, dbplugin.NewUserRequest{ + UsernameConfig: usernameConfig, + Password: password, + Statements: dbplugin.Statements{ + Commands: []string{testHANARole}, + }, + Expiration: time.Now().Add(5 * time.Minute), + }) + username := resp.Username + + if resp.Username == "" { + t.Fatalf("Missing username") + } + + testCredsExist(t, connURL, username, password) + + require.Regexp(t, `^V_TEST_TEST_[A-Z0-9]{20}_[0-9]{10}$`, resp.Username) + + defer dbtesting.AssertClose(t, db) +} + +func TestHANA_CustomUsernameTemplate(t *testing.T) { + if os.Getenv("HANA_URL") == "" || os.Getenv("VAULT_ACC") != "1" { + t.SkipNow() + } + connURL := os.Getenv("HANA_URL") + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + "username_template": "{{.DisplayName}}_{{random 10}}", + } + + initReq := dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + } + + db := new() + dbtesting.AssertInitialize(t, db, initReq) + + usernameConfig := dbplugin.UsernameMetadata{ + DisplayName: "test", + RoleName: "test", + } + + const password = "SuperSecurePa55w0rd!" 
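+ // With the custom username_template above ("{{.DisplayName}}_{{random 10}}") and the plugin's uppercasing of generated usernames, the new user should match ^TEST_[A-Z0-9]{10}$, which the require.Regexp assertion below verifies.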
+ resp := dbtesting.AssertNewUser(t, db, dbplugin.NewUserRequest{ + UsernameConfig: usernameConfig, + Password: password, + Statements: dbplugin.Statements{ + Commands: []string{testHANARole}, + }, + Expiration: time.Now().Add(5 * time.Minute), + }) + username := resp.Username + + if resp.Username == "" { + t.Fatalf("Missing username") + } + + testCredsExist(t, connURL, username, password) + + require.Regexp(t, `^TEST_[A-Z0-9]{10}$`, resp.Username) + + defer dbtesting.AssertClose(t, db) +} + +const testHANARole = ` +CREATE USER {{name}} PASSWORD "{{password}}" NO FORCE_FIRST_PASSWORD_CHANGE VALID UNTIL '{{expiration}}';` + +const testHANADrop = ` +DROP USER {{name}} CASCADE;` + +const testHANAUpdate = ` +ALTER USER {{name}} PASSWORD "{{password}}" NO FORCE_FIRST_PASSWORD_CHANGE;` diff --git a/plugins/database/influxdb/connection_producer.go b/plugins/database/influxdb/connection_producer.go new file mode 100644 index 0000000..b9f18c5 --- /dev/null +++ b/plugins/database/influxdb/connection_producer.go @@ -0,0 +1,270 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package influxdb + +import ( + "context" + "crypto/tls" + "fmt" + "sync" + "time" + + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-secure-stdlib/tlsutil" + dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/hashicorp/vault/sdk/database/helper/connutil" + "github.com/hashicorp/vault/sdk/helper/certutil" + influx "github.com/influxdata/influxdb1-client/v2" + "github.com/mitchellh/mapstructure" +) + +// influxdbConnectionProducer implements ConnectionProducer and provides an +// interface for influxdb databases to make connections. +type influxdbConnectionProducer struct { + Host string `json:"host" structs:"host" mapstructure:"host"` + Username string `json:"username" structs:"username" mapstructure:"username"` + Password string `json:"password" structs:"password" mapstructure:"password"` + Port string `json:"port" structs:"port" mapstructure:"port"` // default to 8086 + TLS bool `json:"tls" structs:"tls" mapstructure:"tls"` + InsecureTLS bool `json:"insecure_tls" structs:"insecure_tls" mapstructure:"insecure_tls"` + ConnectTimeoutRaw interface{} `json:"connect_timeout" structs:"connect_timeout" mapstructure:"connect_timeout"` + TLSMinVersion string `json:"tls_min_version" structs:"tls_min_version" mapstructure:"tls_min_version"` + PemBundle string `json:"pem_bundle" structs:"pem_bundle" mapstructure:"pem_bundle"` + PemJSON string `json:"pem_json" structs:"pem_json" mapstructure:"pem_json"` + + connectTimeout time.Duration + certificate string + privateKey string + issuingCA string + rawConfig map[string]interface{} + + Initialized bool + Type string + client influx.Client + sync.Mutex +} + +func (i *influxdbConnectionProducer) Initialize(ctx context.Context, req dbplugin.InitializeRequest) (dbplugin.InitializeResponse, error) { + i.Lock() + defer i.Unlock() + + i.rawConfig = req.Config + + err := mapstructure.WeakDecode(req.Config, i) + if err != nil { + return dbplugin.InitializeResponse{}, err + } + + if i.ConnectTimeoutRaw == nil { + i.ConnectTimeoutRaw = "5s" + } + if i.Port == "" { + i.Port = "8086" + } + i.connectTimeout, err = parseutil.ParseDurationSecond(i.ConnectTimeoutRaw) + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("invalid connect_timeout: %w", err) + } + + switch { + case len(i.Host) == 0: + return dbplugin.InitializeResponse{}, fmt.Errorf("host cannot be empty") + case len(i.Username) == 0: + return 
dbplugin.InitializeResponse{}, fmt.Errorf("username cannot be empty")
+	case len(i.Password) == 0:
+		return dbplugin.InitializeResponse{}, fmt.Errorf("password cannot be empty")
+	}
+
+	var certBundle *certutil.CertBundle
+	var parsedCertBundle *certutil.ParsedCertBundle
+	switch {
+	case len(i.PemJSON) != 0:
+		parsedCertBundle, err = certutil.ParsePKIJSON([]byte(i.PemJSON))
+		if err != nil {
+			return dbplugin.InitializeResponse{}, fmt.Errorf("could not parse given JSON; it must be in the format of the output of the PKI backend certificate issuing command: %w", err)
+		}
+		certBundle, err = parsedCertBundle.ToCertBundle()
+		if err != nil {
+			return dbplugin.InitializeResponse{}, fmt.Errorf("error marshaling PEM information: %w", err)
+		}
+		i.certificate = certBundle.Certificate
+		i.privateKey = certBundle.PrivateKey
+		i.issuingCA = certBundle.IssuingCA
+		i.TLS = true
+
+	case len(i.PemBundle) != 0:
+		parsedCertBundle, err = certutil.ParsePEMBundle(i.PemBundle)
+		if err != nil {
+			return dbplugin.InitializeResponse{}, fmt.Errorf("error parsing the given PEM information: %w", err)
+		}
+		certBundle, err = parsedCertBundle.ToCertBundle()
+		if err != nil {
+			return dbplugin.InitializeResponse{}, fmt.Errorf("error marshaling PEM information: %w", err)
+		}
+		i.certificate = certBundle.Certificate
+		i.privateKey = certBundle.PrivateKey
+		i.issuingCA = certBundle.IssuingCA
+		i.TLS = true
+	}
+
+	// Set initialized to true at this point since all fields are set,
+	// and the connection can be established at a later time.
+	i.Initialized = true
+
+	if req.VerifyConnection {
+		if _, err := i.Connection(ctx); err != nil {
+			return dbplugin.InitializeResponse{}, fmt.Errorf("error verifying connection: %w", err)
+		}
+	}
+
+	resp := dbplugin.InitializeResponse{
+		Config: req.Config,
+	}
+
+	return resp, nil
+}
+
+func (i *influxdbConnectionProducer) Connection(_ context.Context) (interface{}, error) {
+	if !i.Initialized {
+		return nil, connutil.ErrNotInitialized
+	}
+
+	// If we already have a client, return it
+	if i.client != nil {
+		return i.client, nil
+	}
+
+	cli, err := i.createClient()
+	if err != nil {
+		return nil, err
+	}
+
+	// Store the client in the backend for reuse
+	i.client = cli
+
+	return cli, nil
+}
+
+func (i *influxdbConnectionProducer) Close() error {
+	// Grab the write lock
+	i.Lock()
+	defer i.Unlock()
+
+	if i.client != nil {
+		i.client.Close()
+	}
+
+	i.client = nil
+
+	return nil
+}
+
+func (i *influxdbConnectionProducer) createClient() (influx.Client, error) {
+	clientConfig := influx.HTTPConfig{
+		Addr:      fmt.Sprintf("http://%s:%s", i.Host, i.Port),
+		Username:  i.Username,
+		Password:  i.Password,
+		UserAgent: "vault-influxdb-plugin",
+		Timeout:   i.connectTimeout,
+	}
+
+	if i.TLS {
+		tlsConfig := &tls.Config{}
+		if len(i.certificate) > 0 || len(i.issuingCA) > 0 {
+			if len(i.certificate) > 0 && len(i.privateKey) == 0 {
+				return nil, fmt.Errorf("found certificate for TLS authentication but no private key")
+			}
+
+			certBundle := &certutil.CertBundle{}
+			if len(i.certificate) > 0 {
+				certBundle.Certificate = i.certificate
+				certBundle.PrivateKey = i.privateKey
+			}
+			if len(i.issuingCA) > 0 {
+				certBundle.IssuingCA = i.issuingCA
+			}
+
+			parsedCertBundle, err := certBundle.ToParsedCertBundle()
+			if err != nil {
+				return nil, fmt.Errorf("failed to parse certificate bundle: %w", err)
+			}
+
+			tlsConfig, err = parsedCertBundle.GetTLSConfig(certutil.TLSClient)
+			if err != nil || tlsConfig == nil {
+				return nil, fmt.Errorf("failed to get TLS configuration: tlsConfig:%#v err:%w", tlsConfig, err)
+			}
+		}
+
+		tlsConfig.InsecureSkipVerify = i.InsecureTLS
+
+		if i.TLSMinVersion != "" {
+			var ok bool
+			tlsConfig.MinVersion, ok = tlsutil.TLSLookup[i.TLSMinVersion]
+			if !ok {
+				return nil, fmt.Errorf("invalid 'tls_min_version' in config")
+			}
+		} else {
+			// MinVersion was not being set earlier. Reset it to
+			// zero to gracefully handle upgrades.
+			tlsConfig.MinVersion = 0
+		}
+
+		clientConfig.TLSConfig = tlsConfig
+		clientConfig.Addr = fmt.Sprintf("https://%s:%s", i.Host, i.Port)
+	}
+
+	cli, err := influx.NewHTTPClient(clientConfig)
+	if err != nil {
+		return nil, fmt.Errorf("error creating client: %w", err)
+	}
+
+	// Checking server status
+	_, _, err = cli.Ping(i.connectTimeout)
+	if err != nil {
+		return nil, fmt.Errorf("error checking cluster status: %w", err)
+	}
+
+	// Verify that the provided user has admin privileges on the server
+	isAdmin, err := isUserAdmin(cli, i.Username)
+	if err != nil {
+		return nil, fmt.Errorf("error checking if provided username is admin: %w", err)
+	}
+	if !isAdmin {
+		return nil, fmt.Errorf("the provided user is not an admin of the InfluxDB server")
+	}
+
+	return cli, nil
+}
+
+func (i *influxdbConnectionProducer) secretValues() map[string]string {
+	return map[string]string{
+		i.Password:  "[password]",
+		i.PemBundle: "[pem_bundle]",
+		i.PemJSON:   "[pem_json]",
+	}
+}
+
+func isUserAdmin(cli influx.Client, user string) (bool, error) {
+	q := influx.NewQuery("SHOW USERS", "", "")
+	response, err := cli.Query(q)
+	if err != nil {
+		return false, err
+	}
+	if response == nil {
+		return false, fmt.Errorf("empty response")
+	}
+	if response.Error() != nil {
+		return false, response.Error()
+	}
+	for _, res := range response.Results {
+		for _, serie := range res.Series {
+			for _, val := range serie.Values {
+				if val[0].(string) == user && val[1].(bool) {
+					return true, nil
+				}
+			}
+		}
+	}
+	return false, fmt.Errorf("the provided username is not a valid user in the InfluxDB server")
+}
diff --git a/plugins/database/influxdb/influxdb-database-plugin/main.go b/plugins/database/influxdb/influxdb-database-plugin/main.go
new file mode 100644
index 0000000..bfc94f7
--- /dev/null
+++ b/plugins/database/influxdb/influxdb-database-plugin/main.go
@@ -0,0 +1,27 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package main
+
+import (
+	"log"
+	"os"
+
+	"github.com/hashicorp/vault/plugins/database/influxdb"
+	"github.com/hashicorp/vault/sdk/database/dbplugin/v5"
+)
+
+func main() {
+	err := Run()
+	if err != nil {
+		log.Println(err)
+		os.Exit(1)
+	}
+}
+
+// Run instantiates an Influxdb object, and runs the RPC server for the plugin
+func Run() error {
+	dbplugin.ServeMultiplex(influxdb.New)
+
+	return nil
+}
diff --git a/plugins/database/influxdb/influxdb.go b/plugins/database/influxdb/influxdb.go
new file mode 100644
index 0000000..f216319
--- /dev/null
+++ b/plugins/database/influxdb/influxdb.go
@@ -0,0 +1,266 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package influxdb
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	multierror "github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/go-secure-stdlib/strutil"
+	dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
+	"github.com/hashicorp/vault/sdk/database/helper/dbutil"
+	"github.com/hashicorp/vault/sdk/helper/template"
+	influx "github.com/influxdata/influxdb1-client/v2"
+)
+
+const (
+	defaultUserCreationIFQL           = `CREATE USER "{{username}}" WITH PASSWORD '{{password}}';`
+	defaultUserDeletionIFQL           = `DROP USER "{{username}}";`
+	defaultRootCredentialRotationIFQL = `SET PASSWORD FOR "{{username}}" = '{{password}}';`
+	influxdbTypeName                  = "influxdb"
+
+	defaultUserNameTemplate = `{{ printf "v_%s_%s_%s_%s" (.DisplayName | truncate 15) (.RoleName | truncate 15) (random 20) (unix_time) | truncate 100 | replace "-" "_" | lowercase }}`
+)
+
+var _ dbplugin.Database = &Influxdb{}
+
+// Influxdb is an implementation of Database interface
+type Influxdb struct {
+	*influxdbConnectionProducer
+
+	usernameProducer template.StringTemplate
+}
+
+// New returns a new Influxdb instance
+func New() (interface{}, error) {
+	db := new()
+	dbType := dbplugin.NewDatabaseErrorSanitizerMiddleware(db, db.secretValues)
+
+	return dbType, nil
+}
+
+func new() *Influxdb {
+	connProducer := &influxdbConnectionProducer{}
+	connProducer.Type = influxdbTypeName
+
+	return &Influxdb{
+		influxdbConnectionProducer: connProducer,
+	}
+}
+
+// Type returns the TypeName for this backend
+func (i *Influxdb) Type() (string, error) {
+	return influxdbTypeName, nil
+}
+
+func (i *Influxdb) getConnection(ctx context.Context) (influx.Client, error) {
+	cli, err := i.Connection(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	return cli.(influx.Client), nil
+}
+
+func (i *Influxdb) Initialize(ctx context.Context, req dbplugin.InitializeRequest) (resp dbplugin.InitializeResponse, err error) {
+	usernameTemplate, err := strutil.GetString(req.Config, "username_template")
+	if err != nil {
+		return dbplugin.InitializeResponse{}, fmt.Errorf("failed to retrieve username_template: %w", err)
+	}
+	if usernameTemplate == "" {
+		usernameTemplate = defaultUserNameTemplate
+	}
+
+	up, err := template.NewTemplate(template.Template(usernameTemplate))
+	if err != nil {
+		return dbplugin.InitializeResponse{}, fmt.Errorf("unable to initialize username template: %w", err)
+	}
+	i.usernameProducer = up
+
+	_, err = i.usernameProducer.Generate(dbplugin.UsernameMetadata{})
+	if err != nil {
+		return dbplugin.InitializeResponse{}, fmt.Errorf("invalid username template: %w", err)
+	}
+
+	return i.influxdbConnectionProducer.Initialize(ctx, req)
+}
+
+// NewUser generates the username/password on the underlying Influxdb secret backend as instructed by
+// the statements provided.
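+// For illustration, a creation statement can bundle several semicolon-separated
+// IFQL queries (this example mirrors the createUserStatements fixture used in the
+// tests for this plugin); {{username}} and {{password}} are substituted before
+// the queries are executed:
+//
+//	CREATE USER "{{username}}" WITH PASSWORD '{{password}}';
+//	GRANT ALL ON "vault" TO "{{username}}";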
+func (i *Influxdb) NewUser(ctx context.Context, req dbplugin.NewUserRequest) (resp dbplugin.NewUserResponse, err error) {
+	i.Lock()
+	defer i.Unlock()
+
+	cli, err := i.getConnection(ctx)
+	if err != nil {
+		return dbplugin.NewUserResponse{}, fmt.Errorf("unable to get connection: %w", err)
+	}
+
+	creationIFQL := req.Statements.Commands
+	if len(creationIFQL) == 0 {
+		creationIFQL = []string{defaultUserCreationIFQL}
+	}
+
+	rollbackIFQL := req.RollbackStatements.Commands
+	if len(rollbackIFQL) == 0 {
+		rollbackIFQL = []string{defaultUserDeletionIFQL}
+	}
+
+	username, err := i.usernameProducer.Generate(req.UsernameConfig)
+	if err != nil {
+		return dbplugin.NewUserResponse{}, err
+	}
+
+	for _, stmt := range creationIFQL {
+		for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") {
+			query = strings.TrimSpace(query)
+			if len(query) == 0 {
+				continue
+			}
+
+			m := map[string]string{
+				"username": username,
+				"password": req.Password,
+			}
+			qry := influx.NewQuery(dbutil.QueryHelper(query, m), "", "")
+			response, err := cli.Query(qry)
+			// err can be nil while response.Error() is not nil, and response
+			// itself can be nil on a transport failure, so all cases need to
+			// be handled
+			merr := multierror.Append(nil, err)
+			if response != nil {
+				merr = multierror.Append(merr, response.Error())
+			}
+			if merr.ErrorOrNil() != nil {
+				// Attempt rollback only when the response has an error
+				if response != nil && response.Error() != nil {
+					attemptRollback(cli, username, rollbackIFQL)
+				}
+
+				return dbplugin.NewUserResponse{}, fmt.Errorf("failed to run query in InfluxDB: %w", merr)
+			}
+		}
+	}
+	resp = dbplugin.NewUserResponse{
+		Username: username,
+	}
+	return resp, nil
+}
+
+// attemptRollback will attempt to roll back user creation if an error occurs in
+// NewUser
+func attemptRollback(cli influx.Client, username string, rollbackStatements []string) error {
+	for _, stmt := range rollbackStatements {
+		for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") {
+			query = strings.TrimSpace(query)
+
+			if len(query) == 0 {
+				continue
+			}
+			q := influx.NewQuery(dbutil.QueryHelper(query, map[string]string{
+				"username": username,
+			}), "", "")
+
+			response, err := cli.Query(q)
+			// err can be nil while response.Error() is not nil, so both need
+			// to be handled; guard against a nil response as well
+			merr := multierror.Append(nil, err)
+			if response != nil {
+				merr = multierror.Append(merr, response.Error())
+			}
+			if merr.ErrorOrNil() != nil {
+				return merr
+			}
+		}
+	}
+	return nil
+}
+
+func (i *Influxdb) DeleteUser(ctx context.Context, req dbplugin.DeleteUserRequest) (dbplugin.DeleteUserResponse, error) {
+	i.Lock()
+	defer i.Unlock()
+
+	cli, err := i.getConnection(ctx)
+	if err != nil {
+		return dbplugin.DeleteUserResponse{}, fmt.Errorf("unable to get connection: %w", err)
+	}
+
+	revocationIFQL := req.Statements.Commands
+	if len(revocationIFQL) == 0 {
+		revocationIFQL = []string{defaultUserDeletionIFQL}
+	}
+
+	var result *multierror.Error
+	for _, stmt := range revocationIFQL {
+		for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") {
+			query = strings.TrimSpace(query)
+			if len(query) == 0 {
+				continue
+			}
+			m := map[string]string{
+				"username": req.Username,
+			}
+			q := influx.NewQuery(dbutil.QueryHelper(query, m), "", "")
+			response, err := cli.Query(q)
+			result = multierror.Append(result, err)
+			if response != nil {
+				result = multierror.Append(result, response.Error())
+			}
+		}
+	}
+	if result.ErrorOrNil() != nil {
+		return dbplugin.DeleteUserResponse{}, fmt.Errorf("failed to delete user cleanly: %w", result.ErrorOrNil())
+	}
+	return dbplugin.DeleteUserResponse{}, nil
+}
+
+func (i *Influxdb) UpdateUser(ctx context.Context, req dbplugin.UpdateUserRequest) 
(dbplugin.UpdateUserResponse, error) { + if req.Password == nil && req.Expiration == nil { + return dbplugin.UpdateUserResponse{}, fmt.Errorf("no changes requested") + } + + i.Lock() + defer i.Unlock() + + if req.Password != nil { + err := i.changeUserPassword(ctx, req.Username, req.Password) + if err != nil { + return dbplugin.UpdateUserResponse{}, fmt.Errorf("failed to change %q password: %w", req.Username, err) + } + } + // Expiration is a no-op + return dbplugin.UpdateUserResponse{}, nil +} + +func (i *Influxdb) changeUserPassword(ctx context.Context, username string, changePassword *dbplugin.ChangePassword) error { + cli, err := i.getConnection(ctx) + if err != nil { + return fmt.Errorf("unable to get connection: %w", err) + } + + rotateIFQL := changePassword.Statements.Commands + if len(rotateIFQL) == 0 { + rotateIFQL = []string{defaultRootCredentialRotationIFQL} + } + + var result *multierror.Error + for _, stmt := range rotateIFQL { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + m := map[string]string{ + "username": username, + "password": changePassword.NewPassword, + } + q := influx.NewQuery(dbutil.QueryHelper(query, m), "", "") + response, err := cli.Query(q) + result = multierror.Append(result, err) + if response != nil { + result = multierror.Append(result, response.Error()) + } + } + } + + err = result.ErrorOrNil() + if err != nil { + return fmt.Errorf("failed to execute rotation queries: %w", err) + } + + return nil +} diff --git a/plugins/database/influxdb/influxdb_test.go b/plugins/database/influxdb/influxdb_test.go new file mode 100644 index 0000000..a7a8258 --- /dev/null +++ b/plugins/database/influxdb/influxdb_test.go @@ -0,0 +1,484 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package influxdb + +import ( + "context" + "fmt" + "net/url" + "os" + "reflect" + "strconv" + "strings" + "testing" + "time" + + "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing" + "github.com/hashicorp/vault/sdk/helper/docker" + influx "github.com/influxdata/influxdb1-client/v2" + "github.com/stretchr/testify/require" +) + +const createUserStatements = `CREATE USER "{{username}}" WITH PASSWORD '{{password}}';GRANT ALL ON "vault" TO "{{username}}";` + +type Config struct { + docker.ServiceURL + Username string + Password string +} + +var _ docker.ServiceConfig = &Config{} + +func (c *Config) apiConfig() influx.HTTPConfig { + return influx.HTTPConfig{ + Addr: c.URL().String(), + Username: c.Username, + Password: c.Password, + } +} + +func (c *Config) connectionParams() map[string]interface{} { + pieces := strings.Split(c.Address(), ":") + port, _ := strconv.Atoi(pieces[1]) + return map[string]interface{}{ + "host": pieces[0], + "port": port, + "username": c.Username, + "password": c.Password, + } +} + +func prepareInfluxdbTestContainer(t *testing.T) (func(), *Config) { + c := &Config{ + Username: "influx-root", + Password: "influx-root", + } + if host := os.Getenv("INFLUXDB_HOST"); host != "" { + c.ServiceURL = *docker.NewServiceURL(url.URL{Scheme: "http", Host: host}) + return func() {}, c + } + + runner, err := docker.NewServiceRunner(docker.RunOptions{ + ImageRepo: "docker.mirror.hashicorp.services/influxdb", + ContainerName: "influxdb", + ImageTag: "1.8-alpine", + Env: []string{ + "INFLUXDB_DB=vault", + "INFLUXDB_ADMIN_USER=" + c.Username, + "INFLUXDB_ADMIN_PASSWORD=" + c.Password, + "INFLUXDB_HTTP_AUTH_ENABLED=true", + }, + Ports: []string{"8086/tcp"}, + }) + if err != nil { + t.Fatalf("Could not start docker InfluxDB: %s", err) + } + svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { + c.ServiceURL = *docker.NewServiceURL(url.URL{ + Scheme: "http", + Host: fmt.Sprintf("%s:%d", host, port), + }) + cli, err := influx.NewHTTPClient(c.apiConfig()) + if err != nil { + return nil, fmt.Errorf("error creating InfluxDB client: %w", err) + } + defer cli.Close() + _, _, err = cli.Ping(1) + if err != nil { + return nil, fmt.Errorf("error checking cluster status: %w", err) + } + + return c, nil + }) + if err != nil { + t.Fatalf("Could not start docker InfluxDB: %s", err) + } + + return svc.Cleanup, svc.Config.(*Config) +} + +func TestInfluxdb_Initialize(t *testing.T) { + cleanup, config := prepareInfluxdbTestContainer(t) + defer cleanup() + + type testCase struct { + req dbplugin.InitializeRequest + expectedResponse dbplugin.InitializeResponse + expectErr bool + expectInitialized bool + } + + tests := map[string]testCase{ + "port is an int": { + req: dbplugin.InitializeRequest{ + Config: makeConfig(config.connectionParams()), + VerifyConnection: true, + }, + expectedResponse: dbplugin.InitializeResponse{ + Config: config.connectionParams(), + }, + expectErr: false, + expectInitialized: true, + }, + "port is a string": { + req: dbplugin.InitializeRequest{ + Config: makeConfig(config.connectionParams(), "port", strconv.Itoa(config.connectionParams()["port"].(int))), + VerifyConnection: true, + }, + expectedResponse: dbplugin.InitializeResponse{ + Config: makeConfig(config.connectionParams(), "port", strconv.Itoa(config.connectionParams()["port"].(int))), + }, + expectErr: false, + expectInitialized: 
true,
+		},
+		"missing config": {
+			req: dbplugin.InitializeRequest{
+				Config:           nil,
+				VerifyConnection: true,
+			},
+			expectedResponse:  dbplugin.InitializeResponse{},
+			expectErr:         true,
+			expectInitialized: false,
+		},
+		"missing host": {
+			req: dbplugin.InitializeRequest{
+				Config:           makeConfig(config.connectionParams(), "host", ""),
+				VerifyConnection: true,
+			},
+			expectedResponse:  dbplugin.InitializeResponse{},
+			expectErr:         true,
+			expectInitialized: false,
+		},
+		"missing username": {
+			req: dbplugin.InitializeRequest{
+				Config:           makeConfig(config.connectionParams(), "username", ""),
+				VerifyConnection: true,
+			},
+			expectedResponse:  dbplugin.InitializeResponse{},
+			expectErr:         true,
+			expectInitialized: false,
+		},
+		"missing password": {
+			req: dbplugin.InitializeRequest{
+				Config:           makeConfig(config.connectionParams(), "password", ""),
+				VerifyConnection: true,
+			},
+			expectedResponse:  dbplugin.InitializeResponse{},
+			expectErr:         true,
+			expectInitialized: false,
+		},
+		"failed to validate connection": {
+			req: dbplugin.InitializeRequest{
+				// Host exists, but isn't a running instance
+				Config:           makeConfig(config.connectionParams(), "host", "foobar://bad_connection"),
+				VerifyConnection: true,
+			},
+			expectedResponse:  dbplugin.InitializeResponse{},
+			expectErr:         true,
+			expectInitialized: true,
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			db := new()
+			defer dbtesting.AssertClose(t, db)
+
+			resp, err := db.Initialize(context.Background(), test.req)
+			if test.expectErr && err == nil {
+				t.Fatalf("err expected, got nil")
+			}
+			if !test.expectErr && err != nil {
+				t.Fatalf("no error expected, got: %s", err)
+			}
+
+			if !reflect.DeepEqual(resp, test.expectedResponse) {
+				t.Fatalf("Actual response: %#v\nExpected response: %#v", resp, test.expectedResponse)
+			}
+
+			if test.expectInitialized && !db.Initialized {
+				t.Fatalf("Database should be initialized but wasn't")
+			} else if !test.expectInitialized && db.Initialized {
+				t.Fatalf("Database was initialized when it shouldn't have been")
+			}
+		})
+	}
+}
+
+func makeConfig(rootConfig map[string]interface{}, keyValues ...interface{}) map[string]interface{} {
+	if len(keyValues)%2 != 0 {
+		panic("makeConfig must be provided with key and value pairs")
+	}
+
+	// Make a copy of the map so there isn't a chance of test bleedover between maps
+	config := make(map[string]interface{}, len(rootConfig)+(len(keyValues)/2))
+	for k, v := range rootConfig {
+		config[k] = v
+	}
+	for i := 0; i < len(keyValues); i += 2 {
+		k := keyValues[i].(string) // Will panic if the key field isn't a string and that's fine in a test
+		v := keyValues[i+1]
+		config[k] = v
+	}
+	return config
+}
+
+func TestInfluxdb_CreateUser_DefaultUsernameTemplate(t *testing.T) {
+	cleanup, config := prepareInfluxdbTestContainer(t)
+	defer cleanup()
+
+	db := new()
+	req := dbplugin.InitializeRequest{
+		Config:           config.connectionParams(),
+		VerifyConnection: true,
+	}
+	dbtesting.AssertInitialize(t, db, req)
+
+	password := "nuozxby98523u89bdfnkjl"
+	newUserReq := dbplugin.NewUserRequest{
+		UsernameConfig: dbplugin.UsernameMetadata{
+			DisplayName: "token",
+			RoleName:    "mylongrolenamewithmanycharacters",
+		},
+		Statements: dbplugin.Statements{
+			Commands: []string{createUserStatements},
+		},
+		Password:   password,
+		Expiration: time.Now().Add(1 * time.Minute),
+	}
+	resp := dbtesting.AssertNewUser(t, db, newUserReq)
+
+	if resp.Username == "" {
+		t.Fatalf("Missing username")
+	}
+
+	assertCredsExist(t, config.URL().String(), resp.Username, password)
+
+	require.Regexp(t, 
`^v_token_mylongrolenamew_[a-z0-9]{20}_[0-9]{10}$`, resp.Username) +} + +func TestInfluxdb_CreateUser_CustomUsernameTemplate(t *testing.T) { + cleanup, config := prepareInfluxdbTestContainer(t) + defer cleanup() + + db := new() + + conf := config.connectionParams() + conf["username_template"] = "{{.DisplayName}}_{{random 10}}" + + req := dbplugin.InitializeRequest{ + Config: conf, + VerifyConnection: true, + } + dbtesting.AssertInitialize(t, db, req) + + password := "nuozxby98523u89bdfnkjl" + newUserReq := dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "token", + RoleName: "mylongrolenamewithmanycharacters", + }, + Statements: dbplugin.Statements{ + Commands: []string{createUserStatements}, + }, + Password: password, + Expiration: time.Now().Add(1 * time.Minute), + } + resp := dbtesting.AssertNewUser(t, db, newUserReq) + + if resp.Username == "" { + t.Fatalf("Missing username") + } + + assertCredsExist(t, config.URL().String(), resp.Username, password) + + require.Regexp(t, `^token_[a-zA-Z0-9]{10}$`, resp.Username) +} + +func TestUpdateUser_expiration(t *testing.T) { + // This test should end up with a no-op since the expiration doesn't do anything in Influx + + cleanup, config := prepareInfluxdbTestContainer(t) + defer cleanup() + + db := new() + req := dbplugin.InitializeRequest{ + Config: config.connectionParams(), + VerifyConnection: true, + } + dbtesting.AssertInitialize(t, db, req) + + password := "nuozxby98523u89bdfnkjl" + newUserReq := dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "test", + RoleName: "test", + }, + Statements: dbplugin.Statements{ + Commands: []string{createUserStatements}, + }, + Password: password, + Expiration: time.Now().Add(1 * time.Minute), + } + newUserResp := dbtesting.AssertNewUser(t, db, newUserReq) + + assertCredsExist(t, config.URL().String(), newUserResp.Username, password) + + renewReq := dbplugin.UpdateUserRequest{ + Username: newUserResp.Username, + Expiration: &dbplugin.ChangeExpiration{ + NewExpiration: time.Now().Add(5 * time.Minute), + }, + } + dbtesting.AssertUpdateUser(t, db, renewReq) + + // Make sure the user hasn't changed + assertCredsExist(t, config.URL().String(), newUserResp.Username, password) +} + +func TestUpdateUser_password(t *testing.T) { + cleanup, config := prepareInfluxdbTestContainer(t) + defer cleanup() + + db := new() + req := dbplugin.InitializeRequest{ + Config: config.connectionParams(), + VerifyConnection: true, + } + dbtesting.AssertInitialize(t, db, req) + + initialPassword := "nuozxby98523u89bdfnkjl" + newUserReq := dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "test", + RoleName: "test", + }, + Statements: dbplugin.Statements{ + Commands: []string{createUserStatements}, + }, + Password: initialPassword, + Expiration: time.Now().Add(1 * time.Minute), + } + newUserResp := dbtesting.AssertNewUser(t, db, newUserReq) + + assertCredsExist(t, config.URL().String(), newUserResp.Username, initialPassword) + + newPassword := "y89qgmbzadiygry8uazodijnb" + newPasswordReq := dbplugin.UpdateUserRequest{ + Username: newUserResp.Username, + Password: &dbplugin.ChangePassword{ + NewPassword: newPassword, + }, + } + dbtesting.AssertUpdateUser(t, db, newPasswordReq) + + assertCredsDoNotExist(t, config.URL().String(), newUserResp.Username, initialPassword) + assertCredsExist(t, config.URL().String(), newUserResp.Username, newPassword) +} + +// TestInfluxdb_RevokeDeletedUser tests attempting to revoke a user that was +// 
deleted externally. Guards against a panic, see +// https://github.com/hashicorp/vault/issues/6734 +// Updated to attempt to delete a user that never existed to replicate a similar scenario since +// the cleanup function from `prepareInfluxdbTestContainer` does not do anything if using an +// external InfluxDB instance rather than spinning one up for the test. +func TestInfluxdb_RevokeDeletedUser(t *testing.T) { + cleanup, config := prepareInfluxdbTestContainer(t) + defer cleanup() + + db := new() + req := dbplugin.InitializeRequest{ + Config: config.connectionParams(), + VerifyConnection: true, + } + dbtesting.AssertInitialize(t, db, req) + + // attempt to revoke a user that does not exist + delReq := dbplugin.DeleteUserRequest{ + Username: "someuser", + } + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + _, err := db.DeleteUser(ctx, delReq) + if err == nil { + t.Fatalf("Expected err, got nil") + } +} + +func TestInfluxdb_RevokeUser(t *testing.T) { + cleanup, config := prepareInfluxdbTestContainer(t) + defer cleanup() + + db := new() + req := dbplugin.InitializeRequest{ + Config: config.connectionParams(), + VerifyConnection: true, + } + dbtesting.AssertInitialize(t, db, req) + + initialPassword := "nuozxby98523u89bdfnkjl" + newUserReq := dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "test", + RoleName: "test", + }, + Statements: dbplugin.Statements{ + Commands: []string{createUserStatements}, + }, + Password: initialPassword, + Expiration: time.Now().Add(1 * time.Minute), + } + newUserResp := dbtesting.AssertNewUser(t, db, newUserReq) + + assertCredsExist(t, config.URL().String(), newUserResp.Username, initialPassword) + + delReq := dbplugin.DeleteUserRequest{ + Username: newUserResp.Username, + } + dbtesting.AssertDeleteUser(t, db, delReq) + assertCredsDoNotExist(t, config.URL().String(), newUserResp.Username, initialPassword) +} + +func assertCredsExist(t testing.TB, address, username, password string) { + t.Helper() + err := testCredsExist(address, username, password) + if err != nil { + t.Fatalf("Could not log in as %q", username) + } +} + +func assertCredsDoNotExist(t testing.TB, address, username, password string) { + t.Helper() + err := testCredsExist(address, username, password) + if err == nil { + t.Fatalf("Able to log in as %q when it shouldn't", username) + } +} + +func testCredsExist(address, username, password string) error { + conf := influx.HTTPConfig{ + Addr: address, + Username: username, + Password: password, + } + cli, err := influx.NewHTTPClient(conf) + if err != nil { + return fmt.Errorf("Error creating InfluxDB Client: %w", err) + } + defer cli.Close() + _, _, err = cli.Ping(1) + if err != nil { + return fmt.Errorf("error checking server ping: %w", err) + } + q := influx.NewQuery("SHOW SERIES ON vault", "", "") + response, err := cli.Query(q) + if err != nil { + return fmt.Errorf("error querying influxdb server: %w", err) + } + if response != nil && response.Error() != nil { + return fmt.Errorf("error using the correct influx database: %w", response.Error()) + } + return nil +} diff --git a/plugins/database/mongodb/README.md b/plugins/database/mongodb/README.md new file mode 100644 index 0000000..cd252ca --- /dev/null +++ b/plugins/database/mongodb/README.md @@ -0,0 +1,12 @@ +# MongoDB Tests +The test `TestInit_clientTLS` cannot be run within CircleCI in its current form. 
This is because [it's not
+possible to use volume mounting with the docker executor](https://support.circleci.com/hc/en-us/articles/360007324514-How-can-I-mount-volumes-to-docker-containers-).
+
+Because of this, the test is skipped. Running this locally shouldn't present any issues as long as you have
+Docker set up to allow volume mounting from this directory:
+
+```sh
+go test -v -run Init_clientTLS
+```
+
+This might be fixable by adjusting the container's entrypoint or command arguments.
diff --git a/plugins/database/mongodb/cert_helpers_test.go b/plugins/database/mongodb/cert_helpers_test.go
new file mode 100644
index 0000000..9f9388b
--- /dev/null
+++ b/plugins/database/mongodb/cert_helpers_test.go
@@ -0,0 +1,231 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package mongodb
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha1"
+	"crypto/tls"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/pem"
+	"math/big"
+	"strings"
+	"testing"
+	"time"
+)
+
+type certBuilder struct {
+	tmpl       *x509.Certificate
+	parentTmpl *x509.Certificate
+
+	selfSign  bool
+	parentKey *rsa.PrivateKey
+
+	isCA bool
+}
+
+type certOpt func(*certBuilder) error
+
+func commonName(cn string) certOpt {
+	return func(builder *certBuilder) error {
+		builder.tmpl.Subject.CommonName = cn
+		return nil
+	}
+}
+
+func parent(parent certificate) certOpt {
+	return func(builder *certBuilder) error {
+		builder.parentKey = parent.privKey.privKey
+		builder.parentTmpl = parent.template
+		return nil
+	}
+}
+
+func isCA(isCA bool) certOpt {
+	return func(builder *certBuilder) error {
+		builder.isCA = isCA
+		return nil
+	}
+}
+
+func selfSign() certOpt {
+	return func(builder *certBuilder) error {
+		builder.selfSign = true
+		return nil
+	}
+}
+
+func dns(dns ...string) certOpt {
+	return func(builder *certBuilder) error {
+		builder.tmpl.DNSNames = dns
+		return nil
+	}
+}
+
+func newCert(t *testing.T, opts ...certOpt) (cert certificate) {
+	t.Helper()
+
+	builder := certBuilder{
+		tmpl: &x509.Certificate{
+			SerialNumber: makeSerial(t),
+			Subject: pkix.Name{
+				CommonName: makeCommonName(),
+			},
+			NotBefore: time.Now().Add(-1 * time.Hour),
+			NotAfter:  time.Now().Add(1 * time.Hour),
+			IsCA:      false,
+			KeyUsage: x509.KeyUsageDigitalSignature |
+				x509.KeyUsageKeyEncipherment |
+				x509.KeyUsageKeyAgreement,
+			ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
+			BasicConstraintsValid: true,
+		},
+	}
+
+	for _, opt := range opts {
+		err := opt(&builder)
+		if err != nil {
+			t.Fatalf("Failed to set up certificate builder: %s", err)
+		}
+	}
+
+	key := newPrivateKey(t)
+
+	builder.tmpl.SubjectKeyId = getSubjKeyID(t, key.privKey)
+
+	tmpl := builder.tmpl
+	parent := builder.parentTmpl
+	publicKey := key.privKey.Public()
+	signingKey := builder.parentKey
+
+	if builder.selfSign {
+		parent = tmpl
+		signingKey = key.privKey
+	}
+
+	if builder.isCA {
+		tmpl.IsCA = true
+		tmpl.KeyUsage = x509.KeyUsageCertSign | x509.KeyUsageCRLSign
+		tmpl.ExtKeyUsage = nil
+	} else {
+		tmpl.KeyUsage = x509.KeyUsageDigitalSignature |
+			x509.KeyUsageKeyEncipherment |
+			x509.KeyUsageKeyAgreement
+		tmpl.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}
+	}
+
+	certBytes, err := x509.CreateCertificate(rand.Reader, tmpl, parent, publicKey, signingKey)
+	if err != nil {
+		t.Fatalf("Unable to generate certificate: %s", err)
+	}
+	certPem := pem.EncodeToMemory(&pem.Block{
+		Type:  "CERTIFICATE",
+		Bytes: certBytes,
+	})
+
+	tlsCert, err := 
tls.X509KeyPair(certPem, key.pem) + if err != nil { + t.Fatalf("Unable to parse X509 key pair: %s", err) + } + + return certificate{ + template: tmpl, + privKey: key, + tlsCert: tlsCert, + rawCert: certBytes, + pem: certPem, + isCA: builder.isCA, + } +} + +// //////////////////////////////////////////////////////////////////////////// +// Private Key +// //////////////////////////////////////////////////////////////////////////// +type keyWrapper struct { + privKey *rsa.PrivateKey + pem []byte +} + +func newPrivateKey(t *testing.T) (key keyWrapper) { + t.Helper() + + privKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatalf("Unable to generate key for cert: %s", err) + } + + privKeyPem := pem.EncodeToMemory( + &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(privKey), + }, + ) + + key = keyWrapper{ + privKey: privKey, + pem: privKeyPem, + } + + return key +} + +// //////////////////////////////////////////////////////////////////////////// +// Certificate +// //////////////////////////////////////////////////////////////////////////// +type certificate struct { + privKey keyWrapper + template *x509.Certificate + tlsCert tls.Certificate + rawCert []byte + pem []byte + isCA bool +} + +func (cert certificate) CombinedPEM() []byte { + if cert.isCA { + return cert.pem + } + return bytes.Join([][]byte{cert.privKey.pem, cert.pem}, []byte{'\n'}) +} + +// //////////////////////////////////////////////////////////////////////////// +// Helpers +// //////////////////////////////////////////////////////////////////////////// +func makeSerial(t *testing.T) *big.Int { + t.Helper() + + v := &big.Int{} + serialNumberLimit := v.Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + t.Fatalf("Unable to generate serial number: %s", err) + } + return serialNumber +} + +// Pulled from sdk/helper/certutil & slightly modified for test usage +func getSubjKeyID(t *testing.T, privateKey crypto.Signer) []byte { + t.Helper() + + if privateKey == nil { + t.Fatalf("passed-in private key is nil") + } + + marshaledKey, err := x509.MarshalPKIXPublicKey(privateKey.Public()) + if err != nil { + t.Fatalf("error marshalling public key: %s", err) + } + + subjKeyID := sha1.Sum(marshaledKey) + + return subjKeyID[:] +} + +func makeCommonName() (cn string) { + return strings.ReplaceAll(time.Now().Format("20060102T150405.000"), ".", "") +} diff --git a/plugins/database/mongodb/connection_producer.go b/plugins/database/mongodb/connection_producer.go new file mode 100644 index 0000000..4686c3b --- /dev/null +++ b/plugins/database/mongodb/connection_producer.go @@ -0,0 +1,292 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package mongodb + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "encoding/json" + "fmt" + "sync" + "time" + + "github.com/hashicorp/vault/sdk/database/helper/connutil" + "github.com/hashicorp/vault/sdk/database/helper/dbutil" + "github.com/mitchellh/mapstructure" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + "go.mongodb.org/mongo-driver/mongo/readpref" + "go.mongodb.org/mongo-driver/mongo/writeconcern" +) + +// mongoDBConnectionProducer implements ConnectionProducer and provides an +// interface for databases to make connections. 
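+// A hypothetical configuration map (an illustration only; the keys follow the
+// mapstructure tags on the struct below) might look like:
+//
+//	{
+//	  "connection_url": "mongodb://{{username}}:{{password}}@127.0.0.1:27017/admin",
+//	  "username":       "vault-admin",
+//	  "password":       "...",
+//	  "write_concern":  "{ \"wmode\": \"majority\" }"
+//	}
+//
+// The {{username}} and {{password}} placeholders in connection_url are
+// substituted by getConnectionURL before a client is created.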
+type mongoDBConnectionProducer struct {
+	ConnectionURL string `json:"connection_url" structs:"connection_url" mapstructure:"connection_url"`
+	WriteConcern  string `json:"write_concern" structs:"write_concern" mapstructure:"write_concern"`
+
+	Username string `json:"username" structs:"username" mapstructure:"username"`
+	Password string `json:"password" structs:"password" mapstructure:"password"`
+
+	TLSCertificateKeyData []byte `json:"tls_certificate_key" structs:"-" mapstructure:"tls_certificate_key"`
+	TLSCAData             []byte `json:"tls_ca" structs:"-" mapstructure:"tls_ca"`
+
+	SocketTimeout          time.Duration `json:"socket_timeout" structs:"-" mapstructure:"socket_timeout"`
+	ConnectTimeout         time.Duration `json:"connect_timeout" structs:"-" mapstructure:"connect_timeout"`
+	ServerSelectionTimeout time.Duration `json:"server_selection_timeout" structs:"-" mapstructure:"server_selection_timeout"`
+
+	Initialized   bool
+	RawConfig     map[string]interface{}
+	Type          string
+	clientOptions *options.ClientOptions
+	client        *mongo.Client
+	sync.Mutex
+}
+
+// writeConcern defines the write concern options
+type writeConcern struct {
+	W        int    // Min # of servers to ack before success
+	WMode    string // Write mode for MongoDB 2.0+ (e.g. "majority")
+	WTimeout int    // Milliseconds to wait for W before timing out
+	FSync    bool   // DEPRECATED: Is now handled by J. See: https://jira.mongodb.org/browse/CXX-910
+	J        bool   // Sync via the journal if present
+}
+
+func (c *mongoDBConnectionProducer) loadConfig(cfg map[string]interface{}) error {
+	err := mapstructure.WeakDecode(cfg, c)
+	if err != nil {
+		return err
+	}
+
+	if len(c.ConnectionURL) == 0 {
+		return fmt.Errorf("connection_url cannot be empty")
+	}
+
+	if c.SocketTimeout < 0 {
+		return fmt.Errorf("socket_timeout must be >= 0")
+	}
+	if c.ConnectTimeout < 0 {
+		return fmt.Errorf("connect_timeout must be >= 0")
+	}
+	if c.ServerSelectionTimeout < 0 {
+		return fmt.Errorf("server_selection_timeout must be >= 0")
+	}
+
+	opts, err := c.makeClientOpts()
+	if err != nil {
+		return err
+	}
+
+	c.clientOptions = opts
+
+	return nil
+}
+
+// Connection creates or returns an existing database connection. If the session
+// fails on a ping check, the session will be closed and then re-created.
+// This method locks the mutex on its own.
+func (c *mongoDBConnectionProducer) Connection(ctx context.Context) (*mongo.Client, error) {
+	if !c.Initialized {
+		return nil, connutil.ErrNotInitialized
+	}
+
+	c.Mutex.Lock()
+	defer c.Mutex.Unlock()
+
+	if c.client != nil {
+		if err := c.client.Ping(ctx, readpref.Primary()); err == nil {
+			return c.client, nil
+		}
+		// Ignore error on purpose since we want to re-create a session
+		_ = c.client.Disconnect(ctx)
+	}
+
+	client, err := c.createClient(ctx)
+	if err != nil {
+		return nil, err
+	}
+	c.client = client
+	return c.client, nil
+}
+
+func (c *mongoDBConnectionProducer) createClient(ctx context.Context) (client *mongo.Client, err error) {
+	if !c.Initialized {
+		return nil, fmt.Errorf("failed to create client: connection producer is not initialized")
+	}
+	if c.clientOptions == nil {
+		return nil, fmt.Errorf("missing client options")
+	}
+	client, err = mongo.Connect(ctx, options.MergeClientOptions(options.Client().ApplyURI(c.getConnectionURL()), c.clientOptions))
+	if err != nil {
+		return nil, err
+	}
+	return client, nil
+}
+
+// Close terminates the database connection.
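+// Close is safe to call when no client exists; in that case it is a no-op.
+// Otherwise the client is disconnected (bounded by the one-minute timeout
+// below) and cleared, so a later Connection call builds a fresh session.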
+func (c *mongoDBConnectionProducer) Close() error { + c.Lock() + defer c.Unlock() + + if c.client != nil { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + if err := c.client.Disconnect(ctx); err != nil { + return err + } + } + + c.client = nil + + return nil +} + +func (c *mongoDBConnectionProducer) secretValues() map[string]string { + return map[string]string{ + c.Password: "[password]", + } +} + +func (c *mongoDBConnectionProducer) getConnectionURL() (connURL string) { + connURL = dbutil.QueryHelper(c.ConnectionURL, map[string]string{ + "username": c.Username, + "password": c.Password, + }) + return connURL +} + +func (c *mongoDBConnectionProducer) makeClientOpts() (*options.ClientOptions, error) { + writeOpts, err := c.getWriteConcern() + if err != nil { + return nil, err + } + + authOpts, err := c.getTLSAuth() + if err != nil { + return nil, err + } + + timeoutOpts, err := c.timeoutOpts() + if err != nil { + return nil, err + } + + opts := options.MergeClientOptions(writeOpts, authOpts, timeoutOpts) + return opts, nil +} + +func (c *mongoDBConnectionProducer) getWriteConcern() (opts *options.ClientOptions, err error) { + if c.WriteConcern == "" { + return nil, nil + } + + input := c.WriteConcern + + // Try to base64 decode the input. If successful, consider the decoded + // value as input. + inputBytes, err := base64.StdEncoding.DecodeString(input) + if err == nil { + input = string(inputBytes) + } + + concern := &writeConcern{} + err = json.Unmarshal([]byte(input), concern) + if err != nil { + return nil, fmt.Errorf("error unmarshalling write_concern: %w", err) + } + + // Translate write concern to mongo options + var w writeconcern.Option + switch { + case concern.W != 0: + w = writeconcern.W(concern.W) + case concern.WMode != "": + w = writeconcern.WTagSet(concern.WMode) + default: + w = writeconcern.WMajority() + } + + var j writeconcern.Option + switch { + case concern.FSync: + j = writeconcern.J(concern.FSync) + case concern.J: + j = writeconcern.J(concern.J) + default: + j = writeconcern.J(false) + } + + writeConcern := writeconcern.New( + w, + j, + writeconcern.WTimeout(time.Duration(concern.WTimeout)*time.Millisecond)) + + opts = options.Client() + opts.SetWriteConcern(writeConcern) + return opts, nil +} + +func (c *mongoDBConnectionProducer) getTLSAuth() (opts *options.ClientOptions, err error) { + if len(c.TLSCAData) == 0 && len(c.TLSCertificateKeyData) == 0 { + return nil, nil + } + + opts = options.Client() + + tlsConfig := &tls.Config{} + + if len(c.TLSCAData) > 0 { + tlsConfig.RootCAs = x509.NewCertPool() + + ok := tlsConfig.RootCAs.AppendCertsFromPEM(c.TLSCAData) + if !ok { + return nil, fmt.Errorf("failed to append CA to client options") + } + } + + if len(c.TLSCertificateKeyData) > 0 { + certificate, err := tls.X509KeyPair(c.TLSCertificateKeyData, c.TLSCertificateKeyData) + if err != nil { + return nil, fmt.Errorf("unable to load tls_certificate_key_data: %w", err) + } + + opts.SetAuth(options.Credential{ + AuthMechanism: "MONGODB-X509", + Username: c.Username, + }) + + tlsConfig.Certificates = append(tlsConfig.Certificates, certificate) + } + + opts.SetTLSConfig(tlsConfig) + return opts, nil +} + +func (c *mongoDBConnectionProducer) timeoutOpts() (opts *options.ClientOptions, err error) { + opts = options.Client() + + if c.SocketTimeout < 0 { + return nil, fmt.Errorf("socket_timeout must be >= 0") + } + + if c.SocketTimeout == 0 { + opts.SetSocketTimeout(1 * time.Minute) + } else { + opts.SetSocketTimeout(c.SocketTimeout) 
+ } + + if c.ConnectTimeout == 0 { + opts.SetConnectTimeout(1 * time.Minute) + } else { + opts.SetConnectTimeout(c.ConnectTimeout) + } + + if c.ServerSelectionTimeout != 0 { + opts.SetServerSelectionTimeout(c.ServerSelectionTimeout) + } + + return opts, nil +} diff --git a/plugins/database/mongodb/connection_producer_test.go b/plugins/database/mongodb/connection_producer_test.go new file mode 100644 index 0000000..2ce3872 --- /dev/null +++ b/plugins/database/mongodb/connection_producer_test.go @@ -0,0 +1,314 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package mongodb + +import ( + "context" + "fmt" + "io/ioutil" + "net/url" + "os" + paths "path" + "path/filepath" + "reflect" + "sort" + "testing" + "time" + + "github.com/hashicorp/vault/helper/testhelpers/certhelpers" + dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/ory/dockertest" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + "go.mongodb.org/mongo-driver/mongo/readpref" +) + +func TestInit_clientTLS(t *testing.T) { + t.Skip("Skipping this test because CircleCI can't mount the files we need without further investigation: " + + "https://support.circleci.com/hc/en-us/articles/360007324514-How-can-I-mount-volumes-to-docker-containers-") + + // Set up temp directory so we can mount it to the docker container + confDir := makeTempDir(t) + defer os.RemoveAll(confDir) + + // Create certificates for Mongo authentication + caCert := certhelpers.NewCert(t, + certhelpers.CommonName("test certificate authority"), + certhelpers.IsCA(true), + certhelpers.SelfSign(), + ) + serverCert := certhelpers.NewCert(t, + certhelpers.CommonName("server"), + certhelpers.DNS("localhost"), + certhelpers.Parent(caCert), + ) + clientCert := certhelpers.NewCert(t, + certhelpers.CommonName("client"), + certhelpers.DNS("client"), + certhelpers.Parent(caCert), + ) + + writeFile(t, paths.Join(confDir, "ca.pem"), caCert.CombinedPEM(), 0o644) + writeFile(t, paths.Join(confDir, "server.pem"), serverCert.CombinedPEM(), 0o644) + writeFile(t, paths.Join(confDir, "client.pem"), clientCert.CombinedPEM(), 0o644) + + // ////////////////////////////////////////////////////// + // Set up Mongo config file + rawConf := ` +net: + tls: + mode: preferTLS + certificateKeyFile: /etc/mongo/server.pem + CAFile: /etc/mongo/ca.pem + allowInvalidHostnames: true` + + writeFile(t, paths.Join(confDir, "mongod.conf"), []byte(rawConf), 0o644) + + // ////////////////////////////////////////////////////// + // Start Mongo container + retURL, cleanup := startMongoWithTLS(t, "latest", confDir) + defer cleanup() + + // ////////////////////////////////////////////////////// + // Set up x509 user + mClient := connect(t, retURL) + + setUpX509User(t, mClient, clientCert) + + // ////////////////////////////////////////////////////// + // Test + mongo := new() + + initReq := dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": retURL, + "allowed_roles": "*", + "tls_certificate_key": clientCert.CombinedPEM(), + "tls_ca": caCert.Pem, + }, + VerifyConnection: true, + } + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + _, err := mongo.Initialize(ctx, initReq) + if err != nil { + t.Fatalf("Unable to initialize mongo engine: %s", err) + } + + // Initialization complete. 
The connection was established, but we need to ensure
+	// that we're connected as the right user
+	whoamiCmd := map[string]interface{}{
+		"connectionStatus": 1,
+	}
+
+	client, err := mongo.Connection(ctx)
+	if err != nil {
+		t.Fatalf("Unable to make connection to Mongo: %s", err)
+	}
+	result := client.Database("test").RunCommand(ctx, whoamiCmd)
+	if result.Err() != nil {
+		t.Fatalf("Unable to connect to Mongo: %s", result.Err())
+	}
+
+	expected := connStatus{
+		AuthInfo: authInfo{
+			AuthenticatedUsers: []user{
+				{
+					User: fmt.Sprintf("CN=%s", clientCert.Template.Subject.CommonName),
+					DB:   "$external",
+				},
+			},
+			AuthenticatedUserRoles: []role{
+				{
+					Role: "readWrite",
+					DB:   "test",
+				},
+				{
+					Role: "userAdminAnyDatabase",
+					DB:   "admin",
+				},
+			},
+		},
+		Ok: 1,
+	}
+	// Sort the AuthenticatedUserRoles because Mongo doesn't return them in the same order every time
+	// Thanks Mongo! /tableflip
+	sort.Sort(expected.AuthInfo.AuthenticatedUserRoles)
+
+	actual := connStatus{}
+	err = result.Decode(&actual)
+	if err != nil {
+		t.Fatalf("Unable to decode connection status: %s", err)
+	}
+
+	sort.Sort(actual.AuthInfo.AuthenticatedUserRoles)
+
+	if !reflect.DeepEqual(actual, expected) {
+		t.Fatalf("Actual:%#v\nExpected:\n%#v", actual, expected)
+	}
+}
+
+func makeTempDir(t *testing.T) (confDir string) {
+	confDir, err := ioutil.TempDir(".", "mongodb-test-data")
+	if err != nil {
+		t.Fatalf("Unable to make temp directory: %s", err)
+	}
+	// Convert the directory to an absolute path because docker needs it when mounting
+	confDir, err = filepath.Abs(filepath.Clean(confDir))
+	if err != nil {
+		t.Fatalf("Unable to determine where temp directory is on absolute path: %s", err)
+	}
+	return confDir
+}
+
+func startMongoWithTLS(t *testing.T, version string, confDir string) (retURL string, cleanup func()) {
+	if os.Getenv("MONGODB_URL") != "" {
+		return os.Getenv("MONGODB_URL"), func() {}
+	}
+
+	pool, err := dockertest.NewPool("")
+	if err != nil {
+		t.Fatalf("Failed to connect to docker: %s", err)
+	}
+	pool.MaxWait = 30 * time.Second
+
+	containerName := "mongo-unit-test"
+
+	// Remove previously running container if it is still running because cleanup failed
+	err = pool.RemoveContainerByName(containerName)
+	if err != nil {
+		t.Fatalf("Unable to remove old running containers: %s", err)
+	}
+
+	runOpts := &dockertest.RunOptions{
+		Name:       containerName,
+		Repository: "mongo",
+		Tag:        version,
+		Cmd:        []string{"mongod", "--config", "/etc/mongo/mongod.conf"},
+		// Mount the directory from local filesystem into the container
+		Mounts: []string{
+			fmt.Sprintf("%s:/etc/mongo", confDir),
+		},
+	}
+
+	resource, err := pool.RunWithOptions(runOpts)
+	if err != nil {
+		t.Fatalf("Could not start local mongo docker container: %s", err)
+	}
+	resource.Expire(30)
+
+	cleanup = func() {
+		err := pool.Purge(resource)
+		if err != nil {
+			t.Fatalf("Failed to cleanup local container: %s", err)
+		}
+	}
+
+	uri := url.URL{
+		Scheme: "mongodb",
+		Host:   fmt.Sprintf("localhost:%s", resource.GetPort("27017/tcp")),
+	}
+	retURL = uri.String()
+
+	// exponential backoff-retry
+	err = pool.Retry(func() error {
+		ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
+		defer cancel()
+		client, err := mongo.Connect(ctx, options.Client().ApplyURI(retURL))
+		if err != nil {
+			return err
+		}
+		// Disconnect once the ping has completed
+		defer func() {
+			_ = client.Disconnect(ctx)
+		}()
+		return client.Ping(ctx, readpref.Primary())
+	})
+	if err != nil {
+		cleanup()
+		t.Fatalf("Could not connect to mongo docker container: %s", err)
+	}
+
+	return retURL, cleanup
+}
+
+func connect(t *testing.T, uri 
string) (client *mongo.Client) { + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + + client, err := mongo.Connect(ctx, options.Client().ApplyURI(uri)) + if err != nil { + t.Fatalf("Unable to make connection to Mongo: %s", err) + } + + err = client.Ping(ctx, readpref.Primary()) + if err != nil { + t.Fatalf("Failed to ping Mongo server: %s", err) + } + + return client +} + +func setUpX509User(t *testing.T, client *mongo.Client, cert certhelpers.Certificate) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + username := fmt.Sprintf("CN=%s", cert.Template.Subject.CommonName) + + cmd := &createUserCommand{ + Username: username, + Roles: []interface{}{ + mongodbRole{ + Role: "readWrite", + DB: "test", + }, + mongodbRole{ + Role: "userAdminAnyDatabase", + DB: "admin", + }, + }, + } + + result := client.Database("$external").RunCommand(ctx, cmd) + err := result.Err() + if err != nil { + t.Fatalf("Failed to create x509 user in database: %s", err) + } +} + +type connStatus struct { + AuthInfo authInfo `bson:"authInfo"` + Ok int `bson:"ok"` +} + +type authInfo struct { + AuthenticatedUsers []user `bson:"authenticatedUsers"` + AuthenticatedUserRoles roles `bson:"authenticatedUserRoles"` +} + +type user struct { + User string `bson:"user"` + DB string `bson:"db"` +} + +type role struct { + Role string `bson:"role"` + DB string `bson:"db"` +} + +type roles []role + +func (r roles) Len() int { return len(r) } +func (r roles) Less(i, j int) bool { return r[i].Role < r[j].Role } +func (r roles) Swap(i, j int) { r[i], r[j] = r[j], r[i] } + +// //////////////////////////////////////////////////////////////////////////// +// Writing to file +// //////////////////////////////////////////////////////////////////////////// +func writeFile(t *testing.T, filename string, data []byte, perms os.FileMode) { + t.Helper() + + err := ioutil.WriteFile(filename, data, perms) + if err != nil { + t.Fatalf("Unable to write to file [%s]: %s", filename, err) + } +} diff --git a/plugins/database/mongodb/mongodb-database-plugin/main.go b/plugins/database/mongodb/mongodb-database-plugin/main.go new file mode 100644 index 0000000..fe68659 --- /dev/null +++ b/plugins/database/mongodb/mongodb-database-plugin/main.go @@ -0,0 +1,27 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "log" + "os" + + "github.com/hashicorp/vault/plugins/database/mongodb" + "github.com/hashicorp/vault/sdk/database/dbplugin/v5" +) + +func main() { + err := Run() + if err != nil { + log.Println(err) + os.Exit(1) + } +} + +// Run instantiates a MongoDB object, and runs the RPC server for the plugin +func Run() error { + dbplugin.ServeMultiplex(mongodb.New) + + return nil +} diff --git a/plugins/database/mongodb/mongodb.go b/plugins/database/mongodb/mongodb.go new file mode 100644 index 0000000..a291154 --- /dev/null +++ b/plugins/database/mongodb/mongodb.go @@ -0,0 +1,273 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package mongodb + +import ( + "context" + "encoding/json" + "fmt" + "io" + "strings" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-secure-stdlib/strutil" + dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/hashicorp/vault/sdk/database/helper/dbutil" + "github.com/hashicorp/vault/sdk/helper/template" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/readpref" + "go.mongodb.org/mongo-driver/mongo/writeconcern" + "go.mongodb.org/mongo-driver/x/mongo/driver/connstring" +) + +const ( + mongoDBTypeName = "mongodb" + + defaultUserNameTemplate = `{{ printf "v-%s-%s-%s-%s" (.DisplayName | truncate 15) (.RoleName | truncate 15) (random 20) (unix_time) | replace "." "-" | truncate 100 }}` +) + +// MongoDB is an implementation of Database interface +type MongoDB struct { + *mongoDBConnectionProducer + + usernameProducer template.StringTemplate +} + +var _ dbplugin.Database = &MongoDB{} + +// New returns a new MongoDB instance +func New() (interface{}, error) { + db := new() + dbType := dbplugin.NewDatabaseErrorSanitizerMiddleware(db, db.secretValues) + return dbType, nil +} + +func new() *MongoDB { + connProducer := &mongoDBConnectionProducer{ + Type: mongoDBTypeName, + } + + return &MongoDB{ + mongoDBConnectionProducer: connProducer, + } +} + +// Type returns the TypeName for this backend +func (m *MongoDB) Type() (string, error) { + return mongoDBTypeName, nil +} + +func (m *MongoDB) Initialize(ctx context.Context, req dbplugin.InitializeRequest) (dbplugin.InitializeResponse, error) { + m.Lock() + defer m.Unlock() + + m.RawConfig = req.Config + + usernameTemplate, err := strutil.GetString(req.Config, "username_template") + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("failed to retrieve username_template: %w", err) + } + if usernameTemplate == "" { + usernameTemplate = defaultUserNameTemplate + } + + up, err := template.NewTemplate(template.Template(usernameTemplate)) + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("unable to initialize username template: %w", err) + } + m.usernameProducer = up + + _, err = m.usernameProducer.Generate(dbplugin.UsernameMetadata{}) + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("invalid username template: %w", err) + } + + err = m.mongoDBConnectionProducer.loadConfig(req.Config) + if err != nil { + return dbplugin.InitializeResponse{}, err + } + + // Set initialized to true at this point since all fields are set, + // and the connection can be established at a later time. 
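+	// When VerifyConnection is requested, however, a client is created and
+	// pinged immediately below so that configuration problems surface at
+	// setup time; on a failed ping the client is disconnected to avoid
+	// leaking the connection.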
+ m.Initialized = true + + if req.VerifyConnection { + client, err := m.mongoDBConnectionProducer.createClient(ctx) + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("failed to verify connection: %w", err) + } + + err = client.Ping(ctx, readpref.Primary()) + if err != nil { + _ = client.Disconnect(ctx) // Try to prevent any sort of resource leak + return dbplugin.InitializeResponse{}, fmt.Errorf("failed to verify connection: %w", err) + } + m.mongoDBConnectionProducer.client = client + } + + resp := dbplugin.InitializeResponse{ + Config: req.Config, + } + return resp, nil +} + +func (m *MongoDB) NewUser(ctx context.Context, req dbplugin.NewUserRequest) (dbplugin.NewUserResponse, error) { + if len(req.Statements.Commands) == 0 { + return dbplugin.NewUserResponse{}, dbutil.ErrEmptyCreationStatement + } + + username, err := m.usernameProducer.Generate(req.UsernameConfig) + if err != nil { + return dbplugin.NewUserResponse{}, err + } + + // Unmarshal statements.CreationStatements into mongodbRoles + var mongoCS mongoDBStatement + err = json.Unmarshal([]byte(req.Statements.Commands[0]), &mongoCS) + if err != nil { + return dbplugin.NewUserResponse{}, err + } + + // Default to "admin" if no db provided + if mongoCS.DB == "" { + mongoCS.DB = "admin" + } + + if len(mongoCS.Roles) == 0 { + return dbplugin.NewUserResponse{}, fmt.Errorf("roles array is required in creation statement") + } + + createUserCmd := createUserCommand{ + Username: username, + Password: req.Password, + Roles: mongoCS.Roles.toStandardRolesArray(), + } + + if err := m.runCommandWithRetry(ctx, mongoCS.DB, createUserCmd); err != nil { + return dbplugin.NewUserResponse{}, err + } + + resp := dbplugin.NewUserResponse{ + Username: username, + } + return resp, nil +} + +func (m *MongoDB) UpdateUser(ctx context.Context, req dbplugin.UpdateUserRequest) (dbplugin.UpdateUserResponse, error) { + if req.Password != nil { + err := m.changeUserPassword(ctx, req.Username, req.Password.NewPassword) + return dbplugin.UpdateUserResponse{}, err + } + return dbplugin.UpdateUserResponse{}, nil +} + +func (m *MongoDB) changeUserPassword(ctx context.Context, username, password string) error { + connURL := m.getConnectionURL() + cs, err := connstring.Parse(connURL) + if err != nil { + return err + } + + // Currently doesn't support custom statements for changing the user's password + changeUserCmd := &updateUserCommand{ + Username: username, + Password: password, + } + + database := cs.Database + if database == "" { + database = "admin" + } + + err = m.runCommandWithRetry(ctx, database, changeUserCmd) + if err != nil { + return err + } + + return nil +} + +func (m *MongoDB) DeleteUser(ctx context.Context, req dbplugin.DeleteUserRequest) (dbplugin.DeleteUserResponse, error) { + // If no revocation statements provided, pass in empty JSON + var revocationStatement string + switch len(req.Statements.Commands) { + case 0: + revocationStatement = `{}` + case 1: + revocationStatement = req.Statements.Commands[0] + default: + return dbplugin.DeleteUserResponse{}, fmt.Errorf("expected 0 or 1 revocation statements, got %d", len(req.Statements.Commands)) + } + + // Unmarshal revocation statements into mongodbRoles + var mongoCS mongoDBStatement + err := json.Unmarshal([]byte(revocationStatement), &mongoCS) + if err != nil { + return dbplugin.DeleteUserResponse{}, err + } + + db := mongoCS.DB + // If db is not specified, use the default authenticationDatabase "admin" + if db == "" { + db = "admin" + } + + // Set the write concern. 
The default is majority. + writeConcern := writeconcern.New(writeconcern.WMajority()) + opts, err := m.getWriteConcern() + if err != nil { + return dbplugin.DeleteUserResponse{}, err + } + if opts != nil { + writeConcern = opts.WriteConcern + } + + dropUserCmd := &dropUserCommand{ + Username: req.Username, + WriteConcern: writeConcern, + } + + err = m.runCommandWithRetry(ctx, db, dropUserCmd) + cErr, ok := err.(mongo.CommandError) + if ok && cErr.Name == "UserNotFound" { // User already removed, don't retry needlessly + log.Default().Warn("MongoDB user was deleted prior to lease revocation", "user", req.Username) + return dbplugin.DeleteUserResponse{}, nil + } + + return dbplugin.DeleteUserResponse{}, err +} + +// runCommandWithRetry runs a command and retries once more if there's a failure +// on the first attempt. This should be called with the lock held +func (m *MongoDB) runCommandWithRetry(ctx context.Context, db string, cmd interface{}) error { + // Get the client + client, err := m.Connection(ctx) + if err != nil { + return err + } + + // Run command + result := client.Database(db).RunCommand(ctx, cmd, nil) + + // Error check on the first attempt + err = result.Err() + switch { + case err == nil: + return nil + case err == io.EOF, strings.Contains(err.Error(), "EOF"): + // Call getConnection to reset and retry query if we get an EOF error on first attempt. + client, err = m.Connection(ctx) + if err != nil { + return err + } + result = client.Database(db).RunCommand(ctx, cmd, nil) + if err := result.Err(); err != nil { + return err + } + default: + return err + } + + return nil +} diff --git a/plugins/database/mongodb/mongodb_test.go b/plugins/database/mongodb/mongodb_test.go new file mode 100644 index 0000000..a0b513e --- /dev/null +++ b/plugins/database/mongodb/mongodb_test.go @@ -0,0 +1,531 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package mongodb + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "net/http" + "reflect" + "strings" + "sync" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/vault/helper/testhelpers/certhelpers" + "github.com/hashicorp/vault/helper/testhelpers/mongodb" + dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing" + "github.com/stretchr/testify/require" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + "go.mongodb.org/mongo-driver/mongo/readpref" +) + +const ( + mongoAdminRole = `{ "db": "admin", "roles": [ { "role": "readWrite" } ] }` + mongoTestDBAdminRole = `{ "db": "test", "roles": [ { "role": "readWrite" } ] }` +) + +func TestMongoDB_Initialize(t *testing.T) { + cleanup, connURL := mongodb.PrepareTestContainer(t, "latest") + defer cleanup() + + db := new() + defer dbtesting.AssertClose(t, db) + + config := map[string]interface{}{ + "connection_url": connURL, + } + + // Make a copy since the original map could be modified by the Initialize call + expectedConfig := copyConfig(config) + + req := dbplugin.InitializeRequest{ + Config: config, + VerifyConnection: true, + } + + resp := dbtesting.AssertInitialize(t, db, req) + + if !reflect.DeepEqual(resp.Config, expectedConfig) { + t.Fatalf("Actual config: %#v\nExpected config: %#v", resp.Config, expectedConfig) + } + + if !db.Initialized { + t.Fatal("Database should be initialized") + } +} + +func TestNewUser_usernameTemplate(t *testing.T) { + type testCase struct { + usernameTemplate string + + newUserReq dbplugin.NewUserRequest + expectedUsernameRegex string + } + + tests := map[string]testCase{ + "default username template": { + usernameTemplate: "", + + newUserReq: dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "token", + RoleName: "testrolenamewithmanycharacters", + }, + Statements: dbplugin.Statements{ + Commands: []string{mongoAdminRole}, + }, + Password: "98yq3thgnakjsfhjkl", + Expiration: time.Now().Add(time.Minute), + }, + + expectedUsernameRegex: "^v-token-testrolenamewit-[a-zA-Z0-9]{20}-[0-9]{10}$", + }, + "default username template with invalid chars": { + usernameTemplate: "", + + newUserReq: dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "a.bad.account", + RoleName: "a.bad.role", + }, + Statements: dbplugin.Statements{ + Commands: []string{mongoAdminRole}, + }, + Password: "98yq3thgnakjsfhjkl", + Expiration: time.Now().Add(time.Minute), + }, + + expectedUsernameRegex: "^v-a-bad-account-a-bad-role-[a-zA-Z0-9]{20}-[0-9]{10}$", + }, + "custom username template": { + usernameTemplate: "{{random 2 | uppercase}}_{{unix_time}}_{{.RoleName | uppercase}}_{{.DisplayName | uppercase}}", + + newUserReq: dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "token", + RoleName: "testrolenamewithmanycharacters", + }, + Statements: dbplugin.Statements{ + Commands: []string{mongoAdminRole}, + }, + Password: "98yq3thgnakjsfhjkl", + Expiration: time.Now().Add(time.Minute), + }, + + expectedUsernameRegex: "^[A-Z0-9]{2}_[0-9]{10}_TESTROLENAMEWITHMANYCHARACTERS_TOKEN$", + }, + "admin in test database username template": { + usernameTemplate: "", + + newUserReq: dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "token", + RoleName: "testrolenamewithmanycharacters", + }, + 
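// This case exercises a non-admin authSource: the test loop below appends "/test?authSource=test" to the connection URL for it.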
Statements: dbplugin.Statements{ + Commands: []string{mongoTestDBAdminRole}, + }, + Password: "98yq3thgnakjsfhjkl", + Expiration: time.Now().Add(time.Minute), + }, + + expectedUsernameRegex: "^v-token-testrolenamewit-[a-zA-Z0-9]{20}-[0-9]{10}$", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + cleanup, connURL := mongodb.PrepareTestContainer(t, "latest") + defer cleanup() + + if name == "admin in test database username template" { + connURL = connURL + "/test?authSource=test" + } + + db := new() + defer dbtesting.AssertClose(t, db) + + initReq := dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + "username_template": test.usernameTemplate, + }, + VerifyConnection: true, + } + dbtesting.AssertInitialize(t, db, initReq) + + ctx := context.Background() + newUserResp, err := db.NewUser(ctx, test.newUserReq) + require.NoError(t, err) + require.Regexp(t, test.expectedUsernameRegex, newUserResp.Username) + + assertCredsExist(t, newUserResp.Username, test.newUserReq.Password, connURL) + }) + } +} + +func TestMongoDB_CreateUser(t *testing.T) { + cleanup, connURL := mongodb.PrepareTestContainer(t, "latest") + defer cleanup() + + db := new() + defer dbtesting.AssertClose(t, db) + + initReq := dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + }, + VerifyConnection: true, + } + dbtesting.AssertInitialize(t, db, initReq) + + password := "myreallysecurepassword" + createReq := dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "test", + RoleName: "test", + }, + Statements: dbplugin.Statements{ + Commands: []string{mongoAdminRole}, + }, + Password: password, + Expiration: time.Now().Add(time.Minute), + } + createResp := dbtesting.AssertNewUser(t, db, createReq) + + assertCredsExist(t, createResp.Username, password, connURL) +} + +func TestMongoDB_CreateUser_writeConcern(t *testing.T) { + cleanup, connURL := mongodb.PrepareTestContainer(t, "latest") + defer cleanup() + + initReq := dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + "write_concern": `{ "wmode": "majority", "wtimeout": 5000 }`, + }, + VerifyConnection: true, + } + + db := new() + defer dbtesting.AssertClose(t, db) + + dbtesting.AssertInitialize(t, db, initReq) + + password := "myreallysecurepassword" + createReq := dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "test", + RoleName: "test", + }, + Statements: dbplugin.Statements{ + Commands: []string{mongoAdminRole}, + }, + Password: password, + Expiration: time.Now().Add(time.Minute), + } + createResp := dbtesting.AssertNewUser(t, db, createReq) + + assertCredsExist(t, createResp.Username, password, connURL) +} + +func TestMongoDB_DeleteUser(t *testing.T) { + cleanup, connURL := mongodb.PrepareTestContainer(t, "latest") + defer cleanup() + + db := new() + defer dbtesting.AssertClose(t, db) + + initReq := dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + }, + VerifyConnection: true, + } + dbtesting.AssertInitialize(t, db, initReq) + + password := "myreallysecurepassword" + createReq := dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "test", + RoleName: "test", + }, + Statements: dbplugin.Statements{ + Commands: []string{mongoAdminRole}, + }, + Password: password, + Expiration: time.Now().Add(time.Minute), + } + createResp := dbtesting.AssertNewUser(t, db, createReq) + assertCredsExist(t, 
createResp.Username, password, connURL) + + // Test default revocation statement + delReq := dbplugin.DeleteUserRequest{ + Username: createResp.Username, + } + + dbtesting.AssertDeleteUser(t, db, delReq) + + assertCredsDoNotExist(t, createResp.Username, password, connURL) +} + +func TestMongoDB_UpdateUser_Password(t *testing.T) { + cleanup, connURL := mongodb.PrepareTestContainer(t, "latest") + defer cleanup() + + // The docker test method PrepareTestContainer defaults to a database "test" + // if none is provided + connURL = connURL + "/test" + db := new() + defer dbtesting.AssertClose(t, db) + + initReq := dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + }, + VerifyConnection: true, + } + dbtesting.AssertInitialize(t, db, initReq) + + // create the database user in advance, and test the connection + dbUser := "testmongouser" + startingPassword := "password" + createDBUser(t, connURL, "test", dbUser, startingPassword) + + newPassword := "myreallysecurecredentials" + + updateReq := dbplugin.UpdateUserRequest{ + Username: dbUser, + Password: &dbplugin.ChangePassword{ + NewPassword: newPassword, + }, + } + dbtesting.AssertUpdateUser(t, db, updateReq) + + assertCredsExist(t, dbUser, newPassword, connURL) +} + +func TestMongoDB_RotateRoot_NonAdminDB(t *testing.T) { + cleanup, connURL := mongodb.PrepareTestContainer(t, "latest") + defer cleanup() + + connURL = connURL + "/test?authSource=test" + db := new() + defer dbtesting.AssertClose(t, db) + + initReq := dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + }, + VerifyConnection: true, + } + dbtesting.AssertInitialize(t, db, initReq) + + dbUser := "testmongouser" + startingPassword := "password" + createDBUser(t, connURL, "test", dbUser, startingPassword) + + newPassword := "myreallysecurecredentials" + + updateReq := dbplugin.UpdateUserRequest{ + Username: dbUser, + Password: &dbplugin.ChangePassword{ + NewPassword: newPassword, + }, + } + dbtesting.AssertUpdateUser(t, db, updateReq) + + assertCredsExist(t, dbUser, newPassword, connURL) +} + +func TestGetTLSAuth(t *testing.T) { + ca := certhelpers.NewCert(t, + certhelpers.CommonName("certificate authority"), + certhelpers.IsCA(true), + certhelpers.SelfSign(), + ) + cert := certhelpers.NewCert(t, + certhelpers.CommonName("test cert"), + certhelpers.Parent(ca), + ) + + type testCase struct { + username string + tlsCAData []byte + tlsKeyData []byte + + expectOpts *options.ClientOptions + expectErr bool + } + + tests := map[string]testCase{ + "no TLS data set": { + expectOpts: nil, + expectErr: false, + }, + "bad CA": { + tlsCAData: []byte("foobar"), + + expectOpts: nil, + expectErr: true, + }, + "bad key": { + tlsKeyData: []byte("foobar"), + + expectOpts: nil, + expectErr: true, + }, + "good ca": { + tlsCAData: cert.Pem, + + expectOpts: options.Client(). + SetTLSConfig( + &tls.Config{ + RootCAs: appendToCertPool(t, x509.NewCertPool(), cert.Pem), + }, + ), + expectErr: false, + }, + "good key": { + username: "unittest", + tlsKeyData: cert.CombinedPEM(), + + expectOpts: options.Client(). + SetTLSConfig( + &tls.Config{ + Certificates: []tls.Certificate{cert.TLSCert}, + }, + ). 
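// A client key alone implies X509 authentication, so the expected options must also carry MONGODB-X509 credentials: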
+ SetAuth(options.Credential{ + AuthMechanism: "MONGODB-X509", + Username: "unittest", + }), + expectErr: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + c := new() + c.Username = test.username + c.TLSCAData = test.tlsCAData + c.TLSCertificateKeyData = test.tlsKeyData + + actual, err := c.getTLSAuth() + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + assertDeepEqual(t, test.expectOpts, actual) + }) + } +} + +func appendToCertPool(t *testing.T, pool *x509.CertPool, caPem []byte) *x509.CertPool { + t.Helper() + + ok := pool.AppendCertsFromPEM(caPem) + if !ok { + t.Fatalf("Unable to append cert to cert pool") + } + return pool +} + +var cmpClientOptionsOpts = cmp.Options{ + cmpopts.IgnoreTypes(http.Transport{}), + + cmp.AllowUnexported(options.ClientOptions{}), + + cmp.AllowUnexported(tls.Config{}), + cmpopts.IgnoreTypes(sync.Mutex{}, sync.RWMutex{}), + + // 'lazyCerts' has a func field which can't be compared. + cmpopts.IgnoreFields(x509.CertPool{}, "lazyCerts"), + cmp.AllowUnexported(x509.CertPool{}), +} + +// Need a special comparison for ClientOptions because reflect.DeepEquals won't work in Go 1.16. +// See: https://github.com/golang/go/issues/45891 +func assertDeepEqual(t *testing.T, a, b *options.ClientOptions) { + t.Helper() + + if diff := cmp.Diff(a, b, cmpClientOptionsOpts); diff != "" { + t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff) + } +} + +func createDBUser(t testing.TB, connURL, db, username, password string) { + t.Helper() + + ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) + client, err := mongo.Connect(ctx, options.Client().ApplyURI(connURL)) + if err != nil { + t.Fatal(err) + } + + createUserCmd := &createUserCommand{ + Username: username, + Password: password, + Roles: []interface{}{}, + } + result := client.Database(db).RunCommand(ctx, createUserCmd, nil) + if result.Err() != nil { + t.Fatalf("failed to create user in mongodb: %s", result.Err()) + } + + assertCredsExist(t, username, password, connURL) +} + +func assertCredsExist(t testing.TB, username, password, connURL string) { + t.Helper() + + connURL = strings.Replace(connURL, "mongodb://", fmt.Sprintf("mongodb://%s:%s@", username, password), 1) + + ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) + client, err := mongo.Connect(ctx, options.Client().ApplyURI(connURL)) + if err != nil { + t.Fatalf("Failed to connect to mongo: %s", err) + } + + err = client.Ping(ctx, readpref.Primary()) + if err != nil { + t.Fatalf("Failed to ping mongo with user %q: %s", username, err) + } +} + +func assertCredsDoNotExist(t testing.TB, username, password, connURL string) { + t.Helper() + + connURL = strings.Replace(connURL, "mongodb://", fmt.Sprintf("mongodb://%s:%s@", username, password), 1) + + ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) + client, err := mongo.Connect(ctx, options.Client().ApplyURI(connURL)) + if err != nil { + return // Creds don't exist as expected + } + + err = client.Ping(ctx, readpref.Primary()) + if err != nil { + return // Creds don't exist as expected + } + t.Fatalf("User %q exists and was able to authenticate", username) +} + +func copyConfig(config map[string]interface{}) map[string]interface{} { + newConfig := map[string]interface{}{} + for k, v := range config { + newConfig[k] = v + } + return newConfig +} diff --git 
a/plugins/database/mongodb/util.go b/plugins/database/mongodb/util.go
new file mode 100644
index 0000000..be58421
--- /dev/null
+++ b/plugins/database/mongodb/util.go
@@ -0,0 +1,55 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package mongodb
+
+import "go.mongodb.org/mongo-driver/mongo/writeconcern"
+
+type createUserCommand struct {
+	Username string        `bson:"createUser"`
+	Password string        `bson:"pwd,omitempty"`
+	Roles    []interface{} `bson:"roles"`
+}
+
+type updateUserCommand struct {
+	Username string `bson:"updateUser"`
+	Password string `bson:"pwd"`
+}
+
+type dropUserCommand struct {
+	Username     string                     `bson:"dropUser"`
+	WriteConcern *writeconcern.WriteConcern `bson:"writeConcern"`
+}
+
+type mongodbRole struct {
+	Role string `json:"role" bson:"role"`
+	DB   string `json:"db" bson:"db"`
+}
+
+type mongodbRoles []mongodbRole
+
+type mongoDBStatement struct {
+	DB    string       `json:"db"`
+	Roles mongodbRoles `json:"roles"`
+}
+
+// Convert array of role documents like:
+//
+//	[ { "role": "readWrite" }, { "role": "readWrite", "db": "test" } ]
+//
+// into a "standard" MongoDB roles array containing both strings and role documents:
+//
+//	[ "readWrite", { "role": "readWrite", "db": "test" } ]
+//
+// MongoDB's createUser command accepts the latter.
+func (roles mongodbRoles) toStandardRolesArray() []interface{} {
+	var standardRolesArray []interface{}
+	for _, role := range roles {
+		if role.DB == "" {
+			standardRolesArray = append(standardRolesArray, role.Role)
+		} else {
+			standardRolesArray = append(standardRolesArray, role)
+		}
+	}
+	return standardRolesArray
+}
diff --git a/plugins/database/mssql/README.md b/plugins/database/mssql/README.md
new file mode 100644
index 0000000..a6d60dd
--- /dev/null
+++ b/plugins/database/mssql/README.md
@@ -0,0 +1,24 @@
+# Testing
+
+To run these tests, first start MSSQL in Docker. Please make sure to review the EULA before
+accepting it, as it includes limits on the number of users per company who can use the
+image and on how it can be used in testing.
+
+```
+sudo docker run -e 'ACCEPT_EULA=Y' -e 'SA_PASSWORD=<YourStrong!Passw0rd>' \
+    -p 1433:1433 --name sql1 \
+    -d mcr.microsoft.com/mssql/server:2017-latest
+```
+
+Then use the following env variables for testing:
+
+```
+export VAULT_ACC=1
+export MSSQL_URL="sqlserver://SA:%3CYourStrong%21Passw0rd%3E@localhost:1433"
+```
+
+Note that the SA password passed into the Docker container and the one passed into the tests
+are the same password; Go's libraries simply require the URL form to be percent encoded.
+
+Running all the tests at once against one Docker container will likely fail because they interact with
+each other. Consider running one test at a time.
diff --git a/plugins/database/mssql/mssql-database-plugin/main.go b/plugins/database/mssql/mssql-database-plugin/main.go
new file mode 100644
index 0000000..2a57b57
--- /dev/null
+++ b/plugins/database/mssql/mssql-database-plugin/main.go
@@ -0,0 +1,27 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "log" + "os" + + "github.com/hashicorp/vault/plugins/database/mssql" + "github.com/hashicorp/vault/sdk/database/dbplugin/v5" +) + +func main() { + err := Run() + if err != nil { + log.Println(err) + os.Exit(1) + } +} + +// Run instantiates a MSSQL object, and runs the RPC server for the plugin +func Run() error { + dbplugin.ServeMultiplex(mssql.New) + + return nil +} diff --git a/plugins/database/mssql/mssql.go b/plugins/database/mssql/mssql.go new file mode 100644 index 0000000..7c7a4c2 --- /dev/null +++ b/plugins/database/mssql/mssql.go @@ -0,0 +1,434 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package mssql + +import ( + "context" + "database/sql" + "errors" + "fmt" + "strings" + + _ "github.com/denisenkom/go-mssqldb" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-secure-stdlib/strutil" + + "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/hashicorp/vault/sdk/database/helper/connutil" + "github.com/hashicorp/vault/sdk/database/helper/dbutil" + "github.com/hashicorp/vault/sdk/helper/dbtxn" + "github.com/hashicorp/vault/sdk/helper/template" +) + +const ( + msSQLTypeName = "mssql" + + defaultUserNameTemplate = `{{ printf "v-%s-%s-%s-%s" (.DisplayName | truncate 20) (.RoleName | truncate 20) (random 20) (unix_time) | truncate 128 }}` +) + +var _ dbplugin.Database = &MSSQL{} + +// MSSQL is an implementation of Database interface +type MSSQL struct { + *connutil.SQLConnectionProducer + + usernameProducer template.StringTemplate + + // A flag to let us know to skip cross DB queries and server login checks + containedDB bool +} + +func New() (interface{}, error) { + db := new() + // Wrap the plugin with middleware to sanitize errors + dbType := dbplugin.NewDatabaseErrorSanitizerMiddleware(db, db.secretValues) + + return dbType, nil +} + +func new() *MSSQL { + connProducer := &connutil.SQLConnectionProducer{} + connProducer.Type = msSQLTypeName + + return &MSSQL{ + SQLConnectionProducer: connProducer, + } +} + +// Type returns the TypeName for this backend +func (m *MSSQL) Type() (string, error) { + return msSQLTypeName, nil +} + +func (m *MSSQL) secretValues() map[string]string { + return map[string]string{ + m.Password: "[password]", + } +} + +func (m *MSSQL) getConnection(ctx context.Context) (*sql.DB, error) { + db, err := m.Connection(ctx) + if err != nil { + return nil, err + } + + return db.(*sql.DB), nil +} + +func (m *MSSQL) Initialize(ctx context.Context, req dbplugin.InitializeRequest) (dbplugin.InitializeResponse, error) { + newConf, err := m.SQLConnectionProducer.Init(ctx, req.Config, req.VerifyConnection) + if err != nil { + return dbplugin.InitializeResponse{}, err + } + + usernameTemplate, err := strutil.GetString(req.Config, "username_template") + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("failed to retrieve username_template: %w", err) + } + if usernameTemplate == "" { + usernameTemplate = defaultUserNameTemplate + } + + up, err := template.NewTemplate(template.Template(usernameTemplate)) + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("unable to initialize username template: %w", err) + } + m.usernameProducer = up + + _, err = m.usernameProducer.Generate(dbplugin.UsernameMetadata{}) + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("invalid username template - did you reference a field that isn't available? 
: %w", err) + } + + if v, ok := req.Config["contained_db"]; ok { + containedDB, err := parseutil.ParseBool(v) + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf(`invalid value for "contained_db": %w`, err) + } + m.containedDB = containedDB + } + + resp := dbplugin.InitializeResponse{ + Config: newConf, + } + return resp, nil +} + +// NewUser generates the username/password on the underlying MSSQL secret backend as instructed by +// the statements provided. +func (m *MSSQL) NewUser(ctx context.Context, req dbplugin.NewUserRequest) (dbplugin.NewUserResponse, error) { + m.Lock() + defer m.Unlock() + + db, err := m.getConnection(ctx) + if err != nil { + return dbplugin.NewUserResponse{}, fmt.Errorf("unable to get connection: %w", err) + } + + if len(req.Statements.Commands) == 0 { + return dbplugin.NewUserResponse{}, dbutil.ErrEmptyCreationStatement + } + + username, err := m.usernameProducer.Generate(req.UsernameConfig) + if err != nil { + return dbplugin.NewUserResponse{}, err + } + + expirationStr := req.Expiration.Format("2006-01-02 15:04:05-0700") + + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return dbplugin.NewUserResponse{}, err + } + defer tx.Rollback() + + for _, stmt := range req.Statements.Commands { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "name": username, + "password": req.Password, + "expiration": expirationStr, + } + + if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil { + return dbplugin.NewUserResponse{}, err + } + } + } + + if err := tx.Commit(); err != nil { + return dbplugin.NewUserResponse{}, err + } + + resp := dbplugin.NewUserResponse{ + Username: username, + } + + return resp, nil +} + +// DeleteUser attempts to drop the specified user. It will first attempt to disable login, +// then kill pending connections from that user, and finally drop the user and login from the +// database instance. 
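+// When revocation statements are supplied, each semicolon-separated query is
+// executed with {{name}} substituted for the username; for example (a sketch,
+// not a statement shipped with the plugin):
+//
+//	DROP USER IF EXISTS [{{name}}];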
+func (m *MSSQL) DeleteUser(ctx context.Context, req dbplugin.DeleteUserRequest) (dbplugin.DeleteUserResponse, error) { + if len(req.Statements.Commands) == 0 { + err := m.revokeUserDefault(ctx, req.Username) + return dbplugin.DeleteUserResponse{}, err + } + + db, err := m.getConnection(ctx) + if err != nil { + return dbplugin.DeleteUserResponse{}, fmt.Errorf("unable to get connection: %w", err) + } + + merr := &multierror.Error{} + + // Execute each query + for _, stmt := range req.Statements.Commands { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "name": req.Username, + } + if err := dbtxn.ExecuteDBQueryDirect(ctx, db, m, query); err != nil { + merr = multierror.Append(merr, err) + } + } + } + + return dbplugin.DeleteUserResponse{}, merr.ErrorOrNil() +} + +func (m *MSSQL) revokeUserDefault(ctx context.Context, username string) error { + // Get connection + db, err := m.getConnection(ctx) + if err != nil { + return err + } + + // Check if DB is contained + if m.containedDB { + revokeQuery := `DECLARE @stmt nvarchar(max); + SET @stmt = 'DROP USER IF EXISTS ' + QuoteName(@username); + EXEC(@stmt);` + revokeStmt, err := db.PrepareContext(ctx, revokeQuery) + if err != nil { + return err + } + defer revokeStmt.Close() + if _, err := revokeStmt.ExecContext(ctx, sql.Named("username", username)); err != nil { + return err + } + return nil + } + + // First disable server login + disableQuery := `DECLARE @stmt nvarchar(max); + SET @stmt = 'ALTER LOGIN ' + QuoteName(@username) + ' DISABLE'; + EXEC(@stmt);` + disableStmt, err := db.PrepareContext(ctx, disableQuery) + if err != nil { + return err + } + defer disableStmt.Close() + if _, err := disableStmt.ExecContext(ctx, sql.Named("username", username)); err != nil { + return err + } + + // Query for sessions for the login so that we can kill any outstanding + // sessions. 
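(Each one is then terminated with a generated KILL <session_id> statement.)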
There cannot be any active sessions before we drop the logins + // This isn't done in a transaction because even if we fail along the way, + // we want to remove as much access as possible + sessionStmt, err := db.PrepareContext(ctx, + "SELECT session_id FROM sys.dm_exec_sessions WHERE login_name = @p1;") + if err != nil { + return err + } + defer sessionStmt.Close() + + sessionRows, err := sessionStmt.QueryContext(ctx, username) + if err != nil { + return err + } + defer sessionRows.Close() + + var revokeStmts []string + for sessionRows.Next() { + var sessionID int + err = sessionRows.Scan(&sessionID) + if err != nil { + return err + } + revokeStmts = append(revokeStmts, fmt.Sprintf("KILL %d;", sessionID)) + } + + // Query for database users using undocumented stored procedure for now since + // it is the easiest way to get this information; + // we need to drop the database users before we can drop the login and the role + // This isn't done in a transaction because even if we fail along the way, + // we want to remove as much access as possible + stmt, err := db.PrepareContext(ctx, "EXEC master.dbo.sp_msloginmappings @p1;") + if err != nil { + return err + } + defer stmt.Close() + + rows, err := stmt.QueryContext(ctx, username) + if err != nil { + return err + } + defer rows.Close() + + for rows.Next() { + var loginName, dbName, qUsername, aliasName sql.NullString + err = rows.Scan(&loginName, &dbName, &qUsername, &aliasName) + if err != nil { + return err + } + if !dbName.Valid { + continue + } + revokeStmts = append(revokeStmts, fmt.Sprintf(dropUserSQL, dbName.String, username, username)) + } + + // we do not stop on error, as we want to remove as + // many permissions as possible right now + var lastStmtError error + for _, query := range revokeStmts { + if err := dbtxn.ExecuteDBQueryDirect(ctx, db, nil, query); err != nil { + lastStmtError = err + } + } + + // can't drop if not all database users are dropped + if rows.Err() != nil { + return fmt.Errorf("could not generate sql statements for all rows: %w", rows.Err()) + } + if lastStmtError != nil { + return fmt.Errorf("could not perform all sql statements: %w", lastStmtError) + } + + // Drop this login + stmt, err = db.PrepareContext(ctx, dropLoginSQL) + if err != nil { + return err + } + defer stmt.Close() + if _, err := stmt.ExecContext(ctx, sql.Named("username", username)); err != nil { + return err + } + + return nil +} + +func (m *MSSQL) UpdateUser(ctx context.Context, req dbplugin.UpdateUserRequest) (dbplugin.UpdateUserResponse, error) { + if req.Password == nil && req.Expiration == nil { + return dbplugin.UpdateUserResponse{}, fmt.Errorf("no changes requested") + } + if req.Password != nil { + err := m.updateUserPass(ctx, req.Username, req.Password) + return dbplugin.UpdateUserResponse{}, err + } + // Expiration is a no-op + return dbplugin.UpdateUserResponse{}, nil +} + +func (m *MSSQL) updateUserPass(ctx context.Context, username string, changePass *dbplugin.ChangePassword) error { + stmts := changePass.Statements.Commands + if len(stmts) == 0 && !m.containedDB { + stmts = []string{alterLoginSQL} + } + + password := changePass.NewPassword + + if username == "" || password == "" { + return errors.New("must provide both username and password") + } + + m.Lock() + defer m.Unlock() + + db, err := m.getConnection(ctx) + if err != nil { + return err + } + + // Since contained DB users do not have server logins, we + // only query for a login if DB is not a contained DB + if !m.containedDB { + var exists bool + + err = 
db.QueryRowContext(ctx, "SELECT 1 FROM master.sys.server_principals where name = @p1", username).Scan(&exists)
+
+		if err != nil && err != sql.ErrNoRows {
+			return err
+		}
+	}
+
+	tx, err := db.BeginTx(ctx, nil)
+	if err != nil {
+		return err
+	}
+
+	defer func() {
+		_ = tx.Rollback()
+	}()
+
+	for _, stmt := range stmts {
+		for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") {
+			query = strings.TrimSpace(query)
+			if len(query) == 0 {
+				continue
+			}
+
+			m := map[string]string{
+				"name":     username,
+				"username": username,
+				"password": password,
+			}
+			if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil {
+				return fmt.Errorf("failed to execute query: %w", err)
+			}
+		}
+	}
+
+	if err := tx.Commit(); err != nil {
+		return fmt.Errorf("failed to commit transaction: %w", err)
+	}
+
+	return nil
+}
+
+const dropUserSQL = `
+USE [%s]
+IF EXISTS
+  (SELECT name
+   FROM sys.database_principals
+   WHERE name = N'%s')
+BEGIN
+  DROP USER [%s]
+END
+`
+
+const dropLoginSQL = `
+DECLARE @stmt nvarchar(max)
+SET @stmt = 'IF EXISTS (SELECT name FROM [master].[sys].[server_principals] WHERE [name] = ' + QuoteName(@username, '''') + ') ' +
+	'BEGIN ' +
+	'DROP LOGIN ' + QuoteName(@username) + ' ' +
+	'END'
+EXEC (@stmt)`
+
+const alterLoginSQL = `
+ALTER LOGIN [{{username}}] WITH PASSWORD = '{{password}}'
+`
diff --git a/plugins/database/mssql/mssql_test.go b/plugins/database/mssql/mssql_test.go
new file mode 100644
index 0000000..385c5f0
--- /dev/null
+++ b/plugins/database/mssql/mssql_test.go
@@ -0,0 +1,580 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package mssql
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"reflect"
+	"regexp"
+	"strings"
+	"testing"
+	"time"
+
+	mssqlhelper "github.com/hashicorp/vault/helper/testhelpers/mssql"
+	"github.com/hashicorp/vault/sdk/database/dbplugin/v5"
+	dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing"
+	"github.com/hashicorp/vault/sdk/helper/dbtxn"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestInitialize(t *testing.T) {
+	cleanup, connURL := mssqlhelper.PrepareMSSQLTestContainer(t)
+	defer cleanup()
+
+	type testCase struct {
+		req dbplugin.InitializeRequest
+	}
+
+	tests := map[string]testCase{
+		"happy path": {
+			req: dbplugin.InitializeRequest{
+				Config: map[string]interface{}{
+					"connection_url": connURL,
+				},
+				VerifyConnection: true,
+			},
+		},
+		"max_open_connections set": {
+			dbplugin.InitializeRequest{
+				Config: map[string]interface{}{
+					"connection_url":       connURL,
+					"max_open_connections": "5",
+				},
+				VerifyConnection: true,
+			},
+		},
+		"contained_db set": {
+			dbplugin.InitializeRequest{
+				Config: map[string]interface{}{
+					"connection_url": connURL,
+					"contained_db":   true,
+				},
+				VerifyConnection: true,
+			},
+		},
+		"contained_db set string": {
+			dbplugin.InitializeRequest{
+				Config: map[string]interface{}{
+					"connection_url": connURL,
+					"contained_db":   "true",
+				},
+				VerifyConnection: true,
+			},
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			db := new()
+			dbtesting.AssertInitializeCircleCiTest(t, db, test.req)
+			defer dbtesting.AssertClose(t, db)
+
+			if !db.Initialized {
+				t.Fatal("Database should be initialized")
+			}
+		})
+	}
+}
+
+func TestNewUser(t *testing.T) {
+	cleanup, connURL := mssqlhelper.PrepareMSSQLTestContainer(t)
+	defer cleanup()
+
+	type testCase struct {
+		usernameTemplate string
+		req              dbplugin.NewUserRequest
+		usernameRegex    string
+		expectErr        bool
+		assertUser       func(t testing.TB, connURL, username, password string)
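+		// assertUser verifies the expected side effect for each case: the new credentials either work or they do not.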
} + + tests := map[string]testCase{ + "no creation statements": { + req: dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "test", + RoleName: "test", + }, + Statements: dbplugin.Statements{}, + Password: "AG4qagho-dsvZ", + Expiration: time.Now().Add(1 * time.Second), + }, + usernameRegex: "^$", + expectErr: true, + assertUser: assertCredsDoNotExist, + }, + "with creation statements": { + req: dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "test", + RoleName: "test", + }, + Statements: dbplugin.Statements{ + Commands: []string{testMSSQLRole}, + }, + Password: "AG4qagho-dsvZ", + Expiration: time.Now().Add(1 * time.Second), + }, + usernameRegex: "^v-test-test-[a-zA-Z0-9]{20}-[0-9]{10}$", + expectErr: false, + assertUser: assertCredsExist, + }, + "custom username template": { + usernameTemplate: "{{random 10}}_{{.RoleName}}.{{.DisplayName | sha256}}", + req: dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "tokenwithlotsofextracharactershere", + RoleName: "myrolenamewithlotsofextracharacters", + }, + Statements: dbplugin.Statements{ + Commands: []string{testMSSQLRole}, + }, + Password: "AG4qagho-dsvZ", + Expiration: time.Now().Add(1 * time.Second), + }, + usernameRegex: "^[a-zA-Z0-9]{10}_myrolenamewithlotsofextracharacters.80d15d22dba29ddbd4994f8009b5ff7b17922c267eb49fb805a9488bd55d11f9$", + expectErr: false, + assertUser: assertCredsExist, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + usernameRe, err := regexp.Compile(test.usernameRegex) + if err != nil { + t.Fatalf("failed to compile username regex %q: %s", test.usernameRegex, err) + } + + initReq := dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + "username_template": test.usernameTemplate, + }, + VerifyConnection: true, + } + + db := new() + dbtesting.AssertInitializeCircleCiTest(t, db, initReq) + defer dbtesting.AssertClose(t, db) + + createResp, err := db.NewUser(context.Background(), test.req) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + if !usernameRe.MatchString(createResp.Username) { + t.Fatalf("Generated username %q did not match regex %q", createResp.Username, test.usernameRegex) + } + + // Protect against future fields that aren't specified + expectedResp := dbplugin.NewUserResponse{ + Username: createResp.Username, + } + if !reflect.DeepEqual(createResp, expectedResp) { + t.Fatalf("Fields missing from expected response: Actual: %#v", createResp) + } + + test.assertUser(t, connURL, createResp.Username, test.req.Password) + }) + } +} + +func TestUpdateUser_password(t *testing.T) { + type testCase struct { + req dbplugin.UpdateUserRequest + expectErr bool + expectedPassword string + } + + dbUser := "vaultuser" + initPassword := "p4$sw0rd" + + tests := map[string]testCase{ + "missing password": { + req: dbplugin.UpdateUserRequest{ + Username: dbUser, + Password: &dbplugin.ChangePassword{ + NewPassword: "", + Statements: dbplugin.Statements{}, + }, + }, + expectErr: true, + expectedPassword: initPassword, + }, + "empty rotation statements": { + req: dbplugin.UpdateUserRequest{ + Username: dbUser, + Password: &dbplugin.ChangePassword{ + NewPassword: "N90gkKLy8$angf", + Statements: dbplugin.Statements{}, + }, + }, + expectErr: false, + expectedPassword: "N90gkKLy8$angf", + }, + "username rotation": { + req: 
dbplugin.UpdateUserRequest{ + Username: dbUser, + Password: &dbplugin.ChangePassword{ + NewPassword: "N90gkKLy8$angf", + Statements: dbplugin.Statements{ + Commands: []string{ + "ALTER LOGIN [{{username}}] WITH PASSWORD = '{{password}}'", + }, + }, + }, + }, + expectErr: false, + expectedPassword: "N90gkKLy8$angf", + }, + "bad statements": { + req: dbplugin.UpdateUserRequest{ + Username: dbUser, + Password: &dbplugin.ChangePassword{ + NewPassword: "N90gkKLy8$angf", + Statements: dbplugin.Statements{ + Commands: []string{ + "ahosh98asjdffs", + }, + }, + }, + }, + expectErr: true, + expectedPassword: initPassword, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + cleanup, connURL := mssqlhelper.PrepareMSSQLTestContainer(t) + defer cleanup() + + initReq := dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + }, + VerifyConnection: true, + } + + db := new() + dbtesting.AssertInitializeCircleCiTest(t, db, initReq) + defer dbtesting.AssertClose(t, db) + + err := createTestMSSQLUser(connURL, dbUser, initPassword, testMSSQLLogin) + if err != nil { + t.Fatalf("Failed to create user: %s", err) + } + + assertCredsExist(t, connURL, dbUser, initPassword) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + updateResp, err := db.UpdateUser(ctx, test.req) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + // Protect against future fields that aren't specified + expectedResp := dbplugin.UpdateUserResponse{} + if !reflect.DeepEqual(updateResp, expectedResp) { + t.Fatalf("Fields missing from expected response: Actual: %#v", updateResp) + } + + assertCredsExist(t, connURL, dbUser, test.expectedPassword) + + // Delete user at the end of each test + deleteReq := dbplugin.DeleteUserRequest{ + Username: dbUser, + } + + ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + deleteResp, err := db.DeleteUser(ctx, deleteReq) + if err != nil { + t.Fatalf("Failed to delete user: %s", err) + } + + // Protect against future fields that aren't specified + expectedDeleteResp := dbplugin.DeleteUserResponse{} + if !reflect.DeepEqual(deleteResp, expectedDeleteResp) { + t.Fatalf("Fields missing from expected response: Actual: %#v", deleteResp) + } + + assertCredsDoNotExist(t, connURL, dbUser, initPassword) + }) + } +} + +func TestDeleteUser(t *testing.T) { + cleanup, connURL := mssqlhelper.PrepareMSSQLTestContainer(t) + defer cleanup() + + dbUser := "vaultuser" + initPassword := "p4$sw0rd" + + initReq := dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + }, + VerifyConnection: true, + } + + db := new() + + dbtesting.AssertInitializeCircleCiTest(t, db, initReq) + defer dbtesting.AssertClose(t, db) + + err := createTestMSSQLUser(connURL, dbUser, initPassword, testMSSQLLogin) + if err != nil { + t.Fatalf("Failed to create user: %s", err) + } + + assertCredsExist(t, connURL, dbUser, initPassword) + + deleteReq := dbplugin.DeleteUserRequest{ + Username: dbUser, + } + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + deleteResp, err := db.DeleteUser(ctx, deleteReq) + if err != nil { + t.Fatalf("Failed to delete user: %s", err) + } + + // Protect against future fields that aren't specified + expectedResp := dbplugin.DeleteUserResponse{} + if !reflect.DeepEqual(deleteResp, expectedResp) { + 
t.Fatalf("Fields missing from expected response: Actual: %#v", deleteResp) + } + + assertCredsDoNotExist(t, connURL, dbUser, initPassword) +} + +func TestDeleteUserContainedDB(t *testing.T) { + cleanup, connURL := mssqlhelper.PrepareMSSQLTestContainer(t) + defer cleanup() + + dbUser := "vaultuser" + initPassword := "p4$sw0rd" + + initReq := dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + "contained_db": true, + }, + VerifyConnection: true, + } + + db := new() + + dbtesting.AssertInitializeCircleCiTest(t, db, initReq) + defer dbtesting.AssertClose(t, db) + + err := createTestMSSQLUser(connURL, dbUser, initPassword, testMSSQLContainedLogin) + if err != nil { + t.Fatalf("Failed to create user: %s", err) + } + + assertCredsExist(t, connURL, dbUser, initPassword) + + deleteReq := dbplugin.DeleteUserRequest{ + Username: dbUser, + } + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + deleteResp, err := db.DeleteUser(ctx, deleteReq) + if err != nil { + t.Fatalf("Failed to delete user: %s", err) + } + + // Protect against future fields that aren't specified + expectedResp := dbplugin.DeleteUserResponse{} + if !reflect.DeepEqual(deleteResp, expectedResp) { + t.Fatalf("Fields missing from expected response: Actual: %#v", deleteResp) + } + + assertContainedDBCredsDoNotExist(t, connURL, dbUser) +} + +func TestContainedDBSQLSanitization(t *testing.T) { + cleanup, connURL := mssqlhelper.PrepareMSSQLTestContainer(t) + defer cleanup() + + injectionString := "vaultuser]" + dbUser := "vaultuser" + initPassword := "p4$sw0rd" + + initReq := dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + }, + VerifyConnection: true, + } + + db := new() + + dbtesting.AssertInitializeCircleCiTest(t, db, initReq) + defer dbtesting.AssertClose(t, db) + + err := createTestMSSQLUser(connURL, dbUser, initPassword, testMSSQLContainedLogin) + if err != nil { + t.Fatalf("Failed to create user: %s", err) + } + + assertCredsExist(t, connURL, dbUser, initPassword) + + deleteReq := dbplugin.DeleteUserRequest{ + Username: injectionString, + } + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + _, err = db.DeleteUser(ctx, deleteReq) + + assert.EqualError(t, err, "mssql: Cannot alter the login 'vaultuser]', because it does not exist or you do not have permission.") +} + +func TestSQLSanitization(t *testing.T) { + cleanup, connURL := mssqlhelper.PrepareMSSQLTestContainer(t) + defer cleanup() + + injectionString := "vaultuser]" + dbUser := "vaultuser" + initPassword := "p4$sw0rd" + + initReq := dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + }, + VerifyConnection: true, + } + + db := new() + + dbtesting.AssertInitializeCircleCiTest(t, db, initReq) + defer dbtesting.AssertClose(t, db) + + err := createTestMSSQLUser(connURL, dbUser, initPassword, testMSSQLLogin) + if err != nil { + t.Fatalf("Failed to create user: %s", err) + } + + assertCredsExist(t, connURL, dbUser, initPassword) + + deleteReq := dbplugin.DeleteUserRequest{ + Username: injectionString, + } + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + _, err = db.DeleteUser(ctx, deleteReq) + + assert.EqualError(t, err, "mssql: Cannot alter the login 'vaultuser]', because it does not exist or you do not have permission.") +} + +func assertCredsExist(t testing.TB, connURL, username, password string) { + t.Helper() + err := 
testCredsExist(connURL, username, password) + if err != nil { + t.Fatalf("Unable to log in as %q: %s", username, err) + } +} + +func assertCredsDoNotExist(t testing.TB, connURL, username, password string) { + t.Helper() + err := testCredsExist(connURL, username, password) + if err == nil { + t.Fatalf("Able to log in when it shouldn't") + } +} + +func assertContainedDBCredsDoNotExist(t testing.TB, connURL, username string) { + t.Helper() + err := testContainedDBCredsExist(connURL, username) + assert.EqualError(t, err, "mssql: Cannot drop the user 'vaultuser', because it does not exist or you do not have permission.") +} + +func testContainedDBCredsExist(connURL, username string) error { + ctx := context.Background() + // Log in + db, err := sql.Open("mssql", connURL) + if err != nil { + return err + } + defer db.Close() + userStmt, err := db.PrepareContext(ctx, fmt.Sprintf("DROP USER [%s]", username)) + if err != nil { + return err + } + _, err = userStmt.ExecContext(ctx) + defer userStmt.Close() + return err +} + +func testCredsExist(connURL, username, password string) error { + // Log in with the new creds + parts := strings.Split(connURL, "@") + connURL = fmt.Sprintf("sqlserver://%s:%s@%s", username, password, parts[1]) + db, err := sql.Open("mssql", connURL) + if err != nil { + return err + } + defer db.Close() + return db.Ping() +} + +func createTestMSSQLUser(connURL string, username, password, query string) error { + db, err := sql.Open("mssql", connURL) + defer db.Close() + if err != nil { + return err + } + + // Start a transaction + ctx := context.Background() + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return err + } + defer func() { + _ = tx.Rollback() + }() + + m := map[string]string{ + "name": username, + "password": password, + } + if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil { + return err + } + // Commit the transaction + if err := tx.Commit(); err != nil { + return err + } + return nil +} + +const testMSSQLRole = ` +CREATE LOGIN [{{name}}] WITH PASSWORD = '{{password}}'; +CREATE USER [{{name}}] FOR LOGIN [{{name}}]; +GRANT SELECT, INSERT, UPDATE, DELETE ON SCHEMA::dbo TO [{{name}}];` + +const testMSSQLLogin = ` +CREATE LOGIN [{{name}}] WITH PASSWORD = '{{password}}'; +` + +const testMSSQLContainedLogin = ` +CREATE LOGIN [{{name}}] WITH PASSWORD = '{{password}}'; +CREATE USER [{{name}}] FOR LOGIN [{{name}}]; +` diff --git a/plugins/database/mysql/connection_producer.go b/plugins/database/mysql/connection_producer.go new file mode 100644 index 0000000..5c59792 --- /dev/null +++ b/plugins/database/mysql/connection_producer.go @@ -0,0 +1,232 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package mysql + +import ( + "context" + "crypto/tls" + "crypto/x509" + "database/sql" + "fmt" + "net/url" + "sync" + "time" + + "github.com/go-sql-driver/mysql" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/sdk/database/helper/connutil" + "github.com/hashicorp/vault/sdk/database/helper/dbutil" + "github.com/mitchellh/mapstructure" +) + +// mySQLConnectionProducer implements ConnectionProducer and provides a generic producer for most sql databases +type mySQLConnectionProducer struct { + ConnectionURL string `json:"connection_url" mapstructure:"connection_url" structs:"connection_url"` + MaxOpenConnections int `json:"max_open_connections" mapstructure:"max_open_connections" structs:"max_open_connections"` + MaxIdleConnections int `json:"max_idle_connections" mapstructure:"max_idle_connections" structs:"max_idle_connections"` + MaxConnectionLifetimeRaw interface{} `json:"max_connection_lifetime" mapstructure:"max_connection_lifetime" structs:"max_connection_lifetime"` + Username string `json:"username" mapstructure:"username" structs:"username"` + Password string `json:"password" mapstructure:"password" structs:"password"` + + TLSCertificateKeyData []byte `json:"tls_certificate_key" mapstructure:"tls_certificate_key" structs:"-"` + TLSCAData []byte `json:"tls_ca" mapstructure:"tls_ca" structs:"-"` + TLSServerName string `json:"tls_server_name" mapstructure:"tls_server_name" structs:"tls_server_name"` + TLSSkipVerify bool `json:"tls_skip_verify" mapstructure:"tls_skip_verify" structs:"tls_skip_verify"` + + // tlsConfigName is a globally unique name that references the TLS config for this instance in the mysql driver + tlsConfigName string + + RawConfig map[string]interface{} + maxConnectionLifetime time.Duration + Initialized bool + db *sql.DB + sync.Mutex +} + +func (c *mySQLConnectionProducer) Initialize(ctx context.Context, conf map[string]interface{}, verifyConnection bool) error { + _, err := c.Init(ctx, conf, verifyConnection) + return err +} + +func (c *mySQLConnectionProducer) Init(ctx context.Context, conf map[string]interface{}, verifyConnection bool) (map[string]interface{}, error) { + c.Lock() + defer c.Unlock() + + c.RawConfig = conf + + err := mapstructure.WeakDecode(conf, &c) + if err != nil { + return nil, err + } + + if len(c.ConnectionURL) == 0 { + return nil, fmt.Errorf("connection_url cannot be empty") + } + + // Don't escape special characters for MySQL password + password := c.Password + + // QueryHelper doesn't do any SQL escaping, but if it starts to do so + // then maybe we won't be able to use it to do URL substitution any more. 
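+	// For example, a connection_url of {{username}}:{{password}}@tcp(localhost:3306)/ is rendered with a path-escaped username and the password left verbatim: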
+ c.ConnectionURL = dbutil.QueryHelper(c.ConnectionURL, map[string]string{ + "username": url.PathEscape(c.Username), + "password": password, + }) + + if c.MaxOpenConnections == 0 { + c.MaxOpenConnections = 4 + } + + if c.MaxIdleConnections == 0 { + c.MaxIdleConnections = c.MaxOpenConnections + } + if c.MaxIdleConnections > c.MaxOpenConnections { + c.MaxIdleConnections = c.MaxOpenConnections + } + if c.MaxConnectionLifetimeRaw == nil { + c.MaxConnectionLifetimeRaw = "0s" + } + + c.maxConnectionLifetime, err = parseutil.ParseDurationSecond(c.MaxConnectionLifetimeRaw) + if err != nil { + return nil, fmt.Errorf("invalid max_connection_lifetime: %w", err) + } + + tlsConfig, err := c.getTLSAuth() + if err != nil { + return nil, err + } + + if tlsConfig != nil { + if c.tlsConfigName == "" { + c.tlsConfigName, err = uuid.GenerateUUID() + if err != nil { + return nil, fmt.Errorf("unable to generate UUID for TLS configuration: %w", err) + } + } + + mysql.RegisterTLSConfig(c.tlsConfigName, tlsConfig) + } + + // Set initialized to true at this point since all fields are set, + // and the connection can be established at a later time. + c.Initialized = true + + if verifyConnection { + if _, err = c.Connection(ctx); err != nil { + return nil, fmt.Errorf("error verifying - connection: %w", err) + } + + if err := c.db.PingContext(ctx); err != nil { + return nil, fmt.Errorf("error verifying - ping: %w", err) + } + } + + return c.RawConfig, nil +} + +func (c *mySQLConnectionProducer) Connection(ctx context.Context) (interface{}, error) { + if !c.Initialized { + return nil, connutil.ErrNotInitialized + } + + // If we already have a DB, test it and return + if c.db != nil { + if err := c.db.PingContext(ctx); err == nil { + return c.db, nil + } + // If the ping was unsuccessful, close it and ignore errors as we'll be + // reestablishing anyways + c.db.Close() + } + + connURL, err := c.addTLStoDSN() + if err != nil { + return nil, err + } + + c.db, err = sql.Open("mysql", connURL) + if err != nil { + return nil, err + } + + // Set some connection pool settings. We don't need much of this, + // since the request rate shouldn't be high. 
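+	// (Init already defaulted MaxOpenConnections to 4 and capped MaxIdleConnections at MaxOpenConnections.)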
+ c.db.SetMaxOpenConns(c.MaxOpenConnections) + c.db.SetMaxIdleConns(c.MaxIdleConnections) + c.db.SetConnMaxLifetime(c.maxConnectionLifetime) + + return c.db, nil +} + +func (c *mySQLConnectionProducer) SecretValues() map[string]string { + return map[string]string{ + c.Password: "[password]", + } +} + +// Close attempts to close the connection +func (c *mySQLConnectionProducer) Close() error { + // Grab the write lock + c.Lock() + defer c.Unlock() + + if c.db != nil { + c.db.Close() + } + + c.db = nil + + return nil +} + +func (c *mySQLConnectionProducer) getTLSAuth() (tlsConfig *tls.Config, err error) { + if len(c.TLSCAData) == 0 && + len(c.TLSCertificateKeyData) == 0 { + return nil, nil + } + + rootCertPool := x509.NewCertPool() + if len(c.TLSCAData) > 0 { + ok := rootCertPool.AppendCertsFromPEM(c.TLSCAData) + if !ok { + return nil, fmt.Errorf("failed to append CA to client options") + } + } + + clientCert := make([]tls.Certificate, 0, 1) + + if len(c.TLSCertificateKeyData) > 0 { + certificate, err := tls.X509KeyPair(c.TLSCertificateKeyData, c.TLSCertificateKeyData) + if err != nil { + return nil, fmt.Errorf("unable to load tls_certificate_key_data: %w", err) + } + + clientCert = append(clientCert, certificate) + } + + tlsConfig = &tls.Config{ + RootCAs: rootCertPool, + Certificates: clientCert, + ServerName: c.TLSServerName, + InsecureSkipVerify: c.TLSSkipVerify, + } + + return tlsConfig, nil +} + +func (c *mySQLConnectionProducer) addTLStoDSN() (connURL string, err error) { + config, err := mysql.ParseDSN(c.ConnectionURL) + if err != nil { + return "", fmt.Errorf("unable to parse connectionURL: %s", err) + } + + if len(c.tlsConfigName) > 0 { + config.TLSConfig = c.tlsConfigName + } + + connURL = config.FormatDSN() + return connURL, nil +} diff --git a/plugins/database/mysql/connection_producer_test.go b/plugins/database/mysql/connection_producer_test.go new file mode 100644 index 0000000..a3f0bc7 --- /dev/null +++ b/plugins/database/mysql/connection_producer_test.go @@ -0,0 +1,319 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package mysql + +import ( + "context" + "database/sql" + "fmt" + "io/ioutil" + "os" + paths "path" + "path/filepath" + "reflect" + "testing" + "time" + + "github.com/hashicorp/vault/helper/testhelpers/certhelpers" + "github.com/hashicorp/vault/sdk/database/helper/dbutil" + dockertest "github.com/ory/dockertest/v3" +) + +func Test_addTLStoDSN(t *testing.T) { + type testCase struct { + rootUrl string + tlsConfigName string + expectedResult string + } + + tests := map[string]testCase{ + "no tls, no query string": { + rootUrl: "user:password@tcp(localhost:3306)/test", + tlsConfigName: "", + expectedResult: "user:password@tcp(localhost:3306)/test", + }, + "tls, no query string": { + rootUrl: "user:password@tcp(localhost:3306)/test", + tlsConfigName: "tlsTest101", + expectedResult: "user:password@tcp(localhost:3306)/test?tls=tlsTest101", + }, + "tls, query string": { + rootUrl: "user:password@tcp(localhost:3306)/test?foo=bar", + tlsConfigName: "tlsTest101", + expectedResult: "user:password@tcp(localhost:3306)/test?tls=tlsTest101&foo=bar", + }, + "tls, query string, ? 
in password": { + rootUrl: "user:pa?ssword?@tcp(localhost:3306)/test?foo=bar", + tlsConfigName: "tlsTest101", + expectedResult: "user:pa?ssword?@tcp(localhost:3306)/test?tls=tlsTest101&foo=bar", + }, + "tls, valid tls parameter in query string": { + rootUrl: "user:password@tcp(localhost:3306)/test?tls=true", + tlsConfigName: "", + expectedResult: "user:password@tcp(localhost:3306)/test?tls=true", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + tCase := mySQLConnectionProducer{ + ConnectionURL: test.rootUrl, + tlsConfigName: test.tlsConfigName, + } + + actual, err := tCase.addTLStoDSN() + if err != nil { + t.Fatalf("error occurred in test: %s", err) + } + if actual != test.expectedResult { + t.Fatalf("generated: %s, expected: %s", actual, test.expectedResult) + } + }) + } +} + +func TestInit_clientTLS(t *testing.T) { + t.Skip("Skipping this test because CircleCI can't mount the files we need without further investigation: " + + "https://support.circleci.com/hc/en-us/articles/360007324514-How-can-I-mount-volumes-to-docker-containers-") + + // Set up temp directory so we can mount it to the docker container + confDir := makeTempDir(t) + defer os.RemoveAll(confDir) + + // Create certificates for MySQL authentication + caCert := certhelpers.NewCert(t, + certhelpers.CommonName("test certificate authority"), + certhelpers.IsCA(true), + certhelpers.SelfSign(), + ) + serverCert := certhelpers.NewCert(t, + certhelpers.CommonName("server"), + certhelpers.DNS("localhost"), + certhelpers.Parent(caCert), + ) + clientCert := certhelpers.NewCert(t, + certhelpers.CommonName("client"), + certhelpers.DNS("client"), + certhelpers.Parent(caCert), + ) + + writeFile(t, paths.Join(confDir, "ca.pem"), caCert.CombinedPEM(), 0o644) + writeFile(t, paths.Join(confDir, "server-cert.pem"), serverCert.Pem, 0o644) + writeFile(t, paths.Join(confDir, "server-key.pem"), serverCert.PrivateKeyPEM(), 0o644) + writeFile(t, paths.Join(confDir, "client.pem"), clientCert.CombinedPEM(), 0o644) + + // ////////////////////////////////////////////////////// + // Set up MySQL config file + rawConf := ` +[mysqld] +ssl +ssl-ca=/etc/mysql/ca.pem +ssl-cert=/etc/mysql/server-cert.pem +ssl-key=/etc/mysql/server-key.pem` + + writeFile(t, paths.Join(confDir, "my.cnf"), []byte(rawConf), 0o644) + + // ////////////////////////////////////////////////////// + // Start MySQL container + retURL, cleanup := startMySQLWithTLS(t, "5.7", confDir) + defer cleanup() + + // ////////////////////////////////////////////////////// + // Set up x509 user + mClient := connect(t, retURL) + + username := setUpX509User(t, mClient, clientCert) + + // ////////////////////////////////////////////////////// + // Test + mysql := newMySQL(DefaultUserNameTemplate) + + conf := map[string]interface{}{ + "connection_url": retURL, + "username": username, + "tls_certificate_key": clientCert.CombinedPEM(), + "tls_ca": caCert.Pem, + } + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + _, err := mysql.Init(ctx, conf, true) + if err != nil { + t.Fatalf("Unable to initialize mysql engine: %s", err) + } + + // Initialization complete. 
The connection was established, but we need to ensure
+	// that we're connected as the right user
+	whoamiCmd := "SELECT CURRENT_USER()"
+
+	client, err := mysql.getConnection(ctx)
+	if err != nil {
+		t.Fatalf("Unable to make connection to MySQL: %s", err)
+	}
+	stmt, err := client.Prepare(whoamiCmd)
+	if err != nil {
+		t.Fatalf("Unable to prepare MySQL statement: %s", err)
+	}
+
+	results := stmt.QueryRow()
+
+	expected := fmt.Sprintf("%s@%%", username)
+
+	var result string
+	if err := results.Scan(&result); err != nil {
+		t.Fatalf("result could not be scanned from result set: %s", err)
+	}
+
+	if !reflect.DeepEqual(result, expected) {
+		t.Fatalf("Actual:%#v\nExpected:\n%#v", result, expected)
+	}
+}
+
+func makeTempDir(t *testing.T) (confDir string) {
+	confDir, err := ioutil.TempDir(".", "mysql-test-data")
+	if err != nil {
+		t.Fatalf("Unable to make temp directory: %s", err)
+	}
+	// Convert the directory to an absolute path because docker needs it when mounting
+	confDir, err = filepath.Abs(filepath.Clean(confDir))
+	if err != nil {
+		t.Fatalf("Unable to determine where temp directory is on absolute path: %s", err)
+	}
+	return confDir
+}
+
+func startMySQLWithTLS(t *testing.T, version string, confDir string) (retURL string, cleanup func()) {
+	if os.Getenv("MYSQL_URL") != "" {
+		return os.Getenv("MYSQL_URL"), func() {}
+	}
+
+	pool, err := dockertest.NewPool("")
+	if err != nil {
+		t.Fatalf("Failed to connect to docker: %s", err)
+	}
+	pool.MaxWait = 30 * time.Second
+
+	containerName := "mysql-unit-test"
+
+	// Remove previously running container if it is still running because cleanup failed
+	err = pool.RemoveContainerByName(containerName)
+	if err != nil {
+		t.Fatalf("Unable to remove old running containers: %s", err)
+	}
+
+	username := "root"
+	password := "x509test"
+
+	runOpts := &dockertest.RunOptions{
+		Name:       containerName,
+		Repository: "mysql",
+		Tag:        version,
+		Cmd:        []string{"--defaults-extra-file=/etc/mysql/my.cnf", "--auto-generate-certs=OFF"},
+		Env:        []string{fmt.Sprintf("MYSQL_ROOT_PASSWORD=%s", password)},
+		// Mount the directory from local filesystem into the container
+		Mounts: []string{
+			fmt.Sprintf("%s:/etc/mysql", confDir),
+		},
+	}
+
+	resource, err := pool.RunWithOptions(runOpts)
+	if err != nil {
+		t.Fatalf("Could not start local mysql docker container: %s", err)
+	}
+	resource.Expire(30)
+
+	cleanup = func() {
+		err := pool.Purge(resource)
+		if err != nil {
+			t.Fatalf("Failed to cleanup local container: %s", err)
+		}
+	}
+
+	dsn := fmt.Sprintf("{{username}}:{{password}}@tcp(localhost:%s)/mysql", resource.GetPort("3306/tcp"))
+
+	url := dbutil.QueryHelper(dsn, map[string]string{
+		"username": username,
+		"password": password,
+	})
+	// exponential backoff-retry
+	err = pool.Retry(func() error {
+		var err error
+
+		db, err := sql.Open("mysql", url)
+		if err != nil {
+			t.Logf("err: %s", err)
+			return err
+		}
+		defer db.Close()
+		return db.Ping()
+	})
+	if err != nil {
+		cleanup()
+		t.Fatalf("Could not connect to mysql docker container: %s", err)
+	}
+
+	return dsn, cleanup
+}
+
+func connect(t *testing.T, dsn string) (db *sql.DB) {
+	url := dbutil.QueryHelper(dsn, map[string]string{
+		"username": "root",
+		"password": "x509test",
+	})
+
+	db, err := sql.Open("mysql", url)
+	if err != nil {
+		t.Fatalf("Unable to make connection to MySQL: %s", err)
+	}
+
+	err = db.Ping()
+	if err != nil {
+		t.Fatalf("Failed to ping MySQL server: %s", err)
+	}
+
+	return db
+}
+
+func setUpX509User(t *testing.T, db *sql.DB, cert certhelpers.Certificate) (username string) {
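+	// The username is taken from the client certificate's CN below, so MySQL maps the X509 subject to the account created here.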
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + username = cert.Template.Subject.CommonName + + cmds := []string{ + fmt.Sprintf("CREATE USER %s IDENTIFIED BY '' REQUIRE X509", username), + fmt.Sprintf("GRANT ALL ON mysql.* TO '%s'@'%s' REQUIRE X509", username, "%"), + } + + for _, cmd := range cmds { + stmt, err := db.PrepareContext(ctx, cmd) + if err != nil { + t.Fatalf("Failed to prepare query: %s", err) + } + + _, err = stmt.ExecContext(ctx) + if err != nil { + t.Fatalf("Failed to create x509 user in database: %s", err) + } + err = stmt.Close() + if err != nil { + t.Fatalf("Failed to close prepared statement: %s", err) + } + } + + return username +} + +// //////////////////////////////////////////////////////////////////////////// +// Writing to file +// //////////////////////////////////////////////////////////////////////////// +func writeFile(t *testing.T, filename string, data []byte, perms os.FileMode) { + t.Helper() + + err := ioutil.WriteFile(filename, data, perms) + if err != nil { + t.Fatalf("Unable to write to file [%s]: %s", filename, err) + } +} diff --git a/plugins/database/mysql/mysql-database-plugin/main.go b/plugins/database/mysql/mysql-database-plugin/main.go new file mode 100644 index 0000000..56640b2 --- /dev/null +++ b/plugins/database/mysql/mysql-database-plugin/main.go @@ -0,0 +1,30 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "log" + "os" + + "github.com/hashicorp/vault/plugins/database/mysql" + "github.com/hashicorp/vault/sdk/database/dbplugin/v5" +) + +func main() { + err := Run() + if err != nil { + log.Println(err) + os.Exit(1) + } +} + +// Run instantiates a MySQL object, and runs the RPC server for the plugin +func Run() error { + var f func() (interface{}, error) + f = mysql.New(mysql.DefaultUserNameTemplate) + + dbplugin.ServeMultiplex(f) + + return nil +} diff --git a/plugins/database/mysql/mysql-legacy-database-plugin/main.go b/plugins/database/mysql/mysql-legacy-database-plugin/main.go new file mode 100644 index 0000000..8aeba0b --- /dev/null +++ b/plugins/database/mysql/mysql-legacy-database-plugin/main.go @@ -0,0 +1,34 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "log" + "os" + + "github.com/hashicorp/vault/plugins/database/mysql" + dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" +) + +func main() { + err := Run() + if err != nil { + log.Println(err) + os.Exit(1) + } +} + +// Run instantiates a MySQL object, and runs the RPC server for the plugin +func Run() error { + var f func() (interface{}, error) + f = mysql.New(mysql.DefaultLegacyUserNameTemplate) + dbType, err := f() + if err != nil { + return err + } + + dbplugin.Serve(dbType.(dbplugin.Database)) + + return nil +} diff --git a/plugins/database/mysql/mysql.go b/plugins/database/mysql/mysql.go new file mode 100644 index 0000000..0260ec2 --- /dev/null +++ b/plugins/database/mysql/mysql.go @@ -0,0 +1,296 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package mysql
+
+import (
+	"context"
+	"database/sql"
+	"errors"
+	"fmt"
+	"strings"
+
+	stdmysql "github.com/go-sql-driver/mysql"
+	"github.com/hashicorp/go-secure-stdlib/strutil"
+	dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
+	"github.com/hashicorp/vault/sdk/database/helper/dbutil"
+	"github.com/hashicorp/vault/sdk/helper/template"
+)
+
+const (
+	defaultMysqlRevocationStmts = `
+		REVOKE ALL PRIVILEGES, GRANT OPTION FROM '{{name}}'@'%';
+		DROP USER '{{name}}'@'%'
+	`
+
+	defaultMySQLRotateCredentialsSQL = `
+		ALTER USER '{{username}}'@'%' IDENTIFIED BY '{{password}}';
+	`
+
+	mySQLTypeName = "mysql"
+
+	DefaultUserNameTemplate       = `{{ printf "v-%s-%s-%s-%s" (.DisplayName | truncate 10) (.RoleName | truncate 10) (random 20) (unix_time) | truncate 32 }}`
+	DefaultLegacyUserNameTemplate = `{{ printf "v-%s-%s" (.RoleName | truncate 4) (random 20) | truncate 16 }}`
+)
+
+var _ dbplugin.Database = (*MySQL)(nil)
+
+type MySQL struct {
+	*mySQLConnectionProducer
+
+	usernameProducer        template.StringTemplate
+	defaultUsernameTemplate string
+}
+
+// New implements builtinplugins.BuiltinFactory
+func New(defaultUsernameTemplate string) func() (interface{}, error) {
+	return func() (interface{}, error) {
+		if defaultUsernameTemplate == "" {
+			return nil, fmt.Errorf("missing default username template")
+		}
+		db := newMySQL(defaultUsernameTemplate)
+		// Wrap the plugin with middleware to sanitize errors
+		dbType := dbplugin.NewDatabaseErrorSanitizerMiddleware(db, db.SecretValues)
+
+		return dbType, nil
+	}
+}
+
+func newMySQL(defaultUsernameTemplate string) *MySQL {
+	connProducer := &mySQLConnectionProducer{}
+
+	return &MySQL{
+		mySQLConnectionProducer: connProducer,
+		defaultUsernameTemplate: defaultUsernameTemplate,
+	}
+}
+
+func (m *MySQL) Type() (string, error) {
+	return mySQLTypeName, nil
+}
+
+func (m *MySQL) getConnection(ctx context.Context) (*sql.DB, error) {
+	db, err := m.Connection(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	return db.(*sql.DB), nil
+}
+
+func (m *MySQL) Initialize(ctx context.Context, req dbplugin.InitializeRequest) (dbplugin.InitializeResponse, error) {
+	usernameTemplate, err := strutil.GetString(req.Config, "username_template")
+	if err != nil {
+		return dbplugin.InitializeResponse{}, err
+	}
+
+	if usernameTemplate == "" {
+		usernameTemplate = m.defaultUsernameTemplate
+	}
+
+	up, err := template.NewTemplate(template.Template(usernameTemplate))
+	if err != nil {
+		return dbplugin.InitializeResponse{}, fmt.Errorf("unable to initialize username template: %w", err)
+	}
+
+	m.usernameProducer = up
+
+	_, err = m.usernameProducer.Generate(dbplugin.UsernameMetadata{})
+	if err != nil {
+		return dbplugin.InitializeResponse{}, fmt.Errorf("invalid username template: %w", err)
+	}
+
+	err = m.mySQLConnectionProducer.Initialize(ctx, req.Config, req.VerifyConnection)
+	if err != nil {
+		return dbplugin.InitializeResponse{}, err
+	}
+
+	resp := dbplugin.InitializeResponse{
+		Config: req.Config,
+	}
+
+	return resp, nil
+}
+
+func (m *MySQL) NewUser(ctx context.Context, req dbplugin.NewUserRequest) (dbplugin.NewUserResponse, error) {
+	if len(req.Statements.Commands) == 0 {
+		return dbplugin.NewUserResponse{}, dbutil.ErrEmptyCreationStatement
+	}
+
+	username, err := m.usernameProducer.Generate(req.UsernameConfig)
+	if err != nil {
+		return dbplugin.NewUserResponse{}, err
+	}
+
+	password := req.Password
+
+	expirationStr := req.Expiration.Format("2006-01-02 15:04:05-0700")
+
+	queryMap := map[string]string{
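+		// Both {{name}} and {{username}} resolve to the same generated
+		// username, so creation statements written against either placeholder
+		// work unchanged.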
"name": username, + "username": username, + "password": password, + "expiration": expirationStr, + } + + if err := m.executePreparedStatementsWithMap(ctx, req.Statements.Commands, queryMap); err != nil { + return dbplugin.NewUserResponse{}, err + } + + resp := dbplugin.NewUserResponse{ + Username: username, + } + return resp, nil +} + +func (m *MySQL) DeleteUser(ctx context.Context, req dbplugin.DeleteUserRequest) (dbplugin.DeleteUserResponse, error) { + // Grab the read lock + m.Lock() + defer m.Unlock() + + // Get the connection + db, err := m.getConnection(ctx) + if err != nil { + return dbplugin.DeleteUserResponse{}, err + } + + revocationStmts := req.Statements.Commands + // Use a default SQL statement for revocation if one cannot be fetched from the role + if len(revocationStmts) == 0 { + revocationStmts = []string{defaultMysqlRevocationStmts} + } + + // Start a transaction + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return dbplugin.DeleteUserResponse{}, err + } + defer tx.Rollback() + + for _, stmt := range revocationStmts { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + // This is not a prepared statement because not all commands are supported + // 1295: This command is not supported in the prepared statement protocol yet + // Reference https://mariadb.com/kb/en/mariadb/prepare-statement/ + query = strings.ReplaceAll(query, "{{name}}", req.Username) + query = strings.ReplaceAll(query, "{{username}}", req.Username) + _, err = tx.ExecContext(ctx, query) + if err != nil { + return dbplugin.DeleteUserResponse{}, err + } + } + } + + // Commit the transaction + err = tx.Commit() + return dbplugin.DeleteUserResponse{}, err +} + +func (m *MySQL) UpdateUser(ctx context.Context, req dbplugin.UpdateUserRequest) (dbplugin.UpdateUserResponse, error) { + if req.Password == nil && req.Expiration == nil { + return dbplugin.UpdateUserResponse{}, fmt.Errorf("no change requested") + } + + if req.Password != nil { + err := m.changeUserPassword(ctx, req.Username, req.Password.NewPassword, req.Password.Statements.Commands) + if err != nil { + return dbplugin.UpdateUserResponse{}, fmt.Errorf("failed to change password: %w", err) + } + } + + // Expiration change/update is currently a no-op + + return dbplugin.UpdateUserResponse{}, nil +} + +func (m *MySQL) changeUserPassword(ctx context.Context, username, password string, rotateStatements []string) error { + if username == "" || password == "" { + return errors.New("must provide both username and password") + } + + if len(rotateStatements) == 0 { + rotateStatements = []string{defaultMySQLRotateCredentialsSQL} + } + + queryMap := map[string]string{ + "name": username, + "username": username, + "password": password, + } + + if err := m.executePreparedStatementsWithMap(ctx, rotateStatements, queryMap); err != nil { + return err + } + return nil +} + +// executePreparedStatementsWithMap loops through the given templated SQL statements and +// applies the map to them, interpolating values into the templates, returning +// the resulting username and password +func (m *MySQL) executePreparedStatementsWithMap(ctx context.Context, statements []string, queryMap map[string]string) error { + // Grab the lock + m.Lock() + defer m.Unlock() + + // Get the connection + db, err := m.getConnection(ctx) + if err != nil { + return err + } + // Start a transaction + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return err + } + defer func() { + _ = 
+		_ = tx.Rollback()
+	}()
+
+	// Execute each query
+	for _, stmt := range statements {
+		for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") {
+			query = strings.TrimSpace(query)
+			if len(query) == 0 {
+				continue
+			}
+
+			query = dbutil.QueryHelper(query, queryMap)
+
+			stmt, err := tx.PrepareContext(ctx, query)
+			if err != nil {
+				// If the error code we get back is Error 1295: This command is not
+				// supported in the prepared statement protocol yet, we will execute
+				// the statement without preparing it. This allows the caller to
+				// manually prepare statements, as well as run other not yet
+				// prepare supported commands. If there is no error when running we
+				// will continue to the next statement.
+				if e, ok := err.(*stdmysql.MySQLError); ok && e.Number == 1295 {
+					_, err = tx.ExecContext(ctx, query)
+					if err != nil {
+						// stmt is nil when PrepareContext fails, so there is nothing to close
+						return err
+					}
+					continue
+				}
+
+				return err
+			}
+			if _, err := stmt.ExecContext(ctx); err != nil {
+				stmt.Close()
+				return err
+			}
+			stmt.Close()
+		}
+	}
+
+	// Commit the transaction
+	if err := tx.Commit(); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/plugins/database/mysql/mysql_test.go b/plugins/database/mysql/mysql_test.go
new file mode 100644
index 0000000..07e0165
--- /dev/null
+++ b/plugins/database/mysql/mysql_test.go
@@ -0,0 +1,796 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package mysql
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"strings"
+	"testing"
+	"time"
+
+	stdmysql "github.com/go-sql-driver/mysql"
+	"github.com/hashicorp/go-secure-stdlib/strutil"
+	mysqlhelper "github.com/hashicorp/vault/helper/testhelpers/mysql"
+	dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
+	dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing"
+	"github.com/hashicorp/vault/sdk/database/helper/credsutil"
+	"github.com/hashicorp/vault/sdk/database/helper/dbutil"
+	"github.com/stretchr/testify/require"
+)
+
+var _ dbplugin.Database = (*MySQL)(nil)
+
+func TestMySQL_Initialize(t *testing.T) {
+	type testCase struct {
+		rootPassword string
+	}
+
+	tests := map[string]testCase{
+		"non-special characters in root password": {
+			rootPassword: "B44a30c4C04D0aAaE140",
+		},
+		"special characters in root password": {
+			rootPassword: "#secret!%25#{@}",
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			testInitialize(t, test.rootPassword)
+		})
+	}
+}
+
+func testInitialize(t *testing.T, rootPassword string) {
+	cleanup, connURL := mysqlhelper.PrepareTestContainer(t, false, rootPassword)
+	defer cleanup()
+
+	mySQLConfig, err := stdmysql.ParseDSN(connURL)
+	if err != nil {
+		panic(fmt.Sprintf("Test failure: connection URL is invalid: %s", err))
+	}
+	rootUser := mySQLConfig.User
+	mySQLConfig.User = "{{username}}"
+	mySQLConfig.Passwd = "{{password}}"
+	tmplConnURL := mySQLConfig.FormatDSN()
+
+	type testCase struct {
+		initRequest  dbplugin.InitializeRequest
+		expectedResp dbplugin.InitializeResponse
+
+		expectErr         bool
+		expectInitialized bool
+	}
+
+	tests := map[string]testCase{
+		"missing connection_url": {
+			initRequest: dbplugin.InitializeRequest{
+				Config:           map[string]interface{}{},
+				VerifyConnection: true,
+			},
+			expectedResp:      dbplugin.InitializeResponse{},
+			expectErr:         true,
+			expectInitialized: false,
+		},
+		"basic config": {
+			initRequest: dbplugin.InitializeRequest{
+				Config: map[string]interface{}{
+					"connection_url": connURL,
+				},
+				VerifyConnection: true,
+			},
+			expectedResp: dbplugin.InitializeResponse{
+				Config:
map[string]interface{}{ + "connection_url": connURL, + }, + }, + expectErr: false, + expectInitialized: true, + }, + "username and password replacement in connection_url": { + initRequest: dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": tmplConnURL, + "username": rootUser, + "password": rootPassword, + }, + VerifyConnection: true, + }, + expectedResp: dbplugin.InitializeResponse{ + Config: map[string]interface{}{ + "connection_url": tmplConnURL, + "username": rootUser, + "password": rootPassword, + }, + }, + expectErr: false, + expectInitialized: true, + }, + "invalid username template": { + initRequest: dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + "username_template": "{{.FieldThatDoesNotExist}}", + }, + VerifyConnection: true, + }, + expectedResp: dbplugin.InitializeResponse{}, + expectErr: true, + expectInitialized: false, + }, + "bad username template": { + initRequest: dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + "username_template": "{{ .DisplayName", // Explicitly bad template + }, + VerifyConnection: true, + }, + expectedResp: dbplugin.InitializeResponse{}, + expectErr: true, + expectInitialized: false, + }, + "custom username template": { + initRequest: dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + "username_template": "foo-{{random 10}}-{{.DisplayName}}", + }, + VerifyConnection: true, + }, + expectedResp: dbplugin.InitializeResponse{ + Config: map[string]interface{}{ + "connection_url": connURL, + "username_template": "foo-{{random 10}}-{{.DisplayName}}", + }, + }, + expectErr: false, + expectInitialized: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + db := newMySQL(DefaultUserNameTemplate) + defer dbtesting.AssertClose(t, db) + initResp, err := db.Initialize(context.Background(), test.initRequest) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + require.Equal(t, test.expectedResp, initResp) + require.Equal(t, test.expectInitialized, db.Initialized, "Initialized variable not set correctly") + }) + } +} + +func TestMySQL_NewUser_nonLegacy(t *testing.T) { + displayName := "token" + roleName := "testrole" + + type testCase struct { + usernameTemplate string + + newUserReq dbplugin.NewUserRequest + + expectedUsernameRegex string + expectErr bool + } + + tests := map[string]testCase{ + "name statements": { + newUserReq: dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: displayName, + RoleName: roleName, + }, + Statements: dbplugin.Statements{ + Commands: []string{ + `CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}'; + GRANT SELECT ON *.* TO '{{name}}'@'%';`, + }, + }, + Password: "09g8hanbdfkVSM", + Expiration: time.Now().Add(time.Minute), + }, + + expectedUsernameRegex: `^v-token-testrole-[a-zA-Z0-9]{15}$`, + expectErr: false, + }, + "username statements": { + newUserReq: dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: displayName, + RoleName: roleName, + }, + Statements: dbplugin.Statements{ + Commands: []string{ + `CREATE USER '{{username}}'@'%' IDENTIFIED BY '{{password}}'; + GRANT SELECT ON *.* TO '{{username}}'@'%';`, + }, + }, + Password: "09g8hanbdfkVSM", + Expiration: time.Now().Add(time.Minute), + }, + + expectedUsernameRegex: `^v-token-testrole-[a-zA-Z0-9]{15}$`, + 
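+			// The default template caps usernames at 32 characters, which
+			// leaves 15 random characters after the 17-character
+			// "v-token-testrole-" prefix.
+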
expectErr: false, + }, + "prepared name statements": { + newUserReq: dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: displayName, + RoleName: roleName, + }, + Statements: dbplugin.Statements{ + Commands: []string{ + `CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}'; + set @grants=CONCAT("GRANT SELECT ON ", "*", ".* TO '{{name}}'@'%'"); + PREPARE grantStmt from @grants; + EXECUTE grantStmt; + DEALLOCATE PREPARE grantStmt;`, + }, + }, + Password: "09g8hanbdfkVSM", + Expiration: time.Now().Add(time.Minute), + }, + + expectedUsernameRegex: `^v-token-testrole-[a-zA-Z0-9]{15}$`, + expectErr: false, + }, + "prepared username statements": { + newUserReq: dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: displayName, + RoleName: roleName, + }, + Statements: dbplugin.Statements{ + Commands: []string{ + `CREATE USER '{{username}}'@'%' IDENTIFIED BY '{{password}}'; + set @grants=CONCAT("GRANT SELECT ON ", "*", ".* TO '{{username}}'@'%'"); + PREPARE grantStmt from @grants; + EXECUTE grantStmt; + DEALLOCATE PREPARE grantStmt;`, + }, + }, + Password: "09g8hanbdfkVSM", + Expiration: time.Now().Add(time.Minute), + }, + + expectedUsernameRegex: `^v-token-testrole-[a-zA-Z0-9]{15}$`, + expectErr: false, + }, + "custom username template": { + usernameTemplate: "foo-{{random 10}}-{{.RoleName | uppercase}}", + + newUserReq: dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: displayName, + RoleName: roleName, + }, + Statements: dbplugin.Statements{ + Commands: []string{ + `CREATE USER '{{username}}'@'%' IDENTIFIED BY '{{password}}'; + GRANT SELECT ON *.* TO '{{username}}'@'%';`, + }, + }, + Password: "09g8hanbdfkVSM", + Expiration: time.Now().Add(time.Minute), + }, + + expectedUsernameRegex: `^foo-[a-zA-Z0-9]{10}-TESTROLE$`, + expectErr: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + cleanup, connURL := mysqlhelper.PrepareTestContainer(t, false, "secret") + defer cleanup() + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + "username_template": test.usernameTemplate, + } + + initReq := dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + } + + db := newMySQL(DefaultUserNameTemplate) + defer db.Close() + _, err := db.Initialize(context.Background(), initReq) + require.NoError(t, err) + + userResp, err := db.NewUser(context.Background(), test.newUserReq) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + require.Regexp(t, test.expectedUsernameRegex, userResp.Username) + + err = mysqlhelper.TestCredsExist(t, connURL, userResp.Username, test.newUserReq.Password) + require.NoError(t, err, "Failed to connect with credentials") + }) + } +} + +func TestMySQL_NewUser_legacy(t *testing.T) { + displayName := "token" + roleName := "testrole" + + type testCase struct { + usernameTemplate string + + newUserReq dbplugin.NewUserRequest + + expectedUsernameRegex string + expectErr bool + } + + tests := map[string]testCase{ + "name statements": { + newUserReq: dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: displayName, + RoleName: roleName, + }, + Statements: dbplugin.Statements{ + Commands: []string{ + `CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}'; + GRANT SELECT ON *.* TO '{{name}}'@'%';`, + }, + }, + Password: "09g8hanbdfkVSM", + Expiration: 
time.Now().Add(time.Minute), + }, + + expectedUsernameRegex: `^v-test-[a-zA-Z0-9]{9}$`, + expectErr: false, + }, + "username statements": { + newUserReq: dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: displayName, + RoleName: roleName, + }, + Statements: dbplugin.Statements{ + Commands: []string{ + `CREATE USER '{{username}}'@'%' IDENTIFIED BY '{{password}}'; + GRANT SELECT ON *.* TO '{{username}}'@'%';`, + }, + }, + Password: "09g8hanbdfkVSM", + Expiration: time.Now().Add(time.Minute), + }, + + expectedUsernameRegex: `^v-test-[a-zA-Z0-9]{9}$`, + expectErr: false, + }, + "prepared name statements": { + newUserReq: dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: displayName, + RoleName: roleName, + }, + Statements: dbplugin.Statements{ + Commands: []string{ + `CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}'; + set @grants=CONCAT("GRANT SELECT ON ", "*", ".* TO '{{name}}'@'%'"); + PREPARE grantStmt from @grants; + EXECUTE grantStmt; + DEALLOCATE PREPARE grantStmt;`, + }, + }, + Password: "09g8hanbdfkVSM", + Expiration: time.Now().Add(time.Minute), + }, + + expectedUsernameRegex: `^v-test-[a-zA-Z0-9]{9}$`, + expectErr: false, + }, + "prepared username statements": { + newUserReq: dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: displayName, + RoleName: roleName, + }, + Statements: dbplugin.Statements{ + Commands: []string{ + `CREATE USER '{{username}}'@'%' IDENTIFIED BY '{{password}}'; + set @grants=CONCAT("GRANT SELECT ON ", "*", ".* TO '{{username}}'@'%'"); + PREPARE grantStmt from @grants; + EXECUTE grantStmt; + DEALLOCATE PREPARE grantStmt;`, + }, + }, + Password: "09g8hanbdfkVSM", + Expiration: time.Now().Add(time.Minute), + }, + + expectedUsernameRegex: `^v-test-[a-zA-Z0-9]{9}$`, + expectErr: false, + }, + "custom username template": { + usernameTemplate: `{{printf "foo-%s-%s" (random 5) (.RoleName | uppercase) | truncate 16}}`, + + newUserReq: dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: displayName, + RoleName: roleName, + }, + Statements: dbplugin.Statements{ + Commands: []string{ + `CREATE USER '{{username}}'@'%' IDENTIFIED BY '{{password}}'; + GRANT SELECT ON *.* TO '{{username}}'@'%';`, + }, + }, + Password: "09g8hanbdfkVSM", + Expiration: time.Now().Add(time.Minute), + }, + + expectedUsernameRegex: `^foo-[a-zA-Z0-9]{5}-TESTRO$`, + expectErr: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + cleanup, connURL := mysqlhelper.PrepareTestContainer(t, false, "secret") + defer cleanup() + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + "username_template": test.usernameTemplate, + } + + initReq := dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + } + + db := newMySQL(DefaultLegacyUserNameTemplate) + defer db.Close() + _, err := db.Initialize(context.Background(), initReq) + require.NoError(t, err) + + userResp, err := db.NewUser(context.Background(), test.newUserReq) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + require.Regexp(t, test.expectedUsernameRegex, userResp.Username) + + err = mysqlhelper.TestCredsExist(t, connURL, userResp.Username, test.newUserReq.Password) + require.NoError(t, err, "Failed to connect with credentials") + }) + } +} + +func TestMySQL_RotateRootCredentials(t *testing.T) { + type 
testCase struct { + statements []string + } + + tests := map[string]testCase{ + "empty statements": { + statements: nil, + }, + "default username": { + statements: []string{defaultMySQLRotateCredentialsSQL}, + }, + "default name": { + statements: []string{ + ` + ALTER USER '{{username}}'@'%' IDENTIFIED BY '{{password}}';`, + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + cleanup, connURL := mysqlhelper.PrepareTestContainer(t, false, "secret") + defer cleanup() + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + "username": "root", + "password": "secret", + } + + // Give a timeout just in case the test decides to be problematic + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + initReq := dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + } + + db := newMySQL(DefaultUserNameTemplate) + defer db.Close() + _, err := db.Initialize(context.Background(), initReq) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !db.Initialized { + t.Fatal("Database should be initialized") + } + + updateReq := dbplugin.UpdateUserRequest{ + Username: "root", + Password: &dbplugin.ChangePassword{ + NewPassword: "different_sercret", + Statements: dbplugin.Statements{ + Commands: test.statements, + }, + }, + } + + _, err = db.UpdateUser(ctx, updateReq) + if err != nil { + t.Fatalf("err: %v", err) + } + err = mysqlhelper.TestCredsExist(t, connURL, updateReq.Username, updateReq.Password.NewPassword) + if err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } + + // verify old password doesn't work + if err := mysqlhelper.TestCredsExist(t, connURL, updateReq.Username, "secret"); err == nil { + t.Fatalf("Should not be able to connect with initial credentials") + } + + err = db.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + }) + } +} + +func TestMySQL_DeleteUser(t *testing.T) { + type testCase struct { + revokeStmts []string + } + + tests := map[string]testCase{ + "empty statements": { + revokeStmts: nil, + }, + "default name": { + revokeStmts: []string{defaultMysqlRevocationStmts}, + }, + "default username": { + revokeStmts: []string{ + ` + REVOKE ALL PRIVILEGES, GRANT OPTION FROM '{{username}}'@'%'; + DROP USER '{{username}}'@'%'`, + }, + }, + } + + // Shared test container for speed - there should not be any overlap between the tests + cleanup, connURL := mysqlhelper.PrepareTestContainer(t, false, "secret") + defer cleanup() + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + initReq := dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + } + + db := newMySQL(DefaultUserNameTemplate) + defer db.Close() + _, err := db.Initialize(context.Background(), initReq) + if err != nil { + t.Fatalf("err: %s", err) + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + password, err := credsutil.RandomAlphaNumeric(32, false) + if err != nil { + t.Fatalf("unable to generate password: %s", err) + } + + createReq := dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "test", + RoleName: "test", + }, + Statements: dbplugin.Statements{ + Commands: []string{ + ` + CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}'; + GRANT SELECT ON *.* TO '{{name}}'@'%';`, + }, + }, + Password: password, + Expiration: time.Now().Add(time.Minute), + } + + // Give a timeout just in case the test decides to be problematic + ctx, cancel := 
context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + userResp, err := db.NewUser(ctx, createReq) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err := mysqlhelper.TestCredsExist(t, connURL, userResp.Username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } + + deleteReq := dbplugin.DeleteUserRequest{ + Username: userResp.Username, + Statements: dbplugin.Statements{ + Commands: test.revokeStmts, + }, + } + _, err = db.DeleteUser(context.Background(), deleteReq) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err := mysqlhelper.TestCredsExist(t, connURL, userResp.Username, password); err == nil { + t.Fatalf("Credentials were not revoked!") + } + }) + } +} + +func TestMySQL_UpdateUser(t *testing.T) { + type testCase struct { + rotateStmts []string + } + + tests := map[string]testCase{ + "empty statements": { + rotateStmts: nil, + }, + "custom statement name": { + rotateStmts: []string{` + ALTER USER '{{name}}'@'%' IDENTIFIED BY '{{password}}';`}, + }, + "custom statement username": { + rotateStmts: []string{` + ALTER USER '{{username}}'@'%' IDENTIFIED BY '{{password}}';`}, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + cleanup, connURL := mysqlhelper.PrepareTestContainer(t, false, "secret") + defer cleanup() + + // create the database user and verify we can access + dbUser := "vaultstatictest" + initPassword := "password" + + createStatements := ` + CREATE USER '{{name}}'@'%' IDENTIFIED BY '{{password}}'; + GRANT SELECT ON *.* TO '{{name}}'@'%';` + + createTestMySQLUser(t, connURL, dbUser, initPassword, createStatements) + if err := mysqlhelper.TestCredsExist(t, connURL, dbUser, initPassword); err != nil { + t.Fatalf("Could not connect with credentials: %s", err) + } + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + initReq := dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + } + + // Give a timeout just in case the test decides to be problematic + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + db := newMySQL(DefaultUserNameTemplate) + defer db.Close() + _, err := db.Initialize(context.Background(), initReq) + if err != nil { + t.Fatalf("err: %s", err) + } + + newPassword, err := credsutil.RandomAlphaNumeric(32, false) + if err != nil { + t.Fatalf("unable to generate password: %s", err) + } + + updateReq := dbplugin.UpdateUserRequest{ + Username: dbUser, + Password: &dbplugin.ChangePassword{ + NewPassword: newPassword, + Statements: dbplugin.Statements{ + Commands: test.rotateStmts, + }, + }, + } + + _, err = db.UpdateUser(ctx, updateReq) + if err != nil { + t.Fatalf("err: %s", err) + } + + // verify new password works + if err := mysqlhelper.TestCredsExist(t, connURL, dbUser, newPassword); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } + + // verify old password doesn't work + if err := mysqlhelper.TestCredsExist(t, connURL, dbUser, initPassword); err == nil { + t.Fatalf("Should not be able to connect with initial credentials") + } + }) + } +} + +func createTestMySQLUser(t *testing.T, connURL, username, password, query string) { + t.Helper() + db, err := sql.Open("mysql", connURL) + defer db.Close() + if err != nil { + t.Fatal(err) + } + + // Start a transaction + ctx := context.Background() + tx, err := db.BeginTx(ctx, nil) + if err != nil { + t.Fatal(err) + } + defer func() { + _ = tx.Rollback() + }() + + // copied from 
mysql.go
+	for _, query := range strutil.ParseArbitraryStringSlice(query, ";") {
+		query = strings.TrimSpace(query)
+		if len(query) == 0 {
+			continue
+		}
+		query = dbutil.QueryHelper(query, map[string]string{
+			"name":     username,
+			"password": password,
+		})
+
+		stmt, err := tx.PrepareContext(ctx, query)
+		if err != nil {
+			if e, ok := err.(*stdmysql.MySQLError); ok && e.Number == 1295 {
+				_, err = tx.ExecContext(ctx, query)
+				if err != nil {
+					t.Fatal(err)
+				}
+				// stmt is nil when PrepareContext fails, so there is nothing to close
+				continue
+			}
+
+			t.Fatal(err)
+		}
+		if _, err := stmt.ExecContext(ctx); err != nil {
+			stmt.Close()
+			t.Fatal(err)
+		}
+		stmt.Close()
+	}
+}
diff --git a/plugins/database/postgresql/passwordauthentication.go b/plugins/database/postgresql/passwordauthentication.go
new file mode 100644
index 0000000..ec94baf
--- /dev/null
+++ b/plugins/database/postgresql/passwordauthentication.go
@@ -0,0 +1,25 @@
+package postgresql
+
+import "fmt"
+
+// passwordAuthentication determines whether to send passwords in plaintext (password) or hashed (scram-sha-256).
+type passwordAuthentication string
+
+var (
+	// passwordAuthenticationPassword is the default. If set, passwords will be sent to PostgreSQL in plain text.
+	passwordAuthenticationPassword    passwordAuthentication = "password"
+	passwordAuthenticationSCRAMSHA256 passwordAuthentication = "scram-sha-256"
+)
+
+var passwordAuthentications = map[passwordAuthentication]struct{}{
+	passwordAuthenticationSCRAMSHA256: {},
+	passwordAuthenticationPassword:    {},
+}
+
+func parsePasswordAuthentication(s string) (passwordAuthentication, error) {
+	if _, ok := passwordAuthentications[passwordAuthentication(s)]; !ok {
+		return "", fmt.Errorf("'%s' is not a valid password authentication type", s)
+	}
+
+	return passwordAuthentication(s), nil
+}
diff --git a/plugins/database/postgresql/postgresql-database-plugin/main.go b/plugins/database/postgresql/postgresql-database-plugin/main.go
new file mode 100644
index 0000000..f543167
--- /dev/null
+++ b/plugins/database/postgresql/postgresql-database-plugin/main.go
@@ -0,0 +1,27 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package main
+
+import (
+	"log"
+	"os"
+
+	"github.com/hashicorp/vault/plugins/database/postgresql"
+	"github.com/hashicorp/vault/sdk/database/dbplugin/v5"
+)
+
+func main() {
+	err := Run()
+	if err != nil {
+		log.Println(err)
+		os.Exit(1)
+	}
+}
+
+// Run instantiates a PostgreSQL object, and runs the RPC server for the plugin
+func Run() error {
+	dbplugin.ServeMultiplex(postgresql.New)
+
+	return nil
+}
diff --git a/plugins/database/postgresql/postgresql.go b/plugins/database/postgresql/postgresql.go
new file mode 100644
index 0000000..66c44cc
--- /dev/null
+++ b/plugins/database/postgresql/postgresql.go
@@ -0,0 +1,559 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package postgresql + +import ( + "context" + "database/sql" + "fmt" + "regexp" + "strings" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/plugins/database/postgresql/scram" + "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + "github.com/hashicorp/vault/sdk/database/helper/connutil" + "github.com/hashicorp/vault/sdk/database/helper/dbutil" + "github.com/hashicorp/vault/sdk/helper/dbtxn" + "github.com/hashicorp/vault/sdk/helper/template" + "github.com/hashicorp/vault/sdk/logical" + _ "github.com/jackc/pgx/v4/stdlib" +) + +const ( + postgreSQLTypeName = "pgx" + defaultExpirationStatement = ` +ALTER ROLE "{{name}}" VALID UNTIL '{{expiration}}'; +` + defaultChangePasswordStatement = ` +ALTER ROLE "{{username}}" WITH PASSWORD '{{password}}'; +` + + expirationFormat = "2006-01-02 15:04:05-0700" + + defaultUserNameTemplate = `{{ printf "v-%s-%s-%s-%s" (.DisplayName | truncate 8) (.RoleName | truncate 8) (random 20) (unix_time) | truncate 63 }}` +) + +var ( + _ dbplugin.Database = (*PostgreSQL)(nil) + _ logical.PluginVersioner = (*PostgreSQL)(nil) + + // postgresEndStatement is basically the word "END" but + // surrounded by a word boundary to differentiate it from + // other words like "APPEND". + postgresEndStatement = regexp.MustCompile(`\bEND\b`) + + // doubleQuotedPhrases finds substrings like "hello" + // and pulls them out with the quotes included. + doubleQuotedPhrases = regexp.MustCompile(`(".*?")`) + + // singleQuotedPhrases finds substrings like 'hello' + // and pulls them out with the quotes included. + singleQuotedPhrases = regexp.MustCompile(`('.*?')`) + + // ReportedVersion is used to report a specific version to Vault. + ReportedVersion = "" +) + +func New() (interface{}, error) { + db := new() + // Wrap the plugin with middleware to sanitize errors + dbType := dbplugin.NewDatabaseErrorSanitizerMiddleware(db, db.secretValues) + return dbType, nil +} + +func new() *PostgreSQL { + connProducer := &connutil.SQLConnectionProducer{} + connProducer.Type = postgreSQLTypeName + + db := &PostgreSQL{ + SQLConnectionProducer: connProducer, + passwordAuthentication: passwordAuthenticationPassword, + } + + return db +} + +type PostgreSQL struct { + *connutil.SQLConnectionProducer + + usernameProducer template.StringTemplate + passwordAuthentication passwordAuthentication +} + +func (p *PostgreSQL) Initialize(ctx context.Context, req dbplugin.InitializeRequest) (dbplugin.InitializeResponse, error) { + newConf, err := p.SQLConnectionProducer.Init(ctx, req.Config, req.VerifyConnection) + if err != nil { + return dbplugin.InitializeResponse{}, err + } + + usernameTemplate, err := strutil.GetString(req.Config, "username_template") + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("failed to retrieve username_template: %w", err) + } + if usernameTemplate == "" { + usernameTemplate = defaultUserNameTemplate + } + + up, err := template.NewTemplate(template.Template(usernameTemplate)) + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("unable to initialize username template: %w", err) + } + p.usernameProducer = up + + _, err = p.usernameProducer.Generate(dbplugin.UsernameMetadata{}) + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("invalid username template: %w", err) + } + + passwordAuthenticationRaw, err := strutil.GetString(req.Config, "password_authentication") + if err != nil { + return dbplugin.InitializeResponse{}, 
fmt.Errorf("failed to retrieve password_authentication: %w", err) + } + + if passwordAuthenticationRaw != "" { + pwAuthentication, err := parsePasswordAuthentication(passwordAuthenticationRaw) + if err != nil { + return dbplugin.InitializeResponse{}, err + } + + p.passwordAuthentication = pwAuthentication + } + + resp := dbplugin.InitializeResponse{ + Config: newConf, + } + return resp, nil +} + +func (p *PostgreSQL) Type() (string, error) { + return postgreSQLTypeName, nil +} + +func (p *PostgreSQL) getConnection(ctx context.Context) (*sql.DB, error) { + db, err := p.Connection(ctx) + if err != nil { + return nil, err + } + + return db.(*sql.DB), nil +} + +func (p *PostgreSQL) UpdateUser(ctx context.Context, req dbplugin.UpdateUserRequest) (dbplugin.UpdateUserResponse, error) { + if req.Username == "" { + return dbplugin.UpdateUserResponse{}, fmt.Errorf("missing username") + } + if req.Password == nil && req.Expiration == nil { + return dbplugin.UpdateUserResponse{}, fmt.Errorf("no changes requested") + } + + merr := &multierror.Error{} + if req.Password != nil { + err := p.changeUserPassword(ctx, req.Username, req.Password) + merr = multierror.Append(merr, err) + } + if req.Expiration != nil { + err := p.changeUserExpiration(ctx, req.Username, req.Expiration) + merr = multierror.Append(merr, err) + } + return dbplugin.UpdateUserResponse{}, merr.ErrorOrNil() +} + +func (p *PostgreSQL) changeUserPassword(ctx context.Context, username string, changePass *dbplugin.ChangePassword) error { + stmts := changePass.Statements.Commands + if len(stmts) == 0 { + stmts = []string{defaultChangePasswordStatement} + } + + password := changePass.NewPassword + if password == "" { + return fmt.Errorf("missing password") + } + + p.Lock() + defer p.Unlock() + + db, err := p.getConnection(ctx) + if err != nil { + return fmt.Errorf("unable to get connection: %w", err) + } + + // Check if the role exists + var exists bool + err = db.QueryRowContext(ctx, "SELECT exists (SELECT rolname FROM pg_roles WHERE rolname=$1);", username).Scan(&exists) + if err != nil && err != sql.ErrNoRows { + return fmt.Errorf("user does not appear to exist: %w", err) + } + + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return fmt.Errorf("unable to start transaction: %w", err) + } + defer tx.Rollback() + + for _, stmt := range stmts { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "name": username, + "username": username, + "password": password, + } + + if p.passwordAuthentication == passwordAuthenticationSCRAMSHA256 { + hashedPassword, err := scram.Hash(password) + if err != nil { + return fmt.Errorf("unable to scram-sha256 password: %w", err) + } + m["password"] = hashedPassword + } + + if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil { + return fmt.Errorf("failed to execute query: %w", err) + } + } + } + + if err := tx.Commit(); err != nil { + return err + } + + return nil +} + +func (p *PostgreSQL) changeUserExpiration(ctx context.Context, username string, changeExp *dbplugin.ChangeExpiration) error { + p.Lock() + defer p.Unlock() + + renewStmts := changeExp.Statements.Commands + if len(renewStmts) == 0 { + renewStmts = []string{defaultExpirationStatement} + } + + db, err := p.getConnection(ctx) + if err != nil { + return err + } + + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return err + } + defer func() { + tx.Rollback() + }() + + expirationStr := 
changeExp.NewExpiration.Format(expirationFormat) + + for _, stmt := range renewStmts { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "name": username, + "username": username, + "expiration": expirationStr, + } + if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil { + return err + } + } + } + + return tx.Commit() +} + +func (p *PostgreSQL) NewUser(ctx context.Context, req dbplugin.NewUserRequest) (dbplugin.NewUserResponse, error) { + if len(req.Statements.Commands) == 0 { + return dbplugin.NewUserResponse{}, dbutil.ErrEmptyCreationStatement + } + + p.Lock() + defer p.Unlock() + + username, err := p.usernameProducer.Generate(req.UsernameConfig) + if err != nil { + return dbplugin.NewUserResponse{}, err + } + + expirationStr := req.Expiration.Format(expirationFormat) + + db, err := p.getConnection(ctx) + if err != nil { + return dbplugin.NewUserResponse{}, fmt.Errorf("unable to get connection: %w", err) + } + + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return dbplugin.NewUserResponse{}, fmt.Errorf("unable to start transaction: %w", err) + } + defer tx.Rollback() + + m := map[string]string{ + "name": username, + "username": username, + "password": req.Password, + "expiration": expirationStr, + } + + if p.passwordAuthentication == passwordAuthenticationSCRAMSHA256 { + hashedPassword, err := scram.Hash(req.Password) + if err != nil { + return dbplugin.NewUserResponse{}, fmt.Errorf("unable to scram-sha256 password: %w", err) + } + m["password"] = hashedPassword + } + + for _, stmt := range req.Statements.Commands { + if containsMultilineStatement(stmt) { + // Execute it as-is. + if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, stmt); err != nil { + return dbplugin.NewUserResponse{}, fmt.Errorf("failed to execute query: %w", err) + } + continue + } + // Otherwise, it's fine to split the statements on the semicolon. + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil { + return dbplugin.NewUserResponse{}, fmt.Errorf("failed to execute query: %w", err) + } + } + } + + if err := tx.Commit(); err != nil { + return dbplugin.NewUserResponse{}, err + } + + resp := dbplugin.NewUserResponse{ + Username: username, + } + return resp, nil +} + +func (p *PostgreSQL) DeleteUser(ctx context.Context, req dbplugin.DeleteUserRequest) (dbplugin.DeleteUserResponse, error) { + p.Lock() + defer p.Unlock() + + if len(req.Statements.Commands) == 0 { + return dbplugin.DeleteUserResponse{}, p.defaultDeleteUser(ctx, req.Username) + } + + return dbplugin.DeleteUserResponse{}, p.customDeleteUser(ctx, req.Username, req.Statements.Commands) +} + +func (p *PostgreSQL) customDeleteUser(ctx context.Context, username string, revocationStmts []string) error { + db, err := p.getConnection(ctx) + if err != nil { + return err + } + + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return err + } + defer func() { + tx.Rollback() + }() + + for _, stmt := range revocationStmts { + if containsMultilineStatement(stmt) { + // Execute it as-is. 
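+			// Splitting a multiline block such as DO $$ ... END $$ on
+			// semicolons would produce fragments that are not valid SQL on
+			// their own, so the whole statement is sent in a single call.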
+			m := map[string]string{
+				"name":     username,
+				"username": username,
+			}
+			if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, stmt); err != nil {
+				return err
+			}
+			continue
+		}
+		for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") {
+			query = strings.TrimSpace(query)
+			if len(query) == 0 {
+				continue
+			}
+
+			m := map[string]string{
+				"name":     username,
+				"username": username,
+			}
+			if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil {
+				return err
+			}
+		}
+	}
+
+	return tx.Commit()
+}
+
+func (p *PostgreSQL) defaultDeleteUser(ctx context.Context, username string) error {
+	db, err := p.getConnection(ctx)
+	if err != nil {
+		return err
+	}
+
+	// Check if the role exists
+	var exists bool
+	err = db.QueryRowContext(ctx, "SELECT exists (SELECT rolname FROM pg_roles WHERE rolname=$1);", username).Scan(&exists)
+	if err != nil && err != sql.ErrNoRows {
+		return err
+	}
+
+	if !exists {
+		return nil
+	}
+
+	// Query for permissions; we need to revoke permissions before we can drop
+	// the role
+	// This isn't done in a transaction because even if we fail along the way,
+	// we want to remove as much access as possible
+	stmt, err := db.PrepareContext(ctx, "SELECT DISTINCT table_schema FROM information_schema.role_column_grants WHERE grantee=$1;")
+	if err != nil {
+		return err
+	}
+	defer stmt.Close()
+
+	rows, err := stmt.QueryContext(ctx, username)
+	if err != nil {
+		return err
+	}
+	defer rows.Close()
+
+	const initialNumRevocations = 16
+	revocationStmts := make([]string, 0, initialNumRevocations)
+	for rows.Next() {
+		var schema string
+		err = rows.Scan(&schema)
+		if err != nil {
+			// keep going; remove as many permissions as possible right now
+			continue
+		}
+		revocationStmts = append(revocationStmts, fmt.Sprintf(
+			`REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA %s FROM %s;`,
+			dbutil.QuoteIdentifier(schema),
+			dbutil.QuoteIdentifier(username)))
+
+		revocationStmts = append(revocationStmts, fmt.Sprintf(
+			`REVOKE USAGE ON SCHEMA %s FROM %s;`,
+			dbutil.QuoteIdentifier(schema),
+			dbutil.QuoteIdentifier(username)))
+	}
+
+	// for good measure, revoke all privileges and usage on schema public
+	revocationStmts = append(revocationStmts, fmt.Sprintf(
+		`REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM %s;`,
+		dbutil.QuoteIdentifier(username)))
+
+	revocationStmts = append(revocationStmts, fmt.Sprintf(
+		"REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM %s;",
+		dbutil.QuoteIdentifier(username)))
+
+	revocationStmts = append(revocationStmts, fmt.Sprintf(
+		"REVOKE USAGE ON SCHEMA public FROM %s;",
+		dbutil.QuoteIdentifier(username)))
+
+	// get the current database name so we can issue a REVOKE CONNECT for
+	// this username
+	var dbname sql.NullString
+	if err := db.QueryRowContext(ctx, "SELECT current_database();").Scan(&dbname); err != nil {
+		return err
+	}
+
+	if dbname.Valid {
+		revocationStmts = append(revocationStmts, fmt.Sprintf(
+			`REVOKE CONNECT ON DATABASE %s FROM %s;`,
+			dbutil.QuoteIdentifier(dbname.String),
+			dbutil.QuoteIdentifier(username)))
+	}
+
+	// again, here, we do not stop on error, as we want to remove as
+	// many permissions as possible right now
+	var lastStmtError error
+	for _, query := range revocationStmts {
+		if err := dbtxn.ExecuteDBQueryDirect(ctx, db, nil, query); err != nil {
+			lastStmtError = err
+		}
+	}
+
+	// can't drop if not all privileges are revoked
+	if rows.Err() != nil {
+		return fmt.Errorf("could not generate revocation statements for all rows: %w", rows.Err())
+	}
+	if lastStmtError != nil {
+		return fmt.Errorf("could not perform all revocation statements: %w", lastStmtError)
+	}
+
+	// Drop this user
+	stmt, err = db.PrepareContext(ctx, fmt.Sprintf(
+		`DROP ROLE IF EXISTS %s;`, dbutil.QuoteIdentifier(username)))
+	if err != nil {
+		return err
+	}
+	defer stmt.Close()
+	if _, err := stmt.ExecContext(ctx); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (p *PostgreSQL) secretValues() map[string]string {
+	return map[string]string{
+		p.Password: "[password]",
+	}
+}
+
+func (p *PostgreSQL) PluginVersion() logical.PluginVersion {
+	return logical.PluginVersion{Version: ReportedVersion}
+}
+
+// containsMultilineStatement is a best effort to determine whether
+// a particular statement is multiline, and therefore should not be
+// split upon semicolons. If it's unsure, it defaults to false.
+func containsMultilineStatement(stmt string) bool {
+	// We're going to look for the word "END", but first let's ignore
+	// anything the user provided within single or double quotes since
+	// we're looking for an "END" within the Postgres syntax.
+	literals, err := extractQuotedStrings(stmt)
+	if err != nil {
+		return false
+	}
+	stmtWithoutLiterals := stmt
+	for _, literal := range literals {
+		stmtWithoutLiterals = strings.ReplaceAll(stmtWithoutLiterals, literal, "")
+	}
+	// Now look for the word "END" specifically. This will miss any
+	// representations of END that aren't surrounded by spaces, but
+	// it should be easy to change on the user's side.
+	return postgresEndStatement.MatchString(stmtWithoutLiterals)
+}
+
+// extractQuotedStrings extracts 0 or many substrings
+// that have been single- or double-quoted. Ex:
+// `"Hello", silly 'elephant' from the "zoo".`
+// returns [ `"Hello"`, `'elephant'`, `"zoo"` ]
+func extractQuotedStrings(s string) ([]string, error) {
+	var found []string
+	toFind := []*regexp.Regexp{
+		doubleQuotedPhrases,
+		singleQuotedPhrases,
+	}
+	for _, typeOfPhrase := range toFind {
+		found = append(found, typeOfPhrase.FindAllString(s, -1)...)
+	}
+	return found, nil
+}
diff --git a/plugins/database/postgresql/postgresql_test.go b/plugins/database/postgresql/postgresql_test.go
new file mode 100644
index 0000000..5e89ee9
--- /dev/null
+++ b/plugins/database/postgresql/postgresql_test.go
@@ -0,0 +1,1240 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package postgresql
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"os"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/hashicorp/vault/helper/testhelpers/postgresql"
+	"github.com/hashicorp/vault/sdk/database/dbplugin/v5"
+	dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing"
+	"github.com/hashicorp/vault/sdk/database/helper/dbutil"
+	"github.com/hashicorp/vault/sdk/helper/docker"
+	"github.com/hashicorp/vault/sdk/helper/template"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func getPostgreSQL(t *testing.T, options map[string]interface{}) (*PostgreSQL, func()) {
+	cleanup, connURL := postgresql.PrepareTestContainer(t, "13.4-buster")
+
+	connectionDetails := map[string]interface{}{
+		"connection_url": connURL,
+	}
+	for k, v := range options {
+		connectionDetails[k] = v
+	}
+
+	req := dbplugin.InitializeRequest{
+		Config:           connectionDetails,
+		VerifyConnection: true,
+	}
+
+	db := new()
+	dbtesting.AssertInitialize(t, db, req)
+
+	if !db.Initialized {
+		t.Fatal("Database should be initialized")
+	}
+	return db, cleanup
+}
+
+func TestPostgreSQL_Initialize(t *testing.T) {
+	db, cleanup := getPostgreSQL(t, map[string]interface{}{
+		"max_open_connections": 5,
+	})
+	defer cleanup()
+
+	if err := db.Close(); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+}
+
+func TestPostgreSQL_InitializeWithStringVals(t *testing.T) {
+	db, cleanup := getPostgreSQL(t, map[string]interface{}{
+		"max_open_connections": "5",
+	})
+	defer cleanup()
+
+	if err := db.Close(); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+}
+
+func TestPostgreSQL_Initialize_ConnURLWithDSNFormat(t *testing.T) {
+	cleanup, connURL := postgresql.PrepareTestContainer(t, "13.4-buster")
+	defer cleanup()
+
+	dsnConnURL, err := dbutil.ParseURL(connURL)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	connectionDetails := map[string]interface{}{
+		"connection_url": dsnConnURL,
+	}
+
+	req := dbplugin.InitializeRequest{
+		Config:           connectionDetails,
+		VerifyConnection: true,
+	}
+
+	db := new()
+	dbtesting.AssertInitialize(t, db, req)
+
+	if !db.Initialized {
+		t.Fatal("Database should be initialized")
+	}
+}
+
+// TestPostgreSQL_PasswordAuthentication tests that the default "password_authentication" is "password", and that
+// an error is returned if an invalid "password_authentication" is provided.
+func TestPostgreSQL_PasswordAuthentication(t *testing.T) { + cleanup, connURL := postgresql.PrepareTestContainer(t, "13.4-buster") + defer cleanup() + + dsnConnURL, err := dbutil.ParseURL(connURL) + assert.NoError(t, err) + db := new() + + ctx := context.Background() + + t.Run("invalid-password-authentication", func(t *testing.T) { + connectionDetails := map[string]interface{}{ + "connection_url": dsnConnURL, + "password_authentication": "invalid-password-authentication", + } + + req := dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + } + + _, err := db.Initialize(ctx, req) + assert.EqualError(t, err, "'invalid-password-authentication' is not a valid password authentication type") + }) + + t.Run("default-is-none", func(t *testing.T) { + connectionDetails := map[string]interface{}{ + "connection_url": dsnConnURL, + } + + req := dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + } + + _ = dbtesting.AssertInitialize(t, db, req) + assert.Equal(t, passwordAuthenticationPassword, db.passwordAuthentication) + }) +} + +// TestPostgreSQL_PasswordAuthentication_SCRAMSHA256 tests that password_authentication works when set to scram-sha-256. +// When sending an encrypted password, the raw password should still successfully authenticate the user. +func TestPostgreSQL_PasswordAuthentication_SCRAMSHA256(t *testing.T) { + cleanup, connURL := postgresql.PrepareTestContainer(t, "13.4-buster") + defer cleanup() + + dsnConnURL, err := dbutil.ParseURL(connURL) + if err != nil { + t.Fatal(err) + } + + connectionDetails := map[string]interface{}{ + "connection_url": dsnConnURL, + "password_authentication": string(passwordAuthenticationSCRAMSHA256), + } + + req := dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + } + + db := new() + resp := dbtesting.AssertInitialize(t, db, req) + assert.Equal(t, string(passwordAuthenticationSCRAMSHA256), resp.Config["password_authentication"]) + + if !db.Initialized { + t.Fatal("Database should be initialized") + } + + ctx := context.Background() + newUserRequest := dbplugin.NewUserRequest{ + Statements: dbplugin.Statements{ + Commands: []string{ + ` + CREATE ROLE "{{name}}" WITH + LOGIN + PASSWORD '{{password}}' + VALID UNTIL '{{expiration}}'; + GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";`, + }, + }, + Password: "somesecurepassword", + Expiration: time.Now().Add(1 * time.Minute), + } + newUserResponse, err := db.NewUser(ctx, newUserRequest) + + assertCredsExist(t, db.ConnectionURL, newUserResponse.Username, newUserRequest.Password) +} + +func TestPostgreSQL_NewUser(t *testing.T) { + type testCase struct { + req dbplugin.NewUserRequest + expectErr bool + credsAssertion credsAssertion + } + + tests := map[string]testCase{ + "no creation statements": { + req: dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "test", + RoleName: "test", + }, + // No statements + Password: "somesecurepassword", + Expiration: time.Now().Add(1 * time.Minute), + }, + expectErr: true, + credsAssertion: assertCreds( + assertUsernameRegex("^$"), + assertCredsDoNotExist, + ), + }, + "admin name": { + req: dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "test", + RoleName: "test", + }, + Statements: dbplugin.Statements{ + Commands: []string{ + ` + CREATE ROLE "{{name}}" WITH + LOGIN + PASSWORD '{{password}}' + VALID UNTIL '{{expiration}}'; + GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";`, 
+ }, + }, + Password: "somesecurepassword", + Expiration: time.Now().Add(1 * time.Minute), + }, + expectErr: false, + credsAssertion: assertCreds( + assertUsernameRegex("^v-test-test-[a-zA-Z0-9]{20}-[0-9]{10}$"), + assertCredsExist, + ), + }, + "admin username": { + req: dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "test", + RoleName: "test", + }, + Statements: dbplugin.Statements{ + Commands: []string{ + ` + CREATE ROLE "{{username}}" WITH + LOGIN + PASSWORD '{{password}}' + VALID UNTIL '{{expiration}}'; + GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{username}}";`, + }, + }, + Password: "somesecurepassword", + Expiration: time.Now().Add(1 * time.Minute), + }, + expectErr: false, + credsAssertion: assertCreds( + assertUsernameRegex("^v-test-test-[a-zA-Z0-9]{20}-[0-9]{10}$"), + assertCredsExist, + ), + }, + "read only name": { + req: dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "test", + RoleName: "test", + }, + Statements: dbplugin.Statements{ + Commands: []string{ + ` + CREATE ROLE "{{name}}" WITH + LOGIN + PASSWORD '{{password}}' + VALID UNTIL '{{expiration}}'; + GRANT SELECT ON ALL TABLES IN SCHEMA public TO "{{name}}"; + GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "{{name}}";`, + }, + }, + Password: "somesecurepassword", + Expiration: time.Now().Add(1 * time.Minute), + }, + expectErr: false, + credsAssertion: assertCreds( + assertUsernameRegex("^v-test-test-[a-zA-Z0-9]{20}-[0-9]{10}$"), + assertCredsExist, + ), + }, + "read only username": { + req: dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "test", + RoleName: "test", + }, + Statements: dbplugin.Statements{ + Commands: []string{ + ` + CREATE ROLE "{{username}}" WITH + LOGIN + PASSWORD '{{password}}' + VALID UNTIL '{{expiration}}'; + GRANT SELECT ON ALL TABLES IN SCHEMA public TO "{{username}}"; + GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "{{username}}";`, + }, + }, + Password: "somesecurepassword", + Expiration: time.Now().Add(1 * time.Minute), + }, + expectErr: false, + credsAssertion: assertCreds( + assertUsernameRegex("^v-test-test-[a-zA-Z0-9]{20}-[0-9]{10}$"), + assertCredsExist, + ), + }, + // https://github.com/hashicorp/vault/issues/6098 + "reproduce GH-6098": { + req: dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "test", + RoleName: "test", + }, + Statements: dbplugin.Statements{ + Commands: []string{ + // NOTE: "rolname" in the following line is not a typo. 
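+					// ("rolname", without the "e", is the actual column name in pg_catalog.pg_roles.)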
+ "DO $$ BEGIN IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname='my_role') THEN CREATE ROLE my_role; END IF; END $$", + }, + }, + Password: "somesecurepassword", + Expiration: time.Now().Add(1 * time.Minute), + }, + expectErr: false, + credsAssertion: assertCreds( + assertUsernameRegex("^v-test-test-[a-zA-Z0-9]{20}-[0-9]{10}$"), + assertCredsDoNotExist, + ), + }, + "reproduce issue with template": { + req: dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "test", + RoleName: "test", + }, + Statements: dbplugin.Statements{ + Commands: []string{ + `DO $$ BEGIN IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname='my_role') THEN CREATE ROLE "{{username}}"; END IF; END $$`, + }, + }, + Password: "somesecurepassword", + Expiration: time.Now().Add(1 * time.Minute), + }, + expectErr: false, + credsAssertion: assertCreds( + assertUsernameRegex("^v-test-test-[a-zA-Z0-9]{20}-[0-9]{10}$"), + assertCredsDoNotExist, + ), + }, + "large block statements": { + req: dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "test", + RoleName: "test", + }, + Statements: dbplugin.Statements{ + Commands: newUserLargeBlockStatements, + }, + Password: "somesecurepassword", + Expiration: time.Now().Add(1 * time.Minute), + }, + expectErr: false, + credsAssertion: assertCreds( + assertUsernameRegex("^v-test-test-[a-zA-Z0-9]{20}-[0-9]{10}$"), + assertCredsExist, + ), + }, + } + + // Shared test container for speed - there should not be any overlap between the tests + db, cleanup := getPostgreSQL(t, nil) + defer cleanup() + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + // Give a timeout just in case the test decides to be problematic + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + resp, err := db.NewUser(ctx, test.req) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + test.credsAssertion(t, db.ConnectionURL, resp.Username, test.req.Password) + + // Ensure that the role doesn't expire immediately + time.Sleep(2 * time.Second) + + test.credsAssertion(t, db.ConnectionURL, resp.Username, test.req.Password) + }) + } +} + +func TestUpdateUser_Password(t *testing.T) { + type testCase struct { + statements []string + expectErr bool + credsAssertion credsAssertion + } + + tests := map[string]testCase{ + "default statements": { + statements: nil, + expectErr: false, + credsAssertion: assertCredsExist, + }, + "explicit default statements": { + statements: []string{defaultChangePasswordStatement}, + expectErr: false, + credsAssertion: assertCredsExist, + }, + "name instead of username": { + statements: []string{`ALTER ROLE "{{name}}" WITH PASSWORD '{{password}}';`}, + expectErr: false, + credsAssertion: assertCredsExist, + }, + "bad statements": { + statements: []string{`asdofyas8uf77asoiajv`}, + expectErr: true, + credsAssertion: assertCredsDoNotExist, + }, + } + + // Shared test container for speed - there should not be any overlap between the tests + db, cleanup := getPostgreSQL(t, nil) + defer cleanup() + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + initialPass := "myreallysecurepassword" + createReq := dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "test", + RoleName: "test", + }, + Statements: dbplugin.Statements{ + Commands: []string{createAdminUser}, + }, + Password: initialPass, + 
Expiration: time.Now().Add(2 * time.Second), + } + createResp := dbtesting.AssertNewUser(t, db, createReq) + + assertCredsExist(t, db.ConnectionURL, createResp.Username, initialPass) + + newPass := "somenewpassword" + updateReq := dbplugin.UpdateUserRequest{ + Username: createResp.Username, + Password: &dbplugin.ChangePassword{ + NewPassword: newPass, + Statements: dbplugin.Statements{ + Commands: test.statements, + }, + }, + } + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + _, err := db.UpdateUser(ctx, updateReq) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + test.credsAssertion(t, db.ConnectionURL, createResp.Username, newPass) + }) + } + + t.Run("user does not exist", func(t *testing.T) { + newPass := "somenewpassword" + updateReq := dbplugin.UpdateUserRequest{ + Username: "missing-user", + Password: &dbplugin.ChangePassword{ + NewPassword: newPass, + Statements: dbplugin.Statements{}, + }, + } + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + _, err := db.UpdateUser(ctx, updateReq) + if err == nil { + t.Fatalf("err expected, got nil") + } + + assertCredsDoNotExist(t, db.ConnectionURL, updateReq.Username, newPass) + }) +} + +func TestUpdateUser_Expiration(t *testing.T) { + type testCase struct { + initialExpiration time.Time + newExpiration time.Time + expectedExpiration time.Time + statements []string + expectErr bool + } + + now := time.Now() + tests := map[string]testCase{ + "no statements": { + initialExpiration: now.Add(1 * time.Minute), + newExpiration: now.Add(5 * time.Minute), + expectedExpiration: now.Add(5 * time.Minute), + statements: nil, + expectErr: false, + }, + "default statements with name": { + initialExpiration: now.Add(1 * time.Minute), + newExpiration: now.Add(5 * time.Minute), + expectedExpiration: now.Add(5 * time.Minute), + statements: []string{defaultExpirationStatement}, + expectErr: false, + }, + "default statements with username": { + initialExpiration: now.Add(1 * time.Minute), + newExpiration: now.Add(5 * time.Minute), + expectedExpiration: now.Add(5 * time.Minute), + statements: []string{`ALTER ROLE "{{username}}" VALID UNTIL '{{expiration}}';`}, + expectErr: false, + }, + "bad statements": { + initialExpiration: now.Add(1 * time.Minute), + newExpiration: now.Add(5 * time.Minute), + expectedExpiration: now.Add(1 * time.Minute), + statements: []string{"ladshfouay09sgj"}, + expectErr: true, + }, + } + + // Shared test container for speed - there should not be any overlap between the tests + db, cleanup := getPostgreSQL(t, nil) + defer cleanup() + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + password := "myreallysecurepassword" + initialExpiration := test.initialExpiration.Truncate(time.Second) + createReq := dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "test", + RoleName: "test", + }, + Statements: dbplugin.Statements{ + Commands: []string{createAdminUser}, + }, + Password: password, + Expiration: initialExpiration, + } + createResp := dbtesting.AssertNewUser(t, db, createReq) + + assertCredsExist(t, db.ConnectionURL, createResp.Username, password) + + actualExpiration := getExpiration(t, db, createResp.Username) + if actualExpiration.IsZero() { + t.Fatalf("Initial expiration is zero but should be set") + } + if !actualExpiration.Equal(initialExpiration) { + t.Fatalf("Actual 
expiration: %s Expected expiration: %s", actualExpiration, initialExpiration) + } + + newExpiration := test.newExpiration.Truncate(time.Second) + updateReq := dbplugin.UpdateUserRequest{ + Username: createResp.Username, + Expiration: &dbplugin.ChangeExpiration{ + NewExpiration: newExpiration, + Statements: dbplugin.Statements{ + Commands: test.statements, + }, + }, + } + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + _, err := db.UpdateUser(ctx, updateReq) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + expectedExpiration := test.expectedExpiration.Truncate(time.Second) + actualExpiration = getExpiration(t, db, createResp.Username) + if !actualExpiration.Equal(expectedExpiration) { + t.Fatalf("Actual expiration: %s Expected expiration: %s", actualExpiration, expectedExpiration) + } + }) + } +} + +func getExpiration(t testing.TB, db *PostgreSQL, username string) time.Time { + t.Helper() + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + query := fmt.Sprintf("select valuntil from pg_catalog.pg_user where usename = '%s'", username) + conn, err := db.getConnection(ctx) + if err != nil { + t.Fatalf("Failed to get connection to database: %s", err) + } + + stmt, err := conn.PrepareContext(ctx, query) + if err != nil { + t.Fatalf("Failed to prepare statement: %s", err) + } + defer stmt.Close() + + rows, err := stmt.QueryContext(ctx) + if err != nil { + t.Fatalf("Failed to execute query to get expiration: %s", err) + } + + if !rows.Next() { + return time.Time{} // No expiration + } + rawExp := "" + err = rows.Scan(&rawExp) + if err != nil { + t.Fatalf("Unable to get raw expiration: %s", err) + } + if rawExp == "" { + return time.Time{} // No expiration + } + exp, err := time.Parse(time.RFC3339, rawExp) + if err != nil { + t.Fatalf("Failed to parse expiration %q: %s", rawExp, err) + } + return exp +} + +func TestDeleteUser(t *testing.T) { + type testCase struct { + revokeStmts []string + expectErr bool + credsAssertion credsAssertion + } + + tests := map[string]testCase{ + "no statements": { + revokeStmts: nil, + expectErr: false, + // Wait for a short time before failing because postgres takes a moment to finish deleting the user + credsAssertion: waitUntilCredsDoNotExist(2 * time.Second), + }, + "statements with name": { + revokeStmts: []string{` + REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM "{{name}}"; + REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM "{{name}}"; + REVOKE USAGE ON SCHEMA public FROM "{{name}}"; + + DROP ROLE IF EXISTS "{{name}}";`}, + expectErr: false, + // Wait for a short time before failing because postgres takes a moment to finish deleting the user + credsAssertion: waitUntilCredsDoNotExist(2 * time.Second), + }, + "statements with username": { + revokeStmts: []string{` + REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM "{{username}}"; + REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM "{{username}}"; + REVOKE USAGE ON SCHEMA public FROM "{{username}}"; + + DROP ROLE IF EXISTS "{{username}}";`}, + expectErr: false, + // Wait for a short time before failing because postgres takes a moment to finish deleting the user + credsAssertion: waitUntilCredsDoNotExist(2 * time.Second), + }, + "bad statements": { + revokeStmts: []string{`8a9yhfoiasjff`}, + expectErr: true, + // Wait for a short time before checking because postgres 
takes a moment to finish deleting the user + credsAssertion: assertCredsExistAfter(100 * time.Millisecond), + }, + "multiline": { + revokeStmts: []string{` + DO $$ BEGIN + REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM "{{username}}"; + REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM "{{username}}"; + REVOKE USAGE ON SCHEMA public FROM "{{username}}"; + DROP ROLE IF EXISTS "{{username}}"; + END $$; + `}, + expectErr: false, + // Wait for a short time before checking because postgres takes a moment to finish deleting the user + credsAssertion: waitUntilCredsDoNotExist(2 * time.Second), + }, + } + + // Shared test container for speed - there should not be any overlap between the tests + db, cleanup := getPostgreSQL(t, nil) + defer cleanup() + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + password := "myreallysecurepassword" + createReq := dbplugin.NewUserRequest{ + UsernameConfig: dbplugin.UsernameMetadata{ + DisplayName: "test", + RoleName: "test", + }, + Statements: dbplugin.Statements{ + Commands: []string{createAdminUser}, + }, + Password: password, + Expiration: time.Now().Add(2 * time.Second), + } + createResp := dbtesting.AssertNewUser(t, db, createReq) + + assertCredsExist(t, db.ConnectionURL, createResp.Username, password) + + deleteReq := dbplugin.DeleteUserRequest{ + Username: createResp.Username, + Statements: dbplugin.Statements{ + Commands: test.revokeStmts, + }, + } + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + _, err := db.DeleteUser(ctx, deleteReq) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + test.credsAssertion(t, db.ConnectionURL, createResp.Username, password) + }) + } +} + +type credsAssertion func(t testing.TB, connURL, username, password string) + +func assertCreds(assertions ...credsAssertion) credsAssertion { + return func(t testing.TB, connURL, username, password string) { + t.Helper() + for _, assertion := range assertions { + assertion(t, connURL, username, password) + } + } +} + +func assertUsernameRegex(rawRegex string) credsAssertion { + return func(t testing.TB, _, username, _ string) { + t.Helper() + require.Regexp(t, rawRegex, username) + } +} + +func assertCredsExist(t testing.TB, connURL, username, password string) { + t.Helper() + err := testCredsExist(t, connURL, username, password) + if err != nil { + t.Fatalf("user does not exist: %s", err) + } +} + +func assertCredsDoNotExist(t testing.TB, connURL, username, password string) { + t.Helper() + err := testCredsExist(t, connURL, username, password) + if err == nil { + t.Fatalf("user should not exist but does") + } +} + +func waitUntilCredsDoNotExist(timeout time.Duration) credsAssertion { + return func(t testing.TB, connURL, username, password string) { + t.Helper() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + t.Fatalf("Timed out waiting for user %s to be deleted", username) + case <-ticker.C: + err := testCredsExist(t, connURL, username, password) + if err != nil { + // Happy path + return + } + } + } + } +} + +func assertCredsExistAfter(timeout time.Duration) credsAssertion { + return func(t testing.TB, connURL, username, password string) { + t.Helper() + time.Sleep(timeout) + assertCredsExist(t, connURL, username, password) + } +} 
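+
+// The assertion helpers above compose. As an illustrative sketch (not one of
+// the cases in this file), a test case could check the shape of the generated
+// username and then poll until revocation takes effect:
+//
+//	credsAssertion: assertCreds(
+//		assertUsernameRegex("^v-test-test-"),
+//		waitUntilCredsDoNotExist(2*time.Second),
+//	)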
+ +func testCredsExist(t testing.TB, connURL, username, password string) error { + t.Helper() + // Log in with the new creds + connURL = strings.Replace(connURL, "postgres:secret", fmt.Sprintf("%s:%s", username, password), 1) + db, err := sql.Open("pgx", connURL) + if err != nil { + return err + } + defer db.Close() + return db.Ping() +} + +const createAdminUser = ` +CREATE ROLE "{{name}}" WITH + LOGIN + PASSWORD '{{password}}' + VALID UNTIL '{{expiration}}'; +GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}"; +` + +var newUserLargeBlockStatements = []string{ + ` +DO $$ +BEGIN + IF NOT EXISTS (SELECT * FROM pg_catalog.pg_roles WHERE rolname='foo-role') THEN + CREATE ROLE "foo-role"; + CREATE SCHEMA IF NOT EXISTS foo AUTHORIZATION "foo-role"; + ALTER ROLE "foo-role" SET search_path = foo; + GRANT TEMPORARY ON DATABASE "postgres" TO "foo-role"; + GRANT ALL PRIVILEGES ON SCHEMA foo TO "foo-role"; + GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA foo TO "foo-role"; + GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA foo TO "foo-role"; + GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA foo TO "foo-role"; + END IF; +END +$$ +`, + `CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';`, + `GRANT "foo-role" TO "{{name}}";`, + `ALTER ROLE "{{name}}" SET search_path = foo;`, + `GRANT CONNECT ON DATABASE "postgres" TO "{{name}}";`, +} + +func TestContainsMultilineStatement(t *testing.T) { + type testCase struct { + Input string + Expected bool + } + + testCases := map[string]*testCase{ + "issue 6098 repro": { + Input: `DO $$ BEGIN IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname='my_role') THEN CREATE ROLE my_role; END IF; END $$`, + Expected: true, + }, + "multiline with template fields": { + Input: `DO $$ BEGIN IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname="{{name}}") THEN CREATE ROLE {{name}}; END IF; END $$`, + Expected: true, + }, + "docs example": { + Input: `CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}'; \ + GRANT SELECT ON ALL TABLES IN SCHEMA public TO "{{name}}";`, + Expected: false, + }, + } + + for tName, tCase := range testCases { + t.Run(tName, func(t *testing.T) { + if containsMultilineStatement(tCase.Input) != tCase.Expected { + t.Fatalf("%q should be %t for multiline input", tCase.Input, tCase.Expected) + } + }) + } +} + +func TestExtractQuotedStrings(t *testing.T) { + type testCase struct { + Input string + Expected []string + } + + testCases := map[string]*testCase{ + "no quotes": { + Input: `Five little monkeys jumping on the bed`, + Expected: []string{}, + }, + "two of both quote types": { + Input: `"Five" little 'monkeys' "jumping on" the' 'bed`, + Expected: []string{`"Five"`, `"jumping on"`, `'monkeys'`, `' '`}, + }, + "one single quote": { + Input: `Five little monkeys 'jumping on the bed`, + Expected: []string{}, + }, + "empty string": { + Input: ``, + Expected: []string{}, + }, + "templated field": { + Input: `DO $$ BEGIN IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname="{{name}}") THEN CREATE ROLE {{name}}; END IF; END $$`, + Expected: []string{`"{{name}}"`}, + }, + } + + for tName, tCase := range testCases { + t.Run(tName, func(t *testing.T) { + results, err := extractQuotedStrings(tCase.Input) + if err != nil { + t.Fatal(err) + } + if len(results) != len(tCase.Expected) { + t.Fatalf("%s isn't equal to %s", results, tCase.Expected) + } + for i := range results { + if results[i] != tCase.Expected[i] { + t.Fatalf(`expected %q but received %q`, 
tCase.Expected, results[i]) + } + } + }) + } +} + +func TestUsernameGeneration(t *testing.T) { + type testCase struct { + data dbplugin.UsernameMetadata + expectedRegex string + } + + tests := map[string]testCase{ + "simple display and role names": { + data: dbplugin.UsernameMetadata{ + DisplayName: "token", + RoleName: "myrole", + }, + expectedRegex: `v-token-myrole-[a-zA-Z0-9]{20}-[0-9]{10}`, + }, + "display name has dash": { + data: dbplugin.UsernameMetadata{ + DisplayName: "token-foo", + RoleName: "myrole", + }, + expectedRegex: `v-token-fo-myrole-[a-zA-Z0-9]{20}-[0-9]{10}`, + }, + "display name has underscore": { + data: dbplugin.UsernameMetadata{ + DisplayName: "token_foo", + RoleName: "myrole", + }, + expectedRegex: `v-token_fo-myrole-[a-zA-Z0-9]{20}-[0-9]{10}`, + }, + "display name has period": { + data: dbplugin.UsernameMetadata{ + DisplayName: "token.foo", + RoleName: "myrole", + }, + expectedRegex: `v-token.fo-myrole-[a-zA-Z0-9]{20}-[0-9]{10}`, + }, + "role name has dash": { + data: dbplugin.UsernameMetadata{ + DisplayName: "token", + RoleName: "myrole-foo", + }, + expectedRegex: `v-token-myrole-f-[a-zA-Z0-9]{20}-[0-9]{10}`, + }, + "role name has underscore": { + data: dbplugin.UsernameMetadata{ + DisplayName: "token", + RoleName: "myrole_foo", + }, + expectedRegex: `v-token-myrole_f-[a-zA-Z0-9]{20}-[0-9]{10}`, + }, + "role name has period": { + data: dbplugin.UsernameMetadata{ + DisplayName: "token", + RoleName: "myrole.foo", + }, + expectedRegex: `v-token-myrole.f-[a-zA-Z0-9]{20}-[0-9]{10}`, + }, + } + + for name, test := range tests { + t.Run(fmt.Sprintf("new-%s", name), func(t *testing.T) { + up, err := template.NewTemplate( + template.Template(defaultUserNameTemplate), + ) + require.NoError(t, err) + + for i := 0; i < 1000; i++ { + username, err := up.Generate(test.data) + require.NoError(t, err) + require.Regexp(t, test.expectedRegex, username) + } + }) + } +} + +func TestNewUser_CustomUsername(t *testing.T) { + cleanup, connURL := postgresql.PrepareTestContainer(t, "13.4-buster") + defer cleanup() + + type testCase struct { + usernameTemplate string + newUserData dbplugin.UsernameMetadata + expectedRegex string + } + + tests := map[string]testCase{ + "default template": { + usernameTemplate: "", + newUserData: dbplugin.UsernameMetadata{ + DisplayName: "displayname", + RoleName: "longrolename", + }, + expectedRegex: "^v-displayn-longrole-[a-zA-Z0-9]{20}-[0-9]{10}$", + }, + "explicit default template": { + usernameTemplate: defaultUserNameTemplate, + newUserData: dbplugin.UsernameMetadata{ + DisplayName: "displayname", + RoleName: "longrolename", + }, + expectedRegex: "^v-displayn-longrole-[a-zA-Z0-9]{20}-[0-9]{10}$", + }, + "unique template": { + usernameTemplate: "foo-bar", + newUserData: dbplugin.UsernameMetadata{ + DisplayName: "displayname", + RoleName: "longrolename", + }, + expectedRegex: "^foo-bar$", + }, + "custom prefix": { + usernameTemplate: "foobar-{{.DisplayName | truncate 8}}-{{.RoleName | truncate 8}}-{{random 20}}-{{unix_time}}", + newUserData: dbplugin.UsernameMetadata{ + DisplayName: "displayname", + RoleName: "longrolename", + }, + expectedRegex: "^foobar-displayn-longrole-[a-zA-Z0-9]{20}-[0-9]{10}$", + }, + "totally custom template": { + usernameTemplate: "foobar_{{random 10}}-{{.RoleName | uppercase}}.{{unix_time}}x{{.DisplayName | truncate 5}}", + newUserData: dbplugin.UsernameMetadata{ + DisplayName: "displayname", + RoleName: "longrolename", + }, + expectedRegex: `^foobar_[a-zA-Z0-9]{10}-LONGROLENAME\.[0-9]{10}xdispl$`, + }, + } + + for name, test 
:= range tests { + t.Run(name, func(t *testing.T) { + initReq := dbplugin.InitializeRequest{ + Config: map[string]interface{}{ + "connection_url": connURL, + "username_template": test.usernameTemplate, + }, + VerifyConnection: true, + } + + db := new() + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + _, err := db.Initialize(ctx, initReq) + require.NoError(t, err) + + newUserReq := dbplugin.NewUserRequest{ + UsernameConfig: test.newUserData, + Statements: dbplugin.Statements{ + Commands: []string{ + ` + CREATE ROLE "{{name}}" WITH + LOGIN + PASSWORD '{{password}}' + VALID UNTIL '{{expiration}}'; + GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";`, + }, + }, + Password: "myReally-S3curePassword", + Expiration: time.Now().Add(1 * time.Hour), + } + ctx, cancel = context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + newUserResp, err := db.NewUser(ctx, newUserReq) + require.NoError(t, err) + + require.Regexp(t, test.expectedRegex, newUserResp.Username) + }) + } +} + +// This is a long-running integration test which tests the functionality of Postgres's multi-host +// connection strings. It uses two Postgres containers preconfigured with Replication Manager +// provided by Bitnami. This test currently does not run in CI and must be run manually. This is +// due to the test length, as it requires multiple sleep calls to ensure cluster setup and +// primary node failover occurs before the test steps continue. +// +// To run the test, set the environment variable POSTGRES_MULTIHOST_NET to the value of +// a docker network you've preconfigured, e.g. +// 'docker network create -d bridge postgres-repmgr' +// 'export POSTGRES_MULTIHOST_NET=postgres-repmgr' +func TestPostgreSQL_Repmgr(t *testing.T) { + _, exists := os.LookupEnv("POSTGRES_MULTIHOST_NET") + if !exists { + t.Skipf("POSTGRES_MULTIHOST_NET not set, skipping test") + } + + // Run two postgres-repmgr containers in a replication cluster + db0, runner0, url0, container0 := testPostgreSQL_Repmgr_Container(t, "psql-repl-node-0") + _, _, url1, _ := testPostgreSQL_Repmgr_Container(t, "psql-repl-node-1") + + ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second) + defer cancel() + + time.Sleep(10 * time.Second) + + // Write a read role to the cluster + _, err := db0.NewUser(ctx, dbplugin.NewUserRequest{ + Statements: dbplugin.Statements{ + Commands: []string{ + `CREATE ROLE "ro" NOINHERIT; + GRANT SELECT ON ALL TABLES IN SCHEMA public TO "ro";`, + }, + }, + }) + if err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + // Open a connection to both databases using the multihost connection string + connectionDetails := map[string]interface{}{ + "connection_url": fmt.Sprintf("postgresql://{{username}}:{{password}}@%s,%s/postgres?target_session_attrs=read-write", getHost(url0), getHost(url1)), + "username": "postgres", + "password": "secret", + } + req := dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + } + + db := new() + dbtesting.AssertInitialize(t, db, req) + if !db.Initialized { + t.Fatal("Database should be initialized") + } + defer db.Close() + + // Add a user to the cluster, then stop the primary container + if err = testPostgreSQL_Repmgr_AddUser(ctx, db); err != nil { + t.Fatalf("no error expected, got: %s", err) + } + postgresql.StopContainer(t, ctx, runner0, container0) + + // Try adding a new user immediately - expect failure as the database + // cluster is still switching primaries + err 
= testPostgreSQL_Repmgr_AddUser(ctx, db)
+	if err == nil || !strings.HasSuffix(err.Error(), "ValidateConnect failed (read only connection)") {
+		t.Fatalf("expected error was not received, got: %v", err)
+	}
+
+	time.Sleep(20 * time.Second)
+
+	// Try adding a new user again which should succeed after the sleep
+	// as the primary failover should have finished. Then, restart
+	// the first container which should become a secondary DB.
+	if err = testPostgreSQL_Repmgr_AddUser(ctx, db); err != nil {
+		t.Fatalf("no error expected, got: %s", err)
+	}
+	postgresql.RestartContainer(t, ctx, runner0, container0)
+
+	time.Sleep(10 * time.Second)
+
+	// A final new user to add, which should succeed after the secondary joins.
+	if err = testPostgreSQL_Repmgr_AddUser(ctx, db); err != nil {
+		t.Fatalf("no error expected, got: %s", err)
+	}
+
+	if err := db.Close(); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+}
+
+func testPostgreSQL_Repmgr_Container(t *testing.T, name string) (*PostgreSQL, *docker.Runner, string, string) {
+	envVars := []string{
+		"REPMGR_NODE_NAME=" + name,
+		"REPMGR_NODE_NETWORK_NAME=" + name,
+	}
+
+	runner, cleanup, connURL, containerID := postgresql.PrepareTestContainerRepmgr(t, name, "13.4.0", envVars)
+	t.Cleanup(cleanup)
+
+	connectionDetails := map[string]interface{}{
+		"connection_url": connURL,
+	}
+	req := dbplugin.InitializeRequest{
+		Config:           connectionDetails,
+		VerifyConnection: true,
+	}
+	db := new()
+	dbtesting.AssertInitialize(t, db, req)
+	if !db.Initialized {
+		t.Fatal("Database should be initialized")
+	}
+
+	if err := db.Close(); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	return db, runner, connURL, containerID
+}
+
+func testPostgreSQL_Repmgr_AddUser(ctx context.Context, db *PostgreSQL) error {
+	_, err := db.NewUser(ctx, dbplugin.NewUserRequest{
+		Statements: dbplugin.Statements{
+			Commands: []string{
+				`CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}' INHERIT;
+				GRANT ro TO "{{name}}";`,
+			},
+		},
+	})
+
+	return err
+}
+
+func getHost(url string) string {
+	splitCreds := strings.Split(url, "@")[1]
+
+	return strings.Split(splitCreds, "/")[0]
+}
diff --git a/plugins/database/postgresql/scram/LICENSE b/plugins/database/postgresql/scram/LICENSE
new file mode 100644
index 0000000..cc36995
--- /dev/null
+++ b/plugins/database/postgresql/scram/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 Taishi Kasuga
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file diff --git a/plugins/database/postgresql/scram/scram.go b/plugins/database/postgresql/scram/scram.go new file mode 100644 index 0000000..f5c6923 --- /dev/null +++ b/plugins/database/postgresql/scram/scram.go @@ -0,0 +1,86 @@ +package scram + +// +// @see https://github.com/postgres/postgres/blob/c30f54ad732ca5c8762bb68bbe0f51de9137dd72/src/interfaces/libpq/fe-auth.c#L1167-L1285 +// @see https://github.com/postgres/postgres/blob/e6bdfd9700ebfc7df811c97c2fc46d7e94e329a2/src/interfaces/libpq/fe-auth-scram.c#L868-L905 +// @see https://github.com/postgres/postgres/blob/c30f54ad732ca5c8762bb68bbe0f51de9137dd72/src/port/pg_strong_random.c#L66-L96 +// @see https://github.com/postgres/postgres/blob/e6bdfd9700ebfc7df811c97c2fc46d7e94e329a2/src/common/scram-common.c#L160-L274 +// @see https://github.com/postgres/postgres/blob/e6bdfd9700ebfc7df811c97c2fc46d7e94e329a2/src/common/scram-common.c#L27-L85 + +// Implementation from https://github.com/supercaracal/scram-sha-256/blob/d3c05cd927770a11c6e12de3e3a99c3446a1f78d/main.go +import ( + "crypto/hmac" + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "fmt" + "io" + + "golang.org/x/crypto/pbkdf2" +) + +const ( + // @see https://github.com/postgres/postgres/blob/e6bdfd9700ebfc7df811c97c2fc46d7e94e329a2/src/include/common/scram-common.h#L36-L41 + saltSize = 16 + + // @see https://github.com/postgres/postgres/blob/c30f54ad732ca5c8762bb68bbe0f51de9137dd72/src/include/common/sha2.h#L22 + digestLen = 32 + + // @see https://github.com/postgres/postgres/blob/e6bdfd9700ebfc7df811c97c2fc46d7e94e329a2/src/include/common/scram-common.h#L43-L47 + iterationCnt = 4096 +) + +var ( + clientRawKey = []byte("Client Key") + serverRawKey = []byte("Server Key") +) + +func genSalt(size int) ([]byte, error) { + salt := make([]byte, size) + if _, err := io.ReadFull(rand.Reader, salt); err != nil { + return nil, err + } + return salt, nil +} + +func encodeB64(src []byte) (dst []byte) { + dst = make([]byte, base64.StdEncoding.EncodedLen(len(src))) + base64.StdEncoding.Encode(dst, src) + return +} + +func getHMACSum(key, msg []byte) []byte { + h := hmac.New(sha256.New, key) + _, _ = h.Write(msg) + return h.Sum(nil) +} + +func getSHA256Sum(key []byte) []byte { + h := sha256.New() + _, _ = h.Write(key) + return h.Sum(nil) +} + +func hashPassword(rawPassword, salt []byte, iter, keyLen int) string { + digestKey := pbkdf2.Key(rawPassword, salt, iter, keyLen, sha256.New) + clientKey := getHMACSum(digestKey, clientRawKey) + storedKey := getSHA256Sum(clientKey) + serverKey := getHMACSum(digestKey, serverRawKey) + + return fmt.Sprintf("SCRAM-SHA-256$%d:%s$%s:%s", + iter, + string(encodeB64(salt)), + string(encodeB64(storedKey)), + string(encodeB64(serverKey)), + ) +} + +func Hash(password string) (string, error) { + salt, err := genSalt(saltSize) + if err != nil { + return "", err + } + + hashedPassword := hashPassword([]byte(password), salt, iterationCnt, digestLen) + return hashedPassword, nil +} diff --git a/plugins/database/postgresql/scram/scram_test.go b/plugins/database/postgresql/scram/scram_test.go new file mode 100644 index 0000000..d2933eb --- /dev/null +++ b/plugins/database/postgresql/scram/scram_test.go @@ -0,0 +1,27 @@ +package scram + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +// TestScram tests the Hash method. The hashed password string should have a SCRAM-SHA-256 prefix. 
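+// For illustration (the salt is random, so real values vary), output of Hash
+// has the shape:
+//
+//	SCRAM-SHA-256$4096:<base64 salt>$<base64 stored key>:<base64 server key>
+//
+// where 4096 is iterationCnt; with a 16-byte salt and 32-byte keys the encoded
+// string is always 133 bytes long, which the length assertion below relies on.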
+func TestScram(t *testing.T) {
+	tcs := map[string]struct {
+		Password string
+	}{
+		"empty-password":  {Password: ""},
+		"simple-password": {Password: "password"},
+	}
+
+	for name, tc := range tcs {
+		t.Run(name, func(t *testing.T) {
+			got, err := Hash(tc.Password)
+			assert.NoError(t, err)
+			assert.True(t, strings.HasPrefix(got, "SCRAM-SHA-256$4096:"))
+			assert.Len(t, got, 133)
+		})
+	}
+}
diff --git a/plugins/database/redshift/redshift-database-plugin/main.go b/plugins/database/redshift/redshift-database-plugin/main.go
new file mode 100644
index 0000000..7fcd9b0
--- /dev/null
+++ b/plugins/database/redshift/redshift-database-plugin/main.go
@@ -0,0 +1,26 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package main
+
+import (
+	"log"
+	"os"
+
+	"github.com/hashicorp/vault/plugins/database/redshift"
+	"github.com/hashicorp/vault/sdk/database/dbplugin/v5"
+)
+
+func main() {
+	if err := Run(); err != nil {
+		log.Println(err)
+		os.Exit(1)
+	}
+}
+
+// Run instantiates a RedShift object, and runs the RPC server for the plugin
+func Run() error {
+	dbplugin.ServeMultiplex(redshift.New)
+
+	return nil
+}
diff --git a/plugins/database/redshift/redshift.go b/plugins/database/redshift/redshift.go
new file mode 100644
index 0000000..11ce30a
--- /dev/null
+++ b/plugins/database/redshift/redshift.go
@@ -0,0 +1,483 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package redshift
+
+import (
+	"context"
+	"database/sql"
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/go-secure-stdlib/strutil"
+	dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
+	"github.com/hashicorp/vault/sdk/database/helper/connutil"
+	"github.com/hashicorp/vault/sdk/database/helper/dbutil"
+	"github.com/hashicorp/vault/sdk/helper/dbtxn"
+	"github.com/hashicorp/vault/sdk/helper/template"
+	_ "github.com/jackc/pgx/v4/stdlib"
+)
+
+const (
+	// This is how this plugin will be reflected in middleware
+	// such as metrics.
+	middlewareTypeName = "redshift"
+
+	// This allows us to use the postgres database driver.
+	sqlTypeName = "pgx"
+
+	defaultRenewSQL = `
+ALTER USER "{{name}}" VALID UNTIL '{{expiration}}';
+`
+	defaultRotateRootCredentialsSQL = `
+ALTER USER "{{name}}" WITH PASSWORD '{{password}}';
+`
+	defaultUserNameTemplate = `{{ printf "v-%s-%s-%s-%s" (.DisplayName | truncate 8) (.RoleName | truncate 8) (random 20) (unix_time) | truncate 63 | lowercase }}`
+)
+
+var _ dbplugin.Database = (*RedShift)(nil)
+
+// New implements builtinplugins.BuiltinFactory.
+// Redshift implements (mostly) a postgres 8 interface; part of that is that,
+// under the hood, it lower-cases usernames.
+func New() (interface{}, error) {
+	db := newRedshift()
+	// Wrap the plugin with middleware to sanitize errors
+	dbType := dbplugin.NewDatabaseErrorSanitizerMiddleware(db, db.secretValues)
+	return dbType, nil
+}
+
+func newRedshift() *RedShift {
+	connProducer := &connutil.SQLConnectionProducer{}
+	connProducer.Type = sqlTypeName
+
+	db := &RedShift{
+		SQLConnectionProducer: connProducer,
+	}
+
+	return db
+}
+
+type RedShift struct {
+	*connutil.SQLConnectionProducer
+
+	usernameProducer template.StringTemplate
+}
+
+func (r *RedShift) secretValues() map[string]string {
+	return map[string]string{
+		r.Password: "[password]",
+	}
+}
+
+func (r *RedShift) Type() (string, error) {
+	return middlewareTypeName, nil
+}
+
+// Initialize must be called on each new RedShift struct before use.
+// It uses the connutil.SQLConnectionProducer's Init function to do all the lifting. +func (r *RedShift) Initialize(ctx context.Context, req dbplugin.InitializeRequest) (dbplugin.InitializeResponse, error) { + conf, err := r.Init(ctx, req.Config, req.VerifyConnection) + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("error initializing db: %w", err) + } + + usernameTemplate, err := strutil.GetString(req.Config, "username_template") + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("failed to retrieve username_template: %w", err) + } + if usernameTemplate == "" { + usernameTemplate = defaultUserNameTemplate + } + + up, err := template.NewTemplate(template.Template(usernameTemplate)) + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("unable to initialize username template: %w", err) + } + r.usernameProducer = up + + _, err = r.usernameProducer.Generate(dbplugin.UsernameMetadata{}) + if err != nil { + return dbplugin.InitializeResponse{}, fmt.Errorf("invalid username template: %w", err) + } + + return dbplugin.InitializeResponse{ + Config: conf, + }, nil +} + +// getConnection accepts a context and returns a new pointer to a sql.DB object. +// It's up to the caller to close the connection or handle reuse logic. +func (r *RedShift) getConnection(ctx context.Context) (*sql.DB, error) { + db, err := r.Connection(ctx) + if err != nil { + return nil, err + } + return db.(*sql.DB), nil +} + +// NewUser creates a new user in the database. There is no default statement for +// creating users, so one must be specified in the plugin config. +// Generated usernames are of the form v-{display-name}-{role-name}-{UUID}-{timestamp} +func (r *RedShift) NewUser(ctx context.Context, req dbplugin.NewUserRequest) (dbplugin.NewUserResponse, error) { + if len(req.Statements.Commands) == 0 { + return dbplugin.NewUserResponse{}, dbutil.ErrEmptyCreationStatement + } + + // Grab the lock + r.Lock() + defer r.Unlock() + + username, err := r.usernameProducer.Generate(req.UsernameConfig) + if err != nil { + return dbplugin.NewUserResponse{}, err + } + password := req.Password + expirationStr := req.Expiration.Format("2006-01-02 15:04:05-0700") + + // Get the connection + db, err := r.getConnection(ctx) + if err != nil { + return dbplugin.NewUserResponse{}, err + } + defer db.Close() + + // Start a transaction + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return dbplugin.NewUserResponse{}, err + } + defer func() { + tx.Rollback() + }() + + // Execute each query + for _, stmt := range req.Statements.Commands { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "name": username, + "username": username, + "password": password, + "expiration": expirationStr, + } + if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil { + return dbplugin.NewUserResponse{}, err + } + } + } + + // Commit the transaction + if err := tx.Commit(); err != nil { + return dbplugin.NewUserResponse{}, err + } + return dbplugin.NewUserResponse{ + Username: username, + }, nil +} + +// UpdateUser can update the expiration or the password of a user, or both. +// The updates all happen in a single transaction, so they will either all +// succeed or all fail. +// Both updates support both default and custom statements. 
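+//
+// As an illustrative sketch, a caller could supply a custom password-change
+// statement equivalent to defaultRotateRootCredentialsSQL, using either
+// template spelling:
+//
+//	ALTER USER "{{username}}" WITH PASSWORD '{{password}}';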
+func (r *RedShift) UpdateUser(ctx context.Context, req dbplugin.UpdateUserRequest) (dbplugin.UpdateUserResponse, error) { + if req.Password == nil && req.Expiration == nil { + return dbplugin.UpdateUserResponse{}, errors.New("no changes requested") + } + + r.Lock() + defer r.Unlock() + + db, err := r.getConnection(ctx) + if err != nil { + return dbplugin.UpdateUserResponse{}, err + } + defer db.Close() + + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return dbplugin.UpdateUserResponse{}, err + } + defer func() { + tx.Rollback() + }() + + if req.Expiration != nil { + err = updateUserExpiration(ctx, req, tx) + if err != nil { + return dbplugin.UpdateUserResponse{}, err + } + } + + if req.Password != nil { + err = updateUserPassword(ctx, req, tx) + if err != nil { + return dbplugin.UpdateUserResponse{}, err + } + } + + err = tx.Commit() + return dbplugin.UpdateUserResponse{}, err +} + +func updateUserExpiration(ctx context.Context, req dbplugin.UpdateUserRequest, tx *sql.Tx) error { + if req.Username == "" { + return errors.New("must provide a username to update user expiration") + } + renewStmts := req.Expiration.Statements + if len(renewStmts.Commands) == 0 { + renewStmts.Commands = []string{defaultRenewSQL} + } + + expirationStr := req.Expiration.NewExpiration.Format("2006-01-02 15:04:05-0700") + + for _, stmt := range renewStmts.Commands { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "name": req.Username, + "username": req.Username, + "expiration": expirationStr, + } + if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil { + return err + } + } + } + + return nil +} + +func updateUserPassword(ctx context.Context, req dbplugin.UpdateUserRequest, tx *sql.Tx) error { + username := req.Username + password := req.Password.NewPassword + if username == "" || password == "" { + return errors.New("must provide both username and a new password to update user password") + } + + // Check if the role exists + var exists bool + err := tx.QueryRowContext(ctx, "SELECT exists (SELECT usename FROM pg_user WHERE usename=$1);", username).Scan(&exists) + if err != nil && err != sql.ErrNoRows { + // Server error + return err + } + if err == sql.ErrNoRows || !exists { + // Most likely a user error + return fmt.Errorf("cannot update password for username %q because it does not exist", username) + } + + // Vault requires the database user already exist, and that the credentials + // used to execute the rotation statements has sufficient privileges. + statements := req.Password.Statements.Commands + if len(statements) == 0 { + statements = []string{defaultRotateRootCredentialsSQL} + } + // Execute each query + for _, stmt := range statements { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "name": username, + "username": username, + "password": password, + } + if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil { + return err + } + } + } + + return nil +} + +// DeleteUser supports both default and custom statements to delete a user. 
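+//
+// As an illustrative sketch (compare defaultRedshiftRevocationSQL in the
+// tests), custom statements might revoke grants before dropping the user:
+//
+//	REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM "{{name}}";
+//	REVOKE USAGE ON SCHEMA public FROM "{{name}}";
+//	DROP USER IF EXISTS "{{name}}";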
+func (r *RedShift) DeleteUser(ctx context.Context, req dbplugin.DeleteUserRequest) (dbplugin.DeleteUserResponse, error) { + // Grab the lock + r.Lock() + defer r.Unlock() + + if len(req.Statements.Commands) == 0 { + return r.defaultDeleteUser(ctx, req) + } + + return r.customDeleteUser(ctx, req) +} + +func (r *RedShift) customDeleteUser(ctx context.Context, req dbplugin.DeleteUserRequest) (dbplugin.DeleteUserResponse, error) { + db, err := r.getConnection(ctx) + if err != nil { + return dbplugin.DeleteUserResponse{}, err + } + defer db.Close() + + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return dbplugin.DeleteUserResponse{}, err + } + defer func() { + tx.Rollback() + }() + + for _, stmt := range req.Statements.Commands { + for _, query := range strutil.ParseArbitraryStringSlice(stmt, ";") { + query = strings.TrimSpace(query) + if len(query) == 0 { + continue + } + + m := map[string]string{ + "name": req.Username, + "username": req.Username, + } + if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil { + return dbplugin.DeleteUserResponse{}, err + } + } + } + + return dbplugin.DeleteUserResponse{}, tx.Commit() +} + +func (r *RedShift) defaultDeleteUser(ctx context.Context, req dbplugin.DeleteUserRequest) (dbplugin.DeleteUserResponse, error) { + db, err := r.getConnection(ctx) + if err != nil { + return dbplugin.DeleteUserResponse{}, err + } + defer db.Close() + + username := req.Username + + // Check if the role exists + var exists bool + err = db.QueryRowContext(ctx, "SELECT exists (SELECT usename FROM pg_user WHERE usename=$1);", username).Scan(&exists) + if err != nil && err != sql.ErrNoRows { + return dbplugin.DeleteUserResponse{}, err + } + + if !exists { + // No error as Redshift may have deleted the user via TTL before we got to it. 
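+		// Treating a missing user as success also keeps revocation idempotent:
+		// if Vault retries a revocation that has effectively already happened,
+		// the retry should not error.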
+ return dbplugin.DeleteUserResponse{}, nil + } + + // Query for permissions; we need to revoke permissions before we can drop + // the role + // This isn't done in a transaction because even if we fail along the way, + // we want to remove as much access as possible + stmt, err := db.PrepareContext(ctx, "SELECT DISTINCT table_schema FROM information_schema.role_column_grants WHERE grantee=$1;") + if err != nil { + return dbplugin.DeleteUserResponse{}, err + } + defer stmt.Close() + + rows, err := stmt.QueryContext(ctx, username) + if err != nil { + return dbplugin.DeleteUserResponse{}, err + } + defer rows.Close() + + const initialNumRevocations = 16 + revocationStmts := make([]string, 0, initialNumRevocations) + for rows.Next() { + var schema string + err = rows.Scan(&schema) + if err != nil { + // keep going; remove as many permissions as possible right now + continue + } + revocationStmts = append(revocationStmts, fmt.Sprintf( + `REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA %s FROM %s;`, + dbutil.QuoteIdentifier(schema), + dbutil.QuoteIdentifier(username))) + + revocationStmts = append(revocationStmts, fmt.Sprintf( + `REVOKE USAGE ON SCHEMA %s FROM %s;`, + dbutil.QuoteIdentifier(schema), + dbutil.QuoteIdentifier(username))) + } + + // for good measure, revoke all privileges and usage on schema public + revocationStmts = append(revocationStmts, fmt.Sprintf( + `REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM %s;`, + dbutil.QuoteIdentifier(username))) + + revocationStmts = append(revocationStmts, fmt.Sprintf( + "REVOKE USAGE ON SCHEMA public FROM %s;", + dbutil.QuoteIdentifier(username))) + + // get the current database name so we can issue a REVOKE CONNECT for + // this username + var dbname sql.NullString + if err := db.QueryRowContext(ctx, "SELECT current_database();").Scan(&dbname); err != nil { + return dbplugin.DeleteUserResponse{}, err + } + + if dbname.Valid { + /* + We create this stored procedure to ensure we can durably revoke users on Redshift. We do not + clean up since that can cause race conditions with other instances of Vault attempting to use + this SP at the same time. 
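+
+		The procedure loops over this user's entries in stv_sessions and calls
+		pg_terminate_backend on each, so lingering connections cannot block the
+		DROP USER issued afterwards.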
+ */ + revocationStmts = append(revocationStmts, `CREATE OR REPLACE PROCEDURE terminateloop(dbusername varchar(100)) +LANGUAGE plpgsql +AS $$ +DECLARE + currentpid int; + loopvar int; + qtyconns int; +BEGIN +SELECT COUNT(process) INTO qtyconns FROM stv_sessions WHERE user_name=dbusername; + FOR loopvar IN 1..qtyconns LOOP + SELECT INTO currentpid process FROM stv_sessions WHERE user_name=dbusername ORDER BY process ASC LIMIT 1; + SELECT pg_terminate_backend(currentpid); + END LOOP; +END +$$;`) + + revocationStmts = append(revocationStmts, fmt.Sprintf(`call terminateloop('%s');`, username)) + } + + // again, here, we do not stop on error, as we want to remove as + // many permissions as possible right now + var lastStmtError *multierror.Error // error + for _, query := range revocationStmts { + if err := dbtxn.ExecuteDBQueryDirect(ctx, db, nil, query); err != nil { + lastStmtError = multierror.Append(lastStmtError, err) + } + } + + // can't drop if not all privileges are revoked + if rows.Err() != nil { + return dbplugin.DeleteUserResponse{}, fmt.Errorf("could not generate revocation statements for all rows: %w", rows.Err()) + } + if lastStmtError != nil { + return dbplugin.DeleteUserResponse{}, fmt.Errorf("could not perform all revocation statements: %w", lastStmtError) + } + + // Drop this user + stmt, err = db.PrepareContext(ctx, fmt.Sprintf( + `DROP USER IF EXISTS %s;`, dbutil.QuoteIdentifier(username))) + if err != nil { + return dbplugin.DeleteUserResponse{}, err + } + defer stmt.Close() + if _, err := stmt.ExecContext(ctx); err != nil { + return dbplugin.DeleteUserResponse{}, err + } + + return dbplugin.DeleteUserResponse{}, nil +} diff --git a/plugins/database/redshift/redshift_test.go b/plugins/database/redshift/redshift_test.go new file mode 100644 index 0000000..af26458 --- /dev/null +++ b/plugins/database/redshift/redshift_test.go @@ -0,0 +1,576 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package redshift + +import ( + "context" + "database/sql" + "fmt" + "os" + "reflect" + "regexp" + "testing" + "time" + + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/helper/testhelpers" + dbplugin "github.com/hashicorp/vault/sdk/database/dbplugin/v5" + dbtesting "github.com/hashicorp/vault/sdk/database/dbplugin/v5/testing" + "github.com/hashicorp/vault/sdk/helper/dbtxn" + "github.com/stretchr/testify/require" +) + +/* +To run these sets of acceptance tests, you must pre-configure a Redshift cluster +in AWS and ensure the machine running these tests has network access to it. + +Once the redshift cluster is running, you can pass the admin username and password +as environment variables to be used to run these tests. Note that these tests +will create users on your redshift cluster and currently do not clean up after +themselves. + +Do not run this test suite against a production Redshift cluster. 
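+
+With a cluster available, the suite can be invoked with something like the
+following (illustrative):
+
+	VAULT_ACC=1 REDSHIFT_URL=... REDSHIFT_USER=... REDSHIFT_PASSWORD=... \
+		go test -run 'TestRedshift' ./plugins/database/redshift/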
+
+Configuration:
+
+	REDSHIFT_URL=my-redshift-url.region.redshift.amazonaws.com:5439/database-name
+	REDSHIFT_USER=my-redshift-admin-user
+	REDSHIFT_PASSWORD=my-redshift-admin-password
+	VAULT_ACC= # This must be set to run any of the tests in this test suite
+*/
+
+var (
+	keyRedshiftURL      = "REDSHIFT_URL"
+	keyRedshiftUser     = "REDSHIFT_USER"
+	keyRedshiftPassword = "REDSHIFT_PASSWORD"
+
+	credNames = []string{
+		keyRedshiftURL,
+		keyRedshiftUser,
+		keyRedshiftPassword,
+	}
+
+	vaultACC = "VAULT_ACC"
+)
+
+func interpolateConnectionURL(url, user, password string) string {
+	return fmt.Sprintf("postgres://%s:%s@%s", user, password, url)
+}
+
+func redshiftEnv() (connURL string, url string, user string, password string, errEmpty error) {
+	if url = os.Getenv(keyRedshiftURL); url == "" {
+		return "", "", "", "", fmt.Errorf("%s environment variable required", keyRedshiftURL)
+	}
+
+	if user = os.Getenv(keyRedshiftUser); user == "" {
+		return "", "", "", "", fmt.Errorf("%s environment variable required", keyRedshiftUser)
+	}
+
+	if password = os.Getenv(keyRedshiftPassword); password == "" {
+		return "", "", "", "", fmt.Errorf("%s environment variable required", keyRedshiftPassword)
+	}
+
+	connURL = interpolateConnectionURL(url, user, password)
+	return connURL, url, user, password, nil
+}
+
+func TestRedshift_Initialize(t *testing.T) {
+	if os.Getenv(vaultACC) != "1" {
+		t.SkipNow()
+	}
+
+	// Ensure each cred is populated.
+	testhelpers.SkipUnlessEnvVarsSet(t, credNames)
+
+	connURL, _, _, _, err := redshiftEnv()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	connectionDetails := map[string]interface{}{
+		"connection_url":       connURL,
+		"max_open_connections": 73,
+	}
+
+	db := newRedshift()
+	resp := dbtesting.AssertInitialize(t, db, dbplugin.InitializeRequest{
+		Config:           connectionDetails,
+		VerifyConnection: true,
+	})
+
+	if !db.Initialized {
+		t.Fatal("Database should be initialized")
+	}
+	expectedConfig := make(map[string]interface{})
+	for k, v := range connectionDetails {
+		expectedConfig[k] = v
+	}
+	if !reflect.DeepEqual(expectedConfig, resp.Config) {
+		t.Fatalf("Expected config %+v, but was %v", expectedConfig, resp.Config)
+	}
+	if db.MaxOpenConnections != 73 {
+		t.Fatalf("Expected max_open_connections to be set to 73, but was %d", db.MaxOpenConnections)
+	}
+
+	dbtesting.AssertClose(t, db)
+}
+
+func TestRedshift_NewUser(t *testing.T) {
+	if os.Getenv(vaultACC) != "1" {
+		t.SkipNow()
+	}
+
+	// Ensure each cred is populated.
+	testhelpers.SkipUnlessEnvVarsSet(t, credNames)
+
+	connURL, url, _, _, err := redshiftEnv()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	connectionDetails := map[string]interface{}{
+		"connection_url": connURL,
+	}
+
+	db := newRedshift()
+	dbtesting.AssertInitialize(t, db, dbplugin.InitializeRequest{
+		Config:           connectionDetails,
+		VerifyConnection: true,
+	})
+
+	usernameConfig := dbplugin.UsernameMetadata{
+		DisplayName: "test",
+		RoleName:    "test",
+	}
+
+	const password = "SuperSecurePa55w0rd!"
+ for _, commands := range [][]string{{testRedshiftRole}, {testRedshiftReadOnlyRole}} { + resp := dbtesting.AssertNewUser(t, db, dbplugin.NewUserRequest{ + UsernameConfig: usernameConfig, + Password: password, + Statements: dbplugin.Statements{ + Commands: commands, + }, + Expiration: time.Now().Add(5 * time.Minute), + }) + username := resp.Username + + if err = testCredsExist(t, url, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s\n%s:%s", err, username, password) + } + + usernameRegex := regexp.MustCompile("^v-test-test-[a-zA-Z0-9]{20}-[0-9]{10}$") + if !usernameRegex.Match([]byte(username)) { + t.Fatalf("Expected username %q to match regex %q", username, usernameRegex.String()) + } + } + + dbtesting.AssertClose(t, db) +} + +func TestRedshift_NewUser_NoCreationStatement_ShouldError(t *testing.T) { + if os.Getenv(vaultACC) != "1" { + t.SkipNow() + } + + // Ensure each cred is populated. + testhelpers.SkipUnlessEnvVarsSet(t, credNames) + + connURL, _, _, _, err := redshiftEnv() + if err != nil { + t.Fatal(err) + } + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + db := newRedshift() + dbtesting.AssertInitialize(t, db, dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + }) + + usernameConfig := dbplugin.UsernameMetadata{ + DisplayName: "test", + RoleName: "test", + } + + const password = "SuperSecurePa55w0rd!" + + // Test with no configured Creation Statement + _, err = db.NewUser(context.Background(), dbplugin.NewUserRequest{ + UsernameConfig: usernameConfig, + Password: password, + Statements: dbplugin.Statements{ + Commands: []string{}, // Empty commands field here should cause error. + }, + Expiration: time.Now().Add(5 * time.Minute), + }) + if err == nil { + t.Fatal("Expected error when no creation statement is provided") + } + + dbtesting.AssertClose(t, db) +} + +func TestRedshift_UpdateUser_Expiration(t *testing.T) { + if os.Getenv(vaultACC) != "1" { + t.SkipNow() + } + + // Ensure each cred is populated. + testhelpers.SkipUnlessEnvVarsSet(t, credNames) + + connURL, url, _, _, err := redshiftEnv() + if err != nil { + t.Fatal(err) + } + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + db := newRedshift() + dbtesting.AssertInitialize(t, db, dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + }) + + usernameConfig := dbplugin.UsernameMetadata{ + DisplayName: "test", + RoleName: "test", + } + + const password = "SuperSecurePa55w0rd!" 
+ const initialTTL = 2 * time.Second + const longTTL = time.Minute + for _, commands := range [][]string{{}, {defaultRenewSQL}} { + newResp := dbtesting.AssertNewUser(t, db, dbplugin.NewUserRequest{ + UsernameConfig: usernameConfig, + Password: password, + Statements: dbplugin.Statements{Commands: []string{testRedshiftRole}}, + Expiration: time.Now().Add(initialTTL), + }) + username := newResp.Username + + if err = testCredsExist(t, url, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } + + dbtesting.AssertUpdateUser(t, db, dbplugin.UpdateUserRequest{ + Username: username, + Expiration: &dbplugin.ChangeExpiration{ + NewExpiration: time.Now().Add(longTTL), + Statements: dbplugin.Statements{Commands: commands}, + }, + }) + + // Sleep longer than the initial expiration time + time.Sleep(initialTTL + time.Second) + + if err = testCredsExist(t, url, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } + } + + dbtesting.AssertClose(t, db) +} + +func TestRedshift_UpdateUser_Password(t *testing.T) { + if os.Getenv(vaultACC) != "1" { + t.SkipNow() + } + + // Ensure each cred is populated. + testhelpers.SkipUnlessEnvVarsSet(t, credNames) + + connURL, url, _, _, err := redshiftEnv() + if err != nil { + t.Fatal(err) + } + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + // create the database user + uid, err := uuid.GenerateUUID() + if err != nil { + t.Fatal(err) + } + dbUser := "vaultstatictest-" + fmt.Sprintf("%s", uid) + createTestPGUser(t, connURL, dbUser, "1Password", testRoleStaticCreate) + + db := newRedshift() + dbtesting.AssertInitialize(t, db, dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + }) + + const password1 = "MyTemporaryUserPassword1!" + const password2 = "MyTemporaryUserPassword2!" + + for _, tc := range []struct { + password string + commands []string + }{ + {password1, []string{}}, + {password2, []string{testRedshiftStaticRoleRotate}}, + } { + dbtesting.AssertUpdateUser(t, db, dbplugin.UpdateUserRequest{ + Username: dbUser, + Password: &dbplugin.ChangePassword{ + NewPassword: tc.password, + Statements: dbplugin.Statements{Commands: tc.commands}, + }, + }) + + if err := testCredsExist(t, url, dbUser, tc.password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } + } + + dbtesting.AssertClose(t, db) +} + +func TestRedshift_DeleteUser(t *testing.T) { + if os.Getenv(vaultACC) != "1" { + t.SkipNow() + } + + // Ensure each cred is populated. + testhelpers.SkipUnlessEnvVarsSet(t, credNames) + + connURL, url, _, _, err := redshiftEnv() + if err != nil { + t.Fatal(err) + } + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + db := newRedshift() + dbtesting.AssertInitialize(t, db, dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + }) + + usernameConfig := dbplugin.UsernameMetadata{ + DisplayName: "test", + RoleName: "test", + } + + const password = "SuperSecretPa55word!" 
+ for _, commands := range [][]string{{}, {defaultRedshiftRevocationSQL}} { + newResponse := dbtesting.AssertNewUser(t, db, dbplugin.NewUserRequest{ + UsernameConfig: usernameConfig, + Statements: dbplugin.Statements{Commands: []string{testRedshiftRole}}, + Password: password, + Expiration: time.Now().Add(2 * time.Second), + }) + username := newResponse.Username + + if err = testCredsExist(t, url, username, password); err != nil { + t.Fatalf("Could not connect with new credentials: %s", err) + } + + // Intentionally _not_ using dbtesting here as the call almost always takes longer than the 2s default timeout + db.DeleteUser(context.Background(), dbplugin.DeleteUserRequest{ + Username: username, + Statements: dbplugin.Statements{Commands: commands}, + }) + + if err := testCredsExist(t, url, username, password); err == nil { + t.Fatal("Credentials were not revoked") + } + } + + dbtesting.AssertClose(t, db) +} + +func testCredsExist(t testing.TB, url, username, password string) error { + t.Helper() + + connURL := interpolateConnectionURL(url, username, password) + db, err := sql.Open("pgx", connURL) + if err != nil { + return err + } + defer db.Close() + return db.Ping() +} + +func TestRedshift_DefaultUsernameTemplate(t *testing.T) { + if os.Getenv(vaultACC) != "1" { + t.SkipNow() + } + + // Ensure each cred is populated. + testhelpers.SkipUnlessEnvVarsSet(t, credNames) + + connURL, url, _, _, err := redshiftEnv() + if err != nil { + t.Fatal(err) + } + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + } + + db := newRedshift() + dbtesting.AssertInitialize(t, db, dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + }) + + usernameConfig := dbplugin.UsernameMetadata{ + DisplayName: "test", + RoleName: "test", + } + + const password = "SuperSecurePa55w0rd!" + for _, commands := range [][]string{{testRedshiftRole}, {testRedshiftReadOnlyRole}} { + resp := dbtesting.AssertNewUser(t, db, dbplugin.NewUserRequest{ + UsernameConfig: usernameConfig, + Password: password, + Statements: dbplugin.Statements{ + Commands: commands, + }, + Expiration: time.Now().Add(5 * time.Minute), + }) + username := resp.Username + + if resp.Username == "" { + t.Fatalf("Missing username") + } + + testCredsExist(t, url, username, password) + + require.Regexp(t, `^v-test-test-[a-z0-9]{20}-[0-9]{10}$`, resp.Username) + } + dbtesting.AssertClose(t, db) +} + +func TestRedshift_CustomUsernameTemplate(t *testing.T) { + if os.Getenv(vaultACC) != "1" { + t.SkipNow() + } + + // Ensure each cred is populated. + testhelpers.SkipUnlessEnvVarsSet(t, credNames) + + connURL, url, _, _, err := redshiftEnv() + if err != nil { + t.Fatal(err) + } + + connectionDetails := map[string]interface{}{ + "connection_url": connURL, + "username_template": "{{.DisplayName}}-{{random 10}}", + } + + db := newRedshift() + dbtesting.AssertInitialize(t, db, dbplugin.InitializeRequest{ + Config: connectionDetails, + VerifyConnection: true, + }) + + usernameConfig := dbplugin.UsernameMetadata{ + DisplayName: "test", + RoleName: "test", + } + + const password = "SuperSecurePa55w0rd!" 
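+
+	// With the {{.DisplayName}}-{{random 10}} template configured above,
+	// generated usernames should have the shape "test-" followed by ten
+	// random alphanumeric characters, which the regexp assertion below
+	// verifies.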
+ for _, commands := range [][]string{{testRedshiftRole}, {testRedshiftReadOnlyRole}} { + resp := dbtesting.AssertNewUser(t, db, dbplugin.NewUserRequest{ + UsernameConfig: usernameConfig, + Password: password, + Statements: dbplugin.Statements{ + Commands: commands, + }, + Expiration: time.Now().Add(5 * time.Minute), + }) + username := resp.Username + + if resp.Username == "" { + t.Fatalf("Missing username") + } + + testCredsExist(t, url, username, password) + + require.Regexp(t, `^test-[a-zA-Z0-9]{10}$`, resp.Username) + } + dbtesting.AssertClose(t, db) +} + +const testRedshiftRole = ` +CREATE USER "{{name}}" WITH PASSWORD '{{password}}' VALID UNTIL '{{expiration}}'; +GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}"; +` + +const testRedshiftReadOnlyRole = ` +CREATE USER "{{name}}" WITH + PASSWORD '{{password}}' + VALID UNTIL '{{expiration}}'; +GRANT SELECT ON ALL TABLES IN SCHEMA public TO "{{name}}"; +` + +const defaultRedshiftRevocationSQL = ` +REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM "{{name}}"; +REVOKE USAGE ON SCHEMA public FROM "{{name}}"; + +DROP USER IF EXISTS "{{name}}"; +` + +const testRedshiftStaticRole = ` +CREATE USER "{{name}}" WITH + PASSWORD '{{password}}'; +GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}"; +` + +const testRoleStaticCreate = ` +CREATE USER "{{name}}" WITH + PASSWORD '{{password}}'; +` + +const testRedshiftStaticRoleRotate = ` +ALTER USER "{{name}}" WITH PASSWORD '{{password}}'; +` + +// This is a copy of a test helper method also found in +// builtin/logical/database/rotation_test.go , and should be moved into a shared +// helper file in the future. +func createTestPGUser(t *testing.T, connURL string, username, password, query string) { + t.Helper() + + db, err := sql.Open("pgx", connURL) + defer db.Close() + if err != nil { + t.Fatal(err) + } + + // Start a transaction + ctx := context.Background() + tx, err := db.BeginTx(ctx, nil) + if err != nil { + t.Fatal(err) + } + defer func() { + _ = tx.Rollback() + }() + + m := map[string]string{ + "name": username, + "password": password, + } + if err := dbtxn.ExecuteTxQueryDirect(ctx, tx, m, query); err != nil { + t.Fatal(err) + } + // Commit the transaction + if err := tx.Commit(); err != nil { + t.Fatal(err) + } +} diff --git a/scan.hcl b/scan.hcl new file mode 100644 index 0000000..7553139 --- /dev/null +++ b/scan.hcl @@ -0,0 +1,25 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +repository { + go_modules = true + osv = true + secrets { + all = true + } + dependabot { + required = true + check_config = true + } + + plugin "semgrep" { + use_git_ignore = true + exclude = ["vendor"] + config = ["tools/semgrep/ci", "p/r2c-security-audit"] + exclude_rule = ["generic.html-templates.security.unquoted-attribute-var.unquoted-attribute-var"] + } + + plugin "codeql" { + languages = ["go"] + } +} diff --git a/scripts/assetcheck.sh b/scripts/assetcheck.sh new file mode 100755 index 0000000..d846dd5 --- /dev/null +++ b/scripts/assetcheck.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +if [[ ! -e http/web_ui/index.html ]] +then + printf "Compiled UI assets not found. They can be built with: make static-dist\n\n" + exit 1 +else + if [[ `find http/web_ui/index.html -mmin +10080` ]] + then + printf "Compiled UI assets are more than one week old. 
They can be rebuilt with: make static-dist\n\n" + exit 1 + fi +fi diff --git a/scripts/build.sh b/scripts/build.sh new file mode 100755 index 0000000..cf990fa --- /dev/null +++ b/scripts/build.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +# +# This script builds the application from source for multiple platforms. +set -e + +GO_CMD=${GO_CMD:-go} + +# Get the parent directory of where this script is. +SOURCE="${BASH_SOURCE[0]}" +SOURCE_DIR=$( dirname "$SOURCE" ) +while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done +DIR="$( cd -P "$SOURCE_DIR/.." && pwd )" + +# Change into that directory +cd "$DIR" + +# Set build tags +BUILD_TAGS="${BUILD_TAGS:-"vault"}" + +# Get the git commit +GIT_COMMIT="$("$SOURCE_DIR"/ci-helper.sh revision)" +GIT_DIRTY="$(test -n "`git status --porcelain`" && echo "+CHANGES" || true)" + +BUILD_DATE="$("$SOURCE_DIR"/ci-helper.sh date)" + +GOPATH=${GOPATH:-$(${GO_CMD} env GOPATH)} +case $(uname) in + CYGWIN*) + GOPATH="$(cygpath $GOPATH)" + ;; +esac + +# Delete the old dir +echo "==> Removing old directory..." +rm -f bin/* +rm -rf pkg/* +mkdir -p bin/ + +# Build! +echo "==> Building..." +${GO_CMD} build \ + -gcflags "${GCFLAGS}" \ + -ldflags "${LD_FLAGS} -X github.com/hashicorp/vault/version.GitCommit='${GIT_COMMIT}${GIT_DIRTY}' -X github.com/hashicorp/vault/version.BuildDate=${BUILD_DATE}" \ + -o "bin/vault" \ + -tags "${BUILD_TAGS}" \ + . + +# Move all the compiled things to the $GOPATH/bin +OLDIFS=$IFS +IFS=: FIRST=($GOPATH) BIN_PATH=${GOBIN:-${FIRST}/bin} +IFS=$OLDIFS + +# Ensure the go bin folder exists +mkdir -p ${BIN_PATH} +rm -f ${BIN_PATH}/vault +cp bin/vault ${BIN_PATH} + +# Done! +echo +echo "==> Results:" +ls -hl bin/ diff --git a/scripts/ci-helper.sh b/scripts/ci-helper.sh new file mode 100755 index 0000000..45e290d --- /dev/null +++ b/scripts/ci-helper.sh @@ -0,0 +1,172 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +# The ci-helper is used to determine build metadata, build Vault binaries, +# package those binaries into artifacts, and execute tests with those artifacts. 
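+#
+# Example invocations (output values are illustrative):
+#
+#   scripts/ci-helper.sh revision                          # prints the HEAD SHA
+#   VERSION=1.14.8 scripts/ci-helper.sh artifact-basename  # vault_1.14.8_linux_amd64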
+ +set -euo pipefail + +# We don't want to get stuck in some kind of interactive pager +export GIT_PAGER=cat + +# Get the build date from the latest commit since it can be used across all +# builds +function build_date() { + # It's tricky to do an RFC3339 format in a cross platform way, so we hardcode UTC + : "${DATE_FORMAT:="%Y-%m-%dT%H:%M:%SZ"}" + git show --no-show-signature -s --format=%cd --date=format:"$DATE_FORMAT" HEAD +} + +# Get the revision, which is the latest commit SHA +function build_revision() { + git rev-parse HEAD +} + +# Determine our repository by looking at our origin URL +function repo() { + basename -s .git "$(git config --get remote.origin.url)" +} + +# Determine the artifact basename based on metadata +function artifact_basename() { + : "${PKG_NAME:="vault"}" + : "${GOOS:=$(go env GOOS)}" + : "${GOARCH:=$(go env GOARCH)}" + + : "${VERSION:=""}" + if [ -z "$VERSION" ]; then + echo "You must specify the VERSION variable for this command" >&2 + exit 1 + fi + + echo "${PKG_NAME}_${VERSION}_${GOOS}_${GOARCH}" +} + +# Bundle the dist directory into a zip +function bundle() { + : "${BUNDLE_PATH:=$(repo_root)/vault.zip}" + echo "--> Bundling dist/* to $BUNDLE_PATH" + zip -r -j "$BUNDLE_PATH" dist/ +} + +# Determine the root directory of the repository +function repo_root() { + git rev-parse --show-toplevel +} + +# Build the UI +function build_ui() { + local repo_root + repo_root=$(repo_root) + + pushd "$repo_root" + mkdir -p http/web_ui + popd + pushd "$repo_root/ui" + yarn install + npm rebuild node-sass + yarn run build + popd +} + +# Build Vault +function build() { + local revision + local build_date + local ldflags + local msg + + # Get or set our basic build metadata + revision=$(build_revision) + build_date=$(build_date) # + : "${BIN_PATH:="dist/"}" #if not run by actions-go-build (enos local) then set this explicitly + : "${GO_TAGS:=""}" + : "${REMOVE_SYMBOLS:=""}" + + (unset GOOS; unset GOARCH; go generate ./...) + + # Build our ldflags + msg="--> Building Vault revision $revision, built $build_date" + + # Keep the symbol and dwarf information by default + if [ -n "$REMOVE_SYMBOLS" ]; then + ldflags="-s -w " + else + ldflags="" + fi + + ldflags="${ldflags} -X github.com/hashicorp/vault/version.GitCommit=$revision -X github.com/hashicorp/vault/version.BuildDate=$build_date" + + if [[ ${VERSION_METADATA+x} ]]; then + msg="${msg}, metadata ${VERSION_METADATA}" + ldflags="${ldflags} -X github.com/hashicorp/vault/version.VersionMetadata=$VERSION_METADATA" + fi + + # Build vault + echo "$msg" + pushd "$(repo_root)" + mkdir -p dist + mkdir -p out + set -x + go build -v -tags "$GO_TAGS" -ldflags "$ldflags" -o dist/ + set +x + popd +} + +# Prepare legal requirements for packaging +function prepare_legal() { + : "${PKG_NAME:="vault"}" + + pushd "$(repo_root)" + mkdir -p dist + curl -o dist/EULA.txt https://eula.hashicorp.com/EULA.txt + curl -o dist/TermsOfEvaluation.txt https://eula.hashicorp.com/TermsOfEvaluation.txt + mkdir -p ".release/linux/package/usr/share/doc/$PKG_NAME" + cp dist/EULA.txt ".release/linux/package/usr/share/doc/$PKG_NAME/EULA.txt" + cp dist/TermsOfEvaluation.txt ".release/linux/package/usr/share/doc/$PKG_NAME/TermsOfEvaluation.txt" + popd +} + +# Package version converts a vault version string into a compatible representation for system +# packages. 
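+# For example, a prerelease such as "1.14.8-rc1" becomes "1.14.8~rc1": the
+# tilde sorts before the final release in deb/rpm version comparisons.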
+function version_package() { + awk '{ gsub("-","~",$1); print $1 }' <<< "$VAULT_VERSION" +} + +# Run the CI Helper +function main() { + case $1 in + artifact-basename) + artifact_basename + ;; + build) + build + ;; + build-ui) + build_ui + ;; + bundle) + bundle + ;; + date) + build_date + ;; + prepare-legal) + prepare_legal + ;; + revision) + build_revision + ;; + version-package) + version_package + ;; + *) + echo "unknown sub-command" >&2 + exit 1 + ;; + esac +} + +main "$@" diff --git a/scripts/coverage.sh b/scripts/coverage.sh new file mode 100755 index 0000000..7f5d49e --- /dev/null +++ b/scripts/coverage.sh @@ -0,0 +1,55 @@ +#!/bin/sh +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +# Generate test coverage statistics for Go packages. +# +# Works around the fact that `go test -coverprofile` currently does not work +# with multiple packages, see https://code.google.com/p/go/issues/detail?id=6909 +# +# Usage: script/coverage [--html|--coveralls] +# +# --html Additionally create HTML report and open it in browser +# --coveralls Push coverage statistics to coveralls.io +# + +set -e + +workdir=.cover +profile="$workdir/cover.out" +mode=count + +generate_cover_data() { + rm -rf "$workdir" + mkdir "$workdir" + + for pkg in "$@"; do + f="$workdir/$(echo $pkg | tr / -).cover" + go test -covermode="$mode" -coverprofile="$f" "$pkg" + done + + echo "mode: $mode" >"$profile" + grep -h -v "^mode:" "$workdir"/*.cover >>"$profile" +} + +show_cover_report() { + go tool cover -${1}="$profile" +} + +push_to_coveralls() { + echo "Pushing coverage statistics to coveralls.io" + goveralls -coverprofile="$profile" +} + +generate_cover_data $(go list ./... | grep -v /vendor/) +show_cover_report func +case "$1" in +"") + ;; +--html) + show_cover_report html ;; +--coveralls) + push_to_coveralls ;; +*) + echo >&2 "error: invalid option: $1"; exit 1 ;; +esac diff --git a/scripts/cross/Dockerfile b/scripts/cross/Dockerfile new file mode 100644 index 0000000..c0fddb3 --- /dev/null +++ b/scripts/cross/Dockerfile @@ -0,0 +1,40 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +FROM debian:buster + +RUN apt-get update -y && apt-get install --no-install-recommends -y -q \ + curl \ + zip \ + build-essential \ + gcc-multilib \ + g++-multilib \ + ca-certificates \ + git mercurial bzr \ + gnupg \ + libltdl-dev \ + libltdl7 + +RUN curl -sL https://deb.nodesource.com/setup_16.x | bash - +RUN curl -sL https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - +RUN echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list + +RUN apt-get update -y && apt-get install -y -q nodejs yarn + +RUN rm -rf /var/lib/apt/lists/* + + +ENV GOVERSION 1.13.8 +RUN mkdir /goroot && mkdir /gopath +RUN curl https://storage.googleapis.com/golang/go${GOVERSION}.linux-amd64.tar.gz \ + | tar xvzf - -C /goroot --strip-components=1 + +ENV GOPATH /gopath +ENV GOROOT /goroot +ENV PATH $GOROOT/bin:$GOPATH/bin:$PATH + +RUN go get golang.org/x/tools/cmd/goimports + +RUN mkdir -p /gopath/src/github.com/hashicorp/vault +WORKDIR /gopath/src/github.com/hashicorp/vault +CMD make static-dist bin diff --git a/scripts/deprecations-checker.sh b/scripts/deprecations-checker.sh new file mode 100755 index 0000000..586ac13 --- /dev/null +++ b/scripts/deprecations-checker.sh @@ -0,0 +1,38 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +# This script is sourced into the shell running in a Github Actions workflow. 
+
+# Usage:
+# To check deprecations locally using the script, follow these steps:
+# From the repository root or within a package folder, execute deprecations-checker.sh
+# Optionally: to only show deprecations in changed files between the current branch and
+# a specific branch, pass the other branch name as an argument to the script.
+#
+# For example:
+# ./scripts/deprecations-checker.sh (or) make deprecations
+# ./scripts/deprecations-checker.sh main (or) make ci-deprecations
+#
+# If no branch name is specified, the command will show all usage of deprecations in the code.
+#
+# GitHub Actions runs this against the PR's base ref branch.
+
+# Staticcheck uses static analysis to find bugs and performance issues, offers simplifications,
+# and enforces style rules.
+# Here, it is used to check if a deprecated function, variable, constant or field is used.
+
+# Run staticcheck
+set -e
+echo "Performing deprecations check: running staticcheck"
+
+
+# If no compare branch name is specified, output all deprecations
+# Else only output the deprecations from the changes added
+if [ -z "$1" ]
+  then
+    staticcheck -checks="SA1019" -tags="$BUILD_TAGS"
+  else
+    # GitHub Actions will use this to find only changes wrt PR's base ref branch
+    # revgrep CLI tool will return an exit status of 1 if any issues match, else it will return 0
+    staticcheck -checks="SA1019" -tags="$BUILD_TAGS" 2>&1 | revgrep origin/"$1"
+fi
diff --git a/scripts/deps_upgrade.py b/scripts/deps_upgrade.py
new file mode 100644
index 0000000..edd1b52
--- /dev/null
+++ b/scripts/deps_upgrade.py
@@ -0,0 +1,13 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+import os
+import sys
+
+filename = sys.argv[1]
+with open(filename) as f:
+    content = f.readlines()
+    for l in content:
+        name = l.split()[0]
+        print(name)
+        os.system("go get " + name + "@latest")
\ No newline at end of file
diff --git a/scripts/dist.sh b/scripts/dist.sh
new file mode 100755
index 0000000..fc605d4
--- /dev/null
+++ b/scripts/dist.sh
@@ -0,0 +1,79 @@
+#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+set -e
+
+# Get the version from the command line
+VERSION=$1
+if [ -z "$VERSION" ]; then
+  echo "Please specify a version."
+  exit 1
+fi
+
+# Make sure we have AWS API keys
+if ([ -z $AWS_ACCESS_KEY_ID ] || [ -z $AWS_SECRET_ACCESS_KEY ]) && [ ! -z $HC_RELEASE ]; then
+  echo "Please set your AWS access key information in the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY env vars."
+  exit 1
+fi
+
+if [ -z $NOBUILD ] && [ -z $DOCKER_CROSS_IMAGE ]; then
+  echo "Please set the Docker cross-compile image in DOCKER_CROSS_IMAGE"
+  exit 1
+fi
+
+# Get the parent directory of where this script is.
+SOURCE="${BASH_SOURCE[0]}"
+while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
+DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )"
+
+# Change into that dir because we expect that
+cd $DIR
+
+if [ -z $RELBRANCH ]; then
+  RELBRANCH=main
+fi
+
+# Tag, unless told not to
+if [ -z $NOTAG ]; then
+  echo "==> Tagging..."
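+  # Create an empty, GPG-signed release commit, then an annotated, signed
+  # tag for v${VERSION} pointing at the release branch.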
+ git commit --allow-empty --gpg-sign=348FFC4C -m "Cut version $VERSION" + git tag -a -m "Version $VERSION" -s -u 348FFC4C "v${VERSION}" $RELBRANCH +fi + +# Build the packages +if [ -z $NOBUILD ]; then + # This should be a local build of the Dockerfile in the cross dir + docker run --rm -v "$(pwd)":/gopath/src/github.com/hashicorp/vault -w /gopath/src/github.com/hashicorp/vault ${DOCKER_CROSS_IMAGE} +fi + +# Zip all the files +rm -rf ./pkg/dist +mkdir -p ./pkg/dist +for FILENAME in $(find ./pkg -mindepth 1 -maxdepth 1 -type f); do + FILENAME=$(basename $FILENAME) + cp ./pkg/${FILENAME} ./pkg/dist/vault_${VERSION}_${FILENAME} +done + +if [ -z $NOSIGN ]; then + echo "==> Signing..." + pushd ./pkg/dist + rm -f ./vault_${VERSION}_SHA256SUMS* + shasum -a256 * > ./vault_${VERSION}_SHA256SUMS + gpg --default-key 348FFC4C --detach-sig ./vault_${VERSION}_SHA256SUMS + popd +fi + +# Upload +if [ ! -z $HC_RELEASE ]; then + hc-releases upload $DIR/pkg/dist + hc-releases publish + + curl -X PURGE https://releases.hashicorp.com/vault/${VERSION} + for FILENAME in $(find $DIR/pkg/dist -type f); do + FILENAME=$(basename $FILENAME) + curl -X PURGE https://releases.hashicorp.com/vault/${VERSION}/${FILENAME} + done +fi + +exit 0 diff --git a/scripts/docker/Dockerfile b/scripts/docker/Dockerfile new file mode 100644 index 0000000..ceb6ec6 --- /dev/null +++ b/scripts/docker/Dockerfile @@ -0,0 +1,66 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +# Multi-stage builder to avoid polluting users environment with wrong +# architecture binaries. +ARG VERSION + +FROM golang:${VERSION} AS builder + +ARG CGO_ENABLED=0 +ARG BUILD_TAGS + +WORKDIR /go/src/github.com/hashicorp/vault +COPY . . + +RUN make bootstrap \ + && CGO_ENABLED=$CGO_ENABLED BUILD_TAGS="${BUILD_TAGS}" VAULT_DEV_BUILD=1 sh -c "'./scripts/build.sh'" + +# Docker Image + +FROM alpine:3.13 + +# Create a vault user and group first so the IDs get set the same way, +# even as the rest of this may change over time. +RUN addgroup vault && \ + adduser -S -G vault vault + +# Set up certificates, our base tools, and Vault. +RUN set -eux; \ + apk add --no-cache ca-certificates libcap su-exec dumb-init tzdata + +COPY --from=builder /go/src/github.com/hashicorp/vault/bin/vault /bin/vault + +# /vault/logs is made available to use as a location to store audit logs, if +# desired; /vault/file is made available to use as a location with the file +# storage backend, if desired; the server will be started with /vault/config as +# the configuration directory so you can add additional config files in that +# location. +RUN mkdir -p /vault/logs && \ + mkdir -p /vault/file && \ + mkdir -p /vault/config && \ + chown -R vault:vault /vault + +# Expose the logs directory as a volume since there's potentially long-running +# state in there +VOLUME /vault/logs + +# Expose the file directory as a volume since there's potentially long-running +# state in there +VOLUME /vault/file + +# 8200/tcp is the primary interface that applications use to interact with +# Vault. +EXPOSE 8200 + +# The entry point script uses dumb-init as the top-level process to reap any +# zombie processes created by Vault sub-processes. +# +# For production derivatives of this container, you should add the IPC_LOCK +# capability so that Vault can mlock memory. +COPY ./scripts/docker/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh +ENTRYPOINT ["docker-entrypoint.sh"] + +# By default you'll get a single-node development server that stores everything +# in RAM and bootstraps itself. 
Don't use this configuration for production. +CMD ["server", "-dev"] diff --git a/scripts/docker/Dockerfile.ui b/scripts/docker/Dockerfile.ui new file mode 100644 index 0000000..cac9692 --- /dev/null +++ b/scripts/docker/Dockerfile.ui @@ -0,0 +1,89 @@ +# Multi-stage builder to avoid polluting users environment with wrong +# architecture binaries. This file only currently works for linux/amd64. +FROM debian:buster AS builder + +ARG VERSION +ARG CGO_ENABLED=0 +ARG BUILD_TAGS +ENV JOBS=2 + +RUN apt-get update -y && apt-get install --no-install-recommends -y -q \ + curl \ + zip \ + build-essential \ + gcc-multilib \ + g++-multilib \ + ca-certificates \ + git mercurial bzr \ + gnupg \ + libltdl-dev \ + libltdl7 + +RUN curl -sL https://deb.nodesource.com/setup_16.x | bash - +RUN curl -sL https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - +RUN echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list + +RUN apt-get update -y && apt-get install -y -q nodejs yarn + +RUN rm -rf /var/lib/apt/lists/* + +RUN mkdir /goroot && mkdir /go +RUN curl https://storage.googleapis.com/golang/go${VERSION}.linux-amd64.tar.gz \ + | tar xvzf - -C /goroot --strip-components=1 +ENV GOPATH /go +ENV GOROOT /goroot +ENV PATH $GOROOT/bin:$GOPATH/bin:$PATH + +WORKDIR /go/src/github.com/hashicorp/vault +COPY . . +RUN make bootstrap static-dist \ + && CGO_ENABLED=$CGO_ENABLED BUILD_TAGS="${BUILD_TAGS} ui" VAULT_DEV_BUILD=1 GOOS=linux GOARCH=amd64 sh -c "'./scripts/build.sh'" + +# Docker Image + +FROM alpine:3.13 + +# Create a vault user and group first so the IDs get set the same way, +# even as the rest of this may change over time. +RUN addgroup vault && \ + adduser -S -G vault vault + +# Set up certificates, our base tools, and Vault. +RUN set -eux; \ + apk add --no-cache ca-certificates libcap su-exec dumb-init tzdata + +COPY --from=builder /go/src/github.com/hashicorp/vault/bin/vault /bin/vault + +# /vault/logs is made available to use as a location to store audit logs, if +# desired; /vault/file is made available to use as a location with the file +# storage backend, if desired; the server will be started with /vault/config as +# the configuration directory so you can add additional config files in that +# location. +RUN mkdir -p /vault/logs && \ + mkdir -p /vault/file && \ + mkdir -p /vault/config && \ + chown -R vault:vault /vault + +# Expose the logs directory as a volume since there's potentially long-running +# state in there +VOLUME /vault/logs + +# Expose the file directory as a volume since there's potentially long-running +# state in there +VOLUME /vault/file + +# 8200/tcp is the primary interface that applications use to interact with +# Vault. +EXPOSE 8200 + +# The entry point script uses dumb-init as the top-level process to reap any +# zombie processes created by Vault sub-processes. +# +# For production derivatives of this container, you should add the IPC_LOCK +# capability so that Vault can mlock memory. +COPY ./scripts/docker/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh +ENTRYPOINT ["docker-entrypoint.sh"] + +# By default you'll get a single-node development server that stores everything +# in RAM and bootstraps itself. Don't use this configuration for production. 
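+#
+# A typical local invocation (tag name is illustrative) would be:
+#   docker build -f scripts/docker/Dockerfile.ui --build-arg VERSION=<go version> -t vault:ui-dev .
+#   docker run --cap-add=IPC_LOCK -p 8200:8200 vault:ui-dev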
+CMD ["server", "-dev"] diff --git a/scripts/docker/docker-entrypoint.sh b/scripts/docker/docker-entrypoint.sh new file mode 100755 index 0000000..2b9b8f3 --- /dev/null +++ b/scripts/docker/docker-entrypoint.sh @@ -0,0 +1,107 @@ +#!/usr/bin/dumb-init /bin/sh +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +set -e + +# Note above that we run dumb-init as PID 1 in order to reap zombie processes +# as well as forward signals to all processes in its session. Normally, sh +# wouldn't do either of these functions so we'd leak zombies as well as do +# unclean termination of all our sub-processes. + +# Prevent core dumps +ulimit -c 0 + +# Allow setting VAULT_REDIRECT_ADDR and VAULT_CLUSTER_ADDR using an interface +# name instead of an IP address. The interface name is specified using +# VAULT_REDIRECT_INTERFACE and VAULT_CLUSTER_INTERFACE environment variables. If +# VAULT_*_ADDR is also set, the resulting URI will combine the protocol and port +# number with the IP of the named interface. +get_addr () { + local if_name=$1 + local uri_template=$2 + ip addr show dev $if_name | awk -v uri=$uri_template '/\s*inet\s/ { \ + ip=gensub(/(.+)\/.+/, "\\1", "g", $2); \ + print gensub(/^(.+:\/\/).+(:.+)$/, "\\1" ip "\\2", "g", uri); \ + exit}' +} + +if [ -n "$VAULT_REDIRECT_INTERFACE" ]; then + export VAULT_REDIRECT_ADDR=$(get_addr $VAULT_REDIRECT_INTERFACE ${VAULT_REDIRECT_ADDR:-"http://0.0.0.0:8200"}) + echo "Using $VAULT_REDIRECT_INTERFACE for VAULT_REDIRECT_ADDR: $VAULT_REDIRECT_ADDR" +fi +if [ -n "$VAULT_CLUSTER_INTERFACE" ]; then + export VAULT_CLUSTER_ADDR=$(get_addr $VAULT_CLUSTER_INTERFACE ${VAULT_CLUSTER_ADDR:-"https://0.0.0.0:8201"}) + echo "Using $VAULT_CLUSTER_INTERFACE for VAULT_CLUSTER_ADDR: $VAULT_CLUSTER_ADDR" +fi + +# VAULT_CONFIG_DIR isn't exposed as a volume but you can compose additional +# config files in there if you use this image as a base, or use +# VAULT_LOCAL_CONFIG below. +VAULT_CONFIG_DIR=/vault/config + +# You can also set the VAULT_LOCAL_CONFIG environment variable to pass some +# Vault configuration JSON without having to bind any volumes. +if [ -n "$VAULT_LOCAL_CONFIG" ]; then + echo "$VAULT_LOCAL_CONFIG" > "$VAULT_CONFIG_DIR/local.json" +fi + +# If the user is trying to run Vault directly with some arguments, then +# pass them to Vault. +if [ "${1:0:1}" = '-' ]; then + set -- vault "$@" +fi + +# Look for Vault subcommands. +if [ "$1" = 'server' ]; then + shift + set -- vault server \ + -config="$VAULT_CONFIG_DIR" \ + -dev-root-token-id="$VAULT_DEV_ROOT_TOKEN_ID" \ + -dev-listen-address="${VAULT_DEV_LISTEN_ADDRESS:-"0.0.0.0:8200"}" \ + "$@" +elif [ "$1" = 'version' ]; then + # This needs a special case because there's no help output. + set -- vault "$@" +elif vault --help "$1" 2>&1 | grep -q "vault $1"; then + # We can't use the return code to check for the existence of a subcommand, so + # we have to use grep to look for a pattern in the help output. + set -- vault "$@" +fi + +# If we are running Vault, make sure it executes as the proper user. 
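+# Concretely: chown any bind-mounted config/logs/file directories unless
+# SKIP_CHOWN is set, manage the IPC_LOCK capability on the binary unless
+# SKIP_SETCAP is set, and, when running as root, drop to the vault user via
+# su-exec.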
+if [ "$1" = 'vault' ]; then + if [ -z "$SKIP_CHOWN" ]; then + # If the config dir is bind mounted then chown it + if [ "$(stat -c %u /vault/config)" != "$(id -u vault)" ]; then + chown -R vault:vault /vault/config || echo "Could not chown /vault/config (may not have appropriate permissions)" + fi + + # If the logs dir is bind mounted then chown it + if [ "$(stat -c %u /vault/logs)" != "$(id -u vault)" ]; then + chown -R vault:vault /vault/logs + fi + + # If the file dir is bind mounted then chown it + if [ "$(stat -c %u /vault/file)" != "$(id -u vault)" ]; then + chown -R vault:vault /vault/file + fi + fi + + if [ -z "$SKIP_SETCAP" ]; then + # Allow mlock to avoid swapping Vault memory to disk + setcap cap_ipc_lock=+ep $(readlink -f $(which vault)) + + # In the case vault has been started in a container without IPC_LOCK privileges + if ! vault -version 1>/dev/null 2>/dev/null; then + >&2 echo "Couldn't start vault with IPC_LOCK. Disabling IPC_LOCK, please use --cap-add IPC_LOCK" + setcap cap_ipc_lock=-ep $(readlink -f $(which vault)) + fi + fi + + if [ "$(id -u)" = '0' ]; then + set -- su-exec vault "$@" + fi +fi + +exec "$@" diff --git a/scripts/gen_openapi.sh b/scripts/gen_openapi.sh new file mode 100755 index 0000000..852c683 --- /dev/null +++ b/scripts/gen_openapi.sh @@ -0,0 +1,112 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +set -e + +# Generate an OpenAPI document for all backends. +# +# Assumptions: +# +# 1. Vault has been checked out at an appropriate version and built +# 2. vault executable is in your path +# 3. Vault isn't already running +# 4. jq is installed + +cd "$(dirname "${BASH_SOURCE[0]}")" + +echo "Starting Vault..." +if pgrep -x "vault" > /dev/null +then + echo "Vault is already running. Aborting." + exit 1 +fi + +vault server -dev -dev-root-token-id=root & +VAULT_PID=$! + +# Allow time for Vault to start its HTTP listener +sleep 1 + +defer_stop_vault() { + echo "Stopping Vault..." + kill $VAULT_PID + # Allow time for Vault to print final logging and exit, + # before this script ends, and the shell prints its next prompt + sleep 1 +} + +trap defer_stop_vault INT TERM EXIT + +export VAULT_ADDR=http://127.0.0.1:8200 + +echo "Unmounting the default kv-v2 secrets engine ..." + +# Unmount the default kv-v2 engine so that we can remount it at 'kv_v2/' later. +# The mount path will be reflected in the resultant OpenAPI document. +vault secrets disable "secret/" + +echo "Mounting all builtin plugins ..." 
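+# Mounting alone is sufficient: the OpenAPI generator only needs each
+# backend's paths to be registered, so the plugins are not configured further.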
+ +# Enable auth plugins +vault auth enable "alicloud" +vault auth enable "approle" +vault auth enable "aws" +vault auth enable "azure" +vault auth enable "centrify" +vault auth enable "cert" +vault auth enable "cf" +vault auth enable "gcp" +vault auth enable "github" +vault auth enable "jwt" +vault auth enable "kerberos" +vault auth enable "kubernetes" +vault auth enable "ldap" +vault auth enable "oci" +vault auth enable "okta" +vault auth enable "radius" +vault auth enable "userpass" + +# Enable secrets plugins +vault secrets enable "alicloud" +vault secrets enable "aws" +vault secrets enable "azure" +vault secrets enable "consul" +vault secrets enable "database" +vault secrets enable "gcp" +vault secrets enable "gcpkms" +vault secrets enable "kubernetes" +vault secrets enable -path="kv-v1/" -version=1 "kv" +vault secrets enable -path="kv-v2/" -version=2 "kv" +vault secrets enable "ldap" +vault secrets enable "mongodbatlas" +vault secrets enable "nomad" +vault secrets enable "pki" +vault secrets enable "rabbitmq" +vault secrets enable "ssh" +vault secrets enable "terraform" +vault secrets enable "totp" +vault secrets enable "transit" + +# Enable enterprise features +if [[ -n "${VAULT_LICENSE:-}" ]]; then + vault secrets enable "keymgmt" + vault secrets enable "kmip" + vault secrets enable "transform" +fi + +# Output OpenAPI, optionally formatted +if [ "$1" == "-p" ]; then + curl --header 'X-Vault-Token: root' \ + --data '{"generic_mount_paths": true}' \ + 'http://127.0.0.1:8200/v1/sys/internal/specs/openapi' | jq > openapi.json +else + curl --header 'X-Vault-Token: root' \ + --data '{"generic_mount_paths": true}' \ + 'http://127.0.0.1:8200/v1/sys/internal/specs/openapi' > openapi.json +fi + +echo +echo "openapi.json generated" +echo diff --git a/scripts/gofmtcheck.sh b/scripts/gofmtcheck.sh new file mode 100755 index 0000000..19d4adb --- /dev/null +++ b/scripts/gofmtcheck.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +echo "==> Checking that code complies with gofmt requirements..." + +files=$(echo $1 | xargs) +if [ -n "$files" ]; then + echo "Checking changed files..." + gofmt_files="$(echo $1 | grep -v pb.go | grep -v vendor | xargs go run mvdan.cc/gofumpt -l)" +else + echo "Checking all files..." + gofmt_files="$(find . -name '*.go' | grep -v pb.go | grep -v vendor | xargs go run mvdan.cc/gofumpt -l)" +fi + +if [[ -n "${gofmt_files}" ]]; then + echo 'gofumpt needs running on the following files:' + echo "${gofmt_files}" + echo "You can use the command: \`make fmt\` to reformat code." + exit 1 +fi diff --git a/scripts/goversioncheck.sh b/scripts/goversioncheck.sh new file mode 100755 index 0000000..7ee7422 --- /dev/null +++ b/scripts/goversioncheck.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +GO_CMD=${GO_CMD:-go} + +GO_VERSION_MIN=$1 +echo "==> Checking that build is using go version >= $1..." + +if $GO_CMD version | grep -q devel; +then + GO_VERSION="devel" +else + GO_VERSION=$($GO_CMD version | grep -o 'go[0-9]\+\.[0-9]\+\(\.[0-9]\+\)\?' | tr -d 'go') + + IFS="." read -r -a GO_VERSION_ARR <<< "$GO_VERSION" + IFS="." 
read -r -a GO_VERSION_REQ <<< "$GO_VERSION_MIN"
+
+  if [[ ${GO_VERSION_ARR[0]} -lt ${GO_VERSION_REQ[0]} ||
+    ( ${GO_VERSION_ARR[0]} -eq ${GO_VERSION_REQ[0]} &&
+    ( ${GO_VERSION_ARR[1]} -lt ${GO_VERSION_REQ[1]} ||
+    ( ${GO_VERSION_ARR[1]} -eq ${GO_VERSION_REQ[1]} && ${GO_VERSION_ARR[2]} -lt ${GO_VERSION_REQ[2]} )))
+  ]]; then
+    echo "Vault requires go $GO_VERSION_MIN to build; found $GO_VERSION."
+    exit 1
+  fi
+fi
+
+echo "==> Using go version $GO_VERSION..."
diff --git a/scripts/protocversioncheck.sh b/scripts/protocversioncheck.sh
new file mode 100755
index 0000000..a2cbc6c
--- /dev/null
+++ b/scripts/protocversioncheck.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+
+set -euo pipefail
+
+PROTOC_CMD=${PROTOC_CMD:-protoc}
+PROTOC_VERSION_EXACT="$1"
+echo "==> Checking that protoc is at version $1..."
+
+PROTOC_VERSION=$($PROTOC_CMD --version | grep -o '[0-9]\+\.[0-9]\+\.[0-9]\+')
+
+if [ "$PROTOC_VERSION" == "$PROTOC_VERSION_EXACT" ]; then
+  echo "Using protoc version $PROTOC_VERSION"
+else
+  echo "protoc should be at $PROTOC_VERSION_EXACT; found $PROTOC_VERSION."
+  echo "If your version is higher than the version this script is looking for, update the Makefile with the newer version."
+  exit 1
+fi
diff --git a/scripts/semgrep_plugin_repos.sh b/scripts/semgrep_plugin_repos.sh
new file mode 100755
index 0000000..6dc7407
--- /dev/null
+++ b/scripts/semgrep_plugin_repos.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+
+set -e
+set -x
+
+## Make a temp dir
+tempdir=$(mktemp -d plugin-semgrep.XXXXXX)
+vaultdir=$(pwd)
+## Set paths
+cd $tempdir
+
+for plugin in $(grep github.com/hashicorp/vault-plugin- $vaultdir/go.mod | cut -f 2 | cut -d ' ' -f 1 | cut -d '/' -f 3)
+do
+  if [ -z $SKIP_MODULE_UPDATING ]
+  then
+    echo "Fetching $plugin..."
+    git clone https://github.com/hashicorp/$plugin
+    semgrep --include '*.go' --exclude 'vendor' -a -f $vaultdir/tools/semgrep/ci/ $plugin/. > $plugin.semgrep.txt
+  fi
+done
diff --git a/scripts/testciphers.sh b/scripts/testciphers.sh
new file mode 100755
index 0000000..f9684f5
--- /dev/null
+++ b/scripts/testciphers.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+
+# Adapted from https://superuser.com/a/224263
+
+# OpenSSL requires the port number.
+SERVER=$1
+ciphers=$(openssl ciphers 'ALL:eNULL' | sed -e 's/:/ /g')
+
+echo Obtaining cipher list from $(openssl version).
+
+for cipher in ${ciphers[@]}
+do
+echo -n Testing $cipher...
+result=$(echo -n | openssl s_client -cipher "$cipher" -alpn req_fw_sb-act_v1 -connect $SERVER 2>&1)
+if [[ "$result" =~ ":error:" ]] ; then
+  error=$(echo -n $result | cut -d':' -f6)
+  echo NO \($error\)
+else
+  if [[ "$result" =~ "Cipher is ${cipher}" || "$result" =~ "Cipher :" ]] ; then
+    echo YES
+  else
+    echo UNKNOWN RESPONSE
+    echo $result
+  fi
+fi
+done
diff --git a/scripts/update_deps.sh b/scripts/update_deps.sh
new file mode 100755
index 0000000..f491b7e
--- /dev/null
+++ b/scripts/update_deps.sh
@@ -0,0 +1,60 @@
+#!/bin/sh
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+
+set -e
+
+TOOL=vault
+
+## Make a temp dir
+tempdir=$(mktemp -d update-${TOOL}-deps.XXXXXX)
+
+## Set paths
+export GOPATH="$(pwd)/${tempdir}"
+export PATH="${GOPATH}/bin:${PATH}"
+cd $tempdir
+
+## Get Vault
+mkdir -p src/github.com/hashicorp
+cd src/github.com/hashicorp
+echo "Fetching ${TOOL}..."
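+# Note that this works on a fresh clone inside the temporary GOPATH rather
+# than the current checkout, so local changes are never modified.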
+git clone https://github.com/hashicorp/${TOOL} +cd ${TOOL} + +## Clean out earlier vendoring +rm -rf Godeps vendor + +## Get govendor +go get github.com/kardianos/govendor + +## Init +govendor init + +## Fetch deps +echo "Fetching deps, will take some time..." +govendor fetch -v +missing + +# Clean up after the logrus mess +govendor remove -v github.com/Sirupsen/logrus +cd vendor +find -type f | grep '.go' | xargs sed -i -e 's/Sirupsen/sirupsen/' + +# Need the v2 branch for Azure +govendor fetch -v github.com/coreos/go-oidc@v2 + +# Need the v3 branch for dockertest +govendor fetch -v github.com/ory/dockertest@v3 + +# Current influx master is alpha, pin to v1.7.3 +govendor fetch github.com/influxdata/influxdb/client/v2@v1.7.4 +govendor fetch github.com/influxdata/influxdb/models@v1.7.4 +govendor fetch github.com/influxdata/influxdb/pkg/escape@v1.7.4 + +# Current circonus needs v3 +grep circonus-gometrics vendor.json | cut -d '"' -f 4 | while read -r i; do govendor fetch $i@v2; done + +# API breakage +govendor fetch github.com/satori/go.uuid@f58768cc1a7a7e77a3bd49e98cdd21419399b6a3 + +echo "Done; to commit run \n\ncd ${GOPATH}/src/github.com/hashicorp/${TOOL}\n" diff --git a/scripts/update_plugin_modules.sh b/scripts/update_plugin_modules.sh new file mode 100755 index 0000000..2a300f3 --- /dev/null +++ b/scripts/update_plugin_modules.sh @@ -0,0 +1,51 @@ +#!/bin/sh +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + + +set -e + +## Make a temp dir +tempdir=$(mktemp -d update-plugin-modules.XXXXXX) + +## Set paths +cd $tempdir + +## Get Vault +echo "Fetching vault..." +git clone https://github.com/hashicorp/vault + +for plugin in $(grep github.com/hashicorp/vault-plugin- vault/go.mod | cut -f 2 | cut -d ' ' -f 1 | cut -d '/' -f 3) +do + if [ -z $SKIP_MODULE_UPDATING ] + then + echo "Fetching $plugin..." + git clone https://github.com/hashicorp/$plugin + cd $plugin + rm -rf vendor + go get github.com/hashicorp/vault/api${API_BRANCH} + go mod tidy + go mod vendor + git add . + git commit --allow-empty -m "Updating vault dep" + if [ ! -z $PUSH_COMMITS ] + then + git push + fi + cd .. + fi + cd vault + go get github.com/hashicorp/$plugin@main + cd .. +done + +cd vault +go mod tidy +rm -rf vendor +go mod vendor +git add . +git commit --allow-empty -m "Updating plugin deps" +if [ ! 
-z $PUSH_VAULT_COMMIT ]
+then
+  git push
+fi
diff --git a/scripts/windows/build.bat b/scripts/windows/build.bat
new file mode 100644
index 0000000..79e2760
--- /dev/null
+++ b/scripts/windows/build.bat
@@ -0,0 +1,96 @@
+@echo off
+setlocal
+
+set _EXITCODE=0
+set _DEV_BUILD=0
+
+if not exist %1 exit /b 1
+if x%2 == xVAULT_DEV set _DEV_BUILD=1
+
+cd %1
+md bin 2>nul
+
+:: Get the git commit
+set _GIT_COMMIT_FILE=%TEMP%\vault-git_commit.txt
+set _GIT_DIRTY_FILE=%TEMP%\vault-git_dirty.txt
+set _GIT_COMMIT_DATE_FILE=%TEMP%\vault-git_commit_date.txt
+
+set _NUL_CMP_FILE=%TEMP%\vault-nul_cmp.txt
+type nul >%_NUL_CMP_FILE%
+
+git rev-parse HEAD >"%_GIT_COMMIT_FILE%"
+set /p _GIT_COMMIT=<"%_GIT_COMMIT_FILE%"
+del /f "%_GIT_COMMIT_FILE%" 2>nul
+
+git show -s --format=%%cd --date=format:"%%Y-%%m-%%dT%%H:%%M:%%SZ" HEAD >"%_GIT_COMMIT_DATE_FILE%"
+set /p _BUILD_DATE=<"%_GIT_COMMIT_DATE_FILE%"
+del /f "%_GIT_COMMIT_DATE_FILE%" 2>nul
+
+set _GIT_DIRTY=
+git status --porcelain >"%_GIT_DIRTY_FILE%"
+fc "%_GIT_DIRTY_FILE%" "%_NUL_CMP_FILE%" >nul
+if errorlevel 1 set _GIT_DIRTY=+CHANGES
+del /f "%_GIT_DIRTY_FILE%" 2>nul
+del /f "%_NUL_CMP_FILE%" 2>nul
+
+REM Determine the arch/os combos we're building for
+set _XC_ARCH=386 amd64 arm
+set _XC_OS=linux darwin windows freebsd openbsd
+
+REM Install dependencies
+echo ==^> Installing dependencies...
+go get ./...
+
+REM Clean up the old binaries and packages.
+echo ==^> Cleaning old builds...
+rd /s /q bin pkg 2>nul
+md bin 2>nul
+
+REM If it's dev mode, only build for ourselves
+if not %_DEV_BUILD% equ 1 goto build
+
+:devbuild
+echo ==^> Preparing for development build...
+set _GO_ENV_TMP_FILE=%TEMP%\vault-go-env.txt
+go env GOARCH >"%_GO_ENV_TMP_FILE%"
+set /p _XC_ARCH=<"%_GO_ENV_TMP_FILE%"
+del /f "%_GO_ENV_TMP_FILE%" 2>nul
+go env GOOS >"%_GO_ENV_TMP_FILE%"
+set /p _XC_OS=<"%_GO_ENV_TMP_FILE%"
+del /f "%_GO_ENV_TMP_FILE%" 2>nul
+
+:build
+REM Build!
+echo ==^> Building...
+go build^
+ -ldflags "-X github.com/hashicorp/vault/version.GitCommit=%_GIT_COMMIT%%_GIT_DIRTY% -X github.com/hashicorp/vault/version.BuildDate=%_BUILD_DATE%"^
+ -o "bin/vault.exe"^
+ .
+
+if %ERRORLEVEL% equ 1 set _EXITCODE=1
+
+if %_EXITCODE% equ 1 exit /b %_EXITCODE%
+
+set _GO_ENV_TMP_FILE=%TEMP%\vault-go-env.txt
+
+go env GOPATH >"%_GO_ENV_TMP_FILE%"
+set /p _GOPATH=<"%_GO_ENV_TMP_FILE%"
+del /f "%_GO_ENV_TMP_FILE%" 2>nul
+
+go env GOARCH >"%_GO_ENV_TMP_FILE%"
+set /p _GOARCH=<"%_GO_ENV_TMP_FILE%"
+del /f "%_GO_ENV_TMP_FILE%" 2>nul
+
+go env GOOS >"%_GO_ENV_TMP_FILE%"
+set /p _GOOS=<"%_GO_ENV_TMP_FILE%"
+del /f "%_GO_ENV_TMP_FILE%" 2>nul
+
+REM TODO(ceh): package dist
+
+REM Done!
+echo.
+echo ==^> Results:
+echo.
+for %%A in ("bin\*") do echo %%~fA
+
+exit /b %_EXITCODE%
diff --git a/sdk/README.md b/sdk/README.md
new file mode 100644
index 0000000..d106210
--- /dev/null
+++ b/sdk/README.md
@@ -0,0 +1,12 @@
+Vault SDK libs
+=================
+
+This package provides the `sdk` package which contains code useful for
+developing Vault plugins.
+
+Although we try not to break functionality, we reserve the right to reorganize
+the code at will and may occasionally cause breaks if they are warranted. As
+such we expect the tag of this module will stay less than `v1.0.0`.
+
+For any major changes we will try to give advance notice in the CHANGES section
+of Vault's CHANGELOG.md.
diff --git a/sdk/database/dbplugin/README.md b/sdk/database/dbplugin/README.md new file mode 100644 index 0000000..94b1eff --- /dev/null +++ b/sdk/database/dbplugin/README.md @@ -0,0 +1,83 @@ +# Combined Database Engine +This package is how database plugins interact with Vault. + +## Upgrading to Version 5 + +### Background +In Vault 1.6, a new Database interface was created that solved a number of issues with the +previous interface: + +1. It could not use password policies because the database plugins were responsible for + generating passwords. +2. There were significant inconsistencies between functions in the interface. +3. Several functions (`SetCredentials` and `RotateRootCredentials`) were doing the same operation. +4. It had a function that was no longer being used as it had been deprecated in a previous version + but never removed. + +Prior to Vault 1.6, the Database interface is version 4 (with other versions in older versions of +Vault). The new version introduced in Vault 1.6 is version 5. This distinction was not exposed in +previous iterations of the Database interface as the previous versions were additive to the +interface. Since version 5 is an overhaul of the interface, this distinction needed to be made. + +We highly recommend that you upgrade any version 4 database plugins to version 5 as version 4 is +considered deprecated and support for it will be removed in a future release. Version 5 plugins +will not function with Vault prior to Vault 1.6. + +The new interface is roughly modeled after a [gRPC](https://grpc.io/) interface. It has improved +future compatibility by not requiring changes to the interface definition to add additional data +in the requests or responses. It also simplifies the interface by merging several into a single +function call. + +### Upgrading your custom database + +Vault 1.6 supports both version 4 and version 5 database plugins. The support for version 4 +plugins will be removed in a future release. Version 5 database plugins will not function with +Vault prior to version 1.6. If you upgrade your database plugins, ensure that you are only using +Vault 1.6 or later. To determine if a plugin is using version 4 or version 5, the following is a +list of changes in no particular order that you can check against your plugin to determine +the version: + +1. The import path for version 4 is `github.com/hashicorp/vault/sdk/database/dbplugin` + whereas the import path for version 5 is `github.com/hashicorp/vault/sdk/database/dbplugin/v5` +2. Version 4 has the following functions: `Initialize`, `Init`, `CreateUser`, `RenewUser`, + `RevokeUser`, `SetCredentials`, `RotateRootCredentials`, `Type`, and `Close`. You can see the + full function signatures in `sdk/database/dbplugin/plugin.go`. +3. Version 5 has the following functions: `Initialize`, `NewUser`, `UpdateUser`, `DeleteUser`, + `Type`, and `Close`. You can see the full function signatures in + `sdk/database/dbplugin/v5/database.go`. + +If you are using a version 4 custom database plugin, the following are basic instructions +for upgrading to version 5. + +-> In version 4, password generation was the responsibility of the plugin. This is no longer + the case with version 5. Vault is responsible for generating passwords and passing them to + the plugin via `NewUserRequest.Password` and `UpdateUserRequest.Password.NewPassword`. + +1. Change the import path from `github.com/hashicorp/vault/sdk/database/dbplugin` to + `github.com/hashicorp/vault/sdk/database/dbplugin/v5`. 
The package name is the same, so any + references to `dbplugin` can remain as long as those symbols exist within the new package + (such as the `Serve` function). +2. An easy way to see what functions need to be implemented is to put the following as a + global variable within your package: `var _ dbplugin.Database = (*MyDatabase)(nil)`. This + will fail to compile if the `MyDatabase` type does not adhere to the + `dbplugin.Database` interface. +3. Replace `Init` and `Initialize` with the new `Initialize` function definition. The fields that + `Init` was taking (`config` and `verifyConnection`) are now wrapped into `InitializeRequest`. + The returned `map[string]interface{}` object is now wrapped into `InitializeResponse`. + Only `Initialize` is needed to adhere to the `Database` interface. +4. Update `CreateUser` to `NewUser`. The `NewUserRequest` object contains the username and + password of the user to be created. It also includes a list of statements for creating the + user as well as several other fields that may or may not be applicable. Your custom plugin + should use the password provided in the request, not generate one. If you generate a password + instead, Vault will not know about it and will give the caller the wrong password. +5. `SetCredentials`, `RotateRootCredentials`, and `RenewUser` are combined into `UpdateUser`. + The request object, `UpdateUserRequest` contains three parts: the username to change, a + `ChangePassword` and a `ChangeExpiration` object. When one of the objects is not nil, this + indicates that particular field (password or expiration) needs to change. For instance, if + the `ChangePassword` field is not-nil, the user's password should be changed. This is + equivalent to calling `SetCredentials`. If the `ChangeExpiration` field is not-nil, the + user's expiration date should be changed. This is equivalent to calling `RenewUser`. + Many databases don't need to do anything with the updated expiration. +6. Update `RevokeUser` to `DeleteUser`. This is the simplest change. The username to be + deleted is enclosed in the `DeleteUserRequest` object. + diff --git a/sdk/database/dbplugin/client.go b/sdk/database/dbplugin/client.go new file mode 100644 index 0000000..265b46b --- /dev/null +++ b/sdk/database/dbplugin/client.go @@ -0,0 +1,91 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package dbplugin + +import ( + "context" + "errors" + "sync" + + log "github.com/hashicorp/go-hclog" + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/sdk/helper/pluginutil" +) + +// DatabasePluginClient embeds a databasePluginRPCClient and wraps it's Close +// method to also call Kill() on the plugin.Client. +type DatabasePluginClient struct { + client *plugin.Client + sync.Mutex + + Database +} + +// This wraps the Close call and ensures we both close the database connection +// and kill the plugin. +func (dc *DatabasePluginClient) Close() error { + err := dc.Database.Close() + dc.client.Kill() + + return err +} + +// NewPluginClient returns a databaseRPCClient with a connection to a running +// plugin. The client is wrapped in a DatabasePluginClient object to ensure the +// plugin is killed on call of Close(). +func NewPluginClient(ctx context.Context, sys pluginutil.RunnerUtil, pluginRunner *pluginutil.PluginRunner, logger log.Logger, isMetadataMode bool) (Database, error) { + // pluginSets is the map of plugins we can dispense. + pluginSets := map[int]plugin.PluginSet{ + // Version 3 used to supports both protocols. 
We want to keep it around + // since it's possible old plugins built against this version will still + // work with gRPC. There is currently no difference between version 3 + // and version 4. + 3: { + "database": new(GRPCDatabasePlugin), + }, + // Version 4 only supports gRPC + 4: { + "database": new(GRPCDatabasePlugin), + }, + } + + var client *plugin.Client + var err error + if isMetadataMode { + client, err = pluginRunner.RunMetadataMode(ctx, sys, pluginSets, handshakeConfig, []string{}, logger) + } else { + client, err = pluginRunner.Run(ctx, sys, pluginSets, handshakeConfig, []string{}, logger) + } + if err != nil { + return nil, err + } + + // Connect via RPC + rpcClient, err := client.Client() + if err != nil { + return nil, err + } + + // Request the plugin + raw, err := rpcClient.Dispense("database") + if err != nil { + return nil, err + } + + // We should have a database type now. This feels like a normal interface + // implementation but is in fact over an RPC connection. + var db Database + switch raw.(type) { + case *gRPCClient: + db = raw.(*gRPCClient) + default: + return nil, errors.New("unsupported client type") + } + + // Wrap RPC implementation in DatabasePluginClient + return &DatabasePluginClient{ + client: client, + Database: db, + }, nil +} diff --git a/sdk/database/dbplugin/database.pb.go b/sdk/database/dbplugin/database.pb.go new file mode 100644 index 0000000..62964c7 --- /dev/null +++ b/sdk/database/dbplugin/database.pb.go @@ -0,0 +1,1468 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: sdk/database/dbplugin/database.proto + +package dbplugin + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Deprecated: Do not use. +type InitializeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Config []byte `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + VerifyConnection bool `protobuf:"varint,2,opt,name=verify_connection,json=verifyConnection,proto3" json:"verify_connection,omitempty"` +} + +func (x *InitializeRequest) Reset() { + *x = InitializeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InitializeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InitializeRequest) ProtoMessage() {} + +func (x *InitializeRequest) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InitializeRequest.ProtoReflect.Descriptor instead. 
+func (*InitializeRequest) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_database_proto_rawDescGZIP(), []int{0} +} + +func (x *InitializeRequest) GetConfig() []byte { + if x != nil { + return x.Config + } + return nil +} + +func (x *InitializeRequest) GetVerifyConnection() bool { + if x != nil { + return x.VerifyConnection + } + return false +} + +type InitRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Config []byte `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + VerifyConnection bool `protobuf:"varint,2,opt,name=verify_connection,json=verifyConnection,proto3" json:"verify_connection,omitempty"` +} + +func (x *InitRequest) Reset() { + *x = InitRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InitRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InitRequest) ProtoMessage() {} + +func (x *InitRequest) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InitRequest.ProtoReflect.Descriptor instead. +func (*InitRequest) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_database_proto_rawDescGZIP(), []int{1} +} + +func (x *InitRequest) GetConfig() []byte { + if x != nil { + return x.Config + } + return nil +} + +func (x *InitRequest) GetVerifyConnection() bool { + if x != nil { + return x.VerifyConnection + } + return false +} + +type CreateUserRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Statements *Statements `protobuf:"bytes,1,opt,name=statements,proto3" json:"statements,omitempty"` + UsernameConfig *UsernameConfig `protobuf:"bytes,2,opt,name=username_config,json=usernameConfig,proto3" json:"username_config,omitempty"` + Expiration *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=expiration,proto3" json:"expiration,omitempty"` +} + +func (x *CreateUserRequest) Reset() { + *x = CreateUserRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateUserRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateUserRequest) ProtoMessage() {} + +func (x *CreateUserRequest) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateUserRequest.ProtoReflect.Descriptor instead. 
+func (*CreateUserRequest) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_database_proto_rawDescGZIP(), []int{2} +} + +func (x *CreateUserRequest) GetStatements() *Statements { + if x != nil { + return x.Statements + } + return nil +} + +func (x *CreateUserRequest) GetUsernameConfig() *UsernameConfig { + if x != nil { + return x.UsernameConfig + } + return nil +} + +func (x *CreateUserRequest) GetExpiration() *timestamppb.Timestamp { + if x != nil { + return x.Expiration + } + return nil +} + +type RenewUserRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Statements *Statements `protobuf:"bytes,1,opt,name=statements,proto3" json:"statements,omitempty"` + Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` + Expiration *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=expiration,proto3" json:"expiration,omitempty"` +} + +func (x *RenewUserRequest) Reset() { + *x = RenewUserRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RenewUserRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RenewUserRequest) ProtoMessage() {} + +func (x *RenewUserRequest) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RenewUserRequest.ProtoReflect.Descriptor instead. +func (*RenewUserRequest) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_database_proto_rawDescGZIP(), []int{3} +} + +func (x *RenewUserRequest) GetStatements() *Statements { + if x != nil { + return x.Statements + } + return nil +} + +func (x *RenewUserRequest) GetUsername() string { + if x != nil { + return x.Username + } + return "" +} + +func (x *RenewUserRequest) GetExpiration() *timestamppb.Timestamp { + if x != nil { + return x.Expiration + } + return nil +} + +type RevokeUserRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Statements *Statements `protobuf:"bytes,1,opt,name=statements,proto3" json:"statements,omitempty"` + Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` +} + +func (x *RevokeUserRequest) Reset() { + *x = RevokeUserRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RevokeUserRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RevokeUserRequest) ProtoMessage() {} + +func (x *RevokeUserRequest) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RevokeUserRequest.ProtoReflect.Descriptor instead. 
+func (*RevokeUserRequest) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_database_proto_rawDescGZIP(), []int{4} +} + +func (x *RevokeUserRequest) GetStatements() *Statements { + if x != nil { + return x.Statements + } + return nil +} + +func (x *RevokeUserRequest) GetUsername() string { + if x != nil { + return x.Username + } + return "" +} + +type RotateRootCredentialsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Statements []string `protobuf:"bytes,1,rep,name=statements,proto3" json:"statements,omitempty"` +} + +func (x *RotateRootCredentialsRequest) Reset() { + *x = RotateRootCredentialsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RotateRootCredentialsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RotateRootCredentialsRequest) ProtoMessage() {} + +func (x *RotateRootCredentialsRequest) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RotateRootCredentialsRequest.ProtoReflect.Descriptor instead. +func (*RotateRootCredentialsRequest) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_database_proto_rawDescGZIP(), []int{5} +} + +func (x *RotateRootCredentialsRequest) GetStatements() []string { + if x != nil { + return x.Statements + } + return nil +} + +type Statements struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // DEPRECATED, will be removed in 0.12 + // + // Deprecated: Do not use. + CreationStatements string `protobuf:"bytes,1,opt,name=creation_statements,json=creationStatements,proto3" json:"creation_statements,omitempty"` + // DEPRECATED, will be removed in 0.12 + // + // Deprecated: Do not use. + RevocationStatements string `protobuf:"bytes,2,opt,name=revocation_statements,json=revocationStatements,proto3" json:"revocation_statements,omitempty"` + // DEPRECATED, will be removed in 0.12 + // + // Deprecated: Do not use. + RollbackStatements string `protobuf:"bytes,3,opt,name=rollback_statements,json=rollbackStatements,proto3" json:"rollback_statements,omitempty"` + // DEPRECATED, will be removed in 0.12 + // + // Deprecated: Do not use. 
+ RenewStatements string `protobuf:"bytes,4,opt,name=renew_statements,json=renewStatements,proto3" json:"renew_statements,omitempty"` + Creation []string `protobuf:"bytes,5,rep,name=creation,proto3" json:"creation,omitempty"` + Revocation []string `protobuf:"bytes,6,rep,name=revocation,proto3" json:"revocation,omitempty"` + Rollback []string `protobuf:"bytes,7,rep,name=rollback,proto3" json:"rollback,omitempty"` + Renewal []string `protobuf:"bytes,8,rep,name=renewal,proto3" json:"renewal,omitempty"` + Rotation []string `protobuf:"bytes,9,rep,name=rotation,proto3" json:"rotation,omitempty"` +} + +func (x *Statements) Reset() { + *x = Statements{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Statements) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Statements) ProtoMessage() {} + +func (x *Statements) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Statements.ProtoReflect.Descriptor instead. +func (*Statements) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_database_proto_rawDescGZIP(), []int{6} +} + +// Deprecated: Do not use. +func (x *Statements) GetCreationStatements() string { + if x != nil { + return x.CreationStatements + } + return "" +} + +// Deprecated: Do not use. +func (x *Statements) GetRevocationStatements() string { + if x != nil { + return x.RevocationStatements + } + return "" +} + +// Deprecated: Do not use. +func (x *Statements) GetRollbackStatements() string { + if x != nil { + return x.RollbackStatements + } + return "" +} + +// Deprecated: Do not use. 
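+//
+// The singular renew_statements field survives only for wire compatibility
+// ("will be removed in 0.12" per the proto definition); new callers populate
+// the repeated fields instead. As a hedged sketch (the statement strings are
+// illustrative, not part of this package):
+//
+//	stmts := &Statements{
+//		Creation: []string{"CREATE USER ..."},
+//		Renewal:  []string{"ALTER USER ..."},
+//	}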
+func (x *Statements) GetRenewStatements() string { + if x != nil { + return x.RenewStatements + } + return "" +} + +func (x *Statements) GetCreation() []string { + if x != nil { + return x.Creation + } + return nil +} + +func (x *Statements) GetRevocation() []string { + if x != nil { + return x.Revocation + } + return nil +} + +func (x *Statements) GetRollback() []string { + if x != nil { + return x.Rollback + } + return nil +} + +func (x *Statements) GetRenewal() []string { + if x != nil { + return x.Renewal + } + return nil +} + +func (x *Statements) GetRotation() []string { + if x != nil { + return x.Rotation + } + return nil +} + +type UsernameConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DisplayName string `protobuf:"bytes,1,opt,name=DisplayName,proto3" json:"DisplayName,omitempty"` + RoleName string `protobuf:"bytes,2,opt,name=RoleName,proto3" json:"RoleName,omitempty"` +} + +func (x *UsernameConfig) Reset() { + *x = UsernameConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UsernameConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UsernameConfig) ProtoMessage() {} + +func (x *UsernameConfig) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UsernameConfig.ProtoReflect.Descriptor instead. +func (*UsernameConfig) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_database_proto_rawDescGZIP(), []int{7} +} + +func (x *UsernameConfig) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *UsernameConfig) GetRoleName() string { + if x != nil { + return x.RoleName + } + return "" +} + +type InitResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Config []byte `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *InitResponse) Reset() { + *x = InitResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InitResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InitResponse) ProtoMessage() {} + +func (x *InitResponse) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InitResponse.ProtoReflect.Descriptor instead. 
+func (*InitResponse) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_database_proto_rawDescGZIP(), []int{8} +} + +func (x *InitResponse) GetConfig() []byte { + if x != nil { + return x.Config + } + return nil +} + +type CreateUserResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` +} + +func (x *CreateUserResponse) Reset() { + *x = CreateUserResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateUserResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateUserResponse) ProtoMessage() {} + +func (x *CreateUserResponse) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateUserResponse.ProtoReflect.Descriptor instead. +func (*CreateUserResponse) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_database_proto_rawDescGZIP(), []int{9} +} + +func (x *CreateUserResponse) GetUsername() string { + if x != nil { + return x.Username + } + return "" +} + +func (x *CreateUserResponse) GetPassword() string { + if x != nil { + return x.Password + } + return "" +} + +type TypeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` +} + +func (x *TypeResponse) Reset() { + *x = TypeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TypeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TypeResponse) ProtoMessage() {} + +func (x *TypeResponse) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TypeResponse.ProtoReflect.Descriptor instead. 
+func (*TypeResponse) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_database_proto_rawDescGZIP(), []int{10} +} + +func (x *TypeResponse) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +type RotateRootCredentialsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Config []byte `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *RotateRootCredentialsResponse) Reset() { + *x = RotateRootCredentialsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RotateRootCredentialsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RotateRootCredentialsResponse) ProtoMessage() {} + +func (x *RotateRootCredentialsResponse) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RotateRootCredentialsResponse.ProtoReflect.Descriptor instead. +func (*RotateRootCredentialsResponse) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_database_proto_rawDescGZIP(), []int{11} +} + +func (x *RotateRootCredentialsResponse) GetConfig() []byte { + if x != nil { + return x.Config + } + return nil +} + +type Empty struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Empty) Reset() { + *x = Empty{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Empty) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Empty) ProtoMessage() {} + +func (x *Empty) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Empty.ProtoReflect.Descriptor instead. 
+func (*Empty) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_database_proto_rawDescGZIP(), []int{12} +} + +type GenerateCredentialsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Password string `protobuf:"bytes,1,opt,name=password,proto3" json:"password,omitempty"` +} + +func (x *GenerateCredentialsResponse) Reset() { + *x = GenerateCredentialsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GenerateCredentialsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GenerateCredentialsResponse) ProtoMessage() {} + +func (x *GenerateCredentialsResponse) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GenerateCredentialsResponse.ProtoReflect.Descriptor instead. +func (*GenerateCredentialsResponse) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_database_proto_rawDescGZIP(), []int{13} +} + +func (x *GenerateCredentialsResponse) GetPassword() string { + if x != nil { + return x.Password + } + return "" +} + +type StaticUserConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + Create bool `protobuf:"varint,3,opt,name=create,proto3" json:"create,omitempty"` +} + +func (x *StaticUserConfig) Reset() { + *x = StaticUserConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StaticUserConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StaticUserConfig) ProtoMessage() {} + +func (x *StaticUserConfig) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StaticUserConfig.ProtoReflect.Descriptor instead. 
+func (*StaticUserConfig) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_database_proto_rawDescGZIP(), []int{14} +} + +func (x *StaticUserConfig) GetUsername() string { + if x != nil { + return x.Username + } + return "" +} + +func (x *StaticUserConfig) GetPassword() string { + if x != nil { + return x.Password + } + return "" +} + +func (x *StaticUserConfig) GetCreate() bool { + if x != nil { + return x.Create + } + return false +} + +type SetCredentialsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Statements *Statements `protobuf:"bytes,1,opt,name=statements,proto3" json:"statements,omitempty"` + StaticUserConfig *StaticUserConfig `protobuf:"bytes,2,opt,name=static_user_config,json=staticUserConfig,proto3" json:"static_user_config,omitempty"` +} + +func (x *SetCredentialsRequest) Reset() { + *x = SetCredentialsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetCredentialsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetCredentialsRequest) ProtoMessage() {} + +func (x *SetCredentialsRequest) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetCredentialsRequest.ProtoReflect.Descriptor instead. +func (*SetCredentialsRequest) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_database_proto_rawDescGZIP(), []int{15} +} + +func (x *SetCredentialsRequest) GetStatements() *Statements { + if x != nil { + return x.Statements + } + return nil +} + +func (x *SetCredentialsRequest) GetStaticUserConfig() *StaticUserConfig { + if x != nil { + return x.StaticUserConfig + } + return nil +} + +type SetCredentialsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` +} + +func (x *SetCredentialsResponse) Reset() { + *x = SetCredentialsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetCredentialsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetCredentialsResponse) ProtoMessage() {} + +func (x *SetCredentialsResponse) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_database_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetCredentialsResponse.ProtoReflect.Descriptor instead. 
+func (*SetCredentialsResponse) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_database_proto_rawDescGZIP(), []int{16} +} + +func (x *SetCredentialsResponse) GetUsername() string { + if x != nil { + return x.Username + } + return "" +} + +func (x *SetCredentialsResponse) GetPassword() string { + if x != nil { + return x.Password + } + return "" +} + +var File_sdk_database_dbplugin_database_proto protoreflect.FileDescriptor + +var file_sdk_database_dbplugin_database_proto_rawDesc = []byte{ + 0x0a, 0x24, 0x73, 0x64, 0x6b, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x64, + 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x5c, 0x0a, 0x11, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2b, + 0x0a, 0x11, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x76, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x02, 0x18, 0x01, 0x22, + 0x52, 0x0a, 0x0b, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, + 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2b, 0x0a, 0x11, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, + 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x10, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x22, 0xc8, 0x01, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, + 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x0a, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, + 0x41, 0x0a, 0x0f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x0e, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x3a, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa0, + 0x01, 0x0a, 0x10, 0x52, 0x65, 0x6e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 
0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, + 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, + 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3a, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x22, 0x65, 0x0a, 0x11, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x64, 0x62, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, + 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x3e, 0x0a, 0x1c, 0x52, 0x6f, 0x74, 0x61, + 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xec, 0x02, 0x0a, 0x0a, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x33, 0x0a, 0x13, 0x63, 0x72, 0x65, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x12, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x15, + 0x72, 0x65, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, + 0x14, 0x72, 0x65, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x33, 0x0a, 0x13, 0x72, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, + 0x6b, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x12, 0x72, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2d, 0x0a, 0x10, 0x72, 0x65, + 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x72, 0x65, 0x76, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x76, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, + 0x6b, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, + 0x6b, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x72, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4e, 0x0a, 0x0e, 0x55, 0x73, 0x65, 0x72, 0x6e, + 0x61, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x20, 0x0a, 0x0b, 0x44, 0x69, 0x73, + 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x44, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x52, + 0x6f, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x52, + 0x6f, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x26, 0x0a, 0x0c, 0x49, 0x6e, 0x69, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, + 0x4c, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x22, 0x0a, + 0x0c, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x22, 0x37, 0x0a, 0x1d, 0x52, 0x6f, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x43, + 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x22, 0x39, 0x0a, 0x1b, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, + 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x62, + 0x0a, 0x10, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x55, 0x73, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, + 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x22, 0x97, 0x01, 0x0a, 0x15, 0x53, 0x65, 0x74, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x0a, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 
0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x73, 0x12, 0x48, 0x0a, 0x12, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x75, 0x73, 0x65, + 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, + 0x55, 0x73, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, 0x73, 0x74, 0x61, 0x74, + 0x69, 0x63, 0x55, 0x73, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x50, 0x0a, 0x16, + 0x53, 0x65, 0x74, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x32, 0xab, + 0x05, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x04, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x0f, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x0a, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1b, 0x2e, 0x64, 0x62, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x52, 0x65, 0x6e, 0x65, 0x77, 0x55, 0x73, + 0x65, 0x72, 0x12, 0x1a, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x52, 0x65, + 0x6e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, + 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, + 0x3a, 0x0a, 0x0a, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1b, 0x2e, + 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x55, + 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x64, 0x62, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x68, 0x0a, 0x15, 0x52, + 0x6f, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x73, 0x12, 0x26, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, + 0x52, 0x6f, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x64, + 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x52, 0x6f, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, + 0x6f, 0x74, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x04, 0x49, 0x6e, 0x69, 0x74, 0x12, 0x15, 0x2e, + 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x49, 0x6e, 0x69, 
0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, + 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x05, + 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x12, 0x0f, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x0f, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x53, 0x0a, 0x0e, 0x53, 0x65, 0x74, 0x43, 0x72, + 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x1f, 0x2e, 0x64, 0x62, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x64, 0x62, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x13, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x12, 0x0f, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x25, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x0a, 0x49, + 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x12, 0x1b, 0x2e, 0x64, 0x62, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x03, 0x88, 0x02, 0x01, 0x42, 0x32, 0x5a, 0x30, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, + 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x64, + 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sdk_database_dbplugin_database_proto_rawDescOnce sync.Once + file_sdk_database_dbplugin_database_proto_rawDescData = file_sdk_database_dbplugin_database_proto_rawDesc +) + +func file_sdk_database_dbplugin_database_proto_rawDescGZIP() []byte { + file_sdk_database_dbplugin_database_proto_rawDescOnce.Do(func() { + file_sdk_database_dbplugin_database_proto_rawDescData = protoimpl.X.CompressGZIP(file_sdk_database_dbplugin_database_proto_rawDescData) + }) + return file_sdk_database_dbplugin_database_proto_rawDescData +} + +var file_sdk_database_dbplugin_database_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_sdk_database_dbplugin_database_proto_goTypes = []interface{}{ + (*InitializeRequest)(nil), // 0: dbplugin.InitializeRequest + (*InitRequest)(nil), // 1: dbplugin.InitRequest + (*CreateUserRequest)(nil), // 2: dbplugin.CreateUserRequest + (*RenewUserRequest)(nil), // 3: dbplugin.RenewUserRequest + (*RevokeUserRequest)(nil), // 4: dbplugin.RevokeUserRequest + (*RotateRootCredentialsRequest)(nil), // 5: dbplugin.RotateRootCredentialsRequest + (*Statements)(nil), // 6: dbplugin.Statements + (*UsernameConfig)(nil), // 7: dbplugin.UsernameConfig + (*InitResponse)(nil), // 8: dbplugin.InitResponse + (*CreateUserResponse)(nil), // 9: dbplugin.CreateUserResponse + 
(*TypeResponse)(nil), // 10: dbplugin.TypeResponse + (*RotateRootCredentialsResponse)(nil), // 11: dbplugin.RotateRootCredentialsResponse + (*Empty)(nil), // 12: dbplugin.Empty + (*GenerateCredentialsResponse)(nil), // 13: dbplugin.GenerateCredentialsResponse + (*StaticUserConfig)(nil), // 14: dbplugin.StaticUserConfig + (*SetCredentialsRequest)(nil), // 15: dbplugin.SetCredentialsRequest + (*SetCredentialsResponse)(nil), // 16: dbplugin.SetCredentialsResponse + (*timestamppb.Timestamp)(nil), // 17: google.protobuf.Timestamp +} +var file_sdk_database_dbplugin_database_proto_depIdxs = []int32{ + 6, // 0: dbplugin.CreateUserRequest.statements:type_name -> dbplugin.Statements + 7, // 1: dbplugin.CreateUserRequest.username_config:type_name -> dbplugin.UsernameConfig + 17, // 2: dbplugin.CreateUserRequest.expiration:type_name -> google.protobuf.Timestamp + 6, // 3: dbplugin.RenewUserRequest.statements:type_name -> dbplugin.Statements + 17, // 4: dbplugin.RenewUserRequest.expiration:type_name -> google.protobuf.Timestamp + 6, // 5: dbplugin.RevokeUserRequest.statements:type_name -> dbplugin.Statements + 6, // 6: dbplugin.SetCredentialsRequest.statements:type_name -> dbplugin.Statements + 14, // 7: dbplugin.SetCredentialsRequest.static_user_config:type_name -> dbplugin.StaticUserConfig + 12, // 8: dbplugin.Database.Type:input_type -> dbplugin.Empty + 2, // 9: dbplugin.Database.CreateUser:input_type -> dbplugin.CreateUserRequest + 3, // 10: dbplugin.Database.RenewUser:input_type -> dbplugin.RenewUserRequest + 4, // 11: dbplugin.Database.RevokeUser:input_type -> dbplugin.RevokeUserRequest + 5, // 12: dbplugin.Database.RotateRootCredentials:input_type -> dbplugin.RotateRootCredentialsRequest + 1, // 13: dbplugin.Database.Init:input_type -> dbplugin.InitRequest + 12, // 14: dbplugin.Database.Close:input_type -> dbplugin.Empty + 15, // 15: dbplugin.Database.SetCredentials:input_type -> dbplugin.SetCredentialsRequest + 12, // 16: dbplugin.Database.GenerateCredentials:input_type -> dbplugin.Empty + 0, // 17: dbplugin.Database.Initialize:input_type -> dbplugin.InitializeRequest + 10, // 18: dbplugin.Database.Type:output_type -> dbplugin.TypeResponse + 9, // 19: dbplugin.Database.CreateUser:output_type -> dbplugin.CreateUserResponse + 12, // 20: dbplugin.Database.RenewUser:output_type -> dbplugin.Empty + 12, // 21: dbplugin.Database.RevokeUser:output_type -> dbplugin.Empty + 11, // 22: dbplugin.Database.RotateRootCredentials:output_type -> dbplugin.RotateRootCredentialsResponse + 8, // 23: dbplugin.Database.Init:output_type -> dbplugin.InitResponse + 12, // 24: dbplugin.Database.Close:output_type -> dbplugin.Empty + 16, // 25: dbplugin.Database.SetCredentials:output_type -> dbplugin.SetCredentialsResponse + 13, // 26: dbplugin.Database.GenerateCredentials:output_type -> dbplugin.GenerateCredentialsResponse + 12, // 27: dbplugin.Database.Initialize:output_type -> dbplugin.Empty + 18, // [18:28] is the sub-list for method output_type + 8, // [8:18] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_sdk_database_dbplugin_database_proto_init() } +func file_sdk_database_dbplugin_database_proto_init() { + if File_sdk_database_dbplugin_database_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_sdk_database_dbplugin_database_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InitializeRequest); 
i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_database_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InitRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_database_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateUserRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_database_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RenewUserRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_database_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RevokeUserRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_database_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RotateRootCredentialsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_database_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Statements); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_database_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UsernameConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_database_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InitResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_database_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateUserResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_database_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TypeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_database_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RotateRootCredentialsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_database_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Empty); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_database_proto_msgTypes[13].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*GenerateCredentialsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_database_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StaticUserConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_database_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetCredentialsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_database_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetCredentialsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_sdk_database_dbplugin_database_proto_rawDesc, + NumEnums: 0, + NumMessages: 17, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_sdk_database_dbplugin_database_proto_goTypes, + DependencyIndexes: file_sdk_database_dbplugin_database_proto_depIdxs, + MessageInfos: file_sdk_database_dbplugin_database_proto_msgTypes, + }.Build() + File_sdk_database_dbplugin_database_proto = out.File + file_sdk_database_dbplugin_database_proto_rawDesc = nil + file_sdk_database_dbplugin_database_proto_goTypes = nil + file_sdk_database_dbplugin_database_proto_depIdxs = nil +} diff --git a/sdk/database/dbplugin/database.proto b/sdk/database/dbplugin/database.proto new file mode 100644 index 0000000..ed29252 --- /dev/null +++ b/sdk/database/dbplugin/database.proto @@ -0,0 +1,119 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +syntax = "proto3"; + +option go_package = "github.com/hashicorp/vault/sdk/database/dbplugin"; + +package dbplugin; + +import "google/protobuf/timestamp.proto"; + +message InitializeRequest { + option deprecated = true; + bytes config = 1; + bool verify_connection = 2; +} + +message InitRequest { + bytes config = 1; + bool verify_connection = 2; +} + +message CreateUserRequest { + Statements statements = 1; + UsernameConfig username_config = 2; + google.protobuf.Timestamp expiration = 3; +} + +message RenewUserRequest { + Statements statements = 1; + string username = 2; + google.protobuf.Timestamp expiration = 3; +} + +message RevokeUserRequest { + Statements statements = 1; + string username = 2; +} + +message RotateRootCredentialsRequest { + repeated string statements = 1; +} + +message Statements { + // DEPRECATED, will be removed in 0.12 + string creation_statements = 1 [deprecated=true]; + // DEPRECATED, will be removed in 0.12 + string revocation_statements = 2 [deprecated=true]; + // DEPRECATED, will be removed in 0.12 + string rollback_statements = 3 [deprecated=true]; + // DEPRECATED, will be removed in 0.12 + string renew_statements = 4 [deprecated=true]; + + repeated string creation = 5; + repeated string revocation = 6; + repeated string rollback = 7; + repeated string renewal = 8; + repeated string rotation = 9; +} + +message UsernameConfig { + string DisplayName = 1; + string RoleName = 2; +} + +message InitResponse { + bytes config = 1; +} + +message CreateUserResponse { + string username = 1; + string password = 2; +} + +message TypeResponse { + string type = 1; +} + +message RotateRootCredentialsResponse { + bytes config = 1; +} + +message Empty {} + +message GenerateCredentialsResponse { + string password = 1; +} + +message StaticUserConfig{ + string username = 1; + string password = 2; + bool create = 3; +} + +message SetCredentialsRequest { + Statements statements = 1; + StaticUserConfig static_user_config = 2; +} + +message SetCredentialsResponse { + string username = 1; + string password = 2; +} + +service Database { + rpc Type(Empty) returns (TypeResponse); + rpc CreateUser(CreateUserRequest) returns (CreateUserResponse); + rpc RenewUser(RenewUserRequest) returns (Empty); + rpc RevokeUser(RevokeUserRequest) returns (Empty); + rpc RotateRootCredentials(RotateRootCredentialsRequest) returns (RotateRootCredentialsResponse); + rpc Init(InitRequest) returns (InitResponse); + rpc Close(Empty) returns (Empty); + rpc SetCredentials(SetCredentialsRequest) returns (SetCredentialsResponse); + rpc GenerateCredentials(Empty) returns (GenerateCredentialsResponse); + + rpc Initialize(InitializeRequest) returns (Empty) { + option deprecated = true; + }; +} diff --git a/sdk/database/dbplugin/database_grpc.pb.go b/sdk/database/dbplugin/database_grpc.pb.go new file mode 100644 index 0000000..0e34e00 --- /dev/null +++ b/sdk/database/dbplugin/database_grpc.pb.go @@ -0,0 +1,428 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package dbplugin + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// DatabaseClient is the client API for Database service. 
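+//
+// As a rough usage sketch (the ctx and conn values below are assumed to be
+// supplied by the caller; they are not part of this package), a consumer
+// might drive the service like so:
+//
+//	client := NewDatabaseClient(conn)
+//	resp, err := client.Type(ctx, &Empty{})
+//	if err != nil {
+//		// handle the RPC error
+//	}
+//	dbType := resp.GetType()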
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type DatabaseClient interface { + Type(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TypeResponse, error) + CreateUser(ctx context.Context, in *CreateUserRequest, opts ...grpc.CallOption) (*CreateUserResponse, error) + RenewUser(ctx context.Context, in *RenewUserRequest, opts ...grpc.CallOption) (*Empty, error) + RevokeUser(ctx context.Context, in *RevokeUserRequest, opts ...grpc.CallOption) (*Empty, error) + RotateRootCredentials(ctx context.Context, in *RotateRootCredentialsRequest, opts ...grpc.CallOption) (*RotateRootCredentialsResponse, error) + Init(ctx context.Context, in *InitRequest, opts ...grpc.CallOption) (*InitResponse, error) + Close(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) + SetCredentials(ctx context.Context, in *SetCredentialsRequest, opts ...grpc.CallOption) (*SetCredentialsResponse, error) + GenerateCredentials(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*GenerateCredentialsResponse, error) + // Deprecated: Do not use. + Initialize(ctx context.Context, in *InitializeRequest, opts ...grpc.CallOption) (*Empty, error) +} + +type databaseClient struct { + cc grpc.ClientConnInterface +} + +func NewDatabaseClient(cc grpc.ClientConnInterface) DatabaseClient { + return &databaseClient{cc} +} + +func (c *databaseClient) Type(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TypeResponse, error) { + out := new(TypeResponse) + err := c.cc.Invoke(ctx, "/dbplugin.Database/Type", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) CreateUser(ctx context.Context, in *CreateUserRequest, opts ...grpc.CallOption) (*CreateUserResponse, error) { + out := new(CreateUserResponse) + err := c.cc.Invoke(ctx, "/dbplugin.Database/CreateUser", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) RenewUser(ctx context.Context, in *RenewUserRequest, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/dbplugin.Database/RenewUser", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) RevokeUser(ctx context.Context, in *RevokeUserRequest, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/dbplugin.Database/RevokeUser", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) RotateRootCredentials(ctx context.Context, in *RotateRootCredentialsRequest, opts ...grpc.CallOption) (*RotateRootCredentialsResponse, error) { + out := new(RotateRootCredentialsResponse) + err := c.cc.Invoke(ctx, "/dbplugin.Database/RotateRootCredentials", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) Init(ctx context.Context, in *InitRequest, opts ...grpc.CallOption) (*InitResponse, error) { + out := new(InitResponse) + err := c.cc.Invoke(ctx, "/dbplugin.Database/Init", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) Close(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/dbplugin.Database/Close", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) SetCredentials(ctx context.Context, in *SetCredentialsRequest, opts ...grpc.CallOption) (*SetCredentialsResponse, error) { + out := new(SetCredentialsResponse) + err := c.cc.Invoke(ctx, "/dbplugin.Database/SetCredentials", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) GenerateCredentials(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*GenerateCredentialsResponse, error) { + out := new(GenerateCredentialsResponse) + err := c.cc.Invoke(ctx, "/dbplugin.Database/GenerateCredentials", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Deprecated: Do not use. +func (c *databaseClient) Initialize(ctx context.Context, in *InitializeRequest, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/dbplugin.Database/Initialize", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// DatabaseServer is the server API for Database service. +// All implementations must embed UnimplementedDatabaseServer +// for forward compatibility +type DatabaseServer interface { + Type(context.Context, *Empty) (*TypeResponse, error) + CreateUser(context.Context, *CreateUserRequest) (*CreateUserResponse, error) + RenewUser(context.Context, *RenewUserRequest) (*Empty, error) + RevokeUser(context.Context, *RevokeUserRequest) (*Empty, error) + RotateRootCredentials(context.Context, *RotateRootCredentialsRequest) (*RotateRootCredentialsResponse, error) + Init(context.Context, *InitRequest) (*InitResponse, error) + Close(context.Context, *Empty) (*Empty, error) + SetCredentials(context.Context, *SetCredentialsRequest) (*SetCredentialsResponse, error) + GenerateCredentials(context.Context, *Empty) (*GenerateCredentialsResponse, error) + // Deprecated: Do not use. + Initialize(context.Context, *InitializeRequest) (*Empty, error) + mustEmbedUnimplementedDatabaseServer() +} + +// UnimplementedDatabaseServer must be embedded to have forward compatible implementations. 
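+//
+// As a hedged sketch of the intended pattern (myDatabase and its method body
+// are illustrative names, not part of this package):
+//
+//	type myDatabase struct {
+//		UnimplementedDatabaseServer
+//	}
+//
+//	func (myDatabase) Type(ctx context.Context, _ *Empty) (*TypeResponse, error) {
+//		return &TypeResponse{Type: "example"}, nil
+//	}
+//
+// RPCs that myDatabase does not override fall through to the embedded struct,
+// which answers with codes.Unimplemented rather than breaking compilation
+// when new methods are added to DatabaseServer.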
+type UnimplementedDatabaseServer struct { +} + +func (UnimplementedDatabaseServer) Type(context.Context, *Empty) (*TypeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Type not implemented") +} +func (UnimplementedDatabaseServer) CreateUser(context.Context, *CreateUserRequest) (*CreateUserResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateUser not implemented") +} +func (UnimplementedDatabaseServer) RenewUser(context.Context, *RenewUserRequest) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method RenewUser not implemented") +} +func (UnimplementedDatabaseServer) RevokeUser(context.Context, *RevokeUserRequest) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method RevokeUser not implemented") +} +func (UnimplementedDatabaseServer) RotateRootCredentials(context.Context, *RotateRootCredentialsRequest) (*RotateRootCredentialsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RotateRootCredentials not implemented") +} +func (UnimplementedDatabaseServer) Init(context.Context, *InitRequest) (*InitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Init not implemented") +} +func (UnimplementedDatabaseServer) Close(context.Context, *Empty) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Close not implemented") +} +func (UnimplementedDatabaseServer) SetCredentials(context.Context, *SetCredentialsRequest) (*SetCredentialsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetCredentials not implemented") +} +func (UnimplementedDatabaseServer) GenerateCredentials(context.Context, *Empty) (*GenerateCredentialsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GenerateCredentials not implemented") +} +func (UnimplementedDatabaseServer) Initialize(context.Context, *InitializeRequest) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Initialize not implemented") +} +func (UnimplementedDatabaseServer) mustEmbedUnimplementedDatabaseServer() {} + +// UnsafeDatabaseServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to DatabaseServer will +// result in compilation errors. 
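The embedding requirement above is the standard protoc-gen-go-grpc forward-compatibility pattern: a server struct embeds UnimplementedDatabaseServer so that any RPC it does not override answers codes.Unimplemented instead of breaking compilation when the service grows. A minimal sketch (stubDB and its canned type string are hypothetical):

```go
package main

import (
	"context"

	"google.golang.org/grpc"

	"github.com/hashicorp/vault/sdk/database/dbplugin"
)

// stubDB overrides only Type; every other method falls through to
// UnimplementedDatabaseServer and returns codes.Unimplemented, which
// Vault's client wrappers later in this diff use as a cue to fall back
// to older methods or fail cleanly.
type stubDB struct {
	dbplugin.UnimplementedDatabaseServer
}

func (stubDB) Type(context.Context, *dbplugin.Empty) (*dbplugin.TypeResponse, error) {
	return &dbplugin.TypeResponse{Type: "stub"}, nil
}

func main() {
	s := grpc.NewServer()
	dbplugin.RegisterDatabaseServer(s, stubDB{})
	// s.Serve(listener) would follow in a real plugin process.
}
```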
+type UnsafeDatabaseServer interface { + mustEmbedUnimplementedDatabaseServer() +} + +func RegisterDatabaseServer(s grpc.ServiceRegistrar, srv DatabaseServer) { + s.RegisterService(&Database_ServiceDesc, srv) +} + +func _Database_Type_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).Type(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dbplugin.Database/Type", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).Type(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_CreateUser_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateUserRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).CreateUser(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dbplugin.Database/CreateUser", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).CreateUser(ctx, req.(*CreateUserRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_RenewUser_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RenewUserRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).RenewUser(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dbplugin.Database/RenewUser", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).RenewUser(ctx, req.(*RenewUserRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_RevokeUser_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RevokeUserRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).RevokeUser(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dbplugin.Database/RevokeUser", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).RevokeUser(ctx, req.(*RevokeUserRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_RotateRootCredentials_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RotateRootCredentialsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).RotateRootCredentials(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dbplugin.Database/RotateRootCredentials", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).RotateRootCredentials(ctx, req.(*RotateRootCredentialsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_Init_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) 
(interface{}, error) { + in := new(InitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).Init(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dbplugin.Database/Init", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).Init(ctx, req.(*InitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_Close_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).Close(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dbplugin.Database/Close", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).Close(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_SetCredentials_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetCredentialsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).SetCredentials(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dbplugin.Database/SetCredentials", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).SetCredentials(ctx, req.(*SetCredentialsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_GenerateCredentials_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).GenerateCredentials(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dbplugin.Database/GenerateCredentials", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).GenerateCredentials(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_Initialize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InitializeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).Initialize(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dbplugin.Database/Initialize", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).Initialize(ctx, req.(*InitializeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Database_ServiceDesc is the grpc.ServiceDesc for Database service. 
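Each generated handler above follows the same shape: decode into the request type, then either call the server directly or hand a closure to the configured unary interceptor. A small sketch of an interceptor that would observe every Database RPC (the logging is illustrative; any function with this signature works):

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// loggingInterceptor is a hypothetical grpc.UnaryServerInterceptor. The
// generated _Database_*_Handler functions route every call through
// whatever interceptor the server was constructed with.
func loggingInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	log.Printf("-> %s", info.FullMethod) // e.g. "/dbplugin.Database/CreateUser"
	resp, err := handler(ctx, req)
	if err != nil {
		log.Printf("<- %s failed: %v", info.FullMethod, err)
	}
	return resp, err
}

func main() {
	// Wiring it in; RegisterDatabaseServer would follow as usual.
	_ = grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor))
}
```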
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var Database_ServiceDesc = grpc.ServiceDesc{
+	ServiceName: "dbplugin.Database",
+	HandlerType: (*DatabaseServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Type",
+			Handler:    _Database_Type_Handler,
+		},
+		{
+			MethodName: "CreateUser",
+			Handler:    _Database_CreateUser_Handler,
+		},
+		{
+			MethodName: "RenewUser",
+			Handler:    _Database_RenewUser_Handler,
+		},
+		{
+			MethodName: "RevokeUser",
+			Handler:    _Database_RevokeUser_Handler,
+		},
+		{
+			MethodName: "RotateRootCredentials",
+			Handler:    _Database_RotateRootCredentials_Handler,
+		},
+		{
+			MethodName: "Init",
+			Handler:    _Database_Init_Handler,
+		},
+		{
+			MethodName: "Close",
+			Handler:    _Database_Close_Handler,
+		},
+		{
+			MethodName: "SetCredentials",
+			Handler:    _Database_SetCredentials_Handler,
+		},
+		{
+			MethodName: "GenerateCredentials",
+			Handler:    _Database_GenerateCredentials_Handler,
+		},
+		{
+			MethodName: "Initialize",
+			Handler:    _Database_Initialize_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "sdk/database/dbplugin/database.proto",
+}
diff --git a/sdk/database/dbplugin/databasemiddleware.go b/sdk/database/dbplugin/databasemiddleware.go
new file mode 100644
index 0000000..d7cabaf
--- /dev/null
+++ b/sdk/database/dbplugin/databasemiddleware.go
@@ -0,0 +1,347 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package dbplugin
+
+import (
+	"context"
+	"errors"
+	"net/url"
+	"strings"
+	"sync"
+	"time"
+
+	metrics "github.com/armon/go-metrics"
+	"github.com/hashicorp/errwrap"
+	log "github.com/hashicorp/go-hclog"
+	"google.golang.org/grpc/status"
+)
+
+// ---- Tracing Middleware Domain ----
+
+// databaseTracingMiddleware wraps an implementation of Database and executes
+// trace logging on each function call.
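The middleware types in this file compose as a plain decorator chain around any Database value: PluginFactory (later in this diff) builds the chain from the inside out, while GRPCDatabasePlugin.GRPCServer applies the error sanitizer on the plugin side. A hedged, in-package sketch of that ordering (the "mysql" type string and transport label are illustrative only):

```go
// wrapDatabase sketches the wrapping order PluginFactory uses below:
// metrics always, tracing only at trace verbosity.
func wrapDatabase(db Database, logger log.Logger) Database {
	db = &databaseMetricsMiddleware{
		next:    db,
		typeStr: "mysql", // illustrative type name
	}
	if logger.IsTrace() {
		db = &databaseTracingMiddleware{
			next:   db,
			logger: logger.With("transport", "gRPC"),
		}
	}
	return db
}
```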
+type databaseTracingMiddleware struct { + next Database + logger log.Logger +} + +func (mw *databaseTracingMiddleware) Type() (string, error) { + return mw.next.Type() +} + +func (mw *databaseTracingMiddleware) CreateUser(ctx context.Context, statements Statements, usernameConfig UsernameConfig, expiration time.Time) (username string, password string, err error) { + defer func(then time.Time) { + mw.logger.Trace("create user", "status", "finished", "err", err, "took", time.Since(then)) + }(time.Now()) + + mw.logger.Trace("create user", "status", "started") + return mw.next.CreateUser(ctx, statements, usernameConfig, expiration) +} + +func (mw *databaseTracingMiddleware) RenewUser(ctx context.Context, statements Statements, username string, expiration time.Time) (err error) { + defer func(then time.Time) { + mw.logger.Trace("renew user", "status", "finished", "err", err, "took", time.Since(then)) + }(time.Now()) + + mw.logger.Trace("renew user", "status", "started") + return mw.next.RenewUser(ctx, statements, username, expiration) +} + +func (mw *databaseTracingMiddleware) RevokeUser(ctx context.Context, statements Statements, username string) (err error) { + defer func(then time.Time) { + mw.logger.Trace("revoke user", "status", "finished", "err", err, "took", time.Since(then)) + }(time.Now()) + + mw.logger.Trace("revoke user", "status", "started") + return mw.next.RevokeUser(ctx, statements, username) +} + +func (mw *databaseTracingMiddleware) RotateRootCredentials(ctx context.Context, statements []string) (conf map[string]interface{}, err error) { + defer func(then time.Time) { + mw.logger.Trace("rotate root credentials", "status", "finished", "err", err, "took", time.Since(then)) + }(time.Now()) + + mw.logger.Trace("rotate root credentials", "status", "started") + return mw.next.RotateRootCredentials(ctx, statements) +} + +func (mw *databaseTracingMiddleware) Initialize(ctx context.Context, conf map[string]interface{}, verifyConnection bool) error { + _, err := mw.Init(ctx, conf, verifyConnection) + return err +} + +func (mw *databaseTracingMiddleware) Init(ctx context.Context, conf map[string]interface{}, verifyConnection bool) (saveConf map[string]interface{}, err error) { + defer func(then time.Time) { + mw.logger.Trace("initialize", "status", "finished", "verify", verifyConnection, "err", err, "took", time.Since(then)) + }(time.Now()) + + mw.logger.Trace("initialize", "status", "started") + return mw.next.Init(ctx, conf, verifyConnection) +} + +func (mw *databaseTracingMiddleware) Close() (err error) { + defer func(then time.Time) { + mw.logger.Trace("close", "status", "finished", "err", err, "took", time.Since(then)) + }(time.Now()) + + mw.logger.Trace("close", "status", "started") + return mw.next.Close() +} + +func (mw *databaseTracingMiddleware) GenerateCredentials(ctx context.Context) (password string, err error) { + defer func(then time.Time) { + mw.logger.Trace("generate credentials", "status", "finished", "err", err, "took", time.Since(then)) + }(time.Now()) + + mw.logger.Trace("generate credentials", "status", "started") + return mw.next.GenerateCredentials(ctx) +} + +func (mw *databaseTracingMiddleware) SetCredentials(ctx context.Context, statements Statements, staticConfig StaticUserConfig) (username, password string, err error) { + defer func(then time.Time) { + mw.logger.Trace("set credentials", "status", "finished", "err", err, "took", time.Since(then)) + }(time.Now()) + + mw.logger.Trace("set credentials", "status", "started") + return mw.next.SetCredentials(ctx, 
statements, staticConfig)
+}
+
+// ---- Metrics Middleware Domain ----
+
+// databaseMetricsMiddleware wraps an implementation of Database and logs
+// metrics for each function call.
+type databaseMetricsMiddleware struct {
+	next Database
+
+	typeStr string
+}
+
+func (mw *databaseMetricsMiddleware) Type() (string, error) {
+	return mw.next.Type()
+}
+
+func (mw *databaseMetricsMiddleware) CreateUser(ctx context.Context, statements Statements, usernameConfig UsernameConfig, expiration time.Time) (username string, password string, err error) {
+	defer func(now time.Time) {
+		metrics.MeasureSince([]string{"database", "CreateUser"}, now)
+		metrics.MeasureSince([]string{"database", mw.typeStr, "CreateUser"}, now)
+
+		if err != nil {
+			metrics.IncrCounter([]string{"database", "CreateUser", "error"}, 1)
+			metrics.IncrCounter([]string{"database", mw.typeStr, "CreateUser", "error"}, 1)
+		}
+	}(time.Now())
+
+	metrics.IncrCounter([]string{"database", "CreateUser"}, 1)
+	metrics.IncrCounter([]string{"database", mw.typeStr, "CreateUser"}, 1)
+	return mw.next.CreateUser(ctx, statements, usernameConfig, expiration)
+}
+
+func (mw *databaseMetricsMiddleware) RenewUser(ctx context.Context, statements Statements, username string, expiration time.Time) (err error) {
+	defer func(now time.Time) {
+		metrics.MeasureSince([]string{"database", "RenewUser"}, now)
+		metrics.MeasureSince([]string{"database", mw.typeStr, "RenewUser"}, now)
+
+		if err != nil {
+			metrics.IncrCounter([]string{"database", "RenewUser", "error"}, 1)
+			metrics.IncrCounter([]string{"database", mw.typeStr, "RenewUser", "error"}, 1)
+		}
+	}(time.Now())
+
+	metrics.IncrCounter([]string{"database", "RenewUser"}, 1)
+	metrics.IncrCounter([]string{"database", mw.typeStr, "RenewUser"}, 1)
+	return mw.next.RenewUser(ctx, statements, username, expiration)
+}
+
+func (mw *databaseMetricsMiddleware) RevokeUser(ctx context.Context, statements Statements, username string) (err error) {
+	defer func(now time.Time) {
+		metrics.MeasureSince([]string{"database", "RevokeUser"}, now)
+		metrics.MeasureSince([]string{"database", mw.typeStr, "RevokeUser"}, now)
+
+		if err != nil {
+			metrics.IncrCounter([]string{"database", "RevokeUser", "error"}, 1)
+			metrics.IncrCounter([]string{"database", mw.typeStr, "RevokeUser", "error"}, 1)
+		}
+	}(time.Now())
+
+	metrics.IncrCounter([]string{"database", "RevokeUser"}, 1)
+	metrics.IncrCounter([]string{"database", mw.typeStr, "RevokeUser"}, 1)
+	return mw.next.RevokeUser(ctx, statements, username)
+}
+
+func (mw *databaseMetricsMiddleware) RotateRootCredentials(ctx context.Context, statements []string) (conf map[string]interface{}, err error) {
+	defer func(now time.Time) {
+		metrics.MeasureSince([]string{"database", "RotateRootCredentials"}, now)
+		metrics.MeasureSince([]string{"database", mw.typeStr, "RotateRootCredentials"}, now)
+
+		if err != nil {
+			metrics.IncrCounter([]string{"database", "RotateRootCredentials", "error"}, 1)
+			metrics.IncrCounter([]string{"database", mw.typeStr, "RotateRootCredentials", "error"}, 1)
+		}
+	}(time.Now())
+
+	metrics.IncrCounter([]string{"database", "RotateRootCredentials"}, 1)
+	metrics.IncrCounter([]string{"database", mw.typeStr, "RotateRootCredentials"}, 1)
+	return mw.next.RotateRootCredentials(ctx, statements)
+}
+
+func (mw *databaseMetricsMiddleware) Initialize(ctx context.Context, conf map[string]interface{}, verifyConnection bool) error {
+	_, err := mw.Init(ctx, conf, verifyConnection)
+	return err
+}
+
+func (mw *databaseMetricsMiddleware) Init(ctx context.Context, conf map[string]interface{}, verifyConnection bool) (saveConf map[string]interface{}, err error) {
+	defer func(now time.Time) {
+		metrics.MeasureSince([]string{"database", "Initialize"}, now)
+		metrics.MeasureSince([]string{"database", mw.typeStr, "Initialize"}, now)
+
+		if err != nil {
+			metrics.IncrCounter([]string{"database", "Initialize", "error"}, 1)
+			metrics.IncrCounter([]string{"database", mw.typeStr, "Initialize", "error"}, 1)
+		}
+	}(time.Now())
+
+	metrics.IncrCounter([]string{"database", "Initialize"}, 1)
+	metrics.IncrCounter([]string{"database", mw.typeStr, "Initialize"}, 1)
+	return mw.next.Init(ctx, conf, verifyConnection)
+}
+
+func (mw *databaseMetricsMiddleware) Close() (err error) {
+	defer func(now time.Time) {
+		metrics.MeasureSince([]string{"database", "Close"}, now)
+		metrics.MeasureSince([]string{"database", mw.typeStr, "Close"}, now)
+
+		if err != nil {
+			metrics.IncrCounter([]string{"database", "Close", "error"}, 1)
+			metrics.IncrCounter([]string{"database", mw.typeStr, "Close", "error"}, 1)
+		}
+	}(time.Now())
+
+	metrics.IncrCounter([]string{"database", "Close"}, 1)
+	metrics.IncrCounter([]string{"database", mw.typeStr, "Close"}, 1)
+	return mw.next.Close()
+}
+
+func (mw *databaseMetricsMiddleware) GenerateCredentials(ctx context.Context) (password string, err error) {
+	defer func(now time.Time) {
+		metrics.MeasureSince([]string{"database", "GenerateCredentials"}, now)
+		metrics.MeasureSince([]string{"database", mw.typeStr, "GenerateCredentials"}, now)
+
+		if err != nil {
+			metrics.IncrCounter([]string{"database", "GenerateCredentials", "error"}, 1)
+			metrics.IncrCounter([]string{"database", mw.typeStr, "GenerateCredentials", "error"}, 1)
+		}
+	}(time.Now())
+
+	metrics.IncrCounter([]string{"database", "GenerateCredentials"}, 1)
+	metrics.IncrCounter([]string{"database", mw.typeStr, "GenerateCredentials"}, 1)
+	return mw.next.GenerateCredentials(ctx)
+}
+
+func (mw *databaseMetricsMiddleware) SetCredentials(ctx context.Context, statements Statements, staticConfig StaticUserConfig) (username, password string, err error) {
+	defer func(now time.Time) {
+		metrics.MeasureSince([]string{"database", "SetCredentials"}, now)
+		metrics.MeasureSince([]string{"database", mw.typeStr, "SetCredentials"}, now)
+
+		if err != nil {
+			metrics.IncrCounter([]string{"database", "SetCredentials", "error"}, 1)
+			metrics.IncrCounter([]string{"database", mw.typeStr, "SetCredentials", "error"}, 1)
+		}
+	}(time.Now())
+
+	metrics.IncrCounter([]string{"database", "SetCredentials"}, 1)
+	metrics.IncrCounter([]string{"database", mw.typeStr, "SetCredentials"}, 1)
+	return mw.next.SetCredentials(ctx, statements, staticConfig)
+}
+
+// ---- Error Sanitizer Middleware Domain ----
+
+// DatabaseErrorSanitizerMiddleware wraps an implementation of Database and
+// sanitizes returned error messages.
+type DatabaseErrorSanitizerMiddleware struct {
+	l         sync.RWMutex
+	next      Database
+	secretsFn func() map[string]interface{}
+}
+
+func NewDatabaseErrorSanitizerMiddleware(next Database, secretsFn func() map[string]interface{}) *DatabaseErrorSanitizerMiddleware {
+	return &DatabaseErrorSanitizerMiddleware{
+		next:      next,
+		secretsFn: secretsFn,
+	}
+}
+
+func (mw *DatabaseErrorSanitizerMiddleware) Type() (string, error) {
+	dbType, err := mw.next.Type()
+	return dbType, mw.sanitize(err)
+}
+
+func (mw *DatabaseErrorSanitizerMiddleware) CreateUser(ctx context.Context, statements Statements, usernameConfig UsernameConfig, expiration time.Time) (username string, password string, err error) {
+	username, password, err = mw.next.CreateUser(ctx, statements, usernameConfig, expiration)
+	return username, password, mw.sanitize(err)
+}
+
+func (mw *DatabaseErrorSanitizerMiddleware) RenewUser(ctx context.Context, statements Statements, username string, expiration time.Time) (err error) {
+	return mw.sanitize(mw.next.RenewUser(ctx, statements, username, expiration))
+}
+
+func (mw *DatabaseErrorSanitizerMiddleware) RevokeUser(ctx context.Context, statements Statements, username string) (err error) {
+	return mw.sanitize(mw.next.RevokeUser(ctx, statements, username))
+}
+
+func (mw *DatabaseErrorSanitizerMiddleware) RotateRootCredentials(ctx context.Context, statements []string) (conf map[string]interface{}, err error) {
+	conf, err = mw.next.RotateRootCredentials(ctx, statements)
+	return conf, mw.sanitize(err)
+}
+
+func (mw *DatabaseErrorSanitizerMiddleware) Initialize(ctx context.Context, conf map[string]interface{}, verifyConnection bool) error {
+	_, err := mw.Init(ctx, conf, verifyConnection)
+	return err
+}
+
+func (mw *DatabaseErrorSanitizerMiddleware) Init(ctx context.Context, conf map[string]interface{}, verifyConnection bool) (saveConf map[string]interface{}, err error) {
+	saveConf, err = mw.next.Init(ctx, conf, verifyConnection)
+	return saveConf, mw.sanitize(err)
+}
+
+func (mw *DatabaseErrorSanitizerMiddleware) Close() (err error) {
+	return mw.sanitize(mw.next.Close())
+}
+
+// sanitize scrubs sensitive data from an error before it is returned: URL
+// parse failures are replaced wholesale (they can embed credentials), and
+// every secret value reported by secretsFn is substituted in the message
+// while preserving any attached gRPC status code.
+func (mw *DatabaseErrorSanitizerMiddleware) sanitize(err error) error {
+	if err == nil {
+		return nil
+	}
+	if errwrap.ContainsType(err, new(url.Error)) {
+		return errors.New("unable to parse connection url")
+	}
+	if mw.secretsFn != nil {
+		for k, v := range mw.secretsFn() {
+			if k == "" {
+				continue
+			}
+
+			// Attempt to keep the status code attached to the
+			// error without changing the actual error message
+			s, ok := status.FromError(err)
+			if ok {
+				err = status.Error(s.Code(), strings.ReplaceAll(s.Message(), k, v.(string)))
+				continue
+			}
+
+			err = errors.New(strings.ReplaceAll(err.Error(), k, v.(string)))
+		}
+	}
+	return err
+}
+
+func (mw *DatabaseErrorSanitizerMiddleware) GenerateCredentials(ctx context.Context) (password string, err error) {
+	password, err = mw.next.GenerateCredentials(ctx)
+	return password, mw.sanitize(err)
+}
+
+func (mw *DatabaseErrorSanitizerMiddleware) SetCredentials(ctx context.Context, statements Statements, staticConfig StaticUserConfig) (username, password string, err error) {
+	username, password, err = mw.next.SetCredentials(ctx, statements, staticConfig)
+	return username, password, mw.sanitize(err)
+}
diff --git a/sdk/database/dbplugin/grpc_transport.go b/sdk/database/dbplugin/grpc_transport.go
new file mode 100644
index 0000000..3740ef5
--- /dev/null
+++ b/sdk/database/dbplugin/grpc_transport.go
@@ -0,0 +1,359 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package dbplugin + +import ( + "context" + "encoding/json" + "errors" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/golang/protobuf/ptypes" + "github.com/hashicorp/vault/sdk/helper/pluginutil" +) + +var ( + ErrPluginShutdown = errors.New("plugin shutdown") + ErrPluginStaticUnsupported = errors.New("database plugin does not support Static Accounts") +) + +// ---- gRPC Server domain ---- + +type gRPCServer struct { + UnimplementedDatabaseServer + + impl Database +} + +func (s *gRPCServer) Type(context.Context, *Empty) (*TypeResponse, error) { + t, err := s.impl.Type() + if err != nil { + return nil, err + } + + return &TypeResponse{ + Type: t, + }, nil +} + +func (s *gRPCServer) CreateUser(ctx context.Context, req *CreateUserRequest) (*CreateUserResponse, error) { + e, err := ptypes.Timestamp(req.Expiration) + if err != nil { + return nil, err + } + + u, p, err := s.impl.CreateUser(ctx, *req.Statements, *req.UsernameConfig, e) + + return &CreateUserResponse{ + Username: u, + Password: p, + }, err +} + +func (s *gRPCServer) RenewUser(ctx context.Context, req *RenewUserRequest) (*Empty, error) { + e, err := ptypes.Timestamp(req.Expiration) + if err != nil { + return nil, err + } + err = s.impl.RenewUser(ctx, *req.Statements, req.Username, e) + return &Empty{}, err +} + +func (s *gRPCServer) RevokeUser(ctx context.Context, req *RevokeUserRequest) (*Empty, error) { + err := s.impl.RevokeUser(ctx, *req.Statements, req.Username) + return &Empty{}, err +} + +func (s *gRPCServer) RotateRootCredentials(ctx context.Context, req *RotateRootCredentialsRequest) (*RotateRootCredentialsResponse, error) { + resp, err := s.impl.RotateRootCredentials(ctx, req.Statements) + if err != nil { + return nil, err + } + + respConfig, err := json.Marshal(resp) + if err != nil { + return nil, err + } + + return &RotateRootCredentialsResponse{ + Config: respConfig, + }, err +} + +func (s *gRPCServer) Initialize(ctx context.Context, req *InitializeRequest) (*Empty, error) { + _, err := s.Init(ctx, &InitRequest{ + Config: req.Config, + VerifyConnection: req.VerifyConnection, + }) + return &Empty{}, err +} + +func (s *gRPCServer) Init(ctx context.Context, req *InitRequest) (*InitResponse, error) { + config := map[string]interface{}{} + err := json.Unmarshal(req.Config, &config) + if err != nil { + return nil, err + } + + resp, err := s.impl.Init(ctx, config, req.VerifyConnection) + if err != nil { + return nil, err + } + + respConfig, err := json.Marshal(resp) + if err != nil { + return nil, err + } + + return &InitResponse{ + Config: respConfig, + }, err +} + +func (s *gRPCServer) Close(_ context.Context, _ *Empty) (*Empty, error) { + s.impl.Close() + return &Empty{}, nil +} + +func (s *gRPCServer) GenerateCredentials(ctx context.Context, _ *Empty) (*GenerateCredentialsResponse, error) { + p, err := s.impl.GenerateCredentials(ctx) + if err != nil { + return nil, err + } + + return &GenerateCredentialsResponse{ + Password: p, + }, nil +} + +func (s *gRPCServer) SetCredentials(ctx context.Context, req *SetCredentialsRequest) (*SetCredentialsResponse, error) { + username, password, err := s.impl.SetCredentials(ctx, *req.Statements, *req.StaticUserConfig) + if err != nil { + return nil, err + } + + return &SetCredentialsResponse{ + Username: username, + Password: password, + }, err +} + +// ---- gRPC client domain ---- + +type gRPCClient struct { + client DatabaseClient + clientConn *grpc.ClientConn + + doneCtx 
context.Context +} + +func (c *gRPCClient) Type() (string, error) { + resp, err := c.client.Type(c.doneCtx, &Empty{}) + if err != nil { + return "", err + } + + return resp.Type, err +} + +func (c *gRPCClient) CreateUser(ctx context.Context, statements Statements, usernameConfig UsernameConfig, expiration time.Time) (username string, password string, err error) { + t, err := ptypes.TimestampProto(expiration) + if err != nil { + return "", "", err + } + + ctx, cancel := context.WithCancel(ctx) + quitCh := pluginutil.CtxCancelIfCanceled(cancel, c.doneCtx) + defer close(quitCh) + defer cancel() + + resp, err := c.client.CreateUser(ctx, &CreateUserRequest{ + Statements: &statements, + UsernameConfig: &usernameConfig, + Expiration: t, + }) + if err != nil { + if c.doneCtx.Err() != nil { + return "", "", ErrPluginShutdown + } + + return "", "", err + } + + return resp.Username, resp.Password, err +} + +func (c *gRPCClient) RenewUser(ctx context.Context, statements Statements, username string, expiration time.Time) error { + t, err := ptypes.TimestampProto(expiration) + if err != nil { + return err + } + + ctx, cancel := context.WithCancel(ctx) + quitCh := pluginutil.CtxCancelIfCanceled(cancel, c.doneCtx) + defer close(quitCh) + defer cancel() + + _, err = c.client.RenewUser(ctx, &RenewUserRequest{ + Statements: &statements, + Username: username, + Expiration: t, + }) + if err != nil { + if c.doneCtx.Err() != nil { + return ErrPluginShutdown + } + + return err + } + + return nil +} + +func (c *gRPCClient) RevokeUser(ctx context.Context, statements Statements, username string) error { + ctx, cancel := context.WithCancel(ctx) + quitCh := pluginutil.CtxCancelIfCanceled(cancel, c.doneCtx) + defer close(quitCh) + defer cancel() + + _, err := c.client.RevokeUser(ctx, &RevokeUserRequest{ + Statements: &statements, + Username: username, + }) + if err != nil { + if c.doneCtx.Err() != nil { + return ErrPluginShutdown + } + + return err + } + + return nil +} + +func (c *gRPCClient) RotateRootCredentials(ctx context.Context, statements []string) (conf map[string]interface{}, err error) { + ctx, cancel := context.WithCancel(ctx) + quitCh := pluginutil.CtxCancelIfCanceled(cancel, c.doneCtx) + defer close(quitCh) + defer cancel() + + resp, err := c.client.RotateRootCredentials(ctx, &RotateRootCredentialsRequest{ + Statements: statements, + }) + if err != nil { + if c.doneCtx.Err() != nil { + return nil, ErrPluginShutdown + } + + return nil, err + } + + if err := json.Unmarshal(resp.Config, &conf); err != nil { + return nil, err + } + + return conf, nil +} + +func (c *gRPCClient) Initialize(ctx context.Context, conf map[string]interface{}, verifyConnection bool) error { + _, err := c.Init(ctx, conf, verifyConnection) + return err +} + +func (c *gRPCClient) Init(ctx context.Context, conf map[string]interface{}, verifyConnection bool) (map[string]interface{}, error) { + configRaw, err := json.Marshal(conf) + if err != nil { + return nil, err + } + + ctx, cancel := context.WithCancel(ctx) + quitCh := pluginutil.CtxCancelIfCanceled(cancel, c.doneCtx) + defer close(quitCh) + defer cancel() + + resp, err := c.client.Init(ctx, &InitRequest{ + Config: configRaw, + VerifyConnection: verifyConnection, + }) + if err != nil { + // Fall back to old call if not implemented + grpcStatus, ok := status.FromError(err) + if ok && grpcStatus.Code() == codes.Unimplemented { + _, err = c.client.Initialize(ctx, &InitializeRequest{ + Config: configRaw, + VerifyConnection: verifyConnection, + }) + if err == nil { + return conf, nil + } 
+ } + + if c.doneCtx.Err() != nil { + return nil, ErrPluginShutdown + } + return nil, err + } + + if err := json.Unmarshal(resp.Config, &conf); err != nil { + return nil, err + } + return conf, nil +} + +func (c *gRPCClient) Close() error { + _, err := c.client.Close(c.doneCtx, &Empty{}) + return err +} + +func (c *gRPCClient) GenerateCredentials(ctx context.Context) (string, error) { + ctx, cancel := context.WithCancel(ctx) + quitCh := pluginutil.CtxCancelIfCanceled(cancel, c.doneCtx) + defer close(quitCh) + defer cancel() + + resp, err := c.client.GenerateCredentials(ctx, &Empty{}) + if err != nil { + grpcStatus, ok := status.FromError(err) + if ok && grpcStatus.Code() == codes.Unimplemented { + return "", ErrPluginStaticUnsupported + } + + if c.doneCtx.Err() != nil { + return "", ErrPluginShutdown + } + return "", err + } + + return resp.Password, nil +} + +func (c *gRPCClient) SetCredentials(ctx context.Context, statements Statements, staticUser StaticUserConfig) (username, password string, err error) { + ctx, cancel := context.WithCancel(ctx) + quitCh := pluginutil.CtxCancelIfCanceled(cancel, c.doneCtx) + defer close(quitCh) + defer cancel() + + resp, err := c.client.SetCredentials(ctx, &SetCredentialsRequest{ + StaticUserConfig: &staticUser, + Statements: &statements, + }) + if err != nil { + // Fall back to old call if not implemented + grpcStatus, ok := status.FromError(err) + if ok && grpcStatus.Code() == codes.Unimplemented { + return "", "", ErrPluginStaticUnsupported + } + + if c.doneCtx.Err() != nil { + return "", "", ErrPluginShutdown + } + return "", "", err + } + + return resp.Username, resp.Password, err +} diff --git a/sdk/database/dbplugin/plugin.go b/sdk/database/dbplugin/plugin.go new file mode 100644 index 0000000..0b01454 --- /dev/null +++ b/sdk/database/dbplugin/plugin.go @@ -0,0 +1,187 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package dbplugin + +import ( + "context" + "fmt" + "time" + + "google.golang.org/grpc" + + "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/pluginutil" +) + +// Database is the interface that all database objects must implement. +type Database interface { + // Type returns the TypeName for the particular database backend + // implementation. This type name is usually set as a constant within the + // database backend implementation, e.g. "mysql" for the MySQL database + // backend. + Type() (string, error) + + // CreateUser is called on `$ vault read database/creds/:role-name` and it's + // also the first time anything is touched from `$ vault write + // database/roles/:role-name`. This is likely to be the highest-throughput + // method for most plugins. + CreateUser(ctx context.Context, statements Statements, usernameConfig UsernameConfig, expiration time.Time) (username string, password string, err error) + + // RenewUser is triggered by a renewal call to the API. In many database + // backends, this triggers a call on the underlying database that extends a + // VALID UNTIL clause on a user. However, if no such need exists, setting + // this as a NO-OP means that when renewal is called, the lease renewal time + // is pushed further out as appropriate, thus pushing out the time until the + // RevokeUser method is called. 
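One behavioral detail worth calling out from the client code above: every RPC watches doneCtx, so a plugin process that dies mid-call surfaces as the stable ErrPluginShutdown sentinel rather than an opaque transport error. A hedged, in-package sketch of how a caller can branch on it (initOrRestart is a hypothetical helper):

```go
// initOrRestart shows the intended handling: c is a *gRPCClient and conf a
// previously stored config map.
func initOrRestart(ctx context.Context, c *gRPCClient, conf map[string]interface{}) (map[string]interface{}, error) {
	newConf, err := c.Init(ctx, conf, true)
	switch {
	case errors.Is(err, ErrPluginShutdown):
		// The plugin process is gone: callers should re-dispense the
		// plugin rather than retry on the same client.
		return nil, err
	case err != nil:
		// An ordinary failure surfaced from the database plugin.
		return nil, err
	}
	return newConf, nil
}
```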
+	RenewUser(ctx context.Context, statements Statements, username string, expiration time.Time) error
+
+	// RevokeUser is triggered either automatically by a lease expiration, or by
+	// a revocation call to the API.
+	RevokeUser(ctx context.Context, statements Statements, username string) error
+
+	// RotateRootCredentials is triggered by a root credential rotation call to
+	// the API.
+	RotateRootCredentials(ctx context.Context, statements []string) (config map[string]interface{}, err error)
+
+	// GenerateCredentials returns a generated password for the plugin. This is
+	// used in combination with SetCredentials to set a specific password for a
+	// database user and preserve the password in WAL entries.
+	GenerateCredentials(ctx context.Context) (string, error)
+
+	// SetCredentials uses provided information to create or set the credentials
+	// for a database user. Unlike CreateUser, this method requires both a
+	// username and a password given instead of generating them. This is used for
+	// creating and setting the password of static accounts, as well as rolling
+	// back passwords in the database in the event an updated database fails to
+	// save in Vault's storage.
+	SetCredentials(ctx context.Context, statements Statements, staticConfig StaticUserConfig) (username string, password string, err error)
+
+	// Init is called on `$ vault write database/config/:db-name`, or when you
+	// do a creds call after Vault's been restarted. The config provided won't
+	// hold all the keys and values provided in the API call, some will be
+	// stripped by the database engine before the config is provided. The config
+	// returned will be stored, which will persist it across shutdowns.
+	Init(ctx context.Context, config map[string]interface{}, verifyConnection bool) (saveConfig map[string]interface{}, err error)
+
+	// Close attempts to close the underlying database connection that was
+	// established by the backend.
+	Close() error
+}
+
+// PluginFactory is used to build plugin database types. It wraps the database
+// object in a logging and metrics middleware.
+func PluginFactory(ctx context.Context, pluginName string, pluginVersion string, sys pluginutil.LookRunnerUtil, logger log.Logger) (Database, error) {
+	return PluginFactoryVersion(ctx, pluginName, pluginVersion, sys, logger)
+}
+
+// PluginFactoryVersion is used to build plugin database types with a version specified.
+// It wraps the database object in a logging and metrics middleware.
+func PluginFactoryVersion(ctx context.Context, pluginName string, pluginVersion string, sys pluginutil.LookRunnerUtil, logger log.Logger) (Database, error) {
+	// Look for plugin in the plugin catalog
+	pluginRunner, err := sys.LookupPluginVersion(ctx, pluginName, consts.PluginTypeDatabase, pluginVersion)
+	if err != nil {
+		return nil, err
+	}
+
+	namedLogger := logger.Named(pluginName)
+
+	var transport string
+	var db Database
+	if pluginRunner.Builtin {
+		// Plugin is builtin so we can retrieve an instance of the interface
+		// from the pluginRunner. Then cast it to a Database.
+		dbRaw, err := pluginRunner.BuiltinFactory()
+		if err != nil {
+			return nil, errwrap.Wrapf("error initializing plugin: {{err}}", err)
+		}
+
+		var ok bool
+		db, ok = dbRaw.(Database)
+		if !ok {
+			return nil, fmt.Errorf("unsupported database type: %q", pluginName)
+		}
+
+		transport = "builtin"
+
+	} else {
+		// create a DatabasePluginClient instance
+		db, err = NewPluginClient(ctx, sys, pluginRunner, namedLogger, false)
+		if err != nil {
+			return nil, err
+		}
+
+		// Switch on the underlying database client type to get the transport
+		// method.
+		switch db.(*DatabasePluginClient).Database.(type) {
+		case *gRPCClient:
+			transport = "gRPC"
+		}
+
+	}
+
+	typeStr, err := db.Type()
+	if err != nil {
+		return nil, errwrap.Wrapf("error getting plugin type: {{err}}", err)
+	}
+
+	// Wrap with metrics middleware
+	db = &databaseMetricsMiddleware{
+		next:    db,
+		typeStr: typeStr,
+	}
+
+	// Wrap with tracing middleware
+	if namedLogger.IsTrace() {
+		db = &databaseTracingMiddleware{
+			next:   db,
+			logger: namedLogger.With("transport", transport),
+		}
+	}
+
+	return db, nil
+}
+
+// handshakeConfig is used to do a basic handshake between
+// a plugin and host. If the handshake fails, a user-friendly error is shown.
+// This prevents users from executing bad plugins or executing a plugin
+// directory. It is a UX feature, not a security feature.
+var handshakeConfig = plugin.HandshakeConfig{
+	ProtocolVersion:  4,
+	MagicCookieKey:   "VAULT_DATABASE_PLUGIN",
+	MagicCookieValue: "926a0820-aea2-be28-51d6-83cdf00e8edb",
+}
+
+var (
+	_ plugin.Plugin     = &GRPCDatabasePlugin{}
+	_ plugin.GRPCPlugin = &GRPCDatabasePlugin{}
+)
+
+// GRPCDatabasePlugin is the plugin.Plugin implementation that only supports GRPC
+// transport
+type GRPCDatabasePlugin struct {
+	Impl Database
+
+	// Embedding this will disable the netRPC protocol
+	plugin.NetRPCUnsupportedPlugin
+}
+
+func (d GRPCDatabasePlugin) GRPCServer(_ *plugin.GRPCBroker, s *grpc.Server) error {
+	impl := &DatabaseErrorSanitizerMiddleware{
+		next: d.Impl,
+	}
+
+	RegisterDatabaseServer(s, &gRPCServer{impl: impl})
+	return nil
+}
+
+func (GRPCDatabasePlugin) GRPCClient(doneCtx context.Context, _ *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) {
+	return &gRPCClient{
+		client:     NewDatabaseClient(c),
+		clientConn: c,
+		doneCtx:    doneCtx,
+	}, nil
+}
diff --git a/sdk/database/dbplugin/server.go b/sdk/database/dbplugin/server.go
new file mode 100644
index 0000000..bf96a3b
--- /dev/null
+++ b/sdk/database/dbplugin/server.go
@@ -0,0 +1,54 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package dbplugin
+
+import (
+	"crypto/tls"
+	fmt "fmt"
+
+	plugin "github.com/hashicorp/go-plugin"
+	"github.com/hashicorp/vault/sdk/helper/pluginutil"
+)
+
+// Serve is called from within a plugin and wraps the provided
+// Database implementation in a gRPC database plugin server and starts an
+// RPC server.
+func Serve(db Database, tlsProvider func() (*tls.Config, error)) {
+	plugin.Serve(ServeConfig(db, tlsProvider))
+}
+
+func ServeConfig(db Database, tlsProvider func() (*tls.Config, error)) *plugin.ServeConfig {
+	err := pluginutil.OptionallyEnableMlock()
+	if err != nil {
+		fmt.Println(err)
+		return nil
+	}
+
+	// pluginSets is the map of plugins we can dispense.
+	pluginSets := map[int]plugin.PluginSet{
+		// Version 3 used to support both protocols. We want to keep it around
+		// since it's possible old plugins built against this version will still
+		// work with gRPC.
There is currently no difference between version 3 + // and version 4. + 3: { + "database": &GRPCDatabasePlugin{ + Impl: db, + }, + }, + 4: { + "database": &GRPCDatabasePlugin{ + Impl: db, + }, + }, + } + + conf := &plugin.ServeConfig{ + HandshakeConfig: handshakeConfig, + VersionedPlugins: pluginSets, + TLSProvider: tlsProvider, + GRPCServer: plugin.DefaultGRPCServer, + } + + return conf +} diff --git a/sdk/database/dbplugin/v5/conversions_test.go b/sdk/database/dbplugin/v5/conversions_test.go new file mode 100644 index 0000000..5e65c34 --- /dev/null +++ b/sdk/database/dbplugin/v5/conversions_test.go @@ -0,0 +1,530 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package dbplugin + +import ( + "fmt" + "reflect" + "strings" + "testing" + "time" + "unicode" + + "github.com/hashicorp/vault/sdk/database/dbplugin/v5/proto" + "google.golang.org/protobuf/types/known/structpb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +func TestConversionsHaveAllFields(t *testing.T) { + t.Run("initReqToProto", func(t *testing.T) { + req := InitializeRequest{ + Config: map[string]interface{}{ + "foo": map[string]interface{}{ + "bar": "baz", + }, + }, + VerifyConnection: true, + } + + protoReq, err := initReqToProto(req) + if err != nil { + t.Fatalf("Failed to convert request to proto request: %s", err) + } + + values := getAllGetterValues(protoReq) + if len(values) == 0 { + // Probably a test failure - the protos used in these tests should have Get functions on them + t.Fatalf("No values found from Get functions!") + } + + for _, gtr := range values { + err := assertAllFieldsSet(fmt.Sprintf("InitializeRequest.%s", gtr.name), gtr.value) + if err != nil { + t.Fatalf("%s", err) + } + } + }) + + t.Run("newUserReqToProto", func(t *testing.T) { + req := NewUserRequest{ + UsernameConfig: UsernameMetadata{ + DisplayName: "dispName", + RoleName: "roleName", + }, + Statements: Statements{ + Commands: []string{ + "statement", + }, + }, + RollbackStatements: Statements{ + Commands: []string{ + "rollback_statement", + }, + }, + CredentialType: CredentialTypeRSAPrivateKey, + PublicKey: []byte("-----BEGIN PUBLIC KEY-----"), + Password: "password", + Subject: "subject", + Expiration: time.Now(), + } + + protoReq, err := newUserReqToProto(req) + if err != nil { + t.Fatalf("Failed to convert request to proto request: %s", err) + } + + values := getAllGetterValues(protoReq) + if len(values) == 0 { + // Probably a test failure - the protos used in these tests should have Get functions on them + t.Fatalf("No values found from Get functions!") + } + + for _, gtr := range values { + err := assertAllFieldsSet(fmt.Sprintf("NewUserRequest.%s", gtr.name), gtr.value) + if err != nil { + t.Fatalf("%s", err) + } + } + }) + + t.Run("updateUserReqToProto", func(t *testing.T) { + req := UpdateUserRequest{ + Username: "username", + CredentialType: CredentialTypeRSAPrivateKey, + Password: &ChangePassword{ + NewPassword: "newpassword", + Statements: Statements{ + Commands: []string{ + "statement", + }, + }, + }, + PublicKey: &ChangePublicKey{ + NewPublicKey: []byte("-----BEGIN PUBLIC KEY-----"), + Statements: Statements{ + Commands: []string{ + "statement", + }, + }, + }, + Expiration: &ChangeExpiration{ + NewExpiration: time.Now(), + Statements: Statements{ + Commands: []string{ + "statement", + }, + }, + }, + } + + protoReq, err := updateUserReqToProto(req) + if err != nil { + t.Fatalf("Failed to convert request to proto request: %s", err) + } + + values := getAllGetterValues(protoReq) + if len(values) 
== 0 { + // Probably a test failure - the protos used in these tests should have Get functions on them + t.Fatalf("No values found from Get functions!") + } + + for _, gtr := range values { + err := assertAllFieldsSet(fmt.Sprintf("UpdateUserRequest.%s", gtr.name), gtr.value) + if err != nil { + t.Fatalf("%s", err) + } + } + }) + + t.Run("deleteUserReqToProto", func(t *testing.T) { + req := DeleteUserRequest{ + Username: "username", + Statements: Statements{ + Commands: []string{ + "statement", + }, + }, + } + + protoReq, err := deleteUserReqToProto(req) + if err != nil { + t.Fatalf("Failed to convert request to proto request: %s", err) + } + + values := getAllGetterValues(protoReq) + if len(values) == 0 { + // Probably a test failure - the protos used in these tests should have Get functions on them + t.Fatalf("No values found from Get functions!") + } + + for _, gtr := range values { + err := assertAllFieldsSet(fmt.Sprintf("DeleteUserRequest.%s", gtr.name), gtr.value) + if err != nil { + t.Fatalf("%s", err) + } + } + }) + + t.Run("getUpdateUserRequest", func(t *testing.T) { + req := &proto.UpdateUserRequest{ + Username: "username", + CredentialType: int32(CredentialTypeRSAPrivateKey), + Password: &proto.ChangePassword{ + NewPassword: "newpass", + Statements: &proto.Statements{ + Commands: []string{ + "statement", + }, + }, + }, + PublicKey: &proto.ChangePublicKey{ + NewPublicKey: []byte("-----BEGIN PUBLIC KEY-----"), + Statements: &proto.Statements{ + Commands: []string{ + "statement", + }, + }, + }, + Expiration: &proto.ChangeExpiration{ + NewExpiration: timestamppb.Now(), + Statements: &proto.Statements{ + Commands: []string{ + "statement", + }, + }, + }, + } + + protoReq, err := getUpdateUserRequest(req) + if err != nil { + t.Fatalf("Failed to convert request to proto request: %s", err) + } + + err = assertAllFieldsSet("proto.UpdateUserRequest", protoReq) + if err != nil { + t.Fatalf("%s", err) + } + }) +} + +type getter struct { + name string + value interface{} +} + +func getAllGetterValues(value interface{}) (values []getter) { + typ := reflect.TypeOf(value) + val := reflect.ValueOf(value) + for i := 0; i < typ.NumMethod(); i++ { + method := typ.Method(i) + if !strings.HasPrefix(method.Name, "Get") { + continue + } + valMethod := val.Method(i) + resp := valMethod.Call(nil) + getVal := resp[0].Interface() + gtr := getter{ + name: strings.TrimPrefix(method.Name, "Get"), + value: getVal, + } + values = append(values, gtr) + } + return values +} + +// Ensures the assertion works properly +func TestAssertAllFieldsSet(t *testing.T) { + type testCase struct { + value interface{} + expectErr bool + } + + tests := map[string]testCase{ + "zero int": { + value: 0, + expectErr: true, + }, + "non-zero int": { + value: 1, + expectErr: false, + }, + "zero float64": { + value: 0.0, + expectErr: true, + }, + "non-zero float64": { + value: 1.0, + expectErr: false, + }, + "empty string": { + value: "", + expectErr: true, + }, + "true boolean": { + value: true, + expectErr: false, + }, + "false boolean": { // False is an exception to the "is zero" rule + value: false, + expectErr: false, + }, + "blank struct": { + value: struct{}{}, + expectErr: true, + }, + "non-blank but empty struct": { + value: struct { + str string + }{ + str: "", + }, + expectErr: true, + }, + "non-empty string": { + value: "foo", + expectErr: false, + }, + "non-empty struct": { + value: struct { + str string + }{ + str: "foo", + }, + expectErr: false, + }, + "empty nested struct": { + value: struct { + Str string + Substruct 
struct { + Substr string + } + }{ + Str: "foo", + Substruct: struct { + Substr string + }{}, // Empty sub-field + }, + expectErr: true, + }, + "filled nested struct": { + value: struct { + str string + substruct struct { + substr string + } + }{ + str: "foo", + substruct: struct { + substr string + }{ + substr: "sub-foo", + }, + }, + expectErr: false, + }, + "nil map": { + value: map[string]string(nil), + expectErr: true, + }, + "empty map": { + value: map[string]string{}, + expectErr: true, + }, + "filled map": { + value: map[string]string{ + "foo": "bar", + "int": "42", + }, + expectErr: false, + }, + "map with empty string value": { + value: map[string]string{ + "foo": "", + }, + expectErr: true, + }, + "nested map with empty string value": { + value: map[string]interface{}{ + "bar": "baz", + "foo": map[string]interface{}{ + "subfoo": "", + }, + }, + expectErr: true, + }, + "nil slice": { + value: []string(nil), + expectErr: true, + }, + "empty slice": { + value: []string{}, + expectErr: true, + }, + "filled slice": { + value: []string{ + "foo", + }, + expectErr: false, + }, + "slice with empty string value": { + value: []string{ + "", + }, + expectErr: true, + }, + "empty structpb": { + value: newStructPb(t, map[string]interface{}{}), + expectErr: true, + }, + "filled structpb": { + value: newStructPb(t, map[string]interface{}{ + "foo": "bar", + "int": 42, + }), + expectErr: false, + }, + + "pointer to zero int": { + value: intPtr(0), + expectErr: true, + }, + "pointer to non-zero int": { + value: intPtr(1), + expectErr: false, + }, + "pointer to zero float64": { + value: float64Ptr(0.0), + expectErr: true, + }, + "pointer to non-zero float64": { + value: float64Ptr(1.0), + expectErr: false, + }, + "pointer to nil string": { + value: new(string), + expectErr: true, + }, + "pointer to non-nil string": { + value: strPtr("foo"), + expectErr: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + err := assertAllFieldsSet("", test.value) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + }) + } +} + +func assertAllFieldsSet(name string, val interface{}) error { + if val == nil { + return fmt.Errorf("value is nil") + } + + rVal := reflect.ValueOf(val) + return assertAllFieldsSetValue(name, rVal) +} + +func assertAllFieldsSetValue(name string, rVal reflect.Value) error { + // All booleans are allowed - we don't have a way of differentiating between + // and intentional false and a missing false + if rVal.Kind() == reflect.Bool { + return nil + } + + // Primitives fall through here + if rVal.IsZero() { + return fmt.Errorf("%s is zero", name) + } + + switch rVal.Kind() { + case reflect.Ptr, reflect.Interface: + return assertAllFieldsSetValue(name, rVal.Elem()) + case reflect.Struct: + return assertAllFieldsSetStruct(name, rVal) + case reflect.Map: + if rVal.Len() == 0 { + return fmt.Errorf("%s (map type) is empty", name) + } + + iter := rVal.MapRange() + for iter.Next() { + k := iter.Key() + v := iter.Value() + + err := assertAllFieldsSetValue(fmt.Sprintf("%s[%s]", name, k), v) + if err != nil { + return err + } + } + case reflect.Slice: + if rVal.Len() == 0 { + return fmt.Errorf("%s (slice type) is empty", name) + } + for i := 0; i < rVal.Len(); i++ { + sliceVal := rVal.Index(i) + err := assertAllFieldsSetValue(fmt.Sprintf("%s[%d]", name, i), sliceVal) + if err != nil { + return err + } + } + } + return nil +} + +func assertAllFieldsSetStruct(name 
string, rVal reflect.Value) error { + switch rVal.Type() { + case reflect.TypeOf(timestamppb.Timestamp{}): + ts := rVal.Interface().(timestamppb.Timestamp) + if ts.AsTime().IsZero() { + return fmt.Errorf("%s is zero", name) + } + return nil + default: + for i := 0; i < rVal.NumField(); i++ { + field := rVal.Field(i) + fieldName := rVal.Type().Field(i) + + // Skip fields that aren't exported + if unicode.IsLower([]rune(fieldName.Name)[0]) { + continue + } + + err := assertAllFieldsSetValue(fmt.Sprintf("%s.%s", name, fieldName.Name), field) + if err != nil { + return err + } + } + return nil + } +} + +func intPtr(i int) *int { + return &i +} + +func float64Ptr(f float64) *float64 { + return &f +} + +func strPtr(str string) *string { + return &str +} + +func newStructPb(t *testing.T, m map[string]interface{}) *structpb.Struct { + t.Helper() + + s, err := structpb.NewStruct(m) + if err != nil { + t.Fatalf("Failed to convert map to struct: %s", err) + } + return s +} diff --git a/sdk/database/dbplugin/v5/database.go b/sdk/database/dbplugin/v5/database.go new file mode 100644 index 0000000..065aaef --- /dev/null +++ b/sdk/database/dbplugin/v5/database.go @@ -0,0 +1,256 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package dbplugin + +import ( + "context" + "time" +) + +// Database to manipulate users within an external system (typically a database). +type Database interface { + // Initialize the database plugin. This is the equivalent of a constructor for the + // database object itself. + Initialize(ctx context.Context, req InitializeRequest) (InitializeResponse, error) + + // NewUser creates a new user within the database. This user is temporary in that it + // will exist until the TTL expires. + NewUser(ctx context.Context, req NewUserRequest) (NewUserResponse, error) + + // UpdateUser updates an existing user within the database. + UpdateUser(ctx context.Context, req UpdateUserRequest) (UpdateUserResponse, error) + + // DeleteUser from the database. This should not error if the user didn't + // exist prior to this call. + DeleteUser(ctx context.Context, req DeleteUserRequest) (DeleteUserResponse, error) + + // Type returns the Name for the particular database backend implementation. + // This type name is usually set as a constant within the database backend + // implementation, e.g. "mysql" for the MySQL database backend. This is used + // for things like metrics and logging. No behavior is switched on this. + Type() (string, error) + + // Close attempts to close the underlying database connection that was + // established by the backend. + Close() error +} + +// /////////////////////////////////////////////////////////////////////////// +// Database Request & Response Objects +// These request and response objects are *not* protobuf types because gRPC does not +// support all types that we need in a nice way. For instance, gRPC does not support +// map[string]interface{}. It does have an `Any` type, but converting it to a map +// requires extensive use of reflection and knowing what types to support ahead of +// time. Instead these types are made as user-friendly as possible so the conversion +// between protobuf types and request/response objects is handled by Vault developers +// rather than needing to be handled by external plugin developers. 
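The comment above explains why these request objects are hand-converted rather than protobuf-generated: gRPC has no direct map[string]interface{} type. The conversion the v5 code leans on is the google.protobuf.Struct well-known type; a minimal sketch of the round trip, with illustrative config contents:

```go
package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	config := map[string]interface{}{
		"connection_url":       "postgres://user@localhost:5432/db", // illustrative
		"max_open_connections": 5,
	}

	// Outbound: plain map to a protobuf Struct for the wire.
	st, err := structpb.NewStruct(config)
	if err != nil {
		log.Fatal(err)
	}

	// Inbound: protobuf Struct back to a plain map on the receiving side.
	// Note that numbers come back as float64, a JSON-like flattening.
	roundTripped := st.AsMap()
	fmt.Println(roundTripped["max_open_connections"]) // 5 (as float64)
}
```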
+// ///////////////////////////////////////////////////////////////////////////
+
+// ///////////////////////////////////////////////////////
+// Initialize()
+// ///////////////////////////////////////////////////////
+
+// InitializeRequest contains all information needed to initialize a database plugin.
+type InitializeRequest struct {
+	// Config to initialize the database with. This can include things like connection details,
+	// a "root" username & password, etc. This will not include all configuration items specified
+	// when configuring the database. Some values will be stripped out by the database engine
+	// prior to being passed to the plugin.
+	Config map[string]interface{}
+
+	// VerifyConnection during initialization. If true, a connection should be made to the
+	// database to verify the connection can be made. If false, no connection should be made
+	// on initialization.
+	VerifyConnection bool
+}
+
+// InitializeResponse returns any information Vault needs to know after initializing
+// a database plugin.
+type InitializeResponse struct {
+	// Config that should be saved in Vault. This may differ from the config in the request,
+	// but should contain everything required to Initialize the database.
+	// REQUIRED in order to save the configuration into Vault after initialization
+	Config map[string]interface{}
+}
+
+// SupportedCredentialTypesKey is used to get and set the supported
+// CredentialType values in database plugins and Vault.
+const SupportedCredentialTypesKey = "supported_credential_types"
+
+// SetSupportedCredentialTypes sets the CredentialType values that are
+// supported by the database plugin. It can be used by database plugins
+// to communicate which CredentialType values they support managing.
+func (ir InitializeResponse) SetSupportedCredentialTypes(credTypes []CredentialType) {
+	sct := make([]interface{}, 0, len(credTypes))
+	for _, t := range credTypes {
+		sct = append(sct, t.String())
+	}
+
+	ir.Config[SupportedCredentialTypesKey] = sct
+}
+
+// ///////////////////////////////////////////////////////
+// NewUser()
+// ///////////////////////////////////////////////////////
+
+// NewUserRequest is a request to create a new user.
+type NewUserRequest struct {
+	// UsernameConfig is metadata that can be used to generate a username
+	// within the database plugin
+	UsernameConfig UsernameMetadata
+
+	// Statements is an ordered list of commands to run within the database when
+	// creating a new user. This frequently includes permissions to give the
+	// user or similar actions.
+	Statements Statements
+
+	// RollbackStatements is an ordered list of commands to run within the database
+	// if the new user creation process fails.
+	RollbackStatements Statements
+
+	// CredentialType is the type of credential to use when creating a user.
+	// Respective fields for the credential type will contain the credential
+	// value that was generated by Vault.
+	CredentialType CredentialType
+
+	// Password credential to use when creating the user.
+	// Value is set when the credential type is CredentialTypePassword.
+	Password string
+
+	// PublicKey credential to use when creating the user.
+	// The value is a PKIX marshaled, PEM encoded public key.
+	// The value is set when the credential type is CredentialTypeRSAPrivateKey.
+	PublicKey []byte
+
+	// Subject is the distinguished name for the client certificate credential.
+	// Value is set when the credential type is CredentialTypeClientCertificate.
+	Subject string
+
+	// Expiration of the user.
Not all database plugins will support this. + Expiration time.Time +} + +// UsernameMetadata is metadata the database plugin can use to generate a username +type UsernameMetadata struct { + DisplayName string + RoleName string +} + +// NewUserResponse returns any information Vault needs to know after creating a new user. +type NewUserResponse struct { + // Username of the user created within the database. + // REQUIRED so Vault knows the name of the user that was created + Username string +} + +// CredentialType is a type of database credential. +type CredentialType int + +const ( + CredentialTypePassword CredentialType = iota + CredentialTypeRSAPrivateKey + CredentialTypeClientCertificate +) + +func (k CredentialType) String() string { + switch k { + case CredentialTypePassword: + return "password" + case CredentialTypeRSAPrivateKey: + return "rsa_private_key" + case CredentialTypeClientCertificate: + return "client_certificate" + default: + return "unknown" + } +} + +// /////////////////////////////////////////////////////// +// UpdateUser() +// /////////////////////////////////////////////////////// + +type UpdateUserRequest struct { + // Username to make changes to. + Username string + + // CredentialType is the type of credential to use when updating a user. + // Respective fields for the credential type will contain the credential + // value that was generated by Vault. + CredentialType CredentialType + + // Password indicates the new password to change to. + // The value is set when the credential type is CredentialTypePassword. + // If nil, no change is requested. + Password *ChangePassword + + // PublicKey indicates the new public key to change to. + // The value is set when the credential type is CredentialTypeRSAPrivateKey. + // If nil, no change is requested. + PublicKey *ChangePublicKey + + // Expiration indicates the new expiration date to change to. + // If nil, no change is requested. + Expiration *ChangeExpiration +} + +// ChangePublicKey of a given user +type ChangePublicKey struct { + // NewPublicKey is the new public key credential for the user. + // The value is a PKIX marshaled, PEM encoded public key. + NewPublicKey []byte + + // Statements is an ordered list of commands to run within the database + // when changing the user's public key credential. + Statements Statements +} + +// ChangePassword of a given user +type ChangePassword struct { + // NewPassword for the user + NewPassword string + + // Statements is an ordered list of commands to run within the database + // when changing the user's password. + Statements Statements +} + +// ChangeExpiration of a given user +type ChangeExpiration struct { + // NewExpiration of the user + NewExpiration time.Time + + // Statements is an ordered list of commands to run within the database + // when changing the user's expiration. + Statements Statements +} + +type UpdateUserResponse struct{} + +// /////////////////////////////////////////////////////// +// DeleteUser() +// /////////////////////////////////////////////////////// + +type DeleteUserRequest struct { + // Username to delete from the database + Username string + + // Statements is an ordered list of commands to run within the database + // when deleting a user. 
+ Statements Statements +} + +type DeleteUserResponse struct{} + +// /////////////////////////////////////////////////////// +// Used across multiple functions +// /////////////////////////////////////////////////////// + +// Statements wraps a collection of statements to run in a database when an +// operation is performed (create, update, etc.). This is a struct rather than +// a string slice so we can easily add more information to this in the future. +type Statements struct { + // Commands is an ordered list of commands to execute in the database. + // These commands may include templated fields such as {{username}} and {{password}} + Commands []string +} diff --git a/sdk/database/dbplugin/v5/grpc_client.go b/sdk/database/dbplugin/v5/grpc_client.go new file mode 100644 index 0000000..9b0b984 --- /dev/null +++ b/sdk/database/dbplugin/v5/grpc_client.go @@ -0,0 +1,297 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package dbplugin + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/golang/protobuf/ptypes" + "github.com/hashicorp/vault/sdk/database/dbplugin/v5/proto" + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/hashicorp/vault/sdk/logical" +) + +var ( + _ Database = gRPCClient{} + _ logical.PluginVersioner = gRPCClient{} + + ErrPluginShutdown = errors.New("plugin shutdown") +) + +type gRPCClient struct { + client proto.DatabaseClient + versionClient logical.PluginVersionClient + doneCtx context.Context +} + +func (c gRPCClient) PluginVersion() logical.PluginVersion { + version, _ := c.versionClient.Version(context.Background(), &logical.Empty{}) + if version != nil { + return logical.PluginVersion{Version: version.PluginVersion} + } + return logical.EmptyPluginVersion +} + +func (c gRPCClient) Initialize(ctx context.Context, req InitializeRequest) (InitializeResponse, error) { + rpcReq, err := initReqToProto(req) + if err != nil { + return InitializeResponse{}, err + } + + rpcResp, err := c.client.Initialize(ctx, rpcReq) + if err != nil { + return InitializeResponse{}, fmt.Errorf("unable to initialize: %s", err.Error()) + } + + return initRespFromProto(rpcResp) +} + +func initReqToProto(req InitializeRequest) (*proto.InitializeRequest, error) { + config, err := mapToStruct(req.Config) + if err != nil { + return nil, fmt.Errorf("unable to marshal config: %w", err) + } + + rpcReq := &proto.InitializeRequest{ + ConfigData: config, + VerifyConnection: req.VerifyConnection, + } + return rpcReq, nil +} + +func initRespFromProto(rpcResp *proto.InitializeResponse) (InitializeResponse, error) { + newConfig := structToMap(rpcResp.GetConfigData()) + + resp := InitializeResponse{ + Config: newConfig, + } + return resp, nil +} + +func (c gRPCClient) NewUser(ctx context.Context, req NewUserRequest) (NewUserResponse, error) { + ctx, cancel := context.WithCancel(ctx) + quitCh := pluginutil.CtxCancelIfCanceled(cancel, c.doneCtx) + defer close(quitCh) + defer cancel() + + rpcReq, err := newUserReqToProto(req) + if err != nil { + return NewUserResponse{}, err + } + + rpcResp, err := c.client.NewUser(ctx, rpcReq) + if err != nil { + if c.doneCtx.Err() != nil { + return NewUserResponse{}, ErrPluginShutdown + } + return NewUserResponse{}, fmt.Errorf("unable to create new user: %w", err) + } + + return newUserRespFromProto(rpcResp) +} + +func newUserReqToProto(req NewUserRequest) (*proto.NewUserRequest, error) { + switch req.CredentialType { + case CredentialTypePassword: + if req.Password == "" { + return nil, fmt.Errorf("missing password 
credential") + } + case CredentialTypeRSAPrivateKey: + if len(req.PublicKey) == 0 { + return nil, fmt.Errorf("missing public key credential") + } + case CredentialTypeClientCertificate: + if req.Subject == "" { + return nil, fmt.Errorf("missing certificate subject") + } + default: + return nil, fmt.Errorf("unknown credential type") + } + + expiration, err := ptypes.TimestampProto(req.Expiration) + if err != nil { + return nil, fmt.Errorf("unable to marshal expiration date: %w", err) + } + + rpcReq := &proto.NewUserRequest{ + UsernameConfig: &proto.UsernameConfig{ + DisplayName: req.UsernameConfig.DisplayName, + RoleName: req.UsernameConfig.RoleName, + }, + CredentialType: int32(req.CredentialType), + Password: req.Password, + PublicKey: req.PublicKey, + Subject: req.Subject, + Expiration: expiration, + Statements: &proto.Statements{ + Commands: req.Statements.Commands, + }, + RollbackStatements: &proto.Statements{ + Commands: req.RollbackStatements.Commands, + }, + } + return rpcReq, nil +} + +func newUserRespFromProto(rpcResp *proto.NewUserResponse) (NewUserResponse, error) { + resp := NewUserResponse{ + Username: rpcResp.GetUsername(), + } + return resp, nil +} + +func (c gRPCClient) UpdateUser(ctx context.Context, req UpdateUserRequest) (UpdateUserResponse, error) { + rpcReq, err := updateUserReqToProto(req) + if err != nil { + return UpdateUserResponse{}, err + } + + rpcResp, err := c.client.UpdateUser(ctx, rpcReq) + if err != nil { + if c.doneCtx.Err() != nil { + return UpdateUserResponse{}, ErrPluginShutdown + } + + return UpdateUserResponse{}, fmt.Errorf("unable to update user: %w", err) + } + + return updateUserRespFromProto(rpcResp) +} + +func updateUserReqToProto(req UpdateUserRequest) (*proto.UpdateUserRequest, error) { + if req.Username == "" { + return nil, fmt.Errorf("missing username") + } + + if (req.Password == nil || req.Password.NewPassword == "") && + (req.PublicKey == nil || len(req.PublicKey.NewPublicKey) == 0) && + (req.Expiration == nil || req.Expiration.NewExpiration.IsZero()) { + return nil, fmt.Errorf("missing changes") + } + + expiration, err := expirationToProto(req.Expiration) + if err != nil { + return nil, fmt.Errorf("unable to parse new expiration date: %w", err) + } + + var password *proto.ChangePassword + if req.Password != nil && req.Password.NewPassword != "" { + password = &proto.ChangePassword{ + NewPassword: req.Password.NewPassword, + Statements: &proto.Statements{ + Commands: req.Password.Statements.Commands, + }, + } + } + + var publicKey *proto.ChangePublicKey + if req.PublicKey != nil && len(req.PublicKey.NewPublicKey) > 0 { + publicKey = &proto.ChangePublicKey{ + NewPublicKey: req.PublicKey.NewPublicKey, + Statements: &proto.Statements{ + Commands: req.PublicKey.Statements.Commands, + }, + } + } + + rpcReq := &proto.UpdateUserRequest{ + Username: req.Username, + CredentialType: int32(req.CredentialType), + Password: password, + PublicKey: publicKey, + Expiration: expiration, + } + return rpcReq, nil +} + +func updateUserRespFromProto(rpcResp *proto.UpdateUserResponse) (UpdateUserResponse, error) { + // Placeholder for future conversion if data is returned + return UpdateUserResponse{}, nil +} + +func expirationToProto(exp *ChangeExpiration) (*proto.ChangeExpiration, error) { + if exp == nil { + return nil, nil + } + + expiration, err := ptypes.TimestampProto(exp.NewExpiration) + if err != nil { + return nil, err + } + + changeExp := &proto.ChangeExpiration{ + NewExpiration: expiration, + Statements: &proto.Statements{ + Commands: 
exp.Statements.Commands, + }, + } + return changeExp, nil +} + +func (c gRPCClient) DeleteUser(ctx context.Context, req DeleteUserRequest) (DeleteUserResponse, error) { + rpcReq, err := deleteUserReqToProto(req) + if err != nil { + return DeleteUserResponse{}, err + } + + rpcResp, err := c.client.DeleteUser(ctx, rpcReq) + if err != nil { + if c.doneCtx.Err() != nil { + return DeleteUserResponse{}, ErrPluginShutdown + } + return DeleteUserResponse{}, fmt.Errorf("unable to delete user: %w", err) + } + + return deleteUserRespFromProto(rpcResp) +} + +func deleteUserReqToProto(req DeleteUserRequest) (*proto.DeleteUserRequest, error) { + if req.Username == "" { + return nil, fmt.Errorf("missing username") + } + + rpcReq := &proto.DeleteUserRequest{ + Username: req.Username, + Statements: &proto.Statements{ + Commands: req.Statements.Commands, + }, + } + return rpcReq, nil +} + +func deleteUserRespFromProto(rpcResp *proto.DeleteUserResponse) (DeleteUserResponse, error) { + // Placeholder for future conversion if data is returned + return DeleteUserResponse{}, nil +} + +func (c gRPCClient) Type() (string, error) { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + typeResp, err := c.client.Type(ctx, &proto.Empty{}) + if err != nil { + if c.doneCtx.Err() != nil { + return "", ErrPluginShutdown + } + return "", fmt.Errorf("unable to get database plugin type: %w", err) + } + return typeResp.GetType(), nil +} + +func (c gRPCClient) Close() error { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + _, err := c.client.Close(ctx, &proto.Empty{}) + if err != nil { + if c.doneCtx.Err() != nil { + return ErrPluginShutdown + } + return err + } + return nil +} diff --git a/sdk/database/dbplugin/v5/grpc_client_test.go b/sdk/database/dbplugin/v5/grpc_client_test.go new file mode 100644 index 0000000..05ecb96 --- /dev/null +++ b/sdk/database/dbplugin/v5/grpc_client_test.go @@ -0,0 +1,564 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package dbplugin + +import ( + "context" + "encoding/json" + "errors" + "reflect" + "testing" + "time" + + "github.com/hashicorp/vault/sdk/database/dbplugin/v5/proto" + "google.golang.org/grpc" +) + +func TestGRPCClient_Initialize(t *testing.T) { + type testCase struct { + client proto.DatabaseClient + req InitializeRequest + expectedResp InitializeResponse + assertErr errorAssertion + } + + tests := map[string]testCase{ + "bad config": { + client: fakeClient{}, + req: InitializeRequest{ + Config: map[string]interface{}{ + "foo": badJSONValue{}, + }, + }, + assertErr: assertErrNotNil, + }, + "database error": { + client: fakeClient{ + initErr: errors.New("initialize error"), + }, + req: InitializeRequest{ + Config: map[string]interface{}{ + "foo": "bar", + }, + }, + assertErr: assertErrNotNil, + }, + "happy path": { + client: fakeClient{ + initResp: &proto.InitializeResponse{ + ConfigData: marshal(t, map[string]interface{}{ + "foo": "bar", + "baz": "biz", + }), + }, + }, + req: InitializeRequest{ + Config: map[string]interface{}{ + "foo": "bar", + }, + }, + expectedResp: InitializeResponse{ + Config: map[string]interface{}{ + "foo": "bar", + "baz": "biz", + }, + }, + assertErr: assertErrNil, + }, + "JSON number type in initialize request": { + client: fakeClient{ + initResp: &proto.InitializeResponse{ + ConfigData: marshal(t, map[string]interface{}{ + "foo": "bar", + "max": "10", + }), + }, + }, + req: InitializeRequest{ + Config: map[string]interface{}{ + "foo": "bar", + "max": json.Number("10"), + }, + }, + expectedResp: InitializeResponse{ + Config: map[string]interface{}{ + "foo": "bar", + "max": "10", + }, + }, + assertErr: assertErrNil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + c := gRPCClient{ + client: test.client, + doneCtx: nil, + } + + // Context doesn't need to timeout since this is just passed through + ctx := context.Background() + + resp, err := c.Initialize(ctx, test.req) + test.assertErr(t, err) + + if !reflect.DeepEqual(resp, test.expectedResp) { + t.Fatalf("Actual response: %#v\nExpected response: %#v", resp, test.expectedResp) + } + }) + } +} + +func TestGRPCClient_NewUser(t *testing.T) { + runningCtx := context.Background() + cancelledCtx, cancel := context.WithCancel(context.Background()) + cancel() + + type testCase struct { + client proto.DatabaseClient + req NewUserRequest + doneCtx context.Context + expectedResp NewUserResponse + assertErr errorAssertion + } + + tests := map[string]testCase{ + "missing password": { + client: fakeClient{}, + req: NewUserRequest{ + Password: "", + Expiration: time.Now(), + }, + doneCtx: runningCtx, + assertErr: assertErrNotNil, + }, + "bad expiration": { + client: fakeClient{}, + req: NewUserRequest{ + Password: "njkvcb8y934u90grsnkjl", + Expiration: invalidExpiration, + }, + doneCtx: runningCtx, + assertErr: assertErrNotNil, + }, + "database error": { + client: fakeClient{ + newUserErr: errors.New("new user error"), + }, + req: NewUserRequest{ + Password: "njkvcb8y934u90grsnkjl", + Expiration: time.Now(), + }, + doneCtx: runningCtx, + assertErr: assertErrNotNil, + }, + "plugin shut down": { + client: fakeClient{ + newUserErr: errors.New("new user error"), + }, + req: NewUserRequest{ + Password: "njkvcb8y934u90grsnkjl", + Expiration: time.Now(), + }, + doneCtx: cancelledCtx, + assertErr: assertErrEquals(ErrPluginShutdown), + }, + "happy path": { + client: fakeClient{ + newUserResp: &proto.NewUserResponse{ + Username: "new_user", + }, + }, + req: 
NewUserRequest{ + Password: "njkvcb8y934u90grsnkjl", + Expiration: time.Now(), + }, + doneCtx: runningCtx, + expectedResp: NewUserResponse{ + Username: "new_user", + }, + assertErr: assertErrNil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + c := gRPCClient{ + client: test.client, + doneCtx: test.doneCtx, + } + + ctx := context.Background() + + resp, err := c.NewUser(ctx, test.req) + test.assertErr(t, err) + + if !reflect.DeepEqual(resp, test.expectedResp) { + t.Fatalf("Actual response: %#v\nExpected response: %#v", resp, test.expectedResp) + } + }) + } +} + +func TestGRPCClient_UpdateUser(t *testing.T) { + runningCtx := context.Background() + cancelledCtx, cancel := context.WithCancel(context.Background()) + cancel() + + type testCase struct { + client proto.DatabaseClient + req UpdateUserRequest + doneCtx context.Context + assertErr errorAssertion + } + + tests := map[string]testCase{ + "missing username": { + client: fakeClient{}, + req: UpdateUserRequest{}, + doneCtx: runningCtx, + assertErr: assertErrNotNil, + }, + "missing changes": { + client: fakeClient{}, + req: UpdateUserRequest{ + Username: "user", + }, + doneCtx: runningCtx, + assertErr: assertErrNotNil, + }, + "empty password": { + client: fakeClient{}, + req: UpdateUserRequest{ + Username: "user", + Password: &ChangePassword{ + NewPassword: "", + }, + }, + doneCtx: runningCtx, + assertErr: assertErrNotNil, + }, + "zero expiration": { + client: fakeClient{}, + req: UpdateUserRequest{ + Username: "user", + Expiration: &ChangeExpiration{ + NewExpiration: time.Time{}, + }, + }, + doneCtx: runningCtx, + assertErr: assertErrNotNil, + }, + "bad expiration": { + client: fakeClient{}, + req: UpdateUserRequest{ + Username: "user", + Expiration: &ChangeExpiration{ + NewExpiration: invalidExpiration, + }, + }, + doneCtx: runningCtx, + assertErr: assertErrNotNil, + }, + "database error": { + client: fakeClient{ + updateUserErr: errors.New("update user error"), + }, + req: UpdateUserRequest{ + Username: "user", + Password: &ChangePassword{ + NewPassword: "asdf", + }, + }, + doneCtx: runningCtx, + assertErr: assertErrNotNil, + }, + "plugin shut down": { + client: fakeClient{ + updateUserErr: errors.New("update user error"), + }, + req: UpdateUserRequest{ + Username: "user", + Password: &ChangePassword{ + NewPassword: "asdf", + }, + }, + doneCtx: cancelledCtx, + assertErr: assertErrEquals(ErrPluginShutdown), + }, + "happy path - change password": { + client: fakeClient{}, + req: UpdateUserRequest{ + Username: "user", + Password: &ChangePassword{ + NewPassword: "asdf", + }, + }, + doneCtx: runningCtx, + assertErr: assertErrNil, + }, + "happy path - change expiration": { + client: fakeClient{}, + req: UpdateUserRequest{ + Username: "user", + Expiration: &ChangeExpiration{ + NewExpiration: time.Now(), + }, + }, + doneCtx: runningCtx, + assertErr: assertErrNil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + c := gRPCClient{ + client: test.client, + doneCtx: test.doneCtx, + } + + ctx := context.Background() + + _, err := c.UpdateUser(ctx, test.req) + test.assertErr(t, err) + }) + } +} + +func TestGRPCClient_DeleteUser(t *testing.T) { + runningCtx := context.Background() + cancelledCtx, cancel := context.WithCancel(context.Background()) + cancel() + + type testCase struct { + client proto.DatabaseClient + req DeleteUserRequest + doneCtx context.Context + assertErr errorAssertion + } + + tests := map[string]testCase{ + "missing username": { + client: fakeClient{}, + req: 
DeleteUserRequest{}, + doneCtx: runningCtx, + assertErr: assertErrNotNil, + }, + "database error": { + client: fakeClient{ + deleteUserErr: errors.New("delete user error'"), + }, + req: DeleteUserRequest{ + Username: "user", + }, + doneCtx: runningCtx, + assertErr: assertErrNotNil, + }, + "plugin shut down": { + client: fakeClient{ + deleteUserErr: errors.New("delete user error'"), + }, + req: DeleteUserRequest{ + Username: "user", + }, + doneCtx: cancelledCtx, + assertErr: assertErrEquals(ErrPluginShutdown), + }, + "happy path": { + client: fakeClient{}, + req: DeleteUserRequest{ + Username: "user", + }, + doneCtx: runningCtx, + assertErr: assertErrNil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + c := gRPCClient{ + client: test.client, + doneCtx: test.doneCtx, + } + + ctx := context.Background() + + _, err := c.DeleteUser(ctx, test.req) + test.assertErr(t, err) + }) + } +} + +func TestGRPCClient_Type(t *testing.T) { + runningCtx := context.Background() + cancelledCtx, cancel := context.WithCancel(context.Background()) + cancel() + + type testCase struct { + client proto.DatabaseClient + doneCtx context.Context + expectedType string + assertErr errorAssertion + } + + tests := map[string]testCase{ + "database error": { + client: fakeClient{ + typeErr: errors.New("type error"), + }, + doneCtx: runningCtx, + assertErr: assertErrNotNil, + }, + "plugin shut down": { + client: fakeClient{ + typeErr: errors.New("type error"), + }, + doneCtx: cancelledCtx, + assertErr: assertErrEquals(ErrPluginShutdown), + }, + "happy path": { + client: fakeClient{ + typeResp: &proto.TypeResponse{ + Type: "test type", + }, + }, + doneCtx: runningCtx, + expectedType: "test type", + assertErr: assertErrNil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + c := gRPCClient{ + client: test.client, + doneCtx: test.doneCtx, + } + + dbType, err := c.Type() + test.assertErr(t, err) + + if dbType != test.expectedType { + t.Fatalf("Actual type: %s Expected type: %s", dbType, test.expectedType) + } + }) + } +} + +func TestGRPCClient_Close(t *testing.T) { + runningCtx := context.Background() + cancelledCtx, cancel := context.WithCancel(context.Background()) + cancel() + + type testCase struct { + client proto.DatabaseClient + doneCtx context.Context + assertErr errorAssertion + } + + tests := map[string]testCase{ + "database error": { + client: fakeClient{ + typeErr: errors.New("type error"), + }, + doneCtx: runningCtx, + assertErr: assertErrNotNil, + }, + "plugin shut down": { + client: fakeClient{ + typeErr: errors.New("type error"), + }, + doneCtx: cancelledCtx, + assertErr: assertErrEquals(ErrPluginShutdown), + }, + "happy path": { + client: fakeClient{}, + doneCtx: runningCtx, + assertErr: assertErrNil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + c := gRPCClient{ + client: test.client, + doneCtx: test.doneCtx, + } + + err := c.Close() + test.assertErr(t, err) + }) + } +} + +type errorAssertion func(*testing.T, error) + +func assertErrNotNil(t *testing.T, err error) { + t.Helper() + if err == nil { + t.Fatalf("err expected, got nil") + } +} + +func assertErrNil(t *testing.T, err error) { + t.Helper() + if err != nil { + t.Fatalf("no error expected, got: %s", err) + } +} + +func assertErrEquals(expectedErr error) errorAssertion { + return func(t *testing.T, err error) { + t.Helper() + if err != expectedErr { + t.Fatalf("Actual err: %#v Expected err: %#v", err, expectedErr) + } + } +} + +var _ proto.DatabaseClient = 
fakeClient{}
+
+type fakeClient struct {
+ initResp *proto.InitializeResponse
+ initErr error
+
+ newUserResp *proto.NewUserResponse
+ newUserErr error
+
+ updateUserResp *proto.UpdateUserResponse
+ updateUserErr error
+
+ deleteUserResp *proto.DeleteUserResponse
+ deleteUserErr error
+
+ typeResp *proto.TypeResponse
+ typeErr error
+
+ closeErr error
+}
+
+func (f fakeClient) Initialize(context.Context, *proto.InitializeRequest, ...grpc.CallOption) (*proto.InitializeResponse, error) {
+ return f.initResp, f.initErr
+}
+
+func (f fakeClient) NewUser(context.Context, *proto.NewUserRequest, ...grpc.CallOption) (*proto.NewUserResponse, error) {
+ return f.newUserResp, f.newUserErr
+}
+
+func (f fakeClient) UpdateUser(context.Context, *proto.UpdateUserRequest, ...grpc.CallOption) (*proto.UpdateUserResponse, error) {
+ return f.updateUserResp, f.updateUserErr
+}
+
+func (f fakeClient) DeleteUser(context.Context, *proto.DeleteUserRequest, ...grpc.CallOption) (*proto.DeleteUserResponse, error) {
+ return f.deleteUserResp, f.deleteUserErr
+}
+
+func (f fakeClient) Type(context.Context, *proto.Empty, ...grpc.CallOption) (*proto.TypeResponse, error) {
+ return f.typeResp, f.typeErr
+}
+
+func (f fakeClient) Close(context.Context, *proto.Empty, ...grpc.CallOption) (*proto.Empty, error) {
+ return &proto.Empty{}, f.typeErr
+}
diff --git a/sdk/database/dbplugin/v5/grpc_database_plugin.go b/sdk/database/dbplugin/v5/grpc_database_plugin.go
new file mode 100644
index 0000000..b428d4c
--- /dev/null
+++ b/sdk/database/dbplugin/v5/grpc_database_plugin.go
@@ -0,0 +1,72 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package dbplugin
+
+import (
+ "context"
+
+ "github.com/hashicorp/go-plugin"
+ "github.com/hashicorp/vault/sdk/database/dbplugin/v5/proto"
+ "github.com/hashicorp/vault/sdk/helper/pluginutil"
+ "github.com/hashicorp/vault/sdk/logical"
+ "google.golang.org/grpc"
+)
+
+// HandshakeConfig is used to do a basic handshake between
+// a plugin and host. If the handshake fails, a user-friendly error is shown.
+// This prevents users from executing bad plugins or executing a plugin
+// directly. It is a UX feature, not a security feature.
+var HandshakeConfig = plugin.HandshakeConfig{
+ MagicCookieKey: "VAULT_DATABASE_PLUGIN",
+ MagicCookieValue: "926a0820-aea2-be28-51d6-83cdf00e8edb",
+}
+
+// Factory is the factory function to create a dbplugin Database.
+type Factory func() (interface{}, error)
+
+type GRPCDatabasePlugin struct {
+ FactoryFunc Factory
+ Impl Database
+
+ // Embedding this will disable the netRPC protocol
+ plugin.NetRPCUnsupportedPlugin
+}
+
+var (
+ _ plugin.Plugin = &GRPCDatabasePlugin{}
+ _ plugin.GRPCPlugin = &GRPCDatabasePlugin{}
+)
+
+func (d GRPCDatabasePlugin) GRPCServer(_ *plugin.GRPCBroker, s *grpc.Server) error {
+ var server gRPCServer
+
+ if d.Impl != nil {
+ server = gRPCServer{singleImpl: d.Impl}
+ } else {
+ // multiplexing is supported
+ server = gRPCServer{
+ factoryFunc: d.FactoryFunc,
+ instances: make(map[string]Database),
+ }
+
+ // Multiplexing is enabled for this plugin; register the server so we
+ // can tell the client in Vault. 
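+ // With multiplexing, Vault starts a single plugin process and routes each
+ // mount's requests to its own Database instance, keyed by the multiplexing
+ // ID that Vault sends in the gRPC request metadata (see
+ // pluginutil.GetMultiplexIDFromContext in grpc_server.go).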
+ pluginutil.RegisterPluginMultiplexingServer(s, pluginutil.PluginMultiplexingServerImpl{ + Supported: true, + }) + } + + proto.RegisterDatabaseServer(s, &server) + logical.RegisterPluginVersionServer(s, &server) + return nil +} + +func (GRPCDatabasePlugin) GRPCClient(doneCtx context.Context, _ *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { + client := gRPCClient{ + client: proto.NewDatabaseClient(c), + versionClient: logical.NewPluginVersionClient(c), + doneCtx: doneCtx, + } + return client, nil +} diff --git a/sdk/database/dbplugin/v5/grpc_server.go b/sdk/database/dbplugin/v5/grpc_server.go new file mode 100644 index 0000000..7e1bc3f --- /dev/null +++ b/sdk/database/dbplugin/v5/grpc_server.go @@ -0,0 +1,361 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package dbplugin + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/golang/protobuf/ptypes" + "github.com/hashicorp/vault/sdk/database/dbplugin/v5/proto" + "github.com/hashicorp/vault/sdk/helper/base62" + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/hashicorp/vault/sdk/logical" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var _ proto.DatabaseServer = &gRPCServer{} + +type gRPCServer struct { + proto.UnimplementedDatabaseServer + logical.UnimplementedPluginVersionServer + + // holds the non-multiplexed Database + // when this is set the plugin does not support multiplexing + singleImpl Database + + // instances holds the multiplexed Databases + instances map[string]Database + factoryFunc func() (interface{}, error) + + sync.RWMutex +} + +func (g *gRPCServer) getOrCreateDatabase(ctx context.Context) (Database, error) { + g.Lock() + defer g.Unlock() + + if g.singleImpl != nil { + return g.singleImpl, nil + } + + id, err := pluginutil.GetMultiplexIDFromContext(ctx) + if err != nil { + return nil, err + } + if db, ok := g.instances[id]; ok { + return db, nil + } + return g.createDatabase(id) +} + +// must hold the g.Lock() to call this function +func (g *gRPCServer) createDatabase(id string) (Database, error) { + db, err := g.factoryFunc() + if err != nil { + return nil, err + } + + database := db.(Database) + g.instances[id] = database + + return database, nil +} + +// getDatabaseInternal returns the database but does not hold a lock +func (g *gRPCServer) getDatabaseInternal(ctx context.Context) (Database, error) { + if g.singleImpl != nil { + return g.singleImpl, nil + } + + id, err := pluginutil.GetMultiplexIDFromContext(ctx) + if err != nil { + return nil, err + } + + if db, ok := g.instances[id]; ok { + return db, nil + } + + return nil, fmt.Errorf("no database instance found") +} + +// getDatabase holds a read lock and returns the database +func (g *gRPCServer) getDatabase(ctx context.Context) (Database, error) { + g.RLock() + impl, err := g.getDatabaseInternal(ctx) + g.RUnlock() + return impl, err +} + +// Initialize the database plugin +func (g *gRPCServer) Initialize(ctx context.Context, request *proto.InitializeRequest) (*proto.InitializeResponse, error) { + impl, err := g.getOrCreateDatabase(ctx) + if err != nil { + return nil, err + } + + rawConfig := structToMap(request.ConfigData) + + dbReq := InitializeRequest{ + Config: rawConfig, + VerifyConnection: request.VerifyConnection, + } + + dbResp, err := impl.Initialize(ctx, dbReq) + if err != nil { + return &proto.InitializeResponse{}, status.Errorf(codes.Internal, "failed to initialize: %s", err) + } + + newConfig, err := mapToStruct(dbResp.Config) + if err != nil 
{ + return &proto.InitializeResponse{}, status.Errorf(codes.Internal, "failed to marshal new config to JSON: %s", err) + } + + resp := &proto.InitializeResponse{ + ConfigData: newConfig, + } + + return resp, nil +} + +func (g *gRPCServer) NewUser(ctx context.Context, req *proto.NewUserRequest) (*proto.NewUserResponse, error) { + if req.GetUsernameConfig() == nil { + return &proto.NewUserResponse{}, status.Errorf(codes.InvalidArgument, "missing username config") + } + + var expiration time.Time + + if req.GetExpiration() != nil { + exp, err := ptypes.Timestamp(req.GetExpiration()) + if err != nil { + return &proto.NewUserResponse{}, status.Errorf(codes.InvalidArgument, "unable to parse expiration date: %s", err) + } + expiration = exp + } + + impl, err := g.getDatabase(ctx) + if err != nil { + return nil, err + } + + dbReq := NewUserRequest{ + UsernameConfig: UsernameMetadata{ + DisplayName: req.GetUsernameConfig().GetDisplayName(), + RoleName: req.GetUsernameConfig().GetRoleName(), + }, + CredentialType: CredentialType(req.GetCredentialType()), + Password: req.GetPassword(), + PublicKey: req.GetPublicKey(), + Subject: req.GetSubject(), + Expiration: expiration, + Statements: getStatementsFromProto(req.GetStatements()), + RollbackStatements: getStatementsFromProto(req.GetRollbackStatements()), + } + + dbResp, err := impl.NewUser(ctx, dbReq) + if err != nil { + return &proto.NewUserResponse{}, status.Errorf(codes.Internal, "unable to create new user: %s", err) + } + + resp := &proto.NewUserResponse{ + Username: dbResp.Username, + } + return resp, nil +} + +func (g *gRPCServer) UpdateUser(ctx context.Context, req *proto.UpdateUserRequest) (*proto.UpdateUserResponse, error) { + if req.GetUsername() == "" { + return &proto.UpdateUserResponse{}, status.Errorf(codes.InvalidArgument, "no username provided") + } + + dbReq, err := getUpdateUserRequest(req) + if err != nil { + return &proto.UpdateUserResponse{}, status.Errorf(codes.InvalidArgument, err.Error()) + } + + impl, err := g.getDatabase(ctx) + if err != nil { + return nil, err + } + + _, err = impl.UpdateUser(ctx, dbReq) + if err != nil { + return &proto.UpdateUserResponse{}, status.Errorf(codes.Internal, "unable to update user: %s", err) + } + return &proto.UpdateUserResponse{}, nil +} + +func getUpdateUserRequest(req *proto.UpdateUserRequest) (UpdateUserRequest, error) { + var password *ChangePassword + if req.GetPassword() != nil && req.GetPassword().GetNewPassword() != "" { + password = &ChangePassword{ + NewPassword: req.GetPassword().GetNewPassword(), + Statements: getStatementsFromProto(req.GetPassword().GetStatements()), + } + } + + var publicKey *ChangePublicKey + if req.GetPublicKey() != nil && len(req.GetPublicKey().GetNewPublicKey()) > 0 { + publicKey = &ChangePublicKey{ + NewPublicKey: req.GetPublicKey().GetNewPublicKey(), + Statements: getStatementsFromProto(req.GetPublicKey().GetStatements()), + } + } + + var expiration *ChangeExpiration + if req.GetExpiration() != nil && req.GetExpiration().GetNewExpiration() != nil { + newExpiration, err := ptypes.Timestamp(req.GetExpiration().GetNewExpiration()) + if err != nil { + return UpdateUserRequest{}, fmt.Errorf("unable to parse new expiration: %w", err) + } + + expiration = &ChangeExpiration{ + NewExpiration: newExpiration, + Statements: getStatementsFromProto(req.GetExpiration().GetStatements()), + } + } + + dbReq := UpdateUserRequest{ + Username: req.GetUsername(), + CredentialType: CredentialType(req.GetCredentialType()), + Password: password, + PublicKey: publicKey, + 
Expiration: expiration, + } + + if !hasChange(dbReq) { + return UpdateUserRequest{}, fmt.Errorf("update user request has no changes") + } + + return dbReq, nil +} + +func hasChange(dbReq UpdateUserRequest) bool { + if dbReq.Password != nil && dbReq.Password.NewPassword != "" { + return true + } + if dbReq.PublicKey != nil && len(dbReq.PublicKey.NewPublicKey) > 0 { + return true + } + if dbReq.Expiration != nil && !dbReq.Expiration.NewExpiration.IsZero() { + return true + } + return false +} + +func (g *gRPCServer) DeleteUser(ctx context.Context, req *proto.DeleteUserRequest) (*proto.DeleteUserResponse, error) { + if req.GetUsername() == "" { + return &proto.DeleteUserResponse{}, status.Errorf(codes.InvalidArgument, "no username provided") + } + dbReq := DeleteUserRequest{ + Username: req.GetUsername(), + Statements: getStatementsFromProto(req.GetStatements()), + } + + impl, err := g.getDatabase(ctx) + if err != nil { + return nil, err + } + + _, err = impl.DeleteUser(ctx, dbReq) + if err != nil { + return &proto.DeleteUserResponse{}, status.Errorf(codes.Internal, "unable to delete user: %s", err) + } + return &proto.DeleteUserResponse{}, nil +} + +func (g *gRPCServer) Type(ctx context.Context, _ *proto.Empty) (*proto.TypeResponse, error) { + impl, err := g.getOrCreateDatabase(ctx) + if err != nil { + return nil, err + } + + t, err := impl.Type() + if err != nil { + return &proto.TypeResponse{}, status.Errorf(codes.Internal, "unable to retrieve type: %s", err) + } + + resp := &proto.TypeResponse{ + Type: t, + } + return resp, nil +} + +func (g *gRPCServer) Close(ctx context.Context, _ *proto.Empty) (*proto.Empty, error) { + g.Lock() + defer g.Unlock() + + impl, err := g.getDatabaseInternal(ctx) + if err != nil { + return nil, err + } + + err = impl.Close() + if err != nil { + return &proto.Empty{}, status.Errorf(codes.Internal, "unable to close database plugin: %s", err) + } + + if g.singleImpl == nil { + // only cleanup instances map when multiplexing is supported + id, err := pluginutil.GetMultiplexIDFromContext(ctx) + if err != nil { + return nil, err + } + delete(g.instances, id) + } + + return &proto.Empty{}, nil +} + +// getOrForceCreateDatabase will create a database even if the multiplexing ID is not present +func (g *gRPCServer) getOrForceCreateDatabase(ctx context.Context) (Database, error) { + impl, err := g.getOrCreateDatabase(ctx) + if errors.Is(err, pluginutil.ErrNoMultiplexingIDFound) { + // if this is called without a multiplexing context, like from the plugin catalog directly, + // then we won't have a database ID, so let's generate a new database instance + id, err := base62.Random(10) + if err != nil { + return nil, err + } + + g.Lock() + defer g.Unlock() + impl, err = g.createDatabase(id) + if err != nil { + return nil, err + } + } else if err != nil { + return nil, err + } + return impl, nil +} + +// Version forwards the version request to the underlying Database implementation. 
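+// If the underlying implementation does not implement logical.PluginVersioner,
+// an empty version reply is returned rather than an error.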
+func (g *gRPCServer) Version(ctx context.Context, _ *logical.Empty) (*logical.VersionReply, error) { + impl, err := g.getOrForceCreateDatabase(ctx) + if err != nil { + return nil, err + } + + if versioner, ok := impl.(logical.PluginVersioner); ok { + return &logical.VersionReply{PluginVersion: versioner.PluginVersion().Version}, nil + } + return &logical.VersionReply{}, nil +} + +func getStatementsFromProto(protoStmts *proto.Statements) (statements Statements) { + if protoStmts == nil { + return statements + } + cmds := protoStmts.GetCommands() + statements = Statements{ + Commands: cmds, + } + return statements +} diff --git a/sdk/database/dbplugin/v5/grpc_server_test.go b/sdk/database/dbplugin/v5/grpc_server_test.go new file mode 100644 index 0000000..53d44c7 --- /dev/null +++ b/sdk/database/dbplugin/v5/grpc_server_test.go @@ -0,0 +1,839 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package dbplugin + +import ( + "context" + "errors" + "fmt" + "reflect" + "testing" + "time" + + "github.com/hashicorp/vault/sdk/logical" + "google.golang.org/protobuf/types/known/structpb" + + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/timestamp" + "github.com/hashicorp/vault/sdk/database/dbplugin/v5/proto" + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Before minValidSeconds in ptypes package +var invalidExpiration = time.Date(0, 1, 1, 0, 0, 0, 0, time.UTC) + +func TestGRPCServer_Initialize(t *testing.T) { + type testCase struct { + db Database + req *proto.InitializeRequest + expectedResp *proto.InitializeResponse + expectErr bool + expectCode codes.Code + grpcSetupFunc func(*testing.T, Database) (context.Context, gRPCServer) + } + + tests := map[string]testCase{ + "database errored": { + db: fakeDatabase{ + initErr: errors.New("initialization error"), + }, + req: &proto.InitializeRequest{}, + expectedResp: &proto.InitializeResponse{}, + expectErr: true, + expectCode: codes.Internal, + grpcSetupFunc: testGrpcServer, + }, + "newConfig can't marshal to JSON": { + db: fakeDatabase{ + initResp: InitializeResponse{ + Config: map[string]interface{}{ + "bad-data": badJSONValue{}, + }, + }, + }, + req: &proto.InitializeRequest{}, + expectedResp: &proto.InitializeResponse{}, + expectErr: true, + expectCode: codes.Internal, + grpcSetupFunc: testGrpcServer, + }, + "happy path with config data for multiplexed plugin": { + db: fakeDatabase{ + initResp: InitializeResponse{ + Config: map[string]interface{}{ + "foo": "bar", + }, + }, + }, + req: &proto.InitializeRequest{ + ConfigData: marshal(t, map[string]interface{}{ + "foo": "bar", + }), + }, + expectedResp: &proto.InitializeResponse{ + ConfigData: marshal(t, map[string]interface{}{ + "foo": "bar", + }), + }, + expectErr: false, + expectCode: codes.OK, + grpcSetupFunc: testGrpcServer, + }, + "happy path with config data for non-multiplexed plugin": { + db: fakeDatabase{ + initResp: InitializeResponse{ + Config: map[string]interface{}{ + "foo": "bar", + }, + }, + }, + req: &proto.InitializeRequest{ + ConfigData: marshal(t, map[string]interface{}{ + "foo": "bar", + }), + }, + expectedResp: &proto.InitializeResponse{ + ConfigData: marshal(t, map[string]interface{}{ + "foo": "bar", + }), + }, + expectErr: false, + expectCode: codes.OK, + grpcSetupFunc: testGrpcServerSingleImpl, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + idCtx, g := test.grpcSetupFunc(t, 
test.db) + resp, err := g.Initialize(idCtx, test.req) + + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + actualCode := status.Code(err) + if actualCode != test.expectCode { + t.Fatalf("Actual code: %s Expected code: %s", actualCode, test.expectCode) + } + + if !reflect.DeepEqual(resp, test.expectedResp) { + t.Fatalf("Actual response: %#v\nExpected response: %#v", resp, test.expectedResp) + } + }) + } +} + +func TestCoerceFloatsToInt(t *testing.T) { + type testCase struct { + input map[string]interface{} + expected map[string]interface{} + } + + tests := map[string]testCase{ + "no numbers": { + input: map[string]interface{}{ + "foo": "bar", + }, + expected: map[string]interface{}{ + "foo": "bar", + }, + }, + "raw integers": { + input: map[string]interface{}{ + "foo": 42, + }, + expected: map[string]interface{}{ + "foo": 42, + }, + }, + "floats ": { + input: map[string]interface{}{ + "foo": 42.2, + }, + expected: map[string]interface{}{ + "foo": 42.2, + }, + }, + "floats coerced to ints": { + input: map[string]interface{}{ + "foo": float64(42), + }, + expected: map[string]interface{}{ + "foo": int64(42), + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + actual := copyMap(test.input) + coerceFloatsToInt(actual) + if !reflect.DeepEqual(actual, test.expected) { + t.Fatalf("Actual: %#v\nExpected: %#v", actual, test.expected) + } + }) + } +} + +func copyMap(m map[string]interface{}) map[string]interface{} { + newMap := map[string]interface{}{} + for k, v := range m { + newMap[k] = v + } + return newMap +} + +func TestGRPCServer_NewUser(t *testing.T) { + type testCase struct { + db Database + req *proto.NewUserRequest + expectedResp *proto.NewUserResponse + expectErr bool + expectCode codes.Code + } + + tests := map[string]testCase{ + "missing username config": { + db: fakeDatabase{}, + req: &proto.NewUserRequest{}, + expectedResp: &proto.NewUserResponse{}, + expectErr: true, + expectCode: codes.InvalidArgument, + }, + "bad expiration": { + db: fakeDatabase{}, + req: &proto.NewUserRequest{ + UsernameConfig: &proto.UsernameConfig{ + DisplayName: "dispname", + RoleName: "rolename", + }, + Expiration: ×tamp.Timestamp{ + Seconds: invalidExpiration.Unix(), + }, + }, + expectedResp: &proto.NewUserResponse{}, + expectErr: true, + expectCode: codes.InvalidArgument, + }, + "database error": { + db: fakeDatabase{ + newUserErr: errors.New("new user error"), + }, + req: &proto.NewUserRequest{ + UsernameConfig: &proto.UsernameConfig{ + DisplayName: "dispname", + RoleName: "rolename", + }, + Expiration: ptypes.TimestampNow(), + }, + expectedResp: &proto.NewUserResponse{}, + expectErr: true, + expectCode: codes.Internal, + }, + "happy path with expiration": { + db: fakeDatabase{ + newUserResp: NewUserResponse{ + Username: "someuser_foo", + }, + }, + req: &proto.NewUserRequest{ + UsernameConfig: &proto.UsernameConfig{ + DisplayName: "dispname", + RoleName: "rolename", + }, + Expiration: ptypes.TimestampNow(), + }, + expectedResp: &proto.NewUserResponse{ + Username: "someuser_foo", + }, + expectErr: false, + expectCode: codes.OK, + }, + "happy path without expiration": { + db: fakeDatabase{ + newUserResp: NewUserResponse{ + Username: "someuser_foo", + }, + }, + req: &proto.NewUserRequest{ + UsernameConfig: &proto.UsernameConfig{ + DisplayName: "dispname", + RoleName: "rolename", + }, + }, + expectedResp: &proto.NewUserResponse{ + Username: "someuser_foo", + }, + 
expectErr: false, + expectCode: codes.OK, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + idCtx, g := testGrpcServer(t, test.db) + resp, err := g.NewUser(idCtx, test.req) + + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + actualCode := status.Code(err) + if actualCode != test.expectCode { + t.Fatalf("Actual code: %s Expected code: %s", actualCode, test.expectCode) + } + + if !reflect.DeepEqual(resp, test.expectedResp) { + t.Fatalf("Actual response: %#v\nExpected response: %#v", resp, test.expectedResp) + } + }) + } +} + +func TestGRPCServer_UpdateUser(t *testing.T) { + type testCase struct { + db Database + req *proto.UpdateUserRequest + expectedResp *proto.UpdateUserResponse + expectErr bool + expectCode codes.Code + } + + tests := map[string]testCase{ + "missing username": { + db: fakeDatabase{}, + req: &proto.UpdateUserRequest{}, + expectedResp: &proto.UpdateUserResponse{}, + expectErr: true, + expectCode: codes.InvalidArgument, + }, + "missing changes": { + db: fakeDatabase{}, + req: &proto.UpdateUserRequest{ + Username: "someuser", + }, + expectedResp: &proto.UpdateUserResponse{}, + expectErr: true, + expectCode: codes.InvalidArgument, + }, + "database error": { + db: fakeDatabase{ + updateUserErr: errors.New("update user error"), + }, + req: &proto.UpdateUserRequest{ + Username: "someuser", + Password: &proto.ChangePassword{ + NewPassword: "90ughaino", + }, + }, + expectedResp: &proto.UpdateUserResponse{}, + expectErr: true, + expectCode: codes.Internal, + }, + "bad expiration date": { + db: fakeDatabase{}, + req: &proto.UpdateUserRequest{ + Username: "someuser", + Expiration: &proto.ChangeExpiration{ + NewExpiration: ×tamp.Timestamp{ + // Before minValidSeconds in ptypes package + Seconds: invalidExpiration.Unix(), + }, + }, + }, + expectedResp: &proto.UpdateUserResponse{}, + expectErr: true, + expectCode: codes.InvalidArgument, + }, + "change password happy path": { + db: fakeDatabase{}, + req: &proto.UpdateUserRequest{ + Username: "someuser", + Password: &proto.ChangePassword{ + NewPassword: "90ughaino", + }, + }, + expectedResp: &proto.UpdateUserResponse{}, + expectErr: false, + expectCode: codes.OK, + }, + "change expiration happy path": { + db: fakeDatabase{}, + req: &proto.UpdateUserRequest{ + Username: "someuser", + Expiration: &proto.ChangeExpiration{ + NewExpiration: ptypes.TimestampNow(), + }, + }, + expectedResp: &proto.UpdateUserResponse{}, + expectErr: false, + expectCode: codes.OK, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + idCtx, g := testGrpcServer(t, test.db) + resp, err := g.UpdateUser(idCtx, test.req) + + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + actualCode := status.Code(err) + if actualCode != test.expectCode { + t.Fatalf("Actual code: %s Expected code: %s", actualCode, test.expectCode) + } + + if !reflect.DeepEqual(resp, test.expectedResp) { + t.Fatalf("Actual response: %#v\nExpected response: %#v", resp, test.expectedResp) + } + }) + } +} + +func TestGRPCServer_DeleteUser(t *testing.T) { + type testCase struct { + db Database + req *proto.DeleteUserRequest + expectedResp *proto.DeleteUserResponse + expectErr bool + expectCode codes.Code + } + + tests := map[string]testCase{ + "missing username": { + db: fakeDatabase{}, + req: 
&proto.DeleteUserRequest{}, + expectedResp: &proto.DeleteUserResponse{}, + expectErr: true, + expectCode: codes.InvalidArgument, + }, + "database error": { + db: fakeDatabase{ + deleteUserErr: errors.New("delete user error"), + }, + req: &proto.DeleteUserRequest{ + Username: "someuser", + }, + expectedResp: &proto.DeleteUserResponse{}, + expectErr: true, + expectCode: codes.Internal, + }, + "happy path": { + db: fakeDatabase{}, + req: &proto.DeleteUserRequest{ + Username: "someuser", + }, + expectedResp: &proto.DeleteUserResponse{}, + expectErr: false, + expectCode: codes.OK, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + idCtx, g := testGrpcServer(t, test.db) + resp, err := g.DeleteUser(idCtx, test.req) + + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + actualCode := status.Code(err) + if actualCode != test.expectCode { + t.Fatalf("Actual code: %s Expected code: %s", actualCode, test.expectCode) + } + + if !reflect.DeepEqual(resp, test.expectedResp) { + t.Fatalf("Actual response: %#v\nExpected response: %#v", resp, test.expectedResp) + } + }) + } +} + +func TestGRPCServer_Type(t *testing.T) { + type testCase struct { + db Database + expectedResp *proto.TypeResponse + expectErr bool + expectCode codes.Code + } + + tests := map[string]testCase{ + "database error": { + db: fakeDatabase{ + typeErr: errors.New("type error"), + }, + expectedResp: &proto.TypeResponse{}, + expectErr: true, + expectCode: codes.Internal, + }, + "happy path": { + db: fakeDatabase{ + typeResp: "fake database", + }, + expectedResp: &proto.TypeResponse{ + Type: "fake database", + }, + expectErr: false, + expectCode: codes.OK, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + idCtx, g := testGrpcServer(t, test.db) + resp, err := g.Type(idCtx, &proto.Empty{}) + + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + actualCode := status.Code(err) + if actualCode != test.expectCode { + t.Fatalf("Actual code: %s Expected code: %s", actualCode, test.expectCode) + } + + if !reflect.DeepEqual(resp, test.expectedResp) { + t.Fatalf("Actual response: %#v\nExpected response: %#v", resp, test.expectedResp) + } + }) + } +} + +func TestGRPCServer_Close(t *testing.T) { + type testCase struct { + db Database + expectErr bool + expectCode codes.Code + grpcSetupFunc func(*testing.T, Database) (context.Context, gRPCServer) + assertFunc func(t *testing.T, g gRPCServer) + } + + tests := map[string]testCase{ + "database error": { + db: fakeDatabase{ + closeErr: errors.New("close error"), + }, + expectErr: true, + expectCode: codes.Internal, + grpcSetupFunc: testGrpcServer, + assertFunc: nil, + }, + "happy path for multiplexed plugin": { + db: fakeDatabase{}, + expectErr: false, + expectCode: codes.OK, + grpcSetupFunc: testGrpcServer, + assertFunc: func(t *testing.T, g gRPCServer) { + if len(g.instances) != 0 { + t.Fatalf("err expected instances map to be empty") + } + }, + }, + "happy path for non-multiplexed plugin": { + db: fakeDatabase{}, + expectErr: false, + expectCode: codes.OK, + grpcSetupFunc: testGrpcServerSingleImpl, + assertFunc: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + idCtx, g := test.grpcSetupFunc(t, test.db) + _, err := g.Close(idCtx, &proto.Empty{}) + + if test.expectErr && err == nil { + 
t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + actualCode := status.Code(err) + if actualCode != test.expectCode { + t.Fatalf("Actual code: %s Expected code: %s", actualCode, test.expectCode) + } + + if test.assertFunc != nil { + test.assertFunc(t, g) + } + }) + } +} + +func TestGRPCServer_Version(t *testing.T) { + type testCase struct { + db Database + expectedResp string + expectErr bool + expectCode codes.Code + } + + tests := map[string]testCase{ + "backend that does not implement version": { + db: fakeDatabase{}, + expectedResp: "", + expectErr: false, + expectCode: codes.OK, + }, + "backend with version": { + db: fakeDatabaseWithVersion{ + version: "v123", + }, + expectedResp: "v123", + expectErr: false, + expectCode: codes.OK, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + idCtx, g := testGrpcServer(t, test.db) + resp, err := g.Version(idCtx, &logical.Empty{}) + + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + actualCode := status.Code(err) + if actualCode != test.expectCode { + t.Fatalf("Actual code: %s Expected code: %s", actualCode, test.expectCode) + } + + if !reflect.DeepEqual(resp.PluginVersion, test.expectedResp) { + t.Fatalf("Actual response: %#v\nExpected response: %#v", resp, test.expectedResp) + } + }) + } +} + +// testGrpcServer is a test helper that returns a context with an ID set in its +// metadata and a gRPCServer instance for a multiplexed plugin +func testGrpcServer(t *testing.T, db Database) (context.Context, gRPCServer) { + t.Helper() + g := gRPCServer{ + factoryFunc: func() (interface{}, error) { + return db, nil + }, + instances: make(map[string]Database), + } + + id := "12345" + idCtx := idCtx(t, id) + g.instances[id] = db + + return idCtx, g +} + +// testGrpcServerSingleImpl is a test helper that returns a context and a +// gRPCServer instance for a non-multiplexed plugin +func testGrpcServerSingleImpl(t *testing.T, db Database) (context.Context, gRPCServer) { + t.Helper() + return context.Background(), gRPCServer{ + singleImpl: db, + } +} + +// idCtx is a test helper that will return a context with the IDs set in its +// metadata +func idCtx(t *testing.T, ids ...string) context.Context { + t.Helper() + // Context doesn't need to timeout since this is just passed through + ctx := context.Background() + md := metadata.MD{} + for _, id := range ids { + md.Append(pluginutil.MultiplexingCtxKey, id) + } + return metadata.NewIncomingContext(ctx, md) +} + +func marshal(t *testing.T, m map[string]interface{}) *structpb.Struct { + t.Helper() + + strct, err := mapToStruct(m) + if err != nil { + t.Fatalf("unable to marshal to protobuf: %s", err) + } + return strct +} + +type badJSONValue struct{} + +func (badJSONValue) MarshalJSON() ([]byte, error) { + return nil, fmt.Errorf("this cannot be marshalled to JSON") +} + +func (badJSONValue) UnmarshalJSON([]byte) error { + return fmt.Errorf("this cannot be unmarshalled from JSON") +} + +var _ Database = fakeDatabase{} + +type fakeDatabase struct { + initResp InitializeResponse + initErr error + + newUserResp NewUserResponse + newUserErr error + + updateUserResp UpdateUserResponse + updateUserErr error + + deleteUserResp DeleteUserResponse + deleteUserErr error + + typeResp string + typeErr error + + closeErr error +} + +func (e fakeDatabase) Initialize(ctx context.Context, req 
InitializeRequest) (InitializeResponse, error) { + return e.initResp, e.initErr +} + +func (e fakeDatabase) NewUser(ctx context.Context, req NewUserRequest) (NewUserResponse, error) { + return e.newUserResp, e.newUserErr +} + +func (e fakeDatabase) UpdateUser(ctx context.Context, req UpdateUserRequest) (UpdateUserResponse, error) { + return e.updateUserResp, e.updateUserErr +} + +func (e fakeDatabase) DeleteUser(ctx context.Context, req DeleteUserRequest) (DeleteUserResponse, error) { + return e.deleteUserResp, e.deleteUserErr +} + +func (e fakeDatabase) Type() (string, error) { + return e.typeResp, e.typeErr +} + +func (e fakeDatabase) Close() error { + return e.closeErr +} + +var _ Database = &recordingDatabase{} + +type recordingDatabase struct { + initializeCalls int + newUserCalls int + updateUserCalls int + deleteUserCalls int + typeCalls int + closeCalls int + + // recordingDatabase can act as middleware so we can record the calls to other test Database implementations + next Database +} + +func (f *recordingDatabase) Initialize(ctx context.Context, req InitializeRequest) (InitializeResponse, error) { + f.initializeCalls++ + if f.next == nil { + return InitializeResponse{}, nil + } + return f.next.Initialize(ctx, req) +} + +func (f *recordingDatabase) NewUser(ctx context.Context, req NewUserRequest) (NewUserResponse, error) { + f.newUserCalls++ + if f.next == nil { + return NewUserResponse{}, nil + } + return f.next.NewUser(ctx, req) +} + +func (f *recordingDatabase) UpdateUser(ctx context.Context, req UpdateUserRequest) (UpdateUserResponse, error) { + f.updateUserCalls++ + if f.next == nil { + return UpdateUserResponse{}, nil + } + return f.next.UpdateUser(ctx, req) +} + +func (f *recordingDatabase) DeleteUser(ctx context.Context, req DeleteUserRequest) (DeleteUserResponse, error) { + f.deleteUserCalls++ + if f.next == nil { + return DeleteUserResponse{}, nil + } + return f.next.DeleteUser(ctx, req) +} + +func (f *recordingDatabase) Type() (string, error) { + f.typeCalls++ + if f.next == nil { + return "recordingDatabase", nil + } + return f.next.Type() +} + +func (f *recordingDatabase) Close() error { + f.closeCalls++ + if f.next == nil { + return nil + } + return f.next.Close() +} + +type fakeDatabaseWithVersion struct { + version string +} + +func (e fakeDatabaseWithVersion) PluginVersion() logical.PluginVersion { + return logical.PluginVersion{Version: e.version} +} + +func (e fakeDatabaseWithVersion) Initialize(_ context.Context, _ InitializeRequest) (InitializeResponse, error) { + return InitializeResponse{}, nil +} + +func (e fakeDatabaseWithVersion) NewUser(_ context.Context, _ NewUserRequest) (NewUserResponse, error) { + return NewUserResponse{}, nil +} + +func (e fakeDatabaseWithVersion) UpdateUser(_ context.Context, _ UpdateUserRequest) (UpdateUserResponse, error) { + return UpdateUserResponse{}, nil +} + +func (e fakeDatabaseWithVersion) DeleteUser(_ context.Context, _ DeleteUserRequest) (DeleteUserResponse, error) { + return DeleteUserResponse{}, nil +} + +func (e fakeDatabaseWithVersion) Type() (string, error) { + return "", nil +} + +func (e fakeDatabaseWithVersion) Close() error { + return nil +} + +var ( + _ Database = (*fakeDatabaseWithVersion)(nil) + _ logical.PluginVersioner = (*fakeDatabaseWithVersion)(nil) +) diff --git a/sdk/database/dbplugin/v5/marshalling.go b/sdk/database/dbplugin/v5/marshalling.go new file mode 100644 index 0000000..2b3e8cb --- /dev/null +++ b/sdk/database/dbplugin/v5/marshalling.go @@ -0,0 +1,53 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package dbplugin
+
+import (
+	"encoding/json"
+	"math"
+
+	"google.golang.org/protobuf/types/known/structpb"
+)
+
+func mapToStruct(m map[string]interface{}) (*structpb.Struct, error) {
+	// Convert any json.Number typed values to float64, since the
+	// type does not have a conversion mapping defined in structpb
+	for k, v := range m {
+		if n, ok := v.(json.Number); ok {
+			nf, err := n.Float64()
+			if err != nil {
+				return nil, err
+			}
+
+			m[k] = nf
+		}
+	}
+
+	return structpb.NewStruct(m)
+}
+
+func structToMap(strct *structpb.Struct) map[string]interface{} {
+	m := strct.AsMap()
+	coerceFloatsToInt(m)
+	return m
+}
+
+// coerceFloatsToInt converts each float64 value in the map to int64 if it can be coerced to an integer without losing data
+func coerceFloatsToInt(m map[string]interface{}) {
+	for k, v := range m {
+		fVal, ok := v.(float64)
+		if !ok {
+			continue
+		}
+		if isInt(fVal) {
+			m[k] = int64(fVal)
+		}
+	}
+}
+
+// isInt determines whether the given floating point number can be represented as an integer without losing data.
+// This does not work for very large floats, but in this usage that's okay since we don't expect numbers that large.
+func isInt(f float64) bool {
+	return math.Floor(f) == f
+}
diff --git a/sdk/database/dbplugin/v5/middleware.go b/sdk/database/dbplugin/v5/middleware.go
new file mode 100644
index 0000000..2091e67
--- /dev/null
+++ b/sdk/database/dbplugin/v5/middleware.go
@@ -0,0 +1,324 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package dbplugin
+
+import (
+	"context"
+	"errors"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/armon/go-metrics"
+	"github.com/hashicorp/errwrap"
+	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/sdk/logical"
+	"google.golang.org/grpc/status"
+)
+
+// ///////////////////////////////////////////////////
+// Tracing Middleware
+// ///////////////////////////////////////////////////
+
+var (
+	_ Database                = databaseTracingMiddleware{}
+	_ logical.PluginVersioner = databaseTracingMiddleware{}
+)
+
+// databaseTracingMiddleware wraps an implementation of Database and executes
+// trace logging on each function call.
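+//
+// A minimal in-package sketch (the type is unexported; "db" and "ctx" are
+// assumed to be an existing Database and context.Context):
+//
+//	mw := databaseTracingMiddleware{
+//		next:   db,
+//		logger: log.Default().Named("trace"),
+//	}
+//	_, err := mw.Initialize(ctx, InitializeRequest{VerifyConnection: true})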
+type databaseTracingMiddleware struct {
+	next   Database
+	logger log.Logger
+}
+
+func (mw databaseTracingMiddleware) PluginVersion() (resp logical.PluginVersion) {
+	defer func(then time.Time) {
+		mw.logger.Trace("version",
+			"status", "finished",
+			"version", resp,
+			"took", time.Since(then))
+	}(time.Now())
+
+	mw.logger.Trace("version", "status", "started")
+	if versioner, ok := mw.next.(logical.PluginVersioner); ok {
+		return versioner.PluginVersion()
+	}
+	return logical.EmptyPluginVersion
+}
+
+func (mw databaseTracingMiddleware) Initialize(ctx context.Context, req InitializeRequest) (resp InitializeResponse, err error) {
+	defer func(then time.Time) {
+		mw.logger.Trace("initialize",
+			"status", "finished",
+			"verify", req.VerifyConnection,
+			"err", err,
+			"took", time.Since(then))
+	}(time.Now())
+
+	mw.logger.Trace("initialize", "status", "started")
+	return mw.next.Initialize(ctx, req)
+}
+
+func (mw databaseTracingMiddleware) NewUser(ctx context.Context, req NewUserRequest) (resp NewUserResponse, err error) {
+	defer func(then time.Time) {
+		mw.logger.Trace("create user",
+			"status", "finished",
+			"err", err,
+			"took", time.Since(then))
+	}(time.Now())
+
+	mw.logger.Trace("create user",
+		"status", "started")
+	return mw.next.NewUser(ctx, req)
+}
+
+func (mw databaseTracingMiddleware) UpdateUser(ctx context.Context, req UpdateUserRequest) (resp UpdateUserResponse, err error) {
+	defer func(then time.Time) {
+		mw.logger.Trace("update user",
+			"status", "finished",
+			"err", err,
+			"took", time.Since(then))
+	}(time.Now())
+
+	mw.logger.Trace("update user", "status", "started")
+	return mw.next.UpdateUser(ctx, req)
+}
+
+func (mw databaseTracingMiddleware) DeleteUser(ctx context.Context, req DeleteUserRequest) (resp DeleteUserResponse, err error) {
+	defer func(then time.Time) {
+		mw.logger.Trace("delete user",
+			"status", "finished",
+			"err", err,
+			"took", time.Since(then))
+	}(time.Now())
+
+	mw.logger.Trace("delete user",
+		"status", "started")
+	return mw.next.DeleteUser(ctx, req)
+}
+
+func (mw databaseTracingMiddleware) Type() (string, error) {
+	return mw.next.Type()
+}
+
+func (mw databaseTracingMiddleware) Close() (err error) {
+	defer func(then time.Time) {
+		mw.logger.Trace("close",
+			"status", "finished",
+			"err", err,
+			"took", time.Since(then))
+	}(time.Now())
+
+	mw.logger.Trace("close",
+		"status", "started")
+	return mw.next.Close()
+}
+
+// ///////////////////////////////////////////////////
+// Metrics Middleware Domain
+// ///////////////////////////////////////////////////
+
+var (
+	_ Database                = databaseMetricsMiddleware{}
+	_ logical.PluginVersioner = databaseMetricsMiddleware{}
+)
+
+// databaseMetricsMiddleware wraps an implementation of Database and logs
+// metrics about the instance on each function call.
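+//
+// A sketch of the go-metrics keys this emits, assuming the wrapped plugin's
+// type string is "postgres" and a caller invokes Initialize:
+//
+//	mw := databaseMetricsMiddleware{next: db, typeStr: "postgres"}
+//	_, _ = mw.Initialize(ctx, InitializeRequest{})
+//	// counters: database.Initialize and database.postgres.Initialize
+//	// timers:   database.Initialize and database.postgres.Initialize
+//	// on error: database.Initialize.error and database.postgres.Initialize.error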
+type databaseMetricsMiddleware struct { + next Database + + typeStr string +} + +func (mw databaseMetricsMiddleware) PluginVersion() logical.PluginVersion { + defer func(now time.Time) { + metrics.MeasureSince([]string{"database", "PluginVersion"}, now) + metrics.MeasureSince([]string{"database", mw.typeStr, "PluginVersion"}, now) + }(time.Now()) + + metrics.IncrCounter([]string{"database", "PluginVersion"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "PluginVersion"}, 1) + + if versioner, ok := mw.next.(logical.PluginVersioner); ok { + return versioner.PluginVersion() + } + return logical.EmptyPluginVersion +} + +func (mw databaseMetricsMiddleware) Initialize(ctx context.Context, req InitializeRequest) (resp InitializeResponse, err error) { + defer func(now time.Time) { + metrics.MeasureSince([]string{"database", "Initialize"}, now) + metrics.MeasureSince([]string{"database", mw.typeStr, "Initialize"}, now) + + if err != nil { + metrics.IncrCounter([]string{"database", "Initialize", "error"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "Initialize", "error"}, 1) + } + }(time.Now()) + + metrics.IncrCounter([]string{"database", "Initialize"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "Initialize"}, 1) + return mw.next.Initialize(ctx, req) +} + +func (mw databaseMetricsMiddleware) NewUser(ctx context.Context, req NewUserRequest) (resp NewUserResponse, err error) { + defer func(start time.Time) { + metrics.MeasureSince([]string{"database", "NewUser"}, start) + metrics.MeasureSince([]string{"database", mw.typeStr, "NewUser"}, start) + + if err != nil { + metrics.IncrCounter([]string{"database", "NewUser", "error"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "NewUser", "error"}, 1) + } + }(time.Now()) + + metrics.IncrCounter([]string{"database", "NewUser"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "NewUser"}, 1) + return mw.next.NewUser(ctx, req) +} + +func (mw databaseMetricsMiddleware) UpdateUser(ctx context.Context, req UpdateUserRequest) (resp UpdateUserResponse, err error) { + defer func(now time.Time) { + metrics.MeasureSince([]string{"database", "UpdateUser"}, now) + metrics.MeasureSince([]string{"database", mw.typeStr, "UpdateUser"}, now) + + if err != nil { + metrics.IncrCounter([]string{"database", "UpdateUser", "error"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "UpdateUser", "error"}, 1) + } + }(time.Now()) + + metrics.IncrCounter([]string{"database", "UpdateUser"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "UpdateUser"}, 1) + return mw.next.UpdateUser(ctx, req) +} + +func (mw databaseMetricsMiddleware) DeleteUser(ctx context.Context, req DeleteUserRequest) (resp DeleteUserResponse, err error) { + defer func(now time.Time) { + metrics.MeasureSince([]string{"database", "DeleteUser"}, now) + metrics.MeasureSince([]string{"database", mw.typeStr, "DeleteUser"}, now) + + if err != nil { + metrics.IncrCounter([]string{"database", "DeleteUser", "error"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "DeleteUser", "error"}, 1) + } + }(time.Now()) + + metrics.IncrCounter([]string{"database", "DeleteUser"}, 1) + metrics.IncrCounter([]string{"database", mw.typeStr, "DeleteUser"}, 1) + return mw.next.DeleteUser(ctx, req) +} + +func (mw databaseMetricsMiddleware) Type() (string, error) { + return mw.next.Type() +} + +func (mw databaseMetricsMiddleware) Close() (err error) { + defer func(now time.Time) { + metrics.MeasureSince([]string{"database", "Close"}, now) + 
metrics.MeasureSince([]string{"database", mw.typeStr, "Close"}, now)
+
+		if err != nil {
+			metrics.IncrCounter([]string{"database", "Close", "error"}, 1)
+			metrics.IncrCounter([]string{"database", mw.typeStr, "Close", "error"}, 1)
+		}
+	}(time.Now())
+
+	metrics.IncrCounter([]string{"database", "Close"}, 1)
+	metrics.IncrCounter([]string{"database", mw.typeStr, "Close"}, 1)
+	return mw.next.Close()
+}
+
+// ///////////////////////////////////////////////////
+// Error Sanitizer Middleware Domain
+// ///////////////////////////////////////////////////
+
+var (
+	_ Database                = (*DatabaseErrorSanitizerMiddleware)(nil)
+	_ logical.PluginVersioner = (*DatabaseErrorSanitizerMiddleware)(nil)
+)
+
+// DatabaseErrorSanitizerMiddleware wraps an implementation of Database and
+// sanitizes returned error messages.
+type DatabaseErrorSanitizerMiddleware struct {
+	next      Database
+	secretsFn secretsFn
+}
+
+type secretsFn func() map[string]string
+
+func NewDatabaseErrorSanitizerMiddleware(next Database, secrets secretsFn) DatabaseErrorSanitizerMiddleware {
+	return DatabaseErrorSanitizerMiddleware{
+		next:      next,
+		secretsFn: secrets,
+	}
+}
+
+func (mw DatabaseErrorSanitizerMiddleware) Initialize(ctx context.Context, req InitializeRequest) (resp InitializeResponse, err error) {
+	resp, err = mw.next.Initialize(ctx, req)
+	return resp, mw.sanitize(err)
+}
+
+func (mw DatabaseErrorSanitizerMiddleware) NewUser(ctx context.Context, req NewUserRequest) (resp NewUserResponse, err error) {
+	resp, err = mw.next.NewUser(ctx, req)
+	return resp, mw.sanitize(err)
+}
+
+func (mw DatabaseErrorSanitizerMiddleware) UpdateUser(ctx context.Context, req UpdateUserRequest) (UpdateUserResponse, error) {
+	resp, err := mw.next.UpdateUser(ctx, req)
+	return resp, mw.sanitize(err)
+}
+
+func (mw DatabaseErrorSanitizerMiddleware) DeleteUser(ctx context.Context, req DeleteUserRequest) (DeleteUserResponse, error) {
+	resp, err := mw.next.DeleteUser(ctx, req)
+	return resp, mw.sanitize(err)
+}
+
+func (mw DatabaseErrorSanitizerMiddleware) Type() (string, error) {
+	dbType, err := mw.next.Type()
+	return dbType, mw.sanitize(err)
+}
+
+func (mw DatabaseErrorSanitizerMiddleware) Close() (err error) {
+	return mw.sanitize(mw.next.Close())
+}
+
+func (mw DatabaseErrorSanitizerMiddleware) PluginVersion() logical.PluginVersion {
+	if versioner, ok := mw.next.(logical.PluginVersioner); ok {
+		return versioner.PluginVersion()
+	}
+	return logical.EmptyPluginVersion
+}
+
+// sanitize removes sensitive strings from an error's message. It uses the
+// secretsFn to determine which strings should be sanitized.
+func (mw DatabaseErrorSanitizerMiddleware) sanitize(err error) error {
+	if err == nil {
+		return nil
+	}
+	if errwrap.ContainsType(err, new(url.Error)) {
+		return errors.New("unable to parse connection url")
+	}
+	if mw.secretsFn == nil {
+		return err
+	}
+	for find, replace := range mw.secretsFn() {
+		if find == "" {
+			continue
+		}
+
+		// Attempt to keep the status code attached to the
+		// error while changing the actual error message
+		s, ok := status.FromError(err)
+		if ok {
+			err = status.Error(s.Code(), strings.ReplaceAll(s.Message(), find, replace))
+			continue
+		}
+
+		err = errors.New(strings.ReplaceAll(err.Error(), find, replace))
+	}
+	return err
+}
diff --git a/sdk/database/dbplugin/v5/middleware_test.go b/sdk/database/dbplugin/v5/middleware_test.go
new file mode 100644
index 0000000..a2a7633
--- /dev/null
+++ b/sdk/database/dbplugin/v5/middleware_test.go
@@ -0,0 +1,487 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package dbplugin + +import ( + "context" + "errors" + "net/url" + "reflect" + "testing" + + "github.com/hashicorp/go-hclog" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestDatabaseErrorSanitizerMiddleware(t *testing.T) { + type testCase struct { + inputErr error + secretsFunc func() map[string]string + + expectedError error + } + + tests := map[string]testCase{ + "nil error": { + inputErr: nil, + expectedError: nil, + }, + "url error": { + inputErr: new(url.Error), + expectedError: errors.New("unable to parse connection url"), + }, + "nil secrets func": { + inputErr: errors.New("here is my password: iofsd9473tg"), + expectedError: errors.New("here is my password: iofsd9473tg"), + }, + "secrets with empty string": { + inputErr: errors.New("here is my password: iofsd9473tg"), + secretsFunc: secretFunc(t, "", ""), + expectedError: errors.New("here is my password: iofsd9473tg"), + }, + "secrets that do not match": { + inputErr: errors.New("here is my password: iofsd9473tg"), + secretsFunc: secretFunc(t, "asdf", ""), + expectedError: errors.New("here is my password: iofsd9473tg"), + }, + "secrets that do match": { + inputErr: errors.New("here is my password: iofsd9473tg"), + secretsFunc: secretFunc(t, "iofsd9473tg", ""), + expectedError: errors.New("here is my password: "), + }, + "multiple secrets": { + inputErr: errors.New("here is my password: iofsd9473tg"), + secretsFunc: secretFunc(t, + "iofsd9473tg", "", + "password", "", + ), + expectedError: errors.New("here is my : "), + }, + "gRPC status error": { + inputErr: status.Error(codes.InvalidArgument, "an error with a password iofsd9473tg"), + secretsFunc: secretFunc(t, "iofsd9473tg", ""), + expectedError: status.Errorf(codes.InvalidArgument, "an error with a password "), + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + db := fakeDatabase{} + mw := NewDatabaseErrorSanitizerMiddleware(db, test.secretsFunc) + + actualErr := mw.sanitize(test.inputErr) + if !reflect.DeepEqual(actualErr, test.expectedError) { + t.Fatalf("Actual error: %s\nExpected error: %s", actualErr, test.expectedError) + } + }) + } + + t.Run("Initialize", func(t *testing.T) { + db := &recordingDatabase{ + next: fakeDatabase{ + initErr: errors.New("password: iofsd9473tg with some stuff after it"), + }, + } + mw := DatabaseErrorSanitizerMiddleware{ + next: db, + secretsFn: secretFunc(t, "iofsd9473tg", ""), + } + + expectedErr := errors.New("password: with some stuff after it") + + _, err := mw.Initialize(context.Background(), InitializeRequest{}) + if !reflect.DeepEqual(err, expectedErr) { + t.Fatalf("Actual err: %s\n Expected err: %s", err, expectedErr) + } + + assertEquals(t, db.initializeCalls, 1) + assertEquals(t, db.newUserCalls, 0) + assertEquals(t, db.updateUserCalls, 0) + assertEquals(t, db.deleteUserCalls, 0) + assertEquals(t, db.typeCalls, 0) + assertEquals(t, db.closeCalls, 0) + }) + + t.Run("NewUser", func(t *testing.T) { + db := &recordingDatabase{ + next: fakeDatabase{ + newUserErr: errors.New("password: iofsd9473tg with some stuff after it"), + }, + } + mw := DatabaseErrorSanitizerMiddleware{ + next: db, + secretsFn: secretFunc(t, "iofsd9473tg", ""), + } + + expectedErr := errors.New("password: with some stuff after it") + + _, err := mw.NewUser(context.Background(), NewUserRequest{}) + if !reflect.DeepEqual(err, expectedErr) { + t.Fatalf("Actual err: %s\n Expected err: %s", err, expectedErr) + } + + assertEquals(t, db.initializeCalls, 0) + assertEquals(t, 
db.newUserCalls, 1) + assertEquals(t, db.updateUserCalls, 0) + assertEquals(t, db.deleteUserCalls, 0) + assertEquals(t, db.typeCalls, 0) + assertEquals(t, db.closeCalls, 0) + }) + + t.Run("UpdateUser", func(t *testing.T) { + db := &recordingDatabase{ + next: fakeDatabase{ + updateUserErr: errors.New("password: iofsd9473tg with some stuff after it"), + }, + } + mw := DatabaseErrorSanitizerMiddleware{ + next: db, + secretsFn: secretFunc(t, "iofsd9473tg", ""), + } + + expectedErr := errors.New("password: with some stuff after it") + + _, err := mw.UpdateUser(context.Background(), UpdateUserRequest{}) + if !reflect.DeepEqual(err, expectedErr) { + t.Fatalf("Actual err: %s\n Expected err: %s", err, expectedErr) + } + + assertEquals(t, db.initializeCalls, 0) + assertEquals(t, db.newUserCalls, 0) + assertEquals(t, db.updateUserCalls, 1) + assertEquals(t, db.deleteUserCalls, 0) + assertEquals(t, db.typeCalls, 0) + assertEquals(t, db.closeCalls, 0) + }) + + t.Run("DeleteUser", func(t *testing.T) { + db := &recordingDatabase{ + next: fakeDatabase{ + deleteUserErr: errors.New("password: iofsd9473tg with some stuff after it"), + }, + } + mw := DatabaseErrorSanitizerMiddleware{ + next: db, + secretsFn: secretFunc(t, "iofsd9473tg", ""), + } + + expectedErr := errors.New("password: with some stuff after it") + + _, err := mw.DeleteUser(context.Background(), DeleteUserRequest{}) + if !reflect.DeepEqual(err, expectedErr) { + t.Fatalf("Actual err: %s\n Expected err: %s", err, expectedErr) + } + + assertEquals(t, db.initializeCalls, 0) + assertEquals(t, db.newUserCalls, 0) + assertEquals(t, db.updateUserCalls, 0) + assertEquals(t, db.deleteUserCalls, 1) + assertEquals(t, db.typeCalls, 0) + assertEquals(t, db.closeCalls, 0) + }) + + t.Run("Type", func(t *testing.T) { + db := &recordingDatabase{ + next: fakeDatabase{ + typeErr: errors.New("password: iofsd9473tg with some stuff after it"), + }, + } + mw := DatabaseErrorSanitizerMiddleware{ + next: db, + secretsFn: secretFunc(t, "iofsd9473tg", ""), + } + + expectedErr := errors.New("password: with some stuff after it") + + _, err := mw.Type() + if !reflect.DeepEqual(err, expectedErr) { + t.Fatalf("Actual err: %s\n Expected err: %s", err, expectedErr) + } + + assertEquals(t, db.initializeCalls, 0) + assertEquals(t, db.newUserCalls, 0) + assertEquals(t, db.updateUserCalls, 0) + assertEquals(t, db.deleteUserCalls, 0) + assertEquals(t, db.typeCalls, 1) + assertEquals(t, db.closeCalls, 0) + }) + + t.Run("Close", func(t *testing.T) { + db := &recordingDatabase{ + next: fakeDatabase{ + closeErr: errors.New("password: iofsd9473tg with some stuff after it"), + }, + } + mw := DatabaseErrorSanitizerMiddleware{ + next: db, + secretsFn: secretFunc(t, "iofsd9473tg", ""), + } + + expectedErr := errors.New("password: with some stuff after it") + + err := mw.Close() + if !reflect.DeepEqual(err, expectedErr) { + t.Fatalf("Actual err: %s\n Expected err: %s", err, expectedErr) + } + + assertEquals(t, db.initializeCalls, 0) + assertEquals(t, db.newUserCalls, 0) + assertEquals(t, db.updateUserCalls, 0) + assertEquals(t, db.deleteUserCalls, 0) + assertEquals(t, db.typeCalls, 0) + assertEquals(t, db.closeCalls, 1) + }) +} + +func secretFunc(t *testing.T, vals ...string) func() map[string]string { + t.Helper() + if len(vals)%2 != 0 { + t.Fatalf("Test configuration error: secretFunc must be called with an even number of values") + } + + m := map[string]string{} + + for i := 0; i < len(vals); i += 2 { + key := vals[i] + m[key] = vals[i+1] + } + + return func() map[string]string { + return 
m + } +} + +func TestTracingMiddleware(t *testing.T) { + t.Run("Initialize", func(t *testing.T) { + db := &recordingDatabase{} + logger := hclog.NewNullLogger() + mw := databaseTracingMiddleware{ + next: db, + logger: logger, + } + _, err := mw.Initialize(context.Background(), InitializeRequest{}) + if err != nil { + t.Fatalf("Expected no error, but got: %s", err) + } + assertEquals(t, db.initializeCalls, 1) + assertEquals(t, db.newUserCalls, 0) + assertEquals(t, db.updateUserCalls, 0) + assertEquals(t, db.deleteUserCalls, 0) + assertEquals(t, db.typeCalls, 0) + assertEquals(t, db.closeCalls, 0) + }) + + t.Run("NewUser", func(t *testing.T) { + db := &recordingDatabase{} + logger := hclog.NewNullLogger() + mw := databaseTracingMiddleware{ + next: db, + logger: logger, + } + _, err := mw.NewUser(context.Background(), NewUserRequest{}) + if err != nil { + t.Fatalf("Expected no error, but got: %s", err) + } + assertEquals(t, db.initializeCalls, 0) + assertEquals(t, db.newUserCalls, 1) + assertEquals(t, db.updateUserCalls, 0) + assertEquals(t, db.deleteUserCalls, 0) + assertEquals(t, db.typeCalls, 0) + assertEquals(t, db.closeCalls, 0) + }) + + t.Run("UpdateUser", func(t *testing.T) { + db := &recordingDatabase{} + logger := hclog.NewNullLogger() + mw := databaseTracingMiddleware{ + next: db, + logger: logger, + } + _, err := mw.UpdateUser(context.Background(), UpdateUserRequest{}) + if err != nil { + t.Fatalf("Expected no error, but got: %s", err) + } + assertEquals(t, db.initializeCalls, 0) + assertEquals(t, db.newUserCalls, 0) + assertEquals(t, db.updateUserCalls, 1) + assertEquals(t, db.deleteUserCalls, 0) + assertEquals(t, db.typeCalls, 0) + assertEquals(t, db.closeCalls, 0) + }) + + t.Run("DeleteUser", func(t *testing.T) { + db := &recordingDatabase{} + logger := hclog.NewNullLogger() + mw := databaseTracingMiddleware{ + next: db, + logger: logger, + } + _, err := mw.DeleteUser(context.Background(), DeleteUserRequest{}) + if err != nil { + t.Fatalf("Expected no error, but got: %s", err) + } + assertEquals(t, db.initializeCalls, 0) + assertEquals(t, db.newUserCalls, 0) + assertEquals(t, db.updateUserCalls, 0) + assertEquals(t, db.deleteUserCalls, 1) + assertEquals(t, db.typeCalls, 0) + assertEquals(t, db.closeCalls, 0) + }) + + t.Run("Type", func(t *testing.T) { + db := &recordingDatabase{} + logger := hclog.NewNullLogger() + mw := databaseTracingMiddleware{ + next: db, + logger: logger, + } + _, err := mw.Type() + if err != nil { + t.Fatalf("Expected no error, but got: %s", err) + } + assertEquals(t, db.initializeCalls, 0) + assertEquals(t, db.newUserCalls, 0) + assertEquals(t, db.updateUserCalls, 0) + assertEquals(t, db.deleteUserCalls, 0) + assertEquals(t, db.typeCalls, 1) + assertEquals(t, db.closeCalls, 0) + }) + + t.Run("Close", func(t *testing.T) { + db := &recordingDatabase{} + logger := hclog.NewNullLogger() + mw := databaseTracingMiddleware{ + next: db, + logger: logger, + } + err := mw.Close() + if err != nil { + t.Fatalf("Expected no error, but got: %s", err) + } + assertEquals(t, db.initializeCalls, 0) + assertEquals(t, db.newUserCalls, 0) + assertEquals(t, db.updateUserCalls, 0) + assertEquals(t, db.deleteUserCalls, 0) + assertEquals(t, db.typeCalls, 0) + assertEquals(t, db.closeCalls, 1) + }) +} + +func TestMetricsMiddleware(t *testing.T) { + t.Run("Initialize", func(t *testing.T) { + db := &recordingDatabase{} + mw := databaseMetricsMiddleware{ + next: db, + typeStr: "metrics", + } + _, err := mw.Initialize(context.Background(), InitializeRequest{}) + if err != nil { + 
t.Fatalf("Expected no error, but got: %s", err) + } + assertEquals(t, db.initializeCalls, 1) + assertEquals(t, db.newUserCalls, 0) + assertEquals(t, db.updateUserCalls, 0) + assertEquals(t, db.deleteUserCalls, 0) + assertEquals(t, db.typeCalls, 0) + assertEquals(t, db.closeCalls, 0) + }) + + t.Run("NewUser", func(t *testing.T) { + db := &recordingDatabase{} + mw := databaseMetricsMiddleware{ + next: db, + typeStr: "metrics", + } + _, err := mw.NewUser(context.Background(), NewUserRequest{}) + if err != nil { + t.Fatalf("Expected no error, but got: %s", err) + } + assertEquals(t, db.initializeCalls, 0) + assertEquals(t, db.newUserCalls, 1) + assertEquals(t, db.updateUserCalls, 0) + assertEquals(t, db.deleteUserCalls, 0) + assertEquals(t, db.typeCalls, 0) + assertEquals(t, db.closeCalls, 0) + }) + + t.Run("UpdateUser", func(t *testing.T) { + db := &recordingDatabase{} + mw := databaseMetricsMiddleware{ + next: db, + typeStr: "metrics", + } + _, err := mw.UpdateUser(context.Background(), UpdateUserRequest{}) + if err != nil { + t.Fatalf("Expected no error, but got: %s", err) + } + assertEquals(t, db.initializeCalls, 0) + assertEquals(t, db.newUserCalls, 0) + assertEquals(t, db.updateUserCalls, 1) + assertEquals(t, db.deleteUserCalls, 0) + assertEquals(t, db.typeCalls, 0) + assertEquals(t, db.closeCalls, 0) + }) + + t.Run("DeleteUser", func(t *testing.T) { + db := &recordingDatabase{} + mw := databaseMetricsMiddleware{ + next: db, + typeStr: "metrics", + } + _, err := mw.DeleteUser(context.Background(), DeleteUserRequest{}) + if err != nil { + t.Fatalf("Expected no error, but got: %s", err) + } + assertEquals(t, db.initializeCalls, 0) + assertEquals(t, db.newUserCalls, 0) + assertEquals(t, db.updateUserCalls, 0) + assertEquals(t, db.deleteUserCalls, 1) + assertEquals(t, db.typeCalls, 0) + assertEquals(t, db.closeCalls, 0) + }) + + t.Run("Type", func(t *testing.T) { + db := &recordingDatabase{} + mw := databaseMetricsMiddleware{ + next: db, + typeStr: "metrics", + } + _, err := mw.Type() + if err != nil { + t.Fatalf("Expected no error, but got: %s", err) + } + assertEquals(t, db.initializeCalls, 0) + assertEquals(t, db.newUserCalls, 0) + assertEquals(t, db.updateUserCalls, 0) + assertEquals(t, db.deleteUserCalls, 0) + assertEquals(t, db.typeCalls, 1) + assertEquals(t, db.closeCalls, 0) + }) + + t.Run("Close", func(t *testing.T) { + db := &recordingDatabase{} + mw := databaseMetricsMiddleware{ + next: db, + typeStr: "metrics", + } + err := mw.Close() + if err != nil { + t.Fatalf("Expected no error, but got: %s", err) + } + assertEquals(t, db.initializeCalls, 0) + assertEquals(t, db.newUserCalls, 0) + assertEquals(t, db.updateUserCalls, 0) + assertEquals(t, db.deleteUserCalls, 0) + assertEquals(t, db.typeCalls, 0) + assertEquals(t, db.closeCalls, 1) + }) +} + +func assertEquals(t *testing.T, actual, expected int) { + t.Helper() + if actual != expected { + t.Fatalf("Actual: %d Expected: %d", actual, expected) + } +} diff --git a/sdk/database/dbplugin/v5/plugin_client.go b/sdk/database/dbplugin/v5/plugin_client.go new file mode 100644 index 0000000..b4085ea --- /dev/null +++ b/sdk/database/dbplugin/v5/plugin_client.go @@ -0,0 +1,82 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package dbplugin
+
+import (
+	"context"
+	"errors"
+
+	"github.com/hashicorp/go-plugin"
+	"github.com/hashicorp/vault/sdk/database/dbplugin/v5/proto"
+	"github.com/hashicorp/vault/sdk/helper/pluginutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+var _ logical.PluginVersioner = (*DatabasePluginClient)(nil)
+
+type DatabasePluginClient struct {
+	client pluginutil.PluginClient
+	Database
+}
+
+func (dc *DatabasePluginClient) PluginVersion() logical.PluginVersion {
+	if versioner, ok := dc.Database.(logical.PluginVersioner); ok {
+		return versioner.PluginVersion()
+	}
+	return logical.EmptyPluginVersion
+}
+
+// Close wraps the underlying Close call and ensures we both close the
+// database connection and kill the plugin.
+func (dc *DatabasePluginClient) Close() error {
+	err := dc.Database.Close()
+	dc.client.Close()
+
+	return err
+}
+
+// PluginSets is the map of plugins we can dispense.
+var PluginSets = map[int]plugin.PluginSet{
+	5: {
+		"database": &GRPCDatabasePlugin{},
+	},
+	6: {
+		"database": &GRPCDatabasePlugin{},
+	},
+}
+
+// NewPluginClient returns a DatabasePluginClient with a connection to a
+// running plugin.
+func NewPluginClient(ctx context.Context, sys pluginutil.RunnerUtil, config pluginutil.PluginClientConfig) (Database, error) {
+	pluginClient, err := sys.NewPluginClient(ctx, config)
+	if err != nil {
+		return nil, err
+	}
+
+	// Request the plugin
+	raw, err := pluginClient.Dispense("database")
+	if err != nil {
+		return nil, err
+	}
+
+	// We should have a database type now. This feels like a normal interface
+	// implementation but is in fact over an RPC connection.
+	var db Database
+	switch c := raw.(type) {
+	case gRPCClient:
+		// This is an abstraction leak from go-plugin but it is necessary in
+		// order to enable multiplexing on multiplexed plugins
+		c.client = proto.NewDatabaseClient(pluginClient.Conn())
+		c.versionClient = logical.NewPluginVersionClient(pluginClient.Conn())
+
+		db = c
+	default:
+		return nil, errors.New("unsupported client type")
+	}
+
+	return &DatabasePluginClient{
+		client:   pluginClient,
+		Database: db,
+	}, nil
+}
diff --git a/sdk/database/dbplugin/v5/plugin_client_test.go b/sdk/database/dbplugin/v5/plugin_client_test.go
new file mode 100644
index 0000000..10f02b7
--- /dev/null
+++ b/sdk/database/dbplugin/v5/plugin_client_test.go
@@ -0,0 +1,158 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package dbplugin + +import ( + "context" + "errors" + "reflect" + "testing" + "time" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/database/dbplugin/v5/proto" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/hashicorp/vault/sdk/helper/wrapping" + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/mock" + "google.golang.org/grpc" +) + +func TestNewPluginClient(t *testing.T) { + type testCase struct { + config pluginutil.PluginClientConfig + pluginClient pluginutil.PluginClient + expectedResp *DatabasePluginClient + expectedErr error + } + + tests := map[string]testCase{ + "happy path": { + config: testPluginClientConfig(), + pluginClient: &fakePluginClient{ + connResp: nil, + dispenseResp: gRPCClient{client: fakeClient{}}, + dispenseErr: nil, + }, + expectedResp: &DatabasePluginClient{ + client: &fakePluginClient{ + connResp: nil, + dispenseResp: gRPCClient{client: fakeClient{}}, + dispenseErr: nil, + }, + Database: gRPCClient{client: proto.NewDatabaseClient(nil), versionClient: logical.NewPluginVersionClient(nil), doneCtx: context.Context(nil)}, + }, + expectedErr: nil, + }, + "dispense error": { + config: testPluginClientConfig(), + pluginClient: &fakePluginClient{ + connResp: nil, + dispenseResp: gRPCClient{}, + dispenseErr: errors.New("dispense error"), + }, + expectedResp: nil, + expectedErr: errors.New("dispense error"), + }, + "error unsupported client type": { + config: testPluginClientConfig(), + pluginClient: &fakePluginClient{ + connResp: nil, + dispenseResp: nil, + dispenseErr: nil, + }, + expectedResp: nil, + expectedErr: errors.New("unsupported client type"), + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + ctx := context.Background() + + mockWrapper := new(mockRunnerUtil) + mockWrapper.On("NewPluginClient", ctx, mock.Anything). 
+ Return(test.pluginClient, nil) + defer mockWrapper.AssertNumberOfCalls(t, "NewPluginClient", 1) + + resp, err := NewPluginClient(ctx, mockWrapper, test.config) + if test.expectedErr != nil && err == nil { + t.Fatalf("err expected, got nil") + } + if test.expectedErr == nil && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + if test.expectedErr == nil && !reflect.DeepEqual(resp, test.expectedResp) { + t.Fatalf("Actual response: %#v\nExpected response: %#v", resp, test.expectedResp) + } + }) + } +} + +func testPluginClientConfig() pluginutil.PluginClientConfig { + return pluginutil.PluginClientConfig{ + Name: "test-plugin", + PluginSets: PluginSets, + PluginType: consts.PluginTypeDatabase, + HandshakeConfig: HandshakeConfig, + Logger: log.NewNullLogger(), + IsMetadataMode: true, + AutoMTLS: true, + } +} + +var _ pluginutil.PluginClient = &fakePluginClient{} + +type fakePluginClient struct { + connResp grpc.ClientConnInterface + + dispenseResp interface{} + dispenseErr error +} + +func (f *fakePluginClient) Conn() grpc.ClientConnInterface { + return nil +} + +func (f *fakePluginClient) Reload() error { + return nil +} + +func (f *fakePluginClient) Dispense(name string) (interface{}, error) { + return f.dispenseResp, f.dispenseErr +} + +func (f *fakePluginClient) Ping() error { + return nil +} + +func (f *fakePluginClient) Close() error { + return nil +} + +var _ pluginutil.RunnerUtil = &mockRunnerUtil{} + +type mockRunnerUtil struct { + mock.Mock +} + +func (m *mockRunnerUtil) VaultVersion(ctx context.Context) (string, error) { + return "dummyversion", nil +} + +func (m *mockRunnerUtil) NewPluginClient(ctx context.Context, config pluginutil.PluginClientConfig) (pluginutil.PluginClient, error) { + args := m.Called(ctx, config) + return args.Get(0).(pluginutil.PluginClient), args.Error(1) +} + +func (m *mockRunnerUtil) ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) { + args := m.Called(ctx, data, ttl, jwt) + return args.Get(0).(*wrapping.ResponseWrapInfo), args.Error(1) +} + +func (m *mockRunnerUtil) MlockEnabled() bool { + args := m.Called() + return args.Bool(0) +} diff --git a/sdk/database/dbplugin/v5/plugin_factory.go b/sdk/database/dbplugin/v5/plugin_factory.go new file mode 100644 index 0000000..4b158c3 --- /dev/null +++ b/sdk/database/dbplugin/v5/plugin_factory.go @@ -0,0 +1,99 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package dbplugin + +import ( + "context" + "fmt" + + "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/pluginutil" +) + +// PluginFactory is used to build plugin database types. It wraps the database +// object in a logging and metrics middleware. +func PluginFactory(ctx context.Context, pluginName string, sys pluginutil.LookRunnerUtil, logger log.Logger) (Database, error) { + return PluginFactoryVersion(ctx, pluginName, "", sys, logger) +} + +// PluginFactoryVersion is used to build plugin database types with a version specified. +// It wraps the database object in a logging and metrics middleware. 
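+//
+// A minimal sketch of a call site (the plugin name and version are assumed
+// example values; "sys" is any pluginutil.LookRunnerUtil implementation):
+//
+//	db, err := PluginFactoryVersion(ctx, "postgresql-database-plugin", "v1.0.0", sys, logger)
+//	if err != nil {
+//		// handle err
+//	}
+//	defer db.Close()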
+func PluginFactoryVersion(ctx context.Context, pluginName string, pluginVersion string, sys pluginutil.LookRunnerUtil, logger log.Logger) (Database, error) {
+	// Look for plugin in the plugin catalog
+	pluginRunner, err := sys.LookupPluginVersion(ctx, pluginName, consts.PluginTypeDatabase, pluginVersion)
+	if err != nil {
+		return nil, err
+	}
+
+	namedLogger := logger.Named(pluginName)
+
+	var transport string
+	var db Database
+	if pluginRunner.Builtin {
+		// Plugin is builtin so we can retrieve an instance of the interface
+		// from the pluginRunner. Then cast it to a Database.
+		dbRaw, err := pluginRunner.BuiltinFactory()
+		if err != nil {
+			return nil, errwrap.Wrapf("error initializing plugin: {{err}}", err)
+		}
+
+		var ok bool
+		db, ok = dbRaw.(Database)
+		if !ok {
+			return nil, fmt.Errorf("unsupported database type: %q", pluginName)
+		}
+
+		transport = "builtin"
+
+	} else {
+		config := pluginutil.PluginClientConfig{
+			Name:            pluginName,
+			PluginType:      consts.PluginTypeDatabase,
+			Version:         pluginVersion,
+			PluginSets:      PluginSets,
+			HandshakeConfig: HandshakeConfig,
+			Logger:          namedLogger,
+			IsMetadataMode:  false,
+			AutoMTLS:        true,
+			Wrapper:         sys,
+		}
+		// create a DatabasePluginClient instance
+		db, err = NewPluginClient(ctx, sys, config)
+		if err != nil {
+			return nil, err
+		}
+
+		// Switch on the underlying database client type to get the transport
+		// method.
+		switch db.(*DatabasePluginClient).Database.(type) {
+		case *gRPCClient:
+			transport = "gRPC"
+		}
+
+	}
+
+	typeStr, err := db.Type()
+	if err != nil {
+		return nil, errwrap.Wrapf("error getting plugin type: {{err}}", err)
+	}
+	logger.Debug("got database plugin instance", "type", typeStr)
+
+	// Wrap with metrics middleware
+	db = &databaseMetricsMiddleware{
+		next:    db,
+		typeStr: typeStr,
+	}
+
+	// Wrap with tracing middleware
+	if namedLogger.IsTrace() {
+		db = &databaseTracingMiddleware{
+			next:   db,
+			logger: namedLogger.With("transport", transport),
+		}
+	}
+
+	return db, nil
+}
diff --git a/sdk/database/dbplugin/v5/plugin_server.go b/sdk/database/dbplugin/v5/plugin_server.go
new file mode 100644
index 0000000..216219d
--- /dev/null
+++ b/sdk/database/dbplugin/v5/plugin_server.go
@@ -0,0 +1,85 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package dbplugin
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/go-plugin"
+	"github.com/hashicorp/vault/sdk/helper/pluginutil"
+)
+
+// Serve is called from within a plugin and wraps the provided
+// Database implementation in a GRPCDatabasePlugin and starts an
+// RPC server.
+func Serve(db Database) {
+	plugin.Serve(ServeConfig(db))
+}
+
+func ServeConfig(db Database) *plugin.ServeConfig {
+	err := pluginutil.OptionallyEnableMlock()
+	if err != nil {
+		fmt.Println(err)
+		return nil
+	}
+
+	// pluginSets is the map of plugins we can dispense.
+	pluginSets := map[int]plugin.PluginSet{
+		5: {
+			"database": &GRPCDatabasePlugin{
+				Impl: db,
+			},
+		},
+	}
+
+	conf := &plugin.ServeConfig{
+		HandshakeConfig:  HandshakeConfig,
+		VersionedPlugins: pluginSets,
+		GRPCServer:       plugin.DefaultGRPCServer,
+	}
+
+	return conf
+}
+
+func ServeMultiplex(factory Factory) {
+	plugin.Serve(ServeConfigMultiplex(factory))
+}
+
+func ServeConfigMultiplex(factory Factory) *plugin.ServeConfig {
+	err := pluginutil.OptionallyEnableMlock()
+	if err != nil {
+		fmt.Println(err)
+		return nil
+	}
+
+	db, err := factory()
+	if err != nil {
+		fmt.Println(err)
+		return nil
+	}
+
+	database := db.(Database)
+
+	// pluginSets is the map of plugins we can dispense.
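+	// Version 5 serves the single pre-built Database for clients that do not
+	// support multiplexing, while version 6 registers the factory so the
+	// server can lazily build one Database instance per multiplexing ID. A
+	// plugin's main would typically just call (an illustrative sketch,
+	// assuming New has the Factory signature func() (interface{}, error)):
+	//
+	//	func main() {
+	//		dbplugin.ServeMultiplex(New)
+	//	}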
+ pluginSets := map[int]plugin.PluginSet{ + 5: { + "database": &GRPCDatabasePlugin{ + Impl: database, + }, + }, + 6: { + "database": &GRPCDatabasePlugin{ + FactoryFunc: factory, + }, + }, + } + + conf := &plugin.ServeConfig{ + HandshakeConfig: HandshakeConfig, + VersionedPlugins: pluginSets, + GRPCServer: plugin.DefaultGRPCServer, + } + + return conf +} diff --git a/sdk/database/dbplugin/v5/proto/database.pb.go b/sdk/database/dbplugin/v5/proto/database.pb.go new file mode 100644 index 0000000..f152aca --- /dev/null +++ b/sdk/database/dbplugin/v5/proto/database.pb.go @@ -0,0 +1,1273 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: sdk/database/dbplugin/v5/proto/database.proto + +package proto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// /////////////// +// Initialize() +// /////////////// +type InitializeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ConfigData *structpb.Struct `protobuf:"bytes,1,opt,name=config_data,json=configData,proto3" json:"config_data,omitempty"` + VerifyConnection bool `protobuf:"varint,2,opt,name=verify_connection,json=verifyConnection,proto3" json:"verify_connection,omitempty"` +} + +func (x *InitializeRequest) Reset() { + *x = InitializeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InitializeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InitializeRequest) ProtoMessage() {} + +func (x *InitializeRequest) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InitializeRequest.ProtoReflect.Descriptor instead. 
+func (*InitializeRequest) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_v5_proto_database_proto_rawDescGZIP(), []int{0} +} + +func (x *InitializeRequest) GetConfigData() *structpb.Struct { + if x != nil { + return x.ConfigData + } + return nil +} + +func (x *InitializeRequest) GetVerifyConnection() bool { + if x != nil { + return x.VerifyConnection + } + return false +} + +type InitializeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ConfigData *structpb.Struct `protobuf:"bytes,1,opt,name=config_data,json=configData,proto3" json:"config_data,omitempty"` +} + +func (x *InitializeResponse) Reset() { + *x = InitializeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InitializeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InitializeResponse) ProtoMessage() {} + +func (x *InitializeResponse) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InitializeResponse.ProtoReflect.Descriptor instead. +func (*InitializeResponse) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_v5_proto_database_proto_rawDescGZIP(), []int{1} +} + +func (x *InitializeResponse) GetConfigData() *structpb.Struct { + if x != nil { + return x.ConfigData + } + return nil +} + +type NewUserRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + UsernameConfig *UsernameConfig `protobuf:"bytes,1,opt,name=username_config,json=usernameConfig,proto3" json:"username_config,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + Expiration *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=expiration,proto3" json:"expiration,omitempty"` + Statements *Statements `protobuf:"bytes,4,opt,name=statements,proto3" json:"statements,omitempty"` + RollbackStatements *Statements `protobuf:"bytes,5,opt,name=rollback_statements,json=rollbackStatements,proto3" json:"rollback_statements,omitempty"` + CredentialType int32 `protobuf:"varint,6,opt,name=credential_type,json=credentialType,proto3" json:"credential_type,omitempty"` + PublicKey []byte `protobuf:"bytes,7,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` + Subject string `protobuf:"bytes,8,opt,name=subject,proto3" json:"subject,omitempty"` +} + +func (x *NewUserRequest) Reset() { + *x = NewUserRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NewUserRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NewUserRequest) ProtoMessage() {} + +func (x *NewUserRequest) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) 
+} + +// Deprecated: Use NewUserRequest.ProtoReflect.Descriptor instead. +func (*NewUserRequest) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_v5_proto_database_proto_rawDescGZIP(), []int{2} +} + +func (x *NewUserRequest) GetUsernameConfig() *UsernameConfig { + if x != nil { + return x.UsernameConfig + } + return nil +} + +func (x *NewUserRequest) GetPassword() string { + if x != nil { + return x.Password + } + return "" +} + +func (x *NewUserRequest) GetExpiration() *timestamppb.Timestamp { + if x != nil { + return x.Expiration + } + return nil +} + +func (x *NewUserRequest) GetStatements() *Statements { + if x != nil { + return x.Statements + } + return nil +} + +func (x *NewUserRequest) GetRollbackStatements() *Statements { + if x != nil { + return x.RollbackStatements + } + return nil +} + +func (x *NewUserRequest) GetCredentialType() int32 { + if x != nil { + return x.CredentialType + } + return 0 +} + +func (x *NewUserRequest) GetPublicKey() []byte { + if x != nil { + return x.PublicKey + } + return nil +} + +func (x *NewUserRequest) GetSubject() string { + if x != nil { + return x.Subject + } + return "" +} + +type UsernameConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DisplayName string `protobuf:"bytes,1,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + RoleName string `protobuf:"bytes,2,opt,name=role_name,json=roleName,proto3" json:"role_name,omitempty"` +} + +func (x *UsernameConfig) Reset() { + *x = UsernameConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UsernameConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UsernameConfig) ProtoMessage() {} + +func (x *UsernameConfig) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UsernameConfig.ProtoReflect.Descriptor instead. 
+func (*UsernameConfig) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_v5_proto_database_proto_rawDescGZIP(), []int{3} +} + +func (x *UsernameConfig) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *UsernameConfig) GetRoleName() string { + if x != nil { + return x.RoleName + } + return "" +} + +type NewUserResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` +} + +func (x *NewUserResponse) Reset() { + *x = NewUserResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NewUserResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NewUserResponse) ProtoMessage() {} + +func (x *NewUserResponse) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NewUserResponse.ProtoReflect.Descriptor instead. +func (*NewUserResponse) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_v5_proto_database_proto_rawDescGZIP(), []int{4} +} + +func (x *NewUserResponse) GetUsername() string { + if x != nil { + return x.Username + } + return "" +} + +// /////////////// +// UpdateUser() +// /////////////// +type UpdateUserRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` + Password *ChangePassword `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + Expiration *ChangeExpiration `protobuf:"bytes,3,opt,name=expiration,proto3" json:"expiration,omitempty"` + PublicKey *ChangePublicKey `protobuf:"bytes,4,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` + CredentialType int32 `protobuf:"varint,5,opt,name=credential_type,json=credentialType,proto3" json:"credential_type,omitempty"` +} + +func (x *UpdateUserRequest) Reset() { + *x = UpdateUserRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateUserRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateUserRequest) ProtoMessage() {} + +func (x *UpdateUserRequest) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateUserRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateUserRequest) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_v5_proto_database_proto_rawDescGZIP(), []int{5} +} + +func (x *UpdateUserRequest) GetUsername() string { + if x != nil { + return x.Username + } + return "" +} + +func (x *UpdateUserRequest) GetPassword() *ChangePassword { + if x != nil { + return x.Password + } + return nil +} + +func (x *UpdateUserRequest) GetExpiration() *ChangeExpiration { + if x != nil { + return x.Expiration + } + return nil +} + +func (x *UpdateUserRequest) GetPublicKey() *ChangePublicKey { + if x != nil { + return x.PublicKey + } + return nil +} + +func (x *UpdateUserRequest) GetCredentialType() int32 { + if x != nil { + return x.CredentialType + } + return 0 +} + +type ChangePassword struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NewPassword string `protobuf:"bytes,1,opt,name=new_password,json=newPassword,proto3" json:"new_password,omitempty"` + Statements *Statements `protobuf:"bytes,2,opt,name=statements,proto3" json:"statements,omitempty"` +} + +func (x *ChangePassword) Reset() { + *x = ChangePassword{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ChangePassword) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ChangePassword) ProtoMessage() {} + +func (x *ChangePassword) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ChangePassword.ProtoReflect.Descriptor instead. +func (*ChangePassword) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_v5_proto_database_proto_rawDescGZIP(), []int{6} +} + +func (x *ChangePassword) GetNewPassword() string { + if x != nil { + return x.NewPassword + } + return "" +} + +func (x *ChangePassword) GetStatements() *Statements { + if x != nil { + return x.Statements + } + return nil +} + +type ChangePublicKey struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NewPublicKey []byte `protobuf:"bytes,1,opt,name=new_public_key,json=newPublicKey,proto3" json:"new_public_key,omitempty"` + Statements *Statements `protobuf:"bytes,2,opt,name=statements,proto3" json:"statements,omitempty"` +} + +func (x *ChangePublicKey) Reset() { + *x = ChangePublicKey{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ChangePublicKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ChangePublicKey) ProtoMessage() {} + +func (x *ChangePublicKey) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ChangePublicKey.ProtoReflect.Descriptor instead. 
+func (*ChangePublicKey) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_v5_proto_database_proto_rawDescGZIP(), []int{7} +} + +func (x *ChangePublicKey) GetNewPublicKey() []byte { + if x != nil { + return x.NewPublicKey + } + return nil +} + +func (x *ChangePublicKey) GetStatements() *Statements { + if x != nil { + return x.Statements + } + return nil +} + +type ChangeExpiration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NewExpiration *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=new_expiration,json=newExpiration,proto3" json:"new_expiration,omitempty"` + Statements *Statements `protobuf:"bytes,2,opt,name=statements,proto3" json:"statements,omitempty"` +} + +func (x *ChangeExpiration) Reset() { + *x = ChangeExpiration{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ChangeExpiration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ChangeExpiration) ProtoMessage() {} + +func (x *ChangeExpiration) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ChangeExpiration.ProtoReflect.Descriptor instead. +func (*ChangeExpiration) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_v5_proto_database_proto_rawDescGZIP(), []int{8} +} + +func (x *ChangeExpiration) GetNewExpiration() *timestamppb.Timestamp { + if x != nil { + return x.NewExpiration + } + return nil +} + +func (x *ChangeExpiration) GetStatements() *Statements { + if x != nil { + return x.Statements + } + return nil +} + +type UpdateUserResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *UpdateUserResponse) Reset() { + *x = UpdateUserResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateUserResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateUserResponse) ProtoMessage() {} + +func (x *UpdateUserResponse) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateUserResponse.ProtoReflect.Descriptor instead. 
+func (*UpdateUserResponse) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_v5_proto_database_proto_rawDescGZIP(), []int{9} +} + +// /////////////// +// DeleteUser() +// /////////////// +type DeleteUserRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` + Statements *Statements `protobuf:"bytes,2,opt,name=statements,proto3" json:"statements,omitempty"` +} + +func (x *DeleteUserRequest) Reset() { + *x = DeleteUserRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteUserRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteUserRequest) ProtoMessage() {} + +func (x *DeleteUserRequest) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteUserRequest.ProtoReflect.Descriptor instead. +func (*DeleteUserRequest) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_v5_proto_database_proto_rawDescGZIP(), []int{10} +} + +func (x *DeleteUserRequest) GetUsername() string { + if x != nil { + return x.Username + } + return "" +} + +func (x *DeleteUserRequest) GetStatements() *Statements { + if x != nil { + return x.Statements + } + return nil +} + +type DeleteUserResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DeleteUserResponse) Reset() { + *x = DeleteUserResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteUserResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteUserResponse) ProtoMessage() {} + +func (x *DeleteUserResponse) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteUserResponse.ProtoReflect.Descriptor instead. 
+func (*DeleteUserResponse) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_v5_proto_database_proto_rawDescGZIP(), []int{11} +} + +// /////////////// +// Type() +// /////////////// +type TypeResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty"` +} + +func (x *TypeResponse) Reset() { + *x = TypeResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TypeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TypeResponse) ProtoMessage() {} + +func (x *TypeResponse) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TypeResponse.ProtoReflect.Descriptor instead. +func (*TypeResponse) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_v5_proto_database_proto_rawDescGZIP(), []int{12} +} + +func (x *TypeResponse) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +// /////////////// +// General purpose +// /////////////// +type Statements struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Commands []string `protobuf:"bytes,1,rep,name=Commands,proto3" json:"Commands,omitempty"` +} + +func (x *Statements) Reset() { + *x = Statements{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Statements) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Statements) ProtoMessage() {} + +func (x *Statements) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Statements.ProtoReflect.Descriptor instead. 
+func (*Statements) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_v5_proto_database_proto_rawDescGZIP(), []int{13} +} + +func (x *Statements) GetCommands() []string { + if x != nil { + return x.Commands + } + return nil +} + +type Empty struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Empty) Reset() { + *x = Empty{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Empty) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Empty) ProtoMessage() {} + +func (x *Empty) ProtoReflect() protoreflect.Message { + mi := &file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Empty.ProtoReflect.Descriptor instead. +func (*Empty) Descriptor() ([]byte, []int) { + return file_sdk_database_dbplugin_v5_proto_database_proto_rawDescGZIP(), []int{14} +} + +var File_sdk_database_dbplugin_v5_proto_database_proto protoreflect.FileDescriptor + +var file_sdk_database_dbplugin_v5_proto_database_proto_rawDesc = []byte{ + 0x0a, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x64, + 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x76, 0x35, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x0b, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x1a, 0x1c, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x7a, 0x0a, 0x11, 0x49, + 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x38, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0a, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x61, 0x74, 0x61, 0x12, 0x2b, 0x0a, 0x11, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4e, 0x0a, 0x12, 0x49, 0x6e, 0x69, 0x74, 0x69, + 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, + 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0a, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x93, 0x03, 0x0a, 0x0e, 0x4e, 0x65, 0x77, 0x55, + 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 
0x0a, 0x0f, 0x75, 0x73, + 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, + 0x35, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x52, 0x0e, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x3a, 0x0a, 0x0a, + 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, + 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x64, + 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x12, 0x48, 0x0a, 0x13, 0x72, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x12, 0x72, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, + 0x6b, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x63, + 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, + 0x65, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, + 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x50, 0x0a, + 0x0e, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x6f, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, + 0x2d, 0x0a, 0x0f, 0x4e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x8d, + 0x02, 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x37, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, + 
0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, + 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, + 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x3d, 0x0a, 0x0a, 0x65, 0x78, 0x70, + 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x43, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x78, + 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x64, + 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, + 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x22, 0x6c, + 0x0a, 0x0e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, + 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x77, 0x50, 0x61, 0x73, 0x73, 0x77, + 0x6f, 0x72, 0x64, 0x12, 0x37, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x70, 0x0a, 0x0f, + 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, + 0x24, 0x0a, 0x0e, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6e, 0x65, 0x77, 0x50, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x37, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x64, 0x62, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x73, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x8e, + 0x01, 0x0a, 0x10, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0e, 0x6e, 0x65, 0x77, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x6e, 0x65, 0x77, 0x45, 0x78, 0x70, 0x69, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x64, 0x62, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, + 0x14, 0x0a, 0x12, 0x55, 
0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x68, 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, + 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, + 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, + 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x64, 0x62, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, + 0x14, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x22, 0x0a, 0x0c, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x22, 0x28, 0x0a, 0x0a, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x43, 0x6f, 0x6d, 0x6d, 0x61, + 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x43, 0x6f, 0x6d, 0x6d, 0x61, + 0x6e, 0x64, 0x73, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x32, 0xa5, 0x03, 0x0a, + 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x0a, 0x49, 0x6e, 0x69, + 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x12, 0x1e, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x07, 0x4e, 0x65, 0x77, 0x55, + 0x73, 0x65, 0x72, 0x12, 0x1b, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, + 0x35, 0x2e, 0x4e, 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1c, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x4e, + 0x65, 0x77, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, + 0x0a, 0x0a, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1e, 0x2e, 0x64, + 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x64, + 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, + 0x0a, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1e, 0x2e, 0x64, 0x62, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x64, 0x62, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x04, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, + 0x76, 0x35, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 
0x1a, 0x19, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x05, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x12, 0x12, 0x2e, 0x64, + 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x1a, 0x12, 0x2e, 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x76, 0x35, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, + 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f, + 0x64, 0x62, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x76, 0x35, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sdk_database_dbplugin_v5_proto_database_proto_rawDescOnce sync.Once + file_sdk_database_dbplugin_v5_proto_database_proto_rawDescData = file_sdk_database_dbplugin_v5_proto_database_proto_rawDesc +) + +func file_sdk_database_dbplugin_v5_proto_database_proto_rawDescGZIP() []byte { + file_sdk_database_dbplugin_v5_proto_database_proto_rawDescOnce.Do(func() { + file_sdk_database_dbplugin_v5_proto_database_proto_rawDescData = protoimpl.X.CompressGZIP(file_sdk_database_dbplugin_v5_proto_database_proto_rawDescData) + }) + return file_sdk_database_dbplugin_v5_proto_database_proto_rawDescData +} + +var file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_sdk_database_dbplugin_v5_proto_database_proto_goTypes = []interface{}{ + (*InitializeRequest)(nil), // 0: dbplugin.v5.InitializeRequest + (*InitializeResponse)(nil), // 1: dbplugin.v5.InitializeResponse + (*NewUserRequest)(nil), // 2: dbplugin.v5.NewUserRequest + (*UsernameConfig)(nil), // 3: dbplugin.v5.UsernameConfig + (*NewUserResponse)(nil), // 4: dbplugin.v5.NewUserResponse + (*UpdateUserRequest)(nil), // 5: dbplugin.v5.UpdateUserRequest + (*ChangePassword)(nil), // 6: dbplugin.v5.ChangePassword + (*ChangePublicKey)(nil), // 7: dbplugin.v5.ChangePublicKey + (*ChangeExpiration)(nil), // 8: dbplugin.v5.ChangeExpiration + (*UpdateUserResponse)(nil), // 9: dbplugin.v5.UpdateUserResponse + (*DeleteUserRequest)(nil), // 10: dbplugin.v5.DeleteUserRequest + (*DeleteUserResponse)(nil), // 11: dbplugin.v5.DeleteUserResponse + (*TypeResponse)(nil), // 12: dbplugin.v5.TypeResponse + (*Statements)(nil), // 13: dbplugin.v5.Statements + (*Empty)(nil), // 14: dbplugin.v5.Empty + (*structpb.Struct)(nil), // 15: google.protobuf.Struct + (*timestamppb.Timestamp)(nil), // 16: google.protobuf.Timestamp +} +var file_sdk_database_dbplugin_v5_proto_database_proto_depIdxs = []int32{ + 15, // 0: dbplugin.v5.InitializeRequest.config_data:type_name -> google.protobuf.Struct + 15, // 1: dbplugin.v5.InitializeResponse.config_data:type_name -> google.protobuf.Struct + 3, // 2: dbplugin.v5.NewUserRequest.username_config:type_name -> dbplugin.v5.UsernameConfig + 16, // 3: dbplugin.v5.NewUserRequest.expiration:type_name -> google.protobuf.Timestamp + 13, // 4: dbplugin.v5.NewUserRequest.statements:type_name -> dbplugin.v5.Statements + 13, // 5: dbplugin.v5.NewUserRequest.rollback_statements:type_name -> dbplugin.v5.Statements + 6, // 6: dbplugin.v5.UpdateUserRequest.password:type_name -> dbplugin.v5.ChangePassword + 8, // 7: dbplugin.v5.UpdateUserRequest.expiration:type_name -> dbplugin.v5.ChangeExpiration + 7, // 8: 
dbplugin.v5.UpdateUserRequest.public_key:type_name -> dbplugin.v5.ChangePublicKey + 13, // 9: dbplugin.v5.ChangePassword.statements:type_name -> dbplugin.v5.Statements + 13, // 10: dbplugin.v5.ChangePublicKey.statements:type_name -> dbplugin.v5.Statements + 16, // 11: dbplugin.v5.ChangeExpiration.new_expiration:type_name -> google.protobuf.Timestamp + 13, // 12: dbplugin.v5.ChangeExpiration.statements:type_name -> dbplugin.v5.Statements + 13, // 13: dbplugin.v5.DeleteUserRequest.statements:type_name -> dbplugin.v5.Statements + 0, // 14: dbplugin.v5.Database.Initialize:input_type -> dbplugin.v5.InitializeRequest + 2, // 15: dbplugin.v5.Database.NewUser:input_type -> dbplugin.v5.NewUserRequest + 5, // 16: dbplugin.v5.Database.UpdateUser:input_type -> dbplugin.v5.UpdateUserRequest + 10, // 17: dbplugin.v5.Database.DeleteUser:input_type -> dbplugin.v5.DeleteUserRequest + 14, // 18: dbplugin.v5.Database.Type:input_type -> dbplugin.v5.Empty + 14, // 19: dbplugin.v5.Database.Close:input_type -> dbplugin.v5.Empty + 1, // 20: dbplugin.v5.Database.Initialize:output_type -> dbplugin.v5.InitializeResponse + 4, // 21: dbplugin.v5.Database.NewUser:output_type -> dbplugin.v5.NewUserResponse + 9, // 22: dbplugin.v5.Database.UpdateUser:output_type -> dbplugin.v5.UpdateUserResponse + 11, // 23: dbplugin.v5.Database.DeleteUser:output_type -> dbplugin.v5.DeleteUserResponse + 12, // 24: dbplugin.v5.Database.Type:output_type -> dbplugin.v5.TypeResponse + 14, // 25: dbplugin.v5.Database.Close:output_type -> dbplugin.v5.Empty + 20, // [20:26] is the sub-list for method output_type + 14, // [14:20] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the sub-list for field type_name +} + +func init() { file_sdk_database_dbplugin_v5_proto_database_proto_init() } +func file_sdk_database_dbplugin_v5_proto_database_proto_init() { + if File_sdk_database_dbplugin_v5_proto_database_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InitializeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InitializeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NewUserRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UsernameConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NewUserResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateUserRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ChangePassword); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ChangePublicKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ChangeExpiration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateUserResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteUserRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteUserResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TypeResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Statements); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Empty); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_sdk_database_dbplugin_v5_proto_database_proto_rawDesc, + NumEnums: 0, + NumMessages: 15, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_sdk_database_dbplugin_v5_proto_database_proto_goTypes, + DependencyIndexes: file_sdk_database_dbplugin_v5_proto_database_proto_depIdxs, + MessageInfos: file_sdk_database_dbplugin_v5_proto_database_proto_msgTypes, + }.Build() + File_sdk_database_dbplugin_v5_proto_database_proto = out.File + file_sdk_database_dbplugin_v5_proto_database_proto_rawDesc = nil + 
file_sdk_database_dbplugin_v5_proto_database_proto_goTypes = nil + file_sdk_database_dbplugin_v5_proto_database_proto_depIdxs = nil +} diff --git a/sdk/database/dbplugin/v5/proto/database.proto b/sdk/database/dbplugin/v5/proto/database.proto new file mode 100644 index 0000000..d6f877b --- /dev/null +++ b/sdk/database/dbplugin/v5/proto/database.proto @@ -0,0 +1,108 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +syntax = "proto3"; +package dbplugin.v5; + +option go_package = "github.com/hashicorp/vault/sdk/database/dbplugin/v5/proto"; + +import "google/protobuf/struct.proto"; +import "google/protobuf/timestamp.proto"; + +///////////////// +// Initialize() +///////////////// +message InitializeRequest { + google.protobuf.Struct config_data = 1; + bool verify_connection = 2; +} + +message InitializeResponse { + google.protobuf.Struct config_data = 1; +} +///////////////// +// NewUser() +///////////////// + +message NewUserRequest { + UsernameConfig username_config = 1; + string password = 2; + google.protobuf.Timestamp expiration = 3; + Statements statements = 4; + Statements rollback_statements = 5; + int32 credential_type = 6; + bytes public_key = 7; + string subject = 8; +} + +message UsernameConfig { + string display_name = 1; + string role_name = 2; +} + +message NewUserResponse { + string username = 1; +} + +///////////////// +// UpdateUser() +///////////////// +message UpdateUserRequest { + string username = 1; + ChangePassword password = 2; + ChangeExpiration expiration = 3; + ChangePublicKey public_key = 4; + int32 credential_type = 5; +} + +message ChangePassword { + string new_password = 1; + Statements statements = 2; +} + +message ChangePublicKey { + bytes new_public_key = 1; + Statements statements = 2; +} + +message ChangeExpiration { + google.protobuf.Timestamp new_expiration = 1; + Statements statements = 2; +} + +message UpdateUserResponse {} + +///////////////// +// DeleteUser() +///////////////// +message DeleteUserRequest { + string username = 1; + Statements statements = 2; +} + +message DeleteUserResponse {} + +///////////////// +// Type() +///////////////// +message TypeResponse { + string Type = 1; +} + +///////////////// +// General purpose +///////////////// +message Statements { + repeated string Commands = 1; +} + +message Empty {} + +service Database { + rpc Initialize(InitializeRequest) returns (InitializeResponse); + rpc NewUser(NewUserRequest) returns (NewUserResponse); + rpc UpdateUser(UpdateUserRequest) returns (UpdateUserResponse); + rpc DeleteUser(DeleteUserRequest) returns (DeleteUserResponse); + rpc Type(Empty) returns (TypeResponse); + rpc Close(Empty) returns (Empty); +} \ No newline at end of file diff --git a/sdk/database/dbplugin/v5/proto/database_grpc.pb.go b/sdk/database/dbplugin/v5/proto/database_grpc.pb.go new file mode 100644 index 0000000..8a549fe --- /dev/null +++ b/sdk/database/dbplugin/v5/proto/database_grpc.pb.go @@ -0,0 +1,281 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package proto + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// DatabaseClient is the client API for Database service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type DatabaseClient interface { + Initialize(ctx context.Context, in *InitializeRequest, opts ...grpc.CallOption) (*InitializeResponse, error) + NewUser(ctx context.Context, in *NewUserRequest, opts ...grpc.CallOption) (*NewUserResponse, error) + UpdateUser(ctx context.Context, in *UpdateUserRequest, opts ...grpc.CallOption) (*UpdateUserResponse, error) + DeleteUser(ctx context.Context, in *DeleteUserRequest, opts ...grpc.CallOption) (*DeleteUserResponse, error) + Type(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TypeResponse, error) + Close(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) +} + +type databaseClient struct { + cc grpc.ClientConnInterface +} + +func NewDatabaseClient(cc grpc.ClientConnInterface) DatabaseClient { + return &databaseClient{cc} +} + +func (c *databaseClient) Initialize(ctx context.Context, in *InitializeRequest, opts ...grpc.CallOption) (*InitializeResponse, error) { + out := new(InitializeResponse) + err := c.cc.Invoke(ctx, "/dbplugin.v5.Database/Initialize", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) NewUser(ctx context.Context, in *NewUserRequest, opts ...grpc.CallOption) (*NewUserResponse, error) { + out := new(NewUserResponse) + err := c.cc.Invoke(ctx, "/dbplugin.v5.Database/NewUser", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) UpdateUser(ctx context.Context, in *UpdateUserRequest, opts ...grpc.CallOption) (*UpdateUserResponse, error) { + out := new(UpdateUserResponse) + err := c.cc.Invoke(ctx, "/dbplugin.v5.Database/UpdateUser", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) DeleteUser(ctx context.Context, in *DeleteUserRequest, opts ...grpc.CallOption) (*DeleteUserResponse, error) { + out := new(DeleteUserResponse) + err := c.cc.Invoke(ctx, "/dbplugin.v5.Database/DeleteUser", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) Type(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TypeResponse, error) { + out := new(TypeResponse) + err := c.cc.Invoke(ctx, "/dbplugin.v5.Database/Type", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) Close(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/dbplugin.v5.Database/Close", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// DatabaseServer is the server API for Database service. +// All implementations must embed UnimplementedDatabaseServer +// for forward compatibility +type DatabaseServer interface { + Initialize(context.Context, *InitializeRequest) (*InitializeResponse, error) + NewUser(context.Context, *NewUserRequest) (*NewUserResponse, error) + UpdateUser(context.Context, *UpdateUserRequest) (*UpdateUserResponse, error) + DeleteUser(context.Context, *DeleteUserRequest) (*DeleteUserResponse, error) + Type(context.Context, *Empty) (*TypeResponse, error) + Close(context.Context, *Empty) (*Empty, error) + mustEmbedUnimplementedDatabaseServer() +} + +// UnimplementedDatabaseServer must be embedded to have forward compatible implementations. 
+type UnimplementedDatabaseServer struct { +} + +func (UnimplementedDatabaseServer) Initialize(context.Context, *InitializeRequest) (*InitializeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Initialize not implemented") +} +func (UnimplementedDatabaseServer) NewUser(context.Context, *NewUserRequest) (*NewUserResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NewUser not implemented") +} +func (UnimplementedDatabaseServer) UpdateUser(context.Context, *UpdateUserRequest) (*UpdateUserResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateUser not implemented") +} +func (UnimplementedDatabaseServer) DeleteUser(context.Context, *DeleteUserRequest) (*DeleteUserResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteUser not implemented") +} +func (UnimplementedDatabaseServer) Type(context.Context, *Empty) (*TypeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Type not implemented") +} +func (UnimplementedDatabaseServer) Close(context.Context, *Empty) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Close not implemented") +} +func (UnimplementedDatabaseServer) mustEmbedUnimplementedDatabaseServer() {} + +// UnsafeDatabaseServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to DatabaseServer will +// result in compilation errors. +type UnsafeDatabaseServer interface { + mustEmbedUnimplementedDatabaseServer() +} + +func RegisterDatabaseServer(s grpc.ServiceRegistrar, srv DatabaseServer) { + s.RegisterService(&Database_ServiceDesc, srv) +} + +func _Database_Initialize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InitializeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).Initialize(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dbplugin.v5.Database/Initialize", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).Initialize(ctx, req.(*InitializeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_NewUser_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NewUserRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).NewUser(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dbplugin.v5.Database/NewUser", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).NewUser(ctx, req.(*NewUserRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_UpdateUser_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateUserRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).UpdateUser(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dbplugin.v5.Database/UpdateUser", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).UpdateUser(ctx, req.(*UpdateUserRequest)) + 
} + return interceptor(ctx, in, info, handler) +} + +func _Database_DeleteUser_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteUserRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).DeleteUser(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dbplugin.v5.Database/DeleteUser", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).DeleteUser(ctx, req.(*DeleteUserRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_Type_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).Type(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dbplugin.v5.Database/Type", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).Type(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_Close_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).Close(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dbplugin.v5.Database/Close", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).Close(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +// Database_ServiceDesc is the grpc.ServiceDesc for Database service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Database_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "dbplugin.v5.Database", + HandlerType: (*DatabaseServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Initialize", + Handler: _Database_Initialize_Handler, + }, + { + MethodName: "NewUser", + Handler: _Database_NewUser_Handler, + }, + { + MethodName: "UpdateUser", + Handler: _Database_UpdateUser_Handler, + }, + { + MethodName: "DeleteUser", + Handler: _Database_DeleteUser_Handler, + }, + { + MethodName: "Type", + Handler: _Database_Type_Handler, + }, + { + MethodName: "Close", + Handler: _Database_Close_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "sdk/database/dbplugin/v5/proto/database.proto", +} diff --git a/sdk/database/dbplugin/v5/testing/test_helpers.go b/sdk/database/dbplugin/v5/testing/test_helpers.go new file mode 100644 index 0000000..9be65c6 --- /dev/null +++ b/sdk/database/dbplugin/v5/testing/test_helpers.go @@ -0,0 +1,121 @@ +// Copyright (c) HashiCorp, Inc. 
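The generated stubs above are the complete wire contract for v5 database plugins: a `DatabaseClient`, a `DatabaseServer` interface with an `UnimplementedDatabaseServer` base for forward compatibility, and the `Database_ServiceDesc` used for registration. As a rough sketch of how these pieces fit together, the following hypothetical `stubDatabase` registers on a plain gRPC listener; real Vault plugins are launched via hashicorp/go-plugin rather than served directly like this.

```go
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	proto "github.com/hashicorp/vault/sdk/database/dbplugin/v5/proto"
)

// stubDatabase embeds UnimplementedDatabaseServer so unimplemented RPCs
// return codes.Unimplemented, and overrides only the Type RPC.
type stubDatabase struct {
	proto.UnimplementedDatabaseServer
}

func (s *stubDatabase) Type(ctx context.Context, _ *proto.Empty) (*proto.TypeResponse, error) {
	return &proto.TypeResponse{Type: "stub"}, nil
}

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	proto.RegisterDatabaseServer(srv, &stubDatabase{})
	log.Printf("serving dbplugin.v5.Database on %s", lis.Addr())
	log.Fatal(srv.Serve(lis))
}
```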
+// SPDX-License-Identifier: MPL-2.0
+
+package dbtesting
+
+import (
+	"context"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/hashicorp/go-secure-stdlib/parseutil"
+	"github.com/hashicorp/vault/sdk/database/dbplugin/v5"
+)
+
+func getRequestTimeout(t *testing.T) time.Duration {
+	rawDur := os.Getenv("VAULT_TEST_DATABASE_REQUEST_TIMEOUT")
+	if rawDur == "" {
+		// Note: we incremented the default timeout from 5 to 10 seconds in a bid
+		// to fix sporadic failures of mssql_test.go tests TestInitialize() and
+		// TestUpdateUser_password().
+
+		return 10 * time.Second
+	}
+
+	dur, err := parseutil.ParseDurationSecond(rawDur)
+	if err != nil {
+		t.Fatalf("Failed to parse custom request timeout %q: %s", rawDur, err)
+	}
+	return dur
+}
+
+// AssertInitializeCircleCiTest helps diagnose CircleCI failures within AssertInitialize for mssql tests failing
+// with "Failed to initialize: error verifying connection ...". It marks the test as failed instead of failing it fatally.
+func AssertInitializeCircleCiTest(t *testing.T, db dbplugin.Database, req dbplugin.InitializeRequest) dbplugin.InitializeResponse {
+	t.Helper()
+	maxAttempts := 5
+	var resp dbplugin.InitializeResponse
+	var err error
+
+	for i := 1; i <= maxAttempts; i++ {
+		resp, err = verifyInitialize(t, db, req)
+		if err != nil {
+			t.Errorf("Failed AssertInitialize attempt: %d with error:\n%+v\n", i, err)
+			time.Sleep(1 * time.Second)
+			continue
+		}
+
+		if i > 1 {
+			t.Logf("AssertInitialize worked the %d time around with a 1 second sleep", i)
+		}
+		break
+	}
+
+	return resp
+}
+
+func AssertInitialize(t *testing.T, db dbplugin.Database, req dbplugin.InitializeRequest) dbplugin.InitializeResponse {
+	t.Helper()
+	resp, err := verifyInitialize(t, db, req)
+	if err != nil {
+		t.Fatalf("Failed to initialize: %s", err)
+	}
+	return resp
+}
+
+func verifyInitialize(t *testing.T, db dbplugin.Database, req dbplugin.InitializeRequest) (dbplugin.InitializeResponse, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), getRequestTimeout(t))
+	defer cancel()
+
+	return db.Initialize(ctx, req)
+}
+
+func AssertNewUser(t *testing.T, db dbplugin.Database, req dbplugin.NewUserRequest) dbplugin.NewUserResponse {
+	t.Helper()
+
+	ctx, cancel := context.WithTimeout(context.Background(), getRequestTimeout(t))
+	defer cancel()
+
+	resp, err := db.NewUser(ctx, req)
+	if err != nil {
+		t.Fatalf("Failed to create new user: %s", err)
+	}
+
+	if resp.Username == "" {
+		t.Fatalf("Missing username from NewUser response")
+	}
+	return resp
+}
+
+func AssertUpdateUser(t *testing.T, db dbplugin.Database, req dbplugin.UpdateUserRequest) {
+	t.Helper()
+
+	ctx, cancel := context.WithTimeout(context.Background(), getRequestTimeout(t))
+	defer cancel()
+
+	_, err := db.UpdateUser(ctx, req)
+	if err != nil {
+		t.Fatalf("Failed to update user: %s", err)
+	}
+}
+
+func AssertDeleteUser(t *testing.T, db dbplugin.Database, req dbplugin.DeleteUserRequest) {
+	t.Helper()
+
+	ctx, cancel := context.WithTimeout(context.Background(), getRequestTimeout(t))
+	defer cancel()
+
+	_, err := db.DeleteUser(ctx, req)
+	if err != nil {
+		t.Fatalf("Failed to delete user %q: %s", req.Username, err)
+	}
+}
+
+func AssertClose(t *testing.T, db dbplugin.Database) {
+	t.Helper()
+	err := db.Close()
+	if err != nil {
+		t.Fatalf("Failed to close database: %s", err)
+	}
+}
diff --git a/sdk/database/helper/connutil/connutil.go b/sdk/database/helper/connutil/connutil.go
new file mode 100644
index 0000000..50582aa
--- /dev/null
+++ b/sdk/database/helper/connutil/connutil.go
@@ -0,0 +1,26
@@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package connutil + +import ( + "context" + "errors" + "sync" +) + +var ErrNotInitialized = errors.New("connection has not been initialized") + +// ConnectionProducer can be used as an embedded interface in the Database +// definition. It implements the methods dealing with individual database +// connections and is used in all the builtin database types. +type ConnectionProducer interface { + Close() error + Init(context.Context, map[string]interface{}, bool) (map[string]interface{}, error) + Connection(context.Context) (interface{}, error) + + sync.Locker + + // DEPRECATED, will be removed in 0.12 + Initialize(context.Context, map[string]interface{}, bool) error +} diff --git a/sdk/database/helper/connutil/sql.go b/sdk/database/helper/connutil/sql.go new file mode 100644 index 0000000..d1af480 --- /dev/null +++ b/sdk/database/helper/connutil/sql.go @@ -0,0 +1,210 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package connutil + +import ( + "context" + "database/sql" + "fmt" + "net/url" + "strings" + "sync" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/sdk/database/dbplugin" + "github.com/hashicorp/vault/sdk/database/helper/dbutil" + "github.com/mitchellh/mapstructure" +) + +var _ ConnectionProducer = &SQLConnectionProducer{} + +// SQLConnectionProducer implements ConnectionProducer and provides a generic producer for most sql databases +type SQLConnectionProducer struct { + ConnectionURL string `json:"connection_url" mapstructure:"connection_url" structs:"connection_url"` + MaxOpenConnections int `json:"max_open_connections" mapstructure:"max_open_connections" structs:"max_open_connections"` + MaxIdleConnections int `json:"max_idle_connections" mapstructure:"max_idle_connections" structs:"max_idle_connections"` + MaxConnectionLifetimeRaw interface{} `json:"max_connection_lifetime" mapstructure:"max_connection_lifetime" structs:"max_connection_lifetime"` + Username string `json:"username" mapstructure:"username" structs:"username"` + Password string `json:"password" mapstructure:"password" structs:"password"` + DisableEscaping bool `json:"disable_escaping" mapstructure:"disable_escaping" structs:"disable_escaping"` + + Type string + RawConfig map[string]interface{} + maxConnectionLifetime time.Duration + Initialized bool + db *sql.DB + sync.Mutex +} + +func (c *SQLConnectionProducer) Initialize(ctx context.Context, conf map[string]interface{}, verifyConnection bool) error { + _, err := c.Init(ctx, conf, verifyConnection) + return err +} + +func (c *SQLConnectionProducer) Init(ctx context.Context, conf map[string]interface{}, verifyConnection bool) (map[string]interface{}, error) { + c.Lock() + defer c.Unlock() + + c.RawConfig = conf + + err := mapstructure.WeakDecode(conf, &c) + if err != nil { + return nil, err + } + + if len(c.ConnectionURL) == 0 { + return nil, fmt.Errorf("connection_url cannot be empty") + } + + // Do not allow the username or password template pattern to be used as + // part of the user-supplied username or password + if strings.Contains(c.Username, "{{username}}") || + strings.Contains(c.Username, "{{password}}") || + strings.Contains(c.Password, "{{username}}") || + strings.Contains(c.Password, "{{password}}") { + + return nil, fmt.Errorf("username and/or password cannot contain the template variables") + } + + // Don't escape special characters for MySQL password + // Also don't 
escape special characters for the username and password if + // the disable_escaping parameter is set to true + username := c.Username + password := c.Password + if !c.DisableEscaping { + username = url.PathEscape(c.Username) + } + if (c.Type != "mysql") && !c.DisableEscaping { + password = url.PathEscape(c.Password) + } + + // QueryHelper doesn't do any SQL escaping, but if it starts to do so + // then maybe we won't be able to use it to do URL substitution any more. + c.ConnectionURL = dbutil.QueryHelper(c.ConnectionURL, map[string]string{ + "username": username, + "password": password, + }) + + if c.MaxOpenConnections == 0 { + c.MaxOpenConnections = 4 + } + + if c.MaxIdleConnections == 0 { + c.MaxIdleConnections = c.MaxOpenConnections + } + if c.MaxIdleConnections > c.MaxOpenConnections { + c.MaxIdleConnections = c.MaxOpenConnections + } + if c.MaxConnectionLifetimeRaw == nil { + c.MaxConnectionLifetimeRaw = "0s" + } + + c.maxConnectionLifetime, err = parseutil.ParseDurationSecond(c.MaxConnectionLifetimeRaw) + if err != nil { + return nil, errwrap.Wrapf("invalid max_connection_lifetime: {{err}}", err) + } + + // Set initialized to true at this point since all fields are set, + // and the connection can be established at a later time. + c.Initialized = true + + if verifyConnection { + if _, err := c.Connection(ctx); err != nil { + return nil, errwrap.Wrapf("error verifying connection: {{err}}", err) + } + + if err := c.db.PingContext(ctx); err != nil { + return nil, errwrap.Wrapf("error verifying connection: {{err}}", err) + } + } + + return c.RawConfig, nil +} + +func (c *SQLConnectionProducer) Connection(ctx context.Context) (interface{}, error) { + if !c.Initialized { + return nil, ErrNotInitialized + } + + // If we already have a DB, test it and return + if c.db != nil { + if err := c.db.PingContext(ctx); err == nil { + return c.db, nil + } + // If the ping was unsuccessful, close it and ignore errors as we'll be + // reestablishing anyways + c.db.Close() + } + + // For mssql backend, switch to sqlserver instead + dbType := c.Type + if c.Type == "mssql" { + dbType = "sqlserver" + } + + // Otherwise, attempt to make connection + conn := c.ConnectionURL + + // PostgreSQL specific settings + if strings.HasPrefix(conn, "postgres://") || strings.HasPrefix(conn, "postgresql://") { + // Ensure timezone is set to UTC for all the connections + if strings.Contains(conn, "?") { + conn += "&timezone=UTC" + } else { + conn += "?timezone=UTC" + } + + // Ensure a reasonable application_name is set + if !strings.Contains(conn, "application_name") { + conn += "&application_name=vault" + } + } + + var err error + c.db, err = sql.Open(dbType, conn) + if err != nil { + return nil, err + } + + // Set some connection pool settings. We don't need much of this, + // since the request rate shouldn't be high. + c.db.SetMaxOpenConns(c.MaxOpenConnections) + c.db.SetMaxIdleConns(c.MaxIdleConnections) + c.db.SetConnMaxLifetime(c.maxConnectionLifetime) + + return c.db, nil +} + +func (c *SQLConnectionProducer) SecretValues() map[string]interface{} { + return map[string]interface{}{ + c.Password: "[password]", + } +} + +// Close attempts to close the connection +func (c *SQLConnectionProducer) Close() error { + // Grab the write lock + c.Lock() + defer c.Unlock() + + if c.db != nil { + c.db.Close() + } + + c.db = nil + + return nil +} + +// SetCredentials uses provided information to set/create a user in the +// database. 
Unlike CreateUser, this method requires a username be provided and
+// uses the name given, instead of generating a name. This is used for creating
+// and setting the password of static accounts, as well as rolling back
+// passwords in the database in the event an updated database fails to save in
+// Vault's storage.
+func (c *SQLConnectionProducer) SetCredentials(ctx context.Context, statements dbplugin.Statements, staticUser dbplugin.StaticUserConfig) (username, password string, err error) {
+	return "", "", dbutil.Unimplemented()
+}
diff --git a/sdk/database/helper/connutil/sql_test.go b/sdk/database/helper/connutil/sql_test.go
new file mode 100644
index 0000000..9f29d4a
--- /dev/null
+++ b/sdk/database/helper/connutil/sql_test.go
@@ -0,0 +1,141 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package connutil
+
+import (
+	"context"
+	"net/url"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestSQLPasswordChars(t *testing.T) {
+	testCases := []struct {
+		Username string
+		Password string
+	}{
+		{"postgres", "password{0}"},
+		{"postgres", "pass:word"},
+		{"postgres", "pass/word"},
+		{"postgres", "p@ssword"},
+		{"postgres", "pass\"word\""},
+	}
+	for _, tc := range testCases {
+		t.Logf("username %q password %q", tc.Username, tc.Password)
+
+		sql := &SQLConnectionProducer{}
+		ctx := context.Background()
+		conf := map[string]interface{}{
+			"connection_url":   "postgres://{{username}}:{{password}}@localhost:5432/mydb",
+			"username":         tc.Username,
+			"password":         tc.Password,
+			"disable_escaping": false,
+		}
+		_, err := sql.Init(ctx, conf, false)
+		if err != nil {
+			t.Errorf("Init error on %q %q: %+v", tc.Username, tc.Password, err)
+		} else {
+			// This jumps down a few layers...
+			// Connection() uses sql.Open uses lib/pq uses net/url.Parse
+			u, err := url.Parse(sql.ConnectionURL)
+			if err != nil {
+				t.Errorf("URL parse error on %q %q: %+v", tc.Username, tc.Password, err)
+			} else {
+				username := u.User.Username()
+				password, pPresent := u.User.Password()
+				if username != tc.Username {
+					t.Errorf("Parsed username %q != original username %q", username, tc.Username)
+				}
+				if !pPresent {
+					t.Errorf("Password %q not present", tc.Password)
+				} else if password != tc.Password {
+					t.Errorf("Parsed password %q != original password %q", password, tc.Password)
+				}
+			}
+		}
+	}
+}
+
+func TestSQLDisableEscaping(t *testing.T) {
+	testCases := []struct {
+		Username        string
+		Password        string
+		DisableEscaping bool
+	}{
+		{"mssql{0}", "password{0}", true},
+		{"mssql{0}", "password{0}", false},
+		{"ms\"sql\"", "pass\"word\"", true},
+		{"ms\"sql\"", "pass\"word\"", false},
+		{"ms'sq;l", "pass'wor;d", true},
+		{"ms'sq;l", "pass'wor;d", false},
+	}
+	for _, tc := range testCases {
+		t.Logf("username %q password %q disable_escaping %t", tc.Username, tc.Password, tc.DisableEscaping)
+
+		sql := &SQLConnectionProducer{}
+		ctx := context.Background()
+		conf := map[string]interface{}{
+			"connection_url":   "server=localhost;port=1433;user id={{username}};password={{password}};database=mydb;",
+			"username":         tc.Username,
+			"password":         tc.Password,
+			"disable_escaping": tc.DisableEscaping,
+		}
+		_, err := sql.Init(ctx, conf, false)
+		if err != nil {
+			t.Errorf("Init error on %q %q: %+v", tc.Username, tc.Password, err)
+		} else {
+			if tc.DisableEscaping {
+				if !strings.Contains(sql.ConnectionURL, tc.Username) || !strings.Contains(sql.ConnectionURL, tc.Password) {
+					t.Errorf("Raw username and/or password missing from ConnectionURL")
+				}
+			} else {
+				if
strings.Contains(sql.ConnectionURL, tc.Username) || strings.Contains(sql.ConnectionURL, tc.Password) { + t.Errorf("Raw username and/or password was present in ConnectionURL") + } + } + } + } +} + +func TestSQLDisallowTemplates(t *testing.T) { + testCases := []struct { + Username string + Password string + }{ + {"{{username}}", "pass"}, + {"{{password}}", "pass"}, + {"user", "{{username}}"}, + {"user", "{{password}}"}, + {"{{username}}", "{{password}}"}, + {"abc{username}xyz", "123{password}789"}, + {"abc{{username}}xyz", "123{{password}}789"}, + {"abc{{{username}}}xyz", "123{{{password}}}789"}, + } + for _, disableEscaping := range []bool{true, false} { + for _, tc := range testCases { + t.Logf("username %q password %q disable_escaping %t", tc.Username, tc.Password, disableEscaping) + + sql := &SQLConnectionProducer{} + ctx := context.Background() + conf := map[string]interface{}{ + "connection_url": "server=localhost;port=1433;user id={{username}};password={{password}};database=mydb;", + "username": tc.Username, + "password": tc.Password, + "disable_escaping": disableEscaping, + } + _, err := sql.Init(ctx, conf, false) + if disableEscaping { + if err != nil { + if !assert.EqualError(t, err, "username and/or password cannot contain the template variables") { + t.Errorf("Init error on %q %q: %+v", tc.Username, tc.Password, err) + } + } else { + assert.Equal(t, sql.ConnectionURL, "server=localhost;port=1433;user id=abc{username}xyz;password=123{password}789;database=mydb;") + } + } + } + } +} diff --git a/sdk/database/helper/credsutil/credsutil.go b/sdk/database/helper/credsutil/credsutil.go new file mode 100644 index 0000000..503999c --- /dev/null +++ b/sdk/database/helper/credsutil/credsutil.go @@ -0,0 +1,50 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package credsutil + +import ( + "context" + "fmt" + "time" + + "github.com/hashicorp/go-secure-stdlib/base62" + "github.com/hashicorp/vault/sdk/database/dbplugin" +) + +// CredentialsProducer can be used as an embedded interface in the Database +// definition. It implements the methods for generating user information for a +// particular database type and is used in all the builtin database types. +type CredentialsProducer interface { + GenerateCredentials(context.Context) (string, error) + GenerateUsername(dbplugin.UsernameConfig) (string, error) + GeneratePassword() (string, error) + GenerateExpiration(time.Time) (string, error) +} + +const ( + reqStr = `A1a-` + minStrLen = 10 +) + +// RandomAlphaNumeric returns a random string of characters [A-Za-z0-9-] +// of the provided length. The string generated takes up to 4 characters +// of space that are predefined and prepended to ensure password +// character requirements. It also requires a min length of 10 characters. +func RandomAlphaNumeric(length int, prependA1a bool) (string, error) { + if length < minStrLen { + return "", fmt.Errorf("minimum length of %d is required", minStrLen) + } + + var prefix string + if prependA1a { + prefix = reqStr + } + + randomStr, err := base62.Random(length - len(prefix)) + if err != nil { + return "", err + } + + return prefix + randomStr, nil +} diff --git a/sdk/database/helper/credsutil/credsutil_test.go b/sdk/database/helper/credsutil/credsutil_test.go new file mode 100644 index 0000000..77e1a28 --- /dev/null +++ b/sdk/database/helper/credsutil/credsutil_test.go @@ -0,0 +1,43 @@ +// Copyright (c) HashiCorp, Inc. 
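`credsutil_test.go` below exercises `RandomAlphaNumeric`, defined just above it. A short usage sketch (the printed values are illustrative): when `prependA1a` is true the result starts with the fixed `A1a-` prefix, which guarantees an uppercase letter, a digit, a lowercase letter, and a dash for password-complexity rules, while the remaining characters come from `base62.Random`.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/sdk/database/helper/credsutil"
)

func main() {
	// 20 characters total: the 4-character "A1a-" prefix plus 16 random ones.
	pw, err := credsutil.RandomAlphaNumeric(20, true)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pw)

	// Requests below the 10-character minimum are rejected.
	if _, err := credsutil.RandomAlphaNumeric(8, false); err != nil {
		fmt.Println(err) // "minimum length of 10 is required"
	}
}
```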
+// SPDX-License-Identifier: MPL-2.0 + +package credsutil + +import ( + "strings" + "testing" +) + +func TestRandomAlphaNumeric(t *testing.T) { + s, err := RandomAlphaNumeric(10, true) + if err != nil { + t.Fatalf("Unexpected error: %s", err) + } + if len(s) != 10 { + t.Fatalf("Unexpected length of string, expected 10, got string: %s", s) + } + + s, err = RandomAlphaNumeric(20, true) + if err != nil { + t.Fatalf("Unexpected error: %s", err) + } + if len(s) != 20 { + t.Fatalf("Unexpected length of string, expected 20, got string: %s", s) + } + + if !strings.Contains(s, reqStr) { + t.Fatalf("Expected %s to contain %s", s, reqStr) + } + + s, err = RandomAlphaNumeric(20, false) + if err != nil { + t.Fatalf("Unexpected error: %s", err) + } + if len(s) != 20 { + t.Fatalf("Unexpected length of string, expected 20, got string: %s", s) + } + + if strings.Contains(s, reqStr) { + t.Fatalf("Expected %s not to contain %s", s, reqStr) + } +} diff --git a/sdk/database/helper/credsutil/sql.go b/sdk/database/helper/credsutil/sql.go new file mode 100644 index 0000000..2c27adf --- /dev/null +++ b/sdk/database/helper/credsutil/sql.go @@ -0,0 +1,59 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package credsutil + +import ( + "context" + "time" + + "github.com/hashicorp/vault/sdk/database/dbplugin" +) + +const ( + NoneLength int = -1 +) + +// SQLCredentialsProducer implements CredentialsProducer and provides a generic credentials producer for most sql database types. +type SQLCredentialsProducer struct { + DisplayNameLen int + RoleNameLen int + UsernameLen int + Separator string + LowercaseUsername bool +} + +func (scp *SQLCredentialsProducer) GenerateCredentials(ctx context.Context) (string, error) { + password, err := scp.GeneratePassword() + if err != nil { + return "", err + } + return password, nil +} + +func (scp *SQLCredentialsProducer) GenerateUsername(config dbplugin.UsernameConfig) (string, error) { + caseOp := KeepCase + if scp.LowercaseUsername { + caseOp = Lowercase + } + return GenerateUsername( + DisplayName(config.DisplayName, scp.DisplayNameLen), + RoleName(config.RoleName, scp.RoleNameLen), + Case(caseOp), + Separator(scp.Separator), + MaxLength(scp.UsernameLen), + ) +} + +func (scp *SQLCredentialsProducer) GeneratePassword() (string, error) { + password, err := RandomAlphaNumeric(20, true) + if err != nil { + return "", err + } + + return password, nil +} + +func (scp *SQLCredentialsProducer) GenerateExpiration(ttl time.Time) (string, error) { + return ttl.Format("2006-01-02 15:04:05-0700"), nil +} diff --git a/sdk/database/helper/credsutil/usernames.go b/sdk/database/helper/credsutil/usernames.go new file mode 100644 index 0000000..962208a --- /dev/null +++ b/sdk/database/helper/credsutil/usernames.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. 
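[Editorial sketch, not part of the upstream diff: how SQLCredentialsProducer from sql.go above might be configured and used. The field values (separator, the 63-character limit) are hypothetical choices for illustration, not upstream defaults.]

```go
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/vault/sdk/database/dbplugin"
	"github.com/hashicorp/vault/sdk/database/helper/credsutil"
)

func main() {
	producer := &credsutil.SQLCredentialsProducer{
		DisplayNameLen:    10,
		RoleNameLen:       10,
		UsernameLen:       63, // hypothetical identifier limit for the target database
		Separator:         "-",
		LowercaseUsername: true,
	}

	username, err := producer.GenerateUsername(dbplugin.UsernameConfig{
		DisplayName: "token-service",
		RoleName:    "readonly",
	})
	if err != nil {
		panic(err)
	}
	// Shape: v-<display:10>-<role:10>-<20 random chars>-<unix time>,
	// lowercased and truncated to 63 characters.
	fmt.Println(username)

	password, err := producer.GeneratePassword()
	if err != nil {
		panic(err)
	}
	fmt.Println(password) // 20 characters, beginning with the "A1a-" prefix

	exp, err := producer.GenerateExpiration(time.Now().Add(time.Hour))
	if err != nil {
		panic(err)
	}
	fmt.Println(exp) // formatted as "2006-01-02 15:04:05-0700"
}
```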
+// SPDX-License-Identifier: MPL-2.0 + +package credsutil + +import ( + "fmt" + "strings" + "time" +) + +type CaseOp int + +const ( + KeepCase CaseOp = iota + Uppercase + Lowercase +) + +type usernameBuilder struct { + displayName string + roleName string + separator string + + maxLen int + caseOperation CaseOp +} + +func (ub usernameBuilder) makeUsername() (string, error) { + userUUID, err := RandomAlphaNumeric(20, false) + if err != nil { + return "", err + } + + now := fmt.Sprint(time.Now().Unix()) + + username := joinNonEmpty(ub.separator, + "v", + ub.displayName, + ub.roleName, + userUUID, + now) + username = trunc(username, ub.maxLen) + switch ub.caseOperation { + case Lowercase: + username = strings.ToLower(username) + case Uppercase: + username = strings.ToUpper(username) + } + + return username, nil +} + +type UsernameOpt func(*usernameBuilder) + +func DisplayName(dispName string, maxLength int) UsernameOpt { + return func(b *usernameBuilder) { + b.displayName = trunc(dispName, maxLength) + } +} + +func RoleName(roleName string, maxLength int) UsernameOpt { + return func(b *usernameBuilder) { + b.roleName = trunc(roleName, maxLength) + } +} + +func Separator(sep string) UsernameOpt { + return func(b *usernameBuilder) { + b.separator = sep + } +} + +func MaxLength(maxLen int) UsernameOpt { + return func(b *usernameBuilder) { + b.maxLen = maxLen + } +} + +func Case(c CaseOp) UsernameOpt { + return func(b *usernameBuilder) { + b.caseOperation = c + } +} + +func ToLower() UsernameOpt { + return Case(Lowercase) +} + +func ToUpper() UsernameOpt { + return Case(Uppercase) +} + +func GenerateUsername(opts ...UsernameOpt) (string, error) { + b := usernameBuilder{ + separator: "_", + maxLen: 100, + caseOperation: KeepCase, + } + + for _, opt := range opts { + opt(&b) + } + + return b.makeUsername() +} + +func trunc(str string, l int) string { + switch { + case l > 0: + if l > len(str) { + return str + } + return str[:l] + case l == 0: + return str + default: + return "" + } +} + +func joinNonEmpty(sep string, vals ...string) string { + if sep == "" { + return strings.Join(vals, sep) + } + switch len(vals) { + case 0: + return "" + case 1: + return vals[0] + } + builder := &strings.Builder{} + for _, val := range vals { + if val == "" { + continue + } + if builder.Len() > 0 { + builder.WriteString(sep) + } + builder.WriteString(val) + } + return builder.String() +} diff --git a/sdk/database/helper/credsutil/usernames_test.go b/sdk/database/helper/credsutil/usernames_test.go new file mode 100644 index 0000000..a3e8834 --- /dev/null +++ b/sdk/database/helper/credsutil/usernames_test.go @@ -0,0 +1,147 @@ +// Copyright (c) HashiCorp, Inc. 
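[Editorial sketch, not part of the upstream diff: calling GenerateUsername from usernames.go above directly with its functional options. The option values are illustrative.]

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/database/helper/credsutil"
)

func main() {
	// Options are applied in order over a builder seeded with the
	// defaults: separator "_", max length 100, case preserved.
	username, err := credsutil.GenerateUsername(
		credsutil.DisplayName("service-account", 7),        // truncated to "service"
		credsutil.RoleName("admin", credsutil.NoneLength),  // NoneLength (-1) drops the component
		credsutil.Separator("."),
		credsutil.MaxLength(40),
		credsutil.ToUpper(),
	)
	if err != nil {
		panic(err)
	}
	// Shape: "V.SERVICE.<20 RANDOM CHARS>.<UNIX TIME>", truncated to 40
	// characters; joinNonEmpty skips the dropped role-name component so
	// no doubled separator appears.
	fmt.Println(username)
}
```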
+// SPDX-License-Identifier: MPL-2.0 + +package credsutil + +import ( + "regexp" + "testing" +) + +func TestGenerateUsername(t *testing.T) { + type testCase struct { + displayName string + displayNameLen int + + roleName string + roleNameLen int + + usernameLen int + separator string + caseOp CaseOp + + regex string + } + tests := map[string]testCase{ + "all opts": { + displayName: "abcdefghijklmonpqrstuvwxyz", + displayNameLen: 10, + roleName: "zyxwvutsrqpnomlkjihgfedcba", + roleNameLen: 10, + usernameLen: 45, + separator: ".", + caseOp: KeepCase, + + regex: "^v.abcdefghij.zyxwvutsrq.[a-zA-Z0-9]{20}.$", + }, + "no separator": { + displayName: "abcdefghijklmonpqrstuvwxyz", + displayNameLen: 10, + roleName: "zyxwvutsrqpnomlkjihgfedcba", + roleNameLen: 10, + usernameLen: 45, + separator: "", + caseOp: KeepCase, + + regex: "^vabcdefghijzyxwvutsrq[a-zA-Z0-9]{20}[0-9]{4}$", + }, + "lowercase": { + displayName: "abcdefghijklmonpqrstuvwxyz", + displayNameLen: 10, + roleName: "zyxwvutsrqpnomlkjihgfedcba", + roleNameLen: 10, + usernameLen: 45, + separator: "_", + caseOp: Lowercase, + + regex: "^v_abcdefghij_zyxwvutsrq_[a-z0-9]{20}_$", + }, + "uppercase": { + displayName: "abcdefghijklmonpqrstuvwxyz", + displayNameLen: 10, + roleName: "zyxwvutsrqpnomlkjihgfedcba", + roleNameLen: 10, + usernameLen: 45, + separator: "_", + caseOp: Uppercase, + + regex: "^V_ABCDEFGHIJ_ZYXWVUTSRQ_[A-Z0-9]{20}_$", + }, + "short username": { + displayName: "abcdefghijklmonpqrstuvwxyz", + displayNameLen: 5, + roleName: "zyxwvutsrqpnomlkjihgfedcba", + roleNameLen: 5, + usernameLen: 15, + separator: "_", + caseOp: KeepCase, + + regex: "^v_abcde_zyxwv_[a-zA-Z0-9]{1}$", + }, + "long username": { + displayName: "abcdefghijklmonpqrstuvwxyz", + displayNameLen: 0, + roleName: "zyxwvutsrqpnomlkjihgfedcba", + roleNameLen: 0, + usernameLen: 100, + separator: "_", + caseOp: KeepCase, + + regex: "^v_abcdefghijklmonpqrstuvwxyz_zyxwvutsrqpnomlkjihgfedcba_[a-zA-Z0-9]{20}_[0-9]{1,23}$", + }, + "zero max length": { + displayName: "abcdefghijklmonpqrstuvwxyz", + displayNameLen: 0, + roleName: "zyxwvutsrqpnomlkjihgfedcba", + roleNameLen: 0, + usernameLen: 0, + separator: "_", + caseOp: KeepCase, + + regex: "^v_abcdefghijklmonpqrstuvwxyz_zyxwvutsrqpnomlkjihgfedcba_[a-zA-Z0-9]{20}_[0-9]+$", + }, + "no display name": { + displayName: "abcdefghijklmonpqrstuvwxyz", + displayNameLen: NoneLength, + roleName: "zyxwvutsrqpnomlkjihgfedcba", + roleNameLen: 15, + usernameLen: 100, + separator: "_", + caseOp: KeepCase, + + regex: "^v_zyxwvutsrqpnoml_[a-zA-Z0-9]{20}_[0-9]+$", + }, + "no role name": { + displayName: "abcdefghijklmonpqrstuvwxyz", + displayNameLen: 15, + roleName: "zyxwvutsrqpnomlkjihgfedcba", + roleNameLen: NoneLength, + usernameLen: 100, + separator: "_", + caseOp: KeepCase, + + regex: "^v_abcdefghijklmon_[a-zA-Z0-9]{20}_[0-9]+$", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + re := regexp.MustCompile(test.regex) + + username, err := GenerateUsername( + DisplayName(test.displayName, test.displayNameLen), + RoleName(test.roleName, test.roleNameLen), + Separator(test.separator), + MaxLength(test.usernameLen), + Case(test.caseOp), + ) + if err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + if !re.MatchString(username) { + t.Fatalf("username %q does not match regex %q", username, test.regex) + } + }) + } +} diff --git a/sdk/database/helper/dbutil/dbutil.go b/sdk/database/helper/dbutil/dbutil.go new file mode 100644 index 0000000..efc7e01 --- /dev/null +++ b/sdk/database/helper/dbutil/dbutil.go 
@@ -0,0 +1,63 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package dbutil + +import ( + "errors" + "fmt" + "strings" + + "github.com/hashicorp/vault/sdk/database/dbplugin" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var ( + ErrEmptyCreationStatement = errors.New("empty creation statements") + ErrEmptyRotationStatement = errors.New("empty rotation statements") +) + +// Query templates a query for us. +func QueryHelper(tpl string, data map[string]string) string { + for k, v := range data { + tpl = strings.ReplaceAll(tpl, fmt.Sprintf("{{%s}}", k), v) + } + + return tpl +} + +// StatementCompatibilityHelper will populate the statements fields to support +// compatibility +func StatementCompatibilityHelper(statements dbplugin.Statements) dbplugin.Statements { + switch { + case len(statements.Creation) > 0 && len(statements.CreationStatements) == 0: + statements.CreationStatements = strings.Join(statements.Creation, ";") + case len(statements.CreationStatements) > 0: + statements.Creation = []string{statements.CreationStatements} + } + switch { + case len(statements.Revocation) > 0 && len(statements.RevocationStatements) == 0: + statements.RevocationStatements = strings.Join(statements.Revocation, ";") + case len(statements.RevocationStatements) > 0: + statements.Revocation = []string{statements.RevocationStatements} + } + switch { + case len(statements.Renewal) > 0 && len(statements.RenewStatements) == 0: + statements.RenewStatements = strings.Join(statements.Renewal, ";") + case len(statements.RenewStatements) > 0: + statements.Renewal = []string{statements.RenewStatements} + } + switch { + case len(statements.Rollback) > 0 && len(statements.RollbackStatements) == 0: + statements.RollbackStatements = strings.Join(statements.Rollback, ";") + case len(statements.RollbackStatements) > 0: + statements.Rollback = []string{statements.RollbackStatements} + } + return statements +} + +// Unimplemented returns a gRPC error with the Unimplemented code +func Unimplemented() error { + return status.Error(codes.Unimplemented, "Not yet implemented") +} diff --git a/sdk/database/helper/dbutil/dbutil_test.go b/sdk/database/helper/dbutil/dbutil_test.go new file mode 100644 index 0000000..797712b --- /dev/null +++ b/sdk/database/helper/dbutil/dbutil_test.go @@ -0,0 +1,64 @@ +// Copyright (c) HashiCorp, Inc. 
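[Editorial sketch, not part of the upstream diff: QueryHelper from dbutil.go above performs plain string substitution of {{key}} placeholders, with no SQL escaping. The statement and values are illustrative.]

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/database/helper/dbutil"
)

func main() {
	// Each {{key}} placeholder is replaced with its map value;
	// placeholders with no matching key are left untouched.
	stmt := dbutil.QueryHelper(
		`CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';`,
		map[string]string{
			"name":       "v-token-readonly-abc123",
			"password":   "A1a-examplepassword",
			"expiration": "2024-01-02 15:04:05-0700",
		},
	)
	fmt.Println(stmt)
}
```

[Because this is textual substitution, callers are expected to quote identifiers themselves, e.g. with QuoteIdentifier defined later in this package.]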
+// SPDX-License-Identifier: MPL-2.0 + +package dbutil + +import ( + "reflect" + "testing" + + "github.com/hashicorp/vault/sdk/database/dbplugin" +) + +func TestStatementCompatibilityHelper(t *testing.T) { + const ( + creationStatement = "creation" + renewStatement = "renew" + revokeStatement = "revoke" + rollbackStatement = "rollback" + ) + + expectedStatements := dbplugin.Statements{ + Creation: []string{creationStatement}, + Rollback: []string{rollbackStatement}, + Revocation: []string{revokeStatement}, + Renewal: []string{renewStatement}, + CreationStatements: creationStatement, + RenewStatements: renewStatement, + RollbackStatements: rollbackStatement, + RevocationStatements: revokeStatement, + } + + statements1 := dbplugin.Statements{ + CreationStatements: creationStatement, + RenewStatements: renewStatement, + RollbackStatements: rollbackStatement, + RevocationStatements: revokeStatement, + } + + if !reflect.DeepEqual(expectedStatements, StatementCompatibilityHelper(statements1)) { + t.Fatalf("mismatch: %#v, %#v", expectedStatements, statements1) + } + + statements2 := dbplugin.Statements{ + Creation: []string{creationStatement}, + Rollback: []string{rollbackStatement}, + Revocation: []string{revokeStatement}, + Renewal: []string{renewStatement}, + } + + if !reflect.DeepEqual(expectedStatements, StatementCompatibilityHelper(statements2)) { + t.Fatalf("mismatch: %#v, %#v", expectedStatements, statements2) + } + + statements3 := dbplugin.Statements{ + CreationStatements: creationStatement, + } + expectedStatements3 := dbplugin.Statements{ + Creation: []string{creationStatement}, + CreationStatements: creationStatement, + } + if !reflect.DeepEqual(expectedStatements3, StatementCompatibilityHelper(statements3)) { + t.Fatalf("mismatch: %#v, %#v", expectedStatements3, statements3) + } +} diff --git a/sdk/database/helper/dbutil/parseurl.go b/sdk/database/helper/dbutil/parseurl.go new file mode 100644 index 0000000..4bf128c --- /dev/null +++ b/sdk/database/helper/dbutil/parseurl.go @@ -0,0 +1,99 @@ +// Copyright (c) 2011-2013, 'pq' Contributors Portions Copyright (C) 2011 Blake +// Mizerany +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
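[Editorial sketch, not part of the upstream diff: the mirroring behavior that the dbutil tests above exercise, shown in isolation. StatementCompatibilityHelper populates whichever representation of each statement field is missing, so callers can read either the slice or the joined-string form.]

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/database/dbplugin"
	"github.com/hashicorp/vault/sdk/database/helper/dbutil"
)

func main() {
	// Only the singular string field is set; the helper mirrors it into
	// the slice field. Conversely, a populated slice is joined with ";"
	// into the string field when only the slice is set.
	in := dbplugin.Statements{
		CreationStatements: "CREATE ROLE {{name}}",
	}
	out := dbutil.StatementCompatibilityHelper(in)
	fmt.Println(out.Creation)           // ["CREATE ROLE {{name}}"]
	fmt.Println(out.CreationStatements) // "CREATE ROLE {{name}}"
}
```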
+ +// Copied from https://github.com/lib/pq/blob/v1.10.6/url.go#L32 + +package dbutil + +import ( + "fmt" + "net" + nurl "net/url" + "sort" + "strings" +) + +// ParseURL no longer needs to be used by clients of this library since supplying a URL as a +// connection string to sql.Open() is now supported: +// +// sql.Open("postgres", "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full") +// +// It remains exported here for backwards-compatibility. +// +// ParseURL converts a url to a connection string for driver.Open. +// Example: +// +// "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full" +// +// converts to: +// +// "user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full" +// +// A minimal example: +// +// "postgres://" +// +// This will be blank, causing driver.Open to use all of the defaults +func ParseURL(url string) (string, error) { + u, err := nurl.Parse(url) + if err != nil { + return "", err + } + + if u.Scheme != "postgres" && u.Scheme != "postgresql" { + return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme) + } + + var kvs []string + escaper := strings.NewReplacer(`'`, `\'`, `\`, `\\`) + accrue := func(k, v string) { + if v != "" { + kvs = append(kvs, k+"='"+escaper.Replace(v)+"'") + } + } + + if u.User != nil { + v := u.User.Username() + accrue("user", v) + + v, _ = u.User.Password() + accrue("password", v) + } + + if host, port, err := net.SplitHostPort(u.Host); err != nil { + accrue("host", u.Host) + } else { + accrue("host", host) + accrue("port", port) + } + + if u.Path != "" { + accrue("dbname", u.Path[1:]) + } + + q := u.Query() + for k := range q { + accrue(k, q.Get(k)) + } + + sort.Strings(kvs) // Makes testing easier (not a performance concern) + return strings.Join(kvs, " "), nil +} diff --git a/sdk/database/helper/dbutil/quoteidentifier.go b/sdk/database/helper/dbutil/quoteidentifier.go new file mode 100644 index 0000000..92c6fda --- /dev/null +++ b/sdk/database/helper/dbutil/quoteidentifier.go @@ -0,0 +1,45 @@ +// Copyright (c) 2011-2013, 'pq' Contributors Portions Copyright (C) 2011 Blake +// Mizerany +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +// Copied from https://github.com/lib/pq/blob/v1.10.4/conn.go#L1640 + +package dbutil + +import "strings" + +// QuoteIdentifier quotes an "identifier" (e.g. a table or a column name) to be +// used as part of an SQL statement. 
For example: +// +// tblname := "my_table" +// data := "my_data" +// quoted := pq.QuoteIdentifier(tblname) +// err := db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", quoted), data) +// +// Any double quotes in name will be escaped. The quoted identifier will be +// case sensitive when used in a query. If the input string contains a zero +// byte, the result will be truncated immediately before it. +func QuoteIdentifier(name string) string { + end := strings.IndexRune(name, 0) + if end > -1 { + name = name[:end] + } + return `"` + strings.ReplaceAll(name, `"`, `""`) + `"` +} diff --git a/sdk/framework/backend.go b/sdk/framework/backend.go new file mode 100644 index 0000000..c0527ad --- /dev/null +++ b/sdk/framework/backend.go @@ -0,0 +1,812 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "context" + "crypto/rand" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "regexp" + "sort" + "strings" + "sync" + "time" + + "github.com/hashicorp/go-kms-wrapping/entropy/v2" + + jsonpatch "github.com/evanphx/json-patch/v5" + "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/helper/license" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/logical" +) + +// Backend is an implementation of logical.Backend that allows +// the implementer to code a backend using a much more programmer-friendly +// framework that handles a lot of the routing and validation for you. +// +// This is recommended over implementing logical.Backend directly. +type Backend struct { + // Help is the help text that is shown when a help request is made + // on the root of this resource. The root help is special since we + // show all the paths that can be requested. + Help string + + // Paths are the various routes that the backend responds to. + // This cannot be modified after construction (i.e. dynamically changing + // paths, including adding or removing, is not allowed once the + // backend is in use). + // + // PathsSpecial is the list of path patterns that denote the paths above + // that require special privileges. + Paths []*Path + PathsSpecial *logical.Paths + + // Secrets is the list of secret types that this backend can + // return. It is used to automatically generate proper responses, + // and ease specifying callbacks for revocation, renewal, etc. + Secrets []*Secret + + // InitializeFunc is the callback, which if set, will be invoked via + // Initialize() just after a plugin has been mounted. + InitializeFunc InitializeFunc + + // PeriodicFunc is the callback, which if set, will be invoked when the + // periodic timer of RollbackManager ticks. This can be used by + // backends to do anything it wishes to do periodically. + // + // PeriodicFunc can be invoked to, say periodically delete stale + // entries in backend's storage, while the backend is still being used. + // (Note the difference between this action and `Clean`, which is + // invoked just before the backend is unmounted). + PeriodicFunc periodicFunc + + // WALRollback is called when a WAL entry (see wal.go) has to be rolled + // back. It is called with the data from the entry. + // + // WALRollbackMinAge is the minimum age of a WAL entry before it is attempted + // to be rolled back. 
This should be longer than the maximum time it takes + // to successfully create a secret. + WALRollback WALRollbackFunc + WALRollbackMinAge time.Duration + + // Clean is called on unload to clean up e.g any existing connections + // to the backend, if required. + Clean CleanupFunc + + // Invalidate is called when a key is modified, if required. + Invalidate InvalidateFunc + + // AuthRenew is the callback to call when a RenewRequest for an + // authentication comes in. By default, renewal won't be allowed. + // See the built-in AuthRenew helpers in lease.go for common callbacks. + AuthRenew OperationFunc + + // BackendType is the logical.BackendType for the backend implementation + BackendType logical.BackendType + + // RunningVersion is the optional version that will be self-reported + RunningVersion string + + logger log.Logger + system logical.SystemView + events logical.EventSender + once sync.Once + pathsRe []*regexp.Regexp +} + +// periodicFunc is the callback called when the RollbackManager's timer ticks. +// This can be utilized by the backends to do anything it wants. +type periodicFunc func(context.Context, *logical.Request) error + +// OperationFunc is the callback called for an operation on a path. +type OperationFunc func(context.Context, *logical.Request, *FieldData) (*logical.Response, error) + +// ExistenceFunc is the callback called for an existence check on a path. +type ExistenceFunc func(context.Context, *logical.Request, *FieldData) (bool, error) + +// WALRollbackFunc is the callback for rollbacks. +type WALRollbackFunc func(context.Context, *logical.Request, string, interface{}) error + +// CleanupFunc is the callback for backend unload. +type CleanupFunc func(context.Context) + +// InvalidateFunc is the callback for backend key invalidation. +type InvalidateFunc func(context.Context, string) + +// InitializeFunc is the callback, which if set, will be invoked via +// Initialize() just after a plugin has been mounted. +type InitializeFunc func(context.Context, *logical.InitializationRequest) error + +// PatchPreprocessorFunc is used by HandlePatchOperation in order to shape +// the input as defined by request handler prior to JSON marshaling +type PatchPreprocessorFunc func(map[string]interface{}) (map[string]interface{}, error) + +// ErrNoEvents is returned when attempting to send an event, but when the event +// sender was not passed in during `backend.Setup()`. +var ErrNoEvents = errors.New("no event sender configured") + +// Initialize is the logical.Backend implementation. +func (b *Backend) Initialize(ctx context.Context, req *logical.InitializationRequest) error { + if b.InitializeFunc != nil { + return b.InitializeFunc(ctx, req) + } + return nil +} + +// HandleExistenceCheck is the logical.Backend implementation. 
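[Editorial sketch, not part of the upstream diff: a minimal framework.Backend wiring the fields described above into a single read-only path, to show how the pieces fit together. Names and the path pattern are hypothetical; the callback signature, Path fields, and PathOperation follow the definitions in this file and its tests.]

```go
package kvdemo

import (
	"context"

	"github.com/hashicorp/vault/sdk/framework"
	"github.com/hashicorp/vault/sdk/logical"
)

// Backend returns a toy backend with one path and no secrets, periodic
// work, or WAL rollback.
func Backend() *framework.Backend {
	return &framework.Backend{
		Help:        "A demo backend with one path.",
		BackendType: logical.TypeLogical,
		Paths: []*framework.Path{
			{
				// Patterns are anchored with ^...$ automatically by init().
				Pattern: "greeting/(?P<name>.+)",
				Fields: map[string]*framework.FieldSchema{
					"name": {Type: framework.TypeString},
				},
				Operations: map[logical.Operation]framework.OperationHandler{
					logical.ReadOperation: &framework.PathOperation{
						Callback: func(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
							// Captured URL fields take priority over body data.
							return &logical.Response{
								Data: map[string]interface{}{
									"greeting": "hello, " + d.Get("name").(string),
								},
							}, nil
						},
					},
				},
			},
		},
	}
}
```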
+func (b *Backend) HandleExistenceCheck(ctx context.Context, req *logical.Request) (checkFound bool, exists bool, err error) { + b.once.Do(b.init) + + // Ensure we are only doing this when one of the correct operations is in play + switch req.Operation { + case logical.CreateOperation: + case logical.UpdateOperation: + default: + return false, false, fmt.Errorf("incorrect operation type %v for an existence check", req.Operation) + } + + // Find the matching route + path, captures := b.route(req.Path) + if path == nil { + return false, false, logical.ErrUnsupportedPath + } + + if path.ExistenceCheck == nil { + return false, false, nil + } + + checkFound = true + + // Build up the data for the route, with the URL taking priority + // for the fields over the PUT data. + raw := make(map[string]interface{}, len(path.Fields)) + for k, v := range req.Data { + raw[k] = v + } + for k, v := range captures { + raw[k] = v + } + + fd := FieldData{ + Raw: raw, + Schema: path.Fields, + } + + err = fd.Validate() + if err != nil { + return false, false, errutil.UserError{Err: err.Error()} + } + + // Call the callback with the request and the data + exists, err = path.ExistenceCheck(ctx, req, &fd) + return +} + +// HandleRequest is the logical.Backend implementation. +func (b *Backend) HandleRequest(ctx context.Context, req *logical.Request) (*logical.Response, error) { + b.once.Do(b.init) + + // Check for special cased global operations. These don't route + // to a specific Path. + switch req.Operation { + case logical.RenewOperation: + fallthrough + case logical.RevokeOperation: + return b.handleRevokeRenew(ctx, req) + case logical.RollbackOperation: + return b.handleRollback(ctx, req) + } + + // If the path is empty and it is a help operation, handle that. + if req.Path == "" && req.Operation == logical.HelpOperation { + return b.handleRootHelp(req) + } + + // Find the matching route + path, captures := b.route(req.Path) + if path == nil { + return nil, logical.ErrUnsupportedPath + } + + // Check if a feature is required and if the license has that feature + if path.FeatureRequired != license.FeatureNone { + hasFeature := b.system.HasFeature(path.FeatureRequired) + if !hasFeature { + return nil, logical.CodedError(401, "Feature Not Enabled") + } + } + + // Build up the data for the route, with the URL taking priority + // for the fields over the PUT data. + raw := make(map[string]interface{}, len(path.Fields)) + var ignored []string + for k, v := range req.Data { + raw[k] = v + if !path.TakesArbitraryInput && path.Fields[k] == nil { + ignored = append(ignored, k) + } + } + + var replaced []string + for k, v := range captures { + if raw[k] != nil { + replaced = append(replaced, k) + } + raw[k] = v + } + + // Look up the callback for this operation, preferring the + // path.Operations definition if present. 
+ var callback OperationFunc + + if path.Operations != nil { + if op, ok := path.Operations[req.Operation]; ok { + + // Check whether this operation should be forwarded + if sysView := b.System(); sysView != nil { + replState := sysView.ReplicationState() + props := op.Properties() + + if props.ForwardPerformanceStandby && replState.HasState(consts.ReplicationPerformanceStandby) { + return nil, logical.ErrReadOnly + } + + if props.ForwardPerformanceSecondary && !sysView.LocalMount() && replState.HasState(consts.ReplicationPerformanceSecondary) { + return nil, logical.ErrReadOnly + } + } + + callback = op.Handler() + } + } else { + callback = path.Callbacks[req.Operation] + } + ok := callback != nil + + if !ok { + if req.Operation == logical.HelpOperation { + callback = path.helpCallback(b) + ok = true + } + } + if !ok { + return nil, logical.ErrUnsupportedOperation + } + + fd := FieldData{ + Raw: raw, + Schema: path.Fields, + } + + if req.Operation != logical.HelpOperation { + err := fd.Validate() + if err != nil { + return logical.ErrorResponse(fmt.Sprintf("Field validation failed: %s", err.Error())), nil + } + } + + resp, err := callback(ctx, req, &fd) + if err != nil { + return resp, err + } + + switch resp { + case nil: + default: + // If fields supplied in the request are not present in the field schema + // of the path, add a warning to the response indicating that those + // parameters will be ignored. + sort.Strings(ignored) + + if len(ignored) != 0 { + resp.AddWarning(fmt.Sprintf("Endpoint ignored these unrecognized parameters: %v", ignored)) + } + // If fields supplied in the request is being overwritten by the values + // supplied in the API request path, add a warning to the response + // indicating that those parameters will be replaced. + if len(replaced) != 0 { + resp.AddWarning(fmt.Sprintf("Endpoint replaced the value of these parameters with the values captured from the endpoint's path: %v", replaced)) + } + } + + return resp, nil +} + +// HandlePatchOperation acts as an abstraction for performing JSON merge patch +// operations (see https://datatracker.ietf.org/doc/html/rfc7396) for HTTP +// PATCH requests. It is responsible for properly processing and marshalling +// the input and existing resource prior to performing the JSON merge operation +// using the MergePatch function from the json-patch library. The preprocessor +// is an arbitrary func that can be provided to further process the input. The +// MergePatch function accepts and returns byte arrays. Null values will unset +// fields defined within the input's FieldData (as if they were never specified) +// and remove user-specified keys that exist within a map field. 
+func HandlePatchOperation(input *FieldData, resource map[string]interface{}, preprocessor PatchPreprocessorFunc) ([]byte, error) { + var err error + + if resource == nil { + return nil, fmt.Errorf("resource does not exist") + } + + inputMap := map[string]interface{}{} + + for key := range input.Raw { + if _, ok := input.Schema[key]; !ok { + // Only accept fields in the schema + continue + } + + // Ensure data types are handled properly according to the FieldSchema + val, ok, err := input.GetOkErr(key) + if err != nil { + return nil, err + } + + if ok { + inputMap[key] = val + } + } + + if preprocessor != nil { + inputMap, err = preprocessor(inputMap) + if err != nil { + return nil, err + } + } + + marshaledResource, err := json.Marshal(resource) + if err != nil { + return nil, err + } + + marshaledInput, err := json.Marshal(inputMap) + if err != nil { + return nil, err + } + + modified, err := jsonpatch.MergePatch(marshaledResource, marshaledInput) + if err != nil { + return nil, err + } + + return modified, nil +} + +// SpecialPaths is the logical.Backend implementation. +func (b *Backend) SpecialPaths() *logical.Paths { + return b.PathsSpecial +} + +// Cleanup is used to release resources and prepare to stop the backend +func (b *Backend) Cleanup(ctx context.Context) { + if b.Clean != nil { + b.Clean(ctx) + } +} + +// InvalidateKey is used to clear caches and reset internal state on key changes +func (b *Backend) InvalidateKey(ctx context.Context, key string) { + if b.Invalidate != nil { + b.Invalidate(ctx, key) + } +} + +// Setup is used to initialize the backend with the initial backend configuration +func (b *Backend) Setup(ctx context.Context, config *logical.BackendConfig) error { + b.logger = config.Logger + b.system = config.System + b.events = config.EventsSender + return nil +} + +// GetRandomReader returns an io.Reader to use for generating key material in +// backends. If the backend has access to an external entropy source it will +// return that, otherwise it returns crypto/rand.Reader. +func (b *Backend) GetRandomReader() io.Reader { + if sourcer, ok := b.System().(entropy.Sourcer); ok { + return entropy.NewReader(sourcer) + } + + return rand.Reader +} + +// Logger can be used to get the logger. If no logger has been set, +// the logs will be discarded. +func (b *Backend) Logger() log.Logger { + if b.logger != nil { + return b.logger + } + + return logging.NewVaultLoggerWithWriter(ioutil.Discard, log.NoLevel) +} + +// System returns the backend's system view. +func (b *Backend) System() logical.SystemView { + return b.system +} + +// Type returns the backend type +func (b *Backend) Type() logical.BackendType { + return b.BackendType +} + +// Version returns the plugin version information +func (b *Backend) PluginVersion() logical.PluginVersion { + return logical.PluginVersion{ + Version: b.RunningVersion, + } +} + +// Route looks up the path that would be used for a given path string. +func (b *Backend) Route(path string) *Path { + result, _ := b.route(path) + return result +} + +// Secret is used to look up the secret with the given type. 
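[Editorial sketch, not part of the upstream diff: driving HandlePatchOperation above by hand to show the RFC 7396 merge semantics. The stored resource and schema are hypothetical; a nil preprocessor is allowed since the function checks for it.]

```go
package demo

import (
	"encoding/json"

	"github.com/hashicorp/vault/sdk/framework"
)

// applyPatch merges a partial update into a stored resource: fields
// present in the input overwrite the stored values, fields absent from
// the input are kept, and (per the doc comment above) explicit nulls
// unset fields.
func applyPatch() (map[string]interface{}, error) {
	stored := map[string]interface{}{
		"ttl":      3600,
		"comment":  "old",
		"disabled": false,
	}

	input := &framework.FieldData{
		Raw: map[string]interface{}{
			"comment": "new",
		},
		Schema: map[string]*framework.FieldSchema{
			"ttl":      {Type: framework.TypeInt},
			"comment":  {Type: framework.TypeString},
			"disabled": {Type: framework.TypeBool},
		},
	}

	merged, err := framework.HandlePatchOperation(input, stored, nil)
	if err != nil {
		return nil, err
	}

	var out map[string]interface{}
	if err := json.Unmarshal(merged, &out); err != nil {
		return nil, err
	}
	// out now holds {"ttl": 3600, "comment": "new", "disabled": false}.
	return out, nil
}
```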
+func (b *Backend) Secret(k string) *Secret { + for _, s := range b.Secrets { + if s.Type == k { + return s + } + } + + return nil +} + +func (b *Backend) init() { + b.pathsRe = make([]*regexp.Regexp, len(b.Paths)) + for i, p := range b.Paths { + if len(p.Pattern) == 0 { + panic(fmt.Sprintf("Routing pattern cannot be blank")) + } + // Automatically anchor the pattern + if p.Pattern[0] != '^' { + p.Pattern = "^" + p.Pattern + } + if p.Pattern[len(p.Pattern)-1] != '$' { + p.Pattern = p.Pattern + "$" + } + b.pathsRe[i] = regexp.MustCompile(p.Pattern) + } +} + +func (b *Backend) route(path string) (*Path, map[string]string) { + b.once.Do(b.init) + + for i, re := range b.pathsRe { + matches := re.FindStringSubmatch(path) + if matches == nil { + continue + } + + // We have a match, determine the mapping of the captures and + // store that for returning. + var captures map[string]string + path := b.Paths[i] + if captureNames := re.SubexpNames(); len(captureNames) > 1 { + captures = make(map[string]string, len(captureNames)) + for i, name := range captureNames { + if name != "" { + captures[name] = matches[i] + } + } + } + + return path, captures + } + + return nil, nil +} + +func (b *Backend) handleRootHelp(req *logical.Request) (*logical.Response, error) { + // Build a mapping of the paths and get the paths alphabetized to + // make the output prettier. + pathsMap := make(map[string]*Path) + paths := make([]string, 0, len(b.Paths)) + for i, p := range b.pathsRe { + paths = append(paths, p.String()) + pathsMap[p.String()] = b.Paths[i] + } + sort.Strings(paths) + + // Build the path data + pathData := make([]rootHelpTemplatePath, 0, len(paths)) + for _, route := range paths { + p := pathsMap[route] + pathData = append(pathData, rootHelpTemplatePath{ + Path: route, + Help: strings.TrimSpace(p.HelpSynopsis), + }) + } + + help, err := executeTemplate(rootHelpTemplate, &rootHelpTemplateData{ + Help: strings.TrimSpace(b.Help), + Paths: pathData, + }) + if err != nil { + return nil, err + } + + // Plugins currently don't have a direct knowledge of their own "type" + // (e.g. "kv", "cubbyhole"). It defaults to the name of the executable but + // can be overridden when the plugin is mounted. Since we need this type to + // form the request & response full names, we are passing it as an optional + // request parameter to the plugin's root help endpoint. If specified in + // the request, the type will be used as part of the request/response body + // names in the OAS document. 
+ requestResponsePrefix := req.GetString("requestResponsePrefix") + + // Build OpenAPI response for the entire backend + vaultVersion := "unknown" + if b.System() != nil { + env, err := b.System().PluginEnv(context.Background()) + if err != nil { + return nil, err + } + vaultVersion = env.VaultVersion + } + + doc := NewOASDocument(vaultVersion) + if err := documentPaths(b, requestResponsePrefix, doc); err != nil { + b.Logger().Warn("error generating OpenAPI", "error", err) + } + + return logical.HelpResponse(help, nil, doc), nil +} + +func (b *Backend) handleRevokeRenew(ctx context.Context, req *logical.Request) (*logical.Response, error) { + // Special case renewal of authentication for credential backends + if req.Operation == logical.RenewOperation && req.Auth != nil { + return b.handleAuthRenew(ctx, req) + } + + if req.Secret == nil { + return nil, fmt.Errorf("request has no secret") + } + + rawSecretType, ok := req.Secret.InternalData["secret_type"] + if !ok { + return nil, fmt.Errorf("secret is unsupported by this backend") + } + secretType, ok := rawSecretType.(string) + if !ok { + return nil, fmt.Errorf("secret is unsupported by this backend") + } + + secret := b.Secret(secretType) + if secret == nil { + return nil, fmt.Errorf("secret is unsupported by this backend") + } + + switch req.Operation { + case logical.RenewOperation: + return secret.HandleRenew(ctx, req) + case logical.RevokeOperation: + return secret.HandleRevoke(ctx, req) + default: + return nil, fmt.Errorf("invalid operation for revoke/renew: %q", req.Operation) + } +} + +// handleRollback invokes the PeriodicFunc set on the backend. It also does a +// WAL rollback operation. +func (b *Backend) handleRollback(ctx context.Context, req *logical.Request) (*logical.Response, error) { + // Response is not expected from the periodic operation. + var resp *logical.Response + + merr := new(multierror.Error) + if b.PeriodicFunc != nil { + if err := b.PeriodicFunc(ctx, req); err != nil { + merr = multierror.Append(merr, err) + } + } + + if b.WALRollback != nil { + var err error + resp, err = b.handleWALRollback(ctx, req) + if err != nil { + merr = multierror.Append(merr, err) + } + } + return resp, merr.ErrorOrNil() +} + +func (b *Backend) handleAuthRenew(ctx context.Context, req *logical.Request) (*logical.Response, error) { + if b.AuthRenew == nil { + return logical.ErrorResponse("this auth type doesn't support renew"), nil + } + + return b.AuthRenew(ctx, req, nil) +} + +func (b *Backend) handleWALRollback(ctx context.Context, req *logical.Request) (*logical.Response, error) { + if b.WALRollback == nil { + return nil, logical.ErrUnsupportedOperation + } + + var merr error + keys, err := ListWAL(ctx, req.Storage) + if err != nil { + return logical.ErrorResponse(err.Error()), nil + } + if len(keys) == 0 { + return nil, nil + } + + // Calculate the minimum time that the WAL entries could be + // created in order to be rolled back. 
+ age := b.WALRollbackMinAge + if age == 0 { + age = 10 * time.Minute + } + minAge := time.Now().Add(-1 * age) + if _, ok := req.Data["immediate"]; ok { + minAge = time.Now().Add(1000 * time.Hour) + } + + for _, k := range keys { + entry, err := GetWAL(ctx, req.Storage, k) + if err != nil { + merr = multierror.Append(merr, err) + continue + } + if entry == nil { + continue + } + + // If the entry isn't old enough, then don't roll it back + if !time.Unix(entry.CreatedAt, 0).Before(minAge) { + continue + } + + // Attempt a WAL rollback + err = b.WALRollback(ctx, req, entry.Kind, entry.Data) + if err != nil { + err = errwrap.Wrapf(fmt.Sprintf("error rolling back %q entry: {{err}}", entry.Kind), err) + } + if err == nil { + err = DeleteWAL(ctx, req.Storage, k) + } + if err != nil { + merr = multierror.Append(merr, err) + } + } + + if merr == nil { + return nil, nil + } + + return logical.ErrorResponse(merr.Error()), nil +} + +func (b *Backend) SendEvent(ctx context.Context, eventType logical.EventType, event *logical.EventData) error { + if b.events == nil { + return ErrNoEvents + } + return b.events.Send(ctx, eventType, event) +} + +// FieldSchema is a basic schema to describe the format of a path field. +type FieldSchema struct { + Type FieldType + Default interface{} + Description string + + // The Required and Deprecated members are only used by openapi, and are not actually + // used by the framework. + Required bool + Deprecated bool + + // Query indicates this field will be sent as a query parameter: + // + // /v1/foo/bar?some_param=some_value + // + // It doesn't affect handling of the value, but may be used for documentation. + Query bool + + // AllowedValues is an optional list of permitted values for this field. + // This constraint is not (yet) enforced by the framework, but the list is + // output as part of OpenAPI generation and may effect documentation and + // dynamic UI generation. + AllowedValues []interface{} + + // DisplayAttrs provides hints for UI and documentation generators. They + // will be included in OpenAPI output if set. + DisplayAttrs *DisplayAttributes +} + +// DefaultOrZero returns the default value if it is set, or otherwise +// the zero value of the type. 
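[Editorial sketch, not part of the upstream diff: DefaultOrZero in action, illustrating the duration normalization described above. Values are illustrative; the "60s" style of default is the same one exercised by the tests later in this diff.]

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/framework"
)

func main() {
	// Duration defaults are normalized to integer seconds; an
	// unparsable default falls back to the type's zero value.
	ttl := &framework.FieldSchema{
		Type:    framework.TypeDurationSecond,
		Default: "90s",
	}
	fmt.Println(ttl.DefaultOrZero()) // 90

	name := &framework.FieldSchema{Type: framework.TypeString}
	fmt.Println(name.DefaultOrZero() == "") // true: zero value for TypeString
}
```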
+func (s *FieldSchema) DefaultOrZero() interface{} { + if s.Default != nil { + switch s.Type { + case TypeDurationSecond, TypeSignedDurationSecond: + resultDur, err := parseutil.ParseDurationSecond(s.Default) + if err != nil { + return s.Type.Zero() + } + return int(resultDur.Seconds()) + + default: + return s.Default + } + } + + return s.Type.Zero() +} + +// Zero returns the correct zero-value for a specific FieldType +func (t FieldType) Zero() interface{} { + switch t { + case TypeString, TypeNameString, TypeLowerCaseString: + return "" + case TypeInt: + return 0 + case TypeInt64: + return int64(0) + case TypeBool: + return false + case TypeMap: + return map[string]interface{}{} + case TypeKVPairs: + return map[string]string{} + case TypeDurationSecond, TypeSignedDurationSecond: + return 0 + case TypeSlice: + return []interface{}{} + case TypeStringSlice, TypeCommaStringSlice: + return []string{} + case TypeCommaIntSlice: + return []int{} + case TypeHeader: + return http.Header{} + case TypeFloat: + return 0.0 + case TypeTime: + return time.Time{} + default: + panic("unknown type: " + t.String()) + } +} + +type rootHelpTemplateData struct { + Help string + Paths []rootHelpTemplatePath +} + +type rootHelpTemplatePath struct { + Path string + Help string +} + +const rootHelpTemplate = ` +## DESCRIPTION + +{{.Help}} + +## PATHS + +The following paths are supported by this backend. To view help for +any of the paths below, use the help command with any route matching +the path pattern. Note that depending on the policy of your auth token, +you may or may not be able to access certain paths. + +{{range .Paths}}{{indent 4 .Path}} +{{indent 8 .Help}} + +{{end}} + +` diff --git a/sdk/framework/backend_test.go b/sdk/framework/backend_test.go new file mode 100644 index 0000000..0b7a205 --- /dev/null +++ b/sdk/framework/backend_test.go @@ -0,0 +1,837 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "context" + "fmt" + "net/http" + "reflect" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/hashicorp/go-secure-stdlib/strutil" + + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/require" +) + +func BenchmarkBackendRoute(b *testing.B) { + patterns := []string{ + "foo", + "bar/(?P.+?)", + "baz/(?Pwhat)", + `aws/policy/(?P\w)`, + `aws/(?P\w)`, + } + + backend := &Backend{Paths: make([]*Path, 0, len(patterns))} + for _, p := range patterns { + backend.Paths = append(backend.Paths, &Path{Pattern: p}) + } + + // Warm any caches + backend.Route("aws/policy/foo") + + // Reset the timer since we did a lot above + b.ResetTimer() + + // Run through and route. 
We do a sanity check of the return value + for i := 0; i < b.N; i++ { + if p := backend.Route("aws/policy/foo"); p == nil { + b.Fatal("p should not be nil") + } + } +} + +func TestBackend_impl(t *testing.T) { + var _ logical.Backend = new(Backend) +} + +func TestBackendHandleRequestFieldWarnings(t *testing.T) { + handler := func(ctx context.Context, req *logical.Request, data *FieldData) (*logical.Response, error) { + return &logical.Response{ + Data: map[string]interface{}{ + "an_int": data.Get("an_int"), + "a_string": data.Get("a_string"), + "name": data.Get("name"), + }, + }, nil + } + + backend := &Backend{ + Paths: []*Path{ + { + Pattern: "foo/bar/(?P.+)", + Fields: map[string]*FieldSchema{ + "an_int": {Type: TypeInt}, + "a_string": {Type: TypeString}, + "name": {Type: TypeString}, + }, + Operations: map[logical.Operation]OperationHandler{ + logical.UpdateOperation: &PathOperation{Callback: handler}, + }, + }, + }, + } + ctx := context.Background() + resp, err := backend.HandleRequest(ctx, &logical.Request{ + Operation: logical.UpdateOperation, + Path: "foo/bar/baz", + Data: map[string]interface{}{ + "an_int": 10, + "a_string": "accepted", + "unrecognized1": "unrecognized", + "unrecognized2": 20.2, + "name": "noop", + }, + }) + require.NoError(t, err) + require.NotNil(t, resp) + t.Log(resp.Warnings) + require.Len(t, resp.Warnings, 2) + require.True(t, strutil.StrListContains(resp.Warnings, "Endpoint ignored these unrecognized parameters: [unrecognized1 unrecognized2]")) + require.True(t, strutil.StrListContains(resp.Warnings, "Endpoint replaced the value of these parameters with the values captured from the endpoint's path: [name]")) +} + +func TestBackendHandleRequest(t *testing.T) { + callback := func(ctx context.Context, req *logical.Request, data *FieldData) (*logical.Response, error) { + return &logical.Response{ + Data: map[string]interface{}{ + "value": data.Get("value"), + }, + }, nil + } + handler := func(ctx context.Context, req *logical.Request, data *FieldData) (*logical.Response, error) { + return &logical.Response{ + Data: map[string]interface{}{ + "amount": data.Get("amount"), + }, + }, nil + } + + b := &Backend{ + Paths: []*Path{ + { + Pattern: "foo/bar", + Fields: map[string]*FieldSchema{ + "value": {Type: TypeInt}, + }, + Callbacks: map[logical.Operation]OperationFunc{ + logical.ReadOperation: callback, + }, + }, + { + Pattern: "foo/baz/handler", + Fields: map[string]*FieldSchema{ + "amount": {Type: TypeInt}, + }, + Operations: map[logical.Operation]OperationHandler{ + logical.ReadOperation: &PathOperation{Callback: handler}, + }, + }, + { + Pattern: "foo/both/handler", + Fields: map[string]*FieldSchema{ + "amount": {Type: TypeInt}, + }, + Callbacks: map[logical.Operation]OperationFunc{ + logical.ReadOperation: callback, + }, + Operations: map[logical.Operation]OperationHandler{ + logical.ReadOperation: &PathOperation{Callback: handler}, + }, + }, + }, + system: &logical.StaticSystemView{}, + } + + for _, path := range []string{"foo/bar", "foo/baz/handler", "foo/both/handler"} { + key := "value" + if strings.Contains(path, "handler") { + key = "amount" + } + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Path: path, + Data: map[string]interface{}{key: "42"}, + }) + if err != nil { + t.Fatalf("err: %s", err) + } + if resp.Data[key] != 42 { + t.Fatalf("bad: %#v", resp) + } + } +} + +func TestBackendHandleRequest_Forwarding(t *testing.T) { + tests := map[string]struct { + fwdStandby bool + fwdSecondary bool + 
isLocal bool + isStandby bool + isSecondary bool + expectFwd bool + nilSysView bool + }{ + "no forward": { + expectFwd: false, + }, + "no forward, local restricted": { + isSecondary: true, + fwdSecondary: true, + isLocal: true, + expectFwd: false, + }, + "no forward, forwarding not requested": { + isSecondary: true, + isStandby: true, + expectFwd: false, + }, + "forward, secondary": { + fwdSecondary: true, + isSecondary: true, + expectFwd: true, + }, + "forward, standby": { + fwdStandby: true, + isStandby: true, + expectFwd: true, + }, + "no forward, only secondary": { + fwdSecondary: true, + isStandby: true, + expectFwd: false, + }, + "no forward, only standby": { + fwdStandby: true, + isSecondary: true, + expectFwd: false, + }, + "nil system view": { + nilSysView: true, + expectFwd: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + var replState consts.ReplicationState + if test.isStandby { + replState.AddState(consts.ReplicationPerformanceStandby) + } + if test.isSecondary { + replState.AddState(consts.ReplicationPerformanceSecondary) + } + + b := &Backend{ + Paths: []*Path{ + { + Pattern: "foo", + Operations: map[logical.Operation]OperationHandler{ + logical.ReadOperation: &PathOperation{ + Callback: func(ctx context.Context, req *logical.Request, data *FieldData) (*logical.Response, error) { + return nil, nil + }, + ForwardPerformanceSecondary: test.fwdSecondary, + ForwardPerformanceStandby: test.fwdStandby, + }, + }, + }, + }, + + system: &logical.StaticSystemView{ + LocalMountVal: test.isLocal, + ReplicationStateVal: replState, + }, + } + + if test.nilSysView { + b.system = nil + } + + _, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Path: "foo", + }) + + if !test.expectFwd && err != nil { + t.Fatalf("unexpected err: %v", err) + } + if test.expectFwd && err != logical.ErrReadOnly { + t.Fatalf("expected ErrReadOnly, got: %v", err) + } + }) + } +} + +func TestBackendHandleRequest_badwrite(t *testing.T) { + callback := func(ctx context.Context, req *logical.Request, data *FieldData) (*logical.Response, error) { + return &logical.Response{ + Data: map[string]interface{}{ + "value": data.Get("value").(bool), + }, + }, nil + } + + b := &Backend{ + Paths: []*Path{ + { + Pattern: "foo/bar", + Fields: map[string]*FieldSchema{ + "value": {Type: TypeBool}, + }, + Callbacks: map[logical.Operation]OperationFunc{ + logical.UpdateOperation: callback, + }, + }, + }, + } + + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "foo/bar", + Data: map[string]interface{}{"value": "3false3"}, + }) + if err != nil { + t.Fatalf("err: %s", err) + } + + if !strings.Contains(resp.Data["error"].(string), "Field validation failed") { + t.Fatalf("bad: %#v", resp) + } +} + +func TestBackendHandleRequest_404(t *testing.T) { + callback := func(ctx context.Context, req *logical.Request, data *FieldData) (*logical.Response, error) { + return &logical.Response{ + Data: map[string]interface{}{ + "value": data.Get("value"), + }, + }, nil + } + + b := &Backend{ + Paths: []*Path{ + { + Pattern: `foo/bar`, + Fields: map[string]*FieldSchema{ + "value": {Type: TypeInt}, + }, + Callbacks: map[logical.Operation]OperationFunc{ + logical.ReadOperation: callback, + }, + }, + }, + } + + _, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Path: "foo/baz", + Data: map[string]interface{}{"value": "84"}, + }) + if err != 
logical.ErrUnsupportedPath { + t.Fatalf("err: %s", err) + } +} + +func TestBackendHandleRequest_help(t *testing.T) { + b := &Backend{ + Paths: []*Path{ + { + Pattern: "foo/bar", + Fields: map[string]*FieldSchema{ + "value": {Type: TypeInt}, + }, + HelpSynopsis: "foo", + HelpDescription: "bar", + }, + }, + } + + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.HelpOperation, + Path: "foo/bar", + Data: map[string]interface{}{"value": "42"}, + }) + if err != nil { + t.Fatalf("err: %s", err) + } + if resp.Data["help"] == nil { + t.Fatalf("bad: %#v", resp) + } +} + +func TestBackendHandleRequest_helpRoot(t *testing.T) { + b := &Backend{ + Help: "42", + } + + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.HelpOperation, + Path: "", + }) + if err != nil { + t.Fatalf("err: %s", err) + } + if resp.Data["help"] == nil { + t.Fatalf("bad: %#v", resp) + } +} + +func TestBackendHandleRequest_renewAuth(t *testing.T) { + b := &Backend{} + + resp, err := b.HandleRequest(context.Background(), logical.RenewAuthRequest("/foo", &logical.Auth{}, nil)) + if err != nil { + t.Fatalf("err: %s", err) + } + if !resp.IsError() { + t.Fatalf("bad: %#v", resp) + } +} + +func TestBackendHandleRequest_renewAuthCallback(t *testing.T) { + called := new(uint32) + callback := func(context.Context, *logical.Request, *FieldData) (*logical.Response, error) { + atomic.AddUint32(called, 1) + return nil, nil + } + + b := &Backend{ + AuthRenew: callback, + } + + _, err := b.HandleRequest(context.Background(), logical.RenewAuthRequest("/foo", &logical.Auth{}, nil)) + if err != nil { + t.Fatalf("err: %s", err) + } + if v := atomic.LoadUint32(called); v != 1 { + t.Fatalf("bad: %#v", v) + } +} + +func TestBackendHandleRequest_renew(t *testing.T) { + called := new(uint32) + callback := func(context.Context, *logical.Request, *FieldData) (*logical.Response, error) { + atomic.AddUint32(called, 1) + return nil, nil + } + + secret := &Secret{ + Type: "foo", + Renew: callback, + } + b := &Backend{ + Secrets: []*Secret{secret}, + } + + _, err := b.HandleRequest(context.Background(), logical.RenewRequest("/foo", secret.Response(nil, nil).Secret, nil)) + if err != nil { + t.Fatalf("err: %s", err) + } + if v := atomic.LoadUint32(called); v != 1 { + t.Fatalf("bad: %#v", v) + } +} + +func TestBackendHandleRequest_revoke(t *testing.T) { + called := new(uint32) + callback := func(context.Context, *logical.Request, *FieldData) (*logical.Response, error) { + atomic.AddUint32(called, 1) + return nil, nil + } + + secret := &Secret{ + Type: "foo", + Revoke: callback, + } + b := &Backend{ + Secrets: []*Secret{secret}, + } + + _, err := b.HandleRequest(context.Background(), logical.RevokeRequest("/foo", secret.Response(nil, nil).Secret, nil)) + if err != nil { + t.Fatalf("err: %s", err) + } + if v := atomic.LoadUint32(called); v != 1 { + t.Fatalf("bad: %#v", v) + } +} + +func TestBackendHandleRequest_rollback(t *testing.T) { + called := new(uint32) + callback := func(_ context.Context, req *logical.Request, kind string, data interface{}) error { + if data == "foo" { + atomic.AddUint32(called, 1) + } + return nil + } + + b := &Backend{ + WALRollback: callback, + WALRollbackMinAge: 1 * time.Millisecond, + } + + storage := new(logical.InmemStorage) + if _, err := PutWAL(context.Background(), storage, "kind", "foo"); err != nil { + t.Fatalf("err: %s", err) + } + + time.Sleep(10 * time.Millisecond) + + _, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: 
logical.RollbackOperation, + Path: "", + Storage: storage, + }) + if err != nil { + t.Fatalf("err: %s", err) + } + if v := atomic.LoadUint32(called); v != 1 { + t.Fatalf("bad: %#v", v) + } +} + +func TestBackendHandleRequest_rollbackMinAge(t *testing.T) { + called := new(uint32) + callback := func(_ context.Context, req *logical.Request, kind string, data interface{}) error { + if data == "foo" { + atomic.AddUint32(called, 1) + } + return nil + } + + b := &Backend{ + WALRollback: callback, + WALRollbackMinAge: 5 * time.Second, + } + + storage := new(logical.InmemStorage) + if _, err := PutWAL(context.Background(), storage, "kind", "foo"); err != nil { + t.Fatalf("err: %s", err) + } + + _, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.RollbackOperation, + Path: "", + Storage: storage, + }) + if err != nil { + t.Fatalf("err: %s", err) + } + if v := atomic.LoadUint32(called); v != 0 { + t.Fatalf("bad: %#v", v) + } +} + +func TestBackendHandleRequest_unsupportedOperation(t *testing.T) { + callback := func(ctx context.Context, req *logical.Request, data *FieldData) (*logical.Response, error) { + return &logical.Response{ + Data: map[string]interface{}{ + "value": data.Get("value"), + }, + }, nil + } + + b := &Backend{ + Paths: []*Path{ + { + Pattern: `foo/bar`, + Fields: map[string]*FieldSchema{ + "value": {Type: TypeInt}, + }, + Callbacks: map[logical.Operation]OperationFunc{ + logical.ReadOperation: callback, + }, + }, + }, + } + + _, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "foo/bar", + Data: map[string]interface{}{"value": "84"}, + }) + if err != logical.ErrUnsupportedOperation { + t.Fatalf("err: %s", err) + } +} + +func TestBackendHandleRequest_urlPriority(t *testing.T) { + callback := func(ctx context.Context, req *logical.Request, data *FieldData) (*logical.Response, error) { + return &logical.Response{ + Data: map[string]interface{}{ + "value": data.Get("value"), + }, + }, nil + } + + b := &Backend{ + Paths: []*Path{ + { + Pattern: `foo/(?P\d+)`, + Fields: map[string]*FieldSchema{ + "value": {Type: TypeInt}, + }, + Callbacks: map[logical.Operation]OperationFunc{ + logical.ReadOperation: callback, + }, + }, + }, + } + + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.ReadOperation, + Path: "foo/42", + Data: map[string]interface{}{"value": "84"}, + }) + if err != nil { + t.Fatalf("err: %s", err) + } + if resp.Data["value"] != 42 { + t.Fatalf("bad: %#v", resp) + } +} + +func TestBackendRoute(t *testing.T) { + cases := map[string]struct { + Patterns []string + Path string + Match string + }{ + "no match": { + []string{"foo"}, + "bar", + "", + }, + + "exact": { + []string{"foo"}, + "foo", + "^foo$", + }, + + "regexp": { + []string{"fo+"}, + "foo", + "^fo+$", + }, + + "anchor-start": { + []string{"bar"}, + "foobar", + "", + }, + + "anchor-end": { + []string{"bar"}, + "barfoo", + "", + }, + + "anchor-ambiguous": { + []string{"mounts", "sys/mounts"}, + "sys/mounts", + "^sys/mounts$", + }, + } + + for n, tc := range cases { + paths := make([]*Path, len(tc.Patterns)) + for i, pattern := range tc.Patterns { + paths[i] = &Path{Pattern: pattern} + } + + b := &Backend{Paths: paths} + result := b.Route(tc.Path) + match := "" + if result != nil { + match = result.Pattern + } + + if match != tc.Match { + t.Fatalf("bad: %s\n\nExpected: %s\nGot: %s", + n, tc.Match, match) + } + } +} + +func TestBackendSecret(t *testing.T) { + cases := map[string]struct { + 
+		Secrets []*Secret
+		Search  string
+		Match   bool
+	}{
+		"no match": {
+			[]*Secret{{Type: "foo"}},
+			"bar",
+			false,
+		},
+
+		"match": {
+			[]*Secret{{Type: "foo"}},
+			"foo",
+			true,
+		},
+	}
+
+	for n, tc := range cases {
+		b := &Backend{Secrets: tc.Secrets}
+		result := b.Secret(tc.Search)
+		if tc.Match != (result != nil) {
+			t.Fatalf("bad: %s\n\nExpected match: %v", n, tc.Match)
+		}
+		if result != nil && result.Type != tc.Search {
+			t.Fatalf("bad: %s\n\nExpected matching type: %#v", n, result)
+		}
+	}
+}
+
+func TestFieldSchemaDefaultOrZero(t *testing.T) {
+	cases := map[string]struct {
+		Schema *FieldSchema
+		Value  interface{}
+	}{
+		"default set": {
+			&FieldSchema{Type: TypeString, Default: "foo"},
+			"foo",
+		},
+
+		"default not set": {
+			&FieldSchema{Type: TypeString},
+			"",
+		},
+
+		"default duration set": {
+			&FieldSchema{Type: TypeDurationSecond, Default: 60},
+			60,
+		},
+
+		"default duration int64": {
+			&FieldSchema{Type: TypeDurationSecond, Default: int64(60)},
+			60,
+		},
+
+		"default duration string": {
+			&FieldSchema{Type: TypeDurationSecond, Default: "60s"},
+			60,
+		},
+
+		"illegal default duration string": {
+			&FieldSchema{Type: TypeDurationSecond, Default: "h1"},
+			0,
+		},
+
+		"default duration time.Duration": {
+			&FieldSchema{Type: TypeDurationSecond, Default: 60 * time.Second},
+			60,
+		},
+
+		"default duration not set": {
+			&FieldSchema{Type: TypeDurationSecond},
+			0,
+		},
+
+		"default signed positive duration set": {
+			&FieldSchema{Type: TypeSignedDurationSecond, Default: 60},
+			60,
+		},
+
+		"default signed positive duration int64": {
+			&FieldSchema{Type: TypeSignedDurationSecond, Default: int64(60)},
+			60,
+		},
+
+		"default signed positive duration string": {
+			&FieldSchema{Type: TypeSignedDurationSecond, Default: "60s"},
+			60,
+		},
+
+		"illegal default signed duration string": {
+			&FieldSchema{Type: TypeDurationSecond, Default: "-h1"},
+			0,
+		},
+
+		"default signed positive duration time.Duration": {
+			&FieldSchema{Type: TypeSignedDurationSecond, Default: 60 * time.Second},
+			60,
+		},
+
+		"default signed negative duration set": {
+			&FieldSchema{Type: TypeSignedDurationSecond, Default: -60},
+			-60,
+		},
+
+		"default signed negative duration int64": {
+			&FieldSchema{Type: TypeSignedDurationSecond, Default: int64(-60)},
+			-60,
+		},
+
+		"default signed negative duration string": {
+			&FieldSchema{Type: TypeSignedDurationSecond, Default: "-60s"},
+			-60,
+		},
+
+		"default signed negative duration time.Duration": {
+			&FieldSchema{Type: TypeSignedDurationSecond, Default: -60 * time.Second},
+			-60,
+		},
+
+		"default signed negative duration not set": {
+			&FieldSchema{Type: TypeSignedDurationSecond},
+			0,
+		},
+		"default header not set": {
+			&FieldSchema{Type: TypeHeader},
+			http.Header{},
+		},
+	}
+
+	for name, tc := range cases {
+		actual := tc.Schema.DefaultOrZero()
+		if !reflect.DeepEqual(actual, tc.Value) {
+			t.Errorf("bad: %s\n\nExpected: %#v\nGot: %#v",
+				name, tc.Value, actual)
+		}
+	}
+}
+
+func TestInitializeBackend(t *testing.T) {
+	var inited bool
+	backend := &Backend{InitializeFunc: func(context.Context, *logical.InitializationRequest) error {
+		inited = true
+		return nil
+	}}
+
+	backend.Initialize(nil, &logical.InitializationRequest{Storage: nil})
+
+	if !inited {
+		t.Fatal("backend should be initialized")
+	}
+}
+
+// TestFieldTypeMethods tries to ensure our switch-case statements for the
+// FieldType "enum" are complete.
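+// The check relies on typeInvalidMax being the final value of the enum:
+// iterating from TypeInvalid+1 up to (but excluding) typeInvalidMax visits
+// every valid FieldType, so any case missing from String(), convertType, or
+// Zero() falls through to the "unknown" fallback and fails the test. A
+// minimal sketch of the same exhaustiveness idiom for a hypothetical enum
+// (firstValid, sentinelMax, and unknownString are placeholder names):
+//
+//	for v := firstValid; v < sentinelMax; v++ {
+//		if v.String() == unknownString {
+//			t.Errorf("missing String() case for %d", v)
+//		}
+//	}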
+func TestFieldTypeMethods(t *testing.T) { + unknownFormat := convertType(TypeInvalid).format + + for i := TypeInvalid + 1; i < typeInvalidMax; i++ { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + if i.String() == TypeInvalid.String() { + t.Errorf("unknown type string for %d", i) + } + + if convertType(i).format == unknownFormat { + t.Errorf("unknown schema for %d", i) + } + + _ = i.Zero() + }) + } +} diff --git a/sdk/framework/field_data.go b/sdk/framework/field_data.go new file mode 100644 index 0000000..e5f69ac --- /dev/null +++ b/sdk/framework/field_data.go @@ -0,0 +1,466 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net/http" + "regexp" + "strings" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/mitchellh/mapstructure" +) + +// FieldData is the structure passed to the callback to handle a path +// containing the populated parameters for fields. This should be used +// instead of the raw (*vault.Request).Data to access data in a type-safe +// way. +type FieldData struct { + Raw map[string]interface{} + Schema map[string]*FieldSchema +} + +// Validate cycles through raw data and validates conversions in +// the schema, so we don't get an error/panic later when +// trying to get data out. Data not in the schema is not +// an error at this point, so we don't worry about it. +func (d *FieldData) Validate() error { + for field, value := range d.Raw { + + schema, ok := d.Schema[field] + if !ok { + continue + } + + switch schema.Type { + case TypeBool, TypeInt, TypeInt64, TypeMap, TypeDurationSecond, TypeSignedDurationSecond, TypeString, + TypeLowerCaseString, TypeNameString, TypeSlice, TypeStringSlice, TypeCommaStringSlice, + TypeKVPairs, TypeCommaIntSlice, TypeHeader, TypeFloat, TypeTime: + _, _, err := d.getPrimitive(field, schema) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("error converting input %v for field %q: {{err}}", value, field), err) + } + default: + return fmt.Errorf("unknown field type %q for field %q", schema.Type, field) + } + } + + return nil +} + +// ValidateStrict cycles through raw data and validates conversions in the +// schema. In addition to the checks done by Validate, this function ensures +// that the raw data has all of the schema's required fields and does not +// have any fields outside of the schema. It will return a non-nil error if: +// +// 1. a conversion (parsing of the field's value) fails +// 2. a raw field does not exist in the schema (unless the schema is nil) +// 3. a required schema field is missing from the raw data +// +// This function is currently used for validating response schemas in tests. +func (d *FieldData) ValidateStrict() error { + // the schema is nil, nothing to validate + if d.Schema == nil { + return nil + } + + for field := range d.Raw { + if _, _, err := d.GetOkErr(field); err != nil { + return fmt.Errorf("field %q: %w", field, err) + } + } + + for field, schema := range d.Schema { + if !schema.Required { + continue + } + if _, ok := d.Raw[field]; !ok { + return fmt.Errorf("missing required field %q", field) + } + } + + return nil +} + +// Get gets the value for the given field. If the key is an invalid field, +// FieldData will panic. If you want a safer version of this method, use +// GetOk. 
If the field k is not set, the default value (if set) will be +// returned, otherwise the zero value will be returned. +func (d *FieldData) Get(k string) interface{} { + schema, ok := d.Schema[k] + if !ok { + panic(fmt.Sprintf("field %s not in the schema", k)) + } + + // If the value can't be decoded, use the zero or default value for the field + // type + value, ok := d.GetOk(k) + if !ok || value == nil { + value = schema.DefaultOrZero() + } + + return value +} + +// GetDefaultOrZero gets the default value set on the schema for the given +// field. If there is no default value set, the zero value of the type +// will be returned. +func (d *FieldData) GetDefaultOrZero(k string) interface{} { + schema, ok := d.Schema[k] + if !ok { + panic(fmt.Sprintf("field %s not in the schema", k)) + } + + return schema.DefaultOrZero() +} + +// GetFirst gets the value for the given field names, in order from first +// to last. This can be useful for fields with a current name, and one or +// more deprecated names. The second return value will be false if the keys +// are invalid or the keys are not set at all. +func (d *FieldData) GetFirst(k ...string) (interface{}, bool) { + for _, v := range k { + if result, ok := d.GetOk(v); ok { + return result, ok + } + } + return nil, false +} + +// GetOk gets the value for the given field. The second return value will be +// false if the key is invalid or the key is not set at all. If the field k is +// set and the decoded value is nil, the default or zero value +// will be returned instead. +func (d *FieldData) GetOk(k string) (interface{}, bool) { + schema, ok := d.Schema[k] + if !ok { + return nil, false + } + + result, ok, err := d.GetOkErr(k) + if err != nil { + panic(fmt.Sprintf("error reading %s: %s", k, err)) + } + + if ok && result == nil { + result = schema.DefaultOrZero() + } + + return result, ok +} + +// GetOkErr is the most conservative of all the Get methods. It returns +// whether key is set or not, but also an error value. The error value is +// non-nil if the field doesn't exist or there was an error parsing the +// field value. 
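+//
+// A minimal usage sketch (hypothetical schema and field name):
+//
+//	d := &FieldData{
+//		Raw:    map[string]interface{}{"ttl": "5m"},
+//		Schema: map[string]*FieldSchema{"ttl": {Type: TypeDurationSecond}},
+//	}
+//	v, ok, err := d.GetOkErr("ttl") // v == 300 (seconds), ok == true, err == nil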
+func (d *FieldData) GetOkErr(k string) (interface{}, bool, error) { + schema, ok := d.Schema[k] + if !ok { + return nil, false, fmt.Errorf("unknown field: %q", k) + } + + switch schema.Type { + case TypeBool, TypeInt, TypeInt64, TypeMap, TypeDurationSecond, TypeSignedDurationSecond, TypeString, + TypeLowerCaseString, TypeNameString, TypeSlice, TypeStringSlice, TypeCommaStringSlice, + TypeKVPairs, TypeCommaIntSlice, TypeHeader, TypeFloat, TypeTime: + return d.getPrimitive(k, schema) + default: + return nil, false, + fmt.Errorf("unknown field type %q for field %q", schema.Type, k) + } +} + +func (d *FieldData) getPrimitive(k string, schema *FieldSchema) (interface{}, bool, error) { + raw, ok := d.Raw[k] + if !ok { + return nil, false, nil + } + + switch t := schema.Type; t { + case TypeBool: + var result bool + if err := mapstructure.WeakDecode(raw, &result); err != nil { + return nil, false, err + } + return result, true, nil + + case TypeInt: + var result int + if err := mapstructure.WeakDecode(raw, &result); err != nil { + return nil, false, err + } + return result, true, nil + + case TypeInt64: + var result int64 + if err := mapstructure.WeakDecode(raw, &result); err != nil { + return nil, false, err + } + return result, true, nil + + case TypeFloat: + var result float64 + if err := mapstructure.WeakDecode(raw, &result); err != nil { + return nil, false, err + } + return result, true, nil + + case TypeString: + var result string + if err := mapstructure.WeakDecode(raw, &result); err != nil { + return nil, false, err + } + return result, true, nil + + case TypeLowerCaseString: + var result string + if err := mapstructure.WeakDecode(raw, &result); err != nil { + return nil, false, err + } + return strings.ToLower(result), true, nil + + case TypeNameString: + var result string + if err := mapstructure.WeakDecode(raw, &result); err != nil { + return nil, false, err + } + matched, err := regexp.MatchString("^\\w(([\\w-.]+)?\\w)?$", result) + if err != nil { + return nil, false, err + } + if !matched { + return nil, false, errors.New("field does not match the formatting rules") + } + return result, true, nil + + case TypeMap: + var result map[string]interface{} + if err := mapstructure.WeakDecode(raw, &result); err != nil { + return nil, false, err + } + return result, true, nil + + case TypeDurationSecond, TypeSignedDurationSecond: + var result int + switch inp := raw.(type) { + case nil: + return nil, false, nil + default: + dur, err := parseutil.ParseDurationSecond(inp) + if err != nil { + return nil, false, err + } + result = int(dur.Seconds()) + } + if t == TypeDurationSecond && result < 0 { + return nil, false, fmt.Errorf("cannot provide negative value '%d'", result) + } + return result, true, nil + + case TypeTime: + switch inp := raw.(type) { + case nil: + // Handle nil interface{} as a non-error case + return nil, false, nil + default: + time, err := parseutil.ParseAbsoluteTime(inp) + if err != nil { + return nil, false, err + } + return time.UTC(), true, nil + } + + case TypeCommaIntSlice: + var result []int + + jsonIn, ok := raw.(json.Number) + if ok { + raw = jsonIn.String() + } + + config := &mapstructure.DecoderConfig{ + Result: &result, + WeaklyTypedInput: true, + DecodeHook: mapstructure.StringToSliceHookFunc(","), + } + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return nil, false, err + } + if err := decoder.Decode(raw); err != nil { + return nil, false, err + } + if len(result) == 0 { + return make([]int, 0), true, nil + } + return result, true, nil 
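+		// (Examples for the TypeCommaIntSlice case above, mirroring the
+		// tests below: a bare int 1 decodes to []int{1}, and the string
+		// "1,2,3" decodes to []int{1, 2, 3}.)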
+ + case TypeSlice: + var result []interface{} + if err := mapstructure.WeakDecode(raw, &result); err != nil { + return nil, false, err + } + if len(result) == 0 { + return make([]interface{}, 0), true, nil + } + return result, true, nil + + case TypeStringSlice: + rawString, ok := raw.(string) + if ok && rawString == "" { + return []string{}, true, nil + } + + var result []string + if err := mapstructure.WeakDecode(raw, &result); err != nil { + return nil, false, err + } + if len(result) == 0 { + return make([]string, 0), true, nil + } + return strutil.TrimStrings(result), true, nil + + case TypeCommaStringSlice: + res, err := parseutil.ParseCommaStringSlice(raw) + if err != nil { + return nil, false, err + } + return res, true, nil + + case TypeKVPairs: + // First try to parse this as a map + var mapResult map[string]string + if err := mapstructure.WeakDecode(raw, &mapResult); err == nil { + return mapResult, true, nil + } + + // If map parse fails, parse as a string list of = delimited pairs + var listResult []string + if err := mapstructure.WeakDecode(raw, &listResult); err != nil { + return nil, false, err + } + + result := make(map[string]string, len(listResult)) + for _, keyPair := range listResult { + keyPairSlice := strings.SplitN(keyPair, "=", 2) + if len(keyPairSlice) != 2 || keyPairSlice[0] == "" { + return nil, false, fmt.Errorf("invalid key pair %q", keyPair) + } + result[keyPairSlice[0]] = keyPairSlice[1] + } + return result, true, nil + + case TypeHeader: + /* + + There are multiple ways a header could be provided: + + 1. As a map[string]interface{} that resolves to a map[string]string or map[string][]string, or a mix of both + because that's permitted for headers. + This mainly comes from the API. + + 2. As a string... + a. That contains JSON that originally was JSON, but then was base64 encoded. + b. That contains JSON, ex. `{"content-type":"text/json","accept":["encoding/json"]}`. + This mainly comes from the API and is used to save space while sending in the header. + + 3. As an array of strings that contains comma-delimited key-value pairs associated via a colon, + ex: `content-type:text/json`,`accept:encoding/json`. + This mainly comes from the CLI. + + We go through these sequentially below. + + */ + result := http.Header{} + + toHeader := func(resultMap map[string]interface{}) (http.Header, error) { + header := http.Header{} + for headerKey, headerValGroup := range resultMap { + switch typedHeader := headerValGroup.(type) { + case string: + header.Add(headerKey, typedHeader) + case []string: + for _, headerVal := range typedHeader { + header.Add(headerKey, headerVal) + } + case json.Number: + header.Add(headerKey, typedHeader.String()) + case []interface{}: + for _, headerVal := range typedHeader { + switch typedHeader := headerVal.(type) { + case string: + header.Add(headerKey, typedHeader) + case json.Number: + header.Add(headerKey, typedHeader.String()) + default: + // All header values should already be strings when they're being sent in. + // Even numbers and booleans will be treated as strings. + return nil, fmt.Errorf("received non-string value for header key:%s, val:%s", headerKey, headerValGroup) + } + } + default: + return nil, fmt.Errorf("unrecognized type for %s", headerValGroup) + } + } + return header, nil + } + + resultMap := make(map[string]interface{}) + + // 1. Are we getting a map from the API? 
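+		// For example: map[string]interface{}{"content-type": "text/json",
+		// "accept": []string{"encoding/json"}}.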
+ if err := mapstructure.WeakDecode(raw, &resultMap); err == nil { + result, err = toHeader(resultMap) + if err != nil { + return nil, false, err + } + return result, true, nil + } + + // 2. Are we getting a JSON string? + if headerStr, ok := raw.(string); ok { + // a. Is it base64 encoded? + headerBytes, err := base64.StdEncoding.DecodeString(headerStr) + if err != nil { + // b. It's not base64 encoded, it's a straight-out JSON string. + headerBytes = []byte(headerStr) + } + if err := jsonutil.DecodeJSON(headerBytes, &resultMap); err != nil { + return nil, false, err + } + result, err = toHeader(resultMap) + if err != nil { + return nil, false, err + } + return result, true, nil + } + + // 3. Are we getting an array of fields like "content-type:encoding/json" from the CLI? + var keyPairs []interface{} + if err := mapstructure.WeakDecode(raw, &keyPairs); err == nil { + for _, keyPairIfc := range keyPairs { + keyPair, ok := keyPairIfc.(string) + if !ok { + return nil, false, fmt.Errorf("invalid key pair %q", keyPair) + } + keyPairSlice := strings.SplitN(keyPair, ":", 2) + if len(keyPairSlice) != 2 || keyPairSlice[0] == "" { + return nil, false, fmt.Errorf("invalid key pair %q", keyPair) + } + result.Add(keyPairSlice[0], keyPairSlice[1]) + } + return result, true, nil + } + return nil, false, fmt.Errorf("%s not provided an expected format", raw) + + default: + panic(fmt.Sprintf("Unknown type: %s", schema.Type)) + } +} diff --git a/sdk/framework/field_data_test.go b/sdk/framework/field_data_test.go new file mode 100644 index 0000000..078c6fb --- /dev/null +++ b/sdk/framework/field_data_test.go @@ -0,0 +1,1277 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "encoding/json" + "net/http" + "reflect" + "testing" + "time" +) + +func TestFieldDataGet(t *testing.T) { + cases := map[string]struct { + Schema map[string]*FieldSchema + Raw map[string]interface{} + Key string + Value interface{} + ExpectError bool + }{ + "string type, string value": { + map[string]*FieldSchema{ + "foo": {Type: TypeString}, + }, + map[string]interface{}{ + "foo": "bar", + }, + "foo", + "bar", + false, + }, + + "string type, int value": { + map[string]*FieldSchema{ + "foo": {Type: TypeString}, + }, + map[string]interface{}{ + "foo": 42, + }, + "foo", + "42", + false, + }, + + "string type, unset value": { + map[string]*FieldSchema{ + "foo": {Type: TypeString}, + }, + map[string]interface{}{}, + "foo", + "", + false, + }, + + "string type, unset value with default": { + map[string]*FieldSchema{ + "foo": { + Type: TypeString, + Default: "bar", + }, + }, + map[string]interface{}{}, + "foo", + "bar", + false, + }, + + "lowercase string type, lowercase string value": { + map[string]*FieldSchema{ + "foo": {Type: TypeLowerCaseString}, + }, + map[string]interface{}{ + "foo": "bar", + }, + "foo", + "bar", + false, + }, + + "lowercase string type, mixed-case string value": { + map[string]*FieldSchema{ + "foo": {Type: TypeLowerCaseString}, + }, + map[string]interface{}{ + "foo": "BaR", + }, + "foo", + "bar", + false, + }, + + "lowercase string type, int value": { + map[string]*FieldSchema{ + "foo": {Type: TypeLowerCaseString}, + }, + map[string]interface{}{ + "foo": 42, + }, + "foo", + "42", + false, + }, + + "lowercase string type, unset value": { + map[string]*FieldSchema{ + "foo": {Type: TypeLowerCaseString}, + }, + map[string]interface{}{}, + "foo", + "", + false, + }, + + "lowercase string type, unset value with lowercase default": { + map[string]*FieldSchema{ + "foo": 
{ + Type: TypeLowerCaseString, + Default: "bar", + }, + }, + map[string]interface{}{}, + "foo", + "bar", + false, + }, + + "int type, int value": { + map[string]*FieldSchema{ + "foo": {Type: TypeInt}, + }, + map[string]interface{}{ + "foo": 42, + }, + "foo", + 42, + false, + }, + + "bool type, bool value": { + map[string]*FieldSchema{ + "foo": {Type: TypeBool}, + }, + map[string]interface{}{ + "foo": false, + }, + "foo", + false, + false, + }, + + "map type, map value": { + map[string]*FieldSchema{ + "foo": {Type: TypeMap}, + }, + map[string]interface{}{ + "foo": map[string]interface{}{ + "child": true, + }, + }, + "foo", + map[string]interface{}{ + "child": true, + }, + false, + }, + + "duration type, string value": { + map[string]*FieldSchema{ + "foo": {Type: TypeDurationSecond}, + }, + map[string]interface{}{ + "foo": "42", + }, + "foo", + 42, + false, + }, + + "duration type, string duration value": { + map[string]*FieldSchema{ + "foo": {Type: TypeDurationSecond}, + }, + map[string]interface{}{ + "foo": "42m", + }, + "foo", + 2520, + false, + }, + + "duration type, int value": { + map[string]*FieldSchema{ + "foo": {Type: TypeDurationSecond}, + }, + map[string]interface{}{ + "foo": 42, + }, + "foo", + 42, + false, + }, + + "duration type, float value": { + map[string]*FieldSchema{ + "foo": {Type: TypeDurationSecond}, + }, + map[string]interface{}{ + "foo": 42.0, + }, + "foo", + 42, + false, + }, + + "duration type, nil value": { + map[string]*FieldSchema{ + "foo": {Type: TypeDurationSecond}, + }, + map[string]interface{}{ + "foo": nil, + }, + "foo", + 0, + false, + }, + + "duration type, 0 value": { + map[string]*FieldSchema{ + "foo": {Type: TypeDurationSecond}, + }, + map[string]interface{}{ + "foo": 0, + }, + "foo", + 0, + false, + }, + + "signed duration type, positive string value": { + map[string]*FieldSchema{ + "foo": {Type: TypeSignedDurationSecond}, + }, + map[string]interface{}{ + "foo": "42", + }, + "foo", + 42, + false, + }, + + "signed duration type, positive string duration value": { + map[string]*FieldSchema{ + "foo": {Type: TypeSignedDurationSecond}, + }, + map[string]interface{}{ + "foo": "42m", + }, + "foo", + 2520, + false, + }, + + "signed duration type, positive int value": { + map[string]*FieldSchema{ + "foo": {Type: TypeSignedDurationSecond}, + }, + map[string]interface{}{ + "foo": 42, + }, + "foo", + 42, + false, + }, + + "signed duration type, positive float value": { + map[string]*FieldSchema{ + "foo": {Type: TypeSignedDurationSecond}, + }, + map[string]interface{}{ + "foo": 42.0, + }, + "foo", + 42, + false, + }, + + "signed duration type, negative string value": { + map[string]*FieldSchema{ + "foo": {Type: TypeSignedDurationSecond}, + }, + map[string]interface{}{ + "foo": "-42", + }, + "foo", + -42, + false, + }, + + "signed duration type, negative string duration value": { + map[string]*FieldSchema{ + "foo": {Type: TypeSignedDurationSecond}, + }, + map[string]interface{}{ + "foo": "-42m", + }, + "foo", + -2520, + false, + }, + + "signed duration type, negative int value": { + map[string]*FieldSchema{ + "foo": {Type: TypeSignedDurationSecond}, + }, + map[string]interface{}{ + "foo": -42, + }, + "foo", + -42, + false, + }, + + "signed duration type, negative float value": { + map[string]*FieldSchema{ + "foo": {Type: TypeSignedDurationSecond}, + }, + map[string]interface{}{ + "foo": -42.0, + }, + "foo", + -42, + false, + }, + + "signed duration type, nil value": { + map[string]*FieldSchema{ + "foo": {Type: TypeSignedDurationSecond}, + }, + 
map[string]interface{}{ + "foo": nil, + }, + "foo", + 0, + false, + }, + + "signed duration type, 0 value": { + map[string]*FieldSchema{ + "foo": {Type: TypeSignedDurationSecond}, + }, + map[string]interface{}{ + "foo": 0, + }, + "foo", + 0, + false, + }, + + "slice type, empty slice": { + map[string]*FieldSchema{ + "foo": {Type: TypeSlice}, + }, + map[string]interface{}{ + "foo": []interface{}{}, + }, + "foo", + []interface{}{}, + false, + }, + + "slice type, filled, mixed slice": { + map[string]*FieldSchema{ + "foo": {Type: TypeSlice}, + }, + map[string]interface{}{ + "foo": []interface{}{123, "abc"}, + }, + "foo", + []interface{}{123, "abc"}, + false, + }, + + "string slice type, filled slice": { + map[string]*FieldSchema{ + "foo": {Type: TypeStringSlice}, + }, + map[string]interface{}{ + "foo": []interface{}{123, "abc"}, + }, + "foo", + []string{"123", "abc"}, + false, + }, + + "string slice type, single value": { + map[string]*FieldSchema{ + "foo": {Type: TypeStringSlice}, + }, + map[string]interface{}{ + "foo": "abc", + }, + "foo", + []string{"abc"}, + false, + }, + + "string slice type, empty string": { + map[string]*FieldSchema{ + "foo": {Type: TypeStringSlice}, + }, + map[string]interface{}{ + "foo": "", + }, + "foo", + []string{}, + false, + }, + + "comma string slice type, empty string": { + map[string]*FieldSchema{ + "foo": {Type: TypeCommaStringSlice}, + }, + map[string]interface{}{ + "foo": "", + }, + "foo", + []string{}, + false, + }, + + "comma string slice type, comma string with one value": { + map[string]*FieldSchema{ + "foo": {Type: TypeCommaStringSlice}, + }, + map[string]interface{}{ + "foo": "value1", + }, + "foo", + []string{"value1"}, + false, + }, + + "comma string slice type, comma string with multi value": { + map[string]*FieldSchema{ + "foo": {Type: TypeCommaStringSlice}, + }, + map[string]interface{}{ + "foo": "value1,value2,value3", + }, + "foo", + []string{"value1", "value2", "value3"}, + false, + }, + + "comma string slice type, nil string slice value": { + map[string]*FieldSchema{ + "foo": {Type: TypeCommaStringSlice}, + }, + map[string]interface{}{ + "foo": "", + }, + "foo", + []string{}, + false, + }, + + "comma string slice type, string slice with one value": { + map[string]*FieldSchema{ + "foo": {Type: TypeCommaStringSlice}, + }, + map[string]interface{}{ + "foo": []interface{}{"value1"}, + }, + "foo", + []string{"value1"}, + false, + }, + + "comma string slice type, string slice with multi value": { + map[string]*FieldSchema{ + "foo": {Type: TypeCommaStringSlice}, + }, + map[string]interface{}{ + "foo": []interface{}{"value1", "value2", "value3"}, + }, + "foo", + []string{"value1", "value2", "value3"}, + false, + }, + + "comma string slice type, empty string slice value": { + map[string]*FieldSchema{ + "foo": {Type: TypeCommaStringSlice}, + }, + map[string]interface{}{ + "foo": []interface{}{}, + }, + "foo", + []string{}, + false, + }, + + "comma int slice type, comma int with one value": { + map[string]*FieldSchema{ + "foo": {Type: TypeCommaIntSlice}, + }, + map[string]interface{}{ + "foo": 1, + }, + "foo", + []int{1}, + false, + }, + + "comma int slice type, comma int with multi value slice": { + map[string]*FieldSchema{ + "foo": {Type: TypeCommaIntSlice}, + }, + map[string]interface{}{ + "foo": []int{1, 2, 3}, + }, + "foo", + []int{1, 2, 3}, + false, + }, + + "comma int slice type, comma int with multi value": { + map[string]*FieldSchema{ + "foo": {Type: TypeCommaIntSlice}, + }, + map[string]interface{}{ + "foo": "1,2,3", + }, + "foo", + []int{1, 
2, 3}, + false, + }, + + "comma int slice type, nil int slice value": { + map[string]*FieldSchema{ + "foo": {Type: TypeCommaIntSlice}, + }, + map[string]interface{}{ + "foo": "", + }, + "foo", + []int{}, + false, + }, + + "comma int slice type, int slice with one value": { + map[string]*FieldSchema{ + "foo": {Type: TypeCommaIntSlice}, + }, + map[string]interface{}{ + "foo": []interface{}{"1"}, + }, + "foo", + []int{1}, + false, + }, + + "comma int slice type, int slice with multi value strings": { + map[string]*FieldSchema{ + "foo": {Type: TypeCommaIntSlice}, + }, + map[string]interface{}{ + "foo": []interface{}{"1", "2", "3"}, + }, + "foo", + []int{1, 2, 3}, + false, + }, + + "comma int slice type, int slice with multi value": { + map[string]*FieldSchema{ + "foo": {Type: TypeCommaIntSlice}, + }, + map[string]interface{}{ + "foo": []interface{}{1, 2, 3}, + }, + "foo", + []int{1, 2, 3}, + false, + }, + + "comma int slice type, empty int slice value": { + map[string]*FieldSchema{ + "foo": {Type: TypeCommaIntSlice}, + }, + map[string]interface{}{ + "foo": []interface{}{}, + }, + "foo", + []int{}, + false, + }, + + "comma int slice type, json number": { + map[string]*FieldSchema{ + "foo": {Type: TypeCommaIntSlice}, + }, + map[string]interface{}{ + "foo": json.Number("1"), + }, + "foo", + []int{1}, + false, + }, + + "name string type, valid string": { + map[string]*FieldSchema{ + "foo": {Type: TypeNameString}, + }, + map[string]interface{}{ + "foo": "bar", + }, + "foo", + "bar", + false, + }, + + "name string type, valid value with special characters": { + map[string]*FieldSchema{ + "foo": {Type: TypeNameString}, + }, + map[string]interface{}{ + "foo": "bar.baz-bay123", + }, + "foo", + "bar.baz-bay123", + false, + }, + + "keypair type, valid value map type": { + map[string]*FieldSchema{ + "foo": {Type: TypeKVPairs}, + }, + map[string]interface{}{ + "foo": map[string]interface{}{ + "key1": "value1", + "key2": "value2", + "key3": 1, + }, + }, + "foo", + map[string]string{ + "key1": "value1", + "key2": "value2", + "key3": "1", + }, + false, + }, + + "keypair type, list of equal sign delim key pairs type": { + map[string]*FieldSchema{ + "foo": {Type: TypeKVPairs}, + }, + map[string]interface{}{ + "foo": []interface{}{"key1=value1", "key2=value2", "key3=1"}, + }, + "foo", + map[string]string{ + "key1": "value1", + "key2": "value2", + "key3": "1", + }, + false, + }, + + "keypair type, single equal sign delim value": { + map[string]*FieldSchema{ + "foo": {Type: TypeKVPairs}, + }, + map[string]interface{}{ + "foo": "key1=value1", + }, + "foo", + map[string]string{ + "key1": "value1", + }, + false, + }, + + "type header, keypair string array": { + map[string]*FieldSchema{ + "foo": {Type: TypeHeader}, + }, + map[string]interface{}{ + "foo": []interface{}{"key1:value1", "key2:value2", "key3:1"}, + }, + "foo", + http.Header{ + "Key1": []string{"value1"}, + "Key2": []string{"value2"}, + "Key3": []string{"1"}, + }, + false, + }, + + "type header, b64 string": { + map[string]*FieldSchema{ + "foo": {Type: TypeHeader}, + }, + map[string]interface{}{ + "foo": 
"eyJDb250ZW50LUxlbmd0aCI6IFsiNDMiXSwgIlVzZXItQWdlbnQiOiBbImF3cy1zZGstZ28vMS40LjEyIChnbzEuNy4xOyBsaW51eDsgYW1kNjQpIl0sICJYLVZhdWx0LUFXU0lBTS1TZXJ2ZXItSWQiOiBbInZhdWx0LmV4YW1wbGUuY29tIl0sICJYLUFtei1EYXRlIjogWyIyMDE2MDkzMFQwNDMxMjFaIl0sICJDb250ZW50LVR5cGUiOiBbImFwcGxpY2F0aW9uL3gtd3d3LWZvcm0tdXJsZW5jb2RlZDsgY2hhcnNldD11dGYtOCJdLCAiQXV0aG9yaXphdGlvbiI6IFsiQVdTNC1ITUFDLVNIQTI1NiBDcmVkZW50aWFsPWZvby8yMDE2MDkzMC91cy1lYXN0LTEvc3RzL2F3czRfcmVxdWVzdCwgU2lnbmVkSGVhZGVycz1jb250ZW50LWxlbmd0aDtjb250ZW50LXR5cGU7aG9zdDt4LWFtei1kYXRlO3gtdmF1bHQtc2VydmVyLCBTaWduYXR1cmU9YTY5ZmQ3NTBhMzQ0NWM0ZTU1M2UxYjNlNzlkM2RhOTBlZWY1NDA0N2YxZWI0ZWZlOGZmYmM5YzQyOGMyNjU1YiJdLCAiRm9vIjogNDJ9", + }, + "foo", + http.Header{ + "Content-Length": []string{"43"}, + "User-Agent": []string{"aws-sdk-go/1.4.12 (go1.7.1; linux; amd64)"}, + "X-Vault-Awsiam-Server-Id": []string{"vault.example.com"}, + "X-Amz-Date": []string{"20160930T043121Z"}, + "Content-Type": []string{"application/x-www-form-urlencoded; charset=utf-8"}, + "Authorization": []string{"AWS4-HMAC-SHA256 Credential=foo/20160930/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-vault-server, Signature=a69fd750a3445c4e553e1b3e79d3da90eef54047f1eb4efe8ffbc9c428c2655b"}, + "Foo": []string{"42"}, + }, + false, + }, + + "type header, json string": { + map[string]*FieldSchema{ + "foo": {Type: TypeHeader}, + }, + map[string]interface{}{ + "foo": `{"hello":"world","bonjour":["monde","dieu"], "Guten Tag": 42, "你好": ["10", 20, 3.14]}`, + }, + "foo", + http.Header{ + "Hello": []string{"world"}, + "Bonjour": []string{"monde", "dieu"}, + "Guten Tag": []string{"42"}, + "你好": []string{"10", "20", "3.14"}, + }, + false, + }, + + "type header, keypair string array with dupe key": { + map[string]*FieldSchema{ + "foo": {Type: TypeHeader}, + }, + map[string]interface{}{ + "foo": []interface{}{"key1:value1", "key2:value2", "key3:1", "key3:true"}, + }, + "foo", + http.Header{ + "Key1": []string{"value1"}, + "Key2": []string{"value2"}, + "Key3": []string{"1", "true"}, + }, + false, + }, + + "type header, map string slice": { + map[string]*FieldSchema{ + "foo": {Type: TypeHeader}, + }, + map[string]interface{}{ + "foo": map[string][]string{ + "key1": {"value1"}, + "key2": {"value2"}, + "key3": {"1"}, + }, + }, + "foo", + http.Header{ + "Key1": []string{"value1"}, + "Key2": []string{"value2"}, + "Key3": []string{"1"}, + }, + false, + }, + + "name string type, not supplied": { + map[string]*FieldSchema{ + "foo": {Type: TypeNameString}, + }, + map[string]interface{}{}, + "foo", + "", + false, + }, + + "string type, not supplied": { + map[string]*FieldSchema{ + "foo": {Type: TypeString}, + }, + map[string]interface{}{}, + "foo", + "", + false, + }, + + "type int, not supplied": { + map[string]*FieldSchema{ + "foo": {Type: TypeInt}, + }, + map[string]interface{}{}, + "foo", + 0, + false, + }, + + "type bool, not supplied": { + map[string]*FieldSchema{ + "foo": {Type: TypeBool}, + }, + map[string]interface{}{}, + "foo", + false, + false, + }, + + "type map, not supplied": { + map[string]*FieldSchema{ + "foo": {Type: TypeMap}, + }, + map[string]interface{}{}, + "foo", + map[string]interface{}{}, + false, + }, + + "type duration second, not supplied": { + map[string]*FieldSchema{ + "foo": {Type: TypeDurationSecond}, + }, + map[string]interface{}{}, + "foo", + 0, + false, + }, + + "type signed duration second, not supplied": { + map[string]*FieldSchema{ + "foo": {Type: TypeSignedDurationSecond}, + }, + map[string]interface{}{}, + "foo", + 0, + false, + }, + + "type slice, 
not supplied": { + map[string]*FieldSchema{ + "foo": {Type: TypeSlice}, + }, + map[string]interface{}{}, + "foo", + []interface{}{}, + false, + }, + + "type string slice, not supplied": { + map[string]*FieldSchema{ + "foo": {Type: TypeStringSlice}, + }, + map[string]interface{}{}, + "foo", + []string{}, + false, + }, + + "type comma string slice, not supplied": { + map[string]*FieldSchema{ + "foo": {Type: TypeCommaStringSlice}, + }, + map[string]interface{}{}, + "foo", + []string{}, + false, + }, + + "comma string slice type, single JSON number value": { + map[string]*FieldSchema{ + "foo": {Type: TypeCommaStringSlice}, + }, + map[string]interface{}{ + "foo": json.Number("123"), + }, + "foo", + []string{"123"}, + false, + }, + + "type kv pair, not supplied": { + map[string]*FieldSchema{ + "foo": {Type: TypeKVPairs}, + }, + map[string]interface{}{}, + "foo", + map[string]string{}, + false, + }, + + "type header, not supplied": { + map[string]*FieldSchema{ + "foo": {Type: TypeHeader}, + }, + map[string]interface{}{}, + "foo", + http.Header{}, + false, + }, + + "float type, positive with decimals, as string": { + map[string]*FieldSchema{ + "foo": {Type: TypeFloat}, + }, + map[string]interface{}{ + "foo": "1234567.891234567", + }, + "foo", + 1234567.891234567, + false, + }, + + "float type, negative with decimals, as string": { + map[string]*FieldSchema{ + "foo": {Type: TypeFloat}, + }, + map[string]interface{}{ + "foo": "-1234567.891234567", + }, + "foo", + -1234567.891234567, + false, + }, + + "float type, positive without decimals": { + map[string]*FieldSchema{ + "foo": {Type: TypeFloat}, + }, + map[string]interface{}{ + "foo": 1234567, + }, + "foo", + 1234567.0, + false, + }, + + "type float, not supplied": { + map[string]*FieldSchema{ + "foo": {Type: TypeFloat}, + }, + map[string]interface{}{}, + "foo", + 0.0, + false, + }, + + "type float, invalid value": { + map[string]*FieldSchema{ + "foo": {Type: TypeFloat}, + }, + map[string]interface{}{ + "foo": "invalid0.0", + }, + "foo", + 0.0, + true, + }, + + "type time, not supplied": { + map[string]*FieldSchema{ + "foo": {Type: TypeTime}, + }, + map[string]interface{}{}, + "foo", + time.Time{}, + false, + }, + "type time, string value": { + map[string]*FieldSchema{ + "foo": {Type: TypeTime}, + }, + map[string]interface{}{ + "foo": "2021-12-11T09:08:07Z", + }, + "foo", + // Comparison uses DeepEqual() so better match exactly, + // can't have a different location. 
+ time.Date(2021, 12, 11, 9, 8, 7, 0, time.UTC), + false, + }, + "type time, invalid value": { + map[string]*FieldSchema{ + "foo": {Type: TypeTime}, + }, + map[string]interface{}{ + "foo": "2021-13-11T09:08:07+02:00", + }, + "foo", + time.Time{}, + true, + }, + } + + for name, tc := range cases { + name, tc := name, tc + t.Run(name, func(t *testing.T) { + t.Parallel() + data := &FieldData{ + Raw: tc.Raw, + Schema: tc.Schema, + } + + err := data.Validate() + switch { + case tc.ExpectError && err == nil: + t.Fatalf("expected error") + case tc.ExpectError && err != nil: + return + case !tc.ExpectError && err != nil: + t.Fatal(err) + default: + // Continue if !tc.ExpectError && err == nil + } + + actual := data.Get(tc.Key) + if !reflect.DeepEqual(actual, tc.Value) { + t.Fatalf("Expected: %#v\nGot: %#v", tc.Value, actual) + } + }) + } +} + +func TestFieldDataGet_Error(t *testing.T) { + cases := map[string]struct { + Schema map[string]*FieldSchema + Raw map[string]interface{} + Key string + }{ + "name string type, invalid value with invalid characters": { + map[string]*FieldSchema{ + "foo": {Type: TypeNameString}, + }, + map[string]interface{}{ + "foo": "bar baz", + }, + "foo", + }, + "name string type, invalid value with special characters at beginning": { + map[string]*FieldSchema{ + "foo": {Type: TypeNameString}, + }, + map[string]interface{}{ + "foo": ".barbaz", + }, + "foo", + }, + "name string type, invalid value with special characters at end": { + map[string]*FieldSchema{ + "foo": {Type: TypeNameString}, + }, + map[string]interface{}{ + "foo": "barbaz-", + }, + "foo", + }, + "name string type, empty string": { + map[string]*FieldSchema{ + "foo": {Type: TypeNameString}, + }, + map[string]interface{}{ + "foo": "", + }, + "foo", + }, + "keypair type, csv version empty key name": { + map[string]*FieldSchema{ + "foo": {Type: TypeKVPairs}, + }, + map[string]interface{}{ + "foo": []interface{}{"=value1", "key2=value2", "key3=1"}, + }, + "foo", + }, + "duration type, negative string value": { + map[string]*FieldSchema{ + "foo": {Type: TypeDurationSecond}, + }, + map[string]interface{}{ + "foo": "-42", + }, + "foo", + }, + "duration type, negative string duration value": { + map[string]*FieldSchema{ + "foo": {Type: TypeDurationSecond}, + }, + map[string]interface{}{ + "foo": "-42m", + }, + "foo", + }, + "duration type, negative int value": { + map[string]*FieldSchema{ + "foo": {Type: TypeDurationSecond}, + }, + map[string]interface{}{ + "foo": -42, + }, + "foo", + }, + "duration type, negative float value": { + map[string]*FieldSchema{ + "foo": {Type: TypeDurationSecond}, + }, + map[string]interface{}{ + "foo": -42.0, + }, + "foo", + }, + } + + for name, tc := range cases { + name, tc := name, tc + t.Run(name, func(t *testing.T) { + t.Parallel() + data := &FieldData{ + Raw: tc.Raw, + Schema: tc.Schema, + } + + got, _, err := data.GetOkErr(tc.Key) + if err == nil { + t.Fatalf("error expected, none received, got result: %#v", got) + } + }) + } +} + +func TestFieldDataGetFirst(t *testing.T) { + data := &FieldData{ + Raw: map[string]interface{}{ + "foo": "bar", + "fizz": "buzz", + }, + Schema: map[string]*FieldSchema{ + "foo": {Type: TypeNameString}, + "fizz": {Type: TypeNameString}, + }, + } + + result, ok := data.GetFirst("foo", "fizz") + if !ok { + t.Fatal("should have found value for foo") + } + if result.(string) != "bar" { + t.Fatal("should have gotten bar for foo") + } + + result, ok = data.GetFirst("fizz", "foo") + if !ok { + t.Fatal("should have found value for fizz") + } + if 
result.(string) != "buzz" { + t.Fatal("should have gotten buzz for fizz") + } + + _, ok = data.GetFirst("cats") + if ok { + t.Fatal("shouldn't have gotten anything for cats") + } +} + +func TestValidateStrict(t *testing.T) { + cases := map[string]struct { + Schema map[string]*FieldSchema + Raw map[string]interface{} + ExpectError bool + }{ + "string type, string value": { + map[string]*FieldSchema{ + "foo": {Type: TypeString}, + }, + map[string]interface{}{ + "foo": "bar", + }, + false, + }, + + "string type, int value": { + map[string]*FieldSchema{ + "foo": {Type: TypeString}, + }, + map[string]interface{}{ + "foo": 42, + }, + false, + }, + + "string type, unset value": { + map[string]*FieldSchema{ + "foo": {Type: TypeString}, + }, + map[string]interface{}{}, + false, + }, + + "string type, unset required value": { + map[string]*FieldSchema{ + "foo": { + Type: TypeString, + Required: true, + }, + }, + map[string]interface{}{}, + true, + }, + + "value not in schema": { + map[string]*FieldSchema{ + "foo": { + Type: TypeString, + Required: true, + }, + }, + map[string]interface{}{ + "foo": 42, + "bar": 43, + }, + true, + }, + + "value not in schema, empty schema": { + map[string]*FieldSchema{}, + map[string]interface{}{ + "foo": 42, + "bar": 43, + }, + true, + }, + + "value not in schema, nil schema": { + nil, + map[string]interface{}{ + "foo": 42, + "bar": 43, + }, + false, + }, + + "type time, invalid value": { + map[string]*FieldSchema{ + "foo": {Type: TypeTime}, + }, + map[string]interface{}{ + "foo": "2021-13-11T09:08:07+02:00", + }, + true, + }, + } + + for name, tc := range cases { + name, tc := name, tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + data := &FieldData{ + Raw: tc.Raw, + Schema: tc.Schema, + } + + err := data.ValidateStrict() + + if err == nil && tc.ExpectError == true { + t.Fatalf("expected an error, got nil") + } + if err != nil && tc.ExpectError == false { + t.Fatalf("unexpected error: %v", err) + } + }) + } +} diff --git a/sdk/framework/field_type.go b/sdk/framework/field_type.go new file mode 100644 index 0000000..ee07b6a --- /dev/null +++ b/sdk/framework/field_type.go @@ -0,0 +1,106 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package framework + +// FieldType is the enum of types that a field can be. +type FieldType uint + +const ( + TypeInvalid FieldType = 0 + TypeString FieldType = iota + TypeInt + TypeInt64 + TypeBool + TypeMap + + // TypeDurationSecond represent as seconds, this can be either an + // integer or go duration format string (e.g. 24h) + TypeDurationSecond + + // TypeSignedDurationSecond represents a positive or negative duration + // as seconds, this can be either an integer or go duration format + // string (e.g. 24h). + TypeSignedDurationSecond + + // TypeSlice represents a slice of any type + TypeSlice + + // TypeStringSlice is a helper for TypeSlice that returns a sanitized + // slice of strings + TypeStringSlice + + // TypeCommaStringSlice is a helper for TypeSlice that returns a sanitized + // slice of strings and also supports parsing a comma-separated list in + // a string field + TypeCommaStringSlice + + // TypeLowerCaseString is a helper for TypeString that returns a lowercase + // version of the provided string + TypeLowerCaseString + + // TypeNameString represents a name that is URI safe and follows specific + // rules. These rules include start and end with an alphanumeric + // character and characters in the middle can be alphanumeric or . or -. 
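+	// For example, "webapp" and "web-app.prod" are accepted, while
+	// ".leading", "trailing-", and values containing spaces are rejected.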
+ TypeNameString + + // TypeKVPairs allows you to represent the data as a map or a list of + // equal sign delimited key pairs + TypeKVPairs + + // TypeCommaIntSlice is a helper for TypeSlice that returns a sanitized + // slice of Ints + TypeCommaIntSlice + + // TypeHeader is a helper for sending request headers through to Vault. + // For instance, the AWS and AliCloud credential plugins both act as a + // benevolent MITM for a request, and the headers are sent through and + // parsed. + TypeHeader + + // TypeFloat parses both float32 and float64 values + TypeFloat + + // TypeTime represents absolute time. It accepts an RFC3339-formatted + // string (with or without fractional seconds), or an epoch timestamp + // formatted as a string or a number. The resulting time.Time + // is converted to UTC. + TypeTime + + // DO NOT USE. Any new values must be inserted before this value. + // Used to write tests that ensure type methods handle all possible values. + typeInvalidMax +) + +func (t FieldType) String() string { + switch t { + case TypeString: + return "string" + case TypeLowerCaseString: + return "lowercase string" + case TypeNameString: + return "name string" + case TypeInt: + return "int" + case TypeInt64: + return "int64" + case TypeBool: + return "bool" + case TypeMap: + return "map" + case TypeKVPairs: + return "keypair" + case TypeDurationSecond, TypeSignedDurationSecond: + return "duration (sec)" + case TypeSlice, TypeStringSlice, TypeCommaStringSlice, TypeCommaIntSlice: + return "slice" + case TypeHeader: + return "header" + case TypeFloat: + return "float" + case TypeTime: + return "time" + default: + return "unknown type" + } +} diff --git a/sdk/framework/filter.go b/sdk/framework/filter.go new file mode 100644 index 0000000..b9b9979 --- /dev/null +++ b/sdk/framework/filter.go @@ -0,0 +1,38 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "context" + + "github.com/hashicorp/vault/sdk/logical" + "github.com/ryanuber/go-glob" +) + +// GlobListFilter wraps an OperationFunc with an optional filter which excludes listed entries +// which don't match a glob style pattern +func GlobListFilter(fieldName string, callback OperationFunc) OperationFunc { + return func(ctx context.Context, req *logical.Request, data *FieldData) (*logical.Response, error) { + resp, err := callback(ctx, req, data) + if err != nil { + return nil, err + } + + if keys, ok := resp.Data["keys"]; ok { + if entries, ok := keys.([]string); ok { + filter, ok := data.GetOk(fieldName) + if ok && filter != "" && filter != "*" { + var filteredEntries []string + for _, e := range entries { + if glob.Glob(filter.(string), e) { + filteredEntries = append(filteredEntries, e) + } + } + resp.Data["keys"] = filteredEntries + } + } + } + return resp, nil + } +} diff --git a/sdk/framework/identity.go b/sdk/framework/identity.go new file mode 100644 index 0000000..157f3c1 --- /dev/null +++ b/sdk/framework/identity.go @@ -0,0 +1,60 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "errors" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/helper/identitytpl" + "github.com/hashicorp/vault/sdk/logical" +) + +// PopulateIdentityTemplate takes a template string, an entity ID, and an +// instance of system view. It will query system view for information about the +// entity and use the resulting identity information to populate the template +// string. 
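+//
+// A minimal usage sketch (entityID and sysView are assumed to resolve to a
+// known entity):
+//
+//	out, err := PopulateIdentityTemplate("{{identity.entity.name}}", entityID, sysView)
+//	// out now holds the entity's name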
+func PopulateIdentityTemplate(tpl string, entityID string, sysView logical.SystemView) (string, error) { + entity, err := sysView.EntityInfo(entityID) + if err != nil { + return "", err + } + if entity == nil { + return "", errors.New("no entity found") + } + + groups, err := sysView.GroupsForEntity(entityID) + if err != nil { + return "", err + } + + input := identitytpl.PopulateStringInput{ + String: tpl, + Entity: entity, + Groups: groups, + Mode: identitytpl.ACLTemplating, + } + + _, out, err := identitytpl.PopulateString(input) + if err != nil { + return "", err + } + + return out, nil +} + +// ValidateIdentityTemplate takes a template string and returns if the string is +// a valid identity template. +func ValidateIdentityTemplate(tpl string) (bool, error) { + hasTemplating, _, err := identitytpl.PopulateString(identitytpl.PopulateStringInput{ + Mode: identitytpl.ACLTemplating, + ValidityCheckOnly: true, + String: tpl, + }) + if err != nil { + return false, errwrap.Wrapf("failed to validate policy templating: {{err}}", err) + } + + return hasTemplating, nil +} diff --git a/sdk/framework/identity_test.go b/sdk/framework/identity_test.go new file mode 100644 index 0000000..1667fb9 --- /dev/null +++ b/sdk/framework/identity_test.go @@ -0,0 +1,103 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "testing" + + "github.com/hashicorp/vault/sdk/logical" +) + +func TestIdentityTemplating(t *testing.T) { + sysView := &logical.StaticSystemView{ + EntityVal: &logical.Entity{ + ID: "test-id", + Name: "test", + Aliases: []*logical.Alias{ + { + ID: "alias-id", + Name: "test alias", + MountAccessor: "test_mount", + MountType: "secret", + Metadata: map[string]string{ + "alias-metadata": "alias-metadata-value", + }, + }, + }, + Metadata: map[string]string{ + "entity-metadata": "entity-metadata-value", + }, + }, + GroupsVal: []*logical.Group{ + { + ID: "group1-id", + Name: "group1", + Metadata: map[string]string{ + "group-metadata": "group-metadata-value", + }, + }, + }, + } + + tCases := []struct { + tpl string + expected string + }{ + { + tpl: "{{identity.entity.id}}", + expected: "test-id", + }, + { + tpl: "{{identity.entity.name}}", + expected: "test", + }, + { + tpl: "{{identity.entity.metadata.entity-metadata}}", + expected: "entity-metadata-value", + }, + { + tpl: "{{identity.entity.aliases.test_mount.id}}", + expected: "alias-id", + }, + { + tpl: "{{identity.entity.aliases.test_mount.id}}", + expected: "alias-id", + }, + { + tpl: "{{identity.entity.aliases.test_mount.name}}", + expected: "test alias", + }, + { + tpl: "{{identity.entity.aliases.test_mount.metadata.alias-metadata}}", + expected: "alias-metadata-value", + }, + { + tpl: "{{identity.groups.ids.group1-id.name}}", + expected: "group1", + }, + { + tpl: "{{identity.groups.names.group1.id}}", + expected: "group1-id", + }, + { + tpl: "{{identity.groups.names.group1.metadata.group-metadata}}", + expected: "group-metadata-value", + }, + { + tpl: "{{identity.groups.ids.group1-id.metadata.group-metadata}}", + expected: "group-metadata-value", + }, + } + + for _, tCase := range tCases { + out, err := PopulateIdentityTemplate(tCase.tpl, "test", sysView) + if err != nil { + t.Fatal(err) + } + + if out != tCase.expected { + t.Fatalf("got %q, expected %q", out, tCase.expected) + } + } +} diff --git a/sdk/framework/lease.go b/sdk/framework/lease.go new file mode 100644 index 0000000..24824ca --- /dev/null +++ b/sdk/framework/lease.go @@ -0,0 +1,128 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/hashicorp/vault/sdk/logical" +) + +// LeaseExtend is left for backwards compatibility for plugins. This function +// now just passes back the data that was passed into it to be processed in core. +// DEPRECATED +func LeaseExtend(backendIncrement, backendMax time.Duration, systemView logical.SystemView) OperationFunc { + return func(ctx context.Context, req *logical.Request, data *FieldData) (*logical.Response, error) { + switch { + case req.Auth != nil: + req.Auth.TTL = backendIncrement + req.Auth.MaxTTL = backendMax + return &logical.Response{Auth: req.Auth}, nil + case req.Secret != nil: + req.Secret.TTL = backendIncrement + req.Secret.MaxTTL = backendMax + return &logical.Response{Secret: req.Secret}, nil + } + return nil, fmt.Errorf("no lease options for request") + } +} + +// CalculateTTL takes all the user-specified, backend, and system inputs and calculates +// a TTL for a lease +func CalculateTTL(sysView logical.SystemView, increment, backendTTL, period, backendMaxTTL, explicitMaxTTL time.Duration, startTime time.Time) (ttl time.Duration, warnings []string, errors error) { + // Truncate all times to the second since that is the lowest precision for + // TTLs + now := time.Now().Truncate(time.Second) + if startTime.IsZero() { + startTime = now + } else { + startTime = startTime.Truncate(time.Second) + } + + // Use the mount's configured max unless the backend specifies + // something more restrictive (perhaps from a role configuration + // parameter) + maxTTL := sysView.MaxLeaseTTL() + if backendMaxTTL > 0 && backendMaxTTL < maxTTL { + maxTTL = backendMaxTTL + } + if explicitMaxTTL > 0 && explicitMaxTTL < maxTTL { + maxTTL = explicitMaxTTL + } + + // Should never happen, but guard anyways + if maxTTL <= 0 { + return 0, nil, fmt.Errorf("max TTL must be greater than zero") + } + + var maxValidTime time.Time + switch { + case period > 0: + // Cap the period value to the sys max_ttl value + if period > maxTTL { + warnings = append(warnings, + fmt.Sprintf("period of %q exceeded the effective max_ttl of %q; period value is capped accordingly", + humanDuration(period), humanDuration(maxTTL))) + period = maxTTL + } + ttl = period + + if explicitMaxTTL > 0 { + maxValidTime = startTime.Add(explicitMaxTTL) + } + default: + switch { + case increment > 0: + ttl = increment + case backendTTL > 0: + ttl = backendTTL + default: + ttl = sysView.DefaultLeaseTTL() + } + + // We cannot go past this time + maxValidTime = startTime.Add(maxTTL) + } + + if !maxValidTime.IsZero() { + // Determine the max valid TTL + maxValidTTL := maxValidTime.Sub(now) + + // If we are past the max TTL, we shouldn't be in this function...but + // fast path out if we are + if maxValidTTL <= 0 { + return 0, nil, fmt.Errorf("past the max TTL, cannot renew") + } + + // If the proposed expiration is after the maximum TTL of the lease, + // cap the increment to whatever is left + if maxValidTTL-ttl < 0 { + warnings = append(warnings, + fmt.Sprintf("TTL of %q exceeded the effective max_ttl of %q; TTL value is capped accordingly", + humanDuration(ttl), humanDuration(maxValidTTL))) + ttl = maxValidTTL + } + } + + return ttl, warnings, nil +} + +// humanDuration prints the time duration without zero elements. 
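+// For example, 90*time.Minute renders as "1h30m" rather than "1h30m0s", and
+// 2*time.Hour renders as "2h" rather than "2h0m0s".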
+func humanDuration(d time.Duration) string { + if d == 0 { + return "0s" + } + + s := d.String() + if strings.HasSuffix(s, "m0s") { + s = s[:len(s)-2] + } + if idx := strings.Index(s, "h0m"); idx > 0 { + s = s[:idx+1] + s[idx+3:] + } + return s +} diff --git a/sdk/framework/lease_test.go b/sdk/framework/lease_test.go new file mode 100644 index 0000000..5d1f9f0 --- /dev/null +++ b/sdk/framework/lease_test.go @@ -0,0 +1,137 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "testing" + "time" + + "github.com/hashicorp/vault/sdk/logical" +) + +func TestCalculateTTL(t *testing.T) { + testSysView := logical.StaticSystemView{ + DefaultLeaseTTLVal: 5 * time.Hour, + MaxLeaseTTLVal: 30 * time.Hour, + } + + cases := map[string]struct { + Increment time.Duration + BackendDefault time.Duration + BackendMax time.Duration + Period time.Duration + ExplicitMaxTTL time.Duration + Result time.Duration + Warnings int + Error bool + }{ + "valid request, good bounds, increment is preferred": { + BackendDefault: 30 * time.Hour, + Increment: 1 * time.Hour, + Result: 1 * time.Hour, + }, + + "valid request, zero backend default, uses increment": { + BackendDefault: 0, + Increment: 1 * time.Hour, + Result: 1 * time.Hour, + }, + + "lease increment is zero, uses backend default": { + BackendDefault: 30 * time.Hour, + Increment: 0, + Result: 30 * time.Hour, + }, + + "lease increment and default are zero, uses systemview": { + BackendDefault: 0, + Increment: 0, + Result: 5 * time.Hour, + }, + + "backend max and associated request are too long": { + BackendDefault: 40 * time.Hour, + BackendMax: 45 * time.Hour, + Result: 30 * time.Hour, + Warnings: 1, + }, + + "all request values are larger than the system view, so the system view limits": { + BackendDefault: 40 * time.Hour, + BackendMax: 50 * time.Hour, + Increment: 40 * time.Hour, + Result: 30 * time.Hour, + Warnings: 1, + }, + + "request within backend max": { + BackendDefault: 9 * time.Hour, + BackendMax: 5 * time.Hour, + Increment: 4 * time.Hour, + Result: 4 * time.Hour, + }, + + "request outside backend max": { + BackendDefault: 9 * time.Hour, + BackendMax: 4 * time.Hour, + Increment: 5 * time.Hour, + Result: 4 * time.Hour, + Warnings: 1, + }, + + "request is negative, no backend default, use sysview": { + Increment: -7 * time.Hour, + Result: 5 * time.Hour, + }, + + "lease increment too large": { + Increment: 40 * time.Hour, + Result: 30 * time.Hour, + Warnings: 1, + }, + + "periodic, good request, period is preferred": { + Increment: 3 * time.Hour, + BackendDefault: 4 * time.Hour, + BackendMax: 2 * time.Hour, + Period: 1 * time.Hour, + Result: 1 * time.Hour, + }, + + "period too large, explicit max ttl is preferred": { + Period: 2 * time.Hour, + ExplicitMaxTTL: 1 * time.Hour, + Result: 1 * time.Hour, + Warnings: 1, + }, + + "period too large, capped by backend max": { + Period: 2 * time.Hour, + BackendMax: 1 * time.Hour, + Result: 1 * time.Hour, + Warnings: 1, + }, + } + + for name, tc := range cases { + ttl, warnings, err := CalculateTTL(testSysView, tc.Increment, tc.BackendDefault, tc.Period, tc.BackendMax, tc.ExplicitMaxTTL, time.Time{}) + if (err != nil) != tc.Error { + t.Fatalf("bad: %s\nerr: %s", name, err) + } + if tc.Error { + continue + } + + // Round it to the nearest hour + now := time.Now().Round(time.Hour) + lease := now.Add(ttl).Round(time.Hour).Sub(now) + if lease != tc.Result { + t.Fatalf("bad: %s\nlease: %s", name, lease) + } + + if tc.Warnings != len(warnings) { + t.Fatalf("bad: 
%s\nwarning count mismatch, expect %d, got %d: %#v", name, tc.Warnings, len(warnings), warnings) + } + } +} diff --git a/sdk/framework/openapi.go b/sdk/framework/openapi.go new file mode 100644 index 0000000..53e7ee2 --- /dev/null +++ b/sdk/framework/openapi.go @@ -0,0 +1,1115 @@ +package framework + +import ( + "errors" + "fmt" + "reflect" + "regexp" + "regexp/syntax" + "sort" + "strconv" + "strings" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/wrapping" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/mapstructure" + "golang.org/x/text/cases" + "golang.org/x/text/language" +) + +// OpenAPI specification (OAS): https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md +const OASVersion = "3.0.2" + +// NewOASDocument returns an empty OpenAPI document. +func NewOASDocument(version string) *OASDocument { + return &OASDocument{ + Version: OASVersion, + Info: OASInfo{ + Title: "HashiCorp Vault API", + Description: "HTTP API that gives you full access to Vault. All API routes are prefixed with `/v1/`.", + Version: version, + License: OASLicense{ + Name: "Mozilla Public License 2.0", + URL: "https://www.mozilla.org/en-US/MPL/2.0", + }, + }, + Paths: make(map[string]*OASPathItem), + Components: OASComponents{ + Schemas: make(map[string]*OASSchema), + }, + } +} + +// NewOASDocumentFromMap builds an OASDocument from an existing map version of a document. +// If a document has been decoded from JSON or received from a plugin, it will be as a map[string]interface{} +// and needs special handling beyond the default mapstructure decoding. +func NewOASDocumentFromMap(input map[string]interface{}) (*OASDocument, error) { + // The Responses map uses integer keys (the response code), but once translated into JSON + // (e.g. during the plugin transport) these become strings. mapstructure will not coerce these back + // to integers without a custom decode hook. + decodeHook := func(src reflect.Type, tgt reflect.Type, inputRaw interface{}) (interface{}, error) { + // Only alter data if: + // 1. going from string to int + // 2. 
the string represents an int in the status code range (100-599)
+ if src.Kind() == reflect.String && tgt.Kind() == reflect.Int {
+ if input, ok := inputRaw.(string); ok {
+ if intval, err := strconv.Atoi(input); err == nil {
+ if intval >= 100 && intval < 600 {
+ return intval, nil
+ }
+ }
+ }
+ }
+ return inputRaw, nil
+ }
+
+ doc := new(OASDocument)
+
+ config := &mapstructure.DecoderConfig{
+ DecodeHook: decodeHook,
+ Result: doc,
+ }
+
+ decoder, err := mapstructure.NewDecoder(config)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := decoder.Decode(input); err != nil {
+ return nil, err
+ }
+
+ return doc, nil
+}
+
+type OASDocument struct {
+ Version string `json:"openapi" mapstructure:"openapi"`
+ Info OASInfo `json:"info"`
+ Paths map[string]*OASPathItem `json:"paths"`
+ Components OASComponents `json:"components"`
+}
+
+type OASComponents struct {
+ Schemas map[string]*OASSchema `json:"schemas"`
+}
+
+type OASInfo struct {
+ Title string `json:"title"`
+ Description string `json:"description"`
+ Version string `json:"version"`
+ License OASLicense `json:"license"`
+}
+
+type OASLicense struct {
+ Name string `json:"name"`
+ URL string `json:"url"`
+}
+
+type OASPathItem struct {
+ Description string `json:"description,omitempty"`
+ Parameters []OASParameter `json:"parameters,omitempty"`
+ Sudo bool `json:"x-vault-sudo,omitempty" mapstructure:"x-vault-sudo"`
+ Unauthenticated bool `json:"x-vault-unauthenticated,omitempty" mapstructure:"x-vault-unauthenticated"`
+ CreateSupported bool `json:"x-vault-createSupported,omitempty" mapstructure:"x-vault-createSupported"`
+ DisplayNavigation bool `json:"x-vault-displayNavigation,omitempty" mapstructure:"x-vault-displayNavigation"`
+ DisplayAttrs *DisplayAttributes `json:"x-vault-displayAttrs,omitempty" mapstructure:"x-vault-displayAttrs"`
+
+ Get *OASOperation `json:"get,omitempty"`
+ Post *OASOperation `json:"post,omitempty"`
+ Delete *OASOperation `json:"delete,omitempty"`
+}
+
+// NewOASOperation creates an empty OpenAPI Operations object.
+func NewOASOperation() *OASOperation {
+ return &OASOperation{
+ Responses: make(map[int]*OASResponse),
+ }
+}
+
+type OASOperation struct {
+ Summary string `json:"summary,omitempty"`
+ Description string `json:"description,omitempty"`
+ OperationID string `json:"operationId,omitempty"`
+ Tags []string `json:"tags,omitempty"`
+ Parameters []OASParameter `json:"parameters,omitempty"`
+ RequestBody *OASRequestBody `json:"requestBody,omitempty"`
+ Responses map[int]*OASResponse `json:"responses"`
+ Deprecated bool `json:"deprecated,omitempty"`
+}
+
+type OASParameter struct {
+ Name string `json:"name"`
+ Description string `json:"description,omitempty"`
+ In string `json:"in"`
+ Schema *OASSchema `json:"schema,omitempty"`
+ Required bool `json:"required,omitempty"`
+ Deprecated bool `json:"deprecated,omitempty"`
+}
+
+type OASRequestBody struct {
+ Description string `json:"description,omitempty"`
+ Required bool `json:"required,omitempty"`
+ Content OASContent `json:"content,omitempty"`
+}
+
+type OASContent map[string]*OASMediaTypeObject
+
+type OASMediaTypeObject struct {
+ Schema *OASSchema `json:"schema,omitempty"`
+}
+
+type OASSchema struct {
+ Ref string `json:"$ref,omitempty"`
+ Type string `json:"type,omitempty"`
+ Description string `json:"description,omitempty"`
+ Properties map[string]*OASSchema `json:"properties,omitempty"`
+
+ // Required is a list of keys in Properties that are required to be present.
This is a different + // approach than OASParameter (unfortunately), but is how JSONSchema handles 'required'. + Required []string `json:"required,omitempty"` + + Items *OASSchema `json:"items,omitempty"` + Format string `json:"format,omitempty"` + Pattern string `json:"pattern,omitempty"` + Enum []interface{} `json:"enum,omitempty"` + Default interface{} `json:"default,omitempty"` + Example interface{} `json:"example,omitempty"` + Deprecated bool `json:"deprecated,omitempty"` + // DisplayName string `json:"x-vault-displayName,omitempty" mapstructure:"x-vault-displayName,omitempty"` + DisplayValue interface{} `json:"x-vault-displayValue,omitempty" mapstructure:"x-vault-displayValue,omitempty"` + DisplaySensitive bool `json:"x-vault-displaySensitive,omitempty" mapstructure:"x-vault-displaySensitive,omitempty"` + DisplayGroup string `json:"x-vault-displayGroup,omitempty" mapstructure:"x-vault-displayGroup,omitempty"` + DisplayAttrs *DisplayAttributes `json:"x-vault-displayAttrs,omitempty" mapstructure:"x-vault-displayAttrs,omitempty"` +} + +type OASResponse struct { + Description string `json:"description"` + Content OASContent `json:"content,omitempty"` +} + +var OASStdRespOK = &OASResponse{ + Description: "OK", +} + +var OASStdRespNoContent = &OASResponse{ + Description: "empty body", +} + +// Regex for handling fields in paths, and string cleanup. +// Predefined here to avoid substantial recompilation. + +var ( + nonWordRe = regexp.MustCompile(`[^\w]+`) // Match a sequence of non-word characters + pathFieldsRe = regexp.MustCompile(`{(\w+)}`) // Capture OpenAPI-style named parameters, e.g. "lookup/{urltoken}", + wsRe = regexp.MustCompile(`\s+`) // Match whitespace, to be compressed during cleaning +) + +// documentPaths parses all paths in a framework.Backend into OpenAPI paths. +func documentPaths(backend *Backend, requestResponsePrefix string, doc *OASDocument) error { + for _, p := range backend.Paths { + if err := documentPath(p, backend.SpecialPaths(), requestResponsePrefix, backend.BackendType, doc); err != nil { + return err + } + } + + return nil +} + +// documentPath parses a framework.Path into one or more OpenAPI paths. +func documentPath(p *Path, specialPaths *logical.Paths, requestResponsePrefix string, backendType logical.BackendType, doc *OASDocument) error { + var sudoPaths []string + var unauthPaths []string + + if specialPaths != nil { + sudoPaths = specialPaths.Root + unauthPaths = specialPaths.Unauthenticated + } + + // Convert optional parameters into distinct patterns to be processed independently. + forceUnpublished := false + paths, err := expandPattern(p.Pattern) + if err != nil { + if errors.Is(err, errUnsupportableRegexpOperationForOpenAPI) { + // Pattern cannot be transformed into sensible OpenAPI paths. In this case, we override the later + // processing to use the regexp, as is, as the path, and behave as if Unpublished was set on every + // operation (meaning the operations will not be represented in the OpenAPI document). + // + // This allows a human reading the OpenAPI document to notice that, yes, a path handler does exist, + // even though it was not able to contribute actual OpenAPI operations. + forceUnpublished = true + paths = []string{p.Pattern} + } else { + return err + } + } + + for pathIndex, path := range paths { + // Construct a top level PathItem which will be populated as the path is processed. 
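+ // Illustrative sketch (editorial note, not part of the original source): a pattern such as
+ // "tools/hash(/(?P<urlalgorithm>.+))?" arrives here already expanded into the two paths
+ //
+ //   "tools/hash" and "tools/hash/{urlalgorithm}"
+ //
+ // and each of them is documented with its own OASPathItem.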
+ pi := OASPathItem{ + Description: cleanString(p.HelpSynopsis), + } + + pi.Sudo = specialPathMatch(path, sudoPaths) + pi.Unauthenticated = specialPathMatch(path, unauthPaths) + pi.DisplayAttrs = withoutOperationHints(p.DisplayAttrs) + + // If the newer style Operations map isn't defined, create one from the legacy fields. + operations := p.Operations + if operations == nil { + operations = make(map[logical.Operation]OperationHandler) + + for opType, cb := range p.Callbacks { + operations[opType] = &PathOperation{ + Callback: cb, + Summary: p.HelpSynopsis, + } + } + } + + // Process path and header parameters, which are common to all operations. + // Body fields will be added to individual operations. + pathFields, bodyFields := splitFields(p.Fields, path) + + for name, field := range pathFields { + location := "path" + required := true + + if field == nil { + continue + } + + if field.Query { + location = "query" + required = false + } + + t := convertType(field.Type) + p := OASParameter{ + Name: name, + Description: cleanString(field.Description), + In: location, + Schema: &OASSchema{ + Type: t.baseType, + Pattern: t.pattern, + Enum: field.AllowedValues, + Default: field.Default, + DisplayAttrs: withoutOperationHints(field.DisplayAttrs), + }, + Required: required, + Deprecated: field.Deprecated, + } + pi.Parameters = append(pi.Parameters, p) + } + + // Sort parameters for a stable output + sort.Slice(pi.Parameters, func(i, j int) bool { + return strings.ToLower(pi.Parameters[i].Name) < strings.ToLower(pi.Parameters[j].Name) + }) + + // Process each supported operation by building up an Operation object + // with descriptions, properties and examples from the framework.Path data. + for opType, opHandler := range operations { + props := opHandler.Properties() + if props.Unpublished || forceUnpublished { + continue + } + + if opType == logical.CreateOperation { + pi.CreateSupported = true + + // If both Create and Update are defined, only process Update. + if operations[logical.UpdateOperation] != nil { + continue + } + } + + // If both List and Read are defined, only process Read. + if opType == logical.ListOperation && operations[logical.ReadOperation] != nil { + continue + } + + op := NewOASOperation() + + operationID := constructOperationID( + path, + pathIndex, + p.DisplayAttrs, + opType, + props.DisplayAttrs, + requestResponsePrefix, + ) + + op.Summary = props.Summary + op.Description = props.Description + op.Deprecated = props.Deprecated + op.OperationID = operationID + + // Add any fields not present in the path as body parameters for POST. 
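+ // Illustrative sketch (editorial note, not part of the original source): a hypothetical
+ // body field declared as
+ //
+ //   "ttl": {Type: TypeDurationSecond, Description: "Lease TTL"}
+ //
+ // would be emitted by the block below as the JSON schema property
+ //
+ //   "ttl": {"type": "integer", "format": "seconds", "description": "Lease TTL"}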
+ if opType == logical.CreateOperation || opType == logical.UpdateOperation { + s := &OASSchema{ + Type: "object", + Properties: make(map[string]*OASSchema), + Required: make([]string, 0), + } + + for name, field := range bodyFields { + // Removing this field from the spec as it is deprecated in favor of using "sha256" + // The duplicate sha_256 and sha256 in these paths cause issues with codegen + if name == "sha_256" && strings.Contains(path, "plugins/catalog/") { + continue + } + + openapiField := convertType(field.Type) + if field.Required { + s.Required = append(s.Required, name) + } + + p := OASSchema{ + Type: openapiField.baseType, + Description: cleanString(field.Description), + Format: openapiField.format, + Pattern: openapiField.pattern, + Enum: field.AllowedValues, + Default: field.Default, + Deprecated: field.Deprecated, + DisplayAttrs: withoutOperationHints(field.DisplayAttrs), + } + if openapiField.baseType == "array" { + p.Items = &OASSchema{ + Type: openapiField.items, + } + } + s.Properties[name] = &p + } + + // Make the ordering deterministic, so that the generated OpenAPI spec document, observed over several + // versions, doesn't contain spurious non-semantic changes. + sort.Strings(s.Required) + + // If examples were given, use the first one as the sample + // of this schema. + if len(props.Examples) > 0 { + s.Example = props.Examples[0].Data + } + + // Set the final request body. Only JSON request data is supported. + if len(s.Properties) > 0 || s.Example != nil { + requestName := hyphenatedToTitleCase(operationID) + "Request" + doc.Components.Schemas[requestName] = s + op.RequestBody = &OASRequestBody{ + Required: true, + Content: OASContent{ + "application/json": &OASMediaTypeObject{ + Schema: &OASSchema{Ref: fmt.Sprintf("#/components/schemas/%s", requestName)}, + }, + }, + } + } + } + + // LIST is represented as GET with a `list` query parameter. + if opType == logical.ListOperation { + // Only accepts List (due to the above skipping of ListOperations that also have ReadOperations) + op.Parameters = append(op.Parameters, OASParameter{ + Name: "list", + Description: "Must be set to `true`", + Required: true, + In: "query", + Schema: &OASSchema{Type: "string", Enum: []interface{}{"true"}}, + }) + } else if opType == logical.ReadOperation && operations[logical.ListOperation] != nil { + // Accepts both Read and List + op.Parameters = append(op.Parameters, OASParameter{ + Name: "list", + Description: "Return a list if `true`", + In: "query", + Schema: &OASSchema{Type: "string"}, + }) + } + + // Add tags based on backend type + var tags []string + switch backendType { + case logical.TypeLogical: + tags = []string{"secrets"} + case logical.TypeCredential: + tags = []string{"auth"} + } + + op.Tags = append(op.Tags, tags...) + + // Set default responses. + if len(props.Responses) == 0 { + if opType == logical.DeleteOperation { + op.Responses[204] = OASStdRespNoContent + } else { + op.Responses[200] = OASStdRespOK + } + } + + // Add any defined response details. 
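+ // Illustrative sketch (editorial note, not part of the original source): for an
+ // operationID of "kvv2-write", any response fields are registered below under
+ //
+ //   "#/components/schemas/Kvv2WriteResponse"
+ //
+ // mirroring the "Kvv2WriteRequest" schema created for the request body above.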
+ for code, responses := range props.Responses { + var description string + content := make(OASContent) + + for i, resp := range responses { + if i == 0 { + description = resp.Description + } + if resp.Example != nil { + mediaType := resp.MediaType + if mediaType == "" { + mediaType = "application/json" + } + + // create a version of the response that will not emit null items + cr := cleanResponse(resp.Example) + + // Only one example per media type is allowed, so first one wins + if _, ok := content[mediaType]; !ok { + content[mediaType] = &OASMediaTypeObject{ + Schema: &OASSchema{ + Example: cr, + }, + } + } + } + + responseSchema := &OASSchema{ + Type: "object", + Properties: make(map[string]*OASSchema), + } + + for name, field := range resp.Fields { + openapiField := convertType(field.Type) + p := OASSchema{ + Type: openapiField.baseType, + Description: cleanString(field.Description), + Format: openapiField.format, + Pattern: openapiField.pattern, + Enum: field.AllowedValues, + Default: field.Default, + Deprecated: field.Deprecated, + DisplayAttrs: withoutOperationHints(field.DisplayAttrs), + } + if openapiField.baseType == "array" { + p.Items = &OASSchema{ + Type: openapiField.items, + } + } + responseSchema.Properties[name] = &p + } + + if len(resp.Fields) != 0 { + responseName := hyphenatedToTitleCase(operationID) + "Response" + doc.Components.Schemas[responseName] = responseSchema + content = OASContent{ + "application/json": &OASMediaTypeObject{ + Schema: &OASSchema{Ref: fmt.Sprintf("#/components/schemas/%s", responseName)}, + }, + } + } + } + + op.Responses[code] = &OASResponse{ + Description: description, + Content: content, + } + } + + switch opType { + case logical.CreateOperation, logical.UpdateOperation: + pi.Post = op + case logical.ReadOperation, logical.ListOperation: + pi.Get = op + case logical.DeleteOperation: + pi.Delete = op + } + } + + doc.Paths["/"+path] = &pi + } + + return nil +} + +// specialPathMatch checks whether the given path matches one of the special +// paths, taking into account * and + wildcards (e.g. foo/+/bar/*) +func specialPathMatch(path string, specialPaths []string) bool { + // pathMatchesByParts determines if the path matches the special path's + // pattern, accounting for the '+' and '*' wildcards + pathMatchesByParts := func(pathParts []string, specialPathParts []string) bool { + if len(pathParts) < len(specialPathParts) { + return false + } + for i := 0; i < len(specialPathParts); i++ { + var ( + part = pathParts[i] + pattern = specialPathParts[i] + ) + if pattern == "+" { + continue + } + if pattern == "*" { + return true + } + if strings.HasSuffix(pattern, "*") && strings.HasPrefix(part, pattern[0:len(pattern)-1]) { + return true + } + if pattern != part { + return false + } + } + return len(pathParts) == len(specialPathParts) + } + + pathParts := strings.Split(path, "/") + + for _, sp := range specialPaths { + // exact match + if sp == path { + return true + } + + // match * + if strings.HasSuffix(sp, "*") && strings.HasPrefix(path, sp[0:len(sp)-1]) { + return true + } + + // match + + if strings.Contains(sp, "+") && pathMatchesByParts(pathParts, strings.Split(sp, "/")) { + return true + } + } + + return false +} + +// constructOperationID joins the given inputs into a hyphen-separated +// lower-case operation id, which is also used as a prefix for request and +// response names. +// +// The OperationPrefix / -Verb / -Suffix found in display attributes will be +// used, if provided. 
Otherwise, the function falls back to using the path and +// the operation. +// +// Examples of generated operation identifiers: +// - kvv2-write +// - kvv2-read +// - google-cloud-login +// - google-cloud-write-role +func constructOperationID( + path string, + pathIndex int, + pathAttributes *DisplayAttributes, + operation logical.Operation, + operationAttributes *DisplayAttributes, + defaultPrefix string, +) string { + var ( + prefix string + verb string + suffix string + ) + + if operationAttributes != nil { + prefix = operationAttributes.OperationPrefix + verb = operationAttributes.OperationVerb + suffix = operationAttributes.OperationSuffix + } + + if pathAttributes != nil { + if prefix == "" { + prefix = pathAttributes.OperationPrefix + } + if verb == "" { + verb = pathAttributes.OperationVerb + } + if suffix == "" { + suffix = pathAttributes.OperationSuffix + } + } + + // A single suffix string can contain multiple pipe-delimited strings. To + // determine the actual suffix, we attempt to match it by the index of the + // paths returned from `expandPattern(...)`. For example: + // + // pki/ + // Pattern: "keys/generate/(internal|exported|kms)", + // DisplayAttrs: { + // ... + // OperationSuffix: "internal-key|exported-key|kms-key", + // }, + // + // will expand into three paths and corresponding suffixes: + // + // path 0: "keys/generate/internal" suffix: internal-key + // path 1: "keys/generate/exported" suffix: exported-key + // path 2: "keys/generate/kms" suffix: kms-key + // + pathIndexOutOfRange := false + + if suffixes := strings.Split(suffix, "|"); len(suffixes) > 1 || pathIndex > 0 { + // if the index is out of bounds, fall back to the old logic + if pathIndex >= len(suffixes) { + suffix = "" + pathIndexOutOfRange = true + } else { + suffix = suffixes[pathIndex] + } + } + + // a helper that hyphenates & lower-cases the slice except the empty elements + toLowerHyphenate := func(parts []string) string { + filtered := make([]string, 0, len(parts)) + for _, e := range parts { + if e != "" { + filtered = append(filtered, e) + } + } + return strings.ToLower(strings.Join(filtered, "-")) + } + + // fall back to using the path + operation to construct the operation id + var ( + needPrefix = prefix == "" && verb == "" + needVerb = verb == "" + needSuffix = suffix == "" && (verb == "" || pathIndexOutOfRange) + ) + + if needPrefix { + prefix = defaultPrefix + } + + if needVerb { + if operation == logical.UpdateOperation { + verb = "write" + } else { + verb = string(operation) + } + } + + if needSuffix { + suffix = toLowerHyphenate(nonWordRe.Split(path, -1)) + } + + return toLowerHyphenate([]string{prefix, verb, suffix}) +} + +// expandPattern expands a regex pattern by generating permutations of any optional parameters +// and changing named parameters into their {openapi} equivalents. +func expandPattern(pattern string) ([]string, error) { + // Happily, the Go regexp library exposes its underlying "parse to AST" functionality, so we can rely on that to do + // the hard work of interpreting the regexp syntax. + rx, err := syntax.Parse(pattern, syntax.Perl) + if err != nil { + // This should be impossible to reach, since regexps have previously been compiled with MustCompile in + // Backend.init. 
+ panic(err)
+ }
+
+ paths, err := collectPathsFromRegexpAST(rx)
+ if err != nil {
+ return nil, err
+ }
+
+ return paths, nil
+}
+
+type pathCollector struct {
+ strings.Builder
+ conditionalSlashAppendedAtLength int
+}
+
+// collectPathsFromRegexpAST performs a depth-first recursive walk through a regexp AST, collecting an OpenAPI-style
+// path as it goes.
+//
+// Each time it encounters alternation (a|b) or an optional part (a?), it forks its processing to produce additional
+// results, to account for each possibility. Note: This does mean that an input pattern with lots of these regexp
+// features can produce a lot of different OpenAPI endpoints. At the time of writing, the most complex known example is
+//
+// "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/crl(/pem|/der|/delta(/pem|/der)?)?"
+//
+// in the PKI secrets engine which expands to 6 separate paths.
+//
+// Each named capture group - i.e. (?P<name>something here) - is replaced with an OpenAPI parameter - i.e. {name} - and
+// the subtree of regexp AST inside the parameter is completely skipped.
+func collectPathsFromRegexpAST(rx *syntax.Regexp) ([]string, error) {
+ pathCollectors, err := collectPathsFromRegexpASTInternal(rx, []*pathCollector{{}})
+ if err != nil {
+ return nil, err
+ }
+ paths := make([]string, 0, len(pathCollectors))
+ for _, collector := range pathCollectors {
+ if collector.conditionalSlashAppendedAtLength != collector.Len() {
+ paths = append(paths, collector.String())
+ }
+ }
+ return paths, nil
+}
+
+var errUnsupportableRegexpOperationForOpenAPI = errors.New("path regexp uses an operation that cannot be translated to an OpenAPI pattern")
+
+func collectPathsFromRegexpASTInternal(rx *syntax.Regexp, appendingTo []*pathCollector) ([]*pathCollector, error) {
+ var err error
+
+ // Depending on the type of this regexp AST node (its Op, i.e. operation), figure out whether it contributes any
+ // characters to the URL path, and whether we need to recurse through child AST nodes.
+ //
+ // Each element of the appendingTo slice tracks a separate path, defined by the alternatives chosen when traversing
+ // the | and ? conditional regexp features, and new elements are added as each of these features are traversed.
+ //
+ // To share this slice across multiple recursive calls of this function, it is passed down as a parameter to each
+ // recursive call, potentially modified throughout this switch block, and passed back up as a return value at the
+ // end of this function - the parent call uses the return value to update its own local variable.
+ switch rx.Op {
+
+ // These AST operations are leaf nodes (no children), that match zero characters, so require no processing at all
+ case syntax.OpEmptyMatch: // e.g. (?:)
+ case syntax.OpBeginLine: // i.e. ^ when (?m)
+ case syntax.OpEndLine: // i.e. $ when (?m)
+ case syntax.OpBeginText: // i.e. \A, or ^ when (?-m)
+ case syntax.OpEndText: // i.e. \z, or $ when (?-m)
+ case syntax.OpWordBoundary: // i.e. \b
+ case syntax.OpNoWordBoundary: // i.e. \B
+
+ // OpConcat simply represents multiple parts of the pattern appearing one after the other, so just recurse through
+ // those pieces.
+ case syntax.OpConcat:
+ for _, child := range rx.Sub {
+ appendingTo, err = collectPathsFromRegexpASTInternal(child, appendingTo)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // OpLiteral is a literal string in the pattern - append it to the paths we are building.
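+ // (Editorial illustration, not part of the original source: for the pattern "rekey/backup",
+ // a single OpLiteral node writes the whole string "rekey/backup" into every collector.)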
+ case syntax.OpLiteral:
+ for _, collector := range appendingTo {
+ collector.WriteString(string(rx.Rune))
+ }
+
+ // OpAlternate, i.e. a|b, means we clone all of the pathCollector instances we are currently accumulating paths
+ // into, and independently recurse through each alternate option.
+ case syntax.OpAlternate: // i.e. |
+ var totalAppendingTo []*pathCollector
+ lastIndex := len(rx.Sub) - 1
+ for index, child := range rx.Sub {
+ var childAppendingTo []*pathCollector
+ if index == lastIndex {
+ // Optimization: last time through this loop, we can simply re-use the existing set of pathCollector
+ // instances, as we no longer need to preserve them unmodified to make further copies of.
+ childAppendingTo = appendingTo
+ } else {
+ for _, collector := range appendingTo {
+ newCollector := new(pathCollector)
+ newCollector.WriteString(collector.String())
+ newCollector.conditionalSlashAppendedAtLength = collector.conditionalSlashAppendedAtLength
+ childAppendingTo = append(childAppendingTo, newCollector)
+ }
+ }
+ childAppendingTo, err = collectPathsFromRegexpASTInternal(child, childAppendingTo)
+ if err != nil {
+ return nil, err
+ }
+ totalAppendingTo = append(totalAppendingTo, childAppendingTo...)
+ }
+ appendingTo = totalAppendingTo
+
+ // OpQuest, i.e. a?, is much like an alternation between exactly two options, one of which is the empty string.
+ case syntax.OpQuest:
+ child := rx.Sub[0]
+ var childAppendingTo []*pathCollector
+ for _, collector := range appendingTo {
+ newCollector := new(pathCollector)
+ newCollector.WriteString(collector.String())
+ newCollector.conditionalSlashAppendedAtLength = collector.conditionalSlashAppendedAtLength
+ childAppendingTo = append(childAppendingTo, newCollector)
+ }
+ childAppendingTo, err = collectPathsFromRegexpASTInternal(child, childAppendingTo)
+ if err != nil {
+ return nil, err
+ }
+ appendingTo = append(appendingTo, childAppendingTo...)
+
+ // Many Vault path patterns end with `/?` to accept paths that end with or without a slash. Our current
+ // convention for generating the OpenAPI is to strip away these slashes. To do that, this very special case
+ // detects when we just appended a single conditional slash, and records the length of the path at this point,
+ // so we can later discard this path variant, if nothing else is appended to it later.
+ if child.Op == syntax.OpLiteral && string(child.Rune) == "/" {
+ for _, collector := range childAppendingTo {
+ collector.conditionalSlashAppendedAtLength = collector.Len()
+ }
+ }
+
+ // OpCapture, i.e. ( ) or (?P<name> ), a capturing group
+ case syntax.OpCapture:
+ if rx.Name == "" {
+ // In Vault, an unnamed capturing group is not actually used for capturing.
+ // We treat it exactly the same as OpConcat.
+ for _, child := range rx.Sub {
+ appendingTo, err = collectPathsFromRegexpASTInternal(child, appendingTo)
+ if err != nil {
+ return nil, err
+ }
+ }
+ } else {
+ // A named capturing group is replaced with the OpenAPI parameter syntax, and the regexp inside the group
+ // is NOT added to the OpenAPI path.
+ for _, builder := range appendingTo {
+ builder.WriteRune('{')
+ builder.WriteString(rx.Name)
+ builder.WriteRune('}')
+ }
+ }
+
+ // Any other kind of operation is a problem, and will trigger an error, resulting in the pattern being left out of
+ // the OpenAPI entirely - that's better than generating a path which is incorrect.
+ //
+ // The Op types we expect to hit the default condition are:
+ //
+ // OpCharClass - i.e. [something]
+ // OpAnyCharNotNL - i.e. .
+ // OpAnyChar - i.e. (?s:.)
+ // OpStar - i.e. *
+ // OpPlus - i.e. +
+ // OpRepeat - i.e. {N}, {N,M}, etc.
+ //
+ // In any of these conditions, there is no sensible translation of the path to OpenAPI syntax. (Note, this only
+ // applies to these appearing outside of a named capture group, otherwise they are handled in the previous case.)
+ //
+ // At the time of writing, the only pattern in the builtin Vault plugins that hits this codepath is the ".*"
+ // pattern in the KVv2 secrets engine, which is not a valid path, but rather, is a catch-all used to implement
+ // custom error handling behaviour to guide users who attempt to treat a KVv2 as a KVv1. It is already marked as
+ // Unpublished, so is withheld from the OpenAPI anyway.
+ //
+ // For completeness, one other Op type exists, OpNoMatch, which is never generated by syntax.Parse - only by
+ // subsequent Simplify in preparation to Compile, which is not used here.
+ default:
+ return nil, errUnsupportableRegexpOperationForOpenAPI
+ }
+
+ return appendingTo, nil
+}
+
+// schemaType is a subset of the JSON Schema elements used as a target
+// for conversions from Vault's standard FieldTypes.
+type schemaType struct {
+ baseType string
+ items string
+ format string
+ pattern string
+}
+
+// convertType translates a FieldType into an OpenAPI type.
+// In the case of arrays, a subtype is returned as well.
+func convertType(t FieldType) schemaType {
+ ret := schemaType{}
+
+ switch t {
+ case TypeString, TypeHeader:
+ ret.baseType = "string"
+ case TypeNameString:
+ ret.baseType = "string"
+ ret.pattern = `\w([\w-.]*\w)?`
+ case TypeLowerCaseString:
+ ret.baseType = "string"
+ ret.format = "lowercase"
+ case TypeInt:
+ ret.baseType = "integer"
+ case TypeInt64:
+ ret.baseType = "integer"
+ ret.format = "int64"
+ case TypeDurationSecond, TypeSignedDurationSecond:
+ ret.baseType = "integer"
+ ret.format = "seconds"
+ case TypeBool:
+ ret.baseType = "boolean"
+ case TypeMap:
+ ret.baseType = "object"
+ ret.format = "map"
+ case TypeKVPairs:
+ ret.baseType = "object"
+ ret.format = "kvpairs"
+ case TypeSlice:
+ ret.baseType = "array"
+ ret.items = "object"
+ case TypeStringSlice, TypeCommaStringSlice:
+ ret.baseType = "array"
+ ret.items = "string"
+ case TypeCommaIntSlice:
+ ret.baseType = "array"
+ ret.items = "integer"
+ case TypeTime:
+ ret.baseType = "string"
+ ret.format = "date-time"
+ case TypeFloat:
+ ret.baseType = "number"
+ ret.format = "float"
+ default:
+ log.L().Warn("error parsing field type", "type", t)
+ ret.format = "unknown"
+ }
+
+ return ret
+}
+
+// cleanString prepares s for inclusion in the output
+func cleanString(s string) string {
+ // clean leading/trailing whitespace, and collapse runs of whitespace into a single space
+ s = strings.TrimSpace(s)
+ s = wsRe.ReplaceAllString(s, " ")
+ return s
+}
+
+// splitFields partitions fields into path and body groups
+// The input pattern is expected to have been run through expandPattern,
+// with path parameters denoted in {braces}.
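+//
+// Illustrative sketch (editorial note, not part of the original source):
+//
+//   pathFields, bodyFields := splitFields(fields, "items/{id}")
+//
+// places the "id" schema into pathFields (along with any Query fields) and every
+// remaining field into bodyFields.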
+func splitFields(allFields map[string]*FieldSchema, pattern string) (pathFields, bodyFields map[string]*FieldSchema) {
+ pathFields = make(map[string]*FieldSchema)
+ bodyFields = make(map[string]*FieldSchema)
+
+ for _, match := range pathFieldsRe.FindAllStringSubmatch(pattern, -1) {
+ name := match[1]
+ pathFields[name] = allFields[name]
+ }
+
+ for name, field := range allFields {
+ if _, ok := pathFields[name]; !ok {
+ if field.Query {
+ pathFields[name] = field
+ } else {
+ bodyFields[name] = field
+ }
+ }
+ }
+
+ return pathFields, bodyFields
+}
+
+// withoutOperationHints returns a copy of the given DisplayAttributes without
+// OperationPrefix / OperationVerb / OperationSuffix since we don't need these
+// fields in the final output.
+func withoutOperationHints(in *DisplayAttributes) *DisplayAttributes {
+ if in == nil {
+ return nil
+ }
+
+ copy := *in
+
+ copy.OperationPrefix = ""
+ copy.OperationVerb = ""
+ copy.OperationSuffix = ""
+
+ // return nil if all fields are empty to avoid empty JSON objects
+ if copy == (DisplayAttributes{}) {
+ return nil
+ }
+
+ return &copy
+}
+
+func hyphenatedToTitleCase(in string) string {
+ var b strings.Builder
+
+ title := cases.Title(language.English, cases.NoLower)
+
+ for _, word := range strings.Split(in, "-") {
+ b.WriteString(title.String(word))
+ }
+
+ return b.String()
+}
+
+// cleanedResponse is identical to logical.Response but with nulls
+// removed from JSON encoding
+type cleanedResponse struct {
+ Secret *logical.Secret `json:"secret,omitempty"`
+ Auth *logical.Auth `json:"auth,omitempty"`
+ Data map[string]interface{} `json:"data,omitempty"`
+ Redirect string `json:"redirect,omitempty"`
+ Warnings []string `json:"warnings,omitempty"`
+ WrapInfo *wrapping.ResponseWrapInfo `json:"wrap_info,omitempty"`
+ Headers map[string][]string `json:"headers,omitempty"`
+}
+
+func cleanResponse(resp *logical.Response) *cleanedResponse {
+ return &cleanedResponse{
+ Secret: resp.Secret,
+ Auth: resp.Auth,
+ Data: resp.Data,
+ Redirect: resp.Redirect,
+ Warnings: resp.Warnings,
+ WrapInfo: resp.WrapInfo,
+ Headers: resp.Headers,
+ }
+}
+
+// CreateOperationIDs generates unique operationIds for all paths/methods.
+// The transform will convert path/method into camelcase. e.g.:
+//
+// /sys/tools/random/{urlbytes} -> postSysToolsRandomUrlbytes
+//
+// In the unlikely case of duplicate ids, a numeric suffix is added:
+//
+// postSysToolsRandomUrlbytes_2
+//
+// An optional user-provided suffix ("context") may also be appended.
+//
+// Deprecated: operationID's are now populated using `constructOperationID`.
+// This function is here for backwards compatibility with older plugins.
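+//
+// A minimal usage sketch (editorial note, not part of the original source):
+//
+//   doc := NewOASDocument("1.14.8")
+//   // ...populate doc.Paths via documentPaths...
+//   doc.CreateOperationIDs("")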
+func (d *OASDocument) CreateOperationIDs(context string) { + opIDCount := make(map[string]int) + var paths []string + + // traverse paths in a stable order to ensure stable output + for path := range d.Paths { + paths = append(paths, path) + } + sort.Strings(paths) + + for _, path := range paths { + pi := d.Paths[path] + for _, method := range []string{"get", "post", "delete"} { + var oasOperation *OASOperation + switch method { + case "get": + oasOperation = pi.Get + case "post": + oasOperation = pi.Post + case "delete": + oasOperation = pi.Delete + } + + if oasOperation == nil { + continue + } + + if oasOperation.OperationID != "" { + continue + } + + // Discard "_mount_path" from any {thing_mount_path} parameters + path = strings.Replace(path, "_mount_path", "", 1) + + // Space-split on non-words, title case everything, recombine + opID := nonWordRe.ReplaceAllString(strings.ToLower(path), " ") + opID = strings.Title(opID) + opID = method + strings.ReplaceAll(opID, " ", "") + + // deduplicate operationIds. This is a safeguard, since generated IDs should + // already be unique given our current path naming conventions. + opIDCount[opID]++ + if opIDCount[opID] > 1 { + opID = fmt.Sprintf("%s_%d", opID, opIDCount[opID]) + } + + if context != "" { + opID += "_" + context + } + + oasOperation.OperationID = opID + } + } +} diff --git a/sdk/framework/openapi_test.go b/sdk/framework/openapi_test.go new file mode 100644 index 0000000..9e2763f --- /dev/null +++ b/sdk/framework/openapi_test.go @@ -0,0 +1,925 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "path/filepath" + "reflect" + "regexp" + "sort" + "strings" + "testing" + + "github.com/go-test/deep" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/wrapping" + "github.com/hashicorp/vault/sdk/logical" +) + +func TestOpenAPI_Regex(t *testing.T) { + t.Run("Path fields", func(t *testing.T) { + input := `/foo/bar/{inner}/baz/{outer}` + + matches := pathFieldsRe.FindAllStringSubmatch(input, -1) + + exp1 := "inner" + exp2 := "outer" + if matches[0][1] != exp1 || matches[1][1] != exp2 { + t.Fatalf("Capture error. Expected %s and %s, got %v", exp1, exp2, matches) + } + + input = `/foo/bar/inner/baz/outer` + matches = pathFieldsRe.FindAllStringSubmatch(input, -1) + + if matches != nil { + t.Fatalf("Expected nil match (%s), got %+v", input, matches) + } + }) + t.Run("Filtering", func(t *testing.T) { + tests := []struct { + input string + regex *regexp.Regexp + output string + }{ + { + input: `abcde`, + regex: wsRe, + output: "abcde", + }, + { + input: ` a b cd e `, + regex: wsRe, + output: "abcde", + }, + } + + for _, test := range tests { + result := test.regex.ReplaceAllString(test.input, "") + if result != test.output { + t.Fatalf("Clean Regex error (%s). 
Expected %s, got %s", test.input, test.output, result)
+ }
+ }
+ })
+}
+
+func TestOpenAPI_ExpandPattern(t *testing.T) {
+ tests := []struct {
+ inPattern string
+ outPathlets []string
+ }{
+ // A simple string without regexp metacharacters passes through as is
+ {"rekey/backup", []string{"rekey/backup"}},
+ // A trailing regexp anchor metacharacter is removed
+ {"rekey/backup$", []string{"rekey/backup"}},
+ // As is a leading one
+ {"^rekey/backup", []string{"rekey/backup"}},
+ // Named capture groups become OpenAPI parameters
+ {"auth/(?P<path>.+?)/tune$", []string{"auth/{path}/tune"}},
+ {"auth/(?P<path>.+?)/tune/(?P<more>.*?)$", []string{"auth/{path}/tune/{more}"}},
+ // Even if the capture group contains very complex regexp structure inside it
+ {"something/(?P<something>(a|b(c|d))|e+|f{1,3}[ghi-k]?.*)", []string{"something/{something}"}},
+ // A question-mark results in both a variant without and a variant with the optional path part
+ {"tools/hash(/(?P<urlalgorithm>.+))?", []string{
+ "tools/hash",
+ "tools/hash/{urlalgorithm}",
+ }},
+ // Multiple question-marks evaluate each possible combination
+ {"(leases/)?renew(/(?P<url_lease_id>.+))?", []string{
+ "leases/renew",
+ "leases/renew/{url_lease_id}",
+ "renew",
+ "renew/{url_lease_id}",
+ }},
+ // GenericNameRegex is one particular way of writing a named capture group, so behaves the same
+ {`config/ui/headers/` + GenericNameRegex("header"), []string{"config/ui/headers/{header}"}},
+ // The question-mark behaviour still works when the question-mark is directly applied to a named capture group
+ {`leases/lookup/(?P<prefix>.+?)?`, []string{
+ "leases/lookup/",
+ "leases/lookup/{prefix}",
+ }},
+ // Optional trailing slashes at the end of the path get stripped - even if appearing deep inside an alternation
+ {`(raw/?$|raw/(?P<path>.+))`, []string{
+ "raw",
+ "raw/{path}",
+ }},
+ // OptionalParamRegex is also another way of writing a named capture group, that is optional
+ {"lookup" + OptionalParamRegex("urltoken"), []string{
+ "lookup",
+ "lookup/{urltoken}",
+ }},
+ // Optional trailing slashes at the end of the path get stripped in simpler cases too
+ {"roles/?$", []string{
+ "roles",
+ }},
+ {"roles/?", []string{
+ "roles",
+ }},
+ // Non-optional trailing slashes remain... although don't do this, it breaks HelpOperation!
+ // (Existing real examples of this pattern being fixed via https://github.com/hashicorp/vault/pull/18571)
+ {"accessors/$", []string{
+ "accessors/",
+ }},
+ // GenericNameRegex and OptionalParamRegex still work when concatenated
+ {"verify/" + GenericNameRegex("name") + OptionalParamRegex("urlalgorithm"), []string{
+ "verify/{name}",
+ "verify/{name}/{urlalgorithm}",
+ }},
+ // Named capture groups that specify enum-like parameters work as expected
+ {"^plugins/catalog/(?P<type>auth|database|secret)/(?P<name>.+)$", []string{
+ "plugins/catalog/{type}/{name}",
+ }},
+ {"^plugins/catalog/(?P<type>auth|database|secret)/?$", []string{
+ "plugins/catalog/{type}",
+ }},
+ // Alternations between various literal path segments work
+ {"(pathOne|pathTwo)/", []string{"pathOne/", "pathTwo/"}},
+ {"(pathOne|pathTwo)/" + GenericNameRegex("name"), []string{"pathOne/{name}", "pathTwo/{name}"}},
+ {
+ "(pathOne|path-2|Path_3)/" + GenericNameRegex("name"),
+ []string{"Path_3/{name}", "path-2/{name}", "pathOne/{name}"},
+ },
+ // They still work when combined with GenericNameWithAtRegex
+ {"(creds|sts)/" + GenericNameWithAtRegex("name"), []string{
+ "creds/{name}",
+ "sts/{name}",
+ }},
+ // And when they're somewhere other than the start of the pattern
+ {"keys/generate/(internal|exported|kms)", []string{
+ "keys/generate/exported",
+ "keys/generate/internal",
+ "keys/generate/kms",
+ }},
+ // If a plugin author makes their list operation support both singular and plural forms, the OpenAPI notices
+ {"rolesets?/?", []string{"roleset", "rolesets"}},
+ // Complex nested alternation and question-marks are correctly interpreted
+ {"crl(/pem|/delta(/pem)?)?", []string{"crl", "crl/delta", "crl/delta/pem", "crl/pem"}},
+ }
+
+ for i, test := range tests {
+ out, err := expandPattern(test.inPattern)
+ if err != nil {
+ t.Fatal(err)
+ }
+ sort.Strings(out)
+ if !reflect.DeepEqual(out, test.outPathlets) {
+ t.Fatalf("Test %d: Expected %v got %v", i, test.outPathlets, out)
+ }
+ }
+}
+
+func TestOpenAPI_ExpandPattern_ReturnsError(t *testing.T) {
+ tests := []struct {
+ inPattern string
+ outError error
+ }{
+ // None of these regexp constructs are allowed outside of named capture groups
+ {"[a-z]", errUnsupportableRegexpOperationForOpenAPI},
+ {".", errUnsupportableRegexpOperationForOpenAPI},
+ {"a+", errUnsupportableRegexpOperationForOpenAPI},
+ {"a*", errUnsupportableRegexpOperationForOpenAPI},
+ // So this pattern, which is a combination of two of the above, isn't either - this pattern occurs in the KV
+ // secrets engine for its catch-all error handler, which provides a helpful hint to people treating a KV v2 as
+ // a KV v1.
+ {".*", errUnsupportableRegexpOperationForOpenAPI}, + } + + for i, test := range tests { + _, err := expandPattern(test.inPattern) + if err != test.outError { + t.Fatalf("Test %d: Expected %q got %q", i, test.outError, err) + } + } +} + +func TestOpenAPI_SplitFields(t *testing.T) { + fields := map[string]*FieldSchema{ + "a": {Description: "path"}, + "b": {Description: "body"}, + "c": {Description: "body"}, + "d": {Description: "body"}, + "e": {Description: "path"}, + } + + pathFields, bodyFields := splitFields(fields, "some/{a}/path/{e}") + + lp := len(pathFields) + lb := len(bodyFields) + l := len(fields) + if lp+lb != l { + t.Fatalf("split length error: %d + %d != %d", lp, lb, l) + } + + for name, field := range pathFields { + if field.Description != "path" { + t.Fatalf("expected field %s to be in 'path', found in %s", name, field.Description) + } + } + for name, field := range bodyFields { + if field.Description != "body" { + t.Fatalf("expected field %s to be in 'body', found in %s", name, field.Description) + } + } +} + +func TestOpenAPI_SpecialPaths(t *testing.T) { + tests := map[string]struct { + pattern string + rootPaths []string + rootExpected bool + unauthenticatedPaths []string + unauthenticatedExpected bool + }{ + "empty": { + pattern: "foo", + rootPaths: []string{}, + rootExpected: false, + unauthenticatedPaths: []string{}, + unauthenticatedExpected: false, + }, + "exact-match-unauthenticated": { + pattern: "foo", + rootPaths: []string{}, + rootExpected: false, + unauthenticatedPaths: []string{"foo"}, + unauthenticatedExpected: true, + }, + "exact-match-root": { + pattern: "foo", + rootPaths: []string{"foo"}, + rootExpected: true, + unauthenticatedPaths: []string{"bar"}, + unauthenticatedExpected: false, + }, + "asterisk-match-unauthenticated": { + pattern: "foo/bar", + rootPaths: []string{"foo"}, + rootExpected: false, + unauthenticatedPaths: []string{"foo/*"}, + unauthenticatedExpected: true, + }, + "asterisk-match-root": { + pattern: "foo/bar", + rootPaths: []string{"foo/*"}, + rootExpected: true, + unauthenticatedPaths: []string{"foo"}, + unauthenticatedExpected: false, + }, + "path-ends-with-slash": { + pattern: "foo/", + rootPaths: []string{"foo/*"}, + rootExpected: true, + unauthenticatedPaths: []string{"a", "b", "foo*"}, + unauthenticatedExpected: true, + }, + "asterisk-match-no-slash": { + pattern: "foo", + rootPaths: []string{"foo*"}, + rootExpected: true, + unauthenticatedPaths: []string{"a", "fo*"}, + unauthenticatedExpected: true, + }, + "multiple-root-paths": { + pattern: "foo/bar", + rootPaths: []string{"a", "b", "foo/*"}, + rootExpected: true, + unauthenticatedPaths: []string{"foo/baz/*"}, + unauthenticatedExpected: false, + }, + "plus-match-unauthenticated": { + pattern: "foo/bar/baz", + rootPaths: []string{"foo/bar"}, + rootExpected: false, + unauthenticatedPaths: []string{"foo/+/baz"}, + unauthenticatedExpected: true, + }, + "plus-match-root": { + pattern: "foo/bar/baz", + rootPaths: []string{"foo/+/baz"}, + rootExpected: true, + unauthenticatedPaths: []string{"foo/bar"}, + unauthenticatedExpected: false, + }, + "plus-and-asterisk": { + pattern: "foo/bar/baz/something", + rootPaths: []string{"foo/+/baz/*"}, + rootExpected: true, + unauthenticatedPaths: []string{"foo/+/baz*"}, + unauthenticatedExpected: true, + }, + "double-plus-good": { + pattern: "foo/bar/baz", + rootPaths: []string{"foo/+/+"}, + rootExpected: true, + unauthenticatedPaths: []string{"foo/bar"}, + unauthenticatedExpected: false, + }, + } + for name, test := range tests { + t.Run(name, func(t 
*testing.T) { + doc := NewOASDocument("version") + path := Path{ + Pattern: test.pattern, + } + specialPaths := &logical.Paths{ + Root: test.rootPaths, + Unauthenticated: test.unauthenticatedPaths, + } + + if err := documentPath(&path, specialPaths, "kv", logical.TypeLogical, doc); err != nil { + t.Fatal(err) + } + + actual := doc.Paths["/"+test.pattern].Sudo + if actual != test.rootExpected { + t.Fatalf("Test (root): expected: %v; got: %v", test.rootExpected, actual) + } + + actual = doc.Paths["/"+test.pattern].Unauthenticated + if actual != test.unauthenticatedExpected { + t.Fatalf("Test (unauth): expected: %v; got: %v", test.unauthenticatedExpected, actual) + } + }) + } +} + +func TestOpenAPI_Paths(t *testing.T) { + origDepth := deep.MaxDepth + defer func() { deep.MaxDepth = origDepth }() + deep.MaxDepth = 20 + + t.Run("Legacy callbacks", func(t *testing.T) { + p := &Path{ + Pattern: "lookup/" + GenericNameRegex("id"), + + Fields: map[string]*FieldSchema{ + "id": { + Type: TypeString, + Description: "My id parameter", + }, + "token": { + Type: TypeString, + Description: "My token", + }, + }, + + Callbacks: map[logical.Operation]OperationFunc{ + logical.ReadOperation: nil, + logical.UpdateOperation: nil, + }, + + HelpSynopsis: "Synopsis", + HelpDescription: "Description", + } + + sp := &logical.Paths{ + Root: []string{}, + Unauthenticated: []string{}, + } + testPath(t, p, sp, expected("legacy")) + }) + + t.Run("Operations - All Operations", func(t *testing.T) { + p := &Path{ + Pattern: "foo/" + GenericNameRegex("id"), + Fields: map[string]*FieldSchema{ + "id": { + Type: TypeString, + Description: "id path parameter", + }, + "flavors": { + Type: TypeCommaStringSlice, + Description: "the flavors", + }, + "name": { + Type: TypeNameString, + Default: "Larry", + Description: "the name", + }, + "age": { + Type: TypeInt, + Description: "the age", + AllowedValues: []interface{}{1, 2, 3}, + Required: true, + DisplayAttrs: &DisplayAttributes{ + Name: "Age", + Sensitive: true, + Group: "Some Group", + Value: 7, + }, + }, + "x-abc-token": { + Type: TypeHeader, + Description: "a header value", + AllowedValues: []interface{}{"a", "b", "c"}, + }, + "maximum": { + Type: TypeInt64, + Description: "a maximum value", + }, + "format": { + Type: TypeString, + Description: "a query param", + Query: true, + }, + }, + HelpSynopsis: "Synopsis", + HelpDescription: "Description", + Operations: map[logical.Operation]OperationHandler{ + logical.ReadOperation: &PathOperation{ + Summary: "My Summary", + Description: "My Description", + }, + logical.UpdateOperation: &PathOperation{ + Summary: "Update Summary", + Description: "Update Description", + }, + logical.CreateOperation: &PathOperation{ + Summary: "Create Summary", + Description: "Create Description", + }, + logical.ListOperation: &PathOperation{ + Summary: "List Summary", + Description: "List Description", + }, + logical.DeleteOperation: &PathOperation{ + Summary: "This shouldn't show up", + Unpublished: true, + }, + }, + DisplayAttrs: &DisplayAttributes{ + Navigation: true, + }, + } + + sp := &logical.Paths{ + Root: []string{"foo*"}, + } + testPath(t, p, sp, expected("operations")) + }) + + t.Run("Operations - List Only", func(t *testing.T) { + p := &Path{ + Pattern: "foo/" + GenericNameRegex("id"), + Fields: map[string]*FieldSchema{ + "id": { + Type: TypeString, + Description: "id path parameter", + }, + "flavors": { + Type: TypeCommaStringSlice, + Description: "the flavors", + }, + "name": { + Type: TypeNameString, + Default: "Larry", + Description: "the 
name", + }, + "age": { + Type: TypeInt, + Description: "the age", + AllowedValues: []interface{}{1, 2, 3}, + Required: true, + DisplayAttrs: &DisplayAttributes{ + Name: "Age", + Sensitive: true, + Group: "Some Group", + Value: 7, + }, + }, + "x-abc-token": { + Type: TypeHeader, + Description: "a header value", + AllowedValues: []interface{}{"a", "b", "c"}, + }, + "format": { + Type: TypeString, + Description: "a query param", + Query: true, + }, + }, + HelpSynopsis: "Synopsis", + HelpDescription: "Description", + Operations: map[logical.Operation]OperationHandler{ + logical.ListOperation: &PathOperation{ + Summary: "List Summary", + Description: "List Description", + }, + }, + DisplayAttrs: &DisplayAttributes{ + Navigation: true, + }, + } + + sp := &logical.Paths{ + Root: []string{"foo*"}, + } + testPath(t, p, sp, expected("operations_list")) + }) + + t.Run("Responses", func(t *testing.T) { + p := &Path{ + Pattern: "foo", + HelpSynopsis: "Synopsis", + HelpDescription: "Description", + Operations: map[logical.Operation]OperationHandler{ + logical.ReadOperation: &PathOperation{ + Summary: "My Summary", + Description: "My Description", + Responses: map[int][]Response{ + 202: {{ + Description: "Amazing", + Example: &logical.Response{ + Data: map[string]interface{}{ + "amount": 42, + }, + }, + Fields: map[string]*FieldSchema{ + "field_a": { + Type: TypeString, + Description: "field_a description", + }, + "field_b": { + Type: TypeBool, + Description: "field_b description", + }, + }, + }}, + }, + }, + logical.DeleteOperation: &PathOperation{ + Summary: "Delete stuff", + }, + }, + } + + sp := &logical.Paths{ + Unauthenticated: []string{"x", "y", "foo"}, + } + + testPath(t, p, sp, expected("responses")) + }) +} + +func TestOpenAPI_CustomDecoder(t *testing.T) { + p := &Path{ + Pattern: "foo", + HelpSynopsis: "Synopsis", + Operations: map[logical.Operation]OperationHandler{ + logical.ReadOperation: &PathOperation{ + Summary: "My Summary", + Responses: map[int][]Response{ + 100: {{ + Description: "OK", + Example: &logical.Response{ + Data: map[string]interface{}{ + "foo": 42, + }, + }, + }}, + 200: {{ + Description: "Good", + Example: (*logical.Response)(nil), + }}, + 599: {{ + Description: "Bad", + }}, + }, + }, + }, + } + + docOrig := NewOASDocument("version") + err := documentPath(p, nil, "kv", logical.TypeLogical, docOrig) + if err != nil { + t.Fatal(err) + } + + docJSON := mustJSONMarshal(t, docOrig) + + var intermediate map[string]interface{} + if err := jsonutil.DecodeJSON(docJSON, &intermediate); err != nil { + t.Fatal(err) + } + + docNew, err := NewOASDocumentFromMap(intermediate) + if err != nil { + t.Fatal(err) + } + + docNewJSON := mustJSONMarshal(t, docNew) + + if diff := deep.Equal(docJSON, docNewJSON); diff != nil { + t.Fatal(diff) + } +} + +func TestOpenAPI_CleanResponse(t *testing.T) { + // Verify that an all-null input results in empty JSON + orig := &logical.Response{} + + cr := cleanResponse(orig) + + newJSON := mustJSONMarshal(t, cr) + + if !bytes.Equal(newJSON, []byte("{}")) { + t.Fatalf("expected {}, got: %q", newJSON) + } + + // Verify that all non-null inputs results in JSON that matches the marshalling of + // logical.Response. 
This will fail if logical.Response changes without a corresponding + // change to cleanResponse() + orig = &logical.Response{ + Secret: new(logical.Secret), + Auth: new(logical.Auth), + Data: map[string]interface{}{"foo": 42}, + Redirect: "foo", + Warnings: []string{"foo"}, + WrapInfo: &wrapping.ResponseWrapInfo{Token: "foo"}, + Headers: map[string][]string{"foo": {"bar"}}, + } + origJSON := mustJSONMarshal(t, orig) + + cr = cleanResponse(orig) + + cleanJSON := mustJSONMarshal(t, cr) + + if diff := deep.Equal(origJSON, cleanJSON); diff != nil { + t.Fatal(diff) + } +} + +func TestOpenAPI_constructOperationID(t *testing.T) { + tests := map[string]struct { + path string + pathIndex int + pathAttributes *DisplayAttributes + operation logical.Operation + operationAttributes *DisplayAttributes + defaultPrefix string + expected string + }{ + "empty": { + path: "", + pathIndex: 0, + pathAttributes: nil, + operation: logical.Operation(""), + operationAttributes: nil, + defaultPrefix: "", + expected: "", + }, + "simple-read": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: nil, + operation: logical.ReadOperation, + operationAttributes: nil, + defaultPrefix: "test", + expected: "test-read-path-to-thing", + }, + "simple-write": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: nil, + operation: logical.UpdateOperation, + operationAttributes: nil, + defaultPrefix: "test", + expected: "test-write-path-to-thing", + }, + "operation-verb": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: &DisplayAttributes{OperationVerb: "do-something"}, + operation: logical.UpdateOperation, + operationAttributes: nil, + defaultPrefix: "test", + expected: "do-something", + }, + "operation-verb-override": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: &DisplayAttributes{OperationVerb: "do-something"}, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationVerb: "do-something-else"}, + defaultPrefix: "test", + expected: "do-something-else", + }, + "operation-prefix": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: &DisplayAttributes{OperationPrefix: "my-prefix"}, + operation: logical.UpdateOperation, + operationAttributes: nil, + defaultPrefix: "test", + expected: "my-prefix-write-path-to-thing", + }, + "operation-prefix-override": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: &DisplayAttributes{OperationPrefix: "my-prefix"}, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix"}, + defaultPrefix: "test", + expected: "better-prefix-write-path-to-thing", + }, + "operation-prefix-and-suffix": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: &DisplayAttributes{OperationPrefix: "my-prefix", OperationSuffix: "my-suffix"}, + operation: logical.UpdateOperation, + operationAttributes: nil, + defaultPrefix: "test", + expected: "my-prefix-write-my-suffix", + }, + "operation-prefix-and-suffix-override": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: &DisplayAttributes{OperationPrefix: "my-prefix", OperationSuffix: "my-suffix"}, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix", OperationSuffix: "better-suffix"}, + defaultPrefix: "test", + expected: "better-prefix-write-better-suffix", + }, + "operation-prefix-verb-suffix": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: &DisplayAttributes{OperationPrefix: "my-prefix", OperationSuffix: "my-suffix", OperationVerb: 
"Create"}, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix", OperationSuffix: "better-suffix"}, + defaultPrefix: "test", + expected: "better-prefix-create-better-suffix", + }, + "operation-prefix-verb-suffix-override": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: &DisplayAttributes{OperationPrefix: "my-prefix", OperationSuffix: "my-suffix", OperationVerb: "Create"}, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix", OperationSuffix: "better-suffix", OperationVerb: "Login"}, + defaultPrefix: "test", + expected: "better-prefix-login-better-suffix", + }, + "operation-prefix-verb": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: nil, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix", OperationVerb: "Login"}, + defaultPrefix: "test", + expected: "better-prefix-login", + }, + "operation-verb-suffix": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: nil, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationVerb: "Login", OperationSuffix: "better-suffix"}, + defaultPrefix: "test", + expected: "login-better-suffix", + }, + "pipe-delimited-suffix-0": { + path: "path/to/thing", + pathIndex: 0, + pathAttributes: nil, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix", OperationSuffix: "suffix0|suffix1"}, + defaultPrefix: "test", + expected: "better-prefix-write-suffix0", + }, + "pipe-delimited-suffix-1": { + path: "path/to/thing", + pathIndex: 1, + pathAttributes: nil, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix", OperationSuffix: "suffix0|suffix1"}, + defaultPrefix: "test", + expected: "better-prefix-write-suffix1", + }, + "pipe-delimited-suffix-2-fallback": { + path: "path/to/thing", + pathIndex: 2, + pathAttributes: nil, + operation: logical.UpdateOperation, + operationAttributes: &DisplayAttributes{OperationPrefix: "better-prefix", OperationSuffix: "suffix0|suffix1"}, + defaultPrefix: "test", + expected: "better-prefix-write-path-to-thing", + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + actual := constructOperationID( + test.path, + test.pathIndex, + test.pathAttributes, + test.operation, + test.operationAttributes, + test.defaultPrefix, + ) + if actual != test.expected { + t.Fatalf("expected: %s; got: %s", test.expected, actual) + } + }) + } +} + +func TestOpenAPI_hyphenatedToTitleCase(t *testing.T) { + tests := map[string]struct { + in string + expected string + }{ + "simple": { + in: "test", + expected: "Test", + }, + "two-words": { + in: "two-words", + expected: "TwoWords", + }, + "three-words": { + in: "one-two-three", + expected: "OneTwoThree", + }, + "not-hyphenated": { + in: "something_like_this", + expected: "Something_like_this", + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + actual := hyphenatedToTitleCase(test.in) + if actual != test.expected { + t.Fatalf("expected: %s; got: %s", test.expected, actual) + } + }) + } +} + +func testPath(t *testing.T, path *Path, sp *logical.Paths, expectedJSON string) { + t.Helper() + + doc := NewOASDocument("dummyversion") + if err := documentPath(path, sp, "kv", logical.TypeLogical, doc); err != nil { + 
t.Fatal(err)
+	}
+	doc.CreateOperationIDs("")
+
+	docJSON, err := json.MarshalIndent(doc, "", "  ")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Compare json by first decoding, then comparing with a deep equality check.
+	var expected, actual interface{}
+	if err := jsonutil.DecodeJSON(docJSON, &actual); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := jsonutil.DecodeJSON([]byte(expectedJSON), &expected); err != nil {
+		t.Fatal(err)
+	}
+
+	if diff := deep.Equal(actual, expected); diff != nil {
+		// fmt.Println(string(docJSON)) // uncomment to debug generated JSON (very helpful when fixing tests)
+		t.Fatal(diff)
+	}
+}
+
+func getPathOp(pi *OASPathItem, op string) *OASOperation {
+	switch op {
+	case "get":
+		return pi.Get
+	case "post":
+		return pi.Post
+	case "delete":
+		return pi.Delete
+	default:
+		panic("unexpected operation: " + op)
+	}
+}
+
+func expected(name string) string {
+	data, err := ioutil.ReadFile(filepath.Join("testdata", name+".json"))
+	if err != nil {
+		panic(err)
+	}
+
+	content := strings.Replace(string(data), "<vault_version>", "dummyversion", 1)
+
+	return content
+}
+
+func mustJSONMarshal(t *testing.T, data interface{}) []byte {
+	j, err := json.MarshalIndent(data, "", "  ")
+	if err != nil {
+		t.Fatal(err)
+	}
+	return j
+}
diff --git a/sdk/framework/path.go b/sdk/framework/path.go
new file mode 100644
index 0000000..ccee0fc
--- /dev/null
+++ b/sdk/framework/path.go
@@ -0,0 +1,430 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package framework
+
+import (
+	"context"
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/vault/sdk/helper/license"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// Helper which returns a generic regex string for creating endpoint patterns
+// that are identified by the given name in the backends
+func GenericNameRegex(name string) string {
+	return fmt.Sprintf("(?P<%s>\\w(([\\w-.]+)?\\w)?)", name)
+}
+
+// GenericNameWithAtRegex returns a generic regex that allows alphanumeric
+// characters along with -, . and @.
+func GenericNameWithAtRegex(name string) string {
+	return fmt.Sprintf("(?P<%s>\\w(([\\w-.@]+)?\\w)?)", name)
+}
+
+// Helper which returns a regex string for optionally accepting a field
+// from the API URL
+func OptionalParamRegex(name string) string {
+	return fmt.Sprintf("(/(?P<%s>.+))?", name)
+}
+
+// Helper which returns a regex string for capturing an entire endpoint path
+// as the given name.
+func MatchAllRegex(name string) string {
+	return fmt.Sprintf(`(?P<%s>.*)`, name)
+}
+
+// PathAppend is a helper for appending lists of paths into a single
+// list.
+func PathAppend(paths ...[]*Path) []*Path {
+	result := make([]*Path, 0, 10)
+	for _, ps := range paths {
+		result = append(result, ps...)
+	}
+
+	return result
+}
+
+// Path is a single path that the backend responds to.
+type Path struct {
+	// Pattern is the pattern of the URL that matches this path.
+	//
+	// This should be a valid regular expression. Named captures will be
+	// exposed as fields that should map to a schema in Fields. If a named
+	// capture is not a field in the Fields map, then it will be ignored.
+	//
+	// The pattern will automatically have a ^ prepended and a $ appended before
+	// use, if these are not already present, so these may be omitted for clarity.
+	//
+	// If a ListOperation is being defined, the pattern must end with /? to match
+	// a trailing slash optionally, as ListOperations are always processed with a
+	// trailing slash added to the path if not already present. The match must not
+	// require the presence of a trailing slash, as HelpOperations, even for a
+	// path which only implements ListOperation, are processed without a trailing
+	// slash - so failure to make the trailing slash optional will break the
+	// `vault path-help` command for the path.
+	Pattern string
+
+	// Fields is the mapping of data fields to a schema describing that
+	// field.
+	//
+	// Field values are obtained from:
+	//
+	// - Named captures in the Pattern.
+	//
+	// - Parameters in the HTTP request body, for HTTP methods where a
+	//   request body is expected, i.e. PUT/POST/PATCH. The request body is
+	//   typically formatted as JSON, though
+	//   "application/x-www-form-urlencoded" format can also be accepted.
+	//
+	// - Parameters in the HTTP URL query-string, for HTTP methods where
+	//   there is no request body, i.e. GET/LIST/DELETE. The query-string
+	//   is *not* parsed at all for PUT/POST/PATCH requests.
+	//
+	// Should the same field be specified both as a named capture and as
+	// a parameter, the named capture takes precedence, and a warning is
+	// returned.
+	Fields map[string]*FieldSchema
+
+	// Operations is the set of operations supported and the associated OperationHandler.
+	//
+	// If both Create and Update operations are present, documentation and examples from
+	// the Update definition will be used. Similarly if both Read and List are present,
+	// Read will be used for documentation.
+	Operations map[logical.Operation]OperationHandler
+
+	// Callbacks are the set of callbacks that are called for a given
+	// operation. If a callback for a specific operation is not present,
+	// then logical.ErrUnsupportedOperation is automatically generated.
+	//
+	// The help operation is the only operation that the Path will
+	// automatically handle if the Help field is set. If both the Help
+	// field is set and there is a callback registered here, then the
+	// callback will be called.
+	//
+	// Deprecated: Operations should be used instead and will take priority if present.
+	Callbacks map[logical.Operation]OperationFunc
+
+	// ExistenceCheck, if implemented, is used to query whether a given
+	// resource exists or not. This is used for ACL purposes: if an Update
+	// action is specified, and the existence check returns false, the action
+	// is not allowed since the resource must first be created. The reverse is
+	// also true. If not specified, the Update action is forced and the user
+	// must have UpdateCapability on the path.
+	ExistenceCheck ExistenceFunc
+
+	// FeatureRequired, if implemented, will validate if the given feature is
+	// enabled for the set of paths.
+	FeatureRequired license.Features
+
+	// Deprecated denotes that this path is considered deprecated. This may
+	// be reflected in help and documentation.
+	Deprecated bool
+
+	// Help is text describing how to use this path. This will be used
+	// to auto-generate the help operation. The Path will automatically
+	// generate a parameter listing and URL structure based on the
+	// regular expression, so the help text should just contain a description
+	// of what happens.
+	//
+	// HelpSynopsis is a one-sentence description of the path. This will
+	// be automatically line-wrapped at 80 characters.
+	//
+	// HelpDescription is a long-form description of the path. This will
+	// be automatically line-wrapped at 80 characters.
+	HelpSynopsis    string
+	HelpDescription string
+
+	// DisplayAttrs provides hints for UI and documentation generators. They
+	// will be included in OpenAPI output if set.
+	DisplayAttrs *DisplayAttributes
+
+	// TakesArbitraryInput is used for endpoints that take arbitrary input, instead
+	// of or as well as their Fields. This is taken into account when printing
+	// warnings about ignored fields. If this is set, we will not warn when data is
+	// provided that is not part of the Fields declaration.
+	TakesArbitraryInput bool
+}
+
+// OperationHandler defines and describes a specific operation handler.
+type OperationHandler interface {
+	Handler() OperationFunc
+	Properties() OperationProperties
+}
+
+// OperationProperties describes an operation for documentation, help text,
+// and other clients. A Summary should always be provided, whereas other
+// fields can be populated as needed.
+type OperationProperties struct {
+	// Summary is a brief (usually one line) description of the operation.
+	Summary string
+
+	// Description is extended documentation of the operation and may contain
+	// Markdown-formatted text markup.
+	Description string
+
+	// Examples provides samples of the expected request data. The most
+	// relevant example should be first in the list, as it will be shown in
+	// documentation that supports only a single example.
+	Examples []RequestExample
+
+	// Responses provides a list of response descriptions for a given response
+	// code. The most relevant response should be first in the list, as it will
+	// be shown in documentation that only allows a single example.
+	Responses map[int][]Response
+
+	// Unpublished indicates that this operation should not appear in public
+	// documentation or help text. The operation may still have documentation
+	// attached that can be used internally.
+	Unpublished bool
+
+	// Deprecated indicates that this operation should be avoided.
+	Deprecated bool
+
+	// The ForwardPerformance* parameters tell the router to unconditionally forward requests
+	// to this path if the processing node is a performance secondary/standby. This is generally
+	// *not* needed as there is already handling in place to automatically forward requests
+	// that try to write to storage. But there are a few cases where explicit forwarding is needed,
+	// for example:
+	//
+	//   * The handler makes requests to other systems (e.g. an external API, database, ...) that
+	//     change external state somehow, and subsequently writes to storage. In this case the
+	//     default forwarding logic could result in multiple mutative calls to the external system.
+	//
+	//   * The operation spans multiple requests (e.g. an OIDC callback), in-memory caching is used,
+	//     and the same node (and therefore cache) should process both steps.
+	//
+	// If explicit forwarding is needed, it is usually true that forwarding from both performance
+	// standbys and performance secondaries should be enabled.
+	//
+	// ForwardPerformanceStandby indicates that this path should not be processed
+	// on a performance standby node, and should be forwarded to the active node instead.
+	ForwardPerformanceStandby bool
+
+	// ForwardPerformanceSecondary indicates that this path should not be processed
+	// on a performance secondary node, and should be forwarded to the active node instead.
+	ForwardPerformanceSecondary bool
+
+	// DisplayAttrs provides hints for UI and documentation generators. They
+	// will be included in OpenAPI output if set.
+	DisplayAttrs *DisplayAttributes
+}
+
+type DisplayAttributes struct {
+	// Name is the name of the field suitable as a label or documentation heading.
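+	//
+	// An illustrative sketch of setting display hints on a field; the
+	// "max_ttl" field name and its labels are hypothetical, not taken from
+	// any real plugin:
+	//
+	//	Fields: map[string]*FieldSchema{
+	//		"max_ttl": {
+	//			Type:        TypeDurationSecond,
+	//			Description: "Maximum time-to-live for the credential.",
+	//			DisplayAttrs: &DisplayAttributes{
+	//				Name:  "Maximum TTL",
+	//				Group: "Tuning",
+	//			},
+	//		},
+	//	},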
+	Name string `json:"name,omitempty"`
+
+	// Description of the field that renders as tooltip help text beside the label (name) in the UI.
+	// This may be used to replace descriptions that reference comma separation but correspond
+	// to UI inputs where only arrays are valid. For example params with Type: framework.TypeCommaStringSlice
+	Description string `json:"description,omitempty"`
+
+	// Value is a sample value to display for this field. This may be used
+	// to indicate a default value, but it is for display only and completely separate
+	// from any Default member handling.
+	Value interface{} `json:"value,omitempty"`
+
+	// Sensitive indicates that the value should be masked by default in the UI.
+	Sensitive bool `json:"sensitive,omitempty"`
+
+	// Navigation indicates that the path should be available as a navigation tab.
+	Navigation bool `json:"navigation,omitempty"`
+
+	// ItemType is the type of item this path operates on.
+	ItemType string `json:"itemType,omitempty"`
+
+	// Group is the suggested UI group to place this field in.
+	Group string `json:"group,omitempty"`
+
+	// Action is the verb to use for the operation.
+	Action string `json:"action,omitempty"`
+
+	// OperationPrefix is a hyphenated lower-case string used to construct
+	// OpenAPI OperationID (prefix + verb + suffix). OperationPrefix is
+	// typically a human-readable name of the plugin or a prefix shared by
+	// multiple related endpoints.
+	OperationPrefix string `json:"operationPrefix,omitempty"`
+
+	// OperationVerb is a hyphenated lower-case string used to construct
+	// OpenAPI OperationID (prefix + verb + suffix). OperationVerb is typically
+	// an action to be performed (e.g. "generate", "sign", "login", etc.). If
+	// not specified, the verb defaults to `logical.Operation.String()`
+	// (e.g. "read", "list", "delete", "write" for Create/Update).
+	OperationVerb string `json:"operationVerb,omitempty"`
+
+	// OperationSuffix is a hyphenated lower-case string used to construct
+	// OpenAPI OperationID (prefix + verb + suffix). It is typically the name
+	// of the resource on which the action is performed (e.g. "role",
+	// "credentials", etc.). A pipe (|) separator can be used to list different
+	// suffixes for various permutations of the `Path.Pattern` regular
+	// expression. If not specified, the suffix defaults to the `Path.Pattern`
+	// split by dashes.
+	OperationSuffix string `json:"operationSuffix,omitempty"`
+
+	// EditType is the optional type of form field needed for a property.
+	// This is only necessary for a "textarea" or "file".
+	EditType string `json:"editType,omitempty"`
+}
+
+// RequestExample is an example of request data.
+type RequestExample struct {
+	Description string                 // optional description of the request
+	Data        map[string]interface{} // map version of sample JSON request data
+
+	// Optional example response to the sample request. This approach is considered
+	// provisional for now, and this field may be changed or removed.
+	Response *Response
+}
+
+// Response describes and optionally demonstrates an operation response.
+type Response struct {
+	Description string                  // summary of the response; should always be provided
+	MediaType   string                  // media type of the response, defaulting to "application/json" if empty
+	Fields      map[string]*FieldSchema // the fields present in this response, used to generate openapi response
+	Example     *logical.Response       // example response data
+}
+
+// PathOperation is a concrete implementation of OperationHandler.
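+//
+// A minimal usage sketch of wiring a PathOperation into a Path; the path
+// pattern, summary, and callback name below are hypothetical, not from any
+// real plugin:
+//
+//	&Path{
+//		Pattern: "roles/" + GenericNameRegex("name"),
+//		Fields: map[string]*FieldSchema{
+//			"name": {Type: TypeString, Description: "Name of the role."},
+//		},
+//		Operations: map[logical.Operation]OperationHandler{
+//			logical.ReadOperation: &PathOperation{
+//				Callback: b.pathRoleRead, // assumed handler with an OperationFunc signature
+//				Summary:  "Read a role.",
+//			},
+//		},
+//	}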
+type PathOperation struct {
+	Callback                    OperationFunc
+	Summary                     string
+	Description                 string
+	Examples                    []RequestExample
+	Responses                   map[int][]Response
+	Unpublished                 bool
+	Deprecated                  bool
+	ForwardPerformanceSecondary bool
+	ForwardPerformanceStandby   bool
+	DisplayAttrs                *DisplayAttributes
+}
+
+func (p *PathOperation) Handler() OperationFunc {
+	return p.Callback
+}
+
+func (p *PathOperation) Properties() OperationProperties {
+	return OperationProperties{
+		Summary:                     strings.TrimSpace(p.Summary),
+		Description:                 strings.TrimSpace(p.Description),
+		Responses:                   p.Responses,
+		Examples:                    p.Examples,
+		Unpublished:                 p.Unpublished,
+		Deprecated:                  p.Deprecated,
+		ForwardPerformanceSecondary: p.ForwardPerformanceSecondary,
+		ForwardPerformanceStandby:   p.ForwardPerformanceStandby,
+		DisplayAttrs:                p.DisplayAttrs,
+	}
+}
+
+func (p *Path) helpCallback(b *Backend) OperationFunc {
+	return func(ctx context.Context, req *logical.Request, data *FieldData) (*logical.Response, error) {
+		var tplData pathTemplateData
+		tplData.Request = req.Path
+		tplData.RoutePattern = p.Pattern
+		tplData.Synopsis = strings.TrimSpace(p.HelpSynopsis)
+		if tplData.Synopsis == "" {
+			tplData.Synopsis = "<no synopsis>"
+		}
+		tplData.Description = strings.TrimSpace(p.HelpDescription)
+		if tplData.Description == "" {
+			tplData.Description = "<no description>"
+		}
+
+		// Alphabetize the fields
+		fieldKeys := make([]string, 0, len(p.Fields))
+		for k := range p.Fields {
+			fieldKeys = append(fieldKeys, k)
+		}
+		sort.Strings(fieldKeys)
+
+		// Build the field help
+		tplData.Fields = make([]pathTemplateFieldData, len(fieldKeys))
+		for i, k := range fieldKeys {
+			schema := p.Fields[k]
+			description := strings.TrimSpace(schema.Description)
+			if description == "" {
+				description = "<no description>"
+			}
+
+			tplData.Fields[i] = pathTemplateFieldData{
+				Key:         k,
+				Type:        schema.Type.String(),
+				Description: description,
+				Deprecated:  schema.Deprecated,
+			}
+		}
+
+		help, err := executeTemplate(pathHelpTemplate, &tplData)
+		if err != nil {
+			return nil, errwrap.Wrapf("error executing template: {{err}}", err)
+		}
+
+		// The plugin type (e.g. "kv", "cubbyhole") is only assigned at the time
+		// the plugin is enabled (mounted). If specified in the request, the type
+		// will be used as part of the request/response names in the OAS document
+		var requestResponsePrefix string
+		if v, ok := req.Data["requestResponsePrefix"]; ok {
+			requestResponsePrefix = v.(string)
+		}
+
+		// Build OpenAPI response for this path
+		vaultVersion := "unknown"
+		if b.System() != nil {
+			// b.System() should always be non-nil, except tests might create a
+			// Backend without one.
+ env, err := b.System().PluginEnv(context.Background()) + if err != nil { + return nil, err + } + if env != nil { + vaultVersion = env.VaultVersion + } + } + doc := NewOASDocument(vaultVersion) + if err := documentPath(p, b.SpecialPaths(), requestResponsePrefix, b.BackendType, doc); err != nil { + b.Logger().Warn("error generating OpenAPI", "error", err) + } + + return logical.HelpResponse(help, nil, doc), nil + } +} + +type pathTemplateData struct { + Request string + RoutePattern string + Synopsis string + Description string + Fields []pathTemplateFieldData +} + +type pathTemplateFieldData struct { + Key string + Type string + Deprecated bool + Description string + URL bool +} + +const pathHelpTemplate = ` +Request: {{.Request}} +Matching Route: {{.RoutePattern}} + +{{.Synopsis}} + +{{ if .Fields -}} +## PARAMETERS +{{range .Fields}} +{{indent 4 .Key}} ({{.Type}}) +{{if .Deprecated}} +{{printf "(DEPRECATED) %s" .Description | indent 8}} +{{else}} +{{indent 8 .Description}} +{{end}}{{end}}{{end}} +## DESCRIPTION + +{{.Description}} +` diff --git a/sdk/framework/path_map.go b/sdk/framework/path_map.go new file mode 100644 index 0000000..46cf472 --- /dev/null +++ b/sdk/framework/path_map.go @@ -0,0 +1,289 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "context" + "fmt" + "strings" + "sync" + + saltpkg "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" +) + +// DEPRECATED: Don't use this. It's too inflexible, nearly impossible to use +// with some modern Vault features, and imposes specific API designs. +// +// PathMap can be used to generate a path that stores mappings in the +// storage. It is a structure that also exports functions for querying the +// mappings. +// +// The primary use case for this is for credential providers to do their +// mapping to policies. 
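+//
+// A minimal, hypothetical sketch of how a backend might once have used it;
+// the mapping name, key, and policy value are illustrative only:
+//
+//	m := &PathMap{Name: "usermap"}
+//	b := &Backend{Paths: m.Paths()} // register its routes on a backend
+//	// ... later, inside a handler (ctx and req assumed in scope):
+//	_ = m.Put(ctx, req.Storage, "alice", map[string]interface{}{"value": "dev-policy"})
+//	v, _ := m.Get(ctx, req.Storage, "alice")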
+type PathMap struct { + Prefix string + Name string + Schema map[string]*FieldSchema + CaseSensitive bool + Salt *saltpkg.Salt + SaltFunc func(context.Context) (*saltpkg.Salt, error) + + once sync.Once +} + +func (p *PathMap) init() { + if p.Prefix == "" { + p.Prefix = "map" + } + + if p.Schema == nil { + p.Schema = map[string]*FieldSchema{ + "value": { + Type: TypeString, + Description: fmt.Sprintf("Value for %s mapping", p.Name), + }, + } + } +} + +// pathStruct returns the pathStruct for this mapping +func (p *PathMap) pathStruct(ctx context.Context, s logical.Storage, k string) (*PathStruct, error) { + p.once.Do(p.init) + + // If we don't care about casing, store everything lowercase + if !p.CaseSensitive { + k = strings.ToLower(k) + } + + // The original key before any salting + origKey := k + + // If we have a salt, apply it before lookup + salt := p.Salt + var err error + if p.SaltFunc != nil { + salt, err = p.SaltFunc(ctx) + if err != nil { + return nil, err + } + } + if salt != nil { + k = "s" + salt.SaltIDHashFunc(k, saltpkg.SHA256Hash) + } + + finalName := fmt.Sprintf("map/%s/%s", p.Name, k) + ps := &PathStruct{ + Name: finalName, + Schema: p.Schema, + } + + if !strings.HasPrefix(origKey, "s") && k != origKey { + // Ensure that no matter what happens what is returned is the final + // path + defer func() { + ps.Name = finalName + }() + + // + // Check for unsalted version and upgrade if so + // + + // Generate the unsalted name + unsaltedName := fmt.Sprintf("map/%s/%s", p.Name, origKey) + // Set the path struct to use the unsalted name + ps.Name = unsaltedName + + val, err := ps.Get(ctx, s) + if err != nil { + return nil, err + } + // If not nil, we have an unsalted entry -- upgrade it + if val != nil { + // Set the path struct to use the desired final name + ps.Name = finalName + err = ps.Put(ctx, s, val) + if err != nil { + return nil, err + } + // Set it back to the old path and delete + ps.Name = unsaltedName + err = ps.Delete(ctx, s) + if err != nil { + return nil, err + } + // We'll set this in the deferred function but doesn't hurt here + ps.Name = finalName + } + + // + // Check for SHA1 hashed version and upgrade if so + // + + // Generate the SHA1 hash suffixed path name + sha1SuffixedName := fmt.Sprintf("map/%s/%s", p.Name, salt.SaltID(origKey)) + + // Set the path struct to use the SHA1 hash suffixed path name + ps.Name = sha1SuffixedName + + val, err = ps.Get(ctx, s) + if err != nil { + return nil, err + } + // If not nil, we have an SHA1 hash suffixed entry -- upgrade it + if val != nil { + // Set the path struct to use the desired final name + ps.Name = finalName + err = ps.Put(ctx, s, val) + if err != nil { + return nil, err + } + // Set it back to the old path and delete + ps.Name = sha1SuffixedName + err = ps.Delete(ctx, s) + if err != nil { + return nil, err + } + // We'll set this in the deferred function but doesn't hurt here + ps.Name = finalName + } + } + + return ps, nil +} + +// Get reads a value out of the mapping +func (p *PathMap) Get(ctx context.Context, s logical.Storage, k string) (map[string]interface{}, error) { + ps, err := p.pathStruct(ctx, s, k) + if err != nil { + return nil, err + } + return ps.Get(ctx, s) +} + +// Put writes a value into the mapping +func (p *PathMap) Put(ctx context.Context, s logical.Storage, k string, v map[string]interface{}) error { + ps, err := p.pathStruct(ctx, s, k) + if err != nil { + return err + } + return ps.Put(ctx, s, v) +} + +// Delete removes a value from the mapping +func (p *PathMap) Delete(ctx 
context.Context, s logical.Storage, k string) error {
+	ps, err := p.pathStruct(ctx, s, k)
+	if err != nil {
+		return err
+	}
+	return ps.Delete(ctx, s)
+}
+
+// List reads the keys under a given path
+func (p *PathMap) List(ctx context.Context, s logical.Storage, prefix string) ([]string, error) {
+	stripPrefix := fmt.Sprintf("struct/map/%s/", p.Name)
+	fullPrefix := fmt.Sprintf("%s%s", stripPrefix, prefix)
+	out, err := s.List(ctx, fullPrefix)
+	if err != nil {
+		return nil, err
+	}
+	stripped := make([]string, len(out))
+	for idx, k := range out {
+		stripped[idx] = strings.TrimPrefix(k, stripPrefix)
+	}
+	return stripped, nil
+}
+
+// Paths are the paths to append to the Backend paths.
+func (p *PathMap) Paths() []*Path {
+	p.once.Do(p.init)
+
+	// Build the schema by simply adding the "key"
+	schema := make(map[string]*FieldSchema)
+	for k, v := range p.Schema {
+		schema[k] = v
+	}
+	schema["key"] = &FieldSchema{
+		Type:        TypeString,
+		Description: fmt.Sprintf("Key for the %s mapping", p.Name),
+	}
+
+	return []*Path{
+		{
+			Pattern: fmt.Sprintf("%s/%s/?$", p.Prefix, p.Name),
+
+			Callbacks: map[logical.Operation]OperationFunc{
+				logical.ListOperation: p.pathList(),
+				logical.ReadOperation: p.pathList(),
+			},
+
+			HelpSynopsis: fmt.Sprintf("Read mappings for %s", p.Name),
+		},
+
+		{
+			Pattern: fmt.Sprintf(`%s/%s/(?P<key>[-\w]+)`, p.Prefix, p.Name),
+
+			Fields: schema,
+
+			Callbacks: map[logical.Operation]OperationFunc{
+				logical.CreateOperation: p.pathSingleWrite(),
+				logical.ReadOperation:   p.pathSingleRead(),
+				logical.UpdateOperation: p.pathSingleWrite(),
+				logical.DeleteOperation: p.pathSingleDelete(),
+			},
+
+			HelpSynopsis: fmt.Sprintf("Read/write/delete a single %s mapping", p.Name),
+
+			ExistenceCheck: p.pathSingleExistenceCheck(),
+		},
+	}
+}
+
+func (p *PathMap) pathList() OperationFunc {
+	return func(ctx context.Context, req *logical.Request, d *FieldData) (*logical.Response, error) {
+		keys, err := p.List(ctx, req.Storage, "")
+		if err != nil {
+			return nil, err
+		}
+
+		return logical.ListResponse(keys), nil
+	}
+}
+
+func (p *PathMap) pathSingleRead() OperationFunc {
+	return func(ctx context.Context, req *logical.Request, d *FieldData) (*logical.Response, error) {
+		v, err := p.Get(ctx, req.Storage, d.Get("key").(string))
+		if err != nil {
+			return nil, err
+		}
+
+		return &logical.Response{
+			Data: v,
+		}, nil
+	}
+}
+
+func (p *PathMap) pathSingleWrite() OperationFunc {
+	return func(ctx context.Context, req *logical.Request, d *FieldData) (*logical.Response, error) {
+		err := p.Put(ctx, req.Storage, d.Get("key").(string), d.Raw)
+		return nil, err
+	}
+}
+
+func (p *PathMap) pathSingleDelete() OperationFunc {
+	return func(ctx context.Context, req *logical.Request, d *FieldData) (*logical.Response, error) {
+		err := p.Delete(ctx, req.Storage, d.Get("key").(string))
+		return nil, err
+	}
+}
+
+func (p *PathMap) pathSingleExistenceCheck() ExistenceFunc {
+	return func(ctx context.Context, req *logical.Request, d *FieldData) (bool, error) {
+		v, err := p.Get(ctx, req.Storage, d.Get("key").(string))
+		if err != nil {
+			return false, err
+		}
+		return v != nil, nil
+	}
+}
diff --git a/sdk/framework/path_map_test.go b/sdk/framework/path_map_test.go
new file mode 100644
index 0000000..3fe6308
--- /dev/null
+++ b/sdk/framework/path_map_test.go
@@ -0,0 +1,353 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "context" + "testing" + + saltpkg "github.com/hashicorp/vault/sdk/helper/salt" + "github.com/hashicorp/vault/sdk/logical" +) + +func TestPathMap(t *testing.T) { + p := &PathMap{Name: "foo"} + storage := new(logical.InmemStorage) + var b logical.Backend = &Backend{Paths: p.Paths()} + + ctx := context.Background() + + // Write via HTTP + _, err := b.HandleRequest(ctx, &logical.Request{ + Operation: logical.UpdateOperation, + Path: "map/foo/a", + Data: map[string]interface{}{ + "value": "bar", + }, + Storage: storage, + }) + if err != nil { + t.Fatalf("bad: %#v", err) + } + + // Read via HTTP + resp, err := b.HandleRequest(ctx, &logical.Request{ + Operation: logical.ReadOperation, + Path: "map/foo/a", + Storage: storage, + }) + if err != nil { + t.Fatalf("bad: %#v", err) + } + if resp.Data["value"] != "bar" { + t.Fatalf("bad: %#v", resp) + } + + // Read via API + v, err := p.Get(ctx, storage, "a") + if err != nil { + t.Fatalf("bad: %#v", err) + } + if v["value"] != "bar" { + t.Fatalf("bad: %#v", v) + } + + // Read via API with other casing + v, err = p.Get(ctx, storage, "A") + if err != nil { + t.Fatalf("bad: %#v", err) + } + if v["value"] != "bar" { + t.Fatalf("bad: %#v", v) + } + + // Verify List + keys, err := p.List(ctx, storage, "") + if err != nil { + t.Fatalf("bad: %#v", err) + } + if len(keys) != 1 || keys[0] != "a" { + t.Fatalf("bad: %#v", keys) + } + + // LIST via HTTP + resp, err = b.HandleRequest(ctx, &logical.Request{ + Operation: logical.ListOperation, + Path: "map/foo/", + Storage: storage, + }) + if err != nil { + t.Fatalf("bad: %#v", err) + } + if len(resp.Data) != 1 || len(resp.Data["keys"].([]string)) != 1 || + resp.Data["keys"].([]string)[0] != "a" { + t.Fatalf("bad: %#v", resp) + } + + // Delete via HTTP + resp, err = b.HandleRequest(ctx, &logical.Request{ + Operation: logical.DeleteOperation, + Path: "map/foo/a", + Storage: storage, + }) + if err != nil { + t.Fatalf("bad: %#v", err) + } + if resp != nil { + t.Fatalf("bad: %#v", resp) + } + + // Re-read via HTTP + resp, err = b.HandleRequest(ctx, &logical.Request{ + Operation: logical.ReadOperation, + Path: "map/foo/a", + Storage: storage, + }) + if err != nil { + t.Fatalf("bad: %#v", err) + } + if _, ok := resp.Data["value"]; ok { + t.Fatalf("bad: %#v", resp) + } + + // Re-read via API + v, err = p.Get(ctx, storage, "a") + if err != nil { + t.Fatalf("bad: %#v", err) + } + if v != nil { + t.Fatalf("bad: %#v", v) + } +} + +func TestPathMap_getInvalid(t *testing.T) { + p := &PathMap{Name: "foo"} + storage := new(logical.InmemStorage) + + v, err := p.Get(context.Background(), storage, "nope") + if err != nil { + t.Fatalf("bad: %#v", err) + } + if v != nil { + t.Fatalf("bad: %#v", v) + } +} + +func TestPathMap_routes(t *testing.T) { + p := &PathMap{Name: "foo"} + TestBackendRoutes(t, &Backend{Paths: p.Paths()}, []string{ + "map/foo", // Normal + "map/foo/bar", // Normal + "map/foo/bar-baz", // Hyphen key + }) +} + +func TestPathMap_Salted(t *testing.T) { + storage := new(logical.InmemStorage) + + salt, err := saltpkg.NewSalt(context.Background(), storage, &saltpkg.Config{ + HashFunc: saltpkg.SHA1Hash, + }) + if err != nil { + t.Fatalf("err: %v", err) + } + + testSalting(t, context.Background(), storage, salt, &PathMap{Name: "foo", Salt: salt}) +} + +func testSalting(t *testing.T, ctx context.Context, storage logical.Storage, salt *saltpkg.Salt, p *PathMap) { + var b logical.Backend = &Backend{Paths: p.Paths()} + var err error + + // Write via HTTP + _, 
err = b.HandleRequest(ctx, &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "map/foo/a",
+		Data: map[string]interface{}{
+			"value": "bar",
+		},
+		Storage: storage,
+	})
+	if err != nil {
+		t.Fatalf("bad: %#v", err)
+	}
+
+	// Non-salted version should not be there
+	out, err := storage.Get(ctx, "struct/map/foo/a")
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if out != nil {
+		t.Fatalf("non-salted key found")
+	}
+
+	// Ensure the path is salted
+	expect := "s" + salt.SaltIDHashFunc("a", saltpkg.SHA256Hash)
+	out, err = storage.Get(ctx, "struct/map/foo/"+expect)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if out == nil {
+		t.Fatalf("missing salted key")
+	}
+
+	// Read via HTTP
+	resp, err := b.HandleRequest(ctx, &logical.Request{
+		Operation: logical.ReadOperation,
+		Path:      "map/foo/a",
+		Storage:   storage,
+	})
+	if err != nil {
+		t.Fatalf("bad: %#v", err)
+	}
+	if resp.Data["value"] != "bar" {
+		t.Fatalf("bad: %#v", resp)
+	}
+
+	// Read via API
+	v, err := p.Get(ctx, storage, "a")
+	if err != nil {
+		t.Fatalf("bad: %#v", err)
+	}
+	if v["value"] != "bar" {
+		t.Fatalf("bad: %#v", v)
+	}
+
+	// Read via API with other casing
+	v, err = p.Get(ctx, storage, "A")
+	if err != nil {
+		t.Fatalf("bad: %#v", err)
+	}
+	if v["value"] != "bar" {
+		t.Fatalf("bad: %#v", v)
+	}
+
+	// Verify List
+	keys, err := p.List(ctx, storage, "")
+	if err != nil {
+		t.Fatalf("bad: %#v", err)
+	}
+	if len(keys) != 1 || keys[0] != expect {
+		t.Fatalf("bad: %#v", keys)
+	}
+
+	// Delete via HTTP
+	resp, err = b.HandleRequest(ctx, &logical.Request{
+		Operation: logical.DeleteOperation,
+		Path:      "map/foo/a",
+		Storage:   storage,
+	})
+	if err != nil {
+		t.Fatalf("bad: %#v", err)
+	}
+	if resp != nil {
+		t.Fatalf("bad: %#v", resp)
+	}
+
+	// Re-read via HTTP
+	resp, err = b.HandleRequest(ctx, &logical.Request{
+		Operation: logical.ReadOperation,
+		Path:      "map/foo/a",
+		Storage:   storage,
+	})
+	if err != nil {
+		t.Fatalf("bad: %#v", err)
+	}
+	if _, ok := resp.Data["value"]; ok {
+		t.Fatalf("bad: %#v", resp)
+	}
+
+	// Re-read via API
+	v, err = p.Get(ctx, storage, "a")
+	if err != nil {
+		t.Fatalf("bad: %#v", err)
+	}
+	if v != nil {
+		t.Fatalf("bad: %#v", v)
+	}
+
+	// Put in a non-salted version and make sure that after reading it's been
+	// upgraded
+	err = storage.Put(ctx, &logical.StorageEntry{
+		Key:   "struct/map/foo/b",
+		Value: []byte(`{"foo": "bar"}`),
+	})
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	// A read should transparently upgrade
+	resp, err = b.HandleRequest(ctx, &logical.Request{
+		Operation: logical.ReadOperation,
+		Path:      "map/foo/b",
+		Storage:   storage,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	list, _ := storage.List(ctx, "struct/map/foo/")
+	if len(list) != 1 {
+		t.Fatalf("unexpected number of entries left after upgrade; expected 1, got %d", len(list))
+	}
+	found := false
+	for _, v := range list {
+		if v == "s"+salt.SaltIDHashFunc("b", saltpkg.SHA256Hash) {
+			found = true
+			break
+		}
+	}
+	if !found {
+		t.Fatal("did not find upgraded value")
+	}
+
+	// Put in a SHA1 salted version and make sure that after reading it's been
+	// upgraded
+	err = storage.Put(ctx, &logical.StorageEntry{
+		Key:   "struct/map/foo/" + salt.SaltID("b"),
+		Value: []byte(`{"foo": "bar"}`),
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// A read should transparently upgrade
+	resp, err = b.HandleRequest(ctx, &logical.Request{
+		Operation: logical.ReadOperation,
+		Path:      "map/foo/b",
+		Storage:   storage,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	list, _ = storage.List(ctx, "struct/map/foo/")
+ if len(list) != 1 { + t.Fatalf("unexpected number of entries left after upgrade; expected 1, got %d", len(list)) + } + found = false + for _, v := range list { + if v == "s"+salt.SaltIDHashFunc("b", saltpkg.SHA256Hash) { + found = true + break + } + } + if !found { + t.Fatal("did not find upgraded value") + } +} + +func TestPathMap_SaltFunc(t *testing.T) { + storage := new(logical.InmemStorage) + + salt, err := saltpkg.NewSalt(context.Background(), storage, &saltpkg.Config{ + HashFunc: saltpkg.SHA1Hash, + }) + if err != nil { + t.Fatalf("err: %v", err) + } + + saltFunc := func(context.Context) (*saltpkg.Salt, error) { + return salt, nil + } + + testSalting(t, context.Background(), storage, salt, &PathMap{Name: "foo", SaltFunc: saltFunc}) +} diff --git a/sdk/framework/path_struct.go b/sdk/framework/path_struct.go new file mode 100644 index 0000000..cba8550 --- /dev/null +++ b/sdk/framework/path_struct.go @@ -0,0 +1,127 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/logical" +) + +// PathStruct can be used to generate a path that stores a struct +// in the storage. This structure is a map[string]interface{} but the +// types are set according to the schema in this structure. +type PathStruct struct { + Name string + Path string + Schema map[string]*FieldSchema + HelpSynopsis string + HelpDescription string + + Read bool +} + +// Get reads the structure. +func (p *PathStruct) Get(ctx context.Context, s logical.Storage) (map[string]interface{}, error) { + entry, err := s.Get(ctx, fmt.Sprintf("struct/%s", p.Name)) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + + var result map[string]interface{} + if err := jsonutil.DecodeJSON(entry.Value, &result); err != nil { + return nil, err + } + + return result, nil +} + +// Put writes the structure. +func (p *PathStruct) Put(ctx context.Context, s logical.Storage, v map[string]interface{}) error { + bytes, err := json.Marshal(v) + if err != nil { + return err + } + + return s.Put(ctx, &logical.StorageEntry{ + Key: fmt.Sprintf("struct/%s", p.Name), + Value: bytes, + }) +} + +// Delete removes the structure. +func (p *PathStruct) Delete(ctx context.Context, s logical.Storage) error { + return s.Delete(ctx, fmt.Sprintf("struct/%s", p.Name)) +} + +// Paths are the paths to append to the Backend paths. 
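+//
+// For example, a hypothetical backend config endpoint might be wired up like
+// this (the "config" path name and "ttl" field are illustrative):
+//
+//	ps := &PathStruct{
+//		Name:   "config",
+//		Path:   "config",
+//		Schema: map[string]*FieldSchema{"ttl": {Type: TypeDurationSecond}},
+//		Read:   true,
+//	}
+//	b := &Backend{Paths: ps.Paths()}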
+func (p *PathStruct) Paths() []*Path { + // The single path we support to read/write this config + path := &Path{ + Pattern: p.Path, + Fields: p.Schema, + + Callbacks: map[logical.Operation]OperationFunc{ + logical.CreateOperation: p.pathWrite(), + logical.UpdateOperation: p.pathWrite(), + logical.DeleteOperation: p.pathDelete(), + }, + + ExistenceCheck: p.pathExistenceCheck(), + + HelpSynopsis: p.HelpSynopsis, + HelpDescription: p.HelpDescription, + } + + // If we support reads, add that + if p.Read { + path.Callbacks[logical.ReadOperation] = p.pathRead() + } + + return []*Path{path} +} + +func (p *PathStruct) pathRead() OperationFunc { + return func(ctx context.Context, req *logical.Request, d *FieldData) (*logical.Response, error) { + v, err := p.Get(ctx, req.Storage) + if err != nil { + return nil, err + } + + return &logical.Response{ + Data: v, + }, nil + } +} + +func (p *PathStruct) pathWrite() OperationFunc { + return func(ctx context.Context, req *logical.Request, d *FieldData) (*logical.Response, error) { + err := p.Put(ctx, req.Storage, d.Raw) + return nil, err + } +} + +func (p *PathStruct) pathDelete() OperationFunc { + return func(ctx context.Context, req *logical.Request, d *FieldData) (*logical.Response, error) { + err := p.Delete(ctx, req.Storage) + return nil, err + } +} + +func (p *PathStruct) pathExistenceCheck() ExistenceFunc { + return func(ctx context.Context, req *logical.Request, d *FieldData) (bool, error) { + v, err := p.Get(ctx, req.Storage) + if err != nil { + return false, err + } + + return v != nil, nil + } +} diff --git a/sdk/framework/path_struct_test.go b/sdk/framework/path_struct_test.go new file mode 100644 index 0000000..88662af --- /dev/null +++ b/sdk/framework/path_struct_test.go @@ -0,0 +1,98 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "context" + "testing" + + "github.com/hashicorp/vault/sdk/logical" +) + +func TestPathStruct(t *testing.T) { + p := &PathStruct{ + Name: "foo", + Path: "bar", + Schema: map[string]*FieldSchema{ + "value": {Type: TypeString}, + }, + Read: true, + } + + storage := new(logical.InmemStorage) + var b logical.Backend = &Backend{Paths: p.Paths()} + + ctx := context.Background() + + // Write via HTTP + _, err := b.HandleRequest(ctx, &logical.Request{ + Operation: logical.UpdateOperation, + Path: "bar", + Data: map[string]interface{}{ + "value": "baz", + }, + Storage: storage, + }) + if err != nil { + t.Fatalf("bad: %#v", err) + } + + // Read via HTTP + resp, err := b.HandleRequest(ctx, &logical.Request{ + Operation: logical.ReadOperation, + Path: "bar", + Storage: storage, + }) + if err != nil { + t.Fatalf("bad: %#v", err) + } + if resp.Data["value"] != "baz" { + t.Fatalf("bad: %#v", resp) + } + + // Read via API + v, err := p.Get(ctx, storage) + if err != nil { + t.Fatalf("bad: %#v", err) + } + if v["value"] != "baz" { + t.Fatalf("bad: %#v", v) + } + + // Delete via HTTP + resp, err = b.HandleRequest(ctx, &logical.Request{ + Operation: logical.DeleteOperation, + Path: "bar", + Data: nil, + Storage: storage, + }) + if err != nil { + t.Fatalf("bad: %#v", err) + } + if resp != nil { + t.Fatalf("bad: %#v", resp) + } + + // Re-read via HTTP + resp, err = b.HandleRequest(ctx, &logical.Request{ + Operation: logical.ReadOperation, + Path: "bar", + Storage: storage, + }) + if err != nil { + t.Fatalf("bad: %#v", err) + } + if _, ok := resp.Data["value"]; ok { + t.Fatalf("bad: %#v", resp) + } + + // Re-read via API + v, err = p.Get(ctx, storage) + if err != nil { + t.Fatalf("bad: %#v", err) + } + if v != nil { + t.Fatalf("bad: %#v", v) + } +} diff --git a/sdk/framework/path_test.go b/sdk/framework/path_test.go new file mode 100644 index 0000000..4541930 --- /dev/null +++ b/sdk/framework/path_test.go @@ -0,0 +1,100 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "testing" + + "github.com/go-test/deep" +) + +func TestPath_Regex(t *testing.T) { + tests := []struct { + pattern string + input string + pathMatch bool + captures map[string]string + }{ + { + pattern: "a/b/" + GenericNameRegex("val"), + input: "a/b/foo", + pathMatch: true, + captures: map[string]string{"val": "foo"}, + }, + { + pattern: "a/b/" + GenericNameRegex("val"), + input: "a/b/foo/more", + pathMatch: false, + captures: nil, + }, + { + pattern: "a/b/" + GenericNameRegex("val"), + input: "a/b/abc-.123", + pathMatch: true, + captures: map[string]string{"val": "abc-.123"}, + }, + { + pattern: "a/b/" + GenericNameRegex("val") + "/c/d", + input: "a/b/foo/c/d", + pathMatch: true, + captures: map[string]string{"val": "foo"}, + }, + { + pattern: "a/b/" + GenericNameRegex("val") + "/c/d", + input: "a/b/foo/c/d/e", + pathMatch: false, + captures: nil, + }, + { + pattern: "a/b" + OptionalParamRegex("val"), + input: "a/b", + pathMatch: true, + captures: map[string]string{"val": ""}, + }, + { + pattern: "a/b" + OptionalParamRegex("val"), + input: "a/b/foo", + pathMatch: true, + captures: map[string]string{"val": "foo"}, + }, + { + pattern: "foo/" + MatchAllRegex("val"), + input: "foos/ball", + pathMatch: false, + captures: nil, + }, + { + pattern: "foos/" + MatchAllRegex("val"), + input: "foos/ball", + pathMatch: true, + captures: map[string]string{"val": "ball"}, + }, + { + pattern: "foos/ball/" + MatchAllRegex("val"), + input: "foos/ball/with/more/stuff/at_the/end", + pathMatch: true, + captures: map[string]string{"val": "with/more/stuff/at_the/end"}, + }, + { + pattern: MatchAllRegex("val"), + input: "foos/ball/with/more/stuff/at_the/end", + pathMatch: true, + captures: map[string]string{"val": "foos/ball/with/more/stuff/at_the/end"}, + }, + } + + for i, test := range tests { + b := Backend{ + Paths: []*Path{{Pattern: test.pattern}}, + } + path, captures := b.route(test.input) + pathMatch := path != nil + if pathMatch != test.pathMatch { + t.Fatalf("[%d] unexpected path match result (%s): expected %t, actual %t", i, test.pattern, test.pathMatch, pathMatch) + } + if diff := deep.Equal(captures, test.captures); diff != nil { + t.Fatal(diff) + } + } +} diff --git a/sdk/framework/policy_map.go b/sdk/framework/policy_map.go new file mode 100644 index 0000000..94accf8 --- /dev/null +++ b/sdk/framework/policy_map.go @@ -0,0 +1,71 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "context" + "sort" + "strings" + + "github.com/hashicorp/vault/sdk/logical" +) + +// DEPRECATED: Don't use this. It's too inflexible, nearly impossible to use +// with some modern Vault features, and imposes specific API designs. +// +// PolicyMap is a specialization of PathMap that expects the values to +// be lists of policies. This assists in querying and loading policies +// from the PathMap. 
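+//
+// A brief, hypothetical sketch; the mapping name and policy strings are
+// illustrative, and ctx/s are an assumed context.Context and logical.Storage:
+//
+//	pm := &PolicyMap{}
+//	pm.PathMap.Name = "groups"
+//	_ = pm.Put(ctx, s, "admins", map[string]interface{}{"value": "root-policy,audit"})
+//	policies, _ := pm.Policies(ctx, s, "admins") // -> []string{"audit", "root-policy"}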
+type PolicyMap struct {
+	PathMap
+
+	DefaultKey string
+	PolicyKey  string
+}
+
+func (p *PolicyMap) Policies(ctx context.Context, s logical.Storage, names ...string) ([]string, error) {
+	policyKey := "value"
+	if p.PolicyKey != "" {
+		policyKey = p.PolicyKey
+	}
+
+	if p.DefaultKey != "" {
+		newNames := make([]string, len(names)+1)
+		newNames[0] = p.DefaultKey
+		copy(newNames[1:], names)
+		names = newNames
+	}
+
+	set := make(map[string]struct{})
+	for _, name := range names {
+		v, err := p.Get(ctx, s, name)
+		if err != nil {
+			return nil, err
+		}
+
+		valuesRaw, ok := v[policyKey]
+		if !ok {
+			continue
+		}
+
+		values, ok := valuesRaw.(string)
+		if !ok {
+			continue
+		}
+
+		for _, p := range strings.Split(values, ",") {
+			if p = strings.TrimSpace(p); p != "" {
+				set[p] = struct{}{}
+			}
+		}
+	}
+
+	list := make([]string, 0, len(set))
+	for k := range set {
+		list = append(list, k)
+	}
+	sort.Strings(list)
+
+	return list, nil
+}
diff --git a/sdk/framework/policy_map_test.go b/sdk/framework/policy_map_test.go
new file mode 100644
index 0000000..b785fdd
--- /dev/null
+++ b/sdk/framework/policy_map_test.go
@@ -0,0 +1,34 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package framework
+
+import (
+	"context"
+	"reflect"
+	"testing"
+
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+func TestPolicyMap(t *testing.T) {
+	p := &PolicyMap{}
+	p.PathMap.Name = "foo"
+	s := new(logical.InmemStorage)
+
+	ctx := context.Background()
+
+	p.Put(ctx, s, "foo", map[string]interface{}{"value": "bar"})
+	p.Put(ctx, s, "bar", map[string]interface{}{"value": "foo,baz "})
+
+	// Read via API
+	actual, err := p.Policies(ctx, s, "foo", "bar")
+	if err != nil {
+		t.Fatalf("bad: %#v", err)
+	}
+
+	expected := []string{"bar", "baz", "foo"}
+	if !reflect.DeepEqual(actual, expected) {
+		t.Fatalf("bad: %#v", actual)
+	}
+}
diff --git a/sdk/framework/secret.go b/sdk/framework/secret.go
new file mode 100644
index 0000000..095bc12
--- /dev/null
+++ b/sdk/framework/secret.go
@@ -0,0 +1,95 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package framework
+
+import (
+	"context"
+	"time"
+
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// Secret is a type of secret that can be returned from a backend.
+type Secret struct {
+	// Type is the name of this secret type. This is used to set up the
+	// vault ID and to look up the proper secret structure when revocation/
+	// renewal happens. Once this is set this should not be changed.
+	//
+	// The format of this must match (case insensitive): ^[a-zA-Z0-9_]+$
+	Type string
+
+	// Fields is the mapping of data fields and schema that comprise
+	// the structure of this secret.
+	Fields map[string]*FieldSchema
+
+	// DefaultDuration is the default value for the duration of the lease for
+	// this secret. This can be manually overwritten with the result of
+	// Response().
+	//
+	// If this isn't set, Vault core will set a default lease period which
+	// may come from a mount tuning.
+	DefaultDuration time.Duration
+
+	// Renew is the callback called to renew this secret. If Renew is
+	// not specified then renewable is set to false in the secret.
+	// See lease.go for helpers for this value.
+	Renew OperationFunc
+
+	// Revoke is the callback called to revoke this secret. This is required.
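+	//
+	// A hypothetical sketch of a revoke callback; the external client and
+	// field names are illustrative, not part of this package:
+	//
+	//	func (b *backend) secretCredsRevoke(ctx context.Context, req *logical.Request, d *FieldData) (*logical.Response, error) {
+	//		username, ok := req.Secret.InternalData["username"].(string)
+	//		if !ok {
+	//			return nil, fmt.Errorf("username is missing from internal data")
+	//		}
+	//		// Delete the credential in the external system here.
+	//		return nil, b.deleteExternalUser(ctx, username) // assumed helper
+	//	}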
+ Revoke OperationFunc +} + +func (s *Secret) Renewable() bool { + return s.Renew != nil +} + +func (s *Secret) Response( + data, internal map[string]interface{}, +) *logical.Response { + internalData := make(map[string]interface{}) + for k, v := range internal { + internalData[k] = v + } + internalData["secret_type"] = s.Type + + return &logical.Response{ + Secret: &logical.Secret{ + LeaseOptions: logical.LeaseOptions{ + TTL: s.DefaultDuration, + Renewable: s.Renewable(), + }, + InternalData: internalData, + }, + + Data: data, + } +} + +// HandleRenew is the request handler for renewing this secret. +func (s *Secret) HandleRenew(ctx context.Context, req *logical.Request) (*logical.Response, error) { + if !s.Renewable() { + return nil, logical.ErrUnsupportedOperation + } + + data := &FieldData{ + Raw: req.Data, + Schema: s.Fields, + } + + return s.Renew(ctx, req, data) +} + +// HandleRevoke is the request handler for revoking this secret. +func (s *Secret) HandleRevoke(ctx context.Context, req *logical.Request) (*logical.Response, error) { + data := &FieldData{ + Raw: req.Data, + Schema: s.Fields, + } + + if s.Revoke != nil { + return s.Revoke(ctx, req, data) + } + + return nil, logical.ErrUnsupportedOperation +} diff --git a/sdk/framework/secret_test.go b/sdk/framework/secret_test.go new file mode 100644 index 0000000..29058dc --- /dev/null +++ b/sdk/framework/secret_test.go @@ -0,0 +1,4 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package framework diff --git a/sdk/framework/template.go b/sdk/framework/template.go new file mode 100644 index 0000000..d395c8f --- /dev/null +++ b/sdk/framework/template.go @@ -0,0 +1,45 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "bufio" + "bytes" + "strings" + "text/template" + + "github.com/hashicorp/errwrap" +) + +func executeTemplate(tpl string, data interface{}) (string, error) { + // Define the functions + funcs := map[string]interface{}{ + "indent": funcIndent, + } + + // Parse the help template + t, err := template.New("root").Funcs(funcs).Parse(tpl) + if err != nil { + return "", errwrap.Wrapf("error parsing template: {{err}}", err) + } + + // Execute the template and store the output + var buf bytes.Buffer + if err := t.Execute(&buf, data); err != nil { + return "", errwrap.Wrapf("error executing template: {{err}}", err) + } + + return strings.TrimSpace(buf.String()), nil +} + +func funcIndent(count int, text string) string { + var buf bytes.Buffer + prefix := strings.Repeat(" ", count) + scan := bufio.NewScanner(strings.NewReader(text)) + for scan.Scan() { + buf.WriteString(prefix + scan.Text() + "\n") + } + + return strings.TrimRight(buf.String(), "\n") +} diff --git a/sdk/framework/testdata/legacy.json b/sdk/framework/testdata/legacy.json new file mode 100644 index 0000000..548151c --- /dev/null +++ b/sdk/framework/testdata/legacy.json @@ -0,0 +1,75 @@ +{ + "openapi": "3.0.2", + "info": { + "title": "HashiCorp Vault API", + "description": "HTTP API that gives you full access to Vault. 
All API routes are prefixed with `/v1/`.",
+    "version": "<vault_version>",
+    "license": {
+      "name": "Mozilla Public License 2.0",
+      "url": "https://www.mozilla.org/en-US/MPL/2.0"
+    }
+  },
+  "paths": {
+    "/lookup/{id}": {
+      "description": "Synopsis",
+      "parameters": [
+        {
+          "name": "id",
+          "description": "My id parameter",
+          "in": "path",
+          "schema": {
+            "type": "string"
+          },
+          "required": true
+        }
+      ],
+      "get": {
+        "operationId": "kv-read-lookup-id",
+        "summary": "Synopsis",
+        "tags": [
+          "secrets"
+        ],
+        "responses": {
+          "200": {
+            "description": "OK"
+          }
+        }
+      },
+      "post": {
+        "operationId": "kv-write-lookup-id",
+        "summary": "Synopsis",
+        "tags": [
+          "secrets"
+        ],
+        "requestBody": {
+          "required": true,
+          "content": {
+            "application/json": {
+              "schema": {
+                "$ref": "#/components/schemas/KvWriteLookupIdRequest"
+              }
+            }
+          }
+        },
+        "responses": {
+          "200": {
+            "description": "OK"
+          }
+        }
+      }
+    }
+  },
+  "components": {
+    "schemas": {
+      "KvWriteLookupIdRequest": {
+        "type": "object",
+        "properties": {
+          "token": {
+            "type": "string",
+            "description": "My token"
+          }
+        }
+      }
+    }
+  }
+}
diff --git a/sdk/framework/testdata/operations.json b/sdk/framework/testdata/operations.json
new file mode 100644
index 0000000..7fca0e2
--- /dev/null
+++ b/sdk/framework/testdata/operations.json
@@ -0,0 +1,141 @@
+{
+  "openapi": "3.0.2",
+  "info": {
+    "title": "HashiCorp Vault API",
+    "description": "HTTP API that gives you full access to Vault. All API routes are prefixed with `/v1/`.",
+    "version": "<vault_version>",
+    "license": {
+      "name": "Mozilla Public License 2.0",
+      "url": "https://www.mozilla.org/en-US/MPL/2.0"
+    }
+  },
+  "paths": {
+    "/foo/{id}": {
+      "description": "Synopsis",
+      "x-vault-createSupported": true,
+      "x-vault-sudo": true,
+      "x-vault-displayAttrs": {
+        "navigation": true
+      },
+      "parameters": [
+        {
+          "name": "format",
+          "description": "a query param",
+          "in": "query",
+          "schema": {
+            "type": "string"
+          }
+        },
+        {
+          "name": "id",
+          "description": "id path parameter",
+          "in": "path",
+          "schema": {
+            "type": "string"
+          },
+          "required": true
+        }
+      ],
+      "get": {
+        "operationId": "kv-read-foo-id",
+        "tags": [
+          "secrets"
+        ],
+        "summary": "My Summary",
+        "description": "My Description",
+        "responses": {
+          "200": {
+            "description": "OK"
+          }
+        },
+        "parameters": [
+          {
+            "name": "list",
+            "description": "Return a list if `true`",
+            "in": "query",
+            "schema": {
+              "type": "string"
+            }
+          }
+        ]
+      },
+      "post": {
+        "operationId": "kv-write-foo-id",
+        "tags": [
+          "secrets"
+        ],
+        "summary": "Update Summary",
+        "description": "Update Description",
+        "requestBody": {
+          "required": true,
+          "content": {
+            "application/json": {
+              "schema": {
+                "$ref": "#/components/schemas/KvWriteFooIdRequest"
+              }
+            }
+          }
+        },
+        "responses": {
+          "200": {
+            "description": "OK"
+          }
+        }
+      }
+    }
+  },
+  "components": {
+    "schemas": {
+      "KvWriteFooIdRequest": {
+        "type": "object",
+        "required": [
+          "age"
+        ],
+        "properties": {
+          "flavors": {
+            "type": "array",
+            "description": "the flavors",
+            "items": {
+              "type": "string"
+            }
+          },
+          "age": {
+            "type": "integer",
+            "description": "the age",
+            "enum": [
+              1,
+              2,
+              3
+            ],
+            "x-vault-displayAttrs": {
+              "name": "Age",
+              "sensitive": true,
+              "group": "Some Group",
+              "value": 7
+            }
+          },
+          "name": {
+            "type": "string",
+            "description": "the name",
+            "default": "Larry",
+            "pattern": "\\w([\\w-.]*\\w)?"
+          },
+          "x-abc-token": {
+            "type": "string",
+            "description": "a header value",
+            "enum": [
+              "a",
+              "b",
+              "c"
+            ]
+          },
+          "maximum": {
+            "type": "integer",
+            "description": "a maximum value",
+            "format": "int64"
+          }
+        }
+      }
+    }
+  }
+}
diff --git a/sdk/framework/testdata/operations_list.json b/sdk/framework/testdata/operations_list.json
new file mode 100644
index 0000000..a08208b
--- /dev/null
+++ b/sdk/framework/testdata/operations_list.json
@@ -0,0 +1,70 @@
+{
+  "openapi": "3.0.2",
+  "info": {
+    "title": "HashiCorp Vault API",
+    "description": "HTTP API that gives you full access to Vault. All API routes are prefixed with `/v1/`.",
+    "version": "<vault_version>",
+    "license": {
+      "name": "Mozilla Public License 2.0",
+      "url": "https://www.mozilla.org/en-US/MPL/2.0"
+    }
+  },
+  "paths": {
+    "/foo/{id}": {
+      "description": "Synopsis",
+      "x-vault-sudo": true,
+      "x-vault-displayAttrs": {
+        "navigation": true
+      },
+      "parameters": [
+        {
+          "name": "format",
+          "description": "a query param",
+          "in": "query",
+          "schema": {
+            "type": "string"
+          }
+        },
+        {
+          "name": "id",
+          "description": "id path parameter",
+          "in": "path",
+          "schema": {
+            "type": "string"
+          },
+          "required": true
+        }
+      ],
+      "get": {
+        "operationId": "kv-list-foo-id",
+        "tags": [
+          "secrets"
+        ],
+        "summary": "List Summary",
+        "description": "List Description",
+        "responses": {
+          "200": {
+            "description": "OK"
+          }
+        },
+        "parameters": [
+          {
+            "name": "list",
+            "description": "Must be set to `true`",
+            "required": true,
+            "in": "query",
+            "schema": {
+              "type": "string",
+              "enum": [
+                "true"
+              ]
+            }
+          }
+        ]
+      }
+    }
+  },
+  "components": {
+    "schemas": {}
+  }
+}
diff --git a/sdk/framework/testdata/responses.json b/sdk/framework/testdata/responses.json
new file mode 100644
index 0000000..98d501e
--- /dev/null
+++ b/sdk/framework/testdata/responses.json
@@ -0,0 +1,67 @@
+{
+  "openapi": "3.0.2",
+  "info": {
+    "title": "HashiCorp Vault API",
+    "description": "HTTP API that gives you full access to Vault. All API routes are prefixed with `/v1/`.",
+    "version": "<vault_version>",
+    "license": {
+      "name": "Mozilla Public License 2.0",
+      "url": "https://www.mozilla.org/en-US/MPL/2.0"
+    }
+  },
+  "paths": {
+    "/foo": {
+      "description": "Synopsis",
+      "x-vault-unauthenticated": true,
+      "delete": {
+        "operationId": "kv-delete-foo",
+        "tags": [
+          "secrets"
+        ],
+        "summary": "Delete stuff",
+        "responses": {
+          "204": {
+            "description": "empty body"
+          }
+        }
+      },
+      "get": {
+        "operationId": "kv-read-foo",
+        "tags": [
+          "secrets"
+        ],
+        "summary": "My Summary",
+        "description": "My Description",
+        "responses": {
+          "202": {
+            "description": "Amazing",
+            "content": {
+              "application/json": {
+                "schema": {
+                  "$ref": "#/components/schemas/KvReadFooResponse"
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+  },
+  "components": {
+    "schemas": {
+      "KvReadFooResponse": {
+        "type": "object",
+        "properties": {
+          "field_a": {
+            "type": "string",
+            "description": "field_a description"
+          },
+          "field_b": {
+            "type": "boolean",
+            "description": "field_b description"
+          }
+        }
+      }
+    }
+  }
+}
diff --git a/sdk/framework/testing.go b/sdk/framework/testing.go
new file mode 100644
index 0000000..d2035d6
--- /dev/null
+++ b/sdk/framework/testing.go
@@ -0,0 +1,18 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package framework
+
+import (
+	"testing"
+)
+
+// TestBackendRoutes is a helper to test that all the given routes will
+// route properly in the backend.
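+//
+// A short illustrative use (the pattern and routes below are hypothetical):
+//
+//	TestBackendRoutes(t, &Backend{
+//		Paths: []*Path{{Pattern: "keys/" + GenericNameRegex("name")}},
+//	}, []string{"keys/foo", "keys/bar-baz"})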
+func TestBackendRoutes(t *testing.T, b *Backend, rs []string) {
+	for _, r := range rs {
+		if b.Route(r) == nil {
+			t.Fatalf("bad route: %s", r)
+		}
+	}
+}
diff --git a/sdk/framework/wal.go b/sdk/framework/wal.go
new file mode 100644
index 0000000..b090f03
--- /dev/null
+++ b/sdk/framework/wal.go
@@ -0,0 +1,104 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package framework
+
+import (
+	"context"
+	"encoding/json"
+	"strings"
+	"time"
+
+	uuid "github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/vault/sdk/helper/jsonutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// WALPrefix is the prefix within Storage where WAL entries will be written.
+const WALPrefix = "wal/"
+
+type WALEntry struct {
+	ID        string      `json:"-"`
+	Kind      string      `json:"type"`
+	Data      interface{} `json:"data"`
+	CreatedAt int64       `json:"created_at"`
+}
+
+// PutWAL writes some data to the WAL.
+//
+// The kind parameter is used by the framework to allow users to store
+// multiple kinds of WAL data and to easily disambiguate what data they're
+// expecting.
+//
+// Data within the WAL that is uncommitted (DeleteWAL hasn't been called)
+// will be given to the rollback callback when a rollback operation is
+// received, allowing the backend to clean up some partial states.
+//
+// The data must be JSON encodable.
+//
+// This returns a unique ID that can be used to reference this WAL data.
+// WAL data cannot be modified. You can only add to the WAL and commit existing
+// WAL entries.
+func PutWAL(ctx context.Context, s logical.Storage, kind string, data interface{}) (string, error) {
+	value, err := json.Marshal(&WALEntry{
+		Kind:      kind,
+		Data:      data,
+		CreatedAt: time.Now().UTC().Unix(),
+	})
+	if err != nil {
+		return "", err
+	}
+
+	id, err := uuid.GenerateUUID()
+	if err != nil {
+		return "", err
+	}
+
+	return id, s.Put(ctx, &logical.StorageEntry{
+		Key:   WALPrefix + id,
+		Value: value,
+	})
+}
+
+// GetWAL reads a specific entry from the WAL. If the entry doesn't exist,
+// then a nil value is returned.
+//
+// The WAL entry and error are returned.
+func GetWAL(ctx context.Context, s logical.Storage, id string) (*WALEntry, error) {
+	entry, err := s.Get(ctx, WALPrefix+id)
+	if err != nil {
+		return nil, err
+	}
+	if entry == nil {
+		return nil, nil
+	}
+
+	var raw WALEntry
+	if err := jsonutil.DecodeJSON(entry.Value, &raw); err != nil {
+		return nil, err
+	}
+	raw.ID = id
+
+	return &raw, nil
+}
+
+// DeleteWAL commits the WAL entry with the given ID. Once committed,
+// it is assumed that the operation was a success and doesn't need to
+// be rolled back.
+func DeleteWAL(ctx context.Context, s logical.Storage, id string) error {
+	return s.Delete(ctx, WALPrefix+id)
+}
+
+// ListWAL lists all the entries in the WAL.
+func ListWAL(ctx context.Context, s logical.Storage) ([]string, error) {
+	keys, err := s.List(ctx, WALPrefix)
+	if err != nil {
+		return nil, err
+	}
+
+	for i, k := range keys {
+		keys[i] = strings.TrimPrefix(k, WALPrefix)
+	}
+
+	return keys, nil
+}
diff --git a/sdk/framework/wal_test.go b/sdk/framework/wal_test.go
new file mode 100644
index 0000000..0407492
--- /dev/null
+++ b/sdk/framework/wal_test.go
@@ -0,0 +1,66 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package framework + +import ( + "context" + "reflect" + "testing" + + "github.com/hashicorp/vault/sdk/logical" +) + +func TestWAL(t *testing.T) { + s := new(logical.InmemStorage) + + ctx := context.Background() + + // WAL should be empty to start + keys, err := ListWAL(ctx, s) + if err != nil { + t.Fatalf("err: %s", err) + } + if len(keys) > 0 { + t.Fatalf("bad: %#v", keys) + } + + // Write an entry to the WAL + id, err := PutWAL(ctx, s, "foo", "bar") + if err != nil { + t.Fatalf("err: %s", err) + } + + // The key should be in the WAL + keys, err = ListWAL(ctx, s) + if err != nil { + t.Fatalf("err: %s", err) + } + if !reflect.DeepEqual(keys, []string{id}) { + t.Fatalf("bad: %#v", keys) + } + + // Should be able to get the value + entry, err := GetWAL(ctx, s, id) + if err != nil { + t.Fatalf("err: %s", err) + } + if entry.Kind != "foo" { + t.Fatalf("bad: %#v", entry) + } + if entry.Data != "bar" { + t.Fatalf("bad: %#v", entry) + } + + // Should be able to delete the value + if err := DeleteWAL(ctx, s, id); err != nil { + t.Fatalf("err: %s", err) + } + entry, err = GetWAL(ctx, s, id) + if err != nil { + t.Fatalf("err: %s", err) + } + if entry != nil { + t.Fatalf("bad: %#v", entry) + } +} diff --git a/sdk/go.mod b/sdk/go.mod new file mode 100644 index 0000000..c4a2ec9 --- /dev/null +++ b/sdk/go.mod @@ -0,0 +1,96 @@ +module github.com/hashicorp/vault/sdk + +go 1.19 + +require ( + github.com/armon/go-metrics v0.4.1 + github.com/armon/go-radix v1.0.0 + github.com/cenkalti/backoff/v3 v3.2.2 + github.com/docker/docker v23.0.4+incompatible + github.com/docker/go-connections v0.4.0 + github.com/evanphx/json-patch/v5 v5.6.0 + github.com/fatih/structs v1.1.0 + github.com/go-ldap/ldap/v3 v3.4.1 + github.com/go-test/deep v1.1.0 + github.com/golang/protobuf v1.5.2 + github.com/golang/snappy v0.0.4 + github.com/google/tink/go v1.7.0 + github.com/hashicorp/errwrap v1.1.0 + github.com/hashicorp/go-cleanhttp v0.5.2 + github.com/hashicorp/go-hclog v1.4.0 + github.com/hashicorp/go-immutable-radix v1.3.1 + github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0 + github.com/hashicorp/go-kms-wrapping/v2 v2.0.8 + github.com/hashicorp/go-multierror v1.1.1 + github.com/hashicorp/go-plugin v1.4.8 + github.com/hashicorp/go-retryablehttp v0.7.1 + github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 + github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 + github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 + github.com/hashicorp/go-secure-stdlib/password v0.1.1 + github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 + github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2 + github.com/hashicorp/go-sockaddr v1.0.2 + github.com/hashicorp/go-uuid v1.0.3 + github.com/hashicorp/go-version v1.6.0 + github.com/hashicorp/golang-lru v0.5.4 + github.com/hashicorp/hcl v1.0.1-vault-5 + github.com/hashicorp/vault/api v1.9.1 + github.com/mitchellh/copystructure v1.2.0 + github.com/mitchellh/go-testing-interface v1.14.1 + github.com/mitchellh/mapstructure v1.5.0 + github.com/pierrec/lz4 v2.6.1+incompatible + github.com/ryanuber/go-glob v1.0.0 + github.com/stretchr/testify v1.8.2 + go.uber.org/atomic v1.9.0 + golang.org/x/crypto v0.6.0 + golang.org/x/net v0.8.0 + golang.org/x/text v0.8.0 + google.golang.org/grpc v1.53.0 + google.golang.org/protobuf v1.28.1 +) + +require ( + github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/containerd/containerd v1.7.0 // indirect + github.com/davecgh/go-spew 
v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/docker/distribution v2.8.1+incompatible // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/fatih/color v1.13.0 // indirect + github.com/frankban/quicktest v1.11.3 // indirect + github.com/go-asn1-ber/asn1-ber v1.5.1 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 // indirect + github.com/klauspost/compress v1.16.5 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/patternmatcher v0.5.0 // indirect + github.com/moby/sys/sequential v0.5.0 // indirect + github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect + github.com/morikuni/aec v1.0.0 // indirect + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect + github.com/oklog/run v1.1.0 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect + github.com/opencontainers/runc v1.1.6 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/rogpeppe/go-internal v1.8.1 // indirect + github.com/sirupsen/logrus v1.9.0 // indirect + github.com/stretchr/objx v0.5.0 // indirect + golang.org/x/mod v0.8.0 // indirect + golang.org/x/sys v0.6.0 // indirect + golang.org/x/term v0.6.0 // indirect + golang.org/x/time v0.0.0-20220411224347-583f2d630306 // indirect + golang.org/x/tools v0.6.0 // indirect + google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect + gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect + gopkg.in/square/go-jose.v2 v2.6.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + gotest.tools/v3 v3.4.0 // indirect +) diff --git a/sdk/go.sum b/sdk/go.sum new file mode 100644 index 0000000..a6b0f59 --- /dev/null +++ b/sdk/go.sum @@ -0,0 +1,349 @@ +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c h1:/IBSNwUN8+eKzUzbJPqhK839ygXJ82sde8x3ogr6R28= +github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/hcsshim v0.10.0-rc.7 h1:HBytQPxcv8Oy4244zbQbe6hnOnx544eL5QPUqhJldz8= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod 
h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= +github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/containerd/containerd v1.7.0 h1:G/ZQr3gMZs6ZT0qPUZ15znx5QSdQdASW11nXTLTM2Pg= +github.com/containerd/containerd v1.7.0/go.mod h1:QfR7Efgb/6X2BDpTPJRvPTYDE9rsF0FsXX9J8sIs/sc= +github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= +github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v23.0.4+incompatible h1:Kd3Bh9V/rO+XpTP/BLqM+gx8z7+Yb0AA2Ibj+nNo4ek= +github.com/docker/docker v23.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= +github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= 
+github.com/go-asn1-ber/asn1-ber v1.5.1 h1:pDbRAunXzIUXfx4CB2QJFv5IuPiuoW+sWvr/Us009o8= +github.com/go-asn1-ber/asn1-ber v1.5.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-ldap/ldap/v3 v3.4.1 h1:fU/0xli6HY02ocbMuozHAYsaHLcnkLjvho2r5a34BUU= +github.com/go-ldap/ldap/v3 v3.4.1/go.mod h1:iYS1MdmrmceOJ1QOTnRXrIs7i3kloqtmGQjRvjKpyMg= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= +github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/tink/go v1.7.0 h1:6Eox8zONGebBFcCBqkVmt60LaWZa6xg1cl/DwAh/J1w= +github.com/google/tink/go v1.7.0/go.mod h1:GAUOd+QE3pgj9q8VKIGTCP33c/B7eb4NhxLcgTJZStM= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I= +github.com/hashicorp/go-hclog v1.4.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= 
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0 h1:pSjQfW3vPtrOTcasTUKgCTQT7OGPPTTMVRrOfU6FJD8= +github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.8 h1:9Q2lu1YbbmiAgvYZ7Pr31RdlVonUpX+mmDL7Z7qTA2U= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.8/go.mod h1:qTCjxGig/kjuj3hk1z8pOUrzbse/GxB1tGfbrq8tGJg= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.4.8 h1:CHGwpxYDOttQOY7HOWgETU9dyVjOXzniXDqJcYJE1zM= +github.com/hashicorp/go-plugin v1.4.8/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ= +github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/base62 v0.1.2 h1:ET4pqyjiGmY09R5y+rSd70J2w45CtbWDNvGqWp/R3Ng= +github.com/hashicorp/go-secure-stdlib/base62 v0.1.2/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 h1:p4AKXPPS24tO8Wc8i1gLvSKdmkiSY5xuju57czJ/IJQ= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.2/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/password v0.1.1 h1:6JzmBqXprakgFEHwBgdchsjaA9x3GyjdI568bXKxa60= +github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2 h1:phcbL8urUzF/kxA/Oj6awENaRwfWsjP59GW7u2qlDyY= +github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid 
v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= +github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= +github.com/hashicorp/vault/api v1.9.1 h1:LtY/I16+5jVGU8rufyyAkwopgq/HpUnxFBg+QLOAV38= +github.com/hashicorp/vault/api v1.9.1/go.mod h1:78kktNcQYbBGSrOjQfHjXN32OhhxXnbYl3zxpd2uPUs= +github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 h1:xixZ2bWeofWV68J+x6AzmKuVM/JWCQwkWm6GW/MUR6I= +github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= +github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= 
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo= +github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= +github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= 
+github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8= +github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= +github.com/opencontainers/runc v1.1.6 h1:XbhB8IfG/EsnhNvZtNdLB0GBw92GYEFvKlhaJk9jUgA= +github.com/opencontainers/runc v1.1.6/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= +github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= 
+github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text 
v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/time v0.0.0-20220411224347-583f2d630306 h1:+gHMid33q6pen7kv9xvT+JRinntgeXO2AeZVd0AWD3w= +golang.org/x/time v0.0.0-20220411224347-583f2d630306/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= +gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= +gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= diff --git a/sdk/helper/authmetadata/auth_metadata.go b/sdk/helper/authmetadata/auth_metadata.go new file mode 100644 index 0000000..e490ab3 --- /dev/null +++ b/sdk/helper/authmetadata/auth_metadata.go @@ -0,0 +1,203 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package authmetadata + +/* + authmetadata is a package offering convenience and + standardization when supporting an `auth_metadata` + field in a plugin's configuration. This then controls + what metadata is added to an Auth during login. + + To see an example of how to add and use it, check out + how these structs and fields are used in the AWS auth + method. + + Or, check out its acceptance test in this package to + see its integration points. +*/ + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +// Fields is for configuring a back-end's available +// default and additional fields. These are used for +// providing a verbose field description, and for parsing +// user input. +type Fields struct { + // The field name as it'll be reflected in the user-facing + // schema. + FieldName string + + // Default is a list of the default fields that should + // be included if a user sends "default" in their list + // of desired fields. These fields should all have a + // low rate of change because each change can incur a + // write to storage. + Default []string + + // AvailableToAdd is a list of fields not included by + // default, that the user may include. + AvailableToAdd []string +} + +func (f *Fields) all() []string { + return append(f.Default, f.AvailableToAdd...) +} + +// FieldSchema takes the default and additionally available +// fields, and uses them to generate a verbose description +// regarding how to use the "auth_metadata" field. +func FieldSchema(fields *Fields) *framework.FieldSchema { + return &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: description(fields), + DisplayAttrs: &framework.DisplayAttributes{ + Name: fields.FieldName, + Value: "field1,field2", + }, + Default: []string{"default"}, + } +} + +func NewHandler(fields *Fields) *Handler { + return &Handler{ + fields: fields, + } +} + +type Handler struct { + // authMetadata is an explicit list of all the user's configured + // fields that are being added to auth metadata. If it is set to + // default or unconfigured, it will be nil. Otherwise, it will + // hold the explicit fields set by the user. + authMetadata []string + + // fields is a list of the configured default and available + // fields. + fields *Fields +} + +// AuthMetadata is intended to be used on config reads. +// It gets an explicit list of all the user's configured +// fields that are being added to auth metadata. +func (h *Handler) AuthMetadata() []string { + if h.authMetadata == nil { + return h.fields.Default + } + return h.authMetadata +} + +// ParseAuthMetadata is intended to be used on config create/update. +// It takes a user's selected fields (or lack thereof), +// converts it to a list of explicit fields, and adds it to the Handler +// for later storage. 
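+//
+// A config update handler would typically call it along these lines
+// (a sketch mirroring the acceptance test in this package; conf is
+// assumed to embed a *Handler and fd is the request's field data):
+//
+//	if err := conf.ParseAuthMetadata(fd); err != nil {
+//		return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
+//	}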
+func (h *Handler) ParseAuthMetadata(data *framework.FieldData) error {
+	userProvidedRaw, ok := data.GetOk(h.fields.FieldName)
+	if !ok {
+		// Nothing further to do here.
+		return nil
+	}
+	userProvided, ok := userProvidedRaw.([]string)
+	if !ok {
+		return fmt.Errorf("%v is an unexpected type %T", userProvidedRaw, userProvidedRaw)
+	}
+	userProvided = strutil.RemoveDuplicates(userProvided, true)
+
+	// If the only field the user has chosen is the default field,
+	// we don't store anything, so we won't need a storage
+	// migration if the default changes.
+	if len(userProvided) == 1 && userProvided[0] == "default" {
+		h.authMetadata = nil
+		return nil
+	}
+
+	// Validate and store the input.
+	if strutil.StrListContains(userProvided, "default") {
+		return fmt.Errorf("%q contains default - default can't be used in combination with other fields",
+			userProvided)
+	}
+	if !strutil.StrListSubset(h.fields.all(), userProvided) {
+		return fmt.Errorf("%q contains an unavailable field, please select from %q",
+			strings.Join(userProvided, ", "), strings.Join(h.fields.all(), ", "))
+	}
+	h.authMetadata = userProvided
+	return nil
+}
+
+// PopulateDesiredMetadata is intended to be used during login
+// just before returning an auth.
+// It takes the available auth metadata and,
+// if the auth should have it, adds it to the auth's metadata.
+func (h *Handler) PopulateDesiredMetadata(auth *logical.Auth, available map[string]string) error {
+	if auth == nil {
+		return errors.New("auth is nil")
+	}
+	if auth.Metadata == nil {
+		auth.Metadata = make(map[string]string)
+	}
+	if auth.Alias == nil {
+		auth.Alias = &logical.Alias{}
+	}
+	if auth.Alias.Metadata == nil {
+		auth.Alias.Metadata = make(map[string]string)
+	}
+	fieldsToInclude := h.fields.Default
+	if h.authMetadata != nil {
+		fieldsToInclude = h.authMetadata
+	}
+	for availableField, itsValue := range available {
+		if itsValue == "" {
+			// Don't bother setting fields for which there is no value.
+			continue
+		}
+		if strutil.StrListContains(fieldsToInclude, availableField) {
+			auth.Metadata[availableField] = itsValue
+			auth.Alias.Metadata[availableField] = itsValue
+		}
+	}
+	return nil
+}
+
+func (h *Handler) MarshalJSON() ([]byte, error) {
+	return json.Marshal(&struct {
+		AuthMetadata []string `json:"auth_metadata"`
+	}{
+		AuthMetadata: h.authMetadata,
+	})
+}
+
+func (h *Handler) UnmarshalJSON(data []byte) error {
+	jsonable := &struct {
+		AuthMetadata []string `json:"auth_metadata"`
+	}{
+		AuthMetadata: h.authMetadata,
+	}
+	if err := json.Unmarshal(data, jsonable); err != nil {
+		return err
+	}
+	h.authMetadata = jsonable.AuthMetadata
+	return nil
+}
+
+func description(fields *Fields) string {
+	desc := "The metadata to include on the aliases and audit logs generated by this plugin."
+	if len(fields.Default) > 0 {
+		desc += fmt.Sprintf(" When set to 'default', includes: %s.", strings.Join(fields.Default, ", "))
+	}
+	if len(fields.AvailableToAdd) > 0 {
+		desc += fmt.Sprintf(" These fields are available to add: %s.", strings.Join(fields.AvailableToAdd, ", "))
+	}
+	desc += " Not editing this field means the 'default' fields are included." +
+		" Explicitly setting this field to empty overrides the 'default' and means no metadata will be included." +
+		" If not using 'default', explicit fields must be sent like: 'field1,field2'."
+ return desc +} diff --git a/sdk/helper/authmetadata/auth_metadata_acc_test.go b/sdk/helper/authmetadata/auth_metadata_acc_test.go new file mode 100644 index 0000000..189c960 --- /dev/null +++ b/sdk/helper/authmetadata/auth_metadata_acc_test.go @@ -0,0 +1,480 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package authmetadata + +import ( + "context" + "fmt" + "reflect" + "testing" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +type environment struct { + ctx context.Context + storage logical.Storage + backend logical.Backend +} + +func TestAcceptance(t *testing.T) { + ctx := context.Background() + storage := &logical.InmemStorage{} + b, err := backend(ctx, storage) + if err != nil { + t.Fatal(err) + } + env := &environment{ + ctx: ctx, + storage: storage, + backend: b, + } + t.Run("test initial fields are default", env.TestInitialFieldsAreDefault) + t.Run("test fields can be unset", env.TestAuthMetadataCanBeUnset) + t.Run("test defaults can be restored", env.TestDefaultCanBeReused) + t.Run("test default plus more cannot be selected", env.TestDefaultPlusMoreCannotBeSelected) + t.Run("test only non-defaults can be selected", env.TestOnlyNonDefaultsCanBeSelected) + t.Run("test bad field results in useful error", env.TestAddingBadField) +} + +func (e *environment) TestInitialFieldsAreDefault(t *testing.T) { + // On the first read of auth_metadata, when nothing has been touched, + // we should receive the default field(s) if a read is performed. + resp, err := e.backend.HandleRequest(e.ctx, &logical.Request{ + Operation: logical.ReadOperation, + Path: "config", + Storage: e.storage, + Connection: &logical.Connection{ + RemoteAddr: "http://foo.com", + }, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.Data == nil { + t.Fatal("expected non-nil response") + } + if !reflect.DeepEqual(resp.Data[authMetadataFields.FieldName], []string{"role_name"}) { + t.Fatal("expected default field of role_name to be returned") + } + + // The auth should only have the default metadata. + resp, err = e.backend.HandleRequest(e.ctx, &logical.Request{ + Operation: logical.UpdateOperation, + Path: "login", + Storage: e.storage, + Connection: &logical.Connection{ + RemoteAddr: "http://foo.com", + }, + Data: map[string]interface{}{ + "role_name": "something", + }, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.Auth == nil || resp.Auth.Alias == nil || resp.Auth.Alias.Metadata == nil { + t.Fatalf("expected alias metadata") + } + if len(resp.Auth.Alias.Metadata) != 1 { + t.Fatal("expected only 1 field") + } + if resp.Auth.Alias.Metadata["role_name"] != "something" { + t.Fatal("expected role_name to be something") + } +} + +func (e *environment) TestAuthMetadataCanBeUnset(t *testing.T) { + // We should be able to set the auth_metadata to empty by sending an + // explicitly empty array. + resp, err := e.backend.HandleRequest(e.ctx, &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config", + Storage: e.storage, + Connection: &logical.Connection{ + RemoteAddr: "http://foo.com", + }, + Data: map[string]interface{}{ + authMetadataFields.FieldName: []string{}, + }, + }) + if err != nil { + t.Fatal(err) + } + if resp != nil { + t.Fatal("expected nil response") + } + + // Now we should receive no fields for auth_metadata. 
+ resp, err = e.backend.HandleRequest(e.ctx, &logical.Request{ + Operation: logical.ReadOperation, + Path: "config", + Storage: e.storage, + Connection: &logical.Connection{ + RemoteAddr: "http://foo.com", + }, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.Data == nil { + t.Fatal("expected non-nil response") + } + if !reflect.DeepEqual(resp.Data[authMetadataFields.FieldName], []string{}) { + t.Fatal("expected no fields to be returned") + } + + // The auth should have no metadata. + resp, err = e.backend.HandleRequest(e.ctx, &logical.Request{ + Operation: logical.UpdateOperation, + Path: "login", + Storage: e.storage, + Connection: &logical.Connection{ + RemoteAddr: "http://foo.com", + }, + Data: map[string]interface{}{ + "role_name": "something", + }, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.Auth == nil || resp.Auth.Alias == nil || resp.Auth.Alias.Metadata == nil { + t.Fatal("expected alias metadata") + } + if len(resp.Auth.Alias.Metadata) != 0 { + t.Fatal("expected 0 fields") + } +} + +func (e *environment) TestDefaultCanBeReused(t *testing.T) { + // Now if we set it to "default", the default fields should + // be restored. + resp, err := e.backend.HandleRequest(e.ctx, &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config", + Storage: e.storage, + Connection: &logical.Connection{ + RemoteAddr: "http://foo.com", + }, + Data: map[string]interface{}{ + authMetadataFields.FieldName: []string{"default"}, + }, + }) + if err != nil { + t.Fatal(err) + } + if resp != nil { + t.Fatal("expected nil response") + } + + // Let's make sure we've returned to the default fields. + resp, err = e.backend.HandleRequest(e.ctx, &logical.Request{ + Operation: logical.ReadOperation, + Path: "config", + Storage: e.storage, + Connection: &logical.Connection{ + RemoteAddr: "http://foo.com", + }, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.Data == nil { + t.Fatal("expected non-nil response") + } + if !reflect.DeepEqual(resp.Data[authMetadataFields.FieldName], []string{"role_name"}) { + t.Fatal("expected default field of role_name to be returned") + } + + // We should again only receive the default field on the login. + resp, err = e.backend.HandleRequest(e.ctx, &logical.Request{ + Operation: logical.UpdateOperation, + Path: "login", + Storage: e.storage, + Connection: &logical.Connection{ + RemoteAddr: "http://foo.com", + }, + Data: map[string]interface{}{ + "role_name": "something", + }, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil || resp.Auth == nil || resp.Auth.Alias == nil || resp.Auth.Alias.Metadata == nil { + t.Fatal("expected alias metadata") + } + if len(resp.Auth.Alias.Metadata) != 1 { + t.Fatal("expected only 1 field") + } + if resp.Auth.Alias.Metadata["role_name"] != "something" { + t.Fatal("expected role_name to be something") + } +} + +func (e *environment) TestDefaultPlusMoreCannotBeSelected(t *testing.T) { + // We should not be able to set it to "default" plus 1 optional field. + _, err := e.backend.HandleRequest(e.ctx, &logical.Request{ + Operation: logical.UpdateOperation, + Path: "config", + Storage: e.storage, + Connection: &logical.Connection{ + RemoteAddr: "http://foo.com", + }, + Data: map[string]interface{}{ + authMetadataFields.FieldName: []string{"default", "remote_addr"}, + }, + }) + if err == nil { + t.Fatal("expected err") + } +} + +func (e *environment) TestOnlyNonDefaultsCanBeSelected(t *testing.T) { + // Omit all default fields and just select one. 
+	resp, err := e.backend.HandleRequest(e.ctx, &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "config",
+		Storage:   e.storage,
+		Connection: &logical.Connection{
+			RemoteAddr: "http://foo.com",
+		},
+		Data: map[string]interface{}{
+			authMetadataFields.FieldName: []string{"remote_addr"},
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp != nil {
+		t.Fatal("expected nil response")
+	}
+
+	// Make sure that worked.
+	resp, err = e.backend.HandleRequest(e.ctx, &logical.Request{
+		Operation: logical.ReadOperation,
+		Path:      "config",
+		Storage:   e.storage,
+		Connection: &logical.Connection{
+			RemoteAddr: "http://foo.com",
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp == nil || resp.Data == nil {
+		t.Fatal("expected non-nil response")
+	}
+	if !reflect.DeepEqual(resp.Data[authMetadataFields.FieldName], []string{"remote_addr"}) {
+		t.Fatal("expected remote_addr to be returned")
+	}
+
+	// Ensure only the selected field appears on the login.
+	resp, err = e.backend.HandleRequest(e.ctx, &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "login",
+		Storage:   e.storage,
+		Connection: &logical.Connection{
+			RemoteAddr: "http://foo.com",
+		},
+		Data: map[string]interface{}{
+			"role_name": "something",
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp == nil || resp.Auth == nil || resp.Auth.Alias == nil || resp.Auth.Alias.Metadata == nil {
+		t.Fatal("expected alias metadata")
+	}
+	if len(resp.Auth.Alias.Metadata) != 1 {
+		t.Fatal("expected only 1 field")
+	}
+	if resp.Auth.Alias.Metadata["remote_addr"] != "http://foo.com" {
+		t.Fatal("expected remote_addr to be http://foo.com")
+	}
+}
+
+func (e *environment) TestAddingBadField(t *testing.T) {
+	// Try adding an unsupported field.
+	resp, err := e.backend.HandleRequest(e.ctx, &logical.Request{
+		Operation: logical.UpdateOperation,
+		Path:      "config",
+		Storage:   e.storage,
+		Connection: &logical.Connection{
+			RemoteAddr: "http://foo.com",
+		},
+		Data: map[string]interface{}{
+			authMetadataFields.FieldName: []string{"asl;dfkj"},
+		},
+	})
+	if err == nil {
+		t.Fatal("expected err")
+	}
+	if resp == nil {
+		t.Fatal("expected non-nil response")
+	}
+	if !resp.IsError() {
+		t.Fatal("expected error response")
+	}
+}
+
+// We expect people to embed the Handler in their
+// config so it automatically makes its helper methods
+// available and easy to find wherever the config is
+// needed. Explicitly naming it in the json tag avoids it
+// automatically being named "Handler" by Go's JSON
+// marshalling library.
+type fakeConfig struct {
+	*Handler `json:"auth_metadata_handler"`
+}
+
+type fakeBackend struct {
+	*framework.Backend
+}
+
+// We expect each back-end to explicitly define the fields that
+// will be included by default, and optionally available.
+var authMetadataFields = &Fields{
+	FieldName: "some_field_name",
+	Default: []string{
+		"role_name", // This would likely never change because the alias is the role name.
+	},
+	AvailableToAdd: []string{
+		"remote_addr", // This would likely change with every new caller.
+ }, +} + +func configPath() *framework.Path { + return &framework.Path{ + Pattern: "config", + Fields: map[string]*framework.FieldSchema{ + authMetadataFields.FieldName: FieldSchema(authMetadataFields), + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: func(ctx context.Context, req *logical.Request, fd *framework.FieldData) (*logical.Response, error) { + entryRaw, err := req.Storage.Get(ctx, "config") + if err != nil { + return nil, err + } + conf := &fakeConfig{ + Handler: NewHandler(authMetadataFields), + } + if entryRaw != nil { + if err := entryRaw.DecodeJSON(conf); err != nil { + return nil, err + } + } + // Note that even if the config entry was nil, we return + // a populated response to give info on what the default + // auth metadata is when unconfigured. + return &logical.Response{ + Data: map[string]interface{}{ + authMetadataFields.FieldName: conf.AuthMetadata(), + }, + }, nil + }, + }, + logical.UpdateOperation: &framework.PathOperation{ + Callback: func(ctx context.Context, req *logical.Request, fd *framework.FieldData) (*logical.Response, error) { + entryRaw, err := req.Storage.Get(ctx, "config") + if err != nil { + return nil, err + } + conf := &fakeConfig{ + Handler: NewHandler(authMetadataFields), + } + if entryRaw != nil { + if err := entryRaw.DecodeJSON(conf); err != nil { + return nil, err + } + } + // This is where we read in the user's given auth metadata. + if err := conf.ParseAuthMetadata(fd); err != nil { + // Since this will only error on bad input, it's best to give + // a 400 response with the explicit problem included. + return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest + } + entry, err := logical.StorageEntryJSON("config", conf) + if err != nil { + return nil, err + } + if err = req.Storage.Put(ctx, entry); err != nil { + return nil, err + } + return nil, nil + }, + }, + }, + } +} + +func loginPath() *framework.Path { + return &framework.Path{ + Pattern: "login", + Fields: map[string]*framework.FieldSchema{ + "role_name": { + Type: framework.TypeString, + Required: true, + }, + }, + Operations: map[logical.Operation]framework.OperationHandler{ + logical.UpdateOperation: &framework.PathOperation{ + Callback: func(ctx context.Context, req *logical.Request, fd *framework.FieldData) (*logical.Response, error) { + entryRaw, err := req.Storage.Get(ctx, "config") + if err != nil { + return nil, err + } + conf := &fakeConfig{ + Handler: NewHandler(authMetadataFields), + } + if entryRaw != nil { + if err := entryRaw.DecodeJSON(conf); err != nil { + return nil, err + } + } + auth := &logical.Auth{ + Alias: &logical.Alias{ + Name: fd.Get("role_name").(string), + }, + } + // Here we provide everything and let the method strip out + // the undesired stuff. 
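+				// With the fields configured above, only role_name is kept by
+				// default; remote_addr survives only when explicitly selected.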
+ if err := conf.PopulateDesiredMetadata(auth, map[string]string{ + "role_name": fd.Get("role_name").(string), + "remote_addr": req.Connection.RemoteAddr, + }); err != nil { + fmt.Println("unable to populate due to " + err.Error()) + } + return &logical.Response{ + Auth: auth, + }, nil + }, + }, + }, + } +} + +func backend(ctx context.Context, storage logical.Storage) (logical.Backend, error) { + b := &fakeBackend{ + Backend: &framework.Backend{ + Paths: []*framework.Path{ + configPath(), + loginPath(), + }, + }, + } + if err := b.Setup(ctx, &logical.BackendConfig{ + StorageView: storage, + Logger: hclog.Default(), + }); err != nil { + return nil, err + } + return b, nil +} diff --git a/sdk/helper/authmetadata/auth_metadata_test.go b/sdk/helper/authmetadata/auth_metadata_test.go new file mode 100644 index 0000000..a82044f --- /dev/null +++ b/sdk/helper/authmetadata/auth_metadata_test.go @@ -0,0 +1,130 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package authmetadata + +import ( + "fmt" + "reflect" + "sort" + "testing" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +var testFields = &Fields{ + FieldName: "some-field-name", + Default: []string{"fizz", "buzz"}, + AvailableToAdd: []string{"foo", "bar"}, +} + +func TestFieldSchema(t *testing.T) { + schema := FieldSchema(testFields) + if schema.Type != framework.TypeCommaStringSlice { + t.Fatal("expected TypeCommaStringSlice") + } + if schema.Description != `The metadata to include on the aliases and audit logs generated by this plugin. When set to 'default', includes: fizz, buzz. These fields are available to add: foo, bar. Not editing this field means the 'default' fields are included. Explicitly setting this field to empty overrides the 'default' and means no metadata will be included. 
If not using 'default', explicit fields must be sent like: 'field1,field2'.` {
+ t.Fatal("received unexpected description: " + schema.Description)
+ }
+ if schema.DisplayAttrs == nil {
+ t.Fatal("expected display attributes")
+ }
+ if schema.DisplayAttrs.Name != testFields.FieldName {
+ t.Fatalf("expected name of %s", testFields.FieldName)
+ }
+ if schema.DisplayAttrs.Value != "field1,field2" {
+ t.Fatal("expected field1,field2")
+ }
+ if !reflect.DeepEqual(schema.Default, []string{"default"}) {
+ t.Fatal("expected default")
+ }
+}
+
+func TestGetAuthMetadata(t *testing.T) {
+ h := NewHandler(testFields)
+ expected := []string{"fizz", "buzz"}
+ sort.Strings(expected)
+ actual := h.AuthMetadata()
+ sort.Strings(actual)
+ if !reflect.DeepEqual(expected, actual) {
+ t.Fatalf("expected %s but received %s", expected, actual)
+ }
+}
+
+func TestParseAuthMetadata(t *testing.T) {
+ h := NewHandler(testFields)
+ data := &framework.FieldData{
+ Raw: map[string]interface{}{
+ testFields.FieldName: []string{"default"},
+ },
+ Schema: map[string]*framework.FieldSchema{
+ testFields.FieldName: FieldSchema(testFields),
+ },
+ }
+ if err := h.ParseAuthMetadata(data); err != nil {
+ t.Fatal(err)
+ }
+ expected := []string{"fizz", "buzz"}
+ sort.Strings(expected)
+ actual := h.AuthMetadata()
+ sort.Strings(actual)
+ if !reflect.DeepEqual(expected, actual) {
+ t.Fatalf("expected %s but received %s", expected, actual)
+ }
+}
+
+func TestPopulateDesiredAuthMetadata(t *testing.T) {
+ h := NewHandler(testFields)
+ data := &framework.FieldData{
+ Raw: map[string]interface{}{
+ testFields.FieldName: []string{"foo"},
+ },
+ Schema: map[string]*framework.FieldSchema{
+ testFields.FieldName: FieldSchema(testFields),
+ },
+ }
+ if err := h.ParseAuthMetadata(data); err != nil {
+ t.Fatal(err)
+ }
+ auth := &logical.Auth{
+ Alias: &logical.Alias{
+ Name: "foo",
+ },
+ }
+ if err := h.PopulateDesiredMetadata(auth, map[string]string{
+ "fizz": "fizzval",
+ "buzz": "buzzval",
+ "foo": "fooval",
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if len(auth.Alias.Metadata) != 1 {
+ t.Fatal("expected only 1 configured field to be populated")
+ }
+ if auth.Alias.Metadata["foo"] != "fooval" {
+ t.Fatal("expected fooval")
+ }
+}
+
+func TestMarshalJSON(t *testing.T) {
+ h := NewHandler(&Fields{})
+ h.authMetadata = []string{"fizz", "buzz"}
+ b, err := h.MarshalJSON()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(b) != `{"auth_metadata":["fizz","buzz"]}` {
+ t.Fatal(`expected {"auth_metadata":["fizz","buzz"]}`)
+ }
+}
+
+func TestUnmarshalJSON(t *testing.T) {
+ h := NewHandler(&Fields{})
+ if err := h.UnmarshalJSON([]byte(`{"auth_metadata":["fizz","buzz"]}`)); err != nil {
+ t.Fatal(err)
+ }
+ if fmt.Sprintf("%s", h.authMetadata) != `[fizz buzz]` {
+ t.Fatal(`expected [fizz buzz]`)
+ }
+}
diff --git a/sdk/helper/base62/base62.go b/sdk/helper/base62/base62.go
new file mode 100644
index 0000000..7d2c7d5
--- /dev/null
+++ b/sdk/helper/base62/base62.go
@@ -0,0 +1,19 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +// DEPRECATED: this has been moved to go-secure-stdlib and will be removed +package base62 + +import ( + "io" + + extbase62 "github.com/hashicorp/go-secure-stdlib/base62" +) + +func Random(length int) (string, error) { + return extbase62.Random(length) +} + +func RandomWithReader(length int, reader io.Reader) (string, error) { + return extbase62.RandomWithReader(length, reader) +} diff --git a/sdk/helper/certutil/certutil_test.go b/sdk/helper/certutil/certutil_test.go new file mode 100644 index 0000000..8b55094 --- /dev/null +++ b/sdk/helper/certutil/certutil_test.go @@ -0,0 +1,1047 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package certutil + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/json" + "encoding/pem" + "fmt" + "math/big" + mathrand "math/rand" + "reflect" + "strings" + "sync" + "testing" + "time" + + "github.com/fatih/structs" +) + +// Tests converting back and forth between a CertBundle and a ParsedCertBundle. +// +// Also tests the GetSubjKeyID, GetHexFormatted, ParseHexFormatted and +// ParsedCertBundle.getSigner functions. +func TestCertBundleConversion(t *testing.T) { + cbuts := []*CertBundle{ + refreshRSACertBundle(), + refreshRSACertBundleWithChain(), + refreshRSA8CertBundle(), + refreshRSA8CertBundleWithChain(), + refreshECCertBundle(), + refreshECCertBundleWithChain(), + refreshEC8CertBundle(), + refreshEC8CertBundleWithChain(), + refreshEd255198CertBundle(), + refreshEd255198CertBundleWithChain(), + } + + for i, cbut := range cbuts { + pcbut, err := cbut.ToParsedCertBundle() + if err != nil { + t.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i) + t.Errorf("Error converting to parsed cert bundle: %s", err) + continue + } + + err = compareCertBundleToParsedCertBundle(cbut, pcbut) + if err != nil { + t.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i) + t.Errorf(err.Error()) + } + + cbut, err := pcbut.ToCertBundle() + if err != nil { + t.Fatalf("Error converting to cert bundle: %s", err) + } + + err = compareCertBundleToParsedCertBundle(cbut, pcbut) + if err != nil { + t.Fatalf(err.Error()) + } + } +} + +func BenchmarkCertBundleParsing(b *testing.B) { + for i := 0; i < b.N; i++ { + cbuts := []*CertBundle{ + refreshRSACertBundle(), + refreshRSACertBundleWithChain(), + refreshRSA8CertBundle(), + refreshRSA8CertBundleWithChain(), + refreshECCertBundle(), + refreshECCertBundleWithChain(), + refreshEC8CertBundle(), + refreshEC8CertBundleWithChain(), + refreshEd255198CertBundle(), + refreshEd255198CertBundleWithChain(), + } + + for i, cbut := range cbuts { + pcbut, err := cbut.ToParsedCertBundle() + if err != nil { + b.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i) + b.Errorf("Error converting to parsed cert bundle: %s", err) + continue + } + + cbut, err = pcbut.ToCertBundle() + if err != nil { + b.Fatalf("Error converting to cert bundle: %s", err) + } + } + } +} + +func TestCertBundleParsing(t *testing.T) { + cbuts := []*CertBundle{ + refreshRSACertBundle(), + refreshRSACertBundleWithChain(), + refreshRSA8CertBundle(), + refreshRSA8CertBundleWithChain(), + refreshECCertBundle(), + refreshECCertBundleWithChain(), + refreshEC8CertBundle(), + refreshEC8CertBundleWithChain(), + refreshEd255198CertBundle(), + refreshEd255198CertBundleWithChain(), + } + + for i, cbut := range cbuts { + jsonString, 
err := json.Marshal(cbut)
+ if err != nil {
+ t.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i)
+ t.Fatalf("Error marshaling testing certbundle to JSON: %s", err)
+ }
+ pcbut, err := ParsePKIJSON(jsonString)
+ if err != nil {
+ t.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i)
+ t.Fatalf("Error during JSON bundle handling: %s", err)
+ }
+ err = compareCertBundleToParsedCertBundle(cbut, pcbut)
+ if err != nil {
+ t.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i)
+ t.Fatalf(err.Error())
+ }
+
+ dataMap := structs.New(cbut).Map()
+ pcbut, err = ParsePKIMap(dataMap)
+ if err != nil {
+ t.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i)
+ t.Fatalf("Error during JSON bundle handling: %s", err)
+ }
+ err = compareCertBundleToParsedCertBundle(cbut, pcbut)
+ if err != nil {
+ t.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i)
+ t.Fatalf(err.Error())
+ }
+
+ pcbut, err = ParsePEMBundle(cbut.ToPEMBundle())
+ if err != nil {
+ t.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i)
+ t.Fatalf("Error during JSON bundle handling: %s", err)
+ }
+ err = compareCertBundleToParsedCertBundle(cbut, pcbut)
+ if err != nil {
+ t.Logf("Error occurred with bundle %d in test array (index %d).\n", i+1, i)
+ t.Fatalf(err.Error())
+ }
+ }
+}
+
+func compareCertBundleToParsedCertBundle(cbut *CertBundle, pcbut *ParsedCertBundle) error {
+ if cbut == nil {
+ return fmt.Errorf("got nil bundle")
+ }
+ if pcbut == nil {
+ return fmt.Errorf("got nil parsed bundle")
+ }
+
+ switch {
+ case pcbut.Certificate == nil:
+ return fmt.Errorf("parsed bundle has nil certificate")
+ case pcbut.PrivateKey == nil:
+ return fmt.Errorf("parsed bundle has nil private key")
+ }
+
+ switch cbut.PrivateKey {
+ case privRSAKeyPem:
+ if pcbut.PrivateKeyType != RSAPrivateKey {
+ return fmt.Errorf("parsed bundle has wrong private key type: %v, should be 'rsa' (%v)", pcbut.PrivateKeyType, RSAPrivateKey)
+ }
+ case privRSA8KeyPem:
+ if pcbut.PrivateKeyType != RSAPrivateKey {
+ return fmt.Errorf("parsed bundle has wrong pkcs8 private key type: %v, should be 'rsa' (%v)", pcbut.PrivateKeyType, RSAPrivateKey)
+ }
+ case privECKeyPem:
+ if pcbut.PrivateKeyType != ECPrivateKey {
+ return fmt.Errorf("parsed bundle has wrong private key type: %v, should be 'ec' (%v)", pcbut.PrivateKeyType, ECPrivateKey)
+ }
+ case privEC8KeyPem:
+ if pcbut.PrivateKeyType != ECPrivateKey {
+ return fmt.Errorf("parsed bundle has wrong pkcs8 private key type: %v, should be 'ec' (%v)", pcbut.PrivateKeyType, ECPrivateKey)
+ }
+ case privEd255198KeyPem:
+ if pcbut.PrivateKeyType != Ed25519PrivateKey {
+ return fmt.Errorf("parsed bundle has wrong pkcs8 private key type: %v, should be 'ed25519' (%v)", pcbut.PrivateKeyType, Ed25519PrivateKey)
+ }
+ default:
+ return fmt.Errorf("parsed bundle has unknown private key type")
+ }
+
+ subjKeyID, err := GetSubjKeyID(pcbut.PrivateKey)
+ if err != nil {
+ return fmt.Errorf("error when getting subject key id: %s", err)
+ }
+ if bytes.Compare(subjKeyID, pcbut.Certificate.SubjectKeyId) != 0 {
+ return fmt.Errorf("parsed bundle private key does not match subject key id\nGot\n%#v\nExpected\n%#v\nCert\n%#v", subjKeyID, pcbut.Certificate.SubjectKeyId, *pcbut.Certificate)
+ }
+
+ switch {
+ case len(pcbut.CAChain) > 0 && len(cbut.CAChain) == 0:
+ return fmt.Errorf("parsed bundle ca chain has certs when cert bundle does not")
+ case len(pcbut.CAChain) == 0 && len(cbut.CAChain) > 0:
+ return fmt.Errorf("cert 
bundle ca chain has certs when parsed cert bundle does not") + } + + cb, err := pcbut.ToCertBundle() + if err != nil { + return fmt.Errorf("thrown error during parsed bundle conversion: %s\n\nInput was: %#v", err, *pcbut) + } + + switch { + case len(cb.Certificate) == 0: + return fmt.Errorf("bundle has nil certificate") + case len(cb.PrivateKey) == 0: + return fmt.Errorf("bundle has nil private key") + case len(cb.CAChain[0]) == 0: + return fmt.Errorf("bundle has nil issuing CA") + } + + switch pcbut.PrivateKeyType { + case RSAPrivateKey: + if cb.PrivateKey != privRSAKeyPem && cb.PrivateKey != privRSA8KeyPem { + return fmt.Errorf("bundle private key does not match") + } + case ECPrivateKey: + if cb.PrivateKey != privECKeyPem && cb.PrivateKey != privEC8KeyPem { + return fmt.Errorf("bundle private key does not match") + } + case Ed25519PrivateKey: + if cb.PrivateKey != privEd255198KeyPem { + return fmt.Errorf("bundle private key does not match") + } + default: + return fmt.Errorf("certBundle has unknown private key type") + } + + if cb.SerialNumber != GetHexFormatted(pcbut.Certificate.SerialNumber.Bytes(), ":") { + return fmt.Errorf("bundle serial number does not match") + } + + if !bytes.Equal(pcbut.Certificate.SerialNumber.Bytes(), ParseHexFormatted(cb.SerialNumber, ":")) { + return fmt.Errorf("failed re-parsing hex formatted number %s", cb.SerialNumber) + } + + switch { + case len(pcbut.CAChain) > 0 && len(cb.CAChain) == 0: + return fmt.Errorf("parsed bundle ca chain has certs when cert bundle does not") + case len(pcbut.CAChain) == 0 && len(cb.CAChain) > 0: + return fmt.Errorf("cert bundle ca chain has certs when parsed cert bundle does not") + case !reflect.DeepEqual(cbut.CAChain, cb.CAChain): + return fmt.Errorf("cert bundle ca chain does not match: %#v\n\n%#v", cbut.CAChain, cb.CAChain) + } + + return nil +} + +func TestCSRBundleConversion(t *testing.T) { + csrbuts := []*CSRBundle{ + refreshRSACSRBundle(), + refreshECCSRBundle(), + refreshEd25519CSRBundle(), + } + + for _, csrbut := range csrbuts { + pcsrbut, err := csrbut.ToParsedCSRBundle() + if err != nil { + t.Fatalf("Error converting to parsed CSR bundle: %v", err) + } + + err = compareCSRBundleToParsedCSRBundle(csrbut, pcsrbut) + if err != nil { + t.Fatalf(err.Error()) + } + + csrbut, err = pcsrbut.ToCSRBundle() + if err != nil { + t.Fatalf("Error converting to CSR bundle: %v", err) + } + + err = compareCSRBundleToParsedCSRBundle(csrbut, pcsrbut) + if err != nil { + t.Fatalf(err.Error()) + } + } +} + +func compareCSRBundleToParsedCSRBundle(csrbut *CSRBundle, pcsrbut *ParsedCSRBundle) error { + if csrbut == nil { + return fmt.Errorf("got nil bundle") + } + if pcsrbut == nil { + return fmt.Errorf("got nil parsed bundle") + } + + switch { + case pcsrbut.CSR == nil: + return fmt.Errorf("parsed bundle has nil csr") + case pcsrbut.PrivateKey == nil: + return fmt.Errorf("parsed bundle has nil private key") + } + + switch csrbut.PrivateKey { + case privRSAKeyPem: + if pcsrbut.PrivateKeyType != RSAPrivateKey { + return fmt.Errorf("parsed bundle has wrong private key type") + } + case privECKeyPem: + if pcsrbut.PrivateKeyType != ECPrivateKey { + return fmt.Errorf("parsed bundle has wrong private key type") + } + case privEd255198KeyPem: + if pcsrbut.PrivateKeyType != Ed25519PrivateKey { + return fmt.Errorf("parsed bundle has wrong private key type") + } + default: + return fmt.Errorf("parsed bundle has unknown private key type") + } + + csrb, err := pcsrbut.ToCSRBundle() + if err != nil { + return fmt.Errorf("Thrown error during parsed 
bundle conversion: %s\n\nInput was: %#v", err, *pcsrbut)
+ }
+
+ switch {
+ case len(csrb.CSR) == 0:
+ return fmt.Errorf("bundle has nil CSR")
+ case len(csrb.PrivateKey) == 0:
+ return fmt.Errorf("bundle has nil private key")
+ }
+
+ switch csrb.PrivateKeyType {
+ case "rsa":
+ if pcsrbut.PrivateKeyType != RSAPrivateKey {
+ return fmt.Errorf("bundle has wrong private key type")
+ }
+ if csrb.PrivateKey != privRSAKeyPem {
+ return fmt.Errorf("bundle rsa private key does not match\nGot\n%#v\nExpected\n%#v", csrb.PrivateKey, privRSAKeyPem)
+ }
+ case "ec":
+ if pcsrbut.PrivateKeyType != ECPrivateKey {
+ return fmt.Errorf("bundle has wrong private key type")
+ }
+ if csrb.PrivateKey != privECKeyPem {
+ return fmt.Errorf("bundle ec private key does not match")
+ }
+ case "ed25519":
+ if pcsrbut.PrivateKeyType != Ed25519PrivateKey {
+ return fmt.Errorf("bundle has wrong private key type")
+ }
+ if csrb.PrivateKey != privEd255198KeyPem {
+ return fmt.Errorf("bundle ed25519 private key does not match")
+ }
+ default:
+ return fmt.Errorf("bundle has unknown private key type")
+ }
+
+ return nil
+}
+
+func TestTLSConfig(t *testing.T) {
+ cbut := refreshRSACertBundle()
+
+ pcbut, err := cbut.ToParsedCertBundle()
+ if err != nil {
+ t.Fatalf("Error getting parsed cert bundle: %s", err)
+ }
+
+ usages := []TLSUsage{
+ TLSUnknown,
+ TLSClient,
+ TLSServer,
+ TLSClient | TLSServer,
+ }
+
+ for _, usage := range usages {
+ tlsConfig, err := pcbut.GetTLSConfig(usage)
+ if err != nil {
+ t.Fatalf("Error getting tls config: %s", err)
+ }
+ if tlsConfig == nil {
+ t.Fatalf("Got nil tls.Config")
+ }
+
+ if len(tlsConfig.Certificates) != 1 {
+ t.Fatalf("Unexpected length in config.Certificates")
+ }
+
+ // Length should be 2, since we passed in a CA
+ if len(tlsConfig.Certificates[0].Certificate) != 2 {
+ t.Fatalf("Did not find both certificates in config.Certificates.Certificate")
+ }
+
+ if tlsConfig.Certificates[0].Leaf != pcbut.Certificate {
+ t.Fatalf("Leaf certificate does not match parsed bundle's certificate")
+ }
+
+ if tlsConfig.Certificates[0].PrivateKey != pcbut.PrivateKey {
+ t.Fatalf("Config's private key does not match parsed bundle's private key")
+ }
+
+ switch usage {
+ case TLSServer | TLSClient:
+ if len(tlsConfig.ClientCAs.Subjects()) != 1 || bytes.Compare(tlsConfig.ClientCAs.Subjects()[0], pcbut.CAChain[0].Certificate.RawSubject) != 0 {
+ t.Fatalf("CA certificate not in client cert pool as expected")
+ }
+ if len(tlsConfig.RootCAs.Subjects()) != 1 || bytes.Compare(tlsConfig.RootCAs.Subjects()[0], pcbut.CAChain[0].Certificate.RawSubject) != 0 {
+ t.Fatalf("CA certificate not in root cert pool as expected")
+ }
+ case TLSServer:
+ if len(tlsConfig.ClientCAs.Subjects()) != 1 || bytes.Compare(tlsConfig.ClientCAs.Subjects()[0], pcbut.CAChain[0].Certificate.RawSubject) != 0 {
+ t.Fatalf("CA certificate not in client cert pool as expected")
+ }
+ if tlsConfig.RootCAs != nil {
+ t.Fatalf("Found root pools in config object when not expected")
+ }
+ case TLSClient:
+ if len(tlsConfig.RootCAs.Subjects()) != 1 || bytes.Compare(tlsConfig.RootCAs.Subjects()[0], pcbut.CAChain[0].Certificate.RawSubject) != 0 {
+ t.Fatalf("CA certificate not in root cert pool as expected")
+ }
+ if tlsConfig.ClientCAs != nil {
+ t.Fatalf("Found client CA pool in config object when not expected")
+ }
+ default:
+ if tlsConfig.RootCAs != nil || tlsConfig.ClientCAs != nil {
+ t.Fatalf("Found root pools in config object when not expected")
+ }
+ }
+ }
+}
+
+func TestNewCertPool(t *testing.T) {
+ caExample := `-----BEGIN 
CERTIFICATE----- +MIIC5zCCAc+gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p +a3ViZUNBMB4XDTE5MTIxMDIzMDUxOVoXDTI5MTIwODIzMDUxOVowFTETMBEGA1UE +AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANFi +/RIdMHd865X6JygTb9riX01DA3QnR+RoXDXNnj8D3LziLG2n8ItXMJvWbU3sxxyy +nX9HxJ0SIeexj1cYzdQBtJDjO1/PeuKc4CZ7zCukCAtHz8mC7BDPOU7F7pggpcQ0 +/t/pa2m22hmCu8aDF9WlUYHtJpYATnI/A5vz/VFLR9daxmkl59Qo3oHITj7vAzSx +/75r9cibpQyJ+FhiHOZHQWYY2JYw2g4v5hm5hg5SFM9yFcZ75ISI9ebyFFIl9iBY +zAk9jqv1mXvLr0Q39AVwMTamvGuap1oocjM9NIhQvaFL/DNqF1ouDQjCf5u2imLc +TraO1/2KO8fqwOZCOrMCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW +MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3 +DQEBCwUAA4IBAQBtVZCwCPqUUUpIClAlE9nc2fo2bTs9gsjXRmqdQ5oaSomSLE93 +aJWYFuAhxPXtlApbLYZfW2m1sM3mTVQN60y0uE4e1jdSN1ErYQ9slJdYDAMaEmOh +iSexj+Nd1scUiMHV9lf3ps5J8sYeCpwZX3sPmw7lqZojTS12pANBDcigsaj5RRyN +9GyP3WkSQUsTpWlDb9Fd+KNdkCVw7nClIpBPA2KW4BQKw/rNSvOFD61mbzc89lo0 +Q9IFGQFFF8jO18lbyWqnRBGXcS4/G7jQ3S7C121d14YLUeAYOM7pJykI1g4CLx9y +vitin0L6nprauWkKO38XgM4T75qKZpqtiOcT +-----END CERTIFICATE----- +` + if _, err := NewCertPool(bytes.NewReader([]byte(caExample))); err != nil { + t.Fatal(err) + } +} + +func TestGetPublicKeySize(t *testing.T) { + rsa, err := rsa.GenerateKey(rand.Reader, 3072) + if err != nil { + t.Fatal(err) + } + if GetPublicKeySize(&rsa.PublicKey) != 3072 { + t.Fatal("unexpected rsa key size") + } + ecdsa, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader) + if err != nil { + t.Fatal(err) + } + if GetPublicKeySize(&ecdsa.PublicKey) != 384 { + t.Fatal("unexpected ecdsa key size") + } + ed25519, _, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + t.Fatal(err) + } + if GetPublicKeySize(ed25519) != 256 { + t.Fatal("unexpected ed25519 key size") + } + // Skipping DSA as too slow +} + +func refreshRSA8CertBundle() *CertBundle { + initTest.Do(setCerts) + return &CertBundle{ + Certificate: certRSAPem, + PrivateKey: privRSA8KeyPem, + CAChain: []string{issuingCaChainPem[0]}, + } +} + +func refreshRSA8CertBundleWithChain() *CertBundle { + initTest.Do(setCerts) + ret := refreshRSA8CertBundle() + ret.CAChain = issuingCaChainPem + return ret +} + +func refreshRSACertBundle() *CertBundle { + initTest.Do(setCerts) + return &CertBundle{ + Certificate: certRSAPem, + CAChain: []string{issuingCaChainPem[0]}, + PrivateKey: privRSAKeyPem, + } +} + +func refreshRSACertBundleWithChain() *CertBundle { + initTest.Do(setCerts) + ret := refreshRSACertBundle() + ret.CAChain = issuingCaChainPem + return ret +} + +func refreshECCertBundle() *CertBundle { + initTest.Do(setCerts) + return &CertBundle{ + Certificate: certECPem, + CAChain: []string{issuingCaChainPem[0]}, + PrivateKey: privECKeyPem, + } +} + +func refreshECCertBundleWithChain() *CertBundle { + initTest.Do(setCerts) + ret := refreshECCertBundle() + ret.CAChain = issuingCaChainPem + return ret +} + +func refreshEd255198CertBundle() *CertBundle { + initTest.Do(setCerts) + return &CertBundle{ + Certificate: certEd25519Pem, + PrivateKey: privEd255198KeyPem, + CAChain: []string{issuingCaChainPem[0]}, + } +} + +func refreshEd255198CertBundleWithChain() *CertBundle { + initTest.Do(setCerts) + ret := refreshEd255198CertBundle() + ret.CAChain = issuingCaChainPem + return ret +} + +func refreshEd25519CSRBundle() *CSRBundle { + initTest.Do(setCerts) + return &CSRBundle{ + CSR: csrEd25519Pem, + PrivateKey: privEd255198KeyPem, + } +} + +func refreshRSACSRBundle() *CSRBundle { + initTest.Do(setCerts) + return &CSRBundle{ + CSR: csrRSAPem, + PrivateKey: privRSAKeyPem, + } +} + +func 
refreshECCSRBundle() *CSRBundle { + initTest.Do(setCerts) + return &CSRBundle{ + CSR: csrECPem, + PrivateKey: privECKeyPem, + } +} + +func refreshEC8CertBundle() *CertBundle { + initTest.Do(setCerts) + return &CertBundle{ + Certificate: certECPem, + PrivateKey: privEC8KeyPem, + CAChain: []string{issuingCaChainPem[0]}, + } +} + +func refreshEC8CertBundleWithChain() *CertBundle { + initTest.Do(setCerts) + ret := refreshEC8CertBundle() + ret.CAChain = issuingCaChainPem + return ret +} + +func setCerts() { + caKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + panic(err) + } + subjKeyID, err := GetSubjKeyID(caKey) + if err != nil { + panic(err) + } + caCertTemplate := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "root.localhost", + }, + SubjectKeyId: subjKeyID, + DNSNames: []string{"root.localhost"}, + KeyUsage: x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign), + SerialNumber: big.NewInt(mathrand.Int63()), + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + BasicConstraintsValid: true, + IsCA: true, + } + caBytes, err := x509.CreateCertificate(rand.Reader, caCertTemplate, caCertTemplate, caKey.Public(), caKey) + if err != nil { + panic(err) + } + caCert, err := x509.ParseCertificate(caBytes) + if err != nil { + panic(err) + } + caCertPEMBlock := &pem.Block{ + Type: "CERTIFICATE", + Bytes: caBytes, + } + caCertPEM := strings.TrimSpace(string(pem.EncodeToMemory(caCertPEMBlock))) + + intKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + panic(err) + } + subjKeyID, err = GetSubjKeyID(intKey) + if err != nil { + panic(err) + } + intCertTemplate := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "int.localhost", + }, + SubjectKeyId: subjKeyID, + DNSNames: []string{"int.localhost"}, + KeyUsage: x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign), + SerialNumber: big.NewInt(mathrand.Int63()), + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + BasicConstraintsValid: true, + IsCA: true, + } + intBytes, err := x509.CreateCertificate(rand.Reader, intCertTemplate, caCert, intKey.Public(), caKey) + if err != nil { + panic(err) + } + intCert, err := x509.ParseCertificate(intBytes) + if err != nil { + panic(err) + } + intCertPEMBlock := &pem.Block{ + Type: "CERTIFICATE", + Bytes: intBytes, + } + intCertPEM := strings.TrimSpace(string(pem.EncodeToMemory(intCertPEMBlock))) + + // EC generation + { + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + panic(err) + } + subjKeyID, err := GetSubjKeyID(key) + if err != nil { + panic(err) + } + certTemplate := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "localhost", + }, + SubjectKeyId: subjKeyID, + DNSNames: []string{"localhost"}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement, + SerialNumber: big.NewInt(mathrand.Int63()), + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + } + csrTemplate := &x509.CertificateRequest{ + Subject: pkix.Name{ + CommonName: "localhost", + }, + DNSNames: []string{"localhost"}, + } + csrBytes, err := x509.CreateCertificateRequest(rand.Reader, csrTemplate, key) + if err != nil { + panic(err) + } + csrPEMBlock := &pem.Block{ + Type: "CERTIFICATE REQUEST", + Bytes: csrBytes, + } + csrECPem = 
strings.TrimSpace(string(pem.EncodeToMemory(csrPEMBlock))) + certBytes, err := x509.CreateCertificate(rand.Reader, certTemplate, intCert, key.Public(), intKey) + if err != nil { + panic(err) + } + certPEMBlock := &pem.Block{ + Type: "CERTIFICATE", + Bytes: certBytes, + } + certECPem = strings.TrimSpace(string(pem.EncodeToMemory(certPEMBlock))) + marshaledKey, err := x509.MarshalECPrivateKey(key) + if err != nil { + panic(err) + } + keyPEMBlock := &pem.Block{ + Type: "EC PRIVATE KEY", + Bytes: marshaledKey, + } + privECKeyPem = strings.TrimSpace(string(pem.EncodeToMemory(keyPEMBlock))) + marshaledKey, err = x509.MarshalPKCS8PrivateKey(key) + if err != nil { + panic(err) + } + keyPEMBlock = &pem.Block{ + Type: "PRIVATE KEY", + Bytes: marshaledKey, + } + privEC8KeyPem = strings.TrimSpace(string(pem.EncodeToMemory(keyPEMBlock))) + } + + // RSA generation + { + key, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + panic(err) + } + subjKeyID, err := GetSubjKeyID(key) + if err != nil { + panic(err) + } + certTemplate := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "localhost", + }, + SubjectKeyId: subjKeyID, + DNSNames: []string{"localhost"}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement, + SerialNumber: big.NewInt(mathrand.Int63()), + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + } + csrTemplate := &x509.CertificateRequest{ + Subject: pkix.Name{ + CommonName: "localhost", + }, + DNSNames: []string{"localhost"}, + } + csrBytes, err := x509.CreateCertificateRequest(rand.Reader, csrTemplate, key) + if err != nil { + panic(err) + } + csrPEMBlock := &pem.Block{ + Type: "CERTIFICATE REQUEST", + Bytes: csrBytes, + } + csrRSAPem = strings.TrimSpace(string(pem.EncodeToMemory(csrPEMBlock))) + certBytes, err := x509.CreateCertificate(rand.Reader, certTemplate, intCert, key.Public(), intKey) + if err != nil { + panic(err) + } + certPEMBlock := &pem.Block{ + Type: "CERTIFICATE", + Bytes: certBytes, + } + certRSAPem = strings.TrimSpace(string(pem.EncodeToMemory(certPEMBlock))) + marshaledKey := x509.MarshalPKCS1PrivateKey(key) + keyPEMBlock := &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: marshaledKey, + } + privRSAKeyPem = strings.TrimSpace(string(pem.EncodeToMemory(keyPEMBlock))) + marshaledKey, err = x509.MarshalPKCS8PrivateKey(key) + if err != nil { + panic(err) + } + keyPEMBlock = &pem.Block{ + Type: "PRIVATE KEY", + Bytes: marshaledKey, + } + privRSA8KeyPem = strings.TrimSpace(string(pem.EncodeToMemory(keyPEMBlock))) + } + + // Ed25519 generation + { + pubkey, privkey, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + panic(err) + } + subjKeyID, err := GetSubjKeyID(privkey) + if err != nil { + panic(err) + } + certTemplate := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "localhost", + }, + SubjectKeyId: subjKeyID, + DNSNames: []string{"localhost"}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement, + SerialNumber: big.NewInt(mathrand.Int63()), + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + } + csrTemplate := &x509.CertificateRequest{ + Subject: pkix.Name{ + CommonName: "localhost", + }, + DNSNames: []string{"localhost"}, + } + csrBytes, err := 
x509.CreateCertificateRequest(rand.Reader, csrTemplate, privkey) + if err != nil { + panic(err) + } + csrPEMBlock := &pem.Block{ + Type: "CERTIFICATE REQUEST", + Bytes: csrBytes, + } + csrEd25519Pem = strings.TrimSpace(string(pem.EncodeToMemory(csrPEMBlock))) + certBytes, err := x509.CreateCertificate(rand.Reader, certTemplate, intCert, pubkey, intKey) + if err != nil { + panic(err) + } + certPEMBlock := &pem.Block{ + Type: "CERTIFICATE", + Bytes: certBytes, + } + certEd25519Pem = strings.TrimSpace(string(pem.EncodeToMemory(certPEMBlock))) + marshaledKey, err := x509.MarshalPKCS8PrivateKey(privkey) + if err != nil { + panic(err) + } + keyPEMBlock := &pem.Block{ + Type: "PRIVATE KEY", + Bytes: marshaledKey, + } + privEd255198KeyPem = strings.TrimSpace(string(pem.EncodeToMemory(keyPEMBlock))) + } + + issuingCaChainPem = []string{intCertPEM, caCertPEM} +} + +func TestComparePublicKeysAndType(t *testing.T) { + rsa1 := genRsaKey(t).Public() + rsa2 := genRsaKey(t).Public() + eddsa1 := genEdDSA(t).Public() + eddsa2 := genEdDSA(t).Public() + ed25519_1, _ := genEd25519Key(t) + ed25519_2, _ := genEd25519Key(t) + + type args struct { + key1Iface crypto.PublicKey + key2Iface crypto.PublicKey + } + tests := []struct { + name string + args args + want bool + wantErr bool + }{ + {name: "RSA_Equal", args: args{key1Iface: rsa1, key2Iface: rsa1}, want: true, wantErr: false}, + {name: "RSA_NotEqual", args: args{key1Iface: rsa1, key2Iface: rsa2}, want: false, wantErr: false}, + {name: "EDDSA_Equal", args: args{key1Iface: eddsa1, key2Iface: eddsa1}, want: true, wantErr: false}, + {name: "EDDSA_NotEqual", args: args{key1Iface: eddsa1, key2Iface: eddsa2}, want: false, wantErr: false}, + {name: "ED25519_Equal", args: args{key1Iface: ed25519_1, key2Iface: ed25519_1}, want: true, wantErr: false}, + {name: "ED25519_NotEqual", args: args{key1Iface: ed25519_1, key2Iface: ed25519_2}, want: false, wantErr: false}, + {name: "Mismatched_RSA", args: args{key1Iface: rsa1, key2Iface: ed25519_2}, want: false, wantErr: false}, + {name: "Mismatched_EDDSA", args: args{key1Iface: ed25519_1, key2Iface: rsa1}, want: false, wantErr: false}, + {name: "Mismatched_ED25519", args: args{key1Iface: ed25519_1, key2Iface: rsa1}, want: false, wantErr: false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ComparePublicKeysAndType(tt.args.key1Iface, tt.args.key2Iface) + if (err != nil) != tt.wantErr { + t.Errorf("ComparePublicKeysAndType() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("ComparePublicKeysAndType() got = %v, want %v", got, tt.want) + } + }) + } +} + +func TestNotAfterValues(t *testing.T) { + if ErrNotAfterBehavior != 0 { + t.Fatalf("Expected ErrNotAfterBehavior=%v to have value 0", ErrNotAfterBehavior) + } + + if TruncateNotAfterBehavior != 1 { + t.Fatalf("Expected TruncateNotAfterBehavior=%v to have value 1", TruncateNotAfterBehavior) + } + + if PermitNotAfterBehavior != 2 { + t.Fatalf("Expected PermitNotAfterBehavior=%v to have value 2", PermitNotAfterBehavior) + } +} + +func TestSignatureAlgorithmRoundTripping(t *testing.T) { + for leftName, value := range SignatureAlgorithmNames { + if leftName == "pureed25519" && value == x509.PureEd25519 { + continue + } + + rightName, present := InvSignatureAlgorithmNames[value] + if !present { + t.Fatalf("%v=%v is present in SignatureAlgorithmNames but not in InvSignatureAlgorithmNames", leftName, value) + } + + if strings.ToLower(rightName) != leftName { + t.Fatalf("%v=%v is present in 
SignatureAlgorithmNames but inverse for %v has different name: %v", leftName, value, value, rightName)
+ }
+ }
+
+ for leftValue, name := range InvSignatureAlgorithmNames {
+ rightValue, present := SignatureAlgorithmNames[strings.ToLower(name)]
+ if !present {
+ t.Fatalf("%v=%v is present in InvSignatureAlgorithmNames but not in SignatureAlgorithmNames", leftValue, name)
+ }
+
+ if rightValue != leftValue {
+ t.Fatalf("%v=%v is present in InvSignatureAlgorithmNames but forwards for %v has different value: %v", leftValue, name, name, rightValue)
+ }
+ }
+}
+
+// TestBasicConstraintExtension verifies that generation and parsing of x509
+// basic constraint extensions work as expected.
+func TestBasicConstraintExtension(t *testing.T) {
+ t.Parallel()
+
+ tests := []struct {
+ name string
+ isCA bool
+ maxPathLen int
+ }{
+ {"empty-seq", false, -1},
+ {"just-ca-true", true, -1},
+ {"just-ca-with-maxpathlen", true, 2},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ext, err := CreateBasicConstraintExtension(tt.isCA, tt.maxPathLen)
+ if err != nil {
+ t.Fatalf("failed generating basic extension: %v", err)
+ }
+
+ gotIsCa, gotMaxPathLen, err := ParseBasicConstraintExtension(ext)
+ if err != nil {
+ t.Fatalf("failed parsing basic extension: %v", err)
+ }
+
+ if tt.isCA != gotIsCa {
+ t.Fatalf("expected isCa (%v) got isCa (%v)", tt.isCA, gotIsCa)
+ }
+
+ if tt.maxPathLen != gotMaxPathLen {
+ t.Fatalf("expected maxPathLen (%v) got maxPathLen (%v)", tt.maxPathLen, gotMaxPathLen)
+ }
+ })
+ }
+
+ t.Run("bad-extension-oid", func(t *testing.T) {
+ // Test invalid type errors out
+ _, _, err := ParseBasicConstraintExtension(pkix.Extension{})
+ if err == nil {
+ t.Fatalf("should have failed parsing non-basic constraint extension")
+ }
+ })
+
+ t.Run("garbage-value", func(t *testing.T) {
+ extraBytes, err := asn1.Marshal("a string")
+ if err != nil {
+ t.Fatalf("failed encoding the struct: %v", err)
+ }
+ ext := pkix.Extension{
+ Id: ExtensionBasicConstraintsOID,
+ Value: extraBytes,
+ }
+ _, _, err = ParseBasicConstraintExtension(ext)
+ if err == nil {
+ t.Fatalf("should have failed parsing basic constraint with extra information")
+ }
+ })
+}
+
+func genRsaKey(t *testing.T) *rsa.PrivateKey {
+ key, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return key
+}
+
+// genEdDSA, despite its name, returns an ECDSA P-384 key; the EDDSA_* cases
+// in TestComparePublicKeysAndType above therefore compare ECDSA keys.
+func genEdDSA(t *testing.T) *ecdsa.PrivateKey {
+ key, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return key
+}
+
+func genEd25519Key(t *testing.T) (ed25519.PublicKey, ed25519.PrivateKey) {
+ key, priv, err := ed25519.GenerateKey(rand.Reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return key, priv
+}
+
+var (
+ initTest sync.Once
+ privRSA8KeyPem string
+ privRSAKeyPem string
+ csrRSAPem string
+ certRSAPem string
+ privEd255198KeyPem string
+ csrEd25519Pem string
+ certEd25519Pem string
+ privECKeyPem string
+ csrECPem string
+ privEC8KeyPem string
+ certECPem string
+ issuingCaChainPem []string
+)
diff --git a/sdk/helper/certutil/helpers.go b/sdk/helper/certutil/helpers.go
new file mode 100644
index 0000000..2847202
--- /dev/null
+++ b/sdk/helper/certutil/helpers.go
@@ -0,0 +1,1459 @@
+// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package certutil + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/pem" + "errors" + "fmt" + "io" + "io/ioutil" + "math/big" + "net" + "net/url" + "strconv" + "strings" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/mitchellh/mapstructure" + "golang.org/x/crypto/cryptobyte" + cbasn1 "golang.org/x/crypto/cryptobyte/asn1" +) + +const rsaMinimumSecureKeySize = 2048 + +// Mapping of key types to default key lengths +var defaultAlgorithmKeyBits = map[string]int{ + "rsa": 2048, + "ec": 256, +} + +// Mapping of NIST P-Curve's key length to expected signature bits. +var expectedNISTPCurveHashBits = map[int]int{ + 224: 256, + 256: 256, + 384: 384, + 521: 512, +} + +// Mapping of constant names<->constant values for SignatureAlgorithm +var SignatureAlgorithmNames = map[string]x509.SignatureAlgorithm{ + "sha256withrsa": x509.SHA256WithRSA, + "sha384withrsa": x509.SHA384WithRSA, + "sha512withrsa": x509.SHA512WithRSA, + "ecdsawithsha256": x509.ECDSAWithSHA256, + "ecdsawithsha384": x509.ECDSAWithSHA384, + "ecdsawithsha512": x509.ECDSAWithSHA512, + "sha256withrsapss": x509.SHA256WithRSAPSS, + "sha384withrsapss": x509.SHA384WithRSAPSS, + "sha512withrsapss": x509.SHA512WithRSAPSS, + "pureed25519": x509.PureEd25519, + "ed25519": x509.PureEd25519, // Duplicated for clarity; most won't expect the "Pure" prefix. +} + +// Mapping of constant values<->constant names for SignatureAlgorithm +var InvSignatureAlgorithmNames = map[x509.SignatureAlgorithm]string{ + x509.SHA256WithRSA: "SHA256WithRSA", + x509.SHA384WithRSA: "SHA384WithRSA", + x509.SHA512WithRSA: "SHA512WithRSA", + x509.ECDSAWithSHA256: "ECDSAWithSHA256", + x509.ECDSAWithSHA384: "ECDSAWithSHA384", + x509.ECDSAWithSHA512: "ECDSAWithSHA512", + x509.SHA256WithRSAPSS: "SHA256WithRSAPSS", + x509.SHA384WithRSAPSS: "SHA384WithRSAPSS", + x509.SHA512WithRSAPSS: "SHA512WithRSAPSS", + x509.PureEd25519: "Ed25519", +} + +// OID for RFC 5280 CRL Number extension. +// +// > id-ce-cRLNumber OBJECT IDENTIFIER ::= { id-ce 20 } +var CRLNumberOID = asn1.ObjectIdentifier([]int{2, 5, 29, 20}) + +// OID for RFC 5280 Delta CRL Indicator CRL extension. +// +// > id-ce-deltaCRLIndicator OBJECT IDENTIFIER ::= { id-ce 27 } +var DeltaCRLIndicatorOID = asn1.ObjectIdentifier([]int{2, 5, 29, 27}) + +// GetHexFormatted returns the byte buffer formatted in hex with +// the specified separator between bytes. +func GetHexFormatted(buf []byte, sep string) string { + var ret bytes.Buffer + for _, cur := range buf { + if ret.Len() > 0 { + fmt.Fprintf(&ret, sep) + } + fmt.Fprintf(&ret, "%02x", cur) + } + return ret.String() +} + +// ParseHexFormatted returns the raw bytes from a formatted hex string +func ParseHexFormatted(in, sep string) []byte { + var ret bytes.Buffer + var err error + var inBits uint64 + inBytes := strings.Split(in, sep) + for _, inByte := range inBytes { + if inBits, err = strconv.ParseUint(inByte, 16, 8); err != nil { + return nil + } + ret.WriteByte(uint8(inBits)) + } + return ret.Bytes() +} + +// GetSubjKeyID returns the subject key ID. 
The computed ID is the SHA-1 hash of +// the marshaled public key according to +// https://tools.ietf.org/html/rfc5280#section-4.2.1.2 (1) +func GetSubjKeyID(privateKey crypto.Signer) ([]byte, error) { + if privateKey == nil { + return nil, errutil.InternalError{Err: "passed-in private key is nil"} + } + return GetSubjectKeyID(privateKey.Public()) +} + +// Returns the explicit SKID when used for cross-signing, else computes a new +// SKID from the key itself. +func getSubjectKeyIDFromBundle(data *CreationBundle) ([]byte, error) { + if len(data.Params.SKID) > 0 { + return data.Params.SKID, nil + } + + return GetSubjectKeyID(data.CSR.PublicKey) +} + +func GetSubjectKeyID(pub interface{}) ([]byte, error) { + var publicKeyBytes []byte + switch pub := pub.(type) { + case *rsa.PublicKey: + type pkcs1PublicKey struct { + N *big.Int + E int + } + + var err error + publicKeyBytes, err = asn1.Marshal(pkcs1PublicKey{ + N: pub.N, + E: pub.E, + }) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("error marshalling public key: %s", err)} + } + case *ecdsa.PublicKey: + publicKeyBytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y) + case ed25519.PublicKey: + publicKeyBytes = pub + default: + return nil, errutil.InternalError{Err: fmt.Sprintf("unsupported public key type: %T", pub)} + } + skid := sha1.Sum(publicKeyBytes) + return skid[:], nil +} + +// ParsePKIMap takes a map (for instance, the Secret.Data +// returned from the PKI backend) and returns a ParsedCertBundle. +func ParsePKIMap(data map[string]interface{}) (*ParsedCertBundle, error) { + result := &CertBundle{} + err := mapstructure.Decode(data, result) + if err != nil { + return nil, errutil.UserError{Err: err.Error()} + } + + return result.ToParsedCertBundle() +} + +// ParsePKIJSON takes a JSON-encoded string and returns a ParsedCertBundle. +// +// This can be either the output of an +// issue call from the PKI backend or just its data member; or, +// JSON not coming from the PKI backend. +func ParsePKIJSON(input []byte) (*ParsedCertBundle, error) { + result := &CertBundle{} + err := jsonutil.DecodeJSON(input, &result) + + if err == nil { + return result.ToParsedCertBundle() + } + + var secret Secret + err = jsonutil.DecodeJSON(input, &secret) + + if err == nil { + return ParsePKIMap(secret.Data) + } + + return nil, errutil.UserError{Err: "unable to parse out of either secret data or a secret object"} +} + +func ParseDERKey(privateKeyBytes []byte) (signer crypto.Signer, format BlockType, err error) { + var firstError error + if signer, firstError = x509.ParseECPrivateKey(privateKeyBytes); firstError == nil { + format = ECBlock + return + } + + var secondError error + if signer, secondError = x509.ParsePKCS1PrivateKey(privateKeyBytes); secondError == nil { + format = PKCS1Block + return + } + + var thirdError error + var rawKey interface{} + if rawKey, thirdError = x509.ParsePKCS8PrivateKey(privateKeyBytes); thirdError == nil { + switch rawSigner := rawKey.(type) { + case *rsa.PrivateKey: + signer = rawSigner + case *ecdsa.PrivateKey: + signer = rawSigner + case ed25519.PrivateKey: + signer = rawSigner + default: + return nil, UnknownBlock, errutil.InternalError{Err: "unknown type for parsed PKCS8 Private Key"} + } + + format = PKCS8Block + return + } + + return nil, UnknownBlock, fmt.Errorf("got errors attempting to parse DER private key:\n1. %v\n2. %v\n3. 
%v", firstError, secondError, thirdError) +} + +func ParsePEMKey(keyPem string) (crypto.Signer, BlockType, error) { + pemBlock, _ := pem.Decode([]byte(keyPem)) + if pemBlock == nil { + return nil, UnknownBlock, errutil.UserError{Err: "no data found in PEM block"} + } + + return ParseDERKey(pemBlock.Bytes) +} + +// ParsePEMBundle takes a string of concatenated PEM-format certificate +// and private key values and decodes/parses them, checking validity along +// the way. The first certificate must be the subject certificate and issuing +// certificates may follow. There must be at most one private key. +func ParsePEMBundle(pemBundle string) (*ParsedCertBundle, error) { + if len(pemBundle) == 0 { + return nil, errutil.UserError{Err: "empty pem bundle"} + } + + pemBytes := []byte(pemBundle) + var pemBlock *pem.Block + parsedBundle := &ParsedCertBundle{} + var certPath []*CertBlock + + for len(pemBytes) > 0 { + pemBlock, pemBytes = pem.Decode(pemBytes) + if pemBlock == nil { + return nil, errutil.UserError{Err: "no data found in PEM block"} + } + + if signer, format, err := ParseDERKey(pemBlock.Bytes); err == nil { + if parsedBundle.PrivateKeyType != UnknownPrivateKey { + return nil, errutil.UserError{Err: "more than one private key given; provide only one private key in the bundle"} + } + + parsedBundle.PrivateKeyFormat = format + parsedBundle.PrivateKeyType = GetPrivateKeyTypeFromSigner(signer) + if parsedBundle.PrivateKeyType == UnknownPrivateKey { + return nil, errutil.UserError{Err: "Unknown type of private key included in the bundle: %v"} + } + + parsedBundle.PrivateKeyBytes = pemBlock.Bytes + parsedBundle.PrivateKey = signer + } else if certificates, err := x509.ParseCertificates(pemBlock.Bytes); err == nil { + certPath = append(certPath, &CertBlock{ + Certificate: certificates[0], + Bytes: pemBlock.Bytes, + }) + } else if x509.IsEncryptedPEMBlock(pemBlock) { + return nil, errutil.UserError{Err: "Encrypted private key given; provide only decrypted private key in the bundle"} + } + } + + for i, certBlock := range certPath { + if i == 0 { + parsedBundle.Certificate = certBlock.Certificate + parsedBundle.CertificateBytes = certBlock.Bytes + } else { + parsedBundle.CAChain = append(parsedBundle.CAChain, certBlock) + } + } + + if err := parsedBundle.Verify(); err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("verification of parsed bundle failed: %s", err)} + } + + return parsedBundle, nil +} + +// GeneratePrivateKey generates a private key with the specified type and key bits. +func GeneratePrivateKey(keyType string, keyBits int, container ParsedPrivateKeyContainer) error { + return generatePrivateKey(keyType, keyBits, container, nil) +} + +// GeneratePrivateKeyWithRandomSource generates a private key with the specified type and key bits. +// GeneratePrivateKeyWithRandomSource uses randomness from the entropyReader to generate the private key. +func GeneratePrivateKeyWithRandomSource(keyType string, keyBits int, container ParsedPrivateKeyContainer, entropyReader io.Reader) error { + return generatePrivateKey(keyType, keyBits, container, entropyReader) +} + +// generatePrivateKey generates a private key with the specified type and key bits. +// generatePrivateKey uses randomness from the entropyReader to generate the private key. 
+func generatePrivateKey(keyType string, keyBits int, container ParsedPrivateKeyContainer, entropyReader io.Reader) error { + var err error + var privateKeyType PrivateKeyType + var privateKeyBytes []byte + var privateKey crypto.Signer + + var randReader io.Reader = rand.Reader + if entropyReader != nil { + randReader = entropyReader + } + + switch keyType { + case "rsa": + // XXX: there is a false-positive CodeQL path here around keyBits; + // because of a default zero value in the TypeDurationSecond and + // TypeSignedDurationSecond cases of schema.DefaultOrZero(), it + // thinks it is possible to end up with < 2048 bit RSA Key here. + // While this is true for SSH keys, it isn't true for PKI keys + // due to ValidateKeyTypeLength(...) below. While we could close + // the report as a false-positive, enforcing a minimum keyBits size + // here of 2048 would ensure no other paths exist. + if keyBits < 2048 { + return errutil.InternalError{Err: fmt.Sprintf("insecure bit length for RSA private key: %d", keyBits)} + } + privateKeyType = RSAPrivateKey + privateKey, err = rsa.GenerateKey(randReader, keyBits) + if err != nil { + return errutil.InternalError{Err: fmt.Sprintf("error generating RSA private key: %v", err)} + } + privateKeyBytes = x509.MarshalPKCS1PrivateKey(privateKey.(*rsa.PrivateKey)) + case "ec": + privateKeyType = ECPrivateKey + var curve elliptic.Curve + switch keyBits { + case 224: + curve = elliptic.P224() + case 256: + curve = elliptic.P256() + case 384: + curve = elliptic.P384() + case 521: + curve = elliptic.P521() + default: + return errutil.UserError{Err: fmt.Sprintf("unsupported bit length for EC key: %d", keyBits)} + } + privateKey, err = ecdsa.GenerateKey(curve, randReader) + if err != nil { + return errutil.InternalError{Err: fmt.Sprintf("error generating EC private key: %v", err)} + } + privateKeyBytes, err = x509.MarshalECPrivateKey(privateKey.(*ecdsa.PrivateKey)) + if err != nil { + return errutil.InternalError{Err: fmt.Sprintf("error marshalling EC private key: %v", err)} + } + case "ed25519": + privateKeyType = Ed25519PrivateKey + _, privateKey, err = ed25519.GenerateKey(randReader) + if err != nil { + return errutil.InternalError{Err: fmt.Sprintf("error generating ed25519 private key: %v", err)} + } + privateKeyBytes, err = x509.MarshalPKCS8PrivateKey(privateKey.(ed25519.PrivateKey)) + if err != nil { + return errutil.InternalError{Err: fmt.Sprintf("error marshalling Ed25519 private key: %v", err)} + } + default: + return errutil.UserError{Err: fmt.Sprintf("unknown key type: %s", keyType)} + } + + container.SetParsedPrivateKey(privateKey, privateKeyType, privateKeyBytes) + return nil +} + +// GenerateSerialNumber generates a serial number suitable for a certificate +func GenerateSerialNumber() (*big.Int, error) { + return generateSerialNumber(rand.Reader) +} + +// GenerateSerialNumberWithRandomSource generates a serial number suitable +// for a certificate with custom entropy. 
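+// As the shared generateSerialNumber below shows, serials are drawn
+// uniformly from [0, 2^159), which keeps their DER encoding within the
+// 20-octet limit RFC 5280 places on certificate serial numbers.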
+func GenerateSerialNumberWithRandomSource(randReader io.Reader) (*big.Int, error) { + return generateSerialNumber(randReader) +} + +func generateSerialNumber(randReader io.Reader) (*big.Int, error) { + serial, err := rand.Int(randReader, (&big.Int{}).Exp(big.NewInt(2), big.NewInt(159), nil)) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("error generating serial number: %v", err)} + } + return serial, nil +} + +// ComparePublicKeysAndType compares two public keys and returns true if they match, +// false if their types or contents differ, and an error on unsupported key types. +func ComparePublicKeysAndType(key1Iface, key2Iface crypto.PublicKey) (bool, error) { + equal, err := ComparePublicKeys(key1Iface, key2Iface) + if err != nil { + if strings.Contains(err.Error(), "key types do not match:") { + return false, nil + } + } + + return equal, err +} + +// ComparePublicKeys compares two public keys and returns true if they match, +// returns an error if public key types are mismatched, or they are an unsupported key type. +func ComparePublicKeys(key1Iface, key2Iface crypto.PublicKey) (bool, error) { + switch key1Iface.(type) { + case *rsa.PublicKey: + key1 := key1Iface.(*rsa.PublicKey) + key2, ok := key2Iface.(*rsa.PublicKey) + if !ok { + return false, fmt.Errorf("key types do not match: %T and %T", key1Iface, key2Iface) + } + if key1.N.Cmp(key2.N) != 0 || + key1.E != key2.E { + return false, nil + } + return true, nil + + case *ecdsa.PublicKey: + key1 := key1Iface.(*ecdsa.PublicKey) + key2, ok := key2Iface.(*ecdsa.PublicKey) + if !ok { + return false, fmt.Errorf("key types do not match: %T and %T", key1Iface, key2Iface) + } + if key1.X.Cmp(key2.X) != 0 || + key1.Y.Cmp(key2.Y) != 0 { + return false, nil + } + key1Params := key1.Params() + key2Params := key2.Params() + if key1Params.P.Cmp(key2Params.P) != 0 || + key1Params.N.Cmp(key2Params.N) != 0 || + key1Params.B.Cmp(key2Params.B) != 0 || + key1Params.Gx.Cmp(key2Params.Gx) != 0 || + key1Params.Gy.Cmp(key2Params.Gy) != 0 || + key1Params.BitSize != key2Params.BitSize { + return false, nil + } + return true, nil + case ed25519.PublicKey: + key1 := key1Iface.(ed25519.PublicKey) + key2, ok := key2Iface.(ed25519.PublicKey) + if !ok { + return false, fmt.Errorf("key types do not match: %T and %T", key1Iface, key2Iface) + } + if !key1.Equal(key2) { + return false, nil + } + return true, nil + default: + return false, fmt.Errorf("cannot compare key with type %T", key1Iface) + } +} + +// ParsePublicKeyPEM is used to parse RSA and ECDSA public keys from PEMs +func ParsePublicKeyPEM(data []byte) (interface{}, error) { + block, data := pem.Decode(data) + if block != nil { + if len(bytes.TrimSpace(data)) > 0 { + return nil, errutil.UserError{Err: "unexpected trailing data after parsed PEM block"} + } + var rawKey interface{} + var err error + if rawKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + rawKey = cert.PublicKey + } else { + return nil, err + } + } + + switch key := rawKey.(type) { + case *rsa.PublicKey: + return key, nil + case *ecdsa.PublicKey: + return key, nil + case ed25519.PublicKey: + return key, nil + } + } + return nil, errors.New("data does not contain any valid public keys") +} + +// AddPolicyIdentifiers adds certificate policies extension, based on CreationBundle +func AddPolicyIdentifiers(data *CreationBundle, certTemplate *x509.Certificate) { + oidOnly := true + for _, oidStr := range data.Params.PolicyIdentifiers { + oid, err 
:= StringToOid(oidStr) + if err == nil { + certTemplate.PolicyIdentifiers = append(certTemplate.PolicyIdentifiers, oid) + } + if err != nil { + oidOnly = false + } + } + if !oidOnly { // Because all policy information is held in the same extension, when we use an extra extension to + // add policy qualifier information, that overwrites any information in the PolicyIdentifiers field on the Cert + // Template, so we need to reparse all the policy identifiers here + extension, err := CreatePolicyInformationExtensionFromStorageStrings(data.Params.PolicyIdentifiers) + if err == nil { + // If this errors out, don't add it, rely on the OIDs parsed into PolicyIdentifiers above + certTemplate.ExtraExtensions = append(certTemplate.ExtraExtensions, *extension) + } + } +} + +// AddExtKeyUsageOids adds custom extended key usage OIDs to certificate +func AddExtKeyUsageOids(data *CreationBundle, certTemplate *x509.Certificate) { + for _, oidstr := range data.Params.ExtKeyUsageOIDs { + oid, err := StringToOid(oidstr) + if err == nil { + certTemplate.UnknownExtKeyUsage = append(certTemplate.UnknownExtKeyUsage, oid) + } + } +} + +func HandleOtherCSRSANs(in *x509.CertificateRequest, sans map[string][]string) error { + certTemplate := &x509.Certificate{ + DNSNames: in.DNSNames, + IPAddresses: in.IPAddresses, + EmailAddresses: in.EmailAddresses, + URIs: in.URIs, + } + if err := HandleOtherSANs(certTemplate, sans); err != nil { + return err + } + if len(certTemplate.ExtraExtensions) > 0 { + for _, v := range certTemplate.ExtraExtensions { + in.ExtraExtensions = append(in.ExtraExtensions, v) + } + } + return nil +} + +func HandleOtherSANs(in *x509.Certificate, sans map[string][]string) error { + // If other SANs is empty we return which causes normal Go stdlib parsing + // of the other SAN types + if len(sans) == 0 { + return nil + } + + var rawValues []asn1.RawValue + + // We need to generate an IMPLICIT sequence for compatibility with OpenSSL + // -- it's an open question what the default for RFC 5280 actually is, see + // https://github.com/openssl/openssl/issues/5091 -- so we have to use + // cryptobyte because using the asn1 package's marshaling always produces + // an EXPLICIT sequence. Note that asn1 is way too magical according to + // agl, and cryptobyte is modeled after the CBB/CBS bits that agl put into + // boringssl. + for oid, vals := range sans { + for _, val := range vals { + var b cryptobyte.Builder + oidStr, err := StringToOid(oid) + if err != nil { + return err + } + b.AddASN1ObjectIdentifier(oidStr) + b.AddASN1(cbasn1.Tag(0).ContextSpecific().Constructed(), func(b *cryptobyte.Builder) { + b.AddASN1(cbasn1.UTF8String, func(b *cryptobyte.Builder) { + b.AddBytes([]byte(val)) + }) + }) + m, err := b.Bytes() + if err != nil { + return err + } + rawValues = append(rawValues, asn1.RawValue{Tag: 0, Class: 2, IsCompound: true, Bytes: m}) + } + } + + // If other SANs is empty we return which causes normal Go stdlib parsing + // of the other SAN types + if len(rawValues) == 0 { + return nil + } + + // Append any existing SANs, sans marshalling + rawValues = append(rawValues, marshalSANs(in.DNSNames, in.EmailAddresses, in.IPAddresses, in.URIs)...) 
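+ // Note: because the subjectAltName OID is added to ExtraExtensions
+ // below, Go's x509 package will use this extension verbatim and skip
+ // generating its own SAN extension; re-marshaling the standard name
+ // types here keeps them from being dropped from the final certificate.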
+ + // Marshal and add to ExtraExtensions + ext := pkix.Extension{ + // This is the defined OID for subjectAltName + Id: asn1.ObjectIdentifier{2, 5, 29, 17}, + } + var err error + ext.Value, err = asn1.Marshal(rawValues) + if err != nil { + return err + } + in.ExtraExtensions = append(in.ExtraExtensions, ext) + + return nil +} + +// Note: Taken from the Go source code since it's not public, and used in the +// modified function below (which also uses these consts upstream) +const ( + nameTypeEmail = 1 + nameTypeDNS = 2 + nameTypeURI = 6 + nameTypeIP = 7 +) + +// Note: Taken from the Go source code since it's not public, plus changed to not marshal +// marshalSANs marshals a list of addresses into a the contents of an X.509 +// SubjectAlternativeName extension. +func marshalSANs(dnsNames, emailAddresses []string, ipAddresses []net.IP, uris []*url.URL) []asn1.RawValue { + var rawValues []asn1.RawValue + for _, name := range dnsNames { + rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeDNS, Class: 2, Bytes: []byte(name)}) + } + for _, email := range emailAddresses { + rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeEmail, Class: 2, Bytes: []byte(email)}) + } + for _, rawIP := range ipAddresses { + // If possible, we always want to encode IPv4 addresses in 4 bytes. + ip := rawIP.To4() + if ip == nil { + ip = rawIP + } + rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeIP, Class: 2, Bytes: ip}) + } + for _, uri := range uris { + rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeURI, Class: 2, Bytes: []byte(uri.String())}) + } + return rawValues +} + +func StringToOid(in string) (asn1.ObjectIdentifier, error) { + split := strings.Split(in, ".") + ret := make(asn1.ObjectIdentifier, 0, len(split)) + for _, v := range split { + i, err := strconv.Atoi(v) + if err != nil { + return nil, err + } + ret = append(ret, i) + } + return asn1.ObjectIdentifier(ret), nil +} + +// Returns default key bits for the specified key type, or the present value +// if keyBits is non-zero. +func DefaultOrValueKeyBits(keyType string, keyBits int) (int, error) { + if keyBits == 0 { + newValue, present := defaultAlgorithmKeyBits[keyType] + if present { + keyBits = newValue + } /* else { + // We cannot return an error here as ed25519 (and potentially ed448 + // in the future) aren't in defaultAlgorithmKeyBits -- the value of + // the keyBits parameter is ignored under that algorithm. + } */ + } + + return keyBits, nil +} + +// Returns default signature hash bit length for the specified key type and +// bits, or the present value if hashBits is non-zero. Returns an error under +// certain internal circumstances. +func DefaultOrValueHashBits(keyType string, keyBits int, hashBits int) (int, error) { + if keyType == "ec" { + // Enforcement of curve moved to selectSignatureAlgorithmForECDSA. See + // note there about why. + } else if keyType == "rsa" && hashBits == 0 { + // To match previous behavior (and ignoring NIST's recommendations for + // hash size to align with RSA key sizes), default to SHA-2-256. + hashBits = 256 + } else if keyType == "ed25519" || keyType == "ed448" || keyType == "any" { + // No-op; ed25519 and ed448 internally specify their own hash and + // we do not need to select one. Double hashing isn't supported in + // certificate signing. Additionally, the any key type can't know + // what hash algorithm to use yet, so default to zero. 
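+ // (A zero value is accepted by ValidateSignatureLength below only
+ // because it returns early for these key types.)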
return 0, nil
+	}
+
+	return hashBits, nil
+}
+
+// Validates that the combination of keyType, keyBits, and hashBits is
+// valid together; replaces individual calls to ValidateSignatureLength and
+// ValidateKeyTypeLength. Also updates the value of keyBits and hashBits on
+// return.
+func ValidateDefaultOrValueKeyTypeSignatureLength(keyType string, keyBits int, hashBits int) (int, int, error) {
+	var err error
+
+	if keyBits, err = DefaultOrValueKeyBits(keyType, keyBits); err != nil {
+		return keyBits, hashBits, err
+	}
+
+	if err = ValidateKeyTypeLength(keyType, keyBits); err != nil {
+		return keyBits, hashBits, err
+	}
+
+	if hashBits, err = DefaultOrValueHashBits(keyType, keyBits, hashBits); err != nil {
+		return keyBits, hashBits, err
+	}
+
+	// Note that this check must come after we've selected a value for
+	// hashBits above, in the event it was left as the default, but we
+	// were allowed to update it.
+	if err = ValidateSignatureLength(keyType, hashBits); err != nil {
+		return keyBits, hashBits, err
+	}
+
+	return keyBits, hashBits, nil
+}
+
+// Validates that the length of the hash (in bits) used in the signature
+// calculation is a known, approved value.
+func ValidateSignatureLength(keyType string, hashBits int) error {
+	if keyType == "any" || keyType == "ec" || keyType == "ed25519" || keyType == "ed448" {
+		// ed25519 and ed448 include built-in hashing and are not externally
+		// configurable. There are three modes for each of these schemes:
+		//
+		// 1. Built-in hash (default, used in TLS, x509).
+		// 2. Double hash (notably used in some blockchain implementations,
+		//    but largely regarded as a specialized use case with security
+		//    concerns).
+		// 3. No hash (bring your own hash function, less commonly used).
+		//
+		// In all cases, we won't have a hash algorithm to validate here, so
+		// return nil.
+		//
+		// Additionally, when KeyType is any, we can't yet validate the
+		// signature algorithm size, so it takes the default zero value.
+		//
+		// When KeyType is ec, we also can't validate this value as we're
+		// forcefully ignoring the users' choice and specifying a value based
+		// on issuer type.
+		return nil
+	}
+
+	switch hashBits {
+	case 256:
+	case 384:
+	case 512:
+	default:
+		return fmt.Errorf("unsupported hash signature algorithm: %d", hashBits)
+	}
+
+	return nil
+}
+
+func ValidateKeyTypeLength(keyType string, keyBits int) error {
+	switch keyType {
+	case "rsa":
+		if keyBits < rsaMinimumSecureKeySize {
+			return fmt.Errorf("RSA keys < %d bits are unsafe and not supported: got %d", rsaMinimumSecureKeySize, keyBits)
+		}
+
+		switch keyBits {
+		case 2048:
+		case 3072:
+		case 4096:
+		case 8192:
+		default:
+			return fmt.Errorf("unsupported bit length for RSA key: %d", keyBits)
+		}
+	case "ec":
+		_, present := expectedNISTPCurveHashBits[keyBits]
+		if !present {
+			return fmt.Errorf("unsupported bit length for EC key: %d", keyBits)
+		}
+	case "any", "ed25519":
+	default:
+		return fmt.Errorf("unknown key type %s", keyType)
+	}
+
+	return nil
+}
+
+// CreateCertificate uses CreationBundle and the default rand.Reader to
+// generate a cert/keypair.
+func CreateCertificate(data *CreationBundle) (*ParsedCertBundle, error) {
+	return createCertificate(data, rand.Reader, generatePrivateKey)
+}
+
+// CreateCertificateWithRandomSource uses CreationBundle and a custom
+// io.Reader for randomness to generate a cert/keypair.
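+//
+// A minimal usage sketch (assuming a fully-populated CreationBundle named
+// data; the variable names here are illustrative only):
+//
+//	parsed, err := CreateCertificateWithRandomSource(data, rand.Reader)
+//	if err != nil {
+//		return err
+//	}
+//	bundle, err := parsed.ToCertBundle() // PEM-encode for storage/transport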
+func CreateCertificateWithRandomSource(data *CreationBundle, randReader io.Reader) (*ParsedCertBundle, error) {
+	return createCertificate(data, randReader, generatePrivateKey)
+}
+
+// KeyGenerator allows callers to override how/what generates the private key
+type KeyGenerator func(keyType string, keyBits int, container ParsedPrivateKeyContainer, entropyReader io.Reader) error
+
+func CreateCertificateWithKeyGenerator(data *CreationBundle, randReader io.Reader, keyGenerator KeyGenerator) (*ParsedCertBundle, error) {
+	return createCertificate(data, randReader, keyGenerator)
+}
+
+// Set the correct RSA signature algorithm on the certificate template
+func certTemplateSetSigAlgo(certTemplate *x509.Certificate, data *CreationBundle) {
+	if data.Params.UsePSS {
+		switch data.Params.SignatureBits {
+		case 256:
+			certTemplate.SignatureAlgorithm = x509.SHA256WithRSAPSS
+		case 384:
+			certTemplate.SignatureAlgorithm = x509.SHA384WithRSAPSS
+		case 512:
+			certTemplate.SignatureAlgorithm = x509.SHA512WithRSAPSS
+		}
+	} else {
+		switch data.Params.SignatureBits {
+		case 256:
+			certTemplate.SignatureAlgorithm = x509.SHA256WithRSA
+		case 384:
+			certTemplate.SignatureAlgorithm = x509.SHA384WithRSA
+		case 512:
+			certTemplate.SignatureAlgorithm = x509.SHA512WithRSA
+		}
+	}
+}
+
+// selectSignatureAlgorithmForRSA returns the proper x509.SignatureAlgorithm based on various properties set in the
+// Creation Bundle parameter. This method will default to a SHA-256 signature algorithm if the requested signature
+// bits value is not set/unknown.
+func selectSignatureAlgorithmForRSA(data *CreationBundle) x509.SignatureAlgorithm {
+	if data.Params.UsePSS {
+		switch data.Params.SignatureBits {
+		case 256:
+			return x509.SHA256WithRSAPSS
+		case 384:
+			return x509.SHA384WithRSAPSS
+		case 512:
+			return x509.SHA512WithRSAPSS
+		default:
+			return x509.SHA256WithRSAPSS
+		}
+	}
+
+	switch data.Params.SignatureBits {
+	case 256:
+		return x509.SHA256WithRSA
+	case 384:
+		return x509.SHA384WithRSA
+	case 512:
+		return x509.SHA512WithRSA
+	default:
+		return x509.SHA256WithRSA
+	}
+}
+
+func createCertificate(data *CreationBundle, randReader io.Reader, privateKeyGenerator KeyGenerator) (*ParsedCertBundle, error) {
+	var err error
+	result := &ParsedCertBundle{}
+
+	serialNumber, err := GenerateSerialNumber()
+	if err != nil {
+		return nil, err
+	}
+
+	if err := privateKeyGenerator(data.Params.KeyType,
+		data.Params.KeyBits,
+		result, randReader); err != nil {
+		return nil, err
+	}
+
+	subjKeyID, err := GetSubjKeyID(result.PrivateKey)
+	if err != nil {
+		return nil, errutil.InternalError{Err: fmt.Sprintf("error getting subject key ID: %s", err)}
+	}
+
+	certTemplate := &x509.Certificate{
+		SerialNumber:   serialNumber,
+		NotBefore:      time.Now().Add(-30 * time.Second),
+		NotAfter:       data.Params.NotAfter,
+		IsCA:           false,
+		SubjectKeyId:   subjKeyID,
+		Subject:        data.Params.Subject,
+		DNSNames:       data.Params.DNSNames,
+		EmailAddresses: data.Params.EmailAddresses,
+		IPAddresses:    data.Params.IPAddresses,
+		URIs:           data.Params.URIs,
+	}
+	if data.Params.NotBeforeDuration > 0 {
+		certTemplate.NotBefore = time.Now().Add(-1 * data.Params.NotBeforeDuration)
+	}
+
+	if err := HandleOtherSANs(certTemplate, data.Params.OtherSANs); err != nil {
+		return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling other SANs: {{err}}", err).Error()}
+	}
+
+	// Add this before calling AddKeyUsages
+	if data.SigningBundle == nil {
+		certTemplate.IsCA = true
+	} else if data.Params.BasicConstraintsValidForNonCA {
+		certTemplate.BasicConstraintsValid = true
+		certTemplate.IsCA = false
+	}
+
+	// This will only
be filled in from the generation paths + if len(data.Params.PermittedDNSDomains) > 0 { + certTemplate.PermittedDNSDomains = data.Params.PermittedDNSDomains + certTemplate.PermittedDNSDomainsCritical = true + } + + AddPolicyIdentifiers(data, certTemplate) + + AddKeyUsages(data, certTemplate) + + AddExtKeyUsageOids(data, certTemplate) + + certTemplate.IssuingCertificateURL = data.Params.URLs.IssuingCertificates + certTemplate.CRLDistributionPoints = data.Params.URLs.CRLDistributionPoints + certTemplate.OCSPServer = data.Params.URLs.OCSPServers + + var certBytes []byte + if data.SigningBundle != nil { + privateKeyType := data.SigningBundle.PrivateKeyType + if privateKeyType == ManagedPrivateKey { + privateKeyType = GetPrivateKeyTypeFromSigner(data.SigningBundle.PrivateKey) + } + switch privateKeyType { + case RSAPrivateKey: + certTemplateSetSigAlgo(certTemplate, data) + case Ed25519PrivateKey: + certTemplate.SignatureAlgorithm = x509.PureEd25519 + case ECPrivateKey: + certTemplate.SignatureAlgorithm = selectSignatureAlgorithmForECDSA(data.SigningBundle.PrivateKey.Public(), data.Params.SignatureBits) + } + + caCert := data.SigningBundle.Certificate + certTemplate.AuthorityKeyId = caCert.SubjectKeyId + + certBytes, err = x509.CreateCertificate(randReader, certTemplate, caCert, result.PrivateKey.Public(), data.SigningBundle.PrivateKey) + } else { + // Creating a self-signed root + if data.Params.MaxPathLength == 0 { + certTemplate.MaxPathLen = 0 + certTemplate.MaxPathLenZero = true + } else { + certTemplate.MaxPathLen = data.Params.MaxPathLength + } + + switch data.Params.KeyType { + case "rsa": + certTemplateSetSigAlgo(certTemplate, data) + case "ed25519": + certTemplate.SignatureAlgorithm = x509.PureEd25519 + case "ec": + certTemplate.SignatureAlgorithm = selectSignatureAlgorithmForECDSA(result.PrivateKey.Public(), data.Params.SignatureBits) + } + + certTemplate.AuthorityKeyId = subjKeyID + certTemplate.BasicConstraintsValid = true + certBytes, err = x509.CreateCertificate(randReader, certTemplate, certTemplate, result.PrivateKey.Public(), result.PrivateKey) + } + + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)} + } + + result.CertificateBytes = certBytes + result.Certificate, err = x509.ParseCertificate(certBytes) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate: %s", err)} + } + + if data.SigningBundle != nil { + if (len(data.SigningBundle.Certificate.AuthorityKeyId) > 0 && + !bytes.Equal(data.SigningBundle.Certificate.AuthorityKeyId, data.SigningBundle.Certificate.SubjectKeyId)) || + data.Params.ForceAppendCaChain { + var chain []*CertBlock + + signingChain := data.SigningBundle.CAChain + // Some bundles already include the root included in the chain, so don't include it twice. + if len(signingChain) == 0 || !bytes.Equal(signingChain[0].Bytes, data.SigningBundle.CertificateBytes) { + chain = append(chain, &CertBlock{ + Certificate: data.SigningBundle.Certificate, + Bytes: data.SigningBundle.CertificateBytes, + }) + } + + if len(signingChain) > 0 { + chain = append(chain, signingChain...) + } + + result.CAChain = chain + } + } + + return result, nil +} + +func selectSignatureAlgorithmForECDSA(pub crypto.PublicKey, signatureBits int) x509.SignatureAlgorithm { + // Previously we preferred the user-specified signature bits for ECDSA + // keys. 
However, this could result in using a longer hash function than + // the underlying NIST P-curve will encode (e.g., a SHA-512 hash with a + // P-256 key). This isn't ideal: the hash is implicitly truncated + // (effectively turning it into SHA-512/256) and we then need to rely + // on the prefix security of the hash. Since both NIST and Mozilla guidance + // suggest instead using the correct hash function, we should prefer that + // over the operator-specified signatureBits. + // + // Lastly, note that pub above needs to be the _signer's_ public key; + // the issue with DefaultOrValueHashBits is that it is called at role + // configuration time, which might _precede_ issuer generation. Thus + // it only has access to the desired key type and not the actual issuer. + // The reference from that function is reproduced below: + // + // > To comply with BSI recommendations Section 4.2 and Mozilla root + // > store policy section 5.1.2, enforce that NIST P-curves use a hash + // > length corresponding to curve length. Note that ed25519 does not + // > implement the "ec" key type. + key, ok := pub.(*ecdsa.PublicKey) + if !ok { + return x509.ECDSAWithSHA256 + } + switch key.Curve { + case elliptic.P224(), elliptic.P256(): + return x509.ECDSAWithSHA256 + case elliptic.P384(): + return x509.ECDSAWithSHA384 + case elliptic.P521(): + return x509.ECDSAWithSHA512 + default: + return x509.ECDSAWithSHA256 + } +} + +var ( + ExtensionBasicConstraintsOID = []int{2, 5, 29, 19} + ExtensionSubjectAltNameOID = []int{2, 5, 29, 17} +) + +// CreateCSR creates a CSR with the default rand.Reader to +// generate a cert/keypair. This is currently only meant +// for use when generating an intermediate certificate. +func CreateCSR(data *CreationBundle, addBasicConstraints bool) (*ParsedCSRBundle, error) { + return createCSR(data, addBasicConstraints, rand.Reader, generatePrivateKey) +} + +// CreateCSRWithRandomSource creates a CSR with a custom io.Reader +// for randomness to generate a cert/keypair. +func CreateCSRWithRandomSource(data *CreationBundle, addBasicConstraints bool, randReader io.Reader) (*ParsedCSRBundle, error) { + return createCSR(data, addBasicConstraints, randReader, generatePrivateKey) +} + +// CreateCSRWithKeyGenerator creates a CSR with a custom io.Reader +// for randomness to generate a cert/keypair with the provided private key generator. 
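+//
+// A sketch of a custom KeyGenerator (illustrative; a real generator must
+// populate the container via SetParsedPrivateKey, as generatePrivateKey does):
+//
+//	gen := func(keyType string, keyBits int, c ParsedPrivateKeyContainer, r io.Reader) error {
+//		return generatePrivateKey(keyType, keyBits, c, r) // or delegate to an HSM
+//	}
+//	csrBundle, err := CreateCSRWithKeyGenerator(data, false, rand.Reader, gen)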
+func CreateCSRWithKeyGenerator(data *CreationBundle, addBasicConstraints bool, randReader io.Reader, keyGenerator KeyGenerator) (*ParsedCSRBundle, error) { + return createCSR(data, addBasicConstraints, randReader, keyGenerator) +} + +func createCSR(data *CreationBundle, addBasicConstraints bool, randReader io.Reader, keyGenerator KeyGenerator) (*ParsedCSRBundle, error) { + var err error + result := &ParsedCSRBundle{} + + if err := keyGenerator(data.Params.KeyType, + data.Params.KeyBits, + result, randReader); err != nil { + return nil, err + } + + // Like many root CAs, other information is ignored + csrTemplate := &x509.CertificateRequest{ + Subject: data.Params.Subject, + DNSNames: data.Params.DNSNames, + EmailAddresses: data.Params.EmailAddresses, + IPAddresses: data.Params.IPAddresses, + URIs: data.Params.URIs, + } + + if err := HandleOtherCSRSANs(csrTemplate, data.Params.OtherSANs); err != nil { + return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling other SANs: {{err}}", err).Error()} + } + + if addBasicConstraints { + type basicConstraints struct { + IsCA bool `asn1:"optional"` + MaxPathLen int `asn1:"optional,default:-1"` + } + val, err := asn1.Marshal(basicConstraints{IsCA: true, MaxPathLen: -1}) + if err != nil { + return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling basic constraints: {{err}}", err).Error()} + } + ext := pkix.Extension{ + Id: ExtensionBasicConstraintsOID, + Value: val, + Critical: true, + } + csrTemplate.ExtraExtensions = append(csrTemplate.ExtraExtensions, ext) + } + + switch data.Params.KeyType { + case "rsa": + // use specified RSA algorithm defaulting to the appropriate SHA256 RSA signature type + csrTemplate.SignatureAlgorithm = selectSignatureAlgorithmForRSA(data) + case "ec": + csrTemplate.SignatureAlgorithm = selectSignatureAlgorithmForECDSA(result.PrivateKey.Public(), data.Params.SignatureBits) + case "ed25519": + csrTemplate.SignatureAlgorithm = x509.PureEd25519 + } + + csr, err := x509.CreateCertificateRequest(randReader, csrTemplate, result.PrivateKey) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)} + } + + result.CSRBytes = csr + result.CSR, err = x509.ParseCertificateRequest(csr) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate: %v", err)} + } + + if err = result.CSR.CheckSignature(); err != nil { + return nil, errors.New("failed signature validation for CSR") + } + + return result, nil +} + +// SignCertificate performs the heavy lifting +// of generating a certificate from a CSR. +// Returns a ParsedCertBundle sans private keys. +func SignCertificate(data *CreationBundle) (*ParsedCertBundle, error) { + return signCertificate(data, rand.Reader) +} + +// SignCertificateWithRandomSource generates a certificate +// from a CSR, using custom randomness from the randReader. +// Returns a ParsedCertBundle sans private keys. 
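+//
+// Illustrative call shape (data must carry non-nil Params, SigningBundle,
+// and CSR; signCertificate rejects nil values for any of them):
+//
+//	parsed, err := SignCertificateWithRandomSource(data, rand.Reader)
+//	if err != nil {
+//		return err // e.g. a UserError if the CSR signature is invalid
+//	}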
+func SignCertificateWithRandomSource(data *CreationBundle, randReader io.Reader) (*ParsedCertBundle, error) { + return signCertificate(data, randReader) +} + +func signCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertBundle, error) { + switch { + case data == nil: + return nil, errutil.UserError{Err: "nil data bundle given to signCertificate"} + case data.Params == nil: + return nil, errutil.UserError{Err: "nil parameters given to signCertificate"} + case data.SigningBundle == nil: + return nil, errutil.UserError{Err: "nil signing bundle given to signCertificate"} + case data.CSR == nil: + return nil, errutil.UserError{Err: "nil csr given to signCertificate"} + } + + err := data.CSR.CheckSignature() + if err != nil { + return nil, errutil.UserError{Err: "request signature invalid"} + } + + result := &ParsedCertBundle{} + + serialNumber, err := GenerateSerialNumber() + if err != nil { + return nil, err + } + + subjKeyID, err := getSubjectKeyIDFromBundle(data) + if err != nil { + return nil, err + } + + caCert := data.SigningBundle.Certificate + + certTemplate := &x509.Certificate{ + SerialNumber: serialNumber, + Subject: data.Params.Subject, + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: data.Params.NotAfter, + SubjectKeyId: subjKeyID[:], + AuthorityKeyId: caCert.SubjectKeyId, + } + if data.Params.NotBeforeDuration > 0 { + certTemplate.NotBefore = time.Now().Add(-1 * data.Params.NotBeforeDuration) + } + + privateKeyType := data.SigningBundle.PrivateKeyType + if privateKeyType == ManagedPrivateKey { + privateKeyType = GetPrivateKeyTypeFromSigner(data.SigningBundle.PrivateKey) + } + + switch privateKeyType { + case RSAPrivateKey: + certTemplateSetSigAlgo(certTemplate, data) + case ECPrivateKey: + switch data.Params.SignatureBits { + case 256: + certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA256 + case 384: + certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA384 + case 512: + certTemplate.SignatureAlgorithm = x509.ECDSAWithSHA512 + } + } + + if data.Params.UseCSRValues { + certTemplate.Subject = data.CSR.Subject + certTemplate.Subject.ExtraNames = certTemplate.Subject.Names + + certTemplate.DNSNames = data.CSR.DNSNames + certTemplate.EmailAddresses = data.CSR.EmailAddresses + certTemplate.IPAddresses = data.CSR.IPAddresses + certTemplate.URIs = data.CSR.URIs + + for _, name := range data.CSR.Extensions { + if !name.Id.Equal(ExtensionBasicConstraintsOID) && !(len(data.Params.OtherSANs) > 0 && name.Id.Equal(ExtensionSubjectAltNameOID)) { + certTemplate.ExtraExtensions = append(certTemplate.ExtraExtensions, name) + } + } + + } else { + certTemplate.DNSNames = data.Params.DNSNames + certTemplate.EmailAddresses = data.Params.EmailAddresses + certTemplate.IPAddresses = data.Params.IPAddresses + certTemplate.URIs = data.Params.URIs + } + + if err := HandleOtherSANs(certTemplate, data.Params.OtherSANs); err != nil { + return nil, errutil.InternalError{Err: errwrap.Wrapf("error marshaling other SANs: {{err}}", err).Error()} + } + + AddPolicyIdentifiers(data, certTemplate) + + AddKeyUsages(data, certTemplate) + + AddExtKeyUsageOids(data, certTemplate) + + var certBytes []byte + + certTemplate.IssuingCertificateURL = data.Params.URLs.IssuingCertificates + certTemplate.CRLDistributionPoints = data.Params.URLs.CRLDistributionPoints + certTemplate.OCSPServer = data.SigningBundle.URLs.OCSPServers + + if data.Params.IsCA { + certTemplate.BasicConstraintsValid = true + certTemplate.IsCA = true + + if data.SigningBundle.Certificate.MaxPathLen == 0 && + 
data.SigningBundle.Certificate.MaxPathLenZero {
+			return nil, errutil.UserError{Err: "signing certificate has a max path length of zero, and cannot issue further CA certificates"}
+		}
+
+		certTemplate.MaxPathLen = data.Params.MaxPathLength
+		if certTemplate.MaxPathLen == 0 {
+			certTemplate.MaxPathLenZero = true
+		}
+	} else if data.Params.BasicConstraintsValidForNonCA {
+		certTemplate.BasicConstraintsValid = true
+		certTemplate.IsCA = false
+	}
+
+	if len(data.Params.PermittedDNSDomains) > 0 {
+		certTemplate.PermittedDNSDomains = data.Params.PermittedDNSDomains
+		certTemplate.PermittedDNSDomainsCritical = true
+	}
+
+	certBytes, err = x509.CreateCertificate(randReader, certTemplate, caCert, data.CSR.PublicKey, data.SigningBundle.PrivateKey)
+
+	if err != nil {
+		return nil, errutil.InternalError{Err: fmt.Sprintf("unable to create certificate: %s", err)}
+	}
+
+	result.CertificateBytes = certBytes
+	result.Certificate, err = x509.ParseCertificate(certBytes)
+	if err != nil {
+		return nil, errutil.InternalError{Err: fmt.Sprintf("unable to parse created certificate: %s", err)}
+	}
+
+	result.CAChain = data.SigningBundle.GetFullChain()
+
+	return result, nil
+}
+
+func NewCertPool(reader io.Reader) (*x509.CertPool, error) {
+	pemBlock, err := ioutil.ReadAll(reader)
+	if err != nil {
+		return nil, err
+	}
+	certs, err := parseCertsPEM(pemBlock)
+	if err != nil {
+		return nil, fmt.Errorf("error reading certs: %s", err)
+	}
+	pool := x509.NewCertPool()
+	for _, cert := range certs {
+		pool.AddCert(cert)
+	}
+	return pool, nil
+}
+
+// parseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array
+// Returns an error if a certificate could not be parsed, or if the data does not contain any certificates
+func parseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) {
+	ok := false
+	certs := []*x509.Certificate{}
+	for len(pemCerts) > 0 {
+		var block *pem.Block
+		block, pemCerts = pem.Decode(pemCerts)
+		if block == nil {
+			break
+		}
+		// Only use PEM "CERTIFICATE" blocks without extra headers
+		if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
+			continue
+		}
+
+		cert, err := x509.ParseCertificate(block.Bytes)
+		if err != nil {
+			return certs, err
+		}
+
+		certs = append(certs, cert)
+		ok = true
+	}
+
+	if !ok {
+		return certs, errors.New("data does not contain any valid certificates")
+	}
+	return certs, nil
+}
+
+// GetPublicKeySize returns the key size in bits for a given arbitrary crypto.PublicKey
+// Returns -1 for an unsupported key type.
+func GetPublicKeySize(key crypto.PublicKey) int {
+	if key, ok := key.(*rsa.PublicKey); ok {
+		return key.Size() * 8
+	}
+	if key, ok := key.(*ecdsa.PublicKey); ok {
+		return key.Params().BitSize
+	}
+	if key, ok := key.(ed25519.PublicKey); ok {
+		return len(key) * 8
+	}
+	if key, ok := key.(dsa.PublicKey); ok {
+		return key.Y.BitLen()
+	}
+
+	return -1
+}
+
+// CreateKeyBundle creates a KeyBundle struct object which includes a generated key
+// of keyType with keyBits leveraging the randomness from randReader.
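+//
+// For example (a sketch; the key type and bit length must satisfy
+// ValidateKeyTypeLength):
+//
+//	kb, err := CreateKeyBundle("rsa", 2048, rand.Reader)
+//	if err != nil {
+//		return err
+//	}
+//	pemKey, err := kb.ToPrivateKeyPemString()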
+func CreateKeyBundle(keyType string, keyBits int, randReader io.Reader) (KeyBundle, error) {
+	return CreateKeyBundleWithKeyGenerator(keyType, keyBits, randReader, generatePrivateKey)
+}
+
+// CreateKeyBundleWithKeyGenerator creates a KeyBundle struct object which includes
+// a generated key of keyType with keyBits leveraging the randomness from randReader and
+// delegates the actual key generation to keyGenerator
+func CreateKeyBundleWithKeyGenerator(keyType string, keyBits int, randReader io.Reader, keyGenerator KeyGenerator) (KeyBundle, error) {
+	result := KeyBundle{}
+	if err := keyGenerator(keyType, keyBits, &result, randReader); err != nil {
+		return result, err
+	}
+	return result, nil
+}
+
+// CreateDeltaCRLIndicatorExt allows creating correctly formed delta CRLs
+// that point back to the last complete CRL that they're based on.
+func CreateDeltaCRLIndicatorExt(completeCRLNumber int64) (pkix.Extension, error) {
+	bigNum := big.NewInt(completeCRLNumber)
+	bigNumValue, err := asn1.Marshal(bigNum)
+	if err != nil {
+		return pkix.Extension{}, fmt.Errorf("unable to marshal complete CRL number (%v): %v", completeCRLNumber, err)
+	}
+	return pkix.Extension{
+		Id: DeltaCRLIndicatorOID,
+		// > When a conforming CRL issuer generates a delta CRL, the delta
+		// > CRL MUST include a critical delta CRL indicator extension.
+		Critical: true,
+		// This extension only includes the complete CRL number:
+		//
+		// > BaseCRLNumber ::= CRLNumber
+		//
+		// But, this needs to be encoded as a big number for encoding/asn1
+		// to work properly.
+		Value: bigNumValue,
+	}, nil
+}
+
+// ParseBasicConstraintExtension parses a basic constraint pkix.Extension, useful when validating
+// whether CSRs are requesting CA privileges, as Go does not expose its implementation. Values returned are
+// IsCA, MaxPathLen, or an error. If MaxPathLen was not set, a value of -1 will be returned.
+func ParseBasicConstraintExtension(ext pkix.Extension) (bool, int, error) {
+	if !ext.Id.Equal(ExtensionBasicConstraintsOID) {
+		return false, -1, fmt.Errorf("passed in extension was not a basic constraint extension")
+	}
+
+	// All elements are set to optional here, as it is possible that we receive a CSR with the extension
+	// containing an empty sequence by spec.
+	type basicConstraints struct {
+		IsCA       bool `asn1:"optional"`
+		MaxPathLen int  `asn1:"optional,default:-1"`
+	}
+	bc := &basicConstraints{}
+	leftOver, err := asn1.Unmarshal(ext.Value, bc)
+	if err != nil {
+		return false, -1, fmt.Errorf("failed unmarshalling extension value: %w", err)
+	}
+
+	numLeftOver := len(bytes.TrimSpace(leftOver))
+	if numLeftOver > 0 {
+		return false, -1, fmt.Errorf("%d extra bytes within basic constraints value extension", numLeftOver)
+	}
+
+	return bc.IsCA, bc.MaxPathLen, nil
+}
+
+// CreateBasicConstraintExtension creates a basic constraint extension based on the inputs.
+// If isCa is false, an empty value sequence is returned and maxPath is
+// ignored. If isCa is true, maxPath can be set to -1 to omit a maxPath value.
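+//
+// A round-trip sketch with ParseBasicConstraintExtension above (illustrative):
+//
+//	ext, err := CreateBasicConstraintExtension(true, 2)
+//	if err == nil {
+//		isCA, maxPath, perr := ParseBasicConstraintExtension(ext) // true, 2, nil
+//		_, _, _ = isCA, maxPath, perr
+//	}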
+func CreateBasicConstraintExtension(isCa bool, maxPath int) (pkix.Extension, error) { + var asn1Bytes []byte + var err error + + switch { + case isCa && maxPath >= 0: + CaAndMaxPathLen := struct { + IsCa bool `asn1:""` + MaxPathLen int `asn1:""` + }{ + IsCa: isCa, + MaxPathLen: maxPath, + } + asn1Bytes, err = asn1.Marshal(CaAndMaxPathLen) + case isCa && maxPath < 0: + justCa := struct { + IsCa bool `asn1:""` + }{IsCa: isCa} + asn1Bytes, err = asn1.Marshal(justCa) + default: + asn1Bytes, err = asn1.Marshal(struct{}{}) + } + + if err != nil { + return pkix.Extension{}, err + } + + return pkix.Extension{ + Id: ExtensionBasicConstraintsOID, + Critical: true, + Value: asn1Bytes, + }, nil +} diff --git a/sdk/helper/certutil/types.go b/sdk/helper/certutil/types.go new file mode 100644 index 0000000..039ff8a --- /dev/null +++ b/sdk/helper/certutil/types.go @@ -0,0 +1,1021 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package certutil contains helper functions that are mostly used +// with the PKI backend but can be generally useful. Functionality +// includes helpers for converting a certificate/private key bundle +// between DER and PEM, printing certificate serial numbers, and more. +// +// Functionality specific to the PKI backend includes some types +// and helper methods to make requesting certificates from the +// backend easy. +package certutil + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "math/big" + "net" + "net/url" + "strings" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/helper/errutil" +) + +const ( + PrivateKeyTypeP521 = "p521" +) + +// This can be one of a few key types so the different params may or may not be filled +type ClusterKeyParams struct { + Type string `json:"type" structs:"type" mapstructure:"type"` + X *big.Int `json:"x" structs:"x" mapstructure:"x"` + Y *big.Int `json:"y" structs:"y" mapstructure:"y"` + D *big.Int `json:"d" structs:"d" mapstructure:"d"` +} + +// Secret is used to attempt to unmarshal a Vault secret +// JSON response, as a convenience +type Secret struct { + Data map[string]interface{} `json:"data"` +} + +// PrivateKeyType holds a string representation of the type of private key (ec +// or rsa) referenced in CertBundle and ParsedCertBundle. 
This uses colloquial +// names rather than official names, to eliminate confusion +type PrivateKeyType string + +// Well-known PrivateKeyTypes +const ( + UnknownPrivateKey PrivateKeyType = "" + RSAPrivateKey PrivateKeyType = "rsa" + ECPrivateKey PrivateKeyType = "ec" + Ed25519PrivateKey PrivateKeyType = "ed25519" + ManagedPrivateKey PrivateKeyType = "ManagedPrivateKey" +) + +// TLSUsage controls whether the intended usage of a *tls.Config +// returned from ParsedCertBundle.getTLSConfig is for server use, +// client use, or both, which affects which values are set +type TLSUsage int + +// Well-known TLSUsage types +const ( + TLSUnknown TLSUsage = 0 + TLSServer TLSUsage = 1 << iota + TLSClient +) + +// BlockType indicates the serialization format of the key +type BlockType string + +// Well-known formats +const ( + UnknownBlock BlockType = "" + PKCS1Block BlockType = "RSA PRIVATE KEY" + PKCS8Block BlockType = "PRIVATE KEY" + ECBlock BlockType = "EC PRIVATE KEY" +) + +// ParsedPrivateKeyContainer allows common key setting for certs and CSRs +type ParsedPrivateKeyContainer interface { + SetParsedPrivateKey(crypto.Signer, PrivateKeyType, []byte) +} + +// CertBlock contains the DER-encoded certificate and the PEM +// block's byte array +type CertBlock struct { + Certificate *x509.Certificate + Bytes []byte +} + +// CertBundle contains a key type, a PEM-encoded private key, +// a PEM-encoded certificate, and a string-encoded serial number, +// returned from a successful Issue request +type CertBundle struct { + PrivateKeyType PrivateKeyType `json:"private_key_type" structs:"private_key_type" mapstructure:"private_key_type"` + Certificate string `json:"certificate" structs:"certificate" mapstructure:"certificate"` + IssuingCA string `json:"issuing_ca" structs:"issuing_ca" mapstructure:"issuing_ca"` + CAChain []string `json:"ca_chain" structs:"ca_chain" mapstructure:"ca_chain"` + PrivateKey string `json:"private_key" structs:"private_key" mapstructure:"private_key"` + SerialNumber string `json:"serial_number" structs:"serial_number" mapstructure:"serial_number"` +} + +// ParsedCertBundle contains a key type, a DER-encoded private key, +// and a DER-encoded certificate +type ParsedCertBundle struct { + PrivateKeyType PrivateKeyType + PrivateKeyFormat BlockType + PrivateKeyBytes []byte + PrivateKey crypto.Signer + CertificateBytes []byte + Certificate *x509.Certificate + CAChain []*CertBlock +} + +// CSRBundle contains a key type, a PEM-encoded private key, +// and a PEM-encoded CSR +type CSRBundle struct { + PrivateKeyType PrivateKeyType `json:"private_key_type" structs:"private_key_type" mapstructure:"private_key_type"` + CSR string `json:"csr" structs:"csr" mapstructure:"csr"` + PrivateKey string `json:"private_key" structs:"private_key" mapstructure:"private_key"` +} + +// ParsedCSRBundle contains a key type, a DER-encoded private key, +// and a DER-encoded certificate request +type ParsedCSRBundle struct { + PrivateKeyType PrivateKeyType + PrivateKeyBytes []byte + PrivateKey crypto.Signer + CSRBytes []byte + CSR *x509.CertificateRequest +} + +type KeyBundle struct { + PrivateKeyType PrivateKeyType + PrivateKeyBytes []byte + PrivateKey crypto.Signer +} + +func GetPrivateKeyTypeFromSigner(signer crypto.Signer) PrivateKeyType { + // We look at the public key types to work-around limitations/typing of managed keys. 
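+	// For example, an HSM-backed managed key whose concrete signer type is
+	// opaque still exposes *ecdsa.PublicKey via Public(), and so maps to
+	// ECPrivateKey below (an illustrative case, not an exhaustive list).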
+ switch signer.Public().(type) { + case *rsa.PublicKey: + return RSAPrivateKey + case *ecdsa.PublicKey: + return ECPrivateKey + case ed25519.PublicKey: + return Ed25519PrivateKey + } + return UnknownPrivateKey +} + +// ToPEMBundle converts a string-based certificate bundle +// to a PEM-based string certificate bundle in trust path +// order, leaf certificate first +func (c *CertBundle) ToPEMBundle() string { + var result []string + + if len(c.PrivateKey) > 0 { + result = append(result, c.PrivateKey) + } + if len(c.Certificate) > 0 { + result = append(result, c.Certificate) + } + if len(c.CAChain) > 0 { + result = append(result, c.CAChain...) + } + + return strings.Join(result, "\n") +} + +// ToParsedCertBundle converts a string-based certificate bundle +// to a byte-based raw certificate bundle +func (c *CertBundle) ToParsedCertBundle() (*ParsedCertBundle, error) { + return c.ToParsedCertBundleWithExtractor(extractAndSetPrivateKey) +} + +// PrivateKeyExtractor extract out a private key from the passed in +// CertBundle and set the appropriate bits within the ParsedCertBundle. +type PrivateKeyExtractor func(c *CertBundle, parsedBundle *ParsedCertBundle) error + +func (c *CertBundle) ToParsedCertBundleWithExtractor(privateKeyExtractor PrivateKeyExtractor) (*ParsedCertBundle, error) { + var err error + var pemBlock *pem.Block + result := &ParsedCertBundle{} + + err = privateKeyExtractor(c, result) + if err != nil { + return nil, err + } + + if len(c.Certificate) > 0 { + pemBlock, _ = pem.Decode([]byte(c.Certificate)) + if pemBlock == nil { + return nil, errutil.UserError{Err: "Error decoding certificate from cert bundle"} + } + result.CertificateBytes = pemBlock.Bytes + result.Certificate, err = x509.ParseCertificate(result.CertificateBytes) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle: %v", err)} + } + } + switch { + case len(c.CAChain) > 0: + for _, cert := range c.CAChain { + pemBlock, _ := pem.Decode([]byte(cert)) + if pemBlock == nil { + return nil, errutil.UserError{Err: "Error decoding certificate from cert bundle"} + } + + parsedCert, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle via CA chain: %v", err)} + } + + certBlock := &CertBlock{ + Bytes: pemBlock.Bytes, + Certificate: parsedCert, + } + result.CAChain = append(result.CAChain, certBlock) + } + + // For backwards compatibility + case len(c.IssuingCA) > 0: + pemBlock, _ = pem.Decode([]byte(c.IssuingCA)) + if pemBlock == nil { + return nil, errutil.UserError{Err: "Error decoding ca certificate from cert bundle"} + } + + parsedCert, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle via issuing CA: %v", err)} + } + + certBlock := &CertBlock{ + Bytes: pemBlock.Bytes, + Certificate: parsedCert, + } + result.CAChain = append(result.CAChain, certBlock) + } + + // Populate if it isn't there already + if len(c.SerialNumber) == 0 && len(c.Certificate) > 0 { + c.SerialNumber = GetHexFormatted(result.Certificate.SerialNumber.Bytes(), ":") + } + + return result, nil +} + +func extractAndSetPrivateKey(c *CertBundle, parsedBundle *ParsedCertBundle) error { + if len(c.PrivateKey) == 0 { + return nil + } + + pemBlock, _ := pem.Decode([]byte(c.PrivateKey)) + if pemBlock == nil { + return errutil.UserError{Err: "Error 
decoding private key from cert bundle"} + } + + parsedBundle.PrivateKeyBytes = pemBlock.Bytes + parsedBundle.PrivateKeyFormat = BlockType(strings.TrimSpace(pemBlock.Type)) + + switch parsedBundle.PrivateKeyFormat { + case ECBlock: + parsedBundle.PrivateKeyType, c.PrivateKeyType = ECPrivateKey, ECPrivateKey + case PKCS1Block: + c.PrivateKeyType, parsedBundle.PrivateKeyType = RSAPrivateKey, RSAPrivateKey + case PKCS8Block: + t, err := getPKCS8Type(pemBlock.Bytes) + if err != nil { + return errutil.UserError{Err: fmt.Sprintf("Error getting key type from pkcs#8: %v", err)} + } + parsedBundle.PrivateKeyType = t + switch t { + case ECPrivateKey: + c.PrivateKeyType = ECPrivateKey + case RSAPrivateKey: + c.PrivateKeyType = RSAPrivateKey + case Ed25519PrivateKey: + c.PrivateKeyType = Ed25519PrivateKey + case ManagedPrivateKey: + c.PrivateKeyType = ManagedPrivateKey + } + default: + return errutil.UserError{Err: fmt.Sprintf("Unsupported key block type: %s", pemBlock.Type)} + } + + var err error + parsedBundle.PrivateKey, err = parsedBundle.getSigner() + if err != nil { + return errutil.UserError{Err: fmt.Sprintf("Error getting signer: %s", err)} + } + return nil +} + +// ToCertBundle converts a byte-based raw DER certificate bundle +// to a PEM-based string certificate bundle +func (p *ParsedCertBundle) ToCertBundle() (*CertBundle, error) { + result := &CertBundle{} + block := pem.Block{ + Type: "CERTIFICATE", + } + + if p.Certificate != nil { + result.SerialNumber = strings.TrimSpace(GetHexFormatted(p.Certificate.SerialNumber.Bytes(), ":")) + } + + if p.CertificateBytes != nil && len(p.CertificateBytes) > 0 { + block.Bytes = p.CertificateBytes + result.Certificate = strings.TrimSpace(string(pem.EncodeToMemory(&block))) + } + + for _, caCert := range p.CAChain { + block.Bytes = caCert.Bytes + certificate := strings.TrimSpace(string(pem.EncodeToMemory(&block))) + + result.CAChain = append(result.CAChain, certificate) + } + + if p.PrivateKeyBytes != nil && len(p.PrivateKeyBytes) > 0 { + block.Type = string(p.PrivateKeyFormat) + block.Bytes = p.PrivateKeyBytes + result.PrivateKeyType = p.PrivateKeyType + + // Handle bundle not parsed by us + if block.Type == "" { + switch p.PrivateKeyType { + case ECPrivateKey: + block.Type = string(ECBlock) + case RSAPrivateKey: + block.Type = string(PKCS1Block) + case Ed25519PrivateKey: + block.Type = string(PKCS8Block) + } + } + + result.PrivateKey = strings.TrimSpace(string(pem.EncodeToMemory(&block))) + } + + return result, nil +} + +// Verify checks if the parsed bundle is valid. It validates the public +// key of the certificate to the private key and checks the certificate trust +// chain for path issues. 
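+//
+// A typical check after parsing an issued bundle (sketch):
+//
+//	parsed, err := certBundle.ToParsedCertBundle()
+//	if err == nil {
+//		err = parsed.Verify() // key/cert match plus AKID/SKID chain linkage
+//	}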
+func (p *ParsedCertBundle) Verify() error {
+	// If private key exists, check if it matches the public key of cert
+	if p.PrivateKey != nil && p.Certificate != nil {
+		equal, err := ComparePublicKeys(p.Certificate.PublicKey, p.PrivateKey.Public())
+		if err != nil {
+			return errwrap.Wrapf("could not compare public and private keys: {{err}}", err)
+		}
+		if !equal {
+			return fmt.Errorf("public key of certificate does not match private key")
+		}
+	}
+
+	certPath := p.GetCertificatePath()
+	if len(certPath) > 1 {
+		for i, caCert := range certPath[1:] {
+			if !caCert.Certificate.IsCA {
+				return fmt.Errorf("certificate %d of certificate chain is not a certificate authority", i+1)
+			}
+			if !bytes.Equal(certPath[i].Certificate.AuthorityKeyId, caCert.Certificate.SubjectKeyId) {
+				return fmt.Errorf("certificate %d of certificate chain ca trust path is incorrect (%q/%q) (%X/%X)",
+					i+1,
+					certPath[i].Certificate.Subject.CommonName, caCert.Certificate.Subject.CommonName,
+					certPath[i].Certificate.AuthorityKeyId, caCert.Certificate.SubjectKeyId)
+			}
+		}
+	}
+
+	return nil
+}
+
+// GetCertificatePath returns a slice of certificates making up a path, pulled
+// from the parsed cert bundle
+func (p *ParsedCertBundle) GetCertificatePath() []*CertBlock {
+	var certPath []*CertBlock
+
+	certPath = append(certPath, &CertBlock{
+		Certificate: p.Certificate,
+		Bytes:       p.CertificateBytes,
+	})
+
+	if len(p.CAChain) > 0 {
+		// Root CA puts itself in the chain; compare serial numbers by value
+		// (Cmp) rather than by *big.Int pointer so a re-parsed copy of the
+		// same certificate is still detected.
+		if p.CAChain[0].Certificate.SerialNumber.Cmp(p.Certificate.SerialNumber) != 0 {
+			certPath = append(certPath, p.CAChain...)
+		}
+	}
+
+	return certPath
+}
+
+// GetSigner returns a crypto.Signer corresponding to the private key
+// contained in this ParsedCertBundle. The Signer contains a Public() function
+// for getting the corresponding public.
The Signer can also be +// type-converted to private keys +func (p *ParsedCertBundle) getSigner() (crypto.Signer, error) { + var signer crypto.Signer + var err error + + if p.PrivateKeyBytes == nil || len(p.PrivateKeyBytes) == 0 { + return nil, errutil.UserError{Err: "Given parsed cert bundle does not have private key information"} + } + + switch p.PrivateKeyFormat { + case ECBlock: + signer, err = x509.ParseECPrivateKey(p.PrivateKeyBytes) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private EC key: %s", err)} + } + + case PKCS1Block: + signer, err = x509.ParsePKCS1PrivateKey(p.PrivateKeyBytes) + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private RSA key: %s", err)} + } + + case PKCS8Block: + if k, err := x509.ParsePKCS8PrivateKey(p.PrivateKeyBytes); err == nil { + switch k := k.(type) { + case *rsa.PrivateKey, *ecdsa.PrivateKey, ed25519.PrivateKey: + return k.(crypto.Signer), nil + default: + return nil, errutil.UserError{Err: "Found unknown private key type in pkcs#8 wrapping"} + } + } + return nil, errutil.UserError{Err: fmt.Sprintf("Failed to parse pkcs#8 key: %v", err)} + default: + return nil, errutil.UserError{Err: "Unable to determine type of private key; only RSA and EC are supported"} + } + return signer, nil +} + +// SetParsedPrivateKey sets the private key parameters on the bundle +func (p *ParsedCertBundle) SetParsedPrivateKey(privateKey crypto.Signer, privateKeyType PrivateKeyType, privateKeyBytes []byte) { + p.PrivateKey = privateKey + p.PrivateKeyType = privateKeyType + p.PrivateKeyBytes = privateKeyBytes +} + +func getPKCS8Type(bs []byte) (PrivateKeyType, error) { + k, err := x509.ParsePKCS8PrivateKey(bs) + if err != nil { + return UnknownPrivateKey, errutil.UserError{Err: fmt.Sprintf("Failed to parse pkcs#8 key: %v", err)} + } + + switch k.(type) { + case *ecdsa.PrivateKey: + return ECPrivateKey, nil + case *rsa.PrivateKey: + return RSAPrivateKey, nil + case ed25519.PrivateKey: + return Ed25519PrivateKey, nil + default: + return UnknownPrivateKey, errutil.UserError{Err: "Found unknown private key type in pkcs#8 wrapping"} + } +} + +// ToParsedCSRBundle converts a string-based CSR bundle +// to a byte-based raw CSR bundle +func (c *CSRBundle) ToParsedCSRBundle() (*ParsedCSRBundle, error) { + result := &ParsedCSRBundle{} + var err error + var pemBlock *pem.Block + + if len(c.PrivateKey) > 0 { + pemBlock, _ = pem.Decode([]byte(c.PrivateKey)) + if pemBlock == nil { + return nil, errutil.UserError{Err: "Error decoding private key from cert bundle"} + } + result.PrivateKeyBytes = pemBlock.Bytes + + switch BlockType(pemBlock.Type) { + case ECBlock: + result.PrivateKeyType = ECPrivateKey + case PKCS1Block: + result.PrivateKeyType = RSAPrivateKey + default: + // Try to figure it out and correct + if _, err := x509.ParseECPrivateKey(pemBlock.Bytes); err == nil { + result.PrivateKeyType = ECPrivateKey + c.PrivateKeyType = "ec" + } else if _, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes); err == nil { + result.PrivateKeyType = RSAPrivateKey + c.PrivateKeyType = "rsa" + } else if _, err := x509.ParsePKCS8PrivateKey(pemBlock.Bytes); err == nil { + result.PrivateKeyType = Ed25519PrivateKey + c.PrivateKeyType = "ed25519" + } else { + return nil, errutil.UserError{Err: fmt.Sprintf("Unknown private key type in bundle: %s", c.PrivateKeyType)} + } + } + + result.PrivateKey, err = result.getSigner() + if err != nil { + return nil, errutil.UserError{Err: fmt.Sprintf("Error getting signer: %s", err)} + } 
+	}
+
+	if len(c.CSR) > 0 {
+		pemBlock, _ = pem.Decode([]byte(c.CSR))
+		if pemBlock == nil {
+			return nil, errutil.UserError{Err: "Error decoding CSR from bundle"}
+		}
+		result.CSRBytes = pemBlock.Bytes
+		result.CSR, err = x509.ParseCertificateRequest(result.CSRBytes)
+		if err != nil {
+			return nil, errutil.UserError{Err: fmt.Sprintf("Error encountered parsing certificate bytes from raw bundle via CSR: %v", err)}
+		}
+	}
+
+	return result, nil
+}
+
+// ToCSRBundle converts a byte-based raw DER certificate bundle
+// to a PEM-based string certificate bundle
+func (p *ParsedCSRBundle) ToCSRBundle() (*CSRBundle, error) {
+	result := &CSRBundle{}
+	block := pem.Block{
+		Type: "CERTIFICATE REQUEST",
+	}
+
+	if p.CSRBytes != nil && len(p.CSRBytes) > 0 {
+		block.Bytes = p.CSRBytes
+		result.CSR = strings.TrimSpace(string(pem.EncodeToMemory(&block)))
+	}
+
+	if p.PrivateKeyBytes != nil && len(p.PrivateKeyBytes) > 0 {
+		block.Bytes = p.PrivateKeyBytes
+		switch p.PrivateKeyType {
+		case RSAPrivateKey:
+			result.PrivateKeyType = "rsa"
+			block.Type = "RSA PRIVATE KEY"
+		case ECPrivateKey:
+			result.PrivateKeyType = "ec"
+			block.Type = "EC PRIVATE KEY"
+		case Ed25519PrivateKey:
+			result.PrivateKeyType = "ed25519"
+			block.Type = "PRIVATE KEY"
+		case ManagedPrivateKey:
+			result.PrivateKeyType = ManagedPrivateKey
+			block.Type = "PRIVATE KEY"
+		default:
+			return nil, errutil.InternalError{Err: "Could not determine private key type when creating block"}
+		}
+		result.PrivateKey = strings.TrimSpace(string(pem.EncodeToMemory(&block)))
+	}
+
+	return result, nil
+}
+
+// GetSigner returns a crypto.Signer corresponding to the private key
+// contained in this ParsedCSRBundle. The Signer contains a Public() function
+// for getting the corresponding public. The Signer can also be
+// type-converted to private keys
+func (p *ParsedCSRBundle) getSigner() (crypto.Signer, error) {
+	var signer crypto.Signer
+	var err error
+
+	if p.PrivateKeyBytes == nil || len(p.PrivateKeyBytes) == 0 {
+		return nil, errutil.UserError{Err: "Given parsed cert bundle does not have private key information"}
+	}
+
+	switch p.PrivateKeyType {
+	case ECPrivateKey:
+		signer, err = x509.ParseECPrivateKey(p.PrivateKeyBytes)
+		if err != nil {
+			return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private EC key: %s", err)}
+		}
+
+	case RSAPrivateKey:
+		signer, err = x509.ParsePKCS1PrivateKey(p.PrivateKeyBytes)
+		if err != nil {
+			return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private RSA key: %s", err)}
+		}
+
+	case Ed25519PrivateKey:
+		// Check the parse error before the type assertion so a malformed key
+		// returns an error instead of panicking on a nil interface value.
+		signerd, err := x509.ParsePKCS8PrivateKey(p.PrivateKeyBytes)
+		if err != nil {
+			return nil, errutil.UserError{Err: fmt.Sprintf("Unable to parse CA's private Ed25519 key: %s", err)}
+		}
+		signer = signerd.(ed25519.PrivateKey)
+
+	default:
+		return nil, errutil.UserError{Err: "Unable to determine type of private key; only RSA, Ed25519 and EC are supported"}
+	}
+	return signer, nil
+}
+
+// SetParsedPrivateKey sets the private key parameters on the bundle
+func (p *ParsedCSRBundle) SetParsedPrivateKey(privateKey crypto.Signer, privateKeyType PrivateKeyType, privateKeyBytes []byte) {
+	p.PrivateKey = privateKey
+	p.PrivateKeyType = privateKeyType
+	p.PrivateKeyBytes = privateKeyBytes
+}
+
+// getTLSConfig returns a TLS config generally suitable for client
+// authentication.
The returned TLS config can be modified slightly +// to be made suitable for a server requiring client authentication; +// specifically, you should set the value of ClientAuth in the returned +// config to match your needs. +func (p *ParsedCertBundle) GetTLSConfig(usage TLSUsage) (*tls.Config, error) { + tlsCert := tls.Certificate{ + Certificate: [][]byte{}, + } + + tlsConfig := &tls.Config{ + MinVersion: tls.VersionTLS12, + } + + if p.Certificate != nil { + tlsCert.Leaf = p.Certificate + } + + if p.PrivateKey != nil { + tlsCert.PrivateKey = p.PrivateKey + } + + if p.CertificateBytes != nil && len(p.CertificateBytes) > 0 { + tlsCert.Certificate = append(tlsCert.Certificate, p.CertificateBytes) + } + + if len(p.CAChain) > 0 { + for _, cert := range p.CAChain { + tlsCert.Certificate = append(tlsCert.Certificate, cert.Bytes) + } + + // Technically we only need one cert, but this doesn't duplicate code + certBundle, err := p.ToCertBundle() + if err != nil { + return nil, errwrap.Wrapf("error converting parsed bundle to string bundle when getting TLS config: {{err}}", err) + } + + caPool := x509.NewCertPool() + ok := caPool.AppendCertsFromPEM([]byte(certBundle.CAChain[0])) + if !ok { + return nil, fmt.Errorf("could not append CA certificate") + } + + if usage&TLSServer > 0 { + tlsConfig.ClientCAs = caPool + tlsConfig.ClientAuth = tls.VerifyClientCertIfGiven + } + if usage&TLSClient > 0 { + tlsConfig.RootCAs = caPool + } + } + + if tlsCert.Certificate != nil && len(tlsCert.Certificate) > 0 { + tlsConfig.Certificates = []tls.Certificate{tlsCert} + } + + return tlsConfig, nil +} + +// IssueData is a structure that is suitable for marshaling into a request; +// either via JSON, or into a map[string]interface{} via the structs package +type IssueData struct { + TTL string `json:"ttl" structs:"ttl" mapstructure:"ttl"` + CommonName string `json:"common_name" structs:"common_name" mapstructure:"common_name"` + OU string `json:"ou" structs:"ou" mapstructure:"ou"` + AltNames string `json:"alt_names" structs:"alt_names" mapstructure:"alt_names"` + IPSANs string `json:"ip_sans" structs:"ip_sans" mapstructure:"ip_sans"` + CSR string `json:"csr" structs:"csr" mapstructure:"csr"` + OtherSANs string `json:"other_sans" structs:"other_sans" mapstructure:"other_sans"` +} + +type URLEntries struct { + IssuingCertificates []string `json:"issuing_certificates" structs:"issuing_certificates" mapstructure:"issuing_certificates"` + CRLDistributionPoints []string `json:"crl_distribution_points" structs:"crl_distribution_points" mapstructure:"crl_distribution_points"` + OCSPServers []string `json:"ocsp_servers" structs:"ocsp_servers" mapstructure:"ocsp_servers"` +} + +type NotAfterBehavior int + +const ( + ErrNotAfterBehavior NotAfterBehavior = iota + TruncateNotAfterBehavior + PermitNotAfterBehavior +) + +var notAfterBehaviorNames = map[NotAfterBehavior]string{ + ErrNotAfterBehavior: "err", + TruncateNotAfterBehavior: "truncate", + PermitNotAfterBehavior: "permit", +} + +func (n NotAfterBehavior) String() string { + if name, ok := notAfterBehaviorNames[n]; ok && len(name) > 0 { + return name + } + + return "unknown" +} + +type CAInfoBundle struct { + ParsedCertBundle + URLs *URLEntries + LeafNotAfterBehavior NotAfterBehavior + RevocationSigAlg x509.SignatureAlgorithm +} + +func (b *CAInfoBundle) GetCAChain() []*CertBlock { + chain := []*CertBlock{} + + // Include issuing CA in Chain, not including Root Authority + if (len(b.Certificate.AuthorityKeyId) > 0 && + !bytes.Equal(b.Certificate.AuthorityKeyId, 
b.Certificate.SubjectKeyId)) || + (len(b.Certificate.AuthorityKeyId) == 0 && + !bytes.Equal(b.Certificate.RawIssuer, b.Certificate.RawSubject)) { + + chain = b.GetFullChain() + } + + return chain +} + +func (b *CAInfoBundle) GetFullChain() []*CertBlock { + var chain []*CertBlock + + // Some bundles already include the root included in the chain, + // so don't include it twice. + if len(b.CAChain) == 0 || !bytes.Equal(b.CAChain[0].Bytes, b.CertificateBytes) { + chain = append(chain, &CertBlock{ + Certificate: b.Certificate, + Bytes: b.CertificateBytes, + }) + } + + if len(b.CAChain) > 0 { + chain = append(chain, b.CAChain...) + } + + return chain +} + +type CertExtKeyUsage int + +const ( + AnyExtKeyUsage CertExtKeyUsage = 1 << iota + ServerAuthExtKeyUsage + ClientAuthExtKeyUsage + CodeSigningExtKeyUsage + EmailProtectionExtKeyUsage + IpsecEndSystemExtKeyUsage + IpsecTunnelExtKeyUsage + IpsecUserExtKeyUsage + TimeStampingExtKeyUsage + OcspSigningExtKeyUsage + MicrosoftServerGatedCryptoExtKeyUsage + NetscapeServerGatedCryptoExtKeyUsage + MicrosoftCommercialCodeSigningExtKeyUsage + MicrosoftKernelCodeSigningExtKeyUsage +) + +type CreationParameters struct { + Subject pkix.Name + DNSNames []string + EmailAddresses []string + IPAddresses []net.IP + URIs []*url.URL + OtherSANs map[string][]string + IsCA bool + KeyType string + KeyBits int + NotAfter time.Time + KeyUsage x509.KeyUsage + ExtKeyUsage CertExtKeyUsage + ExtKeyUsageOIDs []string + PolicyIdentifiers []string + BasicConstraintsValidForNonCA bool + SignatureBits int + UsePSS bool + ForceAppendCaChain bool + + // Only used when signing a CA cert + UseCSRValues bool + PermittedDNSDomains []string + + // URLs to encode into the certificate + URLs *URLEntries + + // The maximum path length to encode + MaxPathLength int + + // The duration the certificate will use NotBefore + NotBeforeDuration time.Duration + + // The explicit SKID to use; especially useful for cross-signing. 
+ SKID []byte +} + +type CreationBundle struct { + Params *CreationParameters + SigningBundle *CAInfoBundle + CSR *x509.CertificateRequest +} + +// addKeyUsages adds appropriate key usages to the template given the creation +// information +func AddKeyUsages(data *CreationBundle, certTemplate *x509.Certificate) { + if data.Params.IsCA { + certTemplate.KeyUsage = x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign) + return + } + + certTemplate.KeyUsage = data.Params.KeyUsage + + if data.Params.ExtKeyUsage&AnyExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageAny) + } + + if data.Params.ExtKeyUsage&ServerAuthExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageServerAuth) + } + + if data.Params.ExtKeyUsage&ClientAuthExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageClientAuth) + } + + if data.Params.ExtKeyUsage&CodeSigningExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageCodeSigning) + } + + if data.Params.ExtKeyUsage&EmailProtectionExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageEmailProtection) + } + + if data.Params.ExtKeyUsage&IpsecEndSystemExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageIPSECEndSystem) + } + + if data.Params.ExtKeyUsage&IpsecTunnelExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageIPSECTunnel) + } + + if data.Params.ExtKeyUsage&IpsecUserExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageIPSECUser) + } + + if data.Params.ExtKeyUsage&TimeStampingExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageTimeStamping) + } + + if data.Params.ExtKeyUsage&OcspSigningExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageOCSPSigning) + } + + if data.Params.ExtKeyUsage&MicrosoftServerGatedCryptoExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageMicrosoftServerGatedCrypto) + } + + if data.Params.ExtKeyUsage&NetscapeServerGatedCryptoExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageNetscapeServerGatedCrypto) + } + + if data.Params.ExtKeyUsage&MicrosoftCommercialCodeSigningExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageMicrosoftCommercialCodeSigning) + } + + if data.Params.ExtKeyUsage&MicrosoftKernelCodeSigningExtKeyUsage != 0 { + certTemplate.ExtKeyUsage = append(certTemplate.ExtKeyUsage, x509.ExtKeyUsageMicrosoftKernelCodeSigning) + } +} + +// SetParsedPrivateKey sets the private key parameters on the bundle +func (p *KeyBundle) SetParsedPrivateKey(privateKey crypto.Signer, privateKeyType PrivateKeyType, privateKeyBytes []byte) { + p.PrivateKey = privateKey + p.PrivateKeyType = privateKeyType + p.PrivateKeyBytes = privateKeyBytes +} + +func (p *KeyBundle) ToPrivateKeyPemString() (string, error) { + block := pem.Block{} + + if p.PrivateKeyBytes != nil && len(p.PrivateKeyBytes) > 0 { + block.Bytes = p.PrivateKeyBytes + switch p.PrivateKeyType { + case RSAPrivateKey: + block.Type = "RSA PRIVATE KEY" + case ECPrivateKey: + block.Type = "EC PRIVATE KEY" + default: + block.Type = "PRIVATE KEY" + } + privateKeyPemString := strings.TrimSpace(string(pem.EncodeToMemory(&block))) + return 
privateKeyPemString, nil
+	}
+
+	return "", errutil.InternalError{Err: "No Private Key Bytes to Wrap"}
+}
+
+// PolicyIdentifierWithQualifierEntry is the structure used for internal storage
+type PolicyIdentifierWithQualifierEntry struct {
+	PolicyIdentifierOid string `json:"oid" mapstructure:"oid"`
+	CPS                 string `json:"cps,omitempty" mapstructure:"cps"`
+	Notice              string `json:"notice,omitempty" mapstructure:"notice"`
+}
+
+// GetPolicyIdentifierFromString parses out the internal structure of a Policy Identifier
+func GetPolicyIdentifierFromString(policyIdentifier string) (*PolicyIdentifierWithQualifierEntry, error) {
+	if policyIdentifier == "" {
+		return nil, nil
+	}
+	entry := &PolicyIdentifierWithQualifierEntry{}
+	// Either an OID or a JSON entry: first check OID:
+	_, err := StringToOid(policyIdentifier)
+	if err == nil {
+		entry.PolicyIdentifierOid = policyIdentifier
+		return entry, nil
+	}
+	// Now check if it is a JSON entry
+	jsonErr := json.Unmarshal([]byte(policyIdentifier), &entry)
+	if jsonErr != nil { // Neither, if we got here
+		return entry, fmt.Errorf("policy identifier %q is neither a valid OID (%s) nor a JSON policy identifier (%s)", policyIdentifier, err.Error(), jsonErr.Error())
+	}
+	return entry, nil
+}
+
+// Policy Identifier with Qualifier Structure for ASN Marshalling:
+
+var policyInformationOid = asn1.ObjectIdentifier{2, 5, 29, 32}
+
+type policyInformation struct {
+	PolicyIdentifier asn1.ObjectIdentifier
+	Qualifiers       []interface{} `asn1:"tag:optional,omitempty"`
+}
+
+var cpsPolicyQualifierID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 1}
+
+type cpsUrlPolicyQualifier struct {
+	PolicyQualifierID asn1.ObjectIdentifier
+	Qualifier         string `asn1:"tag:optional,ia5"`
+}
+
+var userNoticePolicyQualifierID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 2}
+
+type userNoticePolicyQualifier struct {
+	PolicyQualifierID asn1.ObjectIdentifier
+	Qualifier         userNotice
+}
+
+type userNotice struct {
+	ExplicitText string `asn1:"tag:optional,utf8"`
+}
+
+func createPolicyIdentifierWithQualifier(entry PolicyIdentifierWithQualifierEntry) (*policyInformation, error) {
+	// Each policy is identified by a unique ID, as designated here:
+	policyOid, err := StringToOid(entry.PolicyIdentifierOid)
+	if err != nil {
+		return nil, err
+	}
+	pi := policyInformation{
+		PolicyIdentifier: policyOid,
+	}
+	if entry.CPS != "" {
+		qualifier := cpsUrlPolicyQualifier{
+			PolicyQualifierID: cpsPolicyQualifierID,
+			Qualifier:         entry.CPS,
+		}
+		pi.Qualifiers = append(pi.Qualifiers, qualifier)
+	}
+	if entry.Notice != "" {
+		qualifier := userNoticePolicyQualifier{
+			PolicyQualifierID: userNoticePolicyQualifierID,
+			Qualifier: userNotice{
+				ExplicitText: entry.Notice,
+			},
+		}
+		pi.Qualifiers = append(pi.Qualifiers, qualifier)
+	}
+	return &pi, nil
+}
+
+// CreatePolicyInformationExtensionFromStorageStrings parses the stored policyIdentifiers, which might be JSON Policy
+// Identifier with Qualifier entries or string OIDs, and returns an extension if everything parsed correctly, and an
+// error if construction fails
+func CreatePolicyInformationExtensionFromStorageStrings(policyIdentifiers []string) (*pkix.Extension, error) {
+	var policyInformationList []policyInformation
+	for _, policyIdentifierStr := range policyIdentifiers {
+		policyIdentifierEntry, err := GetPolicyIdentifierFromString(policyIdentifierStr)
+		if err != nil {
+			return nil, err
+		}
+		if policyIdentifierEntry != nil { // Okay to skip empty entries if there is no error
+			policyInformationStruct, err :=
createPolicyIdentifierWithQualifier(*policyIdentifierEntry) + if err != nil { + return nil, err + } + policyInformationList = append(policyInformationList, *policyInformationStruct) + } + } + asn1Bytes, err := asn1.Marshal(policyInformationList) + if err != nil { + return nil, err + } + return &pkix.Extension{ + Id: policyInformationOid, + Critical: false, + Value: asn1Bytes, + }, nil +} + +// Subject Attribute OIDs +var SubjectPilotUserIDAttributeOID = asn1.ObjectIdentifier{0, 9, 2342, 19200300, 100, 1, 1} diff --git a/sdk/helper/cidrutil/cidr.go b/sdk/helper/cidrutil/cidr.go new file mode 100644 index 0000000..9d2a418 --- /dev/null +++ b/sdk/helper/cidrutil/cidr.go @@ -0,0 +1,224 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cidrutil + +import ( + "fmt" + "net" + "strings" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-secure-stdlib/strutil" + sockaddr "github.com/hashicorp/go-sockaddr" +) + +func isIPAddr(cidr sockaddr.SockAddr) bool { + return (cidr.Type() & sockaddr.TypeIP) != 0 +} + +// RemoteAddrIsOk checks if the given remote address is either: +// - OK because there's no CIDR whitelist +// - OK because it's in the CIDR whitelist +func RemoteAddrIsOk(remoteAddr string, boundCIDRs []*sockaddr.SockAddrMarshaler) bool { + if len(boundCIDRs) == 0 { + // There's no CIDR whitelist. + return true + } + remoteSockAddr, err := sockaddr.NewSockAddr(remoteAddr) + if err != nil { + // Can't tell, err on the side of less access. + return false + } + for _, cidr := range boundCIDRs { + if isIPAddr(cidr) && cidr.Contains(remoteSockAddr) { + // Whitelisted. + return true + } + } + // Not whitelisted. + return false +} + +// IPBelongsToCIDR checks if the given IP is encompassed by the given CIDR block +func IPBelongsToCIDR(ipAddr string, cidr string) (bool, error) { + if ipAddr == "" { + return false, fmt.Errorf("missing IP address") + } + + ip := net.ParseIP(ipAddr) + if ip == nil { + return false, fmt.Errorf("invalid IP address") + } + + _, ipnet, err := net.ParseCIDR(cidr) + if err != nil { + return false, err + } + + if !ipnet.Contains(ip) { + return false, nil + } + + return true, nil +} + +// IPBelongsToCIDRBlocksSlice checks if the given IP is encompassed by any of the given +// CIDR blocks +func IPBelongsToCIDRBlocksSlice(ipAddr string, cidrs []string) (bool, error) { + if ipAddr == "" { + return false, fmt.Errorf("missing IP address") + } + + if len(cidrs) == 0 { + return false, fmt.Errorf("missing CIDR blocks to be checked against") + } + + if ip := net.ParseIP(ipAddr); ip == nil { + return false, fmt.Errorf("invalid IP address") + } + + for _, cidr := range cidrs { + belongs, err := IPBelongsToCIDR(ipAddr, cidr) + if err != nil { + return false, err + } + if belongs { + return true, nil + } + } + + return false, nil +} + +// ValidateCIDRListString checks if the list of CIDR blocks are valid, given +// that the input is a string composed by joining all the CIDR blocks using a +// separator. The input is separated based on the given separator and validity +// of each is checked. 
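+// For example (an illustrative sketch of a call, not upstream documentation),
+// a comma-separated list would be validated as:
+//
+//	ok, err := cidrutil.ValidateCIDRListString("10.0.0.0/8,192.168.0.0/16", ",")
+//	// ok == true, err == nil; an entry such as "10.0.0.0/33" would instead
+//	// yield false and a non-nil error from net.ParseCIDR.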
+func ValidateCIDRListString(cidrList string, separator string) (bool, error) {
+	if cidrList == "" {
+		return false, fmt.Errorf("missing CIDR list that needs validation")
+	}
+	if separator == "" {
+		return false, fmt.Errorf("missing separator")
+	}
+
+	return ValidateCIDRListSlice(strutil.ParseDedupLowercaseAndSortStrings(cidrList, separator))
+}
+
+// ValidateCIDRListSlice checks if the given list of CIDR blocks is valid
+func ValidateCIDRListSlice(cidrBlocks []string) (bool, error) {
+	if len(cidrBlocks) == 0 {
+		return false, fmt.Errorf("missing CIDR blocks that need validation")
+	}
+
+	for _, block := range cidrBlocks {
+		if _, _, err := net.ParseCIDR(strings.TrimSpace(block)); err != nil {
+			return false, err
+		}
+	}
+
+	return true, nil
+}
+
+// Subset checks if the IPs belonging to a given CIDR block are a subset of the
+// IPs belonging to another CIDR block.
+func Subset(cidr1, cidr2 string) (bool, error) {
+	if cidr1 == "" {
+		return false, fmt.Errorf("missing CIDR to be checked against")
+	}
+
+	if cidr2 == "" {
+		return false, fmt.Errorf("missing CIDR that needs to be checked")
+	}
+
+	ip1, net1, err := net.ParseCIDR(cidr1)
+	if err != nil {
+		return false, errwrap.Wrapf("failed to parse the CIDR to be checked against: {{err}}", err)
+	}
+
+	zeroAddr := false
+	if ip := ip1.To4(); ip != nil && ip.Equal(net.IPv4zero) {
+		zeroAddr = true
+	}
+	if ip := ip1.To16(); ip != nil && ip.Equal(net.IPv6zero) {
+		zeroAddr = true
+	}
+
+	maskLen1, _ := net1.Mask.Size()
+	if !zeroAddr && maskLen1 == 0 {
+		return false, fmt.Errorf("CIDR to be checked against is not in its canonical form")
+	}
+
+	ip2, net2, err := net.ParseCIDR(cidr2)
+	if err != nil {
+		return false, errwrap.Wrapf("failed to parse the CIDR that needs to be checked: {{err}}", err)
+	}
+
+	zeroAddr = false
+	if ip := ip2.To4(); ip != nil && ip.Equal(net.IPv4zero) {
+		zeroAddr = true
+	}
+	if ip := ip2.To16(); ip != nil && ip.Equal(net.IPv6zero) {
+		zeroAddr = true
+	}
+
+	maskLen2, _ := net2.Mask.Size()
+	if !zeroAddr && maskLen2 == 0 {
+		return false, fmt.Errorf("CIDR that needs to be checked is not in its canonical form")
+	}
+
+	// If the mask length of the CIDR that needs to be checked is smaller
+	// than the mask length of the CIDR to be checked against, then the
+	// former will encompass more IPs than the latter, and hence can't be a
+	// subset of the latter.
+	if maskLen2 < maskLen1 {
+		return false, nil
+	}
+
+	belongs, err := IPBelongsToCIDR(net2.IP.String(), cidr1)
+	if err != nil {
+		return false, err
+	}
+
+	return belongs, nil
+}
+
+// SubsetBlocks checks if each CIDR block of a given set of CIDR blocks is a
+// subset of at least one CIDR block belonging to another set of CIDR blocks.
+// The first parameter is the set of CIDR blocks to check against and the
+// second parameter is the set of CIDR blocks that needs to be checked.
+func SubsetBlocks(cidrBlocks1, cidrBlocks2 []string) (bool, error) {
+	if len(cidrBlocks1) == 0 {
+		return false, fmt.Errorf("missing CIDR blocks to be checked against")
+	}
+
+	if len(cidrBlocks2) == 0 {
+		return false, fmt.Errorf("missing CIDR blocks that need to be checked")
+	}
+
+	// Check if all the elements of cidrBlocks2 are a subset of at least one
+	// element of cidrBlocks1
+	for _, cidrBlock2 := range cidrBlocks2 {
+		isSubset := false
+		for _, cidrBlock1 := range cidrBlocks1 {
+			subset, err := Subset(cidrBlock1, cidrBlock2)
+			if err != nil {
+				return false, err
+			}
+			// If the CIDR is a subset of any one of the CIDR blocks, it's
+			// good enough. Break out.
+ if subset { + isSubset = true + break + } + } + // CIDR block was not a subset of any of the CIDR blocks in the + // set of blocks to check against + if !isSubset { + return false, nil + } + } + + return true, nil +} diff --git a/sdk/helper/cidrutil/cidr_test.go b/sdk/helper/cidrutil/cidr_test.go new file mode 100644 index 0000000..e6fc576 --- /dev/null +++ b/sdk/helper/cidrutil/cidr_test.go @@ -0,0 +1,229 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cidrutil + +import ( + "testing" + + sockaddr "github.com/hashicorp/go-sockaddr" +) + +func TestCIDRUtil_IPBelongsToCIDR(t *testing.T) { + ip := "192.168.25.30" + cidr := "192.168.26.30/16" + + belongs, err := IPBelongsToCIDR(ip, cidr) + if err != nil { + t.Fatal(err) + } + if !belongs { + t.Fatalf("expected IP %q to belong to CIDR %q", ip, cidr) + } + + ip = "10.197.192.6" + cidr = "10.197.192.0/18" + belongs, err = IPBelongsToCIDR(ip, cidr) + if err != nil { + t.Fatal(err) + } + if !belongs { + t.Fatalf("expected IP %q to belong to CIDR %q", ip, cidr) + } + + ip = "192.168.25.30" + cidr = "192.168.26.30/24" + belongs, err = IPBelongsToCIDR(ip, cidr) + if err != nil { + t.Fatal(err) + } + if belongs { + t.Fatalf("expected IP %q to not belong to CIDR %q", ip, cidr) + } + + ip = "192.168.25.30.100" + cidr = "192.168.26.30/24" + belongs, err = IPBelongsToCIDR(ip, cidr) + if err == nil { + t.Fatalf("expected an error") + } +} + +func TestCIDRUtil_IPBelongsToCIDRBlocksSlice(t *testing.T) { + ip := "192.168.27.29" + cidrList := []string{"172.169.100.200/18", "192.168.0.0/16", "10.10.20.20/24"} + + belongs, err := IPBelongsToCIDRBlocksSlice(ip, cidrList) + if err != nil { + t.Fatal(err) + } + if !belongs { + t.Fatalf("expected IP %q to belong to one of the CIDRs in %q", ip, cidrList) + } + + ip = "192.168.27.29" + cidrList = []string{"172.169.100.200/18", "192.168.0.0.0/16", "10.10.20.20/24"} + + belongs, err = IPBelongsToCIDRBlocksSlice(ip, cidrList) + if err == nil { + t.Fatalf("expected an error") + } + + ip = "30.40.50.60" + cidrList = []string{"172.169.100.200/18", "192.168.0.0/16", "10.10.20.20/24"} + + belongs, err = IPBelongsToCIDRBlocksSlice(ip, cidrList) + if err != nil { + t.Fatal(err) + } + if belongs { + t.Fatalf("expected IP %q to not belong to one of the CIDRs in %q", ip, cidrList) + } +} + +func TestCIDRUtil_ValidateCIDRListString(t *testing.T) { + cidrList := "172.169.100.200/18,192.168.0.0/16,10.10.20.20/24" + + valid, err := ValidateCIDRListString(cidrList, ",") + if err != nil { + t.Fatal(err) + } + if !valid { + t.Fatalf("expected CIDR list %q to be valid", cidrList) + } + + cidrList = "172.169.100.200,192.168.0.0/16,10.10.20.20/24" + valid, err = ValidateCIDRListString(cidrList, ",") + if err == nil { + t.Fatal("expected an error") + } + + cidrList = "172.169.100.200/18,192.168.0.0.0/16,10.10.20.20/24" + valid, err = ValidateCIDRListString(cidrList, ",") + if err == nil { + t.Fatal("expected an error") + } +} + +func TestCIDRUtil_ValidateCIDRListSlice(t *testing.T) { + cidrList := []string{"172.169.100.200/18", "192.168.0.0/16", "10.10.20.20/24"} + + valid, err := ValidateCIDRListSlice(cidrList) + if err != nil { + t.Fatal(err) + } + if !valid { + t.Fatalf("expected CIDR list %q to be valid", cidrList) + } + + cidrList = []string{"172.169.100.200", "192.168.0.0/16", "10.10.20.20/24"} + valid, err = ValidateCIDRListSlice(cidrList) + if err == nil { + t.Fatal("expected an error") + } + + cidrList = []string{"172.169.100.200/18", "192.168.0.0.0/16", "10.10.20.20/24"} + valid, err = 
ValidateCIDRListSlice(cidrList) + if err == nil { + t.Fatal("expected an error") + } +} + +func TestCIDRUtil_Subset(t *testing.T) { + cidr1 := "192.168.27.29/24" + cidr2 := "192.168.27.29/24" + subset, err := Subset(cidr1, cidr2) + if err != nil { + t.Fatal(err) + } + if !subset { + t.Fatalf("expected CIDR %q to be a subset of CIDR %q", cidr2, cidr1) + } + + cidr1 = "192.168.27.29/16" + cidr2 = "192.168.27.29/24" + subset, err = Subset(cidr1, cidr2) + if err != nil { + t.Fatal(err) + } + if !subset { + t.Fatalf("expected CIDR %q to be a subset of CIDR %q", cidr2, cidr1) + } + + cidr1 = "192.168.27.29/24" + cidr2 = "192.168.27.29/16" + subset, err = Subset(cidr1, cidr2) + if err != nil { + t.Fatal(err) + } + if subset { + t.Fatalf("expected CIDR %q to not be a subset of CIDR %q", cidr2, cidr1) + } + + cidr1 = "192.168.0.128/25" + cidr2 = "192.168.0.0/24" + subset, err = Subset(cidr1, cidr2) + if err != nil { + t.Fatal(err) + } + if subset { + t.Fatalf("expected CIDR %q to not be a subset of CIDR %q", cidr2, cidr1) + } + subset, err = Subset(cidr2, cidr1) + if err != nil { + t.Fatal(err) + } + if !subset { + t.Fatalf("expected CIDR %q to be a subset of CIDR %q", cidr1, cidr2) + } +} + +func TestCIDRUtil_SubsetBlocks(t *testing.T) { + cidrBlocks1 := []string{"192.168.27.29/16", "172.245.30.40/24", "10.20.30.40/30"} + cidrBlocks2 := []string{"192.168.27.29/20", "172.245.30.40/25", "10.20.30.40/32"} + + subset, err := SubsetBlocks(cidrBlocks1, cidrBlocks2) + if err != nil { + t.Fatal(err) + } + if !subset { + t.Fatalf("expected CIDR blocks %q to be a subset of CIDR blocks %q", cidrBlocks2, cidrBlocks1) + } + + cidrBlocks1 = []string{"192.168.27.29/16", "172.245.30.40/25", "10.20.30.40/30"} + cidrBlocks2 = []string{"192.168.27.29/20", "172.245.30.40/24", "10.20.30.40/32"} + + subset, err = SubsetBlocks(cidrBlocks1, cidrBlocks2) + if err != nil { + t.Fatal(err) + } + if subset { + t.Fatalf("expected CIDR blocks %q to not be a subset of CIDR blocks %q", cidrBlocks2, cidrBlocks1) + } +} + +func TestCIDRUtil_RemoteAddrIsOk_NegativeTest(t *testing.T) { + addr, err := sockaddr.NewSockAddr("127.0.0.1/8") + if err != nil { + t.Fatal(err) + } + boundCIDRs := []*sockaddr.SockAddrMarshaler{ + {addr}, + } + if RemoteAddrIsOk("123.0.0.1", boundCIDRs) { + t.Fatal("remote address of 123.0.0.1/2 should not be allowed for 127.0.0.1/8") + } +} + +func TestCIDRUtil_RemoteAddrIsOk_PositiveTest(t *testing.T) { + addr, err := sockaddr.NewSockAddr("127.0.0.1/8") + if err != nil { + t.Fatal(err) + } + boundCIDRs := []*sockaddr.SockAddrMarshaler{ + {addr}, + } + if !RemoteAddrIsOk("127.0.0.1", boundCIDRs) { + t.Fatal("remote address of 127.0.0.1 should be allowed for 127.0.0.1/8") + } +} diff --git a/sdk/helper/compressutil/compress.go b/sdk/helper/compressutil/compress.go new file mode 100644 index 0000000..9e96d8d --- /dev/null +++ b/sdk/helper/compressutil/compress.go @@ -0,0 +1,225 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package compressutil + +import ( + "bytes" + "compress/gzip" + "compress/lzw" + "fmt" + "io" + + "github.com/golang/snappy" + "github.com/hashicorp/errwrap" + "github.com/pierrec/lz4" +) + +const ( + // A byte value used as a canary prefix for the compressed information + // which is used to distinguish if a JSON input is compressed or not. + // The value of this constant should not be a first character of any + // valid JSON string. 
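+	// For instance, a gzip-compressed payload is stored as 'G' followed by
+	// the gzip bytes, and 'G' can never be the first character of a valid
+	// JSON document, which must begin with a value such as '{', '[' or '"'.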
+
+	CompressionTypeGzip   = "gzip"
+	CompressionCanaryGzip byte = 'G'
+
+	CompressionTypeLZW   = "lzw"
+	CompressionCanaryLZW byte = 'L'
+
+	CompressionTypeSnappy   = "snappy"
+	CompressionCanarySnappy byte = 'S'
+
+	CompressionTypeLZ4   = "lz4"
+	CompressionCanaryLZ4 byte = '4'
+)
+
+// CompressUtilReadCloser wraps a reader which implements the io.Reader
+// interface. The decompress procedure in this utility expects an
+// io.ReadCloser. This type implements the io.Closer interface to retain the
+// generic way of decompression.
+type CompressUtilReadCloser struct {
+	io.Reader
+}
+
+// Close is a noop method implemented only to satisfy the io.Closer interface
+func (c *CompressUtilReadCloser) Close() error {
+	return nil
+}
+
+// CompressionConfig is used to select a compression type to be performed by
+// Compress and Decompress utilities.
+// Supported types are:
+// * CompressionTypeLZW
+// * CompressionTypeGzip
+// * CompressionTypeSnappy
+// * CompressionTypeLZ4
+//
+// When using CompressionTypeGzip, the compression levels can also be chosen:
+// * gzip.DefaultCompression
+// * gzip.BestSpeed
+// * gzip.BestCompression
+type CompressionConfig struct {
+	// Type of the compression algorithm to be used
+	Type string
+
+	// When using Gzip format, the compression level to employ
+	GzipCompressionLevel int
+}
+
+// Compress places the canary byte in a buffer and uses the same buffer to fill
+// in the compressed information of the given input. The configuration supports
+// four types of compression: LZW, Gzip, Snappy, and LZ4. When using the Gzip
+// format, if GzipCompressionLevel is not specified, 'gzip.DefaultCompression'
+// will be assumed.
+func Compress(data []byte, config *CompressionConfig) ([]byte, error) {
+	var buf bytes.Buffer
+	var writer io.WriteCloser
+	var err error
+
+	if config == nil {
+		return nil, fmt.Errorf("config is nil")
+	}
+
+	// Write the canary into the buffer and create a writer to compress the
+	// input data based on the configured type
+	switch config.Type {
+	case CompressionTypeLZW:
+		buf.Write([]byte{CompressionCanaryLZW})
+		writer = lzw.NewWriter(&buf, lzw.LSB, 8)
+
+	case CompressionTypeGzip:
+		buf.Write([]byte{CompressionCanaryGzip})
+
+		switch {
+		case config.GzipCompressionLevel == gzip.BestCompression,
+			config.GzipCompressionLevel == gzip.BestSpeed,
+			config.GzipCompressionLevel == gzip.DefaultCompression:
+			// These are valid compression levels
+		default:
+			// If the compression level is set to NoCompression or to
+			// any invalid value, fall back to DefaultCompression
+			config.GzipCompressionLevel = gzip.DefaultCompression
+		}
+		writer, err = gzip.NewWriterLevel(&buf, config.GzipCompressionLevel)
+
+	case CompressionTypeSnappy:
+		buf.Write([]byte{CompressionCanarySnappy})
+		writer = snappy.NewBufferedWriter(&buf)
+
+	case CompressionTypeLZ4:
+		buf.Write([]byte{CompressionCanaryLZ4})
+		writer = lz4.NewWriter(&buf)
+
+	default:
+		return nil, fmt.Errorf("unsupported compression type")
+	}
+
+	if err != nil {
+		return nil, errwrap.Wrapf("failed to create a compression writer: {{err}}", err)
+	}
+
+	if writer == nil {
+		return nil, fmt.Errorf("failed to create a compression writer")
+	}
+
+	// Compress the input and place it in the same buffer containing the
+	// canary byte.
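+	// The buffer then holds [canary byte][compressed payload], which is the
+	// layout Decompress and DecompressWithCanary expect below.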
+	if _, err = writer.Write(data); err != nil {
+		return nil, errwrap.Wrapf("failed to compress input data: {{err}}", err)
+	}
+
+	// Close the io.WriteCloser
+	if err = writer.Close(); err != nil {
+		return nil, err
+	}
+
+	// Return the compressed bytes with the canary byte at the start
+	return buf.Bytes(), nil
+}
+
+// Decompress checks if the first byte in the input matches the canary byte.
+// If the first byte is a canary byte, then the input past the canary byte
+// will be decompressed using the method indicated by that canary byte.
+// If the first byte isn't a canary byte, then the utility returns a boolean
+// value indicating that the input was not compressed.
+func Decompress(data []byte) ([]byte, bool, error) {
+	bytes, _, notCompressed, err := DecompressWithCanary(data)
+	return bytes, notCompressed, err
+}
+
+// DecompressWithCanary checks if the first byte in the input matches the canary byte.
+// If the first byte is a canary byte, then the input past the canary byte
+// will be decompressed using the method indicated by that canary byte. The type of
+// compression used is also returned. If the first byte isn't a canary byte, then
+// the utility returns a boolean value indicating that the input was not compressed.
+func DecompressWithCanary(data []byte) ([]byte, string, bool, error) {
+	var err error
+	var reader io.ReadCloser
+	var compressionType string
+	if len(data) == 0 {
+		return nil, "", false, fmt.Errorf("'data' being decompressed is empty")
+	}
+
+	canary := data[0]
+	cData := data[1:]
+
+	switch canary {
+	// If the first byte matches the canary byte, remove the canary
+	// byte and try to decompress the data that is after the canary.
+	case CompressionCanaryGzip:
+		if len(data) < 2 {
+			return nil, "", false, fmt.Errorf("invalid 'data' after the canary")
+		}
+		reader, err = gzip.NewReader(bytes.NewReader(cData))
+		compressionType = CompressionTypeGzip
+
+	case CompressionCanaryLZW:
+		if len(data) < 2 {
+			return nil, "", false, fmt.Errorf("invalid 'data' after the canary")
+		}
+		reader = lzw.NewReader(bytes.NewReader(cData), lzw.LSB, 8)
+		compressionType = CompressionTypeLZW
+
+	case CompressionCanarySnappy:
+		if len(data) < 2 {
+			return nil, "", false, fmt.Errorf("invalid 'data' after the canary")
+		}
+		reader = &CompressUtilReadCloser{
+			Reader: snappy.NewReader(bytes.NewReader(cData)),
+		}
+		compressionType = CompressionTypeSnappy
+
+	case CompressionCanaryLZ4:
+		if len(data) < 2 {
+			return nil, "", false, fmt.Errorf("invalid 'data' after the canary")
+		}
+		reader = &CompressUtilReadCloser{
+			Reader: lz4.NewReader(bytes.NewReader(cData)),
+		}
+		compressionType = CompressionTypeLZ4
+
+	default:
+		// If the first byte doesn't match the canary byte, it means
+		// that the content was not compressed at all. Indicate to the
+		// caller that the input was not compressed.
+ return nil, "", true, nil + } + if err != nil { + return nil, "", false, errwrap.Wrapf("failed to create a compression reader: {{err}}", err) + } + if reader == nil { + return nil, "", false, fmt.Errorf("failed to create a compression reader") + } + + // Close the io.ReadCloser + defer reader.Close() + + // Read all the compressed data into a buffer + var buf bytes.Buffer + if _, err = io.Copy(&buf, reader); err != nil { + return nil, "", false, err + } + + return buf.Bytes(), compressionType, false, nil +} diff --git a/sdk/helper/compressutil/compress_test.go b/sdk/helper/compressutil/compress_test.go new file mode 100644 index 0000000..7d90ce8 --- /dev/null +++ b/sdk/helper/compressutil/compress_test.go @@ -0,0 +1,118 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package compressutil + +import ( + "bytes" + "compress/gzip" + "testing" +) + +func TestCompressUtil_CompressDecompress(t *testing.T) { + t.Parallel() + + tests := []struct { + compressionType string + compressionConfig CompressionConfig + canary byte + }{ + { + "GZIP default implicit", + CompressionConfig{Type: CompressionTypeGzip}, + CompressionCanaryGzip, + }, + { + "GZIP default explicit", + CompressionConfig{Type: CompressionTypeGzip, GzipCompressionLevel: gzip.DefaultCompression}, + CompressionCanaryGzip, + }, + { + "GZIP best speed", + CompressionConfig{Type: CompressionTypeGzip, GzipCompressionLevel: gzip.BestSpeed}, + CompressionCanaryGzip, + }, + { + "GZIP best compression", + CompressionConfig{Type: CompressionTypeGzip, GzipCompressionLevel: gzip.BestCompression}, + CompressionCanaryGzip, + }, + { + "Snappy", + CompressionConfig{Type: CompressionTypeSnappy}, + CompressionCanarySnappy, + }, + { + "LZ4", + CompressionConfig{Type: CompressionTypeLZ4}, + CompressionCanaryLZ4, + }, + { + "LZW", + CompressionConfig{Type: CompressionTypeLZW}, + CompressionCanaryLZW, + }, + } + + inputJSONBytes := []byte(`{"sample":"data","verification":"process"}`) + + for _, test := range tests { + // Compress the input + compressedJSONBytes, err := Compress(inputJSONBytes, &test.compressionConfig) + if err != nil { + t.Fatalf("compress error (%s): %s", test.compressionType, err) + } + if len(compressedJSONBytes) == 0 { + t.Fatalf("failed to compress data in %s format", test.compressionType) + } + + // Check the presence of the canary + if compressedJSONBytes[0] != test.canary { + t.Fatalf("bad (%s): compression canary: expected: %d actual: %d", test.compressionType, test.canary, compressedJSONBytes[0]) + } + + decompressedJSONBytes, wasNotCompressed, err := Decompress(compressedJSONBytes) + if err != nil { + t.Fatalf("decompress error (%s): %s", test.compressionType, err) + } + + // Check if the input for decompress was not compressed in the first place + if wasNotCompressed { + t.Fatalf("bad (%s): expected compressed bytes", test.compressionType) + } + + if len(decompressedJSONBytes) == 0 { + t.Fatalf("bad (%s): expected decompressed bytes", test.compressionType) + } + + // Compare the value after decompression + if !bytes.Equal(inputJSONBytes, decompressedJSONBytes) { + t.Fatalf("bad (%s): decompressed value;\nexpected: %q\nactual: %q", test.compressionType, string(inputJSONBytes), string(decompressedJSONBytes)) + } + + decompressedJSONBytes, compressionType, wasNotCompressed, err := DecompressWithCanary(compressedJSONBytes) + if err != nil { + t.Fatalf("decompress error (%s): %s", test.compressionType, err) + } + + if compressionType != test.compressionConfig.Type { + t.Fatalf("bad compressionType 
value;\nexpected: %q\nactual: %q", test.compressionConfig.Type, compressionType)
+		}
+	}
+}
+
+func TestCompressUtil_InvalidConfigurations(t *testing.T) {
+	t.Parallel()
+
+	inputJSONBytes := []byte(`{"sample":"data","verification":"process"}`)
+
+	// Test nil configuration
+	if _, err := Compress(inputJSONBytes, nil); err == nil {
+		t.Fatal("expected an error")
+	}
+
+	// Test invalid configuration
+	if _, err := Compress(inputJSONBytes, &CompressionConfig{}); err == nil {
+		t.Fatal("expected an error")
+	}
+}
diff --git a/sdk/helper/consts/agent.go b/sdk/helper/consts/agent.go
new file mode 100644
index 0000000..53b8b8e
--- /dev/null
+++ b/sdk/helper/consts/agent.go
@@ -0,0 +1,15 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package consts
+
+// AgentPathCacheClear is the path that the agent will use as its cache-clear
+// endpoint.
+const AgentPathCacheClear = "/agent/v1/cache-clear"
+
+// AgentPathMetrics is the path the agent will use to expose its internal
+// metrics.
+const AgentPathMetrics = "/agent/v1/metrics"
+
+// AgentPathQuit is the path that the agent will use to trigger stopping it.
+const AgentPathQuit = "/agent/v1/quit"
diff --git a/sdk/helper/consts/consts.go b/sdk/helper/consts/consts.go
new file mode 100644
index 0000000..036ccf5
--- /dev/null
+++ b/sdk/helper/consts/consts.go
@@ -0,0 +1,46 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package consts
+
+const (
+	// ExpirationRestoreWorkerCount specifies the number of workers to use while
+	// restoring leases into the expiration manager
+	ExpirationRestoreWorkerCount = 64
+
+	// NamespaceHeaderName is the header set to specify which namespace the
+	// request is intended for.
+	NamespaceHeaderName = "X-Vault-Namespace"
+
+	// AuthHeaderName is the name of the header containing the token.
+	AuthHeaderName = "X-Vault-Token"
+
+	// RequestHeaderName is the name of the header used by the Agent for
+	// SSRF protection.
+	RequestHeaderName = "X-Vault-Request"
+
+	// PerformanceReplicationALPN is the negotiated protocol used for
+	// performance replication.
+	PerformanceReplicationALPN = "replication_v1"
+
+	// DRReplicationALPN is the negotiated protocol used for DR replication.
+	DRReplicationALPN = "replication_dr_v1"
+
+	PerfStandbyALPN = "perf_standby_v1"
+
+	RequestForwardingALPN = "req_fw_sb-act_v1"
+
+	RaftStorageALPN = "raft_storage_v1"
+
+	// ReplicationResolverALPN is the negotiated protocol used for
+	// resolving replication addresses
+	ReplicationResolverALPN = "replication_resolver_v1"
+
+	VaultEnableFilePermissionsCheckEnv = "VAULT_ENABLE_FILE_PERMISSIONS_CHECK"
+
+	VaultDisableUserLockout = "VAULT_DISABLE_USER_LOCKOUT"
+
+	PerformanceReplicationPathTarget = "performance"
+
+	DRReplicationPathTarget = "dr"
+)
diff --git a/sdk/helper/consts/deprecation_status.go b/sdk/helper/consts/deprecation_status.go
new file mode 100644
index 0000000..e72292b
--- /dev/null
+++ b/sdk/helper/consts/deprecation_status.go
@@ -0,0 +1,37 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package consts
+
+// EnvVaultAllowPendingRemovalMounts allows Pending Removal builtins to be
+// mounted as if they are Deprecated to facilitate migration to supported
+// builtin plugins.
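+// An illustrative (hypothetical) invocation:
+//
+//	VAULT_ALLOW_PENDING_REMOVAL_MOUNTS=true vault server -config=server.hcl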
+const EnvVaultAllowPendingRemovalMounts = "VAULT_ALLOW_PENDING_REMOVAL_MOUNTS" + +// DeprecationStatus represents the current deprecation state for builtins +type DeprecationStatus uint32 + +// These are the states of deprecation for builtin plugins +const ( + Supported = iota + Deprecated + PendingRemoval + Removed + Unknown +) + +// String returns the string representation of a builtin deprecation status +func (s DeprecationStatus) String() string { + switch s { + case Supported: + return "supported" + case Deprecated: + return "deprecated" + case PendingRemoval: + return "pending removal" + case Removed: + return "removed" + default: + return "" + } +} diff --git a/sdk/helper/consts/error.go b/sdk/helper/consts/error.go new file mode 100644 index 0000000..5bd3f5e --- /dev/null +++ b/sdk/helper/consts/error.go @@ -0,0 +1,28 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package consts + +import "errors" + +var ( + // ErrSealed is returned if an operation is performed on a sealed barrier. + // No operation is expected to succeed before unsealing + ErrSealed = errors.New("Vault is sealed") + + // ErrAPILocked is returned if an operation is performed when the API is + // locked for the request namespace. + ErrAPILocked = errors.New("API access to this namespace has been locked by an administrator") + + // ErrStandby is returned if an operation is performed on a standby Vault. + // No operation is expected to succeed until active. + ErrStandby = errors.New("Vault is in standby mode") + + // ErrPathContainsParentReferences is returned when a path contains parent + // references. + ErrPathContainsParentReferences = errors.New("path cannot contain parent references") + + // ErrInvalidWrappingToken is returned when checking for the validity of + // a wrapping token that turns out to be invalid. + ErrInvalidWrappingToken = errors.New("wrapping token is not valid or does not exist") +) diff --git a/sdk/helper/consts/plugin_types.go b/sdk/helper/consts/plugin_types.go new file mode 100644 index 0000000..6bc14b5 --- /dev/null +++ b/sdk/helper/consts/plugin_types.go @@ -0,0 +1,66 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package consts + +// NOTE: this file has been copied to +// https://github.com/hashicorp/vault/blob/main/api/plugin_types.go +// Any changes made should be made to both files at the same time. + +import "fmt" + +var PluginTypes = []PluginType{ + PluginTypeUnknown, + PluginTypeCredential, + PluginTypeDatabase, + PluginTypeSecrets, +} + +type PluginType uint32 + +// This is a list of PluginTypes used by Vault. +// If we need to add any in the future, it would +// be best to add them to the _end_ of the list below +// because they resolve to incrementing numbers, +// which may be saved in state somewhere. Thus if +// the name for one of those numbers changed because +// a value were added to the middle, that could cause +// the wrong plugin types to be read from storage +// for a given underlying number. 
Example of the problem +// here: https://play.golang.org/p/YAaPw5ww3er +const ( + PluginTypeUnknown PluginType = iota + PluginTypeCredential + PluginTypeDatabase + PluginTypeSecrets +) + +func (p PluginType) String() string { + switch p { + case PluginTypeUnknown: + return "unknown" + case PluginTypeCredential: + return "auth" + case PluginTypeDatabase: + return "database" + case PluginTypeSecrets: + return "secret" + default: + return "unsupported" + } +} + +func ParsePluginType(pluginType string) (PluginType, error) { + switch pluginType { + case "unknown": + return PluginTypeUnknown, nil + case "auth": + return PluginTypeCredential, nil + case "database": + return PluginTypeDatabase, nil + case "secret": + return PluginTypeSecrets, nil + default: + return PluginTypeUnknown, fmt.Errorf("%q is not a supported plugin type", pluginType) + } +} diff --git a/sdk/helper/consts/proxy.go b/sdk/helper/consts/proxy.go new file mode 100644 index 0000000..0fc4117 --- /dev/null +++ b/sdk/helper/consts/proxy.go @@ -0,0 +1,15 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package consts + +// ProxyPathCacheClear is the path that the proxy will use as its cache-clear +// endpoint. +const ProxyPathCacheClear = "/proxy/v1/cache-clear" + +// ProxyPathMetrics is the path the proxy will use to expose its internal +// metrics. +const ProxyPathMetrics = "/proxy/v1/metrics" + +// ProxyPathQuit is the path that the proxy will use to trigger stopping it. +const ProxyPathQuit = "/proxy/v1/quit" diff --git a/sdk/helper/consts/replication.go b/sdk/helper/consts/replication.go new file mode 100644 index 0000000..2a1511a --- /dev/null +++ b/sdk/helper/consts/replication.go @@ -0,0 +1,162 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package consts + +const ( + // N.B. This needs to be excluded from replication despite the name; it's + // merely saying that this is cluster information for the replicated + // cluster. + CoreReplicatedClusterPrefix = "core/cluster/replicated/" + CoreReplicatedClusterPrefixDR = "core/cluster/replicated-dr/" + + CoreReplicatedClusterInfoPath = CoreReplicatedClusterPrefix + "info" + CoreReplicatedClusterSecondariesPrefix = CoreReplicatedClusterPrefix + "secondaries/" + CoreReplicatedClusterInfoPathDR = CoreReplicatedClusterPrefixDR + "info" + CoreReplicatedClusterSecondariesPrefixDR = CoreReplicatedClusterPrefixDR + "secondaries/" + + // This is an identifier for the current secondary in the replicated paths + // manager. It should contain a character that is not allowed in secondary + // ids to ensure it doesn't collide. + CurrentReplicatedSecondaryIdentifier = ".current" + CoreFeatureFlagPath = "core/cluster/feature-flags" +) + +type ReplicationState uint32 + +const ( + _ ReplicationState = iota + OldReplicationPrimary + OldReplicationSecondary + OldReplicationBootstrapping + // Don't add anything here. Adding anything to this Old block would cause + // the rest of the values to change below. This was done originally to + // ensure no overlap between old and new values. + + ReplicationUnknown ReplicationState = 0 + ReplicationPerformancePrimary ReplicationState = 1 << iota // Note -- iota is 5 here! 
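+	// (iota continues to increment, so each state below is the next power
+	// of two: ReplicationPerformanceSecondary is 1<<6, and so on, letting
+	// states be combined and queried as bit flags via HasState.)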
+ ReplicationPerformanceSecondary + OldSplitReplicationBootstrapping + ReplicationDRPrimary + ReplicationDRSecondary + ReplicationPerformanceBootstrapping + ReplicationDRBootstrapping + ReplicationPerformanceDisabled + ReplicationDRDisabled + ReplicationPerformanceStandby +) + +// We verify no change to the above values are made +func init() { + if OldReplicationBootstrapping != 3 { + panic("Replication Constants have changed") + } + + if ReplicationPerformancePrimary != 1<<5 { + panic("Replication Constants have changed") + } +} + +func (r ReplicationState) string() string { + switch r { + case ReplicationPerformanceSecondary: + return "secondary" + case ReplicationPerformancePrimary: + return "primary" + case ReplicationPerformanceBootstrapping: + return "bootstrapping" + case ReplicationPerformanceDisabled: + return "disabled" + case ReplicationDRPrimary: + return "primary" + case ReplicationDRSecondary: + return "secondary" + case ReplicationDRBootstrapping: + return "bootstrapping" + case ReplicationDRDisabled: + return "disabled" + } + + return "unknown" +} + +func (r ReplicationState) StateStrings() []string { + var ret []string + if r.HasState(ReplicationPerformanceSecondary) { + ret = append(ret, "perf-secondary") + } + if r.HasState(ReplicationPerformancePrimary) { + ret = append(ret, "perf-primary") + } + if r.HasState(ReplicationPerformanceBootstrapping) { + ret = append(ret, "perf-bootstrapping") + } + if r.HasState(ReplicationPerformanceDisabled) { + ret = append(ret, "perf-disabled") + } + if r.HasState(ReplicationDRPrimary) { + ret = append(ret, "dr-primary") + } + if r.HasState(ReplicationDRSecondary) { + ret = append(ret, "dr-secondary") + } + if r.HasState(ReplicationDRBootstrapping) { + ret = append(ret, "dr-bootstrapping") + } + if r.HasState(ReplicationDRDisabled) { + ret = append(ret, "dr-disabled") + } + if r.HasState(ReplicationPerformanceStandby) { + ret = append(ret, "perfstandby") + } + + return ret +} + +func (r ReplicationState) GetDRString() string { + switch { + case r.HasState(ReplicationDRBootstrapping): + return ReplicationDRBootstrapping.string() + case r.HasState(ReplicationDRPrimary): + return ReplicationDRPrimary.string() + case r.HasState(ReplicationDRSecondary): + return ReplicationDRSecondary.string() + case r.HasState(ReplicationDRDisabled): + return ReplicationDRDisabled.string() + default: + return "unknown" + } +} + +func (r ReplicationState) GetPerformanceString() string { + switch { + case r.HasState(ReplicationPerformanceBootstrapping): + return ReplicationPerformanceBootstrapping.string() + case r.HasState(ReplicationPerformancePrimary): + return ReplicationPerformancePrimary.string() + case r.HasState(ReplicationPerformanceSecondary): + return ReplicationPerformanceSecondary.string() + case r.HasState(ReplicationPerformanceDisabled): + return ReplicationPerformanceDisabled.string() + default: + return "unknown" + } +} + +func (r ReplicationState) IsPrimaryState() bool { + return r.HasState(ReplicationPerformancePrimary | ReplicationDRPrimary) +} + +func (r ReplicationState) HasState(flag ReplicationState) bool { return r&flag != 0 } +func (r *ReplicationState) AddState(flag ReplicationState) { *r |= flag } +func (r *ReplicationState) ClearState(flag ReplicationState) { *r &= ^flag } +func (r *ReplicationState) ToggleState(flag ReplicationState) { *r ^= flag } + +type HAState uint32 + +const ( + _ HAState = iota + Standby + PerfStandby + Active +) diff --git a/sdk/helper/consts/token_consts.go b/sdk/helper/consts/token_consts.go new file 
mode 100644 index 0000000..108e7ba --- /dev/null +++ b/sdk/helper/consts/token_consts.go @@ -0,0 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package consts + +const ( + ServiceTokenPrefix = "hvs." + BatchTokenPrefix = "hvb." + RecoveryTokenPrefix = "hvr." + LegacyServiceTokenPrefix = "s." + LegacyBatchTokenPrefix = "b." + LegacyRecoveryTokenPrefix = "r." +) diff --git a/sdk/helper/cryptoutil/cryptoutil.go b/sdk/helper/cryptoutil/cryptoutil.go new file mode 100644 index 0000000..956dad3 --- /dev/null +++ b/sdk/helper/cryptoutil/cryptoutil.go @@ -0,0 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cryptoutil + +import "golang.org/x/crypto/blake2b" + +func Blake2b256Hash(key string) []byte { + hf, _ := blake2b.New256(nil) + + hf.Write([]byte(key)) + + return hf.Sum(nil) +} diff --git a/sdk/helper/cryptoutil/cryptoutil_test.go b/sdk/helper/cryptoutil/cryptoutil_test.go new file mode 100644 index 0000000..35799e4 --- /dev/null +++ b/sdk/helper/cryptoutil/cryptoutil_test.go @@ -0,0 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cryptoutil + +import "testing" + +func TestBlake2b256Hash(t *testing.T) { + hashVal := Blake2b256Hash("sampletext") + + if string(hashVal) == "" || string(hashVal) == "sampletext" { + t.Fatalf("failed to hash the text") + } +} diff --git a/sdk/helper/custommetadata/custom_metadata.go b/sdk/helper/custommetadata/custom_metadata.go new file mode 100644 index 0000000..81d4c27 --- /dev/null +++ b/sdk/helper/custommetadata/custom_metadata.go @@ -0,0 +1,106 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package custommetadata + +import ( + "fmt" + + "github.com/mitchellh/mapstructure" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-secure-stdlib/strutil" +) + +// The following constants are used by Validate and are meant to be imposed +// broadly for consistency. +const ( + maxKeys = 64 + maxKeyLength = 128 + maxValueLength = 512 + validationErrorPrefix = "custom_metadata validation failed" +) + +// Parse is used to effectively convert the TypeMap +// (map[string]interface{}) into a TypeKVPairs (map[string]string) +// which is how custom_metadata is stored. Defining custom_metadata +// as a TypeKVPairs will convert nulls into empty strings. A null, +// however, is essential for a PATCH operation in that it signals +// the handler to remove the field. The filterNils flag should +// only be used during a patch operation. +func Parse(raw map[string]interface{}, filterNils bool) (map[string]string, error) { + customMetadata := map[string]string{} + for k, v := range raw { + if filterNils && v == nil { + continue + } + + var s string + if err := mapstructure.WeakDecode(v, &s); err != nil { + return nil, err + } + + customMetadata[k] = s + } + + return customMetadata, nil +} + +// Validate will perform input validation for custom metadata. +// CustomMetadata should be arbitrary user-provided key-value pairs meant to +// provide supplemental information about a resource. If the key count +// exceeds maxKeys, the validation will be short-circuited to prevent +// unnecessary (and potentially costly) validation to be run. If the key count +// falls at or below maxKeys, multiple checks will be made per key and value. 
+// These checks include:
+// - 0 < length of key <= maxKeyLength
+// - 0 < length of value <= maxValueLength
+// - keys and values cannot include unprintable characters
+func Validate(cm map[string]string) error {
+	var errs *multierror.Error
+
+	if keyCount := len(cm); keyCount > maxKeys {
+		errs = multierror.Append(errs, fmt.Errorf("%s: payload must contain at most %d keys, provided %d",
+			validationErrorPrefix,
+			maxKeys,
+			keyCount))
+
+		return errs.ErrorOrNil()
+	}
+
+	// Perform validation on each key and value and return ALL errors
+	for key, value := range cm {
+		if keyLen := len(key); keyLen == 0 || keyLen > maxKeyLength {
+			errs = multierror.Append(errs, fmt.Errorf("%s: length of key %q is %d but must be 0 < len(key) <= %d",
+				validationErrorPrefix,
+				key,
+				keyLen,
+				maxKeyLength))
+		}
+
+		if valueLen := len(value); valueLen == 0 || valueLen > maxValueLength {
+			errs = multierror.Append(errs, fmt.Errorf("%s: length of value for key %q is %d but must be 0 < len(value) <= %d",
+				validationErrorPrefix,
+				key,
+				valueLen,
+				maxValueLength))
+		}
+
+		if !strutil.Printable(key) {
+			// Include the unquoted form (%s) as well, so the string is shown
+			// without the unprintable characters visible, making debugging and
+			// key identification easier
+			errs = multierror.Append(errs, fmt.Errorf("%s: key %q (%s) contains unprintable characters",
+				validationErrorPrefix,
+				key,
+				key))
+		}
+
+		if !strutil.Printable(value) {
+			errs = multierror.Append(errs, fmt.Errorf("%s: value for key %q contains unprintable characters",
+				validationErrorPrefix,
+				key))
+		}
+	}
+
+	return errs.ErrorOrNil()
+}
diff --git a/sdk/helper/custommetadata/custom_metadata_test.go b/sdk/helper/custommetadata/custom_metadata_test.go
new file mode 100644
index 0000000..2b25d99
--- /dev/null
+++ b/sdk/helper/custommetadata/custom_metadata_test.go
@@ -0,0 +1,88 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package custommetadata
+
+import (
+	"strconv"
+	"strings"
+	"testing"
+)
+
+func TestValidate(t *testing.T) {
+	cases := []struct {
+		name       string
+		input      map[string]string
+		shouldPass bool
+	}{
+		{
+			"valid",
+			map[string]string{
+				"foo": "abc",
+				"bar": "def",
+				"baz": "ghi",
+			},
+			true,
+		},
+		{
+			"too_many_keys",
+			func() map[string]string {
+				cm := make(map[string]string)
+
+				for i := 0; i < maxKeys+1; i++ {
+					s := strconv.Itoa(i)
+					cm[s] = s
+				}
+
+				return cm
+			}(),
+			false,
+		},
+		{
+			"key_too_long",
+			map[string]string{
+				strings.Repeat("a", maxKeyLength+1): "abc",
+			},
+			false,
+		},
+		{
+			"value_too_long",
+			map[string]string{
+				"foo": strings.Repeat("a", maxValueLength+1),
+			},
+			false,
+		},
+		{
+			"unprintable_key",
+			map[string]string{
+				"unprint\u200bable": "abc",
+			},
+			false,
+		},
+		{
+			"unprintable_value",
+			map[string]string{
+				"foo": "unprint\u200bable",
+			},
+			false,
+		},
+	}
+
+	for _, tc := range cases {
+		tc := tc
+
+		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
+
+			err := Validate(tc.input)
+
+			if tc.shouldPass && err != nil {
+				t.Fatalf("expected validation to pass, input: %#v, err: %v", tc.input, err)
+			}
+
+			if !tc.shouldPass && err == nil {
+				t.Fatalf("expected validation to fail, input: %#v, err: %v", tc.input, err)
+			}
+		})
+	}
+}
diff --git a/sdk/helper/dbtxn/dbtxn.go b/sdk/helper/dbtxn/dbtxn.go
new file mode 100644
index 0000000..12288d5
--- /dev/null
+++ b/sdk/helper/dbtxn/dbtxn.go
@@ -0,0 +1,86 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package dbtxn
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"strings"
+)
+
+// ExecuteDBQuery handles executing one single statement while properly releasing its resources.
+// - ctx: Required
+// - db: Required
+// - params: Optional, may be nil
+// - query: Required
+func ExecuteDBQuery(ctx context.Context, db *sql.DB, params map[string]string, query string) error {
+	parsedQuery := parseQuery(params, query)
+
+	stmt, err := db.PrepareContext(ctx, parsedQuery)
+	if err != nil {
+		return err
+	}
+	defer stmt.Close()
+
+	return execute(ctx, stmt)
+}
+
+// ExecuteDBQueryDirect handles executing one single statement without preparing the query
+// before executing it, which can be more efficient.
+// - ctx: Required
+// - db: Required
+// - params: Optional, may be nil
+// - query: Required
+func ExecuteDBQueryDirect(ctx context.Context, db *sql.DB, params map[string]string, query string) error {
+	parsedQuery := parseQuery(params, query)
+	_, err := db.ExecContext(ctx, parsedQuery)
+	return err
+}
+
+// ExecuteTxQuery handles executing one single statement while properly releasing its resources.
+// - ctx: Required
+// - tx: Required
+// - params: Optional, may be nil
+// - query: Required
+func ExecuteTxQuery(ctx context.Context, tx *sql.Tx, params map[string]string, query string) error {
+	parsedQuery := parseQuery(params, query)
+
+	stmt, err := tx.PrepareContext(ctx, parsedQuery)
+	if err != nil {
+		return err
+	}
+	defer stmt.Close()
+
+	return execute(ctx, stmt)
+}
+
+// ExecuteTxQueryDirect handles executing one single statement.
+// - ctx: Required
+// - tx: Required
+// - params: Optional, may be nil
+// - query: Required
+func ExecuteTxQueryDirect(ctx context.Context, tx *sql.Tx, params map[string]string, query string) error {
+	parsedQuery := parseQuery(params, query)
+	_, err := tx.ExecContext(ctx, parsedQuery)
+	return err
+}
+
+func execute(ctx context.Context, stmt *sql.Stmt) error {
+	if _, err := stmt.ExecContext(ctx); err != nil {
+		return err
+	}
+	return nil
+}
+
+// parseQuery substitutes each "{{key}}" placeholder in the query template with
+// the corresponding value from the map.
+func parseQuery(m map[string]string, tpl string) string {
+	if len(m) == 0 {
+		return tpl
+	}
+
+	for k, v := range m {
+		tpl = strings.ReplaceAll(tpl, fmt.Sprintf("{{%s}}", k), v)
+	}
+	return tpl
+}
diff --git a/sdk/helper/docker/testhelpers.go b/sdk/helper/docker/testhelpers.go
new file mode 100644
index 0000000..7902750
--- /dev/null
+++ b/sdk/helper/docker/testhelpers.go
@@ -0,0 +1,911 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package docker + +import ( + "archive/tar" + "bufio" + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/url" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/cenkalti/backoff/v3" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/go-connections/nat" + "github.com/hashicorp/go-uuid" +) + +const DockerAPIVersion = "1.40" + +type Runner struct { + DockerAPI *client.Client + RunOptions RunOptions +} + +type RunOptions struct { + ImageRepo string + ImageTag string + ContainerName string + Cmd []string + Entrypoint []string + Env []string + NetworkName string + NetworkID string + CopyFromTo map[string]string + Ports []string + DoNotAutoRemove bool + AuthUsername string + AuthPassword string + OmitLogTimestamps bool + LogConsumer func(string) + Capabilities []string + PreDelete bool + PostStart func(string, string) error + LogStderr io.Writer + LogStdout io.Writer + VolumeNameToMountPoint map[string]string +} + +func NewDockerAPI() (*client.Client, error) { + return client.NewClientWithOpts(client.FromEnv, client.WithVersion(DockerAPIVersion)) +} + +func NewServiceRunner(opts RunOptions) (*Runner, error) { + dapi, err := NewDockerAPI() + if err != nil { + return nil, err + } + + if opts.NetworkName == "" { + opts.NetworkName = os.Getenv("TEST_DOCKER_NETWORK_NAME") + } + if opts.NetworkName != "" { + nets, err := dapi.NetworkList(context.TODO(), types.NetworkListOptions{ + Filters: filters.NewArgs(filters.Arg("name", opts.NetworkName)), + }) + if err != nil { + return nil, err + } + if len(nets) != 1 { + return nil, fmt.Errorf("expected exactly one docker network named %q, got %d", opts.NetworkName, len(nets)) + } + opts.NetworkID = nets[0].ID + } + if opts.NetworkID == "" { + opts.NetworkID = os.Getenv("TEST_DOCKER_NETWORK_ID") + } + if opts.ContainerName == "" { + if strings.Contains(opts.ImageRepo, "/") { + return nil, fmt.Errorf("ContainerName is required for non-library images") + } + // If there's no slash in the repo it's almost certainly going to be + // a good container name. 
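+		// e.g. an ImageRepo of "postgres" becomes the container name
+		// "postgres", whereas "hashicorp/vault" requires an explicit
+		// ContainerName to be set by the caller.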
+ opts.ContainerName = opts.ImageRepo + } + return &Runner{ + DockerAPI: dapi, + RunOptions: opts, + }, nil +} + +type ServiceConfig interface { + Address() string + URL() *url.URL +} + +func NewServiceHostPort(host string, port int) *ServiceHostPort { + return &ServiceHostPort{address: fmt.Sprintf("%s:%d", host, port)} +} + +func NewServiceHostPortParse(s string) (*ServiceHostPort, error) { + pieces := strings.Split(s, ":") + if len(pieces) != 2 { + return nil, fmt.Errorf("address must be of the form host:port, got: %v", s) + } + + port, err := strconv.Atoi(pieces[1]) + if err != nil || port < 1 { + return nil, fmt.Errorf("address must be of the form host:port, got: %v", s) + } + + return &ServiceHostPort{s}, nil +} + +type ServiceHostPort struct { + address string +} + +func (s ServiceHostPort) Address() string { + return s.address +} + +func (s ServiceHostPort) URL() *url.URL { + return &url.URL{Host: s.address} +} + +func NewServiceURLParse(s string) (*ServiceURL, error) { + u, err := url.Parse(s) + if err != nil { + return nil, err + } + return &ServiceURL{u: *u}, nil +} + +func NewServiceURL(u url.URL) *ServiceURL { + return &ServiceURL{u: u} +} + +type ServiceURL struct { + u url.URL +} + +func (s ServiceURL) Address() string { + return s.u.Host +} + +func (s ServiceURL) URL() *url.URL { + return &s.u +} + +// ServiceAdapter verifies connectivity to the service, then returns either the +// connection string (typically a URL) and nil, or empty string and an error. +type ServiceAdapter func(ctx context.Context, host string, port int) (ServiceConfig, error) + +// StartService will start the runner's configured docker container with a +// random UUID suffix appended to the name to make it unique and will return +// either a hostname or local address depending on if a Docker network was given. +// +// Most tests can default to using this. +func (d *Runner) StartService(ctx context.Context, connect ServiceAdapter) (*Service, error) { + serv, _, err := d.StartNewService(ctx, true, false, connect) + + return serv, err +} + +type LogConsumerWriter struct { + consumer func(string) +} + +func (l LogConsumerWriter) Write(p []byte) (n int, err error) { + // TODO this assumes that we're never passed partial log lines, which + // seems a safe assumption for now based on how docker looks to implement + // logging, but might change in the future. + scanner := bufio.NewScanner(bytes.NewReader(p)) + scanner.Buffer(make([]byte, 64*1024), bufio.MaxScanTokenSize) + for scanner.Scan() { + l.consumer(scanner.Text()) + } + return len(p), nil +} + +var _ io.Writer = &LogConsumerWriter{} + +// StartNewService will start the runner's configured docker container but with the +// ability to control adding a name suffix or forcing a local address to be returned. +// 'addSuffix' will add a random UUID to the end of the container name. +// 'forceLocalAddr' will force the container address returned to be in the +// form of '127.0.0.1:1234' where 1234 is the mapped container port. 
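+// A minimal illustrative call (the runner variable here is hypothetical):
+//
+//	svc, containerID, err := runner.StartNewService(ctx, true, false,
+//		func(ctx context.Context, host string, port int) (ServiceConfig, error) {
+//			return NewServiceHostPort(host, port), nil
+//		})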
+func (d *Runner) StartNewService(ctx context.Context, addSuffix, forceLocalAddr bool, connect ServiceAdapter) (*Service, string, error) { + if d.RunOptions.PreDelete { + name := d.RunOptions.ContainerName + matches, err := d.DockerAPI.ContainerList(ctx, types.ContainerListOptions{ + All: true, + // TODO use labels to ensure we don't delete anything we shouldn't + Filters: filters.NewArgs( + filters.Arg("name", name), + ), + }) + if err != nil { + return nil, "", fmt.Errorf("failed to list containers named %q", name) + } + for _, cont := range matches { + err = d.DockerAPI.ContainerRemove(ctx, cont.ID, types.ContainerRemoveOptions{Force: true}) + if err != nil { + return nil, "", fmt.Errorf("failed to pre-delete container named %q", name) + } + } + } + result, err := d.Start(context.Background(), addSuffix, forceLocalAddr) + if err != nil { + return nil, "", err + } + + var wg sync.WaitGroup + consumeLogs := false + var logStdout, logStderr io.Writer + if d.RunOptions.LogStdout != nil && d.RunOptions.LogStderr != nil { + consumeLogs = true + logStdout = d.RunOptions.LogStdout + logStderr = d.RunOptions.LogStderr + } else if d.RunOptions.LogConsumer != nil { + consumeLogs = true + logStdout = &LogConsumerWriter{d.RunOptions.LogConsumer} + logStderr = &LogConsumerWriter{d.RunOptions.LogConsumer} + } + + // The waitgroup wg is used here to support some stuff in NewDockerCluster. + // We can't generate the PKI cert for the https listener until we know the + // container's address, meaning we must first start the container, then + // generate the cert, then copy it into the container, then signal Vault + // to reload its config/certs. However, if we SIGHUP Vault before Vault + // has installed its signal handler, that will kill Vault, since the default + // behaviour for HUP is termination. So the PostStart that NewDockerCluster + // passes in (which does all that PKI cert stuff) waits to see output from + // Vault on stdout/stderr before it sends the signal, and we don't want to + // run the PostStart until we've hooked into the docker logs. + if consumeLogs { + wg.Add(1) + go func() { + // We must run inside a goroutine because we're using Follow:true, + // and StdCopy will block until the log stream is closed. 
+ stream, err := d.DockerAPI.ContainerLogs(context.Background(), result.Container.ID, types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Timestamps: !d.RunOptions.OmitLogTimestamps, + Details: true, + Follow: true, + }) + wg.Done() + if err != nil { + d.RunOptions.LogConsumer(fmt.Sprintf("error reading container logs: %v", err)) + } else { + _, err := stdcopy.StdCopy(logStdout, logStderr, stream) + if err != nil { + d.RunOptions.LogConsumer(fmt.Sprintf("error demultiplexing docker logs: %v", err)) + } + } + }() + } + wg.Wait() + + if d.RunOptions.PostStart != nil { + if err := d.RunOptions.PostStart(result.Container.ID, result.RealIP); err != nil { + return nil, "", fmt.Errorf("poststart failed: %w", err) + } + } + + cleanup := func() { + for i := 0; i < 10; i++ { + err := d.DockerAPI.ContainerRemove(ctx, result.Container.ID, types.ContainerRemoveOptions{Force: true}) + if err == nil || client.IsErrNotFound(err) { + return + } + time.Sleep(1 * time.Second) + } + } + + bo := backoff.NewExponentialBackOff() + bo.MaxInterval = time.Second * 5 + bo.MaxElapsedTime = 2 * time.Minute + + pieces := strings.Split(result.Addrs[0], ":") + portInt, err := strconv.Atoi(pieces[1]) + if err != nil { + return nil, "", err + } + + var config ServiceConfig + err = backoff.Retry(func() error { + container, err := d.DockerAPI.ContainerInspect(ctx, result.Container.ID) + if err != nil || !container.State.Running { + return backoff.Permanent(fmt.Errorf("failed inspect or container %q not running: %w", result.Container.ID, err)) + } + + c, err := connect(ctx, pieces[0], portInt) + if err != nil { + return err + } + if c == nil { + return fmt.Errorf("service adapter returned nil error and config") + } + config = c + return nil + }, bo) + + if err != nil { + if !d.RunOptions.DoNotAutoRemove { + cleanup() + } + return nil, "", err + } + + return &Service{ + Config: config, + Cleanup: cleanup, + Container: result.Container, + StartResult: result, + }, result.Container.ID, nil +} + +type Service struct { + Config ServiceConfig + Cleanup func() + Container *types.ContainerJSON + StartResult *StartResult +} + +type StartResult struct { + Container *types.ContainerJSON + Addrs []string + RealIP string +} + +func (d *Runner) Start(ctx context.Context, addSuffix, forceLocalAddr bool) (*StartResult, error) { + name := d.RunOptions.ContainerName + if addSuffix { + suffix, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + name += "-" + suffix + } + + cfg := &container.Config{ + Hostname: name, + Image: fmt.Sprintf("%s:%s", d.RunOptions.ImageRepo, d.RunOptions.ImageTag), + Env: d.RunOptions.Env, + Cmd: d.RunOptions.Cmd, + } + if len(d.RunOptions.Ports) > 0 { + cfg.ExposedPorts = make(map[nat.Port]struct{}) + for _, p := range d.RunOptions.Ports { + cfg.ExposedPorts[nat.Port(p)] = struct{}{} + } + } + if len(d.RunOptions.Entrypoint) > 0 { + cfg.Entrypoint = strslice.StrSlice(d.RunOptions.Entrypoint) + } + + hostConfig := &container.HostConfig{ + AutoRemove: !d.RunOptions.DoNotAutoRemove, + PublishAllPorts: true, + } + if len(d.RunOptions.Capabilities) > 0 { + hostConfig.CapAdd = d.RunOptions.Capabilities + } + + netConfig := &network.NetworkingConfig{} + if d.RunOptions.NetworkID != "" { + netConfig.EndpointsConfig = map[string]*network.EndpointSettings{ + d.RunOptions.NetworkID: {}, + } + } + + // best-effort pull + var opts types.ImageCreateOptions + if d.RunOptions.AuthUsername != "" && d.RunOptions.AuthPassword != "" { + var buf bytes.Buffer + auth := map[string]string{ + "username": 
d.RunOptions.AuthUsername,
+			"password": d.RunOptions.AuthPassword,
+		}
+		if err := json.NewEncoder(&buf).Encode(auth); err != nil {
+			return nil, err
+		}
+		opts.RegistryAuth = base64.URLEncoding.EncodeToString(buf.Bytes())
+	}
+	resp, _ := d.DockerAPI.ImageCreate(ctx, cfg.Image, opts)
+	if resp != nil {
+		_, _ = ioutil.ReadAll(resp)
+	}
+
+	for vol, mtpt := range d.RunOptions.VolumeNameToMountPoint {
+		hostConfig.Mounts = append(hostConfig.Mounts, mount.Mount{
+			Type:     "volume",
+			Source:   vol,
+			Target:   mtpt,
+			ReadOnly: false,
+		})
+	}
+
+	c, err := d.DockerAPI.ContainerCreate(ctx, cfg, hostConfig, netConfig, nil, cfg.Hostname)
+	if err != nil {
+		return nil, fmt.Errorf("container create failed: %v", err)
+	}
+
+	for from, to := range d.RunOptions.CopyFromTo {
+		if err := copyToContainer(ctx, d.DockerAPI, c.ID, from, to); err != nil {
+			_ = d.DockerAPI.ContainerRemove(ctx, c.ID, types.ContainerRemoveOptions{})
+			return nil, err
+		}
+	}
+
+	err = d.DockerAPI.ContainerStart(ctx, c.ID, types.ContainerStartOptions{})
+	if err != nil {
+		_ = d.DockerAPI.ContainerRemove(ctx, c.ID, types.ContainerRemoveOptions{})
+		return nil, fmt.Errorf("container start failed: %v", err)
+	}
+
+	inspect, err := d.DockerAPI.ContainerInspect(ctx, c.ID)
+	if err != nil {
+		_ = d.DockerAPI.ContainerRemove(ctx, c.ID, types.ContainerRemoveOptions{})
+		return nil, err
+	}
+
+	var addrs []string
+	for _, port := range d.RunOptions.Ports {
+		pieces := strings.Split(port, "/")
+		if len(pieces) < 2 {
+			return nil, fmt.Errorf("expected port of the form 1234/tcp, got: %s", port)
+		}
+		if d.RunOptions.NetworkID != "" && !forceLocalAddr {
+			addrs = append(addrs, fmt.Sprintf("%s:%s", cfg.Hostname, pieces[0]))
+		} else {
+			mapped, ok := inspect.NetworkSettings.Ports[nat.Port(port)]
+			if !ok || len(mapped) == 0 {
+				return nil, fmt.Errorf("no port mapping found for %s", port)
+			}
+			addrs = append(addrs, fmt.Sprintf("127.0.0.1:%s", mapped[0].HostPort))
+		}
+	}
+
+	var realIP string
+	if d.RunOptions.NetworkID == "" {
+		if len(inspect.NetworkSettings.Networks) > 1 {
+			return nil, fmt.Errorf("set d.RunOptions.NetworkName for a container with multiple networks: %v", inspect.NetworkSettings.Networks)
+		}
+		for _, network := range inspect.NetworkSettings.Networks {
+			realIP = network.IPAddress
+			break
+		}
+	} else {
+		realIP = inspect.NetworkSettings.Networks[d.RunOptions.NetworkName].IPAddress
+	}
+
+	return &StartResult{
+		Container: &inspect,
+		Addrs:     addrs,
+		RealIP:    realIP,
+	}, nil
+}
+
+func (d *Runner) RefreshFiles(ctx context.Context, containerID string) error {
+	for from, to := range d.RunOptions.CopyFromTo {
+		if err := copyToContainer(ctx, d.DockerAPI, containerID, from, to); err != nil {
+			// TODO: is removing the container on a failed copy too drastic?
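+			// Editor's note (illustrative, not part of the upstream change):
+			// a gentler alternative would leave the container running and
+			// surface the error to the caller, e.g.:
+			//
+			//	return fmt.Errorf("refreshing %q -> %q failed: %w", from, to, err)
+			//
+			// at the cost of leaving a half-refreshed container behind.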
+ _ = d.DockerAPI.ContainerRemove(ctx, containerID, types.ContainerRemoveOptions{}) + return err + } + } + return d.DockerAPI.ContainerKill(ctx, containerID, "SIGHUP") +} + +func (d *Runner) Stop(ctx context.Context, containerID string) error { + if d.RunOptions.NetworkID != "" { + if err := d.DockerAPI.NetworkDisconnect(ctx, d.RunOptions.NetworkID, containerID, true); err != nil { + return fmt.Errorf("error disconnecting network (%v): %v", d.RunOptions.NetworkID, err) + } + } + + // timeout in seconds + timeout := 5 + options := container.StopOptions{ + Timeout: &timeout, + } + if err := d.DockerAPI.ContainerStop(ctx, containerID, options); err != nil { + return fmt.Errorf("error stopping container: %v", err) + } + + return nil +} + +func (d *Runner) Restart(ctx context.Context, containerID string) error { + if err := d.DockerAPI.ContainerStart(ctx, containerID, types.ContainerStartOptions{}); err != nil { + return err + } + + ends := &network.EndpointSettings{ + NetworkID: d.RunOptions.NetworkID, + } + + return d.DockerAPI.NetworkConnect(ctx, d.RunOptions.NetworkID, containerID, ends) +} + +func copyToContainer(ctx context.Context, dapi *client.Client, containerID, from, to string) error { + srcInfo, err := archive.CopyInfoSourcePath(from, false) + if err != nil { + return fmt.Errorf("error copying from source %q: %v", from, err) + } + + srcArchive, err := archive.TarResource(srcInfo) + if err != nil { + return fmt.Errorf("error creating tar from source %q: %v", from, err) + } + defer srcArchive.Close() + + dstInfo := archive.CopyInfo{Path: to} + + dstDir, content, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo) + if err != nil { + return fmt.Errorf("error preparing copy from %q -> %q: %v", from, to, err) + } + defer content.Close() + err = dapi.CopyToContainer(ctx, containerID, dstDir, content, types.CopyToContainerOptions{}) + if err != nil { + return fmt.Errorf("error copying from %q -> %q: %v", from, to, err) + } + + return nil +} + +type RunCmdOpt interface { + Apply(cfg *types.ExecConfig) error +} + +type RunCmdUser string + +var _ RunCmdOpt = (*RunCmdUser)(nil) + +func (u RunCmdUser) Apply(cfg *types.ExecConfig) error { + cfg.User = string(u) + return nil +} + +func (d *Runner) RunCmdWithOutput(ctx context.Context, container string, cmd []string, opts ...RunCmdOpt) ([]byte, []byte, int, error) { + return RunCmdWithOutput(d.DockerAPI, ctx, container, cmd, opts...) 
+}
+
+func RunCmdWithOutput(api *client.Client, ctx context.Context, container string, cmd []string, opts ...RunCmdOpt) ([]byte, []byte, int, error) {
+	runCfg := types.ExecConfig{
+		AttachStdout: true,
+		AttachStderr: true,
+		Cmd:          cmd,
+	}
+
+	for index, opt := range opts {
+		if err := opt.Apply(&runCfg); err != nil {
+			return nil, nil, -1, fmt.Errorf("error applying option (%d / %v): %w", index, opt, err)
+		}
+	}
+
+	ret, err := api.ContainerExecCreate(ctx, container, runCfg)
+	if err != nil {
+		return nil, nil, -1, fmt.Errorf("error creating execution environment: %v\ncfg: %v\n", err, runCfg)
+	}
+
+	resp, err := api.ContainerExecAttach(ctx, ret.ID, types.ExecStartCheck{})
+	if err != nil {
+		return nil, nil, -1, fmt.Errorf("error attaching to command execution: %v\ncfg: %v\nret: %v\n", err, runCfg, ret)
+	}
+	defer resp.Close()
+
+	var stdoutB bytes.Buffer
+	var stderrB bytes.Buffer
+	if _, err := stdcopy.StdCopy(&stdoutB, &stderrB, resp.Reader); err != nil {
+		return nil, nil, -1, fmt.Errorf("error reading command output: %v", err)
+	}
+
+	stdout := stdoutB.Bytes()
+	stderr := stderrB.Bytes()
+
+	// Fetch return code.
+	info, err := api.ContainerExecInspect(ctx, ret.ID)
+	if err != nil {
+		return stdout, stderr, -1, fmt.Errorf("error reading command exit code: %v", err)
+	}
+
+	return stdout, stderr, info.ExitCode, nil
+}
+
+func (d *Runner) RunCmdInBackground(ctx context.Context, container string, cmd []string, opts ...RunCmdOpt) (string, error) {
+	return RunCmdInBackground(d.DockerAPI, ctx, container, cmd, opts...)
+}
+
+func RunCmdInBackground(api *client.Client, ctx context.Context, container string, cmd []string, opts ...RunCmdOpt) (string, error) {
+	runCfg := types.ExecConfig{
+		AttachStdout: true,
+		AttachStderr: true,
+		Cmd:          cmd,
+	}
+
+	for index, opt := range opts {
+		if err := opt.Apply(&runCfg); err != nil {
+			return "", fmt.Errorf("error applying option (%d / %v): %w", index, opt, err)
+		}
+	}
+
+	ret, err := api.ContainerExecCreate(ctx, container, runCfg)
+	if err != nil {
+		return "", fmt.Errorf("error creating execution environment: %w\ncfg: %v\n", err, runCfg)
+	}
+
+	err = api.ContainerExecStart(ctx, ret.ID, types.ExecStartCheck{})
+	if err != nil {
+		return "", fmt.Errorf("error starting command execution: %w\ncfg: %v\nret: %v\n", err, runCfg, ret)
+	}
+
+	return ret.ID, nil
+}
+
+// PathContents describes the contents and tar metadata of a single file
+// inside a BuildContext.
+type PathContents interface {
+	UpdateHeader(header *tar.Header) error
+	Get() ([]byte, error)
+	SetMode(mode int64)
+	SetOwners(uid int, gid int)
+}
+
+type FileContents struct {
+	Data []byte
+	Mode int64
+	UID  int
+	GID  int
+}
+
+func (b FileContents) UpdateHeader(header *tar.Header) error {
+	header.Mode = b.Mode
+	header.Uid = b.UID
+	header.Gid = b.GID
+	return nil
+}
+
+func (b FileContents) Get() ([]byte, error) {
+	return b.Data, nil
+}
+
+func (b *FileContents) SetMode(mode int64) {
+	b.Mode = mode
+}
+
+func (b *FileContents) SetOwners(uid int, gid int) {
+	b.UID = uid
+	b.GID = gid
+}
+
+func PathContentsFromBytes(data []byte) PathContents {
+	return &FileContents{
+		Data: data,
+		Mode: 0o644,
+	}
+}
+
+func PathContentsFromString(data string) PathContents {
+	return PathContentsFromBytes([]byte(data))
+}
+
+// BuildContext is a mapping of archive path -> file contents.
+type BuildContext map[string]PathContents
+
+func NewBuildContext() BuildContext {
+	return BuildContext{}
+}
+
+func BuildContextFromTarball(reader io.Reader) (BuildContext, error) {
+	archive := tar.NewReader(reader)
+	bCtx := NewBuildContext()
+
+	for {
+		header, err := archive.Next()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
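+			// Editor's note: tar.Reader.Next returns io.EOF only at a clean
+			// end of archive; any other error means the tarball is malformed
+			// or truncated.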
+			return nil, fmt.Errorf("failed to parse provided tarball: %v", err)
+		}
+
+		data := make([]byte, int(header.Size))
+		// tar.Reader.Read may legitimately return fewer bytes than requested,
+		// so use io.ReadFull to read the whole file entry.
+		read, err := io.ReadFull(archive, data)
+		if err != nil {
+			return nil, fmt.Errorf("failed to read file from provided tarball: %v", err)
+		}
+
+		if read != int(header.Size) {
+			return nil, fmt.Errorf("unexpectedly short read on tarball: %v of %v", read, header.Size)
+		}
+
+		bCtx[header.Name] = &FileContents{
+			Data: data,
+			Mode: header.Mode,
+			UID:  header.Uid,
+			GID:  header.Gid,
+		}
+	}
+
+	return bCtx, nil
+}
+
+func (bCtx *BuildContext) ToTarball() (io.Reader, error) {
+	var err error
+	buffer := new(bytes.Buffer)
+	tarBuilder := tar.NewWriter(buffer)
+
+	now := time.Now()
+	for filepath, contents := range *bCtx {
+		fileHeader := &tar.Header{
+			Name:       filepath,
+			ModTime:    now,
+			AccessTime: now,
+			ChangeTime: now,
+		}
+		if contents == nil && !strings.HasSuffix(filepath, "/") {
+			return nil, fmt.Errorf("expected file path (%v) to have trailing / due to nil contents, indicating directory", filepath)
+		}
+
+		// Only consult contents for non-directory entries; calling a method
+		// on a nil interface value would panic.
+		if contents != nil {
+			if err := contents.UpdateHeader(fileHeader); err != nil {
+				return nil, fmt.Errorf("failed to update tar header entry for %v: %w", filepath, err)
+			}
+		}
+
+		var rawContents []byte
+		if contents != nil {
+			rawContents, err = contents.Get()
+			if err != nil {
+				return nil, fmt.Errorf("failed to get file contents for %v: %w", filepath, err)
+			}
+
+			fileHeader.Size = int64(len(rawContents))
+		}
+
+		if err := tarBuilder.WriteHeader(fileHeader); err != nil {
+			return nil, fmt.Errorf("failed to write tar header entry for %v: %w", filepath, err)
+		}
+
+		if contents != nil {
+			if _, err := tarBuilder.Write(rawContents); err != nil {
+				return nil, fmt.Errorf("failed to write tar file entry for %v: %w", filepath, err)
+			}
+		}
+	}
+
+	// Close the writer before snapshotting the buffer so buffered data and
+	// the end-of-archive footer are included in the returned tarball.
+	if err := tarBuilder.Close(); err != nil {
+		return nil, fmt.Errorf("failed to finalize tarball: %w", err)
+	}
+
+	return bytes.NewReader(buffer.Bytes()), nil
+}
+
+type BuildOpt interface {
+	Apply(cfg *types.ImageBuildOptions) error
+}
+
+type BuildRemove bool
+
+var _ BuildOpt = (*BuildRemove)(nil)
+
+func (u BuildRemove) Apply(cfg *types.ImageBuildOptions) error {
+	cfg.Remove = bool(u)
+	return nil
+}
+
+type BuildForceRemove bool
+
+var _ BuildOpt = (*BuildForceRemove)(nil)
+
+func (u BuildForceRemove) Apply(cfg *types.ImageBuildOptions) error {
+	cfg.ForceRemove = bool(u)
+	return nil
+}
+
+type BuildPullParent bool
+
+var _ BuildOpt = (*BuildPullParent)(nil)
+
+func (u BuildPullParent) Apply(cfg *types.ImageBuildOptions) error {
+	cfg.PullParent = bool(u)
+	return nil
+}
+
+type BuildArgs map[string]*string
+
+var _ BuildOpt = (*BuildArgs)(nil)
+
+func (u BuildArgs) Apply(cfg *types.ImageBuildOptions) error {
+	cfg.BuildArgs = u
+	return nil
+}
+
+type BuildTags []string
+
+var _ BuildOpt = (*BuildTags)(nil)
+
+func (u BuildTags) Apply(cfg *types.ImageBuildOptions) error {
+	cfg.Tags = u
+	return nil
+}
+
+const containerfilePath = "_containerfile"
+
+func (d *Runner) BuildImage(ctx context.Context, containerfile string, containerContext BuildContext, opts ...BuildOpt) ([]byte, error) {
+	return BuildImage(ctx, d.DockerAPI, containerfile, containerContext, opts...)
+}
+
+func BuildImage(ctx context.Context, api *client.Client, containerfile string, containerContext BuildContext, opts ...BuildOpt) ([]byte, error) {
+	var cfg types.ImageBuildOptions
+
+	// Build the container context tarball, provisioning the containerfile into it.
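+	//
+	// Editor's sketch (illustrative only): a minimal invocation of this
+	// helper could look like:
+	//
+	//	bCtx := NewBuildContext()
+	//	bCtx["hello.txt"] = PathContentsFromString("hello")
+	//	out, err := BuildImage(ctx, api,
+	//		"FROM scratch\nCOPY hello.txt /hello.txt\n",
+	//		bCtx, BuildTags([]string{"example:latest"}))
+	//
+	// where api is an assumed *client.Client and out carries the raw build
+	// output stream returned by the daemon.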
+ containerContext[containerfilePath] = PathContentsFromBytes([]byte(containerfile)) + tar, err := containerContext.ToTarball() + if err != nil { + return nil, fmt.Errorf("failed to create build image context tarball: %w", err) + } + cfg.Dockerfile = "/" + containerfilePath + + // Apply all given options + for index, opt := range opts { + if err := opt.Apply(&cfg); err != nil { + return nil, fmt.Errorf("failed to apply option (%d / %v): %w", index, opt, err) + } + } + + resp, err := api.ImageBuild(ctx, tar, cfg) + if err != nil { + return nil, fmt.Errorf("failed to build image: %v", err) + } + + output, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read image build output: %w", err) + } + + return output, nil +} + +func (d *Runner) CopyTo(container string, destination string, contents BuildContext) error { + // XXX: currently we use the default options but we might want to allow + // modifying cfg.CopyUIDGID in the future. + var cfg types.CopyToContainerOptions + + // Convert our provided contents to a tarball to ship up. + tar, err := contents.ToTarball() + if err != nil { + return fmt.Errorf("failed to build contents into tarball: %v", err) + } + + return d.DockerAPI.CopyToContainer(context.Background(), container, destination, tar, cfg) +} + +func (d *Runner) CopyFrom(container string, source string) (BuildContext, *types.ContainerPathStat, error) { + reader, stat, err := d.DockerAPI.CopyFromContainer(context.Background(), container, source) + if err != nil { + return nil, nil, fmt.Errorf("failed to read %v from container: %v", source, err) + } + + result, err := BuildContextFromTarball(reader) + if err != nil { + return nil, nil, fmt.Errorf("failed to build archive from result: %v", err) + } + + return result, &stat, nil +} + +func (d *Runner) GetNetworkAndAddresses(container string) (map[string]string, error) { + response, err := d.DockerAPI.ContainerInspect(context.Background(), container) + if err != nil { + return nil, fmt.Errorf("failed to fetch container inspection data: %v", err) + } + + if response.NetworkSettings == nil || len(response.NetworkSettings.Networks) == 0 { + return nil, fmt.Errorf("container (%v) had no associated network settings: %v", container, response) + } + + ret := make(map[string]string) + ns := response.NetworkSettings.Networks + for network, data := range ns { + if data == nil { + continue + } + + ret[network] = data.IPAddress + } + + if len(ret) == 0 { + return nil, fmt.Errorf("no valid network data for container (%v): %v", container, response) + } + + return ret, nil +} diff --git a/sdk/helper/errutil/error.go b/sdk/helper/errutil/error.go new file mode 100644 index 0000000..1866343 --- /dev/null +++ b/sdk/helper/errutil/error.go @@ -0,0 +1,23 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package errutil + +// UserError represents an error generated due to invalid user input +type UserError struct { + Err string +} + +func (e UserError) Error() string { + return e.Err +} + +// InternalError represents an error generated internally, +// presumably not due to invalid user input +type InternalError struct { + Err string +} + +func (e InternalError) Error() string { + return e.Err +} diff --git a/sdk/helper/hclutil/hcl.go b/sdk/helper/hclutil/hcl.go new file mode 100644 index 0000000..a78d820 --- /dev/null +++ b/sdk/helper/hclutil/hcl.go @@ -0,0 +1,39 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package hclutil
+
+import (
+	"fmt"
+
+	multierror "github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/hcl/hcl/ast"
+)
+
+// CheckHCLKeys checks that each key in the given AST list is one of the
+// provided valid keys.
+func CheckHCLKeys(node ast.Node, valid []string) error {
+	var list *ast.ObjectList
+	switch n := node.(type) {
+	case *ast.ObjectList:
+		list = n
+	case *ast.ObjectType:
+		list = n.List
+	default:
+		return fmt.Errorf("cannot check HCL keys of type %T", n)
+	}
+
+	validMap := make(map[string]struct{}, len(valid))
+	for _, v := range valid {
+		validMap[v] = struct{}{}
+	}
+
+	var result error
+	for _, item := range list.Items {
+		key := item.Keys[0].Token.Value().(string)
+		if _, ok := validMap[key]; !ok {
+			result = multierror.Append(result, fmt.Errorf("invalid key %q on line %d", key, item.Assign.Line))
+		}
+	}
+
+	return result
+}
diff --git a/sdk/helper/identitytpl/templating.go b/sdk/helper/identitytpl/templating.go
new file mode 100644
index 0000000..4cbf1e2
--- /dev/null
+++ b/sdk/helper/identitytpl/templating.go
@@ -0,0 +1,370 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package identitytpl
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/go-secure-stdlib/parseutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+var (
+	ErrUnbalancedTemplatingCharacter = errors.New("unbalanced templating characters")
+	ErrNoEntityAttachedToToken       = errors.New("string contains entity template directives but no entity was provided")
+	ErrNoGroupsAttachedToToken       = errors.New("string contains groups template directives but no groups were provided")
+	ErrTemplateValueNotFound         = errors.New("no value could be found for one of the template directives")
+)
+
+const (
+	ACLTemplating = iota // must be the first value for backwards compatibility
+	JSONTemplating
+)
+
+type PopulateStringInput struct {
+	String            string
+	ValidityCheckOnly bool
+	Entity            *logical.Entity
+	Groups            []*logical.Group
+	NamespaceID       string
+	Mode              int       // processing mode, ACLTemplating or JSONTemplating
+	Now               time.Time // optional, defaults to current time
+
+	templateHandler templateHandlerFunc
+	groupIDs        []string
+	groupNames      []string
+}
+
+// templateHandlerFunc allows generating string outputs based on data type, and
+// different handlers can be used based on mode. For example in ACL mode, strings
+// are emitted verbatim, but they're wrapped in double quotes for JSON mode. And
+// some structures, like slices, might be rendered in one mode but prohibited in
+// another.
+type templateHandlerFunc func(interface{}, ...string) (string, error)
+
+// aclTemplateHandler processes known parameter data types when operating
+// in ACL mode.
+func aclTemplateHandler(v interface{}, keys ...string) (string, error) {
+	switch t := v.(type) {
+	case string:
+		if t == "" {
+			return "", ErrTemplateValueNotFound
+		}
+		return t, nil
+	case []string:
+		return "", ErrTemplateValueNotFound
+	case map[string]string:
+		if len(keys) > 0 {
+			val, ok := t[keys[0]]
+			if ok {
+				return val, nil
+			}
+		}
+		return "", ErrTemplateValueNotFound
+	}
+
+	return "", fmt.Errorf("unknown type: %T", v)
+}
+
+// jsonTemplateHandler processes known parameter data types when operating
+// in JSON mode.
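+//
+// For example (editor's note, not in the upstream source): given
+// map[string]string{"color": "green"}, a lookup with key "color" yields the
+// quoted JSON string "green", rendering the whole map yields the JSON
+// object {"color":"green"}, and a nil map renders as {}.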
+func jsonTemplateHandler(v interface{}, keys ...string) (string, error) {
+	jsonMarshaller := func(v interface{}) (string, error) {
+		enc, err := json.Marshal(v)
+		if err != nil {
+			return "", err
+		}
+		return string(enc), nil
+	}
+
+	switch t := v.(type) {
+	case string:
+		return strconv.Quote(t), nil
+	case []string:
+		return jsonMarshaller(t)
+	case map[string]string:
+		if len(keys) > 0 {
+			return strconv.Quote(t[keys[0]]), nil
+		}
+		if t == nil {
+			return "{}", nil
+		}
+		return jsonMarshaller(t)
+	}
+
+	return "", fmt.Errorf("unknown type: %T", v)
+}
+
+func PopulateString(p PopulateStringInput) (bool, string, error) {
+	if p.String == "" {
+		return false, "", nil
+	}
+
+	// preprocess groups
+	for _, g := range p.Groups {
+		p.groupNames = append(p.groupNames, g.Name)
+		p.groupIDs = append(p.groupIDs, g.ID)
+	}
+
+	// set up mode-specific handler
+	switch p.Mode {
+	case ACLTemplating:
+		p.templateHandler = aclTemplateHandler
+	case JSONTemplating:
+		p.templateHandler = jsonTemplateHandler
+	default:
+		return false, "", fmt.Errorf("unknown mode %d", p.Mode)
+	}
+
+	var subst bool
+	splitStr := strings.Split(p.String, "{{")
+
+	// strings.Split always returns at least one element, and the first
+	// element precedes any "{{", so a "}}" there is unbalanced.
+	if strings.Contains(splitStr[0], "}}") {
+		return false, "", ErrUnbalancedTemplatingCharacter
+	}
+	if len(splitStr) == 1 {
+		return false, p.String, nil
+	}
+
+	var b strings.Builder
+	if !p.ValidityCheckOnly {
+		b.Grow(2 * len(p.String))
+	}
+
+	for i, str := range splitStr {
+		if i == 0 {
+			if !p.ValidityCheckOnly {
+				b.WriteString(str)
+			}
+			continue
+		}
+		splitPiece := strings.Split(str, "}}")
+		switch len(splitPiece) {
+		case 2:
+			subst = true
+			if !p.ValidityCheckOnly {
+				tmplStr, err := performTemplating(strings.TrimSpace(splitPiece[0]), &p)
+				if err != nil {
+					return false, "", err
+				}
+				b.WriteString(tmplStr)
+				b.WriteString(splitPiece[1])
+			}
+		default:
+			return false, "", ErrUnbalancedTemplatingCharacter
+		}
+	}
+
+	return subst, b.String(), nil
+}
+
+func performTemplating(input string, p *PopulateStringInput) (string, error) {
+	performAliasTemplating := func(trimmed string, alias *logical.Alias) (string, error) {
+		switch {
+		case trimmed == "id":
+			return p.templateHandler(alias.ID)
+
+		case trimmed == "name":
+			return p.templateHandler(alias.Name)
+
+		case trimmed == "metadata":
+			return p.templateHandler(alias.Metadata)
+
+		case strings.HasPrefix(trimmed, "metadata."):
+			split := strings.SplitN(trimmed, ".", 2)
+			return p.templateHandler(alias.Metadata, split[1])
+
+		case trimmed == "custom_metadata":
+			return p.templateHandler(alias.CustomMetadata)
+
+		case strings.HasPrefix(trimmed, "custom_metadata."):
+			split := strings.SplitN(trimmed, ".", 2)
+			return p.templateHandler(alias.CustomMetadata, split[1])
+		}
+
+		return "", ErrTemplateValueNotFound
+	}
+
+	performEntityTemplating := func(trimmed string) (string, error) {
+		switch {
+		case trimmed == "id":
+			return p.templateHandler(p.Entity.ID)
+
+		case trimmed == "name":
+			return p.templateHandler(p.Entity.Name)
+
+		case trimmed == "metadata":
+			return p.templateHandler(p.Entity.Metadata)
+
+		case strings.HasPrefix(trimmed, "metadata."):
+			split := strings.SplitN(trimmed, ".", 2)
+			return p.templateHandler(p.Entity.Metadata, split[1])
+
+		case trimmed == "groups.names":
+			return p.templateHandler(p.groupNames)
+
+		case trimmed == "groups.ids":
+			return p.templateHandler(p.groupIDs)
+
+		case strings.HasPrefix(trimmed, "aliases."):
+			split := strings.SplitN(strings.TrimPrefix(trimmed, "aliases."), ".", 2)
+			if len(split) != 2 {
+				return "", errors.New("invalid 
alias selector") + } + var alias *logical.Alias + for _, a := range p.Entity.Aliases { + if split[0] == a.MountAccessor { + alias = a + break + } + } + if alias == nil { + if p.Mode == ACLTemplating { + return "", errors.New("alias not found") + } + + // An empty alias is sufficient for generating defaults + alias = &logical.Alias{Metadata: make(map[string]string), CustomMetadata: make(map[string]string)} + } + return performAliasTemplating(split[1], alias) + } + + return "", ErrTemplateValueNotFound + } + + performGroupsTemplating := func(trimmed string) (string, error) { + var ids bool + + selectorSplit := strings.SplitN(trimmed, ".", 2) + + switch { + case len(selectorSplit) != 2: + return "", errors.New("invalid groups selector") + + case selectorSplit[0] == "ids": + ids = true + + case selectorSplit[0] == "names": + + default: + return "", errors.New("invalid groups selector") + } + trimmed = selectorSplit[1] + + accessorSplit := strings.SplitN(trimmed, ".", 2) + if len(accessorSplit) != 2 { + return "", errors.New("invalid groups accessor") + } + var found *logical.Group + for _, group := range p.Groups { + var compare string + if ids { + compare = group.ID + } else { + if p.NamespaceID != "" && group.NamespaceID != p.NamespaceID { + continue + } + compare = group.Name + } + + if compare == accessorSplit[0] { + found = group + break + } + } + + if found == nil { + return "", fmt.Errorf("entity is not a member of group %q", accessorSplit[0]) + } + + trimmed = accessorSplit[1] + + switch { + case trimmed == "id": + return found.ID, nil + + case trimmed == "name": + if found.Name == "" { + return "", ErrTemplateValueNotFound + } + return found.Name, nil + + case strings.HasPrefix(trimmed, "metadata."): + val, ok := found.Metadata[strings.TrimPrefix(trimmed, "metadata.")] + if !ok { + return "", ErrTemplateValueNotFound + } + return val, nil + } + + return "", ErrTemplateValueNotFound + } + + performTimeTemplating := func(trimmed string) (string, error) { + now := p.Now + if now.IsZero() { + now = time.Now() + } + + opsSplit := strings.SplitN(trimmed, ".", 3) + + if opsSplit[0] != "now" { + return "", fmt.Errorf("invalid time selector %q", opsSplit[0]) + } + + result := now + switch len(opsSplit) { + case 1: + // return current time + case 2: + return "", errors.New("missing time operand") + + case 3: + duration, err := parseutil.ParseDurationSecond(opsSplit[2]) + if err != nil { + return "", errwrap.Wrapf("invalid duration: {{err}}", err) + } + + switch opsSplit[1] { + case "plus": + result = result.Add(duration) + case "minus": + result = result.Add(-duration) + default: + return "", fmt.Errorf("invalid time operator %q", opsSplit[1]) + } + } + + return strconv.FormatInt(result.Unix(), 10), nil + } + + switch { + case strings.HasPrefix(input, "identity.entity."): + if p.Entity == nil { + return "", ErrNoEntityAttachedToToken + } + return performEntityTemplating(strings.TrimPrefix(input, "identity.entity.")) + + case strings.HasPrefix(input, "identity.groups."): + if len(p.Groups) == 0 { + return "", ErrNoGroupsAttachedToToken + } + return performGroupsTemplating(strings.TrimPrefix(input, "identity.groups.")) + + case strings.HasPrefix(input, "time."): + return performTimeTemplating(strings.TrimPrefix(input, "time.")) + } + + return "", ErrTemplateValueNotFound +} diff --git a/sdk/helper/identitytpl/templating_test.go b/sdk/helper/identitytpl/templating_test.go new file mode 100644 index 0000000..d17409e --- /dev/null +++ b/sdk/helper/identitytpl/templating_test.go @@ -0,0 +1,558 @@ +// 
Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package identitytpl
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// intentionally != time.Now() to catch latent use of time.Now instead of
+// passed-in values
+var testNow = time.Now().Add(100 * time.Hour)
+
+func TestPopulate_Basic(t *testing.T) {
+	tests := []struct {
+		mode                int
+		name                string
+		input               string
+		output              string
+		err                 error
+		entityName          string
+		metadata            map[string]string
+		aliasAccessor       string
+		aliasID             string
+		aliasName           string
+		nilEntity           bool
+		validityCheckOnly   bool
+		aliasMetadata       map[string]string
+		aliasCustomMetadata map[string]string
+		groupName           string
+		groupMetadata       map[string]string
+		groupMemberships    []string
+		now                 time.Time
+	}{
+		// time.* tests. Keep tests with time.Now() at the front to avoid false
+		// positives due to the second changing during the test
+		{
+			name:   "time now",
+			input:  "{{time.now}}",
+			output: strconv.Itoa(int(testNow.Unix())),
+			now:    testNow,
+		},
+		{
+			name:   "time plus",
+			input:  "{{time.now.plus.1h}}",
+			output: strconv.Itoa(int(testNow.Unix() + (60 * 60))),
+			now:    testNow,
+		},
+		{
+			name:   "time minus",
+			input:  "{{time.now.minus.5m}}",
+			output: strconv.Itoa(int(testNow.Unix() - (5 * 60))),
+			now:    testNow,
+		},
+		{
+			name:  "invalid operator",
+			input: "{{time.now.divide.5m}}",
+			err:   errors.New("invalid time operator \"divide\""),
+		},
+		{
+			name:  "time missing operand",
+			input: "{{time.now.plus}}",
+			err:   errors.New("missing time operand"),
+		},
+
+		{
+			name:   "no_templating",
+			input:  "path foobar {",
+			output: "path foobar {",
+		},
+		{
+			name:  "only_closing",
+			input: "path foobar}} {",
+			err:   ErrUnbalancedTemplatingCharacter,
+		},
+		{
+			name:  "closing_in_front",
+			input: "path }} {{foobar}} {",
+			err:   ErrUnbalancedTemplatingCharacter,
+		},
+		{
+			name:  "closing_in_back",
+			input: "path {{foobar}} }}",
+			err:   ErrUnbalancedTemplatingCharacter,
+		},
+		{
+			name:   "basic",
+			input:  "path /{{identity.entity.id}}/ {",
+			output: "path /entityID/ {",
+		},
+		{
+			name:       "multiple",
+			input:      "path {{identity.entity.name}} {\n\tval = {{identity.entity.metadata.foo}}\n}",
+			entityName: "entityName",
+			metadata:   map[string]string{"foo": "bar"},
+			output:     "path entityName {\n\tval = bar\n}",
+		},
+		{
+			name:     "multiple_bad_name",
+			input:    "path {{identity.entity.name}} {\n\tval = {{identity.entity.metadata.foo}}\n}",
+			metadata: map[string]string{"foo": "bar"},
+			err:      ErrTemplateValueNotFound,
+		},
+		{
+			name:  "unbalanced_close",
+			input: "path {{identity.entity.id}} {\n\tval = {{ent}}ity.metadata.foo}}\n}",
+			err:   ErrUnbalancedTemplatingCharacter,
+		},
+		{
+			name:  "unbalanced_open",
+			input: "path {{identity.entity.id}} {\n\tval = {{ent{{ity.metadata.foo}}\n}",
+			err:   ErrUnbalancedTemplatingCharacter,
+		},
+		{
+			name:      "no_entity_with_directives",
+			input:     "path {{identity.entity.id}} {\n\tval = {{ent{{ity.metadata.foo}}\n}",
+			err:       ErrNoEntityAttachedToToken,
+			nilEntity: true,
+		},
+		{
+			name:      "no_entity_no_directives",
+			input:     "path name {\n\tval = foo\n}",
+			output:    "path name {\n\tval = foo\n}",
+			nilEntity: true,
+		},
+		{
+			name:          "alias_id_name",
+			input:         "path {{ identity.entity.name}} {\n\tval = {{identity.entity.aliases.foomount.id}} nval = {{identity.entity.aliases.foomount.name}}\n}",
+			entityName:    "entityName",
+			aliasAccessor: "foomount",
+			aliasID:       "aliasID",
+			aliasName:     "aliasName",
+			metadata:      map[string]string{"foo": "bar"},
+			output:        "path entityName {\n\tval = aliasID nval = 
aliasName\n}",
+		},
+		{
+			name:          "alias_id_name_bad_selector",
+			input:         "path foobar {\n\tval = {{identity.entity.aliases.foomount}}\n}",
+			aliasAccessor: "foomount",
+			err:           errors.New("invalid alias selector"),
+		},
+		{
+			name:          "alias_id_name_bad_accessor",
+			input:         "path \"foobar\" {\n\tval = {{identity.entity.aliases.barmount.id}}\n}",
+			aliasAccessor: "foomount",
+			err:           errors.New("alias not found"),
+		},
+		{
+			name:          "alias_metadata",
+			input:         "path \"{{identity.entity.name}}\" {\n\tval = {{identity.entity.aliases.foomount.metadata.zip}}\n}",
+			entityName:    "entityName",
+			aliasAccessor: "foomount",
+			aliasID:       "aliasID",
+			metadata:      map[string]string{"foo": "bar"},
+			aliasMetadata: map[string]string{"zip": "zap"},
+			output:        "path \"entityName\" {\n\tval = zap\n}",
+		},
+		{
+			name:       "group_name",
+			input:      "path \"{{identity.groups.ids.groupID.name}}\" {\n\tval = {{identity.entity.name}}\n}",
+			entityName: "entityName",
+			groupName:  "groupName",
+			output:     "path \"groupName\" {\n\tval = entityName\n}",
+		},
+		{
+			name:       "group_bad_id",
+			input:      "path \"{{identity.groups.ids.hroupID.name}}\" {\n\tval = {{identity.entity.name}}\n}",
+			entityName: "entityName",
+			groupName:  "groupName",
+			err:        errors.New("entity is not a member of group \"hroupID\""),
+		},
+		{
+			name:       "group_id",
+			input:      "path \"{{identity.groups.names.groupName.id}}\" {\n\tval = {{identity.entity.name}}\n}",
+			entityName: "entityName",
+			groupName:  "groupName",
+			output:     "path \"groupID\" {\n\tval = entityName\n}",
+		},
+		{
+			name:       "group_bad_name",
+			input:      "path \"{{identity.groups.names.hroupName.id}}\" {\n\tval = {{identity.entity.name}}\n}",
+			entityName: "entityName",
+			groupName:  "groupName",
+			err:        errors.New("entity is not a member of group \"hroupName\""),
+		},
+		{
+			name:     "metadata_object_disallowed",
+			input:    "{{identity.entity.metadata}}",
+			metadata: map[string]string{"foo": "bar"},
+			err:      ErrTemplateValueNotFound,
+		},
+		{
+			name:          "alias_metadata_object_disallowed",
+			input:         "{{identity.entity.aliases.foomount.metadata}}",
+			aliasAccessor: "foomount",
+			aliasMetadata: map[string]string{"foo": "bar"},
+			err:           ErrTemplateValueNotFound,
+		},
+		{
+			name:             "groups.names_disallowed",
+			input:            "{{identity.entity.groups.names}}",
+			groupMemberships: []string{"foo", "bar"},
+			err:              ErrTemplateValueNotFound,
+		},
+		{
+			name:             "groups.ids_disallowed",
+			input:            "{{identity.entity.groups.ids}}",
+			groupMemberships: []string{"foo", "bar"},
+			err:              ErrTemplateValueNotFound,
+		},
+
+		// JSONTemplating mode cases
+		{
+			mode:   JSONTemplating,
+			name:   "entity id",
+			input:  "{{identity.entity.id}}",
+			output: `"entityID"`,
+		},
+		{
+			mode:       JSONTemplating,
+			name:       "entity name",
+			input:      "{{identity.entity.name}}",
+			entityName: "entityName",
+			output:     `"entityName"`,
+		},
+		{
+			mode:   JSONTemplating,
+			name:   "entity name missing",
+			input:  "{{identity.entity.name}}",
+			output: `""`,
+		},
+		{
+			mode:          JSONTemplating,
+			name:          "alias name/id",
+			input:         "{{identity.entity.aliases.foomount.id}} {{identity.entity.aliases.foomount.name}}",
+			aliasAccessor: "foomount",
+			aliasID:       "aliasID",
+			aliasName:     "aliasName",
+			output:        `"aliasID" "aliasName"`,
+		},
+		{
+			mode:     JSONTemplating,
+			name:     "one metadata key",
+			input:    "{{identity.entity.metadata.color}}",
+			metadata: map[string]string{"foo": "bar", "color": "green"},
+			output:   `"green"`,
+		},
+		{
+			mode:     JSONTemplating,
+			name:     "one metadata key not found",
+			input:    "{{identity.entity.metadata.size}}",
+			metadata: map[string]string{"foo": "bar", "color": "green"},
+			output:   `""`,
+		},
+		{
+			mode: 
JSONTemplating, + name: "all entity metadata", + input: "{{identity.entity.metadata}}", + metadata: map[string]string{"foo": "bar", "color": "green"}, + output: `{"color":"green","foo":"bar"}`, + }, + { + mode: JSONTemplating, + name: "null entity metadata", + input: "{{identity.entity.metadata}}", + output: `{}`, + }, + { + mode: JSONTemplating, + name: "groups.names", + input: "{{identity.entity.groups.names}}", + groupMemberships: []string{"foo", "bar"}, + output: `["foo","bar"]`, + }, + { + mode: JSONTemplating, + name: "groups.ids", + input: "{{identity.entity.groups.ids}}", + groupMemberships: []string{"foo", "bar"}, + output: `["foo_0","bar_1"]`, + }, + { + mode: JSONTemplating, + name: "one alias metadata key", + input: "{{identity.entity.aliases.aws_123.metadata.color}}", + aliasAccessor: "aws_123", + aliasMetadata: map[string]string{"foo": "bar", "color": "green"}, + output: `"green"`, + }, + { + mode: JSONTemplating, + name: "one alias metadata key not found", + input: "{{identity.entity.aliases.aws_123.metadata.size}}", + aliasAccessor: "aws_123", + aliasMetadata: map[string]string{"foo": "bar", "color": "green"}, + output: `""`, + }, + { + mode: JSONTemplating, + name: "one alias metadata, accessor not found", + input: "{{identity.entity.aliases.aws_123.metadata.size}}", + aliasAccessor: "not_gonna_match", + aliasMetadata: map[string]string{"foo": "bar", "color": "green"}, + output: `""`, + }, + { + mode: JSONTemplating, + name: "all alias metadata", + input: "{{identity.entity.aliases.aws_123.metadata}}", + aliasAccessor: "aws_123", + aliasMetadata: map[string]string{"foo": "bar", "color": "green"}, + output: `{"color":"green","foo":"bar"}`, + }, + { + mode: JSONTemplating, + name: "null alias metadata", + input: "{{identity.entity.aliases.aws_123.metadata}}", + aliasAccessor: "aws_123", + output: `{}`, + }, + { + mode: JSONTemplating, + name: "all alias metadata, accessor not found", + input: "{{identity.entity.aliases.aws_123.metadata}}", + aliasAccessor: "not_gonna_match", + aliasMetadata: map[string]string{"foo": "bar", "color": "green"}, + output: `{}`, + }, + { + mode: JSONTemplating, + name: "one alias custom metadata key", + input: "{{identity.entity.aliases.aws_123.custom_metadata.foo}}", + aliasAccessor: "aws_123", + aliasCustomMetadata: map[string]string{"foo": "abc", "bar": "123"}, + output: `"abc"`, + }, + { + mode: JSONTemplating, + name: "one alias custom metadata key not found", + input: "{{identity.entity.aliases.aws_123.custom_metadata.size}}", + aliasAccessor: "aws_123", + aliasCustomMetadata: map[string]string{"foo": "abc", "bar": "123"}, + output: `""`, + }, + { + mode: JSONTemplating, + name: "one alias custom metadata, accessor not found", + input: "{{identity.entity.aliases.aws_123.custom_metadata.size}}", + aliasAccessor: "not_gonna_match", + aliasCustomMetadata: map[string]string{"foo": "abc", "bar": "123"}, + output: `""`, + }, + { + mode: JSONTemplating, + name: "all alias custom metadata", + input: "{{identity.entity.aliases.aws_123.custom_metadata}}", + aliasAccessor: "aws_123", + aliasCustomMetadata: map[string]string{"foo": "abc", "bar": "123"}, + output: `{"bar":"123","foo":"abc"}`, + }, + { + mode: JSONTemplating, + name: "null alias custom metadata", + input: "{{identity.entity.aliases.aws_123.custom_metadata}}", + aliasAccessor: "aws_123", + output: `{}`, + }, + { + mode: JSONTemplating, + name: "all alias custom metadata, accessor not found", + input: "{{identity.entity.aliases.aws_123.custom_metadata}}", + aliasAccessor: 
"not_gonna_match", + aliasCustomMetadata: map[string]string{"foo": "abc", "bar": "123"}, + output: `{}`, + }, + } + + for _, test := range tests { + var entity *logical.Entity + if !test.nilEntity { + entity = &logical.Entity{ + ID: "entityID", + Name: test.entityName, + Metadata: test.metadata, + } + } + if test.aliasAccessor != "" { + entity.Aliases = []*logical.Alias{ + { + MountAccessor: test.aliasAccessor, + ID: test.aliasID, + Name: test.aliasName, + Metadata: test.aliasMetadata, + CustomMetadata: test.aliasCustomMetadata, + }, + } + } + var groups []*logical.Group + if test.groupName != "" { + groups = append(groups, &logical.Group{ + ID: "groupID", + Name: test.groupName, + Metadata: test.groupMetadata, + NamespaceID: "root", + }) + } + + if test.groupMemberships != nil { + for i, groupName := range test.groupMemberships { + groups = append(groups, &logical.Group{ + ID: fmt.Sprintf("%s_%d", groupName, i), + Name: groupName, + }) + } + } + + subst, out, err := PopulateString(PopulateStringInput{ + Mode: test.mode, + ValidityCheckOnly: test.validityCheckOnly, + String: test.input, + Entity: entity, + Groups: groups, + NamespaceID: "root", + Now: test.now, + }) + if err != nil { + if test.err == nil { + t.Fatalf("%s: expected success, got error: %v", test.name, err) + } + if err.Error() != test.err.Error() { + t.Fatalf("%s: got error: %v", test.name, err) + } + } + if out != test.output { + t.Fatalf("%s: bad output: %s, expected: %s", test.name, out, test.output) + } + if err == nil && !subst && out != test.input { + t.Fatalf("%s: bad subst flag", test.name) + } + } +} + +func TestPopulate_CurrentTime(t *testing.T) { + now := time.Now() + + // Test that an unset Now parameter results in current time + input := PopulateStringInput{ + Mode: JSONTemplating, + String: `{{time.now}}`, + } + + _, out, err := PopulateString(input) + if err != nil { + t.Fatal(err) + } + + nowPopulated, err := strconv.Atoi(out) + if err != nil { + t.Fatal(err) + } + + diff := math.Abs(float64(int64(nowPopulated) - now.Unix())) + if diff > 1 { + t.Fatalf("expected time within 1 second. 
Got diff of: %f", diff) + } +} + +func TestPopulate_FullObject(t *testing.T) { + testEntity := &logical.Entity{ + ID: "abc-123", + Name: "Entity Name", + Metadata: map[string]string{ + "color": "green", + "size": "small", + "non-printable": "\"\n\t", + }, + Aliases: []*logical.Alias{ + { + MountAccessor: "aws_123", + Metadata: map[string]string{ + "service": "ec2", + "region": "west", + }, + CustomMetadata: map[string]string{ + "foo": "abc", + "bar": "123", + }, + }, + }, + } + + testGroups := []*logical.Group{ + {ID: "a08b0c02", Name: "g1"}, + {ID: "239bef91", Name: "g2"}, + } + + template := ` + { + "id": {{identity.entity.id}}, + "name": {{identity.entity.name}}, + "all metadata": {{identity.entity.metadata}}, + "one metadata key": {{identity.entity.metadata.color}}, + "one metadata key not found": {{identity.entity.metadata.asldfk}}, + "alias metadata": {{identity.entity.aliases.aws_123.metadata}}, + "alias not found metadata": {{identity.entity.aliases.blahblah.metadata}}, + "one alias metadata key": {{identity.entity.aliases.aws_123.metadata.service}}, + "one not found alias metadata key": {{identity.entity.aliases.blahblah.metadata.service}}, + "group names": {{identity.entity.groups.names}}, + "group ids": {{identity.entity.groups.ids}}, + "repeated and": {"nested element": {{identity.entity.name}}}, + "alias custom metadata": {{identity.entity.aliases.aws_123.custom_metadata}}, + "alias not found custom metadata": {{identity.entity.aliases.blahblah.custom_metadata}}, + "one alias custom metadata key": {{identity.entity.aliases.aws_123.custom_metadata.foo}}, + "one not found alias custom metadata key": {{identity.entity.aliases.blahblah.custom_metadata.foo}}, + }` + + expected := ` + { + "id": "abc-123", + "name": "Entity Name", + "all metadata": {"color":"green","non-printable":"\"\n\t","size":"small"}, + "one metadata key": "green", + "one metadata key not found": "", + "alias metadata": {"region":"west","service":"ec2"}, + "alias not found metadata": {}, + "one alias metadata key": "ec2", + "one not found alias metadata key": "", + "group names": ["g1","g2"], + "group ids": ["a08b0c02","239bef91"], + "repeated and": {"nested element": "Entity Name"}, + "alias custom metadata": {"bar":"123","foo":"abc"}, + "alias not found custom metadata": {}, + "one alias custom metadata key": "abc", + "one not found alias custom metadata key": "", + }` + + input := PopulateStringInput{ + Mode: JSONTemplating, + String: template, + Entity: testEntity, + Groups: testGroups, + } + _, out, err := PopulateString(input) + if err != nil { + t.Fatal(err) + } + + if out != expected { + t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, out) + } +} diff --git a/sdk/helper/jsonutil/json.go b/sdk/helper/jsonutil/json.go new file mode 100644 index 0000000..1abd9fa --- /dev/null +++ b/sdk/helper/jsonutil/json.go @@ -0,0 +1,103 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package jsonutil
+
+import (
+	"bytes"
+	"compress/gzip"
+	"encoding/json"
+	"fmt"
+	"io"
+
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/vault/sdk/helper/compressutil"
+)
+
+// EncodeJSON encodes/marshals the given object into JSON.
+func EncodeJSON(in interface{}) ([]byte, error) {
+	if in == nil {
+		return nil, fmt.Errorf("input for encoding is nil")
+	}
+	var buf bytes.Buffer
+	enc := json.NewEncoder(&buf)
+	if err := enc.Encode(in); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+// EncodeJSONAndCompress encodes the given input into JSON and compresses the
+// encoded value (using Gzip format BestCompression level, by default). A
+// canary byte is placed at the beginning of the returned bytes for the logic
+// in the decompression method to identify compressed input.
+func EncodeJSONAndCompress(in interface{}, config *compressutil.CompressionConfig) ([]byte, error) {
+	if in == nil {
+		return nil, fmt.Errorf("input for encoding is nil")
+	}
+
+	// First JSON encode the given input
+	encodedBytes, err := EncodeJSON(in)
+	if err != nil {
+		return nil, err
+	}
+
+	if config == nil {
+		config = &compressutil.CompressionConfig{
+			Type:                 compressutil.CompressionTypeGzip,
+			GzipCompressionLevel: gzip.BestCompression,
+		}
+	}
+
+	return compressutil.Compress(encodedBytes, config)
+}
+
+// DecodeJSON tries to decompress the given data; decompression fails if the
+// content was not compressed in the first place, which is identified by a
+// canary byte before the compressed data. If the data is not compressed, it
+// is JSON decoded directly; otherwise the decompressed data is JSON decoded.
+func DecodeJSON(data []byte, out interface{}) error {
+	if len(data) == 0 {
+		return fmt.Errorf("'data' being decoded is nil")
+	}
+	if out == nil {
+		return fmt.Errorf("output parameter 'out' is nil")
+	}
+
+	// Decompress the data if it was compressed in the first place
+	decompressedBytes, uncompressed, err := compressutil.Decompress(data)
+	if err != nil {
+		return errwrap.Wrapf("failed to decompress JSON: {{err}}", err)
+	}
+	if !uncompressed && len(decompressedBytes) == 0 {
+		return fmt.Errorf("decompressed data being decoded is invalid")
+	}
+
+	// If the supplied input did not contain the compression canary, the
+	// compression utility reports it as uncompressed. Decode the
+	// decompressed input otherwise.
+	if !uncompressed {
+		data = decompressedBytes
+	}
+
+	return DecodeJSONFromReader(bytes.NewReader(data), out)
+}
+
+// DecodeJSONFromReader decodes/unmarshals JSON from the given io.Reader into
+// the desired object.
+func DecodeJSONFromReader(r io.Reader, out interface{}) error {
+	if r == nil {
+		return fmt.Errorf("'io.Reader' being decoded is nil")
+	}
+	if out == nil {
+		return fmt.Errorf("output parameter 'out' is nil")
+	}
+
+	dec := json.NewDecoder(r)
+
+	// While decoding JSON values, interpret the integer values as `json.Number`s instead of `float64`.
+	dec.UseNumber()
+
+	// Since 'out' is an interface representing a pointer, pass it to the decoder without an '&'
+	return dec.Decode(out)
+}
diff --git a/sdk/helper/jsonutil/json_test.go b/sdk/helper/jsonutil/json_test.go
new file mode 100644
index 0000000..10aabf1
--- /dev/null
+++ b/sdk/helper/jsonutil/json_test.go
@@ -0,0 +1,144 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package jsonutil
+
+import (
+	"bytes"
+	"compress/gzip"
+	"reflect"
+	"strings"
+	"testing"
+
+	"github.com/hashicorp/vault/sdk/helper/compressutil"
+)
+
+func TestJSONUtil_CompressDecompressJSON(t *testing.T) {
+	expected := map[string]interface{}{
+		"test":       "data",
+		"validation": "process",
+	}
+
+	// Compress an object
+	compressedBytes, err := EncodeJSONAndCompress(expected, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(compressedBytes) == 0 {
+		t.Fatal("expected compressed data")
+	}
+
+	// Check if canary is present in the compressed data
+	if compressedBytes[0] != compressutil.CompressionCanaryGzip {
+		t.Fatalf("canary missing in compressed data")
+	}
+
+	// Decompress and decode the compressed information and verify the functional
+	// behavior
+	var actual map[string]interface{}
+	if err = DecodeJSON(compressedBytes, &actual); err != nil {
+		t.Fatal(err)
+	}
+	if !reflect.DeepEqual(expected, actual) {
+		t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual)
+	}
+	for key := range actual {
+		delete(actual, key)
+	}
+
+	// Test invalid data
+	if err = DecodeJSON([]byte{}, &actual); err == nil {
+		t.Fatalf("expected a failure")
+	}
+
+	// Test invalid data after the canary byte
+	var buf bytes.Buffer
+	buf.Write([]byte{compressutil.CompressionCanaryGzip})
+	if err = DecodeJSON(buf.Bytes(), &actual); err == nil {
+		t.Fatalf("expected a failure")
+	}
+
+	// Compress an object
+	compressedBytes, err = EncodeJSONAndCompress(expected, &compressutil.CompressionConfig{
+		Type:                 compressutil.CompressionTypeGzip,
+		GzipCompressionLevel: gzip.BestSpeed,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(compressedBytes) == 0 {
+		t.Fatal("expected compressed data")
+	}
+
+	// Check if canary is present in the compressed data
+	if compressedBytes[0] != compressutil.CompressionCanaryGzip {
+		t.Fatalf("canary missing in compressed data")
+	}
+
+	// Decompress and decode the compressed information and verify the functional
+	// behavior
+	if err = DecodeJSON(compressedBytes, &actual); err != nil {
+		t.Fatal(err)
+	}
+	if !reflect.DeepEqual(expected, actual) {
+		t.Fatalf("bad: expected: %#v\nactual: %#v", expected, actual)
+	}
+}
+
+func TestJSONUtil_EncodeJSON(t *testing.T) {
+	input := map[string]interface{}{
+		"test":       "data",
+		"validation": "process",
+	}
+
+	actualBytes, err := EncodeJSON(input)
+	if err != nil {
+		t.Fatalf("failed to encode JSON: %v", err)
+	}
+
+	actual := strings.TrimSpace(string(actualBytes))
+	expected := `{"test":"data","validation":"process"}`
+
+	if actual != expected {
+		t.Fatalf("bad: encoded JSON: expected:%s\nactual:%s\n", expected, string(actualBytes))
+	}
+}
+
+func TestJSONUtil_DecodeJSON(t *testing.T) {
+	input := `{"test":"data","validation":"process"}`
+
+	var actual map[string]interface{}
+
+	// Fail the test on a decode error instead of merely printing it.
+	err := DecodeJSON([]byte(input), &actual)
+	if err != nil {
+		t.Fatalf("decoding err: %v", err)
+	}
+
+	expected := map[string]interface{}{
+		"test":       "data",
+		"validation": "process",
+	}
+	if !reflect.DeepEqual(actual, expected) {
+		t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
+	}
+}
+
+func TestJSONUtil_DecodeJSONFromReader(t *testing.T) {
+	input := `{"test":"data","validation":"process"}`
+
+	var actual map[string]interface{}
+
+	// Fail the test on a decode error instead of merely printing it.
+	err := DecodeJSONFromReader(bytes.NewReader([]byte(input)), &actual)
+	if err != nil {
+		t.Fatalf("decoding err: %v", err)
+	}
+
+	expected := map[string]interface{}{
+		"test":       "data",
+		"validation": "process",
+	}
+	if !reflect.DeepEqual(actual, expected) 
{
+		t.Fatalf("bad: expected:%#v\nactual:%#v", expected, actual)
+	}
+}
diff --git a/sdk/helper/kdf/kdf.go b/sdk/helper/kdf/kdf.go
new file mode 100644
index 0000000..e9964ba
--- /dev/null
+++ b/sdk/helper/kdf/kdf.go
@@ -0,0 +1,85 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// Package kdf implements Key Derivation Functions (KDF) based on the
+// recommendations of NIST SP 800-108. These are useful for generating
+// unique-per-transaction keys, or situations in which a key hierarchy
+// may be useful.
+package kdf
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/binary"
+	"fmt"
+	"math"
+)
+
+// PRF is a pseudo-random function that takes a key or seed,
+// as well as additional binary data and generates output that is
+// indistinguishable from random. Examples are cryptographic hash
+// functions or block ciphers.
+type PRF func([]byte, []byte) ([]byte, error)
+
+// CounterMode implements the counter mode KDF that uses a pseudo-random-function (PRF)
+// along with a counter to generate derived keys. The KDF takes a base key,
+// a derivation context, and the required number of output bits.
+func CounterMode(prf PRF, prfLen uint32, key []byte, context []byte, bits uint32) ([]byte, error) {
+	// Ensure the PRF is byte aligned
+	if prfLen%8 != 0 {
+		return nil, fmt.Errorf("PRF must be byte aligned")
+	}
+
+	// Ensure the bits required are byte aligned
+	if bits%8 != 0 {
+		return nil, fmt.Errorf("bits required must be byte aligned")
+	}
+
+	// Determine the number of rounds required
+	rounds := bits / prfLen
+	if bits%prfLen != 0 {
+		rounds++
+	}
+
+	if len(context) > math.MaxInt-8 {
+		return nil, fmt.Errorf("too much context specified; would overflow: %d bytes", len(context))
+	}
+
+	// Allocate and set up the input: a 4-byte big-endian counter, the
+	// context, and the 4-byte big-endian output length in bits.
+	input := make([]byte, 4+len(context)+4)
+	copy(input[4:], context)
+	binary.BigEndian.PutUint32(input[4+len(context):], bits)
+
+	// Iteratively generate more key material
+	var out []byte
+	var i uint32
+	for i = 0; i < rounds; i++ {
+		// Update the counter in the input string
+		binary.BigEndian.PutUint32(input[:4], i)
+
+		// Compute more key material
+		part, err := prf(key, input)
+		if err != nil {
+			return nil, err
+		}
+		if uint32(len(part)*8) != prfLen {
+			return nil, fmt.Errorf("PRF length mis-match (%d vs %d)", len(part)*8, prfLen)
+		}
+		out = append(out, part...)
+	}
+
+	// Return the desired number of output bytes
+	return out[:bits/8], nil
+}
+
+const (
+	// HMACSHA256PRFLen is the length of output from HMACSHA256PRF
+	HMACSHA256PRFLen uint32 = 256
+)
+
+// HMACSHA256PRF is a pseudo-random-function (PRF) that uses an HMAC-SHA256
+func HMACSHA256PRF(key []byte, data []byte) ([]byte, error) {
+	hash := hmac.New(sha256.New, key)
+	hash.Write(data)
+	return hash.Sum(nil), nil
+}
diff --git a/sdk/helper/kdf/kdf_test.go b/sdk/helper/kdf/kdf_test.go
new file mode 100644
index 0000000..ed5c0a1
--- /dev/null
+++ b/sdk/helper/kdf/kdf_test.go
@@ -0,0 +1,78 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package kdf + +import ( + "bytes" + "testing" +) + +func TestCounterMode(t *testing.T) { + key := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + context := []byte("the quick brown fox") + prf := HMACSHA256PRF + prfLen := HMACSHA256PRFLen + + // Expect256 was generated in python with + // import hashlib, hmac + // hash = hashlib.sha256 + // context = "the quick brown fox" + // key = "".join([chr(x) for x in range(1, 17)]) + // inp = "\x00\x00\x00\x00"+context+"\x00\x00\x01\x00" + // digest = hmac.HMAC(key, inp, hash).digest() + // print [ord(x) for x in digest] + expect256 := []byte{ + 219, 25, 238, 6, 185, 236, 180, 64, 248, 152, 251, + 153, 79, 5, 141, 222, 66, 200, 66, 143, 40, 3, 101, 221, 206, 163, 102, + 80, 88, 234, 87, 157, + } + + for _, l := range []uint32{128, 256, 384, 1024} { + out, err := CounterMode(prf, prfLen, key, context, l) + if err != nil { + t.Fatalf("err: %v", err) + } + + if uint32(len(out)*8) != l { + t.Fatalf("bad length: %#v", out) + } + + if bytes.Contains(out, key) { + t.Fatalf("output contains key") + } + + if l == 256 && !bytes.Equal(out, expect256) { + t.Fatalf("mis-match") + } + } +} + +func TestHMACSHA256PRF(t *testing.T) { + key := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + data := []byte("foobarbaz") + out, err := HMACSHA256PRF(key, data) + if err != nil { + t.Fatalf("err: %v", err) + } + + if uint32(len(out)*8) != HMACSHA256PRFLen { + t.Fatalf("Bad len") + } + + // Expect was generated in python with: + // import hashlib, hmac + // hash = hashlib.sha256 + // msg = "foobarbaz" + // key = "".join([chr(x) for x in range(1, 17)]) + // hm = hmac.HMAC(key, msg, hash) + // print [ord(x) for x in hm.digest()] + expect := []byte{ + 9, 50, 146, 8, 188, 130, 150, 107, 205, 147, 82, 170, + 253, 183, 26, 38, 167, 194, 220, 111, 56, 118, 219, 209, 31, 52, 137, + 90, 246, 133, 191, 124, + } + if !bytes.Equal(expect, out) { + t.Fatalf("mis-matched output") + } +} diff --git a/sdk/helper/keysutil/cache.go b/sdk/helper/keysutil/cache.go new file mode 100644 index 0000000..fb55091 --- /dev/null +++ b/sdk/helper/keysutil/cache.go @@ -0,0 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package keysutil + +type Cache interface { + Delete(key interface{}) + Load(key interface{}) (value interface{}, ok bool) + Store(key, value interface{}) + Size() int +} diff --git a/sdk/helper/keysutil/consts.go b/sdk/helper/keysutil/consts.go new file mode 100644 index 0000000..b684232 --- /dev/null +++ b/sdk/helper/keysutil/consts.go @@ -0,0 +1,83 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package keysutil + +import ( + "crypto" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "hash" + + "golang.org/x/crypto/sha3" +) + +type HashType uint32 + +const ( + HashTypeNone HashType = iota + HashTypeSHA1 + HashTypeSHA2224 + HashTypeSHA2256 + HashTypeSHA2384 + HashTypeSHA2512 + HashTypeSHA3224 + HashTypeSHA3256 + HashTypeSHA3384 + HashTypeSHA3512 +) + +type MarshalingType uint32 + +const ( + _ = iota + MarshalingTypeASN1 MarshalingType = iota + MarshalingTypeJWS +) + +var ( + HashTypeMap = map[string]HashType{ + "none": HashTypeNone, + "sha1": HashTypeSHA1, + "sha2-224": HashTypeSHA2224, + "sha2-256": HashTypeSHA2256, + "sha2-384": HashTypeSHA2384, + "sha2-512": HashTypeSHA2512, + "sha3-224": HashTypeSHA3224, + "sha3-256": HashTypeSHA3256, + "sha3-384": HashTypeSHA3384, + "sha3-512": HashTypeSHA3512, + } + + HashFuncMap = map[HashType]func() hash.Hash{ + HashTypeNone: nil, + HashTypeSHA1: sha1.New, + HashTypeSHA2224: sha256.New224, + HashTypeSHA2256: sha256.New, + HashTypeSHA2384: sha512.New384, + HashTypeSHA2512: sha512.New, + HashTypeSHA3224: sha3.New224, + HashTypeSHA3256: sha3.New256, + HashTypeSHA3384: sha3.New384, + HashTypeSHA3512: sha3.New512, + } + + CryptoHashMap = map[HashType]crypto.Hash{ + HashTypeNone: 0, + HashTypeSHA1: crypto.SHA1, + HashTypeSHA2224: crypto.SHA224, + HashTypeSHA2256: crypto.SHA256, + HashTypeSHA2384: crypto.SHA384, + HashTypeSHA2512: crypto.SHA512, + HashTypeSHA3224: crypto.SHA3_224, + HashTypeSHA3256: crypto.SHA3_256, + HashTypeSHA3384: crypto.SHA3_384, + HashTypeSHA3512: crypto.SHA3_512, + } + + MarshalingTypeMap = map[string]MarshalingType{ + "asn1": MarshalingTypeASN1, + "jws": MarshalingTypeJWS, + } +) diff --git a/sdk/helper/keysutil/encrypted_key_storage.go b/sdk/helper/keysutil/encrypted_key_storage.go new file mode 100644 index 0000000..7314758 --- /dev/null +++ b/sdk/helper/keysutil/encrypted_key_storage.go @@ -0,0 +1,285 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package keysutil + +import ( + "context" + "encoding/base64" + "errors" + "math/big" + paths "path" + "sort" + "strings" + + lru "github.com/hashicorp/golang-lru" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + // DefaultCacheSize is used if no cache size is specified for + // NewEncryptedKeyStorage. This value is the number of cache entries to + // store, not the size in bytes of the cache. + DefaultCacheSize = 16 * 1024 + + // DefaultPrefix is used if no prefix is specified for + // NewEncryptedKeyStorage. Prefix must be defined so we can provide context + // for the base folder. + DefaultPrefix = "encryptedkeys/" + + // EncryptedKeyPolicyVersionTpl is a template that can be used to minimize + // the amount of data that's stored with the ciphertext. + EncryptedKeyPolicyVersionTpl = "{{version}}:" +) + +var ( + // ErrPolicyDerivedKeys is returned if the provided policy does not use + // derived keys. This is a requirement for this storage implementation. + ErrPolicyDerivedKeys = errors.New("key policy must use derived keys") + + // ErrPolicyConvergentEncryption is returned if the provided policy does not use + // convergent encryption. This is a requirement for this storage implementation. + ErrPolicyConvergentEncryption = errors.New("key policy must use convergent encryption") + + // ErrPolicyConvergentVersion is returned if the provided policy does not use + // a new enough convergent version. This is a requirement for this storage + // implementation. 
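+	//
+	// Editor's note (illustrative): derived, convergent keys are required
+	// because path encryption must be deterministic: the same plaintext
+	// path, encrypted under the same context, must yield the same
+	// ciphertext every time; otherwise Get/Put/Delete could never find a
+	// previously written key again.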
+	ErrPolicyConvergentVersion = errors.New("key policy must use convergent version > 2")
+
+	// ErrNilStorage is returned if the provided storage is nil.
+	ErrNilStorage = errors.New("nil storage provided")
+
+	// ErrNilPolicy is returned if the provided policy is nil.
+	ErrNilPolicy = errors.New("nil policy provided")
+)
+
+// EncryptedKeyStorageConfig is used to configure an EncryptedKeyStorage object.
+type EncryptedKeyStorageConfig struct {
+	// Policy is the key policy to use to encrypt the key paths.
+	Policy *Policy
+
+	// Prefix is the storage prefix for this instance of the EncryptedKeyStorage
+	// object. This is stored in plaintext. If not set the DefaultPrefix will be
+	// used.
+	Prefix string
+
+	// CacheSize is the number of elements to cache. If not set the
+	// DefaultCacheSize will be used.
+	CacheSize int
+}
+
+// NewEncryptedKeyStorageWrapper takes an EncryptedKeyStorageConfig and returns a new
+// EncryptedKeyStorage object.
+func NewEncryptedKeyStorageWrapper(config EncryptedKeyStorageConfig) (*EncryptedKeyStorageWrapper, error) {
+	if config.Policy == nil {
+		return nil, ErrNilPolicy
+	}
+
+	if !config.Policy.Derived {
+		return nil, ErrPolicyDerivedKeys
+	}
+
+	if !config.Policy.ConvergentEncryption {
+		return nil, ErrPolicyConvergentEncryption
+	}
+
+	if config.Prefix == "" {
+		config.Prefix = DefaultPrefix
+	}
+
+	if !strings.HasSuffix(config.Prefix, "/") {
+		config.Prefix += "/"
+	}
+
+	size := config.CacheSize
+	if size <= 0 {
+		size = DefaultCacheSize
+	}
+
+	cache, err := lru.New2Q(size)
+	if err != nil {
+		return nil, err
+	}
+
+	return &EncryptedKeyStorageWrapper{
+		policy: config.Policy,
+		prefix: config.Prefix,
+		lru:    cache,
+	}, nil
+}
+
+type EncryptedKeyStorageWrapper struct {
+	policy *Policy
+	lru    *lru.TwoQueueCache
+	prefix string
+}
+
+func (f *EncryptedKeyStorageWrapper) Wrap(s logical.Storage) logical.Storage {
+	return &encryptedKeyStorage{
+		policy: f.policy,
+		s:      s,
+		prefix: f.prefix,
+		lru:    f.lru,
+	}
+}
+
+// EncryptedKeyStorage implements the logical.Storage interface and ensures the
+// storage paths are encrypted in the underlying storage.
+type encryptedKeyStorage struct {
+	policy *Policy
+	s      logical.Storage
+	lru    *lru.TwoQueueCache
+
+	prefix string
+}
+
+func ensureTailingSlash(path string) string {
+	if !strings.HasSuffix(path, "/") {
+		return path + "/"
+	}
+	return path
+}
+
+// List implements the logical.Storage List method, and decrypts all the items
+// in a path prefix. This can only operate on full folder structures so the
+// prefix should end in a "/".
+func (s *encryptedKeyStorage) List(ctx context.Context, prefix string) ([]string, error) {
+	var decoder big.Int
+
+	encPrefix, err := s.encryptPath(prefix)
+	if err != nil {
+		return nil, err
+	}
+
+	keys, err := s.s.List(ctx, ensureTailingSlash(encPrefix))
+	if err != nil {
+		return keys, err
+	}
+
+	decryptedKeys := make([]string, len(keys))
+
+	// The context for the decryption operations will be the object's prefix
+	// joined with the provided prefix. Join cleans the path ensuring there
+	// isn't a trailing "/".
+	context := []byte(paths.Join(s.prefix, prefix))
+
+	for i, k := range keys {
+		raw, ok := s.lru.Get(k)
+		if ok {
+			// cache HIT, we can bail early and skip the decode & decrypt operations.
+			decryptedKeys[i] = raw.(string)
+			continue
+		}
+
+		// If a folder is included in the keys it will have a trailing "/".
+		// We need to remove this before decoding/decrypting and add it back
+		// later.
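+		// ("/" is not a valid base62 digit, so leaving it in place would
+		// break the big.Int decode below.)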
+		appendSlash := strings.HasSuffix(k, "/")
+		if appendSlash {
+			k = strings.TrimSuffix(k, "/")
+		}
+
+		decoder.SetString(k, 62)
+		decoded := decoder.Bytes()
+		if len(decoded) == 0 {
+			return nil, errors.New("could not decode key")
+		}
+
+		// Decrypt the data with the object's key policy.
+		encodedPlaintext, err := s.policy.Decrypt(context, nil, string(decoded[:]))
+		if err != nil {
+			return nil, err
+		}
+
+		// The plaintext is still base64 encoded, decode it.
+		decoded, err = base64.StdEncoding.DecodeString(encodedPlaintext)
+		if err != nil {
+			return nil, err
+		}
+
+		plaintext := string(decoded[:])
+
+		// Add the slash back to the plaintext value
+		if appendSlash {
+			plaintext += "/"
+			k += "/"
+		}
+
+		// We want to store the unencoded version of the key in the cache.
+		// This will make it more performant when it's a HIT.
+		s.lru.Add(k, plaintext)
+
+		decryptedKeys[i] = plaintext
+	}
+
+	sort.Strings(decryptedKeys)
+	return decryptedKeys, nil
+}
+
+// Get implements the logical.Storage Get method.
+func (s *encryptedKeyStorage) Get(ctx context.Context, path string) (*logical.StorageEntry, error) {
+	encPath, err := s.encryptPath(path)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.s.Get(ctx, encPath)
+}
+
+// Put implements the logical.Storage Put method.
+func (s *encryptedKeyStorage) Put(ctx context.Context, entry *logical.StorageEntry) error {
+	encPath, err := s.encryptPath(entry.Key)
+	if err != nil {
+		return err
+	}
+	e := &logical.StorageEntry{}
+	*e = *entry
+
+	e.Key = encPath
+
+	return s.s.Put(ctx, e)
+}
+
+// Delete implements the logical.Storage Delete method.
+func (s *encryptedKeyStorage) Delete(ctx context.Context, path string) error {
+	encPath, err := s.encryptPath(path)
+	if err != nil {
+		return err
+	}
+
+	return s.s.Delete(ctx, encPath)
+}
+
+// encryptPath takes a plaintext path and encrypts each path section (separated
+// by "/") with the object's key policy. The context for each encryption is the
+// plaintext path prefix for the key.
+func (s *encryptedKeyStorage) encryptPath(path string) (string, error) {
+	var encoder big.Int
+
+	if path == "" || path == "/" {
+		return s.prefix, nil
+	}
+
+	path = paths.Clean(path)
+
+	// Trim the prefix if it starts with a "/"
+	path = strings.TrimPrefix(path, "/")
+
+	parts := strings.Split(path, "/")
+
+	encPath := s.prefix
+	context := strings.TrimSuffix(s.prefix, "/")
+	for _, p := range parts {
+		encoded := base64.StdEncoding.EncodeToString([]byte(p))
+		ciphertext, err := s.policy.Encrypt(0, []byte(context), nil, encoded)
+		if err != nil {
+			return "", err
+		}
+
+		encoder.SetBytes([]byte(ciphertext))
+		encPath = paths.Join(encPath, encoder.Text(62))
+		context = paths.Join(context, p)
+	}
+
+	return encPath, nil
+}
diff --git a/sdk/helper/keysutil/encrypted_key_storage_test.go b/sdk/helper/keysutil/encrypted_key_storage_test.go
new file mode 100644
index 0000000..5147027
--- /dev/null
+++ b/sdk/helper/keysutil/encrypted_key_storage_test.go
@@ -0,0 +1,325 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package keysutil + +import ( + "context" + "crypto/rand" + "fmt" + "reflect" + "testing" + + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/logical" +) + +var compilerOpt []string + +func TestEncrytedKeysStorage_BadPolicy(t *testing.T) { + policy := NewPolicy(PolicyConfig{ + Name: "metadata", + Type: KeyType_AES256_GCM96, + Derived: false, + KDF: Kdf_hkdf_sha256, + ConvergentEncryption: true, + VersionTemplate: EncryptedKeyPolicyVersionTpl, + }) + + _, err := NewEncryptedKeyStorageWrapper(EncryptedKeyStorageConfig{ + Policy: policy, + Prefix: "prefix", + }) + if err != ErrPolicyDerivedKeys { + t.Fatalf("Unexpected Error: %s", err) + } + + policy = NewPolicy(PolicyConfig{ + Name: "metadata", + Type: KeyType_AES256_GCM96, + Derived: true, + KDF: Kdf_hkdf_sha256, + ConvergentEncryption: false, + VersionTemplate: EncryptedKeyPolicyVersionTpl, + }) + + _, err = NewEncryptedKeyStorageWrapper(EncryptedKeyStorageConfig{ + Policy: policy, + Prefix: "prefix", + }) + if err != ErrPolicyConvergentEncryption { + t.Fatalf("Unexpected Error: %s", err) + } + + policy = NewPolicy(PolicyConfig{ + Name: "metadata", + Type: KeyType_AES256_GCM96, + Derived: true, + KDF: Kdf_hkdf_sha256, + ConvergentEncryption: true, + VersionTemplate: EncryptedKeyPolicyVersionTpl, + }) + _, err = NewEncryptedKeyStorageWrapper(EncryptedKeyStorageConfig{ + Policy: policy, + Prefix: "prefix", + }) + if err != nil { + t.Fatalf("Unexpected Error: %s", err) + } +} + +func TestEncryptedKeysStorage_List(t *testing.T) { + s := &logical.InmemStorage{} + policy := NewPolicy(PolicyConfig{ + Name: "metadata", + Type: KeyType_AES256_GCM96, + Derived: true, + KDF: Kdf_hkdf_sha256, + ConvergentEncryption: true, + VersionTemplate: EncryptedKeyPolicyVersionTpl, + }) + + ctx := context.Background() + + err := policy.Rotate(ctx, s, rand.Reader) + if err != nil { + t.Fatal(err) + } + + es, err := NewEncryptedKeyStorageWrapper(EncryptedKeyStorageConfig{ + Policy: policy, + Prefix: "prefix", + }) + if err != nil { + t.Fatal(err) + } + + err = es.Wrap(s).Put(ctx, &logical.StorageEntry{ + Key: "test", + Value: []byte("test"), + }) + if err != nil { + t.Fatal(err) + } + + err = es.Wrap(s).Put(ctx, &logical.StorageEntry{ + Key: "test/foo", + Value: []byte("test"), + }) + if err != nil { + t.Fatal(err) + } + + err = es.Wrap(s).Put(ctx, &logical.StorageEntry{ + Key: "test/foo1/test", + Value: []byte("test"), + }) + if err != nil { + t.Fatal(err) + } + + keys, err := es.Wrap(s).List(ctx, "test/") + if err != nil { + t.Fatal(err) + } + + // Test prefixed with "/" + keys, err = es.Wrap(s).List(ctx, "/test/") + if err != nil { + t.Fatal(err) + } + + if len(keys) != 2 || keys[1] != "foo1/" || keys[0] != "foo" { + t.Fatalf("bad keys: %#v", keys) + } + + keys, err = es.Wrap(s).List(ctx, "/") + if err != nil { + t.Fatal(err) + } + if len(keys) != 2 || keys[0] != "test" || keys[1] != "test/" { + t.Fatalf("bad keys: %#v", keys) + } + + keys, err = es.Wrap(s).List(ctx, "") + if err != nil { + t.Fatal(err) + } + if len(keys) != 2 || keys[0] != "test" || keys[1] != "test/" { + t.Fatalf("bad keys: %#v", keys) + } +} + +func TestEncryptedKeysStorage_CRUD(t *testing.T) { + s := &logical.InmemStorage{} + policy := NewPolicy(PolicyConfig{ + Name: "metadata", + Type: KeyType_AES256_GCM96, + Derived: true, + KDF: Kdf_hkdf_sha256, + ConvergentEncryption: true, + VersionTemplate: EncryptedKeyPolicyVersionTpl, + }) + + ctx := context.Background() + + err := policy.Rotate(ctx, s, rand.Reader) + if err != 
nil { + t.Fatal(err) + } + + es, err := NewEncryptedKeyStorageWrapper(EncryptedKeyStorageConfig{ + Policy: policy, + Prefix: "prefix", + }) + if err != nil { + t.Fatal(err) + } + + err = es.Wrap(s).Put(ctx, &logical.StorageEntry{ + Key: "test/foo", + Value: []byte("test"), + }) + if err != nil { + t.Fatal(err) + } + + err = es.Wrap(s).Put(ctx, &logical.StorageEntry{ + Key: "test/foo1/test", + Value: []byte("test"), + }) + if err != nil { + t.Fatal(err) + } + + keys, err := es.Wrap(s).List(ctx, "test/") + if err != nil { + t.Fatal(err) + } + + // Test prefixed with "/" + keys, err = es.Wrap(s).List(ctx, "/test/") + if err != nil { + t.Fatal(err) + } + + if len(keys) != 2 || !strutil.StrListContains(keys, "foo1/") || !strutil.StrListContains(keys, "foo") { + t.Fatalf("bad keys: %#v", keys) + } + + // Test the cached value is correct + keys, err = es.Wrap(s).List(ctx, "test/") + if err != nil { + t.Fatal(err) + } + + if len(keys) != 2 || !strutil.StrListContains(keys, "foo1/") || !strutil.StrListContains(keys, "foo") { + t.Fatalf("bad keys: %#v", keys) + } + + data, err := es.Wrap(s).Get(ctx, "test/foo") + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(data.Value, []byte("test")) { + t.Fatalf("bad data: %#v", data) + } + + err = es.Wrap(s).Delete(ctx, "test/foo") + if err != nil { + t.Fatal(err) + } + + data, err = es.Wrap(s).Get(ctx, "test/foo") + if err != nil { + t.Fatal(err) + } + if data != nil { + t.Fatal("data should be nil") + } +} + +func BenchmarkEncrytedKeyStorage_List(b *testing.B) { + s := &logical.InmemStorage{} + policy := NewPolicy(PolicyConfig{ + Name: "metadata", + Type: KeyType_AES256_GCM96, + Derived: true, + KDF: Kdf_hkdf_sha256, + ConvergentEncryption: true, + VersionTemplate: EncryptedKeyPolicyVersionTpl, + }) + + ctx := context.Background() + + err := policy.Rotate(ctx, s, rand.Reader) + if err != nil { + b.Fatal(err) + } + + es, err := NewEncryptedKeyStorageWrapper(EncryptedKeyStorageConfig{ + Policy: policy, + Prefix: "prefix", + }) + if err != nil { + b.Fatal(err) + } + + for i := 0; i < 10000; i++ { + err = es.Wrap(s).Put(ctx, &logical.StorageEntry{ + Key: fmt.Sprintf("test/%d", i), + Value: []byte("test"), + }) + if err != nil { + b.Fatal(err) + } + } + b.ResetTimer() + + for i := 0; i < b.N; i++ { + keys, err := es.Wrap(s).List(ctx, "test/") + if err != nil { + b.Fatal(err) + } + compilerOpt = keys + } +} + +func BenchmarkEncrytedKeyStorage_Put(b *testing.B) { + s := &logical.InmemStorage{} + policy := NewPolicy(PolicyConfig{ + Name: "metadata", + Type: KeyType_AES256_GCM96, + Derived: true, + KDF: Kdf_hkdf_sha256, + ConvergentEncryption: true, + VersionTemplate: EncryptedKeyPolicyVersionTpl, + }) + + ctx := context.Background() + + err := policy.Rotate(ctx, s, rand.Reader) + if err != nil { + b.Fatal(err) + } + + es, err := NewEncryptedKeyStorageWrapper(EncryptedKeyStorageConfig{ + Policy: policy, + Prefix: "prefix", + }) + if err != nil { + b.Fatal(err) + } + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + err = es.Wrap(s).Put(ctx, &logical.StorageEntry{ + Key: fmt.Sprintf("test/%d", i), + Value: []byte("test"), + }) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/sdk/helper/keysutil/lock_manager.go b/sdk/helper/keysutil/lock_manager.go new file mode 100644 index 0000000..6d2881e --- /dev/null +++ b/sdk/helper/keysutil/lock_manager.go @@ -0,0 +1,586 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package keysutil + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/locksutil" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + shared = false + exclusive = true + currentConvergentVersion = 3 +) + +var errNeedExclusiveLock = errors.New("an exclusive lock is needed for this operation") + +// PolicyRequest holds values used when requesting a policy. Most values are +// only used during an upsert. +type PolicyRequest struct { + // The storage to use + Storage logical.Storage + + // The name of the policy + Name string + + // The key type + KeyType KeyType + + // The key size for variable key size algorithms + KeySize int + + // Whether it should be derived + Derived bool + + // Whether to enable convergent encryption + Convergent bool + + // Whether to allow export + Exportable bool + + // Whether to upsert + Upsert bool + + // Whether to allow plaintext backup + AllowPlaintextBackup bool + + // How frequently the key should automatically rotate + AutoRotatePeriod time.Duration + + // AllowImportedKeyRotation indicates whether an imported key may be rotated by Vault + AllowImportedKeyRotation bool + + // Indicates whether a private or public key is imported/upserted + IsPrivateKey bool + + // The UUID of the managed key, if using one + ManagedKeyUUID string +} + +type LockManager struct { + useCache bool + cache Cache + keyLocks []*locksutil.LockEntry +} + +func NewLockManager(useCache bool, cacheSize int) (*LockManager, error) { + // determine the type of cache to create + var cache Cache + switch { + case !useCache: + case cacheSize < 0: + return nil, errors.New("cache size must be greater or equal to zero") + case cacheSize == 0: + cache = NewTransitSyncMap() + case cacheSize > 0: + newLRUCache, err := NewTransitLRU(cacheSize) + if err != nil { + return nil, errwrap.Wrapf("failed to create cache: {{err}}", err) + } + cache = newLRUCache + } + + lm := &LockManager{ + useCache: useCache, + cache: cache, + keyLocks: locksutil.CreateLocks(), + } + + return lm, nil +} + +func (lm *LockManager) GetCacheSize() int { + if !lm.useCache { + return 0 + } + return lm.cache.Size() +} + +func (lm *LockManager) GetUseCache() bool { + return lm.useCache +} + +func (lm *LockManager) InvalidatePolicy(name string) { + if lm.useCache { + lm.cache.Delete(name) + } +} + +func (lm *LockManager) InitCache(cacheSize int) error { + if lm.useCache { + switch { + case cacheSize < 0: + return errors.New("cache size must be greater or equal to zero") + case cacheSize == 0: + lm.cache = NewTransitSyncMap() + case cacheSize > 0: + newLRUCache, err := NewTransitLRU(cacheSize) + if err != nil { + return errwrap.Wrapf("failed to create cache: {{err}}", err) + } + lm.cache = newLRUCache + } + } + return nil +} + +// RestorePolicy acquires an exclusive lock on the policy name and restores the +// given policy along with the archive. 
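+// A minimal round-trip sketch (hypothetical key name; assumes the policy was
+// created with plaintext backup allowed and backed up via BackupPolicy):
+//
+//	backup, err := lm.BackupPolicy(ctx, storage, "my-key")
+//	// ... later, possibly on another cluster ...
+//	err = lm.RestorePolicy(ctx, storage, "my-key", backup, true)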
+func (lm *LockManager) RestorePolicy(ctx context.Context, storage logical.Storage, name, backup string, force bool) error {
+	backupBytes, err := base64.StdEncoding.DecodeString(backup)
+	if err != nil {
+		return err
+	}
+
+	var keyData KeyData
+	err = jsonutil.DecodeJSON(backupBytes, &keyData)
+	if err != nil {
+		return err
+	}
+
+	// Set a different name if desired
+	if name != "" {
+		keyData.Policy.Name = name
+	}
+
+	name = keyData.Policy.Name
+
+	// Grab the exclusive lock as we'll be modifying disk
+	lock := locksutil.LockForKey(lm.keyLocks, name)
+	lock.Lock()
+	defer lock.Unlock()
+
+	var ok bool
+	var pRaw interface{}
+
+	// If the policy is in cache and 'force' is not specified, error out. Anywhere
+	// that would put it in the cache will also be protected by the mutex above,
+	// so we don't need to re-check the cache later.
+	if lm.useCache {
+		pRaw, ok = lm.cache.Load(name)
+		if ok && !force {
+			return fmt.Errorf("key %q already exists", name)
+		}
+	}
+
+	// Conditionally look up the policy from storage, depending on the use of
+	// 'force' and if the policy was found in cache.
+	//
+	// - If it was not found in cache and we are not using 'force', look for it
+	//   in storage. If found, error out.
+	//
+	// - If it was found in cache and we are using 'force', pRaw will not be nil
+	//   and we do not look the policy up from storage
+	//
+	// - If it was found in cache and we are not using 'force', we should have
+	//   returned above with an error
+	var p *Policy
+	if pRaw == nil {
+		p, err = lm.getPolicyFromStorage(ctx, storage, name)
+		if err != nil {
+			return err
+		}
+		if p != nil && !force {
+			return fmt.Errorf("key %q already exists", name)
+		}
+	}
+
+	// If both pRaw and p above are nil and 'force' is specified, we don't need to
+	// grab policy locks as we have ensured it doesn't already exist, so there
+	// will be no races as nothing else has this pointer. If 'force' was not used,
+	// an error would have been returned by now if the policy already existed
+	if pRaw != nil {
+		p = pRaw.(*Policy)
+	}
+	if p != nil {
+		p.l.Lock()
+		defer p.l.Unlock()
+	}
+
+	// Restore the archived keys
+	if keyData.ArchivedKeys != nil {
+		err = keyData.Policy.storeArchive(ctx, storage, keyData.ArchivedKeys)
+		if err != nil {
+			return errwrap.Wrapf(fmt.Sprintf("failed to restore archived keys for key %q: {{err}}", name), err)
+		}
+	}
+
+	// Mark that policy as a restored key
+	keyData.Policy.RestoreInfo = &RestoreInfo{
+		Time:    time.Now(),
+		Version: keyData.Policy.LatestVersion,
+	}
+
+	// Restore the policy. This will also attempt to adjust the archive.
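+	// (Persist runs handleArchiving first, so a backup with inconsistent
+	// version bookkeeping fails here rather than being written.)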
+	err = keyData.Policy.Persist(ctx, storage)
+	if err != nil {
+		return errwrap.Wrapf(fmt.Sprintf("failed to restore the policy %q: {{err}}", name), err)
+	}
+
+	keyData.Policy.l = new(sync.RWMutex)
+
+	// Update the cache to contain the restored policy
+	if lm.useCache {
+		lm.cache.Store(name, keyData.Policy)
+	}
+	return nil
+}
+
+func (lm *LockManager) BackupPolicy(ctx context.Context, storage logical.Storage, name string) (string, error) {
+	var p *Policy
+	var err error
+
+	// Backup writes information about when the backup took place, so we get an
+	// exclusive lock here
+	lock := locksutil.LockForKey(lm.keyLocks, name)
+	lock.Lock()
+	defer lock.Unlock()
+
+	var ok bool
+	var pRaw interface{}
+
+	if lm.useCache {
+		pRaw, ok = lm.cache.Load(name)
+	}
+	if ok {
+		p = pRaw.(*Policy)
+		p.l.Lock()
+		defer p.l.Unlock()
+	} else {
+		// If the policy doesn't exist in storage, error out
+		p, err = lm.getPolicyFromStorage(ctx, storage, name)
+		if err != nil {
+			return "", err
+		}
+		if p == nil {
+			return "", fmt.Errorf(fmt.Sprintf("key %q not found", name))
+		}
+	}
+
+	if atomic.LoadUint32(&p.deleted) == 1 {
+		return "", fmt.Errorf(fmt.Sprintf("key %q not found", name))
+	}
+
+	backup, err := p.Backup(ctx, storage)
+	if err != nil {
+		return "", err
+	}
+
+	return backup, nil
+}
+
+// When the function returns, if caching was disabled, the Policy's lock must
+// be unlocked when the caller is done (and it should not be re-locked).
+func (lm *LockManager) GetPolicy(ctx context.Context, req PolicyRequest, rand io.Reader) (retP *Policy, retUpserted bool, retErr error) {
+	var p *Policy
+	var err error
+	var ok bool
+	var pRaw interface{}
+
+	// Check if it's in our cache. If so, return right away.
+	if lm.useCache {
+		pRaw, ok = lm.cache.Load(req.Name)
+	}
+	if ok {
+		p = pRaw.(*Policy)
+		if atomic.LoadUint32(&p.deleted) == 1 {
+			return nil, false, nil
+		}
+		return p, false, nil
+	}
+
+	// We're not using the cache, or it wasn't found; get an exclusive lock.
+	// This ensures that any other process writing the actual storage will be
+	// finished before we load from storage.
+	lock := locksutil.LockForKey(lm.keyLocks, req.Name)
+	lock.Lock()
+
+	// If we are using the cache, defer the lock unlock; otherwise we will
+	// return from here with the lock still held.
+	cleanup := func() {
+		switch {
+		// If using the cache we always unlock, the caller locks the policy
+		// themselves
+		case lm.useCache:
+			lock.Unlock()
+
+		// If not using the cache, if we aren't returning a policy the caller
+		// doesn't have a lock, so we must unlock
+		case retP == nil:
+			lock.Unlock()
+		}
+	}
+
+	// Check the cache again
+	if lm.useCache {
+		pRaw, ok = lm.cache.Load(req.Name)
+	}
+	if ok {
+		p = pRaw.(*Policy)
+		if atomic.LoadUint32(&p.deleted) == 1 {
+			cleanup()
+			return nil, false, nil
+		}
+		retP = p
+		cleanup()
+		return
+	}
+
+	// Load it from storage
+	p, err = lm.getPolicyFromStorage(ctx, req.Storage, req.Name)
+	if err != nil {
+		cleanup()
+		return nil, false, err
+	}
+	// We don't need to lock the policy as there would be no other holders of
+	// the pointer
+
+	if p == nil {
+		// This is the only place we upsert a new policy, so if upsert is not
+		// specified, or the lock type is wrong, unlock before returning
+		if !req.Upsert {
+			cleanup()
+			return nil, false, nil
+		}
+
+		// We create the policy here, then at the end we do a LoadOrStore.
If + // it's been loaded since we last checked the cache, we return an error + // to the user to let them know that their request can't be satisfied + // because we don't know if the parameters match. + + switch req.KeyType { + case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305: + if req.Convergent && !req.Derived { + cleanup() + return nil, false, fmt.Errorf("convergent encryption requires derivation to be enabled") + } + + case KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521: + if req.Derived || req.Convergent { + cleanup() + return nil, false, fmt.Errorf("key derivation and convergent encryption not supported for keys of type %v", req.KeyType) + } + + case KeyType_ED25519: + if req.Convergent { + cleanup() + return nil, false, fmt.Errorf("convergent encryption not supported for keys of type %v", req.KeyType) + } + + case KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096: + if req.Derived || req.Convergent { + cleanup() + return nil, false, fmt.Errorf("key derivation and convergent encryption not supported for keys of type %v", req.KeyType) + } + case KeyType_HMAC: + if req.Derived || req.Convergent { + cleanup() + return nil, false, fmt.Errorf("key derivation and convergent encryption not supported for keys of type %v", req.KeyType) + } + + case KeyType_MANAGED_KEY: + if req.Derived || req.Convergent { + cleanup() + return nil, false, fmt.Errorf("key derivation and convergent encryption not supported for keys of type %v", req.KeyType) + } + + default: + cleanup() + return nil, false, fmt.Errorf("unsupported key type %v", req.KeyType) + } + + p = &Policy{ + l: new(sync.RWMutex), + Name: req.Name, + Type: req.KeyType, + Derived: req.Derived, + Exportable: req.Exportable, + AllowPlaintextBackup: req.AllowPlaintextBackup, + AutoRotatePeriod: req.AutoRotatePeriod, + KeySize: req.KeySize, + } + + if req.Derived { + p.KDF = Kdf_hkdf_sha256 + if req.Convergent { + p.ConvergentEncryption = true + // As of version 3 we store the version within each key, so we + // set to -1 to indicate that the value in the policy has no + // meaning. We still, for backwards compatibility, fall back to + // this value if the key doesn't have one, which means it will + // only be -1 in the case where every key version is >= 3 + p.ConvergentVersion = -1 + } + } + + // Performs the actual persist and does setup + if p.Type == KeyType_MANAGED_KEY { + err = p.RotateManagedKey(ctx, req.Storage, req.ManagedKeyUUID) + } else { + err = p.Rotate(ctx, req.Storage, rand) + } + if err != nil { + cleanup() + return nil, false, err + } + + if lm.useCache { + lm.cache.Store(req.Name, p) + } else { + p.l = &lock.RWMutex + p.writeLocked = true + } + + // We don't need to worry about upgrading since it will be a new policy + retP = p + retUpserted = true + cleanup() + return + } + + if p.NeedsUpgrade() { + if err := p.Upgrade(ctx, req.Storage, rand); err != nil { + cleanup() + return nil, false, err + } + } + + if lm.useCache { + lm.cache.Store(req.Name, p) + } else { + p.l = &lock.RWMutex + p.writeLocked = true + } + + retP = p + cleanup() + return +} + +func (lm *LockManager) ImportPolicy(ctx context.Context, req PolicyRequest, key []byte, rand io.Reader) error { + var p *Policy + var err error + var ok bool + var pRaw interface{} + + // Check if it's in our cache + if lm.useCache { + pRaw, ok = lm.cache.Load(req.Name) + } + if ok { + p = pRaw.(*Policy) + if atomic.LoadUint32(&p.deleted) == 1 { + return nil + } + } + + // We're not using the cache, or it wasn't found; get an exclusive lock. 
+ // This ensures that any other process writing the actual storage will be + // finished before we load from storage. + lock := locksutil.LockForKey(lm.keyLocks, req.Name) + lock.Lock() + defer lock.Unlock() + + // Load it from storage + p, err = lm.getPolicyFromStorage(ctx, req.Storage, req.Name) + if err != nil { + return err + } + + if p == nil { + p = &Policy{ + l: new(sync.RWMutex), + Name: req.Name, + Type: req.KeyType, + Derived: req.Derived, + Exportable: req.Exportable, + AllowPlaintextBackup: req.AllowPlaintextBackup, + AutoRotatePeriod: req.AutoRotatePeriod, + AllowImportedKeyRotation: req.AllowImportedKeyRotation, + Imported: true, + } + } + + err = p.ImportPublicOrPrivate(ctx, req.Storage, key, req.IsPrivateKey, rand) + if err != nil { + return fmt.Errorf("error importing key: %s", err) + } + + if lm.useCache { + lm.cache.Store(req.Name, p) + } + + return nil +} + +func (lm *LockManager) DeletePolicy(ctx context.Context, storage logical.Storage, name string) error { + var p *Policy + var err error + var ok bool + var pRaw interface{} + + // We may be writing to disk, so grab an exclusive lock. This prevents bad + // behavior when the cache is turned off. We also lock the shared policy + // object to make sure no requests are in flight. + lock := locksutil.LockForKey(lm.keyLocks, name) + lock.Lock() + defer lock.Unlock() + + if lm.useCache { + pRaw, ok = lm.cache.Load(name) + } + if ok { + p = pRaw.(*Policy) + p.l.Lock() + defer p.l.Unlock() + } + + if p == nil { + p, err = lm.getPolicyFromStorage(ctx, storage, name) + if err != nil { + return err + } + if p == nil { + return fmt.Errorf("could not delete key; not found") + } + } + + if !p.DeletionAllowed { + return fmt.Errorf("deletion is not allowed for this key") + } + + atomic.StoreUint32(&p.deleted, 1) + + if lm.useCache { + lm.cache.Delete(name) + } + + err = storage.Delete(ctx, "policy/"+name) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("error deleting key %q: {{err}}", name), err) + } + + err = storage.Delete(ctx, "archive/"+name) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("error deleting key %q archive: {{err}}", name), err) + } + + return nil +} + +func (lm *LockManager) getPolicyFromStorage(ctx context.Context, storage logical.Storage, name string) (*Policy, error) { + return LoadPolicy(ctx, storage, "policy/"+name) +} diff --git a/sdk/helper/keysutil/managed_key_util.go b/sdk/helper/keysutil/managed_key_util.go new file mode 100644 index 0000000..bb3c0b2 --- /dev/null +++ b/sdk/helper/keysutil/managed_key_util.go @@ -0,0 +1,45 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+//go:build !enterprise
+
+package keysutil
+
+import (
+	"context"
+	"errors"
+
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+type ManagedKeyParameters struct {
+	ManagedKeySystemView logical.ManagedKeySystemView
+	BackendUUID          string
+	Context              context.Context
+}
+
+var errEntOnly = errors.New("managed keys are supported within enterprise edition only")
+
+func (p *Policy) decryptWithManagedKey(params ManagedKeyParameters, keyEntry KeyEntry, ciphertext []byte, nonce []byte, aad []byte) (plaintext []byte, err error) {
+	return nil, errEntOnly
+}
+
+func (p *Policy) encryptWithManagedKey(params ManagedKeyParameters, keyEntry KeyEntry, plaintext []byte, nonce []byte, aad []byte) (ciphertext []byte, err error) {
+	return nil, errEntOnly
+}
+
+func (p *Policy) signWithManagedKey(options *SigningOptions, keyEntry KeyEntry, input []byte) (sig []byte, err error) {
+	return nil, errEntOnly
+}
+
+func (p *Policy) verifyWithManagedKey(options *SigningOptions, keyEntry KeyEntry, input, sig []byte) (verified bool, err error) {
+	return false, errEntOnly
+}
+
+func (p *Policy) HMACWithManagedKey(ctx context.Context, ver int, managedKeySystemView logical.ManagedKeySystemView, backendUUID string, algorithm string, data []byte) (hmacBytes []byte, err error) {
+	return nil, errEntOnly
+}
+
+func (p *Policy) RotateManagedKey(ctx context.Context, storage logical.Storage, managedKeyUUID string) error {
+	return errEntOnly
+}
diff --git a/sdk/helper/keysutil/policy.go b/sdk/helper/keysutil/policy.go
new file mode 100644
index 0000000..6d1ad28
--- /dev/null
+++ b/sdk/helper/keysutil/policy.go
@@ -0,0 +1,2408 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package keysutil
+
+import (
+	"bytes"
+	"context"
+	"crypto"
+	"crypto/aes"
+	"crypto/cipher"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/hmac"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha256"
+	"crypto/x509"
+	"encoding/asn1"
+	"encoding/base64"
+	"encoding/json"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+	"math/big"
+	"path"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"golang.org/x/crypto/chacha20poly1305"
+	"golang.org/x/crypto/ed25519"
+	"golang.org/x/crypto/hkdf"
+
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/vault/sdk/helper/errutil"
+	"github.com/hashicorp/vault/sdk/helper/jsonutil"
+	"github.com/hashicorp/vault/sdk/helper/kdf"
+	"github.com/hashicorp/vault/sdk/logical"
+
+	"github.com/google/tink/go/kwp/subtle"
+)
+
+// Careful with iota; don't put anything before it in this const block because
+// we need the default of zero to be the old-style KDF
+const (
+	Kdf_hmac_sha256_counter = iota // built-in helper
+	Kdf_hkdf_sha256                // golang.org/x/crypto/hkdf
+
+	HmacMinKeySize = 256 / 8
+	HmacMaxKeySize = 4096 / 8
+)
+
+// Or this one...we need the default of zero to be the original AES256-GCM96
+const (
+	KeyType_AES256_GCM96 = iota
+	KeyType_ECDSA_P256
+	KeyType_ED25519
+	KeyType_RSA2048
+	KeyType_RSA4096
+	KeyType_ChaCha20_Poly1305
+	KeyType_ECDSA_P384
+	KeyType_ECDSA_P521
+	KeyType_AES128_GCM96
+	KeyType_RSA3072
+	KeyType_MANAGED_KEY
+	KeyType_HMAC
+)
+
+const (
+	// ErrTooOld is returned when the ciphertext or signature's key version is
+	// too old.
+	ErrTooOld = "ciphertext or signature version is disallowed by policy (too old)"
+
+	// DefaultVersionTemplate is used when no version template is provided.
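+	// With the default template, a version-1 ciphertext carries the prefix
+	// "vault:v1:" ahead of the base64 payload.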
+ DefaultVersionTemplate = "vault:v{{version}}:" +) + +type AEADFactory interface { + GetAEAD(iv []byte) (cipher.AEAD, error) +} + +type AssociatedDataFactory interface { + GetAssociatedData() ([]byte, error) +} + +type ManagedKeyFactory interface { + GetManagedKeyParameters() ManagedKeyParameters +} + +type RestoreInfo struct { + Time time.Time `json:"time"` + Version int `json:"version"` +} + +type BackupInfo struct { + Time time.Time `json:"time"` + Version int `json:"version"` +} + +type SigningOptions struct { + HashAlgorithm HashType + Marshaling MarshalingType + SaltLength int + SigAlgorithm string + ManagedKeyParams ManagedKeyParameters +} + +type SigningResult struct { + Signature string + PublicKey []byte +} + +type ecdsaSignature struct { + R, S *big.Int +} + +type KeyType int + +func (kt KeyType) EncryptionSupported() bool { + switch kt { + case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305, KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096, KeyType_MANAGED_KEY: + return true + } + return false +} + +func (kt KeyType) DecryptionSupported() bool { + switch kt { + case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305, KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096, KeyType_MANAGED_KEY: + return true + } + return false +} + +func (kt KeyType) SigningSupported() bool { + switch kt { + case KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521, KeyType_ED25519, KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096, KeyType_MANAGED_KEY: + return true + } + return false +} + +func (kt KeyType) HashSignatureInput() bool { + switch kt { + case KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521, KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096, KeyType_MANAGED_KEY: + return true + } + return false +} + +func (kt KeyType) DerivationSupported() bool { + switch kt { + case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305, KeyType_ED25519: + return true + } + return false +} + +func (kt KeyType) AssociatedDataSupported() bool { + switch kt { + case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305, KeyType_MANAGED_KEY: + return true + } + return false +} + +func (kt KeyType) ImportPublicKeySupported() bool { + switch kt { + case KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096, KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521, KeyType_ED25519: + return true + } + return false +} + +func (kt KeyType) String() string { + switch kt { + case KeyType_AES128_GCM96: + return "aes128-gcm96" + case KeyType_AES256_GCM96: + return "aes256-gcm96" + case KeyType_ChaCha20_Poly1305: + return "chacha20-poly1305" + case KeyType_ECDSA_P256: + return "ecdsa-p256" + case KeyType_ECDSA_P384: + return "ecdsa-p384" + case KeyType_ECDSA_P521: + return "ecdsa-p521" + case KeyType_ED25519: + return "ed25519" + case KeyType_RSA2048: + return "rsa-2048" + case KeyType_RSA3072: + return "rsa-3072" + case KeyType_RSA4096: + return "rsa-4096" + case KeyType_HMAC: + return "hmac" + case KeyType_MANAGED_KEY: + return "managed_key" + } + + return "[unknown]" +} + +type KeyData struct { + Policy *Policy `json:"policy"` + ArchivedKeys *archivedKeys `json:"archived_keys"` +} + +// KeyEntry stores the key and metadata +type KeyEntry struct { + // AES or some other kind that is a pure byte slice like ED25519 + Key []byte `json:"key"` + + // Key used for HMAC functions + HMACKey []byte `json:"hmac_key"` + + // Time of creation + CreationTime time.Time `json:"time"` + + EC_X *big.Int `json:"ec_x"` + EC_Y *big.Int 
`json:"ec_y"`
+	EC_D *big.Int `json:"ec_d"`
+
+	RSAKey       *rsa.PrivateKey `json:"rsa_key"`
+	RSAPublicKey *rsa.PublicKey  `json:"rsa_public_key"`
+
+	// The public key in an appropriate format for the type of key
+	FormattedPublicKey string `json:"public_key"`
+
+	// If convergent is enabled, the version (falling back to what's in the
+	// policy)
+	ConvergentVersion int `json:"convergent_version"`
+
+	// This is deprecated (but still filled) in favor of the value above which
+	// is more precise
+	DeprecatedCreationTime int64 `json:"creation_time"`
+
+	ManagedKeyUUID string `json:"managed_key_id,omitempty"`
+}
+
+func (ke *KeyEntry) IsPrivateKeyMissing() bool {
+	if ke.RSAKey != nil || ke.EC_D != nil || len(ke.Key) != 0 || len(ke.ManagedKeyUUID) != 0 {
+		return false
+	}
+
+	return true
+}
+
+// deprecatedKeyEntryMap is used to allow JSON marshal/unmarshal
+type deprecatedKeyEntryMap map[int]KeyEntry
+
+// MarshalJSON implements JSON marshaling
+func (kem deprecatedKeyEntryMap) MarshalJSON() ([]byte, error) {
+	intermediate := map[string]KeyEntry{}
+	for k, v := range kem {
+		intermediate[strconv.Itoa(k)] = v
+	}
+	return json.Marshal(&intermediate)
+}
+
+// UnmarshalJSON implements JSON unmarshalling
+func (kem deprecatedKeyEntryMap) UnmarshalJSON(data []byte) error {
+	intermediate := map[string]KeyEntry{}
+	if err := jsonutil.DecodeJSON(data, &intermediate); err != nil {
+		return err
+	}
+	for k, v := range intermediate {
+		keyval, err := strconv.Atoi(k)
+		if err != nil {
+			return err
+		}
+		kem[keyval] = v
+	}
+
+	return nil
+}
+
+// keyEntryMap is used to allow JSON marshal/unmarshal
+type keyEntryMap map[string]KeyEntry
+
+// PolicyConfig is used to create a new policy
+type PolicyConfig struct {
+	// The name of the policy
+	Name string `json:"name"`
+
+	// The type of key
+	Type KeyType
+
+	// Derived keys MUST provide a context and the master underlying key is
+	// never used.
+	Derived              bool
+	KDF                  int
+	ConvergentEncryption bool
+
+	// Whether the key is exportable
+	Exportable bool
+
+	// Whether the key is allowed to be deleted
+	DeletionAllowed bool
+
+	// AllowPlaintextBackup allows taking backup of the policy in plaintext
+	AllowPlaintextBackup bool
+
+	// VersionTemplate is used to prefix the ciphertext with information about
+	// the key version. It must include {{version}} and a delimiter between the
+	// version prefix and the ciphertext.
+	VersionTemplate string
+
+	// StoragePrefix is used to add a prefix when storing and retrieving the
+	// policy object.
+	StoragePrefix string
+}
+
+// NewPolicy takes a policy config and returns a Policy with those settings.
+func NewPolicy(config PolicyConfig) *Policy {
+	return &Policy{
+		l:                    new(sync.RWMutex),
+		Name:                 config.Name,
+		Type:                 config.Type,
+		Derived:              config.Derived,
+		KDF:                  config.KDF,
+		ConvergentEncryption: config.ConvergentEncryption,
+		ConvergentVersion:    -1,
+		Exportable:           config.Exportable,
+		DeletionAllowed:      config.DeletionAllowed,
+		AllowPlaintextBackup: config.AllowPlaintextBackup,
+		VersionTemplate:      config.VersionTemplate,
+		StoragePrefix:        config.StoragePrefix,
+	}
+}
+
+// LoadPolicy will load a policy from the provided storage path and set the
+// necessary un-exported variables. It is particularly useful when accessing a
+// policy without the lock manager.
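+// Minimal sketch (hypothetical key name; path layout mirrors Persist and the
+// lock manager's "policy/" prefix, assuming an empty StoragePrefix):
+//
+//	p, err := LoadPolicy(ctx, storage, "policy/my-key")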
+func LoadPolicy(ctx context.Context, s logical.Storage, path string) (*Policy, error) { + raw, err := s.Get(ctx, path) + if err != nil { + return nil, err + } + if raw == nil { + return nil, nil + } + + var policy Policy + err = jsonutil.DecodeJSON(raw.Value, &policy) + if err != nil { + return nil, err + } + + // Migrate RSA private keys to include their private counterpart. This lets + // us reference RSAPublicKey whenever we need to, without necessarily + // needing the private key handy, synchronizing the behavior with EC and + // Ed25519 key pairs. + switch policy.Type { + case KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096: + for _, entry := range policy.Keys { + if entry.RSAPublicKey == nil && entry.RSAKey != nil { + entry.RSAPublicKey = entry.RSAKey.Public().(*rsa.PublicKey) + } + } + } + + policy.l = new(sync.RWMutex) + + return &policy, nil +} + +// Policy is the struct used to store metadata +type Policy struct { + // This is a pointer on purpose: if we are running with cache disabled we + // need to actually swap in the lock manager's lock for this policy with + // the local lock. + l *sync.RWMutex + // writeLocked allows us to implement Lock() and Unlock() + writeLocked bool + // Stores whether it's been deleted. This acts as a guard for operations + // that may write data, e.g. if one request rotates and that request is + // served after a delete. + deleted uint32 + + Name string `json:"name"` + Key []byte `json:"key,omitempty"` // DEPRECATED + KeySize int `json:"key_size,omitempty"` // For algorithms with variable key sizes + Keys keyEntryMap `json:"keys"` + + // Derived keys MUST provide a context and the master underlying key is + // never used. If convergent encryption is true, the context will be used + // as the nonce as well. + Derived bool `json:"derived"` + KDF int `json:"kdf"` + ConvergentEncryption bool `json:"convergent_encryption"` + + // Whether the key is exportable + Exportable bool `json:"exportable"` + + // The minimum version of the key allowed to be used for decryption + MinDecryptionVersion int `json:"min_decryption_version"` + + // The minimum version of the key allowed to be used for encryption + MinEncryptionVersion int `json:"min_encryption_version"` + + // The latest key version in this policy + LatestVersion int `json:"latest_version"` + + // The latest key version in the archive. We never delete these, so this is + // a max. + ArchiveVersion int `json:"archive_version"` + + // ArchiveMinVersion is the minimum version of the key in the archive. + ArchiveMinVersion int `json:"archive_min_version"` + + // MinAvailableVersion is the minimum version of the key present. All key + // versions before this would have been deleted. + MinAvailableVersion int `json:"min_available_version"` + + // Whether the key is allowed to be deleted + DeletionAllowed bool `json:"deletion_allowed"` + + // The version of the convergent nonce to use + ConvergentVersion int `json:"convergent_version"` + + // The type of key + Type KeyType `json:"type"` + + // BackupInfo indicates the information about the backup action taken on + // this policy + BackupInfo *BackupInfo `json:"backup_info"` + + // RestoreInfo indicates the information about the restore action taken on + // this policy + RestoreInfo *RestoreInfo `json:"restore_info"` + + // AllowPlaintextBackup allows taking backup of the policy in plaintext + AllowPlaintextBackup bool `json:"allow_plaintext_backup"` + + // VersionTemplate is used to prefix the ciphertext with information about + // the key version. 
It must include {{version}} and a delimiter between the
+	// version prefix and the ciphertext.
+	VersionTemplate string `json:"version_template"`
+
+	// StoragePrefix is used to add a prefix when storing and retrieving the
+	// policy object.
+	StoragePrefix string `json:"storage_prefix"`
+
+	// AutoRotatePeriod defines how frequently the key should automatically
+	// rotate. Setting this to zero disables automatic rotation for the key.
+	AutoRotatePeriod time.Duration `json:"auto_rotate_period"`
+
+	// versionPrefixCache stores caches of version prefix strings and the split
+	// version template.
+	versionPrefixCache sync.Map
+
+	// Imported indicates whether the key was generated by Vault or imported
+	// from an external source
+	Imported bool
+
+	// AllowImportedKeyRotation indicates whether an imported key may be rotated by Vault
+	AllowImportedKeyRotation bool
+}
+
+func (p *Policy) Lock(exclusive bool) {
+	if exclusive {
+		p.l.Lock()
+		p.writeLocked = true
+	} else {
+		p.l.RLock()
+	}
+}
+
+func (p *Policy) Unlock() {
+	if p.writeLocked {
+		p.writeLocked = false
+		p.l.Unlock()
+	} else {
+		p.l.RUnlock()
+	}
+}
+
+// ArchivedKeys stores old keys. This is used to keep the key loading time sane
+// when there are huge numbers of rotations.
+type archivedKeys struct {
+	Keys []KeyEntry `json:"keys"`
+}
+
+func (p *Policy) LoadArchive(ctx context.Context, storage logical.Storage) (*archivedKeys, error) {
+	archive := &archivedKeys{}
+
+	raw, err := storage.Get(ctx, path.Join(p.StoragePrefix, "archive", p.Name))
+	if err != nil {
+		return nil, err
+	}
+	if raw == nil {
+		archive.Keys = make([]KeyEntry, 0)
+		return archive, nil
+	}
+
+	if err := jsonutil.DecodeJSON(raw.Value, archive); err != nil {
+		return nil, err
+	}
+
+	return archive, nil
+}
+
+func (p *Policy) storeArchive(ctx context.Context, storage logical.Storage, archive *archivedKeys) error {
+	// Encode the policy
+	buf, err := json.Marshal(archive)
+	if err != nil {
+		return err
+	}
+
+	// Write the policy into storage
+	err = storage.Put(ctx, &logical.StorageEntry{
+		Key:   path.Join(p.StoragePrefix, "archive", p.Name),
+		Value: buf,
+	})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// handleArchiving manages the movement of keys to and from the policy archive.
+// This should *ONLY* be called from Persist() since it assumes that the policy
+// will be persisted afterwards.
+func (p *Policy) handleArchiving(ctx context.Context, storage logical.Storage) error {
+	// We need to move keys that are no longer accessible to archivedKeys, and keys
+	// that now need to be accessible back here.
+	//
+	// For safety, because there isn't really a good reason to, we never delete
+	// keys from the archive even when we move them back.
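+	// Note that the archive slice is indexed relative to MinAvailableVersion:
+	// slot i holds key version i+MinAvailableVersion.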
+ + // Check if we have the latest minimum version in the current set of keys + _, keysContainsMinimum := p.Keys[strconv.Itoa(p.MinDecryptionVersion)] + + // Sanity checks + switch { + case p.MinDecryptionVersion < 1: + return fmt.Errorf("minimum decryption version of %d is less than 1", p.MinDecryptionVersion) + case p.LatestVersion < 1: + return fmt.Errorf("latest version of %d is less than 1", p.LatestVersion) + case !keysContainsMinimum && p.ArchiveVersion != p.LatestVersion: + return fmt.Errorf("need to move keys from archive but archive version not up-to-date") + case p.ArchiveVersion > p.LatestVersion: + return fmt.Errorf("archive version of %d is greater than the latest version %d", + p.ArchiveVersion, p.LatestVersion) + case p.MinEncryptionVersion > 0 && p.MinEncryptionVersion < p.MinDecryptionVersion: + return fmt.Errorf("minimum decryption version of %d is greater than minimum encryption version %d", + p.MinDecryptionVersion, p.MinEncryptionVersion) + case p.MinDecryptionVersion > p.LatestVersion: + return fmt.Errorf("minimum decryption version of %d is greater than the latest version %d", + p.MinDecryptionVersion, p.LatestVersion) + } + + archive, err := p.LoadArchive(ctx, storage) + if err != nil { + return err + } + + if !keysContainsMinimum { + // Need to move keys *from* archive + for i := p.MinDecryptionVersion; i <= p.LatestVersion; i++ { + p.Keys[strconv.Itoa(i)] = archive.Keys[i-p.MinAvailableVersion] + } + + return nil + } + + // Need to move keys *to* archive + + // We need a size that is equivalent to the latest version (number of keys) + // but adding one since slice numbering starts at 0 and we're indexing by + // key version + if len(archive.Keys)+p.MinAvailableVersion < p.LatestVersion+1 { + // Increase the size of the archive slice + newKeys := make([]KeyEntry, p.LatestVersion-p.MinAvailableVersion+1) + copy(newKeys, archive.Keys) + archive.Keys = newKeys + } + + // We are storing all keys in the archive, so we ensure that it is up to + // date up to p.LatestVersion + for i := p.ArchiveVersion + 1; i <= p.LatestVersion; i++ { + archive.Keys[i-p.MinAvailableVersion] = p.Keys[strconv.Itoa(i)] + p.ArchiveVersion = i + } + + // Trim the keys if required + if p.ArchiveMinVersion < p.MinAvailableVersion { + archive.Keys = archive.Keys[p.MinAvailableVersion-p.ArchiveMinVersion:] + p.ArchiveMinVersion = p.MinAvailableVersion + } + + err = p.storeArchive(ctx, storage, archive) + if err != nil { + return err + } + + // Perform deletion afterwards so that if there is an error saving we + // haven't messed with the current policy + for i := p.LatestVersion - len(p.Keys) + 1; i < p.MinDecryptionVersion; i++ { + delete(p.Keys, strconv.Itoa(i)) + } + + return nil +} + +func (p *Policy) Persist(ctx context.Context, storage logical.Storage) (retErr error) { + if atomic.LoadUint32(&p.deleted) == 1 { + return errors.New("key has been deleted, not persisting") + } + + // Other functions will take care of restoring other values; this is just + // responsible for archiving and keys since the archive function can modify + // keys. At the moment one of the other functions calling persist will also + // roll back keys, but better safe than sorry and this doesn't happen + // enough to worry about the speed tradeoff. 
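+	// Snapshot the values handleArchiving can mutate so the deferred rollback
+	// below can restore them if persisting fails.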
+ priorArchiveVersion := p.ArchiveVersion + var priorKeys keyEntryMap + + if p.Keys != nil { + priorKeys = keyEntryMap{} + for k, v := range p.Keys { + priorKeys[k] = v + } + } + + defer func() { + if retErr != nil { + p.ArchiveVersion = priorArchiveVersion + p.Keys = priorKeys + } + }() + + err := p.handleArchiving(ctx, storage) + if err != nil { + return err + } + + // Encode the policy + buf, err := p.Serialize() + if err != nil { + return err + } + + // Write the policy into storage + err = storage.Put(ctx, &logical.StorageEntry{ + Key: path.Join(p.StoragePrefix, "policy", p.Name), + Value: buf, + }) + if err != nil { + return err + } + + return nil +} + +func (p *Policy) Serialize() ([]byte, error) { + return json.Marshal(p) +} + +func (p *Policy) NeedsUpgrade() bool { + // Ensure we've moved from Key -> Keys + if p.Key != nil && len(p.Key) > 0 { + return true + } + + // With archiving, past assumptions about the length of the keys map are no + // longer valid + if p.LatestVersion == 0 && len(p.Keys) != 0 { + return true + } + + // We disallow setting the version to 0, since they start at 1 since moving + // to rotate-able keys, so update if it's set to 0 + if p.MinDecryptionVersion == 0 { + return true + } + + // On first load after an upgrade, copy keys to the archive + if p.ArchiveVersion == 0 { + return true + } + + // Need to write the version if zero; for version 3 on we set this to -1 to + // ignore it since we store this information in each key entry + if p.ConvergentEncryption && p.ConvergentVersion == 0 { + return true + } + + if p.Keys[strconv.Itoa(p.LatestVersion)].HMACKey == nil || len(p.Keys[strconv.Itoa(p.LatestVersion)].HMACKey) == 0 { + return true + } + + return false +} + +func (p *Policy) Upgrade(ctx context.Context, storage logical.Storage, randReader io.Reader) (retErr error) { + priorKey := p.Key + priorLatestVersion := p.LatestVersion + priorMinDecryptionVersion := p.MinDecryptionVersion + priorConvergentVersion := p.ConvergentVersion + var priorKeys keyEntryMap + + if p.Keys != nil { + priorKeys = keyEntryMap{} + for k, v := range p.Keys { + priorKeys[k] = v + } + } + + defer func() { + if retErr != nil { + p.Key = priorKey + p.LatestVersion = priorLatestVersion + p.MinDecryptionVersion = priorMinDecryptionVersion + p.ConvergentVersion = priorConvergentVersion + p.Keys = priorKeys + } + }() + + persistNeeded := false + // Ensure we've moved from Key -> Keys + if p.Key != nil && len(p.Key) > 0 { + p.MigrateKeyToKeysMap() + persistNeeded = true + } + + // With archiving, past assumptions about the length of the keys map are no + // longer valid + if p.LatestVersion == 0 && len(p.Keys) != 0 { + p.LatestVersion = len(p.Keys) + persistNeeded = true + } + + // We disallow setting the version to 0, since they start at 1 since moving + // to rotate-able keys, so update if it's set to 0 + if p.MinDecryptionVersion == 0 { + p.MinDecryptionVersion = 1 + persistNeeded = true + } + + // On first load after an upgrade, copy keys to the archive + if p.ArchiveVersion == 0 { + persistNeeded = true + } + + if p.ConvergentEncryption && p.ConvergentVersion == 0 { + p.ConvergentVersion = 1 + persistNeeded = true + } + + if p.Keys[strconv.Itoa(p.LatestVersion)].HMACKey == nil || len(p.Keys[strconv.Itoa(p.LatestVersion)].HMACKey) == 0 { + entry := p.Keys[strconv.Itoa(p.LatestVersion)] + hmacKey, err := uuid.GenerateRandomBytesWithReader(32, randReader) + if err != nil { + return err + } + entry.HMACKey = hmacKey + p.Keys[strconv.Itoa(p.LatestVersion)] = entry + persistNeeded = true 
+ + if p.Type == KeyType_HMAC { + entry.HMACKey = entry.Key + } + } + + if persistNeeded { + err := p.Persist(ctx, storage) + if err != nil { + return err + } + } + + return nil +} + +// GetKey is used to derive the encryption key that should be used depending +// on the policy. If derivation is disabled the raw key is used and no context +// is required, otherwise the KDF mode is used with the context to derive the +// proper key. +func (p *Policy) GetKey(context []byte, ver, numBytes int) ([]byte, error) { + // Fast-path non-derived keys + if !p.Derived { + keyEntry, err := p.safeGetKeyEntry(ver) + if err != nil { + return nil, err + } + + return keyEntry.Key, nil + } + + return p.DeriveKey(context, nil, ver, numBytes) +} + +// DeriveKey is used to derive a symmetric key given a context and salt. This does not +// check the policies Derived flag, but just implements the derivation logic. GetKey +// is responsible for switching on the policy config. +func (p *Policy) DeriveKey(context, salt []byte, ver int, numBytes int) ([]byte, error) { + if !p.Type.DerivationSupported() { + return nil, errutil.UserError{Err: fmt.Sprintf("derivation not supported for key type %v", p.Type)} + } + + if p.Keys == nil || p.LatestVersion == 0 { + return nil, errutil.InternalError{Err: "unable to access the key; no key versions found"} + } + + if ver <= 0 || ver > p.LatestVersion { + return nil, errutil.UserError{Err: "invalid key version"} + } + + // Ensure a context is provided + if len(context) == 0 { + return nil, errutil.UserError{Err: "missing 'context' for key derivation; the key was created using a derived key, which means additional, per-request information must be included in order to perform operations with the key"} + } + + keyEntry, err := p.safeGetKeyEntry(ver) + if err != nil { + return nil, err + } + + switch p.KDF { + case Kdf_hmac_sha256_counter: + prf := kdf.HMACSHA256PRF + prfLen := kdf.HMACSHA256PRFLen + return kdf.CounterMode(prf, prfLen, keyEntry.Key, append(context, salt...), 256) + + case Kdf_hkdf_sha256: + reader := hkdf.New(sha256.New, keyEntry.Key, salt, context) + derBytes := bytes.NewBuffer(nil) + derBytes.Grow(numBytes) + limReader := &io.LimitedReader{ + R: reader, + N: int64(numBytes), + } + + switch p.Type { + case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305: + n, err := derBytes.ReadFrom(limReader) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("error reading returned derived bytes: %v", err)} + } + if n != int64(numBytes) { + return nil, errutil.InternalError{Err: fmt.Sprintf("unable to read enough derived bytes, needed %d, got %d", numBytes, n)} + } + return derBytes.Bytes(), nil + + case KeyType_ED25519: + // We use the limited reader containing the derived bytes as the + // "random" input to the generation function + _, pri, err := ed25519.GenerateKey(limReader) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("error generating derived key: %v", err)} + } + return pri, nil + + default: + return nil, errutil.InternalError{Err: "unsupported key type for derivation"} + } + + default: + return nil, errutil.InternalError{Err: "unsupported key derivation mode"} + } +} + +func (p *Policy) safeGetKeyEntry(ver int) (KeyEntry, error) { + keyVerStr := strconv.Itoa(ver) + keyEntry, ok := p.Keys[keyVerStr] + if !ok { + return keyEntry, errutil.UserError{Err: "no such key version"} + } + return keyEntry, nil +} + +func (p *Policy) convergentVersion(ver int) int { + if !p.ConvergentEncryption { + return 0 + } + + 
convergentVersion := p.ConvergentVersion + if convergentVersion == 0 { + // For some reason, not upgraded yet + convergentVersion = 1 + } + currKey := p.Keys[strconv.Itoa(ver)] + if currKey.ConvergentVersion != 0 { + convergentVersion = currKey.ConvergentVersion + } + + return convergentVersion +} + +func (p *Policy) Encrypt(ver int, context, nonce []byte, value string) (string, error) { + return p.EncryptWithFactory(ver, context, nonce, value, nil) +} + +func (p *Policy) Decrypt(context, nonce []byte, value string) (string, error) { + return p.DecryptWithFactory(context, nonce, value, nil) +} + +func (p *Policy) DecryptWithFactory(context, nonce []byte, value string, factories ...interface{}) (string, error) { + if !p.Type.DecryptionSupported() { + return "", errutil.UserError{Err: fmt.Sprintf("message decryption not supported for key type %v", p.Type)} + } + + tplParts, err := p.getTemplateParts() + if err != nil { + return "", err + } + + // Verify the prefix + if !strings.HasPrefix(value, tplParts[0]) { + return "", errutil.UserError{Err: "invalid ciphertext: no prefix"} + } + + splitVerCiphertext := strings.SplitN(strings.TrimPrefix(value, tplParts[0]), tplParts[1], 2) + if len(splitVerCiphertext) != 2 { + return "", errutil.UserError{Err: "invalid ciphertext: wrong number of fields"} + } + + ver, err := strconv.Atoi(splitVerCiphertext[0]) + if err != nil { + return "", errutil.UserError{Err: "invalid ciphertext: version number could not be decoded"} + } + + if ver == 0 { + // Compatibility mode with initial implementation, where keys start at + // zero + ver = 1 + } + + if ver > p.LatestVersion { + return "", errutil.UserError{Err: "invalid ciphertext: version is too new"} + } + + if p.MinDecryptionVersion > 0 && ver < p.MinDecryptionVersion { + return "", errutil.UserError{Err: ErrTooOld} + } + + convergentVersion := p.convergentVersion(ver) + if convergentVersion == 1 && (nonce == nil || len(nonce) == 0) { + return "", errutil.UserError{Err: "invalid convergent nonce supplied"} + } + + // Decode the base64 + decoded, err := base64.StdEncoding.DecodeString(splitVerCiphertext[1]) + if err != nil { + return "", errutil.UserError{Err: "invalid ciphertext: could not decode base64"} + } + + var plain []byte + + switch p.Type { + case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305: + numBytes := 32 + if p.Type == KeyType_AES128_GCM96 { + numBytes = 16 + } + + encKey, err := p.GetKey(context, ver, numBytes) + if err != nil { + return "", err + } + + if len(encKey) != numBytes { + return "", errutil.InternalError{Err: "could not derive enc key, length not correct"} + } + + symopts := SymmetricOpts{ + Convergent: p.ConvergentEncryption, + ConvergentVersion: p.ConvergentVersion, + } + for index, rawFactory := range factories { + if rawFactory == nil { + continue + } + switch factory := rawFactory.(type) { + case AEADFactory: + symopts.AEADFactory = factory + case AssociatedDataFactory: + symopts.AdditionalData, err = factory.GetAssociatedData() + if err != nil { + return "", errutil.InternalError{Err: fmt.Sprintf("unable to get associated_data/additional_data from factory[%d]: %v", index, err)} + } + case ManagedKeyFactory: + default: + return "", errutil.InternalError{Err: fmt.Sprintf("unknown type of factory[%d]: %T", index, rawFactory)} + } + } + + plain, err = p.SymmetricDecryptRaw(encKey, decoded, symopts) + if err != nil { + return "", err + } + case KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096: + keyEntry, err := p.safeGetKeyEntry(ver) + if err != nil { + 
return "", err + } + key := keyEntry.RSAKey + if key == nil { + return "", errutil.InternalError{Err: fmt.Sprintf("cannot decrypt ciphertext, key version does not have a private counterpart")} + } + plain, err = rsa.DecryptOAEP(sha256.New(), rand.Reader, key, decoded, nil) + if err != nil { + return "", errutil.InternalError{Err: fmt.Sprintf("failed to RSA decrypt the ciphertext: %v", err)} + } + case KeyType_MANAGED_KEY: + keyEntry, err := p.safeGetKeyEntry(ver) + if err != nil { + return "", err + } + var aad []byte + var managedKeyFactory ManagedKeyFactory + for _, f := range factories { + switch factory := f.(type) { + case AssociatedDataFactory: + aad, err = factory.GetAssociatedData() + if err != nil { + return "", err + } + case ManagedKeyFactory: + managedKeyFactory = factory + } + } + + if managedKeyFactory == nil { + return "", errors.New("key type is managed_key, but managed key parameters were not provided") + } + + plain, err = p.decryptWithManagedKey(managedKeyFactory.GetManagedKeyParameters(), keyEntry, decoded, nonce, aad) + if err != nil { + return "", err + } + + default: + return "", errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)} + } + + return base64.StdEncoding.EncodeToString(plain), nil +} + +func (p *Policy) HMACKey(version int) ([]byte, error) { + switch { + case version < 0: + return nil, fmt.Errorf("key version does not exist (cannot be negative)") + case version > p.LatestVersion: + return nil, fmt.Errorf("key version does not exist; latest key version is %d", p.LatestVersion) + } + keyEntry, err := p.safeGetKeyEntry(version) + if err != nil { + return nil, err + } + + if p.Type == KeyType_HMAC { + return keyEntry.Key, nil + } + if keyEntry.HMACKey == nil { + return nil, fmt.Errorf("no HMAC key exists for that key version") + } + return keyEntry.HMACKey, nil +} + +func (p *Policy) Sign(ver int, context, input []byte, hashAlgorithm HashType, sigAlgorithm string, marshaling MarshalingType) (*SigningResult, error) { + return p.SignWithOptions(ver, context, input, &SigningOptions{ + HashAlgorithm: hashAlgorithm, + Marshaling: marshaling, + SaltLength: rsa.PSSSaltLengthAuto, + SigAlgorithm: sigAlgorithm, + }) +} + +func (p *Policy) minRSAPSSSaltLength() int { + // https://cs.opensource.google/go/go/+/refs/tags/go1.19:src/crypto/rsa/pss.go;l=247 + return rsa.PSSSaltLengthEqualsHash +} + +func (p *Policy) maxRSAPSSSaltLength(keyBitLen int, hash crypto.Hash) int { + // https://cs.opensource.google/go/go/+/refs/tags/go1.19:src/crypto/rsa/pss.go;l=288 + return (keyBitLen-1+7)/8 - 2 - hash.Size() +} + +func (p *Policy) validRSAPSSSaltLength(keyBitLen int, hash crypto.Hash, saltLength int) bool { + return p.minRSAPSSSaltLength() <= saltLength && saltLength <= p.maxRSAPSSSaltLength(keyBitLen, hash) +} + +func (p *Policy) SignWithOptions(ver int, context, input []byte, options *SigningOptions) (*SigningResult, error) { + if !p.Type.SigningSupported() { + return nil, fmt.Errorf("message signing not supported for key type %v", p.Type) + } + + switch { + case ver == 0: + ver = p.LatestVersion + case ver < 0: + return nil, errutil.UserError{Err: "requested version for signing is negative"} + case ver > p.LatestVersion: + return nil, errutil.UserError{Err: "requested version for signing is higher than the latest key version"} + case p.MinEncryptionVersion > 0 && ver < p.MinEncryptionVersion: + return nil, errutil.UserError{Err: "requested version for signing is less than the minimum encryption key version"} + } + + var sig []byte + var pubKey []byte + 
var err error + keyParams, err := p.safeGetKeyEntry(ver) + if err != nil { + return nil, err + } + + // Before signing, check if key has its private part, if not return error + if keyParams.IsPrivateKeyMissing() { + return nil, errutil.UserError{Err: "requested version for signing does not contain a private part"} + } + + hashAlgorithm := options.HashAlgorithm + marshaling := options.Marshaling + saltLength := options.SaltLength + sigAlgorithm := options.SigAlgorithm + + switch p.Type { + case KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521: + var curveBits int + var curve elliptic.Curve + switch p.Type { + case KeyType_ECDSA_P384: + curveBits = 384 + curve = elliptic.P384() + case KeyType_ECDSA_P521: + curveBits = 521 + curve = elliptic.P521() + default: + curveBits = 256 + curve = elliptic.P256() + } + + key := &ecdsa.PrivateKey{ + PublicKey: ecdsa.PublicKey{ + Curve: curve, + X: keyParams.EC_X, + Y: keyParams.EC_Y, + }, + D: keyParams.EC_D, + } + + r, s, err := ecdsa.Sign(rand.Reader, key, input) + if err != nil { + return nil, err + } + + switch marshaling { + case MarshalingTypeASN1: + // This is used by openssl and X.509 + sig, err = asn1.Marshal(ecdsaSignature{ + R: r, + S: s, + }) + if err != nil { + return nil, err + } + + case MarshalingTypeJWS: + // This is used by JWS + + // First we have to get the length of the curve in bytes. Although + // we only support 256 now, we'll do this in an agnostic way so we + // can reuse this marshaling if we support e.g. 521. Getting the + // number of bytes without rounding up would be 65.125 so we need + // to add one in that case. + keyLen := curveBits / 8 + if curveBits%8 > 0 { + keyLen++ + } + + // Now create the output array + sig = make([]byte, keyLen*2) + rb := r.Bytes() + sb := s.Bytes() + copy(sig[keyLen-len(rb):], rb) + copy(sig[2*keyLen-len(sb):], sb) + + default: + return nil, errutil.UserError{Err: "requested marshaling type is invalid"} + } + + case KeyType_ED25519: + var key ed25519.PrivateKey + + if p.Derived { + // Derive the key that should be used + var err error + key, err = p.GetKey(context, ver, 32) + if err != nil { + return nil, errutil.InternalError{Err: fmt.Sprintf("error deriving key: %v", err)} + } + pubKey = key.Public().(ed25519.PublicKey) + } else { + key = ed25519.PrivateKey(keyParams.Key) + } + + // Per docs, do not pre-hash ed25519; it does two passes and performs + // its own hashing + sig, err = key.Sign(rand.Reader, input, crypto.Hash(0)) + if err != nil { + return nil, err + } + + case KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096: + key := keyParams.RSAKey + + algo, ok := CryptoHashMap[hashAlgorithm] + if !ok { + return nil, errutil.InternalError{Err: "unsupported hash algorithm"} + } + + if sigAlgorithm == "" { + sigAlgorithm = "pss" + } + + switch sigAlgorithm { + case "pss": + if !p.validRSAPSSSaltLength(key.N.BitLen(), algo, saltLength) { + return nil, errutil.UserError{Err: fmt.Sprintf("requested salt length %d is invalid", saltLength)} + } + sig, err = rsa.SignPSS(rand.Reader, key, algo, input, &rsa.PSSOptions{SaltLength: saltLength}) + if err != nil { + return nil, err + } + case "pkcs1v15": + sig, err = rsa.SignPKCS1v15(rand.Reader, key, algo, input) + if err != nil { + return nil, err + } + default: + return nil, errutil.InternalError{Err: fmt.Sprintf("unsupported rsa signature algorithm %s", sigAlgorithm)} + } + + case KeyType_MANAGED_KEY: + keyEntry, err := p.safeGetKeyEntry(ver) + if err != nil { + return nil, err + } + + sig, err = p.signWithManagedKey(options, keyEntry, input) 
+ if err != nil { + return nil, err + } + + default: + return nil, fmt.Errorf("unsupported key type %v", p.Type) + } + + // Convert to base64 + var encoded string + switch marshaling { + case MarshalingTypeASN1: + encoded = base64.StdEncoding.EncodeToString(sig) + case MarshalingTypeJWS: + encoded = base64.RawURLEncoding.EncodeToString(sig) + } + res := &SigningResult{ + Signature: p.getVersionPrefix(ver) + encoded, + PublicKey: pubKey, + } + + return res, nil +} + +func (p *Policy) VerifySignature(context, input []byte, hashAlgorithm HashType, sigAlgorithm string, marshaling MarshalingType, sig string) (bool, error) { + return p.VerifySignatureWithOptions(context, input, sig, &SigningOptions{ + HashAlgorithm: hashAlgorithm, + Marshaling: marshaling, + SaltLength: rsa.PSSSaltLengthAuto, + SigAlgorithm: sigAlgorithm, + }) +} + +func (p *Policy) VerifySignatureWithOptions(context, input []byte, sig string, options *SigningOptions) (bool, error) { + if !p.Type.SigningSupported() { + return false, errutil.UserError{Err: fmt.Sprintf("message verification not supported for key type %v", p.Type)} + } + + tplParts, err := p.getTemplateParts() + if err != nil { + return false, err + } + + // Verify the prefix + if !strings.HasPrefix(sig, tplParts[0]) { + return false, errutil.UserError{Err: "invalid signature: no prefix"} + } + + splitVerSig := strings.SplitN(strings.TrimPrefix(sig, tplParts[0]), tplParts[1], 2) + if len(splitVerSig) != 2 { + return false, errutil.UserError{Err: "invalid signature: wrong number of fields"} + } + + ver, err := strconv.Atoi(splitVerSig[0]) + if err != nil { + return false, errutil.UserError{Err: "invalid signature: version number could not be decoded"} + } + + if ver > p.LatestVersion { + return false, errutil.UserError{Err: "invalid signature: version is too new"} + } + + if p.MinDecryptionVersion > 0 && ver < p.MinDecryptionVersion { + return false, errutil.UserError{Err: ErrTooOld} + } + + hashAlgorithm := options.HashAlgorithm + marshaling := options.Marshaling + saltLength := options.SaltLength + sigAlgorithm := options.SigAlgorithm + + var sigBytes []byte + switch marshaling { + case MarshalingTypeASN1: + sigBytes, err = base64.StdEncoding.DecodeString(splitVerSig[1]) + case MarshalingTypeJWS: + sigBytes, err = base64.RawURLEncoding.DecodeString(splitVerSig[1]) + default: + return false, errutil.UserError{Err: "requested marshaling type is invalid"} + } + if err != nil { + return false, errutil.UserError{Err: "invalid base64 signature value"} + } + + switch p.Type { + case KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521: + var curve elliptic.Curve + switch p.Type { + case KeyType_ECDSA_P384: + curve = elliptic.P384() + case KeyType_ECDSA_P521: + curve = elliptic.P521() + default: + curve = elliptic.P256() + } + + var ecdsaSig ecdsaSignature + + switch marshaling { + case MarshalingTypeASN1: + rest, err := asn1.Unmarshal(sigBytes, &ecdsaSig) + if err != nil { + return false, errutil.UserError{Err: "supplied signature is invalid"} + } + if rest != nil && len(rest) != 0 { + return false, errutil.UserError{Err: "supplied signature contains extra data"} + } + + case MarshalingTypeJWS: + paramLen := len(sigBytes) / 2 + rb := sigBytes[:paramLen] + sb := sigBytes[paramLen:] + ecdsaSig.R = new(big.Int) + ecdsaSig.R.SetBytes(rb) + ecdsaSig.S = new(big.Int) + ecdsaSig.S.SetBytes(sb) + } + + keyParams, err := p.safeGetKeyEntry(ver) + if err != nil { + return false, err + } + key := &ecdsa.PublicKey{ + Curve: curve, + X: keyParams.EC_X, + Y: keyParams.EC_Y, + } + 
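+		// NOTE(editorial): ecdsa.Verify treats input as the message digest,
+		// so callers are expected to hash the message before reaching here.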
+ return ecdsa.Verify(key, input, ecdsaSig.R, ecdsaSig.S), nil + + case KeyType_ED25519: + var pub ed25519.PublicKey + + if p.Derived { + // Derive the key that should be used + key, err := p.GetKey(context, ver, 32) + if err != nil { + return false, errutil.InternalError{Err: fmt.Sprintf("error deriving key: %v", err)} + } + pub = ed25519.PrivateKey(key).Public().(ed25519.PublicKey) + } else { + keyEntry, err := p.safeGetKeyEntry(ver) + if err != nil { + return false, err + } + + raw, err := base64.StdEncoding.DecodeString(keyEntry.FormattedPublicKey) + if err != nil { + return false, err + } + + pub = ed25519.PublicKey(raw) + } + + return ed25519.Verify(pub, input, sigBytes), nil + + case KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096: + keyEntry, err := p.safeGetKeyEntry(ver) + if err != nil { + return false, err + } + + algo, ok := CryptoHashMap[hashAlgorithm] + if !ok { + return false, errutil.InternalError{Err: "unsupported hash algorithm"} + } + + if sigAlgorithm == "" { + sigAlgorithm = "pss" + } + + switch sigAlgorithm { + case "pss": + publicKey := keyEntry.RSAPublicKey + if !keyEntry.IsPrivateKeyMissing() { + publicKey = &keyEntry.RSAKey.PublicKey + } + if !p.validRSAPSSSaltLength(publicKey.N.BitLen(), algo, saltLength) { + return false, errutil.UserError{Err: fmt.Sprintf("requested salt length %d is invalid", saltLength)} + } + err = rsa.VerifyPSS(publicKey, algo, input, sigBytes, &rsa.PSSOptions{SaltLength: saltLength}) + case "pkcs1v15": + publicKey := keyEntry.RSAPublicKey + if !keyEntry.IsPrivateKeyMissing() { + publicKey = &keyEntry.RSAKey.PublicKey + } + err = rsa.VerifyPKCS1v15(publicKey, algo, input, sigBytes) + default: + return false, errutil.InternalError{Err: fmt.Sprintf("unsupported rsa signature algorithm %s", sigAlgorithm)} + } + + return err == nil, nil + + case KeyType_MANAGED_KEY: + keyEntry, err := p.safeGetKeyEntry(ver) + if err != nil { + return false, err + } + + return p.verifyWithManagedKey(options, keyEntry, input, sigBytes) + + default: + return false, errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)} + } +} + +func (p *Policy) Import(ctx context.Context, storage logical.Storage, key []byte, randReader io.Reader) error { + return p.ImportPublicOrPrivate(ctx, storage, key, true, randReader) +} + +func (p *Policy) ImportPublicOrPrivate(ctx context.Context, storage logical.Storage, key []byte, isPrivateKey bool, randReader io.Reader) error { + now := time.Now() + entry := KeyEntry{ + CreationTime: now, + DeprecatedCreationTime: now.Unix(), + } + + // Before we insert this entry, check if the latest version is incomplete + // and this entry matches the current version; if so, return without + // updating to the next version. 
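+	// NOTE(editorial): "incomplete" means the latest version was imported
+	// with only its public half; importing the matching private key then
+	// completes that version in place rather than minting a new one.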
+ if p.LatestVersion > 0 { + latestKey := p.Keys[strconv.Itoa(p.LatestVersion)] + if latestKey.IsPrivateKeyMissing() && isPrivateKey { + if err := p.ImportPrivateKeyForVersion(ctx, storage, p.LatestVersion, key); err == nil { + return nil + } + } + } + + if p.Type != KeyType_HMAC { + hmacKey, err := uuid.GenerateRandomBytesWithReader(32, randReader) + if err != nil { + return err + } + entry.HMACKey = hmacKey + } + + if p.Type == KeyType_ED25519 && p.Derived && !isPrivateKey { + return fmt.Errorf("unable to import only public key for derived Ed25519 key: imported key should not be an Ed25519 key pair but is instead an HKDF key") + } + + if (p.Type == KeyType_AES128_GCM96 && len(key) != 16) || + ((p.Type == KeyType_AES256_GCM96 || p.Type == KeyType_ChaCha20_Poly1305) && len(key) != 32) || + (p.Type == KeyType_HMAC && (len(key) < HmacMinKeySize || len(key) > HmacMaxKeySize)) { + return fmt.Errorf("invalid key size %d bytes for key type %s", len(key), p.Type) + } + + if p.Type == KeyType_AES128_GCM96 || p.Type == KeyType_AES256_GCM96 || p.Type == KeyType_ChaCha20_Poly1305 || p.Type == KeyType_HMAC { + entry.Key = key + if p.Type == KeyType_HMAC { + p.KeySize = len(key) + entry.HMACKey = key + } + } else { + var parsedKey any + var err error + if isPrivateKey { + parsedKey, err = x509.ParsePKCS8PrivateKey(key) + if err != nil { + if strings.Contains(err.Error(), "unknown elliptic curve") { + var edErr error + parsedKey, edErr = ParsePKCS8Ed25519PrivateKey(key) + if edErr != nil { + return fmt.Errorf("error parsing asymmetric key:\n - assuming contents are an ed25519 private key: %s\n - original error: %v", edErr, err) + } + + // Parsing as Ed25519-in-PKCS8-ECPrivateKey succeeded! + } else if strings.Contains(err.Error(), oidSignatureRSAPSS.String()) { + var rsaErr error + parsedKey, rsaErr = ParsePKCS8RSAPSSPrivateKey(key) + if rsaErr != nil { + return fmt.Errorf("error parsing asymmetric key:\n - assuming contents are an RSA/PSS private key: %v\n - original error: %w", rsaErr, err) + } + + // Parsing as RSA-PSS in PKCS8 succeeded! + } else { + return fmt.Errorf("error parsing asymmetric key: %s", err) + } + } + } else { + pemBlock, _ := pem.Decode(key) + if pemBlock == nil { + return fmt.Errorf("error parsing public key: not in PEM format") + } + + parsedKey, err = x509.ParsePKIXPublicKey(pemBlock.Bytes) + if err != nil { + return fmt.Errorf("error parsing public key: %w", err) + } + } + + err = entry.parseFromKey(p.Type, parsedKey) + if err != nil { + return err + } + } + + p.LatestVersion += 1 + + if p.Keys == nil { + // This is an initial key rotation when generating a new policy. We + // don't need to call migrate here because if we've called getPolicy to + // get the policy in the first place it will have been run. + p.Keys = keyEntryMap{} + } + p.Keys[strconv.Itoa(p.LatestVersion)] = entry + + // This ensures that with new key creations min decryption version is set + // to 1 rather than the int default of 0, since keys start at 1 (either + // fresh or after migration to the key map) + if p.MinDecryptionVersion == 0 { + p.MinDecryptionVersion = 1 + } + + return p.Persist(ctx, storage) +} + +// Rotate rotates the policy and persists it to storage. +// If the rotation partially fails, the policy state will be restored. 
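+//
+// A minimal usage sketch (editorial; ctx, storage, and error handling come
+// from the caller):
+//
+//	if err := p.Rotate(ctx, storage, rand.Reader); err != nil {
+//		return err // prior LatestVersion/MinDecryptionVersion/Keys were restored
+//	}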
+func (p *Policy) Rotate(ctx context.Context, storage logical.Storage, randReader io.Reader) (retErr error) { + priorLatestVersion := p.LatestVersion + priorMinDecryptionVersion := p.MinDecryptionVersion + var priorKeys keyEntryMap + + if p.Imported && !p.AllowImportedKeyRotation { + return fmt.Errorf("imported key %s does not allow rotation within Vault", p.Name) + } + + if p.Keys != nil { + priorKeys = keyEntryMap{} + for k, v := range p.Keys { + priorKeys[k] = v + } + } + + defer func() { + if retErr != nil { + p.LatestVersion = priorLatestVersion + p.MinDecryptionVersion = priorMinDecryptionVersion + p.Keys = priorKeys + } + }() + + if err := p.RotateInMemory(randReader); err != nil { + return err + } + + p.Imported = false + return p.Persist(ctx, storage) +} + +// RotateInMemory rotates the policy but does not persist it to storage. +func (p *Policy) RotateInMemory(randReader io.Reader) (retErr error) { + now := time.Now() + entry := KeyEntry{ + CreationTime: now, + DeprecatedCreationTime: now.Unix(), + } + + hmacKey, err := uuid.GenerateRandomBytesWithReader(32, randReader) + if err != nil { + return err + } + entry.HMACKey = hmacKey + + switch p.Type { + case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305, KeyType_HMAC: + // Default to 256 bit key + numBytes := 32 + if p.Type == KeyType_AES128_GCM96 { + numBytes = 16 + } else if p.Type == KeyType_HMAC { + numBytes = p.KeySize + if numBytes < HmacMinKeySize || numBytes > HmacMaxKeySize { + return fmt.Errorf("invalid key size for HMAC key, must be between %d and %d bytes", HmacMinKeySize, HmacMaxKeySize) + } + } + newKey, err := uuid.GenerateRandomBytesWithReader(numBytes, randReader) + if err != nil { + return err + } + entry.Key = newKey + + if p.Type == KeyType_HMAC { + // To avoid causing problems, ensure HMACKey = Key. + entry.HMACKey = newKey + } + + case KeyType_ECDSA_P256, KeyType_ECDSA_P384, KeyType_ECDSA_P521: + var curve elliptic.Curve + switch p.Type { + case KeyType_ECDSA_P384: + curve = elliptic.P384() + case KeyType_ECDSA_P521: + curve = elliptic.P521() + default: + curve = elliptic.P256() + } + + privKey, err := ecdsa.GenerateKey(curve, rand.Reader) + if err != nil { + return err + } + entry.EC_D = privKey.D + entry.EC_X = privKey.X + entry.EC_Y = privKey.Y + derBytes, err := x509.MarshalPKIXPublicKey(privKey.Public()) + if err != nil { + return errwrap.Wrapf("error marshaling public key: {{err}}", err) + } + pemBlock := &pem.Block{ + Type: "PUBLIC KEY", + Bytes: derBytes, + } + pemBytes := pem.EncodeToMemory(pemBlock) + if pemBytes == nil || len(pemBytes) == 0 { + return fmt.Errorf("error PEM-encoding public key") + } + entry.FormattedPublicKey = string(pemBytes) + + case KeyType_ED25519: + // Go uses a 64-byte private key for Ed25519 keys (private+public, each + // 32-bytes long). When we do Key derivation, we still generate a 32-byte + // random value (and compute the corresponding Ed25519 public key), but + // use this entire 64-byte key as if it was an HKDF key. The corresponding + // underlying public key is never returned (which is probably good, because + // doing so would leak half of our HKDF key...), but means we cannot import + // derived-enabled Ed25519 public key components. 
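+		// NOTE(editorial): crypto/ed25519 represents a private key as the
+		// 64-byte seed||public concatenation, which is why the whole value
+		// can double as HKDF input as described above.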
+ pub, pri, err := ed25519.GenerateKey(randReader) + if err != nil { + return err + } + entry.Key = pri + entry.FormattedPublicKey = base64.StdEncoding.EncodeToString(pub) + case KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096: + bitSize := 2048 + if p.Type == KeyType_RSA3072 { + bitSize = 3072 + } + if p.Type == KeyType_RSA4096 { + bitSize = 4096 + } + + entry.RSAKey, err = rsa.GenerateKey(randReader, bitSize) + if err != nil { + return err + } + } + + if p.ConvergentEncryption { + if p.ConvergentVersion == -1 || p.ConvergentVersion > 1 { + entry.ConvergentVersion = currentConvergentVersion + } + } + + p.LatestVersion += 1 + + if p.Keys == nil { + // This is an initial key rotation when generating a new policy. We + // don't need to call migrate here because if we've called getPolicy to + // get the policy in the first place it will have been run. + p.Keys = keyEntryMap{} + } + p.Keys[strconv.Itoa(p.LatestVersion)] = entry + + // This ensures that with new key creations min decryption version is set + // to 1 rather than the int default of 0, since keys start at 1 (either + // fresh or after migration to the key map) + if p.MinDecryptionVersion == 0 { + p.MinDecryptionVersion = 1 + } + + return nil +} + +func (p *Policy) MigrateKeyToKeysMap() { + now := time.Now() + p.Keys = keyEntryMap{ + "1": KeyEntry{ + Key: p.Key, + CreationTime: now, + DeprecatedCreationTime: now.Unix(), + }, + } + p.Key = nil +} + +// Backup should be called with an exclusive lock held on the policy +func (p *Policy) Backup(ctx context.Context, storage logical.Storage) (out string, retErr error) { + if !p.Exportable { + return "", fmt.Errorf("exporting is disallowed on the policy") + } + + if !p.AllowPlaintextBackup { + return "", fmt.Errorf("plaintext backup is disallowed on the policy") + } + + priorBackupInfo := p.BackupInfo + + defer func() { + if retErr != nil { + p.BackupInfo = priorBackupInfo + } + }() + + // Create a record of this backup operation in the policy + p.BackupInfo = &BackupInfo{ + Time: time.Now(), + Version: p.LatestVersion, + } + err := p.Persist(ctx, storage) + if err != nil { + return "", errwrap.Wrapf("failed to persist policy with backup info: {{err}}", err) + } + + // Load the archive only after persisting the policy as the archive can get + // adjusted while persisting the policy + archivedKeys, err := p.LoadArchive(ctx, storage) + if err != nil { + return "", err + } + + keyData := &KeyData{ + Policy: p, + ArchivedKeys: archivedKeys, + } + + encodedBackup, err := jsonutil.EncodeJSON(keyData) + if err != nil { + return "", err + } + + return base64.StdEncoding.EncodeToString(encodedBackup), nil +} + +func (p *Policy) getTemplateParts() ([]string, error) { + partsRaw, ok := p.versionPrefixCache.Load("template-parts") + if ok { + return partsRaw.([]string), nil + } + + template := p.VersionTemplate + if template == "" { + template = DefaultVersionTemplate + } + + tplParts := strings.Split(template, "{{version}}") + if len(tplParts) != 2 { + return nil, errutil.InternalError{Err: "error parsing version template"} + } + + p.versionPrefixCache.Store("template-parts", tplParts) + return tplParts, nil +} + +func (p *Policy) getVersionPrefix(ver int) string { + prefixRaw, ok := p.versionPrefixCache.Load(ver) + if ok { + return prefixRaw.(string) + } + + template := p.VersionTemplate + if template == "" { + template = DefaultVersionTemplate + } + + prefix := strings.ReplaceAll(template, "{{version}}", strconv.Itoa(ver)) + p.versionPrefixCache.Store(ver, prefix) + + return prefix +} + +// 
SymmetricOpts are the arguments to symmetric operations that are "optional", e.g. +// not always used. This improves the aesthetics of calls to those functions. +type SymmetricOpts struct { + // Whether to use convergent encryption + Convergent bool + // The version of the convergent encryption scheme + ConvergentVersion int + // The nonce, if not randomly generated + Nonce []byte + // Additional data to include in AEAD authentication + AdditionalData []byte + // The HMAC key, for generating IVs in convergent encryption + HMACKey []byte + // Allows an external provider of the AEAD, for e.g. managed keys + AEADFactory AEADFactory +} + +// Symmetrically encrypt a plaintext given the convergence configuration and appropriate keys +func (p *Policy) SymmetricEncryptRaw(ver int, encKey, plaintext []byte, opts SymmetricOpts) ([]byte, error) { + var aead cipher.AEAD + var err error + nonce := opts.Nonce + + switch p.Type { + case KeyType_AES128_GCM96, KeyType_AES256_GCM96: + // Setup the cipher + aesCipher, err := aes.NewCipher(encKey) + if err != nil { + return nil, errutil.InternalError{Err: err.Error()} + } + + // Setup the GCM AEAD + gcm, err := cipher.NewGCM(aesCipher) + if err != nil { + return nil, errutil.InternalError{Err: err.Error()} + } + + aead = gcm + + case KeyType_ChaCha20_Poly1305: + cha, err := chacha20poly1305.New(encKey) + if err != nil { + return nil, errutil.InternalError{Err: err.Error()} + } + + aead = cha + case KeyType_MANAGED_KEY: + if opts.Convergent || len(opts.Nonce) != 0 { + return nil, errutil.UserError{Err: "cannot use convergent encryption or provide a nonce to managed-key backed encryption"} + } + if opts.AEADFactory == nil { + return nil, errors.New("expected AEAD factory from managed key, none provided") + } + aead, err = opts.AEADFactory.GetAEAD(nonce) + if err != nil { + return nil, err + } + } + + if opts.Convergent { + convergentVersion := p.convergentVersion(ver) + switch convergentVersion { + case 1: + if len(opts.Nonce) != aead.NonceSize() { + return nil, errutil.UserError{Err: fmt.Sprintf("base64-decoded nonce must be %d bytes long when using convergent encryption with this key", aead.NonceSize())} + } + case 2, 3: + if len(opts.HMACKey) == 0 { + return nil, errutil.InternalError{Err: fmt.Sprintf("invalid hmac key length of zero")} + } + nonceHmac := hmac.New(sha256.New, opts.HMACKey) + nonceHmac.Write(plaintext) + nonceSum := nonceHmac.Sum(nil) + nonce = nonceSum[:aead.NonceSize()] + default: + return nil, errutil.InternalError{Err: fmt.Sprintf("unhandled convergent version %d", convergentVersion)} + } + } else if len(nonce) == 0 { + // Compute random nonce + nonce, err = uuid.GenerateRandomBytes(aead.NonceSize()) + if err != nil { + return nil, errutil.InternalError{Err: err.Error()} + } + } else if len(nonce) != aead.NonceSize() { + return nil, errutil.UserError{Err: fmt.Sprintf("base64-decoded nonce must be %d bytes long but given %d bytes", aead.NonceSize(), len(nonce))} + } + + // Encrypt and tag with AEAD + ciphertext := aead.Seal(nil, nonce, plaintext, opts.AdditionalData) + + // Place the encrypted data after the nonce + if !opts.Convergent || p.convergentVersion(ver) > 1 { + ciphertext = append(nonce, ciphertext...) 
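+		// NOTE(editorial): for the built-in AEADs the resulting layout is
+		// nonce || ciphertext || tag, since Seal appends the auth tag;
+		// convergent v1 blobs omit the nonce because callers supply it.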
+ } + return ciphertext, nil +} + +// Symmetrically decrypt a ciphertext given the convergence configuration and appropriate keys +func (p *Policy) SymmetricDecryptRaw(encKey, ciphertext []byte, opts SymmetricOpts) ([]byte, error) { + var aead cipher.AEAD + var err error + var nonce []byte + + switch p.Type { + case KeyType_AES128_GCM96, KeyType_AES256_GCM96: + // Setup the cipher + aesCipher, err := aes.NewCipher(encKey) + if err != nil { + return nil, errutil.InternalError{Err: err.Error()} + } + + // Setup the GCM AEAD + gcm, err := cipher.NewGCM(aesCipher) + if err != nil { + return nil, errutil.InternalError{Err: err.Error()} + } + + aead = gcm + + case KeyType_ChaCha20_Poly1305: + cha, err := chacha20poly1305.New(encKey) + if err != nil { + return nil, errutil.InternalError{Err: err.Error()} + } + + aead = cha + case KeyType_MANAGED_KEY: + aead, err = opts.AEADFactory.GetAEAD(nonce) + if err != nil { + return nil, err + } + } + + if len(ciphertext) < aead.NonceSize() { + return nil, errutil.UserError{Err: "invalid ciphertext length"} + } + + // Extract the nonce and ciphertext + var trueCT []byte + if opts.Convergent && opts.ConvergentVersion == 1 { + trueCT = ciphertext + } else { + nonce = ciphertext[:aead.NonceSize()] + trueCT = ciphertext[aead.NonceSize():] + } + + // Verify and Decrypt + plain, err := aead.Open(nil, nonce, trueCT, opts.AdditionalData) + if err != nil { + return nil, errutil.UserError{Err: err.Error()} + } + return plain, nil +} + +func (p *Policy) EncryptWithFactory(ver int, context []byte, nonce []byte, value string, factories ...interface{}) (string, error) { + if !p.Type.EncryptionSupported() { + return "", errutil.UserError{Err: fmt.Sprintf("message encryption not supported for key type %v", p.Type)} + } + + // Decode the plaintext value + plaintext, err := base64.StdEncoding.DecodeString(value) + if err != nil { + return "", errutil.UserError{Err: err.Error()} + } + + switch { + case ver == 0: + ver = p.LatestVersion + case ver < 0: + return "", errutil.UserError{Err: "requested version for encryption is negative"} + case ver > p.LatestVersion: + return "", errutil.UserError{Err: "requested version for encryption is higher than the latest key version"} + case ver < p.MinEncryptionVersion: + return "", errutil.UserError{Err: "requested version for encryption is less than the minimum encryption key version"} + } + + var ciphertext []byte + + switch p.Type { + case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305: + hmacKey := context + + var encKey []byte + var deriveHMAC bool + + encBytes := 32 + hmacBytes := 0 + convergentVersion := p.convergentVersion(ver) + if convergentVersion > 2 { + deriveHMAC = true + hmacBytes = 32 + if len(nonce) > 0 { + return "", errutil.UserError{Err: "nonce provided when not allowed"} + } + } else if len(nonce) > 0 && (!p.ConvergentEncryption || convergentVersion != 1) { + return "", errutil.UserError{Err: "nonce provided when not allowed"} + } + if p.Type == KeyType_AES128_GCM96 { + encBytes = 16 + } + + key, err := p.GetKey(context, ver, encBytes+hmacBytes) + if err != nil { + return "", err + } + + if len(key) < encBytes+hmacBytes { + return "", errutil.InternalError{Err: "could not derive key, length too small"} + } + + encKey = key[:encBytes] + if len(encKey) != encBytes { + return "", errutil.InternalError{Err: "could not derive enc key, length not correct"} + } + if deriveHMAC { + hmacKey = key[encBytes:] + if len(hmacKey) != hmacBytes { + return "", errutil.InternalError{Err: "could not derive hmac 
key, length not correct"}
+			}
+		}
+
+		symopts := SymmetricOpts{
+			Convergent: p.ConvergentEncryption,
+			HMACKey:    hmacKey,
+			Nonce:      nonce,
+		}
+		for index, rawFactory := range factories {
+			if rawFactory == nil {
+				continue
+			}
+			switch factory := rawFactory.(type) {
+			case AEADFactory:
+				symopts.AEADFactory = factory
+			case AssociatedDataFactory:
+				symopts.AdditionalData, err = factory.GetAssociatedData()
+				if err != nil {
+					return "", errutil.InternalError{Err: fmt.Sprintf("unable to get associated_data/additional_data from factory[%d]: %v", index, err)}
+				}
+			case ManagedKeyFactory:
+			default:
+				return "", errutil.InternalError{Err: fmt.Sprintf("unknown type of factory[%d]: %T", index, rawFactory)}
+			}
+		}
+
+		ciphertext, err = p.SymmetricEncryptRaw(ver, encKey, plaintext, symopts)
+		if err != nil {
+			return "", err
+		}
+	case KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096:
+		keyEntry, err := p.safeGetKeyEntry(ver)
+		if err != nil {
+			return "", err
+		}
+		var publicKey *rsa.PublicKey
+		if keyEntry.RSAKey != nil {
+			publicKey = &keyEntry.RSAKey.PublicKey
+		} else {
+			publicKey = keyEntry.RSAPublicKey
+		}
+		ciphertext, err = rsa.EncryptOAEP(sha256.New(), rand.Reader, publicKey, plaintext, nil)
+		if err != nil {
+			return "", errutil.InternalError{Err: fmt.Sprintf("failed to RSA encrypt the plaintext: %v", err)}
+		}
+	case KeyType_MANAGED_KEY:
+		keyEntry, err := p.safeGetKeyEntry(ver)
+		if err != nil {
+			return "", err
+		}
+
+		var aad []byte
+		var managedKeyFactory ManagedKeyFactory
+		for _, f := range factories {
+			switch factory := f.(type) {
+			case AssociatedDataFactory:
+				aad, err = factory.GetAssociatedData()
+				if err != nil {
+					return "", err
+				}
+			case ManagedKeyFactory:
+				managedKeyFactory = factory
+			}
+		}
+
+		if managedKeyFactory == nil {
+			return "", errors.New("key type is managed_key, but managed key parameters were not provided")
+		}
+
+		ciphertext, err = p.encryptWithManagedKey(managedKeyFactory.GetManagedKeyParameters(), keyEntry, plaintext, nonce, aad)
+		if err != nil {
+			return "", err
+		}
+
+	default:
+		return "", errutil.InternalError{Err: fmt.Sprintf("unsupported key type %v", p.Type)}
+	}
+
+	// Convert to base64
+	encoded := base64.StdEncoding.EncodeToString(ciphertext)
+
+	// Prepend some information
+	encoded = p.getVersionPrefix(ver) + encoded
+
+	return encoded, nil
+}
+
+func (p *Policy) KeyVersionCanBeUpdated(keyVersion int, isPrivateKey bool) error {
+	keyEntry, err := p.safeGetKeyEntry(keyVersion)
+	if err != nil {
+		return err
+	}
+
+	if !p.Type.ImportPublicKeySupported() {
+		return errors.New("provided type does not support importing key versions")
+	}
+
+	isPrivateKeyMissing := keyEntry.IsPrivateKeyMissing()
+	if isPrivateKeyMissing && !isPrivateKey {
+		return errors.New("cannot add a public key to a key version that already has a public key set")
+	}
+
+	if !isPrivateKeyMissing {
+		return errors.New("private key imported, key version cannot be updated")
+	}
+
+	return nil
+}
+
+func (p *Policy) ImportPrivateKeyForVersion(ctx context.Context, storage logical.Storage, keyVersion int, key []byte) error {
+	keyEntry, err := p.safeGetKeyEntry(keyVersion)
+	if err != nil {
+		return err
+	}
+
+	// Parse key
+	parsedPrivateKey, err := x509.ParsePKCS8PrivateKey(key)
+	if err != nil {
+		if strings.Contains(err.Error(), "unknown elliptic curve") {
+			var edErr error
+			parsedPrivateKey, edErr = ParsePKCS8Ed25519PrivateKey(key)
+			if edErr != nil {
+				return fmt.Errorf("error parsing asymmetric key:\n - assuming contents are an ed25519 private key: %s\n - original
error: %v", edErr, err) + } + + // Parsing as Ed25519-in-PKCS8-ECPrivateKey succeeded! + } else if strings.Contains(err.Error(), oidSignatureRSAPSS.String()) { + var rsaErr error + parsedPrivateKey, rsaErr = ParsePKCS8RSAPSSPrivateKey(key) + if rsaErr != nil { + return fmt.Errorf("error parsing asymmetric key:\n - assuming contents are an RSA/PSS private key: %v\n - original error: %w", rsaErr, err) + } + + // Parsing as RSA-PSS in PKCS8 succeeded! + } else { + return fmt.Errorf("error parsing asymmetric key: %s", err) + } + } + + switch parsedPrivateKey.(type) { + case *ecdsa.PrivateKey: + ecdsaKey := parsedPrivateKey.(*ecdsa.PrivateKey) + pemBlock, _ := pem.Decode([]byte(keyEntry.FormattedPublicKey)) + if pemBlock == nil { + return fmt.Errorf("failed to parse key entry public key: invalid PEM blob") + } + publicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes) + if err != nil || publicKey == nil { + return fmt.Errorf("failed to parse key entry public key: %v", err) + } + if !publicKey.(*ecdsa.PublicKey).Equal(&ecdsaKey.PublicKey) { + return fmt.Errorf("cannot import key, key pair does not match") + } + case *rsa.PrivateKey: + rsaKey := parsedPrivateKey.(*rsa.PrivateKey) + if !rsaKey.PublicKey.Equal(keyEntry.RSAPublicKey) { + return fmt.Errorf("cannot import key, key pair does not match") + } + case ed25519.PrivateKey: + ed25519Key := parsedPrivateKey.(ed25519.PrivateKey) + publicKey, err := base64.StdEncoding.DecodeString(keyEntry.FormattedPublicKey) + if err != nil { + return fmt.Errorf("failed to parse key entry public key: %v", err) + } + if !ed25519.PublicKey(publicKey).Equal(ed25519Key.Public()) { + return fmt.Errorf("cannot import key, key pair does not match") + } + } + + err = keyEntry.parseFromKey(p.Type, parsedPrivateKey) + if err != nil { + return err + } + + p.Keys[strconv.Itoa(keyVersion)] = keyEntry + + return p.Persist(ctx, storage) +} + +func (ke *KeyEntry) parseFromKey(PolKeyType KeyType, parsedKey any) error { + switch parsedKey.(type) { + case *ecdsa.PrivateKey, *ecdsa.PublicKey: + if PolKeyType != KeyType_ECDSA_P256 && PolKeyType != KeyType_ECDSA_P384 && PolKeyType != KeyType_ECDSA_P521 { + return fmt.Errorf("invalid key type: expected %s, got %T", PolKeyType, parsedKey) + } + + curve := elliptic.P256() + if PolKeyType == KeyType_ECDSA_P384 { + curve = elliptic.P384() + } else if PolKeyType == KeyType_ECDSA_P521 { + curve = elliptic.P521() + } + + var derBytes []byte + var err error + ecdsaKey, ok := parsedKey.(*ecdsa.PrivateKey) + if ok { + + if ecdsaKey.Curve != curve { + return fmt.Errorf("invalid curve: expected %s, got %s", curve.Params().Name, ecdsaKey.Curve.Params().Name) + } + + ke.EC_D = ecdsaKey.D + ke.EC_X = ecdsaKey.X + ke.EC_Y = ecdsaKey.Y + + derBytes, err = x509.MarshalPKIXPublicKey(ecdsaKey.Public()) + if err != nil { + return errwrap.Wrapf("error marshaling public key: {{err}}", err) + } + } else { + ecdsaKey := parsedKey.(*ecdsa.PublicKey) + + if ecdsaKey.Curve != curve { + return fmt.Errorf("invalid curve: expected %s, got %s", curve.Params().Name, ecdsaKey.Curve.Params().Name) + } + + ke.EC_X = ecdsaKey.X + ke.EC_Y = ecdsaKey.Y + + derBytes, err = x509.MarshalPKIXPublicKey(ecdsaKey) + if err != nil { + return errwrap.Wrapf("error marshaling public key: {{err}}", err) + } + } + + pemBlock := &pem.Block{ + Type: "PUBLIC KEY", + Bytes: derBytes, + } + pemBytes := pem.EncodeToMemory(pemBlock) + if pemBytes == nil || len(pemBytes) == 0 { + return fmt.Errorf("error PEM-encoding public key") + } + ke.FormattedPublicKey = string(pemBytes) + case 
ed25519.PrivateKey, ed25519.PublicKey: + if PolKeyType != KeyType_ED25519 { + return fmt.Errorf("invalid key type: expected %s, got %T", PolKeyType, parsedKey) + } + + privateKey, ok := parsedKey.(ed25519.PrivateKey) + if ok { + ke.Key = privateKey + publicKey := privateKey.Public().(ed25519.PublicKey) + ke.FormattedPublicKey = base64.StdEncoding.EncodeToString(publicKey) + } else { + publicKey := parsedKey.(ed25519.PublicKey) + ke.FormattedPublicKey = base64.StdEncoding.EncodeToString(publicKey) + } + case *rsa.PrivateKey, *rsa.PublicKey: + if PolKeyType != KeyType_RSA2048 && PolKeyType != KeyType_RSA3072 && PolKeyType != KeyType_RSA4096 { + return fmt.Errorf("invalid key type: expected %s, got %T", PolKeyType, parsedKey) + } + + keyBytes := 256 + if PolKeyType == KeyType_RSA3072 { + keyBytes = 384 + } else if PolKeyType == KeyType_RSA4096 { + keyBytes = 512 + } + + rsaKey, ok := parsedKey.(*rsa.PrivateKey) + if ok { + if rsaKey.Size() != keyBytes { + return fmt.Errorf("invalid key size: expected %d bytes, got %d bytes", keyBytes, rsaKey.Size()) + } + ke.RSAKey = rsaKey + ke.RSAPublicKey = rsaKey.Public().(*rsa.PublicKey) + } else { + rsaKey := parsedKey.(*rsa.PublicKey) + if rsaKey.Size() != keyBytes { + return fmt.Errorf("invalid key size: expected %d bytes, got %d bytes", keyBytes, rsaKey.Size()) + } + ke.RSAPublicKey = rsaKey + } + default: + return fmt.Errorf("invalid key type: expected %s, got %T", PolKeyType, parsedKey) + } + + return nil +} + +func (p *Policy) WrapKey(ver int, targetKey interface{}, targetKeyType KeyType, hash hash.Hash) (string, error) { + if !p.Type.SigningSupported() { + return "", fmt.Errorf("message signing not supported for key type %v", p.Type) + } + + switch { + case ver == 0: + ver = p.LatestVersion + case ver < 0: + return "", errutil.UserError{Err: "requested version for key wrapping is negative"} + case ver > p.LatestVersion: + return "", errutil.UserError{Err: "requested version for key wrapping is higher than the latest key version"} + case p.MinEncryptionVersion > 0 && ver < p.MinEncryptionVersion: + return "", errutil.UserError{Err: "requested version for key wrapping is less than the minimum encryption key version"} + } + + keyEntry, err := p.safeGetKeyEntry(ver) + if err != nil { + return "", err + } + + return keyEntry.WrapKey(targetKey, targetKeyType, hash) +} + +func (ke *KeyEntry) WrapKey(targetKey interface{}, targetKeyType KeyType, hash hash.Hash) (string, error) { + // Presently this method implements a CKM_RSA_AES_KEY_WRAP-compatible + // wrapping interface and only works on RSA keyEntries as a result. 
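+	// NOTE(editorial): concretely, wrapTargetPKCS8ForImport below RSA-OAEP
+	// encrypts a fresh ephemeral AES-256 key, wraps the target key under it
+	// with AES-KWP, and base64-encodes the concatenation of the two blobs.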
+	if ke.RSAPublicKey == nil {
+		return "", fmt.Errorf("unsupported key type in use; must be an RSA key")
+	}
+
+	var preppedTargetKey []byte
+	switch targetKeyType {
+	case KeyType_AES128_GCM96, KeyType_AES256_GCM96, KeyType_ChaCha20_Poly1305, KeyType_HMAC:
+		var ok bool
+		preppedTargetKey, ok = targetKey.([]byte)
+		if !ok {
+			return "", fmt.Errorf("failed to wrap target key for import: symmetric key not provided in byte format (%T)", targetKey)
+		}
+	default:
+		var err error
+		preppedTargetKey, err = x509.MarshalPKCS8PrivateKey(targetKey)
+		if err != nil {
+			return "", fmt.Errorf("failed to wrap target key for import: %w", err)
+		}
+	}
+
+	result, err := wrapTargetPKCS8ForImport(ke.RSAPublicKey, preppedTargetKey, hash)
+	if err != nil {
+		return result, fmt.Errorf("failed to wrap target key for import: %w", err)
+	}
+
+	return result, nil
+}
+
+func wrapTargetPKCS8ForImport(wrappingKey *rsa.PublicKey, preppedTargetKey []byte, hash hash.Hash) (string, error) {
+	// Generate an ephemeral AES-256 key
+	ephKey, err := uuid.GenerateRandomBytes(32)
+	if err != nil {
+		return "", fmt.Errorf("failed to generate an ephemeral AES wrapping key: %w", err)
+	}
+
+	// Wrap ephemeral AES key with public wrapping key
+	ephKeyWrapped, err := rsa.EncryptOAEP(hash, rand.Reader, wrappingKey, ephKey, []byte{} /* label */)
+	if err != nil {
+		return "", fmt.Errorf("failed to encrypt ephemeral wrapping key with public key: %w", err)
+	}
+
+	// Create KWP instance for wrapping target key
+	kwp, err := subtle.NewKWP(ephKey)
+	if err != nil {
+		return "", fmt.Errorf("failed to generate new KWP from AES key: %w", err)
+	}
+
+	// Wrap target key with KWP
+	targetKeyWrapped, err := kwp.Wrap(preppedTargetKey)
+	if err != nil {
+		return "", fmt.Errorf("failed to wrap target key with KWP: %w", err)
+	}
+
+	// Combine wrapped keys into a single blob and base64 encode
+	wrappedKeys := append(ephKeyWrapped, targetKeyWrapped...)
+	return base64.StdEncoding.EncodeToString(wrappedKeys), nil
+}
diff --git a/sdk/helper/keysutil/policy_test.go b/sdk/helper/keysutil/policy_test.go
new file mode 100644
index 0000000..f5e4d35
--- /dev/null
+++ b/sdk/helper/keysutil/policy_test.go
@@ -0,0 +1,1069 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package keysutil + +import ( + "bytes" + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "errors" + "fmt" + mathrand "math/rand" + "reflect" + "strconv" + "strings" + "sync" + "testing" + "time" + + "golang.org/x/crypto/ed25519" + + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/mitchellh/copystructure" +) + +func TestPolicy_KeyEntryMapUpgrade(t *testing.T) { + now := time.Now() + old := map[int]KeyEntry{ + 1: { + Key: []byte("samplekey"), + HMACKey: []byte("samplehmackey"), + CreationTime: now, + FormattedPublicKey: "sampleformattedpublickey", + }, + 2: { + Key: []byte("samplekey2"), + HMACKey: []byte("samplehmackey2"), + CreationTime: now.Add(10 * time.Second), + FormattedPublicKey: "sampleformattedpublickey2", + }, + } + + oldEncoded, err := jsonutil.EncodeJSON(old) + if err != nil { + t.Fatal(err) + } + + var new keyEntryMap + err = jsonutil.DecodeJSON(oldEncoded, &new) + if err != nil { + t.Fatal(err) + } + + newEncoded, err := jsonutil.EncodeJSON(&new) + if err != nil { + t.Fatal(err) + } + + if string(oldEncoded) != string(newEncoded) { + t.Fatalf("failed to upgrade key entry map;\nold: %q\nnew: %q", string(oldEncoded), string(newEncoded)) + } +} + +func Test_KeyUpgrade(t *testing.T) { + lockManagerWithCache, _ := NewLockManager(true, 0) + lockManagerWithoutCache, _ := NewLockManager(false, 0) + testKeyUpgradeCommon(t, lockManagerWithCache) + testKeyUpgradeCommon(t, lockManagerWithoutCache) +} + +func testKeyUpgradeCommon(t *testing.T, lm *LockManager) { + ctx := context.Background() + + storage := &logical.InmemStorage{} + p, upserted, err := lm.GetPolicy(ctx, PolicyRequest{ + Upsert: true, + Storage: storage, + KeyType: KeyType_AES256_GCM96, + Name: "test", + }, rand.Reader) + if err != nil { + t.Fatal(err) + } + if p == nil { + t.Fatal("nil policy") + } + if !upserted { + t.Fatal("expected an upsert") + } + if !lm.useCache { + p.Unlock() + } + + testBytes := make([]byte, len(p.Keys["1"].Key)) + copy(testBytes, p.Keys["1"].Key) + + p.Key = p.Keys["1"].Key + p.Keys = nil + p.MigrateKeyToKeysMap() + if p.Key != nil { + t.Fatal("policy.Key is not nil") + } + if len(p.Keys) != 1 { + t.Fatal("policy.Keys is the wrong size") + } + if !reflect.DeepEqual(testBytes, p.Keys["1"].Key) { + t.Fatal("key mismatch") + } +} + +func Test_ArchivingUpgrade(t *testing.T) { + lockManagerWithCache, _ := NewLockManager(true, 0) + lockManagerWithoutCache, _ := NewLockManager(false, 0) + testArchivingUpgradeCommon(t, lockManagerWithCache) + testArchivingUpgradeCommon(t, lockManagerWithoutCache) +} + +func testArchivingUpgradeCommon(t *testing.T, lm *LockManager) { + ctx := context.Background() + + // First, we generate a policy and rotate it a number of times. 
Each time
+// we'll ensure that we have the expected number of keys in the archive and
+// the main keys object, which without changing the min version should be
+// zero and latest, respectively
+
+	storage := &logical.InmemStorage{}
+	p, _, err := lm.GetPolicy(ctx, PolicyRequest{
+		Upsert:  true,
+		Storage: storage,
+		KeyType: KeyType_AES256_GCM96,
+		Name:    "test",
+	}, rand.Reader)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if p == nil {
+		t.Fatal("nil policy")
+	}
+	if !lm.useCache {
+		p.Unlock()
+	}
+
+	// Store the initial key in the archive
+	keysArchive := []KeyEntry{{}, p.Keys["1"]}
+	checkKeys(t, ctx, p, storage, keysArchive, "initial", 1, 1, 1)
+
+	for i := 2; i <= 10; i++ {
+		err = p.Rotate(ctx, storage, rand.Reader)
+		if err != nil {
+			t.Fatal(err)
+		}
+		keysArchive = append(keysArchive, p.Keys[strconv.Itoa(i)])
+		checkKeys(t, ctx, p, storage, keysArchive, "rotate", i, i, i)
+	}
+
+	// Now, wipe the archive and set the archive version to zero
+	err = storage.Delete(ctx, "archive/test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	p.ArchiveVersion = 0
+
+	// Store it, but without calling persist, so we don't trigger
+	// handleArchiving()
+	buf, err := p.Serialize()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Write the policy into storage
+	err = storage.Put(ctx, &logical.StorageEntry{
+		Key:   "policy/" + p.Name,
+		Value: buf,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// If we're caching, expire from the cache since we modified it
+	// under-the-hood
+	if lm.useCache {
+		lm.cache.Delete("test")
+	}
+
+	// Now get the policy again; the upgrade should happen automatically
+	p, _, err = lm.GetPolicy(ctx, PolicyRequest{
+		Storage: storage,
+		Name:    "test",
+	}, rand.Reader)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if p == nil {
+		t.Fatal("nil policy")
+	}
+	if !lm.useCache {
+		p.Unlock()
+	}
+
+	checkKeys(t, ctx, p, storage, keysArchive, "upgrade", 10, 10, 10)
+
+	// Let's check some deletion logic while we're at it
+
+	// The policy should be in there
+	if lm.useCache {
+		_, ok := lm.cache.Load("test")
+		if !ok {
+			t.Fatal("nil policy in cache")
+		}
+	}
+
+	// First we'll do this wrong, by not setting the deletion flag
+	err = lm.DeletePolicy(ctx, storage, "test")
+	if err == nil {
+		t.Fatal("got nil error, but should not have been able to delete since we didn't set the deletion flag on the policy")
+	}
+
+	// The policy should still be in there
+	if lm.useCache {
+		_, ok := lm.cache.Load("test")
+		if !ok {
+			t.Fatal("nil policy in cache")
+		}
+	}
+
+	p, _, err = lm.GetPolicy(ctx, PolicyRequest{
+		Storage: storage,
+		Name:    "test",
+	}, rand.Reader)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if p == nil {
+		t.Fatal("policy nil after bad delete")
+	}
+	if !lm.useCache {
+		p.Unlock()
+	}
+
+	// Now do it properly
+	p.DeletionAllowed = true
+	err = p.Persist(ctx, storage)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = lm.DeletePolicy(ctx, storage, "test")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// The policy should *not* be in there
+	if lm.useCache {
+		_, ok := lm.cache.Load("test")
+		if ok {
+			t.Fatal("non-nil policy in cache")
+		}
+	}
+
+	p, _, err = lm.GetPolicy(ctx, PolicyRequest{
+		Storage: storage,
+		Name:    "test",
+	}, rand.Reader)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if p != nil {
+		t.Fatal("policy not nil after delete")
+	}
+}
+
+func Test_Archiving(t *testing.T) {
+	lockManagerWithCache, _ := NewLockManager(true, 0)
+	lockManagerWithoutCache, _ := NewLockManager(false, 0)
+	testArchivingCommon(t, lockManagerWithCache)
+	testArchivingCommon(t, lockManagerWithoutCache)
+}
+
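+// testArchivingCommon rotates a fresh policy ten times, then walks the
+// minimum decryption version up and back down, checking after every persist
+// that the archive keeps all key versions while the in-memory key map holds
+// exactly LatestVersion-MinDecryptionVersion+1 entries.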
+func testArchivingCommon(t *testing.T, lm *LockManager) { + ctx := context.Background() + + // First, we generate a policy and rotate it a number of times. Each time + // we'll ensure that we have the expected number of keys in the archive and + // the main keys object, which without changing the min version should be + // zero and latest, respectively + + storage := &logical.InmemStorage{} + p, _, err := lm.GetPolicy(ctx, PolicyRequest{ + Upsert: true, + Storage: storage, + KeyType: KeyType_AES256_GCM96, + Name: "test", + }, rand.Reader) + if err != nil { + t.Fatal(err) + } + if p == nil { + t.Fatal("nil policy") + } + if !lm.useCache { + p.Unlock() + } + + // Store the initial key in the archive + keysArchive := []KeyEntry{{}, p.Keys["1"]} + checkKeys(t, ctx, p, storage, keysArchive, "initial", 1, 1, 1) + + for i := 2; i <= 10; i++ { + err = p.Rotate(ctx, storage, rand.Reader) + if err != nil { + t.Fatal(err) + } + keysArchive = append(keysArchive, p.Keys[strconv.Itoa(i)]) + checkKeys(t, ctx, p, storage, keysArchive, "rotate", i, i, i) + } + + // Move the min decryption version up + for i := 1; i <= 10; i++ { + p.MinDecryptionVersion = i + + err = p.Persist(ctx, storage) + if err != nil { + t.Fatal(err) + } + // We expect to find: + // * The keys in archive are the same as the latest version + // * The latest version is constant + // * The number of keys in the policy itself is from the min + // decryption version up to the latest version, so for e.g. 7 and + // 10, you'd need 7, 8, 9, and 10 -- IOW, latest version - min + // decryption version plus 1 (the min decryption version key + // itself) + checkKeys(t, ctx, p, storage, keysArchive, "minadd", 10, 10, p.LatestVersion-p.MinDecryptionVersion+1) + } + + // Move the min decryption version down + for i := 10; i >= 1; i-- { + p.MinDecryptionVersion = i + + err = p.Persist(ctx, storage) + if err != nil { + t.Fatal(err) + } + // We expect to find: + // * The keys in archive are never removed so same as the latest version + // * The latest version is constant + // * The number of keys in the policy itself is from the min + // decryption version up to the latest version, so for e.g. 7 and + // 10, you'd need 7, 8, 9, and 10 -- IOW, latest version - min + // decryption version plus 1 (the min decryption version key + // itself) + checkKeys(t, ctx, p, storage, keysArchive, "minsub", 10, 10, p.LatestVersion-p.MinDecryptionVersion+1) + } +} + +func checkKeys(t *testing.T, + ctx context.Context, + p *Policy, + storage logical.Storage, + keysArchive []KeyEntry, + action string, + archiveVer, latestVer, keysSize int, +) { + // Sanity check + if len(keysArchive) != latestVer+1 { + t.Fatalf("latest expected key version is %d, expected test keys archive size is %d, "+ + "but keys archive is of size %d", latestVer, latestVer+1, len(keysArchive)) + } + + archive, err := p.LoadArchive(ctx, storage) + if err != nil { + t.Fatal(err) + } + + badArchiveVer := false + if archiveVer == 0 { + if len(archive.Keys) != 0 || p.ArchiveVersion != 0 { + badArchiveVer = true + } + } else { + // We need to subtract one because we have the indexes match key + // versions, which start at 1. 
So for an archive version of 1, we + // actually have two entries -- a blank 0 entry, and the key at spot 1 + if archiveVer != len(archive.Keys)-1 || archiveVer != p.ArchiveVersion { + badArchiveVer = true + } + } + if badArchiveVer { + t.Fatalf( + "expected archive version %d, found length of archive keys %d and policy archive version %d", + archiveVer, len(archive.Keys), p.ArchiveVersion, + ) + } + + if latestVer != p.LatestVersion { + t.Fatalf( + "expected latest version %d, found %d", + latestVer, p.LatestVersion, + ) + } + + if keysSize != len(p.Keys) { + t.Fatalf( + "expected keys size %d, found %d, action is %s, policy is \n%#v\n", + keysSize, len(p.Keys), action, p, + ) + } + + for i := p.MinDecryptionVersion; i <= p.LatestVersion; i++ { + if _, ok := p.Keys[strconv.Itoa(i)]; !ok { + t.Fatalf( + "expected key %d, did not find it in policy keys", i, + ) + } + } + + for i := p.MinDecryptionVersion; i <= p.LatestVersion; i++ { + ver := strconv.Itoa(i) + if !p.Keys[ver].CreationTime.Equal(keysArchive[i].CreationTime) { + t.Fatalf("key %d not equivalent between policy keys and test keys archive; policy keys:\n%#v\ntest keys archive:\n%#v\n", i, p.Keys[ver], keysArchive[i]) + } + polKey := p.Keys[ver] + polKey.CreationTime = keysArchive[i].CreationTime + p.Keys[ver] = polKey + if !reflect.DeepEqual(p.Keys[ver], keysArchive[i]) { + t.Fatalf("key %d not equivalent between policy keys and test keys archive; policy keys:\n%#v\ntest keys archive:\n%#v\n", i, p.Keys[ver], keysArchive[i]) + } + } + + for i := 1; i < len(archive.Keys); i++ { + if !reflect.DeepEqual(archive.Keys[i].Key, keysArchive[i].Key) { + t.Fatalf("key %d not equivalent between policy archive and test keys archive; policy archive:\n%#v\ntest keys archive:\n%#v\n", i, archive.Keys[i].Key, keysArchive[i].Key) + } + } +} + +func Test_StorageErrorSafety(t *testing.T) { + ctx := context.Background() + lm, _ := NewLockManager(true, 0) + + storage := &logical.InmemStorage{} + p, _, err := lm.GetPolicy(ctx, PolicyRequest{ + Upsert: true, + Storage: storage, + KeyType: KeyType_AES256_GCM96, + Name: "test", + }, rand.Reader) + if err != nil { + t.Fatal(err) + } + if p == nil { + t.Fatal("nil policy") + } + + // Store the initial key in the archive + keysArchive := []KeyEntry{{}, p.Keys["1"]} + checkKeys(t, ctx, p, storage, keysArchive, "initial", 1, 1, 1) + + // We use checkKeys here just for sanity; it doesn't really handle cases of + // errors below so we do more targeted testing later + for i := 2; i <= 5; i++ { + err = p.Rotate(ctx, storage, rand.Reader) + if err != nil { + t.Fatal(err) + } + keysArchive = append(keysArchive, p.Keys[strconv.Itoa(i)]) + checkKeys(t, ctx, p, storage, keysArchive, "rotate", i, i, i) + } + + underlying := storage.Underlying() + underlying.FailPut(true) + + priorLen := len(p.Keys) + + err = p.Rotate(ctx, storage, rand.Reader) + if err == nil { + t.Fatal("expected error") + } + + if len(p.Keys) != priorLen { + t.Fatal("length of keys should not have changed") + } +} + +func Test_BadUpgrade(t *testing.T) { + ctx := context.Background() + lm, _ := NewLockManager(true, 0) + storage := &logical.InmemStorage{} + p, _, err := lm.GetPolicy(ctx, PolicyRequest{ + Upsert: true, + Storage: storage, + KeyType: KeyType_AES256_GCM96, + Name: "test", + }, rand.Reader) + if err != nil { + t.Fatal(err) + } + if p == nil { + t.Fatal("nil policy") + } + + orig, err := copystructure.Copy(p) + if err != nil { + t.Fatal(err) + } + orig.(*Policy).l = p.l + + p.Key = p.Keys["1"].Key + p.Keys = nil + p.MinDecryptionVersion = 0 
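+	// NOTE(editorial): the three writes above regress the policy to its
+	// pre-keys-map shape, so the Upgrade call below must rebuild p.Keys,
+	// restore MinDecryptionVersion to 1, and generate a fresh HMAC key.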
+ + if err := p.Upgrade(ctx, storage, rand.Reader); err != nil { + t.Fatal(err) + } + + k := p.Keys["1"] + o := orig.(*Policy).Keys["1"] + k.CreationTime = o.CreationTime + k.HMACKey = o.HMACKey + p.Keys["1"] = k + p.versionPrefixCache = sync.Map{} + + if !reflect.DeepEqual(orig, p) { + t.Fatalf("not equal:\n%#v\n%#v", orig, p) + } + + // Do it again with a failing storage call + underlying := storage.Underlying() + underlying.FailPut(true) + + p.Key = p.Keys["1"].Key + p.Keys = nil + p.MinDecryptionVersion = 0 + + if err := p.Upgrade(ctx, storage, rand.Reader); err == nil { + t.Fatal("expected error") + } + + if p.MinDecryptionVersion == 1 { + t.Fatal("min decryption version was changed") + } + if p.Keys != nil { + t.Fatal("found upgraded keys") + } + if p.Key == nil { + t.Fatal("non-upgraded key not found") + } +} + +func Test_BadArchive(t *testing.T) { + ctx := context.Background() + lm, _ := NewLockManager(true, 0) + storage := &logical.InmemStorage{} + p, _, err := lm.GetPolicy(ctx, PolicyRequest{ + Upsert: true, + Storage: storage, + KeyType: KeyType_AES256_GCM96, + Name: "test", + }, rand.Reader) + if err != nil { + t.Fatal(err) + } + if p == nil { + t.Fatal("nil policy") + } + + for i := 2; i <= 10; i++ { + err = p.Rotate(ctx, storage, rand.Reader) + if err != nil { + t.Fatal(err) + } + } + + p.MinDecryptionVersion = 5 + if err := p.Persist(ctx, storage); err != nil { + t.Fatal(err) + } + if p.ArchiveVersion != 10 { + t.Fatalf("unexpected archive version %d", p.ArchiveVersion) + } + if len(p.Keys) != 6 { + t.Fatalf("unexpected key length %d", len(p.Keys)) + } + + // Set back + p.MinDecryptionVersion = 1 + if err := p.Persist(ctx, storage); err != nil { + t.Fatal(err) + } + if p.ArchiveVersion != 10 { + t.Fatalf("unexpected archive version %d", p.ArchiveVersion) + } + if len(p.Keys) != 10 { + t.Fatalf("unexpected key length %d", len(p.Keys)) + } + + // Run it again but we'll turn off storage along the way + p.MinDecryptionVersion = 5 + if err := p.Persist(ctx, storage); err != nil { + t.Fatal(err) + } + if p.ArchiveVersion != 10 { + t.Fatalf("unexpected archive version %d", p.ArchiveVersion) + } + if len(p.Keys) != 6 { + t.Fatalf("unexpected key length %d", len(p.Keys)) + } + + underlying := storage.Underlying() + underlying.FailPut(true) + + // Set back, which should cause p.Keys to be changed if the persist works, + // but it doesn't + p.MinDecryptionVersion = 1 + if err := p.Persist(ctx, storage); err == nil { + t.Fatal("expected error during put") + } + if p.ArchiveVersion != 10 { + t.Fatalf("unexpected archive version %d", p.ArchiveVersion) + } + // Here's the expected change + if len(p.Keys) != 6 { + t.Fatalf("unexpected key length %d", len(p.Keys)) + } +} + +func Test_Import(t *testing.T) { + ctx := context.Background() + storage := &logical.InmemStorage{} + testKeys, err := generateTestKeys() + if err != nil { + t.Fatalf("error generating test keys: %s", err) + } + + tests := map[string]struct { + policy Policy + key []byte + shouldError bool + }{ + "import AES key": { + policy: Policy{ + Name: "test-aes-key", + Type: KeyType_AES256_GCM96, + }, + key: testKeys[KeyType_AES256_GCM96], + shouldError: false, + }, + "import RSA key": { + policy: Policy{ + Name: "test-rsa-key", + Type: KeyType_RSA2048, + }, + key: testKeys[KeyType_RSA2048], + shouldError: false, + }, + "import ECDSA key": { + policy: Policy{ + Name: "test-ecdsa-key", + Type: KeyType_ECDSA_P256, + }, + key: testKeys[KeyType_ECDSA_P256], + shouldError: false, + }, + "import ED25519 key": { + policy: Policy{ + Name: 
"test-ed25519-key", + Type: KeyType_ED25519, + }, + key: testKeys[KeyType_ED25519], + shouldError: false, + }, + "import incorrect key type": { + policy: Policy{ + Name: "test-ed25519-key", + Type: KeyType_ED25519, + }, + key: testKeys[KeyType_AES256_GCM96], + shouldError: true, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + if err := test.policy.Import(ctx, storage, test.key, rand.Reader); (err != nil) != test.shouldError { + t.Fatalf("error importing key: %s", err) + } + }) + } +} + +func generateTestKeys() (map[KeyType][]byte, error) { + keyMap := make(map[KeyType][]byte) + + rsaKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return nil, err + } + rsaKeyBytes, err := x509.MarshalPKCS8PrivateKey(rsaKey) + if err != nil { + return nil, err + } + keyMap[KeyType_RSA2048] = rsaKeyBytes + + rsaKey, err = rsa.GenerateKey(rand.Reader, 3072) + if err != nil { + return nil, err + } + rsaKeyBytes, err = x509.MarshalPKCS8PrivateKey(rsaKey) + if err != nil { + return nil, err + } + keyMap[KeyType_RSA3072] = rsaKeyBytes + + rsaKey, err = rsa.GenerateKey(rand.Reader, 4096) + if err != nil { + return nil, err + } + rsaKeyBytes, err = x509.MarshalPKCS8PrivateKey(rsaKey) + if err != nil { + return nil, err + } + keyMap[KeyType_RSA4096] = rsaKeyBytes + + ecdsaKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return nil, err + } + ecdsaKeyBytes, err := x509.MarshalPKCS8PrivateKey(ecdsaKey) + if err != nil { + return nil, err + } + keyMap[KeyType_ECDSA_P256] = ecdsaKeyBytes + + _, ed25519Key, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + return nil, err + } + ed25519KeyBytes, err := x509.MarshalPKCS8PrivateKey(ed25519Key) + if err != nil { + return nil, err + } + keyMap[KeyType_ED25519] = ed25519KeyBytes + + aesKey := make([]byte, 32) + _, err = rand.Read(aesKey) + if err != nil { + return nil, err + } + keyMap[KeyType_AES256_GCM96] = aesKey + + return keyMap, nil +} + +func BenchmarkSymmetric(b *testing.B) { + ctx := context.Background() + lm, _ := NewLockManager(true, 0) + storage := &logical.InmemStorage{} + p, _, _ := lm.GetPolicy(ctx, PolicyRequest{ + Upsert: true, + Storage: storage, + KeyType: KeyType_AES256_GCM96, + Name: "test", + }, rand.Reader) + key, _ := p.GetKey(nil, 1, 32) + pt := make([]byte, 10) + ad := make([]byte, 10) + for i := 0; i < b.N; i++ { + ct, _ := p.SymmetricEncryptRaw(1, key, pt, + SymmetricOpts{ + AdditionalData: ad, + }) + pt2, _ := p.SymmetricDecryptRaw(key, ct, SymmetricOpts{ + AdditionalData: ad, + }) + if !bytes.Equal(pt, pt2) { + b.Fail() + } + } +} + +func saltOptions(options SigningOptions, saltLength int) SigningOptions { + return SigningOptions{ + HashAlgorithm: options.HashAlgorithm, + Marshaling: options.Marshaling, + SaltLength: saltLength, + SigAlgorithm: options.SigAlgorithm, + } +} + +func manualVerify(depth int, t *testing.T, p *Policy, input []byte, sig *SigningResult, options SigningOptions) { + tabs := strings.Repeat("\t", depth) + t.Log(tabs, "Manually verifying signature with options:", options) + + tabs = strings.Repeat("\t", depth+1) + verified, err := p.VerifySignatureWithOptions(nil, input, sig.Signature, &options) + if err != nil { + t.Fatal(tabs, "❌ Failed to manually verify signature:", err) + } + if !verified { + t.Fatal(tabs, "❌ Failed to manually verify signature") + } +} + +func autoVerify(depth int, t *testing.T, p *Policy, input []byte, sig *SigningResult, options SigningOptions) { + tabs := strings.Repeat("\t", depth) + t.Log(tabs, 
"Automatically verifying signature with options:", options) + + tabs = strings.Repeat("\t", depth+1) + verified, err := p.VerifySignature(nil, input, options.HashAlgorithm, options.SigAlgorithm, options.Marshaling, sig.Signature) + if err != nil { + t.Fatal(tabs, "❌ Failed to automatically verify signature:", err) + } + if !verified { + t.Fatal(tabs, "❌ Failed to automatically verify signature") + } +} + +func Test_RSA_PSS(t *testing.T) { + t.Log("Testing RSA PSS") + mathrand.Seed(time.Now().UnixNano()) + + var userError errutil.UserError + ctx := context.Background() + storage := &logical.InmemStorage{} + // https://crypto.stackexchange.com/a/1222 + input := []byte("the ancients say the longer the salt, the more provable the security") + sigAlgorithm := "pss" + + tabs := make(map[int]string) + for i := 1; i <= 6; i++ { + tabs[i] = strings.Repeat("\t", i) + } + + test_RSA_PSS := func(t *testing.T, p *Policy, rsaKey *rsa.PrivateKey, hashType HashType, + marshalingType MarshalingType, + ) { + unsaltedOptions := SigningOptions{ + HashAlgorithm: hashType, + Marshaling: marshalingType, + SigAlgorithm: sigAlgorithm, + } + cryptoHash := CryptoHashMap[hashType] + minSaltLength := p.minRSAPSSSaltLength() + maxSaltLength := p.maxRSAPSSSaltLength(rsaKey.N.BitLen(), cryptoHash) + hash := cryptoHash.New() + hash.Write(input) + input = hash.Sum(nil) + + // 1. Make an "automatic" signature with the given key size and hash algorithm, + // but an automatically chosen salt length. + t.Log(tabs[3], "Make an automatic signature") + sig, err := p.Sign(0, nil, input, hashType, sigAlgorithm, marshalingType) + if err != nil { + // A bit of a hack but FIPS go does not support some hash types + if isUnsupportedGoHashType(hashType, err) { + t.Skip(tabs[4], "skipping test as FIPS Go does not support hash type") + return + } + t.Fatal(tabs[4], "❌ Failed to automatically sign:", err) + } + + // 1.1 Verify this automatic signature using the *inferred* salt length. + autoVerify(4, t, p, input, sig, unsaltedOptions) + + // 1.2. Verify this automatic signature using the *correct, given* salt length. + manualVerify(4, t, p, input, sig, saltOptions(unsaltedOptions, maxSaltLength)) + + // 1.3. Try to verify this automatic signature using *incorrect, given* salt lengths. + t.Log(tabs[4], "Test incorrect salt lengths") + incorrectSaltLengths := []int{minSaltLength, maxSaltLength - 1} + for _, saltLength := range incorrectSaltLengths { + t.Log(tabs[5], "Salt length:", saltLength) + saltedOptions := saltOptions(unsaltedOptions, saltLength) + + verified, _ := p.VerifySignatureWithOptions(nil, input, sig.Signature, &saltedOptions) + if verified { + t.Fatal(tabs[6], "❌ Failed to invalidate", verified, "signature using incorrect salt length:", err) + } + } + + // 2. Rule out boundary, invalid salt lengths. + t.Log(tabs[3], "Test invalid salt lengths") + invalidSaltLengths := []int{minSaltLength - 1, maxSaltLength + 1} + for _, saltLength := range invalidSaltLengths { + t.Log(tabs[4], "Salt length:", saltLength) + saltedOptions := saltOptions(unsaltedOptions, saltLength) + + // 2.1. Fail to sign. + t.Log(tabs[5], "Try to make a manual signature") + _, err := p.SignWithOptions(0, nil, input, &saltedOptions) + if !errors.As(err, &userError) { + t.Fatal(tabs[6], "❌ Failed to reject invalid salt length:", err) + } + + // 2.2. Fail to verify. 
+ t.Log(tabs[5], "Try to verify an automatic signature using an invalid salt length") + _, err = p.VerifySignatureWithOptions(nil, input, sig.Signature, &saltedOptions) + if !errors.As(err, &userError) { + t.Fatal(tabs[6], "❌ Failed to reject invalid salt length:", err) + } + } + + // 3. For three possible valid salt lengths... + t.Log(tabs[3], "Test three possible valid salt lengths") + midSaltLength := mathrand.Intn(maxSaltLength-1) + 1 // [1, maxSaltLength) + validSaltLengths := []int{minSaltLength, midSaltLength, maxSaltLength} + for _, saltLength := range validSaltLengths { + t.Log(tabs[4], "Salt length:", saltLength) + saltedOptions := saltOptions(unsaltedOptions, saltLength) + + // 3.1. Make a "manual" signature with the given key size, hash algorithm, and salt length. + t.Log(tabs[5], "Make a manual signature") + sig, err := p.SignWithOptions(0, nil, input, &saltedOptions) + if err != nil { + t.Fatal(tabs[6], "❌ Failed to manually sign:", err) + } + + // 3.2. Verify this manual signature using the *correct, given* salt length. + manualVerify(6, t, p, input, sig, saltedOptions) + + // 3.3. Verify this manual signature using the *inferred* salt length. + autoVerify(6, t, p, input, sig, unsaltedOptions) + } + } + + rsaKeyTypes := []KeyType{KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096} + testKeys, err := generateTestKeys() + if err != nil { + t.Fatalf("error generating test keys: %s", err) + } + + // 1. For each standard RSA key size 2048, 3072, and 4096... + for _, rsaKeyType := range rsaKeyTypes { + t.Log("Key size: ", rsaKeyType) + p := &Policy{ + Name: fmt.Sprint(rsaKeyType), // NOTE: crucial to create a new key per key size + Type: rsaKeyType, + } + + rsaKeyBytes := testKeys[rsaKeyType] + err := p.Import(ctx, storage, rsaKeyBytes, rand.Reader) + if err != nil { + t.Fatal(tabs[1], "❌ Failed to import key:", err) + } + rsaKeyAny, err := x509.ParsePKCS8PrivateKey(rsaKeyBytes) + if err != nil { + t.Fatalf("error parsing test keys: %s", err) + } + rsaKey := rsaKeyAny.(*rsa.PrivateKey) + + // 2. For each hash algorithm... + for hashAlgorithm, hashType := range HashTypeMap { + t.Log(tabs[1], "Hash algorithm:", hashAlgorithm) + if hashAlgorithm == "none" { + continue + } + + // 3. For each marshaling type... + for marshalingName, marshalingType := range MarshalingTypeMap { + t.Log(tabs[2], "Marshaling type:", marshalingName) + testName := fmt.Sprintf("%s-%s-%s", rsaKeyType, hashAlgorithm, marshalingName) + t.Run(testName, func(t *testing.T) { test_RSA_PSS(t, p, rsaKey, hashType, marshalingType) }) + } + } + } +} + +func Test_RSA_PKCS1(t *testing.T) { + t.Log("Testing RSA PKCS#1v1.5") + + ctx := context.Background() + storage := &logical.InmemStorage{} + // https://crypto.stackexchange.com/a/1222 + input := []byte("Sphinx of black quartz, judge my vow") + sigAlgorithm := "pkcs1v15" + + tabs := make(map[int]string) + for i := 1; i <= 6; i++ { + tabs[i] = strings.Repeat("\t", i) + } + + test_RSA_PKCS1 := func(t *testing.T, p *Policy, rsaKey *rsa.PrivateKey, hashType HashType, + marshalingType MarshalingType, + ) { + unsaltedOptions := SigningOptions{ + HashAlgorithm: hashType, + Marshaling: marshalingType, + SigAlgorithm: sigAlgorithm, + } + cryptoHash := CryptoHashMap[hashType] + + // PKCS#1v1.5 NoOID uses a direct input and assumes it is pre-hashed. + if hashType != 0 { + hash := cryptoHash.New() + hash.Write(input) + input = hash.Sum(nil) + } + + // 1. Make a signature with the given key size and hash algorithm. 
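+		// Unlike PSS, PKCS#1 v1.5 padding is deterministic: there is no salt to
+		// vary, so a single sign/verify round-trip exercises the scheme.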
+		t.Log(tabs[3], "Make an automatic signature")
+		sig, err := p.Sign(0, nil, input, hashType, sigAlgorithm, marshalingType)
+		if err != nil {
+			// A bit of a hack, but FIPS Go does not support some hash types
+			if isUnsupportedGoHashType(hashType, err) {
+				t.Skip(tabs[4], "skipping test as FIPS Go does not support hash type")
+				return
+			}
+			t.Fatal(tabs[4], "❌ Failed to automatically sign:", err)
+		}
+
+		// 1.1 Verify this signature using the *inferred* salt length.
+		autoVerify(4, t, p, input, sig, unsaltedOptions)
+	}
+
+	rsaKeyTypes := []KeyType{KeyType_RSA2048, KeyType_RSA3072, KeyType_RSA4096}
+	testKeys, err := generateTestKeys()
+	if err != nil {
+		t.Fatalf("error generating test keys: %s", err)
+	}
+
+	// 1. For each standard RSA key size 2048, 3072, and 4096...
+	for _, rsaKeyType := range rsaKeyTypes {
+		t.Log("Key size: ", rsaKeyType)
+		p := &Policy{
+			Name: fmt.Sprint(rsaKeyType), // NOTE: crucial to create a new key per key size
+			Type: rsaKeyType,
+		}
+
+		rsaKeyBytes := testKeys[rsaKeyType]
+		err := p.Import(ctx, storage, rsaKeyBytes, rand.Reader)
+		if err != nil {
+			t.Fatal(tabs[1], "❌ Failed to import key:", err)
+		}
+		rsaKeyAny, err := x509.ParsePKCS8PrivateKey(rsaKeyBytes)
+		if err != nil {
+			t.Fatalf("error parsing test keys: %s", err)
+		}
+		rsaKey := rsaKeyAny.(*rsa.PrivateKey)
+
+		// 2. For each hash algorithm...
+		for hashAlgorithm, hashType := range HashTypeMap {
+			t.Log(tabs[1], "Hash algorithm:", hashAlgorithm)
+
+			// 3. For each marshaling type...
+			for marshalingName, marshalingType := range MarshalingTypeMap {
+				t.Log(tabs[2], "Marshaling type:", marshalingName)
+				testName := fmt.Sprintf("%s-%s-%s", rsaKeyType, hashAlgorithm, marshalingName)
+				t.Run(testName, func(t *testing.T) { test_RSA_PKCS1(t, p, rsaKey, hashType, marshalingType) })
+			}
+		}
+	}
+}
+
+// Normal Go builds support all the hash functions for RSA_PSS signatures, but
+// the FIPS Go build does not currently support the SHA-3 hashes, as FIPS 140-2
+// does not accept them.
+func isUnsupportedGoHashType(hashType HashType, err error) bool {
+	switch hashType {
+	case HashTypeSHA3224, HashTypeSHA3256, HashTypeSHA3384, HashTypeSHA3512:
+		return strings.Contains(err.Error(), "unsupported hash function")
+	}
+
+	return false
+}
diff --git a/sdk/helper/keysutil/transit_lru.go b/sdk/helper/keysutil/transit_lru.go
new file mode 100644
index 0000000..66ea66d
--- /dev/null
+++ b/sdk/helper/keysutil/transit_lru.go
@@ -0,0 +1,32 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package keysutil
+
+import lru "github.com/hashicorp/golang-lru"
+
+type TransitLRU struct {
+	size int
+	lru  *lru.TwoQueueCache
+}
+
+func NewTransitLRU(size int) (*TransitLRU, error) {
+	lru, err := lru.New2Q(size)
+	return &TransitLRU{lru: lru, size: size}, err
+}
+
+func (c *TransitLRU) Delete(key interface{}) {
+	c.lru.Remove(key)
+}
+
+func (c *TransitLRU) Load(key interface{}) (value interface{}, ok bool) {
+	return c.lru.Get(key)
+}
+
+func (c *TransitLRU) Store(key, value interface{}) {
+	c.lru.Add(key, value)
+}
+
+func (c *TransitLRU) Size() int {
+	return c.size
+}
diff --git a/sdk/helper/keysutil/transit_syncmap.go b/sdk/helper/keysutil/transit_syncmap.go
new file mode 100644
index 0000000..fddcf70
--- /dev/null
+++ b/sdk/helper/keysutil/transit_syncmap.go
@@ -0,0 +1,32 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package keysutil
+
+import (
+	"sync"
+)
+
+type TransitSyncMap struct {
+	syncmap sync.Map
+}
+
+func NewTransitSyncMap() *TransitSyncMap {
+	return &TransitSyncMap{syncmap: sync.Map{}}
+}
+
+func (c *TransitSyncMap) Delete(key interface{}) {
+	c.syncmap.Delete(key)
+}
+
+func (c *TransitSyncMap) Load(key interface{}) (value interface{}, ok bool) {
+	return c.syncmap.Load(key)
+}
+
+func (c *TransitSyncMap) Store(key, value interface{}) {
+	c.syncmap.Store(key, value)
+}
+
+func (c *TransitSyncMap) Size() int {
+	return 0
+}
diff --git a/sdk/helper/keysutil/util.go b/sdk/helper/keysutil/util.go
new file mode 100644
index 0000000..94a56d4
--- /dev/null
+++ b/sdk/helper/keysutil/util.go
@@ -0,0 +1,151 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package keysutil
+
+import (
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"errors"
+	"fmt"
+
+	"golang.org/x/crypto/ed25519"
+)
+
+// pkcs8 reflects an ASN.1, PKCS #8 PrivateKey. See
+// ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-8/pkcs-8v1_2.asn
+// and RFC 5208.
+//
+// Copied from Go: https://github.com/golang/go/blob/master/src/crypto/x509/pkcs8.go#L17-L80
+type pkcs8 struct {
+	Version    int
+	Algo       pkix.AlgorithmIdentifier
+	PrivateKey []byte
+	// optional attributes omitted.
+}
+
+// ecPrivateKey reflects an ASN.1 Elliptic Curve Private Key Structure.
+// References:
+//
+//	RFC 5915
+//	SEC1 - http://www.secg.org/sec1-v2.pdf
+//
+// Per RFC 5915 the NamedCurveOID is marked as ASN.1 OPTIONAL, however in
+// practice it is usually present.
+//
+// Copied from Go: https://github.com/golang/go/blob/master/src/crypto/x509/sec1.go#L18-L31
+type ecPrivateKey struct {
+	Version       int
+	PrivateKey    []byte
+	NamedCurveOID asn1.ObjectIdentifier `asn1:"optional,explicit,tag:0"`
+
+	// Because the PKCS8/RFC 5915 encoding of the Ed25519 key uses the
+	// RFC 8032 Ed25519 seed format, we can ignore the public key parameter
+	// and infer it later.
+}
+
+var (
+	// See crypto/x509/x509.go in the Go toolchain source distribution.
+	oidPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1}
+
+	// NSS encodes Ed25519 private keys with the OID 1.3.6.1.4.1.11591.15.1
+	// from https://tools.ietf.org/html/draft-josefsson-pkix-newcurves-01.
+	// See https://github.com/nss-dev/nss/blob/NSS_3_79_BRANCH/lib/util/secoid.c#L600-L603.
+	oidNSSPKIXEd25519 = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11591, 15, 1}
+
+	// Other implementations may use the OID 1.3.101.110 from
+	// https://datatracker.ietf.org/doc/html/rfc8410.
+	oidRFC8410Ed25519 = asn1.ObjectIdentifier{1, 3, 101, 110}
+
+	// See crypto/x509/x509.go in the Go toolchain source distribution.
+	oidSignatureRSAPSS = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 10}
+)
+
+func isEd25519OID(oid asn1.ObjectIdentifier) bool {
+	return oidNSSPKIXEd25519.Equal(oid) || oidRFC8410Ed25519.Equal(oid)
+}
+
+// ParsePKCS8Ed25519PrivateKey parses an unencrypted Ed25519 private key in
+// PKCS #8-like, ASN.1 DER form, accepting either the NSS or the RFC 8410
+// curve OID.
+//
+// It returns an ed25519.PrivateKey derived from the embedded RFC 8032 seed.
+//
+// This kind of key is commonly encoded in PEM blocks of type "PRIVATE KEY".
+func ParsePKCS8Ed25519PrivateKey(der []byte) (key interface{}, err error) {
+	var privKey pkcs8
+	var ed25519Key ecPrivateKey
+
+	var checkedOID bool
+
+	// If this err is nil, we assume we directly have an ECPrivateKey structure
+	// with explicit OID; ignore this error for now and return the latter err
+	// instead if neither parse correctly.
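+	// Two layouts are accepted: a PKCS#8 wrapper whose inner PrivateKey octets
+	// hold an RFC 5915-style ECPrivateKey, or that ECPrivateKey structure on
+	// its own; either way the 32-byte RFC 8032 seed is extracted below.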
+	if _, err := asn1.Unmarshal(der, &privKey); err == nil {
+		switch {
+		case privKey.Algo.Algorithm.Equal(oidPublicKeyECDSA):
+			bytes := privKey.Algo.Parameters.FullBytes
+			namedCurveOID := new(asn1.ObjectIdentifier)
+			if _, err := asn1.Unmarshal(bytes, namedCurveOID); err != nil {
+				namedCurveOID = nil
+			}
+
+			if namedCurveOID == nil || !isEd25519OID(*namedCurveOID) {
+				return nil, errors.New("keysutil: failed to parse private key (invalid, non-ed25519 curve parameter OID)")
+			}
+
+			der = privKey.PrivateKey
+			checkedOID = true
+		default:
+			// The Go standard library already parses RFC 8410 keys; the
+			// inclusion of the OID here is in case it is used with the
+			// regular ECDSA PrivateKey structure, rather than the struct
+			// recognized by the Go standard library.
+			return nil, errors.New("keysutil: failed to parse key as ed25519 private key")
+		}
+	}
+
+	_, err = asn1.Unmarshal(der, &ed25519Key)
+	if err != nil {
+		return nil, fmt.Errorf("keysutil: failed to parse private key (inner Ed25519 ECPrivateKey format was incorrect): %v", err)
+	}
+
+	if !checkedOID && !isEd25519OID(ed25519Key.NamedCurveOID) {
+		return nil, errors.New("keysutil: failed to parse private key (invalid, non-ed25519 curve parameter OID)")
+	}
+
+	if len(ed25519Key.PrivateKey) != 32 {
+		return nil, fmt.Errorf("keysutil: failed to parse private key as ed25519 private key: got %v bytes but expected %v byte RFC 8032 seed", len(ed25519Key.PrivateKey), ed25519.SeedSize)
+	}
+
+	return ed25519.NewKeyFromSeed(ed25519Key.PrivateKey), nil
+}
+
+// ParsePKCS8RSAPSSPrivateKey parses an unencrypted RSA/PSS private key in
+// PKCS #8, ASN.1 DER form.
+//
+// This helper only supports RSA/PSS keys (with OID 1.2.840.113549.1.1.10).
+//
+// It returns a *rsa.PrivateKey parsed from the inner PKCS #1 structure.
+//
+// This kind of key is commonly encoded in PEM blocks of type "PRIVATE KEY".
+func ParsePKCS8RSAPSSPrivateKey(der []byte) (key interface{}, err error) {
+	var privKey pkcs8
+	if _, err := asn1.Unmarshal(der, &privKey); err == nil {
+		switch {
+		case privKey.Algo.Algorithm.Equal(oidSignatureRSAPSS):
+			// Fall through; there's no parameters here unlike ECDSA
+			// containers, so we can go to parsing the inner rsaPrivateKey
+			// object.
+		default:
+			return nil, errors.New("keysutil: failed to parse key as RSA PSS private key")
+		}
+	}
+
+	key, err = x509.ParsePKCS1PrivateKey(privKey.PrivateKey)
+	if err != nil {
+		return nil, fmt.Errorf("keysutil: failed to parse inner RSA PSS private key: %w", err)
+	}
+
+	return key, nil
+}
diff --git a/sdk/helper/ldaputil/client.go b/sdk/helper/ldaputil/client.go
new file mode 100644
index 0000000..a1901fd
--- /dev/null
+++ b/sdk/helper/ldaputil/client.go
@@ -0,0 +1,752 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package ldaputil
+
+import (
+	"bytes"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/binary"
+	"encoding/hex"
+	"fmt"
+	"math"
+	"net"
+	"net/url"
+	"strings"
+	"sync"
+	"text/template"
+	"time"
+
+	"github.com/go-ldap/ldap/v3"
+	hclog "github.com/hashicorp/go-hclog"
+	multierror "github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/go-secure-stdlib/tlsutil"
+)
+
+type Client struct {
+	Logger hclog.Logger
+	LDAP   LDAP
+}
+
+func (c *Client) DialLDAP(cfg *ConfigEntry) (Connection, error) {
+	var retErr *multierror.Error
+	var conn Connection
+	urls := strings.Split(cfg.Url, ",")
+
+	for _, uut := range urls {
+		u, err := url.Parse(uut)
+		if err != nil {
+			retErr = multierror.Append(retErr, fmt.Errorf("error parsing url %q: %w", uut, err))
+			continue
+		}
+		host, port, err := net.SplitHostPort(u.Host)
+		if err != nil {
+			host = u.Host
+		}
+
+		var tlsConfig *tls.Config
+		dialer := net.Dialer{
+			Timeout: time.Duration(cfg.ConnectionTimeout) * time.Second,
+		}
+
+		switch u.Scheme {
+		case "ldap":
+			if port == "" {
+				port = "389"
+			}
+
+			fullAddr := fmt.Sprintf("%s://%s", u.Scheme, net.JoinHostPort(host, port))
+			opt := ldap.DialWithDialer(&dialer)
+
+			conn, err = c.LDAP.DialURL(fullAddr, opt)
+			if err != nil {
+				break
+			}
+			if conn == nil {
+				err = fmt.Errorf("empty connection after dialing")
+				break
+			}
+			if cfg.StartTLS {
+				tlsConfig, err = getTLSConfig(cfg, host)
+				if err != nil {
+					break
+				}
+				err = conn.StartTLS(tlsConfig)
+			}
+		case "ldaps":
+			if port == "" {
+				port = "636"
+			}
+			tlsConfig, err = getTLSConfig(cfg, host)
+			if err != nil {
+				break
+			}
+
+			fullAddr := fmt.Sprintf("%s://%s", u.Scheme, net.JoinHostPort(host, port))
+			opt := ldap.DialWithDialer(&dialer)
+			tlsOpt := ldap.DialWithTLSConfig(tlsConfig)
+
+			conn, err = c.LDAP.DialURL(fullAddr, opt, tlsOpt)
+			if err != nil {
+				break
+			}
+		default:
+			retErr = multierror.Append(retErr, fmt.Errorf("invalid LDAP scheme in url %q", uut))
+			continue
+		}
+		if err == nil {
+			if retErr != nil {
+				if c.Logger.IsDebug() {
+					c.Logger.Debug("errors connecting to some hosts", "error", retErr.Error())
+				}
+			}
+			retErr = nil
+			break
+		}
+		retErr = multierror.Append(retErr, fmt.Errorf("error connecting to host %q: %w", uut, err))
+	}
+	if retErr != nil {
+		return nil, retErr
+	}
+	if timeout := cfg.RequestTimeout; timeout > 0 {
+		conn.SetTimeout(time.Duration(timeout) * time.Second)
+	}
+	return conn, nil
+}
+
+/*
+ * Searches for a username in the ldap server, returning a minimal subset of the
+ * user's attributes (if found)
+ */
+func (c *Client) makeLdapSearchRequest(cfg *ConfigEntry, conn Connection, username string) (*ldap.SearchResult, error) {
+	// Note: The logic below drives the logic in ConfigEntry.Validate().
+	// If updated, please update there as well.
+	var err error
+	if cfg.BindPassword != "" {
+		err = conn.Bind(cfg.BindDN, cfg.BindPassword)
+	} else {
+		err = conn.UnauthenticatedBind(cfg.BindDN)
+	}
+	if err != nil {
+		return nil, fmt.Errorf("LDAP bind (service) failed: %w", err)
+	}
+
+	renderedFilter, err := c.RenderUserSearchFilter(cfg, username)
+	if err != nil {
+		return nil, err
+	}
+
+	if c.Logger.IsDebug() {
+		c.Logger.Debug("discovering user", "userdn", cfg.UserDN, "filter", renderedFilter)
+	}
+	ldapRequest := &ldap.SearchRequest{
+		BaseDN:       cfg.UserDN,
+		DerefAliases: ldapDerefAliasMap[cfg.DerefAliases],
+		Scope:        ldap.ScopeWholeSubtree,
+		Filter:       renderedFilter,
+		SizeLimit:    2, // Should be only 1 result. Any number larger (2 or more) means access denied.
+		Attributes: []string{
+			cfg.UserAttr, // Return only needed attributes
+		},
+	}
+
+	result, err := conn.Search(ldapRequest)
+	if err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+/*
+ * Discover and return the bind string for the user attempting to authenticate, as well as the
+ * value to use for the identity alias.
+ * This is handled in one of two ways:
+ *
+ * 1. If DiscoverDN is set, the user object will be searched for using userdn (base search path)
+ *    and userattr (the attribute that maps to the provided username) or user search filter.
+ *    The bind will either be anonymous or use binddn and bindpassword if they were provided.
+ * 2. If upndomain is set, the user DN and alias attribute are constructed as 'username@upndomain'.
+ *    See https://msdn.microsoft.com/en-us/library/cc223499.aspx
+ *
+ */
+func (c *Client) GetUserBindDN(cfg *ConfigEntry, conn Connection, username string) (string, error) {
+	bindDN := ""
+
+	// Note: The logic below drives the logic in ConfigEntry.Validate().
+	// If updated, please update there as well.
+	if cfg.DiscoverDN || (cfg.BindDN != "" && cfg.BindPassword != "") {
+
+		result, err := c.makeLdapSearchRequest(cfg, conn, username)
+		if err != nil {
+			return bindDN, fmt.Errorf("LDAP search for binddn failed: %w", err)
+		}
+		if len(result.Entries) != 1 {
+			return bindDN, fmt.Errorf("LDAP search for binddn returned 0 results or was not unique")
+		}
+
+		bindDN = result.Entries[0].DN
+
+	} else {
+		if cfg.UPNDomain != "" {
+			bindDN = fmt.Sprintf("%s@%s", EscapeLDAPValue(username), cfg.UPNDomain)
+		} else {
+			bindDN = fmt.Sprintf("%s=%s,%s", cfg.UserAttr, EscapeLDAPValue(username), cfg.UserDN)
+		}
+	}
+
+	return bindDN, nil
+}
+
+func (c *Client) RenderUserSearchFilter(cfg *ConfigEntry, username string) (string, error) {
+	// The UserFilter can be blank if not set, or when running this version of
+	// the code against an existing ldap configuration
+	if cfg.UserFilter == "" {
+		cfg.UserFilter = "({{.UserAttr}}={{.Username}})"
+	}
+
+	// If userfilter was defined, resolve it as a Go template and use the query to
+	// find the login user
+	if c.Logger.IsDebug() {
+		c.Logger.Debug("compiling search filter", "search_filter", cfg.UserFilter)
+	}
+
+	// Parse the configuration as a template.
+	// Example template "({{.UserAttr}}={{.Username}})"
+	t, err := template.New("queryTemplate").Parse(cfg.UserFilter)
+	if err != nil {
+		return "", fmt.Errorf("LDAP search failed due to template compilation error: %w", err)
+	}
+
+	// Build context to pass to template - we will be exposing UserAttr and Username.
+	context := struct {
+		UserAttr string
+		Username string
+	}{
+		ldap.EscapeFilter(cfg.UserAttr),
+		ldap.EscapeFilter(username),
+	}
+	if cfg.UPNDomain != "" {
+		context.UserAttr = "userPrincipalName"
+		// Intentionally calling EscapeFilter(...) (vs EscapeValue) since the
+		// username is being injected into a search filter.
+		// As an untrusted string, the username must be escaped according to RFC
+		// 4515, in order to prevent attackers from injecting characters that could modify the filter
+		context.Username = fmt.Sprintf("%s@%s", ldap.EscapeFilter(username), cfg.UPNDomain)
+	}
+
+	// Execute the template. Note that the template context contains escaped input and does
+	// not provide behavior via functions. Additionally, no function map has been provided
+	// during template initialization. The only template functions available during execution
+	// are the predefined global functions: https://pkg.go.dev/text/template#hdr-Functions
+	var renderedFilter bytes.Buffer
+	if err := t.Execute(&renderedFilter, context); err != nil {
+		return "", fmt.Errorf("LDAP search failed due to template parsing error: %w", err)
+	}
+
+	return renderedFilter.String(), nil
+}
+
+/*
+ * Returns the value to be used for the entity alias of this user.
+ * This is handled in one of two ways:
+ *
+ * 1. If DiscoverDN is set, the user will be searched for using userdn (base search path)
+ *    and userattr (the attribute that maps to the provided username) or user search filter.
+ *    The bind will either be anonymous or use binddn and bindpassword if they were provided.
+ * 2. If upndomain is set, the alias attribute is constructed as 'username@upndomain'.
+ *
+ */
+func (c *Client) GetUserAliasAttributeValue(cfg *ConfigEntry, conn Connection, username string) (string, error) {
+	aliasAttributeValue := ""
+
+	// Note: The logic below drives the logic in ConfigEntry.Validate().
+	// If updated, please update there as well.
+	if cfg.DiscoverDN || (cfg.BindDN != "" && cfg.BindPassword != "") {
+
+		result, err := c.makeLdapSearchRequest(cfg, conn, username)
+		if err != nil {
+			return aliasAttributeValue, fmt.Errorf("LDAP search for entity alias attribute failed: %w", err)
+		}
+		if len(result.Entries) != 1 {
+			return aliasAttributeValue, fmt.Errorf("LDAP search for entity alias attribute returned 0 results or was not unique")
+		}
+
+		if len(result.Entries[0].Attributes) != 1 {
+			return aliasAttributeValue, fmt.Errorf("LDAP attribute missing for entity alias mapping")
+		}
+
+		if len(result.Entries[0].Attributes[0].Values) != 1 {
+			return aliasAttributeValue, fmt.Errorf("LDAP entity alias attribute %s empty or not unique for entity alias mapping", cfg.UserAttr)
+		}
+
+		aliasAttributeValue = result.Entries[0].Attributes[0].Values[0]
+	} else {
+		if cfg.UPNDomain != "" {
+			aliasAttributeValue = fmt.Sprintf("%s@%s", EscapeLDAPValue(username), cfg.UPNDomain)
+		} else {
+			aliasAttributeValue = fmt.Sprintf("%s=%s,%s", cfg.UserAttr, EscapeLDAPValue(username), cfg.UserDN)
+		}
+	}
+
+	return aliasAttributeValue, nil
+}
+
+/*
+ * Returns the DN of the object representing the authenticated user.
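+ * When upndomain is configured, the DN is discovered with a subtree search for
+ * (userPrincipalName=username@upndomain) under userdn; otherwise the bindDN
+ * resolved earlier is returned unchanged.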
+ */ +func (c *Client) GetUserDN(cfg *ConfigEntry, conn Connection, bindDN, username string) (string, error) { + userDN := "" + if cfg.UPNDomain != "" { + // Find the distinguished name for the user if userPrincipalName used for login + filter := fmt.Sprintf("(userPrincipalName=%s@%s)", EscapeLDAPValue(username), cfg.UPNDomain) + if c.Logger.IsDebug() { + c.Logger.Debug("searching upn", "userdn", cfg.UserDN, "filter", filter) + } + result, err := conn.Search(&ldap.SearchRequest{ + BaseDN: cfg.UserDN, + Scope: ldap.ScopeWholeSubtree, + DerefAliases: ldapDerefAliasMap[cfg.DerefAliases], + Filter: filter, + SizeLimit: math.MaxInt32, + }) + if err != nil { + return userDN, fmt.Errorf("LDAP search failed for detecting user: %w", err) + } + for _, e := range result.Entries { + userDN = e.DN + } + } else { + userDN = bindDN + } + + return userDN, nil +} + +func (c *Client) performLdapFilterGroupsSearch(cfg *ConfigEntry, conn Connection, userDN string, username string) ([]*ldap.Entry, error) { + if cfg.GroupFilter == "" { + c.Logger.Warn("groupfilter is empty, will not query server") + return make([]*ldap.Entry, 0), nil + } + + if cfg.GroupDN == "" { + c.Logger.Warn("groupdn is empty, will not query server") + return make([]*ldap.Entry, 0), nil + } + + // If groupfilter was defined, resolve it as a Go template and use the query for + // returning the user's groups + if c.Logger.IsDebug() { + c.Logger.Debug("compiling group filter", "group_filter", cfg.GroupFilter) + } + + // Parse the configuration as a template. + // Example template "(&(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}}))" + t, err := template.New("queryTemplate").Parse(cfg.GroupFilter) + if err != nil { + return nil, fmt.Errorf("LDAP search failed due to template compilation error: %w", err) + } + + // Build context to pass to template - we will be exposing UserDn and Username. + context := struct { + UserDN string + Username string + }{ + ldap.EscapeFilter(userDN), + ldap.EscapeFilter(username), + } + + var renderedQuery bytes.Buffer + if err := t.Execute(&renderedQuery, context); err != nil { + return nil, fmt.Errorf("LDAP search failed due to template parsing error: %w", err) + } + + if c.Logger.IsDebug() { + c.Logger.Debug("searching", "groupdn", cfg.GroupDN, "rendered_query", renderedQuery.String()) + } + + result, err := conn.Search(&ldap.SearchRequest{ + BaseDN: cfg.GroupDN, + Scope: ldap.ScopeWholeSubtree, + DerefAliases: ldapDerefAliasMap[cfg.DerefAliases], + Filter: renderedQuery.String(), + Attributes: []string{ + cfg.GroupAttr, + }, + SizeLimit: math.MaxInt32, + }) + if err != nil { + return nil, fmt.Errorf("LDAP search failed: %w", err) + } + + return result.Entries, nil +} + +func (c *Client) performLdapFilterGroupsSearchPaging(cfg *ConfigEntry, conn PagingConnection, userDN string, username string) ([]*ldap.Entry, error) { + if cfg.GroupFilter == "" { + c.Logger.Warn("groupfilter is empty, will not query server") + return make([]*ldap.Entry, 0), nil + } + + if cfg.GroupDN == "" { + c.Logger.Warn("groupdn is empty, will not query server") + return make([]*ldap.Entry, 0), nil + } + + // If groupfilter was defined, resolve it as a Go template and use the query for + // returning the user's groups + if c.Logger.IsDebug() { + c.Logger.Debug("compiling group filter", "group_filter", cfg.GroupFilter) + } + + // Parse the configuration as a template. 
+	// Example template "(&(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}}))"
+	t, err := template.New("queryTemplate").Parse(cfg.GroupFilter)
+	if err != nil {
+		return nil, fmt.Errorf("LDAP search failed due to template compilation error: %w", err)
+	}
+
+	// Build context to pass to template - we will be exposing UserDN and Username.
+	context := struct {
+		UserDN   string
+		Username string
+	}{
+		ldap.EscapeFilter(userDN),
+		ldap.EscapeFilter(username),
+	}
+
+	// Execute the template. Note that the template context contains escaped input and does
+	// not provide behavior via functions. Additionally, no function map has been provided
+	// during template initialization. The only template functions available during execution
+	// are the predefined global functions: https://pkg.go.dev/text/template#hdr-Functions
+	var renderedQuery bytes.Buffer
+	if err := t.Execute(&renderedQuery, context); err != nil {
+		return nil, fmt.Errorf("LDAP search failed due to template parsing error: %w", err)
+	}
+
+	if c.Logger.IsDebug() {
+		c.Logger.Debug("searching", "groupdn", cfg.GroupDN, "rendered_query", renderedQuery.String())
+	}
+
+	result, err := conn.SearchWithPaging(&ldap.SearchRequest{
+		BaseDN:       cfg.GroupDN,
+		Scope:        ldap.ScopeWholeSubtree,
+		DerefAliases: ldapDerefAliasMap[cfg.DerefAliases],
+		Filter:       renderedQuery.String(),
+		Attributes: []string{
+			cfg.GroupAttr,
+		},
+		SizeLimit: math.MaxInt32,
+	}, uint32(cfg.MaximumPageSize))
+	if err != nil {
+		return nil, fmt.Errorf("LDAP search failed: %w", err)
+	}
+
+	return result.Entries, nil
+}
+
+func sidBytesToString(b []byte) (string, error) {
+	reader := bytes.NewReader(b)
+
+	var revision, subAuthorityCount uint8
+	var identifierAuthorityParts [3]uint16
+
+	if err := binary.Read(reader, binary.LittleEndian, &revision); err != nil {
+		return "", fmt.Errorf("SID %#v convert failed reading Revision: %w", b, err)
+	}
+
+	if err := binary.Read(reader, binary.LittleEndian, &subAuthorityCount); err != nil {
+		return "", fmt.Errorf("SID %#v convert failed reading SubAuthorityCount: %w", b, err)
+	}
+
+	if err := binary.Read(reader, binary.BigEndian, &identifierAuthorityParts); err != nil {
+		return "", fmt.Errorf("SID %#v convert failed reading IdentifierAuthority: %w", b, err)
+	}
+	identifierAuthority := (uint64(identifierAuthorityParts[0]) << 32) + (uint64(identifierAuthorityParts[1]) << 16) + uint64(identifierAuthorityParts[2])
+
+	subAuthority := make([]uint32, subAuthorityCount)
+	if err := binary.Read(reader, binary.LittleEndian, &subAuthority); err != nil {
+		return "", fmt.Errorf("SID %#v convert failed reading SubAuthority: %w", b, err)
+	}
+
+	result := fmt.Sprintf("S-%d-%d", revision, identifierAuthority)
+	for _, subAuthorityPart := range subAuthority {
+		result += fmt.Sprintf("-%d", subAuthorityPart)
+	}
+
+	return result, nil
+}
+
+func (c *Client) performLdapTokenGroupsSearch(cfg *ConfigEntry, conn Connection, userDN string) ([]*ldap.Entry, error) {
+	var wg sync.WaitGroup
+	var lock sync.Mutex
+	taskChan := make(chan string)
+	maxWorkers := 10
+
+	result, err := conn.Search(&ldap.SearchRequest{
+		BaseDN:       userDN,
+		Scope:        ldap.ScopeBaseObject,
+		DerefAliases: ldapDerefAliasMap[cfg.DerefAliases],
+		Filter:       "(objectClass=*)",
+		Attributes: []string{
+			"tokenGroups",
+		},
+		SizeLimit: 1,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("LDAP search failed: %w", err)
+	}
+	if len(result.Entries) == 0 {
+		c.Logger.Warn("unable to read object for group attributes", "userdn", userDN, "groupattr", cfg.GroupAttr)
attributes", "userdn", userDN, "groupattr", cfg.GroupAttr) + return make([]*ldap.Entry, 0), nil + } + + userEntry := result.Entries[0] + groupAttrValues := userEntry.GetRawAttributeValues("tokenGroups") + groupEntries := make([]*ldap.Entry, 0, len(groupAttrValues)) + + for i := 0; i < maxWorkers; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + for sid := range taskChan { + groupResult, err := conn.Search(&ldap.SearchRequest{ + BaseDN: fmt.Sprintf("", sid), + Scope: ldap.ScopeBaseObject, + DerefAliases: ldapDerefAliasMap[cfg.DerefAliases], + Filter: "(objectClass=*)", + Attributes: []string{ + "1.1", // RFC no attributes + }, + SizeLimit: 1, + }) + if err != nil { + c.Logger.Warn("unable to read the group sid", "sid", sid) + continue + } + + if len(groupResult.Entries) == 0 { + c.Logger.Warn("unable to find the group", "sid", sid) + continue + } + + lock.Lock() + groupEntries = append(groupEntries, groupResult.Entries[0]) + lock.Unlock() + } + }() + } + + for _, sidBytes := range groupAttrValues { + sidString, err := sidBytesToString(sidBytes) + if err != nil { + c.Logger.Warn("unable to read sid", "err", err) + continue + } + taskChan <- sidString + } + + close(taskChan) + wg.Wait() + + return groupEntries, nil +} + +/* + * getLdapGroups queries LDAP and returns a slice describing the set of groups the authenticated user is a member of. + * + * If cfg.UseTokenGroups is true then the search is performed directly on the userDN. + * The values of those attributes are converted to string SIDs, and then looked up to get ldap.Entry objects. + * Otherwise, the search query is constructed according to cfg.GroupFilter, and run in context of cfg.GroupDN. + * Groups will be resolved from the query results by following the attribute defined in cfg.GroupAttr. + * + * cfg.GroupFilter is a go template and is compiled with the following context: [UserDN, Username] + * UserDN - The DN of the authenticated user + * Username - The Username of the authenticated user + * + * Example: + * cfg.GroupFilter = "(&(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}}))" + * cfg.GroupDN = "OU=Groups,DC=myorg,DC=com" + * cfg.GroupAttr = "cn" + * + * NOTE - If cfg.GroupFilter is empty, no query is performed and an empty result slice is returned. 
+ *
+ */
+func (c *Client) GetLdapGroups(cfg *ConfigEntry, conn Connection, userDN string, username string) ([]string, error) {
+	var entries []*ldap.Entry
+	var err error
+	if cfg.UseTokenGroups {
+		entries, err = c.performLdapTokenGroupsSearch(cfg, conn, userDN)
+	} else {
+		if paging, ok := conn.(PagingConnection); ok && cfg.MaximumPageSize > 0 {
+			entries, err = c.performLdapFilterGroupsSearchPaging(cfg, paging, userDN, username)
+		} else {
+			entries, err = c.performLdapFilterGroupsSearch(cfg, conn, userDN, username)
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	// collect the groups in a string/bool map so that duplicate names are dropped
+	ldapMap := make(map[string]bool)
+
+	for _, e := range entries {
+		dn, err := ldap.ParseDN(e.DN)
+		if err != nil || len(dn.RDNs) == 0 {
+			continue
+		}
+
+		// Enumerate attributes of each result, parse out CN and add as group
+		values := e.GetAttributeValues(cfg.GroupAttr)
+		if len(values) > 0 {
+			for _, val := range values {
+				groupCN := getCN(cfg, val)
+				ldapMap[groupCN] = true
+			}
+		} else {
+			// If groupattr didn't resolve, use self (enumerating group objects)
+			groupCN := getCN(cfg, e.DN)
+			ldapMap[groupCN] = true
+		}
+	}
+
+	ldapGroups := make([]string, 0, len(ldapMap))
+	for key := range ldapMap {
+		ldapGroups = append(ldapGroups, key)
+	}
+
+	return ldapGroups, nil
+}
+
+// EscapeLDAPValue is exported because a plugin uses it outside this package.
+// EscapeLDAPValue will properly escape the input string as an LDAP value.
+// RFC 4514 states the following must be escaped:
+// - leading space or hash
+// - trailing space
+// - special characters '"', '+', ',', ';', '<', '>', '\\'
+// - any other byte outside printable ASCII, which is hex-escaped
+func EscapeLDAPValue(input string) string {
+	if input == "" {
+		return ""
+	}
+
+	buf := bytes.Buffer{}
+
+	escFn := func(c byte) {
+		buf.WriteByte('\\')
+		buf.WriteByte(c)
+	}
+
+	inputLen := len(input)
+	for i := 0; i < inputLen; i++ {
+		char := input[i]
+		switch {
+		case i == 0 && (char == ' ' || char == '#'):
+			// leading space or hash.
+			escFn(char)
+			continue
+		case i == inputLen-1 && char == ' ':
+			// trailing space.
+			escFn(char)
+			continue
+		case specialChar(char):
+			escFn(char)
+			continue
+		case char < ' ' || char > '~':
+			// anything that's not between the ascii space and tilde must be hex
+			buf.WriteByte('\\')
+			buf.WriteString(hex.EncodeToString([]byte{char}))
+			continue
+		default:
+			// everything remaining, doesn't need to be escaped
+			buf.WriteByte(char)
+		}
+	}
+	return buf.String()
+}
+
+func specialChar(char byte) bool {
+	switch char {
+	case '"', '+', ',', ';', '<', '>', '\\':
+		return true
+	default:
+		return false
+	}
+}
+
+/*
+ * Parses a distinguished name and returns the CN portion.
+ * Given a non-conforming string (such as an already-extracted CN),
+ * it will be returned as-is.
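+ * Example: getCN(cfg, "CN=admins,OU=Groups,DC=example,DC=com") returns "admins",
+ * while getCN(cfg, "admins") is returned unchanged.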
+ */ +func getCN(cfg *ConfigEntry, dn string) string { + parsedDN, err := ldap.ParseDN(dn) + if err != nil || len(parsedDN.RDNs) == 0 { + // It was already a CN, return as-is + return dn + } + + for _, rdn := range parsedDN.RDNs { + for _, rdnAttr := range rdn.Attributes { + if cfg.UsePre111GroupCNBehavior == nil || *cfg.UsePre111GroupCNBehavior { + if rdnAttr.Type == "CN" { + return rdnAttr.Value + } + } else { + if strings.EqualFold(rdnAttr.Type, "CN") { + return rdnAttr.Value + } + } + } + } + + // Default, return self + return dn +} + +func getTLSConfig(cfg *ConfigEntry, host string) (*tls.Config, error) { + tlsConfig := &tls.Config{ + ServerName: host, + } + + if cfg.TLSMinVersion != "" { + tlsMinVersion, ok := tlsutil.TLSLookup[cfg.TLSMinVersion] + if !ok { + return nil, fmt.Errorf("invalid 'tls_min_version' in config") + } + tlsConfig.MinVersion = tlsMinVersion + } + + if cfg.TLSMaxVersion != "" { + tlsMaxVersion, ok := tlsutil.TLSLookup[cfg.TLSMaxVersion] + if !ok { + return nil, fmt.Errorf("invalid 'tls_max_version' in config") + } + tlsConfig.MaxVersion = tlsMaxVersion + } + + if cfg.InsecureTLS { + tlsConfig.InsecureSkipVerify = true + } + if cfg.Certificate != "" { + caPool := x509.NewCertPool() + ok := caPool.AppendCertsFromPEM([]byte(cfg.Certificate)) + if !ok { + return nil, fmt.Errorf("could not append CA certificate") + } + tlsConfig.RootCAs = caPool + } + if cfg.ClientTLSCert != "" && cfg.ClientTLSKey != "" { + certificate, err := tls.X509KeyPair([]byte(cfg.ClientTLSCert), []byte(cfg.ClientTLSKey)) + if err != nil { + return nil, fmt.Errorf("failed to parse client X509 key pair: %w", err) + } + tlsConfig.Certificates = append(tlsConfig.Certificates, certificate) + } else if cfg.ClientTLSCert != "" || cfg.ClientTLSKey != "" { + return nil, fmt.Errorf("both client_tls_cert and client_tls_key must be set") + } + return tlsConfig, nil +} diff --git a/sdk/helper/ldaputil/client_test.go b/sdk/helper/ldaputil/client_test.go new file mode 100644 index 0000000..dcce9c6 --- /dev/null +++ b/sdk/helper/ldaputil/client_test.go @@ -0,0 +1,152 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ldaputil + +import ( + "testing" + + "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestDialLDAP duplicates a potential panic that was +// present in the previous version of TestDialLDAP, +// then confirms its fix by passing. 
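+// With an unreachable URL every dial attempt fails, so DialLDAP must return the
+// accumulated multierror rather than a nil connection that SetTimeout would
+// otherwise be invoked on.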
+func TestDialLDAP(t *testing.T) {
+	ldapClient := Client{
+		Logger: hclog.NewNullLogger(),
+		LDAP:   NewLDAP(),
+	}
+
+	ce := &ConfigEntry{
+		Url:            "ldap://localhost:384654786",
+		RequestTimeout: 3,
+	}
+	if _, err := ldapClient.DialLDAP(ce); err == nil {
+		t.Fatal("expected error")
+	}
+}
+
+func TestLDAPEscape(t *testing.T) {
+	testcases := map[string]string{
+		"#test":       "\\#test",
+		"test,hello":  "test\\,hello",
+		"test,hel+lo": "test\\,hel\\+lo",
+		"test\\hello": "test\\\\hello",
+		" test ":      "\\ test \\ ",
+		"":            "",
+		`\`:           `\\`,
+		"trailing\000":                 `trailing\00`,
+		"mid\000dle":                   `mid\00dle`,
+		"\000":                         `\00`,
+		"multiple\000\000":             `multiple\00\00`,
+		"backslash-before-null\\\000":  `backslash-before-null\\\00`,
+		"trailing\\":                   `trailing\\`,
+		"double-escaping\\>":           `double-escaping\\\>`,
+	}
+
+	for test, answer := range testcases {
+		res := EscapeLDAPValue(test)
+		if res != answer {
+			t.Errorf("Failed to escape %s: %s != %s\n", test, res, answer)
+		}
+	}
+}
+
+func TestGetTLSConfigs(t *testing.T) {
+	config := testConfig(t)
+	if err := config.Validate(); err != nil {
+		t.Fatal(err)
+	}
+	tlsConfig, err := getTLSConfig(config, "138.91.247.105")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if tlsConfig == nil {
+		t.Fatal("expected 1 TLS config because there's 1 url")
+	}
+	if tlsConfig.InsecureSkipVerify {
+		t.Fatal("InsecureSkipVerify should be false because we should default to the most secure connection")
+	}
+	if tlsConfig.ServerName != "138.91.247.105" {
+		t.Fatalf("expected ServerName of \"138.91.247.105\" but received %q", tlsConfig.ServerName)
+	}
+	expected := uint16(771)
+	if tlsConfig.MinVersion != expected || tlsConfig.MaxVersion != expected {
+		t.Fatal("expected TLS min and max version of 771 which corresponds with TLS 1.2 since TLS 1.1 and 1.0 have known vulnerabilities")
+	}
+}
+
+func TestSIDBytesToString(t *testing.T) {
+	testcases := map[string][]byte{
+		"S-1-5-21-2127521184-1604012920-1887927527-72713": {0x01, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x15, 0x00, 0x00, 0x00, 0xA0, 0x65, 0xCF, 0x7E, 0x78, 0x4B, 0x9B, 0x5F, 0xE7, 0x7C, 0x87, 0x70, 0x09, 0x1C, 0x01, 0x00},
+		"S-1-1-0": {0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00},
+		"S-1-5":   {0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05},
+	}
+
+	for answer, test := range testcases {
+		res, err := sidBytesToString(test)
+		if err != nil {
+			t.Errorf("Failed to convert %#v: %s", test, err)
+		} else if answer != res {
+			t.Errorf("Failed to convert %#v: %s != %s", test, res, answer)
+		}
+	}
+}
+
+func TestClient_renderUserSearchFilter(t *testing.T) {
+	t.Parallel()
+	tests := []struct {
+		name        string
+		conf        *ConfigEntry
+		username    string
+		want        string
+		errContains string
+	}{
+		{
+			name:     "valid-default",
+			username: "alice",
+			conf: &ConfigEntry{
+				UserAttr: "cn",
+			},
+			want: "(cn=alice)",
+		},
+		{
+			name:     "escaped-malicious-filter",
+			username: "foo@example.com)((((((((((((((((((((((((((((((((((((((userPrincipalName=foo",
+			conf: &ConfigEntry{
+				UPNDomain:  "example.com",
+				UserFilter: "(&({{.UserAttr}}={{.Username}})({{.UserAttr}}=admin@example.com))",
+			},
+			want: "(&(userPrincipalName=foo@example.com\\29\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28\\28userPrincipalName=foo@example.com)(userPrincipalName=admin@example.com))",
+		},
+		{
+			name:     "bad-filter-unclosed-action",
+			username: "alice",
+			conf: &ConfigEntry{
+				UserFilter: "hello{{range",
+			},
+			errContains: "search failed due to template compilation 
error", + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + c := Client{ + Logger: hclog.NewNullLogger(), + LDAP: NewLDAP(), + } + + f, err := c.RenderUserSearchFilter(tc.conf, tc.username) + if tc.errContains != "" { + require.Error(t, err) + assert.ErrorContains(t, err, tc.errContains) + return + } + require.NoError(t, err) + assert.NotEmpty(t, f) + assert.Equal(t, tc.want, f) + }) + } +} diff --git a/sdk/helper/ldaputil/config.go b/sdk/helper/ldaputil/config.go new file mode 100644 index 0000000..dfa34da --- /dev/null +++ b/sdk/helper/ldaputil/config.go @@ -0,0 +1,561 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ldaputil + +import ( + "crypto/tls" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "strings" + "text/template" + + "github.com/hashicorp/go-secure-stdlib/tlsutil" + "github.com/hashicorp/vault/sdk/framework" + + "github.com/hashicorp/errwrap" + + "github.com/go-ldap/ldap/v3" +) + +var ldapDerefAliasMap = map[string]int{ + "never": ldap.NeverDerefAliases, + "finding": ldap.DerefFindingBaseObj, + "searching": ldap.DerefInSearching, + "always": ldap.DerefAlways, +} + +// ConfigFields returns all the config fields that can potentially be used by the LDAP client. +// Not all fields will be used by every integration. +func ConfigFields() map[string]*framework.FieldSchema { + return map[string]*framework.FieldSchema{ + "anonymous_group_search": { + Type: framework.TypeBool, + Default: false, + Description: "Use anonymous binds when performing LDAP group searches (if true the initial credentials will still be used for the initial connection test).", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Anonymous group search", + }, + }, + "url": { + Type: framework.TypeString, + Default: "ldap://127.0.0.1", + Description: "LDAP URL to connect to (default: ldap://127.0.0.1). 
Multiple URLs can be specified by concatenating them with commas; they will be tried in-order.",
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name: "URL",
+			},
+		},
+
+		"userdn": {
+			Type:        framework.TypeString,
+			Description: "LDAP domain to use for users (eg: ou=People,dc=example,dc=org)",
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name: "User DN",
+			},
+		},
+
+		"binddn": {
+			Type:        framework.TypeString,
+			Description: "LDAP DN for searching for the user DN (optional)",
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name: "Name of Object to bind (binddn)",
+			},
+		},
+
+		"bindpass": {
+			Type:        framework.TypeString,
+			Description: "LDAP password for searching for the user DN (optional)",
+			DisplayAttrs: &framework.DisplayAttributes{
+				Sensitive: true,
+			},
+		},
+
+		"groupdn": {
+			Type:        framework.TypeString,
+			Description: "LDAP search base to use for group membership search (eg: ou=Groups,dc=example,dc=org)",
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name: "Group DN",
+			},
+		},
+
+		"groupfilter": {
+			Type:    framework.TypeString,
+			Default: "(|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))",
+			Description: `Go template for querying group membership of user (optional)
+The template can access the following context variables: UserDN, Username
+Example: (&(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}}))
+Default: (|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))`,
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name: "Group Filter",
+			},
+		},
+
+		"groupattr": {
+			Type:    framework.TypeString,
+			Default: "cn",
+			Description: `LDAP attribute to follow on objects returned by
+<groupfilter> in order to enumerate user group membership.
+Examples: "cn" or "memberOf", etc.
+Default: cn`,
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name:  "Group Attribute",
+				Value: "cn",
+			},
+		},
+
+		"userfilter": {
+			Type:    framework.TypeString,
+			Default: "({{.UserAttr}}={{.Username}})",
+			Description: `Go template for LDAP user search filter (optional)
+The template can access the following context variables: UserAttr, Username
+Default: ({{.UserAttr}}={{.Username}})`,
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name: "User Search Filter",
+			},
+		},
+
+		"upndomain": {
+			Type:        framework.TypeString,
+			Description: "Enables userPrincipalDomain login with [username]@UPNDomain (optional)",
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name: "User Principal (UPN) Domain",
+			},
+		},
+
+		"username_as_alias": {
+			Type:        framework.TypeBool,
+			Default:     false,
+			Description: "If true, sets the alias name to the username",
+		},
+
+		"userattr": {
+			Type:        framework.TypeString,
+			Default:     "cn",
+			Description: "Attribute used for users (default: cn)",
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name:  "User Attribute",
+				Value: "cn",
+			},
+		},
+
+		"certificate": {
+			Type:        framework.TypeString,
+			Description: "CA certificate to use when verifying LDAP server certificate, must be x509 PEM encoded (optional)",
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name:     "CA certificate",
+				EditType: "file",
+			},
+		},
+
+		"client_tls_cert": {
+			Type:        framework.TypeString,
+			Description: "Client certificate to provide to the LDAP server, must be x509 PEM encoded (optional)",
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name:     "Client certificate",
+				EditType: "file",
+			},
+		},
+
+		"client_tls_key": {
+			Type:        framework.TypeString,
+			Description: "Client certificate key to provide to the LDAP server, must be x509 PEM encoded (optional)",
+			DisplayAttrs: 
&framework.DisplayAttributes{ + Name: "Client key", + EditType: "file", + }, + }, + + "discoverdn": { + Type: framework.TypeBool, + Description: "Use anonymous bind to discover the bind DN of a user (optional)", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Discover DN", + }, + }, + + "insecure_tls": { + Type: framework.TypeBool, + Description: "Skip LDAP server SSL Certificate verification - VERY insecure (optional)", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Insecure TLS", + }, + }, + + "starttls": { + Type: framework.TypeBool, + Description: "Issue a StartTLS command after establishing unencrypted connection (optional)", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Issue StartTLS", + }, + }, + + "tls_min_version": { + Type: framework.TypeString, + Default: "tls12", + Description: "Minimum TLS version to use. Accepted values are 'tls10', 'tls11', 'tls12' or 'tls13'. Defaults to 'tls12'", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Minimum TLS Version", + }, + AllowedValues: []interface{}{"tls10", "tls11", "tls12", "tls13"}, + }, + + "tls_max_version": { + Type: framework.TypeString, + Default: "tls12", + Description: "Maximum TLS version to use. Accepted values are 'tls10', 'tls11', 'tls12' or 'tls13'. Defaults to 'tls12'", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Maximum TLS Version", + }, + AllowedValues: []interface{}{"tls10", "tls11", "tls12", "tls13"}, + }, + + "deny_null_bind": { + Type: framework.TypeBool, + Default: true, + Description: "Denies an unauthenticated LDAP bind request if the user's password is empty; defaults to true", + }, + + "case_sensitive_names": { + Type: framework.TypeBool, + Description: "If true, case sensitivity will be used when comparing usernames and groups for matching policies.", + }, + + "use_token_groups": { + Type: framework.TypeBool, + Default: false, + Description: "If true, use the Active Directory tokenGroups constructed attribute of the user to find the group memberships. This will find all security groups including nested ones.", + }, + + "use_pre111_group_cn_behavior": { + Type: framework.TypeBool, + Description: "In Vault 1.1.1 a fix for handling group CN values of different cases unfortunately introduced a regression that could cause previously defined groups to not be found due to a change in the resulting name. If set true, the pre-1.1.1 behavior for matching group CNs will be used. This is only needed in some upgrade scenarios for backwards compatibility. It is enabled by default if the config is upgraded but disabled by default on new configurations.", + }, + + "request_timeout": { + Type: framework.TypeDurationSecond, + Description: "Timeout, in seconds, for the connection when making requests against the server before returning back an error.", + Default: "90s", + }, + + "connection_timeout": { + Type: framework.TypeDurationSecond, + Description: "Timeout, in seconds, when attempting to connect to the LDAP server before trying the next URL in the configuration.", + Default: "30s", + }, + + "dereference_aliases": { + Type: framework.TypeString, + Description: "When aliases should be dereferenced on search operations. Accepted values are 'never', 'finding', 'searching', 'always'. 
Defaults to 'never'.", + Default: "never", + AllowedValues: []interface{}{"never", "finding", "searching", "always"}, + }, + + "max_page_size": { + Type: framework.TypeInt, + Description: "If set to a value greater than 0, the LDAP backend will use the LDAP server's paged search control to request pages of up to the given size. This can be used to avoid hitting the LDAP server's maximum result size limit. Otherwise, the LDAP backend will not use the paged search control.", + Default: 0, + }, + } +} + +/* + * Creates and initializes a ConfigEntry object with its default values, + * as specified by the passed schema. + */ +func NewConfigEntry(existing *ConfigEntry, d *framework.FieldData) (*ConfigEntry, error) { + var hadExisting bool + var cfg *ConfigEntry + + if existing != nil { + cfg = existing + hadExisting = true + } else { + cfg = new(ConfigEntry) + } + + if _, ok := d.Raw["anonymous_group_search"]; ok || !hadExisting { + cfg.AnonymousGroupSearch = d.Get("anonymous_group_search").(bool) + } + + if _, ok := d.Raw["username_as_alias"]; ok || !hadExisting { + cfg.UsernameAsAlias = d.Get("username_as_alias").(bool) + } + + if _, ok := d.Raw["url"]; ok || !hadExisting { + cfg.Url = strings.ToLower(d.Get("url").(string)) + } + + if _, ok := d.Raw["userfilter"]; ok || !hadExisting { + userfilter := d.Get("userfilter").(string) + if userfilter != "" { + // Validate the template before proceeding + _, err := template.New("queryTemplate").Parse(userfilter) + if err != nil { + return nil, errwrap.Wrapf("invalid userfilter: {{err}}", err) + } + } + + cfg.UserFilter = userfilter + } + + if _, ok := d.Raw["userattr"]; ok || !hadExisting { + cfg.UserAttr = strings.ToLower(d.Get("userattr").(string)) + } + + if _, ok := d.Raw["userdn"]; ok || !hadExisting { + cfg.UserDN = d.Get("userdn").(string) + } + + if _, ok := d.Raw["groupdn"]; ok || !hadExisting { + cfg.GroupDN = d.Get("groupdn").(string) + } + + if _, ok := d.Raw["groupfilter"]; ok || !hadExisting { + groupfilter := d.Get("groupfilter").(string) + if groupfilter != "" { + // Validate the template before proceeding + _, err := template.New("queryTemplate").Parse(groupfilter) + if err != nil { + return nil, errwrap.Wrapf("invalid groupfilter: {{err}}", err) + } + } + + cfg.GroupFilter = groupfilter + } + + if _, ok := d.Raw["groupattr"]; ok || !hadExisting { + cfg.GroupAttr = d.Get("groupattr").(string) + } + + if _, ok := d.Raw["upndomain"]; ok || !hadExisting { + cfg.UPNDomain = d.Get("upndomain").(string) + } + + if _, ok := d.Raw["certificate"]; ok || !hadExisting { + certificate := d.Get("certificate").(string) + if certificate != "" { + if err := validateCertificate([]byte(certificate)); err != nil { + return nil, errwrap.Wrapf("failed to parse server tls cert: {{err}}", err) + } + } + cfg.Certificate = certificate + } + + if _, ok := d.Raw["client_tls_cert"]; ok || !hadExisting { + clientTLSCert := d.Get("client_tls_cert").(string) + cfg.ClientTLSCert = clientTLSCert + } + + if _, ok := d.Raw["client_tls_key"]; ok || !hadExisting { + clientTLSKey := d.Get("client_tls_key").(string) + cfg.ClientTLSKey = clientTLSKey + } + + if cfg.ClientTLSCert != "" && cfg.ClientTLSKey != "" { + if _, err := tls.X509KeyPair([]byte(cfg.ClientTLSCert), []byte(cfg.ClientTLSKey)); err != nil { + return nil, errwrap.Wrapf("failed to parse client X509 key pair: {{err}}", err) + } + } else if cfg.ClientTLSCert != "" || cfg.ClientTLSKey != "" { + return nil, fmt.Errorf("both client_tls_cert and client_tls_key must be set") + } + + if _, ok := 
d.Raw["insecure_tls"]; ok || !hadExisting { + cfg.InsecureTLS = d.Get("insecure_tls").(bool) + } + + if _, ok := d.Raw["tls_min_version"]; ok || !hadExisting { + cfg.TLSMinVersion = d.Get("tls_min_version").(string) + _, ok = tlsutil.TLSLookup[cfg.TLSMinVersion] + if !ok { + return nil, errors.New("invalid 'tls_min_version'") + } + } + + if _, ok := d.Raw["tls_max_version"]; ok || !hadExisting { + cfg.TLSMaxVersion = d.Get("tls_max_version").(string) + _, ok = tlsutil.TLSLookup[cfg.TLSMaxVersion] + if !ok { + return nil, fmt.Errorf("invalid 'tls_max_version'") + } + } + if cfg.TLSMaxVersion < cfg.TLSMinVersion { + return nil, fmt.Errorf("'tls_max_version' must be greater than or equal to 'tls_min_version'") + } + + if _, ok := d.Raw["starttls"]; ok || !hadExisting { + cfg.StartTLS = d.Get("starttls").(bool) + } + + if _, ok := d.Raw["binddn"]; ok || !hadExisting { + cfg.BindDN = d.Get("binddn").(string) + } + + if _, ok := d.Raw["bindpass"]; ok || !hadExisting { + cfg.BindPassword = d.Get("bindpass").(string) + } + + if _, ok := d.Raw["deny_null_bind"]; ok || !hadExisting { + cfg.DenyNullBind = d.Get("deny_null_bind").(bool) + } + + if _, ok := d.Raw["discoverdn"]; ok || !hadExisting { + cfg.DiscoverDN = d.Get("discoverdn").(bool) + } + + if _, ok := d.Raw["case_sensitive_names"]; ok || !hadExisting { + cfg.CaseSensitiveNames = new(bool) + *cfg.CaseSensitiveNames = d.Get("case_sensitive_names").(bool) + } + + usePre111GroupCNBehavior, ok := d.GetOk("use_pre111_group_cn_behavior") + if ok { + cfg.UsePre111GroupCNBehavior = new(bool) + *cfg.UsePre111GroupCNBehavior = usePre111GroupCNBehavior.(bool) + } + + if _, ok := d.Raw["use_token_groups"]; ok || !hadExisting { + cfg.UseTokenGroups = d.Get("use_token_groups").(bool) + } + + if _, ok := d.Raw["request_timeout"]; ok || !hadExisting { + cfg.RequestTimeout = d.Get("request_timeout").(int) + } + + if _, ok := d.Raw["connection_timeout"]; ok || !hadExisting { + cfg.ConnectionTimeout = d.Get("connection_timeout").(int) + } + + if _, ok := d.Raw["dereference_aliases"]; ok || !hadExisting { + cfg.DerefAliases = d.Get("dereference_aliases").(string) + } + + if _, ok := d.Raw["max_page_size"]; ok || !hadExisting { + cfg.MaximumPageSize = d.Get("max_page_size").(int) + } + + return cfg, nil +} + +type ConfigEntry struct { + Url string `json:"url"` + UserDN string `json:"userdn"` + AnonymousGroupSearch bool `json:"anonymous_group_search"` + GroupDN string `json:"groupdn"` + GroupFilter string `json:"groupfilter"` + GroupAttr string `json:"groupattr"` + UPNDomain string `json:"upndomain"` + UsernameAsAlias bool `json:"username_as_alias"` + UserFilter string `json:"userfilter"` + UserAttr string `json:"userattr"` + Certificate string `json:"certificate"` + InsecureTLS bool `json:"insecure_tls"` + StartTLS bool `json:"starttls"` + BindDN string `json:"binddn"` + BindPassword string `json:"bindpass"` + DenyNullBind bool `json:"deny_null_bind"` + DiscoverDN bool `json:"discoverdn"` + TLSMinVersion string `json:"tls_min_version"` + TLSMaxVersion string `json:"tls_max_version"` + UseTokenGroups bool `json:"use_token_groups"` + UsePre111GroupCNBehavior *bool `json:"use_pre111_group_cn_behavior"` + RequestTimeout int `json:"request_timeout"` + ConnectionTimeout int `json:"connection_timeout"` + DerefAliases string `json:"dereference_aliases"` + MaximumPageSize int `json:"max_page_size"` + + // These json tags deviate from snake case because there was a past issue + // where the tag was being ignored, causing it to be jsonified as "CaseSensitiveNames", etc. 
+ // To continue reading in users' previously stored values, + // we chose to carry that forward. + CaseSensitiveNames *bool `json:"CaseSensitiveNames,omitempty"` + ClientTLSCert string `json:"ClientTLSCert"` + ClientTLSKey string `json:"ClientTLSKey"` +} + +func (c *ConfigEntry) Map() map[string]interface{} { + m := c.PasswordlessMap() + m["bindpass"] = c.BindPassword + return m +} + +func (c *ConfigEntry) PasswordlessMap() map[string]interface{} { + m := map[string]interface{}{ + "url": c.Url, + "userdn": c.UserDN, + "groupdn": c.GroupDN, + "groupfilter": c.GroupFilter, + "groupattr": c.GroupAttr, + "userfilter": c.UserFilter, + "upndomain": c.UPNDomain, + "userattr": c.UserAttr, + "certificate": c.Certificate, + "insecure_tls": c.InsecureTLS, + "starttls": c.StartTLS, + "binddn": c.BindDN, + "deny_null_bind": c.DenyNullBind, + "discoverdn": c.DiscoverDN, + "tls_min_version": c.TLSMinVersion, + "tls_max_version": c.TLSMaxVersion, + "use_token_groups": c.UseTokenGroups, + "anonymous_group_search": c.AnonymousGroupSearch, + "request_timeout": c.RequestTimeout, + "connection_timeout": c.ConnectionTimeout, + "username_as_alias": c.UsernameAsAlias, + "dereference_aliases": c.DerefAliases, + "max_page_size": c.MaximumPageSize, + } + if c.CaseSensitiveNames != nil { + m["case_sensitive_names"] = *c.CaseSensitiveNames + } + if c.UsePre111GroupCNBehavior != nil { + m["use_pre111_group_cn_behavior"] = *c.UsePre111GroupCNBehavior + } + return m +} + +func validateCertificate(pemBlock []byte) error { + block, _ := pem.Decode([]byte(pemBlock)) + if block == nil || block.Type != "CERTIFICATE" { + return errors.New("failed to decode PEM block in the certificate") + } + _, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return fmt.Errorf("failed to parse certificate %s", err.Error()) + } + return nil +} + +func (c *ConfigEntry) Validate() error { + if len(c.Url) == 0 { + return errors.New("at least one url must be provided") + } + // Note: This logic is driven by the logic in GetUserBindDN. + // If updating this, please also update the logic there. + if !c.DiscoverDN && (c.BindDN == "" || c.BindPassword == "") && c.UPNDomain == "" && c.UserDN == "" { + return errors.New("cannot derive UserBindDN") + } + tlsMinVersion, ok := tlsutil.TLSLookup[c.TLSMinVersion] + if !ok { + return errors.New("invalid 'tls_min_version' in config") + } + tlsMaxVersion, ok := tlsutil.TLSLookup[c.TLSMaxVersion] + if !ok { + return errors.New("invalid 'tls_max_version' in config") + } + if tlsMaxVersion < tlsMinVersion { + return errors.New("'tls_max_version' must be greater than or equal to 'tls_min_version'") + } + if c.Certificate != "" { + if err := validateCertificate([]byte(c.Certificate)); err != nil { + return errwrap.Wrapf("failed to parse server tls cert: {{err}}", err) + } + } + if c.ClientTLSCert != "" && c.ClientTLSKey != "" { + if _, err := tls.X509KeyPair([]byte(c.ClientTLSCert), []byte(c.ClientTLSKey)); err != nil { + return errwrap.Wrapf("failed to parse client X509 key pair: {{err}}", err) + } + } + return nil +} diff --git a/sdk/helper/ldaputil/config_test.go b/sdk/helper/ldaputil/config_test.go new file mode 100644 index 0000000..b7fd22c --- /dev/null +++ b/sdk/helper/ldaputil/config_test.go @@ -0,0 +1,183 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package ldaputil + +import ( + "encoding/json" + "testing" + + "github.com/go-test/deep" + "github.com/hashicorp/vault/sdk/framework" +) + +func TestCertificateValidation(t *testing.T) { + // certificate should default to "" without error if it doesn't exist + config := testConfig(t) + if err := config.Validate(); err != nil { + t.Fatal(err) + } + if config.Certificate != "" { + t.Fatalf("expected no certificate but received %s", config.Certificate) + } + + // certificate should cause an error if a bad one is provided + config.Certificate = "cats" + if err := config.Validate(); err == nil { + t.Fatal("should err due to bad cert") + } + + // valid certificates should pass inspection + config.Certificate = validCertificate + if err := config.Validate(); err != nil { + t.Fatal(err) + } +} + +func TestNewConfigEntry(t *testing.T) { + s := &framework.FieldData{Schema: ConfigFields()} + config, err := NewConfigEntry(nil, s) + if err != nil { + t.Fatal("error getting default config") + } + configFromJSON := testJSONConfig(t, jsonConfigDefault) + + t.Run("equality_check", func(t *testing.T) { + if diff := deep.Equal(config, configFromJSON); len(diff) > 0 { + t.Fatalf("bad, diff: %#v", diff) + } + }) +} + +func TestConfig(t *testing.T) { + config := testConfig(t) + configFromJSON := testJSONConfig(t, jsonConfig) + + t.Run("equality_check", func(t *testing.T) { + if diff := deep.Equal(config, configFromJSON); len(diff) > 0 { + t.Fatalf("bad, diff: %#v", diff) + } + }) + + t.Run("default_use_token_groups", func(t *testing.T) { + if config.UseTokenGroups { + t.Errorf("expected false UseTokenGroups but got %t", config.UseTokenGroups) + } + + if configFromJSON.UseTokenGroups { + t.Errorf("expected false UseTokenGroups from JSON but got %t", configFromJSON.UseTokenGroups) + } + }) +} + +func testConfig(t *testing.T) *ConfigEntry { + t.Helper() + + return &ConfigEntry{ + Url: "ldap://138.91.247.105", + UserDN: "example,com", + BindDN: "kitty", + BindPassword: "cats", + TLSMaxVersion: "tls12", + TLSMinVersion: "tls12", + RequestTimeout: 30, + ConnectionTimeout: 15, + ClientTLSCert: "", + ClientTLSKey: "", + } +} + +func testJSONConfig(t *testing.T, rawJson []byte) *ConfigEntry { + t.Helper() + + config := new(ConfigEntry) + if err := json.Unmarshal(rawJson, config); err != nil { + t.Fatal(err) + } + return config +} + +const validCertificate = ` +-----BEGIN CERTIFICATE----- +MIIF7zCCA9egAwIBAgIJAOY2qjn64Qq5MA0GCSqGSIb3DQEBCwUAMIGNMQswCQYD +VQQGEwJVUzEQMA4GA1UECAwHTm93aGVyZTERMA8GA1UEBwwIVGltYnVrdHUxEjAQ +BgNVBAoMCVRlc3QgRmFrZTENMAsGA1UECwwETm9uZTEPMA0GA1UEAwwGTm9ib2R5 +MSUwIwYJKoZIhvcNAQkBFhZkb25vdHRydXN0QG5vd2hlcmUuY29tMB4XDTE4MDQw +MzIwNDQwOFoXDTE5MDQwMzIwNDQwOFowgY0xCzAJBgNVBAYTAlVTMRAwDgYDVQQI +DAdOb3doZXJlMREwDwYDVQQHDAhUaW1idWt0dTESMBAGA1UECgwJVGVzdCBGYWtl +MQ0wCwYDVQQLDAROb25lMQ8wDQYDVQQDDAZOb2JvZHkxJTAjBgkqhkiG9w0BCQEW +FmRvbm90dHJ1c3RAbm93aGVyZS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDzQPGErqjaoFcuUV6QFpSMU6w8wO8F0othik+rrlKERmrGonUGsoum +WqRe6L4ZnxBvCKB6EWjvf894TXOF2cpUnjDAyBePISyPkRBEJS6VS2SEC4AJzmVu +a+P+fZr4Hf7/bEcUr7Ax37yGVZ5i5ByNHgZkBlPxKiGWSmAqIDRZLp9gbu2EkG9q +NOjNLPU+QI2ov6U/laGS1vbE2LahTYeT5yscu9LpllxzFv4lM1f4wYEaM3HuOxzT +l86cGmEr9Q2N4PZ2T0O/s6D4but7c6Bz2XPXy9nWb5bqu0n5bJEpbRFrkryW1ozh +L9uVVz4dyW10pFBJtE42bqA4PRCDQsUof7UfsQF11D1ThrDfKsQa8PxrYdGUHUG9 +GFF1MdTTwaoT90RI582p+6XYV+LNlXcdfyNZO9bMThu9fnCvT7Ey0TKU4MfPrlfT +aIhZmyaHt6mL5p881UPDIvy7paTLgL+C1orLjZAiT//c4Zn+0qG0//Cirxr020UF 
+3YiEFk2H0bBVwOHoOGw4w5HrvLdyy0ZLDSPQbzkSZ0RusHb5TjiyhtTk/h9vvJv7
+u1fKJub4MzgrBRi16ejFdiWoVuMXRC6fu/ERy3+9DH6LURerbPrdroYypUmTe9N6
+XPeaF1Tc+WO7O/yW96mV7X/D211qjkOtwboZC5kjogVbaZgGzjHCVwIDAQABo1Aw
+TjAdBgNVHQ4EFgQU2zWT3HeiMBzusz7AggVqVEL5g0UwHwYDVR0jBBgwFoAU2zWT
+3HeiMBzusz7AggVqVEL5g0UwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOC
+AgEAwTGcppY86mNRE43uOimeApTfqHJv+lGDTjEoJCZZmzmtxFe6O9+Vk4bH/8/i
+gVQvqzBpaWXRt9OhqlFMK7OkX4ZvqXmnShmxib1dz1XxGhbwSec9ca8bill59Jqa
+bIOq2SXVMcFD0GwFxfJRBVzHHuB6AwV9B2QN61zeB1oxNGJrUOo80jVkB7+MWMyD
+bQqiFCHWGMa6BG4N91KGOTveZCGdBvvVw5j6lt731KjbvL2hB1UHioucOweKLfa4
+QWDImTEjgV68699wKERNL0DCpeD7PcP/L3SY2RJzdyC1CSR7O8yU4lQK7uZGusgB
+Mgup+yUaSjxasIqYMebNDDocr5kdwG0+2r2gQdRwc5zLX6YDBn6NLSWjRnY04ZuK
+P1cF68rWteWpzJu8bmkJ5r2cqskqrnVK+zz8xMQyEaj548Bnt51ARLHOftR9jkSU
+NJWh7zOLZ1r2UUKdDlrMoh3GQO3rvnCJJ16NBM1dB7TUyhMhtF6UOE62BSKdHtQn
+d6TqelcRw9WnDsb9IPxRwaXhvGljnYVAgXXlJEI/6nxj2T4wdmL1LWAr6C7DuWGz
+8qIvxc4oAau4DsZs2+BwolCFtYc98OjWGcBStBfZz/YYXM+2hKjbONKFxWdEPxGR
+Beq3QOqp2+dga36IzQybzPQ8QtotrpSJ3q82zztEvyWiJ7E=
+-----END CERTIFICATE-----
+`
+
+var jsonConfig = []byte(`{
+ "url": "ldap://138.91.247.105",
+ "userdn": "example,com",
+ "binddn": "kitty",
+ "bindpass": "cats",
+ "tls_max_version": "tls12",
+ "tls_min_version": "tls12",
+ "request_timeout": 30,
+ "connection_timeout": 15,
+ "ClientTLSCert": "",
+ "ClientTLSKey": ""
+}`)
+
+var jsonConfigDefault = []byte(`
+{
+ "url": "ldap://127.0.0.1",
+ "userdn": "",
+ "anonymous_group_search": false,
+ "groupdn": "",
+ "groupfilter": "(|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))",
+ "groupattr": "cn",
+ "upndomain": "",
+ "userattr": "cn",
+ "userfilter": "({{.UserAttr}}={{.Username}})",
+ "certificate": "",
+ "client_tls_cert": "",
+ "client_tls_key": "",
+ "insecure_tls": false,
+ "starttls": false,
+ "binddn": "",
+ "bindpass": "",
+ "deny_null_bind": true,
+ "discoverdn": false,
+ "tls_min_version": "tls12",
+ "tls_max_version": "tls12",
+ "use_token_groups": false,
+ "use_pre111_group_cn_behavior": null,
+ "username_as_alias": false,
+ "request_timeout": 90,
+ "connection_timeout": 30,
+ "dereference_aliases": "never",
+ "max_page_size": 0,
+ "CaseSensitiveNames": false,
+ "ClientTLSCert": "",
+ "ClientTLSKey": ""
+}
+`)
diff --git a/sdk/helper/ldaputil/connection.go b/sdk/helper/ldaputil/connection.go
new file mode 100644
index 0000000..c33ad40
--- /dev/null
+++ b/sdk/helper/ldaputil/connection.go
@@ -0,0 +1,30 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package ldaputil
+
+import (
+ "crypto/tls"
+ "time"
+
+ "github.com/go-ldap/ldap/v3"
+)
+
+// Connection provides the functionality of an LDAP connection,
+// but through an interface.
+type Connection interface {
+ Bind(username, password string) error
+ Close()
+ Add(addRequest *ldap.AddRequest) error
+ Modify(modifyRequest *ldap.ModifyRequest) error
+ Del(delRequest *ldap.DelRequest) error
+ Search(searchRequest *ldap.SearchRequest) (*ldap.SearchResult, error)
+ StartTLS(config *tls.Config) error
+ SetTimeout(timeout time.Duration)
+ UnauthenticatedBind(username string) error
+}
+
+type PagingConnection interface {
+ Connection
+ SearchWithPaging(searchRequest *ldap.SearchRequest, pagingSize uint32) (*ldap.SearchResult, error)
+}
diff --git a/sdk/helper/ldaputil/ldap.go b/sdk/helper/ldaputil/ldap.go
new file mode 100644
index 0000000..bdf746e
--- /dev/null
+++ b/sdk/helper/ldaputil/ldap.go
@@ -0,0 +1,24 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package ldaputil + +import ( + "github.com/go-ldap/ldap/v3" +) + +func NewLDAP() LDAP { + return &ldapIfc{} +} + +// LDAP provides ldap functionality, but through an interface +// rather than statically. This allows faking it for tests. +type LDAP interface { + DialURL(addr string, opts ...ldap.DialOpt) (Connection, error) +} + +type ldapIfc struct{} + +func (l *ldapIfc) DialURL(addr string, opts ...ldap.DialOpt) (Connection, error) { + return ldap.DialURL(addr, opts...) +} diff --git a/sdk/helper/license/feature.go b/sdk/helper/license/feature.go new file mode 100644 index 0000000..b42fcd1 --- /dev/null +++ b/sdk/helper/license/feature.go @@ -0,0 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package license + +// Features is a bitmask of feature flags +type Features uint + +const FeatureNone Features = 0 + +func (f Features) HasFeature(flag Features) bool { + return false +} diff --git a/sdk/helper/locksutil/locks.go b/sdk/helper/locksutil/locks.go new file mode 100644 index 0000000..c7538b6 --- /dev/null +++ b/sdk/helper/locksutil/locks.go @@ -0,0 +1,61 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package locksutil + +import ( + "sync" + + "github.com/hashicorp/vault/sdk/helper/cryptoutil" +) + +const ( + LockCount = 256 +) + +type LockEntry struct { + sync.RWMutex +} + +// CreateLocks returns an array so that the locks can be iterated over in +// order. +// +// This is only threadsafe if a process is using a single lock, or iterating +// over the entire lock slice in order. Using a consistent order avoids +// deadlocks because you can never have the following: +// +// Lock A, Lock B +// Lock B, Lock A +// +// Where process 1 is now deadlocked trying to lock B, and process 2 deadlocked trying to lock A +func CreateLocks() []*LockEntry { + ret := make([]*LockEntry, LockCount) + for i := range ret { + ret[i] = new(LockEntry) + } + return ret +} + +func LockIndexForKey(key string) uint8 { + return uint8(cryptoutil.Blake2b256Hash(key)[0]) +} + +func LockForKey(locks []*LockEntry, key string) *LockEntry { + return locks[LockIndexForKey(key)] +} + +func LocksForKeys(locks []*LockEntry, keys []string) []*LockEntry { + lockIndexes := make(map[uint8]struct{}, len(keys)) + for _, k := range keys { + lockIndexes[LockIndexForKey(k)] = struct{}{} + } + + locksToReturn := make([]*LockEntry, 0, len(keys)) + for i, l := range locks { + if _, ok := lockIndexes[uint8(i)]; ok { + locksToReturn = append(locksToReturn, l) + } + } + + return locksToReturn +} diff --git a/sdk/helper/locksutil/locks_test.go b/sdk/helper/locksutil/locks_test.go new file mode 100644 index 0000000..954a463 --- /dev/null +++ b/sdk/helper/locksutil/locks_test.go @@ -0,0 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package locksutil + +import "testing" + +func Test_CreateLocks(t *testing.T) { + locks := CreateLocks() + if len(locks) != 256 { + t.Fatalf("bad: len(locks): expected:256 actual:%d", len(locks)) + } +} diff --git a/sdk/helper/logging/logging.go b/sdk/helper/logging/logging.go new file mode 100644 index 0000000..37dcefa --- /dev/null +++ b/sdk/helper/logging/logging.go @@ -0,0 +1,81 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package logging + +import ( + "fmt" + "io" + "os" + "strings" + + log "github.com/hashicorp/go-hclog" +) + +type LogFormat int + +const ( + UnspecifiedFormat LogFormat = iota + StandardFormat + JSONFormat +) + +// Stringer implementation +func (l LogFormat) String() string { + switch l { + case UnspecifiedFormat: + return "unspecified" + case StandardFormat: + return "standard" + case JSONFormat: + return "json" + } + + // unreachable + return "unknown" +} + +// NewVaultLogger creates a new logger with the specified level and a Vault +// formatter +func NewVaultLogger(level log.Level) log.Logger { + return NewVaultLoggerWithWriter(log.DefaultOutput, level) +} + +// NewVaultLoggerWithWriter creates a new logger with the specified level and +// writer and a Vault formatter +func NewVaultLoggerWithWriter(w io.Writer, level log.Level) log.Logger { + opts := &log.LoggerOptions{ + Level: level, + IndependentLevels: true, + Output: w, + JSONFormat: ParseEnvLogFormat() == JSONFormat, + } + return log.New(opts) +} + +// ParseLogFormat parses the log format from the provided string. +func ParseLogFormat(format string) (LogFormat, error) { + switch strings.ToLower(strings.TrimSpace(format)) { + case "": + return UnspecifiedFormat, nil + case "standard": + return StandardFormat, nil + case "json": + return JSONFormat, nil + default: + return UnspecifiedFormat, fmt.Errorf("unknown log format: %s", format) + } +} + +// ParseEnvLogFormat parses the log format from an environment variable. +func ParseEnvLogFormat() LogFormat { + logFormat := os.Getenv("VAULT_LOG_FORMAT") + switch strings.ToLower(logFormat) { + case "json", "vault_json", "vault-json", "vaultjson": + return JSONFormat + case "standard": + return StandardFormat + default: + return UnspecifiedFormat + } +} diff --git a/sdk/helper/logging/logging_test.go b/sdk/helper/logging/logging_test.go new file mode 100644 index 0000000..1607552 --- /dev/null +++ b/sdk/helper/logging/logging_test.go @@ -0,0 +1,67 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package logging + +import ( + "errors" + "os" + "reflect" + "testing" +) + +func Test_ParseLogFormat(t *testing.T) { + type testData struct { + format string + expected LogFormat + expectedErr error + } + + tests := []testData{ + {format: "", expected: UnspecifiedFormat, expectedErr: nil}, + {format: " ", expected: UnspecifiedFormat, expectedErr: nil}, + {format: "standard", expected: StandardFormat, expectedErr: nil}, + {format: "STANDARD", expected: StandardFormat, expectedErr: nil}, + {format: "json", expected: JSONFormat, expectedErr: nil}, + {format: " json ", expected: JSONFormat, expectedErr: nil}, + {format: "bogus", expected: UnspecifiedFormat, expectedErr: errors.New("unknown log format: bogus")}, + } + + for _, test := range tests { + result, err := ParseLogFormat(test.format) + if test.expected != result { + t.Errorf("expected %s, got %s", test.expected, result) + } + if !reflect.DeepEqual(test.expectedErr, err) { + t.Errorf("expected error %v, got %v", test.expectedErr, err) + } + } +} + +func Test_ParseEnv_VAULT_LOG_FORMAT(t *testing.T) { + oldVLF := os.Getenv("VAULT_LOG_FORMAT") + defer os.Setenv("VAULT_LOG_FORMAT", oldVLF) + + testParseEnvLogFormat(t, "VAULT_LOG_FORMAT") +} + +func testParseEnvLogFormat(t *testing.T, name string) { + env := []string{ + "json", "vauLT_Json", "VAULT-JSON", "vaulTJSon", + "standard", "STANDARD", + "bogus", + } + + formats := []LogFormat{ + JSONFormat, JSONFormat, JSONFormat, JSONFormat, + StandardFormat, StandardFormat, + UnspecifiedFormat, + } + + for i, e := range env { + os.Setenv(name, e) + if lf := ParseEnvLogFormat(); formats[i] != lf { + t.Errorf("expected %s, got %s", formats[i], lf) + } + } +} diff --git a/sdk/helper/mlock/mlock.go b/sdk/helper/mlock/mlock.go new file mode 100644 index 0000000..5820d15 --- /dev/null +++ b/sdk/helper/mlock/mlock.go @@ -0,0 +1,17 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// DEPRECATED: this has been moved to go-secure-stdlib and will be removed +package mlock + +import ( + extmlock "github.com/hashicorp/go-secure-stdlib/mlock" +) + +func Supported() bool { + return extmlock.Supported() +} + +func LockMemory() error { + return extmlock.LockMemory() +} diff --git a/sdk/helper/ocsp/client.go b/sdk/helper/ocsp/client.go new file mode 100644 index 0000000..8bd9cea --- /dev/null +++ b/sdk/helper/ocsp/client.go @@ -0,0 +1,1174 @@ +// Copyright (c) 2017-2022 Snowflake Computing Inc. All rights reserved. + +package ocsp + +import ( + "bytes" + "context" + "crypto" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/base64" + "errors" + "fmt" + "io" + "math/big" + "net" + "net/http" + "net/url" + "strconv" + "strings" + "sync" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-retryablehttp" + lru "github.com/hashicorp/golang-lru" + "github.com/hashicorp/vault/sdk/helper/certutil" + "golang.org/x/crypto/ocsp" +) + +// FailOpenMode is OCSP fail open mode. 
It is FailOpenTrue by default and may
+// be set to FailOpenFalse for fail-closed mode.
+type FailOpenMode uint32
+
+type requestFunc func(method, urlStr string, body interface{}) (*retryablehttp.Request, error)
+
+type clientInterface interface {
+ Do(req *retryablehttp.Request) (*http.Response, error)
+}
+
+const (
+ httpHeaderContentType = "Content-Type"
+ httpHeaderAccept = "accept"
+ httpHeaderContentLength = "Content-Length"
+ httpHeaderHost = "Host"
+ ocspRequestContentType = "application/ocsp-request"
+ ocspResponseContentType = "application/ocsp-response"
+)
+
+const (
+ ocspFailOpenNotSet FailOpenMode = iota
+ // FailOpenTrue represents OCSP fail open mode.
+ FailOpenTrue
+ // FailOpenFalse represents OCSP fail closed mode.
+ FailOpenFalse
+)
+
+const (
+ ocspModeFailOpen = "FAIL_OPEN"
+ ocspModeFailClosed = "FAIL_CLOSED"
+ ocspModeInsecure = "INSECURE"
+)
+
+const ocspCacheKey = "ocsp_cache"
+
+const (
+ // defaultOCSPResponderTimeout is the total timeout for the OCSP responder.
+ defaultOCSPResponderTimeout = 10 * time.Second
+)
+
+const (
+ // cacheExpire specifies cache data expiration time in seconds.
+ cacheExpire = float64(24 * 60 * 60)
+)
+
+type ocspCachedResponse struct {
+ time float64
+ producedAt float64
+ thisUpdate float64
+ nextUpdate float64
+ status ocspStatusCode
+}
+
+type Client struct {
+ // caRoot includes the CA certificates.
+ caRoot map[string]*x509.Certificate
+ // certPool includes the CA certificates.
+ certPool *x509.CertPool
+ ocspResponseCache *lru.TwoQueueCache
+ ocspResponseCacheLock sync.RWMutex
+ // cacheUpdated is true if the memory cache is updated
+ cacheUpdated bool
+ logFactory func() hclog.Logger
+}
+
+type ocspStatusCode int
+
+type ocspStatus struct {
+ code ocspStatusCode
+ err error
+}
+
+const (
+ ocspSuccess ocspStatusCode = 0
+ ocspStatusGood ocspStatusCode = -1
+ ocspStatusRevoked ocspStatusCode = -2
+ ocspStatusUnknown ocspStatusCode = -3
+ ocspStatusOthers ocspStatusCode = -4
+ ocspFailedDecomposeRequest ocspStatusCode = -5
+ ocspInvalidValidity ocspStatusCode = -6
+ ocspMissedCache ocspStatusCode = -7
+ ocspCacheExpired ocspStatusCode = -8
+)
+
+// copied from crypto/ocsp.go
+type certID struct {
+ HashAlgorithm pkix.AlgorithmIdentifier
+ NameHash []byte
+ IssuerKeyHash []byte
+ SerialNumber *big.Int
+}
+
+// cache key
+type certIDKey struct {
+ NameHash string
+ IssuerKeyHash string
+ SerialNumber string
+}
+
+// copied from crypto/ocsp
+var hashOIDs = map[crypto.Hash]asn1.ObjectIdentifier{
+ crypto.SHA1: asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}),
+ crypto.SHA256: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 1}),
+ crypto.SHA384: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 2}),
+ crypto.SHA512: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 3}),
+}
+
+// copied from crypto/ocsp
+func getOIDFromHashAlgorithm(target crypto.Hash) (asn1.ObjectIdentifier, error) {
+ for hash, oid := range hashOIDs {
+ if hash == target {
+ return oid, nil
+ }
+ }
+ return nil, fmt.Errorf("no valid OID is found for the hash algorithm: %v", target)
+}
+
+func (c *Client) ClearCache() {
+ c.ocspResponseCache.Purge()
+}
+
+func (c *Client) getHashAlgorithmFromOID(target pkix.AlgorithmIdentifier) crypto.Hash {
+ for hash, oid := range hashOIDs {
+ if oid.Equal(target.Algorithm) {
+ return hash
+ }
+ }
+ // no valid hash algorithm is found for the oid.
Falling back to SHA1.
+ return crypto.SHA1
+}
+
+// isInValidityRange checks the validity
+func isInValidityRange(currTime, nextUpdate time.Time) bool {
+ return !nextUpdate.IsZero() && !currTime.After(nextUpdate)
+}
+
+func extractCertIDKeyFromRequest(ocspReq []byte) (*certIDKey, *ocspStatus) {
+ r, err := ocsp.ParseRequest(ocspReq)
+ if err != nil {
+ return nil, &ocspStatus{
+ code: ocspFailedDecomposeRequest,
+ err: err,
+ }
+ }
+
+ // encode CertID, used as a key in the cache
+ encodedCertID := &certIDKey{
+ base64.StdEncoding.EncodeToString(r.IssuerNameHash),
+ base64.StdEncoding.EncodeToString(r.IssuerKeyHash),
+ r.SerialNumber.String(),
+ }
+ return encodedCertID, &ocspStatus{
+ code: ocspSuccess,
+ }
+}
+
+func (c *Client) encodeCertIDKey(certIDKeyBase64 string) (*certIDKey, error) {
+ r, err := base64.StdEncoding.DecodeString(certIDKeyBase64)
+ if err != nil {
+ return nil, err
+ }
+ var cid certID
+ rest, err := asn1.Unmarshal(r, &cid)
+ if err != nil {
+ // error in parsing
+ return nil, err
+ }
+ if len(rest) > 0 {
+ // extra bytes at the end; the encoding is malformed
+ return nil, errors.New("extra bytes at the end of the encoded CertID")
+ }
+ return &certIDKey{
+ base64.StdEncoding.EncodeToString(cid.NameHash),
+ base64.StdEncoding.EncodeToString(cid.IssuerKeyHash),
+ cid.SerialNumber.String(),
+ }, nil
+}
+
+func (c *Client) checkOCSPResponseCache(encodedCertID *certIDKey, subject, issuer *x509.Certificate) (*ocspStatus, error) {
+ c.ocspResponseCacheLock.RLock()
+ var cacheValue *ocspCachedResponse
+ v, ok := c.ocspResponseCache.Get(*encodedCertID)
+ if ok {
+ cacheValue = v.(*ocspCachedResponse)
+ }
+ c.ocspResponseCacheLock.RUnlock()
+
+ status, err := c.extractOCSPCacheResponseValue(cacheValue, subject, issuer)
+ if err != nil {
+ return nil, err
+ }
+ if !isValidOCSPStatus(status.code) {
+ c.deleteOCSPCache(encodedCertID)
+ }
+ return status, err
+}
+
+func (c *Client) deleteOCSPCache(encodedCertID *certIDKey) {
+ c.ocspResponseCacheLock.Lock()
+ c.ocspResponseCache.Remove(*encodedCertID)
+ c.cacheUpdated = true
+ c.ocspResponseCacheLock.Unlock()
+}
+
+func validateOCSP(ocspRes *ocsp.Response) (*ocspStatus, error) {
+ curTime := time.Now()
+
+ if ocspRes == nil {
+ return nil, errors.New("OCSP Response is nil")
+ }
+ if !isInValidityRange(curTime, ocspRes.NextUpdate) {
+ return &ocspStatus{
+ code: ocspInvalidValidity,
+ err: fmt.Errorf("invalid validity: producedAt: %v, thisUpdate: %v, nextUpdate: %v", ocspRes.ProducedAt, ocspRes.ThisUpdate, ocspRes.NextUpdate),
+ }, nil
+ }
+ return returnOCSPStatus(ocspRes), nil
+}
+
+func returnOCSPStatus(ocspRes *ocsp.Response) *ocspStatus {
+ switch ocspRes.Status {
+ case ocsp.Good:
+ return &ocspStatus{
+ code: ocspStatusGood,
+ err: nil,
+ }
+ case ocsp.Revoked:
+ return &ocspStatus{
+ code: ocspStatusRevoked,
+ }
+ case ocsp.Unknown:
+ return &ocspStatus{
+ code: ocspStatusUnknown,
+ err: errors.New("OCSP status unknown"),
+ }
+ default:
+ return &ocspStatus{
+ code: ocspStatusOthers,
+ err: fmt.Errorf("unexpected OCSP status: %v", ocspRes.Status),
+ }
+ }
+}
+
+// retryOCSP is the second-level retry method, used when the returned contents
+// are corrupted; this often happens with OCSP servers, and retrying helps.
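+//
+// An illustrative call (these variable names are the caller's, not part of
+// this package):
+//
+//	client := retryablehttp.NewClient()
+//	ocspRes, raw, status, err := c.retryOCSP(ctx, client,
+//		retryablehttp.NewRequest, responderURL, headers, ocspReqDER, issuer)
+//
+// It attempts a GET with the base64-encoded request appended to the responder
+// URL, then falls back to POST, collecting the failure from each attempt into
+// a single multierror.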
+func (c *Client) retryOCSP( + ctx context.Context, + client clientInterface, + req requestFunc, + ocspHost *url.URL, + headers map[string]string, + reqBody []byte, + issuer *x509.Certificate, +) (ocspRes *ocsp.Response, ocspResBytes []byte, ocspS *ocspStatus, retErr error) { + doRequest := func(request *retryablehttp.Request) (*http.Response, error) { + if request != nil { + request = request.WithContext(ctx) + for k, v := range headers { + request.Header[k] = append(request.Header[k], v) + } + } + res, err := client.Do(request) + if err != nil { + return nil, err + } + c.Logger().Debug("StatusCode from OCSP Server:", "statusCode", res.StatusCode) + return res, err + } + + for _, method := range []string{"GET", "POST"} { + reqUrl := *ocspHost + var body []byte + + switch method { + case "GET": + reqUrl.Path = reqUrl.Path + "/" + base64.StdEncoding.EncodeToString(reqBody) + case "POST": + body = reqBody + default: + // Programming error; all request/systems errors are multierror + // and appended. + return nil, nil, nil, fmt.Errorf("unknown request method: %v", method) + } + + var res *http.Response + request, err := req(method, reqUrl.String(), bytes.NewBuffer(body)) + if err != nil { + err = fmt.Errorf("error creating %v request: %w", method, err) + retErr = multierror.Append(retErr, err) + continue + } + if res, err = doRequest(request); err != nil { + err = fmt.Errorf("error doing %v request: %w", method, err) + retErr = multierror.Append(retErr, err) + continue + } else { + defer res.Body.Close() + } + + if res.StatusCode != http.StatusOK { + err = fmt.Errorf("HTTP code is not OK on %v request. %v: %v", method, res.StatusCode, res.Status) + retErr = multierror.Append(retErr, err) + continue + } + + ocspResBytes, err = io.ReadAll(res.Body) + if err != nil { + err = fmt.Errorf("error reading %v request body: %w", method, err) + retErr = multierror.Append(retErr, err) + continue + } + + // Reading an OCSP response shouldn't be fatal. A misconfigured + // endpoint might return invalid results for e.g., GET but return + // valid results for POST on retry. This could happen if e.g., the + // server responds with JSON. + ocspRes, err = ocsp.ParseResponse(ocspResBytes /*issuer = */, nil /* !!unsafe!! */) + if err != nil { + err = fmt.Errorf("error parsing %v OCSP response: %w", method, err) + retErr = multierror.Append(retErr, err) + continue + } + + // Above, we use the unsafe issuer=nil parameter to ocsp.ParseResponse + // because Go's library does the wrong thing. + // + // Here, we lack a full chain, but we know we trust the parent issuer, + // so if the Go library incorrectly discards useful certificates, we + // likely cannot verify this without passing through the full chain + // back to the root. + // + // Instead, take one of two paths: 1. if there is no certificate in + // the ocspRes, verify the OCSP response directly with our trusted + // issuer certificate, or 2. if there is a certificate, either verify + // it directly matches our trusted issuer certificate, or verify it + // is signed by our trusted issuer certificate. + // + // See also: https://github.com/golang/go/issues/59641 + // + // This addresses the !!unsafe!! behavior above. 
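+ //
+ // The trust decision implemented below is, roughly:
+ //
+ //	no responder cert in response -> verify the response against issuer
+ //	responder cert == issuer      -> already trusted
+ //	responder cert != issuer      -> delegated responder: verify that it is
+ //	                                 signed by issuer, unexpired, and holds
+ //	                                 the OCSP Signing EKU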
+ if ocspRes.Certificate == nil {
+ if err := ocspRes.CheckSignatureFrom(issuer); err != nil {
+ err = fmt.Errorf("error directly verifying signature on %v OCSP response: %w", method, err)
+ retErr = multierror.Append(retErr, err)
+ continue
+ }
+ } else {
+ // Because we have at least one certificate here, we know that
+ // Go's ocsp library verified the signature from this certificate
+ // onto the response and it was valid. Now we need to know we trust
+ // this certificate. There's two ways we can do this:
+ //
+ // 1. Via confirming issuer == ocspRes.Certificate, or
+ // 2. Via confirming ocspRes.Certificate.CheckSignatureFrom(issuer).
+ if !bytes.Equal(issuer.Raw, ocspRes.Certificate.Raw) {
+ // 1 must not hold, so 2 holds; verify the signature.
+ if err := ocspRes.Certificate.CheckSignatureFrom(issuer); err != nil {
+ err = fmt.Errorf("error checking chain of trust on %v OCSP response via %v: %w", method, issuer.Subject.String(), err)
+ retErr = multierror.Append(retErr, err)
+ continue
+ }
+
+ // Verify the OCSP responder certificate is still valid and
+ // contains the required EKU since it is a delegated OCSP
+ // responder certificate.
+ if ocspRes.Certificate.NotAfter.Before(time.Now()) {
+ err := fmt.Errorf("error checking delegated OCSP responder on %v OCSP response: certificate has expired", method)
+ retErr = multierror.Append(retErr, err)
+ continue
+ }
+ haveEKU := false
+ for _, ku := range ocspRes.Certificate.ExtKeyUsage {
+ if ku == x509.ExtKeyUsageOCSPSigning {
+ haveEKU = true
+ break
+ }
+ }
+ if !haveEKU {
+ err := fmt.Errorf("error checking delegated OCSP responder on %v OCSP response: certificate lacks the OCSP Signing EKU", method)
+ retErr = multierror.Append(retErr, err)
+ continue
+ }
+ }
+ }
+
+ // While we haven't validated the signature on the OCSP response, we
+ // got what we presume is a definitive answer and simply changing
+ // methods will likely not help us in that regard. Use this status
+ // to return without retrying another method, when it looks definitive.
+ //
+ // We don't accept ocsp.Unknown here: presumably, we could've hit a CDN
+ // with static mapping of request->responses, with a default "unknown"
+ // handler for everything else. By retrying here, we use POST, which
+ // could hit a live OCSP server with fresher data than the cached CDN.
+ if ocspRes.Status == ocsp.Good || ocspRes.Status == ocsp.Revoked {
+ break
+ }
+
+ // Here, we didn't have a valid response. Even though we didn't get an
+ // error, we should inform the user that this (valid-looking) response
+ // wasn't utilized.
+ err = fmt.Errorf("fetched %v OCSP response of status %v; wanted either good (%v) or revoked (%v)", method, ocspRes.Status, ocsp.Good, ocsp.Revoked)
+ retErr = multierror.Append(retErr, err)
+ }
+
+ if ocspRes != nil && ocspResBytes != nil {
+ // Clear retErr, because we have one parseable-but-maybe-not-quite-correct
+ // OCSP response.
+ retErr = nil
+ ocspS = &ocspStatus{
+ code: ocspSuccess,
+ }
+ }
+
+ return
+}
+
+// GetRevocationStatus checks the certificate revocation status for subject using issuer certificate.
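+//
+// An illustrative call (leafCert, issuerCert, and conf are supplied by the
+// caller; the names are not part of this package):
+//
+//	conf := &VerifyConfig{OcspEnabled: true, OcspFailureMode: FailOpenFalse}
+//	status, err := c.GetRevocationStatus(ctx, leafCert, issuerCert, conf)
+//
+// The in-memory cache is consulted first; on a miss, each responder URL (or
+// conf.OcspServersOverride, when set) is queried, concurrently when
+// conf.QueryAllServers is true, and any usable answer (good, revoked, or
+// unknown) is written back to the cache.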
+func (c *Client) GetRevocationStatus(ctx context.Context, subject, issuer *x509.Certificate, conf *VerifyConfig) (*ocspStatus, error) {
+ status, ocspReq, encodedCertID, err := c.validateWithCache(subject, issuer)
+ if err != nil {
+ return nil, err
+ }
+ if isValidOCSPStatus(status.code) {
+ return status, nil
+ }
+ if ocspReq == nil || encodedCertID == nil {
+ return status, nil
+ }
+ c.Logger().Debug("cache missed", "server", subject.OCSPServer)
+ if len(subject.OCSPServer) == 0 && len(conf.OcspServersOverride) == 0 {
+ return nil, fmt.Errorf("no OCSP responder URL: subject: %v", subject.Subject)
+ }
+ ocspHosts := subject.OCSPServer
+ if len(conf.OcspServersOverride) > 0 {
+ ocspHosts = conf.OcspServersOverride
+ }
+
+ var wg sync.WaitGroup
+
+ ocspStatuses := make([]*ocspStatus, len(ocspHosts))
+ ocspResponses := make([]*ocsp.Response, len(ocspHosts))
+ errors := make([]error, len(ocspHosts))
+
+ for i, ocspHost := range ocspHosts {
+ u, err := url.Parse(ocspHost)
+ if err != nil {
+ return nil, err
+ }
+
+ hostname := u.Hostname()
+
+ headers := make(map[string]string)
+ headers[httpHeaderContentType] = ocspRequestContentType
+ headers[httpHeaderAccept] = ocspResponseContentType
+ headers[httpHeaderContentLength] = strconv.Itoa(len(ocspReq))
+ headers[httpHeaderHost] = hostname
+ timeout := defaultOCSPResponderTimeout
+
+ ocspClient := retryablehttp.NewClient()
+ ocspClient.HTTPClient.Timeout = timeout
+ ocspClient.HTTPClient.Transport = newInsecureOcspTransport(conf.ExtraCas)
+
+ doRequest := func() error {
+ if conf.QueryAllServers {
+ defer wg.Done()
+ }
+ ocspRes, _, ocspS, err := c.retryOCSP(
+ ctx, ocspClient, retryablehttp.NewRequest, u, headers, ocspReq, issuer)
+ ocspResponses[i] = ocspRes
+ if err != nil {
+ errors[i] = err
+ return err
+ }
+ if ocspS.code != ocspSuccess {
+ ocspStatuses[i] = ocspS
+ return nil
+ }
+
+ ret, err := validateOCSP(ocspRes)
+ if err != nil {
+ errors[i] = err
+ return err
+ }
+ if isValidOCSPStatus(ret.code) {
+ ocspStatuses[i] = ret
+ } else if ret.err != nil {
+ // This check needs to occur after the isValidOCSPStatus as the unknown
+ // status also sets an err value within ret.
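+ // (ocspStatusUnknown still counts as "valid" above, so an unknown answer
+ // is recorded in ocspStatuses rather than surfaced as an error here.)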
+ errors[i] = ret.err
+ return ret.err
+ }
+ return nil
+ }
+ if conf.QueryAllServers {
+ wg.Add(1)
+ go doRequest()
+ } else {
+ err = doRequest()
+ if err == nil {
+ break
+ }
+ }
+ }
+ if conf.QueryAllServers {
+ wg.Wait()
+ }
+ // Good by default
+ var ret *ocspStatus
+ ocspRes := ocspResponses[0]
+ var firstError error
+ for i := range ocspHosts {
+ if errors[i] != nil {
+ if firstError == nil {
+ firstError = errors[i]
+ }
+ } else if ocspStatuses[i] != nil {
+ switch ocspStatuses[i].code {
+ case ocspStatusRevoked:
+ ret = ocspStatuses[i]
+ ocspRes = ocspResponses[i]
+ break
+ case ocspStatusGood:
+ // Use this response only if we don't have a status already, or if what we have was unknown
+ if ret == nil || ret.code == ocspStatusUnknown {
+ ret = ocspStatuses[i]
+ ocspRes = ocspResponses[i]
+ }
+ case ocspStatusUnknown:
+ if ret == nil {
+ // We may want to use this as the overall result
+ ret = ocspStatuses[i]
+ ocspRes = ocspResponses[i]
+ }
+ }
+ }
+ }
+
+ // If no server reported the cert revoked, but we did have an error, report it
+ if (ret == nil || ret.code == ocspStatusUnknown) && firstError != nil {
+ return nil, firstError
+ }
+ // An extra safety in case ret and firstError are both nil
+ if ret == nil {
+ return nil, fmt.Errorf("failed to extract a known response code or error from the OCSP server")
+ }
+
+ // otherwise ret should contain a response for the overall request
+ if !isValidOCSPStatus(ret.code) {
+ return ret, nil
+ }
+ v := ocspCachedResponse{
+ status: ret.code,
+ time: float64(time.Now().UTC().Unix()),
+ producedAt: float64(ocspRes.ProducedAt.UTC().Unix()),
+ thisUpdate: float64(ocspRes.ThisUpdate.UTC().Unix()),
+ nextUpdate: float64(ocspRes.NextUpdate.UTC().Unix()),
+ }
+
+ c.ocspResponseCacheLock.Lock()
+ c.ocspResponseCache.Add(*encodedCertID, &v)
+ c.cacheUpdated = true
+ c.ocspResponseCacheLock.Unlock()
+ return ret, nil
+}
+
+func isValidOCSPStatus(status ocspStatusCode) bool {
+ return status == ocspStatusGood || status == ocspStatusRevoked || status == ocspStatusUnknown
+}
+
+type VerifyConfig struct {
+ OcspEnabled bool
+ ExtraCas []*x509.Certificate
+ OcspServersOverride []string
+ OcspFailureMode FailOpenMode
+ QueryAllServers bool
+}
+
+// VerifyLeafCertificate verifies just the subject against its direct issuer
+func (c *Client) VerifyLeafCertificate(ctx context.Context, subject, issuer *x509.Certificate, conf *VerifyConfig) error {
+ results, err := c.GetRevocationStatus(ctx, subject, issuer, conf)
+ if err != nil {
+ return err
+ }
+ if results.code == ocspStatusGood {
+ return nil
+ } else {
+ serial := subject.SerialNumber
+ serialHex := strings.TrimSpace(certutil.GetHexFormatted(serial.Bytes(), ":"))
+ if results.code == ocspStatusRevoked {
+ return fmt.Errorf("certificate with serial number %s has been revoked", serialHex)
+ } else if conf.OcspFailureMode == FailOpenFalse {
+ return fmt.Errorf("unknown OCSP status for cert with serial number %s", serialHex)
+ } else {
+ c.Logger().Warn("could not validate OCSP status for cert, but continuing in fail open mode", "serial", serialHex)
+ }
+ }
+ return nil
+}
+
+// VerifyPeerCertificate verifies the revocation status of every certificate
+// in the verified chains.
+func (c *Client) VerifyPeerCertificate(ctx context.Context, verifiedChains [][]*x509.Certificate, conf *VerifyConfig) error {
+ for i := 0; i < len(verifiedChains); i++ {
+ // Certificate signed by Root CA.
This should be the second-to-last entry in the certificate chain.
+ numberOfNoneRootCerts := len(verifiedChains[i]) - 1
+ if !verifiedChains[i][numberOfNoneRootCerts].IsCA || string(verifiedChains[i][numberOfNoneRootCerts].RawIssuer) != string(verifiedChains[i][numberOfNoneRootCerts].RawSubject) {
+ // Check whether the last cert in the chain is a self-signed CA;
+ // if it is not, look up its issuer in caRoot and append it.
+ rca := c.caRoot[string(verifiedChains[i][numberOfNoneRootCerts].RawIssuer)]
+ if rca == nil {
+ return fmt.Errorf("failed to find root CA. pkix.name: %v", verifiedChains[i][numberOfNoneRootCerts].Issuer)
+ }
+ verifiedChains[i] = append(verifiedChains[i], rca)
+ numberOfNoneRootCerts++
+ }
+ results, err := c.GetAllRevocationStatus(ctx, verifiedChains[i], conf)
+ if err != nil {
+ return err
+ }
+ if r := c.canEarlyExitForOCSP(results, numberOfNoneRootCerts, conf); r != nil {
+ return r.err
+ }
+ }
+
+ return nil
+}
+
+func (c *Client) canEarlyExitForOCSP(results []*ocspStatus, chainSize int, conf *VerifyConfig) *ocspStatus {
+ msg := ""
+ if conf.OcspFailureMode == FailOpenFalse {
+ // Fail closed. any error is returned to stop connection
+ for _, r := range results {
+ if r.err != nil {
+ return r
+ }
+ }
+ } else {
+ // Fail open and all results are valid.
+ allValid := len(results) == chainSize
+ for _, r := range results {
+ if !isValidOCSPStatus(r.code) {
+ allValid = false
+ break
+ }
+ }
+ for _, r := range results {
+ if allValid && r.code == ocspStatusRevoked {
+ return r
+ }
+ if r != nil && r.code != ocspStatusGood && r.err != nil {
+ msg += r.err.Error()
+ }
+ }
+ }
+ if len(msg) > 0 {
+ c.Logger().Warn(
+ "OCSP is set to fail-open; revocation status could not be retrieved, proceeding anyway.", "detail", msg)
+ }
+ return nil
+}
+
+func (c *Client) validateWithCacheForAllCertificates(verifiedChains []*x509.Certificate) (bool, error) {
+ n := len(verifiedChains) - 1
+ for j := 0; j < n; j++ {
+ subject := verifiedChains[j]
+ issuer := verifiedChains[j+1]
+ status, _, _, err := c.validateWithCache(subject, issuer)
+ if err != nil {
+ return false, err
+ }
+ if !isValidOCSPStatus(status.code) {
+ return false, nil
+ }
+ }
+ return true, nil
+}
+
+func (c *Client) validateWithCache(subject, issuer *x509.Certificate) (*ocspStatus, []byte, *certIDKey, error) {
+ ocspReq, err := ocsp.CreateRequest(subject, issuer, &ocsp.RequestOptions{})
+ if err != nil {
+ return nil, nil, nil, fmt.Errorf("failed to create OCSP request from the certificates: %v", err)
+ }
+ encodedCertID, ocspS := extractCertIDKeyFromRequest(ocspReq)
+ if ocspS.code != ocspSuccess {
+ return nil, nil, nil, fmt.Errorf("failed to extract CertID from OCSP Request: %v", ocspS.err)
+ }
+ status, err := c.checkOCSPResponseCache(encodedCertID, subject, issuer)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ return status, ocspReq, encodedCertID, nil
+}
+
+func (c *Client) GetAllRevocationStatus(ctx context.Context, verifiedChains []*x509.Certificate, conf *VerifyConfig) ([]*ocspStatus, error) {
+ _, err := c.validateWithCacheForAllCertificates(verifiedChains)
+ if err != nil {
+ return nil, err
+ }
+ n := len(verifiedChains) - 1
+ results := make([]*ocspStatus, n)
+ for j := 0; j < n; j++ {
+ results[j], err = c.GetRevocationStatus(ctx, verifiedChains[j], verifiedChains[j+1], conf)
+ if err != nil {
+ return nil, err
+ }
+ if !isValidOCSPStatus(results[j].code) {
+ return results, nil
+ }
+ }
+ return results, nil
+}
+
+// verifyPeerCertificateSerial verifies the certificate revocation status in
serial. +func (c *Client) verifyPeerCertificateSerial(conf *VerifyConfig) func(_ [][]byte, verifiedChains [][]*x509.Certificate) (err error) { + return func(_ [][]byte, verifiedChains [][]*x509.Certificate) error { + return c.VerifyPeerCertificate(context.TODO(), verifiedChains, conf) + } +} + +func (c *Client) extractOCSPCacheResponseValueWithoutSubject(cacheValue ocspCachedResponse) (*ocspStatus, error) { + return c.extractOCSPCacheResponseValue(&cacheValue, nil, nil) +} + +func (c *Client) extractOCSPCacheResponseValue(cacheValue *ocspCachedResponse, subject, issuer *x509.Certificate) (*ocspStatus, error) { + subjectName := "Unknown" + if subject != nil { + subjectName = subject.Subject.CommonName + } + + curTime := time.Now() + if cacheValue == nil { + return &ocspStatus{ + code: ocspMissedCache, + err: fmt.Errorf("miss cache data. subject: %v", subjectName), + }, nil + } + currentTime := float64(curTime.UTC().Unix()) + if currentTime-cacheValue.time >= cacheExpire { + return &ocspStatus{ + code: ocspCacheExpired, + err: fmt.Errorf("cache expired. current: %v, cache: %v", + time.Unix(int64(currentTime), 0).UTC(), time.Unix(int64(cacheValue.time), 0).UTC()), + }, nil + } + + return validateOCSP(&ocsp.Response{ + ProducedAt: time.Unix(int64(cacheValue.producedAt), 0).UTC(), + ThisUpdate: time.Unix(int64(cacheValue.thisUpdate), 0).UTC(), + NextUpdate: time.Unix(int64(cacheValue.nextUpdate), 0).UTC(), + Status: int(cacheValue.status), + }) +} + +/* +// writeOCSPCache writes a OCSP Response cache +func (c *Client) writeOCSPCache(ctx context.Context, storage logical.Storage) error { + c.Logger().Debug("writing OCSP Response cache") + t := time.Now() + m := make(map[string][]interface{}) + keys := c.ocspResponseCache.Keys() + if len(keys) > persistedCacheSize { + keys = keys[:persistedCacheSize] + } + for _, k := range keys { + e, ok := c.ocspResponseCache.Get(k) + if ok { + entry := e.(*ocspCachedResponse) + // Don't store if expired + if isInValidityRange(t, time.Unix(int64(entry.thisUpdate), 0), time.Unix(int64(entry.nextUpdate), 0)) { + key := k.(certIDKey) + cacheKeyInBase64, err := decodeCertIDKey(&key) + if err != nil { + return err + } + m[cacheKeyInBase64] = []interface{}{entry.status, entry.time, entry.producedAt, entry.thisUpdate, entry.nextUpdate} + } + } + } + + v, err := jsonutil.EncodeJSONAndCompress(m, nil) + if err != nil { + return err + } + entry := logical.StorageEntry{ + Key: ocspCacheKey, + Value: v, + } + return storage.Put(ctx, &entry) +} + +// readOCSPCache reads a OCSP Response cache from storage +func (c *Client) readOCSPCache(ctx context.Context, storage logical.Storage) error { + c.Logger().Debug("reading OCSP Response cache") + + entry, err := storage.Get(ctx, ocspCacheKey) + if err != nil { + return err + } + if entry == nil { + return nil + } + var untypedCache map[string][]interface{} + + err = jsonutil.DecodeJSON(entry.Value, &untypedCache) + if err != nil { + return errors.New("failed to unmarshal OCSP cache") + } + + for k, v := range untypedCache { + key, err := c.encodeCertIDKey(k) + if err != nil { + return err + } + var times [4]float64 + for i, t := range v[1:] { + if jn, ok := t.(json.Number); ok { + times[i], err = jn.Float64() + if err != nil { + return err + } + } else { + times[i] = t.(float64) + } + } + var status int + if jn, ok := v[0].(json.Number); ok { + s, err := jn.Int64() + if err != nil { + return err + } + status = int(s) + } else { + status = v[0].(int) + } + + c.ocspResponseCache.Add(*key, &ocspCachedResponse{ + status: 
ocspStatusCode(status), + time: times[0], + producedAt: times[1], + thisUpdate: times[2], + nextUpdate: times[3], + }) + } + + return nil +} +*/ + +func New(logFactory func() hclog.Logger, cacheSize int) *Client { + if cacheSize < 100 { + cacheSize = 100 + } + cache, _ := lru.New2Q(cacheSize) + c := Client{ + caRoot: make(map[string]*x509.Certificate), + ocspResponseCache: cache, + logFactory: logFactory, + } + + return &c +} + +func (c *Client) Logger() hclog.Logger { + return c.logFactory() +} + +// insecureOcspTransport is the transport object that doesn't do certificate revocation check. +func newInsecureOcspTransport(extraCas []*x509.Certificate) *http.Transport { + // Get the SystemCertPool, continue with an empty pool on error + rootCAs, _ := x509.SystemCertPool() + if rootCAs == nil { + rootCAs = x509.NewCertPool() + } + for _, c := range extraCas { + rootCAs.AddCert(c) + } + config := &tls.Config{ + RootCAs: rootCAs, + } + return &http.Transport{ + MaxIdleConns: 10, + IdleConnTimeout: 30 * time.Minute, + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + TLSClientConfig: config, + } +} + +// NewTransport includes the certificate revocation check with OCSP in sequential. +func (c *Client) NewTransport(conf *VerifyConfig) *http.Transport { + rootCAs := c.certPool + if rootCAs == nil { + rootCAs, _ = x509.SystemCertPool() + } + if rootCAs == nil { + rootCAs = x509.NewCertPool() + } + for _, c := range conf.ExtraCas { + rootCAs.AddCert(c) + } + return &http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: rootCAs, + VerifyPeerCertificate: c.verifyPeerCertificateSerial(conf), + }, + MaxIdleConns: 10, + IdleConnTimeout: 30 * time.Minute, + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + } +} + +/* +func (c *Client) WriteCache(ctx context.Context, storage logical.Storage) error { + c.ocspResponseCacheLock.Lock() + defer c.ocspResponseCacheLock.Unlock() + if c.cacheUpdated { + err := c.writeOCSPCache(ctx, storage) + if err == nil { + c.cacheUpdated = false + } + return err + } + return nil +} + +func (c *Client) ReadCache(ctx context.Context, storage logical.Storage) error { + c.ocspResponseCacheLock.Lock() + defer c.ocspResponseCacheLock.Unlock() + return c.readOCSPCache(ctx, storage) +} +*/ +/* + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2017-2022 Snowflake Computing Inc. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ diff --git a/sdk/helper/ocsp/ocsp_test.go b/sdk/helper/ocsp/ocsp_test.go new file mode 100644 index 0000000..677dfa8 --- /dev/null +++ b/sdk/helper/ocsp/ocsp_test.go @@ -0,0 +1,619 @@ +// Copyright (c) 2017-2022 Snowflake Computing Inc. All rights reserved. 
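+//
+// A minimal sketch of how the client under test is wired into an
+// http.Client, following TestOCSP below; New, VerifyConfig, FailOpenFalse
+// and NewTransport come from this package, and the target URL is
+// illustrative:
+//
+//	c := New(testLogFactory, 10)
+//	conf := VerifyConfig{OcspFailureMode: FailOpenFalse}
+//	hc := &http.Client{Transport: c.NewTransport(&conf), Timeout: 30 * time.Second}
+//	resp, err := hc.Get("https://example.com/") // TLS chain is OCSP-verified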
+ +package ocsp + +import ( + "bytes" + "context" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "errors" + "fmt" + "io" + "io/ioutil" + "math/big" + "net" + "net/http" + "net/http/httptest" + "net/url" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-retryablehttp" + lru "github.com/hashicorp/golang-lru" + "github.com/stretchr/testify/require" + "golang.org/x/crypto/ocsp" +) + +func TestOCSP(t *testing.T) { + targetURL := []string{ + "https://sfcdev1.blob.core.windows.net/", + "https://sfctest0.snowflakecomputing.com/", + "https://s3-us-west-2.amazonaws.com/sfc-snowsql-updates/?prefix=1.1/windows_x86_64", + } + + conf := VerifyConfig{ + OcspFailureMode: FailOpenFalse, + } + c := New(testLogFactory, 10) + transports := []*http.Transport{ + newInsecureOcspTransport(nil), + c.NewTransport(&conf), + } + + for _, tgt := range targetURL { + c.ocspResponseCache, _ = lru.New2Q(10) + for _, tr := range transports { + c := &http.Client{ + Transport: tr, + Timeout: 30 * time.Second, + } + req, err := http.NewRequest("GET", tgt, bytes.NewReader(nil)) + if err != nil { + t.Fatalf("fail to create a request. err: %v", err) + } + res, err := c.Do(req) + if err != nil { + t.Fatalf("failed to GET contents. err: %v", err) + } + defer res.Body.Close() + _, err = ioutil.ReadAll(res.Body) + if err != nil { + t.Fatalf("failed to read content body for %v", tgt) + } + + } + } +} + +/** +// Used for development, requires an active Vault with PKI setup +func TestMultiOCSP(t *testing.T) { + + targetURL := []string{ + "https://localhost:8200/v1/pki/ocsp", + "https://localhost:8200/v1/pki/ocsp", + "https://localhost:8200/v1/pki/ocsp", + } + + b, _ := pem.Decode([]byte(vaultCert)) + caCert, _ := x509.ParseCertificate(b.Bytes) + conf := VerifyConfig{ + OcspFailureMode: FailOpenFalse, + QueryAllServers: true, + OcspServersOverride: targetURL, + ExtraCas: []*x509.Certificate{caCert}, + } + c := New(testLogFactory, 10) + transports := []*http.Transport{ + newInsecureOcspTransport(conf.ExtraCas), + c.NewTransport(&conf), + } + + tgt := "https://localhost:8200/v1/pki/ca/pem" + c.ocspResponseCache, _ = lru.New2Q(10) + for _, tr := range transports { + c := &http.Client{ + Transport: tr, + Timeout: 30 * time.Second, + } + req, err := http.NewRequest("GET", tgt, bytes.NewReader(nil)) + if err != nil { + t.Fatalf("fail to create a request. err: %v", err) + } + res, err := c.Do(req) + if err != nil { + t.Fatalf("failed to GET contents. err: %v", err) + } + defer res.Body.Close() + _, err = ioutil.ReadAll(res.Body) + if err != nil { + t.Fatalf("failed to read content body for %v", tgt) + } + } +} +*/ + +func TestUnitEncodeCertIDGood(t *testing.T) { + targetURLs := []string{ + "faketestaccount.snowflakecomputing.com:443", + "s3-us-west-2.amazonaws.com:443", + "sfcdev1.blob.core.windows.net:443", + } + for _, tt := range targetURLs { + chainedCerts := getCert(tt) + for i := 0; i < len(chainedCerts)-1; i++ { + subject := chainedCerts[i] + issuer := chainedCerts[i+1] + ocspServers := subject.OCSPServer + if len(ocspServers) == 0 { + t.Fatalf("no OCSP server is found. cert: %v", subject.Subject) + } + ocspReq, err := ocsp.CreateRequest(subject, issuer, &ocsp.RequestOptions{}) + if err != nil { + t.Fatalf("failed to create OCSP request. err: %v", err) + } + var ost *ocspStatus + _, ost = extractCertIDKeyFromRequest(ocspReq) + if ost.err != nil { + t.Fatalf("failed to extract cert ID from the OCSP request. 
err: %v", ost.err) + } + // better hash. Not sure if the actual OCSP server accepts this, though. + ocspReq, err = ocsp.CreateRequest(subject, issuer, &ocsp.RequestOptions{Hash: crypto.SHA512}) + if err != nil { + t.Fatalf("failed to create OCSP request. err: %v", err) + } + _, ost = extractCertIDKeyFromRequest(ocspReq) + if ost.err != nil { + t.Fatalf("failed to extract cert ID from the OCSP request. err: %v", ost.err) + } + // tweaked request binary + ocspReq, err = ocsp.CreateRequest(subject, issuer, &ocsp.RequestOptions{Hash: crypto.SHA512}) + if err != nil { + t.Fatalf("failed to create OCSP request. err: %v", err) + } + ocspReq[10] = 0 // random change + _, ost = extractCertIDKeyFromRequest(ocspReq) + if ost.err == nil { + t.Fatal("should have failed") + } + } + } +} + +func TestUnitCheckOCSPResponseCache(t *testing.T) { + c := New(testLogFactory, 10) + dummyKey0 := certIDKey{ + NameHash: "dummy0", + IssuerKeyHash: "dummy0", + SerialNumber: "dummy0", + } + dummyKey := certIDKey{ + NameHash: "dummy1", + IssuerKeyHash: "dummy1", + SerialNumber: "dummy1", + } + currentTime := float64(time.Now().UTC().Unix()) + c.ocspResponseCache.Add(dummyKey0, &ocspCachedResponse{time: currentTime}) + subject := &x509.Certificate{} + issuer := &x509.Certificate{} + ost, err := c.checkOCSPResponseCache(&dummyKey, subject, issuer) + if err != nil { + t.Fatal(err) + } + if ost.code != ocspMissedCache { + t.Fatalf("should have failed. expected: %v, got: %v", ocspMissedCache, ost.code) + } + // old timestamp + c.ocspResponseCache.Add(dummyKey, &ocspCachedResponse{time: float64(1395054952)}) + ost, err = c.checkOCSPResponseCache(&dummyKey, subject, issuer) + if err != nil { + t.Fatal(err) + } + if ost.code != ocspCacheExpired { + t.Fatalf("should have failed. expected: %v, got: %v", ocspCacheExpired, ost.code) + } + + // invalid validity + c.ocspResponseCache.Add(dummyKey, &ocspCachedResponse{time: float64(currentTime - 1000)}) + ost, err = c.checkOCSPResponseCache(&dummyKey, subject, nil) + if err == nil && isValidOCSPStatus(ost.code) { + t.Fatalf("should have failed.") + } +} + +func TestUnitExpiredOCSPResponse(t *testing.T) { + rootCaKey, rootCa, leafCert := createCaLeafCerts(t) + + expiredOcspResponse := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + ocspRes := ocsp.Response{ + SerialNumber: big.NewInt(2), + ThisUpdate: now.Add(-1 * time.Hour), + NextUpdate: now.Add(-30 * time.Minute), + Status: ocsp.Good, + } + response, err := ocsp.CreateResponse(rootCa, rootCa, ocspRes, rootCaKey) + if err != nil { + _, _ = w.Write(ocsp.InternalErrorErrorResponse) + t.Fatalf("failed generating OCSP response: %v", err) + } + _, _ = w.Write(response) + }) + ts := httptest.NewServer(expiredOcspResponse) + defer ts.Close() + + logFactory := func() hclog.Logger { + return hclog.NewNullLogger() + } + client := New(logFactory, 100) + + ctx := context.Background() + + config := &VerifyConfig{ + OcspEnabled: true, + OcspServersOverride: []string{ts.URL}, + OcspFailureMode: FailOpenFalse, + QueryAllServers: false, + } + + status, err := client.GetRevocationStatus(ctx, leafCert, rootCa, config) + require.ErrorContains(t, err, "invalid validity", + "Expected error got response: %v, %v", status, err) +} + +func createCaLeafCerts(t *testing.T) (*ecdsa.PrivateKey, *x509.Certificate, *x509.Certificate) { + rootCaKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generated root key for CA") + + // Validate we reject CSRs that contain CN that aren't in 
the original order + cr := &x509.Certificate{ + Subject: pkix.Name{CommonName: "Root Cert"}, + SerialNumber: big.NewInt(1), + IsCA: true, + BasicConstraintsValid: true, + SignatureAlgorithm: x509.ECDSAWithSHA256, + NotBefore: time.Now().Add(-1 * time.Second), + NotAfter: time.Now().AddDate(1, 0, 0), + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageOCSPSigning}, + } + rootCaBytes, err := x509.CreateCertificate(rand.Reader, cr, cr, &rootCaKey.PublicKey, rootCaKey) + require.NoError(t, err, "failed generating root ca") + + rootCa, err := x509.ParseCertificate(rootCaBytes) + require.NoError(t, err, "failed parsing root ca") + + leafKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err, "failed generated leaf key") + + cr = &x509.Certificate{ + Subject: pkix.Name{CommonName: "Leaf Cert"}, + SerialNumber: big.NewInt(2), + SignatureAlgorithm: x509.ECDSAWithSHA256, + NotBefore: time.Now().Add(-1 * time.Second), + NotAfter: time.Now().AddDate(1, 0, 0), + KeyUsage: x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + } + leafCertBytes, err := x509.CreateCertificate(rand.Reader, cr, rootCa, &leafKey.PublicKey, rootCaKey) + require.NoError(t, err, "failed generating root ca") + + leafCert, err := x509.ParseCertificate(leafCertBytes) + require.NoError(t, err, "failed parsing root ca") + return rootCaKey, rootCa, leafCert +} + +func TestUnitValidateOCSP(t *testing.T) { + ocspRes := &ocsp.Response{} + ost, err := validateOCSP(ocspRes) + if err == nil && isValidOCSPStatus(ost.code) { + t.Fatalf("should have failed.") + } + + currentTime := time.Now() + ocspRes.ThisUpdate = currentTime.Add(-2 * time.Hour) + ocspRes.NextUpdate = currentTime.Add(2 * time.Hour) + ocspRes.Status = ocsp.Revoked + ost, err = validateOCSP(ocspRes) + if err != nil { + t.Fatal(err) + } + + if ost.code != ocspStatusRevoked { + t.Fatalf("should have failed. expected: %v, got: %v", ocspStatusRevoked, ost.code) + } + ocspRes.Status = ocsp.Good + ost, err = validateOCSP(ocspRes) + if err != nil { + t.Fatal(err) + } + + if ost.code != ocspStatusGood { + t.Fatalf("should have success. expected: %v, got: %v", ocspStatusGood, ost.code) + } + ocspRes.Status = ocsp.Unknown + ost, err = validateOCSP(ocspRes) + if err != nil { + t.Fatal(err) + } + if ost.code != ocspStatusUnknown { + t.Fatalf("should have failed. expected: %v, got: %v", ocspStatusUnknown, ost.code) + } + ocspRes.Status = ocsp.ServerFailed + ost, err = validateOCSP(ocspRes) + if err != nil { + t.Fatal(err) + } + if ost.code != ocspStatusOthers { + t.Fatalf("should have failed. expected: %v, got: %v", ocspStatusOthers, ost.code) + } +} + +func TestUnitEncodeCertID(t *testing.T) { + var st *ocspStatus + _, st = extractCertIDKeyFromRequest([]byte{0x1, 0x2}) + if st.code != ocspFailedDecomposeRequest { + t.Fatalf("failed to get OCSP status. 
expected: %v, got: %v", ocspFailedDecomposeRequest, st.code) + } +} + +func getCert(addr string) []*x509.Certificate { + tcpConn, err := net.DialTimeout("tcp", addr, 40*time.Second) + if err != nil { + panic(err) + } + defer tcpConn.Close() + + err = tcpConn.SetDeadline(time.Now().Add(10 * time.Second)) + if err != nil { + panic(err) + } + config := tls.Config{InsecureSkipVerify: true, ServerName: addr} + + conn := tls.Client(tcpConn, &config) + defer conn.Close() + + err = conn.Handshake() + if err != nil { + panic(err) + } + + state := conn.ConnectionState() + + return state.PeerCertificates +} + +func TestOCSPRetry(t *testing.T) { + c := New(testLogFactory, 10) + certs := getCert("s3-us-west-2.amazonaws.com:443") + dummyOCSPHost := &url.URL{ + Scheme: "https", + Host: "dummyOCSPHost", + } + client := &fakeHTTPClient{ + cnt: 3, + success: true, + body: []byte{1, 2, 3}, + logger: hclog.New(hclog.DefaultOptions), + t: t, + } + res, b, st, err := c.retryOCSP( + context.TODO(), + client, fakeRequestFunc, + dummyOCSPHost, + make(map[string]string), []byte{0}, certs[len(certs)-1]) + if err == nil { + fmt.Printf("should fail: %v, %v, %v\n", res, b, st) + } + client = &fakeHTTPClient{ + cnt: 30, + success: true, + body: []byte{1, 2, 3}, + logger: hclog.New(hclog.DefaultOptions), + t: t, + } + res, b, st, err = c.retryOCSP( + context.TODO(), + client, fakeRequestFunc, + dummyOCSPHost, + make(map[string]string), []byte{0}, certs[len(certs)-1]) + if err == nil { + fmt.Printf("should fail: %v, %v, %v\n", res, b, st) + } +} + +type tcCanEarlyExit struct { + results []*ocspStatus + resultLen int + retFailOpen *ocspStatus + retFailClosed *ocspStatus +} + +func TestCanEarlyExitForOCSP(t *testing.T) { + testcases := []tcCanEarlyExit{ + { // 0 + results: []*ocspStatus{ + { + code: ocspStatusGood, + }, + { + code: ocspStatusGood, + }, + { + code: ocspStatusGood, + }, + }, + retFailOpen: nil, + retFailClosed: nil, + }, + { // 1 + results: []*ocspStatus{ + { + code: ocspStatusRevoked, + err: errors.New("revoked"), + }, + { + code: ocspStatusGood, + }, + { + code: ocspStatusGood, + }, + }, + retFailOpen: &ocspStatus{ocspStatusRevoked, errors.New("revoked")}, + retFailClosed: &ocspStatus{ocspStatusRevoked, errors.New("revoked")}, + }, + { // 2 + results: []*ocspStatus{ + { + code: ocspStatusUnknown, + err: errors.New("unknown"), + }, + { + code: ocspStatusGood, + }, + { + code: ocspStatusGood, + }, + }, + retFailOpen: nil, + retFailClosed: &ocspStatus{ocspStatusUnknown, errors.New("unknown")}, + }, + { // 3: not taken as revoked if any invalid OCSP response (ocspInvalidValidity) is included. + results: []*ocspStatus{ + { + code: ocspStatusRevoked, + err: errors.New("revoked"), + }, + { + code: ocspInvalidValidity, + }, + { + code: ocspStatusGood, + }, + }, + retFailOpen: nil, + retFailClosed: &ocspStatus{ocspStatusRevoked, errors.New("revoked")}, + }, + { // 4: not taken as revoked if the number of results don't match the expected results. 
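+			// (Sketch of the rule this case encodes: with fewer results than
+			// resultLen, fail-open treats the set as inconclusive and returns
+			// nil, while fail-closed still surfaces the revoked status; see
+			// retFailOpen/retFailClosed below.)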
+ results: []*ocspStatus{ + { + code: ocspStatusRevoked, + err: errors.New("revoked"), + }, + { + code: ocspStatusGood, + }, + }, + resultLen: 3, + retFailOpen: nil, + retFailClosed: &ocspStatus{ocspStatusRevoked, errors.New("revoked")}, + }, + } + c := New(testLogFactory, 10) + for idx, tt := range testcases { + expectedLen := len(tt.results) + if tt.resultLen > 0 { + expectedLen = tt.resultLen + } + r := c.canEarlyExitForOCSP(tt.results, expectedLen, &VerifyConfig{OcspFailureMode: FailOpenTrue}) + if !(tt.retFailOpen == nil && r == nil) && !(tt.retFailOpen != nil && r != nil && tt.retFailOpen.code == r.code) { + t.Fatalf("%d: failed to match return. expected: %v, got: %v", idx, tt.retFailOpen, r) + } + r = c.canEarlyExitForOCSP(tt.results, expectedLen, &VerifyConfig{OcspFailureMode: FailOpenFalse}) + if !(tt.retFailClosed == nil && r == nil) && !(tt.retFailClosed != nil && r != nil && tt.retFailClosed.code == r.code) { + t.Fatalf("%d: failed to match return. expected: %v, got: %v", idx, tt.retFailClosed, r) + } + } +} + +var testLogger = hclog.New(hclog.DefaultOptions) + +func testLogFactory() hclog.Logger { + return testLogger +} + +type fakeHTTPClient struct { + cnt int // number of retry + success bool // return success after retry in cnt times + timeout bool // timeout + body []byte // return body + t *testing.T + logger hclog.Logger + redirected bool +} + +func (c *fakeHTTPClient) Do(_ *retryablehttp.Request) (*http.Response, error) { + c.cnt-- + if c.cnt < 0 { + c.cnt = 0 + } + c.t.Log("fakeHTTPClient.cnt", c.cnt) + + var retcode int + if !c.redirected { + c.redirected = true + c.cnt++ + retcode = 405 + } else if c.success && c.cnt == 1 { + retcode = 200 + } else { + if c.timeout { + // simulate timeout + time.Sleep(time.Second * 1) + return nil, &fakeHTTPError{ + err: "Whatever reason (Client.Timeout exceeded while awaiting headers)", + timeout: true, + } + } + retcode = 0 + } + + ret := &http.Response{ + StatusCode: retcode, + Body: &fakeResponseBody{body: c.body}, + } + return ret, nil +} + +type fakeHTTPError struct { + err string + timeout bool +} + +func (e *fakeHTTPError) Error() string { return e.err } +func (e *fakeHTTPError) Timeout() bool { return e.timeout } +func (e *fakeHTTPError) Temporary() bool { return true } + +type fakeResponseBody struct { + body []byte + cnt int +} + +func (b *fakeResponseBody) Read(p []byte) (n int, err error) { + if b.cnt == 0 { + copy(p, b.body) + b.cnt = 1 + return len(b.body), nil + } + b.cnt = 0 + return 0, io.EOF +} + +func (b *fakeResponseBody) Close() error { + return nil +} + +func fakeRequestFunc(_, _ string, _ interface{}) (*retryablehttp.Request, error) { + return nil, nil +} + +const vaultCert = `-----BEGIN CERTIFICATE----- +MIIDuTCCAqGgAwIBAgIUA6VeVD1IB5rXcCZRAqPO4zr/GAMwDQYJKoZIhvcNAQEL +BQAwcjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlZBMREwDwYDVQQHDAhTb21lQ2l0 +eTESMBAGA1UECgwJTXlDb21wYW55MRMwEQYDVQQLDApNeURpdmlzaW9uMRowGAYD +VQQDDBF3d3cuY29uaHVnZWNvLmNvbTAeFw0yMjA5MDcxOTA1MzdaFw0yNDA5MDYx +OTA1MzdaMHIxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJWQTERMA8GA1UEBwwIU29t +ZUNpdHkxEjAQBgNVBAoMCU15Q29tcGFueTETMBEGA1UECwwKTXlEaXZpc2lvbjEa +MBgGA1UEAwwRd3d3LmNvbmh1Z2Vjby5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQDL9qzEXi4PIafSAqfcwcmjujFvbG1QZbI8swxnD+w8i4ufAQU5 +LDmvMrGo3ZbhJ0mCihYmFxpjhRdP2raJQ9TysHlPXHtDRpr9ckWTKBz2oIfqVtJ2 +qzteQkWCkDAO7kPqzgCFsMeoMZeONRkeGib0lEzQAbW/Rqnphg8zVVkyQ71DZ7Pc +d5WkC2E28kKcSramhWfVFpxG3hSIrLOX2esEXteLRzKxFPf+gi413JZFKYIWrebP +u5t0++MLNpuX322geoki4BWMjQsd47XILmxZ4aj33ScZvdrZESCnwP76hKIxg9mO 
+lMxrqSWKVV5jHZrElSEj9LYJgDO1Y6eItn7hAgMBAAGjRzBFMAsGA1UdDwQEAwIE +MDATBgNVHSUEDDAKBggrBgEFBQcDATAhBgNVHREEGjAYggtleGFtcGxlLmNvbYIJ +bG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IBAQA5dPdf5SdtMwe2uSspO/EuWqbM +497vMQBW1Ey8KRKasJjhvOVYMbe7De5YsnW4bn8u5pl0zQGF4hEtpmifAtVvziH/ +K+ritQj9VVNbLLCbFcg+b0kfjt4yrDZ64vWvIeCgPjG1Kme8gdUUWgu9dOud5gdx +qg/tIFv4TRS/eIIymMlfd9owOD3Ig6S5fy4NaAJFAwXf8+3Rzuc+e7JSAPgAufjh +tOTWinxvoiOLuYwo9CyGgq4qKBFsrY0aE0gdA7oTQkpbEbo2EbqiWUl/PTCl1Y4Z +nSZ0n+4q9QC9RLrWwYTwh838d5RVLUst2mBKSA+vn7YkqmBJbdBC6nkd7n7H +-----END CERTIFICATE----- +` diff --git a/sdk/helper/parseutil/parseutil.go b/sdk/helper/parseutil/parseutil.go new file mode 100644 index 0000000..5bea890 --- /dev/null +++ b/sdk/helper/parseutil/parseutil.go @@ -0,0 +1,44 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// DEPRECATED: this has been moved to go-secure-stdlib and will be removed +package parseutil + +import ( + "time" + + extparseutil "github.com/hashicorp/go-secure-stdlib/parseutil" + sockaddr "github.com/hashicorp/go-sockaddr" +) + +func ParseCapacityString(in interface{}) (uint64, error) { + return extparseutil.ParseCapacityString(in) +} + +func ParseDurationSecond(in interface{}) (time.Duration, error) { + return extparseutil.ParseDurationSecond(in) +} + +func ParseAbsoluteTime(in interface{}) (time.Time, error) { + return extparseutil.ParseAbsoluteTime(in) +} + +func ParseInt(in interface{}) (int64, error) { + return extparseutil.ParseInt(in) +} + +func ParseBool(in interface{}) (bool, error) { + return extparseutil.ParseBool(in) +} + +func ParseString(in interface{}) (string, error) { + return extparseutil.ParseString(in) +} + +func ParseCommaStringSlice(in interface{}) ([]string, error) { + return extparseutil.ParseCommaStringSlice(in) +} + +func ParseAddrs(addrs interface{}) ([]*sockaddr.SockAddrMarshaler, error) { + return extparseutil.ParseAddrs(addrs) +} diff --git a/sdk/helper/password/password.go b/sdk/helper/password/password.go new file mode 100644 index 0000000..931a72c --- /dev/null +++ b/sdk/helper/password/password.go @@ -0,0 +1,17 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// DEPRECATED: this has been moved to go-secure-stdlib and will be removed +package password + +import ( + "os" + + extpassword "github.com/hashicorp/go-secure-stdlib/password" +) + +var ErrInterrupted = extpassword.ErrInterrupted + +func Read(f *os.File) (string, error) { + return extpassword.Read(f) +} diff --git a/sdk/helper/pathmanager/pathmanager.go b/sdk/helper/pathmanager/pathmanager.go new file mode 100644 index 0000000..0d2d600 --- /dev/null +++ b/sdk/helper/pathmanager/pathmanager.go @@ -0,0 +1,139 @@ +// Copyright (c) HashiCorp, Inc. 
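+//
+// PathManager below keeps path prefixes in an immutable radix tree; entries
+// starting with "!" are exceptions and a trailing "*" is trimmed to mark a
+// pure prefix. A sketch of the intended use (paths are illustrative):
+//
+//	m := New()
+//	m.AddPaths([]string{"secret/", "!secret/top"})
+//	m.HasPath("secret/foo") // true:  longest match is "secret/"
+//	m.HasPath("secret/top") // false: longest match is the "!" exception
+//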
+// SPDX-License-Identifier: MPL-2.0 + +package pathmanager + +import ( + "strings" + "sync" + + iradix "github.com/hashicorp/go-immutable-radix" +) + +// PathManager is a prefix searchable index of paths +type PathManager struct { + l sync.RWMutex + paths *iradix.Tree +} + +// New creates a new path manager +func New() *PathManager { + return &PathManager{ + paths: iradix.New(), + } +} + +// AddPaths adds path to the paths list +func (p *PathManager) AddPaths(paths []string) { + p.l.Lock() + defer p.l.Unlock() + + txn := p.paths.Txn() + for _, prefix := range paths { + if len(prefix) == 0 { + continue + } + + var exception bool + if strings.HasPrefix(prefix, "!") { + prefix = strings.TrimPrefix(prefix, "!") + exception = true + } + + // We trim any trailing *, but we don't touch whether it is a trailing + // slash or not since we want to be able to ignore prefixes that fully + // specify a file + txn.Insert([]byte(strings.TrimSuffix(prefix, "*")), exception) + } + p.paths = txn.Commit() +} + +// RemovePaths removes paths from the paths list +func (p *PathManager) RemovePaths(paths []string) { + p.l.Lock() + defer p.l.Unlock() + + txn := p.paths.Txn() + for _, prefix := range paths { + if len(prefix) == 0 { + continue + } + + // Exceptions aren't stored with the leading ! so strip it + if strings.HasPrefix(prefix, "!") { + prefix = strings.TrimPrefix(prefix, "!") + } + + // We trim any trailing *, but we don't touch whether it is a trailing + // slash or not since we want to be able to ignore prefixes that fully + // specify a file + txn.Delete([]byte(strings.TrimSuffix(prefix, "*"))) + } + p.paths = txn.Commit() +} + +// RemovePathPrefix removes all paths with the given prefix +func (p *PathManager) RemovePathPrefix(prefix string) { + p.l.Lock() + defer p.l.Unlock() + + // We trim any trailing *, but we don't touch whether it is a trailing + // slash or not since we want to be able to ignore prefixes that fully + // specify a file + p.paths, _ = p.paths.DeletePrefix([]byte(strings.TrimSuffix(prefix, "*"))) +} + +// Len returns the number of paths +func (p *PathManager) Len() int { + return p.paths.Len() +} + +// Paths returns the path list +func (p *PathManager) Paths() []string { + p.l.RLock() + defer p.l.RUnlock() + + paths := make([]string, 0, p.paths.Len()) + walkFn := func(k []byte, v interface{}) bool { + paths = append(paths, string(k)) + return false + } + p.paths.Root().Walk(walkFn) + return paths +} + +// HasPath returns if the prefix for the path exists regardless if it is a path +// (ending with /) or a prefix for a leaf node +func (p *PathManager) HasPath(path string) bool { + p.l.RLock() + defer p.l.RUnlock() + + if _, exceptionRaw, ok := p.paths.Root().LongestPrefix([]byte(path)); ok { + var exception bool + if exceptionRaw != nil { + exception = exceptionRaw.(bool) + } + return !exception + } + return false +} + +// HasExactPath returns if the longest match is an exact match for the +// full path +func (p *PathManager) HasExactPath(path string) bool { + p.l.RLock() + defer p.l.RUnlock() + + if val, exceptionRaw, ok := p.paths.Root().LongestPrefix([]byte(path)); ok { + var exception bool + if exceptionRaw != nil { + exception = exceptionRaw.(bool) + } + + strVal := string(val) + if strings.HasSuffix(strVal, "/") || strVal == path { + return !exception + } + } + return false +} diff --git a/sdk/helper/pathmanager/pathmanager_test.go b/sdk/helper/pathmanager/pathmanager_test.go new file mode 100644 index 0000000..515d830 --- /dev/null +++ 
b/sdk/helper/pathmanager/pathmanager_test.go @@ -0,0 +1,183 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pathmanager + +import ( + "reflect" + "testing" +) + +func TestPathManager(t *testing.T) { + m := New() + + if m.Len() != 0 { + t.Fatalf("bad: path length expect 0, got %d", len(m.Paths())) + } + + paths := []string{ + "path1/", + "path2/", + "path3/", + } + + for _, path := range paths { + if m.HasPath(path) { + t.Fatalf("path should not exist in filtered paths %q", path) + } + } + + // add paths + m.AddPaths(paths) + if m.Len() != 3 { + t.Fatalf("bad: path length expect 3, got %d", len(m.Paths())) + } + if !reflect.DeepEqual(paths, m.Paths()) { + t.Fatalf("mismatch in paths") + } + for _, path := range paths { + if !m.HasPath(path) { + t.Fatalf("path should exist in filtered paths %q", path) + } + } + + // remove the paths + m.RemovePaths(paths) + + for _, path := range paths { + if m.HasPath(path) { + t.Fatalf("path should not exist in filtered paths %q", path) + } + } +} + +func TestPathManager_RemovePrefix(t *testing.T) { + m := New() + + if m.Len() != 0 { + t.Fatalf("bad: path length expect 0, got %d", len(m.Paths())) + } + + paths := []string{ + "path1/", + "path2/", + "path3/", + } + + for _, path := range paths { + if m.HasPath(path) { + t.Fatalf("path should not exist in filtered paths %q", path) + } + } + + // add paths + m.AddPaths(paths) + if m.Len() != 3 { + t.Fatalf("bad: path length expect 3, got %d", len(m.Paths())) + } + if !reflect.DeepEqual(paths, m.Paths()) { + t.Fatalf("mismatch in paths") + } + for _, path := range paths { + if !m.HasPath(path) { + t.Fatalf("path should exist in filtered paths %q", path) + } + } + + // remove the paths + m.RemovePathPrefix("path") + + if m.Len() != 0 { + t.Fatalf("bad: path length expect 0, got %d", len(m.Paths())) + } + + for _, path := range paths { + if m.HasPath(path) { + t.Fatalf("path should not exist in filtered paths %q", path) + } + } +} + +func TestPathManager_HasExactPath(t *testing.T) { + m := New() + paths := []string{ + "path1/key1", + "path1/key1/subkey1", + "path1/key1/subkey2", + "path1/key1/subkey3", + "path2/*", + "path3/", + "!path4/key1", + "!path5/*", + } + m.AddPaths(paths) + if m.Len() != len(paths) { + t.Fatalf("path count does not match: expected %d, got %d", len(paths), m.Len()) + } + + type tCase struct { + key string + expect bool + } + + tcases := []tCase{ + {"path1/key1", true}, + {"path2/key1", true}, + {"path3/key1", true}, + {"path1/key1/subkey1", true}, + {"path1/key1/subkey99", false}, + {"path2/key1/subkey1", true}, + {"path1/key1/subkey1/subkey1", false}, + {"nonexistentpath/key1", false}, + {"path4/key1", false}, + {"path5/key1/subkey1", false}, + } + + for _, tc := range tcases { + if match := m.HasExactPath(tc.key); match != tc.expect { + t.Fatalf("incorrect match: key %q", tc.key) + } + } + + m.RemovePaths(paths) + if len(m.Paths()) != 0 { + t.Fatalf("removing all paths did not clear manager: paths %v", m.Paths()) + } +} + +func TestPathManager_HasPath(t *testing.T) { + m := New() + + m.AddPaths([]string{"a/b/c/"}) + if m.HasPath("a/") { + t.Fatal("should not have path 'a/'") + } + if m.HasPath("a/b/") { + t.Fatal("should not have path 'a/b/'") + } + if !m.HasPath("a/b/c/") { + t.Fatal("should have path 'a/b/c'") + } + + m.AddPaths([]string{"a/"}) + if !m.HasPath("a/") { + t.Fatal("should have path 'a/'") + } + if !m.HasPath("a/b/") { + t.Fatal("should have path 'a/b/'") + } + if !m.HasPath("a/b/c/") { + t.Fatal("should have path 'a/b/c'") + } + + 
m.RemovePaths([]string{"a/"}) + if m.HasPath("a/") { + t.Fatal("should not have path 'a/'") + } + if m.HasPath("a/b/") { + t.Fatal("should not have path 'a/b/'") + } + if !m.HasPath("a/b/c/") { + t.Fatal("should have path 'a/b/c'") + } +} diff --git a/sdk/helper/pluginutil/env.go b/sdk/helper/pluginutil/env.go new file mode 100644 index 0000000..1b45ef3 --- /dev/null +++ b/sdk/helper/pluginutil/env.go @@ -0,0 +1,80 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pluginutil + +import ( + "os" + + "github.com/hashicorp/go-secure-stdlib/mlock" + version "github.com/hashicorp/go-version" +) + +const ( + // PluginAutoMTLSEnv is used to ensure AutoMTLS is used. This will override + // setting a TLSProviderFunc for a plugin. + PluginAutoMTLSEnv = "VAULT_PLUGIN_AUTOMTLS_ENABLED" + + // PluginMlockEnabled is the ENV name used to pass the configuration for + // enabling mlock + PluginMlockEnabled = "VAULT_PLUGIN_MLOCK_ENABLED" + + // PluginVaultVersionEnv is the ENV name used to pass the version of the + // vault server to the plugin + PluginVaultVersionEnv = "VAULT_VERSION" + + // PluginMetadataModeEnv is an ENV name used to disable TLS communication + // to bootstrap mounting plugins. + PluginMetadataModeEnv = "VAULT_PLUGIN_METADATA_MODE" + + // PluginUnwrapTokenEnv is the ENV name used to pass unwrap tokens to the + // plugin. + PluginUnwrapTokenEnv = "VAULT_UNWRAP_TOKEN" + + // PluginCACertPEMEnv is an ENV name used for holding a CA PEM-encoded + // string. Used for testing. + PluginCACertPEMEnv = "VAULT_TESTING_PLUGIN_CA_PEM" + + // PluginMultiplexingOptOut is an ENV name used to define a comma separated list of plugin names + // opted-out of the multiplexing feature; for emergencies if multiplexing ever causes issues + PluginMultiplexingOptOut = "VAULT_PLUGIN_MULTIPLEXING_OPT_OUT" +) + +// OptionallyEnableMlock determines if mlock should be called, and if so enables +// mlock. +func OptionallyEnableMlock() error { + if os.Getenv(PluginMlockEnabled) == "true" { + return mlock.LockMemory() + } + + return nil +} + +// GRPCSupport defaults to returning true, unless VAULT_VERSION is missing or +// it fails to meet the version constraint. +func GRPCSupport() bool { + verString := os.Getenv(PluginVaultVersionEnv) + // If the env var is empty, we fall back to netrpc for backward compatibility. + if verString == "" { + return false + } + if verString != "unknown" { + ver, err := version.NewVersion(verString) + if err != nil { + return true + } + // Due to some regressions on 0.9.2 & 0.9.3 we now require version 0.9.4 + // to allow the plugin framework to default to gRPC. + constraint, err := version.NewConstraint(">= 0.9.4") + if err != nil { + return true + } + return constraint.Check(ver) + } + return true +} + +// InMetadataMode returns true if the plugin calling this function is running in metadata mode. +func InMetadataMode() bool { + return os.Getenv(PluginMetadataModeEnv) == "true" +} diff --git a/sdk/helper/pluginutil/env_test.go b/sdk/helper/pluginutil/env_test.go new file mode 100644 index 0000000..21f77fa --- /dev/null +++ b/sdk/helper/pluginutil/env_test.go @@ -0,0 +1,64 @@ +// Copyright (c) HashiCorp, Inc. 
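+//
+// A sketch of the version gate exercised below: GRPCSupport reads
+// PluginVaultVersionEnv and falls back to netrpc for old or missing versions.
+//
+//	os.Setenv(PluginVaultVersionEnv, "")        // -> false (backward compat)
+//	os.Setenv(PluginVaultVersionEnv, "0.9.3")   // -> false (< 0.9.4)
+//	os.Setenv(PluginVaultVersionEnv, "0.9.4")   // -> true
+//	os.Setenv(PluginVaultVersionEnv, "unknown") // -> true (constraint skipped)
+//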
+// SPDX-License-Identifier: MPL-2.0 + +package pluginutil + +import ( + "os" + "testing" +) + +func TestGRPCSupport(t *testing.T) { + cases := []struct { + envVersion string + expected bool + }{ + { + "0.8.3", + false, + }, + { + "0.9.2", + false, + }, + { + "0.9.3", + false, + }, + { + "0.9.4+ent", + true, + }, + { + "0.9.4-beta", + false, + }, + { + "0.9.4", + true, + }, + { + "unknown", + true, + }, + { + "", + false, + }, + } + + for _, tc := range cases { + t.Run(tc.envVersion, func(t *testing.T) { + err := os.Setenv(PluginVaultVersionEnv, tc.envVersion) + if err != nil { + t.Fatal(err) + } + + result := GRPCSupport() + + if result != tc.expected { + t.Fatalf("got: %t, expected: %t", result, tc.expected) + } + }) + } +} diff --git a/sdk/helper/pluginutil/multiplexing.go b/sdk/helper/pluginutil/multiplexing.go new file mode 100644 index 0000000..8fc86a4 --- /dev/null +++ b/sdk/helper/pluginutil/multiplexing.go @@ -0,0 +1,83 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pluginutil + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + + "github.com/hashicorp/go-secure-stdlib/strutil" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +var ErrNoMultiplexingIDFound = errors.New("no multiplexing ID found") + +type PluginMultiplexingServerImpl struct { + UnimplementedPluginMultiplexingServer + + Supported bool +} + +func (pm PluginMultiplexingServerImpl) MultiplexingSupport(_ context.Context, _ *MultiplexingSupportRequest) (*MultiplexingSupportResponse, error) { + return &MultiplexingSupportResponse{ + Supported: pm.Supported, + }, nil +} + +func MultiplexingSupported(ctx context.Context, cc grpc.ClientConnInterface, name string) (bool, error) { + if cc == nil { + return false, fmt.Errorf("client connection is nil") + } + + out := strings.Split(os.Getenv(PluginMultiplexingOptOut), ",") + if strutil.StrListContains(out, name) { + return false, nil + } + + req := new(MultiplexingSupportRequest) + resp, err := NewPluginMultiplexingClient(cc).MultiplexingSupport(ctx, req) + if err != nil { + + // If the server does not implement the multiplexing server then we can + // assume it is not multiplexed + if status.Code(err) == codes.Unimplemented { + return false, nil + } + + return false, err + } + if resp == nil { + // Somehow got a nil response, assume not multiplexed + return false, nil + } + + return resp.Supported, nil +} + +func GetMultiplexIDFromContext(ctx context.Context) (string, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return "", fmt.Errorf("missing plugin multiplexing metadata") + } + + multiplexIDs := md[MultiplexingCtxKey] + if len(multiplexIDs) == 0 { + return "", ErrNoMultiplexingIDFound + } else if len(multiplexIDs) != 1 { + return "", fmt.Errorf("unexpected number of IDs in metadata: (%d)", len(multiplexIDs)) + } + + multiplexID := multiplexIDs[0] + if multiplexID == "" { + return "", fmt.Errorf("empty multiplex ID in metadata") + } + + return multiplexID, nil +} diff --git a/sdk/helper/pluginutil/multiplexing.pb.go b/sdk/helper/pluginutil/multiplexing.pb.go new file mode 100644 index 0000000..d7663b9 --- /dev/null +++ b/sdk/helper/pluginutil/multiplexing.pb.go @@ -0,0 +1,216 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. 
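+//
+// A server-side sketch of the metadata contract from multiplexing.go above;
+// GetMultiplexIDFromContext requires exactly one ID under MultiplexingCtxKey
+// (the ID value here is illustrative):
+//
+//	md := metadata.Pairs(MultiplexingCtxKey, "12345")
+//	ctx := metadata.NewIncomingContext(context.Background(), md)
+//	id, err := GetMultiplexIDFromContext(ctx) // id == "12345", err == nil
+//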
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: sdk/helper/pluginutil/multiplexing.proto + +package pluginutil + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type MultiplexingSupportRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *MultiplexingSupportRequest) Reset() { + *x = MultiplexingSupportRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_helper_pluginutil_multiplexing_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MultiplexingSupportRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MultiplexingSupportRequest) ProtoMessage() {} + +func (x *MultiplexingSupportRequest) ProtoReflect() protoreflect.Message { + mi := &file_sdk_helper_pluginutil_multiplexing_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MultiplexingSupportRequest.ProtoReflect.Descriptor instead. +func (*MultiplexingSupportRequest) Descriptor() ([]byte, []int) { + return file_sdk_helper_pluginutil_multiplexing_proto_rawDescGZIP(), []int{0} +} + +type MultiplexingSupportResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Supported bool `protobuf:"varint,1,opt,name=supported,proto3" json:"supported,omitempty"` +} + +func (x *MultiplexingSupportResponse) Reset() { + *x = MultiplexingSupportResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_helper_pluginutil_multiplexing_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MultiplexingSupportResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MultiplexingSupportResponse) ProtoMessage() {} + +func (x *MultiplexingSupportResponse) ProtoReflect() protoreflect.Message { + mi := &file_sdk_helper_pluginutil_multiplexing_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MultiplexingSupportResponse.ProtoReflect.Descriptor instead. 
+func (*MultiplexingSupportResponse) Descriptor() ([]byte, []int) { + return file_sdk_helper_pluginutil_multiplexing_proto_rawDescGZIP(), []int{1} +} + +func (x *MultiplexingSupportResponse) GetSupported() bool { + if x != nil { + return x.Supported + } + return false +} + +var File_sdk_helper_pluginutil_multiplexing_proto protoreflect.FileDescriptor + +var file_sdk_helper_pluginutil_multiplexing_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x73, 0x64, 0x6b, 0x2f, 0x68, 0x65, 0x6c, 0x70, 0x65, 0x72, 0x2f, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x75, 0x74, 0x69, 0x6c, 0x2f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, + 0x78, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x78, + 0x69, 0x6e, 0x67, 0x22, 0x1c, 0x0a, 0x1a, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x78, + 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0x3b, 0x0a, 0x1b, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x78, 0x69, 0x6e, + 0x67, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x32, 0x97, + 0x01, 0x0a, 0x12, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, + 0x65, 0x78, 0x69, 0x6e, 0x67, 0x12, 0x80, 0x01, 0x0a, 0x13, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, + 0x6c, 0x65, 0x78, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x33, 0x2e, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x6d, 0x75, 0x6c, 0x74, 0x69, + 0x70, 0x6c, 0x65, 0x78, 0x69, 0x6e, 0x67, 0x2e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, + 0x78, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x75, 0x74, 0x69, 0x6c, 0x2e, + 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x78, 0x69, 0x6e, 0x67, 0x2e, 0x4d, 0x75, 0x6c, + 0x74, 0x69, 0x70, 0x6c, 0x65, 0x78, 0x69, 0x6e, 0x67, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, + 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x68, 0x65, 0x6c, 0x70, 0x65, + 0x72, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x75, 0x74, 0x69, 0x6c, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sdk_helper_pluginutil_multiplexing_proto_rawDescOnce sync.Once + file_sdk_helper_pluginutil_multiplexing_proto_rawDescData = file_sdk_helper_pluginutil_multiplexing_proto_rawDesc +) + +func file_sdk_helper_pluginutil_multiplexing_proto_rawDescGZIP() []byte { + file_sdk_helper_pluginutil_multiplexing_proto_rawDescOnce.Do(func() { + file_sdk_helper_pluginutil_multiplexing_proto_rawDescData = protoimpl.X.CompressGZIP(file_sdk_helper_pluginutil_multiplexing_proto_rawDescData) + }) + return file_sdk_helper_pluginutil_multiplexing_proto_rawDescData +} + +var file_sdk_helper_pluginutil_multiplexing_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_sdk_helper_pluginutil_multiplexing_proto_goTypes = []interface{}{ + (*MultiplexingSupportRequest)(nil), // 0: pluginutil.multiplexing.MultiplexingSupportRequest + 
(*MultiplexingSupportResponse)(nil), // 1: pluginutil.multiplexing.MultiplexingSupportResponse +} +var file_sdk_helper_pluginutil_multiplexing_proto_depIdxs = []int32{ + 0, // 0: pluginutil.multiplexing.PluginMultiplexing.MultiplexingSupport:input_type -> pluginutil.multiplexing.MultiplexingSupportRequest + 1, // 1: pluginutil.multiplexing.PluginMultiplexing.MultiplexingSupport:output_type -> pluginutil.multiplexing.MultiplexingSupportResponse + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_sdk_helper_pluginutil_multiplexing_proto_init() } +func file_sdk_helper_pluginutil_multiplexing_proto_init() { + if File_sdk_helper_pluginutil_multiplexing_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_sdk_helper_pluginutil_multiplexing_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MultiplexingSupportRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_helper_pluginutil_multiplexing_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MultiplexingSupportResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_sdk_helper_pluginutil_multiplexing_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_sdk_helper_pluginutil_multiplexing_proto_goTypes, + DependencyIndexes: file_sdk_helper_pluginutil_multiplexing_proto_depIdxs, + MessageInfos: file_sdk_helper_pluginutil_multiplexing_proto_msgTypes, + }.Build() + File_sdk_helper_pluginutil_multiplexing_proto = out.File + file_sdk_helper_pluginutil_multiplexing_proto_rawDesc = nil + file_sdk_helper_pluginutil_multiplexing_proto_goTypes = nil + file_sdk_helper_pluginutil_multiplexing_proto_depIdxs = nil +} diff --git a/sdk/helper/pluginutil/multiplexing.proto b/sdk/helper/pluginutil/multiplexing.proto new file mode 100644 index 0000000..c1a2ca0 --- /dev/null +++ b/sdk/helper/pluginutil/multiplexing.proto @@ -0,0 +1,16 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +syntax = "proto3"; +package pluginutil.multiplexing; + +option go_package = "github.com/hashicorp/vault/sdk/helper/pluginutil"; + +message MultiplexingSupportRequest {} +message MultiplexingSupportResponse { + bool supported = 1; +} + +service PluginMultiplexing { + rpc MultiplexingSupport(MultiplexingSupportRequest) returns (MultiplexingSupportResponse); +} diff --git a/sdk/helper/pluginutil/multiplexing_grpc.pb.go b/sdk/helper/pluginutil/multiplexing_grpc.pb.go new file mode 100644 index 0000000..aa8d0e4 --- /dev/null +++ b/sdk/helper/pluginutil/multiplexing_grpc.pb.go @@ -0,0 +1,101 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package pluginutil + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
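+//
+// A client-side sketch using the generated API below (connection setup
+// elided; cc is assumed to be an established grpc.ClientConnInterface):
+//
+//	client := NewPluginMultiplexingClient(cc)
+//	resp, err := client.MultiplexingSupport(ctx, &MultiplexingSupportRequest{})
+//	supported := err == nil && resp.GetSupported()
+//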
+// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// PluginMultiplexingClient is the client API for PluginMultiplexing service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type PluginMultiplexingClient interface { + MultiplexingSupport(ctx context.Context, in *MultiplexingSupportRequest, opts ...grpc.CallOption) (*MultiplexingSupportResponse, error) +} + +type pluginMultiplexingClient struct { + cc grpc.ClientConnInterface +} + +func NewPluginMultiplexingClient(cc grpc.ClientConnInterface) PluginMultiplexingClient { + return &pluginMultiplexingClient{cc} +} + +func (c *pluginMultiplexingClient) MultiplexingSupport(ctx context.Context, in *MultiplexingSupportRequest, opts ...grpc.CallOption) (*MultiplexingSupportResponse, error) { + out := new(MultiplexingSupportResponse) + err := c.cc.Invoke(ctx, "/pluginutil.multiplexing.PluginMultiplexing/MultiplexingSupport", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// PluginMultiplexingServer is the server API for PluginMultiplexing service. +// All implementations must embed UnimplementedPluginMultiplexingServer +// for forward compatibility +type PluginMultiplexingServer interface { + MultiplexingSupport(context.Context, *MultiplexingSupportRequest) (*MultiplexingSupportResponse, error) + mustEmbedUnimplementedPluginMultiplexingServer() +} + +// UnimplementedPluginMultiplexingServer must be embedded to have forward compatible implementations. +type UnimplementedPluginMultiplexingServer struct { +} + +func (UnimplementedPluginMultiplexingServer) MultiplexingSupport(context.Context, *MultiplexingSupportRequest) (*MultiplexingSupportResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MultiplexingSupport not implemented") +} +func (UnimplementedPluginMultiplexingServer) mustEmbedUnimplementedPluginMultiplexingServer() {} + +// UnsafePluginMultiplexingServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to PluginMultiplexingServer will +// result in compilation errors. +type UnsafePluginMultiplexingServer interface { + mustEmbedUnimplementedPluginMultiplexingServer() +} + +func RegisterPluginMultiplexingServer(s grpc.ServiceRegistrar, srv PluginMultiplexingServer) { + s.RegisterService(&PluginMultiplexing_ServiceDesc, srv) +} + +func _PluginMultiplexing_MultiplexingSupport_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MultiplexingSupportRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PluginMultiplexingServer).MultiplexingSupport(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pluginutil.multiplexing.PluginMultiplexing/MultiplexingSupport", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PluginMultiplexingServer).MultiplexingSupport(ctx, req.(*MultiplexingSupportRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// PluginMultiplexing_ServiceDesc is the grpc.ServiceDesc for PluginMultiplexing service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var PluginMultiplexing_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "pluginutil.multiplexing.PluginMultiplexing", + HandlerType: (*PluginMultiplexingServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "MultiplexingSupport", + Handler: _PluginMultiplexing_MultiplexingSupport_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "sdk/helper/pluginutil/multiplexing.proto", +} diff --git a/sdk/helper/pluginutil/multiplexing_test.go b/sdk/helper/pluginutil/multiplexing_test.go new file mode 100644 index 0000000..3f589ff --- /dev/null +++ b/sdk/helper/pluginutil/multiplexing_test.go @@ -0,0 +1,160 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pluginutil + +import ( + "context" + "fmt" + "reflect" + "testing" + + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +func TestMultiplexingSupported(t *testing.T) { + type args struct { + ctx context.Context + cc grpc.ClientConnInterface + name string + } + + type testCase struct { + name string + args args + env string + want bool + wantErr bool + } + + tests := []testCase{ + { + name: "multiplexing is supported if plugin is not opted out", + args: args{ + ctx: context.Background(), + cc: &MockClientConnInterfaceNoop{}, + name: "plugin", + }, + env: "", + want: true, + }, + { + name: "multiplexing is not supported if plugin is opted out", + args: args{ + ctx: context.Background(), + cc: &MockClientConnInterfaceNoop{}, + name: "optedOutPlugin", + }, + env: "optedOutPlugin", + want: false, + }, + { + name: "multiplexing is not supported if plugin among one of the opted out", + args: args{ + ctx: context.Background(), + cc: &MockClientConnInterfaceNoop{}, + name: "optedOutPlugin", + }, + env: "firstPlugin,optedOutPlugin,otherPlugin", + want: false, + }, + { + name: "multiplexing is supported if different plugin is opted out", + args: args{ + ctx: context.Background(), + cc: &MockClientConnInterfaceNoop{}, + name: "plugin", + }, + env: "optedOutPlugin", + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Setenv(PluginMultiplexingOptOut, tt.env) + got, err := MultiplexingSupported(tt.args.ctx, tt.args.cc, tt.args.name) + if (err != nil) != tt.wantErr { + t.Errorf("MultiplexingSupported() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("MultiplexingSupported() got = %v, want %v", got, tt.want) + } + }) + } +} + +func TestGetMultiplexIDFromContext(t *testing.T) { + type testCase struct { + ctx context.Context + expectedResp string + expectedErr error + } + + tests := map[string]testCase{ + "missing plugin multiplexing metadata": { + ctx: context.Background(), + expectedResp: "", + expectedErr: fmt.Errorf("missing plugin multiplexing metadata"), + }, + "unexpected number of IDs in metadata": { + ctx: idCtx(t, "12345", "67891"), + expectedResp: "", + expectedErr: fmt.Errorf("unexpected number of IDs in metadata: (2)"), + }, + "empty multiplex ID in metadata": { + ctx: idCtx(t, ""), + expectedResp: "", + expectedErr: fmt.Errorf("empty multiplex ID in metadata"), + }, + "happy path, id is returned from metadata": { + ctx: idCtx(t, "12345"), + expectedResp: "12345", + expectedErr: nil, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + resp, err := GetMultiplexIDFromContext(test.ctx) + + if test.expectedErr != nil && test.expectedErr.Error() != "" && err 
== nil { + t.Fatalf("err expected, got nil") + } else if !reflect.DeepEqual(err, test.expectedErr) { + t.Fatalf("Actual error: %#v\nExpected error: %#v", err, test.expectedErr) + } + + if test.expectedErr != nil && test.expectedErr.Error() == "" && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + if !reflect.DeepEqual(resp, test.expectedResp) { + t.Fatalf("Actual response: %#v\nExpected response: %#v", resp, test.expectedResp) + } + }) + } +} + +// idCtx is a test helper that will return a context with the IDs set in its +// metadata +func idCtx(t *testing.T, ids ...string) context.Context { + // Context doesn't need to timeout since this is just passed through + ctx := context.Background() + md := metadata.MD{} + for _, id := range ids { + md.Append(MultiplexingCtxKey, id) + } + return metadata.NewIncomingContext(ctx, md) +} + +type MockClientConnInterfaceNoop struct{} + +func (m *MockClientConnInterfaceNoop) Invoke(_ context.Context, _ string, _ interface{}, reply interface{}, _ ...grpc.CallOption) error { + reply.(*MultiplexingSupportResponse).Supported = true + return nil +} + +func (m *MockClientConnInterfaceNoop) NewStream(_ context.Context, _ *grpc.StreamDesc, _ string, _ ...grpc.CallOption) (grpc.ClientStream, error) { + return nil, nil +} diff --git a/sdk/helper/pluginutil/run_config.go b/sdk/helper/pluginutil/run_config.go new file mode 100644 index 0000000..be34fa9 --- /dev/null +++ b/sdk/helper/pluginutil/run_config.go @@ -0,0 +1,185 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pluginutil + +import ( + "context" + "crypto/sha256" + "crypto/tls" + "fmt" + "os/exec" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/sdk/helper/consts" +) + +type PluginClientConfig struct { + Name string + PluginType consts.PluginType + Version string + PluginSets map[int]plugin.PluginSet + HandshakeConfig plugin.HandshakeConfig + Logger log.Logger + IsMetadataMode bool + AutoMTLS bool + MLock bool + Wrapper RunnerUtil +} + +type runConfig struct { + // Provided by PluginRunner + command string + args []string + sha256 []byte + + // Initialized with what's in PluginRunner.Env, but can be added to + env []string + + PluginClientConfig +} + +func (rc runConfig) makeConfig(ctx context.Context) (*plugin.ClientConfig, error) { + cmd := exec.Command(rc.command, rc.args...) + cmd.Env = append(cmd.Env, rc.env...) 
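+
+	// Note on layering: rc.env (PluginRunner.Env plus any Env(...) RunOpts)
+	// is appended first, and the computed VAULT_* settings below come after
+	// it, so the later entries win if a key repeats.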
+ + // Add the mlock setting to the ENV of the plugin + if rc.MLock || (rc.Wrapper != nil && rc.Wrapper.MlockEnabled()) { + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginMlockEnabled, "true")) + } + version, err := rc.Wrapper.VaultVersion(ctx) + if err != nil { + return nil, err + } + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginVaultVersionEnv, version)) + + if rc.IsMetadataMode { + rc.Logger = rc.Logger.With("metadata", "true") + } + metadataEnv := fmt.Sprintf("%s=%t", PluginMetadataModeEnv, rc.IsMetadataMode) + cmd.Env = append(cmd.Env, metadataEnv) + + automtlsEnv := fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, rc.AutoMTLS) + cmd.Env = append(cmd.Env, automtlsEnv) + + var clientTLSConfig *tls.Config + if !rc.AutoMTLS && !rc.IsMetadataMode { + // Get a CA TLS Certificate + certBytes, key, err := generateCert() + if err != nil { + return nil, err + } + + // Use CA to sign a client cert and return a configured TLS config + clientTLSConfig, err = createClientTLSConfig(certBytes, key) + if err != nil { + return nil, err + } + + // Use CA to sign a server cert and wrap the values in a response wrapped + // token. + wrapToken, err := wrapServerConfig(ctx, rc.Wrapper, certBytes, key) + if err != nil { + return nil, err + } + + // Add the response wrap token to the ENV of the plugin + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", PluginUnwrapTokenEnv, wrapToken)) + } + + secureConfig := &plugin.SecureConfig{ + Checksum: rc.sha256, + Hash: sha256.New(), + } + + clientConfig := &plugin.ClientConfig{ + HandshakeConfig: rc.HandshakeConfig, + VersionedPlugins: rc.PluginSets, + Cmd: cmd, + SecureConfig: secureConfig, + TLSConfig: clientTLSConfig, + Logger: rc.Logger, + AllowedProtocols: []plugin.Protocol{ + plugin.ProtocolNetRPC, + plugin.ProtocolGRPC, + }, + AutoMTLS: rc.AutoMTLS, + } + return clientConfig, nil +} + +func (rc runConfig) run(ctx context.Context) (*plugin.Client, error) { + clientConfig, err := rc.makeConfig(ctx) + if err != nil { + return nil, err + } + + client := plugin.NewClient(clientConfig) + return client, nil +} + +type RunOpt func(*runConfig) + +func Env(env ...string) RunOpt { + return func(rc *runConfig) { + rc.env = append(rc.env, env...) + } +} + +func Runner(wrapper RunnerUtil) RunOpt { + return func(rc *runConfig) { + rc.Wrapper = wrapper + } +} + +func PluginSets(pluginSets map[int]plugin.PluginSet) RunOpt { + return func(rc *runConfig) { + rc.PluginSets = pluginSets + } +} + +func HandshakeConfig(hs plugin.HandshakeConfig) RunOpt { + return func(rc *runConfig) { + rc.HandshakeConfig = hs + } +} + +func Logger(logger log.Logger) RunOpt { + return func(rc *runConfig) { + rc.Logger = logger + } +} + +func MetadataMode(isMetadataMode bool) RunOpt { + return func(rc *runConfig) { + rc.IsMetadataMode = isMetadataMode + } +} + +func AutoMTLS(autoMTLS bool) RunOpt { + return func(rc *runConfig) { + rc.AutoMTLS = autoMTLS + } +} + +func MLock(mlock bool) RunOpt { + return func(rc *runConfig) { + rc.MLock = mlock + } +} + +func (r *PluginRunner) RunConfig(ctx context.Context, opts ...RunOpt) (*plugin.Client, error) { + rc := runConfig{ + command: r.Command, + args: r.Args, + sha256: r.Sha256, + env: r.Env, + } + + for _, opt := range opts { + opt(&rc) + } + + return rc.run(ctx) +} diff --git a/sdk/helper/pluginutil/run_config_test.go b/sdk/helper/pluginutil/run_config_test.go new file mode 100644 index 0000000..e640577 --- /dev/null +++ b/sdk/helper/pluginutil/run_config_test.go @@ -0,0 +1,360 @@ +// Copyright (c) HashiCorp, Inc. 
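+//
+// RunConfig (run_config.go above) assembles a plugin.ClientConfig from
+// functional options; a sketch with illustrative names (runner, wrapperImpl
+// and handshake are placeholders, the plugin set mirrors the test data):
+//
+//	client, err := runner.RunConfig(ctx,
+//		Runner(wrapperImpl), // RunnerUtil used for version, mlock and wrapping
+//		PluginSets(map[int]plugin.PluginSet{1: {"bogus": nil}}),
+//		HandshakeConfig(handshake),
+//		Logger(hclog.NewNullLogger()),
+//		AutoMTLS(true),
+//	)
+//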
+// SPDX-License-Identifier: MPL-2.0 + +package pluginutil + +import ( + "context" + "fmt" + "os/exec" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/sdk/helper/wrapping" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestMakeConfig(t *testing.T) { + type testCase struct { + rc runConfig + + responseWrapInfo *wrapping.ResponseWrapInfo + responseWrapInfoErr error + responseWrapInfoTimes int + + mlockEnabled bool + mlockEnabledTimes int + + expectedConfig *plugin.ClientConfig + expectTLSConfig bool + } + + tests := map[string]testCase{ + "metadata mode, not-AutoMTLS": { + rc: runConfig{ + command: "echo", + args: []string{"foo", "bar"}, + sha256: []byte("some_sha256"), + env: []string{"initial=true"}, + PluginClientConfig: PluginClientConfig{ + PluginSets: map[int]plugin.PluginSet{ + 1: { + "bogus": nil, + }, + }, + HandshakeConfig: plugin.HandshakeConfig{ + ProtocolVersion: 1, + MagicCookieKey: "magic_cookie_key", + MagicCookieValue: "magic_cookie_value", + }, + Logger: hclog.NewNullLogger(), + IsMetadataMode: true, + AutoMTLS: false, + }, + }, + + responseWrapInfoTimes: 0, + + mlockEnabled: false, + mlockEnabledTimes: 1, + + expectedConfig: &plugin.ClientConfig{ + HandshakeConfig: plugin.HandshakeConfig{ + ProtocolVersion: 1, + MagicCookieKey: "magic_cookie_key", + MagicCookieValue: "magic_cookie_value", + }, + VersionedPlugins: map[int]plugin.PluginSet{ + 1: { + "bogus": nil, + }, + }, + Cmd: commandWithEnv( + "echo", + []string{"foo", "bar"}, + []string{ + "initial=true", + fmt.Sprintf("%s=%s", PluginVaultVersionEnv, "dummyversion"), + fmt.Sprintf("%s=%t", PluginMetadataModeEnv, true), + fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, false), + }, + ), + SecureConfig: &plugin.SecureConfig{ + Checksum: []byte("some_sha256"), + // Hash is generated + }, + AllowedProtocols: []plugin.Protocol{ + plugin.ProtocolNetRPC, + plugin.ProtocolGRPC, + }, + Logger: hclog.NewNullLogger(), + AutoMTLS: false, + }, + expectTLSConfig: false, + }, + "non-metadata mode, not-AutoMTLS": { + rc: runConfig{ + command: "echo", + args: []string{"foo", "bar"}, + sha256: []byte("some_sha256"), + env: []string{"initial=true"}, + PluginClientConfig: PluginClientConfig{ + PluginSets: map[int]plugin.PluginSet{ + 1: { + "bogus": nil, + }, + }, + HandshakeConfig: plugin.HandshakeConfig{ + ProtocolVersion: 1, + MagicCookieKey: "magic_cookie_key", + MagicCookieValue: "magic_cookie_value", + }, + Logger: hclog.NewNullLogger(), + IsMetadataMode: false, + AutoMTLS: false, + }, + }, + + responseWrapInfo: &wrapping.ResponseWrapInfo{ + Token: "testtoken", + }, + responseWrapInfoTimes: 1, + + mlockEnabled: true, + mlockEnabledTimes: 1, + + expectedConfig: &plugin.ClientConfig{ + HandshakeConfig: plugin.HandshakeConfig{ + ProtocolVersion: 1, + MagicCookieKey: "magic_cookie_key", + MagicCookieValue: "magic_cookie_value", + }, + VersionedPlugins: map[int]plugin.PluginSet{ + 1: { + "bogus": nil, + }, + }, + Cmd: commandWithEnv( + "echo", + []string{"foo", "bar"}, + []string{ + "initial=true", + fmt.Sprintf("%s=%t", PluginMlockEnabled, true), + fmt.Sprintf("%s=%s", PluginVaultVersionEnv, "dummyversion"), + fmt.Sprintf("%s=%t", PluginMetadataModeEnv, false), + fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, false), + fmt.Sprintf("%s=%s", PluginUnwrapTokenEnv, "testtoken"), + }, + ), + SecureConfig: &plugin.SecureConfig{ + Checksum: []byte("some_sha256"), + // Hash is generated + }, + AllowedProtocols: []plugin.Protocol{ + 
plugin.ProtocolNetRPC, + plugin.ProtocolGRPC, + }, + Logger: hclog.NewNullLogger(), + AutoMTLS: false, + }, + expectTLSConfig: true, + }, + "metadata mode, AutoMTLS": { + rc: runConfig{ + command: "echo", + args: []string{"foo", "bar"}, + sha256: []byte("some_sha256"), + env: []string{"initial=true"}, + PluginClientConfig: PluginClientConfig{ + PluginSets: map[int]plugin.PluginSet{ + 1: { + "bogus": nil, + }, + }, + HandshakeConfig: plugin.HandshakeConfig{ + ProtocolVersion: 1, + MagicCookieKey: "magic_cookie_key", + MagicCookieValue: "magic_cookie_value", + }, + Logger: hclog.NewNullLogger(), + IsMetadataMode: true, + AutoMTLS: true, + }, + }, + + responseWrapInfoTimes: 0, + + mlockEnabled: false, + mlockEnabledTimes: 1, + + expectedConfig: &plugin.ClientConfig{ + HandshakeConfig: plugin.HandshakeConfig{ + ProtocolVersion: 1, + MagicCookieKey: "magic_cookie_key", + MagicCookieValue: "magic_cookie_value", + }, + VersionedPlugins: map[int]plugin.PluginSet{ + 1: { + "bogus": nil, + }, + }, + Cmd: commandWithEnv( + "echo", + []string{"foo", "bar"}, + []string{ + "initial=true", + fmt.Sprintf("%s=%s", PluginVaultVersionEnv, "dummyversion"), + fmt.Sprintf("%s=%t", PluginMetadataModeEnv, true), + fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, true), + }, + ), + SecureConfig: &plugin.SecureConfig{ + Checksum: []byte("some_sha256"), + // Hash is generated + }, + AllowedProtocols: []plugin.Protocol{ + plugin.ProtocolNetRPC, + plugin.ProtocolGRPC, + }, + Logger: hclog.NewNullLogger(), + AutoMTLS: true, + }, + expectTLSConfig: false, + }, + "not-metadata mode, AutoMTLS": { + rc: runConfig{ + command: "echo", + args: []string{"foo", "bar"}, + sha256: []byte("some_sha256"), + env: []string{"initial=true"}, + PluginClientConfig: PluginClientConfig{ + PluginSets: map[int]plugin.PluginSet{ + 1: { + "bogus": nil, + }, + }, + HandshakeConfig: plugin.HandshakeConfig{ + ProtocolVersion: 1, + MagicCookieKey: "magic_cookie_key", + MagicCookieValue: "magic_cookie_value", + }, + Logger: hclog.NewNullLogger(), + IsMetadataMode: false, + AutoMTLS: true, + }, + }, + + responseWrapInfoTimes: 0, + + mlockEnabled: false, + mlockEnabledTimes: 1, + + expectedConfig: &plugin.ClientConfig{ + HandshakeConfig: plugin.HandshakeConfig{ + ProtocolVersion: 1, + MagicCookieKey: "magic_cookie_key", + MagicCookieValue: "magic_cookie_value", + }, + VersionedPlugins: map[int]plugin.PluginSet{ + 1: { + "bogus": nil, + }, + }, + Cmd: commandWithEnv( + "echo", + []string{"foo", "bar"}, + []string{ + "initial=true", + fmt.Sprintf("%s=%s", PluginVaultVersionEnv, "dummyversion"), + fmt.Sprintf("%s=%t", PluginMetadataModeEnv, false), + fmt.Sprintf("%s=%t", PluginAutoMTLSEnv, true), + }, + ), + SecureConfig: &plugin.SecureConfig{ + Checksum: []byte("some_sha256"), + // Hash is generated + }, + AllowedProtocols: []plugin.Protocol{ + plugin.ProtocolNetRPC, + plugin.ProtocolGRPC, + }, + Logger: hclog.NewNullLogger(), + AutoMTLS: true, + }, + expectTLSConfig: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + mockWrapper := new(mockRunnerUtil) + mockWrapper.On("ResponseWrapData", mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(test.responseWrapInfo, test.responseWrapInfoErr) + mockWrapper.On("MlockEnabled"). 
+ Return(test.mlockEnabled) + test.rc.Wrapper = mockWrapper + defer mockWrapper.AssertNumberOfCalls(t, "ResponseWrapData", test.responseWrapInfoTimes) + defer mockWrapper.AssertNumberOfCalls(t, "MlockEnabled", test.mlockEnabledTimes) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + config, err := test.rc.makeConfig(ctx) + if err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + // The following fields are generated, so we just need to check for existence, not specific value + // The value must be nilled out before performing a DeepEqual check + hsh := config.SecureConfig.Hash + if hsh == nil { + t.Fatalf("Missing SecureConfig.Hash") + } + config.SecureConfig.Hash = nil + + if test.expectTLSConfig && config.TLSConfig == nil { + t.Fatalf("TLS config expected, got nil") + } + if !test.expectTLSConfig && config.TLSConfig != nil { + t.Fatalf("no TLS config expected, got: %#v", config.TLSConfig) + } + config.TLSConfig = nil + + require.Equal(t, test.expectedConfig, config) + }) + } +} + +func commandWithEnv(cmd string, args []string, env []string) *exec.Cmd { + c := exec.Command(cmd, args...) + c.Env = env + return c +} + +var _ RunnerUtil = &mockRunnerUtil{} + +type mockRunnerUtil struct { + mock.Mock +} + +func (m *mockRunnerUtil) VaultVersion(ctx context.Context) (string, error) { + return "dummyversion", nil +} + +func (m *mockRunnerUtil) NewPluginClient(ctx context.Context, config PluginClientConfig) (PluginClient, error) { + args := m.Called(ctx, config) + return args.Get(0).(PluginClient), args.Error(1) +} + +func (m *mockRunnerUtil) ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) { + args := m.Called(ctx, data, ttl, jwt) + return args.Get(0).(*wrapping.ResponseWrapInfo), args.Error(1) +} + +func (m *mockRunnerUtil) MlockEnabled() bool { + args := m.Called() + return args.Bool(0) +} diff --git a/sdk/helper/pluginutil/runner.go b/sdk/helper/pluginutil/runner.go new file mode 100644 index 0000000..977f95d --- /dev/null +++ b/sdk/helper/pluginutil/runner.go @@ -0,0 +1,119 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pluginutil + +import ( + "context" + "time" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/go-version" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/wrapping" + "google.golang.org/grpc" +) + +// Looker defines the plugin Lookup function that looks into the plugin catalog +// for available plugins and returns a PluginRunner +type Looker interface { + LookupPlugin(ctx context.Context, pluginName string, pluginType consts.PluginType) (*PluginRunner, error) + LookupPluginVersion(ctx context.Context, pluginName string, pluginType consts.PluginType, version string) (*PluginRunner, error) +} + +// RunnerUtil interface defines the functions needed by the runner to wrap the +// metadata needed to run a plugin process. This includes looking up Mlock +// configuration and wrapping data in a response wrapped token. +// logical.SystemView implementations satisfy this interface. 
+type RunnerUtil interface {
+	NewPluginClient(ctx context.Context, config PluginClientConfig) (PluginClient, error)
+	ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error)
+	MlockEnabled() bool
+	VaultVersion(ctx context.Context) (string, error)
+}
+
+// LookRunnerUtil defines the functions for both Looker and RunnerUtil
+type LookRunnerUtil interface {
+	Looker
+	RunnerUtil
+}
+
+type PluginClient interface {
+	Conn() grpc.ClientConnInterface
+	Reload() error
+	plugin.ClientProtocol
+}
+
+const MultiplexingCtxKey string = "multiplex_id"
+
+// PluginRunner defines the metadata needed to run a plugin securely with
+// go-plugin.
+type PluginRunner struct {
+	Name           string                      `json:"name" structs:"name"`
+	Type           consts.PluginType           `json:"type" structs:"type"`
+	Version        string                      `json:"version" structs:"version"`
+	Command        string                      `json:"command" structs:"command"`
+	Args           []string                    `json:"args" structs:"args"`
+	Env            []string                    `json:"env" structs:"env"`
+	Sha256         []byte                      `json:"sha256" structs:"sha256"`
+	Builtin        bool                        `json:"builtin" structs:"builtin"`
+	BuiltinFactory func() (interface{}, error) `json:"-" structs:"-"`
+}
+
+// Run takes a wrapper RunnerUtil instance along with the go-plugin parameters and
+// returns a configured plugin.Client with TLS configured and a wrapping token set
+// on PluginUnwrapTokenEnv for plugin process consumption.
+func (r *PluginRunner) Run(ctx context.Context, wrapper RunnerUtil, pluginSets map[int]plugin.PluginSet, hs plugin.HandshakeConfig, env []string, logger log.Logger) (*plugin.Client, error) {
+	return r.RunConfig(ctx,
+		Runner(wrapper),
+		PluginSets(pluginSets),
+		HandshakeConfig(hs),
+		Env(env...),
+		Logger(logger),
+		MetadataMode(false),
+	)
+}
+
+// RunMetadataMode returns a configured plugin.Client that will dispense a plugin
+// in metadata mode. The PluginMetadataModeEnv is passed in as part of the Cmd to
+// plugin.Client, and consumed by the plugin process on api.VaultPluginTLSProvider.
+func (r *PluginRunner) RunMetadataMode(ctx context.Context, wrapper RunnerUtil, pluginSets map[int]plugin.PluginSet, hs plugin.HandshakeConfig, env []string, logger log.Logger) (*plugin.Client, error) {
+	return r.RunConfig(ctx,
+		Runner(wrapper),
+		PluginSets(pluginSets),
+		HandshakeConfig(hs),
+		Env(env...),
+		Logger(logger),
+		MetadataMode(true),
+	)
+}
+
+// VersionedPlugin holds any versioning information stored about a plugin in the
+// plugin catalog.
+type VersionedPlugin struct {
+	Type              string `json:"type"` // string instead of consts.PluginType so that we get the string form in API responses.
+	Name              string `json:"name"`
+	Version           string `json:"version"`
+	SHA256            string `json:"sha256,omitempty"`
+	Builtin           bool   `json:"builtin"`
+	DeprecationStatus string `json:"deprecation_status,omitempty"`
+
+	// Pre-parsed semver struct of the Version field
+	SemanticVersion *version.Version `json:"-"`
+}
+
+// CtxCancelIfCanceled takes a context cancel func and a context. If the context
+// is canceled, the cancel func is called. This is useful for merging two cancel
+// functions.
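+//
+// A minimal usage sketch (illustrative; parentCtx stands in for any context
+// whose cancellation should propagate to this one):
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	quitCh := CtxCancelIfCanceled(cancel, parentCtx)
+//	defer close(quitCh) // stops the watcher goroutine once ctx is no longer needed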
+func CtxCancelIfCanceled(f context.CancelFunc, ctxCanceler context.Context) chan struct{} { + quitCh := make(chan struct{}) + go func() { + select { + case <-quitCh: + case <-ctxCanceler.Done(): + f() + } + }() + return quitCh +} diff --git a/sdk/helper/pluginutil/tls.go b/sdk/helper/pluginutil/tls.go new file mode 100644 index 0000000..21b35d9 --- /dev/null +++ b/sdk/helper/pluginutil/tls.go @@ -0,0 +1,109 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pluginutil + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/sdk/helper/certutil" +) + +// generateCert is used internally to create certificates for the plugin +// client and server. +func generateCert() ([]byte, *ecdsa.PrivateKey, error) { + key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + if err != nil { + return nil, nil, err + } + + host, err := uuid.GenerateUUID() + if err != nil { + return nil, nil, err + } + + sn, err := certutil.GenerateSerialNumber() + if err != nil { + return nil, nil, err + } + + template := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: host, + }, + DNSNames: []string{host}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageServerAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement, + SerialNumber: sn, + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + IsCA: true, + } + + certBytes, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key) + if err != nil { + return nil, nil, errwrap.Wrapf("unable to generate client certificate: {{err}}", err) + } + + return certBytes, key, nil +} + +// createClientTLSConfig creates a signed certificate and returns a configured +// TLS config. +func createClientTLSConfig(certBytes []byte, key *ecdsa.PrivateKey) (*tls.Config, error) { + clientCert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, errwrap.Wrapf("error parsing generated plugin certificate: {{err}}", err) + } + + cert := tls.Certificate{ + Certificate: [][]byte{certBytes}, + PrivateKey: key, + Leaf: clientCert, + } + + clientCertPool := x509.NewCertPool() + clientCertPool.AddCert(clientCert) + + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: clientCertPool, + ClientCAs: clientCertPool, + ClientAuth: tls.RequireAndVerifyClientCert, + ServerName: clientCert.Subject.CommonName, + MinVersion: tls.VersionTLS12, + } + + return tlsConfig, nil +} + +// wrapServerConfig is used to create a server certificate and private key, then +// wrap them in an unwrap token for later retrieval by the plugin. +func wrapServerConfig(ctx context.Context, sys RunnerUtil, certBytes []byte, key *ecdsa.PrivateKey) (string, error) { + rawKey, err := x509.MarshalECPrivateKey(key) + if err != nil { + return "", err + } + + wrapInfo, err := sys.ResponseWrapData(ctx, map[string]interface{}{ + "ServerCert": certBytes, + "ServerKey": rawKey, + }, time.Second*60, true) + if err != nil { + return "", err + } + + return wrapInfo.Token, nil +} diff --git a/sdk/helper/pointerutil/pointer.go b/sdk/helper/pointerutil/pointer.go new file mode 100644 index 0000000..a3cb558 --- /dev/null +++ b/sdk/helper/pointerutil/pointer.go @@ -0,0 +1,38 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package pointerutil + +import ( + "os" + "time" + + "github.com/hashicorp/go-secure-stdlib/parseutil" +) + +// StringPtr returns a pointer to a string value +func StringPtr(s string) *string { + return &s +} + +// BoolPtr returns a pointer to a boolean value +func BoolPtr(b bool) *bool { + return &b +} + +// TimeDurationPtr returns a pointer to a time duration value +func TimeDurationPtr(duration string) *time.Duration { + d, _ := parseutil.ParseDurationSecond(duration) + + return &d +} + +// FileModePtr returns a pointer to the given os.FileMode +func FileModePtr(o os.FileMode) *os.FileMode { + return &o +} + +// Int64Ptr returns a pointer to an int64 value +func Int64Ptr(i int64) *int64 { + return &i +} diff --git a/sdk/helper/policyutil/policyutil.go b/sdk/helper/policyutil/policyutil.go new file mode 100644 index 0000000..a5a8082 --- /dev/null +++ b/sdk/helper/policyutil/policyutil.go @@ -0,0 +1,134 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package policyutil + +import ( + "sort" + "strings" + + "github.com/hashicorp/go-secure-stdlib/strutil" +) + +const ( + AddDefaultPolicy = true + DoNotAddDefaultPolicy = false +) + +// ParsePolicies parses a comma-delimited list of policies. +// The resulting collection will have no duplicate elements. +// If 'root' policy was present in the list of policies, then +// all other policies will be ignored, the result will contain +// just the 'root'. In cases where 'root' is not present, if +// 'default' policy is not already present, it will be added. +func ParsePolicies(policiesRaw interface{}) []string { + if policiesRaw == nil { + return []string{"default"} + } + + var policies []string + switch policiesRaw.(type) { + case string: + if policiesRaw.(string) == "" { + return []string{} + } + policies = strings.Split(policiesRaw.(string), ",") + case []string: + policies = policiesRaw.([]string) + } + + return SanitizePolicies(policies, false) +} + +// SanitizePolicies performs the common input validation tasks +// which are performed on the list of policies across Vault. +// The resulting collection will have no duplicate elements. +// If 'root' policy was present in the list of policies, then +// all other policies will be ignored, the result will contain +// just the 'root'. In cases where 'root' is not present, if +// 'default' policy is not already present, it will be added +// if addDefault is set to true. +func SanitizePolicies(policies []string, addDefault bool) []string { + defaultFound := false + for i, p := range policies { + policies[i] = strings.ToLower(strings.TrimSpace(p)) + // Eliminate unnamed policies. + if policies[i] == "" { + continue + } + + // If 'root' policy is present, ignore all other policies. + if policies[i] == "root" { + policies = []string{"root"} + defaultFound = true + break + } + if policies[i] == "default" { + defaultFound = true + } + } + + // Always add 'default' except only if the policies contain 'root'. + if addDefault && (len(policies) == 0 || !defaultFound) { + policies = append(policies, "default") + } + + return strutil.RemoveDuplicates(policies, true) +} + +// EquivalentPolicies checks whether the given policy sets are equivalent, as in, +// they contain the same values. The benefit of this method is that it leaves +// the "default" policy out of its comparisons as it may be added later by core +// after a set of policies has been saved by a backend. 
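+//
+// For example (illustrative):
+//
+//	EquivalentPolicies([]string{"foo", "default"}, []string{"foo"}) // true
+//	EquivalentPolicies([]string{"foo"}, []string{"bar"})            // false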
+func EquivalentPolicies(a, b []string) bool { + switch { + case a == nil && b == nil: + return true + case a == nil && len(b) == 1 && b[0] == "default": + return true + case b == nil && len(a) == 1 && a[0] == "default": + return true + case a == nil || b == nil: + return false + } + + // First we'll build maps to ensure unique values and filter default + mapA := map[string]bool{} + mapB := map[string]bool{} + for _, keyA := range a { + if keyA == "default" { + continue + } + mapA[keyA] = true + } + for _, keyB := range b { + if keyB == "default" { + continue + } + mapB[keyB] = true + } + + // Now we'll build our checking slices + var sortedA, sortedB []string + for keyA := range mapA { + sortedA = append(sortedA, keyA) + } + for keyB := range mapB { + sortedB = append(sortedB, keyB) + } + sort.Strings(sortedA) + sort.Strings(sortedB) + + // Finally, compare + if len(sortedA) != len(sortedB) { + return false + } + + for i := range sortedA { + if sortedA[i] != sortedB[i] { + return false + } + } + + return true +} diff --git a/sdk/helper/policyutil/policyutil_test.go b/sdk/helper/policyutil/policyutil_test.go new file mode 100644 index 0000000..2280ba9 --- /dev/null +++ b/sdk/helper/policyutil/policyutil_test.go @@ -0,0 +1,79 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package policyutil + +import "testing" + +func TestSanitizePolicies(t *testing.T) { + expected := []string{"foo", "bar"} + actual := SanitizePolicies([]string{"foo", "bar"}, false) + if !EquivalentPolicies(expected, actual) { + t.Fatalf("bad: expected:%s\ngot:%s\n", expected, actual) + } + + // If 'default' is already added, do not remove it. + expected = []string{"foo", "bar", "default"} + actual = SanitizePolicies([]string{"foo", "bar", "default"}, false) + if !EquivalentPolicies(expected, actual) { + t.Fatalf("bad: expected:%s\ngot:%s\n", expected, actual) + } +} + +func TestParsePolicies(t *testing.T) { + expected := []string{"foo", "bar", "default"} + actual := ParsePolicies("foo,bar") + // add default if not present. + if !EquivalentPolicies(expected, actual) { + t.Fatalf("bad: expected:%s\ngot:%s\n", expected, actual) + } + + // do not add default more than once. + actual = ParsePolicies("foo,bar,default") + if !EquivalentPolicies(expected, actual) { + t.Fatalf("bad: expected:%s\ngot:%s\n", expected, actual) + } + + // handle spaces and tabs. + actual = ParsePolicies(" foo , bar , default") + if !EquivalentPolicies(expected, actual) { + t.Fatalf("bad: expected:%s\ngot:%s\n", expected, actual) + } + + // ignore all others if root is present. + expected = []string{"root"} + actual = ParsePolicies("foo,bar,root") + if !EquivalentPolicies(expected, actual) { + t.Fatalf("bad: expected:%s\ngot:%s\n", expected, actual) + } + + // with spaces and tabs. 
+ expected = []string{"root"} + actual = ParsePolicies("foo ,bar, root ") + if !EquivalentPolicies(expected, actual) { + t.Fatalf("bad: expected:%s\ngot:%s\n", expected, actual) + } +} + +func TestEquivalentPolicies(t *testing.T) { + a := []string{"foo", "bar"} + var b []string + if EquivalentPolicies(a, b) { + t.Fatal("bad") + } + + b = []string{"foo"} + if EquivalentPolicies(a, b) { + t.Fatal("bad") + } + + b = []string{"bar", "foo"} + if !EquivalentPolicies(a, b) { + t.Fatal("bad") + } + + b = []string{"foo", "default", "bar"} + if !EquivalentPolicies(a, b) { + t.Fatal("bad") + } +} diff --git a/sdk/helper/roottoken/decode.go b/sdk/helper/roottoken/decode.go new file mode 100644 index 0000000..9939b67 --- /dev/null +++ b/sdk/helper/roottoken/decode.go @@ -0,0 +1,43 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package roottoken + +import ( + "encoding/base64" + "fmt" + "strings" + + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/sdk/helper/xor" +) + +// DecodeToken will decode the root token returned by the Vault API +// The algorithm was initially used in the generate root command +func DecodeToken(encoded, otp string, otpLength int) (string, error) { + switch otpLength { + case 0: + // Backwards compat + tokenBytes, err := xor.XORBase64(encoded, otp) + if err != nil { + return "", fmt.Errorf("error xoring token: %s", err) + } + + uuidToken, err := uuid.FormatUUID(tokenBytes) + if err != nil { + return "", fmt.Errorf("error formatting base64 token value: %s", err) + } + return strings.TrimSpace(uuidToken), nil + default: + tokenBytes, err := base64.RawStdEncoding.DecodeString(encoded) + if err != nil { + return "", fmt.Errorf("error decoding base64'd token: %v", err) + } + + tokenBytes, err = xor.XORBytes(tokenBytes, []byte(otp)) + if err != nil { + return "", fmt.Errorf("error xoring token: %v", err) + } + return string(tokenBytes), nil + } +} diff --git a/sdk/helper/roottoken/encode.go b/sdk/helper/roottoken/encode.go new file mode 100644 index 0000000..dbbc90a --- /dev/null +++ b/sdk/helper/roottoken/encode.go @@ -0,0 +1,29 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package roottoken + +import ( + "encoding/base64" + "fmt" + + "github.com/hashicorp/vault/sdk/helper/xor" +) + +// EncodeToken gets a token and an OTP and encodes the token. +// The OTP must have the same length as the token. +func EncodeToken(token, otp string) (string, error) { + if len(token) == 0 { + return "", fmt.Errorf("no token provided") + } else if len(otp) == 0 { + return "", fmt.Errorf("no otp provided") + } + + // This function performs decoding checks so rather than decode the OTP, + // just encode the value we're passing in. + tokenBytes, err := xor.XORBytes([]byte(otp), []byte(token)) + if err != nil { + return "", fmt.Errorf("xor of root token failed: %w", err) + } + return base64.RawStdEncoding.EncodeToString(tokenBytes), nil +} diff --git a/sdk/helper/roottoken/encode_test.go b/sdk/helper/roottoken/encode_test.go new file mode 100644 index 0000000..269bf65 --- /dev/null +++ b/sdk/helper/roottoken/encode_test.go @@ -0,0 +1,75 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package roottoken
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestTokenEncodingDecodingWithOTP(t *testing.T) {
+	otpTestCases := []struct {
+		token               string
+		name                string
+		otpLength           int
+		expectedEncodingErr string
+		expectedDecodingErr string
+	}{
+		{
+			token:               "someToken",
+			name:                "test token encoding with base64",
+			otpLength:           0,
+			expectedEncodingErr: "xor of root token failed: length of byte slices is not equivalent: 24 != 9",
+			expectedDecodingErr: "",
+		},
+		{
+			token:               "someToken",
+			name:                "test token encoding with base62",
+			otpLength:           len("someToken"),
+			expectedEncodingErr: "",
+			expectedDecodingErr: "",
+		},
+		{
+			token:               "someToken",
+			name:                "test token encoding with base62 - wrong otp length",
+			otpLength:           len("someToken") + 1,
+			expectedEncodingErr: "xor of root token failed: length of byte slices is not equivalent: 10 != 9",
+			expectedDecodingErr: "",
+		},
+		{
+			token:               "",
+			name:                "test no token to encode",
+			otpLength:           0,
+			expectedEncodingErr: "no token provided",
+			expectedDecodingErr: "",
+		},
+	}
+	for _, otpTestCase := range otpTestCases {
+		t.Run(otpTestCase.name, func(t *testing.T) {
+			otp, err := GenerateOTP(otpTestCase.otpLength)
+			if err != nil {
+				t.Fatal(err.Error())
+			}
+			encodedToken, err := EncodeToken(otpTestCase.token, otp)
+			if err != nil || otpTestCase.expectedEncodingErr != "" {
+				assert.EqualError(t, err, otpTestCase.expectedEncodingErr)
+				return
+			}
+			assert.NotEqual(t, otp, encodedToken)
+			assert.NotEqual(t, encodedToken, otpTestCase.token)
+			decodedToken, err := DecodeToken(encodedToken, otp, len(otp))
+			if err != nil || otpTestCase.expectedDecodingErr != "" {
+				assert.EqualError(t, err, otpTestCase.expectedDecodingErr)
+				return
+			}
+			assert.Equal(t, otpTestCase.token, decodedToken)
+		})
+	}
+}
+
+func TestTokenEncodingDecodingWithNoOTPorPGPKey(t *testing.T) {
+	_, err := EncodeToken("", "")
+	assert.EqualError(t, err, "no token provided")
+}
diff --git a/sdk/helper/roottoken/otp.go b/sdk/helper/roottoken/otp.go
new file mode 100644
index 0000000..4445ec5
--- /dev/null
+++ b/sdk/helper/roottoken/otp.go
@@ -0,0 +1,43 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package roottoken
+
+import (
+	"crypto/rand"
+	"encoding/base64"
+	"fmt"
+
+	"github.com/hashicorp/go-secure-stdlib/base62"
+)
+
+// defaultBase64EncodedOTPLength is the number of random bytes that will be
+// generated before the base64 encoding process takes place.
+const defaultBase64EncodedOTPLength = 16
+
+// GenerateOTP generates a random OTP and encodes it as a base64 (when otpLength
+// is 0, for backwards compatibility) or base62 string. It returns the encoded
+// OTP, or an empty string along with an error if generation fails.
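+//
+// For example (illustrative):
+//
+//	otp, _ := GenerateOTP(0)  // 16 random bytes, base64-encoded: 24 characters
+//	otp, _ = GenerateOTP(26)  // 26-character base62 string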
+func GenerateOTP(otpLength int) (string, error) { + switch otpLength { + case 0: + // This is the fallback case + buf := make([]byte, defaultBase64EncodedOTPLength) + readLen, err := rand.Read(buf) + if err != nil { + return "", fmt.Errorf("error reading random bytes: %s", err) + } + + if readLen != defaultBase64EncodedOTPLength { + return "", fmt.Errorf("read %d bytes when we should have read 16", readLen) + } + + return base64.StdEncoding.EncodeToString(buf), nil + default: + otp, err := base62.Random(otpLength) + if err != nil { + return "", fmt.Errorf("error reading random bytes: %w", err) + } + + return otp, nil + } +} diff --git a/sdk/helper/roottoken/otp_test.go b/sdk/helper/roottoken/otp_test.go new file mode 100644 index 0000000..53776ec --- /dev/null +++ b/sdk/helper/roottoken/otp_test.go @@ -0,0 +1,22 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package roottoken + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestBase64OTPGeneration(t *testing.T) { + token, err := GenerateOTP(0) + assert.Len(t, token, 24) + assert.Nil(t, err) +} + +func TestBase62OTPGeneration(t *testing.T) { + token, err := GenerateOTP(20) + assert.Len(t, token, 20) + assert.Nil(t, err) +} diff --git a/sdk/helper/salt/salt.go b/sdk/helper/salt/salt.go new file mode 100644 index 0000000..84cbd03 --- /dev/null +++ b/sdk/helper/salt/salt.go @@ -0,0 +1,198 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package salt + +import ( + "context" + "crypto/hmac" + "crypto/sha1" + "crypto/sha256" + "encoding/hex" + "fmt" + "hash" + + "github.com/hashicorp/errwrap" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + // DefaultLocation is the path in the view we store our key salt + // if no other path is provided. + DefaultLocation = "salt" +) + +// Salt is used to manage a persistent salt key which is used to +// hash values. This allows keys to be generated and recovered +// using the global salt. Primarily, this allows paths in the storage +// backend to be obfuscated if they may contain sensitive information. +type Salt struct { + config *Config + salt string + generated bool +} + +type HashFunc func([]byte) []byte + +// Config is used to parameterize the Salt +type Config struct { + // Location is the path in the storage backend for the + // salt. Uses DefaultLocation if not specified. + Location string + + // HashFunc is the hashing function to use for salting. + // Defaults to SHA1 if not provided. + HashFunc HashFunc + + // HMAC allows specification of a hash function to use for + // the HMAC helpers + HMAC func() hash.Hash + + // String prepended to HMAC strings for identification. 
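+	// (for example "hmac-sha256", the default applied by NewSalt).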
+ // Required if using HMAC + HMACType string +} + +// NewSalt creates a new salt based on the configuration +func NewSalt(ctx context.Context, view logical.Storage, config *Config) (*Salt, error) { + // Setup the configuration + if config == nil { + config = &Config{} + } + if config.Location == "" { + config.Location = DefaultLocation + } + if config.HashFunc == nil { + config.HashFunc = SHA256Hash + } + if config.HMAC == nil { + config.HMAC = sha256.New + config.HMACType = "hmac-sha256" + } + + // Create the salt + s := &Salt{ + config: config, + } + + // Look for the salt + var raw *logical.StorageEntry + var err error + if view != nil { + raw, err = view.Get(ctx, config.Location) + if err != nil { + return nil, errwrap.Wrapf("failed to read salt: {{err}}", err) + } + } + + // Restore the salt if it exists + if raw != nil { + s.salt = string(raw.Value) + } + + // Generate a new salt if necessary + if s.salt == "" { + s.salt, err = uuid.GenerateUUID() + if err != nil { + return nil, errwrap.Wrapf("failed to generate uuid: {{err}}", err) + } + s.generated = true + if view != nil { + raw := &logical.StorageEntry{ + Key: config.Location, + Value: []byte(s.salt), + } + if err := view.Put(ctx, raw); err != nil { + return nil, errwrap.Wrapf("failed to persist salt: {{err}}", err) + } + } + } + + if config.HMAC != nil { + if len(config.HMACType) == 0 { + return nil, fmt.Errorf("HMACType must be defined") + } + } + + return s, nil +} + +// NewNonpersistentSalt creates a new salt with default configuration and no storage usage. +func NewNonpersistentSalt() *Salt { + // Setup the configuration + config := &Config{} + config.Location = "" + config.HashFunc = SHA256Hash + config.HMAC = sha256.New + config.HMACType = "hmac-sha256" + + s := &Salt{ + config: config, + } + s.salt, _ = uuid.GenerateUUID() + s.generated = true + return s +} + +// SaltID is used to apply a salt and hash function to an ID to make sure +// it is not reversible +func (s *Salt) SaltID(id string) string { + return SaltID(s.salt, id, s.config.HashFunc) +} + +// GetHMAC is used to apply a salt and hash function to data to make sure it is +// not reversible, with an additional HMAC +func (s *Salt) GetHMAC(data string) string { + hm := hmac.New(s.config.HMAC, []byte(s.salt)) + hm.Write([]byte(data)) + return hex.EncodeToString(hm.Sum(nil)) +} + +// GetIdentifiedHMAC is used to apply a salt and hash function to data to make +// sure it is not reversible, with an additional HMAC, and ID prepended +func (s *Salt) GetIdentifiedHMAC(data string) string { + return s.config.HMACType + ":" + s.GetHMAC(data) +} + +// DidGenerate returns true if the underlying salt value was generated +// on initialization. +func (s *Salt) DidGenerate() bool { + return s.generated +} + +// SaltIDHashFunc uses the supplied hash function instead of the configured +// hash func in the salt. 
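+//
+// For example (illustrative): s.SaltIDHashFunc(id, SHA1Hash) produces the
+// legacy SHA1-salted form of id regardless of how the Salt was configured.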
+func (s *Salt) SaltIDHashFunc(id string, hashFunc HashFunc) string { + return SaltID(s.salt, id, hashFunc) +} + +// SaltID is used to apply a salt and hash function to an ID to make sure +// it is not reversible +func SaltID(salt, id string, hash HashFunc) string { + comb := salt + id + hashVal := hash([]byte(comb)) + return hex.EncodeToString(hashVal) +} + +func HMACValue(salt, val string, hashFunc func() hash.Hash) string { + hm := hmac.New(hashFunc, []byte(salt)) + hm.Write([]byte(val)) + return hex.EncodeToString(hm.Sum(nil)) +} + +func HMACIdentifiedValue(salt, val, hmacType string, hashFunc func() hash.Hash) string { + return hmacType + ":" + HMACValue(salt, val, hashFunc) +} + +// SHA1Hash returns the SHA1 of the input +func SHA1Hash(inp []byte) []byte { + hashed := sha1.Sum(inp) + return hashed[:] +} + +// SHA256Hash returns the SHA256 of the input +func SHA256Hash(inp []byte) []byte { + hashed := sha256.Sum256(inp) + return hashed[:] +} diff --git a/sdk/helper/salt/salt_test.go b/sdk/helper/salt/salt_test.go new file mode 100644 index 0000000..3aec9a2 --- /dev/null +++ b/sdk/helper/salt/salt_test.go @@ -0,0 +1,91 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package salt + +import ( + "context" + "crypto/sha1" + "crypto/sha256" + "testing" + + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/sdk/logical" +) + +func TestSalt(t *testing.T) { + inm := &logical.InmemStorage{} + conf := &Config{} + + salt, err := NewSalt(context.Background(), inm, conf) + if err != nil { + t.Fatalf("err: %v", err) + } + + if !salt.DidGenerate() { + t.Fatalf("expected generation") + } + + // Verify the salt exists + out, err := inm.Get(context.Background(), DefaultLocation) + if err != nil { + t.Fatalf("err: %v", err) + } + if out == nil { + t.Fatalf("missing salt") + } + + // Create a new salt, should restore + salt2, err := NewSalt(context.Background(), inm, conf) + if err != nil { + t.Fatalf("err: %v", err) + } + + if salt2.DidGenerate() { + t.Fatalf("unexpected generation") + } + + // Check for a match + if salt.salt != salt2.salt { + t.Fatalf("salt mismatch: %s %s", salt.salt, salt2.salt) + } + + // Verify a match + id := "foobarbaz" + sid1 := salt.SaltID(id) + sid2 := salt2.SaltID(id) + + if sid1 != sid2 { + t.Fatalf("mismatch") + } +} + +func TestSaltID(t *testing.T) { + salt, err := uuid.GenerateUUID() + if err != nil { + t.Fatal(err) + } + id := "foobarbaz" + + sid1 := SaltID(salt, id, SHA1Hash) + sid2 := SaltID(salt, id, SHA1Hash) + + if len(sid1) != sha1.Size*2 { + t.Fatalf("Bad len: %d %s", len(sid1), sid1) + } + + if sid1 != sid2 { + t.Fatalf("mismatch") + } + + sid1 = SaltID(salt, id, SHA256Hash) + sid2 = SaltID(salt, id, SHA256Hash) + + if len(sid1) != sha256.Size*2 { + t.Fatalf("Bad len: %d", len(sid1)) + } + + if sid1 != sid2 { + t.Fatalf("mismatch") + } +} diff --git a/sdk/helper/strutil/strutil.go b/sdk/helper/strutil/strutil.go new file mode 100644 index 0000000..a9e5069 --- /dev/null +++ b/sdk/helper/strutil/strutil.go @@ -0,0 +1,97 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// DEPRECATED: this has been moved to go-secure-stdlib and will be removed +package strutil + +import ( + extstrutil "github.com/hashicorp/go-secure-stdlib/strutil" +) + +func StrListContainsGlob(haystack []string, needle string) bool { + return extstrutil.StrListContainsGlob(haystack, needle) +} + +func StrListContains(haystack []string, needle string) bool { + return extstrutil.StrListContains(haystack, needle) +} + +func StrListContainsCaseInsensitive(haystack []string, needle string) bool { + return extstrutil.StrListContainsCaseInsensitive(haystack, needle) +} + +func StrListSubset(super, sub []string) bool { + return extstrutil.StrListSubset(super, sub) +} + +func ParseDedupAndSortStrings(input string, sep string) []string { + return extstrutil.ParseDedupAndSortStrings(input, sep) +} + +func ParseDedupLowercaseAndSortStrings(input string, sep string) []string { + return extstrutil.ParseDedupLowercaseAndSortStrings(input, sep) +} + +func ParseKeyValues(input string, out map[string]string, sep string) error { + return extstrutil.ParseKeyValues(input, out, sep) +} + +func ParseArbitraryKeyValues(input string, out map[string]string, sep string) error { + return extstrutil.ParseArbitraryKeyValues(input, out, sep) +} + +func ParseStringSlice(input string, sep string) []string { + return extstrutil.ParseStringSlice(input, sep) +} + +func ParseArbitraryStringSlice(input string, sep string) []string { + return extstrutil.ParseArbitraryStringSlice(input, sep) +} + +func TrimStrings(items []string) []string { + return extstrutil.TrimStrings(items) +} + +func RemoveDuplicates(items []string, lowercase bool) []string { + return extstrutil.RemoveDuplicates(items, lowercase) +} + +func RemoveDuplicatesStable(items []string, caseInsensitive bool) []string { + return extstrutil.RemoveDuplicatesStable(items, caseInsensitive) +} + +func RemoveEmpty(items []string) []string { + return extstrutil.RemoveEmpty(items) +} + +func EquivalentSlices(a, b []string) bool { + return extstrutil.EquivalentSlices(a, b) +} + +func EqualStringMaps(a, b map[string]string) bool { + return extstrutil.EqualStringMaps(a, b) +} + +func StrListDelete(s []string, d string) []string { + return extstrutil.StrListDelete(s, d) +} + +func GlobbedStringsMatch(item, val string) bool { + return extstrutil.GlobbedStringsMatch(item, val) +} + +func AppendIfMissing(slice []string, i string) []string { + return extstrutil.AppendIfMissing(slice, i) +} + +func MergeSlices(args ...[]string) []string { + return extstrutil.MergeSlices(args...) +} + +func Difference(a, b []string, lowercase bool) []string { + return extstrutil.Difference(a, b, lowercase) +} + +func GetString(m map[string]interface{}, key string) (string, error) { + return extstrutil.GetString(m, key) +} diff --git a/sdk/helper/template/funcs.go b/sdk/helper/template/funcs.go new file mode 100644 index 0000000..6d68cab --- /dev/null +++ b/sdk/helper/template/funcs.go @@ -0,0 +1,80 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package template + +import ( + "crypto/sha256" + "encoding/base64" + "fmt" + "strconv" + "strings" + "time" + + UUID "github.com/hashicorp/go-uuid" +) + +func unixTime() string { + return strconv.FormatInt(time.Now().Unix(), 10) +} + +func unixTimeMillis() string { + return strconv.FormatInt(time.Now().UnixNano()/int64(time.Millisecond), 10) +} + +func timestamp(format string) string { + return time.Now().Format(format) +} + +func truncate(maxLen int, str string) (string, error) { + if maxLen <= 0 { + return "", fmt.Errorf("max length must be > 0 but was %d", maxLen) + } + if len(str) > maxLen { + return str[:maxLen], nil + } + return str, nil +} + +const ( + sha256HashLen = 8 +) + +func truncateSHA256(maxLen int, str string) (string, error) { + if maxLen <= 8 { + return "", fmt.Errorf("max length must be > 8 but was %d", maxLen) + } + + if len(str) <= maxLen { + return str, nil + } + + truncIndex := maxLen - sha256HashLen + hash := hashSHA256(str[truncIndex:]) + result := fmt.Sprintf("%s%s", str[:truncIndex], hash[:sha256HashLen]) + return result, nil +} + +func hashSHA256(str string) string { + return fmt.Sprintf("%x", sha256.Sum256([]byte(str))) +} + +func encodeBase64(str string) string { + return base64.StdEncoding.EncodeToString([]byte(str)) +} + +func uppercase(str string) string { + return strings.ToUpper(str) +} + +func lowercase(str string) string { + return strings.ToLower(str) +} + +func replace(find string, replace string, str string) string { + return strings.ReplaceAll(str, find, replace) +} + +func uuid() (string, error) { + return UUID.GenerateUUID() +} diff --git a/sdk/helper/template/funcs_test.go b/sdk/helper/template/funcs_test.go new file mode 100644 index 0000000..4965115 --- /dev/null +++ b/sdk/helper/template/funcs_test.go @@ -0,0 +1,359 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package template + +import ( + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestUnixTimestamp(t *testing.T) { + now := time.Now().Unix() + for i := 0; i < 100; i++ { + str := unixTime() + actual, err := strconv.Atoi(str) + require.NoError(t, err) + // Make sure the value generated is from now (or later if the clock ticked over) + require.GreaterOrEqual(t, int64(actual), now) + } +} + +func TestNowNano(t *testing.T) { + now := time.Now().UnixNano() / int64(time.Millisecond) + for i := 0; i < 100; i++ { + str := unixTimeMillis() + actual, err := strconv.ParseUint(str, 10, 64) + require.NoError(t, err) + // Make sure the value generated is from now (or later if the clock ticked over) + require.GreaterOrEqual(t, int64(actual), now) + } +} + +func TestTruncate(t *testing.T) { + type testCase struct { + maxLen int + input string + expected string + expectErr bool + } + + tests := map[string]testCase{ + "negative max length": { + maxLen: -1, + input: "foobarbaz", + expected: "", + expectErr: true, + }, + "zero max length": { + maxLen: 0, + input: "foobarbaz", + expected: "", + expectErr: true, + }, + "one max length": { + maxLen: 1, + input: "foobarbaz", + expected: "f", + expectErr: false, + }, + "half max length": { + maxLen: 5, + input: "foobarbaz", + expected: "fooba", + expectErr: false, + }, + "max length one less than length": { + maxLen: 8, + input: "foobarbaz", + expected: "foobarba", + expectErr: false, + }, + "max length equals string length": { + maxLen: 9, + input: "foobarbaz", + expected: "foobarbaz", + expectErr: false, + }, + "max length greater than string length": { + maxLen: 10, + input: "foobarbaz", + expected: "foobarbaz", + expectErr: false, + }, + "max length significantly greater than string length": { + maxLen: 100, + input: "foobarbaz", + expected: "foobarbaz", + expectErr: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + actual, err := truncate(test.maxLen, test.input) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + require.Equal(t, test.expected, actual) + }) + } +} + +func TestTruncateSHA256(t *testing.T) { + type testCase struct { + maxLen int + input string + expected string + expectErr bool + } + + tests := map[string]testCase{ + "negative max length": { + maxLen: -1, + input: "thisisareallylongstring", + expected: "", + expectErr: true, + }, + "zero max length": { + maxLen: 0, + input: "thisisareallylongstring", + expected: "", + expectErr: true, + }, + "8 max length": { + maxLen: 8, + input: "thisisareallylongstring", + expected: "", + expectErr: true, + }, + "nine max length": { + maxLen: 9, + input: "thisisareallylongstring", + expected: "t4bb25641", + expectErr: false, + }, + "half max length": { + maxLen: 12, + input: "thisisareallylongstring", + expected: "this704cd12b", + expectErr: false, + }, + "max length one less than length": { + maxLen: 22, + input: "thisisareallylongstring", + expected: "thisisareallyl7f978be6", + expectErr: false, + }, + "max length equals string length": { + maxLen: 23, + input: "thisisareallylongstring", + expected: "thisisareallylongstring", + expectErr: false, + }, + "max length greater than string length": { + maxLen: 24, + input: "thisisareallylongstring", + expected: "thisisareallylongstring", + expectErr: false, + }, + "max length significantly greater than string length": { + maxLen: 100, + 
input: "thisisareallylongstring", + expected: "thisisareallylongstring", + expectErr: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + actual, err := truncateSHA256(test.maxLen, test.input) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + require.Equal(t, test.expected, actual) + }) + } +} + +func TestSHA256(t *testing.T) { + type testCase struct { + input string + expected string + } + + tests := map[string]testCase{ + "empty string": { + input: "", + expected: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + }, + "foobar": { + input: "foobar", + expected: "c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2", + }, + "mystring": { + input: "mystring", + expected: "bd3ff47540b31e62d4ca6b07794e5a886b0f655fc322730f26ecd65cc7dd5c90", + }, + "very long string": { + input: "Nullam pharetra mattis laoreet. Mauris feugiat, tortor in malesuada convallis, " + + "eros nunc dapibus erat, eget malesuada purus leo id lorem. Morbi pharetra, libero at malesuada bibendum, " + + "dui quam tristique libero, bibendum cursus diam quam at sem. Vivamus vestibulum orci vel odio posuere, " + + "quis tincidunt ipsum lacinia. Donec elementum a orci quis lobortis. Etiam bibendum ullamcorper varius. " + + "Mauris tempor eros est, at porta erat rutrum ac. Aliquam erat volutpat. Sed sagittis leo non bibendum " + + "lacinia. Praesent id justo iaculis, mattis libero vel, feugiat dui. Morbi id diam non magna imperdiet " + + "imperdiet. Ut tortor arcu, mollis ac maximus ac, sagittis commodo augue. Ut semper, diam pulvinar porta " + + "dignissim, massa ex condimentum enim, sed euismod urna quam vitae ex. Sed id neque vitae magna sagittis " + + "pretium. 
Suspendisse potenti.", + expected: "3e2a996c20b7a02378204f0843507d335e1ba203df2c4ded8d839d44af24482f", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + actual := hashSHA256(test.input) + require.Equal(t, test.expected, actual) + }) + } +} + +func TestUppercase(t *testing.T) { + type testCase struct { + input string + expected string + } + + tests := map[string]testCase{ + "empty string": { + input: "", + expected: "", + }, + "lowercase": { + input: "foobar", + expected: "FOOBAR", + }, + "uppercase": { + input: "FOOBAR", + expected: "FOOBAR", + }, + "mixed case": { + input: "fOoBaR", + expected: "FOOBAR", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + actual := uppercase(test.input) + require.Equal(t, test.expected, actual) + }) + } +} + +func TestLowercase(t *testing.T) { + type testCase struct { + input string + expected string + } + + tests := map[string]testCase{ + "empty string": { + input: "", + expected: "", + }, + "lowercase": { + input: "foobar", + expected: "foobar", + }, + "uppercase": { + input: "FOOBAR", + expected: "foobar", + }, + "mixed case": { + input: "fOoBaR", + expected: "foobar", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + actual := lowercase(test.input) + require.Equal(t, test.expected, actual) + }) + } +} + +func TestReplace(t *testing.T) { + type testCase struct { + input string + find string + replace string + expected string + } + + tests := map[string]testCase{ + "empty string": { + input: "", + find: "", + replace: "", + expected: "", + }, + "search not found": { + input: "foobar", + find: ".", + replace: "_", + expected: "foobar", + }, + "single character found": { + input: "foo.bar", + find: ".", + replace: "_", + expected: "foo_bar", + }, + "multiple characters found": { + input: "foo.bar.baz", + find: ".", + replace: "_", + expected: "foo_bar_baz", + }, + "find and remove": { + input: "foo.bar", + find: ".", + replace: "", + expected: "foobar", + }, + "find full string": { + input: "foobarbaz", + find: "bar", + replace: "_", + expected: "foo_baz", + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + actual := replace(test.find, test.replace, test.input) + require.Equal(t, test.expected, actual) + }) + } +} + +func TestUUID(t *testing.T) { + re := "^[a-zA-Z0-9]{8}-[a-zA-Z0-9]{4}-[a-zA-Z0-9]{4}-[a-zA-Z0-9]{4}-[a-zA-Z0-9]{12}$" + for i := 0; i < 100; i++ { + id, err := uuid() + require.NoError(t, err) + require.Regexp(t, re, id) + } +} diff --git a/sdk/helper/template/template.go b/sdk/helper/template/template.go new file mode 100644 index 0000000..dea65f3 --- /dev/null +++ b/sdk/helper/template/template.go @@ -0,0 +1,155 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package template + +import ( + "fmt" + "strings" + "text/template" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-secure-stdlib/base62" +) + +type Opt func(*StringTemplate) error + +func Template(rawTemplate string) Opt { + return func(up *StringTemplate) error { + up.rawTemplate = rawTemplate + return nil + } +} + +// Function allows the user to specify functions for use in the template. If the name provided is a function that +// already exists in the function map, this will override the previously specified function. 
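+//
+// A minimal sketch (illustrative; "mask" is a hypothetical helper):
+//
+//	tmpl, err := NewTemplate(
+//		Template(`{{ .Secret | mask }}`),
+//		Function("mask", func(string) string { return "****" }),
+//	)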
+func Function(name string, f interface{}) Opt {
+	return func(up *StringTemplate) error {
+		if name == "" {
+			return fmt.Errorf("missing function name")
+		}
+		if f == nil {
+			return fmt.Errorf("missing function")
+		}
+		up.funcMap[name] = f
+		return nil
+	}
+}
+
+// StringTemplate creates strings based on the provided template.
+// This uses the Go templating language, so anything that adheres to that language will function in this struct.
+// There are several custom functions available for use in the template:
+// - random
+//   - Randomly generated characters from the base62 character set ([0-9A-Za-z]). Must include a length.
+//     Example: {{ random 20 }}
+//
+// - truncate
+//   - Truncates the previous value to the specified length. Must include a maximum length.
+//     Example: {{ .DisplayName | truncate 10 }}
+//
+// - truncate_sha256
+//   - Truncates the previous value to the specified length. If the original length is greater than the length
+//     specified, the remaining characters will be SHA256-hashed and the first 8 hex characters of the hash
+//     appended to the end, so the result will be no longer than the length specified.
+//     Example: {{ .DisplayName | truncate_sha256 30 }}
+//
+// - uppercase
+//   - Uppercases the previous value.
+//     Example: {{ .RoleName | uppercase }}
+//
+// - lowercase
+//   - Lowercases the previous value.
+//     Example: {{ .DisplayName | lowercase }}
+//
+// - replace
+//   - Performs a string find & replace.
+//     Example: {{ .DisplayName | replace "-" "_" }}
+//
+// - sha256
+//   - SHA256 hashes the previous value.
+//     Example: {{ .DisplayName | sha256 }}
+//
+// - base64
+//   - base64 encodes the previous value.
+//     Example: {{ .DisplayName | base64 }}
+//
+// - unix_time
+//   - Provides the current unix time in seconds.
+//     Example: {{ unix_time }}
+//
+// - unix_time_millis
+//   - Provides the current unix time in milliseconds.
+//     Example: {{ unix_time_millis }}
+//
+// - timestamp
+//   - Provides the current time. Must include a standard Go format string.
+//     Example: {{ timestamp "2006-01-02T15:04:05.000Z" }}
+//
+// - uuid
+//   - Generates a UUID.
+//     Example: {{ uuid }}
+type StringTemplate struct {
+	rawTemplate string
+	tmpl        *template.Template
+	funcMap     template.FuncMap
+}
+
+// NewTemplate creates a StringTemplate. A template string must be supplied via
+// the Template option; all other values have reasonable defaults.
+func NewTemplate(opts ...Opt) (up StringTemplate, err error) {
+	up = StringTemplate{
+		funcMap: map[string]interface{}{
+			"random":          base62.Random,
+			"truncate":        truncate,
+			"truncate_sha256": truncateSHA256,
+			"uppercase":       uppercase,
+			"lowercase":       lowercase,
+			"replace":         replace,
+			"sha256":          hashSHA256,
+			"base64":          encodeBase64,
+
+			"unix_time":        unixTime,
+			"unix_time_millis": unixTimeMillis,
+			"timestamp":        timestamp,
+			"uuid":             uuid,
+		},
+	}
+
+	merr := &multierror.Error{}
+	for _, opt := range opts {
+		merr = multierror.Append(merr, opt(&up))
+	}
+
+	err = merr.ErrorOrNil()
+	if err != nil {
+		return up, err
+	}
+
+	if up.rawTemplate == "" {
+		return StringTemplate{}, fmt.Errorf("missing template")
+	}
+
+	tmpl, err := template.New("template").
+		Funcs(up.funcMap).
+ Parse(up.rawTemplate) + if err != nil { + return StringTemplate{}, fmt.Errorf("unable to parse template: %w", err) + } + up.tmpl = tmpl + + return up, nil +} + +// Generate based on the provided template +func (up StringTemplate) Generate(data interface{}) (string, error) { + if up.tmpl == nil || up.rawTemplate == "" { + return "", fmt.Errorf("failed to generate: template not initialized") + } + str := &strings.Builder{} + err := up.tmpl.Execute(str, data) + if err != nil { + return "", fmt.Errorf("unable to apply template: %w", err) + } + + return str.String(), nil +} diff --git a/sdk/helper/template/template_test.go b/sdk/helper/template/template_test.go new file mode 100644 index 0000000..2f66bf3 --- /dev/null +++ b/sdk/helper/template/template_test.go @@ -0,0 +1,209 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package template + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestGenerate(t *testing.T) { + type testCase struct { + template string + additionalOpts []Opt + data interface{} + + expected string + expectErr bool + } + + tests := map[string]testCase{ + "template without arguments": { + template: "this is a template", + data: nil, + expected: "this is a template", + expectErr: false, + }, + "template with arguments but no data": { + template: "this is a {{.String}}", + data: nil, + expected: "this is a ", + expectErr: false, + }, + "template with arguments": { + template: "this is a {{.String}}", + data: struct { + String string + }{ + String: "foobar", + }, + expected: "this is a foobar", + expectErr: false, + }, + "template with builtin functions": { + template: `{{.String | truncate 10}} +{{.String | uppercase}} +{{.String | lowercase}} +{{.String | replace " " "."}} +{{.String | sha256}} +{{.String | base64}} +{{.String | truncate_sha256 20}}`, + data: struct { + String string + }{ + String: "Some string with Multiple Capitals LETTERS", + }, + expected: `Some strin +SOME STRING WITH MULTIPLE CAPITALS LETTERS +some string with multiple capitals letters +Some.string.with.Multiple.Capitals.LETTERS +da9872dd96609c72897defa11fe81017a62c3f44339d9d3b43fe37540ede3601 +U29tZSBzdHJpbmcgd2l0aCBNdWx0aXBsZSBDYXBpdGFscyBMRVRURVJT +Some string 6841cf80`, + expectErr: false, + }, + "custom function": { + template: "{{foo}}", + additionalOpts: []Opt{ + Function("foo", func() string { + return "custom-foo" + }), + }, + expected: "custom-foo", + expectErr: false, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + opts := append(test.additionalOpts, Template(test.template)) + st, err := NewTemplate(opts...) 
+ require.NoError(t, err) + + actual, err := st.Generate(test.data) + if test.expectErr && err == nil { + t.Fatalf("err expected, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + require.Equal(t, test.expected, actual) + }) + } + + t.Run("random", func(t *testing.T) { + for i := 1; i < 100; i++ { + st, err := NewTemplate( + Template(fmt.Sprintf("{{random %d}}", i)), + ) + require.NoError(t, err) + + actual, err := st.Generate(nil) + require.NoError(t, err) + + require.Regexp(t, fmt.Sprintf("^[a-zA-Z0-9]{%d}$", i), actual) + } + }) + + t.Run("unix_time", func(t *testing.T) { + for i := 0; i < 100; i++ { + st, err := NewTemplate( + Template("{{unix_time}}"), + ) + require.NoError(t, err) + + actual, err := st.Generate(nil) + require.NoError(t, err) + + require.Regexp(t, "^[0-9]+$", actual) + } + }) + + t.Run("unix_time_millis", func(t *testing.T) { + for i := 0; i < 100; i++ { + st, err := NewTemplate( + Template("{{unix_time_millis}}"), + ) + require.NoError(t, err) + + actual, err := st.Generate(nil) + require.NoError(t, err) + + require.Regexp(t, "^[0-9]+$", actual) + } + }) + + t.Run("timestamp", func(t *testing.T) { + for i := 0; i < 100; i++ { + st, err := NewTemplate( + Template(`{{timestamp "2006-01-02T15:04:05.000Z"}}`), + ) + require.NoError(t, err) + + actual, err := st.Generate(nil) + require.NoError(t, err) + + require.Regexp(t, `^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$`, actual) + } + }) +} + +func TestBadConstructorArguments(t *testing.T) { + type testCase struct { + opts []Opt + } + + tests := map[string]testCase{ + "missing template": { + opts: nil, + }, + "missing custom function name": { + opts: []Opt{ + Template("foo bar"), + Function("", func() string { + return "foo" + }), + }, + }, + "missing custom function": { + opts: []Opt{ + Template("foo bar"), + Function("foo", nil), + }, + }, + "bad template": { + opts: []Opt{ + Template("{{.String"), + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + st, err := NewTemplate(test.opts...) + require.Error(t, err) + + str, err := st.Generate(nil) + require.Error(t, err) + require.Equal(t, "", str) + }) + } + + t.Run("erroring custom function", func(t *testing.T) { + st, err := NewTemplate( + Template("{{foo}}"), + Function("foo", func() (string, error) { + return "", fmt.Errorf("an error!") + }), + ) + require.NoError(t, err) + + str, err := st.Generate(nil) + require.Error(t, err) + require.Equal(t, "", str) + }) +} diff --git a/sdk/helper/testcluster/consts.go b/sdk/helper/testcluster/consts.go new file mode 100644 index 0000000..e85ef19 --- /dev/null +++ b/sdk/helper/testcluster/consts.go @@ -0,0 +1,12 @@ +package testcluster + +const ( + // EnvVaultLicenseCI is the name of an environment variable that contains + // a signed license string used for Vault Enterprise binary-based tests. + // The binary will be run with the env var VAULT_LICENSE set to this value. + EnvVaultLicenseCI = "VAULT_LICENSE_CI" + + // DefaultCAFile is the path to the CA file. This is a docker-specific + // constant. TODO: needs to be moved to a more relevant place + DefaultCAFile = "/vault/config/ca.pem" +) diff --git a/sdk/helper/testcluster/docker/cert.go b/sdk/helper/testcluster/docker/cert.go new file mode 100644 index 0000000..4704030 --- /dev/null +++ b/sdk/helper/testcluster/docker/cert.go @@ -0,0 +1,88 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package docker
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"sync"
+
+	"github.com/hashicorp/errwrap"
+)
+
+// ReloadFunc is the type of function that is called when a reload is requested
+type ReloadFunc func() error
+
+// CertificateGetter satisfies ReloadFunc and its GetCertificate method
+// satisfies the tls.GetCertificate function signature. Currently it does not
+// allow changing paths after the fact.
+type CertificateGetter struct {
+	sync.RWMutex
+
+	cert *tls.Certificate
+
+	certFile   string
+	keyFile    string
+	passphrase string
+}
+
+func NewCertificateGetter(certFile, keyFile, passphrase string) *CertificateGetter {
+	return &CertificateGetter{
+		certFile:   certFile,
+		keyFile:    keyFile,
+		passphrase: passphrase,
+	}
+}
+
+func (cg *CertificateGetter) Reload() error {
+	certPEMBlock, err := ioutil.ReadFile(cg.certFile)
+	if err != nil {
+		return err
+	}
+	keyPEMBlock, err := ioutil.ReadFile(cg.keyFile)
+	if err != nil {
+		return err
+	}
+
+	// Check for encrypted pem block
+	keyBlock, _ := pem.Decode(keyPEMBlock)
+	if keyBlock == nil {
+		return errors.New("decoded PEM is blank")
+	}
+
+	if x509.IsEncryptedPEMBlock(keyBlock) {
+		keyBlock.Bytes, err = x509.DecryptPEMBlock(keyBlock, []byte(cg.passphrase))
+		if err != nil {
+			return errwrap.Wrapf("Decrypting PEM block failed: {{err}}", err)
+		}
+		keyPEMBlock = pem.EncodeToMemory(keyBlock)
+	}
+
+	cert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
+	if err != nil {
+		return err
+	}
+
+	cg.Lock()
+	defer cg.Unlock()
+
+	cg.cert = &cert
+
+	return nil
+}
+
+func (cg *CertificateGetter) GetCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
+	cg.RLock()
+	defer cg.RUnlock()
+
+	if cg.cert == nil {
+		return nil, fmt.Errorf("nil certificate")
+	}
+
+	return cg.cert, nil
+}
diff --git a/sdk/helper/testcluster/docker/environment.go b/sdk/helper/testcluster/docker/environment.go
new file mode 100644
index 0000000..b0d72a0
--- /dev/null
+++ b/sdk/helper/testcluster/docker/environment.go
@@ -0,0 +1,1153 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package docker
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/tls"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/hex"
+	"encoding/json"
+	"encoding/pem"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math/big"
+	mathrand "math/rand"
+	"net"
+	"net/http"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/volume"
+	docker "github.com/docker/docker/client"
+	"github.com/hashicorp/go-cleanhttp"
+	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/vault/api"
+	dockhelper "github.com/hashicorp/vault/sdk/helper/docker"
+	"github.com/hashicorp/vault/sdk/helper/logging"
+	"github.com/hashicorp/vault/sdk/helper/testcluster"
+	uberAtomic "go.uber.org/atomic"
+	"golang.org/x/net/http2"
+)
+
+var (
+	_ testcluster.VaultCluster     = &DockerCluster{}
+	_ testcluster.VaultClusterNode = &DockerClusterNode{}
+)
+
+const MaxClusterNameLength = 52
+
+// DockerCluster is used to manage the lifecycle of the test Vault cluster
+type DockerCluster struct {
+	ClusterName string
+
+	ClusterNodes []*DockerClusterNode
+
+	// Certificate fields
+	*testcluster.CA
+	RootCAs *x509.CertPool
+
+	barrierKeys  [][]byte
+	recoveryKeys [][]byte
+	tmpDir       string
+
+	// rootToken is the initial root token created when the Vault cluster is
+	// created.
+	rootToken string
+	DockerAPI *docker.Client
+	ID        string
+	Logger    log.Logger
+	builtTags map[string]struct{}
+
+	storage testcluster.ClusterStorage
+}
+
+func (dc *DockerCluster) NamedLogger(s string) log.Logger {
+	return dc.Logger.Named(s)
+}
+
+func (dc *DockerCluster) ClusterID() string {
+	return dc.ID
+}
+
+func (dc *DockerCluster) Nodes() []testcluster.VaultClusterNode {
+	ret := make([]testcluster.VaultClusterNode, len(dc.ClusterNodes))
+	for i := range dc.ClusterNodes {
+		ret[i] = dc.ClusterNodes[i]
+	}
+	return ret
+}
+
+func (dc *DockerCluster) GetBarrierKeys() [][]byte {
+	return dc.barrierKeys
+}
+
+func testKeyCopy(key []byte) []byte {
+	result := make([]byte, len(key))
+	copy(result, key)
+	return result
+}
+
+func (dc *DockerCluster) GetRecoveryKeys() [][]byte {
+	ret := make([][]byte, len(dc.recoveryKeys))
+	for i, k := range dc.recoveryKeys {
+		ret[i] = testKeyCopy(k)
+	}
+	return ret
+}
+
+func (dc *DockerCluster) GetBarrierOrRecoveryKeys() [][]byte {
+	return dc.GetBarrierKeys()
+}
+
+func (dc *DockerCluster) SetBarrierKeys(keys [][]byte) {
+	dc.barrierKeys = make([][]byte, len(keys))
+	for i, k := range keys {
+		dc.barrierKeys[i] = testKeyCopy(k)
+	}
+}
+
+func (dc *DockerCluster) SetRecoveryKeys(keys [][]byte) {
+	dc.recoveryKeys = make([][]byte, len(keys))
+	for i, k := range keys {
+		dc.recoveryKeys[i] = testKeyCopy(k)
+	}
+}
+
+func (dc *DockerCluster) GetCACertPEMFile() string {
+	return dc.CACertPEMFile
+}
+
+func (dc *DockerCluster) Cleanup() {
+	dc.cleanup()
+}
+
+func (dc *DockerCluster) cleanup() error {
+	var result *multierror.Error
+	for _, node := range dc.ClusterNodes {
+		if err := node.cleanup(); err != nil {
+			result = multierror.Append(result, err)
+		}
+	}
+
+	return result.ErrorOrNil()
+}
+
+// GetRootToken returns the root token of the cluster, if set
+func (dc *DockerCluster) GetRootToken() string {
+	return dc.rootToken
+}
+
+func (dc *DockerCluster) SetRootToken(s string) {
+	dc.Logger.Trace("cluster root token changed", "helpful_env", fmt.Sprintf("VAULT_TOKEN=%s VAULT_CACERT=/vault/config/ca.pem", s))
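+	// The trace above exists so a human can attach to the running test
+	// cluster by hand while a test is paused; illustratively:
+	//
+	//	export VAULT_TOKEN=<root token> VAULT_CACERT=/vault/config/ca.pem
+	//	vault status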
+	dc.rootToken = s
+}
+
+func (n *DockerClusterNode) Name() string {
+	return n.Cluster.ClusterName + "-" + n.NodeID
+}
+
+func (dc *DockerCluster) setupNode0(ctx context.Context) error {
+	client := dc.ClusterNodes[0].client
+
+	var resp *api.InitResponse
+	var err error
+	for ctx.Err() == nil {
+		resp, err = client.Sys().Init(&api.InitRequest{
+			SecretShares:    3,
+			SecretThreshold: 3,
+		})
+		if err == nil && resp != nil {
+			break
+		}
+		time.Sleep(500 * time.Millisecond)
+	}
+	if err != nil {
+		return err
+	}
+	if resp == nil {
+		return fmt.Errorf("nil response to init request")
+	}
+
+	for _, k := range resp.Keys {
+		raw, err := hex.DecodeString(k)
+		if err != nil {
+			return err
+		}
+		dc.barrierKeys = append(dc.barrierKeys, raw)
+	}
+
+	for _, k := range resp.RecoveryKeys {
+		raw, err := hex.DecodeString(k)
+		if err != nil {
+			return err
+		}
+		dc.recoveryKeys = append(dc.recoveryKeys, raw)
+	}
+
+	dc.rootToken = resp.RootToken
+	client.SetToken(dc.rootToken)
+	dc.ClusterNodes[0].client = client
+
+	err = testcluster.UnsealNode(ctx, dc, 0)
+	if err != nil {
+		return err
+	}
+
+	err = ensureLeaderMatches(ctx, client, func(leader *api.LeaderResponse) error {
+		if !leader.IsSelf {
+			return fmt.Errorf("node %d leader=%v, expected=%v", 0, leader.IsSelf, true)
+		}
+
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	status, err := client.Sys().SealStatusWithContext(ctx)
+	if err != nil {
+		return err
+	}
+	dc.ID = status.ClusterID
+	return nil
+}
+
+func (dc *DockerCluster) clusterReady(ctx context.Context) error {
+	for i, node := range dc.ClusterNodes {
+		expectLeader := i == 0
+		err := ensureLeaderMatches(ctx, node.client, func(leader *api.LeaderResponse) error {
+			if expectLeader != leader.IsSelf {
+				return fmt.Errorf("node %d leader=%v, expected=%v", i, leader.IsSelf, expectLeader)
+			}
+
+			return nil
+		})
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (dc *DockerCluster) setupCA(opts *DockerClusterOptions) error {
+	var err error
+	var ca testcluster.CA
+
+	if opts != nil && opts.CAKey != nil {
+		ca.CAKey = opts.CAKey
+	} else {
+		ca.CAKey, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+		if err != nil {
+			return err
+		}
+	}
+
+	var caBytes []byte
+	if opts != nil && len(opts.CACert) > 0 {
+		caBytes = opts.CACert
+	} else {
+		serialNumber := mathrand.New(mathrand.NewSource(time.Now().UnixNano())).Int63()
+		CACertTemplate := &x509.Certificate{
+			Subject: pkix.Name{
+				CommonName: "localhost",
+			},
+			KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
+			SerialNumber:          big.NewInt(serialNumber),
+			NotBefore:             time.Now().Add(-30 * time.Second),
+			NotAfter:              time.Now().Add(262980 * time.Hour),
+			BasicConstraintsValid: true,
+			IsCA:                  true,
+		}
+		caBytes, err = x509.CreateCertificate(rand.Reader, CACertTemplate, CACertTemplate, ca.CAKey.Public(), ca.CAKey)
+		if err != nil {
+			return err
+		}
+	}
+	CACert, err := x509.ParseCertificate(caBytes)
+	if err != nil {
+		return err
+	}
+	ca.CACert = CACert
+	ca.CACertBytes = caBytes
+
+	CACertPEMBlock := &pem.Block{
+		Type:  "CERTIFICATE",
+		Bytes: caBytes,
+	}
+	ca.CACertPEM = pem.EncodeToMemory(CACertPEMBlock)
+
+	ca.CACertPEMFile = filepath.Join(dc.tmpDir, "ca", "ca.pem")
+	err = os.WriteFile(ca.CACertPEMFile, ca.CACertPEM, 0o755)
+	if err != nil {
+		return err
+	}
+
+	marshaledCAKey, err := x509.MarshalECPrivateKey(ca.CAKey)
+	if err != nil {
+		return err
+	}
+	CAKeyPEMBlock := &pem.Block{
+		Type:  "EC PRIVATE KEY",
+		Bytes: marshaledCAKey,
+	}
+	ca.CAKeyPEM = pem.EncodeToMemory(CAKeyPEMBlock)
+
+	dc.CA = &ca
+
+	return nil
+}
+
+func (n *DockerClusterNode) 
setupCert(ip string) error { + var err error + + n.ServerKey, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return err + } + + serialNumber := mathrand.New(mathrand.NewSource(time.Now().UnixNano())).Int63() + certTemplate := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: n.Name(), + }, + DNSNames: []string{"localhost", n.Name()}, + IPAddresses: []net.IP{net.IPv6loopback, net.ParseIP("127.0.0.1"), net.ParseIP(ip)}, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + }, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement, + SerialNumber: big.NewInt(serialNumber), + NotBefore: time.Now().Add(-30 * time.Second), + NotAfter: time.Now().Add(262980 * time.Hour), + } + n.ServerCertBytes, err = x509.CreateCertificate(rand.Reader, certTemplate, n.Cluster.CACert, n.ServerKey.Public(), n.Cluster.CAKey) + if err != nil { + return err + } + n.ServerCert, err = x509.ParseCertificate(n.ServerCertBytes) + if err != nil { + return err + } + n.ServerCertPEM = pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: n.ServerCertBytes, + }) + + marshaledKey, err := x509.MarshalECPrivateKey(n.ServerKey) + if err != nil { + return err + } + n.ServerKeyPEM = pem.EncodeToMemory(&pem.Block{ + Type: "EC PRIVATE KEY", + Bytes: marshaledKey, + }) + + n.ServerCertPEMFile = filepath.Join(n.WorkDir, "cert.pem") + err = os.WriteFile(n.ServerCertPEMFile, n.ServerCertPEM, 0o755) + if err != nil { + return err + } + + n.ServerKeyPEMFile = filepath.Join(n.WorkDir, "key.pem") + err = os.WriteFile(n.ServerKeyPEMFile, n.ServerKeyPEM, 0o755) + if err != nil { + return err + } + + tlsCert, err := tls.X509KeyPair(n.ServerCertPEM, n.ServerKeyPEM) + if err != nil { + return err + } + + certGetter := NewCertificateGetter(n.ServerCertPEMFile, n.ServerKeyPEMFile, "") + if err := certGetter.Reload(); err != nil { + return err + } + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{tlsCert}, + RootCAs: n.Cluster.RootCAs, + ClientCAs: n.Cluster.RootCAs, + ClientAuth: tls.RequestClientCert, + NextProtos: []string{"h2", "http/1.1"}, + GetCertificate: certGetter.GetCertificate, + } + + n.tlsConfig = tlsConfig + + err = os.WriteFile(filepath.Join(n.WorkDir, "ca.pem"), n.Cluster.CACertPEM, 0o755) + if err != nil { + return err + } + return nil +} + +func NewTestDockerCluster(t *testing.T, opts *DockerClusterOptions) *DockerCluster { + if opts == nil { + opts = &DockerClusterOptions{} + } + if opts.ClusterName == "" { + opts.ClusterName = strings.ReplaceAll(t.Name(), "/", "-") + } + if opts.Logger == nil { + opts.Logger = logging.NewVaultLogger(log.Trace).Named(t.Name()) + } + if opts.NetworkName == "" { + opts.NetworkName = os.Getenv("TEST_DOCKER_NETWORK_NAME") + } + + ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second) + t.Cleanup(cancel) + + dc, err := NewDockerCluster(ctx, opts) + if err != nil { + t.Fatal(err) + } + dc.Logger.Trace("cluster started", "helpful_env", fmt.Sprintf("VAULT_TOKEN=%s VAULT_CACERT=/vault/config/ca.pem", dc.GetRootToken())) + return dc +} + +func NewDockerCluster(ctx context.Context, opts *DockerClusterOptions) (*DockerCluster, error) { + api, err := dockhelper.NewDockerAPI() + if err != nil { + return nil, err + } + + if opts == nil { + opts = &DockerClusterOptions{} + } + if opts.Logger == nil { + opts.Logger = log.NewNullLogger() + } + if opts.VaultLicense == "" { + opts.VaultLicense = os.Getenv(testcluster.EnvVaultLicenseCI) + } + + dc := 
&DockerCluster{ + DockerAPI: api, + ClusterName: opts.ClusterName, + Logger: opts.Logger, + builtTags: map[string]struct{}{}, + CA: opts.CA, + storage: opts.Storage, + } + + if err := dc.setupDockerCluster(ctx, opts); err != nil { + dc.Cleanup() + return nil, err + } + + return dc, nil +} + +// DockerClusterNode represents a single instance of Vault in a cluster +type DockerClusterNode struct { + NodeID string + HostPort string + client *api.Client + ServerCert *x509.Certificate + ServerCertBytes []byte + ServerCertPEM []byte + ServerCertPEMFile string + ServerKey *ecdsa.PrivateKey + ServerKeyPEM []byte + ServerKeyPEMFile string + tlsConfig *tls.Config + WorkDir string + Cluster *DockerCluster + Container *types.ContainerJSON + DockerAPI *docker.Client + runner *dockhelper.Runner + Logger log.Logger + cleanupContainer func() + RealAPIAddr string + ContainerNetworkName string + ContainerIPAddress string + ImageRepo string + ImageTag string + DataVolumeName string + cleanupVolume func() +} + +func (n *DockerClusterNode) TLSConfig() *tls.Config { + return n.tlsConfig.Clone() +} + +func (n *DockerClusterNode) APIClient() *api.Client { + // We clone to ensure that whenever this method is called, the caller gets + // back a pristine client, without e.g. any namespace or token changes that + // might pollute a shared client. We clone the config instead of the + // client because (1) Client.clone propagates the replicationStateStore and + // the httpClient pointers, (2) it doesn't copy the tlsConfig at all, and + // (3) if clone returns an error, it doesn't feel as appropriate to panic + // below. Who knows why clone might return an error? + cfg := n.client.CloneConfig() + client, err := api.NewClient(cfg) + if err != nil { + // It seems fine to panic here, since this should be the same input + // we provided to NewClient when we were setup, and we didn't panic then. + // Better not to completely ignore the error though, suppose there's a + // bug in CloneConfig? 
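+	// Illustratively, the clone matters because two goroutines can then do
+	//
+	//	c1, c2 := n.APIClient(), n.APIClient()
+	//	c1.SetToken("per-test-token") // does not affect c2 or n.client
+	//
+	// without racing on shared client state.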
+		panic(fmt.Sprintf("NewClient error on cloned config: %v", err))
+	}
+	client.SetToken(n.Cluster.rootToken)
+	return client
+}
+
+// apiConfig builds the Vault API client configuration used to communicate
+// with the running Vault cluster for this DockerClusterNode
+func (n *DockerClusterNode) apiConfig() (*api.Config, error) {
+	transport := cleanhttp.DefaultPooledTransport()
+	transport.TLSClientConfig = n.TLSConfig()
+	if err := http2.ConfigureTransport(transport); err != nil {
+		return nil, err
+	}
+	client := &http.Client{
+		Transport: transport,
+		CheckRedirect: func(*http.Request, []*http.Request) error {
+			// This can of course be overridden per-test by using its own client
+			return fmt.Errorf("redirects not allowed in these tests")
+		},
+	}
+	config := api.DefaultConfig()
+	if config.Error != nil {
+		return nil, config.Error
+	}
+	config.Address = fmt.Sprintf("https://%s", n.HostPort)
+	config.HttpClient = client
+	config.MaxRetries = 0
+	return config, nil
+}
+
+func (n *DockerClusterNode) newAPIClient() (*api.Client, error) {
+	config, err := n.apiConfig()
+	if err != nil {
+		return nil, err
+	}
+	client, err := api.NewClient(config)
+	if err != nil {
+		return nil, err
+	}
+	client.SetToken(n.Cluster.GetRootToken())
+	return client, nil
+}
+
+// Cleanup kills the container of the node and deletes its data volume
+func (n *DockerClusterNode) Cleanup() {
+	n.cleanup()
+}
+
+// Stop kills the container of the node
+func (n *DockerClusterNode) Stop() {
+	n.cleanupContainer()
+}
+
+func (n *DockerClusterNode) cleanup() error {
+	if n.Container == nil || n.Container.ID == "" {
+		return nil
+	}
+	n.cleanupContainer()
+	n.cleanupVolume()
+	return nil
+}
+
+func (n *DockerClusterNode) Start(ctx context.Context, opts *DockerClusterOptions) error {
+	if n.DataVolumeName == "" {
+		vol, err := n.DockerAPI.VolumeCreate(ctx, volume.CreateOptions{})
+		if err != nil {
+			return err
+		}
+		n.DataVolumeName = vol.Name
+		n.cleanupVolume = func() {
+			_ = n.DockerAPI.VolumeRemove(ctx, vol.Name, false)
+		}
+	}
+	vaultCfg := map[string]interface{}{}
+	vaultCfg["listener"] = map[string]interface{}{
+		"tcp": map[string]interface{}{
+			"address":       fmt.Sprintf("%s:%d", "0.0.0.0", 8200),
+			"tls_cert_file": "/vault/config/cert.pem",
+			"tls_key_file":  "/vault/config/key.pem",
+			"telemetry": map[string]interface{}{
+				"unauthenticated_metrics_access": true,
+			},
+		},
+	}
+	vaultCfg["telemetry"] = map[string]interface{}{
+		"disable_hostname": true,
+	}
+
+	// Setup storage. Default is raft.
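+	// The defaults below marshal into a config stanza equivalent to this
+	// (illustrative) rendering:
+	//
+	//	"storage": {
+	//	  "raft": {"path": "/vault/file", "node_id": "core-0"}
+	//	}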
+ storageType := "raft" + storageOpts := map[string]interface{}{ + // TODO add options from vnc + "path": "/vault/file", + "node_id": n.NodeID, + } + + if opts.Storage != nil { + storageType = opts.Storage.Type() + storageOpts = opts.Storage.Opts() + } + + if opts != nil && opts.VaultNodeConfig != nil { + for k, v := range opts.VaultNodeConfig.StorageOptions { + if _, ok := storageOpts[k].(string); !ok { + storageOpts[k] = v + } + } + } + vaultCfg["storage"] = map[string]interface{}{ + storageType: storageOpts, + } + + //// disable_mlock is required for working in the Docker environment with + //// custom plugins + vaultCfg["disable_mlock"] = true + vaultCfg["api_addr"] = `https://{{- GetAllInterfaces | exclude "flags" "loopback" | attr "address" -}}:8200` + vaultCfg["cluster_addr"] = `https://{{- GetAllInterfaces | exclude "flags" "loopback" | attr "address" -}}:8201` + + vaultCfg["administrative_namespace_path"] = opts.AdministrativeNamespacePath + + systemJSON, err := json.Marshal(vaultCfg) + if err != nil { + return err + } + err = os.WriteFile(filepath.Join(n.WorkDir, "system.json"), systemJSON, 0o644) + if err != nil { + return err + } + + if opts.VaultNodeConfig != nil { + localCfg := *opts.VaultNodeConfig + if opts.VaultNodeConfig.LicensePath != "" { + b, err := os.ReadFile(opts.VaultNodeConfig.LicensePath) + if err != nil || len(b) == 0 { + return fmt.Errorf("unable to read LicensePath at %q: %w", opts.VaultNodeConfig.LicensePath, err) + } + localCfg.LicensePath = "/vault/config/license" + dest := filepath.Join(n.WorkDir, "license") + err = os.WriteFile(dest, b, 0o644) + if err != nil { + return fmt.Errorf("error writing license to %q: %w", dest, err) + } + + } + userJSON, err := json.Marshal(localCfg) + if err != nil { + return err + } + err = os.WriteFile(filepath.Join(n.WorkDir, "user.json"), userJSON, 0o644) + if err != nil { + return err + } + } + + // Create a temporary cert so vault will start up + err = n.setupCert("127.0.0.1") + if err != nil { + return err + } + + caDir := filepath.Join(n.Cluster.tmpDir, "ca") + + // setup plugin bin copy if needed + copyFromTo := map[string]string{ + n.WorkDir: "/vault/config", + caDir: "/usr/local/share/ca-certificates/", + } + + var wg sync.WaitGroup + wg.Add(1) + var seenLogs uberAtomic.Bool + logConsumer := func(s string) { + if seenLogs.CAS(false, true) { + wg.Done() + } + n.Logger.Trace(s) + } + logStdout := &LogConsumerWriter{logConsumer} + logStderr := &LogConsumerWriter{func(s string) { + if seenLogs.CAS(false, true) { + wg.Done() + } + testcluster.JSONLogNoTimestamp(n.Logger, s) + }} + r, err := dockhelper.NewServiceRunner(dockhelper.RunOptions{ + ImageRepo: n.ImageRepo, + ImageTag: n.ImageTag, + // We don't need to run update-ca-certificates in the container, because + // we're providing the CA in the raft join call, and otherwise Vault + // servers don't talk to one another on the API port. + Cmd: append([]string{"server"}, opts.Args...), + Env: []string{ + // For now we're using disable_mlock, because this is for testing + // anyway, and because it prevents us using external plugins. 
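+			// SKIP_SETCAP is honored by the stock hashicorp/vault image
+			// entrypoint (an assumption about the image in use); it skips the
+			// setcap call that would otherwise grant the binary IPC_LOCK,
+			// which pairs with the disable_mlock setting above.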
+ "SKIP_SETCAP=true", + "VAULT_LOG_FORMAT=json", + "VAULT_LICENSE=" + opts.VaultLicense, + }, + Ports: []string{"8200/tcp", "8201/tcp"}, + ContainerName: n.Name(), + NetworkName: opts.NetworkName, + CopyFromTo: copyFromTo, + LogConsumer: logConsumer, + LogStdout: logStdout, + LogStderr: logStderr, + PreDelete: true, + DoNotAutoRemove: true, + PostStart: func(containerID string, realIP string) error { + err := n.setupCert(realIP) + if err != nil { + return err + } + + // If we signal Vault before it installs its sighup handler, it'll die. + wg.Wait() + n.Logger.Trace("running poststart", "containerID", containerID, "IP", realIP) + return n.runner.RefreshFiles(ctx, containerID) + }, + Capabilities: []string{"NET_ADMIN"}, + OmitLogTimestamps: true, + VolumeNameToMountPoint: map[string]string{ + n.DataVolumeName: "/vault/file", + }, + }) + if err != nil { + return err + } + n.runner = r + + probe := opts.StartProbe + if probe == nil { + probe = func(c *api.Client) error { + _, err = c.Sys().SealStatus() + return err + } + } + svc, _, err := r.StartNewService(ctx, false, false, func(ctx context.Context, host string, port int) (dockhelper.ServiceConfig, error) { + config, err := n.apiConfig() + if err != nil { + return nil, err + } + config.Address = fmt.Sprintf("https://%s:%d", host, port) + client, err := api.NewClient(config) + if err != nil { + return nil, err + } + err = probe(client) + if err != nil { + return nil, err + } + + return dockhelper.NewServiceHostPort(host, port), nil + }) + if err != nil { + return err + } + + n.HostPort = svc.Config.Address() + n.Container = svc.Container + netName := opts.NetworkName + if netName == "" { + if len(svc.Container.NetworkSettings.Networks) > 1 { + return fmt.Errorf("Set d.RunOptions.NetworkName instead for container with multiple networks: %v", svc.Container.NetworkSettings.Networks) + } + for netName = range svc.Container.NetworkSettings.Networks { + // Networks above is a map; we just need to find the first and + // only key of this map (network name). The range handles this + // for us, but we need a loop construction in order to use range. + } + } + n.ContainerNetworkName = netName + n.ContainerIPAddress = svc.Container.NetworkSettings.Networks[netName].IPAddress + n.RealAPIAddr = "https://" + n.ContainerIPAddress + ":8200" + n.cleanupContainer = svc.Cleanup + + client, err := n.newAPIClient() + if err != nil { + return err + } + client.SetToken(n.Cluster.rootToken) + n.client = client + return nil +} + +func (n *DockerClusterNode) Pause(ctx context.Context) error { + return n.DockerAPI.ContainerPause(ctx, n.Container.ID) +} + +func (n *DockerClusterNode) AddNetworkDelay(ctx context.Context, delay time.Duration, targetIP string) error { + ip := net.ParseIP(targetIP) + if ip == nil { + return fmt.Errorf("targetIP %q is not an IP address", targetIP) + } + // Let's attempt to get a unique handle for the filter rule; we'll assume that + // every targetIP has a unique last octet, which is true currently for how + // we're doing docker networking. + lastOctet := ip.To4()[3] + + stdout, stderr, exitCode, err := n.runner.RunCmdWithOutput(ctx, n.Container.ID, []string{ + "/bin/sh", + "-xec", strings.Join([]string{ + fmt.Sprintf("echo isolating node %s", targetIP), + "apk add iproute2", + // If we're running this script a second time on the same node, + // the add dev will fail; since we only want to run the netem + // command once, we'll do so in the case where the add dev doesn't fail. 
+ "tc qdisc add dev eth0 root handle 1: prio && " + + fmt.Sprintf("tc qdisc add dev eth0 parent 1:1 handle 2: netem delay %dms", delay/time.Millisecond), + // Here we create a u32 filter as per https://man7.org/linux/man-pages/man8/tc-u32.8.html + // Its parent is 1:0 (which I guess is the root?) + // Its handle must be unique, so we base it on targetIP + fmt.Sprintf("tc filter add dev eth0 parent 1:0 protocol ip pref 55 handle ::%x u32 match ip dst %s flowid 2:1", lastOctet, targetIP), + }, "; "), + }) + if err != nil { + return err + } + + n.Logger.Trace(string(stdout)) + n.Logger.Trace(string(stderr)) + if exitCode != 0 { + return fmt.Errorf("got nonzero exit code from iptables: %d", exitCode) + } + return nil +} + +// PartitionFromCluster will cause the node to be disconnected at the network +// level from the rest of the docker cluster. It does so in a way that the node +// will not see TCP RSTs and all packets it sends will be "black holed". It +// attempts to keep packets to and from the host intact which allows docker +// daemon to continue streaming logs and any test code to continue making +// requests from the host to the partitioned node. +func (n *DockerClusterNode) PartitionFromCluster(ctx context.Context) error { + stdout, stderr, exitCode, err := n.runner.RunCmdWithOutput(ctx, n.Container.ID, []string{ + "/bin/sh", + "-xec", strings.Join([]string{ + fmt.Sprintf("echo partitioning container from network"), + "apk add iproute2", + // Get the gateway address for the bridge so we can allow host to + // container traffic still. + "GW=$(ip r | grep default | grep eth0 | cut -f 3 -d' ')", + // First delete the rules in case this is called twice otherwise we'll add + // multiple copies and only remove one in Unpartition (yay iptables). + // Ignore the error if it didn't exist. + "iptables -D INPUT -i eth0 ! -s \"$GW\" -j DROP | true", + "iptables -D OUTPUT -o eth0 ! -d \"$GW\" -j DROP | true", + // Add rules to drop all packets in and out of the docker network + // connection. + "iptables -I INPUT -i eth0 ! -s \"$GW\" -j DROP", + "iptables -I OUTPUT -o eth0 ! -d \"$GW\" -j DROP", + }, "; "), + }) + if err != nil { + return err + } + + n.Logger.Trace(string(stdout)) + n.Logger.Trace(string(stderr)) + if exitCode != 0 { + return fmt.Errorf("got nonzero exit code from iptables: %d", exitCode) + } + return nil +} + +// UnpartitionFromCluster reverses a previous call to PartitionFromCluster and +// restores full connectivity. Currently assumes the default "bridge" network. +func (n *DockerClusterNode) UnpartitionFromCluster(ctx context.Context) error { + stdout, stderr, exitCode, err := n.runner.RunCmdWithOutput(ctx, n.Container.ID, []string{ + "/bin/sh", + "-xec", strings.Join([]string{ + fmt.Sprintf("echo un-partitioning container from network"), + // Get the gateway address for the bridge so we can allow host to + // container traffic still. + "GW=$(ip r | grep default | grep eth0 | cut -f 3 -d' ')", + // Remove the rules, ignore if they are not present or iptables wasn't + // installed yet (i.e. no-one called PartitionFromCluster yet). + "iptables -D INPUT -i eth0 ! -s \"$GW\" -j DROP | true", + "iptables -D OUTPUT -o eth0 ! 
-d \"$GW\" -j DROP | true", + }, "; "), + }) + if err != nil { + return err + } + + n.Logger.Trace(string(stdout)) + n.Logger.Trace(string(stderr)) + if exitCode != 0 { + return fmt.Errorf("got nonzero exit code from iptables: %d", exitCode) + } + return nil +} + +type LogConsumerWriter struct { + consumer func(string) +} + +func (l LogConsumerWriter) Write(p []byte) (n int, err error) { + // TODO this assumes that we're never passed partial log lines, which + // seems a safe assumption for now based on how docker looks to implement + // logging, but might change in the future. + scanner := bufio.NewScanner(bytes.NewReader(p)) + scanner.Buffer(make([]byte, 64*1024), bufio.MaxScanTokenSize) + for scanner.Scan() { + l.consumer(scanner.Text()) + } + return len(p), nil +} + +// DockerClusterOptions has options for setting up the docker cluster +type DockerClusterOptions struct { + testcluster.ClusterOptions + CAKey *ecdsa.PrivateKey + NetworkName string + ImageRepo string + ImageTag string + CA *testcluster.CA + VaultBinary string + Args []string + StartProbe func(*api.Client) error + Storage testcluster.ClusterStorage +} + +func ensureLeaderMatches(ctx context.Context, client *api.Client, ready func(response *api.LeaderResponse) error) error { + var leader *api.LeaderResponse + var err error + for ctx.Err() == nil { + leader, err = client.Sys().Leader() + switch { + case err != nil: + case leader == nil: + err = fmt.Errorf("nil response to leader check") + default: + err = ready(leader) + if err == nil { + return nil + } + } + time.Sleep(500 * time.Millisecond) + } + return fmt.Errorf("error checking leader: %v", err) +} + +const DefaultNumCores = 3 + +// creates a managed docker container running Vault +func (dc *DockerCluster) setupDockerCluster(ctx context.Context, opts *DockerClusterOptions) error { + if opts.TmpDir != "" { + if _, err := os.Stat(opts.TmpDir); os.IsNotExist(err) { + if err := os.MkdirAll(opts.TmpDir, 0o700); err != nil { + return err + } + } + dc.tmpDir = opts.TmpDir + } else { + tempDir, err := ioutil.TempDir("", "vault-test-cluster-") + if err != nil { + return err + } + dc.tmpDir = tempDir + } + caDir := filepath.Join(dc.tmpDir, "ca") + if err := os.MkdirAll(caDir, 0o755); err != nil { + return err + } + + var numCores int + if opts.NumCores == 0 { + numCores = DefaultNumCores + } else { + numCores = opts.NumCores + } + + if dc.CA == nil { + if err := dc.setupCA(opts); err != nil { + return err + } + } + dc.RootCAs = x509.NewCertPool() + dc.RootCAs.AddCert(dc.CA.CACert) + + if dc.storage != nil { + if err := dc.storage.Start(ctx, &opts.ClusterOptions); err != nil { + return err + } + } + + for i := 0; i < numCores; i++ { + if err := dc.addNode(ctx, opts); err != nil { + return err + } + if opts.SkipInit { + continue + } + if i == 0 { + if err := dc.setupNode0(ctx); err != nil { + return nil + } + } else { + if err := dc.joinNode(ctx, i, 0); err != nil { + return err + } + } + } + + return nil +} + +func (dc *DockerCluster) AddNode(ctx context.Context, opts *DockerClusterOptions) error { + leaderIdx, err := testcluster.LeaderNode(ctx, dc) + if err != nil { + return err + } + if err := dc.addNode(ctx, opts); err != nil { + return err + } + + return dc.joinNode(ctx, len(dc.ClusterNodes)-1, leaderIdx) +} + +func (dc *DockerCluster) addNode(ctx context.Context, opts *DockerClusterOptions) error { + tag, err := dc.setupImage(ctx, opts) + if err != nil { + return err + } + i := len(dc.ClusterNodes) + nodeID := fmt.Sprintf("core-%d", i) + node := &DockerClusterNode{ + 
+		DockerAPI: dc.DockerAPI,
+		NodeID:    nodeID,
+		Cluster:   dc,
+		WorkDir:   filepath.Join(dc.tmpDir, nodeID),
+		Logger:    dc.Logger.Named(nodeID),
+		ImageRepo: opts.ImageRepo,
+		ImageTag:  tag,
+	}
+	dc.ClusterNodes = append(dc.ClusterNodes, node)
+	if err := os.MkdirAll(node.WorkDir, 0o755); err != nil {
+		return err
+	}
+	if err := node.Start(ctx, opts); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (dc *DockerCluster) joinNode(ctx context.Context, nodeIdx int, leaderIdx int) error {
+	if dc.storage != nil && dc.storage.Type() != "raft" {
+		// Storage is not raft so nothing to do but unseal.
+		return testcluster.UnsealNode(ctx, dc, nodeIdx)
+	}
+
+	if nodeIdx >= len(dc.ClusterNodes) {
+		return fmt.Errorf("invalid node %d", nodeIdx)
+	}
+	leader := dc.ClusterNodes[leaderIdx]
+	node := dc.ClusterNodes[nodeIdx]
+	client := node.APIClient()
+
+	var resp *api.RaftJoinResponse
+	resp, err := client.Sys().RaftJoinWithContext(ctx, &api.RaftJoinRequest{
+		// When running locally on a bridge network, the containers must use their
+		// actual (private) IP to talk to one another. Our code must instead use
+		// the portmapped address since we're not on their network in that case.
+		LeaderAPIAddr:    leader.RealAPIAddr,
+		LeaderCACert:     string(dc.CACertPEM),
+		LeaderClientCert: string(node.ServerCertPEM),
+		LeaderClientKey:  string(node.ServerKeyPEM),
+	})
+	if err != nil {
+		return fmt.Errorf("failed to join cluster: %w", err)
+	}
+	if resp == nil || !resp.Joined {
+		return fmt.Errorf("nil or negative response from raft join request: %v", resp)
+	}
+
+	return testcluster.UnsealNode(ctx, dc, nodeIdx)
+}
+
+func (dc *DockerCluster) setupImage(ctx context.Context, opts *DockerClusterOptions) (string, error) {
+	if opts == nil {
+		opts = &DockerClusterOptions{}
+	}
+	sourceTag := opts.ImageTag
+	if sourceTag == "" {
+		sourceTag = "latest"
+	}
+
+	if opts.VaultBinary == "" {
+		return sourceTag, nil
+	}
+
+	suffix := "testing"
+	if sha := os.Getenv("COMMIT_SHA"); sha != "" {
+		suffix = sha
+	}
+	tag := sourceTag + "-" + suffix
+	if _, ok := dc.builtTags[tag]; ok {
+		return tag, nil
+	}
+
+	f, err := os.Open(opts.VaultBinary)
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+	data, err := io.ReadAll(f)
+	if err != nil {
+		return "", err
+	}
+	bCtx := dockhelper.NewBuildContext()
+	bCtx["vault"] = &dockhelper.FileContents{
+		Data: data,
+		Mode: 0o755,
+	}
+
+	containerFile := fmt.Sprintf(`
+FROM %s:%s
+COPY vault /bin/vault
+`, opts.ImageRepo, sourceTag)
+
+	_, err = dockhelper.BuildImage(ctx, dc.DockerAPI, containerFile, bCtx,
+		dockhelper.BuildRemove(true), dockhelper.BuildForceRemove(true),
+		dockhelper.BuildPullParent(true),
+		dockhelper.BuildTags([]string{opts.ImageRepo + ":" + tag}))
+	if err != nil {
+		return "", err
+	}
+	dc.builtTags[tag] = struct{}{}
+	return tag, nil
+}
+
+/* Notes on testing the non-bridge network case:
+- you need the test itself to be running in a container so that it can use
+  the network; create the network using
+        docker network create testvault
+- this means that you need to mount the docker socket in that test container,
+  but on macos there's stuff that prevents that from working; to hack that,
+  on the host run
+        sudo ln -s "$HOME/Library/Containers/com.docker.docker/Data/docker.raw.sock" /var/run/docker.sock.raw
+- run the test container like
+        docker run --rm -it --network testvault \
+            -v /var/run/docker.sock.raw:/var/run/docker.sock \
+            -v $(pwd):/home/circleci/go/src/github.com/hashicorp/vault/ \
+            -w /home/circleci/go/src/github.com/hashicorp/vault/ \
+            
"docker.mirror.hashicorp.services/cimg/go:1.19.2" /bin/bash +- in the container you may need to chown/chmod /var/run/docker.sock; use `docker ps` + to test if it's working + +*/ diff --git a/sdk/helper/testcluster/docker/replication.go b/sdk/helper/testcluster/docker/replication.go new file mode 100644 index 0000000..0bd8fee --- /dev/null +++ b/sdk/helper/testcluster/docker/replication.go @@ -0,0 +1,68 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package docker + +import ( + "context" + "fmt" + "os" + "strings" + "testing" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/helper/testcluster" +) + +func DefaultOptions(t *testing.T) *DockerClusterOptions { + return &DockerClusterOptions{ + ImageRepo: "hashicorp/vault", + ImageTag: "latest", + VaultBinary: os.Getenv("VAULT_BINARY"), + ClusterOptions: testcluster.ClusterOptions{ + NumCores: 3, + ClusterName: strings.ReplaceAll(t.Name(), "/", "-"), + VaultNodeConfig: &testcluster.VaultNodeConfig{ + LogLevel: "TRACE", + }, + }, + } +} + +func NewReplicationSetDocker(t *testing.T, opts *DockerClusterOptions) (*testcluster.ReplicationSet, error) { + binary := os.Getenv("VAULT_BINARY") + if binary == "" { + t.Skip("only running docker test when $VAULT_BINARY present") + } + + r := &testcluster.ReplicationSet{ + Clusters: map[string]testcluster.VaultCluster{}, + Logger: logging.NewVaultLogger(hclog.Trace).Named(t.Name()), + } + + // clusterName is used for container name as well. + // A container name should not exceed 64 chars. + // There are additional chars that are added to the name as well + // like "-A-core0". So, setting a max limit for a cluster name. + if len(opts.ClusterName) > MaxClusterNameLength { + return nil, fmt.Errorf("cluster name length exceeded the maximum allowed length of %v", MaxClusterNameLength) + } + + r.Builder = func(ctx context.Context, name string, baseLogger hclog.Logger) (testcluster.VaultCluster, error) { + myOpts := *opts + myOpts.Logger = baseLogger.Named(name) + myOpts.ClusterName += "-" + strings.ReplaceAll(name, "/", "-") + myOpts.CA = r.CA + return NewTestDockerCluster(t, &myOpts), nil + } + + a, err := r.Builder(context.TODO(), "A", r.Logger) + if err != nil { + return nil, err + } + r.Clusters["A"] = a + r.CA = a.(*DockerCluster).CA + + return r, err +} diff --git a/sdk/helper/testcluster/exec.go b/sdk/helper/testcluster/exec.go new file mode 100644 index 0000000..d91a3de --- /dev/null +++ b/sdk/helper/testcluster/exec.go @@ -0,0 +1,324 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package testcluster
+
+import (
+	"bufio"
+	"context"
+	"crypto/tls"
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+	"testing"
+	"time"
+
+	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/api"
+	"github.com/hashicorp/vault/sdk/helper/jsonutil"
+	"github.com/hashicorp/vault/sdk/helper/logging"
+)
+
+type ExecDevCluster struct {
+	ID                 string
+	ClusterName        string
+	ClusterNodes       []*execDevClusterNode
+	CACertPEMFile      string
+	barrierKeys        [][]byte
+	recoveryKeys       [][]byte
+	tmpDir             string
+	clientAuthRequired bool
+	rootToken          string
+	stop               func()
+	stopCh             chan struct{}
+	Logger             log.Logger
+}
+
+func (dc *ExecDevCluster) SetRootToken(token string) {
+	dc.rootToken = token
+}
+
+func (dc *ExecDevCluster) NamedLogger(s string) log.Logger {
+	return dc.Logger.Named(s)
+}
+
+var _ VaultCluster = &ExecDevCluster{}
+
+type ExecDevClusterOptions struct {
+	ClusterOptions
+	BinaryPath string
+	// this is -dev-listen-address, defaults to "127.0.0.1:8200"
+	BaseListenAddress string
+}
+
+func NewTestExecDevCluster(t *testing.T, opts *ExecDevClusterOptions) *ExecDevCluster {
+	if opts == nil {
+		opts = &ExecDevClusterOptions{}
+	}
+	if opts.ClusterName == "" {
+		opts.ClusterName = strings.ReplaceAll(t.Name(), "/", "-")
+	}
+	if opts.Logger == nil {
+		opts.Logger = logging.NewVaultLogger(log.Trace).Named(t.Name()) // .Named("container")
+	}
+	if opts.VaultLicense == "" {
+		opts.VaultLicense = os.Getenv(EnvVaultLicenseCI)
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
+	t.Cleanup(cancel)
+
+	dc, err := NewExecDevCluster(ctx, opts)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return dc
+}
+
+func NewExecDevCluster(ctx context.Context, opts *ExecDevClusterOptions) (*ExecDevCluster, error) {
+	if opts == nil {
+		opts = &ExecDevClusterOptions{}
+	}
+	if opts.NumCores == 0 {
+		opts.NumCores = 3
+	}
+	dc := &ExecDevCluster{
+		ClusterName: opts.ClusterName,
+		stopCh:      make(chan struct{}),
+	}
+	if err := dc.setupExecDevCluster(ctx, opts); err != nil {
+		dc.Cleanup()
+		return nil, err
+	}
+
+	return dc, nil
+}
+
+func (dc *ExecDevCluster) setupExecDevCluster(ctx context.Context, opts *ExecDevClusterOptions) (retErr error) {
+	if opts == nil {
+		opts = &ExecDevClusterOptions{}
+	}
+	if opts.Logger == nil {
+		opts.Logger = log.NewNullLogger()
+	}
+	dc.Logger = opts.Logger
+
+	if opts.TmpDir != "" {
+		if _, err := os.Stat(opts.TmpDir); os.IsNotExist(err) {
+			if err := os.MkdirAll(opts.TmpDir, 0o700); err != nil {
+				return err
+			}
+		}
+		dc.tmpDir = opts.TmpDir
+	} else {
+		tempDir, err := os.MkdirTemp("", "vault-test-cluster-")
+		if err != nil {
+			return err
+		}
+		dc.tmpDir = tempDir
+	}
+
+	// This context is used to stop the subprocess
+	execCtx, cancel := context.WithCancel(context.Background())
+	dc.stop = func() {
+		cancel()
+		close(dc.stopCh)
+	}
+	defer func() {
+		if retErr != nil {
+			cancel()
+		}
+	}()
+
+	bin := opts.BinaryPath
+	if bin == "" {
+		bin = "vault"
+	}
+
+	clusterJsonPath := filepath.Join(dc.tmpDir, "cluster.json")
+	args := []string{"server", "-dev", "-dev-cluster-json", clusterJsonPath}
+	switch {
+	case opts.NumCores == 3:
+		args = append(args, "-dev-three-node")
+	case opts.NumCores == 1:
+		args = append(args, "-dev-tls")
+	default:
+		return fmt.Errorf("NumCores=1 and NumCores=3 are the only supported options right now")
+	}
+	if opts.BaseListenAddress != "" {
+		args = append(args, "-dev-listen-address", opts.BaseListenAddress)
+	}
+	cmd := exec.CommandContext(execCtx, bin, args...)
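+	// For the default three-node case the subprocess invocation is,
+	// illustratively:
+	//
+	//	vault server -dev -dev-cluster-json <tmpdir>/cluster.json -dev-three-node
+	//
+	// with -dev-listen-address appended when BaseListenAddress is set.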
+ cmd.Env = os.Environ() + cmd.Env = append(cmd.Env, "VAULT_LICENSE="+opts.VaultLicense) + cmd.Env = append(cmd.Env, "VAULT_LOG_FORMAT=json") + cmd.Env = append(cmd.Env, "VAULT_DEV_TEMP_DIR="+dc.tmpDir) + if opts.Logger != nil { + stdout, err := cmd.StdoutPipe() + if err != nil { + return err + } + go func() { + outlog := opts.Logger.Named("stdout") + scanner := bufio.NewScanner(stdout) + for scanner.Scan() { + outlog.Trace(scanner.Text()) + } + }() + stderr, err := cmd.StderrPipe() + if err != nil { + return err + } + go func() { + errlog := opts.Logger.Named("stderr") + scanner := bufio.NewScanner(stderr) + // The default buffer is 4k, and Vault can emit bigger log lines + scanner.Buffer(make([]byte, 64*1024), bufio.MaxScanTokenSize) + for scanner.Scan() { + JSONLogNoTimestamp(errlog, scanner.Text()) + } + }() + } + + if err := cmd.Start(); err != nil { + return err + } + + for ctx.Err() == nil { + if b, err := os.ReadFile(clusterJsonPath); err == nil && len(b) > 0 { + var clusterJson ClusterJson + if err := jsonutil.DecodeJSON(b, &clusterJson); err != nil { + continue + } + dc.CACertPEMFile = clusterJson.CACertPath + dc.rootToken = clusterJson.RootToken + for i, node := range clusterJson.Nodes { + config := api.DefaultConfig() + config.Address = node.APIAddress + err := config.ConfigureTLS(&api.TLSConfig{ + CACert: clusterJson.CACertPath, + }) + if err != nil { + return err + } + client, err := api.NewClient(config) + if err != nil { + return err + } + client.SetToken(dc.rootToken) + _, err = client.Sys().ListMounts() + if err != nil { + return err + } + + dc.ClusterNodes = append(dc.ClusterNodes, &execDevClusterNode{ + name: fmt.Sprintf("core-%d", i), + client: client, + }) + } + return nil + } + time.Sleep(500 * time.Millisecond) + } + return ctx.Err() +} + +type execDevClusterNode struct { + name string + client *api.Client +} + +var _ VaultClusterNode = &execDevClusterNode{} + +func (e *execDevClusterNode) Name() string { + return e.name +} + +func (e *execDevClusterNode) APIClient() *api.Client { + // We clone to ensure that whenever this method is called, the caller gets + // back a pristine client, without e.g. any namespace or token changes that + // might pollute a shared client. We clone the config instead of the + // client because (1) Client.clone propagates the replicationStateStore and + // the httpClient pointers, (2) it doesn't copy the tlsConfig at all, and + // (3) if clone returns an error, it doesn't feel as appropriate to panic + // below. Who knows why clone might return an error? + cfg := e.client.CloneConfig() + client, err := api.NewClient(cfg) + if err != nil { + // It seems fine to panic here, since this should be the same input + // we provided to NewClient when we were setup, and we didn't panic then. + // Better not to completely ignore the error though, suppose there's a + // bug in CloneConfig? 
+ panic(fmt.Sprintf("NewClient error on cloned config: %v", err)) + } + client.SetToken(e.client.Token()) + return client +} + +func (e *execDevClusterNode) TLSConfig() *tls.Config { + return e.client.CloneConfig().TLSConfig() +} + +func (dc *ExecDevCluster) ClusterID() string { + return dc.ID +} + +func (dc *ExecDevCluster) Nodes() []VaultClusterNode { + ret := make([]VaultClusterNode, len(dc.ClusterNodes)) + for i := range dc.ClusterNodes { + ret[i] = dc.ClusterNodes[i] + } + return ret +} + +func (dc *ExecDevCluster) GetBarrierKeys() [][]byte { + return dc.barrierKeys +} + +func copyKey(key []byte) []byte { + result := make([]byte, len(key)) + copy(result, key) + return result +} + +func (dc *ExecDevCluster) GetRecoveryKeys() [][]byte { + ret := make([][]byte, len(dc.recoveryKeys)) + for i, k := range dc.recoveryKeys { + ret[i] = copyKey(k) + } + return ret +} + +func (dc *ExecDevCluster) GetBarrierOrRecoveryKeys() [][]byte { + return dc.GetBarrierKeys() +} + +func (dc *ExecDevCluster) SetBarrierKeys(keys [][]byte) { + dc.barrierKeys = make([][]byte, len(keys)) + for i, k := range keys { + dc.barrierKeys[i] = copyKey(k) + } +} + +func (dc *ExecDevCluster) SetRecoveryKeys(keys [][]byte) { + dc.recoveryKeys = make([][]byte, len(keys)) + for i, k := range keys { + dc.recoveryKeys[i] = copyKey(k) + } +} + +func (dc *ExecDevCluster) GetCACertPEMFile() string { + return dc.CACertPEMFile +} + +func (dc *ExecDevCluster) Cleanup() { + dc.stop() +} + +// GetRootToken returns the root token of the cluster, if set +func (dc *ExecDevCluster) GetRootToken() string { + return dc.rootToken +} diff --git a/sdk/helper/testcluster/logging.go b/sdk/helper/testcluster/logging.go new file mode 100644 index 0000000..dda759c --- /dev/null +++ b/sdk/helper/testcluster/logging.go @@ -0,0 +1,37 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package testcluster + +import ( + "encoding/json" + "strings" + + "github.com/hashicorp/go-hclog" +) + +func JSONLogNoTimestamp(outlog hclog.Logger, text string) { + d := json.NewDecoder(strings.NewReader(text)) + m := map[string]interface{}{} + if err := d.Decode(&m); err != nil { + outlog.Error("failed to decode json output from dev vault", "error", err, "input", text) + return + } + + delete(m, "@timestamp") + message := m["@message"].(string) + delete(m, "@message") + level := m["@level"].(string) + delete(m, "@level") + if module, ok := m["@module"]; ok { + delete(m, "@module") + outlog = outlog.Named(module.(string)) + } + + var pairs []interface{} + for k, v := range m { + pairs = append(pairs, k, v) + } + + outlog.Log(hclog.LevelFromString(level), message, pairs...) 
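+	// For example, a dev-vault line like
+	//
+	//	{"@level":"info","@module":"core","@message":"vault is unsealed","@timestamp":"..."}
+	//
+	// ends up as the equivalent of outlog.Named("core").Info("vault is unsealed"),
+	// with any remaining fields passed through as key/value pairs.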
+} diff --git a/sdk/helper/testcluster/replication.go b/sdk/helper/testcluster/replication.go new file mode 100644 index 0000000..72c8bc6 --- /dev/null +++ b/sdk/helper/testcluster/replication.go @@ -0,0 +1,896 @@ +package testcluster + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "strings" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/mitchellh/mapstructure" +) + +func GetPerformanceToken(pri VaultCluster, id, secondaryPublicKey string) (string, error) { + client := pri.Nodes()[0].APIClient() + req := map[string]interface{}{ + "id": id, + } + if secondaryPublicKey != "" { + req["secondary_public_key"] = secondaryPublicKey + } + secret, err := client.Logical().Write("sys/replication/performance/primary/secondary-token", req) + if err != nil { + return "", err + } + + if secondaryPublicKey != "" { + return secret.Data["token"].(string), nil + } + return secret.WrapInfo.Token, nil +} + +func EnablePerfPrimary(ctx context.Context, pri VaultCluster) error { + client := pri.Nodes()[0].APIClient() + _, err := client.Logical().WriteWithContext(ctx, "sys/replication/performance/primary/enable", nil) + if err != nil { + return err + } + + err = WaitForPerfReplicationState(ctx, pri, consts.ReplicationPerformancePrimary) + if err != nil { + return err + } + return WaitForActiveNodeAndPerfStandbys(ctx, pri) +} + +func WaitForPerfReplicationState(ctx context.Context, cluster VaultCluster, state consts.ReplicationState) error { + client := cluster.Nodes()[0].APIClient() + var health *api.HealthResponse + var err error + for ctx.Err() == nil { + health, err = client.Sys().HealthWithContext(ctx) + if err == nil && health.ReplicationPerformanceMode == state.GetPerformanceString() { + return nil + } + time.Sleep(500 * time.Millisecond) + } + if err == nil { + err = ctx.Err() + } + return err +} + +func EnablePerformanceSecondaryNoWait(ctx context.Context, perfToken string, pri, sec VaultCluster, updatePrimary bool) error { + postData := map[string]interface{}{ + "token": perfToken, + "ca_file": DefaultCAFile, + } + path := "sys/replication/performance/secondary/enable" + if updatePrimary { + path = "sys/replication/performance/secondary/update-primary" + } + err := WaitForActiveNodeAndPerfStandbys(ctx, sec) + if err != nil { + return err + } + _, err = sec.Nodes()[0].APIClient().Logical().Write(path, postData) + if err != nil { + return err + } + + return WaitForPerfReplicationState(ctx, sec, consts.ReplicationPerformanceSecondary) +} + +func EnablePerformanceSecondary(ctx context.Context, perfToken string, pri, sec VaultCluster, updatePrimary, skipPoisonPill bool) (string, error) { + if err := EnablePerformanceSecondaryNoWait(ctx, perfToken, pri, sec, updatePrimary); err != nil { + return "", err + } + if err := WaitForMatchingMerkleRoots(ctx, "sys/replication/performance/", pri, sec); err != nil { + return "", err + } + root, err := WaitForPerformanceSecondary(ctx, pri, sec, skipPoisonPill) + if err != nil { + return "", err + } + if err := WaitForPerfReplicationWorking(ctx, pri, sec); err != nil { + return "", err + } + return root, nil +} + +func WaitForMatchingMerkleRoots(ctx context.Context, endpoint string, pri, sec VaultCluster) error { + getRoot := func(mode string, cli *api.Client) (string, error) { + status, err := cli.Logical().Read(endpoint + "status") + if err != nil { + return "", err + } + if 
status == nil || status.Data == nil || status.Data["mode"] == nil {
+			return "", fmt.Errorf("got nil secret or data")
+		}
+		if status.Data["mode"].(string) != mode {
+			return "", fmt.Errorf("expected mode=%s, got %s", mode, status.Data["mode"].(string))
+		}
+		return status.Data["merkle_root"].(string), nil
+	}
+
+	secClient := sec.Nodes()[0].APIClient()
+	priClient := pri.Nodes()[0].APIClient()
+	for i := 0; i < 30; i++ {
+		secRoot, err := getRoot("secondary", secClient)
+		if err != nil {
+			return err
+		}
+		priRoot, err := getRoot("primary", priClient)
+		if err != nil {
+			return err
+		}
+
+		if reflect.DeepEqual(priRoot, secRoot) {
+			return nil
+		}
+		time.Sleep(time.Second)
+	}
+
+	return fmt.Errorf("roots did not become equal")
+}
+
+func WaitForPerformanceWAL(ctx context.Context, pri, sec VaultCluster) error {
+	endpoint := "sys/replication/performance/"
+	if err := WaitForMatchingMerkleRoots(ctx, endpoint, pri, sec); err != nil {
+		return err
+	}
+	getWAL := func(mode, walKey string, cli *api.Client) (int64, error) {
+		status, err := cli.Logical().Read(endpoint + "status")
+		if err != nil {
+			return 0, err
+		}
+		if status == nil || status.Data == nil || status.Data["mode"] == nil {
+			return 0, fmt.Errorf("got nil secret or data")
+		}
+		if status.Data["mode"].(string) != mode {
+			return 0, fmt.Errorf("expected mode=%s, got %s", mode, status.Data["mode"].(string))
+		}
+		return status.Data[walKey].(json.Number).Int64()
+	}
+
+	secClient := sec.Nodes()[0].APIClient()
+	priClient := pri.Nodes()[0].APIClient()
+	for ctx.Err() == nil {
+		secLastRemoteWAL, err := getWAL("secondary", "last_remote_wal", secClient)
+		if err != nil {
+			return err
+		}
+		priLastPerfWAL, err := getWAL("primary", "last_performance_wal", priClient)
+		if err != nil {
+			return err
+		}
+
+		if secLastRemoteWAL >= priLastPerfWAL {
+			return nil
+		}
+		time.Sleep(time.Second)
+	}
+
+	return fmt.Errorf("performance WALs on the secondary did not catch up with the primary, context err: %w", ctx.Err())
+}
+
+func WaitForPerformanceSecondary(ctx context.Context, pri, sec VaultCluster, skipPoisonPill bool) (string, error) {
+	if len(pri.GetRecoveryKeys()) > 0 {
+		sec.SetBarrierKeys(pri.GetRecoveryKeys())
+		sec.SetRecoveryKeys(pri.GetRecoveryKeys())
+	} else {
+		sec.SetBarrierKeys(pri.GetBarrierKeys())
+		sec.SetRecoveryKeys(pri.GetBarrierKeys())
+	}
+
+	if len(sec.Nodes()) > 1 {
+		if skipPoisonPill {
+			// As part of prepareSecondary on the active node the keyring is
+			// deleted from storage. Its absence can cause standbys to seal
+			// themselves. But it's not reliable, so we'll seal them
+			// ourselves to force the issue.
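+			// (Note: i ranges over 0..len-2 because of the [1:] reslice, so
+			// i+1 below addresses the actual standby indices 1..len-1.)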
+			for i := range sec.Nodes()[1:] {
+				if err := SealNode(ctx, sec, i+1); err != nil {
+					return "", err
+				}
+			}
+		} else {
+			// We want to make sure we unseal all the nodes so we first need to wait
+			// until two of the nodes seal due to the poison pill being written
+			if err := WaitForNCoresSealed(ctx, sec, len(sec.Nodes())-1); err != nil {
+				return "", err
+			}
+		}
+	}
+	if _, err := WaitForActiveNode(ctx, sec); err != nil {
+		return "", err
+	}
+	if err := UnsealAllNodes(ctx, sec); err != nil {
+		return "", err
+	}
+
+	perfSecondaryRootToken, err := GenerateRoot(sec, GenerateRootRegular)
+	if err != nil {
+		return "", err
+	}
+	sec.SetRootToken(perfSecondaryRootToken)
+	if err := WaitForActiveNodeAndPerfStandbys(ctx, sec); err != nil {
+		return "", err
+	}
+
+	return perfSecondaryRootToken, nil
+}
+
+func WaitForPerfReplicationWorking(ctx context.Context, pri, sec VaultCluster) error {
+	priActiveIdx, err := WaitForActiveNode(ctx, pri)
+	if err != nil {
+		return err
+	}
+	secActiveIdx, err := WaitForActiveNode(ctx, sec)
+	if err != nil {
+		return err
+	}
+
+	priClient, secClient := pri.Nodes()[priActiveIdx].APIClient(), sec.Nodes()[secActiveIdx].APIClient()
+	mountPoint, err := uuid.GenerateUUID()
+	if err != nil {
+		return err
+	}
+	err = priClient.Sys().Mount(mountPoint, &api.MountInput{
+		Type:  "kv",
+		Local: false,
+	})
+	if err != nil {
+		return fmt.Errorf("unable to mount KV engine on primary: %w", err)
+	}
+
+	path := mountPoint + "/foo"
+	_, err = priClient.Logical().Write(path, map[string]interface{}{
+		"bar": 1,
+	})
+	if err != nil {
+		return fmt.Errorf("unable to write KV on primary, path=%s: %w", path, err)
+	}
+
+	for ctx.Err() == nil {
+		var secret *api.Secret
+		secret, err = secClient.Logical().Read(path)
+		if err == nil && secret != nil {
+			err = priClient.Sys().Unmount(mountPoint)
+			if err != nil {
+				return fmt.Errorf("unable to unmount KV engine on primary: %w", err)
+			}
+			return nil
+		}
+		time.Sleep(100 * time.Millisecond)
+	}
+	if err == nil {
+		err = ctx.Err()
+	}
+	return fmt.Errorf("unable to read replicated KV on secondary, path=%s, err=%v", path, err)
+}
+
+func SetupTwoClusterPerfReplication(ctx context.Context, pri, sec VaultCluster) error {
+	if err := EnablePerfPrimary(ctx, pri); err != nil {
+		return err
+	}
+	perfToken, err := GetPerformanceToken(pri, sec.ClusterID(), "")
+	if err != nil {
+		return err
+	}
+
+	_, err = EnablePerformanceSecondary(ctx, perfToken, pri, sec, false, false)
+	return err
+}
+
+// PassiveWaitForActiveNodeAndPerfStandbys should be used instead of
+// WaitForActiveNodeAndPerfStandbys when you don't want to do any writes
+// as a side-effect. This returns perfStandby nodes in the cluster and
+// an error.
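+//
+// A typical call looks like (illustrative):
+//
+//	leader, standbys, err := PassiveWaitForActiveNodeAndPerfStandbys(ctx, pri)
+//	if err != nil { ... }
+//	client := standbys[0].APIClient()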
+func PassiveWaitForActiveNodeAndPerfStandbys(ctx context.Context, pri VaultCluster) (VaultClusterNode, []VaultClusterNode, error) { + leaderNode, standbys, err := GetActiveAndStandbys(ctx, pri) + if err != nil { + return nil, nil, fmt.Errorf("failed to derive standby nodes, %w", err) + } + + for i, node := range standbys { + client := node.APIClient() + // Make sure we get perf standby nodes + if err = EnsureCoreIsPerfStandby(ctx, client); err != nil { + return nil, nil, fmt.Errorf("standby node %d is not a perfStandby, %w", i, err) + } + } + + return leaderNode, standbys, nil +} + +func GetActiveAndStandbys(ctx context.Context, cluster VaultCluster) (VaultClusterNode, []VaultClusterNode, error) { + var leaderIndex int + var err error + if leaderIndex, err = WaitForActiveNode(ctx, cluster); err != nil { + return nil, nil, err + } + + var leaderNode VaultClusterNode + var nodes []VaultClusterNode + for i, node := range cluster.Nodes() { + if i == leaderIndex { + leaderNode = node + continue + } + nodes = append(nodes, node) + } + + return leaderNode, nodes, nil +} + +func EnsureCoreIsPerfStandby(ctx context.Context, client *api.Client) error { + var err error + var health *api.HealthResponse + for ctx.Err() == nil { + health, err = client.Sys().HealthWithContext(ctx) + if err == nil && health.PerformanceStandby { + return nil + } + time.Sleep(time.Millisecond * 500) + } + if err == nil { + err = ctx.Err() + } + return err +} + +func WaitForDRReplicationState(ctx context.Context, cluster VaultCluster, state consts.ReplicationState) error { + client := cluster.Nodes()[0].APIClient() + var health *api.HealthResponse + var err error + for ctx.Err() == nil { + health, err = client.Sys().HealthWithContext(ctx) + if err == nil && health.ReplicationDRMode == state.GetDRString() { + return nil + } + time.Sleep(500 * time.Millisecond) + } + if err == nil { + err = ctx.Err() + } + return err +} + +func EnableDrPrimary(ctx context.Context, pri VaultCluster) error { + client := pri.Nodes()[0].APIClient() + _, err := client.Logical().Write("sys/replication/dr/primary/enable", nil) + if err != nil { + return err + } + + err = WaitForDRReplicationState(ctx, pri, consts.ReplicationDRPrimary) + if err != nil { + return err + } + return WaitForActiveNodeAndPerfStandbys(ctx, pri) +} + +func GenerateDRActivationToken(pri VaultCluster, id, secondaryPublicKey string) (string, error) { + client := pri.Nodes()[0].APIClient() + req := map[string]interface{}{ + "id": id, + } + if secondaryPublicKey != "" { + req["secondary_public_key"] = secondaryPublicKey + } + secret, err := client.Logical().Write("sys/replication/dr/primary/secondary-token", req) + if err != nil { + return "", err + } + + if secondaryPublicKey != "" { + return secret.Data["token"].(string), nil + } + return secret.WrapInfo.Token, nil +} + +func WaitForDRSecondary(ctx context.Context, pri, sec VaultCluster, skipPoisonPill bool) error { + if len(pri.GetRecoveryKeys()) > 0 { + sec.SetBarrierKeys(pri.GetRecoveryKeys()) + sec.SetRecoveryKeys(pri.GetRecoveryKeys()) + } else { + sec.SetBarrierKeys(pri.GetBarrierKeys()) + sec.SetRecoveryKeys(pri.GetBarrierKeys()) + } + + if len(sec.Nodes()) > 1 { + if skipPoisonPill { + // As part of prepareSecondary on the active node the keyring is + // deleted from storage. Its absence can cause standbys to seal + // themselves. But it's not reliable, so we'll seal them + // ourselves to force the issue. 
+ for i := range sec.Nodes()[1:] { + if err := SealNode(ctx, sec, i+1); err != nil { + return err + } + } + } else { + // We want to make sure we unseal all the nodes so we first need to wait + // until two of the nodes seal due to the poison pill being written + if err := WaitForNCoresSealed(ctx, sec, len(sec.Nodes())-1); err != nil { + return err + } + } + } + if _, err := WaitForActiveNode(ctx, sec); err != nil { + return err + } + + // unseal nodes + for i := range sec.Nodes() { + if err := UnsealNode(ctx, sec, i); err != nil { + // Sometimes when we get here it's already unsealed on its own + // and then this fails for DR secondaries so check again + // The error is "path disabled in replication DR secondary mode". + if healthErr := NodeHealthy(ctx, sec, i); healthErr != nil { + // return the original error + return err + } + } + } + + sec.SetRootToken(pri.GetRootToken()) + + if _, err := WaitForActiveNode(ctx, sec); err != nil { + return err + } + + return nil +} + +func EnableDRSecondaryNoWait(ctx context.Context, sec VaultCluster, drToken string) error { + postData := map[string]interface{}{ + "token": drToken, + "ca_file": DefaultCAFile, + } + + _, err := sec.Nodes()[0].APIClient().Logical().Write("sys/replication/dr/secondary/enable", postData) + if err != nil { + return err + } + + return WaitForDRReplicationState(ctx, sec, consts.ReplicationDRSecondary) +} + +func WaitForReplicationStatus(ctx context.Context, client *api.Client, dr bool, accept func(map[string]interface{}) bool) error { + url := "sys/replication/performance/status" + if dr { + url = "sys/replication/dr/status" + } + + var err error + var secret *api.Secret + for ctx.Err() == nil { + secret, err = client.Logical().Read(url) + if err == nil && secret != nil && secret.Data != nil { + if accept(secret.Data) { + return nil + } + } + time.Sleep(500 * time.Millisecond) + } + if err == nil { + err = ctx.Err() + } + + return fmt.Errorf("unable to get acceptable replication status: error=%v secret=%#v", err, secret) +} + +func WaitForDRReplicationWorking(ctx context.Context, pri, sec VaultCluster) error { + priClient := pri.Nodes()[0].APIClient() + secClient := sec.Nodes()[0].APIClient() + + // Make sure we've entered stream-wals mode + err := WaitForReplicationStatus(ctx, secClient, true, func(secret map[string]interface{}) bool { + return secret["state"] == string("stream-wals") + }) + if err != nil { + return err + } + + // Now write some data and make sure that we see last_remote_wal nonzero, i.e. + // at least one WAL has been streamed. + secret, err := priClient.Auth().Token().Create(&api.TokenCreateRequest{}) + if err != nil { + return err + } + + // Revoke the token since some tests won't be happy to see it. 
+	err = priClient.Auth().Token().RevokeTree(secret.Auth.ClientToken)
+	if err != nil {
+		return err
+	}
+
+	err = WaitForReplicationStatus(ctx, secClient, true, func(secret map[string]interface{}) bool {
+		if secret["state"] != string("stream-wals") {
+			return false
+		}
+		if secret["last_remote_wal"] != nil {
+			lastRemoteWal, _ := secret["last_remote_wal"].(json.Number).Int64()
+			return lastRemoteWal > 0
+		}
+
+		return false
+	})
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func EnableDrSecondary(ctx context.Context, pri, sec VaultCluster, drToken string) error {
+	err := EnableDRSecondaryNoWait(ctx, sec, drToken)
+	if err != nil {
+		return err
+	}
+
+	if err = WaitForMatchingMerkleRoots(ctx, "sys/replication/dr/", pri, sec); err != nil {
+		return err
+	}
+
+	err = WaitForDRSecondary(ctx, pri, sec, false)
+	if err != nil {
+		return err
+	}
+
+	if err = WaitForDRReplicationWorking(ctx, pri, sec); err != nil {
+		return err
+	}
+	return nil
+}
+
+func SetupTwoClusterDRReplication(ctx context.Context, pri, sec VaultCluster) error {
+	if err := EnableDrPrimary(ctx, pri); err != nil {
+		return err
+	}
+
+	drToken, err := GenerateDRActivationToken(pri, sec.ClusterID(), "")
+	if err != nil {
+		return err
+	}
+	err = EnableDrSecondary(ctx, pri, sec, drToken)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func DemoteDRPrimary(client *api.Client) error {
+	_, err := client.Logical().Write("sys/replication/dr/primary/demote", map[string]interface{}{})
+	return err
+}
+
+func createBatchToken(client *api.Client, path string) (string, error) {
+	// TODO: should these be more random in case more than one batch token needs to be created?
+	suffix := strings.Replace(path, "/", "", -1)
+	policyName := "path-batch-policy-" + suffix
+	roleName := "path-batch-role-" + suffix
+
+	rules := fmt.Sprintf(`path "%s" { capabilities = [ "read", "update" ] }`, path)
+
+	// create policy
+	_, err := client.Logical().Write("sys/policy/"+policyName, map[string]interface{}{
+		"policy": rules,
+	})
+	if err != nil {
+		return "", err
+	}
+
+	// create a role
+	_, err = client.Logical().Write("auth/token/roles/"+roleName, map[string]interface{}{
+		"allowed_policies": policyName,
+		"orphan":           true,
+		"renewable":        false,
+		"token_type":       "batch",
+	})
+	if err != nil {
+		return "", err
+	}
+
+	// create batch token
+	secret, err := client.Logical().Write("auth/token/create/"+roleName, nil)
+	if err != nil {
+		return "", err
+	}
+
+	return secret.Auth.ClientToken, nil
+}
+
+// PromoteDRSecondaryWithBatchToken creates a batch token for DR promotion.
+// Before promoting, it demotes the primary cluster; the primary therefore
+// needs to be functional when the batch token is generated.
+func PromoteDRSecondaryWithBatchToken(ctx context.Context, pri, sec VaultCluster) error {
+	client := pri.Nodes()[0].APIClient()
+	drToken, err := createBatchToken(client, "sys/replication/dr/secondary/promote")
+	if err != nil {
+		return err
+	}
+
+	err = DemoteDRPrimary(client)
+	if err != nil {
+		return err
+	}
+
+	return promoteDRSecondaryInternal(ctx, sec, drToken)
+}
+
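+// Illustrative usage sketch: a DR failover test can exercise the batch-token
+// promotion path roughly like this, with cluster construction left to the
+// test:
+//
+//	if err := SetupTwoClusterDRReplication(ctx, pri, sec); err != nil {
+//		t.Fatal(err)
+//	}
+//	if err := PromoteDRSecondaryWithBatchToken(ctx, pri, sec); err != nil {
+//		t.Fatal(err)
+//	}
+
+// PromoteDRSecondary generates a DR operation token on the secondary using
+// unseal/recovery keys. Therefore, the primary cluster could potentially
+// be out of service.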
+func PromoteDRSecondary(ctx context.Context, sec VaultCluster) error {
+	// generate DR operation token to do update primary on vC to point to
+	// the new perfSec primary vD
+	drToken, err := GenerateRoot(sec, GenerateRootDR)
+	if err != nil {
+		return err
+	}
+	return promoteDRSecondaryInternal(ctx, sec, drToken)
+}
+
+func promoteDRSecondaryInternal(ctx context.Context, sec VaultCluster, drToken string) error {
+	secClient := sec.Nodes()[0].APIClient()
+
+	// Allow retries of 503s, e.g.: replication is still catching up,
+	// try again later or provide the "force" argument
+	oldMaxRetries := secClient.MaxRetries()
+	secClient.SetMaxRetries(10)
+	defer secClient.SetMaxRetries(oldMaxRetries)
+	resp, err := secClient.Logical().Write("sys/replication/dr/secondary/promote", map[string]interface{}{
+		"dr_operation_token": drToken,
+	})
+	if err != nil {
+		return err
+	}
+	if resp == nil {
+		return fmt.Errorf("nil status response during DR promotion")
+	}
+
+	if _, err := WaitForActiveNode(ctx, sec); err != nil {
+		return err
+	}
+
+	return WaitForDRReplicationState(ctx, sec, consts.ReplicationDRPrimary)
+}
+
+func checkClusterAddr(ctx context.Context, pri, sec VaultCluster) error {
+	priClient := pri.Nodes()[0].APIClient()
+	priLeader, err := priClient.Sys().LeaderWithContext(ctx)
+	if err != nil {
+		return err
+	}
+	secClient := sec.Nodes()[0].APIClient()
+	endpoint := "sys/replication/dr/"
+	status, err := secClient.Logical().Read(endpoint + "status")
+	if err != nil {
+		return err
+	}
+	if status == nil || status.Data == nil {
+		return fmt.Errorf("got nil secret or data")
+	}
+
+	var priAddrs []string
+	err = mapstructure.Decode(status.Data["known_primary_cluster_addrs"], &priAddrs)
+	if err != nil {
+		return err
+	}
+	if !strutil.StrListContains(priAddrs, priLeader.LeaderClusterAddress) {
+		return fmt.Errorf("failed to find the expected primary cluster address %v in known_primary_cluster_addrs", priLeader.LeaderClusterAddress)
+	}
+
+	return nil
+}
+
+func UpdatePrimary(ctx context.Context, pri, sec VaultCluster) error {
+	// generate DR operation token to do update primary on vC to point to
+	// the new perfSec primary vD
+	rootToken, err := GenerateRoot(sec, GenerateRootDR)
+	if err != nil {
+		return err
+	}
+
+	// secondary activation token
+	drToken, err := GenerateDRActivationToken(pri, sec.ClusterID(), "")
+	if err != nil {
+		return err
+	}
+
+	// update-primary on vC (new perfSec Dr secondary) to point to
+	// the new perfSec Dr primary
+	secClient := sec.Nodes()[0].APIClient()
+	resp, err := secClient.Logical().Write("sys/replication/dr/secondary/update-primary", map[string]interface{}{
+		"dr_operation_token": rootToken,
+		"token":              drToken,
+		"ca_file":            DefaultCAFile,
+	})
+	if err != nil {
+		return err
+	}
+	if resp == nil {
+		return fmt.Errorf("nil status response during update primary")
+	}
+
+	if _, err = WaitForActiveNode(ctx, sec); err != nil {
+		return err
+	}
+
+	if err = WaitForDRReplicationState(ctx, sec, consts.ReplicationDRSecondary); err != nil {
+		return err
+	}
+
+	if err = checkClusterAddr(ctx, pri, sec); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func SetupFourClusterReplication(ctx context.Context, pri, sec, pridr, secdr VaultCluster) error {
+	err := SetupTwoClusterPerfReplication(ctx, pri, sec)
+	if err != nil {
+		return err
+	}
+	err = SetupTwoClusterDRReplication(ctx, pri, pridr)
+	if err != nil {
+		return err
+	}
+	err = SetupTwoClusterDRReplication(ctx, sec, secdr)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
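+// Illustrative usage sketch for the four-cluster topology; buildCluster is a
+// hypothetical test-local constructor, not an API provided here:
+//
+//	pri, sec := buildCluster(t, "A"), buildCluster(t, "C")
+//	pridr, secdr := buildCluster(t, "B"), buildCluster(t, "D")
+//	if err := SetupFourClusterReplication(ctx, pri, sec, pridr, secdr); err != nil {
+//		t.Fatal(err)
+//	}
+
+type ReplicationSet struct {
+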
// By convention, we recommend the following naming scheme for + // clusters in this map: + // A: perf primary + // B: primary's DR + // C: first perf secondary of A + // D: C's DR + // E: second perf secondary of A + // F: E's DR + // ... etc. + // + // We use generic names rather than role-specific names because + // that's less confusing when promotions take place that result in role + // changes. In other words, if D gets promoted to replace C as a perf + // secondary, and C gets demoted and updated to become D's DR secondary, + // they should maintain their initial names of D and C throughout. + Clusters map[string]VaultCluster + Builder ClusterBuilder + Logger hclog.Logger + CA *CA +} + +type ClusterBuilder func(ctx context.Context, name string, logger hclog.Logger) (VaultCluster, error) + +func NewReplicationSet(b ClusterBuilder) (*ReplicationSet, error) { + return &ReplicationSet{ + Clusters: map[string]VaultCluster{}, + Builder: b, + Logger: hclog.NewNullLogger(), + }, nil +} + +func (r *ReplicationSet) StandardPerfReplication(ctx context.Context) error { + for _, name := range []string{"A", "C"} { + if _, ok := r.Clusters[name]; !ok { + cluster, err := r.Builder(ctx, name, r.Logger) + if err != nil { + return err + } + r.Clusters[name] = cluster + } + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + err := SetupTwoClusterPerfReplication(ctx, r.Clusters["A"], r.Clusters["C"]) + if err != nil { + return err + } + + return nil +} + +func (r *ReplicationSet) StandardDRReplication(ctx context.Context) error { + for _, name := range []string{"A", "B"} { + if _, ok := r.Clusters[name]; !ok { + cluster, err := r.Builder(ctx, name, r.Logger) + if err != nil { + return err + } + r.Clusters[name] = cluster + } + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + err := SetupTwoClusterDRReplication(ctx, r.Clusters["A"], r.Clusters["B"]) + if err != nil { + return err + } + + return nil +} + +func (r *ReplicationSet) GetFourReplicationCluster(ctx context.Context) error { + for _, name := range []string{"A", "B", "C", "D"} { + if _, ok := r.Clusters[name]; !ok { + cluster, err := r.Builder(ctx, name, r.Logger) + if err != nil { + return err + } + r.Clusters[name] = cluster + } + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + err := SetupFourClusterReplication(ctx, r.Clusters["A"], r.Clusters["C"], r.Clusters["B"], r.Clusters["D"]) + if err != nil { + return err + } + return nil +} + +func (r *ReplicationSet) Cleanup() { + for _, cluster := range r.Clusters { + cluster.Cleanup() + } +} + +func WaitForPerfReplicationConnectionStatus(ctx context.Context, client *api.Client) error { + type Primary struct { + APIAddress string `mapstructure:"api_address"` + ConnectionStatus string `mapstructure:"connection_status"` + ClusterAddress string `mapstructure:"cluster_address"` + LastHeartbeat string `mapstructure:"last_heartbeat"` + } + type Status struct { + Primaries []Primary `mapstructure:"primaries"` + } + return WaitForPerfReplicationStatus(ctx, client, func(m map[string]interface{}) error { + var status Status + err := mapstructure.Decode(m, &status) + if err != nil { + return err + } + if len(status.Primaries) == 0 { + return fmt.Errorf("primaries is zero") + } + for _, v := range status.Primaries { + if v.ConnectionStatus == "connected" { + return nil + } + } + return fmt.Errorf("no primaries connected") + }) +} + +func WaitForPerfReplicationStatus(ctx 
context.Context, client *api.Client, accept func(map[string]interface{}) error) error { + var err error + var secret *api.Secret + for ctx.Err() == nil { + secret, err = client.Logical().Read("sys/replication/performance/status") + if err == nil && secret != nil && secret.Data != nil { + if err = accept(secret.Data); err == nil { + return nil + } + } + time.Sleep(500 * time.Millisecond) + } + return fmt.Errorf("unable to get acceptable replication status within allotted time: error=%v secret=%#v", err, secret) +} diff --git a/sdk/helper/testcluster/types.go b/sdk/helper/testcluster/types.go new file mode 100644 index 0000000..989908f --- /dev/null +++ b/sdk/helper/testcluster/types.go @@ -0,0 +1,120 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package testcluster + +import ( + "context" + "crypto/ecdsa" + "crypto/tls" + "crypto/x509" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/api" +) + +type VaultClusterNode interface { + APIClient() *api.Client + TLSConfig() *tls.Config +} + +type VaultCluster interface { + Nodes() []VaultClusterNode + GetBarrierKeys() [][]byte + GetRecoveryKeys() [][]byte + GetBarrierOrRecoveryKeys() [][]byte + SetBarrierKeys([][]byte) + SetRecoveryKeys([][]byte) + GetCACertPEMFile() string + Cleanup() + ClusterID() string + NamedLogger(string) hclog.Logger + SetRootToken(token string) + GetRootToken() string +} + +type VaultNodeConfig struct { + // Not configurable because cluster creator wants to control these: + // PluginDirectory string `hcl:"plugin_directory"` + // APIAddr string `hcl:"api_addr"` + // ClusterAddr string `hcl:"cluster_addr"` + // Storage *Storage `hcl:"-"` + // HAStorage *Storage `hcl:"-"` + // DisableMlock bool `hcl:"disable_mlock"` + // ClusterName string `hcl:"cluster_name"` + + // Not configurable yet: + // Listeners []*Listener `hcl:"-"` + // Seals []*KMS `hcl:"-"` + // Entropy *Entropy `hcl:"-"` + // Telemetry *Telemetry `hcl:"telemetry"` + // HCPLinkConf *HCPLinkConfig `hcl:"cloud"` + // PidFile string `hcl:"pid_file"` + // ServiceRegistrationType string + // ServiceRegistrationOptions map[string]string + + StorageOptions map[string]string + + DefaultMaxRequestDuration time.Duration `json:"default_max_request_duration"` + LogFormat string `json:"log_format"` + LogLevel string `json:"log_level"` + CacheSize int `json:"cache_size"` + DisableCache bool `json:"disable_cache"` + DisablePrintableCheck bool `json:"disable_printable_check"` + EnableUI bool `json:"ui"` + MaxLeaseTTL time.Duration `json:"max_lease_ttl"` + DefaultLeaseTTL time.Duration `json:"default_lease_ttl"` + ClusterCipherSuites string `json:"cluster_cipher_suites"` + PluginFileUid int `json:"plugin_file_uid"` + PluginFilePermissions int `json:"plugin_file_permissions"` + EnableRawEndpoint bool `json:"raw_storage_endpoint"` + DisableClustering bool `json:"disable_clustering"` + DisablePerformanceStandby bool `json:"disable_performance_standby"` + DisableSealWrap bool `json:"disable_sealwrap"` + DisableIndexing bool `json:"disable_indexing"` + DisableSentinelTrace bool `json:"disable_sentinel"` + EnableResponseHeaderHostname bool `json:"enable_response_header_hostname"` + LogRequestsLevel string `json:"log_requests_level"` + EnableResponseHeaderRaftNodeID bool `json:"enable_response_header_raft_node_id"` + LicensePath string `json:"license_path"` +} + +type ClusterNode struct { + APIAddress string `json:"api_address"` +} + +type ClusterJson struct { + Nodes []ClusterNode `json:"nodes"` + CACertPath string 
`json:"ca_cert_path"` + RootToken string `json:"root_token"` +} + +type ClusterOptions struct { + ClusterName string + KeepStandbysSealed bool + SkipInit bool + CACert []byte + NumCores int + TmpDir string + Logger hclog.Logger + VaultNodeConfig *VaultNodeConfig + VaultLicense string + AdministrativeNamespacePath string +} + +type CA struct { + CACert *x509.Certificate + CACertBytes []byte + CACertPEM []byte + CACertPEMFile string + CAKey *ecdsa.PrivateKey + CAKeyPEM []byte +} + +type ClusterStorage interface { + Start(context.Context, *ClusterOptions) error + Cleanup() error + Opts() map[string]interface{} + Type() string +} diff --git a/sdk/helper/testcluster/util.go b/sdk/helper/testcluster/util.go new file mode 100644 index 0000000..883cd69 --- /dev/null +++ b/sdk/helper/testcluster/util.go @@ -0,0 +1,392 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package testcluster + +import ( + "context" + "encoding/base64" + "encoding/hex" + "fmt" + "sync/atomic" + "time" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/sdk/helper/xor" +) + +// Note that OSS standbys will not accept seal requests. And ent perf standbys +// may fail it as well if they haven't yet been able to get "elected" as perf standbys. +func SealNode(ctx context.Context, cluster VaultCluster, nodeIdx int) error { + if nodeIdx >= len(cluster.Nodes()) { + return fmt.Errorf("invalid nodeIdx %d for cluster", nodeIdx) + } + node := cluster.Nodes()[nodeIdx] + client := node.APIClient() + + err := client.Sys().SealWithContext(ctx) + if err != nil { + return err + } + + return NodeSealed(ctx, cluster, nodeIdx) +} + +func SealAllNodes(ctx context.Context, cluster VaultCluster) error { + for i := range cluster.Nodes() { + if err := SealNode(ctx, cluster, i); err != nil { + return err + } + } + return nil +} + +func UnsealNode(ctx context.Context, cluster VaultCluster, nodeIdx int) error { + if nodeIdx >= len(cluster.Nodes()) { + return fmt.Errorf("invalid nodeIdx %d for cluster", nodeIdx) + } + node := cluster.Nodes()[nodeIdx] + client := node.APIClient() + + for _, key := range cluster.GetBarrierOrRecoveryKeys() { + _, err := client.Sys().UnsealWithContext(ctx, hex.EncodeToString(key)) + if err != nil { + return err + } + } + + return NodeHealthy(ctx, cluster, nodeIdx) +} + +func UnsealAllNodes(ctx context.Context, cluster VaultCluster) error { + for i := range cluster.Nodes() { + if err := UnsealNode(ctx, cluster, i); err != nil { + return err + } + } + return nil +} + +func NodeSealed(ctx context.Context, cluster VaultCluster, nodeIdx int) error { + if nodeIdx >= len(cluster.Nodes()) { + return fmt.Errorf("invalid nodeIdx %d for cluster", nodeIdx) + } + node := cluster.Nodes()[nodeIdx] + client := node.APIClient() + + var health *api.HealthResponse + var err error + for ctx.Err() == nil { + health, err = client.Sys().HealthWithContext(ctx) + switch { + case err != nil: + case !health.Sealed: + err = fmt.Errorf("unsealed: %#v", health) + default: + return nil + } + time.Sleep(500 * time.Millisecond) + } + return fmt.Errorf("node %d is not sealed: %v", nodeIdx, err) +} + +func WaitForNCoresSealed(ctx context.Context, cluster VaultCluster, n int) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + errs := make(chan error) + for i := range cluster.Nodes() { + go func(i int) { + var err error + for ctx.Err() == nil { + err = NodeSealed(ctx, cluster, i) + if err == nil { + errs <- nil + return + } + 
time.Sleep(100 * time.Millisecond)
+			}
+			if err == nil {
+				err = ctx.Err()
+			}
+			errs <- err
+		}(i)
+	}
+
+	var merr *multierror.Error
+	var sealed int
+	for range cluster.Nodes() {
+		err := <-errs
+		if err != nil {
+			merr = multierror.Append(merr, err)
+		} else {
+			sealed++
+			if sealed == n {
+				return nil
+			}
+		}
+	}
+
+	return fmt.Errorf("%d cores were not sealed, errs: %v", n, merr.ErrorOrNil())
+}
+
+func NodeHealthy(ctx context.Context, cluster VaultCluster, nodeIdx int) error {
+	if nodeIdx >= len(cluster.Nodes()) {
+		return fmt.Errorf("invalid nodeIdx %d for cluster", nodeIdx)
+	}
+	node := cluster.Nodes()[nodeIdx]
+	client := node.APIClient()
+
+	var health *api.HealthResponse
+	var err error
+	for ctx.Err() == nil {
+		health, err = client.Sys().HealthWithContext(ctx)
+		switch {
+		case err != nil:
+		case health == nil:
+			err = fmt.Errorf("nil response to health check")
+		case health.Sealed:
+			err = fmt.Errorf("sealed: %#v", health)
+		default:
+			return nil
+		}
+		time.Sleep(500 * time.Millisecond)
+	}
+	return fmt.Errorf("node %d is unhealthy: %v", nodeIdx, err)
+}
+
+func LeaderNode(ctx context.Context, cluster VaultCluster) (int, error) {
+	// Be robust to multiple nodes thinking they are active. This is possible in
+	// certain network partition situations where the old leader has not
+	// discovered it has lost leadership yet. In tests this is only likely to come
+	// up when we are specifically provoking it, but it's possible it could happen
+	// at any point if leadership flaps or connectivity suffers transient errors
+	// etc., so be robust against it. The best solution would be to have some sort
+	// of epoch like the raft term that is guaranteed to be monotonically
+	// increasing through elections, however we don't have that abstraction for
+	// all HABackends in general. The best we have is the ActiveTime. In a
+	// distributed systems text book this would be bad to rely on due to clock
+	// sync issues etc. but for our tests it's likely fine because even if we are
+	// running separate Vault containers, they are all using the same hardware
+	// clock in the system.
+	leaderActiveTimes := make(map[int]time.Time)
+	for i, node := range cluster.Nodes() {
+		client := node.APIClient()
+		ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+		resp, err := client.Sys().LeaderWithContext(ctx)
+		cancel()
+		if err != nil || resp == nil || !resp.IsSelf {
+			continue
+		}
+		leaderActiveTimes[i] = resp.ActiveTime
+	}
+	if len(leaderActiveTimes) == 0 {
+		return -1, fmt.Errorf("no leader found")
+	}
+	// At least one node thinks it is active. If multiple, pick the one with the
+	// most recent ActiveTime. Note if there is only one then this just returns
+	// it.
+	var newestLeaderIdx int
+	var newestActiveTime time.Time
+	for i, at := range leaderActiveTimes {
+		if at.After(newestActiveTime) {
+			newestActiveTime = at
+			newestLeaderIdx = i
+		}
+	}
+	return newestLeaderIdx, nil
+}
+
+func WaitForActiveNode(ctx context.Context, cluster VaultCluster) (int, error) {
+	for ctx.Err() == nil {
+		if idx, _ := LeaderNode(ctx, cluster); idx != -1 {
+			return idx, nil
+		}
+		time.Sleep(500 * time.Millisecond)
+	}
+	return -1, ctx.Err()
+}
+
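+// Illustrative usage sketch: callers usually bound these wait helpers with a
+// deadline and then talk to the active node directly:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	idx, err := WaitForActiveNode(ctx, cluster)
+//	if err != nil {
+//		t.Fatal(err)
+//	}
+//	active := cluster.Nodes()[idx].APIClient()
+
+func WaitForActiveNodeAndPerfStandbys(ctx context.Context, cluster VaultCluster) error {
+	logger := cluster.NamedLogger("WaitForActiveNodeAndPerfStandbys")
+	// This WaitForActiveNode was added because after a Raft cluster is sealed
+	// and then unsealed, when it comes up it may have a different leader than
+	// Core0, making this helper fail.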
+ // A sleep before calling WaitForActiveNodeAndPerfStandbys seems to sort + // things out, but so apparently does this. We should be able to eliminate + // this call to WaitForActiveNode by reworking the logic in this method. + leaderIdx, err := WaitForActiveNode(ctx, cluster) + if err != nil { + return err + } + + if len(cluster.Nodes()) == 1 { + return nil + } + + expectedStandbys := len(cluster.Nodes()) - 1 + + mountPoint, err := uuid.GenerateUUID() + if err != nil { + return err + } + leaderClient := cluster.Nodes()[leaderIdx].APIClient() + + for ctx.Err() == nil { + err = leaderClient.Sys().MountWithContext(ctx, mountPoint, &api.MountInput{ + Type: "kv", + Local: true, + }) + if err == nil { + break + } + time.Sleep(1 * time.Second) + } + if err != nil { + return fmt.Errorf("unable to mount KV engine: %v", err) + } + path := mountPoint + "/waitforactivenodeandperfstandbys" + var standbys, actives int64 + errchan := make(chan error, len(cluster.Nodes())) + for i := range cluster.Nodes() { + go func(coreNo int) { + node := cluster.Nodes()[coreNo] + client := node.APIClient() + val := 1 + var err error + defer func() { + errchan <- err + }() + + var lastWAL uint64 + for ctx.Err() == nil { + _, err = leaderClient.Logical().WriteWithContext(ctx, path, map[string]interface{}{ + "bar": val, + }) + val++ + time.Sleep(250 * time.Millisecond) + if err != nil { + continue + } + var leader *api.LeaderResponse + leader, err = client.Sys().LeaderWithContext(ctx) + if err != nil { + logger.Trace("waiting for core", "core", coreNo, "err", err) + continue + } + switch { + case leader.IsSelf: + logger.Trace("waiting for core", "core", coreNo, "isLeader", true) + atomic.AddInt64(&actives, 1) + return + case leader.PerfStandby && leader.PerfStandbyLastRemoteWAL > 0: + switch { + case lastWAL == 0: + lastWAL = leader.PerfStandbyLastRemoteWAL + logger.Trace("waiting for core", "core", coreNo, "lastRemoteWAL", leader.PerfStandbyLastRemoteWAL, "lastWAL", lastWAL) + case lastWAL < leader.PerfStandbyLastRemoteWAL: + logger.Trace("waiting for core", "core", coreNo, "lastRemoteWAL", leader.PerfStandbyLastRemoteWAL, "lastWAL", lastWAL) + atomic.AddInt64(&standbys, 1) + return + } + default: + logger.Trace("waiting for core", "core", coreNo, + "ha_enabled", leader.HAEnabled, + "is_self", leader.IsSelf, + "perf_standby", leader.PerfStandby, + "perf_standby_remote_wal", leader.PerfStandbyLastRemoteWAL) + } + } + }(i) + } + + errs := make([]error, 0, len(cluster.Nodes())) + for range cluster.Nodes() { + errs = append(errs, <-errchan) + } + if actives != 1 || int(standbys) != expectedStandbys { + return fmt.Errorf("expected 1 active core and %d standbys, got %d active and %d standbys, errs: %v", + expectedStandbys, actives, standbys, errs) + } + + for ctx.Err() == nil { + err = leaderClient.Sys().UnmountWithContext(ctx, mountPoint) + if err == nil { + break + } + time.Sleep(time.Second) + } + if err != nil { + return fmt.Errorf("unable to unmount KV engine on primary") + } + return nil +} + +type GenerateRootKind int + +const ( + GenerateRootRegular GenerateRootKind = iota + GenerateRootDR + GenerateRecovery +) + +func GenerateRoot(cluster VaultCluster, kind GenerateRootKind) (string, error) { + // If recovery keys supported, use those to perform root token generation instead + keys := cluster.GetBarrierOrRecoveryKeys() + + client := cluster.Nodes()[0].APIClient() + + var err error + var status *api.GenerateRootStatusResponse + switch kind { + case GenerateRootRegular: + status, err = 
client.Sys().GenerateRootInit("", "") + case GenerateRootDR: + status, err = client.Sys().GenerateDROperationTokenInit("", "") + case GenerateRecovery: + status, err = client.Sys().GenerateRecoveryOperationTokenInit("", "") + } + if err != nil { + return "", err + } + + if status.Required > len(keys) { + return "", fmt.Errorf("need more keys than have, need %d have %d", status.Required, len(keys)) + } + + otp := status.OTP + + for i, key := range keys { + if i >= status.Required { + break + } + + strKey := base64.StdEncoding.EncodeToString(key) + switch kind { + case GenerateRootRegular: + status, err = client.Sys().GenerateRootUpdate(strKey, status.Nonce) + case GenerateRootDR: + status, err = client.Sys().GenerateDROperationTokenUpdate(strKey, status.Nonce) + case GenerateRecovery: + status, err = client.Sys().GenerateRecoveryOperationTokenUpdate(strKey, status.Nonce) + } + if err != nil { + return "", err + } + } + if !status.Complete { + return "", fmt.Errorf("generate root operation did not end successfully") + } + + tokenBytes, err := base64.RawStdEncoding.DecodeString(status.EncodedToken) + if err != nil { + return "", err + } + tokenBytes, err = xor.XORBytes(tokenBytes, []byte(otp)) + if err != nil { + return "", err + } + return string(tokenBytes), nil +} diff --git a/sdk/helper/testhelpers/output.go b/sdk/helper/testhelpers/output.go new file mode 100644 index 0000000..769a63a --- /dev/null +++ b/sdk/helper/testhelpers/output.go @@ -0,0 +1,81 @@ +package testhelpers + +import ( + "crypto/sha256" + "fmt" + "reflect" + + "github.com/mitchellh/go-testing-interface" + "github.com/mitchellh/mapstructure" +) + +// ToMap renders an input value of any type as a map. This is intended for +// logging human-readable data dumps in test logs, so it uses the `json` +// tags on struct fields: this makes it easy to exclude `"-"` values that +// are typically not interesting, respect omitempty, etc. +// +// We also replace any []byte fields with a hash of their value. +// This is usually sufficient for test log purposes, and is a lot more readable +// than a big array of individual byte values like Go would normally stringify a +// byte slice. +func ToMap(in any) (map[string]any, error) { + temp := make(map[string]any) + cfg := &mapstructure.DecoderConfig{ + TagName: "json", + IgnoreUntaggedFields: true, + Result: &temp, + } + md, err := mapstructure.NewDecoder(cfg) + if err != nil { + return nil, err + } + err = md.Decode(in) + if err != nil { + return nil, err + } + + // mapstructure doesn't call the DecodeHook for each field when doing + // struct->map conversions, but it does for map->map, so call it a second + // time to convert each []byte field. + out := make(map[string]any) + md2, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + Result: &out, + DecodeHook: func(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) { + if from.Kind() != reflect.Slice || from.Elem().Kind() != reflect.Uint8 { + return data, nil + } + b := data.([]byte) + return fmt.Sprintf("%x", sha256.Sum256(b)), nil + }, + }) + if err != nil { + return nil, err + } + err = md2.Decode(temp) + if err != nil { + return nil, err + } + + return out, nil +} + +// ToString renders its input using ToMap, and returns a string containing the +// result or an error if that fails. 
+func ToString(in any) string {
+	m, err := ToMap(in)
+	if err != nil {
+		return err.Error()
+	}
+	return fmt.Sprintf("%v", m)
+}
+
+// StringOrDie renders its input using ToMap, and returns a string containing the
+// result. If rendering yields an error, calls t.Fatal.
+func StringOrDie(t testing.T, in any) string {
+	t.Helper()
+	m, err := ToMap(in)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return fmt.Sprintf("%v", m)
+}
diff --git a/sdk/helper/testhelpers/output_test.go b/sdk/helper/testhelpers/output_test.go
new file mode 100644
index 0000000..257d948
--- /dev/null
+++ b/sdk/helper/testhelpers/output_test.go
@@ -0,0 +1,45 @@
+package testhelpers
+
+import (
+	"fmt"
+	"reflect"
+	"testing"
+)
+
+func TestToMap(t *testing.T) {
+	type s struct {
+		A string            `json:"a"`
+		B []byte            `json:"b"`
+		C map[string]string `json:"c"`
+		D string            `json:"-"`
+	}
+	type args struct {
+		in s
+	}
+	tests := []struct {
+		name    string
+		args    args
+		want    string
+		wantErr bool
+	}{
+		{
+			name:    "basic",
+			args:    args{s{A: "a", B: []byte("bytes"), C: map[string]string{"k": "v"}, D: "d"}},
+			want:    "map[a:a b:277089d91c0bdf4f2e6862ba7e4a07605119431f5d13f726dd352b06f1b206a9 c:map[k:v]]",
+			wantErr: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			m, err := ToMap(&tt.args.in)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("ToMap() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			got := fmt.Sprintf("%s", m)
+			if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("ToMap() got = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
diff --git a/sdk/helper/testhelpers/schema/response_validation.go b/sdk/helper/testhelpers/schema/response_validation.go
new file mode 100644
index 0000000..430d175
--- /dev/null
+++ b/sdk/helper/testhelpers/schema/response_validation.go
@@ -0,0 +1,202 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package schema
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strings"
+	"testing"
+
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// ValidateResponse is a test helper that validates whether the given response
+// object conforms to the response schema (schema.Fields). It cycles through
+// the data map and validates conversions in the schema. In "strict" mode, this
+// function will also ensure that the data map has all schema-required fields
+// and does not have any fields outside of the schema.
+func ValidateResponse(t *testing.T, schema *framework.Response, response *logical.Response, strict bool) {
+	t.Helper()
+
+	if response != nil {
+		ValidateResponseData(t, schema, response.Data, strict)
+	} else {
+		ValidateResponseData(t, schema, nil, strict)
+	}
+}
+
+// ValidateResponseData is a test helper that validates whether the given
+// response data map conforms to the response schema (schema.Fields). It cycles
+// through the data map and validates conversions in the schema. In "strict"
+// mode, this function will also ensure that the data map has all
+// schema-required fields and does not have any fields outside of the schema.
+func ValidateResponseData(t *testing.T, schema *framework.Response, data map[string]interface{}, strict bool) {
+	t.Helper()
+
+	if err := validateResponseDataImpl(
+		schema,
+		data,
+		strict,
+	); err != nil {
+		t.Fatalf("validation error: %v; response data: %#v", err, data)
+	}
+}
+
+// validateResponseDataImpl is extracted so that it can be tested
+func validateResponseDataImpl(schema *framework.Response, data map[string]interface{}, strict bool) error {
+	// nothing to validate
+	if schema == nil {
+		return nil
+	}
+
+	// Certain responses may come through with non-2xx status codes. While
+	// these are not always errors (e.g. 3xx redirection codes), we don't
+	// consider them for the purposes of schema validation
+	if status, exists := data[logical.HTTPStatusCode]; exists {
+		s, ok := status.(int)
+		if ok && (s < 200 || s > 299) {
+			return nil
+		}
+	}
+
+	// Marshal the data to JSON and back to convert the map's values into
+	// JSON strings expected by Validate() and ValidateStrict(). This is
+	// not efficient and is done for testing purposes only.
+	jsonBytes, err := json.Marshal(data)
+	if err != nil {
+		return fmt.Errorf("failed to convert input to json: %w", err)
+	}
+
+	var dataWithStringValues map[string]interface{}
+	if err := json.Unmarshal(
+		jsonBytes,
+		&dataWithStringValues,
+	); err != nil {
+		return fmt.Errorf("failed to unmarshal data: %w", err)
+	}
+
+	// these are special fields that will not show up in the final response and
+	// should be ignored
+	for _, field := range []string{
+		logical.HTTPContentType,
+		logical.HTTPRawBody,
+		logical.HTTPStatusCode,
+		logical.HTTPRawBodyAlreadyJSONDecoded,
+		logical.HTTPCacheControlHeader,
+		logical.HTTPPragmaHeader,
+		logical.HTTPWWWAuthenticateHeader,
+	} {
+		delete(dataWithStringValues, field)
+
+		if _, ok := schema.Fields[field]; ok {
+			return fmt.Errorf("encountered a reserved field in response schema: %s", field)
+		}
+	}
+
+	// Validate
+	fd := framework.FieldData{
+		Raw:    dataWithStringValues,
+		Schema: schema.Fields,
+	}
+
+	if strict {
+		return fd.ValidateStrict()
+	}
+
+	return fd.Validate()
+}
+
+// FindResponseSchema is a test helper to extract response schema from the
+// given framework path / operation.
+func FindResponseSchema(t *testing.T, paths []*framework.Path, pathIdx int, operation logical.Operation) *framework.Response {
+	t.Helper()
+
+	if pathIdx >= len(paths) {
+		t.Fatalf("path index %d is out of range", pathIdx)
+	}
+
+	schemaPath := paths[pathIdx]
+
+	return GetResponseSchema(t, schemaPath, operation)
+}
+
+func GetResponseSchema(t *testing.T, path *framework.Path, operation logical.Operation) *framework.Response {
+	t.Helper()
+
+	schemaOperation, ok := path.Operations[operation]
+	if !ok {
+		t.Fatalf(
+			"could not find response schema: %s: %q operation does not exist",
+			path.Pattern,
+			operation,
+		)
+	}
+
+	var schemaResponses []framework.Response
+
+	for _, status := range []int{
+		http.StatusOK,        // 200
+		http.StatusAccepted,  // 202
+		http.StatusNoContent, // 204
+	} {
+		schemaResponses, ok = schemaOperation.Properties().Responses[status]
+		if ok {
+			break
+		}
+	}
+
+	if len(schemaResponses) == 0 {
+		t.Fatalf(
+			"could not find response schema: %s: %q operation: no responses found",
+			path.Pattern,
+			operation,
+		)
+	}
+
+	return &schemaResponses[0]
+}
+
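+// Illustrative usage sketch: a backend unit test might validate a handler's
+// response against its declared schema, where b, ctx and req come from the
+// test's own setup:
+//
+//	resp, err := b.HandleRequest(ctx, req)
+//	if err != nil {
+//		t.Fatal(err)
+//	}
+//	schema := FindResponseSchema(t, b.Paths, 0, logical.ReadOperation)
+//	ValidateResponse(t, schema, resp, true)
+
+// ResponseValidatingCallback can be used in setting up a [vault.TestCluster]
+// that validates every response against the openapi specifications.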
+//
+// [vault.TestCluster]: https://pkg.go.dev/github.com/hashicorp/vault/vault#TestCluster
+func ResponseValidatingCallback(t *testing.T) func(logical.Backend, *logical.Request, *logical.Response) {
+	type PathRouter interface {
+		Route(string) *framework.Path
+	}
+
+	return func(b logical.Backend, req *logical.Request, resp *logical.Response) {
+		t.Helper()
+
+		if b == nil {
+			t.Fatalf("non-nil backend required")
+		}
+
+		backend, ok := b.(PathRouter)
+		if !ok {
+			t.Fatalf("could not cast %T to have `Route(string) *framework.Path`", b)
+		}
+
+		// The full request path includes the backend but when passing to the
+		// backend, we have to trim the mount point:
+		//   `sys/mounts/secret` -> `mounts/secret`
+		//   `auth/token/create` -> `create`
+		requestPath := strings.TrimPrefix(req.Path, req.MountPoint)
+
+		route := backend.Route(requestPath)
+		if route == nil {
+			t.Fatalf("backend %T could not find a route for %s", b, req.Path)
+		}
+
+		ValidateResponse(
+			t,
+			GetResponseSchema(t, route, req.Operation),
+			resp,
+			true,
+		)
+	}
+}
diff --git a/sdk/helper/testhelpers/schema/response_validation_test.go b/sdk/helper/testhelpers/schema/response_validation_test.go
new file mode 100644
index 0000000..4f4aa8b
--- /dev/null
+++ b/sdk/helper/testhelpers/schema/response_validation_test.go
@@ -0,0 +1,359 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package schema
+
+import (
+	"testing"
+	"time"
+
+	"github.com/hashicorp/vault/sdk/framework"
+)
+
+func TestValidateResponse(t *testing.T) {
+	cases := map[string]struct {
+		schema        *framework.Response
+		response      map[string]interface{}
+		strict        bool
+		errorExpected bool
+	}{
+		"nil schema, nil response, strict": {
+			schema:        nil,
+			response:      nil,
+			strict:        true,
+			errorExpected: false,
+		},
+
+		"nil schema, nil response, not strict": {
+			schema:        nil,
+			response:      nil,
+			strict:        false,
+			errorExpected: false,
+		},
+
+		"nil schema, good response, strict": {
+			schema: nil,
+			response: map[string]interface{}{
+				"foo": "bar",
+			},
+			strict:        true,
+			errorExpected: false,
+		},
+
+		"nil schema, good response, not strict": {
+			schema: nil,
+			response: map[string]interface{}{
+				"foo": "bar",
+			},
+			strict:        false,
+			errorExpected: false,
+		},
+
+		"nil schema fields, good response, strict": {
+			schema: &framework.Response{},
+			response: map[string]interface{}{
+				"foo": "bar",
+			},
+			strict:        true,
+			errorExpected: false,
+		},
+
+		"nil schema fields, good response, not strict": {
+			schema: &framework.Response{},
+			response: map[string]interface{}{
+				"foo": "bar",
+			},
+			strict:        false,
+			errorExpected: false,
+		},
+
+		"string schema field, string response, strict": {
+			schema: &framework.Response{
+				Fields: map[string]*framework.FieldSchema{
+					"foo": {
+						Type: framework.TypeString,
+					},
+				},
+			},
+			response: map[string]interface{}{
+				"foo": "bar",
+			},
+			strict:        true,
+			errorExpected: false,
+		},
+
+		"string schema field, string response, not strict": {
+			schema: &framework.Response{
+				Fields: map[string]*framework.FieldSchema{
+					"foo": {
+						Type: framework.TypeString,
+					},
+				},
+			},
+			response: map[string]interface{}{
+				"foo": "bar",
+			},
+			strict:        false,
+			errorExpected: false,
+		},
+
+		"string schema not required field, empty response, strict": {
+			schema: &framework.Response{
+				Fields: map[string]*framework.FieldSchema{
+					"foo": {
+						Type:     framework.TypeString,
+						Required: false,
+					},
+				},
+			},
+			response:      map[string]interface{}{},
+			strict:        true,
+			errorExpected: false,
+		},
+
+		"string schema required field, empty response, strict": {
+
schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{ + "foo": { + Type: framework.TypeString, + Required: true, + }, + }, + }, + response: map[string]interface{}{}, + strict: true, + errorExpected: true, + }, + + "string schema required field, empty response, not strict": { + schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{ + "foo": { + Type: framework.TypeString, + Required: true, + }, + }, + }, + response: map[string]interface{}{}, + strict: false, + errorExpected: false, + }, + + "string schema required field, nil response, strict": { + schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{ + "foo": { + Type: framework.TypeString, + Required: true, + }, + }, + }, + response: nil, + strict: true, + errorExpected: true, + }, + + "string schema required field, nil response, not strict": { + schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{ + "foo": { + Type: framework.TypeString, + Required: true, + }, + }, + }, + response: nil, + strict: false, + errorExpected: false, + }, + + "empty schema, string response, strict": { + schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{}, + }, + response: map[string]interface{}{ + "foo": "bar", + }, + strict: true, + errorExpected: true, + }, + + "empty schema, string response, not strict": { + schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{}, + }, + response: map[string]interface{}{ + "foo": "bar", + }, + strict: false, + errorExpected: false, + }, + + "time schema, string response, strict": { + schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{ + "time": { + Type: framework.TypeTime, + Required: true, + }, + }, + }, + response: map[string]interface{}{ + "time": "2024-12-11T09:08:07Z", + }, + strict: true, + errorExpected: false, + }, + + "time schema, string response, not strict": { + schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{ + "time": { + Type: framework.TypeTime, + Required: true, + }, + }, + }, + response: map[string]interface{}{ + "time": "2024-12-11T09:08:07Z", + }, + strict: false, + errorExpected: false, + }, + + "time schema, time response, strict": { + schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{ + "time": { + Type: framework.TypeTime, + Required: true, + }, + }, + }, + response: map[string]interface{}{ + "time": time.Date(2024, 12, 11, 9, 8, 7, 0, time.UTC), + }, + strict: true, + errorExpected: false, + }, + + "time schema, time response, not strict": { + schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{ + "time": { + Type: framework.TypeTime, + Required: true, + }, + }, + }, + response: map[string]interface{}{ + "time": time.Date(2024, 12, 11, 9, 8, 7, 0, time.UTC), + }, + strict: false, + errorExpected: false, + }, + + "empty schema, response has http_raw_body, strict": { + schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{}, + }, + response: map[string]interface{}{ + "http_raw_body": "foo", + }, + strict: true, + errorExpected: false, + }, + + "empty schema, response has http_raw_body, not strict": { + schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{}, + }, + response: map[string]interface{}{ + "http_raw_body": "foo", + }, + strict: false, + errorExpected: false, + }, + + "string schema field, response has non-200 http_status_code, strict": { + schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{ + "foo": { + Type: 
framework.TypeString, + }, + }, + }, + response: map[string]interface{}{ + "http_status_code": 304, + }, + strict: true, + errorExpected: false, + }, + + "string schema field, response has non-200 http_status_code, not strict": { + schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{ + "foo": { + Type: framework.TypeString, + }, + }, + }, + response: map[string]interface{}{ + "http_status_code": 304, + }, + strict: false, + errorExpected: false, + }, + + "schema has http_raw_body, strict": { + schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{ + "http_raw_body": { + Type: framework.TypeString, + Required: false, + }, + }, + }, + response: map[string]interface{}{ + "http_raw_body": "foo", + }, + strict: true, + errorExpected: true, + }, + + "schema has http_raw_body, not strict": { + schema: &framework.Response{ + Fields: map[string]*framework.FieldSchema{ + "http_raw_body": { + Type: framework.TypeString, + Required: false, + }, + }, + }, + response: map[string]interface{}{ + "http_raw_body": "foo", + }, + strict: false, + errorExpected: true, + }, + } + + for name, tc := range cases { + name, tc := name, tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + err := validateResponseDataImpl( + tc.schema, + tc.response, + tc.strict, + ) + if err == nil && tc.errorExpected == true { + t.Fatalf("expected an error, got nil") + } + if err != nil && tc.errorExpected == false { + t.Fatalf("unexpected error: %v", err) + } + }) + } +} diff --git a/sdk/helper/tlsutil/tlsutil.go b/sdk/helper/tlsutil/tlsutil.go new file mode 100644 index 0000000..d91af36 --- /dev/null +++ b/sdk/helper/tlsutil/tlsutil.go @@ -0,0 +1,35 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// DEPRECATED: this has been moved to go-secure-stdlib and will be removed +package tlsutil + +import ( + "crypto/tls" + + exttlsutil "github.com/hashicorp/go-secure-stdlib/tlsutil" +) + +var ErrInvalidCertParams = exttlsutil.ErrInvalidCertParams + +var TLSLookup = exttlsutil.TLSLookup + +func ParseCiphers(cipherStr string) ([]uint16, error) { + return exttlsutil.ParseCiphers(cipherStr) +} + +func GetCipherName(cipher uint16) (string, error) { + return exttlsutil.GetCipherName(cipher) +} + +func ClientTLSConfig(caCert []byte, clientCert []byte, clientKey []byte) (*tls.Config, error) { + return exttlsutil.ClientTLSConfig(caCert, clientCert, clientKey) +} + +func LoadClientTLSConfig(caCert, clientCert, clientKey string) (*tls.Config, error) { + return exttlsutil.LoadClientTLSConfig(caCert, clientCert, clientKey) +} + +func SetupTLSConfig(conf map[string]string, address string) (*tls.Config, error) { + return exttlsutil.SetupTLSConfig(conf, address) +} diff --git a/sdk/helper/tokenutil/tokenutil.go b/sdk/helper/tokenutil/tokenutil.go new file mode 100644 index 0000000..4319bd1 --- /dev/null +++ b/sdk/helper/tokenutil/tokenutil.go @@ -0,0 +1,422 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package tokenutil
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/hashicorp/go-secure-stdlib/parseutil"
+	"github.com/hashicorp/go-secure-stdlib/strutil"
+	sockaddr "github.com/hashicorp/go-sockaddr"
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/helper/policyutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// TokenParams contains a set of common parameters that auth plugins can use
+// for setting token behavior
+type TokenParams struct {
+	// The set of CIDRs that tokens generated using this role will be bound to
+	TokenBoundCIDRs []*sockaddr.SockAddrMarshaler `json:"token_bound_cidrs"`
+
+	// If set, the token entry will have an explicit maximum TTL set, rather
+	// than deferring to role/mount values
+	TokenExplicitMaxTTL time.Duration `json:"token_explicit_max_ttl" mapstructure:"token_explicit_max_ttl"`
+
+	// The max TTL to use for the token
+	TokenMaxTTL time.Duration `json:"token_max_ttl" mapstructure:"token_max_ttl"`
+
+	// If set, core will not automatically add default to the policy list
+	TokenNoDefaultPolicy bool `json:"token_no_default_policy" mapstructure:"token_no_default_policy"`
+
+	// The maximum number of times a token issued from this role may be used.
+	TokenNumUses int `json:"token_num_uses" mapstructure:"token_num_uses"`
+
+	// If non-zero, tokens created using this role will be able to be renewed
+	// forever, but will have a fixed renewal period of this value
+	TokenPeriod time.Duration `json:"token_period" mapstructure:"token_period"`
+
+	// The policies to set
+	TokenPolicies []string `json:"token_policies" mapstructure:"token_policies"`
+
+	// The type of token this role should issue
+	TokenType logical.TokenType `json:"token_type" mapstructure:"token_type"`
+
+	// The TTL to use for the token
+	TokenTTL time.Duration `json:"token_ttl" mapstructure:"token_ttl"`
+}
+
+// AddTokenFields adds fields to an existing role. It panics if it would
+// overwrite an existing field.
+func AddTokenFields(m map[string]*framework.FieldSchema) {
+	AddTokenFieldsWithAllowList(m, nil)
+}
+
+// AddTokenFieldsWithAllowList adds fields to an existing role. It panics if it
+// would overwrite an existing field. Allowed can be used to restrict the set,
+// e.g. if there would be conflicts.
+func AddTokenFieldsWithAllowList(m map[string]*framework.FieldSchema, allowed []string) {
+	r := TokenFields()
+	for k, v := range r {
+		if len(allowed) > 0 && !strutil.StrListContains(allowed, k) {
+			continue
+		}
+		if _, has := m[k]; has {
+			panic(fmt.Sprintf("adding role field %s would overwrite existing field", k))
+		}
+		m[k] = v
+	}
+}
+
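+// Illustrative usage sketch: a plugin typically splices these fields into its
+// role schema and parses them in the role-write handler, with the role type
+// embedding TokenParams:
+//
+//	fields := map[string]*framework.FieldSchema{
+//		"name": {Type: framework.TypeString},
+//	}
+//	tokenutil.AddTokenFields(fields)
+//
+//	// in the write handler:
+//	if err := role.ParseTokenFields(req, d); err != nil {
+//		return logical.ErrorResponse(err.Error()), nil
+//	}
+
+// TokenFields provides a set of field schemas for the parameters
+func TokenFields() map[string]*framework.FieldSchema {
+	return map[string]*framework.FieldSchema{
+		"token_bound_cidrs": {
+			Type:        framework.TypeCommaStringSlice,
+			Description: `Comma separated string or JSON list of CIDR blocks. If set, specifies the blocks of IP addresses which are allowed to use the generated token.`,
+			DisplayAttrs: &framework.DisplayAttributes{
+				Name:  "Generated Token's Bound CIDRs",
+				Group: "Tokens",
+				Description: "A list of CIDR blocks. 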
If set, specifies the blocks of IP addresses which are allowed to use the generated token.", + }, + }, + + "token_explicit_max_ttl": { + Type: framework.TypeDurationSecond, + Description: tokenExplicitMaxTTLHelp, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Generated Token's Explicit Maximum TTL", + Group: "Tokens", + }, + }, + + "token_max_ttl": { + Type: framework.TypeDurationSecond, + Description: "The maximum lifetime of the generated token", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Generated Token's Maximum TTL", + Group: "Tokens", + }, + }, + + "token_no_default_policy": { + Type: framework.TypeBool, + Description: "If true, the 'default' policy will not automatically be added to generated tokens", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Do Not Attach 'default' Policy To Generated Tokens", + Group: "Tokens", + }, + }, + + "token_period": { + Type: framework.TypeDurationSecond, + Description: tokenPeriodHelp, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Generated Token's Period", + Group: "Tokens", + }, + }, + + "token_policies": { + Type: framework.TypeCommaStringSlice, + Description: "Comma-separated list of policies", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Generated Token's Policies", + Group: "Tokens", + Description: "A list of policies that will apply to the generated token for this user.", + }, + }, + + "token_type": { + Type: framework.TypeString, + Default: "default-service", + Description: "The type of token to generate, service or batch", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Generated Token's Type", + Group: "Tokens", + }, + }, + + "token_ttl": { + Type: framework.TypeDurationSecond, + Description: "The initial ttl of the token to generate", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Generated Token's Initial TTL", + Group: "Tokens", + }, + }, + + "token_num_uses": { + Type: framework.TypeInt, + Description: "The maximum number of times a token may be used, a value of zero means unlimited", + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Maximum Uses of Generated Tokens", + Group: "Tokens", + }, + }, + } +} + +// ParseTokenFields provides common field parsing functionality into a TokenFields struct +func (t *TokenParams) ParseTokenFields(req *logical.Request, d *framework.FieldData) error { + if boundCIDRsRaw, ok := d.GetOk("token_bound_cidrs"); ok { + boundCIDRs, err := parseutil.ParseAddrs(boundCIDRsRaw.([]string)) + if err != nil { + return err + } + t.TokenBoundCIDRs = boundCIDRs + } + + if explicitMaxTTLRaw, ok := d.GetOk("token_explicit_max_ttl"); ok { + t.TokenExplicitMaxTTL = time.Duration(explicitMaxTTLRaw.(int)) * time.Second + } + + if maxTTLRaw, ok := d.GetOk("token_max_ttl"); ok { + t.TokenMaxTTL = time.Duration(maxTTLRaw.(int)) * time.Second + } + if t.TokenMaxTTL < 0 { + return errors.New("'token_max_ttl' cannot be negative") + } + + if noDefaultRaw, ok := d.GetOk("token_no_default_policy"); ok { + t.TokenNoDefaultPolicy = noDefaultRaw.(bool) + } + + if periodRaw, ok := d.GetOk("token_period"); ok { + t.TokenPeriod = time.Duration(periodRaw.(int)) * time.Second + } + if t.TokenPeriod < 0 { + return errors.New("'token_period' cannot be negative") + } + + if policiesRaw, ok := d.GetOk("token_policies"); ok { + t.TokenPolicies = policiesRaw.([]string) + } + + if tokenTypeRaw, ok := d.GetOk("token_type"); ok { + var tokenType logical.TokenType + tokenTypeStr := tokenTypeRaw.(string) + switch tokenTypeStr { + case "", "default": + tokenType = 
logical.TokenTypeDefault + case "service": + tokenType = logical.TokenTypeService + case "batch": + tokenType = logical.TokenTypeBatch + default: + return fmt.Errorf("invalid 'token_type' value %q", tokenTypeStr) + } + t.TokenType = tokenType + } + + if tokenNumUses, ok := d.GetOk("token_num_uses"); ok { + t.TokenNumUses = tokenNumUses.(int) + } + if t.TokenNumUses < 0 { + return errors.New("'token_num_uses' cannot be negative") + } + + if t.TokenType == logical.TokenTypeBatch || t.TokenType == logical.TokenTypeDefaultBatch { + if t.TokenPeriod != 0 { + return errors.New("'token_type' cannot be 'batch' or 'default_batch' when set to generate periodic tokens") + } + if t.TokenNumUses != 0 { + return errors.New("'token_type' cannot be 'batch' or 'default_batch' when set to generate tokens with limited use count") + } + } + + if ttlRaw, ok := d.GetOk("token_ttl"); ok { + t.TokenTTL = time.Duration(ttlRaw.(int)) * time.Second + } + if t.TokenTTL < 0 { + return errors.New("'token_ttl' cannot be negative") + } + if t.TokenTTL > 0 && t.TokenMaxTTL > 0 && t.TokenTTL > t.TokenMaxTTL { + return errors.New("'token_ttl' cannot be greater than 'token_max_ttl'") + } + + return nil +} + +// PopulateTokenData adds information from TokenParams into the map +func (t *TokenParams) PopulateTokenData(m map[string]interface{}) { + m["token_bound_cidrs"] = t.TokenBoundCIDRs + m["token_explicit_max_ttl"] = int64(t.TokenExplicitMaxTTL.Seconds()) + m["token_max_ttl"] = int64(t.TokenMaxTTL.Seconds()) + m["token_no_default_policy"] = t.TokenNoDefaultPolicy + m["token_period"] = int64(t.TokenPeriod.Seconds()) + m["token_policies"] = t.TokenPolicies + m["token_type"] = t.TokenType.String() + m["token_ttl"] = int64(t.TokenTTL.Seconds()) + m["token_num_uses"] = t.TokenNumUses + + if len(t.TokenPolicies) == 0 { + m["token_policies"] = []string{} + } + + if len(t.TokenBoundCIDRs) == 0 { + m["token_bound_cidrs"] = []string{} + } +} + +// PopulateTokenAuth populates Auth with parameters +func (t *TokenParams) PopulateTokenAuth(auth *logical.Auth) { + auth.BoundCIDRs = t.TokenBoundCIDRs + auth.ExplicitMaxTTL = t.TokenExplicitMaxTTL + auth.MaxTTL = t.TokenMaxTTL + auth.NoDefaultPolicy = t.TokenNoDefaultPolicy + auth.Period = t.TokenPeriod + auth.Policies = t.TokenPolicies + auth.Renewable = true + auth.TokenType = t.TokenType + auth.TTL = t.TokenTTL + auth.NumUses = t.TokenNumUses +} + +func DeprecationText(param string) string { + return fmt.Sprintf("Use %q instead. 
If this and %q are both specified, only %q will be used.", param, param, param)
+}
+
+func upgradeDurationValue(d *framework.FieldData, oldKey, newKey string, oldVal, newVal *time.Duration) error {
+	_, ok := d.GetOk(newKey)
+	if !ok {
+		raw, ok := d.GetOk(oldKey)
+		if ok {
+			*oldVal = time.Duration(raw.(int)) * time.Second
+			*newVal = *oldVal
+		}
+	} else {
+		_, ok = d.GetOk(oldKey)
+		if ok {
+			*oldVal = *newVal
+		} else {
+			*oldVal = 0
+		}
+	}
+
+	return nil
+}
+
+func upgradeIntValue(d *framework.FieldData, oldKey, newKey string, oldVal, newVal *int) error {
+	_, ok := d.GetOk(newKey)
+	if !ok {
+		raw, ok := d.GetOk(oldKey)
+		if ok {
+			*oldVal = raw.(int)
+			*newVal = *oldVal
+		}
+	} else {
+		_, ok = d.GetOk(oldKey)
+		if ok {
+			*oldVal = *newVal
+		} else {
+			*oldVal = 0
+		}
+	}
+
+	return nil
+}
+
+func upgradeStringSliceValue(d *framework.FieldData, oldKey, newKey string, oldVal, newVal *[]string) error {
+	_, ok := d.GetOk(newKey)
+	if !ok {
+		raw, ok := d.GetOk(oldKey)
+		if ok {
+			// Special case: if we're looking at "token_policies", parse the policies
+			if newKey == "token_policies" {
+				*oldVal = policyutil.ParsePolicies(raw)
+			} else {
+				*oldVal = raw.([]string)
+			}
+			*newVal = *oldVal
+		}
+	} else {
+		_, ok = d.GetOk(oldKey)
+		if ok {
+			*oldVal = *newVal
+		} else {
+			*oldVal = nil
+		}
+	}
+
+	return nil
+}
+
+func upgradeSockAddrSliceValue(d *framework.FieldData, oldKey, newKey string, oldVal, newVal *[]*sockaddr.SockAddrMarshaler) error {
+	_, ok := d.GetOk(newKey)
+	if !ok {
+		raw, ok := d.GetOk(oldKey)
+		if ok {
+			boundCIDRs, err := parseutil.ParseAddrs(raw)
+			if err != nil {
+				return err
+			}
+			*oldVal = boundCIDRs
+			*newVal = *oldVal
+		}
+	} else {
+		_, ok = d.GetOk(oldKey)
+		if ok {
+			*oldVal = *newVal
+		} else {
+			*oldVal = nil
+		}
+	}
+
+	return nil
+}
+
+// UpgradeValue takes in old/new data keys and old/new values and calls out to
+// a helper function to perform upgrades in a standardized way. It requires
+// pointers in all cases so that we can set directly into the target struct.
+func UpgradeValue(d *framework.FieldData, oldKey, newKey string, oldVal, newVal interface{}) error {
+	switch typedOldVal := oldVal.(type) {
+	case *time.Duration:
+		typedNewVal, ok := newVal.(*time.Duration)
+		if !ok {
+			return errors.New("mismatch in value types in tokenutil.UpgradeValue")
+		}
+		return upgradeDurationValue(d, oldKey, newKey, typedOldVal, typedNewVal)
+
+	case *int:
+		typedNewVal, ok := newVal.(*int)
+		if !ok {
+			return errors.New("mismatch in value types in tokenutil.UpgradeValue")
+		}
+		return upgradeIntValue(d, oldKey, newKey, typedOldVal, typedNewVal)
+
+	case *[]string:
+		typedNewVal, ok := newVal.(*[]string)
+		if !ok {
+			return errors.New("mismatch in value types in tokenutil.UpgradeValue")
+		}
+		return upgradeStringSliceValue(d, oldKey, newKey, typedOldVal, typedNewVal)
+
+	case *[]*sockaddr.SockAddrMarshaler:
+		typedNewVal, ok := newVal.(*[]*sockaddr.SockAddrMarshaler)
+		if !ok {
+			return errors.New("mismatch in value types in tokenutil.UpgradeValue")
+		}
+		return upgradeSockAddrSliceValue(d, oldKey, newKey, typedOldVal, typedNewVal)
+
+	default:
+		return errors.New("unhandled type in tokenutil.UpgradeValue")
+	}
+}
+
+const (
+	tokenPeriodHelp = `If set, tokens created via this role
+will have no max lifetime; instead, their
+renewal period will be fixed to this value.
+This takes an integer number of seconds,
+or a string duration (e.g. "24h").`
+	tokenExplicitMaxTTLHelp = `If set, tokens created via this role
+carry an explicit maximum TTL. 
During renewal,
+the current maximum TTL values of the role
+and the mount are not checked for changes,
+and any updates to these values will have
+no effect on the token being renewed.`
+)
diff --git a/sdk/helper/useragent/useragent.go b/sdk/helper/useragent/useragent.go
new file mode 100644
index 0000000..53569e9
--- /dev/null
+++ b/sdk/helper/useragent/useragent.go
@@ -0,0 +1,78 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package useragent
+
+import (
+	"fmt"
+	"runtime"
+	"strings"
+
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+var (
+	// projectURL is the project URL.
+	projectURL = "https://www.vaultproject.io/"
+
+	// rt is the Go runtime version; a variable so tests can override it.
+	rt = runtime.Version()
+)
+
+// String returns the consistent user-agent string for Vault.
+// Deprecated: use PluginString instead.
+//
+// Example output:
+//
+// Vault (+https://www.vaultproject.io/; go1.19.5)
+//
+// Given comments will be appended to the semicolon-delimited comment section:
+//
+// Vault (+https://www.vaultproject.io/; go1.19.5; comment-0; comment-1)
+//
+// At one point the user-agent string returned contained the Vault
+// version hardcoded into the vault/sdk/version/ package. This worked for builtin
+// plugins that are compiled into the `vault` binary, in that it correctly described
+// the version of that Vault binary. It did not work for external plugins: for them,
+// the reported version is whichever SDK version is pinned by the external
+// plugin's go.mod, not the version of the Vault server itself. We've kept the
+// String method around to avoid breaking builds, but you should be using PluginString.
+func String(comments ...string) string {
+	c := append([]string{"+" + projectURL, rt}, comments...)
+	return fmt.Sprintf("Vault (%s)", strings.Join(c, "; "))
+}
+
+// PluginString is usable by plugins to return a user-agent string reflecting
+// the running Vault version and an optional plugin name.
+//
+// e.g. Vault/0.10.4 (+https://www.vaultproject.io/; azure-auth; go1.10.1)
+//
+// Given comments will be appended to the semicolon-delimited comment section.
+//
+// e.g. Vault/0.10.4 (+https://www.vaultproject.io/; azure-auth; go1.10.1; comment-0; comment-1)
+//
+// Returns an empty string if the given env is nil.
+func PluginString(env *logical.PluginEnvironment, pluginName string, comments ...string) string {
+	if env == nil {
+		return ""
+	}
+
+	// Construct comments
+	c := []string{"+" + projectURL}
+	if pluginName != "" {
+		c = append(c, pluginName)
+	}
+	c = append(c, rt)
+	c = append(c, comments...)
+
+	// Construct version string
+	v := env.VaultVersion
+	if env.VaultVersionPrerelease != "" {
+		v = fmt.Sprintf("%s-%s", v, env.VaultVersionPrerelease)
+	}
+	if env.VaultVersionMetadata != "" {
+		v = fmt.Sprintf("%s+%s", v, env.VaultVersionMetadata)
+	}
+
+	return fmt.Sprintf("Vault/%s (%s)", v, strings.Join(c, "; "))
+}
diff --git a/sdk/helper/useragent/useragent_test.go b/sdk/helper/useragent/useragent_test.go
new file mode 100644
index 0000000..4677bb6
--- /dev/null
+++ b/sdk/helper/useragent/useragent_test.go
@@ -0,0 +1,170 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package useragent
+
+import (
+	"testing"
+
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+func TestUserAgent(t *testing.T) {
+	projectURL = "https://vault-test.com"
+	rt = "go5.0"
+
+	type args struct {
+		comments []string
+	}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "User agent",
+			args: args{},
+			want: "Vault (+https://vault-test.com; go5.0)",
+		},
+		{
+			name: "User agent with additional comment",
+			args: args{
+				comments: []string{"pid-abcdefg"},
+			},
+			want: "Vault (+https://vault-test.com; go5.0; pid-abcdefg)",
+		},
+		{
+			name: "User agent with additional comments",
+			args: args{
+				comments: []string{"pid-abcdefg", "cloud-provider"},
+			},
+			want: "Vault (+https://vault-test.com; go5.0; pid-abcdefg; cloud-provider)",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := String(tt.args.comments...); got != tt.want {
+				t.Errorf("String() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestUserAgentPlugin(t *testing.T) {
+	projectURL = "https://vault-test.com"
+	rt = "go5.0"
+
+	type args struct {
+		pluginName string
+		pluginEnv  *logical.PluginEnvironment
+		comments   []string
+	}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "Plugin user agent with nil plugin env",
+			args: args{
+				pluginEnv: nil,
+			},
+			want: "",
+		},
+		{
+			name: "Plugin user agent without plugin name",
+			args: args{
+				pluginEnv: &logical.PluginEnvironment{
+					VaultVersion: "1.2.3",
+				},
+			},
+			want: "Vault/1.2.3 (+https://vault-test.com; go5.0)",
+		},
+		{
+			name: "Plugin user agent with plugin name",
+			args: args{
+				pluginName: "azure-auth",
+				pluginEnv: &logical.PluginEnvironment{
+					VaultVersion: "1.2.3",
+				},
+			},
+			want: "Vault/1.2.3 (+https://vault-test.com; azure-auth; go5.0)",
+		},
+		{
+			name: "Plugin user agent with plugin name and additional comment",
+			args: args{
+				pluginName: "azure-auth",
+				pluginEnv: &logical.PluginEnvironment{
+					VaultVersion: "1.2.3",
+				},
+				comments: []string{"pid-abcdefg"},
+			},
+			want: "Vault/1.2.3 (+https://vault-test.com; azure-auth; go5.0; pid-abcdefg)",
+		},
+		{
+			name: "Plugin user agent with plugin name and additional comments",
+			args: args{
+				pluginName: "azure-auth",
+				pluginEnv: &logical.PluginEnvironment{
+					VaultVersion: "1.2.3",
+				},
+				comments: []string{"pid-abcdefg", "cloud-provider"},
+			},
+			want: "Vault/1.2.3 (+https://vault-test.com; azure-auth; go5.0; pid-abcdefg; cloud-provider)",
+		},
+		{
+			name: "Plugin user agent with no plugin name and additional comments",
+			args: args{
+				pluginEnv: &logical.PluginEnvironment{
+					VaultVersion: "1.2.3",
+				},
+				comments: []string{"pid-abcdefg", "cloud-provider"},
+			},
+			want: "Vault/1.2.3 (+https://vault-test.com; go5.0; pid-abcdefg; cloud-provider)",
+		},
+		{
+			name: "Plugin user agent with version prerelease",
+			args: args{
+				pluginName: "azure-auth",
+				pluginEnv: &logical.PluginEnvironment{
+					VaultVersion:           "1.2.3",
+					VaultVersionPrerelease: "dev",
+				},
+				comments: []string{"pid-abcdefg", "cloud-provider"},
+			},
+			want: "Vault/1.2.3-dev (+https://vault-test.com; azure-auth; go5.0; pid-abcdefg; cloud-provider)",
+		},
+		{
+			name: "Plugin user agent with version metadata",
+			args: args{
+				pluginName: "azure-auth",
+				pluginEnv: &logical.PluginEnvironment{
+					VaultVersion: "1.2.3",
+					
VaultVersionMetadata: "ent", + }, + comments: []string{"pid-abcdefg", "cloud-provider"}, + }, + want: "Vault/1.2.3+ent (+https://vault-test.com; azure-auth; go5.0; pid-abcdefg; cloud-provider)", + }, + { + name: "Plugin user agent with version prerelease and metadata", + args: args{ + pluginName: "azure-auth", + pluginEnv: &logical.PluginEnvironment{ + VaultVersion: "1.2.3", + VaultVersionPrerelease: "dev", + VaultVersionMetadata: "ent", + }, + comments: []string{"pid-abcdefg", "cloud-provider"}, + }, + want: "Vault/1.2.3-dev+ent (+https://vault-test.com; azure-auth; go5.0; pid-abcdefg; cloud-provider)", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := PluginString(tt.args.pluginEnv, tt.args.pluginName, tt.args.comments...); got != tt.want { + t.Errorf("PluginString() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/sdk/helper/wrapping/wrapinfo.go b/sdk/helper/wrapping/wrapinfo.go new file mode 100644 index 0000000..03a7030 --- /dev/null +++ b/sdk/helper/wrapping/wrapinfo.go @@ -0,0 +1,40 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package wrapping + +import "time" + +type ResponseWrapInfo struct { + // Setting to non-zero specifies that the response should be wrapped. + // Specifies the desired TTL of the wrapping token. + TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl" sentinel:""` + + // The token containing the wrapped response + Token string `json:"token" structs:"token" mapstructure:"token" sentinel:""` + + // The token accessor for the wrapped response token + Accessor string `json:"accessor" structs:"accessor" mapstructure:"accessor"` + + // The creation time. This can be used with the TTL to figure out an + // expected expiration. + CreationTime time.Time `json:"creation_time" structs:"creation_time" mapstructure:"creation_time" sentinel:""` + + // If the contained response is the output of a token or approle secret-id creation call, the + // created token's/secret-id's accessor will be accessible here + WrappedAccessor string `json:"wrapped_accessor" structs:"wrapped_accessor" mapstructure:"wrapped_accessor" sentinel:""` + + // WrappedEntityID is the entity identifier of the caller who initiated the + // wrapping request + WrappedEntityID string `json:"wrapped_entity_id" structs:"wrapped_entity_id" mapstructure:"wrapped_entity_id" sentinel:""` + + // The format to use. This doesn't get returned, it's only internal. + Format string `json:"format" structs:"format" mapstructure:"format" sentinel:""` + + // CreationPath is the original request path that was used to create + // the wrapped response. + CreationPath string `json:"creation_path" structs:"creation_path" mapstructure:"creation_path" sentinel:""` + + // Controls seal wrapping behavior downstream for specific use cases + SealWrap bool `json:"seal_wrap" structs:"seal_wrap" mapstructure:"seal_wrap" sentinel:""` +} diff --git a/sdk/helper/xor/xor.go b/sdk/helper/xor/xor.go new file mode 100644 index 0000000..098a673 --- /dev/null +++ b/sdk/helper/xor/xor.go @@ -0,0 +1,49 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package xor + +import ( + "encoding/base64" + "fmt" +) + +// XORBytes takes two byte slices and XORs them together, returning the final +// byte slice. It is an error to pass in two byte slices that do not have the +// same length. 
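+//
+// A minimal sketch of the intended use (the byte values are illustrative
+// only; inside Vault this kind of XOR is used to combine secrets with
+// one-time-pad bytes, e.g. when masking generated root tokens):
+//
+//	out, err := XORBytes([]byte{0x0f, 0xf0}, []byte{0xff, 0x00})
+//	// out == []byte{0xf0, 0xf0}, err == nil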
+func XORBytes(a, b []byte) ([]byte, error) {
+	if len(a) != len(b) {
+		return nil, fmt.Errorf("length of byte slices is not equivalent: %d != %d", len(a), len(b))
+	}
+
+	buf := make([]byte, len(a))
+
+	for i := range a {
+		buf[i] = a[i] ^ b[i]
+	}
+
+	return buf, nil
+}
+
+// XORBase64 takes two base64-encoded strings and XORs the decoded byte slices
+// together, returning the final byte slice. It is an error to pass in two
+// strings whose base64-decoded byte slices do not have the same length.
+func XORBase64(a, b string) ([]byte, error) {
+	aBytes, err := base64.StdEncoding.DecodeString(a)
+	if err != nil {
+		return nil, fmt.Errorf("error decoding first base64 value: %w", err)
+	}
+	if len(aBytes) == 0 {
+		return nil, fmt.Errorf("decoded first base64 value is nil or empty")
+	}
+
+	bBytes, err := base64.StdEncoding.DecodeString(b)
+	if err != nil {
+		return nil, fmt.Errorf("error decoding second base64 value: %w", err)
+	}
+	if len(bBytes) == 0 {
+		return nil, fmt.Errorf("decoded second base64 value is nil or empty")
+	}
+
+	return XORBytes(aBytes, bBytes)
+}
diff --git a/sdk/helper/xor/xor_test.go b/sdk/helper/xor/xor_test.go
new file mode 100644
index 0000000..143345d
--- /dev/null
+++ b/sdk/helper/xor/xor_test.go
@@ -0,0 +1,25 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package xor
+
+import (
+	"encoding/base64"
+	"testing"
+)
+
+const (
+	tokenB64    = "ZGE0N2JiODkzYjhkMDYxYw=="
+	xorB64      = "iGiQYG9L0nIp+jRL5+Zk2w=="
+	expectedB64 = "7AmkVw0p6ksamAwv19BVuA=="
+)
+
+func TestBase64XOR(t *testing.T) {
+	ret, err := XORBase64(tokenB64, xorB64)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res := base64.StdEncoding.EncodeToString(ret); res != expectedB64 {
+		t.Fatalf("bad: %s", res)
+	}
+}
diff --git a/sdk/logical/acme_billing.go b/sdk/logical/acme_billing.go
new file mode 100644
index 0000000..6e4f6ef
--- /dev/null
+++ b/sdk/logical/acme_billing.go
@@ -0,0 +1,10 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package logical
+
+import "context"
+
+type ACMEBillingSystemView interface {
+	CreateActivityCountEventForIdentifiers(ctx context.Context, identifiers []string) error
+}
diff --git a/sdk/logical/audit.go b/sdk/logical/audit.go
new file mode 100644
index 0000000..30c03e6
--- /dev/null
+++ b/sdk/logical/audit.go
@@ -0,0 +1,22 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package logical
+
+type LogInput struct {
+	Type                string
+	Auth                *Auth
+	Request             *Request
+	Response            *Response
+	OuterErr            error
+	NonHMACReqDataKeys  []string
+	NonHMACRespDataKeys []string
+}
+
+type MarshalOptions struct {
+	ValueHasher func(string) string
+}
+
+type OptMarshaler interface {
+	MarshalJSONWithOptions(*MarshalOptions) ([]byte, error)
+}
diff --git a/sdk/logical/auth.go b/sdk/logical/auth.go
new file mode 100644
index 0000000..83d9dac
--- /dev/null
+++ b/sdk/logical/auth.go
@@ -0,0 +1,133 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package logical
+
+import (
+	"fmt"
+	"time"
+
+	sockaddr "github.com/hashicorp/go-sockaddr"
+)
+
+// Auth is the resulting authentication information that is part of
+// Response for credential backends. It's also attached to Request objects and
+// defines the authentication used for the request. This value is audit logged.
+type Auth struct {
+	LeaseOptions
+
+	// InternalData is JSON-encodable data that is stored with the auth struct. 
+	// This will be sent back during a Renew/Revoke for storing internal data
+	// used for those operations.
+	InternalData map[string]interface{} `json:"internal_data" mapstructure:"internal_data" structs:"internal_data"`
+
+	// DisplayName is a non-security sensitive identifier that is
+	// applicable to this Auth. It is used for logging and prefixing
+	// of dynamic secrets. For example, DisplayName may be "armon" for
+	// the github credential backend. If the client token is used to
+	// generate a SQL credential, the user may be "github-armon-uuid".
+	// This is to help identify the source without using audit tables.
+	DisplayName string `json:"display_name" mapstructure:"display_name" structs:"display_name"`
+
+	// Policies is the list of policies that the authenticated user
+	// is associated with.
+	Policies []string `json:"policies" mapstructure:"policies" structs:"policies"`
+
+	// TokenPolicies and IdentityPolicies break down the list in Policies to
+	// help determine where a policy was sourced
+	TokenPolicies    []string `json:"token_policies" mapstructure:"token_policies" structs:"token_policies"`
+	IdentityPolicies []string `json:"identity_policies" mapstructure:"identity_policies" structs:"identity_policies"`
+
+	// ExternalNamespacePolicies represent the policies authorized from
+	// different namespaces indexed by respective namespace identifiers
+	ExternalNamespacePolicies map[string][]string `json:"external_namespace_policies" mapstructure:"external_namespace_policies" structs:"external_namespace_policies"`
+
+	// Indicates that the default policy should not be added by core when
+	// creating a token. The default policy will still be added if it's
+	// explicitly defined.
+	NoDefaultPolicy bool `json:"no_default_policy" mapstructure:"no_default_policy" structs:"no_default_policy"`
+
+	// Metadata is used to attach arbitrary string-type metadata to
+	// an authenticated user. This metadata will be outputted into the
+	// audit log.
+	Metadata map[string]string `json:"metadata" mapstructure:"metadata" structs:"metadata"`
+
+	// ClientToken is the token that is generated for the authentication.
+	// This will be filled in by Vault core when an auth structure is
+	// returned. Setting this manually will have no effect.
+	ClientToken string `json:"client_token" mapstructure:"client_token" structs:"client_token"`
+
+	// Accessor is the identifier for the ClientToken. This can be used
+	// to perform management functionalities (especially revocation) when
+	// the ClientToken in the audit logs is obfuscated. Accessor can be used
+	// to revoke a ClientToken and to look up the capabilities of the ClientToken,
+	// both without actually knowing the ClientToken.
+	Accessor string `json:"accessor" mapstructure:"accessor" structs:"accessor"`
+
+	// Period indicates that the token generated using this Auth object
+	// should never expire. The token should be renewed within the duration
+	// specified by this period.
+	Period time.Duration `json:"period" mapstructure:"period" structs:"period"`
+
+	// ExplicitMaxTTL is the max TTL that constrains periodic tokens. For normal
+	// tokens, this value is constrained by the configured max TTL.
+	ExplicitMaxTTL time.Duration `json:"explicit_max_ttl" mapstructure:"explicit_max_ttl" structs:"explicit_max_ttl"`
+
+	// Number of allowed uses of the issued token
+	NumUses int `json:"num_uses" mapstructure:"num_uses" structs:"num_uses"`
+
+	// EntityID is the identifier of the entity in the identity store to which
+	// the identity of the authenticating client belongs.
+	EntityID string `json:"entity_id" mapstructure:"entity_id" structs:"entity_id"`
+
+	// Alias is the information about the authenticated client returned by
+	// the auth backend
+	Alias *Alias `json:"alias" mapstructure:"alias" structs:"alias"`
+
+	// GroupAliases are the informational mappings of external groups which an
+	// authenticated user belongs to. This is used to check if there are
+	// mapped groups for the group aliases in the identity store. For all the
+	// matching groups, the entity ID of the user will be added.
+	GroupAliases []*Alias `json:"group_aliases" mapstructure:"group_aliases" structs:"group_aliases"`
+
+	// The set of CIDRs that this token can be used with
+	BoundCIDRs []*sockaddr.SockAddrMarshaler `json:"bound_cidrs"`
+
+	// CreationPath is a path that the backend can return to use in the lease.
+	// This is currently only supported for the token store where roles may
+	// change the perceived path of the lease, even though they don't change
+	// the request path itself.
+	CreationPath string `json:"creation_path"`
+
+	// TokenType is the type of token being requested
+	TokenType TokenType `json:"token_type"`
+
+	// Orphan is set if the token does not have a parent
+	Orphan bool `json:"orphan"`
+
+	// PolicyResults is the set of policies that grant the token access to the
+	// requesting path.
+	PolicyResults *PolicyResults `json:"policy_results"`
+
+	// MFARequirement describes any login MFA constraints that must be satisfied to complete the login
+	MFARequirement *MFARequirement `json:"mfa_requirement"`
+
+	// EntityCreated is set to true if an entity is created as part of a login request
+	EntityCreated bool `json:"entity_created"`
+}
+
+func (a *Auth) GoString() string {
+	return fmt.Sprintf("*%#v", *a)
+}
+
+type PolicyResults struct {
+	Allowed          bool         `json:"allowed"`
+	GrantingPolicies []PolicyInfo `json:"granting_policies"`
+}
+
+type PolicyInfo struct {
+	Name          string `json:"name"`
+	NamespaceId   string `json:"namespace_id"`
+	NamespacePath string `json:"namespace_path"`
+	Type          string `json:"type"`
+}
diff --git a/sdk/logical/connection.go b/sdk/logical/connection.go
new file mode 100644
index 0000000..e590e6f
--- /dev/null
+++ b/sdk/logical/connection.go
@@ -0,0 +1,21 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package logical
+
+import (
+	"crypto/tls"
+)
+
+// Connection represents the connection information for a request. This
+// is present on the Request structure for credential backends.
+type Connection struct {
+	// RemoteAddr is the network address that sent the request.
+	RemoteAddr string `json:"remote_addr"`
+
+	// RemotePort is the network port that sent the request.
+	RemotePort int `json:"remote_port"`
+
+	// ConnState is the TLS connection state if applicable.
+	ConnState *tls.ConnectionState `sentinel:""`
+}
diff --git a/sdk/logical/controlgroup.go b/sdk/logical/controlgroup.go
new file mode 100644
index 0000000..e166f00
--- /dev/null
+++ b/sdk/logical/controlgroup.go
@@ -0,0 +1,20 @@
+// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package logical
+
+import (
+	"time"
+)
+
+type ControlGroup struct {
+	Authorizations []*Authz  `json:"authorizations"`
+	RequestTime    time.Time `json:"request_time"`
+	Approved       bool      `json:"approved"`
+	NamespaceID    string    `json:"namespace_id"`
+}
+
+type Authz struct {
+	Token             string    `json:"token"`
+	AuthorizationTime time.Time `json:"authorization_time"`
+}
diff --git a/sdk/logical/error.go b/sdk/logical/error.go
new file mode 100644
index 0000000..5605784
--- /dev/null
+++ b/sdk/logical/error.go
@@ -0,0 +1,131 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package logical
+
+import "errors"
+
+var (
+	// ErrUnsupportedOperation is returned if the operation is not supported
+	// by the logical backend.
+	ErrUnsupportedOperation = errors.New("unsupported operation")
+
+	// ErrUnsupportedPath is returned if the path is not supported
+	// by the logical backend.
+	ErrUnsupportedPath = errors.New("unsupported path")
+
+	// ErrInvalidRequest is returned if the request is invalid
+	ErrInvalidRequest = errors.New("invalid request")
+
+	// ErrPermissionDenied is returned if the client is not authorized
+	ErrPermissionDenied = errors.New("permission denied")
+
+	// ErrInvalidCredentials is returned when the provided credentials are incorrect.
+	// This is used internally for user lockout purposes. This is not seen externally.
+	// The status code returned does not change because of this error.
+	ErrInvalidCredentials = errors.New("invalid credentials")
+
+	// ErrMultiAuthzPending is returned if the request needs more
+	// authorizations
+	ErrMultiAuthzPending = errors.New("request needs further approval")
+
+	// ErrUpstreamRateLimited is returned when Vault receives a rate limited
+	// response from an upstream
+	ErrUpstreamRateLimited = errors.New("upstream rate limited")
+
+	// ErrPerfStandbyPleaseForward is returned when Vault is in a state such
+	// that a perf standby cannot satisfy a request
+	ErrPerfStandbyPleaseForward = errors.New("please forward to the active node")
+
+	// ErrLeaseCountQuotaExceeded is returned when a request is rejected due to a lease
+	// count quota being exceeded.
+	ErrLeaseCountQuotaExceeded = errors.New("lease count quota exceeded")
+
+	// ErrRateLimitQuotaExceeded is returned when a request is rejected due to a
+	// rate limit quota being exceeded.
+	ErrRateLimitQuotaExceeded = errors.New("rate limit quota exceeded")
+
+	// ErrUnrecoverable is returned when a request fails due to something that
+	// is likely to require manual intervention. This is a generic form of an
+	// unrecoverable error.
+	// e.g.: misconfigured or disconnected storage backend.
+	ErrUnrecoverable = errors.New("unrecoverable error")
+
+	// ErrMissingRequiredState is returned when a request can't be satisfied
+	// with the data in the local node's storage, based on the provided
+	// X-Vault-Index request header. 
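+	// A caller that receives a wrapped copy of this sentinel can detect it
+	// with the standard errors package; a minimal sketch:
+	//
+	//	if errors.Is(err, logical.ErrMissingRequiredState) {
+	//		// retry once the node has the required index state
+	//	}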
+	ErrMissingRequiredState = errors.New("required index state not present")
+
+	// Error indicating that the requested path used to serve a purpose in older
+	// versions, but the functionality has now been removed
+	ErrPathFunctionalityRemoved = errors.New("functionality on this path has been removed")
+)
+
+type HTTPCodedError interface {
+	Error() string
+	Code() int
+}
+
+func CodedError(status int, msg string) HTTPCodedError {
+	return &codedError{
+		Status:  status,
+		Message: msg,
+	}
+}
+
+var _ HTTPCodedError = (*codedError)(nil)
+
+type codedError struct {
+	Status  int
+	Message string
+}
+
+func (e *codedError) Error() string {
+	return e.Message
+}
+
+func (e *codedError) Code() int {
+	return e.Status
+}
+
+// Struct to identify user input errors. This is helpful in responding with the
+// appropriate status codes to clients from the HTTP endpoints.
+type StatusBadRequest struct {
+	Err string
+}
+
+// Implementing error interface
+func (s *StatusBadRequest) Error() string {
+	return s.Err
+}
+
+// This is a new type declared to not cause potential compatibility problems if
+// the logic around the CodedError changes; in particular for logical request
+// paths it is basically ignored, and changing that behavior might cause
+// unforeseen issues.
+type ReplicationCodedError struct {
+	Msg  string
+	Code int
+}
+
+func (r *ReplicationCodedError) Error() string {
+	return r.Msg
+}
+
+type KeyNotFoundError struct {
+	Err error
+}
+
+func (e *KeyNotFoundError) WrappedErrors() []error {
+	return []error{e.Err}
+}
+
+func (e *KeyNotFoundError) Error() string {
+	return e.Err.Error()
+}
diff --git a/sdk/logical/event.pb.go b/sdk/logical/event.pb.go
new file mode 100644
index 0000000..22e908d
--- /dev/null
+++ b/sdk/logical/event.pb.go
@@ -0,0 +1,413 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.28.1
+// 	protoc        v3.21.12
+// source: sdk/logical/event.proto
+
+package logical
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	structpb "google.golang.org/protobuf/types/known/structpb"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// EventPluginInfo contains data related to the plugin that generated an event.
+type EventPluginInfo struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The type of plugin this event originated from, i.e., "auth" or "secrets". 
+ MountClass string `protobuf:"bytes,1,opt,name=mount_class,json=mountClass,proto3" json:"mount_class,omitempty"` + // Unique ID of the mount entry, e.g., "kv_957bb7d8" + MountAccessor string `protobuf:"bytes,2,opt,name=mount_accessor,json=mountAccessor,proto3" json:"mount_accessor,omitempty"` + // Mount path of the plugin this event originated from, e.g., "secret/" + MountPath string `protobuf:"bytes,3,opt,name=mount_path,json=mountPath,proto3" json:"mount_path,omitempty"` + // Plugin name that this event originated from, e.g., "kv" + Plugin string `protobuf:"bytes,4,opt,name=plugin,proto3" json:"plugin,omitempty"` + // Plugin version of the plugin this event originated from, e.g., "v0.13.3+builtin" + PluginVersion string `protobuf:"bytes,5,opt,name=plugin_version,json=pluginVersion,proto3" json:"plugin_version,omitempty"` + // Mount version that this event originated from, i.e., if KVv2, then "2". Usually empty. + Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *EventPluginInfo) Reset() { + *x = EventPluginInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_logical_event_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EventPluginInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EventPluginInfo) ProtoMessage() {} + +func (x *EventPluginInfo) ProtoReflect() protoreflect.Message { + mi := &file_sdk_logical_event_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EventPluginInfo.ProtoReflect.Descriptor instead. +func (*EventPluginInfo) Descriptor() ([]byte, []int) { + return file_sdk_logical_event_proto_rawDescGZIP(), []int{0} +} + +func (x *EventPluginInfo) GetMountClass() string { + if x != nil { + return x.MountClass + } + return "" +} + +func (x *EventPluginInfo) GetMountAccessor() string { + if x != nil { + return x.MountAccessor + } + return "" +} + +func (x *EventPluginInfo) GetMountPath() string { + if x != nil { + return x.MountPath + } + return "" +} + +func (x *EventPluginInfo) GetPlugin() string { + if x != nil { + return x.Plugin + } + return "" +} + +func (x *EventPluginInfo) GetPluginVersion() string { + if x != nil { + return x.PluginVersion + } + return "" +} + +func (x *EventPluginInfo) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +// EventData contains event data in a CloudEvents container. +type EventData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // ID identifies the event. It is required. The combination of + // CloudEvents Source (i.e., Vault cluster) + ID must be unique. + // Events with the same Source + ID can be assumed to be duplicates + // by consumers. + // Be careful when setting this manually that the ID contains enough + // entropy to be unique, or possibly that it is idempotent, such + // as a hash of other fields with sufficient uniqueness. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Arbitrary non-secret data. Optional. + Metadata *structpb.Struct `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` + // Any IDs that the event relates to, i.e., UUIDs, paths. 
+ EntityIds []string `protobuf:"bytes,3,rep,name=entity_ids,json=entityIds,proto3" json:"entity_ids,omitempty"` + // Human-readable note. + Note string `protobuf:"bytes,4,opt,name=note,proto3" json:"note,omitempty"` +} + +func (x *EventData) Reset() { + *x = EventData{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_logical_event_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EventData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EventData) ProtoMessage() {} + +func (x *EventData) ProtoReflect() protoreflect.Message { + mi := &file_sdk_logical_event_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EventData.ProtoReflect.Descriptor instead. +func (*EventData) Descriptor() ([]byte, []int) { + return file_sdk_logical_event_proto_rawDescGZIP(), []int{1} +} + +func (x *EventData) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *EventData) GetMetadata() *structpb.Struct { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *EventData) GetEntityIds() []string { + if x != nil { + return x.EntityIds + } + return nil +} + +func (x *EventData) GetNote() string { + if x != nil { + return x.Note + } + return "" +} + +// EventReceived is used to consume events and includes additional metadata regarding +// the event type and plugin information. +type EventReceived struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Event *EventData `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` + // namespace path + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + EventType string `protobuf:"bytes,3,opt,name=event_type,json=eventType,proto3" json:"event_type,omitempty"` + PluginInfo *EventPluginInfo `protobuf:"bytes,4,opt,name=plugin_info,json=pluginInfo,proto3" json:"plugin_info,omitempty"` +} + +func (x *EventReceived) Reset() { + *x = EventReceived{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_logical_event_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EventReceived) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EventReceived) ProtoMessage() {} + +func (x *EventReceived) ProtoReflect() protoreflect.Message { + mi := &file_sdk_logical_event_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EventReceived.ProtoReflect.Descriptor instead. 
+func (*EventReceived) Descriptor() ([]byte, []int) { + return file_sdk_logical_event_proto_rawDescGZIP(), []int{2} +} + +func (x *EventReceived) GetEvent() *EventData { + if x != nil { + return x.Event + } + return nil +} + +func (x *EventReceived) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *EventReceived) GetEventType() string { + if x != nil { + return x.EventType + } + return "" +} + +func (x *EventReceived) GetPluginInfo() *EventPluginInfo { + if x != nil { + return x.PluginInfo + } + return nil +} + +var File_sdk_logical_event_proto protoreflect.FileDescriptor + +var file_sdk_logical_event_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x6c, 0x6f, 0x67, 0x69, 0x63, + 0x61, 0x6c, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xd1, 0x01, 0x0a, 0x0f, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6d, + 0x6f, 0x75, 0x6e, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, + 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x83, 0x01, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x49, 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x6f, 0x74, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x6f, 0x74, 0x65, 0x22, 0xb1, 0x01, 0x0a, 0x0d, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x12, 0x28, 0x0a, 0x05, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6c, 0x6f, + 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, + 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 
0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x69, 0x6e, + 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, + 0x61, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, + 0x66, 0x6f, 0x52, 0x0a, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x28, + 0x5a, 0x26, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, + 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, + 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sdk_logical_event_proto_rawDescOnce sync.Once + file_sdk_logical_event_proto_rawDescData = file_sdk_logical_event_proto_rawDesc +) + +func file_sdk_logical_event_proto_rawDescGZIP() []byte { + file_sdk_logical_event_proto_rawDescOnce.Do(func() { + file_sdk_logical_event_proto_rawDescData = protoimpl.X.CompressGZIP(file_sdk_logical_event_proto_rawDescData) + }) + return file_sdk_logical_event_proto_rawDescData +} + +var file_sdk_logical_event_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_sdk_logical_event_proto_goTypes = []interface{}{ + (*EventPluginInfo)(nil), // 0: logical.EventPluginInfo + (*EventData)(nil), // 1: logical.EventData + (*EventReceived)(nil), // 2: logical.EventReceived + (*structpb.Struct)(nil), // 3: google.protobuf.Struct +} +var file_sdk_logical_event_proto_depIdxs = []int32{ + 3, // 0: logical.EventData.metadata:type_name -> google.protobuf.Struct + 1, // 1: logical.EventReceived.event:type_name -> logical.EventData + 0, // 2: logical.EventReceived.plugin_info:type_name -> logical.EventPluginInfo + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_sdk_logical_event_proto_init() } +func file_sdk_logical_event_proto_init() { + if File_sdk_logical_event_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_sdk_logical_event_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EventPluginInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_logical_event_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EventData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_logical_event_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EventReceived); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_sdk_logical_event_proto_rawDesc, + NumEnums: 0, + 
NumMessages:   3,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_sdk_logical_event_proto_goTypes,
+		DependencyIndexes: file_sdk_logical_event_proto_depIdxs,
+		MessageInfos:      file_sdk_logical_event_proto_msgTypes,
+	}.Build()
+	File_sdk_logical_event_proto = out.File
+	file_sdk_logical_event_proto_rawDesc = nil
+	file_sdk_logical_event_proto_goTypes = nil
+	file_sdk_logical_event_proto_depIdxs = nil
+}
diff --git a/sdk/logical/event.proto b/sdk/logical/event.proto
new file mode 100644
index 0000000..6e36e5e
--- /dev/null
+++ b/sdk/logical/event.proto
@@ -0,0 +1,54 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+syntax = "proto3";
+
+option go_package = "github.com/hashicorp/vault/sdk/logical";
+
+package logical;
+
+import "google/protobuf/struct.proto";
+
+// EventPluginInfo contains data related to the plugin that generated an event.
+message EventPluginInfo {
+  // The type of plugin this event originated from, i.e., "auth" or "secrets".
+  string mount_class = 1;
+  // Unique ID of the mount entry, e.g., "kv_957bb7d8"
+  string mount_accessor = 2;
+  // Mount path of the plugin this event originated from, e.g., "secret/"
+  string mount_path = 3;
+  // Plugin name that this event originated from, e.g., "kv"
+  string plugin = 4;
+  // Plugin version of the plugin this event originated from, e.g., "v0.13.3+builtin"
+  string plugin_version = 5;
+  // Mount version that this event originated from, i.e., if KVv2, then "2". Usually empty.
+  string version = 6;
+}
+
+// EventData contains event data in a CloudEvents container.
+message EventData {
+  // ID identifies the event. It is required. The combination of
+  // CloudEvents Source (i.e., Vault cluster) + ID must be unique.
+  // Events with the same Source + ID can be assumed to be duplicates
+  // by consumers.
+  // Be careful when setting this manually that the ID contains enough
+  // entropy to be unique, or possibly that it is idempotent, such
+  // as a hash of other fields with sufficient uniqueness.
+  string id = 1;
+  // Arbitrary non-secret data. Optional.
+  google.protobuf.Struct metadata = 2;
+  // Any IDs that the event relates to, i.e., UUIDs, paths.
+  repeated string entity_ids = 3;
+  // Human-readable note.
+  string note = 4;
+}
+
+// EventReceived is used to consume events and includes additional metadata regarding
+// the event type and plugin information.
+message EventReceived {
+  EventData event = 1;
+  // namespace path
+  string namespace = 2;
+  string event_type = 3;
+  EventPluginInfo plugin_info = 4;
+}
diff --git a/sdk/logical/events.go b/sdk/logical/events.go
new file mode 100644
index 0000000..cbd3f73
--- /dev/null
+++ b/sdk/logical/events.go
@@ -0,0 +1,44 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package logical
+
+import (
+	"context"
+
+	"github.com/hashicorp/go-uuid"
+)
+
+// ID is an alias to GetId() for CloudEvents compatibility.
+func (x *EventReceived) ID() string {
+	return x.Event.GetId()
+}
+
+// NewEvent returns an event with a new, random event ID.
+func NewEvent() (*EventData, error) {
+	id, err := uuid.GenerateUUID()
+	if err != nil {
+		return nil, err
+	}
+	return &EventData{
+		Id: id,
+	}, nil
+}
+
+// EventType represents a topic, and is a wrapper around eventlogger.EventType.
+type EventType string
+
+// EventSender sends events to the common event bus. 
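+//
+// A minimal sketch of emitting an event from a backend, assuming sender is an
+// EventSender and ctx is a context.Context (the event type string and the
+// metadata shown here are illustrative, not a defined schema):
+//
+//	ev, err := NewEvent()
+//	if err != nil {
+//		return err
+//	}
+//	ev.Metadata, _ = structpb.NewStruct(map[string]interface{}{"path": "secret/foo"})
+//	return sender.Send(ctx, EventType("kv-v2/data-write"), ev)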
+type EventSender interface {
+	Send(ctx context.Context, eventType EventType, event *EventData) error
+}
diff --git a/sdk/logical/identity.pb.go b/sdk/logical/identity.pb.go
new file mode 100644
index 0000000..fedc5f5
--- /dev/null
+++ b/sdk/logical/identity.pb.go
@@ -0,0 +1,721 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.28.1
+// 	protoc        v3.21.12
+// source: sdk/logical/identity.proto
+
+package logical
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Entity struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// ID is the unique identifier for the entity
+	ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	// Name is the human-friendly unique identifier for the entity
+	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	// Aliases contains the alias mappings for the given entity
+	Aliases []*Alias `protobuf:"bytes,3,rep,name=aliases,proto3" json:"aliases,omitempty"`
+	// Metadata represents the custom data tied to this entity
+	Metadata map[string]string `protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Disabled is true if the entity is disabled.
+	Disabled bool `protobuf:"varint,5,opt,name=disabled,proto3" json:"disabled,omitempty"`
+	// NamespaceID is the identifier of the namespace to which this entity
+	// belongs.
+	NamespaceID string `protobuf:"bytes,6,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty"`
+}
+
+func (x *Entity) Reset() {
+	*x = Entity{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_sdk_logical_identity_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Entity) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Entity) ProtoMessage() {}
+
+func (x *Entity) ProtoReflect() protoreflect.Message {
+	mi := &file_sdk_logical_identity_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Entity.ProtoReflect.Descriptor instead. 
+func (*Entity) Descriptor() ([]byte, []int) {
+	return file_sdk_logical_identity_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Entity) GetID() string {
+	if x != nil {
+		return x.ID
+	}
+	return ""
+}
+
+func (x *Entity) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *Entity) GetAliases() []*Alias {
+	if x != nil {
+		return x.Aliases
+	}
+	return nil
+}
+
+func (x *Entity) GetMetadata() map[string]string {
+	if x != nil {
+		return x.Metadata
+	}
+	return nil
+}
+
+func (x *Entity) GetDisabled() bool {
+	if x != nil {
+		return x.Disabled
+	}
+	return false
+}
+
+func (x *Entity) GetNamespaceID() string {
+	if x != nil {
+		return x.NamespaceID
+	}
+	return ""
+}
+
+type Alias struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// MountType is the backend mount's type to which this identity belongs
+	MountType string `protobuf:"bytes,1,opt,name=mount_type,json=mountType,proto3" json:"mount_type,omitempty"`
+	// MountAccessor is the identifier of the mount entry to which this
+	// identity belongs
+	MountAccessor string `protobuf:"bytes,2,opt,name=mount_accessor,json=mountAccessor,proto3" json:"mount_accessor,omitempty"`
+	// Name is the identifier of this identity in its authentication source
+	Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+	// Metadata represents the custom data tied to this alias. Fields added
+	// to it should have a low rate of change (or no change) because each
+	// change incurs a storage write, so quickly-changing fields can have
+	// a significant performance impact at scale. See the SDK's
+	// "aliasmetadata" package for a helper that eases and standardizes
+	// using this safely.
+	Metadata map[string]string `protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// ID is the unique identifier for the alias
+	ID string `protobuf:"bytes,5,opt,name=ID,proto3" json:"ID,omitempty"`
+	// NamespaceID is the identifier of the namespace to which this alias
+	// belongs.
+	NamespaceID string `protobuf:"bytes,6,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty"`
+	// CustomMetadata represents the custom data tied to this alias
+	CustomMetadata map[string]string `protobuf:"bytes,7,rep,name=custom_metadata,json=customMetadata,proto3" json:"custom_metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Local indicates if the alias only belongs to the cluster where it was
+	// created. If true, the alias will be stored in a location that is ignored
+	// by the performance replication subsystem. 
+	Local bool `protobuf:"varint,8,opt,name=local,proto3" json:"local,omitempty"`
+}
+
+func (x *Alias) Reset() {
+	*x = Alias{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_sdk_logical_identity_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Alias) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Alias) ProtoMessage() {}
+
+func (x *Alias) ProtoReflect() protoreflect.Message {
+	mi := &file_sdk_logical_identity_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Alias.ProtoReflect.Descriptor instead.
+func (*Alias) Descriptor() ([]byte, []int) {
+	return file_sdk_logical_identity_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Alias) GetMountType() string {
+	if x != nil {
+		return x.MountType
+	}
+	return ""
+}
+
+func (x *Alias) GetMountAccessor() string {
+	if x != nil {
+		return x.MountAccessor
+	}
+	return ""
+}
+
+func (x *Alias) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *Alias) GetMetadata() map[string]string {
+	if x != nil {
+		return x.Metadata
+	}
+	return nil
+}
+
+func (x *Alias) GetID() string {
+	if x != nil {
+		return x.ID
+	}
+	return ""
+}
+
+func (x *Alias) GetNamespaceID() string {
+	if x != nil {
+		return x.NamespaceID
+	}
+	return ""
+}
+
+func (x *Alias) GetCustomMetadata() map[string]string {
+	if x != nil {
+		return x.CustomMetadata
+	}
+	return nil
+}
+
+func (x *Alias) GetLocal() bool {
+	if x != nil {
+		return x.Local
+	}
+	return false
+}
+
+type Group struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// ID is the unique identifier for the group
+	ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	// Name is the human-friendly unique identifier for the group
+	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	// Metadata represents the custom data tied to this group
+	Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// NamespaceID is the identifier of the namespace to which this group
+	// belongs.
+	NamespaceID string `protobuf:"bytes,4,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty"`
+}
+
+func (x *Group) Reset() {
+	*x = Group{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_sdk_logical_identity_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Group) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Group) ProtoMessage() {}
+
+func (x *Group) ProtoReflect() protoreflect.Message {
+	mi := &file_sdk_logical_identity_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Group.ProtoReflect.Descriptor instead. 
+func (*Group) Descriptor() ([]byte, []int) { + return file_sdk_logical_identity_proto_rawDescGZIP(), []int{2} +} + +func (x *Group) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *Group) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Group) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Group) GetNamespaceID() string { + if x != nil { + return x.NamespaceID + } + return "" +} + +type MFAMethodID struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + UsesPasscode bool `protobuf:"varint,3,opt,name=uses_passcode,json=usesPasscode,proto3" json:"uses_passcode,omitempty"` + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *MFAMethodID) Reset() { + *x = MFAMethodID{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_logical_identity_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MFAMethodID) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MFAMethodID) ProtoMessage() {} + +func (x *MFAMethodID) ProtoReflect() protoreflect.Message { + mi := &file_sdk_logical_identity_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MFAMethodID.ProtoReflect.Descriptor instead. +func (*MFAMethodID) Descriptor() ([]byte, []int) { + return file_sdk_logical_identity_proto_rawDescGZIP(), []int{3} +} + +func (x *MFAMethodID) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *MFAMethodID) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *MFAMethodID) GetUsesPasscode() bool { + if x != nil { + return x.UsesPasscode + } + return false +} + +func (x *MFAMethodID) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type MFAConstraintAny struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Any []*MFAMethodID `protobuf:"bytes,1,rep,name=any,proto3" json:"any,omitempty"` +} + +func (x *MFAConstraintAny) Reset() { + *x = MFAConstraintAny{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_logical_identity_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MFAConstraintAny) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MFAConstraintAny) ProtoMessage() {} + +func (x *MFAConstraintAny) ProtoReflect() protoreflect.Message { + mi := &file_sdk_logical_identity_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MFAConstraintAny.ProtoReflect.Descriptor instead. 
+func (*MFAConstraintAny) Descriptor() ([]byte, []int) { + return file_sdk_logical_identity_proto_rawDescGZIP(), []int{4} +} + +func (x *MFAConstraintAny) GetAny() []*MFAMethodID { + if x != nil { + return x.Any + } + return nil +} + +type MFARequirement struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MFARequestID string `protobuf:"bytes,1,opt,name=mfa_request_id,json=mfaRequestId,proto3" json:"mfa_request_id,omitempty"` + MFAConstraints map[string]*MFAConstraintAny `protobuf:"bytes,2,rep,name=mfa_constraints,json=mfaConstraints,proto3" json:"mfa_constraints,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *MFARequirement) Reset() { + *x = MFARequirement{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_logical_identity_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MFARequirement) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MFARequirement) ProtoMessage() {} + +func (x *MFARequirement) ProtoReflect() protoreflect.Message { + mi := &file_sdk_logical_identity_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MFARequirement.ProtoReflect.Descriptor instead. +func (*MFARequirement) Descriptor() ([]byte, []int) { + return file_sdk_logical_identity_proto_rawDescGZIP(), []int{5} +} + +func (x *MFARequirement) GetMFARequestID() string { + if x != nil { + return x.MFARequestID + } + return "" +} + +func (x *MFARequirement) GetMFAConstraints() map[string]*MFAConstraintAny { + if x != nil { + return x.MFAConstraints + } + return nil +} + +var File_sdk_logical_identity_proto protoreflect.FileDescriptor + +var file_sdk_logical_identity_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x69, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x6c, 0x6f, + 0x67, 0x69, 0x63, 0x61, 0x6c, 0x22, 0x8d, 0x02, 0x0a, 0x06, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x39, + 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 
0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb1, 0x03, 0x0a, 0x05, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, + 0x1d, 0x0a, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, + 0x0a, 0x0e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x08, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6c, 0x6f, + 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x49, 0x44, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x4b, 0x0a, 0x0f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x22, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x2e, + 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x41, 0x0a, 0x13, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc5, 0x01, 0x0a, 0x05, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6c, 0x6f, 0x67, 0x69, + 0x63, 0x61, 0x6c, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 
0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x49, 0x64, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x6a, 0x0a, 0x0b, 0x4d, 0x46, 0x41, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x49, 0x44, + 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x02, 0x69, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x73, 0x65, 0x73, 0x5f, 0x70, 0x61, 0x73, + 0x73, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x75, 0x73, 0x65, + 0x73, 0x50, 0x61, 0x73, 0x73, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x3a, 0x0a, + 0x10, 0x4d, 0x46, 0x41, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x41, 0x6e, + 0x79, 0x12, 0x26, 0x0a, 0x03, 0x61, 0x6e, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x4d, 0x46, 0x41, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x49, 0x44, 0x52, 0x03, 0x61, 0x6e, 0x79, 0x22, 0xea, 0x01, 0x0a, 0x0e, 0x4d, 0x46, + 0x41, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0e, + 0x6d, 0x66, 0x61, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6d, 0x66, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x49, 0x64, 0x12, 0x54, 0x0a, 0x0f, 0x6d, 0x66, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, + 0x61, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6c, 0x6f, + 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x4d, 0x46, 0x41, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x66, 0x61, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, + 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x6d, 0x66, 0x61, 0x43, 0x6f, 0x6e, + 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x1a, 0x5c, 0x0a, 0x13, 0x4d, 0x66, 0x61, 0x43, + 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x2f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x4d, 0x46, 0x41, 0x43, 0x6f, + 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x28, 0x5a, 0x26, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, + 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sdk_logical_identity_proto_rawDescOnce sync.Once + 
file_sdk_logical_identity_proto_rawDescData = file_sdk_logical_identity_proto_rawDesc +) + +func file_sdk_logical_identity_proto_rawDescGZIP() []byte { + file_sdk_logical_identity_proto_rawDescOnce.Do(func() { + file_sdk_logical_identity_proto_rawDescData = protoimpl.X.CompressGZIP(file_sdk_logical_identity_proto_rawDescData) + }) + return file_sdk_logical_identity_proto_rawDescData +} + +var file_sdk_logical_identity_proto_msgTypes = make([]protoimpl.MessageInfo, 11) +var file_sdk_logical_identity_proto_goTypes = []interface{}{ + (*Entity)(nil), // 0: logical.Entity + (*Alias)(nil), // 1: logical.Alias + (*Group)(nil), // 2: logical.Group + (*MFAMethodID)(nil), // 3: logical.MFAMethodID + (*MFAConstraintAny)(nil), // 4: logical.MFAConstraintAny + (*MFARequirement)(nil), // 5: logical.MFARequirement + nil, // 6: logical.Entity.MetadataEntry + nil, // 7: logical.Alias.MetadataEntry + nil, // 8: logical.Alias.CustomMetadataEntry + nil, // 9: logical.Group.MetadataEntry + nil, // 10: logical.MFARequirement.MFAConstraintsEntry +} +var file_sdk_logical_identity_proto_depIDxs = []int32{ + 1, // 0: logical.Entity.aliases:type_name -> logical.Alias + 6, // 1: logical.Entity.metadata:type_name -> logical.Entity.MetadataEntry + 7, // 2: logical.Alias.metadata:type_name -> logical.Alias.MetadataEntry + 8, // 3: logical.Alias.custom_metadata:type_name -> logical.Alias.CustomMetadataEntry + 9, // 4: logical.Group.metadata:type_name -> logical.Group.MetadataEntry + 3, // 5: logical.MFAConstraintAny.any:type_name -> logical.MFAMethodID + 10, // 6: logical.MFARequirement.mfa_constraints:type_name -> logical.MFARequirement.MFAConstraintsEntry + 4, // 7: logical.MFARequirement.MFAConstraintsEntry.value:type_name -> logical.MFAConstraintAny + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_sdk_logical_identity_proto_init() } +func file_sdk_logical_identity_proto_init() { + if File_sdk_logical_identity_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_sdk_logical_identity_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Entity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_logical_identity_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Alias); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_logical_identity_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Group); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_logical_identity_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MFAMethodID); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_logical_identity_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MFAConstraintAny); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_sdk_logical_identity_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*MFARequirement); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_sdk_logical_identity_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   11,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_sdk_logical_identity_proto_goTypes,
+		DependencyIndexes: file_sdk_logical_identity_proto_depIDxs,
+		MessageInfos:      file_sdk_logical_identity_proto_msgTypes,
+	}.Build()
+	File_sdk_logical_identity_proto = out.File
+	file_sdk_logical_identity_proto_rawDesc = nil
+	file_sdk_logical_identity_proto_goTypes = nil
+	file_sdk_logical_identity_proto_depIDxs = nil
+}
diff --git a/sdk/logical/identity.proto b/sdk/logical/identity.proto
new file mode 100644
index 0000000..4a1f341
--- /dev/null
+++ b/sdk/logical/identity.proto
@@ -0,0 +1,95 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+syntax = "proto3";
+
+option go_package = "github.com/hashicorp/vault/sdk/logical";
+
+package logical;
+
+message Entity {
+  // ID is the unique identifier for the entity
+  string ID = 1;
+
+  // Name is the human-friendly unique identifier for the entity
+  string name = 2;
+
+  // Aliases contains the alias mappings for the given entity
+  repeated Alias aliases = 3;
+
+  // Metadata represents the custom data tied to this entity
+  map<string, string> metadata = 4;
+
+  // Disabled is true if the entity is disabled.
+  bool disabled = 5;
+
+  // NamespaceID is the identifier of the namespace to which this entity
+  // belongs.
+  string namespace_id = 6;
+}
+
+message Alias {
+  // MountType is the backend mount's type to which this identity belongs
+  string mount_type = 1;
+
+  // MountAccessor is the identifier of the mount entry to which this
+  // identity belongs
+  string mount_accessor = 2;
+
+  // Name is the identifier of this identity in its authentication source
+  string name = 3;
+
+  // Metadata represents the custom data tied to this alias. Fields added
+  // to it should have a low rate of change (or no change) because each
+  // change incurs a storage write, so quickly-changing fields can have
+  // a significant performance impact at scale. See the SDK's
+  // "aliasmetadata" package for a helper that eases and standardizes
+  // using this safely.
+  map<string, string> metadata = 4;
+
+  // ID is the unique identifier for the alias
+  string ID = 5;
+
+  // NamespaceID is the identifier of the namespace to which this alias
+  // belongs.
+  string namespace_id = 6;
+
+  // CustomMetadata represents the custom data tied to this alias
+  map<string, string> custom_metadata = 7;
+
+  // Local indicates if the alias only belongs to the cluster where it was
+  // created. If true, the alias will be stored in a location that is ignored
+  // by the performance replication subsystem.
+  bool local = 8;
+}
+
+message Group {
+  // ID is the unique identifier for the group
+  string ID = 1;
+
+  // Name is the human-friendly unique identifier for the group
+  string name = 2;
+
+  // Metadata represents the custom data tied to this group
+  map<string, string> metadata = 3;
+
+  // NamespaceID is the identifier of the namespace to which this group
+  // belongs.
+  string namespace_id = 4;
+}
+
+message MFAMethodID {
+  string type = 1;
+  string id = 2;
+  bool uses_passcode = 3;
+  string name = 4;
+}
+
+message MFAConstraintAny {
+  repeated MFAMethodID any = 1;
+}
+
+message MFARequirement {
+  string mfa_request_id = 1;
+  map<string, MFAConstraintAny> mfa_constraints = 2;
+}
diff --git a/sdk/logical/lease.go b/sdk/logical/lease.go
new file mode 100644
index 0000000..e00fb52
--- /dev/null
+++ b/sdk/logical/lease.go
@@ -0,0 +1,56 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package logical
+
+import (
+	"time"
+)
+
+// LeaseOptions is an embeddable struct to capture common lease
+// settings between a Secret and Auth
+type LeaseOptions struct {
+	// TTL is the duration that this secret is valid for. Vault
+	// will automatically revoke it after the duration.
+	TTL time.Duration `json:"lease"`
+
+	// MaxTTL is the maximum duration that this secret is valid for.
+	MaxTTL time.Duration `json:"max_ttl"`
+
+	// Renewable, if true, means that this secret can be renewed.
+	Renewable bool `json:"renewable"`
+
+	// Increment will be the lease increment that the user requested.
+	// This is only available on a Renew operation and has no effect
+	// when returning a response.
+	Increment time.Duration `json:"-"`
+
+	// IssueTime is the time of issue for the original lease. This is
+	// only available on Renew and Revoke operations and has no effect when returning
+	// a response. It can be used to enforce maximum lease periods by
+	// a logical backend.
+	IssueTime time.Time `json:"-"`
+}
+
+// LeaseEnabled checks if leasing is enabled
+func (l *LeaseOptions) LeaseEnabled() bool {
+	return l.TTL > 0
+}
+
+// LeaseTotal is the lease duration with a guard against a negative TTL
+func (l *LeaseOptions) LeaseTotal() time.Duration {
+	if l.TTL <= 0 {
+		return 0
+	}
+
+	return l.TTL
+}
+
+// ExpirationTime computes the time at which the lease expires,
+// relative to the current time.
+func (l *LeaseOptions) ExpirationTime() time.Time {
+	var expireTime time.Time
+	if l.LeaseEnabled() {
+		expireTime = time.Now().Add(l.LeaseTotal())
+	}
+	return expireTime
+}
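As a quick illustration of the lease helpers above (an editor's sketch, not part of the upstream patch): `LeaseEnabled` reports whether a positive TTL is set, `LeaseTotal` clamps non-positive TTLs to zero, and `ExpirationTime` projects the expiry from the current time.

```go
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/vault/sdk/logical"
)

func main() {
	opts := logical.LeaseOptions{TTL: time.Hour, Renewable: true}

	if opts.LeaseEnabled() { // true: TTL > 0
		// ExpirationTime is roughly time.Now().Add(opts.LeaseTotal())
		fmt.Println("expires at:", opts.ExpirationTime())
	}

	// A non-positive TTL disables leasing; LeaseTotal guards against it.
	neg := logical.LeaseOptions{TTL: -time.Minute}
	fmt.Println(neg.LeaseEnabled(), neg.LeaseTotal()) // false 0s
}
```

diff --git a/sdk/logical/lease_test.go b/sdk/logical/lease_test.go
new file mode 100644
index 0000000..aee2bbd
--- /dev/null
+++ b/sdk/logical/lease_test.go
@@ -0,0 +1,59 @@
+// Copyright (c) HashiCorp, Inc.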
+// SPDX-License-Identifier: MPL-2.0
+
+package logical
+
+import (
+	"testing"
+	"time"
+)
+
+func TestLeaseOptionsLeaseTotal(t *testing.T) {
+	var l LeaseOptions
+	l.TTL = 1 * time.Hour
+
+	actual := l.LeaseTotal()
+	expected := l.TTL
+	if actual != expected {
+		t.Fatalf("bad: %s", actual)
+	}
+}
+
+func TestLeaseOptionsLeaseTotal_grace(t *testing.T) {
+	var l LeaseOptions
+	l.TTL = 1 * time.Hour
+
+	actual := l.LeaseTotal()
+	if actual != l.TTL {
+		t.Fatalf("bad: %s", actual)
+	}
+}
+
+func TestLeaseOptionsLeaseTotal_negLease(t *testing.T) {
+	var l LeaseOptions
+	l.TTL = -1 * 1 * time.Hour
+
+	actual := l.LeaseTotal()
+	expected := time.Duration(0)
+	if actual != expected {
+		t.Fatalf("bad: %s", actual)
+	}
+}
+
+func TestLeaseOptionsExpirationTime(t *testing.T) {
+	var l LeaseOptions
+	l.TTL = 1 * time.Hour
+
+	limit := time.Now().Add(time.Hour)
+	exp := l.ExpirationTime()
+	if exp.Before(limit) {
+		t.Fatalf("bad: %s", exp)
+	}
+}
+
+func TestLeaseOptionsExpirationTime_noLease(t *testing.T) {
+	var l LeaseOptions
+	if !l.ExpirationTime().IsZero() {
+		t.Fatal("should be zero")
+	}
+}
diff --git a/sdk/logical/logical.go b/sdk/logical/logical.go
new file mode 100644
index 0000000..51928d6
--- /dev/null
+++ b/sdk/logical/logical.go
@@ -0,0 +1,179 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package logical
+
+import (
+	"context"
+
+	log "github.com/hashicorp/go-hclog"
+)
+
+// BackendType is the type of backend that is being implemented
+type BackendType uint32
+
+// These are the types of backends that can be derived from
+// logical.Backend
+const (
+	TypeUnknown    BackendType = 0 // This is also the zero-value for BackendType
+	TypeLogical    BackendType = 1
+	TypeCredential BackendType = 2
+)
+
+// Stringer implementation
+func (b BackendType) String() string {
+	switch b {
+	case TypeLogical:
+		return "secret"
+	case TypeCredential:
+		return "auth"
+	}
+
+	return "unknown"
+}
+
+// Backend interface must be implemented to be "mountable" at
+// a given path. Requests flow through a router which has various mount
+// points that flow to a logical backend. The logic of each backend is flexible,
+// and this is what allows materialized keys to function. There can be specialized
+// logical backends for various upstreams (Consul, PostgreSQL, MySQL, etc) that can
+// interact with remote APIs to generate keys dynamically. This interface also
+// allows for a "procfs" like interaction, as internal state can be exposed by
+// acting like a logical backend and being mounted.
+type Backend interface {
+	// Initialize is used to initialize a plugin after it has been mounted.
+	Initialize(context.Context, *InitializationRequest) error
+
+	// HandleRequest is used to handle a request and generate a response.
+	// The backends must check the operation type and handle appropriately.
+	HandleRequest(context.Context, *Request) (*Response, error)
+
+	// SpecialPaths is a list of paths that are special in some way.
+	// See PathType for the types of special paths. The key is the type
+	// of the special path, and the value is a list of paths for this type.
+	// This is not a regular expression but is an exact match. If the path
+	// ends in '*' then it is a prefix-based match. The '*' can only appear
+	// at the end.
+	SpecialPaths() *Paths
+
+	// System provides an interface to access certain system configuration
+	// information, such as globally configured default and max lease TTLs.
+	System() SystemView
+
+	// Logger provides an interface to access the underlying logger. This
+	// is useful when a struct embeds a Backend-implemented struct that
+	// contains a private instance of logger.
+	Logger() log.Logger
+
+	// HandleExistenceCheck is used to handle a request and generate a response
+	// indicating whether the given path exists or not; this is used to
+	// understand whether the request must have a Create or Update capability
+	// ACL applied. The first bool indicates whether an existence check
+	// function was found for the backend; the second indicates whether, if an
+	// existence check function was found, the item exists or not.
+	HandleExistenceCheck(context.Context, *Request) (bool, bool, error)
+
+	// Cleanup is invoked during an unmount of a backend to allow it to
+	// handle any cleanup like connection closing or releasing of file handles.
+	Cleanup(context.Context)
+
+	// InvalidateKey may be invoked when an object is modified that belongs
+	// to the backend. The backend can use this to clear any caches or reset
+	// internal state as needed.
+	InvalidateKey(context.Context, string)
+
+	// Setup is used to set up the backend based on the provided backend
+	// configuration.
+	Setup(context.Context, *BackendConfig) error
+
+	// Type returns the BackendType for the particular backend
+	Type() BackendType
+}
+
+// BackendConfig is provided to the factory to initialize the backend
+type BackendConfig struct {
+	// View should not be stored, and should only be used for initialization
+	StorageView Storage
+
+	// The backend should use this logger. The log should not contain any secrets.
+	Logger log.Logger
+
+	// System provides a view into a subset of safe system information that
+	// is useful for backends, such as the default/max lease TTLs
+	System SystemView
+
+	// BackendUUID is a unique identifier provided to this backend. It's useful
+	// when a backend needs a consistent and unique string without using storage.
+	BackendUUID string
+
+	// Config is the opaque user configuration provided when mounting
+	Config map[string]string
+
+	// EventsSender provides a mechanism to interact with Vault events.
+	EventsSender EventSender
+}
+
+// Factory is the factory function to create a logical backend.
+type Factory func(context.Context, *BackendConfig) (Backend, error)
+
+// Paths is the structure of special paths that is used for SpecialPaths.
+type Paths struct {
+	// Root are the API paths that require a root token to access
+	Root []string
+
+	// Unauthenticated are the API paths that can be accessed without any auth.
+	// These can't be regular expressions; matching is either exact, prefix-based,
+	// and/or wildcard-based. For a prefix match, append '*' as a suffix.
+	// For a wildcard match, use '+' in the segment to match any identifier
+	// (e.g. 'foo/+/bar'). Note that '+' can't be adjacent to a non-slash.
+	Unauthenticated []string
+
+	// LocalStorage are storage paths (prefixes) that are local to this cluster;
+	// this indicates that these paths should not be replicated across performance clusters
+	// (DR replication is unaffected).
+	LocalStorage []string
+
+	// SealWrapStorage are storage paths that, when using a capable seal,
+	// should be seal wrapped with extra encryption. It is exact matching
+	// unless it ends with '/' in which case it will be treated as a prefix.
+	SealWrapStorage []string
+
+	// WriteForwardedStorage are storage paths that, when running on a PR
+	// Secondary cluster, cause a GRPC call up to the PR Primary cluster's
+	// active node to handle storage.Put(...) and storage.Delete(...) events.
+	// These paths MUST include a {{clusterId}} literal, which the write layer
+	// will resolve to this cluster's UUID ("replication set" identifier).
+	// storage.List(...) and storage.Get(...) operations occur from the
+	// locally replicated data set, but can use path template expansion to be
+	// identifier agnostic.
+	//
+	// These paths require careful consideration by developers who use them. In
+	// particular, writes on secondary clusters will not appear (when a
+	// corresponding read is issued immediately after a write) until the
+	// replication from primary->secondary has occurred. This replication
+	// triggers an InvalidateKey(...) call on the secondary, which can be
+	// used to detect the write has finished syncing. However, this will
+	// likely occur after the request has finished, so it is important to
+	// not block on this occurring.
+	//
+	// On standby nodes, like all storage write operations, this will trigger
+	// an ErrReadOnly return.
+	WriteForwardedStorage []string
+}
+
+type Auditor interface {
+	AuditRequest(ctx context.Context, input *LogInput) error
+	AuditResponse(ctx context.Context, input *LogInput) error
+}
+
+type PluginVersion struct {
+	Version string
+}
+
+// PluginVersioner is an optional interface to return version info.
+type PluginVersioner interface {
+	// PluginVersion returns the version for the backend
+	PluginVersion() PluginVersion
+}
+
+var EmptyPluginVersion = PluginVersion{""}
diff --git a/sdk/logical/logical_storage.go b/sdk/logical/logical_storage.go
new file mode 100644
index 0000000..b4fbc2b
--- /dev/null
+++ b/sdk/logical/logical_storage.go
@@ -0,0 +1,55 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package logical
+
+import (
+	"context"
+
+	"github.com/hashicorp/vault/sdk/physical"
+)
+
+type LogicalStorage struct {
+	underlying physical.Backend
+}
+
+func (s *LogicalStorage) Get(ctx context.Context, key string) (*StorageEntry, error) {
+	entry, err := s.underlying.Get(ctx, key)
+	if err != nil {
+		return nil, err
+	}
+	if entry == nil {
+		return nil, nil
+	}
+	return &StorageEntry{
+		Key:      entry.Key,
+		Value:    entry.Value,
+		SealWrap: entry.SealWrap,
+	}, nil
+}
+
+func (s *LogicalStorage) Put(ctx context.Context, entry *StorageEntry) error {
+	return s.underlying.Put(ctx, &physical.Entry{
+		Key:      entry.Key,
+		Value:    entry.Value,
+		SealWrap: entry.SealWrap,
+	})
+}
+
+func (s *LogicalStorage) Delete(ctx context.Context, key string) error {
+	return s.underlying.Delete(ctx, key)
+}
+
+func (s *LogicalStorage) List(ctx context.Context, prefix string) ([]string, error) {
+	return s.underlying.List(ctx, prefix)
+}
+
+func (s *LogicalStorage) Underlying() physical.Backend {
+	return s.underlying
+}
+
+func NewLogicalStorage(underlying physical.Backend) *LogicalStorage {
+	return &LogicalStorage{
+		underlying: underlying,
+	}
+}
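A brief usage sketch of the `LogicalStorage` adapter above (an editor's illustration, not part of the upstream patch, assuming the SDK's `physical/inmem` package and `go-hclog` for a no-op logger):

```go
package main

import (
	"context"
	"fmt"

	log "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/vault/sdk/logical"
	"github.com/hashicorp/vault/sdk/physical/inmem"
)

func main() {
	ctx := context.Background()

	// Wrap an in-memory physical backend in the logical adapter.
	phys, err := inmem.NewInmem(nil, log.NewNullLogger())
	if err != nil {
		panic(err)
	}
	store := logical.NewLogicalStorage(phys)

	// Put/Get translate between logical.StorageEntry and physical.Entry.
	if err := store.Put(ctx, &logical.StorageEntry{Key: "foo", Value: []byte("bar")}); err != nil {
		panic(err)
	}
	entry, err := store.Get(ctx, "foo")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(entry.Value)) // bar
}
```

diff --git a/sdk/logical/managed_key.go b/sdk/logical/managed_key.go
new file mode 100644
index 0000000..04727f9
--- /dev/null
+++ b/sdk/logical/managed_key.go
@@ -0,0 +1,125 @@
+// Copyright (c) HashiCorp, Inc.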
+// SPDX-License-Identifier: MPL-2.0
+
+package logical
+
+import (
+	"context"
+	"crypto"
+	"io"
+
+	wrapping "github.com/hashicorp/go-kms-wrapping/v2"
+)
+
+type KeyUsage int
+
+const (
+	KeyUsageEncrypt KeyUsage = 1 + iota
+	KeyUsageDecrypt
+	KeyUsageSign
+	KeyUsageVerify
+	KeyUsageWrap
+	KeyUsageUnwrap
+	KeyUsageGenerateRandom
+)
+
+type ManagedKey interface {
+	// Name is a human-readable identifier for a managed key that may be changed
+	// or renamed. Use UUID() if a long-term consistent identifier is needed.
+	Name() string
+	// UUID is a unique identifier for a managed key that is guaranteed to remain
+	// consistent even if a key is migrated or renamed.
+	UUID() string
+	// Present returns true if the key is established in the KMS. This may return
+	// false if, for example, an HSM library is not configured on all cluster nodes.
+	Present(ctx context.Context) (bool, error)
+
+	// AllowsAll returns true if all the requested usages are supported by the managed key.
+	AllowsAll(usages []KeyUsage) bool
+}
+
+type (
+	ManagedKeyConsumer             func(context.Context, ManagedKey) error
+	ManagedSigningKeyConsumer      func(context.Context, ManagedSigningKey) error
+	ManagedEncryptingKeyConsumer   func(context.Context, ManagedEncryptingKey) error
+	ManagedMACKeyConsumer          func(context.Context, ManagedMACKey) error
+	ManagedKeyRandomSourceConsumer func(context.Context, ManagedKeyRandomSource) error
+)
+
+type ManagedKeySystemView interface {
+	// WithManagedKeyByName retrieves an instantiated managed key for consumption by the given function. The
+	// provided key can only be used within the scope of that function call
+	WithManagedKeyByName(ctx context.Context, keyName, backendUUID string, f ManagedKeyConsumer) error
+	// WithManagedKeyByUUID retrieves an instantiated managed key for consumption by the given function. The
+	// provided key can only be used within the scope of that function call
+	WithManagedKeyByUUID(ctx context.Context, keyUuid, backendUUID string, f ManagedKeyConsumer) error
+
+	// WithManagedSigningKeyByName retrieves an instantiated managed signing key for consumption by the given function,
+	// with the same semantics as WithManagedKeyByName
+	WithManagedSigningKeyByName(ctx context.Context, keyName, backendUUID string, f ManagedSigningKeyConsumer) error
+	// WithManagedSigningKeyByUUID retrieves an instantiated managed signing key for consumption by the given function,
+	// with the same semantics as WithManagedKeyByUUID
+	WithManagedSigningKeyByUUID(ctx context.Context, keyUuid, backendUUID string, f ManagedSigningKeyConsumer) error
+	// WithManagedEncryptingKeyByName retrieves an instantiated managed encrypting key for consumption by the given function,
+	// with the same semantics as WithManagedKeyByName
+	WithManagedEncryptingKeyByName(ctx context.Context, keyName, backendUUID string, f ManagedEncryptingKeyConsumer) error
+	// WithManagedEncryptingKeyByUUID retrieves an instantiated managed encrypting key for consumption by the given function,
+	// with the same semantics as WithManagedKeyByUUID
+	WithManagedEncryptingKeyByUUID(ctx context.Context, keyUuid, backendUUID string, f ManagedEncryptingKeyConsumer) error
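+
+	// Editor's note: an illustrative, hypothetical sketch (not part of the
+	// upstream change) of the scoped-consumer pattern used by the With...
+	// methods above; "sysView" and "backendUUID" are assumed to be in scope:
+	//
+	//	err := sysView.WithManagedKeyByName(ctx, "payments-key", backendUUID,
+	//		func(ctx context.Context, mk ManagedKey) error {
+	//			ok, err := mk.Present(ctx)
+	//			if err != nil {
+	//				return err
+	//			}
+	//			if !ok {
+	//				return errors.New("key not present in KMS")
+	//			}
+	//			// mk must not escape this closure
+	//			return nil
+	//		})
+
+	// WithManagedMACKeyByName retrieves an instantiated managed MAC key by name for consumption by the given function,
+	// with the same semantics as WithManagedKeyByName.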
+ WithManagedMACKeyByName(ctx context.Context, keyName, backendUUID string, f ManagedMACKeyConsumer) error + // WithManagedMACKeyByUUID retrieves an instantiated managed MAC key by UUID for consumption by the given function, + // with the same semantics as WithManagedKeyByUUID. + WithManagedMACKeyByUUID(ctx context.Context, keyUUID, backendUUID string, f ManagedMACKeyConsumer) error +} + +type ManagedAsymmetricKey interface { + ManagedKey + GetPublicKey(ctx context.Context) (crypto.PublicKey, error) +} + +type ManagedKeyLifecycle interface { + // GenerateKey generates a key in the KMS if it didn't yet exist, returning the id. + // If it already existed, returns the existing id. KMSKey's key material is ignored if present. + GenerateKey(ctx context.Context) (string, error) +} + +type ManagedSigningKey interface { + ManagedAsymmetricKey + + // Sign returns a digital signature of the provided value. The SignerOpts param must provide the hash function + // that generated the value (if any). + // The optional randomSource specifies the source of random values and may be ignored by the implementation + // (such as on HSMs with their own internal RNG) + Sign(ctx context.Context, value []byte, randomSource io.Reader, opts crypto.SignerOpts) ([]byte, error) + + // Verify verifies the provided signature against the value. The SignerOpts param must provide the hash function + // that generated the value (if any). + // If true is returned the signature is correct, false otherwise. + Verify(ctx context.Context, signature, value []byte, opts crypto.SignerOpts) (bool, error) + + // GetSigner returns an implementation of crypto.Signer backed by the managed key. This should be called + // as needed so as to use per request contexts. + GetSigner(context.Context) (crypto.Signer, error) +} + +type ManagedEncryptingKey interface { + ManagedKey + Encrypt(ctx context.Context, plaintext []byte, options ...wrapping.Option) ([]byte, error) + Decrypt(ctx context.Context, ciphertext []byte, options ...wrapping.Option) ([]byte, error) +} + +type ManagedMACKey interface { + ManagedKey + + // MAC generates a MAC tag using the provided algorithm for the provided value. + MAC(ctx context.Context, algorithm string, data []byte) ([]byte, error) +} + +type ManagedKeyRandomSource interface { + ManagedKey + + // GetRandomBytes returns a number (specified by the count parameter) of random bytes sourced from the target managed key. + GetRandomBytes(count int) ([]byte, error) +} diff --git a/sdk/logical/plugin.pb.go b/sdk/logical/plugin.pb.go new file mode 100644 index 0000000..19b18d8 --- /dev/null +++ b/sdk/logical/plugin.pb.go @@ -0,0 +1,174 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: sdk/logical/plugin.proto + +package logical + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type PluginEnvironment struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // VaultVersion is the version of the Vault server + VaultVersion string `protobuf:"bytes,1,opt,name=vault_version,json=vaultVersion,proto3" json:"vault_version,omitempty"` + // VaultVersionPrerelease is the prerelease information of the Vault server + VaultVersionPrerelease string `protobuf:"bytes,2,opt,name=vault_version_prerelease,json=vaultVersionPrerelease,proto3" json:"vault_version_prerelease,omitempty"` + // VaultVersionMetadata is the version metadata of the Vault server + VaultVersionMetadata string `protobuf:"bytes,3,opt,name=vault_version_metadata,json=vaultVersionMetadata,proto3" json:"vault_version_metadata,omitempty"` +} + +func (x *PluginEnvironment) Reset() { + *x = PluginEnvironment{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_logical_plugin_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PluginEnvironment) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PluginEnvironment) ProtoMessage() {} + +func (x *PluginEnvironment) ProtoReflect() protoreflect.Message { + mi := &file_sdk_logical_plugin_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PluginEnvironment.ProtoReflect.Descriptor instead. +func (*PluginEnvironment) Descriptor() ([]byte, []int) { + return file_sdk_logical_plugin_proto_rawDescGZIP(), []int{0} +} + +func (x *PluginEnvironment) GetVaultVersion() string { + if x != nil { + return x.VaultVersion + } + return "" +} + +func (x *PluginEnvironment) GetVaultVersionPrerelease() string { + if x != nil { + return x.VaultVersionPrerelease + } + return "" +} + +func (x *PluginEnvironment) GetVaultVersionMetadata() string { + if x != nil { + return x.VaultVersionMetadata + } + return "" +} + +var File_sdk_logical_plugin_proto protoreflect.FileDescriptor + +var file_sdk_logical_plugin_proto_rawDesc = []byte{ + 0x0a, 0x18, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x6c, 0x6f, 0x67, 0x69, + 0x63, 0x61, 0x6c, 0x22, 0xa8, 0x01, 0x0a, 0x11, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x45, 0x6e, + 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x75, + 0x6c, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x38, + 0x0a, 0x18, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, + 0x70, 0x72, 0x65, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x16, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, + 0x65, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x76, 0x61, 0x75, 0x6c, + 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x28, + 0x5a, 0x26, 0x67, 
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, + 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, + 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sdk_logical_plugin_proto_rawDescOnce sync.Once + file_sdk_logical_plugin_proto_rawDescData = file_sdk_logical_plugin_proto_rawDesc +) + +func file_sdk_logical_plugin_proto_rawDescGZIP() []byte { + file_sdk_logical_plugin_proto_rawDescOnce.Do(func() { + file_sdk_logical_plugin_proto_rawDescData = protoimpl.X.CompressGZIP(file_sdk_logical_plugin_proto_rawDescData) + }) + return file_sdk_logical_plugin_proto_rawDescData +} + +var file_sdk_logical_plugin_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_sdk_logical_plugin_proto_goTypes = []interface{}{ + (*PluginEnvironment)(nil), // 0: logical.PluginEnvironment +} +var file_sdk_logical_plugin_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_sdk_logical_plugin_proto_init() } +func file_sdk_logical_plugin_proto_init() { + if File_sdk_logical_plugin_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_sdk_logical_plugin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PluginEnvironment); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_sdk_logical_plugin_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_sdk_logical_plugin_proto_goTypes, + DependencyIndexes: file_sdk_logical_plugin_proto_depIdxs, + MessageInfos: file_sdk_logical_plugin_proto_msgTypes, + }.Build() + File_sdk_logical_plugin_proto = out.File + file_sdk_logical_plugin_proto_rawDesc = nil + file_sdk_logical_plugin_proto_goTypes = nil + file_sdk_logical_plugin_proto_depIdxs = nil +} diff --git a/sdk/logical/plugin.proto b/sdk/logical/plugin.proto new file mode 100644 index 0000000..0eaa3c5 --- /dev/null +++ b/sdk/logical/plugin.proto @@ -0,0 +1,19 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +syntax = "proto3"; + +option go_package = "github.com/hashicorp/vault/sdk/logical"; + +package logical; + +message PluginEnvironment { + // VaultVersion is the version of the Vault server + string vault_version = 1; + + // VaultVersionPrerelease is the prerelease information of the Vault server + string vault_version_prerelease = 2; + + // VaultVersionMetadata is the version metadata of the Vault server + string vault_version_metadata = 3; +} diff --git a/sdk/logical/request.go b/sdk/logical/request.go new file mode 100644 index 0000000..6435bd7 --- /dev/null +++ b/sdk/logical/request.go @@ -0,0 +1,467 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package logical + +import ( + "context" + "fmt" + "io" + "net/http" + "strings" + "time" + + "github.com/mitchellh/copystructure" +) + +// RequestWrapInfo is a struct that stores information about desired response +// and seal wrapping behavior +type RequestWrapInfo struct { + // Setting to non-zero specifies that the response should be wrapped. + // Specifies the desired TTL of the wrapping token. + TTL time.Duration `json:"ttl" structs:"ttl" mapstructure:"ttl" sentinel:""` + + // The format to use for the wrapped response; if not specified it's a bare + // token + Format string `json:"format" structs:"format" mapstructure:"format" sentinel:""` + + // A flag to conforming backends that data for a given request should be + // seal wrapped + SealWrap bool `json:"seal_wrap" structs:"seal_wrap" mapstructure:"seal_wrap" sentinel:""` +} + +func (r *RequestWrapInfo) SentinelGet(key string) (interface{}, error) { + if r == nil { + return nil, nil + } + switch key { + case "ttl": + return r.TTL, nil + case "ttl_seconds": + return int64(r.TTL.Seconds()), nil + } + + return nil, nil +} + +func (r *RequestWrapInfo) SentinelKeys() []string { + return []string{ + "ttl", + "ttl_seconds", + } +} + +type ClientTokenSource uint32 + +const ( + NoClientToken ClientTokenSource = iota + ClientTokenFromVaultHeader + ClientTokenFromAuthzHeader +) + +type WALState struct { + ClusterID string + LocalIndex uint64 + ReplicatedIndex uint64 +} + +const indexStateCtxKey = "index_state" + +// IndexStateContext returns a context with an added value holding the index +// state that should be populated on writes. +func IndexStateContext(ctx context.Context, state *WALState) context.Context { + return context.WithValue(ctx, indexStateCtxKey, state) +} + +// IndexStateFromContext is a helper to look up if the provided context contains +// an index state pointer. +func IndexStateFromContext(ctx context.Context) *WALState { + s, ok := ctx.Value(indexStateCtxKey).(*WALState) + if !ok { + return nil + } + return s +} + +// Request is a struct that stores the parameters and context of a request +// being made to Vault. It is used to abstract the details of the higher level +// request protocol from the handlers. +// +// Note: Many of these have Sentinel disabled because they are values populated +// by the router after policy checks; the token namespace would be the right +// place to access them via Sentinel +type Request struct { + // Id is the uuid associated with each request + ID string `json:"id" structs:"id" mapstructure:"id" sentinel:""` + + // If set, the name given to the replication secondary where this request + // originated + ReplicationCluster string `json:"replication_cluster" structs:"replication_cluster" mapstructure:"replication_cluster" sentinel:""` + + // Operation is the requested operation type + Operation Operation `json:"operation" structs:"operation" mapstructure:"operation"` + + // Path is the full path of the request + Path string `json:"path" structs:"path" mapstructure:"path" sentinel:""` + + // Request data is an opaque map that must have string keys. + Data map[string]interface{} `json:"map" structs:"data" mapstructure:"data"` + + // Storage can be used to durably store and retrieve state. + Storage Storage `json:"-" sentinel:""` + + // Secret will be non-nil only for Revoke and Renew operations + // to represent the secret that was returned prior. 
+	Secret *Secret `json:"secret" structs:"secret" mapstructure:"secret" sentinel:""`
+
+	// Auth will be non-nil only for Renew operations
+	// to represent the auth that was returned prior.
+	Auth *Auth `json:"auth" structs:"auth" mapstructure:"auth" sentinel:""`
+
+	// Headers will contain the http headers from the request. This value will
+	// be used in the audit broker to ensure we are auditing only the allowed
+	// headers.
+	Headers map[string][]string `json:"headers" structs:"headers" mapstructure:"headers" sentinel:""`
+
+	// Connection will be non-nil only for credential providers to
+	// inspect the connection information and potentially use it for
+	// authentication/protection.
+	Connection *Connection `json:"connection" structs:"connection" mapstructure:"connection"`
+
+	// ClientToken is provided to the core so that the identity
+	// can be verified and ACLs applied. This value is passed
+	// through to the logical backends but after being salted and
+	// hashed.
+	ClientToken string `json:"client_token" structs:"client_token" mapstructure:"client_token" sentinel:""`
+
+	// ClientTokenAccessor is provided to the core so that it can be
+	// logged as part of request audit logging.
+	ClientTokenAccessor string `json:"client_token_accessor" structs:"client_token_accessor" mapstructure:"client_token_accessor" sentinel:""`
+
+	// DisplayName is provided to the logical backend to help associate
+	// dynamic secrets with the source entity. This is not a sensitive
+	// name, but is useful for operators.
+	DisplayName string `json:"display_name" structs:"display_name" mapstructure:"display_name" sentinel:""`
+
+	// MountPoint is provided so that a logical backend can generate
+	// paths relative to itself. The `Path` is effectively the client
+	// request path with the MountPoint trimmed off.
+	MountPoint string `json:"mount_point" structs:"mount_point" mapstructure:"mount_point" sentinel:""`
+
+	// MountType is provided so that a logical backend can make decisions
+	// based on the specific mount type (e.g., if a mount type has different
+	// aliases, generating different defaults depending on the alias)
+	MountType string `json:"mount_type" structs:"mount_type" mapstructure:"mount_type" sentinel:""`
+
+	// MountAccessor is provided so that identities returned by the authentication
+	// backends can be tied to the mount it belongs to.
+	MountAccessor string `json:"mount_accessor" structs:"mount_accessor" mapstructure:"mount_accessor" sentinel:""`
+
+	// mountRunningVersion is used internally to propagate the semantic version
+	// of the mounted plugin as reported by its vault.MountEntry to audit logging
+	mountRunningVersion string
+
+	// mountRunningSha256 is used internally to propagate the encoded sha256
+	// of the mounted plugin as reported by its vault.MountEntry to audit logging
+	mountRunningSha256 string
+
+	// mountIsExternalPlugin is used internally to propagate whether
+	// the backend of the mounted plugin is running externally (i.e., over GRPC)
+	// to audit logging
+	mountIsExternalPlugin bool
+
+	// mountClass is used internally to propagate the mount class of the mounted plugin to audit logging
+	mountClass string
+
+	// WrapInfo contains requested response wrapping parameters
+	WrapInfo *RequestWrapInfo `json:"wrap_info" structs:"wrap_info" mapstructure:"wrap_info" sentinel:""`
+
+	// ClientTokenRemainingUses represents the allowed number of uses left on the
+	// token supplied
+	ClientTokenRemainingUses int `json:"client_token_remaining_uses" structs:"client_token_remaining_uses" mapstructure:"client_token_remaining_uses"`
+
+	// EntityID is the identity of the caller extracted out of the token used
+	// to make this request
+	EntityID string `json:"entity_id" structs:"entity_id" mapstructure:"entity_id" sentinel:""`
+
+	// PolicyOverride indicates that the requestor wishes to override
+	// soft-mandatory Sentinel policies
+	PolicyOverride bool `json:"policy_override" structs:"policy_override" mapstructure:"policy_override"`
+
+	// Unauthenticated indicates whether the request had no client token
+	// attached. Useful in some situations where the client token is not made
+	// accessible.
+	Unauthenticated bool `json:"unauthenticated" structs:"unauthenticated" mapstructure:"unauthenticated"`
+
+	// MFACreds holds the parsed MFA information supplied over the API as part of
+	// X-Vault-MFA header
+	MFACreds MFACreds `json:"mfa_creds" structs:"mfa_creds" mapstructure:"mfa_creds" sentinel:""`
+
+	// tokenEntry is the cached token entry. This avoids another lookup in
+	// request handling when we've already looked it up at http handling time.
+	// Note that this token has not been "used", as in it will not properly take
+	// into account use count limitations. As a result this field should only
+	// ever be used for transport to a function that would otherwise do a lookup
+	// and then properly use the token.
+	tokenEntry *TokenEntry
+
+	// lastRemoteWAL: for replication, contains the last WAL on the remote side
+	// after handling the request, used for best-effort avoidance of stale
+	// read-after-write
+	lastRemoteWAL uint64
+
+	// ControlGroup holds the authorizations that have happened on this
+	// request
+	ControlGroup *ControlGroup `json:"control_group" structs:"control_group" mapstructure:"control_group" sentinel:""`
+
+	// ClientTokenSource tells us where the client token was sourced from, so
+	// we can delete it before sending off to plugins
+	ClientTokenSource ClientTokenSource
+
+	// HTTPRequest, if set, can be used to access fields from the HTTP request
+	// that generated this logical.Request object, such as the request body.
+	HTTPRequest *http.Request `json:"-" sentinel:""`
+
+	// ResponseWriter if set can be used to stream a response value to the http
+	// request that generated this logical.Request object.
+	ResponseWriter *HTTPResponseWriter `json:"-" sentinel:""`
+
+	// requiredState is used internally to propagate the X-Vault-Index request
+	// header to later levels of request processing that operate only on
+	// logical.Request.
+	requiredState []string
+
+	// responseState is used internally to propagate the state that should appear
+	// in response headers; it's attached to the request rather than the response
+	// because not all requests yield non-nil responses.
+	responseState *WALState
+
+	// ClientID is the identity of the caller. If the token is associated with an
+	// entity, it will be the same as the EntityID. If the token has no entity,
+	// this will be the sha256(sorted policies + namespace) associated with the
+	// client token.
+	ClientID string `json:"client_id" structs:"client_id" mapstructure:"client_id" sentinel:""`
+
+	// InboundSSCToken is the token that arrives on an inbound request, supplied
+	// by the vault user.
+	InboundSSCToken string
+
+	// When a request has been forwarded, contains information of the host the request was forwarded 'from'
+	ForwardedFrom string `json:"forwarded_from,omitempty"`
+}
+
+// Clone returns a deep copy of the request by using copystructure
+func (r *Request) Clone() (*Request, error) {
+	cpy, err := copystructure.Copy(r)
+	if err != nil {
+		return nil, err
+	}
+	return cpy.(*Request), nil
+}
+
+// Get returns a data field and guards for nil Data
+func (r *Request) Get(key string) interface{} {
+	if r.Data == nil {
+		return nil
+	}
+	return r.Data[key]
+}
+
+// GetString returns a data field as a string
+func (r *Request) GetString(key string) string {
+	raw := r.Get(key)
+	s, _ := raw.(string)
+	return s
+}
+
+func (r *Request) GoString() string {
+	return fmt.Sprintf("*%#v", *r)
+}
+
+func (r *Request) SentinelGet(key string) (interface{}, error) {
+	switch key {
+	case "path":
+		// Sanitize it here so that it's consistent in policies
+		return strings.TrimPrefix(r.Path, "/"), nil
+
+	case "wrapping", "wrap_info":
+		// If the pointer is nil accessing the wrap info is considered
+		// "undefined" so this allows us to instead discover a TTL of zero
+		if r.WrapInfo == nil {
+			return &RequestWrapInfo{}, nil
+		}
+		return r.WrapInfo, nil
+	}
+
+	return nil, nil
+}
+
+func (r *Request) SentinelKeys() []string {
+	return []string{
+		"path",
+		"wrapping",
+		"wrap_info",
+	}
+}
+
+func (r *Request) MountRunningVersion() string {
+	return r.mountRunningVersion
+}
+
+func (r *Request) SetMountRunningVersion(mountRunningVersion string) {
+	r.mountRunningVersion = mountRunningVersion
+}
+
+func (r *Request) MountRunningSha256() string {
+	return r.mountRunningSha256
+}
+
+func (r *Request) SetMountRunningSha256(mountRunningSha256 string) {
+	r.mountRunningSha256 = mountRunningSha256
+}
+
+func (r *Request) MountIsExternalPlugin() bool {
+	return r.mountIsExternalPlugin
+}
+
+func (r *Request) SetMountIsExternalPlugin(mountIsExternalPlugin bool) {
+	r.mountIsExternalPlugin = mountIsExternalPlugin
+}
+
+func (r *Request) MountClass() string {
+	return r.mountClass
+}
+
+func (r *Request) SetMountClass(mountClass string) {
+	r.mountClass = mountClass
+}
+
+func (r *Request) LastRemoteWAL() uint64 {
+	return r.lastRemoteWAL
+}
+
+func (r *Request) SetLastRemoteWAL(last uint64) {
+	r.lastRemoteWAL = last
+}
+
+func (r *Request) RequiredState() []string {
+	return r.requiredState
+}
+
+func (r *Request) SetRequiredState(state []string) {
+	r.requiredState = state
+}
+
+func (r *Request) ResponseState() *WALState {
+	return r.responseState
+}
+
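+// Editor's note: an illustrative sketch (not part of the upstream change) of
+// how the WALState plumbing above is typically used: a handler records the
+// state it wants surfaced in response headers on the request object, since
+// not every request yields a non-nil response. The values here are hypothetical:
+//
+//	r.SetResponseState(&WALState{
+//		ClusterID:       clusterID,
+//		LocalIndex:      localIndex,
+//		ReplicatedIndex: replicatedIndex,
+//	})
+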
+func (r *Request) SetResponseState(w *WALState) { + r.responseState = w +} + +func (r *Request) TokenEntry() *TokenEntry { + return r.tokenEntry +} + +func (r *Request) SetTokenEntry(te *TokenEntry) { + r.tokenEntry = te +} + +// RenewRequest creates the structure of the renew request. +func RenewRequest(path string, secret *Secret, data map[string]interface{}) *Request { + return &Request{ + Operation: RenewOperation, + Path: path, + Data: data, + Secret: secret, + } +} + +// RenewAuthRequest creates the structure of the renew request for an auth. +func RenewAuthRequest(path string, auth *Auth, data map[string]interface{}) *Request { + return &Request{ + Operation: RenewOperation, + Path: path, + Data: data, + Auth: auth, + } +} + +// RevokeRequest creates the structure of the revoke request. +func RevokeRequest(path string, secret *Secret, data map[string]interface{}) *Request { + return &Request{ + Operation: RevokeOperation, + Path: path, + Data: data, + Secret: secret, + } +} + +// RollbackRequest creates the structure of the revoke request. +func RollbackRequest(path string) *Request { + return &Request{ + Operation: RollbackOperation, + Path: path, + Data: make(map[string]interface{}), + } +} + +// Operation is an enum that is used to specify the type +// of request being made +type Operation string + +const ( + // The operations below are called per path + CreateOperation Operation = "create" + ReadOperation = "read" + UpdateOperation = "update" + PatchOperation = "patch" + DeleteOperation = "delete" + ListOperation = "list" + HelpOperation = "help" + AliasLookaheadOperation = "alias-lookahead" + ResolveRoleOperation = "resolve-role" + HeaderOperation = "header" + + // The operations below are called globally, the path is less relevant. + RevokeOperation Operation = "revoke" + RenewOperation = "renew" + RollbackOperation = "rollback" +) + +type MFACreds map[string][]string + +// InitializationRequest stores the parameters and context of an Initialize() +// call being made to a logical.Backend. +type InitializationRequest struct { + // Storage can be used to durably store and retrieve state. + Storage Storage +} + +type CustomHeader struct { + Name string + Value string +} + +type CtxKeyInFlightRequestID struct{} + +func (c CtxKeyInFlightRequestID) String() string { + return "in-flight-request-ID" +} + +type CtxKeyRequestRole struct{} + +func (c CtxKeyRequestRole) String() string { + return "request-role" +} + +type ctxKeyOriginalBody struct{} + +func ContextOriginalBodyValue(ctx context.Context) (io.ReadCloser, bool) { + value, ok := ctx.Value(ctxKeyOriginalBody{}).(io.ReadCloser) + return value, ok +} + +func CreateContextOriginalBody(parent context.Context, body io.ReadCloser) context.Context { + return context.WithValue(parent, ctxKeyOriginalBody{}, body) +} diff --git a/sdk/logical/response.go b/sdk/logical/response.go new file mode 100644 index 0000000..9ea5bf6 --- /dev/null +++ b/sdk/logical/response.go @@ -0,0 +1,334 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package logical + +import ( + "bufio" + "encoding/json" + "errors" + "fmt" + "net" + "net/http" + "strconv" + "sync/atomic" + + "github.com/hashicorp/vault/sdk/helper/wrapping" +) + +const ( + // HTTPContentType can be specified in the Data field of a Response + // so that the HTTP front end can specify a custom Content-Type associated + // with the HTTPRawBody. 
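Editorial aside: the two context helpers above form a stash/retrieve pair keyed on an unexported type, so only this package can set the value. A small sketch (illustrative, not part of the patch) of how they compose:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"strings"

	"github.com/hashicorp/vault/sdk/logical"
)

func main() {
	body := io.NopCloser(strings.NewReader(`{"value":"original"}`))

	// Stash the original request body in the context early in handling...
	ctx := logical.CreateContextOriginalBody(context.Background(), body)

	// ...and retrieve it later; the unexported key type means the value
	// cannot be overwritten by other packages.
	if rc, ok := logical.ContextOriginalBodyValue(ctx); ok {
		b, _ := io.ReadAll(rc)
		fmt.Println(string(b)) // {"value":"original"}
	}
}
```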
diff --git a/sdk/logical/response.go b/sdk/logical/response.go
new file mode 100644
index 0000000..9ea5bf6
--- /dev/null
+++ b/sdk/logical/response.go
@@ -0,0 +1,334 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package logical
+
+import (
+	"bufio"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net"
+	"net/http"
+	"strconv"
+	"sync/atomic"
+
+	"github.com/hashicorp/vault/sdk/helper/wrapping"
+)
+
+const (
+	// HTTPContentType can be specified in the Data field of a Response
+	// so that the HTTP front end can specify a custom Content-Type associated
+	// with the HTTPRawBody. This can only be used for non-secrets, and should
+	// be avoided unless absolutely necessary, such as implementing a specification.
+	// The value must be a string.
+	HTTPContentType = "http_content_type"
+
+	// HTTPRawBody is the raw content of the HTTP body that goes with the HTTPContentType.
+	// This can only be specified for non-secrets, and should be similarly
+	// avoided like the HTTPContentType. The value must be a byte slice.
+	HTTPRawBody = "http_raw_body"
+
+	// HTTPStatusCode is the response code of the HTTP body that goes with the HTTPContentType.
+	// This can only be specified for non-secrets, and should be similarly
+	// avoided like the HTTPContentType. The value must be an integer.
+	HTTPStatusCode = "http_status_code"
+
+	// For unwrapping we may need to know whether the value contained in the
+	// raw body is already JSON-unmarshaled. The presence of this key indicates
+	// that it has already been unmarshaled. That way we don't need to simply
+	// ignore errors.
+	HTTPRawBodyAlreadyJSONDecoded = "http_raw_body_already_json_decoded"
+
+	// If set, HTTPCacheControlHeader will replace the default Cache-Control=no-store header
+	// set by the generic wrapping handler. The value must be a string.
+	HTTPCacheControlHeader = "http_raw_cache_control"
+
+	// If set, HTTPPragmaHeader will set the Pragma response header.
+	// The value must be a string.
+	HTTPPragmaHeader = "http_raw_pragma"
+
+	// If set, HTTPWWWAuthenticateHeader will set the WWW-Authenticate response header.
+	// The value must be a string.
+	HTTPWWWAuthenticateHeader = "http_www_authenticate"
+)
+
+// Response is a struct that stores the response of a request.
+// It is used to abstract the details of the higher level request protocol.
+type Response struct {
+	// Secret, if not nil, denotes that this response represents a secret.
+	Secret *Secret `json:"secret" structs:"secret" mapstructure:"secret"`
+
+	// Auth, if not nil, contains the authentication information for
+	// this response. This is only checked and means something for
+	// credential backends.
+	Auth *Auth `json:"auth" structs:"auth" mapstructure:"auth"`
+
+	// Response data is an opaque map that must have string keys. For
+	// secrets, this data is sent down to the user as-is. To store internal
+	// data that you don't want the user to see, store it in
+	// Secret.InternalData.
+	Data map[string]interface{} `json:"data" structs:"data" mapstructure:"data"`
+
+	// Redirect is an HTTP URL to redirect to for further authentication.
+	// This is only valid for credential backends. This will be blanked
+	// for any logical backend and ignored.
+	Redirect string `json:"redirect" structs:"redirect" mapstructure:"redirect"`
+
+	// Warnings allow operations or backends to return warnings in response
+	// to user actions without failing the action outright.
+	Warnings []string `json:"warnings" structs:"warnings" mapstructure:"warnings"`
+
+	// Information for wrapping the response in a cubbyhole
+	WrapInfo *wrapping.ResponseWrapInfo `json:"wrap_info" structs:"wrap_info" mapstructure:"wrap_info"`
+
+	// Headers will contain the http headers from the plugin that it wishes to
+	// have as part of the output
+	Headers map[string][]string `json:"headers" structs:"headers" mapstructure:"headers"`
+}
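Editorial aside: the constructors that follow build Response values in the conventional shapes the SDK expects. A quick sketch (illustrative, not part of the patch) of the error and list conventions:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/logical"
)

func main() {
	// An error response carries a single "error" key in Data...
	errResp := logical.ErrorResponse("unknown role %q", "deploy")
	fmt.Println(errResp.IsError()) // true
	fmt.Println(errResp.Error())   // unknown role "deploy"

	// ...while a list response uses the conventional "keys" entry.
	listResp := logical.ListResponse([]string{"a", "b"})
	fmt.Println(listResp.IsError())     // false
	fmt.Println(listResp.Data["keys"]) // [a b]
}
```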
+// AddWarning adds a warning into the response's warning list
+func (r *Response) AddWarning(warning string) {
+	if r.Warnings == nil {
+		r.Warnings = make([]string, 0, 1)
+	}
+	r.Warnings = append(r.Warnings, warning)
+}
+
+// IsError returns true if this response seems to indicate an error.
+func (r *Response) IsError() bool {
+	// A response is considered an error if its data contains only an 'error'
+	// element, or only an 'error' and a 'data' element.
+	return r != nil && r.Data != nil && r.Data["error"] != nil && (len(r.Data) == 1 || (r.Data["data"] != nil && len(r.Data) == 2))
+}
+
+func (r *Response) Error() error {
+	if !r.IsError() {
+		return nil
+	}
+	switch r.Data["error"].(type) {
+	case string:
+		return errors.New(r.Data["error"].(string))
+	case error:
+		return r.Data["error"].(error)
+	}
+	return nil
+}
+
+// HelpResponse is used to format a help response
+func HelpResponse(text string, seeAlso []string, oapiDoc interface{}) *Response {
+	return &Response{
+		Data: map[string]interface{}{
+			"help":     text,
+			"see_also": seeAlso,
+			"openapi":  oapiDoc,
+		},
+	}
+}
+
+// ErrorResponse is used to format an error response
+func ErrorResponse(text string, vargs ...interface{}) *Response {
+	if len(vargs) > 0 {
+		text = fmt.Sprintf(text, vargs...)
+	}
+	return &Response{
+		Data: map[string]interface{}{
+			"error": text,
+		},
+	}
+}
+
+// ListResponse is used to format a response to a list operation.
+func ListResponse(keys []string) *Response {
+	resp := &Response{
+		Data: map[string]interface{}{},
+	}
+	if len(keys) != 0 {
+		resp.Data["keys"] = keys
+	}
+	return resp
+}
+
+// ListResponseWithInfo is used to format a response to a list operation and
+// return the keys as well as a map with corresponding key info.
+func ListResponseWithInfo(keys []string, keyInfo map[string]interface{}) *Response {
+	resp := ListResponse(keys)
+
+	keyInfoData := make(map[string]interface{})
+	for _, key := range keys {
+		val, ok := keyInfo[key]
+		if ok {
+			keyInfoData[key] = val
+		}
+	}
+
+	if len(keyInfoData) > 0 {
+		resp.Data["key_info"] = keyInfoData
+	}
+
+	return resp
+}
+
+// RespondWithStatusCode takes a response and converts it to a raw response with
+// the provided Status Code.
+func RespondWithStatusCode(resp *Response, req *Request, code int) (*Response, error) {
+	ret := &Response{
+		Data: map[string]interface{}{
+			HTTPContentType: "application/json",
+			HTTPStatusCode:  code,
+		},
+	}
+
+	if resp != nil {
+		httpResp := LogicalResponseToHTTPResponse(resp)
+
+		if req != nil {
+			httpResp.RequestID = req.ID
+		}
+
+		body, err := json.Marshal(httpResp)
+		if err != nil {
+			return nil, err
+		}
+
+		// We default to string here so that the value is HMAC'd via audit.
+		// Since this function is always marshaling to JSON, this is
+		// appropriate.
+		ret.Data[HTTPRawBody] = string(body)
+	}
+
+	return ret, nil
+}
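Editorial aside: a brief sketch (illustrative, not part of the patch) of RespondWithStatusCode, which wraps a logical.Response into a raw response carrying an explicit status code and a pre-marshaled JSON body:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/hashicorp/vault/sdk/logical"
)

func main() {
	resp := logical.ErrorResponse("not ready")
	req := &logical.Request{ID: "req-123"}

	raw, err := logical.RespondWithStatusCode(resp, req, http.StatusServiceUnavailable)
	if err != nil {
		panic(err)
	}

	// The converted response stores the code and body under well-known keys.
	fmt.Println(raw.Data[logical.HTTPStatusCode]) // 503
	fmt.Println(raw.Data[logical.HTTPRawBody])    // JSON string including "request_id":"req-123"
}
```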
+// HTTPResponseWriter is optionally added to a request object and can be used to
+// write directly to the HTTP response writer.
+type HTTPResponseWriter struct {
+	http.ResponseWriter
+	written *uint32
+}
+
+// NewHTTPResponseWriter creates a new HTTPResponseWriter object that wraps the
+// provided http.ResponseWriter.
+func NewHTTPResponseWriter(w http.ResponseWriter) *HTTPResponseWriter {
+	return &HTTPResponseWriter{
+		ResponseWriter: w,
+		written:        new(uint32),
+	}
+}
+
+// Write will write the bytes to the underlying ResponseWriter.
+func (w *HTTPResponseWriter) Write(bytes []byte) (int, error) {
+	atomic.StoreUint32(w.written, 1)
+	return w.ResponseWriter.Write(bytes)
+}
+
+// Written tells us if the writer has been written to yet.
+func (w *HTTPResponseWriter) Written() bool {
+	return atomic.LoadUint32(w.written) == 1
+}
+
+type WrappingResponseWriter interface {
+	http.ResponseWriter
+	Wrapped() http.ResponseWriter
+}
+
+type StatusHeaderResponseWriter struct {
+	wrapped     http.ResponseWriter
+	wroteHeader bool
+	StatusCode  int
+	headers     map[string][]*CustomHeader
+}
+
+func NewStatusHeaderResponseWriter(w http.ResponseWriter, h map[string][]*CustomHeader) *StatusHeaderResponseWriter {
+	return &StatusHeaderResponseWriter{
+		wrapped:     w,
+		wroteHeader: false,
+		StatusCode:  200,
+		headers:     h,
+	}
+}
+
+func (w *StatusHeaderResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+	if h, ok := w.wrapped.(http.Hijacker); ok {
+		return h.Hijack()
+	}
+	return nil, nil, fmt.Errorf("could not hijack because wrapped connection is %T and it does not implement http.Hijacker", w.wrapped)
+}
+
+func (w *StatusHeaderResponseWriter) Wrapped() http.ResponseWriter {
+	return w.wrapped
+}
+
+func (w *StatusHeaderResponseWriter) Header() http.Header {
+	return w.wrapped.Header()
+}
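Editorial aside: a sketch (illustrative, not part of the patch) of the header map consumed by StatusHeaderResponseWriter. Per setCustomResponseHeaders below, "default" applies to every response, status-class keys like "2xx" apply to a whole class, and exact keys like "404" apply to a single code:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/hashicorp/vault/sdk/logical"
)

func main() {
	// Hypothetical header configuration for illustration only.
	headers := map[string][]*logical.CustomHeader{
		"default": {{Name: "X-Served-By", Value: "vault"}},
		"2xx":     {{Name: "Cache-Control", Value: "no-store"}},
		"404":     {{Name: "X-Missing", Value: "true"}},
	}

	rec := httptest.NewRecorder()
	w := logical.NewStatusHeaderResponseWriter(rec, headers)
	w.WriteHeader(http.StatusNotFound)

	fmt.Println(rec.Header().Get("X-Served-By"))   // vault (default)
	fmt.Println(rec.Header().Get("X-Missing"))     // true (exact 404 match)
	fmt.Println(rec.Header().Get("Cache-Control")) // empty: 404 is not in the 2xx class
}
```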
+func (w *StatusHeaderResponseWriter) Write(buf []byte) (int, error) {
+	// It is allowed to call ResponseWriter.Write without a prior call to
+	// ResponseWriter.WriteHeader ("handleUIStub" is one example). In that
+	// case the underlying Write sets the status code to 200 internally,
+	// which may invoke other WriteHeader implementations, so we still need
+	// to set the custom headers here. When both WriteHeader and Write of
+	// StatusHeaderResponseWriter are called, the internal call to WriteHeader
+	// invoked from inside Write won't change the headers again.
+	if !w.wroteHeader {
+		w.setCustomResponseHeaders(w.StatusCode)
+	}
+
+	return w.wrapped.Write(buf)
+}
+
+func (w *StatusHeaderResponseWriter) WriteHeader(statusCode int) {
+	w.setCustomResponseHeaders(statusCode)
+	w.wrapped.WriteHeader(statusCode)
+	w.StatusCode = statusCode
+	// in cases where Write is called after WriteHeader, let's prevent setting
+	// ResponseWriter headers twice
+	w.wroteHeader = true
+}
+
+func (w *StatusHeaderResponseWriter) setCustomResponseHeaders(status int) {
+	sch := w.headers
+	if sch == nil {
+		return
+	}
+
+	// Checking the validity of the status code
+	if status >= 600 || status < 100 {
+		return
+	}
+
+	// setter function to set the headers
+	setter := func(hvl []*CustomHeader) {
+		for _, hv := range hvl {
+			w.Header().Set(hv.Name, hv.Value)
+		}
+	}
+
+	// Set the default headers first
+	setter(sch["default"])
+
+	// then the status-class headers ("2xx", "4xx", ...)
+	d := fmt.Sprintf("%vxx", status/100)
+	if val, ok := sch[d]; ok {
+		setter(val)
+	}
+
+	// and finally the headers for the specific status code
+	if val, ok := sch[strconv.Itoa(status)]; ok {
+		setter(val)
+	}
+}
+
+var _ WrappingResponseWriter = &StatusHeaderResponseWriter{}
+
+// ResolveRoleResponse returns a standard response to be returned by functions handling a ResolveRoleOperation
+func ResolveRoleResponse(roleName string) (*Response, error) {
+	return &Response{
+		Data: map[string]interface{}{
+			"role": roleName,
+		},
+	}, nil
+}
diff --git a/sdk/logical/response_util.go b/sdk/logical/response_util.go
new file mode 100644
index 0000000..aef0213
--- /dev/null
+++ b/sdk/logical/response_util.go
@@ -0,0 +1,218 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package logical
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+
+	"github.com/hashicorp/errwrap"
+	multierror "github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/vault/sdk/helper/consts"
+)
+
+// RespondErrorCommon pulls most of the functionality from http's
+// respondErrorCommon and some of http's handleLogical and makes it available
+// to both the http package and elsewhere.
+func RespondErrorCommon(req *Request, resp *Response, err error) (int, error) {
+	if err == nil && (resp == nil || !resp.IsError()) {
+		switch {
+		case req.Operation == ReadOperation || req.Operation == HeaderOperation:
+			if resp == nil {
+				return http.StatusNotFound, nil
+			}
+
+		// Basically: if we have empty "keys" or no keys at all, 404. This
+		// provides consistency with GET.
+		case req.Operation == ListOperation && (resp == nil || resp.WrapInfo == nil):
+			if resp == nil {
+				return http.StatusNotFound, nil
+			}
+			if len(resp.Data) == 0 {
+				if len(resp.Warnings) > 0 {
+					return 0, nil
+				}
+				return http.StatusNotFound, nil
+			}
+			keysRaw, ok := resp.Data["keys"]
+			if !ok || keysRaw == nil {
+				// If we don't have keys but have other data, return as-is
+				if len(resp.Data) > 0 || len(resp.Warnings) > 0 {
+					return 0, nil
+				}
+				return http.StatusNotFound, nil
+			}
+
+			var keys []string
+			switch keysRaw.(type) {
+			case []interface{}:
+				keys = make([]string, len(keysRaw.([]interface{})))
+				for i, el := range keysRaw.([]interface{}) {
+					s, ok := el.(string)
+					if !ok {
+						return http.StatusInternalServerError, nil
+					}
+					keys[i] = s
+				}
+
+			case []string:
+				keys = keysRaw.([]string)
+			default:
+				return http.StatusInternalServerError, nil
+			}
+
+			if len(keys) == 0 {
+				return http.StatusNotFound, nil
+			}
+		}
+
+		return 0, nil
+	}
+
+	if errwrap.ContainsType(err, new(ReplicationCodedError)) {
+		var allErrors error
+		var codedErr *ReplicationCodedError
+		errwrap.Walk(err, func(inErr error) {
+			// The Walk function does not just traverse leaves; it also executes
+			// the callback on the entire error first. So, if the error is of
+			// type multierror.Error, skip storing the entire error to avoid
+			// adding duplicates when walking down to the leaf errors
+			if _, ok := inErr.(*multierror.Error); ok {
+				return
+			}
+			newErr, ok := inErr.(*ReplicationCodedError)
+			if ok {
+				codedErr = newErr
+			} else {
+				// if the error is of type fmt.wrapError, which is typically
+				// made by calling fmt.Errorf("... %w", err), allErrors will
+				// contain duplicated error messages
+				allErrors = multierror.Append(allErrors, inErr)
+			}
+		})
+		if allErrors != nil {
+			return codedErr.Code, multierror.Append(fmt.Errorf("errors from both primary and secondary; primary error was %v; secondary errors follow", codedErr.Msg), allErrors)
+		}
+		return codedErr.Code, errors.New(codedErr.Msg)
+	}
+
+	// Start out with internal server error since in most of these cases there
+	// won't be a response so this won't be overridden
+	statusCode := http.StatusInternalServerError
+	// If we actually have a response, start out with bad request
+	if resp != nil {
+		statusCode = http.StatusBadRequest
+	}
+
+	// Now, check the error itself; if it has a specific logical error, set the
+	// appropriate code
+	if err != nil {
+		switch {
+		case errwrap.ContainsType(err, new(StatusBadRequest)):
+			statusCode = http.StatusBadRequest
+		case errwrap.Contains(err, ErrPermissionDenied.Error()):
+			statusCode = http.StatusForbidden
+		case errwrap.Contains(err, consts.ErrInvalidWrappingToken.Error()):
+			statusCode = http.StatusBadRequest
+		case errwrap.Contains(err, ErrUnsupportedOperation.Error()):
+			statusCode = http.StatusMethodNotAllowed
+		case errwrap.Contains(err, ErrUnsupportedPath.Error()):
+			statusCode = http.StatusNotFound
+		case errwrap.Contains(err, ErrInvalidRequest.Error()):
+			statusCode = http.StatusBadRequest
+		case errwrap.Contains(err, ErrUpstreamRateLimited.Error()):
+			statusCode = http.StatusBadGateway
+		case errwrap.Contains(err, ErrRateLimitQuotaExceeded.Error()):
+			statusCode = http.StatusTooManyRequests
+		case errwrap.Contains(err, ErrLeaseCountQuotaExceeded.Error()):
+			statusCode = http.StatusTooManyRequests
+		case errwrap.Contains(err, ErrMissingRequiredState.Error()):
+			statusCode = http.StatusPreconditionFailed
+		case errwrap.Contains(err, ErrPathFunctionalityRemoved.Error()):
+			statusCode = http.StatusNotFound
+		case errwrap.Contains(err, ErrRelativePath.Error()):
+			statusCode = http.StatusBadRequest
+		case errwrap.Contains(err, ErrInvalidCredentials.Error()):
+			statusCode = http.StatusBadRequest
+		}
+	}
+
+	if resp != nil && resp.IsError() {
+		err = fmt.Errorf("%s", resp.Data["error"].(string))
+	}
+
+	return statusCode, err
+}
+
+// AdjustErrorStatusCode adjusts the status that will be sent in error
+// conditions in a way that can be shared across http's respondError and other
+// locations.
+func AdjustErrorStatusCode(status *int, err error) {
+	// Handle nested errors
+	if t, ok := err.(*multierror.Error); ok {
+		for _, e := range t.Errors {
+			AdjustErrorStatusCode(status, e)
+		}
+	}
+
+	// Adjust status code when sealed
+	if errwrap.Contains(err, consts.ErrSealed.Error()) {
+		*status = http.StatusServiceUnavailable
+	}
+
+	if errwrap.Contains(err, consts.ErrAPILocked.Error()) {
+		*status = http.StatusServiceUnavailable
+	}
+
+	// Adjust status code when the request body is too large
+	if errwrap.Contains(err, "http: request body too large") {
+		*status = http.StatusRequestEntityTooLarge
+	}
+
+	// Allow HTTPCoded error passthrough to specify a code
+	if t, ok := err.(HTTPCodedError); ok {
+		*status = t.Code()
+	}
+}
+
+func RespondError(w http.ResponseWriter, status int, err error) {
+	AdjustErrorStatusCode(&status, err)
+
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(status)
+
+	type ErrorResponse struct {
+		Errors []string `json:"errors"`
+	}
+	resp := &ErrorResponse{Errors: make([]string, 0, 1)}
+	if err != nil {
+		resp.Errors = append(resp.Errors, err.Error())
+	}
+
+	enc := json.NewEncoder(w)
+	enc.Encode(resp)
+}
+
+func RespondErrorAndData(w http.ResponseWriter, status int, data interface{}, err error) {
+	AdjustErrorStatusCode(&status, err)
+
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(status)
+
+	type ErrorAndDataResponse struct {
+		Errors []string    `json:"errors"`
+		Data   interface{} `json:"data"`
+	}
+	resp := &ErrorAndDataResponse{Errors: make([]string, 0, 1)}
+	if err != nil {
+		resp.Errors = append(resp.Errors, err.Error())
+	}
+	resp.Data = data
+
+	enc := json.NewEncoder(w)
+	enc.Encode(resp)
+}
diff --git a/sdk/logical/response_util_test.go b/sdk/logical/response_util_test.go
new file mode 100644
index 0000000..eafaa2f
--- /dev/null
+++ b/sdk/logical/response_util_test.go
@@ -0,0 +1,106 @@
+// Copyright (c) HashiCorp, Inc.
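Editorial aside: a compact sketch (illustrative, not part of the patch) of the status mapping RespondErrorCommon performs:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/logical"
)

func main() {
	req := &logical.Request{Operation: logical.ReadOperation}

	// A nil response on a read with no error maps to 404.
	status, err := logical.RespondErrorCommon(req, nil, nil)
	fmt.Println(status, err) // 404 <nil>

	// A recognized sentinel error maps to its conventional status code,
	// and the response's "error" value is surfaced as the returned error.
	resp := logical.ErrorResponse("permission denied")
	status, err = logical.RespondErrorCommon(req, resp, logical.ErrPermissionDenied)
	fmt.Println(status, err) // 403 permission denied
}
```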
+// SPDX-License-Identifier: MPL-2.0 + +package logical + +import ( + "errors" + "strings" + "testing" +) + +func TestResponseUtil_RespondErrorCommon_basic(t *testing.T) { + testCases := []struct { + title string + req *Request + resp *Response + respErr error + expectedStatus int + expectedErr error + }{ + { + title: "Throttled, no error", + respErr: ErrUpstreamRateLimited, + resp: &Response{}, + expectedStatus: 502, + }, + { + title: "Throttled, with error", + respErr: ErrUpstreamRateLimited, + resp: &Response{ + Data: map[string]interface{}{ + "error": "rate limited", + }, + }, + expectedStatus: 502, + }, + { + title: "Read not found", + req: &Request{ + Operation: ReadOperation, + }, + respErr: nil, + expectedStatus: 404, + }, + { + title: "Header not found", + req: &Request{ + Operation: HeaderOperation, + }, + respErr: nil, + expectedStatus: 404, + }, + { + title: "List with response and no keys", + req: &Request{ + Operation: ListOperation, + }, + resp: &Response{}, + respErr: nil, + expectedStatus: 404, + }, + { + title: "List with response and keys", + req: &Request{ + Operation: ListOperation, + }, + resp: &Response{ + Data: map[string]interface{}{ + "keys": []string{"some", "things", "here"}, + }, + }, + respErr: nil, + expectedStatus: 0, + }, + { + title: "Invalid Credentials error ", + respErr: ErrInvalidCredentials, + resp: &Response{ + Data: map[string]interface{}{ + "error": "error due to wrong credentials", + }, + }, + expectedErr: errors.New("error due to wrong credentials"), + expectedStatus: 400, + }, + } + + for _, tc := range testCases { + t.Run(tc.title, func(t *testing.T) { + var status int + var err, respErr error + if tc.respErr != nil { + respErr = tc.respErr + } + status, err = RespondErrorCommon(tc.req, tc.resp, respErr) + if status != tc.expectedStatus { + t.Fatalf("Expected (%d) status code, got (%d)", tc.expectedStatus, status) + } + if tc.expectedErr != nil { + if !strings.Contains(tc.expectedErr.Error(), err.Error()) { + t.Fatalf("Expected error to contain:\n%s\n\ngot:\n%s\n", tc.expectedErr, err) + } + } + }) + } +} diff --git a/sdk/logical/secret.go b/sdk/logical/secret.go new file mode 100644 index 0000000..e6b4d14 --- /dev/null +++ b/sdk/logical/secret.go @@ -0,0 +1,33 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package logical + +import "fmt" + +// Secret represents the secret part of a response. +type Secret struct { + LeaseOptions + + // InternalData is JSON-encodable data that is stored with the secret. + // This will be sent back during a Renew/Revoke for storing internal data + // used for those operations. + InternalData map[string]interface{} `json:"internal_data" sentinel:""` + + // LeaseID is the ID returned to the user to manage this secret. + // This is generated by Vault core. Any set value will be ignored. + // For requests, this will always be blank. + LeaseID string `sentinel:""` +} + +func (s *Secret) Validate() error { + if s.TTL < 0 { + return fmt.Errorf("ttl duration must not be less than zero") + } + + return nil +} + +func (s *Secret) GoString() string { + return fmt.Sprintf("*%#v", *s) +} diff --git a/sdk/logical/storage.go b/sdk/logical/storage.go new file mode 100644 index 0000000..16ba60b --- /dev/null +++ b/sdk/logical/storage.go @@ -0,0 +1,166 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package logical
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/sdk/helper/jsonutil"
+)
+
+// ErrReadOnly is returned when a backend does not support
+// writing. This can be caused by a read-only replica or secondary
+// cluster operation.
+var ErrReadOnly = errors.New("cannot write to readonly storage")
+
+// ErrSetupReadOnly is returned when a write operation is attempted on a
+// storage while the backend is still being setup.
+var ErrSetupReadOnly = errors.New("cannot write to storage during setup")
+
+// Plugins using Paths.WriteForwardedStorage will need to use this sentinel
+// in their path to write cross-cluster. See the description of that parameter
+// for more information.
+const PBPWFClusterSentinel = "{{clusterId}}"
+
+// Storage is the way that logical backends are able to read/write data.
+type Storage interface {
+	List(context.Context, string) ([]string, error)
+	Get(context.Context, string) (*StorageEntry, error)
+	Put(context.Context, *StorageEntry) error
+	Delete(context.Context, string) error
+}
+
+// StorageEntry is the entry for an item in a Storage implementation.
+type StorageEntry struct {
+	Key      string
+	Value    []byte
+	SealWrap bool
+}
+
+// DecodeJSON decodes the 'Value' present in StorageEntry.
+func (e *StorageEntry) DecodeJSON(out interface{}) error {
+	return jsonutil.DecodeJSON(e.Value, out)
+}
+
+// StorageEntryJSON creates a StorageEntry with a JSON-encoded value.
+func StorageEntryJSON(k string, v interface{}) (*StorageEntry, error) {
+	encodedBytes, err := jsonutil.EncodeJSON(v)
+	if err != nil {
+		return nil, errwrap.Wrapf("failed to encode storage entry: {{err}}", err)
+	}
+
+	return &StorageEntry{
+		Key:   k,
+		Value: encodedBytes,
+	}, nil
+}
+
+type ClearableView interface {
+	List(context.Context, string) ([]string, error)
+	Delete(context.Context, string) error
+}
+
+// ScanView is used to scan all the keys in a view iteratively
+func ScanView(ctx context.Context, view ClearableView, cb func(path string)) error {
+	frontier := []string{""}
+	for len(frontier) > 0 {
+		n := len(frontier)
+		current := frontier[n-1]
+		frontier = frontier[:n-1]
+
+		// List the contents
+		contents, err := view.List(ctx, current)
+		if err != nil {
+			return errwrap.Wrapf(fmt.Sprintf("list failed at path %q: {{err}}", current), err)
+		}
+
+		// Handle the contents in the directory
+		for _, c := range contents {
+			// Exit if the context has been canceled
+			if ctx.Err() != nil {
+				return ctx.Err()
+			}
+			fullPath := current + c
+			if strings.HasSuffix(c, "/") {
+				frontier = append(frontier, fullPath)
+			} else {
+				cb(fullPath)
+			}
+		}
+	}
+	return nil
+}
+
+// CollectKeys is used to collect all the keys in a view
+func CollectKeys(ctx context.Context, view ClearableView) ([]string, error) {
+	return CollectKeysWithPrefix(ctx, view, "")
+}
+
+// CollectKeysWithPrefix is used to collect all the keys in a view with a given prefix string
+func CollectKeysWithPrefix(ctx context.Context, view ClearableView, prefix string) ([]string, error) {
+	var keys []string
+
+	cb := func(path string) {
+		if strings.HasPrefix(path, prefix) {
+			keys = append(keys, path)
+		}
+	}
+
+	// Scan for all the keys
+	if err := ScanView(ctx, view, cb); err != nil {
+		return nil, err
+	}
+	return keys, nil
+}
+
+// ClearView is used to delete all the keys in a view
+func ClearView(ctx context.Context, view ClearableView) error {
+	return ClearViewWithLogging(ctx, view, nil)
+}
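Editorial aside: a short sketch (illustrative, not part of the patch) of ScanView's iterative walk, exercised through CollectKeysWithPrefix against the in-memory storage defined later in this patch:

```go
package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/vault/sdk/logical"
)

func main() {
	ctx := context.Background()
	s := &logical.InmemStorage{}

	for _, key := range []string{"roles/web", "roles/db", "config"} {
		if err := s.Put(ctx, &logical.StorageEntry{Key: key}); err != nil {
			panic(err)
		}
	}

	// ScanView descends into "/"-suffixed entries and invokes the callback
	// on each leaf; CollectKeysWithPrefix layers a prefix filter on top.
	keys, err := logical.CollectKeysWithPrefix(ctx, s, "roles/")
	if err != nil {
		panic(err)
	}
	fmt.Println(keys) // [roles/db roles/web]
}
```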
+func ClearViewWithLogging(ctx context.Context, view ClearableView, logger hclog.Logger) error {
+	if view == nil {
+		return nil
+	}
+
+	if logger == nil {
+		logger = hclog.NewNullLogger()
+	}
+
+	// Collect all the keys
+	keys, err := CollectKeys(ctx, view)
+	if err != nil {
+		return err
+	}
+
+	logger.Debug("clearing view", "total_keys", len(keys))
+
+	// Delete all the keys
+	var pctDone int
+	for idx, key := range keys {
+		// Rather than keep trying to do stuff with a canceled context, bail;
+		// storage will fail anyway
+		if ctx.Err() != nil {
+			return ctx.Err()
+		}
+		if err := view.Delete(ctx, key); err != nil {
+			return err
+		}
+
+		newPctDone := idx * 100.0 / len(keys)
+		if int(newPctDone) > pctDone {
+			pctDone = int(newPctDone)
+			logger.Trace("view deletion progress", "percent", pctDone, "keys_deleted", idx)
+		}
+	}
+
+	logger.Debug("view cleared")
+
+	return nil
+}
diff --git a/sdk/logical/storage_inmem.go b/sdk/logical/storage_inmem.go
new file mode 100644
index 0000000..62ec582
--- /dev/null
+++ b/sdk/logical/storage_inmem.go
@@ -0,0 +1,90 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package logical
+
+import (
+	"context"
+	"sync"
+
+	"github.com/hashicorp/vault/sdk/physical"
+	"github.com/hashicorp/vault/sdk/physical/inmem"
+)
+
+// InmemStorage implements Storage and stores all data in memory. It is
+// basically a straight copy of physical.Inmem, but it prevents backends from
+// having to load all of physical's dependencies (which are legion) just to
+// have some testing storage.
+type InmemStorage struct {
+	underlying physical.Backend
+	once       sync.Once
+}
+
+func (s *InmemStorage) Get(ctx context.Context, key string) (*StorageEntry, error) {
+	s.once.Do(s.init)
+
+	entry, err := s.underlying.Get(ctx, key)
+	if err != nil {
+		return nil, err
+	}
+	if entry == nil {
+		return nil, nil
+	}
+	return &StorageEntry{
+		Key:      entry.Key,
+		Value:    entry.Value,
+		SealWrap: entry.SealWrap,
+	}, nil
+}
+
+func (s *InmemStorage) Put(ctx context.Context, entry *StorageEntry) error {
+	s.once.Do(s.init)
+
+	return s.underlying.Put(ctx, &physical.Entry{
+		Key:      entry.Key,
+		Value:    entry.Value,
+		SealWrap: entry.SealWrap,
+	})
+}
+
+func (s *InmemStorage) Delete(ctx context.Context, key string) error {
+	s.once.Do(s.init)
+
+	return s.underlying.Delete(ctx, key)
+}
+
+func (s *InmemStorage) List(ctx context.Context, prefix string) ([]string, error) {
+	s.once.Do(s.init)
+
+	return s.underlying.List(ctx, prefix)
+}
+
+func (s *InmemStorage) Underlying() *inmem.InmemBackend {
+	s.once.Do(s.init)
+
+	return s.underlying.(*inmem.InmemBackend)
+}
+
+func (s *InmemStorage) FailPut(fail bool) *InmemStorage {
+	s.Underlying().FailPut(fail)
+	return s
+}
+
+func (s *InmemStorage) FailGet(fail bool) *InmemStorage {
+	s.Underlying().FailGet(fail)
+	return s
+}
+
+func (s *InmemStorage) FailDelete(fail bool) *InmemStorage {
+	s.Underlying().FailDelete(fail)
+	return s
+}
+
+func (s *InmemStorage) FailList(fail bool) *InmemStorage {
+	s.Underlying().FailList(fail)
+	return s
+}
+
+func (s *InmemStorage) init() {
+	s.underlying, _ = inmem.NewInmem(nil, nil)
+}
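Editorial aside: the Fail* helpers above toggle fault injection on the underlying in-memory backend. A sketch of using them in a test, assuming (as the helpers imply) that an injected fault surfaces as an error from the corresponding operation:

```go
package main

import (
	"context"
	"fmt"

	"github.com/hashicorp/vault/sdk/logical"
)

func main() {
	ctx := context.Background()

	// Chainable: FailPut returns the storage so setup can be one expression.
	s := new(logical.InmemStorage).FailPut(true)

	err := s.Put(ctx, &logical.StorageEntry{Key: "foo", Value: []byte("bar")})
	fmt.Println(err != nil) // true: the injected fault is reported as an error

	// Clearing the flag restores normal behavior.
	s.FailPut(false)
	fmt.Println(s.Put(ctx, &logical.StorageEntry{Key: "foo", Value: []byte("bar")})) // <nil>
}
```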
diff --git a/sdk/logical/storage_inmem_test.go b/sdk/logical/storage_inmem_test.go
new file mode 100644
index 0000000..2ed776b
--- /dev/null
+++ b/sdk/logical/storage_inmem_test.go
@@ -0,0 +1,12 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package logical
+
+import (
+	"testing"
+)
+
+func TestInmemStorage(t *testing.T) {
+	TestStorage(t, new(InmemStorage))
+}
diff --git a/sdk/logical/storage_test.go b/sdk/logical/storage_test.go
new file mode 100644
index 0000000..1d6014d
--- /dev/null
+++ b/sdk/logical/storage_test.go
@@ -0,0 +1,104 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package logical
+
+import (
+	"context"
+	"testing"
+
+	"github.com/go-test/deep"
+)
+
+var keyList = []string{
+	"a",
+	"b",
+	"d",
+	"foo",
+	"foo42",
+	"foo/a/b/c",
+	"c/d/e/f/g",
+}
+
+func TestScanView(t *testing.T) {
+	s := prepKeyStorage(t)
+
+	keys := make([]string, 0)
+	err := ScanView(context.Background(), s, func(path string) {
+		keys = append(keys, path)
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if diff := deep.Equal(keys, keyList); diff != nil {
+		t.Fatal(diff)
+	}
+}
+
+func TestScanView_CancelContext(t *testing.T) {
+	s := prepKeyStorage(t)
+
+	ctx, cancelCtx := context.WithCancel(context.Background())
+	var i int
+	err := ScanView(ctx, s, func(path string) {
+		cancelCtx()
+		i++
+	})
+
+	if err == nil {
+		t.Error("Want context cancel err, got none")
+	}
+	if i != 1 {
+		t.Errorf("Want i==1, got %d", i)
+	}
+}
+
+func TestCollectKeys(t *testing.T) {
+	s := prepKeyStorage(t)
+
+	keys, err := CollectKeys(context.Background(), s)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if diff := deep.Equal(keys, keyList); diff != nil {
+		t.Fatal(diff)
+	}
+}
+
+func TestCollectKeysPrefix(t *testing.T) {
+	s := prepKeyStorage(t)
+
+	keys, err := CollectKeysWithPrefix(context.Background(), s, "foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	exp := []string{
+		"foo",
+		"foo42",
+		"foo/a/b/c",
+	}
+
+	if diff := deep.Equal(keys, exp); diff != nil {
+		t.Fatal(diff)
+	}
+}
+
+func prepKeyStorage(t *testing.T) Storage {
+	t.Helper()
+	s := &InmemStorage{}
+
+	for _, key := range keyList {
+		if err := s.Put(context.Background(), &StorageEntry{
+			Key:      key,
+			Value:    nil,
+			SealWrap: false,
+		}); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	return s
+}
diff --git a/sdk/logical/storage_view.go b/sdk/logical/storage_view.go
new file mode 100644
index 0000000..df40dca
--- /dev/null
+++ b/sdk/logical/storage_view.go
@@ -0,0 +1,113 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package logical
+
+import (
+	"context"
+	"errors"
+	"strings"
+)
+
+type StorageView struct {
+	storage Storage
+	prefix  string
+}
+
+var ErrRelativePath = errors.New("relative paths not supported")
+
+func NewStorageView(storage Storage, prefix string) *StorageView {
+	return &StorageView{
+		storage: storage,
+		prefix:  prefix,
+	}
+}
+
+// logical.Storage impl.
+func (s *StorageView) List(ctx context.Context, prefix string) ([]string, error) {
+	if err := s.SanityCheck(prefix); err != nil {
+		return nil, err
+	}
+	return s.storage.List(ctx, s.ExpandKey(prefix))
+}
+
+// logical.Storage impl.
+func (s *StorageView) Get(ctx context.Context, key string) (*StorageEntry, error) {
+	if err := s.SanityCheck(key); err != nil {
+		return nil, err
+	}
+	entry, err := s.storage.Get(ctx, s.ExpandKey(key))
+	if err != nil {
+		return nil, err
+	}
+	if entry == nil {
+		return nil, nil
+	}
+	entry.Key = s.TruncateKey(entry.Key)
+
+	return &StorageEntry{
+		Key:      entry.Key,
+		Value:    entry.Value,
+		SealWrap: entry.SealWrap,
+	}, nil
+}
+
+// logical.Storage impl.
+func (s *StorageView) Put(ctx context.Context, entry *StorageEntry) error { + if entry == nil { + return errors.New("cannot write nil entry") + } + + if err := s.SanityCheck(entry.Key); err != nil { + return err + } + + expandedKey := s.ExpandKey(entry.Key) + + nested := &StorageEntry{ + Key: expandedKey, + Value: entry.Value, + SealWrap: entry.SealWrap, + } + + return s.storage.Put(ctx, nested) +} + +// logical.Storage impl. +func (s *StorageView) Delete(ctx context.Context, key string) error { + if err := s.SanityCheck(key); err != nil { + return err + } + + expandedKey := s.ExpandKey(key) + + return s.storage.Delete(ctx, expandedKey) +} + +func (s *StorageView) Prefix() string { + return s.prefix +} + +// SubView constructs a nested sub-view using the given prefix +func (s *StorageView) SubView(prefix string) *StorageView { + sub := s.ExpandKey(prefix) + return &StorageView{storage: s.storage, prefix: sub} +} + +// SanityCheck is used to perform a sanity check on a key +func (s *StorageView) SanityCheck(key string) error { + if strings.Contains(key, "..") { + return ErrRelativePath + } + return nil +} + +// ExpandKey is used to expand to the full key path with the prefix +func (s *StorageView) ExpandKey(suffix string) string { + return s.prefix + suffix +} + +// TruncateKey is used to remove the prefix of the key +func (s *StorageView) TruncateKey(full string) string { + return strings.TrimPrefix(full, s.prefix) +} diff --git a/sdk/logical/system_view.go b/sdk/logical/system_view.go new file mode 100644 index 0000000..a4ec648 --- /dev/null +++ b/sdk/logical/system_view.go @@ -0,0 +1,264 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package logical + +import ( + "context" + "errors" + "fmt" + "io" + "time" + + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/license" + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/hashicorp/vault/sdk/helper/wrapping" +) + +// SystemView exposes system configuration information in a safe way +// for logical backends to consume +type SystemView interface { + // DefaultLeaseTTL returns the default lease TTL set in Vault configuration + DefaultLeaseTTL() time.Duration + + // MaxLeaseTTL returns the max lease TTL set in Vault configuration; backend + // authors should take care not to issue credentials that last longer than + // this value, as Vault will revoke them + MaxLeaseTTL() time.Duration + + // Returns true if the mount is tainted. A mount is tainted if it is in the + // process of being unmounted. This should only be used in special + // circumstances; a primary use-case is as a guard in revocation functions. + // If revocation of a backend's leases fails it can keep the unmounting + // process from being successful. If the reason for this failure is not + // relevant when the mount is tainted (for instance, saving a CRL to disk + // when the stored CRL will be removed during the unmounting process + // anyways), we can ignore the errors to allow unmounting to complete. + Tainted() bool + + // Returns true if caching is disabled. If true, no caches should be used, + // despite known slowdowns. 
+	CachingDisabled() bool
+
+	// When run from a system view attached to a request, indicates whether the
+	// request is affecting a local mount or not
+	LocalMount() bool
+
+	// ReplicationState indicates the state of cluster replication
+	ReplicationState() consts.ReplicationState
+
+	// HasFeature returns true if the feature is currently enabled
+	HasFeature(feature license.Features) bool
+
+	// ResponseWrapData wraps the given data in a cubbyhole and returns the
+	// token used to unwrap.
+	ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error)
+
+	// LookupPlugin looks into the plugin catalog for a plugin with the given
+	// name. Returns a PluginRunner or an error if a plugin cannot be found.
+	LookupPlugin(ctx context.Context, pluginName string, pluginType consts.PluginType) (*pluginutil.PluginRunner, error)
+
+	// LookupPluginVersion looks into the plugin catalog for a plugin with the given
+	// name and version. Returns a PluginRunner or an error if a plugin cannot be found.
+	LookupPluginVersion(ctx context.Context, pluginName string, pluginType consts.PluginType, version string) (*pluginutil.PluginRunner, error)
+
+	// ListVersionedPlugins returns information about all plugins of a certain
+	// type in the catalog, including any versioning information stored for them.
+	ListVersionedPlugins(ctx context.Context, pluginType consts.PluginType) ([]pluginutil.VersionedPlugin, error)
+
+	// NewPluginClient returns a client for managing the lifecycle of plugin
+	// processes
+	NewPluginClient(ctx context.Context, config pluginutil.PluginClientConfig) (pluginutil.PluginClient, error)
+
+	// MlockEnabled returns the configuration setting for enabling mlock on
+	// plugins.
+	MlockEnabled() bool
+
+	// EntityInfo returns a subset of information related to the identity entity
+	// for the given entity id
+	EntityInfo(entityID string) (*Entity, error)
+
+	// GroupsForEntity returns the group membership information for the provided
+	// entity id
+	GroupsForEntity(entityID string) ([]*Group, error)
+
+	// PluginEnv returns Vault environment information used by plugins
+	PluginEnv(context.Context) (*PluginEnvironment, error)
+
+	// VaultVersion returns the version string for the currently running Vault.
+	VaultVersion(context.Context) (string, error)
+
+	// GeneratePasswordFromPolicy generates a password from the policy referenced.
+	// If the policy does not exist, this will return an error.
+	GeneratePasswordFromPolicy(ctx context.Context, policyName string) (password string, err error)
+
+	// ClusterID returns the replication ClusterID, for use with path-based
+	// write forwarding (WriteForwardedPaths). This value will be templated
+	// in for the {{clusterId}} sentinel.
+ ClusterID(ctx context.Context) (string, error) +} + +type PasswordPolicy interface { + // Generate a random password + Generate(context.Context, io.Reader) (string, error) +} + +type ExtendedSystemView interface { + Auditor() Auditor + ForwardGenericRequest(context.Context, *Request) (*Response, error) + + // APILockShouldBlockRequest returns whether a namespace for the requested + // mount is locked and should be blocked + APILockShouldBlockRequest() (bool, error) +} + +type PasswordGenerator func() (password string, err error) + +type StaticSystemView struct { + DefaultLeaseTTLVal time.Duration + MaxLeaseTTLVal time.Duration + SudoPrivilegeVal bool + TaintedVal bool + CachingDisabledVal bool + Primary bool + EnableMlock bool + LocalMountVal bool + ReplicationStateVal consts.ReplicationState + EntityVal *Entity + GroupsVal []*Group + Features license.Features + PluginEnvironment *PluginEnvironment + PasswordPolicies map[string]PasswordGenerator + VersionString string + ClusterUUID string + APILockShouldBlockRequestVal bool +} + +type noopAuditor struct{} + +func (a noopAuditor) AuditRequest(ctx context.Context, input *LogInput) error { + return nil +} + +func (a noopAuditor) AuditResponse(ctx context.Context, input *LogInput) error { + return nil +} + +func (d StaticSystemView) Auditor() Auditor { + return noopAuditor{} +} + +func (d StaticSystemView) ForwardGenericRequest(ctx context.Context, req *Request) (*Response, error) { + return nil, errors.New("ForwardGenericRequest is not implemented in StaticSystemView") +} + +func (d StaticSystemView) DefaultLeaseTTL() time.Duration { + return d.DefaultLeaseTTLVal +} + +func (d StaticSystemView) MaxLeaseTTL() time.Duration { + return d.MaxLeaseTTLVal +} + +func (d StaticSystemView) SudoPrivilege(_ context.Context, path string, token string) bool { + return d.SudoPrivilegeVal +} + +func (d StaticSystemView) Tainted() bool { + return d.TaintedVal +} + +func (d StaticSystemView) CachingDisabled() bool { + return d.CachingDisabledVal +} + +func (d StaticSystemView) LocalMount() bool { + return d.LocalMountVal +} + +func (d StaticSystemView) ReplicationState() consts.ReplicationState { + return d.ReplicationStateVal +} + +func (d StaticSystemView) NewPluginClient(ctx context.Context, config pluginutil.PluginClientConfig) (pluginutil.PluginClient, error) { + return nil, errors.New("NewPluginClient is not implemented in StaticSystemView") +} + +func (d StaticSystemView) ResponseWrapData(_ context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) { + return nil, errors.New("ResponseWrapData is not implemented in StaticSystemView") +} + +func (d StaticSystemView) LookupPlugin(_ context.Context, _ string, _ consts.PluginType) (*pluginutil.PluginRunner, error) { + return nil, errors.New("LookupPlugin is not implemented in StaticSystemView") +} + +func (d StaticSystemView) LookupPluginVersion(_ context.Context, _ string, _ consts.PluginType, _ string) (*pluginutil.PluginRunner, error) { + return nil, errors.New("LookupPluginVersion is not implemented in StaticSystemView") +} + +func (d StaticSystemView) ListVersionedPlugins(_ context.Context, _ consts.PluginType) ([]pluginutil.VersionedPlugin, error) { + return nil, errors.New("ListVersionedPlugins is not implemented in StaticSystemView") +} + +func (d StaticSystemView) MlockEnabled() bool { + return d.EnableMlock +} + +func (d StaticSystemView) EntityInfo(entityID string) (*Entity, error) { + return d.EntityVal, nil +} + +func (d StaticSystemView) 
GroupsForEntity(entityID string) ([]*Group, error) { + return d.GroupsVal, nil +} + +func (d StaticSystemView) HasFeature(feature license.Features) bool { + return d.Features.HasFeature(feature) +} + +func (d StaticSystemView) PluginEnv(_ context.Context) (*PluginEnvironment, error) { + return d.PluginEnvironment, nil +} + +func (d StaticSystemView) VaultVersion(_ context.Context) (string, error) { + return d.VersionString, nil +} + +func (d StaticSystemView) GeneratePasswordFromPolicy(ctx context.Context, policyName string) (password string, err error) { + select { + case <-ctx.Done(): + return "", fmt.Errorf("context timed out") + default: + } + + if d.PasswordPolicies == nil { + return "", fmt.Errorf("password policy not found") + } + policy, exists := d.PasswordPolicies[policyName] + if !exists { + return "", fmt.Errorf("password policy not found") + } + return policy() +} + +func (d *StaticSystemView) SetPasswordPolicy(name string, generator PasswordGenerator) { + if d.PasswordPolicies == nil { + d.PasswordPolicies = map[string]PasswordGenerator{} + } + d.PasswordPolicies[name] = generator +} + +func (d *StaticSystemView) DeletePasswordPolicy(name string) (existed bool) { + _, existed = d.PasswordPolicies[name] + delete(d.PasswordPolicies, name) + return existed +} + +func (d StaticSystemView) ClusterID(ctx context.Context) (string, error) { + return d.ClusterUUID, nil +} + +func (d StaticSystemView) APILockShouldBlockRequest() (bool, error) { + return d.APILockShouldBlockRequestVal, nil +} diff --git a/sdk/logical/testing.go b/sdk/logical/testing.go new file mode 100644 index 0000000..a173c7c --- /dev/null +++ b/sdk/logical/testing.go @@ -0,0 +1,91 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package logical + +import ( + "context" + "reflect" + "time" + + testing "github.com/mitchellh/go-testing-interface" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/logging" +) + +// TestRequest is a helper to create a purely in-memory Request struct. +func TestRequest(t testing.T, op Operation, path string) *Request { + return &Request{ + Operation: op, + Path: path, + Data: make(map[string]interface{}), + Storage: new(InmemStorage), + Connection: &Connection{}, + } +} + +// TestStorage is a helper that can be used from unit tests to verify +// the behavior of a Storage impl. +func TestStorage(t testing.T, s Storage) { + keys, err := s.List(context.Background(), "") + if err != nil { + t.Fatalf("list error: %s", err) + } + if len(keys) > 0 { + t.Fatalf("should have no keys to start: %#v", keys) + } + + entry := &StorageEntry{Key: "foo", Value: []byte("bar")} + if err := s.Put(context.Background(), entry); err != nil { + t.Fatalf("put error: %s", err) + } + + actual, err := s.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("get error: %s", err) + } + if !reflect.DeepEqual(actual, entry) { + t.Fatalf("wrong value. 
Expected: %#v\nGot: %#v", entry, actual)
+	}
+
+	keys, err = s.List(context.Background(), "")
+	if err != nil {
+		t.Fatalf("list error: %s", err)
+	}
+	if !reflect.DeepEqual(keys, []string{"foo"}) {
+		t.Fatalf("bad keys: %#v", keys)
+	}
+
+	if err := s.Delete(context.Background(), "foo"); err != nil {
+		t.Fatalf("delete error: %s", err)
+	}
+
+	keys, err = s.List(context.Background(), "")
+	if err != nil {
+		t.Fatalf("list error: %s", err)
+	}
+	if len(keys) > 0 {
+		t.Fatalf("should have no keys after delete: %#v", keys)
+	}
+}
+
+func TestSystemView() *StaticSystemView {
+	defaultLeaseTTLVal := time.Hour * 24
+	maxLeaseTTLVal := time.Hour * 24 * 2
+	return &StaticSystemView{
+		DefaultLeaseTTLVal: defaultLeaseTTLVal,
+		MaxLeaseTTLVal:     maxLeaseTTLVal,
+		VersionString:      "testVersionString",
+	}
+}
+
+func TestBackendConfig() *BackendConfig {
+	bc := &BackendConfig{
+		Logger: logging.NewVaultLogger(log.Trace),
+		System: TestSystemView(),
+		Config: make(map[string]string),
+	}
+
+	return bc
+}
diff --git a/sdk/logical/token.go b/sdk/logical/token.go
new file mode 100644
index 0000000..a27a73a
--- /dev/null
+++ b/sdk/logical/token.go
@@ -0,0 +1,307 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package logical
+
+import (
+	"crypto/sha256"
+	"encoding/base64"
+	"fmt"
+	"sort"
+	"strings"
+	"time"
+
+	sockaddr "github.com/hashicorp/go-sockaddr"
+)
+
+type TokenType uint8
+
+const (
+	// TokenTypeDefault means "use the default, if any, that is currently set
+	// on the mount". If not set, results in a Service token.
+	TokenTypeDefault TokenType = iota
+
+	// TokenTypeService is a "normal" Vault token for long-lived services
+	TokenTypeService
+
+	// TokenTypeBatch is a batch token
+	TokenTypeBatch
+
+	// TokenTypeDefaultService configured on a mount, means that if
+	// TokenTypeDefault is sent back by the mount, create Service tokens
+	TokenTypeDefaultService
+
+	// TokenTypeDefaultBatch configured on a mount, means that if
+	// TokenTypeDefault is sent back by the mount, create Batch tokens
+	TokenTypeDefaultBatch
+
+	// ClientIDTWEDelimiter is the delimiter between the string fields used to
+	// generate a client ID for tokens without entities. This is the 0
+	// character, which is a non-printable character. Please see
+	// unicode.IsPrint for details.
+	ClientIDTWEDelimiter = rune('\x00')
+
+	// SortedPoliciesTWEDelimiter is the delimiter between each policy in the
+	// sorted policies used to generate a client ID for tokens without
+	// entities. This is the 127 character, which is a non-printable
+	// character. Please see unicode.IsPrint for details.
+ SortedPoliciesTWEDelimiter = rune('\x7F') +) + +func (t *TokenType) UnmarshalJSON(b []byte) error { + if len(b) == 1 { + *t = TokenType(b[0] - '0') + return nil + } + + // Handle upgrade from pre-1.2 where we were serialized as string: + s := string(b) + switch s { + case `"default"`, `""`: + *t = TokenTypeDefault + case `"service"`: + *t = TokenTypeService + case `"batch"`: + *t = TokenTypeBatch + case `"default-service"`: + *t = TokenTypeDefaultService + case `"default-batch"`: + *t = TokenTypeDefaultBatch + default: + return fmt.Errorf("unknown token type %q", s) + } + return nil +} + +func (t TokenType) String() string { + switch t { + case TokenTypeDefault: + return "default" + case TokenTypeService: + return "service" + case TokenTypeBatch: + return "batch" + case TokenTypeDefaultService: + return "default-service" + case TokenTypeDefaultBatch: + return "default-batch" + default: + panic("unreachable") + } +} + +// TokenEntry is used to represent a given token +type TokenEntry struct { + Type TokenType `json:"type" mapstructure:"type" structs:"type" sentinel:""` + + // ID of this entry, generally a random UUID + ID string `json:"id" mapstructure:"id" structs:"id" sentinel:""` + + // ExternalID is the ID of a newly created service + // token that will be returned to a user + ExternalID string `json:"-"` + + // Accessor for this token, a random UUID + Accessor string `json:"accessor" mapstructure:"accessor" structs:"accessor" sentinel:""` + + // Parent token, used for revocation trees + Parent string `json:"parent" mapstructure:"parent" structs:"parent" sentinel:""` + + // Which named policies should be used + Policies []string `json:"policies" mapstructure:"policies" structs:"policies"` + + // InlinePolicy specifies ACL rules to be applied to this token entry. + InlinePolicy string `json:"inline_policy" mapstructure:"inline_policy" structs:"inline_policy"` + + // Used for audit trails, this is something like "auth/user/login" + Path string `json:"path" mapstructure:"path" structs:"path"` + + // Used for auditing. This could include things like "source", "user", "ip" + Meta map[string]string `json:"meta" mapstructure:"meta" structs:"meta" sentinel:"meta"` + + // InternalMeta is used to store internal metadata. This metadata will not be audit logged or returned from lookup APIs. + InternalMeta map[string]string `json:"internal_meta" mapstructure:"internal_meta" structs:"internal_meta"` + + // Used for operators to be able to associate with the source + DisplayName string `json:"display_name" mapstructure:"display_name" structs:"display_name"` + + // Used to restrict the number of uses (zero is unlimited). This is to + // support one-time-tokens (generalized). There are a few special values: + // if it's -1 it has run through its use counts and is executing its final + // use; if it's -2 it is tainted, which means revocation is currently + // running on it; and if it's -3 it's also tainted but revocation + // previously ran and failed, so this hints the tidy function to try it + // again. 
+	NumUses int `json:"num_uses" mapstructure:"num_uses" structs:"num_uses"`
+
+	// Time of token creation
+	CreationTime int64 `json:"creation_time" mapstructure:"creation_time" structs:"creation_time" sentinel:""`
+
+	// Duration set when token was created
+	TTL time.Duration `json:"ttl" mapstructure:"ttl" structs:"ttl" sentinel:""`
+
+	// Explicit maximum TTL on the token
+	ExplicitMaxTTL time.Duration `json:"explicit_max_ttl" mapstructure:"explicit_max_ttl" structs:"explicit_max_ttl" sentinel:""`
+
+	// If set, the role that was used for parameters at creation time
+	Role string `json:"role" mapstructure:"role" structs:"role"`
+
+	// If set, the period of the token. This is only used when created directly
+	// through the create endpoint; periods managed by roles or other auth
+	// backends are subject to those renewal rules.
+	Period time.Duration `json:"period" mapstructure:"period" structs:"period" sentinel:""`
+
+	// These are the deprecated fields
+	DisplayNameDeprecated    string        `json:"DisplayName" mapstructure:"DisplayName" structs:"DisplayName" sentinel:""`
+	NumUsesDeprecated        int           `json:"NumUses" mapstructure:"NumUses" structs:"NumUses" sentinel:""`
+	CreationTimeDeprecated   int64         `json:"CreationTime" mapstructure:"CreationTime" structs:"CreationTime" sentinel:""`
+	ExplicitMaxTTLDeprecated time.Duration `json:"ExplicitMaxTTL" mapstructure:"ExplicitMaxTTL" structs:"ExplicitMaxTTL" sentinel:""`
+
+	// EntityID is the ID of the entity associated with this token.
+	EntityID string `json:"entity_id" mapstructure:"entity_id" structs:"entity_id"`
+
+	// If NoIdentityPolicies is true, the token will not inherit
+	// identity policies from the associated EntityID.
+	NoIdentityPolicies bool `json:"no_identity_policies" mapstructure:"no_identity_policies" structs:"no_identity_policies"`
+
+	// The set of CIDRs that this token can be used with
+	BoundCIDRs []*sockaddr.SockAddrMarshaler `json:"bound_cidrs" sentinel:""`
+
+	// NamespaceID is the identifier of the namespace to which this token is
+	// confined. Do not return this value over the API when the token is
+	// being looked up.
+	NamespaceID string `json:"namespace_id" mapstructure:"namespace_id" structs:"namespace_id" sentinel:""`
+
+	// CubbyholeID is the identifier of the cubbyhole storage belonging to this
+	// token
+	CubbyholeID string `json:"cubbyhole_id" mapstructure:"cubbyhole_id" structs:"cubbyhole_id" sentinel:""`
+}
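Editorial aside: a sketch (illustrative, not part of the patch) of the client-ID derivation implemented by CreateClientID below, for tokens with and without an associated entity:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/logical"
)

func main() {
	// A token tied to an entity simply reuses the entity ID as its client ID.
	te := &logical.TokenEntry{EntityID: "entity-123"}
	id, isTWE := te.CreateClientID()
	fmt.Println(id, isTWE) // entity-123 false

	// A token without an entity (TWE) derives a stable hash from its
	// sorted policies plus namespace ID, joined by non-printable delimiters.
	twe := &logical.TokenEntry{
		NamespaceID: "ns1",
		Policies:    []string{"b", "a"},
	}
	id, isTWE = twe.CreateClientID()
	fmt.Println(id, isTWE) // base64(SHA256("a\x7Fb\x00ns1")) true
}
```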
+// CreateClientID returns the client ID, and a boolean which is false if the
+// token has an associated entity, and true otherwise
+func (te *TokenEntry) CreateClientID() (string, bool) {
+	var clientIDInputBuilder strings.Builder
+
+	// if entry has an associated entity ID, return it
+	if te.EntityID != "" {
+		return te.EntityID, false
+	}
+
+	// The entry is associated with a TWE (token without entity). In this case
+	// we must create a client ID by calculating the following formula:
+	// clientID = SHA256(sorted policies + namespace)
+
+	// Step 1: Copy entry policies to a new slice
+	sortedPolicies := make([]string, len(te.Policies))
+	copy(sortedPolicies, te.Policies)
+
+	// Step 2: Sort and join copied policies
+	sort.Strings(sortedPolicies)
+	for _, pol := range sortedPolicies {
+		clientIDInputBuilder.WriteRune(SortedPoliciesTWEDelimiter)
+		clientIDInputBuilder.WriteString(pol)
+	}
+
+	// Step 3: Add namespace ID
+	clientIDInputBuilder.WriteRune(ClientIDTWEDelimiter)
+	clientIDInputBuilder.WriteString(te.NamespaceID)
+
+	if clientIDInputBuilder.Len() == 0 {
+		return "", true
+	}
+	// Step 4: Remove the first character in the string, as it's an unnecessary delimiter
+	clientIDInput := clientIDInputBuilder.String()[1:]
+
+	// Step 5: Hash the input
+	hashed := sha256.Sum256([]byte(clientIDInput))
+	return base64.StdEncoding.EncodeToString(hashed[:]), true
+}
+
+func (te *TokenEntry) SentinelGet(key string) (interface{}, error) {
+	if te == nil {
+		return nil, nil
+	}
+	switch key {
+	case "policies":
+		return te.Policies, nil
+
+	case "path":
+		return te.Path, nil
+
+	case "display_name":
+		return te.DisplayName, nil
+
+	case "num_uses":
+		return te.NumUses, nil
+
+	case "role":
+		return te.Role, nil
+
+	case "entity_id":
+		return te.EntityID, nil
+
+	case "period":
+		return te.Period, nil
+
+	case "period_seconds":
+		return int64(te.Period.Seconds()), nil
+
+	case "explicit_max_ttl":
+		return te.ExplicitMaxTTL, nil
+
+	case "explicit_max_ttl_seconds":
+		return int64(te.ExplicitMaxTTL.Seconds()), nil
+
+	case "creation_ttl":
+		return te.TTL, nil
+
+	case "creation_ttl_seconds":
+		return int64(te.TTL.Seconds()), nil
+
+	case "creation_time":
+		return time.Unix(te.CreationTime, 0).Format(time.RFC3339Nano), nil
+
+	case "creation_time_unix":
+		return time.Unix(te.CreationTime, 0), nil
+
+	case "meta", "metadata":
+		return te.Meta, nil
+
+	case "type":
+		teType := te.Type
+		switch teType {
+		case TokenTypeBatch, TokenTypeService:
+		case TokenTypeDefault:
+			teType = TokenTypeService
+		default:
+			return "unknown", nil
+		}
+		return teType.String(), nil
+	}
+
+	return nil, nil
+}
+
+func (te *TokenEntry) SentinelKeys() []string {
+	return []string{
+		"period",
+		"period_seconds",
+		"explicit_max_ttl",
+		"explicit_max_ttl_seconds",
+		"creation_ttl",
+		"creation_ttl_seconds",
+		"creation_time",
+		"creation_time_unix",
+		"meta",
+		"metadata",
+		"type",
+	}
+}
+
+// IsRoot returns true if the token has the single "root" policy; it returns
+// false for a nil (nonexistent) token
+func (te *TokenEntry) IsRoot() bool {
+	if te == nil {
+		return false
+	}
+
+	return len(te.Policies) == 1 && te.Policies[0] == "root"
+}
diff --git a/sdk/logical/token_test.go b/sdk/logical/token_test.go
new file mode 100644
index 0000000..641d688
--- /dev/null
+++ b/sdk/logical/token_test.go
@@ -0,0 +1,106 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package logical + +import ( + "crypto/sha256" + "encoding/base64" + "encoding/json" + "testing" +) + +func TestJSONSerialization(t *testing.T) { + tt := TokenTypeDefaultBatch + s, err := json.Marshal(tt) + if err != nil { + t.Fatal(err) + } + + var utt TokenType + err = json.Unmarshal(s, &utt) + if err != nil { + t.Fatal(err) + } + + if tt != utt { + t.Fatalf("expected %v, got %v", tt, utt) + } + + utt = TokenTypeDefault + err = json.Unmarshal([]byte(`"default-batch"`), &utt) + if err != nil { + t.Fatal(err) + } + if tt != utt { + t.Fatalf("expected %v, got %v", tt, utt) + } + + // Test on an empty value, which should unmarshal into TokenTypeDefault + tt = TokenTypeDefault + err = json.Unmarshal([]byte(`""`), &utt) + if err != nil { + t.Fatal(err) + } + if tt != utt { + t.Fatalf("expected %v, got %v", tt, utt) + } +} + +// TestCreateClientID verifies that CreateClientID uses the entity ID for a token +// entry if one exists, and creates an appropriate client ID otherwise. +func TestCreateClientID(t *testing.T) { + entry := TokenEntry{NamespaceID: "namespaceFoo", Policies: []string{"bar", "baz", "foo", "banana"}} + id, isTWE := entry.CreateClientID() + if !isTWE { + t.Fatalf("TWE token should return true value in isTWE bool") + } + expectedIDPlaintext := "banana" + string(SortedPoliciesTWEDelimiter) + "bar" + + string(SortedPoliciesTWEDelimiter) + "baz" + + string(SortedPoliciesTWEDelimiter) + "foo" + string(ClientIDTWEDelimiter) + "namespaceFoo" + + hashed := sha256.Sum256([]byte(expectedIDPlaintext)) + expectedID := base64.StdEncoding.EncodeToString(hashed[:]) + if expectedID != id { + t.Fatalf("wrong ID: expected %s, found %s", expectedID, id) + } + // Test with entityID + entry = TokenEntry{EntityID: "entityFoo", NamespaceID: "namespaceFoo", Policies: []string{"bar", "baz", "foo", "banana"}} + id, isTWE = entry.CreateClientID() + if isTWE { + t.Fatalf("token with entity should return false value in isTWE bool") + } + if id != "entityFoo" { + t.Fatalf("client ID should be entity ID") + } + + // Test without namespace + entry = TokenEntry{Policies: []string{"bar", "baz", "foo", "banana"}} + id, isTWE = entry.CreateClientID() + if !isTWE { + t.Fatalf("TWE token should return true value in isTWE bool") + } + expectedIDPlaintext = "banana" + string(SortedPoliciesTWEDelimiter) + "bar" + + string(SortedPoliciesTWEDelimiter) + "baz" + + string(SortedPoliciesTWEDelimiter) + "foo" + string(ClientIDTWEDelimiter) + + hashed = sha256.Sum256([]byte(expectedIDPlaintext)) + expectedID = base64.StdEncoding.EncodeToString(hashed[:]) + if expectedID != id { + t.Fatalf("wrong ID: expected %s, found %s", expectedID, id) + } + + // Test without policies + entry = TokenEntry{NamespaceID: "namespaceFoo"} + id, isTWE = entry.CreateClientID() + if !isTWE { + t.Fatalf("TWE token should return true value in isTWE bool") + } + expectedIDPlaintext = "namespaceFoo" + + hashed = sha256.Sum256([]byte(expectedIDPlaintext)) + expectedID = base64.StdEncoding.EncodeToString(hashed[:]) + if expectedID != id { + t.Fatalf("wrong ID: expected %s, found %s", expectedID, id) + } +} diff --git a/sdk/logical/translate_response.go b/sdk/logical/translate_response.go new file mode 100644 index 0000000..ef5ba5f --- /dev/null +++ b/sdk/logical/translate_response.go @@ -0,0 +1,164 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package logical + +import ( + "bytes" + "encoding/json" + "fmt" + "time" +) + +// This logic was pulled from the http package so that it can be used for +// encoding wrapped responses as well. It simply translates the logical +// response to an http response, with the values we want and omitting the +// values we don't. +func LogicalResponseToHTTPResponse(input *Response) *HTTPResponse { + httpResp := &HTTPResponse{ + Data: input.Data, + Warnings: input.Warnings, + Headers: input.Headers, + } + + if input.Secret != nil { + httpResp.LeaseID = input.Secret.LeaseID + httpResp.Renewable = input.Secret.Renewable + httpResp.LeaseDuration = int(input.Secret.TTL.Seconds()) + } + + // If we have authentication information, then + // set up the result structure. + if input.Auth != nil { + httpResp.Auth = &HTTPAuth{ + ClientToken: input.Auth.ClientToken, + Accessor: input.Auth.Accessor, + Policies: input.Auth.Policies, + TokenPolicies: input.Auth.TokenPolicies, + IdentityPolicies: input.Auth.IdentityPolicies, + Metadata: input.Auth.Metadata, + LeaseDuration: int(input.Auth.TTL.Seconds()), + Renewable: input.Auth.Renewable, + EntityID: input.Auth.EntityID, + TokenType: input.Auth.TokenType.String(), + Orphan: input.Auth.Orphan, + MFARequirement: input.Auth.MFARequirement, + NumUses: input.Auth.NumUses, + } + } + + return httpResp +} + +func HTTPResponseToLogicalResponse(input *HTTPResponse) *Response { + logicalResp := &Response{ + Data: input.Data, + Warnings: input.Warnings, + Headers: input.Headers, + } + + if input.LeaseID != "" { + logicalResp.Secret = &Secret{ + LeaseID: input.LeaseID, + } + logicalResp.Secret.Renewable = input.Renewable + logicalResp.Secret.TTL = time.Second * time.Duration(input.LeaseDuration) + } + + if input.Auth != nil { + logicalResp.Auth = &Auth{ + ClientToken: input.Auth.ClientToken, + Accessor: input.Auth.Accessor, + Policies: input.Auth.Policies, + TokenPolicies: input.Auth.TokenPolicies, + IdentityPolicies: input.Auth.IdentityPolicies, + Metadata: input.Auth.Metadata, + EntityID: input.Auth.EntityID, + Orphan: input.Auth.Orphan, + } + logicalResp.Auth.Renewable = input.Auth.Renewable + logicalResp.Auth.TTL = time.Second * time.Duration(input.Auth.LeaseDuration) + switch input.Auth.TokenType { + case "service": + logicalResp.Auth.TokenType = TokenTypeService + case "batch": + logicalResp.Auth.TokenType = TokenTypeBatch + } + } + + return logicalResp +} + +type HTTPResponse struct { + RequestID string `json:"request_id"` + LeaseID string `json:"lease_id"` + Renewable bool `json:"renewable"` + LeaseDuration int `json:"lease_duration"` + Data map[string]interface{} `json:"data"` + WrapInfo *HTTPWrapInfo `json:"wrap_info"` + Warnings []string `json:"warnings"` + Headers map[string][]string `json:"-"` + Auth *HTTPAuth `json:"auth"` +} + +type HTTPAuth struct { + ClientToken string `json:"client_token"` + Accessor string `json:"accessor"` + Policies []string `json:"policies"` + TokenPolicies []string `json:"token_policies,omitempty"` + IdentityPolicies []string `json:"identity_policies,omitempty"` + Metadata map[string]string `json:"metadata"` + LeaseDuration int `json:"lease_duration"` + Renewable bool `json:"renewable"` + EntityID string `json:"entity_id"` + TokenType string `json:"token_type"` + Orphan bool `json:"orphan"` + MFARequirement *MFARequirement `json:"mfa_requirement"` + NumUses int `json:"num_uses"` +} + +type HTTPWrapInfo struct { + Token string `json:"token"` + Accessor string `json:"accessor"` + TTL int `json:"ttl"` + 
CreationTime string `json:"creation_time"`
+	CreationPath string `json:"creation_path"`
+	WrappedAccessor string `json:"wrapped_accessor,omitempty"`
+}
+
+type HTTPSysInjector struct {
+	Response *HTTPResponse
+}
+
+func (h HTTPSysInjector) MarshalJSON() ([]byte, error) {
+	j, err := json.Marshal(h.Response)
+	if err != nil {
+		return nil, err
+	}
+	// Fast path: no data or empty data
+	if len(h.Response.Data) == 0 {
+		return j, nil
+	}
+	// Marshaling a response will always be a JSON object, meaning it will
+	// always start with '{', so we hijack this to prepend necessary values
+
+	var buf bytes.Buffer
+	buf.WriteRune('{')
+	for k, v := range h.Response.Data {
+		// Marshal each key/value individually
+		mk, err := json.Marshal(k)
+		if err != nil {
+			return nil, err
+		}
+		mv, err := json.Marshal(v)
+		if err != nil {
+			return nil, err
+		}
+		// Write into the final buffer. We'll never have a valid response
+		// without any fields so we can unconditionally add a comma after each.
+		buf.WriteString(fmt.Sprintf("%s: %s, ", mk, mv))
+	}
+	// Add the rest, without the first '{'
+	buf.Write(j[1:])
+	return buf.Bytes(), nil
+}
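+// A rough sketch of the effect (hypothetical values): if Data is
+// {"type": "shamir"}, the marshaled output is the usual HTTPResponse object
+// with the data keys additionally hoisted to the top level, i.e.
+//
+//	{"type": "shamir", "request_id": "", "lease_id": "", ..., "data": {"type": "shamir"}}
+//
+// rather than leaving them only nested under the "data" key.
diff --git a/sdk/logical/version.pb.go b/sdk/logical/version.pb.go
new file mode 100644
index 0000000..9962824
--- /dev/null
+++ b/sdk/logical/version.pb.go
@@ -0,0 +1,207 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.12
+// source: sdk/logical/version.proto
+
+package logical
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Empty struct {
+	state protoimpl.MessageState
+	sizeCache protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+}
+
+func (x *Empty) Reset() {
+	*x = Empty{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_sdk_logical_version_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Empty) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Empty) ProtoMessage() {}
+
+func (x *Empty) ProtoReflect() protoreflect.Message {
+	mi := &file_sdk_logical_version_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Empty.ProtoReflect.Descriptor instead.
+func (*Empty) Descriptor() ([]byte, []int) {
+	return file_sdk_logical_version_proto_rawDescGZIP(), []int{0}
+}
+
+// VersionReply is the reply for the Version method.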
+type VersionReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PluginVersion string `protobuf:"bytes,1,opt,name=plugin_version,json=pluginVersion,proto3" json:"plugin_version,omitempty"` +} + +func (x *VersionReply) Reset() { + *x = VersionReply{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_logical_version_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VersionReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VersionReply) ProtoMessage() {} + +func (x *VersionReply) ProtoReflect() protoreflect.Message { + mi := &file_sdk_logical_version_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VersionReply.ProtoReflect.Descriptor instead. +func (*VersionReply) Descriptor() ([]byte, []int) { + return file_sdk_logical_version_proto_rawDescGZIP(), []int{1} +} + +func (x *VersionReply) GetPluginVersion() string { + if x != nil { + return x.PluginVersion + } + return "" +} + +var File_sdk_logical_version_proto protoreflect.FileDescriptor + +var file_sdk_logical_version_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x6c, 0x6f, 0x67, + 0x69, 0x63, 0x61, 0x6c, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x35, 0x0a, + 0x0c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x25, 0x0a, + 0x0e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x32, 0x41, 0x0a, 0x0d, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x1a, 0x15, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x42, 0x28, 0x5a, 0x26, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, + 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, + 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_sdk_logical_version_proto_rawDescOnce sync.Once + file_sdk_logical_version_proto_rawDescData = file_sdk_logical_version_proto_rawDesc +) + +func file_sdk_logical_version_proto_rawDescGZIP() []byte { + file_sdk_logical_version_proto_rawDescOnce.Do(func() { + file_sdk_logical_version_proto_rawDescData = protoimpl.X.CompressGZIP(file_sdk_logical_version_proto_rawDescData) + }) + return file_sdk_logical_version_proto_rawDescData +} + +var file_sdk_logical_version_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_sdk_logical_version_proto_goTypes = []interface{}{ + (*Empty)(nil), // 0: logical.Empty + (*VersionReply)(nil), // 1: logical.VersionReply +} +var file_sdk_logical_version_proto_depIdxs = []int32{ + 0, // 0: logical.PluginVersion.Version:input_type -> logical.Empty + 1, // 1: 
logical.PluginVersion.Version:output_type -> logical.VersionReply + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_sdk_logical_version_proto_init() } +func file_sdk_logical_version_proto_init() { + if File_sdk_logical_version_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_sdk_logical_version_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Empty); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_logical_version_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VersionReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_sdk_logical_version_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_sdk_logical_version_proto_goTypes, + DependencyIndexes: file_sdk_logical_version_proto_depIdxs, + MessageInfos: file_sdk_logical_version_proto_msgTypes, + }.Build() + File_sdk_logical_version_proto = out.File + file_sdk_logical_version_proto_rawDesc = nil + file_sdk_logical_version_proto_goTypes = nil + file_sdk_logical_version_proto_depIdxs = nil +} diff --git a/sdk/logical/version.proto b/sdk/logical/version.proto new file mode 100644 index 0000000..860ddc5 --- /dev/null +++ b/sdk/logical/version.proto @@ -0,0 +1,20 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +syntax = "proto3"; +package logical; + +option go_package = "github.com/hashicorp/vault/sdk/logical"; + +message Empty {} + +// VersionReply is the reply for the Version method. +message VersionReply { + string plugin_version = 1; +} + +// PluginVersion is an optional RPC service implemented by plugins. +service PluginVersion { + // Version returns version information for the plugin. + rpc Version(Empty) returns (VersionReply); +} \ No newline at end of file diff --git a/sdk/logical/version_grpc.pb.go b/sdk/logical/version_grpc.pb.go new file mode 100644 index 0000000..a69e970 --- /dev/null +++ b/sdk/logical/version_grpc.pb.go @@ -0,0 +1,103 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package logical + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// PluginVersionClient is the client API for PluginVersion service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type PluginVersionClient interface { + // Version returns version information for the plugin. 
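+	// A client-side usage sketch (conn here is a hypothetical
+	// *grpc.ClientConn to the plugin):
+	//
+	//	client := NewPluginVersionClient(conn)
+	//	reply, err := client.Version(ctx, &Empty{})
+	//	// reply.GetPluginVersion() holds the plugin's reported version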
+ Version(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*VersionReply, error) +} + +type pluginVersionClient struct { + cc grpc.ClientConnInterface +} + +func NewPluginVersionClient(cc grpc.ClientConnInterface) PluginVersionClient { + return &pluginVersionClient{cc} +} + +func (c *pluginVersionClient) Version(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*VersionReply, error) { + out := new(VersionReply) + err := c.cc.Invoke(ctx, "/logical.PluginVersion/Version", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// PluginVersionServer is the server API for PluginVersion service. +// All implementations must embed UnimplementedPluginVersionServer +// for forward compatibility +type PluginVersionServer interface { + // Version returns version information for the plugin. + Version(context.Context, *Empty) (*VersionReply, error) + mustEmbedUnimplementedPluginVersionServer() +} + +// UnimplementedPluginVersionServer must be embedded to have forward compatible implementations. +type UnimplementedPluginVersionServer struct { +} + +func (UnimplementedPluginVersionServer) Version(context.Context, *Empty) (*VersionReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method Version not implemented") +} +func (UnimplementedPluginVersionServer) mustEmbedUnimplementedPluginVersionServer() {} + +// UnsafePluginVersionServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to PluginVersionServer will +// result in compilation errors. +type UnsafePluginVersionServer interface { + mustEmbedUnimplementedPluginVersionServer() +} + +func RegisterPluginVersionServer(s grpc.ServiceRegistrar, srv PluginVersionServer) { + s.RegisterService(&PluginVersion_ServiceDesc, srv) +} + +func _PluginVersion_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PluginVersionServer).Version(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/logical.PluginVersion/Version", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PluginVersionServer).Version(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +// PluginVersion_ServiceDesc is the grpc.ServiceDesc for PluginVersion service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var PluginVersion_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "logical.PluginVersion", + HandlerType: (*PluginVersionServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Version", + Handler: _PluginVersion_Version_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "sdk/logical/version.proto", +} diff --git a/sdk/physical/cache.go b/sdk/physical/cache.go new file mode 100644 index 0000000..874d6c5 --- /dev/null +++ b/sdk/physical/cache.go @@ -0,0 +1,273 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package physical
+
+import (
+	"context"
+	"sync/atomic"
+
+	metrics "github.com/armon/go-metrics"
+	log "github.com/hashicorp/go-hclog"
+	lru "github.com/hashicorp/golang-lru"
+	"github.com/hashicorp/vault/sdk/helper/locksutil"
+	"github.com/hashicorp/vault/sdk/helper/pathmanager"
+)
+
+const (
+	// DefaultCacheSize is used if no cache size is specified for NewCache
+	DefaultCacheSize = 128 * 1024
+
+	// refreshCacheCtxKey is a ctx key that denotes the cache should be
+	// refreshed during a Get call.
+	refreshCacheCtxKey = "refresh_cache"
+)
+
+// These paths don't need to be cached by the LRU cache. This should
+// particularly help reduce memory pressure when unsealing.
+var cacheExceptionsPaths = []string{
+	"wal/logs/",
+	"index/pages/",
+	"index-dr/pages/",
+	"sys/expire/",
+	"core/poison-pill",
+	"core/raft/tls",
+
+	// Add barrierSealConfigPath and recoverySealConfigPlaintextPath to the cache
+	// exceptions to avoid unseal errors. See VAULT-17227
+	"core/seal-config",
+	"core/recovery-config",
+
+	// We need to make sure the persisted license is read from storage
+	// so that changes to the autoloaded license on the active node
+	// are observed on the perfStandby nodes
+	"core/autoloaded-license",
+}
+
+// CacheRefreshContext returns a context with an added value denoting if the
+// cache should attempt a refresh.
+func CacheRefreshContext(ctx context.Context, r bool) context.Context {
+	return context.WithValue(ctx, refreshCacheCtxKey, r)
+}
+
+// cacheRefreshFromContext is a helper to look up if the provided context is
+// requesting a cache refresh.
+func cacheRefreshFromContext(ctx context.Context) bool {
+	r, ok := ctx.Value(refreshCacheCtxKey).(bool)
+	if !ok {
+		return false
+	}
+	return r
+}
+
+// Cache is used to wrap an underlying physical backend
+// and provide an LRU cache layer on top. Most of the reads done by
+// Vault are for policy objects so there is a large read reduction
+// by using a simple write-through cache.
+type Cache struct {
+	backend Backend
+	lru *lru.TwoQueueCache
+	locks []*locksutil.LockEntry
+	logger log.Logger
+	enabled *uint32
+	cacheExceptions *pathmanager.PathManager
+	metricSink metrics.MetricSink
+}
+
+// TransactionalCache is a Cache that wraps a transactional physical backend
+type TransactionalCache struct {
+	*Cache
+	Transactional
+}
+
+// Verify Cache satisfies the correct interfaces
+var (
+	_ ToggleablePurgemonster = (*Cache)(nil)
+	_ ToggleablePurgemonster = (*TransactionalCache)(nil)
+	_ Backend = (*Cache)(nil)
+	_ Transactional = (*TransactionalCache)(nil)
+)
+
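+// A minimal usage sketch, assuming an existing Backend b, a logger, and a
+// metrics sink (the names are hypothetical). The cache starts disabled and
+// must be switched on explicitly; a read can bypass the cached value via the
+// context helper:
+//
+//	c := NewCache(b, 0, logger, metricSink)
+//	c.SetEnabled(true)
+//	ent, err := c.Get(ctx, "some/path")                           // cached
+//	ent, err = c.Get(CacheRefreshContext(ctx, true), "some/path") // re-read
+
+// NewCache returns a physical cache of the given size.
+// If no size is provided, the default size is used.
+func NewCache(b Backend, size int, logger log.Logger, metricSink metrics.MetricSink) *Cache {
+	if logger.IsDebug() {
+		logger.Debug("creating LRU cache", "size", size)
+	}
+	if size <= 0 {
+		size = DefaultCacheSize
+	}
+
+	pm := pathmanager.New()
+	pm.AddPaths(cacheExceptionsPaths)
+
+	cache, _ := lru.New2Q(size)
+	c := &Cache{
+		backend: b,
+		lru: cache,
+		locks: locksutil.CreateLocks(),
+		logger: logger,
+		// This fails safe.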
+ enabled: new(uint32), + cacheExceptions: pm, + metricSink: metricSink, + } + return c +} + +func NewTransactionalCache(b Backend, size int, logger log.Logger, metricSink metrics.MetricSink) *TransactionalCache { + c := &TransactionalCache{ + Cache: NewCache(b, size, logger, metricSink), + Transactional: b.(Transactional), + } + return c +} + +func (c *Cache) ShouldCache(key string) bool { + if atomic.LoadUint32(c.enabled) == 0 { + return false + } + + return !c.cacheExceptions.HasPath(key) +} + +// SetEnabled is used to toggle whether the cache is on or off. It must be +// called with true to actually activate the cache after creation. +func (c *Cache) SetEnabled(enabled bool) { + if enabled { + atomic.StoreUint32(c.enabled, 1) + return + } + atomic.StoreUint32(c.enabled, 0) +} + +// Purge is used to clear the cache +func (c *Cache) Purge(ctx context.Context) { + // Lock the world + for _, lock := range c.locks { + lock.Lock() + defer lock.Unlock() + } + + c.lru.Purge() +} + +func (c *Cache) Put(ctx context.Context, entry *Entry) error { + if entry != nil && !c.ShouldCache(entry.Key) { + return c.backend.Put(ctx, entry) + } + + lock := locksutil.LockForKey(c.locks, entry.Key) + lock.Lock() + defer lock.Unlock() + + err := c.backend.Put(ctx, entry) + if err == nil { + c.lru.Add(entry.Key, entry) + c.metricSink.IncrCounter([]string{"cache", "write"}, 1) + } + return err +} + +func (c *Cache) Get(ctx context.Context, key string) (*Entry, error) { + if !c.ShouldCache(key) { + return c.backend.Get(ctx, key) + } + + lock := locksutil.LockForKey(c.locks, key) + lock.RLock() + defer lock.RUnlock() + + // Check the LRU first + if !cacheRefreshFromContext(ctx) { + if raw, ok := c.lru.Get(key); ok { + if raw == nil { + return nil, nil + } + c.metricSink.IncrCounter([]string{"cache", "hit"}, 1) + return raw.(*Entry), nil + } + } + + c.metricSink.IncrCounter([]string{"cache", "miss"}, 1) + // Read from the underlying backend + ent, err := c.backend.Get(ctx, key) + if err != nil { + return nil, err + } + + // Cache the result, even if nil + c.lru.Add(key, ent) + + return ent, nil +} + +func (c *Cache) Delete(ctx context.Context, key string) error { + if !c.ShouldCache(key) { + return c.backend.Delete(ctx, key) + } + + lock := locksutil.LockForKey(c.locks, key) + lock.Lock() + defer lock.Unlock() + + err := c.backend.Delete(ctx, key) + if err == nil { + c.lru.Remove(key) + } + return err +} + +func (c *Cache) List(ctx context.Context, prefix string) ([]string, error) { + // Always pass-through as this would be difficult to cache. For the same + // reason we don't lock as we can't reasonably know which locks to readlock + // ahead of time. 
+	return c.backend.List(ctx, prefix)
+}
+
+func (c *TransactionalCache) Locks() []*locksutil.LockEntry {
+	return c.locks
+}
+
+func (c *TransactionalCache) LRU() *lru.TwoQueueCache {
+	return c.lru
+}
+
+func (c *TransactionalCache) Transaction(ctx context.Context, txns []*TxnEntry) error {
+	// Bypass the locking below
+	if atomic.LoadUint32(c.enabled) == 0 {
+		return c.Transactional.Transaction(ctx, txns)
+	}
+
+	// Collect keys that need to be locked
+	var keys []string
+	for _, curr := range txns {
+		keys = append(keys, curr.Entry.Key)
+	}
+	// Lock the keys
+	for _, l := range locksutil.LocksForKeys(c.locks, keys) {
+		l.Lock()
+		defer l.Unlock()
+	}
+
+	if err := c.Transactional.Transaction(ctx, txns); err != nil {
+		return err
+	}
+
+	for _, txn := range txns {
+		if !c.ShouldCache(txn.Entry.Key) {
+			continue
+		}
+
+		switch txn.Operation {
+		case PutOperation:
+			c.lru.Add(txn.Entry.Key, txn.Entry)
+			c.metricSink.IncrCounter([]string{"cache", "write"}, 1)
+		case DeleteOperation:
+			c.lru.Remove(txn.Entry.Key)
+			c.metricSink.IncrCounter([]string{"cache", "delete"}, 1)
+		}
+	}
+
+	return nil
+}
diff --git a/sdk/physical/encoding.go b/sdk/physical/encoding.go
new file mode 100644
index 0000000..49e00ae
--- /dev/null
+++ b/sdk/physical/encoding.go
@@ -0,0 +1,111 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package physical
+
+import (
+	"context"
+	"errors"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+var (
+	ErrNonUTF8 = errors.New("key contains invalid UTF-8 characters")
+	ErrNonPrintable = errors.New("key contains non-printable characters")
+)
+
+// StorageEncoding is used to wrap a physical backend and verify that the keys
+// it is handed are valid UTF-8 and contain only printable characters
+type StorageEncoding struct {
+	Backend
+}
+
+// TransactionalStorageEncoding is the transactional version of
+// StorageEncoding
+type TransactionalStorageEncoding struct {
+	*StorageEncoding
+	Transactional
+}
+
+// Verify StorageEncoding satisfies the correct interfaces
+var (
+	_ Backend = (*StorageEncoding)(nil)
+	_ Transactional = (*TransactionalStorageEncoding)(nil)
+)
+
+// NewStorageEncoding returns a wrapped physical backend that verifies the key
+// encoding
+func NewStorageEncoding(b Backend) Backend {
+	enc := &StorageEncoding{
+		Backend: b,
+	}
+
+	if bTxn, ok := b.(Transactional); ok {
+		return &TransactionalStorageEncoding{
+			StorageEncoding: enc,
+			Transactional: bTxn,
+		}
+	}
+
+	return enc
+}
+
+func (e *StorageEncoding) containsNonPrintableChars(key string) bool {
+	idx := strings.IndexFunc(key, func(c rune) bool {
+		return !unicode.IsPrint(c)
+	})
+
+	return idx != -1
+}
+
+func (e *StorageEncoding) Put(ctx context.Context, entry *Entry) error {
+	if !utf8.ValidString(entry.Key) {
+		return ErrNonUTF8
+	}
+
+	if e.containsNonPrintableChars(entry.Key) {
+		return ErrNonPrintable
+	}
+
+	return e.Backend.Put(ctx, entry)
+}
+
+func (e *StorageEncoding) Delete(ctx context.Context, key string) error {
+	if !utf8.ValidString(key) {
+		return ErrNonUTF8
+	}
+
+	if e.containsNonPrintableChars(key) {
+		return ErrNonPrintable
+	}
+
+	return e.Backend.Delete(ctx, key)
+}
+
+func (e *TransactionalStorageEncoding) Transaction(ctx context.Context, txns []*TxnEntry) error {
+	for _, txn := range txns {
+		if !utf8.ValidString(txn.Entry.Key) {
+			return ErrNonUTF8
+		}
+
+		if e.containsNonPrintableChars(txn.Entry.Key) {
+			return ErrNonPrintable
+		}
+	}
+
+	return e.Transactional.Transaction(ctx, txns)
+}
+
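+// A minimal sketch of the validation behavior, assuming an existing Backend b
+// and context ctx (the keys are hypothetical):
+//
+//	enc := NewStorageEncoding(b)
+//	err := enc.Put(ctx, &Entry{Key: "foo\x00bar"})        // ErrNonPrintable
+//	err = enc.Put(ctx, &Entry{Key: string([]byte{0xff})}) // ErrNonUTF8
+//	err = enc.Put(ctx, &Entry{Key: "foo/bar"})            // passed through to b
+
+func (e *StorageEncoding) Purge(ctx context.Context) {
+	if purgeable, ok := e.Backend.(ToggleablePurgemonster); ok {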
+		purgeable.Purge(ctx)
+	}
+}
+
+func (e *StorageEncoding) SetEnabled(enabled bool) {
+	if purgeable, ok := e.Backend.(ToggleablePurgemonster); ok {
+		purgeable.SetEnabled(enabled)
+	}
+}
diff --git a/sdk/physical/entry.go b/sdk/physical/entry.go
new file mode 100644
index 0000000..1d90742
--- /dev/null
+++ b/sdk/physical/entry.go
@@ -0,0 +1,23 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package physical
+
+import (
+	"encoding/hex"
+	"fmt"
+)
+
+// Entry is used to represent data stored by the physical backend
+type Entry struct {
+	Key string
+	Value []byte
+	SealWrap bool `json:"seal_wrap,omitempty"`
+
+	// Only used in replication
+	ValueHash []byte
+}
+
+func (e *Entry) String() string {
+	return fmt.Sprintf("Key: %s. SealWrap: %t. Value: %s. ValueHash: %s", e.Key, e.SealWrap, hex.EncodeToString(e.Value), hex.EncodeToString(e.ValueHash))
+}
diff --git a/sdk/physical/error.go b/sdk/physical/error.go
new file mode 100644
index 0000000..4af7b7d
--- /dev/null
+++ b/sdk/physical/error.go
@@ -0,0 +1,113 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package physical
+
+import (
+	"context"
+	"errors"
+	"math/rand"
+	"sync"
+	"time"
+
+	log "github.com/hashicorp/go-hclog"
+)
+
+const (
+	// DefaultErrorPercent is used to determine how often we error
+	DefaultErrorPercent = 20
+)
+
+// ErrorInjector is used to add errors into underlying physical requests
+type ErrorInjector struct {
+	backend Backend
+	errorPercent int
+	randomLock *sync.Mutex
+	random *rand.Rand
+}
+
+// TransactionalErrorInjector is the transactional version of the error
+// injector
+type TransactionalErrorInjector struct {
+	*ErrorInjector
+	Transactional
+}
+
+// Verify ErrorInjector satisfies the correct interfaces
+var (
+	_ Backend = (*ErrorInjector)(nil)
+	_ Transactional = (*TransactionalErrorInjector)(nil)
+)
+
+// NewErrorInjector returns a wrapped physical backend to inject errors
+func NewErrorInjector(b Backend, errorPercent int, logger log.Logger) *ErrorInjector {
+	if errorPercent < 0 || errorPercent > 100 {
+		errorPercent = DefaultErrorPercent
+	}
+	logger.Info("creating error injector")
+
+	return &ErrorInjector{
+		backend: b,
+		errorPercent: errorPercent,
+		randomLock: new(sync.Mutex),
+		random: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
+	}
+}
+
+// NewTransactionalErrorInjector creates a new transactional ErrorInjector
+func NewTransactionalErrorInjector(b Backend, errorPercent int, logger log.Logger) *TransactionalErrorInjector {
+	return &TransactionalErrorInjector{
+		ErrorInjector: NewErrorInjector(b, errorPercent, logger),
+		Transactional: b.(Transactional),
+	}
+}
+
+func (e *ErrorInjector) SetErrorPercentage(p int) {
+	e.errorPercent = p
+}
+
+func (e *ErrorInjector) addError() error {
+	e.randomLock.Lock()
+	roll := e.random.Intn(100)
+	e.randomLock.Unlock()
+	if roll < e.errorPercent {
+		return errors.New("random error")
+	}
+
+	return nil
+}
+
+func (e *ErrorInjector) Put(ctx context.Context, entry *Entry) error {
+	if err := e.addError(); err != nil {
+		return err
+	}
+	return e.backend.Put(ctx, entry)
+}
+
+func (e *ErrorInjector) Get(ctx context.Context, key string) (*Entry, error) {
+	if err := e.addError(); err != nil {
+		return nil, err
+	}
+	return e.backend.Get(ctx, key)
+}
+
+func (e *ErrorInjector) Delete(ctx context.Context, key string) error {
+	if err := e.addError(); err != nil {
+		return err
+	}
+	return e.backend.Delete(ctx, key)
+}
+
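+// A minimal sketch of wrapping a backend for fault-injection testing,
+// assuming an existing Backend b, logger, and entry (hypothetical values):
+//
+//	inj := NewErrorInjector(b, 50, logger) // ~50% of calls fail
+//	inj.SetErrorPercentage(100)            // now every call fails
+//	if err := inj.Put(ctx, entry); err != nil {
+//		// a "random error" was injected before reaching b
+//	}
+
+func (e *ErrorInjector) List(ctx context.Context, prefix string)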
([]string, error) { + if err := e.addError(); err != nil { + return nil, err + } + return e.backend.List(ctx, prefix) +} + +func (e *TransactionalErrorInjector) Transaction(ctx context.Context, txns []*TxnEntry) error { + if err := e.addError(); err != nil { + return err + } + return e.Transactional.Transaction(ctx, txns) +} diff --git a/sdk/physical/file/file.go b/sdk/physical/file/file.go new file mode 100644 index 0000000..d7ad9de --- /dev/null +++ b/sdk/physical/file/file.go @@ -0,0 +1,383 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package file + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + "sync" + + "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" + + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/physical" +) + +// Verify FileBackend satisfies the correct interfaces +var ( + _ physical.Backend = (*FileBackend)(nil) + _ physical.Transactional = (*TransactionalFileBackend)(nil) + _ physical.PseudoTransactional = (*FileBackend)(nil) +) + +// FileBackend is a physical backend that stores data on disk +// at a given file path. It can be used for durable single server +// situations, or to develop locally where durability is not critical. +// +// WARNING: the file backend implementation is currently extremely unsafe +// and non-performant. It is meant mostly for local testing and development. +// It can be improved in the future. +type FileBackend struct { + sync.RWMutex + path string + logger log.Logger + permitPool *physical.PermitPool +} + +type TransactionalFileBackend struct { + FileBackend +} + +type fileEntry struct { + Value []byte +} + +// NewFileBackend constructs a FileBackend using the given directory +func NewFileBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + path, ok := conf["path"] + if !ok { + return nil, fmt.Errorf("'path' must be set") + } + + return &FileBackend{ + path: path, + logger: logger, + permitPool: physical.NewPermitPool(physical.DefaultParallelOperations), + }, nil +} + +func NewTransactionalFileBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { + path, ok := conf["path"] + if !ok { + return nil, fmt.Errorf("'path' must be set") + } + + // Create a pool of size 1 so only one operation runs at a time + return &TransactionalFileBackend{ + FileBackend: FileBackend{ + path: path, + logger: logger, + permitPool: physical.NewPermitPool(1), + }, + }, nil +} + +func (b *FileBackend) Delete(ctx context.Context, path string) error { + b.permitPool.Acquire() + defer b.permitPool.Release() + + b.Lock() + defer b.Unlock() + + return b.DeleteInternal(ctx, path) +} + +func (b *FileBackend) DeleteInternal(ctx context.Context, path string) error { + if path == "" { + return nil + } + + if err := b.validatePath(path); err != nil { + return err + } + + basePath, key := b.expandPath(path) + fullPath := filepath.Join(basePath, key) + + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + err := os.Remove(fullPath) + if err != nil && !os.IsNotExist(err) { + return errwrap.Wrapf(fmt.Sprintf("failed to remove %q: {{err}}", fullPath), err) + } + + err = b.cleanupLogicalPath(path) + + return err +} + +// cleanupLogicalPath is used to remove all empty nodes, beginning with deepest +// one, aborting on first non-empty one, up to top-level node. 
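+// For example (hypothetical layout): after deleting "foo/bar/baz", this walks
+// up from "foo/bar" and removes each directory that is now empty, stopping at
+// the first non-empty one.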
+func (b *FileBackend) cleanupLogicalPath(path string) error { + nodes := strings.Split(path, fmt.Sprintf("%c", os.PathSeparator)) + for i := len(nodes) - 1; i > 0; i-- { + fullPath := filepath.Join(b.path, filepath.Join(nodes[:i]...)) + + dir, err := os.Open(fullPath) + if err != nil { + if dir != nil { + dir.Close() + } + if os.IsNotExist(err) { + return nil + } else { + return err + } + } + + list, err := dir.Readdir(1) + dir.Close() + if err != nil && err != io.EOF { + return err + } + + // If we have no entries, it's an empty directory; remove it + if err == io.EOF || list == nil || len(list) == 0 { + err = os.Remove(fullPath) + if err != nil { + return err + } + } + } + + return nil +} + +func (b *FileBackend) Get(ctx context.Context, k string) (*physical.Entry, error) { + b.permitPool.Acquire() + defer b.permitPool.Release() + + b.RLock() + defer b.RUnlock() + + return b.GetInternal(ctx, k) +} + +func (b *FileBackend) GetInternal(ctx context.Context, k string) (*physical.Entry, error) { + if err := b.validatePath(k); err != nil { + return nil, err + } + + path, key := b.expandPath(k) + path = filepath.Join(path, key) + + // If we stat it and it exists but is size zero, it may be left from some + // previous FS error like out-of-space. No Vault entry will ever be zero + // length, so simply remove it and return nil. + fi, err := os.Stat(path) + if err == nil { + if fi.Size() == 0 { + // Best effort, ignore errors + os.Remove(path) + return nil, nil + } + } + + f, err := os.Open(path) + if f != nil { + defer f.Close() + } + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + + return nil, err + } + + var entry fileEntry + if err := jsonutil.DecodeJSONFromReader(f, &entry); err != nil { + return nil, err + } + + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + return &physical.Entry{ + Key: k, + Value: entry.Value, + }, nil +} + +func (b *FileBackend) Put(ctx context.Context, entry *physical.Entry) error { + b.permitPool.Acquire() + defer b.permitPool.Release() + + b.Lock() + defer b.Unlock() + + return b.PutInternal(ctx, entry) +} + +func (b *FileBackend) PutInternal(ctx context.Context, entry *physical.Entry) error { + if err := b.validatePath(entry.Key); err != nil { + return err + } + path, key := b.expandPath(entry.Key) + + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + // Make the parent tree + if err := os.MkdirAll(path, 0o700); err != nil { + return err + } + + // JSON encode the entry and write it + fullPath := filepath.Join(path, key) + tempPath := fullPath + ".temp" + f, err := os.OpenFile( + tempPath, + os.O_CREATE|os.O_TRUNC|os.O_WRONLY, + 0o600) + if err != nil { + if f != nil { + f.Close() + } + return err + } + if f == nil { + return errors.New("could not successfully get a file handle") + } + + enc := json.NewEncoder(f) + encErr := enc.Encode(&fileEntry{ + Value: entry.Value, + }) + f.Close() + if encErr == nil { + err = os.Rename(tempPath, fullPath) + if err != nil { + return err + } + return nil + } + + // Everything below is best-effort and will result in encErr being returned + + // See if we ended up with a zero-byte file and if so delete it, might be a + // case of disk being full but the file info is in metadata that is + // reserved. 
+ fi, err := os.Stat(tempPath) + if err != nil { + return encErr + } + if fi == nil { + return encErr + } + if fi.Size() == 0 { + os.Remove(tempPath) + } + return encErr +} + +func (b *FileBackend) List(ctx context.Context, prefix string) ([]string, error) { + b.permitPool.Acquire() + defer b.permitPool.Release() + + b.RLock() + defer b.RUnlock() + + return b.ListInternal(ctx, prefix) +} + +func (b *FileBackend) ListInternal(ctx context.Context, prefix string) ([]string, error) { + if err := b.validatePath(prefix); err != nil { + return nil, err + } + + path := b.path + if prefix != "" { + path = filepath.Join(path, prefix) + } + + // Read the directory contents + f, err := os.Open(path) + if f != nil { + defer f.Close() + } + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + + return nil, err + } + + names, err := f.Readdirnames(-1) + if err != nil { + return nil, err + } + + for i, name := range names { + fi, err := os.Stat(filepath.Join(path, name)) + if err != nil { + return nil, err + } + if fi.IsDir() { + names[i] = name + "/" + } else { + if name[0] == '_' { + names[i] = name[1:] + } + } + } + + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + if len(names) > 0 { + sort.Strings(names) + } + + return names, nil +} + +func (b *FileBackend) expandPath(k string) (string, string) { + path := filepath.Join(b.path, k) + key := filepath.Base(path) + path = filepath.Dir(path) + return path, "_" + key +} + +func (b *FileBackend) validatePath(path string) error { + switch { + case strings.Contains(path, ".."): + return consts.ErrPathContainsParentReferences + } + + return nil +} + +func (b *TransactionalFileBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error { + b.permitPool.Acquire() + defer b.permitPool.Release() + + b.Lock() + defer b.Unlock() + + return physical.GenericTransactionHandler(ctx, b, txns) +} diff --git a/sdk/physical/file/file_test.go b/sdk/physical/file/file_test.go new file mode 100644 index 0000000..14c3309 --- /dev/null +++ b/sdk/physical/file/file_test.go @@ -0,0 +1,242 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package file + +import ( + "context" + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "testing" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" +) + +func TestFileBackend_Base64URLEncoding(t *testing.T) { + backendPath, err := ioutil.TempDir("", "vault") + if err != nil { + t.Fatalf("err: %s", err) + } + defer os.RemoveAll(backendPath) + + logger := logging.NewVaultLogger(log.Debug) + + b, err := NewFileBackend(map[string]string{ + "path": backendPath, + }, logger) + if err != nil { + t.Fatalf("err: %s", err) + } + + // List the entries. Length should be zero. 
+ keys, err := b.List(context.Background(), "") + if err != nil { + t.Fatalf("err: %v", err) + } + if len(keys) != 0 { + t.Fatalf("bad: len(keys): expected: 0, actual: %d", len(keys)) + } + + // Create a storage entry without base64 encoding the file name + rawFullPath := filepath.Join(backendPath, "_foo") + e := &physical.Entry{Key: "foo", Value: []byte("test")} + f, err := os.OpenFile( + rawFullPath, + os.O_CREATE|os.O_TRUNC|os.O_WRONLY, + 0o600) + if err != nil { + t.Fatal(err) + } + json.NewEncoder(f).Encode(e) + f.Close() + + // Get should work + out, err := b.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("err: %v", err) + } + if !reflect.DeepEqual(out, e) { + t.Fatalf("bad: %v expected: %v", out, e) + } + + // List the entries. There should be one entry. + keys, err = b.List(context.Background(), "") + if err != nil { + t.Fatalf("err: %v", err) + } + if len(keys) != 1 { + t.Fatalf("bad: len(keys): expected: 1, actual: %d", len(keys)) + } + + err = b.Put(context.Background(), e) + if err != nil { + t.Fatalf("err: %v", err) + } + + // List the entries again. There should still be one entry. + keys, err = b.List(context.Background(), "") + if err != nil { + t.Fatalf("err: %v", err) + } + if len(keys) != 1 { + t.Fatalf("bad: len(keys): expected: 1, actual: %d", len(keys)) + } + + // Get should work + out, err = b.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("err: %v", err) + } + if !reflect.DeepEqual(out, e) { + t.Fatalf("bad: %v expected: %v", out, e) + } + + err = b.Delete(context.Background(), "foo") + if err != nil { + t.Fatalf("err: %v", err) + } + + out, err = b.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("err: %v", err) + } + if out != nil { + t.Fatalf("bad: entry: expected: nil, actual: %#v", e) + } + + keys, err = b.List(context.Background(), "") + if err != nil { + t.Fatalf("err: %v", err) + } + if len(keys) != 0 { + t.Fatalf("bad: len(keys): expected: 0, actual: %d", len(keys)) + } + + f, err = os.OpenFile( + rawFullPath, + os.O_CREATE|os.O_TRUNC|os.O_WRONLY, + 0o600) + if err != nil { + t.Fatal(err) + } + json.NewEncoder(f).Encode(e) + f.Close() + + keys, err = b.List(context.Background(), "") + if err != nil { + t.Fatalf("err: %v", err) + } + if len(keys) != 1 { + t.Fatalf("bad: len(keys): expected: 1, actual: %d", len(keys)) + } +} + +func TestFileBackend_ValidatePath(t *testing.T) { + dir, err := ioutil.TempDir("", "vault") + if err != nil { + t.Fatalf("err: %s", err) + } + defer os.RemoveAll(dir) + + logger := logging.NewVaultLogger(log.Debug) + + b, err := NewFileBackend(map[string]string{ + "path": dir, + }, logger) + if err != nil { + t.Fatalf("err: %s", err) + } + + if err := b.Delete(context.Background(), "foo/bar/../zip"); err == nil { + t.Fatal("expected error") + } + if err := b.Delete(context.Background(), "foo/bar/zip"); err != nil { + t.Fatal("did not expect error") + } +} + +func TestFileBackend(t *testing.T) { + dir, err := ioutil.TempDir("", "vault") + if err != nil { + t.Fatalf("err: %s", err) + } + defer os.RemoveAll(dir) + + logger := logging.NewVaultLogger(log.Debug) + + b, err := NewFileBackend(map[string]string{ + "path": dir, + }, logger) + if err != nil { + t.Fatalf("err: %s", err) + } + + physical.ExerciseBackend(t, b) + + // Underscores should not trip things up; ref GH-3476 + e := &physical.Entry{Key: "_zip", Value: []byte("foobar")} + err = b.Put(context.Background(), e) + if err != nil { + t.Fatalf("err: %v", err) + } + e = &physical.Entry{Key: "_zip/_zap", Value: []byte("boofar")} + err = 
b.Put(context.Background(), e) + if err != nil { + t.Fatalf("err: %v", err) + } + e, err = b.Get(context.Background(), "_zip/_zap") + if err != nil { + t.Fatalf("err: %v", err) + } + if e == nil { + t.Fatal("got nil entry") + } + vals, err := b.List(context.Background(), "") + if err != nil { + t.Fatal(err) + } + if len(vals) != 2 || vals[0] == vals[1] { + t.Fatalf("bad: %v", vals) + } + for _, val := range vals { + if val != "_zip/" && val != "_zip" { + t.Fatalf("bad val: %v", val) + } + } + vals, err = b.List(context.Background(), "_zip/") + if err != nil { + t.Fatal(err) + } + if len(vals) != 1 || vals[0] != "_zap" { + t.Fatalf("bad: %v", vals) + } + err = b.Delete(context.Background(), "_zip/_zap") + if err != nil { + t.Fatal(err) + } + vals, err = b.List(context.Background(), "") + if err != nil { + t.Fatal(err) + } + if len(vals) != 1 || vals[0] != "_zip" { + t.Fatalf("bad: %v", vals) + } + err = b.Delete(context.Background(), "_zip") + if err != nil { + t.Fatal(err) + } + vals, err = b.List(context.Background(), "") + if err != nil { + t.Fatal(err) + } + if len(vals) != 0 { + t.Fatalf("bad: %v", vals) + } + + physical.ExerciseBackend_ListPrefix(t, b) +} diff --git a/sdk/physical/inmem/cache_test.go b/sdk/physical/inmem/cache_test.go new file mode 100644 index 0000000..3014fc1 --- /dev/null +++ b/sdk/physical/inmem/cache_test.go @@ -0,0 +1,333 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package inmem + +import ( + "context" + "testing" + + "github.com/armon/go-metrics" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" +) + +func TestCache(t *testing.T) { + logger := logging.NewVaultLogger(log.Debug) + + inm, err := NewInmem(nil, logger) + if err != nil { + t.Fatal(err) + } + + cache := physical.NewCache(inm, 0, logger, &metrics.BlackholeSink{}) + cache.SetEnabled(true) + physical.ExerciseBackend(t, cache) + physical.ExerciseBackend_ListPrefix(t, cache) +} + +func TestCache_Purge(t *testing.T) { + logger := logging.NewVaultLogger(log.Debug) + + inm, err := NewInmem(nil, logger) + if err != nil { + t.Fatal(err) + } + cache := physical.NewCache(inm, 0, logger, &metrics.BlackholeSink{}) + cache.SetEnabled(true) + + ent := &physical.Entry{ + Key: "foo", + Value: []byte("bar"), + } + err = cache.Put(context.Background(), ent) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Delete from under + inm.Delete(context.Background(), "foo") + if err != nil { + t.Fatal(err) + } + + // Read should work + out, err := cache.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("err: %v", err) + } + if out == nil { + t.Fatalf("should have key") + } + + // Clear the cache + cache.Purge(context.Background()) + + // Read should fail + out, err = cache.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("err: %v", err) + } + if out != nil { + t.Fatalf("should not have key") + } +} + +func TestCache_Disable(t *testing.T) { + logger := logging.NewVaultLogger(log.Debug) + + inm, err := NewInmem(nil, logger) + if err != nil { + t.Fatal(err) + } + cache := physical.NewCache(inm, 0, logger, &metrics.BlackholeSink{}) + + disabledTests := func() { + ent := &physical.Entry{ + Key: "foo", + Value: []byte("bar"), + } + err = inm.Put(context.Background(), ent) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Read should work + out, err := cache.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("err: %v", err) + } + if out == nil { + t.Fatalf("should have key") + 
} + + err = inm.Delete(context.Background(), ent.Key) + if err != nil { + t.Fatal(err) + } + + // Should not work + out, err = cache.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("err: %v", err) + } + if out != nil { + t.Fatalf("should not have key") + } + + // Put through the cache and try again + err = cache.Put(context.Background(), ent) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Read should work in both + out, err = inm.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("err: %v", err) + } + if out == nil { + t.Fatalf("should have key") + } + out, err = cache.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("err: %v", err) + } + if out == nil { + t.Fatalf("should have key") + } + + err = inm.Delete(context.Background(), ent.Key) + if err != nil { + t.Fatal(err) + } + + // Should not work + out, err = cache.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("err: %v", err) + } + if out != nil { + t.Fatalf("should not have key") + } + } + + enabledTests := func() { + ent := &physical.Entry{ + Key: "foo", + Value: []byte("bar"), + } + err = inm.Put(context.Background(), ent) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Read should work + out, err := cache.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("err: %v", err) + } + if out == nil { + t.Fatalf("should have key") + } + + err = inm.Delete(context.Background(), ent.Key) + if err != nil { + t.Fatal(err) + } + + // Should work + out, err = cache.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("err: %v", err) + } + if out == nil { + t.Fatalf("should have key") + } + + // Put through the cache and try again + err = cache.Put(context.Background(), ent) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Read should work for both + out, err = inm.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("err: %v", err) + } + if out == nil { + t.Fatalf("should have key") + } + out, err = cache.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("err: %v", err) + } + if out == nil { + t.Fatalf("should have key") + } + + err = inm.Delete(context.Background(), ent.Key) + if err != nil { + t.Fatal(err) + } + + // Should work + out, err = cache.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("err: %v", err) + } + if out == nil { + t.Fatalf("should have key") + } + + // Put through the cache + err = cache.Put(context.Background(), ent) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Read should work for both + out, err = inm.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("err: %v", err) + } + if out == nil { + t.Fatalf("should have key") + } + out, err = cache.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("err: %v", err) + } + if out == nil { + t.Fatalf("should have key") + } + + // Delete via cache + err = cache.Delete(context.Background(), ent.Key) + if err != nil { + t.Fatal(err) + } + + // Read should not work for either + out, err = inm.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("err: %v", err) + } + if out != nil { + t.Fatalf("should not have key") + } + out, err = cache.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("err: %v", err) + } + if out != nil { + t.Fatalf("should not have key") + } + } + + disabledTests() + cache.SetEnabled(true) + enabledTests() + cache.SetEnabled(false) + disabledTests() +} + +func TestCache_Refresh(t *testing.T) { + logger := logging.NewVaultLogger(log.Debug) + + inm, err := NewInmem(nil, logger) + if err != 
nil { + t.Fatal(err) + } + cache := physical.NewCache(inm, 0, logger, &metrics.BlackholeSink{}) + cache.SetEnabled(true) + + ent := &physical.Entry{ + Key: "foo", + Value: []byte("bar"), + } + err = cache.Put(context.Background(), ent) + if err != nil { + t.Fatalf("err: %v", err) + } + + ent2 := &physical.Entry{ + Key: "foo", + Value: []byte("baz"), + } + // Update below cache + err = inm.Put(context.Background(), ent2) + if err != nil { + t.Fatalf("err: %v", err) + } + + r, err := cache.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("err: %v", err) + } + + if string(r.Value) != "bar" { + t.Fatalf("expected value bar, got %s", string(r.Value)) + } + + // Refresh the cache + r, err = cache.Get(physical.CacheRefreshContext(context.Background(), true), "foo") + if err != nil { + t.Fatalf("err: %v", err) + } + + if string(r.Value) != "baz" { + t.Fatalf("expected value baz, got %s", string(r.Value)) + } + + // Make sure new value is in cache + r, err = cache.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("err: %v", err) + } + if string(r.Value) != "baz" { + t.Fatalf("expected value baz, got %s", string(r.Value)) + } +} diff --git a/sdk/physical/inmem/inmem.go b/sdk/physical/inmem/inmem.go new file mode 100644 index 0000000..e4fa1f6 --- /dev/null +++ b/sdk/physical/inmem/inmem.go @@ -0,0 +1,313 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package inmem + +import ( + "context" + "errors" + "fmt" + "os" + "strconv" + "strings" + "sync" + "sync/atomic" + + "github.com/armon/go-radix" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/physical" +) + +// Verify interfaces are satisfied +var ( + _ physical.Backend = (*InmemBackend)(nil) + _ physical.HABackend = (*InmemHABackend)(nil) + _ physical.HABackend = (*TransactionalInmemHABackend)(nil) + _ physical.Lock = (*InmemLock)(nil) + _ physical.Transactional = (*TransactionalInmemBackend)(nil) + _ physical.Transactional = (*TransactionalInmemHABackend)(nil) +) + +var ( + PutDisabledError = errors.New("put operations disabled in inmem backend") + GetDisabledError = errors.New("get operations disabled in inmem backend") + DeleteDisabledError = errors.New("delete operations disabled in inmem backend") + ListDisabledError = errors.New("list operations disabled in inmem backend") + GetInTxnDisabledError = errors.New("get operations inside transactions are disabled in inmem backend") +) + +// InmemBackend is an in-memory only physical backend. It is useful +// for testing and development situations where the data is not +// expected to be durable. 
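+// A construction sketch (the config values here are hypothetical):
+//
+//	b, err := NewInmem(map[string]string{"max_value_size": "1024"}, logger)
+//	err = b.Put(ctx, &physical.Entry{Key: "foo", Value: []byte("bar")})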
+type InmemBackend struct { + sync.RWMutex + root *radix.Tree + permitPool *physical.PermitPool + logger log.Logger + failGet *uint32 + failPut *uint32 + failDelete *uint32 + failList *uint32 + failGetInTxn *uint32 + logOps bool + maxValueSize int +} + +type TransactionalInmemBackend struct { + InmemBackend +} + +// NewInmem constructs a new in-memory backend +func NewInmem(conf map[string]string, logger log.Logger) (physical.Backend, error) { + maxValueSize := 0 + maxValueSizeStr, ok := conf["max_value_size"] + if ok { + var err error + maxValueSize, err = strconv.Atoi(maxValueSizeStr) + if err != nil { + return nil, err + } + } + + return &InmemBackend{ + root: radix.New(), + permitPool: physical.NewPermitPool(physical.DefaultParallelOperations), + logger: logger, + failGet: new(uint32), + failPut: new(uint32), + failDelete: new(uint32), + failList: new(uint32), + failGetInTxn: new(uint32), + logOps: os.Getenv("VAULT_INMEM_LOG_ALL_OPS") != "", + maxValueSize: maxValueSize, + }, nil +} + +// Basically for now just creates a permit pool of size 1 so only one operation +// can run at a time +func NewTransactionalInmem(conf map[string]string, logger log.Logger) (physical.Backend, error) { + maxValueSize := 0 + maxValueSizeStr, ok := conf["max_value_size"] + if ok { + var err error + maxValueSize, err = strconv.Atoi(maxValueSizeStr) + if err != nil { + return nil, err + } + } + + return &TransactionalInmemBackend{ + InmemBackend: InmemBackend{ + root: radix.New(), + permitPool: physical.NewPermitPool(1), + logger: logger, + failGet: new(uint32), + failPut: new(uint32), + failDelete: new(uint32), + failList: new(uint32), + failGetInTxn: new(uint32), + logOps: os.Getenv("VAULT_INMEM_LOG_ALL_OPS") != "", + maxValueSize: maxValueSize, + }, + }, nil +} + +// Put is used to insert or update an entry +func (i *InmemBackend) Put(ctx context.Context, entry *physical.Entry) error { + i.permitPool.Acquire() + defer i.permitPool.Release() + + i.Lock() + defer i.Unlock() + + return i.PutInternal(ctx, entry) +} + +func (i *InmemBackend) PutInternal(ctx context.Context, entry *physical.Entry) error { + if i.logOps { + i.logger.Trace("put", "key", entry.Key) + } + if atomic.LoadUint32(i.failPut) != 0 { + return PutDisabledError + } + + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + if i.maxValueSize > 0 && len(entry.Value) > i.maxValueSize { + return fmt.Errorf("%s", physical.ErrValueTooLarge) + } + + i.root.Insert(entry.Key, entry.Value) + return nil +} + +func (i *InmemBackend) FailPut(fail bool) { + var val uint32 + if fail { + val = 1 + } + atomic.StoreUint32(i.failPut, val) +} + +// Get is used to fetch an entry +func (i *InmemBackend) Get(ctx context.Context, key string) (*physical.Entry, error) { + i.permitPool.Acquire() + defer i.permitPool.Release() + + i.RLock() + defer i.RUnlock() + + return i.GetInternal(ctx, key) +} + +func (i *InmemBackend) GetInternal(ctx context.Context, key string) (*physical.Entry, error) { + if i.logOps { + i.logger.Trace("get", "key", key) + } + if atomic.LoadUint32(i.failGet) != 0 { + return nil, GetDisabledError + } + + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + if raw, ok := i.root.Get(key); ok { + return &physical.Entry{ + Key: key, + Value: raw.([]byte), + }, nil + } + return nil, nil +} + +func (i *InmemBackend) FailGet(fail bool) { + var val uint32 + if fail { + val = 1 + } + atomic.StoreUint32(i.failGet, val) +} + +func (i *InmemBackend) FailGetInTxn(fail bool) { + var val uint32 + if fail { + val = 1 + } + 
atomic.StoreUint32(i.failGetInTxn, val) +} + +// Delete is used to permanently delete an entry +func (i *InmemBackend) Delete(ctx context.Context, key string) error { + i.permitPool.Acquire() + defer i.permitPool.Release() + + i.Lock() + defer i.Unlock() + + return i.DeleteInternal(ctx, key) +} + +func (i *InmemBackend) DeleteInternal(ctx context.Context, key string) error { + if i.logOps { + i.logger.Trace("delete", "key", key) + } + if atomic.LoadUint32(i.failDelete) != 0 { + return DeleteDisabledError + } + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + i.root.Delete(key) + return nil +} + +func (i *InmemBackend) FailDelete(fail bool) { + var val uint32 + if fail { + val = 1 + } + atomic.StoreUint32(i.failDelete, val) +} + +// List is used to list all the keys under a given +// prefix, up to the next prefix. +func (i *InmemBackend) List(ctx context.Context, prefix string) ([]string, error) { + i.permitPool.Acquire() + defer i.permitPool.Release() + + i.RLock() + defer i.RUnlock() + + return i.ListInternal(ctx, prefix) +} + +func (i *InmemBackend) ListInternal(ctx context.Context, prefix string) ([]string, error) { + if i.logOps { + i.logger.Trace("list", "prefix", prefix) + } + if atomic.LoadUint32(i.failList) != 0 { + return nil, ListDisabledError + } + + var out []string + seen := make(map[string]interface{}) + walkFn := func(s string, v interface{}) bool { + trimmed := strings.TrimPrefix(s, prefix) + sep := strings.Index(trimmed, "/") + if sep == -1 { + out = append(out, trimmed) + } else { + trimmed = trimmed[:sep+1] + if _, ok := seen[trimmed]; !ok { + out = append(out, trimmed) + seen[trimmed] = struct{}{} + } + } + return false + } + i.root.WalkPrefix(prefix, walkFn) + + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + return out, nil +} + +func (i *InmemBackend) FailList(fail bool) { + var val uint32 + if fail { + val = 1 + } + atomic.StoreUint32(i.failList, val) +} + +// Transaction implements the transaction interface +func (t *TransactionalInmemBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error { + t.permitPool.Acquire() + defer t.permitPool.Release() + + t.Lock() + defer t.Unlock() + + failGetInTxn := atomic.LoadUint32(t.failGetInTxn) + for _, t := range txns { + if t.Operation == physical.GetOperation && failGetInTxn != 0 { + return GetInTxnDisabledError + } + } + + return physical.GenericTransactionHandler(ctx, t, txns) +} diff --git a/sdk/physical/inmem/inmem_ha.go b/sdk/physical/inmem/inmem_ha.go new file mode 100644 index 0000000..1db26ca --- /dev/null +++ b/sdk/physical/inmem/inmem_ha.go @@ -0,0 +1,170 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package inmem + +import ( + "fmt" + "sync" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/physical" +) + +type InmemHABackend struct { + physical.Backend + locks map[string]string + l *sync.Mutex + cond *sync.Cond + logger log.Logger +} + +type TransactionalInmemHABackend struct { + physical.Transactional + InmemHABackend +} + +// NewInmemHA constructs a new in-memory HA backend. This is only for testing. 
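+//
+// Illustrative construction (logger assumed to be in scope):
+//
+//	be, _ := NewInmemHA(nil, logger)
+//	ha := be.(physical.HABackend) // the returned Backend also implements HABackend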
+func NewInmemHA(_ map[string]string, logger log.Logger) (physical.Backend, error) { + be, err := NewInmem(nil, logger) + if err != nil { + return nil, err + } + + in := &InmemHABackend{ + Backend: be, + locks: make(map[string]string), + logger: logger, + l: new(sync.Mutex), + } + in.cond = sync.NewCond(in.l) + return in, nil +} + +func NewTransactionalInmemHA(_ map[string]string, logger log.Logger) (physical.Backend, error) { + transInmem, err := NewTransactionalInmem(nil, logger) + if err != nil { + return nil, err + } + inmemHA := InmemHABackend{ + Backend: transInmem, + locks: make(map[string]string), + logger: logger, + l: new(sync.Mutex), + } + + in := &TransactionalInmemHABackend{ + InmemHABackend: inmemHA, + Transactional: transInmem.(physical.Transactional), + } + in.cond = sync.NewCond(in.l) + return in, nil +} + +// LockWith is used for mutual exclusion based on the given key. +func (i *InmemHABackend) LockWith(key, value string) (physical.Lock, error) { + l := &InmemLock{ + in: i, + key: key, + value: value, + } + return l, nil +} + +// LockMapSize is used in some tests to determine whether this backend has ever +// been used for HA purposes rather than simply for storage +func (i *InmemHABackend) LockMapSize() int { + return len(i.locks) +} + +// HAEnabled indicates whether the HA functionality should be exposed. +// Currently always returns true. +func (i *InmemHABackend) HAEnabled() bool { + return true +} + +// InmemLock is an in-memory Lock implementation for the HABackend +type InmemLock struct { + in *InmemHABackend + key string + value string + + held bool + leaderCh chan struct{} + l sync.Mutex +} + +func (i *InmemLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { + i.l.Lock() + defer i.l.Unlock() + if i.held { + return nil, fmt.Errorf("lock already held") + } + + // Attempt an async acquisition + didLock := make(chan struct{}) + releaseCh := make(chan bool, 1) + go func() { + // Wait to acquire the lock + i.in.l.Lock() + _, ok := i.in.locks[i.key] + for ok { + i.in.cond.Wait() + _, ok = i.in.locks[i.key] + } + i.in.locks[i.key] = i.value + i.in.l.Unlock() + + // Signal that lock is held + close(didLock) + + // Handle an early abort + release := <-releaseCh + if release { + i.in.l.Lock() + delete(i.in.locks, i.key) + i.in.l.Unlock() + i.in.cond.Broadcast() + } + }() + + // Wait for lock acquisition or shutdown + select { + case <-didLock: + releaseCh <- false + case <-stopCh: + releaseCh <- true + return nil, nil + } + + // Create the leader channel + i.held = true + i.leaderCh = make(chan struct{}) + return i.leaderCh, nil +} + +func (i *InmemLock) Unlock() error { + i.l.Lock() + defer i.l.Unlock() + + if !i.held { + return nil + } + + close(i.leaderCh) + i.leaderCh = nil + i.held = false + + i.in.l.Lock() + delete(i.in.locks, i.key) + i.in.l.Unlock() + i.in.cond.Broadcast() + return nil +} + +func (i *InmemLock) Value() (bool, string, error) { + i.in.l.Lock() + val, ok := i.in.locks[i.key] + i.in.l.Unlock() + return ok, val, nil +} diff --git a/sdk/physical/inmem/inmem_ha_test.go b/sdk/physical/inmem/inmem_ha_test.go new file mode 100644 index 0000000..bb427a3 --- /dev/null +++ b/sdk/physical/inmem/inmem_ha_test.go @@ -0,0 +1,24 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package inmem + +import ( + "testing" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" +) + +func TestInmemHA(t *testing.T) { + logger := logging.NewVaultLogger(log.Debug) + + inm, err := NewInmemHA(nil, logger) + if err != nil { + t.Fatal(err) + } + + // Use the same inmem backend to acquire the same set of locks + physical.ExerciseHABackend(t, inm.(physical.HABackend), inm.(physical.HABackend)) +} diff --git a/sdk/physical/inmem/inmem_test.go b/sdk/physical/inmem/inmem_test.go new file mode 100644 index 0000000..56c029a --- /dev/null +++ b/sdk/physical/inmem/inmem_test.go @@ -0,0 +1,23 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package inmem + +import ( + "testing" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" +) + +func TestInmem(t *testing.T) { + logger := logging.NewVaultLogger(log.Debug) + + inm, err := NewInmem(nil, logger) + if err != nil { + t.Fatal(err) + } + physical.ExerciseBackend(t, inm) + physical.ExerciseBackend_ListPrefix(t, inm) +} diff --git a/sdk/physical/inmem/physical_view_test.go b/sdk/physical/inmem/physical_view_test.go new file mode 100644 index 0000000..24b47d7 --- /dev/null +++ b/sdk/physical/inmem/physical_view_test.go @@ -0,0 +1,124 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package inmem + +import ( + "context" + "testing" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" +) + +func TestPhysicalView_impl(t *testing.T) { + var _ physical.Backend = new(physical.View) +} + +func newInmemTestBackend() (physical.Backend, error) { + logger := logging.NewVaultLogger(log.Debug) + return NewInmem(nil, logger) +} + +func TestPhysicalView_BadKeysKeys(t *testing.T) { + backend, err := newInmemTestBackend() + if err != nil { + t.Fatal(err) + } + view := physical.NewView(backend, "foo/") + + _, err = view.List(context.Background(), "../") + if err == nil { + t.Fatalf("expected error") + } + + _, err = view.Get(context.Background(), "../") + if err == nil { + t.Fatalf("expected error") + } + + err = view.Delete(context.Background(), "../foo") + if err == nil { + t.Fatalf("expected error") + } + + le := &physical.Entry{ + Key: "../foo", + Value: []byte("test"), + } + err = view.Put(context.Background(), le) + if err == nil { + t.Fatalf("expected error") + } +} + +func TestPhysicalView(t *testing.T) { + backend, err := newInmemTestBackend() + if err != nil { + t.Fatal(err) + } + + view := physical.NewView(backend, "foo/") + + // Write a key outside of foo/ + entry := &physical.Entry{Key: "test", Value: []byte("test")} + if err := backend.Put(context.Background(), entry); err != nil { + t.Fatalf("bad: %v", err) + } + + // List should have no visibility + keys, err := view.List(context.Background(), "") + if err != nil { + t.Fatalf("err: %v", err) + } + if len(keys) != 0 { + t.Fatalf("bad: %v", err) + } + + // Get should have no visibility + out, err := view.Get(context.Background(), "test") + if err != nil { + t.Fatalf("err: %v", err) + } + if out != nil { + t.Fatalf("bad: %v", out) + } + + // Try to put the same entry via the view + if err := view.Put(context.Background(), entry); err != nil { + t.Fatalf("err: %v", err) + } + + // Check it is nested + entry, err = backend.Get(context.Background(), "foo/test") + if err 
!= nil { + t.Fatalf("err: %v", err) + } + if entry == nil { + t.Fatalf("missing nested foo/test") + } + + // Delete nested + if err := view.Delete(context.Background(), "test"); err != nil { + t.Fatalf("err: %v", err) + } + + // Check the nested key + entry, err = backend.Get(context.Background(), "foo/test") + if err != nil { + t.Fatalf("err: %v", err) + } + if entry != nil { + t.Fatalf("nested foo/test should be gone") + } + + // Check the non-nested key + entry, err = backend.Get(context.Background(), "test") + if err != nil { + t.Fatalf("err: %v", err) + } + if entry == nil { + t.Fatalf("root test missing") + } +} diff --git a/sdk/physical/inmem/transactions_test.go b/sdk/physical/inmem/transactions_test.go new file mode 100644 index 0000000..71a4829 --- /dev/null +++ b/sdk/physical/inmem/transactions_test.go @@ -0,0 +1,153 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package inmem + +import ( + "context" + "fmt" + "reflect" + "sort" + "testing" + + radix "github.com/armon/go-radix" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/physical" +) + +type faultyPseudo struct { + underlying InmemBackend + faultyPaths map[string]struct{} +} + +func (f *faultyPseudo) Get(ctx context.Context, key string) (*physical.Entry, error) { + return f.underlying.Get(context.Background(), key) +} + +func (f *faultyPseudo) Put(ctx context.Context, entry *physical.Entry) error { + return f.underlying.Put(context.Background(), entry) +} + +func (f *faultyPseudo) Delete(ctx context.Context, key string) error { + return f.underlying.Delete(context.Background(), key) +} + +func (f *faultyPseudo) GetInternal(ctx context.Context, key string) (*physical.Entry, error) { + if _, ok := f.faultyPaths[key]; ok { + return nil, fmt.Errorf("fault") + } + return f.underlying.GetInternal(context.Background(), key) +} + +func (f *faultyPseudo) PutInternal(ctx context.Context, entry *physical.Entry) error { + if _, ok := f.faultyPaths[entry.Key]; ok { + return fmt.Errorf("fault") + } + return f.underlying.PutInternal(context.Background(), entry) +} + +func (f *faultyPseudo) DeleteInternal(ctx context.Context, key string) error { + if _, ok := f.faultyPaths[key]; ok { + return fmt.Errorf("fault") + } + return f.underlying.DeleteInternal(context.Background(), key) +} + +func (f *faultyPseudo) List(ctx context.Context, prefix string) ([]string, error) { + return f.underlying.List(context.Background(), prefix) +} + +func (f *faultyPseudo) Transaction(ctx context.Context, txns []*physical.TxnEntry) error { + f.underlying.permitPool.Acquire() + defer f.underlying.permitPool.Release() + + f.underlying.Lock() + defer f.underlying.Unlock() + + return physical.GenericTransactionHandler(ctx, f, txns) +} + +func newFaultyPseudo(logger log.Logger, faultyPaths []string) *faultyPseudo { + out := &faultyPseudo{ + underlying: InmemBackend{ + root: radix.New(), + permitPool: physical.NewPermitPool(1), + logger: logger.Named("storage.inmembackend"), + failGet: new(uint32), + failPut: new(uint32), + failDelete: new(uint32), + failList: new(uint32), + }, + faultyPaths: make(map[string]struct{}, len(faultyPaths)), + } + for _, v := range faultyPaths { + out.faultyPaths[v] = struct{}{} + } + return out +} + +func TestPseudo_Basic(t *testing.T) { + logger := logging.NewVaultLogger(log.Debug) + p := newFaultyPseudo(logger, nil) + physical.ExerciseBackend(t, p) + physical.ExerciseBackend_ListPrefix(t, p) +} + +func 
TestPseudo_SuccessfulTransaction(t *testing.T) {
+	logger := logging.NewVaultLogger(log.Debug)
+	p := newFaultyPseudo(logger, nil)
+
+	physical.ExerciseTransactionalBackend(t, p)
+}
+
+func TestPseudo_FailedTransaction(t *testing.T) {
+	logger := logging.NewVaultLogger(log.Debug)
+	p := newFaultyPseudo(logger, []string{"zip"})
+
+	txns := physical.SetupTestingTransactions(t, p)
+	if err := p.Transaction(context.Background(), txns); err == nil {
+		t.Fatal("expected error during transaction")
+	}
+
+	keys, err := p.List(context.Background(), "")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expected := []string{"foo", "zip", "deleteme", "deleteme2"}
+
+	sort.Strings(keys)
+	sort.Strings(expected)
+	if !reflect.DeepEqual(keys, expected) {
+		t.Fatalf("mismatch: expected\n%#v\ngot\n%#v\n", expected, keys)
+	}
+
+	entry, err := p.Get(context.Background(), "foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if entry == nil {
+		t.Fatal("got nil entry")
+	}
+	if entry.Value == nil {
+		t.Fatal("got nil value")
+	}
+	if string(entry.Value) != "bar" {
+		t.Fatal("values did not roll back correctly")
+	}
+
+	entry, err = p.Get(context.Background(), "zip")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if entry == nil {
+		t.Fatal("got nil entry")
+	}
+	if entry.Value == nil {
+		t.Fatal("got nil value")
+	}
+	if string(entry.Value) != "zap" {
+		t.Fatal("values did not roll back correctly")
+	}
+}
diff --git a/sdk/physical/latency.go b/sdk/physical/latency.go
new file mode 100644
index 0000000..f4cced5
--- /dev/null
+++ b/sdk/physical/latency.go
@@ -0,0 +1,119 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package physical
+
+import (
+	"context"
+	"math/rand"
+	"sync"
+	"time"
+
+	log "github.com/hashicorp/go-hclog"
+	uberAtomic "go.uber.org/atomic"
+)
+
+const (
+	// DefaultJitterPercent is used if an out-of-range jitter percent is
+	// passed to NewLatencyInjector
+	DefaultJitterPercent = 20
+)
+
+// LatencyInjector is used to add latency into underlying physical requests
+type LatencyInjector struct {
+	logger        log.Logger
+	backend       Backend
+	latency       *uberAtomic.Duration
+	jitterPercent int
+	randomLock    *sync.Mutex
+	random        *rand.Rand
+}
+
+// TransactionalLatencyInjector is the transactional version of the latency
+// injector
+type TransactionalLatencyInjector struct {
+	*LatencyInjector
+	Transactional
+}
+
+// Verify LatencyInjector satisfies the correct interfaces
+var (
+	_ Backend       = (*LatencyInjector)(nil)
+	_ Transactional = (*TransactionalLatencyInjector)(nil)
+)
+
+// NewLatencyInjector returns a wrapped physical backend to simulate latency
+func NewLatencyInjector(b Backend, latency time.Duration, jitter int, logger log.Logger) *LatencyInjector {
+	if jitter < 0 || jitter > 100 {
+		jitter = DefaultJitterPercent
+	}
+	logger.Info("creating latency injector")
+
+	return &LatencyInjector{
+		logger:        logger,
+		backend:       b,
+		latency:       uberAtomic.NewDuration(latency),
+		jitterPercent: jitter,
+		randomLock:    new(sync.Mutex),
+		random:        rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
+	}
+}
+
+// NewTransactionalLatencyInjector creates a new transactional LatencyInjector.
+// jitter is the random percentage by which the latency may vary.
+// For example, with latency = 50ms and jitter = 20, any given operation's
+// latency will be 50ms +- 10ms (20% of 50), i.e. between 40 and 60ms.
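+//
+// A hedged sketch of the resulting behavior (b, ctx, and entry are assumed
+// to be in scope):
+//
+//	inj := NewTransactionalLatencyInjector(b, 50*time.Millisecond, 20, logger)
+//	err := inj.Put(ctx, entry) // sleeps a random 40-60ms, then calls b.Put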
+func NewTransactionalLatencyInjector(b Backend, latency time.Duration, jitter int, logger log.Logger) *TransactionalLatencyInjector {
+	return &TransactionalLatencyInjector{
+		LatencyInjector: NewLatencyInjector(b, latency, jitter, logger),
+		Transactional:   b.(Transactional),
+	}
+}
+
+func (l *LatencyInjector) SetLatency(latency time.Duration) {
+	l.logger.Info("Changing backend latency", "latency", latency)
+	l.latency.Store(latency)
+}
+
+func (l *LatencyInjector) addLatency() {
+	// Calculate a percent multiplier between 100-jitter and 100+jitter
+	percent := 100
+	if l.jitterPercent > 0 {
+		min := 100 - l.jitterPercent
+		max := 100 + l.jitterPercent
+		l.randomLock.Lock()
+		percent = l.random.Intn(max-min) + min
+		l.randomLock.Unlock()
+	}
+	latencyDuration := time.Duration(int(l.latency.Load()) * percent / 100)
+	time.Sleep(latencyDuration)
+}
+
+// Put is a latent put request
+func (l *LatencyInjector) Put(ctx context.Context, entry *Entry) error {
+	l.addLatency()
+	return l.backend.Put(ctx, entry)
+}
+
+// Get is a latent get request
+func (l *LatencyInjector) Get(ctx context.Context, key string) (*Entry, error) {
+	l.addLatency()
+	return l.backend.Get(ctx, key)
+}
+
+// Delete is a latent delete request
+func (l *LatencyInjector) Delete(ctx context.Context, key string) error {
+	l.addLatency()
+	return l.backend.Delete(ctx, key)
+}
+
+// List is a latent list request
+func (l *LatencyInjector) List(ctx context.Context, prefix string) ([]string, error) {
+	l.addLatency()
+	return l.backend.List(ctx, prefix)
+}
+
+// Transaction is a latent transaction request
+func (l *TransactionalLatencyInjector) Transaction(ctx context.Context, txns []*TxnEntry) error {
+	l.addLatency()
+	return l.Transactional.Transaction(ctx, txns)
+}
diff --git a/sdk/physical/physical.go b/sdk/physical/physical.go
new file mode 100644
index 0000000..8a6e488
--- /dev/null
+++ b/sdk/physical/physical.go
@@ -0,0 +1,200 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package physical
+
+import (
+	"context"
+	"strings"
+
+	log "github.com/hashicorp/go-hclog"
+)
+
+const DefaultParallelOperations = 128
+
+// The operation type
+type Operation string
+
+const (
+	DeleteOperation Operation = "delete"
+	GetOperation              = "get"
+	ListOperation             = "list"
+	PutOperation              = "put"
+)
+
+const (
+	ErrValueTooLarge = "put failed due to value being too large"
+	ErrKeyTooLarge   = "put failed due to key being too large"
+)
+
+// Backend is the interface required for a physical
+// backend. A physical backend is used to durably store
+// data outside of Vault. As such, it is completely untrusted,
+// and is only accessed via a security barrier. The backends
+// must represent keys in a hierarchical manner. All methods
+// are expected to be thread safe.
+type Backend interface {
+	// Put is used to insert or update an entry
+	Put(ctx context.Context, entry *Entry) error
+
+	// Get is used to fetch an entry
+	Get(ctx context.Context, key string) (*Entry, error)
+
+	// Delete is used to permanently delete an entry
+	Delete(ctx context.Context, key string) error
+
+	// List is used to list all the keys under a given
+	// prefix, up to the next prefix.
+	List(ctx context.Context, prefix string) ([]string, error)
+}
+
+// HABackend is an extension to the standard physical
+// backend to support high-availability. Vault only expects to
+// use mutual exclusion to allow multiple instances to act as a
+// hot standby for a leader that services all requests.
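+//
+// The expected call pattern, sketched with illustrative key/value arguments
+// (ha and stopCh assumed to be in scope):
+//
+//	lock, _ := ha.LockWith("leader", "node-1")
+//	leaderCh, _ := lock.Lock(stopCh) // nil leaderCh means acquisition was aborted via stopCh
+//	if leaderCh != nil {
+//		<-leaderCh // closed by the backend when leadership is lost
+//	}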
+type HABackend interface {
+	// LockWith is used for mutual exclusion based on the given key.
+	LockWith(key, value string) (Lock, error)
+
+	// Whether or not HA functionality is enabled
+	HAEnabled() bool
+}
+
+// FencingHABackend is an HABackend which provides the additional guarantee that
+// each Lock it returns from LockWith is also a FencingLock. A FencingLock
+// provides a mechanism to retrieve a fencing token that can be included by
+// future writes by the backend to ensure that it is still the current lock
+// holder at the time the write commits. Without this, timing might allow a lock
+// holder to fail to notice that it is no longer the active node for long enough
+// that it writes data to storage even while a new active node is writing,
+// causing corruption. For the Consul backend the fencing token is the session
+// id, which is submitted with a `check-session` operation on each write to
+// ensure the write only completes if the session still holds the lock. For the
+// raft backend this isn't needed because our in-process raft library is unable
+// to write if it's not the leader anyway.
+//
+// If you implement this, Vault will call RegisterActiveNodeLock with the Lock
+// instance returned by LockWith after it successfully locks it. This keeps the
+// backend oblivious to the specific key we use for active node locks and allows
+// potential usage of locks for other purposes in the future.
+//
+// Note that all implementations must support writing to storage before
+// RegisterActiveNodeLock is called, to support initialization of a new cluster.
+// They must also skip fencing writes if the write's Context contains a special
+// value. This is necessary to allow Vault to clear and re-initialise secondary
+// clusters even though there is already an active node with a specific lock
+// session, since we clear the cluster while Vault is sealed and clearing the
+// data might remove the lock in some storages (e.g. Consul). As noted above,
+// it's not generally safe to allow unfenced writes after a lock, so instead we
+// special case just a few types of writes that only happen rarely while the
+// cluster is sealed. See the IsUnfencedWrite helper function.
+type FencingHABackend interface {
+	HABackend
+
+	RegisterActiveNodeLock(l Lock) error
+}
+
+// unfencedWriteContextKeyType is a special type to identify context values to
+// disable fencing. It's a separate type per the best-practice in Context.Value
+// docs to avoid collisions even if the key might match.
+type unfencedWriteContextKeyType string
+
+const (
+	// unfencedWriteContextKey is the context key we pass the option to bypass
+	// fencing through to a FencingHABackend. Note that this is not an ideal use
+	// of context values and violates the "do not use it for optional arguments"
+	// guidance, but has been agreed as a pragmatic option for this case rather
+	// than needing to specialize every physical.Backend to understand this
+	// option.
+	unfencedWriteContextKey unfencedWriteContextKeyType = "vault-disable-fencing"
+)
+
+// UnfencedWriteCtx adds metadata to a ctx such that any writes performed
+// directly on a FencingHABackend using that context will _not_ add a fencing
+// token.
+func UnfencedWriteCtx(ctx context.Context) context.Context {
+	return context.WithValue(ctx, unfencedWriteContextKey, true)
+}
+
+// IsUnfencedWrite returns whether or not the context passed has the unfenced
+// flag value set.
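+//
+// For example (illustrative):
+//
+//	ctx := UnfencedWriteCtx(context.Background())
+//	IsUnfencedWrite(ctx)                  // true
+//	IsUnfencedWrite(context.Background()) // false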
+func IsUnfencedWrite(ctx context.Context) bool {
+	isUnfenced, ok := ctx.Value(unfencedWriteContextKey).(bool)
+	return ok && isUnfenced
+}
+
+// ToggleablePurgemonster is an interface for backends that can toggle on or
+// off special functionality and/or support purging. This is only used for the
+// cache, don't use it for other things.
+type ToggleablePurgemonster interface {
+	Purge(ctx context.Context)
+	SetEnabled(bool)
+}
+
+// RedirectDetect is an optional interface that an HABackend
+// can implement. If they do, a redirect address can be automatically
+// detected.
+type RedirectDetect interface {
+	// DetectHostAddr is used to detect the host address
+	DetectHostAddr() (string, error)
+}
+
+type Lock interface {
+	// Lock is used to acquire the given lock.
+	// The stopCh is optional and if closed should interrupt the lock
+	// acquisition attempt. The returned channel should be closed when
+	// leadership is lost.
+	Lock(stopCh <-chan struct{}) (<-chan struct{}, error)
+
+	// Unlock is used to release the lock
+	Unlock() error
+
+	// Returns the value of the lock and if it is held by _any_ node
+	Value() (bool, string, error)
+}
+
+// Factory is the factory function to create a physical backend.
+type Factory func(config map[string]string, logger log.Logger) (Backend, error)
+
+// PermitPool is used to limit maximum outstanding requests
+type PermitPool struct {
+	sem chan int
+}
+
+// NewPermitPool returns a new permit pool with the provided
+// number of permits
+func NewPermitPool(permits int) *PermitPool {
+	if permits < 1 {
+		permits = DefaultParallelOperations
+	}
+	return &PermitPool{
+		sem: make(chan int, permits),
+	}
+}
+
+// Acquire returns when a permit has been acquired
+func (c *PermitPool) Acquire() {
+	c.sem <- 1
+}
+
+// Release returns a permit to the pool
+func (c *PermitPool) Release() {
+	<-c.sem
+}
+
+// CurrentPermits returns the number of requests currently in the permit pool
+func (c *PermitPool) CurrentPermits() int {
+	return len(c.sem)
+}
+
+// Prefixes is a shared helper function that returns all parent 'folders' for a
+// given vault key.
+// e.g. for 'foo/bar/baz', it returns ['foo', 'foo/bar']
+func Prefixes(s string) []string {
+	components := strings.Split(s, "/")
+	result := []string{}
+	for i := 1; i < len(components); i++ {
+		result = append(result, strings.Join(components[:i], "/"))
+	}
+	return result
+}
diff --git a/sdk/physical/physical_access.go b/sdk/physical/physical_access.go
new file mode 100644
index 0000000..048ee83
--- /dev/null
+++ b/sdk/physical/physical_access.go
@@ -0,0 +1,43 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package physical
+
+import (
+	"context"
+)
+
+// PhysicalAccess is a wrapper around physical.Backend that allows Core to
+// expose its physical storage operations through PhysicalAccess() while
+// restricting the ability to modify Core.physical itself.
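+//
+// A hedged sketch of the intended wrapping (backend here stands in for
+// Core.physical, which callers cannot reach directly):
+//
+//	access := NewPhysicalAccess(backend)
+//	entry, err := access.Get(ctx, "some/key") // reads pass through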
+type PhysicalAccess struct { + physical Backend +} + +var _ Backend = (*PhysicalAccess)(nil) + +func NewPhysicalAccess(physical Backend) *PhysicalAccess { + return &PhysicalAccess{physical: physical} +} + +func (p *PhysicalAccess) Put(ctx context.Context, entry *Entry) error { + return p.physical.Put(ctx, entry) +} + +func (p *PhysicalAccess) Get(ctx context.Context, key string) (*Entry, error) { + return p.physical.Get(ctx, key) +} + +func (p *PhysicalAccess) Delete(ctx context.Context, key string) error { + return p.physical.Delete(ctx, key) +} + +func (p *PhysicalAccess) List(ctx context.Context, prefix string) ([]string, error) { + return p.physical.List(ctx, prefix) +} + +func (p *PhysicalAccess) Purge(ctx context.Context) { + if purgeable, ok := p.physical.(ToggleablePurgemonster); ok { + purgeable.Purge(ctx) + } +} diff --git a/sdk/physical/physical_view.go b/sdk/physical/physical_view.go new file mode 100644 index 0000000..0369e13 --- /dev/null +++ b/sdk/physical/physical_view.go @@ -0,0 +1,97 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package physical + +import ( + "context" + "errors" + "strings" +) + +var ErrRelativePath = errors.New("relative paths not supported") + +// View represents a prefixed view of a physical backend +type View struct { + backend Backend + prefix string +} + +// Verify View satisfies the correct interfaces +var _ Backend = (*View)(nil) + +// NewView takes an underlying physical backend and returns +// a view of it that can only operate with the given prefix. +func NewView(backend Backend, prefix string) *View { + return &View{ + backend: backend, + prefix: prefix, + } +} + +// List the contents of the prefixed view +func (v *View) List(ctx context.Context, prefix string) ([]string, error) { + if err := v.sanityCheck(prefix); err != nil { + return nil, err + } + return v.backend.List(ctx, v.expandKey(prefix)) +} + +// Get the key of the prefixed view +func (v *View) Get(ctx context.Context, key string) (*Entry, error) { + if err := v.sanityCheck(key); err != nil { + return nil, err + } + entry, err := v.backend.Get(ctx, v.expandKey(key)) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + entry.Key = v.truncateKey(entry.Key) + + return &Entry{ + Key: entry.Key, + Value: entry.Value, + }, nil +} + +// Put the entry into the prefix view +func (v *View) Put(ctx context.Context, entry *Entry) error { + if err := v.sanityCheck(entry.Key); err != nil { + return err + } + + nested := &Entry{ + Key: v.expandKey(entry.Key), + Value: entry.Value, + } + return v.backend.Put(ctx, nested) +} + +// Delete the entry from the prefix view +func (v *View) Delete(ctx context.Context, key string) error { + if err := v.sanityCheck(key); err != nil { + return err + } + return v.backend.Delete(ctx, v.expandKey(key)) +} + +// sanityCheck is used to perform a sanity check on a key +func (v *View) sanityCheck(key string) error { + if strings.Contains(key, "..") { + return ErrRelativePath + } + return nil +} + +// expandKey is used to expand to the full key path with the prefix +func (v *View) expandKey(suffix string) string { + return v.prefix + suffix +} + +// truncateKey is used to remove the prefix of the key +func (v *View) truncateKey(full string) string { + return strings.TrimPrefix(full, v.prefix) +} diff --git a/sdk/physical/testing.go b/sdk/physical/testing.go new file mode 100644 index 0000000..b80f369 --- /dev/null +++ b/sdk/physical/testing.go @@ -0,0 +1,520 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package physical + +import ( + "context" + "reflect" + "sort" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func ExerciseBackend(t testing.TB, b Backend) { + t.Helper() + + // Should be empty + keys, err := b.List(context.Background(), "") + if err != nil { + t.Fatalf("initial list failed: %v", err) + } + if len(keys) != 0 { + t.Errorf("initial not empty: %v", keys) + } + + // Delete should work if it does not exist + err = b.Delete(context.Background(), "foo") + if err != nil { + t.Fatalf("idempotent delete: %v", err) + } + + // Get should not fail, but be nil + out, err := b.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("initial get failed: %v", err) + } + if out != nil { + t.Errorf("initial get was not nil: %v", out) + } + + // Make an entry + e := &Entry{Key: "foo", Value: []byte("test")} + err = b.Put(context.Background(), e) + if err != nil { + t.Fatalf("put failed: %v", err) + } + + // Get should work + out, err = b.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("get failed: %v", err) + } + if !reflect.DeepEqual(out, e) { + t.Errorf("bad: %v expected: %v", out, e) + } + + // List should not be empty + keys, err = b.List(context.Background(), "") + if err != nil { + t.Fatalf("list failed: %v", err) + } + if len(keys) != 1 || keys[0] != "foo" { + t.Errorf("keys[0] did not equal foo: %v", keys) + } + + // Delete should work + err = b.Delete(context.Background(), "foo") + if err != nil { + t.Fatalf("delete: %v", err) + } + + // Should be empty + keys, err = b.List(context.Background(), "") + if err != nil { + t.Fatalf("list after delete: %v", err) + } + if len(keys) != 0 { + t.Errorf("list after delete not empty: %v", keys) + } + + // Get should fail + out, err = b.Get(context.Background(), "foo") + if err != nil { + t.Fatalf("get after delete: %v", err) + } + if out != nil { + t.Errorf("get after delete not nil: %v", out) + } + + // Multiple Puts should work; GH-189 + e = &Entry{Key: "foo", Value: []byte("test")} + err = b.Put(context.Background(), e) + if err != nil { + t.Fatalf("multi put 1 failed: %v", err) + } + e = &Entry{Key: "foo", Value: []byte("test")} + err = b.Put(context.Background(), e) + if err != nil { + t.Fatalf("multi put 2 failed: %v", err) + } + + // Make a nested entry + e = &Entry{Key: "foo/bar", Value: []byte("baz")} + err = b.Put(context.Background(), e) + if err != nil { + t.Fatalf("nested put failed: %v", err) + } + + // Get should work + out, err = b.Get(context.Background(), "foo/bar") + if err != nil { + t.Fatalf("get failed: %v", err) + } + if !reflect.DeepEqual(out, e) { + t.Errorf("bad: %v expected: %v", out, e) + } + + keys, err = b.List(context.Background(), "") + if err != nil { + t.Fatalf("list multi failed: %v", err) + } + sort.Strings(keys) + if len(keys) != 2 || keys[0] != "foo" || keys[1] != "foo/" { + t.Errorf("expected 2 keys [foo, foo/]: %v", keys) + } + + // Delete with children should work + err = b.Delete(context.Background(), "foo") + if err != nil { + t.Fatalf("delete after multi: %v", err) + } + + // Get should return the child + out, err = b.Get(context.Background(), "foo/bar") + if err != nil { + t.Fatalf("get after multi delete: %v", err) + } + if out == nil { + t.Errorf("get after multi delete not nil: %v", out) + } + + // Removal of nested secret should not leave artifacts + e = &Entry{Key: "foo/nested1/nested2/nested3", Value: []byte("baz")} + err = b.Put(context.Background(), e) + if err != nil { + t.Fatalf("deep nest: %v", err) + } + + 
err = b.Delete(context.Background(), "foo/nested1/nested2/nested3") + if err != nil { + t.Fatalf("failed to remove deep nest: %v", err) + } + + keys, err = b.List(context.Background(), "foo/") + if err != nil { + t.Fatalf("err: %v", err) + } + if len(keys) != 1 || keys[0] != "bar" { + t.Errorf("should be exactly 1 key == bar: %v", keys) + } + + // Make a second nested entry to test prefix removal + e = &Entry{Key: "foo/zip", Value: []byte("zap")} + err = b.Put(context.Background(), e) + if err != nil { + t.Fatalf("failed to create second nested: %v", err) + } + + // Delete should not remove the prefix + err = b.Delete(context.Background(), "foo/bar") + if err != nil { + t.Fatalf("failed to delete nested prefix: %v", err) + } + + keys, err = b.List(context.Background(), "") + if err != nil { + t.Fatalf("list nested prefix: %v", err) + } + if len(keys) != 1 || keys[0] != "foo/" { + t.Errorf("should be exactly 1 key == foo/: %v", keys) + } + + // Delete should remove the prefix + err = b.Delete(context.Background(), "foo/zip") + if err != nil { + t.Fatalf("failed to delete second prefix: %v", err) + } + + keys, err = b.List(context.Background(), "") + if err != nil { + t.Fatalf("listing after second delete failed: %v", err) + } + if len(keys) != 0 { + t.Errorf("should be empty at end: %v", keys) + } + + // When the root path is empty, adding and removing deep nested values should not break listing + e = &Entry{Key: "foo/nested1/nested2/value1", Value: []byte("baz")} + err = b.Put(context.Background(), e) + if err != nil { + t.Fatalf("deep nest: %v", err) + } + + e = &Entry{Key: "foo/nested1/nested2/value2", Value: []byte("baz")} + err = b.Put(context.Background(), e) + if err != nil { + t.Fatalf("deep nest: %v", err) + } + + err = b.Delete(context.Background(), "foo/nested1/nested2/value2") + if err != nil { + t.Fatalf("failed to remove deep nest: %v", err) + } + + keys, err = b.List(context.Background(), "") + if err != nil { + t.Fatalf("listing of root failed after deletion: %v", err) + } + if len(keys) == 0 { + t.Errorf("root is returning empty after deleting a single nested value, expected nested1/: %v", keys) + keys, err = b.List(context.Background(), "foo/nested1") + if err != nil { + t.Fatalf("listing of expected nested path 'foo/nested1' failed: %v", err) + } + // prove that the root should not be empty and that foo/nested1 exists + if len(keys) != 0 { + t.Logf(" keys can still be listed from nested1/ so it's not empty, expected nested2/: %v", keys) + } + } + + // cleanup left over listing bug test value + err = b.Delete(context.Background(), "foo/nested1/nested2/value1") + if err != nil { + t.Fatalf("failed to remove deep nest: %v", err) + } + + keys, err = b.List(context.Background(), "") + if err != nil { + t.Fatalf("listing of root failed after delete of deep nest: %v", err) + } + if len(keys) != 0 { + t.Errorf("should be empty at end: %v", keys) + } +} + +func ExerciseBackend_ListPrefix(t testing.TB, b Backend) { + t.Helper() + + e1 := &Entry{Key: "foo", Value: []byte("test")} + e2 := &Entry{Key: "foo/bar", Value: []byte("test")} + e3 := &Entry{Key: "foo/bar/baz", Value: []byte("test")} + + defer func() { + b.Delete(context.Background(), "foo") + b.Delete(context.Background(), "foo/bar") + b.Delete(context.Background(), "foo/bar/baz") + }() + + err := b.Put(context.Background(), e1) + if err != nil { + t.Fatalf("failed to put entry 1: %v", err) + } + err = b.Put(context.Background(), e2) + if err != nil { + t.Fatalf("failed to put entry 2: %v", err) + } + err = 
b.Put(context.Background(), e3)
+	if err != nil {
+		t.Fatalf("failed to put entry 3: %v", err)
+	}
+
+	// Scan the root
+	keys, err := b.List(context.Background(), "")
+	if err != nil {
+		t.Fatalf("list root: %v", err)
+	}
+	sort.Strings(keys)
+	if len(keys) != 2 || keys[0] != "foo" || keys[1] != "foo/" {
+		t.Errorf("root expected [foo foo/]: %v", keys)
+	}
+
+	// Scan foo/
+	keys, err = b.List(context.Background(), "foo/")
+	if err != nil {
+		t.Fatalf("list level 1: %v", err)
+	}
+	sort.Strings(keys)
+	if len(keys) != 2 || keys[0] != "bar" || keys[1] != "bar/" {
+		t.Errorf("level 1 expected [bar bar/]: %v", keys)
+	}
+
+	// Scan foo/bar/
+	keys, err = b.List(context.Background(), "foo/bar/")
+	if err != nil {
+		t.Fatalf("list level 2: %v", err)
+	}
+	sort.Strings(keys)
+	if len(keys) != 1 || keys[0] != "baz" {
+		t.Errorf("level 2 expected [baz]: %v", keys)
+	}
+}
+
+func ExerciseHABackend(t testing.TB, b HABackend, b2 HABackend) {
+	t.Helper()
+
+	// Get the lock
+	lock, err := b.LockWith("foo", "bar")
+	if err != nil {
+		t.Fatalf("initial lock: %v", err)
+	}
+
+	// Attempt to lock
+	leaderCh, err := lock.Lock(nil)
+	if err != nil {
+		t.Fatalf("lock attempt 1: %v", err)
+	}
+	if leaderCh == nil {
+		t.Fatalf("missing leaderCh")
+	}
+
+	// Check the value
+	held, val, err := lock.Value()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if !held {
+		t.Errorf("should be held")
+	}
+	if val != "bar" {
+		t.Errorf("expected value bar, got: %v", val)
+	}
+
+	// Check if it's fencing that we can register the lock
+	if fba, ok := b.(FencingHABackend); ok {
+		require.NoError(t, fba.RegisterActiveNodeLock(lock))
+	}
+
+	// Second acquisition should fail
+	lock2, err := b2.LockWith("foo", "baz")
+	if err != nil {
+		t.Fatalf("lock 2: %v", err)
+	}
+
+	// Checking the lock from b2 should discover that the lock is held, since
+	// held implies only that there is _some_ leader, not that b2 is the leader
+	// (this was not clear before so we make it explicit with this assertion).
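+	// Note that val2 reports the current holder's value ("bar" from lock),
+	// not the "baz" that lock2 was created with.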
+ held2, val2, err := lock2.Value() + require.NoError(t, err) + require.Equal(t, "bar", val2) + require.True(t, held2) + + // Cancel attempt in 50 msec + stopCh := make(chan struct{}) + time.AfterFunc(50*time.Millisecond, func() { + close(stopCh) + }) + + // Attempt to lock + leaderCh2, err := lock2.Lock(stopCh) + if err != nil { + t.Fatalf("stop lock 2: %v", err) + } + if leaderCh2 != nil { + t.Errorf("should not have gotten leaderCh: %v", leaderCh2) + } + + // Release the first lock + lock.Unlock() + + // Attempt to lock should work + leaderCh2, err = lock2.Lock(nil) + if err != nil { + t.Fatalf("lock 2 lock: %v", err) + } + if leaderCh2 == nil { + t.Errorf("should get leaderCh") + } + + // Check if it's fencing that we can register the lock + if fba2, ok := b2.(FencingHABackend); ok { + require.NoError(t, fba2.RegisterActiveNodeLock(lock)) + } + + // Check the value + held, val, err = lock2.Value() + if err != nil { + t.Fatalf("value: %v", err) + } + if !held { + t.Errorf("should still be held") + } + if val != "baz" { + t.Errorf("expected: baz, got: %v", val) + } + + // Cleanup + lock2.Unlock() +} + +func ExerciseTransactionalBackend(t testing.TB, b Backend) { + t.Helper() + tb, ok := b.(Transactional) + if !ok { + t.Fatal("Not a transactional backend") + } + + txns := SetupTestingTransactions(t, b) + + if err := tb.Transaction(context.Background(), txns); err != nil { + t.Fatal(err) + } + + keys, err := b.List(context.Background(), "") + if err != nil { + t.Fatal(err) + } + + expected := []string{"foo", "zip"} + + sort.Strings(keys) + sort.Strings(expected) + if !reflect.DeepEqual(keys, expected) { + t.Fatalf("mismatch: expected\n%#v\ngot\n%#v\n", expected, keys) + } + + entry, err := b.Get(context.Background(), "foo") + if err != nil { + t.Fatal(err) + } + if entry == nil { + t.Fatal("got nil entry") + } + if entry.Value == nil { + t.Fatal("got nil value") + } + if string(entry.Value) != "bar3" { + t.Fatal("updates did not apply correctly") + } + + entry, err = b.Get(context.Background(), "zip") + if err != nil { + t.Fatal(err) + } + if entry == nil { + t.Fatal("got nil entry") + } + if entry.Value == nil { + t.Fatal("got nil value") + } + if string(entry.Value) != "zap3" { + t.Fatal("updates did not apply correctly") + } +} + +func SetupTestingTransactions(t testing.TB, b Backend) []*TxnEntry { + t.Helper() + // Add a few keys so that we test rollback with deletion + if err := b.Put(context.Background(), &Entry{ + Key: "foo", + Value: []byte("bar"), + }); err != nil { + t.Fatal(err) + } + if err := b.Put(context.Background(), &Entry{ + Key: "zip", + Value: []byte("zap"), + }); err != nil { + t.Fatal(err) + } + if err := b.Put(context.Background(), &Entry{ + Key: "deleteme", + }); err != nil { + t.Fatal(err) + } + if err := b.Put(context.Background(), &Entry{ + Key: "deleteme2", + }); err != nil { + t.Fatal(err) + } + + txns := []*TxnEntry{ + { + Operation: PutOperation, + Entry: &Entry{ + Key: "foo", + Value: []byte("bar2"), + }, + }, + { + Operation: DeleteOperation, + Entry: &Entry{ + Key: "deleteme", + }, + }, + { + Operation: PutOperation, + Entry: &Entry{ + Key: "foo", + Value: []byte("bar3"), + }, + }, + { + Operation: DeleteOperation, + Entry: &Entry{ + Key: "deleteme2", + }, + }, + { + Operation: PutOperation, + Entry: &Entry{ + Key: "zip", + Value: []byte("zap3"), + }, + }, + } + + return txns +} diff --git a/sdk/physical/transactions.go b/sdk/physical/transactions.go new file mode 100644 index 0000000..8d4e333 --- /dev/null +++ b/sdk/physical/transactions.go @@ -0,0 +1,153 
@@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package physical
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/hashicorp/go-multierror"
+)
+
+// TxnEntry is an operation that takes place atomically as part of
+// a transactional update. Only supported by Transactional backends.
+type TxnEntry struct {
+	Operation Operation
+	Entry     *Entry
+}
+
+func (t *TxnEntry) String() string {
+	return fmt.Sprintf("Operation: %s. Entry: %s", t.Operation, t.Entry)
+}
+
+// Transactional is an optional interface for backends that
+// support doing transactional updates of multiple keys. This is
+// required for some features such as replication.
+type Transactional interface {
+	// The function to run a transaction
+	Transaction(context.Context, []*TxnEntry) error
+}
+
+type TransactionalBackend interface {
+	Backend
+	Transactional
+}
+
+type PseudoTransactional interface {
+	// An internal function should do no locking or permit pool acquisition.
+	// Depending on the backend and if it natively supports transactions, these
+	// may simply chain to the normal backend functions.
+	GetInternal(context.Context, string) (*Entry, error)
+	PutInternal(context.Context, *Entry) error
+	DeleteInternal(context.Context, string) error
+}
+
+// GenericTransactionHandler implements the transaction interface on behalf of
+// a PseudoTransactional backend.
+func GenericTransactionHandler(ctx context.Context, t PseudoTransactional, txns []*TxnEntry) (retErr error) {
+	rollbackStack := make([]*TxnEntry, 0, len(txns))
+	var dirty bool
+
+	// Update all of our GET transaction entries, so we can populate existing values back at the WAL layer.
+	for _, txn := range txns {
+		if txn.Operation == GetOperation {
+			entry, err := t.GetInternal(ctx, txn.Entry.Key)
+			if err != nil {
+				return err
+			}
+			if entry != nil {
+				txn.Entry.Value = entry.Value
+			}
+		}
+	}
+
+	// We walk the transactions in order; each successful operation goes into a
+	// LIFO for rollback if we hit an error along the way
+TxnWalk:
+	for _, txn := range txns {
+		switch txn.Operation {
+		case DeleteOperation:
+			entry, err := t.GetInternal(ctx, txn.Entry.Key)
+			if err != nil {
+				retErr = multierror.Append(retErr, err)
+				dirty = true
+				break TxnWalk
+			}
+			if entry == nil {
+				// Nothing to delete or roll back
+				continue
+			}
+			rollbackEntry := &TxnEntry{
+				Operation: PutOperation,
+				Entry: &Entry{
+					Key:   entry.Key,
+					Value: entry.Value,
+				},
+			}
+			err = t.DeleteInternal(ctx, txn.Entry.Key)
+			if err != nil {
+				retErr = multierror.Append(retErr, err)
+				dirty = true
+				break TxnWalk
+			}
+			rollbackStack = append([]*TxnEntry{rollbackEntry}, rollbackStack...)
+
+		case PutOperation:
+			entry, err := t.GetInternal(ctx, txn.Entry.Key)
+			if err != nil {
+				retErr = multierror.Append(retErr, err)
+				dirty = true
+				break TxnWalk
+			}
+
+			// Nothing existed, so rolling back in fact requires a delete
+			var rollbackEntry *TxnEntry
+			if entry == nil {
+				rollbackEntry = &TxnEntry{
+					Operation: DeleteOperation,
+					Entry: &Entry{
+						Key: txn.Entry.Key,
+					},
+				}
+			} else {
+				rollbackEntry = &TxnEntry{
+					Operation: PutOperation,
+					Entry: &Entry{
+						Key:   entry.Key,
+						Value: entry.Value,
+					},
+				}
+			}
+
+			err = t.PutInternal(ctx, txn.Entry)
+			if err != nil {
+				retErr = multierror.Append(retErr, err)
+				dirty = true
+				break TxnWalk
+			}
+			rollbackStack = append([]*TxnEntry{rollbackEntry}, rollbackStack...)
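+
+			// As in the delete case above, rollback entries are prepended so
+			// that rollbackStack replays the inverse operations in LIFO order.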
+		}
+	}
+
+	// Need to roll back because we hit an error along the way
+	if dirty {
+		// While traversing this, if we get an error, we continue anyway in
+		// best-effort fashion
+		for _, txn := range rollbackStack {
+			switch txn.Operation {
+			case DeleteOperation:
+				err := t.DeleteInternal(ctx, txn.Entry.Key)
+				if err != nil {
+					retErr = multierror.Append(retErr, err)
+				}
+			case PutOperation:
+				err := t.PutInternal(ctx, txn.Entry)
+				if err != nil {
+					retErr = multierror.Append(retErr, err)
+				}
+			}
+		}
+	}
+
+	return
+}
diff --git a/sdk/plugin/backend.go b/sdk/plugin/backend.go
new file mode 100644
index 0000000..2da1378
--- /dev/null
+++ b/sdk/plugin/backend.go
@@ -0,0 +1,76 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+	"context"
+	"sync/atomic"
+
+	"google.golang.org/grpc"
+
+	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/go-plugin"
+	"github.com/hashicorp/vault/sdk/helper/pluginutil"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/hashicorp/vault/sdk/plugin/pb"
+)
+
+var (
+	_ plugin.Plugin     = (*GRPCBackendPlugin)(nil)
+	_ plugin.GRPCPlugin = (*GRPCBackendPlugin)(nil)
+)
+
+// GRPCBackendPlugin is the plugin.Plugin implementation that only supports GRPC
+// transport
+type GRPCBackendPlugin struct {
+	Factory      logical.Factory
+	MetadataMode bool
+	Logger       log.Logger
+
+	MultiplexingSupport bool
+
+	// Embedding this will disable the netRPC protocol
+	plugin.NetRPCUnsupportedPlugin
+}
+
+func (b GRPCBackendPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error {
+	server := backendGRPCPluginServer{
+		broker:    broker,
+		factory:   b.Factory,
+		instances: make(map[string]backendInstance),
+		// We pass the logger down into the backend so go-plugin will
+		// forward logs for us.
+		logger: b.Logger,
+	}
+
+	if b.MultiplexingSupport {
+		// Multiplexing is enabled for this plugin, register the server so we
+		// can tell the client in Vault.
+		pluginutil.RegisterPluginMultiplexingServer(s, pluginutil.PluginMultiplexingServerImpl{
+			Supported: true,
+		})
+		server.multiplexingSupport = true
+	}
+
+	pb.RegisterBackendServer(s, &server)
+	logical.RegisterPluginVersionServer(s, &server)
+	return nil
+}
+
+func (b *GRPCBackendPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) {
+	ret := &backendGRPCPluginClient{
+		client:        pb.NewBackendClient(c),
+		versionClient: logical.NewPluginVersionClient(c),
+		broker:        broker,
+		cleanupCh:     make(chan struct{}),
+		doneCtx:       ctx,
+		metadataMode:  b.MetadataMode,
+	}
+
+	// Create the value and set the type
+	ret.server = new(atomic.Value)
+	ret.server.Store((*grpc.Server)(nil))
+
+	return ret, nil
+}
diff --git a/sdk/plugin/grpc_backend.go b/sdk/plugin/grpc_backend.go
new file mode 100644
index 0000000..f0114b9
--- /dev/null
+++ b/sdk/plugin/grpc_backend.go
@@ -0,0 +1,15 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+	"math"
+
+	"google.golang.org/grpc"
+)
+
+var largeMsgGRPCCallOpts []grpc.CallOption = []grpc.CallOption{
+	grpc.MaxCallSendMsgSize(math.MaxInt32),
+	grpc.MaxCallRecvMsgSize(math.MaxInt32),
+}
diff --git a/sdk/plugin/grpc_backend_client.go b/sdk/plugin/grpc_backend_client.go
new file mode 100644
index 0000000..a343356
--- /dev/null
+++ b/sdk/plugin/grpc_backend_client.go
@@ -0,0 +1,307 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+	"context"
+	"errors"
+	"math"
+	"sync/atomic"
+
+	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/go-plugin"
+	"github.com/hashicorp/vault/sdk/helper/pluginutil"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/hashicorp/vault/sdk/plugin/pb"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+var (
+	ErrPluginShutdown       = errors.New("plugin is shut down")
+	ErrClientInMetadataMode = errors.New("plugin client cannot perform action while in metadata mode")
+)
+
+// Validate backendGRPCPluginClient satisfies the logical.Backend interface
+var _ logical.Backend = &backendGRPCPluginClient{}
+
+// backendGRPCPluginClient implements logical.Backend and is the
+// go-plugin client.
+type backendGRPCPluginClient struct {
+	broker        *plugin.GRPCBroker
+	client        pb.BackendClient
+	versionClient logical.PluginVersionClient
+	metadataMode  bool
+
+	system logical.SystemView
+	logger log.Logger
+
+	// This is used to signal to the Cleanup function that it can proceed
+	// because we have a defined server
+	cleanupCh chan struct{}
+
+	// server is the grpc server used for serving storage and sysview requests.
+	server *atomic.Value
+
+	doneCtx context.Context
+}
+
+func (b *backendGRPCPluginClient) Initialize(ctx context.Context, _ *logical.InitializationRequest) error {
+	if b.metadataMode {
+		return nil
+	}
+
+	ctx, cancel := context.WithCancel(ctx)
+	quitCh := pluginutil.CtxCancelIfCanceled(cancel, b.doneCtx)
+	defer close(quitCh)
+	defer cancel()
+
+	reply, err := b.client.Initialize(ctx, &pb.InitializeArgs{}, largeMsgGRPCCallOpts...)
+	if err != nil {
+		if b.doneCtx.Err() != nil {
+			return ErrPluginShutdown
+		}
+
+		// If the plugin doesn't have Initialize implemented we should not fail
+		// the initialize call; otherwise this could halt startup of vault.
+		grpcStatus, ok := status.FromError(err)
+		if ok && grpcStatus.Code() == codes.Unimplemented {
+			return nil
+		}
+
+		return err
+	}
+	if reply.Err != nil {
+		return pb.ProtoErrToErr(reply.Err)
+	}
+
+	return nil
+}
+
+func (b *backendGRPCPluginClient) HandleRequest(ctx context.Context, req *logical.Request) (*logical.Response, error) {
+	if b.metadataMode {
+		return nil, ErrClientInMetadataMode
+	}
+
+	ctx, cancel := context.WithCancel(ctx)
+	quitCh := pluginutil.CtxCancelIfCanceled(cancel, b.doneCtx)
+	defer close(quitCh)
+	defer cancel()
+
+	protoReq, err := pb.LogicalRequestToProtoRequest(req)
+	if err != nil {
+		return nil, err
+	}
+
+	reply, err := b.client.HandleRequest(ctx, &pb.HandleRequestArgs{
+		Request: protoReq,
+	}, largeMsgGRPCCallOpts...)
+	if err != nil {
+		if b.doneCtx.Err() != nil {
+			return nil, ErrPluginShutdown
+		}
+
+		return nil, err
+	}
+	resp, err := pb.ProtoResponseToLogicalResponse(reply.Response)
+	if err != nil {
+		return nil, err
+	}
+	if reply.Err != nil {
+		return resp, pb.ProtoErrToErr(reply.Err)
+	}
+
+	return resp, nil
+}
+
+func (b *backendGRPCPluginClient) SpecialPaths() *logical.Paths {
+	reply, err := b.client.SpecialPaths(b.doneCtx, &pb.Empty{})
+	if err != nil {
+		return nil
+	}
+
+	if reply.Paths == nil {
+		return nil
+	}
+
+	return &logical.Paths{
+		Root:                  reply.Paths.Root,
+		Unauthenticated:       reply.Paths.Unauthenticated,
+		LocalStorage:          reply.Paths.LocalStorage,
+		SealWrapStorage:       reply.Paths.SealWrapStorage,
+		WriteForwardedStorage: reply.Paths.WriteForwardedStorage,
+	}
+}
+
+// System returns vault's system view.
The backend client stores the view during +// Setup, so there is no need to shim the system just to get it back. +func (b *backendGRPCPluginClient) System() logical.SystemView { + return b.system +} + +// Logger returns vault's logger. The backend client stores the logger during +// Setup, so there is no need to shim the logger just to get it back. +func (b *backendGRPCPluginClient) Logger() log.Logger { + return b.logger +} + +func (b *backendGRPCPluginClient) HandleExistenceCheck(ctx context.Context, req *logical.Request) (bool, bool, error) { + if b.metadataMode { + return false, false, ErrClientInMetadataMode + } + + protoReq, err := pb.LogicalRequestToProtoRequest(req) + if err != nil { + return false, false, err + } + + ctx, cancel := context.WithCancel(ctx) + quitCh := pluginutil.CtxCancelIfCanceled(cancel, b.doneCtx) + defer close(quitCh) + defer cancel() + reply, err := b.client.HandleExistenceCheck(ctx, &pb.HandleExistenceCheckArgs{ + Request: protoReq, + }, largeMsgGRPCCallOpts...) + if err != nil { + if b.doneCtx.Err() != nil { + return false, false, ErrPluginShutdown + } + return false, false, err + } + if reply.Err != nil { + return false, false, pb.ProtoErrToErr(reply.Err) + } + + return reply.CheckFound, reply.Exists, nil +} + +func (b *backendGRPCPluginClient) Cleanup(ctx context.Context) { + ctx, cancel := context.WithCancel(ctx) + quitCh := pluginutil.CtxCancelIfCanceled(cancel, b.doneCtx) + defer close(quitCh) + defer cancel() + + b.client.Cleanup(ctx, &pb.Empty{}) + + // This will block until Setup has run the function to create a new server + // in b.server. If we stop here before it has a chance to actually start + // listening, when it starts listening it will immediately error out and + // exit, which is fine. Overall this ensures that we do not miss stopping + // the server if it ends up being created after Cleanup is called. + <-b.cleanupCh + server := b.server.Load() + if server != nil { + server.(*grpc.Server).GracefulStop() + } +} + +func (b *backendGRPCPluginClient) InvalidateKey(ctx context.Context, key string) { + if b.metadataMode { + return + } + + ctx, cancel := context.WithCancel(ctx) + quitCh := pluginutil.CtxCancelIfCanceled(cancel, b.doneCtx) + defer close(quitCh) + defer cancel() + + b.client.InvalidateKey(ctx, &pb.InvalidateKeyArgs{ + Key: key, + }) +} + +func (b *backendGRPCPluginClient) Setup(ctx context.Context, config *logical.BackendConfig) error { + // Shim logical.Storage + storageImpl := config.StorageView + if b.metadataMode { + storageImpl = &NOOPStorage{} + } + storage := &GRPCStorageServer{ + impl: storageImpl, + } + + // Shim logical.SystemView + sysViewImpl := config.System + if b.metadataMode { + sysViewImpl = &logical.StaticSystemView{} + } + sysView := &gRPCSystemViewServer{ + impl: sysViewImpl, + } + + events := &GRPCEventsServer{ + impl: config.EventsSender, + } + + // Register the server in this closure. + serverFunc := func(opts []grpc.ServerOption) *grpc.Server { + opts = append(opts, grpc.MaxRecvMsgSize(math.MaxInt32)) + opts = append(opts, grpc.MaxSendMsgSize(math.MaxInt32)) + + s := grpc.NewServer(opts...) 
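+
+		// Register the storage, system view, and events shims on the brokered
+		// server so the plugin process can call back into Vault through them.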
+ pb.RegisterSystemViewServer(s, sysView) + pb.RegisterStorageServer(s, storage) + pb.RegisterEventsServer(s, events) + b.server.Store(s) + close(b.cleanupCh) + return s + } + brokerID := b.broker.NextId() + go b.broker.AcceptAndServe(brokerID, serverFunc) + + args := &pb.SetupArgs{ + BrokerID: brokerID, + Config: config.Config, + BackendUUID: config.BackendUUID, + } + + ctx, cancel := context.WithCancel(ctx) + quitCh := pluginutil.CtxCancelIfCanceled(cancel, b.doneCtx) + defer close(quitCh) + defer cancel() + + reply, err := b.client.Setup(ctx, args) + if err != nil { + return err + } + if reply.Err != "" { + return errors.New(reply.Err) + } + + // Set system and logger for getter methods + b.system = config.System + b.logger = config.Logger + + return nil +} + +func (b *backendGRPCPluginClient) Type() logical.BackendType { + reply, err := b.client.Type(b.doneCtx, &pb.Empty{}) + if err != nil { + return logical.TypeUnknown + } + + return logical.BackendType(reply.Type) +} + +func (b *backendGRPCPluginClient) PluginVersion() logical.PluginVersion { + reply, err := b.versionClient.Version(b.doneCtx, &logical.Empty{}) + if err != nil { + if stErr, ok := status.FromError(err); ok { + if stErr.Code() == codes.Unimplemented { + return logical.EmptyPluginVersion + } + } + b.Logger().Warn("Unknown error getting plugin version", "err", err) + return logical.EmptyPluginVersion + } + return logical.PluginVersion{ + Version: reply.GetPluginVersion(), + } +} + +func (b *backendGRPCPluginClient) IsExternal() bool { + return true +} diff --git a/sdk/plugin/grpc_backend_server.go b/sdk/plugin/grpc_backend_server.go new file mode 100644 index 0000000..3356e46 --- /dev/null +++ b/sdk/plugin/grpc_backend_server.go @@ -0,0 +1,294 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "context" + "errors" + "fmt" + "sync" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/plugin/pb" + "google.golang.org/grpc" +) + +var ErrServerInMetadataMode = errors.New("plugin server can not perform action while in metadata mode") + +// singleImplementationID is the string used to define the instance ID of a +// non-multiplexed plugin +const singleImplementationID string = "single" + +type backendInstance struct { + brokeredClient *grpc.ClientConn + backend logical.Backend +} + +type backendGRPCPluginServer struct { + pb.UnimplementedBackendServer + logical.UnimplementedPluginVersionServer + + broker *plugin.GRPCBroker + + instances map[string]backendInstance + instancesLock sync.RWMutex + multiplexingSupport bool + + factory logical.Factory + + logger log.Logger +} + +// getBackendAndBrokeredClientInternal returns the backend and client +// connection but does not hold a lock +func (b *backendGRPCPluginServer) getBackendAndBrokeredClientInternal(ctx context.Context) (logical.Backend, *grpc.ClientConn, error) { + if b.multiplexingSupport { + id, err := pluginutil.GetMultiplexIDFromContext(ctx) + if err != nil { + return nil, nil, err + } + + if inst, ok := b.instances[id]; ok { + return inst.backend, inst.brokeredClient, nil + } + + } + + if singleImpl, ok := b.instances[singleImplementationID]; ok { + return singleImpl.backend, singleImpl.brokeredClient, nil + } + + return nil, nil, fmt.Errorf("no backend instance found") +} + +// getBackendAndBrokeredClient holds a read lock and returns the backend and +// client 
connection
+func (b *backendGRPCPluginServer) getBackendAndBrokeredClient(ctx context.Context) (logical.Backend, *grpc.ClientConn, error) {
+	b.instancesLock.RLock()
+	defer b.instancesLock.RUnlock()
+	return b.getBackendAndBrokeredClientInternal(ctx)
+}
+
+// Setup dials into the plugin's broker to get a shimmed storage, logger, and
+// system view of the backend. This method also instantiates the underlying
+// backend through its factory func for the server side of the plugin.
+func (b *backendGRPCPluginServer) Setup(ctx context.Context, args *pb.SetupArgs) (*pb.SetupReply, error) {
+	var err error
+	id := singleImplementationID
+
+	if b.multiplexingSupport {
+		id, err = pluginutil.GetMultiplexIDFromContext(ctx)
+		if err != nil {
+			return &pb.SetupReply{}, err
+		}
+	}
+
+	// Dial for storage
+	brokeredClient, err := b.broker.Dial(args.BrokerID)
+	if err != nil {
+		return &pb.SetupReply{}, err
+	}
+
+	storage := newGRPCStorageClient(brokeredClient)
+	sysView := newGRPCSystemView(brokeredClient)
+	events := newGRPCEventsClient(brokeredClient)
+
+	config := &logical.BackendConfig{
+		StorageView:  storage,
+		Logger:       b.logger,
+		System:       sysView,
+		Config:       args.Config,
+		BackendUUID:  args.BackendUUID,
+		EventsSender: events,
+	}
+
+	// Call the underlying backend factory after shims have been created
+	// to instantiate the backend for this instance ID
+	backend, err := b.factory(ctx, config)
+	if err != nil {
+		return &pb.SetupReply{
+			Err: pb.ErrToString(err),
+		}, nil
+	}
+
+	b.instancesLock.Lock()
+	defer b.instancesLock.Unlock()
+	b.instances[id] = backendInstance{
+		brokeredClient: brokeredClient,
+		backend:        backend,
+	}
+
+	return &pb.SetupReply{}, nil
+}
+
+func (b *backendGRPCPluginServer) HandleRequest(ctx context.Context, args *pb.HandleRequestArgs) (*pb.HandleRequestReply, error) {
+	backend, brokeredClient, err := b.getBackendAndBrokeredClient(ctx)
+	if err != nil {
+		return &pb.HandleRequestReply{}, err
+	}
+
+	if pluginutil.InMetadataMode() {
+		return &pb.HandleRequestReply{}, ErrServerInMetadataMode
+	}
+
+	logicalReq, err := pb.ProtoRequestToLogicalRequest(args.Request)
+	if err != nil {
+		return &pb.HandleRequestReply{}, err
+	}
+
+	logicalReq.Storage = newGRPCStorageClient(brokeredClient)
+
+	resp, respErr := backend.HandleRequest(ctx, logicalReq)
+
+	pbResp, err := pb.LogicalResponseToProtoResponse(resp)
+	if err != nil {
+		return &pb.HandleRequestReply{}, err
+	}
+
+	return &pb.HandleRequestReply{
+		Response: pbResp,
+		Err:      pb.ErrToProtoErr(respErr),
+	}, nil
+}
+
+func (b *backendGRPCPluginServer) Initialize(ctx context.Context, _ *pb.InitializeArgs) (*pb.InitializeReply, error) {
+	backend, brokeredClient, err := b.getBackendAndBrokeredClient(ctx)
+	if err != nil {
+		return &pb.InitializeReply{}, err
+	}
+
+	if pluginutil.InMetadataMode() {
+		return &pb.InitializeReply{}, ErrServerInMetadataMode
+	}
+
+	req := &logical.InitializationRequest{
+		Storage: newGRPCStorageClient(brokeredClient),
+	}
+
+	respErr := backend.Initialize(ctx, req)
+
+	return &pb.InitializeReply{
+		Err: pb.ErrToProtoErr(respErr),
+	}, nil
+}
+
+func (b *backendGRPCPluginServer) SpecialPaths(ctx context.Context, args *pb.Empty) (*pb.SpecialPathsReply, error) {
+	backend, _, err := b.getBackendAndBrokeredClient(ctx)
+	if err != nil {
+		return &pb.SpecialPathsReply{}, err
+	}
+
+	paths := backend.SpecialPaths()
+	if paths == nil {
+		return &pb.SpecialPathsReply{
+			Paths: nil,
+		}, nil
+	}
+
+	return &pb.SpecialPathsReply{
+		Paths: &pb.Paths{
+			Root:                  paths.Root,
+			Unauthenticated:       paths.Unauthenticated,
+			LocalStorage:
paths.LocalStorage, + SealWrapStorage: paths.SealWrapStorage, + WriteForwardedStorage: paths.WriteForwardedStorage, + }, + }, nil +} + +func (b *backendGRPCPluginServer) HandleExistenceCheck(ctx context.Context, args *pb.HandleExistenceCheckArgs) (*pb.HandleExistenceCheckReply, error) { + backend, brokeredClient, err := b.getBackendAndBrokeredClient(ctx) + if err != nil { + return &pb.HandleExistenceCheckReply{}, err + } + + if pluginutil.InMetadataMode() { + return &pb.HandleExistenceCheckReply{}, ErrServerInMetadataMode + } + + logicalReq, err := pb.ProtoRequestToLogicalRequest(args.Request) + if err != nil { + return &pb.HandleExistenceCheckReply{}, err + } + + logicalReq.Storage = newGRPCStorageClient(brokeredClient) + + checkFound, exists, err := backend.HandleExistenceCheck(ctx, logicalReq) + return &pb.HandleExistenceCheckReply{ + CheckFound: checkFound, + Exists: exists, + Err: pb.ErrToProtoErr(err), + }, nil +} + +func (b *backendGRPCPluginServer) Cleanup(ctx context.Context, _ *pb.Empty) (*pb.Empty, error) { + b.instancesLock.Lock() + defer b.instancesLock.Unlock() + + backend, brokeredClient, err := b.getBackendAndBrokeredClientInternal(ctx) + if err != nil { + return &pb.Empty{}, err + } + + backend.Cleanup(ctx) + + // Close rpc clients + brokeredClient.Close() + + if b.multiplexingSupport { + id, err := pluginutil.GetMultiplexIDFromContext(ctx) + if err != nil { + return nil, err + } + delete(b.instances, id) + } else if _, ok := b.instances[singleImplementationID]; ok { + delete(b.instances, singleImplementationID) + } + + return &pb.Empty{}, nil +} + +func (b *backendGRPCPluginServer) InvalidateKey(ctx context.Context, args *pb.InvalidateKeyArgs) (*pb.Empty, error) { + backend, _, err := b.getBackendAndBrokeredClient(ctx) + if err != nil { + return &pb.Empty{}, err + } + + if pluginutil.InMetadataMode() { + return &pb.Empty{}, ErrServerInMetadataMode + } + + backend.InvalidateKey(ctx, args.Key) + return &pb.Empty{}, nil +} + +func (b *backendGRPCPluginServer) Type(ctx context.Context, _ *pb.Empty) (*pb.TypeReply, error) { + backend, _, err := b.getBackendAndBrokeredClient(ctx) + if err != nil { + return &pb.TypeReply{}, err + } + + return &pb.TypeReply{ + Type: uint32(backend.Type()), + }, nil +} + +func (b *backendGRPCPluginServer) Version(ctx context.Context, _ *logical.Empty) (*logical.VersionReply, error) { + backend, _, err := b.getBackendAndBrokeredClient(ctx) + if err != nil { + return &logical.VersionReply{}, err + } + + if versioner, ok := backend.(logical.PluginVersioner); ok { + return &logical.VersionReply{ + PluginVersion: versioner.PluginVersion().Version, + }, nil + } + return &logical.VersionReply{ + PluginVersion: "", + }, nil +} diff --git a/sdk/plugin/grpc_backend_test.go b/sdk/plugin/grpc_backend_test.go new file mode 100644 index 0000000..01a6ea6 --- /dev/null +++ b/sdk/plugin/grpc_backend_test.go @@ -0,0 +1,205 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+	"context"
+	"os"
+	"testing"
+	"time"
+
+	log "github.com/hashicorp/go-hclog"
+	gplugin "github.com/hashicorp/go-plugin"
+	"github.com/hashicorp/vault/sdk/helper/logging"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/hashicorp/vault/sdk/plugin/mock"
+)
+
+func TestGRPCBackendPlugin_impl(t *testing.T) {
+	var _ gplugin.Plugin = new(GRPCBackendPlugin)
+	var _ logical.Backend = new(backendGRPCPluginClient)
+}
+
+func TestGRPCBackendPlugin_HandleRequest(t *testing.T) {
+	b, cleanup := testGRPCBackend(t)
+	defer cleanup()
+
+	resp, err := b.HandleRequest(context.Background(), &logical.Request{
+		Operation: logical.CreateOperation,
+		Path:      "kv/foo",
+		Data: map[string]interface{}{
+			"value": "bar",
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp.Data["value"] != "bar" {
+		t.Fatalf("bad: %#v", resp)
+	}
+}
+
+func TestGRPCBackendPlugin_SpecialPaths(t *testing.T) {
+	b, cleanup := testGRPCBackend(t)
+	defer cleanup()
+
+	paths := b.SpecialPaths()
+	if paths == nil {
+		t.Fatal("SpecialPaths() returned nil")
+	}
+}
+
+func TestGRPCBackendPlugin_System(t *testing.T) {
+	b, cleanup := testGRPCBackend(t)
+	defer cleanup()
+
+	sys := b.System()
+	if sys == nil {
+		t.Fatal("System() returned nil")
+	}
+
+	actual := sys.DefaultLeaseTTL()
+	expected := 300 * time.Second
+
+	if actual != expected {
+		t.Fatalf("bad: %v, expected %v", actual, expected)
+	}
+}
+
+func TestGRPCBackendPlugin_Logger(t *testing.T) {
+	b, cleanup := testGRPCBackend(t)
+	defer cleanup()
+
+	logger := b.Logger()
+	if logger == nil {
+		t.Fatal("Logger() returned nil")
+	}
+}
+
+func TestGRPCBackendPlugin_HandleExistenceCheck(t *testing.T) {
+	b, cleanup := testGRPCBackend(t)
+	defer cleanup()
+
+	checkFound, exists, err := b.HandleExistenceCheck(context.Background(), &logical.Request{
+		Operation: logical.CreateOperation,
+		Path:      "kv/foo",
+		Data:      map[string]interface{}{"value": "bar"},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !checkFound {
+		t.Fatal("existence check not found for path 'kv/foo'")
+	}
+	if exists {
+		t.Fatal("existence check should have returned 'false' for 'kv/foo'")
+	}
+}
+
+func TestGRPCBackendPlugin_Cleanup(t *testing.T) {
+	b, cleanup := testGRPCBackend(t)
+	defer cleanup()
+
+	b.Cleanup(context.Background())
+}
+
+func TestGRPCBackendPlugin_InvalidateKey(t *testing.T) {
+	b, cleanup := testGRPCBackend(t)
+	defer cleanup()
+
+	ctx := context.Background()
+
+	resp, err := b.HandleRequest(ctx, &logical.Request{
+		Operation: logical.ReadOperation,
+		Path:      "internal",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp.Data["value"] == "" {
+		t.Fatalf("bad: %#v, expected non-empty value", resp)
+	}
+
+	b.InvalidateKey(ctx, "internal")
+
+	resp, err = b.HandleRequest(ctx, &logical.Request{
+		Operation: logical.ReadOperation,
+		Path:      "internal",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp.Data["value"] != "" {
+		t.Fatalf("bad: expected empty response data, got %#v", resp)
+	}
+}
+
+func TestGRPCBackendPlugin_Setup(t *testing.T) {
+	_, cleanup := testGRPCBackend(t)
+	defer cleanup()
+}
+
+func TestGRPCBackendPlugin_Initialize(t *testing.T) {
+	b, cleanup := testGRPCBackend(t)
+	defer cleanup()
+
+	err := b.Initialize(context.Background(), &logical.InitializationRequest{})
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestGRPCBackendPlugin_Version(t *testing.T) {
+	b, cleanup := testGRPCBackend(t)
+	defer cleanup()
+
+	versioner, ok := b.(logical.PluginVersioner)
+	if !ok {
+		t.Fatalf("Expected %T to implement logical.PluginVersioner interface", b)
+	}
+
+	version := versioner.PluginVersion().Version
+	if version != "v0.0.0+mock" {
+		t.Fatalf("Got version %s, expected 'v0.0.0+mock'", version)
+	}
+}
+
+func testGRPCBackend(t *testing.T) (logical.Backend, func()) {
+	// Create a mock provider
+	pluginMap := map[string]gplugin.Plugin{
+		"backend": &GRPCBackendPlugin{
+			Factory: mock.Factory,
+			Logger: log.New(&log.LoggerOptions{
+				Level:      log.Debug,
+				Output:     os.Stderr,
+				JSONFormat: true,
+			}),
+		},
+	}
+	client, _ := gplugin.TestPluginGRPCConn(t, pluginMap)
+	cleanup := func() {
+		client.Close()
+	}
+
+	// Request the backend
+	raw, err := client.Dispense(BackendPluginName)
+	if err != nil {
+		t.Fatal(err)
+	}
+	b := raw.(logical.Backend)
+
+	err = b.Setup(context.Background(), &logical.BackendConfig{
+		Logger: logging.NewVaultLogger(log.Debug),
+		System: &logical.StaticSystemView{
+			DefaultLeaseTTLVal: 300 * time.Second,
+			MaxLeaseTTLVal:     1800 * time.Second,
+		},
+		StorageView: &logical.InmemStorage{},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	return b, cleanup
+}
diff --git a/sdk/plugin/grpc_events.go b/sdk/plugin/grpc_events.go
new file mode 100644
index 0000000..05d788c
--- /dev/null
+++ b/sdk/plugin/grpc_events.go
@@ -0,0 +1,49 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+	"context"
+
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/hashicorp/vault/sdk/plugin/pb"
+	"google.golang.org/grpc"
+)
+
+func newGRPCEventsClient(conn *grpc.ClientConn) *GRPCEventsClient {
+	return &GRPCEventsClient{
+		client: pb.NewEventsClient(conn),
+	}
+}
+
+type GRPCEventsClient struct {
+	client pb.EventsClient
+}
+
+var _ logical.EventSender = (*GRPCEventsClient)(nil)
+
+func (s *GRPCEventsClient) Send(ctx context.Context, eventType logical.EventType, event *logical.EventData) error {
+	_, err := s.client.SendEvent(ctx, &pb.SendEventRequest{
+		EventType: string(eventType),
+		Event:     event,
+	})
+	return err
+}
+
+type GRPCEventsServer struct {
+	pb.UnimplementedEventsServer
+	impl logical.EventSender
+}
+
+func (s *GRPCEventsServer) SendEvent(ctx context.Context, req *pb.SendEventRequest) (*pb.Empty, error) {
+	if s.impl == nil {
+		return &pb.Empty{}, nil
+	}
+
+	err := s.impl.Send(ctx, logical.EventType(req.EventType), req.Event)
+	if err != nil {
+		return nil, err
+	}
+	return &pb.Empty{}, nil
+}
diff --git a/sdk/plugin/grpc_storage.go b/sdk/plugin/grpc_storage.go
new file mode 100644
index 0000000..5c2f0de
--- /dev/null
+++ b/sdk/plugin/grpc_storage.go
@@ -0,0 +1,154 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+	"context"
+	"errors"
+
+	"google.golang.org/grpc"
+
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/hashicorp/vault/sdk/plugin/pb"
+)
+
+var errMissingStorage = errors.New("missing storage implementation: this method should not be called during plugin Setup, but only during and after Initialize")
+
+func newGRPCStorageClient(conn *grpc.ClientConn) *GRPCStorageClient {
+	return &GRPCStorageClient{
+		client: pb.NewStorageClient(conn),
+	}
+}
+
+// GRPCStorageClient is an implementation of logical.Storage that communicates
+// over RPC.
+type GRPCStorageClient struct {
+	client pb.StorageClient
+}
+
+func (s *GRPCStorageClient) List(ctx context.Context, prefix string) ([]string, error) {
+	reply, err := s.client.List(ctx, &pb.StorageListArgs{
+		Prefix: prefix,
+	}, largeMsgGRPCCallOpts...)
+	if err != nil {
+		return []string{}, err
+	}
+	if reply.Err != "" {
+		return reply.Keys, errors.New(reply.Err)
+	}
+	return reply.Keys, nil
+}
+
+func (s *GRPCStorageClient) Get(ctx context.Context, key string) (*logical.StorageEntry, error) {
+	reply, err := s.client.Get(ctx, &pb.StorageGetArgs{
+		Key: key,
+	}, largeMsgGRPCCallOpts...)
+	if err != nil {
+		return nil, err
+	}
+	if reply.Err != "" {
+		return nil, errors.New(reply.Err)
+	}
+	return pb.ProtoStorageEntryToLogicalStorageEntry(reply.Entry), nil
+}
+
+func (s *GRPCStorageClient) Put(ctx context.Context, entry *logical.StorageEntry) error {
+	reply, err := s.client.Put(ctx, &pb.StoragePutArgs{
+		Entry: pb.LogicalStorageEntryToProtoStorageEntry(entry),
+	}, largeMsgGRPCCallOpts...)
+	if err != nil {
+		return err
+	}
+	if reply.Err != "" {
+		return errors.New(reply.Err)
+	}
+	return nil
+}
+
+func (s *GRPCStorageClient) Delete(ctx context.Context, key string) error {
+	reply, err := s.client.Delete(ctx, &pb.StorageDeleteArgs{
+		Key: key,
+	})
+	if err != nil {
+		return err
+	}
+	if reply.Err != "" {
+		return errors.New(reply.Err)
+	}
+	return nil
+}
+
+// GRPCStorageServer is the gRPC server-side shim that serves storage
+// requests made by the plugin.
+type GRPCStorageServer struct {
+	pb.UnimplementedStorageServer
+	impl logical.Storage
+}
+
+func (s *GRPCStorageServer) List(ctx context.Context, args *pb.StorageListArgs) (*pb.StorageListReply, error) {
+	if s.impl == nil {
+		return nil, errMissingStorage
+	}
+	keys, err := s.impl.List(ctx, args.Prefix)
+	return &pb.StorageListReply{
+		Keys: keys,
+		Err:  pb.ErrToString(err),
+	}, nil
+}
+
+func (s *GRPCStorageServer) Get(ctx context.Context, args *pb.StorageGetArgs) (*pb.StorageGetReply, error) {
+	if s.impl == nil {
+		return nil, errMissingStorage
+	}
+	storageEntry, err := s.impl.Get(ctx, args.Key)
+	if storageEntry == nil {
+		return &pb.StorageGetReply{
+			Entry: nil,
+			Err:   pb.ErrToString(err),
+		}, nil
+	}
+	return &pb.StorageGetReply{
+		Entry: pb.LogicalStorageEntryToProtoStorageEntry(storageEntry),
+		Err:   pb.ErrToString(err),
+	}, nil
+}
+
+func (s *GRPCStorageServer) Put(ctx context.Context, args *pb.StoragePutArgs) (*pb.StoragePutReply, error) {
+	if s.impl == nil {
+		return nil, errMissingStorage
+	}
+	err := s.impl.Put(ctx, pb.ProtoStorageEntryToLogicalStorageEntry(args.Entry))
+	return &pb.StoragePutReply{
+		Err: pb.ErrToString(err),
+	}, nil
+}
+
+func (s *GRPCStorageServer) Delete(ctx context.Context, args *pb.StorageDeleteArgs) (*pb.StorageDeleteReply, error) {
+	if s.impl == nil {
+		return nil, errMissingStorage
+	}
+	err := s.impl.Delete(ctx, args.Key)
+	return &pb.StorageDeleteReply{
+		Err: pb.ErrToString(err),
+	}, nil
+}
+
+// NOOPStorage is used to deny access to the storage interface while running a
+// backend plugin in metadata mode.
+type NOOPStorage struct{}
+
+func (s *NOOPStorage) List(_ context.Context, prefix string) ([]string, error) {
+	return []string{}, nil
+}
+
+func (s *NOOPStorage) Get(_ context.Context, key string) (*logical.StorageEntry, error) {
+	return nil, nil
+}
+
+func (s *NOOPStorage) Put(_ context.Context, entry *logical.StorageEntry) error {
+	return nil
+}
+
+func (s *NOOPStorage) Delete(_ context.Context, key string) error {
+	return nil
+}
diff --git a/sdk/plugin/grpc_system.go b/sdk/plugin/grpc_system.go
new file mode 100644
index 0000000..bf4537b
--- /dev/null
+++ b/sdk/plugin/grpc_system.go
@@ -0,0 +1,396 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/license" + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/hashicorp/vault/sdk/helper/wrapping" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/plugin/pb" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var errMissingSystemView = errors.New("missing system view implementation: this method should not be called during plugin Setup, but only during and after Initialize") + +func newGRPCSystemView(conn *grpc.ClientConn) *gRPCSystemViewClient { + return &gRPCSystemViewClient{ + client: pb.NewSystemViewClient(conn), + } +} + +var _ logical.SystemView = &gRPCSystemViewClient{} + +type gRPCSystemViewClient struct { + client pb.SystemViewClient +} + +func (s *gRPCSystemViewClient) DefaultLeaseTTL() time.Duration { + reply, err := s.client.DefaultLeaseTTL(context.Background(), &pb.Empty{}) + if err != nil { + return 0 + } + + return time.Duration(reply.TTL) +} + +func (s *gRPCSystemViewClient) MaxLeaseTTL() time.Duration { + reply, err := s.client.MaxLeaseTTL(context.Background(), &pb.Empty{}) + if err != nil { + return 0 + } + + return time.Duration(reply.TTL) +} + +func (s *gRPCSystemViewClient) Tainted() bool { + reply, err := s.client.Tainted(context.Background(), &pb.Empty{}) + if err != nil { + return false + } + + return reply.Tainted +} + +func (s *gRPCSystemViewClient) CachingDisabled() bool { + reply, err := s.client.CachingDisabled(context.Background(), &pb.Empty{}) + if err != nil { + return false + } + + return reply.Disabled +} + +func (s *gRPCSystemViewClient) ReplicationState() consts.ReplicationState { + reply, err := s.client.ReplicationState(context.Background(), &pb.Empty{}) + if err != nil { + return consts.ReplicationUnknown + } + + return consts.ReplicationState(reply.State) +} + +func (s *gRPCSystemViewClient) ResponseWrapData(ctx context.Context, data map[string]interface{}, ttl time.Duration, jwt bool) (*wrapping.ResponseWrapInfo, error) { + buf, err := json.Marshal(data) + if err != nil { + return nil, err + } + + reply, err := s.client.ResponseWrapData(ctx, &pb.ResponseWrapDataArgs{ + Data: string(buf[:]), + TTL: int64(ttl), + JWT: false, + }) + if err != nil { + return nil, err + } + if reply.Err != "" { + return nil, errors.New(reply.Err) + } + + info, err := pb.ProtoResponseWrapInfoToLogicalResponseWrapInfo(reply.WrapInfo) + if err != nil { + return nil, err + } + + return info, nil +} + +func (s *gRPCSystemViewClient) NewPluginClient(ctx context.Context, config pluginutil.PluginClientConfig) (pluginutil.PluginClient, error) { + return nil, fmt.Errorf("cannot call NewPluginClient from a plugin backend") +} + +func (s *gRPCSystemViewClient) LookupPlugin(_ context.Context, _ string, _ consts.PluginType) (*pluginutil.PluginRunner, error) { + return nil, fmt.Errorf("cannot call LookupPlugin from a plugin backend") +} + +func (s *gRPCSystemViewClient) LookupPluginVersion(_ context.Context, _ string, _ consts.PluginType, _ string) (*pluginutil.PluginRunner, error) { + return nil, fmt.Errorf("cannot call LookupPluginVersion from a plugin backend") +} + +func (s *gRPCSystemViewClient) ListVersionedPlugins(_ context.Context, _ consts.PluginType) ([]pluginutil.VersionedPlugin, error) { + return nil, fmt.Errorf("cannot call ListVersionedPlugins from a plugin backend") +} 
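+
+// Illustrative sketch, not part of the upstream change: inside a backend,
+// these client shims are reached through the SystemView handed to Setup,
+// and a handler might clamp a requested TTL against the mount's limits
+// roughly as follows. The helper name clampTTL is hypothetical.
+//
+//	func clampTTL(sys logical.SystemView, requested time.Duration) time.Duration {
+//		if requested == 0 {
+//			requested = sys.DefaultLeaseTTL()
+//		}
+//		if max := sys.MaxLeaseTTL(); requested > max {
+//			requested = max
+//		}
+//		return requested
+//	}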
+ +func (s *gRPCSystemViewClient) MlockEnabled() bool { + reply, err := s.client.MlockEnabled(context.Background(), &pb.Empty{}) + if err != nil { + return false + } + + return reply.Enabled +} + +func (s *gRPCSystemViewClient) HasFeature(feature license.Features) bool { + // Not implemented + return false +} + +func (s *gRPCSystemViewClient) LocalMount() bool { + reply, err := s.client.LocalMount(context.Background(), &pb.Empty{}) + if err != nil { + return false + } + + return reply.Local +} + +func (s *gRPCSystemViewClient) EntityInfo(entityID string) (*logical.Entity, error) { + reply, err := s.client.EntityInfo(context.Background(), &pb.EntityInfoArgs{ + EntityID: entityID, + }) + if err != nil { + return nil, err + } + if reply.Err != "" { + return nil, errors.New(reply.Err) + } + + return reply.Entity, nil +} + +func (s *gRPCSystemViewClient) GroupsForEntity(entityID string) ([]*logical.Group, error) { + reply, err := s.client.GroupsForEntity(context.Background(), &pb.EntityInfoArgs{ + EntityID: entityID, + }) + if err != nil { + return nil, err + } + if reply.Err != "" { + return nil, errors.New(reply.Err) + } + + return reply.Groups, nil +} + +func (s *gRPCSystemViewClient) PluginEnv(ctx context.Context) (*logical.PluginEnvironment, error) { + reply, err := s.client.PluginEnv(ctx, &pb.Empty{}) + if err != nil { + return nil, err + } + + return reply.PluginEnvironment, nil +} + +func (s *gRPCSystemViewClient) VaultVersion(ctx context.Context) (string, error) { + reply, err := s.client.PluginEnv(ctx, &pb.Empty{}) + if err != nil { + return "", err + } + + return reply.PluginEnvironment.VaultVersion, nil +} + +func (s *gRPCSystemViewClient) GeneratePasswordFromPolicy(ctx context.Context, policyName string) (password string, err error) { + req := &pb.GeneratePasswordFromPolicyRequest{ + PolicyName: policyName, + } + resp, err := s.client.GeneratePasswordFromPolicy(ctx, req) + if err != nil { + return "", err + } + return resp.Password, nil +} + +func (s gRPCSystemViewClient) ClusterID(ctx context.Context) (string, error) { + reply, err := s.client.ClusterInfo(ctx, &pb.Empty{}) + if err != nil { + return "", err + } + + return reply.ClusterID, nil +} + +type gRPCSystemViewServer struct { + pb.UnimplementedSystemViewServer + + impl logical.SystemView +} + +func (s *gRPCSystemViewServer) DefaultLeaseTTL(ctx context.Context, _ *pb.Empty) (*pb.TTLReply, error) { + if s.impl == nil { + return nil, errMissingSystemView + } + ttl := s.impl.DefaultLeaseTTL() + return &pb.TTLReply{ + TTL: int64(ttl), + }, nil +} + +func (s *gRPCSystemViewServer) MaxLeaseTTL(ctx context.Context, _ *pb.Empty) (*pb.TTLReply, error) { + if s.impl == nil { + return nil, errMissingSystemView + } + ttl := s.impl.MaxLeaseTTL() + return &pb.TTLReply{ + TTL: int64(ttl), + }, nil +} + +func (s *gRPCSystemViewServer) Tainted(ctx context.Context, _ *pb.Empty) (*pb.TaintedReply, error) { + if s.impl == nil { + return nil, errMissingSystemView + } + tainted := s.impl.Tainted() + return &pb.TaintedReply{ + Tainted: tainted, + }, nil +} + +func (s *gRPCSystemViewServer) CachingDisabled(ctx context.Context, _ *pb.Empty) (*pb.CachingDisabledReply, error) { + if s.impl == nil { + return nil, errMissingSystemView + } + cachingDisabled := s.impl.CachingDisabled() + return &pb.CachingDisabledReply{ + Disabled: cachingDisabled, + }, nil +} + +func (s *gRPCSystemViewServer) ReplicationState(ctx context.Context, _ *pb.Empty) (*pb.ReplicationStateReply, error) { + if s.impl == nil { + return nil, errMissingSystemView + } + 
replicationState := s.impl.ReplicationState() + return &pb.ReplicationStateReply{ + State: int32(replicationState), + }, nil +} + +func (s *gRPCSystemViewServer) ResponseWrapData(ctx context.Context, args *pb.ResponseWrapDataArgs) (*pb.ResponseWrapDataReply, error) { + if s.impl == nil { + return nil, errMissingSystemView + } + data := map[string]interface{}{} + err := json.Unmarshal([]byte(args.Data), &data) + if err != nil { + return &pb.ResponseWrapDataReply{}, err + } + + // Do not allow JWTs to be returned + info, err := s.impl.ResponseWrapData(ctx, data, time.Duration(args.TTL), false) + if err != nil { + return &pb.ResponseWrapDataReply{ + Err: pb.ErrToString(err), + }, nil + } + + pbInfo, err := pb.LogicalResponseWrapInfoToProtoResponseWrapInfo(info) + if err != nil { + return &pb.ResponseWrapDataReply{}, err + } + + return &pb.ResponseWrapDataReply{ + WrapInfo: pbInfo, + }, nil +} + +func (s *gRPCSystemViewServer) MlockEnabled(ctx context.Context, _ *pb.Empty) (*pb.MlockEnabledReply, error) { + if s.impl == nil { + return nil, errMissingSystemView + } + enabled := s.impl.MlockEnabled() + return &pb.MlockEnabledReply{ + Enabled: enabled, + }, nil +} + +func (s *gRPCSystemViewServer) LocalMount(ctx context.Context, _ *pb.Empty) (*pb.LocalMountReply, error) { + if s.impl == nil { + return nil, errMissingSystemView + } + local := s.impl.LocalMount() + return &pb.LocalMountReply{ + Local: local, + }, nil +} + +func (s *gRPCSystemViewServer) EntityInfo(ctx context.Context, args *pb.EntityInfoArgs) (*pb.EntityInfoReply, error) { + if s.impl == nil { + return nil, errMissingSystemView + } + entity, err := s.impl.EntityInfo(args.EntityID) + if err != nil { + return &pb.EntityInfoReply{ + Err: pb.ErrToString(err), + }, nil + } + return &pb.EntityInfoReply{ + Entity: entity, + }, nil +} + +func (s *gRPCSystemViewServer) GroupsForEntity(ctx context.Context, args *pb.EntityInfoArgs) (*pb.GroupsForEntityReply, error) { + if s.impl == nil { + return nil, errMissingSystemView + } + groups, err := s.impl.GroupsForEntity(args.EntityID) + if err != nil { + return &pb.GroupsForEntityReply{ + Err: pb.ErrToString(err), + }, nil + } + return &pb.GroupsForEntityReply{ + Groups: groups, + }, nil +} + +func (s *gRPCSystemViewServer) PluginEnv(ctx context.Context, _ *pb.Empty) (*pb.PluginEnvReply, error) { + if s.impl == nil { + return nil, errMissingSystemView + } + pluginEnv, err := s.impl.PluginEnv(ctx) + if err != nil { + return &pb.PluginEnvReply{ + Err: pb.ErrToString(err), + }, nil + } + return &pb.PluginEnvReply{ + PluginEnvironment: pluginEnv, + }, nil +} + +func (s *gRPCSystemViewServer) GeneratePasswordFromPolicy(ctx context.Context, req *pb.GeneratePasswordFromPolicyRequest) (*pb.GeneratePasswordFromPolicyReply, error) { + if s.impl == nil { + return nil, errMissingSystemView + } + policyName := req.PolicyName + if policyName == "" { + return &pb.GeneratePasswordFromPolicyReply{}, status.Errorf(codes.InvalidArgument, "no password policy specified") + } + + password, err := s.impl.GeneratePasswordFromPolicy(ctx, policyName) + if err != nil { + return &pb.GeneratePasswordFromPolicyReply{}, status.Errorf(codes.Internal, "failed to generate password") + } + + resp := &pb.GeneratePasswordFromPolicyReply{ + Password: password, + } + return resp, nil +} + +func (s *gRPCSystemViewServer) ClusterInfo(ctx context.Context, _ *pb.Empty) (*pb.ClusterInfoReply, error) { + if s.impl == nil { + return nil, errMissingSystemView + } + + clusterId, err := s.impl.ClusterID(ctx) + if err != nil { + return 
&pb.ClusterInfoReply{}, status.Errorf(codes.Internal, "failed to fetch cluster id") + } + + return &pb.ClusterInfoReply{ + ClusterID: clusterId, + }, nil +} diff --git a/sdk/plugin/grpc_system_test.go b/sdk/plugin/grpc_system_test.go new file mode 100644 index 0000000..19a5ecb --- /dev/null +++ b/sdk/plugin/grpc_system_test.go @@ -0,0 +1,283 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "context" + "reflect" + "testing" + "time" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/plugin/pb" + "google.golang.org/grpc" + "google.golang.org/protobuf/proto" +) + +func TestSystem_GRPC_ReturnsErrIfSystemViewNil(t *testing.T) { + _, err := new(gRPCSystemViewServer).ReplicationState(context.Background(), nil) + if err == nil { + t.Error("Expected error when using server with no impl") + } +} + +func TestSystem_GRPC_GRPC_impl(t *testing.T) { + var _ logical.SystemView = new(gRPCSystemViewClient) +} + +func TestSystem_GRPC_defaultLeaseTTL(t *testing.T) { + sys := logical.TestSystemView() + client, _ := plugin.TestGRPCConn(t, func(s *grpc.Server) { + pb.RegisterSystemViewServer(s, &gRPCSystemViewServer{ + impl: sys, + }) + }) + defer client.Close() + testSystemView := newGRPCSystemView(client) + + expected := sys.DefaultLeaseTTL() + actual := testSystemView.DefaultLeaseTTL() + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("expected: %v, got: %v", expected, actual) + } +} + +func TestSystem_GRPC_maxLeaseTTL(t *testing.T) { + sys := logical.TestSystemView() + client, _ := plugin.TestGRPCConn(t, func(s *grpc.Server) { + pb.RegisterSystemViewServer(s, &gRPCSystemViewServer{ + impl: sys, + }) + }) + defer client.Close() + testSystemView := newGRPCSystemView(client) + + expected := sys.MaxLeaseTTL() + actual := testSystemView.MaxLeaseTTL() + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("expected: %v, got: %v", expected, actual) + } +} + +func TestSystem_GRPC_tainted(t *testing.T) { + sys := logical.TestSystemView() + sys.TaintedVal = true + client, _ := plugin.TestGRPCConn(t, func(s *grpc.Server) { + pb.RegisterSystemViewServer(s, &gRPCSystemViewServer{ + impl: sys, + }) + }) + defer client.Close() + testSystemView := newGRPCSystemView(client) + + expected := sys.Tainted() + actual := testSystemView.Tainted() + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("expected: %v, got: %v", expected, actual) + } +} + +func TestSystem_GRPC_cachingDisabled(t *testing.T) { + sys := logical.TestSystemView() + sys.CachingDisabledVal = true + client, _ := plugin.TestGRPCConn(t, func(s *grpc.Server) { + pb.RegisterSystemViewServer(s, &gRPCSystemViewServer{ + impl: sys, + }) + }) + defer client.Close() + testSystemView := newGRPCSystemView(client) + + expected := sys.CachingDisabled() + actual := testSystemView.CachingDisabled() + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("expected: %v, got: %v", expected, actual) + } +} + +func TestSystem_GRPC_replicationState(t *testing.T) { + sys := logical.TestSystemView() + sys.ReplicationStateVal = consts.ReplicationPerformancePrimary + client, _ := plugin.TestGRPCConn(t, func(s *grpc.Server) { + pb.RegisterSystemViewServer(s, &gRPCSystemViewServer{ + impl: sys, + }) + }) + defer client.Close() + testSystemView := newGRPCSystemView(client) + + expected := sys.ReplicationState() + actual := testSystemView.ReplicationState() + if !reflect.DeepEqual(expected, actual) { + 
t.Fatalf("expected: %v, got: %v", expected, actual) + } +} + +func TestSystem_GRPC_lookupPlugin(t *testing.T) { + sys := logical.TestSystemView() + client, _ := plugin.TestGRPCConn(t, func(s *grpc.Server) { + pb.RegisterSystemViewServer(s, &gRPCSystemViewServer{ + impl: sys, + }) + }) + defer client.Close() + + testSystemView := newGRPCSystemView(client) + + if _, err := testSystemView.LookupPlugin(context.Background(), "foo", consts.PluginTypeDatabase); err == nil { + t.Fatal("LookPlugin(): expected error on due to unsupported call from plugin") + } +} + +func TestSystem_GRPC_mlockEnabled(t *testing.T) { + sys := logical.TestSystemView() + sys.EnableMlock = true + client, _ := plugin.TestGRPCConn(t, func(s *grpc.Server) { + pb.RegisterSystemViewServer(s, &gRPCSystemViewServer{ + impl: sys, + }) + }) + defer client.Close() + + testSystemView := newGRPCSystemView(client) + + expected := sys.MlockEnabled() + actual := testSystemView.MlockEnabled() + if !reflect.DeepEqual(expected, actual) { + t.Fatalf("expected: %v, got: %v", expected, actual) + } +} + +func TestSystem_GRPC_entityInfo(t *testing.T) { + sys := logical.TestSystemView() + sys.EntityVal = &logical.Entity{ + ID: "id", + Name: "name", + Metadata: map[string]string{ + "foo": "bar", + }, + Aliases: []*logical.Alias{ + { + MountType: "logical", + MountAccessor: "accessor", + Name: "name", + Metadata: map[string]string{ + "zip": "zap", + }, + }, + }, + Disabled: true, + } + client, _ := plugin.TestGRPCConn(t, func(s *grpc.Server) { + pb.RegisterSystemViewServer(s, &gRPCSystemViewServer{ + impl: sys, + }) + }) + defer client.Close() + testSystemView := newGRPCSystemView(client) + + actual, err := testSystemView.EntityInfo("") + if err != nil { + t.Fatal(err) + } + if !proto.Equal(sys.EntityVal, actual) { + t.Fatalf("expected: %v, got: %v", sys.EntityVal, actual) + } +} + +func TestSystem_GRPC_groupsForEntity(t *testing.T) { + sys := logical.TestSystemView() + sys.GroupsVal = []*logical.Group{ + { + ID: "group1-id", + Name: "group1", + Metadata: map[string]string{ + "group-metadata": "metadata-value", + }, + }, + } + client, _ := plugin.TestGRPCConn(t, func(s *grpc.Server) { + pb.RegisterSystemViewServer(s, &gRPCSystemViewServer{ + impl: sys, + }) + }) + defer client.Close() + testSystemView := newGRPCSystemView(client) + + actual, err := testSystemView.GroupsForEntity("") + if err != nil { + t.Fatal(err) + } + if !proto.Equal(sys.GroupsVal[0], actual[0]) { + t.Fatalf("expected: %v, got: %v", sys.GroupsVal, actual) + } +} + +func TestSystem_GRPC_pluginEnv(t *testing.T) { + sys := logical.TestSystemView() + sys.PluginEnvironment = &logical.PluginEnvironment{ + VaultVersion: "0.10.42", + VaultVersionPrerelease: "dev", + VaultVersionMetadata: "ent", + } + client, _ := plugin.TestGRPCConn(t, func(s *grpc.Server) { + pb.RegisterSystemViewServer(s, &gRPCSystemViewServer{ + impl: sys, + }) + }) + defer client.Close() + + testSystemView := newGRPCSystemView(client) + + expected, err := sys.PluginEnv(context.Background()) + if err != nil { + t.Fatal(err) + } + + actual, err := testSystemView.PluginEnv(context.Background()) + if err != nil { + t.Fatal(err) + } + + if !proto.Equal(expected, actual) { + t.Fatalf("expected: %v, got: %v", expected, actual) + } +} + +func TestSystem_GRPC_GeneratePasswordFromPolicy(t *testing.T) { + policyName := "testpolicy" + expectedPassword := "87354qtnjgrehiogd9u0t43" + passGen := func() (password string, err error) { + return expectedPassword, nil + } + sys := &logical.StaticSystemView{ + PasswordPolicies: 
map[string]logical.PasswordGenerator{ + policyName: passGen, + }, + } + + client, server := plugin.TestGRPCConn(t, func(s *grpc.Server) { + pb.RegisterSystemViewServer(s, &gRPCSystemViewServer{ + impl: sys, + }) + }) + defer server.Stop() + defer client.Close() + + testSystemView := newGRPCSystemView(client) + + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + + password, err := testSystemView.GeneratePasswordFromPolicy(ctx, policyName) + if err != nil { + t.Fatalf("no error expected, got: %s", err) + } + + if password != expectedPassword { + t.Fatalf("Actual password: %s\nExpected password: %s", password, expectedPassword) + } +} diff --git a/sdk/plugin/logger.go b/sdk/plugin/logger.go new file mode 100644 index 0000000..1ef4694 --- /dev/null +++ b/sdk/plugin/logger.go @@ -0,0 +1,135 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import hclog "github.com/hashicorp/go-hclog" + +type LoggerServer struct { + logger hclog.Logger +} + +func (l *LoggerServer) Trace(args *LoggerArgs, _ *struct{}) error { + l.logger.Trace(args.Msg, args.Args...) + return nil +} + +func (l *LoggerServer) Debug(args *LoggerArgs, _ *struct{}) error { + l.logger.Debug(args.Msg, args.Args...) + return nil +} + +func (l *LoggerServer) Info(args *LoggerArgs, _ *struct{}) error { + l.logger.Info(args.Msg, args.Args...) + return nil +} + +func (l *LoggerServer) Warn(args *LoggerArgs, reply *LoggerReply) error { + l.logger.Warn(args.Msg, args.Args...) + return nil +} + +func (l *LoggerServer) Error(args *LoggerArgs, reply *LoggerReply) error { + l.logger.Error(args.Msg, args.Args...) + return nil +} + +func (l *LoggerServer) Log(args *LoggerArgs, _ *struct{}) error { + switch translateLevel(args.Level) { + + case hclog.Trace: + l.logger.Trace(args.Msg, args.Args...) + + case hclog.Debug: + l.logger.Debug(args.Msg, args.Args...) + + case hclog.Info: + l.logger.Info(args.Msg, args.Args...) + + case hclog.Warn: + l.logger.Warn(args.Msg, args.Args...) + + case hclog.Error: + l.logger.Error(args.Msg, args.Args...) + + case hclog.NoLevel: + } + return nil +} + +func (l *LoggerServer) SetLevel(args int, _ *struct{}) error { + level := translateLevel(args) + l.logger = hclog.New(&hclog.LoggerOptions{Level: level}) + return nil +} + +func (l *LoggerServer) IsTrace(args interface{}, reply *LoggerReply) error { + result := l.logger.IsTrace() + *reply = LoggerReply{ + IsTrue: result, + } + return nil +} + +func (l *LoggerServer) IsDebug(args interface{}, reply *LoggerReply) error { + result := l.logger.IsDebug() + *reply = LoggerReply{ + IsTrue: result, + } + return nil +} + +func (l *LoggerServer) IsInfo(args interface{}, reply *LoggerReply) error { + result := l.logger.IsInfo() + *reply = LoggerReply{ + IsTrue: result, + } + return nil +} + +func (l *LoggerServer) IsWarn(args interface{}, reply *LoggerReply) error { + result := l.logger.IsWarn() + *reply = LoggerReply{ + IsTrue: result, + } + return nil +} + +type LoggerArgs struct { + Level int + Msg string + Args []interface{} +} + +// LoggerReply contains the RPC reply. Not all fields may be used +// for a particular RPC call. 
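+// IsTrue carries the result of the IsTrace/IsDebug/IsInfo/IsWarn checks,
+// while Error is reserved for relaying call failures back to the client.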
+type LoggerReply struct {
+	IsTrue bool
+	Error  error
+}
+
+func translateLevel(logxiLevel int) hclog.Level {
+	switch logxiLevel {
+
+	case 1000, 10:
+		// logxi.LevelAll, logxi.LevelTrace:
+		return hclog.Trace
+
+	case 7:
+		// logxi.LevelDebug:
+		return hclog.Debug
+
+	case 6, 5:
+		// logxi.LevelInfo, logxi.LevelNotice:
+		return hclog.Info
+
+	case 4:
+		// logxi.LevelWarn:
+		return hclog.Warn
+
+	case 3, 2, 1, -1:
+		// logxi.LevelError, logxi.LevelFatal, logxi.LevelAlert, logxi.LevelEmergency:
+		return hclog.Error
+	}
+	return hclog.NoLevel
+}
diff --git a/sdk/plugin/logger_test.go b/sdk/plugin/logger_test.go
new file mode 100644
index 0000000..c47a70b
--- /dev/null
+++ b/sdk/plugin/logger_test.go
@@ -0,0 +1,265 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+	"bufio"
+	"bytes"
+	"io/ioutil"
+	"net/rpc"
+	"strings"
+	"testing"
+
+	hclog "github.com/hashicorp/go-hclog"
+	plugin "github.com/hashicorp/go-plugin"
+	"github.com/hashicorp/vault/sdk/helper/logging"
+)
+
+func TestLogger_levels(t *testing.T) {
+	client, server := plugin.TestRPCConn(t)
+	defer client.Close()
+
+	var buf bytes.Buffer
+	writer := bufio.NewWriter(&buf)
+
+	l := logging.NewVaultLoggerWithWriter(writer, hclog.Trace)
+
+	server.RegisterName("Plugin", &LoggerServer{
+		logger: l,
+	})
+
+	expected := "foobar"
+	testLogger := &deprecatedLoggerClient{client: client}
+
+	// Test trace
+	testLogger.Trace(expected)
+	if err := writer.Flush(); err != nil {
+		t.Fatal(err)
+	}
+	result := buf.String()
+	buf.Reset()
+	if !strings.Contains(result, expected) {
+		t.Fatalf("expected log to contain %s, got %s", expected, result)
+	}
+
+	// Test debug
+	testLogger.Debug(expected)
+	if err := writer.Flush(); err != nil {
+		t.Fatal(err)
+	}
+	result = buf.String()
+	buf.Reset()
+	if !strings.Contains(result, expected) {
+		t.Fatalf("expected log to contain %s, got %s", expected, result)
+	}
+
+	// Test info
+	testLogger.Info(expected)
+	if err := writer.Flush(); err != nil {
+		t.Fatal(err)
+	}
+	result = buf.String()
+	buf.Reset()
+	if !strings.Contains(result, expected) {
+		t.Fatalf("expected log to contain %s, got %s", expected, result)
+	}
+
+	// Test warn
+	testLogger.Warn(expected)
+	if err := writer.Flush(); err != nil {
+		t.Fatal(err)
+	}
+	result = buf.String()
+	buf.Reset()
+	if !strings.Contains(result, expected) {
+		t.Fatalf("expected log to contain %s, got %s", expected, result)
+	}
+
+	// Test error
+	testLogger.Error(expected)
+	if err := writer.Flush(); err != nil {
+		t.Fatal(err)
+	}
+	result = buf.String()
+	buf.Reset()
+	if !strings.Contains(result, expected) {
+		t.Fatalf("expected log to contain %s, got %s", expected, result)
+	}
+
+	// Test fatal
+	testLogger.Fatal(expected)
+	if err := writer.Flush(); err != nil {
+		t.Fatal(err)
+	}
+	result = buf.String()
+	buf.Reset()
+	if result != "" {
+		t.Fatalf("expected log Fatal() to be no-op, got %s", result)
+	}
+}
+
+func TestLogger_isLevels(t *testing.T) {
+	client, server := plugin.TestRPCConn(t)
+	defer client.Close()
+
+	l := logging.NewVaultLoggerWithWriter(ioutil.Discard, hclog.Trace)
+
+	server.RegisterName("Plugin", &LoggerServer{
+		logger: l,
+	})
+
+	testLogger := &deprecatedLoggerClient{client: client}
+
+	if !testLogger.IsDebug() || !testLogger.IsInfo() || !testLogger.IsTrace() || !testLogger.IsWarn() {
+		t.Fatal("expected logger to return true for all logger level checks")
+	}
+}
+
+func TestLogger_log(t *testing.T) {
+	client, server := plugin.TestRPCConn(t)
+	defer client.Close()
+
+	var buf
bytes.Buffer
+	writer := bufio.NewWriter(&buf)
+
+	l := logging.NewVaultLoggerWithWriter(writer, hclog.Trace)
+
+	server.RegisterName("Plugin", &LoggerServer{
+		logger: l,
+	})
+
+	expected := "foobar"
+	testLogger := &deprecatedLoggerClient{client: client}
+
+	// Log at level 6 (logxi.LevelInfo)
+	testLogger.Log(6, expected, nil)
+	if err := writer.Flush(); err != nil {
+		t.Fatal(err)
+	}
+	result := buf.String()
+	if !strings.Contains(result, expected) {
+		t.Fatalf("expected log to contain %s, got %s", expected, result)
+	}
+}
+
+func TestLogger_setLevel(t *testing.T) {
+	client, server := plugin.TestRPCConn(t)
+	defer client.Close()
+
+	l := hclog.New(&hclog.LoggerOptions{Output: ioutil.Discard})
+
+	server.RegisterName("Plugin", &LoggerServer{
+		logger: l,
+	})
+
+	testLogger := &deprecatedLoggerClient{client: client}
+	testLogger.SetLevel(4) // 4 == logxi.LevelWarn
+
+	if !testLogger.IsWarn() {
+		t.Fatal("expected logger to support warn level")
+	}
+}
+
+type deprecatedLoggerClient struct {
+	client *rpc.Client
+}
+
+func (l *deprecatedLoggerClient) Trace(msg string, args ...interface{}) {
+	cArgs := &LoggerArgs{
+		Msg:  msg,
+		Args: args,
+	}
+	l.client.Call("Plugin.Trace", cArgs, &struct{}{})
+}
+
+func (l *deprecatedLoggerClient) Debug(msg string, args ...interface{}) {
+	cArgs := &LoggerArgs{
+		Msg:  msg,
+		Args: args,
+	}
+	l.client.Call("Plugin.Debug", cArgs, &struct{}{})
+}
+
+func (l *deprecatedLoggerClient) Info(msg string, args ...interface{}) {
+	cArgs := &LoggerArgs{
+		Msg:  msg,
+		Args: args,
+	}
+	l.client.Call("Plugin.Info", cArgs, &struct{}{})
+}
+
+func (l *deprecatedLoggerClient) Warn(msg string, args ...interface{}) error {
+	var reply LoggerReply
+	cArgs := &LoggerArgs{
+		Msg:  msg,
+		Args: args,
+	}
+	err := l.client.Call("Plugin.Warn", cArgs, &reply)
+	if err != nil {
+		return err
+	}
+	if reply.Error != nil {
+		return reply.Error
+	}
+
+	return nil
+}
+
+func (l *deprecatedLoggerClient) Error(msg string, args ...interface{}) error {
+	var reply LoggerReply
+	cArgs := &LoggerArgs{
+		Msg:  msg,
+		Args: args,
+	}
+	err := l.client.Call("Plugin.Error", cArgs, &reply)
+	if err != nil {
+		return err
+	}
+	if reply.Error != nil {
+		return reply.Error
+	}
+
+	return nil
+}
+
+func (l *deprecatedLoggerClient) Fatal(msg string, args ...interface{}) {
+	// NOOP since it's not actually used within vault
+}
+
+func (l *deprecatedLoggerClient) Log(level int, msg string, args []interface{}) {
+	cArgs := &LoggerArgs{
+		Level: level,
+		Msg:   msg,
+		Args:  args,
+	}
+	l.client.Call("Plugin.Log", cArgs, &struct{}{})
+}
+
+func (l *deprecatedLoggerClient) SetLevel(level int) {
+	l.client.Call("Plugin.SetLevel", level, &struct{}{})
+}
+
+func (l *deprecatedLoggerClient) IsTrace() bool {
+	var reply LoggerReply
+	l.client.Call("Plugin.IsTrace", new(interface{}), &reply)
+	return reply.IsTrue
+}
+
+func (l *deprecatedLoggerClient) IsDebug() bool {
+	var reply LoggerReply
+	l.client.Call("Plugin.IsDebug", new(interface{}), &reply)
+	return reply.IsTrue
+}
+
+func (l *deprecatedLoggerClient) IsInfo() bool {
+	var reply LoggerReply
+	l.client.Call("Plugin.IsInfo", new(interface{}), &reply)
+	return reply.IsTrue
+}
+
+func (l *deprecatedLoggerClient) IsWarn() bool {
+	var reply LoggerReply
+	l.client.Call("Plugin.IsWarn", new(interface{}), &reply)
+	return reply.IsTrue
+}
diff --git a/sdk/plugin/middleware.go b/sdk/plugin/middleware.go
new file mode 100644
index 0000000..4411c78
--- /dev/null
+++ b/sdk/plugin/middleware.go
@@ -0,0 +1,115 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+	"context"
+	"time"
+
+	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// BackendTracingMiddleware wraps a logical.Backend and emits trace logs with
+// timing information around each backend call.
+type BackendTracingMiddleware struct {
+	logger log.Logger
+
+	next logical.Backend
+}
+
+// Validate that BackendTracingMiddleware satisfies the logical.Backend interface
+var _ logical.Backend = &BackendTracingMiddleware{}
+
+func (b *BackendTracingMiddleware) Initialize(ctx context.Context, req *logical.InitializationRequest) (err error) {
+	defer func(then time.Time) {
+		b.logger.Trace("initialize", "status", "finished", "err", err, "took", time.Since(then))
+	}(time.Now())
+
+	b.logger.Trace("initialize", "status", "started")
+	return b.next.Initialize(ctx, req)
+}
+
+func (b *BackendTracingMiddleware) HandleRequest(ctx context.Context, req *logical.Request) (resp *logical.Response, err error) {
+	defer func(then time.Time) {
+		b.logger.Trace("handle request", "path", req.Path, "status", "finished", "err", err, "took", time.Since(then))
+	}(time.Now())
+
+	b.logger.Trace("handle request", "path", req.Path, "status", "started")
+	return b.next.HandleRequest(ctx, req)
+}
+
+func (b *BackendTracingMiddleware) SpecialPaths() *logical.Paths {
+	defer func(then time.Time) {
+		b.logger.Trace("special paths", "status", "finished", "took", time.Since(then))
+	}(time.Now())
+
+	b.logger.Trace("special paths", "status", "started")
+	return b.next.SpecialPaths()
+}
+
+func (b *BackendTracingMiddleware) System() logical.SystemView {
+	return b.next.System()
+}
+
+func (b *BackendTracingMiddleware) Logger() log.Logger {
+	return b.next.Logger()
+}
+
+func (b *BackendTracingMiddleware) HandleExistenceCheck(ctx context.Context, req *logical.Request) (found bool, exists bool, err error) {
+	defer func(then time.Time) {
+		b.logger.Trace("handle existence check", "path", req.Path, "status", "finished", "err", err, "took", time.Since(then))
+	}(time.Now())
+
+	b.logger.Trace("handle existence check", "path", req.Path, "status", "started")
+	return b.next.HandleExistenceCheck(ctx, req)
+}
+
+func (b *BackendTracingMiddleware) Cleanup(ctx context.Context) {
+	defer func(then time.Time) {
+		b.logger.Trace("cleanup", "status", "finished", "took", time.Since(then))
+	}(time.Now())
+
+	b.logger.Trace("cleanup", "status", "started")
+	b.next.Cleanup(ctx)
+}
+
+func (b *BackendTracingMiddleware) InvalidateKey(ctx context.Context, key string) {
+	defer func(then time.Time) {
+		b.logger.Trace("invalidate key", "key", key, "status", "finished", "took", time.Since(then))
+	}(time.Now())
+
+	b.logger.Trace("invalidate key", "key", key, "status", "started")
+	b.next.InvalidateKey(ctx, key)
+}
+
+func (b *BackendTracingMiddleware) Setup(ctx context.Context, config *logical.BackendConfig) (err error) {
+	defer func(then time.Time) {
+		b.logger.Trace("setup", "status", "finished", "err", err, "took", time.Since(then))
+	}(time.Now())
+
+	b.logger.Trace("setup", "status", "started")
+	return b.next.Setup(ctx, config)
+}
+
+func (b *BackendTracingMiddleware) Type() logical.BackendType {
+	defer func(then time.Time) {
+		b.logger.Trace("type", "status", "finished", "took", time.Since(then))
+	}(time.Now())
+
+	b.logger.Trace("type", "status", "started")
+	return b.next.Type()
+}
+
+func (b *BackendTracingMiddleware) PluginVersion() logical.PluginVersion {
+	defer func(then time.Time) {
+		b.logger.Trace("version", "status", "finished", "took", time.Since(then))
+	
}(time.Now())
+
+	b.logger.Trace("version", "status", "started")
+	if versioner, ok := b.next.(logical.PluginVersioner); ok {
+		return versioner.PluginVersion()
+	}
+	return logical.EmptyPluginVersion
+}
diff --git a/sdk/plugin/mock/backend.go b/sdk/plugin/mock/backend.go
new file mode 100644
index 0000000..9b3aa2c
--- /dev/null
+++ b/sdk/plugin/mock/backend.go
@@ -0,0 +1,87 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package mock
+
+import (
+	"context"
+	"os"
+
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+const MockPluginVersionEnv = "TESTING_MOCK_VAULT_PLUGIN_VERSION"
+
+// New returns a new backend as an interface. This func
+// is only necessary for builtin backend plugins.
+func New() (interface{}, error) {
+	return Backend(), nil
+}
+
+// Factory returns a new backend as logical.Backend.
+func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
+	b := Backend()
+	if err := b.Setup(ctx, conf); err != nil {
+		return nil, err
+	}
+	return b, nil
+}
+
+// FactoryType is a wrapper func that allows the Factory func to specify
+// the backend type for the mock backend plugin instance.
+func FactoryType(backendType logical.BackendType) logical.Factory {
+	return func(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) {
+		b := Backend()
+		b.BackendType = backendType
+		if err := b.Setup(ctx, conf); err != nil {
+			return nil, err
+		}
+		return b, nil
+	}
+}
+
+// Backend returns a private backend struct that embeds framework.Backend.
+func Backend() *backend {
+	var b backend
+	b.Backend = &framework.Backend{
+		Help: "",
+		Paths: framework.PathAppend(
+			errorPaths(&b),
+			kvPaths(&b),
+			[]*framework.Path{
+				pathInternal(&b),
+				pathSpecial(&b),
+				pathRaw(&b),
+			},
+		),
+		PathsSpecial: &logical.Paths{
+			Unauthenticated: []string{
+				"special",
+			},
+		},
+		Secrets:     []*framework.Secret{},
+		Invalidate:  b.invalidate,
+		BackendType: logical.TypeLogical,
+	}
+	b.internal = "bar"
+	b.RunningVersion = "v0.0.0+mock"
+	if version := os.Getenv(MockPluginVersionEnv); version != "" {
+		b.RunningVersion = version
+	}
+	return &b
+}
+
+type backend struct {
+	*framework.Backend
+
+	// internal is used to test invalidate
+	internal string
+}
+
+func (b *backend) invalidate(ctx context.Context, key string) {
+	switch key {
+	case "internal":
+		b.internal = ""
+	}
+}
diff --git a/sdk/plugin/mock/backend_test.go b/sdk/plugin/mock/backend_test.go
new file mode 100644
index 0000000..640eec1
--- /dev/null
+++ b/sdk/plugin/mock/backend_test.go
@@ -0,0 +1,14 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package mock
+
+import (
+	"testing"
+
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+func TestBackend_impl(t *testing.T) {
+	var _ logical.Backend = new(backend)
+}
diff --git a/sdk/plugin/mock/path_errors.go b/sdk/plugin/mock/path_errors.go
new file mode 100644
index 0000000..f5e5b12
--- /dev/null
+++ b/sdk/plugin/mock/path_errors.go
@@ -0,0 +1,77 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package mock
+
+import (
+	"context"
+	"errors"
+	"net/rpc"
+
+	"github.com/hashicorp/vault/sdk/framework"
+	"github.com/hashicorp/vault/sdk/helper/errutil"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/hashicorp/vault/sdk/plugin/pb"
+)
+
+// errorPaths is used to test the different error types that can be returned
+// across the plugin boundary.
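+// The errors/type path maps the pb.ErrType* codes supplied by the caller onto
+// the concrete SDK error values, so that error translation across the gRPC
+// boundary can be exercised end to end.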
+func errorPaths(b *backend) []*framework.Path { + return []*framework.Path{ + { + Pattern: "errors/rpc", + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathErrorRPCRead, + }, + }, + { + Pattern: "errors/kill", + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathErrorRPCRead, + }, + }, + { + Pattern: "errors/type", + Fields: map[string]*framework.FieldSchema{ + "err_type": {Type: framework.TypeInt}, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.CreateOperation: b.pathErrorRPCRead, + logical.UpdateOperation: b.pathErrorRPCRead, + }, + }, + } +} + +func (b *backend) pathErrorRPCRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + errTypeRaw, ok := data.GetOk("err_type") + if !ok { + return nil, rpc.ErrShutdown + } + + var err error + switch uint32(errTypeRaw.(int)) { + case pb.ErrTypeUnknown: + err = errors.New("test") + case pb.ErrTypeUserError: + err = errutil.UserError{Err: "test"} + case pb.ErrTypeInternalError: + err = errutil.InternalError{Err: "test"} + case pb.ErrTypeCodedError: + err = logical.CodedError(403, "test") + case pb.ErrTypeStatusBadRequest: + err = &logical.StatusBadRequest{Err: "test"} + case pb.ErrTypeUnsupportedOperation: + err = logical.ErrUnsupportedOperation + case pb.ErrTypeUnsupportedPath: + err = logical.ErrUnsupportedPath + case pb.ErrTypeInvalidRequest: + err = logical.ErrInvalidRequest + case pb.ErrTypePermissionDenied: + err = logical.ErrPermissionDenied + case pb.ErrTypeMultiAuthzPending: + err = logical.ErrMultiAuthzPending + } + + return nil, err +} diff --git a/sdk/plugin/mock/path_internal.go b/sdk/plugin/mock/path_internal.go new file mode 100644 index 0000000..30c2926 --- /dev/null +++ b/sdk/plugin/mock/path_internal.go @@ -0,0 +1,42 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package mock + +import ( + "context" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +// pathInternal is used to test viewing internal backend values. In this case, +// it is used to test the invalidate func. +func pathInternal(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "internal", + Fields: map[string]*framework.FieldSchema{ + "value": {Type: framework.TypeString}, + }, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.UpdateOperation: b.pathInternalUpdate, + logical.ReadOperation: b.pathInternalRead, + }, + } +} + +func (b *backend) pathInternalUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + value := data.Get("value").(string) + b.internal = value + // Return the secret + return nil, nil +} + +func (b *backend) pathInternalRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Return the secret + return &logical.Response{ + Data: map[string]interface{}{ + "value": b.internal, + }, + }, nil +} diff --git a/sdk/plugin/mock/path_kv.go b/sdk/plugin/mock/path_kv.go new file mode 100644 index 0000000..fd80805 --- /dev/null +++ b/sdk/plugin/mock/path_kv.go @@ -0,0 +1,115 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package mock + +import ( + "context" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +// kvPaths is used to test CRUD and List operations. 
It is a simplified +// version of the passthrough backend that only accepts string values. +func kvPaths(b *backend) []*framework.Path { + return []*framework.Path{ + { + Pattern: "kv/?", + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ListOperation: b.pathKVList, + }, + }, + { + Pattern: "kv/" + framework.GenericNameRegex("key"), + Fields: map[string]*framework.FieldSchema{ + "key": {Type: framework.TypeString}, + "value": {Type: framework.TypeString}, + "version": {Type: framework.TypeInt}, + }, + ExistenceCheck: b.pathExistenceCheck, + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathKVRead, + logical.CreateOperation: b.pathKVCreateUpdate, + logical.UpdateOperation: b.pathKVCreateUpdate, + logical.DeleteOperation: b.pathKVDelete, + }, + }, + } +} + +func (b *backend) pathExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) { + out, err := req.Storage.Get(ctx, req.Path) + if err != nil { + return false, errwrap.Wrapf("existence check failed: {{err}}", err) + } + + return out != nil, nil +} + +func (b *backend) pathKVRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + version := data.Get("version").(int) + + entry, err := req.Storage.Get(ctx, req.Path) + if err != nil { + return nil, err + } + + if entry == nil { + return nil, nil + } + + value := string(entry.Value) + + b.Logger().Info("reading value", "key", req.Path, "value", value) + // Return the secret + resp := &logical.Response{ + Data: map[string]interface{}{ + "value": value, + "version": version, + }, + } + if version != 0 { + resp.Data["version"] = version + } + return resp, nil +} + +func (b *backend) pathKVCreateUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + value := data.Get("value").(string) + + b.Logger().Info("storing value", "key", req.Path, "value", value) + entry := &logical.StorageEntry{ + Key: req.Path, + Value: []byte(value), + } + + s := req.Storage + err := s.Put(ctx, entry) + if err != nil { + return nil, err + } + + return &logical.Response{ + Data: map[string]interface{}{ + "value": value, + }, + }, nil +} + +func (b *backend) pathKVDelete(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + if err := req.Storage.Delete(ctx, req.Path); err != nil { + return nil, err + } + + return nil, nil +} + +func (b *backend) pathKVList(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + vals, err := req.Storage.List(ctx, "kv/") + if err != nil { + return nil, err + } + return logical.ListResponse(vals), nil +} diff --git a/sdk/plugin/mock/path_raw.go b/sdk/plugin/mock/path_raw.go new file mode 100644 index 0000000..2a4b77f --- /dev/null +++ b/sdk/plugin/mock/path_raw.go @@ -0,0 +1,31 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package mock + +import ( + "context" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +// pathRaw is used to test raw responses. 
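+// A raw response bypasses the usual JSON encoding: the logical.HTTPContentType,
+// logical.HTTPRawBody, and logical.HTTPStatusCode entries returned below are
+// handed through to the HTTP layer verbatim.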
+func pathRaw(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "raw", + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathRawRead, + }, + } +} + +func (b *backend) pathRawRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + return &logical.Response{ + Data: map[string]interface{}{ + logical.HTTPContentType: "text/plain", + logical.HTTPRawBody: []byte("Response"), + logical.HTTPStatusCode: 200, + }, + }, nil +} diff --git a/sdk/plugin/mock/path_special.go b/sdk/plugin/mock/path_special.go new file mode 100644 index 0000000..4223f91 --- /dev/null +++ b/sdk/plugin/mock/path_special.go @@ -0,0 +1,30 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package mock + +import ( + "context" + + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/logical" +) + +// pathSpecial is used to test special paths. +func pathSpecial(b *backend) *framework.Path { + return &framework.Path{ + Pattern: "special", + Callbacks: map[logical.Operation]framework.OperationFunc{ + logical.ReadOperation: b.pathSpecialRead, + }, + } +} + +func (b *backend) pathSpecialRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + // Return the secret + return &logical.Response{ + Data: map[string]interface{}{ + "data": "foo", + }, + }, nil +} diff --git a/sdk/plugin/pb/backend.pb.go b/sdk/plugin/pb/backend.pb.go new file mode 100644 index 0000000..82bbae2 --- /dev/null +++ b/sdk/plugin/pb/backend.pb.go @@ -0,0 +1,4943 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: sdk/plugin/pb/backend.proto + +package pb + +import ( + logical "github.com/hashicorp/vault/sdk/logical" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Empty struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Empty) Reset() { + *x = Empty{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Empty) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Empty) ProtoMessage() {} + +func (x *Empty) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Empty.ProtoReflect.Descriptor instead. 
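// Editorial note (not part of the upstream commit): the rest of this
// generated file repeats one fixed pattern per message: a struct with
// protoimpl bookkeeping fields, Reset/String/ProtoReflect methods, a
// deprecated Descriptor accessor, and nil-safe Get* accessors for each field.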
+func (*Empty) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{0} +} + +type Header struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Header []string `protobuf:"bytes,1,rep,name=header,proto3" json:"header,omitempty"` +} + +func (x *Header) Reset() { + *x = Header{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Header) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Header) ProtoMessage() {} + +func (x *Header) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Header.ProtoReflect.Descriptor instead. +func (*Header) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{1} +} + +func (x *Header) GetHeader() []string { + if x != nil { + return x.Header + } + return nil +} + +type ProtoError struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Error type can be one of: + // ErrTypeUnknown uint32 = iota + // ErrTypeUserError + // ErrTypeInternalError + // ErrTypeCodedError + // ErrTypeStatusBadRequest + // ErrTypeUnsupportedOperation + // ErrTypeUnsupportedPath + // ErrTypeInvalidRequest + // ErrTypePermissionDenied + // ErrTypeMultiAuthzPending + // ErrTypeUnrecoverable + ErrType uint32 `protobuf:"varint,1,opt,name=err_type,json=errType,proto3" json:"err_type,omitempty"` + ErrMsg string `protobuf:"bytes,2,opt,name=err_msg,json=errMsg,proto3" json:"err_msg,omitempty"` + ErrCode int64 `protobuf:"varint,3,opt,name=err_code,json=errCode,proto3" json:"err_code,omitempty"` +} + +func (x *ProtoError) Reset() { + *x = ProtoError{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProtoError) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProtoError) ProtoMessage() {} + +func (x *ProtoError) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProtoError.ProtoReflect.Descriptor instead. +func (*ProtoError) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{2} +} + +func (x *ProtoError) GetErrType() uint32 { + if x != nil { + return x.ErrType + } + return 0 +} + +func (x *ProtoError) GetErrMsg() string { + if x != nil { + return x.ErrMsg + } + return "" +} + +func (x *ProtoError) GetErrCode() int64 { + if x != nil { + return x.ErrCode + } + return 0 +} + +// Paths is the structure of special paths that is used for SpecialPaths. 
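// Editorial sketch (not part of the upstream commit): this message mirrors
// logical.Paths, so a backend that declares special paths such as
//
//	&logical.Paths{
//		Root:            []string{"errors/*"},
//		Unauthenticated: []string{"login"},
//	}
//
// crosses the plugin boundary as the equivalent (hypothetical values)
//
//	&pb.Paths{
//		Root:            []string{"errors/*"},
//		Unauthenticated: []string{"login"},
//	}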
+type Paths struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Root are the paths that require a root token to access + Root []string `protobuf:"bytes,1,rep,name=root,proto3" json:"root,omitempty"` + // Unauthenticated are the paths that can be accessed without any auth. + Unauthenticated []string `protobuf:"bytes,2,rep,name=unauthenticated,proto3" json:"unauthenticated,omitempty"` + // LocalStorage are paths (prefixes) that are local to this instance; this + // indicates that these paths should not be replicated + LocalStorage []string `protobuf:"bytes,3,rep,name=local_storage,json=localStorage,proto3" json:"local_storage,omitempty"` + // SealWrapStorage are storage paths that, when using a capable seal, + // should be seal wrapped with extra encryption. It is exact matching + // unless it ends with '/' in which case it will be treated as a prefix. + SealWrapStorage []string `protobuf:"bytes,4,rep,name=seal_wrap_storage,json=sealWrapStorage,proto3" json:"seal_wrap_storage,omitempty"` + // WriteForwardedStorage are storage paths that, when running on a PR + // Secondary cluster, cause a GRPC call up to the PR Primary cluster's + // active node to handle storage.Put(...) and storage.Delete(...) events. + // + // See extended note in /sdk/logical/logical.go. + WriteForwardedStorage []string `protobuf:"bytes,5,rep,name=write_forwarded_storage,json=writeForwardedStorage,proto3" json:"write_forwarded_storage,omitempty"` +} + +func (x *Paths) Reset() { + *x = Paths{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Paths) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Paths) ProtoMessage() {} + +func (x *Paths) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Paths.ProtoReflect.Descriptor instead. 
+func (*Paths) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{3} +} + +func (x *Paths) GetRoot() []string { + if x != nil { + return x.Root + } + return nil +} + +func (x *Paths) GetUnauthenticated() []string { + if x != nil { + return x.Unauthenticated + } + return nil +} + +func (x *Paths) GetLocalStorage() []string { + if x != nil { + return x.LocalStorage + } + return nil +} + +func (x *Paths) GetSealWrapStorage() []string { + if x != nil { + return x.SealWrapStorage + } + return nil +} + +func (x *Paths) GetWriteForwardedStorage() []string { + if x != nil { + return x.WriteForwardedStorage + } + return nil +} + +type Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // ID is the uuid associated with each request + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // If set, the name given to the replication secondary where this request + // originated + ReplicationCluster string `protobuf:"bytes,2,opt,name=ReplicationCluster,proto3" json:"ReplicationCluster,omitempty"` + // Operation is the requested operation type + Operation string `protobuf:"bytes,3,opt,name=operation,proto3" json:"operation,omitempty"` + // Path is the part of the request path not consumed by the + // routing. As an example, if the original request path is "prod/aws/foo" + // and the AWS logical backend is mounted at "prod/aws/", then the + // final path is "foo" since the mount prefix is trimmed. + Path string `protobuf:"bytes,4,opt,name=path,proto3" json:"path,omitempty"` + // Request data is a JSON object that must have keys with string type. + Data string `protobuf:"bytes,5,opt,name=data,proto3" json:"data,omitempty"` + // Secret will be non-nil only for Revoke and Renew operations + // to represent the secret that was returned prior. + Secret *Secret `protobuf:"bytes,6,opt,name=secret,proto3" json:"secret,omitempty"` + // Auth will be non-nil only for Renew operations + // to represent the auth that was returned prior. + Auth *Auth `protobuf:"bytes,7,opt,name=auth,proto3" json:"auth,omitempty"` + // Headers will contain the http headers from the request. This value will + // be used in the audit broker to ensure we are auditing only the allowed + // headers. + Headers map[string]*Header `protobuf:"bytes,8,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // ClientToken is provided to the core so that the identity + // can be verified and ACLs applied. This value is passed + // through to the logical backends but after being salted and + // hashed. + ClientToken string `protobuf:"bytes,9,opt,name=client_token,json=clientToken,proto3" json:"client_token,omitempty"` + // ClientTokenAccessor is provided to the core so that the it can get + // logged as part of request audit logging. + ClientTokenAccessor string `protobuf:"bytes,10,opt,name=client_token_accessor,json=clientTokenAccessor,proto3" json:"client_token_accessor,omitempty"` + // DisplayName is provided to the logical backend to help associate + // dynamic secrets with the source entity. This is not a sensitive + // name, but is useful for operators. + DisplayName string `protobuf:"bytes,11,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // MountPoint is provided so that a logical backend can generate + // paths relative to itself. 
The `Path` is effectively the client + // request path with the MountPoint trimmed off. + MountPoint string `protobuf:"bytes,12,opt,name=mount_point,json=mountPoint,proto3" json:"mount_point,omitempty"` + // MountType is provided so that a logical backend can make decisions + // based on the specific mount type (e.g., if a mount type has different + // aliases, generating different defaults depending on the alias) + MountType string `protobuf:"bytes,13,opt,name=mount_type,json=mountType,proto3" json:"mount_type,omitempty"` + // MountAccessor is provided so that identities returned by the authentication + // backends can be tied to the mount it belongs to. + MountAccessor string `protobuf:"bytes,14,opt,name=mount_accessor,json=mountAccessor,proto3" json:"mount_accessor,omitempty"` + // WrapInfo contains requested response wrapping parameters + WrapInfo *RequestWrapInfo `protobuf:"bytes,15,opt,name=wrap_info,json=wrapInfo,proto3" json:"wrap_info,omitempty"` + // ClientTokenRemainingUses represents the allowed number of uses left on the + // token supplied + ClientTokenRemainingUses int64 `protobuf:"varint,16,opt,name=client_token_remaining_uses,json=clientTokenRemainingUses,proto3" json:"client_token_remaining_uses,omitempty"` + // EntityID is the identity of the caller extracted out of the token used + // to make this request + EntityID string `protobuf:"bytes,17,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"` + // PolicyOverride indicates that the requestor wishes to override + // soft-mandatory Sentinel policies + PolicyOverride bool `protobuf:"varint,18,opt,name=policy_override,json=policyOverride,proto3" json:"policy_override,omitempty"` + // Whether the request is unauthenticated, as in, had no client token + // attached. Useful in some situations where the client token is not made + // accessible. + Unauthenticated bool `protobuf:"varint,19,opt,name=unauthenticated,proto3" json:"unauthenticated,omitempty"` + // Connection will be non-nil only for credential providers to + // inspect the connection information and potentially use it for + // authentication/protection. + Connection *Connection `protobuf:"bytes,20,opt,name=connection,proto3" json:"connection,omitempty"` +} + +func (x *Request) Reset() { + *x = Request{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Request) ProtoMessage() {} + +func (x *Request) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Request.ProtoReflect.Descriptor instead. 
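// Editorial note (not part of the upstream commit): unlike
// logical.Request.Data (a map[string]interface{}), the Data field above is a
// JSON-encoded string; the request is marshalled before crossing the plugin
// boundary. For example, a client write of {"value":"bar"} would travel
// roughly as
//
//	&pb.Request{Operation: "update", Path: "kv/foo", Data: `{"value":"bar"}`}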
+func (*Request) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{4} +} + +func (x *Request) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *Request) GetReplicationCluster() string { + if x != nil { + return x.ReplicationCluster + } + return "" +} + +func (x *Request) GetOperation() string { + if x != nil { + return x.Operation + } + return "" +} + +func (x *Request) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *Request) GetData() string { + if x != nil { + return x.Data + } + return "" +} + +func (x *Request) GetSecret() *Secret { + if x != nil { + return x.Secret + } + return nil +} + +func (x *Request) GetAuth() *Auth { + if x != nil { + return x.Auth + } + return nil +} + +func (x *Request) GetHeaders() map[string]*Header { + if x != nil { + return x.Headers + } + return nil +} + +func (x *Request) GetClientToken() string { + if x != nil { + return x.ClientToken + } + return "" +} + +func (x *Request) GetClientTokenAccessor() string { + if x != nil { + return x.ClientTokenAccessor + } + return "" +} + +func (x *Request) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *Request) GetMountPoint() string { + if x != nil { + return x.MountPoint + } + return "" +} + +func (x *Request) GetMountType() string { + if x != nil { + return x.MountType + } + return "" +} + +func (x *Request) GetMountAccessor() string { + if x != nil { + return x.MountAccessor + } + return "" +} + +func (x *Request) GetWrapInfo() *RequestWrapInfo { + if x != nil { + return x.WrapInfo + } + return nil +} + +func (x *Request) GetClientTokenRemainingUses() int64 { + if x != nil { + return x.ClientTokenRemainingUses + } + return 0 +} + +func (x *Request) GetEntityID() string { + if x != nil { + return x.EntityID + } + return "" +} + +func (x *Request) GetPolicyOverride() bool { + if x != nil { + return x.PolicyOverride + } + return false +} + +func (x *Request) GetUnauthenticated() bool { + if x != nil { + return x.Unauthenticated + } + return false +} + +func (x *Request) GetConnection() *Connection { + if x != nil { + return x.Connection + } + return nil +} + +type Auth struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LeaseOptions *LeaseOptions `protobuf:"bytes,1,opt,name=lease_options,json=leaseOptions,proto3" json:"lease_options,omitempty"` + // InternalData is a JSON object that is stored with the auth struct. + // This will be sent back during a Renew/Revoke for storing internal data + // used for those operations. + InternalData string `protobuf:"bytes,2,opt,name=internal_data,json=internalData,proto3" json:"internal_data,omitempty"` + // DisplayName is a non-security sensitive identifier that is + // applicable to this Auth. It is used for logging and prefixing + // of dynamic secrets. For example, DisplayName may be "armon" for + // the github credential backend. If the client token is used to + // generate a SQL credential, the user may be "github-armon-uuid". + // This is to help identify the source without using audit tables. + DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + // Policies is the list of policies that the authenticated user + // is associated with. 
+ Policies []string `protobuf:"bytes,4,rep,name=policies,proto3" json:"policies,omitempty"` + // Metadata is used to attach arbitrary string-type metadata to + // an authenticated user. This metadata will be outputted into the + // audit log. + Metadata map[string]string `protobuf:"bytes,5,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // ClientToken is the token that is generated for the authentication. + // This will be filled in by Vault core when an auth structure is + // returned. Setting this manually will have no effect. + ClientToken string `protobuf:"bytes,6,opt,name=client_token,json=clientToken,proto3" json:"client_token,omitempty"` + // Accessor is the identifier for the ClientToken. This can be used + // to perform management functionalities (especially revocation) when + // ClientToken in the audit logs are obfuscated. Accessor can be used + // to revoke a ClientToken and to lookup the capabilities of the ClientToken, + // both without actually knowing the ClientToken. + Accessor string `protobuf:"bytes,7,opt,name=accessor,proto3" json:"accessor,omitempty"` + // Period indicates that the token generated using this Auth object + // should never expire. The token should be renewed within the duration + // specified by this period. + Period int64 `protobuf:"varint,8,opt,name=period,proto3" json:"period,omitempty"` + // Number of allowed uses of the issued token + NumUses int64 `protobuf:"varint,9,opt,name=num_uses,json=numUses,proto3" json:"num_uses,omitempty"` + // EntityID is the identifier of the entity in identity store to which the + // identity of the authenticating client belongs to. + EntityID string `protobuf:"bytes,10,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"` + // Alias is the information about the authenticated client returned by + // the auth backend + Alias *logical.Alias `protobuf:"bytes,11,opt,name=alias,proto3" json:"alias,omitempty"` + // GroupAliases are the informational mappings of external groups which an + // authenticated user belongs to. This is used to check if there are + // mappings groups for the group aliases in identity store. For all the + // matching groups, the entity ID of the user will be added. + GroupAliases []*logical.Alias `protobuf:"bytes,12,rep,name=group_aliases,json=groupAliases,proto3" json:"group_aliases,omitempty"` + // If set, restricts usage of the certificates to client IPs falling within + // the range of the specified CIDR(s). + BoundCIDRs []string `protobuf:"bytes,13,rep,name=bound_cidrs,json=boundCidrs,proto3" json:"bound_cidrs,omitempty"` + // TokenPolicies and IdentityPolicies break down the list in Policies to + // help determine where a policy was sourced + TokenPolicies []string `protobuf:"bytes,14,rep,name=token_policies,json=tokenPolicies,proto3" json:"token_policies,omitempty"` + IdentityPolicies []string `protobuf:"bytes,15,rep,name=identity_policies,json=identityPolicies,proto3" json:"identity_policies,omitempty"` + // Explicit maximum lifetime for the token. Unlike normal TTLs, the maximum + // TTL is a hard limit and cannot be exceeded, also counts for periodic tokens. 
+ ExplicitMaxTTL int64 `protobuf:"varint,16,opt,name=explicit_max_ttl,json=explicitMaxTtl,proto3" json:"explicit_max_ttl,omitempty"` + // TokenType is the type of token being requested + TokenType uint32 `protobuf:"varint,17,opt,name=token_type,json=tokenType,proto3" json:"token_type,omitempty"` + // Whether the default policy should be added automatically by core + NoDefaultPolicy bool `protobuf:"varint,18,opt,name=no_default_policy,json=noDefaultPolicy,proto3" json:"no_default_policy,omitempty"` +} + +func (x *Auth) Reset() { + *x = Auth{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Auth) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Auth) ProtoMessage() {} + +func (x *Auth) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Auth.ProtoReflect.Descriptor instead. +func (*Auth) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{5} +} + +func (x *Auth) GetLeaseOptions() *LeaseOptions { + if x != nil { + return x.LeaseOptions + } + return nil +} + +func (x *Auth) GetInternalData() string { + if x != nil { + return x.InternalData + } + return "" +} + +func (x *Auth) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *Auth) GetPolicies() []string { + if x != nil { + return x.Policies + } + return nil +} + +func (x *Auth) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Auth) GetClientToken() string { + if x != nil { + return x.ClientToken + } + return "" +} + +func (x *Auth) GetAccessor() string { + if x != nil { + return x.Accessor + } + return "" +} + +func (x *Auth) GetPeriod() int64 { + if x != nil { + return x.Period + } + return 0 +} + +func (x *Auth) GetNumUses() int64 { + if x != nil { + return x.NumUses + } + return 0 +} + +func (x *Auth) GetEntityID() string { + if x != nil { + return x.EntityID + } + return "" +} + +func (x *Auth) GetAlias() *logical.Alias { + if x != nil { + return x.Alias + } + return nil +} + +func (x *Auth) GetGroupAliases() []*logical.Alias { + if x != nil { + return x.GroupAliases + } + return nil +} + +func (x *Auth) GetBoundCIDRs() []string { + if x != nil { + return x.BoundCIDRs + } + return nil +} + +func (x *Auth) GetTokenPolicies() []string { + if x != nil { + return x.TokenPolicies + } + return nil +} + +func (x *Auth) GetIdentityPolicies() []string { + if x != nil { + return x.IdentityPolicies + } + return nil +} + +func (x *Auth) GetExplicitMaxTTL() int64 { + if x != nil { + return x.ExplicitMaxTTL + } + return 0 +} + +func (x *Auth) GetTokenType() uint32 { + if x != nil { + return x.TokenType + } + return 0 +} + +func (x *Auth) GetNoDefaultPolicy() bool { + if x != nil { + return x.NoDefaultPolicy + } + return false +} + +type TokenEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Accessor string `protobuf:"bytes,2,opt,name=accessor,proto3" json:"accessor,omitempty"` + Parent string `protobuf:"bytes,3,opt,name=parent,proto3" 
json:"parent,omitempty"` + Policies []string `protobuf:"bytes,4,rep,name=policies,proto3" json:"policies,omitempty"` + Path string `protobuf:"bytes,5,opt,name=path,proto3" json:"path,omitempty"` + Meta map[string]string `protobuf:"bytes,6,rep,name=meta,proto3" json:"meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + DisplayName string `protobuf:"bytes,7,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"` + NumUses int64 `protobuf:"varint,8,opt,name=num_uses,json=numUses,proto3" json:"num_uses,omitempty"` + CreationTime int64 `protobuf:"varint,9,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"` + TTL int64 `protobuf:"varint,10,opt,name=ttl,proto3" json:"ttl,omitempty"` + ExplicitMaxTTL int64 `protobuf:"varint,11,opt,name=explicit_max_ttl,json=explicitMaxTtl,proto3" json:"explicit_max_ttl,omitempty"` + Role string `protobuf:"bytes,12,opt,name=role,proto3" json:"role,omitempty"` + Period int64 `protobuf:"varint,13,opt,name=period,proto3" json:"period,omitempty"` + EntityID string `protobuf:"bytes,14,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"` + BoundCIDRs []string `protobuf:"bytes,15,rep,name=bound_cidrs,json=boundCidrs,proto3" json:"bound_cidrs,omitempty"` + NamespaceID string `protobuf:"bytes,16,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty"` + CubbyholeID string `protobuf:"bytes,17,opt,name=cubbyhole_id,json=cubbyholeId,proto3" json:"cubbyhole_id,omitempty"` + Type uint32 `protobuf:"varint,18,opt,name=type,proto3" json:"type,omitempty"` + InternalMeta map[string]string `protobuf:"bytes,19,rep,name=internal_meta,json=internalMeta,proto3" json:"internal_meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + InlinePolicy string `protobuf:"bytes,20,opt,name=inline_policy,json=inlinePolicy,proto3" json:"inline_policy,omitempty"` + NoIdentityPolicies bool `protobuf:"varint,21,opt,name=no_identity_policies,json=noIdentityPolicies,proto3" json:"no_identity_policies,omitempty"` + ExternalID string `protobuf:"bytes,22,opt,name=external_id,json=externalId,proto3" json:"external_id,omitempty"` +} + +func (x *TokenEntry) Reset() { + *x = TokenEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TokenEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TokenEntry) ProtoMessage() {} + +func (x *TokenEntry) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TokenEntry.ProtoReflect.Descriptor instead. 
+func (*TokenEntry) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{6} +} + +func (x *TokenEntry) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *TokenEntry) GetAccessor() string { + if x != nil { + return x.Accessor + } + return "" +} + +func (x *TokenEntry) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *TokenEntry) GetPolicies() []string { + if x != nil { + return x.Policies + } + return nil +} + +func (x *TokenEntry) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *TokenEntry) GetMeta() map[string]string { + if x != nil { + return x.Meta + } + return nil +} + +func (x *TokenEntry) GetDisplayName() string { + if x != nil { + return x.DisplayName + } + return "" +} + +func (x *TokenEntry) GetNumUses() int64 { + if x != nil { + return x.NumUses + } + return 0 +} + +func (x *TokenEntry) GetCreationTime() int64 { + if x != nil { + return x.CreationTime + } + return 0 +} + +func (x *TokenEntry) GetTTL() int64 { + if x != nil { + return x.TTL + } + return 0 +} + +func (x *TokenEntry) GetExplicitMaxTTL() int64 { + if x != nil { + return x.ExplicitMaxTTL + } + return 0 +} + +func (x *TokenEntry) GetRole() string { + if x != nil { + return x.Role + } + return "" +} + +func (x *TokenEntry) GetPeriod() int64 { + if x != nil { + return x.Period + } + return 0 +} + +func (x *TokenEntry) GetEntityID() string { + if x != nil { + return x.EntityID + } + return "" +} + +func (x *TokenEntry) GetBoundCIDRs() []string { + if x != nil { + return x.BoundCIDRs + } + return nil +} + +func (x *TokenEntry) GetNamespaceID() string { + if x != nil { + return x.NamespaceID + } + return "" +} + +func (x *TokenEntry) GetCubbyholeID() string { + if x != nil { + return x.CubbyholeID + } + return "" +} + +func (x *TokenEntry) GetType() uint32 { + if x != nil { + return x.Type + } + return 0 +} + +func (x *TokenEntry) GetInternalMeta() map[string]string { + if x != nil { + return x.InternalMeta + } + return nil +} + +func (x *TokenEntry) GetInlinePolicy() string { + if x != nil { + return x.InlinePolicy + } + return "" +} + +func (x *TokenEntry) GetNoIdentityPolicies() bool { + if x != nil { + return x.NoIdentityPolicies + } + return false +} + +func (x *TokenEntry) GetExternalID() string { + if x != nil { + return x.ExternalID + } + return "" +} + +type LeaseOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TTL int64 `protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"` + Renewable bool `protobuf:"varint,2,opt,name=renewable,proto3" json:"renewable,omitempty"` + Increment int64 `protobuf:"varint,3,opt,name=increment,proto3" json:"increment,omitempty"` + IssueTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=issue_time,json=issueTime,proto3" json:"issue_time,omitempty"` + MaxTTL int64 `protobuf:"varint,5,opt,name=MaxTTL,proto3" json:"MaxTTL,omitempty"` +} + +func (x *LeaseOptions) Reset() { + *x = LeaseOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LeaseOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LeaseOptions) ProtoMessage() {} + +func (x *LeaseOptions) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LeaseOptions.ProtoReflect.Descriptor instead. +func (*LeaseOptions) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{7} +} + +func (x *LeaseOptions) GetTTL() int64 { + if x != nil { + return x.TTL + } + return 0 +} + +func (x *LeaseOptions) GetRenewable() bool { + if x != nil { + return x.Renewable + } + return false +} + +func (x *LeaseOptions) GetIncrement() int64 { + if x != nil { + return x.Increment + } + return 0 +} + +func (x *LeaseOptions) GetIssueTime() *timestamppb.Timestamp { + if x != nil { + return x.IssueTime + } + return nil +} + +func (x *LeaseOptions) GetMaxTTL() int64 { + if x != nil { + return x.MaxTTL + } + return 0 +} + +type Secret struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LeaseOptions *LeaseOptions `protobuf:"bytes,1,opt,name=lease_options,json=leaseOptions,proto3" json:"lease_options,omitempty"` + // InternalData is a JSON object that is stored with the secret. + // This will be sent back during a Renew/Revoke for storing internal data + // used for those operations. + InternalData string `protobuf:"bytes,2,opt,name=internal_data,json=internalData,proto3" json:"internal_data,omitempty"` + // LeaseID is the ID returned to the user to manage this secret. + // This is generated by Vault core. Any set value will be ignored. + // For requests, this will always be blank. + LeaseID string `protobuf:"bytes,3,opt,name=lease_id,json=leaseId,proto3" json:"lease_id,omitempty"` +} + +func (x *Secret) Reset() { + *x = Secret{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Secret) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Secret) ProtoMessage() {} + +func (x *Secret) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Secret.ProtoReflect.Descriptor instead. +func (*Secret) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{8} +} + +func (x *Secret) GetLeaseOptions() *LeaseOptions { + if x != nil { + return x.LeaseOptions + } + return nil +} + +func (x *Secret) GetInternalData() string { + if x != nil { + return x.InternalData + } + return "" +} + +func (x *Secret) GetLeaseID() string { + if x != nil { + return x.LeaseID + } + return "" +} + +type Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Secret, if not nil, denotes that this response represents a secret. + Secret *Secret `protobuf:"bytes,1,opt,name=secret,proto3" json:"secret,omitempty"` + // Auth, if not nil, contains the authentication information for + // this response. This is only checked and means something for + // credential backends. + Auth *Auth `protobuf:"bytes,2,opt,name=auth,proto3" json:"auth,omitempty"` + // Response data is a JSON object that must have string keys. For + // secrets, this data is sent down to the user as-is. 
To store internal + // data that you don't want the user to see, store it in + // Secret.InternalData. + Data string `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` + // Redirect is an HTTP URL to redirect to for further authentication. + // This is only valid for credential backends. This will be blanked + // for any logical backend and ignored. + Redirect string `protobuf:"bytes,4,opt,name=redirect,proto3" json:"redirect,omitempty"` + // Warnings allow operations or backends to return warnings in response + // to user actions without failing the action outright. + Warnings []string `protobuf:"bytes,5,rep,name=warnings,proto3" json:"warnings,omitempty"` + // Information for wrapping the response in a cubbyhole + WrapInfo *ResponseWrapInfo `protobuf:"bytes,6,opt,name=wrap_info,json=wrapInfo,proto3" json:"wrap_info,omitempty"` + // Headers will contain the http headers from the response. This value will + // be used in the audit broker to ensure we are auditing only the allowed + // headers. + Headers map[string]*Header `protobuf:"bytes,7,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Response) Reset() { + *x = Response{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Response) ProtoMessage() {} + +func (x *Response) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Response.ProtoReflect.Descriptor instead. +func (*Response) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{9} +} + +func (x *Response) GetSecret() *Secret { + if x != nil { + return x.Secret + } + return nil +} + +func (x *Response) GetAuth() *Auth { + if x != nil { + return x.Auth + } + return nil +} + +func (x *Response) GetData() string { + if x != nil { + return x.Data + } + return "" +} + +func (x *Response) GetRedirect() string { + if x != nil { + return x.Redirect + } + return "" +} + +func (x *Response) GetWarnings() []string { + if x != nil { + return x.Warnings + } + return nil +} + +func (x *Response) GetWrapInfo() *ResponseWrapInfo { + if x != nil { + return x.WrapInfo + } + return nil +} + +func (x *Response) GetHeaders() map[string]*Header { + if x != nil { + return x.Headers + } + return nil +} + +type ResponseWrapInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Setting to non-zero specifies that the response should be wrapped. + // Specifies the desired TTL of the wrapping token. + TTL int64 `protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"` + // The token containing the wrapped response + Token string `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"` + // The token accessor for the wrapped response token + Accessor string `protobuf:"bytes,3,opt,name=accessor,proto3" json:"accessor,omitempty"` + // The creation time. This can be used with the TTL to figure out an + // expected expiration. 
+ CreationTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"` + // If the contained response is the output of a token creation call, the + // created token's accessor will be accessible here + WrappedAccessor string `protobuf:"bytes,5,opt,name=wrapped_accessor,json=wrappedAccessor,proto3" json:"wrapped_accessor,omitempty"` + // WrappedEntityID is the entity identifier of the caller who initiated the + // wrapping request + WrappedEntityID string `protobuf:"bytes,6,opt,name=wrapped_entity_id,json=wrappedEntityID,proto3" json:"wrapped_entity_id,omitempty"` + // The format to use. This doesn't get returned, it's only internal. + Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"` + // CreationPath is the original request path that was used to create + // the wrapped response. + CreationPath string `protobuf:"bytes,8,opt,name=creation_path,json=creationPath,proto3" json:"creation_path,omitempty"` + // Controls seal wrapping behavior downstream for specific use cases + SealWrap bool `protobuf:"varint,9,opt,name=seal_wrap,json=sealWrap,proto3" json:"seal_wrap,omitempty"` +} + +func (x *ResponseWrapInfo) Reset() { + *x = ResponseWrapInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponseWrapInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResponseWrapInfo) ProtoMessage() {} + +func (x *ResponseWrapInfo) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponseWrapInfo.ProtoReflect.Descriptor instead. +func (*ResponseWrapInfo) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{10} +} + +func (x *ResponseWrapInfo) GetTTL() int64 { + if x != nil { + return x.TTL + } + return 0 +} + +func (x *ResponseWrapInfo) GetToken() string { + if x != nil { + return x.Token + } + return "" +} + +func (x *ResponseWrapInfo) GetAccessor() string { + if x != nil { + return x.Accessor + } + return "" +} + +func (x *ResponseWrapInfo) GetCreationTime() *timestamppb.Timestamp { + if x != nil { + return x.CreationTime + } + return nil +} + +func (x *ResponseWrapInfo) GetWrappedAccessor() string { + if x != nil { + return x.WrappedAccessor + } + return "" +} + +func (x *ResponseWrapInfo) GetWrappedEntityID() string { + if x != nil { + return x.WrappedEntityID + } + return "" +} + +func (x *ResponseWrapInfo) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x *ResponseWrapInfo) GetCreationPath() string { + if x != nil { + return x.CreationPath + } + return "" +} + +func (x *ResponseWrapInfo) GetSealWrap() bool { + if x != nil { + return x.SealWrap + } + return false +} + +type RequestWrapInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Setting to non-zero specifies that the response should be wrapped. + // Specifies the desired TTL of the wrapping token. 
+ TTL int64 `protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"` + // The format to use for the wrapped response; if not specified it's a bare + // token + Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` + // A flag to conforming backends that data for a given request should be + // seal wrapped + SealWrap bool `protobuf:"varint,3,opt,name=seal_wrap,json=sealWrap,proto3" json:"seal_wrap,omitempty"` +} + +func (x *RequestWrapInfo) Reset() { + *x = RequestWrapInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestWrapInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestWrapInfo) ProtoMessage() {} + +func (x *RequestWrapInfo) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestWrapInfo.ProtoReflect.Descriptor instead. +func (*RequestWrapInfo) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{11} +} + +func (x *RequestWrapInfo) GetTTL() int64 { + if x != nil { + return x.TTL + } + return 0 +} + +func (x *RequestWrapInfo) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x *RequestWrapInfo) GetSealWrap() bool { + if x != nil { + return x.SealWrap + } + return false +} + +// HandleRequestArgs is the args for HandleRequest method. +type HandleRequestArgs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + StorageID uint32 `protobuf:"varint,1,opt,name=storage_id,json=storageId,proto3" json:"storage_id,omitempty"` + Request *Request `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` +} + +func (x *HandleRequestArgs) Reset() { + *x = HandleRequestArgs{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HandleRequestArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HandleRequestArgs) ProtoMessage() {} + +func (x *HandleRequestArgs) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HandleRequestArgs.ProtoReflect.Descriptor instead. +func (*HandleRequestArgs) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{12} +} + +func (x *HandleRequestArgs) GetStorageID() uint32 { + if x != nil { + return x.StorageID + } + return 0 +} + +func (x *HandleRequestArgs) GetRequest() *Request { + if x != nil { + return x.Request + } + return nil +} + +// HandleRequestReply is the reply for HandleRequest method. 
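// Editorial sketch (not part of the upstream commit): exactly one of the two
// fields below is normally populated. Assuming the pb package's ErrToProtoErr
// translation helper, a server-side handler would shape the reply roughly as:
//
//	reply := &pb.HandleRequestReply{
//		Response: protoResp,             // nil when the request failed
//		Err:      pb.ErrToProtoErr(err), // nil when err is nil
//	}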
+type HandleRequestReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Response *Response `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` + Err *ProtoError `protobuf:"bytes,2,opt,name=err,proto3" json:"err,omitempty"` +} + +func (x *HandleRequestReply) Reset() { + *x = HandleRequestReply{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HandleRequestReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HandleRequestReply) ProtoMessage() {} + +func (x *HandleRequestReply) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HandleRequestReply.ProtoReflect.Descriptor instead. +func (*HandleRequestReply) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{13} +} + +func (x *HandleRequestReply) GetResponse() *Response { + if x != nil { + return x.Response + } + return nil +} + +func (x *HandleRequestReply) GetErr() *ProtoError { + if x != nil { + return x.Err + } + return nil +} + +// InitializeArgs is the args for Initialize method. +type InitializeArgs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *InitializeArgs) Reset() { + *x = InitializeArgs{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InitializeArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InitializeArgs) ProtoMessage() {} + +func (x *InitializeArgs) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InitializeArgs.ProtoReflect.Descriptor instead. +func (*InitializeArgs) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{14} +} + +// InitializeReply is the reply for Initialize method. 
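// Editorial sketch (not part of the upstream commit): Initialize carries no
// payload, so the round trip reduces to reporting an optional error, roughly:
//
//	err := b.Initialize(ctx, &logical.InitializationRequest{Storage: storage})
//	reply := &pb.InitializeReply{Err: pb.ErrToProtoErr(err)}
//
// (logical.InitializationRequest and ErrToProtoErr are assumed from the SDK.)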
+type InitializeReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Err *ProtoError `protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"` +} + +func (x *InitializeReply) Reset() { + *x = InitializeReply{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InitializeReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InitializeReply) ProtoMessage() {} + +func (x *InitializeReply) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InitializeReply.ProtoReflect.Descriptor instead. +func (*InitializeReply) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{15} +} + +func (x *InitializeReply) GetErr() *ProtoError { + if x != nil { + return x.Err + } + return nil +} + +// SpecialPathsReply is the reply for SpecialPaths method. +type SpecialPathsReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Paths *Paths `protobuf:"bytes,1,opt,name=paths,proto3" json:"paths,omitempty"` +} + +func (x *SpecialPathsReply) Reset() { + *x = SpecialPathsReply{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SpecialPathsReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SpecialPathsReply) ProtoMessage() {} + +func (x *SpecialPathsReply) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SpecialPathsReply.ProtoReflect.Descriptor instead. +func (*SpecialPathsReply) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{16} +} + +func (x *SpecialPathsReply) GetPaths() *Paths { + if x != nil { + return x.Paths + } + return nil +} + +// HandleExistenceCheckArgs is the args for HandleExistenceCheck method. 
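// Editorial note (not part of the upstream commit): this RPC backs
// ExistenceCheck funcs such as pathExistenceCheck in the mock backend above.
// In the reply, CheckFound reports whether the path registered a check at
// all, while Exists reports the result of running it, so "no check
// registered" and "checked, not present" stay distinguishable.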
+type HandleExistenceCheckArgs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + StorageID uint32 `protobuf:"varint,1,opt,name=storage_id,json=storageId,proto3" json:"storage_id,omitempty"` + Request *Request `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` +} + +func (x *HandleExistenceCheckArgs) Reset() { + *x = HandleExistenceCheckArgs{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HandleExistenceCheckArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HandleExistenceCheckArgs) ProtoMessage() {} + +func (x *HandleExistenceCheckArgs) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HandleExistenceCheckArgs.ProtoReflect.Descriptor instead. +func (*HandleExistenceCheckArgs) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{17} +} + +func (x *HandleExistenceCheckArgs) GetStorageID() uint32 { + if x != nil { + return x.StorageID + } + return 0 +} + +func (x *HandleExistenceCheckArgs) GetRequest() *Request { + if x != nil { + return x.Request + } + return nil +} + +// HandleExistenceCheckReply is the reply for HandleExistenceCheck method. +type HandleExistenceCheckReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CheckFound bool `protobuf:"varint,1,opt,name=check_found,json=checkFound,proto3" json:"check_found,omitempty"` + Exists bool `protobuf:"varint,2,opt,name=exists,proto3" json:"exists,omitempty"` + Err *ProtoError `protobuf:"bytes,3,opt,name=err,proto3" json:"err,omitempty"` +} + +func (x *HandleExistenceCheckReply) Reset() { + *x = HandleExistenceCheckReply{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HandleExistenceCheckReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HandleExistenceCheckReply) ProtoMessage() {} + +func (x *HandleExistenceCheckReply) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HandleExistenceCheckReply.ProtoReflect.Descriptor instead. +func (*HandleExistenceCheckReply) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{18} +} + +func (x *HandleExistenceCheckReply) GetCheckFound() bool { + if x != nil { + return x.CheckFound + } + return false +} + +func (x *HandleExistenceCheckReply) GetExists() bool { + if x != nil { + return x.Exists + } + return false +} + +func (x *HandleExistenceCheckReply) GetErr() *ProtoError { + if x != nil { + return x.Err + } + return nil +} + +// SetupArgs is the args for Setup method. 
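// Editorial sketch (not part of the upstream commit): Setup delivers the
// mount-time configuration; Config corresponds to the key/value pairs a
// backend later reads from logical.BackendConfig.Config. With hypothetical
// values:
//
//	args := &pb.SetupArgs{
//		BrokerID:    brokerID, // multiplexed connection for storage/sys callbacks
//		Config:      map[string]string{"plugin_name": "mock-plugin"},
//		BackendUUID: backendUUID,
//	}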
+type SetupArgs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BrokerID uint32 `protobuf:"varint,1,opt,name=broker_id,json=brokerId,proto3" json:"broker_id,omitempty"` + Config map[string]string `protobuf:"bytes,2,rep,name=Config,proto3" json:"Config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + BackendUUID string `protobuf:"bytes,3,opt,name=backendUUID,proto3" json:"backendUUID,omitempty"` +} + +func (x *SetupArgs) Reset() { + *x = SetupArgs{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetupArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetupArgs) ProtoMessage() {} + +func (x *SetupArgs) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetupArgs.ProtoReflect.Descriptor instead. +func (*SetupArgs) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{19} +} + +func (x *SetupArgs) GetBrokerID() uint32 { + if x != nil { + return x.BrokerID + } + return 0 +} + +func (x *SetupArgs) GetConfig() map[string]string { + if x != nil { + return x.Config + } + return nil +} + +func (x *SetupArgs) GetBackendUUID() string { + if x != nil { + return x.BackendUUID + } + return "" +} + +// SetupReply is the reply for Setup method. +type SetupReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Err string `protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"` +} + +func (x *SetupReply) Reset() { + *x = SetupReply{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SetupReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetupReply) ProtoMessage() {} + +func (x *SetupReply) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetupReply.ProtoReflect.Descriptor instead. +func (*SetupReply) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{20} +} + +func (x *SetupReply) GetErr() string { + if x != nil { + return x.Err + } + return "" +} + +// TypeReply is the reply for the Type method. 
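// Editorial note (not part of the upstream commit): the uint32 below carries
// a logical.BackendType value, i.e. logical.TypeCredential for auth plugins
// and logical.TypeLogical for secrets plugins.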
+type TypeReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type uint32 `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"` +} + +func (x *TypeReply) Reset() { + *x = TypeReply{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TypeReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TypeReply) ProtoMessage() {} + +func (x *TypeReply) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TypeReply.ProtoReflect.Descriptor instead. +func (*TypeReply) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{21} +} + +func (x *TypeReply) GetType() uint32 { + if x != nil { + return x.Type + } + return 0 +} + +type InvalidateKeyArgs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` +} + +func (x *InvalidateKeyArgs) Reset() { + *x = InvalidateKeyArgs{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InvalidateKeyArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InvalidateKeyArgs) ProtoMessage() {} + +func (x *InvalidateKeyArgs) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InvalidateKeyArgs.ProtoReflect.Descriptor instead. 
+func (*InvalidateKeyArgs) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{22} +} + +func (x *InvalidateKeyArgs) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +type StorageEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + SealWrap bool `protobuf:"varint,3,opt,name=seal_wrap,json=sealWrap,proto3" json:"seal_wrap,omitempty"` +} + +func (x *StorageEntry) Reset() { + *x = StorageEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StorageEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StorageEntry) ProtoMessage() {} + +func (x *StorageEntry) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StorageEntry.ProtoReflect.Descriptor instead. +func (*StorageEntry) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{23} +} + +func (x *StorageEntry) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *StorageEntry) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +func (x *StorageEntry) GetSealWrap() bool { + if x != nil { + return x.SealWrap + } + return false +} + +type StorageListArgs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"` +} + +func (x *StorageListArgs) Reset() { + *x = StorageListArgs{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StorageListArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StorageListArgs) ProtoMessage() {} + +func (x *StorageListArgs) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StorageListArgs.ProtoReflect.Descriptor instead. 
+func (*StorageListArgs) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{24} +} + +func (x *StorageListArgs) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +type StorageListReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keys []string `protobuf:"bytes,1,rep,name=keys,proto3" json:"keys,omitempty"` + Err string `protobuf:"bytes,2,opt,name=err,proto3" json:"err,omitempty"` +} + +func (x *StorageListReply) Reset() { + *x = StorageListReply{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StorageListReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StorageListReply) ProtoMessage() {} + +func (x *StorageListReply) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StorageListReply.ProtoReflect.Descriptor instead. +func (*StorageListReply) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{25} +} + +func (x *StorageListReply) GetKeys() []string { + if x != nil { + return x.Keys + } + return nil +} + +func (x *StorageListReply) GetErr() string { + if x != nil { + return x.Err + } + return "" +} + +type StorageGetArgs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` +} + +func (x *StorageGetArgs) Reset() { + *x = StorageGetArgs{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StorageGetArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StorageGetArgs) ProtoMessage() {} + +func (x *StorageGetArgs) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StorageGetArgs.ProtoReflect.Descriptor instead. 
+func (*StorageGetArgs) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{26} +} + +func (x *StorageGetArgs) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +type StorageGetReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entry *StorageEntry `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` + Err string `protobuf:"bytes,2,opt,name=err,proto3" json:"err,omitempty"` +} + +func (x *StorageGetReply) Reset() { + *x = StorageGetReply{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StorageGetReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StorageGetReply) ProtoMessage() {} + +func (x *StorageGetReply) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StorageGetReply.ProtoReflect.Descriptor instead. +func (*StorageGetReply) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{27} +} + +func (x *StorageGetReply) GetEntry() *StorageEntry { + if x != nil { + return x.Entry + } + return nil +} + +func (x *StorageGetReply) GetErr() string { + if x != nil { + return x.Err + } + return "" +} + +type StoragePutArgs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entry *StorageEntry `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` +} + +func (x *StoragePutArgs) Reset() { + *x = StoragePutArgs{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StoragePutArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StoragePutArgs) ProtoMessage() {} + +func (x *StoragePutArgs) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StoragePutArgs.ProtoReflect.Descriptor instead. 
+func (*StoragePutArgs) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{28} +} + +func (x *StoragePutArgs) GetEntry() *StorageEntry { + if x != nil { + return x.Entry + } + return nil +} + +type StoragePutReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Err string `protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"` +} + +func (x *StoragePutReply) Reset() { + *x = StoragePutReply{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StoragePutReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StoragePutReply) ProtoMessage() {} + +func (x *StoragePutReply) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StoragePutReply.ProtoReflect.Descriptor instead. +func (*StoragePutReply) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{29} +} + +func (x *StoragePutReply) GetErr() string { + if x != nil { + return x.Err + } + return "" +} + +type StorageDeleteArgs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` +} + +func (x *StorageDeleteArgs) Reset() { + *x = StorageDeleteArgs{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StorageDeleteArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StorageDeleteArgs) ProtoMessage() {} + +func (x *StorageDeleteArgs) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StorageDeleteArgs.ProtoReflect.Descriptor instead. 
+func (*StorageDeleteArgs) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{30} +} + +func (x *StorageDeleteArgs) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +type StorageDeleteReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Err string `protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"` +} + +func (x *StorageDeleteReply) Reset() { + *x = StorageDeleteReply{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StorageDeleteReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StorageDeleteReply) ProtoMessage() {} + +func (x *StorageDeleteReply) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StorageDeleteReply.ProtoReflect.Descriptor instead. +func (*StorageDeleteReply) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{31} +} + +func (x *StorageDeleteReply) GetErr() string { + if x != nil { + return x.Err + } + return "" +} + +type TTLReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TTL int64 `protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"` +} + +func (x *TTLReply) Reset() { + *x = TTLReply{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TTLReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TTLReply) ProtoMessage() {} + +func (x *TTLReply) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TTLReply.ProtoReflect.Descriptor instead. 
+func (*TTLReply) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{32} +} + +func (x *TTLReply) GetTTL() int64 { + if x != nil { + return x.TTL + } + return 0 +} + +type TaintedReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Tainted bool `protobuf:"varint,1,opt,name=tainted,proto3" json:"tainted,omitempty"` +} + +func (x *TaintedReply) Reset() { + *x = TaintedReply{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TaintedReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TaintedReply) ProtoMessage() {} + +func (x *TaintedReply) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TaintedReply.ProtoReflect.Descriptor instead. +func (*TaintedReply) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{33} +} + +func (x *TaintedReply) GetTainted() bool { + if x != nil { + return x.Tainted + } + return false +} + +type CachingDisabledReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Disabled bool `protobuf:"varint,1,opt,name=disabled,proto3" json:"disabled,omitempty"` +} + +func (x *CachingDisabledReply) Reset() { + *x = CachingDisabledReply{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CachingDisabledReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CachingDisabledReply) ProtoMessage() {} + +func (x *CachingDisabledReply) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CachingDisabledReply.ProtoReflect.Descriptor instead. 
+func (*CachingDisabledReply) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{34} +} + +func (x *CachingDisabledReply) GetDisabled() bool { + if x != nil { + return x.Disabled + } + return false +} + +type ReplicationStateReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + State int32 `protobuf:"varint,1,opt,name=state,proto3" json:"state,omitempty"` +} + +func (x *ReplicationStateReply) Reset() { + *x = ReplicationStateReply{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReplicationStateReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReplicationStateReply) ProtoMessage() {} + +func (x *ReplicationStateReply) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReplicationStateReply.ProtoReflect.Descriptor instead. +func (*ReplicationStateReply) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{35} +} + +func (x *ReplicationStateReply) GetState() int32 { + if x != nil { + return x.State + } + return 0 +} + +type ResponseWrapDataArgs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Data string `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + TTL int64 `protobuf:"varint,2,opt,name=TTL,proto3" json:"TTL,omitempty"` + JWT bool `protobuf:"varint,3,opt,name=JWT,proto3" json:"JWT,omitempty"` +} + +func (x *ResponseWrapDataArgs) Reset() { + *x = ResponseWrapDataArgs{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponseWrapDataArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResponseWrapDataArgs) ProtoMessage() {} + +func (x *ResponseWrapDataArgs) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponseWrapDataArgs.ProtoReflect.Descriptor instead. 
+func (*ResponseWrapDataArgs) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{36} +} + +func (x *ResponseWrapDataArgs) GetData() string { + if x != nil { + return x.Data + } + return "" +} + +func (x *ResponseWrapDataArgs) GetTTL() int64 { + if x != nil { + return x.TTL + } + return 0 +} + +func (x *ResponseWrapDataArgs) GetJWT() bool { + if x != nil { + return x.JWT + } + return false +} + +type ResponseWrapDataReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + WrapInfo *ResponseWrapInfo `protobuf:"bytes,1,opt,name=wrap_info,json=wrapInfo,proto3" json:"wrap_info,omitempty"` + Err string `protobuf:"bytes,2,opt,name=err,proto3" json:"err,omitempty"` +} + +func (x *ResponseWrapDataReply) Reset() { + *x = ResponseWrapDataReply{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponseWrapDataReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResponseWrapDataReply) ProtoMessage() {} + +func (x *ResponseWrapDataReply) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponseWrapDataReply.ProtoReflect.Descriptor instead. +func (*ResponseWrapDataReply) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{37} +} + +func (x *ResponseWrapDataReply) GetWrapInfo() *ResponseWrapInfo { + if x != nil { + return x.WrapInfo + } + return nil +} + +func (x *ResponseWrapDataReply) GetErr() string { + if x != nil { + return x.Err + } + return "" +} + +type MlockEnabledReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` +} + +func (x *MlockEnabledReply) Reset() { + *x = MlockEnabledReply{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MlockEnabledReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MlockEnabledReply) ProtoMessage() {} + +func (x *MlockEnabledReply) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MlockEnabledReply.ProtoReflect.Descriptor instead. 
+func (*MlockEnabledReply) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{38} +} + +func (x *MlockEnabledReply) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false +} + +type LocalMountReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Local bool `protobuf:"varint,1,opt,name=local,proto3" json:"local,omitempty"` +} + +func (x *LocalMountReply) Reset() { + *x = LocalMountReply{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalMountReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalMountReply) ProtoMessage() {} + +func (x *LocalMountReply) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalMountReply.ProtoReflect.Descriptor instead. +func (*LocalMountReply) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{39} +} + +func (x *LocalMountReply) GetLocal() bool { + if x != nil { + return x.Local + } + return false +} + +type EntityInfoArgs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + EntityID string `protobuf:"bytes,1,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"` +} + +func (x *EntityInfoArgs) Reset() { + *x = EntityInfoArgs{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EntityInfoArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntityInfoArgs) ProtoMessage() {} + +func (x *EntityInfoArgs) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EntityInfoArgs.ProtoReflect.Descriptor instead. 
+func (*EntityInfoArgs) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{40} +} + +func (x *EntityInfoArgs) GetEntityID() string { + if x != nil { + return x.EntityID + } + return "" +} + +type EntityInfoReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entity *logical.Entity `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"` + Err string `protobuf:"bytes,2,opt,name=err,proto3" json:"err,omitempty"` +} + +func (x *EntityInfoReply) Reset() { + *x = EntityInfoReply{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EntityInfoReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntityInfoReply) ProtoMessage() {} + +func (x *EntityInfoReply) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EntityInfoReply.ProtoReflect.Descriptor instead. +func (*EntityInfoReply) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{41} +} + +func (x *EntityInfoReply) GetEntity() *logical.Entity { + if x != nil { + return x.Entity + } + return nil +} + +func (x *EntityInfoReply) GetErr() string { + if x != nil { + return x.Err + } + return "" +} + +type GroupsForEntityReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Groups []*logical.Group `protobuf:"bytes,1,rep,name=groups,proto3" json:"groups,omitempty"` + Err string `protobuf:"bytes,2,opt,name=err,proto3" json:"err,omitempty"` +} + +func (x *GroupsForEntityReply) Reset() { + *x = GroupsForEntityReply{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GroupsForEntityReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GroupsForEntityReply) ProtoMessage() {} + +func (x *GroupsForEntityReply) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GroupsForEntityReply.ProtoReflect.Descriptor instead. 
+func (*GroupsForEntityReply) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{42} +} + +func (x *GroupsForEntityReply) GetGroups() []*logical.Group { + if x != nil { + return x.Groups + } + return nil +} + +func (x *GroupsForEntityReply) GetErr() string { + if x != nil { + return x.Err + } + return "" +} + +type PluginEnvReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PluginEnvironment *logical.PluginEnvironment `protobuf:"bytes,1,opt,name=plugin_environment,json=pluginEnvironment,proto3" json:"plugin_environment,omitempty"` + Err string `protobuf:"bytes,2,opt,name=err,proto3" json:"err,omitempty"` +} + +func (x *PluginEnvReply) Reset() { + *x = PluginEnvReply{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PluginEnvReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PluginEnvReply) ProtoMessage() {} + +func (x *PluginEnvReply) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PluginEnvReply.ProtoReflect.Descriptor instead. +func (*PluginEnvReply) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{43} +} + +func (x *PluginEnvReply) GetPluginEnvironment() *logical.PluginEnvironment { + if x != nil { + return x.PluginEnvironment + } + return nil +} + +func (x *PluginEnvReply) GetErr() string { + if x != nil { + return x.Err + } + return "" +} + +type GeneratePasswordFromPolicyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PolicyName string `protobuf:"bytes,1,opt,name=policy_name,json=policyName,proto3" json:"policy_name,omitempty"` +} + +func (x *GeneratePasswordFromPolicyRequest) Reset() { + *x = GeneratePasswordFromPolicyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GeneratePasswordFromPolicyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GeneratePasswordFromPolicyRequest) ProtoMessage() {} + +func (x *GeneratePasswordFromPolicyRequest) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GeneratePasswordFromPolicyRequest.ProtoReflect.Descriptor instead. 
+func (*GeneratePasswordFromPolicyRequest) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{44} +} + +func (x *GeneratePasswordFromPolicyRequest) GetPolicyName() string { + if x != nil { + return x.PolicyName + } + return "" +} + +type GeneratePasswordFromPolicyReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Password string `protobuf:"bytes,1,opt,name=password,proto3" json:"password,omitempty"` +} + +func (x *GeneratePasswordFromPolicyReply) Reset() { + *x = GeneratePasswordFromPolicyReply{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GeneratePasswordFromPolicyReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GeneratePasswordFromPolicyReply) ProtoMessage() {} + +func (x *GeneratePasswordFromPolicyReply) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GeneratePasswordFromPolicyReply.ProtoReflect.Descriptor instead. +func (*GeneratePasswordFromPolicyReply) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{45} +} + +func (x *GeneratePasswordFromPolicyReply) GetPassword() string { + if x != nil { + return x.Password + } + return "" +} + +type ClusterInfoReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + ClusterID string `protobuf:"bytes,2,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Err string `protobuf:"bytes,3,opt,name=err,proto3" json:"err,omitempty"` +} + +func (x *ClusterInfoReply) Reset() { + *x = ClusterInfoReply{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClusterInfoReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClusterInfoReply) ProtoMessage() {} + +func (x *ClusterInfoReply) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClusterInfoReply.ProtoReflect.Descriptor instead. +func (*ClusterInfoReply) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{46} +} + +func (x *ClusterInfoReply) GetClusterName() string { + if x != nil { + return x.ClusterName + } + return "" +} + +func (x *ClusterInfoReply) GetClusterID() string { + if x != nil { + return x.ClusterID + } + return "" +} + +func (x *ClusterInfoReply) GetErr() string { + if x != nil { + return x.Err + } + return "" +} + +type Connection struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // RemoteAddr is the network address that sent the request. 
+ RemoteAddr string `protobuf:"bytes,1,opt,name=remote_addr,json=remoteAddr,proto3" json:"remote_addr,omitempty"` + // RemotePort is the network port that sent the request. + RemotePort int32 `protobuf:"varint,3,opt,name=remote_port,json=remotePort,proto3" json:"remote_port,omitempty"` + // ConnectionState is the marshalled tls.ConnectionState from the original + // request + ConnectionState *ConnectionState `protobuf:"bytes,2,opt,name=connection_state,json=connectionState,proto3" json:"connection_state,omitempty"` +} + +func (x *Connection) Reset() { + *x = Connection{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Connection) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Connection) ProtoMessage() {} + +func (x *Connection) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Connection.ProtoReflect.Descriptor instead. +func (*Connection) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{47} +} + +func (x *Connection) GetRemoteAddr() string { + if x != nil { + return x.RemoteAddr + } + return "" +} + +func (x *Connection) GetRemotePort() int32 { + if x != nil { + return x.RemotePort + } + return 0 +} + +func (x *Connection) GetConnectionState() *ConnectionState { + if x != nil { + return x.ConnectionState + } + return nil +} + +type ConnectionState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version uint32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + HandshakeComplete bool `protobuf:"varint,2,opt,name=handshake_complete,json=handshakeComplete,proto3" json:"handshake_complete,omitempty"` + DidResume bool `protobuf:"varint,3,opt,name=did_resume,json=didResume,proto3" json:"did_resume,omitempty"` + CipherSuite uint32 `protobuf:"varint,4,opt,name=cipher_suite,json=cipherSuite,proto3" json:"cipher_suite,omitempty"` + NegotiatedProtocol string `protobuf:"bytes,5,opt,name=negotiated_protocol,json=negotiatedProtocol,proto3" json:"negotiated_protocol,omitempty"` + NegotiatedProtocolIsMutual bool `protobuf:"varint,6,opt,name=negotiated_protocol_is_mutual,json=negotiatedProtocolIsMutual,proto3" json:"negotiated_protocol_is_mutual,omitempty"` + ServerName string `protobuf:"bytes,7,opt,name=server_name,json=serverName,proto3" json:"server_name,omitempty"` + PeerCertificates *CertificateChain `protobuf:"bytes,8,opt,name=peer_certificates,json=peerCertificates,proto3" json:"peer_certificates,omitempty"` + VerifiedChains []*CertificateChain `protobuf:"bytes,9,rep,name=verified_chains,json=verifiedChains,proto3" json:"verified_chains,omitempty"` + SignedCertificateTimestamps [][]byte `protobuf:"bytes,10,rep,name=signed_certificate_timestamps,json=signedCertificateTimestamps,proto3" json:"signed_certificate_timestamps,omitempty"` + OcspResponse []byte `protobuf:"bytes,11,opt,name=ocsp_response,json=ocspResponse,proto3" json:"ocsp_response,omitempty"` + TlsUnique []byte `protobuf:"bytes,12,opt,name=tls_unique,json=tlsUnique,proto3" json:"tls_unique,omitempty"` +} + +func (x *ConnectionState) Reset() { + *x = ConnectionState{} + if 
protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConnectionState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectionState) ProtoMessage() {} + +func (x *ConnectionState) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConnectionState.ProtoReflect.Descriptor instead. +func (*ConnectionState) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{48} +} + +func (x *ConnectionState) GetVersion() uint32 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *ConnectionState) GetHandshakeComplete() bool { + if x != nil { + return x.HandshakeComplete + } + return false +} + +func (x *ConnectionState) GetDidResume() bool { + if x != nil { + return x.DidResume + } + return false +} + +func (x *ConnectionState) GetCipherSuite() uint32 { + if x != nil { + return x.CipherSuite + } + return 0 +} + +func (x *ConnectionState) GetNegotiatedProtocol() string { + if x != nil { + return x.NegotiatedProtocol + } + return "" +} + +func (x *ConnectionState) GetNegotiatedProtocolIsMutual() bool { + if x != nil { + return x.NegotiatedProtocolIsMutual + } + return false +} + +func (x *ConnectionState) GetServerName() string { + if x != nil { + return x.ServerName + } + return "" +} + +func (x *ConnectionState) GetPeerCertificates() *CertificateChain { + if x != nil { + return x.PeerCertificates + } + return nil +} + +func (x *ConnectionState) GetVerifiedChains() []*CertificateChain { + if x != nil { + return x.VerifiedChains + } + return nil +} + +func (x *ConnectionState) GetSignedCertificateTimestamps() [][]byte { + if x != nil { + return x.SignedCertificateTimestamps + } + return nil +} + +func (x *ConnectionState) GetOcspResponse() []byte { + if x != nil { + return x.OcspResponse + } + return nil +} + +func (x *ConnectionState) GetTlsUnique() []byte { + if x != nil { + return x.TlsUnique + } + return nil +} + +type Certificate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Asn1Data []byte `protobuf:"bytes,1,opt,name=asn1_data,json=asn1Data,proto3" json:"asn1_data,omitempty"` +} + +func (x *Certificate) Reset() { + *x = Certificate{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Certificate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Certificate) ProtoMessage() {} + +func (x *Certificate) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Certificate.ProtoReflect.Descriptor instead. 
+func (*Certificate) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{49} +} + +func (x *Certificate) GetAsn1Data() []byte { + if x != nil { + return x.Asn1Data + } + return nil +} + +type CertificateChain struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Certificates []*Certificate `protobuf:"bytes,1,rep,name=certificates,proto3" json:"certificates,omitempty"` +} + +func (x *CertificateChain) Reset() { + *x = CertificateChain{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CertificateChain) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CertificateChain) ProtoMessage() {} + +func (x *CertificateChain) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CertificateChain.ProtoReflect.Descriptor instead. +func (*CertificateChain) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{50} +} + +func (x *CertificateChain) GetCertificates() []*Certificate { + if x != nil { + return x.Certificates + } + return nil +} + +type SendEventRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + EventType string `protobuf:"bytes,1,opt,name=event_type,json=eventType,proto3" json:"event_type,omitempty"` + Event *logical.EventData `protobuf:"bytes,2,opt,name=event,proto3" json:"event,omitempty"` +} + +func (x *SendEventRequest) Reset() { + *x = SendEventRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SendEventRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendEventRequest) ProtoMessage() {} + +func (x *SendEventRequest) ProtoReflect() protoreflect.Message { + mi := &file_sdk_plugin_pb_backend_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendEventRequest.ProtoReflect.Descriptor instead. 
+func (*SendEventRequest) Descriptor() ([]byte, []int) { + return file_sdk_plugin_pb_backend_proto_rawDescGZIP(), []int{51} +} + +func (x *SendEventRequest) GetEventType() string { + if x != nil { + return x.EventType + } + return "" +} + +func (x *SendEventRequest) GetEvent() *logical.EventData { + if x != nil { + return x.Event + } + return nil +} + +var File_sdk_plugin_pb_backend_proto protoreflect.FileDescriptor + +var file_sdk_plugin_pb_backend_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x70, 0x62, 0x2f, + 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, + 0x62, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x17, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x73, 0x64, 0x6b, + 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, + 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x20, 0x0a, 0x06, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x22, 0x5b, 0x0a, 0x0a, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x72, + 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x65, 0x72, + 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x72, 0x72, 0x5f, 0x6d, 0x73, 0x67, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x72, 0x72, 0x4d, 0x73, 0x67, 0x12, 0x19, + 0x0a, 0x08, 0x65, 0x72, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x07, 0x65, 0x72, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x22, 0xce, 0x01, 0x0a, 0x05, 0x50, 0x61, + 0x74, 0x68, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x12, 0x28, 0x0a, 0x0f, 0x75, 0x6e, 0x61, 0x75, 0x74, + 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0f, 0x75, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x65, 0x61, 0x6c, 0x5f, 0x77, + 0x72, 0x61, 0x70, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0f, 0x73, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x61, 0x70, 0x53, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x12, 0x36, 0x0a, 0x17, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x77, + 0x61, 0x72, 0x64, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x15, 0x77, 0x72, 0x69, 0x74, 0x65, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, + 0x64, 0x65, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x22, 0xbf, 0x06, 0x0a, 0x07, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2e, 0x0a, 0x12, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x12, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x0a, 0x06, + 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, + 0x62, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x12, 0x1c, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, + 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x12, 0x32, + 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, + 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, + 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1d, 0x0a, + 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0e, + 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x0e, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x09, 0x77, 0x72, 0x61, 0x70, 0x5f, 0x69, 0x6e, 0x66, 0x6f, + 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x57, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x77, 0x72, 0x61, + 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3d, 0x0a, 0x1b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, + 0x75, 0x73, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x6d, 
0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x55, 0x73, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, + 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, + 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x6f, 0x76, 0x65, 0x72, + 0x72, 0x69, 0x64, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x75, 0x6e, + 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x13, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0f, 0x75, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x46, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x20, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe6, 0x05, 0x0a, + 0x04, 0x41, 0x75, 0x74, 0x68, 0x12, 0x35, 0x0a, 0x0d, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x5f, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, + 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0c, + 0x6c, 0x65, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0d, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x61, 0x74, + 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, + 0x12, 0x32, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x2e, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x6f, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x6e, + 0x75, 0x6d, 0x5f, 0x75, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6e, + 0x75, 0x6d, 0x55, 0x73, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 
0x74, 0x79, + 0x5f, 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x33, 0x0a, 0x0d, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x52, 0x0c, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x1f, + 0x0a, 0x0b, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x73, 0x18, 0x0d, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x69, 0x64, 0x72, 0x73, 0x12, + 0x25, 0x0a, 0x0e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x69, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, + 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x74, 0x6c, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x65, + 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x4d, 0x61, 0x78, 0x54, 0x74, 0x6c, 0x12, 0x1d, 0x0a, + 0x0a, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x09, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x11, + 0x6e, 0x6f, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6e, 0x6f, 0x44, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xca, 0x06, 0x0a, 0x0a, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x02, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, + 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x69, 0x65, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x2c, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x04, 
0x6d, 0x65, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, + 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, + 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x75, 0x6d, + 0x5f, 0x75, 0x73, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6e, 0x75, 0x6d, + 0x55, 0x73, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x28, 0x0a, 0x10, 0x65, + 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x74, 0x6c, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x4d, + 0x61, 0x78, 0x54, 0x74, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, 0x72, + 0x69, 0x6f, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, + 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x0e, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x1f, + 0x0a, 0x0b, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x73, 0x18, 0x0f, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x43, 0x69, 0x64, 0x72, 0x73, 0x12, + 0x21, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x75, 0x62, 0x62, 0x79, 0x68, 0x6f, 0x6c, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x75, 0x62, 0x62, 0x79, 0x68, + 0x6f, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x12, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x45, 0x0a, 0x0d, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, + 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x6e, 0x6f, 0x5f, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x15, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x12, 0x6e, 0x6f, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x16, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 
0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x1a, 0x3f, 0x0a, 0x11, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4d, 0x65, 0x74, + 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0xaf, 0x01, 0x0a, 0x0c, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x61, 0x62, + 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x61, + 0x62, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x69, 0x73, 0x73, 0x75, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x52, 0x09, 0x69, 0x73, 0x73, 0x75, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x4d, 0x61, 0x78, 0x54, 0x54, 0x4c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4d, 0x61, + 0x78, 0x54, 0x54, 0x4c, 0x22, 0x7f, 0x0a, 0x06, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x35, + 0x0a, 0x0d, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x65, + 0x61, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x65, + 0x61, 0x73, 0x65, 0x49, 0x64, 0x22, 0xc8, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x22, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x06, + 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x1c, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x70, 0x62, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x52, 0x04, + 0x61, 0x75, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, + 0x12, 0x31, 0x0a, 0x09, 0x77, 0x72, 0x61, 0x70, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x52, 
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x57, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x77, 0x72, 0x61, 0x70, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x33, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x1a, 0x46, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x20, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0xc8, 0x02, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, + 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1a, 0x0a, + 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x3f, 0x0a, 0x0d, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0c, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x77, 0x72, + 0x61, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x41, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x11, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, + 0x5f, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, + 0x64, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1b, + 0x0a, 0x09, 0x73, 0x65, 0x61, 0x6c, 0x5f, 0x77, 0x72, 0x61, 0x70, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x08, 0x73, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x61, 0x70, 0x22, 0x58, 0x0a, 0x0f, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x57, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10, + 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x54, 0x54, 0x4c, + 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x61, 0x6c, + 0x5f, 0x77, 0x72, 0x61, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x61, + 0x6c, 0x57, 0x72, 0x61, 0x70, 0x22, 0x59, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x6c, 
0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x07, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 0x62, 0x2e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x22, 0x60, 0x0a, 0x12, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x28, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x20, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, + 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, + 0x72, 0x72, 0x22, 0x10, 0x0a, 0x0e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, + 0x41, 0x72, 0x67, 0x73, 0x22, 0x33, 0x0a, 0x0f, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, + 0x7a, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x20, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x45, + 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x34, 0x0a, 0x11, 0x53, 0x70, 0x65, + 0x63, 0x69, 0x61, 0x6c, 0x50, 0x61, 0x74, 0x68, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1f, + 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, + 0x70, 0x62, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x73, 0x52, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x22, + 0x60, 0x0a, 0x18, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x09, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x07, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 0x62, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0x76, 0x0a, 0x19, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1f, + 0x0a, 0x0b, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x12, + 0x16, 0x0a, 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x20, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x45, + 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0xb8, 0x01, 0x0a, 0x09, 0x53, 0x65, + 0x74, 0x75, 0x70, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x72, 0x6f, 0x6b, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x62, 0x72, 0x6f, 0x6b, + 0x65, 0x72, 0x49, 0x64, 0x12, 0x31, 0x0a, 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, + 0x20, 0x03, 
0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x74, 0x75, 0x70, 0x41, + 0x72, 0x67, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x20, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x65, + 0x6e, 0x64, 0x55, 0x55, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x62, 0x61, + 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x55, 0x55, 0x49, 0x44, 0x1a, 0x39, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x1e, 0x0a, 0x0a, 0x53, 0x65, 0x74, 0x75, 0x70, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x65, 0x72, 0x72, 0x22, 0x1f, 0x0a, 0x09, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x25, 0x0a, 0x11, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x41, 0x72, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x53, 0x0a, 0x0c, + 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x61, 0x6c, 0x5f, 0x77, 0x72, 0x61, + 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x61, 0x6c, 0x57, 0x72, 0x61, + 0x70, 0x22, 0x29, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x69, 0x73, 0x74, + 0x41, 0x72, 0x67, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x38, 0x0a, 0x10, + 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, + 0x6b, 0x65, 0x79, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x22, 0x0a, 0x0e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x47, 0x65, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x4b, 0x0a, 0x0f, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, 0x0a, + 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, + 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, + 0x65, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x38, 0x0a, 0x0e, 0x53, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x50, 0x75, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x26, 0x0a, 0x05, 0x65, 0x6e, 0x74, + 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x45, 
0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x22, 0x23, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x75, 0x74, 0x52, + 0x65, 0x70, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x25, 0x0a, 0x11, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x72, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x26, 0x0a, + 0x12, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x1c, 0x0a, 0x08, 0x54, 0x54, 0x4c, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, + 0x54, 0x54, 0x4c, 0x22, 0x28, 0x0a, 0x0c, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x74, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x22, 0x32, 0x0a, + 0x14, 0x43, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x22, 0x2d, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x22, 0x4e, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, + 0x44, 0x61, 0x74, 0x61, 0x41, 0x72, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x10, 0x0a, 0x03, + 0x54, 0x54, 0x4c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x10, + 0x0a, 0x03, 0x4a, 0x57, 0x54, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x4a, 0x57, 0x54, + 0x22, 0x5c, 0x0a, 0x15, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, + 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x31, 0x0a, 0x09, 0x77, 0x72, 0x61, + 0x70, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, + 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x49, 0x6e, + 0x66, 0x6f, 0x52, 0x08, 0x77, 0x72, 0x61, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10, 0x0a, 0x03, + 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x2d, + 0x0a, 0x11, 0x4d, 0x6c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x27, 0x0a, + 0x0f, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x22, 0x2d, 0x0a, 0x0e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x49, 0x6e, 0x66, 0x6f, 0x41, 0x72, 0x67, 0x73, 0x12, 0x1b, 
0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x4c, 0x0a, 0x0f, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x27, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, + 0x61, 0x6c, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x65, 0x72, 0x72, 0x22, 0x50, 0x0a, 0x14, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x46, 0x6f, 0x72, + 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, 0x0a, 0x06, 0x67, + 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, + 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x06, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x6d, 0x0a, 0x0e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x45, + 0x6e, 0x76, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x49, 0x0a, 0x12, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x5f, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x50, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x52, + 0x11, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, + 0x6e, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x65, 0x72, 0x72, 0x22, 0x44, 0x0a, 0x21, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x3d, 0x0a, 0x1f, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, 0x72, + 0x6f, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1a, 0x0a, + 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x66, 0x0a, 0x10, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x21, 0x0a, + 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, + 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, + 0x72, 0x22, 0x8e, 0x01, 0x0a, 0x0a, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 
0x64, 0x64, + 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x6f, + 0x72, 0x74, 0x12, 0x3e, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, + 0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x22, 0xbb, 0x04, 0x0a, 0x0f, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x2d, 0x0a, 0x12, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x63, 0x6f, + 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x68, 0x61, + 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x12, + 0x1d, 0x0a, 0x0a, 0x64, 0x69, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x09, 0x64, 0x69, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x69, 0x74, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x53, 0x75, 0x69, 0x74, + 0x65, 0x12, 0x2f, 0x0a, 0x13, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, + 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x69, 0x73, 0x5f, 0x6d, 0x75, 0x74, + 0x75, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x6e, 0x65, 0x67, 0x6f, 0x74, + 0x69, 0x61, 0x74, 0x65, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x49, 0x73, 0x4d, + 0x75, 0x74, 0x75, 0x61, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x10, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x0f, 0x76, 0x65, 0x72, + 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x0e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, + 0x65, 0x64, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x42, 0x0a, 0x1d, 0x73, 0x69, 0x67, 0x6e, + 0x65, 0x64, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0c, 0x52, + 0x1b, 0x73, 
0x69, 0x67, 0x6e, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x12, 0x23, 0x0a, 0x0d, + 0x6f, 0x63, 0x73, 0x70, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x6f, 0x63, 0x73, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6c, 0x73, 0x5f, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x74, 0x6c, 0x73, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, + 0x22, 0x2a, 0x0a, 0x0b, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, + 0x1b, 0x0a, 0x09, 0x61, 0x73, 0x6e, 0x31, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x08, 0x61, 0x73, 0x6e, 0x31, 0x44, 0x61, 0x74, 0x61, 0x22, 0x47, 0x0a, 0x10, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x12, 0x33, 0x0a, 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x73, 0x22, 0x5b, 0x0a, 0x10, 0x53, 0x65, 0x6e, 0x64, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, + 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, 0x05, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x32, 0xa5, 0x03, 0x0a, 0x07, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x12, 0x3e, + 0x0a, 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x16, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x61, 0x6e, 0x64, + 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x30, + 0x0a, 0x0c, 0x53, 0x70, 0x65, 0x63, 0x69, 0x61, 0x6c, 0x50, 0x61, 0x74, 0x68, 0x73, 0x12, 0x09, + 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x53, + 0x70, 0x65, 0x63, 0x69, 0x61, 0x6c, 0x50, 0x61, 0x74, 0x68, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x12, 0x53, 0x0a, 0x14, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1c, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x61, + 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x1d, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x61, 0x6e, 0x64, + 0x6c, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1f, 0x0a, 0x07, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, + 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x09, 0x2e, 0x70, 0x62, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x31, 0x0a, 0x0d, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 
0x65, 0x4b, 0x65, 0x79, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x09, + 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x53, 0x65, 0x74, + 0x75, 0x70, 0x12, 0x0d, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x74, 0x75, 0x70, 0x41, 0x72, 0x67, + 0x73, 0x1a, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x74, 0x75, 0x70, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x12, 0x35, 0x0a, 0x0a, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x12, + 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, + 0x72, 0x67, 0x73, 0x1a, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, + 0x69, 0x7a, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x20, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x0d, 0x2e, 0x70, 0x62, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x32, 0xd5, 0x01, 0x0a, 0x07, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x31, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x13, + 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x41, + 0x72, 0x67, 0x73, 0x1a, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2e, 0x0a, 0x03, 0x47, 0x65, 0x74, + 0x12, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x47, 0x65, 0x74, + 0x41, 0x72, 0x67, 0x73, 0x1a, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2e, 0x0a, 0x03, 0x50, 0x75, 0x74, + 0x12, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x75, 0x74, + 0x41, 0x72, 0x67, 0x73, 0x1a, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x50, 0x75, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x37, 0x0a, 0x06, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x12, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x16, 0x2e, 0x70, 0x62, 0x2e, + 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x32, 0xe1, 0x05, 0x0a, 0x0a, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x56, 0x69, 0x65, + 0x77, 0x12, 0x2a, 0x0a, 0x0f, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4c, 0x65, 0x61, 0x73, + 0x65, 0x54, 0x54, 0x4c, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, + 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x54, 0x4c, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, 0x0a, + 0x0b, 0x4d, 0x61, 0x78, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x54, 0x54, 0x4c, 0x12, 0x09, 0x2e, 0x70, + 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x54, 0x4c, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x26, 0x0a, 0x07, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, + 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x10, 0x2e, 0x70, 0x62, + 0x2e, 0x54, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x36, 0x0a, + 0x0f, 0x43, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x18, 0x2e, 0x70, 0x62, + 0x2e, 0x43, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x38, 0x0a, 0x10, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 
0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x19, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, + 0x47, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x44, + 0x61, 0x74, 0x61, 0x12, 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x57, 0x72, 0x61, 0x70, 0x44, 0x61, 0x74, 0x61, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x19, 0x2e, + 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x57, 0x72, 0x61, 0x70, 0x44, + 0x61, 0x74, 0x61, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x30, 0x0a, 0x0c, 0x4d, 0x6c, 0x6f, 0x63, + 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x6c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2c, 0x0a, 0x0a, 0x4c, 0x6f, + 0x63, 0x61, 0x6c, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4d, 0x6f, + 0x75, 0x6e, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x35, 0x0a, 0x0a, 0x45, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x13, 0x2e, 0x70, 0x62, 0x2e, + 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, + 0x2a, 0x0a, 0x09, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x45, 0x6e, 0x76, 0x12, 0x09, 0x2e, 0x70, + 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x45, 0x6e, 0x76, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3f, 0x0a, 0x0f, 0x47, + 0x72, 0x6f, 0x75, 0x70, 0x73, 0x46, 0x6f, 0x72, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x12, + 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x41, 0x72, + 0x67, 0x73, 0x1a, 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x46, 0x6f, + 0x72, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x68, 0x0a, 0x1a, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, + 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x25, 0x2e, 0x70, 0x62, 0x2e, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, + 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x23, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x50, + 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x2e, 0x0a, 0x0b, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x1a, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x32, 0x36, 0x0a, 0x06, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x12, 0x2c, 0x0a, 0x09, 0x53, 0x65, 0x6e, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x2e, + 0x70, 0x62, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 
0x42, 0x2a, + 0x5a, 0x28, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, + 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, + 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_sdk_plugin_pb_backend_proto_rawDescOnce sync.Once + file_sdk_plugin_pb_backend_proto_rawDescData = file_sdk_plugin_pb_backend_proto_rawDesc +) + +func file_sdk_plugin_pb_backend_proto_rawDescGZIP() []byte { + file_sdk_plugin_pb_backend_proto_rawDescOnce.Do(func() { + file_sdk_plugin_pb_backend_proto_rawDescData = protoimpl.X.CompressGZIP(file_sdk_plugin_pb_backend_proto_rawDescData) + }) + return file_sdk_plugin_pb_backend_proto_rawDescData +} + +var file_sdk_plugin_pb_backend_proto_msgTypes = make([]protoimpl.MessageInfo, 58) +var file_sdk_plugin_pb_backend_proto_goTypes = []interface{}{ + (*Empty)(nil), // 0: pb.Empty + (*Header)(nil), // 1: pb.Header + (*ProtoError)(nil), // 2: pb.ProtoError + (*Paths)(nil), // 3: pb.Paths + (*Request)(nil), // 4: pb.Request + (*Auth)(nil), // 5: pb.Auth + (*TokenEntry)(nil), // 6: pb.TokenEntry + (*LeaseOptions)(nil), // 7: pb.LeaseOptions + (*Secret)(nil), // 8: pb.Secret + (*Response)(nil), // 9: pb.Response + (*ResponseWrapInfo)(nil), // 10: pb.ResponseWrapInfo + (*RequestWrapInfo)(nil), // 11: pb.RequestWrapInfo + (*HandleRequestArgs)(nil), // 12: pb.HandleRequestArgs + (*HandleRequestReply)(nil), // 13: pb.HandleRequestReply + (*InitializeArgs)(nil), // 14: pb.InitializeArgs + (*InitializeReply)(nil), // 15: pb.InitializeReply + (*SpecialPathsReply)(nil), // 16: pb.SpecialPathsReply + (*HandleExistenceCheckArgs)(nil), // 17: pb.HandleExistenceCheckArgs + (*HandleExistenceCheckReply)(nil), // 18: pb.HandleExistenceCheckReply + (*SetupArgs)(nil), // 19: pb.SetupArgs + (*SetupReply)(nil), // 20: pb.SetupReply + (*TypeReply)(nil), // 21: pb.TypeReply + (*InvalidateKeyArgs)(nil), // 22: pb.InvalidateKeyArgs + (*StorageEntry)(nil), // 23: pb.StorageEntry + (*StorageListArgs)(nil), // 24: pb.StorageListArgs + (*StorageListReply)(nil), // 25: pb.StorageListReply + (*StorageGetArgs)(nil), // 26: pb.StorageGetArgs + (*StorageGetReply)(nil), // 27: pb.StorageGetReply + (*StoragePutArgs)(nil), // 28: pb.StoragePutArgs + (*StoragePutReply)(nil), // 29: pb.StoragePutReply + (*StorageDeleteArgs)(nil), // 30: pb.StorageDeleteArgs + (*StorageDeleteReply)(nil), // 31: pb.StorageDeleteReply + (*TTLReply)(nil), // 32: pb.TTLReply + (*TaintedReply)(nil), // 33: pb.TaintedReply + (*CachingDisabledReply)(nil), // 34: pb.CachingDisabledReply + (*ReplicationStateReply)(nil), // 35: pb.ReplicationStateReply + (*ResponseWrapDataArgs)(nil), // 36: pb.ResponseWrapDataArgs + (*ResponseWrapDataReply)(nil), // 37: pb.ResponseWrapDataReply + (*MlockEnabledReply)(nil), // 38: pb.MlockEnabledReply + (*LocalMountReply)(nil), // 39: pb.LocalMountReply + (*EntityInfoArgs)(nil), // 40: pb.EntityInfoArgs + (*EntityInfoReply)(nil), // 41: pb.EntityInfoReply + (*GroupsForEntityReply)(nil), // 42: pb.GroupsForEntityReply + (*PluginEnvReply)(nil), // 43: pb.PluginEnvReply + (*GeneratePasswordFromPolicyRequest)(nil), // 44: pb.GeneratePasswordFromPolicyRequest + (*GeneratePasswordFromPolicyReply)(nil), // 45: pb.GeneratePasswordFromPolicyReply + (*ClusterInfoReply)(nil), // 46: pb.ClusterInfoReply + (*Connection)(nil), // 47: pb.Connection + (*ConnectionState)(nil), // 48: pb.ConnectionState + (*Certificate)(nil), // 49: pb.Certificate + 
(*CertificateChain)(nil), // 50: pb.CertificateChain + (*SendEventRequest)(nil), // 51: pb.SendEventRequest + nil, // 52: pb.Request.HeadersEntry + nil, // 53: pb.Auth.MetadataEntry + nil, // 54: pb.TokenEntry.MetaEntry + nil, // 55: pb.TokenEntry.InternalMetaEntry + nil, // 56: pb.Response.HeadersEntry + nil, // 57: pb.SetupArgs.ConfigEntry + (*logical.Alias)(nil), // 58: logical.Alias + (*timestamppb.Timestamp)(nil), // 59: google.protobuf.Timestamp + (*logical.Entity)(nil), // 60: logical.Entity + (*logical.Group)(nil), // 61: logical.Group + (*logical.PluginEnvironment)(nil), // 62: logical.PluginEnvironment + (*logical.EventData)(nil), // 63: logical.EventData +} +var file_sdk_plugin_pb_backend_proto_depIDxs = []int32{ + 8, // 0: pb.Request.secret:type_name -> pb.Secret + 5, // 1: pb.Request.auth:type_name -> pb.Auth + 52, // 2: pb.Request.headers:type_name -> pb.Request.HeadersEntry + 11, // 3: pb.Request.wrap_info:type_name -> pb.RequestWrapInfo + 47, // 4: pb.Request.connection:type_name -> pb.Connection + 7, // 5: pb.Auth.lease_options:type_name -> pb.LeaseOptions + 53, // 6: pb.Auth.metadata:type_name -> pb.Auth.MetadataEntry + 58, // 7: pb.Auth.alias:type_name -> logical.Alias + 58, // 8: pb.Auth.group_aliases:type_name -> logical.Alias + 54, // 9: pb.TokenEntry.meta:type_name -> pb.TokenEntry.MetaEntry + 55, // 10: pb.TokenEntry.internal_meta:type_name -> pb.TokenEntry.InternalMetaEntry + 59, // 11: pb.LeaseOptions.issue_time:type_name -> google.protobuf.Timestamp + 7, // 12: pb.Secret.lease_options:type_name -> pb.LeaseOptions + 8, // 13: pb.Response.secret:type_name -> pb.Secret + 5, // 14: pb.Response.auth:type_name -> pb.Auth + 10, // 15: pb.Response.wrap_info:type_name -> pb.ResponseWrapInfo + 56, // 16: pb.Response.headers:type_name -> pb.Response.HeadersEntry + 59, // 17: pb.ResponseWrapInfo.creation_time:type_name -> google.protobuf.Timestamp + 4, // 18: pb.HandleRequestArgs.request:type_name -> pb.Request + 9, // 19: pb.HandleRequestReply.response:type_name -> pb.Response + 2, // 20: pb.HandleRequestReply.err:type_name -> pb.ProtoError + 2, // 21: pb.InitializeReply.err:type_name -> pb.ProtoError + 3, // 22: pb.SpecialPathsReply.paths:type_name -> pb.Paths + 4, // 23: pb.HandleExistenceCheckArgs.request:type_name -> pb.Request + 2, // 24: pb.HandleExistenceCheckReply.err:type_name -> pb.ProtoError + 57, // 25: pb.SetupArgs.Config:type_name -> pb.SetupArgs.ConfigEntry + 23, // 26: pb.StorageGetReply.entry:type_name -> pb.StorageEntry + 23, // 27: pb.StoragePutArgs.entry:type_name -> pb.StorageEntry + 10, // 28: pb.ResponseWrapDataReply.wrap_info:type_name -> pb.ResponseWrapInfo + 60, // 29: pb.EntityInfoReply.entity:type_name -> logical.Entity + 61, // 30: pb.GroupsForEntityReply.groups:type_name -> logical.Group + 62, // 31: pb.PluginEnvReply.plugin_environment:type_name -> logical.PluginEnvironment + 48, // 32: pb.Connection.connection_state:type_name -> pb.ConnectionState + 50, // 33: pb.ConnectionState.peer_certificates:type_name -> pb.CertificateChain + 50, // 34: pb.ConnectionState.verified_chains:type_name -> pb.CertificateChain + 49, // 35: pb.CertificateChain.certificates:type_name -> pb.Certificate + 63, // 36: pb.SendEventRequest.event:type_name -> logical.EventData + 1, // 37: pb.Request.HeadersEntry.value:type_name -> pb.Header + 1, // 38: pb.Response.HeadersEntry.value:type_name -> pb.Header + 12, // 39: pb.Backend.HandleRequest:input_type -> pb.HandleRequestArgs + 0, // 40: pb.Backend.SpecialPaths:input_type -> pb.Empty + 17, // 41: 
pb.Backend.HandleExistenceCheck:input_type -> pb.HandleExistenceCheckArgs + 0, // 42: pb.Backend.Cleanup:input_type -> pb.Empty + 22, // 43: pb.Backend.InvalidateKey:input_type -> pb.InvalidateKeyArgs + 19, // 44: pb.Backend.Setup:input_type -> pb.SetupArgs + 14, // 45: pb.Backend.Initialize:input_type -> pb.InitializeArgs + 0, // 46: pb.Backend.Type:input_type -> pb.Empty + 24, // 47: pb.Storage.List:input_type -> pb.StorageListArgs + 26, // 48: pb.Storage.Get:input_type -> pb.StorageGetArgs + 28, // 49: pb.Storage.Put:input_type -> pb.StoragePutArgs + 30, // 50: pb.Storage.Delete:input_type -> pb.StorageDeleteArgs + 0, // 51: pb.SystemView.DefaultLeaseTTL:input_type -> pb.Empty + 0, // 52: pb.SystemView.MaxLeaseTTL:input_type -> pb.Empty + 0, // 53: pb.SystemView.Tainted:input_type -> pb.Empty + 0, // 54: pb.SystemView.CachingDisabled:input_type -> pb.Empty + 0, // 55: pb.SystemView.ReplicationState:input_type -> pb.Empty + 36, // 56: pb.SystemView.ResponseWrapData:input_type -> pb.ResponseWrapDataArgs + 0, // 57: pb.SystemView.MlockEnabled:input_type -> pb.Empty + 0, // 58: pb.SystemView.LocalMount:input_type -> pb.Empty + 40, // 59: pb.SystemView.EntityInfo:input_type -> pb.EntityInfoArgs + 0, // 60: pb.SystemView.PluginEnv:input_type -> pb.Empty + 40, // 61: pb.SystemView.GroupsForEntity:input_type -> pb.EntityInfoArgs + 44, // 62: pb.SystemView.GeneratePasswordFromPolicy:input_type -> pb.GeneratePasswordFromPolicyRequest + 0, // 63: pb.SystemView.ClusterInfo:input_type -> pb.Empty + 51, // 64: pb.Events.SendEvent:input_type -> pb.SendEventRequest + 13, // 65: pb.Backend.HandleRequest:output_type -> pb.HandleRequestReply + 16, // 66: pb.Backend.SpecialPaths:output_type -> pb.SpecialPathsReply + 18, // 67: pb.Backend.HandleExistenceCheck:output_type -> pb.HandleExistenceCheckReply + 0, // 68: pb.Backend.Cleanup:output_type -> pb.Empty + 0, // 69: pb.Backend.InvalidateKey:output_type -> pb.Empty + 20, // 70: pb.Backend.Setup:output_type -> pb.SetupReply + 15, // 71: pb.Backend.Initialize:output_type -> pb.InitializeReply + 21, // 72: pb.Backend.Type:output_type -> pb.TypeReply + 25, // 73: pb.Storage.List:output_type -> pb.StorageListReply + 27, // 74: pb.Storage.Get:output_type -> pb.StorageGetReply + 29, // 75: pb.Storage.Put:output_type -> pb.StoragePutReply + 31, // 76: pb.Storage.Delete:output_type -> pb.StorageDeleteReply + 32, // 77: pb.SystemView.DefaultLeaseTTL:output_type -> pb.TTLReply + 32, // 78: pb.SystemView.MaxLeaseTTL:output_type -> pb.TTLReply + 33, // 79: pb.SystemView.Tainted:output_type -> pb.TaintedReply + 34, // 80: pb.SystemView.CachingDisabled:output_type -> pb.CachingDisabledReply + 35, // 81: pb.SystemView.ReplicationState:output_type -> pb.ReplicationStateReply + 37, // 82: pb.SystemView.ResponseWrapData:output_type -> pb.ResponseWrapDataReply + 38, // 83: pb.SystemView.MlockEnabled:output_type -> pb.MlockEnabledReply + 39, // 84: pb.SystemView.LocalMount:output_type -> pb.LocalMountReply + 41, // 85: pb.SystemView.EntityInfo:output_type -> pb.EntityInfoReply + 43, // 86: pb.SystemView.PluginEnv:output_type -> pb.PluginEnvReply + 42, // 87: pb.SystemView.GroupsForEntity:output_type -> pb.GroupsForEntityReply + 45, // 88: pb.SystemView.GeneratePasswordFromPolicy:output_type -> pb.GeneratePasswordFromPolicyReply + 46, // 89: pb.SystemView.ClusterInfo:output_type -> pb.ClusterInfoReply + 0, // 90: pb.Events.SendEvent:output_type -> pb.Empty + 65, // [65:91] is the sub-list for method output_type + 39, // [39:65] is the sub-list for method input_type + 39, // 
[39:39] is the sub-list for extension type_name + 39, // [39:39] is the sub-list for extension extendee + 0, // [0:39] is the sub-list for field type_name +} + +func init() { file_sdk_plugin_pb_backend_proto_init() } +func file_sdk_plugin_pb_backend_proto_init() { + if File_sdk_plugin_pb_backend_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_sdk_plugin_pb_backend_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Empty); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Header); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProtoError); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Paths); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Auth); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TokenEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LeaseOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Secret); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseWrapInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestWrapInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[12].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*HandleRequestArgs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HandleRequestReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InitializeArgs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InitializeReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SpecialPathsReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HandleExistenceCheckArgs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HandleExistenceCheckReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetupArgs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetupReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TypeReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InvalidateKeyArgs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StorageEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StorageListArgs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*StorageListReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StorageGetArgs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StorageGetReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StoragePutArgs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StoragePutReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StorageDeleteArgs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StorageDeleteReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TTLReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaintedReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CachingDisabledReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReplicationStateReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseWrapDataArgs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseWrapDataReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MlockEnabledReply); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalMountReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EntityInfoArgs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EntityInfoReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GroupsForEntityReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PluginEnvReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GeneratePasswordFromPolicyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GeneratePasswordFromPolicyReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClusterInfoReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Connection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConnectionState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Certificate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CertificateChain); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sdk_plugin_pb_backend_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SendEventRequest); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_sdk_plugin_pb_backend_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   58,
+			NumExtensions: 0,
+			NumServices:   4,
+		},
+		GoTypes:           file_sdk_plugin_pb_backend_proto_goTypes,
+		DependencyIndexes: file_sdk_plugin_pb_backend_proto_depIdxs,
+		MessageInfos:      file_sdk_plugin_pb_backend_proto_msgTypes,
+	}.Build()
+	File_sdk_plugin_pb_backend_proto = out.File
+	file_sdk_plugin_pb_backend_proto_rawDesc = nil
+	file_sdk_plugin_pb_backend_proto_goTypes = nil
+	file_sdk_plugin_pb_backend_proto_depIdxs = nil
+}
diff --git a/sdk/plugin/pb/backend.proto b/sdk/plugin/pb/backend.proto
new file mode 100644
index 0000000..ded4077
--- /dev/null
+++ b/sdk/plugin/pb/backend.proto
@@ -0,0 +1,688 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+syntax = "proto3";
+package pb;
+
+option go_package = "github.com/hashicorp/vault/sdk/plugin/pb";
+
+import "google/protobuf/timestamp.proto";
+import "sdk/logical/event.proto";
+import "sdk/logical/identity.proto";
+import "sdk/logical/plugin.proto";
+
+message Empty {}
+
+message Header {
+	repeated string header = 1;
+}
+
+message ProtoError {
+	// Error type can be one of:
+	// ErrTypeUnknown uint32 = iota
+	// ErrTypeUserError
+	// ErrTypeInternalError
+	// ErrTypeCodedError
+	// ErrTypeStatusBadRequest
+	// ErrTypeUnsupportedOperation
+	// ErrTypeUnsupportedPath
+	// ErrTypeInvalidRequest
+	// ErrTypePermissionDenied
+	// ErrTypeMultiAuthzPending
+	// ErrTypeUnrecoverable
+	uint32 err_type = 1;
+	string err_msg = 2;
+	int64 err_code = 3;
+}
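The err_type comment above names the error codes but does not show their values. A minimal sketch of the corresponding Go constants, reconstructed with `iota` exactly as the comment describes (where these constants actually live is outside this diff):

```go
package pb

// Hypothetical reconstruction of the err_type values named in the
// ProtoError comment; the ordering follows that comment verbatim.
const (
	ErrTypeUnknown uint32 = iota // 0
	ErrTypeUserError
	ErrTypeInternalError
	ErrTypeCodedError // carries err_code alongside err_msg
	ErrTypeStatusBadRequest
	ErrTypeUnsupportedOperation
	ErrTypeUnsupportedPath
	ErrTypeInvalidRequest
	ErrTypePermissionDenied
	ErrTypeMultiAuthzPending
	ErrTypeUnrecoverable // 10
)
```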
+// Paths is the structure of special paths that is used for SpecialPaths.
+message Paths {
+	// Root are the paths that require a root token to access
+	repeated string root = 1;
+
+	// Unauthenticated are the paths that can be accessed without any auth.
+	repeated string unauthenticated = 2;
+
+	// LocalStorage are paths (prefixes) that are local to this instance; this
+	// indicates that these paths should not be replicated
+	repeated string local_storage = 3;
+
+	// SealWrapStorage are storage paths that, when using a capable seal,
+	// should be seal wrapped with extra encryption. It is exact matching
+	// unless it ends with '/' in which case it will be treated as a prefix.
+	repeated string seal_wrap_storage = 4;
+
+	// WriteForwardedStorage are storage paths that, when running on a PR
+	// Secondary cluster, cause a GRPC call up to the PR Primary cluster's
+	// active node to handle storage.Put(...) and storage.Delete(...) events.
+	//
+	// See extended note in /sdk/logical/logical.go.
+	repeated string write_forwarded_storage = 5;
+}
+
+message Request {
+	// Id is the uuid associated with each request
+	string id = 1;
+
+	// If set, the name given to the replication secondary where this request
+	// originated
+	string ReplicationCluster = 2;
+
+	// Operation is the requested operation type
+	string operation = 3;
+
+	// Path is the part of the request path not consumed by the
+	// routing. As an example, if the original request path is "prod/aws/foo"
+	// and the AWS logical backend is mounted at "prod/aws/", then the
+	// final path is "foo" since the mount prefix is trimmed.
+	string path = 4;
+
+	// Request data is a JSON object that must have keys with string type.
+	string data = 5;
+
+	// Secret will be non-nil only for Revoke and Renew operations
+	// to represent the secret that was returned prior.
+	Secret secret = 6;
+
+	// Auth will be non-nil only for Renew operations
+	// to represent the auth that was returned prior.
+	Auth auth = 7;
+
+	// Headers will contain the http headers from the request. This value will
+	// be used in the audit broker to ensure we are auditing only the allowed
+	// headers.
+	map<string, Header> headers = 8;
+
+	// ClientToken is provided to the core so that the identity
+	// can be verified and ACLs applied. This value is passed
+	// through to the logical backends but after being salted and
+	// hashed.
+	string client_token = 9;
+
+	// ClientTokenAccessor is provided to the core so that it can get
+	// logged as part of request audit logging.
+	string client_token_accessor = 10;
+
+	// DisplayName is provided to the logical backend to help associate
+	// dynamic secrets with the source entity. This is not a sensitive
+	// name, but is useful for operators.
+	string display_name = 11;
+
+	// MountPoint is provided so that a logical backend can generate
+	// paths relative to itself. The `Path` is effectively the client
+	// request path with the MountPoint trimmed off.
+	string mount_point = 12;
+
+	// MountType is provided so that a logical backend can make decisions
+	// based on the specific mount type (e.g., if a mount type has different
+	// aliases, generating different defaults depending on the alias)
+	string mount_type = 13;
+
+	// MountAccessor is provided so that identities returned by the authentication
+	// backends can be tied to the mount it belongs to.
+	string mount_accessor = 14;
+
+	// WrapInfo contains requested response wrapping parameters
+	RequestWrapInfo wrap_info = 15;
+
+	// ClientTokenRemainingUses represents the allowed number of uses left on the
+	// token supplied
+	int64 client_token_remaining_uses = 16;
+
+	// EntityID is the identity of the caller extracted out of the token used
+	// to make this request
+	string entity_id = 17;
+
+	// PolicyOverride indicates that the requestor wishes to override
+	// soft-mandatory Sentinel policies
+	bool policy_override = 18;
+
+	// Whether the request is unauthenticated, as in, had no client token
+	// attached. Useful in some situations where the client token is not made
+	// accessible.
+	bool unauthenticated = 19;
+
+	// Connection will be non-nil only for credential providers to
+	// inspect the connection information and potentially use it for
+	// authentication/protection.
+	Connection connection = 20;
+}
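Because the data field arrives as a JSON-encoded string rather than a typed map, a plugin has to decode it before use. A minimal, hypothetical sketch of that step (the local request struct mirrors only the fields this sketch needs; it is not the generated type):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// request mirrors just the Request fields this sketch touches.
type request struct {
	Operation string
	Path      string
	Data      string // JSON object with string keys, per the comment above
}

func handle(req request) error {
	data := map[string]interface{}{}
	if req.Data != "" {
		// Decode the JSON payload carried in the data field.
		if err := json.Unmarshal([]byte(req.Data), &data); err != nil {
			return err
		}
	}
	fmt.Printf("%s %s with %d data keys\n", req.Operation, req.Path, len(data))
	return nil
}

func main() {
	_ = handle(request{Operation: "update", Path: "foo", Data: `{"ttl":"1h"}`})
}
```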
+message Auth {
+	LeaseOptions lease_options = 1;
+
+	// InternalData is a JSON object that is stored with the auth struct.
+	// This will be sent back during a Renew/Revoke for storing internal data
+	// used for those operations.
+	string internal_data = 2;
+
+	// DisplayName is a non-security sensitive identifier that is
+	// applicable to this Auth. It is used for logging and prefixing
+	// of dynamic secrets. For example, DisplayName may be "armon" for
+	// the github credential backend. If the client token is used to
+	// generate a SQL credential, the user may be "github-armon-uuid".
+	// This is to help identify the source without using audit tables.
+	string display_name = 3;
+
+	// Policies is the list of policies that the authenticated user
+	// is associated with.
+	repeated string policies = 4;
+
+	// Metadata is used to attach arbitrary string-type metadata to
+	// an authenticated user. This metadata will be outputted into the
+	// audit log.
+	map<string, string> metadata = 5;
+
+	// ClientToken is the token that is generated for the authentication.
+	// This will be filled in by Vault core when an auth structure is
+	// returned. Setting this manually will have no effect.
+	string client_token = 6;
+
+	// Accessor is the identifier for the ClientToken. This can be used
+	// to perform management functionalities (especially revocation) when
+	// ClientToken in the audit logs are obfuscated. Accessor can be used
+	// to revoke a ClientToken and to lookup the capabilities of the ClientToken,
+	// both without actually knowing the ClientToken.
+	string accessor = 7;
+
+	// Period indicates that the token generated using this Auth object
+	// should never expire. The token should be renewed within the duration
+	// specified by this period.
+	int64 period = 8;
+
+	// Number of allowed uses of the issued token
+	int64 num_uses = 9;
+
+	// EntityID is the identifier of the entity in identity store to which the
+	// identity of the authenticating client belongs.
+	string entity_id = 10;
+
+	// Alias is the information about the authenticated client returned by
+	// the auth backend
+	logical.Alias alias = 11;
+
+	// GroupAliases are the informational mappings of external groups which an
+	// authenticated user belongs to. This is used to check if there are
+	// mapping groups for the group aliases in identity store. For all the
+	// matching groups, the entity ID of the user will be added.
+	repeated logical.Alias group_aliases = 12;
+
+	// If set, restricts usage of the certificates to client IPs falling within
+	// the range of the specified CIDR(s).
+	repeated string bound_cidrs = 13;
+
+	// TokenPolicies and IdentityPolicies break down the list in Policies to
+	// help determine where a policy was sourced
+	repeated string token_policies = 14;
+	repeated string identity_policies = 15;
+
+	// Explicit maximum lifetime for the token. Unlike normal TTLs, the maximum
+	// TTL is a hard limit and cannot be exceeded, also counts for periodic tokens.
+	int64 explicit_max_ttl = 16;
+
+	// TokenType is the type of token being requested
+	uint32 token_type = 17;
+
+	// Whether the default policy should be added automatically by core
+	bool no_default_policy = 18;
+}
+
+message TokenEntry {
+	string id = 1;
+	string accessor = 2;
+	string parent = 3;
+	repeated string policies = 4;
+	string path = 5;
+	map<string, string> meta = 6;
+	string display_name = 7;
+	int64 num_uses = 8;
+	int64 creation_time = 9;
+	int64 ttl = 10;
+	int64 explicit_max_ttl = 11;
+	string role = 12;
+	int64 period = 13;
+	string entity_id = 14;
+	repeated string bound_cidrs = 15;
+	string namespace_id = 16;
+	string cubbyhole_id = 17;
+	uint32 type = 18;
+	map<string, string> internal_meta = 19;
+	string inline_policy = 20;
+	bool no_identity_policies = 21;
+	string external_id = 22;
+}
+
+message LeaseOptions {
+	int64 TTL = 1;
+
+	bool renewable = 2;
+
+	int64 increment = 3;
+
+	google.protobuf.Timestamp issue_time = 4;
+
+	int64 MaxTTL = 5;
+}
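The internal_data field above is a JSON string that is stored with the auth struct and handed back verbatim on Renew/Revoke. A minimal sketch of that round trip; the payload shape is purely illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// internalData is a hypothetical payload a backend might keep in the
// internal_data field; any JSON object with string keys works.
type internalData struct {
	Username string `json:"username"`
}

func main() {
	// On issue: serialize and store alongside the auth struct.
	b, _ := json.Marshal(internalData{Username: "github-armon-uuid"})

	// On Renew/Revoke: the same JSON string comes back and is decoded.
	var d internalData
	if err := json.Unmarshal(b, &d); err != nil {
		panic(err)
	}
	fmt.Println(d.Username)
}
```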
+message Secret {
+	LeaseOptions lease_options = 1;
+
+	// InternalData is a JSON object that is stored with the secret.
+	// This will be sent back during a Renew/Revoke for storing internal data
+	// used for those operations.
+	string internal_data = 2;
+
+	// LeaseID is the ID returned to the user to manage this secret.
+	// This is generated by Vault core. Any set value will be ignored.
+	// For requests, this will always be blank.
+	string lease_id = 3;
+}
+
+message Response {
+	// Secret, if not nil, denotes that this response represents a secret.
+	Secret secret = 1;
+
+	// Auth, if not nil, contains the authentication information for
+	// this response. This is only checked and means something for
+	// credential backends.
+	Auth auth = 2;
+
+	// Response data is a JSON object that must have string keys. For
+	// secrets, this data is sent down to the user as-is. To store internal
+	// data that you don't want the user to see, store it in
+	// Secret.InternalData.
+	string data = 3;
+
+	// Redirect is an HTTP URL to redirect to for further authentication.
+	// This is only valid for credential backends. This will be blanked
+	// for any logical backend and ignored.
+	string redirect = 4;
+
+	// Warnings allow operations or backends to return warnings in response
+	// to user actions without failing the action outright.
+	repeated string warnings = 5;
+
+	// Information for wrapping the response in a cubbyhole
+	ResponseWrapInfo wrap_info = 6;
+
+	// Headers will contain the http headers from the response. This value will
+	// be used in the audit broker to ensure we are auditing only the allowed
+	// headers.
+	map<string, Header> headers = 7;
+}
+
+message ResponseWrapInfo {
+	// Setting to non-zero specifies that the response should be wrapped.
+	// Specifies the desired TTL of the wrapping token.
+	int64 TTL = 1;
+
+	// The token containing the wrapped response
+	string token = 2;
+
+	// The token accessor for the wrapped response token
+	string accessor = 3;
+
+	// The creation time. This can be used with the TTL to figure out an
+	// expected expiration.
+	google.protobuf.Timestamp creation_time = 4;
+
+	// If the contained response is the output of a token creation call, the
+	// created token's accessor will be accessible here
+	string wrapped_accessor = 5;
+
+	// WrappedEntityID is the entity identifier of the caller who initiated the
+	// wrapping request
+	string wrapped_entity_id = 6;
+
+	// The format to use. This doesn't get returned, it's only internal.
+	string format = 7;
+
+	// CreationPath is the original request path that was used to create
+	// the wrapped response.
+	string creation_path = 8;
+
+	// Controls seal wrapping behavior downstream for specific use cases
+	bool seal_wrap = 9;
+}
+
+message RequestWrapInfo {
+	// Setting to non-zero specifies that the response should be wrapped.
+	// Specifies the desired TTL of the wrapping token.
+	int64 TTL = 1;
+
+	// The format to use for the wrapped response; if not specified it's a bare
+	// token
+	string format = 2;
+
+	// A flag to conforming backends that data for a given request should be
+	// seal wrapped
+	bool seal_wrap = 3;
+}
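The creation_time comment above notes it can be combined with the TTL to compute an expected expiration. A small sketch of that arithmetic; treating the int64 TTL as a time.Duration value is an assumption, since the proto does not pin down the unit:

```go
package pb

import (
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

// wrapExpiry derives the expected expiry of a wrapping token from
// ResponseWrapInfo-style fields: creation_time plus TTL. The TTL unit
// (time.Duration's int64 form) is assumed, not specified by the proto.
func wrapExpiry(creation *timestamppb.Timestamp, ttl int64) time.Time {
	return creation.AsTime().Add(time.Duration(ttl))
}
```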
+// HandleRequestArgs is the args for HandleRequest method.
+message HandleRequestArgs {
+	uint32 storage_id = 1;
+	Request request = 2;
+}
+
+// HandleRequestReply is the reply for HandleRequest method.
+message HandleRequestReply {
+	Response response = 1;
+	ProtoError err = 2;
+}
+
+// InitializeArgs is the args for Initialize method.
+message InitializeArgs {
+}
+
+// InitializeReply is the reply for Initialize method.
+message InitializeReply {
+	ProtoError err = 1;
+}
+
+// SpecialPathsReply is the reply for SpecialPaths method.
+message SpecialPathsReply {
+	Paths paths = 1;
+}
+
+// HandleExistenceCheckArgs is the args for HandleExistenceCheck method.
+message HandleExistenceCheckArgs {
+	uint32 storage_id = 1;
+	Request request = 2;
+}
+
+// HandleExistenceCheckReply is the reply for HandleExistenceCheck method.
+message HandleExistenceCheckReply {
+	bool check_found = 1;
+	bool exists = 2;
+	ProtoError err = 3;
+}
+
+// SetupArgs is the args for Setup method.
+message SetupArgs {
+	uint32 broker_id = 1;
+	map<string, string> Config = 2;
+	string backendUUID = 3;
+}
+
+// SetupReply is the reply for Setup method.
+message SetupReply {
+	string err = 1;
+}
+
+// TypeReply is the reply for the Type method.
+message TypeReply {
+	uint32 type = 1;
+}
+
+message InvalidateKeyArgs {
+	string key = 1;
+}
+
+// Backend is the interface that plugins must satisfy. The plugin should
+// implement the server for this service. Requests will first run the
+// HandleExistenceCheck rpc then run the HandleRequest rpc.
+service Backend {
+	// HandleRequest is used to handle a request and generate a response.
+	// The plugins must check the operation type and handle appropriately.
+	rpc HandleRequest(HandleRequestArgs) returns (HandleRequestReply);
+
+	// SpecialPaths is a list of paths that are special in some way.
+	// See PathType for the types of special paths. The key is the type
+	// of the special path, and the value is a list of paths for this type.
+	// This is not a regular expression but is an exact match. If the path
+	// ends in '*' then it is a prefix-based match. The '*' can only appear
+	// at the end.
+	rpc SpecialPaths(Empty) returns (SpecialPathsReply);
+
+	// HandleExistenceCheck is used to handle a request and generate a response
+	// indicating whether the given path exists or not; this is used to
+	// understand whether the request must have a Create or Update capability
+	// ACL applied. The first bool indicates whether an existence check
+	// function was found for the backend; the second indicates whether, if an
+	// existence check function was found, the item exists or not.
+	rpc HandleExistenceCheck(HandleExistenceCheckArgs) returns (HandleExistenceCheckReply);
+
+	// Cleanup is invoked during an unmount of a backend to allow it to
+	// handle any cleanup like connection closing or releasing of file handles.
+	// Cleanup is called right before Vault closes the plugin process.
+	rpc Cleanup(Empty) returns (Empty);
+
+	// InvalidateKey may be invoked when an object is modified that belongs
+	// to the backend. The backend can use this to clear any caches or reset
+	// internal state as needed.
+	rpc InvalidateKey(InvalidateKeyArgs) returns (Empty);
+
+	// Setup is used to set up the backend based on the provided backend
+	// configuration. The plugin's setup implementation should use the provided
+	// broker_id to create a connection back to Vault for use with the Storage
+	// and SystemView clients.
+	rpc Setup(SetupArgs) returns (SetupReply);
+
+	// Initialize is invoked just after mounting a backend to allow it to
+	// handle any initialization tasks that need to be performed.
+	rpc Initialize(InitializeArgs) returns (InitializeReply);
+
+	// Type returns the BackendType for the particular backend
+	rpc Type(Empty) returns (TypeReply);
+}
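The service comment spells out the call order: HandleExistenceCheck first, then HandleRequest. A hedged sketch of what that looks like from the caller's side, assuming the generated BackendClient bindings that appear later in this diff and the usual protoc-gen-go field casing; the create/update mapping is illustrative only, since the real decision lives in Vault core:

```go
package pb

import "context"

// dispatch is a hypothetical host-side helper showing the documented call
// order: the existence check runs first, then the request itself.
func dispatch(ctx context.Context, c BackendClient, storageID uint32, req *Request) (*HandleRequestReply, error) {
	chk, err := c.HandleExistenceCheck(ctx, &HandleExistenceCheckArgs{
		StorageId: storageID,
		Request:   req,
	})
	if err != nil {
		return nil, err
	}
	// If an existence check function was found, its result picks the
	// operation (illustrative mapping only).
	if chk.CheckFound {
		if chk.Exists {
			req.Operation = "update"
		} else {
			req.Operation = "create"
		}
	}
	return c.HandleRequest(ctx, &HandleRequestArgs{
		StorageId: storageID,
		Request:   req,
	})
}
```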
+message StorageEntry {
+	string key = 1;
+	bytes value = 2;
+	bool seal_wrap = 3;
+}
+
+message StorageListArgs {
+	string prefix = 1;
+}
+
+message StorageListReply {
+	repeated string keys = 1;
+	string err = 2;
+}
+
+message StorageGetArgs {
+	string key = 1;
+}
+
+message StorageGetReply {
+	StorageEntry entry = 1;
+	string err = 2;
+}
+
+message StoragePutArgs {
+	StorageEntry entry = 1;
+}
+
+message StoragePutReply {
+	string err = 1;
+}
+
+message StorageDeleteArgs {
+	string key = 1;
+}
+
+message StorageDeleteReply {
+	string err = 1;
+}
+
+// Storage is the way that plugins are able to read/write data. Plugins should
+// implement the client for this service.
+service Storage {
+	rpc List(StorageListArgs) returns (StorageListReply);
+	rpc Get(StorageGetArgs) returns (StorageGetReply);
+	rpc Put(StoragePutArgs) returns (StoragePutReply);
+	rpc Delete(StorageDeleteArgs) returns (StorageDeleteReply);
+}
+
+message TTLReply {
+	int64 TTL = 1;
+}
+
+message TaintedReply {
+	bool tainted = 1;
+}
+
+message CachingDisabledReply {
+	bool disabled = 1;
+}
+
+message ReplicationStateReply {
+	int32 state = 1;
+}
+
+message ResponseWrapDataArgs {
+	string data = 1;
+	int64 TTL = 2;
+	bool JWT = 3;
+}
+
+message ResponseWrapDataReply {
+	ResponseWrapInfo wrap_info = 1;
+	string err = 2;
+}
+
+message MlockEnabledReply {
+	bool enabled = 1;
+}
+
+message LocalMountReply {
+	bool local = 1;
+}
+
+message EntityInfoArgs {
+	string entity_id = 1;
+}
+
+message EntityInfoReply {
+	logical.Entity entity = 1;
+	string err = 2;
+}
+
+message GroupsForEntityReply {
+	repeated logical.Group groups = 1;
+	string err = 2;
+}
+
+message PluginEnvReply {
+	logical.PluginEnvironment plugin_environment = 1;
+	string err = 2;
+}
+
+message GeneratePasswordFromPolicyRequest {
+	string policy_name = 1;
+}
+
+message GeneratePasswordFromPolicyReply {
+	string password = 1;
+}
+
+message ClusterInfoReply {
+	string cluster_name = 1;
+	string cluster_id = 2;
+	string err = 3;
+}
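Per the Storage service comment above, plugins implement the client side of this service. A hedged sketch of a plugin writing and reading an entry, assuming the generated StorageClient from backend_grpc.pb.go later in this diff and an already established gRPC connection; the "config/app" key is illustrative:

```go
package pb

import (
	"context"
	"errors"

	"google.golang.org/grpc"
)

// putAndGet is a hypothetical plugin-side helper: it stores an entry via
// the Storage service, then reads it back.
func putAndGet(ctx context.Context, conn grpc.ClientConnInterface) error {
	st := NewStorageClient(conn)
	if _, err := st.Put(ctx, &StoragePutArgs{
		Entry: &StorageEntry{Key: "config/app", Value: []byte(`{"ok":true}`)},
	}); err != nil {
		return err
	}
	reply, err := st.Get(ctx, &StorageGetArgs{Key: "config/app"})
	if err != nil {
		return err
	}
	// Service-level failures travel in the reply's err string field.
	if reply.Err != "" {
		return errors.New(reply.Err)
	}
	_ = reply.Entry // the stored StorageEntry
	return nil
}
```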
+// SystemView exposes system configuration information in a safe way for plugins
+// to consume. Plugins should implement the client for this service.
+service SystemView {
+	// DefaultLeaseTTL returns the default lease TTL set in Vault configuration
+	rpc DefaultLeaseTTL(Empty) returns (TTLReply);
+
+	// MaxLeaseTTL returns the max lease TTL set in Vault configuration; backend
+	// authors should take care not to issue credentials that last longer than
+	// this value, as Vault will revoke them
+	rpc MaxLeaseTTL(Empty) returns (TTLReply);
+
+	// Tainted returns true if the mount is tainted. A mount is tainted if it is in the
+	// process of being unmounted. This should only be used in special
+	// circumstances; a primary use-case is as a guard in revocation functions.
+	// If revocation of a backend's leases fails it can keep the unmounting
+	// process from being successful. If the reason for this failure is not
+	// relevant when the mount is tainted (for instance, saving a CRL to disk
+	// when the stored CRL will be removed during the unmounting process
+	// anyways), we can ignore the errors to allow unmounting to complete.
+	rpc Tainted(Empty) returns (TaintedReply);
+
+	// CachingDisabled returns true if caching is disabled. If true, no caches
+	// should be used, despite known slowdowns.
+	rpc CachingDisabled(Empty) returns (CachingDisabledReply);
+
+	// ReplicationState indicates the state of cluster replication
+	rpc ReplicationState(Empty) returns (ReplicationStateReply);
+
+	// ResponseWrapData wraps the given data in a cubbyhole and returns the
+	// token used to unwrap.
+	rpc ResponseWrapData(ResponseWrapDataArgs) returns (ResponseWrapDataReply);
+
+	// MlockEnabled returns the configuration setting for enabling mlock on
+	// plugins.
+	rpc MlockEnabled(Empty) returns (MlockEnabledReply);
+
+	// LocalMount, when run from a system view attached to a request, indicates
+	// whether the request is affecting a local mount or not
+	rpc LocalMount(Empty) returns (LocalMountReply);
+
+	// EntityInfo returns the basic entity information for the given entity id
+	rpc EntityInfo(EntityInfoArgs) returns (EntityInfoReply);
+
+	// PluginEnv returns Vault environment information used by plugins
+	rpc PluginEnv(Empty) returns (PluginEnvReply);
+
+	// GroupsForEntity returns the group membership information for the given
+	// entity id
+	rpc GroupsForEntity(EntityInfoArgs) returns (GroupsForEntityReply);
+
+	// GeneratePasswordFromPolicy generates a password from an existing password policy
+	rpc GeneratePasswordFromPolicy(GeneratePasswordFromPolicyRequest) returns (GeneratePasswordFromPolicyReply);
+
+	// ClusterInfo returns the ClusterID information; may be reused if ClusterName is also exposed.
+	rpc ClusterInfo(Empty) returns (ClusterInfoReply);
+}
+
+message Connection {
+	// RemoteAddr is the network address that sent the request.
+	string remote_addr = 1;
+
+	// RemotePort is the network port that sent the request.
+	int32 remote_port = 3;
+
+	// ConnectionState is the marshalled tls.ConnectionState from the original
+	// request
+	ConnectionState connection_state = 2;
+}
+
+message ConnectionState {
+	uint32 version = 1;
+	bool handshake_complete = 2;
+	bool did_resume = 3;
+	uint32 cipher_suite = 4;
+	string negotiated_protocol = 5;
+	bool negotiated_protocol_is_mutual = 6;
+	string server_name = 7;
+	CertificateChain peer_certificates = 8;
+
+	repeated CertificateChain verified_chains = 9;
+	repeated bytes signed_certificate_timestamps = 10;
+
+	bytes ocsp_response = 11;
+	bytes tls_unique = 12;
+}
+
+message Certificate {
+	bytes asn1_data = 1;
+}
+
+message CertificateChain {
+	repeated Certificate certificates = 1;
+}
+
+message SendEventRequest {
+	string event_type = 1;
+	logical.EventData event = 2;
+}
+
+service Events {
+	rpc SendEvent(SendEventRequest) returns (Empty);
+}
\ No newline at end of file
diff --git a/sdk/plugin/pb/backend_grpc.pb.go b/sdk/plugin/pb/backend_grpc.pb.go
new file mode 100644
index 0000000..a8f3107
--- /dev/null
+++ b/sdk/plugin/pb/backend_grpc.pb.go
@@ -0,0 +1,1259 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+
+package pb
+
+import (
+	context "context"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+// BackendClient is the client API for Backend service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type BackendClient interface {
+	// HandleRequest is used to handle a request and generate a response.
+ // The plugins must check the operation type and handle appropriately. + HandleRequest(ctx context.Context, in *HandleRequestArgs, opts ...grpc.CallOption) (*HandleRequestReply, error) + // SpecialPaths is a list of paths that are special in some way. + // See PathType for the types of special paths. The key is the type + // of the special path, and the value is a list of paths for this type. + // This is not a regular expression but is an exact match. If the path + // ends in '*' then it is a prefix-based match. The '*' can only appear + // at the end. + SpecialPaths(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*SpecialPathsReply, error) + // HandleExistenceCheck is used to handle a request and generate a response + // indicating whether the given path exists or not; this is used to + // understand whether the request must have a Create or Update capability + // ACL applied. The first bool indicates whether an existence check + // function was found for the backend; the second indicates whether, if an + // existence check function was found, the item exists or not. + HandleExistenceCheck(ctx context.Context, in *HandleExistenceCheckArgs, opts ...grpc.CallOption) (*HandleExistenceCheckReply, error) + // Cleanup is invoked during an unmount of a backend to allow it to + // handle any cleanup like connection closing or releasing of file handles. + // Cleanup is called right before Vault closes the plugin process. + Cleanup(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) + // InvalidateKey may be invoked when an object is modified that belongs + // to the backend. The backend can use this to clear any caches or reset + // internal state as needed. + InvalidateKey(ctx context.Context, in *InvalidateKeyArgs, opts ...grpc.CallOption) (*Empty, error) + // Setup is used to set up the backend based on the provided backend + // configuration. The plugin's setup implementation should use the provided + // broker_id to create a connection back to Vault for use with the Storage + // and SystemView clients. + Setup(ctx context.Context, in *SetupArgs, opts ...grpc.CallOption) (*SetupReply, error) + // Initialize is invoked just after mounting a backend to allow it to + // handle any initialization tasks that need to be performed. + Initialize(ctx context.Context, in *InitializeArgs, opts ...grpc.CallOption) (*InitializeReply, error) + // Type returns the BackendType for the particular backend + Type(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TypeReply, error) +} + +type backendClient struct { + cc grpc.ClientConnInterface +} + +func NewBackendClient(cc grpc.ClientConnInterface) BackendClient { + return &backendClient{cc} +} + +func (c *backendClient) HandleRequest(ctx context.Context, in *HandleRequestArgs, opts ...grpc.CallOption) (*HandleRequestReply, error) { + out := new(HandleRequestReply) + err := c.cc.Invoke(ctx, "/pb.Backend/HandleRequest", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backendClient) SpecialPaths(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*SpecialPathsReply, error) { + out := new(SpecialPathsReply) + err := c.cc.Invoke(ctx, "/pb.Backend/SpecialPaths", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *backendClient) HandleExistenceCheck(ctx context.Context, in *HandleExistenceCheckArgs, opts ...grpc.CallOption) (*HandleExistenceCheckReply, error) { + out := new(HandleExistenceCheckReply) + err := c.cc.Invoke(ctx, "/pb.Backend/HandleExistenceCheck", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backendClient) Cleanup(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/pb.Backend/Cleanup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backendClient) InvalidateKey(ctx context.Context, in *InvalidateKeyArgs, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/pb.Backend/InvalidateKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backendClient) Setup(ctx context.Context, in *SetupArgs, opts ...grpc.CallOption) (*SetupReply, error) { + out := new(SetupReply) + err := c.cc.Invoke(ctx, "/pb.Backend/Setup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backendClient) Initialize(ctx context.Context, in *InitializeArgs, opts ...grpc.CallOption) (*InitializeReply, error) { + out := new(InitializeReply) + err := c.cc.Invoke(ctx, "/pb.Backend/Initialize", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backendClient) Type(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TypeReply, error) { + out := new(TypeReply) + err := c.cc.Invoke(ctx, "/pb.Backend/Type", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// BackendServer is the server API for Backend service. +// All implementations must embed UnimplementedBackendServer +// for forward compatibility +type BackendServer interface { + // HandleRequest is used to handle a request and generate a response. + // The plugins must check the operation type and handle appropriately. + HandleRequest(context.Context, *HandleRequestArgs) (*HandleRequestReply, error) + // SpecialPaths is a list of paths that are special in some way. + // See PathType for the types of special paths. The key is the type + // of the special path, and the value is a list of paths for this type. + // This is not a regular expression but is an exact match. If the path + // ends in '*' then it is a prefix-based match. The '*' can only appear + // at the end. + SpecialPaths(context.Context, *Empty) (*SpecialPathsReply, error) + // HandleExistenceCheck is used to handle a request and generate a response + // indicating whether the given path exists or not; this is used to + // understand whether the request must have a Create or Update capability + // ACL applied. The first bool indicates whether an existence check + // function was found for the backend; the second indicates whether, if an + // existence check function was found, the item exists or not. + HandleExistenceCheck(context.Context, *HandleExistenceCheckArgs) (*HandleExistenceCheckReply, error) + // Cleanup is invoked during an unmount of a backend to allow it to + // handle any cleanup like connection closing or releasing of file handles. + // Cleanup is called right before Vault closes the plugin process. + Cleanup(context.Context, *Empty) (*Empty, error) + // InvalidateKey may be invoked when an object is modified that belongs + // to the backend. 
The backend can use this to clear any caches or reset + // internal state as needed. + InvalidateKey(context.Context, *InvalidateKeyArgs) (*Empty, error) + // Setup is used to set up the backend based on the provided backend + // configuration. The plugin's setup implementation should use the provided + // broker_id to create a connection back to Vault for use with the Storage + // and SystemView clients. + Setup(context.Context, *SetupArgs) (*SetupReply, error) + // Initialize is invoked just after mounting a backend to allow it to + // handle any initialization tasks that need to be performed. + Initialize(context.Context, *InitializeArgs) (*InitializeReply, error) + // Type returns the BackendType for the particular backend + Type(context.Context, *Empty) (*TypeReply, error) + mustEmbedUnimplementedBackendServer() +} + +// UnimplementedBackendServer must be embedded to have forward compatible implementations. +type UnimplementedBackendServer struct { +} + +func (UnimplementedBackendServer) HandleRequest(context.Context, *HandleRequestArgs) (*HandleRequestReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method HandleRequest not implemented") +} +func (UnimplementedBackendServer) SpecialPaths(context.Context, *Empty) (*SpecialPathsReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method SpecialPaths not implemented") +} +func (UnimplementedBackendServer) HandleExistenceCheck(context.Context, *HandleExistenceCheckArgs) (*HandleExistenceCheckReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method HandleExistenceCheck not implemented") +} +func (UnimplementedBackendServer) Cleanup(context.Context, *Empty) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Cleanup not implemented") +} +func (UnimplementedBackendServer) InvalidateKey(context.Context, *InvalidateKeyArgs) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method InvalidateKey not implemented") +} +func (UnimplementedBackendServer) Setup(context.Context, *SetupArgs) (*SetupReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method Setup not implemented") +} +func (UnimplementedBackendServer) Initialize(context.Context, *InitializeArgs) (*InitializeReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method Initialize not implemented") +} +func (UnimplementedBackendServer) Type(context.Context, *Empty) (*TypeReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method Type not implemented") +} +func (UnimplementedBackendServer) mustEmbedUnimplementedBackendServer() {} + +// UnsafeBackendServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to BackendServer will +// result in compilation errors. 
+type UnsafeBackendServer interface { + mustEmbedUnimplementedBackendServer() +} + +func RegisterBackendServer(s grpc.ServiceRegistrar, srv BackendServer) { + s.RegisterService(&Backend_ServiceDesc, srv) +} + +func _Backend_HandleRequest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HandleRequestArgs) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackendServer).HandleRequest(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Backend/HandleRequest", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackendServer).HandleRequest(ctx, req.(*HandleRequestArgs)) + } + return interceptor(ctx, in, info, handler) +} + +func _Backend_SpecialPaths_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackendServer).SpecialPaths(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Backend/SpecialPaths", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackendServer).SpecialPaths(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Backend_HandleExistenceCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HandleExistenceCheckArgs) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackendServer).HandleExistenceCheck(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Backend/HandleExistenceCheck", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackendServer).HandleExistenceCheck(ctx, req.(*HandleExistenceCheckArgs)) + } + return interceptor(ctx, in, info, handler) +} + +func _Backend_Cleanup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackendServer).Cleanup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Backend/Cleanup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackendServer).Cleanup(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Backend_InvalidateKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InvalidateKeyArgs) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackendServer).InvalidateKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Backend/InvalidateKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackendServer).InvalidateKey(ctx, req.(*InvalidateKeyArgs)) + } + return interceptor(ctx, in, info, handler) +} + +func _Backend_Setup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetupArgs) + 
if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackendServer).Setup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Backend/Setup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackendServer).Setup(ctx, req.(*SetupArgs)) + } + return interceptor(ctx, in, info, handler) +} + +func _Backend_Initialize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InitializeArgs) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackendServer).Initialize(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Backend/Initialize", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackendServer).Initialize(ctx, req.(*InitializeArgs)) + } + return interceptor(ctx, in, info, handler) +} + +func _Backend_Type_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackendServer).Type(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Backend/Type", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackendServer).Type(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +// Backend_ServiceDesc is the grpc.ServiceDesc for Backend service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Backend_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "pb.Backend", + HandlerType: (*BackendServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "HandleRequest", + Handler: _Backend_HandleRequest_Handler, + }, + { + MethodName: "SpecialPaths", + Handler: _Backend_SpecialPaths_Handler, + }, + { + MethodName: "HandleExistenceCheck", + Handler: _Backend_HandleExistenceCheck_Handler, + }, + { + MethodName: "Cleanup", + Handler: _Backend_Cleanup_Handler, + }, + { + MethodName: "InvalidateKey", + Handler: _Backend_InvalidateKey_Handler, + }, + { + MethodName: "Setup", + Handler: _Backend_Setup_Handler, + }, + { + MethodName: "Initialize", + Handler: _Backend_Initialize_Handler, + }, + { + MethodName: "Type", + Handler: _Backend_Type_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "sdk/plugin/pb/backend.proto", +} + +// StorageClient is the client API for Storage service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type StorageClient interface { + List(ctx context.Context, in *StorageListArgs, opts ...grpc.CallOption) (*StorageListReply, error) + Get(ctx context.Context, in *StorageGetArgs, opts ...grpc.CallOption) (*StorageGetReply, error) + Put(ctx context.Context, in *StoragePutArgs, opts ...grpc.CallOption) (*StoragePutReply, error) + Delete(ctx context.Context, in *StorageDeleteArgs, opts ...grpc.CallOption) (*StorageDeleteReply, error) +} + +type storageClient struct { + cc grpc.ClientConnInterface +} + +func NewStorageClient(cc grpc.ClientConnInterface) StorageClient { + return &storageClient{cc} +} + +func (c *storageClient) List(ctx context.Context, in *StorageListArgs, opts ...grpc.CallOption) (*StorageListReply, error) { + out := new(StorageListReply) + err := c.cc.Invoke(ctx, "/pb.Storage/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) Get(ctx context.Context, in *StorageGetArgs, opts ...grpc.CallOption) (*StorageGetReply, error) { + out := new(StorageGetReply) + err := c.cc.Invoke(ctx, "/pb.Storage/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) Put(ctx context.Context, in *StoragePutArgs, opts ...grpc.CallOption) (*StoragePutReply, error) { + out := new(StoragePutReply) + err := c.cc.Invoke(ctx, "/pb.Storage/Put", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageClient) Delete(ctx context.Context, in *StorageDeleteArgs, opts ...grpc.CallOption) (*StorageDeleteReply, error) { + out := new(StorageDeleteReply) + err := c.cc.Invoke(ctx, "/pb.Storage/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// StorageServer is the server API for Storage service. +// All implementations must embed UnimplementedStorageServer +// for forward compatibility +type StorageServer interface { + List(context.Context, *StorageListArgs) (*StorageListReply, error) + Get(context.Context, *StorageGetArgs) (*StorageGetReply, error) + Put(context.Context, *StoragePutArgs) (*StoragePutReply, error) + Delete(context.Context, *StorageDeleteArgs) (*StorageDeleteReply, error) + mustEmbedUnimplementedStorageServer() +} + +// UnimplementedStorageServer must be embedded to have forward compatible implementations. +type UnimplementedStorageServer struct { +} + +func (UnimplementedStorageServer) List(context.Context, *StorageListArgs) (*StorageListReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method List not implemented") +} +func (UnimplementedStorageServer) Get(context.Context, *StorageGetArgs) (*StorageGetReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (UnimplementedStorageServer) Put(context.Context, *StoragePutArgs) (*StoragePutReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method Put not implemented") +} +func (UnimplementedStorageServer) Delete(context.Context, *StorageDeleteArgs) (*StorageDeleteReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (UnimplementedStorageServer) mustEmbedUnimplementedStorageServer() {} + +// UnsafeStorageServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to StorageServer will +// result in compilation errors. 
+type UnsafeStorageServer interface { + mustEmbedUnimplementedStorageServer() +} + +func RegisterStorageServer(s grpc.ServiceRegistrar, srv StorageServer) { + s.RegisterService(&Storage_ServiceDesc, srv) +} + +func _Storage_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StorageListArgs) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Storage/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).List(ctx, req.(*StorageListArgs)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StorageGetArgs) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Storage/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).Get(ctx, req.(*StorageGetArgs)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StoragePutArgs) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).Put(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Storage/Put", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).Put(ctx, req.(*StoragePutArgs)) + } + return interceptor(ctx, in, info, handler) +} + +func _Storage_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StorageDeleteArgs) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Storage/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).Delete(ctx, req.(*StorageDeleteArgs)) + } + return interceptor(ctx, in, info, handler) +} + +// Storage_ServiceDesc is the grpc.ServiceDesc for Storage service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Storage_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "pb.Storage", + HandlerType: (*StorageServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "List", + Handler: _Storage_List_Handler, + }, + { + MethodName: "Get", + Handler: _Storage_Get_Handler, + }, + { + MethodName: "Put", + Handler: _Storage_Put_Handler, + }, + { + MethodName: "Delete", + Handler: _Storage_Delete_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "sdk/plugin/pb/backend.proto", +} + +// SystemViewClient is the client API for SystemView service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type SystemViewClient interface { + // DefaultLeaseTTL returns the default lease TTL set in Vault configuration + DefaultLeaseTTL(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TTLReply, error) + // MaxLeaseTTL returns the max lease TTL set in Vault configuration; backend + // authors should take care not to issue credentials that last longer than + // this value, as Vault will revoke them + MaxLeaseTTL(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TTLReply, error) + // Tainted, returns true if the mount is tainted. A mount is tainted if it is in the + // process of being unmounted. This should only be used in special + // circumstances; a primary use-case is as a guard in revocation functions. + // If revocation of a backend's leases fails it can keep the unmounting + // process from being successful. If the reason for this failure is not + // relevant when the mount is tainted (for instance, saving a CRL to disk + // when the stored CRL will be removed during the unmounting process + // anyways), we can ignore the errors to allow unmounting to complete. + Tainted(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TaintedReply, error) + // CachingDisabled returns true if caching is disabled. If true, no caches + // should be used, despite known slowdowns. + CachingDisabled(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*CachingDisabledReply, error) + // ReplicationState indicates the state of cluster replication + ReplicationState(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ReplicationStateReply, error) + // ResponseWrapData wraps the given data in a cubbyhole and returns the + // token used to unwrap. + ResponseWrapData(ctx context.Context, in *ResponseWrapDataArgs, opts ...grpc.CallOption) (*ResponseWrapDataReply, error) + // MlockEnabled returns the configuration setting for enabling mlock on + // plugins. + MlockEnabled(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*MlockEnabledReply, error) + // LocalMount, when run from a system view attached to a request, indicates + // whether the request is affecting a local mount or not + LocalMount(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*LocalMountReply, error) + // EntityInfo returns the basic entity information for the given entity id + EntityInfo(ctx context.Context, in *EntityInfoArgs, opts ...grpc.CallOption) (*EntityInfoReply, error) + // PluginEnv returns Vault environment information used by plugins + PluginEnv(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*PluginEnvReply, error) + // GroupsForEntity returns the group membership information for the given + // entity id + GroupsForEntity(ctx context.Context, in *EntityInfoArgs, opts ...grpc.CallOption) (*GroupsForEntityReply, error) + // GeneratePasswordFromPolicy generates a password from an existing password policy + GeneratePasswordFromPolicy(ctx context.Context, in *GeneratePasswordFromPolicyRequest, opts ...grpc.CallOption) (*GeneratePasswordFromPolicyReply, error) + // ClusterInfo returns the ClusterID information; may be reused if ClusterName is also exposed. 
+ ClusterInfo(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ClusterInfoReply, error) +} + +type systemViewClient struct { + cc grpc.ClientConnInterface +} + +func NewSystemViewClient(cc grpc.ClientConnInterface) SystemViewClient { + return &systemViewClient{cc} +} + +func (c *systemViewClient) DefaultLeaseTTL(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TTLReply, error) { + out := new(TTLReply) + err := c.cc.Invoke(ctx, "/pb.SystemView/DefaultLeaseTTL", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *systemViewClient) MaxLeaseTTL(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TTLReply, error) { + out := new(TTLReply) + err := c.cc.Invoke(ctx, "/pb.SystemView/MaxLeaseTTL", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *systemViewClient) Tainted(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*TaintedReply, error) { + out := new(TaintedReply) + err := c.cc.Invoke(ctx, "/pb.SystemView/Tainted", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *systemViewClient) CachingDisabled(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*CachingDisabledReply, error) { + out := new(CachingDisabledReply) + err := c.cc.Invoke(ctx, "/pb.SystemView/CachingDisabled", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *systemViewClient) ReplicationState(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ReplicationStateReply, error) { + out := new(ReplicationStateReply) + err := c.cc.Invoke(ctx, "/pb.SystemView/ReplicationState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *systemViewClient) ResponseWrapData(ctx context.Context, in *ResponseWrapDataArgs, opts ...grpc.CallOption) (*ResponseWrapDataReply, error) { + out := new(ResponseWrapDataReply) + err := c.cc.Invoke(ctx, "/pb.SystemView/ResponseWrapData", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *systemViewClient) MlockEnabled(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*MlockEnabledReply, error) { + out := new(MlockEnabledReply) + err := c.cc.Invoke(ctx, "/pb.SystemView/MlockEnabled", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *systemViewClient) LocalMount(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*LocalMountReply, error) { + out := new(LocalMountReply) + err := c.cc.Invoke(ctx, "/pb.SystemView/LocalMount", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *systemViewClient) EntityInfo(ctx context.Context, in *EntityInfoArgs, opts ...grpc.CallOption) (*EntityInfoReply, error) { + out := new(EntityInfoReply) + err := c.cc.Invoke(ctx, "/pb.SystemView/EntityInfo", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *systemViewClient) PluginEnv(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*PluginEnvReply, error) { + out := new(PluginEnvReply) + err := c.cc.Invoke(ctx, "/pb.SystemView/PluginEnv", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *systemViewClient) GroupsForEntity(ctx context.Context, in *EntityInfoArgs, opts ...grpc.CallOption) (*GroupsForEntityReply, error) { + out := new(GroupsForEntityReply) + err := c.cc.Invoke(ctx, "/pb.SystemView/GroupsForEntity", in, out, opts...) 
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *systemViewClient) GeneratePasswordFromPolicy(ctx context.Context, in *GeneratePasswordFromPolicyRequest, opts ...grpc.CallOption) (*GeneratePasswordFromPolicyReply, error) {
+	out := new(GeneratePasswordFromPolicyReply)
+	err := c.cc.Invoke(ctx, "/pb.SystemView/GeneratePasswordFromPolicy", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *systemViewClient) ClusterInfo(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ClusterInfoReply, error) {
+	out := new(ClusterInfoReply)
+	err := c.cc.Invoke(ctx, "/pb.SystemView/ClusterInfo", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// SystemViewServer is the server API for SystemView service.
+// All implementations must embed UnimplementedSystemViewServer
+// for forward compatibility
+type SystemViewServer interface {
+	// DefaultLeaseTTL returns the default lease TTL set in Vault configuration
+	DefaultLeaseTTL(context.Context, *Empty) (*TTLReply, error)
+	// MaxLeaseTTL returns the max lease TTL set in Vault configuration; backend
+	// authors should take care not to issue credentials that last longer than
+	// this value, as Vault will revoke them
+	MaxLeaseTTL(context.Context, *Empty) (*TTLReply, error)
+	// Tainted returns true if the mount is tainted. A mount is tainted if it is in the
+	// process of being unmounted. This should only be used in special
+	// circumstances; a primary use-case is as a guard in revocation functions.
+	// If revocation of a backend's leases fails it can keep the unmounting
+	// process from being successful. If the reason for this failure is not
+	// relevant when the mount is tainted (for instance, saving a CRL to disk
+	// when the stored CRL will be removed during the unmounting process
+	// anyway), we can ignore the errors to allow unmounting to complete.
+	Tainted(context.Context, *Empty) (*TaintedReply, error)
+	// CachingDisabled returns true if caching is disabled. If true, no caches
+	// should be used, despite known slowdowns.
+	CachingDisabled(context.Context, *Empty) (*CachingDisabledReply, error)
+	// ReplicationState indicates the state of cluster replication
+	ReplicationState(context.Context, *Empty) (*ReplicationStateReply, error)
+	// ResponseWrapData wraps the given data in a cubbyhole and returns the
+	// token used to unwrap.
+	ResponseWrapData(context.Context, *ResponseWrapDataArgs) (*ResponseWrapDataReply, error)
+	// MlockEnabled returns the configuration setting for enabling mlock on
+	// plugins.
+ MlockEnabled(context.Context, *Empty) (*MlockEnabledReply, error) + // LocalMount, when run from a system view attached to a request, indicates + // whether the request is affecting a local mount or not + LocalMount(context.Context, *Empty) (*LocalMountReply, error) + // EntityInfo returns the basic entity information for the given entity id + EntityInfo(context.Context, *EntityInfoArgs) (*EntityInfoReply, error) + // PluginEnv returns Vault environment information used by plugins + PluginEnv(context.Context, *Empty) (*PluginEnvReply, error) + // GroupsForEntity returns the group membership information for the given + // entity id + GroupsForEntity(context.Context, *EntityInfoArgs) (*GroupsForEntityReply, error) + // GeneratePasswordFromPolicy generates a password from an existing password policy + GeneratePasswordFromPolicy(context.Context, *GeneratePasswordFromPolicyRequest) (*GeneratePasswordFromPolicyReply, error) + // ClusterInfo returns the ClusterID information; may be reused if ClusterName is also exposed. + ClusterInfo(context.Context, *Empty) (*ClusterInfoReply, error) + mustEmbedUnimplementedSystemViewServer() +} + +// UnimplementedSystemViewServer must be embedded to have forward compatible implementations. +type UnimplementedSystemViewServer struct { +} + +func (UnimplementedSystemViewServer) DefaultLeaseTTL(context.Context, *Empty) (*TTLReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method DefaultLeaseTTL not implemented") +} +func (UnimplementedSystemViewServer) MaxLeaseTTL(context.Context, *Empty) (*TTLReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method MaxLeaseTTL not implemented") +} +func (UnimplementedSystemViewServer) Tainted(context.Context, *Empty) (*TaintedReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method Tainted not implemented") +} +func (UnimplementedSystemViewServer) CachingDisabled(context.Context, *Empty) (*CachingDisabledReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method CachingDisabled not implemented") +} +func (UnimplementedSystemViewServer) ReplicationState(context.Context, *Empty) (*ReplicationStateReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReplicationState not implemented") +} +func (UnimplementedSystemViewServer) ResponseWrapData(context.Context, *ResponseWrapDataArgs) (*ResponseWrapDataReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method ResponseWrapData not implemented") +} +func (UnimplementedSystemViewServer) MlockEnabled(context.Context, *Empty) (*MlockEnabledReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method MlockEnabled not implemented") +} +func (UnimplementedSystemViewServer) LocalMount(context.Context, *Empty) (*LocalMountReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method LocalMount not implemented") +} +func (UnimplementedSystemViewServer) EntityInfo(context.Context, *EntityInfoArgs) (*EntityInfoReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method EntityInfo not implemented") +} +func (UnimplementedSystemViewServer) PluginEnv(context.Context, *Empty) (*PluginEnvReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method PluginEnv not implemented") +} +func (UnimplementedSystemViewServer) GroupsForEntity(context.Context, *EntityInfoArgs) (*GroupsForEntityReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GroupsForEntity not implemented") +} +func (UnimplementedSystemViewServer) 
GeneratePasswordFromPolicy(context.Context, *GeneratePasswordFromPolicyRequest) (*GeneratePasswordFromPolicyReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GeneratePasswordFromPolicy not implemented") +} +func (UnimplementedSystemViewServer) ClusterInfo(context.Context, *Empty) (*ClusterInfoReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method ClusterInfo not implemented") +} +func (UnimplementedSystemViewServer) mustEmbedUnimplementedSystemViewServer() {} + +// UnsafeSystemViewServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to SystemViewServer will +// result in compilation errors. +type UnsafeSystemViewServer interface { + mustEmbedUnimplementedSystemViewServer() +} + +func RegisterSystemViewServer(s grpc.ServiceRegistrar, srv SystemViewServer) { + s.RegisterService(&SystemView_ServiceDesc, srv) +} + +func _SystemView_DefaultLeaseTTL_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SystemViewServer).DefaultLeaseTTL(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.SystemView/DefaultLeaseTTL", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SystemViewServer).DefaultLeaseTTL(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _SystemView_MaxLeaseTTL_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SystemViewServer).MaxLeaseTTL(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.SystemView/MaxLeaseTTL", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SystemViewServer).MaxLeaseTTL(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _SystemView_Tainted_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SystemViewServer).Tainted(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.SystemView/Tainted", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SystemViewServer).Tainted(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _SystemView_CachingDisabled_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SystemViewServer).CachingDisabled(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.SystemView/CachingDisabled", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SystemViewServer).CachingDisabled(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _SystemView_ReplicationState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, 
interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SystemViewServer).ReplicationState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.SystemView/ReplicationState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SystemViewServer).ReplicationState(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _SystemView_ResponseWrapData_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResponseWrapDataArgs) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SystemViewServer).ResponseWrapData(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.SystemView/ResponseWrapData", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SystemViewServer).ResponseWrapData(ctx, req.(*ResponseWrapDataArgs)) + } + return interceptor(ctx, in, info, handler) +} + +func _SystemView_MlockEnabled_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SystemViewServer).MlockEnabled(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.SystemView/MlockEnabled", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SystemViewServer).MlockEnabled(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _SystemView_LocalMount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SystemViewServer).LocalMount(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.SystemView/LocalMount", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SystemViewServer).LocalMount(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _SystemView_EntityInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EntityInfoArgs) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SystemViewServer).EntityInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.SystemView/EntityInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SystemViewServer).EntityInfo(ctx, req.(*EntityInfoArgs)) + } + return interceptor(ctx, in, info, handler) +} + +func _SystemView_PluginEnv_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SystemViewServer).PluginEnv(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.SystemView/PluginEnv", + } + handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { + return srv.(SystemViewServer).PluginEnv(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _SystemView_GroupsForEntity_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EntityInfoArgs) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SystemViewServer).GroupsForEntity(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.SystemView/GroupsForEntity", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SystemViewServer).GroupsForEntity(ctx, req.(*EntityInfoArgs)) + } + return interceptor(ctx, in, info, handler) +} + +func _SystemView_GeneratePasswordFromPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GeneratePasswordFromPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SystemViewServer).GeneratePasswordFromPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.SystemView/GeneratePasswordFromPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SystemViewServer).GeneratePasswordFromPolicy(ctx, req.(*GeneratePasswordFromPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SystemView_ClusterInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SystemViewServer).ClusterInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.SystemView/ClusterInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SystemViewServer).ClusterInfo(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +// SystemView_ServiceDesc is the grpc.ServiceDesc for SystemView service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var SystemView_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "pb.SystemView", + HandlerType: (*SystemViewServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "DefaultLeaseTTL", + Handler: _SystemView_DefaultLeaseTTL_Handler, + }, + { + MethodName: "MaxLeaseTTL", + Handler: _SystemView_MaxLeaseTTL_Handler, + }, + { + MethodName: "Tainted", + Handler: _SystemView_Tainted_Handler, + }, + { + MethodName: "CachingDisabled", + Handler: _SystemView_CachingDisabled_Handler, + }, + { + MethodName: "ReplicationState", + Handler: _SystemView_ReplicationState_Handler, + }, + { + MethodName: "ResponseWrapData", + Handler: _SystemView_ResponseWrapData_Handler, + }, + { + MethodName: "MlockEnabled", + Handler: _SystemView_MlockEnabled_Handler, + }, + { + MethodName: "LocalMount", + Handler: _SystemView_LocalMount_Handler, + }, + { + MethodName: "EntityInfo", + Handler: _SystemView_EntityInfo_Handler, + }, + { + MethodName: "PluginEnv", + Handler: _SystemView_PluginEnv_Handler, + }, + { + MethodName: "GroupsForEntity", + Handler: _SystemView_GroupsForEntity_Handler, + }, + { + MethodName: "GeneratePasswordFromPolicy", + Handler: _SystemView_GeneratePasswordFromPolicy_Handler, + }, + { + MethodName: "ClusterInfo", + Handler: _SystemView_ClusterInfo_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "sdk/plugin/pb/backend.proto", +} + +// EventsClient is the client API for Events service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type EventsClient interface { + SendEvent(ctx context.Context, in *SendEventRequest, opts ...grpc.CallOption) (*Empty, error) +} + +type eventsClient struct { + cc grpc.ClientConnInterface +} + +func NewEventsClient(cc grpc.ClientConnInterface) EventsClient { + return &eventsClient{cc} +} + +func (c *eventsClient) SendEvent(ctx context.Context, in *SendEventRequest, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/pb.Events/SendEvent", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// EventsServer is the server API for Events service. +// All implementations must embed UnimplementedEventsServer +// for forward compatibility +type EventsServer interface { + SendEvent(context.Context, *SendEventRequest) (*Empty, error) + mustEmbedUnimplementedEventsServer() +} + +// UnimplementedEventsServer must be embedded to have forward compatible implementations. +type UnimplementedEventsServer struct { +} + +func (UnimplementedEventsServer) SendEvent(context.Context, *SendEventRequest) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method SendEvent not implemented") +} +func (UnimplementedEventsServer) mustEmbedUnimplementedEventsServer() {} + +// UnsafeEventsServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to EventsServer will +// result in compilation errors. 
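+//
+// Editor's sketch (illustrative, not part of the generated file): the
+// forward-compatible alternative embeds UnimplementedEventsServer instead:
+//
+//	type eventsImpl struct {
+//		UnimplementedEventsServer
+//	}
+//
+//	func (eventsImpl) SendEvent(ctx context.Context, req *SendEventRequest) (*Empty, error) {
+//		return &Empty{}, nil // acknowledge the event
+//	}
+//
+//	// RegisterEventsServer(grpcServer, eventsImpl{})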
+type UnsafeEventsServer interface { + mustEmbedUnimplementedEventsServer() +} + +func RegisterEventsServer(s grpc.ServiceRegistrar, srv EventsServer) { + s.RegisterService(&Events_ServiceDesc, srv) +} + +func _Events_SendEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SendEventRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EventsServer).SendEvent(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pb.Events/SendEvent", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EventsServer).SendEvent(ctx, req.(*SendEventRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Events_ServiceDesc is the grpc.ServiceDesc for Events service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Events_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "pb.Events", + HandlerType: (*EventsServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SendEvent", + Handler: _Events_SendEvent_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "sdk/plugin/pb/backend.proto", +} diff --git a/sdk/plugin/pb/translation.go b/sdk/plugin/pb/translation.go new file mode 100644 index 0000000..92ca9af --- /dev/null +++ b/sdk/plugin/pb/translation.go @@ -0,0 +1,788 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pb + +import ( + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "time" + + "github.com/golang/protobuf/ptypes" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/helper/wrapping" + "github.com/hashicorp/vault/sdk/logical" +) + +const ( + ErrTypeUnknown uint32 = iota + ErrTypeUserError + ErrTypeInternalError + ErrTypeCodedError + ErrTypeStatusBadRequest + ErrTypeUnsupportedOperation + ErrTypeUnsupportedPath + ErrTypeInvalidRequest + ErrTypePermissionDenied + ErrTypeMultiAuthzPending + ErrTypeUnrecoverable +) + +func ProtoErrToErr(e *ProtoError) error { + if e == nil { + return nil + } + + var err error + switch e.ErrType { + case ErrTypeUnknown: + err = errors.New(e.ErrMsg) + case ErrTypeUserError: + err = errutil.UserError{Err: e.ErrMsg} + case ErrTypeInternalError: + err = errutil.InternalError{Err: e.ErrMsg} + case ErrTypeCodedError: + err = logical.CodedError(int(e.ErrCode), e.ErrMsg) + case ErrTypeStatusBadRequest: + err = &logical.StatusBadRequest{Err: e.ErrMsg} + case ErrTypeUnsupportedOperation: + err = logical.ErrUnsupportedOperation + case ErrTypeUnsupportedPath: + err = logical.ErrUnsupportedPath + case ErrTypeInvalidRequest: + err = logical.ErrInvalidRequest + case ErrTypePermissionDenied: + err = logical.ErrPermissionDenied + case ErrTypeMultiAuthzPending: + err = logical.ErrMultiAuthzPending + case ErrTypeUnrecoverable: + err = logical.ErrUnrecoverable + } + + return err +} + +func ErrToProtoErr(e error) *ProtoError { + if e == nil { + return nil + } + pbErr := &ProtoError{ + ErrMsg: e.Error(), + ErrType: ErrTypeUnknown, + } + + switch e.(type) { + case errutil.UserError: + pbErr.ErrType = ErrTypeUserError + case errutil.InternalError: + pbErr.ErrType = ErrTypeInternalError + case logical.HTTPCodedError: + pbErr.ErrType = ErrTypeCodedError + pbErr.ErrCode = int64(e.(logical.HTTPCodedError).Code()) + case *logical.StatusBadRequest: + 
pbErr.ErrType = ErrTypeStatusBadRequest + } + + switch { + case e == logical.ErrUnsupportedOperation: + pbErr.ErrType = ErrTypeUnsupportedOperation + case e == logical.ErrUnsupportedPath: + pbErr.ErrType = ErrTypeUnsupportedPath + case e == logical.ErrInvalidRequest: + pbErr.ErrType = ErrTypeInvalidRequest + case e == logical.ErrPermissionDenied: + pbErr.ErrType = ErrTypePermissionDenied + case e == logical.ErrMultiAuthzPending: + pbErr.ErrType = ErrTypeMultiAuthzPending + case e == logical.ErrUnrecoverable: + pbErr.ErrType = ErrTypeUnrecoverable + } + + return pbErr +} + +func ErrToString(e error) string { + if e == nil { + return "" + } + + return e.Error() +} + +func LogicalStorageEntryToProtoStorageEntry(e *logical.StorageEntry) *StorageEntry { + if e == nil { + return nil + } + + return &StorageEntry{ + Key: e.Key, + Value: e.Value, + SealWrap: e.SealWrap, + } +} + +func ProtoStorageEntryToLogicalStorageEntry(e *StorageEntry) *logical.StorageEntry { + if e == nil { + return nil + } + + return &logical.StorageEntry{ + Key: e.Key, + Value: e.Value, + SealWrap: e.SealWrap, + } +} + +func ProtoLeaseOptionsToLogicalLeaseOptions(l *LeaseOptions) (logical.LeaseOptions, error) { + if l == nil { + return logical.LeaseOptions{}, nil + } + + t, err := ptypes.Timestamp(l.IssueTime) + return logical.LeaseOptions{ + TTL: time.Duration(l.TTL), + Renewable: l.Renewable, + Increment: time.Duration(l.Increment), + IssueTime: t, + MaxTTL: time.Duration(l.MaxTTL), + }, err +} + +func LogicalLeaseOptionsToProtoLeaseOptions(l logical.LeaseOptions) (*LeaseOptions, error) { + t, err := ptypes.TimestampProto(l.IssueTime) + if err != nil { + return nil, err + } + + return &LeaseOptions{ + TTL: int64(l.TTL), + Renewable: l.Renewable, + Increment: int64(l.Increment), + IssueTime: t, + MaxTTL: int64(l.MaxTTL), + }, err +} + +func ProtoSecretToLogicalSecret(s *Secret) (*logical.Secret, error) { + if s == nil { + return nil, nil + } + + data := map[string]interface{}{} + err := json.Unmarshal([]byte(s.InternalData), &data) + if err != nil { + return nil, err + } + + lease, err := ProtoLeaseOptionsToLogicalLeaseOptions(s.LeaseOptions) + if err != nil { + return nil, err + } + + return &logical.Secret{ + LeaseOptions: lease, + InternalData: data, + LeaseID: s.LeaseID, + }, nil +} + +func LogicalSecretToProtoSecret(s *logical.Secret) (*Secret, error) { + if s == nil { + return nil, nil + } + + buf, err := json.Marshal(s.InternalData) + if err != nil { + return nil, err + } + + lease, err := LogicalLeaseOptionsToProtoLeaseOptions(s.LeaseOptions) + if err != nil { + return nil, err + } + + return &Secret{ + LeaseOptions: lease, + InternalData: string(buf[:]), + LeaseID: s.LeaseID, + }, err +} + +func LogicalRequestToProtoRequest(r *logical.Request) (*Request, error) { + if r == nil { + return nil, nil + } + + buf, err := json.Marshal(r.Data) + if err != nil { + return nil, err + } + + secret, err := LogicalSecretToProtoSecret(r.Secret) + if err != nil { + return nil, err + } + + auth, err := LogicalAuthToProtoAuth(r.Auth) + if err != nil { + return nil, err + } + + headers := map[string]*Header{} + for k, v := range r.Headers { + headers[k] = &Header{Header: v} + } + + return &Request{ + ID: r.ID, + ReplicationCluster: r.ReplicationCluster, + Operation: string(r.Operation), + Path: r.Path, + Data: string(buf[:]), + Secret: secret, + Auth: auth, + Headers: headers, + ClientToken: r.ClientToken, + ClientTokenAccessor: r.ClientTokenAccessor, + DisplayName: r.DisplayName, + MountPoint: r.MountPoint, + MountType: 
r.MountType, + MountAccessor: r.MountAccessor, + WrapInfo: LogicalRequestWrapInfoToProtoRequestWrapInfo(r.WrapInfo), + ClientTokenRemainingUses: int64(r.ClientTokenRemainingUses), + Connection: LogicalConnectionToProtoConnection(r.Connection), + EntityID: r.EntityID, + PolicyOverride: r.PolicyOverride, + Unauthenticated: r.Unauthenticated, + }, nil +} + +func ProtoRequestToLogicalRequest(r *Request) (*logical.Request, error) { + if r == nil { + return nil, nil + } + + data := map[string]interface{}{} + err := json.Unmarshal([]byte(r.Data), &data) + if err != nil { + return nil, err + } + + secret, err := ProtoSecretToLogicalSecret(r.Secret) + if err != nil { + return nil, err + } + + auth, err := ProtoAuthToLogicalAuth(r.Auth) + if err != nil { + return nil, err + } + + var headers map[string][]string + if len(r.Headers) > 0 { + headers = make(map[string][]string, len(r.Headers)) + for k, v := range r.Headers { + headers[k] = v.Header + } + } + + connection, err := ProtoConnectionToLogicalConnection(r.Connection) + if err != nil { + return nil, err + } + + return &logical.Request{ + ID: r.ID, + ReplicationCluster: r.ReplicationCluster, + Operation: logical.Operation(r.Operation), + Path: r.Path, + Data: data, + Secret: secret, + Auth: auth, + Headers: headers, + ClientToken: r.ClientToken, + ClientTokenAccessor: r.ClientTokenAccessor, + DisplayName: r.DisplayName, + MountPoint: r.MountPoint, + MountType: r.MountType, + MountAccessor: r.MountAccessor, + WrapInfo: ProtoRequestWrapInfoToLogicalRequestWrapInfo(r.WrapInfo), + ClientTokenRemainingUses: int(r.ClientTokenRemainingUses), + Connection: connection, + EntityID: r.EntityID, + PolicyOverride: r.PolicyOverride, + Unauthenticated: r.Unauthenticated, + }, nil +} + +func LogicalConnectionToProtoConnection(c *logical.Connection) *Connection { + if c == nil { + return nil + } + + return &Connection{ + RemoteAddr: c.RemoteAddr, + ConnectionState: TLSConnectionStateToProtoConnectionState(c.ConnState), + } +} + +func ProtoConnectionToLogicalConnection(c *Connection) (*logical.Connection, error) { + if c == nil { + return nil, nil + } + + cs, err := ProtoConnectionStateToTLSConnectionState(c.ConnectionState) + if err != nil { + return nil, err + } + + return &logical.Connection{ + RemoteAddr: c.RemoteAddr, + ConnState: cs, + }, nil +} + +func LogicalRequestWrapInfoToProtoRequestWrapInfo(i *logical.RequestWrapInfo) *RequestWrapInfo { + if i == nil { + return nil + } + + return &RequestWrapInfo{ + TTL: int64(i.TTL), + Format: i.Format, + SealWrap: i.SealWrap, + } +} + +func ProtoRequestWrapInfoToLogicalRequestWrapInfo(i *RequestWrapInfo) *logical.RequestWrapInfo { + if i == nil { + return nil + } + + return &logical.RequestWrapInfo{ + TTL: time.Duration(i.TTL), + Format: i.Format, + SealWrap: i.SealWrap, + } +} + +func ProtoResponseToLogicalResponse(r *Response) (*logical.Response, error) { + if r == nil { + return nil, nil + } + + secret, err := ProtoSecretToLogicalSecret(r.Secret) + if err != nil { + return nil, err + } + + auth, err := ProtoAuthToLogicalAuth(r.Auth) + if err != nil { + return nil, err + } + + data := map[string]interface{}{} + err = json.Unmarshal([]byte(r.Data), &data) + if err != nil { + return nil, err + } + + wrapInfo, err := ProtoResponseWrapInfoToLogicalResponseWrapInfo(r.WrapInfo) + if err != nil { + return nil, err + } + + var headers map[string][]string + if len(r.Headers) > 0 { + headers = make(map[string][]string, len(r.Headers)) + for k, v := range r.Headers { + headers[k] = v.Header + } + } + + return 
&logical.Response{ + Secret: secret, + Auth: auth, + Data: data, + Redirect: r.Redirect, + Warnings: r.Warnings, + WrapInfo: wrapInfo, + Headers: headers, + }, nil +} + +func ProtoResponseWrapInfoToLogicalResponseWrapInfo(i *ResponseWrapInfo) (*wrapping.ResponseWrapInfo, error) { + if i == nil { + return nil, nil + } + + t, err := ptypes.Timestamp(i.CreationTime) + if err != nil { + return nil, err + } + + return &wrapping.ResponseWrapInfo{ + TTL: time.Duration(i.TTL), + Token: i.Token, + Accessor: i.Accessor, + CreationTime: t, + WrappedAccessor: i.WrappedAccessor, + WrappedEntityID: i.WrappedEntityID, + Format: i.Format, + CreationPath: i.CreationPath, + SealWrap: i.SealWrap, + }, nil +} + +func LogicalResponseWrapInfoToProtoResponseWrapInfo(i *wrapping.ResponseWrapInfo) (*ResponseWrapInfo, error) { + if i == nil { + return nil, nil + } + + t, err := ptypes.TimestampProto(i.CreationTime) + if err != nil { + return nil, err + } + + return &ResponseWrapInfo{ + TTL: int64(i.TTL), + Token: i.Token, + Accessor: i.Accessor, + CreationTime: t, + WrappedAccessor: i.WrappedAccessor, + WrappedEntityID: i.WrappedEntityID, + Format: i.Format, + CreationPath: i.CreationPath, + SealWrap: i.SealWrap, + }, nil +} + +func LogicalResponseToProtoResponse(r *logical.Response) (*Response, error) { + if r == nil { + return nil, nil + } + + secret, err := LogicalSecretToProtoSecret(r.Secret) + if err != nil { + return nil, err + } + + auth, err := LogicalAuthToProtoAuth(r.Auth) + if err != nil { + return nil, err + } + + buf, err := json.Marshal(r.Data) + if err != nil { + return nil, err + } + + wrapInfo, err := LogicalResponseWrapInfoToProtoResponseWrapInfo(r.WrapInfo) + if err != nil { + return nil, err + } + + headers := map[string]*Header{} + for k, v := range r.Headers { + headers[k] = &Header{Header: v} + } + + return &Response{ + Secret: secret, + Auth: auth, + Data: string(buf[:]), + Redirect: r.Redirect, + Warnings: r.Warnings, + WrapInfo: wrapInfo, + Headers: headers, + }, nil +} + +func LogicalAuthToProtoAuth(a *logical.Auth) (*Auth, error) { + if a == nil { + return nil, nil + } + + buf, err := json.Marshal(a.InternalData) + if err != nil { + return nil, err + } + + lo, err := LogicalLeaseOptionsToProtoLeaseOptions(a.LeaseOptions) + if err != nil { + return nil, err + } + + boundCIDRs := make([]string, len(a.BoundCIDRs)) + for i, cidr := range a.BoundCIDRs { + boundCIDRs[i] = cidr.String() + } + + return &Auth{ + LeaseOptions: lo, + TokenType: uint32(a.TokenType), + InternalData: string(buf[:]), + DisplayName: a.DisplayName, + Policies: a.Policies, + TokenPolicies: a.TokenPolicies, + IdentityPolicies: a.IdentityPolicies, + NoDefaultPolicy: a.NoDefaultPolicy, + Metadata: a.Metadata, + ClientToken: a.ClientToken, + Accessor: a.Accessor, + Period: int64(a.Period), + NumUses: int64(a.NumUses), + EntityID: a.EntityID, + Alias: a.Alias, + GroupAliases: a.GroupAliases, + BoundCIDRs: boundCIDRs, + ExplicitMaxTTL: int64(a.ExplicitMaxTTL), + }, nil +} + +func ProtoAuthToLogicalAuth(a *Auth) (*logical.Auth, error) { + if a == nil { + return nil, nil + } + + data := map[string]interface{}{} + err := json.Unmarshal([]byte(a.InternalData), &data) + if err != nil { + return nil, err + } + + lo, err := ProtoLeaseOptionsToLogicalLeaseOptions(a.LeaseOptions) + if err != nil { + return nil, err + } + + boundCIDRs, err := parseutil.ParseAddrs(a.BoundCIDRs) + if err != nil { + return nil, err + } + if len(boundCIDRs) == 0 { + // On inbound auths, if auth.BoundCIDRs is empty, it will be nil. 
+ // Let's match that behavior outbound. + boundCIDRs = nil + } + + return &logical.Auth{ + LeaseOptions: lo, + TokenType: logical.TokenType(a.TokenType), + InternalData: data, + DisplayName: a.DisplayName, + Policies: a.Policies, + TokenPolicies: a.TokenPolicies, + IdentityPolicies: a.IdentityPolicies, + NoDefaultPolicy: a.NoDefaultPolicy, + Metadata: a.Metadata, + ClientToken: a.ClientToken, + Accessor: a.Accessor, + Period: time.Duration(a.Period), + NumUses: int(a.NumUses), + EntityID: a.EntityID, + Alias: a.Alias, + GroupAliases: a.GroupAliases, + BoundCIDRs: boundCIDRs, + ExplicitMaxTTL: time.Duration(a.ExplicitMaxTTL), + }, nil +} + +func LogicalTokenEntryToProtoTokenEntry(t *logical.TokenEntry) *TokenEntry { + if t == nil { + return nil + } + + boundCIDRs := make([]string, len(t.BoundCIDRs)) + for i, cidr := range t.BoundCIDRs { + boundCIDRs[i] = cidr.String() + } + + return &TokenEntry{ + ID: t.ID, + Accessor: t.Accessor, + Parent: t.Parent, + Policies: t.Policies, + InlinePolicy: t.InlinePolicy, + Path: t.Path, + Meta: t.Meta, + InternalMeta: t.InternalMeta, + DisplayName: t.DisplayName, + NumUses: int64(t.NumUses), + CreationTime: t.CreationTime, + TTL: int64(t.TTL), + ExplicitMaxTTL: int64(t.ExplicitMaxTTL), + Role: t.Role, + Period: int64(t.Period), + EntityID: t.EntityID, + NoIdentityPolicies: t.NoIdentityPolicies, + BoundCIDRs: boundCIDRs, + NamespaceID: t.NamespaceID, + CubbyholeID: t.CubbyholeID, + Type: uint32(t.Type), + ExternalID: t.ExternalID, + } +} + +func ProtoTokenEntryToLogicalTokenEntry(t *TokenEntry) (*logical.TokenEntry, error) { + if t == nil { + return nil, nil + } + + boundCIDRs, err := parseutil.ParseAddrs(t.BoundCIDRs) + if err != nil { + return nil, err + } + if len(boundCIDRs) == 0 { + // On inbound auths, if auth.BoundCIDRs is empty, it will be nil. + // Let's match that behavior outbound. 
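+		// Editor's note: the distinction matters because callers (including
+		// this package's round-trip tests) compare structs with
+		// reflect.DeepEqual, and in Go a nil slice and an empty slice are
+		// not deeply equal:
+		//
+		//	var a []string
+		//	b := []string{}
+		//	reflect.DeepEqual(a, b) // false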
+ boundCIDRs = nil + } + + return &logical.TokenEntry{ + ID: t.ID, + Accessor: t.Accessor, + Parent: t.Parent, + Policies: t.Policies, + InlinePolicy: t.InlinePolicy, + Path: t.Path, + Meta: t.Meta, + InternalMeta: t.InternalMeta, + DisplayName: t.DisplayName, + NumUses: int(t.NumUses), + CreationTime: t.CreationTime, + TTL: time.Duration(t.TTL), + ExplicitMaxTTL: time.Duration(t.ExplicitMaxTTL), + Role: t.Role, + Period: time.Duration(t.Period), + EntityID: t.EntityID, + NoIdentityPolicies: t.NoIdentityPolicies, + BoundCIDRs: boundCIDRs, + NamespaceID: t.NamespaceID, + CubbyholeID: t.CubbyholeID, + Type: logical.TokenType(t.Type), + ExternalID: t.ExternalID, + }, nil +} + +func TLSConnectionStateToProtoConnectionState(connState *tls.ConnectionState) *ConnectionState { + if connState == nil { + return nil + } + + var verifiedChains []*CertificateChain + + if lvc := len(connState.VerifiedChains); lvc > 0 { + verifiedChains = make([]*CertificateChain, lvc) + for i, vc := range connState.VerifiedChains { + verifiedChains[i] = CertificateChainToProtoCertificateChain(vc) + } + } + + return &ConnectionState{ + Version: uint32(connState.Version), + HandshakeComplete: connState.HandshakeComplete, + DidResume: connState.DidResume, + CipherSuite: uint32(connState.CipherSuite), + NegotiatedProtocol: connState.NegotiatedProtocol, + NegotiatedProtocolIsMutual: connState.NegotiatedProtocolIsMutual, + ServerName: connState.ServerName, + PeerCertificates: CertificateChainToProtoCertificateChain(connState.PeerCertificates), + VerifiedChains: verifiedChains, + SignedCertificateTimestamps: connState.SignedCertificateTimestamps, + OcspResponse: connState.OCSPResponse, + TlsUnique: connState.TLSUnique, + } +} + +func ProtoConnectionStateToTLSConnectionState(cs *ConnectionState) (*tls.ConnectionState, error) { + if cs == nil { + return nil, nil + } + + var ( + err error + peerCertificates []*x509.Certificate + verifiedChains [][]*x509.Certificate + ) + + if peerCertificates, err = ProtoCertificateChainToCertificateChain(cs.PeerCertificates); err != nil { + return nil, err + } + + if lvc := len(cs.VerifiedChains); lvc > 0 { + verifiedChains = make([][]*x509.Certificate, lvc) + for i, vc := range cs.VerifiedChains { + if verifiedChains[i], err = ProtoCertificateChainToCertificateChain(vc); err != nil { + return nil, err + } + } + } + + connState := &tls.ConnectionState{ + Version: uint16(cs.Version), + HandshakeComplete: cs.HandshakeComplete, + DidResume: cs.DidResume, + CipherSuite: uint16(cs.CipherSuite), + NegotiatedProtocol: cs.NegotiatedProtocol, + NegotiatedProtocolIsMutual: cs.NegotiatedProtocolIsMutual, + ServerName: cs.ServerName, + PeerCertificates: peerCertificates, + VerifiedChains: verifiedChains, + SignedCertificateTimestamps: cs.SignedCertificateTimestamps, + OCSPResponse: cs.OcspResponse, + TLSUnique: cs.TlsUnique, + } + + return connState, nil +} + +func CertificateChainToProtoCertificateChain(chain []*x509.Certificate) *CertificateChain { + if len(chain) == 0 { + return nil + } + + cc := &CertificateChain{Certificates: make([]*Certificate, len(chain))} + + for i, c := range chain { + cc.Certificates[i] = X509CertificateToProtoCertificate(c) + } + + return cc +} + +func ProtoCertificateChainToCertificateChain(cc *CertificateChain) ([]*x509.Certificate, error) { + if cc == nil || len(cc.Certificates) == 0 { + return nil, nil + } + + certs := make([]*x509.Certificate, len(cc.Certificates)) + + for i, c := range cc.Certificates { + var err error + if certs[i], err = 
ProtoCertificateToX509Certificate(c); err != nil { + return nil, err + } + } + + return certs, nil +} + +func X509CertificateToProtoCertificate(cert *x509.Certificate) *Certificate { + if cert == nil { + return nil + } + + return &Certificate{Asn1Data: cert.Raw} +} + +func ProtoCertificateToX509Certificate(c *Certificate) (*x509.Certificate, error) { + if c == nil { + return nil, nil + } + + return x509.ParseCertificate(c.Asn1Data) +} diff --git a/sdk/plugin/pb/translation_test.go b/sdk/plugin/pb/translation_test.go new file mode 100644 index 0000000..3097925 --- /dev/null +++ b/sdk/plugin/pb/translation_test.go @@ -0,0 +1,315 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package pb + +import ( + "crypto/tls" + "crypto/x509" + "encoding/pem" + "errors" + "reflect" + "testing" + "time" + + "github.com/hashicorp/vault/sdk/helper/errutil" + "github.com/hashicorp/vault/sdk/helper/wrapping" + "github.com/hashicorp/vault/sdk/logical" +) + +func TestTranslation_Errors(t *testing.T) { + errs := []error{ + nil, + errors.New("test"), + errutil.UserError{Err: "test"}, + errutil.InternalError{Err: "test"}, + logical.CodedError(403, "test"), + &logical.StatusBadRequest{Err: "test"}, + logical.ErrUnsupportedOperation, + logical.ErrUnsupportedPath, + logical.ErrInvalidRequest, + logical.ErrPermissionDenied, + logical.ErrMultiAuthzPending, + } + + for _, err := range errs { + pe := ErrToProtoErr(err) + e := ProtoErrToErr(pe) + + if !reflect.DeepEqual(e, err) { + t.Fatalf("Errs did not match: %#v, %#v", e, err) + } + } +} + +func TestTranslation_StorageEntry(t *testing.T) { + tCases := []*logical.StorageEntry{ + nil, + {Key: "key", Value: []byte("value")}, + {Key: "key1", Value: []byte("value1"), SealWrap: true}, + {Key: "key1", SealWrap: true}, + } + + for _, c := range tCases { + p := LogicalStorageEntryToProtoStorageEntry(c) + e := ProtoStorageEntryToLogicalStorageEntry(p) + + if !reflect.DeepEqual(c, e) { + t.Fatalf("Entries did not match: %#v, %#v", e, c) + } + } +} + +func TestTranslation_Request(t *testing.T) { + certs, err := peerCertificates() + if err != nil { + t.Logf("No test certificates were generated: %v", err) + } + + tCases := []*logical.Request{ + nil, + { + ID: "ID", + ReplicationCluster: "RID", + Operation: logical.CreateOperation, + Path: "test/foo", + ClientToken: "token", + ClientTokenAccessor: "accessor", + DisplayName: "display", + MountPoint: "test", + MountType: "secret", + MountAccessor: "test-231234", + ClientTokenRemainingUses: 1, + EntityID: "tester", + PolicyOverride: true, + Unauthenticated: true, + Connection: &logical.Connection{ + RemoteAddr: "localhost", + ConnState: &tls.ConnectionState{ + Version: tls.VersionTLS12, + HandshakeComplete: true, + PeerCertificates: certs, + }, + }, + }, + { + ID: "ID", + ReplicationCluster: "RID", + Operation: logical.CreateOperation, + Path: "test/foo", + Data: map[string]interface{}{ + "string": "string", + "bool": true, + "array": []interface{}{"1", "2"}, + "map": map[string]interface{}{ + "key": "value", + }, + }, + Secret: &logical.Secret{ + LeaseOptions: logical.LeaseOptions{ + TTL: time.Second, + MaxTTL: time.Second, + Renewable: true, + Increment: time.Second, + IssueTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + }, + InternalData: map[string]interface{}{ + "role": "test", + }, + LeaseID: "LeaseID", + }, + Auth: &logical.Auth{ + LeaseOptions: logical.LeaseOptions{ + TTL: time.Second, + MaxTTL: time.Second, + Renewable: true, + Increment: time.Second, + IssueTime: time.Date(2009, 
time.November, 10, 23, 0, 0, 0, time.UTC), + }, + InternalData: map[string]interface{}{ + "role": "test", + }, + DisplayName: "test", + Policies: []string{"test", "Test"}, + Metadata: map[string]string{ + "test": "test", + }, + ClientToken: "token", + Accessor: "accessor", + Period: 5 * time.Second, + NumUses: 1, + EntityID: "id", + Alias: &logical.Alias{ + MountType: "type", + MountAccessor: "accessor", + Name: "name", + }, + GroupAliases: []*logical.Alias{ + { + MountType: "type", + MountAccessor: "accessor", + Name: "name", + }, + }, + }, + Headers: map[string][]string{ + "X-Vault-Test": {"test"}, + }, + ClientToken: "token", + ClientTokenAccessor: "accessor", + DisplayName: "display", + MountPoint: "test", + MountType: "secret", + MountAccessor: "test-231234", + WrapInfo: &logical.RequestWrapInfo{ + TTL: time.Second, + Format: "token", + SealWrap: true, + }, + ClientTokenRemainingUses: 1, + EntityID: "tester", + PolicyOverride: true, + Unauthenticated: true, + }, + } + + for _, c := range tCases { + p, err := LogicalRequestToProtoRequest(c) + if err != nil { + t.Fatal(err) + } + r, err := ProtoRequestToLogicalRequest(p) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(c, r) { + t.Fatalf("Requests did not match: \n%#v, \n%#v", c, r) + } + } +} + +func TestTranslation_Response(t *testing.T) { + tCases := []*logical.Response{ + nil, + { + Data: map[string]interface{}{ + "data": "blah", + }, + Warnings: []string{"warning"}, + }, + { + Data: map[string]interface{}{ + "string": "string", + "bool": true, + "array": []interface{}{"1", "2"}, + "map": map[string]interface{}{ + "key": "value", + }, + }, + Secret: &logical.Secret{ + LeaseOptions: logical.LeaseOptions{ + TTL: time.Second, + MaxTTL: time.Second, + Renewable: true, + Increment: time.Second, + IssueTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + }, + InternalData: map[string]interface{}{ + "role": "test", + }, + LeaseID: "LeaseID", + }, + Auth: &logical.Auth{ + LeaseOptions: logical.LeaseOptions{ + TTL: time.Second, + MaxTTL: time.Second, + Renewable: true, + Increment: time.Second, + IssueTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + }, + InternalData: map[string]interface{}{ + "role": "test", + }, + DisplayName: "test", + Policies: []string{"test", "Test"}, + Metadata: map[string]string{ + "test": "test", + }, + ClientToken: "token", + Accessor: "accessor", + Period: 5 * time.Second, + NumUses: 1, + EntityID: "id", + Alias: &logical.Alias{ + MountType: "type", + MountAccessor: "accessor", + Name: "name", + }, + GroupAliases: []*logical.Alias{ + { + MountType: "type", + MountAccessor: "accessor", + Name: "name", + }, + }, + }, + WrapInfo: &wrapping.ResponseWrapInfo{ + TTL: time.Second, + Token: "token", + Accessor: "accessor", + CreationTime: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + WrappedAccessor: "wrapped-accessor", + WrappedEntityID: "id", + Format: "token", + CreationPath: "test/foo", + SealWrap: true, + }, + }, + } + + for _, c := range tCases { + p, err := LogicalResponseToProtoResponse(c) + if err != nil { + t.Fatal(err) + } + r, err := ProtoResponseToLogicalResponse(p) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(c, r) { + t.Fatalf("Requests did not match: \n%#v, \n%#v", c, r) + } + } +} + +// This is the contents of $GOROOT/src/crypto/tls/testdata/example-cert.pem +// If it's good enough for testing the crypto/tls package it's good enough +// for Vault. 
+const exampleCert = ` +-----BEGIN CERTIFICATE----- +MIIBhTCCASugAwIBAgIQIRi6zePL6mKjOipn+dNuaTAKBggqhkjOPQQDAjASMRAw +DgYDVQQKEwdBY21lIENvMB4XDTE3MTAyMDE5NDMwNloXDTE4MTAyMDE5NDMwNlow +EjEQMA4GA1UEChMHQWNtZSBDbzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABD0d +7VNhbWvZLWPuj/RtHFjvtJBEwOkhbN/BnnE8rnZR8+sbwnc/KhCk3FhnpHZnQz7B +5aETbbIgmuvewdjvSBSjYzBhMA4GA1UdDwEB/wQEAwICpDATBgNVHSUEDDAKBggr +BgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MCkGA1UdEQQiMCCCDmxvY2FsaG9zdDo1 +NDUzgg4xMjcuMC4wLjE6NTQ1MzAKBggqhkjOPQQDAgNIADBFAiEA2zpJEPQyz6/l +Wf86aX6PepsntZv2GYlA5UpabfT2EZICICpJ5h/iI+i341gBmLiAFQOyTDT+/wQc +6MF9+Yw1Yy0t +-----END CERTIFICATE-----` + +func peerCertificates() ([]*x509.Certificate, error) { + blk, _ := pem.Decode([]byte(exampleCert)) + if blk == nil { + return nil, errors.New("cannot decode example certificate") + } + + cert, err := x509.ParseCertificate(blk.Bytes) + if err != nil { + return nil, err + } + + return []*x509.Certificate{cert}, nil +} diff --git a/sdk/plugin/plugin.go b/sdk/plugin/plugin.go new file mode 100644 index 0000000..ec58417 --- /dev/null +++ b/sdk/plugin/plugin.go @@ -0,0 +1,157 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package plugin + +import ( + "context" + "errors" + "fmt" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/pluginutil" + "github.com/hashicorp/vault/sdk/logical" +) + +// BackendPluginClient is a wrapper around backendPluginClient +// that also contains its plugin.Client instance. It's primarily +// used to cleanly kill the client on Cleanup() +type BackendPluginClient struct { + client *plugin.Client + + logical.Backend +} + +// Cleanup calls the RPC client's Cleanup() func and also calls +// the go-plugin's client Kill() func +func (b *BackendPluginClient) Cleanup(ctx context.Context) { + b.Backend.Cleanup(ctx) + b.client.Kill() +} + +// NewBackendWithVersion will return an instance of an RPC-based client implementation of the backend for +// external plugins, or a concrete implementation of the backend if it is a builtin backend. +// The backend is returned as a logical.Backend interface. The isMetadataMode param determines whether +// the plugin should run in metadata mode. +func NewBackendWithVersion(ctx context.Context, pluginName string, pluginType consts.PluginType, sys pluginutil.LookRunnerUtil, conf *logical.BackendConfig, isMetadataMode bool, version string) (logical.Backend, error) { + // Look for plugin in the plugin catalog + pluginRunner, err := sys.LookupPluginVersion(ctx, pluginName, pluginType, version) + if err != nil { + return nil, err + } + + var backend logical.Backend + if pluginRunner.Builtin { + // Plugin is builtin so we can retrieve an instance of the interface + // from the pluginRunner. Then cast it to logical.Factory. 
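+		// Editor's note: logical.Factory is the constructor signature
+		//
+		//	func(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error)
+		//
+		// so a successful assertion below lets the builtin backend be
+		// constructed in-process rather than dispensed over RPC.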
+		rawFactory, err := pluginRunner.BuiltinFactory()
+		if err != nil {
+			return nil, fmt.Errorf("error getting plugin type: %q", err)
+		}
+
+		if factory, ok := rawFactory.(logical.Factory); !ok {
+			return nil, fmt.Errorf("unsupported backend type: %q", pluginName)
+		} else {
+			if backend, err = factory(ctx, conf); err != nil {
+				return nil, err
+			}
+		}
+	} else {
+		// create a backendPluginClient instance
+		backend, err = NewPluginClient(ctx, sys, pluginRunner, conf.Logger, isMetadataMode)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return backend, nil
+}
+
+// NewBackend will return an instance of an RPC-based client implementation of the backend for
+// external plugins, or a concrete implementation of the backend if it is a builtin backend.
+// The backend is returned as a logical.Backend interface. The isMetadataMode param determines whether
+// the plugin should run in metadata mode.
+func NewBackend(ctx context.Context, pluginName string, pluginType consts.PluginType, sys pluginutil.LookRunnerUtil, conf *logical.BackendConfig, isMetadataMode bool) (logical.Backend, error) {
+	return NewBackendWithVersion(ctx, pluginName, pluginType, sys, conf, isMetadataMode, "")
+}
+
+func NewPluginClient(ctx context.Context, sys pluginutil.RunnerUtil, pluginRunner *pluginutil.PluginRunner, logger log.Logger, isMetadataMode bool) (logical.Backend, error) {
+	// pluginMap is the map of plugins we can dispense.
+	pluginSet := map[int]plugin.PluginSet{
+		// Version 3 used to support both protocols. We want to keep it around
+		// since it's possible old plugins built against this version will still
+		// work with gRPC. There is currently no difference between version 3
+		// and version 4.
+		3: {
+			"backend": &GRPCBackendPlugin{
+				MetadataMode: isMetadataMode,
+			},
+		},
+		4: {
+			"backend": &GRPCBackendPlugin{
+				MetadataMode: isMetadataMode,
+			},
+		},
+	}
+
+	namedLogger := logger.Named(pluginRunner.Name)
+
+	var client *plugin.Client
+	var err error
+	if isMetadataMode {
+		client, err = pluginRunner.RunMetadataMode(ctx, sys, pluginSet, HandshakeConfig, []string{}, namedLogger)
+	} else {
+		client, err = pluginRunner.Run(ctx, sys, pluginSet, HandshakeConfig, []string{}, namedLogger)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	// Connect via RPC
+	rpcClient, err := client.Client()
+	if err != nil {
+		return nil, err
+	}
+
+	// Request the plugin
+	raw, err := rpcClient.Dispense("backend")
+	if err != nil {
+		return nil, err
+	}
+
+	var backend logical.Backend
+	var transport string
+	// We should have a logical backend type now. This feels like a normal interface
+	// implementation but is in fact over an RPC connection.
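+	// Editor's note: Dispense returns an interface{}; the type switch below
+	// both asserts the concrete gRPC client type and records the transport
+	// name that the optional tracing middleware attaches to its logger.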
+	switch b := raw.(type) {
+	case *backendGRPCPluginClient:
+		backend = b
+		transport = "gRPC"
+	default:
+		return nil, errors.New("unsupported plugin client type")
+	}
+
+	// Wrap the backend in a tracing middleware
+	if namedLogger.IsTrace() {
+		backend = &BackendTracingMiddleware{
+			logger: namedLogger.With("transport", transport),
+			next:   backend,
+		}
+	}
+
+	return &BackendPluginClient{
+		client:  client,
+		Backend: backend,
+	}, nil
+}
+
+func (b *BackendPluginClient) PluginVersion() logical.PluginVersion {
+	if versioner, ok := b.Backend.(logical.PluginVersioner); ok {
+		return versioner.PluginVersion()
+	}
+	return logical.EmptyPluginVersion
+}
+
+var _ logical.PluginVersioner = (*BackendPluginClient)(nil)
diff --git a/sdk/plugin/plugin_v5.go b/sdk/plugin/plugin_v5.go
new file mode 100644
index 0000000..cc2d138
--- /dev/null
+++ b/sdk/plugin/plugin_v5.go
@@ -0,0 +1,184 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/hashicorp/go-plugin"
+	"github.com/hashicorp/vault/sdk/helper/consts"
+	"github.com/hashicorp/vault/sdk/helper/pluginutil"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/hashicorp/vault/sdk/plugin/pb"
+)
+
+// BackendPluginClientV5 is a wrapper around backendPluginClient
+// that also contains its pluginutil.PluginClient instance. It's primarily
+// used to cleanly close (or reload) the client on Cleanup()
+type BackendPluginClientV5 struct {
+	client pluginutil.PluginClient
+
+	logical.Backend
+}
+
+type ContextKey string
+
+func (c ContextKey) String() string {
+	return "plugin" + string(c)
+}
+
+const ContextKeyPluginReload = ContextKey("plugin-reload")
+
+// Cleanup cleans up the go-plugin client and the plugin catalog
+func (b *BackendPluginClientV5) Cleanup(ctx context.Context) {
+	_, ok := ctx.Value(ContextKeyPluginReload).(string)
+	if !ok {
+		b.Backend.Cleanup(ctx)
+		b.client.Close()
+		return
+	}
+	b.Backend.Cleanup(ctx)
+	b.client.Reload()
+}
+
+func (b *BackendPluginClientV5) IsExternal() bool {
+	return true
+}
+
+func (b *BackendPluginClientV5) PluginVersion() logical.PluginVersion {
+	if versioner, ok := b.Backend.(logical.PluginVersioner); ok {
+		return versioner.PluginVersion()
+	}
+	return logical.EmptyPluginVersion
+}
+
+var _ logical.PluginVersioner = (*BackendPluginClientV5)(nil)
+
+// NewBackendV5 will return an instance of an RPC-based client implementation of
+// the backend for external plugins, or a concrete implementation of the
+// backend if it is a builtin backend. The backend is returned as a
+// logical.Backend interface.
+func NewBackendV5(ctx context.Context, pluginName string, pluginType consts.PluginType, pluginVersion string, sys pluginutil.LookRunnerUtil, conf *logical.BackendConfig) (logical.Backend, error) {
+	// Look for plugin in the plugin catalog
+	pluginRunner, err := sys.LookupPluginVersion(ctx, pluginName, pluginType, pluginVersion)
+	if err != nil {
+		return nil, err
+	}
+
+	var backend logical.Backend
+	if pluginRunner.Builtin {
+		// Plugin is builtin so we can retrieve an instance of the interface
+		// from the pluginRunner. Then cast it to logical.Factory.
+ rawFactory, err := pluginRunner.BuiltinFactory() + if err != nil { + return nil, fmt.Errorf("error getting plugin type: %q", err) + } + + if factory, ok := rawFactory.(logical.Factory); !ok { + return nil, fmt.Errorf("unsupported backend type: %q", pluginName) + } else { + if backend, err = factory(ctx, conf); err != nil { + return nil, err + } + } + } else { + // create a backendPluginClient instance + config := pluginutil.PluginClientConfig{ + Name: pluginName, + PluginSets: PluginSet, + PluginType: pluginType, + Version: pluginVersion, + HandshakeConfig: HandshakeConfig, + Logger: conf.Logger.Named(pluginName), + AutoMTLS: true, + Wrapper: sys, + } + backend, err = NewPluginClientV5(ctx, sys, config) + if err != nil { + return nil, err + } + } + + return backend, nil +} + +// PluginSet is the map of plugins we can dispense. +var PluginSet = map[int]plugin.PluginSet{ + 5: { + "backend": &GRPCBackendPlugin{}, + }, +} + +func Dispense(rpcClient plugin.ClientProtocol, pluginClient pluginutil.PluginClient) (logical.Backend, error) { + // Request the plugin + raw, err := rpcClient.Dispense("backend") + if err != nil { + return nil, err + } + + var backend logical.Backend + // We should have a logical backend type now. This feels like a normal interface + // implementation but is in fact over an RPC connection. + switch c := raw.(type) { + case *backendGRPCPluginClient: + // This is an abstraction leak from go-plugin but it is necessary in + // order to enable multiplexing on multiplexed plugins + c.client = pb.NewBackendClient(pluginClient.Conn()) + c.versionClient = logical.NewPluginVersionClient(pluginClient.Conn()) + + backend = c + default: + return nil, errors.New("unsupported plugin client type") + } + + return &BackendPluginClientV5{ + client: pluginClient, + Backend: backend, + }, nil +} + +func NewPluginClientV5(ctx context.Context, sys pluginutil.RunnerUtil, config pluginutil.PluginClientConfig) (logical.Backend, error) { + pluginClient, err := sys.NewPluginClient(ctx, config) + if err != nil { + return nil, err + } + + // Request the plugin + raw, err := pluginClient.Dispense("backend") + if err != nil { + return nil, err + } + + var backend logical.Backend + var transport string + // We should have a logical backend type now. This feels like a normal interface + // implementation but is in fact over an RPC connection. + switch c := raw.(type) { + case *backendGRPCPluginClient: + // This is an abstraction leak from go-plugin but it is necessary in + // order to enable multiplexing on multiplexed plugins + c.client = pb.NewBackendClient(pluginClient.Conn()) + c.versionClient = logical.NewPluginVersionClient(pluginClient.Conn()) + + backend = c + transport = "gRPC" + default: + return nil, errors.New("unsupported plugin client type") + } + + // Wrap the backend in a tracing middleware + if config.Logger.IsTrace() { + backend = &BackendTracingMiddleware{ + logger: config.Logger.With("transport", transport), + next: backend, + } + } + + return &BackendPluginClientV5{ + client: pluginClient, + Backend: backend, + }, nil +} diff --git a/sdk/plugin/serve.go b/sdk/plugin/serve.go new file mode 100644 index 0000000..9ad2b82 --- /dev/null +++ b/sdk/plugin/serve.go @@ -0,0 +1,167 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+	"crypto/tls"
+	"math"
+	"os"
+
+	"google.golang.org/grpc"
+
+	log "github.com/hashicorp/go-hclog"
+	plugin "github.com/hashicorp/go-plugin"
+	"github.com/hashicorp/vault/sdk/helper/pluginutil"
+	"github.com/hashicorp/vault/sdk/logical"
+)
+
+// BackendPluginName is the name of the plugin that can be
+// dispensed from the plugin server.
+const BackendPluginName = "backend"
+
+type TLSProviderFunc func() (*tls.Config, error)
+
+type ServeOpts struct {
+	BackendFactoryFunc logical.Factory
+	TLSProviderFunc    TLSProviderFunc
+	Logger             log.Logger
+}
+
+// Serve is a helper function used to serve a backend plugin. This
+// should be run on the plugin's main process.
+func Serve(opts *ServeOpts) error {
+	logger := opts.Logger
+	if logger == nil {
+		logger = log.New(&log.LoggerOptions{
+			Level:      log.Trace,
+			Output:     os.Stderr,
+			JSONFormat: true,
+		})
+	}
+
+	// pluginSets is the map of plugins we can dispense.
+	pluginSets := map[int]plugin.PluginSet{
+		// Version 3 used to support both protocols. We want to keep it around
+		// since it's possible old plugins built against this version will still
+		// work with gRPC. There is currently no difference between version 3
+		// and version 4.
+		3: {
+			"backend": &GRPCBackendPlugin{
+				Factory: opts.BackendFactoryFunc,
+				Logger:  logger,
+			},
+		},
+		4: {
+			"backend": &GRPCBackendPlugin{
+				Factory: opts.BackendFactoryFunc,
+				Logger:  logger,
+			},
+		},
+		5: {
+			"backend": &GRPCBackendPlugin{
+				Factory:             opts.BackendFactoryFunc,
+				MultiplexingSupport: false,
+				Logger:              logger,
+			},
+		},
+	}
+
+	err := pluginutil.OptionallyEnableMlock()
+	if err != nil {
+		return err
+	}
+
+	serveOpts := &plugin.ServeConfig{
+		HandshakeConfig:  HandshakeConfig,
+		VersionedPlugins: pluginSets,
+		TLSProvider:      opts.TLSProviderFunc,
+		Logger:           logger,
+
+		// A non-nil value here enables gRPC serving for this plugin...
+		GRPCServer: func(opts []grpc.ServerOption) *grpc.Server {
+			opts = append(opts, grpc.MaxRecvMsgSize(math.MaxInt32))
+			opts = append(opts, grpc.MaxSendMsgSize(math.MaxInt32))
+			return plugin.DefaultGRPCServer(opts)
+		},
+	}
+
+	plugin.Serve(serveOpts)
+
+	return nil
+}
+
+// ServeMultiplex is a helper function used to serve a backend plugin. This
+// should be run on the plugin's main process.
+func ServeMultiplex(opts *ServeOpts) error {
+	logger := opts.Logger
+	if logger == nil {
+		logger = log.New(&log.LoggerOptions{
+			Level:      log.Info,
+			Output:     os.Stderr,
+			JSONFormat: true,
+		})
+	}
+
+	// pluginSets is the map of plugins we can dispense.
+	pluginSets := map[int]plugin.PluginSet{
+		// Version 3 used to support both protocols. We want to keep it around
+		// since it's possible old plugins built against this version will still
+		// work with gRPC. There is currently no difference between version 3
+		// and version 4.
+		3: {
+			"backend": &GRPCBackendPlugin{
+				Factory: opts.BackendFactoryFunc,
+				Logger:  logger,
+			},
+		},
+		4: {
+			"backend": &GRPCBackendPlugin{
+				Factory: opts.BackendFactoryFunc,
+				Logger:  logger,
+			},
+		},
+		5: {
+			"backend": &GRPCBackendPlugin{
+				Factory:             opts.BackendFactoryFunc,
+				MultiplexingSupport: true,
+				Logger:              logger,
+			},
+		},
+	}
+
+	err := pluginutil.OptionallyEnableMlock()
+	if err != nil {
+		return err
+	}
+
+	serveOpts := &plugin.ServeConfig{
+		HandshakeConfig:  HandshakeConfig,
+		VersionedPlugins: pluginSets,
+		Logger:           logger,
+
+		// A non-nil value here enables gRPC serving for this plugin...
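+		// (The message-size options below lift gRPC's default 4 MB receive
+		// cap so large plugin payloads are not rejected.)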
+		GRPCServer: func(opts []grpc.ServerOption) *grpc.Server {
+			opts = append(opts, grpc.MaxRecvMsgSize(math.MaxInt32))
+			opts = append(opts, grpc.MaxSendMsgSize(math.MaxInt32))
+			return plugin.DefaultGRPCServer(opts)
+		},
+
+		// TLSProvider is required to support v3 and v4 plugins.
+		// It will be ignored for v5, which uses AutoMTLS.
+		TLSProvider: opts.TLSProviderFunc,
+	}
+
+	plugin.Serve(serveOpts)
+
+	return nil
+}
+
+// HandshakeConfig is used to do a basic handshake between
+// a plugin and host. If the handshake fails, a user-friendly error is shown.
+// This prevents users from executing bad plugins or executing a plugin
+// directory. It is a UX feature, not a security feature.
+var HandshakeConfig = plugin.HandshakeConfig{
+	MagicCookieKey:   "VAULT_BACKEND_PLUGIN",
+	MagicCookieValue: "6669da05-b1c8-4f49-97d9-c8e5bed98e20",
+}
diff --git a/sdk/plugin/storage_test.go b/sdk/plugin/storage_test.go
new file mode 100644
index 0000000..61a5dee
--- /dev/null
+++ b/sdk/plugin/storage_test.go
@@ -0,0 +1,40 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package plugin
+
+import (
+	"context"
+	"testing"
+
+	"google.golang.org/grpc"
+
+	"github.com/hashicorp/go-plugin"
+	"github.com/hashicorp/vault/sdk/logical"
+	"github.com/hashicorp/vault/sdk/plugin/pb"
+)
+
+func TestStorage_GRPC_ReturnsErrIfStorageNil(t *testing.T) {
+	_, err := new(GRPCStorageServer).Get(context.Background(), nil)
+	if err == nil {
+		t.Error("Expected error when using server with no impl")
+	}
+}
+
+func TestStorage_impl(t *testing.T) {
+	var _ logical.Storage = new(GRPCStorageClient)
+}
+
+func TestStorage_GRPC(t *testing.T) {
+	storage := &logical.InmemStorage{}
+	client, _ := plugin.TestGRPCConn(t, func(s *grpc.Server) {
+		pb.RegisterStorageServer(s, &GRPCStorageServer{
+			impl: storage,
+		})
+	})
+	defer client.Close()
+
+	testStorage := &GRPCStorageClient{client: pb.NewStorageClient(client)}
+
+	logical.TestStorage(t, testStorage)
+}
diff --git a/sdk/queue/README.md b/sdk/queue/README.md
new file mode 100644
index 0000000..345db19
--- /dev/null
+++ b/sdk/queue/README.md
@@ -0,0 +1,9 @@
+Vault SDK - Queue
+=================
+
+The `queue` package provides Vault plugins with a Priority Queue. It can be used
+as an in-memory list of `queue.Item` sorted by their `priority`, and offers
+methods to find or remove items by their key. Internally it
+uses `container/heap`; see [Example Priority
+Queue](https://golang.org/pkg/container/heap/#example__priorityQueue).
+
diff --git a/sdk/queue/priority_queue.go b/sdk/queue/priority_queue.go
new file mode 100644
index 0000000..802a538
--- /dev/null
+++ b/sdk/queue/priority_queue.go
@@ -0,0 +1,196 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// Package queue provides Vault plugins with a Priority Queue. It can be used
+// as an in-memory list of queue.Item sorted by their priority, and offers
+// methods to find or remove items by their key. Internally it uses
+// container/heap; see Example Priority Queue:
+// https://golang.org/pkg/container/heap/#example__priorityQueue
+package queue
+
+import (
+	"container/heap"
+	"errors"
+	"sync"
+
+	"github.com/mitchellh/copystructure"
+)
+
+// ErrEmpty is returned for queues with no items
+var ErrEmpty = errors.New("queue is empty")
+
+// ErrDuplicateItem is returned when the queue attempts to push an item to a key that
+// already exists. The queue does not attempt an update; instead it returns this
+// error. If an Item needs to be updated or replaced, pop the item first.
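+//
+// A sketch of that update flow (illustrative only, not part of the API):
+//
+//	if item, _ := pq.PopByKey(key); item != nil {
+//		item.Priority = newPriority
+//		_ = pq.Push(item)
+//	}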
+var ErrDuplicateItem = errors.New("duplicate item")
+
+// New initializes the internal data structures and returns a new
+// PriorityQueue
+func New() *PriorityQueue {
+	pq := PriorityQueue{
+		data:    make(queue, 0),
+		dataMap: make(map[string]*Item),
+	}
+	heap.Init(&pq.data)
+	return &pq
+}
+
+// PriorityQueue facilitates a queue of Items, providing Push, Pop, and
+// PopByKey convenience methods. The ordering (priority) is an int64 value,
+// with the smallest value being the highest priority. PriorityQueue maintains both
+// an internal slice for the queue as well as a map of the same items with their
+// keys as the index. This enables users to find specific items by key. The map
+// must be kept in sync with the data slice.
+// See https://golang.org/pkg/container/heap/#example__priorityQueue
+type PriorityQueue struct {
+	// data is the internal structure that holds the queue, and is operated on by
+	// heap functions
+	data queue
+
+	// dataMap represents all the items in the queue, with unique indexes, used
+	// for finding specific items. dataMap is kept in sync with the data slice
+	dataMap map[string]*Item
+
+	// lock is a read/write mutex, and used to facilitate read/write locks on the
+	// data and dataMap fields
+	lock sync.RWMutex
+}
+
+// queue is the internal data structure used to satisfy heap.Interface. This
+// prevents users from calling Pop and Push heap methods directly
+type queue []*Item
+
+// Item is something managed in the priority queue
+type Item struct {
+	// Key is a unique string used to identify items in the internal data map
+	Key string
+	// Value is an unspecified type that implementations can use to store
+	// information
+	Value interface{}
+
+	// Priority determines ordering in the queue, with the lowest value being the
+	// highest priority
+	Priority int64
+
+	// index is an internal value used by the heap package, and should not be
+	// modified by any consumer of the priority queue
+	index int
+}
+
+// Len returns the count of items in the Priority Queue
+func (pq *PriorityQueue) Len() int {
+	pq.lock.RLock()
+	defer pq.lock.RUnlock()
+	return pq.data.Len()
+}
+
+// Pop pops the highest priority item from the queue. This is a
+// wrapper/convenience method that calls heap.Pop, so consumers do not need to
+// invoke heap functions directly
+func (pq *PriorityQueue) Pop() (*Item, error) {
+	pq.lock.Lock()
+	defer pq.lock.Unlock()
+
+	if pq.data.Len() == 0 {
+		return nil, ErrEmpty
+	}
+
+	item := heap.Pop(&pq.data).(*Item)
+	delete(pq.dataMap, item.Key)
+	return item, nil
+}
+
+// Push pushes an item onto the queue. This is a wrapper/convenience
+// method that calls heap.Push, so consumers do not need to invoke heap
+// functions directly. Items must have unique Keys, and Items in the queue
+// cannot be updated. To modify an Item, users must first remove it and re-push
+// it after modifications
+func (pq *PriorityQueue) Push(i *Item) error {
+	if i == nil || i.Key == "" {
+		return errors.New("error adding item: Item Key is required")
+	}
+
+	pq.lock.Lock()
+	defer pq.lock.Unlock()
+
+	if _, ok := pq.dataMap[i.Key]; ok {
+		return ErrDuplicateItem
+	}
+	// Copy the item value(s) so that modifications to the source item do not
+	// affect the item on the queue
+	clone, err := copystructure.Copy(i)
+	if err != nil {
+		return err
+	}
+
+	pq.dataMap[i.Key] = clone.(*Item)
+	heap.Push(&pq.data, clone)
+	return nil
+}
+
+// PopByKey searches the queue for an item with the given key and removes it
+// from the queue if found. Returns nil if not found. This method must fix the
+// queue after removing any key.
+func (pq *PriorityQueue) PopByKey(key string) (*Item, error) {
+	pq.lock.Lock()
+	defer pq.lock.Unlock()
+
+	item, ok := pq.dataMap[key]
+	if !ok {
+		return nil, nil
+	}
+
+	// Remove the item from the heap and delete it from the dataMap
+	itemRaw := heap.Remove(&pq.data, item.index)
+	delete(pq.dataMap, key)
+
+	if itemRaw != nil {
+		if i, ok := itemRaw.(*Item); ok {
+			return i, nil
+		}
+	}
+
+	return nil, nil
+}
+
+// Len returns the number of items in the queue data structure. Do not use this
+// method directly on the queue; use PriorityQueue.Len() instead.
+func (q queue) Len() int { return len(q) }
+
+// Less returns whether the Item with index i should sort before the Item with
+// index j in the queue. This method is used by the queue to determine priority
+// internally; the Item with the lower value wins (a priority of zero is higher
+// than a priority of 1). The priority of Items with equal values is undetermined.
+func (q queue) Less(i, j int) bool {
+	return q[i].Priority < q[j].Priority
+}
+
+// Swap swaps things in-place; part of sort.Interface
+func (q queue) Swap(i, j int) {
+	q[i], q[j] = q[j], q[i]
+	q[i].index = i
+	q[j].index = j
+}
+
+// Push is used by heap.Interface to push items onto the heap. This method is
+// invoked by container/heap, and should not be used directly.
+// See: https://golang.org/pkg/container/heap/#Interface
+func (q *queue) Push(x interface{}) {
+	n := len(*q)
+	item := x.(*Item)
+	item.index = n
+	*q = append(*q, item)
+}
+
+// Pop is used by heap.Interface to pop items off of the heap. This method is
+// invoked by container/heap, and should not be used directly.
+// See: https://golang.org/pkg/container/heap/#Interface
+func (q *queue) Pop() interface{} {
+	old := *q
+	n := len(old)
+	item := old[n-1]
+	old[n-1] = nil  // avoid memory leak
+	item.index = -1 // for safety
+	*q = old[0 : n-1]
+	return item
+}
diff --git a/sdk/queue/priority_queue_test.go b/sdk/queue/priority_queue_test.go
new file mode 100644
index 0000000..108a26c
--- /dev/null
+++ b/sdk/queue/priority_queue_test.go
@@ -0,0 +1,212 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package queue
+
+import (
+	"container/heap"
+	"fmt"
+	"testing"
+	"time"
+)
+
+// Ensure we satisfy the heap.Interface
+var _ heap.Interface = &queue{}
+
+// some tests rely on the ordering of items from this method
+func testCases() (tc []*Item) {
+	// create a slice of items with priority / times offset by these seconds
+	for i, m := range []time.Duration{
+		5,       // 5 seconds
+		183600,  // 51 hours
+		15,      // 15 seconds
+		45,      // 45 seconds
+		900,     // 15 minutes
+		300,     // 5 minutes
+		7200,    // 2 hours
+		183600,  // 51 hours
+		7201,    // 2 hours, 1 second
+		115200,  // 32 hours
+		1209600, // 2 weeks
+	} {
+		n := time.Now()
+		ft := n.Add(time.Second * m)
+		tc = append(tc, &Item{
+			Key:      fmt.Sprintf("item-%d", i),
+			Value:    1,
+			Priority: ft.Unix(),
+		})
+	}
+	return
+}
+
+func TestPriorityQueue_New(t *testing.T) {
+	pq := New()
+
+	if len(pq.data) != len(pq.dataMap) || len(pq.data) != 0 {
+		t.Fatalf("error in queue/map size, expected data and map to be initialized, got (%d) and (%d)", len(pq.data), len(pq.dataMap))
+	}
+
+	if pq.Len() != 0 {
+		t.Fatalf("expected new queue to have zero size, got (%d)", pq.Len())
+	}
+}
+
+func TestPriorityQueue_Push(t *testing.T) {
+	pq := New()
+
+	// don't allow nil pushing
+	if err := pq.Push(nil); err == nil {
+		t.Fatal("Expected error on pushing nil")
+	}
+
+	tc := testCases()
+	tcl := len(tc)
+	for _, i := range tc {
+		if err := pq.Push(i); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	if pq.Len() != tcl {
+		t.Fatalf("error adding items, expected (%d) items, got (%d)", tcl, pq.Len())
+	}
+
+	testValidateInternalData(t, pq, len(tc), false)
+
+	item, err := pq.Pop()
+	if err != nil {
+		t.Fatalf("error popping item: %s", err)
+	}
+	if tc[0].Priority != item.Priority {
+		t.Fatalf("expected tc[0] and popped item to match, got (%q) and (%q)", tc[0], item.Priority)
+	}
+	if tc[0].Key != item.Key {
+		t.Fatalf("expected tc[0] and popped item to match, got (%q) and (%q)", tc[0], item.Priority)
+	}
+
+	testValidateInternalData(t, pq, len(tc)-1, false)
+
+	// push a duplicate item; the queue should reject it
+	dErr := pq.Push(tc[1])
+	if dErr != ErrDuplicateItem {
+		t.Fatal(dErr)
+	}
+	// push item with no key
+	tc[2].Key = ""
+	kErr := pq.Push(tc[2])
+	if kErr != nil && kErr.Error() != "error adding item: Item Key is required" {
+		t.Fatal(kErr)
+	}
+
+	testValidateInternalData(t, pq, len(tc)-1, true)
+
+	// check nil,nil error for not found
+	i, err := pq.PopByKey("empty")
+	if err != nil || i != nil {
+		t.Fatalf("expected nil error for PopByKey of non-existing key, got: %s", err)
+	}
+}
+
+func TestPriorityQueue_Pop(t *testing.T) {
+	pq := New()
+
+	tc := testCases()
+	for _, i := range tc {
+		if err := pq.Push(i); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	topItem, err := pq.Pop()
+	if err != nil {
+		t.Fatalf("error calling pop: %s", err)
+	}
+	if tc[0].Priority != topItem.Priority {
+		t.Fatalf("expected tc[0] and popped item to match, got (%q) and (%q)", tc[0], topItem.Priority)
+	}
+	if tc[0].Key != topItem.Key {
+		t.Fatalf("expected tc[0] and popped item to match, got (%q) and (%q)", tc[0], topItem.Priority)
+	}
+
+	var items []*Item
+	items = append(items, topItem)
+	// pop the remaining items, compare size of input and output
+	i, _ := pq.Pop()
+	for ; i != nil; i, _ = pq.Pop() {
+		items = append(items, i)
+	}
+	if len(items) != len(tc) {
+		t.Fatalf("expected popped item count to match test cases, got (%d)", len(items))
+	}
+}
+
+func TestPriorityQueue_PopByKey(t *testing.T) {
+	pq := New()
+
+	tc := testCases()
+	for _, i := range tc {
+		if err := pq.Push(i); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	// grab the top priority item, to capture its value for checking later
+	item, _ := pq.Pop()
+	oldPriority := item.Priority
+	oldKey := item.Key
+
+	// push the item back on, so it gets removed with PopByKey and we verify
+	// the top item has changed later
+	err := pq.Push(item)
+	if err != nil {
+		t.Fatalf("error re-pushing top item: %s", err)
+	}
+
+	popKeys := []int{2, 4, 7, 1, 0}
+	for _, i := range popKeys {
+		item, err := pq.PopByKey(fmt.Sprintf("item-%d", i))
+		if err != nil {
+			t.Fatalf("failed to pop item-%d, \n\terr: %s\n\titem: %#v", i, err, item)
+		}
+	}
+
+	testValidateInternalData(t, pq, len(tc)-len(popKeys), false)
+
+	// grab the top priority item again, to compare with the top item priority
+	// from above
+	item, _ = pq.Pop()
+	newPriority := item.Priority
+	newKey := item.Key
+
+	if oldPriority == newPriority || oldKey == newKey {
+		t.Fatalf("expected old/new key and priority to differ, got (%s/%s) and (%d/%d)", oldKey, newKey, oldPriority, newPriority)
+	}
+
+	testValidateInternalData(t, pq, len(tc)-len(popKeys)-1, true)
+}
+
+// testValidateInternalData checks the internal data structure of the PriorityQueue
+// and verifies that items are in-sync. Use drain only at the end of a test,
+// because it will mutate the input queue
+func testValidateInternalData(t *testing.T, pq *PriorityQueue, expectedSize int, drain bool) {
+	actualSize := pq.Len()
+	if actualSize != expectedSize {
+		t.Fatalf("expected new queue size to be (%d), got (%d)", expectedSize, actualSize)
+	}
+
+	if len(pq.data) != len(pq.dataMap) || len(pq.data) != expectedSize {
+		t.Fatalf("error in queue/map size, expected data and map to be (%d), got (%d) and (%d)", expectedSize, len(pq.data), len(pq.dataMap))
+	}
+
+	if drain && pq.Len() > 0 {
+		// pop all the items, verify lengths
+		i, _ := pq.Pop()
+		for ; i != nil; i, _ = pq.Pop() {
+			expectedSize--
+			if len(pq.data) != len(pq.dataMap) || len(pq.data) != expectedSize {
+				t.Fatalf("error in queue/map size, expected data and map to be (%d), got (%d) and (%d)", expectedSize, len(pq.data), len(pq.dataMap))
+			}
+		}
+	}
+}
diff --git a/serviceregistration/consul/consul_service_registration.go b/serviceregistration/consul/consul_service_registration.go
new file mode 100644
index 0000000..a49ab4f
--- /dev/null
+++ b/serviceregistration/consul/consul_service_registration.go
@@ -0,0 +1,640 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package consul
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"math/rand"
+	"net"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/hashicorp/consul/api"
+	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/go-secure-stdlib/parseutil"
+	"github.com/hashicorp/go-secure-stdlib/strutil"
+	"github.com/hashicorp/go-secure-stdlib/tlsutil"
+	"github.com/hashicorp/vault/sdk/helper/consts"
+	sr "github.com/hashicorp/vault/serviceregistration"
+	"github.com/hashicorp/vault/vault/diagnose"
+	atomicB "go.uber.org/atomic"
+	"golang.org/x/net/http2"
+)
+
+const (
+	// checkJitterFactor specifies the jitter factor used to stagger checks
+	checkJitterFactor = 16
+
+	// checkMinBuffer provides a guarantee that a check will not
+	// be executed too close to the TTL check timeout
+	checkMinBuffer = 100 * time.Millisecond
+
+	// consulRetryInterval specifies the retry duration to use when an
+	// API call to the Consul agent fails.
+	consulRetryInterval = 1 * time.Second
+
+	// defaultCheckTimeout changes the timeout of TTL checks
+	defaultCheckTimeout = 5 * time.Second
+
+	// DefaultServiceName is the default Consul service name used when
+	// advertising a Vault instance.
+	DefaultServiceName = "vault"
+
+	// reconcileTimeout is how often Vault should query Consul to detect
+	// and fix any state drift.
+	reconcileTimeout = 60 * time.Second
+
+	// metaExternalSource is a metadata value for external-source that can be
+	// used by the Consul UI.
+	metaExternalSource = "vault"
+)
+
+var hostnameRegex = regexp.MustCompile(`^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$`)
+
+// serviceRegistration is a ServiceRegistration that advertises the state of
+// Vault to Consul.
+type serviceRegistration struct {
+	Client *api.Client
+
+	logger              log.Logger
+	serviceLock         sync.RWMutex
+	redirectHost        string
+	redirectPort        int64
+	serviceName         string
+	serviceTags         []string
+	serviceAddress      *string
+	disableRegistration bool
+	checkTimeout        time.Duration
+
+	notifyActiveCh      chan struct{}
+	notifySealedCh      chan struct{}
+	notifyPerfStandbyCh chan struct{}
+	notifyInitializedCh chan struct{}
+
+	isActive      *atomicB.Bool
+	isSealed      *atomicB.Bool
+	isPerfStandby *atomicB.Bool
+	isInitialized *atomicB.Bool
+}
+
+// NewServiceRegistration constructs a Consul-based ServiceRegistration.
+func NewServiceRegistration(conf map[string]string, logger log.Logger, state sr.State) (sr.ServiceRegistration, error) {
+	// Allow admins to disable Consul integration
+	disableReg, ok := conf["disable_registration"]
+	var disableRegistration bool
+	if ok && disableReg != "" {
+		b, err := parseutil.ParseBool(disableReg)
+		if err != nil {
+			return nil, fmt.Errorf("failed parsing disable_registration parameter: %w", err)
+		}
+		disableRegistration = b
+	}
+	if logger.IsDebug() {
+		logger.Debug("config disable_registration set", "disable_registration", disableRegistration)
+	}
+
+	// Get the service name to advertise in Consul
+	service, ok := conf["service"]
+	if !ok {
+		service = DefaultServiceName
+	}
+	if !hostnameRegex.MatchString(service) {
+		return nil, errors.New("service name must be valid per RFC 1123 and can contain only alphanumeric characters or dashes")
+	}
+	if logger.IsDebug() {
+		logger.Debug("config service set", "service", service)
+	}
+
+	// Get the additional tags to attach to the registered service name
+	tags := conf["service_tags"]
+	if logger.IsDebug() {
+		logger.Debug("config service_tags set", "service_tags", tags)
+	}
+
+	// Get the service-specific address to override the use of the HA redirect address
+	var serviceAddr *string
+	serviceAddrStr, ok := conf["service_address"]
+	if ok {
+		serviceAddr = &serviceAddrStr
+	}
+	if logger.IsDebug() {
+		logger.Debug("config service_address set", "service_address", serviceAddrStr)
+	}
+
+	checkTimeout := defaultCheckTimeout
+	checkTimeoutStr, ok := conf["check_timeout"]
+	if ok {
+		d, err := parseutil.ParseDurationSecond(checkTimeoutStr)
+		if err != nil {
+			return nil, err
+		}
+
+		min, _ := durationMinusBufferDomain(d, checkMinBuffer, checkJitterFactor)
+		if min < checkMinBuffer {
+			return nil, fmt.Errorf("consul check_timeout must be greater than %v", min)
+		}
+
+		checkTimeout = d
+		if logger.IsDebug() {
+			logger.Debug("config check_timeout set", "check_timeout", d)
+		}
+	}
+
+	// Configure the client
+	consulConf := api.DefaultConfig()
+	// Set MaxIdleConnsPerHost to the number of processes used in expiration.Restore
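+	// (consts.ExpirationRestoreWorkerCount comes from sdk/helper/consts;
+	// matching it lets the lease-restore workers reuse idle connections
+	// rather than each opening a fresh one.)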
+	consulConf.Transport.MaxIdleConnsPerHost = consts.ExpirationRestoreWorkerCount
+
+	SetupSecureTLS(context.Background(), consulConf, conf, logger, false)
+
+	consulConf.HttpClient = &http.Client{Transport: consulConf.Transport}
+	client, err := api.NewClient(consulConf)
+	if err != nil {
+		return nil, fmt.Errorf("client setup failed: %w", err)
+	}
+
+	// Setup the backend
+	c := &serviceRegistration{
+		Client: client,
+
+		logger:              logger,
+		serviceName:         service,
+		serviceTags:         strutil.ParseDedupLowercaseAndSortStrings(tags, ","),
+		serviceAddress:      serviceAddr,
+		checkTimeout:        checkTimeout,
+		disableRegistration: disableRegistration,
+
+		notifyActiveCh:      make(chan struct{}),
+		notifySealedCh:      make(chan struct{}),
+		notifyPerfStandbyCh: make(chan struct{}),
+		notifyInitializedCh: make(chan struct{}),
+
+		isActive:      atomicB.NewBool(state.IsActive),
+		isSealed:      atomicB.NewBool(state.IsSealed),
+		isPerfStandby: atomicB.NewBool(state.IsPerformanceStandby),
+		isInitialized: atomicB.NewBool(state.IsInitialized),
+	}
+	return c, nil
+}
+
+func SetupSecureTLS(ctx context.Context, consulConf *api.Config, conf map[string]string, logger log.Logger, isDiagnose bool) error {
+	if addr, ok := conf["address"]; ok {
+		consulConf.Address = addr
+		if logger.IsDebug() {
+			logger.Debug("config address set", "address", addr)
+		}
+
+		// Copied from the Consul API module; set the Scheme based on
+		// the protocol field if address looks like a URL.
+		// This can enable the TLS configuration below.
+		parts := strings.SplitN(addr, "://", 2)
+		if len(parts) == 2 {
+			if parts[0] == "http" || parts[0] == "https" {
+				consulConf.Scheme = parts[0]
+				consulConf.Address = parts[1]
+				if logger.IsDebug() {
+					logger.Debug("config address parsed", "scheme", parts[0])
+					logger.Debug("config scheme parsed", "address", parts[1])
+				}
+			} // allow "unix:" or whatever else consul supports in the future
+		}
+	}
+	if scheme, ok := conf["scheme"]; ok {
+		consulConf.Scheme = scheme
+		if logger.IsDebug() {
+			logger.Debug("config scheme set", "scheme", scheme)
+		}
+	}
+	if token, ok := conf["token"]; ok {
+		consulConf.Token = token
+		logger.Debug("config token set")
+	}
+
+	if consulConf.Scheme == "https" {
+		if isDiagnose {
+			certPath, okCert := conf["tls_cert_file"]
+			keyPath, okKey := conf["tls_key_file"]
+			if okCert && okKey {
+				warnings, err := diagnose.TLSFileChecks(certPath, keyPath)
+				for _, warning := range warnings {
+					diagnose.Warn(ctx, warning)
+				}
+				if err != nil {
+					return err
+				}
+				return nil
+			}
+			return fmt.Errorf("key or cert path: %s, %s, cannot be loaded from consul config file", certPath, keyPath)
+		}
+
+		// Use the parsed Address instead of the raw conf['address']
+		tlsClientConfig, err := tlsutil.SetupTLSConfig(conf, consulConf.Address)
+		if err != nil {
+			return err
+		}
+
+		consulConf.Transport.TLSClientConfig = tlsClientConfig
+		if err := http2.ConfigureTransport(consulConf.Transport); err != nil {
+			return err
+		}
+		logger.Debug("configured TLS")
+	} else {
+		if isDiagnose {
+			diagnose.Skipped(ctx, "HTTPS is not used, skipping TLS verification.")
+		}
+	}
+	return nil
+}
+
+func (c *serviceRegistration) Run(shutdownCh <-chan struct{}, wait *sync.WaitGroup, redirectAddr string) error {
+	go func() {
+		if err := c.runServiceRegistration(wait, shutdownCh, redirectAddr); err != nil {
+			if c.logger.IsError() {
+				c.logger.Error(fmt.Sprintf("error running service registration: %s", err))
+			}
+		}
+	}()
+	return nil
+}
+
+func (c *serviceRegistration) NotifyActiveStateChange(isActive bool) error {
+	c.isActive.Store(isActive)
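+	// Best-effort wakeup: the send below must never block the caller.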
+	select {
+	case c.notifyActiveCh <- struct{}{}:
+	default:
+		// NOTE: If this occurs Vault's active status could be out of
+		// sync with Consul until reconcileTimer expires.
+		c.logger.Warn("concurrent state change notify dropped")
+	}
+
+	return nil
+}
+
+func (c *serviceRegistration) NotifyPerformanceStandbyStateChange(isStandby bool) error {
+	c.isPerfStandby.Store(isStandby)
+	select {
+	case c.notifyPerfStandbyCh <- struct{}{}:
+	default:
+		// NOTE: If this occurs Vault's active status could be out of
+		// sync with Consul until reconcileTimer expires.
+		c.logger.Warn("concurrent state change notify dropped")
+	}
+
+	return nil
+}
+
+func (c *serviceRegistration) NotifySealedStateChange(isSealed bool) error {
+	c.isSealed.Store(isSealed)
+	select {
+	case c.notifySealedCh <- struct{}{}:
+	default:
+		// NOTE: If this occurs Vault's sealed status could be out of
+		// sync with Consul until checkTimer expires.
+		c.logger.Warn("concurrent sealed state change notify dropped")
+	}
+
+	return nil
+}
+
+func (c *serviceRegistration) NotifyInitializedStateChange(isInitialized bool) error {
+	c.isInitialized.Store(isInitialized)
+	select {
+	case c.notifyInitializedCh <- struct{}{}:
+	default:
+		// NOTE: If this occurs Vault's initialized status could be out of
+		// sync with Consul until checkTimer expires.
+		c.logger.Warn("concurrent initialize state change notify dropped")
+	}
+
+	return nil
+}
+
+func (c *serviceRegistration) checkDuration() time.Duration {
+	return durationMinusBuffer(c.checkTimeout, checkMinBuffer, checkJitterFactor)
+}
+
+func (c *serviceRegistration) runServiceRegistration(waitGroup *sync.WaitGroup, shutdownCh <-chan struct{}, redirectAddr string) (err error) {
+	if err := c.setRedirectAddr(redirectAddr); err != nil {
+		return err
+	}
+
+	// 'server' command will wait for the below goroutine to complete
+	waitGroup.Add(1)
+
+	go c.runEventDemuxer(waitGroup, shutdownCh)
+
+	return nil
+}
+
+func (c *serviceRegistration) runEventDemuxer(waitGroup *sync.WaitGroup, shutdownCh <-chan struct{}) {
+	// This defer statement should be executed last. So push it first.
+	defer waitGroup.Done()
+
+	// Fire the reconcileTimer immediately upon starting the event demuxer
+	reconcileTimer := time.NewTimer(0)
+	defer reconcileTimer.Stop()
+
+	// Schedule the first check. Consul TTL checks are passing by
+	// default, checkTimer does not need to be run immediately.
+	checkTimer := time.NewTimer(c.checkDuration())
+	defer checkTimer.Stop()
+
+	// Use a reactor pattern to handle and dispatch events to singleton
+	// goroutine handlers for execution. It is not acceptable to drop
+	// inbound events from Notify*().
+	//
+	// goroutines are dispatched if the demuxer can acquire a lock (via
+	// an atomic CAS incr) on the handler. Handlers are responsible for
+	// deregistering themselves (atomic CAS decr). Handlers and the
+	// demuxer share a lock to synchronize information at the beginning
+	// and end of a handler's life (or after a handler wakes up from
+	// sleeping during a back-off/retry).
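+	//
+	// Illustratively: a handler slot is claimed with
+	// atomic.CompareAndSwapInt32(lock, 0, 1) and released by the matching
+	// CompareAndSwapInt32(lock, 1, 0) in a defer, so at most one handler of
+	// each kind runs at a time.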
+ var shutdown atomicB.Bool + var registeredServiceID string + checkLock := new(int32) + serviceRegLock := new(int32) + + for !shutdown.Load() { + select { + case <-c.notifyActiveCh: + // Run reconcile immediately upon active state change notification + reconcileTimer.Reset(0) + case <-c.notifySealedCh: + // Run check timer immediately upon a seal state change notification + checkTimer.Reset(0) + case <-c.notifyPerfStandbyCh: + // Run check timer immediately upon a perfstandby state change notification + checkTimer.Reset(0) + case <-c.notifyInitializedCh: + // Run check timer immediately upon an initialized state change notification + checkTimer.Reset(0) + case <-reconcileTimer.C: + // Unconditionally rearm the reconcileTimer + reconcileTimer.Reset(reconcileTimeout - randomStagger(reconcileTimeout/checkJitterFactor)) + + // Abort if service discovery is disabled or a + // reconcile handler is already active + if !c.disableRegistration && atomic.CompareAndSwapInt32(serviceRegLock, 0, 1) { + // Enter handler with serviceRegLock held + go func() { + defer atomic.CompareAndSwapInt32(serviceRegLock, 1, 0) + for !shutdown.Load() { + serviceID, err := c.reconcileConsul(registeredServiceID) + if err != nil { + if c.logger.IsWarn() { + c.logger.Warn("reconcile unable to talk with Consul backend", "error", err) + } + time.Sleep(consulRetryInterval) + continue + } + + c.serviceLock.Lock() + defer c.serviceLock.Unlock() + + registeredServiceID = serviceID + return + } + }() + } + case <-checkTimer.C: + checkTimer.Reset(c.checkDuration()) + // Abort if service discovery is disabled or a + // reconcile handler is active + if !c.disableRegistration && atomic.CompareAndSwapInt32(checkLock, 0, 1) { + // Enter handler with checkLock held + go func() { + defer atomic.CompareAndSwapInt32(checkLock, 1, 0) + for !shutdown.Load() { + if err := c.runCheck(c.isSealed.Load()); err != nil { + if c.logger.IsWarn() { + c.logger.Warn("check unable to talk with Consul backend", "error", err) + } + time.Sleep(consulRetryInterval) + continue + } + return + } + }() + } + case <-shutdownCh: + c.logger.Info("shutting down consul backend") + shutdown.Store(true) + } + } + + c.serviceLock.RLock() + defer c.serviceLock.RUnlock() + if err := c.Client.Agent().ServiceDeregister(registeredServiceID); err != nil { + if c.logger.IsWarn() { + c.logger.Warn("service deregistration failed", "error", err) + } + } +} + +// checkID returns the ID used for a Consul Check. Assume at least a read +// lock is held. +func (c *serviceRegistration) checkID() string { + return fmt.Sprintf("%s:vault-sealed-check", c.serviceID()) +} + +// serviceID returns the Vault ServiceID for use in Consul. Assume at least +// a read lock is held. +func (c *serviceRegistration) serviceID() string { + return fmt.Sprintf("%s:%s:%d", c.serviceName, c.redirectHost, c.redirectPort) +} + +// reconcileConsul queries the state of Vault Core and Consul and fixes up +// Consul's state according to what's in Vault. reconcileConsul is called +// without any locks held and can be run concurrently, therefore no changes +// to serviceRegistration can be made in this method (i.e. wtb const receiver for +// compiler enforced safety). 
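+// (In other words: Go has no const receivers, so this is a convention the
+// caller upholds; runEventDemuxer stores the returned serviceID under
+// serviceLock.)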
+func (c *serviceRegistration) reconcileConsul(registeredServiceID string) (serviceID string, err error) { + agent := c.Client.Agent() + catalog := c.Client.Catalog() + + serviceID = c.serviceID() + + // Get the current state of Vault from Consul + var currentVaultService *api.CatalogService + if services, _, err := catalog.Service(c.serviceName, "", &api.QueryOptions{AllowStale: true}); err == nil { + for _, service := range services { + if serviceID == service.ServiceID { + currentVaultService = service + break + } + } + } + + tags := c.fetchServiceTags(c.isActive.Load(), c.isPerfStandby.Load(), c.isInitialized.Load()) + + var reregister bool + + switch { + case currentVaultService == nil, registeredServiceID == "": + reregister = true + default: + switch { + case !strutil.EquivalentSlices(currentVaultService.ServiceTags, tags): + reregister = true + } + } + + if !reregister { + // When re-registration is not required, return a valid serviceID + // to avoid registration in the next cycle. + return serviceID, nil + } + + // If service address was set explicitly in configuration, use that + // as the service-specific address instead of the HA redirect address. + var serviceAddress string + if c.serviceAddress == nil { + serviceAddress = c.redirectHost + } else { + serviceAddress = *c.serviceAddress + } + + service := &api.AgentServiceRegistration{ + ID: serviceID, + Name: c.serviceName, + Tags: tags, + Port: int(c.redirectPort), + Address: serviceAddress, + EnableTagOverride: false, + Meta: map[string]string{ + "external-source": metaExternalSource, + }, + } + + checkStatus := api.HealthCritical + if !c.isSealed.Load() { + checkStatus = api.HealthPassing + } + + sealedCheck := &api.AgentCheckRegistration{ + ID: c.checkID(), + Name: "Vault Sealed Status", + Notes: "Vault service is healthy when Vault is in an unsealed status and can become an active Vault server", + ServiceID: serviceID, + AgentServiceCheck: api.AgentServiceCheck{ + TTL: c.checkTimeout.String(), + Status: checkStatus, + }, + } + + if err := agent.ServiceRegister(service); err != nil { + return "", fmt.Errorf(`service registration failed: %w`, err) + } + + if err := agent.CheckRegister(sealedCheck); err != nil { + return serviceID, fmt.Errorf(`service check registration failed: %w`, err) + } + + return serviceID, nil +} + +// runCheck immediately pushes a TTL check. +func (c *serviceRegistration) runCheck(sealed bool) error { + // Run a TTL check + agent := c.Client.Agent() + if !sealed { + return agent.PassTTL(c.checkID(), "Vault Unsealed") + } else { + return agent.FailTTL(c.checkID(), "Vault Sealed") + } +} + +// fetchServiceTags returns all of the relevant tags for Consul. 
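+// Note that when perfStandby is true the result is rebuilt from the base
+// serviceTags, so the active/standby tag is dropped in favor of
+// "performance-standby" (TestConsul_serviceTags covers the expected
+// combinations).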
+func (c *serviceRegistration) fetchServiceTags(active, perfStandby, initialized bool) []string {
+	activeTag := "standby"
+	if active {
+		activeTag = "active"
+	}
+
+	result := append(c.serviceTags, activeTag)
+
+	if perfStandby {
+		result = append(c.serviceTags, "performance-standby")
+	}
+
+	if initialized {
+		result = append(result, "initialized")
+	}
+
+	return result
+}
+
+func (c *serviceRegistration) setRedirectAddr(addr string) (err error) {
+	if addr == "" {
+		return fmt.Errorf("redirect address must not be empty")
+	}
+
+	url, err := url.Parse(addr)
+	if err != nil {
+		return fmt.Errorf("failed to parse redirect URL %q: %w", addr, err)
+	}
+
+	var portStr string
+	c.redirectHost, portStr, err = net.SplitHostPort(url.Host)
+	if err != nil {
+		if url.Scheme == "http" {
+			portStr = "80"
+		} else if url.Scheme == "https" {
+			portStr = "443"
+		} else if url.Scheme == "unix" {
+			portStr = "-1"
+			c.redirectHost = url.Path
+		} else {
+			return fmt.Errorf("failed to find a host:port in redirect address %q: %w", url.Host, err)
+		}
+	}
+	c.redirectPort, err = strconv.ParseInt(portStr, 10, 0)
+	if err != nil || c.redirectPort < -1 || c.redirectPort > 65535 {
+		return fmt.Errorf("failed to parse valid port %q: %w", portStr, err)
+	}
+
+	return nil
+}
+
+// durationMinusBuffer returns the given duration with a buffer and jitter
+// subtracted from it. This function is used primarily for
+// servicing Consul TTL Checks in advance of the TTL.
+func durationMinusBuffer(intv time.Duration, buffer time.Duration, jitter int64) time.Duration {
+	d := intv - buffer
+	if jitter == 0 {
+		d -= randomStagger(d)
+	} else {
+		d -= randomStagger(time.Duration(int64(d) / jitter))
+	}
+	return d
+}
+
+// durationMinusBufferDomain returns the domain of valid durations from a
+// call to durationMinusBuffer. This function is used to check user-specified
+// input values to durationMinusBuffer.
+func durationMinusBufferDomain(intv time.Duration, buffer time.Duration, jitter int64) (min time.Duration, max time.Duration) {
+	max = intv - buffer
+	if jitter == 0 {
+		min = max
+	} else {
+		min = max - time.Duration(int64(max)/jitter)
+	}
+	return min, max
+}
+
+// randomStagger returns an interval between 0 and the duration
+func randomStagger(intv time.Duration) time.Duration {
+	if intv == 0 {
+		return 0
+	}
+	return time.Duration(uint64(rand.Int63()) % uint64(intv))
+}
diff --git a/serviceregistration/consul/consul_service_registration_test.go b/serviceregistration/consul/consul_service_registration_test.go
new file mode 100644
index 0000000..0ced651
--- /dev/null
+++ b/serviceregistration/consul/consul_service_registration_test.go
@@ -0,0 +1,565 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package consul
+
+import (
+	"os"
+	"reflect"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/go-test/deep"
+	"github.com/hashicorp/consul/api"
+	log "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/vault/helper/testhelpers/consul"
+	"github.com/hashicorp/vault/sdk/helper/logging"
+	"github.com/hashicorp/vault/sdk/physical"
+	"github.com/hashicorp/vault/sdk/physical/inmem"
+	sr "github.com/hashicorp/vault/serviceregistration"
+	"github.com/hashicorp/vault/vault"
+)
+
+type consulConf map[string]string
+
+func testConsulServiceRegistration(t *testing.T) *serviceRegistration {
+	return testConsulServiceRegistrationConfig(t, &consulConf{})
+}
+
+func testConsulServiceRegistrationConfig(t *testing.T, conf *consulConf) *serviceRegistration {
+	logger := logging.NewVaultLogger(log.Debug)
+
+	shutdownCh := make(chan struct{})
+	defer func() {
+		close(shutdownCh)
+	}()
+	be, err := NewServiceRegistration(*conf, logger, sr.State{})
+	if err != nil {
+		t.Fatalf("Expected Consul to initialize: %v", err)
+	}
+	if err := be.Run(shutdownCh, &sync.WaitGroup{}, ""); err != nil {
+		t.Fatal(err)
+	}
+
+	c, ok := be.(*serviceRegistration)
+	if !ok {
+		t.Fatalf("Expected serviceRegistration")
+	}
+
+	return c
+}
+
+// TestConsul_ServiceRegistration tests whether Consul ServiceRegistration works
+func TestConsul_ServiceRegistration(t *testing.T) {
+	// Prepare a docker-based Consul instance
+	cleanup, config := consul.PrepareTestContainer(t, "", false, true)
+	defer cleanup()
+
+	// Create a Consul client
+	client, err := api.NewClient(config.APIConfig())
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// waitForServices waits for the services in the Consul catalog to
+	// reach an expected value, failing the test if that doesn't happen in time.
+ waitForServices := func(t *testing.T, expected map[string][]string) map[string][]string { + t.Helper() + // Wait for up to 10 seconds + var services map[string][]string + var err error + for i := 0; i < 10; i++ { + services, _, err = client.Catalog().Services(nil) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(services, expected); diff == nil { + return services + } + time.Sleep(time.Second) + } + t.Fatalf("Catalog Services never reached: got: %v, expected state: %v", services, expected) + return nil + } + + shutdownCh := make(chan struct{}) + defer func() { + close(shutdownCh) + }() + const redirectAddr = "http://127.0.0.1:8200" + + // Create a ServiceRegistration that points to our consul instance + logger := logging.NewVaultLogger(log.Trace) + sd, err := NewServiceRegistration(map[string]string{ + "address": config.Address(), + "token": config.Token, + }, logger, sr.State{}) + if err != nil { + t.Fatal(err) + } + if err := sd.Run(shutdownCh, &sync.WaitGroup{}, redirectAddr); err != nil { + t.Fatal(err) + } + + // Create the core + inm, err := inmem.NewInmemHA(nil, logger) + if err != nil { + t.Fatal(err) + } + inmha, err := inmem.NewInmemHA(nil, logger) + if err != nil { + t.Fatal(err) + } + core, err := vault.NewCore(&vault.CoreConfig{ + ServiceRegistration: sd, + Physical: inm, + HAPhysical: inmha.(physical.HABackend), + RedirectAddr: redirectAddr, + DisableMlock: true, + }) + if err != nil { + t.Fatal(err) + } + defer core.Shutdown() + + waitForServices(t, map[string][]string{ + "consul": {}, + "vault": {"standby"}, + }) + + // Initialize and unseal the core + keys, _ := vault.TestCoreInit(t, core) + for _, key := range keys { + if _, err := vault.TestCoreUnseal(core, vault.TestKeyCopy(key)); err != nil { + t.Fatalf("unseal err: %s", err) + } + } + if core.Sealed() { + t.Fatal("should not be sealed") + } + + // Wait for the core to become active + vault.TestWaitActive(t, core) + + waitForServices(t, map[string][]string{ + "consul": {}, + "vault": {"active", "initialized"}, + }) +} + +func TestConsul_ServiceAddress(t *testing.T) { + tests := []struct { + consulConfig map[string]string + serviceAddrNil bool + }{ + { + consulConfig: map[string]string{ + "service_address": "", + }, + }, + { + consulConfig: map[string]string{ + "service_address": "vault.example.com", + }, + }, + { + serviceAddrNil: true, + }, + } + + for _, test := range tests { + shutdownCh := make(chan struct{}) + logger := logging.NewVaultLogger(log.Debug) + + be, err := NewServiceRegistration(test.consulConfig, logger, sr.State{}) + if err != nil { + t.Fatalf("expected Consul to initialize: %v", err) + } + if err := be.Run(shutdownCh, &sync.WaitGroup{}, ""); err != nil { + t.Fatal(err) + } + + c, ok := be.(*serviceRegistration) + if !ok { + t.Fatalf("Expected ConsulServiceRegistration") + } + + if test.serviceAddrNil { + if c.serviceAddress != nil { + t.Fatalf("expected service address to be nil") + } + } else { + if c.serviceAddress == nil { + t.Fatalf("did not expect service address to be nil") + } + } + close(shutdownCh) + } +} + +func TestConsul_newConsulServiceRegistration(t *testing.T) { + tests := []struct { + name string + consulConfig map[string]string + fail bool + redirectAddr string + checkTimeout time.Duration + path string + service string + address string + scheme string + token string + max_parallel int + disableReg bool + consistencyMode string + }{ + { + name: "Valid default config", + consulConfig: map[string]string{}, + checkTimeout: 5 * time.Second, + redirectAddr: 
"http://127.0.0.1:8200", + path: "vault/", + service: "vault", + address: "127.0.0.1:8500", + scheme: "http", + token: "", + max_parallel: 4, + disableReg: false, + consistencyMode: "default", + }, + { + name: "Valid modified config", + consulConfig: map[string]string{ + "path": "seaTech/", + "service": "astronomy", + "redirect_addr": "http://127.0.0.2:8200", + "check_timeout": "6s", + "address": "127.0.0.2", + "scheme": "https", + "token": "deadbeef-cafeefac-deadc0de-feedface", + "max_parallel": "4", + "disable_registration": "false", + "consistency_mode": "strong", + }, + checkTimeout: 6 * time.Second, + path: "seaTech/", + service: "astronomy", + redirectAddr: "http://127.0.0.2:8200", + address: "127.0.0.2", + scheme: "https", + token: "deadbeef-cafeefac-deadc0de-feedface", + max_parallel: 4, + consistencyMode: "strong", + }, + { + name: "Unix socket", + consulConfig: map[string]string{ + "address": "unix:///tmp/.consul.http.sock", + }, + address: "/tmp/.consul.http.sock", + scheme: "http", // Default, not overridden? + + // Defaults + checkTimeout: 5 * time.Second, + redirectAddr: "http://127.0.0.1:8200", + path: "vault/", + service: "vault", + token: "", + max_parallel: 4, + disableReg: false, + consistencyMode: "default", + }, + { + name: "Scheme in address", + consulConfig: map[string]string{ + "address": "https://127.0.0.2:5000", + }, + address: "127.0.0.2:5000", + scheme: "https", + + // Defaults + checkTimeout: 5 * time.Second, + redirectAddr: "http://127.0.0.1:8200", + path: "vault/", + service: "vault", + token: "", + max_parallel: 4, + disableReg: false, + consistencyMode: "default", + }, + { + name: "check timeout too short", + fail: true, + consulConfig: map[string]string{ + "check_timeout": "99ms", + }, + }, + } + + for _, test := range tests { + shutdownCh := make(chan struct{}) + logger := logging.NewVaultLogger(log.Debug) + + be, err := NewServiceRegistration(test.consulConfig, logger, sr.State{}) + if test.fail { + if err == nil { + t.Fatalf(`Expected config "%s" to fail`, test.name) + } else { + continue + } + } else if !test.fail && err != nil { + t.Fatalf("Expected config %s to not fail: %v", test.name, err) + } + if err := be.Run(shutdownCh, &sync.WaitGroup{}, ""); err != nil { + t.Fatal(err) + } + + c, ok := be.(*serviceRegistration) + if !ok { + t.Fatalf("Expected ConsulServiceRegistration: %s", test.name) + } + c.disableRegistration = true + + if !c.disableRegistration { + addr := os.Getenv("CONSUL_HTTP_ADDR") + if addr == "" { + continue + } + } + + if test.checkTimeout != c.checkTimeout { + t.Errorf("bad: %v != %v", test.checkTimeout, c.checkTimeout) + } + + if test.service != c.serviceName { + t.Errorf("bad: %v != %v", test.service, c.serviceName) + } + + // The configuration stored in the Consul "client" object is not exported, so + // we either have to skip validating it, or add a method to export it, or use reflection. 
+ consulConfig := reflect.Indirect(reflect.ValueOf(c.Client)).FieldByName("config") + consulConfigScheme := consulConfig.FieldByName("Scheme").String() + consulConfigAddress := consulConfig.FieldByName("Address").String() + + if test.scheme != consulConfigScheme { + t.Errorf("bad scheme value: %v != %v", test.scheme, consulConfigScheme) + } + + if test.address != consulConfigAddress { + t.Errorf("bad address value: %v != %v", test.address, consulConfigAddress) + } + + // FIXME(sean@): Unable to test max_parallel + // if test.max_parallel != cap(c.permitPool) { + // t.Errorf("bad: %v != %v", test.max_parallel, cap(c.permitPool)) + // } + close(shutdownCh) + } +} + +func TestConsul_serviceTags(t *testing.T) { + tests := []struct { + active bool + perfStandby bool + initialized bool + tags []string + }{ + { + active: true, + perfStandby: false, + initialized: false, + tags: []string{"active"}, + }, + { + active: false, + perfStandby: false, + initialized: false, + tags: []string{"standby"}, + }, + { + active: false, + perfStandby: true, + initialized: false, + tags: []string{"performance-standby"}, + }, + { + active: true, + perfStandby: true, + initialized: false, + tags: []string{"performance-standby"}, + }, + { + active: true, + perfStandby: false, + initialized: true, + tags: []string{"active", "initialized"}, + }, + { + active: false, + perfStandby: false, + initialized: true, + tags: []string{"standby", "initialized"}, + }, + { + active: false, + perfStandby: true, + initialized: true, + tags: []string{"performance-standby", "initialized"}, + }, + { + active: true, + perfStandby: true, + initialized: true, + tags: []string{"performance-standby", "initialized"}, + }, + } + + c := testConsulServiceRegistration(t) + + for _, test := range tests { + tags := c.fetchServiceTags(test.active, test.perfStandby, test.initialized) + if !reflect.DeepEqual(tags[:], test.tags[:]) { + t.Errorf("Bad %v: %v %v", test.active, tags, test.tags) + } + } +} + +func TestConsul_setRedirectAddr(t *testing.T) { + tests := []struct { + addr string + host string + port int64 + pass bool + }{ + { + addr: "http://127.0.0.1:8200/", + host: "127.0.0.1", + port: 8200, + pass: true, + }, + { + addr: "http://127.0.0.1:8200", + host: "127.0.0.1", + port: 8200, + pass: true, + }, + { + addr: "https://127.0.0.1:8200", + host: "127.0.0.1", + port: 8200, + pass: true, + }, + { + addr: "unix:///tmp/.vault.addr.sock", + host: "/tmp/.vault.addr.sock", + port: -1, + pass: true, + }, + { + addr: "127.0.0.1:8200", + pass: false, + }, + { + addr: "127.0.0.1", + pass: false, + }, + } + for _, test := range tests { + c := testConsulServiceRegistration(t) + err := c.setRedirectAddr(test.addr) + if test.pass { + if err != nil { + t.Fatalf("bad: %v", err) + } + } else { + if err == nil { + t.Fatalf("bad, expected fail") + } else { + continue + } + } + + if c.redirectHost != test.host { + t.Fatalf("bad: %v != %v", c.redirectHost, test.host) + } + + if c.redirectPort != test.port { + t.Fatalf("bad: %v != %v", c.redirectPort, test.port) + } + } +} + +func TestConsul_serviceID(t *testing.T) { + tests := []struct { + name string + redirectAddr string + serviceName string + expected string + valid bool + }{ + { + name: "valid host w/o slash", + redirectAddr: "http://127.0.0.1:8200", + serviceName: "sea-tech-astronomy", + expected: "sea-tech-astronomy:127.0.0.1:8200", + valid: true, + }, + { + name: "valid host w/ slash", + redirectAddr: "http://127.0.0.1:8200/", + serviceName: "sea-tech-astronomy", + expected: 
"sea-tech-astronomy:127.0.0.1:8200", + valid: true, + }, + { + name: "valid https host w/ slash", + redirectAddr: "https://127.0.0.1:8200/", + serviceName: "sea-tech-astronomy", + expected: "sea-tech-astronomy:127.0.0.1:8200", + valid: true, + }, + { + name: "invalid host name", + redirectAddr: "https://127.0.0.1:8200/", + serviceName: "sea_tech_astronomy", + expected: "", + valid: false, + }, + } + + logger := logging.NewVaultLogger(log.Debug) + + for _, test := range tests { + shutdownCh := make(chan struct{}) + be, err := NewServiceRegistration(consulConf{ + "service": test.serviceName, + }, logger, sr.State{}) + if !test.valid { + if err == nil { + t.Fatalf("expected an error initializing for name %q", test.serviceName) + } + continue + } + if test.valid && err != nil { + t.Fatalf("expected Consul to initialize: %v", err) + } + if err := be.Run(shutdownCh, &sync.WaitGroup{}, ""); err != nil { + t.Fatal(err) + } + + c, ok := be.(*serviceRegistration) + if !ok { + t.Fatalf("Expected serviceRegistration") + } + + if err := c.setRedirectAddr(test.redirectAddr); err != nil { + t.Fatalf("bad: %s %v", test.name, err) + } + + serviceID := c.serviceID() + if serviceID != test.expected { + t.Fatalf("bad: %v != %v", serviceID, test.expected) + } + } +} diff --git a/serviceregistration/kubernetes/client/client.go b/serviceregistration/kubernetes/client/client.go new file mode 100644 index 0000000..96d1952 --- /dev/null +++ b/serviceregistration/kubernetes/client/client.go @@ -0,0 +1,288 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package client + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "net/http" + "strings" + "time" + "unicode" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-retryablehttp" +) + +var ( + // Retry configuration + RetryWaitMin = 500 * time.Millisecond + RetryWaitMax = 30 * time.Second + RetryMax = 10 + + // Standard errs + ErrNamespaceUnset = errors.New(`"namespace" is unset`) + ErrPodNameUnset = errors.New(`"podName" is unset`) + ErrNotInCluster = errors.New("unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined") +) + +// Client is a minimal Kubernetes client. We rolled our own because the existing +// Kubernetes client-go library available externally has a high number of dependencies +// and we thought it wasn't worth it for only two API calls. If at some point they break +// the client into smaller modules, or if we add quite a few methods to this client, it may +// be worthwhile to revisit that decision. +type Client struct { + logger hclog.Logger + config *Config + stopCh chan struct{} +} + +// New instantiates a Client. The stopCh is used for exiting retry loops +// when closed. +func New(logger hclog.Logger) (*Client, error) { + config, err := inClusterConfig() + if err != nil { + return nil, err + } + return &Client{ + logger: logger, + config: config, + stopCh: make(chan struct{}), + }, nil +} + +func (c *Client) Shutdown() { + close(c.stopCh) +} + +// GetPod gets a pod from the Kubernetes API. +func (c *Client) GetPod(namespace, podName string) (*Pod, error) { + endpoint := fmt.Sprintf("/api/v1/namespaces/%s/pods/%s", namespace, podName) + method := http.MethodGet + + // Validate that we received required parameters. 
+ if namespace == "" { + return nil, ErrNamespaceUnset + } + if podName == "" { + return nil, ErrPodNameUnset + } + + req, err := http.NewRequest(method, c.config.Host+endpoint, nil) + if err != nil { + return nil, err + } + pod := &Pod{} + if err := c.do(req, pod); err != nil { + return nil, err + } + return pod, nil +} + +// PatchPod updates the pod's tags to the given ones. +// It does so non-destructively, or in other words, without tearing down +// the pod. +func (c *Client) PatchPod(namespace, podName string, patches ...*Patch) error { + endpoint := fmt.Sprintf("/api/v1/namespaces/%s/pods/%s", namespace, podName) + method := http.MethodPatch + + // Validate that we received required parameters. + if namespace == "" { + return ErrNamespaceUnset + } + if podName == "" { + return ErrPodNameUnset + } + if len(patches) == 0 { + // No work to perform. + return nil + } + + var jsonPatches []map[string]interface{} + for _, patch := range patches { + if patch.Operation == Unset { + return errors.New("patch operation must be set") + } + jsonPatches = append(jsonPatches, map[string]interface{}{ + "op": patch.Operation, + "path": patch.Path, + "value": patch.Value, + }) + } + body, err := json.Marshal(jsonPatches) + if err != nil { + return err + } + req, err := http.NewRequest(method, c.config.Host+endpoint, bytes.NewReader(body)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json-patch+json") + return c.do(req, nil) +} + +// do executes the given request, retrying if necessary. +func (c *Client) do(req *http.Request, ptrToReturnObj interface{}) error { + // Finish setting up a valid request. + retryableReq, err := retryablehttp.FromRequest(req) + if err != nil { + return err + } + + // Build a context that will call the cancelFunc when we receive + // a stop from our stopChan. This allows us to exit from our retry + // loop during a shutdown, rather than hanging. + ctx, cancelFunc := context.WithCancel(context.Background()) + go func() { + select { + case <-ctx.Done(): + case <-c.stopCh: + cancelFunc() + } + }() + retryableReq.WithContext(ctx) + + retryableReq.Header.Set("Authorization", "Bearer "+c.config.BearerToken) + retryableReq.Header.Set("Accept", "application/json") + + client := &retryablehttp.Client{ + HTTPClient: cleanhttp.DefaultClient(), + RetryWaitMin: RetryWaitMin, + RetryWaitMax: RetryWaitMax, + RetryMax: RetryMax, + CheckRetry: c.getCheckRetry(req), + Backoff: retryablehttp.DefaultBackoff, + } + client.HTTPClient.Transport = &http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: c.config.CACertPool, + }, + } + + // Execute and retry the request. This client comes with exponential backoff and + // jitter already rolled in. + resp, err := client.Do(retryableReq) + if err != nil { + return err + } + defer func() { + if err := resp.Body.Close(); err != nil { + if c.logger.IsWarn() { + // Failing to close response bodies can present as a memory leak so it's + // important to surface it. + c.logger.Warn(fmt.Sprintf("unable to close response body: %s", err)) + } + } + }() + + // If we're not supposed to read out the body, we have nothing further + // to do here. + if ptrToReturnObj == nil { + return nil + } + + // Attempt to read out the body into the given return object. 
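+	// (Note: an empty response body surfaces here as an io.EOF decode error.)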
+ return json.NewDecoder(resp.Body).Decode(ptrToReturnObj) +} + +func (c *Client) getCheckRetry(req *http.Request) retryablehttp.CheckRetry { + return func(ctx context.Context, resp *http.Response, err error) (bool, error) { + if resp == nil { + return true, fmt.Errorf("nil response: %s", req.URL.RequestURI()) + } + switch resp.StatusCode { + case 200, 201, 202, 204: + // Success. + return false, nil + case 401, 403: + // Perhaps the token from our bearer token file has been refreshed. + config, err := inClusterConfig() + if err != nil { + return false, err + } + if config.BearerToken == c.config.BearerToken { + // It's the same token. + return false, fmt.Errorf("bad status code: %s", sanitizedDebuggingInfo(req, resp.StatusCode)) + } + c.config = config + // Continue to try again, but return the error too in case the caller would rather read it out. + return true, fmt.Errorf("bad status code: %s", sanitizedDebuggingInfo(req, resp.StatusCode)) + case 404: + return false, &ErrNotFound{debuggingInfo: sanitizedDebuggingInfo(req, resp.StatusCode)} + case 500, 502, 503, 504: + // Could be transient. + return true, fmt.Errorf("unexpected status code: %s", sanitizedDebuggingInfo(req, resp.StatusCode)) + } + // Unexpected. + return false, fmt.Errorf("unexpected status code: %s", sanitizedDebuggingInfo(req, resp.StatusCode)) + } +} + +type Pod struct { + Metadata *Metadata `json:"metadata,omitempty"` +} + +type Metadata struct { + Name string `json:"name,omitempty"` + + // This map will be nil if no "labels" key was provided. + // It will be populated but have a length of zero if the + // key was provided, but no values. + Labels map[string]string `json:"labels,omitempty"` +} + +type PatchOperation string + +const ( + Unset PatchOperation = "unset" + Add = "add" + Replace = "replace" +) + +type Patch struct { + Operation PatchOperation + Path string + Value interface{} +} + +type ErrNotFound struct { + debuggingInfo string +} + +func (e *ErrNotFound) Error() string { + return e.debuggingInfo +} + +// Sanitize is for "data" being sent to the Kubernetes API. +// Data must consist of alphanumeric characters, '-', '_' or '.'. +// Any other characters found in the original value will be stripped, +// and the surrounding characters will be concatenated. +func Sanitize(val string) string { + return strings.Map(replaceBadCharsWithDashes, val) +} + +func replaceBadCharsWithDashes(r rune) rune { + if unicode.IsLetter(r) { + return r + } + if unicode.IsNumber(r) { + return r + } + switch string(r) { + case "-", "_", ".": + return r + } + return '-' +} + +// sanitizedDebuggingInfo provides a returnable string that can be used for debugging. This is intentionally somewhat vague +// because we don't want to leak secrets that may be in a request or response body. +func sanitizedDebuggingInfo(req *http.Request, respStatus int) string { + return fmt.Sprintf("req method: %s, req url: %s, resp statuscode: %d", req.Method, req.URL, respStatus) +} diff --git a/serviceregistration/kubernetes/client/client_test.go b/serviceregistration/kubernetes/client/client_test.go new file mode 100644 index 0000000..de11dad --- /dev/null +++ b/serviceregistration/kubernetes/client/client_test.go @@ -0,0 +1,143 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package client + +import ( + "errors" + "os" + "testing" + + "github.com/hashicorp/go-hclog" + kubetest "github.com/hashicorp/vault/serviceregistration/kubernetes/testing" +) + +func TestClient(t *testing.T) { + testState, testConf, closeFunc := kubetest.Server(t) + defer closeFunc() + + Scheme = testConf.ClientScheme + TokenFile = testConf.PathToTokenFile + RootCAFile = testConf.PathToRootCAFile + if err := os.Setenv(EnvVarKubernetesServiceHost, testConf.ServiceHost); err != nil { + t.Fatal(err) + } + if err := os.Setenv(EnvVarKubernetesServicePort, testConf.ServicePort); err != nil { + t.Fatal(err) + } + + client, err := New(hclog.Default()) + if err != nil { + t.Fatal(err) + } + e := &env{ + client: client, + testState: testState, + } + e.TestGetPod(t) + e.TestGetPodNotFound(t) + e.TestUpdatePodTags(t) + e.TestUpdatePodTagsNotFound(t) +} + +type env struct { + client *Client + testState *kubetest.State +} + +func (e *env) TestGetPod(t *testing.T) { + pod, err := e.client.GetPod(kubetest.ExpectedNamespace, kubetest.ExpectedPodName) + if err != nil { + t.Fatal(err) + } + if pod.Metadata.Name != "shell-demo" { + t.Fatalf("expected %q but received %q", "shell-demo", pod.Metadata.Name) + } +} + +func (e *env) TestGetPodNotFound(t *testing.T) { + _, err := e.client.GetPod(kubetest.ExpectedNamespace, "no-exist") + if err == nil { + t.Fatal("expected error because pod is unfound") + } + if wrapped := errors.Unwrap(err); wrapped != nil { + err = wrapped + } + if _, ok := err.(*ErrNotFound); !ok { + t.Fatalf("expected *ErrNotFound but received %T (%s)", err, err) + } +} + +func (e *env) TestUpdatePodTags(t *testing.T) { + if err := e.client.PatchPod(kubetest.ExpectedNamespace, kubetest.ExpectedPodName, &Patch{ + Operation: Add, + Path: "/metadata/labels/fizz", + Value: "buzz", + }); err != nil { + t.Fatal(err) + } + if e.testState.NumPatches() != 1 { + t.Fatalf("expected 1 label but received %+v", e.testState) + } + if e.testState.Get("/metadata/labels/fizz")["value"] != "buzz" { + t.Fatalf("expected buzz but received %q", e.testState.Get("fizz")["value"]) + } + if e.testState.Get("/metadata/labels/fizz")["op"] != "add" { + t.Fatalf("expected add but received %q", e.testState.Get("fizz")["op"]) + } +} + +func (e *env) TestUpdatePodTagsNotFound(t *testing.T) { + err := e.client.PatchPod(kubetest.ExpectedNamespace, "no-exist", &Patch{ + Operation: Add, + Path: "/metadata/labels/fizz", + Value: "buzz", + }) + if err == nil { + t.Fatal("expected error because pod is unfound") + } + if wrapped := errors.Unwrap(err); wrapped != nil { + err = wrapped + } + if _, ok := err.(*ErrNotFound); !ok { + t.Fatalf("expected *ErrNotFound but received %T", err) + } +} + +func TestSanitize(t *testing.T) { + expected := "fizz-buzz" + result := Sanitize("fizz+buzz") + if result != expected { + t.Fatalf("expected %q but received %q", expected, result) + } + + expected = "fizz_buzz" + result = Sanitize("fizz_buzz") + if result != expected { + t.Fatalf("expected %q but received %q", expected, result) + } + + expected = "fizz.buzz" + result = Sanitize("fizz.buzz") + if result != expected { + t.Fatalf("expected %q but received %q", expected, result) + } + + expected = "fizz-buzz" + result = Sanitize("fizz-buzz") + if result != expected { + t.Fatalf("expected %q but received %q", expected, result) + } + + expected = "123--fhd" + result = Sanitize("123-*fhd") + if result != expected { + t.Fatalf("expected %q but received %q", expected, result) + } + + expected = "1.4.0-beta1-ent" + 
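+	// "+" is not a legal Kubernetes label character, so build metadata
+	// separators are replaced with dashes.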
result = Sanitize("1.4.0-beta1+ent") + if result != expected { + t.Fatalf("expected %q but received %q", expected, result) + } +} diff --git a/serviceregistration/kubernetes/client/cmd/kubeclient/main.go b/serviceregistration/kubernetes/client/cmd/kubeclient/main.go new file mode 100644 index 0000000..7060a06 --- /dev/null +++ b/serviceregistration/kubernetes/client/cmd/kubeclient/main.go @@ -0,0 +1,113 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package main + +// This code builds a minimal binary of the lightweight kubernetes +// client and exposes it for manual testing. +// The intention is that the binary can be built and dropped into +// a Kube environment like this: +// https://kubernetes.io/docs/tasks/debug-application-cluster/get-shell-running-container/ +// Then, commands can be run to test its API calls. +// The above commands are intended to be run inside an instance of +// minikube that has been started. +// After building this binary, place it in the container like this: +// $ kubectl cp kubeclient /shell-demo:/ +// At first you may get 403's, which can be resolved using this: +// https://github.com/fabric8io/fabric8/issues/6840#issuecomment-307560275 +// +// Example calls: +// ./kubeclient -call='get-pod' -namespace='default' -pod-name='shell-demo' +// ./kubeclient -call='patch-pod' -namespace='default' -pod-name='shell-demo' -patches='/metadata/labels/fizz:buzz,/metadata/labels/foo:bar' + +import ( + "encoding/json" + "flag" + "fmt" + "os" + "os/signal" + "strings" + "syscall" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/serviceregistration/kubernetes/client" +) + +var ( + callToMake string + patchesToAdd string + namespace string + podName string +) + +func init() { + flag.StringVar(&callToMake, "call", "", `the call to make: 'get-pod' or 'patch-pod'`) + flag.StringVar(&patchesToAdd, "patches", "", `if call is "patch-pod", the patches to do like so: "/metadata/labels/fizz:buzz,/metadata/labels/foo:bar"`) + flag.StringVar(&namespace, "namespace", "", "the namespace to use") + flag.StringVar(&podName, "pod-name", "", "the pod name to use") +} + +func main() { + flag.Parse() + + c, err := client.New(hclog.Default()) + if err != nil { + panic(err) + } + + reqCh := make(chan struct{}) + shutdownCh := makeShutdownCh() + + go func() { + defer close(reqCh) + + switch callToMake { + case "get-pod": + pod, err := c.GetPod(namespace, podName) + if err != nil { + panic(err) + } + b, _ := json.Marshal(pod) + fmt.Printf("pod: %s\n", b) + return + case "patch-pod": + patchPairs := strings.Split(patchesToAdd, ",") + var patches []*client.Patch + for _, patchPair := range patchPairs { + fields := strings.Split(patchPair, ":") + if len(fields) != 2 { + panic(fmt.Errorf("unable to split %s from selectors provided of %s", fields, patchesToAdd)) + } + patches = append(patches, &client.Patch{ + Operation: client.Replace, + Path: fields[0], + Value: fields[1], + }) + } + if err := c.PatchPod(namespace, podName, patches...); err != nil { + panic(err) + } + return + default: + panic(fmt.Errorf(`unsupported call provided: %q`, callToMake)) + } + }() + + select { + case <-shutdownCh: + fmt.Println("Interrupt received, exiting...") + case <-reqCh: + } +} + +func makeShutdownCh() chan struct{} { + resultCh := make(chan struct{}) + + shutdownCh := make(chan os.Signal, 4) + signal.Notify(shutdownCh, os.Interrupt, syscall.SIGTERM) + go func() { + <-shutdownCh + close(resultCh) + }() + return resultCh +} diff --git 
a/serviceregistration/kubernetes/client/config.go b/serviceregistration/kubernetes/client/config.go new file mode 100644 index 0000000..be98240 --- /dev/null +++ b/serviceregistration/kubernetes/client/config.go @@ -0,0 +1,101 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package client + +import ( + "bytes" + "crypto/x509" + "io/ioutil" + "net" + "os" + + "github.com/hashicorp/vault/sdk/helper/certutil" +) + +const ( + // These environment variables aren't set by default. + // Vault may read them in if set through these environment variables. + // Example here: + // https://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/ + // The client itself does nothing directly with these variables, it's + // up to the caller. However, they live here so they'll be consistently + // named should the client ever be reused. + // We generally recommend preferring environmental settings over configured + // ones, allowing settings from the Downward API to override hard-coded + // ones. + EnvVarKubernetesNamespace = "VAULT_K8S_NAMESPACE" + EnvVarKubernetesPodName = "VAULT_K8S_POD_NAME" + + // The service host and port environment variables are + // set by default inside a Kubernetes environment. + EnvVarKubernetesServiceHost = "KUBERNETES_SERVICE_HOST" + EnvVarKubernetesServicePort = "KUBERNETES_SERVICE_PORT" +) + +var ( + // These are presented as variables so they can be updated + // to point at test fixtures if needed. They aren't passed + // into inClusterConfig to avoid dependency injection. + Scheme = "https://" + TokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token" + RootCAFile = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" +) + +// inClusterConfig returns a config object which uses the service account +// kubernetes gives to services. It's intended for clients that expect to be +// running inside a service running on kubernetes. It will return ErrNotInCluster +// if called from a process not running in a kubernetes environment. +// inClusterConfig is based on this: +// https://github.com/kubernetes/client-go/blob/a56922badea0f2a91771411eaa1173c9e9243908/rest/config.go#L451 +func inClusterConfig() (*Config, error) { + host, port := os.Getenv(EnvVarKubernetesServiceHost), os.Getenv(EnvVarKubernetesServicePort) + if len(host) == 0 || len(port) == 0 { + return nil, ErrNotInCluster + } + + token, err := ioutil.ReadFile(TokenFile) + if err != nil { + return nil, err + } + + caBytes, err := ioutil.ReadFile(RootCAFile) + if err != nil { + return nil, err + } + pool, err := certutil.NewCertPool(bytes.NewReader(caBytes)) + if err != nil { + return nil, err + } + return &Config{ + Host: Scheme + net.JoinHostPort(host, port), + CACertPool: pool, + BearerToken: string(token), + BearerTokenFile: TokenFile, + }, nil +} + +// This config is based on the one returned here: +// https://github.com/kubernetes/client-go/blob/a56922badea0f2a91771411eaa1173c9e9243908/rest/config.go#L451 +// It is pared down to the absolute minimum fields used by this code. +// The CACertPool is promoted to the top level from being originally on the TLSClientConfig +// because it is the only parameter of the TLSClientConfig used by this code. +// Also, it made more sense to simply reuse the pool rather than holding raw values +// and parsing it repeatedly. +type Config struct { + CACertPool *x509.CertPool + + // Host must be a host string, a host:port pair, or a URL to the base of the apiserver. 
+ // If a URL is given then the (optional) Path of that URL represents a prefix that must + // be appended to all request URIs used to access the apiserver. This allows a frontend + // proxy to easily relocate all of the apiserver endpoints. + Host string + + // Server requires Bearer authentication. This client will not attempt to use + // refresh tokens for an OAuth2 flow. + BearerToken string + + // Path to a file containing a BearerToken. + // If set, checks for a new token in the case of authorization errors. + BearerTokenFile string +} diff --git a/serviceregistration/kubernetes/retry_handler.go b/serviceregistration/kubernetes/retry_handler.go new file mode 100644 index 0000000..46ac18e --- /dev/null +++ b/serviceregistration/kubernetes/retry_handler.go @@ -0,0 +1,259 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package kubernetes + +import ( + "fmt" + "strconv" + "sync" + "time" + + "github.com/hashicorp/go-hclog" + sr "github.com/hashicorp/vault/serviceregistration" + "github.com/hashicorp/vault/serviceregistration/kubernetes/client" + "github.com/oklog/run" +) + +// How often to retry sending a state update if it fails. +var retryFreq = 5 * time.Second + +// retryHandler executes retries. +// It is thread-safe. +type retryHandler struct { + // These don't need a mutex because they're never mutated. + logger hclog.Logger + namespace, podName string + + // To synchronize setInitialState and patchesToRetry. + lock sync.Mutex + + // initialStateSet determines whether an initial state has been set + // successfully or whether a state already exists. + initialStateSet bool + + // State stores an initial state to be set + initialState sr.State + + // The map holds the path to the label being updated. It will only either + // not hold a particular label, or hold _the last_ state we were aware of. + // These should only be updated after initial state has been set. + patchesToRetry map[string]*client.Patch + + // client is the Client to use when making API calls against kubernetes + client *client.Client +} + +// Run must be called for retries to be started. +func (r *retryHandler) Run(shutdownCh <-chan struct{}, wait *sync.WaitGroup) { + r.setInitialState(shutdownCh) + + // Run this in a go func so this call doesn't block. + wait.Add(1) + go func() { + // Make sure Vault will give us time to finish up here. 
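+		// wait.Add(1) was called before this goroutine started, so Done must be
+		// deferred to guarantee Vault's shutdown is never blocked here.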
+ defer wait.Done() + + var g run.Group + + // This run group watches for the shutdownCh + shutdownActorStop := make(chan struct{}) + g.Add(func() error { + select { + case <-shutdownCh: + case <-shutdownActorStop: + } + return nil + }, func(error) { + close(shutdownActorStop) + }) + + checkUpdateStateStop := make(chan struct{}) + g.Add(func() error { + r.periodicUpdateState(checkUpdateStateStop) + return nil + }, func(error) { + close(checkUpdateStateStop) + r.client.Shutdown() + }) + + if err := g.Run(); err != nil { + r.logger.Error("error encountered during periodic state update", "error", err) + } + }() +} + +func (r *retryHandler) setInitialState(shutdownCh <-chan struct{}) { + r.lock.Lock() + defer r.lock.Unlock() + + doneCh := make(chan struct{}) + + go func() { + if err := r.setInitialStateInternal(); err != nil { + if r.logger.IsWarn() { + r.logger.Warn(fmt.Sprintf("unable to set initial state due to %s, will retry", err.Error())) + } + } + close(doneCh) + }() + + // Wait until the state is set or shutdown happens + select { + case <-doneCh: + case <-shutdownCh: + } +} + +// Notify adds a patch to be retried until it's either completed without +// error, or no longer needed. +func (r *retryHandler) Notify(patch *client.Patch) { + r.lock.Lock() + defer r.lock.Unlock() + + // Initial state must be set first, or subsequent notifications we've + // received could get smashed by a late-arriving initial state. + // We will store this to retry it when appropriate. + if !r.initialStateSet { + if r.logger.IsWarn() { + r.logger.Warn(fmt.Sprintf("cannot notify of present state for %s because initial state is unset", patch.Path)) + } + r.patchesToRetry[patch.Path] = patch + return + } + + // Initial state has been sent, so it's OK to attempt a patch immediately. + if err := r.client.PatchPod(r.namespace, r.podName, patch); err != nil { + if r.logger.IsWarn() { + r.logger.Warn(fmt.Sprintf("unable to update state for %s due to %s, will retry", patch.Path, err.Error())) + } + r.patchesToRetry[patch.Path] = patch + } +} + +// setInitialStateInternal sets the initial state remotely. This should be +// called with the lock held. +func (r *retryHandler) setInitialStateInternal() error { + // If this is set, we return immediately + if r.initialStateSet { + return nil + } + + // Verify that the pod exists and our configuration looks good. + pod, err := r.client.GetPod(r.namespace, r.podName) + if err != nil { + return err + } + + // Now to initially label our pod. + if pod.Metadata == nil { + // This should never happen IRL, just being defensive. + return fmt.Errorf("no pod metadata on %+v", pod) + } + if pod.Metadata.Labels == nil { + // Notify the labels field, and the labels as part of that one call. + // The reason we must take a different approach to adding them is discussed here: + // https://stackoverflow.com/questions/57480205/error-while-applying-json-patch-to-kubernetes-custom-resource + if err := r.client.PatchPod(r.namespace, r.podName, &client.Patch{ + Operation: client.Add, + Path: "/metadata/labels", + Value: map[string]string{ + labelVaultVersion: r.initialState.VaultVersion, + labelActive: strconv.FormatBool(r.initialState.IsActive), + labelSealed: strconv.FormatBool(r.initialState.IsSealed), + labelPerfStandby: strconv.FormatBool(r.initialState.IsPerformanceStandby), + labelInitialized: strconv.FormatBool(r.initialState.IsInitialized), + }, + }); err != nil { + return err + } + } else { + // Create the labels through a patch to each individual field. 
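+		// The labels map already exists on the pod, so each label can be
+		// replaced individually without touching unrelated labels.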
+ patches := []*client.Patch{ + { + Operation: client.Replace, + Path: pathToLabels + labelVaultVersion, + Value: r.initialState.VaultVersion, + }, + { + Operation: client.Replace, + Path: pathToLabels + labelActive, + Value: strconv.FormatBool(r.initialState.IsActive), + }, + { + Operation: client.Replace, + Path: pathToLabels + labelSealed, + Value: strconv.FormatBool(r.initialState.IsSealed), + }, + { + Operation: client.Replace, + Path: pathToLabels + labelPerfStandby, + Value: strconv.FormatBool(r.initialState.IsPerformanceStandby), + }, + { + Operation: client.Replace, + Path: pathToLabels + labelInitialized, + Value: strconv.FormatBool(r.initialState.IsInitialized), + }, + } + if err := r.client.PatchPod(r.namespace, r.podName, patches...); err != nil { + return err + } + } + r.initialStateSet = true + return nil +} + +func (r *retryHandler) periodicUpdateState(stopCh chan struct{}) { + retry := time.NewTicker(retryFreq) + defer retry.Stop() + + for { + // Call updateState immediately so we don't wait for the first tick + // if setting the initial state + r.updateState() + + select { + case <-stopCh: + return + case <-retry.C: + } + } +} + +func (r *retryHandler) updateState() { + r.lock.Lock() + defer r.lock.Unlock() + + // Initial state must be set first, or subsequent notifications we've + // received could get smashed by a late-arriving initial state. + // If the state is already set, this is a no-op. + if err := r.setInitialStateInternal(); err != nil { + if r.logger.IsWarn() { + r.logger.Warn(fmt.Sprintf("unable to set initial state due to %s, will retry", err.Error())) + } + // On failure, we leave the initial state func populated for + // the next retry. + return + } + + if len(r.patchesToRetry) == 0 { + // Nothing further to do here. + return + } + + patches := make([]*client.Patch, len(r.patchesToRetry)) + i := 0 + for _, patch := range r.patchesToRetry { + patches[i] = patch + i++ + } + + if err := r.client.PatchPod(r.namespace, r.podName, patches...); err != nil { + if r.logger.IsWarn() { + r.logger.Warn(fmt.Sprintf("unable to update state for due to %s, will retry", err.Error())) + } + return + } + r.patchesToRetry = make(map[string]*client.Patch) +} diff --git a/serviceregistration/kubernetes/retry_handler_test.go b/serviceregistration/kubernetes/retry_handler_test.go new file mode 100644 index 0000000..0dd6111 --- /dev/null +++ b/serviceregistration/kubernetes/retry_handler_test.go @@ -0,0 +1,456 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package kubernetes + +import ( + "os" + "sync" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + sr "github.com/hashicorp/vault/serviceregistration" + "github.com/hashicorp/vault/serviceregistration/kubernetes/client" + kubetest "github.com/hashicorp/vault/serviceregistration/kubernetes/testing" +) + +func TestRetryHandlerSimple(t *testing.T) { + if testing.Short() { + t.Skip("skipping because this test takes 10-15 seconds") + } + + testState, testConf, closeFunc := kubetest.Server(t) + defer closeFunc() + + client.Scheme = testConf.ClientScheme + client.TokenFile = testConf.PathToTokenFile + client.RootCAFile = testConf.PathToRootCAFile + if err := os.Setenv(client.EnvVarKubernetesServiceHost, testConf.ServiceHost); err != nil { + t.Fatal(err) + } + if err := os.Setenv(client.EnvVarKubernetesServicePort, testConf.ServicePort); err != nil { + t.Fatal(err) + } + + logger := hclog.NewNullLogger() + shutdownCh := make(chan struct{}) + wait := &sync.WaitGroup{} + + c, err := client.New(logger) + if err != nil { + t.Fatal(err) + } + + r := &retryHandler{ + logger: logger, + namespace: kubetest.ExpectedNamespace, + podName: kubetest.ExpectedPodName, + patchesToRetry: make(map[string]*client.Patch), + client: c, + initialState: sr.State{}, + } + r.Run(shutdownCh, wait) + + // Initial number of patches upon Run from setting the initial state + initStatePatches := testState.NumPatches() + if initStatePatches == 0 { + t.Fatalf("expected number of states patches after initial patches to be non-zero") + } + + // Send a new patch + testPatch := &client.Patch{ + Operation: client.Add, + Path: "patch-path", + Value: "true", + } + r.Notify(testPatch) + + // Wait ample until the next try should have occurred. + <-time.NewTimer(retryFreq * 2).C + + if testState.NumPatches() != initStatePatches+1 { + t.Fatalf("expected 1 patch, got: %d", testState.NumPatches()) + } +} + +func TestRetryHandlerAdd(t *testing.T) { + _, testConf, closeFunc := kubetest.Server(t) + defer closeFunc() + + client.Scheme = testConf.ClientScheme + client.TokenFile = testConf.PathToTokenFile + client.RootCAFile = testConf.PathToRootCAFile + if err := os.Setenv(client.EnvVarKubernetesServiceHost, testConf.ServiceHost); err != nil { + t.Fatal(err) + } + if err := os.Setenv(client.EnvVarKubernetesServicePort, testConf.ServicePort); err != nil { + t.Fatal(err) + } + + logger := hclog.NewNullLogger() + c, err := client.New(logger) + if err != nil { + t.Fatal(err) + } + + r := &retryHandler{ + logger: hclog.NewNullLogger(), + namespace: "some-namespace", + podName: "some-pod-name", + patchesToRetry: make(map[string]*client.Patch), + client: c, + } + + testPatch1 := &client.Patch{ + Operation: client.Add, + Path: "one", + Value: "true", + } + testPatch2 := &client.Patch{ + Operation: client.Add, + Path: "two", + Value: "true", + } + testPatch3 := &client.Patch{ + Operation: client.Add, + Path: "three", + Value: "true", + } + testPatch4 := &client.Patch{ + Operation: client.Add, + Path: "four", + Value: "true", + } + + // Should be able to add all 4 patches. + r.Notify(testPatch1) + if len(r.patchesToRetry) != 1 { + t.Fatal("expected 1 patch") + } + + r.Notify(testPatch2) + if len(r.patchesToRetry) != 2 { + t.Fatal("expected 2 patches") + } + + r.Notify(testPatch3) + if len(r.patchesToRetry) != 3 { + t.Fatal("expected 3 patches") + } + + r.Notify(testPatch4) + if len(r.patchesToRetry) != 4 { + t.Fatal("expected 4 patches") + } + + // Adding a dupe should result in no change. 
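+	// patchesToRetry is keyed by patch path, so a repeated path overwrites its
+	// earlier entry instead of growing the map.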
+ r.Notify(testPatch4) + if len(r.patchesToRetry) != 4 { + t.Fatal("expected 4 patches") + } + + // Adding a reversion should result in its twin being subtracted. + r.Notify(&client.Patch{ + Operation: client.Add, + Path: "four", + Value: "false", + }) + if len(r.patchesToRetry) != 4 { + t.Fatal("expected 4 patches") + } + + r.Notify(&client.Patch{ + Operation: client.Add, + Path: "three", + Value: "false", + }) + if len(r.patchesToRetry) != 4 { + t.Fatal("expected 4 patches") + } + + r.Notify(&client.Patch{ + Operation: client.Add, + Path: "two", + Value: "false", + }) + if len(r.patchesToRetry) != 4 { + t.Fatal("expected 4 patches") + } + + r.Notify(&client.Patch{ + Operation: client.Add, + Path: "one", + Value: "false", + }) + if len(r.patchesToRetry) != 4 { + t.Fatal("expected 4 patches") + } +} + +// This is meant to be run with the -race flag on. +func TestRetryHandlerRacesAndDeadlocks(t *testing.T) { + _, testConf, closeFunc := kubetest.Server(t) + defer closeFunc() + + client.Scheme = testConf.ClientScheme + client.TokenFile = testConf.PathToTokenFile + client.RootCAFile = testConf.PathToRootCAFile + if err := os.Setenv(client.EnvVarKubernetesServiceHost, testConf.ServiceHost); err != nil { + t.Fatal(err) + } + if err := os.Setenv(client.EnvVarKubernetesServicePort, testConf.ServicePort); err != nil { + t.Fatal(err) + } + + logger := hclog.NewNullLogger() + shutdownCh := make(chan struct{}) + wait := &sync.WaitGroup{} + testPatch := &client.Patch{ + Operation: client.Add, + Path: "patch-path", + Value: "true", + } + + c, err := client.New(logger) + if err != nil { + t.Fatal(err) + } + + r := &retryHandler{ + logger: logger, + namespace: kubetest.ExpectedNamespace, + podName: kubetest.ExpectedPodName, + patchesToRetry: make(map[string]*client.Patch), + initialState: sr.State{}, + client: c, + } + + // Now hit it as quickly as possible to see if we can produce + // races or deadlocks. + start := make(chan struct{}) + done := make(chan bool) + numRoutines := 100 + for i := 0; i < numRoutines; i++ { + go func() { + <-start + r.Notify(testPatch) + done <- true + }() + go func() { + <-start + r.Run(shutdownCh, wait) + done <- true + }() + } + close(start) + + // Allow up to 5 seconds for everything to finish. + timer := time.NewTimer(5 * time.Second) + for i := 0; i < numRoutines*2; i++ { + select { + case <-timer.C: + t.Fatal("test took too long to complete, check for deadlock") + case <-done: + } + } +} + +// In this test, the API server sends bad responses for 5 seconds, +// then sends good responses, and we make sure we get the expected behavior. 
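+// While the server is timing out, no labels should be visible remotely; once
+// it recovers, the retry ticker should flush the stored initial state.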
+func TestRetryHandlerAPIConnectivityProblemsInitialState(t *testing.T) { + if testing.Short() { + t.Skip() + } + + testState, testConf, closeFunc := kubetest.Server(t) + defer closeFunc() + kubetest.ReturnGatewayTimeouts.Store(true) + + client.Scheme = testConf.ClientScheme + client.TokenFile = testConf.PathToTokenFile + client.RootCAFile = testConf.PathToRootCAFile + client.RetryMax = 0 + if err := os.Setenv(client.EnvVarKubernetesServiceHost, testConf.ServiceHost); err != nil { + t.Fatal(err) + } + if err := os.Setenv(client.EnvVarKubernetesServicePort, testConf.ServicePort); err != nil { + t.Fatal(err) + } + + shutdownCh := make(chan struct{}) + wait := &sync.WaitGroup{} + reg, err := NewServiceRegistration(map[string]string{ + "namespace": kubetest.ExpectedNamespace, + "pod_name": kubetest.ExpectedPodName, + }, hclog.NewNullLogger(), sr.State{ + VaultVersion: "vault-version", + IsInitialized: true, + IsSealed: true, + IsActive: true, + IsPerformanceStandby: true, + }) + if err != nil { + t.Fatal(err) + } + if err := reg.Run(shutdownCh, wait, ""); err != nil { + t.Fatal(err) + } + + // At this point, since the initial state can't be set, + // remotely we should have false for all these labels. + patch := testState.Get(pathToLabels + labelVaultVersion) + if patch != nil { + t.Fatal("expected no value") + } + patch = testState.Get(pathToLabels + labelActive) + if patch != nil { + t.Fatal("expected no value") + } + patch = testState.Get(pathToLabels + labelSealed) + if patch != nil { + t.Fatal("expected no value") + } + patch = testState.Get(pathToLabels + labelPerfStandby) + if patch != nil { + t.Fatal("expected no value") + } + patch = testState.Get(pathToLabels + labelInitialized) + if patch != nil { + t.Fatal("expected no value") + } + + kubetest.ReturnGatewayTimeouts.Store(false) + + // Now we need to wait to give the retry handler + // a chance to update these values. + time.Sleep(retryFreq + time.Second) + val := testState.Get(pathToLabels + labelVaultVersion)["value"] + if val != "vault-version" { + t.Fatal("expected vault-version") + } + val = testState.Get(pathToLabels + labelActive)["value"] + if val != "true" { + t.Fatal("expected true") + } + val = testState.Get(pathToLabels + labelSealed)["value"] + if val != "true" { + t.Fatal("expected true") + } + val = testState.Get(pathToLabels + labelPerfStandby)["value"] + if val != "true" { + t.Fatal("expected true") + } + val = testState.Get(pathToLabels + labelInitialized)["value"] + if val != "true" { + t.Fatal("expected true") + } +} + +// In this test, the API server sends bad responses for 5 seconds, +// then sends good responses, and we make sure we get the expected behavior. 
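+// Notifications that arrive before the initial state lands are queued in
+// patchesToRetry and applied after it, so the final labels should reflect the
+// notified values.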
+func TestRetryHandlerAPIConnectivityProblemsNotifications(t *testing.T) { + if testing.Short() { + t.Skip() + } + + testState, testConf, closeFunc := kubetest.Server(t) + defer closeFunc() + kubetest.ReturnGatewayTimeouts.Store(true) + + client.Scheme = testConf.ClientScheme + client.TokenFile = testConf.PathToTokenFile + client.RootCAFile = testConf.PathToRootCAFile + client.RetryMax = 0 + if err := os.Setenv(client.EnvVarKubernetesServiceHost, testConf.ServiceHost); err != nil { + t.Fatal(err) + } + if err := os.Setenv(client.EnvVarKubernetesServicePort, testConf.ServicePort); err != nil { + t.Fatal(err) + } + + shutdownCh := make(chan struct{}) + wait := &sync.WaitGroup{} + reg, err := NewServiceRegistration(map[string]string{ + "namespace": kubetest.ExpectedNamespace, + "pod_name": kubetest.ExpectedPodName, + }, hclog.NewNullLogger(), sr.State{ + VaultVersion: "vault-version", + IsInitialized: false, + IsSealed: false, + IsActive: false, + IsPerformanceStandby: false, + }) + if err != nil { + t.Fatal(err) + } + + if err := reg.NotifyActiveStateChange(true); err != nil { + t.Fatal(err) + } + if err := reg.NotifyInitializedStateChange(true); err != nil { + t.Fatal(err) + } + if err := reg.NotifyPerformanceStandbyStateChange(true); err != nil { + t.Fatal(err) + } + if err := reg.NotifySealedStateChange(true); err != nil { + t.Fatal(err) + } + + if err := reg.Run(shutdownCh, wait, ""); err != nil { + t.Fatal(err) + } + + // At this point, since the initial state can't be set, + // remotely we should have false for all these labels. + patch := testState.Get(pathToLabels + labelVaultVersion) + if patch != nil { + t.Fatal("expected no value") + } + patch = testState.Get(pathToLabels + labelActive) + if patch != nil { + t.Fatal("expected no value") + } + patch = testState.Get(pathToLabels + labelSealed) + if patch != nil { + t.Fatal("expected no value") + } + patch = testState.Get(pathToLabels + labelPerfStandby) + if patch != nil { + t.Fatal("expected no value") + } + patch = testState.Get(pathToLabels + labelInitialized) + if patch != nil { + t.Fatal("expected no value") + } + + kubetest.ReturnGatewayTimeouts.Store(false) + + // Now we need to wait to give the retry handler + // a chance to update these values. + time.Sleep(retryFreq + time.Second) + + // They should be "true" if the Notifications were set after the + // initial state. + val := testState.Get(pathToLabels + labelVaultVersion)["value"] + if val != "vault-version" { + t.Fatal("expected vault-version") + } + val = testState.Get(pathToLabels + labelActive)["value"] + if val != "true" { + t.Fatal("expected true") + } + val = testState.Get(pathToLabels + labelSealed)["value"] + if val != "true" { + t.Fatal("expected true") + } + val = testState.Get(pathToLabels + labelPerfStandby)["value"] + if val != "true" { + t.Fatal("expected true") + } + val = testState.Get(pathToLabels + labelInitialized)["value"] + if val != "true" { + t.Fatal("expected true") + } +} diff --git a/serviceregistration/kubernetes/service_registration.go b/serviceregistration/kubernetes/service_registration.go new file mode 100644 index 0000000..f377cbb --- /dev/null +++ b/serviceregistration/kubernetes/service_registration.go @@ -0,0 +1,123 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package kubernetes + +import ( + "fmt" + "os" + "strconv" + "sync" + + "github.com/hashicorp/go-hclog" + sr "github.com/hashicorp/vault/serviceregistration" + "github.com/hashicorp/vault/serviceregistration/kubernetes/client" +) + +const ( + // Labels are placed in a pod's metadata. + labelVaultVersion = "vault-version" + labelActive = "vault-active" + labelSealed = "vault-sealed" + labelPerfStandby = "vault-perf-standby" + labelInitialized = "vault-initialized" + + // This is the path to where these labels are applied. + pathToLabels = "/metadata/labels/" +) + +func NewServiceRegistration(config map[string]string, logger hclog.Logger, state sr.State) (sr.ServiceRegistration, error) { + namespace, err := getRequiredField(logger, config, client.EnvVarKubernetesNamespace, "namespace") + if err != nil { + return nil, err + } + podName, err := getRequiredField(logger, config, client.EnvVarKubernetesPodName, "pod_name") + if err != nil { + return nil, err + } + + c, err := client.New(logger) + if err != nil { + return nil, err + } + + // The Vault version must be sanitized because it can contain special + // characters like "+" which aren't acceptable by the Kube API. + state.VaultVersion = client.Sanitize(state.VaultVersion) + return &serviceRegistration{ + logger: logger, + namespace: namespace, + podName: podName, + retryHandler: &retryHandler{ + logger: logger, + namespace: namespace, + podName: podName, + initialState: state, + patchesToRetry: make(map[string]*client.Patch), + client: c, + }, + }, nil +} + +type serviceRegistration struct { + logger hclog.Logger + namespace, podName string + retryHandler *retryHandler +} + +func (r *serviceRegistration) Run(shutdownCh <-chan struct{}, wait *sync.WaitGroup, _ string) error { + r.retryHandler.Run(shutdownCh, wait) + return nil +} + +func (r *serviceRegistration) NotifyActiveStateChange(isActive bool) error { + r.retryHandler.Notify(&client.Patch{ + Operation: client.Replace, + Path: pathToLabels + labelActive, + Value: strconv.FormatBool(isActive), + }) + return nil +} + +func (r *serviceRegistration) NotifySealedStateChange(isSealed bool) error { + r.retryHandler.Notify(&client.Patch{ + Operation: client.Replace, + Path: pathToLabels + labelSealed, + Value: strconv.FormatBool(isSealed), + }) + return nil +} + +func (r *serviceRegistration) NotifyPerformanceStandbyStateChange(isStandby bool) error { + r.retryHandler.Notify(&client.Patch{ + Operation: client.Replace, + Path: pathToLabels + labelPerfStandby, + Value: strconv.FormatBool(isStandby), + }) + return nil +} + +func (r *serviceRegistration) NotifyInitializedStateChange(isInitialized bool) error { + r.retryHandler.Notify(&client.Patch{ + Operation: client.Replace, + Path: pathToLabels + labelInitialized, + Value: strconv.FormatBool(isInitialized), + }) + return nil +} + +func getRequiredField(logger hclog.Logger, config map[string]string, envVar, configParam string) (string, error) { + value := "" + switch { + case os.Getenv(envVar) != "": + value = os.Getenv(envVar) + case config[configParam] != "": + value = config[configParam] + default: + return "", fmt.Errorf(`%s must be provided via %q or the %q config parameter`, configParam, envVar, configParam) + } + if logger.IsDebug() { + logger.Debug(fmt.Sprintf("%q: %q", configParam, value)) + } + return value, nil +} diff --git a/serviceregistration/kubernetes/service_registration_test.go b/serviceregistration/kubernetes/service_registration_test.go new file mode 100644 index 0000000..a6a93c9 --- 
/dev/null +++ b/serviceregistration/kubernetes/service_registration_test.go @@ -0,0 +1,133 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package kubernetes + +import ( + "os" + "strconv" + "sync" + "testing" + + "github.com/hashicorp/go-hclog" + sr "github.com/hashicorp/vault/serviceregistration" + "github.com/hashicorp/vault/serviceregistration/kubernetes/client" + kubetest "github.com/hashicorp/vault/serviceregistration/kubernetes/testing" +) + +var testVersion = "version1" + +func TestServiceRegistration(t *testing.T) { + testState, testConf, closeFunc := kubetest.Server(t) + defer closeFunc() + + client.Scheme = testConf.ClientScheme + client.TokenFile = testConf.PathToTokenFile + client.RootCAFile = testConf.PathToRootCAFile + if err := os.Setenv(client.EnvVarKubernetesServiceHost, testConf.ServiceHost); err != nil { + t.Fatal(err) + } + if err := os.Setenv(client.EnvVarKubernetesServicePort, testConf.ServicePort); err != nil { + t.Fatal(err) + } + + if testState.NumPatches() != 0 { + t.Fatalf("expected 0 patches but have %d: %+v", testState.NumPatches(), testState) + } + shutdownCh := make(chan struct{}) + config := map[string]string{ + "namespace": kubetest.ExpectedNamespace, + "pod_name": kubetest.ExpectedPodName, + } + logger := hclog.NewNullLogger() + state := sr.State{ + VaultVersion: testVersion, + IsInitialized: true, + IsSealed: true, + IsActive: true, + IsPerformanceStandby: true, + } + reg, err := NewServiceRegistration(config, logger, state) + if err != nil { + t.Fatal(err) + } + if err := reg.Run(shutdownCh, &sync.WaitGroup{}, ""); err != nil { + t.Fatal(err) + } + + // Test initial state. + if testState.NumPatches() != 5 { + t.Fatalf("expected 5 current labels but have %d: %+v", testState.NumPatches(), testState) + } + if testState.Get(pathToLabels + labelVaultVersion)["value"] != testVersion { + t.Fatalf("expected %q but received %q", testVersion, testState.Get(pathToLabels + labelVaultVersion)["value"]) + } + if testState.Get(pathToLabels + labelActive)["value"] != strconv.FormatBool(true) { + t.Fatalf("expected %q but received %q", strconv.FormatBool(true), testState.Get(pathToLabels + labelActive)["value"]) + } + if testState.Get(pathToLabels + labelSealed)["value"] != strconv.FormatBool(true) { + t.Fatalf("expected %q but received %q", strconv.FormatBool(true), testState.Get(pathToLabels + labelSealed)["value"]) + } + if testState.Get(pathToLabels + labelPerfStandby)["value"] != strconv.FormatBool(true) { + t.Fatalf("expected %q but received %q", strconv.FormatBool(true), testState.Get(pathToLabels + labelPerfStandby)["value"]) + } + if testState.Get(pathToLabels + labelInitialized)["value"] != strconv.FormatBool(true) { + t.Fatalf("expected %q but received %q", strconv.FormatBool(true), testState.Get(pathToLabels + labelInitialized)["value"]) + } + + // Test NotifyActiveStateChange. + if err := reg.NotifyActiveStateChange(false); err != nil { + t.Fatal(err) + } + if testState.Get(pathToLabels + labelActive)["value"] != strconv.FormatBool(false) { + t.Fatalf("expected %q but received %q", strconv.FormatBool(false), testState.Get(pathToLabels + labelActive)["value"]) + } + if err := reg.NotifyActiveStateChange(true); err != nil { + t.Fatal(err) + } + if testState.Get(pathToLabels + labelActive)["value"] != strconv.FormatBool(true) { + t.Fatalf("expected %q but received %q", strconv.FormatBool(true), testState.Get(pathToLabels + labelActive)["value"]) + } + + // Test NotifySealedStateChange. 
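+	// The initial state was set during Run, so each notification below should
+	// be patched through to the test server right away (and queued for retry
+	// only on failure).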
+ if err := reg.NotifySealedStateChange(false); err != nil { + t.Fatal(err) + } + if testState.Get(pathToLabels + labelSealed)["value"] != strconv.FormatBool(false) { + t.Fatalf("expected %q but received %q", strconv.FormatBool(false), testState.Get(pathToLabels + labelSealed)["value"]) + } + if err := reg.NotifySealedStateChange(true); err != nil { + t.Fatal(err) + } + if testState.Get(pathToLabels + labelSealed)["value"] != strconv.FormatBool(true) { + t.Fatalf("expected %q but received %q", strconv.FormatBool(true), testState.Get(pathToLabels + labelSealed)["value"]) + } + + // Test NotifyPerformanceStandbyStateChange. + if err := reg.NotifyPerformanceStandbyStateChange(false); err != nil { + t.Fatal(err) + } + if testState.Get(pathToLabels + labelPerfStandby)["value"] != strconv.FormatBool(false) { + t.Fatalf("expected %q but received %q", strconv.FormatBool(false), testState.Get(pathToLabels + labelPerfStandby)["value"]) + } + if err := reg.NotifyPerformanceStandbyStateChange(true); err != nil { + t.Fatal(err) + } + if testState.Get(pathToLabels + labelPerfStandby)["value"] != strconv.FormatBool(true) { + t.Fatalf("expected %q but received %q", strconv.FormatBool(true), testState.Get(pathToLabels + labelPerfStandby)["value"]) + } + + // Test NotifyInitializedStateChange. + if err := reg.NotifyInitializedStateChange(false); err != nil { + t.Fatal(err) + } + if testState.Get(pathToLabels + labelInitialized)["value"] != strconv.FormatBool(false) { + t.Fatalf("expected %q but received %q", strconv.FormatBool(false), testState.Get(pathToLabels + labelInitialized)["value"]) + } + if err := reg.NotifyInitializedStateChange(true); err != nil { + t.Fatal(err) + } + if testState.Get(pathToLabels + labelInitialized)["value"] != strconv.FormatBool(true) { + t.Fatalf("expected %q but received %q", strconv.FormatBool(true), testState.Get(pathToLabels + labelInitialized)["value"]) + } +} diff --git a/serviceregistration/kubernetes/testing/README.md b/serviceregistration/kubernetes/testing/README.md new file mode 100644 index 0000000..940415b --- /dev/null +++ b/serviceregistration/kubernetes/testing/README.md @@ -0,0 +1,54 @@ +# How to Test Manually + +- `$ minikube start` +- In the Vault folder, `$ make dev XC_ARCH=amd64 XC_OS=linux XC_OSARCH=linux/amd64` +- Create a file called `vault-test.yaml` with the following contents: + +``` +apiVersion: v1 +kind: Pod +metadata: + name: vault +spec: + containers: + - name: nginx + image: nginx + command: [ "sh", "-c"] + args: + - while true; do + echo -en '\n'; + printenv VAULT_K8S_POD_NAME VAULT_K8S_NAMESPACE; + sleep 10; + done; + env: + - name: VAULT_K8S_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: VAULT_K8S_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + restartPolicy: Never +``` + +- Create the pod: `$ kubectl apply -f vault-test.yaml` +- View the full initial state of the pod: `$ kubectl get pod vault -o=yaml > initialstate.txt` +- Drop the Vault binary into the pod: `$ kubectl cp bin/vault /vault:/` +- Drop to the shell within the pod: `$ kubectl exec -it vault -- /bin/bash` +- Install a text editor: `$ apt-get update`, `$ apt-get install nano` +- Write a test Vault config to `vault.config` like: + +``` +storage "inmem" {} +service_registration "kubernetes" {} +disable_mlock = true +ui = true +api_addr = "http://127.0.0.1:8200" +log_level = "debug" +``` + +- Run Vault: `$ ./vault server -config=vault.config -dev -dev-root-token-id=root` +- If 403's are received, you may need to grant RBAC, example here: 
https://github.com/fabric8io/fabric8/issues/6840#issuecomment-307560275 +- In a separate window outside the pod, view the resulting state of the pod: `$ kubectl get pod vault -o=yaml > currentstate.txt` +- View the differences: `$ diff initialstate.txt currentstate.txt` diff --git a/serviceregistration/kubernetes/testing/ca.crt b/serviceregistration/kubernetes/testing/ca.crt new file mode 100644 index 0000000..077f48b --- /dev/null +++ b/serviceregistration/kubernetes/testing/ca.crt @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC5zCCAc+gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p +a3ViZUNBMB4XDTE5MTIxMDIzMDUxOVoXDTI5MTIwODIzMDUxOVowFTETMBEGA1UE +AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANFi +/RIdMHd865X6JygTb9riX01DA3QnR+RoXDXNnj8D3LziLG2n8ItXMJvWbU3sxxyy +nX9HxJ0SIeexj1cYzdQBtJDjO1/PeuKc4CZ7zCukCAtHz8mC7BDPOU7F7pggpcQ0 +/t/pa2m22hmCu8aDF9WlUYHtJpYATnI/A5vz/VFLR9daxmkl59Qo3oHITj7vAzSx +/75r9cibpQyJ+FhiHOZHQWYY2JYw2g4v5hm5hg5SFM9yFcZ75ISI9ebyFFIl9iBY +zAk9jqv1mXvLr0Q39AVwMTamvGuap1oocjM9NIhQvaFL/DNqF1ouDQjCf5u2imLc +TraO1/2KO8fqwOZCOrMCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW +MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3 +DQEBCwUAA4IBAQBtVZCwCPqUUUpIClAlE9nc2fo2bTs9gsjXRmqdQ5oaSomSLE93 +aJWYFuAhxPXtlApbLYZfW2m1sM3mTVQN60y0uE4e1jdSN1ErYQ9slJdYDAMaEmOh +iSexj+Nd1scUiMHV9lf3ps5J8sYeCpwZX3sPmw7lqZojTS12pANBDcigsaj5RRyN +9GyP3WkSQUsTpWlDb9Fd+KNdkCVw7nClIpBPA2KW4BQKw/rNSvOFD61mbzc89lo0 +Q9IFGQFFF8jO18lbyWqnRBGXcS4/G7jQ3S7C121d14YLUeAYOM7pJykI1g4CLx9y +vitin0L6nprauWkKO38XgM4T75qKZpqtiOcT +-----END CERTIFICATE----- diff --git a/serviceregistration/kubernetes/testing/resp-get-pod.json b/serviceregistration/kubernetes/testing/resp-get-pod.json new file mode 100644 index 0000000..229eb1f --- /dev/null +++ b/serviceregistration/kubernetes/testing/resp-get-pod.json @@ -0,0 +1,120 @@ +{ + "kind": "Pod", + "apiVersion": "v1", + "metadata": { + "name": "shell-demo", + "labels": {"fizz": "buzz"}, + "namespace": "default", + "selfLink": "/api/v1/namespaces/default/pods/shell-demo", + "uid": "7ecb93ff-aa64-426d-b330-2c0b2c0957a2", + "resourceVersion": "87798", + "creationTimestamp": "2020-01-10T19:22:40Z", + "annotations": { + "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Pod\",\"metadata\":{\"annotations\":{},\"name\":\"shell-demo\",\"namespace\":\"default\"},\"spec\":{\"containers\":[{\"image\":\"nginx\",\"name\":\"nginx\",\"volumeMounts\":[{\"mountPath\":\"/usr/share/nginx/html\",\"name\":\"shared-data\"}]}],\"dnsPolicy\":\"Default\",\"hostNetwork\":true,\"volumes\":[{\"emptyDir\":{},\"name\":\"shared-data\"}]}}\n" + } + }, + "spec": { + "volumes": [{ + "name": "shared-data", + "emptyDir": {} + }, { + "name": "default-token-5fjt9", + "secret": { + "secretName": "default-token-5fjt9", + "defaultMode": 420 + } + }], + "containers": [{ + "name": "nginx", + "image": "nginx", + "resources": {}, + "volumeMounts": [{ + "name": "shared-data", + "mountPath": "/usr/share/nginx/html" + }, { + "name": "default-token-5fjt9", + "readOnly": true, + "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount" + }], + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "imagePullPolicy": "Always" + }], + "restartPolicy": "Always", + "terminationGracePeriodSeconds": 30, + "dnsPolicy": "Default", + "serviceAccountName": "default", + "serviceAccount": "default", + "nodeName": "minikube", + "hostNetwork": true, + "securityContext": {}, + "schedulerName": "default-scheduler", + "tolerations": 
[{ + "key": "node.kubernetes.io/not-ready", + "operator": "Exists", + "effect": "NoExecute", + "tolerationSeconds": 300 + }, { + "key": "node.kubernetes.io/unreachable", + "operator": "Exists", + "effect": "NoExecute", + "tolerationSeconds": 300 + }], + "priority": 0, + "enableServiceLinks": true + }, + "status": { + "phase": "Running", + "conditions": [{ + "type": "Initialized", + "status": "True", + "lastProbeTime": null, + "lastTransitionTime": "2020-01-10T19:22:40Z" + }, { + "type": "Ready", + "status": "True", + "lastProbeTime": null, + "lastTransitionTime": "2020-01-10T20:20:55Z" + }, { + "type": "ContainersReady", + "status": "True", + "lastProbeTime": null, + "lastTransitionTime": "2020-01-10T20:20:55Z" + }, { + "type": "PodScheduled", + "status": "True", + "lastProbeTime": null, + "lastTransitionTime": "2020-01-10T19:22:40Z" + }], + "hostIP": "192.168.99.100", + "podIP": "192.168.99.100", + "podIPs": [{ + "ip": "192.168.99.100" + }], + "startTime": "2020-01-10T19:22:40Z", + "containerStatuses": [{ + "name": "nginx", + "state": { + "running": { + "startedAt": "2020-01-10T20:20:55Z" + } + }, + "lastState": { + "terminated": { + "exitCode": 0, + "reason": "Completed", + "startedAt": "2020-01-10T19:22:53Z", + "finishedAt": "2020-01-10T20:12:03Z", + "containerID": "docker://ed8bc068cd313ea5adb72780e8015ab09ecb61ea077e39304b4a3fe581f471c4" + } + }, + "ready": true, + "restartCount": 1, + "image": "nginx:latest", + "imageID": "docker-pullable://nginx@sha256:8aa7f6a9585d908a63e5e418dc5d14ae7467d2e36e1ab4f0d8f9d059a3d071ce", + "containerID": "docker://a8ee34466791bc6f082f271f40cdfc43625cea81831b1029b1e90b4f6949f6df", + "started": true + }], + "qosClass": "BestEffort" + } +} diff --git a/serviceregistration/kubernetes/testing/resp-not-found.json b/serviceregistration/kubernetes/testing/resp-not-found.json new file mode 100644 index 0000000..800a962 --- /dev/null +++ b/serviceregistration/kubernetes/testing/resp-not-found.json @@ -0,0 +1,13 @@ +{ + "kind": "Status", + "apiVersion": "v1", + "metadata": {}, + "status": "Failure", + "message": "pods \"shell-dem\" not found", + "reason": "NotFound", + "details": { + "name": "shell-dem", + "kind": "pods" + }, + "code": 404 +} diff --git a/serviceregistration/kubernetes/testing/resp-update-pod.json b/serviceregistration/kubernetes/testing/resp-update-pod.json new file mode 100644 index 0000000..e808691 --- /dev/null +++ b/serviceregistration/kubernetes/testing/resp-update-pod.json @@ -0,0 +1,123 @@ +{ + "kind": "Pod", + "apiVersion": "v1", + "metadata": { + "name": "shell-demo", + "namespace": "default", + "selfLink": "/api/v1/namespaces/default/pods/shell-demo", + "uid": "7ecb93ff-aa64-426d-b330-2c0b2c0957a2", + "resourceVersion": "96433", + "creationTimestamp": "2020-01-10T19:22:40Z", + "labels": { + "fizz": "buzz", + "foo": "bar" + }, + "annotations": { + "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Pod\",\"metadata\":{\"annotations\":{},\"name\":\"shell-demo\",\"namespace\":\"default\"},\"spec\":{\"containers\":[{\"image\":\"nginx\",\"name\":\"nginx\",\"volumeMounts\":[{\"mountPath\":\"/usr/share/nginx/html\",\"name\":\"shared-data\"}]}],\"dnsPolicy\":\"Default\",\"hostNetwork\":true,\"volumes\":[{\"emptyDir\":{},\"name\":\"shared-data\"}]}}\n" + } + }, + "spec": { + "volumes": [{ + "name": "shared-data", + "emptyDir": {} + }, { + "name": "default-token-5fjt9", + "secret": { + "secretName": "default-token-5fjt9", + "defaultMode": 420 + } + }], + "containers": [{ + "name": "nginx", + "image": "nginx", + 
"resources": {}, + "volumeMounts": [{ + "name": "shared-data", + "mountPath": "/usr/share/nginx/html" + }, { + "name": "default-token-5fjt9", + "readOnly": true, + "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount" + }], + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "imagePullPolicy": "Always" + }], + "restartPolicy": "Always", + "terminationGracePeriodSeconds": 30, + "dnsPolicy": "Default", + "serviceAccountName": "default", + "serviceAccount": "default", + "nodeName": "minikube", + "hostNetwork": true, + "securityContext": {}, + "schedulerName": "default-scheduler", + "tolerations": [{ + "key": "node.kubernetes.io/not-ready", + "operator": "Exists", + "effect": "NoExecute", + "tolerationSeconds": 300 + }, { + "key": "node.kubernetes.io/unreachable", + "operator": "Exists", + "effect": "NoExecute", + "tolerationSeconds": 300 + }], + "priority": 0, + "enableServiceLinks": true + }, + "status": { + "phase": "Running", + "conditions": [{ + "type": "Initialized", + "status": "True", + "lastProbeTime": null, + "lastTransitionTime": "2020-01-10T19:22:40Z" + }, { + "type": "Ready", + "status": "True", + "lastProbeTime": null, + "lastTransitionTime": "2020-01-10T20:20:55Z" + }, { + "type": "ContainersReady", + "status": "True", + "lastProbeTime": null, + "lastTransitionTime": "2020-01-10T20:20:55Z" + }, { + "type": "PodScheduled", + "status": "True", + "lastProbeTime": null, + "lastTransitionTime": "2020-01-10T19:22:40Z" + }], + "hostIP": "192.168.99.100", + "podIP": "192.168.99.100", + "podIPs": [{ + "ip": "192.168.99.100" + }], + "startTime": "2020-01-10T19:22:40Z", + "containerStatuses": [{ + "name": "nginx", + "state": { + "running": { + "startedAt": "2020-01-10T20:20:55Z" + } + }, + "lastState": { + "terminated": { + "exitCode": 0, + "reason": "Completed", + "startedAt": "2020-01-10T19:22:53Z", + "finishedAt": "2020-01-10T20:12:03Z", + "containerID": "docker://ed8bc068cd313ea5adb72780e8015ab09ecb61ea077e39304b4a3fe581f471c4" + } + }, + "ready": true, + "restartCount": 1, + "image": "nginx:latest", + "imageID": "docker-pullable://nginx@sha256:8aa7f6a9585d908a63e5e418dc5d14ae7467d2e36e1ab4f0d8f9d059a3d071ce", + "containerID": "docker://a8ee34466791bc6f082f271f40cdfc43625cea81831b1029b1e90b4f6949f6df", + "started": true + }], + "qosClass": "BestEffort" + } +} diff --git a/serviceregistration/kubernetes/testing/testserver.go b/serviceregistration/kubernetes/testing/testserver.go new file mode 100644 index 0000000..6ceb940 --- /dev/null +++ b/serviceregistration/kubernetes/testing/testserver.go @@ -0,0 +1,241 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package testing + +import ( + _ "embed" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "path" + "strings" + "sync" + "testing" + + "go.uber.org/atomic" +) + +const ( + ExpectedNamespace = "default" + ExpectedPodName = "shell-demo" +) + +// Pull real-life-based testing data in from files at compile time. +// We decided to embed them in the test binary because of past issues +// with reading files that we encountered on CI workers. + +//go:embed ca.crt +var caCrt string + +//go:embed resp-get-pod.json +var getPodResponse string + +//go:embed resp-not-found.json +var notFoundResponse string + +//go:embed resp-update-pod.json +var updatePodTagsResponse string + +//go:embed token +var token string + +var ( + // ReturnGatewayTimeouts toggles whether the test server should return, + // well, gateway timeouts... 
+ ReturnGatewayTimeouts = atomic.NewBool(false) + + pathToFiles = func() string { + wd, _ := os.Getwd() + repoName := "vault-enterprise" + if !strings.Contains(wd, repoName) { + repoName = "vault" + } + pathParts := strings.Split(wd, repoName) + return pathParts[0] + "vault/serviceregistration/kubernetes/testing/" + }() +) + +// Conf returns the info needed to configure the client to point at +// the test server. This must be done by the caller to avoid an import +// cycle between the client and the testserver. Example usage: +// +// client.Scheme = testConf.ClientScheme +// client.TokenFile = testConf.PathToTokenFile +// client.RootCAFile = testConf.PathToRootCAFile +// if err := os.Setenv(client.EnvVarKubernetesServiceHost, testConf.ServiceHost); err != nil { +// t.Fatal(err) +// } +// if err := os.Setenv(client.EnvVarKubernetesServicePort, testConf.ServicePort); err != nil { +// t.Fatal(err) +// } +type Conf struct { + ClientScheme, PathToTokenFile, PathToRootCAFile, ServiceHost, ServicePort string +} + +// Server returns an http test server that can be used to test +// Kubernetes client code. It also retains the current state, +// and a func to close the server and to clean up any temporary +// files. +func Server(t *testing.T) (testState *State, testConf *Conf, closeFunc func()) { + testState = &State{m: &sync.Map{}} + testConf = &Conf{ + ClientScheme: "http://", + } + + // We're going to have multiple close funcs to call. + var closers []func() + closeFunc = func() { + for _, closer := range closers { + closer() + } + } + + // Plant our token in a place where it can be read for the config. + tmpToken, err := ioutil.TempFile("", "token") + if err != nil { + t.Fatal(err) + } + closers = append(closers, func() { + os.Remove(tmpToken.Name()) + }) + if _, err = tmpToken.WriteString(token); err != nil { + closeFunc() + t.Fatal(err) + } + if err := tmpToken.Close(); err != nil { + closeFunc() + t.Fatal(err) + } + testConf.PathToTokenFile = tmpToken.Name() + + tmpCACrt, err := ioutil.TempFile("", "ca.crt") + if err != nil { + closeFunc() + t.Fatal(err) + } + closers = append(closers, func() { + os.Remove(tmpCACrt.Name()) + }) + if _, err = tmpCACrt.WriteString(caCrt); err != nil { + closeFunc() + t.Fatal(err) + } + if err := tmpCACrt.Close(); err != nil { + closeFunc() + t.Fatal(err) + } + testConf.PathToRootCAFile = tmpCACrt.Name() + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if ReturnGatewayTimeouts.Load() { + w.WriteHeader(504) + return + } + namespace, podName, err := parsePath(r.URL.Path) + if err != nil { + w.WriteHeader(400) + w.Write([]byte(fmt.Sprintf("unable to parse %s: %s", r.URL.Path, err.Error()))) + return + } + + switch { + case namespace != ExpectedNamespace, podName != ExpectedPodName: + w.WriteHeader(404) + w.Write([]byte(notFoundResponse)) + return + case r.Method == http.MethodGet: + w.WriteHeader(200) + w.Write([]byte(getPodResponse)) + return + case r.Method == http.MethodPatch: + var patches []interface{} + if err := json.NewDecoder(r.Body).Decode(&patches); err != nil { + w.WriteHeader(400) + w.Write([]byte(fmt.Sprintf("unable to decode patches %s: %s", r.URL.Path, err.Error()))) + return + } + for _, patch := range patches { + patchMap := patch.(map[string]interface{}) + p := patchMap["path"].(string) + testState.store(p, patchMap) + } + w.WriteHeader(200) + w.Write([]byte(updatePodTagsResponse)) + return + default: + w.WriteHeader(400) + w.Write([]byte(fmt.Sprintf("unexpected request method: %s", r.Method))) + } + 
})) + closers = append(closers, ts.Close) + + // ts.URL example: http://127.0.0.1:35681 + urlFields := strings.Split(ts.URL, "://") + if len(urlFields) != 2 { + closeFunc() + t.Fatal("received unexpected test url: " + ts.URL) + } + urlFields = strings.Split(urlFields[1], ":") + if len(urlFields) != 2 { + closeFunc() + t.Fatal("received unexpected test url: " + ts.URL) + } + testConf.ServiceHost = urlFields[0] + testConf.ServicePort = urlFields[1] + return testState, testConf, closeFunc +} + +type State struct { + m *sync.Map +} + +func (s *State) NumPatches() int { + l := 0 + f := func(key, value interface{}) bool { + l++ + return true + } + s.m.Range(f) + return l +} + +func (s *State) Get(key string) map[string]interface{} { + v, ok := s.m.Load(key) + if !ok { + return nil + } + patch, ok := v.(map[string]interface{}) + if !ok { + return nil + } + return patch +} + +func (s *State) store(k string, p map[string]interface{}) { + s.m.Store(k, p) +} + +// The path should be formatted like this: +// fmt.Sprintf("/api/v1/namespaces/%s/pods/%s", namespace, podName) +func parsePath(urlPath string) (namespace, podName string, err error) { + original := urlPath + podName = path.Base(urlPath) + urlPath = strings.TrimSuffix(urlPath, "/pods/"+podName) + namespace = path.Base(urlPath) + if original != fmt.Sprintf("/api/v1/namespaces/%s/pods/%s", namespace, podName) { + return "", "", fmt.Errorf("received unexpected path: %s", original) + } + return namespace, podName, nil +} + +func readFile(fileName string) (string, error) { + b, err := ioutil.ReadFile(pathToFiles + fileName) + if err != nil { + return "", err + } + return string(b), nil +} diff --git a/serviceregistration/kubernetes/testing/token b/serviceregistration/kubernetes/testing/token new file mode 100644 index 0000000..42d4949 --- /dev/null +++ b/serviceregistration/kubernetes/testing/token @@ -0,0 +1 @@ +eyJhbGciOiJSUzI1NiIsImtpZCI6IjZVQU91ckJYcTZKRHQtWHpaOExib2EyUlFZQWZObms2d25mY3ZtVm1NNUUifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6ImRlZmF1bHQtdG9rZW4tNWZqdDkiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGVmYXVsdCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6ImY0NGUyMDIxLTU2YWItNDEzNC1hMjMxLTBlMDJmNjhmNzJhNiIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDpkZWZhdWx0OmRlZmF1bHQifQ.hgMbuT0hlxG04fDvI_Iyxtbwc8M-i3q3K7CqIGC_jYSjVlyezHN_0BeIB3rE0_M2xvbIs6chsWFZVsK_8Pj6ho7VT0x5PWy5n6KsqTBz8LPpjWpsaxpYQos0RzgA3KLnuzZE8Cl-v-PwWQK57jgbS4AdlXujQXdtLXJNwNAKI0pvCASA6UXP55_X845EsJkyT1J-bURSS3Le3g9A4pDoQ_MUv7hqa-p7yQEtFfYCkq1KKrUJZMRjmS4qda1rg-Em-dw9RFvQtPodRYF0DKT7A7qgmLUfIkuky3NnsQtvaUo8ZVtUiwIEfRdqw1oQIY4CSYz-wUl2xZa7n2QQBROE7w \ No newline at end of file diff --git a/serviceregistration/service_registration.go b/serviceregistration/service_registration.go new file mode 100644 index 0000000..79f5b20 --- /dev/null +++ b/serviceregistration/service_registration.go @@ -0,0 +1,99 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package serviceregistration + +/* +ServiceRegistration is an interface that can be fulfilled to use +varying applications for service discovery, regardless of the physical +back-end used. + +Service registration implements notifications for changes in _dynamic_ +properties regarding Vault's health. 
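+State is delivered to the implementation through the Notify* methods on the
+interface below.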
Vault's version is the only static
+property given in state for now; more can be added later if a need arises.
+*/
+
+import (
+ "sync"
+
+ log "github.com/hashicorp/go-hclog"
+)
+
+type State struct {
+ VaultVersion string
+ IsInitialized, IsSealed, IsActive, IsPerformanceStandby bool
+}
+
+// Factory is the factory function to create a ServiceRegistration.
+// The config is the key/value pairs set _inside_ the service registration config stanza.
+// The state is the initial state.
+// The redirectAddr is Vault core's RedirectAddr.
+type Factory func(config map[string]string, logger log.Logger, state State) (ServiceRegistration, error)
+
+// ServiceRegistration is an interface that advertises the state of Vault to a
+// service discovery network.
+type ServiceRegistration interface {
+ // Run provides a shutdownCh, wait WaitGroup, and redirectAddr. The
+ // shutdownCh is for monitoring when a shutdown occurs and initiating any
+ // actions needed to leave service registration in a final state. Adding
+ // to wait before returning causes Vault to block its shutdown until the
+ // implementation calls wait.Done(). The redirectAddr is an optional
+ // parameter for implementations that might need to communicate with
+ // Vault's listener via this address.
+ //
+ // Run is called just after Factory instantiation, so it can be relied
+ // upon for controlling shutdown behavior.
+ // Here is an example of its intended use:
+ // func Run(shutdownCh <-chan struct{}, wait *sync.WaitGroup, redirectAddr string) error {
+ //
+ // // Since we are going to want Vault to wait to shutdown
+ // // until after we do cleanup...
+ // wait.Add(1)
+ //
+ // // Run shutdown code in a goroutine so Run doesn't block.
+ // go func(){
+ // // Ensure that when this ends, no matter how it ends,
+ // // we don't cause Vault to hang on shutdown.
+ // defer wait.Done()
+ //
+ // // Now wait until we actually receive a shutdown.
+ // <-shutdownCh
+ //
+ // // Now do whatever we need to clean up.
+ // if err := someService.SetFinalState(); err != nil {
+ // // Log it at error level.
+ // }
+ // }()
+ // return nil
+ // }
+ Run(shutdownCh <-chan struct{}, wait *sync.WaitGroup, redirectAddr string) error
+
+ // NotifyActiveStateChange is used by Core to notify that this Vault
+ // instance has changed whether it is the active node or a standby.
+ // If errors are returned, Vault only logs a warning, so it is
+ // the implementation's responsibility to retry updating state
+ // in the face of errors.
+ NotifyActiveStateChange(isActive bool) error
+
+ // NotifySealedStateChange is used by Core to notify that Vault has changed
+ // its Sealed status to sealed or unsealed.
+ // If errors are returned, Vault only logs a warning, so it is
+ // the implementation's responsibility to retry updating state
+ // in the face of errors.
+ NotifySealedStateChange(isSealed bool) error
+
+ // NotifyPerformanceStandbyStateChange is used by Core to notify that this
+ // Vault instance has changed its performance standby status.
+ // If errors are returned, Vault only logs a warning, so it is
+ // the implementation's responsibility to retry updating state
+ // in the face of errors.
+ NotifyPerformanceStandbyStateChange(isStandby bool) error
+
+ // NotifyInitializedStateChange is used by Core to notify that storage
+ // has been initialized. An unsealed core will always also be initialized.
+ // If errors are returned, Vault only logs a warning, so it is + // the implementation's responsibility to retry updating state + // in the face of errors. + NotifyInitializedStateChange(isInitialized bool) error +} diff --git a/shamir/shamir.go b/shamir/shamir.go new file mode 100644 index 0000000..d9c0271 --- /dev/null +++ b/shamir/shamir.go @@ -0,0 +1,246 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package shamir + +import ( + "crypto/rand" + "crypto/subtle" + "fmt" + mathrand "math/rand" + "time" +) + +const ( + // ShareOverhead is the byte size overhead of each share + // when using Split on a secret. This is caused by appending + // a one byte tag to the share. + ShareOverhead = 1 +) + +// polynomial represents a polynomial of arbitrary degree +type polynomial struct { + coefficients []uint8 +} + +// makePolynomial constructs a random polynomial of the given +// degree but with the provided intercept value. +func makePolynomial(intercept, degree uint8) (polynomial, error) { + // Create a wrapper + p := polynomial{ + coefficients: make([]byte, degree+1), + } + + // Ensure the intercept is set + p.coefficients[0] = intercept + + // Assign random co-efficients to the polynomial + if _, err := rand.Read(p.coefficients[1:]); err != nil { + return p, err + } + + return p, nil +} + +// evaluate returns the value of the polynomial for the given x +func (p *polynomial) evaluate(x uint8) uint8 { + // Special case the origin + if x == 0 { + return p.coefficients[0] + } + + // Compute the polynomial value using Horner's method. + degree := len(p.coefficients) - 1 + out := p.coefficients[degree] + for i := degree - 1; i >= 0; i-- { + coeff := p.coefficients[i] + out = add(mult(out, x), coeff) + } + return out +} + +// interpolatePolynomial takes N sample points and returns +// the value at a given x using a lagrange interpolation. +func interpolatePolynomial(x_samples, y_samples []uint8, x uint8) uint8 { + limit := len(x_samples) + var result, basis uint8 + for i := 0; i < limit; i++ { + basis = 1 + for j := 0; j < limit; j++ { + if i == j { + continue + } + num := add(x, x_samples[j]) + denom := add(x_samples[i], x_samples[j]) + term := div(num, denom) + basis = mult(basis, term) + } + group := mult(y_samples[i], basis) + result = add(result, group) + } + return result +} + +// div divides two numbers in GF(2^8) +func div(a, b uint8) uint8 { + if b == 0 { + // leaks some timing information but we don't care anyways as this + // should never happen, hence the panic + panic("divide by zero") + } + + ret := int(mult(a, inverse(b))) + + // Ensure we return zero if a is zero but aren't subject to timing attacks + ret = subtle.ConstantTimeSelect(subtle.ConstantTimeByteEq(a, 0), 0, ret) + return uint8(ret) +} + +// inverse calculates the inverse of a number in GF(2^8) +func inverse(a uint8) uint8 { + b := mult(a, a) + c := mult(a, b) + b = mult(c, c) + b = mult(b, b) + c = mult(b, c) + b = mult(b, b) + b = mult(b, b) + b = mult(b, c) + b = mult(b, b) + b = mult(a, b) + + return mult(b, b) +} + +// mult multiplies two numbers in GF(2^8) +func mult(a, b uint8) (out uint8) { + var r uint8 = 0 + var i uint8 = 8 + + for i > 0 { + i-- + r = (-(b >> i & 1) & a) ^ (-(r >> 7) & 0x1B) ^ (r + r) + } + + return r +} + +// add combines two numbers in GF(2^8) +// This can also be used for subtraction since it is symmetric. 
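+// For example, add(0x53, 0xCA) == 0x99, and because XOR is its own inverse,
+// add(0x99, 0xCA) == 0x53 again.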
+func add(a, b uint8) uint8 { + return a ^ b +} + +// Split takes an arbitrarily long secret and generates a `parts` +// number of shares, `threshold` of which are required to reconstruct +// the secret. The parts and threshold must be at least 2, and less +// than 256. The returned shares are each one byte longer than the secret +// as they attach a tag used to reconstruct the secret. +func Split(secret []byte, parts, threshold int) ([][]byte, error) { + // Sanity check the input + if parts < threshold { + return nil, fmt.Errorf("parts cannot be less than threshold") + } + if parts > 255 { + return nil, fmt.Errorf("parts cannot exceed 255") + } + if threshold < 2 { + return nil, fmt.Errorf("threshold must be at least 2") + } + if threshold > 255 { + return nil, fmt.Errorf("threshold cannot exceed 255") + } + if len(secret) == 0 { + return nil, fmt.Errorf("cannot split an empty secret") + } + + // Generate random list of x coordinates + mathrand.Seed(time.Now().UnixNano()) + xCoordinates := mathrand.Perm(255) + + // Allocate the output array, initialize the final byte + // of the output with the offset. The representation of each + // output is {y1, y2, .., yN, x}. + out := make([][]byte, parts) + for idx := range out { + out[idx] = make([]byte, len(secret)+1) + out[idx][len(secret)] = uint8(xCoordinates[idx]) + 1 + } + + // Construct a random polynomial for each byte of the secret. + // Because we are using a field of size 256, we can only represent + // a single byte as the intercept of the polynomial, so we must + // use a new polynomial for each byte. + for idx, val := range secret { + p, err := makePolynomial(val, uint8(threshold-1)) + if err != nil { + return nil, fmt.Errorf("failed to generate polynomial: %w", err) + } + + // Generate a `parts` number of (x,y) pairs + // We cheat by encoding the x value once as the final index, + // so that it only needs to be stored once. + for i := 0; i < parts; i++ { + x := uint8(xCoordinates[i]) + 1 + y := p.evaluate(x) + out[i][idx] = y + } + } + + // Return the encoded secrets + return out, nil +} + +// Combine is used to reverse a Split and reconstruct a secret +// once a `threshold` number of parts are available. 
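+//
+// A minimal round-trip sketch (values are illustrative):
+//
+//	shares, _ := Split([]byte("secret"), 5, 3) // 5 shares, any 3 reconstruct
+//	recovered, _ := Combine(shares[:3])
+//	// bytes.Equal(recovered, []byte("secret")) == true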
+func Combine(parts [][]byte) ([]byte, error) { + // Verify enough parts provided + if len(parts) < 2 { + return nil, fmt.Errorf("less than two parts cannot be used to reconstruct the secret") + } + + // Verify the parts are all the same length + firstPartLen := len(parts[0]) + if firstPartLen < 2 { + return nil, fmt.Errorf("parts must be at least two bytes") + } + for i := 1; i < len(parts); i++ { + if len(parts[i]) != firstPartLen { + return nil, fmt.Errorf("all parts must be the same length") + } + } + + // Create a buffer to store the reconstructed secret + secret := make([]byte, firstPartLen-1) + + // Buffer to store the samples + x_samples := make([]uint8, len(parts)) + y_samples := make([]uint8, len(parts)) + + // Set the x value for each sample and ensure no x_sample values are the same, + // otherwise div() can be unhappy + checkMap := map[byte]bool{} + for i, part := range parts { + samp := part[firstPartLen-1] + if exists := checkMap[samp]; exists { + return nil, fmt.Errorf("duplicate part detected") + } + checkMap[samp] = true + x_samples[i] = samp + } + + // Reconstruct each byte + for idx := range secret { + // Set the y value for each sample + for i, part := range parts { + y_samples[i] = part[idx] + } + + // Interpolate the polynomial and compute the value at 0 + val := interpolatePolynomial(x_samples, y_samples, 0) + + // Evaluate the 0th value to get the intercept + secret[idx] = val + } + return secret, nil +} diff --git a/shamir/shamir_test.go b/shamir/shamir_test.go new file mode 100644 index 0000000..940a34e --- /dev/null +++ b/shamir/shamir_test.go @@ -0,0 +1,201 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package shamir + +import ( + "bytes" + "testing" +) + +func TestSplit_invalid(t *testing.T) { + secret := []byte("test") + + if _, err := Split(secret, 0, 0); err == nil { + t.Fatalf("expect error") + } + + if _, err := Split(secret, 2, 3); err == nil { + t.Fatalf("expect error") + } + + if _, err := Split(secret, 1000, 3); err == nil { + t.Fatalf("expect error") + } + + if _, err := Split(secret, 10, 1); err == nil { + t.Fatalf("expect error") + } + + if _, err := Split(nil, 3, 2); err == nil { + t.Fatalf("expect error") + } +} + +func TestSplit(t *testing.T) { + secret := []byte("test") + + out, err := Split(secret, 5, 3) + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(out) != 5 { + t.Fatalf("bad: %v", out) + } + + for _, share := range out { + if len(share) != len(secret)+1 { + t.Fatalf("bad: %v", out) + } + } +} + +func TestCombine_invalid(t *testing.T) { + // Not enough parts + if _, err := Combine(nil); err == nil { + t.Fatalf("should err") + } + + // Mis-match in length + parts := [][]byte{ + []byte("foo"), + []byte("ba"), + } + if _, err := Combine(parts); err == nil { + t.Fatalf("should err") + } + + // Too short + parts = [][]byte{ + []byte("f"), + []byte("b"), + } + if _, err := Combine(parts); err == nil { + t.Fatalf("should err") + } + + parts = [][]byte{ + []byte("foo"), + []byte("foo"), + } + if _, err := Combine(parts); err == nil { + t.Fatalf("should err") + } +} + +func TestCombine(t *testing.T) { + secret := []byte("test") + + out, err := Split(secret, 5, 3) + if err != nil { + t.Fatalf("err: %v", err) + } + + // There is 5*4*3 possible choices, + // we will just brute force try them all + for i := 0; i < 5; i++ { + for j := 0; j < 5; j++ { + if j == i { + continue + } + for k := 0; k < 5; k++ { + if k == i || k == j { + continue + } + parts := [][]byte{out[i], out[j], out[k]} + recomb, err := 
Combine(parts) + if err != nil { + t.Fatalf("err: %v", err) + } + + if !bytes.Equal(recomb, secret) { + t.Errorf("parts: (i:%d, j:%d, k:%d) %v", i, j, k, parts) + t.Fatalf("bad: %v %v", recomb, secret) + } + } + } + } +} + +func TestField_Add(t *testing.T) { + if out := add(16, 16); out != 0 { + t.Fatalf("Bad: %v 16", out) + } + + if out := add(3, 4); out != 7 { + t.Fatalf("Bad: %v 7", out) + } +} + +func TestField_Mult(t *testing.T) { + if out := mult(3, 7); out != 9 { + t.Fatalf("Bad: %v 9", out) + } + + if out := mult(3, 0); out != 0 { + t.Fatalf("Bad: %v 0", out) + } + + if out := mult(0, 3); out != 0 { + t.Fatalf("Bad: %v 0", out) + } +} + +func TestField_Divide(t *testing.T) { + if out := div(0, 7); out != 0 { + t.Fatalf("Bad: %v 0", out) + } + + if out := div(3, 3); out != 1 { + t.Fatalf("Bad: %v 1", out) + } + + if out := div(6, 3); out != 2 { + t.Fatalf("Bad: %v 2", out) + } +} + +func TestPolynomial_Random(t *testing.T) { + p, err := makePolynomial(42, 2) + if err != nil { + t.Fatalf("err: %v", err) + } + + if p.coefficients[0] != 42 { + t.Fatalf("bad: %v", p.coefficients) + } +} + +func TestPolynomial_Eval(t *testing.T) { + p, err := makePolynomial(42, 1) + if err != nil { + t.Fatalf("err: %v", err) + } + + if out := p.evaluate(0); out != 42 { + t.Fatalf("bad: %v", out) + } + + out := p.evaluate(1) + exp := add(42, mult(1, p.coefficients[1])) + if out != exp { + t.Fatalf("bad: %v %v %v", out, exp, p.coefficients) + } +} + +func TestInterpolate_Rand(t *testing.T) { + for i := 0; i < 256; i++ { + p, err := makePolynomial(uint8(i), 2) + if err != nil { + t.Fatalf("err: %v", err) + } + + x_vals := []uint8{1, 2, 3} + y_vals := []uint8{p.evaluate(1), p.evaluate(2), p.evaluate(3)} + out := interpolatePolynomial(x_vals, y_vals, 0) + if out != uint8(i) { + t.Fatalf("Bad: %v %d", out, i) + } + } +} diff --git a/terraform/README.md b/terraform/README.md new file mode 100644 index 0000000..42a9c23 --- /dev/null +++ b/terraform/README.md @@ -0,0 +1,6 @@ +# Looking for the terraform/aws module? + +This directory has been removed. Please instead refer to: + +- [hashicorp/terraform-aws-vault-starter](https://github.com/hashicorp/terraform-aws-vault-starter), or +- [hashicorp/terraform-aws-vault](https://github.com/hashicorp/terraform-aws-vault) diff --git a/tools/codechecker/.bin/codechecker b/tools/codechecker/.bin/codechecker new file mode 100755 index 0000000..7c47d65 Binary files /dev/null and b/tools/codechecker/.bin/codechecker differ diff --git a/tools/codechecker/main.go b/tools/codechecker/main.go new file mode 100644 index 0000000..94d8048 --- /dev/null +++ b/tools/codechecker/main.go @@ -0,0 +1,14 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package main + +import ( + "github.com/hashicorp/vault/tools/codechecker/pkg/godoctests" + "github.com/hashicorp/vault/tools/codechecker/pkg/gonilnilfunctions" + "golang.org/x/tools/go/analysis/multichecker" +) + +func main() { + multichecker.Main(gonilnilfunctions.Analyzer, godoctests.Analyzer) +} diff --git a/tools/codechecker/pkg/godoctests/analyzer.go b/tools/codechecker/pkg/godoctests/analyzer.go new file mode 100644 index 0000000..e771f9c --- /dev/null +++ b/tools/codechecker/pkg/godoctests/analyzer.go @@ -0,0 +1,81 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package godoctests + +import ( + "go/ast" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +var Analyzer = &analysis.Analyzer{ + Name: "godoctests", + Doc: "Verifies that every go test has a go doc", + Run: run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspector := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.FuncDecl)(nil), + } + + inspector.Preorder(nodeFilter, func(node ast.Node) { + funcDecl, ok := node.(*ast.FuncDecl) + if !ok { + return + } + + // starts with 'Test' + if !strings.HasPrefix(funcDecl.Name.Name, "Test") { + return + } + + // has one parameter + params := funcDecl.Type.Params.List + if len(params) != 1 { + return + } + + // parameter is a pointer + firstParamType, ok := params[0].Type.(*ast.StarExpr) + if !ok { + return + } + + selector, ok := firstParamType.X.(*ast.SelectorExpr) + if !ok { + return + } + + // the pointer comes from package 'testing' + selectorIdent, ok := selector.X.(*ast.Ident) + if !ok { + return + } + if selectorIdent.Name != "testing" { + return + } + + // the pointer has type 'T' + if selector.Sel == nil || selector.Sel.Name != "T" { + return + } + + // then there must be a godoc + if funcDecl.Doc == nil { + pass.Reportf(node.Pos(), "Test %s is missing a go doc", + funcDecl.Name.Name) + } else if !strings.HasPrefix(funcDecl.Doc.Text(), funcDecl.Name.Name) { + pass.Reportf(node.Pos(), "Test %s must have a go doc beginning with the function name", + funcDecl.Name.Name) + } + }) + return nil, nil +} diff --git a/tools/codechecker/pkg/godoctests/analyzer_test.go b/tools/codechecker/pkg/godoctests/analyzer_test.go new file mode 100644 index 0000000..81af24d --- /dev/null +++ b/tools/codechecker/pkg/godoctests/analyzer_test.go @@ -0,0 +1,23 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package godoctests + +import ( + "os" + "path/filepath" + "testing" + + "golang.org/x/tools/go/analysis/analysistest" +) + +// TestAnalyzer runs the analyzer on the test functions in testdata/funcs.go. The report from the analyzer is compared against +// the comments in funcs.go beginning with "want." If there is no comment beginning with "want", then the analyzer is expected +// not to report anything. +func TestAnalyzer(t *testing.T) { + f, err := os.Getwd() + if err != nil { + t.Fatal("failed to get working directory", err) + } + analysistest.Run(t, filepath.Join(f, "testdata"), Analyzer, ".") +} diff --git a/tools/codechecker/pkg/godoctests/testdata/funcs.go b/tools/codechecker/pkg/godoctests/testdata/funcs.go new file mode 100644 index 0000000..e9d5fea --- /dev/null +++ b/tools/codechecker/pkg/godoctests/testdata/funcs.go @@ -0,0 +1,20 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package testdata + +import "testing" + +// Test_GoDocOK is a test that has a go doc +func Test_GoDocOK(t *testing.T) {} + +func Test_NoGoDocFails(t *testing.T) {} // want "Test Test_NoGoDocFails is missing a go doc" + +// This test does not have a go doc beginning with the function name +func Test_BadGoDocFails(t *testing.T) {} // want "Test Test_BadGoDocFails must have a go doc beginning with the function name" + +func test_TestHelperNoGoDocOK(t *testing.T) {} + +func Test_DifferentSignatureNoGoDocOK() {} + +func Test_DifferentSignature2NoGoDocOK(t *testing.T, a int) {} diff --git a/tools/codechecker/pkg/gonilnilfunctions/analyzer.go b/tools/codechecker/pkg/gonilnilfunctions/analyzer.go new file mode 100644 index 0000000..5f4dd1d --- /dev/null +++ b/tools/codechecker/pkg/gonilnilfunctions/analyzer.go @@ -0,0 +1,171 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package gonilnilfunctions + +import ( + "go/ast" + "go/types" + "reflect" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +var Analyzer = &analysis.Analyzer{ + Name: "gonilnilfunctions", + Doc: "Verifies that every go function with error as one of its two return types cannot return nil, nil", + Run: run, + ResultType: reflect.TypeOf((interface{})(nil)), + Requires: []*analysis.Analyzer{inspect.Analyzer}, +} + +// getNestedReturnStatements searches the AST for return statements, and returns +// them in a tail-call optimized list. +func getNestedReturnStatements(s ast.Stmt, returns []*ast.ReturnStmt) []*ast.ReturnStmt { + switch s := s.(type) { + case *ast.BlockStmt: + statements := make([]*ast.ReturnStmt, 0) + for _, stmt := range s.List { + statements = append(statements, getNestedReturnStatements(stmt, make([]*ast.ReturnStmt, 0))...) + } + + return append(returns, statements...) + case *ast.BranchStmt: + return returns + case *ast.ForStmt: + return getNestedReturnStatements(s.Body, returns) + case *ast.IfStmt: + return getNestedReturnStatements(s.Body, returns) + case *ast.LabeledStmt: + return getNestedReturnStatements(s.Stmt, returns) + case *ast.RangeStmt: + return getNestedReturnStatements(s.Body, returns) + case *ast.ReturnStmt: + return append(returns, s) + case *ast.SwitchStmt: + return getNestedReturnStatements(s.Body, returns) + case *ast.SelectStmt: + return getNestedReturnStatements(s.Body, returns) + case *ast.TypeSwitchStmt: + return getNestedReturnStatements(s.Body, returns) + case *ast.CommClause: + statements := make([]*ast.ReturnStmt, 0) + for _, stmt := range s.Body { + statements = append(statements, getNestedReturnStatements(stmt, make([]*ast.ReturnStmt, 0))...) + } + + return append(returns, statements...) + case *ast.CaseClause: + statements := make([]*ast.ReturnStmt, 0) + for _, stmt := range s.Body { + statements = append(statements, getNestedReturnStatements(stmt, make([]*ast.ReturnStmt, 0))...) + } + + return append(returns, statements...) + case *ast.ExprStmt: + return returns + } + return returns +} + +// run runs the analysis, failing for functions whose signatures contain two results including one error +// (e.g. 
(something, error)), that contain multiple nil returns +func run(pass *analysis.Pass) (interface{}, error) { + inspector := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.FuncDecl)(nil), + } + + inspector.Preorder(nodeFilter, func(node ast.Node) { + funcDecl, ok := node.(*ast.FuncDecl) + if !ok { + return + } + + // If the function has the "Ignore" godoc comment, skip it + if strings.Contains(funcDecl.Doc.Text(), "ignore-nil-nil-function-check") { + return + } + + // The function returns something + if funcDecl == nil || funcDecl.Type == nil || funcDecl.Type.Results == nil { + return + } + + // The function has more than 1 return value + results := funcDecl.Type.Results.List + if len(results) < 2 { + return + } + + // isError is a helper function to check if a Field is of error type + isError := func(field *ast.Field) bool { + if named, ok := pass.TypesInfo.TypeOf(field.Type).(*types.Named); ok { + namedObject := named.Obj() + return namedObject != nil && namedObject.Pkg() == nil && namedObject.Name() == "error" + } + return false + } + + // one of the return values is error + var errorFound bool + for _, result := range results { + if isError(result) { + errorFound = true + break + } + } + + if !errorFound { + return + } + + // Since these statements might be e.g. blocks with + // other statements inside, we need to get the return statements + // from inside them, first. + statements := funcDecl.Body.List + + returnStatements := make([]*ast.ReturnStmt, 0) + for _, statement := range statements { + returnStatements = append(returnStatements, getNestedReturnStatements(statement, make([]*ast.ReturnStmt, 0))...) + } + + for _, returnStatement := range returnStatements { + numResultsNil := 0 + results := returnStatement.Results + + // We only want two-arg functions (something, nil) + // We can remove this block in the future if we change our mind + if len(results) != 2 { + continue + } + + for _, result := range results { + // nil is an ident + ident, isIdent := result.(*ast.Ident) + if isIdent { + if ident.Name == "nil" { + // We found one nil in the return list + numResultsNil++ + } + } + } + // We found N nils, and our function returns N results, so this fails the check + if numResultsNil == len(results) { + // All the return values are nil, so we fail the report + pass.Reportf(node.Pos(), "Function %s can return an error, and has a statement that returns only nils", + funcDecl.Name.Name) + + // We break out of the loop of checking return statements, so that we don't repeat ourselves + break + } + } + }) + + var success interface{} + return success, nil +} diff --git a/tools/codechecker/pkg/gonilnilfunctions/analyzer_test.go b/tools/codechecker/pkg/gonilnilfunctions/analyzer_test.go new file mode 100644 index 0000000..b4c8cf2 --- /dev/null +++ b/tools/codechecker/pkg/gonilnilfunctions/analyzer_test.go @@ -0,0 +1,23 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package gonilnilfunctions + +import ( + "os" + "path/filepath" + "testing" + + "golang.org/x/tools/go/analysis/analysistest" +) + +// TestAnalyzer runs the analyzer on the test functions in testdata/funcs.go. The report from the analyzer is compared against +// the comments in funcs.go beginning with "want." If there is no comment beginning with "want", then the analyzer is expected +// not to report anything. 
+func TestAnalyzer(t *testing.T) { + f, err := os.Getwd() + if err != nil { + t.Fatal("failed to get working directory", err) + } + analysistest.Run(t, filepath.Join(f, "testdata"), Analyzer, ".") +} diff --git a/tools/codechecker/pkg/gonilnilfunctions/testdata/funcs.go b/tools/codechecker/pkg/gonilnilfunctions/testdata/funcs.go new file mode 100644 index 0000000..f783f01 --- /dev/null +++ b/tools/codechecker/pkg/gonilnilfunctions/testdata/funcs.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package testdata + +func ReturnReturnOkay() (any, error) { + var i interface{} + return i, nil +} + +func OneGoodOneBad() (any, error) { // want "Function OneGoodOneBad can return an error, and has a statement that returns only nils" + var i interface{} + if true { + return i, nil + } + return nil, nil +} + +func OneBadOneGood() (any, error) { // want "Function OneBadOneGood can return an error, and has a statement that returns only nils" + var i interface{} + if true { + return nil, nil + } + return i, nil +} + +func EmptyFunc() {} + +func TwoNilNils() (any, error) { // want "Function TwoNilNils can return an error, and has a statement that returns only nils" + if true { + return nil, nil + } + return nil, nil +} + +// ThreeResults should not fail, as while it returns nil, nil, nil, it has three results, not two. +func ThreeResults() (any, any, error) { + return nil, nil, nil +} + +func TwoArgsNoError() (any, any) { + return nil, nil +} + +func NestedReturn() (any, error) { // want "Function NestedReturn can return an error, and has a statement that returns only nils" + { + { + { + return nil, nil + } + } + } +} + +func NestedForReturn() (any, error) { // want "Function NestedForReturn can return an error, and has a statement that returns only nils" + for { + for i := 0; i < 100; i++ { + { + return nil, nil + } + } + } +} + +func AnyErrorNilNil() (any, error) { // want "Function AnyErrorNilNil can return an error, and has a statement that returns only nils" + return nil, nil +} + +// Skipped should be skipped because of the following line: +// ignore-nil-nil-function-check +func Skipped() (any, error) { + return nil, nil +} diff --git a/tools/semgrep/ci/atomic.yml b/tools/semgrep/ci/atomic.yml new file mode 100644 index 0000000..1d6b2a9 --- /dev/null +++ b/tools/semgrep/ci/atomic.yml @@ -0,0 +1,20 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +rules: + - id: atomics-64bit-safety + patterns: + - pattern: | + type $TYPE struct { + ... + $VAR atomic.$ATOMIC_TYPE + ... + } + - metavariable-regex: + # We only care about 64 bit atomic types + metavariable: "$ATOMIC_TYPE" + regex: ".*64" + message: "Use pointers with member variables of uber-go/atomic types" + languages: [go] + severity: ERROR + diff --git a/tools/semgrep/ci/bad-multierror-append.yml b/tools/semgrep/ci/bad-multierror-append.yml new file mode 100644 index 0000000..bebb200 --- /dev/null +++ b/tools/semgrep/ci/bad-multierror-append.yml @@ -0,0 +1,17 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +rules: + - id: bad-multierror-append + patterns: + - pattern-either: + - pattern: $ERR = multierror.Append($ERRORS, $ERR) + - pattern: $ERR = multierror.Append($ERR, $ERR) + - pattern: $ERRORS = multierror.Append($ERR, $ERR) + - pattern: $ERRORS = multierror.Append($ERR, $ERRORS) + message: Bad Multierror Append + languages: + - go + severity: ERROR + metadata: + license: MIT diff --git a/tools/semgrep/ci/bad-nil-guard.yml b/tools/semgrep/ci/bad-nil-guard.yml new file mode 100644 index 0000000..7000369 --- /dev/null +++ b/tools/semgrep/ci/bad-nil-guard.yml @@ -0,0 +1,20 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +rules: + - id: bad-nil-guard + patterns: + - pattern-either: + - pattern: $X == nil && <... $X.$F ...> + - pattern: $X != nil || <... $X.$F ...> + - pattern: <... $X.$F ...> && $X != nil + - pattern: <... $X.$F ...> || $X == nil + - pattern: <... $X.$F ...> && $X == nil + - pattern: <... $X.$F ...> || $X != nil + message: Bad nil guard + languages: + - go + severity: ERROR + metadata: + license: MIT + diff --git a/tools/semgrep/ci/error-shadowing.yml b/tools/semgrep/ci/error-shadowing.yml new file mode 100644 index 0000000..43ea1a3 --- /dev/null +++ b/tools/semgrep/ci/error-shadowing.yml @@ -0,0 +1,126 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +rules: + - id: error-shadow-check-types + patterns: + - pattern: | + ..., ($ERR: error) = $FUNC(...) + ... + ..., $ERR = ... + - pattern-not: | + ..., ($ERR: error) = $FUNC(...) + ... + if <... $ERR == nil ...> { + ... + } + ... + ..., $ERR = ... + - pattern-not: | + ..., ($ERR: error) = $FUNC(...) + ... + if <... $ERR != nil ...> { + ... + } + ... + ..., $ERR = ... + - pattern-not: | + ..., ($ERR: error) = $FUNC(...) + ... + $ERRCHECK(..., $ERR, ...) + ... + ..., $ERR = ... + # This case is not specific enough but semgrep doesn't let you do any + # special searching within a switch statement. We will assume if there + # is a switch statement it's doing error checking, though this isn't + # guaranteed. + - pattern-not: | + ..., ($ERR: error) = $FUNC(...) + ... + switch { + case ... + } + ... + ..., $ERR = ... + message: Potential Error Shadowing + languages: + - go + severity: ERROR + + + - id: error-shadow-check-regex + patterns: + - pattern: | + ..., $ERR = $FUNC(...) + ... + ..., $ERR = ... + - pattern-not: | + ..., $ERR = $FUNC(...) + ... + if <... $ERR == nil ...> { + ... + } + ... + ..., $ERR = ... + - pattern-not: | + ..., $ERR = $FUNC(...) + ... + if <... $ERR != nil ...> { + ... + } + ... + ..., $ERR = ... + - pattern-not: | + ..., $ERR = $FUNC(...) + ... + $ERRCHECK(..., $ERR, ...) + ... + ..., $ERR = ... + + # This pattern is used in as a itteration mechanism for a test + - pattern-not: | + ..., $ERR = $FUNC(...) + ... + for $ERR == nil { + ... + } + ... + ..., $ERR = ... + + # A few places we test against logical.Err* types + - pattern-not: | + ..., $ERR = $FUNC(...) + ... + if $ERR != logical.$ERRTYPE { + ... + } + ... + ..., $ERR = ... + # This case is not specific enough but semgrep doesn't let you do any + # special searching within a switch statement. We will assume if there + # is a switch statement it's doing error checking, though this isn't + # guaranteed. + - pattern-not: | + ..., $ERR = $FUNC(...) + ... + switch ... { + case ... + } + ... + ..., $ERR = ... + - pattern-not: | + ..., $ERR = $FUNC(...) + ... + switch { + case ... + } + ... + ..., $ERR = ... 
+ - metavariable-regex: + metavariable: $ERR + regex: "err" + message: Potential Error Shadowing (regex) + languages: + - go + severity: ERROR + diff --git a/tools/semgrep/ci/fmt-printf.yml b/tools/semgrep/ci/fmt-printf.yml new file mode 100644 index 0000000..fc6e824 --- /dev/null +++ b/tools/semgrep/ci/fmt-printf.yml @@ -0,0 +1,19 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +rules: + - id: fmt.Printf + languages: [go] + message: fmt.Printf/Println is forbidden outside of cmd and test files + patterns: + - pattern-either: + - pattern: fmt.Printf + - pattern: fmt.Println + severity: ERROR + paths: + exclude: + - "*_test.go" + - "cmd/*.go" + - "cmd/**/*.go" + - sdk/database/dbplugin/server.go # effectively a cmd + - sdk/database/dbplugin/v5/plugin_server.go # effectively a cmd diff --git a/tools/semgrep/ci/hashsum.yml b/tools/semgrep/ci/hashsum.yml new file mode 100644 index 0000000..82765a1 --- /dev/null +++ b/tools/semgrep/ci/hashsum.yml @@ -0,0 +1,28 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +rules: + - id: hash-sum-without-write + patterns: + - pattern-either: + - pattern: | + $HASH.New().Sum($SLICE) + - pattern: | + $H := $HASH.New() + ... + $H.Sum($SLICE) + - pattern-not: | + $H := $HASH.New() + ... + $H.Write(...) + ... + $H.Sum($SLICE) + - pattern-not: | + $H := $HASH.New() + ... + $FUNC(..., $H, ...) + ... + $H.Sum($SLICE) + message: "odd hash.Sum call flow" + languages: [go] + severity: ERROR diff --git a/tools/semgrep/ci/hmac-bytes.yml b/tools/semgrep/ci/hmac-bytes.yml new file mode 100644 index 0000000..d8da277 --- /dev/null +++ b/tools/semgrep/ci/hmac-bytes.yml @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +rules: + - id: use-hmac-equal + patterns: + - pattern-either: + - pattern: | + $MAC = hmac.New(...) + ... + $H = $MAC.Sum(...) + ... + bytes.Equal($H, ...) + - pattern: | + $MAC = hmac.New(...) + ... + $H = $MAC.Sum(...) + ... + bytes.Equal(..., $H) + message: "Comparing a MAC with bytes.Equal()" + languages: [go] + severity: ERROR diff --git a/tools/semgrep/ci/hmac-hash.yml b/tools/semgrep/ci/hmac-hash.yml new file mode 100644 index 0000000..2b03883 --- /dev/null +++ b/tools/semgrep/ci/hmac-hash.yml @@ -0,0 +1,24 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +rules: + - id: hmac-needs-new + patterns: + - pattern-either: + - pattern: | + $H := $HASH.New() + ... + $FUNC := func() hash.Hash { return $H } + ... + hmac.New($FUNC, ...) + - pattern: | + $H := $HASH.New() + ... + hmac.New(func() hash.Hash { return $H }, ...) + + - pattern: | + hmac.New(func() hash.Hash { return ( $H : hash.Hash) }, ...) + + message: "calling hmac.New with unchanging hash.New" + languages: [go] + severity: ERROR diff --git a/tools/semgrep/ci/logger-format-string.yml b/tools/semgrep/ci/logger-format-string.yml new file mode 100644 index 0000000..14cb6cd --- /dev/null +++ b/tools/semgrep/ci/logger-format-string.yml @@ -0,0 +1,25 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +rules: + - id: logger-used-with-format-string + patterns: + - pattern-either: + - pattern: | + $LOGGER.Trace("=~/.*%[v#T%tbcdoOqxXUbeEfFgGps].*/",...) + - pattern: | + $LOGGER.Debug("=~/.*%[v#T%tbcdoOqxXUbeEfFgGps].*/",...) + - pattern: | + $LOGGER.Info("=~/.*%[v#T%tbcdoOqxXUbeEfFgGps].*/",...) + - pattern: | + $LOGGER.Warn("=~/.*%[v#T%tbcdoOqxXUbeEfFgGps].*/",...) + - pattern: | + $LOGGER.Error("=~/.*%[v#T%tbcdoOqxXUbeEfFgGps].*/",...) 
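+ # For instance (illustrative, not from the rule): logger.Info("mounted %s", path)
+ # is flagged; hclog takes key/value pairs, not a format string, so the
+ # intended call is logger.Info("mounted", "path", path)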
+ - pattern-inside: | + import $LOG "github.com/hashicorp/go-hclog" + ... + message: "Logger message looks like format string" + languages: [go] + severity: ERROR + + \ No newline at end of file diff --git a/tools/semgrep/ci/loop-time-after.yml b/tools/semgrep/ci/loop-time-after.yml new file mode 100644 index 0000000..e3a5183 --- /dev/null +++ b/tools/semgrep/ci/loop-time-after.yml @@ -0,0 +1,20 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +rules: + - id: loop-time-after + pattern: | + for ... { + ... + select { + case ... + case <-time.After(...): + ... + case ... + } + ... + } + message: <-time.After() used in for loop, consider using a ticker or a timer instead + languages: + - go + severity: WARNING \ No newline at end of file diff --git a/tools/semgrep/ci/loopclosure.yml b/tools/semgrep/ci/loopclosure.yml new file mode 100644 index 0000000..88ab134 --- /dev/null +++ b/tools/semgrep/ci/loopclosure.yml @@ -0,0 +1,31 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +rules: + - id: loopclosure + patterns: + - pattern-inside: | + for $A, $B := range $C { + ... + } + - pattern-inside: | + go func() { + ... + }() + - pattern-not-inside: | + go func(..., $B, ...) { + ... + }(..., $B, ...) + - pattern-not-inside: | + go func() { + ... + for ... { + ... + } + ... + }() + - pattern: $B + message: Loop variable $B used inside goroutine + languages: + - go + severity: WARNING diff --git a/tools/semgrep/ci/no-nil-check.yml b/tools/semgrep/ci/no-nil-check.yml new file mode 100644 index 0000000..0b1f1ce --- /dev/null +++ b/tools/semgrep/ci/no-nil-check.yml @@ -0,0 +1,150 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +rules: + - id: nil-check-logical-storage + patterns: + - pattern-either: + - pattern: | + $VAR, $ERR = ($S : logical.Storage).Get(...) + ... + $VAR.$FOO + - pattern: | + $VAR, $ERR = ($S : logical.Storage).Get(...) + ... + $FUNC2(..., $VAR, ...) + - pattern-not: | + $VAR, $ERR = ($S : logical.Storage).Get(...) + ... + if <... $VAR == nil ...> { + ... + } + ... + - pattern-not: | + $VAR, $ERR = ($S : logical.Storage).Get(...) + ... + if <... $VAR != nil ...> { + ... + } + ... + message: missed nil check + languages: + - go + severity: ERROR + + +# physical.Storage.Get + - id: nil-check-physical-storage + patterns: + - pattern-either: + - pattern: | + $VAR, $ERR = ($S : physical.Storage).Get(...) + ... + $VAR.$FOO + - pattern: | + $VAR, $ERR = ($S : physical.Storage).Get(...) + ... + $FUNC2(..., $VAR, ...) + - pattern-not: | + $VAR, $ERR = ($S : physical.Storage).Get(...) + ... + if <... $VAR == nil ...> { + ... + } + ... + - pattern-not: | + $VAR, $ERR = ($S : physical.Storage).Get(...) + ... + if <... $VAR != nil ...> { + ... + } + ... + message: missed nil check + languages: + - go + severity: ERROR + +# NamespaceByID + - id: nil-check-physical-storage-by-nsid + patterns: + - pattern-either: + - pattern: | + $VAR, $ERR = NamespaceByID(...) + ... + $VAR.$FOO + - pattern: | + $VAR, $ERR = NamespaceByID(...) + ... + $FUNC2(..., $VAR, ...) + - pattern-not: | + $VAR, $ERR = NamespaceByID(...) + ... + if <... $VAR == nil ...> { + ... + } + ... + - pattern-not: | + $VAR, $ERR = NamespaceByID(...) + ... + if <... $VAR != nil ...> { + ... + } + ... + # this is a special case for custom nil namespace handling logic in + # activity log + - pattern-not: | + $VAR, $ERR = NamespaceByID(...) + ... + if a.includeInResponse(..., $VAR) { + ... + } + ... 
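+ # e.g. ns, err := NamespaceByID(...) followed by a use of ns without an
+ # intervening nil check is what this rule reports (illustrative)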
+ message: missed nil check + languages: + - go + severity: ERROR + + - id: nil-check-logical-storage-regex + paths: + exclude: + # This file has a valid case that I couldn't work around easily in the + # semgrep rule. Ignore it for now + - "vault/ui.go" + patterns: + - pattern-either: + - pattern: | + $VAR, $ERR = $STORAGE.Get(...) + ... + $VAR.$FOO + - pattern: | + $VAR, $ERR = $STORAGE.Get(...) + ... + $FUNC2(..., $VAR, ...) + - pattern-not: | + $VAR, $ERR = $STORAGE.Get(...) + ... + if <... $VAR == nil ...> { + ... + } + ... + - pattern-not: | + $VAR, $ERR = $STORAGE.Get(...) + ... + if <... $VAR != nil ...> { + ... + } + ... + - pattern-not: | + $VAR, $ERR = $STORAGE.Get(...) + ... + switch $VAR { + case ... + } + ... + - metavariable-regex: + metavariable: $STORAGE + regex: ((.*)Storage|(.*)\.s|(.*)\.barrier|(.*)\.view|(.*)\.barrierView|(.*)\.physical|(.*)\.underlying) + message: missed nil check + languages: + - go + severity: ERROR diff --git a/tools/semgrep/ci/oddifsequence.yml b/tools/semgrep/ci/oddifsequence.yml new file mode 100644 index 0000000..77b71b6 --- /dev/null +++ b/tools/semgrep/ci/oddifsequence.yml @@ -0,0 +1,98 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +rules: + - id: odd-sequence-ifs + patterns: + - pattern-either: + - pattern: | + if $X { return ... } + if $X { ... } + - pattern: | + if ! $X { return ... } + if $X { ... } + - pattern: | + if $X { return ... } + if ! $X { ... } + - pattern: | + if $X == $Y { return ... } + if $X != $Y { ... } + - pattern: | + if $X != $Y { return ... } + if $X == $Y { ... } + - pattern: | + if $X { return ... } + for $X { ... } + - pattern: | + if $X { + if $X { ... } + ... + } + - pattern: | + if $X { + if ! $X { ... } + ... + } + - pattern: | + if ! $X { + if $X { ... } + ... + } + - pattern: | + if $X == $Y { + if $X != $Y { ... } + ... + } + - pattern: | + if $X != $Y { + if $X == $Y { ... } + ... + } + - pattern: | + if $X { + for ! $X { ... } + ... + } + - pattern: | + if ! $X { + for $X { ... } + ... + } + - pattern: | + if $X == $Y { + for $X != $Y { ... } + ... + } + - pattern: | + if $X != $Y { + for $X == $Y { ... } + ... + } + - pattern: | + for $X { + if $X { ... } + ... + } + - pattern: | + for $X { + if ! $X { ... } + ... + } + - pattern: | + for ! $X { + if $X { ... } + ... + } + - pattern: | + for $X == $Y { + if $X != $Y { ... } + ... + } + - pattern: | + for $X != $Y { + if $X == $Y { ... } + ... + } + message: "Odd sequence of ifs" + languages: [go] + severity: ERROR diff --git a/tools/semgrep/ci/return-nil-error.yml b/tools/semgrep/ci/return-nil-error.yml new file mode 100644 index 0000000..a91e4ea --- /dev/null +++ b/tools/semgrep/ci/return-nil-error.yml @@ -0,0 +1,21 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +rules: + - id: return-nil + patterns: + - pattern-either: + - pattern: | + if err == nil { + return err + } + - pattern: | + if err == nil { + return ..., err + } + message: return nil err instead of nil value + languages: + - go + severity: ERROR + metadata: + license: MIT diff --git a/tools/semgrep/ci/return-nil.yml b/tools/semgrep/ci/return-nil.yml new file mode 100644 index 0000000..2a6447c --- /dev/null +++ b/tools/semgrep/ci/return-nil.yml @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +rules: + - id: hc-return-nil + patterns: + - pattern-either: + - pattern: | + if $X == nil { + return $X + } + - pattern: | + if $X != nil { + return ... 
+ } + return $X + message: return nil instead of nil value + languages: + - go + severity: ERROR + metadata: + license: MIT diff --git a/tools/semgrep/ci/time-parse-duration.yml b/tools/semgrep/ci/time-parse-duration.yml new file mode 100644 index 0000000..28f3408 --- /dev/null +++ b/tools/semgrep/ci/time-parse-duration.yml @@ -0,0 +1,10 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +rules: + - id: time-parse-duration + patterns: + - pattern: time.ParseDuration + message: "Usage of time.ParseDuration. Use parseutil.ParseDurationSeconds, instead!" + languages: [go] + severity: ERROR diff --git a/tools/semgrep/ci/wrongerrcall.yml b/tools/semgrep/ci/wrongerrcall.yml new file mode 100644 index 0000000..315e26d --- /dev/null +++ b/tools/semgrep/ci/wrongerrcall.yml @@ -0,0 +1,19 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +rules: + - id: maybe-wrong-err + patterns: + - pattern-either: + - pattern: | + if $F.Err() != nil { + return ..., <... err ...> + } + - pattern: | + if $F.Err() != nil { + return <... err ...> + } + + message: "maybe returning wrong error" + languages: [go] + severity: WARNING diff --git a/tools/semgrep/ci/wronglock.yml b/tools/semgrep/ci/wronglock.yml new file mode 100644 index 0000000..126a544 --- /dev/null +++ b/tools/semgrep/ci/wronglock.yml @@ -0,0 +1,22 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +rules: + - id: wrong-lock-unlock + patterns: + - pattern-either: + - pattern: | + $M.Lock() + defer $M.RUnlock() + - pattern: | + $M.RLock() + defer $M.Unlock() + - pattern: | + $M.Lock() + defer $M.Lock() + - pattern: | + $M.RLock() + defer $M.RLock() + message: "Wrong lock/unlock pair?" + languages: [go] + severity: ERROR diff --git a/tools/semgrep/hostport.yml b/tools/semgrep/hostport.yml new file mode 100644 index 0000000..28613ec --- /dev/null +++ b/tools/semgrep/hostport.yml @@ -0,0 +1,31 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +# https://github.com/golang/go/issues/28308, from @stapelberg +rules: +- id: sprintf-host-port + pattern-either: + - patterns: + - pattern-either: + - pattern: fmt.Sprintf("%s:%s", $NET, $XX) + - pattern: fmt.Sprintf("%s:%d", $NET, $XX) + - pattern: fmt.Sprintf("%s:%s", $XX, $NET) + - pattern: fmt.Sprintf("%s:%d", $XX, $NET) + - pattern: $NET = fmt.Sprintf("%s:%d", ..., ...) + - pattern: $NET = fmt.Sprintf("%s:%s", ..., ...) + - metavariable-regex: + metavariable: '$NET' + regex: '(?i).*(port|addr|host|listen|bind|ip)' + - patterns: + - pattern: fmt.Sprintf($XX, $NET) + - metavariable-regex: + metavariable: '$XX' + regex: '"%s:[0-9]+"' + - metavariable-regex: + metavariable: '$NET' + regex: '(?i).*(port|addr|host|listen|bind|ip)' + message: | + use net.JoinHostPort instead of fmt.Sprintf($XX, $NET) + languages: [go] + severity: ERROR + diff --git a/tools/semgrep/joinpath.yml b/tools/semgrep/joinpath.yml new file mode 100644 index 0000000..ec27127 --- /dev/null +++ b/tools/semgrep/joinpath.yml @@ -0,0 +1,13 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +rules: + - id: use-strings-join-path + patterns: + - pattern-either: + - pattern: strings.Join(..., "/") + - pattern: strings.Join(..., "\\") + - pattern: strings.Join(..., `\`) + message: "did you want path.Join() or filepath.Join()?" 
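+ # e.g. strings.Join([]string{dir, name}, "/") is flagged (illustrative);
+ # filepath.Join(dir, name) also normalizes the separator per OS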
+ languages: [go] + severity: ERROR diff --git a/tools/semgrep/lock-not-unlocked-on-return.yml b/tools/semgrep/lock-not-unlocked-on-return.yml new file mode 100644 index 0000000..6482b71 --- /dev/null +++ b/tools/semgrep/lock-not-unlocked-on-return.yml @@ -0,0 +1,296 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +rules: + - id: lock_not_unlocked + message: | + Lock $LOCK not unlocked on branch with $COND + languages: [go] + severity: WARNING + patterns: + - pattern: | + $LOCK.Lock() + ... + if $COND { + ... + return ... + } + # manual unlock before return + - pattern-not: | + $LOCK.Lock() + ... + if $COND { + ... + $LOCK.Unlock() + ... + return ... + } + - pattern-not: | + $LOCK.Lock() + ... + $LOCK.Unlock() + ... + if $COND { + ... + return ... + } + # manual unlock with release function + - pattern-not: | + $LOCK.Lock() + ... + $UNLOCKFN = $LOCK.Unlock + ... + if $COND { + ... + $UNLOCKFN() + ... + return ... + } + - pattern-not: | + $LOCK.Lock() + ... + $UNLOCKFN := $LOCK.Unlock + ... + if $COND { + ... + $UNLOCKFN() + ... + return ... + } + # defered unlock + - pattern-not: | + $LOCK.Lock() + ... + defer $LOCK.Unlock() + ... + if $COND { + ... + return ... + } + - pattern-not: | + $LOCK.Lock() + ... + if $COND { + ... + defer $LOCK.Unlock() + ... + return ... + } + - pattern-not: | + $LOCK.Lock() + ... + defer func(){ + ... + $LOCK.Unlock() + ... + }() + ... + if $COND { + ... + return ... + } + # deferred unlock with release function + - pattern-not: | + $LOCK.Lock() + ... + $UNLOCKFN := $LOCK.Unlock + ... + defer func() { + ... + $UNLOCKFN() + ... + }() + ... + if $COND { + ... + return ... + } + - pattern-not: | + $LOCK.Lock() + ... + $UNLOCKFN = $LOCK.Unlock + ... + defer func() { + ... + $UNLOCKFN() + ... + }() + ... + if $COND { + ... + return ... + } + # variation where defer is called first, + # unlock function is changed afterwards + - pattern-not-inside: | + defer func() { + ... + $UNLOCKFN() + ... + }() + ... + $LOCK.Lock() + ... + $UNLOCKFN = $LOCK.Unlock + ... + if $COND { + ... + return ... + } + # variation where defer is called previously, lock is reacquired + # maybe include the Unlock call here? + - pattern-not-inside: | + defer $LOCK.Unlock() + ... + $LOCK.Lock() + ... + if $COND { + ... + return ... + } + - id: read_lock_not_unlocked + message: | + Lock $LOCK not unlocked on branch with $COND + languages: [go] + severity: WARNING + patterns: + - pattern: | + $LOCK.RLock() + ... + if $COND { + ... + return ... + } + # manual unlock before return + - pattern-not: | + $LOCK.RLock() + ... + if $COND { + ... + $LOCK.RUnlock() + ... + return ... + } + - pattern-not: | + $LOCK.RLock() + ... + $LOCK.RUnlock() + ... + if $COND { + ... + return ... + } + # manual unlock with release function + - pattern-not: | + $LOCK.RLock() + ... + $UNLOCKFN = $LOCK.RUnlock + ... + if $COND { + ... + $UNLOCKFN() + ... + return ... + } + - pattern-not: | + $LOCK.RLock() + ... + $UNLOCKFN := $LOCK.RUnlock + ... + if $COND { + ... + $UNLOCKFN() + ... + return ... + } + # defered unlock + - pattern-not: | + $LOCK.RLock() + ... + defer $LOCK.RUnlock() + ... + if $COND { + ... + return ... + } + - pattern-not: | + $LOCK.RLock() + ... + if $COND { + ... + defer $LOCK.RUnlock() + ... + return ... + } + - pattern-not: | + $LOCK.RLock() + ... + defer func(){ + ... + $LOCK.RUnlock() + ... + }() + ... + if $COND { + ... + return ... + } + # deferred unlock with release function + - pattern-not: | + $LOCK.RLock() + ... + $UNLOCKFN := $LOCK.RUnlock + ... + defer func() { + ... 
+ $UNLOCKFN() + ... + }() + ... + if $COND { + ... + return ... + } + - pattern-not: | + $LOCK.RLock() + ... + $UNLOCKFN = $LOCK.RUnlock + ... + defer func() { + ... + $UNLOCKFN() + ... + }() + ... + if $COND { + ... + return ... + } + # variation where defer is called first, + # unlock function is changed afterwards + - pattern-not-inside: | + defer func() { + ... + $UNLOCKFN() + ... + }() + ... + $LOCK.RLock() + ... + $UNLOCKFN = $LOCK.RUnlock + ... + if $COND { + ... + return ... + } + # variation where defer is called previously, lock is reacquired + # maybe include the Unlock call here? + - pattern-not-inside: | + defer $LOCK.RUnlock() + ... + $LOCK.RLock() + ... + if $COND { + ... + return ... + } \ No newline at end of file diff --git a/tools/semgrep/logger-sprintf.yml b/tools/semgrep/logger-sprintf.yml new file mode 100644 index 0000000..7d2f48b --- /dev/null +++ b/tools/semgrep/logger-sprintf.yml @@ -0,0 +1,32 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +rules: + - id: logger-used-with-sprintf + patterns: + - pattern-either: + - pattern: | + logger.Trace(fmt.Sprintf(...)) + - pattern: | + logger.Debug(fmt.Sprintf(...)) + - pattern: | + logger.Info(fmt.Sprintf(...)) + - pattern: | + logger.Warn(fmt.Sprintf(...)) + - pattern: | + logger.Error(fmt.Sprintf(...)) + - pattern: | + $PARENT.logger.Trace(fmt.Sprintf(...)) + - pattern: | + $PARENT.logger.Debug(fmt.Sprintf(...)) + - pattern: | + $PARENT.logger.Info(fmt.Sprintf(...)) + - pattern: | + $PARENT.logger.Warn(fmt.Sprintf(...)) + - pattern: | + $PARENT.logger.Error(fmt.Sprintf(...)) + message: "Logger message generated by Sprintf" + languages: [go] + severity: WARNING + + diff --git a/tools/semgrep/paths-with-callbacks-and-operations.yml b/tools/semgrep/paths-with-callbacks-and-operations.yml new file mode 100644 index 0000000..e29cbab --- /dev/null +++ b/tools/semgrep/paths-with-callbacks-and-operations.yml @@ -0,0 +1,16 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +rules: + - id: path-has-both-callbacks-and-operations + patterns: + - pattern-either: + - pattern: | + []*framework.Path{..., {..., Pattern: $PATTERN, ..., Callbacks:$CALL, ..., Operations:$OP, ... }, ...} + - pattern: | + []*framework.Path{..., {..., Pattern: $PATTERN, ..., Operations:$OP, ..., Callbacks:$CALL, ... }, ...} + + message: "Path has both Callbacks and Operations for pattern $PATTERN" + languages: [go] + severity: ERROR + \ No newline at end of file diff --git a/tools/semgrep/paths-with-callbacks.yml b/tools/semgrep/paths-with-callbacks.yml new file mode 100644 index 0000000..9049a1d --- /dev/null +++ b/tools/semgrep/paths-with-callbacks.yml @@ -0,0 +1,12 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +rules: + - id: uses-path-callbacks + patterns: + - pattern: | + []*framework.Path{..., {..., Pattern: $PATTERN, ..., Callbacks:$CALL, ...}, ...} + + message: "Path has a Callback for pattern $PATTERN" + languages: [go] + severity: WARNING \ No newline at end of file diff --git a/tools/semgrep/physical-storage.yml b/tools/semgrep/physical-storage.yml new file mode 100644 index 0000000..e7e978c --- /dev/null +++ b/tools/semgrep/physical-storage.yml @@ -0,0 +1,12 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +rules: + - id: physical-storage-bypass-encryption + patterns: + - pattern-either: + - pattern: $CORE.physical.Put(...) + - pattern: $CORE.underlyingPhysical.Put(...) 
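+ # e.g. $CORE.physical.Put(...) writes straight to the storage backend,
+ # skipping the barrier that would otherwise encrypt the entry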
+ message: "Bypassing encryption by accessing physical storage directly" + languages: [go] + severity: WARNING diff --git a/tools/semgrep/replication-has-state.yml b/tools/semgrep/replication-has-state.yml new file mode 100644 index 0000000..7868e32 --- /dev/null +++ b/tools/semgrep/replication-has-state.yml @@ -0,0 +1,61 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +rules: + - id: replication-state-should-use-IsPerfSecondary + patterns: + - pattern: | + $CORE.ReplicationState().HasState(consts.ReplicationPerformanceSecondary) + # Not the defining function + - pattern-not-inside: | + func ($CORE *Core) IsPerfSecondary() bool { + ... + } + # Not a call to System() + - pattern-not: | + $BACKEND.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary) + - pattern-not: | + $IDENTITYSTORE.localNode.ReplicationState().HasState(consts.ReplicationPerformanceSecondary) + message: "Consider replacing ReplicationState().HasState(...) with IsPerfSecondary()" + languages: [go] + severity: WARNING + fix: $CORE.IsPerfSecondary() + + - id: replication-state-should-use-IsDrSecondar + patterns: + - pattern: | + $CORE.ReplicationState().HasState(consts.ReplicationDRSecondary) + # Not the defining function + - pattern-not-inside: | + func ($CORE *Core) IsDRSecondary() bool { + ... + } + # Not a call to System() + - pattern-not: | + $BACKEND.System().ReplicationState().HasState(consts.ReplicationDRSecondary) + - pattern-not: | + $IDENTITYSTORE.localNode.ReplicationState().HasState(consts.ReplicationDRSecondary) + message: "Consider replacing ReplicationState().HasState(...) with IsDRSecondary()" + languages: [go] + severity: WARNING + fix: $CORE.IsDRSecondary() + + - id: replication-state-in-handler-op + patterns: + - pattern: | + $B.System().ReplicationState().HasState($STATE) + - pattern-inside: | + func ($T $TYPE) $FUNC($CTX context.Context, $REQ *logical.Request, $D *framework.FieldData) (*logical.Response, error) { + ... + } + message: "Consider using frameworks ForwardPerformance* setting" + languages: [go] + severity: WARNING + + - id: replication-state-bad-logic + patterns: + - pattern: | + b.System().LocalMount() || !b.System().ReplicationState().HasState(<... consts.ReplicationPerformanceStandby ...>) + message: "Invalid replication state handling of local mounts" + languages: [go] + severity: ERROR diff --git a/tools/semgrep/self-equals.yml b/tools/semgrep/self-equals.yml new file mode 100644 index 0000000..ae7c1ff --- /dev/null +++ b/tools/semgrep/self-equals.yml @@ -0,0 +1,12 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +rules: + - id: self-equals + patterns: + - pattern-either: + - pattern: $X == $X + - pattern: $X != $X + message: "Comparing with self" + languages: [go] + severity: ERROR diff --git a/tools/tools.go b/tools/tools.go new file mode 100644 index 0000000..a3f7432 --- /dev/null +++ b/tools/tools.go @@ -0,0 +1,43 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:build tools + +// This file ensures tool dependencies are kept in sync. 
+// This is the recommended way of doing this according to
+// https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module
+// To install the following tools at the version used by this repo run:
+// $ make bootstrap
+// or
+// $ go generate -tags tools tools/tools.go
+
+package tools
+
+//go:generate go install golang.org/x/tools/cmd/goimports
+//go:generate go install github.com/client9/misspell/cmd/misspell
+//go:generate go install mvdan.cc/gofumpt
+//go:generate go install google.golang.org/protobuf/cmd/protoc-gen-go
+//go:generate go install google.golang.org/grpc/cmd/protoc-gen-go-grpc
+//go:generate go install github.com/favadi/protoc-go-inject-tag
+//go:generate go install honnef.co/go/tools/cmd/staticcheck
+//go:generate go install github.com/golangci/revgrep/cmd/revgrep
+//go:generate go install gotest.tools/gotestsum
+import (
+	_ "golang.org/x/tools/cmd/goimports"
+
+	_ "github.com/client9/misspell/cmd/misspell"
+
+	_ "mvdan.cc/gofumpt"
+
+	_ "google.golang.org/protobuf/cmd/protoc-gen-go"
+
+	_ "google.golang.org/grpc/cmd/protoc-gen-go-grpc"
+
+	_ "github.com/favadi/protoc-go-inject-tag"
+
+	_ "github.com/golangci/revgrep/cmd/revgrep"
+
+	_ "gotest.tools/gotestsum"
+
+	_ "honnef.co/go/tools/cmd/staticcheck"
+)
diff --git a/ui/.browserslistrc b/ui/.browserslistrc
new file mode 100644
index 0000000..4a67162
--- /dev/null
+++ b/ui/.browserslistrc
@@ -0,0 +1,4 @@
+last 1 chrome version
+last 1 firefox version
+last 1 safari version
+last 1 edge version
diff --git a/ui/.editorconfig b/ui/.editorconfig
new file mode 100644
index 0000000..ab46339
--- /dev/null
+++ b/ui/.editorconfig
@@ -0,0 +1,24 @@
+# EditorConfig helps developers define and maintain consistent
+# coding styles between different editors and IDEs
+# editorconfig.org
+
+root = true
+
+[*]
+end_of_line = lf
+charset = utf-8
+trim_trailing_whitespace = true
+insert_final_newline = true
+indent_style = space
+indent_size = 2
+
+[*.js]
+quote_type = single
+max_line_length = 110
+
+
+[*.hbs]
+insert_final_newline = false
+
+[*.{diff,md}]
+trim_trailing_whitespace = false
diff --git a/ui/.ember-cli b/ui/.ember-cli
new file mode 100644
index 0000000..f6e5987
--- /dev/null
+++ b/ui/.ember-cli
@@ -0,0 +1,16 @@
+{
+  /**
+    Ember CLI sends analytics information by default. The data is completely
+    anonymous, but there are times when you might want to disable this behavior.
+
+    Setting `disableAnalytics` to true will prevent any data from being sent.
+  */
+  "disableAnalytics": true,
+  "output-path": "../http/web_ui",
+
+  /**
+    Setting `isTypeScriptProject` to true will force the blueprint generators to generate TypeScript
+    rather than JavaScript by default, when a TypeScript version of a given blueprint is available.
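+    (For example, with this set to true, `ember generate component foo` would
+    scaffold a .ts file instead of .js wherever a TypeScript blueprint exists.)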
+ */ + "isTypeScriptProject": false +} diff --git a/ui/.env b/ui/.env new file mode 100644 index 0000000..d901454 --- /dev/null +++ b/ui/.env @@ -0,0 +1 @@ +STORYBOOK_NAME=vault \ No newline at end of file diff --git a/ui/.eslintignore b/ui/.eslintignore new file mode 100644 index 0000000..c352da9 --- /dev/null +++ b/ui/.eslintignore @@ -0,0 +1,30 @@ +# unconventional js +/blueprints/*/files/ +/vendor/ + +# compiled output +/dist/ +/tmp/ + +# dependencies +/bower_components/ +/node_modules/ +/.yarn/ + +# misc +/coverage/ +!.* +.*/ +.eslintcache + +# ember-try +/.node_modules.ember-try/ +/bower.json.ember-try +/npm-shrinkwrap.json.ember-try +/package.json.ember-try +/package-lock.json.ember-try +/yarn.lock.ember-try +/tests/helpers/vault-keys.js + +# typescript declaration files +*.d.ts diff --git a/ui/.eslintrc.js b/ui/.eslintrc.js new file mode 100644 index 0000000..bb45483 --- /dev/null +++ b/ui/.eslintrc.js @@ -0,0 +1,81 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +/* eslint-disable no-undef */ + +'use strict'; + +module.exports = { + parser: 'babel-eslint', + root: true, + parserOptions: { + ecmaVersion: 2018, + sourceType: 'module', + ecmaFeatures: { + legacyDecorators: true, + }, + }, + plugins: ['ember'], + extends: [ + 'eslint:recommended', + 'plugin:ember/recommended', + 'plugin:prettier/recommended', + 'plugin:compat/recommended', + ], + env: { + browser: true, + }, + rules: { + 'no-console': 'error', + 'prefer-const': ['error', { destructuring: 'all' }], + 'ember/no-mixins': 'warn', + 'ember/no-new-mixins': 'off', // should be warn but then every line of the mixin is green + // need to be fully glimmerized before these rules can be turned on + 'ember/no-classic-classes': 'off', + 'ember/no-classic-components': 'off', + 'ember/no-actions-hash': 'off', + 'ember/require-tagless-components': 'off', + 'ember/no-component-lifecycle-hooks': 'off', + }, + overrides: [ + // node files + { + files: [ + './.eslintrc.js', + './.prettierrc.js', + './.template-lintrc.js', + './ember-cli-build.js', + './testem.js', + './blueprints/*/index.js', + './config/**/*.js', + './lib/*/index.js', + './server/**/*.js', + ], + parserOptions: { + sourceType: 'script', + }, + env: { + browser: false, + node: true, + }, + plugins: ['node'], + extends: ['plugin:node/recommended'], + rules: { + // this can be removed once the following is fixed + // https://github.com/mysticatea/eslint-plugin-node/issues/77 + 'node/no-unpublished-require': 'off', + }, + }, + { + // test files + files: ['tests/**/*-test.{js,ts}'], + extends: ['plugin:qunit/recommended'], + }, + { + files: ['**/*.ts'], + extends: ['plugin:@typescript-eslint/recommended'], + }, + ], +}; diff --git a/ui/.gitignore b/ui/.gitignore new file mode 100644 index 0000000..f51263f --- /dev/null +++ b/ui/.gitignore @@ -0,0 +1,40 @@ +# See https://help.github.com/ignore-files/ for more about ignoring files. 
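+#
+# The .pnp.* and .yarn/* rules at the bottom follow Yarn Berry's documented
+# recommendation for projects that do not use zero-installs: ignore the cache
+# and PnP artifacts, but keep patches, plugins, releases, sdks, and versions
+# checked in.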
+ +# compiled output +/dist/ +/tmp/ + +# dependencies +/bower_components/ +/node_modules/ + +# misc +/.sass-cache +/.eslintcache +/connect.lock +/coverage/ +/libpeerconnection.log +/npm-debug.log* +/testem.log +/yarn-error.log +package-lock.json + +# ember-try +/.node_modules.ember-try/ +/bower.json.ember-try +/npm-shrinkwrap.json.ember-try +/package.json.ember-try +/package-lock.json.ember-try +/yarn.lock.ember-try + +# broccoli-debug +/DEBUG/ + +# yarn +.pnp.* +.yarn/* +!.yarn/patches +!.yarn/plugins +!.yarn/releases +!.yarn/sdks +!.yarn/versions diff --git a/ui/.prettierignore b/ui/.prettierignore new file mode 100644 index 0000000..4178fd5 --- /dev/null +++ b/ui/.prettierignore @@ -0,0 +1,25 @@ +# unconventional js +/blueprints/*/files/ +/vendor/ + +# compiled output +/dist/ +/tmp/ + +# dependencies +/bower_components/ +/node_modules/ + +# misc +/coverage/ +!.* +.eslintcache +.lint-todo/ + +# ember-try +/.node_modules.ember-try/ +/bower.json.ember-try +/npm-shrinkwrap.json.ember-try +/package.json.ember-try +/package-lock.json.ember-try +/yarn.lock.ember-try diff --git a/ui/.prettierrc.js b/ui/.prettierrc.js new file mode 100644 index 0000000..8c77635 --- /dev/null +++ b/ui/.prettierrc.js @@ -0,0 +1,21 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +'use strict'; + +module.exports = { + singleQuote: true, + trailingComma: 'es5', + printWidth: 110, + overrides: [ + { + files: '*.hbs', + options: { + singleQuote: false, + printWidth: 125, + }, + }, + ], +}; diff --git a/ui/.template-lintrc.js b/ui/.template-lintrc.js new file mode 100644 index 0000000..3540521 --- /dev/null +++ b/ui/.template-lintrc.js @@ -0,0 +1,59 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +'use strict'; + +const fs = require('fs'); +let testOverrides = {}; +try { + // ember-template-lint no longer exports anything so we cannot access the rule definitions conventionally + // read file, convert to json string and parse + const toJSON = (str) => { + return JSON.parse( + str + .slice(str.indexOf(':') + 2) // get rid of export statement + .slice(0, -(str.length - str.lastIndexOf(','))) // remove trailing brackets from export + .replace(/:.*,/g, `: ${false},`) // convert values to false + .replace(/,([^,]*)$/, '$1') // remove last comma + .replace(/'/g, '"') // convert to double quotes + .replace(/(\w[^"].*[^"]):/g, '"$1":') // wrap quotes around single word keys + .trim() + ); + }; + const recommended = toJSON( + fs.readFileSync('node_modules/ember-template-lint/lib/config/recommended.js').toString() + ); + const stylistic = toJSON( + fs.readFileSync('node_modules/ember-template-lint/lib/config/stylistic.js').toString() + ); + testOverrides = { + ...recommended, + ...stylistic, + prettier: false, + }; +} catch (error) { + console.log(error); // eslint-disable-line +} + +module.exports = { + plugins: ['ember-template-lint-plugin-prettier'], + extends: ['recommended', 'ember-template-lint-plugin-prettier:recommended'], + rules: { + 'no-action': 'off', + 'no-implicit-this': { + allow: ['supported-auth-backends'], + }, + 'require-input-label': 'off', + }, + ignore: ['lib/story-md', 'tests/**'], + // ember language server vscode extension does not currently respect the ignore field + // override all rules manually as workaround to align with cli + overrides: [ + { + files: ['**/*-test.js'], + rules: testOverrides, + }, + ], +}; diff --git a/ui/.watchmanconfig b/ui/.watchmanconfig new file mode 100644 index 0000000..e7834e3 --- /dev/null +++ 
b/ui/.watchmanconfig @@ -0,0 +1,3 @@ +{ + "ignore_dirs": ["tmp", "dist"] +} diff --git a/ui/.yarn/releases/yarn-3.5.0.cjs b/ui/.yarn/releases/yarn-3.5.0.cjs new file mode 100755 index 0000000..093e64a --- /dev/null +++ b/ui/.yarn/releases/yarn-3.5.0.cjs @@ -0,0 +1,873 @@ +#!/usr/bin/env node +/* eslint-disable */ +//prettier-ignore +(()=>{var Qge=Object.create;var AS=Object.defineProperty;var bge=Object.getOwnPropertyDescriptor;var Sge=Object.getOwnPropertyNames;var vge=Object.getPrototypeOf,xge=Object.prototype.hasOwnProperty;var J=(r=>typeof require<"u"?require:typeof Proxy<"u"?new Proxy(r,{get:(e,t)=>(typeof require<"u"?require:e)[t]}):r)(function(r){if(typeof require<"u")return require.apply(this,arguments);throw new Error('Dynamic require of "'+r+'" is not supported')});var Pge=(r,e)=>()=>(r&&(e=r(r=0)),e);var w=(r,e)=>()=>(e||r((e={exports:{}}).exports,e),e.exports),ut=(r,e)=>{for(var t in e)AS(r,t,{get:e[t],enumerable:!0})},Dge=(r,e,t,i)=>{if(e&&typeof e=="object"||typeof e=="function")for(let n of Sge(e))!xge.call(r,n)&&n!==t&&AS(r,n,{get:()=>e[n],enumerable:!(i=bge(e,n))||i.enumerable});return r};var Pe=(r,e,t)=>(t=r!=null?Qge(vge(r)):{},Dge(e||!r||!r.__esModule?AS(t,"default",{value:r,enumerable:!0}):t,r));var QK=w((GXe,BK)=>{BK.exports=wK;wK.sync=Zge;var IK=J("fs");function Xge(r,e){var t=e.pathExt!==void 0?e.pathExt:process.env.PATHEXT;if(!t||(t=t.split(";"),t.indexOf("")!==-1))return!0;for(var i=0;i{xK.exports=SK;SK.sync=_ge;var bK=J("fs");function SK(r,e,t){bK.stat(r,function(i,n){t(i,i?!1:vK(n,e))})}function _ge(r,e){return vK(bK.statSync(r),e)}function vK(r,e){return r.isFile()&&$ge(r,e)}function $ge(r,e){var t=r.mode,i=r.uid,n=r.gid,s=e.uid!==void 0?e.uid:process.getuid&&process.getuid(),o=e.gid!==void 0?e.gid:process.getgid&&process.getgid(),a=parseInt("100",8),l=parseInt("010",8),c=parseInt("001",8),u=a|l,g=t&c||t&l&&n===o||t&a&&i===s||t&u&&s===0;return g}});var kK=w((qXe,DK)=>{var jXe=J("fs"),sI;process.platform==="win32"||global.TESTING_WINDOWS?sI=QK():sI=PK();DK.exports=SS;SS.sync=efe;function SS(r,e,t){if(typeof e=="function"&&(t=e,e={}),!t){if(typeof Promise!="function")throw new TypeError("callback not provided");return new Promise(function(i,n){SS(r,e||{},function(s,o){s?n(s):i(o)})})}sI(r,e||{},function(i,n){i&&(i.code==="EACCES"||e&&e.ignoreErrors)&&(i=null,n=!1),t(i,n)})}function efe(r,e){try{return sI.sync(r,e||{})}catch(t){if(e&&e.ignoreErrors||t.code==="EACCES")return!1;throw t}}});var MK=w((JXe,OK)=>{var vg=process.platform==="win32"||process.env.OSTYPE==="cygwin"||process.env.OSTYPE==="msys",RK=J("path"),tfe=vg?";":":",FK=kK(),NK=r=>Object.assign(new Error(`not found: ${r}`),{code:"ENOENT"}),LK=(r,e)=>{let t=e.colon||tfe,i=r.match(/\//)||vg&&r.match(/\\/)?[""]:[...vg?[process.cwd()]:[],...(e.path||process.env.PATH||"").split(t)],n=vg?e.pathExt||process.env.PATHEXT||".EXE;.CMD;.BAT;.COM":"",s=vg?n.split(t):[""];return vg&&r.indexOf(".")!==-1&&s[0]!==""&&s.unshift(""),{pathEnv:i,pathExt:s,pathExtExe:n}},TK=(r,e,t)=>{typeof e=="function"&&(t=e,e={}),e||(e={});let{pathEnv:i,pathExt:n,pathExtExe:s}=LK(r,e),o=[],a=c=>new Promise((u,g)=>{if(c===i.length)return e.all&&o.length?u(o):g(NK(r));let f=i[c],h=/^".*"$/.test(f)?f.slice(1,-1):f,p=RK.join(h,r),C=!h&&/^\.[\\\/]/.test(r)?r.slice(0,2)+p:p;u(l(C,c,0))}),l=(c,u,g)=>new Promise((f,h)=>{if(g===n.length)return f(a(u+1));let p=n[g];FK(c+p,{pathExt:s},(C,y)=>{if(!C&&y)if(e.all)o.push(c+p);else return f(c+p);return f(l(c,u,g+1))})});return 
t?a(0).then(c=>t(null,c),t):a(0)},rfe=(r,e)=>{e=e||{};let{pathEnv:t,pathExt:i,pathExtExe:n}=LK(r,e),s=[];for(let o=0;o{"use strict";var KK=(r={})=>{let e=r.env||process.env;return(r.platform||process.platform)!=="win32"?"PATH":Object.keys(e).reverse().find(i=>i.toUpperCase()==="PATH")||"Path"};vS.exports=KK;vS.exports.default=KK});var jK=w((zXe,YK)=>{"use strict";var HK=J("path"),ife=MK(),nfe=UK();function GK(r,e){let t=r.options.env||process.env,i=process.cwd(),n=r.options.cwd!=null,s=n&&process.chdir!==void 0&&!process.chdir.disabled;if(s)try{process.chdir(r.options.cwd)}catch{}let o;try{o=ife.sync(r.command,{path:t[nfe({env:t})],pathExt:e?HK.delimiter:void 0})}catch{}finally{s&&process.chdir(i)}return o&&(o=HK.resolve(n?r.options.cwd:"",o)),o}function sfe(r){return GK(r)||GK(r,!0)}YK.exports=sfe});var qK=w((VXe,PS)=>{"use strict";var xS=/([()\][%!^"`<>&|;, *?])/g;function ofe(r){return r=r.replace(xS,"^$1"),r}function afe(r,e){return r=`${r}`,r=r.replace(/(\\*)"/g,'$1$1\\"'),r=r.replace(/(\\*)$/,"$1$1"),r=`"${r}"`,r=r.replace(xS,"^$1"),e&&(r=r.replace(xS,"^$1")),r}PS.exports.command=ofe;PS.exports.argument=afe});var WK=w((XXe,JK)=>{"use strict";JK.exports=/^#!(.*)/});var VK=w((ZXe,zK)=>{"use strict";var Afe=WK();zK.exports=(r="")=>{let e=r.match(Afe);if(!e)return null;let[t,i]=e[0].replace(/#! ?/,"").split(" "),n=t.split("/").pop();return n==="env"?i:i?`${n} ${i}`:n}});var ZK=w((_Xe,XK)=>{"use strict";var DS=J("fs"),lfe=VK();function cfe(r){let t=Buffer.alloc(150),i;try{i=DS.openSync(r,"r"),DS.readSync(i,t,0,150,0),DS.closeSync(i)}catch{}return lfe(t.toString())}XK.exports=cfe});var tU=w(($Xe,eU)=>{"use strict";var ufe=J("path"),_K=jK(),$K=qK(),gfe=ZK(),ffe=process.platform==="win32",hfe=/\.(?:com|exe)$/i,pfe=/node_modules[\\/].bin[\\/][^\\/]+\.cmd$/i;function dfe(r){r.file=_K(r);let e=r.file&&gfe(r.file);return e?(r.args.unshift(r.file),r.command=e,_K(r)):r.file}function Cfe(r){if(!ffe)return r;let e=dfe(r),t=!hfe.test(e);if(r.options.forceShell||t){let i=pfe.test(e);r.command=ufe.normalize(r.command),r.command=$K.command(r.command),r.args=r.args.map(s=>$K.argument(s,i));let n=[r.command].concat(r.args).join(" ");r.args=["/d","/s","/c",`"${n}"`],r.command=process.env.comspec||"cmd.exe",r.options.windowsVerbatimArguments=!0}return r}function mfe(r,e,t){e&&!Array.isArray(e)&&(t=e,e=null),e=e?e.slice(0):[],t=Object.assign({},t);let i={command:r,args:e,options:t,file:void 0,original:{command:r,args:e}};return t.shell?i:Cfe(i)}eU.exports=mfe});var nU=w((eZe,iU)=>{"use strict";var kS=process.platform==="win32";function RS(r,e){return Object.assign(new Error(`${e} ${r.command} ENOENT`),{code:"ENOENT",errno:"ENOENT",syscall:`${e} ${r.command}`,path:r.command,spawnargs:r.args})}function Efe(r,e){if(!kS)return;let t=r.emit;r.emit=function(i,n){if(i==="exit"){let s=rU(n,e,"spawn");if(s)return t.call(r,"error",s)}return t.apply(r,arguments)}}function rU(r,e){return kS&&r===1&&!e.file?RS(e.original,"spawn"):null}function Ife(r,e){return kS&&r===1&&!e.file?RS(e.original,"spawnSync"):null}iU.exports={hookChildProcess:Efe,verifyENOENT:rU,verifyENOENTSync:Ife,notFoundError:RS}});var LS=w((tZe,xg)=>{"use strict";var sU=J("child_process"),FS=tU(),NS=nU();function oU(r,e,t){let i=FS(r,e,t),n=sU.spawn(i.command,i.args,i.options);return NS.hookChildProcess(n,i),n}function yfe(r,e,t){let i=FS(r,e,t),n=sU.spawnSync(i.command,i.args,i.options);return n.error=n.error||NS.verifyENOENTSync(n.status,i),n}xg.exports=oU;xg.exports.spawn=oU;xg.exports.sync=yfe;xg.exports._parse=FS;xg.exports._enoent=NS});var 
AU=w((rZe,aU)=>{"use strict";function wfe(r,e){function t(){this.constructor=r}t.prototype=e.prototype,r.prototype=new t}function Wl(r,e,t,i){this.message=r,this.expected=e,this.found=t,this.location=i,this.name="SyntaxError",typeof Error.captureStackTrace=="function"&&Error.captureStackTrace(this,Wl)}wfe(Wl,Error);Wl.buildMessage=function(r,e){var t={literal:function(c){return'"'+n(c.text)+'"'},class:function(c){var u="",g;for(g=0;g0){for(g=1,f=1;g>",ie=me(">>",!1),de=">&",_e=me(">&",!1),Pt=">",It=me(">",!1),Or="<<<",ii=me("<<<",!1),gi="<&",hr=me("<&",!1),fi="<",ni=me("<",!1),Os=function(m){return{type:"argument",segments:[].concat(...m)}},pr=function(m){return m},Ii="$'",es=me("$'",!1),ua="'",pA=me("'",!1),ag=function(m){return[{type:"text",text:m}]},ts='""',dA=me('""',!1),ga=function(){return{type:"text",text:""}},yp='"',CA=me('"',!1),mA=function(m){return m},wr=function(m){return{type:"arithmetic",arithmetic:m,quoted:!0}},kl=function(m){return{type:"shell",shell:m,quoted:!0}},Ag=function(m){return{type:"variable",...m,quoted:!0}},Io=function(m){return{type:"text",text:m}},lg=function(m){return{type:"arithmetic",arithmetic:m,quoted:!1}},wp=function(m){return{type:"shell",shell:m,quoted:!1}},Bp=function(m){return{type:"variable",...m,quoted:!1}},vr=function(m){return{type:"glob",pattern:m}},se=/^[^']/,yo=Je(["'"],!0,!1),kn=function(m){return m.join("")},cg=/^[^$"]/,Qt=Je(["$",'"'],!0,!1),Rl=`\\ +`,Rn=me(`\\ +`,!1),rs=function(){return""},is="\\",gt=me("\\",!1),wo=/^[\\$"`]/,At=Je(["\\","$",'"',"`"],!1,!1),an=function(m){return m},S="\\a",Tt=me("\\a",!1),ug=function(){return"a"},Fl="\\b",Qp=me("\\b",!1),bp=function(){return"\b"},Sp=/^[Ee]/,vp=Je(["E","e"],!1,!1),xp=function(){return"\x1B"},G="\\f",yt=me("\\f",!1),EA=function(){return"\f"},Ji="\\n",Nl=me("\\n",!1),Xe=function(){return` +`},fa="\\r",gg=me("\\r",!1),FE=function(){return"\r"},Pp="\\t",NE=me("\\t",!1),ar=function(){return" "},Fn="\\v",Ll=me("\\v",!1),Dp=function(){return"\v"},Ms=/^[\\'"?]/,ha=Je(["\\","'",'"',"?"],!1,!1),An=function(m){return String.fromCharCode(parseInt(m,16))},Te="\\x",fg=me("\\x",!1),Tl="\\u",Ks=me("\\u",!1),Ol="\\U",IA=me("\\U",!1),hg=function(m){return String.fromCodePoint(parseInt(m,16))},pg=/^[0-7]/,pa=Je([["0","7"]],!1,!1),da=/^[0-9a-fA-f]/,rt=Je([["0","9"],["a","f"],["A","f"]],!1,!1),Bo=nt(),yA="-",Ml=me("-",!1),Us="+",Kl=me("+",!1),LE=".",kp=me(".",!1),dg=function(m,b,N){return{type:"number",value:(m==="-"?-1:1)*parseFloat(b.join("")+"."+N.join(""))}},Rp=function(m,b){return{type:"number",value:(m==="-"?-1:1)*parseInt(b.join(""))}},TE=function(m){return{type:"variable",...m}},Ul=function(m){return{type:"variable",name:m}},OE=function(m){return m},Cg="*",wA=me("*",!1),Rr="/",ME=me("/",!1),Hs=function(m,b,N){return{type:b==="*"?"multiplication":"division",right:N}},Gs=function(m,b){return b.reduce((N,U)=>({left:N,...U}),m)},mg=function(m,b,N){return{type:b==="+"?"addition":"subtraction",right:N}},BA="$((",R=me("$((",!1),q="))",Ce=me("))",!1),Ke=function(m){return m},Re="$(",ze=me("$(",!1),dt=function(m){return m},Ft="${",Nn=me("${",!1),qb=":-",S1=me(":-",!1),v1=function(m,b){return{name:m,defaultValue:b}},Jb=":-}",x1=me(":-}",!1),P1=function(m){return{name:m,defaultValue:[]}},Wb=":+",D1=me(":+",!1),k1=function(m,b){return{name:m,alternativeValue:b}},zb=":+}",R1=me(":+}",!1),F1=function(m){return{name:m,alternativeValue:[]}},Vb=function(m){return{name:m}},N1="$",L1=me("$",!1),T1=function(m){return e.isGlobPattern(m)},O1=function(m){return 
m},Xb=/^[a-zA-Z0-9_]/,Zb=Je([["a","z"],["A","Z"],["0","9"],"_"],!1,!1),_b=function(){return T()},$b=/^[$@*?#a-zA-Z0-9_\-]/,eS=Je(["$","@","*","?","#",["a","z"],["A","Z"],["0","9"],"_","-"],!1,!1),M1=/^[(){}<>$|&; \t"']/,Eg=Je(["(",")","{","}","<",">","$","|","&",";"," "," ",'"',"'"],!1,!1),tS=/^[<>&; \t"']/,rS=Je(["<",">","&",";"," "," ",'"',"'"],!1,!1),KE=/^[ \t]/,UE=Je([" "," "],!1,!1),Q=0,Me=0,QA=[{line:1,column:1}],d=0,E=[],I=0,k;if("startRule"in e){if(!(e.startRule in i))throw new Error(`Can't start parsing from rule "`+e.startRule+'".');n=i[e.startRule]}function T(){return r.substring(Me,Q)}function Z(){return Et(Me,Q)}function te(m,b){throw b=b!==void 0?b:Et(Me,Q),Ri([lt(m)],r.substring(Me,Q),b)}function Be(m,b){throw b=b!==void 0?b:Et(Me,Q),Ln(m,b)}function me(m,b){return{type:"literal",text:m,ignoreCase:b}}function Je(m,b,N){return{type:"class",parts:m,inverted:b,ignoreCase:N}}function nt(){return{type:"any"}}function wt(){return{type:"end"}}function lt(m){return{type:"other",description:m}}function it(m){var b=QA[m],N;if(b)return b;for(N=m-1;!QA[N];)N--;for(b=QA[N],b={line:b.line,column:b.column};Nd&&(d=Q,E=[]),E.push(m))}function Ln(m,b){return new Wl(m,null,null,b)}function Ri(m,b,N){return new Wl(Wl.buildMessage(m,b),m,b,N)}function bA(){var m,b;return m=Q,b=Mr(),b===t&&(b=null),b!==t&&(Me=m,b=s(b)),m=b,m}function Mr(){var m,b,N,U,ce;if(m=Q,b=Kr(),b!==t){for(N=[],U=He();U!==t;)N.push(U),U=He();N!==t?(U=Ca(),U!==t?(ce=ns(),ce===t&&(ce=null),ce!==t?(Me=m,b=o(b,U,ce),m=b):(Q=m,m=t)):(Q=m,m=t)):(Q=m,m=t)}else Q=m,m=t;if(m===t)if(m=Q,b=Kr(),b!==t){for(N=[],U=He();U!==t;)N.push(U),U=He();N!==t?(U=Ca(),U===t&&(U=null),U!==t?(Me=m,b=a(b,U),m=b):(Q=m,m=t)):(Q=m,m=t)}else Q=m,m=t;return m}function ns(){var m,b,N,U,ce;for(m=Q,b=[],N=He();N!==t;)b.push(N),N=He();if(b!==t)if(N=Mr(),N!==t){for(U=[],ce=He();ce!==t;)U.push(ce),ce=He();U!==t?(Me=m,b=l(N),m=b):(Q=m,m=t)}else Q=m,m=t;else Q=m,m=t;return m}function Ca(){var m;return r.charCodeAt(Q)===59?(m=c,Q++):(m=t,I===0&&Qe(u)),m===t&&(r.charCodeAt(Q)===38?(m=g,Q++):(m=t,I===0&&Qe(f))),m}function Kr(){var m,b,N;return m=Q,b=K1(),b!==t?(N=age(),N===t&&(N=null),N!==t?(Me=m,b=h(b,N),m=b):(Q=m,m=t)):(Q=m,m=t),m}function age(){var m,b,N,U,ce,Se,ht;for(m=Q,b=[],N=He();N!==t;)b.push(N),N=He();if(b!==t)if(N=Age(),N!==t){for(U=[],ce=He();ce!==t;)U.push(ce),ce=He();if(U!==t)if(ce=Kr(),ce!==t){for(Se=[],ht=He();ht!==t;)Se.push(ht),ht=He();Se!==t?(Me=m,b=p(N,ce),m=b):(Q=m,m=t)}else Q=m,m=t;else Q=m,m=t}else Q=m,m=t;else Q=m,m=t;return m}function Age(){var m;return r.substr(Q,2)===C?(m=C,Q+=2):(m=t,I===0&&Qe(y)),m===t&&(r.substr(Q,2)===B?(m=B,Q+=2):(m=t,I===0&&Qe(v))),m}function K1(){var m,b,N;return m=Q,b=uge(),b!==t?(N=lge(),N===t&&(N=null),N!==t?(Me=m,b=D(b,N),m=b):(Q=m,m=t)):(Q=m,m=t),m}function lge(){var m,b,N,U,ce,Se,ht;for(m=Q,b=[],N=He();N!==t;)b.push(N),N=He();if(b!==t)if(N=cge(),N!==t){for(U=[],ce=He();ce!==t;)U.push(ce),ce=He();if(U!==t)if(ce=K1(),ce!==t){for(Se=[],ht=He();ht!==t;)Se.push(ht),ht=He();Se!==t?(Me=m,b=L(N,ce),m=b):(Q=m,m=t)}else Q=m,m=t;else Q=m,m=t}else Q=m,m=t;else Q=m,m=t;return m}function cge(){var m;return r.substr(Q,2)===H?(m=H,Q+=2):(m=t,I===0&&Qe(j)),m===t&&(r.charCodeAt(Q)===124?(m=$,Q++):(m=t,I===0&&Qe(V))),m}function HE(){var m,b,N,U,ce,Se;if(m=Q,b=Z1(),b!==t)if(r.charCodeAt(Q)===61?(N=W,Q++):(N=t,I===0&&Qe(_)),N!==t)if(U=G1(),U!==t){for(ce=[],Se=He();Se!==t;)ce.push(Se),Se=He();ce!==t?(Me=m,b=A(b,U),m=b):(Q=m,m=t)}else Q=m,m=t;else Q=m,m=t;else 
Q=m,m=t;if(m===t)if(m=Q,b=Z1(),b!==t)if(r.charCodeAt(Q)===61?(N=W,Q++):(N=t,I===0&&Qe(_)),N!==t){for(U=[],ce=He();ce!==t;)U.push(ce),ce=He();U!==t?(Me=m,b=ae(b),m=b):(Q=m,m=t)}else Q=m,m=t;else Q=m,m=t;return m}function uge(){var m,b,N,U,ce,Se,ht,Bt,Jr,hi,ss;for(m=Q,b=[],N=He();N!==t;)b.push(N),N=He();if(b!==t)if(r.charCodeAt(Q)===40?(N=ge,Q++):(N=t,I===0&&Qe(re)),N!==t){for(U=[],ce=He();ce!==t;)U.push(ce),ce=He();if(U!==t)if(ce=Mr(),ce!==t){for(Se=[],ht=He();ht!==t;)Se.push(ht),ht=He();if(Se!==t)if(r.charCodeAt(Q)===41?(ht=O,Q++):(ht=t,I===0&&Qe(F)),ht!==t){for(Bt=[],Jr=He();Jr!==t;)Bt.push(Jr),Jr=He();if(Bt!==t){for(Jr=[],hi=Fp();hi!==t;)Jr.push(hi),hi=Fp();if(Jr!==t){for(hi=[],ss=He();ss!==t;)hi.push(ss),ss=He();hi!==t?(Me=m,b=ue(ce,Jr),m=b):(Q=m,m=t)}else Q=m,m=t}else Q=m,m=t}else Q=m,m=t;else Q=m,m=t}else Q=m,m=t;else Q=m,m=t}else Q=m,m=t;else Q=m,m=t;if(m===t){for(m=Q,b=[],N=He();N!==t;)b.push(N),N=He();if(b!==t)if(r.charCodeAt(Q)===123?(N=he,Q++):(N=t,I===0&&Qe(ke)),N!==t){for(U=[],ce=He();ce!==t;)U.push(ce),ce=He();if(U!==t)if(ce=Mr(),ce!==t){for(Se=[],ht=He();ht!==t;)Se.push(ht),ht=He();if(Se!==t)if(r.charCodeAt(Q)===125?(ht=Fe,Q++):(ht=t,I===0&&Qe(Ne)),ht!==t){for(Bt=[],Jr=He();Jr!==t;)Bt.push(Jr),Jr=He();if(Bt!==t){for(Jr=[],hi=Fp();hi!==t;)Jr.push(hi),hi=Fp();if(Jr!==t){for(hi=[],ss=He();ss!==t;)hi.push(ss),ss=He();hi!==t?(Me=m,b=oe(ce,Jr),m=b):(Q=m,m=t)}else Q=m,m=t}else Q=m,m=t}else Q=m,m=t;else Q=m,m=t}else Q=m,m=t;else Q=m,m=t}else Q=m,m=t;else Q=m,m=t;if(m===t){for(m=Q,b=[],N=He();N!==t;)b.push(N),N=He();if(b!==t){for(N=[],U=HE();U!==t;)N.push(U),U=HE();if(N!==t){for(U=[],ce=He();ce!==t;)U.push(ce),ce=He();if(U!==t){if(ce=[],Se=H1(),Se!==t)for(;Se!==t;)ce.push(Se),Se=H1();else ce=t;if(ce!==t){for(Se=[],ht=He();ht!==t;)Se.push(ht),ht=He();Se!==t?(Me=m,b=le(N,ce),m=b):(Q=m,m=t)}else Q=m,m=t}else Q=m,m=t}else Q=m,m=t}else Q=m,m=t;if(m===t){for(m=Q,b=[],N=He();N!==t;)b.push(N),N=He();if(b!==t){if(N=[],U=HE(),U!==t)for(;U!==t;)N.push(U),U=HE();else N=t;if(N!==t){for(U=[],ce=He();ce!==t;)U.push(ce),ce=He();U!==t?(Me=m,b=we(N),m=b):(Q=m,m=t)}else Q=m,m=t}else Q=m,m=t}}}return m}function U1(){var m,b,N,U,ce;for(m=Q,b=[],N=He();N!==t;)b.push(N),N=He();if(b!==t){if(N=[],U=GE(),U!==t)for(;U!==t;)N.push(U),U=GE();else N=t;if(N!==t){for(U=[],ce=He();ce!==t;)U.push(ce),ce=He();U!==t?(Me=m,b=fe(N),m=b):(Q=m,m=t)}else Q=m,m=t}else Q=m,m=t;return m}function H1(){var m,b,N;for(m=Q,b=[],N=He();N!==t;)b.push(N),N=He();if(b!==t?(N=Fp(),N!==t?(Me=m,b=Ae(N),m=b):(Q=m,m=t)):(Q=m,m=t),m===t){for(m=Q,b=[],N=He();N!==t;)b.push(N),N=He();b!==t?(N=GE(),N!==t?(Me=m,b=Ae(N),m=b):(Q=m,m=t)):(Q=m,m=t)}return m}function Fp(){var m,b,N,U,ce;for(m=Q,b=[],N=He();N!==t;)b.push(N),N=He();return b!==t?(qe.test(r.charAt(Q))?(N=r.charAt(Q),Q++):(N=t,I===0&&Qe(ne)),N===t&&(N=null),N!==t?(U=gge(),U!==t?(ce=GE(),ce!==t?(Me=m,b=Y(N,U,ce),m=b):(Q=m,m=t)):(Q=m,m=t)):(Q=m,m=t)):(Q=m,m=t),m}function gge(){var m;return r.substr(Q,2)===pe?(m=pe,Q+=2):(m=t,I===0&&Qe(ie)),m===t&&(r.substr(Q,2)===de?(m=de,Q+=2):(m=t,I===0&&Qe(_e)),m===t&&(r.charCodeAt(Q)===62?(m=Pt,Q++):(m=t,I===0&&Qe(It)),m===t&&(r.substr(Q,3)===Or?(m=Or,Q+=3):(m=t,I===0&&Qe(ii)),m===t&&(r.substr(Q,2)===gi?(m=gi,Q+=2):(m=t,I===0&&Qe(hr)),m===t&&(r.charCodeAt(Q)===60?(m=fi,Q++):(m=t,I===0&&Qe(ni))))))),m}function GE(){var m,b,N;for(m=Q,b=[],N=He();N!==t;)b.push(N),N=He();return b!==t?(N=G1(),N!==t?(Me=m,b=Ae(N),m=b):(Q=m,m=t)):(Q=m,m=t),m}function G1(){var m,b,N;if(m=Q,b=[],N=Y1(),N!==t)for(;N!==t;)b.push(N),N=Y1();else b=t;return 
b!==t&&(Me=m,b=Os(b)),m=b,m}function Y1(){var m,b;return m=Q,b=fge(),b!==t&&(Me=m,b=pr(b)),m=b,m===t&&(m=Q,b=hge(),b!==t&&(Me=m,b=pr(b)),m=b,m===t&&(m=Q,b=pge(),b!==t&&(Me=m,b=pr(b)),m=b,m===t&&(m=Q,b=dge(),b!==t&&(Me=m,b=pr(b)),m=b))),m}function fge(){var m,b,N,U;return m=Q,r.substr(Q,2)===Ii?(b=Ii,Q+=2):(b=t,I===0&&Qe(es)),b!==t?(N=Ege(),N!==t?(r.charCodeAt(Q)===39?(U=ua,Q++):(U=t,I===0&&Qe(pA)),U!==t?(Me=m,b=ag(N),m=b):(Q=m,m=t)):(Q=m,m=t)):(Q=m,m=t),m}function hge(){var m,b,N,U;return m=Q,r.charCodeAt(Q)===39?(b=ua,Q++):(b=t,I===0&&Qe(pA)),b!==t?(N=Cge(),N!==t?(r.charCodeAt(Q)===39?(U=ua,Q++):(U=t,I===0&&Qe(pA)),U!==t?(Me=m,b=ag(N),m=b):(Q=m,m=t)):(Q=m,m=t)):(Q=m,m=t),m}function pge(){var m,b,N,U;if(m=Q,r.substr(Q,2)===ts?(b=ts,Q+=2):(b=t,I===0&&Qe(dA)),b!==t&&(Me=m,b=ga()),m=b,m===t)if(m=Q,r.charCodeAt(Q)===34?(b=yp,Q++):(b=t,I===0&&Qe(CA)),b!==t){for(N=[],U=j1();U!==t;)N.push(U),U=j1();N!==t?(r.charCodeAt(Q)===34?(U=yp,Q++):(U=t,I===0&&Qe(CA)),U!==t?(Me=m,b=mA(N),m=b):(Q=m,m=t)):(Q=m,m=t)}else Q=m,m=t;return m}function dge(){var m,b,N;if(m=Q,b=[],N=q1(),N!==t)for(;N!==t;)b.push(N),N=q1();else b=t;return b!==t&&(Me=m,b=mA(b)),m=b,m}function j1(){var m,b;return m=Q,b=V1(),b!==t&&(Me=m,b=wr(b)),m=b,m===t&&(m=Q,b=X1(),b!==t&&(Me=m,b=kl(b)),m=b,m===t&&(m=Q,b=oS(),b!==t&&(Me=m,b=Ag(b)),m=b,m===t&&(m=Q,b=mge(),b!==t&&(Me=m,b=Io(b)),m=b))),m}function q1(){var m,b;return m=Q,b=V1(),b!==t&&(Me=m,b=lg(b)),m=b,m===t&&(m=Q,b=X1(),b!==t&&(Me=m,b=wp(b)),m=b,m===t&&(m=Q,b=oS(),b!==t&&(Me=m,b=Bp(b)),m=b,m===t&&(m=Q,b=wge(),b!==t&&(Me=m,b=vr(b)),m=b,m===t&&(m=Q,b=yge(),b!==t&&(Me=m,b=Io(b)),m=b)))),m}function Cge(){var m,b,N;for(m=Q,b=[],se.test(r.charAt(Q))?(N=r.charAt(Q),Q++):(N=t,I===0&&Qe(yo));N!==t;)b.push(N),se.test(r.charAt(Q))?(N=r.charAt(Q),Q++):(N=t,I===0&&Qe(yo));return b!==t&&(Me=m,b=kn(b)),m=b,m}function mge(){var m,b,N;if(m=Q,b=[],N=J1(),N===t&&(cg.test(r.charAt(Q))?(N=r.charAt(Q),Q++):(N=t,I===0&&Qe(Qt))),N!==t)for(;N!==t;)b.push(N),N=J1(),N===t&&(cg.test(r.charAt(Q))?(N=r.charAt(Q),Q++):(N=t,I===0&&Qe(Qt)));else b=t;return b!==t&&(Me=m,b=kn(b)),m=b,m}function J1(){var m,b,N;return m=Q,r.substr(Q,2)===Rl?(b=Rl,Q+=2):(b=t,I===0&&Qe(Rn)),b!==t&&(Me=m,b=rs()),m=b,m===t&&(m=Q,r.charCodeAt(Q)===92?(b=is,Q++):(b=t,I===0&&Qe(gt)),b!==t?(wo.test(r.charAt(Q))?(N=r.charAt(Q),Q++):(N=t,I===0&&Qe(At)),N!==t?(Me=m,b=an(N),m=b):(Q=m,m=t)):(Q=m,m=t)),m}function Ege(){var m,b,N;for(m=Q,b=[],N=W1(),N===t&&(se.test(r.charAt(Q))?(N=r.charAt(Q),Q++):(N=t,I===0&&Qe(yo)));N!==t;)b.push(N),N=W1(),N===t&&(se.test(r.charAt(Q))?(N=r.charAt(Q),Q++):(N=t,I===0&&Qe(yo)));return b!==t&&(Me=m,b=kn(b)),m=b,m}function W1(){var m,b,N;return 
m=Q,r.substr(Q,2)===S?(b=S,Q+=2):(b=t,I===0&&Qe(Tt)),b!==t&&(Me=m,b=ug()),m=b,m===t&&(m=Q,r.substr(Q,2)===Fl?(b=Fl,Q+=2):(b=t,I===0&&Qe(Qp)),b!==t&&(Me=m,b=bp()),m=b,m===t&&(m=Q,r.charCodeAt(Q)===92?(b=is,Q++):(b=t,I===0&&Qe(gt)),b!==t?(Sp.test(r.charAt(Q))?(N=r.charAt(Q),Q++):(N=t,I===0&&Qe(vp)),N!==t?(Me=m,b=xp(),m=b):(Q=m,m=t)):(Q=m,m=t),m===t&&(m=Q,r.substr(Q,2)===G?(b=G,Q+=2):(b=t,I===0&&Qe(yt)),b!==t&&(Me=m,b=EA()),m=b,m===t&&(m=Q,r.substr(Q,2)===Ji?(b=Ji,Q+=2):(b=t,I===0&&Qe(Nl)),b!==t&&(Me=m,b=Xe()),m=b,m===t&&(m=Q,r.substr(Q,2)===fa?(b=fa,Q+=2):(b=t,I===0&&Qe(gg)),b!==t&&(Me=m,b=FE()),m=b,m===t&&(m=Q,r.substr(Q,2)===Pp?(b=Pp,Q+=2):(b=t,I===0&&Qe(NE)),b!==t&&(Me=m,b=ar()),m=b,m===t&&(m=Q,r.substr(Q,2)===Fn?(b=Fn,Q+=2):(b=t,I===0&&Qe(Ll)),b!==t&&(Me=m,b=Dp()),m=b,m===t&&(m=Q,r.charCodeAt(Q)===92?(b=is,Q++):(b=t,I===0&&Qe(gt)),b!==t?(Ms.test(r.charAt(Q))?(N=r.charAt(Q),Q++):(N=t,I===0&&Qe(ha)),N!==t?(Me=m,b=an(N),m=b):(Q=m,m=t)):(Q=m,m=t),m===t&&(m=Ige()))))))))),m}function Ige(){var m,b,N,U,ce,Se,ht,Bt,Jr,hi,ss,aS;return m=Q,r.charCodeAt(Q)===92?(b=is,Q++):(b=t,I===0&&Qe(gt)),b!==t?(N=iS(),N!==t?(Me=m,b=An(N),m=b):(Q=m,m=t)):(Q=m,m=t),m===t&&(m=Q,r.substr(Q,2)===Te?(b=Te,Q+=2):(b=t,I===0&&Qe(fg)),b!==t?(N=Q,U=Q,ce=iS(),ce!==t?(Se=Tn(),Se!==t?(ce=[ce,Se],U=ce):(Q=U,U=t)):(Q=U,U=t),U===t&&(U=iS()),U!==t?N=r.substring(N,Q):N=U,N!==t?(Me=m,b=An(N),m=b):(Q=m,m=t)):(Q=m,m=t),m===t&&(m=Q,r.substr(Q,2)===Tl?(b=Tl,Q+=2):(b=t,I===0&&Qe(Ks)),b!==t?(N=Q,U=Q,ce=Tn(),ce!==t?(Se=Tn(),Se!==t?(ht=Tn(),ht!==t?(Bt=Tn(),Bt!==t?(ce=[ce,Se,ht,Bt],U=ce):(Q=U,U=t)):(Q=U,U=t)):(Q=U,U=t)):(Q=U,U=t),U!==t?N=r.substring(N,Q):N=U,N!==t?(Me=m,b=An(N),m=b):(Q=m,m=t)):(Q=m,m=t),m===t&&(m=Q,r.substr(Q,2)===Ol?(b=Ol,Q+=2):(b=t,I===0&&Qe(IA)),b!==t?(N=Q,U=Q,ce=Tn(),ce!==t?(Se=Tn(),Se!==t?(ht=Tn(),ht!==t?(Bt=Tn(),Bt!==t?(Jr=Tn(),Jr!==t?(hi=Tn(),hi!==t?(ss=Tn(),ss!==t?(aS=Tn(),aS!==t?(ce=[ce,Se,ht,Bt,Jr,hi,ss,aS],U=ce):(Q=U,U=t)):(Q=U,U=t)):(Q=U,U=t)):(Q=U,U=t)):(Q=U,U=t)):(Q=U,U=t)):(Q=U,U=t)):(Q=U,U=t),U!==t?N=r.substring(N,Q):N=U,N!==t?(Me=m,b=hg(N),m=b):(Q=m,m=t)):(Q=m,m=t)))),m}function iS(){var m;return pg.test(r.charAt(Q))?(m=r.charAt(Q),Q++):(m=t,I===0&&Qe(pa)),m}function Tn(){var m;return da.test(r.charAt(Q))?(m=r.charAt(Q),Q++):(m=t,I===0&&Qe(rt)),m}function yge(){var m,b,N,U,ce;if(m=Q,b=[],N=Q,r.charCodeAt(Q)===92?(U=is,Q++):(U=t,I===0&&Qe(gt)),U!==t?(r.length>Q?(ce=r.charAt(Q),Q++):(ce=t,I===0&&Qe(Bo)),ce!==t?(Me=N,U=an(ce),N=U):(Q=N,N=t)):(Q=N,N=t),N===t&&(N=Q,U=Q,I++,ce=_1(),I--,ce===t?U=void 0:(Q=U,U=t),U!==t?(r.length>Q?(ce=r.charAt(Q),Q++):(ce=t,I===0&&Qe(Bo)),ce!==t?(Me=N,U=an(ce),N=U):(Q=N,N=t)):(Q=N,N=t)),N!==t)for(;N!==t;)b.push(N),N=Q,r.charCodeAt(Q)===92?(U=is,Q++):(U=t,I===0&&Qe(gt)),U!==t?(r.length>Q?(ce=r.charAt(Q),Q++):(ce=t,I===0&&Qe(Bo)),ce!==t?(Me=N,U=an(ce),N=U):(Q=N,N=t)):(Q=N,N=t),N===t&&(N=Q,U=Q,I++,ce=_1(),I--,ce===t?U=void 0:(Q=U,U=t),U!==t?(r.length>Q?(ce=r.charAt(Q),Q++):(ce=t,I===0&&Qe(Bo)),ce!==t?(Me=N,U=an(ce),N=U):(Q=N,N=t)):(Q=N,N=t));else b=t;return b!==t&&(Me=m,b=kn(b)),m=b,m}function nS(){var m,b,N,U,ce,Se;if(m=Q,r.charCodeAt(Q)===45?(b=yA,Q++):(b=t,I===0&&Qe(Ml)),b===t&&(r.charCodeAt(Q)===43?(b=Us,Q++):(b=t,I===0&&Qe(Kl))),b===t&&(b=null),b!==t){if(N=[],qe.test(r.charAt(Q))?(U=r.charAt(Q),Q++):(U=t,I===0&&Qe(ne)),U!==t)for(;U!==t;)N.push(U),qe.test(r.charAt(Q))?(U=r.charAt(Q),Q++):(U=t,I===0&&Qe(ne));else 
N=t;if(N!==t)if(r.charCodeAt(Q)===46?(U=LE,Q++):(U=t,I===0&&Qe(kp)),U!==t){if(ce=[],qe.test(r.charAt(Q))?(Se=r.charAt(Q),Q++):(Se=t,I===0&&Qe(ne)),Se!==t)for(;Se!==t;)ce.push(Se),qe.test(r.charAt(Q))?(Se=r.charAt(Q),Q++):(Se=t,I===0&&Qe(ne));else ce=t;ce!==t?(Me=m,b=dg(b,N,ce),m=b):(Q=m,m=t)}else Q=m,m=t;else Q=m,m=t}else Q=m,m=t;if(m===t){if(m=Q,r.charCodeAt(Q)===45?(b=yA,Q++):(b=t,I===0&&Qe(Ml)),b===t&&(r.charCodeAt(Q)===43?(b=Us,Q++):(b=t,I===0&&Qe(Kl))),b===t&&(b=null),b!==t){if(N=[],qe.test(r.charAt(Q))?(U=r.charAt(Q),Q++):(U=t,I===0&&Qe(ne)),U!==t)for(;U!==t;)N.push(U),qe.test(r.charAt(Q))?(U=r.charAt(Q),Q++):(U=t,I===0&&Qe(ne));else N=t;N!==t?(Me=m,b=Rp(b,N),m=b):(Q=m,m=t)}else Q=m,m=t;if(m===t&&(m=Q,b=oS(),b!==t&&(Me=m,b=TE(b)),m=b,m===t&&(m=Q,b=Hl(),b!==t&&(Me=m,b=Ul(b)),m=b,m===t)))if(m=Q,r.charCodeAt(Q)===40?(b=ge,Q++):(b=t,I===0&&Qe(re)),b!==t){for(N=[],U=He();U!==t;)N.push(U),U=He();if(N!==t)if(U=z1(),U!==t){for(ce=[],Se=He();Se!==t;)ce.push(Se),Se=He();ce!==t?(r.charCodeAt(Q)===41?(Se=O,Q++):(Se=t,I===0&&Qe(F)),Se!==t?(Me=m,b=OE(U),m=b):(Q=m,m=t)):(Q=m,m=t)}else Q=m,m=t;else Q=m,m=t}else Q=m,m=t}return m}function sS(){var m,b,N,U,ce,Se,ht,Bt;if(m=Q,b=nS(),b!==t){for(N=[],U=Q,ce=[],Se=He();Se!==t;)ce.push(Se),Se=He();if(ce!==t)if(r.charCodeAt(Q)===42?(Se=Cg,Q++):(Se=t,I===0&&Qe(wA)),Se===t&&(r.charCodeAt(Q)===47?(Se=Rr,Q++):(Se=t,I===0&&Qe(ME))),Se!==t){for(ht=[],Bt=He();Bt!==t;)ht.push(Bt),Bt=He();ht!==t?(Bt=nS(),Bt!==t?(Me=U,ce=Hs(b,Se,Bt),U=ce):(Q=U,U=t)):(Q=U,U=t)}else Q=U,U=t;else Q=U,U=t;for(;U!==t;){for(N.push(U),U=Q,ce=[],Se=He();Se!==t;)ce.push(Se),Se=He();if(ce!==t)if(r.charCodeAt(Q)===42?(Se=Cg,Q++):(Se=t,I===0&&Qe(wA)),Se===t&&(r.charCodeAt(Q)===47?(Se=Rr,Q++):(Se=t,I===0&&Qe(ME))),Se!==t){for(ht=[],Bt=He();Bt!==t;)ht.push(Bt),Bt=He();ht!==t?(Bt=nS(),Bt!==t?(Me=U,ce=Hs(b,Se,Bt),U=ce):(Q=U,U=t)):(Q=U,U=t)}else Q=U,U=t;else Q=U,U=t}N!==t?(Me=m,b=Gs(b,N),m=b):(Q=m,m=t)}else Q=m,m=t;return m}function z1(){var m,b,N,U,ce,Se,ht,Bt;if(m=Q,b=sS(),b!==t){for(N=[],U=Q,ce=[],Se=He();Se!==t;)ce.push(Se),Se=He();if(ce!==t)if(r.charCodeAt(Q)===43?(Se=Us,Q++):(Se=t,I===0&&Qe(Kl)),Se===t&&(r.charCodeAt(Q)===45?(Se=yA,Q++):(Se=t,I===0&&Qe(Ml))),Se!==t){for(ht=[],Bt=He();Bt!==t;)ht.push(Bt),Bt=He();ht!==t?(Bt=sS(),Bt!==t?(Me=U,ce=mg(b,Se,Bt),U=ce):(Q=U,U=t)):(Q=U,U=t)}else Q=U,U=t;else Q=U,U=t;for(;U!==t;){for(N.push(U),U=Q,ce=[],Se=He();Se!==t;)ce.push(Se),Se=He();if(ce!==t)if(r.charCodeAt(Q)===43?(Se=Us,Q++):(Se=t,I===0&&Qe(Kl)),Se===t&&(r.charCodeAt(Q)===45?(Se=yA,Q++):(Se=t,I===0&&Qe(Ml))),Se!==t){for(ht=[],Bt=He();Bt!==t;)ht.push(Bt),Bt=He();ht!==t?(Bt=sS(),Bt!==t?(Me=U,ce=mg(b,Se,Bt),U=ce):(Q=U,U=t)):(Q=U,U=t)}else Q=U,U=t;else Q=U,U=t}N!==t?(Me=m,b=Gs(b,N),m=b):(Q=m,m=t)}else Q=m,m=t;return m}function V1(){var m,b,N,U,ce,Se;if(m=Q,r.substr(Q,3)===BA?(b=BA,Q+=3):(b=t,I===0&&Qe(R)),b!==t){for(N=[],U=He();U!==t;)N.push(U),U=He();if(N!==t)if(U=z1(),U!==t){for(ce=[],Se=He();Se!==t;)ce.push(Se),Se=He();ce!==t?(r.substr(Q,2)===q?(Se=q,Q+=2):(Se=t,I===0&&Qe(Ce)),Se!==t?(Me=m,b=Ke(U),m=b):(Q=m,m=t)):(Q=m,m=t)}else Q=m,m=t;else Q=m,m=t}else Q=m,m=t;return m}function X1(){var m,b,N,U;return m=Q,r.substr(Q,2)===Re?(b=Re,Q+=2):(b=t,I===0&&Qe(ze)),b!==t?(N=Mr(),N!==t?(r.charCodeAt(Q)===41?(U=O,Q++):(U=t,I===0&&Qe(F)),U!==t?(Me=m,b=dt(N),m=b):(Q=m,m=t)):(Q=m,m=t)):(Q=m,m=t),m}function oS(){var m,b,N,U,ce,Se;return 
m=Q,r.substr(Q,2)===Ft?(b=Ft,Q+=2):(b=t,I===0&&Qe(Nn)),b!==t?(N=Hl(),N!==t?(r.substr(Q,2)===qb?(U=qb,Q+=2):(U=t,I===0&&Qe(S1)),U!==t?(ce=U1(),ce!==t?(r.charCodeAt(Q)===125?(Se=Fe,Q++):(Se=t,I===0&&Qe(Ne)),Se!==t?(Me=m,b=v1(N,ce),m=b):(Q=m,m=t)):(Q=m,m=t)):(Q=m,m=t)):(Q=m,m=t)):(Q=m,m=t),m===t&&(m=Q,r.substr(Q,2)===Ft?(b=Ft,Q+=2):(b=t,I===0&&Qe(Nn)),b!==t?(N=Hl(),N!==t?(r.substr(Q,3)===Jb?(U=Jb,Q+=3):(U=t,I===0&&Qe(x1)),U!==t?(Me=m,b=P1(N),m=b):(Q=m,m=t)):(Q=m,m=t)):(Q=m,m=t),m===t&&(m=Q,r.substr(Q,2)===Ft?(b=Ft,Q+=2):(b=t,I===0&&Qe(Nn)),b!==t?(N=Hl(),N!==t?(r.substr(Q,2)===Wb?(U=Wb,Q+=2):(U=t,I===0&&Qe(D1)),U!==t?(ce=U1(),ce!==t?(r.charCodeAt(Q)===125?(Se=Fe,Q++):(Se=t,I===0&&Qe(Ne)),Se!==t?(Me=m,b=k1(N,ce),m=b):(Q=m,m=t)):(Q=m,m=t)):(Q=m,m=t)):(Q=m,m=t)):(Q=m,m=t),m===t&&(m=Q,r.substr(Q,2)===Ft?(b=Ft,Q+=2):(b=t,I===0&&Qe(Nn)),b!==t?(N=Hl(),N!==t?(r.substr(Q,3)===zb?(U=zb,Q+=3):(U=t,I===0&&Qe(R1)),U!==t?(Me=m,b=F1(N),m=b):(Q=m,m=t)):(Q=m,m=t)):(Q=m,m=t),m===t&&(m=Q,r.substr(Q,2)===Ft?(b=Ft,Q+=2):(b=t,I===0&&Qe(Nn)),b!==t?(N=Hl(),N!==t?(r.charCodeAt(Q)===125?(U=Fe,Q++):(U=t,I===0&&Qe(Ne)),U!==t?(Me=m,b=Vb(N),m=b):(Q=m,m=t)):(Q=m,m=t)):(Q=m,m=t),m===t&&(m=Q,r.charCodeAt(Q)===36?(b=N1,Q++):(b=t,I===0&&Qe(L1)),b!==t?(N=Hl(),N!==t?(Me=m,b=Vb(N),m=b):(Q=m,m=t)):(Q=m,m=t)))))),m}function wge(){var m,b,N;return m=Q,b=Bge(),b!==t?(Me=Q,N=T1(b),N?N=void 0:N=t,N!==t?(Me=m,b=O1(b),m=b):(Q=m,m=t)):(Q=m,m=t),m}function Bge(){var m,b,N,U,ce;if(m=Q,b=[],N=Q,U=Q,I++,ce=$1(),I--,ce===t?U=void 0:(Q=U,U=t),U!==t?(r.length>Q?(ce=r.charAt(Q),Q++):(ce=t,I===0&&Qe(Bo)),ce!==t?(Me=N,U=an(ce),N=U):(Q=N,N=t)):(Q=N,N=t),N!==t)for(;N!==t;)b.push(N),N=Q,U=Q,I++,ce=$1(),I--,ce===t?U=void 0:(Q=U,U=t),U!==t?(r.length>Q?(ce=r.charAt(Q),Q++):(ce=t,I===0&&Qe(Bo)),ce!==t?(Me=N,U=an(ce),N=U):(Q=N,N=t)):(Q=N,N=t);else b=t;return b!==t&&(Me=m,b=kn(b)),m=b,m}function Z1(){var m,b,N;if(m=Q,b=[],Xb.test(r.charAt(Q))?(N=r.charAt(Q),Q++):(N=t,I===0&&Qe(Zb)),N!==t)for(;N!==t;)b.push(N),Xb.test(r.charAt(Q))?(N=r.charAt(Q),Q++):(N=t,I===0&&Qe(Zb));else b=t;return b!==t&&(Me=m,b=_b()),m=b,m}function Hl(){var m,b,N;if(m=Q,b=[],$b.test(r.charAt(Q))?(N=r.charAt(Q),Q++):(N=t,I===0&&Qe(eS)),N!==t)for(;N!==t;)b.push(N),$b.test(r.charAt(Q))?(N=r.charAt(Q),Q++):(N=t,I===0&&Qe(eS));else b=t;return b!==t&&(Me=m,b=_b()),m=b,m}function _1(){var m;return M1.test(r.charAt(Q))?(m=r.charAt(Q),Q++):(m=t,I===0&&Qe(Eg)),m}function $1(){var m;return tS.test(r.charAt(Q))?(m=r.charAt(Q),Q++):(m=t,I===0&&Qe(rS)),m}function He(){var m,b;if(m=[],KE.test(r.charAt(Q))?(b=r.charAt(Q),Q++):(b=t,I===0&&Qe(UE)),b!==t)for(;b!==t;)m.push(b),KE.test(r.charAt(Q))?(b=r.charAt(Q),Q++):(b=t,I===0&&Qe(UE));else m=t;return m}if(k=n(),k!==t&&Q===r.length)return k;throw k!==t&&Q{"use strict";function Qfe(r,e){function t(){this.constructor=r}t.prototype=e.prototype,r.prototype=new t}function Vl(r,e,t,i){this.message=r,this.expected=e,this.found=t,this.location=i,this.name="SyntaxError",typeof Error.captureStackTrace=="function"&&Error.captureStackTrace(this,Vl)}Qfe(Vl,Error);Vl.buildMessage=function(r,e){var t={literal:function(c){return'"'+n(c.text)+'"'},class:function(c){var u="",g;for(g=0;g0){for(g=1,f=1;gH&&(H=v,j=[]),j.push(ne))}function Ne(ne,Y){return new Vl(ne,null,null,Y)}function oe(ne,Y,pe){return new Vl(Vl.buildMessage(ne,Y),ne,Y,pe)}function le(){var ne,Y,pe,ie;return 
ne=v,Y=we(),Y!==t?(r.charCodeAt(v)===47?(pe=s,v++):(pe=t,$===0&&Fe(o)),pe!==t?(ie=we(),ie!==t?(D=ne,Y=a(Y,ie),ne=Y):(v=ne,ne=t)):(v=ne,ne=t)):(v=ne,ne=t),ne===t&&(ne=v,Y=we(),Y!==t&&(D=ne,Y=l(Y)),ne=Y),ne}function we(){var ne,Y,pe,ie;return ne=v,Y=fe(),Y!==t?(r.charCodeAt(v)===64?(pe=c,v++):(pe=t,$===0&&Fe(u)),pe!==t?(ie=qe(),ie!==t?(D=ne,Y=g(Y,ie),ne=Y):(v=ne,ne=t)):(v=ne,ne=t)):(v=ne,ne=t),ne===t&&(ne=v,Y=fe(),Y!==t&&(D=ne,Y=f(Y)),ne=Y),ne}function fe(){var ne,Y,pe,ie,de;return ne=v,r.charCodeAt(v)===64?(Y=c,v++):(Y=t,$===0&&Fe(u)),Y!==t?(pe=Ae(),pe!==t?(r.charCodeAt(v)===47?(ie=s,v++):(ie=t,$===0&&Fe(o)),ie!==t?(de=Ae(),de!==t?(D=ne,Y=h(),ne=Y):(v=ne,ne=t)):(v=ne,ne=t)):(v=ne,ne=t)):(v=ne,ne=t),ne===t&&(ne=v,Y=Ae(),Y!==t&&(D=ne,Y=h()),ne=Y),ne}function Ae(){var ne,Y,pe;if(ne=v,Y=[],p.test(r.charAt(v))?(pe=r.charAt(v),v++):(pe=t,$===0&&Fe(C)),pe!==t)for(;pe!==t;)Y.push(pe),p.test(r.charAt(v))?(pe=r.charAt(v),v++):(pe=t,$===0&&Fe(C));else Y=t;return Y!==t&&(D=ne,Y=h()),ne=Y,ne}function qe(){var ne,Y,pe;if(ne=v,Y=[],y.test(r.charAt(v))?(pe=r.charAt(v),v++):(pe=t,$===0&&Fe(B)),pe!==t)for(;pe!==t;)Y.push(pe),y.test(r.charAt(v))?(pe=r.charAt(v),v++):(pe=t,$===0&&Fe(B));else Y=t;return Y!==t&&(D=ne,Y=h()),ne=Y,ne}if(V=n(),V!==t&&v===r.length)return V;throw V!==t&&v{"use strict";function fU(r){return typeof r>"u"||r===null}function Sfe(r){return typeof r=="object"&&r!==null}function vfe(r){return Array.isArray(r)?r:fU(r)?[]:[r]}function xfe(r,e){var t,i,n,s;if(e)for(s=Object.keys(e),t=0,i=s.length;t{"use strict";function Wp(r,e){Error.call(this),this.name="YAMLException",this.reason=r,this.mark=e,this.message=(this.reason||"(unknown reason)")+(this.mark?" "+this.mark.toString():""),Error.captureStackTrace?Error.captureStackTrace(this,this.constructor):this.stack=new Error().stack||""}Wp.prototype=Object.create(Error.prototype);Wp.prototype.constructor=Wp;Wp.prototype.toString=function(e){var t=this.name+": ";return t+=this.reason||"(unknown reason)",!e&&this.mark&&(t+=" "+this.mark.toString()),t};hU.exports=Wp});var CU=w((IZe,dU)=>{"use strict";var pU=Zl();function HS(r,e,t,i,n){this.name=r,this.buffer=e,this.position=t,this.line=i,this.column=n}HS.prototype.getSnippet=function(e,t){var i,n,s,o,a;if(!this.buffer)return null;for(e=e||4,t=t||75,i="",n=this.position;n>0&&`\0\r +\x85\u2028\u2029`.indexOf(this.buffer.charAt(n-1))===-1;)if(n-=1,this.position-n>t/2-1){i=" ... ",n+=5;break}for(s="",o=this.position;ot/2-1){s=" ... 
",o-=5;break}return a=this.buffer.slice(n,o),pU.repeat(" ",e)+i+a+s+` +`+pU.repeat(" ",e+this.position-n+i.length)+"^"};HS.prototype.toString=function(e){var t,i="";return this.name&&(i+='in "'+this.name+'" '),i+="at line "+(this.line+1)+", column "+(this.column+1),e||(t=this.getSnippet(),t&&(i+=`: +`+t)),i};dU.exports=HS});var si=w((yZe,EU)=>{"use strict";var mU=kg(),kfe=["kind","resolve","construct","instanceOf","predicate","represent","defaultStyle","styleAliases"],Rfe=["scalar","sequence","mapping"];function Ffe(r){var e={};return r!==null&&Object.keys(r).forEach(function(t){r[t].forEach(function(i){e[String(i)]=t})}),e}function Nfe(r,e){if(e=e||{},Object.keys(e).forEach(function(t){if(kfe.indexOf(t)===-1)throw new mU('Unknown option "'+t+'" is met in definition of "'+r+'" YAML type.')}),this.tag=r,this.kind=e.kind||null,this.resolve=e.resolve||function(){return!0},this.construct=e.construct||function(t){return t},this.instanceOf=e.instanceOf||null,this.predicate=e.predicate||null,this.represent=e.represent||null,this.defaultStyle=e.defaultStyle||null,this.styleAliases=Ffe(e.styleAliases||null),Rfe.indexOf(this.kind)===-1)throw new mU('Unknown kind "'+this.kind+'" is specified for "'+r+'" YAML type.')}EU.exports=Nfe});var _l=w((wZe,yU)=>{"use strict";var IU=Zl(),gI=kg(),Lfe=si();function GS(r,e,t){var i=[];return r.include.forEach(function(n){t=GS(n,e,t)}),r[e].forEach(function(n){t.forEach(function(s,o){s.tag===n.tag&&s.kind===n.kind&&i.push(o)}),t.push(n)}),t.filter(function(n,s){return i.indexOf(s)===-1})}function Tfe(){var r={scalar:{},sequence:{},mapping:{},fallback:{}},e,t;function i(n){r[n.kind][n.tag]=r.fallback[n.tag]=n}for(e=0,t=arguments.length;e{"use strict";var Ofe=si();wU.exports=new Ofe("tag:yaml.org,2002:str",{kind:"scalar",construct:function(r){return r!==null?r:""}})});var bU=w((QZe,QU)=>{"use strict";var Mfe=si();QU.exports=new Mfe("tag:yaml.org,2002:seq",{kind:"sequence",construct:function(r){return r!==null?r:[]}})});var vU=w((bZe,SU)=>{"use strict";var Kfe=si();SU.exports=new Kfe("tag:yaml.org,2002:map",{kind:"mapping",construct:function(r){return r!==null?r:{}}})});var fI=w((SZe,xU)=>{"use strict";var Ufe=_l();xU.exports=new Ufe({explicit:[BU(),bU(),vU()]})});var DU=w((vZe,PU)=>{"use strict";var Hfe=si();function Gfe(r){if(r===null)return!0;var e=r.length;return e===1&&r==="~"||e===4&&(r==="null"||r==="Null"||r==="NULL")}function Yfe(){return null}function jfe(r){return r===null}PU.exports=new Hfe("tag:yaml.org,2002:null",{kind:"scalar",resolve:Gfe,construct:Yfe,predicate:jfe,represent:{canonical:function(){return"~"},lowercase:function(){return"null"},uppercase:function(){return"NULL"},camelcase:function(){return"Null"}},defaultStyle:"lowercase"})});var RU=w((xZe,kU)=>{"use strict";var qfe=si();function Jfe(r){if(r===null)return!1;var e=r.length;return e===4&&(r==="true"||r==="True"||r==="TRUE")||e===5&&(r==="false"||r==="False"||r==="FALSE")}function Wfe(r){return r==="true"||r==="True"||r==="TRUE"}function zfe(r){return Object.prototype.toString.call(r)==="[object Boolean]"}kU.exports=new qfe("tag:yaml.org,2002:bool",{kind:"scalar",resolve:Jfe,construct:Wfe,predicate:zfe,represent:{lowercase:function(r){return r?"true":"false"},uppercase:function(r){return r?"TRUE":"FALSE"},camelcase:function(r){return r?"True":"False"}},defaultStyle:"lowercase"})});var NU=w((PZe,FU)=>{"use strict";var Vfe=Zl(),Xfe=si();function Zfe(r){return 48<=r&&r<=57||65<=r&&r<=70||97<=r&&r<=102}function _fe(r){return 48<=r&&r<=55}function $fe(r){return 48<=r&&r<=57}function 
ehe(r){if(r===null)return!1;var e=r.length,t=0,i=!1,n;if(!e)return!1;if(n=r[t],(n==="-"||n==="+")&&(n=r[++t]),n==="0"){if(t+1===e)return!0;if(n=r[++t],n==="b"){for(t++;t=0?"0b"+r.toString(2):"-0b"+r.toString(2).slice(1)},octal:function(r){return r>=0?"0"+r.toString(8):"-0"+r.toString(8).slice(1)},decimal:function(r){return r.toString(10)},hexadecimal:function(r){return r>=0?"0x"+r.toString(16).toUpperCase():"-0x"+r.toString(16).toUpperCase().slice(1)}},defaultStyle:"decimal",styleAliases:{binary:[2,"bin"],octal:[8,"oct"],decimal:[10,"dec"],hexadecimal:[16,"hex"]}})});var OU=w((DZe,TU)=>{"use strict";var LU=Zl(),ihe=si(),nhe=new RegExp("^(?:[-+]?(?:0|[1-9][0-9_]*)(?:\\.[0-9_]*)?(?:[eE][-+]?[0-9]+)?|\\.[0-9_]+(?:[eE][-+]?[0-9]+)?|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*|[-+]?\\.(?:inf|Inf|INF)|\\.(?:nan|NaN|NAN))$");function she(r){return!(r===null||!nhe.test(r)||r[r.length-1]==="_")}function ohe(r){var e,t,i,n;return e=r.replace(/_/g,"").toLowerCase(),t=e[0]==="-"?-1:1,n=[],"+-".indexOf(e[0])>=0&&(e=e.slice(1)),e===".inf"?t===1?Number.POSITIVE_INFINITY:Number.NEGATIVE_INFINITY:e===".nan"?NaN:e.indexOf(":")>=0?(e.split(":").forEach(function(s){n.unshift(parseFloat(s,10))}),e=0,i=1,n.forEach(function(s){e+=s*i,i*=60}),t*e):t*parseFloat(e,10)}var ahe=/^[-+]?[0-9]+e/;function Ahe(r,e){var t;if(isNaN(r))switch(e){case"lowercase":return".nan";case"uppercase":return".NAN";case"camelcase":return".NaN"}else if(Number.POSITIVE_INFINITY===r)switch(e){case"lowercase":return".inf";case"uppercase":return".INF";case"camelcase":return".Inf"}else if(Number.NEGATIVE_INFINITY===r)switch(e){case"lowercase":return"-.inf";case"uppercase":return"-.INF";case"camelcase":return"-.Inf"}else if(LU.isNegativeZero(r))return"-0.0";return t=r.toString(10),ahe.test(t)?t.replace("e",".e"):t}function lhe(r){return Object.prototype.toString.call(r)==="[object Number]"&&(r%1!==0||LU.isNegativeZero(r))}TU.exports=new ihe("tag:yaml.org,2002:float",{kind:"scalar",resolve:she,construct:ohe,predicate:lhe,represent:Ahe,defaultStyle:"lowercase"})});var YS=w((kZe,MU)=>{"use strict";var che=_l();MU.exports=new che({include:[fI()],implicit:[DU(),RU(),NU(),OU()]})});var jS=w((RZe,KU)=>{"use strict";var uhe=_l();KU.exports=new uhe({include:[YS()]})});var YU=w((FZe,GU)=>{"use strict";var ghe=si(),UU=new RegExp("^([0-9][0-9][0-9][0-9])-([0-9][0-9])-([0-9][0-9])$"),HU=new RegExp("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)(?:[Tt]|[ \\t]+)([0-9][0-9]?):([0-9][0-9]):([0-9][0-9])(?:\\.([0-9]*))?(?:[ \\t]*(Z|([-+])([0-9][0-9]?)(?::([0-9][0-9]))?))?$");function fhe(r){return r===null?!1:UU.exec(r)!==null||HU.exec(r)!==null}function hhe(r){var e,t,i,n,s,o,a,l=0,c=null,u,g,f;if(e=UU.exec(r),e===null&&(e=HU.exec(r)),e===null)throw new Error("Date resolve error");if(t=+e[1],i=+e[2]-1,n=+e[3],!e[4])return new Date(Date.UTC(t,i,n));if(s=+e[4],o=+e[5],a=+e[6],e[7]){for(l=e[7].slice(0,3);l.length<3;)l+="0";l=+l}return e[9]&&(u=+e[10],g=+(e[11]||0),c=(u*60+g)*6e4,e[9]==="-"&&(c=-c)),f=new Date(Date.UTC(t,i,n,s,o,a,l)),c&&f.setTime(f.getTime()-c),f}function phe(r){return r.toISOString()}GU.exports=new ghe("tag:yaml.org,2002:timestamp",{kind:"scalar",resolve:fhe,construct:hhe,instanceOf:Date,represent:phe})});var qU=w((NZe,jU)=>{"use strict";var dhe=si();function Che(r){return r==="<<"||r===null}jU.exports=new dhe("tag:yaml.org,2002:merge",{kind:"scalar",resolve:Che})});var zU=w((LZe,WU)=>{"use strict";var $l;try{JU=J,$l=JU("buffer").Buffer}catch{}var JU,mhe=si(),qS=`ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/= +\r`;function 
Ehe(r){if(r===null)return!1;var e,t,i=0,n=r.length,s=qS;for(t=0;t64)){if(e<0)return!1;i+=6}return i%8===0}function Ihe(r){var e,t,i=r.replace(/[\r\n=]/g,""),n=i.length,s=qS,o=0,a=[];for(e=0;e>16&255),a.push(o>>8&255),a.push(o&255)),o=o<<6|s.indexOf(i.charAt(e));return t=n%4*6,t===0?(a.push(o>>16&255),a.push(o>>8&255),a.push(o&255)):t===18?(a.push(o>>10&255),a.push(o>>2&255)):t===12&&a.push(o>>4&255),$l?$l.from?$l.from(a):new $l(a):a}function yhe(r){var e="",t=0,i,n,s=r.length,o=qS;for(i=0;i>18&63],e+=o[t>>12&63],e+=o[t>>6&63],e+=o[t&63]),t=(t<<8)+r[i];return n=s%3,n===0?(e+=o[t>>18&63],e+=o[t>>12&63],e+=o[t>>6&63],e+=o[t&63]):n===2?(e+=o[t>>10&63],e+=o[t>>4&63],e+=o[t<<2&63],e+=o[64]):n===1&&(e+=o[t>>2&63],e+=o[t<<4&63],e+=o[64],e+=o[64]),e}function whe(r){return $l&&$l.isBuffer(r)}WU.exports=new mhe("tag:yaml.org,2002:binary",{kind:"scalar",resolve:Ehe,construct:Ihe,predicate:whe,represent:yhe})});var XU=w((TZe,VU)=>{"use strict";var Bhe=si(),Qhe=Object.prototype.hasOwnProperty,bhe=Object.prototype.toString;function She(r){if(r===null)return!0;var e=[],t,i,n,s,o,a=r;for(t=0,i=a.length;t{"use strict";var xhe=si(),Phe=Object.prototype.toString;function Dhe(r){if(r===null)return!0;var e,t,i,n,s,o=r;for(s=new Array(o.length),e=0,t=o.length;e{"use strict";var Rhe=si(),Fhe=Object.prototype.hasOwnProperty;function Nhe(r){if(r===null)return!0;var e,t=r;for(e in t)if(Fhe.call(t,e)&&t[e]!==null)return!1;return!0}function Lhe(r){return r!==null?r:{}}$U.exports=new Rhe("tag:yaml.org,2002:set",{kind:"mapping",resolve:Nhe,construct:Lhe})});var Fg=w((KZe,t2)=>{"use strict";var The=_l();t2.exports=new The({include:[jS()],implicit:[YU(),qU()],explicit:[zU(),XU(),_U(),e2()]})});var i2=w((UZe,r2)=>{"use strict";var Ohe=si();function Mhe(){return!0}function Khe(){}function Uhe(){return""}function Hhe(r){return typeof r>"u"}r2.exports=new Ohe("tag:yaml.org,2002:js/undefined",{kind:"scalar",resolve:Mhe,construct:Khe,predicate:Hhe,represent:Uhe})});var s2=w((HZe,n2)=>{"use strict";var Ghe=si();function Yhe(r){if(r===null||r.length===0)return!1;var e=r,t=/\/([gim]*)$/.exec(r),i="";return!(e[0]==="/"&&(t&&(i=t[1]),i.length>3||e[e.length-i.length-1]!=="/"))}function jhe(r){var e=r,t=/\/([gim]*)$/.exec(r),i="";return e[0]==="/"&&(t&&(i=t[1]),e=e.slice(1,e.length-i.length-1)),new RegExp(e,i)}function qhe(r){var e="/"+r.source+"/";return r.global&&(e+="g"),r.multiline&&(e+="m"),r.ignoreCase&&(e+="i"),e}function Jhe(r){return Object.prototype.toString.call(r)==="[object RegExp]"}n2.exports=new Ghe("tag:yaml.org,2002:js/regexp",{kind:"scalar",resolve:Yhe,construct:jhe,predicate:Jhe,represent:qhe})});var A2=w((GZe,a2)=>{"use strict";var hI;try{o2=J,hI=o2("esprima")}catch{typeof window<"u"&&(hI=window.esprima)}var o2,Whe=si();function zhe(r){if(r===null)return!1;try{var e="("+r+")",t=hI.parse(e,{range:!0});return!(t.type!=="Program"||t.body.length!==1||t.body[0].type!=="ExpressionStatement"||t.body[0].expression.type!=="ArrowFunctionExpression"&&t.body[0].expression.type!=="FunctionExpression")}catch{return!1}}function Vhe(r){var e="("+r+")",t=hI.parse(e,{range:!0}),i=[],n;if(t.type!=="Program"||t.body.length!==1||t.body[0].type!=="ExpressionStatement"||t.body[0].expression.type!=="ArrowFunctionExpression"&&t.body[0].expression.type!=="FunctionExpression")throw new Error("Failed to resolve function");return t.body[0].expression.params.forEach(function(s){i.push(s.name)}),n=t.body[0].expression.body.range,t.body[0].expression.body.type==="BlockStatement"?new Function(i,e.slice(n[0]+1,n[1]-1)):new Function(i,"return 
"+e.slice(n[0],n[1]))}function Xhe(r){return r.toString()}function Zhe(r){return Object.prototype.toString.call(r)==="[object Function]"}a2.exports=new Whe("tag:yaml.org,2002:js/function",{kind:"scalar",resolve:zhe,construct:Vhe,predicate:Zhe,represent:Xhe})});var zp=w((YZe,c2)=>{"use strict";var l2=_l();c2.exports=l2.DEFAULT=new l2({include:[Fg()],explicit:[i2(),s2(),A2()]})});var P2=w((jZe,Vp)=>{"use strict";var ya=Zl(),C2=kg(),_he=CU(),m2=Fg(),$he=zp(),DA=Object.prototype.hasOwnProperty,pI=1,E2=2,I2=3,dI=4,JS=1,epe=2,u2=3,tpe=/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F-\x84\x86-\x9F\uFFFE\uFFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF]/,rpe=/[\x85\u2028\u2029]/,ipe=/[,\[\]\{\}]/,y2=/^(?:!|!!|![a-z\-]+!)$/i,w2=/^(?:!|[^,\[\]\{\}])(?:%[0-9a-f]{2}|[0-9a-z\-#;\/\?:@&=\+\$,_\.!~\*'\(\)\[\]])*$/i;function g2(r){return Object.prototype.toString.call(r)}function vo(r){return r===10||r===13}function tc(r){return r===9||r===32}function un(r){return r===9||r===32||r===10||r===13}function Ng(r){return r===44||r===91||r===93||r===123||r===125}function npe(r){var e;return 48<=r&&r<=57?r-48:(e=r|32,97<=e&&e<=102?e-97+10:-1)}function spe(r){return r===120?2:r===117?4:r===85?8:0}function ope(r){return 48<=r&&r<=57?r-48:-1}function f2(r){return r===48?"\0":r===97?"\x07":r===98?"\b":r===116||r===9?" ":r===110?` +`:r===118?"\v":r===102?"\f":r===114?"\r":r===101?"\x1B":r===32?" ":r===34?'"':r===47?"/":r===92?"\\":r===78?"\x85":r===95?"\xA0":r===76?"\u2028":r===80?"\u2029":""}function ape(r){return r<=65535?String.fromCharCode(r):String.fromCharCode((r-65536>>10)+55296,(r-65536&1023)+56320)}var B2=new Array(256),Q2=new Array(256);for(ec=0;ec<256;ec++)B2[ec]=f2(ec)?1:0,Q2[ec]=f2(ec);var ec;function Ape(r,e){this.input=r,this.filename=e.filename||null,this.schema=e.schema||$he,this.onWarning=e.onWarning||null,this.legacy=e.legacy||!1,this.json=e.json||!1,this.listener=e.listener||null,this.implicitTypes=this.schema.compiledImplicit,this.typeMap=this.schema.compiledTypeMap,this.length=r.length,this.position=0,this.line=0,this.lineStart=0,this.lineIndent=0,this.documents=[]}function b2(r,e){return new C2(e,new _he(r.filename,r.input,r.position,r.line,r.position-r.lineStart))}function ft(r,e){throw b2(r,e)}function CI(r,e){r.onWarning&&r.onWarning.call(null,b2(r,e))}var h2={YAML:function(e,t,i){var n,s,o;e.version!==null&&ft(e,"duplication of %YAML directive"),i.length!==1&&ft(e,"YAML directive accepts exactly one argument"),n=/^([0-9]+)\.([0-9]+)$/.exec(i[0]),n===null&&ft(e,"ill-formed argument of the YAML directive"),s=parseInt(n[1],10),o=parseInt(n[2],10),s!==1&&ft(e,"unacceptable YAML version of the document"),e.version=i[0],e.checkLineBreaks=o<2,o!==1&&o!==2&&CI(e,"unsupported YAML version of the document")},TAG:function(e,t,i){var n,s;i.length!==2&&ft(e,"TAG directive accepts exactly two arguments"),n=i[0],s=i[1],y2.test(n)||ft(e,"ill-formed tag handle (first argument) of the TAG directive"),DA.call(e.tagMap,n)&&ft(e,'there is a previously declared suffix for "'+n+'" tag handle'),w2.test(s)||ft(e,"ill-formed tag prefix (second argument) of the TAG directive"),e.tagMap[n]=s}};function PA(r,e,t,i){var n,s,o,a;if(e1&&(r.result+=ya.repeat(` +`,e-1))}function lpe(r,e,t){var 
i,n,s,o,a,l,c,u,g=r.kind,f=r.result,h;if(h=r.input.charCodeAt(r.position),un(h)||Ng(h)||h===35||h===38||h===42||h===33||h===124||h===62||h===39||h===34||h===37||h===64||h===96||(h===63||h===45)&&(n=r.input.charCodeAt(r.position+1),un(n)||t&&Ng(n)))return!1;for(r.kind="scalar",r.result="",s=o=r.position,a=!1;h!==0;){if(h===58){if(n=r.input.charCodeAt(r.position+1),un(n)||t&&Ng(n))break}else if(h===35){if(i=r.input.charCodeAt(r.position-1),un(i))break}else{if(r.position===r.lineStart&&mI(r)||t&&Ng(h))break;if(vo(h))if(l=r.line,c=r.lineStart,u=r.lineIndent,zr(r,!1,-1),r.lineIndent>=e){a=!0,h=r.input.charCodeAt(r.position);continue}else{r.position=o,r.line=l,r.lineStart=c,r.lineIndent=u;break}}a&&(PA(r,s,o,!1),zS(r,r.line-l),s=o=r.position,a=!1),tc(h)||(o=r.position+1),h=r.input.charCodeAt(++r.position)}return PA(r,s,o,!1),r.result?!0:(r.kind=g,r.result=f,!1)}function cpe(r,e){var t,i,n;if(t=r.input.charCodeAt(r.position),t!==39)return!1;for(r.kind="scalar",r.result="",r.position++,i=n=r.position;(t=r.input.charCodeAt(r.position))!==0;)if(t===39)if(PA(r,i,r.position,!0),t=r.input.charCodeAt(++r.position),t===39)i=r.position,r.position++,n=r.position;else return!0;else vo(t)?(PA(r,i,n,!0),zS(r,zr(r,!1,e)),i=n=r.position):r.position===r.lineStart&&mI(r)?ft(r,"unexpected end of the document within a single quoted scalar"):(r.position++,n=r.position);ft(r,"unexpected end of the stream within a single quoted scalar")}function upe(r,e){var t,i,n,s,o,a;if(a=r.input.charCodeAt(r.position),a!==34)return!1;for(r.kind="scalar",r.result="",r.position++,t=i=r.position;(a=r.input.charCodeAt(r.position))!==0;){if(a===34)return PA(r,t,r.position,!0),r.position++,!0;if(a===92){if(PA(r,t,r.position,!0),a=r.input.charCodeAt(++r.position),vo(a))zr(r,!1,e);else if(a<256&&B2[a])r.result+=Q2[a],r.position++;else if((o=spe(a))>0){for(n=o,s=0;n>0;n--)a=r.input.charCodeAt(++r.position),(o=npe(a))>=0?s=(s<<4)+o:ft(r,"expected hexadecimal character");r.result+=ape(s),r.position++}else ft(r,"unknown escape sequence");t=i=r.position}else vo(a)?(PA(r,t,i,!0),zS(r,zr(r,!1,e)),t=i=r.position):r.position===r.lineStart&&mI(r)?ft(r,"unexpected end of the document within a double quoted scalar"):(r.position++,i=r.position)}ft(r,"unexpected end of the stream within a double quoted scalar")}function gpe(r,e){var t=!0,i,n=r.tag,s,o=r.anchor,a,l,c,u,g,f={},h,p,C,y;if(y=r.input.charCodeAt(r.position),y===91)l=93,g=!1,s=[];else if(y===123)l=125,g=!0,s={};else return!1;for(r.anchor!==null&&(r.anchorMap[r.anchor]=s),y=r.input.charCodeAt(++r.position);y!==0;){if(zr(r,!0,e),y=r.input.charCodeAt(r.position),y===l)return r.position++,r.tag=n,r.anchor=o,r.kind=g?"mapping":"sequence",r.result=s,!0;t||ft(r,"missed comma between flow collection entries"),p=h=C=null,c=u=!1,y===63&&(a=r.input.charCodeAt(r.position+1),un(a)&&(c=u=!0,r.position++,zr(r,!0,e))),i=r.line,Tg(r,e,pI,!1,!0),p=r.tag,h=r.result,zr(r,!0,e),y=r.input.charCodeAt(r.position),(u||r.line===i)&&y===58&&(c=!0,y=r.input.charCodeAt(++r.position),zr(r,!0,e),Tg(r,e,pI,!1,!0),C=r.result),g?Lg(r,s,f,p,h,C):c?s.push(Lg(r,null,f,p,h,C)):s.push(h),zr(r,!0,e),y=r.input.charCodeAt(r.position),y===44?(t=!0,y=r.input.charCodeAt(++r.position)):t=!1}ft(r,"unexpected end of the stream within a flow collection")}function fpe(r,e){var t,i,n=JS,s=!1,o=!1,a=e,l=0,c=!1,u,g;if(g=r.input.charCodeAt(r.position),g===124)i=!1;else if(g===62)i=!0;else return!1;for(r.kind="scalar",r.result="";g!==0;)if(g=r.input.charCodeAt(++r.position),g===43||g===45)JS===n?n=g===43?u2:epe:ft(r,"repeat of a chomping mode 
identifier");else if((u=ope(g))>=0)u===0?ft(r,"bad explicit indentation width of a block scalar; it cannot be less than one"):o?ft(r,"repeat of an indentation width identifier"):(a=e+u-1,o=!0);else break;if(tc(g)){do g=r.input.charCodeAt(++r.position);while(tc(g));if(g===35)do g=r.input.charCodeAt(++r.position);while(!vo(g)&&g!==0)}for(;g!==0;){for(WS(r),r.lineIndent=0,g=r.input.charCodeAt(r.position);(!o||r.lineIndenta&&(a=r.lineIndent),vo(g)){l++;continue}if(r.lineIndente)&&l!==0)ft(r,"bad indentation of a sequence entry");else if(r.lineIndente)&&(Tg(r,e,dI,!0,n)&&(p?f=r.result:h=r.result),p||(Lg(r,c,u,g,f,h,s,o),g=f=h=null),zr(r,!0,-1),y=r.input.charCodeAt(r.position)),r.lineIndent>e&&y!==0)ft(r,"bad indentation of a mapping entry");else if(r.lineIndente?l=1:r.lineIndent===e?l=0:r.lineIndente?l=1:r.lineIndent===e?l=0:r.lineIndent tag; it should be "scalar", not "'+r.kind+'"'),g=0,f=r.implicitTypes.length;g tag; it should be "'+h.kind+'", not "'+r.kind+'"'),h.resolve(r.result)?(r.result=h.construct(r.result),r.anchor!==null&&(r.anchorMap[r.anchor]=r.result)):ft(r,"cannot resolve a node with !<"+r.tag+"> explicit tag")):ft(r,"unknown tag !<"+r.tag+">");return r.listener!==null&&r.listener("close",r),r.tag!==null||r.anchor!==null||u}function mpe(r){var e=r.position,t,i,n,s=!1,o;for(r.version=null,r.checkLineBreaks=r.legacy,r.tagMap={},r.anchorMap={};(o=r.input.charCodeAt(r.position))!==0&&(zr(r,!0,-1),o=r.input.charCodeAt(r.position),!(r.lineIndent>0||o!==37));){for(s=!0,o=r.input.charCodeAt(++r.position),t=r.position;o!==0&&!un(o);)o=r.input.charCodeAt(++r.position);for(i=r.input.slice(t,r.position),n=[],i.length<1&&ft(r,"directive name must not be less than one character in length");o!==0;){for(;tc(o);)o=r.input.charCodeAt(++r.position);if(o===35){do o=r.input.charCodeAt(++r.position);while(o!==0&&!vo(o));break}if(vo(o))break;for(t=r.position;o!==0&&!un(o);)o=r.input.charCodeAt(++r.position);n.push(r.input.slice(t,r.position))}o!==0&&WS(r),DA.call(h2,i)?h2[i](r,i,n):CI(r,'unknown document directive "'+i+'"')}if(zr(r,!0,-1),r.lineIndent===0&&r.input.charCodeAt(r.position)===45&&r.input.charCodeAt(r.position+1)===45&&r.input.charCodeAt(r.position+2)===45?(r.position+=3,zr(r,!0,-1)):s&&ft(r,"directives end mark is expected"),Tg(r,r.lineIndent-1,dI,!1,!0),zr(r,!0,-1),r.checkLineBreaks&&rpe.test(r.input.slice(e,r.position))&&CI(r,"non-ASCII line breaks are interpreted as content"),r.documents.push(r.result),r.position===r.lineStart&&mI(r)){r.input.charCodeAt(r.position)===46&&(r.position+=3,zr(r,!0,-1));return}if(r.position"u"&&(t=e,e=null);var i=S2(r,t);if(typeof e!="function")return i;for(var n=0,s=i.length;n"u"&&(t=e,e=null),v2(r,e,ya.extend({schema:m2},t))}function Ipe(r,e){return x2(r,ya.extend({schema:m2},e))}Vp.exports.loadAll=v2;Vp.exports.load=x2;Vp.exports.safeLoadAll=Epe;Vp.exports.safeLoad=Ipe});var _2=w((qZe,_S)=>{"use strict";var Zp=Zl(),_p=kg(),ype=zp(),wpe=Fg(),O2=Object.prototype.toString,M2=Object.prototype.hasOwnProperty,Bpe=9,Xp=10,Qpe=13,bpe=32,Spe=33,vpe=34,K2=35,xpe=37,Ppe=38,Dpe=39,kpe=42,U2=44,Rpe=45,H2=58,Fpe=61,Npe=62,Lpe=63,Tpe=64,G2=91,Y2=93,Ope=96,j2=123,Mpe=124,q2=125,Ni={};Ni[0]="\\0";Ni[7]="\\a";Ni[8]="\\b";Ni[9]="\\t";Ni[10]="\\n";Ni[11]="\\v";Ni[12]="\\f";Ni[13]="\\r";Ni[27]="\\e";Ni[34]='\\"';Ni[92]="\\\\";Ni[133]="\\N";Ni[160]="\\_";Ni[8232]="\\L";Ni[8233]="\\P";var Kpe=["y","Y","yes","Yes","YES","on","On","ON","n","N","no","No","NO","off","Off","OFF"];function Upe(r,e){var 
t,i,n,s,o,a,l;if(e===null)return{};for(t={},i=Object.keys(e),n=0,s=i.length;n0?r.charCodeAt(s-1):null,f=f&&R2(o,a)}else{for(s=0;si&&r[g+1]!==" ",g=s);else if(!Og(o))return EI;a=s>0?r.charCodeAt(s-1):null,f=f&&R2(o,a)}c=c||u&&s-g-1>i&&r[g+1]!==" "}return!l&&!c?f&&!n(r)?W2:z2:t>9&&J2(r)?EI:c?X2:V2}function Jpe(r,e,t,i){r.dump=function(){if(e.length===0)return"''";if(!r.noCompatMode&&Kpe.indexOf(e)!==-1)return"'"+e+"'";var n=r.indent*Math.max(1,t),s=r.lineWidth===-1?-1:Math.max(Math.min(r.lineWidth,40),r.lineWidth-n),o=i||r.flowLevel>-1&&t>=r.flowLevel;function a(l){return Gpe(r,l)}switch(qpe(e,o,r.indent,s,a)){case W2:return e;case z2:return"'"+e.replace(/'/g,"''")+"'";case V2:return"|"+F2(e,r.indent)+N2(k2(e,n));case X2:return">"+F2(e,r.indent)+N2(k2(Wpe(e,s),n));case EI:return'"'+zpe(e,s)+'"';default:throw new _p("impossible error: invalid scalar style")}}()}function F2(r,e){var t=J2(r)?String(e):"",i=r[r.length-1]===` +`,n=i&&(r[r.length-2]===` +`||r===` +`),s=n?"+":i?"":"-";return t+s+` +`}function N2(r){return r[r.length-1]===` +`?r.slice(0,-1):r}function Wpe(r,e){for(var t=/(\n+)([^\n]*)/g,i=function(){var c=r.indexOf(` +`);return c=c!==-1?c:r.length,t.lastIndex=c,L2(r.slice(0,c),e)}(),n=r[0]===` +`||r[0]===" ",s,o;o=t.exec(r);){var a=o[1],l=o[2];s=l[0]===" ",i+=a+(!n&&!s&&l!==""?` +`:"")+L2(l,e),n=s}return i}function L2(r,e){if(r===""||r[0]===" ")return r;for(var t=/ [^ ]/g,i,n=0,s,o=0,a=0,l="";i=t.exec(r);)a=i.index,a-n>e&&(s=o>n?o:a,l+=` +`+r.slice(n,s),n=s+1),o=a;return l+=` +`,r.length-n>e&&o>n?l+=r.slice(n,o)+` +`+r.slice(o+1):l+=r.slice(n),l.slice(1)}function zpe(r){for(var e="",t,i,n,s=0;s=55296&&t<=56319&&(i=r.charCodeAt(s+1),i>=56320&&i<=57343)){e+=D2((t-55296)*1024+i-56320+65536),s++;continue}n=Ni[t],e+=!n&&Og(t)?r[s]:n||D2(t)}return e}function Vpe(r,e,t){var i="",n=r.tag,s,o;for(s=0,o=t.length;s1024&&(u+="? "),u+=r.dump+(r.condenseFlow?'"':"")+":"+(r.condenseFlow?"":" "),rc(r,e,c,!1,!1)&&(u+=r.dump,i+=u));r.tag=n,r.dump="{"+i+"}"}function _pe(r,e,t,i){var n="",s=r.tag,o=Object.keys(t),a,l,c,u,g,f;if(r.sortKeys===!0)o.sort();else if(typeof r.sortKeys=="function")o.sort(r.sortKeys);else if(r.sortKeys)throw new _p("sortKeys must be a boolean or a function");for(a=0,l=o.length;a1024,g&&(r.dump&&Xp===r.dump.charCodeAt(0)?f+="?":f+="? 
"),f+=r.dump,g&&(f+=VS(r,e)),rc(r,e+1,u,!0,g)&&(r.dump&&Xp===r.dump.charCodeAt(0)?f+=":":f+=": ",f+=r.dump,n+=f));r.tag=s,r.dump=n||"{}"}function T2(r,e,t){var i,n,s,o,a,l;for(n=t?r.explicitTypes:r.implicitTypes,s=0,o=n.length;s tag resolver accepts not "'+l+'" style');r.dump=i}return!0}return!1}function rc(r,e,t,i,n,s){r.tag=null,r.dump=t,T2(r,t,!1)||T2(r,t,!0);var o=O2.call(r.dump);i&&(i=r.flowLevel<0||r.flowLevel>e);var a=o==="[object Object]"||o==="[object Array]",l,c;if(a&&(l=r.duplicates.indexOf(t),c=l!==-1),(r.tag!==null&&r.tag!=="?"||c||r.indent!==2&&e>0)&&(n=!1),c&&r.usedDuplicates[l])r.dump="*ref_"+l;else{if(a&&c&&!r.usedDuplicates[l]&&(r.usedDuplicates[l]=!0),o==="[object Object]")i&&Object.keys(r.dump).length!==0?(_pe(r,e,r.dump,n),c&&(r.dump="&ref_"+l+r.dump)):(Zpe(r,e,r.dump),c&&(r.dump="&ref_"+l+" "+r.dump));else if(o==="[object Array]"){var u=r.noArrayIndent&&e>0?e-1:e;i&&r.dump.length!==0?(Xpe(r,u,r.dump,n),c&&(r.dump="&ref_"+l+r.dump)):(Vpe(r,u,r.dump),c&&(r.dump="&ref_"+l+" "+r.dump))}else if(o==="[object String]")r.tag!=="?"&&Jpe(r,r.dump,e,s);else{if(r.skipInvalid)return!1;throw new _p("unacceptable kind of an object to dump "+o)}r.tag!==null&&r.tag!=="?"&&(r.dump="!<"+r.tag+"> "+r.dump)}return!0}function $pe(r,e){var t=[],i=[],n,s;for(XS(r,t,i),n=0,s=i.length;n{"use strict";var II=P2(),$2=_2();function yI(r){return function(){throw new Error("Function "+r+" is deprecated and cannot be used.")}}Fr.exports.Type=si();Fr.exports.Schema=_l();Fr.exports.FAILSAFE_SCHEMA=fI();Fr.exports.JSON_SCHEMA=YS();Fr.exports.CORE_SCHEMA=jS();Fr.exports.DEFAULT_SAFE_SCHEMA=Fg();Fr.exports.DEFAULT_FULL_SCHEMA=zp();Fr.exports.load=II.load;Fr.exports.loadAll=II.loadAll;Fr.exports.safeLoad=II.safeLoad;Fr.exports.safeLoadAll=II.safeLoadAll;Fr.exports.dump=$2.dump;Fr.exports.safeDump=$2.safeDump;Fr.exports.YAMLException=kg();Fr.exports.MINIMAL_SCHEMA=fI();Fr.exports.SAFE_SCHEMA=Fg();Fr.exports.DEFAULT_SCHEMA=zp();Fr.exports.scan=yI("scan");Fr.exports.parse=yI("parse");Fr.exports.compose=yI("compose");Fr.exports.addConstructor=yI("addConstructor")});var rH=w((WZe,tH)=>{"use strict";var tde=eH();tH.exports=tde});var nH=w((zZe,iH)=>{"use strict";function rde(r,e){function t(){this.constructor=r}t.prototype=e.prototype,r.prototype=new t}function ic(r,e,t,i){this.message=r,this.expected=e,this.found=t,this.location=i,this.name="SyntaxError",typeof Error.captureStackTrace=="function"&&Error.captureStackTrace(this,ic)}rde(ic,Error);ic.buildMessage=function(r,e){var t={literal:function(c){return'"'+n(c.text)+'"'},class:function(c){var u="",g;for(g=0;g0){for(g=1,f=1;g({[Ke]:Ce})))},H=function(R){return R},j=function(R){return R},$=Ms("correct indentation"),V=" ",W=ar(" ",!1),_=function(R){return R.length===BA*mg},A=function(R){return R.length===(BA+1)*mg},ae=function(){return BA++,!0},ge=function(){return BA--,!0},re=function(){return gg()},O=Ms("pseudostring"),F=/^[^\r\n\t ?:,\][{}#&*!|>'"%@`\-]/,ue=Fn(["\r",` +`," "," ","?",":",",","]","[","{","}","#","&","*","!","|",">","'",'"',"%","@","`","-"],!0,!1),he=/^[^\r\n\t ,\][{}:#"']/,ke=Fn(["\r",` +`," "," ",",","]","[","{","}",":","#",'"',"'"],!0,!1),Fe=function(){return gg().replace(/^ *| *$/g,"")},Ne="--",oe=ar("--",!1),le=/^[a-zA-Z\/0-9]/,we=Fn([["a","z"],["A","Z"],"/",["0","9"]],!1,!1),fe=/^[^\r\n\t :,]/,Ae=Fn(["\r",` +`," "," ",":",","],!0,!1),qe="null",ne=ar("null",!1),Y=function(){return 
null},pe="true",ie=ar("true",!1),de=function(){return!0},_e="false",Pt=ar("false",!1),It=function(){return!1},Or=Ms("string"),ii='"',gi=ar('"',!1),hr=function(){return""},fi=function(R){return R},ni=function(R){return R.join("")},Os=/^[^"\\\0-\x1F\x7F]/,pr=Fn(['"',"\\",["\0",""],"\x7F"],!0,!1),Ii='\\"',es=ar('\\"',!1),ua=function(){return'"'},pA="\\\\",ag=ar("\\\\",!1),ts=function(){return"\\"},dA="\\/",ga=ar("\\/",!1),yp=function(){return"/"},CA="\\b",mA=ar("\\b",!1),wr=function(){return"\b"},kl="\\f",Ag=ar("\\f",!1),Io=function(){return"\f"},lg="\\n",wp=ar("\\n",!1),Bp=function(){return` +`},vr="\\r",se=ar("\\r",!1),yo=function(){return"\r"},kn="\\t",cg=ar("\\t",!1),Qt=function(){return" "},Rl="\\u",Rn=ar("\\u",!1),rs=function(R,q,Ce,Ke){return String.fromCharCode(parseInt(`0x${R}${q}${Ce}${Ke}`))},is=/^[0-9a-fA-F]/,gt=Fn([["0","9"],["a","f"],["A","F"]],!1,!1),wo=Ms("blank space"),At=/^[ \t]/,an=Fn([" "," "],!1,!1),S=Ms("white space"),Tt=/^[ \t\n\r]/,ug=Fn([" "," ",` +`,"\r"],!1,!1),Fl=`\r +`,Qp=ar(`\r +`,!1),bp=` +`,Sp=ar(` +`,!1),vp="\r",xp=ar("\r",!1),G=0,yt=0,EA=[{line:1,column:1}],Ji=0,Nl=[],Xe=0,fa;if("startRule"in e){if(!(e.startRule in i))throw new Error(`Can't start parsing from rule "`+e.startRule+'".');n=i[e.startRule]}function gg(){return r.substring(yt,G)}function FE(){return An(yt,G)}function Pp(R,q){throw q=q!==void 0?q:An(yt,G),Tl([Ms(R)],r.substring(yt,G),q)}function NE(R,q){throw q=q!==void 0?q:An(yt,G),fg(R,q)}function ar(R,q){return{type:"literal",text:R,ignoreCase:q}}function Fn(R,q,Ce){return{type:"class",parts:R,inverted:q,ignoreCase:Ce}}function Ll(){return{type:"any"}}function Dp(){return{type:"end"}}function Ms(R){return{type:"other",description:R}}function ha(R){var q=EA[R],Ce;if(q)return q;for(Ce=R-1;!EA[Ce];)Ce--;for(q=EA[Ce],q={line:q.line,column:q.column};CeJi&&(Ji=G,Nl=[]),Nl.push(R))}function fg(R,q){return new ic(R,null,null,q)}function Tl(R,q,Ce){return new ic(ic.buildMessage(R,q),R,q,Ce)}function Ks(){var R;return R=hg(),R}function Ol(){var R,q,Ce;for(R=G,q=[],Ce=IA();Ce!==t;)q.push(Ce),Ce=IA();return q!==t&&(yt=R,q=s(q)),R=q,R}function IA(){var R,q,Ce,Ke,Re;return R=G,q=da(),q!==t?(r.charCodeAt(G)===45?(Ce=o,G++):(Ce=t,Xe===0&&Te(a)),Ce!==t?(Ke=Rr(),Ke!==t?(Re=pa(),Re!==t?(yt=R,q=l(Re),R=q):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)):(G=R,R=t),R}function hg(){var R,q,Ce;for(R=G,q=[],Ce=pg();Ce!==t;)q.push(Ce),Ce=pg();return q!==t&&(yt=R,q=c(q)),R=q,R}function pg(){var R,q,Ce,Ke,Re,ze,dt,Ft,Nn;if(R=G,q=Rr(),q===t&&(q=null),q!==t){if(Ce=G,r.charCodeAt(G)===35?(Ke=u,G++):(Ke=t,Xe===0&&Te(g)),Ke!==t){if(Re=[],ze=G,dt=G,Xe++,Ft=Gs(),Xe--,Ft===t?dt=void 0:(G=dt,dt=t),dt!==t?(r.length>G?(Ft=r.charAt(G),G++):(Ft=t,Xe===0&&Te(f)),Ft!==t?(dt=[dt,Ft],ze=dt):(G=ze,ze=t)):(G=ze,ze=t),ze!==t)for(;ze!==t;)Re.push(ze),ze=G,dt=G,Xe++,Ft=Gs(),Xe--,Ft===t?dt=void 0:(G=dt,dt=t),dt!==t?(r.length>G?(Ft=r.charAt(G),G++):(Ft=t,Xe===0&&Te(f)),Ft!==t?(dt=[dt,Ft],ze=dt):(G=ze,ze=t)):(G=ze,ze=t);else Re=t;Re!==t?(Ke=[Ke,Re],Ce=Ke):(G=Ce,Ce=t)}else G=Ce,Ce=t;if(Ce===t&&(Ce=null),Ce!==t){if(Ke=[],Re=Hs(),Re!==t)for(;Re!==t;)Ke.push(Re),Re=Hs();else Ke=t;Ke!==t?(yt=R,q=h(),R=q):(G=R,R=t)}else G=R,R=t}else 
G=R,R=t;if(R===t&&(R=G,q=da(),q!==t?(Ce=Ml(),Ce!==t?(Ke=Rr(),Ke===t&&(Ke=null),Ke!==t?(r.charCodeAt(G)===58?(Re=p,G++):(Re=t,Xe===0&&Te(C)),Re!==t?(ze=Rr(),ze===t&&(ze=null),ze!==t?(dt=pa(),dt!==t?(yt=R,q=y(Ce,dt),R=q):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)):(G=R,R=t),R===t&&(R=G,q=da(),q!==t?(Ce=Us(),Ce!==t?(Ke=Rr(),Ke===t&&(Ke=null),Ke!==t?(r.charCodeAt(G)===58?(Re=p,G++):(Re=t,Xe===0&&Te(C)),Re!==t?(ze=Rr(),ze===t&&(ze=null),ze!==t?(dt=pa(),dt!==t?(yt=R,q=y(Ce,dt),R=q):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)):(G=R,R=t),R===t))){if(R=G,q=da(),q!==t)if(Ce=Us(),Ce!==t)if(Ke=Rr(),Ke!==t)if(Re=LE(),Re!==t){if(ze=[],dt=Hs(),dt!==t)for(;dt!==t;)ze.push(dt),dt=Hs();else ze=t;ze!==t?(yt=R,q=y(Ce,Re),R=q):(G=R,R=t)}else G=R,R=t;else G=R,R=t;else G=R,R=t;else G=R,R=t;if(R===t)if(R=G,q=da(),q!==t)if(Ce=Us(),Ce!==t){if(Ke=[],Re=G,ze=Rr(),ze===t&&(ze=null),ze!==t?(r.charCodeAt(G)===44?(dt=B,G++):(dt=t,Xe===0&&Te(v)),dt!==t?(Ft=Rr(),Ft===t&&(Ft=null),Ft!==t?(Nn=Us(),Nn!==t?(yt=Re,ze=D(Ce,Nn),Re=ze):(G=Re,Re=t)):(G=Re,Re=t)):(G=Re,Re=t)):(G=Re,Re=t),Re!==t)for(;Re!==t;)Ke.push(Re),Re=G,ze=Rr(),ze===t&&(ze=null),ze!==t?(r.charCodeAt(G)===44?(dt=B,G++):(dt=t,Xe===0&&Te(v)),dt!==t?(Ft=Rr(),Ft===t&&(Ft=null),Ft!==t?(Nn=Us(),Nn!==t?(yt=Re,ze=D(Ce,Nn),Re=ze):(G=Re,Re=t)):(G=Re,Re=t)):(G=Re,Re=t)):(G=Re,Re=t);else Ke=t;Ke!==t?(Re=Rr(),Re===t&&(Re=null),Re!==t?(r.charCodeAt(G)===58?(ze=p,G++):(ze=t,Xe===0&&Te(C)),ze!==t?(dt=Rr(),dt===t&&(dt=null),dt!==t?(Ft=pa(),Ft!==t?(yt=R,q=L(Ce,Ke,Ft),R=q):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)}else G=R,R=t;else G=R,R=t}return R}function pa(){var R,q,Ce,Ke,Re,ze,dt;if(R=G,q=G,Xe++,Ce=G,Ke=Gs(),Ke!==t?(Re=rt(),Re!==t?(r.charCodeAt(G)===45?(ze=o,G++):(ze=t,Xe===0&&Te(a)),ze!==t?(dt=Rr(),dt!==t?(Ke=[Ke,Re,ze,dt],Ce=Ke):(G=Ce,Ce=t)):(G=Ce,Ce=t)):(G=Ce,Ce=t)):(G=Ce,Ce=t),Xe--,Ce!==t?(G=q,q=void 0):q=t,q!==t?(Ce=Hs(),Ce!==t?(Ke=Bo(),Ke!==t?(Re=Ol(),Re!==t?(ze=yA(),ze!==t?(yt=R,q=H(Re),R=q):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)):(G=R,R=t),R===t&&(R=G,q=Gs(),q!==t?(Ce=Bo(),Ce!==t?(Ke=hg(),Ke!==t?(Re=yA(),Re!==t?(yt=R,q=H(Ke),R=q):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)):(G=R,R=t),R===t))if(R=G,q=Kl(),q!==t){if(Ce=[],Ke=Hs(),Ke!==t)for(;Ke!==t;)Ce.push(Ke),Ke=Hs();else Ce=t;Ce!==t?(yt=R,q=j(q),R=q):(G=R,R=t)}else G=R,R=t;return R}function da(){var R,q,Ce;for(Xe++,R=G,q=[],r.charCodeAt(G)===32?(Ce=V,G++):(Ce=t,Xe===0&&Te(W));Ce!==t;)q.push(Ce),r.charCodeAt(G)===32?(Ce=V,G++):(Ce=t,Xe===0&&Te(W));return q!==t?(yt=G,Ce=_(q),Ce?Ce=void 0:Ce=t,Ce!==t?(q=[q,Ce],R=q):(G=R,R=t)):(G=R,R=t),Xe--,R===t&&(q=t,Xe===0&&Te($)),R}function rt(){var R,q,Ce;for(R=G,q=[],r.charCodeAt(G)===32?(Ce=V,G++):(Ce=t,Xe===0&&Te(W));Ce!==t;)q.push(Ce),r.charCodeAt(G)===32?(Ce=V,G++):(Ce=t,Xe===0&&Te(W));return q!==t?(yt=G,Ce=A(q),Ce?Ce=void 0:Ce=t,Ce!==t?(q=[q,Ce],R=q):(G=R,R=t)):(G=R,R=t),R}function Bo(){var R;return yt=G,R=ae(),R?R=void 0:R=t,R}function yA(){var R;return yt=G,R=ge(),R?R=void 0:R=t,R}function Ml(){var R;return R=Ul(),R===t&&(R=kp()),R}function Us(){var R,q,Ce;if(R=Ul(),R===t){if(R=G,q=[],Ce=dg(),Ce!==t)for(;Ce!==t;)q.push(Ce),Ce=dg();else q=t;q!==t&&(yt=R,q=re()),R=q}return R}function Kl(){var R;return R=Rp(),R===t&&(R=TE(),R===t&&(R=Ul(),R===t&&(R=kp()))),R}function LE(){var R;return R=Rp(),R===t&&(R=Ul(),R===t&&(R=dg())),R}function kp(){var 
R,q,Ce,Ke,Re,ze;if(Xe++,R=G,F.test(r.charAt(G))?(q=r.charAt(G),G++):(q=t,Xe===0&&Te(ue)),q!==t){for(Ce=[],Ke=G,Re=Rr(),Re===t&&(Re=null),Re!==t?(he.test(r.charAt(G))?(ze=r.charAt(G),G++):(ze=t,Xe===0&&Te(ke)),ze!==t?(Re=[Re,ze],Ke=Re):(G=Ke,Ke=t)):(G=Ke,Ke=t);Ke!==t;)Ce.push(Ke),Ke=G,Re=Rr(),Re===t&&(Re=null),Re!==t?(he.test(r.charAt(G))?(ze=r.charAt(G),G++):(ze=t,Xe===0&&Te(ke)),ze!==t?(Re=[Re,ze],Ke=Re):(G=Ke,Ke=t)):(G=Ke,Ke=t);Ce!==t?(yt=R,q=Fe(),R=q):(G=R,R=t)}else G=R,R=t;return Xe--,R===t&&(q=t,Xe===0&&Te(O)),R}function dg(){var R,q,Ce,Ke,Re;if(R=G,r.substr(G,2)===Ne?(q=Ne,G+=2):(q=t,Xe===0&&Te(oe)),q===t&&(q=null),q!==t)if(le.test(r.charAt(G))?(Ce=r.charAt(G),G++):(Ce=t,Xe===0&&Te(we)),Ce!==t){for(Ke=[],fe.test(r.charAt(G))?(Re=r.charAt(G),G++):(Re=t,Xe===0&&Te(Ae));Re!==t;)Ke.push(Re),fe.test(r.charAt(G))?(Re=r.charAt(G),G++):(Re=t,Xe===0&&Te(Ae));Ke!==t?(yt=R,q=Fe(),R=q):(G=R,R=t)}else G=R,R=t;else G=R,R=t;return R}function Rp(){var R,q;return R=G,r.substr(G,4)===qe?(q=qe,G+=4):(q=t,Xe===0&&Te(ne)),q!==t&&(yt=R,q=Y()),R=q,R}function TE(){var R,q;return R=G,r.substr(G,4)===pe?(q=pe,G+=4):(q=t,Xe===0&&Te(ie)),q!==t&&(yt=R,q=de()),R=q,R===t&&(R=G,r.substr(G,5)===_e?(q=_e,G+=5):(q=t,Xe===0&&Te(Pt)),q!==t&&(yt=R,q=It()),R=q),R}function Ul(){var R,q,Ce,Ke;return Xe++,R=G,r.charCodeAt(G)===34?(q=ii,G++):(q=t,Xe===0&&Te(gi)),q!==t?(r.charCodeAt(G)===34?(Ce=ii,G++):(Ce=t,Xe===0&&Te(gi)),Ce!==t?(yt=R,q=hr(),R=q):(G=R,R=t)):(G=R,R=t),R===t&&(R=G,r.charCodeAt(G)===34?(q=ii,G++):(q=t,Xe===0&&Te(gi)),q!==t?(Ce=OE(),Ce!==t?(r.charCodeAt(G)===34?(Ke=ii,G++):(Ke=t,Xe===0&&Te(gi)),Ke!==t?(yt=R,q=fi(Ce),R=q):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)),Xe--,R===t&&(q=t,Xe===0&&Te(Or)),R}function OE(){var R,q,Ce;if(R=G,q=[],Ce=Cg(),Ce!==t)for(;Ce!==t;)q.push(Ce),Ce=Cg();else q=t;return q!==t&&(yt=R,q=ni(q)),R=q,R}function Cg(){var R,q,Ce,Ke,Re,ze;return Os.test(r.charAt(G))?(R=r.charAt(G),G++):(R=t,Xe===0&&Te(pr)),R===t&&(R=G,r.substr(G,2)===Ii?(q=Ii,G+=2):(q=t,Xe===0&&Te(es)),q!==t&&(yt=R,q=ua()),R=q,R===t&&(R=G,r.substr(G,2)===pA?(q=pA,G+=2):(q=t,Xe===0&&Te(ag)),q!==t&&(yt=R,q=ts()),R=q,R===t&&(R=G,r.substr(G,2)===dA?(q=dA,G+=2):(q=t,Xe===0&&Te(ga)),q!==t&&(yt=R,q=yp()),R=q,R===t&&(R=G,r.substr(G,2)===CA?(q=CA,G+=2):(q=t,Xe===0&&Te(mA)),q!==t&&(yt=R,q=wr()),R=q,R===t&&(R=G,r.substr(G,2)===kl?(q=kl,G+=2):(q=t,Xe===0&&Te(Ag)),q!==t&&(yt=R,q=Io()),R=q,R===t&&(R=G,r.substr(G,2)===lg?(q=lg,G+=2):(q=t,Xe===0&&Te(wp)),q!==t&&(yt=R,q=Bp()),R=q,R===t&&(R=G,r.substr(G,2)===vr?(q=vr,G+=2):(q=t,Xe===0&&Te(se)),q!==t&&(yt=R,q=yo()),R=q,R===t&&(R=G,r.substr(G,2)===kn?(q=kn,G+=2):(q=t,Xe===0&&Te(cg)),q!==t&&(yt=R,q=Qt()),R=q,R===t&&(R=G,r.substr(G,2)===Rl?(q=Rl,G+=2):(q=t,Xe===0&&Te(Rn)),q!==t?(Ce=wA(),Ce!==t?(Ke=wA(),Ke!==t?(Re=wA(),Re!==t?(ze=wA(),ze!==t?(yt=R,q=rs(Ce,Ke,Re,ze),R=q):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)):(G=R,R=t)))))))))),R}function wA(){var R;return is.test(r.charAt(G))?(R=r.charAt(G),G++):(R=t,Xe===0&&Te(gt)),R}function Rr(){var R,q;if(Xe++,R=[],At.test(r.charAt(G))?(q=r.charAt(G),G++):(q=t,Xe===0&&Te(an)),q!==t)for(;q!==t;)R.push(q),At.test(r.charAt(G))?(q=r.charAt(G),G++):(q=t,Xe===0&&Te(an));else R=t;return Xe--,R===t&&(q=t,Xe===0&&Te(wo)),R}function ME(){var R,q;if(Xe++,R=[],Tt.test(r.charAt(G))?(q=r.charAt(G),G++):(q=t,Xe===0&&Te(ug)),q!==t)for(;q!==t;)R.push(q),Tt.test(r.charAt(G))?(q=r.charAt(G),G++):(q=t,Xe===0&&Te(ug));else R=t;return Xe--,R===t&&(q=t,Xe===0&&Te(S)),R}function Hs(){var 
R,q,Ce,Ke,Re,ze;if(R=G,q=Gs(),q!==t){for(Ce=[],Ke=G,Re=Rr(),Re===t&&(Re=null),Re!==t?(ze=Gs(),ze!==t?(Re=[Re,ze],Ke=Re):(G=Ke,Ke=t)):(G=Ke,Ke=t);Ke!==t;)Ce.push(Ke),Ke=G,Re=Rr(),Re===t&&(Re=null),Re!==t?(ze=Gs(),ze!==t?(Re=[Re,ze],Ke=Re):(G=Ke,Ke=t)):(G=Ke,Ke=t);Ce!==t?(q=[q,Ce],R=q):(G=R,R=t)}else G=R,R=t;return R}function Gs(){var R;return r.substr(G,2)===Fl?(R=Fl,G+=2):(R=t,Xe===0&&Te(Qp)),R===t&&(r.charCodeAt(G)===10?(R=bp,G++):(R=t,Xe===0&&Te(Sp)),R===t&&(r.charCodeAt(G)===13?(R=vp,G++):(R=t,Xe===0&&Te(xp)))),R}let mg=2,BA=0;if(fa=n(),fa!==t&&G===r.length)return fa;throw fa!==t&&G{"use strict";var Ade=r=>{let e=!1,t=!1,i=!1;for(let n=0;n{if(!(typeof r=="string"||Array.isArray(r)))throw new TypeError("Expected the input to be `string | string[]`");e=Object.assign({pascalCase:!1},e);let t=n=>e.pascalCase?n.charAt(0).toUpperCase()+n.slice(1):n;return Array.isArray(r)?r=r.map(n=>n.trim()).filter(n=>n.length).join("-"):r=r.trim(),r.length===0?"":r.length===1?e.pascalCase?r.toUpperCase():r.toLowerCase():(r!==r.toLowerCase()&&(r=Ade(r)),r=r.replace(/^[_.\- ]+/,"").toLowerCase().replace(/[_.\- ]+(\w|$)/g,(n,s)=>s.toUpperCase()).replace(/\d+(\w|$)/g,n=>n.toUpperCase()),t(r))};ev.exports=lH;ev.exports.default=lH});var uH=w((e_e,lde)=>{lde.exports=[{name:"AppVeyor",constant:"APPVEYOR",env:"APPVEYOR",pr:"APPVEYOR_PULL_REQUEST_NUMBER"},{name:"Azure Pipelines",constant:"AZURE_PIPELINES",env:"SYSTEM_TEAMFOUNDATIONCOLLECTIONURI",pr:"SYSTEM_PULLREQUEST_PULLREQUESTID"},{name:"Appcircle",constant:"APPCIRCLE",env:"AC_APPCIRCLE"},{name:"Bamboo",constant:"BAMBOO",env:"bamboo_planKey"},{name:"Bitbucket Pipelines",constant:"BITBUCKET",env:"BITBUCKET_COMMIT",pr:"BITBUCKET_PR_ID"},{name:"Bitrise",constant:"BITRISE",env:"BITRISE_IO",pr:"BITRISE_PULL_REQUEST"},{name:"Buddy",constant:"BUDDY",env:"BUDDY_WORKSPACE_ID",pr:"BUDDY_EXECUTION_PULL_REQUEST_ID"},{name:"Buildkite",constant:"BUILDKITE",env:"BUILDKITE",pr:{env:"BUILDKITE_PULL_REQUEST",ne:"false"}},{name:"CircleCI",constant:"CIRCLE",env:"CIRCLECI",pr:"CIRCLE_PULL_REQUEST"},{name:"Cirrus CI",constant:"CIRRUS",env:"CIRRUS_CI",pr:"CIRRUS_PR"},{name:"AWS CodeBuild",constant:"CODEBUILD",env:"CODEBUILD_BUILD_ARN"},{name:"Codefresh",constant:"CODEFRESH",env:"CF_BUILD_ID",pr:{any:["CF_PULL_REQUEST_NUMBER","CF_PULL_REQUEST_ID"]}},{name:"Codeship",constant:"CODESHIP",env:{CI_NAME:"codeship"}},{name:"Drone",constant:"DRONE",env:"DRONE",pr:{DRONE_BUILD_EVENT:"pull_request"}},{name:"dsari",constant:"DSARI",env:"DSARI"},{name:"GitHub Actions",constant:"GITHUB_ACTIONS",env:"GITHUB_ACTIONS",pr:{GITHUB_EVENT_NAME:"pull_request"}},{name:"GitLab CI",constant:"GITLAB",env:"GITLAB_CI",pr:"CI_MERGE_REQUEST_ID"},{name:"GoCD",constant:"GOCD",env:"GO_PIPELINE_LABEL"},{name:"LayerCI",constant:"LAYERCI",env:"LAYERCI",pr:"LAYERCI_PULL_REQUEST"},{name:"Hudson",constant:"HUDSON",env:"HUDSON_URL"},{name:"Jenkins",constant:"JENKINS",env:["JENKINS_URL","BUILD_ID"],pr:{any:["ghprbPullId","CHANGE_ID"]}},{name:"Magnum CI",constant:"MAGNUM",env:"MAGNUM"},{name:"Netlify CI",constant:"NETLIFY",env:"NETLIFY",pr:{env:"PULL_REQUEST",ne:"false"}},{name:"Nevercode",constant:"NEVERCODE",env:"NEVERCODE",pr:{env:"NEVERCODE_PULL_REQUEST",ne:"false"}},{name:"Render",constant:"RENDER",env:"RENDER",pr:{IS_PULL_REQUEST:"true"}},{name:"Sail 
CI",constant:"SAIL",env:"SAILCI",pr:"SAIL_PULL_REQUEST_NUMBER"},{name:"Semaphore",constant:"SEMAPHORE",env:"SEMAPHORE",pr:"PULL_REQUEST_NUMBER"},{name:"Screwdriver",constant:"SCREWDRIVER",env:"SCREWDRIVER",pr:{env:"SD_PULL_REQUEST",ne:"false"}},{name:"Shippable",constant:"SHIPPABLE",env:"SHIPPABLE",pr:{IS_PULL_REQUEST:"true"}},{name:"Solano CI",constant:"SOLANO",env:"TDDIUM",pr:"TDDIUM_PR_ID"},{name:"Strider CD",constant:"STRIDER",env:"STRIDER"},{name:"TaskCluster",constant:"TASKCLUSTER",env:["TASK_ID","RUN_ID"]},{name:"TeamCity",constant:"TEAMCITY",env:"TEAMCITY_VERSION"},{name:"Travis CI",constant:"TRAVIS",env:"TRAVIS",pr:{env:"TRAVIS_PULL_REQUEST",ne:"false"}},{name:"Vercel",constant:"VERCEL",env:"NOW_BUILDER"},{name:"Visual Studio App Center",constant:"APPCENTER",env:"APPCENTER_BUILD_ID"}]});var nc=w(Mn=>{"use strict";var fH=uH(),xo=process.env;Object.defineProperty(Mn,"_vendors",{value:fH.map(function(r){return r.constant})});Mn.name=null;Mn.isPR=null;fH.forEach(function(r){let t=(Array.isArray(r.env)?r.env:[r.env]).every(function(i){return gH(i)});if(Mn[r.constant]=t,t)switch(Mn.name=r.name,typeof r.pr){case"string":Mn.isPR=!!xo[r.pr];break;case"object":"env"in r.pr?Mn.isPR=r.pr.env in xo&&xo[r.pr.env]!==r.pr.ne:"any"in r.pr?Mn.isPR=r.pr.any.some(function(i){return!!xo[i]}):Mn.isPR=gH(r.pr);break;default:Mn.isPR=null}});Mn.isCI=!!(xo.CI||xo.CONTINUOUS_INTEGRATION||xo.BUILD_NUMBER||xo.RUN_ID||Mn.name);function gH(r){return typeof r=="string"?!!xo[r]:Object.keys(r).every(function(e){return xo[e]===r[e]})}});var gn={};ut(gn,{KeyRelationship:()=>sc,applyCascade:()=>nd,base64RegExp:()=>mH,colorStringAlphaRegExp:()=>CH,colorStringRegExp:()=>dH,computeKey:()=>kA,getPrintable:()=>Vr,hasExactLength:()=>BH,hasForbiddenKeys:()=>Hde,hasKeyRelationship:()=>av,hasMaxLength:()=>Qde,hasMinLength:()=>Bde,hasMutuallyExclusiveKeys:()=>Gde,hasRequiredKeys:()=>Ude,hasUniqueItems:()=>bde,isArray:()=>pde,isAtLeast:()=>xde,isAtMost:()=>Pde,isBase64:()=>Mde,isBoolean:()=>gde,isDate:()=>hde,isDict:()=>Cde,isEnum:()=>Vi,isHexColor:()=>Ode,isISO8601:()=>Tde,isInExclusiveRange:()=>kde,isInInclusiveRange:()=>Dde,isInstanceOf:()=>Ede,isInteger:()=>Rde,isJSON:()=>Kde,isLiteral:()=>cde,isLowerCase:()=>Fde,isNegative:()=>Sde,isNullable:()=>wde,isNumber:()=>fde,isObject:()=>mde,isOneOf:()=>Ide,isOptional:()=>yde,isPositive:()=>vde,isString:()=>id,isTuple:()=>dde,isUUID4:()=>Lde,isUnknown:()=>wH,isUpperCase:()=>Nde,iso8601RegExp:()=>ov,makeCoercionFn:()=>oc,makeSetter:()=>yH,makeTrait:()=>IH,makeValidator:()=>bt,matchesRegExp:()=>sd,plural:()=>vI,pushError:()=>pt,simpleKeyRegExp:()=>pH,uuid4RegExp:()=>EH});function bt({test:r}){return IH(r)()}function Vr(r){return r===null?"null":r===void 0?"undefined":r===""?"an empty string":JSON.stringify(r)}function kA(r,e){var t,i,n;return typeof e=="number"?`${(t=r==null?void 0:r.p)!==null&&t!==void 0?t:"."}[${e}]`:pH.test(e)?`${(i=r==null?void 0:r.p)!==null&&i!==void 0?i:""}.${e}`:`${(n=r==null?void 0:r.p)!==null&&n!==void 0?n:"."}[${JSON.stringify(e)}]`}function oc(r,e){return t=>{let i=r[e];return r[e]=t,oc(r,e).bind(null,i)}}function yH(r,e){return t=>{r[e]=t}}function vI(r,e,t){return r===1?e:t}function pt({errors:r,p:e}={},t){return r==null||r.push(`${e!=null?e:"."}: ${t}`),!1}function cde(r){return bt({test:(e,t)=>e!==r?pt(t,`Expected a literal (got ${Vr(r)})`):!0})}function Vi(r){let e=Array.isArray(r)?r:Object.values(r),t=new Set(e);return bt({test:(i,n)=>t.has(i)?!0:pt(n,`Expected a valid enumeration value (got ${Vr(i)})`)})}var 
pH,dH,CH,mH,EH,ov,IH,wH,id,ude,gde,fde,hde,pde,dde,Cde,mde,Ede,Ide,nd,yde,wde,Bde,Qde,BH,bde,Sde,vde,xde,Pde,Dde,kde,Rde,sd,Fde,Nde,Lde,Tde,Ode,Mde,Kde,Ude,Hde,Gde,sc,Yde,av,as=Pge(()=>{pH=/^[a-zA-Z_][a-zA-Z0-9_]*$/,dH=/^#[0-9a-f]{6}$/i,CH=/^#[0-9a-f]{6}([0-9a-f]{2})?$/i,mH=/^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$/,EH=/^[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89aAbB][a-f0-9]{3}-[a-f0-9]{12}$/i,ov=/^(?:[1-9]\d{3}(-?)(?:(?:0[1-9]|1[0-2])\1(?:0[1-9]|1\d|2[0-8])|(?:0[13-9]|1[0-2])\1(?:29|30)|(?:0[13578]|1[02])(?:\1)31|00[1-9]|0[1-9]\d|[12]\d{2}|3(?:[0-5]\d|6[0-5]))|(?:[1-9]\d(?:0[48]|[2468][048]|[13579][26])|(?:[2468][048]|[13579][26])00)(?:(-?)02(?:\2)29|-?366))T(?:[01]\d|2[0-3])(:?)[0-5]\d(?:\3[0-5]\d)?(?:Z|[+-][01]\d(?:\3[0-5]\d)?)$/,IH=r=>()=>r;wH=()=>bt({test:(r,e)=>!0});id=()=>bt({test:(r,e)=>typeof r!="string"?pt(e,`Expected a string (got ${Vr(r)})`):!0});ude=new Map([["true",!0],["True",!0],["1",!0],[1,!0],["false",!1],["False",!1],["0",!1],[0,!1]]),gde=()=>bt({test:(r,e)=>{var t;if(typeof r!="boolean"){if(typeof(e==null?void 0:e.coercions)<"u"){if(typeof(e==null?void 0:e.coercion)>"u")return pt(e,"Unbound coercion result");let i=ude.get(r);if(typeof i<"u")return e.coercions.push([(t=e.p)!==null&&t!==void 0?t:".",e.coercion.bind(null,i)]),!0}return pt(e,`Expected a boolean (got ${Vr(r)})`)}return!0}}),fde=()=>bt({test:(r,e)=>{var t;if(typeof r!="number"){if(typeof(e==null?void 0:e.coercions)<"u"){if(typeof(e==null?void 0:e.coercion)>"u")return pt(e,"Unbound coercion result");let i;if(typeof r=="string"){let n;try{n=JSON.parse(r)}catch{}if(typeof n=="number")if(JSON.stringify(n)===r)i=n;else return pt(e,`Received a number that can't be safely represented by the runtime (${r})`)}if(typeof i<"u")return e.coercions.push([(t=e.p)!==null&&t!==void 0?t:".",e.coercion.bind(null,i)]),!0}return pt(e,`Expected a number (got ${Vr(r)})`)}return!0}}),hde=()=>bt({test:(r,e)=>{var t;if(!(r instanceof Date)){if(typeof(e==null?void 0:e.coercions)<"u"){if(typeof(e==null?void 0:e.coercion)>"u")return pt(e,"Unbound coercion result");let i;if(typeof r=="string"&&ov.test(r))i=new Date(r);else{let n;if(typeof r=="string"){let s;try{s=JSON.parse(r)}catch{}typeof s=="number"&&(n=s)}else typeof r=="number"&&(n=r);if(typeof n<"u")if(Number.isSafeInteger(n)||!Number.isSafeInteger(n*1e3))i=new Date(n*1e3);else return pt(e,`Received a timestamp that can't be safely represented by the runtime (${r})`)}if(typeof i<"u")return e.coercions.push([(t=e.p)!==null&&t!==void 0?t:".",e.coercion.bind(null,i)]),!0}return pt(e,`Expected a date (got ${Vr(r)})`)}return!0}}),pde=(r,{delimiter:e}={})=>bt({test:(t,i)=>{var n;if(typeof t=="string"&&typeof e<"u"&&typeof(i==null?void 0:i.coercions)<"u"){if(typeof(i==null?void 0:i.coercion)>"u")return pt(i,"Unbound coercion result");t=t.split(e),i.coercions.push([(n=i.p)!==null&&n!==void 0?n:".",i.coercion.bind(null,t)])}if(!Array.isArray(t))return pt(i,`Expected an array (got ${Vr(t)})`);let s=!0;for(let o=0,a=t.length;o{let t=BH(r.length);return bt({test:(i,n)=>{var s;if(typeof i=="string"&&typeof e<"u"&&typeof(n==null?void 0:n.coercions)<"u"){if(typeof(n==null?void 0:n.coercion)>"u")return pt(n,"Unbound coercion result");i=i.split(e),n.coercions.push([(s=n.p)!==null&&s!==void 0?s:".",n.coercion.bind(null,i)])}if(!Array.isArray(i))return pt(n,`Expected a tuple (got ${Vr(i)})`);let o=t(i,Object.assign({},n));for(let a=0,l=i.length;abt({test:(t,i)=>{if(typeof t!="object"||t===null)return pt(i,`Expected an object (got ${Vr(t)})`);let n=Object.keys(t),s=!0;for(let 
o=0,a=n.length;o{let t=Object.keys(r);return bt({test:(i,n)=>{if(typeof i!="object"||i===null)return pt(n,`Expected an object (got ${Vr(i)})`);let s=new Set([...t,...Object.keys(i)]),o={},a=!0;for(let l of s){if(l==="constructor"||l==="__proto__")a=pt(Object.assign(Object.assign({},n),{p:kA(n,l)}),"Unsafe property name");else{let c=Object.prototype.hasOwnProperty.call(r,l)?r[l]:void 0,u=Object.prototype.hasOwnProperty.call(i,l)?i[l]:void 0;typeof c<"u"?a=c(u,Object.assign(Object.assign({},n),{p:kA(n,l),coercion:oc(i,l)}))&&a:e===null?a=pt(Object.assign(Object.assign({},n),{p:kA(n,l)}),`Extraneous property (got ${Vr(u)})`):Object.defineProperty(o,l,{enumerable:!0,get:()=>u,set:yH(i,l)})}if(!a&&(n==null?void 0:n.errors)==null)break}return e!==null&&(a||(n==null?void 0:n.errors)!=null)&&(a=e(o,n)&&a),a}})},Ede=r=>bt({test:(e,t)=>e instanceof r?!0:pt(t,`Expected an instance of ${r.name} (got ${Vr(e)})`)}),Ide=(r,{exclusive:e=!1}={})=>bt({test:(t,i)=>{var n,s,o;let a=[],l=typeof(i==null?void 0:i.errors)<"u"?[]:void 0;for(let c=0,u=r.length;c1?pt(i,`Expected to match exactly a single predicate (matched ${a.join(", ")})`):(o=i==null?void 0:i.errors)===null||o===void 0||o.push(...l),!1}}),nd=(r,e)=>bt({test:(t,i)=>{var n,s;let o={value:t},a=typeof(i==null?void 0:i.coercions)<"u"?oc(o,"value"):void 0,l=typeof(i==null?void 0:i.coercions)<"u"?[]:void 0;if(!r(t,Object.assign(Object.assign({},i),{coercion:a,coercions:l})))return!1;let c=[];if(typeof l<"u")for(let[,u]of l)c.push(u());try{if(typeof(i==null?void 0:i.coercions)<"u"){if(o.value!==t){if(typeof(i==null?void 0:i.coercion)>"u")return pt(i,"Unbound coercion result");i.coercions.push([(n=i.p)!==null&&n!==void 0?n:".",i.coercion.bind(null,o.value)])}(s=i==null?void 0:i.coercions)===null||s===void 0||s.push(...l)}return e.every(u=>u(o.value,i))}finally{for(let u of c)u()}}}),yde=r=>bt({test:(e,t)=>typeof e>"u"?!0:r(e,t)}),wde=r=>bt({test:(e,t)=>e===null?!0:r(e,t)}),Bde=r=>bt({test:(e,t)=>e.length>=r?!0:pt(t,`Expected to have a length of at least ${r} elements (got ${e.length})`)}),Qde=r=>bt({test:(e,t)=>e.length<=r?!0:pt(t,`Expected to have a length of at most ${r} elements (got ${e.length})`)}),BH=r=>bt({test:(e,t)=>e.length!==r?pt(t,`Expected to have a length of exactly ${r} elements (got ${e.length})`):!0}),bde=({map:r}={})=>bt({test:(e,t)=>{let i=new Set,n=new Set;for(let s=0,o=e.length;sbt({test:(r,e)=>r<=0?!0:pt(e,`Expected to be negative (got ${r})`)}),vde=()=>bt({test:(r,e)=>r>=0?!0:pt(e,`Expected to be positive (got ${r})`)}),xde=r=>bt({test:(e,t)=>e>=r?!0:pt(t,`Expected to be at least ${r} (got ${e})`)}),Pde=r=>bt({test:(e,t)=>e<=r?!0:pt(t,`Expected to be at most ${r} (got ${e})`)}),Dde=(r,e)=>bt({test:(t,i)=>t>=r&&t<=e?!0:pt(i,`Expected to be in the [${r}; ${e}] range (got ${t})`)}),kde=(r,e)=>bt({test:(t,i)=>t>=r&&tbt({test:(e,t)=>e!==Math.round(e)?pt(t,`Expected to be an integer (got ${e})`):Number.isSafeInteger(e)?!0:pt(t,`Expected to be a safe integer (got ${e})`)}),sd=r=>bt({test:(e,t)=>r.test(e)?!0:pt(t,`Expected to match the pattern ${r.toString()} (got ${Vr(e)})`)}),Fde=()=>bt({test:(r,e)=>r!==r.toLowerCase()?pt(e,`Expected to be all-lowercase (got ${r})`):!0}),Nde=()=>bt({test:(r,e)=>r!==r.toUpperCase()?pt(e,`Expected to be all-uppercase (got ${r})`):!0}),Lde=()=>bt({test:(r,e)=>EH.test(r)?!0:pt(e,`Expected to be a valid UUID v4 (got ${Vr(r)})`)}),Tde=()=>bt({test:(r,e)=>ov.test(r)?!1:pt(e,`Expected to be a valid ISO 8601 date string (got 
${Vr(r)})`)}),Ode=({alpha:r=!1})=>bt({test:(e,t)=>(r?dH.test(e):CH.test(e))?!0:pt(t,`Expected to be a valid hexadecimal color string (got ${Vr(e)})`)}),Mde=()=>bt({test:(r,e)=>mH.test(r)?!0:pt(e,`Expected to be a valid base 64 string (got ${Vr(r)})`)}),Kde=(r=wH())=>bt({test:(e,t)=>{let i;try{i=JSON.parse(e)}catch{return pt(t,`Expected to be a valid JSON string (got ${Vr(e)})`)}return r(i,t)}}),Ude=r=>{let e=new Set(r);return bt({test:(t,i)=>{let n=new Set(Object.keys(t)),s=[];for(let o of e)n.has(o)||s.push(o);return s.length>0?pt(i,`Missing required ${vI(s.length,"property","properties")} ${s.map(o=>`"${o}"`).join(", ")}`):!0}})},Hde=r=>{let e=new Set(r);return bt({test:(t,i)=>{let n=new Set(Object.keys(t)),s=[];for(let o of e)n.has(o)&&s.push(o);return s.length>0?pt(i,`Forbidden ${vI(s.length,"property","properties")} ${s.map(o=>`"${o}"`).join(", ")}`):!0}})},Gde=r=>{let e=new Set(r);return bt({test:(t,i)=>{let n=new Set(Object.keys(t)),s=[];for(let o of e)n.has(o)&&s.push(o);return s.length>1?pt(i,`Mutually exclusive properties ${s.map(o=>`"${o}"`).join(", ")}`):!0}})};(function(r){r.Forbids="Forbids",r.Requires="Requires"})(sc||(sc={}));Yde={[sc.Forbids]:{expect:!1,message:"forbids using"},[sc.Requires]:{expect:!0,message:"requires using"}},av=(r,e,t,{ignore:i=[]}={})=>{let n=new Set(i),s=new Set(t),o=Yde[e];return bt({test:(a,l)=>{let c=new Set(Object.keys(a));if(!c.has(r)||n.has(a[r]))return!0;let u=[];for(let g of s)(c.has(g)&&!n.has(a[g]))!==o.expect&&u.push(g);return u.length>=1?pt(l,`Property "${r}" ${o.message} ${vI(u.length,"property","properties")} ${u.map(g=>`"${g}"`).join(", ")}`):!0}})}});var UH=w((e$e,KH)=>{"use strict";KH.exports=(r,...e)=>new Promise(t=>{t(r(...e))})});var Yg=w((t$e,pv)=>{"use strict";var oCe=UH(),HH=r=>{if(r<1)throw new TypeError("Expected `concurrency` to be a number from 1 and up");let e=[],t=0,i=()=>{t--,e.length>0&&e.shift()()},n=(a,l,...c)=>{t++;let u=oCe(a,...c);l(u),u.then(i,i)},s=(a,l,...c)=>{tnew Promise(c=>s(a,c,...l));return Object.defineProperties(o,{activeCount:{get:()=>t},pendingCount:{get:()=>e.length}}),o};pv.exports=HH;pv.exports.default=HH});var cd=w((i$e,GH)=>{var aCe="2.0.0",ACe=Number.MAX_SAFE_INTEGER||9007199254740991,lCe=16;GH.exports={SEMVER_SPEC_VERSION:aCe,MAX_LENGTH:256,MAX_SAFE_INTEGER:ACe,MAX_SAFE_COMPONENT_LENGTH:lCe}});var ud=w((n$e,YH)=>{var cCe=typeof process=="object"&&process.env&&process.env.NODE_DEBUG&&/\bsemver\b/i.test(process.env.NODE_DEBUG)?(...r)=>console.error("SEMVER",...r):()=>{};YH.exports=cCe});var ac=w((FA,jH)=>{var{MAX_SAFE_COMPONENT_LENGTH:dv}=cd(),uCe=ud();FA=jH.exports={};var gCe=FA.re=[],et=FA.src=[],tt=FA.t={},fCe=0,St=(r,e,t)=>{let i=fCe++;uCe(i,e),tt[r]=i,et[i]=e,gCe[i]=new RegExp(e,t?"g":void 
0)};St("NUMERICIDENTIFIER","0|[1-9]\\d*");St("NUMERICIDENTIFIERLOOSE","[0-9]+");St("NONNUMERICIDENTIFIER","\\d*[a-zA-Z-][a-zA-Z0-9-]*");St("MAINVERSION",`(${et[tt.NUMERICIDENTIFIER]})\\.(${et[tt.NUMERICIDENTIFIER]})\\.(${et[tt.NUMERICIDENTIFIER]})`);St("MAINVERSIONLOOSE",`(${et[tt.NUMERICIDENTIFIERLOOSE]})\\.(${et[tt.NUMERICIDENTIFIERLOOSE]})\\.(${et[tt.NUMERICIDENTIFIERLOOSE]})`);St("PRERELEASEIDENTIFIER",`(?:${et[tt.NUMERICIDENTIFIER]}|${et[tt.NONNUMERICIDENTIFIER]})`);St("PRERELEASEIDENTIFIERLOOSE",`(?:${et[tt.NUMERICIDENTIFIERLOOSE]}|${et[tt.NONNUMERICIDENTIFIER]})`);St("PRERELEASE",`(?:-(${et[tt.PRERELEASEIDENTIFIER]}(?:\\.${et[tt.PRERELEASEIDENTIFIER]})*))`);St("PRERELEASELOOSE",`(?:-?(${et[tt.PRERELEASEIDENTIFIERLOOSE]}(?:\\.${et[tt.PRERELEASEIDENTIFIERLOOSE]})*))`);St("BUILDIDENTIFIER","[0-9A-Za-z-]+");St("BUILD",`(?:\\+(${et[tt.BUILDIDENTIFIER]}(?:\\.${et[tt.BUILDIDENTIFIER]})*))`);St("FULLPLAIN",`v?${et[tt.MAINVERSION]}${et[tt.PRERELEASE]}?${et[tt.BUILD]}?`);St("FULL",`^${et[tt.FULLPLAIN]}$`);St("LOOSEPLAIN",`[v=\\s]*${et[tt.MAINVERSIONLOOSE]}${et[tt.PRERELEASELOOSE]}?${et[tt.BUILD]}?`);St("LOOSE",`^${et[tt.LOOSEPLAIN]}$`);St("GTLT","((?:<|>)?=?)");St("XRANGEIDENTIFIERLOOSE",`${et[tt.NUMERICIDENTIFIERLOOSE]}|x|X|\\*`);St("XRANGEIDENTIFIER",`${et[tt.NUMERICIDENTIFIER]}|x|X|\\*`);St("XRANGEPLAIN",`[v=\\s]*(${et[tt.XRANGEIDENTIFIER]})(?:\\.(${et[tt.XRANGEIDENTIFIER]})(?:\\.(${et[tt.XRANGEIDENTIFIER]})(?:${et[tt.PRERELEASE]})?${et[tt.BUILD]}?)?)?`);St("XRANGEPLAINLOOSE",`[v=\\s]*(${et[tt.XRANGEIDENTIFIERLOOSE]})(?:\\.(${et[tt.XRANGEIDENTIFIERLOOSE]})(?:\\.(${et[tt.XRANGEIDENTIFIERLOOSE]})(?:${et[tt.PRERELEASELOOSE]})?${et[tt.BUILD]}?)?)?`);St("XRANGE",`^${et[tt.GTLT]}\\s*${et[tt.XRANGEPLAIN]}$`);St("XRANGELOOSE",`^${et[tt.GTLT]}\\s*${et[tt.XRANGEPLAINLOOSE]}$`);St("COERCE",`(^|[^\\d])(\\d{1,${dv}})(?:\\.(\\d{1,${dv}}))?(?:\\.(\\d{1,${dv}}))?(?:$|[^\\d])`);St("COERCERTL",et[tt.COERCE],!0);St("LONETILDE","(?:~>?)");St("TILDETRIM",`(\\s*)${et[tt.LONETILDE]}\\s+`,!0);FA.tildeTrimReplace="$1~";St("TILDE",`^${et[tt.LONETILDE]}${et[tt.XRANGEPLAIN]}$`);St("TILDELOOSE",`^${et[tt.LONETILDE]}${et[tt.XRANGEPLAINLOOSE]}$`);St("LONECARET","(?:\\^)");St("CARETTRIM",`(\\s*)${et[tt.LONECARET]}\\s+`,!0);FA.caretTrimReplace="$1^";St("CARET",`^${et[tt.LONECARET]}${et[tt.XRANGEPLAIN]}$`);St("CARETLOOSE",`^${et[tt.LONECARET]}${et[tt.XRANGEPLAINLOOSE]}$`);St("COMPARATORLOOSE",`^${et[tt.GTLT]}\\s*(${et[tt.LOOSEPLAIN]})$|^$`);St("COMPARATOR",`^${et[tt.GTLT]}\\s*(${et[tt.FULLPLAIN]})$|^$`);St("COMPARATORTRIM",`(\\s*)${et[tt.GTLT]}\\s*(${et[tt.LOOSEPLAIN]}|${et[tt.XRANGEPLAIN]})`,!0);FA.comparatorTrimReplace="$1$2$3";St("HYPHENRANGE",`^\\s*(${et[tt.XRANGEPLAIN]})\\s+-\\s+(${et[tt.XRANGEPLAIN]})\\s*$`);St("HYPHENRANGELOOSE",`^\\s*(${et[tt.XRANGEPLAINLOOSE]})\\s+-\\s+(${et[tt.XRANGEPLAINLOOSE]})\\s*$`);St("STAR","(<|>)?=?\\s*\\*");St("GTE0","^\\s*>=\\s*0.0.0\\s*$");St("GTE0PRE","^\\s*>=\\s*0.0.0-0\\s*$")});var gd=w((s$e,qH)=>{var hCe=["includePrerelease","loose","rtl"],pCe=r=>r?typeof r!="object"?{loose:!0}:hCe.filter(e=>r[e]).reduce((e,t)=>(e[t]=!0,e),{}):{};qH.exports=pCe});var FI=w((o$e,zH)=>{var JH=/^[0-9]+$/,WH=(r,e)=>{let t=JH.test(r),i=JH.test(e);return t&&i&&(r=+r,e=+e),r===e?0:t&&!i?-1:i&&!t?1:rWH(e,r);zH.exports={compareIdentifiers:WH,rcompareIdentifiers:dCe}});var Ti=w((a$e,_H)=>{var NI=ud(),{MAX_LENGTH:VH,MAX_SAFE_INTEGER:LI}=cd(),{re:XH,t:ZH}=ac(),CCe=gd(),{compareIdentifiers:fd}=FI(),Hn=class{constructor(e,t){if(t=CCe(t),e instanceof 
Hn){if(e.loose===!!t.loose&&e.includePrerelease===!!t.includePrerelease)return e;e=e.version}else if(typeof e!="string")throw new TypeError(`Invalid Version: ${e}`);if(e.length>VH)throw new TypeError(`version is longer than ${VH} characters`);NI("SemVer",e,t),this.options=t,this.loose=!!t.loose,this.includePrerelease=!!t.includePrerelease;let i=e.trim().match(t.loose?XH[ZH.LOOSE]:XH[ZH.FULL]);if(!i)throw new TypeError(`Invalid Version: ${e}`);if(this.raw=e,this.major=+i[1],this.minor=+i[2],this.patch=+i[3],this.major>LI||this.major<0)throw new TypeError("Invalid major version");if(this.minor>LI||this.minor<0)throw new TypeError("Invalid minor version");if(this.patch>LI||this.patch<0)throw new TypeError("Invalid patch version");i[4]?this.prerelease=i[4].split(".").map(n=>{if(/^[0-9]+$/.test(n)){let s=+n;if(s>=0&&s=0;)typeof this.prerelease[i]=="number"&&(this.prerelease[i]++,i=-2);i===-1&&this.prerelease.push(0)}t&&(this.prerelease[0]===t?isNaN(this.prerelease[1])&&(this.prerelease=[t,0]):this.prerelease=[t,0]);break;default:throw new Error(`invalid increment argument: ${e}`)}return this.format(),this.raw=this.version,this}};_H.exports=Hn});var Ac=w((A$e,rG)=>{var{MAX_LENGTH:mCe}=cd(),{re:$H,t:eG}=ac(),tG=Ti(),ECe=gd(),ICe=(r,e)=>{if(e=ECe(e),r instanceof tG)return r;if(typeof r!="string"||r.length>mCe||!(e.loose?$H[eG.LOOSE]:$H[eG.FULL]).test(r))return null;try{return new tG(r,e)}catch{return null}};rG.exports=ICe});var nG=w((l$e,iG)=>{var yCe=Ac(),wCe=(r,e)=>{let t=yCe(r,e);return t?t.version:null};iG.exports=wCe});var oG=w((c$e,sG)=>{var BCe=Ac(),QCe=(r,e)=>{let t=BCe(r.trim().replace(/^[=v]+/,""),e);return t?t.version:null};sG.exports=QCe});var AG=w((u$e,aG)=>{var bCe=Ti(),SCe=(r,e,t,i)=>{typeof t=="string"&&(i=t,t=void 0);try{return new bCe(r,t).inc(e,i).version}catch{return null}};aG.exports=SCe});var As=w((g$e,cG)=>{var lG=Ti(),vCe=(r,e,t)=>new lG(r,t).compare(new lG(e,t));cG.exports=vCe});var TI=w((f$e,uG)=>{var xCe=As(),PCe=(r,e,t)=>xCe(r,e,t)===0;uG.exports=PCe});var hG=w((h$e,fG)=>{var gG=Ac(),DCe=TI(),kCe=(r,e)=>{if(DCe(r,e))return null;{let t=gG(r),i=gG(e),n=t.prerelease.length||i.prerelease.length,s=n?"pre":"",o=n?"prerelease":"";for(let a in t)if((a==="major"||a==="minor"||a==="patch")&&t[a]!==i[a])return s+a;return o}};fG.exports=kCe});var dG=w((p$e,pG)=>{var RCe=Ti(),FCe=(r,e)=>new RCe(r,e).major;pG.exports=FCe});var mG=w((d$e,CG)=>{var NCe=Ti(),LCe=(r,e)=>new NCe(r,e).minor;CG.exports=LCe});var IG=w((C$e,EG)=>{var TCe=Ti(),OCe=(r,e)=>new TCe(r,e).patch;EG.exports=OCe});var wG=w((m$e,yG)=>{var MCe=Ac(),KCe=(r,e)=>{let t=MCe(r,e);return t&&t.prerelease.length?t.prerelease:null};yG.exports=KCe});var QG=w((E$e,BG)=>{var UCe=As(),HCe=(r,e,t)=>UCe(e,r,t);BG.exports=HCe});var SG=w((I$e,bG)=>{var GCe=As(),YCe=(r,e)=>GCe(r,e,!0);bG.exports=YCe});var OI=w((y$e,xG)=>{var vG=Ti(),jCe=(r,e,t)=>{let i=new vG(r,t),n=new vG(e,t);return i.compare(n)||i.compareBuild(n)};xG.exports=jCe});var DG=w((w$e,PG)=>{var qCe=OI(),JCe=(r,e)=>r.sort((t,i)=>qCe(t,i,e));PG.exports=JCe});var RG=w((B$e,kG)=>{var WCe=OI(),zCe=(r,e)=>r.sort((t,i)=>WCe(i,t,e));kG.exports=zCe});var hd=w((Q$e,FG)=>{var VCe=As(),XCe=(r,e,t)=>VCe(r,e,t)>0;FG.exports=XCe});var MI=w((b$e,NG)=>{var ZCe=As(),_Ce=(r,e,t)=>ZCe(r,e,t)<0;NG.exports=_Ce});var Cv=w((S$e,LG)=>{var $Ce=As(),eme=(r,e,t)=>$Ce(r,e,t)!==0;LG.exports=eme});var KI=w((v$e,TG)=>{var tme=As(),rme=(r,e,t)=>tme(r,e,t)>=0;TG.exports=rme});var UI=w((x$e,OG)=>{var ime=As(),nme=(r,e,t)=>ime(r,e,t)<=0;OG.exports=nme});var mv=w((P$e,MG)=>{var 
sme=TI(),ome=Cv(),ame=hd(),Ame=KI(),lme=MI(),cme=UI(),ume=(r,e,t,i)=>{switch(e){case"===":return typeof r=="object"&&(r=r.version),typeof t=="object"&&(t=t.version),r===t;case"!==":return typeof r=="object"&&(r=r.version),typeof t=="object"&&(t=t.version),r!==t;case"":case"=":case"==":return sme(r,t,i);case"!=":return ome(r,t,i);case">":return ame(r,t,i);case">=":return Ame(r,t,i);case"<":return lme(r,t,i);case"<=":return cme(r,t,i);default:throw new TypeError(`Invalid operator: ${e}`)}};MG.exports=ume});var UG=w((D$e,KG)=>{var gme=Ti(),fme=Ac(),{re:HI,t:GI}=ac(),hme=(r,e)=>{if(r instanceof gme)return r;if(typeof r=="number"&&(r=String(r)),typeof r!="string")return null;e=e||{};let t=null;if(!e.rtl)t=r.match(HI[GI.COERCE]);else{let i;for(;(i=HI[GI.COERCERTL].exec(r))&&(!t||t.index+t[0].length!==r.length);)(!t||i.index+i[0].length!==t.index+t[0].length)&&(t=i),HI[GI.COERCERTL].lastIndex=i.index+i[1].length+i[2].length;HI[GI.COERCERTL].lastIndex=-1}return t===null?null:fme(`${t[2]}.${t[3]||"0"}.${t[4]||"0"}`,e)};KG.exports=hme});var GG=w((k$e,HG)=>{"use strict";HG.exports=function(r){r.prototype[Symbol.iterator]=function*(){for(let e=this.head;e;e=e.next)yield e.value}}});var YI=w((R$e,YG)=>{"use strict";YG.exports=Ht;Ht.Node=lc;Ht.create=Ht;function Ht(r){var e=this;if(e instanceof Ht||(e=new Ht),e.tail=null,e.head=null,e.length=0,r&&typeof r.forEach=="function")r.forEach(function(n){e.push(n)});else if(arguments.length>0)for(var t=0,i=arguments.length;t1)t=e;else if(this.head)i=this.head.next,t=this.head.value;else throw new TypeError("Reduce of empty list with no initial value");for(var n=0;i!==null;n++)t=r(t,i.value,n),i=i.next;return t};Ht.prototype.reduceReverse=function(r,e){var t,i=this.tail;if(arguments.length>1)t=e;else if(this.tail)i=this.tail.prev,t=this.tail.value;else throw new TypeError("Reduce of empty list with no initial value");for(var n=this.length-1;i!==null;n--)t=r(t,i.value,n),i=i.prev;return t};Ht.prototype.toArray=function(){for(var r=new Array(this.length),e=0,t=this.head;t!==null;e++)r[e]=t.value,t=t.next;return r};Ht.prototype.toArrayReverse=function(){for(var r=new Array(this.length),e=0,t=this.tail;t!==null;e++)r[e]=t.value,t=t.prev;return r};Ht.prototype.slice=function(r,e){e=e||this.length,e<0&&(e+=this.length),r=r||0,r<0&&(r+=this.length);var t=new Ht;if(ethis.length&&(e=this.length);for(var i=0,n=this.head;n!==null&&ithis.length&&(e=this.length);for(var i=this.length,n=this.tail;n!==null&&i>e;i--)n=n.prev;for(;n!==null&&i>r;i--,n=n.prev)t.push(n.value);return t};Ht.prototype.splice=function(r,e,...t){r>this.length&&(r=this.length-1),r<0&&(r=this.length+r);for(var i=0,n=this.head;n!==null&&i{"use strict";var mme=YI(),cc=Symbol("max"),ba=Symbol("length"),jg=Symbol("lengthCalculator"),dd=Symbol("allowStale"),uc=Symbol("maxAge"),Qa=Symbol("dispose"),jG=Symbol("noDisposeOnSet"),di=Symbol("lruList"),Vs=Symbol("cache"),JG=Symbol("updateAgeOnGet"),Ev=()=>1,yv=class{constructor(e){if(typeof e=="number"&&(e={max:e}),e||(e={}),e.max&&(typeof e.max!="number"||e.max<0))throw new TypeError("max must be a non-negative number");let t=this[cc]=e.max||1/0,i=e.length||Ev;if(this[jg]=typeof i!="function"?Ev:i,this[dd]=e.stale||!1,e.maxAge&&typeof e.maxAge!="number")throw new TypeError("maxAge must be a number");this[uc]=e.maxAge||0,this[Qa]=e.dispose,this[jG]=e.noDisposeOnSet||!1,this[JG]=e.updateAgeOnGet||!1,this.reset()}set max(e){if(typeof e!="number"||e<0)throw new TypeError("max must be a non-negative number");this[cc]=e||1/0,pd(this)}get max(){return this[cc]}set 
allowStale(e){this[dd]=!!e}get allowStale(){return this[dd]}set maxAge(e){if(typeof e!="number")throw new TypeError("maxAge must be a non-negative number");this[uc]=e,pd(this)}get maxAge(){return this[uc]}set lengthCalculator(e){typeof e!="function"&&(e=Ev),e!==this[jg]&&(this[jg]=e,this[ba]=0,this[di].forEach(t=>{t.length=this[jg](t.value,t.key),this[ba]+=t.length})),pd(this)}get lengthCalculator(){return this[jg]}get length(){return this[ba]}get itemCount(){return this[di].length}rforEach(e,t){t=t||this;for(let i=this[di].tail;i!==null;){let n=i.prev;qG(this,e,i,t),i=n}}forEach(e,t){t=t||this;for(let i=this[di].head;i!==null;){let n=i.next;qG(this,e,i,t),i=n}}keys(){return this[di].toArray().map(e=>e.key)}values(){return this[di].toArray().map(e=>e.value)}reset(){this[Qa]&&this[di]&&this[di].length&&this[di].forEach(e=>this[Qa](e.key,e.value)),this[Vs]=new Map,this[di]=new mme,this[ba]=0}dump(){return this[di].map(e=>jI(this,e)?!1:{k:e.key,v:e.value,e:e.now+(e.maxAge||0)}).toArray().filter(e=>e)}dumpLru(){return this[di]}set(e,t,i){if(i=i||this[uc],i&&typeof i!="number")throw new TypeError("maxAge must be a number");let n=i?Date.now():0,s=this[jg](t,e);if(this[Vs].has(e)){if(s>this[cc])return qg(this,this[Vs].get(e)),!1;let l=this[Vs].get(e).value;return this[Qa]&&(this[jG]||this[Qa](e,l.value)),l.now=n,l.maxAge=i,l.value=t,this[ba]+=s-l.length,l.length=s,this.get(e),pd(this),!0}let o=new wv(e,t,s,n,i);return o.length>this[cc]?(this[Qa]&&this[Qa](e,t),!1):(this[ba]+=o.length,this[di].unshift(o),this[Vs].set(e,this[di].head),pd(this),!0)}has(e){if(!this[Vs].has(e))return!1;let t=this[Vs].get(e).value;return!jI(this,t)}get(e){return Iv(this,e,!0)}peek(e){return Iv(this,e,!1)}pop(){let e=this[di].tail;return e?(qg(this,e),e.value):null}del(e){qg(this,this[Vs].get(e))}load(e){this.reset();let t=Date.now();for(let i=e.length-1;i>=0;i--){let n=e[i],s=n.e||0;if(s===0)this.set(n.k,n.v);else{let o=s-t;o>0&&this.set(n.k,n.v,o)}}}prune(){this[Vs].forEach((e,t)=>Iv(this,t,!1))}},Iv=(r,e,t)=>{let i=r[Vs].get(e);if(i){let n=i.value;if(jI(r,n)){if(qg(r,i),!r[dd])return}else t&&(r[JG]&&(i.value.now=Date.now()),r[di].unshiftNode(i));return n.value}},jI=(r,e)=>{if(!e||!e.maxAge&&!r[uc])return!1;let t=Date.now()-e.now;return e.maxAge?t>e.maxAge:r[uc]&&t>r[uc]},pd=r=>{if(r[ba]>r[cc])for(let e=r[di].tail;r[ba]>r[cc]&&e!==null;){let t=e.prev;qg(r,e),e=t}},qg=(r,e)=>{if(e){let t=e.value;r[Qa]&&r[Qa](t.key,t.value),r[ba]-=t.length,r[Vs].delete(t.key),r[di].removeNode(e)}},wv=class{constructor(e,t,i,n,s){this.key=e,this.value=t,this.length=i,this.now=n,this.maxAge=s||0}},qG=(r,e,t,i)=>{let n=t.value;jI(r,n)&&(qg(r,t),r[dd]||(n=void 0)),n&&e.call(i,n.value,n.key,r)};WG.exports=yv});var ls=w((N$e,_G)=>{var gc=class{constructor(e,t){if(t=Ime(t),e instanceof gc)return e.loose===!!t.loose&&e.includePrerelease===!!t.includePrerelease?e:new gc(e.raw,t);if(e instanceof Bv)return this.raw=e.value,this.set=[[e]],this.format(),this;if(this.options=t,this.loose=!!t.loose,this.includePrerelease=!!t.includePrerelease,this.raw=e,this.set=e.split(/\s*\|\|\s*/).map(i=>this.parseRange(i.trim())).filter(i=>i.length),!this.set.length)throw new TypeError(`Invalid SemVer Range: ${e}`);if(this.set.length>1){let i=this.set[0];if(this.set=this.set.filter(n=>!XG(n[0])),this.set.length===0)this.set=[i];else if(this.set.length>1){for(let n of this.set)if(n.length===1&&bme(n[0])){this.set=[n];break}}}this.format()}format(){return this.range=this.set.map(e=>e.join(" ").trim()).join("||").trim(),this.range}toString(){return 
this.range}parseRange(e){e=e.trim();let i=`parseRange:${Object.keys(this.options).join(",")}:${e}`,n=VG.get(i);if(n)return n;let s=this.options.loose,o=s?Oi[Qi.HYPHENRANGELOOSE]:Oi[Qi.HYPHENRANGE];e=e.replace(o,Lme(this.options.includePrerelease)),Gr("hyphen replace",e),e=e.replace(Oi[Qi.COMPARATORTRIM],wme),Gr("comparator trim",e,Oi[Qi.COMPARATORTRIM]),e=e.replace(Oi[Qi.TILDETRIM],Bme),e=e.replace(Oi[Qi.CARETTRIM],Qme),e=e.split(/\s+/).join(" ");let a=s?Oi[Qi.COMPARATORLOOSE]:Oi[Qi.COMPARATOR],l=e.split(" ").map(f=>Sme(f,this.options)).join(" ").split(/\s+/).map(f=>Nme(f,this.options)).filter(this.options.loose?f=>!!f.match(a):()=>!0).map(f=>new Bv(f,this.options)),c=l.length,u=new Map;for(let f of l){if(XG(f))return[f];u.set(f.value,f)}u.size>1&&u.has("")&&u.delete("");let g=[...u.values()];return VG.set(i,g),g}intersects(e,t){if(!(e instanceof gc))throw new TypeError("a Range is required");return this.set.some(i=>ZG(i,t)&&e.set.some(n=>ZG(n,t)&&i.every(s=>n.every(o=>s.intersects(o,t)))))}test(e){if(!e)return!1;if(typeof e=="string")try{e=new yme(e,this.options)}catch{return!1}for(let t=0;tr.value==="<0.0.0-0",bme=r=>r.value==="",ZG=(r,e)=>{let t=!0,i=r.slice(),n=i.pop();for(;t&&i.length;)t=i.every(s=>n.intersects(s,e)),n=i.pop();return t},Sme=(r,e)=>(Gr("comp",r,e),r=Pme(r,e),Gr("caret",r),r=vme(r,e),Gr("tildes",r),r=kme(r,e),Gr("xrange",r),r=Fme(r,e),Gr("stars",r),r),Zi=r=>!r||r.toLowerCase()==="x"||r==="*",vme=(r,e)=>r.trim().split(/\s+/).map(t=>xme(t,e)).join(" "),xme=(r,e)=>{let t=e.loose?Oi[Qi.TILDELOOSE]:Oi[Qi.TILDE];return r.replace(t,(i,n,s,o,a)=>{Gr("tilde",r,i,n,s,o,a);let l;return Zi(n)?l="":Zi(s)?l=`>=${n}.0.0 <${+n+1}.0.0-0`:Zi(o)?l=`>=${n}.${s}.0 <${n}.${+s+1}.0-0`:a?(Gr("replaceTilde pr",a),l=`>=${n}.${s}.${o}-${a} <${n}.${+s+1}.0-0`):l=`>=${n}.${s}.${o} <${n}.${+s+1}.0-0`,Gr("tilde return",l),l})},Pme=(r,e)=>r.trim().split(/\s+/).map(t=>Dme(t,e)).join(" "),Dme=(r,e)=>{Gr("caret",r,e);let t=e.loose?Oi[Qi.CARETLOOSE]:Oi[Qi.CARET],i=e.includePrerelease?"-0":"";return r.replace(t,(n,s,o,a,l)=>{Gr("caret",r,n,s,o,a,l);let c;return Zi(s)?c="":Zi(o)?c=`>=${s}.0.0${i} <${+s+1}.0.0-0`:Zi(a)?s==="0"?c=`>=${s}.${o}.0${i} <${s}.${+o+1}.0-0`:c=`>=${s}.${o}.0${i} <${+s+1}.0.0-0`:l?(Gr("replaceCaret pr",l),s==="0"?o==="0"?c=`>=${s}.${o}.${a}-${l} <${s}.${o}.${+a+1}-0`:c=`>=${s}.${o}.${a}-${l} <${s}.${+o+1}.0-0`:c=`>=${s}.${o}.${a}-${l} <${+s+1}.0.0-0`):(Gr("no pr"),s==="0"?o==="0"?c=`>=${s}.${o}.${a}${i} <${s}.${o}.${+a+1}-0`:c=`>=${s}.${o}.${a}${i} <${s}.${+o+1}.0-0`:c=`>=${s}.${o}.${a} <${+s+1}.0.0-0`),Gr("caret return",c),c})},kme=(r,e)=>(Gr("replaceXRanges",r,e),r.split(/\s+/).map(t=>Rme(t,e)).join(" ")),Rme=(r,e)=>{r=r.trim();let t=e.loose?Oi[Qi.XRANGELOOSE]:Oi[Qi.XRANGE];return r.replace(t,(i,n,s,o,a,l)=>{Gr("xRange",r,i,n,s,o,a,l);let c=Zi(s),u=c||Zi(o),g=u||Zi(a),f=g;return n==="="&&f&&(n=""),l=e.includePrerelease?"-0":"",c?n===">"||n==="<"?i="<0.0.0-0":i="*":n&&f?(u&&(o=0),a=0,n===">"?(n=">=",u?(s=+s+1,o=0,a=0):(o=+o+1,a=0)):n==="<="&&(n="<",u?s=+s+1:o=+o+1),n==="<"&&(l="-0"),i=`${n+s}.${o}.${a}${l}`):u?i=`>=${s}.0.0${l} <${+s+1}.0.0-0`:g&&(i=`>=${s}.${o}.0${l} <${s}.${+o+1}.0-0`),Gr("xRange 
return",i),i})},Fme=(r,e)=>(Gr("replaceStars",r,e),r.trim().replace(Oi[Qi.STAR],"")),Nme=(r,e)=>(Gr("replaceGTE0",r,e),r.trim().replace(Oi[e.includePrerelease?Qi.GTE0PRE:Qi.GTE0],"")),Lme=r=>(e,t,i,n,s,o,a,l,c,u,g,f,h)=>(Zi(i)?t="":Zi(n)?t=`>=${i}.0.0${r?"-0":""}`:Zi(s)?t=`>=${i}.${n}.0${r?"-0":""}`:o?t=`>=${t}`:t=`>=${t}${r?"-0":""}`,Zi(c)?l="":Zi(u)?l=`<${+c+1}.0.0-0`:Zi(g)?l=`<${c}.${+u+1}.0-0`:f?l=`<=${c}.${u}.${g}-${f}`:r?l=`<${c}.${u}.${+g+1}-0`:l=`<=${l}`,`${t} ${l}`.trim()),Tme=(r,e,t)=>{for(let i=0;i0){let n=r[i].semver;if(n.major===e.major&&n.minor===e.minor&&n.patch===e.patch)return!0}return!1}return!0}});var Cd=w((L$e,iY)=>{var md=Symbol("SemVer ANY"),Jg=class{static get ANY(){return md}constructor(e,t){if(t=Ome(t),e instanceof Jg){if(e.loose===!!t.loose)return e;e=e.value}bv("comparator",e,t),this.options=t,this.loose=!!t.loose,this.parse(e),this.semver===md?this.value="":this.value=this.operator+this.semver.version,bv("comp",this)}parse(e){let t=this.options.loose?$G[eY.COMPARATORLOOSE]:$G[eY.COMPARATOR],i=e.match(t);if(!i)throw new TypeError(`Invalid comparator: ${e}`);this.operator=i[1]!==void 0?i[1]:"",this.operator==="="&&(this.operator=""),i[2]?this.semver=new tY(i[2],this.options.loose):this.semver=md}toString(){return this.value}test(e){if(bv("Comparator.test",e,this.options.loose),this.semver===md||e===md)return!0;if(typeof e=="string")try{e=new tY(e,this.options)}catch{return!1}return Qv(e,this.operator,this.semver,this.options)}intersects(e,t){if(!(e instanceof Jg))throw new TypeError("a Comparator is required");if((!t||typeof t!="object")&&(t={loose:!!t,includePrerelease:!1}),this.operator==="")return this.value===""?!0:new rY(e.value,t).test(this.value);if(e.operator==="")return e.value===""?!0:new rY(this.value,t).test(e.semver);let i=(this.operator===">="||this.operator===">")&&(e.operator===">="||e.operator===">"),n=(this.operator==="<="||this.operator==="<")&&(e.operator==="<="||e.operator==="<"),s=this.semver.version===e.semver.version,o=(this.operator===">="||this.operator==="<=")&&(e.operator===">="||e.operator==="<="),a=Qv(this.semver,"<",e.semver,t)&&(this.operator===">="||this.operator===">")&&(e.operator==="<="||e.operator==="<"),l=Qv(this.semver,">",e.semver,t)&&(this.operator==="<="||this.operator==="<")&&(e.operator===">="||e.operator===">");return i||n||s&&o||a||l}};iY.exports=Jg;var Ome=gd(),{re:$G,t:eY}=ac(),Qv=mv(),bv=ud(),tY=Ti(),rY=ls()});var Ed=w((T$e,nY)=>{var Mme=ls(),Kme=(r,e,t)=>{try{e=new Mme(e,t)}catch{return!1}return e.test(r)};nY.exports=Kme});var oY=w((O$e,sY)=>{var Ume=ls(),Hme=(r,e)=>new Ume(r,e).set.map(t=>t.map(i=>i.value).join(" ").trim().split(" "));sY.exports=Hme});var AY=w((M$e,aY)=>{var Gme=Ti(),Yme=ls(),jme=(r,e,t)=>{let i=null,n=null,s=null;try{s=new Yme(e,t)}catch{return null}return r.forEach(o=>{s.test(o)&&(!i||n.compare(o)===-1)&&(i=o,n=new Gme(i,t))}),i};aY.exports=jme});var cY=w((K$e,lY)=>{var qme=Ti(),Jme=ls(),Wme=(r,e,t)=>{let i=null,n=null,s=null;try{s=new Jme(e,t)}catch{return null}return r.forEach(o=>{s.test(o)&&(!i||n.compare(o)===1)&&(i=o,n=new qme(i,t))}),i};lY.exports=Wme});var fY=w((U$e,gY)=>{var Sv=Ti(),zme=ls(),uY=hd(),Vme=(r,e)=>{r=new zme(r,e);let t=new Sv("0.0.0");if(r.test(t)||(t=new Sv("0.0.0-0"),r.test(t)))return t;t=null;for(let i=0;i{let a=new Sv(o.semver.version);switch(o.operator){case">":a.prerelease.length===0?a.patch++:a.prerelease.push(0),a.raw=a.format();case"":case">=":(!s||uY(a,s))&&(s=a);break;case"<":case"<=":break;default:throw new Error(`Unexpected operation: 
${o.operator}`)}}),s&&(!t||uY(t,s))&&(t=s)}return t&&r.test(t)?t:null};gY.exports=Vme});var pY=w((H$e,hY)=>{var Xme=ls(),Zme=(r,e)=>{try{return new Xme(r,e).range||"*"}catch{return null}};hY.exports=Zme});var qI=w((G$e,EY)=>{var _me=Ti(),mY=Cd(),{ANY:$me}=mY,eEe=ls(),tEe=Ed(),dY=hd(),CY=MI(),rEe=UI(),iEe=KI(),nEe=(r,e,t,i)=>{r=new _me(r,i),e=new eEe(e,i);let n,s,o,a,l;switch(t){case">":n=dY,s=rEe,o=CY,a=">",l=">=";break;case"<":n=CY,s=iEe,o=dY,a="<",l="<=";break;default:throw new TypeError('Must provide a hilo val of "<" or ">"')}if(tEe(r,e,i))return!1;for(let c=0;c{h.semver===$me&&(h=new mY(">=0.0.0")),g=g||h,f=f||h,n(h.semver,g.semver,i)?g=h:o(h.semver,f.semver,i)&&(f=h)}),g.operator===a||g.operator===l||(!f.operator||f.operator===a)&&s(r,f.semver))return!1;if(f.operator===l&&o(r,f.semver))return!1}return!0};EY.exports=nEe});var yY=w((Y$e,IY)=>{var sEe=qI(),oEe=(r,e,t)=>sEe(r,e,">",t);IY.exports=oEe});var BY=w((j$e,wY)=>{var aEe=qI(),AEe=(r,e,t)=>aEe(r,e,"<",t);wY.exports=AEe});var SY=w((q$e,bY)=>{var QY=ls(),lEe=(r,e,t)=>(r=new QY(r,t),e=new QY(e,t),r.intersects(e));bY.exports=lEe});var xY=w((J$e,vY)=>{var cEe=Ed(),uEe=As();vY.exports=(r,e,t)=>{let i=[],n=null,s=null,o=r.sort((u,g)=>uEe(u,g,t));for(let u of o)cEe(u,e,t)?(s=u,n||(n=u)):(s&&i.push([n,s]),s=null,n=null);n&&i.push([n,null]);let a=[];for(let[u,g]of i)u===g?a.push(u):!g&&u===o[0]?a.push("*"):g?u===o[0]?a.push(`<=${g}`):a.push(`${u} - ${g}`):a.push(`>=${u}`);let l=a.join(" || "),c=typeof e.raw=="string"?e.raw:String(e);return l.length{var PY=ls(),JI=Cd(),{ANY:vv}=JI,Id=Ed(),xv=As(),gEe=(r,e,t={})=>{if(r===e)return!0;r=new PY(r,t),e=new PY(e,t);let i=!1;e:for(let n of r.set){for(let s of e.set){let o=fEe(n,s,t);if(i=i||o!==null,o)continue e}if(i)return!1}return!0},fEe=(r,e,t)=>{if(r===e)return!0;if(r.length===1&&r[0].semver===vv){if(e.length===1&&e[0].semver===vv)return!0;t.includePrerelease?r=[new JI(">=0.0.0-0")]:r=[new JI(">=0.0.0")]}if(e.length===1&&e[0].semver===vv){if(t.includePrerelease)return!0;e=[new JI(">=0.0.0")]}let i=new Set,n,s;for(let h of r)h.operator===">"||h.operator===">="?n=DY(n,h,t):h.operator==="<"||h.operator==="<="?s=kY(s,h,t):i.add(h.semver);if(i.size>1)return null;let o;if(n&&s){if(o=xv(n.semver,s.semver,t),o>0)return null;if(o===0&&(n.operator!==">="||s.operator!=="<="))return null}for(let h of i){if(n&&!Id(h,String(n),t)||s&&!Id(h,String(s),t))return null;for(let p of e)if(!Id(h,String(p),t))return!1;return!0}let a,l,c,u,g=s&&!t.includePrerelease&&s.semver.prerelease.length?s.semver:!1,f=n&&!t.includePrerelease&&n.semver.prerelease.length?n.semver:!1;g&&g.prerelease.length===1&&s.operator==="<"&&g.prerelease[0]===0&&(g=!1);for(let h of e){if(u=u||h.operator===">"||h.operator===">=",c=c||h.operator==="<"||h.operator==="<=",n){if(f&&h.semver.prerelease&&h.semver.prerelease.length&&h.semver.major===f.major&&h.semver.minor===f.minor&&h.semver.patch===f.patch&&(f=!1),h.operator===">"||h.operator===">="){if(a=DY(n,h,t),a===h&&a!==n)return!1}else if(n.operator===">="&&!Id(n.semver,String(h),t))return!1}if(s){if(g&&h.semver.prerelease&&h.semver.prerelease.length&&h.semver.major===g.major&&h.semver.minor===g.minor&&h.semver.patch===g.patch&&(g=!1),h.operator==="<"||h.operator==="<="){if(l=kY(s,h,t),l===h&&l!==s)return!1}else if(s.operator==="<="&&!Id(s.semver,String(h),t))return!1}if(!h.operator&&(s||n)&&o!==0)return!1}return!(n&&c&&!s&&o!==0||s&&u&&!n&&o!==0||f||g)},DY=(r,e,t)=>{if(!r)return e;let i=xv(r.semver,e.semver,t);return i>0?r:i<0||e.operator===">"&&r.operator===">="?e:r},kY=(r,e,t)=>{if(!r)return 
e;let i=xv(r.semver,e.semver,t);return i<0?r:i>0||e.operator==="<"&&r.operator==="<="?e:r};RY.exports=gEe});var Xr=w((z$e,NY)=>{var Pv=ac();NY.exports={re:Pv.re,src:Pv.src,tokens:Pv.t,SEMVER_SPEC_VERSION:cd().SEMVER_SPEC_VERSION,SemVer:Ti(),compareIdentifiers:FI().compareIdentifiers,rcompareIdentifiers:FI().rcompareIdentifiers,parse:Ac(),valid:nG(),clean:oG(),inc:AG(),diff:hG(),major:dG(),minor:mG(),patch:IG(),prerelease:wG(),compare:As(),rcompare:QG(),compareLoose:SG(),compareBuild:OI(),sort:DG(),rsort:RG(),gt:hd(),lt:MI(),eq:TI(),neq:Cv(),gte:KI(),lte:UI(),cmp:mv(),coerce:UG(),Comparator:Cd(),Range:ls(),satisfies:Ed(),toComparators:oY(),maxSatisfying:AY(),minSatisfying:cY(),minVersion:fY(),validRange:pY(),outside:qI(),gtr:yY(),ltr:BY(),intersects:SY(),simplifyRange:xY(),subset:FY()}});var Dv=w(WI=>{"use strict";Object.defineProperty(WI,"__esModule",{value:!0});WI.VERSION=void 0;WI.VERSION="9.1.0"});var Gt=w((exports,module)=>{"use strict";var __spreadArray=exports&&exports.__spreadArray||function(r,e,t){if(t||arguments.length===2)for(var i=0,n=e.length,s;i{(function(r,e){typeof define=="function"&&define.amd?define([],e):typeof zI=="object"&&zI.exports?zI.exports=e():r.regexpToAst=e()})(typeof self<"u"?self:LY,function(){function r(){}r.prototype.saveState=function(){return{idx:this.idx,input:this.input,groupIdx:this.groupIdx}},r.prototype.restoreState=function(p){this.idx=p.idx,this.input=p.input,this.groupIdx=p.groupIdx},r.prototype.pattern=function(p){this.idx=0,this.input=p,this.groupIdx=0,this.consumeChar("/");var C=this.disjunction();this.consumeChar("/");for(var y={type:"Flags",loc:{begin:this.idx,end:p.length},global:!1,ignoreCase:!1,multiLine:!1,unicode:!1,sticky:!1};this.isRegExpFlag();)switch(this.popChar()){case"g":o(y,"global");break;case"i":o(y,"ignoreCase");break;case"m":o(y,"multiLine");break;case"u":o(y,"unicode");break;case"y":o(y,"sticky");break}if(this.idx!==this.input.length)throw Error("Redundant input: "+this.input.substring(this.idx));return{type:"Pattern",flags:y,value:C,loc:this.loc(0)}},r.prototype.disjunction=function(){var p=[],C=this.idx;for(p.push(this.alternative());this.peekChar()==="|";)this.consumeChar("|"),p.push(this.alternative());return{type:"Disjunction",value:p,loc:this.loc(C)}},r.prototype.alternative=function(){for(var p=[],C=this.idx;this.isTerm();)p.push(this.term());return{type:"Alternative",value:p,loc:this.loc(C)}},r.prototype.term=function(){return this.isAssertion()?this.assertion():this.atom()},r.prototype.assertion=function(){var p=this.idx;switch(this.popChar()){case"^":return{type:"StartAnchor",loc:this.loc(p)};case"$":return{type:"EndAnchor",loc:this.loc(p)};case"\\":switch(this.popChar()){case"b":return{type:"WordBoundary",loc:this.loc(p)};case"B":return{type:"NonWordBoundary",loc:this.loc(p)}}throw Error("Invalid Assertion Escape");case"(":this.consumeChar("?");var C;switch(this.popChar()){case"=":C="Lookahead";break;case"!":C="NegativeLookahead";break}a(C);var y=this.disjunction();return this.consumeChar(")"),{type:C,value:y,loc:this.loc(p)}}l()},r.prototype.quantifier=function(p){var C,y=this.idx;switch(this.popChar()){case"*":C={atLeast:0,atMost:1/0};break;case"+":C={atLeast:1,atMost:1/0};break;case"?":C={atLeast:0,atMost:1};break;case"{":var B=this.integerIncludingZero();switch(this.popChar()){case"}":C={atLeast:B,atMost:B};break;case",":var v;this.isDigit()?(v=this.integerIncludingZero(),C={atLeast:B,atMost:v}):C={atLeast:B,atMost:1/0},this.consumeChar("}");break}if(p===!0&&C===void 0)return;a(C);break}if(!(p===!0&&C===void 
0))return a(C),this.peekChar(0)==="?"?(this.consumeChar("?"),C.greedy=!1):C.greedy=!0,C.type="Quantifier",C.loc=this.loc(y),C},r.prototype.atom=function(){var p,C=this.idx;switch(this.peekChar()){case".":p=this.dotAll();break;case"\\":p=this.atomEscape();break;case"[":p=this.characterClass();break;case"(":p=this.group();break}return p===void 0&&this.isPatternCharacter()&&(p=this.patternCharacter()),a(p),p.loc=this.loc(C),this.isQuantifier()&&(p.quantifier=this.quantifier()),p},r.prototype.dotAll=function(){return this.consumeChar("."),{type:"Set",complement:!0,value:[n(` +`),n("\r"),n("\u2028"),n("\u2029")]}},r.prototype.atomEscape=function(){switch(this.consumeChar("\\"),this.peekChar()){case"1":case"2":case"3":case"4":case"5":case"6":case"7":case"8":case"9":return this.decimalEscapeAtom();case"d":case"D":case"s":case"S":case"w":case"W":return this.characterClassEscape();case"f":case"n":case"r":case"t":case"v":return this.controlEscapeAtom();case"c":return this.controlLetterEscapeAtom();case"0":return this.nulCharacterAtom();case"x":return this.hexEscapeSequenceAtom();case"u":return this.regExpUnicodeEscapeSequenceAtom();default:return this.identityEscapeAtom()}},r.prototype.decimalEscapeAtom=function(){var p=this.positiveInteger();return{type:"GroupBackReference",value:p}},r.prototype.characterClassEscape=function(){var p,C=!1;switch(this.popChar()){case"d":p=u;break;case"D":p=u,C=!0;break;case"s":p=f;break;case"S":p=f,C=!0;break;case"w":p=g;break;case"W":p=g,C=!0;break}return a(p),{type:"Set",value:p,complement:C}},r.prototype.controlEscapeAtom=function(){var p;switch(this.popChar()){case"f":p=n("\f");break;case"n":p=n(` +`);break;case"r":p=n("\r");break;case"t":p=n(" ");break;case"v":p=n("\v");break}return a(p),{type:"Character",value:p}},r.prototype.controlLetterEscapeAtom=function(){this.consumeChar("c");var p=this.popChar();if(/[a-zA-Z]/.test(p)===!1)throw Error("Invalid ");var C=p.toUpperCase().charCodeAt(0)-64;return{type:"Character",value:C}},r.prototype.nulCharacterAtom=function(){return this.consumeChar("0"),{type:"Character",value:n("\0")}},r.prototype.hexEscapeSequenceAtom=function(){return this.consumeChar("x"),this.parseHexDigits(2)},r.prototype.regExpUnicodeEscapeSequenceAtom=function(){return this.consumeChar("u"),this.parseHexDigits(4)},r.prototype.identityEscapeAtom=function(){var p=this.popChar();return{type:"Character",value:n(p)}},r.prototype.classPatternCharacterAtom=function(){switch(this.peekChar()){case` +`:case"\r":case"\u2028":case"\u2029":case"\\":case"]":throw Error("TBD");default:var p=this.popChar();return{type:"Character",value:n(p)}}},r.prototype.characterClass=function(){var p=[],C=!1;for(this.consumeChar("["),this.peekChar(0)==="^"&&(this.consumeChar("^"),C=!0);this.isClassAtom();){var y=this.classAtom(),B=y.type==="Character";if(B&&this.isRangeDash()){this.consumeChar("-");var v=this.classAtom(),D=v.type==="Character";if(D){if(v.value=this.input.length)throw Error("Unexpected end of input");this.idx++},r.prototype.loc=function(p){return{begin:p,end:this.idx}};var e=/[0-9a-fA-F]/,t=/[0-9]/,i=/[1-9]/;function n(p){return p.charCodeAt(0)}function s(p,C){p.length!==void 0?p.forEach(function(y){C.push(y)}):C.push(p)}function o(p,C){if(p[C]===!0)throw"duplicate flag "+C;p[C]=!0}function a(p){if(p===void 0)throw Error("Internal Error - Should never get here!")}function l(){throw Error("Internal Error - Should never get here!")}var c,u=[];for(c=n("0");c<=n("9");c++)u.push(c);var 
g=[n("_")].concat(u);for(c=n("a");c<=n("z");c++)g.push(c);for(c=n("A");c<=n("Z");c++)g.push(c);var f=[n(" "),n("\f"),n(` +`),n("\r"),n(" "),n("\v"),n(" "),n("\xA0"),n("\u1680"),n("\u2000"),n("\u2001"),n("\u2002"),n("\u2003"),n("\u2004"),n("\u2005"),n("\u2006"),n("\u2007"),n("\u2008"),n("\u2009"),n("\u200A"),n("\u2028"),n("\u2029"),n("\u202F"),n("\u205F"),n("\u3000"),n("\uFEFF")];function h(){}return h.prototype.visitChildren=function(p){for(var C in p){var y=p[C];p.hasOwnProperty(C)&&(y.type!==void 0?this.visit(y):Array.isArray(y)&&y.forEach(function(B){this.visit(B)},this))}},h.prototype.visit=function(p){switch(p.type){case"Pattern":this.visitPattern(p);break;case"Flags":this.visitFlags(p);break;case"Disjunction":this.visitDisjunction(p);break;case"Alternative":this.visitAlternative(p);break;case"StartAnchor":this.visitStartAnchor(p);break;case"EndAnchor":this.visitEndAnchor(p);break;case"WordBoundary":this.visitWordBoundary(p);break;case"NonWordBoundary":this.visitNonWordBoundary(p);break;case"Lookahead":this.visitLookahead(p);break;case"NegativeLookahead":this.visitNegativeLookahead(p);break;case"Character":this.visitCharacter(p);break;case"Set":this.visitSet(p);break;case"Group":this.visitGroup(p);break;case"GroupBackReference":this.visitGroupBackReference(p);break;case"Quantifier":this.visitQuantifier(p);break}this.visitChildren(p)},h.prototype.visitPattern=function(p){},h.prototype.visitFlags=function(p){},h.prototype.visitDisjunction=function(p){},h.prototype.visitAlternative=function(p){},h.prototype.visitStartAnchor=function(p){},h.prototype.visitEndAnchor=function(p){},h.prototype.visitWordBoundary=function(p){},h.prototype.visitNonWordBoundary=function(p){},h.prototype.visitLookahead=function(p){},h.prototype.visitNegativeLookahead=function(p){},h.prototype.visitCharacter=function(p){},h.prototype.visitSet=function(p){},h.prototype.visitGroup=function(p){},h.prototype.visitGroupBackReference=function(p){},h.prototype.visitQuantifier=function(p){},{RegExpParser:r,BaseRegExpVisitor:h,VERSION:"0.5.0"}})});var ZI=w(Wg=>{"use strict";Object.defineProperty(Wg,"__esModule",{value:!0});Wg.clearRegExpParserCache=Wg.getRegExpAst=void 0;var hEe=VI(),XI={},pEe=new hEe.RegExpParser;function dEe(r){var e=r.toString();if(XI.hasOwnProperty(e))return XI[e];var t=pEe.pattern(e);return XI[e]=t,t}Wg.getRegExpAst=dEe;function CEe(){XI={}}Wg.clearRegExpParserCache=CEe});var UY=w(pn=>{"use strict";var mEe=pn&&pn.__extends||function(){var r=function(e,t){return r=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},r(e,t)};return function(e,t){if(typeof t!="function"&&t!==null)throw new TypeError("Class extends value "+String(t)+" is not a constructor or null");r(e,t);function i(){this.constructor=e}e.prototype=t===null?Object.create(t):(i.prototype=t.prototype,new i)}}();Object.defineProperty(pn,"__esModule",{value:!0});pn.canMatchCharCode=pn.firstCharOptimizedIndices=pn.getOptimizedStartCodesIndices=pn.failedOptimizationPrefixMsg=void 0;var OY=VI(),cs=Gt(),MY=ZI(),Sa=Rv(),KY="Complement Sets are not supported for first char optimization";pn.failedOptimizationPrefixMsg=`Unable to use "first char" lexer optimizations: +`;function EEe(r,e){e===void 0&&(e=!1);try{var t=(0,MY.getRegExpAst)(r),i=$I(t.value,{},t.flags.ignoreCase);return i}catch(s){if(s.message===KY)e&&(0,cs.PRINT_WARNING)(""+pn.failedOptimizationPrefixMsg+(" Unable to optimize: < "+r.toString()+` > +`)+` Complement Sets 
cannot be automatically optimized. + This will disable the lexer's first char optimizations. + See: https://chevrotain.io/docs/guide/resolving_lexer_errors.html#COMPLEMENT for details.`);else{var n="";e&&(n=` + This will disable the lexer's first char optimizations. + See: https://chevrotain.io/docs/guide/resolving_lexer_errors.html#REGEXP_PARSING for details.`),(0,cs.PRINT_ERROR)(pn.failedOptimizationPrefixMsg+` +`+(" Failed parsing: < "+r.toString()+` > +`)+(" Using the regexp-to-ast library version: "+OY.VERSION+` +`)+" Please open an issue at: https://github.com/bd82/regexp-to-ast/issues"+n)}}return[]}pn.getOptimizedStartCodesIndices=EEe;function $I(r,e,t){switch(r.type){case"Disjunction":for(var i=0;i=Sa.minOptimizationVal)for(var f=u.from>=Sa.minOptimizationVal?u.from:Sa.minOptimizationVal,h=u.to,p=(0,Sa.charCodeToOptimizedIndex)(f),C=(0,Sa.charCodeToOptimizedIndex)(h),y=p;y<=C;y++)e[y]=y}}});break;case"Group":$I(o.value,e,t);break;default:throw Error("Non Exhaustive Match")}var a=o.quantifier!==void 0&&o.quantifier.atLeast===0;if(o.type==="Group"&&kv(o)===!1||o.type!=="Group"&&a===!1)break}break;default:throw Error("non exhaustive match!")}return(0,cs.values)(e)}pn.firstCharOptimizedIndices=$I;function _I(r,e,t){var i=(0,Sa.charCodeToOptimizedIndex)(r);e[i]=i,t===!0&&IEe(r,e)}function IEe(r,e){var t=String.fromCharCode(r),i=t.toUpperCase();if(i!==t){var n=(0,Sa.charCodeToOptimizedIndex)(i.charCodeAt(0));e[n]=n}else{var s=t.toLowerCase();if(s!==t){var n=(0,Sa.charCodeToOptimizedIndex)(s.charCodeAt(0));e[n]=n}}}function TY(r,e){return(0,cs.find)(r.value,function(t){if(typeof t=="number")return(0,cs.contains)(e,t);var i=t;return(0,cs.find)(e,function(n){return i.from<=n&&n<=i.to})!==void 0})}function kv(r){return r.quantifier&&r.quantifier.atLeast===0?!0:r.value?(0,cs.isArray)(r.value)?(0,cs.every)(r.value,kv):kv(r.value):!1}var yEe=function(r){mEe(e,r);function e(t){var i=r.call(this)||this;return i.targetCharCodes=t,i.found=!1,i}return e.prototype.visitChildren=function(t){if(this.found!==!0){switch(t.type){case"Lookahead":this.visitLookahead(t);return;case"NegativeLookahead":this.visitNegativeLookahead(t);return}r.prototype.visitChildren.call(this,t)}},e.prototype.visitCharacter=function(t){(0,cs.contains)(this.targetCharCodes,t.value)&&(this.found=!0)},e.prototype.visitSet=function(t){t.complement?TY(t,this.targetCharCodes)===void 0&&(this.found=!0):TY(t,this.targetCharCodes)!==void 0&&(this.found=!0)},e}(OY.BaseRegExpVisitor);function wEe(r,e){if(e instanceof RegExp){var t=(0,MY.getRegExpAst)(e),i=new yEe(r);return i.visit(t),i.found}else return(0,cs.find)(e,function(n){return(0,cs.contains)(r,n.charCodeAt(0))})!==void 0}pn.canMatchCharCode=wEe});var Rv=w(Ve=>{"use strict";var HY=Ve&&Ve.__extends||function(){var r=function(e,t){return r=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},r(e,t)};return function(e,t){if(typeof t!="function"&&t!==null)throw new TypeError("Class extends value "+String(t)+" is not a constructor or null");r(e,t);function i(){this.constructor=e}e.prototype=t===null?Object.create(t):(i.prototype=t.prototype,new 
i)}}();Object.defineProperty(Ve,"__esModule",{value:!0});Ve.charCodeToOptimizedIndex=Ve.minOptimizationVal=Ve.buildLineBreakIssueMessage=Ve.LineTerminatorOptimizedTester=Ve.isShortPattern=Ve.isCustomPattern=Ve.cloneEmptyGroups=Ve.performWarningRuntimeChecks=Ve.performRuntimeChecks=Ve.addStickyFlag=Ve.addStartOfInput=Ve.findUnreachablePatterns=Ve.findModesThatDoNotExist=Ve.findInvalidGroupType=Ve.findDuplicatePatterns=Ve.findUnsupportedFlags=Ve.findStartOfInputAnchor=Ve.findEmptyMatchRegExps=Ve.findEndOfInputAnchor=Ve.findInvalidPatterns=Ve.findMissingPatterns=Ve.validatePatterns=Ve.analyzeTokenTypes=Ve.enableSticky=Ve.disableSticky=Ve.SUPPORT_STICKY=Ve.MODES=Ve.DEFAULT_MODE=void 0;var GY=VI(),ir=yd(),xe=Gt(),zg=UY(),YY=ZI(),Do="PATTERN";Ve.DEFAULT_MODE="defaultMode";Ve.MODES="modes";Ve.SUPPORT_STICKY=typeof new RegExp("(?:)").sticky=="boolean";function BEe(){Ve.SUPPORT_STICKY=!1}Ve.disableSticky=BEe;function QEe(){Ve.SUPPORT_STICKY=!0}Ve.enableSticky=QEe;function bEe(r,e){e=(0,xe.defaults)(e,{useSticky:Ve.SUPPORT_STICKY,debug:!1,safeMode:!1,positionTracking:"full",lineTerminatorCharacters:["\r",` +`],tracer:function(v,D){return D()}});var t=e.tracer;t("initCharCodeToOptimizedIndexMap",function(){LEe()});var i;t("Reject Lexer.NA",function(){i=(0,xe.reject)(r,function(v){return v[Do]===ir.Lexer.NA})});var n=!1,s;t("Transform Patterns",function(){n=!1,s=(0,xe.map)(i,function(v){var D=v[Do];if((0,xe.isRegExp)(D)){var L=D.source;return L.length===1&&L!=="^"&&L!=="$"&&L!=="."&&!D.ignoreCase?L:L.length===2&&L[0]==="\\"&&!(0,xe.contains)(["d","D","s","S","t","r","n","t","0","c","b","B","f","v","w","W"],L[1])?L[1]:e.useSticky?Lv(D):Nv(D)}else{if((0,xe.isFunction)(D))return n=!0,{exec:D};if((0,xe.has)(D,"exec"))return n=!0,D;if(typeof D=="string"){if(D.length===1)return D;var H=D.replace(/[\\^$.*+?()[\]{}|]/g,"\\$&"),j=new RegExp(H);return e.useSticky?Lv(j):Nv(j)}else throw Error("non exhaustive match")}})});var o,a,l,c,u;t("misc mapping",function(){o=(0,xe.map)(i,function(v){return v.tokenTypeIdx}),a=(0,xe.map)(i,function(v){var D=v.GROUP;if(D!==ir.Lexer.SKIPPED){if((0,xe.isString)(D))return D;if((0,xe.isUndefined)(D))return!1;throw Error("non exhaustive match")}}),l=(0,xe.map)(i,function(v){var D=v.LONGER_ALT;if(D){var L=(0,xe.isArray)(D)?(0,xe.map)(D,function(H){return(0,xe.indexOf)(i,H)}):[(0,xe.indexOf)(i,D)];return L}}),c=(0,xe.map)(i,function(v){return v.PUSH_MODE}),u=(0,xe.map)(i,function(v){return(0,xe.has)(v,"POP_MODE")})});var g;t("Line Terminator Handling",function(){var v=ij(e.lineTerminatorCharacters);g=(0,xe.map)(i,function(D){return!1}),e.positionTracking!=="onlyOffset"&&(g=(0,xe.map)(i,function(D){if((0,xe.has)(D,"LINE_BREAKS"))return D.LINE_BREAKS;if(tj(D,v)===!1)return(0,zg.canMatchCharCode)(v,D.PATTERN)}))});var f,h,p,C;t("Misc Mapping #2",function(){f=(0,xe.map)(i,Ov),h=(0,xe.map)(s,ej),p=(0,xe.reduce)(i,function(v,D){var L=D.GROUP;return(0,xe.isString)(L)&&L!==ir.Lexer.SKIPPED&&(v[L]=[]),v},{}),C=(0,xe.map)(s,function(v,D){return{pattern:s[D],longerAlt:l[D],canLineTerminator:g[D],isCustom:f[D],short:h[D],group:a[D],push:c[D],pop:u[D],tokenTypeIdx:o[D],tokenType:i[D]}})});var y=!0,B=[];return e.safeMode||t("First Char Optimization",function(){B=(0,xe.reduce)(i,function(v,D,L){if(typeof D.PATTERN=="string"){var H=D.PATTERN.charCodeAt(0),j=Tv(H);Fv(v,j,C[L])}else if((0,xe.isArray)(D.START_CHARS_HINT)){var $;(0,xe.forEach)(D.START_CHARS_HINT,function(W){var _=typeof W=="string"?W.charCodeAt(0):W,A=Tv(_);$!==A&&($=A,Fv(v,A,C[L]))})}else 
if((0,xe.isRegExp)(D.PATTERN))if(D.PATTERN.unicode)y=!1,e.ensureOptimizations&&(0,xe.PRINT_ERROR)(""+zg.failedOptimizationPrefixMsg+(" Unable to analyze < "+D.PATTERN.toString()+` > pattern.
+`)+` The regexp unicode flag is not currently supported by the regexp-to-ast library.
+ This will disable the lexer's first char optimizations.
+ For details See: https://chevrotain.io/docs/guide/resolving_lexer_errors.html#UNICODE_OPTIMIZE`);else{var V=(0,zg.getOptimizedStartCodesIndices)(D.PATTERN,e.ensureOptimizations);(0,xe.isEmpty)(V)&&(y=!1),(0,xe.forEach)(V,function(W){Fv(v,W,C[L])})}else e.ensureOptimizations&&(0,xe.PRINT_ERROR)(""+zg.failedOptimizationPrefixMsg+(" TokenType: <"+D.name+`> is using a custom token pattern without providing <start_chars_hint> parameter.
+`)+` This will disable the lexer's first char optimizations.
+ For details See: https://chevrotain.io/docs/guide/resolving_lexer_errors.html#CUSTOM_OPTIMIZE`),y=!1;return v},[])}),t("ArrayPacking",function(){B=(0,xe.packArray)(B)}),{emptyGroups:p,patternIdxToConfig:C,charCodeToPatternIdxToConfig:B,hasCustom:n,canBeOptimized:y}}Ve.analyzeTokenTypes=bEe;function SEe(r,e){var t=[],i=jY(r);t=t.concat(i.errors);var n=qY(i.valid),s=n.valid;return t=t.concat(n.errors),t=t.concat(vEe(s)),t=t.concat(ZY(s)),t=t.concat(_Y(s,e)),t=t.concat($Y(s)),t}Ve.validatePatterns=SEe;function vEe(r){var e=[],t=(0,xe.filter)(r,function(i){return(0,xe.isRegExp)(i[Do])});return e=e.concat(JY(t)),e=e.concat(zY(t)),e=e.concat(VY(t)),e=e.concat(XY(t)),e=e.concat(WY(t)),e}function jY(r){var e=(0,xe.filter)(r,function(n){return!(0,xe.has)(n,Do)}),t=(0,xe.map)(e,function(n){return{message:"Token Type: ->"+n.name+"<- missing static 'PATTERN' property",type:ir.LexerDefinitionErrorType.MISSING_PATTERN,tokenTypes:[n]}}),i=(0,xe.difference)(r,e);return{errors:t,valid:i}}Ve.findMissingPatterns=jY;function qY(r){var e=(0,xe.filter)(r,function(n){var s=n[Do];return!(0,xe.isRegExp)(s)&&!(0,xe.isFunction)(s)&&!(0,xe.has)(s,"exec")&&!(0,xe.isString)(s)}),t=(0,xe.map)(e,function(n){return{message:"Token Type: ->"+n.name+"<- static 'PATTERN' can only be a RegExp, a Function matching the {CustomPatternMatcherFunc} type or an Object matching the {ICustomPattern} interface.",type:ir.LexerDefinitionErrorType.INVALID_PATTERN,tokenTypes:[n]}}),i=(0,xe.difference)(r,e);return{errors:t,valid:i}}Ve.findInvalidPatterns=qY;var xEe=/[^\\][\$]/;function JY(r){var e=function(n){HY(s,n);function s(){var o=n!==null&&n.apply(this,arguments)||this;return o.found=!1,o}return s.prototype.visitEndAnchor=function(o){this.found=!0},s}(GY.BaseRegExpVisitor),t=(0,xe.filter)(r,function(n){var s=n[Do];try{var o=(0,YY.getRegExpAst)(s),a=new e;return a.visit(o),a.found}catch{return xEe.test(s.source)}}),i=(0,xe.map)(t,function(n){return{message:`Unexpected RegExp Anchor Error:
+ Token Type: ->`+n.name+`<- static 'PATTERN' cannot contain end of input anchor '$'
+ See chevrotain.io/docs/guide/resolving_lexer_errors.html#ANCHORS for details.`,type:ir.LexerDefinitionErrorType.EOI_ANCHOR_FOUND,tokenTypes:[n]}});return i}Ve.findEndOfInputAnchor=JY;function WY(r){var e=(0,xe.filter)(r,function(i){var n=i[Do];return n.test("")}),t=(0,xe.map)(e,function(i){return{message:"Token Type: ->"+i.name+"<- static 'PATTERN' must not match an empty string",type:ir.LexerDefinitionErrorType.EMPTY_MATCH_PATTERN,tokenTypes:[i]}});return t}Ve.findEmptyMatchRegExps=WY;var PEe=/[^\\[][\^]|^\^/;function zY(r){var e=function(n){HY(s,n);function s(){var o=n!==null&&n.apply(this,arguments)||this;return o.found=!1,o}return
s.prototype.visitStartAnchor=function(o){this.found=!0},s}(GY.BaseRegExpVisitor),t=(0,xe.filter)(r,function(n){var s=n[Do];try{var o=(0,YY.getRegExpAst)(s),a=new e;return a.visit(o),a.found}catch{return PEe.test(s.source)}}),i=(0,xe.map)(t,function(n){return{message:`Unexpected RegExp Anchor Error: + Token Type: ->`+n.name+`<- static 'PATTERN' cannot contain start of input anchor '^' + See https://chevrotain.io/docs/guide/resolving_lexer_errors.html#ANCHORS for details.`,type:ir.LexerDefinitionErrorType.SOI_ANCHOR_FOUND,tokenTypes:[n]}});return i}Ve.findStartOfInputAnchor=zY;function VY(r){var e=(0,xe.filter)(r,function(i){var n=i[Do];return n instanceof RegExp&&(n.multiline||n.global)}),t=(0,xe.map)(e,function(i){return{message:"Token Type: ->"+i.name+"<- static 'PATTERN' may NOT contain global('g') or multiline('m')",type:ir.LexerDefinitionErrorType.UNSUPPORTED_FLAGS_FOUND,tokenTypes:[i]}});return t}Ve.findUnsupportedFlags=VY;function XY(r){var e=[],t=(0,xe.map)(r,function(s){return(0,xe.reduce)(r,function(o,a){return s.PATTERN.source===a.PATTERN.source&&!(0,xe.contains)(e,a)&&a.PATTERN!==ir.Lexer.NA&&(e.push(a),o.push(a)),o},[])});t=(0,xe.compact)(t);var i=(0,xe.filter)(t,function(s){return s.length>1}),n=(0,xe.map)(i,function(s){var o=(0,xe.map)(s,function(l){return l.name}),a=(0,xe.first)(s).PATTERN;return{message:"The same RegExp pattern ->"+a+"<-"+("has been used in all of the following Token Types: "+o.join(", ")+" <-"),type:ir.LexerDefinitionErrorType.DUPLICATE_PATTERNS_FOUND,tokenTypes:s}});return n}Ve.findDuplicatePatterns=XY;function ZY(r){var e=(0,xe.filter)(r,function(i){if(!(0,xe.has)(i,"GROUP"))return!1;var n=i.GROUP;return n!==ir.Lexer.SKIPPED&&n!==ir.Lexer.NA&&!(0,xe.isString)(n)}),t=(0,xe.map)(e,function(i){return{message:"Token Type: ->"+i.name+"<- static 'GROUP' can only be Lexer.SKIPPED/Lexer.NA/A String",type:ir.LexerDefinitionErrorType.INVALID_GROUP_TYPE_FOUND,tokenTypes:[i]}});return t}Ve.findInvalidGroupType=ZY;function _Y(r,e){var t=(0,xe.filter)(r,function(n){return n.PUSH_MODE!==void 0&&!(0,xe.contains)(e,n.PUSH_MODE)}),i=(0,xe.map)(t,function(n){var s="Token Type: ->"+n.name+"<- static 'PUSH_MODE' value cannot refer to a Lexer Mode ->"+n.PUSH_MODE+"<-which does not exist";return{message:s,type:ir.LexerDefinitionErrorType.PUSH_MODE_DOES_NOT_EXIST,tokenTypes:[n]}});return i}Ve.findModesThatDoNotExist=_Y;function $Y(r){var e=[],t=(0,xe.reduce)(r,function(i,n,s){var o=n.PATTERN;return o===ir.Lexer.NA||((0,xe.isString)(o)?i.push({str:o,idx:s,tokenType:n}):(0,xe.isRegExp)(o)&&kEe(o)&&i.push({str:o.source,idx:s,tokenType:n})),i},[]);return(0,xe.forEach)(r,function(i,n){(0,xe.forEach)(t,function(s){var o=s.str,a=s.idx,l=s.tokenType;if(n"+i.name+"<-")+`in the lexer's definition. 
+See https://chevrotain.io/docs/guide/resolving_lexer_errors.html#UNREACHABLE`;e.push({message:c,type:ir.LexerDefinitionErrorType.UNREACHABLE_PATTERN,tokenTypes:[i,l]})}})}),e}Ve.findUnreachablePatterns=$Y;function DEe(r,e){if((0,xe.isRegExp)(e)){var t=e.exec(r);return t!==null&&t.index===0}else{if((0,xe.isFunction)(e))return e(r,0,[],{});if((0,xe.has)(e,"exec"))return e.exec(r,0,[],{});if(typeof e=="string")return e===r;throw Error("non exhaustive match")}}function kEe(r){var e=[".","\\","[","]","|","^","$","(",")","?","*","+","{"];return(0,xe.find)(e,function(t){return r.source.indexOf(t)!==-1})===void 0}function Nv(r){var e=r.ignoreCase?"i":"";return new RegExp("^(?:"+r.source+")",e)}Ve.addStartOfInput=Nv;function Lv(r){var e=r.ignoreCase?"iy":"y";return new RegExp(""+r.source,e)}Ve.addStickyFlag=Lv;function REe(r,e,t){var i=[];return(0,xe.has)(r,Ve.DEFAULT_MODE)||i.push({message:"A MultiMode Lexer cannot be initialized without a <"+Ve.DEFAULT_MODE+`> property in its definition
+`,type:ir.LexerDefinitionErrorType.MULTI_MODE_LEXER_WITHOUT_DEFAULT_MODE}),(0,xe.has)(r,Ve.MODES)||i.push({message:"A MultiMode Lexer cannot be initialized without a <"+Ve.MODES+`> property in its definition
+`,type:ir.LexerDefinitionErrorType.MULTI_MODE_LEXER_WITHOUT_MODES_PROPERTY}),(0,xe.has)(r,Ve.MODES)&&(0,xe.has)(r,Ve.DEFAULT_MODE)&&!(0,xe.has)(r.modes,r.defaultMode)&&i.push({message:"A MultiMode Lexer cannot be initialized with a "+Ve.DEFAULT_MODE+": <"+r.defaultMode+`>which does not exist
+`,type:ir.LexerDefinitionErrorType.MULTI_MODE_LEXER_DEFAULT_MODE_VALUE_DOES_NOT_EXIST}),(0,xe.has)(r,Ve.MODES)&&(0,xe.forEach)(r.modes,function(n,s){(0,xe.forEach)(n,function(o,a){(0,xe.isUndefined)(o)&&i.push({message:"A Lexer cannot be initialized using an undefined Token Type. Mode:"+("<"+s+"> at index: <"+a+`>
+`),type:ir.LexerDefinitionErrorType.LEXER_DEFINITION_CANNOT_CONTAIN_UNDEFINED})})}),i}Ve.performRuntimeChecks=REe;function FEe(r,e,t){var i=[],n=!1,s=(0,xe.compact)((0,xe.flatten)((0,xe.mapValues)(r.modes,function(l){return l}))),o=(0,xe.reject)(s,function(l){return l[Do]===ir.Lexer.NA}),a=ij(t);return e&&(0,xe.forEach)(o,function(l){var c=tj(l,a);if(c!==!1){var u=rj(l,c),g={message:u,type:c.issue,tokenType:l};i.push(g)}else(0,xe.has)(l,"LINE_BREAKS")?l.LINE_BREAKS===!0&&(n=!0):(0,zg.canMatchCharCode)(a,l.PATTERN)&&(n=!0)}),e&&!n&&i.push({message:`Warning: No LINE_BREAKS Found.
+ This Lexer has been defined to track line and column information,
+ But none of the Token Types can be identified as matching a line terminator.
+ See https://chevrotain.io/docs/guide/resolving_lexer_errors.html#LINE_BREAKS
+ for details.`,type:ir.LexerDefinitionErrorType.NO_LINE_BREAKS_FLAGS}),i}Ve.performWarningRuntimeChecks=FEe;function NEe(r){var e={},t=(0,xe.keys)(r);return(0,xe.forEach)(t,function(i){var n=r[i];if((0,xe.isArray)(n))e[i]=[];else throw Error("non exhaustive match")}),e}Ve.cloneEmptyGroups=NEe;function Ov(r){var e=r.PATTERN;if((0,xe.isRegExp)(e))return!1;if((0,xe.isFunction)(e))return!0;if((0,xe.has)(e,"exec"))return!0;if((0,xe.isString)(e))return!1;throw Error("non exhaustive match")}Ve.isCustomPattern=Ov;function ej(r){return(0,xe.isString)(r)&&r.length===1?r.charCodeAt(0):!1}Ve.isShortPattern=ej;Ve.LineTerminatorOptimizedTester={test:function(r){for(var e=r.length,t=this.lastIndex;t<e;t++){var i=r.charCodeAt(t);if(i===10)return this.lastIndex=t+1,!0;if(i===13)return r.charCodeAt(t+1)===10?this.lastIndex=t+2:this.lastIndex=t+1,!0}return!1},lastIndex:0};function tj(r,e){if((0,xe.has)(r,"LINE_BREAKS"))return!1;if((0,xe.isRegExp)(r.PATTERN)){try{(0,zg.canMatchCharCode)(e,r.PATTERN)}catch(t){return{issue:ir.LexerDefinitionErrorType.IDENTIFY_TERMINATOR,errMsg:t.message}}return!1}else{if((0,xe.isString)(r.PATTERN))return!1;if(Ov(r))return{issue:ir.LexerDefinitionErrorType.CUSTOM_LINE_BREAK};throw Error("non exhaustive match")}}function rj(r,e){if(e.issue===ir.LexerDefinitionErrorType.IDENTIFY_TERMINATOR)return`Warning: unable to identify line terminator usage in pattern.
+`+(" The problem is in the <"+r.name+`> Token Type
+`)+(" Root cause: "+e.errMsg+`.
+`)+" For details See: https://chevrotain.io/docs/guide/resolving_lexer_errors.html#IDENTIFY_TERMINATOR";if(e.issue===ir.LexerDefinitionErrorType.CUSTOM_LINE_BREAK)return`Warning: A Custom Token Pattern should specify the <line_breaks> option.
+`+(" The problem is in the <"+r.name+`> Token Type
+`)+" For details See: https://chevrotain.io/docs/guide/resolving_lexer_errors.html#CUSTOM_LINE_BREAK";throw Error("non exhaustive match")}Ve.buildLineBreakIssueMessage=rj;function ij(r){var e=(0,xe.map)(r,function(t){return(0,xe.isString)(t)&&t.length>0?t.charCodeAt(0):t});return e}function Fv(r,e,t){r[e]===void 0?r[e]=[t]:r[e].push(t)}Ve.minOptimizationVal=256;var ey=[];function Tv(r){return r<Ve.minOptimizationVal?r:ey[r]}Ve.charCodeToOptimizedIndex=Tv;function LEe(){if((0,xe.isEmpty)(ey)){ey=new Array(65536);for(var r=0;r<65536;r++)ey[r]=r>255?255+~~(r/255):r}}});var Vg=w(Nt=>{"use strict";Object.defineProperty(Nt,"__esModule",{value:!0});Nt.isTokenType=Nt.hasExtendingTokensTypesMapProperty=Nt.hasExtendingTokensTypesProperty=Nt.hasCategoriesProperty=Nt.hasShortKeyProperty=Nt.singleAssignCategoriesToksMap=Nt.assignCategoriesMapProp=Nt.assignCategoriesTokensProp=Nt.assignTokenDefaultProps=Nt.expandCategories=Nt.augmentTokenTypes=Nt.tokenIdxToClass=Nt.tokenShortNameIdx=Nt.tokenStructuredMatcherNoCategories=Nt.tokenStructuredMatcher=void 0;var Zr=Gt();function TEe(r,e){var t=r.tokenTypeIdx;return t===e.tokenTypeIdx?!0:e.isParent===!0&&e.categoryMatchesMap[t]===!0}Nt.tokenStructuredMatcher=TEe;function OEe(r,e){return r.tokenTypeIdx===e.tokenTypeIdx}Nt.tokenStructuredMatcherNoCategories=OEe;Nt.tokenShortNameIdx=1;Nt.tokenIdxToClass={};function MEe(r){var e=nj(r);sj(e),aj(e),oj(e),(0,Zr.forEach)(e,function(t){t.isParent=t.categoryMatches.length>0})}Nt.augmentTokenTypes=MEe;function nj(r){for(var e=(0,Zr.cloneArr)(r),t=r,i=!0;i;){t=(0,Zr.compact)((0,Zr.flatten)((0,Zr.map)(t,function(s){return s.CATEGORIES})));var n=(0,Zr.difference)(t,e);e=e.concat(n),(0,Zr.isEmpty)(n)?i=!1:t=n}return e}Nt.expandCategories=nj;function sj(r){(0,Zr.forEach)(r,function(e){Aj(e)||(Nt.tokenIdxToClass[Nt.tokenShortNameIdx]=e,e.tokenTypeIdx=Nt.tokenShortNameIdx++),Mv(e)&&!(0,Zr.isArray)(e.CATEGORIES)&&(e.CATEGORIES=[e.CATEGORIES]),Mv(e)||(e.CATEGORIES=[]),lj(e)||(e.categoryMatches=[]),cj(e)||(e.categoryMatchesMap={})})}Nt.assignTokenDefaultProps=sj;function oj(r){(0,Zr.forEach)(r,function(e){e.categoryMatches=[],(0,Zr.forEach)(e.categoryMatchesMap,function(t,i){e.categoryMatches.push(Nt.tokenIdxToClass[i].tokenTypeIdx)})})}Nt.assignCategoriesTokensProp=oj;function aj(r){(0,Zr.forEach)(r,function(e){Kv([],e)})}Nt.assignCategoriesMapProp=aj;function Kv(r,e){(0,Zr.forEach)(r,function(t){e.categoryMatchesMap[t.tokenTypeIdx]=!0}),(0,Zr.forEach)(e.CATEGORIES,function(t){var i=r.concat(e);(0,Zr.contains)(i,t)||Kv(i,t)})}Nt.singleAssignCategoriesToksMap=Kv;function Aj(r){return(0,Zr.has)(r,"tokenTypeIdx")}Nt.hasShortKeyProperty=Aj;function Mv(r){return(0,Zr.has)(r,"CATEGORIES")}Nt.hasCategoriesProperty=Mv;function lj(r){return(0,Zr.has)(r,"categoryMatches")}Nt.hasExtendingTokensTypesProperty=lj;function cj(r){return(0,Zr.has)(r,"categoryMatchesMap")}Nt.hasExtendingTokensTypesMapProperty=cj;function KEe(r){return(0,Zr.has)(r,"tokenTypeIdx")}Nt.isTokenType=KEe});var Uv=w(ty=>{"use strict";Object.defineProperty(ty,"__esModule",{value:!0});ty.defaultLexerErrorProvider=void 0;ty.defaultLexerErrorProvider={buildUnableToPopLexerModeMessage:function(r){return"Unable to pop Lexer Mode after encountering Token ->"+r.image+"<- The Mode Stack is empty"},buildUnexpectedCharactersMessage:function(r,e,t,i,n){return"unexpected character: ->"+r.charAt(e)+"<- at offset: "+e+","+(" skipped "+t+" 
characters.")}}});var yd=w(fc=>{"use strict";Object.defineProperty(fc,"__esModule",{value:!0});fc.Lexer=fc.LexerDefinitionErrorType=void 0;var Xs=Rv(),nr=Gt(),UEe=Vg(),HEe=Uv(),GEe=ZI(),YEe;(function(r){r[r.MISSING_PATTERN=0]="MISSING_PATTERN",r[r.INVALID_PATTERN=1]="INVALID_PATTERN",r[r.EOI_ANCHOR_FOUND=2]="EOI_ANCHOR_FOUND",r[r.UNSUPPORTED_FLAGS_FOUND=3]="UNSUPPORTED_FLAGS_FOUND",r[r.DUPLICATE_PATTERNS_FOUND=4]="DUPLICATE_PATTERNS_FOUND",r[r.INVALID_GROUP_TYPE_FOUND=5]="INVALID_GROUP_TYPE_FOUND",r[r.PUSH_MODE_DOES_NOT_EXIST=6]="PUSH_MODE_DOES_NOT_EXIST",r[r.MULTI_MODE_LEXER_WITHOUT_DEFAULT_MODE=7]="MULTI_MODE_LEXER_WITHOUT_DEFAULT_MODE",r[r.MULTI_MODE_LEXER_WITHOUT_MODES_PROPERTY=8]="MULTI_MODE_LEXER_WITHOUT_MODES_PROPERTY",r[r.MULTI_MODE_LEXER_DEFAULT_MODE_VALUE_DOES_NOT_EXIST=9]="MULTI_MODE_LEXER_DEFAULT_MODE_VALUE_DOES_NOT_EXIST",r[r.LEXER_DEFINITION_CANNOT_CONTAIN_UNDEFINED=10]="LEXER_DEFINITION_CANNOT_CONTAIN_UNDEFINED",r[r.SOI_ANCHOR_FOUND=11]="SOI_ANCHOR_FOUND",r[r.EMPTY_MATCH_PATTERN=12]="EMPTY_MATCH_PATTERN",r[r.NO_LINE_BREAKS_FLAGS=13]="NO_LINE_BREAKS_FLAGS",r[r.UNREACHABLE_PATTERN=14]="UNREACHABLE_PATTERN",r[r.IDENTIFY_TERMINATOR=15]="IDENTIFY_TERMINATOR",r[r.CUSTOM_LINE_BREAK=16]="CUSTOM_LINE_BREAK"})(YEe=fc.LexerDefinitionErrorType||(fc.LexerDefinitionErrorType={}));var wd={deferDefinitionErrorsHandling:!1,positionTracking:"full",lineTerminatorsPattern:/\n|\r\n?/g,lineTerminatorCharacters:[`
+`,"\r"],ensureOptimizations:!1,safeMode:!1,errorMessageProvider:HEe.defaultLexerErrorProvider,traceInitPerf:!1,skipValidations:!1};Object.freeze(wd);var jEe=function(){function r(e,t){var i=this;if(t===void 0&&(t=wd),this.lexerDefinition=e,this.lexerDefinitionErrors=[],this.lexerDefinitionWarning=[],this.patternIdxToConfig={},this.charCodeToPatternIdxToConfig={},this.modes=[],this.emptyGroups={},this.config=void 0,this.trackStartLines=!0,this.trackEndLines=!0,this.hasCustom=!1,this.canModeBeOptimized={},typeof t=="boolean")throw Error(`The second argument to the Lexer constructor is now an ILexerConfig Object.
+a boolean 2nd argument is no longer supported`);this.config=(0,nr.merge)(wd,t);var n=this.config.traceInitPerf;n===!0?(this.traceInitMaxIdent=1/0,this.traceInitPerf=!0):typeof n=="number"&&(this.traceInitMaxIdent=n,this.traceInitPerf=!0),this.traceInitIndent=-1,this.TRACE_INIT("Lexer Constructor",function(){var s,o=!0;i.TRACE_INIT("Lexer Config handling",function(){if(i.config.lineTerminatorsPattern===wd.lineTerminatorsPattern)i.config.lineTerminatorsPattern=Xs.LineTerminatorOptimizedTester;else if(i.config.lineTerminatorCharacters===wd.lineTerminatorCharacters)throw Error(`Error: Missing <lineTerminatorCharacters> property on the Lexer config.
+ For details See: https://chevrotain.io/docs/guide/resolving_lexer_errors.html#MISSING_LINE_TERM_CHARS`);if(t.safeMode&&t.ensureOptimizations)throw Error('"safeMode" and "ensureOptimizations" flags are mutually exclusive.');i.trackStartLines=/full|onlyStart/i.test(i.config.positionTracking),i.trackEndLines=/full/i.test(i.config.positionTracking),(0,nr.isArray)(e)?(s={modes:{}},s.modes[Xs.DEFAULT_MODE]=(0,nr.cloneArr)(e),s[Xs.DEFAULT_MODE]=Xs.DEFAULT_MODE):(o=!1,s=(0,nr.cloneObj)(e))}),i.config.skipValidations===!1&&(i.TRACE_INIT("performRuntimeChecks",function(){i.lexerDefinitionErrors=i.lexerDefinitionErrors.concat((0,Xs.performRuntimeChecks)(s,i.trackStartLines,i.config.lineTerminatorCharacters))}),i.TRACE_INIT("performWarningRuntimeChecks",function(){i.lexerDefinitionWarning=i.lexerDefinitionWarning.concat((0,Xs.performWarningRuntimeChecks)(s,i.trackStartLines,i.config.lineTerminatorCharacters))})),s.modes=s.modes?s.modes:{},(0,nr.forEach)(s.modes,function(u,g){s.modes[g]=(0,nr.reject)(u,function(f){return(0,nr.isUndefined)(f)})});var a=(0,nr.keys)(s.modes);if((0,nr.forEach)(s.modes,function(u,g){i.TRACE_INIT("Mode: <"+g+"> processing",function(){if(i.modes.push(g),i.config.skipValidations===!1&&i.TRACE_INIT("validatePatterns",function(){i.lexerDefinitionErrors=i.lexerDefinitionErrors.concat((0,Xs.validatePatterns)(u,a))}),(0,nr.isEmpty)(i.lexerDefinitionErrors)){(0,UEe.augmentTokenTypes)(u);var f;i.TRACE_INIT("analyzeTokenTypes",function(){f=(0,Xs.analyzeTokenTypes)(u,{lineTerminatorCharacters:i.config.lineTerminatorCharacters,positionTracking:t.positionTracking,ensureOptimizations:t.ensureOptimizations,safeMode:t.safeMode,tracer:i.TRACE_INIT.bind(i)})}),i.patternIdxToConfig[g]=f.patternIdxToConfig,i.charCodeToPatternIdxToConfig[g]=f.charCodeToPatternIdxToConfig,i.emptyGroups=(0,nr.merge)(i.emptyGroups,f.emptyGroups),i.hasCustom=f.hasCustom||i.hasCustom,i.canModeBeOptimized[g]=f.canBeOptimized}})}),i.defaultMode=s.defaultMode,!(0,nr.isEmpty)(i.lexerDefinitionErrors)&&!i.config.deferDefinitionErrorsHandling){var l=(0,nr.map)(i.lexerDefinitionErrors,function(u){return u.message}),c=l.join(`----------------------- +`);throw new Error(`Errors detected in definition of Lexer: +`+c)}(0,nr.forEach)(i.lexerDefinitionWarning,function(u){(0,nr.PRINT_WARNING)(u.message)}),i.TRACE_INIT("Choosing sub-methods implementations",function(){if(Xs.SUPPORT_STICKY?(i.chopInput=nr.IDENTITY,i.match=i.matchWithTest):(i.updateLastIndex=nr.NOOP,i.match=i.matchWithExec),o&&(i.handleModes=nr.NOOP),i.trackStartLines===!1&&(i.computeNewColumn=nr.IDENTITY),i.trackEndLines===!1&&(i.updateTokenEndLineColumnLocation=nr.NOOP),/full/i.test(i.config.positionTracking))i.createTokenInstance=i.createFullToken;else if(/onlyStart/i.test(i.config.positionTracking))i.createTokenInstance=i.createStartOnlyToken;else if(/onlyOffset/i.test(i.config.positionTracking))i.createTokenInstance=i.createOffsetOnlyToken;else throw Error('Invalid config option: "'+i.config.positionTracking+'"');i.hasCustom?(i.addToken=i.addTokenUsingPush,i.handlePayload=i.handlePayloadWithCustom):(i.addToken=i.addTokenUsingMemberAccess,i.handlePayload=i.handlePayloadNoCustom)}),i.TRACE_INIT("Failed Optimization Warnings",function(){var u=(0,nr.reduce)(i.canModeBeOptimized,function(g,f,h){return f===!1&&g.push(h),g},[]);if(t.ensureOptimizations&&!(0,nr.isEmpty)(u))throw Error("Lexer Modes: < "+u.join(", ")+` > cannot be optimized. + Disable the "ensureOptimizations" lexer config flag to silently ignore this and run the lexer in an un-optimized mode. 
+ Or inspect the console log for details on how to resolve these issues.`)}),i.TRACE_INIT("clearRegExpParserCache",function(){(0,GEe.clearRegExpParserCache)()}),i.TRACE_INIT("toFastProperties",function(){(0,nr.toFastProperties)(i)})})}return r.prototype.tokenize=function(e,t){if(t===void 0&&(t=this.defaultMode),!(0,nr.isEmpty)(this.lexerDefinitionErrors)){var i=(0,nr.map)(this.lexerDefinitionErrors,function(o){return o.message}),n=i.join(`----------------------- +`);throw new Error(`Unable to Tokenize because Errors detected in definition of Lexer: +`+n)}var s=this.tokenizeInternal(e,t);return s},r.prototype.tokenizeInternal=function(e,t){var i=this,n,s,o,a,l,c,u,g,f,h,p,C,y,B,v,D,L=e,H=L.length,j=0,$=0,V=this.hasCustom?0:Math.floor(e.length/10),W=new Array(V),_=[],A=this.trackStartLines?1:void 0,ae=this.trackStartLines?1:void 0,ge=(0,Xs.cloneEmptyGroups)(this.emptyGroups),re=this.trackStartLines,O=this.config.lineTerminatorsPattern,F=0,ue=[],he=[],ke=[],Fe=[];Object.freeze(Fe);var Ne=void 0;function oe(){return ue}function le(pr){var Ii=(0,Xs.charCodeToOptimizedIndex)(pr),es=he[Ii];return es===void 0?Fe:es}var we=function(pr){if(ke.length===1&&pr.tokenType.PUSH_MODE===void 0){var Ii=i.config.errorMessageProvider.buildUnableToPopLexerModeMessage(pr);_.push({offset:pr.startOffset,line:pr.startLine!==void 0?pr.startLine:void 0,column:pr.startColumn!==void 0?pr.startColumn:void 0,length:pr.image.length,message:Ii})}else{ke.pop();var es=(0,nr.last)(ke);ue=i.patternIdxToConfig[es],he=i.charCodeToPatternIdxToConfig[es],F=ue.length;var ua=i.canModeBeOptimized[es]&&i.config.safeMode===!1;he&&ua?Ne=le:Ne=oe}};function fe(pr){ke.push(pr),he=this.charCodeToPatternIdxToConfig[pr],ue=this.patternIdxToConfig[pr],F=ue.length,F=ue.length;var Ii=this.canModeBeOptimized[pr]&&this.config.safeMode===!1;he&&Ii?Ne=le:Ne=oe}fe.call(this,t);for(var Ae;jc.length){c=a,u=g,Ae=_e;break}}}break}}if(c!==null){if(f=c.length,h=Ae.group,h!==void 0&&(p=Ae.tokenTypeIdx,C=this.createTokenInstance(c,j,p,Ae.tokenType,A,ae,f),this.handlePayload(C,u),h===!1?$=this.addToken(W,$,C):ge[h].push(C)),e=this.chopInput(e,f),j=j+f,ae=this.computeNewColumn(ae,f),re===!0&&Ae.canLineTerminator===!0){var It=0,Or=void 0,ii=void 0;O.lastIndex=0;do Or=O.test(c),Or===!0&&(ii=O.lastIndex-1,It++);while(Or===!0);It!==0&&(A=A+It,ae=f-ii,this.updateTokenEndLineColumnLocation(C,h,ii,It,A,ae,f))}this.handleModes(Ae,we,fe,C)}else{for(var gi=j,hr=A,fi=ae,ni=!1;!ni&&j <"+e+">");var n=(0,nr.timer)(t),s=n.time,o=n.value,a=s>10?console.warn:console.log;return this.traceInitIndent time: "+s+"ms"),this.traceInitIndent--,o}else return t()},r.SKIPPED="This marks a skipped Token pattern, this means each token identified by it willbe consumed and then thrown into oblivion, this can be used to for example to completely ignore whitespace.",r.NA=/NOT_APPLICABLE/,r}();fc.Lexer=jEe});var NA=w(bi=>{"use strict";Object.defineProperty(bi,"__esModule",{value:!0});bi.tokenMatcher=bi.createTokenInstance=bi.EOF=bi.createToken=bi.hasTokenLabel=bi.tokenName=bi.tokenLabel=void 0;var Zs=Gt(),qEe=yd(),Hv=Vg();function JEe(r){return Ej(r)?r.LABEL:r.name}bi.tokenLabel=JEe;function WEe(r){return r.name}bi.tokenName=WEe;function Ej(r){return(0,Zs.isString)(r.LABEL)&&r.LABEL!==""}bi.hasTokenLabel=Ej;var zEe="parent",uj="categories",gj="label",fj="group",hj="push_mode",pj="pop_mode",dj="longer_alt",Cj="line_breaks",mj="start_chars_hint";function Ij(r){return VEe(r)}bi.createToken=Ij;function VEe(r){var 
e=r.pattern,t={};if(t.name=r.name,(0,Zs.isUndefined)(e)||(t.PATTERN=e),(0,Zs.has)(r,zEe))throw`The parent property is no longer supported. +See: https://github.com/chevrotain/chevrotain/issues/564#issuecomment-349062346 for details.`;return(0,Zs.has)(r,uj)&&(t.CATEGORIES=r[uj]),(0,Hv.augmentTokenTypes)([t]),(0,Zs.has)(r,gj)&&(t.LABEL=r[gj]),(0,Zs.has)(r,fj)&&(t.GROUP=r[fj]),(0,Zs.has)(r,pj)&&(t.POP_MODE=r[pj]),(0,Zs.has)(r,hj)&&(t.PUSH_MODE=r[hj]),(0,Zs.has)(r,dj)&&(t.LONGER_ALT=r[dj]),(0,Zs.has)(r,Cj)&&(t.LINE_BREAKS=r[Cj]),(0,Zs.has)(r,mj)&&(t.START_CHARS_HINT=r[mj]),t}bi.EOF=Ij({name:"EOF",pattern:qEe.Lexer.NA});(0,Hv.augmentTokenTypes)([bi.EOF]);function XEe(r,e,t,i,n,s,o,a){return{image:e,startOffset:t,endOffset:i,startLine:n,endLine:s,startColumn:o,endColumn:a,tokenTypeIdx:r.tokenTypeIdx,tokenType:r}}bi.createTokenInstance=XEe;function ZEe(r,e){return(0,Hv.tokenStructuredMatcher)(r,e)}bi.tokenMatcher=ZEe});var dn=w(zt=>{"use strict";var va=zt&&zt.__extends||function(){var r=function(e,t){return r=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},r(e,t)};return function(e,t){if(typeof t!="function"&&t!==null)throw new TypeError("Class extends value "+String(t)+" is not a constructor or null");r(e,t);function i(){this.constructor=e}e.prototype=t===null?Object.create(t):(i.prototype=t.prototype,new i)}}();Object.defineProperty(zt,"__esModule",{value:!0});zt.serializeProduction=zt.serializeGrammar=zt.Terminal=zt.Alternation=zt.RepetitionWithSeparator=zt.Repetition=zt.RepetitionMandatoryWithSeparator=zt.RepetitionMandatory=zt.Option=zt.Alternative=zt.Rule=zt.NonTerminal=zt.AbstractProduction=void 0;var Ar=Gt(),_Ee=NA(),ko=function(){function r(e){this._definition=e}return Object.defineProperty(r.prototype,"definition",{get:function(){return this._definition},set:function(e){this._definition=e},enumerable:!1,configurable:!0}),r.prototype.accept=function(e){e.visit(this),(0,Ar.forEach)(this.definition,function(t){t.accept(e)})},r}();zt.AbstractProduction=ko;var yj=function(r){va(e,r);function e(t){var i=r.call(this,[])||this;return i.idx=1,(0,Ar.assign)(i,(0,Ar.pick)(t,function(n){return n!==void 0})),i}return Object.defineProperty(e.prototype,"definition",{get:function(){return this.referencedRule!==void 0?this.referencedRule.definition:[]},set:function(t){},enumerable:!1,configurable:!0}),e.prototype.accept=function(t){t.visit(this)},e}(ko);zt.NonTerminal=yj;var wj=function(r){va(e,r);function e(t){var i=r.call(this,t.definition)||this;return i.orgText="",(0,Ar.assign)(i,(0,Ar.pick)(t,function(n){return n!==void 0})),i}return e}(ko);zt.Rule=wj;var Bj=function(r){va(e,r);function e(t){var i=r.call(this,t.definition)||this;return i.ignoreAmbiguities=!1,(0,Ar.assign)(i,(0,Ar.pick)(t,function(n){return n!==void 0})),i}return e}(ko);zt.Alternative=Bj;var Qj=function(r){va(e,r);function e(t){var i=r.call(this,t.definition)||this;return i.idx=1,(0,Ar.assign)(i,(0,Ar.pick)(t,function(n){return n!==void 0})),i}return e}(ko);zt.Option=Qj;var bj=function(r){va(e,r);function e(t){var i=r.call(this,t.definition)||this;return i.idx=1,(0,Ar.assign)(i,(0,Ar.pick)(t,function(n){return n!==void 0})),i}return e}(ko);zt.RepetitionMandatory=bj;var Sj=function(r){va(e,r);function e(t){var i=r.call(this,t.definition)||this;return i.idx=1,(0,Ar.assign)(i,(0,Ar.pick)(t,function(n){return n!==void 0})),i}return e}(ko);zt.RepetitionMandatoryWithSeparator=Sj;var vj=function(r){va(e,r);function 
e(t){var i=r.call(this,t.definition)||this;return i.idx=1,(0,Ar.assign)(i,(0,Ar.pick)(t,function(n){return n!==void 0})),i}return e}(ko);zt.Repetition=vj;var xj=function(r){va(e,r);function e(t){var i=r.call(this,t.definition)||this;return i.idx=1,(0,Ar.assign)(i,(0,Ar.pick)(t,function(n){return n!==void 0})),i}return e}(ko);zt.RepetitionWithSeparator=xj;var Pj=function(r){va(e,r);function e(t){var i=r.call(this,t.definition)||this;return i.idx=1,i.ignoreAmbiguities=!1,i.hasPredicates=!1,(0,Ar.assign)(i,(0,Ar.pick)(t,function(n){return n!==void 0})),i}return Object.defineProperty(e.prototype,"definition",{get:function(){return this._definition},set:function(t){this._definition=t},enumerable:!1,configurable:!0}),e}(ko);zt.Alternation=Pj;var ry=function(){function r(e){this.idx=1,(0,Ar.assign)(this,(0,Ar.pick)(e,function(t){return t!==void 0}))}return r.prototype.accept=function(e){e.visit(this)},r}();zt.Terminal=ry;function $Ee(r){return(0,Ar.map)(r,Bd)}zt.serializeGrammar=$Ee;function Bd(r){function e(s){return(0,Ar.map)(s,Bd)}if(r instanceof yj){var t={type:"NonTerminal",name:r.nonTerminalName,idx:r.idx};return(0,Ar.isString)(r.label)&&(t.label=r.label),t}else{if(r instanceof Bj)return{type:"Alternative",definition:e(r.definition)};if(r instanceof Qj)return{type:"Option",idx:r.idx,definition:e(r.definition)};if(r instanceof bj)return{type:"RepetitionMandatory",idx:r.idx,definition:e(r.definition)};if(r instanceof Sj)return{type:"RepetitionMandatoryWithSeparator",idx:r.idx,separator:Bd(new ry({terminalType:r.separator})),definition:e(r.definition)};if(r instanceof xj)return{type:"RepetitionWithSeparator",idx:r.idx,separator:Bd(new ry({terminalType:r.separator})),definition:e(r.definition)};if(r instanceof vj)return{type:"Repetition",idx:r.idx,definition:e(r.definition)};if(r instanceof Pj)return{type:"Alternation",idx:r.idx,definition:e(r.definition)};if(r instanceof ry){var i={type:"Terminal",name:r.terminalType.name,label:(0,_Ee.tokenLabel)(r.terminalType),idx:r.idx};(0,Ar.isString)(r.label)&&(i.terminalLabel=r.label);var n=r.terminalType.PATTERN;return r.terminalType.PATTERN&&(i.pattern=(0,Ar.isRegExp)(n)?n.source:n),i}else{if(r instanceof wj)return{type:"Rule",name:r.name,orgText:r.orgText,definition:e(r.definition)};throw Error("non exhaustive match")}}}zt.serializeProduction=Bd});var ny=w(iy=>{"use strict";Object.defineProperty(iy,"__esModule",{value:!0});iy.RestWalker=void 0;var Gv=Gt(),Cn=dn(),eIe=function(){function r(){}return r.prototype.walk=function(e,t){var i=this;t===void 0&&(t=[]),(0,Gv.forEach)(e.definition,function(n,s){var o=(0,Gv.drop)(e.definition,s+1);if(n instanceof Cn.NonTerminal)i.walkProdRef(n,o,t);else if(n instanceof Cn.Terminal)i.walkTerminal(n,o,t);else if(n instanceof Cn.Alternative)i.walkFlat(n,o,t);else if(n instanceof Cn.Option)i.walkOption(n,o,t);else if(n instanceof Cn.RepetitionMandatory)i.walkAtLeastOne(n,o,t);else if(n instanceof Cn.RepetitionMandatoryWithSeparator)i.walkAtLeastOneSep(n,o,t);else if(n instanceof Cn.RepetitionWithSeparator)i.walkManySep(n,o,t);else if(n instanceof Cn.Repetition)i.walkMany(n,o,t);else if(n instanceof Cn.Alternation)i.walkOr(n,o,t);else throw Error("non exhaustive match")})},r.prototype.walkTerminal=function(e,t,i){},r.prototype.walkProdRef=function(e,t,i){},r.prototype.walkFlat=function(e,t,i){var n=t.concat(i);this.walk(e,n)},r.prototype.walkOption=function(e,t,i){var n=t.concat(i);this.walk(e,n)},r.prototype.walkAtLeastOne=function(e,t,i){var n=[new 
Cn.Option({definition:e.definition})].concat(t,i);this.walk(e,n)},r.prototype.walkAtLeastOneSep=function(e,t,i){var n=Dj(e,t,i);this.walk(e,n)},r.prototype.walkMany=function(e,t,i){var n=[new Cn.Option({definition:e.definition})].concat(t,i);this.walk(e,n)},r.prototype.walkManySep=function(e,t,i){var n=Dj(e,t,i);this.walk(e,n)},r.prototype.walkOr=function(e,t,i){var n=this,s=t.concat(i);(0,Gv.forEach)(e.definition,function(o){var a=new Cn.Alternative({definition:[o]});n.walk(a,s)})},r}();iy.RestWalker=eIe;function Dj(r,e,t){var i=[new Cn.Option({definition:[new Cn.Terminal({terminalType:r.separator})].concat(r.definition)})],n=i.concat(e,t);return n}});var Xg=w(sy=>{"use strict";Object.defineProperty(sy,"__esModule",{value:!0});sy.GAstVisitor=void 0;var Ro=dn(),tIe=function(){function r(){}return r.prototype.visit=function(e){var t=e;switch(t.constructor){case Ro.NonTerminal:return this.visitNonTerminal(t);case Ro.Alternative:return this.visitAlternative(t);case Ro.Option:return this.visitOption(t);case Ro.RepetitionMandatory:return this.visitRepetitionMandatory(t);case Ro.RepetitionMandatoryWithSeparator:return this.visitRepetitionMandatoryWithSeparator(t);case Ro.RepetitionWithSeparator:return this.visitRepetitionWithSeparator(t);case Ro.Repetition:return this.visitRepetition(t);case Ro.Alternation:return this.visitAlternation(t);case Ro.Terminal:return this.visitTerminal(t);case Ro.Rule:return this.visitRule(t);default:throw Error("non exhaustive match")}},r.prototype.visitNonTerminal=function(e){},r.prototype.visitAlternative=function(e){},r.prototype.visitOption=function(e){},r.prototype.visitRepetition=function(e){},r.prototype.visitRepetitionMandatory=function(e){},r.prototype.visitRepetitionMandatoryWithSeparator=function(e){},r.prototype.visitRepetitionWithSeparator=function(e){},r.prototype.visitAlternation=function(e){},r.prototype.visitTerminal=function(e){},r.prototype.visitRule=function(e){},r}();sy.GAstVisitor=tIe});var bd=w(Mi=>{"use strict";var rIe=Mi&&Mi.__extends||function(){var r=function(e,t){return r=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},r(e,t)};return function(e,t){if(typeof t!="function"&&t!==null)throw new TypeError("Class extends value "+String(t)+" is not a constructor or null");r(e,t);function i(){this.constructor=e}e.prototype=t===null?Object.create(t):(i.prototype=t.prototype,new i)}}();Object.defineProperty(Mi,"__esModule",{value:!0});Mi.collectMethods=Mi.DslMethodsCollectorVisitor=Mi.getProductionDslName=Mi.isBranchingProd=Mi.isOptionalProd=Mi.isSequenceProd=void 0;var Qd=Gt(),Qr=dn(),iIe=Xg();function nIe(r){return r instanceof Qr.Alternative||r instanceof Qr.Option||r instanceof Qr.Repetition||r instanceof Qr.RepetitionMandatory||r instanceof Qr.RepetitionMandatoryWithSeparator||r instanceof Qr.RepetitionWithSeparator||r instanceof Qr.Terminal||r instanceof Qr.Rule}Mi.isSequenceProd=nIe;function Yv(r,e){e===void 0&&(e=[]);var t=r instanceof Qr.Option||r instanceof Qr.Repetition||r instanceof Qr.RepetitionWithSeparator;return t?!0:r instanceof Qr.Alternation?(0,Qd.some)(r.definition,function(i){return Yv(i,e)}):r instanceof Qr.NonTerminal&&(0,Qd.contains)(e,r)?!1:r instanceof Qr.AbstractProduction?(r instanceof Qr.NonTerminal&&e.push(r),(0,Qd.every)(r.definition,function(i){return Yv(i,e)})):!1}Mi.isOptionalProd=Yv;function sIe(r){return r instanceof Qr.Alternation}Mi.isBranchingProd=sIe;function oIe(r){if(r instanceof 
Qr.NonTerminal)return"SUBRULE";if(r instanceof Qr.Option)return"OPTION";if(r instanceof Qr.Alternation)return"OR";if(r instanceof Qr.RepetitionMandatory)return"AT_LEAST_ONE";if(r instanceof Qr.RepetitionMandatoryWithSeparator)return"AT_LEAST_ONE_SEP";if(r instanceof Qr.RepetitionWithSeparator)return"MANY_SEP";if(r instanceof Qr.Repetition)return"MANY";if(r instanceof Qr.Terminal)return"CONSUME";throw Error("non exhaustive match")}Mi.getProductionDslName=oIe;var kj=function(r){rIe(e,r);function e(){var t=r!==null&&r.apply(this,arguments)||this;return t.separator="-",t.dslMethods={option:[],alternation:[],repetition:[],repetitionWithSeparator:[],repetitionMandatory:[],repetitionMandatoryWithSeparator:[]},t}return e.prototype.reset=function(){this.dslMethods={option:[],alternation:[],repetition:[],repetitionWithSeparator:[],repetitionMandatory:[],repetitionMandatoryWithSeparator:[]}},e.prototype.visitTerminal=function(t){var i=t.terminalType.name+this.separator+"Terminal";(0,Qd.has)(this.dslMethods,i)||(this.dslMethods[i]=[]),this.dslMethods[i].push(t)},e.prototype.visitNonTerminal=function(t){var i=t.nonTerminalName+this.separator+"Terminal";(0,Qd.has)(this.dslMethods,i)||(this.dslMethods[i]=[]),this.dslMethods[i].push(t)},e.prototype.visitOption=function(t){this.dslMethods.option.push(t)},e.prototype.visitRepetitionWithSeparator=function(t){this.dslMethods.repetitionWithSeparator.push(t)},e.prototype.visitRepetitionMandatory=function(t){this.dslMethods.repetitionMandatory.push(t)},e.prototype.visitRepetitionMandatoryWithSeparator=function(t){this.dslMethods.repetitionMandatoryWithSeparator.push(t)},e.prototype.visitRepetition=function(t){this.dslMethods.repetition.push(t)},e.prototype.visitAlternation=function(t){this.dslMethods.alternation.push(t)},e}(iIe.GAstVisitor);Mi.DslMethodsCollectorVisitor=kj;var oy=new kj;function aIe(r){oy.reset(),r.accept(oy);var e=oy.dslMethods;return oy.reset(),e}Mi.collectMethods=aIe});var qv=w(Fo=>{"use strict";Object.defineProperty(Fo,"__esModule",{value:!0});Fo.firstForTerminal=Fo.firstForBranching=Fo.firstForSequence=Fo.first=void 0;var ay=Gt(),Rj=dn(),jv=bd();function Ay(r){if(r instanceof Rj.NonTerminal)return Ay(r.referencedRule);if(r instanceof Rj.Terminal)return Lj(r);if((0,jv.isSequenceProd)(r))return Fj(r);if((0,jv.isBranchingProd)(r))return Nj(r);throw Error("non exhaustive match")}Fo.first=Ay;function Fj(r){for(var e=[],t=r.definition,i=0,n=t.length>i,s,o=!0;n&&o;)s=t[i],o=(0,jv.isOptionalProd)(s),e=e.concat(Ay(s)),i=i+1,n=t.length>i;return(0,ay.uniq)(e)}Fo.firstForSequence=Fj;function Nj(r){var e=(0,ay.map)(r.definition,function(t){return Ay(t)});return(0,ay.uniq)((0,ay.flatten)(e))}Fo.firstForBranching=Nj;function Lj(r){return[r.terminalType]}Fo.firstForTerminal=Lj});var Jv=w(ly=>{"use strict";Object.defineProperty(ly,"__esModule",{value:!0});ly.IN=void 0;ly.IN="_~IN~_"});var Uj=w(us=>{"use strict";var AIe=us&&us.__extends||function(){var r=function(e,t){return r=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},r(e,t)};return function(e,t){if(typeof t!="function"&&t!==null)throw new TypeError("Class extends value "+String(t)+" is not a constructor or null");r(e,t);function i(){this.constructor=e}e.prototype=t===null?Object.create(t):(i.prototype=t.prototype,new 
i)}}();Object.defineProperty(us,"__esModule",{value:!0});us.buildInProdFollowPrefix=us.buildBetweenProdsFollowPrefix=us.computeAllProdsFollows=us.ResyncFollowsWalker=void 0;var lIe=ny(),cIe=qv(),Tj=Gt(),Oj=Jv(),uIe=dn(),Mj=function(r){AIe(e,r);function e(t){var i=r.call(this)||this;return i.topProd=t,i.follows={},i}return e.prototype.startWalking=function(){return this.walk(this.topProd),this.follows},e.prototype.walkTerminal=function(t,i,n){},e.prototype.walkProdRef=function(t,i,n){var s=Kj(t.referencedRule,t.idx)+this.topProd.name,o=i.concat(n),a=new uIe.Alternative({definition:o}),l=(0,cIe.first)(a);this.follows[s]=l},e}(lIe.RestWalker);us.ResyncFollowsWalker=Mj;function gIe(r){var e={};return(0,Tj.forEach)(r,function(t){var i=new Mj(t).startWalking();(0,Tj.assign)(e,i)}),e}us.computeAllProdsFollows=gIe;function Kj(r,e){return r.name+e+Oj.IN}us.buildBetweenProdsFollowPrefix=Kj;function fIe(r){var e=r.terminalType.name;return e+r.idx+Oj.IN}us.buildInProdFollowPrefix=fIe});var Sd=w(xa=>{"use strict";Object.defineProperty(xa,"__esModule",{value:!0});xa.defaultGrammarValidatorErrorProvider=xa.defaultGrammarResolverErrorProvider=xa.defaultParserErrorProvider=void 0;var Zg=NA(),hIe=Gt(),_s=Gt(),Wv=dn(),Hj=bd();xa.defaultParserErrorProvider={buildMismatchTokenMessage:function(r){var e=r.expected,t=r.actual,i=r.previous,n=r.ruleName,s=(0,Zg.hasTokenLabel)(e),o=s?"--> "+(0,Zg.tokenLabel)(e)+" <--":"token of type --> "+e.name+" <--",a="Expecting "+o+" but found --> '"+t.image+"' <--";return a},buildNotAllInputParsedMessage:function(r){var e=r.firstRedundant,t=r.ruleName;return"Redundant input, expecting EOF but found: "+e.image},buildNoViableAltMessage:function(r){var e=r.expectedPathsPerAlt,t=r.actual,i=r.previous,n=r.customUserDescription,s=r.ruleName,o="Expecting: ",a=(0,_s.first)(t).image,l=` +but found: '`+a+"'";if(n)return o+n+l;var c=(0,_s.reduce)(e,function(h,p){return h.concat(p)},[]),u=(0,_s.map)(c,function(h){return"["+(0,_s.map)(h,function(p){return(0,Zg.tokenLabel)(p)}).join(", ")+"]"}),g=(0,_s.map)(u,function(h,p){return" "+(p+1)+". "+h}),f=`one of these possible Token sequences: +`+g.join(` +`);return o+f+l},buildEarlyExitMessage:function(r){var e=r.expectedIterationPaths,t=r.actual,i=r.customUserDescription,n=r.ruleName,s="Expecting: ",o=(0,_s.first)(t).image,a=` +but found: '`+o+"'";if(i)return s+i+a;var l=(0,_s.map)(e,function(u){return"["+(0,_s.map)(u,function(g){return(0,Zg.tokenLabel)(g)}).join(",")+"]"}),c=`expecting at least one iteration which starts with one of these possible Token sequences:: + `+("<"+l.join(" ,")+">");return s+c+a}};Object.freeze(xa.defaultParserErrorProvider);xa.defaultGrammarResolverErrorProvider={buildRuleNotFoundError:function(r,e){var t="Invalid grammar, reference to a rule which is not defined: ->"+e.nonTerminalName+`<- +inside top level rule: ->`+r.name+"<-";return t}};xa.defaultGrammarValidatorErrorProvider={buildDuplicateFoundError:function(r,e){function t(u){return u instanceof Wv.Terminal?u.terminalType.name:u instanceof Wv.NonTerminal?u.nonTerminalName:""}var i=r.name,n=(0,_s.first)(e),s=n.idx,o=(0,Hj.getProductionDslName)(n),a=t(n),l=s>0,c="->"+o+(l?s:"")+"<- "+(a?"with argument: ->"+a+"<-":"")+` + appears more than once (`+e.length+" times) in the top level rule: ->"+i+`<-. + For further details see: https://chevrotain.io/docs/FAQ.html#NUMERICAL_SUFFIXES + `;return c=c.replace(/[ \t]+/g," "),c=c.replace(/\s\s+/g,` +`),c},buildNamespaceConflictError:function(r){var e=`Namespace conflict found in grammar. 
+`+("The grammar has both a Terminal(Token) and a Non-Terminal(Rule) named: <"+r.name+`>. +`)+`To resolve this make sure each Terminal and Non-Terminal names are unique +This is easy to accomplish by using the convention that Terminal names start with an uppercase letter +and Non-Terminal names start with a lower case letter.`;return e},buildAlternationPrefixAmbiguityError:function(r){var e=(0,_s.map)(r.prefixPath,function(n){return(0,Zg.tokenLabel)(n)}).join(", "),t=r.alternation.idx===0?"":r.alternation.idx,i="Ambiguous alternatives: <"+r.ambiguityIndices.join(" ,")+`> due to common lookahead prefix +`+("in inside <"+r.topLevelRule.name+`> Rule, +`)+("<"+e+`> may appears as a prefix path in all these alternatives. +`)+`See: https://chevrotain.io/docs/guide/resolving_grammar_errors.html#COMMON_PREFIX +For Further details.`;return i},buildAlternationAmbiguityError:function(r){var e=(0,_s.map)(r.prefixPath,function(n){return(0,Zg.tokenLabel)(n)}).join(", "),t=r.alternation.idx===0?"":r.alternation.idx,i="Ambiguous Alternatives Detected: <"+r.ambiguityIndices.join(" ,")+"> in "+(" inside <"+r.topLevelRule.name+`> Rule, +`)+("<"+e+`> may appears as a prefix path in all these alternatives. +`);return i=i+`See: https://chevrotain.io/docs/guide/resolving_grammar_errors.html#AMBIGUOUS_ALTERNATIVES +For Further details.`,i},buildEmptyRepetitionError:function(r){var e=(0,Hj.getProductionDslName)(r.repetition);r.repetition.idx!==0&&(e+=r.repetition.idx);var t="The repetition <"+e+"> within Rule <"+r.topLevelRule.name+`> can never consume any tokens. +This could lead to an infinite loop.`;return t},buildTokenNameError:function(r){return"deprecated"},buildEmptyAlternationError:function(r){var e="Ambiguous empty alternative: <"+(r.emptyChoiceIdx+1)+">"+(" in inside <"+r.topLevelRule.name+`> Rule. +`)+"Only the last alternative may be an empty alternative.";return e},buildTooManyAlternativesError:function(r){var e=`An Alternation cannot have more than 256 alternatives: +`+(" inside <"+r.topLevelRule.name+`> Rule. + has `+(r.alternation.definition.length+1)+" alternatives.");return e},buildLeftRecursionError:function(r){var e=r.topLevelRule.name,t=hIe.map(r.leftRecursionPath,function(s){return s.name}),i=e+" --> "+t.concat([e]).join(" --> "),n=`Left Recursion found in grammar. +`+("rule: <"+e+`> can be invoked from itself (directly or indirectly) +`)+(`without consuming any Tokens. The grammar path that causes this is: + `+i+` +`)+` To fix this refactor your grammar to remove the left recursion. 
+see: https://en.wikipedia.org/wiki/LL_parser#Left_Factoring.`;return n},buildInvalidRuleNameError:function(r){return"deprecated"},buildDuplicateRuleNameError:function(r){var e;r.topLevelRule instanceof Wv.Rule?e=r.topLevelRule.name:e=r.topLevelRule;var t="Duplicate definition, rule: ->"+e+"<- is already defined in the grammar: ->"+r.grammarName+"<-";return t}}});var jj=w(LA=>{"use strict";var pIe=LA&&LA.__extends||function(){var r=function(e,t){return r=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},r(e,t)};return function(e,t){if(typeof t!="function"&&t!==null)throw new TypeError("Class extends value "+String(t)+" is not a constructor or null");r(e,t);function i(){this.constructor=e}e.prototype=t===null?Object.create(t):(i.prototype=t.prototype,new i)}}();Object.defineProperty(LA,"__esModule",{value:!0});LA.GastRefResolverVisitor=LA.resolveGrammar=void 0;var dIe=Gn(),Gj=Gt(),CIe=Xg();function mIe(r,e){var t=new Yj(r,e);return t.resolveRefs(),t.errors}LA.resolveGrammar=mIe;var Yj=function(r){pIe(e,r);function e(t,i){var n=r.call(this)||this;return n.nameToTopRule=t,n.errMsgProvider=i,n.errors=[],n}return e.prototype.resolveRefs=function(){var t=this;(0,Gj.forEach)((0,Gj.values)(this.nameToTopRule),function(i){t.currTopLevel=i,i.accept(t)})},e.prototype.visitNonTerminal=function(t){var i=this.nameToTopRule[t.nonTerminalName];if(i)t.referencedRule=i;else{var n=this.errMsgProvider.buildRuleNotFoundError(this.currTopLevel,t);this.errors.push({message:n,type:dIe.ParserDefinitionErrorType.UNRESOLVED_SUBRULE_REF,ruleName:this.currTopLevel.name,unresolvedRefName:t.nonTerminalName})}},e}(CIe.GAstVisitor);LA.GastRefResolverVisitor=Yj});var xd=w(Nr=>{"use strict";var hc=Nr&&Nr.__extends||function(){var r=function(e,t){return r=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},r(e,t)};return function(e,t){if(typeof t!="function"&&t!==null)throw new TypeError("Class extends value "+String(t)+" is not a constructor or null");r(e,t);function i(){this.constructor=e}e.prototype=t===null?Object.create(t):(i.prototype=t.prototype,new i)}}();Object.defineProperty(Nr,"__esModule",{value:!0});Nr.nextPossibleTokensAfter=Nr.possiblePathsFrom=Nr.NextTerminalAfterAtLeastOneSepWalker=Nr.NextTerminalAfterAtLeastOneWalker=Nr.NextTerminalAfterManySepWalker=Nr.NextTerminalAfterManyWalker=Nr.AbstractNextTerminalAfterProductionWalker=Nr.NextAfterTokenWalker=Nr.AbstractNextPossibleTokensWalker=void 0;var qj=ny(),Kt=Gt(),EIe=qv(),kt=dn(),Jj=function(r){hc(e,r);function e(t,i){var n=r.call(this)||this;return n.topProd=t,n.path=i,n.possibleTokTypes=[],n.nextProductionName="",n.nextProductionOccurrence=0,n.found=!1,n.isAtEndOfPath=!1,n}return e.prototype.startWalking=function(){if(this.found=!1,this.path.ruleStack[0]!==this.topProd.name)throw Error("The path does not start with the walker's top Rule!");return this.ruleStack=(0,Kt.cloneArr)(this.path.ruleStack).reverse(),this.occurrenceStack=(0,Kt.cloneArr)(this.path.occurrenceStack).reverse(),this.ruleStack.pop(),this.occurrenceStack.pop(),this.updateExpectedNext(),this.walk(this.topProd),this.possibleTokTypes},e.prototype.walk=function(t,i){i===void 0&&(i=[]),this.found||r.prototype.walk.call(this,t,i)},e.prototype.walkProdRef=function(t,i,n){if(t.referencedRule.name===this.nextProductionName&&t.idx===this.nextProductionOccurrence){var 
s=i.concat(n);this.updateExpectedNext(),this.walk(t.referencedRule,s)}},e.prototype.updateExpectedNext=function(){(0,Kt.isEmpty)(this.ruleStack)?(this.nextProductionName="",this.nextProductionOccurrence=0,this.isAtEndOfPath=!0):(this.nextProductionName=this.ruleStack.pop(),this.nextProductionOccurrence=this.occurrenceStack.pop())},e}(qj.RestWalker);Nr.AbstractNextPossibleTokensWalker=Jj;var IIe=function(r){hc(e,r);function e(t,i){var n=r.call(this,t,i)||this;return n.path=i,n.nextTerminalName="",n.nextTerminalOccurrence=0,n.nextTerminalName=n.path.lastTok.name,n.nextTerminalOccurrence=n.path.lastTokOccurrence,n}return e.prototype.walkTerminal=function(t,i,n){if(this.isAtEndOfPath&&t.terminalType.name===this.nextTerminalName&&t.idx===this.nextTerminalOccurrence&&!this.found){var s=i.concat(n),o=new kt.Alternative({definition:s});this.possibleTokTypes=(0,EIe.first)(o),this.found=!0}},e}(Jj);Nr.NextAfterTokenWalker=IIe;var vd=function(r){hc(e,r);function e(t,i){var n=r.call(this)||this;return n.topRule=t,n.occurrence=i,n.result={token:void 0,occurrence:void 0,isEndOfRule:void 0},n}return e.prototype.startWalking=function(){return this.walk(this.topRule),this.result},e}(qj.RestWalker);Nr.AbstractNextTerminalAfterProductionWalker=vd;var yIe=function(r){hc(e,r);function e(){return r!==null&&r.apply(this,arguments)||this}return e.prototype.walkMany=function(t,i,n){if(t.idx===this.occurrence){var s=(0,Kt.first)(i.concat(n));this.result.isEndOfRule=s===void 0,s instanceof kt.Terminal&&(this.result.token=s.terminalType,this.result.occurrence=s.idx)}else r.prototype.walkMany.call(this,t,i,n)},e}(vd);Nr.NextTerminalAfterManyWalker=yIe;var wIe=function(r){hc(e,r);function e(){return r!==null&&r.apply(this,arguments)||this}return e.prototype.walkManySep=function(t,i,n){if(t.idx===this.occurrence){var s=(0,Kt.first)(i.concat(n));this.result.isEndOfRule=s===void 0,s instanceof kt.Terminal&&(this.result.token=s.terminalType,this.result.occurrence=s.idx)}else r.prototype.walkManySep.call(this,t,i,n)},e}(vd);Nr.NextTerminalAfterManySepWalker=wIe;var BIe=function(r){hc(e,r);function e(){return r!==null&&r.apply(this,arguments)||this}return e.prototype.walkAtLeastOne=function(t,i,n){if(t.idx===this.occurrence){var s=(0,Kt.first)(i.concat(n));this.result.isEndOfRule=s===void 0,s instanceof kt.Terminal&&(this.result.token=s.terminalType,this.result.occurrence=s.idx)}else r.prototype.walkAtLeastOne.call(this,t,i,n)},e}(vd);Nr.NextTerminalAfterAtLeastOneWalker=BIe;var QIe=function(r){hc(e,r);function e(){return r!==null&&r.apply(this,arguments)||this}return e.prototype.walkAtLeastOneSep=function(t,i,n){if(t.idx===this.occurrence){var s=(0,Kt.first)(i.concat(n));this.result.isEndOfRule=s===void 0,s instanceof kt.Terminal&&(this.result.token=s.terminalType,this.result.occurrence=s.idx)}else r.prototype.walkAtLeastOneSep.call(this,t,i,n)},e}(vd);Nr.NextTerminalAfterAtLeastOneSepWalker=QIe;function Wj(r,e,t){t===void 0&&(t=[]),t=(0,Kt.cloneArr)(t);var i=[],n=0;function s(c){return c.concat((0,Kt.drop)(r,n+1))}function o(c){var u=Wj(s(c),e,t);return i.concat(u)}for(;t.length=0;ge--){var re=B.definition[ge],O={idx:p,def:re.definition.concat((0,Kt.drop)(h)),ruleStack:C,occurrenceStack:y};g.push(O),g.push(o)}else if(B instanceof kt.Alternative)g.push({idx:p,def:B.definition.concat((0,Kt.drop)(h)),ruleStack:C,occurrenceStack:y});else if(B instanceof kt.Rule)g.push(SIe(B,p,C,y));else throw Error("non exhaustive match")}}return u}Nr.nextPossibleTokensAfter=bIe;function SIe(r,e,t,i){var n=(0,Kt.cloneArr)(t);n.push(r.name);var 
s=(0,Kt.cloneArr)(i);return s.push(1),{idx:e,def:r.definition,ruleStack:n,occurrenceStack:s}}});var Pd=w(Zt=>{"use strict";var Xj=Zt&&Zt.__extends||function(){var r=function(e,t){return r=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},r(e,t)};return function(e,t){if(typeof t!="function"&&t!==null)throw new TypeError("Class extends value "+String(t)+" is not a constructor or null");r(e,t);function i(){this.constructor=e}e.prototype=t===null?Object.create(t):(i.prototype=t.prototype,new i)}}();Object.defineProperty(Zt,"__esModule",{value:!0});Zt.areTokenCategoriesNotUsed=Zt.isStrictPrefixOfPath=Zt.containsPath=Zt.getLookaheadPathsForOptionalProd=Zt.getLookaheadPathsForOr=Zt.lookAheadSequenceFromAlternatives=Zt.buildSingleAlternativeLookaheadFunction=Zt.buildAlternativesLookAheadFunc=Zt.buildLookaheadFuncForOptionalProd=Zt.buildLookaheadFuncForOr=Zt.getProdType=Zt.PROD_TYPE=void 0;var sr=Gt(),zj=xd(),vIe=ny(),cy=Vg(),TA=dn(),xIe=Xg(),oi;(function(r){r[r.OPTION=0]="OPTION",r[r.REPETITION=1]="REPETITION",r[r.REPETITION_MANDATORY=2]="REPETITION_MANDATORY",r[r.REPETITION_MANDATORY_WITH_SEPARATOR=3]="REPETITION_MANDATORY_WITH_SEPARATOR",r[r.REPETITION_WITH_SEPARATOR=4]="REPETITION_WITH_SEPARATOR",r[r.ALTERNATION=5]="ALTERNATION"})(oi=Zt.PROD_TYPE||(Zt.PROD_TYPE={}));function PIe(r){if(r instanceof TA.Option)return oi.OPTION;if(r instanceof TA.Repetition)return oi.REPETITION;if(r instanceof TA.RepetitionMandatory)return oi.REPETITION_MANDATORY;if(r instanceof TA.RepetitionMandatoryWithSeparator)return oi.REPETITION_MANDATORY_WITH_SEPARATOR;if(r instanceof TA.RepetitionWithSeparator)return oi.REPETITION_WITH_SEPARATOR;if(r instanceof TA.Alternation)return oi.ALTERNATION;throw Error("non exhaustive match")}Zt.getProdType=PIe;function DIe(r,e,t,i,n,s){var o=_j(r,e,t),a=Xv(o)?cy.tokenStructuredMatcherNoCategories:cy.tokenStructuredMatcher;return s(o,i,a,n)}Zt.buildLookaheadFuncForOr=DIe;function kIe(r,e,t,i,n,s){var o=$j(r,e,n,t),a=Xv(o)?cy.tokenStructuredMatcherNoCategories:cy.tokenStructuredMatcher;return s(o[0],a,i)}Zt.buildLookaheadFuncForOptionalProd=kIe;function RIe(r,e,t,i){var n=r.length,s=(0,sr.every)(r,function(l){return(0,sr.every)(l,function(c){return c.length===1})});if(e)return function(l){for(var c=(0,sr.map)(l,function(D){return D.GATE}),u=0;u{"use strict";var Zv=Vt&&Vt.__extends||function(){var r=function(e,t){return r=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},r(e,t)};return function(e,t){if(typeof t!="function"&&t!==null)throw new TypeError("Class extends value "+String(t)+" is not a constructor or null");r(e,t);function i(){this.constructor=e}e.prototype=t===null?Object.create(t):(i.prototype=t.prototype,new i)}}();Object.defineProperty(Vt,"__esModule",{value:!0});Vt.checkPrefixAlternativesAmbiguities=Vt.validateSomeNonEmptyLookaheadPath=Vt.validateTooManyAlts=Vt.RepetionCollector=Vt.validateAmbiguousAlternationAlternatives=Vt.validateEmptyOrAlternative=Vt.getFirstNoneTerminal=Vt.validateNoLeftRecursion=Vt.validateRuleIsOverridden=Vt.validateRuleDoesNotAlreadyExist=Vt.OccurrenceValidationCollector=Vt.identifyProductionForDuplicates=Vt.validateGrammar=void 0;var er=Gt(),br=Gt(),No=Gn(),_v=bd(),_g=Pd(),OIe=xd(),$s=dn(),$v=Xg();function MIe(r,e,t,i,n){var s=er.map(r,function(h){return KIe(h,i)}),o=er.map(r,function(h){return 
ex(h,h,i)}),a=[],l=[],c=[];(0,br.every)(o,br.isEmpty)&&(a=(0,br.map)(r,function(h){return sq(h,i)}),l=(0,br.map)(r,function(h){return oq(h,e,i)}),c=lq(r,e,i));var u=GIe(r,t,i),g=(0,br.map)(r,function(h){return Aq(h,i)}),f=(0,br.map)(r,function(h){return nq(h,r,n,i)});return er.flatten(s.concat(c,o,a,l,u,g,f))}Vt.validateGrammar=MIe;function KIe(r,e){var t=new iq;r.accept(t);var i=t.allProductions,n=er.groupBy(i,tq),s=er.pick(n,function(a){return a.length>1}),o=er.map(er.values(s),function(a){var l=er.first(a),c=e.buildDuplicateFoundError(r,a),u=(0,_v.getProductionDslName)(l),g={message:c,type:No.ParserDefinitionErrorType.DUPLICATE_PRODUCTIONS,ruleName:r.name,dslName:u,occurrence:l.idx},f=rq(l);return f&&(g.parameter=f),g});return o}function tq(r){return(0,_v.getProductionDslName)(r)+"_#_"+r.idx+"_#_"+rq(r)}Vt.identifyProductionForDuplicates=tq;function rq(r){return r instanceof $s.Terminal?r.terminalType.name:r instanceof $s.NonTerminal?r.nonTerminalName:""}var iq=function(r){Zv(e,r);function e(){var t=r!==null&&r.apply(this,arguments)||this;return t.allProductions=[],t}return e.prototype.visitNonTerminal=function(t){this.allProductions.push(t)},e.prototype.visitOption=function(t){this.allProductions.push(t)},e.prototype.visitRepetitionWithSeparator=function(t){this.allProductions.push(t)},e.prototype.visitRepetitionMandatory=function(t){this.allProductions.push(t)},e.prototype.visitRepetitionMandatoryWithSeparator=function(t){this.allProductions.push(t)},e.prototype.visitRepetition=function(t){this.allProductions.push(t)},e.prototype.visitAlternation=function(t){this.allProductions.push(t)},e.prototype.visitTerminal=function(t){this.allProductions.push(t)},e}($v.GAstVisitor);Vt.OccurrenceValidationCollector=iq;function nq(r,e,t,i){var n=[],s=(0,br.reduce)(e,function(a,l){return l.name===r.name?a+1:a},0);if(s>1){var o=i.buildDuplicateRuleNameError({topLevelRule:r,grammarName:t});n.push({message:o,type:No.ParserDefinitionErrorType.DUPLICATE_RULE_NAME,ruleName:r.name})}return n}Vt.validateRuleDoesNotAlreadyExist=nq;function UIe(r,e,t){var i=[],n;return er.contains(e,r)||(n="Invalid rule override, rule: ->"+r+"<- cannot be overridden in the grammar: ->"+t+"<-as it is not defined in any of the super grammars ",i.push({message:n,type:No.ParserDefinitionErrorType.INVALID_RULE_OVERRIDE,ruleName:r})),i}Vt.validateRuleIsOverridden=UIe;function ex(r,e,t,i){i===void 0&&(i=[]);var n=[],s=Dd(e.definition);if(er.isEmpty(s))return[];var o=r.name,a=er.contains(s,r);a&&n.push({message:t.buildLeftRecursionError({topLevelRule:r,leftRecursionPath:i}),type:No.ParserDefinitionErrorType.LEFT_RECURSION,ruleName:o});var l=er.difference(s,i.concat([r])),c=er.map(l,function(u){var g=er.cloneArr(i);return g.push(u),ex(r,u,t,g)});return n.concat(er.flatten(c))}Vt.validateNoLeftRecursion=ex;function Dd(r){var e=[];if(er.isEmpty(r))return e;var t=er.first(r);if(t instanceof $s.NonTerminal)e.push(t.referencedRule);else if(t instanceof $s.Alternative||t instanceof $s.Option||t instanceof $s.RepetitionMandatory||t instanceof $s.RepetitionMandatoryWithSeparator||t instanceof $s.RepetitionWithSeparator||t instanceof $s.Repetition)e=e.concat(Dd(t.definition));else if(t instanceof $s.Alternation)e=er.flatten(er.map(t.definition,function(o){return Dd(o.definition)}));else if(!(t instanceof $s.Terminal))throw Error("non exhaustive match");var i=(0,_v.isOptionalProd)(t),n=r.length>1;if(i&&n){var s=er.drop(r);return e.concat(Dd(s))}else return e}Vt.getFirstNoneTerminal=Dd;var tx=function(r){Zv(e,r);function e(){var 
t=r!==null&&r.apply(this,arguments)||this;return t.alternations=[],t}return e.prototype.visitAlternation=function(t){this.alternations.push(t)},e}($v.GAstVisitor);function sq(r,e){var t=new tx;r.accept(t);var i=t.alternations,n=er.reduce(i,function(s,o){var a=er.dropRight(o.definition),l=er.map(a,function(c,u){var g=(0,OIe.nextPossibleTokensAfter)([c],[],null,1);return er.isEmpty(g)?{message:e.buildEmptyAlternationError({topLevelRule:r,alternation:o,emptyChoiceIdx:u}),type:No.ParserDefinitionErrorType.NONE_LAST_EMPTY_ALT,ruleName:r.name,occurrence:o.idx,alternative:u+1}:null});return s.concat(er.compact(l))},[]);return n}Vt.validateEmptyOrAlternative=sq;function oq(r,e,t){var i=new tx;r.accept(i);var n=i.alternations;n=(0,br.reject)(n,function(o){return o.ignoreAmbiguities===!0});var s=er.reduce(n,function(o,a){var l=a.idx,c=a.maxLookahead||e,u=(0,_g.getLookaheadPathsForOr)(l,r,c,a),g=HIe(u,a,r,t),f=cq(u,a,r,t);return o.concat(g,f)},[]);return s}Vt.validateAmbiguousAlternationAlternatives=oq;var aq=function(r){Zv(e,r);function e(){var t=r!==null&&r.apply(this,arguments)||this;return t.allProductions=[],t}return e.prototype.visitRepetitionWithSeparator=function(t){this.allProductions.push(t)},e.prototype.visitRepetitionMandatory=function(t){this.allProductions.push(t)},e.prototype.visitRepetitionMandatoryWithSeparator=function(t){this.allProductions.push(t)},e.prototype.visitRepetition=function(t){this.allProductions.push(t)},e}($v.GAstVisitor);Vt.RepetionCollector=aq;function Aq(r,e){var t=new tx;r.accept(t);var i=t.alternations,n=er.reduce(i,function(s,o){return o.definition.length>255&&s.push({message:e.buildTooManyAlternativesError({topLevelRule:r,alternation:o}),type:No.ParserDefinitionErrorType.TOO_MANY_ALTS,ruleName:r.name,occurrence:o.idx}),s},[]);return n}Vt.validateTooManyAlts=Aq;function lq(r,e,t){var i=[];return(0,br.forEach)(r,function(n){var s=new aq;n.accept(s);var o=s.allProductions;(0,br.forEach)(o,function(a){var l=(0,_g.getProdType)(a),c=a.maxLookahead||e,u=a.idx,g=(0,_g.getLookaheadPathsForOptionalProd)(u,n,l,c),f=g[0];if((0,br.isEmpty)((0,br.flatten)(f))){var h=t.buildEmptyRepetitionError({topLevelRule:n,repetition:a});i.push({message:h,type:No.ParserDefinitionErrorType.NO_NON_EMPTY_LOOKAHEAD,ruleName:n.name})}})}),i}Vt.validateSomeNonEmptyLookaheadPath=lq;function HIe(r,e,t,i){var n=[],s=(0,br.reduce)(r,function(a,l,c){return e.definition[c].ignoreAmbiguities===!0||(0,br.forEach)(l,function(u){var g=[c];(0,br.forEach)(r,function(f,h){c!==h&&(0,_g.containsPath)(f,u)&&e.definition[h].ignoreAmbiguities!==!0&&g.push(h)}),g.length>1&&!(0,_g.containsPath)(n,u)&&(n.push(u),a.push({alts:g,path:u}))}),a},[]),o=er.map(s,function(a){var l=(0,br.map)(a.alts,function(u){return u+1}),c=i.buildAlternationAmbiguityError({topLevelRule:t,alternation:e,ambiguityIndices:l,prefixPath:a.path});return{message:c,type:No.ParserDefinitionErrorType.AMBIGUOUS_ALTS,ruleName:t.name,occurrence:e.idx,alternatives:[a.alts]}});return o}function cq(r,e,t,i){var n=[],s=(0,br.reduce)(r,function(o,a,l){var c=(0,br.map)(a,function(u){return{idx:l,path:u}});return o.concat(c)},[]);return(0,br.forEach)(s,function(o){var a=e.definition[o.idx];if(a.ignoreAmbiguities!==!0){var l=o.idx,c=o.path,u=(0,br.findAll)(s,function(f){return e.definition[f.idx].ignoreAmbiguities!==!0&&f.idx{"use strict";Object.defineProperty($g,"__esModule",{value:!0});$g.validateGrammar=$g.resolveGrammar=void 0;var ix=Gt(),YIe=jj(),jIe=rx(),uq=Sd();function 
qIe(r){r=(0,ix.defaults)(r,{errMsgProvider:uq.defaultGrammarResolverErrorProvider});var e={};return(0,ix.forEach)(r.rules,function(t){e[t.name]=t}),(0,YIe.resolveGrammar)(e,r.errMsgProvider)}$g.resolveGrammar=qIe;function JIe(r){return r=(0,ix.defaults)(r,{errMsgProvider:uq.defaultGrammarValidatorErrorProvider}),(0,jIe.validateGrammar)(r.rules,r.maxLookahead,r.tokenTypes,r.errMsgProvider,r.grammarName)}$g.validateGrammar=JIe});var ef=w(mn=>{"use strict";var kd=mn&&mn.__extends||function(){var r=function(e,t){return r=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},r(e,t)};return function(e,t){if(typeof t!="function"&&t!==null)throw new TypeError("Class extends value "+String(t)+" is not a constructor or null");r(e,t);function i(){this.constructor=e}e.prototype=t===null?Object.create(t):(i.prototype=t.prototype,new i)}}();Object.defineProperty(mn,"__esModule",{value:!0});mn.EarlyExitException=mn.NotAllInputParsedException=mn.NoViableAltException=mn.MismatchedTokenException=mn.isRecognitionException=void 0;var WIe=Gt(),fq="MismatchedTokenException",hq="NoViableAltException",pq="EarlyExitException",dq="NotAllInputParsedException",Cq=[fq,hq,pq,dq];Object.freeze(Cq);function zIe(r){return(0,WIe.contains)(Cq,r.name)}mn.isRecognitionException=zIe;var uy=function(r){kd(e,r);function e(t,i){var n=this.constructor,s=r.call(this,t)||this;return s.token=i,s.resyncedTokens=[],Object.setPrototypeOf(s,n.prototype),Error.captureStackTrace&&Error.captureStackTrace(s,s.constructor),s}return e}(Error),VIe=function(r){kd(e,r);function e(t,i,n){var s=r.call(this,t,i)||this;return s.previousToken=n,s.name=fq,s}return e}(uy);mn.MismatchedTokenException=VIe;var XIe=function(r){kd(e,r);function e(t,i,n){var s=r.call(this,t,i)||this;return s.previousToken=n,s.name=hq,s}return e}(uy);mn.NoViableAltException=XIe;var ZIe=function(r){kd(e,r);function e(t,i){var n=r.call(this,t,i)||this;return n.name=dq,n}return e}(uy);mn.NotAllInputParsedException=ZIe;var _Ie=function(r){kd(e,r);function e(t,i,n){var s=r.call(this,t,i)||this;return s.previousToken=n,s.name=pq,s}return e}(uy);mn.EarlyExitException=_Ie});var sx=w(Ki=>{"use strict";Object.defineProperty(Ki,"__esModule",{value:!0});Ki.attemptInRepetitionRecovery=Ki.Recoverable=Ki.InRuleRecoveryException=Ki.IN_RULE_RECOVERY_EXCEPTION=Ki.EOF_FOLLOW_KEY=void 0;var gy=NA(),gs=Gt(),$Ie=ef(),eye=Jv(),tye=Gn();Ki.EOF_FOLLOW_KEY={};Ki.IN_RULE_RECOVERY_EXCEPTION="InRuleRecoveryException";function nx(r){this.name=Ki.IN_RULE_RECOVERY_EXCEPTION,this.message=r}Ki.InRuleRecoveryException=nx;nx.prototype=Error.prototype;var rye=function(){function r(){}return r.prototype.initRecoverable=function(e){this.firstAfterRepMap={},this.resyncFollows={},this.recoveryEnabled=(0,gs.has)(e,"recoveryEnabled")?e.recoveryEnabled:tye.DEFAULT_PARSER_CONFIG.recoveryEnabled,this.recoveryEnabled&&(this.attemptInRepetitionRecovery=mq)},r.prototype.getTokenToInsert=function(e){var t=(0,gy.createTokenInstance)(e,"",NaN,NaN,NaN,NaN,NaN,NaN);return t.isInsertedInRecovery=!0,t},r.prototype.canTokenTypeBeInsertedInRecovery=function(e){return!0},r.prototype.tryInRepetitionRecovery=function(e,t,i,n){for(var s=this,o=this.findReSyncTokenType(),a=this.exportLexerState(),l=[],c=!1,u=this.LA(1),g=this.LA(1),f=function(){var h=s.LA(0),p=s.errorMessageProvider.buildMismatchTokenMessage({expected:n,actual:u,previous:h,ruleName:s.getCurrRuleFullName()}),C=new 
$Ie.MismatchedTokenException(p,u,s.LA(0));C.resyncedTokens=(0,gs.dropRight)(l),s.SAVE_ERROR(C)};!c;)if(this.tokenMatcher(g,n)){f();return}else if(i.call(this)){f(),e.apply(this,t);return}else this.tokenMatcher(g,o)?c=!0:(g=this.SKIP_TOKEN(),this.addToResyncTokens(g,l));this.importLexerState(a)},r.prototype.shouldInRepetitionRecoveryBeTried=function(e,t,i){return!(i===!1||e===void 0||t===void 0||this.tokenMatcher(this.LA(1),e)||this.isBackTracking()||this.canPerformInRuleRecovery(e,this.getFollowsForInRuleRecovery(e,t)))},r.prototype.getFollowsForInRuleRecovery=function(e,t){var i=this.getCurrentGrammarPath(e,t),n=this.getNextPossibleTokenTypes(i);return n},r.prototype.tryInRuleRecovery=function(e,t){if(this.canRecoverWithSingleTokenInsertion(e,t)){var i=this.getTokenToInsert(e);return i}if(this.canRecoverWithSingleTokenDeletion(e)){var n=this.SKIP_TOKEN();return this.consumeToken(),n}throw new nx("sad sad panda")},r.prototype.canPerformInRuleRecovery=function(e,t){return this.canRecoverWithSingleTokenInsertion(e,t)||this.canRecoverWithSingleTokenDeletion(e)},r.prototype.canRecoverWithSingleTokenInsertion=function(e,t){var i=this;if(!this.canTokenTypeBeInsertedInRecovery(e)||(0,gs.isEmpty)(t))return!1;var n=this.LA(1),s=(0,gs.find)(t,function(o){return i.tokenMatcher(n,o)})!==void 0;return s},r.prototype.canRecoverWithSingleTokenDeletion=function(e){var t=this.tokenMatcher(this.LA(2),e);return t},r.prototype.isInCurrentRuleReSyncSet=function(e){var t=this.getCurrFollowKey(),i=this.getFollowSetFromFollowKey(t);return(0,gs.contains)(i,e)},r.prototype.findReSyncTokenType=function(){for(var e=this.flattenFollowSet(),t=this.LA(1),i=2;;){var n=t.tokenType;if((0,gs.contains)(e,n))return n;t=this.LA(i),i++}},r.prototype.getCurrFollowKey=function(){if(this.RULE_STACK.length===1)return Ki.EOF_FOLLOW_KEY;var e=this.getLastExplicitRuleShortName(),t=this.getLastExplicitRuleOccurrenceIndex(),i=this.getPreviousExplicitRuleShortName();return{ruleName:this.shortRuleNameToFullName(e),idxInCallingRule:t,inRule:this.shortRuleNameToFullName(i)}},r.prototype.buildFullFollowKeyStack=function(){var e=this,t=this.RULE_STACK,i=this.RULE_OCCURRENCE_STACK;return(0,gs.map)(t,function(n,s){return s===0?Ki.EOF_FOLLOW_KEY:{ruleName:e.shortRuleNameToFullName(n),idxInCallingRule:i[s],inRule:e.shortRuleNameToFullName(t[s-1])}})},r.prototype.flattenFollowSet=function(){var e=this,t=(0,gs.map)(this.buildFullFollowKeyStack(),function(i){return e.getFollowSetFromFollowKey(i)});return(0,gs.flatten)(t)},r.prototype.getFollowSetFromFollowKey=function(e){if(e===Ki.EOF_FOLLOW_KEY)return[gy.EOF];var t=e.ruleName+e.idxInCallingRule+eye.IN+e.inRule;return this.resyncFollows[t]},r.prototype.addToResyncTokens=function(e,t){return this.tokenMatcher(e,gy.EOF)||t.push(e),t},r.prototype.reSyncTo=function(e){for(var t=[],i=this.LA(1);this.tokenMatcher(i,e)===!1;)i=this.SKIP_TOKEN(),this.addToResyncTokens(i,t);return(0,gs.dropRight)(t)},r.prototype.attemptInRepetitionRecovery=function(e,t,i,n,s,o,a){},r.prototype.getCurrentGrammarPath=function(e,t){var i=this.getHumanReadableRuleStack(),n=(0,gs.cloneArr)(this.RULE_OCCURRENCE_STACK),s={ruleStack:i,occurrenceStack:n,lastTok:e,lastTokOccurrence:t};return s},r.prototype.getHumanReadableRuleStack=function(){var e=this;return(0,gs.map)(this.RULE_STACK,function(t){return e.shortRuleNameToFullName(t)})},r}();Ki.Recoverable=rye;function mq(r,e,t,i,n,s,o){var a=this.getKeyForAutomaticLookahead(i,n),l=this.firstAfterRepMap[a];if(l===void 0){var 
c=this.getCurrRuleFullName(),u=this.getGAstProductions()[c],g=new s(u,n);l=g.startWalking(),this.firstAfterRepMap[a]=l}var f=l.token,h=l.occurrence,p=l.isEndOfRule;this.RULE_STACK.length===1&&p&&f===void 0&&(f=gy.EOF,h=1),this.shouldInRepetitionRecoveryBeTried(f,h,o)&&this.tryInRepetitionRecovery(r,e,t,f)}Ki.attemptInRepetitionRecovery=mq});var fy=w(Jt=>{"use strict";Object.defineProperty(Jt,"__esModule",{value:!0});Jt.getKeyForAutomaticLookahead=Jt.AT_LEAST_ONE_SEP_IDX=Jt.MANY_SEP_IDX=Jt.AT_LEAST_ONE_IDX=Jt.MANY_IDX=Jt.OPTION_IDX=Jt.OR_IDX=Jt.BITS_FOR_ALT_IDX=Jt.BITS_FOR_RULE_IDX=Jt.BITS_FOR_OCCURRENCE_IDX=Jt.BITS_FOR_METHOD_TYPE=void 0;Jt.BITS_FOR_METHOD_TYPE=4;Jt.BITS_FOR_OCCURRENCE_IDX=8;Jt.BITS_FOR_RULE_IDX=12;Jt.BITS_FOR_ALT_IDX=8;Jt.OR_IDX=1<{"use strict";Object.defineProperty(hy,"__esModule",{value:!0});hy.LooksAhead=void 0;var Pa=Pd(),eo=Gt(),Eq=Gn(),Da=fy(),pc=bd(),nye=function(){function r(){}return r.prototype.initLooksAhead=function(e){this.dynamicTokensEnabled=(0,eo.has)(e,"dynamicTokensEnabled")?e.dynamicTokensEnabled:Eq.DEFAULT_PARSER_CONFIG.dynamicTokensEnabled,this.maxLookahead=(0,eo.has)(e,"maxLookahead")?e.maxLookahead:Eq.DEFAULT_PARSER_CONFIG.maxLookahead,this.lookAheadFuncsCache=(0,eo.isES2015MapSupported)()?new Map:[],(0,eo.isES2015MapSupported)()?(this.getLaFuncFromCache=this.getLaFuncFromMap,this.setLaFuncCache=this.setLaFuncCacheUsingMap):(this.getLaFuncFromCache=this.getLaFuncFromObj,this.setLaFuncCache=this.setLaFuncUsingObj)},r.prototype.preComputeLookaheadFunctions=function(e){var t=this;(0,eo.forEach)(e,function(i){t.TRACE_INIT(i.name+" Rule Lookahead",function(){var n=(0,pc.collectMethods)(i),s=n.alternation,o=n.repetition,a=n.option,l=n.repetitionMandatory,c=n.repetitionMandatoryWithSeparator,u=n.repetitionWithSeparator;(0,eo.forEach)(s,function(g){var f=g.idx===0?"":g.idx;t.TRACE_INIT(""+(0,pc.getProductionDslName)(g)+f,function(){var h=(0,Pa.buildLookaheadFuncForOr)(g.idx,i,g.maxLookahead||t.maxLookahead,g.hasPredicates,t.dynamicTokensEnabled,t.lookAheadBuilderForAlternatives),p=(0,Da.getKeyForAutomaticLookahead)(t.fullRuleNameToShort[i.name],Da.OR_IDX,g.idx);t.setLaFuncCache(p,h)})}),(0,eo.forEach)(o,function(g){t.computeLookaheadFunc(i,g.idx,Da.MANY_IDX,Pa.PROD_TYPE.REPETITION,g.maxLookahead,(0,pc.getProductionDslName)(g))}),(0,eo.forEach)(a,function(g){t.computeLookaheadFunc(i,g.idx,Da.OPTION_IDX,Pa.PROD_TYPE.OPTION,g.maxLookahead,(0,pc.getProductionDslName)(g))}),(0,eo.forEach)(l,function(g){t.computeLookaheadFunc(i,g.idx,Da.AT_LEAST_ONE_IDX,Pa.PROD_TYPE.REPETITION_MANDATORY,g.maxLookahead,(0,pc.getProductionDslName)(g))}),(0,eo.forEach)(c,function(g){t.computeLookaheadFunc(i,g.idx,Da.AT_LEAST_ONE_SEP_IDX,Pa.PROD_TYPE.REPETITION_MANDATORY_WITH_SEPARATOR,g.maxLookahead,(0,pc.getProductionDslName)(g))}),(0,eo.forEach)(u,function(g){t.computeLookaheadFunc(i,g.idx,Da.MANY_SEP_IDX,Pa.PROD_TYPE.REPETITION_WITH_SEPARATOR,g.maxLookahead,(0,pc.getProductionDslName)(g))})})})},r.prototype.computeLookaheadFunc=function(e,t,i,n,s,o){var a=this;this.TRACE_INIT(""+o+(t===0?"":t),function(){var 
l=(0,Pa.buildLookaheadFuncForOptionalProd)(t,e,s||a.maxLookahead,a.dynamicTokensEnabled,n,a.lookAheadBuilderForOptional),c=(0,Da.getKeyForAutomaticLookahead)(a.fullRuleNameToShort[e.name],i,t);a.setLaFuncCache(c,l)})},r.prototype.lookAheadBuilderForOptional=function(e,t,i){return(0,Pa.buildSingleAlternativeLookaheadFunction)(e,t,i)},r.prototype.lookAheadBuilderForAlternatives=function(e,t,i,n){return(0,Pa.buildAlternativesLookAheadFunc)(e,t,i,n)},r.prototype.getKeyForAutomaticLookahead=function(e,t){var i=this.getLastExplicitRuleShortName();return(0,Da.getKeyForAutomaticLookahead)(i,e,t)},r.prototype.getLaFuncFromCache=function(e){},r.prototype.getLaFuncFromMap=function(e){return this.lookAheadFuncsCache.get(e)},r.prototype.getLaFuncFromObj=function(e){return this.lookAheadFuncsCache[e]},r.prototype.setLaFuncCache=function(e,t){},r.prototype.setLaFuncCacheUsingMap=function(e,t){this.lookAheadFuncsCache.set(e,t)},r.prototype.setLaFuncUsingObj=function(e,t){this.lookAheadFuncsCache[e]=t},r}();hy.LooksAhead=nye});var yq=w(Lo=>{"use strict";Object.defineProperty(Lo,"__esModule",{value:!0});Lo.addNoneTerminalToCst=Lo.addTerminalToCst=Lo.setNodeLocationFull=Lo.setNodeLocationOnlyOffset=void 0;function sye(r,e){isNaN(r.startOffset)===!0?(r.startOffset=e.startOffset,r.endOffset=e.endOffset):r.endOffset{"use strict";Object.defineProperty(OA,"__esModule",{value:!0});OA.defineNameProp=OA.functionName=OA.classNameFromInstance=void 0;var lye=Gt();function cye(r){return Bq(r.constructor)}OA.classNameFromInstance=cye;var wq="name";function Bq(r){var e=r.name;return e||"anonymous"}OA.functionName=Bq;function uye(r,e){var t=Object.getOwnPropertyDescriptor(r,wq);return(0,lye.isUndefined)(t)||t.configurable?(Object.defineProperty(r,wq,{enumerable:!1,configurable:!0,writable:!1,value:e}),!0):!1}OA.defineNameProp=uye});var xq=w(Si=>{"use strict";Object.defineProperty(Si,"__esModule",{value:!0});Si.validateRedundantMethods=Si.validateMissingCstMethods=Si.validateVisitor=Si.CstVisitorDefinitionError=Si.createBaseVisitorConstructorWithDefaults=Si.createBaseSemanticVisitorConstructor=Si.defaultVisit=void 0;var fs=Gt(),Rd=ox();function Qq(r,e){for(var t=(0,fs.keys)(r),i=t.length,n=0;n: + `+(""+s.join(` + +`).replace(/\n/g,` + `)))}}};return t.prototype=i,t.prototype.constructor=t,t._RULE_NAMES=e,t}Si.createBaseSemanticVisitorConstructor=gye;function fye(r,e,t){var i=function(){};(0,Rd.defineNameProp)(i,r+"BaseSemanticsWithDefaults");var n=Object.create(t.prototype);return(0,fs.forEach)(e,function(s){n[s]=Qq}),i.prototype=n,i.prototype.constructor=i,i}Si.createBaseVisitorConstructorWithDefaults=fye;var ax;(function(r){r[r.REDUNDANT_METHOD=0]="REDUNDANT_METHOD",r[r.MISSING_METHOD=1]="MISSING_METHOD"})(ax=Si.CstVisitorDefinitionError||(Si.CstVisitorDefinitionError={}));function bq(r,e){var t=Sq(r,e),i=vq(r,e);return t.concat(i)}Si.validateVisitor=bq;function Sq(r,e){var t=(0,fs.map)(e,function(i){if(!(0,fs.isFunction)(r[i]))return{msg:"Missing visitor method: <"+i+"> on "+(0,Rd.functionName)(r.constructor)+" CST Visitor.",type:ax.MISSING_METHOD,methodName:i}});return(0,fs.compact)(t)}Si.validateMissingCstMethods=Sq;var hye=["constructor","visit","validateVisitor"];function vq(r,e){var t=[];for(var i in r)(0,fs.isFunction)(r[i])&&!(0,fs.contains)(hye,i)&&!(0,fs.contains)(e,i)&&t.push({msg:"Redundant visitor method: <"+i+"> on "+(0,Rd.functionName)(r.constructor)+` CST Visitor +There is no Grammar Rule corresponding to this method's name. 
+`,type:ax.REDUNDANT_METHOD,methodName:i});return t}Si.validateRedundantMethods=vq});var Dq=w(py=>{"use strict";Object.defineProperty(py,"__esModule",{value:!0});py.TreeBuilder=void 0;var tf=yq(),_r=Gt(),Pq=xq(),pye=Gn(),dye=function(){function r(){}return r.prototype.initTreeBuilder=function(e){if(this.CST_STACK=[],this.outputCst=e.outputCst,this.nodeLocationTracking=(0,_r.has)(e,"nodeLocationTracking")?e.nodeLocationTracking:pye.DEFAULT_PARSER_CONFIG.nodeLocationTracking,!this.outputCst)this.cstInvocationStateUpdate=_r.NOOP,this.cstFinallyStateUpdate=_r.NOOP,this.cstPostTerminal=_r.NOOP,this.cstPostNonTerminal=_r.NOOP,this.cstPostRule=_r.NOOP;else if(/full/i.test(this.nodeLocationTracking))this.recoveryEnabled?(this.setNodeLocationFromToken=tf.setNodeLocationFull,this.setNodeLocationFromNode=tf.setNodeLocationFull,this.cstPostRule=_r.NOOP,this.setInitialNodeLocation=this.setInitialNodeLocationFullRecovery):(this.setNodeLocationFromToken=_r.NOOP,this.setNodeLocationFromNode=_r.NOOP,this.cstPostRule=this.cstPostRuleFull,this.setInitialNodeLocation=this.setInitialNodeLocationFullRegular);else if(/onlyOffset/i.test(this.nodeLocationTracking))this.recoveryEnabled?(this.setNodeLocationFromToken=tf.setNodeLocationOnlyOffset,this.setNodeLocationFromNode=tf.setNodeLocationOnlyOffset,this.cstPostRule=_r.NOOP,this.setInitialNodeLocation=this.setInitialNodeLocationOnlyOffsetRecovery):(this.setNodeLocationFromToken=_r.NOOP,this.setNodeLocationFromNode=_r.NOOP,this.cstPostRule=this.cstPostRuleOnlyOffset,this.setInitialNodeLocation=this.setInitialNodeLocationOnlyOffsetRegular);else if(/none/i.test(this.nodeLocationTracking))this.setNodeLocationFromToken=_r.NOOP,this.setNodeLocationFromNode=_r.NOOP,this.cstPostRule=_r.NOOP,this.setInitialNodeLocation=_r.NOOP;else throw Error('Invalid config option: "'+e.nodeLocationTracking+'"')},r.prototype.setInitialNodeLocationOnlyOffsetRecovery=function(e){e.location={startOffset:NaN,endOffset:NaN}},r.prototype.setInitialNodeLocationOnlyOffsetRegular=function(e){e.location={startOffset:this.LA(1).startOffset,endOffset:NaN}},r.prototype.setInitialNodeLocationFullRecovery=function(e){e.location={startOffset:NaN,startLine:NaN,startColumn:NaN,endOffset:NaN,endLine:NaN,endColumn:NaN}},r.prototype.setInitialNodeLocationFullRegular=function(e){var t=this.LA(1);e.location={startOffset:t.startOffset,startLine:t.startLine,startColumn:t.startColumn,endOffset:NaN,endLine:NaN,endColumn:NaN}},r.prototype.cstInvocationStateUpdate=function(e,t){var i={name:e,children:{}};this.setInitialNodeLocation(i),this.CST_STACK.push(i)},r.prototype.cstFinallyStateUpdate=function(){this.CST_STACK.pop()},r.prototype.cstPostRuleFull=function(e){var t=this.LA(0),i=e.location;i.startOffset<=t.startOffset?(i.endOffset=t.endOffset,i.endLine=t.endLine,i.endColumn=t.endColumn):(i.startOffset=NaN,i.startLine=NaN,i.startColumn=NaN)},r.prototype.cstPostRuleOnlyOffset=function(e){var t=this.LA(0),i=e.location;i.startOffset<=t.startOffset?i.endOffset=t.endOffset:i.startOffset=NaN},r.prototype.cstPostTerminal=function(e,t){var i=this.CST_STACK[this.CST_STACK.length-1];(0,tf.addTerminalToCst)(i,t,e),this.setNodeLocationFromToken(i.location,t)},r.prototype.cstPostNonTerminal=function(e,t){var i=this.CST_STACK[this.CST_STACK.length-1];(0,tf.addNoneTerminalToCst)(i,t,e),this.setNodeLocationFromNode(i.location,e.location)},r.prototype.getBaseCstVisitorConstructor=function(){if((0,_r.isUndefined)(this.baseCstVisitorConstructor)){var 
e=(0,Pq.createBaseSemanticVisitorConstructor)(this.className,(0,_r.keys)(this.gastProductionsCache));return this.baseCstVisitorConstructor=e,e}return this.baseCstVisitorConstructor},r.prototype.getBaseCstVisitorConstructorWithDefaults=function(){if((0,_r.isUndefined)(this.baseCstVisitorWithDefaultsConstructor)){var e=(0,Pq.createBaseVisitorConstructorWithDefaults)(this.className,(0,_r.keys)(this.gastProductionsCache),this.getBaseCstVisitorConstructor());return this.baseCstVisitorWithDefaultsConstructor=e,e}return this.baseCstVisitorWithDefaultsConstructor},r.prototype.getLastExplicitRuleShortName=function(){var e=this.RULE_STACK;return e[e.length-1]},r.prototype.getPreviousExplicitRuleShortName=function(){var e=this.RULE_STACK;return e[e.length-2]},r.prototype.getLastExplicitRuleOccurrenceIndex=function(){var e=this.RULE_OCCURRENCE_STACK;return e[e.length-1]},r}();py.TreeBuilder=dye});var Rq=w(dy=>{"use strict";Object.defineProperty(dy,"__esModule",{value:!0});dy.LexerAdapter=void 0;var kq=Gn(),Cye=function(){function r(){}return r.prototype.initLexerAdapter=function(){this.tokVector=[],this.tokVectorLength=0,this.currIdx=-1},Object.defineProperty(r.prototype,"input",{get:function(){return this.tokVector},set:function(e){if(this.selfAnalysisDone!==!0)throw Error("Missing <performSelfAnalysis> invocation at the end of the Parser's constructor.");this.reset(),this.tokVector=e,this.tokVectorLength=e.length},enumerable:!1,configurable:!0}),r.prototype.SKIP_TOKEN=function(){return this.currIdx<=this.tokVector.length-2?(this.consumeToken(),this.LA(1)):kq.END_OF_FILE},r.prototype.LA=function(e){var t=this.currIdx+e;return t<0||this.tokVectorLength<=t?kq.END_OF_FILE:this.tokVector[t]},r.prototype.consumeToken=function(){this.currIdx++},r.prototype.exportLexerState=function(){return this.currIdx},r.prototype.importLexerState=function(e){this.currIdx=e},r.prototype.resetLexerState=function(){this.currIdx=-1},r.prototype.moveToTerminatedState=function(){this.currIdx=this.tokVector.length-1},r.prototype.getLexerPosition=function(){return this.exportLexerState()},r}();dy.LexerAdapter=Cye});var Nq=w(Cy=>{"use strict";Object.defineProperty(Cy,"__esModule",{value:!0});Cy.RecognizerApi=void 0;var Fq=Gt(),mye=ef(),Ax=Gn(),Eye=Sd(),Iye=rx(),yye=dn(),wye=function(){function r(){}return r.prototype.ACTION=function(e){return e.call(this)},r.prototype.consume=function(e,t,i){return this.consumeInternal(t,e,i)},r.prototype.subrule=function(e,t,i){return this.subruleInternal(t,e,i)},r.prototype.option=function(e,t){return this.optionInternal(t,e)},r.prototype.or=function(e,t){return this.orInternal(t,e)},r.prototype.many=function(e,t){return this.manyInternal(e,t)},r.prototype.atLeastOne=function(e,t){return this.atLeastOneInternal(e,t)},r.prototype.CONSUME=function(e,t){return this.consumeInternal(e,0,t)},r.prototype.CONSUME1=function(e,t){return this.consumeInternal(e,1,t)},r.prototype.CONSUME2=function(e,t){return this.consumeInternal(e,2,t)},r.prototype.CONSUME3=function(e,t){return this.consumeInternal(e,3,t)},r.prototype.CONSUME4=function(e,t){return this.consumeInternal(e,4,t)},r.prototype.CONSUME5=function(e,t){return this.consumeInternal(e,5,t)},r.prototype.CONSUME6=function(e,t){return this.consumeInternal(e,6,t)},r.prototype.CONSUME7=function(e,t){return this.consumeInternal(e,7,t)},r.prototype.CONSUME8=function(e,t){return this.consumeInternal(e,8,t)},r.prototype.CONSUME9=function(e,t){return this.consumeInternal(e,9,t)},r.prototype.SUBRULE=function(e,t){return
this.subruleInternal(e,0,t)},r.prototype.SUBRULE1=function(e,t){return this.subruleInternal(e,1,t)},r.prototype.SUBRULE2=function(e,t){return this.subruleInternal(e,2,t)},r.prototype.SUBRULE3=function(e,t){return this.subruleInternal(e,3,t)},r.prototype.SUBRULE4=function(e,t){return this.subruleInternal(e,4,t)},r.prototype.SUBRULE5=function(e,t){return this.subruleInternal(e,5,t)},r.prototype.SUBRULE6=function(e,t){return this.subruleInternal(e,6,t)},r.prototype.SUBRULE7=function(e,t){return this.subruleInternal(e,7,t)},r.prototype.SUBRULE8=function(e,t){return this.subruleInternal(e,8,t)},r.prototype.SUBRULE9=function(e,t){return this.subruleInternal(e,9,t)},r.prototype.OPTION=function(e){return this.optionInternal(e,0)},r.prototype.OPTION1=function(e){return this.optionInternal(e,1)},r.prototype.OPTION2=function(e){return this.optionInternal(e,2)},r.prototype.OPTION3=function(e){return this.optionInternal(e,3)},r.prototype.OPTION4=function(e){return this.optionInternal(e,4)},r.prototype.OPTION5=function(e){return this.optionInternal(e,5)},r.prototype.OPTION6=function(e){return this.optionInternal(e,6)},r.prototype.OPTION7=function(e){return this.optionInternal(e,7)},r.prototype.OPTION8=function(e){return this.optionInternal(e,8)},r.prototype.OPTION9=function(e){return this.optionInternal(e,9)},r.prototype.OR=function(e){return this.orInternal(e,0)},r.prototype.OR1=function(e){return this.orInternal(e,1)},r.prototype.OR2=function(e){return this.orInternal(e,2)},r.prototype.OR3=function(e){return this.orInternal(e,3)},r.prototype.OR4=function(e){return this.orInternal(e,4)},r.prototype.OR5=function(e){return this.orInternal(e,5)},r.prototype.OR6=function(e){return this.orInternal(e,6)},r.prototype.OR7=function(e){return this.orInternal(e,7)},r.prototype.OR8=function(e){return this.orInternal(e,8)},r.prototype.OR9=function(e){return this.orInternal(e,9)},r.prototype.MANY=function(e){this.manyInternal(0,e)},r.prototype.MANY1=function(e){this.manyInternal(1,e)},r.prototype.MANY2=function(e){this.manyInternal(2,e)},r.prototype.MANY3=function(e){this.manyInternal(3,e)},r.prototype.MANY4=function(e){this.manyInternal(4,e)},r.prototype.MANY5=function(e){this.manyInternal(5,e)},r.prototype.MANY6=function(e){this.manyInternal(6,e)},r.prototype.MANY7=function(e){this.manyInternal(7,e)},r.prototype.MANY8=function(e){this.manyInternal(8,e)},r.prototype.MANY9=function(e){this.manyInternal(9,e)},r.prototype.MANY_SEP=function(e){this.manySepFirstInternal(0,e)},r.prototype.MANY_SEP1=function(e){this.manySepFirstInternal(1,e)},r.prototype.MANY_SEP2=function(e){this.manySepFirstInternal(2,e)},r.prototype.MANY_SEP3=function(e){this.manySepFirstInternal(3,e)},r.prototype.MANY_SEP4=function(e){this.manySepFirstInternal(4,e)},r.prototype.MANY_SEP5=function(e){this.manySepFirstInternal(5,e)},r.prototype.MANY_SEP6=function(e){this.manySepFirstInternal(6,e)},r.prototype.MANY_SEP7=function(e){this.manySepFirstInternal(7,e)},r.prototype.MANY_SEP8=function(e){this.manySepFirstInternal(8,e)},r.prototype.MANY_SEP9=function(e){this.manySepFirstInternal(9,e)},r.prototype.AT_LEAST_ONE=function(e){this.atLeastOneInternal(0,e)},r.prototype.AT_LEAST_ONE1=function(e){return 
this.atLeastOneInternal(1,e)},r.prototype.AT_LEAST_ONE2=function(e){this.atLeastOneInternal(2,e)},r.prototype.AT_LEAST_ONE3=function(e){this.atLeastOneInternal(3,e)},r.prototype.AT_LEAST_ONE4=function(e){this.atLeastOneInternal(4,e)},r.prototype.AT_LEAST_ONE5=function(e){this.atLeastOneInternal(5,e)},r.prototype.AT_LEAST_ONE6=function(e){this.atLeastOneInternal(6,e)},r.prototype.AT_LEAST_ONE7=function(e){this.atLeastOneInternal(7,e)},r.prototype.AT_LEAST_ONE8=function(e){this.atLeastOneInternal(8,e)},r.prototype.AT_LEAST_ONE9=function(e){this.atLeastOneInternal(9,e)},r.prototype.AT_LEAST_ONE_SEP=function(e){this.atLeastOneSepFirstInternal(0,e)},r.prototype.AT_LEAST_ONE_SEP1=function(e){this.atLeastOneSepFirstInternal(1,e)},r.prototype.AT_LEAST_ONE_SEP2=function(e){this.atLeastOneSepFirstInternal(2,e)},r.prototype.AT_LEAST_ONE_SEP3=function(e){this.atLeastOneSepFirstInternal(3,e)},r.prototype.AT_LEAST_ONE_SEP4=function(e){this.atLeastOneSepFirstInternal(4,e)},r.prototype.AT_LEAST_ONE_SEP5=function(e){this.atLeastOneSepFirstInternal(5,e)},r.prototype.AT_LEAST_ONE_SEP6=function(e){this.atLeastOneSepFirstInternal(6,e)},r.prototype.AT_LEAST_ONE_SEP7=function(e){this.atLeastOneSepFirstInternal(7,e)},r.prototype.AT_LEAST_ONE_SEP8=function(e){this.atLeastOneSepFirstInternal(8,e)},r.prototype.AT_LEAST_ONE_SEP9=function(e){this.atLeastOneSepFirstInternal(9,e)},r.prototype.RULE=function(e,t,i){if(i===void 0&&(i=Ax.DEFAULT_RULE_CONFIG),(0,Fq.contains)(this.definedRulesNames,e)){var n=Eye.defaultGrammarValidatorErrorProvider.buildDuplicateRuleNameError({topLevelRule:e,grammarName:this.className}),s={message:n,type:Ax.ParserDefinitionErrorType.DUPLICATE_RULE_NAME,ruleName:e};this.definitionErrors.push(s)}this.definedRulesNames.push(e);var o=this.defineRule(e,t,i);return this[e]=o,o},r.prototype.OVERRIDE_RULE=function(e,t,i){i===void 0&&(i=Ax.DEFAULT_RULE_CONFIG);var n=[];n=n.concat((0,Iye.validateRuleIsOverridden)(e,this.definedRulesNames,this.className)),this.definitionErrors=this.definitionErrors.concat(n);var s=this.defineRule(e,t,i);return this[e]=s,s},r.prototype.BACKTRACK=function(e,t){return function(){this.isBackTrackingStack.push(1);var i=this.saveRecogState();try{return e.apply(this,t),!0}catch(n){if((0,mye.isRecognitionException)(n))return!1;throw n}finally{this.reloadRecogState(i),this.isBackTrackingStack.pop()}}},r.prototype.getGAstProductions=function(){return this.gastProductionsCache},r.prototype.getSerializedGastProductions=function(){return(0,yye.serializeGrammar)((0,Fq.values)(this.gastProductionsCache))},r}();Cy.RecognizerApi=wye});var Mq=w(Ey=>{"use strict";Object.defineProperty(Ey,"__esModule",{value:!0});Ey.RecognizerEngine=void 0;var Pr=Gt(),Yn=fy(),my=ef(),Lq=Pd(),rf=xd(),Tq=Gn(),Bye=sx(),Oq=NA(),Fd=Vg(),Qye=ox(),bye=function(){function r(){}return r.prototype.initRecognizerEngine=function(e,t){if(this.className=(0,Qye.classNameFromInstance)(this),this.shortRuleNameToFull={},this.fullRuleNameToShort={},this.ruleShortNameIdx=256,this.tokenMatcher=Fd.tokenStructuredMatcherNoCategories,this.definedRulesNames=[],this.tokensMap={},this.isBackTrackingStack=[],this.RULE_STACK=[],this.RULE_OCCURRENCE_STACK=[],this.gastProductionsCache={},(0,Pr.has)(t,"serializedGrammar"))throw Error(`The Parser's configuration can no longer contain a <serializedGrammar> property.
+ See: https://chevrotain.io/docs/changes/BREAKING_CHANGES.html#_6-0-0
+ For Further details.`);if((0,Pr.isArray)(e)){if((0,Pr.isEmpty)(e))throw Error(`A Token Vocabulary cannot be empty.
+ Note that the first argument for the parser constructor
+ is no longer a Token vector (since v4.0).`);if(typeof e[0].startOffset=="number")throw Error(`The Parser constructor no longer accepts a token vector as the first argument.
+ See: https://chevrotain.io/docs/changes/BREAKING_CHANGES.html#_4-0-0
+ For Further details.`)}if((0,Pr.isArray)(e))this.tokensMap=(0,Pr.reduce)(e,function(o,a){return o[a.name]=a,o},{});else if((0,Pr.has)(e,"modes")&&(0,Pr.every)((0,Pr.flatten)((0,Pr.values)(e.modes)),Fd.isTokenType)){var i=(0,Pr.flatten)((0,Pr.values)(e.modes)),n=(0,Pr.uniq)(i);this.tokensMap=(0,Pr.reduce)(n,function(o,a){return o[a.name]=a,o},{})}else if((0,Pr.isObject)(e))this.tokensMap=(0,Pr.cloneObj)(e);else throw new Error("<tokensDictionary> argument must be An Array of Token constructors, A dictionary of Token constructors or an IMultiModeLexerDefinition");this.tokensMap.EOF=Oq.EOF;var s=(0,Pr.every)((0,Pr.values)(e),function(o){return(0,Pr.isEmpty)(o.categoryMatches)});this.tokenMatcher=s?Fd.tokenStructuredMatcherNoCategories:Fd.tokenStructuredMatcher,(0,Fd.augmentTokenTypes)((0,Pr.values)(this.tokensMap))},r.prototype.defineRule=function(e,t,i){if(this.selfAnalysisDone)throw Error("Grammar rule <"+e+`> may not be defined after the 'performSelfAnalysis' method has been called'
+Make sure that all grammar rule definitions are done before 'performSelfAnalysis' is called.`);var n=(0,Pr.has)(i,"resyncEnabled")?i.resyncEnabled:Tq.DEFAULT_RULE_CONFIG.resyncEnabled,s=(0,Pr.has)(i,"recoveryValueFunc")?i.recoveryValueFunc:Tq.DEFAULT_RULE_CONFIG.recoveryValueFunc,o=this.ruleShortNameIdx<t},r.prototype.orInternal=function(e,t){var i=this.getKeyForAutomaticLookahead(Yn.OR_IDX,t),n=(0,Pr.isArray)(e)?e:e.DEF,s=this.getLaFuncFromCache(i),o=s.call(this,n);if(o!==void 0){var a=n[o];return a.ALT.call(this)}this.raiseNoAltException(t,e.ERR_MSG)},r.prototype.ruleFinallyStateUpdate=function(){if(this.RULE_STACK.pop(),this.RULE_OCCURRENCE_STACK.pop(),this.cstFinallyStateUpdate(),this.RULE_STACK.length===0&&this.isAtEndOfInput()===!1){var e=this.LA(1),t=this.errorMessageProvider.buildNotAllInputParsedMessage({firstRedundant:e,ruleName:this.getCurrRuleFullName()});this.SAVE_ERROR(new my.NotAllInputParsedException(t,e))}},r.prototype.subruleInternal=function(e,t,i){var n;try{var s=i!==void 0?i.ARGS:void 0;return n=e.call(this,t,s),this.cstPostNonTerminal(n,i!==void 0&&i.LABEL!==void 0?i.LABEL:e.ruleName),n}catch(o){this.subruleInternalError(o,i,e.ruleName)}},r.prototype.subruleInternalError=function(e,t,i){throw(0,my.isRecognitionException)(e)&&e.partialCstResult!==void 0&&(this.cstPostNonTerminal(e.partialCstResult,t!==void 0&&t.LABEL!==void 0?t.LABEL:i),delete e.partialCstResult),e},r.prototype.consumeInternal=function(e,t,i){var n;try{var s=this.LA(1);this.tokenMatcher(s,e)===!0?(this.consumeToken(),n=s):this.consumeInternalError(e,s,i)}catch(o){n=this.consumeInternalRecovery(e,t,o)}return this.cstPostTerminal(i!==void 0&&i.LABEL!==void 0?i.LABEL:e.name,n),n},r.prototype.consumeInternalError=function(e,t,i){var n,s=this.LA(0);throw i!==void 0&&i.ERR_MSG?n=i.ERR_MSG:n=this.errorMessageProvider.buildMismatchTokenMessage({expected:e,actual:t,previous:s,ruleName:this.getCurrRuleFullName()}),this.SAVE_ERROR(new my.MismatchedTokenException(n,t,s))},r.prototype.consumeInternalRecovery=function(e,t,i){if(this.recoveryEnabled&&i.name==="MismatchedTokenException"&&!this.isBackTracking()){var n=this.getFollowsForInRuleRecovery(e,t);try{return this.tryInRuleRecovery(e,n)}catch(s){throw s.name===Bye.IN_RULE_RECOVERY_EXCEPTION?i:s}}else
throw i},r.prototype.saveRecogState=function(){var e=this.errors,t=(0,Pr.cloneArr)(this.RULE_STACK);return{errors:e,lexerState:this.exportLexerState(),RULE_STACK:t,CST_STACK:this.CST_STACK}},r.prototype.reloadRecogState=function(e){this.errors=e.errors,this.importLexerState(e.lexerState),this.RULE_STACK=e.RULE_STACK},r.prototype.ruleInvocationStateUpdate=function(e,t,i){this.RULE_OCCURRENCE_STACK.push(i),this.RULE_STACK.push(e),this.cstInvocationStateUpdate(t,e)},r.prototype.isBackTracking=function(){return this.isBackTrackingStack.length!==0},r.prototype.getCurrRuleFullName=function(){var e=this.getLastExplicitRuleShortName();return this.shortRuleNameToFull[e]},r.prototype.shortRuleNameToFullName=function(e){return this.shortRuleNameToFull[e]},r.prototype.isAtEndOfInput=function(){return this.tokenMatcher(this.LA(1),Oq.EOF)},r.prototype.reset=function(){this.resetLexerState(),this.isBackTrackingStack=[],this.errors=[],this.RULE_STACK=[],this.CST_STACK=[],this.RULE_OCCURRENCE_STACK=[]},r}();Ey.RecognizerEngine=bye});var Uq=w(Iy=>{"use strict";Object.defineProperty(Iy,"__esModule",{value:!0});Iy.ErrorHandler=void 0;var lx=ef(),cx=Gt(),Kq=Pd(),Sye=Gn(),vye=function(){function r(){}return r.prototype.initErrorHandler=function(e){this._errors=[],this.errorMessageProvider=(0,cx.has)(e,"errorMessageProvider")?e.errorMessageProvider:Sye.DEFAULT_PARSER_CONFIG.errorMessageProvider},r.prototype.SAVE_ERROR=function(e){if((0,lx.isRecognitionException)(e))return e.context={ruleStack:this.getHumanReadableRuleStack(),ruleOccurrenceStack:(0,cx.cloneArr)(this.RULE_OCCURRENCE_STACK)},this._errors.push(e),e;throw Error("Trying to save an Error which is not a RecognitionException")},Object.defineProperty(r.prototype,"errors",{get:function(){return(0,cx.cloneArr)(this._errors)},set:function(e){this._errors=e},enumerable:!1,configurable:!0}),r.prototype.raiseEarlyExitException=function(e,t,i){for(var n=this.getCurrRuleFullName(),s=this.getGAstProductions()[n],o=(0,Kq.getLookaheadPathsForOptionalProd)(e,s,t,this.maxLookahead),a=o[0],l=[],c=1;c<=this.maxLookahead;c++)l.push(this.LA(c));var u=this.errorMessageProvider.buildEarlyExitMessage({expectedIterationPaths:a,actual:l,previous:this.LA(0),customUserDescription:i,ruleName:n});throw this.SAVE_ERROR(new lx.EarlyExitException(u,this.LA(1),this.LA(0)))},r.prototype.raiseNoAltException=function(e,t){for(var i=this.getCurrRuleFullName(),n=this.getGAstProductions()[i],s=(0,Kq.getLookaheadPathsForOr)(e,n,this.maxLookahead),o=[],a=1;a<=this.maxLookahead;a++)o.push(this.LA(a));var l=this.LA(0),c=this.errorMessageProvider.buildNoViableAltMessage({expectedPathsPerAlt:s,actual:o,previous:l,customUserDescription:t,ruleName:this.getCurrRuleFullName()});throw this.SAVE_ERROR(new lx.NoViableAltException(c,this.LA(1),l))},r}();Iy.ErrorHandler=vye});var Yq=w(yy=>{"use strict";Object.defineProperty(yy,"__esModule",{value:!0});yy.ContentAssist=void 0;var Hq=xd(),Gq=Gt(),xye=function(){function r(){}return r.prototype.initContentAssist=function(){},r.prototype.computeContentAssist=function(e,t){var i=this.gastProductionsCache[e];if((0,Gq.isUndefined)(i))throw Error("Rule ->"+e+"<- does not exist in this grammar.");return(0,Hq.nextPossibleTokensAfter)([i],t,this.tokenMatcher,this.maxLookahead)},r.prototype.getNextPossibleTokenTypes=function(e){var t=(0,Gq.first)(e.ruleStack),i=this.getGAstProductions(),n=i[t],s=new Hq.NextAfterTokenWalker(n,e).startWalking();return s},r}();yy.ContentAssist=xye});var Zq=w(Qy=>{"use 
strict";Object.defineProperty(Qy,"__esModule",{value:!0});Qy.GastRecorder=void 0;var En=Gt(),To=dn(),Pye=yd(),Wq=Vg(),zq=NA(),Dye=Gn(),kye=fy(),By={description:"This Object indicates the Parser is during Recording Phase"};Object.freeze(By);var jq=!0,qq=Math.pow(2,kye.BITS_FOR_OCCURRENCE_IDX)-1,Vq=(0,zq.createToken)({name:"RECORDING_PHASE_TOKEN",pattern:Pye.Lexer.NA});(0,Wq.augmentTokenTypes)([Vq]);var Xq=(0,zq.createTokenInstance)(Vq,`This IToken indicates the Parser is in Recording Phase + See: https://chevrotain.io/docs/guide/internals.html#grammar-recording for details`,-1,-1,-1,-1,-1,-1);Object.freeze(Xq);var Rye={name:`This CSTNode indicates the Parser is in Recording Phase + See: https://chevrotain.io/docs/guide/internals.html#grammar-recording for details`,children:{}},Fye=function(){function r(){}return r.prototype.initGastRecorder=function(e){this.recordingProdStack=[],this.RECORDING_PHASE=!1},r.prototype.enableRecording=function(){var e=this;this.RECORDING_PHASE=!0,this.TRACE_INIT("Enable Recording",function(){for(var t=function(n){var s=n>0?n:"";e["CONSUME"+s]=function(o,a){return this.consumeInternalRecord(o,n,a)},e["SUBRULE"+s]=function(o,a){return this.subruleInternalRecord(o,n,a)},e["OPTION"+s]=function(o){return this.optionInternalRecord(o,n)},e["OR"+s]=function(o){return this.orInternalRecord(o,n)},e["MANY"+s]=function(o){this.manyInternalRecord(n,o)},e["MANY_SEP"+s]=function(o){this.manySepFirstInternalRecord(n,o)},e["AT_LEAST_ONE"+s]=function(o){this.atLeastOneInternalRecord(n,o)},e["AT_LEAST_ONE_SEP"+s]=function(o){this.atLeastOneSepFirstInternalRecord(n,o)}},i=0;i<10;i++)t(i);e.consume=function(n,s,o){return this.consumeInternalRecord(s,n,o)},e.subrule=function(n,s,o){return this.subruleInternalRecord(s,n,o)},e.option=function(n,s){return this.optionInternalRecord(s,n)},e.or=function(n,s){return this.orInternalRecord(s,n)},e.many=function(n,s){this.manyInternalRecord(n,s)},e.atLeastOne=function(n,s){this.atLeastOneInternalRecord(n,s)},e.ACTION=e.ACTION_RECORD,e.BACKTRACK=e.BACKTRACK_RECORD,e.LA=e.LA_RECORD})},r.prototype.disableRecording=function(){var e=this;this.RECORDING_PHASE=!1,this.TRACE_INIT("Deleting Recording methods",function(){for(var t=0;t<10;t++){var i=t>0?t:"";delete e["CONSUME"+i],delete e["SUBRULE"+i],delete e["OPTION"+i],delete e["OR"+i],delete e["MANY"+i],delete e["MANY_SEP"+i],delete e["AT_LEAST_ONE"+i],delete e["AT_LEAST_ONE_SEP"+i]}delete e.consume,delete e.subrule,delete e.option,delete e.or,delete e.many,delete e.atLeastOne,delete e.ACTION,delete e.BACKTRACK,delete e.LA})},r.prototype.ACTION_RECORD=function(e){},r.prototype.BACKTRACK_RECORD=function(e,t){return function(){return!0}},r.prototype.LA_RECORD=function(e){return Dye.END_OF_FILE},r.prototype.topLevelRuleRecord=function(e,t){try{var i=new To.Rule({definition:[],name:e});return i.name=e,this.recordingProdStack.push(i),t.call(this),this.recordingProdStack.pop(),i}catch(n){if(n.KNOWN_RECORDER_ERROR!==!0)try{n.message=n.message+` + This error was thrown during the "grammar recording phase" For more info see: + https://chevrotain.io/docs/guide/internals.html#grammar-recording`}catch{throw n}throw n}},r.prototype.optionInternalRecord=function(e,t){return 
Nd.call(this,To.Option,e,t)},r.prototype.atLeastOneInternalRecord=function(e,t){Nd.call(this,To.RepetitionMandatory,t,e)},r.prototype.atLeastOneSepFirstInternalRecord=function(e,t){Nd.call(this,To.RepetitionMandatoryWithSeparator,t,e,jq)},r.prototype.manyInternalRecord=function(e,t){Nd.call(this,To.Repetition,t,e)},r.prototype.manySepFirstInternalRecord=function(e,t){Nd.call(this,To.RepetitionWithSeparator,t,e,jq)},r.prototype.orInternalRecord=function(e,t){return Nye.call(this,e,t)},r.prototype.subruleInternalRecord=function(e,t,i){if(wy(t),!e||(0,En.has)(e,"ruleName")===!1){var n=new Error(" argument is invalid"+(" expecting a Parser method reference but got: <"+JSON.stringify(e)+">")+(` + inside top level rule: <`+this.recordingProdStack[0].name+">"));throw n.KNOWN_RECORDER_ERROR=!0,n}var s=(0,En.peek)(this.recordingProdStack),o=e.ruleName,a=new To.NonTerminal({idx:t,nonTerminalName:o,label:i==null?void 0:i.LABEL,referencedRule:void 0});return s.definition.push(a),this.outputCst?Rye:By},r.prototype.consumeInternalRecord=function(e,t,i){if(wy(t),!(0,Wq.hasShortKeyProperty)(e)){var n=new Error(" argument is invalid"+(" expecting a TokenType reference but got: <"+JSON.stringify(e)+">")+(` + inside top level rule: <`+this.recordingProdStack[0].name+">"));throw n.KNOWN_RECORDER_ERROR=!0,n}var s=(0,En.peek)(this.recordingProdStack),o=new To.Terminal({idx:t,terminalType:e,label:i==null?void 0:i.LABEL});return s.definition.push(o),Xq},r}();Qy.GastRecorder=Fye;function Nd(r,e,t,i){i===void 0&&(i=!1),wy(t);var n=(0,En.peek)(this.recordingProdStack),s=(0,En.isFunction)(e)?e:e.DEF,o=new r({definition:[],idx:t});return i&&(o.separator=e.SEP),(0,En.has)(e,"MAX_LOOKAHEAD")&&(o.maxLookahead=e.MAX_LOOKAHEAD),this.recordingProdStack.push(o),s.call(this),n.definition.push(o),this.recordingProdStack.pop(),By}function Nye(r,e){var t=this;wy(e);var i=(0,En.peek)(this.recordingProdStack),n=(0,En.isArray)(r)===!1,s=n===!1?r:r.DEF,o=new To.Alternation({definition:[],idx:e,ignoreAmbiguities:n&&r.IGNORE_AMBIGUITIES===!0});(0,En.has)(r,"MAX_LOOKAHEAD")&&(o.maxLookahead=r.MAX_LOOKAHEAD);var a=(0,En.some)(s,function(l){return(0,En.isFunction)(l.GATE)});return o.hasPredicates=a,i.definition.push(o),(0,En.forEach)(s,function(l){var c=new To.Alternative({definition:[]});o.definition.push(c),(0,En.has)(l,"IGNORE_AMBIGUITIES")?c.ignoreAmbiguities=l.IGNORE_AMBIGUITIES:(0,En.has)(l,"GATE")&&(c.ignoreAmbiguities=!0),t.recordingProdStack.push(c),l.ALT.call(t),t.recordingProdStack.pop()}),By}function Jq(r){return r===0?"":""+r}function wy(r){if(r<0||r>qq){var e=new Error("Invalid DSL Method idx value: <"+r+`> + `+("Idx value must be a none negative value smaller than "+(qq+1)));throw e.KNOWN_RECORDER_ERROR=!0,e}}});var $q=w(by=>{"use strict";Object.defineProperty(by,"__esModule",{value:!0});by.PerformanceTracer=void 0;var _q=Gt(),Lye=Gn(),Tye=function(){function r(){}return r.prototype.initPerformanceTracer=function(e){if((0,_q.has)(e,"traceInitPerf")){var t=e.traceInitPerf,i=typeof t=="number";this.traceInitMaxIdent=i?t:1/0,this.traceInitPerf=i?t>0:t}else this.traceInitMaxIdent=0,this.traceInitPerf=Lye.DEFAULT_PARSER_CONFIG.traceInitPerf;this.traceInitIndent=-1},r.prototype.TRACE_INIT=function(e,t){if(this.traceInitPerf===!0){this.traceInitIndent++;var i=new Array(this.traceInitIndent+1).join(" ");this.traceInitIndent <"+e+">");var n=(0,_q.timer)(t),s=n.time,o=n.value,a=s>10?console.warn:console.log;return this.traceInitIndent time: "+s+"ms"),this.traceInitIndent--,o}else return t()},r}();by.PerformanceTracer=Tye});var 
eJ=w(Sy=>{"use strict";Object.defineProperty(Sy,"__esModule",{value:!0});Sy.applyMixins=void 0;function Oye(r,e){e.forEach(function(t){var i=t.prototype;Object.getOwnPropertyNames(i).forEach(function(n){if(n!=="constructor"){var s=Object.getOwnPropertyDescriptor(i,n);s&&(s.get||s.set)?Object.defineProperty(r.prototype,n,s):r.prototype[n]=t.prototype[n]}})})}Sy.applyMixins=Oye});var Gn=w(dr=>{"use strict";var iJ=dr&&dr.__extends||function(){var r=function(e,t){return r=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},r(e,t)};return function(e,t){if(typeof t!="function"&&t!==null)throw new TypeError("Class extends value "+String(t)+" is not a constructor or null");r(e,t);function i(){this.constructor=e}e.prototype=t===null?Object.create(t):(i.prototype=t.prototype,new i)}}();Object.defineProperty(dr,"__esModule",{value:!0});dr.EmbeddedActionsParser=dr.CstParser=dr.Parser=dr.EMPTY_ALT=dr.ParserDefinitionErrorType=dr.DEFAULT_RULE_CONFIG=dr.DEFAULT_PARSER_CONFIG=dr.END_OF_FILE=void 0;var _i=Gt(),Mye=Uj(),tJ=NA(),nJ=Sd(),rJ=gq(),Kye=sx(),Uye=Iq(),Hye=Dq(),Gye=Rq(),Yye=Nq(),jye=Mq(),qye=Uq(),Jye=Yq(),Wye=Zq(),zye=$q(),Vye=eJ();dr.END_OF_FILE=(0,tJ.createTokenInstance)(tJ.EOF,"",NaN,NaN,NaN,NaN,NaN,NaN);Object.freeze(dr.END_OF_FILE);dr.DEFAULT_PARSER_CONFIG=Object.freeze({recoveryEnabled:!1,maxLookahead:3,dynamicTokensEnabled:!1,outputCst:!0,errorMessageProvider:nJ.defaultParserErrorProvider,nodeLocationTracking:"none",traceInitPerf:!1,skipValidations:!1});dr.DEFAULT_RULE_CONFIG=Object.freeze({recoveryValueFunc:function(){},resyncEnabled:!0});var Xye;(function(r){r[r.INVALID_RULE_NAME=0]="INVALID_RULE_NAME",r[r.DUPLICATE_RULE_NAME=1]="DUPLICATE_RULE_NAME",r[r.INVALID_RULE_OVERRIDE=2]="INVALID_RULE_OVERRIDE",r[r.DUPLICATE_PRODUCTIONS=3]="DUPLICATE_PRODUCTIONS",r[r.UNRESOLVED_SUBRULE_REF=4]="UNRESOLVED_SUBRULE_REF",r[r.LEFT_RECURSION=5]="LEFT_RECURSION",r[r.NONE_LAST_EMPTY_ALT=6]="NONE_LAST_EMPTY_ALT",r[r.AMBIGUOUS_ALTS=7]="AMBIGUOUS_ALTS",r[r.CONFLICT_TOKENS_RULES_NAMESPACE=8]="CONFLICT_TOKENS_RULES_NAMESPACE",r[r.INVALID_TOKEN_NAME=9]="INVALID_TOKEN_NAME",r[r.NO_NON_EMPTY_LOOKAHEAD=10]="NO_NON_EMPTY_LOOKAHEAD",r[r.AMBIGUOUS_PREFIX_ALTS=11]="AMBIGUOUS_PREFIX_ALTS",r[r.TOO_MANY_ALTS=12]="TOO_MANY_ALTS"})(Xye=dr.ParserDefinitionErrorType||(dr.ParserDefinitionErrorType={}));function Zye(r){return r===void 0&&(r=void 0),function(){return r}}dr.EMPTY_ALT=Zye;var vy=function(){function r(e,t){this.definitionErrors=[],this.selfAnalysisDone=!1;var i=this;if(i.initErrorHandler(t),i.initLexerAdapter(),i.initLooksAhead(t),i.initRecognizerEngine(e,t),i.initRecoverable(t),i.initTreeBuilder(t),i.initContentAssist(),i.initGastRecorder(t),i.initPerformanceTracer(t),(0,_i.has)(t,"ignoredIssues"))throw new Error(`The IParserConfig property has been deprecated. + Please use the flag on the relevant DSL method instead. + See: https://chevrotain.io/docs/guide/resolving_grammar_errors.html#IGNORING_AMBIGUITIES + For further details.`);this.skipValidations=(0,_i.has)(t,"skipValidations")?t.skipValidations:dr.DEFAULT_PARSER_CONFIG.skipValidations}return r.performSelfAnalysis=function(e){throw Error("The **static** `performSelfAnalysis` method has been deprecated. 
\nUse the **instance** method with the same name instead.")},r.prototype.performSelfAnalysis=function(){var e=this;this.TRACE_INIT("performSelfAnalysis",function(){var t;e.selfAnalysisDone=!0;var i=e.className;e.TRACE_INIT("toFastProps",function(){(0,_i.toFastProperties)(e)}),e.TRACE_INIT("Grammar Recording",function(){try{e.enableRecording(),(0,_i.forEach)(e.definedRulesNames,function(s){var o=e[s],a=o.originalGrammarAction,l=void 0;e.TRACE_INIT(s+" Rule",function(){l=e.topLevelRuleRecord(s,a)}),e.gastProductionsCache[s]=l})}finally{e.disableRecording()}});var n=[];if(e.TRACE_INIT("Grammar Resolving",function(){n=(0,rJ.resolveGrammar)({rules:(0,_i.values)(e.gastProductionsCache)}),e.definitionErrors=e.definitionErrors.concat(n)}),e.TRACE_INIT("Grammar Validations",function(){if((0,_i.isEmpty)(n)&&e.skipValidations===!1){var s=(0,rJ.validateGrammar)({rules:(0,_i.values)(e.gastProductionsCache),maxLookahead:e.maxLookahead,tokenTypes:(0,_i.values)(e.tokensMap),errMsgProvider:nJ.defaultGrammarValidatorErrorProvider,grammarName:i});e.definitionErrors=e.definitionErrors.concat(s)}}),(0,_i.isEmpty)(e.definitionErrors)&&(e.recoveryEnabled&&e.TRACE_INIT("computeAllProdsFollows",function(){var s=(0,Mye.computeAllProdsFollows)((0,_i.values)(e.gastProductionsCache));e.resyncFollows=s}),e.TRACE_INIT("ComputeLookaheadFunctions",function(){e.preComputeLookaheadFunctions((0,_i.values)(e.gastProductionsCache))})),!r.DEFER_DEFINITION_ERRORS_HANDLING&&!(0,_i.isEmpty)(e.definitionErrors))throw t=(0,_i.map)(e.definitionErrors,function(s){return s.message}),new Error(`Parser Definition Errors detected: + `+t.join(` +------------------------------- +`))})},r.DEFER_DEFINITION_ERRORS_HANDLING=!1,r}();dr.Parser=vy;(0,Vye.applyMixins)(vy,[Kye.Recoverable,Uye.LooksAhead,Hye.TreeBuilder,Gye.LexerAdapter,jye.RecognizerEngine,Yye.RecognizerApi,qye.ErrorHandler,Jye.ContentAssist,Wye.GastRecorder,zye.PerformanceTracer]);var _ye=function(r){iJ(e,r);function e(t,i){i===void 0&&(i=dr.DEFAULT_PARSER_CONFIG);var n=this,s=(0,_i.cloneObj)(i);return s.outputCst=!0,n=r.call(this,t,s)||this,n}return e}(vy);dr.CstParser=_ye;var $ye=function(r){iJ(e,r);function e(t,i){i===void 0&&(i=dr.DEFAULT_PARSER_CONFIG);var n=this,s=(0,_i.cloneObj)(i);return s.outputCst=!1,n=r.call(this,t,s)||this,n}return e}(vy);dr.EmbeddedActionsParser=$ye});var oJ=w(xy=>{"use strict";Object.defineProperty(xy,"__esModule",{value:!0});xy.createSyntaxDiagramsCode=void 0;var sJ=Dv();function ewe(r,e){var t=e===void 0?{}:e,i=t.resourceBase,n=i===void 0?"https://unpkg.com/chevrotain@"+sJ.VERSION+"/diagrams/":i,s=t.css,o=s===void 0?"https://unpkg.com/chevrotain@"+sJ.VERSION+"/diagrams/diagrams.css":s,a=` + + + + + +`,l=` + +`,c=` + + + + {{content-for "body-footer"}} + + diff --git a/ui/app/initializers/deprecation-filter.js b/ui/app/initializers/deprecation-filter.js new file mode 100644 index 0000000..905c610 --- /dev/null +++ b/ui/app/initializers/deprecation-filter.js @@ -0,0 +1,20 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + + import { registerDeprecationHandler } from '@ember/debug'; + + // https://guides.emberjs.com/release/configuring-ember/handling-deprecations/#toc_filtering-deprecations + export function initialize() { + registerDeprecationHandler((message, options, next) => { + // filter deprecations that are scheduled to be removed in a specific version + // when upgrading or addressing deprecation warnings, be sure to update this filter or remove it if no longer needed + if (options?.until !== '5.0.0') { + next(message, options); + } + return; + }); + } + + export default { initialize }; diff --git a/ui/app/initializers/disable-ember-inspector.js b/ui/app/initializers/disable-ember-inspector.js new file mode 100644 index 0000000..825e6d7 --- /dev/null +++ b/ui/app/initializers/disable-ember-inspector.js @@ -0,0 +1,16 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + + import config from '../config/environment'; + + export default { + name: 'ember-inspect-disable', + initialize: function () { + if (config.environment === 'production') { + // disables ember inspector + window.NO_EMBER_DEBUG = true; + } + }, + }; diff --git a/ui/app/initializers/ember-data-identifiers.js b/ui/app/initializers/ember-data-identifiers.js new file mode 100644 index 0000000..22733cf --- /dev/null +++ b/ui/app/initializers/ember-data-identifiers.js @@ -0,0 +1,31 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + + import { setIdentifierGenerationMethod } from '@ember-data/store'; + import { dasherize } from '@ember/string'; + import { v4 as uuidv4 } from 'uuid'; + + export function initialize() { + // see this GH issue for more information https://github.com/emberjs/data/issues/8106 + // Ember Data uses the uuidv4 library to generate ids, which relies on the crypto API that is not available in insecure contexts + // the suggested polyfill was added in 4.6.2, so until we upgrade we need to define our own id generation method + // https://api.emberjs.com/ember-data/4.5/classes/IdentifierCache/methods/getOrCreateRecordIdentifier?anchor=getOrCreateRecordIdentifier + // the uuid library was brought in to replace other usages of crypto in the app, so it is safe to use in insecure contexts + // adapted from defaultGenerationMethod -- https://github.com/emberjs/data/blob/v4.5.0/packages/store/addon/-private/identifier-cache.ts#LL82-L94C2 + setIdentifierGenerationMethod((data) => { + if (data.lid) { + return data.lid; + } + if (data.id) { + return `@lid:${dasherize(data.type)}-${data.id}`; + } + return uuidv4(); + }); + } + + export default { + name: 'ember-data-identifiers', + initialize, + }; diff --git a/ui/app/initializers/enable-engines.js b/ui/app/initializers/enable-engines.js new file mode 100644 index 0000000..d27ed7a --- /dev/null +++ b/ui/app/initializers/enable-engines.js @@ -0,0 +1,18 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + + import config from '../config/environment'; + + export function initialize(/* application */) { + // attach mount hooks to the environment config + // context will be the router DSL + config.addRootMounts = function () { + this.mount('replication'); + }; + } + + export default { + initialize, + };
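For orientation, the identifier method registered in ember-data-identifiers.js above yields results along these lines (a sketch; `generateId` is a stand-in name for the callback passed to setIdentifierGenerationMethod, and the values are illustrative):

generateId({ lid: '@lid:user-1' });         // => '@lid:user-1' (an existing lid always wins)
generateId({ type: 'kvSecret', id: '42' }); // => '@lid:kv-secret-42' (dasherized type plus id)
generateId({ type: 'user' });               // => a random v4 uuid, e.g. '9b1deb4d-3b7d-4bad-9bdd-2b0d7b3dcb6d'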
diff --git a/ui/app/instance-initializers/track-csp-event.js b/ui/app/instance-initializers/track-csp-event.js new file mode 100644 index 0000000..c5225f2 --- /dev/null +++ b/ui/app/instance-initializers/track-csp-event.js @@ -0,0 +1,14 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + + export function initialize(appInstance) { + const service = appInstance.lookup('service:csp-event'); + service.attach(); + } + + export default { + name: 'track-csp-event', + initialize, + }; diff --git a/ui/app/lib/arg-tokenizer.js b/ui/app/lib/arg-tokenizer.js new file mode 100644 index 0000000..9ee9cb2 --- /dev/null +++ b/ui/app/lib/arg-tokenizer.js @@ -0,0 +1,45 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + + // taken from https://github.com/yargs/yargs-parser/blob/v13.1.0/lib/tokenize-arg-string.js to get around an import issue + // takes an un-split argv string and tokenizes it. + export default function (argString) { + if (Array.isArray(argString)) return argString; + + argString = argString.trim(); + + var i = 0; + var prevC = null; + var c = null; + var opening = null; + var args = []; + + for (var ii = 0; ii < argString.length; ii++) { + prevC = c; + c = argString.charAt(ii); + + // split on spaces unless we're in quotes. + if (c === ' ' && !opening) { + if (!(prevC === ' ')) { + i++; + } + continue; + } + + // don't split the string if we're in matching + // opening or closing single and double quotes. + if (c === opening) { + if (!args[i]) args[i] = ''; + opening = null; + } else if ((c === "'" || c === '"') && !opening) { + opening = c; + } + + if (!args[i]) args[i] = ''; + args[i] += c; + } + + return args; + } diff --git a/ui/app/lib/attach-capabilities.js b/ui/app/lib/attach-capabilities.js new file mode 100644 index 0000000..b275a75 --- /dev/null +++ b/ui/app/lib/attach-capabilities.js @@ -0,0 +1,87 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + + import { belongsTo } from '@ember-data/model'; + import { assert, debug } from '@ember/debug'; + import { typeOf } from '@ember/utils'; + import { isArray } from '@ember/array'; + + /* + * + * attachCapabilities + * + * @param modelClass - an Ember Data model class + * @param capabilities - an Object whose keys will be added to the model class as related 'capabilities' models + * and whose values should be functions that return the id of the related capabilities model + * + * definition of capabilities can be done shorthand with the apiPath tagged template function + * + * + * @usage + * + * let Model = DS.Model.extend({ + * backend: attr(), + * scope: attr(), + * }); + * + * export default attachCapabilities(Model, { + * updatePath: apiPath`${'backend'}/scope/${'scope'}/role/${'id'}`, + * }); + * + */ + export default function attachCapabilities(modelClass, capabilities) { + const capabilityKeys = Object.keys(capabilities); + const newRelationships = capabilityKeys.reduce((ret, key) => { + ret[key] = belongsTo('capabilities'); + return ret; + }, {}); + + //TODO: move this to the application serializer and do it JIT instead of on app boot + debug(`adding new relationships: ${capabilityKeys.join(', ')} to ${modelClass.toString()}`); + modelClass.reopen(newRelationships); + modelClass.reopenClass({ + // relatedCapabilities is called in the application serializer's + // normalizeResponse hook to add the capabilities relationships to the + // JSON-API document used by Ember Data + relatedCapabilities(jsonAPIDoc) { + let { data, included } = jsonAPIDoc; + if (!data) { + data = jsonAPIDoc; + } + if (isArray(data)) { + const newData = data.map(this.relatedCapabilities); + return { + data: newData, + included, + }; + } + const context = { + id: data.id, + ...data.attributes, + }; + for (const newCapability of capabilityKeys) { + const 
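/* each capability value is a template function (e.g. one built with the apiPath tagged template) that renders the id of the related capabilities record from the model's id and attributes */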
templateFn = capabilities[newCapability]; + const type = typeOf(templateFn); + assert(`expected value of ${newCapability} to be a function but found ${type}.`, type === 'function'); + data.relationships[newCapability] = { + data: { + type: 'capabilities', + id: templateFn(context), + }, + }; + } + + if (included) { + return { + data, + included, + }; + } else { + return data; + } + }, + }); + return modelClass; +} diff --git a/ui/app/lib/console-helpers.ts b/ui/app/lib/console-helpers.ts new file mode 100644 index 0000000..bc636d5 --- /dev/null +++ b/ui/app/lib/console-helpers.ts @@ -0,0 +1,258 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import keys from 'vault/lib/keycodes'; +import AdapterError from '@ember-data/adapter/error'; +import { parse } from 'shell-quote'; + +import argTokenizer from './arg-tokenizer'; +import { StringMap } from 'vault/vault/app-types'; + +// Add new commands to `log-help` component for visibility +const supportedCommands = ['read', 'write', 'list', 'delete', 'kv-get']; +const uiCommands = ['api', 'clearall', 'clear', 'fullscreen', 'refresh']; + +interface DataObj { + [key: string]: string | string[]; +} + +export function extractDataFromStrings(dataArray: string[]): DataObj { + if (!dataArray) return {}; + return dataArray.reduce((accumulator: DataObj, val: string) => { + // will be "key=value" or "foo=bar=baz" + // split on the first = + // default to value of empty string + const [item = '', value = ''] = val.split(/=(.+)?/); + if (!item) return accumulator; + + // if it exists in data already, then we have multiple + // foo=bar in the list and need to make it an array + const existingValue = accumulator[item]; + if (existingValue) { + accumulator[item] = Array.isArray(existingValue) ? [...existingValue, value] : [existingValue, value]; + return accumulator; + } + accumulator[item] = value; + return accumulator; + }, {}); +} + +interface Flags { + field?: string; + format?: string; + force?: boolean; + wrapTTL?: boolean; + [key: string]: string | boolean | undefined; +} +export function extractFlagsFromStrings(flagArray: string[], method: string): Flags { + if (!flagArray) return {}; + return flagArray.reduce((accumulator: Flags, val: string) => { + // val will be "-flag=value" or "--force" + // split on the first = + // default to value or true + const [item, value] = val.split(/=(.+)?/); + if (!item) return accumulator; + + let flagName = item.replace(/^-/, ''); + if (flagName === 'wrap-ttl') { + flagName = 'wrapTTL'; + } else if (method === 'write') { + if (flagName === 'f' || flagName === '-force') { + flagName = 'force'; + } + } + accumulator[flagName] = value || true; + return accumulator; + }, {}); +} + +interface CommandFns { + [key: string]: CallableFunction; +} + +export function executeUICommand( + command: string, + logAndOutput: CallableFunction, + commandFns: CommandFns +): boolean { + const cmd = command.startsWith('api') ? 
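/* any command starting with "api" collapses to the single "api" UI command; the other UI commands must match the input exactly */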
'api' : command; + const isUICommand = uiCommands.includes(cmd); + if (isUICommand) { + logAndOutput(command); + } + const execCommand = commandFns[cmd]; + if (execCommand && typeof execCommand === 'function') { + execCommand(); + } + return isUICommand; +} + +interface ParsedCommand { + method: string; + path: string; + flagArray: string[]; + dataArray: string[]; +} +export function parseCommand(command: string): ParsedCommand { + const args: string[] = argTokenizer(parse(command)); + if (args[0] === 'vault') { + args.shift(); + } + + const [method = '', ...rest] = args; + let path = ''; + const flags: string[] = []; + const data: string[] = []; + + rest.forEach((arg) => { + if (arg.startsWith('-')) { + flags.push(arg); + } else { + if (path) { + const strippedArg = arg + // we'll have arg=something or arg="lol I need spaces", so need to split on the first = + .split(/=(.+)/) + // if there were quotes, there's an empty string as the last member in the array that we don't want, + // so filter it out + .filter((str) => str !== '') + // glue the data back together + .join('='); + data.push(strippedArg); + } else { + path = arg; + } + } + }); + + if (!supportedCommands.includes(method)) { + throw new Error('invalid command'); + } + return { method, flagArray: flags, path, dataArray: data }; +} + +interface LogResponse { + auth?: StringMap; + data?: StringMap; + wrap_info?: StringMap; + [key: string]: unknown; +} + +export function logFromResponse(response: LogResponse, path: string, method: string, flags: Flags) { + const { format, field } = flags; + const respData: StringMap | undefined = response && (response.auth || response.data || response.wrap_info); + const secret: StringMap | LogResponse = respData || response; + + if (!respData) { + if (method === 'write') { + return { type: 'success', content: `Success! Data written to: ${path}` }; + } else if (method === 'delete') { + return { type: 'success', content: `Success! 
Data deleted (if it existed) at: ${path}` }; + } + } + + if (field) { + const fieldValue = secret[field]; + let response; + if (fieldValue) { + if (format && format === 'json') { + return { type: 'json', content: fieldValue }; + } + if (typeof fieldValue == 'string') { + response = { type: 'text', content: fieldValue }; + } else if (typeof fieldValue == 'number') { + response = { type: 'text', content: JSON.stringify(fieldValue) }; + } else if (typeof fieldValue == 'boolean') { + response = { type: 'text', content: JSON.stringify(fieldValue) }; + } else if (Array.isArray(fieldValue)) { + response = { type: 'text', content: JSON.stringify(fieldValue) }; + } else { + response = { type: 'object', content: fieldValue }; + } + } else { + response = { type: 'error', content: `Field "${field}" not present in secret` }; + } + return response; + } + + if (format && format === 'json') { + // just print whole response + return { type: 'json', content: response }; + } + + if (method === 'list') { + return { type: 'list', content: secret }; + } + + return { type: 'object', content: secret }; + } + + interface CustomError extends AdapterError { + httpStatus: number; + path: string; + errors: string[]; + } + export function logFromError(error: CustomError, vaultPath: string, method: string) { + let content; + const { httpStatus, path } = error; + const verbClause = { + read: 'reading from', + 'kv-get': 'reading secret', + write: 'writing to', + list: 'listing', + delete: 'deleting at', + }[method]; + + content = `Error ${verbClause}: ${vaultPath}.\nURL: ${path}\nCode: ${httpStatus}`; + + if (typeof error.errors[0] === 'string') { + content = `${content}\nErrors:\n ${error.errors.join('\n ')}`; + } + + return { type: 'error', content }; + } + + interface CommandLog { + type: string; + content?: string; + } + export function shiftCommandIndex(keyCode: number, history: CommandLog[], index: number) { + let newInputValue; + const commandHistoryLength = history.length; + + if (!commandHistoryLength) { + return []; + } + + if (keyCode === keys.UP) { + index -= 1; + if (index < 0) { + index = commandHistoryLength - 1; + } + } else { + index += 1; + if (index === commandHistoryLength) { + newInputValue = ''; + } + if (index > commandHistoryLength) { + index -= 1; + } + } + + if (newInputValue !== '') { + newInputValue = history.objectAt(index)?.content; + } + + return [index, newInputValue]; + } + + export function formattedErrorFromInput(path: string, method: string, flags: Flags, dataArray: string[]) { + if (path === undefined) { + return { type: 'error', content: 'A path is required to make a request.' }; + } + if (method === 'write' && !flags.force && dataArray.length === 0) { + return { type: 'error', content: 'Must supply data or use -force' }; + } + return; + }
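Taken together, the console helpers above decompose a console entry roughly like this (a sketch using the exports above on a made-up command):

const entry = 'vault write secret/foo -force password=123 tag=a tag=b';
const { method, path, flagArray, dataArray } = parseCommand(entry);
// method => 'write', path => 'secret/foo'
// flagArray => ['-force'], dataArray => ['password=123', 'tag=a', 'tag=b']
extractFlagsFromStrings(flagArray, method); // => { force: true }
extractDataFromStrings(dataArray);          // => { password: '123', tag: ['a', 'b'] }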
diff --git a/ui/app/lib/control-group-error.js b/ui/app/lib/control-group-error.js new file mode 100644 index 0000000..9a27ced --- /dev/null +++ b/ui/app/lib/control-group-error.js @@ -0,0 +1,21 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + + import AdapterError from '@ember-data/adapter/error'; + + export default class ControlGroupError extends AdapterError { + constructor(wrapInfo) { + const { accessor, creation_path, creation_time, token, ttl } = wrapInfo; + super(); + this.message = 'Control Group encountered'; + + // add items from the wrapInfo object to the error + this.token = token; + this.accessor = accessor; + this.creation_path = creation_path; + this.creation_time = creation_time; + this.ttl = ttl; + } + } diff --git a/ui/app/lib/key-utils.js b/ui/app/lib/key-utils.js new file mode 100644 index 0000000..e629037 --- /dev/null +++ b/ui/app/lib/key-utils.js @@ -0,0 +1,52 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + + function keyIsFolder(key) { + return key ? !!key.match(/\/$/) : false; + } + + function keyPartsForKey(key) { + if (!key) { + return null; + } + var isFolder = keyIsFolder(key); + var parts = key.split('/'); + if (isFolder) { + parts.pop(); + } + return parts.length > 1 ? parts : null; + } + + function parentKeyForKey(key) { + var parts = keyPartsForKey(key); + if (!parts) { + return null; + } + return parts.slice(0, -1).join('/') + '/'; + } + + function keyWithoutParentKey(key) { + return key ? key.replace(parentKeyForKey(key), '') : null; + } + + function ancestorKeysForKey(key) { + var ancestors = [], + parentKey = parentKeyForKey(key); + + while (parentKey) { + ancestors.unshift(parentKey); + parentKey = parentKeyForKey(parentKey); + } + + return ancestors; + } + + export default { + keyIsFolder, + keyPartsForKey, + parentKeyForKey, + keyWithoutParentKey, + ancestorKeysForKey, + }; diff --git a/ui/app/lib/keycodes.js b/ui/app/lib/keycodes.js new file mode 100644 index 0000000..66164d3 --- /dev/null +++ b/ui/app/lib/keycodes.js @@ -0,0 +1,16 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + + // a map of keyCodes for use in keyboard event handlers + export default { + ENTER: 13, + ESC: 27, + TAB: 9, + LEFT: 37, + UP: 38, + RIGHT: 39, + DOWN: 40, + T: 116, + }; diff --git a/ui/app/lib/kv-object.js b/ui/app/lib/kv-object.js new file mode 100644 index 0000000..3df5c1e --- /dev/null +++ b/ui/app/lib/kv-object.js @@ -0,0 +1,59 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + + import ArrayProxy from '@ember/array/proxy'; + import { typeOf } from '@ember/utils'; + import { guidFor } from '@ember/object/internals'; + + export default ArrayProxy.extend({ + fromJSON(json) { + if (json && typeOf(json) !== 'object') { + throw new Error('Vault expects data to be formatted as a JSON object.'); + } + const contents = Object.keys(json || []).map((key) => { + const obj = { + name: key, + value: json[key], + }; + guidFor(obj); + return obj; + }); + this.setObjects( + contents.sort((a, b) => { + if (a.name === '') { + return 1; + } + if (b.name === '') { + return -1; + } + return a.name.localeCompare(b.name); + }) + ); + return this; + }, + + fromJSONString(jsonString) { + return this.fromJSON(JSON.parse(jsonString)); + }, + + toJSON(includeBlanks = false) { + return this.reduce((obj, item) => { + if (!includeBlanks && item.value === '' && item.name === '') { + return obj; + } + const val = typeof item.value === 'undefined' ? 
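/* coerce undefined to an empty string so the key still serializes; all other values, including '', pass through */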
'' : item.value; + obj[item.name || ''] = val; + return obj; + }, {}); + }, + + toJSONString(includeBlanks) { + return JSON.stringify(this.toJSON(includeBlanks), null, 2); + }, + + isAdvanced() { + return this.any((item) => typeof item.value !== 'string'); + }, +}); diff --git a/ui/app/lib/local-storage.js b/ui/app/lib/local-storage.js new file mode 100644 index 0000000..b7aeb70 --- /dev/null +++ b/ui/app/lib/local-storage.js @@ -0,0 +1,47 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +export default { + isLocalStorageSupported() { + try { + const key = `__storage__test`; + window.localStorage.setItem(key, null); + window.localStorage.removeItem(key); + return true; + } catch (e) { + // modify the e object so we can customize the error message. + // e.message is readOnly. + e.errors = [`This is likely due to your browser's cookie settings.`]; + throw e; + } + }, + + getItem(key) { + const item = window.localStorage.getItem(key); + return item && JSON.parse(item); + }, + + setItem(key, val) { + window.localStorage.setItem(key, JSON.stringify(val)); + }, + + removeItem(key) { + return window.localStorage.removeItem(key); + }, + + keys() { + return Object.keys(window.localStorage); + }, + + cleanupStorage(string, keyToKeep) { + if (!string) return; + const relevantKeys = this.keys().filter((str) => str.startsWith(string)); + relevantKeys?.forEach((key) => { + if (key !== keyToKeep) { + localStorage.removeItem(key); + } + }); + }, +}; diff --git a/ui/app/lib/memory-storage.js b/ui/app/lib/memory-storage.js new file mode 100644 index 0000000..3728719 --- /dev/null +++ b/ui/app/lib/memory-storage.js @@ -0,0 +1,25 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +const cache = {}; + +export default { + getItem(key) { + var item = cache[key]; + return item && JSON.parse(item); + }, + + setItem(key, val) { + cache[key] = JSON.stringify(val); + }, + + removeItem(key) { + delete cache[key]; + }, + + keys() { + return Object.keys(cache); + }, +}; diff --git a/ui/app/lib/path-to-tree.js b/ui/app/lib/path-to-tree.js new file mode 100644 index 0000000..9359c48 --- /dev/null +++ b/ui/app/lib/path-to-tree.js @@ -0,0 +1,57 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + + import flat from 'flat'; + import deepmerge from 'deepmerge'; + + const { unflatten } = flat; + const DOT_REPLACEMENT = '☃'; + + // function that takes a list of paths and returns a deeply nested object + // representing a tree of all of those paths + // + // + // given ["foo", "bar", "foo1", "foo/bar", "foo/baz", "foo/bar/baz"] + // + // returns { + // bar: null, + // foo: { + // bar: { + // baz: null + // }, + // baz: null, + // }, + // foo1: null, + // } + export default function (paths) { + // first sort the list by length, then alphanumeric + const list = paths.slice(0).sort((a, b) => b.length - a.length || b.localeCompare(a)); + // then reduce to an array, removing every item that is a path-prefix of + // another item already in the list + // so if we have "foo/bar/baz", both "foo" and "foo/bar" + // won't be included in the list + let tree = list.reduce((accumulator, ns) => { + const nsWithPrefix = accumulator.find((path) => path.startsWith(ns)); + // we need to make sure it's a match for the full path part + const isFullMatch = nsWithPrefix && nsWithPrefix.charAt(ns.length) === '/'; + if (!isFullMatch) { + accumulator.push(ns); + } + return accumulator; + }, []); + + tree = tree.sort((a, b) => a.localeCompare(b)); + // after the reduction we're left with an array that contains + // strings that represent the longest branches + // we'll replace the dots in the paths, then expand the path + // to a nested object that we can then query with Ember.get + return deepmerge.all( + tree.map((p) => { + p = p.replace(/\.+/g, DOT_REPLACEMENT); + return unflatten({ [p]: null }, { delimiter: '/', object: true }); + }) + ); + } diff --git a/ui/app/lib/route-paths.js b/ui/app/lib/route-paths.js new file mode 100644 index 0000000..e0125ae --- /dev/null +++ b/ui/app/lib/route-paths.js @@ -0,0 +1,17 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + + export const INIT = 'vault.cluster.init'; + export const UNSEAL = 'vault.cluster.unseal'; + export const AUTH = 'vault.cluster.auth'; + export const REDIRECT = 'vault.cluster.redirect'; + export const CLUSTER = 'vault.cluster'; + export const CLUSTER_INDEX = 'vault.cluster.index'; + export const OIDC_CALLBACK = 'vault.cluster.oidc-callback'; + export const OIDC_PROVIDER = 'vault.cluster.oidc-provider'; + export const NS_OIDC_PROVIDER = 'vault.cluster.oidc-provider-ns'; + export const DR_REPLICATION_SECONDARY = 'vault.cluster.replication-dr-promote'; + export const DR_REPLICATION_SECONDARY_DETAILS = 'vault.cluster.replication-dr-promote.details'; + export const EXCLUDED_REDIRECT_URLS = ['/vault/logout']; diff --git a/ui/app/lib/token-storage.js b/ui/app/lib/token-storage.js new file mode 100644 index 0000000..f96e0f5 --- /dev/null +++ b/ui/app/lib/token-storage.js @@ -0,0 +1,21 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + + import localStorageWrapper from './local-storage'; + import memoryStorage from './memory-storage'; + + export default function (type) { + if (type === 'memory') { + return memoryStorage; + } + let storage; + try { + window.localStorage.getItem('test'); + storage = localStorageWrapper; + } catch (e) { + storage = memoryStorage; + } + return storage; + } diff --git a/ui/app/machines/auth-machine.js b/ui/app/machines/auth-machine.js new file mode 100644 index 0000000..e040418 --- /dev/null +++ b/ui/app/machines/auth-machine.js @@ -0,0 +1,55 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +export default { + key: 'auth', + initial: 'idle', + on: { + RESET: 'idle', + DONE: 'complete', + }, + states: { + idle: { + onEntry: [ + { type: 'routeTransition', params: ['vault.cluster.settings.auth.enable'] }, + { type: 'render', level: 'feature', component: 'wizard/mounts-wizard' }, + { type: 'render', level: 'step', component: 'wizard/auth-idle' }, + ], + on: { + CONTINUE: 'enable', + }, + }, + enable: { + onEntry: [ + { type: 'render', level: 'feature', component: 'wizard/mounts-wizard' }, + { type: 'render', level: 'step', component: 'wizard/auth-enable' }, + ], + on: { + CONTINUE: 'config', + }, + }, + config: { + onEntry: [ + { type: 'render', level: 'feature', component: 'wizard/mounts-wizard' }, + { type: 'render', level: 'step', component: 'wizard/auth-config' }, + ], + on: { + CONTINUE: 'details', + }, + }, + details: { + onEntry: [ + { type: 'render', level: 'step', component: 'wizard/auth-details' }, + { type: 'render', level: 'feature', component: 'wizard/mounts-wizard' }, + ], + on: { + CONTINUE: 'complete', + }, + }, + complete: { + onEntry: ['completeFeature'], + }, + }, +}; diff --git a/ui/app/machines/policies-machine.js b/ui/app/machines/policies-machine.js new file mode 100644 index 0000000..db7c94e --- /dev/null +++ b/ui/app/machines/policies-machine.js @@ -0,0 +1,47 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +export default { + key: 'policies', + initial: 'idle', + states: { + idle: { + onEntry: [ + { type: 'routeTransition', params: ['vault.cluster.policies.index', 'acl'] }, + { type: 'render', level: 'feature', component: 'wizard/policies-intro' }, + ], + on: { + CONTINUE: 'create', + }, + }, + create: { + on: { + CONTINUE: 'details', + }, + onEntry: [{ type: 'render', level: 'feature', component: 'wizard/policies-create' }], + }, + details: { + on: { + CONTINUE: 'delete', + }, + onEntry: [{ type: 'render', level: 'feature', component: 'wizard/policies-details' }], + }, + delete: { + on: { + CONTINUE: 'others', + }, + onEntry: [{ type: 'render', level: 'feature', component: 'wizard/policies-delete' }], + }, + others: { + on: { + CONTINUE: 'complete', + }, + onEntry: [{ type: 'render', level: 'feature', component: 'wizard/policies-others' }], + }, + complete: { + onEntry: ['completeFeature'], + }, + }, +}; diff --git a/ui/app/machines/replication-machine.js b/ui/app/machines/replication-machine.js new file mode 100644 index 0000000..5816ce5 --- /dev/null +++ b/ui/app/machines/replication-machine.js @@ -0,0 +1,29 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +export default { + key: 'replication', + initial: 'setup', + states: { + setup: { + on: { + ENABLEREPLICATION: 'details', + }, + onEntry: [ + { type: 'routeTransition', params: ['vault.cluster.replication'] }, + { type: 'render', level: 'feature', component: 'wizard/replication-setup' }, + ], + }, + details: { + on: { + CONTINUE: 'complete', + }, + onEntry: [{ type: 'render', level: 'feature', component: 'wizard/replication-details' }], + }, + complete: { + onEntry: ['completeFeature'], + }, + }, +}; diff --git a/ui/app/machines/secrets-machine.js b/ui/app/machines/secrets-machine.js new file mode 100644 index 0000000..e8e9353 --- /dev/null +++ b/ui/app/machines/secrets-machine.js @@ -0,0 +1,224 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { supportedSecretBackends } from 'vault/helpers/supported-secret-backends'; +const supportedBackends = supportedSecretBackends(); + +export default { + key: 'secrets', + initial: 'idle', + on: { + RESET: 'idle', + DONE: 'complete', + ERROR: 'error', + }, + states: { + idle: { + onEntry: [ + { type: 'routeTransition', params: ['vault.cluster.settings.mount-secret-backend'] }, + { type: 'render', level: 'feature', component: 'wizard/mounts-wizard' }, + { type: 'render', level: 'step', component: 'wizard/secrets-idle' }, + ], + on: { + CONTINUE: 'enable', + }, + }, + enable: { + onEntry: [ + { type: 'render', level: 'feature', component: 'wizard/mounts-wizard' }, + { type: 'render', level: 'step', component: 'wizard/secrets-enable' }, + ], + on: { + CONTINUE: { + details: { cond: (type) => supportedBackends.includes(type) }, + list: { cond: (type) => !supportedBackends.includes(type) }, + }, + }, + }, + details: { + onEntry: [ + { type: 'render', level: 'feature', component: 'wizard/mounts-wizard' }, + { type: 'render', level: 'step', component: 'wizard/secrets-details' }, + ], + on: { + CONTINUE: { + connection: { + cond: (type) => type === 'database', + }, + role: { + cond: (type) => ['pki', 'aws', 'ssh'].includes(type), + }, + secret: { + cond: (type) => ['kv'].includes(type), + }, + encryption: { + cond: (type) => type === 'transit', + }, + provider: { + cond: (type) => type === 'keymgmt', + }, + }, + }, + }, + connection: { + onEntry: [ + { type: 'render', level: 'step', component: 'wizard/secrets-connection' }, + { type: 'render', level: 'feature', component: 'wizard/mounts-wizard' }, + ], + on: { + CONTINUE: 'displayConnection', + }, + }, + encryption: { + onEntry: [ + { type: 'render', level: 'feature', component: 'wizard/mounts-wizard' }, + { type: 'render', level: 'step', component: 'wizard/secrets-encryption' }, + ], + on: { + CONTINUE: 'display', + }, + }, + credentials: { + onEntry: [ + { type: 'render', level: 'step', component: 'wizard/secrets-credentials' }, + { type: 'render', level: 'feature', component: 'wizard/mounts-wizard' }, + ], + on: { + CONTINUE: 'display', + }, + }, + role: { + onEntry: [ + { type: 'render', level: 'step', component: 'wizard/secrets-role' }, + { type: 'render', level: 'feature', component: 'wizard/mounts-wizard' }, + ], + on: { + CONTINUE: 'displayRole', + }, + }, + displayRole: { + onEntry: [ + { type: 'render', level: 'step', component: 'wizard/secrets-display-role' }, + { type: 'render', level: 'feature', component: 'wizard/mounts-wizard' }, + ], + on: { + CONTINUE: 'credentials', + }, + }, + displayConnection: { + onEntry: [ + { type: 'render', level: 'step', component: 'wizard/secrets-connection-show' }, + { type: 'render', level: 'feature', component: 'wizard/mounts-wizard' }, + ], + on: { + CONTINUE: 'displayRoleDatabase', + }, + }, + displayRoleDatabase: { + onEntry: [ + { type: 'render', level: 'step', component: 'wizard/secrets-display-database-role' }, + { type: 'render', level: 'feature', component: 'wizard/mounts-wizard' }, + ], + on: { + CONTINUE: 'display', + }, + }, + secret: { + onEntry: [ + { type: 'render', level: 'step', component: 'wizard/secrets-secret' }, + { type: 'render', level: 'feature', component: 'wizard/mounts-wizard' }, + ], + on: { + CONTINUE: 'display', + }, + }, + provider: { + onEntry: [ + { type: 'render', level: 'step', component: 'wizard/secrets-keymgmt' }, + { type: 'render', level: 'feature', component: 'wizard/mounts-wizard' }, + ], + on: { + CONTINUE: 
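/* keymgmt continues provider -> displayProvider -> distribute -> display */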
'displayProvider', + }, + }, + displayProvider: { + onEntry: [ + { type: 'render', level: 'step', component: 'wizard/secrets-keymgmt' }, + { type: 'render', level: 'feature', component: 'wizard/mounts-wizard' }, + ], + on: { + CONTINUE: 'distribute', + }, + }, + distribute: { + onEntry: [ + { type: 'render', level: 'step', component: 'wizard/secrets-keymgmt' }, + { type: 'render', level: 'feature', component: 'wizard/mounts-wizard' }, + ], + on: { + CONTINUE: 'display', + }, + }, + display: { + onEntry: [ + { type: 'render', level: 'step', component: 'wizard/secrets-display' }, + { type: 'render', level: 'feature', component: 'wizard/mounts-wizard' }, + ], + on: { + REPEAT: { + connection: { + cond: (type) => type === 'database', + actions: [{ type: 'routeTransition', params: ['vault.cluster.secrets.backend.create-root'] }], + }, + role: { + cond: (type) => ['pki', 'aws', 'ssh'].includes(type), + actions: [{ type: 'routeTransition', params: ['vault.cluster.secrets.backend.create-root'] }], + }, + secret: { + cond: (type) => ['kv'].includes(type), + actions: [{ type: 'routeTransition', params: ['vault.cluster.secrets.backend.create-root'] }], + }, + encryption: { + cond: (type) => type === 'transit', + actions: [{ type: 'routeTransition', params: ['vault.cluster.secrets.backend.create-root'] }], + }, + provider: { + cond: (type) => type === 'keymgmt', + actions: [ + { + type: 'routeTransition', + params: [ + 'vault.cluster.secrets.backend.create-root', + { queryParams: { itemType: 'provider' } }, + ], + }, + ], + }, + }, + }, + }, + list: { + onEntry: [ + { type: 'render', level: 'step', component: 'wizard/secrets-list' }, + { type: 'render', level: 'feature', component: 'wizard/mounts-wizard' }, + ], + on: { + CONTINUE: 'display', + }, + }, + error: { + onEntry: [ + { type: 'render', level: 'step', component: 'wizard/tutorial-error' }, + { type: 'render', level: 'feature', component: 'wizard/mounts-wizard' }, + ], + on: { + CONTINUE: 'complete', + }, + }, + complete: { + onEntry: ['completeFeature'], + }, + }, +}; diff --git a/ui/app/machines/tools-machine.js b/ui/app/machines/tools-machine.js new file mode 100644 index 0000000..64643b6 --- /dev/null +++ b/ui/app/machines/tools-machine.js @@ -0,0 +1,65 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +export default { + key: 'tools', + initial: 'wrap', + states: { + wrap: { + onEntry: [ + { type: 'routeTransition', params: ['vault.cluster.tools'] }, + { type: 'render', level: 'feature', component: 'wizard/tools-wrap' }, + ], + on: { + CONTINUE: 'wrapped', + }, + }, + wrapped: { + onEntry: [{ type: 'render', level: 'feature', component: 'wizard/tools-wrapped' }], + on: { + LOOKUP: 'lookup', + }, + }, + lookup: { + onEntry: [{ type: 'render', level: 'feature', component: 'wizard/tools-lookup' }], + on: { + CONTINUE: 'info', + }, + }, + info: { + onEntry: [{ type: 'render', level: 'feature', component: 'wizard/tools-info' }], + on: { + REWRAP: 'rewrap', + }, + }, + rewrap: { + onEntry: [{ type: 'render', level: 'feature', component: 'wizard/tools-rewrap' }], + on: { + CONTINUE: 'rewrapped', + }, + }, + rewrapped: { + onEntry: [{ type: 'render', level: 'feature', component: 'wizard/tools-rewrapped' }], + on: { + UNWRAP: 'unwrap', + }, + }, + unwrap: { + onEntry: [{ type: 'render', level: 'feature', component: 'wizard/tools-unwrap' }], + on: { + CONTINUE: 'unwrapped', + }, + }, + unwrapped: { + onEntry: [{ type: 'render', level: 'feature', component: 'wizard/tools-unwrapped' }], + on: { + CONTINUE: 'complete', + }, + }, + complete: { + onEntry: ['completeFeature'], + }, + }, +}; diff --git a/ui/app/machines/tutorial-machine.js b/ui/app/machines/tutorial-machine.js new file mode 100644 index 0000000..0519f02 --- /dev/null +++ b/ui/app/machines/tutorial-machine.js @@ -0,0 +1,117 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +export default { + key: 'tutorial', + initial: 'idle', + on: { + DISMISS: 'dismissed', + DONE: 'complete', + PAUSE: 'paused', + }, + states: { + init: { + key: 'init', + initial: 'idle', + on: { INITDONE: 'active.select' }, + onEntry: [ + 'showTutorialAlways', + { type: 'render', level: 'tutorial', component: 'wizard/tutorial-idle' }, + { type: 'render', level: 'feature', component: null }, + ], + onExit: ['showTutorialWhenAuthenticated', 'clearFeatureData'], + states: { + idle: { + on: { + START: 'active.setup', + SAVE: 'active.save', + UNSEAL: 'active.unseal', + LOGIN: 'active.login', + }, + }, + active: { + onEntry: { type: 'render', level: 'tutorial', component: 'wizard/tutorial-active' }, + states: { + setup: { + on: { TOSAVE: 'save' }, + onEntry: { type: 'render', level: 'feature', component: 'wizard/init-setup' }, + }, + save: { + on: { + TOUNSEAL: 'unseal', + TOLOGIN: 'login', + }, + onEntry: { type: 'render', level: 'feature', component: 'wizard/init-save-keys' }, + }, + unseal: { + on: { TOLOGIN: 'login' }, + onEntry: { type: 'render', level: 'feature', component: 'wizard/init-unseal' }, + }, + login: { + onEntry: { type: 'render', level: 'feature', component: 'wizard/init-login' }, + }, + }, + }, + }, + }, + active: { + key: 'feature', + initial: 'select', + onEntry: { type: 'render', level: 'tutorial', component: 'wizard/tutorial-active' }, + states: { + select: { + on: { + CONTINUE: 'feature', + }, + onEntry: { type: 'render', level: 'feature', component: 'wizard/features-selection' }, + }, + feature: {}, + }, + }, + idle: { + on: { + INIT: 'init.idle', + AUTH: 'active.select', + CONTINUE: 'active', + }, + onEntry: [ + { type: 'render', level: 'feature', component: null }, + { type: 'render', level: 'step', component: null }, + { type: 'render', level: 'detail', component: null }, + { type: 'render', level: 'tutorial', component: 'wizard/tutorial-idle' }, + ], + }, + 
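// the dismissed, paused, and complete states below are entered via the machine-level DISMISS, PAUSE, and DONE events declared in the top-level `on` block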
dismissed: { + onEntry: [ + { type: 'render', level: 'tutorial', component: null }, + { type: 'render', level: 'feature', component: null }, + { type: 'render', level: 'step', component: null }, + { type: 'render', level: 'detail', component: null }, + 'handleDismissed', + ], + }, + paused: { + on: { + CONTINUE: 'active.feature', + }, + onEntry: [ + { type: 'render', level: 'feature', component: null }, + { type: 'render', level: 'step', component: null }, + { type: 'render', level: 'detail', component: null }, + { type: 'render', level: 'tutorial', component: 'wizard/tutorial-paused' }, + 'handlePaused', + ], + onExit: ['handleResume'], + }, + complete: { + onEntry: [ + { type: 'render', level: 'feature', component: null }, + { type: 'render', level: 'step', component: null }, + { type: 'render', level: 'detail', component: null }, + { type: 'render', level: 'tutorial', component: 'wizard/tutorial-complete' }, + ], + }, + }, +}; diff --git a/ui/app/macros/identity-capabilities.js b/ui/app/macros/identity-capabilities.js new file mode 100644 index 0000000..da70c00 --- /dev/null +++ b/ui/app/macros/identity-capabilities.js @@ -0,0 +1,10 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; + +export default function () { + return lazyCapabilities(apiPath`identity/${'identityType'}/id/${'id'}`, 'id', 'identityType'); +} diff --git a/ui/app/macros/lazy-capabilities.js b/ui/app/macros/lazy-capabilities.js new file mode 100644 index 0000000..fa5d265 --- /dev/null +++ b/ui/app/macros/lazy-capabilities.js @@ -0,0 +1,57 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +// usage: +// +// import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; +// +// export default DS.Model.extend({ +// //pass the template string as the first arg, and be sure to use '' around the +// //parameters that get interpolated in the string - that's how the template function +// //knows where to put each value +// zeroAddressPath: lazyCapabilities(apiPath`${'id'}/config/zeroaddress`, 'id'), +// +// }); +// + +import { maybeQueryRecord } from 'vault/macros/maybe-query-record'; + +export function apiPath(strings, ...keys) { + return function (data) { + const dict = data || {}; + const result = [strings[0]]; + keys.forEach((key, i) => { + result.push(dict[key], strings[i + 1]); + }); + return result.join(''); + }; +} + +export default function () { + const [templateFn, ...keys] = arguments; + return maybeQueryRecord( + 'capabilities', + (context) => { + // pull all context attrs + const contextObject = context.getProperties(...keys); + // remove empty ones + const nonEmptyContexts = Object.keys(contextObject).reduce((ret, key) => { + if (contextObject[key] != null) { + ret[key] = contextObject[key]; + } + return ret; + }, {}); + // if all of them aren't present, cancel the fetch + if (Object.keys(nonEmptyContexts).length !== keys.length) { + return; + } + // otherwise proceed with the capabilities check + return { + id: templateFn(nonEmptyContexts), + }; + }, + ...keys + ); +} diff --git a/ui/app/macros/maybe-query-record.js b/ui/app/macros/maybe-query-record.js new file mode 100644 index 0000000..94cc369 --- /dev/null +++ b/ui/app/macros/maybe-query-record.js @@ -0,0 +1,22 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { computed } from '@ember/object'; +import ObjectProxy from '@ember/object/proxy'; +import PromiseProxyMixin from '@ember/object/promise-proxy-mixin'; +import { resolve } from 'rsvp'; + +export function maybeQueryRecord(modelName, options = {}, ...keys) { + return computed(...keys, 'store', { + get() { + const query = typeof options === 'function' ? options(this) : options; + const PromiseObject = ObjectProxy.extend(PromiseProxyMixin); + + return PromiseObject.create({ + promise: query ? this.store.queryRecord(modelName, query) : resolve({}), + }); + }, + }); +} diff --git a/ui/app/mixins/backend-crumb.js b/ui/app/mixins/backend-crumb.js new file mode 100644 index 0000000..57d81c8 --- /dev/null +++ b/ui/app/mixins/backend-crumb.js @@ -0,0 +1,24 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { computed } from '@ember/object'; +import Mixin from '@ember/object/mixin'; + +export default Mixin.create({ + backendCrumb: computed('backend', function () { + const backend = this.backend; + + if (backend === undefined) { + throw new Error('backend-crumb mixin requires backend to be set'); + } + + return { + label: backend, + text: backend, + path: 'vault.cluster.secrets.backend.list-root', + model: backend, + }; + }), +}); diff --git a/ui/app/mixins/cluster-route.js b/ui/app/mixins/cluster-route.js new file mode 100644 index 0000000..6cb3a96 --- /dev/null +++ b/ui/app/mixins/cluster-route.js @@ -0,0 +1,117 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { inject as service } from '@ember/service'; +import Mixin from '@ember/object/mixin'; +import RSVP from 'rsvp'; +import { + INIT, + UNSEAL, + AUTH, + CLUSTER, + CLUSTER_INDEX, + OIDC_CALLBACK, + OIDC_PROVIDER, + NS_OIDC_PROVIDER, + DR_REPLICATION_SECONDARY, + DR_REPLICATION_SECONDARY_DETAILS, + EXCLUDED_REDIRECT_URLS, + REDIRECT, +} from 'vault/lib/route-paths'; + +export default Mixin.create({ + auth: service(), + store: service(), + router: service(), + + transitionToTargetRoute(transition = {}) { + const targetRoute = this.targetRouteName(transition); + if ( + targetRoute && + targetRoute !== this.routeName && + targetRoute !== transition.targetName && + targetRoute !== this.router.currentRouteName + ) { + // there may be query params so check for inclusion rather than exact match + const isExcluded = EXCLUDED_REDIRECT_URLS.find((url) => this.router.currentURL?.includes(url)); + if ( + // only want to redirect if we're going to authenticate + targetRoute === AUTH && + transition.targetName !== CLUSTER_INDEX && + !isExcluded + ) { + return this.transitionTo(targetRoute, { queryParams: { redirect_to: this.router.currentURL } }); + } + return this.transitionTo(targetRoute); + } + + return RSVP.resolve(); + }, + + beforeModel(transition) { + return this.transitionToTargetRoute(transition); + }, + + clusterModel() { + return this.modelFor(CLUSTER) || this.store.peekRecord('cluster', 'vault'); + }, + + authToken() { + return this.auth.currentToken; + }, + + hasKeyData() { + /* eslint-disable-next-line ember/no-controller-access-in-routes */ + return !!this.controllerFor(INIT).keyData; + }, + + targetRouteName(transition) { + const cluster = this.clusterModel(); + const isAuthed = this.authToken(); + if (cluster.needsInit) { + return INIT; + } + if (this.hasKeyData() && this.routeName !== UNSEAL && this.routeName !== AUTH) { + return INIT; + } + if (cluster.sealed) { + return UNSEAL; + } + if 
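/* DR secondaries only serve the replication promotion screens, so handle them before the auth checks below */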
(cluster?.dr?.isSecondary) { + if (transition && transition.targetName === DR_REPLICATION_SECONDARY_DETAILS) { + return DR_REPLICATION_SECONDARY_DETAILS; + } + if (this.router.currentRouteName === DR_REPLICATION_SECONDARY_DETAILS) { + return DR_REPLICATION_SECONDARY_DETAILS; + } + + return DR_REPLICATION_SECONDARY; + } + if (!isAuthed) { + if ((transition && transition.targetName === OIDC_PROVIDER) || this.routeName === OIDC_PROVIDER) { + return OIDC_PROVIDER; + } + if ((transition && transition.targetName === NS_OIDC_PROVIDER) || this.routeName === NS_OIDC_PROVIDER) { + return NS_OIDC_PROVIDER; + } + if ((transition && transition.targetName === OIDC_CALLBACK) || this.routeName === OIDC_CALLBACK) { + return OIDC_CALLBACK; + } + return AUTH; + } + if ( + (!cluster.needsInit && this.routeName === INIT) || + (!cluster.sealed && this.routeName === UNSEAL) || + (!cluster?.dr?.isSecondary && this.routeName === DR_REPLICATION_SECONDARY) + ) { + return CLUSTER; + } + if (isAuthed && this.routeName === AUTH) { + // if you're already authed and you want to go to auth, you probably want to redirect + return REDIRECT; + } + return null; + }, + }); diff --git a/ui/app/mixins/focus-on-insert.js b/ui/app/mixins/focus-on-insert.js new file mode 100644 index 0000000..08b2716 --- /dev/null +++ b/ui/app/mixins/focus-on-insert.js @@ -0,0 +1,35 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + + import { schedule } from '@ember/runloop'; + import { on } from '@ember/object/evented'; + import Mixin from '@ember/object/mixin'; + + export default Mixin.create({ + // selector used to find the element to focus after insert + // defaults to `'input[type="text"]'` + focusOnInsertSelector: null, + shouldFocus: true, + + // uses Ember.on so that we don't have to worry about calling _super if + // didInsertElement is overridden + focusOnInsert: on('didInsertElement', function () { + schedule('afterRender', this, 'focusOnInsertFocus'); + }), + + focusOnInsertFocus() { + if (this.shouldFocus === false) { + return; + } + this.forceFocus(); + }, + + forceFocus() { + var $targ = this.element.querySelectorAll(this.focusOnInsertSelector || 'input[type="text"]')[0]; + if ($targ && $targ !== document.activeElement) { + $targ.focus(); + } + }, + });
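The key-mixin added below layers computed properties over the key-utils helpers from earlier in this diff. For reference, a sketch of what those helpers return (illustrative inputs):

import utils from 'vault/lib/key-utils';

utils.keyIsFolder('foo/bar/');            // => true (a trailing slash marks a folder)
utils.keyPartsForKey('foo/bar/baz');      // => ['foo', 'bar', 'baz']
utils.parentKeyForKey('foo/bar/baz');     // => 'foo/bar/'
utils.keyWithoutParentKey('foo/bar/baz'); // => 'baz'
utils.ancestorKeysForKey('foo/bar/baz');  // => ['foo/', 'foo/bar/']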
diff --git a/ui/app/mixins/key-mixin.js b/ui/app/mixins/key-mixin.js
new file mode 100644
index 0000000..f9d9aaf
--- /dev/null
+++ b/ui/app/mixins/key-mixin.js
@@ -0,0 +1,59 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import { computed } from '@ember/object';
+import Mixin from '@ember/object/mixin';
+import utils from 'vault/lib/key-utils';
+
+export default Mixin.create({
+  // which attribute has the path for the key
+  // will be 'path' for v2 or 'id' for v1
+  pathAttr: 'path',
+  flags: null,
+
+  initialParentKey: null,
+
+  isCreating: computed('initialParentKey', function () {
+    return this.initialParentKey != null;
+  }),
+
+  pathVal() {
+    return this[this.pathAttr] || this.id;
+  },
+
+  // rather than using defineProperty for all of these,
+  // we're just going to hardcode the known keys for the path ('id' and 'path')
+  isFolder: computed('id', 'path', function () {
+    return utils.keyIsFolder(this.pathVal());
+  }),
+
+  keyParts: computed('id', 'path', function () {
+    return utils.keyPartsForKey(this.pathVal());
+  }),
+
+  parentKey: computed('id', 'path', 'isCreating', {
+    get: function () {
+      return this.isCreating ? this.initialParentKey : utils.parentKeyForKey(this.pathVal());
+    },
+    set: function (_, value) {
+      return value;
+    },
+  }),
+
+  keyWithoutParent: computed('id', 'path', 'parentKey', {
+    get: function () {
+      var key = this.pathVal();
+      return key ? key.replace(this.parentKey, '') : null;
+    },
+    set: function (_, value) {
+      if (value && value.trim()) {
+        this.set(this.pathAttr, this.parentKey + value);
+      } else {
+        this.set(this.pathAttr, null);
+      }
+      return value;
+    },
+  }),
+});
diff --git a/ui/app/mixins/model-boundary-route.js b/ui/app/mixins/model-boundary-route.js
new file mode 100644
index 0000000..3d639d7
--- /dev/null
+++ b/ui/app/mixins/model-boundary-route.js
@@ -0,0 +1,68 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+// meant to be mixed in to a Route file
+//
+// When a route is deactivated, this mixin clears the Ember Data store of
+// models of the type specified by the required param `modelType`.
+//
+// example:
+// Using this with a modelType of `datacenter` on the infrastructure
+// route will cause all `datacenter` models to get unloaded when the
+// infrastructure route is navigated away from.
+
+import Route from '@ember/routing/route';
+
+import { isPresent } from '@ember/utils';
+import { warn } from '@ember/debug';
+import { on } from '@ember/object/evented';
+import Mixin from '@ember/object/mixin';
+
+export default Mixin.create({
+  modelType: null,
+  modelTypes: null,
+
+  verifyProps: on('init', function () {
+    var modelType = this.modelType;
+    var modelTypes = this.modelTypes;
+    warn(
+      'No `modelType` or `modelTypes` specified for `' +
+        this.toString() +
+        '`. Check to make sure you still need to use the `model-boundary-route` mixin.',
+      isPresent(modelType) || isPresent(modelTypes),
+      { id: 'model-boundary-init' }
+    );
+
+    warn(
+      'Expected `model-boundary-route` to be used on an Ember.Route, not `' + this.toString() + '`.',
+      this instanceof Route,
+      { id: 'model-boundary-is-route' }
+    );
+  }),
+
+  clearModelCache: on('deactivate', function () {
+    var modelType = this.modelType;
+    var modelTypes = this.modelTypes;
+
+    if (!modelType && !modelTypes) {
+      warn(
+        'Attempted to clear the store cache when leaving `' +
+          this.routeName +
+          '`, but no `modelType` or `modelTypes` was specified.',
+        isPresent(modelType),
+        { id: 'model-boundary-clear' }
+      );
+      return;
+    }
+    if (modelType) {
+      this.store.unloadAll(modelType);
+    }
+    if (modelTypes) {
+      modelTypes.forEach((type) => {
+        this.store.unloadAll(type);
+      });
+    }
+  }),
+});
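The comment block in model-boundary-route.js sketches its intended use; the following is a minimal rendering of that `datacenter` example, assuming a classic route class (the route itself is illustrative):

```js
// Hypothetical route mirroring the `datacenter` example from the
// model-boundary-route comments above; all `datacenter` records are
// unloaded when the route is deactivated.
import Route from '@ember/routing/route';
import ModelBoundaryRoute from 'vault/mixins/model-boundary-route';

export default Route.extend(ModelBoundaryRoute, {
  modelType: 'datacenter',
});
```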
diff --git a/ui/app/mixins/unload-model-route.js b/ui/app/mixins/unload-model-route.js
new file mode 100644
index 0000000..62375a4
--- /dev/null
+++ b/ui/app/mixins/unload-model-route.js
@@ -0,0 +1,33 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import Mixin from '@ember/object/mixin';
+import removeRecord from 'vault/utils/remove-record';
+
+// removes Ember Data records from the cache when the model
+// changes or you move away from the current route
+export default Mixin.create({
+  modelPath: 'model',
+  unloadModel() {
+    const { modelPath } = this;
+    /* eslint-disable-next-line ember/no-controller-access-in-routes */
+    const model = this.controller.get(modelPath);
+    // error is thrown when you attempt to unload a record that is inFlight (isSaving)
+    if (!model || !model.unloadRecord || model.isSaving) {
+      return;
+    }
+    removeRecord(this.store, model);
+    model.destroy();
+    // it's important to unset the model on the controller since controllers are singletons
+    this.controller.set(modelPath, null);
+  },
+
+  actions: {
+    willTransition() {
+      this.unloadModel();
+      return true;
+    },
+  },
+});
diff --git a/ui/app/mixins/unsaved-model-route.js b/ui/app/mixins/unsaved-model-route.js
new file mode 100644
index 0000000..717504d
--- /dev/null
+++ b/ui/app/mixins/unsaved-model-route.js
@@ -0,0 +1,32 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import Mixin from '@ember/object/mixin';
+
+// this mixin relies on `unload-model-route` also being used
+export default Mixin.create({
+  actions: {
+    willTransition(transition) {
+      const model = this.controller.get('model');
+      if (!model) {
+        return true;
+      }
+      if (model.hasDirtyAttributes) {
+        if (
+          window.confirm(
+            'You have unsaved changes. Navigating away will discard these changes. Are you sure you want to discard your changes?'
+          )
+        ) {
+          this.unloadModel();
+          return true;
+        } else {
+          transition.abort();
+          return false;
+        }
+      }
+      return true;
+    },
+  },
+});
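Because unsaved-model-route relies on the unloadModel() action supplied by unload-model-route, the two are meant to be mixed in together. A hedged sketch, with an illustrative route and model type:

```js
// Hypothetical route combining the two mixins above: unsaved-model-route's
// willTransition prompts on dirty attributes and then calls unloadModel(),
// which is provided by unload-model-route.
import Route from '@ember/routing/route';
import UnloadModelRoute from 'vault/mixins/unload-model-route';
import UnsavedModelRoute from 'vault/mixins/unsaved-model-route';

export default Route.extend(UnloadModelRoute, UnsavedModelRoute, {
  model() {
    // the model type here is illustrative only
    return this.store.createRecord('secret-engine');
  },
});
```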
diff --git a/ui/app/mixins/with-nav-to-nearest-ancestor.js b/ui/app/mixins/with-nav-to-nearest-ancestor.js
new file mode 100644
index 0000000..b7cc39a
--- /dev/null
+++ b/ui/app/mixins/with-nav-to-nearest-ancestor.js
@@ -0,0 +1,45 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import Mixin from '@ember/object/mixin';
+import utils from 'vault/lib/key-utils';
+import { task } from 'ember-concurrency';
+
+// This mixin is currently used in a controller and a component, but we
+// don't see cancellation of the task while the loop runs in either.
+
+// Controllers in Ember are singletons, so there's no cancellation there
+// during the loop. For components, it might be expected that the task would
+// be cancelled when we transitioned to a new route and a rerender occurred, but this is not
+// the case since we are catching the error. Since Ember's route transitions are lazy
+// and we're catching any 404s, the loop continues until the transition succeeds, or exhausts
+// the ancestors array and transitions to the root.
+export default Mixin.create({
+  navToNearestAncestor: task(function* (key) {
+    const ancestors = utils.ancestorKeysForKey(key);
+    let errored = false;
+    let nearest = ancestors.pop();
+    while (nearest) {
+      try {
+        const transition = this.transitionToRoute('vault.cluster.secrets.backend.list', nearest);
+        transition.data.isDeletion = true;
+        yield transition.promise;
+      } catch (e) {
+        // in the route error event handler, we're only throwing when it's a 404,
+        // other errors will be in the route and will not be caught, so the task will complete
+        errored = true;
+        nearest = ancestors.pop();
+      } finally {
+        if (!errored) {
+          nearest = null;
+          // eslint-disable-next-line
+          return;
+        }
+        errored = false;
+      }
+    }
+    yield this.transitionToRoute('vault.cluster.secrets.backend.list-root');
+  }),
+});
diff --git a/ui/app/models/.gitkeep b/ui/app/models/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/ui/app/models/auth-config.js b/ui/app/models/auth-config.js
new file mode 100644
index 0000000..2733307
--- /dev/null
+++ b/ui/app/models/auth-config.js
@@ -0,0 +1,13 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import Model, { belongsTo } from '@ember-data/model';
+
+export default Model.extend({
+  backend: belongsTo('auth-method', { inverse: 'authConfigs', readOnly: true, async: false }),
+  getHelpUrl: function (backend) {
+    return `/v1/auth/${backend}/config?help=1`;
+  },
+});
diff --git a/ui/app/models/auth-config/approle.js b/ui/app/models/auth-config/approle.js
new file mode 100644
index 0000000..099e01a
--- /dev/null
+++ b/ui/app/models/auth-config/approle.js
@@ -0,0 +1,7 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import AuthConfig from '../auth-config';
+export default AuthConfig.extend({});
diff --git a/ui/app/models/auth-config/aws/client.js b/ui/app/models/auth-config/aws/client.js
new file mode 100644
index 0000000..6a94c5c
--- /dev/null
+++ b/ui/app/models/auth-config/aws/client.js
@@ -0,0 +1,35 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import { attr } from '@ember-data/model';
+import { computed } from '@ember/object';
+import AuthConfig from '../../auth-config';
+import fieldToAttrs from 'vault/utils/field-to-attrs';
+
+export default AuthConfig.extend({
+  secretKey: attr('string'),
+  accessKey: attr('string'),
+  endpoint: attr('string', {
+    label: 'EC2 Endpoint',
+  }),
+  iamEndpoint: attr('string', {
+    label: 'IAM Endpoint',
+  }),
+  stsEndpoint: attr('string', {
+    label: 'STS Endpoint',
+  }),
+  iamServerIdHeaderValue: attr('string', {
+    label: 'IAM Server ID Header Value',
+  }),
+
+  fieldGroups: computed(function () {
+    const groups = [
+      { default: ['accessKey', 'secretKey'] },
+      { 'AWS Options': ['endpoint', 'iamEndpoint', 'stsEndpoint', 'iamServerIdHeaderValue'] },
+    ];
+
+    return fieldToAttrs(this, groups);
+  }),
+});
diff --git a/ui/app/models/auth-config/aws/identity-accesslist.js b/ui/app/models/auth-config/aws/identity-accesslist.js
new file mode 100644
index 0000000..687b730
--- /dev/null
+++ b/ui/app/models/auth-config/aws/identity-accesslist.js
@@ -0,0 +1,7 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Tidy from './tidy'; +export default Tidy.extend(); diff --git a/ui/app/models/auth-config/aws/roletag-denylist.js b/ui/app/models/auth-config/aws/roletag-denylist.js new file mode 100644 index 0000000..687b730 --- /dev/null +++ b/ui/app/models/auth-config/aws/roletag-denylist.js @@ -0,0 +1,7 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Tidy from './tidy'; +export default Tidy.extend(); diff --git a/ui/app/models/auth-config/aws/tidy.js b/ui/app/models/auth-config/aws/tidy.js new file mode 100644 index 0000000..e6a8377 --- /dev/null +++ b/ui/app/models/auth-config/aws/tidy.js @@ -0,0 +1,24 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { attr } from '@ember-data/model'; +import { computed } from '@ember/object'; +import { expandAttributeMeta } from 'vault/utils/field-to-attrs'; +import AuthConfig from '../../auth-config'; + +export default AuthConfig.extend({ + safetyBuffer: attr({ + defaultValue: '72h', + editType: 'ttl', + }), + + disablePeriodicTidy: attr('boolean', { + defaultValue: false, + }), + + attrs: computed(function () { + return expandAttributeMeta(this, ['safetyBuffer', 'disablePeriodicTidy']); + }), +}); diff --git a/ui/app/models/auth-config/azure.js b/ui/app/models/auth-config/azure.js new file mode 100644 index 0000000..8c2633b --- /dev/null +++ b/ui/app/models/auth-config/azure.js @@ -0,0 +1,45 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { attr } from '@ember-data/model'; +import { computed } from '@ember/object'; +import AuthConfig from '../auth-config'; +import { combineFieldGroups } from 'vault/utils/openapi-to-attrs'; +import fieldToAttrs from 'vault/utils/field-to-attrs'; + +export default AuthConfig.extend({ + useOpenAPI: true, + tenantId: attr('string', { + label: 'Tenant ID', + helpText: 'The tenant ID for the Azure Active Directory organization', + }), + resource: attr('string', { + helpText: 'The configured URL for the application registered in Azure Active Directory', + }), + clientId: attr('string', { + label: 'Client ID', + helpText: + 'The client ID for credentials to query the Azure APIs. Currently read permissions to query compute resources are required.', + }), + clientSecret: attr('string', { + helpText: 'The client secret for credentials to query the Azure APIs', + }), + + googleCertsEndpoint: attr('string'), + + fieldGroups: computed('newFields', function () { + let groups = [ + { default: ['tenantId', 'resource'] }, + { + 'Azure Options': ['clientId', 'clientSecret'], + }, + ]; + if (this.newFields) { + groups = combineFieldGroups(groups, this.newFields, []); + } + + return fieldToAttrs(this, groups); + }), +}); diff --git a/ui/app/models/auth-config/cert.js b/ui/app/models/auth-config/cert.js new file mode 100644 index 0000000..099e01a --- /dev/null +++ b/ui/app/models/auth-config/cert.js @@ -0,0 +1,7 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import AuthConfig from '../auth-config'; +export default AuthConfig.extend({}); diff --git a/ui/app/models/auth-config/gcp.js b/ui/app/models/auth-config/gcp.js new file mode 100644 index 0000000..3b87616 --- /dev/null +++ b/ui/app/models/auth-config/gcp.js @@ -0,0 +1,33 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { attr } from '@ember-data/model'; +import { computed } from '@ember/object'; +import AuthConfig from '../auth-config'; +import { combineFieldGroups } from 'vault/utils/openapi-to-attrs'; +import fieldToAttrs from 'vault/utils/field-to-attrs'; + +export default AuthConfig.extend({ + useOpenAPI: true, + // We have to leave this here because the backend doesn't support the file type yet. + credentials: attr('string', { + editType: 'file', + }), + + googleCertsEndpoint: attr('string'), + + fieldGroups: computed('newFields', function () { + let groups = [ + { default: ['credentials'] }, + { + 'Google Cloud Options': ['googleCertsEndpoint'], + }, + ]; + if (this.newFields) { + groups = combineFieldGroups(groups, this.newFields, []); + } + return fieldToAttrs(this, groups); + }), +}); diff --git a/ui/app/models/auth-config/github.js b/ui/app/models/auth-config/github.js new file mode 100644 index 0000000..76d8b9f --- /dev/null +++ b/ui/app/models/auth-config/github.js @@ -0,0 +1,32 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { attr } from '@ember-data/model'; +import { computed } from '@ember/object'; +import AuthConfig from '../auth-config'; +import fieldToAttrs from 'vault/utils/field-to-attrs'; +import { combineFieldGroups } from 'vault/utils/openapi-to-attrs'; + +export default AuthConfig.extend({ + useOpenAPI: true, + organization: attr('string'), + baseUrl: attr('string', { + label: 'Base URL', + }), + + fieldGroups: computed('newFields', function () { + let groups = [ + { default: ['organization'] }, + { + 'GitHub Options': ['baseUrl'], + }, + ]; + if (this.newFields) { + groups = combineFieldGroups(groups, this.newFields, []); + } + + return fieldToAttrs(this, groups); + }), +}); diff --git a/ui/app/models/auth-config/jwt.js b/ui/app/models/auth-config/jwt.js new file mode 100644 index 0000000..e1e0db5 --- /dev/null +++ b/ui/app/models/auth-config/jwt.js @@ -0,0 +1,67 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { attr } from '@ember-data/model'; +import { computed } from '@ember/object'; +import AuthConfig from '../auth-config'; +import fieldToAttrs from 'vault/utils/field-to-attrs'; +import { combineFieldGroups } from 'vault/utils/openapi-to-attrs'; + +export default AuthConfig.extend({ + useOpenAPI: true, + oidcDiscoveryUrl: attr('string', { + label: 'OIDC discovery URL', + helpText: + 'The OIDC discovery URL, without any .well-known component (base path). Cannot be used with jwt_validation_pubkeys', + }), + + oidcClientId: attr('string', { + label: 'OIDC client ID', + }), + + oidcClientSecret: attr('string', { + label: 'OIDC client secret', + }), + oidcDiscoveryCaPem: attr('string', { + label: 'OIDC discovery CA PEM', + editType: 'file', + helpText: + 'The CA certificate or chain of certificates, in PEM format, to use to validate connections to the OIDC Discovery URL. 
If not set, system certificates are used', + }), + jwtValidationPubkeys: attr({ + label: 'JWT validation public keys', + editType: 'stringArray', + }), + + jwtSupportedAlgs: attr({ + label: 'JWT supported algorithms', + }), + boundIssuer: attr('string', { + helpText: 'The value against which to match the iss claim in a JWT', + }), + fieldGroups: computed('constructor.modelName', 'newFields', function () { + const type = this.constructor.modelName.split('/')[1].toUpperCase(); + let groups = [ + { + default: ['oidcDiscoveryUrl', 'defaultRole'], + }, + { + [`${type} Options`]: [ + 'oidcClientId', + 'oidcClientSecret', + 'oidcDiscoveryCaPem', + 'jwtValidationPubkeys', + 'jwtSupportedAlgs', + 'boundIssuer', + ], + }, + ]; + + if (this.newFields) { + groups = combineFieldGroups(groups, this.newFields, []); + } + return fieldToAttrs(this, groups); + }), +}); diff --git a/ui/app/models/auth-config/kubernetes.js b/ui/app/models/auth-config/kubernetes.js new file mode 100644 index 0000000..9a1ccc0 --- /dev/null +++ b/ui/app/models/auth-config/kubernetes.js @@ -0,0 +1,49 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { attr } from '@ember-data/model'; +import { computed } from '@ember/object'; + +import AuthConfig from '../auth-config'; +import { combineFieldGroups } from 'vault/utils/openapi-to-attrs'; +import fieldToAttrs from 'vault/utils/field-to-attrs'; + +export default AuthConfig.extend({ + useOpenAPI: true, + kubernetesHost: attr('string', { + helpText: + 'Host must be a host string, a host:port pair, or a URL to the base of the Kubernetes API server', + }), + + kubernetesCaCert: attr('string', { + editType: 'file', + helpText: 'PEM encoded CA cert for use by the TLS client used to talk with the Kubernetes API', + }), + + tokenReviewerJwt: attr('string', { + helpText: + 'A service account JWT used to access the TokenReview API to validate other JWTs during login. If not set the JWT used for login will be used to access the API', + }), + + pemKeys: attr({ + editType: 'stringArray', + }), + + fieldGroups: computed('newFields', function () { + let groups = [ + { + default: ['kubernetesHost', 'kubernetesCaCert'], + }, + { + 'Kubernetes Options': ['tokenReviewerJwt', 'pemKeys'], + }, + ]; + if (this.newFields) { + groups = combineFieldGroups(groups, this.newFields, []); + } + + return fieldToAttrs(this, groups); + }), +}); diff --git a/ui/app/models/auth-config/ldap.js b/ui/app/models/auth-config/ldap.js new file mode 100644 index 0000000..a5fb4ca --- /dev/null +++ b/ui/app/models/auth-config/ldap.js @@ -0,0 +1,52 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { attr } from '@ember-data/model'; +import { computed } from '@ember/object'; + +import AuthConfig from '../auth-config'; +import fieldToAttrs from 'vault/utils/field-to-attrs'; +import { combineFieldGroups } from 'vault/utils/openapi-to-attrs'; + +export default AuthConfig.extend({ + useOpenAPI: true, + certificate: attr({ + label: 'Certificate', + editType: 'file', + }), + fieldGroups: computed('newFields', function () { + let groups = [ + { + default: ['url'], + }, + { + 'LDAP Options': [ + 'starttls', + 'insecureTls', + 'discoverdn', + 'denyNullBind', + 'tlsMinVersion', + 'tlsMaxVersion', + 'certificate', + 'clientTlsCert', + 'clientTlsKey', + 'userattr', + 'upndomain', + 'anonymousGroupSearch', + ], + }, + { + 'Customize User Search': ['binddn', 'userdn', 'bindpass', 'userfilter'], + }, + { + 'Customize Group Membership Search': ['groupfilter', 'groupattr', 'groupdn', 'useTokenGroups'], + }, + ]; + if (this.newFields) { + groups = combineFieldGroups(groups, this.newFields, []); + } + return fieldToAttrs(this, groups); + }), +}); diff --git a/ui/app/models/auth-config/oidc.js b/ui/app/models/auth-config/oidc.js new file mode 100644 index 0000000..0cc4da7 --- /dev/null +++ b/ui/app/models/auth-config/oidc.js @@ -0,0 +1,6 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +export { default } from './jwt'; diff --git a/ui/app/models/auth-config/okta.js b/ui/app/models/auth-config/okta.js new file mode 100644 index 0000000..44aa8c8 --- /dev/null +++ b/ui/app/models/auth-config/okta.js @@ -0,0 +1,46 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { attr } from '@ember-data/model'; +import { computed } from '@ember/object'; +import AuthConfig from '../auth-config'; +import fieldToAttrs from 'vault/utils/field-to-attrs'; +import { combineFieldGroups } from 'vault/utils/openapi-to-attrs'; + +export default AuthConfig.extend({ + useOpenAPI: true, + orgName: attr('string', { + helpText: 'Name of the organization to be used in the Okta API', + }), + apiToken: attr('string', { + helpText: + 'Okta API token. This is required to query Okta for user group membership. If this is not supplied only locally configured groups will be enabled.', + }), + baseUrl: attr('string', { + helpText: + 'If set, will be used as the base domain for API requests. Examples are okta.com, oktapreview.com, and okta-emea.com', + }), + bypassOktaMfa: attr('boolean', { + defaultValue: false, + helpText: + "Useful if using Vault's built-in MFA mechanisms. Will also cause certain other statuses to be ignored, such as PASSWORD_EXPIRED", + }), + + fieldGroups: computed('newFields', function () { + let groups = [ + { + default: ['orgName'], + }, + { + Options: ['apiToken', 'baseUrl', 'bypassOktaMfa'], + }, + ]; + if (this.newFields) { + groups = combineFieldGroups(groups, this.newFields, []); + } + + return fieldToAttrs(this, groups); + }), +}); diff --git a/ui/app/models/auth-config/radius.js b/ui/app/models/auth-config/radius.js new file mode 100644 index 0000000..96752f1 --- /dev/null +++ b/ui/app/models/auth-config/radius.js @@ -0,0 +1,32 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { attr } from '@ember-data/model'; +import { computed } from '@ember/object'; +import AuthConfig from '../auth-config'; +import { combineFieldGroups } from 'vault/utils/openapi-to-attrs'; +import fieldToAttrs from 'vault/utils/field-to-attrs'; + +export default AuthConfig.extend({ + useOpenAPI: true, + host: attr('string'), + secret: attr('string'), + + fieldGroups: computed('newFields', function () { + let groups = [ + { + default: ['host', 'secret'], + }, + { + 'RADIUS Options': ['port', 'nasPort', 'nasIdentifier', 'dialTimeout', 'unregisteredUserPolicies'], + }, + ]; + if (this.newFields) { + groups = combineFieldGroups(groups, this.newFields, []); + } + + return fieldToAttrs(this, groups); + }), +}); diff --git a/ui/app/models/auth-config/userpass.js b/ui/app/models/auth-config/userpass.js new file mode 100644 index 0000000..099e01a --- /dev/null +++ b/ui/app/models/auth-config/userpass.js @@ -0,0 +1,7 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import AuthConfig from '../auth-config'; +export default AuthConfig.extend({}); diff --git a/ui/app/models/auth-method.js b/ui/app/models/auth-method.js new file mode 100644 index 0000000..9cc3654 --- /dev/null +++ b/ui/app/models/auth-method.js @@ -0,0 +1,135 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { belongsTo, hasMany, attr } from '@ember-data/model'; +import { alias } from '@ember/object/computed'; // eslint-disable-line +import { computed } from '@ember/object'; // eslint-disable-line +import { inject as service } from '@ember/service'; +import fieldToAttrs, { expandAttributeMeta } from 'vault/utils/field-to-attrs'; +import apiPath from 'vault/utils/api-path'; +import attachCapabilities from 'vault/lib/attach-capabilities'; +import { withModelValidations } from 'vault/decorators/model-validations'; + +const validations = { + path: [ + { type: 'presence', message: "Path can't be blank." }, + { + type: 'containsWhiteSpace', + message: + "Path contains whitespace. If this is desired, you'll need to encode it with %20 in API requests.", + level: 'warn', + }, + ], +}; + +// unsure if ember-api-actions will work on native JS class model +// for now create class to use validations and then use classic extend pattern +@withModelValidations(validations) +class AuthMethodModel extends Model {} +const ModelExport = AuthMethodModel.extend({ + store: service(), + + config: belongsTo('mount-config', { async: false, inverse: null }), // one-to-none that replaces former fragment + authConfigs: hasMany('auth-config', { polymorphic: true, inverse: 'backend', async: false }), + path: attr('string'), + accessor: attr('string'), + name: attr('string'), + type: attr('string'), + // namespaces introduced types with a `ns_` prefix for built-in engines + // so we need to strip that to normalize the type + methodType: computed('type', function () { + return this.type.replace(/^ns_/, ''); + }), + description: attr('string', { + editType: 'textarea', + }), + local: attr('boolean', { + helpText: + 'When Replication is enabled, a local mount will not be replicated across clusters. This can only be specified at mount time.', + }), + sealWrap: attr('boolean', { + helpText: + 'When enabled - if a seal supporting seal wrapping is specified in the configuration, all critical security parameters (CSPs) in this backend will be seal wrapped. (For K/V mounts, all values will be seal wrapped.) 
This can only be specified at mount time.', + }), + + // used when the `auth` prefix is important, + // currently only when setting perf mount filtering + apiPath: computed('path', function () { + return `auth/${this.path}`; + }), + localDisplay: computed('local', function () { + return this.local ? 'local' : 'replicated'; + }), + + tuneAttrs: computed('path', function () { + const { methodType } = this; + let tuneAttrs; + // token_type should not be tuneable for the token auth method + if (methodType === 'token') { + tuneAttrs = [ + 'description', + 'config.{listingVisibility,defaultLeaseTtl,maxLeaseTtl,auditNonHmacRequestKeys,auditNonHmacResponseKeys,passthroughRequestHeaders}', + ]; + } else { + tuneAttrs = [ + 'description', + 'config.{listingVisibility,defaultLeaseTtl,maxLeaseTtl,tokenType,auditNonHmacRequestKeys,auditNonHmacResponseKeys,passthroughRequestHeaders}', + ]; + } + return expandAttributeMeta(this, tuneAttrs); + }), + + formFields: computed(function () { + return [ + 'type', + 'path', + 'description', + 'accessor', + 'local', + 'sealWrap', + 'config.{listingVisibility,defaultLeaseTtl,maxLeaseTtl,tokenType,auditNonHmacRequestKeys,auditNonHmacResponseKeys,passthroughRequestHeaders}', + ]; + }), + + formFieldGroups: computed(function () { + return [ + { default: ['path'] }, + { + 'Method Options': [ + 'description', + 'config.listingVisibility', + 'local', + 'sealWrap', + 'config.{defaultLeaseTtl,maxLeaseTtl,tokenType,auditNonHmacRequestKeys,auditNonHmacResponseKeys,passthroughRequestHeaders}', + ], + }, + ]; + }), + + attrs: computed('formFields', function () { + return expandAttributeMeta(this, this.formFields); + }), + + fieldGroups: computed('formFieldGroups', function () { + return fieldToAttrs(this, this.formFieldGroups); + }), + canDisable: alias('deletePath.canDelete'), + canEdit: alias('configPath.canUpdate'), + + tune(data) { + return this.store.adapterFor('auth-method').tune(this.path, data); + }, +}); + +export default attachCapabilities(ModelExport, { + deletePath: apiPath`sys/auth/${'id'}`, + configPath: function (context) { + if (context.type === 'aws') { + return apiPath`auth/${'id'}/config/client`.call(this, context); + } else { + return apiPath`auth/${'id'}/config`.call(this, context); + } + }, +}); diff --git a/ui/app/models/aws-credential.js b/ui/app/models/aws-credential.js new file mode 100644 index 0000000..1c8051f --- /dev/null +++ b/ui/app/models/aws-credential.js @@ -0,0 +1,88 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import { computed } from '@ember/object'; +import { expandAttributeMeta } from 'vault/utils/field-to-attrs'; +const CREDENTIAL_TYPES = [ + { + value: 'iam_user', + displayName: 'IAM User', + }, + { + value: 'assumed_role', + displayName: 'Assumed Role', + }, + { + value: 'federation_token', + displayName: 'Federation Token', + }, +]; + +const DISPLAY_FIELDS = ['accessKey', 'secretKey', 'securityToken', 'leaseId', 'renewable', 'leaseDuration']; +export default Model.extend({ + helpText: + 'For Vault roles of credential type iam_user, there are no inputs, just submit the form. Choose a type to change the input options.', + role: attr('object', { + readOnly: true, + }), + + credentialType: attr('string', { + defaultValue: 'iam_user', + possibleValues: CREDENTIAL_TYPES, + readOnly: true, + }), + + roleArn: attr('string', { + label: 'Role ARN', + helpText: + 'The ARN of the role to assume if credential_type on the Vault role is assumed_role. 
Optional if the role has a single role ARN; required otherwise.', + }), + + ttl: attr({ + editType: 'ttl', + defaultValue: '3600s', + setDefault: true, + label: 'TTL', + helpText: + 'Specifies the TTL for the use of the STS token. Valid only when credential_type is assumed_role or federation_token.', + }), + leaseId: attr('string'), + renewable: attr('boolean'), + leaseDuration: attr('number'), + accessKey: attr('string'), + secretKey: attr('string'), + securityToken: attr('string'), + + attrs: computed('credentialType', 'accessKey', 'securityToken', function () { + const type = this.credentialType; + const fieldsForType = { + iam_user: ['credentialType'], + assumed_role: ['credentialType', 'ttl', 'roleArn'], + federation_token: ['credentialType', 'ttl'], + }; + if (this.accessKey || this.securityToken) { + return expandAttributeMeta(this, DISPLAY_FIELDS.slice(0)); + } + return expandAttributeMeta(this, fieldsForType[type].slice(0)); + }), + + toCreds: computed('accessKey', 'secretKey', 'securityToken', 'leaseId', function () { + const props = { + accessKey: this.accessKey, + secretKey: this.secretKey, + securityToken: this.securityToken, + leaseId: this.leaseId, + }; + const propsWithVals = Object.keys(props).reduce((ret, prop) => { + if (props[prop]) { + ret[prop] = props[prop]; + return ret; + } + return ret; + }, {}); + return JSON.stringify(propsWithVals, null, 2); + }), +}); diff --git a/ui/app/models/capabilities.js b/ui/app/models/capabilities.js new file mode 100644 index 0000000..cdd7b34 --- /dev/null +++ b/ui/app/models/capabilities.js @@ -0,0 +1,58 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +// This model represents the capabilities on a given `path` +// `path` is also the primaryId +// https://www.vaultproject.io/docs/concepts/policies.html#capabilities + +import Model, { attr } from '@ember-data/model'; + +import { computed } from '@ember/object'; + +const SUDO_PATHS = [ + 'sys/seal', + 'sys/replication/performance/primary/secondary-token', + 'sys/replication/dr/primary/secondary-token', + 'sys/replication/reindex', + 'sys/leases/lookup/', +]; + +const SUDO_PATH_PREFIXES = ['sys/leases/revoke-prefix', 'sys/leases/revoke-force']; + +export { SUDO_PATHS, SUDO_PATH_PREFIXES }; + +const computedCapability = function (capability) { + return computed('path', 'capabilities', 'capabilities.[]', function () { + const capabilities = this.capabilities; + const path = this.path; + if (!capabilities) { + return false; + } + if (capabilities.includes('root')) { + return true; + } + if (capabilities.includes('deny')) { + return false; + } + // if the path is sudo protected, they'll need sudo + the appropriate capability + if (SUDO_PATHS.includes(path) || SUDO_PATH_PREFIXES.find((item) => path.startsWith(item))) { + return capabilities.includes('sudo') && capabilities.includes(capability); + } + return capabilities.includes(capability); + }); +}; + +export default Model.extend({ + path: attr('string'), + capabilities: attr('array'), + canSudo: computedCapability('sudo'), + canRead: computedCapability('read'), + canCreate: computedCapability('create'), + canUpdate: computedCapability('update'), + canDelete: computedCapability('delete'), + canList: computedCapability('list'), + allowedParameters: attr(), + deniedParameters: attr(), +}); diff --git a/ui/app/models/clients/activity.js b/ui/app/models/clients/activity.js new file mode 100644 index 0000000..4f80ef4 --- /dev/null +++ b/ui/app/models/clients/activity.js @@ -0,0 +1,14 @@ +/** + * Copyright 
(c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +export default class Activity extends Model { + @attr('array') byMonth; + @attr('array') byNamespace; + @attr('object') total; + @attr('string') startTime; + @attr('string') endTime; + @attr('string') responseTimestamp; +} diff --git a/ui/app/models/clients/config.js b/ui/app/models/clients/config.js new file mode 100644 index 0000000..0be017a --- /dev/null +++ b/ui/app/models/clients/config.js @@ -0,0 +1,48 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; +import { withFormFields } from 'vault/decorators/model-form-fields'; +import { withModelValidations } from 'vault/decorators/model-validations'; + +const validations = { + retentionMonths: [ + { + validator: (model) => parseInt(model.retentionMonths) >= model.minimumRetentionMonths, + message: (model) => + `Retention period must be greater than or equal to ${model.minimumRetentionMonths}.`, + }, + ], +}; + +@withModelValidations(validations) +@withFormFields(['enabled', 'retentionMonths']) +export default class ClientsConfigModel extends Model { + @attr('boolean') queriesAvailable; // true only if historical data exists, will be false if there is only current month data + + @attr('number', { + label: 'Retention period', + subText: 'The number of months of activity logs to maintain for client tracking.', + }) + retentionMonths; + + @attr('number') minimumRetentionMonths; + + @attr('string') enabled; + + @attr('boolean') reportingEnabled; + + @attr('date') billingStartTimestamp; + + @lazyCapabilities(apiPath`sys/internal/counters/config`) configPath; + + get canRead() { + return this.configPath.get('canRead') !== false; + } + get canEdit() { + return this.configPath.get('canUpdate') !== false; + } +} diff --git a/ui/app/models/clients/version-history.js b/ui/app/models/clients/version-history.js new file mode 100644 index 0000000..fae00d2 --- /dev/null +++ b/ui/app/models/clients/version-history.js @@ -0,0 +1,11 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +export default class VersionHistoryModel extends Model { + @attr('string') version; + @attr('string') previousVersion; + @attr('string') timestampInstalled; +} diff --git a/ui/app/models/cluster.js b/ui/app/models/cluster.js new file mode 100644 index 0000000..0a8600a --- /dev/null +++ b/ui/app/models/cluster.js @@ -0,0 +1,79 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr, belongsTo, hasMany } from '@ember-data/model'; +import { inject as service } from '@ember/service'; +import { alias, and, equal, gte, not, or } from '@ember/object/computed'; +import { get, computed } from '@ember/object'; + +export default Model.extend({ + version: service(), + + nodes: hasMany('nodes', { async: false }), + name: attr('string'), + status: attr('string'), + standby: attr('boolean'), + type: attr('string'), + license: attr('object'), + + /* Licensing concerns */ + licenseExpiry: alias('license.expiry_time'), + licenseState: alias('license.state'), + + needsInit: computed('nodes', 'nodes.@each.initialized', function () { + // needs init if no nodes are initialized + return this.nodes.isEvery('initialized', false); + }), + + unsealed: computed('nodes', 'nodes.{[],@each.sealed}', function () { + // unsealed if there's at least one unsealed node + return !!this.nodes.findBy('sealed', false); + }), + + sealed: not('unsealed'), + + leaderNode: computed('nodes', 'nodes.[]', function () { + const nodes = this.nodes; + if (nodes.get('length') === 1) { + return nodes.get('firstObject'); + } else { + return nodes.findBy('isLeader'); + } + }), + + sealThreshold: alias('leaderNode.sealThreshold'), + sealProgress: alias('leaderNode.progress'), + sealType: alias('leaderNode.type'), + storageType: alias('leaderNode.storageType'), + hcpLinkStatus: alias('leaderNode.hcpLinkStatus'), + hasProgress: gte('sealProgress', 1), + usingRaft: equal('storageType', 'raft'), + + //replication mode - will only ever be 'unsupported' + //otherwise the particular mode will have the relevant mode attr through replication-attributes + mode: attr('string'), + allReplicationDisabled: and('{dr,performance}.replicationDisabled'), + anyReplicationEnabled: or('{dr,performance}.replicationEnabled'), + + dr: belongsTo('replication-attributes', { async: false, inverse: null }), + performance: belongsTo('replication-attributes', { async: false, inverse: null }), + // this service exposes what mode the UI is currently viewing + // replicationAttrs will then return the relevant `replication-attributes` model + rm: service('replication-mode'), + drMode: alias('dr.mode'), + replicationMode: alias('rm.mode'), + replicationModeForDisplay: computed('replicationMode', function () { + return this.replicationMode === 'dr' ? 'Disaster Recovery' : 'Performance'; + }), + replicationIsInitializing: computed('dr.mode', 'performance.mode', function () { + // a mode of null only happens when a cluster is being initialized + // otherwise the mode will be 'disabled', 'primary', 'secondary' + return !this.dr.mode || !this.performance.mode; + }), + replicationAttrs: computed('dr.mode', 'performance.mode', 'replicationMode', function () { + const replicationMode = this.replicationMode; + return replicationMode ? get(this, replicationMode) : null; + }), +}); diff --git a/ui/app/models/control-group-config.js b/ui/app/models/control-group-config.js new file mode 100644 index 0000000..3125726 --- /dev/null +++ b/ui/app/models/control-group-config.js @@ -0,0 +1,25 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import { alias } from '@ember/object/computed'; +import { computed } from '@ember/object'; +import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; + +import { expandAttributeMeta } from 'vault/utils/field-to-attrs'; + +export default Model.extend({ + fields: computed(function () { + return expandAttributeMeta(this, ['maxTtl']); + }), + + configurePath: lazyCapabilities(apiPath`sys/config/control-group`), + canDelete: alias('configurePath.canDelete'), + maxTtl: attr({ + defaultValue: 0, + editType: 'ttl', + label: 'Maximum TTL', + }), +}); diff --git a/ui/app/models/control-group.js b/ui/app/models/control-group.js new file mode 100644 index 0000000..f6d4efb --- /dev/null +++ b/ui/app/models/control-group.js @@ -0,0 +1,20 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { hasMany, belongsTo, attr } from '@ember-data/model'; +import { alias } from '@ember/object/computed'; +import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; + +export default Model.extend({ + approved: attr('boolean'), + requestPath: attr('string'), + requestEntity: belongsTo('identity/entity', { async: false }), + authorizations: hasMany('identity/entity', { async: false }), + + authorizePath: lazyCapabilities(apiPath`sys/control-group/authorize`), + canAuthorize: alias('authorizePath.canUpdate'), + configurePath: lazyCapabilities(apiPath`sys/config/control-group`), + canConfigure: alias('configurePath.canUpdate'), +}); diff --git a/ui/app/models/database/connection.js b/ui/app/models/database/connection.js new file mode 100644 index 0000000..a0ae34c --- /dev/null +++ b/ui/app/models/database/connection.js @@ -0,0 +1,207 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import { computed } from '@ember/object'; +import { alias, or } from '@ember/object/computed'; +import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; +import fieldToAttrs, { expandAttributeMeta } from 'vault/utils/field-to-attrs'; +import { AVAILABLE_PLUGIN_TYPES } from '../../utils/database-helpers'; + +/** + * fieldsToGroups helper fn + * @param {array} arr any subset of "fields" from AVAILABLE_PLUGIN_TYPES + * @param {*} key item by which to group the fields. If item has no group it will be under "default" + * @returns array of objects where the key is default or the name of the option group, and the value is an array of attr names + */ +const fieldsToGroups = function (arr, key = 'subgroup') { + const fieldGroups = []; + const byGroup = arr.reduce(function (rv, x) { + (rv[x[key]] = rv[x[key]] || []).push(x); + return rv; + }, {}); + Object.keys(byGroup).forEach((key) => { + const attrsArray = byGroup[key].map((obj) => obj.attr); + const group = key === 'undefined' ? 
'default' : key; + fieldGroups.push({ [group]: attrsArray }); + }); + return fieldGroups; +}; + +export default Model.extend({ + backend: attr('string', { + readOnly: true, + }), + // required + name: attr('string', { + label: 'Connection name', + }), + plugin_name: attr('string', { + label: 'Database plugin', + possibleValues: AVAILABLE_PLUGIN_TYPES, + noDefault: true, + }), + + // standard + verify_connection: attr('boolean', { + label: 'Connection will be verified', + defaultValue: true, + }), + allowed_roles: attr('array', { + readOnly: true, + }), + password_policy: attr('string', { + label: 'Use custom password policy', + editType: 'optionalText', + subText: 'Specify the name of an existing password policy.', + defaultSubText: + 'Unless a custom policy is specified, Vault will use a default: 20 characters with at least 1 uppercase, 1 lowercase, 1 number, and 1 dash character.', + defaultShown: 'Default', + docLink: '/vault/docs/concepts/password-policies', + }), + + // common fields + connection_url: attr('string', { + label: 'Connection URL', + subText: + 'The connection string used to connect to the database. This allows for simple templating of username and password of the root user in the {{field_name}} format.', + }), + url: attr('string', { + label: 'URL', + subText: `The URL for Elasticsearch's API ("https://localhost:9200").`, + }), + username: attr('string', { + subText: `The name of the user to use as the "root" user when connecting to the database.`, + }), + password: attr('string', { + subText: 'The password to use when connecting with the above username.', + editType: 'password', + }), + + // optional + ca_cert: attr('string', { + label: 'CA certificate', + subText: `The path to a PEM-encoded CA cert file to use to verify the Elasticsearch server's identity.`, + }), + ca_path: attr('string', { + label: 'CA path', + subText: `The path to a directory of PEM-encoded CA cert files to use to verify the Elasticsearch server's identity.`, + }), + client_cert: attr('string', { + label: 'Client certificate', + subText: 'The path to the certificate for the Elasticsearch client to present for communication.', + }), + client_key: attr('string', { + subText: 'The path to the key for the Elasticsearch client to use for communication.', + }), + hosts: attr('string', {}), + host: attr('string', {}), + port: attr('string', {}), + write_concern: attr('string', { + subText: 'Optional. Must be in JSON. See our documentation for help.', + allowReset: true, + editType: 'json', + theme: 'hashi short', + defaultShown: 'Default', + }), + username_template: attr('string', { + editType: 'optionalText', + subText: 'Enter the custom username template to use.', + defaultSubText: + 'Template describing how dynamic usernames are generated. Vault will use the default for this plugin.', + docLink: '/vault/docs/concepts/username-templating', + defaultShown: 'Default', + }), + max_open_connections: attr('number', { + defaultValue: 4, + }), + max_idle_connections: attr('number', { + defaultValue: 0, + }), + max_connection_lifetime: attr('string', { + defaultValue: '0s', + }), + insecure: attr('boolean', { + label: 'Disable SSL verification', + defaultValue: false, + }), + tls: attr('string', { + label: 'TLS Certificate Key', + helpText: + 'x509 certificate for connecting to the database. 
This must be a PEM encoded version of the private key and the certificate combined.',
+    editType: 'file',
+  }),
+  tls_ca: attr('string', {
+    label: 'TLS CA',
+    helpText:
+      'x509 CA file for validating the certificate presented by the MongoDB server. Must be PEM encoded.',
+    editType: 'file',
+  }),
+  tls_server_name: attr('string', {
+    label: 'TLS server name',
+    subText: 'If set, this name is used to set the SNI host when connecting via TLS.',
+  }),
+  root_rotation_statements: attr({
+    subText: `The database statements to be executed to rotate the root user's credentials. If nothing is entered, Vault will use a reasonable default.`,
+    editType: 'stringArray',
+    defaultShown: 'Default',
+  }),
+
+  isAvailablePlugin: computed('plugin_name', function () {
+    return !!AVAILABLE_PLUGIN_TYPES.find((a) => a.value === this.plugin_name);
+  }),
+
+  showAttrs: computed('plugin_name', function () {
+    const fields = AVAILABLE_PLUGIN_TYPES.find((a) => a.value === this.plugin_name)
+      .fields.filter((f) => f.show !== false)
+      .map((f) => f.attr);
+    fields.push('allowed_roles');
+    return expandAttributeMeta(this, fields);
+  }),
+
+  fieldAttrs: computed('plugin_name', function () {
+    // for both create and edit fields
+    let fields = ['plugin_name', 'name', 'connection_url', 'verify_connection', 'password_policy'];
+    if (this.plugin_name) {
+      fields = AVAILABLE_PLUGIN_TYPES.find((a) => a.value === this.plugin_name)
+        .fields.filter((f) => !f.group)
+        .map((field) => field.attr);
+    }
+    return expandAttributeMeta(this, fields);
+  }),
+
+  pluginFieldGroups: computed('plugin_name', function () {
+    if (!this.plugin_name) {
+      return null;
+    }
+    const pluginFields = AVAILABLE_PLUGIN_TYPES.find((a) => a.value === this.plugin_name).fields.filter(
+      (f) => f.group === 'pluginConfig'
+    );
+    const groups = fieldsToGroups(pluginFields, 'subgroup');
+    return fieldToAttrs(this, groups);
+  }),
+
+  statementFields: computed('plugin_name', function () {
+    if (!this.plugin_name) {
+      return expandAttributeMeta(this, ['root_rotation_statements']);
+    }
+    const fields = AVAILABLE_PLUGIN_TYPES.find((a) => a.value === this.plugin_name)
+      .fields.filter((f) => f.group === 'statements')
+      .map((field) => field.attr);
+    return expandAttributeMeta(this, fields);
+  }),
+
+  /* CAPABILITIES */
+  editConnectionPath: lazyCapabilities(apiPath`${'backend'}/config/${'id'}`, 'backend', 'id'),
+  canEdit: alias('editConnectionPath.canUpdate'),
+  canDelete: alias('editConnectionPath.canDelete'),
+  resetConnectionPath: lazyCapabilities(apiPath`${'backend'}/reset/${'id'}`, 'backend', 'id'),
+  canReset: or('resetConnectionPath.canUpdate', 'resetConnectionPath.canCreate'),
+  rotateRootPath: lazyCapabilities(apiPath`${'backend'}/rotate-root/${'id'}`, 'backend', 'id'),
+  canRotateRoot: or('rotateRootPath.canUpdate', 'rotateRootPath.canCreate'),
+  rolePath: lazyCapabilities(apiPath`${'backend'}/role/*`, 'backend'),
+  staticRolePath: lazyCapabilities(apiPath`${'backend'}/static-role/*`, 'backend'),
+  canAddRole: or('rolePath.canCreate', 'staticRolePath.canCreate'),
+});
diff --git a/ui/app/models/database/credential.js b/ui/app/models/database/credential.js
new file mode 100644
index 0000000..110fa4e
--- /dev/null
+++ b/ui/app/models/database/credential.js
@@ -0,0 +1,17 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import Model, { attr } from '@ember-data/model';
+
+export default Model.extend({
+  username: attr('string'),
+  password: attr('string'),
+  leaseId: attr('string'),
+  leaseDuration: attr('string'),
+  lastVaultRotation: attr('string'),
+  rotationPeriod: attr('number'),
+  ttl: attr('number'),
+  roleType: attr('string'),
+});
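To make the grouping behavior of the fieldsToGroups helper (defined near the top of the connection.js diff above) concrete, a worked illustration with invented field data:

```js
// Invented input for illustration; `fieldsToGroups` is the helper from
// ui/app/models/database/connection.js above.
const fields = [
  { attr: 'username' }, // no subgroup, so it lands under "default"
  { attr: 'password' },
  { attr: 'tls', subgroup: 'TLS options' },
  { attr: 'tls_ca', subgroup: 'TLS options' },
];

// fieldsToGroups(fields, 'subgroup') would return:
// [
//   { default: ['username', 'password'] },
//   { 'TLS options': ['tls', 'tls_ca'] },
// ]
```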
diff --git a/ui/app/models/database/role.js b/ui/app/models/database/role.js
new file mode 100644
index 0000000..3fd022a
--- /dev/null
+++ b/ui/app/models/database/role.js
@@ -0,0 +1,143 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import Model, { attr } from '@ember-data/model';
+import { computed } from '@ember/object';
+import { alias } from '@ember/object/computed';
+import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities';
+import { expandAttributeMeta } from 'vault/utils/field-to-attrs';
+import { getRoleFields } from 'vault/utils/database-helpers';
+
+export default Model.extend({
+  idPrefix: 'role/',
+  backend: attr('string', { readOnly: true }),
+  name: attr('string', {
+    label: 'Role name',
+  }),
+  database: attr('array', {
+    label: 'Connection name',
+    editType: 'searchSelect',
+    fallbackComponent: 'string-list',
+    models: ['database/connection'],
+    selectLimit: 1,
+    onlyAllowExisting: true,
+    subText: 'The database connection for which credentials will be generated.',
+  }),
+  type: attr('string', {
+    label: 'Type of role',
+    noDefault: true,
+    possibleValues: ['static', 'dynamic'],
+  }),
+  default_ttl: attr({
+    editType: 'ttl',
+    defaultValue: '1h',
+    label: 'Generated credentials’ Time-to-Live (TTL)',
+    helperTextDisabled: 'Vault will use a TTL of 1 hour.',
+    defaultShown: 'Engine default',
+  }),
+  max_ttl: attr({
+    editType: 'ttl',
+    defaultValue: '24h',
+    label: 'Generated credentials’ maximum Time-to-Live (Max TTL)',
+    helperTextDisabled: 'Vault will use a TTL of 24 hours.',
+    defaultShown: 'Engine default',
+  }),
+  username: attr('string', {
+    subText: 'The database username that this Vault role corresponds to.',
+  }),
+  rotation_period: attr({
+    editType: 'ttl',
+    defaultValue: '24h',
+    helperTextDisabled:
+      'Specifies the amount of time Vault should wait before rotating the password. The minimum is 5 seconds. Default is 24 hours.',
+    helperTextEnabled: 'Vault will rotate password after',
+  }),
+  creation_statements: attr('array', {
+    editType: 'stringArray',
+  }),
+  revocation_statements: attr('array', {
+    editType: 'stringArray',
+    defaultShown: 'Default',
+  }),
+  rotation_statements: attr('array', {
+    editType: 'stringArray',
+    defaultShown: 'Default',
+  }),
+  rollback_statements: attr('array', {
+    editType: 'stringArray',
+    defaultShown: 'Default',
+  }),
+  renew_statements: attr('array', {
+    editType: 'stringArray',
+    defaultShown: 'Default',
+  }),
+  creation_statement: attr('string', {
+    editType: 'json',
+    allowReset: true,
+    theme: 'hashi short',
+    defaultShown: 'Default',
+  }),
+  revocation_statement: attr('string', {
+    editType: 'json',
+    allowReset: true,
+    theme: 'hashi short',
+    defaultShown: 'Default',
+  }),
+
+  /* FIELD ATTRIBUTES */
+  get fieldAttrs() {
+    // Main fields on edit/create form
+    const fields = ['name', 'database', 'type'];
+    return expandAttributeMeta(this, fields);
+  },
+
+  get showFields() {
+    let fields = ['name', 'database', 'type'];
+    fields = fields.concat(getRoleFields(this.type)).concat(['creation_statements']);
+    // elasticsearch does not support revocation statements: https://www.vaultproject.io/api-docs/secret/databases/elasticdb#parameters-1
+    if (this.database[0] !== 'elasticsearch') {
+      fields = fields.concat(['revocation_statements']);
+    }
+    return expandAttributeMeta(this, fields);
+  },
+
+  roleSettingAttrs: computed(function () {
+    // logic for which fields get displayed is in DatabaseRoleSettingForm
+    const allRoleSettingFields = [
+      'default_ttl',
+      'max_ttl',
+      'username',
+      'rotation_period',
+      'creation_statements',
+      'creation_statement', // for editType: JSON
+      'revocation_statements',
+      'revocation_statement', // only for MongoDB (editType: JSON)
+      'rotation_statements',
+      'rollback_statements',
+      'renew_statements',
+    ];
+    return expandAttributeMeta(this, allRoleSettingFields);
+  }),
+
+  /* CAPABILITIES */
+  // only used for secretPath
+  path: attr('string', { readOnly: true }),
+
+  secretPath: lazyCapabilities(apiPath`${'backend'}/${'path'}/${'id'}`, 'backend', 'path', 'id'),
+  canEditRole: alias('secretPath.canUpdate'),
+  canDelete: alias('secretPath.canDelete'),
+  dynamicPath: lazyCapabilities(apiPath`${'backend'}/roles/+`, 'backend'),
+  canCreateDynamic: alias('dynamicPath.canCreate'),
+  staticPath: lazyCapabilities(apiPath`${'backend'}/static-roles/+`, 'backend'),
+  canCreateStatic: alias('staticPath.canCreate'),
+  credentialPath: lazyCapabilities(apiPath`${'backend'}/creds/${'id'}`, 'backend', 'id'),
+  staticCredentialPath: lazyCapabilities(apiPath`${'backend'}/static-creds/${'id'}`, 'backend', 'id'),
+  canGenerateCredentials: alias('credentialPath.canRead'),
+  canGetCredentials: alias('staticCredentialPath.canRead'),
+  databasePath: lazyCapabilities(apiPath`${'backend'}/config/${'database[0]'}`, 'backend', 'database'),
+  canUpdateDb: alias('databasePath.canUpdate'),
+  rotateRolePath: lazyCapabilities(apiPath`${'backend'}/rotate-role/${'id'}`, 'backend', 'id'),
+  canRotateRoleCredentials: alias('rotateRolePath.canUpdate'),
+});
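The capability aliases at the bottom of the role model differ by role type: dynamic roles read from creds/:id, while static roles read from static-creds/:id. A hedged sketch of a consumer, where the component is hypothetical and only the model's own properties are assumed:

```js
// Hypothetical Glimmer component reading the capability aliases defined
// on the database/role model above.
import Component from '@glimmer/component';

export default class RoleCredentialsButton extends Component {
  get canFetchCreds() {
    const role = this.args.role; // assumed to be a database/role record
    // static roles read existing credentials; dynamic roles generate them
    return role.type === 'static' ? role.canGetCredentials : role.canGenerateCredentials;
  }
}
```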
diff --git a/ui/app/models/identity/_base.js b/ui/app/models/identity/_base.js
new file mode 100644
index 0000000..37971c2
--- /dev/null
+++ b/ui/app/models/identity/_base.js
@@ -0,0 +1,24 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Model from '@ember-data/model'; +import { assert } from '@ember/debug'; +import { computed } from '@ember/object'; +import { expandAttributeMeta } from 'vault/utils/field-to-attrs'; + +export default Model.extend({ + formFields: computed(function () { + return assert('formFields should be overridden', false); + }), + + fields: computed('formFields', 'formFields.[]', function () { + return expandAttributeMeta(this, this.formFields); + }), + + identityType: computed('constructor.modelName', function () { + const modelType = this.constructor.modelName.split('/')[1]; + return modelType; + }), +}); diff --git a/ui/app/models/identity/entity-alias.js b/ui/app/models/identity/entity-alias.js new file mode 100644 index 0000000..825dd9a --- /dev/null +++ b/ui/app/models/identity/entity-alias.js @@ -0,0 +1,45 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { belongsTo, attr } from '@ember-data/model'; +import { computed } from '@ember/object'; +import { alias } from '@ember/object/computed'; +import IdentityModel from './_base'; +import identityCapabilities from 'vault/macros/identity-capabilities'; + +export default IdentityModel.extend({ + parentType: 'entity', + formFields: computed(function () { + return ['name', 'mountAccessor']; + }), + entity: belongsTo('identity/entity', { readOnly: true, async: false }), + + name: attr('string'), + canonicalId: attr('string'), + mountAccessor: attr('string', { + label: 'Auth Backend', + editType: 'mountAccessor', + }), + metadata: attr({ + editType: 'kv', + }), + mountPath: attr('string', { + readOnly: true, + }), + mountType: attr('string', { + readOnly: true, + }), + creationTime: attr('string', { + readOnly: true, + }), + lastUpdateTime: attr('string', { + readOnly: true, + }), + mergedFromCanonicalIds: attr(), + + updatePath: identityCapabilities(), + canDelete: alias('updatePath.canDelete'), + canEdit: alias('updatePath.canUpdate'), +}); diff --git a/ui/app/models/identity/entity-merge.js b/ui/app/models/identity/entity-merge.js new file mode 100644 index 0000000..2d6771e --- /dev/null +++ b/ui/app/models/identity/entity-merge.js @@ -0,0 +1,25 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { attr } from '@ember-data/model'; +import { computed } from '@ember/object'; +import IdentityModel from './_base'; + +export default IdentityModel.extend({ + formFields: computed(function () { + return ['toEntityId', 'fromEntityIds', 'force']; + }), + toEntityId: attr('string', { + label: 'Entity to merge to', + }), + fromEntityIds: attr({ + label: 'Entities to merge from', + editType: 'stringArray', + }), + force: attr('boolean', { + label: 'Keep MFA secrets from the "to" entity if there are merge conflicts', + defaultValue: false, + }), +}); diff --git a/ui/app/models/identity/entity.js b/ui/app/models/identity/entity.js new file mode 100644 index 0000000..bb22e11 --- /dev/null +++ b/ui/app/models/identity/entity.js @@ -0,0 +1,59 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { hasMany, attr } from '@ember-data/model'; +import { computed } from '@ember/object'; +import { alias } from '@ember/object/computed'; +import IdentityModel from './_base'; +import apiPath from 'vault/utils/api-path'; +import attachCapabilities from 'vault/lib/attach-capabilities'; +import lazyCapabilities from 'vault/macros/lazy-capabilities'; + +const Model = IdentityModel.extend({ + formFields: computed(function () { + return ['name', 'disabled', 'policies', 'metadata']; + }), + name: attr('string'), + disabled: attr('boolean', { + defaultValue: false, + label: 'Disable entity', + helpText: 'All associated tokens cannot be used, but are not revoked.', + }), + mergedEntityIds: attr(), + metadata: attr({ + editType: 'kv', + }), + policies: attr({ + editType: 'yield', + isSectionHeader: true, + }), + creationTime: attr('string', { + readOnly: true, + }), + lastUpdateTime: attr('string', { + readOnly: true, + }), + aliases: hasMany('identity/entity-alias', { async: false, readOnly: true }), + groupIds: attr({ + readOnly: true, + }), + directGroupIds: attr({ + readOnly: true, + }), + inheritedGroupIds: attr({ + readOnly: true, + }), + canDelete: alias('updatePath.canDelete'), + canEdit: alias('updatePath.canUpdate'), + canRead: alias('updatePath.canRead'), + canAddAlias: alias('aliasPath.canCreate'), + policyPath: lazyCapabilities(apiPath`sys/policies`), + canCreatePolicies: alias('policyPath.canCreate'), +}); + +export default attachCapabilities(Model, { + updatePath: apiPath`identity/entity/id/${'id'}`, + aliasPath: apiPath`identity/entity-alias`, +}); diff --git a/ui/app/models/identity/group-alias.js b/ui/app/models/identity/group-alias.js new file mode 100644 index 0000000..b5ed785 --- /dev/null +++ b/ui/app/models/identity/group-alias.js @@ -0,0 +1,43 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { belongsTo, attr } from '@ember-data/model'; +import { computed } from '@ember/object'; +import { alias } from '@ember/object/computed'; +import IdentityModel from './_base'; +import identityCapabilities from 'vault/macros/identity-capabilities'; + +export default IdentityModel.extend({ + parentType: 'group', + formFields: computed(function () { + return ['name', 'mountAccessor']; + }), + group: belongsTo('identity/group', { readOnly: true, async: false }), + + name: attr('string'), + canonicalId: attr('string'), + + mountPath: attr('string', { + readOnly: true, + }), + mountType: attr('string', { + readOnly: true, + }), + mountAccessor: attr('string', { + label: 'Auth Backend', + editType: 'mountAccessor', + }), + + creationTime: attr('string', { + readOnly: true, + }), + lastUpdateTime: attr('string', { + readOnly: true, + }), + + updatePath: identityCapabilities(), + canDelete: alias('updatePath.canDelete'), + canEdit: alias('updatePath.canUpdate'), +}); diff --git a/ui/app/models/identity/group.js b/ui/app/models/identity/group.js new file mode 100644 index 0000000..7a14ffa --- /dev/null +++ b/ui/app/models/identity/group.js @@ -0,0 +1,95 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { belongsTo, attr } from '@ember-data/model'; +import { alias } from '@ember/object/computed'; +import { computed } from '@ember/object'; +import IdentityModel from './_base'; +import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; +import identityCapabilities from 'vault/macros/identity-capabilities'; + +export default IdentityModel.extend({ + formFields: computed('type', function () { + const fields = ['name', 'type', 'policies', 'metadata']; + if (this.type === 'internal') { + return fields.concat(['memberGroupIds', 'memberEntityIds']); + } + return fields; + }), + name: attr('string'), + type: attr('string', { + defaultValue: 'internal', + possibleValues: ['internal', 'external'], + }), + creationTime: attr('string', { + readOnly: true, + }), + lastUpdateTime: attr('string', { + readOnly: true, + }), + numMemberEntities: attr('number', { + readOnly: true, + }), + numParentGroups: attr('number', { + readOnly: true, + }), + metadata: attr('object', { + editType: 'kv', + }), + policies: attr({ + editType: 'yield', + isSectionHeader: true, + }), + memberGroupIds: attr({ + label: 'Member Group IDs', + editType: 'searchSelect', + isSectionHeader: true, + fallbackComponent: 'string-list', + models: ['identity/group'], + }), + parentGroupIds: attr({ + label: 'Parent Group IDs', + editType: 'searchSelect', + isSectionHeader: true, + fallbackComponent: 'string-list', + models: ['identity/group'], + }), + memberEntityIds: attr({ + label: 'Member Entity IDs', + editType: 'searchSelect', + isSectionHeader: true, + fallbackComponent: 'string-list', + models: ['identity/entity'], + }), + hasMembers: computed( + 'memberEntityIds', + 'memberEntityIds.[]', + 'memberGroupIds', + 'memberGroupIds.[]', + function () { + const { memberEntityIds, memberGroupIds } = this; + const numEntities = (memberEntityIds && memberEntityIds.length) || 0; + const numGroups = (memberGroupIds && memberGroupIds.length) || 0; + return numEntities + numGroups > 0; + } + ), + policyPath: lazyCapabilities(apiPath`sys/policies`), + canCreatePolicies: alias('policyPath.canCreate'), + alias: belongsTo('identity/group-alias', { async: false, readOnly: true }), + updatePath: identityCapabilities(), + canDelete: alias('updatePath.canDelete'), + canEdit: alias('updatePath.canUpdate'), + + aliasPath: lazyCapabilities(apiPath`identity/group-alias`), + canAddAlias: computed('aliasPath.canCreate', 'type', 'alias', function () { + const type = this.type; + const alias = this.alias; + // internal groups can't have aliases, and external groups can only have one + if (type === 'internal' || alias) { + return false; + } + return this.aliasPath.canCreate; + }), +}); diff --git a/ui/app/models/keymgmt/key.js b/ui/app/models/keymgmt/key.js new file mode 100644 index 0000000..264f21d --- /dev/null +++ b/ui/app/models/keymgmt/key.js @@ -0,0 +1,134 @@ +/** + * Copyright (c) HashiCorp, Inc. 
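The `canAddAlias` rule in `group.js` above combines the group type, any existing alias, and the create capability. Distilled into a plain function for clarity (same logic, extracted from the computed property):

```js
// Plain-function distillation of group.js's canAddAlias computed property.
function canAddAlias({ type, alias, aliasPathCanCreate }) {
  // internal groups can't have aliases, and external groups can only have one
  if (type === 'internal' || alias) {
    return false;
  }
  return aliasPathCanCreate;
}

console.log(canAddAlias({ type: 'internal', alias: null, aliasPathCanCreate: true })); // false
console.log(canAddAlias({ type: 'external', alias: { id: 'a1' }, aliasPathCanCreate: true })); // false
console.log(canAddAlias({ type: 'external', alias: null, aliasPathCanCreate: true })); // true
```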
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import { expandAttributeMeta } from 'vault/utils/field-to-attrs'; +import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; + +export const KEY_TYPES = [ + 'aes256-gcm96', + 'rsa-2048', + 'rsa-3072', + 'rsa-4096', + 'ecdsa-p256', + 'ecdsa-p384', + 'ecdsa-p521', +]; +export default class KeymgmtKeyModel extends Model { + @attr('string', { + label: 'Key name', + subText: 'This is the name of the key that shows in Vault.', + }) + name; + + @attr('string') + backend; + + @attr('string', { + subText: 'The type of cryptographic key that will be created.', + possibleValues: KEY_TYPES, + defaultValue: 'rsa-2048', + }) + type; + + @attr('boolean', { + label: 'Allow deletion', + defaultValue: false, + }) + deletionAllowed; + + @attr('number', { + label: 'Current version', + }) + latestVersion; + + @attr('number', { + defaultValue: 0, + defaultShown: 'All versions enabled', + }) + minEnabledVersion; + + @attr('array') + versions; + + // The following are calculated in serializer + @attr('date') + created; + + @attr('date', { + defaultShown: 'Not yet rotated', + }) + lastRotated; + + // The following are from endpoints other than the main read one + @attr() provider; // string, or object with permissions error + @attr() distribution; + + icon = 'key'; + + get hasVersions() { + return this.versions.length > 1; + } + + get createFields() { + const createFields = ['name', 'type', 'deletionAllowed']; + return expandAttributeMeta(this, createFields); + } + + get updateFields() { + return expandAttributeMeta(this, ['minEnabledVersion', 'deletionAllowed']); + } + get showFields() { + return expandAttributeMeta(this, [ + 'name', + 'created', + 'type', + 'deletionAllowed', + 'latestVersion', + 'minEnabledVersion', + 'lastRotated', + ]); + } + + get keyTypeOptions() { + return expandAttributeMeta(this, ['type'])[0]; + } + + get distFields() { + return [ + { + name: 'name', + type: 'string', + label: 'Distributed name', + subText: 'The name given to the key by the provider.', + }, + { name: 'purpose', type: 'string', label: 'Key Purpose' }, + { name: 'protection', type: 'string', subText: 'Where cryptographic operations are performed.' }, + ]; + } + + @lazyCapabilities(apiPath`${'backend'}/key/${'id'}`, 'backend', 'id') keyPath; + @lazyCapabilities(apiPath`${'backend'}/key`, 'backend') keysPath; + @lazyCapabilities(apiPath`${'backend'}/key/${'id'}/kms`, 'backend', 'id') keyProvidersPath; + + get canCreate() { + return this.keyPath.get('canCreate'); + } + get canDelete() { + return this.keyPath.get('canDelete'); + } + get canEdit() { + return this.keyPath.get('canUpdate'); + } + get canRead() { + return this.keyPath.get('canRead'); + } + get canList() { + return this.keysPath.get('canList'); + } + get canListProviders() { + return this.keyProvidersPath.get('canList'); + } +} diff --git a/ui/app/models/keymgmt/provider.js b/ui/app/models/keymgmt/provider.js new file mode 100644 index 0000000..7c605a9 --- /dev/null +++ b/ui/app/models/keymgmt/provider.js @@ -0,0 +1,171 @@ +/** + * Copyright (c) HashiCorp, Inc. 
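All of the `can*` getters on the keymgmt key model above derive from three capability paths. Assuming `apiPath` interpolates model values as sketched earlier, a key on a `keymgmt` mount would be checked against paths like these:

```js
// Illustration only; paths assumed from the apiPath templates above.
const model = { backend: 'keymgmt', id: 'my-key' };
const keyPath = `${model.backend}/key/${model.id}`; // canCreate/canDelete/canEdit/canRead
const keysPath = `${model.backend}/key`; // canList
const keyProvidersPath = `${model.backend}/key/${model.id}/kms`; // canListProviders
console.log(keyPath, keysPath, keyProvidersPath);
// keymgmt/key/my-key keymgmt/key keymgmt/key/my-key/kms
```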
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import Model, { attr } from '@ember-data/model';
+import { tracked } from '@glimmer/tracking';
+import { expandAttributeMeta } from 'vault/utils/field-to-attrs';
+import { withModelValidations } from 'vault/decorators/model-validations';
+import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities';
+import { inject as service } from '@ember/service';
+
+const CRED_PROPS = {
+  azurekeyvault: ['client_id', 'client_secret', 'tenant_id'],
+  awskms: ['access_key', 'secret_key', 'session_token', 'endpoint'],
+  gcpckms: ['service_account_file'],
+};
+
+const OPTIONAL_CRED_PROPS = ['session_token', 'endpoint'];
+
+// since we have dynamic credential attributes based on provider we need a dynamic presence validator
+// add validators for all cred props and return true for value if not associated with selected provider
+const credValidators = Object.keys(CRED_PROPS).reduce((obj, providerKey) => {
+  CRED_PROPS[providerKey].forEach((prop) => {
+    if (!OPTIONAL_CRED_PROPS.includes(prop)) {
+      obj[`credentials.${prop}`] = [
+        {
+          message: `${prop} is required`,
+          validator(model) {
+            return model.credentialProps.includes(prop) ? model.credentials[prop] : true;
+          },
+        },
+      ];
+    }
+  });
+  return obj;
+}, {});
+
+const validations = {
+  name: [{ type: 'presence', message: 'Provider name is required' }],
+  keyCollection: [{ type: 'presence', message: 'Key Vault instance name is required' }],
+  ...credValidators,
+};
+
+@withModelValidations(validations)
+export default class KeymgmtProviderModel extends Model {
+  @service store;
+  @attr('string') backend;
+  @attr('string', {
+    label: 'Provider name',
+    subText: 'This is the name of the provider that will be displayed in Vault. This cannot be edited later.',
+  })
+  name;
+
+  @attr('string', {
+    label: 'Type',
+    subText: 'Choose the provider type.',
+    possibleValues: ['azurekeyvault', 'awskms', 'gcpckms'],
+    noDefault: true,
+  })
+  provider;
+
+  @attr('string', {
+    label: 'Key Vault instance name',
+    subText: 'The name of a Key Vault instance must be supplied. This cannot be edited later.',
+  })
+  keyCollection;
+
+  idPrefix = 'provider/';
+  type = 'provider';
+
+  @tracked keys = [];
+  @tracked credentials = null; // never returned from API -- set only during create/edit
+
+  get icon() {
+    return {
+      azurekeyvault: 'azure-color',
+      awskms: 'aws-color',
+      gcpckms: 'gcp-color',
+    }[this.provider];
+  }
+  get typeName() {
+    return {
+      azurekeyvault: 'Azure Key Vault',
+      awskms: 'AWS Key Management Service',
+      gcpckms: 'Google Cloud Key Management Service',
+    }[this.provider];
+  }
+  get showFields() {
+    const attrs = expandAttributeMeta(this, ['name', 'keyCollection']);
+    attrs.splice(1, 0, { hasBlock: true, label: 'Type', value: this.typeName, icon: this.icon });
+    const l = this.keys.length;
+    const value = l
+      ? `${l} ${l > 1 ? 'keys' : 'key'}`
+      : this.canListKeys
+      ? 
'None' + : 'You do not have permission to list keys'; + attrs.push({ hasBlock: true, isLink: l, label: 'Keys', value }); + return attrs; + } + get credentialProps() { + if (!this.provider) return []; + return CRED_PROPS[this.provider]; + } + get credentialFields() { + const [creds, fields] = this.credentialProps.reduce( + ([creds, fields], prop) => { + creds[prop] = null; + const field = { name: `credentials.${prop}`, type: 'string', options: { label: prop } }; + if (prop === 'service_account_file') { + field.options.subText = 'The path to a Google service account key file, not the file itself.'; + } + fields.push(field); + return [creds, fields]; + }, + [{}, []] + ); + this.credentials = creds; + return fields; + } + get createFields() { + return expandAttributeMeta(this, ['provider', 'name', 'keyCollection']); + } + + async fetchKeys(page) { + if (this.canListKeys === false) { + this.keys = []; + } else { + // try unless capabilities returns false + try { + this.keys = await this.store.lazyPaginatedQuery('keymgmt/key', { + backend: this.backend, + provider: this.name, + responsePath: 'data.keys', + page, + }); + } catch (error) { + this.keys = []; + if (error.httpStatus !== 404) { + throw error; + } + } + } + } + + @lazyCapabilities(apiPath`${'backend'}/kms/${'id'}`, 'backend', 'id') providerPath; + @lazyCapabilities(apiPath`${'backend'}/kms`, 'backend') providersPath; + @lazyCapabilities(apiPath`${'backend'}/kms/${'id'}/key`, 'backend', 'id') providerKeysPath; + + get canCreate() { + return this.providerPath.get('canCreate'); + } + get canDelete() { + return this.providerPath.get('canDelete'); + } + get canEdit() { + return this.providerPath.get('canUpdate'); + } + get canRead() { + return this.providerPath.get('canRead'); + } + get canList() { + return this.providersPath.get('canList'); + } + get canListKeys() { + return this.providerKeysPath.get('canList'); + } + get canCreateKeys() { + return this.providerKeysPath.get('canCreate'); + } +} diff --git a/ui/app/models/kmip/ca.js b/ui/app/models/kmip/ca.js new file mode 100644 index 0000000..731d614 --- /dev/null +++ b/ui/app/models/kmip/ca.js @@ -0,0 +1,13 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { belongsTo, attr } from '@ember-data/model'; + +export default Model.extend({ + config: belongsTo('kmip/config', { async: false }), + caPem: attr('string', { + label: 'CA PEM', + }), +}); diff --git a/ui/app/models/kmip/config.js b/ui/app/models/kmip/config.js new file mode 100644 index 0000000..5ddfc8a --- /dev/null +++ b/ui/app/models/kmip/config.js @@ -0,0 +1,24 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { belongsTo } from '@ember-data/model'; +import { computed } from '@ember/object'; +import { combineFieldGroups } from 'vault/utils/openapi-to-attrs'; +import fieldToAttrs from 'vault/utils/field-to-attrs'; + +export default Model.extend({ + useOpenAPI: true, + ca: belongsTo('kmip/ca', { async: false }), + getHelpUrl(path) { + return `/v1/${path}/config?help=1`; + }, + + fieldGroups: computed('newFields', function () { + let groups = [{ default: ['listenAddrs', 'connectionTimeout'] }]; + + groups = combineFieldGroups(groups, this.newFields, []); + return fieldToAttrs(this, groups); + }), +}); diff --git a/ui/app/models/kmip/credential.js b/ui/app/models/kmip/credential.js new file mode 100644 index 0000000..99ec2c9 --- /dev/null +++ b/ui/app/models/kmip/credential.js @@ -0,0 +1,40 @@ +/** + * Copyright (c) HashiCorp, Inc. 
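The `credValidators` reduce in `keymgmt/provider.js` above builds one conditional presence validator per required credential prop, keyed as `credentials.<prop>`. Rerun standalone (validator functions elided) it produces exactly these keys:

```js
const CRED_PROPS = {
  azurekeyvault: ['client_id', 'client_secret', 'tenant_id'],
  awskms: ['access_key', 'secret_key', 'session_token', 'endpoint'],
  gcpckms: ['service_account_file'],
};
const OPTIONAL_CRED_PROPS = ['session_token', 'endpoint'];

const credValidators = Object.keys(CRED_PROPS).reduce((obj, providerKey) => {
  CRED_PROPS[providerKey].forEach((prop) => {
    if (!OPTIONAL_CRED_PROPS.includes(prop)) {
      obj[`credentials.${prop}`] = [{ message: `${prop} is required` }];
    }
  });
  return obj;
}, {});

console.log(Object.keys(credValidators));
// [ 'credentials.client_id', 'credentials.client_secret', 'credentials.tenant_id',
//   'credentials.access_key', 'credentials.secret_key', 'credentials.service_account_file' ]
// session_token and endpoint are in OPTIONAL_CRED_PROPS, so they get no validators.
```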
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import fieldToAttrs from 'vault/utils/field-to-attrs'; +import { computed } from '@ember/object'; +import apiPath from 'vault/utils/api-path'; +import attachCapabilities from 'vault/lib/attach-capabilities'; + +const ModelExport = Model.extend({ + backend: attr({ readOnly: true }), + scope: attr({ readOnly: true }), + role: attr({ readOnly: true }), + certificate: attr('string', { readOnly: true }), + caChain: attr({ readOnly: true }), + privateKey: attr('string', { + readOnly: true, + sensitive: true, + }), + format: attr('string', { + possibleValues: ['pem', 'der', 'pem_bundle'], + defaultValue: 'pem', + label: 'Certificate format', + }), + fieldGroups: computed(function () { + const groups = [ + { + default: ['format'], + }, + ]; + + return fieldToAttrs(this, groups); + }), +}); + +export default attachCapabilities(ModelExport, { + deletePath: apiPath`${'backend'}/scope/${'scope'}/role/${'role'}/credentials/revoke`, +}); diff --git a/ui/app/models/kmip/role.js b/ui/app/models/kmip/role.js new file mode 100644 index 0000000..5be47a4 --- /dev/null +++ b/ui/app/models/kmip/role.js @@ -0,0 +1,92 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import { computed } from '@ember/object'; +import fieldToAttrs, { expandAttributeMeta } from 'vault/utils/field-to-attrs'; +import apiPath from 'vault/utils/api-path'; +import attachCapabilities from 'vault/lib/attach-capabilities'; + +export const COMPUTEDS = { + operationFields: computed('newFields', function () { + return this.newFields.filter((key) => key.startsWith('operation')); + }), + + operationFieldsWithoutSpecial: computed('operationFields', function () { + return this.operationFields.slice().removeObjects(['operationAll', 'operationNone']); + }), + + tlsFields: computed(function () { + return ['tlsClientKeyBits', 'tlsClientKeyType', 'tlsClientTtl']; + }), + + // For rendering on the create/edit pages + defaultFields: computed('newFields', 'operationFields', 'tlsFields', function () { + const excludeFields = ['role'].concat(this.operationFields, this.tlsFields); + return this.newFields.slice().removeObjects(excludeFields); + }), + + // For adapter/serializer + nonOperationFields: computed('newFields', 'operationFields', function () { + return this.newFields.slice().removeObjects(this.operationFields); + }), +}; + +const ModelExport = Model.extend(COMPUTEDS, { + useOpenAPI: true, + backend: attr({ readOnly: true }), + scope: attr({ readOnly: true }), + name: attr({ readOnly: true }), + getHelpUrl(path) { + return `/v1/${path}/scope/example/role/example?help=1`; + }, + fieldGroups: computed('fields', 'defaultFields.length', 'tlsFields', function () { + const groups = [{ TLS: this.tlsFields }]; + if (this.defaultFields.length) { + groups.unshift({ default: this.defaultFields }); + } + const ret = fieldToAttrs(this, groups); + return ret; + }), + + operationFormFields: computed('operationFieldsWithoutSpecial', function () { + const objects = [ + 'operationCreate', + 'operationActivate', + 'operationGet', + 'operationLocate', + 'operationRekey', + 'operationRevoke', + 'operationDestroy', + ]; + + const attributes = ['operationAddAttribute', 'operationGetAttributes']; + const server = ['operationDiscoverVersions']; + const others = this.operationFieldsWithoutSpecial + .slice() + .removeObjects(objects.concat(attributes, server)); + const groups = [ + { 'Managed 
Cryptographic Objects': objects }, + { 'Object Attributes': attributes }, + { Server: server }, + ]; + if (others.length) { + groups.push({ + Other: others, + }); + } + return fieldToAttrs(this, groups); + }), + tlsFormFields: computed('tlsFields', function () { + return expandAttributeMeta(this, this.tlsFields); + }), + fields: computed('defaultFields', function () { + return expandAttributeMeta(this, this.defaultFields); + }), +}); + +export default attachCapabilities(ModelExport, { + updatePath: apiPath`${'backend'}/scope/${'scope'}/role/${'id'}`, +}); diff --git a/ui/app/models/kmip/scope.js b/ui/app/models/kmip/scope.js new file mode 100644 index 0000000..aa1e185 --- /dev/null +++ b/ui/app/models/kmip/scope.js @@ -0,0 +1,23 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import { computed } from '@ember/object'; +import apiPath from 'vault/utils/api-path'; +import attachCapabilities from 'vault/lib/attach-capabilities'; + +import { expandAttributeMeta } from 'vault/utils/field-to-attrs'; + +const ModelExport = Model.extend({ + name: attr('string'), + backend: attr({ readOnly: true }), + attrs: computed(function () { + return expandAttributeMeta(this, ['name']); + }), +}); + +export default attachCapabilities(ModelExport, { + updatePath: apiPath`${'backend'}/scope/${'id'}`, +}); diff --git a/ui/app/models/kubernetes/config.js b/ui/app/models/kubernetes/config.js new file mode 100644 index 0000000..bd55709 --- /dev/null +++ b/ui/app/models/kubernetes/config.js @@ -0,0 +1,41 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import { withFormFields } from 'vault/decorators/model-form-fields'; +import { withModelValidations } from 'vault/decorators/model-validations'; + +const validations = { + kubernetesHost: [ + { + validator: (model) => (model.disableLocalCaJwt && !model.kubernetesHost ? false : true), + message: 'Kubernetes host is required', + }, + ], +}; +@withModelValidations(validations) +@withFormFields(['kubernetesHost', 'serviceAccountJwt', 'kubernetesCaCert']) +export default class KubernetesConfigModel extends Model { + @attr('string') backend; // dynamic path of secret -- set on response from value passed to queryRecord + @attr('string', { + label: 'Kubernetes host', + subText: 'Kubernetes API URL to connect to.', + }) + kubernetesHost; + @attr('string', { + label: 'Service account JWT', + subText: + 'The JSON web token of the service account used by the secret engine to manage Kubernetes roles. Defaults to the local pod’s JWT if found.', + }) + serviceAccountJwt; + @attr('string', { + label: 'Kubernetes CA Certificate', + subText: + 'PEM-encoded CA certificate to use by the secret engine to verify the Kubernetes API server certificate. Defaults to the local pod’s CA if found.', + editType: 'textarea', + }) + kubernetesCaCert; + @attr('boolean', { defaultValue: false }) disableLocalCaJwt; +} diff --git a/ui/app/models/kubernetes/role.js b/ui/app/models/kubernetes/role.js new file mode 100644 index 0000000..03473e7 --- /dev/null +++ b/ui/app/models/kubernetes/role.js @@ -0,0 +1,158 @@ +/** + * Copyright (c) HashiCorp, Inc. 
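`operationFormFields` in `kmip/role.js` above partitions the OpenAPI-derived `operation*` fields into named groups, with anything unrecognized falling through to "Other". The same partitioning with plain arrays (`operationImport` is a made-up stand-in for an unrecognized field; the model uses Ember's `removeObjects`, mirrored here with `filter`):

```js
const objects = ['operationCreate', 'operationActivate', 'operationGet', 'operationLocate',
  'operationRekey', 'operationRevoke', 'operationDestroy'];
const attributes = ['operationAddAttribute', 'operationGetAttributes'];
const server = ['operationDiscoverVersions'];
const fields = [...objects, ...attributes, ...server, 'operationImport'];

// Mirrors removeObjects: anything not in a known group lands in "Other".
const known = objects.concat(attributes, server);
const others = fields.filter((field) => !known.includes(field));
console.log(others); // [ 'operationImport' ]
```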
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import Model, { attr } from '@ember-data/model';
+import { withModelValidations } from 'vault/decorators/model-validations';
+import { withFormFields } from 'vault/decorators/model-form-fields';
+import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities';
+import { tracked } from '@glimmer/tracking';
+
+const validations = {
+  name: [{ type: 'presence', message: 'Name is required' }],
+};
+const formFieldProps = [
+  'name',
+  'serviceAccountName',
+  'kubernetesRoleType',
+  'kubernetesRoleName',
+  'allowedKubernetesNamespaces',
+  'tokenMaxTtl',
+  'tokenDefaultTtl',
+  'nameTemplate',
+];
+
+@withModelValidations(validations)
+@withFormFields(formFieldProps)
+export default class KubernetesRoleModel extends Model {
+  @attr('string') backend; // dynamic path of secret -- set on response from value passed to queryRecord
+  @attr('string', {
+    label: 'Role name',
+    subText: 'The role’s name in Vault.',
+  })
+  name;
+
+  @attr('string', {
+    label: 'Service account name',
+    subText: 'Vault will use the default template when generating service accounts, roles and role bindings.',
+  })
+  serviceAccountName;
+
+  @attr('string', {
+    label: 'Kubernetes role type',
+    editType: 'radio',
+    possibleValues: ['Role', 'ClusterRole'],
+  })
+  kubernetesRoleType;
+
+  @attr('string', {
+    label: 'Kubernetes role name',
+    subText: 'Vault will use the default template when generating service accounts, roles and role bindings.',
+  })
+  kubernetesRoleName;
+
+  @attr('string', {
+    label: 'Allowed Kubernetes namespaces',
+    subText:
+      'A list of the valid Kubernetes namespaces in which this role can be used for creating service accounts. 
If set to "*" all namespaces are allowed.', + }) + allowedKubernetesNamespaces; + + @attr({ + label: 'Max Lease TTL', + editType: 'ttl', + }) + tokenMaxTtl; + + @attr({ + label: 'Default Lease TTL', + editType: 'ttl', + }) + tokenDefaultTtl; + + @attr('string', { + label: 'Name template', + editType: 'optionalText', + defaultSubText: + 'Vault will use the default template when generating service accounts, roles and role bindings.', + subText: 'Vault will use the default template when generating service accounts, roles and role bindings.', + }) + nameTemplate; + + @attr extraAnnotations; + @attr extraLabels; + + @attr('string') generatedRoleRules; + + @tracked _generationPreference; + get generationPreference() { + // when the user interacts with the radio cards the value will be set to the pseudo prop which takes precedence + if (this._generationPreference) { + return this._generationPreference; + } + // for existing roles, default the value based on which model prop has value -- only one can be set + let pref = null; + if (this.serviceAccountName) { + pref = 'basic'; + } else if (this.kubernetesRoleName) { + pref = 'expanded'; + } else if (this.generatedRoleRules) { + pref = 'full'; + } + return pref; + } + set generationPreference(pref) { + // unset model props specific to filteredFormFields when changing preference + // only one of service_account_name, kubernetes_role_name or generated_role_rules can be set + const props = { + basic: ['kubernetesRoleType', 'kubernetesRoleName', 'generatedRoleRules', 'nameTemplate'], + expanded: ['serviceAccountName', 'generatedRoleRules'], + full: ['serviceAccountName', 'kubernetesRoleName'], + }[pref]; + props.forEach((prop) => (this[prop] = null)); + this._generationPreference = pref; + } + + get filteredFormFields() { + // return different form fields based on generationPreference + const hiddenFieldIndices = { + basic: [2, 3, 7], // kubernetesRoleType, kubernetesRoleName and nameTemplate + expanded: [1], // serviceAccountName + full: [1, 3], // serviceAccountName and kubernetesRoleName + }[this.generationPreference]; + + return hiddenFieldIndices + ? this.formFields.filter((field, index) => !hiddenFieldIndices.includes(index)) + : null; + } + + @lazyCapabilities(apiPath`${'backend'}/roles/${'name'}`, 'backend', 'name') rolePath; + @lazyCapabilities(apiPath`${'backend'}/creds/${'name'}`, 'backend', 'name') credsPath; + @lazyCapabilities(apiPath`${'backend'}/roles`, 'backend') rolesPath; + + get canCreate() { + return this.rolePath.get('canCreate'); + } + get canDelete() { + return this.rolePath.get('canDelete'); + } + get canEdit() { + return this.rolePath.get('canUpdate'); + } + get canRead() { + return this.rolePath.get('canRead'); + } + get canList() { + return this.rolesPath.get('canList'); + } + get canGenerateCreds() { + return this.credsPath.get('canCreate'); + } +} diff --git a/ui/app/models/lease.js b/ui/app/models/lease.js new file mode 100644 index 0000000..4acb8d6 --- /dev/null +++ b/ui/app/models/lease.js @@ -0,0 +1,29 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import { match } from '@ember/object/computed'; +import KeyMixin from 'vault/mixins/key-mixin'; + +/* sample response +{ + "id": "auth/token/create/25c75065466dfc5f920525feafe47502c4c9915c", + "issue_time": "2017-04-30T10:18:11.228946471-04:00", + "expire_time": "2017-04-30T11:18:11.228946708-04:00", + "last_renewal": null, + "renewable": true, + "ttl": 3558 +} + +*/ + +export default Model.extend(KeyMixin, { + issueTime: attr('string'), + expireTime: attr('string'), + lastRenewal: attr('string'), + renewable: attr('boolean'), + ttl: attr('number'), + isAuthLease: match('id', /^auth/), +}); diff --git a/ui/app/models/license.js b/ui/app/models/license.js new file mode 100644 index 0000000..6818645 --- /dev/null +++ b/ui/app/models/license.js @@ -0,0 +1,48 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; + +/* sample response +{ + "data": { + "autoloading_used": true, + "autoloaded": { + "expiration_time": "2017-11-14T16:34:36.546753-05:00", + "license_id": "some-id", + "start_time": "2017-11-14T16:04:36.546753-05:00" + "features": [ + "UI", + "HSM", + "Performance Replication", + "DR Replication" + ], + }, + "stored": { + "expiration_time": "2017-11-14T16:34:36.546753-05:00", + "license_id": "some-id", + "start_time": "2017-11-14T16:04:36.546753-05:00" + "features": [ + "UI", + "HSM", + "Performance Replication", + "DR Replication" + ], + } + }, + "warnings": [ + "time left on license is 29m33s" + ] +} +*/ + +export default Model.extend({ + expirationTime: attr('string'), + features: attr('array'), + licenseId: attr('string'), + startTime: attr('string'), + performanceStandbyCount: attr('number'), + autoloaded: attr('boolean'), +}); diff --git a/ui/app/models/mfa-login-enforcement.js b/ui/app/models/mfa-login-enforcement.js new file mode 100644 index 0000000..f49c143 --- /dev/null +++ b/ui/app/models/mfa-login-enforcement.js @@ -0,0 +1,114 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr, hasMany } from '@ember-data/model'; +import ArrayProxy from '@ember/array/proxy'; +import PromiseProxyMixin from '@ember/object/promise-proxy-mixin'; +import { methods } from 'vault/helpers/mountable-auth-methods'; +import { withModelValidations } from 'vault/decorators/model-validations'; +import { isPresent } from '@ember/utils'; +import { inject as service } from '@ember/service'; + +const validations = { + name: [{ type: 'presence', message: 'Name is required' }], + mfa_methods: [{ type: 'presence', message: 'At least one MFA method is required' }], + targets: [ + { + validator(model) { + // avoid async fetch of records here and access relationship ids to check for presence + const entityIds = model.hasMany('identity_entities').ids(); + const groupIds = model.hasMany('identity_groups').ids(); + return ( + isPresent(model.auth_method_accessors) || + isPresent(model.auth_method_types) || + isPresent(entityIds) || + isPresent(groupIds) + ); + }, + message: + "At least one target is required. 
If you've selected one, click 'Add' to make sure it's added to this enforcement.", + }, + ], +}; + +@withModelValidations(validations) +export default class MfaLoginEnforcementModel extends Model { + @service store; + @attr('string') name; + @hasMany('mfa-method') mfa_methods; + @attr('string') namespace_id; + @attr('array', { defaultValue: () => [] }) auth_method_accessors; // ["auth_approle_17a552c6"] + @attr('array', { defaultValue: () => [] }) auth_method_types; // ["userpass"] + @hasMany('identity/entity') identity_entities; + @hasMany('identity/group') identity_groups; + + get targets() { + return ArrayProxy.extend(PromiseProxyMixin).create({ + promise: this.prepareTargets(), + }); + } + + async prepareTargets() { + let authMethods; + const targets = []; + + if (this.auth_method_accessors.length || this.auth_method_types.length) { + // fetch all auth methods and lookup by accessor to get mount path and type + try { + const { data } = await this.store.adapterFor('auth-method').findAll(); + authMethods = Object.keys(data).map((key) => ({ path: key, ...data[key] })); + } catch (error) { + // swallow this error + } + } + + if (this.auth_method_accessors.length) { + const selectedAuthMethods = authMethods.filter((model) => { + return this.auth_method_accessors.includes(model.accessor); + }); + targets.addObjects( + selectedAuthMethods.map((method) => ({ + icon: this.iconForMount(method.type), + link: 'vault.cluster.access.method', + linkModels: [method.path.slice(0, -1)], + title: method.path, + subTitle: method.accessor, + })) + ); + } + + this.auth_method_types.forEach((type) => { + const icon = this.iconForMount(type); + const mountCount = authMethods.filterBy('type', type).length; + targets.addObject({ + key: 'auth_method_types', + icon, + title: type, + subTitle: `All ${type} mounts (${mountCount})`, + }); + }); + + for (const key of ['identity_entities', 'identity_groups']) { + (await this[key]).forEach((model) => { + targets.addObject({ + key, + icon: 'user', + link: 'vault.cluster.access.identity.show', + linkModels: [key.split('_')[1], model.id, 'details'], + title: model.name, + subTitle: model.id, + }); + }); + } + + return targets; + } + + iconForMount(type) { + const mountableMethods = methods(); + const mount = mountableMethods.findBy('type', type); + return mount ? mount.glyph || mount.type : 'token'; + } +} diff --git a/ui/app/models/mfa-method.js b/ui/app/models/mfa-method.js new file mode 100644 index 0000000..d8f3a02 --- /dev/null +++ b/ui/app/models/mfa-method.js @@ -0,0 +1,178 @@ +/** + * Copyright (c) HashiCorp, Inc. 
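The `targets` property in `mfa-login-enforcement.js` above returns an ArrayProxy wrapped with PromiseProxyMixin, so templates can bind to it immediately while `prepareTargets()` resolves, and JS callers can await it. A hypothetical consumer (component name and record id made up):

```js
// Sketch of consuming the promise-backed targets list.
import Component from '@glimmer/component';
import { inject as service } from '@ember/service';

export default class EnforcementTargetsComponent extends Component {
  @service store;

  async logTargets() {
    const enforcement = await this.store.findRecord('mfa-login-enforcement', 'my-enforcement'); // id made up
    const targets = await enforcement.targets; // PromiseProxy resolves prepareTargets()
    targets.forEach(({ title, subTitle }) => console.log(title, subTitle));
  }
}
```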
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import Model, { attr } from '@ember-data/model';
+import { capitalize } from '@ember/string';
+import { expandAttributeMeta } from 'vault/utils/field-to-attrs';
+import { withModelValidations } from 'vault/decorators/model-validations';
+import { isPresent } from '@ember/utils';
+
+const METHOD_PROPS = {
+  common: [],
+  duo: ['username_format', 'secret_key', 'integration_key', 'api_hostname', 'push_info', 'use_passcode'],
+  okta: ['username_format', 'mount_accessor', 'org_name', 'api_token', 'base_url', 'primary_email'],
+  totp: ['issuer', 'period', 'key_size', 'qr_size', 'algorithm', 'digits', 'skew', 'max_validation_attempts'],
+  pingid: [
+    'username_format',
+    'settings_file_base64',
+    'use_signature',
+    'idp_url',
+    'admin_url',
+    'authenticator_url',
+    'org_alias',
+  ],
+};
+
+const REQUIRED_PROPS = {
+  duo: ['secret_key', 'integration_key', 'api_hostname'],
+  okta: ['org_name', 'api_token'],
+  totp: ['issuer'],
+  pingid: ['settings_file_base64'],
+};
+
+const validators = Object.keys(REQUIRED_PROPS).reduce((obj, type) => {
+  REQUIRED_PROPS[type].forEach((prop) => {
+    obj[`${prop}`] = [
+      {
+        message: `${prop.replace(/_/g, ' ')} is required`,
+        validator(model) {
+          return model.type === type ? isPresent(model[prop]) : true;
+        },
+      },
+    ];
+  });
+  return obj;
+}, {});
+
+@withModelValidations(validators)
+export default class MfaMethod extends Model {
+  // common
+  @attr('string') type;
+  @attr('string', {
+    label: 'Username format',
+    subText: 'How to map identity names to MFA method names.',
+  })
+  username_format;
+  @attr('string', {
+    label: 'Namespace',
+  })
+  namespace_id;
+  @attr('string') mount_accessor;
+
+  // PING ID
+  @attr('string', {
+    label: 'Settings file',
+    subText: 'A base-64 encoded third-party settings file retrieved from the PingID configuration page.',
+  })
+  settings_file_base64;
+  @attr('boolean') use_signature;
+  @attr('string') idp_url;
+  @attr('string') admin_url;
+  @attr('string') authenticator_url;
+  @attr('string') org_alias;
+
+  // OKTA
+  @attr('string', {
+    label: 'Organization name',
+    subText: 'Name of the organization to be used in the Okta API.',
+  })
+  org_name;
+  @attr('string', {
+    label: 'Okta API key',
+  })
+  api_token;
+  @attr('string', {
+    label: 'Base URL',
+    subText:
+      'If set, will be used as the base domain for API requests. Examples are okta.com, oktapreview.com, and okta-emea.com.',
+  })
+  base_url;
+  @attr('boolean') primary_email;
+
+  // DUO
+  @attr('string', {
+    label: 'Duo secret key',
+    sensitive: true,
+  })
+  secret_key;
+  @attr('string', {
+    label: 'Duo integration key',
+    sensitive: true,
+  })
+  integration_key;
+  @attr('string', {
+    label: 'Duo API hostname',
+  })
+  api_hostname;
+  @attr('string', {
+    label: 'Duo push information',
+    subText: 'Additional information displayed to the user when the push is presented to them.',
+  })
+  push_info;
+  @attr('boolean', {
+    label: 'Passcode reminder',
+    subText: 'If this is turned on, the user is reminded to use the passcode upon MFA validation.',
+  })
+  use_passcode;
+
+  // TOTP
+  @attr('string', {
+    label: 'Issuer',
+    subText: 'The human-readable name of the organization issuing the key.',
+  })
+  issuer;
+  @attr({
+    label: 'Period',
+    editType: 'ttl',
+    helperTextEnabled: 'How long each generated TOTP is valid.',
+    hideToggle: true,
+    defaultValue: 30, // API accepts either an integer in seconds or a string with unit, e.g. 30 || '30s'
+  })
+  period;
+  @attr('number', {
+    label: 'Key size',
+    subText: 'The size in bytes of the Vault-generated key.',
+  })
+  key_size;
+  @attr('number', {
+    label: 'QR size',
+    subText: 'The pixel size of the generated square QR code.',
+  })
+  qr_size;
+  @attr('string', {
+    label: 'Algorithm',
+    editType: 'radio',
+    possibleValues: ['SHA1', 'SHA256', 'SHA512'],
+    subText: 'The hashing algorithm used to generate the TOTP code.',
+  })
+  algorithm;
+  @attr('number', {
+    label: 'Digits',
+    editType: 'radio',
+    possibleValues: [6, 8],
+    subText: 'The number of digits in the generated TOTP code.',
+  })
+  digits;
+  @attr('number', {
+    label: 'Skew',
+    editType: 'radio',
+    possibleValues: [0, 1],
+    subText: 'The number of delay periods allowed when validating a TOTP token.',
+  })
+  skew;
+  @attr('number') max_validation_attempts;
+
+  get name() {
+    return this.type === 'totp' ? this.type.toUpperCase() : capitalize(this.type);
+  }
+
+  get formFields() {
+    return [...METHOD_PROPS.common, ...METHOD_PROPS[this.type]];
+  }
+
+  get attrs() {
+    return expandAttributeMeta(this, this.formFields);
+  }
+}
diff --git a/ui/app/models/mount-config.js b/ui/app/models/mount-config.js
new file mode 100644
index 0000000..8f938b2
--- /dev/null
+++ b/ui/app/models/mount-config.js
@@ -0,0 +1,70 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
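Like the keymgmt provider model, `mfa-method.js` above generates its validators from a lookup table; each presence check only applies when the model's `type` matches the method type that requires the prop. Rerun standalone (with a plain truthiness check standing in for Ember's `isPresent`):

```js
const REQUIRED_PROPS = {
  duo: ['secret_key', 'integration_key', 'api_hostname'],
  okta: ['org_name', 'api_token'],
  totp: ['issuer'],
  pingid: ['settings_file_base64'],
};

const validators = Object.keys(REQUIRED_PROPS).reduce((obj, type) => {
  REQUIRED_PROPS[type].forEach((prop) => {
    obj[prop] = [
      {
        message: `${prop.replace(/_/g, ' ')} is required`,
        validator: (model) => (model.type === type ? Boolean(model[prop]) : true),
      },
    ];
  });
  return obj;
}, {});

console.log(validators.issuer[0].message); // "issuer is required"
console.log(validators.issuer[0].validator({ type: 'totp' })); // false -- totp requires issuer
console.log(validators.issuer[0].validator({ type: 'duo' })); // true -- rule inactive for duo
```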
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import Model, { attr } from '@ember-data/model';
+
+export default class MountConfigModel extends Model {
+  @attr({
+    label: 'Default Lease TTL',
+    editType: 'ttl',
+  })
+  defaultLeaseTtl;
+
+  @attr({
+    label: 'Max Lease TTL',
+    editType: 'ttl',
+  })
+  maxLeaseTtl;
+
+  @attr({
+    label: 'Request keys excluded from HMACing in audit',
+    editType: 'stringArray',
+    helpText: "Keys that will not be HMAC'd by audit devices in the request data object.",
+  })
+  auditNonHmacRequestKeys;
+
+  @attr({
+    label: 'Response keys excluded from HMACing in audit',
+    editType: 'stringArray',
+    helpText: "Keys that will not be HMAC'd by audit devices in the response data object.",
+  })
+  auditNonHmacResponseKeys;
+
+  @attr('string', {
+    editType: 'boolean',
+    label: 'List method when unauthenticated',
+    trueValue: 'unauth',
+    falseValue: 'hidden',
+  })
+  listingVisibility;
+
+  @attr({
+    label: 'Allowed passthrough request headers',
+    helpText: 'Headers to allow and pass from the request to the backend.',
+    editType: 'stringArray',
+  })
+  passthroughRequestHeaders;
+
+  @attr({
+    label: 'Allowed response headers',
+    helpText: 'Headers to allow, allowing a plugin to include them in the response.',
+    editType: 'stringArray',
+  })
+  allowedResponseHeaders;
+
+  @attr('string', {
+    label: 'Token Type',
+    helpText:
+      'The type of token that should be generated via this role. For `default-service` and `default-batch`, service and batch tokens will be issued respectively, unless the auth method explicitly requests a different type.',
+    possibleValues: ['default-service', 'default-batch', 'batch', 'service'],
+    noDefault: true,
+  })
+  tokenType;
+
+  @attr({
+    editType: 'stringArray',
+  })
+  allowedManagedKeys;
+}
diff --git a/ui/app/models/namespace.js b/ui/app/models/namespace.js
new file mode 100644
index 0000000..87b964b
--- /dev/null
+++ b/ui/app/models/namespace.js
@@ -0,0 +1,24 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import Model, { attr } from '@ember-data/model';
+import { withExpandedAttributes } from 'vault/decorators/model-expanded-attributes';
+
+@withExpandedAttributes()
+export default class NamespaceModel extends Model {
+  @attr('string', {
+    validationAttr: 'pathIsValid',
+    invalidMessage: 'You have entered an invalid path. Please only include letters, numbers, -, ., and _.',
+  })
+  path;
+
+  get pathIsValid() {
+    return this.path && this.path.match(/^[\w\d-.]+$/g);
+  }
+
+  get fields() {
+    return ['path'].map((f) => this.allByKey[f]);
+  }
+}
diff --git a/ui/app/models/node.js b/ui/app/models/node.js
new file mode 100644
index 0000000..cc74e57
--- /dev/null
+++ b/ui/app/models/node.js
@@ -0,0 +1,38 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import Model, { attr } from '@ember-data/model';
+import { alias, and, equal } from '@ember/object/computed';
+
+export default Model.extend({
+  name: attr('string'),
+  //https://www.vaultproject.io/docs/http/sys-health.html
+  initialized: attr('boolean'),
+  sealed: attr('boolean'),
+  isSealed: alias('sealed'),
+  standby: attr('boolean'),
+  isActive: equal('standby', false),
+  clusterName: attr('string'),
+  clusterId: attr('string'),
+
+  isLeader: and('initialized', 'isActive'),
+
+  //https://www.vaultproject.io/docs/http/sys-seal-status.html
+  //The "t" parameter is the threshold, and "n" is the number of shares.
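+  // For example (illustrative, not part of the upstream comment): with t=3
+  // and n=5, five unseal key shares were issued and any three of them must be
+  // supplied to unseal; `progress` below counts shares entered so far.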
+ t: attr('number'), + n: attr('number'), + progress: attr('number'), + sealThreshold: alias('t'), + sealNumShares: alias('n'), + version: attr('string'), + type: attr('string'), + storageType: attr('string'), + hcpLinkStatus: attr('string'), + + //https://www.vaultproject.io/docs/http/sys-leader.html + haEnabled: attr('boolean'), + isSelf: attr('boolean'), + leaderAddress: attr('string'), +}); diff --git a/ui/app/models/oidc/assignment.js b/ui/app/models/oidc/assignment.js new file mode 100644 index 0000000..8077c66 --- /dev/null +++ b/ui/app/models/oidc/assignment.js @@ -0,0 +1,46 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; +import { withModelValidations } from 'vault/decorators/model-validations'; +import { isPresent } from '@ember/utils'; + +const validations = { + name: [ + { type: 'presence', message: 'Name is required.' }, + { + type: 'containsWhiteSpace', + message: 'Name cannot contain whitespace.', + }, + ], + targets: [ + { + validator(model) { + return isPresent(model.entityIds) || isPresent(model.groupIds); + }, + message: 'At least one entity or group is required.', + }, + ], +}; + +@withModelValidations(validations) +export default class OidcAssignmentModel extends Model { + @attr('string') name; + @attr('array') entityIds; + @attr('array') groupIds; + + // CAPABILITIES + @lazyCapabilities(apiPath`identity/oidc/assignment/${'name'}`, 'name') assignmentPath; + get canRead() { + return this.assignmentPath.get('canRead'); + } + get canEdit() { + return this.assignmentPath.get('canUpdate'); + } + get canDelete() { + return this.assignmentPath.get('canDelete'); + } +} diff --git a/ui/app/models/oidc/client.js b/ui/app/models/oidc/client.js new file mode 100644 index 0000000..09a11c4 --- /dev/null +++ b/ui/app/models/oidc/client.js @@ -0,0 +1,112 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; +import { expandAttributeMeta } from 'vault/utils/field-to-attrs'; +import fieldToAttrs from 'vault/utils/field-to-attrs'; +import { withModelValidations } from 'vault/decorators/model-validations'; + +const validations = { + name: [ + { type: 'presence', message: 'Name is required.' }, + { + type: 'containsWhiteSpace', + message: 'Name cannot contain whitespace.', + }, + ], + key: [{ type: 'presence', message: 'Key is required.' }], +}; + +@withModelValidations(validations) +export default class OidcClientModel extends Model { + @attr('string', { label: 'Application name', editDisabled: true }) name; + @attr('string', { + label: 'Type', + subText: + 'Specify whether the application type is confidential or public. The public type must use PKCE. This cannot be edited later.', + editType: 'radio', + editDisabled: true, + defaultValue: 'confidential', + possibleValues: ['confidential', 'public'], + }) + clientType; + + @attr('array', { + label: 'Redirect URIs', + subText: + 'One of these values must exactly match the redirect_uri parameter value used in each authentication request.', + editType: 'stringArray', + }) + redirectUris; + + // >> MORE OPTIONS TOGGLE << + + @attr('string', { + label: 'Signing key', + subText: 'Add a key to sign and verify the JSON web tokens (JWT). 
This cannot be edited later.', + editType: 'searchSelect', + editDisabled: true, + onlyAllowExisting: true, + defaultValue() { + return ['default']; + }, + fallbackComponent: 'input-search', + selectLimit: 1, + models: ['oidc/key'], + }) + key; + @attr({ + label: 'Access Token TTL', + editType: 'ttl', + defaultValue: '24h', + }) + accessTokenTtl; + + @attr({ + label: 'ID Token TTL', + editType: 'ttl', + defaultValue: '24h', + }) + idTokenTtl; + + // >> END MORE OPTIONS TOGGLE << + + @attr('array', { label: 'Assign access' }) assignments; // no editType because does not use form-field component + @attr('string', { label: 'Client ID' }) clientId; + @attr('string') clientSecret; + + // TODO refactor when field-to-attrs util is refactored as decorator + _attributeMeta = null; // cache initial result of expandAttributeMeta in getter and return + get formFields() { + if (!this._attributeMeta) { + this._attributeMeta = expandAttributeMeta(this, ['name', 'clientType', 'redirectUris']); + } + return this._attributeMeta; + } + + _fieldToAttrsGroups = null; + // more options fields + get fieldGroups() { + if (!this._fieldToAttrsGroups) { + this._fieldToAttrsGroups = fieldToAttrs(this, [ + { 'More options': ['key', 'idTokenTtl', 'accessTokenTtl'] }, + ]); + } + return this._fieldToAttrsGroups; + } + + // CAPABILITIES // + @lazyCapabilities(apiPath`identity/oidc/client/${'name'}`, 'name') clientPath; + get canRead() { + return this.clientPath.get('canRead'); + } + get canEdit() { + return this.clientPath.get('canUpdate'); + } + get canDelete() { + return this.clientPath.get('canDelete'); + } +} diff --git a/ui/app/models/oidc/key.js b/ui/app/models/oidc/key.js new file mode 100644 index 0000000..3c8762e --- /dev/null +++ b/ui/app/models/oidc/key.js @@ -0,0 +1,62 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; +import { expandAttributeMeta } from 'vault/utils/field-to-attrs'; +import { withModelValidations } from 'vault/decorators/model-validations'; + +const validations = { + name: [ + { type: 'presence', message: 'Name is required.' 
}, + { + type: 'containsWhiteSpace', + message: 'Name cannot contain whitespace.', + }, + ], +}; + +@withModelValidations(validations) +export default class OidcKeyModel extends Model { + @attr('string', { editDisabled: true }) name; + @attr('string', { + defaultValue: 'RS256', + possibleValues: ['RS256', 'RS384', 'RS512', 'ES256', 'ES384', 'ES512', 'EdDSA'], + }) + algorithm; + + @attr({ editType: 'ttl', defaultValue: '24h' }) rotationPeriod; + @attr({ label: 'Verification TTL', editType: 'ttl', defaultValue: '24h' }) verificationTtl; + @attr('array', { label: 'Allowed applications' }) allowedClientIds; // no editType because does not use form-field component + + // TODO refactor when field-to-attrs is refactored as decorator + _attributeMeta = null; // cache initial result of expandAttributeMeta in getter and return + get formFields() { + if (!this._attributeMeta) { + this._attributeMeta = expandAttributeMeta(this, [ + 'name', + 'algorithm', + 'rotationPeriod', + 'verificationTtl', + ]); + } + return this._attributeMeta; + } + + @lazyCapabilities(apiPath`identity/oidc/key/${'name'}`, 'name') keyPath; + @lazyCapabilities(apiPath`identity/oidc/key/${'name'}/rotate`, 'name') rotatePath; + get canRead() { + return this.keyPath.get('canRead'); + } + get canEdit() { + return this.keyPath.get('canUpdate'); + } + get canRotate() { + return this.rotatePath.get('canUpdate'); + } + get canDelete() { + return this.keyPath.get('canDelete'); + } +} diff --git a/ui/app/models/oidc/provider.js b/ui/app/models/oidc/provider.js new file mode 100644 index 0000000..a08dd79 --- /dev/null +++ b/ui/app/models/oidc/provider.js @@ -0,0 +1,64 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; +import { expandAttributeMeta } from 'vault/utils/field-to-attrs'; +import { withModelValidations } from 'vault/decorators/model-validations'; + +const validations = { + name: [ + { type: 'presence', message: 'Name is required.' }, + { + type: 'containsWhiteSpace', + message: 'Name cannot contain whitespace.', + }, + ], +}; + +@withModelValidations(validations) +export default class OidcProviderModel extends Model { + @attr('string', { editDisabled: true }) name; + @attr('string', { + subText: + 'The scheme, host, and optional port for your issuer. This will be used to build the URL that validates ID tokens.', + placeholderText: 'e.g. https://example.com:8200', + docLink: '/vault/api-docs/secret/identity/oidc-provider#create-or-update-a-provider', + helpText: `Optional. This defaults to a URL with Vault's api_addr`, + }) + issuer; + + @attr('array', { + label: 'Supported scopes', + subText: 'Scopes define information about a user and the OIDC service. 
Optional.', + editType: 'searchSelect', + models: ['oidc/scope'], + fallbackComponent: 'string-list', + onlyAllowExisting: true, + }) + scopesSupported; + + @attr('array', { label: 'Allowed applications' }) allowedClientIds; // no editType because does not use form-field component + + // TODO refactor when field-to-attrs is refactored as decorator + _attributeMeta = null; // cache initial result of expandAttributeMeta in getter and return + get formFields() { + if (!this._attributeMeta) { + this._attributeMeta = expandAttributeMeta(this, ['name', 'issuer', 'scopesSupported']); + } + return this._attributeMeta; + } + + @lazyCapabilities(apiPath`identity/oidc/provider/${'name'}`, 'name') providerPath; + get canRead() { + return this.providerPath.get('canRead'); + } + get canEdit() { + return this.providerPath.get('canUpdate'); + } + get canDelete() { + return this.providerPath.get('canDelete'); + } +} diff --git a/ui/app/models/oidc/scope.js b/ui/app/models/oidc/scope.js new file mode 100644 index 0000000..4526bbd --- /dev/null +++ b/ui/app/models/oidc/scope.js @@ -0,0 +1,40 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; +import { expandAttributeMeta } from 'vault/utils/field-to-attrs'; +import { withModelValidations } from 'vault/decorators/model-validations'; + +const validations = { + name: [{ type: 'presence', message: 'Name is required.' }], +}; + +@withModelValidations(validations) +export default class OidcScopeModel extends Model { + @attr('string', { editDisabled: true }) name; + @attr('string', { editType: 'textarea' }) description; + @attr('string', { label: 'JSON Template', editType: 'json', mode: 'ruby' }) template; + + // TODO refactor when field-to-attrs is refactored as decorator + _attributeMeta = null; // cache initial result of expandAttributeMeta in getter and return + get formFields() { + if (!this._attributeMeta) { + this._attributeMeta = expandAttributeMeta(this, ['name', 'description', 'template']); + } + return this._attributeMeta; + } + + @lazyCapabilities(apiPath`identity/oidc/scope/${'name'}`, 'name') scopePath; + get canRead() { + return this.scopePath.get('canRead'); + } + get canEdit() { + return this.scopePath.get('canUpdate'); + } + get canDelete() { + return this.scopePath.get('canDelete'); + } +} diff --git a/ui/app/models/path-filter-config.js b/ui/app/models/path-filter-config.js new file mode 100644 index 0000000..31dfcec --- /dev/null +++ b/ui/app/models/path-filter-config.js @@ -0,0 +1,15 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; + +export default Model.extend({ + mode: attr('string'), + paths: attr('array', { + defaultValue: function () { + return []; + }, + }), +}); diff --git a/ui/app/models/pki/action.js b/ui/app/models/pki/action.js new file mode 100644 index 0000000..71c476b --- /dev/null +++ b/ui/app/models/pki/action.js @@ -0,0 +1,248 @@ +/** + * Copyright (c) HashiCorp, Inc. 
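The oidc models above (client, key, provider, scope) all repeat the same per-instance memoization for `formFields`, caching the first `expandAttributeMeta` result. The pattern in isolation (stand-in mapper used in place of the real util):

```js
// The first read computes and caches; later reads return the same array, so
// the relatively expensive expansion runs once per model instance.
class Example {
  _attributeMeta = null;
  get formFields() {
    if (!this._attributeMeta) {
      this._attributeMeta = ['name', 'description'].map((name) => ({ name })); // stand-in for expandAttributeMeta
    }
    return this._attributeMeta;
  }
}

const model = new Example();
console.log(model.formFields === model.formFields); // true -- computed once per instance
```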
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import Model, { attr } from '@ember-data/model';
+import { inject as service } from '@ember/service';
+import { tracked } from '@glimmer/tracking';
+import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities';
+import { withFormFields } from 'vault/decorators/model-form-fields';
+import { withModelValidations } from 'vault/decorators/model-validations';
+
+const validations = {
+  type: [{ type: 'presence', message: 'Type is required.' }],
+  commonName: [{ type: 'presence', message: 'Common name is required.' }],
+  issuerName: [
+    {
+      validator(model) {
+        if (
+          (model.actionType === 'generate-root' || model.actionType === 'rotate-root') &&
+          model.issuerName === 'default'
+        )
+          return false;
+        return true;
+      },
+      message: `Issuer name must be unique across all issuers and not be the reserved value 'default'.`,
+    },
+  ],
+  keyName: [
+    {
+      validator(model) {
+        if (model.keyName === 'default') return false;
+        return true;
+      },
+      message: `Key name cannot be the reserved value 'default'.`,
+    },
+  ],
+};
+
+/**
+ * This model maps to multiple PKI endpoints, specifically the ones that make up the
+ * configuration/create workflow. These endpoints also share an atypical behavior in that
+ * a POST request to them doesn't necessarily result in a single created entity --
+ * depending on the inputs, some number of issuers, keys, and certificates can be created
+ * from the API.
+ */
+@withModelValidations(validations)
+@withFormFields()
+export default class PkiActionModel extends Model {
+  @service secretMountPath;
+
+  @tracked actionType; // used to toggle between different form fields when creating configuration
+
+  /* actionType import */
+  @attr('string') pemBundle;
+
+  // parsed attrs from parse-pki-cert util if certificate on response
+  @attr parsedCertificate;
+
+  // readonly attrs returned after importing
+  @attr importedIssuers;
+  @attr importedKeys;
+  @attr mapping;
+  @attr('string', { readOnly: true, masked: true }) certificate;
+
+  /* actionType generate-root */
+
+  // readonly attrs returned right after root generation
+  @attr serialNumber;
+  @attr('string', { label: 'Issuing CA', readOnly: true, masked: true }) issuingCa;
+  // end of readonly
+
+  @attr('string', {
+    possibleValues: ['exported', 'internal', 'existing', 'kms'],
+    noDefault: true,
+  })
+  type;
+
+  @attr('string') issuerName;
+
+  @attr('string') keyName;
+
+  @attr('string', {
+    defaultValue: 'default',
+    label: 'Key reference',
+  })
+  keyRef; // type=existing only
+
+  @attr('string') commonName; // REQUIRED
+
+  @attr('string', {
+    label: 'Subject Alternative Names (SANs)',
+    editType: 'stringArray',
+  })
+  altNames;
+
+  @attr('string', {
+    label: 'IP Subject Alternative Names (IP SANs)',
+    editType: 'stringArray',
+  })
+  ipSans;
+
+  @attr('string', {
+    label: 'URI Subject Alternative Names (URI SANs)',
+    editType: 'stringArray',
+  })
+  uriSans;
+
+  @attr('string', {
+    label: 'Other SANs',
+    editType: 'stringArray',
+  })
+  otherSans;
+
+  @attr('string', {
+    defaultValue: 'pem',
+    possibleValues: ['pem', 'der', 'pem_bundle'],
+  })
+  format;
+
+  @attr('string', {
+    defaultValue: 'der',
+    possibleValues: ['der', 'pkcs8'],
+  })
+  privateKeyFormat;
+
+  @attr('string', {
+    defaultValue: 'rsa',
+    possibleValues: ['rsa', 'ed25519', 'ec'],
+  })
+  keyType;
+
+  @attr('string', {
+    defaultValue: '0',
+    // options management happens in pki-key-parameters
+  })
+  keyBits;
+
+  @attr('number', {
+    defaultValue: -1,
+  })
+  maxPathLength;
+
+  @attr('boolean', {
+    label: 
'Exclude common name from SANs',
+    subText:
+      'If checked, the common name will not be included in DNS or Email Subject Alternate Names. This is useful if the CN is a human-readable identifier, not a hostname or email address.',
+    defaultValue: false,
+  })
+  excludeCnFromSans;
+
+  @attr('string', {
+    label: 'Permitted DNS domains',
+  })
+  permittedDnsDomains;
+
+  @attr('string', {
+    label: 'Organizational Units (OU)',
+    subText: 'The OU (OrganizationalUnit) values to include in the subject field of issued certificates.',
+    editType: 'stringArray',
+  })
+  ou;
+  @attr({ editType: 'stringArray' }) organization;
+  @attr({ editType: 'stringArray' }) country;
+  @attr({ editType: 'stringArray' }) locality;
+  @attr({ editType: 'stringArray' }) province;
+  @attr({ editType: 'stringArray' }) streetAddress;
+  @attr({ editType: 'stringArray' }) postalCode;
+
+  @attr('string', {
+    subText:
+      "Specifies the requested Subject's named Serial Number value. This has no impact on the Certificate's serial number randomly generated by Vault.",
+  })
+  subjectSerialNumber;
+  // this is different from the number (16:5e:a0...) randomly generated by Vault
+  // https://developer.hashicorp.com/vault/api-docs/secret/pki#serial_number
+
+  @attr('boolean', {
+    subText: 'Whether to add a Basic Constraints extension with CA: true.',
+  })
+  addBasicConstraints;
+
+  @attr({
+    label: 'Backdate validity',
+    detailsLabel: 'Issued certificate backdating',
+    helperTextDisabled: 'Vault will use the default value, 30s',
+    helperTextEnabled:
+      'Also called the not_before_duration property. Allows certificates to be valid for a certain time period before now. This is useful to correct clock misalignment on various systems when setting up your CA.',
+    editType: 'ttl',
+    defaultValue: '30s',
+  })
+  notBeforeDuration;
+
+  @attr('string') managedKeyName;
+  @attr('string', {
+    label: 'Managed key UUID',
+  })
+  managedKeyId;
+
+  @attr({
+    label: 'Not valid after',
+    detailsLabel: 'Issued certificates expire after',
+    subText:
+      'The time after which this certificate will no longer be valid. This can be a TTL (a range of time from now) or a specific date.',
+    editType: 'yield',
+  })
+  customTtl;
+  @attr('string') ttl;
+  @attr('date') notAfter;
+
+  @attr('string', { label: 'Issuer ID', readOnly: true, detailLinkTo: 'issuers.issuer.details' }) issuerId; // returned from generate-root action
+
+  // For generating and signing a CSR
+  @attr('string', { label: 'CSR', masked: true }) csr;
+  @attr caChain;
+  @attr('string', { label: 'Key ID', detailLinkTo: 'keys.key.details' }) keyId;
+  @attr('string', { masked: true }) privateKey;
+  @attr('string') privateKeyType;
+
+  get backend() {
+    return this.secretMountPath.currentPath;
+  }
+
+  // To determine which endpoint the config adapter should use,
+  // we want to check capabilities on the newer endpoints (those
+  // prefixed with "issuers") and use the old path as fallback
+  // if user does not have permissions.
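+  // Sketch of that decision (adapter details assumed; the legacy path name
+  // below is illustrative, not taken from this diff):
+  //   const path = this.canImportBundle
+  //     ? `${this.backend}/issuers/import/bundle` // newer endpoint
+  //     : `${this.backend}/config/ca`;            // legacy fallback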
+ @lazyCapabilities(apiPath`${'backend'}/issuers/import/bundle`, 'backend') importBundlePath; + @lazyCapabilities(apiPath`${'backend'}/issuers/generate/root/${'type'}`, 'backend', 'type') + generateIssuerRootPath; + @lazyCapabilities(apiPath`${'backend'}/issuers/generate/intermediate/${'type'}`, 'backend', 'type') + generateIssuerCsrPath; + @lazyCapabilities(apiPath`${'backend'}/issuers/cross-sign`, 'backend') crossSignPath; + + get canImportBundle() { + return this.importBundlePath.get('canCreate') === true; + } + get canGenerateIssuerRoot() { + return this.generateIssuerRootPath.get('canCreate') === true; + } + get canGenerateIssuerIntermediate() { + return this.generateIssuerCsrPath.get('canCreate') === true; + } + get canCrossSign() { + return this.crossSignPath.get('canCreate') === true; + } +} diff --git a/ui/app/models/pki/certificate/base.js b/ui/app/models/pki/certificate/base.js new file mode 100644 index 0000000..8564bfa --- /dev/null +++ b/ui/app/models/pki/certificate/base.js @@ -0,0 +1,99 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import { assert } from '@ember/debug'; +import { service } from '@ember/service'; +import { withFormFields } from 'vault/decorators/model-form-fields'; +import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; + +/** + * There are many actions that involve certificates in PKI world. + * The base certificate model contains shared attributes that make up a certificate's content. + * Other models under pki/certificate will extend this model and include additional attributes + * and associated adapter methods for performing various generation and signing actions. + * This model also displays leaf certs and their parsed attributes (which exist as an object in + * the attribute `parsedCertificate`) + */ + +// also displays parsedCertificate values in the template +const certDisplayFields = ['certificate', 'commonName', 'revocationTime', 'serialNumber']; + +@withFormFields(certDisplayFields) +export default class PkiCertificateBaseModel extends Model { + @service secretMountPath; + + get useOpenAPI() { + return true; + } + get backend() { + return this.secretMountPath.currentPath; + } + getHelpUrl() { + assert('You must provide a helpUrl for OpenAPI', true); + } + + // The attributes parsed from parse-pki-cert util live here + @attr parsedCertificate; + + @attr('string') commonName; + @attr({ + label: 'Not valid after', + detailsLabel: 'Issued certificates expire after', + subText: + 'The time after which this certificate will no longer be valid. This can be a TTL (a range of time from now) or a specific date.', + editType: 'yield', + }) + customTtl; // sets ttl and notAfter via one input + + @attr('boolean', { + label: 'Exclude common name from SANs', + subText: + 'If checked, the common name will not be included in DNS or Email Subject Alternate Names. 
diff --git a/ui/app/models/pki/certificate/base.js b/ui/app/models/pki/certificate/base.js
new file mode 100644
index 0000000..8564bfa
--- /dev/null
+++ b/ui/app/models/pki/certificate/base.js
@@ -0,0 +1,99 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import Model, { attr } from '@ember-data/model';
+import { assert } from '@ember/debug';
+import { service } from '@ember/service';
+import { withFormFields } from 'vault/decorators/model-form-fields';
+import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities';
+
+/**
+ * There are many actions that involve certificates in PKI world.
+ * The base certificate model contains shared attributes that make up a certificate's content.
+ * Other models under pki/certificate will extend this model and include additional attributes
+ * and associated adapter methods for performing various generation and signing actions.
+ * This model also displays leaf certs and their parsed attributes (which exist as an object in
+ * the attribute `parsedCertificate`)
+ */
+
+// also displays parsedCertificate values in the template
+const certDisplayFields = ['certificate', 'commonName', 'revocationTime', 'serialNumber'];
+
+@withFormFields(certDisplayFields)
+export default class PkiCertificateBaseModel extends Model {
+  @service secretMountPath;
+
+  get useOpenAPI() {
+    return true;
+  }
+  get backend() {
+    return this.secretMountPath.currentPath;
+  }
+  getHelpUrl() {
+    assert('You must provide a helpUrl for OpenAPI', false);
+  }
+
+  // The attributes parsed from parse-pki-cert util live here
+  @attr parsedCertificate;
+
+  @attr('string') commonName;
+  @attr({
+    label: 'Not valid after',
+    detailsLabel: 'Issued certificates expire after',
+    subText:
+      'The time after which this certificate will no longer be valid. This can be a TTL (a range of time from now) or a specific date.',
+    editType: 'yield',
+  })
+  customTtl; // sets ttl and notAfter via one input
+
+  @attr('boolean', {
+    label: 'Exclude common name from SANs',
+    subText:
+      'If checked, the common name will not be included in DNS or Email Subject Alternate Names. This is useful if the CN is a human-readable identifier, not a hostname or email address.',
+    defaultValue: false,
+  })
+  excludeCnFromSans;
+
+  @attr('string', {
+    label: 'Subject Alternative Names (SANs)',
+    subText:
+      'The requested Subject Alternative Names; if email protection is enabled for the role, this may contain email addresses.',
+    editType: 'stringArray',
+  })
+  altNames;
+
+  // SANs below are editType: stringArray from openApi
+  @attr('string', {
+    label: 'IP Subject Alternative Names (IP SANs)',
+    subText: 'Only valid if the role allows IP SANs (which is the default).',
+  })
+  ipSans;
+
+  @attr('string', {
+    label: 'URI Subject Alternative Names (URI SANs)',
+    subText: 'If any requested URIs do not match role policy, the entire request will be denied.',
+  })
+  uriSans;
+
+  @attr('string', {
+    subText: 'Requested other SANs with the format <oid>;UTF8:<utf8 string value> for each entry.',
+  })
+  otherSans;
+
+  // Attrs that come back from API POST request
+  @attr({ label: 'CA Chain', masked: true }) caChain;
+  @attr('string', { masked: true }) certificate;
+  @attr('number') expiration;
+  @attr('string', { label: 'Issuing CA', masked: true }) issuingCa;
+  @attr('string', { masked: true }) privateKey; // only returned for type=exported and /issue
+  @attr('string') privateKeyType; // only returned for type=exported and /issue
+  @attr('number', { formatDate: true }) revocationTime;
+  @attr('string') serialNumber;
+
+  @lazyCapabilities(apiPath`${'backend'}/revoke`, 'backend') revokePath;
+  get canRevoke() {
+    return this.revokePath.get('isLoading') || this.revokePath.get('canCreate') !== false;
+  }
+}
diff --git a/ui/app/models/pki/certificate/generate.js b/ui/app/models/pki/certificate/generate.js
new file mode 100644
index 0000000..c3351c4
--- /dev/null
+++ b/ui/app/models/pki/certificate/generate.js
@@ -0,0 +1,41 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import { attr } from '@ember-data/model';
+import { withFormFields } from 'vault/decorators/model-form-fields';
+import PkiCertificateBaseModel from './base';
+
+const generateFromRole = [
+  {
+    default: ['commonName', 'userIds', 'customTtl', 'format', 'privateKeyFormat'],
+  },
+  {
+    'Subject Alternative Name (SAN) Options': [
+      'excludeCnFromSans',
+      'altNames',
+      'ipSans',
+      'uriSans',
+      'otherSans',
+    ],
+  },
+];
+// Extra fields returned on the /issue endpoint
+const certDisplayFields = [
+  'certificate',
+  'commonName',
+  'revocationTime',
+  'serialNumber',
+  'caChain',
+  'issuingCa',
+  'privateKey',
+  'privateKeyType',
+];
+@withFormFields(certDisplayFields, generateFromRole)
+export default class PkiCertificateGenerateModel extends PkiCertificateBaseModel {
+  getHelpUrl(backend) {
+    return `/v1/${backend}/issue/example?help=1`;
+  }
+  @attr('string') role; // role name to issue certificate against for request URL
+}
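A hedged usage sketch for the generate model above (the store call, role name, and values are assumptions, not taken from the Vault codebase); saving the record issues a leaf certificate against the role's `/issue` endpoint:

```js
// Inside an async action (illustrative only):
const cert = this.store.createRecord('pki/certificate/generate', {
  role: 'web-server', // assumed existing PKI role; determines the request URL
  commonName: 'app.example.com',
  customTtl: '72h',
});
await cert.save(); // POSTs to /v1/<backend>/issue/web-server
// Response-only display fields (certificate, serialNumber, caChain, issuingCa,
// privateKey, privateKeyType) are populated on the saved record.
```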
diff --git a/ui/app/models/pki/certificate/sign.js b/ui/app/models/pki/certificate/sign.js
new file mode 100644
index 0000000..307d703
--- /dev/null
+++ b/ui/app/models/pki/certificate/sign.js
@@ -0,0 +1,41 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import { attr } from '@ember-data/model';
+import { withFormFields } from 'vault/decorators/model-form-fields';
+import PkiCertificateBaseModel from './base';
+
+const generateFromRole = [
+  {
+    default: ['csr', 'commonName', 'customTtl', 'format', 'removeRootsFromChain'],
+  },
+  {
+    'Subject Alternative Name (SAN) Options': [
+      'excludeCnFromSans',
+      'altNames',
+      'ipSans',
+      'uriSans',
+      'otherSans',
+    ],
+  },
+];
+@withFormFields(null, generateFromRole)
+export default class PkiCertificateSignModel extends PkiCertificateBaseModel {
+  getHelpUrl(backend) {
+    return `/v1/${backend}/sign/example?help=1`;
+  }
+  @attr('string') role; // role name to create certificate against for request URL
+
+  @attr('string', {
+    label: 'CSR',
+    editType: 'textarea',
+  })
+  csr;
+
+  @attr('boolean', {
+    subText: 'When checked, the CA chain will not include self-signed CA certificates.',
+  })
+  removeRootsFromChain;
+}
diff --git a/ui/app/models/pki/config/acme.js b/ui/app/models/pki/config/acme.js
new file mode 100644
index 0000000..9733932
--- /dev/null
+++ b/ui/app/models/pki/config/acme.js
@@ -0,0 +1,75 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import Model, { attr } from '@ember-data/model';
+import { withFormFields } from 'vault/decorators/model-form-fields';
+import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities';
+
+@withFormFields()
+export default class PkiConfigAcmeModel extends Model {
+  // This model uses the backend value as the model ID
+  get useOpenAPI() {
+    return true;
+  }
+
+  getHelpUrl(backendPath) {
+    return `/v1/${backendPath}/config/acme?help=1`;
+  }
+
+  // attrs order in the form is determined by order here
+
+  @attr('boolean', {
+    label: 'ACME enabled',
+    subText: 'When ACME is disabled, all requests to ACME directory URLs will return 404.',
+  })
+  enabled;
+
+  @attr('string', {
+    subText:
+      "Specifies the behavior of the default ACME directory. Can be 'forbid', 'sign-verbatim' or a role given by 'role:<role_name>'. If a role is used, it must be present in 'allowed_roles'.",
+  })
+  defaultDirectoryPolicy;
+
+  @attr('array', {
+    editType: 'stringArray',
+    subText:
+      "The default value '*' allows every role within the mount to be used. If the default_directory_policy specifies a role, it must be allowed under this configuration.",
+  })
+  allowedRoles;
+
+  @attr('boolean', {
+    label: 'Allow role ExtKeyUsage',
+    subText:
+      "When enabled, respect the role's ExtKeyUsage flags. Otherwise, ACME certificates are forced to ServerAuth.",
+  })
+  allowRoleExtKeyUsage;
+
+  @attr('array', {
+    editType: 'stringArray',
+    subText:
+      "Specifies a list of issuers allowed to issue certificates via explicit ACME paths. If an allowed role specifies an issuer outside this list, it will be allowed. The default value '*' allows every issuer within the mount.",
+  })
+  allowedIssuers;
+
+  @attr('string', {
+    label: 'EAB policy',
+    possibleValues: ['not-required', 'new-account-required', 'always-required'],
+  })
+  eabPolicy;
+
+  @attr('string', {
+    label: 'DNS resolver',
+    subText:
+      'An optional overriding DNS resolver to use for challenge verification lookups. When not specified, the default system resolver will be used. 
This allows domains on peered networks with an accessible DNS resolver to be validated.', + }) + dnsResolver; + + @lazyCapabilities(apiPath`${'id'}/config/acme`, 'id') + acmePath; + + get canSet() { + return this.acmePath.get('canUpdate') !== false; + } +} diff --git a/ui/app/models/pki/config/cluster.js b/ui/app/models/pki/config/cluster.js new file mode 100644 index 0000000..b89d660 --- /dev/null +++ b/ui/app/models/pki/config/cluster.js @@ -0,0 +1,40 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import { withFormFields } from 'vault/decorators/model-form-fields'; +import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; + +@withFormFields() +export default class PkiConfigClusterModel extends Model { + // This model uses the backend value as the model ID + get useOpenAPI() { + return true; + } + + getHelpUrl(backendPath) { + return `/v1/${backendPath}/config/cluster?help=1`; + } + + @attr('string', { + label: "Mount's API path", + subText: + "Specifies the path to this performance replication cluster's API mount path, including any namespaces as path components. This address is used for the ACME directories, which must be served over a TLS-enabled listener.", + }) + path; + @attr('string', { + label: 'AIA path', + subText: + "Specifies the path to this performance replication cluster's AIA distribution point; may refer to an external, non-Vault responder.", + }) + aiaPath; + + // this is for pki-only cluster config, not the universal vault cluster + @lazyCapabilities(apiPath`${'id'}/config/cluster`, 'id') clusterPath; + + get canSet() { + return this.clusterPath.get('canUpdate') !== false; + } +} diff --git a/ui/app/models/pki/config/crl.js b/ui/app/models/pki/config/crl.js new file mode 100644 index 0000000..6d7490e --- /dev/null +++ b/ui/app/models/pki/config/crl.js @@ -0,0 +1,98 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import { withFormFields } from 'vault/decorators/model-form-fields'; +import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; + +const formFieldGroups = [ + { + 'Certificate Revocation List (CRL)': ['expiry', 'autoRebuildGracePeriod', 'deltaRebuildInterval'], + }, + { + 'Online Certificate Status Protocol (OCSP)': ['ocspExpiry'], + }, + { 'Unified Revocation': ['crossClusterRevocation', 'unifiedCrl', 'unifiedCrlOnExistingPaths'] }, +]; +@withFormFields(null, formFieldGroups) +export default class PkiConfigCrlModel extends Model { + // This model uses the backend value as the model ID + + @attr('boolean') autoRebuild; + @attr('string', { + label: 'Auto-rebuild on', + labelDisabled: 'Auto-rebuild off', + mapToBoolean: 'autoRebuild', + isOppositeValue: false, + editType: 'ttl', + helperTextEnabled: 'Vault will rebuild the CRL in the below grace period before expiration', + helperTextDisabled: 'Vault will not automatically rebuild the CRL', + }) + autoRebuildGracePeriod; + + @attr('boolean') enableDelta; + @attr('string', { + label: 'Delta CRL building on', + labelDisabled: 'Delta CRL building off', + mapToBoolean: 'enableDelta', + isOppositeValue: false, + editType: 'ttl', + helperTextEnabled: 'Vault will rebuild the delta CRL at the interval below:', + helperTextDisabled: 'Vault will not rebuild the delta CRL at an interval', + }) + deltaRebuildInterval; + + @attr('boolean') disable; + @attr('string', { + label: 'Expiry', + labelDisabled: 'No expiry', + mapToBoolean: 'disable', + isOppositeValue: true, + editType: 'ttl', + helperTextDisabled: 'The CRL will not be built.', + helperTextEnabled: 'The CRL will expire after:', + }) + expiry; + + @attr('boolean') ocspDisable; + @attr('string', { + label: 'OCSP responder APIs enabled', + labelDisabled: 'OCSP responder APIs disabled', + mapToBoolean: 'ocspDisable', + isOppositeValue: true, + editType: 'ttl', + helperTextEnabled: "Requests about a certificate's status will be valid for:", + helperTextDisabled: 'Requests cannot be made to check if an individual certificate is valid.', + }) + ocspExpiry; + + // enterprise only params + @attr('boolean', { + label: 'Cross-cluster revocation', + helpText: + 'Enables cross-cluster revocation request queues. When a serial not issued on this local cluster is passed to the /revoke endpoint, it is replicated across clusters and revoked by the issuing cluster if it is online.', + }) + crossClusterRevocation; + + @attr('boolean', { + label: 'Unified CRL', + helpText: + 'Enables unified CRL and OCSP building. This synchronizes all revocations between clusters; a single, unified CRL will be built on the active node of the primary performance replication (PR) cluster.', + }) + unifiedCrl; + + @attr('boolean', { + label: 'Unified CRL on existing paths', + helpText: + 'If enabled, existing CRL and OCSP paths will return the unified CRL instead of a response based on cluster-local data.', + }) + unifiedCrlOnExistingPaths; + + @lazyCapabilities(apiPath`${'id'}/config/crl`, 'id') crlPath; + + get canSet() { + return this.crlPath.get('canUpdate') !== false; + } +} diff --git a/ui/app/models/pki/config/urls.js b/ui/app/models/pki/config/urls.js new file mode 100644 index 0000000..bb6f2af --- /dev/null +++ b/ui/app/models/pki/config/urls.js @@ -0,0 +1,50 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import { withFormFields } from 'vault/decorators/model-form-fields'; +import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; + +@withFormFields() +export default class PkiConfigUrlsModel extends Model { + // This model uses the backend value as the model ID + get useOpenAPI() { + return true; + } + getHelpUrl(backendPath) { + return `/v1/${backendPath}/config/urls?help=1`; + } + + @attr({ + label: 'Issuing certificates', + subText: + 'The URL values for the Issuing Certificate field; these are different URLs for the same resource.', + showHelpText: false, + editType: 'stringArray', + }) + issuingCertificates; + + @attr({ + label: 'CRL distribution points', + subText: 'Specifies the URL values for the CRL Distribution Points field.', + showHelpText: false, + editType: 'stringArray', + }) + crlDistributionPoints; + + @attr({ + label: 'OCSP Servers', + subText: 'Specifies the URL values for the OCSP Servers field.', + showHelpText: false, + editType: 'stringArray', + }) + ocspServers; + + @lazyCapabilities(apiPath`${'id'}/config/urls`, 'id') urlsPath; + + get canSet() { + return this.urlsPath.get('canUpdate') !== false; + } +} diff --git a/ui/app/models/pki/issuer.js b/ui/app/models/pki/issuer.js new file mode 100644 index 0000000..6de6cfc --- /dev/null +++ b/ui/app/models/pki/issuer.js @@ -0,0 +1,164 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import { withFormFields } from 'vault/decorators/model-form-fields'; +import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; +import { service } from '@ember/service'; + +const issuerUrls = ['issuingCertificates', 'crlDistributionPoints', 'ocspServers']; +const inputFields = [ + 'issuerName', + 'leafNotAfterBehavior', + 'usage', + 'manualChain', + 'revocationSignatureAlgorithm', + ...issuerUrls, +]; +const displayFields = [ + { + default: ['certificate', 'caChain', 'commonName', 'issuerName', 'issuerId', 'keyId'], + // also displays parsedCertificate values in the template + }, + { 'Issuer URLs': issuerUrls }, +]; +@withFormFields(inputFields, displayFields) +export default class PkiIssuerModel extends Model { + @service secretMountPath; + // TODO use openAPI after removing route extension (see pki/roles route for example) + get useOpenAPI() { + return false; + } + get backend() { + return this.secretMountPath.currentPath; + } + get issuerRef() { + return this.issuerName || this.issuerId; + } + + // READ ONLY + @attr isDefault; + @attr('string', { label: 'Issuer ID', detailLinkTo: 'issuers.issuer.details' }) issuerId; + @attr('string', { label: 'Default key ID', detailLinkTo: 'keys.key.details' }) keyId; + @attr({ label: 'CA Chain', masked: true }) caChain; + @attr({ masked: true }) certificate; + @attr('string') serialNumber; + + // parsed from certificate contents in serializer (see parse-pki-cert.js) + @attr parsedCertificate; + @attr('string') commonName; + @attr isRoot; + + @attr subjectSerialNumber; // this is not the UUID serial number field randomly generated by Vault for leaf certificates + @attr({ label: 'Subject Alternative Names (SANs)' }) altNames; + @attr({ label: 'IP SANs' }) ipSans; + @attr({ label: 'URI SANs' }) uriSans; + @attr({ label: 'Other SANs' }) otherSans; + + // UPDATING + @attr('string') issuerName; + + @attr({ + label: 'Leaf notAfter behavior', + subText: + 'What happens when a leaf certificate is 
issued, but its NotAfter field (and therefore its expiry date) exceeds that of this issuer.', + docLink: '/vault/api-docs/secret/pki#update-issuer', + editType: 'yield', + valueOptions: ['err', 'truncate', 'permit'], + }) + leafNotAfterBehavior; + + @attr({ + subText: 'Allowed usages for this issuer. It can always be read.', + editType: 'yield', + valueOptions: [ + { label: 'Issuing certificates', value: 'issuing-certificates' }, + { label: 'Signing CRLs', value: 'crl-signing' }, + { label: 'Signing OCSPs', value: 'ocsp-signing' }, + ], + }) + usage; + + @attr('string', { + subText: + "An advanced field useful when automatic chain building isn't desired. The first element must be the present issuer's reference.", + }) + manualChain; + + @attr({ + subText: + 'The signature algorithm to use when building CRLs. The default value (empty string) is for Go to select the signature algorithm automatically, which may not always work.', + noDefault: true, + possibleValues: [ + 'sha256withrsa', + 'ecdsawithsha384', + 'sha256withrsapss', + 'ed25519', + 'sha384withrsapss', + 'sha512withrsapss', + 'pureed25519', + 'sha384withrsa', + 'sha512withrsa', + 'ecdsawithsha256', + 'ecdsawithsha512', + ], + }) + revocationSignatureAlgorithm; + + @attr('string', { + subText: + 'The URL values for the Issuing Certificate field; these are different URLs for the same resource.', + editType: 'stringArray', + }) + issuingCertificates; + + @attr('string', { + label: 'CRL distribution points', + subText: 'Specifies the URL values for the CRL Distribution Points field.', + editType: 'stringArray', + }) + crlDistributionPoints; + + @attr('string', { + label: 'OCSP servers', + subText: 'Specifies the URL values for the OCSP Servers field.', + editType: 'stringArray', + }) + ocspServers; + + // IMPORTING + @attr('string') pemBundle; + // readonly attrs returned after importing + @attr importedIssuers; + @attr importedKeys; + @attr mapping; + + @lazyCapabilities(apiPath`${'backend'}/issuer/${'issuerId'}`) issuerPath; + @lazyCapabilities(apiPath`${'backend'}/root/rotate/exported`) rotateExported; + @lazyCapabilities(apiPath`${'backend'}/root/rotate/internal`) rotateInternal; + @lazyCapabilities(apiPath`${'backend'}/root/rotate/existing`) rotateExisting; + @lazyCapabilities(apiPath`${'backend'}/root`, 'backend') deletePath; + @lazyCapabilities(apiPath`${'backend'}/intermediate/cross-sign`) crossSignPath; + @lazyCapabilities(apiPath`${'backend'}/issuer/${'issuerId'}/sign-intermediate`) signIntermediate; + get canRotateIssuer() { + return ( + this.rotateExported.get('canUpdate') !== false || + this.rotateExisting.get('canUpdate') !== false || + this.rotateInternal.get('canUpdate') !== false + ); + } + get canCrossSign() { + return this.crossSignPath.get('canUpdate') !== false; + } + get canSignIntermediate() { + return this.signIntermediate.get('canUpdate') !== false; + } + get canConfigure() { + return this.issuerPath.get('canUpdate') !== false; + } + get canDeleteAllIssuers() { + return this.deletePath.get('isLoading') || this.deletePath.get('canDelete') !== false; + } +} diff --git a/ui/app/models/pki/key.js b/ui/app/models/pki/key.js new file mode 100644 index 0000000..e6a41a4 --- /dev/null +++ b/ui/app/models/pki/key.js @@ -0,0 +1,88 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import { inject as service } from '@ember/service'; +import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; +import { withFormFields } from 'vault/decorators/model-form-fields'; +import { withModelValidations } from 'vault/decorators/model-validations'; + +const validations = { + type: [{ type: 'presence', message: 'Type is required.' }], + keyType: [{ type: 'presence', message: 'Please select a key type.' }], + keyName: [ + { + validator(model) { + if (model.keyName === 'default') return false; + return true; + }, + message: `Key name cannot be the reserved value 'default'`, + }, + ], +}; +const displayFields = ['keyId', 'keyName', 'keyType', 'keyBits']; +const formFieldGroups = [{ default: ['keyName', 'type'] }, { 'Key parameters': ['keyType', 'keyBits'] }]; +@withModelValidations(validations) +@withFormFields(displayFields, formFieldGroups) +export default class PkiKeyModel extends Model { + @service secretMountPath; + + @attr('string', { detailsLabel: 'Key ID' }) keyId; + @attr('string', { + subText: `Optional, human-readable name for this key. The name must be unique across all keys and cannot be 'default'.`, + }) + keyName; + @attr('string', { + noDefault: true, + possibleValues: ['internal', 'exported'], + subText: + 'The type of operation. If exported, the private key will be returned in the response; if internal the private key will not be returned and cannot be retrieved later.', + }) + type; + @attr('string', { + noDefault: true, + possibleValues: ['rsa', 'ec', 'ed25519'], + subText: 'The type of key that will be generated. Must be rsa, ed25519, or ec. ', + }) + keyType; + @attr('string', { + label: 'Key bits', + noDefault: true, + subText: 'Bit length of the key to generate.', + }) + keyBits; // no possibleValues because dependent on selected key type + + @attr('string') pemBundle; + @attr('string') privateKey; + + get backend() { + return this.secretMountPath.currentPath; + } + + /* CAPABILITIES + * Default to show UI elements unless we know they can't access the given path + */ + + @lazyCapabilities(apiPath`${'backend'}/key/${'keyId'}`, 'backend', 'keyId') keyPath; + get canRead() { + return this.keyPath.get('canRead') !== false; + } + get canEdit() { + return this.keyPath.get('canUpdate') !== false; + } + get canDelete() { + return this.keyPath.get('canDelete') !== false; + } + + @lazyCapabilities(apiPath`${'backend'}/keys/generate`, 'backend') generatePath; + get canGenerateKey() { + return this.generatePath.get('canUpdate') !== false; + } + + @lazyCapabilities(apiPath`${'backend'}/keys/import`, 'backend') importPath; + get canImportKey() { + return this.importPath.get('canUpdate') !== false; + } +} diff --git a/ui/app/models/pki/role.js b/ui/app/models/pki/role.js new file mode 100644 index 0000000..78fdb44 --- /dev/null +++ b/ui/app/models/pki/role.js @@ -0,0 +1,360 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; +import { withModelValidations } from 'vault/decorators/model-validations'; +import { withFormFields } from 'vault/decorators/model-form-fields'; + +const validations = { + name: [{ type: 'presence', message: 'Name is required.' 
}],
+};
+
+const fieldGroups = [
+  {
+    default: [
+      'name',
+      'issuerRef',
+      'customTtl',
+      'notBeforeDuration',
+      'maxTtl',
+      'generateLease',
+      'noStore',
+      'addBasicConstraints',
+    ],
+  },
+  {
+    'Domain handling': [
+      'allowedDomains',
+      'allowedDomainsTemplate',
+      'allowBareDomains',
+      'allowSubdomains',
+      'allowGlobDomains',
+      'allowWildcardCertificates',
+      'allowLocalhost', // default: true (returned true by OpenApi)
+      'allowAnyName',
+      'enforceHostnames', // default: true (returned true by OpenApi)
+    ],
+  },
+  {
+    'Key parameters': ['keyType', 'keyBits', 'signatureBits'],
+  },
+  {
+    'Key usage': ['keyUsage', 'extKeyUsage', 'extKeyUsageOids'],
+  },
+  { 'Policy identifiers': ['policyIdentifiers'] },
+  {
+    'Subject Alternative Name (SAN) Options': [
+      'allowIpSans',
+      'allowedUriSans',
+      'allowUriSansTemplate',
+      'allowedOtherSans',
+    ],
+  },
+  {
+    'Additional subject fields': [
+      'allowedUserIds',
+      'allowedSerialNumbers',
+      'requireCn',
+      'useCsrCommonName',
+      'useCsrSans',
+      'ou',
+      'organization',
+      'country',
+      'locality',
+      'province',
+      'streetAddress',
+      'postalCode',
+    ],
+  },
+];
+
+@withFormFields(null, fieldGroups)
+@withModelValidations(validations)
+export default class PkiRoleModel extends Model {
+  get useOpenAPI() {
+    // must be a getter so it can be accessed in path-help.js
+    return true;
+  }
+  getHelpUrl(backend) {
+    return `/v1/${backend}/roles/example?help=1`;
+  }
+
+  @attr('string', { readOnly: true }) backend;
+
+  /* Overriding OpenApi default options */
+  @attr('string', {
+    label: 'Role name',
+    fieldValue: 'name',
+    editDisabled: true,
+  })
+  name;
+
+  @attr('string', {
+    label: 'Issuer reference',
+    detailsLabel: 'Issuer',
+    defaultValue: 'default',
+    subText: `Specifies the issuer that will be used to create certificates with this role. To find this, run 'read -field=default pki_int/config/issuers' in the console. By default, we will use the mount's default issuer.`,
+  })
+  issuerRef;
+
+  @attr({
+    label: 'Not valid after',
+    detailsLabel: 'Issued certificates expire after',
+    subText:
+      'The time after which this certificate will no longer be valid. This can be a TTL (a range of time from now) or a specific date.',
+    editType: 'yield',
+  })
+  customTtl;
+
+  @attr({
+    label: 'Backdate validity',
+    detailsLabel: 'Issued certificate backdating',
+    helperTextDisabled: 'Vault will use the default value, 30s',
+    helperTextEnabled:
+      'Also called the not_before_duration property. Allows certificates to be valid for a certain time period before now. This is useful to correct clock misalignment on various systems when setting up your CA.',
+    editType: 'ttl',
+    defaultValue: '30s',
+  })
+  notBeforeDuration;
+
+  @attr({
+    label: 'Max TTL',
+    helperTextDisabled:
+      'The maximum Time-To-Live of certificates generated by this role. If not set, the system max lease TTL will be used.',
+    editType: 'ttl',
+    defaultShown: 'System default',
+  })
+  maxTtl;
+
+  @attr('boolean', {
+    label: 'Generate lease with certificate',
+    subText:
+      'Specifies if certificates issued/signed against this role will have Vault leases attached to them.',
+    editType: 'boolean',
+    docLink: '/vault/api-docs/secret/pki#create-update-role',
+  })
+  generateLease;
+
+  @attr('boolean', {
+    label: 'Do not store certificates in storage backend',
+    detailsLabel: 'Store in storage backend', // template reverses value
+    subText:
+      'This can improve performance when issuing large numbers of certificates. 
However, certificates issued in this way cannot be enumerated or revoked.', + editType: 'boolean', + docLink: '/vault/api-docs/secret/pki#create-update-role', + }) + noStore; + + @attr('boolean', { + label: 'Basic constraints valid for non-CA', + detailsLabel: 'Add basic constraints', + subText: 'Mark Basic Constraints valid when issuing non-CA certificates.', + editType: 'boolean', + }) + addBasicConstraints; + /* End of overriding default options */ + + /* Overriding OpenApi Domain handling options */ + @attr({ + label: 'Allowed domains', + subText: 'Specifies the domains this role is allowed to issue certificates for.', + editType: 'stringArray', + }) + allowedDomains; + + @attr('boolean', { + label: 'Allow templates in allowed domains', + }) + allowedDomainsTemplate; + /* End of overriding Domain handling options */ + + /* Overriding OpenApi Key parameters options */ + @attr('string', { + label: 'Key type', + possibleValues: ['rsa', 'ec', 'ed25519', 'any'], + defaultValue: 'rsa', + }) + keyType; + + @attr('string', { + label: 'Key bits', + defaultValue: '2048', + }) + keyBits; // no possibleValues because options are dependent on selected key type + + @attr('string', { + label: 'Signature bits', + subText: `Only applicable for key_type 'RSA'. Ignore for other key types.`, + defaultValue: '0', + possibleValues: ['0', '256', '384', '512'], + }) + signatureBits; + /* End of overriding Key parameters options */ + + /* Overriding API Policy identifier option */ + @attr({ + label: 'Policy identifiers', + subText: 'A list of policy object identifiers (OIDs).', + editType: 'stringArray', + }) + policyIdentifiers; + /* End of overriding Policy identifier options */ + + /* Overriding OpenApi SAN options */ + @attr('boolean', { + label: 'Allow IP SANs', + subText: 'Specifies if clients can request IP Subject Alternative Names.', + editType: 'boolean', + defaultValue: true, + }) + allowIpSans; + + @attr({ + label: 'URI Subject Alternative Names (URI SANs)', + subText: 'Defines allowed URI Subject Alternative Names.', + editType: 'stringArray', + docLink: '/vault/docs/concepts/policies', + }) + allowedUriSans; + + @attr('boolean', { + label: 'Allow URI SANs template', + subText: 'If true, the URI SANs above may contain templates, as with ACL Path Templating.', + editType: 'boolean', + docLink: '/vault/docs/concepts/policies', + }) + allowUriSansTemplate; + + @attr({ + label: 'Other SANs', + subText: 'Defines allowed custom OID/UTF8-string SANs.', + editType: 'stringArray', + }) + allowedOtherSans; + /* End of overriding SAN options */ + + /* Overriding OpenApi Additional subject field options */ + @attr({ + label: 'Allowed serial numbers', + subText: + 'A list of allowed serial numbers to be requested during certificate issuance. Shell-style globbing is supported. 
If empty, custom-specified serial numbers will be forbidden.',
+    editType: 'stringArray',
+  })
+  allowedSerialNumbers;
+
+  @attr('boolean', {
+    label: 'Require common name',
+    subText: 'If set to false, common name will be optional when generating a certificate.',
+    defaultValue: true,
+  })
+  requireCn;
+
+  @attr('boolean', {
+    label: 'Use CSR common name',
+    subText:
+      'When used with the CSR signing endpoint, the common name in the CSR will be used instead of taken from the JSON data.',
+    defaultValue: true,
+  })
+  useCsrCommonName;
+
+  @attr('boolean', {
+    label: 'Use CSR SANs',
+    subText:
+      'When used with the CSR signing endpoint, the subject alternate names in the CSR will be used instead of taken from the JSON data.',
+    defaultValue: true,
+  })
+  useCsrSans;
+
+  @attr({
+    label: 'Organizational Units (OU)',
+    subText:
+      'The OU (OrganizationalUnit) values to include in the subject field of issued certificates.',
+    editType: 'stringArray',
+  })
+  ou;
+
+  @attr('array', {
+    defaultValue() {
+      return ['DigitalSignature', 'KeyAgreement', 'KeyEncipherment'];
+    },
+    defaultShown: 'None',
+  })
+  keyUsage;
+
+  @attr('array', {
+    defaultShown: 'None',
+  })
+  extKeyUsage;
+
+  @attr('array', {
+    defaultShown: 'None',
+  })
+  extKeyUsageOids;
+
+  @attr({ editType: 'stringArray' }) allowedUserIds;
+  @attr({ editType: 'stringArray' }) organization;
+  @attr({ editType: 'stringArray' }) country;
+  @attr({ editType: 'stringArray' }) locality;
+  @attr({ editType: 'stringArray' }) province;
+  @attr({ editType: 'stringArray' }) streetAddress;
+  @attr({ editType: 'stringArray' }) postalCode;
+  /* End of overriding Additional subject field options */
+
+  /* CAPABILITIES
+   * Default to show UI elements unless we know they can't access the given path
+   */
+  @lazyCapabilities(apiPath`${'backend'}/roles/${'id'}`, 'backend', 'id') updatePath;
+  get canDelete() {
+    return this.updatePath.get('isLoading') || this.updatePath.get('canDelete') !== false;
+  }
+  get canEdit() {
+    return this.updatePath.get('isLoading') || this.updatePath.get('canUpdate') !== false;
+  }
+  get canRead() {
+    return this.updatePath.get('isLoading') || this.updatePath.get('canRead') !== false;
+  }
+
+  @lazyCapabilities(apiPath`${'backend'}/issue/${'id'}`, 'backend', 'id') generatePath;
+  get canGenerateCert() {
+    return this.generatePath.get('isLoading') || this.generatePath.get('canUpdate') !== false;
+  }
+  @lazyCapabilities(apiPath`${'backend'}/sign/${'id'}`, 'backend', 'id') signPath;
+  get canSign() {
+    return this.signPath.get('isLoading') || this.signPath.get('canUpdate') !== false;
+  }
+  @lazyCapabilities(apiPath`${'backend'}/sign-verbatim/${'id'}`, 'backend', 'id') signVerbatimPath;
+  get canSignVerbatim() {
+    return this.signVerbatimPath.get('isLoading') || this.signVerbatimPath.get('canUpdate') !== false;
+  }
+
+  // Gets header/footer copy for specific toggle groups.
+  get fieldGroupsInfo() {
+    return {
+      'Domain handling': {
+        footer: {
+          text: 'These options can interact intricately with one another. 
For more information,', + docText: 'learn more here.', + docLink: '/vault/api-docs/secret/pki#allowed_domains', + }, + }, + 'Key parameters': { + header: { + text: `These are the parameters for generating or validating the certificate's key material.`, + }, + }, + 'Subject Alternative Name (SAN) Options': { + header: { + text: `Subject Alternative Names (SANs) are identities (domains, IP addresses, and URIs) Vault attaches to the requested certificates.`, + }, + }, + 'Additional subject fields': { + header: { + text: `Additional identity metadata Vault can attach to the requested certificates.`, + }, + }, + }; + } +} diff --git a/ui/app/models/pki/sign-intermediate.js b/ui/app/models/pki/sign-intermediate.js new file mode 100644 index 0000000..6fe8845 --- /dev/null +++ b/ui/app/models/pki/sign-intermediate.js @@ -0,0 +1,98 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { attr } from '@ember-data/model'; +import { withFormFields } from 'vault/decorators/model-form-fields'; +import { withModelValidations } from 'vault/decorators/model-validations'; +import PkiCertificateBaseModel from './certificate/base'; + +const validations = { + csr: [{ type: 'presence', message: 'CSR is required.' }], +}; +@withModelValidations(validations) +@withFormFields([ + 'csr', + 'useCsrValues', + 'commonName', + 'excludeCnFromSans', + 'customTtl', + 'notBeforeDuration', + 'format', + 'permittedDnsDomains', + 'maxPathLength', +]) +export default class PkiSignIntermediateModel extends PkiCertificateBaseModel { + getHelpUrl(backend) { + return `/v1/${backend}/issuer/example/sign-intermediate?help=1`; + } + + @attr issuerRef; + + @attr('string', { + label: 'CSR', + editType: 'textarea', + subText: 'The PEM-encoded CSR to be signed.', + }) + csr; + + @attr('boolean', { + label: 'Use CSR values', + subText: + 'Subject information and key usages specified in the CSR will be used over parameters provided here, and extensions in the CSR will be copied into the issued certificate.', + docLink: '/vault/api-docs/secret/pki#use_csr_values', + }) + useCsrValues; + + @attr({ + label: 'Backdate validity', + detailsLabel: 'Issued certificate backdating', + helperTextDisabled: 'Vault will use the default value, 30s', + helperTextEnabled: + 'Also called the not_before_duration property. Allows certificates to be valid for a certain time period before now. This is useful to correct clock misalignment on various systems when setting up your CA.', + editType: 'ttl', + defaultValue: '30s', + }) + notBeforeDuration; + + @attr({ + label: 'Permitted DNS domains', + subText: + 'DNS domains for which certificates are allowed to be issued or signed by this CA certificate. Enter each value as a new input.', + }) + permittedDnsDomains; + + @attr({ + subText: 'Specifies the maximum path length to encode in the generated certificate. -1 means no limit', + defaultValue: '-1', + }) + maxPathLength; + + /* Signing Options overrides */ + @attr({ + label: 'Use PSS', + subText: + 'If checked, PSS signatures will be used over PKCS#1v1.5 signatures when a RSA-type issuer is used. Ignored for ECDSA/Ed25519 issuers.', + }) + usePss; + + @attr({ + label: 'Subject Key Identifier (SKID)', + subText: + 'Value for the subject key identifier, specified as a string in hex format. If this is empty, Vault will automatically calculate the SKID. 
', + }) + skid; + + @attr({ + possibleValues: ['0', '256', '384', '512'], + }) + signatureBits; + + /* Additional subject overrides */ + @attr('string', { + subText: + "Specifies the requested Subject's named Serial Number value. This has no impact on the Certificate's serial number randomly generated by Vault.", + }) + subjectSerialNumber; +} diff --git a/ui/app/models/pki/tidy.js b/ui/app/models/pki/tidy.js new file mode 100644 index 0000000..4083a10 --- /dev/null +++ b/ui/app/models/pki/tidy.js @@ -0,0 +1,171 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import { service } from '@ember/service'; +import { withExpandedAttributes } from 'vault/decorators/model-expanded-attributes'; + +@withExpandedAttributes() +export default class PkiTidyModel extends Model { + // the backend mount is the model id, only one pki/tidy model will ever persist (the auto-tidy config) + @service version; + + @attr({ + label: 'Tidy ACME enabled', + labelDisabled: 'Tidy ACME disabled', + mapToBoolean: 'tidyAcme', + helperTextDisabled: 'Tidying of ACME accounts, orders and authorizations is disabled', + helperTextEnabled: + 'The amount of time that must pass after creation that an account with no orders is marked revoked, and the amount of time after being marked revoked or deactivated.', + detailsLabel: 'ACME account safety buffer', + formatTtl: true, + }) + acmeAccountSafetyBuffer; + + @attr('boolean', { + label: 'Tidy ACME', + defaultValue: false, + }) + tidyAcme; + + @attr('boolean', { + label: 'Automatic tidy enabled', + defaultValue: false, + }) + enabled; // auto-tidy only + + @attr({ + label: 'Automatic tidy enabled', + labelDisabled: 'Automatic tidy disabled', + mapToBoolean: 'enabled', + helperTextEnabled: + 'Sets the interval_duration between automatic tidy operations; note that this is from the end of one operation to the start of the next.', + helperTextDisabled: 'Automatic tidy operations will not run.', + detailsLabel: 'Automatic tidy duration', + formatTtl: true, + }) + intervalDuration; // auto-tidy only + + @attr('string', { + editType: 'ttl', + helperTextEnabled: + 'Specifies a duration that issuers should be kept for, past their NotAfter validity period. Defaults to 365 days (8760 hours).', + hideToggle: true, + formatTtl: true, + }) + issuerSafetyBuffer; + + @attr('string', { + editType: 'ttl', + helperTextEnabled: + 'Specifies the duration to pause between tidying individual certificates. This releases the revocation lock and allows other operations to continue while tidy is running.', + hideToggle: true, + formatTtl: true, + }) + pauseDuration; + + @attr('string', { + editType: 'ttl', + helperTextEnabled: + 'Specifies a duration after which cross-cluster revocation requests will be removed as expired.', + hideToggle: true, + formatTtl: true, + }) + revocationQueueSafetyBuffer; // enterprise only + + @attr('string', { + editType: 'ttl', + helperTextEnabled: + 'For a certificate to be expunged, the time must be after the expiration time of the certificate (according to the local clock) plus the safety buffer. 
Defaults to 72 hours.', + hideToggle: true, + formatTtl: true, + }) + safetyBuffer; + + @attr('boolean', { label: 'Tidy the certificate store' }) + tidyCertStore; + + @attr('boolean', { + label: 'Tidy cross-cluster revoked certificates', + subText: 'Remove expired, cross-cluster revocation entries.', + }) + tidyCrossClusterRevokedCerts; // enterprise only + + @attr('boolean', { + subText: 'Automatically remove expired issuers after the issuer safety buffer duration has elapsed.', + }) + tidyExpiredIssuers; + + @attr('boolean', { + label: 'Tidy legacy CA bundle', + subText: + 'Backup any legacy CA/issuers bundle (from Vault versions earlier than 1.11) to config/ca_bundle.bak. Migration will only occur after issuer safety buffer has passed.', + }) + tidyMoveLegacyCaBundle; + + @attr('boolean', { + label: 'Tidy cross-cluster revocation requests', + }) + tidyRevocationQueue; // enterprise only + + @attr('boolean', { + label: 'Tidy revoked certificate issuer associations', + }) + tidyRevokedCertIssuerAssociations; + + @attr('boolean', { + label: 'Tidy revoked certificates', + subText: 'Remove all invalid and expired certificates from storage.', + }) + tidyRevokedCerts; + + get useOpenAPI() { + return true; + } + + getHelpUrl(backend) { + return `/v1/${backend}/config/auto-tidy?help=1`; + } + + get allGroups() { + const groups = [{ autoTidy: ['enabled', 'intervalDuration'] }, ...this.sharedFields]; + return this._expandGroups(groups); + } + + // shared between auto and manual tidy operations + get sharedFields() { + const groups = [ + { + 'Universal operations': [ + 'tidyCertStore', + 'tidyRevokedCerts', + 'tidyRevokedCertIssuerAssociations', + 'safetyBuffer', + 'pauseDuration', + ], + }, + { + 'ACME operations': ['tidyAcme', 'acmeAccountSafetyBuffer'], + }, + { + 'Issuer operations': ['tidyExpiredIssuers', 'tidyMoveLegacyCaBundle', 'issuerSafetyBuffer'], + }, + ]; + if (this.version.isEnterprise) { + groups.push({ + 'Cross-cluster operations': [ + 'tidyRevocationQueue', + 'tidyCrossClusterRevokedCerts', + 'revocationQueueSafetyBuffer', + ], + }); + } + return groups; + } + + get formFieldGroups() { + return this._expandGroups(this.sharedFields); + } +} diff --git a/ui/app/models/policy.js b/ui/app/models/policy.js new file mode 100644 index 0000000..c180ba6 --- /dev/null +++ b/ui/app/models/policy.js @@ -0,0 +1,35 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import { alias } from '@ember/object/computed'; +import { computed } from '@ember/object'; +import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; + +export default Model.extend({ + name: attr('string'), + policy: attr('string'), + policyType: computed('constructor.modelName', function () { + return this.constructor.modelName.split('/')[1]; + }), + updatePath: lazyCapabilities(apiPath`sys/policies/${'policyType'}/${'id'}`, 'id', 'policyType'), + canDelete: alias('updatePath.canDelete'), + canEdit: alias('updatePath.canUpdate'), + canRead: alias('updatePath.canRead'), + format: computed('policy', function () { + const policy = this.policy; + let isJSON; + try { + const parsed = JSON.parse(policy); + if (parsed) { + isJSON = true; + } + } catch (e) { + // can't parse JSON + isJSON = false; + } + return isJSON ? 
'json' : 'hcl';
+  }),
+});
diff --git a/ui/app/models/policy/acl.js b/ui/app/models/policy/acl.js
new file mode 100644
index 0000000..9e37dd8
--- /dev/null
+++ b/ui/app/models/policy/acl.js
@@ -0,0 +1,8 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import PolicyModel from '../policy';
+
+export default PolicyModel.extend();
diff --git a/ui/app/models/policy/egp.js b/ui/app/models/policy/egp.js
new file mode 100644
index 0000000..af6fc84
--- /dev/null
+++ b/ui/app/models/policy/egp.js
@@ -0,0 +1,19 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import { attr } from '@ember-data/model';
+import { computed } from '@ember/object';
+
+import PolicyModel from './rgp';
+import { expandAttributeMeta } from 'vault/utils/field-to-attrs';
+
+export default PolicyModel.extend({
+  paths: attr({
+    editType: 'stringArray',
+  }),
+  additionalAttrs: computed(function () {
+    return expandAttributeMeta(this, ['enforcementLevel', 'paths']);
+  }),
+});
diff --git a/ui/app/models/policy/rgp.js b/ui/app/models/policy/rgp.js
new file mode 100644
index 0000000..33829a6
--- /dev/null
+++ b/ui/app/models/policy/rgp.js
@@ -0,0 +1,21 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import { attr } from '@ember-data/model';
+import { computed } from '@ember/object';
+
+import PolicyModel from '../policy';
+import { expandAttributeMeta } from 'vault/utils/field-to-attrs';
+
+export default PolicyModel.extend({
+  enforcementLevel: attr('string', {
+    possibleValues: ['advisory', 'soft-mandatory', 'hard-mandatory'],
+    defaultValue: 'hard-mandatory',
+  }),
+
+  additionalAttrs: computed(function () {
+    return expandAttributeMeta(this, ['enforcementLevel']);
+  }),
+});
diff --git a/ui/app/models/raft-join.js b/ui/app/models/raft-join.js
new file mode 100644
index 0000000..374a5aa
--- /dev/null
+++ b/ui/app/models/raft-join.js
@@ -0,0 +1,48 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import Model, { attr } from '@ember-data/model';
+import { expandAttributeMeta } from 'vault/utils/field-to-attrs';
+import { computed } from '@ember/object';
+
+//leader_api_addr (string: <required>) – Address of the leader node in the Raft cluster to which this node is trying to join.
+
+//retry (bool: false) - Retry joining the Raft cluster in case of failures.
+
+//leader_ca_cert (string: "") - CA certificate used to communicate with Raft's leader node.
+
+//leader_client_cert (string: "") - Client certificate used to communicate with Raft's leader node.
+
+//leader_client_key (string: "") - Client key used to communicate with Raft's leader node.
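+
+// A hedged example of the join request these fields map to (values are
+// placeholders; the endpoint is sys/storage/raft/join per the Vault API docs):
+//
+//   POST /v1/sys/storage/raft/join
+//   {
+//     "leader_api_addr": "https://vault-leader.example.com:8200",
+//     "retry": true,
+//     "leader_ca_cert": "-----BEGIN CERTIFICATE-----..."
+//   }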
+ +export default Model.extend({ + leaderApiAddr: attr('string', { + label: 'Leader API Address', + }), + retry: attr('boolean', { + label: 'Keep retrying to join in case of failures', + }), + leaderCaCert: attr('string', { + label: 'Leader CA Certificate', + editType: 'file', + }), + leaderClientCert: attr('string', { + label: 'Leader Client Certificate', + editType: 'file', + }), + leaderClientKey: attr('string', { + label: 'Leader Client Key', + editType: 'file', + }), + fields: computed(function () { + return expandAttributeMeta(this, [ + 'leaderApiAddr', + 'leaderCaCert', + 'leaderClientCert', + 'leaderClientKey', + 'retry', + ]); + }), +}); diff --git a/ui/app/models/replication-attributes.js b/ui/app/models/replication-attributes.js new file mode 100644 index 0000000..749e6c4 --- /dev/null +++ b/ui/app/models/replication-attributes.js @@ -0,0 +1,89 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import { match, not } from '@ember/object/computed'; +import { computed } from '@ember/object'; + +export default Model.extend({ + clusterId: attr('string'), + clusterIdDisplay: computed('clusterId', 'mode', function () { + const clusterId = this.clusterId; + return clusterId ? clusterId.split('-')[0] : null; + }), + mode: attr('string'), + replicationDisabled: match('mode', /disabled|unsupported/), + replicationUnsupported: match('mode', /unsupported/), + replicationEnabled: not('replicationDisabled'), + + // primary attrs + isPrimary: match('mode', /primary/), + + knownSecondaries: attr('array'), + secondaries: attr('array'), + + // secondary attrs + isSecondary: match('mode', /secondary/), + connection_state: attr('string'), + modeForUrl: computed('isPrimary', 'isSecondary', 'mode', function () { + const mode = this.mode; + return mode === 'bootstrapping' + ? 
'bootstrapping' + : (this.isSecondary && 'secondary') || (this.isPrimary && 'primary'); + }), + modeForHeader: computed('mode', function () { + const mode = this.mode; + if (!mode) { + // mode will be false or undefined if it calls the status endpoint while still setting up the cluster + return 'loading'; + } + return mode; + }), + secondaryId: attr('string'), + primaryClusterAddr: attr('string'), + knownPrimaryClusterAddrs: attr('array'), + primaries: attr('array'), + state: attr('string'), //stream-wal, merkle-diff, merkle-sync, idle + lastRemoteWAL: attr('number'), + + // attrs on primary and secondary + lastWAL: attr('number'), + merkleRoot: attr('string'), + merkleSyncProgress: attr('object'), + get syncProgress() { + const { state, merkleSyncProgress } = this; + if (state !== 'merkle-sync' || !merkleSyncProgress) { + return null; + } + const { sync_total_keys, sync_progress } = merkleSyncProgress; + return { + progress: sync_progress, + total: sync_total_keys, + }; + }, + + syncProgressPercent: computed('syncProgress', function () { + const syncProgress = this.syncProgress; + if (!syncProgress) { + return null; + } + const { progress, total } = syncProgress; + + return Math.floor(100 * (progress / total)); + }), + + modeDisplay: computed('mode', function () { + const displays = { + disabled: 'Disabled', + unknown: 'Unknown', + bootstrapping: 'Bootstrapping', + primary: 'Primary', + secondary: 'Secondary', + unsupported: 'Not supported', + }; + + return displays[this.mode] || 'Disabled'; + }), +}); diff --git a/ui/app/models/replication-mode.js b/ui/app/models/replication-mode.js new file mode 100644 index 0000000..d125207 --- /dev/null +++ b/ui/app/models/replication-mode.js @@ -0,0 +1,42 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; + +/* sample response + +{ + "request_id": "d81bba81-e8a1-0ee9-240e-a77d36e3e08f", + "lease_id": "", + "renewable": false, + "lease_duration": 0, + "data": { + "cluster_id": "ab7d4191-d1a3-b4d6-6297-5a41af6154ae", + "known_secondaries": [ + "test" + ], + "last_performance_wal": 72, + "last_reindex_epoch": "1588281113", + "last_wal": 73, + "merkle_root": "c8d258d376f01d98156f74e8d8f82ea2aca8dc4a", + "mode": "primary", + "primary_cluster_addr": "", + "reindex_building_progress": 26838, + "reindex_building_total": 305443, + "reindex_in_progress": true, + "reindex_stage": "building", + "state": "running" + }, + "wrap_info": null, + "warnings": null, + "auth": null +} + + +*/ + +export default Model.extend({ + status: attr('object'), +}); diff --git a/ui/app/models/role-aws.js b/ui/app/models/role-aws.js new file mode 100644 index 0000000..3d7cb84 --- /dev/null +++ b/ui/app/models/role-aws.js @@ -0,0 +1,76 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import { alias } from '@ember/object/computed'; +import { computed } from '@ember/object'; +import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; +import { expandAttributeMeta } from 'vault/utils/field-to-attrs'; + +const CREDENTIAL_TYPES = [ + { + value: 'iam_user', + displayName: 'IAM User', + }, + { + value: 'assumed_role', + displayName: 'Assumed Role', + }, + { + value: 'federation_token', + displayName: 'Federation Token', + }, +]; +export default Model.extend({ + backend: attr('string', { + readOnly: true, + }), + name: attr('string', { + label: 'Role name', + readOnly: true, + }), + // credentialTypes are for backwards compatibility. + // we use this to populate "credentialType" in + // the serializer. if there is more than one, the + // show and edit pages will show a warning + credentialTypes: attr('array', { + readOnly: true, + }), + credentialType: attr('string', { + defaultValue: 'iam_user', + possibleValues: CREDENTIAL_TYPES, + }), + roleArns: attr({ + editType: 'stringArray', + label: 'Role ARNs', + }), + policyArns: attr({ + editType: 'stringArray', + label: 'Policy ARNs', + }), + policyDocument: attr('string', { + editType: 'json', + helpText: + 'A policy is an object in AWS that, when associated with an identity or resource, defines their permissions.', + // Cannot have a default_value on policy_document because in some cases AWS expects this value to be empty. + }), + fields: computed('credentialType', function () { + const credentialType = this.credentialType; + const keysForType = { + iam_user: ['name', 'credentialType', 'policyArns', 'policyDocument'], + assumed_role: ['name', 'credentialType', 'roleArns', 'policyDocument'], + federation_token: ['name', 'credentialType', 'policyDocument'], + }; + + return expandAttributeMeta(this, keysForType[credentialType]); + }), + updatePath: lazyCapabilities(apiPath`${'backend'}/roles/${'id'}`, 'backend', 'id'), + canDelete: alias('updatePath.canDelete'), + canEdit: alias('updatePath.canUpdate'), + canRead: alias('updatePath.canRead'), + + generatePath: lazyCapabilities(apiPath`${'backend'}/creds/${'id'}`, 'backend', 'id'), + canGenerate: alias('generatePath.canUpdate'), +}); diff --git a/ui/app/models/role-jwt.js b/ui/app/models/role-jwt.js new file mode 100644 index 0000000..ddd5de0 --- /dev/null +++ b/ui/app/models/role-jwt.js @@ -0,0 +1,35 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import parseURL from 'core/utils/parse-url'; + +const DOMAIN_STRINGS = { + 'github.com': 'GitHub', + 'gitlab.com': 'GitLab', + 'google.com': 'Google', + 'ping.com': 'Ping', + 'okta.com': 'Okta', + 'auth0.com': 'Auth0', +}; + +const PROVIDER_WITH_LOGO = ['GitLab', 'Google', 'Auth0']; + +export { DOMAIN_STRINGS, PROVIDER_WITH_LOGO }; + +export default class RoleJwtModel extends Model { + @attr('string') authUrl; + + get providerName() { + const { hostname } = parseURL(this.authUrl); + const firstMatch = Object.keys(DOMAIN_STRINGS).find((name) => hostname.includes(name)); + return DOMAIN_STRINGS[firstMatch] || null; + } + + get providerButtonComponent() { + const { providerName } = this; + return PROVIDER_WITH_LOGO.includes(providerName) ? 
`auth-button-${providerName.toLowerCase()}` : null; + } +} diff --git a/ui/app/models/role-ssh.js b/ui/app/models/role-ssh.js new file mode 100644 index 0000000..2371d46 --- /dev/null +++ b/ui/app/models/role-ssh.js @@ -0,0 +1,168 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import { alias } from '@ember/object/computed'; +import { computed } from '@ember/object'; +import fieldToAttrs, { expandAttributeMeta } from 'vault/utils/field-to-attrs'; +import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; + +// these arrays define the order in which the fields will be displayed +// see +// https://github.com/hashicorp/vault/blob/main/builtin/logical/ssh/path_roles.go#L542 for list of fields for each key type +const OTP_FIELDS = [ + 'name', + 'keyType', + 'defaultUser', + 'adminUser', + 'port', + 'allowedUsers', + 'cidrList', + 'excludeCidrList', +]; +const CA_FIELDS = [ + 'name', + 'keyType', + 'allowUserCertificates', + 'allowHostCertificates', + 'defaultUser', + 'allowedUsers', + 'allowedUsersTemplate', + 'allowedDomains', + 'allowedDomainsTemplate', + 'ttl', + 'maxTtl', + 'allowedCriticalOptions', + 'defaultCriticalOptions', + 'allowedExtensions', + 'defaultExtensions', + 'allowBareDomains', + 'allowSubdomains', + 'allowUserKeyIds', + 'keyIdFormat', + 'notBeforeDuration', + 'algorithmSigner', +]; + +export default Model.extend({ + useOpenAPI: true, + getHelpUrl: function (backend) { + return `/v1/${backend}/roles/example?help=1`; + }, + zeroAddress: attr('boolean', { + readOnly: true, + }), + backend: attr('string', { + readOnly: true, + }), + name: attr('string', { + label: 'Role Name', + fieldValue: 'name', + readOnly: true, + }), + keyType: attr('string', { + possibleValues: ['ca', 'otp'], //overriding the API which also lists 'dynamic' as a type though it is deprecated + }), + adminUser: attr('string', { + helpText: 'Username of the admin user at the remote host', + }), + defaultUser: attr('string', { + helpText: "Username to use when one isn't specified", + }), + allowedUsers: attr('string', { + helpText: + 'Create a list of users who are allowed to use this key (e.g. `admin, dev`, or use `*` to allow all.)', + }), + allowedUsersTemplate: attr('boolean', { + helpText: + 'Specifies that Allowed Users can be templated e.g. {{identity.entity.aliases.mount_accessor_xyz.name}}', + }), + allowedDomains: attr('string', { + helpText: + 'List of domains for which a client can request a certificate (e.g. `example.com`, or `*` to allow all)', + }), + allowedDomainsTemplate: attr('boolean', { + helpText: + 'Specifies that Allowed Domains can be set using identity template policies. 
Non-templated domains are also permitted.',
+  }),
+  cidrList: attr('string', {
+    helpText: 'List of CIDR blocks for which this role is applicable',
+  }),
+  excludeCidrList: attr('string', {
+    helpText: 'List of CIDR blocks that are not accepted by this role',
+  }),
+  port: attr('number', {
+    helpText: 'Port number for the SSH connection (default is `22`)',
+  }),
+  allowedCriticalOptions: attr('string', {
+    helpText: 'List of critical options that certificates have when signed',
+  }),
+  defaultCriticalOptions: attr('object', {
+    helpText: 'Map of critical options certificates should have if none are provided when signing',
+  }),
+  allowedExtensions: attr('string', {
+    helpText: 'List of extensions that certificates can have when signed',
+  }),
+  defaultExtensions: attr('object', {
+    helpText: 'Map of extensions certificates should have if none are provided when signing',
+  }),
+  allowUserCertificates: attr('boolean', {
+    helpText: 'Specifies if certificates are allowed to be signed for use as a user',
+  }),
+  allowHostCertificates: attr('boolean', {
+    helpText: 'Specifies if certificates are allowed to be signed for use as a host',
+  }),
+  allowBareDomains: attr('boolean', {
+    helpText:
+      'Specifies if host certificates that are requested are allowed to use the base domains listed in Allowed Domains',
+  }),
+  allowSubdomains: attr('boolean', {
+    helpText:
+      'Specifies if host certificates that are requested are allowed to be subdomains of those listed in Allowed Domains',
+  }),
+  allowUserKeyIds: attr('boolean', {
+    helpText: 'Specifies if users can override the key ID for a signed certificate with the "key_id" field',
+  }),
+  keyIdFormat: attr('string', {
+    helpText: 'When supplied, this value specifies a custom format for the key ID of a signed certificate',
+  }),
+  algorithmSigner: attr('string', {
+    helpText: 'When supplied, this value specifies a signing algorithm for the key',
+    possibleValues: ['default', 'ssh-rsa', 'rsa-sha2-256', 'rsa-sha2-512'],
+  }),
+
+  showFields: computed('keyType', function () {
+    const keyType = this.keyType;
+    const keys = keyType === 'ca' ? CA_FIELDS.slice(0) : OTP_FIELDS.slice(0);
+    return expandAttributeMeta(this, keys);
+  }),
+
+  fieldGroups: computed('keyType', function () {
+    const numRequired = this.keyType === 'otp' ? 3 : 4;
+    const fields = this.keyType === 'otp' ? [...OTP_FIELDS] : [...CA_FIELDS];
+    const defaultFields = fields.splice(0, numRequired);
+    const groups = [
+      { default: defaultFields },
+      {
+        Options: [...fields],
+      },
+    ];
+    return fieldToAttrs(this, groups);
+  }),
+
+  updatePath: lazyCapabilities(apiPath`${'backend'}/roles/${'id'}`, 'backend', 'id'),
+  canDelete: alias('updatePath.canDelete'),
+  canEdit: alias('updatePath.canUpdate'),
+  canRead: alias('updatePath.canRead'),
+
+  generatePath: lazyCapabilities(apiPath`${'backend'}/creds/${'id'}`, 'backend', 'id'),
+  canGenerate: alias('generatePath.canUpdate'),
+
+  signPath: lazyCapabilities(apiPath`${'backend'}/sign/${'id'}`, 'backend', 'id'),
+  canSign: alias('signPath.canUpdate'),
+
+  zeroAddressPath: lazyCapabilities(apiPath`${'backend'}/config/zeroaddress`, 'backend'),
+  canEditZeroAddress: alias('zeroAddressPath.canUpdate'),
+});
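To make the `fieldGroups` splice above concrete, here is a sketch of what it yields for an OTP role (the store call is assumed; attr metadata is abbreviated to field names): the first three OTP fields become the required defaults and the remainder fall under Options.

```js
// Illustrative only; fieldGroups actually returns attr metadata objects.
const role = this.store.createRecord('role-ssh', { keyType: 'otp' });
role.fieldGroups;
// -> [
//      { default: ['name', 'keyType', 'defaultUser'] },
//      { Options: ['adminUser', 'port', 'allowedUsers', 'cidrList', 'excludeCidrList'] },
//    ]
```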
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import Model, { attr, belongsTo } from '@ember-data/model';
+import { computed } from '@ember/object'; // eslint-disable-line
+import { equal } from '@ember/object/computed'; // eslint-disable-line
+import { withModelValidations } from 'vault/decorators/model-validations';
+import { withExpandedAttributes } from 'vault/decorators/model-expanded-attributes';
+import { supportedSecretBackends } from 'vault/helpers/supported-secret-backends';
+
+const LINKED_BACKENDS = supportedSecretBackends();
+
+// identity will be managed separately and the inclusion
+// of the system backend is an implementation detail
+const LIST_EXCLUDED_BACKENDS = ['system', 'identity'];
+
+const validations = {
+  path: [
+    { type: 'presence', message: "Path can't be blank." },
+    {
+      type: 'containsWhiteSpace',
+      message:
+        "Path contains whitespace. If this is desired, you'll need to encode it with %20 in API requests.",
+      level: 'warn',
+    },
+  ],
+  maxVersions: [
+    { type: 'number', message: 'Maximum versions must be a number.' },
+    { type: 'length', options: { min: 1, max: 16 }, message: 'You cannot go over 16 characters.' },
+  ],
+};
+
+@withModelValidations(validations)
+@withExpandedAttributes()
+export default class SecretEngineModel extends Model {
+  @attr('string') path;
+  @attr('string') type;
+  @attr('string', {
+    editType: 'textarea',
+  })
+  description;
+  @belongsTo('mount-config', { async: false, inverse: null }) config;
+
+  // Enterprise options (still available on OSS)
+  @attr('boolean', {
+    helpText:
+      'When Replication is enabled, a local mount will not be replicated across clusters. This can only be specified at mount time.',
+  })
+  local;
+  @attr('boolean', {
+    helpText:
+      'When enabled - if a seal supporting seal wrapping is specified in the configuration, all critical security parameters (CSPs) in this backend will be seal wrapped. (For K/V mounts, all values will be seal wrapped.) This can only be specified at mount time.',
+  })
+  sealWrap;
+  @attr('boolean') externalEntropyAccess;
+
+  // options.version
+  @attr('number', {
+    label: 'Version',
+    helpText:
+      'The KV Secrets Engine can operate in different modes. Version 1 is the original generic Secrets Engine that allows for storing of static key/value pairs. Version 2 added more features including data versioning, TTLs, and check and set.',
+    possibleValues: [2, 1],
+    // This shouldn't be defaultValue because if no version comes back from API we should assume it's v1
+    defaultFormValue: 2, // Set the form to 2 by default
+  })
+  version;
+
+  // SSH specific attributes
+  @attr('string') privateKey;
+  @attr('string') publicKey;
+  @attr('boolean', {
+    defaultValue: true,
+  })
+  generateSigningKey;
+
+  // AWS specific attributes
+  @attr('string') lease;
+  @attr('string') leaseMax;
+
+  // Returned from API response
+  @attr('string') accessor;
+
+  // KV 2 additional config default options
+  @attr('number', {
+    defaultValue: 0,
+    label: 'Maximum number of versions',
+    subText:
+      'The number of versions to keep per key. Once the number of keys exceeds the maximum number set here, the oldest version will be permanently deleted. This value applies to all keys, but a key’s metadata settings can overwrite this value. When 0 is used or the value is unset, Vault will keep 10 versions.',
+  })
+  maxVersions;
+  @attr('boolean', {
+    defaultValue: false,
+    label: 'Require Check and Set',
+    subText:
+      'If checked, all keys will require the cas parameter to be set on all write requests.
A key’s metadata settings can overwrite this value.', + }) + casRequired; + @attr({ + defaultValue: 0, + editType: 'ttl', + label: 'Automate secret deletion', + helperTextDisabled: 'A secret’s version must be manually deleted.', + helperTextEnabled: 'Delete all new versions of this secret after', + }) + deleteVersionAfter; + + /* GETTERS */ + get modelTypeForKV() { + const engineType = this.engineType; + if ((engineType === 'kv' || engineType === 'generic') && this.version === 2) { + return 'secret-v2'; + } + return 'secret'; + } + get isV2KV() { + return this.modelTypeForKV === 'secret-v2'; + } + + get attrs() { + return this.formFields.map((fieldName) => { + return this.allByKey[fieldName]; + }); + } + + get fieldGroups() { + return this._expandGroups(this.formFieldGroups); + } + + get icon() { + if (!this.engineType || this.engineType === 'kmip') { + return 'secrets'; + } + if (this.engineType === 'keymgmt') { + return 'key'; + } + return this.engineType; + } + + get engineType() { + return (this.type || '').replace(/^ns_/, ''); + } + + get shouldIncludeInList() { + return !LIST_EXCLUDED_BACKENDS.includes(this.engineType); + } + + get isSupportedBackend() { + return LINKED_BACKENDS.includes(this.engineType); + } + + get backendLink() { + if (this.engineType === 'kmip') { + return 'vault.cluster.secrets.backend.kmip.scopes'; + } + if (this.engineType === 'database') { + return 'vault.cluster.secrets.backend.overview'; + } + return 'vault.cluster.secrets.backend.list-root'; + } + + get accessor() { + if (this.version === 2) { + return `v2 ${this.accessor}`; + } + return this.accessor; + } + + get localDisplay() { + return this.local ? 'local' : 'replicated'; + } + + get formFields() { + const type = this.engineType; + const fields = ['type', 'path', 'description', 'accessor', 'local', 'sealWrap']; + // no ttl options for keymgmt + if (type !== 'keymgmt') { + fields.push('config.defaultLeaseTtl', 'config.maxLeaseTtl'); + } + fields.push( + 'config.allowedManagedKeys', + 'config.auditNonHmacRequestKeys', + 'config.auditNonHmacResponseKeys', + 'config.passthroughRequestHeaders', + 'config.allowedResponseHeaders' + ); + if (type === 'kv' || type === 'generic') { + fields.push('version'); + } + // version comes in as number not string + if (type === 'kv' && parseInt(this.version, 10) === 2) { + fields.push('casRequired', 'deleteVersionAfter', 'maxVersions'); + } + return fields; + } + + get formFieldGroups() { + let defaultFields = ['path']; + let optionFields; + const CORE_OPTIONS = ['description', 'config.listingVisibility', 'local', 'sealWrap']; + const STANDARD_CONFIG = [ + 'config.auditNonHmacRequestKeys', + 'config.auditNonHmacResponseKeys', + 'config.passthroughRequestHeaders', + 'config.allowedResponseHeaders', + ]; + + switch (this.engineType) { + case 'kv': + defaultFields = ['path', 'maxVersions', 'casRequired', 'deleteVersionAfter']; + optionFields = [ + 'version', + ...CORE_OPTIONS, + 'config.defaultLeaseTtl', + 'config.maxLeaseTtl', + 'config.allowedManagedKeys', + ...STANDARD_CONFIG, + ]; + break; + case 'generic': + optionFields = [ + 'version', + ...CORE_OPTIONS, + 'config.defaultLeaseTtl', + 'config.maxLeaseTtl', + 'config.allowedManagedKeys', + ...STANDARD_CONFIG, + ]; + break; + case 'database': + // Highlight TTLs in default + defaultFields = ['path', 'config.defaultLeaseTtl', 'config.maxLeaseTtl']; + optionFields = [...CORE_OPTIONS, 'config.allowedManagedKeys', ...STANDARD_CONFIG]; + break; + case 'pki': + defaultFields = ['path', 'config.defaultLeaseTtl', 
'config.maxLeaseTtl', 'config.allowedManagedKeys']; + optionFields = [...CORE_OPTIONS, ...STANDARD_CONFIG]; + break; + case 'keymgmt': + // no ttl options for keymgmt + optionFields = [...CORE_OPTIONS, 'config.allowedManagedKeys', ...STANDARD_CONFIG]; + break; + default: + defaultFields = ['path']; + optionFields = [ + ...CORE_OPTIONS, + 'config.defaultLeaseTtl', + 'config.maxLeaseTtl', + 'config.allowedManagedKeys', + ...STANDARD_CONFIG, + ]; + break; + } + + return [ + { default: defaultFields }, + { + 'Method Options': optionFields, + }, + ]; + } + + /* ACTIONS */ + saveCA(options) { + if (this.type !== 'ssh') { + return; + } + if (options.isDelete) { + this.privateKey = null; + this.publicKey = null; + this.generateSigningKey = false; + } + return this.save({ + adapterOptions: { + options: options, + apiPath: 'config/ca', + attrsToSend: ['privateKey', 'publicKey', 'generateSigningKey'], + }, + }); + } + + saveZeroAddressConfig() { + return this.save({ + adapterOptions: { + adapterMethod: 'saveZeroAddressConfig', + }, + }); + } +} diff --git a/ui/app/models/secret-v2-version.js b/ui/app/models/secret-v2-version.js new file mode 100644 index 0000000..2ce4aea --- /dev/null +++ b/ui/app/models/secret-v2-version.js @@ -0,0 +1,27 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { belongsTo, attr } from '@ember-data/model'; +import timestamp from 'core/utils/timestamp'; +import SecretModel from './secret'; + +export default class SecretV2VersionModel extends SecretModel { + @attr('boolean') failedServerRead; + @attr('number') version; + @attr('string') path; + @attr('string') deletionTime; + @attr('string') createdTime; + @attr('boolean') destroyed; + @attr('number') currentVersion; + @belongsTo('secret-v2') secret; + + pathAttr = 'path'; + + get deleted() { + const deletionTime = new Date(this.deletionTime); + const now = timestamp.now(); + return deletionTime <= now; + } +} diff --git a/ui/app/models/secret-v2.js b/ui/app/models/secret-v2.js new file mode 100644 index 0000000..7833012 --- /dev/null +++ b/ui/app/models/secret-v2.js @@ -0,0 +1,69 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { belongsTo, hasMany, attr } from '@ember-data/model'; +import { computed } from '@ember/object'; // eslint-disable-line +import { alias } from '@ember/object/computed'; // eslint-disable-line +import { expandAttributeMeta } from 'vault/utils/field-to-attrs'; +import KeyMixin from 'vault/mixins/key-mixin'; +import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; +import { withModelValidations } from 'vault/decorators/model-validations'; + +const validations = { + maxVersions: [ + { type: 'number', message: 'Maximum versions must be a number.' }, + { type: 'length', options: { min: 1, max: 16 }, message: 'You cannot go over 16 characters.' 
}, + ], +}; + +@withModelValidations(validations) +class SecretV2Model extends Model {} +export default SecretV2Model.extend(KeyMixin, { + failedServerRead: attr('boolean'), + engine: belongsTo('secret-engine', { async: false }), + engineId: attr('string'), + versions: hasMany('secret-v2-version', { async: false, inverse: null }), + selectedVersion: belongsTo('secret-v2-version', { async: false, inverse: 'secret' }), + createdTime: attr(), + updatedTime: attr(), + currentVersion: attr('number'), + oldestVersion: attr('number'), + customMetadata: attr('object', { + editType: 'kv', + subText: 'An optional set of informational key-value pairs that will be stored with all secret versions.', + }), + maxVersions: attr('number', { + defaultValue: 0, + label: 'Maximum number of versions', + subText: + 'The number of versions to keep per key. Once the number of keys exceeds the maximum number set here, the oldest version will be permanently deleted.', + }), + casRequired: attr('boolean', { + defaultValue: false, + label: 'Require Check and Set', + subText: + 'Writes will only be allowed if the key’s current version matches the version specified in the cas parameter.', + }), + deleteVersionAfter: attr({ + defaultValue: 0, + editType: 'ttl', + label: 'Automate secret deletion', + helperTextDisabled: 'A secret’s version must be manually deleted.', + helperTextEnabled: 'Delete all new versions of this secret after', + }), + fields: computed(function () { + return expandAttributeMeta(this, ['customMetadata', 'maxVersions', 'casRequired', 'deleteVersionAfter']); + }), + secretDataPath: lazyCapabilities(apiPath`${'engineId'}/data/${'id'}`, 'engineId', 'id'), + secretMetadataPath: lazyCapabilities(apiPath`${'engineId'}/metadata/${'id'}`, 'engineId', 'id'), + + canListMetadata: alias('secretMetadataPath.canList'), + canReadMetadata: alias('secretMetadataPath.canRead'), + canUpdateMetadata: alias('secretMetadataPath.canUpdate'), + + canReadSecretData: alias('secretDataPath.canRead'), + canEditSecretData: alias('secretDataPath.canUpdate'), + canDeleteSecretData: alias('secretDataPath.canDelete'), +}); diff --git a/ui/app/models/secret.js b/ui/app/models/secret.js new file mode 100644 index 0000000..83c0678 --- /dev/null +++ b/ui/app/models/secret.js @@ -0,0 +1,43 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import { computed } from '@ember/object'; +import { alias } from '@ember/object/computed'; +import KeyMixin from 'vault/mixins/key-mixin'; +import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities'; + +export default Model.extend(KeyMixin, { + failedServerRead: attr('boolean'), + auth: attr('string'), + lease_duration: attr('number'), + lease_id: attr('string'), + renewable: attr('boolean'), + + secretData: attr('object'), + secretKeyAndValue: computed('secretData', function () { + const data = this.secretData; + return Object.keys(data).map((key) => { + return { key, value: data[key] }; + }); + }), + + dataAsJSONString: computed('secretData', function () { + return JSON.stringify(this.secretData, null, 2); + }), + + isAdvancedFormat: computed('secretData', function () { + const data = this.secretData; + return data && Object.keys(data).some((key) => typeof data[key] !== 'string'); + }), + + helpText: attr('string'), + // TODO this needs to be a relationship like `engine` on kv-v2 + backend: attr('string'), + secretPath: lazyCapabilities(apiPath`${'backend'}/${'id'}`, 'backend', 'id'), + canEdit: alias('secretPath.canUpdate'), + canDelete: alias('secretPath.canDelete'), + canRead: alias('secretPath.canRead'), +}); diff --git a/ui/app/models/server.js b/ui/app/models/server.js new file mode 100644 index 0000000..ba612f1 --- /dev/null +++ b/ui/app/models/server.js @@ -0,0 +1,15 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; + +//{"node_id":"1249bfbc-b234-96f3-0c66-07078ac3e16e","address":"127.0.0.1:8201","leader":true,"protocol_version":"3","voter":true} +export default Model.extend({ + address: attr('string'), + nodeId: attr('string'), + protocolVersion: attr('string'), + voter: attr('boolean'), + leader: attr('boolean'), +}); diff --git a/ui/app/models/ssh-otp-credential.js b/ui/app/models/ssh-otp-credential.js new file mode 100644 index 0000000..b7044aa --- /dev/null +++ b/ui/app/models/ssh-otp-credential.js @@ -0,0 +1,29 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { reads } from '@ember/object/computed'; +import Model, { attr } from '@ember-data/model'; +import { computed } from '@ember/object'; +import { expandAttributeMeta } from 'vault/utils/field-to-attrs'; +const CREATE_FIELDS = ['username', 'ip']; + +const DISPLAY_FIELDS = ['username', 'ip', 'key', 'keyType', 'port']; +export default Model.extend({ + role: attr('object', { + readOnly: true, + }), + ip: attr('string', { + label: 'IP Address', + }), + username: attr('string'), + key: attr('string'), + keyType: attr('string'), + port: attr('number'), + attrs: computed('key', function () { + const keys = this.key ? DISPLAY_FIELDS.slice(0) : CREATE_FIELDS.slice(0); + return expandAttributeMeta(this, keys); + }), + toCreds: reads('key'), +}); diff --git a/ui/app/models/ssh-sign.js b/ui/app/models/ssh-sign.js new file mode 100644 index 0000000..1937479 --- /dev/null +++ b/ui/app/models/ssh-sign.js @@ -0,0 +1,57 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import { computed } from '@ember/object'; +import { expandAttributeMeta } from 'vault/utils/field-to-attrs'; +const CREATE_FIELDS = [ + 'publicKey', + 'keyId', + 'validPrincipals', + 'certType', + 'criticalOptions', + 'extensions', + 'ttl', +]; + +const DISPLAY_FIELDS = ['signedKey', 'leaseId', 'renewable', 'leaseDuration', 'serialNumber']; + +export default Model.extend({ + role: attr('object', { + readOnly: true, + }), + publicKey: attr('string', { + label: 'Public Key', + editType: 'textarea', + }), + ttl: attr({ + label: 'TTL', + editType: 'ttl', + }), + validPrincipals: attr('string'), + certType: attr('string', { + defaultValue: 'user', + label: 'Certificate Type', + possibleValues: ['user', 'host'], + }), + keyId: attr('string', { + label: 'Key ID', + }), + criticalOptions: attr('object'), + extensions: attr('object'), + + leaseId: attr('string', { + label: 'Lease ID', + }), + renewable: attr('boolean'), + leaseDuration: attr('number'), + serialNumber: attr('string'), + signedKey: attr('string'), + + attrs: computed('signedKey', function () { + const keys = this.signedKey ? DISPLAY_FIELDS.slice(0) : CREATE_FIELDS.slice(0); + return expandAttributeMeta(this, keys); + }), +}); diff --git a/ui/app/models/test-form-model.js b/ui/app/models/test-form-model.js new file mode 100644 index 0000000..8d47285 --- /dev/null +++ b/ui/app/models/test-form-model.js @@ -0,0 +1,14 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +// this model is just used for integration tests +// + +import AuthMethodModel from './auth-method'; +import { belongsTo } from '@ember-data/model'; + +export default AuthMethodModel.extend({ + otherConfig: belongsTo('mount-config', { async: false, inverse: null }), +}); diff --git a/ui/app/models/transform.js b/ui/app/models/transform.js new file mode 100644 index 0000000..043f043 --- /dev/null +++ b/ui/app/models/transform.js @@ -0,0 +1,104 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import { computed } from '@ember/object'; +import { apiPath } from 'vault/macros/lazy-capabilities'; +import { expandAttributeMeta } from 'vault/utils/field-to-attrs'; +import attachCapabilities from 'vault/lib/attach-capabilities'; + +// these arrays define the order in which the fields will be displayed +// see +//https://www.vaultproject.io/api-docs/secret/transform#create-update-transformation +const TYPES = [ + { + value: 'fpe', + displayName: 'Format Preserving Encryption (FPE)', + }, + { + value: 'masking', + displayName: 'Masking', + }, +]; + +const TWEAK_SOURCE = [ + { + value: 'supplied', + displayName: 'supplied', + }, + { + value: 'generated', + displayName: 'generated', + }, + { + value: 'internal', + displayName: 'internal', + }, +]; + +const ModelExport = Model.extend({ + name: attr('string', { + // CBS TODO: make this required for making a transformation + label: 'Name', + readOnly: true, + subText: 'The name for your transformation. This cannot be edited later.', + }), + type: attr('string', { + defaultValue: 'fpe', + label: 'Type', + possibleValues: TYPES, + subText: + 'Vault provides two types of transformations: Format Preserving Encryption (FPE) is reversible, while Masking is not. 
This cannot be edited later.',
+  }),
+  tweak_source: attr('string', {
+    defaultValue: 'supplied',
+    label: 'Tweak source',
+    possibleValues: TWEAK_SOURCE,
+    subText: `A tweak value is used when performing FPE transformations. This can be supplied, generated, or internal.`, // CBS TODO: I do not include the link here. Need to figure out the best way to approach this.
+  }),
+  masking_character: attr('string', {
+    characterLimit: 1,
+    defaultValue: '*',
+    label: 'Masking character',
+    subText: 'Specify the character you’d like to use to mask your data.',
+  }),
+  template: attr('array', {
+    editType: 'searchSelect',
+    isSectionHeader: true,
+    fallbackComponent: 'string-list',
+    label: 'Template', // CBS TODO: make this required for making a transformation
+    models: ['transform/template'],
+    selectLimit: 1,
+    onlyAllowExisting: true,
+    subText:
+      'Templates allow Vault to determine what and how to capture the value to be transformed. Type to use an existing template or create a new one.',
+  }),
+  allowed_roles: attr('array', {
+    editType: 'searchSelect',
+    isSectionHeader: true,
+    label: 'Allowed roles',
+    fallbackComponent: 'string-list',
+    models: ['transform/role'],
+    subText: 'Search for an existing role, type a new role to create it, or use a wildcard (*).',
+    wildcardLabel: 'role',
+  }),
+  transformAttrs: computed('type', function () {
+    if (this.type === 'masking') {
+      return ['name', 'type', 'masking_character', 'template', 'allowed_roles'];
+    }
+    return ['name', 'type', 'tweak_source', 'template', 'allowed_roles'];
+  }),
+  transformFieldAttrs: computed('transformAttrs', function () {
+    return expandAttributeMeta(this, this.transformAttrs);
+  }),
+
+  backend: attr('string', {
+    readOnly: true,
+  }),
+});
+
+export default attachCapabilities(ModelExport, {
+  updatePath: apiPath`${'backend'}/transformation/${'id'}`,
+});
diff --git a/ui/app/models/transform/alphabet.js b/ui/app/models/transform/alphabet.js
new file mode 100644
index 0000000..8f7f661
--- /dev/null
+++ b/ui/app/models/transform/alphabet.js
@@ -0,0 +1,39 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import Model, { attr } from '@ember-data/model';
+import { computed } from '@ember/object';
+import { apiPath } from 'vault/macros/lazy-capabilities';
+import attachCapabilities from 'vault/lib/attach-capabilities';
+import { expandAttributeMeta } from 'vault/utils/field-to-attrs';
+
+const M = Model.extend({
+  idPrefix: 'alphabet/',
+  idForNav: computed('id', 'idPrefix', function () {
+    const modelId = this.id || '';
+    return `${this.idPrefix}${modelId}`;
+  }),
+
+  name: attr('string', {
+    readOnly: true,
+    subText: 'The alphabet name. Keep in mind that spaces are not allowed and this cannot be edited later.',
+  }),
+  alphabet: attr('string', {
+    label: 'Alphabet',
+    subText:
+      'Provide the set of valid UTF-8 characters contained within both the input and transformed value. Read more.',
+  }),
+
+  attrs: computed(function () {
+    const keys = ['name', 'alphabet'];
+    return expandAttributeMeta(this, keys);
+  }),
+
+  backend: attr('string', { readOnly: true }),
+});
+
+export default attachCapabilities(M, {
+  updatePath: apiPath`${'backend'}/alphabet/${'id'}`,
+});
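+
+// A brief illustration (not part of the upstream file): `idForNav` prefixes
+// the record id so it can serve as the *secret param in the secret show
+// route, e.g. a record with id 'numerics' navigates as 'alphabet/numerics'.
diff --git a/ui/app/models/transform/role.js b/ui/app/models/transform/role.js
new file mode 100644
index 0000000..daee453
--- /dev/null
+++ b/ui/app/models/transform/role.js
@@ -0,0 +1,47 @@
+/**
+ * Copyright (c) HashiCorp, Inc.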
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import { computed } from '@ember/object'; +import { apiPath } from 'vault/macros/lazy-capabilities'; +import { expandAttributeMeta } from 'vault/utils/field-to-attrs'; +import attachCapabilities from 'vault/lib/attach-capabilities'; + +const ModelExport = Model.extend({ + // used for getting appropriate options for backend + idPrefix: 'role/', + // the id prefixed with `role/` so we can use it as the *secret param for the secret show route + idForNav: computed('id', 'idPrefix', function () { + const modelId = this.id || ''; + return `${this.idPrefix}${modelId}`; + }), + + name: attr('string', { + // TODO: make this required for making a transformation + label: 'Name', + readOnly: true, + subText: 'The name for your role. This cannot be edited later.', + }), + transformations: attr('array', { + editType: 'searchSelect', + isSectionHeader: true, + fallbackComponent: 'string-list', + label: 'Transformations', + models: ['transform'], + onlyAllowExisting: true, + subText: 'Select which transformations this role will have access to. It must already exist.', + }), + + attrs: computed('transformations', function () { + const keys = ['name', 'transformations']; + return expandAttributeMeta(this, keys); + }), + + backend: attr('string', { readOnly: true }), +}); + +export default attachCapabilities(ModelExport, { + updatePath: apiPath`${'backend'}/role/${'id'}`, +}); diff --git a/ui/app/models/transform/template.js b/ui/app/models/transform/template.js new file mode 100644 index 0000000..0716d67 --- /dev/null +++ b/ui/app/models/transform/template.js @@ -0,0 +1,54 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Model, { attr } from '@ember-data/model'; +import { computed } from '@ember/object'; +import { apiPath } from 'vault/macros/lazy-capabilities'; +import attachCapabilities from 'vault/lib/attach-capabilities'; +import { expandAttributeMeta } from 'vault/utils/field-to-attrs'; + +const M = Model.extend({ + idPrefix: 'template/', + idForNav: computed('id', 'idPrefix', function () { + const modelId = this.id || ''; + return `${this.idPrefix}${modelId}`; + }), + + name: attr('string', { + readOnly: true, + subText: + 'Templates allow Vault to determine what and how to capture the value to be transformed. This cannot be edited later.', + }), + type: attr('string', { defaultValue: 'regex' }), + pattern: attr('string', { + editType: 'regex', + subText: 'The template’s pattern defines the data format. Expressed in regex.', + }), + alphabet: attr('array', { + subText: + 'Alphabet defines a set of characters (UTF-8) that is used for FPE to determine the validity of plaintext and ciphertext values. 
You can choose a built-in one, or create your own.',
+    editType: 'searchSelect',
+    isSectionHeader: true,
+    fallbackComponent: 'string-list',
+    label: 'Alphabet',
+    models: ['transform/alphabet'],
+    selectLimit: 1,
+  }),
+  encodeFormat: attr('string'),
+  decodeFormats: attr(),
+  backend: attr('string', { readOnly: true }),
+
+  readAttrs: computed(function () {
+    const keys = ['name', 'pattern', 'encodeFormat', 'decodeFormats', 'alphabet'];
+    return expandAttributeMeta(this, keys);
+  }),
+  writeAttrs: computed(function () {
+    return expandAttributeMeta(this, ['name', 'pattern', 'alphabet']);
+  }),
+});
+
+export default attachCapabilities(M, {
+  updatePath: apiPath`${'backend'}/template/${'id'}`,
+});
diff --git a/ui/app/models/transit-key.js b/ui/app/models/transit-key.js
new file mode 100644
index 0000000..01e92b3
--- /dev/null
+++ b/ui/app/models/transit-key.js
@@ -0,0 +1,178 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import Model, { attr } from '@ember-data/model';
+import { alias } from '@ember/object/computed';
+import { set, get, computed } from '@ember/object';
+import clamp from 'vault/utils/clamp';
+import lazyCapabilities, { apiPath } from 'vault/macros/lazy-capabilities';
+
+const ACTION_VALUES = {
+  encrypt: {
+    isSupported: 'supportsEncryption',
+    description: 'Encrypts the provided plaintext using this key',
+    glyph: 'lock-fill',
+  },
+  decrypt: {
+    isSupported: 'supportsDecryption',
+    description: 'Decrypts the provided ciphertext using this key',
+    glyph: 'mail-open',
+  },
+  datakey: {
+    isSupported: 'supportsEncryption',
+    description: 'Generates a new key and value encrypted with this key',
+    glyph: 'key',
+  },
+  rewrap: {
+    isSupported: 'supportsEncryption',
+    description: 'Rewraps the ciphertext using the latest version of the named key',
+    glyph: 'reload',
+  },
+  sign: {
+    isSupported: 'supportsSigning',
+    description: 'Get the cryptographic signature of the given data',
+    glyph: 'pencil-tool',
+  },
+  hmac: {
+    isSupported: true,
+    description: 'Generate a data digest using a hash algorithm',
+    glyph: 'shuffle',
+  },
+  verify: {
+    isSupported: true,
+    description: 'Validate the provided signature for the given data',
+    glyph: 'check-circle',
+  },
+  export: {
+    isSupported: 'exportable',
+    description: 'Get the named key',
+    glyph: 'external-link',
+  },
+};
+
+export default Model.extend({
+  type: attr('string', {
+    defaultValue: 'aes256-gcm96',
+  }),
+  name: attr('string', {
+    label: 'Name',
+    readOnly: true,
+  }),
+  autoRotatePeriod: attr({
+    defaultValue: '0',
+    defaultShown: 'Key is not automatically rotated',
+    editType: 'ttl',
+    label: 'Auto-rotation period',
+  }),
+  deletionAllowed: attr('boolean'),
+  derived: attr('boolean'),
+  exportable: attr('boolean'),
+  minDecryptionVersion: attr('number', {
+    defaultValue: 1,
+  }),
+  minEncryptionVersion: attr('number', {
+    defaultValue: 0,
+  }),
+  latestVersion: attr('number'),
+  keys: attr('object'),
+  convergentEncryption: attr('boolean'),
+  convergentEncryptionVersion: attr('number'),
+
+  supportsSigning: attr('boolean'),
+  supportsEncryption: attr('boolean'),
+  supportsDecryption: attr('boolean'),
+  supportsDerivation: attr('boolean'),
+
+  setConvergentEncryption(val) {
+    if (val === true) {
+      set(this, 'derived', val);
+    }
+    set(this, 'convergentEncryption', val);
+  },
+
+  setDerived(val) {
+    if (val === false) {
+      set(this, 'convergentEncryption', val);
+    }
+    set(this, 'derived', val);
+  },
+
+  supportedActions: computed('type', function () {
+    return Object.keys(ACTION_VALUES)
+      .filter((name) => {
+        const { isSupported } = ACTION_VALUES[name];
+        return typeof isSupported === 'boolean' || get(this, isSupported);
+      })
+      .map((name) => {
+        const { description, glyph } = ACTION_VALUES[name];
+        return { name, description, glyph };
+      });
+  }),
+
+  canDelete: computed('deletionAllowed', 'lastLoadTS', function () {
+    const deleteAttrChanged = Boolean(this.changedAttributes().deletionAllowed);
+    return this.deletionAllowed && deleteAttrChanged === false;
+  }),
+
+  keyVersions: computed('validKeyVersions', function () {
+    let maxVersion = Math.max(...this.validKeyVersions);
+    const versions = [];
+    while (maxVersion > 0) {
+      versions.unshift(maxVersion);
+      maxVersion--;
+    }
+    return versions;
+  }),
+
+  encryptionKeyVersions: computed(
+    'keyVersions',
+    'latestVersion',
+    'minDecryptionVersion',
+    function () {
+      const { keyVersions, minDecryptionVersion } = this;
+
+      return keyVersions
+        .filter((version) => {
+          return version >= minDecryptionVersion;
+        })
+        .reverse();
+    }
+  ),
+
+  keysForEncryption: computed('minEncryptionVersion', 'latestVersion', function () {
+    let { minEncryptionVersion, latestVersion } = this;
+    const minVersion = clamp(minEncryptionVersion - 1, 0, latestVersion);
+    const versions = [];
+    while (latestVersion > minVersion) {
+      versions.push(latestVersion);
+      latestVersion--;
+    }
+    return versions;
+  }),
+
+  validKeyVersions: computed('keys', function () {
+    return Object.keys(this.keys);
+  }),
+
+  exportKeyTypes: computed('exportable', 'supportsEncryption', 'supportsSigning', 'type', function () {
+    const types = ['hmac'];
+    if (this.supportsSigning) {
+      types.unshift('signing');
+    }
+    if (this.supportsEncryption) {
+      types.unshift('encryption');
+    }
+    return types;
+  }),
+
+  backend: attr('string'),
+
+  rotatePath: lazyCapabilities(apiPath`${'backend'}/keys/${'id'}/rotate`, 'backend', 'id'),
+  canRotate: alias('rotatePath.canUpdate'),
+  secretPath: lazyCapabilities(apiPath`${'backend'}/keys/${'id'}`, 'backend', 'id'),
+  canRead: alias('secretPath.canUpdate'),
+  canEdit: alias('secretPath.canUpdate'),
+});
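+
+// A brief illustration (not part of the upstream file): `keysForEncryption`
+// counts down from the latest key version to the clamped minimum. With
+// latestVersion = 5 and minEncryptionVersion = 3 it yields [5, 4, 3]; with
+// minEncryptionVersion = 0 (unrestricted) it yields every version from 5
+// down to 1.
diff --git a/ui/app/router.js b/ui/app/router.js
new file mode 100644
index 0000000..1ac7175
--- /dev/null
+++ b/ui/app/router.js
@@ -0,0 +1,216 @@
+/**
+ * Copyright (c) HashiCorp, Inc.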
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import EmberRouter from '@ember/routing/router'; +import config from 'vault/config/environment'; + +export default class Router extends EmberRouter { + location = config.locationType; + rootURL = config.rootURL; +} + +Router.map(function () { + this.route('vault', { path: '/' }, function () { + this.route('cluster', { path: '/:cluster_name' }, function () { + this.route('oidc-provider-ns', { path: '/*namespace/identity/oidc/provider/:provider_name/authorize' }); + this.route('oidc-provider', { path: '/identity/oidc/provider/:provider_name/authorize' }); + this.route('oidc-callback', { path: '/auth/*auth_path/oidc/callback' }); + this.route('auth'); + this.route('redirect'); + this.route('init'); + this.route('logout'); + this.mount('open-api-explorer', { path: '/api-explorer' }); + this.route('license'); + this.route('mfa-setup'); + this.route('clients', function () { + this.route('dashboard'); + this.route('config'); + this.route('edit'); + }); + this.route('storage', { path: '/storage/raft' }); + this.route('storage-restore', { path: '/storage/raft/restore' }); + this.route('settings', function () { + this.route('index', { path: '/' }); + this.route('seal'); + this.route('auth', function () { + this.route('index', { path: '/' }); + this.route('enable'); + this.route('configure', { path: '/configure/:method' }, function () { + this.route('index', { path: '/' }); + this.route('section', { path: '/:section_name' }); + }); + }); + this.route('mount-secret-backend'); + this.route('configure-secret-backend', { path: '/secrets/configure/:backend' }, function () { + this.route('index', { path: '/' }); + this.route('section', { path: '/:section_name' }); + }); + }); + this.route('unseal'); + this.route('tools', function () { + this.route('tool', { path: '/:selected_action' }); + }); + this.route('access', function () { + this.route('methods', { path: '/' }); + this.route('method', { path: '/:path' }, function () { + this.route('index', { path: '/' }); + this.route('item', { path: '/item/:item_type' }, function () { + this.route('list', { path: '/' }); + this.route('create'); + this.route('edit', { path: '/edit/:item_id' }); + this.route('show', { path: '/show/:item_id' }); + }); + this.route('section', { path: '/:section_name' }); + }); + this.route('mfa', function () { + this.route('index', { path: '/' }); + this.route('methods', function () { + this.route('index', { path: '/' }); + this.route('create'); + this.route('method', { path: '/:id' }, function () { + this.route('edit'); + this.route('enforcements'); + }); + }); + this.route('enforcements', function () { + this.route('index', { path: '/' }); + this.route('create'); + this.route('enforcement', { path: '/:name' }, function () { + this.route('edit'); + }); + }); + }); + this.route('leases', function () { + // lookup + this.route('index', { path: '/' }); + // lookup prefix + // revoke prefix + revoke force + this.route('list-root', { path: '/list/' }); + this.route('list', { path: '/list/*prefix' }); + //renew + revoke + this.route('show', { path: '/show/*lease_id' }); + }); + // the outer identity route handles group and entity items + this.route('identity', { path: '/identity/:item_type' }, function () { + this.route('index', { path: '/' }); + this.route('create'); + this.route('merge'); + this.route('edit', { path: '/edit/:item_id' }); + this.route('show', { path: '/:item_id/:section' }); + this.route('aliases', function () { + this.route('index', { path: '/' }); + this.route('add', { path: 
'/add/:item_id' }); + this.route('edit', { path: '/edit/:item_alias_id' }); + this.route('show', { path: '/:item_alias_id/:section' }); + }); + }); + this.route('control-groups'); + this.route('control-groups-configure', { path: '/control-groups/configure' }); + this.route('control-group-accessor', { path: '/control-groups/:accessor' }); + this.route('namespaces', function () { + this.route('index', { path: '/' }); + this.route('create'); + }); + this.route('oidc', function () { + this.route('clients', function () { + this.route('create'); + this.route('client', { path: '/:name' }, function () { + this.route('details'); + this.route('providers'); + this.route('edit'); + }); + }); + this.route('keys', function () { + this.route('create'); + this.route('key', { path: '/:name' }, function () { + this.route('details'); + this.route('clients'); + this.route('edit'); + }); + }); + this.route('assignments', function () { + this.route('create'); + this.route('assignment', { path: '/:name' }, function () { + this.route('details'); + this.route('edit'); + }); + }); + this.route('providers', function () { + this.route('create'); + this.route('provider', { path: '/:name' }, function () { + this.route('details'); + this.route('clients'); + this.route('edit'); + }); + }); + this.route('scopes', function () { + this.route('create'); + this.route('scope', { path: '/:name' }, function () { + this.route('details'); + this.route('edit'); + }); + }); + }); + }); + this.route('secrets', function () { + this.route('backends', { path: '/' }); + this.route('backend', { path: '/:backend' }, function () { + this.mount('kmip'); + this.mount('kubernetes'); + this.mount('pki'); + this.route('index', { path: '/' }); + this.route('configuration'); + // because globs / params can't be empty, + // we have to special-case ids of '' with their own routes + this.route('list-root', { path: '/list/' }); + this.route('create-root', { path: '/create/' }); + this.route('show-root', { path: '/show/' }); + this.route('edit-root', { path: '/edit/' }); + + this.route('list', { path: '/list/*secret' }); + this.route('show', { path: '/show/*secret' }); + this.route('diff', { path: '/diff/*id' }); + this.route('metadata', { path: '/metadata/*secret' }); + this.route('edit-metadata', { path: '/edit-metadata/*secret' }); + this.route('create', { path: '/create/*secret' }); + this.route('edit', { path: '/edit/*secret' }); + + this.route('credentials-root', { path: '/credentials/' }); + this.route('credentials', { path: '/credentials/*secret' }); + + // kv v2 versions + this.route('versions-root', { path: '/versions/' }); + this.route('versions', { path: '/versions/*secret' }); + + // ssh sign + this.route('sign-root', { path: '/sign/' }); + this.route('sign', { path: '/sign/*secret' }); + // transit-specific routes + this.route('actions-root', { path: '/actions/' }); + this.route('actions', { path: '/actions/*secret' }); + // database specific route + this.route('overview'); + }); + }); + this.route('policies', { path: '/policies/:type' }, function () { + this.route('index', { path: '/' }); + this.route('create'); + }); + this.route('policy', { path: '/policy/:type' }, function () { + this.route('show', { path: '/:policy_name' }); + this.route('edit', { path: '/:policy_name/edit' }); + }); + this.route('replication-dr-promote', function () { + this.route('details'); + }); + if (config.addRootMounts) { + config.addRootMounts.call(this); + } + + this.route('not-found', { path: '/*path' }); + }); + this.route('not-found', { path: '/*path' }); 
+ }); +}); diff --git a/ui/app/routes/.gitkeep b/ui/app/routes/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/ui/app/routes/application.js b/ui/app/routes/application.js new file mode 100644 index 0000000..5ac7b0e --- /dev/null +++ b/ui/app/routes/application.js @@ -0,0 +1,78 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { inject as service } from '@ember/service'; +import Route from '@ember/routing/route'; +import ControlGroupError from 'vault/lib/control-group-error'; + +export default Route.extend({ + controlGroup: service(), + routing: service('router'), + namespaceService: service('namespace'), + featureFlagService: service('featureFlag'), + + actions: { + willTransition() { + window.scrollTo(0, 0); + }, + error(error, transition) { + const controlGroup = this.controlGroup; + if (error instanceof ControlGroupError) { + return controlGroup.handleError(error); + } + if (error.path === '/v1/sys/wrapping/unwrap') { + controlGroup.unmarkTokenForUnwrap(); + } + + const router = this.routing; + //FIXME transition.intent likely needs to be replaced + let errorURL = transition.intent.url; + const { name, contexts, queryParams } = transition.intent; + + // If the transition is internal to Ember, we need to generate the URL + // from the route parameters ourselves + if (!errorURL) { + try { + errorURL = router.urlFor(name, ...(contexts || []), { queryParams }); + } catch (e) { + // If this fails, something weird is happening with URL transitions + errorURL = null; + } + } + // because we're using rootURL, we need to trim this from the front to get + // the ember-routeable url + if (errorURL) { + errorURL = errorURL.replace('/ui', ''); + } + + error.errorURL = errorURL; + + // if we have queryParams, update the namespace so that the observer can fire on the controller + if (queryParams) { + /* eslint-disable-next-line ember/no-controller-access-in-routes */ + this.controllerFor('vault.cluster').set('namespaceQueryParam', queryParams.namespace || ''); + } + + // Assuming we have a URL, push it into browser history and update the + // location bar for the user + if (errorURL) { + router.get('location').setURL(errorURL); + } + + return true; + }, + }, + + async beforeModel() { + const result = await fetch('/v1/sys/internal/ui/feature-flags', { + method: 'GET', + }); + if (result.status === 200) { + const body = await result.json(); + const flags = body.feature_flags || []; + this.featureFlagService.setFeatureFlags(flags); + } + }, +}); diff --git a/ui/app/routes/loading.js b/ui/app/routes/loading.js new file mode 100644 index 0000000..3818725 --- /dev/null +++ b/ui/app/routes/loading.js @@ -0,0 +1,14 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; + +export default Route.extend({ + setupController(controller) { + this._super(...arguments); + const targetRoute = location.pathname || ''; + controller.set('isCallback', targetRoute.includes('oidc/callback')); + }, +}); diff --git a/ui/app/routes/vault.js b/ui/app/routes/vault.js new file mode 100644 index 0000000..625a74b --- /dev/null +++ b/ui/app/routes/vault.js @@ -0,0 +1,46 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { later } from '@ember/runloop'; +import { Promise } from 'rsvp'; +import { inject as service } from '@ember/service'; +import Route from '@ember/routing/route'; +import Ember from 'ember'; +/* eslint-disable ember/no-ember-testing-in-module-scope */ +const SPLASH_DELAY = Ember.testing ? 0 : 300; + +export default Route.extend({ + store: service(), + version: service(), + + beforeModel() { + return this.version.fetchVersion(); + }, + + model() { + // hardcode single cluster + const fixture = { + data: { + id: '1', + type: 'cluster', + attributes: { + name: 'vault', + }, + }, + }; + this.store.push(fixture); + return new Promise((resolve) => { + later(() => { + resolve(this.store.peekAll('cluster')); + }, SPLASH_DELAY); + }); + }, + + redirect(model, transition) { + if (model.get('length') === 1 && transition.targetName === 'vault.index') { + return this.transitionTo('vault.cluster', model.get('firstObject.name')); + } + }, +}); diff --git a/ui/app/routes/vault/cluster.js b/ui/app/routes/vault/cluster.js new file mode 100644 index 0000000..3e52da9 --- /dev/null +++ b/ui/app/routes/vault/cluster.js @@ -0,0 +1,143 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { inject as service } from '@ember/service'; +import { computed } from '@ember/object'; +import { reject } from 'rsvp'; +import Route from '@ember/routing/route'; +import { task, timeout } from 'ember-concurrency'; +import Ember from 'ember'; +import getStorage from '../../lib/token-storage'; +import localStorage from 'vault/lib/local-storage'; +import ClusterRoute from 'vault/mixins/cluster-route'; +import ModelBoundaryRoute from 'vault/mixins/model-boundary-route'; + +const POLL_INTERVAL_MS = 10000; + +export const getManagedNamespace = (nsParam, root) => { + if (!nsParam || nsParam.replaceAll('/', '') === root) return root; + // Check if param starts with root and / + if (nsParam.startsWith(`${root}/`)) { + return nsParam; + } + // Otherwise prepend the given param with the root + return `${root}/${nsParam}`; +}; + +export default Route.extend(ModelBoundaryRoute, ClusterRoute, { + namespaceService: service('namespace'), + version: service(), + permissions: service(), + store: service(), + auth: service(), + featureFlagService: service('featureFlag'), + currentCluster: service(), + modelTypes: computed(function () { + return ['node', 'secret', 'secret-engine']; + }), + + queryParams: { + namespaceQueryParam: { + refreshModel: true, + }, + }, + + getClusterId(params) { + const { cluster_name } = params; + const cluster = this.modelFor('vault').findBy('name', cluster_name); + return cluster ? 
cluster.get('id') : null; + }, + + async beforeModel() { + const params = this.paramsFor(this.routeName); + let namespace = params.namespaceQueryParam; + const currentTokenName = this.auth.get('currentTokenName'); + const managedRoot = this.featureFlagService.managedNamespaceRoot; + if (managedRoot && this.version.isOSS) { + // eslint-disable-next-line no-console + console.error('Cannot use Cloud Admin Namespace flag with OSS Vault'); + } + if (!namespace && currentTokenName && !Ember.testing) { + // if no namespace queryParam and user authenticated, + // use user's root namespace to redirect to properly param'd url + const storage = getStorage().getItem(currentTokenName); + namespace = storage?.userRootNamespace; + // only redirect if something other than nothing + if (namespace) { + this.transitionTo({ queryParams: { namespace } }); + } + } else if (managedRoot !== null) { + const managed = getManagedNamespace(namespace, managedRoot); + if (managed !== namespace) { + this.transitionTo({ queryParams: { namespace: managed } }); + } + } + this.namespaceService.setNamespace(namespace); + const id = this.getClusterId(params); + if (id) { + this.auth.setCluster(id); + if (this.auth.currentToken) { + await this.permissions.getPaths.perform(); + } + return this.version.fetchFeatures(); + } else { + return reject({ httpStatus: 404, message: 'not found', path: params.cluster_name }); + } + }, + + model(params) { + // if a user's browser settings block localStorage they will be unable to use Vault. The method will throw the error and the rest of the application will not load. + localStorage.isLocalStorageSupported(); + + const id = this.getClusterId(params); + return this.store.findRecord('cluster', id); + }, + + poll: task(function* () { + while (true) { + // when testing, the polling loop causes promises to never settle so acceptance tests hang + // to get around that, we just disable the poll in tests + if (Ember.testing) { + return; + } + yield timeout(POLL_INTERVAL_MS); + try { + /* eslint-disable-next-line ember/no-controller-access-in-routes */ + yield this.controller.model.reload(); + yield this.transitionToTargetRoute(); + } catch (e) { + // we want to keep polling here + } + } + }) + .cancelOn('deactivate') + .keepLatest(), + + afterModel(model, transition) { + this._super(...arguments); + this.currentCluster.setCluster(model); + + // Check that namespaces is enabled and if not, + // clear the namespace by transition to this route w/o it + if (this.namespaceService.path && !this.version.hasNamespaces) { + return this.transitionTo(this.routeName, { queryParams: { namespace: '' } }); + } + return this.transitionToTargetRoute(transition); + }, + + setupController() { + this._super(...arguments); + this.poll.perform(); + }, + + actions: { + error(e) { + if (e.httpStatus === 503 && e.errors[0] === 'Vault is sealed') { + this.refresh(); + } + return true; + }, + }, +}); diff --git a/ui/app/routes/vault/cluster/access.js b/ui/app/routes/vault/cluster/access.js new file mode 100644 index 0000000..5c1e44f --- /dev/null +++ b/ui/app/routes/vault/cluster/access.js @@ -0,0 +1,18 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { computed } from '@ember/object'; +import Route from '@ember/routing/route'; +import ClusterRoute from 'vault/mixins/cluster-route'; +import ModelBoundaryRoute from 'vault/mixins/model-boundary-route'; + +export default Route.extend(ModelBoundaryRoute, ClusterRoute, { + modelTypes: computed(function () { + return ['capabilities', 'control-group', 'identity/group', 'identity/group-alias', 'identity/alias']; + }), + model() { + return {}; + }, +}); diff --git a/ui/app/routes/vault/cluster/access/control-group-accessor.js b/ui/app/routes/vault/cluster/access/control-group-accessor.js new file mode 100644 index 0000000..a4c28ed --- /dev/null +++ b/ui/app/routes/vault/cluster/access/control-group-accessor.js @@ -0,0 +1,36 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { inject as service } from '@ember/service'; +import Route from '@ember/routing/route'; +import UnloadModel from 'vault/mixins/unload-model-route'; + +export default Route.extend(UnloadModel, { + store: service(), + version: service(), + + beforeModel() { + return this.version.fetchFeatures().then(() => { + return this._super(...arguments); + }); + }, + + model(params) { + return this.version.hasControlGroups ? this.store.findRecord('control-group', params.accessor) : null; + }, + + actions: { + willTransition() { + return true; + }, + // deactivate happens later than willTransition, + // so since we're using the model to render links + // we don't want the UI blinking + deactivate() { + this.unloadModel(); + return true; + }, + }, +}); diff --git a/ui/app/routes/vault/cluster/access/control-groups-configure.js b/ui/app/routes/vault/cluster/access/control-groups-configure.js new file mode 100644 index 0000000..177f223 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/control-groups-configure.js @@ -0,0 +1,40 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { inject as service } from '@ember/service'; +import Route from '@ember/routing/route'; +import UnloadModel from 'vault/mixins/unload-model-route'; + +export default Route.extend(UnloadModel, { + store: service(), + version: service(), + + beforeModel() { + return this.version.fetchFeatures().then(() => { + return this._super(...arguments); + }); + }, + + model() { + const type = 'control-group-config'; + return this.version.hasControlGroups + ? this.store.findRecord(type, 'config').catch((e) => { + // if you haven't saved a config, the API 404s, so create one here to edit and return it + if (e.httpStatus === 404) { + return this.store.createRecord(type, { + id: 'config', + }); + } + throw e; + }) + : null; + }, + + actions: { + reload() { + this.refresh(); + }, + }, +}); diff --git a/ui/app/routes/vault/cluster/access/control-groups.js b/ui/app/routes/vault/cluster/access/control-groups.js new file mode 100644 index 0000000..c2f4380 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/control-groups.js @@ -0,0 +1,23 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { inject as service } from '@ember/service'; +import Route from '@ember/routing/route'; +import UnloadModel from 'vault/mixins/unload-model-route'; + +export default Route.extend(UnloadModel, { + store: service(), + version: service(), + + beforeModel() { + return this.version.fetchFeatures().then(() => { + return this._super(...arguments); + }); + }, + + model() { + return this.version.hasControlGroups ? 
this.store.createRecord('control-group') : null; + }, +}); diff --git a/ui/app/routes/vault/cluster/access/identity.js b/ui/app/routes/vault/cluster/access/identity.js new file mode 100644 index 0000000..eabdc00 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/identity.js @@ -0,0 +1,25 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import AdapterError from '@ember-data/adapter/error'; +import { set } from '@ember/object'; +import Route from '@ember/routing/route'; + +const MODEL_FROM_PARAM = { + entities: 'entity', + groups: 'group', +}; + +export default Route.extend({ + model(params) { + const model = MODEL_FROM_PARAM[params.item_type]; + if (!model) { + const error = new AdapterError(); + set(error, 'httpStatus', 404); + throw error; + } + return model; + }, +}); diff --git a/ui/app/routes/vault/cluster/access/identity/aliases/add.js b/ui/app/routes/vault/cluster/access/identity/aliases/add.js new file mode 100644 index 0000000..1d9b006 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/identity/aliases/add.js @@ -0,0 +1,21 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import UnloadModelRoute from 'vault/mixins/unload-model-route'; +import UnsavedModelRoute from 'vault/mixins/unsaved-model-route'; +import { inject as service } from '@ember/service'; + +export default Route.extend(UnloadModelRoute, UnsavedModelRoute, { + store: service(), + + model(params) { + const itemType = this.modelFor('vault.cluster.access.identity'); + const modelType = `identity/${itemType}-alias`; + return this.store.createRecord(modelType, { + canonicalId: params.item_id, + }); + }, +}); diff --git a/ui/app/routes/vault/cluster/access/identity/aliases/edit.js b/ui/app/routes/vault/cluster/access/identity/aliases/edit.js new file mode 100644 index 0000000..308b9ed --- /dev/null +++ b/ui/app/routes/vault/cluster/access/identity/aliases/edit.js @@ -0,0 +1,19 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import UnloadModelRoute from 'vault/mixins/unload-model-route'; +import UnsavedModelRoute from 'vault/mixins/unsaved-model-route'; +import { inject as service } from '@ember/service'; + +export default Route.extend(UnloadModelRoute, UnsavedModelRoute, { + store: service(), + + model(params) { + const itemType = this.modelFor('vault.cluster.access.identity'); + const modelType = `identity/${itemType}-alias`; + return this.store.findRecord(modelType, params.item_alias_id); + }, +}); diff --git a/ui/app/routes/vault/cluster/access/identity/aliases/index.js b/ui/app/routes/vault/cluster/access/identity/aliases/index.js new file mode 100644 index 0000000..fc38ed7 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/identity/aliases/index.js @@ -0,0 +1,50 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import ListRoute from 'core/mixins/list-route'; +import { inject as service } from '@ember/service'; + +export default Route.extend(ListRoute, { + store: service(), + + model(params) { + const itemType = this.modelFor('vault.cluster.access.identity'); + const modelType = `identity/${itemType}-alias`; + return this.store + .lazyPaginatedQuery(modelType, { + responsePath: 'data.keys', + page: params.page, + pageFilter: params.pageFilter, + sortBy: 'name', + }) + .catch((err) => { + if (err.httpStatus === 404) { + return []; + } else { + throw err; + } + }); + }, + + setupController(controller) { + this._super(...arguments); + controller.set('identityType', this.modelFor('vault.cluster.access.identity')); + }, + + actions: { + willTransition(transition) { + window.scrollTo(0, 0); + if (!transition || transition.targetName !== this.routeName) { + this.store.clearAllDatasets(); + } + return true; + }, + reload() { + this.store.clearAllDatasets(); + this.refresh(); + }, + }, +}); diff --git a/ui/app/routes/vault/cluster/access/identity/aliases/show.js b/ui/app/routes/vault/cluster/access/identity/aliases/show.js new file mode 100644 index 0000000..074bdf4 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/identity/aliases/show.js @@ -0,0 +1,40 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import AdapterError from '@ember-data/adapter/error'; +import { hash } from 'rsvp'; +import { set } from '@ember/object'; +import Route from '@ember/routing/route'; +import { TABS } from 'vault/helpers/tabs-for-identity-show'; +import { inject as service } from '@ember/service'; + +export default Route.extend({ + store: service(), + + model(params) { + const { section } = params; + const itemType = this.modelFor('vault.cluster.access.identity') + '-alias'; + const tabs = TABS[itemType]; + const modelType = `identity/${itemType}`; + if (!tabs.includes(section)) { + const error = new AdapterError(); + set(error, 'httpStatus', 404); + throw error; + } + // TODO peekRecord here to see if we have the record already + return hash({ + model: this.store.findRecord(modelType, params.item_alias_id), + section, + }); + }, + + setupController(controller, resolvedModel) { + const { model, section } = resolvedModel; + controller.setProperties({ + model, + section, + }); + }, +}); diff --git a/ui/app/routes/vault/cluster/access/identity/create.js b/ui/app/routes/vault/cluster/access/identity/create.js new file mode 100644 index 0000000..6b7df56 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/identity/create.js @@ -0,0 +1,19 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import UnloadModelRoute from 'vault/mixins/unload-model-route'; +import UnsavedModelRoute from 'vault/mixins/unsaved-model-route'; +import { inject as service } from '@ember/service'; + +export default Route.extend(UnloadModelRoute, UnsavedModelRoute, { + store: service(), + + model() { + const itemType = this.modelFor('vault.cluster.access.identity'); + const modelType = `identity/${itemType}`; + return this.store.createRecord(modelType); + }, +}); diff --git a/ui/app/routes/vault/cluster/access/identity/edit.js b/ui/app/routes/vault/cluster/access/identity/edit.js new file mode 100644 index 0000000..7127720 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/identity/edit.js @@ -0,0 +1,19 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import UnloadModelRoute from 'vault/mixins/unload-model-route'; +import UnsavedModelRoute from 'vault/mixins/unsaved-model-route'; +import { inject as service } from '@ember/service'; + +export default Route.extend(UnloadModelRoute, UnsavedModelRoute, { + store: service(), + + model(params) { + const itemType = this.modelFor('vault.cluster.access.identity'); + const modelType = `identity/${itemType}`; + return this.store.findRecord(modelType, params.item_id); + }, +}); diff --git a/ui/app/routes/vault/cluster/access/identity/index.js b/ui/app/routes/vault/cluster/access/identity/index.js new file mode 100644 index 0000000..503e20f --- /dev/null +++ b/ui/app/routes/vault/cluster/access/identity/index.js @@ -0,0 +1,50 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import ListRoute from 'core/mixins/list-route'; +import { inject as service } from '@ember/service'; + +export default Route.extend(ListRoute, { + store: service(), + + model(params) { + const itemType = this.modelFor('vault.cluster.access.identity'); + const modelType = `identity/${itemType}`; + return this.store + .lazyPaginatedQuery(modelType, { + responsePath: 'data.keys', + page: params.page, + pageFilter: params.pageFilter, + sortBy: 'name', + }) + .catch((err) => { + if (err.httpStatus === 404) { + return []; + } else { + throw err; + } + }); + }, + + setupController(controller) { + this._super(...arguments); + controller.set('identityType', this.modelFor('vault.cluster.access.identity')); + }, + + actions: { + willTransition(transition) { + window.scrollTo(0, 0); + if (transition.targetName !== this.routeName) { + this.store.clearAllDatasets(); + } + return true; + }, + reload() { + this.store.clearAllDatasets(); + this.refresh(); + }, + }, +}); diff --git a/ui/app/routes/vault/cluster/access/identity/merge.js b/ui/app/routes/vault/cluster/access/identity/merge.js new file mode 100644 index 0000000..c61d33f --- /dev/null +++ b/ui/app/routes/vault/cluster/access/identity/merge.js @@ -0,0 +1,24 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import UnloadModelRoute from 'vault/mixins/unload-model-route'; +import { inject as service } from '@ember/service'; + +export default Route.extend(UnloadModelRoute, { + store: service(), + + beforeModel() { + const itemType = this.modelFor('vault.cluster.access.identity'); + if (itemType !== 'entity') { + return this.transitionTo('vault.cluster.access.identity'); + } + }, + + model() { + const modelType = `identity/entity-merge`; + return this.store.createRecord(modelType); + }, +}); diff --git a/ui/app/routes/vault/cluster/access/identity/show.js b/ui/app/routes/vault/cluster/access/identity/show.js new file mode 100644 index 0000000..5a4e36e --- /dev/null +++ b/ui/app/routes/vault/cluster/access/identity/show.js @@ -0,0 +1,72 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import AdapterError from '@ember-data/adapter/error'; +import { next } from '@ember/runloop'; +import { hash } from 'rsvp'; +import { set } from '@ember/object'; +import Route from '@ember/routing/route'; +import { TABS } from 'vault/helpers/tabs-for-identity-show'; +import { inject as service } from '@ember/service'; + +export default Route.extend({ + store: service(), + + model(params) { + const { section } = params; + const itemType = this.modelFor('vault.cluster.access.identity'); + const tabs = TABS[itemType]; + const modelType = `identity/${itemType}`; + if (!tabs.includes(section)) { + const error = new AdapterError(); + set(error, 'httpStatus', 404); + throw error; + } + + // if the record is in the store use that + let model = this.store.peekRecord(modelType, params.item_id); + + // if we don't have creationTime, we only have a partial model so reload + if (model && !model.get('creationTime')) { + model = model.reload(); + } + + // if there's no model, we need to fetch it + if (!model) { + model = this.store.findRecord(modelType, params.item_id); + } + + return hash({ + model, + section, + }); + }, + + activate() { + // if we're just entering the route, and it's not a hard reload + // reload to make sure we have the newest info + if (this.currentModel) { + next(() => { + /* eslint-disable-next-line ember/no-controller-access-in-routes */ + this.controller.get('model').reload(); + }); + } + }, + + afterModel(resolvedModel) { + const { section, model } = resolvedModel; + if (model.get('identityType') === 'group' && model.get('type') === 'internal' && section === 'aliases') { + return this.transitionTo('vault.cluster.access.identity.show', model.id, 'details'); + } + }, + + setupController(controller, resolvedModel) { + const { model, section } = resolvedModel; + controller.setProperties({ + model, + section, + }); + }, +}); diff --git a/ui/app/routes/vault/cluster/access/leases.js b/ui/app/routes/vault/cluster/access/leases.js new file mode 100644 index 0000000..39d599a --- /dev/null +++ b/ui/app/routes/vault/cluster/access/leases.js @@ -0,0 +1,16 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import ClusterRoute from 'vault/mixins/cluster-route'; +import { inject as service } from '@ember/service'; + +export default Route.extend(ClusterRoute, { + store: service(), + + model() { + return this.store.findRecord('capabilities', 'sys/leases/lookup/'); + }, +}); diff --git a/ui/app/routes/vault/cluster/access/leases/index.js b/ui/app/routes/vault/cluster/access/leases/index.js new file mode 100644 index 0000000..01e74e2 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/leases/index.js @@ -0,0 +1,19 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; + +export default Route.extend({ + beforeModel(transition) { + if ( + this.modelFor('vault.cluster.access.leases').get('canList') && + transition.targetName === this.routeName + ) { + return this.replaceWith('vault.cluster.access.leases.list-root'); + } else { + return; + } + }, +}); diff --git a/ui/app/routes/vault/cluster/access/leases/list-root.js b/ui/app/routes/vault/cluster/access/leases/list-root.js new file mode 100644 index 0000000..eb7ed18 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/leases/list-root.js @@ -0,0 +1,6 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +export { default } from './list'; diff --git a/ui/app/routes/vault/cluster/access/leases/list.js b/ui/app/routes/vault/cluster/access/leases/list.js new file mode 100644 index 0000000..c6f951d --- /dev/null +++ b/ui/app/routes/vault/cluster/access/leases/list.js @@ -0,0 +1,112 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { set } from '@ember/object'; +import { hash } from 'rsvp'; +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default Route.extend({ + store: service(), + + queryParams: { + page: { + refreshModel: true, + }, + pageFilter: { + refreshModel: true, + }, + }, + + templateName: 'vault/cluster/access/leases/list', + + model(params) { + const prefix = params.prefix || ''; + if (this.modelFor('vault.cluster.access.leases').get('canList')) { + return hash({ + leases: this.store + .lazyPaginatedQuery('lease', { + prefix, + responsePath: 'data.keys', + page: params.page, + pageFilter: params.pageFilter, + }) + .then((model) => { + this.set('has404', false); + return model; + }) + .catch((err) => { + if (err.httpStatus === 404 && prefix === '') { + return []; + } else { + throw err; + } + }), + capabilities: hash({ + revokePrefix: this.store.findRecord('capabilities', `sys/leases/revoke-prefix/${prefix}`), + forceRevokePrefix: this.store.findRecord('capabilities', `sys/leases/revoke-force/${prefix}`), + }), + }); + } + }, + + setupController(controller, model) { + const params = this.paramsFor(this.routeName); + const prefix = params.prefix ? params.prefix : ''; + const has404 = this.has404; + controller.set('hasModel', true); + controller.setProperties({ + model: model.leases, + capabilities: model.capabilities, + baseKey: { id: prefix }, + has404, + }); + if (!has404) { + const pageFilter = params.pageFilter; + let filter; + if (prefix) { + filter = prefix + (pageFilter || ''); + } else if (pageFilter) { + filter = pageFilter; + } + controller.setProperties({ + filter: filter || '', + page: model.leases.get('meta.currentPage'), + }); + } + }, + + resetController(controller, isExiting) { + this._super(...arguments); + if (isExiting) { + controller.set('filter', ''); + } + }, + + actions: { + error(error, transition) { + const { prefix } = this.paramsFor(this.routeName); + + set(error, 'keyId', prefix); + /* eslint-disable-next-line ember/no-controller-access-in-routes */ + const hasModel = this.controllerFor(this.routeName).get('hasModel'); + // only swallow the error if we have a previous model + if (hasModel && error.httpStatus === 404) { + this.set('has404', true); + transition.abort(); + } else { + return true; + } + }, + + willTransition(transition) { + window.scrollTo(0, 0); + if (transition.targetName !== this.routeName) { + this.store.clearAllDatasets(); + } + return true; + }, + }, +}); diff --git a/ui/app/routes/vault/cluster/access/leases/show.js b/ui/app/routes/vault/cluster/access/leases/show.js new file mode 100644 index 0000000..b8035e5 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/leases/show.js @@ -0,0 +1,63 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { set } from '@ember/object'; +import { hash } from 'rsvp'; +import Route from '@ember/routing/route'; +import UnloadModelRoute from 'vault/mixins/unload-model-route'; +import utils from 'vault/lib/key-utils'; +import { inject as service } from '@ember/service'; + +export default Route.extend(UnloadModelRoute, { + store: service(), + + beforeModel() { + const { lease_id: leaseId } = this.paramsFor(this.routeName); + const parentKey = utils.parentKeyForKey(leaseId); + if (utils.keyIsFolder(leaseId)) { + if (parentKey) { + return this.transitionTo('vault.cluster.access.leases.list', parentKey); + } else { + return this.transitionTo('vault.cluster.access.leases.list-root'); + } + } + }, + + model(params) { + const { lease_id } = params; + return hash({ + lease: this.store.queryRecord('lease', { + lease_id, + }), + capabilities: hash({ + renew: this.store.findRecord('capabilities', 'sys/leases/renew'), + revoke: this.store.findRecord('capabilities', 'sys/leases/revoke'), + leases: this.modelFor('vault.cluster.access.leases'), + }), + }); + }, + + setupController(controller, model) { + this._super(...arguments); + const { lease_id: leaseId } = this.paramsFor(this.routeName); + controller.setProperties({ + model: model.lease, + capabilities: model.capabilities, + baseKey: { id: leaseId }, + }); + }, + + actions: { + error(error) { + const { lease_id } = this.paramsFor(this.routeName); + set(error, 'keyId', lease_id); + return true; + }, + + refreshModel() { + this.refresh(); + }, + }, +}); diff --git a/ui/app/routes/vault/cluster/access/method.js b/ui/app/routes/vault/cluster/access/method.js new file mode 100644 index 0000000..ade257b --- /dev/null +++ b/ui/app/routes/vault/cluster/access/method.js @@ -0,0 +1,30 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import AdapterError from '@ember-data/adapter/error'; +import { set } from '@ember/object'; +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default Route.extend({ + store: service(), + pathHelp: service('path-help'), + + model(params) { + const { path } = params; + return this.store.findAll('auth-method').then((modelArray) => { + const model = modelArray.findBy('id', path); + if (!model) { + const error = new AdapterError(); + set(error, 'httpStatus', 404); + throw error; + } + return this.pathHelp.getPaths(model.apiPath, path).then((paths) => { + model.set('paths', paths); + return model; + }); + }); + }, +}); diff --git a/ui/app/routes/vault/cluster/access/method/index.js b/ui/app/routes/vault/cluster/access/method/index.js new file mode 100644 index 0000000..b5a5c65 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/method/index.js @@ -0,0 +1,15 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { tabsForAuthSection } from 'vault/helpers/tabs-for-auth-section'; +export default Route.extend({ + beforeModel() { + let { methodType, paths } = this.modelFor('vault.cluster.access.method'); + paths = paths ? 
paths.paths.filter((path) => path.navigation === true) : null; + const activeTab = tabsForAuthSection([methodType, 'authConfig', paths])[0].routeParams; + return this.transitionTo(...activeTab); + }, +}); diff --git a/ui/app/routes/vault/cluster/access/method/item.js b/ui/app/routes/vault/cluster/access/method/item.js new file mode 100644 index 0000000..e39b52c --- /dev/null +++ b/ui/app/routes/vault/cluster/access/method/item.js @@ -0,0 +1,39 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { inject as service } from '@ember/service'; +import Route from '@ember/routing/route'; +import { singularize } from 'ember-inflector'; + +export default Route.extend({ + pathHelp: service('path-help'), + + beforeModel() { + const { apiPath, type, authMethodPath, itemType } = this.getMethodAndModelInfo(); + const modelType = `generated-${singularize(itemType)}-${type}`; + return this.pathHelp.getNewModel(modelType, authMethodPath, apiPath, itemType); + }, + + getMethodAndModelInfo() { + const { item_type: itemType } = this.paramsFor(this.routeName); + const { path: authMethodPath } = this.paramsFor('vault.cluster.access.method'); + const methodModel = this.modelFor('vault.cluster.access.method'); + const { apiPath, type } = methodModel; + return { apiPath, type, authMethodPath, itemType }; + }, + + setupController(controller) { + this._super(...arguments); + const { apiPath, authMethodPath, itemType } = this.getMethodAndModelInfo(); + controller.set('itemType', itemType); + this.pathHelp.getPaths(apiPath, authMethodPath, itemType).then((paths) => { + const navigationPaths = paths.paths.filter((path) => path.navigation); + controller.set( + 'paths', + navigationPaths.filter((path) => path.itemType.includes(itemType)).map((path) => path.path) + ); + }); + }, +}); diff --git a/ui/app/routes/vault/cluster/access/method/item/create.js b/ui/app/routes/vault/cluster/access/method/item/create.js new file mode 100644 index 0000000..32da7a3 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/method/item/create.js @@ -0,0 +1,36 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import UnloadModelRoute from 'vault/mixins/unload-model-route'; +import UnsavedModelRoute from 'vault/mixins/unsaved-model-route'; +import { singularize } from 'ember-inflector'; +import { inject as service } from '@ember/service'; + +export default Route.extend(UnloadModelRoute, UnsavedModelRoute, { + store: service(), + + model() { + const { item_type: itemType } = this.paramsFor('vault.cluster.access.method.item'); + const methodModel = this.modelFor('vault.cluster.access.method'); + const { type } = methodModel; + const { path: method } = this.paramsFor('vault.cluster.access.method'); + const modelType = `generated-${singularize(itemType)}-${type}`; + return this.store.createRecord(modelType, { + itemType, + method, + adapterOptions: { path: `${method}/${itemType}` }, + }); + }, + + setupController(controller) { + this._super(...arguments); + const { item_type: itemType } = this.paramsFor('vault.cluster.access.method.item'); + const { path: method } = this.paramsFor('vault.cluster.access.method'); + controller.set('itemType', singularize(itemType)); + controller.set('mode', 'create'); + controller.set('method', method); + }, +}); diff --git a/ui/app/routes/vault/cluster/access/method/item/edit.js b/ui/app/routes/vault/cluster/access/method/item/edit.js new file mode 100644 index 0000000..fcf4a5f --- /dev/null +++ b/ui/app/routes/vault/cluster/access/method/item/edit.js @@ -0,0 +1,28 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import UnloadModelRoute from 'vault/mixins/unload-model-route'; +import UnsavedModelRoute from 'vault/mixins/unsaved-model-route'; +import { singularize } from 'ember-inflector'; +import { inject as service } from '@ember/service'; + +export default Route.extend(UnloadModelRoute, UnsavedModelRoute, { + store: service(), + + model(params) { + const id = params.item_id; + const { item_type: itemType } = this.paramsFor('vault.cluster.access.method.item'); + const methodModel = this.modelFor('vault.cluster.access.method'); + const modelType = `generated-${singularize(itemType)}-${methodModel.type}`; + return this.store.queryRecord(modelType, { id, authMethodPath: methodModel.id }); + }, + + setupController(controller) { + this._super(...arguments); + const { item_type: itemType } = this.paramsFor('vault.cluster.access.method.item'); + controller.set('itemType', singularize(itemType)); + }, +}); diff --git a/ui/app/routes/vault/cluster/access/method/item/list.js b/ui/app/routes/vault/cluster/access/method/item/list.js new file mode 100644 index 0000000..65f43d7 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/method/item/list.js @@ -0,0 +1,71 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { inject as service } from '@ember/service'; +import Route from '@ember/routing/route'; +import { singularize } from 'ember-inflector'; +import ListRoute from 'vault/mixins/list-route'; + +export default Route.extend(ListRoute, { + store: service(), + pathHelp: service('path-help'), + + getMethodAndModelInfo() { + const { item_type: itemType } = this.paramsFor('vault.cluster.access.method.item'); + const { path: authMethodPath } = this.paramsFor('vault.cluster.access.method'); + const methodModel = this.modelFor('vault.cluster.access.method'); + const { apiPath, type } = methodModel; + return { apiPath, type, authMethodPath, itemType, methodModel }; + }, + + model() { + const { type, authMethodPath, itemType } = this.getMethodAndModelInfo(); + const { page, pageFilter } = this.paramsFor(this.routeName); + const modelType = `generated-${singularize(itemType)}-${type}`; + + return this.store + .lazyPaginatedQuery(modelType, { + responsePath: 'data.keys', + page: page, + pageFilter: pageFilter, + type: itemType, + id: authMethodPath, + }) + .catch((err) => { + if (err.httpStatus === 404) { + return []; + } else { + throw err; + } + }); + }, + + actions: { + willTransition(transition) { + window.scrollTo(0, 0); + if (transition.targetName !== this.routeName) { + this.store.clearAllDatasets(); + } + return true; + }, + reload() { + this.store.clearAllDatasets(); + this.refresh(); + }, + }, + + setupController(controller) { + this._super(...arguments); + const { apiPath, authMethodPath, itemType, methodModel } = this.getMethodAndModelInfo(); + controller.set('itemType', itemType); + controller.set('methodModel', methodModel); + this.pathHelp.getPaths(apiPath, authMethodPath, itemType).then((paths) => { + controller.set( + 'paths', + paths.paths.filter((path) => path.navigation && path.itemType.includes(itemType)) + ); + }); + }, +}); diff --git a/ui/app/routes/vault/cluster/access/method/item/show.js b/ui/app/routes/vault/cluster/access/method/item/show.js new file mode 100644 index 0000000..5617d07 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/method/item/show.js @@ -0,0 +1,27 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { singularize } from 'ember-inflector'; +import { inject as service } from '@ember/service'; +import Route from '@ember/routing/route'; + +export default Route.extend({ + store: service(), + pathHelp: service('path-help'), + + model(params) { + const id = params.item_id; + const { item_type: itemType } = this.paramsFor('vault.cluster.access.method.item'); + const methodModel = this.modelFor('vault.cluster.access.method'); + const modelType = `generated-${singularize(itemType)}-${methodModel.type}`; + return this.store.queryRecord(modelType, { id, authMethodPath: methodModel.id }); + }, + + setupController(controller) { + this._super(...arguments); + const { item_type: itemType } = this.paramsFor('vault.cluster.access.method.item'); + controller.set('itemType', singularize(itemType)); + }, +}); diff --git a/ui/app/routes/vault/cluster/access/method/section.js b/ui/app/routes/vault/cluster/access/method/section.js new file mode 100644 index 0000000..fa6539f --- /dev/null +++ b/ui/app/routes/vault/cluster/access/method/section.js @@ -0,0 +1,31 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import AdapterError from '@ember-data/adapter/error'; +import { set } from '@ember/object'; +import Route from '@ember/routing/route'; + +export default Route.extend({ + model(params) { + const { section_name: section } = params; + if (section !== 'configuration') { + const error = new AdapterError(); + set(error, 'httpStatus', 404); + throw error; + } + return this.modelFor('vault.cluster.access.method'); + }, + + setupController(controller) { + const { section_name: section } = this.paramsFor(this.routeName); + this._super(...arguments); + controller.set('section', section); + const method = this.modelFor('vault.cluster.access.method'); + controller.set( + 'paths', + method.paths.paths.filter((path) => path.navigation) + ); + }, +}); diff --git a/ui/app/routes/vault/cluster/access/methods.js b/ui/app/routes/vault/cluster/access/methods.js new file mode 100644 index 0000000..7d573c2 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/methods.js @@ -0,0 +1,24 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class VaultClusterAccessMethodsRoute extends Route { + @service store; + + queryParams = { + page: { + refreshModel: true, + }, + pageFilter: { + refreshModel: true, + }, + }; + + model() { + return this.store.findAll('auth-method'); + } +} diff --git a/ui/app/routes/vault/cluster/access/mfa/enforcements/create.js b/ui/app/routes/vault/cluster/access/mfa/enforcements/create.js new file mode 100644 index 0000000..26e4e35 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/mfa/enforcements/create.js @@ -0,0 +1,15 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class MfaLoginEnforcementCreateRoute extends Route { + @service store; + + model() { + return this.store.createRecord('mfa-login-enforcement'); + } +} diff --git a/ui/app/routes/vault/cluster/access/mfa/enforcements/enforcement.js b/ui/app/routes/vault/cluster/access/mfa/enforcements/enforcement.js new file mode 100644 index 0000000..1301965 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/mfa/enforcements/enforcement.js @@ -0,0 +1,15 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class MfaLoginEnforcementRoute extends Route { + @service store; + + model({ name }) { + return this.store.findRecord('mfa-login-enforcement', name); + } +} diff --git a/ui/app/routes/vault/cluster/access/mfa/enforcements/enforcement/edit.js b/ui/app/routes/vault/cluster/access/mfa/enforcements/enforcement/edit.js new file mode 100644 index 0000000..6a27d54 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/mfa/enforcements/enforcement/edit.js @@ -0,0 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; + +export default class MfaLoginEnforcementEditRoute extends Route {} diff --git a/ui/app/routes/vault/cluster/access/mfa/enforcements/index.js b/ui/app/routes/vault/cluster/access/mfa/enforcements/index.js new file mode 100644 index 0000000..7b656fd --- /dev/null +++ b/ui/app/routes/vault/cluster/access/mfa/enforcements/index.js @@ -0,0 +1,24 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class MfaEnforcementsRoute extends Route { + @service store; + + model() { + return this.store.query('mfa-login-enforcement', {}).catch((err) => { + if (err.httpStatus === 404) { + return []; + } else { + throw err; + } + }); + } + setupController(controller, model) { + controller.set('model', model); + } +} diff --git a/ui/app/routes/vault/cluster/access/mfa/index.js b/ui/app/routes/vault/cluster/access/mfa/index.js new file mode 100644 index 0000000..ef12533 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/mfa/index.js @@ -0,0 +1,23 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class MfaConfigureRoute extends Route { + @service store; + + beforeModel() { + return this.store + .query('mfa-method', {}) + .then(() => { + // if response then they should transition to the methods page instead of staying on the configure page. + this.transitionTo('vault.cluster.access.mfa.methods.index'); + }) + .catch(() => { + // stay on the landing page + }); + } +} diff --git a/ui/app/routes/vault/cluster/access/mfa/methods/create.js b/ui/app/routes/vault/cluster/access/mfa/methods/create.js new file mode 100644 index 0000000..f5c3ced --- /dev/null +++ b/ui/app/routes/vault/cluster/access/mfa/methods/create.js @@ -0,0 +1,25 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; + +export default class MfaLoginEnforcementCreateRoute extends Route { + setupController(controller) { + super.setupController(...arguments); + // if route was refreshed after type select recreate method model + const { type } = controller; + if (type) { + // create method and enforcement models for forms if type is selected + controller.createModels(); + } + } + resetController(controller, isExiting) { + if (isExiting) { + // reset type query param when user saves or cancels + // this will not trigger when refreshing the page which preserves intended functionality + controller.set('type', null); + } + } +} diff --git a/ui/app/routes/vault/cluster/access/mfa/methods/index.js b/ui/app/routes/vault/cluster/access/mfa/methods/index.js new file mode 100644 index 0000000..7be2c2d --- /dev/null +++ b/ui/app/routes/vault/cluster/access/mfa/methods/index.js @@ -0,0 +1,31 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class MfaMethodsRoute extends Route { + @service store; + @service router; + + model() { + return this.store.query('mfa-method', {}).catch((err) => { + if (err.httpStatus === 404) { + return []; + } else { + throw err; + } + }); + } + + afterModel(model) { + if (model.length === 0) { + this.router.transitionTo('vault.cluster.access.mfa'); + } + } + setupController(controller, model) { + controller.set('model', model); + } +} diff --git a/ui/app/routes/vault/cluster/access/mfa/methods/method.js b/ui/app/routes/vault/cluster/access/mfa/methods/method.js new file mode 100644 index 0000000..f518efa --- /dev/null +++ b/ui/app/routes/vault/cluster/access/mfa/methods/method.js @@ -0,0 +1,33 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { hash } from 'rsvp'; +import { inject as service } from '@ember/service'; + +export default class MfaMethodRoute extends Route { + @service store; + + model({ id }) { + return hash({ + method: this.store.findRecord('mfa-method', id).then((data) => data), + enforcements: this.store + .query('mfa-login-enforcement', {}) + .then((data) => { + const filteredEnforcements = data.filter((item) => { + const results = item.hasMany('mfa_methods').ids(); + return results.includes(id); + }); + return filteredEnforcements; + }) + .catch(() => { + // Do nothing + }), + }); + } + setupController(controller, model) { + controller.set('model', model); + } +} diff --git a/ui/app/routes/vault/cluster/access/mfa/methods/method/edit.js b/ui/app/routes/vault/cluster/access/mfa/methods/method/edit.js new file mode 100644 index 0000000..4dcdd87 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/mfa/methods/method/edit.js @@ -0,0 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; + +export default class MfaMethodEditRoute extends Route {} diff --git a/ui/app/routes/vault/cluster/access/namespaces/create.js b/ui/app/routes/vault/cluster/access/namespaces/create.js new file mode 100644 index 0000000..14aee25 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/namespaces/create.js @@ -0,0 +1,23 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { inject as service } from '@ember/service'; +import Route from '@ember/routing/route'; +import UnloadModel from 'vault/mixins/unload-model-route'; + +export default Route.extend(UnloadModel, { + store: service(), + version: service(), + + beforeModel() { + return this.version.fetchFeatures().then(() => { + return this._super(...arguments); + }); + }, + + model() { + return this.version.hasNamespaces ? this.store.createRecord('namespace') : null; + }, +}); diff --git a/ui/app/routes/vault/cluster/access/namespaces/index.js b/ui/app/routes/vault/cluster/access/namespaces/index.js new file mode 100644 index 0000000..f662de6 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/namespaces/index.js @@ -0,0 +1,86 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { inject as service } from '@ember/service'; +import Route from '@ember/routing/route'; +import UnloadModel from 'vault/mixins/unload-model-route'; + +export default Route.extend(UnloadModel, { + store: service(), + + queryParams: { + page: { + refreshModel: true, + }, + }, + + version: service(), + + beforeModel() { + this.store.unloadAll('namespace'); + return this.version.fetchFeatures().then(() => { + return this._super(...arguments); + }); + }, + + model(params) { + if (this.version.hasNamespaces) { + return this.store + .lazyPaginatedQuery('namespace', { + responsePath: 'data.keys', + page: Number(params?.page) || 1, + }) + .then((model) => { + return model; + }) + .catch((err) => { + if (err.httpStatus === 404) { + return []; + } else { + throw err; + } + }); + } + return null; + }, + + setupController(controller, model) { + const has404 = this.has404; + controller.setProperties({ + model: model, + has404, + hasModel: true, + }); + if (!has404) { + controller.setProperties({ + page: Number(model?.meta?.currentPage) || 1, + }); + } + }, + + actions: { + error(error, transition) { + /* eslint-disable-next-line ember/no-controller-access-in-routes */ + const hasModel = this.controllerFor(this.routeName).get('hasModel'); + if (hasModel && error.httpStatus === 404) { + this.set('has404', true); + transition.abort(); + } else { + return true; + } + }, + willTransition(transition) { + window.scrollTo(0, 0); + if (!transition || transition.targetName !== this.routeName) { + this.store.clearAllDatasets(); + } + return true; + }, + reload() { + this.store.clearAllDatasets(); + this.refresh(); + }, + }, +}); diff --git a/ui/app/routes/vault/cluster/access/oidc.js b/ui/app/routes/vault/cluster/access/oidc.js new file mode 100644 index 0000000..4683b3a --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc.js @@ -0,0 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; + +export default class OidcConfigureRoute extends Route {} diff --git a/ui/app/routes/vault/cluster/access/oidc/assignments/assignment.js b/ui/app/routes/vault/cluster/access/oidc/assignments/assignment.js new file mode 100644 index 0000000..0dc5133 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/assignments/assignment.js @@ -0,0 +1,15 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class OidcAssignmentRoute extends Route { + @service store; + + model({ name }) { + return this.store.findRecord('oidc/assignment', name); + } +} diff --git a/ui/app/routes/vault/cluster/access/oidc/assignments/assignment/details.js b/ui/app/routes/vault/cluster/access/oidc/assignments/assignment/details.js new file mode 100644 index 0000000..9f353a8 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/assignments/assignment/details.js @@ -0,0 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; + +export default class OidcAssignmentDetailsRoute extends Route {} diff --git a/ui/app/routes/vault/cluster/access/oidc/assignments/assignment/edit.js b/ui/app/routes/vault/cluster/access/oidc/assignments/assignment/edit.js new file mode 100644 index 0000000..94f96b4 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/assignments/assignment/edit.js @@ -0,0 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; + +export default class OidcAssignmentEditRoute extends Route {} diff --git a/ui/app/routes/vault/cluster/access/oidc/assignments/create.js b/ui/app/routes/vault/cluster/access/oidc/assignments/create.js new file mode 100644 index 0000000..fc65093 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/assignments/create.js @@ -0,0 +1,15 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class OidcAssignmentsCreateRoute extends Route { + @service store; + + model() { + return this.store.createRecord('oidc/assignment'); + } +} diff --git a/ui/app/routes/vault/cluster/access/oidc/assignments/index.js b/ui/app/routes/vault/cluster/access/oidc/assignments/index.js new file mode 100644 index 0000000..3c25c95 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/assignments/index.js @@ -0,0 +1,20 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class OidcAssignmentsRoute extends Route { + @service store; + model() { + return this.store.query('oidc/assignment', {}).catch((err) => { + if (err.httpStatus === 404) { + return []; + } else { + throw err; + } + }); + } +} diff --git a/ui/app/routes/vault/cluster/access/oidc/clients/client.js b/ui/app/routes/vault/cluster/access/oidc/clients/client.js new file mode 100644 index 0000000..cd90870 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/clients/client.js @@ -0,0 +1,15 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class OidcClientRoute extends Route { + @service store; + + model({ name }) { + return this.store.findRecord('oidc/client', name); + } +} diff --git a/ui/app/routes/vault/cluster/access/oidc/clients/client/details.js b/ui/app/routes/vault/cluster/access/oidc/clients/client/details.js new file mode 100644 index 0000000..c02c28e --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/clients/client/details.js @@ -0,0 +1,7 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +export default class OidcClientDetailsRoute extends Route {} diff --git a/ui/app/routes/vault/cluster/access/oidc/clients/client/edit.js b/ui/app/routes/vault/cluster/access/oidc/clients/client/edit.js new file mode 100644 index 0000000..a2aee44 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/clients/client/edit.js @@ -0,0 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; + +export default class OidcClientEditRoute extends Route {} diff --git a/ui/app/routes/vault/cluster/access/oidc/clients/client/providers.js b/ui/app/routes/vault/cluster/access/oidc/clients/client/providers.js new file mode 100644 index 0000000..26f77b3 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/clients/client/providers.js @@ -0,0 +1,26 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class OidcClientProvidersRoute extends Route { + @service store; + + model() { + const model = this.modelFor('vault.cluster.access.oidc.clients.client'); + return this.store + .query('oidc/provider', { + allowed_client_id: model.clientId, + }) + .catch((err) => { + if (err.httpStatus === 404) { + return []; + } else { + throw err; + } + }); + } +} diff --git a/ui/app/routes/vault/cluster/access/oidc/clients/create.js b/ui/app/routes/vault/cluster/access/oidc/clients/create.js new file mode 100644 index 0000000..317ec69 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/clients/create.js @@ -0,0 +1,15 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class OidcClientsCreateRoute extends Route { + @service store; + + model() { + return this.store.createRecord('oidc/client'); + } +} diff --git a/ui/app/routes/vault/cluster/access/oidc/clients/index.js b/ui/app/routes/vault/cluster/access/oidc/clients/index.js new file mode 100644 index 0000000..4f932de --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/clients/index.js @@ -0,0 +1,27 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; +export default class OidcClientsRoute extends Route { + @service store; + @service router; + + model() { + return this.store.query('oidc/client', {}).catch((err) => { + if (err.httpStatus === 404) { + return []; + } else { + throw err; + } + }); + } + + afterModel(model) { + if (model.length === 0) { + this.router.transitionTo('vault.cluster.access.oidc'); + } + } +} diff --git a/ui/app/routes/vault/cluster/access/oidc/index.js b/ui/app/routes/vault/cluster/access/oidc/index.js new file mode 100644 index 0000000..a85a4d6 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/index.js @@ -0,0 +1,24 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class OidcConfigureRoute extends Route { + @service store; + @service router; + + beforeModel() { + return this.store + .query('oidc/client', {}) + .then(() => { + // transition to client list view if clients have been created + this.router.transitionTo('vault.cluster.access.oidc.clients'); + }) + .catch(() => { + // adapter throws error for 404 - swallow and remain on index route to show call to action + }); + } +} diff --git a/ui/app/routes/vault/cluster/access/oidc/keys/create.js b/ui/app/routes/vault/cluster/access/oidc/keys/create.js new file mode 100644 index 0000000..c34f8c8 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/keys/create.js @@ -0,0 +1,15 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class OidcKeysCreateRoute extends Route { + @service store; + + model() { + return this.store.createRecord('oidc/key'); + } +} diff --git a/ui/app/routes/vault/cluster/access/oidc/keys/index.js b/ui/app/routes/vault/cluster/access/oidc/keys/index.js new file mode 100644 index 0000000..4e0d835 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/keys/index.js @@ -0,0 +1,21 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class OidcKeysRoute extends Route { + @service store; + + model() { + return this.store.query('oidc/key', {}).catch((err) => { + if (err.httpStatus === 404) { + return []; + } else { + throw err; + } + }); + } +} diff --git a/ui/app/routes/vault/cluster/access/oidc/keys/key.js b/ui/app/routes/vault/cluster/access/oidc/keys/key.js new file mode 100644 index 0000000..84d8865 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/keys/key.js @@ -0,0 +1,15 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class OidcKeyRoute extends Route { + @service store; + + model({ name }) { + return this.store.findRecord('oidc/key', name); + } +} diff --git a/ui/app/routes/vault/cluster/access/oidc/keys/key/clients.js b/ui/app/routes/vault/cluster/access/oidc/keys/key/clients.js new file mode 100644 index 0000000..f295cfd --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/keys/key/clients.js @@ -0,0 +1,16 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class OidcKeyClientsRoute extends Route { + @service store; + + async model() { + const { allowedClientIds } = this.modelFor('vault.cluster.access.oidc.keys.key'); + return await this.store.query('oidc/client', { paramKey: 'client_id', filterFor: allowedClientIds }); + } +} diff --git a/ui/app/routes/vault/cluster/access/oidc/keys/key/details.js b/ui/app/routes/vault/cluster/access/oidc/keys/key/details.js new file mode 100644 index 0000000..6762ec3 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/keys/key/details.js @@ -0,0 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; + +export default class OidcKeyDetailsRoute extends Route {} diff --git a/ui/app/routes/vault/cluster/access/oidc/keys/key/edit.js b/ui/app/routes/vault/cluster/access/oidc/keys/key/edit.js new file mode 100644 index 0000000..f21031e --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/keys/key/edit.js @@ -0,0 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; + +export default class OidcKeyEditRoute extends Route {} diff --git a/ui/app/routes/vault/cluster/access/oidc/providers/create.js b/ui/app/routes/vault/cluster/access/oidc/providers/create.js new file mode 100644 index 0000000..f4d1a36 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/providers/create.js @@ -0,0 +1,15 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class OidcProvidersCreateRoute extends Route { + @service store; + + model() { + return this.store.createRecord('oidc/provider'); + } +} diff --git a/ui/app/routes/vault/cluster/access/oidc/providers/index.js b/ui/app/routes/vault/cluster/access/oidc/providers/index.js new file mode 100644 index 0000000..5b172e3 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/providers/index.js @@ -0,0 +1,21 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class OidcProvidersRoute extends Route { + @service store; + + model() { + return this.store.query('oidc/provider', {}).catch((err) => { + if (err.httpStatus === 404) { + return []; + } else { + throw err; + } + }); + } +} diff --git a/ui/app/routes/vault/cluster/access/oidc/providers/provider.js b/ui/app/routes/vault/cluster/access/oidc/providers/provider.js new file mode 100644 index 0000000..d658ca3 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/providers/provider.js @@ -0,0 +1,15 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class OidcProviderRoute extends Route { + @service store; + + model({ name }) { + return this.store.findRecord('oidc/provider', name); + } +} diff --git a/ui/app/routes/vault/cluster/access/oidc/providers/provider/clients.js b/ui/app/routes/vault/cluster/access/oidc/providers/provider/clients.js new file mode 100644 index 0000000..c7d98a0 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/providers/provider/clients.js @@ -0,0 +1,16 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class OidcProviderClientsRoute extends Route { + @service store; + + async model() { + const { allowedClientIds } = this.modelFor('vault.cluster.access.oidc.providers.provider'); + return await this.store.query('oidc/client', { paramKey: 'client_id', filterFor: allowedClientIds }); + } +} diff --git a/ui/app/routes/vault/cluster/access/oidc/providers/provider/details.js b/ui/app/routes/vault/cluster/access/oidc/providers/provider/details.js new file mode 100644 index 0000000..9faa77b --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/providers/provider/details.js @@ -0,0 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; + +export default class OidcProviderDetailsRoute extends Route {} diff --git a/ui/app/routes/vault/cluster/access/oidc/providers/provider/edit.js b/ui/app/routes/vault/cluster/access/oidc/providers/provider/edit.js new file mode 100644 index 0000000..ad1e9fc --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/providers/provider/edit.js @@ -0,0 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; + +export default class OidcProviderEditRoute extends Route {} diff --git a/ui/app/routes/vault/cluster/access/oidc/scopes/create.js b/ui/app/routes/vault/cluster/access/oidc/scopes/create.js new file mode 100644 index 0000000..65c1a5d --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/scopes/create.js @@ -0,0 +1,15 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class OidcScopesCreateRoute extends Route { + @service store; + + model() { + return this.store.createRecord('oidc/scope'); + } +} diff --git a/ui/app/routes/vault/cluster/access/oidc/scopes/index.js b/ui/app/routes/vault/cluster/access/oidc/scopes/index.js new file mode 100644 index 0000000..64383c7 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/scopes/index.js @@ -0,0 +1,21 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class OidcScopesRoute extends Route { + @service store; + + model() { + return this.store.query('oidc/scope', {}).catch((err) => { + if (err.httpStatus === 404) { + return []; + } else { + throw err; + } + }); + } +} diff --git a/ui/app/routes/vault/cluster/access/oidc/scopes/scope.js b/ui/app/routes/vault/cluster/access/oidc/scopes/scope.js new file mode 100644 index 0000000..e3ad226 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/scopes/scope.js @@ -0,0 +1,15 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class OidcScopeRoute extends Route { + @service store; + + model({ name }) { + return this.store.findRecord('oidc/scope', name); + } +} diff --git a/ui/app/routes/vault/cluster/access/oidc/scopes/scope/details.js b/ui/app/routes/vault/cluster/access/oidc/scopes/scope/details.js new file mode 100644 index 0000000..5973918 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/scopes/scope/details.js @@ -0,0 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; + +export default class OidcScopeDetailsRoute extends Route {} diff --git a/ui/app/routes/vault/cluster/access/oidc/scopes/scope/edit.js b/ui/app/routes/vault/cluster/access/oidc/scopes/scope/edit.js new file mode 100644 index 0000000..0140ac9 --- /dev/null +++ b/ui/app/routes/vault/cluster/access/oidc/scopes/scope/edit.js @@ -0,0 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; + +export default class OidcScopeEditRoute extends Route {} diff --git a/ui/app/routes/vault/cluster/auth.js b/ui/app/routes/vault/cluster/auth.js new file mode 100644 index 0000000..fa9866e --- /dev/null +++ b/ui/app/routes/vault/cluster/auth.js @@ -0,0 +1,40 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { inject as service } from '@ember/service'; +import ClusterRouteBase from './cluster-route-base'; +import config from 'vault/config/environment'; + +export default ClusterRouteBase.extend({ + queryParams: { + authMethod: { + replace: true, + }, + }, + flashMessages: service(), + version: service(), + beforeModel() { + return this._super().then(() => { + return this.version.fetchFeatures(); + }); + }, + model() { + return this._super(...arguments); + }, + + resetController(controller) { + controller.set('wrappedToken', ''); + controller.set('authMethod', 'token'); + }, + + afterModel() { + if (config.welcomeMessage) { + this.flashMessages.info(config.welcomeMessage, { + sticky: true, + priority: 300, + }); + } + }, +}); diff --git a/ui/app/routes/vault/cluster/clients.js b/ui/app/routes/vault/cluster/clients.js new file mode 100644 index 0000000..b611adf --- /dev/null +++ b/ui/app/routes/vault/cluster/clients.js @@ -0,0 +1,43 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { hash } from 'rsvp'; +import { action } from '@ember/object'; +import getStorage from 'vault/lib/token-storage'; +import { inject as service } from '@ember/service'; +const INPUTTED_START_DATE = 'vault:ui-inputted-start-date'; + +export default class ClientsRoute extends Route { + @service store; + async getVersionHistory() { + return this.store + .findAll('clients/version-history') + .then((response) => { + return response.map(({ version, previousVersion, timestampInstalled }) => { + return { + version, + previousVersion, + timestampInstalled, + }; + }); + }) + .catch(() => []); + } + + model() { + // swallow config error so activity can show if no config permissions + return hash({ + config: this.store.queryRecord('clients/config', {}).catch(() => {}), + versionHistory: this.getVersionHistory(), + }); + } + + @action + deactivate() { + // when navigating away from parent route, delete manually inputted license start date + getStorage().removeItem(INPUTTED_START_DATE); + } +} diff --git a/ui/app/routes/vault/cluster/clients/config.js b/ui/app/routes/vault/cluster/clients/config.js new file mode 100644 index 0000000..b1046db --- /dev/null +++ b/ui/app/routes/vault/cluster/clients/config.js @@ -0,0 +1,15 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class ConfigRoute extends Route { + @service store; + + model() { + return this.store.queryRecord('clients/config', {}); + } +} diff --git a/ui/app/routes/vault/cluster/clients/dashboard.js b/ui/app/routes/vault/cluster/clients/dashboard.js new file mode 100644 index 0000000..883d4ae --- /dev/null +++ b/ui/app/routes/vault/cluster/clients/dashboard.js @@ -0,0 +1,49 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import getStorage from 'vault/lib/token-storage'; +import { inject as service } from '@ember/service'; +import timestamp from 'core/utils/timestamp'; + +export default class DashboardRoute extends Route { + @service store; + currentDate = timestamp.now().toISOString(); + + async getActivity(start_time) { + // on init ONLY make network request if we have a start_time + return start_time + ? 
await this.store.queryRecord('clients/activity', { + start_time: { timestamp: start_time }, + end_time: { timestamp: this.currentDate }, + }) + : {}; + } + + async getLicenseStartTime() { + try { + const license = await this.store.queryRecord('license', {}); + // if license.startTime is 'undefined' return 'null' for consistency + return license.startTime || getStorage().getItem('vault:ui-inputted-start-date') || null; + } catch (e) { + // return null so user can input date manually + // if already inputted manually, will be in localStorage + return getStorage().getItem('vault:ui-inputted-start-date') || null; + } + } + + async model() { + const { config, versionHistory } = this.modelFor('vault.cluster.clients'); + const licenseStart = await this.getLicenseStartTime(); + const activity = await this.getActivity(licenseStart); + return { + config, + versionHistory, + activity, + licenseStartTimestamp: licenseStart, + currentDate: this.currentDate, + }; + } +} diff --git a/ui/app/routes/vault/cluster/clients/edit.js b/ui/app/routes/vault/cluster/clients/edit.js new file mode 100644 index 0000000..d296d9a --- /dev/null +++ b/ui/app/routes/vault/cluster/clients/edit.js @@ -0,0 +1,15 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default Route.extend({ + store: service(), + + model() { + return this.store.queryRecord('clients/config', {}); + }, +}); diff --git a/ui/app/routes/vault/cluster/clients/index.js b/ui/app/routes/vault/cluster/clients/index.js new file mode 100644 index 0000000..940f634 --- /dev/null +++ b/ui/app/routes/vault/cluster/clients/index.js @@ -0,0 +1,10 @@ +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class ClientsIndexRoute extends Route { + @service router; + + redirect() { + this.router.transitionTo('vault.cluster.clients.dashboard'); + } +} diff --git a/ui/app/routes/vault/cluster/cluster-route-base.js b/ui/app/routes/vault/cluster/cluster-route-base.js new file mode 100644 index 0000000..3097225 --- /dev/null +++ b/ui/app/routes/vault/cluster/cluster-route-base.js @@ -0,0 +1,20 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +// this is the base route for +// all of the CLUSTER_ROUTES that are states before you can use vault +// +import Route from '@ember/routing/route'; +import ClusterRoute from 'vault/mixins/cluster-route'; + +export default Route.extend(ClusterRoute, { + model() { + return this.modelFor('vault.cluster'); + }, + + resetController(controller) { + controller.reset && controller.reset(); + }, +}); diff --git a/ui/app/routes/vault/cluster/index.js b/ui/app/routes/vault/cluster/index.js new file mode 100644 index 0000000..2397a7e --- /dev/null +++ b/ui/app/routes/vault/cluster/index.js @@ -0,0 +1,12 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; + +export default Route.extend({ + beforeModel() { + return this.transitionTo('vault.cluster.secrets'); + }, +}); diff --git a/ui/app/routes/vault/cluster/init.js b/ui/app/routes/vault/cluster/init.js new file mode 100644 index 0000000..cca8691 --- /dev/null +++ b/ui/app/routes/vault/cluster/init.js @@ -0,0 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import ClusterRoute from './cluster-route-base'; + +export default ClusterRoute.extend({}); diff --git a/ui/app/routes/vault/cluster/license.js b/ui/app/routes/vault/cluster/license.js new file mode 100644 index 0000000..a295df6 --- /dev/null +++ b/ui/app/routes/vault/cluster/license.js @@ -0,0 +1,23 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import ClusterRoute from 'vault/mixins/cluster-route'; +import { inject as service } from '@ember/service'; + +export default Route.extend(ClusterRoute, { + store: service(), + version: service(), + + beforeModel() { + if (this.version.isOSS) { + this.transitionTo('vault.cluster'); + } + }, + + model() { + return this.store.queryRecord('license', {}); + }, +}); diff --git a/ui/app/routes/vault/cluster/logout.js b/ui/app/routes/vault/cluster/logout.js new file mode 100644 index 0000000..18ff383 --- /dev/null +++ b/ui/app/routes/vault/cluster/logout.js @@ -0,0 +1,48 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Ember from 'ember'; +import { computed } from '@ember/object'; +import { inject as service } from '@ember/service'; +import Route from '@ember/routing/route'; +import ModelBoundaryRoute from 'vault/mixins/model-boundary-route'; + +export default Route.extend(ModelBoundaryRoute, { + auth: service(), + controlGroup: service(), + flashMessages: service(), + console: service(), + permissions: service(), + namespaceService: service('namespace'), + router: service(), + + modelTypes: computed(function () { + return ['secret', 'secret-engine']; + }), + + beforeModel({ to: { queryParams } }) { + const authType = this.auth.getAuthType(); + const ns = this.namespaceService.path; + this.auth.deleteCurrentToken(); + this.controlGroup.deleteTokens(); + this.namespaceService.reset(); + this.console.set('isOpen', false); + this.console.clearLog(true); + this.flashMessages.clearMessages(); + this.permissions.reset(); + + queryParams.with = authType; + if (ns) { + queryParams.namespace = ns; + } + if (Ember.testing) { + // Don't redirect on the test + this.replaceWith('vault.cluster.auth', { queryParams }); + } else { + const { cluster_name } = this.paramsFor('vault.cluster'); + location.assign(this.router.urlFor('vault.cluster.auth', cluster_name, { queryParams })); + } + }, +}); diff --git a/ui/app/routes/vault/cluster/mfa-setup.js b/ui/app/routes/vault/cluster/mfa-setup.js new file mode 100644 index 0000000..b8aa955 --- /dev/null +++ b/ui/app/routes/vault/cluster/mfa-setup.js @@ -0,0 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; + +export default class MfaSetupRoute extends Route {} diff --git a/ui/app/routes/vault/cluster/oidc-callback.js b/ui/app/routes/vault/cluster/oidc-callback.js new file mode 100644 index 0000000..39c4f3d --- /dev/null +++ b/ui/app/routes/vault/cluster/oidc-callback.js @@ -0,0 +1,46 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; + +export function getParamsForCallback(qp, searchString) { + const queryString = decodeURIComponent(searchString); + let { path, code, state, namespace } = qp; + // namespace from state takes precedence over the cluster's ns + if (state?.includes(',ns=')) { + [state, namespace] = state.split(',ns='); + } + // some SSO providers do not return a url-encoded state param + // check for namespace using URLSearchParams instead of paramsFor + const urlParams = new URLSearchParams(queryString); + const checkState = urlParams.get('state'); + if (checkState?.includes(',ns=')) { + [state, namespace] = checkState.split(',ns='); + } + path = window.decodeURIComponent(path); + const payload = { source: 'oidc-callback', path: path || '', code: code || '', state: state || '' }; + if (namespace) { + payload.namespace = namespace; + } + return payload; +} + +export default Route.extend({ + templateName: 'vault/cluster/oidc-callback', + model() { + // left blank so we render the template immediately + }, + afterModel() { + const { auth_path: path, code, state } = this.paramsFor(this.routeName); + const { namespaceQueryParam: namespace } = this.paramsFor('vault.cluster'); + const queryString = window.location.search; + const payload = getParamsForCallback({ path, code, state, namespace }, queryString); + window.opener.postMessage(payload, window.origin); + }, + setupController(controller) { + this._super(...arguments); + controller.set('pageContainer', document.querySelector('.page-container')); + }, +}); diff --git a/ui/app/routes/vault/cluster/oidc-provider-ns.js b/ui/app/routes/vault/cluster/oidc-provider-ns.js new file mode 100644 index 0000000..d056e4e --- /dev/null +++ b/ui/app/routes/vault/cluster/oidc-provider-ns.js @@ -0,0 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import VaultClusterOidcProviderRoute from './oidc-provider'; + +export default class VaultClusterOidcProviderNsRoute extends VaultClusterOidcProviderRoute {} diff --git a/ui/app/routes/vault/cluster/oidc-provider.js b/ui/app/routes/vault/cluster/oidc-provider.js new file mode 100644 index 0000000..849d07e --- /dev/null +++ b/ui/app/routes/vault/cluster/oidc-provider.js @@ -0,0 +1,173 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Ember from 'ember'; +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +const AUTH = 'vault.cluster.auth'; +const PROVIDER = 'vault.cluster.oidc-provider'; +const NS_PROVIDER = 'vault.cluster.oidc-provider-ns'; + +export default class VaultClusterOidcProviderRoute extends Route { + @service auth; + @service router; + + get win() { + return this.window || window; + } + + _redirect(url, params) { + if (!url) return; + const redir = this._buildUrl(url, params); + if (Ember.testing) { + return redir; + } + this.win.location.replace(redir); + } + + beforeModel(transition) { + const currentToken = this.auth.get('currentTokenName'); + const qp = transition.to.queryParams; + // remove redirect_to if carried over from auth + qp.redirect_to = null; + if (!currentToken && 'none' === qp.prompt?.toLowerCase()) { + this._redirect(qp.redirect_uri, { + state: qp.state, + error: 'login_required', + }); + } else if (!currentToken || 'login' === qp.prompt?.toLowerCase()) { + const logout = !!currentToken; + if ('login' === qp.prompt?.toLowerCase()) { + // need to remove before redirect to avoid infinite loop + qp.prompt = null; + } + return this._redirectToAuth({ + ...transition.to.params, + qp, + logout, + }); + } + } + + _redirectToAuth({ provider_name, namespace = null, qp, logout = false }) { + const { cluster_name } = this.paramsFor('vault.cluster'); + let url = namespace + ? this.router.urlFor(NS_PROVIDER, cluster_name, namespace, provider_name, { queryParams: qp }) + : this.router.urlFor(PROVIDER, cluster_name, provider_name, { queryParams: qp }); + // This is terrible, I'm sorry + // Need to do this because transitionTo (as used in auth-form) expects url without + // rootURL /ui/ at the beginning, but urlFor builds it in. We can't use currentRoute + // because it hasn't transitioned yet + url = url.replace(/^(\/?ui)/, ''); + if (logout) { + this.auth.deleteCurrentToken(); + } + // o param can be anything, as long as it's present the auth page will change + const queryParams = { + redirect_to: url, + o: provider_name, + }; + if (namespace) { + queryParams.namespace = namespace; + } + return this.transitionTo(AUTH, cluster_name, { queryParams }); + } + + _buildUrl(urlString, params) { + try { + const url = new URL(urlString); + Object.keys(params).forEach((key) => { + if (params[key]) { + url.searchParams.append(key, params[key]); + } + }); + return url; + } catch (e) { + console.debug('DEBUG: parsing url failed for', urlString); // eslint-disable-line + throw new Error('Invalid URL'); + } + } + + _handleSuccess(response, baseUrl, state) { + const { code } = response; + const redirectUrl = this._buildUrl(baseUrl, { code, state }); + if (Ember.testing) { + return { redirectUrl }; + } + this.win.location.replace(redirectUrl); + } + _handleError(errorResp, baseUrl) { + const redirectUrl = this._buildUrl(baseUrl, { ...errorResp }); + if (Ember.testing) { + return { redirectUrl }; + } + this.win.location.replace(redirectUrl); + } + + /** + * Method for getting the parameters from the route. 
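+   * Pulls provider_name and namespace out of the raw route params and pre-decodes the redirect_uri.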
Allows for namespace to be defined on extended route oidc-provider-ns + * @param {object} params object passed into the model method + * @returns object with provider_name (string), qp (object of query params), decodedRedirect (string, FQDN) + */ + _getInfoFromParams(params) { + const { provider_name, namespace, ...qp } = params; + const decodedRedirect = decodeURI(qp.redirect_uri); + return { + provider_name, + qp, + decodedRedirect, + namespace, + }; + } + + async model(params) { + const modelInfo = this._getInfoFromParams(params); + const { qp, decodedRedirect, ...routeParams } = modelInfo; + const endpoint = this._buildUrl( + `${this.win.origin}/v1/identity/oidc/provider/${routeParams.provider_name}/authorize`, + qp + ); + if (!qp.redirect_uri) { + throw new Error('Missing required query params'); + } + try { + const response = await this.auth.ajax(endpoint, 'GET', { namespace: routeParams.namespace }); + if ('consent' === qp.prompt?.toLowerCase()) { + return { + consent: { + code: response.code, + redirect: decodedRedirect, + state: qp.state, + }, + }; + } + return this._handleSuccess(response, decodedRedirect, qp.state); + } catch (errorRes) { + const resp = await errorRes.json(); + const code = resp.error; + if (code === 'max_age_violation' || resp?.errors?.includes('permission denied')) { + this._redirectToAuth({ ...routeParams, qp, logout: true }); + } else if (code === 'invalid_redirect_uri') { + return { + error: { + title: 'Redirect URI mismatch', + message: + 'The provided redirect_uri is not in the list of allowed redirect URIs. Please make sure you are sending a valid redirect URI from your application.', + }, + }; + } else if (code === 'invalid_client_id') { + return { + error: { + title: 'Invalid client ID', + message: 'Your client ID is invalid. Please update your configuration and try again.', + }, + }; + } else { + return this._handleError(resp, decodedRedirect); + } + } + } +} diff --git a/ui/app/routes/vault/cluster/policies.js b/ui/app/routes/vault/cluster/policies.js new file mode 100644 index 0000000..aead6a4 --- /dev/null +++ b/ui/app/routes/vault/cluster/policies.js @@ -0,0 +1,28 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { inject as service } from '@ember/service'; +import Route from '@ember/routing/route'; +import ClusterRoute from 'vault/mixins/cluster-route'; + +const ALLOWED_TYPES = ['acl', 'egp', 'rgp']; + +export default Route.extend(ClusterRoute, { + version: service(), + + beforeModel() { + return this.version.fetchFeatures().then(() => { + return this._super(...arguments); + }); + }, + + model(params) { + const policyType = params.type; + if (!ALLOWED_TYPES.includes(policyType)) { + return this.transitionTo(this.routeName, ALLOWED_TYPES[0]); + } + return {}; + }, +}); diff --git a/ui/app/routes/vault/cluster/policies/create.js b/ui/app/routes/vault/cluster/policies/create.js new file mode 100644 index 0000000..a16840d --- /dev/null +++ b/ui/app/routes/vault/cluster/policies/create.js @@ -0,0 +1,31 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { inject as service } from '@ember/service'; +import Route from '@ember/routing/route'; +import UnloadModelRoute from 'vault/mixins/unload-model-route'; +import UnsavedModelRoute from 'vault/mixins/unsaved-model-route'; + +export default Route.extend(UnloadModelRoute, UnsavedModelRoute, { + store: service(), + version: service(), + + model() { + const policyType = this.policyType(); + if (!this.version.hasSentinel && policyType !== 'acl') { + return this.transitionTo('vault.cluster.policies', policyType); + } + return this.store.createRecord(`policy/${policyType}`, {}); + }, + + setupController(controller) { + this._super(...arguments); + controller.set('policyType', this.policyType()); + }, + + policyType() { + return this.paramsFor('vault.cluster.policies').type; + }, +}); diff --git a/ui/app/routes/vault/cluster/policies/index.js b/ui/app/routes/vault/cluster/policies/index.js new file mode 100644 index 0000000..f088a4d --- /dev/null +++ b/ui/app/routes/vault/cluster/policies/index.js @@ -0,0 +1,81 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { inject as service } from '@ember/service'; +import Route from '@ember/routing/route'; +import ClusterRoute from 'vault/mixins/cluster-route'; +import ListRoute from 'core/mixins/list-route'; + +export default Route.extend(ClusterRoute, ListRoute, { + store: service(), + version: service(), + + shouldReturnEmptyModel(policyType, version) { + return policyType !== 'acl' && (version.get('isOSS') || !version.get('hasSentinel')); + }, + + model(params) { + const policyType = this.policyType(); + if (this.shouldReturnEmptyModel(policyType, this.version)) { + return; + } + return this.store + .lazyPaginatedQuery(`policy/${policyType}`, { + page: params.page, + pageFilter: params.pageFilter, + responsePath: 'data.keys', + }) + .catch((err) => { + // acls will never be empty, but sentinel policies can be + if (err.httpStatus === 404 && this.policyType() !== 'acl') { + return []; + } else { + throw err; + } + }); + }, + + setupController(controller, model) { + const params = this.paramsFor(this.routeName); + if (!model) { + controller.setProperties({ + model: null, + policyType: this.policyType(), + }); + return; + } + controller.setProperties({ + model, + filter: params.pageFilter || '', + page: model.get('meta.currentPage') || 1, + policyType: this.policyType(), + }); + }, + + resetController(controller, isExiting) { + this._super(...arguments); + if (isExiting) { + controller.set('filter', ''); + } + }, + + actions: { + willTransition(transition) { + window.scrollTo(0, 0); + if (!transition || transition.targetName !== this.routeName) { + this.store.clearAllDatasets(); + } + return true; + }, + reload() { + this.store.clearAllDatasets(); + this.refresh(); + }, + }, + + policyType() { + return this.paramsFor('vault.cluster.policies').type; + }, +}); diff --git a/ui/app/routes/vault/cluster/policy.js b/ui/app/routes/vault/cluster/policy.js new file mode 100644 index 0000000..1241f78 --- /dev/null +++ b/ui/app/routes/vault/cluster/policy.js @@ -0,0 +1,29 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { inject as service } from '@ember/service'; +import Route from '@ember/routing/route'; +import ClusterRoute from 'vault/mixins/cluster-route'; + +const ALLOWED_TYPES = ['acl', 'egp', 'rgp']; + +export default Route.extend(ClusterRoute, { + version: service(), + beforeModel() { + return this.version.fetchFeatures().then(() => { + return this._super(...arguments); + }); + }, + model(params) { + const policyType = params.type; + if (!ALLOWED_TYPES.includes(policyType)) { + return this.transitionTo('vault.cluster.policies', ALLOWED_TYPES[0]); + } + if (!this.version.hasSentinel && policyType !== 'acl') { + return this.transitionTo('vault.cluster.policies', policyType); + } + return {}; + }, +}); diff --git a/ui/app/routes/vault/cluster/policy/edit.js b/ui/app/routes/vault/cluster/policy/edit.js new file mode 100644 index 0000000..ddc0f5a --- /dev/null +++ b/ui/app/routes/vault/cluster/policy/edit.js @@ -0,0 +1,9 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import UnsavedModelRoute from 'vault/mixins/unsaved-model-route'; +import ShowRoute from './show'; + +export default ShowRoute.extend(UnsavedModelRoute, {}); diff --git a/ui/app/routes/vault/cluster/policy/index.js b/ui/app/routes/vault/cluster/policy/index.js new file mode 100644 index 0000000..76c71e7 --- /dev/null +++ b/ui/app/routes/vault/cluster/policy/index.js @@ -0,0 +1,12 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; + +export default Route.extend({ + beforeModel() { + return this.transitionTo('vault.cluster.policies', 'acl'); + }, +}); diff --git a/ui/app/routes/vault/cluster/policy/show.js b/ui/app/routes/vault/cluster/policy/show.js new file mode 100644 index 0000000..8194b1a --- /dev/null +++ b/ui/app/routes/vault/cluster/policy/show.js @@ -0,0 +1,41 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { hash } from 'rsvp'; +import Route from '@ember/routing/route'; +import UnloadModelRoute from 'vault/mixins/unload-model-route'; +import { inject as service } from '@ember/service'; + +export default Route.extend(UnloadModelRoute, { + store: service(), + + beforeModel() { + const params = this.paramsFor(this.routeName); + const policyType = this.policyType(); + if (policyType === 'acl' && params.policy_name === 'root') { + return this.transitionTo('vault.cluster.policies', 'acl'); + } + }, + + model(params) { + const type = this.policyType(); + return hash({ + policy: this.store.findRecord(`policy/${type}`, params.policy_name), + capabilities: this.store.findRecord('capabilities', `sys/policies/${type}/${params.policy_name}`), + }); + }, + + setupController(controller, model) { + controller.setProperties({ + model: model.policy, + capabilities: model.capabilities, + policyType: this.policyType(), + }); + }, + + policyType() { + return this.paramsFor('vault.cluster.policy').type; + }, +}); diff --git a/ui/app/routes/vault/cluster/redirect.js b/ui/app/routes/vault/cluster/redirect.js new file mode 100644 index 0000000..f80e638 --- /dev/null +++ b/ui/app/routes/vault/cluster/redirect.js @@ -0,0 +1,35 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; +import { AUTH, CLUSTER } from 'vault/lib/route-paths'; + +export default class VaultClusterRedirectRoute extends Route { + @service auth; + @service router; + + beforeModel({ to: { queryParams } }) { + let transition; + const isAuthed = this.auth.currentToken; + // eslint-disable-next-line ember/no-controller-access-in-routes + const controller = this.controllerFor('vault'); + const { redirect_to, ...otherParams } = queryParams; + + if (isAuthed && redirect_to) { + // if authenticated and redirect exists, redirect to that place and strip other params + transition = this.router.replaceWith(redirect_to); + } else if (isAuthed) { + // if authed no redirect, go to cluster + transition = this.router.replaceWith(CLUSTER, { queryParams: otherParams }); + } else { + // default go to Auth + transition = this.router.replaceWith(AUTH, { queryParams: otherParams }); + } + transition.followRedirects().then(() => { + controller.set('redirectTo', ''); + }); + } +} diff --git a/ui/app/routes/vault/cluster/replication-dr-promote/details.js b/ui/app/routes/vault/cluster/replication-dr-promote/details.js new file mode 100644 index 0000000..4f504a5 --- /dev/null +++ b/ui/app/routes/vault/cluster/replication-dr-promote/details.js @@ -0,0 +1,15 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { inject as service } from '@ember/service'; +import Base from '../cluster-route-base'; + +export default Base.extend({ + replicationMode: service(), + beforeModel() { + this._super(...arguments); + this.replicationMode.setMode('dr'); + }, +}); diff --git a/ui/app/routes/vault/cluster/replication-dr-promote/index.js b/ui/app/routes/vault/cluster/replication-dr-promote/index.js new file mode 100644 index 0000000..4f504a5 --- /dev/null +++ b/ui/app/routes/vault/cluster/replication-dr-promote/index.js @@ -0,0 +1,15 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { inject as service } from '@ember/service'; +import Base from '../cluster-route-base'; + +export default Base.extend({ + replicationMode: service(), + beforeModel() { + this._super(...arguments); + this.replicationMode.setMode('dr'); + }, +}); diff --git a/ui/app/routes/vault/cluster/secrets.js b/ui/app/routes/vault/cluster/secrets.js new file mode 100644 index 0000000..d2ddd1e --- /dev/null +++ b/ui/app/routes/vault/cluster/secrets.js @@ -0,0 +1,9 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import ClusterRoute from 'vault/mixins/cluster-route'; + +export default Route.extend(ClusterRoute); diff --git a/ui/app/routes/vault/cluster/secrets/backend.js b/ui/app/routes/vault/cluster/secrets/backend.js new file mode 100644 index 0000000..99a97b4 --- /dev/null +++ b/ui/app/routes/vault/cluster/secrets/backend.js @@ -0,0 +1,34 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { inject as service } from '@ember/service'; +import Route from '@ember/routing/route'; +export default Route.extend({ + store: service(), + flashMessages: service(), + secretMountPath: service(), + oldModel: null, + + model(params) { + const { backend } = params; + this.secretMountPath.update(backend); + return this.store + .query('secret-engine', { + path: backend, + }) + .then((model) => { + if (model) { + return model.get('firstObject'); + } + }); + }, + + afterModel(model, transition) { + const path = model && model.get('path'); + if (transition.targetName === this.routeName) { + return this.replaceWith('vault.cluster.secrets.backend.list-root', path); + } + }, +}); diff --git a/ui/app/routes/vault/cluster/secrets/backend/actions.js b/ui/app/routes/vault/cluster/secrets/backend/actions.js new file mode 100644 index 0000000..cc9e12f --- /dev/null +++ b/ui/app/routes/vault/cluster/secrets/backend/actions.js @@ -0,0 +1,35 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import EditBase from './secret-edit'; +import utils from 'vault/lib/key-utils'; + +export default EditBase.extend({ + queryParams: { + selectedAction: { + replace: true, + }, + }, + + templateName: 'vault/cluster/secrets/backend/transitActionsLayout', + + beforeModel() { + const { secret } = this.paramsFor(this.routeName); + const parentKey = utils.parentKeyForKey(secret); + const { backend } = this.paramsFor('vault.cluster.secrets.backend'); + if (this.backendType(backend) !== 'transit') { + if (parentKey) { + return this.transitionTo('vault.cluster.secrets.backend.show', parentKey); + } else { + return this.transitionTo('vault.cluster.secrets.backend.show-root'); + } + } + }, + setupController(controller, model) { + this._super(...arguments); + const { selectedAction } = this.paramsFor(this.routeName); + controller.set('selectedAction', selectedAction || model.secret.get('supportedActions.firstObject')); + }, +}); diff --git a/ui/app/routes/vault/cluster/secrets/backend/configuration.js b/ui/app/routes/vault/cluster/secrets/backend/configuration.js new file mode 100644 index 0000000..0e3f591 --- /dev/null +++ b/ui/app/routes/vault/cluster/secrets/backend/configuration.js @@ -0,0 +1,36 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { inject as service } from '@ember/service'; +import Route from '@ember/routing/route'; + +export default Route.extend({ + store: service(), + async model() { + const backend = this.modelFor('vault.cluster.secrets.backend'); + if (backend.isV2KV) { + const canRead = await this.store + .findRecord('capabilities', `${backend.id}/config`) + .then((response) => response.canRead); + // only set these config params if they can read the config endpoint. + if (canRead) { + // design wants specific default to show that can't be set in the model + backend.set('casRequired', backend.casRequired ? backend.casRequired : 'False'); + backend.set( + 'deleteVersionAfter', + backend.deleteVersionAfter !== '0s' ? backend.deleteVersionAfter : 'Never delete' + ); + backend.set('maxVersions', backend.maxVersions ? backend.maxVersions : 'Not set'); + } else { + // remove the default values from the model if they don't have read access otherwise it will display the defaults even if they've been set (because they error on returning config data) + // normally would catch the config error in the secret-v2 adapter, but I need the functions to proceed, not stop. 
So we remove the values here. + backend.set('casRequired', null); + backend.set('deleteVersionAfter', null); + backend.set('maxVersions', null); + } + } + return backend; + }, +}); diff --git a/ui/app/routes/vault/cluster/secrets/backend/create-root.js b/ui/app/routes/vault/cluster/secrets/backend/create-root.js new file mode 100644 index 0000000..d8e8c03 --- /dev/null +++ b/ui/app/routes/vault/cluster/secrets/backend/create-root.js @@ -0,0 +1,68 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { hash } from 'rsvp'; +import { inject as service } from '@ember/service'; +import EditBase from './secret-edit'; + +const secretModel = (store, backend, key) => { + const backendModel = store.peekRecord('secret-engine', backend); + const modelType = backendModel.modelTypeForKV; + if (modelType !== 'secret-v2') { + const model = store.createRecord(modelType, { + path: key, + }); + return model; + } + const secret = store.createRecord(modelType); + secret.set('engine', backendModel); + const version = store.createRecord('secret-v2-version', { + path: key, + }); + secret.set('selectedVersion', version); + return secret; +}; + +const transformModel = (queryParams) => { + const modelType = 'transform'; + if (!queryParams || !queryParams.itemType) return modelType; + + return `${modelType}/${queryParams.itemType}`; +}; + +export default EditBase.extend({ + store: service(), + + createModel(transition) { + const { backend } = this.paramsFor('vault.cluster.secrets.backend'); + let modelType = this.modelType(backend, null, { queryParams: transition.to.queryParams }); + if (modelType === 'role-ssh') { + return this.store.createRecord(modelType, { keyType: 'ca' }); + } + if (modelType === 'transform') { + modelType = transformModel(transition.to.queryParams); + } + if (modelType === 'database/connection' && transition.to?.queryParams?.itemType === 'role') { + modelType = 'database/role'; + } + if (modelType !== 'secret' && modelType !== 'secret-v2') { + return this.store.createRecord(modelType); + } + // create record in capabilities that checks for access to create metadata + // this record is then maybeQueryRecord in the component secret-create-or-update + if (modelType === 'secret-v2') { + // only check for kv2 secrets + this.store.findRecord('capabilities', `${backend}/metadata/`); + } + return secretModel(this.store, backend, transition.to.queryParams.initialKey); + }, + + model(params, transition) { + return hash({ + secret: this.createModel(transition), + capabilities: {}, + }); + }, +}); diff --git a/ui/app/routes/vault/cluster/secrets/backend/create.js b/ui/app/routes/vault/cluster/secrets/backend/create.js new file mode 100644 index 0000000..5a4e62a --- /dev/null +++ b/ui/app/routes/vault/cluster/secrets/backend/create.js @@ -0,0 +1,16 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; + +export default Route.extend({ + beforeModel() { + const { secret, initialKey } = this.paramsFor(this.routeName); + const qp = initialKey || secret; + return this.transitionTo('vault.cluster.secrets.backend.create-root', { + queryParams: { initialKey: qp }, + }); + }, +}); diff --git a/ui/app/routes/vault/cluster/secrets/backend/credentials-root.js b/ui/app/routes/vault/cluster/secrets/backend/credentials-root.js new file mode 100644 index 0000000..2f652b3 --- /dev/null +++ b/ui/app/routes/vault/cluster/secrets/backend/credentials-root.js @@ -0,0 +1,6 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +export { default } from './credentials'; diff --git a/ui/app/routes/vault/cluster/secrets/backend/credentials.js b/ui/app/routes/vault/cluster/secrets/backend/credentials.js new file mode 100644 index 0000000..87b825a --- /dev/null +++ b/ui/app/routes/vault/cluster/secrets/backend/credentials.js @@ -0,0 +1,90 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { resolve } from 'rsvp'; +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; +import ControlGroupError from 'vault/lib/control-group-error'; + +const SUPPORTED_DYNAMIC_BACKENDS = ['database', 'ssh', 'aws', 'pki']; + +export default Route.extend({ + templateName: 'vault/cluster/secrets/backend/credentials', + pathHelp: service('path-help'), + store: service(), + + backendModel() { + return this.modelFor('vault.cluster.secrets.backend'); + }, + + beforeModel() { + const { backend } = this.paramsFor('vault.cluster.secrets.backend'); + if (backend != 'ssh') { + return; + } + const modelType = 'ssh-otp-credential'; + return this.pathHelp.getNewModel(modelType, backend); + }, + + getDatabaseCredential(backend, secret, roleType = '') { + return this.store.queryRecord('database/credential', { backend, secret, roleType }).catch((error) => { + if (error instanceof ControlGroupError) { + throw error; + } + // Unless it's a control group error, we want to pass back error info + // so we can render it on the GenerateCredentialsDatabase component + const status = error?.httpStatus; + let title; + let message = `We ran into a problem and could not continue: ${ + error?.errors ? error.errors[0] : 'See Vault logs for details.' + }`; + if (status === 403) { + // 403 is forbidden + title = 'You are not authorized'; + message = + "Role wasn't found or you do not have permissions. Ask your administrator if you think you should have access."; + } + return { + errorHttpStatus: status, + errorTitle: title, + errorMessage: message, + }; + }); + }, + + async model(params) { + const role = params.secret; + const backendModel = this.backendModel(); + const backendPath = backendModel.get('id'); + const backendType = backendModel.get('type'); + const roleType = params.roleType; + let dbCred; + if (backendType === 'database') { + dbCred = await this.getDatabaseCredential(backendPath, role, roleType); + } + if (!SUPPORTED_DYNAMIC_BACKENDS.includes(backendModel.get('type'))) { + return this.transitionTo('vault.cluster.secrets.backend.list-root', backendPath); + } + return resolve({ + backendPath, + backendType, + roleName: role, + roleType, + dbCred, + }); + }, + + resetController(controller) { + controller.reset(); + }, + + actions: { + willTransition() { + // we do not want to save any of the credential information in the store. + // once the user navigates away from this page, remove all credential info. + this.store.unloadAll('database/credential'); + }, + }, +}); diff --git a/ui/app/routes/vault/cluster/secrets/backend/diff.js b/ui/app/routes/vault/cluster/secrets/backend/diff.js new file mode 100644 index 0000000..2944801 --- /dev/null +++ b/ui/app/routes/vault/cluster/secrets/backend/diff.js @@ -0,0 +1,30 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class diff extends Route { + @service store; + + beforeModel() { + const { backend } = this.paramsFor('vault.cluster.secrets.backend'); + this.backend = backend; + } + + model(params) { + const { id } = params; + return this.store.queryRecord('secret-v2', { + backend: this.backend, + id, + }); + } + + setupController(controller, model) { + controller.set('backend', this.backend); // for backendCrumb + controller.set('id', model.id); // for navigation on tabs + controller.set('model', model); + } +} diff --git a/ui/app/routes/vault/cluster/secrets/backend/edit-metadata.js b/ui/app/routes/vault/cluster/secrets/backend/edit-metadata.js new file mode 100644 index 0000000..9d46a72 --- /dev/null +++ b/ui/app/routes/vault/cluster/secrets/backend/edit-metadata.js @@ -0,0 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Metadata from './metadata'; + +export default class EditMetadataRoute extends Metadata {} diff --git a/ui/app/routes/vault/cluster/secrets/backend/edit-root.js b/ui/app/routes/vault/cluster/secrets/backend/edit-root.js new file mode 100644 index 0000000..fa2cc77 --- /dev/null +++ b/ui/app/routes/vault/cluster/secrets/backend/edit-root.js @@ -0,0 +1,6 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +export { default } from './edit'; diff --git a/ui/app/routes/vault/cluster/secrets/backend/edit.js b/ui/app/routes/vault/cluster/secrets/backend/edit.js new file mode 100644 index 0000000..1d353ae --- /dev/null +++ b/ui/app/routes/vault/cluster/secrets/backend/edit.js @@ -0,0 +1,14 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import EditBase from './secret-edit'; + +export default EditBase.extend({ + queryParams: { + version: { + refreshModel: true, + }, + }, +}); diff --git a/ui/app/routes/vault/cluster/secrets/backend/index.js b/ui/app/routes/vault/cluster/secrets/backend/index.js new file mode 100644 index 0000000..56a271b --- /dev/null +++ b/ui/app/routes/vault/cluster/secrets/backend/index.js @@ -0,0 +1,12 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; + +export default Route.extend({ + beforeModel() { + return this.replaceWith('vault.cluster.secrets.backend.list-root'); + }, +}); diff --git a/ui/app/routes/vault/cluster/secrets/backend/list-root.js b/ui/app/routes/vault/cluster/secrets/backend/list-root.js new file mode 100644 index 0000000..eb7ed18 --- /dev/null +++ b/ui/app/routes/vault/cluster/secrets/backend/list-root.js @@ -0,0 +1,6 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +export { default } from './list'; diff --git a/ui/app/routes/vault/cluster/secrets/backend/list.js b/ui/app/routes/vault/cluster/secrets/backend/list.js new file mode 100644 index 0000000..575b5b7 --- /dev/null +++ b/ui/app/routes/vault/cluster/secrets/backend/list.js @@ -0,0 +1,253 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { set } from '@ember/object'; +import { hash, all } from 'rsvp'; +import Route from '@ember/routing/route'; +import { supportedSecretBackends } from 'vault/helpers/supported-secret-backends'; +import { allEngines } from 'vault/helpers/mountable-secret-engines'; +import { inject as service } from '@ember/service'; +import { normalizePath } from 'vault/utils/path-encoding-helpers'; +import { assert } from '@ember/debug'; + +const SUPPORTED_BACKENDS = supportedSecretBackends(); + +export default Route.extend({ + store: service(), + templateName: 'vault/cluster/secrets/backend/list', + pathHelp: service('path-help'), + router: service(), + + // By default assume user doesn't have permissions + noMetadataPermissions: true, + + queryParams: { + page: { + refreshModel: true, + }, + pageFilter: { + refreshModel: true, + }, + tab: { + refreshModel: true, + }, + }, + + modelTypeForTransform(tab) { + let modelType; + switch (tab) { + case 'role': + modelType = 'transform/role'; + break; + case 'template': + modelType = 'transform/template'; + break; + case 'alphabet': + modelType = 'transform/alphabet'; + break; + default: // CBS TODO: transform/transformation + modelType = 'transform'; + break; + } + return modelType; + }, + + secretParam() { + const { secret } = this.paramsFor(this.routeName); + return secret ? normalizePath(secret) : ''; + }, + + enginePathParam() { + const { backend } = this.paramsFor('vault.cluster.secrets.backend'); + return backend; + }, + + beforeModel() { + const secret = this.secretParam(); + const backend = this.enginePathParam(); + const { tab } = this.paramsFor('vault.cluster.secrets.backend.list-root'); + const secretEngine = this.store.peekRecord('secret-engine', backend); + const type = secretEngine?.engineType; + assert('secretEngine.engineType is not defined', !!type); + const engineRoute = allEngines().findBy('type', type)?.engineRoute; + + if (!type || !SUPPORTED_BACKENDS.includes(type)) { + return this.router.transitionTo('vault.cluster.secrets'); + } + if (this.routeName === 'vault.cluster.secrets.backend.list' && !secret.endsWith('/')) { + return this.router.replaceWith('vault.cluster.secrets.backend.list', secret + '/'); + } + if (engineRoute) { + return this.router.transitionTo(`vault.cluster.secrets.backend.${engineRoute}`, backend); + } + const modelType = this.getModelType(backend, tab); + return this.pathHelp.getNewModel(modelType, backend).then(() => { + this.store.unloadAll('capabilities'); + }); + }, + + getModelType(backend, tab) { + const secretEngine = this.store.peekRecord('secret-engine', backend); + const type = secretEngine.engineType; + const types = { + database: tab === 'role' ? 
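+      // the database engine lists either roles or connections depending on the active tab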
+        'database/role' : 'database/connection',
+      transit: 'transit-key',
+      ssh: 'role-ssh',
+      transform: this.modelTypeForTransform(tab),
+      aws: 'role-aws',
+      pki: `pki/${tab || 'pki-role'}`,
+      // secret or secret-v2
+      cubbyhole: 'secret',
+      kv: secretEngine.modelTypeForKV,
+      keymgmt: `keymgmt/${tab || 'key'}`,
+      generic: secretEngine.modelTypeForKV,
+    };
+    return types[type];
+  },
+
+  async model(params) {
+    const secret = this.secretParam() || '';
+    const backend = this.enginePathParam();
+    const backendModel = this.modelFor('vault.cluster.secrets.backend');
+    const modelType = this.getModelType(backend, params.tab);
+
+    return hash({
+      secret,
+      secrets: this.store
+        .lazyPaginatedQuery(modelType, {
+          id: secret,
+          backend,
+          responsePath: 'data.keys',
+          page: params.page || 1,
+          pageFilter: params.pageFilter,
+        })
+        .then((model) => {
+          this.set('noMetadataPermissions', false);
+          this.set('has404', false);
+          return model;
+        })
+        .catch((err) => {
+          // if we're at the root we don't want to throw
+          if (backendModel && err.httpStatus === 404 && secret === '') {
+            this.set('noMetadataPermissions', false);
+            return [];
+          } else if (err.httpStatus === 403 && backendModel.isV2KV) {
+            this.set('noMetadataPermissions', true);
+            return [];
+          } else {
+            // else we're throwing and dealing with this in the error action
+            throw err;
+          }
+        }),
+    });
+  },
+
+  afterModel(model) {
+    const { tab } = this.paramsFor(this.routeName);
+    const backend = this.enginePathParam();
+    if (!tab || tab !== 'cert') {
+      return;
+    }
+    return all(
+      // these ids are treated specially by vault's api, but it's also
+      // possible that there is no certificate for them. In order to know,
+      // we fetch them specifically on the list page, and then unload the
+      // records if there is no `certificate` attribute on the resultant model
+      ['ca', 'crl', 'ca_chain'].map((id) => this.store.queryRecord('pki/cert', { id, backend }))
+    ).then(
+      (results) => {
+        results.rejectBy('certificate').forEach((record) => record.unloadRecord());
+        return model;
+      },
+      () => {
+        return model;
+      }
+    );
+  },
+
+  setupController(controller, resolvedModel) {
+    const secretParams = this.paramsFor(this.routeName);
+    const secret = resolvedModel.secret;
+    const model = resolvedModel.secrets;
+    const backend = this.enginePathParam();
+    const backendModel = this.store.peekRecord('secret-engine', backend);
+    const has404 = this.has404;
+    const noMetadataPermissions = this.noMetadataPermissions;
+    // only clear store cache if this is a new model
+    if (secret !== controller.get('baseKey.id')) {
+      this.store.clearAllDatasets();
+    }
+    controller.set('hasModel', true);
+    controller.setProperties({
+      model,
+      has404,
+      noMetadataPermissions,
+      backend,
+      backendModel,
+      baseKey: { id: secret },
+      backendType: backendModel.get('engineType'),
+    });
+    if (!has404) {
+      const pageFilter = secretParams.pageFilter;
+      let filter;
+      if (secret) {
+        filter = secret + (pageFilter || '');
+      } else if (pageFilter) {
+        filter = pageFilter;
+      }
+      controller.setProperties({
+        filter: filter || '',
+        page: model.meta?.currentPage || 1,
+      });
+    }
+  },
+
+  resetController(controller, isExiting) {
+    this._super(...arguments);
+    if (isExiting) {
+      controller.set('pageFilter', null);
+      controller.set('filter', null);
+    }
+  },
+
+  actions: {
+    error(error, transition) {
+      const secret = this.secretParam();
+      const backend = this.enginePathParam();
+      const is404 = error.httpStatus === 404;
+      /* eslint-disable-next-line ember/no-controller-access-in-routes */
+      const hasModel =
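+        // hasModel is set in setupController once a list has rendered, so a truthy
+        // value here means there is stale data we can keep showing if a 404 comes back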
+        this.controllerFor(this.routeName).get('hasModel');
+
+      // this will occur if we've deleted something,
+      // and navigated to its parent, and the parent doesn't exist -
+      // this is often the case with nested keys in kv-like engines
+      if (transition.data.isDeletion && is404) {
+        throw error;
+      }
+      set(error, 'secret', secret);
+      set(error, 'isRoot', true);
+      set(error, 'backend', backend);
+      // only swallow the error if we have a previous model
+      if (hasModel && is404) {
+        this.set('has404', true);
+        transition.abort();
+        return false;
+      }
+      return true;
+    },
+
+    willTransition(transition) {
+      window.scrollTo(0, 0);
+      if (transition.targetName !== this.routeName) {
+        this.store.clearAllDatasets();
+      }
+      return true;
+    },
+    reload() {
+      this.store.clearAllDatasets();
+      this.refresh();
+    },
+  },
+});
diff --git a/ui/app/routes/vault/cluster/secrets/backend/metadata.js b/ui/app/routes/vault/cluster/secrets/backend/metadata.js
new file mode 100644
index 0000000..935ad97
--- /dev/null
+++ b/ui/app/routes/vault/cluster/secrets/backend/metadata.js
@@ -0,0 +1,41 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import Route from '@ember/routing/route';
+import { inject as service } from '@ember/service';
+
+export default class MetadataShow extends Route {
+  @service store;
+  noReadAccess = false;
+
+  beforeModel() {
+    const { backend } = this.paramsFor('vault.cluster.secrets.backend');
+    this.backend = backend;
+  }
+
+  model(params) {
+    const { secret } = params;
+    this.id = secret;
+    return this.store
+      .queryRecord('secret-v2', {
+        backend: this.backend,
+        id: secret,
+      })
+      .catch((error) => {
+        // there was likely an error reading the metadata.
+        // still load the page and handle what is shown by filtering on this property
+        if (error.httpStatus === 403) {
+          this.noReadAccess = true;
+        }
+      });
+  }
+
+  setupController(controller, model) {
+    controller.set('backend', this.backend); // for backendCrumb
+    controller.set('id', this.id); // for navigation on tabs
+    controller.set('model', model);
+    controller.set('noReadAccess', this.noReadAccess);
+  }
+}
diff --git a/ui/app/routes/vault/cluster/secrets/backend/overview.js b/ui/app/routes/vault/cluster/secrets/backend/overview.js
new file mode 100644
index 0000000..a4bfb99
--- /dev/null
+++ b/ui/app/routes/vault/cluster/secrets/backend/overview.js
@@ -0,0 +1,94 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { hash } from 'rsvp'; +import { inject as service } from '@ember/service'; + +export default Route.extend({ + store: service(), + type: '', + + enginePathParam() { + const { backend } = this.paramsFor('vault.cluster.secrets.backend'); + return backend; + }, + + async fetchConnection(queryOptions) { + try { + return await this.store.query('database/connection', queryOptions); + } catch (e) { + return e.httpStatus; + } + }, + + async fetchAllRoles(queryOptions) { + try { + return await this.store.query('database/role', queryOptions); + } catch (e) { + return e.httpStatus; + } + }, + + pathQuery(backend, endpoint) { + return { + id: `${backend}/${endpoint}/`, + }; + }, + + async fetchCapabilitiesRole(queryOptions) { + return this.store.queryRecord('capabilities', this.pathQuery(queryOptions.backend, 'roles')); + }, + + async fetchCapabilitiesStaticRole(queryOptions) { + return this.store.queryRecord('capabilities', this.pathQuery(queryOptions.backend, 'static-roles')); + }, + + async fetchCapabilitiesConnection(queryOptions) { + return this.store.queryRecord('capabilities', this.pathQuery(queryOptions.backend, 'config')); + }, + + model() { + const backend = this.enginePathParam(); + const queryOptions = { backend, id: '' }; + + const connection = this.fetchConnection(queryOptions); + const role = this.fetchAllRoles(queryOptions); + const roleCapabilities = this.fetchCapabilitiesRole(queryOptions); + const staticRoleCapabilities = this.fetchCapabilitiesStaticRole(queryOptions); + const connectionCapabilities = this.fetchCapabilitiesConnection(queryOptions); + + return hash({ + backend, + connections: connection, + roles: role, + engineType: 'database', + id: backend, + roleCapabilities, + staticRoleCapabilities, + connectionCapabilities, + icon: 'database', + }); + }, + + setupController(controller, model) { + this._super(...arguments); + const showEmptyState = model.connections === 404 && model.roles === 404; + const noConnectionCapabilities = + !model.connectionCapabilities.canList && + !model.connectionCapabilities.canCreate && + !model.connectionCapabilities.canUpdate; + + const emptyStateMessage = function () { + if (noConnectionCapabilities) { + return 'You cannot yet generate credentials. Ask your administrator if you think you should have access.'; + } else { + return 'You can connect an external database to Vault. We recommend that you create a user for Vault rather than using the database root user.'; + } + }; + controller.set('showEmptyState', showEmptyState); + controller.set('emptyStateMessage', emptyStateMessage()); + }, +}); diff --git a/ui/app/routes/vault/cluster/secrets/backend/secret-edit.js b/ui/app/routes/vault/cluster/secrets/backend/secret-edit.js new file mode 100644 index 0000000..1e24320 --- /dev/null +++ b/ui/app/routes/vault/cluster/secrets/backend/secret-edit.js @@ -0,0 +1,356 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import AdapterError from '@ember-data/adapter/error'; +import { set } from '@ember/object'; +import { resolve } from 'rsvp'; +import { inject as service } from '@ember/service'; +import Route from '@ember/routing/route'; +import utils from 'vault/lib/key-utils'; +import UnloadModelRoute from 'vault/mixins/unload-model-route'; +import { encodePath, normalizePath } from 'vault/utils/path-encoding-helpers'; + +export default Route.extend(UnloadModelRoute, { + store: service(), + pathHelp: service('path-help'), + wizard: service(), + + secretParam() { + const { secret } = this.paramsFor(this.routeName); + return secret ? normalizePath(secret) : ''; + }, + + enginePathParam() { + const { backend } = this.paramsFor('vault.cluster.secrets.backend'); + return backend; + }, + + capabilities(secret, modelType) { + const backend = this.enginePathParam(); + const backendModel = this.modelFor('vault.cluster.secrets.backend'); + const backendType = backendModel.engineType; + let path; + if (backendModel.isV2KV) { + path = `${backend}/data/${secret}`; + } else if (backendType === 'transit') { + path = backend + '/keys/' + secret; + } else if (backendType === 'ssh' || backendType === 'aws') { + path = backend + '/roles/' + secret; + } else if (modelType.startsWith('transform/')) { + path = this.buildTransformPath(backend, secret, modelType); + } else { + path = backend + '/' + secret; + } + return this.store.findRecord('capabilities', path); + }, + + buildTransformPath(backend, secret, modelType) { + const noun = modelType.split('/')[1]; + return `${backend}/${noun}/${secret}`; + }, + + modelTypeForTransform(secretName) { + if (!secretName) return 'transform'; + if (secretName.startsWith('role/')) { + return 'transform/role'; + } + if (secretName.startsWith('template/')) { + return 'transform/template'; + } + if (secretName.startsWith('alphabet/')) { + return 'transform/alphabet'; + } + return 'transform'; // TODO: transform/transformation; + }, + + transformSecretName(secret, modelType) { + const noun = modelType.split('/')[1]; + return secret.replace(`${noun}/`, ''); + }, + + backendType() { + return this.modelFor('vault.cluster.secrets.backend').get('engineType'); + }, + + templateName: 'vault/cluster/secrets/backend/secretEditLayout', + + beforeModel({ to: { queryParams } }) { + const secret = this.secretParam(); + return this.buildModel(secret, queryParams).then(() => { + const parentKey = utils.parentKeyForKey(secret); + const mode = this.routeName.split('.').pop(); + if (mode === 'edit' && utils.keyIsFolder(secret)) { + if (parentKey) { + return this.transitionTo('vault.cluster.secrets.backend.list', encodePath(parentKey)); + } else { + return this.transitionTo('vault.cluster.secrets.backend.list-root'); + } + } + }); + }, + + buildModel(secret, queryParams) { + const backend = this.enginePathParam(); + const modelType = this.modelType(backend, secret, { queryParams }); + if (['secret', 'secret-v2'].includes(modelType)) { + return resolve(); + } + return this.pathHelp.getNewModel(modelType, backend); + }, + + modelType(backend, secret, options = {}) { + const backendModel = this.modelFor('vault.cluster.secrets.backend', backend); + const type = backendModel.get('engineType'); + const types = { + database: secret && secret.startsWith('role/') ? 'database/role' : 'database/connection', + transit: 'transit-key', + ssh: 'role-ssh', + transform: this.modelTypeForTransform(secret), + aws: 'role-aws', + pki: secret && secret.startsWith('cert/') ? 
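+      // pki items prefixed with cert/ are certificates rather than roles;
+      // model() below strips the cert/ prefix back off the id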
+        'pki/cert' : 'pki/pki-role',
+      cubbyhole: 'secret',
+      kv: backendModel.modelTypeForKV,
+      keymgmt: `keymgmt/${options.queryParams?.itemType || 'key'}`,
+      generic: backendModel.modelTypeForKV,
+    };
+    return types[type];
+  },
+
+  getTargetVersion(currentVersion, paramsVersion) {
+    if (currentVersion) {
+      // we have the secret metadata, so we can read the currentVersion but give priority to any
+      // version passed in via the url
+      return parseInt(paramsVersion || currentVersion, 10);
+    } else {
+      // we've got a stub model because we don't have read access on the metadata endpoint
+      return paramsVersion ? parseInt(paramsVersion, 10) : null;
+    }
+  },
+
+  async fetchV2Models(capabilities, secretModel, params) {
+    const backend = this.enginePathParam();
+    const backendModel = this.modelFor('vault.cluster.secrets.backend', backend);
+    const targetVersion = this.getTargetVersion(secretModel.currentVersion, params.version);
+
+    // if we have the metadata, a list of versions is part of the payload
+    const version = secretModel.versions && secretModel.versions.findBy('version', targetVersion);
+    // if it didn't fail the server read, and the version is not attached to the metadata,
+    // this should 404
+    if (!version && secretModel.failedServerRead !== true) {
+      const error = new AdapterError();
+      set(error, 'httpStatus', 404);
+      throw error;
+    }
+    // manually set the related model
+    secretModel.set('engine', backendModel);
+
+    secretModel.set(
+      'selectedVersion',
+      await this.fetchV2VersionModel(capabilities, secretModel, version, targetVersion)
+    );
+    return secretModel;
+  },
+
+  async fetchV2VersionModel(capabilities, secretModel, version, targetVersion) {
+    const secret = this.secretParam();
+    const backend = this.enginePathParam();
+
+    // v2 versions have a composite ID; we generate one here in case we need to
+    // manually set it after a failed fetch later
+    const versionId = targetVersion ?
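+    // the id array is JSON.stringify'd wherever it is used below, matching how
+    // secret-v2-version record ids are serialized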
+      [backend, secret, targetVersion] : [backend, secret];
+
+    let versionModel;
+    try {
+      if (secretModel.failedServerRead) {
+        // we couldn't read metadata, so we want to directly fetch the version
+        versionModel = await this.store.findRecord('secret-v2-version', JSON.stringify(versionId), {
+          reload: true,
+        });
+      } else {
+        // we may have previously errored, so roll it back here
+        version.rollbackAttributes();
+        // if metadata read was successful, the version we have is only a partial model
+        // trigger reload to fetch the whole version model
+        versionModel = await version.reload();
+      }
+    } catch (error) {
+      // cannot read the data endpoint, but we still allow them to see the show page
+      // so they can access metadata if they have permissions
+      if (error.httpStatus === 403) {
+        // versionModel is then a partial model from the metadata (if we have read there), or
+        // we need to create one on the client
+        if (version) {
+          version.set('failedServerRead', true);
+          versionModel = version;
+        } else {
+          this.store.push({
+            data: {
+              type: 'secret-v2-version',
+              id: JSON.stringify(versionId),
+              attributes: {
+                failedServerRead: true,
+              },
+            },
+          });
+          versionModel = this.store.peekRecord('secret-v2-version', JSON.stringify(versionId));
+        }
+      } else {
+        throw error;
+      }
+    }
+    return versionModel;
+  },
+
+  handleSecretModelError(capabilities, secretId, modelType, error) {
+    // can't read the path and don't have update capability, so re-throw
+    if (!capabilities.get('canUpdate') && modelType === 'secret') {
+      throw error;
+    }
+    // don't have access to the metadata for v2 or the secret for v1,
+    // so we make a stub model and mark it as `failedServerRead`
+    this.store.push({
+      data: {
+        id: secretId,
+        type: modelType,
+        attributes: {
+          failedServerRead: true,
+        },
+      },
+    });
+    const secretModel = this.store.peekRecord(modelType, secretId);
+    return secretModel;
+  },
+
+  // wizard will pause unless we manually continue it
+  updateWizard(params) {
+    // verify that keymgmt tutorial is in progress
+    if (params.itemType === 'provider' && this.wizard.nextStep === 'displayProvider') {
+      this.wizard.transitionFeatureMachine(this.wizard.featureState, 'CONTINUE', 'keymgmt');
+    }
+  },
+
+  async model(params, { to: { queryParams } }) {
+    this.updateWizard(params);
+    let secret = this.secretParam();
+    const backend = this.enginePathParam();
+    const modelType = this.modelType(backend, secret, { queryParams });
+    const type = params.type || '';
+    if (!secret) {
+      secret = '\u0020';
+    }
+    if (modelType === 'pki/cert') {
+      secret = secret.replace('cert/', '');
+    }
+    if (modelType.startsWith('transform/')) {
+      secret = this.transformSecretName(secret, modelType);
+    }
+    if (modelType === 'database/role') {
+      secret = secret.replace('role/', '');
+    }
+    let secretModel;
+
+    const capabilities = this.capabilities(secret, modelType);
+    try {
+      secretModel = await this.store.queryRecord(modelType, { id: secret, backend, type });
+    } catch (err) {
+      // we've failed the read request, but if it's a kv-type backend, we want to
+      // do additional checks of the capabilities
+      if (err.httpStatus === 403 && (modelType === 'secret-v2' || modelType === 'secret')) {
+        await capabilities;
+        secretModel = this.handleSecretModelError(capabilities, secret, modelType, err);
+      } else {
+        throw err;
+      }
+    }
+    await capabilities;
+    if (modelType === 'secret-v2') {
+      // after the base model fetch, kv-v2 has a second associated
+      // version model that contains the secret data
+      secretModel = await this.fetchV2Models(capabilities, secretModel, params);
+    }
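+    // resolve the secret model together with its capabilities record;
+    // setupController reads both off of the resolved model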
+    return {
+      secret: secretModel,
+      capabilities,
+    };
+  },
+
+  setupController(controller, model) {
+    this._super(...arguments);
+    const secret = this.secretParam();
+    const backend = this.enginePathParam();
+    const preferAdvancedEdit =
+      /* eslint-disable-next-line ember/no-controller-access-in-routes */
+      this.controllerFor('vault.cluster.secrets.backend').get('preferAdvancedEdit') || false;
+    const backendType = this.backendType();
+    model.secret.setProperties({ backend });
+    controller.setProperties({
+      model: model.secret,
+      capabilities: model.capabilities,
+      baseKey: { id: secret },
+      // mode will be 'show', 'edit', 'create'
+      mode: this.routeName.split('.').pop().replace('-root', ''),
+      backend,
+      preferAdvancedEdit,
+      backendType,
+    });
+  },
+
+  resetController(controller) {
+    if (controller.reset && typeof controller.reset === 'function') {
+      controller.reset();
+    }
+  },
+
+  actions: {
+    error(error) {
+      const secret = this.secretParam();
+      const backend = this.enginePathParam();
+      set(error, 'keyId', backend + '/' + secret);
+      set(error, 'backend', backend);
+      return true;
+    },
+
+    refreshModel() {
+      this.refresh();
+    },
+
+    willTransition(transition) {
+      /* eslint-disable-next-line ember/no-controller-access-in-routes */
+      const { mode, model } = this.controller;
+      const version = model.get('selectedVersion');
+      const changed = model.changedAttributes();
+      const changedKeys = Object.keys(changed);
+
+      // when you don't have read access on metadata we add currentVersion to the model;
+      // this makes it look like you have unsaved changes and prompts a browser warning -
+      // here we are specifically ignoring it.
+      if (mode === 'edit' && changedKeys.length && changedKeys[0] === 'currentVersion') {
+        version && version.rollbackAttributes();
+        return true;
+      }
+      // until we have time to move `backend` on a v1 model to a relationship,
+      // it's going to dirty the model state, so we need to look for it
+      // and explicitly ignore it here
+      if (
+        (mode !== 'show' && changedKeys.length && changedKeys[0] !== 'backend') ||
+        (mode !== 'show' && version && Object.keys(version.changedAttributes()).length)
+      ) {
+        if (
+          window.confirm(
+            'You have unsaved changes. Navigating away will discard these changes. Are you sure you want to discard your changes?'
+          )
+        ) {
+          version && version.rollbackAttributes();
+          model && model.rollbackAttributes();
+          this.unloadModel();
+          return true;
+        } else {
+          transition.abort();
+          return false;
+        }
+      }
+      return this._super(...arguments);
+    },
+  },
+});
diff --git a/ui/app/routes/vault/cluster/secrets/backend/show-root.js b/ui/app/routes/vault/cluster/secrets/backend/show-root.js
new file mode 100644
index 0000000..95e3848
--- /dev/null
+++ b/ui/app/routes/vault/cluster/secrets/backend/show-root.js
@@ -0,0 +1,6 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+export { default } from './show';
diff --git a/ui/app/routes/vault/cluster/secrets/backend/show.js b/ui/app/routes/vault/cluster/secrets/backend/show.js
new file mode 100644
index 0000000..1d353ae
--- /dev/null
+++ b/ui/app/routes/vault/cluster/secrets/backend/show.js
@@ -0,0 +1,14 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import EditBase from './secret-edit'; + +export default EditBase.extend({ + queryParams: { + version: { + refreshModel: true, + }, + }, +}); diff --git a/ui/app/routes/vault/cluster/secrets/backend/sign-root.js b/ui/app/routes/vault/cluster/secrets/backend/sign-root.js new file mode 100644 index 0000000..316a201 --- /dev/null +++ b/ui/app/routes/vault/cluster/secrets/backend/sign-root.js @@ -0,0 +1,6 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +export { default } from './sign'; diff --git a/ui/app/routes/vault/cluster/secrets/backend/sign.js b/ui/app/routes/vault/cluster/secrets/backend/sign.js new file mode 100644 index 0000000..2072949 --- /dev/null +++ b/ui/app/routes/vault/cluster/secrets/backend/sign.js @@ -0,0 +1,55 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import UnloadModel from 'vault/mixins/unload-model-route'; +import { inject as service } from '@ember/service'; + +export default Route.extend(UnloadModel, { + store: service(), + templateName: 'vault/cluster/secrets/backend/sign', + + backendModel() { + return this.modelFor('vault.cluster.secrets.backend'); + }, + + pathQuery(role, backend) { + return { + id: `${backend}/sign/${role}`, + }; + }, + + pathForType() { + return 'sign'; + }, + + model(params) { + const role = params.secret; + const backendModel = this.backendModel(); + const backend = backendModel.get('id'); + + if (backendModel.get('type') !== 'ssh') { + return this.transitionTo('vault.cluster.secrets.backend.list-root', backend); + } + return this.store.queryRecord('capabilities', this.pathQuery(role, backend)).then((capabilities) => { + if (!capabilities.get('canUpdate')) { + return this.transitionTo('vault.cluster.secrets.backend.list-root', backend); + } + return this.store.createRecord('ssh-sign', { + role: { + backend, + id: role, + name: role, + }, + id: `${backend}-${role}`, + }); + }); + }, + + setupController(controller) { + this._super(...arguments); + controller.set('backend', this.backendModel()); + }, +}); diff --git a/ui/app/routes/vault/cluster/secrets/backend/versions-root.js b/ui/app/routes/vault/cluster/secrets/backend/versions-root.js new file mode 100644 index 0000000..e8cbcb6 --- /dev/null +++ b/ui/app/routes/vault/cluster/secrets/backend/versions-root.js @@ -0,0 +1,6 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +export { default } from './version'; diff --git a/ui/app/routes/vault/cluster/secrets/backend/versions.js b/ui/app/routes/vault/cluster/secrets/backend/versions.js new file mode 100644 index 0000000..dbe5ff2 --- /dev/null +++ b/ui/app/routes/vault/cluster/secrets/backend/versions.js @@ -0,0 +1,36 @@ +/** + * Copyright (c) HashiCorp, Inc. 
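The `sign.js` route above gates the SSH sign form on two checks: the backend must be of type `ssh`, and the user must hold update capability on `backend/sign/:role`. A sketch of that gate, assuming a hypothetical `fetchCapabilities` in place of the route's capabilities query:

```js
// Sketch of the gating logic in sign.js: only show the sign form when
// the backend is ssh and the user can update backend/sign/:role.
// `fetchCapabilities` is a hypothetical stand-in for the capabilities lookup.
async function canShowSignForm(backend, role, fetchCapabilities) {
  if (backend.type !== 'ssh') return false;
  const caps = await fetchCapabilities(`${backend.id}/sign/${role}`);
  return caps.canUpdate === true;
}

// usage with a stubbed capabilities lookup
canShowSignForm({ id: 'ssh', type: 'ssh' }, 'deploy', async () => ({ canUpdate: true })).then(
  (ok) => console.log(ok ? 'render form' : 'redirect to list')
);
```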
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import utils from 'vault/lib/key-utils'; +import UnloadModelRoute from 'vault/mixins/unload-model-route'; +import { normalizePath } from 'vault/utils/path-encoding-helpers'; +import { inject as service } from '@ember/service'; + +export default Route.extend(UnloadModelRoute, { + store: service(), + templateName: 'vault/cluster/secrets/backend/versions', + + beforeModel() { + const backendModel = this.modelFor('vault.cluster.secrets.backend'); + const { secret } = this.paramsFor(this.routeName); + const parentKey = utils.parentKeyForKey(secret); + if (backendModel.get('isV2KV')) { + return; + } + if (parentKey) { + return this.transitionTo('vault.cluster.secrets.backend.list', parentKey); + } else { + return this.transitionTo('vault.cluster.secrets.backend.list-root'); + } + }, + + model(params) { + const { secret } = params; + const { backend } = this.paramsFor('vault.cluster.secrets.backend'); + const id = normalizePath(secret); + return this.store.queryRecord('secret-v2', { id, backend }); + }, +}); diff --git a/ui/app/routes/vault/cluster/secrets/backends.js b/ui/app/routes/vault/cluster/secrets/backends.js new file mode 100644 index 0000000..192132f --- /dev/null +++ b/ui/app/routes/vault/cluster/secrets/backends.js @@ -0,0 +1,15 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default Route.extend({ + store: service(), + + model() { + return this.store.query('secret-engine', {}); + }, +}); diff --git a/ui/app/routes/vault/cluster/settings.js b/ui/app/routes/vault/cluster/settings.js new file mode 100644 index 0000000..18878a4 --- /dev/null +++ b/ui/app/routes/vault/cluster/settings.js @@ -0,0 +1,13 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import ClusterRoute from 'vault/mixins/cluster-route'; + +export default Route.extend(ClusterRoute, { + model() { + return {}; + }, +}); diff --git a/ui/app/routes/vault/cluster/settings/auth/configure.js b/ui/app/routes/vault/cluster/settings/auth/configure.js new file mode 100644 index 0000000..e974c0e --- /dev/null +++ b/ui/app/routes/vault/cluster/settings/auth/configure.js @@ -0,0 +1,18 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default Route.extend({ + store: service(), + + model() { + const { method } = this.paramsFor(this.routeName); + return this.store.findAll('auth-method').then(() => { + return this.store.peekRecord('auth-method', method); + }); + }, +}); diff --git a/ui/app/routes/vault/cluster/settings/auth/configure/index.js b/ui/app/routes/vault/cluster/settings/auth/configure/index.js new file mode 100644 index 0000000..5187879 --- /dev/null +++ b/ui/app/routes/vault/cluster/settings/auth/configure/index.js @@ -0,0 +1,15 @@ +/** + * Copyright (c) HashiCorp, Inc. 
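`versions.js` above redirects non-KV-v2 backends to the list view of the secret's parent key. The real helper is `parentKeyForKey` from `vault/lib/key-utils`; a rough sketch of what such a helper does (edge-case behavior may differ):

```js
// Sketch of the parent-key lookup versions.js relies on: for nested
// secret paths the redirect target is the containing "folder".
function parentKeyForKey(key) {
  const parts = key.split('/').filter(Boolean);
  parts.pop();
  return parts.length ? parts.join('/') + '/' : '';
}

console.log(parentKeyForKey('app/team/secret')); // 'app/team/'
console.log(parentKeyForKey('secret'));          // '' -> falls through to list-root
```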
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { tabsForAuthSection } from 'vault/helpers/tabs-for-auth-section'; + +export default Route.extend({ + beforeModel() { + const model = this.modelFor('vault.cluster.settings.auth.configure'); + const section = tabsForAuthSection([model]).firstObject.routeParams.lastObject; + return this.transitionTo('vault.cluster.settings.auth.configure.section', section); + }, +}); diff --git a/ui/app/routes/vault/cluster/settings/auth/configure/section.js b/ui/app/routes/vault/cluster/settings/auth/configure/section.js new file mode 100644 index 0000000..15a5a59 --- /dev/null +++ b/ui/app/routes/vault/cluster/settings/auth/configure/section.js @@ -0,0 +1,104 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import AdapterError from '@ember-data/adapter/error'; +import { inject as service } from '@ember/service'; +import { set } from '@ember/object'; +import Route from '@ember/routing/route'; +import RSVP from 'rsvp'; +import UnloadModelRoute from 'vault/mixins/unload-model-route'; + +export default Route.extend(UnloadModelRoute, { + modelPath: 'model.model', + pathHelp: service('path-help'), + store: service(), + + modelType(backendType, section) { + const MODELS = { + 'aws-client': 'auth-config/aws/client', + 'aws-identity-accesslist': 'auth-config/aws/identity-accesslist', + 'aws-roletag-denylist': 'auth-config/aws/roletag-denylist', + 'azure-configuration': 'auth-config/azure', + 'github-configuration': 'auth-config/github', + 'gcp-configuration': 'auth-config/gcp', + 'jwt-configuration': 'auth-config/jwt', + 'oidc-configuration': 'auth-config/oidc', + 'kubernetes-configuration': 'auth-config/kubernetes', + 'ldap-configuration': 'auth-config/ldap', + 'okta-configuration': 'auth-config/okta', + 'radius-configuration': 'auth-config/radius', + }; + return MODELS[`${backendType}-${section}`]; + }, + + beforeModel() { + const { section_name } = this.paramsFor(this.routeName); + if (section_name === 'options') { + return; + } + const { method } = this.paramsFor('vault.cluster.settings.auth.configure'); + const backend = this.modelFor('vault.cluster.settings.auth.configure'); + const modelType = this.modelType(backend.type, section_name); + return this.pathHelp.getNewModel(modelType, method, backend.apiPath); + }, + + model(params) { + const backend = this.modelFor('vault.cluster.settings.auth.configure'); + const { section_name: section } = params; + if (section === 'options') { + return RSVP.hash({ + model: backend, + section, + }); + } + const modelType = this.modelType(backend.get('type'), section); + if (!modelType) { + const error = new AdapterError(); + set(error, 'httpStatus', 404); + throw error; + } + const model = this.store.peekRecord(modelType, backend.id); + if (model) { + return RSVP.hash({ + model, + section, + }); + } + return this.store + .findRecord(modelType, backend.id) + .then((config) => { + config.set('backend', backend); + return RSVP.hash({ + model: config, + section, + }); + }) + .catch((e) => { + let config; + // if you haven't saved a config, the API 404s, so create one here to edit and return it + if (e.httpStatus === 404) { + config = this.store.createRecord(modelType, { + id: backend.id, + }); + config.set('backend', backend); + + return RSVP.hash({ + model: config, + section, + }); + } + throw e; + }); + }, + + actions: { + willTransition() { + if (this.currentModel.model.constructor.modelName !== 'auth-method') { + this.unloadModel(); + return 
true; + } + }, + }, +}); diff --git a/ui/app/routes/vault/cluster/settings/auth/enable.js b/ui/app/routes/vault/cluster/settings/auth/enable.js new file mode 100644 index 0000000..356ac5e --- /dev/null +++ b/ui/app/routes/vault/cluster/settings/auth/enable.js @@ -0,0 +1,22 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class VaultClusterSettingsAuthEnableRoute extends Route { + @service store; + + beforeModel() { + // Unload to prevent naming collisions when we mount a new engine + this.store.unloadAll('auth-method'); + } + + model() { + const authMethod = this.store.createRecord('auth-method'); + authMethod.set('config', this.store.createRecord('mount-config')); + return authMethod; + } +} diff --git a/ui/app/routes/vault/cluster/settings/auth/index.js b/ui/app/routes/vault/cluster/settings/auth/index.js new file mode 100644 index 0000000..ed754a4 --- /dev/null +++ b/ui/app/routes/vault/cluster/settings/auth/index.js @@ -0,0 +1,12 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; + +export default Route.extend({ + beforeModel() { + return this.replaceWith('vault.cluster.settings.auth.enable'); + }, +}); diff --git a/ui/app/routes/vault/cluster/settings/configure-secret-backend.js b/ui/app/routes/vault/cluster/settings/configure-secret-backend.js new file mode 100644 index 0000000..e7ceeb2 --- /dev/null +++ b/ui/app/routes/vault/cluster/settings/configure-secret-backend.js @@ -0,0 +1,70 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import AdapterError from '@ember-data/adapter/error'; +import { set } from '@ember/object'; +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; +const CONFIGURABLE_BACKEND_TYPES = ['aws', 'ssh']; + +export default Route.extend({ + store: service(), + + model() { + const { backend } = this.paramsFor(this.routeName); + return this.store.query('secret-engine', { path: backend }).then((modelList) => { + const model = modelList && modelList.get('firstObject'); + if (!model || !CONFIGURABLE_BACKEND_TYPES.includes(model.get('type'))) { + const error = new AdapterError(); + set(error, 'httpStatus', 404); + throw error; + } + return this.store.findRecord('secret-engine', backend).then( + () => { + return model; + }, + () => { + return model; + } + ); + }); + }, + + afterModel(model) { + const type = model.get('type'); + + if (type === 'aws') { + return this.store + .queryRecord('secret-engine', { + backend: model.id, + type, + }) + .then( + () => model, + () => model + ); + } + return model; + }, + + setupController(controller, model) { + if (model.get('publicKey')) { + controller.set('configured', true); + } + return this._super(...arguments); + }, + + resetController(controller, isExiting) { + if (isExiting) { + controller.reset(); + } + }, + + actions: { + refreshRoute() { + this.refresh(); + }, + }, +}); diff --git a/ui/app/routes/vault/cluster/settings/index.js b/ui/app/routes/vault/cluster/settings/index.js new file mode 100644 index 0000000..0a66a79 --- /dev/null +++ b/ui/app/routes/vault/cluster/settings/index.js @@ -0,0 +1,15 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; + +export default Route.extend({ + beforeModel: function (transition) { + if (transition.targetName === this.routeName) { + transition.abort(); + return this.replaceWith('vault.cluster.settings.mount-secret-backend'); + } + }, +}); diff --git a/ui/app/routes/vault/cluster/settings/mount-secret-backend.js b/ui/app/routes/vault/cluster/settings/mount-secret-backend.js new file mode 100644 index 0000000..de9b16d --- /dev/null +++ b/ui/app/routes/vault/cluster/settings/mount-secret-backend.js @@ -0,0 +1,22 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default class VaultClusterSettingsMountSecretBackendRoute extends Route { + @service store; + + beforeModel() { + // Unload to prevent naming collisions when we mount a new engine + this.store.unloadAll('secret-engine'); + } + + model() { + const secretEngine = this.store.createRecord('secret-engine'); + secretEngine.set('config', this.store.createRecord('mount-config')); + return secretEngine; + } +} diff --git a/ui/app/routes/vault/cluster/settings/seal.js b/ui/app/routes/vault/cluster/settings/seal.js new file mode 100644 index 0000000..67a51c1 --- /dev/null +++ b/ui/app/routes/vault/cluster/settings/seal.js @@ -0,0 +1,19 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { hash } from 'rsvp'; +import Route from '@ember/routing/route'; +import { inject as service } from '@ember/service'; + +export default Route.extend({ + store: service(), + + model() { + return hash({ + cluster: this.modelFor('vault.cluster'), + seal: this.store.findRecord('capabilities', 'sys/seal'), + }); + }, +}); diff --git a/ui/app/routes/vault/cluster/storage.js b/ui/app/routes/vault/cluster/storage.js new file mode 100644 index 0000000..e675091 --- /dev/null +++ b/ui/app/routes/vault/cluster/storage.js @@ -0,0 +1,25 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import ClusterRoute from 'vault/mixins/cluster-route'; +import { inject as service } from '@ember/service'; + +export default Route.extend(ClusterRoute, { + store: service(), + + model() { + // findAll method will return all records in store as well as response from server + // when removing a peer via the cli, stale records would continue to appear until refresh + // query method will only return records from response + return this.store.query('server', {}); + }, + + actions: { + doRefresh() { + this.refresh(); + }, + }, +}); diff --git a/ui/app/routes/vault/cluster/tools.js b/ui/app/routes/vault/cluster/tools.js new file mode 100644 index 0000000..f4ec532 --- /dev/null +++ b/ui/app/routes/vault/cluster/tools.js @@ -0,0 +1,13 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import ClusterRoute from 'vault/mixins/cluster-route'; + +export default Route.extend(ClusterRoute, { + model() { + return this.modelFor('vault.cluster'); + }, +}); diff --git a/ui/app/routes/vault/cluster/tools/index.js b/ui/app/routes/vault/cluster/tools/index.js new file mode 100644 index 0000000..0d93b71 --- /dev/null +++ b/ui/app/routes/vault/cluster/tools/index.js @@ -0,0 +1,20 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { inject as service } from '@ember/service'; +import Route from '@ember/routing/route'; +import { toolsActions } from 'vault/helpers/tools-actions'; + +export default Route.extend({ + currentCluster: service(), + beforeModel(transition) { + const currentCluster = this.currentCluster.cluster.name; + const supportedActions = toolsActions(); + if (transition.targetName === this.routeName) { + transition.abort(); + return this.replaceWith('vault.cluster.tools.tool', currentCluster, supportedActions[0]); + } + }, +}); diff --git a/ui/app/routes/vault/cluster/tools/tool.js b/ui/app/routes/vault/cluster/tools/tool.js new file mode 100644 index 0000000..1eb0a27 --- /dev/null +++ b/ui/app/routes/vault/cluster/tools/tool.js @@ -0,0 +1,31 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Route from '@ember/routing/route'; +import { toolsActions } from 'vault/helpers/tools-actions'; + +export default Route.extend({ + model(params) { + const supportedActions = toolsActions(); + if (supportedActions.includes(params.selected_action)) { + return params.selected_action; + } + throw new Error('Given param is not a supported tool action'); + }, + + setupController(controller, model) { + this._super(...arguments); + controller.set('selectedAction', model); + }, + + actions: { + didTransition() { + const params = this.paramsFor(this.routeName); + /* eslint-disable-next-line ember/no-controller-access-in-routes */ + this.controller.setProperties(params); + return true; + }, + }, +}); diff --git a/ui/app/routes/vault/cluster/unseal.js b/ui/app/routes/vault/cluster/unseal.js new file mode 100644 index 0000000..cca8691 --- /dev/null +++ b/ui/app/routes/vault/cluster/unseal.js @@ -0,0 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import ClusterRoute from './cluster-route-base'; + +export default ClusterRoute.extend({}); diff --git a/ui/app/serializers/application.js b/ui/app/serializers/application.js new file mode 100644 index 0000000..9f66597 --- /dev/null +++ b/ui/app/serializers/application.js @@ -0,0 +1,79 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import JSONSerializer from '@ember-data/serializer/json';
+import { isNone, isBlank } from '@ember/utils';
+import { assign } from '@ember/polyfills';
+import { decamelize } from '@ember/string';
+
+export default JSONSerializer.extend({
+ keyForAttribute: function (attr) {
+ return decamelize(attr);
+ },
+
+ normalizeItems(payload) {
+ if (payload.data && payload.data.keys && Array.isArray(payload.data.keys)) {
+ const models = payload.data.keys.map((key) => {
+ if (typeof key !== 'string') {
+ return key;
+ }
+ const pk = this.primaryKey || 'id';
+ let model = { [pk]: key };
+ // if we've added _requestQuery in the adapter, we want to
+ // attach it to the individual models
+ if (payload._requestQuery) {
+ model = { ...model, ...payload._requestQuery };
+ }
+ return model;
+ });
+ return models;
+ }
+ assign(payload, payload.data);
+ delete payload.data;
+ return payload;
+ },
+
+ pushPayload(store, payload) {
+ const transformedPayload = this.normalizeResponse(
+ store,
+ store.modelFor(payload.modelName),
+ payload,
+ payload.id,
+ 'findRecord'
+ );
+ return store.push(transformedPayload);
+ },
+
+ normalizeResponse(store, primaryModelClass, payload, id, requestType) {
+ const responseJSON = this.normalizeItems(payload, requestType);
+ delete payload._requestQuery;
+ if (id && !responseJSON.id) {
+ responseJSON.id = id;
+ }
+ let jsonAPIRepresentation = this._super(store, primaryModelClass, responseJSON, id, requestType);
+ if (primaryModelClass.relatedCapabilities) {
+ jsonAPIRepresentation = primaryModelClass.relatedCapabilities(jsonAPIRepresentation);
+ }
+ return jsonAPIRepresentation;
+ },
+
+ serializeAttribute(snapshot, json, key, attributes) {
+ const val = snapshot.attr(key);
+ const valHasNotChanged = isNone(snapshot.changedAttributes()[key]);
+ const valIsBlank = isBlank(val);
+ if (attributes.options.readOnly) {
+ return;
+ }
+ if (valIsBlank && valHasNotChanged) {
+ return;
+ }
+
+ this._super(snapshot, json, key, attributes);
+ },
+
+ serializeBelongsTo(snapshot, json) {
+ return json;
+ },
+});
diff --git a/ui/app/serializers/auth-method.js b/ui/app/serializers/auth-method.js
new file mode 100644
index 0000000..ecb7767
--- /dev/null
+++ b/ui/app/serializers/auth-method.js
@@ -0,0 +1,38 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import ApplicationSerializer from './application';
+import { EmbeddedRecordsMixin } from '@ember-data/serializer/rest';
+
+export default ApplicationSerializer.extend(EmbeddedRecordsMixin, {
+ attrs: {
+ config: { embedded: 'always' },
+ },
+ normalize(modelClass, data) {
+ // embedded records need a unique value to be stored
+ // use the uuid from the auth-method as the unique id for mount-config
+ if (data.config && !data.config.id) {
+ data.config.id = data.uuid;
+ }
+ return this._super(modelClass, data);
+ },
+ normalizeBackend(path, backend) {
+ const struct = { ...backend };
+ // strip the trailing slash off the path so we
+ // can navigate to it without getting `//` in the url
+ struct.id = path.slice(0, -1);
+ struct.path = path;
+ return struct;
+ },
+
+ normalizeResponse(store, primaryModelClass, payload, id, requestType) {
+ const isCreate = requestType === 'createRecord';
+ const backends = isCreate
+ ?
payload.data + : Object.keys(payload.data).map((path) => this.normalizeBackend(path, payload.data[path])); + + return this._super(store, primaryModelClass, backends, id, requestType); + }, +}); diff --git a/ui/app/serializers/capabilities.js b/ui/app/serializers/capabilities.js new file mode 100644 index 0000000..3b8577a --- /dev/null +++ b/ui/app/serializers/capabilities.js @@ -0,0 +1,26 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import ApplicationSerializer from './application'; + +export default ApplicationSerializer.extend({ + primaryKey: 'path', + + normalizeResponse(store, primaryModelClass, payload, id, requestType) { + // queryRecord will already have set this, and we won't have an id here + if (id) { + payload.path = id; + } + const response = { + ...payload.data, + path: payload.path, + }; + return this._super(store, primaryModelClass, response, id, requestType); + }, + + modelNameFromPayloadKey() { + return 'capabilities'; + }, +}); diff --git a/ui/app/serializers/clients/activity.js b/ui/app/serializers/clients/activity.js new file mode 100644 index 0000000..0be0def --- /dev/null +++ b/ui/app/serializers/clients/activity.js @@ -0,0 +1,72 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import ApplicationSerializer from '../application'; +import { formatISO } from 'date-fns'; +import { formatByMonths, formatByNamespace, homogenizeClientNaming } from 'core/utils/client-count-utils'; +import timestamp from 'core/utils/timestamp'; +export default class ActivitySerializer extends ApplicationSerializer { + normalizeResponse(store, primaryModelClass, payload, id, requestType) { + if (payload.id === 'no-data') { + return super.normalizeResponse(store, primaryModelClass, payload, id, requestType); + } + const response_timestamp = formatISO(timestamp.now()); + const transformedPayload = { + ...payload, + response_timestamp, + by_namespace: formatByNamespace(payload.data.by_namespace), + by_month: formatByMonths(payload.data.months), + total: homogenizeClientNaming(payload.data.total), + }; + delete payload.data.by_namespace; + delete payload.data.months; + delete payload.data.total; + return super.normalizeResponse(store, primaryModelClass, transformedPayload, id, requestType); + } +} +/* +SAMPLE PAYLOAD BEFORE/AFTER: + +payload.data.by_namespace = [ + { + namespace_id: '5SWT8', + namespace_path: 'namespacelonglonglong4/', + counts: { + entity_clients: 171, + non_entity_clients: 20, + clients: 191, + }, + mounts: [ + { + mount_path: 'auth/method/uMGBU', + "counts":{ + "distinct_entities":0, + "entity_clients":0, + "non_entity_tokens":0, + "non_entity_clients":10, + "clients":10 + } + }, + ], + }, +]; + +transformedPayload.by_namespace = [ + { + label: 'namespacelonglonglong4/', + entity_clients: 171, + non_entity_clients: 20, + clients: 191, + mounts: [ + { + label: 'auth/method/uMGBU', + entity_clients: 20, + non_entity_clients: 15, + clients: 35, + }, + ], + }, +] +*/ diff --git a/ui/app/serializers/clients/config.js b/ui/app/serializers/clients/config.js new file mode 100644 index 0000000..00b84ed --- /dev/null +++ b/ui/app/serializers/clients/config.js @@ -0,0 +1,37 @@ +/** + * Copyright (c) HashiCorp, Inc. 
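The before/after sample documented in the activity serializer above makes the transform concrete. A standalone sketch of the `by_namespace` flattening; the real logic lives in `formatByNamespace` in `core/utils/client-count-utils` and may differ in detail:

```js
// Flatten the API's nested namespace counts into the chart-friendly
// shape shown in the transformedPayload sample above.
function flattenByNamespace(byNamespace) {
  return byNamespace.map(({ namespace_path, counts, mounts }) => ({
    label: namespace_path,
    entity_clients: counts.entity_clients,
    non_entity_clients: counts.non_entity_clients,
    clients: counts.clients,
    mounts: (mounts || []).map(({ mount_path, counts: c }) => ({
      label: mount_path,
      entity_clients: c.entity_clients,
      non_entity_clients: c.non_entity_clients,
      clients: c.clients,
    })),
  }));
}
```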
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import ApplicationSerializer from '../application'; + +export default ApplicationSerializer.extend({ + normalizeResponse(store, primaryModelClass, payload, id, requestType) { + if (!payload.data) { + // CBS TODO: Remove this if block once API is published + return this._super(store, primaryModelClass, payload, id, requestType); + } + const normalizedPayload = { + id: payload.id, + data: { + ...payload.data, + enabled: payload.data.enabled?.includes('enable') ? 'On' : 'Off', + }, + }; + return this._super(store, primaryModelClass, normalizedPayload, id, requestType); + }, + + serialize() { + const json = this._super(...arguments); + if (json.enabled === 'On' || json.enabled === 'Off') { + const oldEnabled = json.enabled; + json.enabled = oldEnabled === 'On' ? 'enable' : 'disable'; + } + json.retention_months = parseInt(json.retention_months, 10); + if (isNaN(json.retention_months)) { + throw new Error('Invalid number value'); + } + delete json.queries_available; + return json; + }, +}); diff --git a/ui/app/serializers/clients/version-history.js b/ui/app/serializers/clients/version-history.js new file mode 100644 index 0000000..afe1565 --- /dev/null +++ b/ui/app/serializers/clients/version-history.js @@ -0,0 +1,16 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import ApplicationSerializer from '../application'; + +export default class VersionHistorySerializer extends ApplicationSerializer { + primaryKey = 'version'; + + normalizeItems(payload) { + if (payload.data.keys && Array.isArray(payload.data.keys)) { + return payload.data.keys.map((key) => ({ version: key, ...payload.data.key_info[key] })); + } + } +} diff --git a/ui/app/serializers/cluster.js b/ui/app/serializers/cluster.js new file mode 100644 index 0000000..f87bb33 --- /dev/null +++ b/ui/app/serializers/cluster.js @@ -0,0 +1,57 @@ +/** + * Copyright (c) HashiCorp, Inc. 
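The version-history serializer above uses the LIST response shape that recurs throughout these serializers: ids in `data.keys`, attributes in `data.key_info`. A generic sketch of the merge (sample values are illustrative only):

```js
// Merge a Vault LIST payload's keys with their key_info attributes,
// letting the caller choose the primary key field.
function mergeKeyInfo(data, idField = 'id') {
  return (data.keys || []).map((key) => ({
    [idField]: key,
    ...(data.key_info?.[key] || {}),
  }));
}

// illustrative sample data, not real API output
const payload = {
  keys: ['1.14.0', '1.14.8'],
  key_info: {
    '1.14.0': { build_date: '2023-06-01' },
    '1.14.8': { build_date: '2024-01-15' },
  },
};
console.log(mergeKeyInfo(payload, 'version'));
```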
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import RESTSerializer, { EmbeddedRecordsMixin } from '@ember-data/serializer/rest'; +import { assign } from '@ember/polyfills'; +import { decamelize } from '@ember/string'; +import IdentityManager from '../utils/identity-manager'; + +const uuids = new IdentityManager(); + +export default RESTSerializer.extend(EmbeddedRecordsMixin, { + keyForAttribute: function (attr) { + return decamelize(attr); + }, + + attrs: { + nodes: { embedded: 'always' }, + dr: { embedded: 'always' }, + performance: { embedded: 'always' }, + }, + + setReplicationId(data) { + if (data) { + data.id = data.cluster_id || uuids.fetch(); + } + }, + + normalize(modelClass, data) { + // embedded records need a unique value to be stored + // set id for dr and performance to cluster_id or random unique id + this.setReplicationId(data.dr); + this.setReplicationId(data.performance); + return this._super(modelClass, data); + }, + + pushPayload(store, payload) { + const transformedPayload = this.normalizeResponse( + store, + store.modelFor('cluster'), + payload, + null, + 'findAll' + ); + return store.push(transformedPayload); + }, + + normalizeResponse(store, primaryModelClass, payload, id, requestType) { + // FIXME when multiple clusters lands + const transformedPayload = { + clusters: assign({ id: '1' }, payload.data || payload), + }; + + return this._super(store, primaryModelClass, transformedPayload, id, requestType); + }, +}); diff --git a/ui/app/serializers/config.js b/ui/app/serializers/config.js new file mode 100644 index 0000000..4d61592 --- /dev/null +++ b/ui/app/serializers/config.js @@ -0,0 +1,34 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import RESTSerializer from '@ember-data/serializer/rest'; +import { assign } from '@ember/polyfills'; +import { decamelize } from '@ember/string'; + +export default RESTSerializer.extend({ + keyForAttribute: function (attr) { + return decamelize(attr); + }, + + normalizeAll(payload) { + if (payload.data) { + const data = assign({}, payload, payload.data); + return [data]; + } + return [payload]; + }, + + normalizeResponse(store, primaryModelClass, payload, id, requestType) { + const records = this.normalizeAll(payload); + const { modelName } = primaryModelClass; + let transformedPayload = { [modelName]: records }; + // just return the single object because ember is picky + if (requestType === 'queryRecord') { + transformedPayload = { [modelName]: records[0] }; + } + + return this._super(store, primaryModelClass, transformedPayload, id, requestType); + }, +}); diff --git a/ui/app/serializers/control-group.js b/ui/app/serializers/control-group.js new file mode 100644 index 0000000..abe6033 --- /dev/null +++ b/ui/app/serializers/control-group.js @@ -0,0 +1,33 @@ +/** + * Copyright (c) HashiCorp, Inc. 
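In the cluster serializer above, embedded `dr` and `performance` records take `cluster_id` as their id, falling back to a locally generated one when it is unset. A sketch with a simple counter standing in for the app's `IdentityManager`:

```js
// A trivial counter stands in for IdentityManager here; the real class
// lives in ui/app/utils/identity-manager and may behave differently.
let nextId = 0;
const uuids = { fetch: () => `local-${++nextId}` };

function setReplicationId(data) {
  if (data) {
    data.id = data.cluster_id || uuids.fetch();
  }
  return data;
}

console.log(setReplicationId({ cluster_id: 'abc-123' })); // keeps cluster_id
console.log(setReplicationId({}));                        // { id: 'local-1' }
```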
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { EmbeddedRecordsMixin } from '@ember-data/serializer/rest'; +import ApplicationSerializer from './application'; + +export default ApplicationSerializer.extend(EmbeddedRecordsMixin, { + attrs: { + requestEntity: { embedded: 'always' }, + authorizations: { embedded: 'always' }, + }, + + normalizeResponse(store, primaryModelClass, payload) { + const entity = payload?.data?.request_entity; + if (Array.isArray(payload.data.authorizations)) { + for (const authorization of payload.data.authorizations) { + authorization.id = authorization.entity_id; + authorization.name = authorization.entity_name; + } + } + + if (entity && Object.keys(entity).length === 0) { + payload.data.request_entity = null; + } + return this._super(...arguments); + }, + + serialize(snapshot) { + return { accessor: snapshot.id }; + }, +}); diff --git a/ui/app/serializers/database/connection.js b/ui/app/serializers/database/connection.js new file mode 100644 index 0000000..2006015 --- /dev/null +++ b/ui/app/serializers/database/connection.js @@ -0,0 +1,75 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import RESTSerializer from '@ember-data/serializer/rest'; +import { AVAILABLE_PLUGIN_TYPES } from '../../utils/database-helpers'; + +export default RESTSerializer.extend({ + primaryKey: 'name', + + serializeAttribute(snapshot, json, key, attributes) { + // Don't send values that are undefined + if ( + undefined !== snapshot.attr(key) && + (snapshot.record.get('isNew') || snapshot.changedAttributes()[key]) + ) { + this._super(snapshot, json, key, attributes); + } + }, + + normalizeSecrets(payload) { + if (payload.data.keys && Array.isArray(payload.data.keys)) { + const connections = payload.data.keys.map((secret) => ({ name: secret, backend: payload.backend })); + return connections; + } + // Query single record response: + const response = { + id: payload.id, + name: payload.id, + backend: payload.backend, + ...payload.data, + ...payload.data.connection_details, + }; + if (payload.data.root_credentials_rotate_statements) { + response.root_rotation_statements = payload.data.root_credentials_rotate_statements; + } + return response; + }, + + normalizeResponse(store, primaryModelClass, payload, id, requestType) { + const nullResponses = ['updateRecord', 'createRecord', 'deleteRecord']; + const connections = nullResponses.includes(requestType) + ? 
{ name: id, backend: payload.backend } + : this.normalizeSecrets(payload); + const { modelName } = primaryModelClass; + let transformedPayload = { [modelName]: connections }; + if (requestType === 'queryRecord') { + // comes back as object anyway + transformedPayload = { [modelName]: { id, ...connections } }; + } + return this._super(store, primaryModelClass, transformedPayload, id, requestType); + }, + + serialize(snapshot, requestType) { + const data = this._super(snapshot, requestType); + if (!data.plugin_name) { + return data; + } + const pluginType = AVAILABLE_PLUGIN_TYPES.find((plugin) => plugin.value === data.plugin_name); + if (!pluginType) { + return data; + } + const pluginAttributes = pluginType.fields.map((fields) => fields.attr).concat('backend'); + + // filter data to only allow plugin specific attrs + const allowedAttributes = Object.keys(data).filter((dataAttrs) => pluginAttributes.includes(dataAttrs)); + for (const key in data) { + if (!allowedAttributes.includes(key)) { + delete data[key]; + } + } + return data; + }, +}); diff --git a/ui/app/serializers/database/credential.js b/ui/app/serializers/database/credential.js new file mode 100644 index 0000000..a3c0f6b --- /dev/null +++ b/ui/app/serializers/database/credential.js @@ -0,0 +1,34 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import RESTSerializer from '@ember-data/serializer/rest'; + +export default RESTSerializer.extend({ + primaryKey: 'username', + + normalizePayload(payload) { + if (payload.data) { + return { + username: payload.data.username, + password: payload.data.password, + leaseId: payload.lease_id, + leaseDuration: payload.lease_duration, + lastVaultRotation: payload.data.last_vault_rotation, + rotationPeriod: payload.data.rotation_period, + ttl: payload.data.ttl, + // roleType is added on adapter + roleType: payload.roleType, + }; + } + }, + + normalizeResponse(store, primaryModelClass, payload, id, requestType) { + const credentials = this.normalizePayload(payload); + const { modelName } = primaryModelClass; + const transformedPayload = { [modelName]: credentials }; + + return this._super(store, primaryModelClass, transformedPayload, id, requestType); + }, +}); diff --git a/ui/app/serializers/database/role.js b/ui/app/serializers/database/role.js new file mode 100644 index 0000000..f5513c7 --- /dev/null +++ b/ui/app/serializers/database/role.js @@ -0,0 +1,99 @@ +/** + * Copyright (c) HashiCorp, Inc. 
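The connection serializer above filters serialized data down to the fields declared for the selected plugin (plus `backend`). A sketch of that allow-list filtering, with a stub plugin table in place of `AVAILABLE_PLUGIN_TYPES`:

```js
// Stub plugin table; the real field lists come from
// AVAILABLE_PLUGIN_TYPES in utils/database-helpers.
const PLUGIN_FIELDS = {
  'mysql-database-plugin': ['plugin_name', 'connection_url', 'username', 'password'],
};

function filterPluginAttrs(data) {
  const allowed = PLUGIN_FIELDS[data.plugin_name];
  if (!allowed) return data; // unknown plugin: send everything
  const keep = new Set([...allowed, 'backend']);
  return Object.fromEntries(Object.entries(data).filter(([k]) => keep.has(k)));
}

console.log(
  filterPluginAttrs({
    plugin_name: 'mysql-database-plugin',
    backend: 'database',
    connection_url: '{{username}}:{{password}}@tcp(127.0.0.1:3306)/',
    verify_connection: true, // dropped: not a mysql field in this stub
  })
);
```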
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import RESTSerializer from '@ember-data/serializer/rest'; + +export default RESTSerializer.extend({ + primaryKey: 'name', + + normalizeSecrets(payload) { + if (payload.data.keys && Array.isArray(payload.data.keys)) { + const roles = payload.data.keys.map((secret) => { + let type = 'dynamic'; + let path = 'roles'; + if (payload.data.staticRoles.includes(secret)) { + type = 'static'; + path = 'static-roles'; + } + return { name: secret, backend: payload.backend, type, path }; + }); + return roles; + } + let path = 'roles'; + if (payload.type === 'static') { + path = 'static-roles'; + } + let database = []; + if (payload.data.db_name) { + database = [payload.data.db_name]; + } + // Copy to singular for MongoDB + let creation_statement = ''; + let revocation_statement = ''; + if (payload.data.creation_statements) { + creation_statement = payload.data.creation_statements[0]; + } + if (payload.data.revocation_statements) { + revocation_statement = payload.data.revocation_statements[0]; + } + return { + id: payload.id, + backend: payload.backend, + name: payload.id, + type: payload.type, + database, + path, + creation_statement, + revocation_statement, + ...payload.data, + }; + }, + + normalizeResponse(store, primaryModelClass, payload, id, requestType) { + const nullResponses = ['updateRecord', 'createRecord', 'deleteRecord']; + const roles = nullResponses.includes(requestType) + ? { name: id, backend: payload.backend } + : this.normalizeSecrets(payload); + const { modelName } = primaryModelClass; + let transformedPayload = { [modelName]: roles }; + if (requestType === 'queryRecord') { + transformedPayload = { [modelName]: roles }; + } + return this._super(store, primaryModelClass, transformedPayload, id, requestType); + }, + + serializeAttribute(snapshot, json, key, attributes) { + // Don't send values that are undefined + if ( + undefined !== snapshot.attr(key) && + (snapshot.record.get('isNew') || snapshot.changedAttributes()[key]) + ) { + this._super(snapshot, json, key, attributes); + } + }, + + serialize(snapshot, requestType) { + const data = this._super(snapshot, requestType); + if (data.database) { + const db = data.database[0]; + data.db_name = db; + delete data.database; + } + // This is necessary because the input for MongoDB is a json string + // rather than an array, so we transpose that here + if (data.creation_statement) { + const singleStatement = data.creation_statement; + data.creation_statements = [singleStatement]; + delete data.creation_statement; + } + if (data.revocation_statement) { + const singleStatement = data.revocation_statement; + data.revocation_statements = [singleStatement]; + delete data.revocation_statement; + } + + return data; + }, +}); diff --git a/ui/app/serializers/identity/_base.js b/ui/app/serializers/identity/_base.js new file mode 100644 index 0000000..8c9854f --- /dev/null +++ b/ui/app/serializers/identity/_base.js @@ -0,0 +1,26 @@ +/** + * Copyright (c) HashiCorp, Inc. 
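The role serializer above maps between the API's statement arrays and the single JSON string the MongoDB form edits. A sketch of that round trip:

```js
// Singular form for the UI, plural array for the API, as in the
// database/role serializer above.
function toForm(data) {
  return { ...data, creation_statement: (data.creation_statements || [])[0] || '' };
}

function toApi(data) {
  const { creation_statement, ...rest } = data;
  return creation_statement ? { ...rest, creation_statements: [creation_statement] } : rest;
}

const api = { creation_statements: ['{ "roles": [{ "role": "readWrite" }] }'] };
console.log(toApi(toForm(api))); // round-trips back to the array shape
```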
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { assign } from '@ember/polyfills'; +import ApplicationSerializer from '../application'; + +export default ApplicationSerializer.extend({ + normalizeItems(payload) { + if (payload.data.keys && Array.isArray(payload.data.keys)) { + if (typeof payload.data.keys[0] !== 'string') { + // If keys is not an array of strings, it was already normalized into objects in extractLazyPaginatedData + return payload.data.keys; + } + return payload.data.keys.map((key) => { + const model = payload.data.key_info[key]; + model.id = key; + return model; + }); + } + assign(payload, payload.data); + delete payload.data; + return payload; + }, +}); diff --git a/ui/app/serializers/identity/entity-alias.js b/ui/app/serializers/identity/entity-alias.js new file mode 100644 index 0000000..bc0ebff --- /dev/null +++ b/ui/app/serializers/identity/entity-alias.js @@ -0,0 +1,18 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import IdentitySerializer from './_base'; +export default IdentitySerializer.extend({ + extractLazyPaginatedData(payload) { + return payload.data.keys.map((key) => { + const model = payload.data.key_info[key]; + model.id = key; + if (payload.backend) { + model.backend = payload.backend; + } + return model; + }); + }, +}); diff --git a/ui/app/serializers/identity/entity.js b/ui/app/serializers/identity/entity.js new file mode 100644 index 0000000..5d79369 --- /dev/null +++ b/ui/app/serializers/identity/entity.js @@ -0,0 +1,25 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { EmbeddedRecordsMixin } from '@ember-data/serializer/rest'; +import IdentitySerializer from './_base'; + +export default IdentitySerializer.extend(EmbeddedRecordsMixin, { + // we don't need to serialize relationships here + serializeHasMany() {}, + attrs: { + aliases: { embedded: 'always' }, + }, + extractLazyPaginatedData(payload) { + return payload.data.keys.map((key) => { + const model = payload.data.key_info[key]; + model.id = key; + if (payload.backend) { + model.backend = payload.backend; + } + return model; + }); + }, +}); diff --git a/ui/app/serializers/identity/group-alias.js b/ui/app/serializers/identity/group-alias.js new file mode 100644 index 0000000..a0087fd --- /dev/null +++ b/ui/app/serializers/identity/group-alias.js @@ -0,0 +1,7 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import IdentitySerializer from './_base'; +export default IdentitySerializer.extend(); diff --git a/ui/app/serializers/identity/group.js b/ui/app/serializers/identity/group.js new file mode 100644 index 0000000..7429e26 --- /dev/null +++ b/ui/app/serializers/identity/group.js @@ -0,0 +1,30 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { EmbeddedRecordsMixin } from '@ember-data/serializer/rest'; +import IdentitySerializer from './_base'; + +export default IdentitySerializer.extend(EmbeddedRecordsMixin, { + attrs: { + alias: { embedded: 'always' }, + }, + + normalizeFindRecordResponse(store, primaryModelClass, payload) { + if (payload.alias && Object.keys(payload.alias).length === 0) { + delete payload.alias; + } + return this._super(...arguments); + }, + + serialize() { + const json = this._super(...arguments); + delete json.alias; + if (json.type === 'external') { + delete json.member_entity_ids; + delete json.member_group_ids; + } + return json; + }, +}); diff --git a/ui/app/serializers/keymgmt/key.js b/ui/app/serializers/keymgmt/key.js new file mode 100644 index 0000000..576a284 --- /dev/null +++ b/ui/app/serializers/keymgmt/key.js @@ -0,0 +1,39 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import ApplicationSerializer from '../application'; + +export default class KeymgmtKeySerializer extends ApplicationSerializer { + normalizeItems(payload) { + const normalized = super.normalizeItems(payload); + // Transform versions from object with number keys to array with key ids + if (normalized.versions) { + let lastRotated; + let created; + const versions = []; + Object.keys(normalized.versions).forEach((key, i, arr) => { + versions.push({ + id: parseInt(key, 10), + ...normalized.versions[key], + }); + if (i === 0) { + created = normalized.versions[key].creation_time; + } else if (arr.length - 1 === i) { + // Set lastRotated to the last key + lastRotated = normalized.versions[key].creation_time; + } + }); + normalized.versions = versions; + return { ...normalized, last_rotated: lastRotated, created }; + } else if (Array.isArray(normalized)) { + return normalized.map((key) => ({ + id: key.id, + name: key.id, + backend: payload.backend, + })); + } + return normalized; + } +} diff --git a/ui/app/serializers/keymgmt/provider.js b/ui/app/serializers/keymgmt/provider.js new file mode 100644 index 0000000..d65de5c --- /dev/null +++ b/ui/app/serializers/keymgmt/provider.js @@ -0,0 +1,29 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import ApplicationSerializer from '../application'; + +export default class KeymgmtProviderSerializer extends ApplicationSerializer { + primaryKey = 'name'; + + normalizeItems(payload) { + const normalized = super.normalizeItems(payload); + if (Array.isArray(normalized)) { + normalized.forEach((provider) => { + provider.id = provider.name; + provider.backend = payload.backend; + }); + } + return normalized; + } + + serialize(snapshot) { + const json = super.serialize(...arguments); + return { + ...json, + credentials: snapshot.record.credentials, + }; + } +} diff --git a/ui/app/serializers/kubernetes/config.js b/ui/app/serializers/kubernetes/config.js new file mode 100644 index 0000000..fe732f0 --- /dev/null +++ b/ui/app/serializers/kubernetes/config.js @@ -0,0 +1,23 @@ +/** + * Copyright (c) HashiCorp, Inc. 
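The keymgmt key serializer above turns the API's number-keyed `versions` object into an array, deriving `created` from the first entry and `last_rotated` from the last. A standalone sketch:

```js
// Convert { '1': {...}, '2': {...} } into an array of versions plus
// created / last_rotated timestamps, mirroring the serializer above.
function normalizeVersions(versions) {
  const keys = Object.keys(versions);
  const list = keys.map((k) => ({ id: parseInt(k, 10), ...versions[k] }));
  return {
    versions: list,
    created: versions[keys[0]]?.creation_time,
    last_rotated: keys.length > 1 ? versions[keys[keys.length - 1]].creation_time : undefined,
  };
}

console.log(
  normalizeVersions({
    1: { creation_time: '2023-01-01T00:00:00Z' },
    2: { creation_time: '2023-06-01T00:00:00Z' },
  })
);
```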
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import ApplicationSerializer from '../application'; + +export default class KubernetesConfigSerializer extends ApplicationSerializer { + primaryKey = 'backend'; + + serialize() { + const json = super.serialize(...arguments); + // remove backend value from payload + delete json.backend; + // ensure that values from a previous manual configuration are unset + if (json.disable_local_ca_jwt === false) { + json.kubernetes_ca_cert = null; + json.kubernetes_host = null; + json.service_account_jwt = null; + } + return json; + } +} diff --git a/ui/app/serializers/kubernetes/role.js b/ui/app/serializers/kubernetes/role.js new file mode 100644 index 0000000..580b4c6 --- /dev/null +++ b/ui/app/serializers/kubernetes/role.js @@ -0,0 +1,17 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import ApplicationSerializer from '../application'; + +export default class KubernetesConfigSerializer extends ApplicationSerializer { + primaryKey = 'name'; + + serialize() { + const json = super.serialize(...arguments); + // remove backend value from payload + delete json.backend; + return json; + } +} diff --git a/ui/app/serializers/lease.js b/ui/app/serializers/lease.js new file mode 100644 index 0000000..cfa8614 --- /dev/null +++ b/ui/app/serializers/lease.js @@ -0,0 +1,36 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import RESTSerializer from '@ember-data/serializer/rest'; +import { decamelize } from '@ember/string'; + +export default RESTSerializer.extend({ + keyForAttribute: function (attr) { + return decamelize(attr); + }, + + normalizeAll(payload) { + if (payload.data.keys && Array.isArray(payload.data.keys)) { + const records = payload.data.keys.map((record) => { + const fullPath = payload.prefix ? payload.prefix + record : record; + return { id: fullPath }; + }); + return records; + } + return [payload.data]; + }, + + normalizeResponse(store, primaryModelClass, payload, id, requestType) { + const records = this.normalizeAll(payload); + const { modelName } = primaryModelClass; + let transformedPayload = { [modelName]: records }; + // just return the single object because ember is picky + if (requestType === 'queryRecord') { + transformedPayload = { [modelName]: records[0] }; + } + + return this._super(store, primaryModelClass, transformedPayload, id, requestType); + }, +}); diff --git a/ui/app/serializers/license.js b/ui/app/serializers/license.js new file mode 100644 index 0000000..42c078a --- /dev/null +++ b/ui/app/serializers/license.js @@ -0,0 +1,20 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import ApplicationSerializer from './application'; + +export default ApplicationSerializer.extend({ + normalizeResponse(store, primaryModelClass, payload, id, requestType) { + let transformedPayload = { autoloaded: payload.data.autoloading_used, license_id: 'no-license' }; + if (payload.data.autoloaded) { + transformedPayload = { + ...transformedPayload, + ...payload.data.autoloaded, + }; + } + transformedPayload.id = transformedPayload.license_id; + return this._super(store, primaryModelClass, transformedPayload, id, requestType); + }, +}); diff --git a/ui/app/serializers/mfa-login-enforcement.js b/ui/app/serializers/mfa-login-enforcement.js new file mode 100644 index 0000000..bf71cb4 --- /dev/null +++ b/ui/app/serializers/mfa-login-enforcement.js @@ -0,0 +1,45 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import ApplicationSerializer from './application'; + +export default class MfaLoginEnforcementSerializer extends ApplicationSerializer { + primaryKey = 'name'; + + // Return data with updated keys for hasMany relationships with ids in the name + transformHasManyKeys(data, destination) { + const keys = { + model: ['mfa_methods', 'identity_entities', 'identity_groups'], + server: ['mfa_method_ids', 'identity_entity_ids', 'identity_group_ids'], + }; + keys[destination].forEach((newKey, index) => { + const oldKey = destination === 'model' ? keys.server[index] : keys.model[index]; + delete Object.assign(data, { [newKey]: data[oldKey] })[oldKey]; + }); + return data; + } + normalize(model, data) { + this.transformHasManyKeys(data, 'model'); + return super.normalize(model, data); + } + normalizeItems(payload) { + if (payload.data) { + if (payload.data?.keys && Array.isArray(payload.data.keys)) { + return payload.data.keys.map((key) => payload.data.key_info[key]); + } + Object.assign(payload, payload.data); + delete payload.data; + } + return payload; + } + serialize() { + const json = super.serialize(...arguments); + // empty arrays are being removed from serialized json + // ensure that they are sent to the server, otherwise removing items will not be persisted + json.auth_method_accessors = json.auth_method_accessors || []; + json.auth_method_types = json.auth_method_types || []; + return this.transformHasManyKeys(json, 'server'); + } +} diff --git a/ui/app/serializers/mfa-method.js b/ui/app/serializers/mfa-method.js new file mode 100644 index 0000000..693e006 --- /dev/null +++ b/ui/app/serializers/mfa-method.js @@ -0,0 +1,28 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import ApplicationSerializer from './application'; + +export default class KeymgmtKeySerializer extends ApplicationSerializer { + normalizeItems(payload) { + if (!payload?.data) return payload; + if (payload.data.keys && Array.isArray(payload.data.keys)) { + const data = payload.data.keys.map((key) => { + const model = payload.data.key_info[key]; + model.id = key; + return model; + }); + return data; + } + Object.assign(payload, payload.data); + delete payload.data; + return payload; + } + serialize() { + const json = super.serialize(...arguments); + delete json.type; + return json; + } +} diff --git a/ui/app/serializers/mount-config.js b/ui/app/serializers/mount-config.js new file mode 100644 index 0000000..32fbf4c --- /dev/null +++ b/ui/app/serializers/mount-config.js @@ -0,0 +1,7 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import ApplicationSerializer from './application'; +export default ApplicationSerializer.extend(); diff --git a/ui/app/serializers/namespace.js b/ui/app/serializers/namespace.js new file mode 100644 index 0000000..f94a818 --- /dev/null +++ b/ui/app/serializers/namespace.js @@ -0,0 +1,33 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import ApplicationSerializer from './application'; + +export default class NamespaceSerializer extends ApplicationSerializer { + attrs = { + path: { serialize: false }, + }; + + normalizeList(payload) { + const data = payload.data.keys + ? 
payload.data.keys.map((key) => ({ + path: key, + // remove the trailing slash from the id + id: key.replace(/\/$/, ''), + })) + : payload.data; + + return data; + } + + normalizeResponse(store, primaryModelClass, payload, id, requestType) { + const nullResponses = ['deleteRecord', 'createRecord']; + const cid = (id || payload.id || '').replace(/\/$/, ''); + const normalizedPayload = nullResponses.includes(requestType) + ? { id: cid, path: cid } + : this.normalizeList(payload); + return super.normalizeResponse(store, primaryModelClass, normalizedPayload, id, requestType); + } +} diff --git a/ui/app/serializers/node.js b/ui/app/serializers/node.js new file mode 100644 index 0000000..e7586ba --- /dev/null +++ b/ui/app/serializers/node.js @@ -0,0 +1,48 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import RESTSerializer, { EmbeddedRecordsMixin } from '@ember-data/serializer/rest'; +import { assign } from '@ember/polyfills'; +import { decamelize } from '@ember/string'; + +export default RESTSerializer.extend(EmbeddedRecordsMixin, { + keyForAttribute: function (attr) { + return decamelize(attr); + }, + + pushPayload(store, payload) { + const transformedPayload = this.normalizeResponse( + store, + store.modelFor('node'), + payload, + null, + 'findAll' + ); + return store.push(transformedPayload); + }, + + nodeFromObject(name, payload) { + const nodeObj = payload.nodes[name]; + return assign(nodeObj, { + name, + id: name, + }); + }, + + normalizeResponse(store, primaryModelClass, payload, id, requestType) { + const nodes = payload.nodes + ? Object.keys(payload.nodes).map((name) => this.nodeFromObject(name, payload)) + : [assign(payload, { id: '1' })]; + + const transformedPayload = { nodes: nodes }; + + return this._super(store, primaryModelClass, transformedPayload, id, requestType); + }, + + normalize(model, hash, prop) { + hash.id = '1'; + return this._super(model, hash, prop); + }, +}); diff --git a/ui/app/serializers/oidc/assignment.js b/ui/app/serializers/oidc/assignment.js new file mode 100644 index 0000000..096d7c1 --- /dev/null +++ b/ui/app/serializers/oidc/assignment.js @@ -0,0 +1,10 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import ApplicationSerializer from '../application'; + +export default class OidcAssignmentSerializer extends ApplicationSerializer { + primaryKey = 'name'; +} diff --git a/ui/app/serializers/oidc/client.js b/ui/app/serializers/oidc/client.js new file mode 100644 index 0000000..4cdc952 --- /dev/null +++ b/ui/app/serializers/oidc/client.js @@ -0,0 +1,22 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import ApplicationSerializer from '../application'; + +export default class OidcClientSerializer extends ApplicationSerializer { + primaryKey = 'name'; + + // rehydrate each client model so all model attributes are accessible from the LIST response + normalizeItems(payload) { + if (payload.data) { + if (payload.data?.keys && Array.isArray(payload.data.keys)) { + return payload.data.keys.map((key) => ({ name: key, ...payload.data.key_info[key] })); + } + Object.assign(payload, payload.data); + delete payload.data; + } + return payload; + } +} diff --git a/ui/app/serializers/oidc/key.js b/ui/app/serializers/oidc/key.js new file mode 100644 index 0000000..49625d7 --- /dev/null +++ b/ui/app/serializers/oidc/key.js @@ -0,0 +1,10 @@ +/** + * Copyright (c) HashiCorp, Inc. 
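The namespace serializer above keeps the trailing slash on `path` but strips it from the record `id` so URLs don't end up with `//`. In isolation:

```js
// LIST keys arrive as 'team-a/'; the record id drops the slash.
function normalizeNamespaceKey(key) {
  return { path: key, id: key.replace(/\/$/, '') };
}

console.log(normalizeNamespaceKey('team-a/')); // { path: 'team-a/', id: 'team-a' }
```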
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import ApplicationSerializer from '../application'; + +export default class OidcKeySerializer extends ApplicationSerializer { + primaryKey = 'name'; +} diff --git a/ui/app/serializers/oidc/provider.js b/ui/app/serializers/oidc/provider.js new file mode 100644 index 0000000..14dd3c2 --- /dev/null +++ b/ui/app/serializers/oidc/provider.js @@ -0,0 +1,22 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import ApplicationSerializer from '../application'; + +export default class OidcProviderSerializer extends ApplicationSerializer { + primaryKey = 'name'; + + // need to normalize to get issuer metadata for provider's list view + normalizeItems(payload) { + if (payload.data) { + if (payload.data?.keys && Array.isArray(payload.data.keys)) { + return payload.data.keys.map((key) => ({ name: key, ...payload.data.key_info[key] })); + } + Object.assign(payload, payload.data); + delete payload.data; + } + return payload; + } +} diff --git a/ui/app/serializers/oidc/scope.js b/ui/app/serializers/oidc/scope.js new file mode 100644 index 0000000..0d829bb --- /dev/null +++ b/ui/app/serializers/oidc/scope.js @@ -0,0 +1,10 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import ApplicationSerializer from '../application'; + +export default class OidcScopeSerializer extends ApplicationSerializer { + primaryKey = 'name'; +} diff --git a/ui/app/serializers/path-filter-config.js b/ui/app/serializers/path-filter-config.js new file mode 100644 index 0000000..d6c335c --- /dev/null +++ b/ui/app/serializers/path-filter-config.js @@ -0,0 +1,20 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import RESTSerializer from '@ember-data/serializer/rest'; +import { decamelize } from '@ember/string'; + +export default RESTSerializer.extend({ + keyForAttribute: function (attr) { + return decamelize(attr); + }, + + normalizeResponse(store, primaryModelClass, payload, id, requestType) { + const { modelName } = primaryModelClass; + payload.data.id = id; + const transformedPayload = { [modelName]: payload.data }; + return this._super(store, primaryModelClass, transformedPayload, id, requestType); + }, +}); diff --git a/ui/app/serializers/pki/action.js b/ui/app/serializers/pki/action.js new file mode 100644 index 0000000..692331d --- /dev/null +++ b/ui/app/serializers/pki/action.js @@ -0,0 +1,104 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { underscore } from '@ember/string'; +import { keyParamsByType } from 'pki/utils/action-params'; +import ApplicationSerializer from '../application'; +import { parseCertificate } from 'vault/utils/parse-pki-cert'; + +export default class PkiActionSerializer extends ApplicationSerializer { + attrs = { + customTtl: { serialize: false }, + type: { serialize: false }, + }; + + normalizeResponse(store, primaryModelClass, payload, id, requestType) { + if (payload.data.certificate) { + // Parse certificate back from the API and add to payload + const parsedCert = parseCertificate(payload.data.certificate); + const data = { + ...payload.data, + common_name: parsedCert.common_name, + parsed_certificate: parsedCert, + }; + return super.normalizeResponse(store, primaryModelClass, { ...payload, data }, id, requestType); + } + return super.normalizeResponse(...arguments); + } + + serialize(snapshot, requestType) { + const data = super.serialize(snapshot); + // requestType is a custom value specified from the pki/action adapter + const allowedPayloadAttributes = this._allowedParamsByType(requestType, snapshot.record.type); + if (!allowedPayloadAttributes) return data; + // the backend expects the subject's serial number param to be 'serial_number' + // we label it as subject_serial_number to differentiate from the vault generated UUID + data.serial_number = data.subject_serial_number; + + const payload = {}; + allowedPayloadAttributes.forEach((key) => { + if ('undefined' !== typeof data[key]) { + payload[key] = data[key]; + } + }); + return payload; + } + + _allowedParamsByType(actionType, type) { + const keyFields = keyParamsByType(type).map((attrName) => underscore(attrName).toLowerCase()); + const commonProps = [ + 'alt_names', + 'common_name', + 'country', + 'exclude_cn_from_sans', + 'format', + 'ip_sans', + 'locality', + 'organization', + 'other_sans', + 'ou', + 'postal_code', + 'province', + 'serial_number', + 'street_address', + 'type', + 'uri_sans', + ...keyFields, + ]; + switch (actionType) { + case 'import': + return ['pem_bundle']; + case 'generate-root': + return [ + ...commonProps, + 'issuer_name', + 'max_path_length', + 'not_after', + 'not_before_duration', + 'permitted_dns_domains', + 'private_key_format', + 'ttl', + ]; + case 'rotate-root': + return [ + ...commonProps, + 'issuer_name', + 'max_path_length', + 'not_after', + 'not_before_duration', + 'permitted_dns_domains', + 'private_key_format', + 'ttl', + ]; + case 'generate-csr': + return [...commonProps, 'add_basic_constraints']; + case 'sign-intermediate': + return ['common_name', 'issuer_name', 'csr']; + default: + // if type doesn't match, serialize all + return null; + } + } +} diff --git a/ui/app/serializers/pki/certificate.js b/ui/app/serializers/pki/certificate.js new file mode 100644 index 0000000..7a90df0 --- /dev/null +++ b/ui/app/serializers/pki/certificate.js @@ -0,0 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import CertSerializer from './cert'; + +export default class PkiCertificateSerializer extends CertSerializer {} diff --git a/ui/app/serializers/pki/certificate/base.js b/ui/app/serializers/pki/certificate/base.js new file mode 100644 index 0000000..cc74ed0 --- /dev/null +++ b/ui/app/serializers/pki/certificate/base.js @@ -0,0 +1,30 @@ +/** + * Copyright (c) HashiCorp, Inc. 
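The pki/action serializer above relabels the form's `subject_serial_number` as `serial_number` and forwards only the fields allowed for the action type. A sketch with a truncated stand-in for `_allowedParamsByType`:

```js
// Truncated allow-list table; the real mapping is _allowedParamsByType above.
const ALLOWED = {
  import: ['pem_bundle'],
  'generate-csr': ['common_name', 'add_basic_constraints'],
};

function shapePayload(actionType, data) {
  const allowed = ALLOWED[actionType];
  if (!allowed) return data; // unknown type: serialize everything
  const withSerial = { ...data, serial_number: data.subject_serial_number };
  const payload = {};
  for (const key of allowed) {
    if (withSerial[key] !== undefined) payload[key] = withSerial[key];
  }
  return payload;
}

console.log(shapePayload('generate-csr', { common_name: 'example.com', ttl: '30d' }));
// -> { common_name: 'example.com' }  (ttl is not an allowed generate-csr field here)
```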
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import { parseCertificate } from 'vault/utils/parse-pki-cert';
+import ApplicationSerializer from '../../application';
+
+export default class PkiCertificateBaseSerializer extends ApplicationSerializer {
+  primaryKey = 'serial_number';
+
+  attrs = {
+    role: { serialize: false },
+  };
+
+  normalizeResponse(store, primaryModelClass, payload, id, requestType) {
+    if (payload.data.certificate) {
+      // Parse certificate back from the API and add to payload
+      const parsedCert = parseCertificate(payload.data.certificate);
+      return super.normalizeResponse(
+        store,
+        primaryModelClass,
+        { ...payload, parsed_certificate: parsedCert, common_name: parsedCert.common_name },
+        id,
+        requestType
+      );
+    }
+    return super.normalizeResponse(...arguments);
+  }
+}
diff --git a/ui/app/serializers/pki/certificate/generate.js b/ui/app/serializers/pki/certificate/generate.js
new file mode 100644
index 0000000..b729e3d
--- /dev/null
+++ b/ui/app/serializers/pki/certificate/generate.js
@@ -0,0 +1,8 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import PkiCertificateBaseSerializer from './base';
+
+export default class PkiCertificateGenerateSerializer extends PkiCertificateBaseSerializer {}
diff --git a/ui/app/serializers/pki/certificate/sign.js b/ui/app/serializers/pki/certificate/sign.js
new file mode 100644
index 0000000..b729e3d
--- /dev/null
+++ b/ui/app/serializers/pki/certificate/sign.js
@@ -0,0 +1,8 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import PkiCertificateBaseSerializer from './base';
+
+export default class PkiCertificateSignSerializer extends PkiCertificateBaseSerializer {}
diff --git a/ui/app/serializers/pki/issuer.js b/ui/app/serializers/pki/issuer.js
new file mode 100644
index 0000000..d205c5f
--- /dev/null
+++ b/ui/app/serializers/pki/issuer.js
@@ -0,0 +1,54 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
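+ *
+ * For illustration, a LIST response shaped like
+ *   { data: { keys: ['abc-123'], key_info: { 'abc-123': { issuer_name: 'root' } } } }
+ * is flattened by normalizeItems into
+ *   [{ issuer_id: 'abc-123', issuer_name: 'root' }]
+ * (the id and key_info fields are made up for the example).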
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { parseCertificate } from 'vault/utils/parse-pki-cert'; +import ApplicationSerializer from '../application'; + +export default class PkiIssuerSerializer extends ApplicationSerializer { + primaryKey = 'issuer_id'; + + attrs = { + caChain: { serialize: false }, + certificate: { serialize: false }, + commonName: { serialize: false }, + isDefault: { serialize: false }, + isRoot: { serialize: false }, + issuerId: { serialize: false }, + keyId: { serialize: false }, + parsedCertificate: { serialize: false }, + serialNumber: { serialize: false }, + }; + + normalizeResponse(store, primaryModelClass, payload, id, requestType) { + if (payload.data.certificate) { + // Parse certificate back from the API and add to payload + const parsedCert = parseCertificate(payload.data.certificate); + const data = { + ...payload.data, + parsed_certificate: parsedCert, + common_name: parsedCert.common_name, + }; + return super.normalizeResponse(store, primaryModelClass, { ...payload, data }, id, requestType); + } + return super.normalizeResponse(...arguments); + } + + // rehydrate each issuers model so all model attributes are accessible from the LIST response + normalizeItems(payload) { + if (payload.data) { + if (payload.data?.keys && Array.isArray(payload.data.keys)) { + return payload.data.keys.map((key) => { + return { + issuer_id: key, + ...payload.data.key_info[key], + }; + }); + } + Object.assign(payload, payload.data); + delete payload.data; + } + return payload; + } +} diff --git a/ui/app/serializers/pki/key.js b/ui/app/serializers/pki/key.js new file mode 100644 index 0000000..ff7519f --- /dev/null +++ b/ui/app/serializers/pki/key.js @@ -0,0 +1,25 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import ApplicationSerializer from '../application'; + +export default class PkiKeySerializer extends ApplicationSerializer { + primaryKey = 'key_id'; + attrs = { + type: { serialize: false }, + }; + + // rehydrate each keys model so all model attributes are accessible from the LIST response + normalizeItems(payload) { + if (payload.data) { + if (payload.data?.keys && Array.isArray(payload.data.keys)) { + return payload.data.keys.map((key) => ({ key_id: key, ...payload.data.key_info[key] })); + } + Object.assign(payload, payload.data); + delete payload.data; + } + return payload; + } +} diff --git a/ui/app/serializers/pki/role.js b/ui/app/serializers/pki/role.js new file mode 100644 index 0000000..5ee82a5 --- /dev/null +++ b/ui/app/serializers/pki/role.js @@ -0,0 +1,21 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import ApplicationSerializer from '../application'; + +export default class PkiRoleSerializer extends ApplicationSerializer { + attrs = { + name: { serialize: false }, + }; + + serialize() { + const json = super.serialize(...arguments); + // attributes with empty arrays are stripped from serialized json + // but an empty list is acceptable for key_usage to specify no default constraints + // intercepting here to ensure an empty array persists (the backend assumes default values) + json.key_usage = json.key_usage || []; + return json; + } +} diff --git a/ui/app/serializers/pki/tidy.js b/ui/app/serializers/pki/tidy.js new file mode 100644 index 0000000..77f2a98 --- /dev/null +++ b/ui/app/serializers/pki/tidy.js @@ -0,0 +1,17 @@ +/** + * Copyright (c) HashiCorp, Inc. 
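+ *
+ * Sketch of the serializer below: serialize(snapshot, 'manual') drops the
+ * enabled and intervalDuration fields, which only apply to auto tidy, and
+ * returns the remaining attributes unchanged.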
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import ApplicationSerializer from '../application'; + +export default class PkiTidySerializer extends ApplicationSerializer { + serialize(snapshot, tidyType) { + const data = super.serialize(snapshot); + if (tidyType === 'manual') { + delete data?.enabled; + delete data?.intervalDuration; + } + return data; + } +} diff --git a/ui/app/serializers/policy.js b/ui/app/serializers/policy.js new file mode 100644 index 0000000..9427c63 --- /dev/null +++ b/ui/app/serializers/policy.js @@ -0,0 +1,23 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import ApplicationSerializer from './application'; + +export default ApplicationSerializer.extend({ + primaryKey: 'name', + + normalizePolicies(payload) { + const data = payload.data.keys ? payload.data.keys.map((name) => ({ name })) : payload.data; + return data; + }, + + normalizeResponse(store, primaryModelClass, payload, id, requestType) { + const nullResponses = ['deleteRecord']; + const normalizedPayload = nullResponses.includes(requestType) + ? { name: id } + : this.normalizePolicies(payload); + return this._super(store, primaryModelClass, normalizedPayload, id, requestType); + }, +}); diff --git a/ui/app/serializers/policy/acl.js b/ui/app/serializers/policy/acl.js new file mode 100644 index 0000000..60237e0 --- /dev/null +++ b/ui/app/serializers/policy/acl.js @@ -0,0 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import PolicySerializer from '../policy'; + +export default PolicySerializer.extend(); diff --git a/ui/app/serializers/policy/egp.js b/ui/app/serializers/policy/egp.js new file mode 100644 index 0000000..60237e0 --- /dev/null +++ b/ui/app/serializers/policy/egp.js @@ -0,0 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import PolicySerializer from '../policy'; + +export default PolicySerializer.extend(); diff --git a/ui/app/serializers/policy/rgp.js b/ui/app/serializers/policy/rgp.js new file mode 100644 index 0000000..60237e0 --- /dev/null +++ b/ui/app/serializers/policy/rgp.js @@ -0,0 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import PolicySerializer from '../policy'; + +export default PolicySerializer.extend(); diff --git a/ui/app/serializers/replication-attributes.js b/ui/app/serializers/replication-attributes.js new file mode 100644 index 0000000..bb973ff --- /dev/null +++ b/ui/app/serializers/replication-attributes.js @@ -0,0 +1,13 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import RESTSerializer from '@ember-data/serializer/rest'; +import { decamelize } from '@ember/string'; + +export default RESTSerializer.extend({ + keyForAttribute: function (attr) { + return decamelize(attr); + }, +}); diff --git a/ui/app/serializers/replication-mode.js b/ui/app/serializers/replication-mode.js new file mode 100644 index 0000000..b067303 --- /dev/null +++ b/ui/app/serializers/replication-mode.js @@ -0,0 +1,17 @@ +/** + * Copyright (c) HashiCorp, Inc. 
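+ *
+ * For illustration: a payload of { id: 'dr', data: { mode: 'primary' } } is
+ * reshaped below to { id: 'dr', status: { mode: 'primary' } } before the
+ * application serializer runs ('dr' and mode are example values).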
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import ApplicationSerializer from './application'; + +export default ApplicationSerializer.extend({ + normalizeResponse(store, primaryModelClass, payload, id, requestType) { + const normalizedPayload = { + id: payload.id, + status: payload.data, + }; + + return this._super(store, primaryModelClass, normalizedPayload, id, requestType); + }, +}); diff --git a/ui/app/serializers/role-aws.js b/ui/app/serializers/role-aws.js new file mode 100644 index 0000000..4d35ccd --- /dev/null +++ b/ui/app/serializers/role-aws.js @@ -0,0 +1,34 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import ApplicationSerializer from './application'; + +export default ApplicationSerializer.extend({ + primaryKey: 'name', + + extractLazyPaginatedData(payload) { + return payload.data.keys.map((key) => { + const model = { + name: key, + }; + if (payload.backend) { + model.backend = payload.backend; + } + return model; + }); + }, + + normalizeItems() { + const normalized = this._super(...arguments); + // most roles will only have one in this array, + // we'll default to the first, and keep the array on the + // model and show a warning if there's more than one so that + // they don't inadvertently save + if (normalized.credential_types) { + normalized.credential_type = normalized.credential_types[0]; + } + return normalized; + }, +}); diff --git a/ui/app/serializers/role-ssh.js b/ui/app/serializers/role-ssh.js new file mode 100644 index 0000000..b7eb493 --- /dev/null +++ b/ui/app/serializers/role-ssh.js @@ -0,0 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import RoleSerializer from './role'; + +export default RoleSerializer.extend(); diff --git a/ui/app/serializers/role.js b/ui/app/serializers/role.js new file mode 100644 index 0000000..77ed548 --- /dev/null +++ b/ui/app/serializers/role.js @@ -0,0 +1,45 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import ApplicationSerializer from './application'; + +export default ApplicationSerializer.extend({ + primaryKey: 'name', + + // Used for both pki-role (soon to be deprecated) and role-ssh + extractLazyPaginatedData(payload) { + if (payload.zero_address_roles) { + payload.zero_address_roles.forEach((role) => { + // mutate key_info object to add zero_address info + payload.data.key_info[role].zero_address = true; + }); + } + if (!payload.data.key_info) { + return payload.data.keys.map((key) => { + const model = { + name: key, + }; + if (payload.backend) { + model.backend = payload.backend; + } + return model; + }); + } + + const ret = payload.data.keys.map((key) => { + const model = { + name: key, + key_type: payload.data.key_info[key].key_type, + zero_address: payload.data.key_info[key].zero_address, + }; + if (payload.backend) { + model.backend = payload.backend; + } + return model; + }); + delete payload.data.key_info; + return ret; + }, +}); diff --git a/ui/app/serializers/secret-engine.js b/ui/app/serializers/secret-engine.js new file mode 100644 index 0000000..f6e537d --- /dev/null +++ b/ui/app/serializers/secret-engine.js @@ -0,0 +1,97 @@ +/** + * Copyright (c) HashiCorp, Inc. 
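+ *
+ * Rough sketch of the version handling below (mount data abbreviated):
+ *   normalize: { options: { version: '2' } }  =>  version lives on the model
+ *   serialize: { version: '2' }               =>  { options: { version: '2' } }
+ * kv/generic mounts with no version are defaulted to version 2 on the way out.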
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { assign } from '@ember/polyfills'; +import ApplicationSerializer from './application'; +import { EmbeddedRecordsMixin } from '@ember-data/serializer/rest'; + +export default ApplicationSerializer.extend(EmbeddedRecordsMixin, { + attrs: { + config: { embedded: 'always' }, + }, + + normalize(modelClass, data) { + // embedded records need a unique value to be stored + // set id for config to uuid of secret engine + if (data.config && !data.config.id) { + data.config.id = data.uuid; + } + // move version out of options so it can be defined on secret-engine model + data.version = data.options ? data.options.version : null; + return this._super(modelClass, data); + }, + + normalizeBackend(path, backend) { + let struct = {}; + for (const attribute in backend) { + struct[attribute] = backend[attribute]; + } + //queryRecord adds path to the response + if (path !== null && !struct.path) { + struct.path = path; + } + + if (struct.data) { + struct = assign({}, struct, struct.data); + delete struct.data; + } + // strip the trailing slash off of the path so we + // can navigate to it without getting `//` in the url + struct.id = struct.path.slice(0, -1); + return struct; + }, + + normalizeResponse(store, primaryModelClass, payload, id, requestType) { + const isCreate = requestType === 'createRecord'; + const isFind = requestType === 'findRecord'; + const isQueryRecord = requestType === 'queryRecord'; + let backends; + if (isCreate) { + backends = payload.data; + } else if (isFind) { + backends = this.normalizeBackend(id + '/', payload.data); + } else if (isQueryRecord) { + backends = this.normalizeBackend(null, payload); + } else { + // this is terrible, I'm sorry + // TODO extract AWS and SSH config saving from the secret-engine model to simplify this + if (payload.data.secret) { + backends = Object.keys(payload.data.secret).map((id) => + this.normalizeBackend(id, payload.data.secret[id]) + ); + } else if (!payload.data.path) { + backends = Object.keys(payload.data).map((id) => this.normalizeBackend(id, payload[id])); + } else { + backends = [this.normalizeBackend(payload.data.path, payload.data)]; + } + } + + return this._super(store, primaryModelClass, backends, id, requestType); + }, + + serialize(snapshot) { + const type = snapshot.record.get('engineType'); + const data = this._super(...arguments); + // move version back to options + data.options = data.version ? { version: data.version } : {}; + delete data.version; + + if (type !== 'kv' || data.options.version === 1) { + // These items are on the model, but used by the kv-v2 config endpoint only + delete data.max_versions; + delete data.cas_required; + delete data.delete_version_after; + } + // only KV uses options + if (type !== 'kv' && type !== 'generic') { + delete data.options; + } else if (!data.options.version) { + // if options.version isn't set for some reason + // default to 2 + data.options.version = 2; + } + return data; + }, +}); diff --git a/ui/app/serializers/secret-v2-version.js b/ui/app/serializers/secret-v2-version.js new file mode 100644 index 0000000..df10982 --- /dev/null +++ b/ui/app/serializers/secret-v2-version.js @@ -0,0 +1,41 @@ +/** + * Copyright (c) HashiCorp, Inc. 
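+ *
+ * For illustration, a write serialized below carries a check-and-set value
+ * derived from the secret's current version (example data assumed):
+ *   { data: { foo: 'bar' }, options: { cas: 1 } }
+ * cas falls back to 0, and options is omitted when both records failed to read.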
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { get } from '@ember/object'; +import { assign } from '@ember/polyfills'; +import ApplicationSerializer from './application'; + +export default ApplicationSerializer.extend({ + secretDataPath: 'data.data', + normalizeItems(payload) { + const path = this.secretDataPath; + // move response that is the contents of the secret from the dataPath + // to `secret_data` so it will be `secretData` in the model + payload.secret_data = get(payload, path); + payload = assign({}, payload, payload.data.metadata); + delete payload.data; + payload.path = payload.id; + // return the payload if it's expecting a single object or wrap + // it as an array if not + return payload; + }, + serialize(snapshot) { + const secret = snapshot.belongsTo('secret'); + // if both models failed to read from the server, we need to write without CAS + if (secret.record.failedServerRead && snapshot.record.failedServerRead) { + return { + data: snapshot.attr('secretData'), + }; + } + let version = secret.record.failedServerRead ? snapshot.attr('version') : secret.attr('currentVersion'); + version = version || 0; + return { + data: snapshot.attr('secretData'), + options: { + cas: version, + }, + }; + }, +}); diff --git a/ui/app/serializers/secret-v2.js b/ui/app/serializers/secret-v2.js new file mode 100644 index 0000000..7540ed2 --- /dev/null +++ b/ui/app/serializers/secret-v2.js @@ -0,0 +1,53 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { EmbeddedRecordsMixin } from '@ember-data/serializer/rest'; +import ApplicationSerializer from './application'; + +export default ApplicationSerializer.extend(EmbeddedRecordsMixin, { + attrs: { + versions: { embedded: 'always' }, + }, + secretDataPath: 'data', + normalizeItems(payload, requestType) { + if (payload.data.keys && Array.isArray(payload.data.keys)) { + // if we have data.keys, it's a list of ids, so we map over that + // and create objects with id's + return payload.data.keys.map((secret) => { + // secrets don't have an id in the response, so we need to concat the full + // path of the secret here - the id in the payload is added + // in the adapter after making the request + let fullSecretPath = payload.id ? payload.id + secret : secret; + + // if there is no path, it's a "top level" secret, so add + // a unicode space for the id + // https://github.com/hashicorp/vault/issues/3348 + if (!fullSecretPath) { + fullSecretPath = '\u0020'; + } + return { + id: fullSecretPath, + engine_id: payload.backend, + }; + }); + } + // transform versions to an array with composite IDs + if (payload.data.versions) { + payload.data.versions = Object.keys(payload.data.versions).map((version) => { + const body = payload.data.versions[version]; + body.version = version; + body.path = payload.id; + body.id = JSON.stringify([payload.backend, payload.id, version]); + return body; + }); + } + payload.data.engine_id = payload.backend; + payload.data.id = payload.id; + return requestType === 'queryRecord' ? payload.data : [payload.data]; + }, + serializeHasMany() { + return; + }, +}); diff --git a/ui/app/serializers/secret.js b/ui/app/serializers/secret.js new file mode 100644 index 0000000..895602e --- /dev/null +++ b/ui/app/serializers/secret.js @@ -0,0 +1,48 @@ +/** + * Copyright (c) HashiCorp, Inc. 
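+ *
+ * Sketch of the list normalization below, with made-up names: a LIST payload
+ *   { id: 'parent/', backend: 'kv', data: { keys: ['a', 'b/'] } }
+ * becomes [{ id: 'parent/a', backend: 'kv' }, { id: 'parent/b/', backend: 'kv' }].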
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { get } from '@ember/object'; +import ApplicationSerializer from './application'; + +export default ApplicationSerializer.extend({ + secretDataPath: 'data', + normalizeItems(payload, requestType) { + if ( + requestType !== 'queryRecord' && + payload.data && + payload.data.keys && + Array.isArray(payload.data.keys) + ) { + // if we have data.keys, it's a list of ids, so we map over that + // and create objects with id's + return payload.data.keys.map((secret) => { + // secrets don't have an id in the response, so we need to concat the full + // path of the secret here - the id in the payload is added + // in the adapter after making the request + let fullSecretPath = payload.id ? payload.id + secret : secret; + + // if there is no path, it's a "top level" secret, so add + // a unicode space for the id + // https://github.com/hashicorp/vault/issues/3348 + if (!fullSecretPath) { + fullSecretPath = '\u0020'; + } + return { id: fullSecretPath, backend: payload.backend }; + }); + } + const path = this.secretDataPath; + // move response that is the contents of the secret from the dataPath + // to `secret_data` so it will be `secretData` in the model + payload.secret_data = get(payload, path); + delete payload[path]; + // return the payload if it's expecting a single object or wrap + // it as an array if not + return requestType === 'queryRecord' ? payload : [payload]; + }, + + serialize(snapshot) { + return snapshot.attr('secretData'); + }, +}); diff --git a/ui/app/serializers/server.js b/ui/app/serializers/server.js new file mode 100644 index 0000000..0b5d41b --- /dev/null +++ b/ui/app/serializers/server.js @@ -0,0 +1,18 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import ApplicationSerializer from './application'; + +export default ApplicationSerializer.extend({ + primaryKey: 'node_id', + normalizeItems(payload) { + if (payload.data && payload.data.config) { + // rewrite the payload from data.config.servers to data.keys so we can use the application serializer + // on it + return payload.data.config.servers.slice(0); + } + return this._super(payload); + }, +}); diff --git a/ui/app/serializers/ssh.js b/ui/app/serializers/ssh.js new file mode 100644 index 0000000..06622f9 --- /dev/null +++ b/ui/app/serializers/ssh.js @@ -0,0 +1,60 @@ +/** + * Copyright (c) HashiCorp, Inc. 
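+ *
+ * Sketch of serializeAttribute's behavior below: read-only attributes, blank
+ * values, and unchanged empty objects are skipped, so an untouched optional
+ * field (e.g. an empty allowed_users, name illustrative) never overwrites the
+ * backend's stored value.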
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import RESTSerializer from '@ember-data/serializer/rest'; +import { isNone, isBlank } from '@ember/utils'; +import { assign } from '@ember/polyfills'; +import { decamelize } from '@ember/string'; + +export default RESTSerializer.extend({ + keyForAttribute: function (attr) { + return decamelize(attr); + }, + + pushPayload(store, payload) { + const transformedPayload = this.normalizeResponse( + store, + store.modelFor(payload.modelName), + payload, + payload.id, + 'findRecord' + ); + return store.push(transformedPayload); + }, + + normalizeItems(payload) { + assign(payload, payload.data); + delete payload.data; + return payload; + }, + + normalizeResponse(store, primaryModelClass, payload, id, requestType) { + const responseJSON = this.normalizeItems(payload); + const { modelName } = primaryModelClass; + const transformedPayload = { [modelName]: responseJSON }; + const ret = this._super(store, primaryModelClass, transformedPayload, id, requestType); + return ret; + }, + + serializeAttribute(snapshot, json, key, attributes) { + const val = snapshot.attr(key); + if (attributes.options.readOnly) { + return; + } + if ( + attributes.type === 'object' && + val && + Object.keys(val).length > 0 && + isNone(snapshot.changedAttributes()[key]) + ) { + return; + } + if (isBlank(val) && isNone(snapshot.changedAttributes()[key])) { + return; + } + + this._super(snapshot, json, key, attributes); + }, +}); diff --git a/ui/app/serializers/transform.js b/ui/app/serializers/transform.js new file mode 100644 index 0000000..6ea968c --- /dev/null +++ b/ui/app/serializers/transform.js @@ -0,0 +1,37 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import ApplicationSerializer from './application'; + +export default ApplicationSerializer.extend({ + normalizeResponse(store, primaryModelClass, payload, id, requestType) { + if (payload.data?.masking_character) { + payload.data.masking_character = String.fromCharCode(payload.data.masking_character); + } + return this._super(store, primaryModelClass, payload, id, requestType); + }, + + serialize() { + const json = this._super(...arguments); + if (json.template && Array.isArray(json.template)) { + // Transformations should only ever have one template + json.template = json.template[0]; + } + return json; + }, + + extractLazyPaginatedData(payload) { + return payload.data.keys.map((key) => { + const model = { + id: key, + name: key, + }; + if (payload.backend) { + model.backend = payload.backend; + } + return model; + }); + }, +}); diff --git a/ui/app/serializers/transform/alphabet.js b/ui/app/serializers/transform/alphabet.js new file mode 100644 index 0000000..c7be724 --- /dev/null +++ b/ui/app/serializers/transform/alphabet.js @@ -0,0 +1,23 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import ApplicationSerializer from '../application'; + +export default ApplicationSerializer.extend({ + primaryKey: 'name', + + extractLazyPaginatedData(payload) { + return payload.data.keys.map((key) => { + const model = { + id: key, + name: key, + }; + if (payload.backend) { + model.backend = payload.backend; + } + return model; + }); + }, +}); diff --git a/ui/app/serializers/transform/role.js b/ui/app/serializers/transform/role.js new file mode 100644 index 0000000..6369f99 --- /dev/null +++ b/ui/app/serializers/transform/role.js @@ -0,0 +1,22 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import ApplicationSerializer from '../application';
+
+export default ApplicationSerializer.extend({
+  primaryKey: 'name',
+  extractLazyPaginatedData(payload) {
+    return payload.data.keys.map((key) => {
+      const model = {
+        id: key,
+        name: key,
+      };
+      if (payload.backend) {
+        model.backend = payload.backend;
+      }
+      return model;
+    });
+  },
+});
diff --git a/ui/app/serializers/transform/template.js b/ui/app/serializers/transform/template.js
new file mode 100644
index 0000000..3a8d94c
--- /dev/null
+++ b/ui/app/serializers/transform/template.js
@@ -0,0 +1,60 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import ApplicationSerializer from '../application';
+
+export default ApplicationSerializer.extend({
+  primaryKey: 'name',
+
+  normalizeResponse(store, primaryModelClass, payload, id, requestType) {
+    if (payload.data?.alphabet) {
+      payload.data.alphabet = [payload.data.alphabet];
+    }
+    // strip out P character from any named capture groups
+    if (payload.data?.pattern) {
+      this._formatNamedCaptureGroups(payload.data, '?P', '?');
+    }
+    return this._super(store, primaryModelClass, payload, id, requestType);
+  },
+
+  serialize() {
+    const json = this._super(...arguments);
+    if (json.alphabet && Array.isArray(json.alphabet)) {
+      // Templates should only ever have one alphabet
+      json.alphabet = json.alphabet[0];
+    }
+    // add P character to any named capture groups
+    if (json.pattern) {
+      this._formatNamedCaptureGroups(json, '?', '?P');
+    }
+    return json;
+  },
+
+  _formatNamedCaptureGroups(json, replace, replaceWith) {
+    // named capture groups are handled differently between Go and js
+    // first look for named capture groups in pattern string
+    const regex = new RegExp(/\?P?(<(.+?)>)/, 'g');
+    const namedGroups = json.pattern.match(regex);
+    if (namedGroups) {
+      namedGroups.forEach((group) => {
+        // add or remove P depending on destination
+        json.pattern = json.pattern.replace(group, group.replace(replace, replaceWith));
+      });
+    }
+  },
+
+  extractLazyPaginatedData(payload) {
+    return payload.data.keys.map((key) => {
+      const model = {
+        id: key,
+        name: key,
+      };
+      if (payload.backend) {
+        model.backend = payload.backend;
+      }
+      return model;
+    });
+  },
+});
diff --git a/ui/app/serializers/transit-key.js b/ui/app/serializers/transit-key.js
new file mode 100644
index 0000000..f0799e2
--- /dev/null
+++ b/ui/app/serializers/transit-key.js
@@ -0,0 +1,70 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import RESTSerializer from '@ember-data/serializer/rest';
+import { assign } from '@ember/polyfills';
+import { decamelize } from '@ember/string';
+
+export default RESTSerializer.extend({
+  primaryKey: 'name',
+
+  keyForAttribute: function (attr) {
+    return decamelize(attr);
+  },
+
+  normalizeSecrets(payload) {
+    if (payload.data.keys && Array.isArray(payload.data.keys)) {
+      const secrets = payload.data.keys.map((secret) => ({ name: secret, backend: payload.backend }));
+      return secrets;
+    }
+    assign(payload, payload.data);
+    delete payload.data;
+    // timestamps for these key types are in seconds, convert them to milliseconds
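+    // for illustration (epoch values made up): a key map of
+    //   { 1: 1684531200 } becomes { 1: 1684531200000 }
+    // so the values can be handed straight to `new Date(...)`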
+ if ( + payload.type === 'aes256-gcm96' || + payload.type === 'chacha20-poly1305' || + payload.type === 'aes128-gcm96' + ) { + for (const version in payload.keys) { + payload.keys[version] = payload.keys[version] * 1000; + } + } + payload.id = payload.name; + return [payload]; + }, + + normalizeResponse(store, primaryModelClass, payload, id, requestType) { + const nullResponses = ['updateRecord', 'createRecord', 'deleteRecord']; + const secrets = nullResponses.includes(requestType) + ? { name: id, backend: payload.backend } + : this.normalizeSecrets(payload); + const { modelName } = primaryModelClass; + let transformedPayload = { [modelName]: secrets }; + // just return the single object because ember is picky + if (requestType === 'queryRecord') { + transformedPayload = { [modelName]: secrets[0] }; + } + + return this._super(store, primaryModelClass, transformedPayload, id, requestType); + }, + + serialize(snapshot, requestType) { + if (requestType === 'update') { + const min_decryption_version = snapshot.attr('minDecryptionVersion'); + const min_encryption_version = snapshot.attr('minEncryptionVersion'); + const deletion_allowed = snapshot.attr('deletionAllowed'); + const auto_rotate_period = snapshot.attr('autoRotatePeriod'); + return { + min_decryption_version, + min_encryption_version, + deletion_allowed, + auto_rotate_period, + }; + } else { + snapshot.id = snapshot.attr('name'); + return this._super(snapshot, requestType); + } + }, +}); diff --git a/ui/app/services/auth.js b/ui/app/services/auth.js new file mode 100644 index 0000000..170ca6e --- /dev/null +++ b/ui/app/services/auth.js @@ -0,0 +1,496 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Ember from 'ember'; +import { resolve, reject } from 'rsvp'; +import { assign } from '@ember/polyfills'; +import { isArray } from '@ember/array'; +import { computed, get } from '@ember/object'; +import { capitalize } from '@ember/string'; + +import fetch from 'fetch'; +import { getOwner } from '@ember/application'; +import Service, { inject as service } from '@ember/service'; +import getStorage from '../lib/token-storage'; +import ENV from 'vault/config/environment'; +import { supportedAuthBackends } from 'vault/helpers/supported-auth-backends'; +import { task, timeout } from 'ember-concurrency'; +const TOKEN_SEPARATOR = '☃'; +const TOKEN_PREFIX = 'vault-'; +const ROOT_PREFIX = '_root_'; +const BACKENDS = supportedAuthBackends(); + +export { TOKEN_SEPARATOR, TOKEN_PREFIX, ROOT_PREFIX }; + +export default Service.extend({ + permissions: service(), + store: service(), + router: service(), + namespaceService: service('namespace'), + + IDLE_TIMEOUT: 3 * 60e3, + expirationCalcTS: null, + isRenewing: false, + mfaErrors: null, + + get tokenExpired() { + const expiration = this.tokenExpirationDate; + return expiration ? this.now() >= expiration : null; + }, + + get activeCluster() { + return this.activeClusterId ? 
this.store.peekRecord('cluster', this.activeClusterId) : null; + }, + + // eslint-disable-next-line + tokens: computed({ + get() { + return this._tokens || this.getTokensFromStorage() || []; + }, + set(key, value) { + return (this._tokens = value); + }, + }), + + isActiveSession: computed( + 'router.currentRouteName', + 'currentToken', + 'activeCluster.{dr.isSecondary,needsInit,sealed,name}', + function () { + if (this.activeCluster) { + if (this.activeCluster.dr?.isSecondary || this.activeCluster.needsInit || this.activeCluster.sealed) { + return false; + } + if ( + this.activeCluster.name && + this.currentToken && + this.router.currentRouteName !== 'vault.cluster.auth' + ) { + return true; + } + } + return false; + } + ), + + tokenExpirationDate: computed('currentTokenName', 'expirationCalcTS', function () { + const tokenName = this.currentTokenName; + if (!tokenName) { + return; + } + const { tokenExpirationEpoch } = this.getTokenData(tokenName); + const expirationDate = new Date(0); + return tokenExpirationEpoch ? expirationDate.setUTCMilliseconds(tokenExpirationEpoch) : null; + }), + + renewAfterEpoch: computed('currentTokenName', 'expirationCalcTS', function () { + const tokenName = this.currentTokenName; + const { expirationCalcTS } = this; + const data = this.getTokenData(tokenName); + if (!tokenName || !data || !expirationCalcTS) { + return null; + } + const { ttl, renewable } = data; + // renew after last expirationCalc time + half of the ttl (in ms) + return renewable ? Math.floor((ttl * 1e3) / 2) + expirationCalcTS : null; + }), + + // returns the key for the token to use + currentTokenName: computed('activeClusterId', 'tokens', 'tokens.[]', function () { + const regex = new RegExp(this.activeClusterId); + return this.tokens.find((key) => regex.test(key)); + }), + + currentToken: computed('currentTokenName', function () { + const name = this.currentTokenName; + const data = name && this.getTokenData(name); + // data.token is undefined so that's why it returns current token undefined + return name && data ? data.token : null; + }), + + authData: computed('currentTokenName', function () { + const token = this.currentTokenName; + if (!token) { + return; + } + const backend = this.backendFromTokenName(token); + const stored = this.getTokenData(token); + + return assign(stored, { + backend: BACKENDS.findBy('type', backend), + }); + }), + + init() { + this._super(...arguments); + this.checkForRootToken(); + }, + + clusterAdapter() { + return getOwner(this).lookup('adapter:cluster'); + }, + + generateTokenName({ backend, clusterId }, policies) { + return (policies || []).includes('root') + ? `${TOKEN_PREFIX}${ROOT_PREFIX}${TOKEN_SEPARATOR}${clusterId}` + : `${TOKEN_PREFIX}${backend}${TOKEN_SEPARATOR}${clusterId}`; + }, + + backendFromTokenName(tokenName) { + return tokenName.includes(`${TOKEN_PREFIX}${ROOT_PREFIX}`) + ? 
'token' + : tokenName.slice(TOKEN_PREFIX.length).split(TOKEN_SEPARATOR)[0]; + }, + + storage(tokenName) { + if ( + tokenName && + tokenName.indexOf(`${TOKEN_PREFIX}${ROOT_PREFIX}`) === 0 && + this.environment() !== 'development' + ) { + return getStorage('memory'); + } else { + return getStorage(); + } + }, + + environment() { + return ENV.environment; + }, + + now() { + return Date.now(); + }, + + setCluster(clusterId) { + this.set('activeClusterId', clusterId); + }, + + ajax(url, method, options) { + const defaults = { + url, + method, + dataType: 'json', + headers: { + 'X-Vault-Token': this.currentToken, + }, + }; + + const namespace = + typeof options.namespace === 'undefined' ? this.namespaceService.path : options.namespace; + if (namespace) { + defaults.headers['X-Vault-Namespace'] = namespace; + } + const opts = assign(defaults, options); + + return fetch(url, { + method: opts.method || 'GET', + headers: opts.headers || {}, + }).then((response) => { + if (response.status === 204) { + return resolve(); + } else if (response.status >= 200 && response.status < 300) { + return resolve(response.json()); + } else { + return reject(response); + } + }); + }, + + renewCurrentToken() { + const namespace = this.authData.userRootNamespace; + const url = '/v1/auth/token/renew-self'; + return this.ajax(url, 'POST', { namespace }); + }, + + revokeCurrentToken() { + const namespace = this.authData.userRootNamespace; + const url = '/v1/auth/token/revoke-self'; + return this.ajax(url, 'POST', { namespace }); + }, + + calculateExpiration(resp) { + const now = this.now(); + const ttl = resp.ttl || resp.lease_duration; + const tokenExpirationEpoch = now + ttl * 1e3; + this.set('expirationCalcTS', now); + return { + ttl, + tokenExpirationEpoch, + }; + }, + + persistAuthData() { + const [firstArg, resp] = arguments; + const tokens = this.tokens; + const currentNamespace = this.namespaceService.path || ''; + let tokenName; + let options; + let backend; + if (typeof firstArg === 'string') { + tokenName = firstArg; + backend = this.backendFromTokenName(tokenName); + } else { + options = firstArg; + backend = options.backend; + } + + const currentBackend = BACKENDS.findBy('type', backend); + let displayName; + if (isArray(currentBackend.displayNamePath)) { + displayName = currentBackend.displayNamePath.map((name) => get(resp, name)).join('/'); + } else { + displayName = get(resp, currentBackend.displayNamePath); + } + + const { entity_id, policies, renewable, namespace_path } = resp; + // here we prefer namespace_path if its defined, + // else we look and see if there's already a namespace saved + // and then finally we'll use the current query param if the others + // haven't set a value yet + // all of the typeof checks are necessary because the root namespace is '' + let userRootNamespace = namespace_path && namespace_path.replace(/\/$/, ''); + // if we're logging in with token and there's no namespace_path, we can assume + // that the token belongs to the root namespace + if (backend === 'token' && !userRootNamespace) { + userRootNamespace = ''; + } + if (typeof userRootNamespace === 'undefined') { + if (this.authData) { + userRootNamespace = this.authData.userRootNamespace; + } + } + if (typeof userRootNamespace === 'undefined') { + userRootNamespace = currentNamespace; + } + const data = { + userRootNamespace, + displayName, + backend: currentBackend, + token: resp.client_token || get(resp, currentBackend.tokenPath), + policies, + renewable, + entity_id, + }; + + tokenName = this.generateTokenName( + { 
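+        // produces e.g. 'vault-userpass☃1', or 'vault-_root_☃1' when the
+        // policies include root (backend and cluster id values illustrative)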
+ backend, + clusterId: (options && options.clusterId) || this.activeClusterId, + }, + resp.policies + ); + + if (resp.renewable) { + assign(data, this.calculateExpiration(resp)); + } + + if (!data.displayName) { + data.displayName = (this.getTokenData(tokenName) || {}).displayName; + } + tokens.addObject(tokenName); + this.set('tokens', tokens); + this.set('allowExpiration', false); + this.setTokenData(tokenName, data); + return resolve({ + namespace: currentNamespace || data.userRootNamespace, + token: tokenName, + isRoot: policies.includes('root'), + }); + }, + + setTokenData(token, data) { + this.storage(token).setItem(token, data); + }, + + getTokenData(token) { + return this.storage(token).getItem(token); + }, + + removeTokenData(token) { + return this.storage(token).removeItem(token); + }, + + renew() { + const tokenName = this.currentTokenName; + const currentlyRenewing = this.isRenewing; + if (currentlyRenewing) { + return; + } + this.isRenewing = true; + return this.renewCurrentToken().then( + (resp) => { + this.isRenewing = false; + return this.persistAuthData(tokenName, resp.data || resp.auth); + }, + (e) => { + this.isRenewing = false; + throw e; + } + ); + }, + + checkShouldRenew: task(function* () { + while (true) { + if (Ember.testing) { + return; + } + yield timeout(5000); + if (this.shouldRenew()) { + yield this.renew(); + } + } + }).on('init'), + shouldRenew() { + const now = this.now(); + const lastFetch = this.lastFetch; + const renewTime = this.renewAfterEpoch; + if (!this.currentTokenName || this.tokenExpired || this.allowExpiration || !renewTime) { + return false; + } + if (lastFetch && now - lastFetch >= this.IDLE_TIMEOUT) { + this.set('allowExpiration', true); + return false; + } + if (now >= renewTime) { + return true; + } + return false; + }, + + setLastFetch(timestamp) { + const now = this.now(); + this.set('lastFetch', timestamp); + // if expiration was allowed and we're over half the ttl we want to go ahead and renew here + if (this.allowExpiration && now >= this.renewAfterEpoch) { + this.renew(); + } + this.set('allowExpiration', false); + }, + + getTokensFromStorage(filterFn) { + return this.storage() + .keys() + .reject((key) => { + return key.indexOf(TOKEN_PREFIX) !== 0 || (filterFn && filterFn(key)); + }); + }, + + checkForRootToken() { + if (this.environment() === 'development') { + return; + } + + this.getTokensFromStorage().forEach((key) => { + const data = this.getTokenData(key); + if (data && data.policies && data.policies.includes('root')) { + this.removeTokenData(key); + } + }); + }, + + _parseMfaResponse(mfa_requirement) { + // mfa_requirement response comes back in a shape that is not easy to work with + // convert to array of objects and add necessary properties to satisfy the view + if (mfa_requirement) { + const { mfa_request_id, mfa_constraints } = mfa_requirement; + const constraints = []; + for (const key in mfa_constraints) { + const methods = mfa_constraints[key].any; + const isMulti = methods.length > 1; + + // friendly label for display in MfaForm + methods.forEach((m) => { + const typeFormatted = m.type === 'totp' ? m.type.toUpperCase() : capitalize(m.type); + m.label = `${typeFormatted} ${m.uses_passcode ? 'passcode' : 'push notification'}`; + }); + constraints.push({ + name: key, + methods, + selectedMethod: isMulti ? 
null : methods[0], + }); + } + + return { + mfa_requirement: { mfa_request_id, mfa_constraints: constraints }, + }; + } + return {}; + }, + + async authenticate(/*{clusterId, backend, data, selectedAuth}*/) { + const [options] = arguments; + const adapter = this.clusterAdapter(); + const resp = await adapter.authenticate(options); + + if (resp.auth?.mfa_requirement) { + return this._parseMfaResponse(resp.auth?.mfa_requirement); + } + + return this.authSuccess(options, resp.auth || resp.data); + }, + + async totpValidate({ mfa_requirement, ...options }) { + const resp = await this.clusterAdapter().mfaValidate(mfa_requirement); + return this.authSuccess(options, resp.auth || resp.data); + }, + + async authSuccess(options, response) { + // persist selectedAuth to localStorage to rehydrate auth form on logout + localStorage.setItem('selectedAuth', options.selectedAuth); + const authData = await this.persistAuthData(options, response, this.namespaceService.path); + await this.permissions.getPaths.perform(); + return authData; + }, + + handleError(e) { + if (e.errors) { + return e.errors.map((error) => { + if (error.detail) { + return error.detail; + } + return error; + }); + } + return [e]; + }, + + getAuthType() { + // check localStorage first + const selectedAuth = localStorage.getItem('selectedAuth'); + if (selectedAuth) return selectedAuth; + // fallback to authData which discerns backend type from token + return this.authData ? this.authData.backend.type : null; + }, + + deleteCurrentToken() { + const tokenName = this.currentTokenName; + this.deleteToken(tokenName); + this.removeTokenData(tokenName); + }, + + deleteToken(tokenName) { + const tokenNames = this.tokens.without(tokenName); + this.removeTokenData(tokenName); + this.set('tokens', tokenNames); + }, + + getOktaNumberChallengeAnswer(nonce, mount) { + const url = `/v1/auth/${mount}/verify/${nonce}`; + return this.ajax(url, 'GET', {}).then( + (resp) => { + return resp.data.correct_answer; + }, + (e) => { + // if error status is 404, return and keep polling for a response + if (e.status === 404) { + return null; + } else { + throw e; + } + } + ); + }, +}); diff --git a/ui/app/services/console.js b/ui/app/services/console.js new file mode 100644 index 0000000..9b4d6bd --- /dev/null +++ b/ui/app/services/console.js @@ -0,0 +1,120 @@ +/** + * Copyright (c) HashiCorp, Inc. 
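+ *
+ * For illustration, each console command maps onto a plain request:
+ *   read secret/foo   ->  GET /v1/secret/foo
+ *   write secret/foo  ->  POST /v1/secret/foo
+ *   list secret/      ->  GET /v1/secret/?list=true
+ * (the secret/foo path is an example; see the VERBS map below)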
+ * SPDX-License-Identifier: MPL-2.0 + */ + +// Low level service that allows users to input paths to make requests to vault +// this service provides the UI synecdote to the cli commands read, write, delete, and list +import { filterBy } from '@ember/object/computed'; + +import Service from '@ember/service'; + +import { getOwner } from '@ember/application'; +import { computed } from '@ember/object'; +import { shiftCommandIndex } from 'vault/lib/console-helpers'; +import { encodePath } from 'vault/utils/path-encoding-helpers'; + +export function sanitizePath(path) { + //remove whitespace + remove trailing and leading slashes + return path.trim().replace(/^\/+|\/+$/g, ''); +} +export function ensureTrailingSlash(path) { + return path.replace(/(\w+[^/]$)/g, '$1/'); +} + +const VERBS = { + read: 'GET', + list: 'GET', + write: 'POST', + delete: 'DELETE', +}; + +export default Service.extend({ + isOpen: false, + + adapter() { + return getOwner(this).lookup('adapter:console'); + }, + commandHistory: filterBy('log', 'type', 'command'), + log: computed(function () { + return []; + }), + commandIndex: null, + + shiftCommandIndex(keyCode, setCommandFn = () => {}) { + const [newIndex, newCommand] = shiftCommandIndex(keyCode, this.commandHistory, this.commandIndex); + if (newCommand !== undefined && newIndex !== undefined) { + this.set('commandIndex', newIndex); + setCommandFn(newCommand); + } + }, + + clearLog(clearAll = false) { + const log = this.log; + let history; + if (!clearAll) { + history = this.commandHistory.slice(); + history.setEach('hidden', true); + } + log.clear(); + if (history) { + log.addObjects(history); + } + }, + + logAndOutput(command, logContent) { + const log = this.log; + if (command) { + log.pushObject({ type: 'command', content: command }); + this.set('commandIndex', null); + } + if (logContent) { + log.pushObject(logContent); + } + }, + + ajax(operation, path, options = {}) { + const verb = VERBS[operation]; + const adapter = this.adapter(); + const url = adapter.buildURL(encodePath(path)); + const { data, wrapTTL } = options; + return adapter.ajax(url, verb, { + data, + wrapTTL, + }); + }, + + kvGet(path, data, flags = {}) { + const { wrapTTL, metadata } = flags; + // Split on first / to find backend and secret path + const pathSegment = metadata ? 'metadata' : 'data'; + const [backend, secretPath] = path.split(/\/(.+)?/); + const kvPath = `${backend}/${pathSegment}/${secretPath}`; + return this.ajax('read', sanitizePath(kvPath), { wrapTTL }); + }, + + read(path, data, flags) { + const wrapTTL = flags?.wrapTTL; + return this.ajax('read', sanitizePath(path), { wrapTTL }); + }, + + write(path, data, flags) { + const wrapTTL = flags?.wrapTTL; + return this.ajax('write', sanitizePath(path), { data, wrapTTL }); + }, + + delete(path) { + return this.ajax('delete', sanitizePath(path)); + }, + + list(path, data, flags) { + const wrapTTL = flags?.wrapTTL; + const listPath = ensureTrailingSlash(sanitizePath(path)); + return this.ajax('list', listPath, { + data: { + list: true, + }, + wrapTTL, + }); + }, +}); diff --git a/ui/app/services/control-group.js b/ui/app/services/control-group.js new file mode 100644 index 0000000..7078147 --- /dev/null +++ b/ui/app/services/control-group.js @@ -0,0 +1,140 @@ +/** + * Copyright (c) HashiCorp, Inc. 
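+ *
+ * Stored control group tokens are keyed as
+ *   `vault:cg-${accessor}☃${creation_path}`
+ * e.g. 'vault:cg-abcd1234☃secret/data/foo' (accessor and path made up).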
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Service, { inject as service } from '@ember/service'; +import RSVP from 'rsvp'; +import ControlGroupError from 'vault/lib/control-group-error'; +import getStorage from 'vault/lib/token-storage'; +import parseURL from 'core/utils/parse-url'; + +const CONTROL_GROUP_PREFIX = 'vault:cg-'; +const TOKEN_SEPARATOR = '☃'; + +// list of endpoints that return wrapped responses +// without `wrap-ttl` +const WRAPPED_RESPONSE_PATHS = [ + 'sys/wrapping/rewrap', + 'sys/wrapping/wrap', + 'sys/replication/performance/primary/secondary-token', + 'sys/replication/dr/primary/secondary-token', +]; + +const storageKey = (accessor, path) => { + return `${CONTROL_GROUP_PREFIX}${accessor}${TOKEN_SEPARATOR}${path}`; +}; + +export { storageKey, CONTROL_GROUP_PREFIX, TOKEN_SEPARATOR }; +export default Service.extend({ + version: service(), + router: service(), + + storage() { + return getStorage(); + }, + + keyFromAccessor(accessor) { + const keys = this.storage().keys() || []; + const returnKey = keys + .filter((k) => k.startsWith(CONTROL_GROUP_PREFIX)) + .find((key) => key.replace(CONTROL_GROUP_PREFIX, '').startsWith(accessor)); + return returnKey ? returnKey : null; + }, + + storeControlGroupToken(info) { + const key = storageKey(info.accessor, info.creation_path); + this.storage().setItem(key, info); + }, + + deleteControlGroupToken(accessor) { + this.unmarkTokenForUnwrap(); + const key = this.keyFromAccessor(accessor); + this.storage().removeItem(key); + }, + + deleteTokens() { + const keys = this.storage().keys() || []; + keys.filter((k) => k.startsWith(CONTROL_GROUP_PREFIX)).forEach((key) => this.storage().removeItem(key)); + }, + + wrapInfoForAccessor(accessor) { + const key = this.keyFromAccessor(accessor); + return key ? this.storage().getItem(key) : null; + }, + + tokenToUnwrap: null, + markTokenForUnwrap(accessor) { + this.set('tokenToUnwrap', this.wrapInfoForAccessor(accessor)); + }, + + unmarkTokenForUnwrap() { + this.set('tokenToUnwrap', null); + }, + + tokenForUrl(url) { + if (this.version.isOSS) { + return null; + } + let pathForUrl = parseURL(url).pathname; + pathForUrl = pathForUrl.replace('/v1/', ''); + const tokenInfo = this.tokenToUnwrap; + if (tokenInfo && tokenInfo.creation_path === pathForUrl) { + const { token, accessor, creation_time } = tokenInfo; + return { token, accessor, creationTime: creation_time }; + } + return null; + }, + + checkForControlGroup(callbackArgs, response, wasWrapTTLRequested) { + const creationPath = response && response?.wrap_info?.creation_path; + if ( + this.version.isOSS || + wasWrapTTLRequested || + !response || + (creationPath && WRAPPED_RESPONSE_PATHS.includes(creationPath)) || + !response.wrap_info + ) { + return RSVP.resolve(...callbackArgs); + } + const error = new ControlGroupError(response.wrap_info); + return RSVP.reject(error); + }, + + handleError(error) { + const { accessor, token, creation_path, creation_time, ttl } = error; + const data = { accessor, token, creation_path, creation_time, ttl }; + data.uiParams = { url: this.router.currentURL }; + this.storeControlGroupToken(data); + return this.router.transitionTo('vault.cluster.access.control-group-accessor', accessor); + }, + + // Handle error from non-read request (eg. 
POST or UPDATE) so it can be retried + saveTokenFromError(error) { + const { accessor, token, creation_path, creation_time, ttl } = error; + const data = { accessor, token, creation_path, creation_time, ttl }; + this.storeControlGroupToken(data); + // In the read flow the accessor is marked once the user clicks "Visit" from the control group page + // On a POST/UPDATE flow we don't redirect, so we need to mark automatically so that on the next try + // the request will attempt unwrap. + this.markTokenForUnwrap(accessor); + }, + + logFromError(error) { + const { accessor, token, creation_path, creation_time, ttl } = error; + const data = { accessor, token, creation_path, creation_time, ttl }; + this.storeControlGroupToken(data); + + const href = this.router.urlFor('vault.cluster.access.control-group-accessor', accessor); + const lines = [ + `A Control Group was encountered at ${error.creation_path}.`, + `The Control Group Token is ${error.token}.`, + `The Accessor is ${error.accessor}.`, + `Visit ${href} for more details.`, + ]; + return { + type: 'error-with-html', + content: lines.join('\n'), + }; + }, +}); diff --git a/ui/app/services/csp-event.js b/ui/app/services/csp-event.js new file mode 100644 index 0000000..54d8792 --- /dev/null +++ b/ui/app/services/csp-event.js @@ -0,0 +1,36 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +/*eslint-disable no-constant-condition*/ +import { computed } from '@ember/object'; + +import Service from '@ember/service'; +import { task, waitForEvent } from 'ember-concurrency'; + +export default Service.extend({ + events: computed(function () { + return []; + }), + connectionViolations: computed('events.@each.violatedDirective', function () { + return this.events.filter((e) => e.violatedDirective.startsWith('connect-src')); + }), + + attach() { + this.monitor.perform(); + }, + + remove() { + this.monitor.cancelAll(); + }, + + monitor: task(function* () { + this.events.clear(); + + while (true) { + const event = yield waitForEvent(window.document, 'securitypolicyviolation'); + this.events.addObject(event); + } + }), +}); diff --git a/ui/app/services/current-cluster.js b/ui/app/services/current-cluster.js new file mode 100644 index 0000000..2aa61b5 --- /dev/null +++ b/ui/app/services/current-cluster.js @@ -0,0 +1,14 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Service from '@ember/service'; + +export default Service.extend({ + cluster: null, + + setCluster(cluster) { + this.set('cluster', cluster); + }, +}); diff --git a/ui/app/services/download.ts b/ui/app/services/download.ts new file mode 100644 index 0000000..5f1a26b --- /dev/null +++ b/ui/app/services/download.ts @@ -0,0 +1,67 @@ +/** + * Copyright (c) HashiCorp, Inc. 
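+ *
+ * Rough usage sketch (filename and content assumed):
+ *   this.download.csv('client counts', csvString);
+ *   // saves 'client-counts.csv' with a text/csv MIME type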
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import Service from '@ember/service';
+import timestamp from 'core/utils/timestamp';
+
+interface Extensions {
+  csv: string;
+  hcl: string;
+  sentinel: string;
+  json: string;
+  pem: string;
+  txt: string;
+}
+
+// https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types/Common_types
+const EXTENSION_TO_MIME: Extensions = {
+  csv: 'text/csv',
+  hcl: 'text/plain',
+  sentinel: 'text/plain',
+  json: 'application/json',
+  pem: 'application/x-pem-file',
+  txt: 'text/plain',
+};
+
+export default class DownloadService extends Service {
+  download(filename: string, content: string, extension: string) {
+    // replace spaces with hyphens, append extension to filename;
+    // fall back to a timestamped name when no filename is given
+    const formattedFilename = filename
+      ? `${filename.replace(/\s+/g, '-')}.${extension}`
+      : `vault-data-${timestamp.now().toISOString()}.${extension}`;
+
+    // map extension to MIME type or use default
+    const mimetype = EXTENSION_TO_MIME[extension as keyof Extensions] || 'text/plain';
+
+    // commence download
+    const { document, URL } = window;
+    const downloadElement = document.createElement('a');
+    const data = new File([content], formattedFilename, { type: mimetype });
+    downloadElement.download = formattedFilename;
+    downloadElement.href = URL.createObjectURL(data);
+    document.body.appendChild(downloadElement);
+    downloadElement.click();
+    URL.revokeObjectURL(downloadElement.href);
+    downloadElement.remove();
+    return formattedFilename;
+  }
+
+  // SAMPLE CSV FORMAT ('content' argument)
+  // Must be a string with each row \n separated and each column comma separated
+  // 'Namespace path,Authentication method,Total clients,Entity clients,Non-entity clients\n
+  // namespacelonglonglong4/,,191,171,20\n
+  // namespacelonglonglong4/,auth/method/uMGBU,35,20,15\n'
+  csv(filename: string, content: string) {
+    this.download(filename, content, 'csv');
+  }
+
+  pem(filename: string, content: string) {
+    this.download(filename, content, 'pem');
+  }
+
+  miscExtension(filename: string, content: string, extension: string) {
+    this.download(filename, content, extension);
+  }
+}
diff --git a/ui/app/services/feature-flag.js b/ui/app/services/feature-flag.js
new file mode 100644
index 0000000..dd00d5d
--- /dev/null
+++ b/ui/app/services/feature-flag.js
@@ -0,0 +1,24 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import Service from '@ember/service';
+
+const FLAGS = {
+  vaultCloudNamespace: 'VAULT_CLOUD_ADMIN_NAMESPACE',
+};
+
+export default Service.extend({
+  featureFlags: null,
+  setFeatureFlags(flags) {
+    this.set('featureFlags', flags);
+  },
+
+  get managedNamespaceRoot() {
+    if (this.featureFlags && this.featureFlags.includes(FLAGS.vaultCloudNamespace)) {
+      return 'admin';
+    }
+    return null;
+  },
+});
diff --git a/ui/app/services/flash-messages.ts b/ui/app/services/flash-messages.ts
new file mode 100644
index 0000000..708fad4
--- /dev/null
+++ b/ui/app/services/flash-messages.ts
@@ -0,0 +1,13 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import FlashMessages from 'ember-cli-flash/services/flash-messages'; + +/* +we extend the ember-cli-flash service here so each ember engine can +import 'flash-messages' as a dependency giving it access to the + template in the main app's cluster.hbs file +*/ +export default class FlashMessageService extends FlashMessages {} diff --git a/ui/app/services/namespace.js b/ui/app/services/namespace.js new file mode 100644 index 0000000..02926dd --- /dev/null +++ b/ui/app/services/namespace.js @@ -0,0 +1,68 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { alias, equal } from '@ember/object/computed'; +import Service, { inject as service } from '@ember/service'; +import { task } from 'ember-concurrency'; + +const ROOT_NAMESPACE = ''; +export default Service.extend({ + store: service(), + auth: service(), + userRootNamespace: alias('auth.authData.userRootNamespace'), + //populated by the query param on the cluster route + path: '', + // list of namespaces available to the current user under the + // current namespace + accessibleNamespaces: null, + + inRootNamespace: equal('path', ROOT_NAMESPACE), + + setNamespace(path) { + if (!path) { + this.set('path', ''); + return; + } + this.set('path', path); + }, + + findNamespacesForUser: task(function* () { + // uses the adapter and the raw response here since + // models get wiped when switching namespaces and we + // want to keep track of these separately + const store = this.store; + const adapter = store.adapterFor('namespace'); + const userRoot = this.auth.authData.userRootNamespace; + try { + const ns = yield adapter.findAll(store, 'namespace', null, { + adapterOptions: { + forUser: true, + namespace: userRoot, + }, + }); + const keys = ns.data.keys || []; + this.set( + 'accessibleNamespaces', + keys.map((n) => { + let fullNS = n; + // if the user's root isn't '', then we need to construct + // the paths so they connect to the user root to the list + // otherwise using the current ns to grab the correct leaf + // node in the graph doesn't work + if (userRoot) { + fullNS = `${userRoot}/${n}`; + } + return fullNS.replace(/\/$/, ''); + }) + ); + } catch (e) { + //do nothing here + } + }).drop(), + + reset() { + this.set('accessibleNamespaces', null); + }, +}); diff --git a/ui/app/services/path-help.js b/ui/app/services/path-help.js new file mode 100644 index 0000000..d4aca10 --- /dev/null +++ b/ui/app/services/path-help.js @@ -0,0 +1,377 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +/* + This service is used to pull an OpenAPI document describing the + shape of data at a specific path to hydrate a model with attrs it + has less (or no) information about. 
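+
+  For illustration, the help document comes from appending ?help=1 to the
+  mount path, e.g. GET /v1/auth/userpass/?help=1 (mount path assumed), whose
+  response includes an openapi.paths map that getPaths below reduces into
+  per-path metadata.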
+*/ +import Model from '@ember-data/model'; +import Service from '@ember/service'; +import { encodePath } from 'vault/utils/path-encoding-helpers'; +import { getOwner } from '@ember/application'; +import { assign } from '@ember/polyfills'; +import { expandOpenApiProps, combineAttributes } from 'vault/utils/openapi-to-attrs'; +import fieldToAttrs from 'vault/utils/field-to-attrs'; +import { resolve, reject } from 'rsvp'; +import { debug } from '@ember/debug'; +import { dasherize, capitalize } from '@ember/string'; +import { computed } from '@ember/object'; // eslint-disable-line +import { singularize } from 'ember-inflector'; +import { withModelValidations } from 'vault/decorators/model-validations'; + +import generatedItemAdapter from 'vault/adapters/generated-item-list'; +export function sanitizePath(path) { + // remove whitespace + remove trailing and leading slashes + return path.trim().replace(/^\/+|\/+$/g, ''); +} + +export default Service.extend({ + attrs: null, + dynamicApiPath: '', + ajax(url, options = {}) { + const appAdapter = getOwner(this).lookup(`adapter:application`); + const { data } = options; + return appAdapter.ajax(url, 'GET', { + data, + }); + }, + + getNewModel(modelType, backend, apiPath, itemType) { + const owner = getOwner(this); + const modelName = `model:${modelType}`; + + const modelFactory = owner.factoryFor(modelName); + let newModel, helpUrl; + // if we have a factory, we need to take the existing model into account + if (modelFactory) { + debug(`Model factory found for ${modelType}`); + newModel = modelFactory.class; + const modelProto = newModel.proto(); + if (newModel.merged || modelProto.useOpenAPI !== true) { + return resolve(); + } + + helpUrl = modelProto.getHelpUrl(backend); + return this.registerNewModelWithProps(helpUrl, backend, newModel, modelName); + } else { + debug(`Creating new Model for ${modelType}`); + newModel = Model.extend({}); + } + + // we don't have an apiPath for dynamic secrets + // and we don't need paths for them yet + if (!apiPath) { + helpUrl = newModel.proto().getHelpUrl(backend); + return this.registerNewModelWithProps(helpUrl, backend, newModel, modelName); + } + + // use paths to dynamically create our openapi help url + // if we have a brand new model + return this.getPaths(apiPath, backend, itemType) + .then((pathInfo) => { + const adapterFactory = owner.factoryFor(`adapter:${modelType}`); + // if we have an adapter already use that, otherwise create one + if (!adapterFactory) { + debug(`Creating new adapter for ${modelType}`); + const adapter = this.getNewAdapter(pathInfo, itemType); + owner.register(`adapter:${modelType}`, adapter); + } + let path; + // if we have an item we want the create info for that itemType + const paths = itemType ? this.filterPathsByItemType(pathInfo, itemType) : pathInfo.paths; + const createPath = paths.find((path) => path.operations.includes('post') && path.action !== 'Delete'); + path = createPath.path; + path = path.includes('{') ? path.slice(0, path.indexOf('{') - 1) + '/example' : path; + if (!path) { + // TODO: we don't know if path will ever be falsey + // if it is never falsey we can remove this. 
+            return reject();
+          }
+
+          helpUrl = `/v1/${apiPath}${path.slice(1)}?help=true`;
+          pathInfo.paths = paths;
+          newModel = newModel.extend({ paths: pathInfo });
+          return this.registerNewModelWithProps(helpUrl, backend, newModel, modelName);
+        })
+        .catch((err) => {
+          // TODO: we should handle the error better here
+          console.error(err); // eslint-disable-line
+        });
+  },
+
+  reducePathsByPathName(pathInfo, currentPath) {
+    const pathName = currentPath[0];
+    const pathDetails = currentPath[1];
+    const displayAttrs = pathDetails['x-vault-displayAttrs'];
+
+    if (!displayAttrs) {
+      return pathInfo;
+    }
+
+    let itemType, itemName;
+    if (displayAttrs.itemType) {
+      itemType = displayAttrs.itemType;
+      let items = itemType.split(':');
+      itemName = items[items.length - 1];
+      items = items.map((item) => dasherize(singularize(item.toLowerCase())));
+      itemType = items.join('~*');
+    }
+
+    if (itemType && !pathInfo.itemTypes.includes(itemType)) {
+      pathInfo.itemTypes.push(itemType);
+    }
+
+    const operations = [];
+    if (pathDetails.get) {
+      operations.push('get');
+    }
+    if (pathDetails.post) {
+      operations.push('post');
+    }
+    if (pathDetails.delete) {
+      operations.push('delete');
+    }
+    if (pathDetails.get && pathDetails.get.parameters && pathDetails.get.parameters[0].name === 'list') {
+      operations.push('list');
+    }
+
+    pathInfo.paths.push({
+      path: pathName,
+      itemType: itemType || displayAttrs.itemType,
+      itemName: itemName || pathInfo.itemType || displayAttrs.itemType,
+      operations,
+      action: displayAttrs.action,
+      navigation: displayAttrs.navigation === true,
+      param: pathName.includes('{') ? pathName.split('{')[1].split('}')[0] : false,
+    });
+
+    return pathInfo;
+  },
+
+  filterPathsByItemType(pathInfo, itemType) {
+    if (!itemType) {
+      return pathInfo.paths;
+    }
+    return pathInfo.paths.filter((path) => {
+      return itemType === path.itemType;
+    });
+  },
+
+  getPaths(apiPath, backend, itemType, itemID) {
+    const debugString =
+      itemID && itemType
+        ? `Fetching relevant paths for ${backend} ${itemType} ${itemID} from ${apiPath}`
+        : `Fetching relevant paths for ${backend} ${itemType} from ${apiPath}`;
+    debug(debugString);
+    return this.ajax(`/v1/${apiPath}?help=1`, backend).then((help) => {
+      const pathInfo = help.openapi.paths;
+      const paths = Object.entries(pathInfo);
+
+      return paths.reduce(this.reducePathsByPathName, {
+        apiPath,
+        itemType,
+        itemTypes: [],
+        paths: [],
+        itemID,
+      });
+    });
+  },
+
+  // Makes a call to grab the OpenAPI document.
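+  // A trimmed sketch of the response shape relied on here and in getProps
+  // below (assumed from usage, not the full OpenAPI document):
+  //   { openapi: { paths: { '/roles/{name}': { parameters: [ ... ],
+  //       post: { requestBody: { content: { 'application/json': { schema } } } } } },
+  //     components: { schemas: { ... } } } }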
+  // Returns relevant information from OpenAPI
+  // as determined by the expandOpenApiProps util
+  getProps(helpUrl, backend) {
+    // add name of thing you want
+    debug(`Fetching schema properties for ${backend} from ${helpUrl}`);
+
+    return this.ajax(helpUrl, backend).then((help) => {
+      // paths is an array but it will have a single entry
+      // for the scope we're in
+      const path = Object.keys(help.openapi.paths)[0]; // do this or look at name
+      const pathInfo = help.openapi.paths[path];
+      const params = pathInfo.parameters;
+      const paramProp = {};
+
+      // include url params
+      if (params) {
+        const { name, schema, description } = params[0];
+        const label = capitalize(name.split('_').join(' '));
+
+        paramProp[name] = {
+          'x-vault-displayAttrs': {
+            name: label,
+            group: 'default',
+          },
+          type: schema.type,
+          description: description,
+          isId: true,
+        };
+      }
+
+      let props = {};
+      const schema = pathInfo?.post?.requestBody?.content['application/json'].schema;
+      if (schema.$ref) {
+        // $ref will be shaped like `#/components/schemas/MyResponseType`
+        // which maps to the location of the item within the openApi response
+        const loc = schema.$ref.replace('#/', '').split('/');
+        props = loc.reduce((prev, curr) => {
+          return prev[curr] || {};
+        }, help.openapi).properties;
+      } else if (schema.properties) {
+        props = schema.properties;
+      }
+      // put url params (e.g. {name}, {role})
+      // at the front of the props list
+      const newProps = assign({}, paramProp, props);
+      return expandOpenApiProps(newProps);
+    });
+  },
+
+  getNewAdapter(pathInfo, itemType) {
+    // we need list and create paths to set the correct urls for actions
+    const paths = this.filterPathsByItemType(pathInfo, itemType);
+    let { apiPath } = pathInfo;
+    const getPath = paths.find((path) => path.operations.includes('get'));
+
+    // the action might be "Generate" or something like that, so we'll grab the first post endpoint if there
+    // isn't one with "Create"
+    // TODO: look into a more sophisticated way to determine the create endpoint
+    const createPath = paths.find((path) => path.action === 'Create' || path.operations.includes('post'));
+    const deletePath = paths.find((path) => path.operations.includes('delete'));
+
+    return generatedItemAdapter.extend({
+      urlForItem(id, isList, dynamicApiPath) {
+        const itemType = getPath.path.slice(1);
+        let url;
+        id = encodePath(id);
+        // the apiPath changes when you switch between routes, but the apiPath variable does not unless the model is reloaded
+        // overwrite apiPath if dynamicApiPath exists.
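+        // (illustrative: an engine whose adapter was built for the mount kv/
+        // but is now viewed at a custom mount my-kv/ passes 'my-kv/' here)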
+        // dynamicApiPath comes from the model->adapter
+        if (dynamicApiPath) {
+          apiPath = dynamicApiPath;
+        }
+        // isList indicates whether we are viewing the list page
+        // of a top-level item such as userpass
+        if (isList) {
+          url = `${this.buildURL()}/${apiPath}${itemType}/`;
+        } else {
+          // build the URL for the show page of a nested item
+          // such as a userpass group
+          url = `${this.buildURL()}/${apiPath}${itemType}/${id}`;
+        }
+
+        return url;
+      },
+
+      urlForQueryRecord(id, modelName) {
+        return this.urlForItem(id, modelName);
+      },
+
+      urlForUpdateRecord(id) {
+        const itemType = createPath.path.slice(1, createPath.path.indexOf('{') - 1);
+        return `${this.buildURL()}/${apiPath}${itemType}/${id}`;
+      },
+
+      urlForCreateRecord(modelType, snapshot) {
+        const id = snapshot.record.mutableId; // computed property that returns either id or private settable _id value
+        const path = createPath.path.slice(1, createPath.path.indexOf('{') - 1);
+        return `${this.buildURL()}/${apiPath}${path}/${id}`;
+      },
+
+      urlForDeleteRecord(id) {
+        const path = deletePath.path.slice(1, deletePath.path.indexOf('{') - 1);
+        return `${this.buildURL()}/${apiPath}${path}/${id}`;
+      },
+
+      createRecord(store, type, snapshot) {
+        return this._super(...arguments).then((response) => {
+          // if the server does not return an id and one has not been set on the model, we need to set it manually from the mutableId value
+          if (!response?.id && !snapshot.record.id) {
+            snapshot.record.id = snapshot.record.mutableId;
+            snapshot.id = snapshot.record.id;
+          }
+          return response;
+        });
+      },
+    });
+  },
+
+  registerNewModelWithProps(helpUrl, backend, newModel, modelName) {
+    return this.getProps(helpUrl, backend).then((props) => {
+      const { attrs, newFields } = combineAttributes(newModel.attributes, props);
+      const owner = getOwner(this);
+      newModel = newModel.extend(attrs, { newFields });
+      // if our newModel doesn't have fieldGroups already
+      // we need to create them
+      try {
+        // Initialize prototype to access field groups
+        let fieldGroups = newModel.proto().fieldGroups;
+        if (!fieldGroups) {
+          debug(`Constructing fieldGroups for ${backend}`);
+          fieldGroups = this.getFieldGroups(newModel);
+          newModel = newModel.extend({ fieldGroups });
+          // Build and add validations on model
+          // NOTE: For initial phase, initialize validations only for user pass auth
+          if (backend === 'userpass') {
+            const validations = fieldGroups.reduce((obj, element) => {
+              if (element.default) {
+                element.default.forEach((v) => {
+                  const key = v.options.fieldValue || v.name;
+                  obj[key] = [{ type: 'presence', message: `${v.name} can't be blank` }];
+                });
+              }
+              return obj;
+            }, {});
+            @withModelValidations(validations)
+            class GeneratedItemModel extends newModel {}
+            newModel = GeneratedItemModel;
+          }
+        }
+      } catch (err) {
+        // eat the error, fieldGroups is computed in the model definition
+      }
+      // attempting to set the id prop on a model will trigger an error
+      // this computed will be used in place of the id fieldValue -- see openapi-to-attrs
+      newModel.reopen({
+        mutableId: computed('id', '_id', {
+          get() {
+            return this._id || this.id;
+          },
+          set(key, value) {
+            return (this._id = value);
+          },
+        }),
+      });
+      newModel.reopenClass({ merged: true });
+      owner.unregister(modelName);
+      owner.register(modelName, newModel);
+    });
+  },
+  getFieldGroups(newModel) {
+    const groups = {
+      default: [],
+    };
+    const fieldGroups = [];
+    newModel.attributes.forEach((attr) => {
+      // if the attr comes in with a fieldGroup from OpenAPI,
+      // add it to that group
+      if
(attr.options.fieldGroup) { + if (groups[attr.options.fieldGroup]) { + groups[attr.options.fieldGroup].push(attr.name); + } else { + groups[attr.options.fieldGroup] = [attr.name]; + } + } else { + // otherwise just add that attr to the default group + groups.default.push(attr.name); + } + }); + for (const group in groups) { + fieldGroups.push({ [group]: groups[group] }); + } + return fieldToAttrs(newModel, fieldGroups); + }, +}); diff --git a/ui/app/services/permissions.js b/ui/app/services/permissions.js new file mode 100644 index 0000000..324c17f --- /dev/null +++ b/ui/app/services/permissions.js @@ -0,0 +1,185 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Service, { inject as service } from '@ember/service'; +import { task } from 'ember-concurrency'; + +const API_PATHS = { + access: { + methods: 'sys/auth', + mfa: 'identity/mfa/method', + oidc: 'identity/oidc/client', + entities: 'identity/entity/id', + groups: 'identity/group/id', + leases: 'sys/leases/lookup', + namespaces: 'sys/namespaces', + 'control-groups': 'sys/control-group/', + }, + policies: { + acl: 'sys/policies/acl', + rgp: 'sys/policies/rgp', + egp: 'sys/policies/egp', + }, + tools: { + wrap: 'sys/wrapping/wrap', + lookup: 'sys/wrapping/lookup', + unwrap: 'sys/wrapping/unwrap', + rewrap: 'sys/wrapping/rewrap', + random: 'sys/tools/random', + hash: 'sys/tools/hash', + }, + status: { + replication: 'sys/replication', + license: 'sys/license', + seal: 'sys/seal', + raft: 'sys/storage/raft/configuration', + }, + clients: { + activity: 'sys/internal/counters/activity', + config: 'sys/internal/counters/config', + }, +}; + +const API_PATHS_TO_ROUTE_PARAMS = { + 'sys/auth': { route: 'vault.cluster.access.methods', models: [] }, + 'identity/entity/id': { route: 'vault.cluster.access.identity', models: ['entities'] }, + 'identity/group/id': { route: 'vault.cluster.access.identity', models: ['groups'] }, + 'sys/leases/lookup': { route: 'vault.cluster.access.leases', models: [] }, + 'sys/namespaces': { route: 'vault.cluster.access.namespaces', models: [] }, + 'sys/control-group/': { route: 'vault.cluster.access.control-groups', models: [] }, + 'identity/mfa/method': { route: 'vault.cluster.access.mfa', models: [] }, + 'identity/oidc/client': { route: 'vault.cluster.access.oidc', models: [] }, +}; + +/* + The Permissions service is used to gate top navigation and sidebar items. + It fetches a users' policy from the resultant-acl endpoint and stores their + allowed exact and glob paths as state. It also has methods for checking whether + a user has permission for a given path. +*/ + +export default Service.extend({ + exactPaths: null, + globPaths: null, + canViewAll: null, + store: service(), + auth: service(), + namespace: service(), + + getPaths: task(function* () { + if (this.paths) { + return; + } + + try { + const resp = yield this.store.adapterFor('permissions').query(); + this.setPaths(resp); + return; + } catch (err) { + // If no policy can be found, default to showing all nav items. 
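+      // (for example, if the request fails or the endpoint is unavailable;
+      // the error itself is intentionally discarded)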
+ this.set('canViewAll', true); + } + }), + + setPaths(resp) { + this.set('exactPaths', resp.data.exact_paths); + this.set('globPaths', resp.data.glob_paths); + this.set('canViewAll', resp.data.root); + }, + + reset() { + this.set('exactPaths', null); + this.set('globPaths', null); + this.set('canViewAll', null); + }, + + hasNavPermission(navItem, routeParams, requireAll) { + if (routeParams) { + // check that the user has permission to access all (requireAll = true) or any of the routes when array is passed + // useful for hiding nav headings when user does not have access to any of the links + const params = Array.isArray(routeParams) ? routeParams : [routeParams]; + const evalMethod = !Array.isArray(routeParams) || requireAll ? 'every' : 'some'; + return params[evalMethod]((param) => { + // viewing the entity and groups pages require the list capability, while the others require the default, which is anything other than deny + const capability = param === 'entities' || param === 'groups' ? ['list'] : [null]; + return this.hasPermission(API_PATHS[navItem][param], capability); + }); + } + return Object.values(API_PATHS[navItem]).some((path) => this.hasPermission(path)); + }, + + navPathParams(navItem) { + const path = Object.values(API_PATHS[navItem]).find((path) => this.hasPermission(path)); + if (['policies', 'tools'].includes(navItem)) { + return { models: [path.split('/').lastObject] }; + } + + return API_PATHS_TO_ROUTE_PARAMS[path]; + }, + + pathNameWithNamespace(pathName) { + const namespace = this.namespace.path; + if (namespace) { + return `${namespace}/${pathName}`; + } else { + return pathName; + } + }, + + hasPermission(pathName, capabilities = [null]) { + const path = this.pathNameWithNamespace(pathName); + + if (this.canViewAll) { + return true; + } + + return capabilities.every( + (capability) => + this.hasMatchingExactPath(path, capability) || this.hasMatchingGlobPath(path, capability) + ); + }, + + hasMatchingExactPath(pathName, capability) { + const exactPaths = this.exactPaths; + if (exactPaths) { + const prefix = Object.keys(exactPaths).find((path) => path.startsWith(pathName)); + const hasMatchingPath = prefix && !this.isDenied(exactPaths[prefix]); + + if (prefix && capability) { + return this.hasCapability(exactPaths[prefix], capability) && hasMatchingPath; + } + + return hasMatchingPath; + } + return false; + }, + + hasMatchingGlobPath(pathName, capability) { + const globPaths = this.globPaths; + if (globPaths) { + const matchingPath = Object.keys(globPaths).find((k) => { + return pathName.includes(k) || pathName.includes(k.replace(/\/$/, '')); + }); + const hasMatchingPath = + (matchingPath && !this.isDenied(globPaths[matchingPath])) || + Object.prototype.hasOwnProperty.call(globPaths, ''); + + if (matchingPath && capability) { + return this.hasCapability(globPaths[matchingPath], capability) && hasMatchingPath; + } + + return hasMatchingPath; + } + return false; + }, + + hasCapability(path, capability) { + return path.capabilities.includes(capability); + }, + + isDenied(path) { + return path.capabilities.includes('deny'); + }, +}); diff --git a/ui/app/services/replication-mode.js b/ui/app/services/replication-mode.js new file mode 100644 index 0000000..25316f8 --- /dev/null +++ b/ui/app/services/replication-mode.js @@ -0,0 +1,18 @@ +/** + * Copyright (c) HashiCorp, Inc. 
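+ *
+ * Holds the replication mode the UI is currently viewing (for example 'dr'
+ * or 'performance'; illustrative values, the service is just a get/set pair).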
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Service from '@ember/service'; + +export default Service.extend({ + mode: null, + + getMode() { + return this.mode; + }, + + setMode(mode) { + this.set('mode', mode); + }, +}); diff --git a/ui/app/services/secret-mount-path.js b/ui/app/services/secret-mount-path.js new file mode 100644 index 0000000..a292390 --- /dev/null +++ b/ui/app/services/secret-mount-path.js @@ -0,0 +1,19 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Service from '@ember/service'; + +// this service tracks the path of the currently viewed secret mount +// so that we can access that inside of engines where parent route params +// are not accessible +export default class SecretMountPath extends Service { + currentPath = ''; + update(path) { + this.currentPath = path; + } + get() { + return this.currentPath; + } +} diff --git a/ui/app/services/store.js b/ui/app/services/store.js new file mode 100644 index 0000000..30cabb1 --- /dev/null +++ b/ui/app/services/store.js @@ -0,0 +1,211 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Store from '@ember-data/store'; +import { schedule } from '@ember/runloop'; +import { copy } from 'ember-copy'; +import { resolve, Promise } from 'rsvp'; +import { dasherize } from '@ember/string'; +import { assert } from '@ember/debug'; +import { set, get, computed } from '@ember/object'; +import clamp from 'vault/utils/clamp'; +import config from 'vault/config/environment'; +import sortObjects from 'vault/utils/sort-objects'; + +const { DEFAULT_PAGE_SIZE } = config.APP; + +export function normalizeModelName(modelName) { + return dasherize(modelName); +} + +export function keyForCache(query) { + /*eslint no-unused-vars: ["error", { "ignoreRestSiblings": true }]*/ + // we want to ignore size, page, responsePath, and pageFilter in the cacheKey + const { size, page, responsePath, pageFilter, ...queryForCache } = query; + const cacheKeyObject = Object.keys(queryForCache) + .sort() + .reduce((result, key) => { + result[key] = queryForCache[key]; + return result; + }, {}); + return JSON.stringify(cacheKeyObject); +} + +export default Store.extend({ + // this is a map of map that stores the caches + // eslint-disable-next-line + lazyCaches: computed({ + get() { + return this._lazyCaches || new Map(); + }, + set(key, value) { + return (this._lazyCaches = value); + }, + }), + + setLazyCacheForModel(modelName, key, value) { + const cacheKey = keyForCache(key); + const cache = this.lazyCacheForModel(modelName) || new Map(); + cache.set(cacheKey, value); + const lazyCaches = this.lazyCaches; + const modelKey = normalizeModelName(modelName); + lazyCaches.set(modelKey, cache); + }, + + getLazyCacheForModel(modelName, key) { + const cacheKey = keyForCache(key); + const modelCache = this.lazyCacheForModel(modelName); + if (modelCache) { + return modelCache.get(cacheKey); + } + }, + + lazyCacheForModel(modelName) { + return this.lazyCaches.get(normalizeModelName(modelName)); + }, + + // This is the public interface for the store extension - to be used just + // like `Store.query`. Special handling of the response is controlled by + // `query.pageFilter`, `query.page`, and `query.size`. 
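+  //
+  // A hypothetical call (model name, backend, and responsePath are
+  // illustrative; required attributes are documented below):
+  //   const secrets = await this.store.lazyPaginatedQuery('secret', {
+  //     backend: 'my-kv', responsePath: 'data.keys', page: 1, pageFilter: 'foo',
+  //   });
+  //   secrets.meta.currentPage; // meta block built in constructResponse below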
+
+  // Required attributes of the `query` argument are:
+  //   responsePath: a string indicating the location on the response where
+  //     the array of items will be found
+  //   page: the page number to return
+  //   size: the size of the page
+  //   pageFilter: a string that will be used to do a fuzzy match against the
+  //     results, this is done pre-pagination
+  lazyPaginatedQuery(modelType, query /*, options*/) {
+    const skipCache = query.skipCache;
+    // We don't want skipCache to be part of the actual query key, so remove it
+    delete query.skipCache;
+    const adapter = this.adapterFor(modelType);
+    const modelName = normalizeModelName(modelType);
+    const dataCache = skipCache ? this.clearDataset(modelName) : this.getDataset(modelName, query);
+    const responsePath = query.responsePath;
+    assert('responsePath is required', responsePath);
+    assert('page is required', typeof query.page === 'number');
+    if (!query.size) {
+      query.size = DEFAULT_PAGE_SIZE;
+    }
+
+    if (dataCache) {
+      return resolve(this.fetchPage(modelName, query));
+    }
+    return adapter
+      .query(this, { modelName }, query)
+      .then((response) => {
+        const serializer = this.serializerFor(modelName);
+        const datasetHelper = serializer.extractLazyPaginatedData;
+        const dataset = datasetHelper
+          ? datasetHelper.call(serializer, response)
+          : get(response, responsePath);
+        set(response, responsePath, null);
+        this.storeDataset(modelName, query, response, dataset);
+        return this.fetchPage(modelName, query);
+      })
+      .catch(function (e) {
+        throw e;
+      });
+  },
+
+  filterData(filter, dataset) {
+    let newData = dataset || [];
+    if (filter) {
+      newData = dataset.filter(function (item) {
+        const id = item.id || item.name || item;
+        return id.toLowerCase().includes(filter.toLowerCase());
+      });
+    }
+    return newData;
+  },
+
+  // reconstructs the original form of the response from the server
+  // with an additional `meta` block
+  //
+  // the meta block includes:
+  // currentPage, lastPage, nextPage, prevPage, total, filteredTotal
+  constructResponse(modelName, query) {
+    const { pageFilter, responsePath, size, page } = query;
+    let { response, dataset } = this.getDataset(modelName, query);
+    response = copy(response, true);
+    const data = this.filterData(pageFilter, dataset);
+
+    const lastPage = Math.ceil(data.length / size);
+    const currentPage = clamp(page, 1, lastPage);
+    const end = currentPage * size;
+    const start = end - size;
+    const slicedDataSet = data.slice(start, end);
+
+    set(response, responsePath || '', slicedDataSet);
+
+    response.meta = {
+      currentPage,
+      lastPage,
+      nextPage: clamp(currentPage + 1, 1, lastPage),
+      prevPage: clamp(currentPage - 1, 1, lastPage),
+      total: dataset.length || 0,
+      filteredTotal: data.length || 0,
+      pageSize: size,
+    };
+
+    return response;
+  },
+
+  // pushes records into the store and returns the result
+  fetchPage(modelName, query) {
+    const response = this.constructResponse(modelName, query);
+    this.peekAll(modelName).forEach((record) => {
+      record.unloadRecord();
+    });
+    return new Promise((resolve) => {
+      schedule('destroy', () => {
+        this.push(
+          this.serializerFor(modelName).normalizeResponse(
+            this,
+            this.modelFor(modelName),
+            response,
+            null,
+            'query'
+          )
+        );
+        const model = this.peekAll(modelName).toArray();
+        model.set('meta', response.meta);
+        resolve(model);
+      });
+    });
+  },
+
+  // get cached data
+  getDataset(modelName, query) {
+    return this.getLazyCacheForModel(modelName, query);
+  },
+
+  // store data cache as { response, dataset }
+  // also populates the `lazyCaches` attribute
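+  // (the cache key is the JSON-stringified query minus size, page,
+  // responsePath, and pageFilter; see keyForCache above)
+ 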
storeDataset(modelName, query, response, array) { + const dataset = query.sortBy ? sortObjects(array, query.sortBy) : array; + const value = { + response, + dataset, + }; + this.setLazyCacheForModel(modelName, query, value); + }, + + clearDataset(modelName) { + const cacheList = this.lazyCaches; + if (!cacheList.size) return; + if (modelName && cacheList.has(modelName)) { + cacheList.delete(modelName); + return; + } + cacheList.clear(); + this.set('lazyCaches', cacheList); + }, + + clearAllDatasets() { + this.clearDataset(); + }, +}); diff --git a/ui/app/services/version.js b/ui/app/services/version.js new file mode 100644 index 0000000..cd3be84 --- /dev/null +++ b/ui/app/services/version.js @@ -0,0 +1,73 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Service, { inject as service } from '@ember/service'; +import { keepLatestTask, task } from 'ember-concurrency'; +import { tracked } from '@glimmer/tracking'; + +export default class VersionService extends Service { + @service store; + @tracked features = []; + @tracked version = null; + + get hasPerfReplication() { + return this.features.includes('Performance Replication'); + } + + get hasDRReplication() { + return this.features.includes('DR Replication'); + } + + get hasSentinel() { + return this.features.includes('Sentinel'); + } + + get hasNamespaces() { + return this.features.includes('Namespaces'); + } + + get hasControlGroups() { + return this.features.includes('Control Groups'); + } + + get isEnterprise() { + if (!this.version) return false; + return this.version.includes('+'); + } + + get isOSS() { + return !this.isEnterprise; + } + + @task + *getVersion() { + if (this.version) return; + const response = yield this.store.adapterFor('cluster').health(); + this.version = response.version; + return; + } + + @keepLatestTask + *getFeatures() { + if (this.features?.length || this.isOSS) { + return; + } + try { + const response = yield this.store.adapterFor('cluster').features(); + this.features = response.features; + return; + } catch (err) { + // if we fail here, we're likely in DR Secondary mode and don't need to worry about it + } + } + + fetchVersion() { + return this.getVersion.perform(); + } + + fetchFeatures() { + return this.getFeatures.perform(); + } +} diff --git a/ui/app/services/wizard.js b/ui/app/services/wizard.js new file mode 100644 index 0000000..c245f03 --- /dev/null +++ b/ui/app/services/wizard.js @@ -0,0 +1,361 @@ +/** + * Copyright (c) HashiCorp, Inc. 
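+ *
+ * Drives the in-app getting-started wizard: a top-level tutorial state
+ * machine plus a per-feature machine (both xstate Machines), with progress
+ * persisted to token storage under the STORAGE_KEYS constants below.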
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { next } from '@ember/runloop'; +import { typeOf } from '@ember/utils'; +import Service, { inject as service } from '@ember/service'; +import { Machine } from 'xstate'; +import { capitalize } from '@ember/string'; + +import getStorage from 'vault/lib/token-storage'; +import { STORAGE_KEYS, DEFAULTS, MACHINES } from 'vault/helpers/wizard-constants'; +const { + TUTORIAL_STATE, + COMPONENT_STATE, + FEATURE_STATE, + FEATURE_LIST, + FEATURE_STATE_HISTORY, + COMPLETED_FEATURES, + RESUME_URL, + RESUME_ROUTE, +} = STORAGE_KEYS; +const TutorialMachine = Machine(MACHINES.tutorial); +let FeatureMachine = null; + +export default Service.extend(DEFAULTS, { + router: service(), + showWhenUnauthenticated: false, + featureMachineHistory: null, + init() { + this._super(...arguments); + this.initializeMachines(); + }, + + initializeMachines() { + if (!this.storageHasKey(TUTORIAL_STATE)) { + const state = TutorialMachine.initialState; + this.saveState('currentState', state.value); + this.saveExtState(TUTORIAL_STATE, state.value); + } + this.saveState('currentState', this.getExtState(TUTORIAL_STATE)); + if (this.storageHasKey(COMPONENT_STATE)) { + this.set('componentState', this.getExtState(COMPONENT_STATE)); + } + const stateNodes = TutorialMachine.getStateNodes(this.currentState); + this.executeActions( + stateNodes.reduce((acc, node) => acc.concat(node.onEntry), []), + null, + 'tutorial' + ); + + if (this.storageHasKey(FEATURE_LIST)) { + this.set('featureList', this.getExtState(FEATURE_LIST)); + if (this.storageHasKey(FEATURE_STATE_HISTORY)) { + this.set('featureMachineHistory', this.getExtState(FEATURE_STATE_HISTORY)); + } + this.saveState( + 'featureState', + this.getExtState(FEATURE_STATE) || (FeatureMachine ? FeatureMachine.initialState : null) + ); + this.saveExtState(FEATURE_STATE, this.featureState); + this.buildFeatureMachine(); + } + }, + + clearFeatureData() { + const storage = this.storage(); + // empty storage + [FEATURE_LIST, FEATURE_STATE, FEATURE_STATE_HISTORY, COMPLETED_FEATURES].forEach((key) => + storage.removeItem(key) + ); + + this.set('currentMachine', null); + this.set('featureMachineHistory', null); + this.set('featureState', null); + this.set('featureList', null); + }, + + restartGuide() { + this.clearFeatureData(); + const storage = this.storage(); + // empty storage + [TUTORIAL_STATE, COMPONENT_STATE, RESUME_URL, RESUME_ROUTE].forEach((key) => storage.removeItem(key)); + // reset wizard state + this.setProperties(DEFAULTS); + // restart machines from blank state + this.initializeMachines(); + // progress machine to 'active.select' + this.transitionTutorialMachine('idle', 'AUTH'); + }, + + saveFeatureHistory(state) { + if ( + this.getCompletedFeatures().length === 0 && + this.featureMachineHistory === null && + (state === 'idle' || state === 'wrap') + ) { + const newHistory = [state]; + this.set('featureMachineHistory', newHistory); + } else { + if (this.featureMachineHistory) { + if (!this.featureMachineHistory.includes(state)) { + const newHistory = this.featureMachineHistory.addObject(state); + this.set('featureMachineHistory', newHistory); + } else { + //we're repeating steps + const stepIndex = this.featureMachineHistory.indexOf(state); + const newHistory = this.featureMachineHistory.splice(0, stepIndex + 1); + this.set('featureMachineHistory', newHistory); + } + } + } + if (this.featureMachineHistory) { + this.saveExtState(FEATURE_STATE_HISTORY, this.featureMachineHistory); + } + }, + + saveState(stateType, state) { + if 
(state.value) { + state = state.value; + } + let stateKey = ''; + while (typeOf(state) === 'object') { + const newState = Object.keys(state); + stateKey += newState + '.'; + state = state[newState]; + } + stateKey += state; + this.set(stateType, stateKey); + if (stateType === 'featureState') { + //only track progress if we are on the first step of the first feature + this.saveFeatureHistory(state); + } + }, + + transitionTutorialMachine(currentState, event, extendedState) { + if (extendedState) { + this.set('componentState', extendedState); + this.saveExtState(COMPONENT_STATE, extendedState); + } + const { actions, value } = TutorialMachine.transition(currentState, event); + this.saveState('currentState', value); + this.saveExtState(TUTORIAL_STATE, this.currentState); + this.executeActions(actions, event, 'tutorial'); + }, + + transitionFeatureMachine(currentState, event, extendedState) { + if (!FeatureMachine || !this.currentState.includes('active')) { + return; + } + if (extendedState) { + this.set('componentState', extendedState); + this.saveExtState(COMPONENT_STATE, extendedState); + } + + const { actions, value } = FeatureMachine.transition(currentState, event, this.componentState); + this.saveState('featureState', value); + this.saveExtState(FEATURE_STATE, value); + this.executeActions(actions, event, 'feature'); + // if all features were completed, the FeatureMachine gets nulled + // out and won't exist here as there is no next step + if (FeatureMachine) { + let next; + if (this.currentMachine === 'secrets' && value === 'display') { + next = FeatureMachine.transition(value, 'REPEAT', this.componentState); + } else { + next = FeatureMachine.transition(value, 'CONTINUE', this.componentState); + } + this.saveState('nextStep', next.value); + } + }, + + saveExtState(key, value) { + this.storage().setItem(key, value); + }, + + getExtState(key) { + return this.storage().getItem(key); + }, + + storageHasKey(key) { + return Boolean(this.getExtState(key)); + }, + + executeActions(actions, event, machineType) { + let transitionURL; + let expectedRouteName; + const router = this.router; + + for (const action of actions) { + let type = action; + if (action.type) { + type = action.type; + } + switch (type) { + case 'render': + this.set(`${action.level}Component`, action.component); + break; + case 'routeTransition': + expectedRouteName = action.params[0]; + transitionURL = router.urlFor(...action.params).replace(/^\/ui/, ''); + next(() => { + router.transitionTo(...action.params); + }); + break; + case 'saveFeatures': + this.saveFeatures(event.features); + break; + case 'completeFeature': + this.completeFeature(); + break; + case 'handleDismissed': + this.handleDismissed(); + break; + case 'handlePaused': + this.handlePaused(); + return; + case 'handleResume': + this.handleResume(); + break; + case 'showTutorialWhenAuthenticated': + this.set('showWhenUnauthenticated', false); + break; + case 'showTutorialAlways': + this.set('showWhenUnauthenticated', true); + break; + case 'clearFeatureData': + this.clearFeatureData(); + break; + case 'continueFeature': + this.transitionFeatureMachine(this.featureState, 'CONTINUE', this.componentState); + break; + default: + break; + } + } + if (machineType === 'tutorial') { + return; + } + // if we're transitioning in the actions, we want that url, + // else we want the URL we land on in didTransition in the + // application route - we'll notify the application route to + // update the route + if (transitionURL) { + this.set('expectedURL', transitionURL); + 
this.set('expectedRouteName', expectedRouteName); + this.set('setURLAfterTransition', false); + } else { + this.set('setURLAfterTransition', true); + } + }, + + handlePaused() { + const expected = this.expectedURL; + if (expected) { + this.saveExtState(RESUME_URL, this.expectedURL); + this.saveExtState(RESUME_ROUTE, this.expectedRouteName); + } + }, + + handleResume() { + const resumeURL = this.storage().getItem(RESUME_URL); + if (!resumeURL) { + return; + } + this.router + .transitionTo(resumeURL) + .followRedirects() + .then(() => { + this.set('expectedRouteName', this.storage().getItem(RESUME_ROUTE)); + this.set('expectedURL', resumeURL); + this.initializeMachines(); + this.storage().removeItem(RESUME_URL); + }); + }, + + handleDismissed() { + this.storage().removeItem(FEATURE_STATE); + this.storage().removeItem(FEATURE_LIST); + this.storage().removeItem(FEATURE_STATE_HISTORY); + this.storage().removeItem(COMPONENT_STATE); + }, + + saveFeatures(features) { + this.set('featureList', features); + this.saveExtState(FEATURE_LIST, this.featureList); + this.buildFeatureMachine(); + }, + + buildFeatureMachine() { + if (this.featureList === null) { + return; + } + this.startFeature(); + const nextFeature = this.featureList.length > 1 ? capitalize(this.featureList.objectAt(1)) : 'Finish'; + this.set('nextFeature', nextFeature); + let next; + if (this.currentMachine === 'secrets' && this.featureState === 'display') { + next = FeatureMachine.transition(this.featureState, 'REPEAT', this.componentState); + } else { + next = FeatureMachine.transition(this.featureState, 'CONTINUE', this.componentState); + } + this.saveState('nextStep', next.value); + const stateNodes = FeatureMachine.getStateNodes(this.featureState); + this.executeActions( + stateNodes.reduce((acc, node) => acc.concat(node.onEntry), []), + null, + 'feature' + ); + }, + + startFeature() { + const FeatureMachineConfig = MACHINES[this.featureList.objectAt(0)]; + FeatureMachine = Machine(FeatureMachineConfig); + this.set('currentMachine', this.featureList.objectAt(0)); + if (this.storageHasKey(FEATURE_STATE)) { + this.saveState('featureState', this.getExtState(FEATURE_STATE)); + } else { + this.saveState('featureState', FeatureMachine.initialState); + } + this.saveExtState(FEATURE_STATE, this.featureState); + }, + + getCompletedFeatures() { + if (this.storageHasKey(COMPLETED_FEATURES)) { + return this.getExtState(COMPLETED_FEATURES).toArray(); + } + return []; + }, + + completeFeature() { + const features = this.featureList; + const done = features.shift(); + if (!this.getExtState(COMPLETED_FEATURES)) { + const completed = []; + completed.push(done); + this.saveExtState(COMPLETED_FEATURES, completed); + } else { + this.saveExtState(COMPLETED_FEATURES, this.getExtState(COMPLETED_FEATURES).toArray().addObject(done)); + } + + this.saveExtState(FEATURE_LIST, features.length ? features : null); + this.storage().removeItem(FEATURE_STATE); + if (this.featureMachineHistory) { + this.set('featureMachineHistory', []); + this.saveExtState(FEATURE_STATE_HISTORY, []); + } + if (features.length > 0) { + this.buildFeatureMachine(); + } else { + this.storage().removeItem(FEATURE_LIST); + FeatureMachine = null; + this.transitionTutorialMachine(this.currentState, 'DONE'); + } + }, + + storage() { + return getStorage(); + }, +}); diff --git a/ui/app/styles/app.scss b/ui/app/styles/app.scss new file mode 100644 index 0000000..d1d6618 --- /dev/null +++ b/ui/app/styles/app.scss @@ -0,0 +1,22 @@ +/** + * Copyright (c) HashiCorp, Inc. 
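+ *
+ * Entry-point stylesheet: imports vendor and core styles, then defines a
+ * font-face helper used below for the 'obscure' and 'text-security-square'
+ * fonts served from /ui/fonts.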
+ * SPDX-License-Identifier: MPL-2.0 + */ + +@import './reset'; +@import 'ember-basic-dropdown'; +@import 'ember-power-select'; +@import '@hashicorp/design-system-components'; +@import './core'; + +@mixin font-face($name) { + @font-face { + font-family: $name; + src: url('/ui/fonts/#{$name}.woff2') format('woff2'), url('/ui/fonts/#{$name}.woff') format('woff'); + } +} + +@include font-face('obscure'); +// Font comes from npm package: https://www.npmjs.com/package/text-security +// We took the font we wanted and moved it into the ui/fonts folder +@include font-face('text-security-square'); diff --git a/ui/app/styles/components/action-block.scss b/ui/app/styles/components/action-block.scss new file mode 100644 index 0000000..cc049d2 --- /dev/null +++ b/ui/app/styles/components/action-block.scss @@ -0,0 +1,73 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +@mixin stacked-grid { + grid-template-columns: 1fr; + grid-row: 1/1; +} +@mixin stacked-content { + margin-bottom: $spacing-l; +} + +.action-block { + @extend .selectable-card; + grid-template-columns: 2fr 1fr; + display: grid; + padding: $spacing-m $spacing-l; + line-height: inherit; + grid-gap: $spacing-m; + + @include until($mobile) { + @include stacked-grid(); + } +} + +.action-block-info { + @include until($mobile) { + @include stacked-content(); + } +} + +.action-block.stacked { + @include stacked-grid(); +} +.stacked > .action-block-info { + @include stacked-content(); +} + +.action-block-title { + font-size: $size-5; + font-weight: $font-weight-bold; +} +.action-block-action { + text-align: right; + @include until($mobile) { + text-align: left; + } +} + +/* Action Block Grid */ +.replication-actions-grid-layout { + display: flex; + flex-wrap: wrap; + margin: $spacing-m 0; + @include until($mobile) { + display: block; + } +} + +.replication-actions-grid-item { + flex-basis: 50%; + padding: $spacing-s; + display: flex; + width: 100%; +} + +.replication-actions-grid-item .action-block { + width: 100%; + @include until($mobile) { + height: inherit; + } +} diff --git a/ui/app/styles/components/auth-buttons.scss b/ui/app/styles/components/auth-buttons.scss new file mode 100644 index 0000000..c0e1b98 --- /dev/null +++ b/ui/app/styles/components/auth-buttons.scss @@ -0,0 +1,27 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.auth-button-tile { + height: 31px; + width: 31px; + background: $white; + border-radius: 1px; + box-shadow: 0 0 0 1px rgba(255, 255, 255, 0.4); +} +.auth-button-type-google { + position: relative; + top: -10px; + left: -1.05rem; +} + +.auth-button-type-auth0, +.auth-button-type-gitlab { + position: relative; + top: -6px; + left: -0.75rem; +} +[class*='auth-button-type'] .text { + padding-left: $spacing-m; +} diff --git a/ui/app/styles/components/auth-form.scss b/ui/app/styles/components/auth-form.scss new file mode 100644 index 0000000..155e85b --- /dev/null +++ b/ui/app/styles/components/auth-form.scss @@ -0,0 +1,42 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +.auth-form { + @extend .box; + @extend .is-bottomless; + padding: 0; + position: relative; + overflow: hidden; +} + +.auth-form .vault-loader { + position: absolute; + top: -1px; + left: -1px; + right: -1px; + bottom: -1px; + background: rgba(255, 255, 255, 0.8); + z-index: 10; + display: flex; + justify-content: center; + align-items: center; +} + +.toolbar-namespace-picker { + padding: 0 $spacing-s; + + .field { + width: 100%; + } + + .field-label { + margin-right: $spacing-s; + align-self: center; + } + + .is-label { + color: $grey; + } +} diff --git a/ui/app/styles/components/autocomplete-input.scss b/ui/app/styles/components/autocomplete-input.scss new file mode 100644 index 0000000..e0b270d --- /dev/null +++ b/ui/app/styles/components/autocomplete-input.scss @@ -0,0 +1,23 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.autocomplete-input { + background: $white !important; + border: 1px solid $grey-light; + box-sizing: border-box; + border-radius: 3px; + width: 99%; + padding: 4px 0; + margin-left: 0.5%; + margin-top: -4px; +} + +.autocomplete-input-option { + padding: 12px; + &:hover { + background-color: $grey-lightest; + cursor: pointer; + } +} diff --git a/ui/app/styles/components/b64-toggle.scss b/ui/app/styles/components/b64-toggle.scss new file mode 100644 index 0000000..ea9164b --- /dev/null +++ b/ui/app/styles/components/b64-toggle.scss @@ -0,0 +1,18 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.b64-toggle { + padding: 0.75rem; + font-size: $size-9; +} +.b64-toggle.is-input { + box-shadow: none; +} +.b64-toggle.is-textarea { + @extend .is-compact; + position: absolute; + bottom: 0.25rem; + right: 0.25rem; +} diff --git a/ui/app/styles/components/box-label.scss b/ui/app/styles/components/box-label.scss new file mode 100644 index 0000000..a489139 --- /dev/null +++ b/ui/app/styles/components/box-label.scss @@ -0,0 +1,68 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +label.box-label { + cursor: pointer; +} + +.box-label { + @extend .box; + @extend .is-centered; + + border-color: $grey-light; + border-radius: 3px; + box-shadow: $box-link-shadow; + text-decoration: none; + transition: box-shadow $speed; + width: 100%; + + > div:first-child { + flex-grow: 1; + } + + &.is-column { + @extend .is-flex-column; + } + + &.is-selected { + box-shadow: $box-link-hover-shadow, $box-shadow-middle; + + .icon { + color: $grey; + } + } + + .icon { + color: $grey-light; + } + + input[type='radio'] { + display: none; + } + + input[type='radio'] + label { + border: 1px solid $grey-light; + border-radius: 50%; + cursor: pointer; + display: block; + margin: 1rem auto 0; + height: 1rem; + width: 1rem; + } + + input[type='radio']:checked + label { + background: $blue; + border: 1px solid $blue; + box-shadow: inset 0 0 0 0.15rem $white; + } +} + +.box-label-header { + color: $grey; + + .is-selected & { + color: $grey-darkest; + } +} diff --git a/ui/app/styles/components/box-radio.scss b/ui/app/styles/components/box-radio.scss new file mode 100644 index 0000000..19d6e48 --- /dev/null +++ b/ui/app/styles/components/box-radio.scss @@ -0,0 +1,67 @@ +/** + * Copyright (c) HashiCorp, Inc. 
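+ *
+ * Like box-label above, these rules draw a custom circular control in place
+ * of the native radio input, using an inset box-shadow for the checked dot.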
+ * SPDX-License-Identifier: MPL-2.0 + */ + +.box-radio-container { + display: flex; + flex-wrap: wrap; +} +.title.box-radio-header { + font-size: $size-6; + color: $grey; + margin: $size-7 0 0 0; +} +.box-radio-spacing { + margin: $size-6 $size-3 $size-6 0; +} +.box-radio { + box-sizing: border-box; + flex-basis: 7rem; + width: 7rem; + min-height: 7.5rem; + padding: $size-10 $size-6 $size-10; + flex-direction: column; + justify-content: space-between; + align-items: center; + display: flex; + border-radius: $radius; + box-shadow: $box-shadow; + text-align: center; + color: $grey; + font-weight: $font-weight-semibold; + line-height: 1; + margin: $size-6 $size-3 $size-6 0; + font-size: 12px; + transition: box-shadow ease-in-out $speed; + will-change: box-shadow; + + &.is-selected { + box-shadow: 0 0 0 1px $grey-light, $box-shadow-middle; + } + + &.is-disabled { + opacity: 0.5; + } + + input[type='radio'].radio + label { + border: 1px solid $grey-light; + border-radius: 50%; + cursor: pointer; + display: block; + margin: 1rem auto 0; + height: 1rem; + width: 1rem; + flex-shrink: 0; + flex-grow: 0; + } + + input[type='radio'].radio:checked + label { + background: $blue; + border: 1px solid $blue; + box-shadow: inset 0 0 0 0.15rem $white; + } + input[type='radio'].radio:focus + label { + box-shadow: 0 0 10px 1px rgba($blue, 0.4), inset 0 0 0 0.15rem $white; + } +} diff --git a/ui/app/styles/components/calendar-widget.scss b/ui/app/styles/components/calendar-widget.scss new file mode 100644 index 0000000..03c6a65 --- /dev/null +++ b/ui/app/styles/components/calendar-widget.scss @@ -0,0 +1,109 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +$dark-gray: #535f73; + +.calendar-content { + display: flex; + width: 234px; + + &.calendar-open { + width: 500px; + } + + > .menu { + margin-bottom: 0px; + } +} +.calendar-title { + padding: $size-10 $size-8; +} + +.select-year { + grid-area: select-year; + margin-left: 24px; + margin-top: 10px; + font-weight: $font-weight-bold; + display: flex; + justify-content: space-between; + align-items: first baseline; + + // spacing of < year > icons when the tooltip is added or removed from DOM. 
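+  // (the component presumably toggles between the two helper classes below
+  // depending on whether the tooltip is currently rendered)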
+ > .padding-right { + // if no tooltip + padding-right: 26px; + } + > .negative-margin { + // if tooltip + margin-right: -50px; + } +} + +.calendar-widget { + grid-area: calendar-widget; + + > button { + &.is-month-list { + background-color: $white; + color: black; + text-align: center; + border: $light-border; + border-radius: $radius; + } + &.is-current-month { + border: 1px solid $ui-gray-900; + } + &:hover { + background-color: lighten($dark-gray, 30%); + color: $white; + text-align: center; + cursor: pointer; + } + &.is-readOnly { + background-color: $ui-gray-100; + color: lighten($dark-gray, 30%); + pointer-events: none; + } + } +} + +.border-col { + grid-area: border-col; + background-color: $ui-gray-200; +} + +.calendar-widget-container { + display: grid; + grid-template-areas: + 'select-year' + 'calendar-widget'; + grid-template-columns: 1fr; + grid-template-rows: 0.7fr 3fr; + box-shadow: $box-shadow, $box-shadow-middle; + background-color: white; + border-radius: $radius; +} + +.calendar-widget-grid { + display: grid; + grid-template-columns: 110px 110px; + grid-template-rows: repeat(2, 1fr); + grid-gap: 0.7rem; + padding: 7px 28px 28px 28px; +} + +// for modal-dialog tooltips +.calendar-tooltip { + background-color: $ui-gray-700; + color: $white; + font-size: $size-8; + padding: $size-9; + border-radius: $radius-large; + width: 141px; +} + +.ember-modal-dialog { + z-index: 1000; +} diff --git a/ui/app/styles/components/codemirror.scss b/ui/app/styles/components/codemirror.scss new file mode 100644 index 0000000..ad06b27 --- /dev/null +++ b/ui/app/styles/components/codemirror.scss @@ -0,0 +1,190 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +$light-grey: #dde3e7; +$light-gray: #a4a4a4; +$light-grey-blue: #6c7b81; +$dark-grey: #788290; +$faded-gray: #eaeaea; +// Product colors +$atlas: #127eff; +$vagrant: #2f88f7; +$consul: #69499a; +$terraform: #822ff7; +$serf: #dd4e58; +$packer: #1ddba3; + +// Our colors +$gray: lighten($black, 89%); +$red: #ff3d3d; +$green: #39b54a; +$dark-gray: #535f73; + +$gutter-grey: #2a2f36; + +.CodeMirror-lint-tooltip { + background-color: #f9f9fa; + border: 1px solid $light-gray; + border-radius: 0; + color: lighten($black, 13%); + font-family: $family-monospace; + font-size: 13px; + padding: 7px 8px 9px; +} + +.cm-s-hashi-read-only { + &.CodeMirror { + background-color: $grey-lightest; + border: none; + color: $ui-gray-600; + font-family: $family-monospace; + -webkit-font-smoothing: auto; + line-height: 1.4; + } + + span.cm-string, + span.cm-string-2 { + color: $packer; + } + span.cm-property { + color: lighten($consul, 20%); + } +} + +.cm-s-hashi { + &.CodeMirror { + background-color: $black !important; + resize: vertical; + color: #cfd2d1 !important; + border: none; + font-family: $family-monospace; + -webkit-font-smoothing: auto; + line-height: 1.4; + } + + .CodeMirror-gutters { + color: $dark-grey; + background-color: $gutter-grey; + border: none; + } + + .CodeMirror-cursor { + border-left: solid thin #f8f8f0; + } + + .CodeMirror-linenumber { + color: #6d8a88; + } + + div.CodeMirror-selected { + background: rgb(33, 66, 131); + } + + &.CodeMirror-focused div.CodeMirror-selected { + background: rgb(33, 66, 131); + } + + .CodeMirror-line::selection, + .CodeMirror-line > span::selection, + .CodeMirror-line > span > span::selection { + background: rgb(33, 66, 131); + } + + .CodeMirror-line::-moz-selection, + .CodeMirror-line > span::-moz-selection, + .CodeMirror-line > span > span::-moz-selection { + background: 
rgb(33, 66, 131); + } + + span.cm-comment { + color: $light-grey; + } + + span.cm-string, + span.cm-string-2 { + color: $packer; + } + + span.cm-number { + color: $serf; + } + + span.cm-variable { + color: lighten($consul, 20%); + } + + span.cm-variable-2 { + color: lighten($consul, 20%); + } + + span.cm-def { + color: $packer; + } + + span.cm-operator { + color: $gray; + } + span.cm-keyword { + color: $yellow; + } + + span.cm-atom { + color: $serf; + } + + span.cm-meta { + color: $packer; + } + + span.cm-tag { + color: $packer; + } + + span.cm-attribute { + color: #9fca56; + } + + span.cm-qualifier { + color: #9fca56; + } + + span.cm-property { + color: lighten($consul, 20%); + } + + span.cm-variable-3 { + color: #9fca56; + } + + span.cm-builtin { + color: #9fca56; + } + + .CodeMirror-activeline-background { + background: #101213; + } + + .CodeMirror-matchingbracket { + text-decoration: underline; + color: white !important; + } +} + +.readonly-codemirror { + .CodeMirror-code { + cursor: default; + } + .CodeMirror-cursor { + // https://github.com/codemirror/CodeMirror/issues/1099 + display: none; + } +} +.cm-s-auto-height.CodeMirror { + height: auto; +} + +.cm-s-short.CodeMirror { + height: 100px; +} diff --git a/ui/app/styles/components/confirm.scss b/ui/app/styles/components/confirm.scss new file mode 100644 index 0000000..2f6c826 --- /dev/null +++ b/ui/app/styles/components/confirm.scss @@ -0,0 +1,139 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.confirm-wrapper { + position: relative; + overflow: hidden; + border-radius: $radius; + box-shadow: $box-shadow, $box-shadow-middle; +} + +.confirm { + transition: transform $speed; + padding-top: 2px; +} + +.show-confirm { + transform: translateX(-100%); + transition: transform $speed; +} + +.confirm.show-confirm { + visibility: hidden; +} + +.confirm-overlay { + position: absolute; + background-color: white; + top: 0; + left: 100%; + width: 100%; +} + +.confirm, +.confirm-overlay { + button.link, + a { + background-color: $white; + color: $grey-darkest; + + &:hover { + background-color: $ui-gray-050; + color: $ui-gray-900; + } + + &.is-active { + background-color: $blue-500; + color: $blue; + } + + &.is-destroy { + color: $red; + + &:hover { + background-color: $red; + color: $white; + } + } + + &.disabled { + opacity: 0.5; + + &:hover { + background: transparent; + cursor: default; + } + } + } +} + +.confirm-action span .button { + display: block; + margin: 0.25rem auto; + width: 95%; +} + +.confirm-action > span { + @include from($mobile) { + align-items: center; + display: flex; + } + + * { + margin-left: $size-8; + } + + .confirm-action-text:not(.is-block) { + text-align: right; + + @include until($mobile) { + display: block; + margin-bottom: $size-8; + text-align: left; + } + } + .confirm-action-text.is-block { + text-align: left; + } +} + +.confirm-action-message { + margin: 0; + + .message { + border: 0; + font-size: $size-8; + line-height: 1.33; + margin: 0; + } + + .message-title { + font-size: 1rem; + } + + .hs-icon { + color: $yellow; + } + + p { + font-weight: $font-weight-semibold; + margin-left: $spacing-l; + padding-left: $spacing-xxs; + padding-top: 0; + } + + .confirm-action-options { + border-top: $light-border; + display: flex; + padding: $spacing-xxs; + + .link { + flex: 1; + text-align: center; + width: auto; + padding: $spacing-xs; + } + } +} diff --git a/ui/app/styles/components/console-ui-panel.scss b/ui/app/styles/components/console-ui-panel.scss new file mode 100644 index 
0000000..3338726 --- /dev/null +++ b/ui/app/styles/components/console-ui-panel.scss @@ -0,0 +1,165 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +$console-close-height: 35px; + +.console-ui-panel { + background: var(--token-color-palette-neutral-700); + width: -moz-available; + width: -webkit-fill-available; + height: 0; + min-height: 0; + overflow: auto; + position: fixed; + bottom: 0; + transition: min-height $speed $easing, transform $speed ease-in; + will-change: transform, min-height; + -webkit-overflow-scrolling: touch; + z-index: 199; + + .button { + background: transparent; + border: none; + color: $white; + min-width: 0; + padding: 0 $size-8; + + &.active, + &:hover { + background: $blue; + color: $white; + } + } +} + +.console-ui-panel-content { + color: $white; + display: flex; + flex-direction: column; + font-size: 14px; + font-weight: $font-weight-semibold; + justify-content: flex-end; + min-height: calc(100% - $console-close-height); // account for close button that is sticky positioned + padding: $size-8 $size-8 $size-5; + transition: justify-content $speed ease-in; + + pre, + p { + background: none; + color: inherit; + font-size: 14px; + min-height: 2rem; + padding: 0; + + &:not(.console-ui-command):not(.CodeMirror-line) { + padding-left: $size-4; + } + } + + .cm-s-hashi.CodeMirror { + background-color: rgba($black, 0.5) !important; + font-weight: $font-weight-normal; + margin-left: $size-4; + padding: $size-8 $size-4; + } +} + +.console-ui-panel-content.hover-copy-button, +.console-ui-panel-content.hover-copy-button-static { + top: auto; + bottom: 0; + right: 0; +} + +.console-ui-input { + align-items: center; + display: flex; + + input { + background-color: rgba($black, 0.5); + border: 1px solid var(--token-color-palette-neutral-500); + border-radius: 2px; + caret-color: $white; + color: $white; + flex: 1 1 auto; + font-family: $family-monospace; + font-size: 16px; + font-weight: $font-weight-bold; + outline: none; + padding: $size-10; + margin-right: $spacing-xs; + transition: background-color $speed; + } +} + +.console-ui-command { + line-height: 2; +} + +.console-ui-output { + transition: background-color $speed ease-in-out; + will-change: background-color; + padding-right: $size-2; + position: relative; + background-color: rgba(#000, 0); + &:hover { + background-color: rgba(#000, 0.5); + } +} + +.console-ui-alert { + margin-left: calc(#{$size-4} - 0.33rem); + position: relative; + + svg { + position: absolute; + left: 0; + top: 0; + } +} + +.panel-open .console-ui-panel { + box-shadow: $box-shadow-highest; + min-height: 400px; +} + +.main--console-open { + padding-bottom: 400px; +} + +.panel-open .console-ui-panel.fullscreen { + bottom: 0; + right: 0; + min-height: 100vh; + width: 100%; +} + +.console-spinner.control { + height: 21px; + width: 21px; + transform: scale(0.75); + transform-origin: center; + &::after { + height: auto; + width: auto; + right: 0.25rem; + left: 0.25rem; + top: 0.25rem; + bottom: 0.25rem; + } +} + +.console-close-button { + position: sticky; + top: $spacing-xs; + height: $console-close-height; + display: flex; + justify-content: flex-end; + z-index: 210; + + button { + margin-right: $spacing-xs; + } +} diff --git a/ui/app/styles/components/control-group.scss b/ui/app/styles/components/control-group.scss new file mode 100644 index 0000000..16146c5 --- /dev/null +++ b/ui/app/styles/components/control-group.scss @@ -0,0 +1,53 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +.control-group, +.control-group-success { + @extend .box; + box-shadow: $box-shadow-middle, 0 0 1px $grey-dark; +} +.control-group-success.is-editor { + background: $grey-lightest; +} + +.control-group a { + color: currentColor; +} +.control-group-header { + box-shadow: 0 0 1px currentColor; + padding: $size-9 $size-6; + background: $grey-lightest; + color: $grey-dark; + position: relative; + strong { + color: currentColor; + } +} + +.control-group-header.is-success { + color: $green-dark; + background: $green-lightest; +} + +.control-group .authorizations { + margin-top: $size-9; +} + +.control-group .hover-copy-button-static { + color: $orange; +} + +.control-group-token-text { + color: $grey; + position: relative; + padding: $size-8 0; + + .hover-copy-button-static { + position: relative; + top: auto; + left: auto; + display: inline-block; + } +} diff --git a/ui/app/styles/components/diff-version-selector.scss b/ui/app/styles/components/diff-version-selector.scss new file mode 100644 index 0000000..a9432f1 --- /dev/null +++ b/ui/app/styles/components/diff-version-selector.scss @@ -0,0 +1,29 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.visual-diff { + background-color: black; + + pre { + color: $ui-gray-010; + } +} + +.jsondiffpatch-deleted .jsondiffpatch-property-name, +.jsondiffpatch-deleted pre, +.jsondiffpatch-modified .jsondiffpatch-left-value pre, +.jsondiffpatch-textdiff-deleted { + background: $red-500; +} +.jsondiffpatch-added .jsondiffpatch-property-name, +.jsondiffpatch-added .jsondiffpatch-value pre, +.jsondiffpatch-modified .jsondiffpatch-right-value pre, +.jsondiffpatch-textdiff-added { + background: $green-500; +} + +.jsondiffpatch-property-name { + color: $ui-gray-300; +} diff --git a/ui/app/styles/components/doc-link.scss b/ui/app/styles/components/doc-link.scss new file mode 100644 index 0000000..cc18482 --- /dev/null +++ b/ui/app/styles/components/doc-link.scss @@ -0,0 +1,22 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.doc-link { + color: $blue; + text-decoration: none; + font-weight: $font-weight-semibold; + &:hover { + text-decoration: underline !important; + } +} + +.doc-link-subtle { + color: inherit; + text-decoration: underline; + font-weight: inherit; + &:hover { + color: inherit; + } +} diff --git a/ui/app/styles/components/empty-state-component.scss b/ui/app/styles/components/empty-state-component.scss new file mode 100644 index 0000000..2bfd5d6 --- /dev/null +++ b/ui/app/styles/components/empty-state-component.scss @@ -0,0 +1,72 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +.empty-state { + align-items: center; + color: $grey; + background: $ui-gray-010; + display: flex; + justify-content: center; + padding: $spacing-xxl $spacing-s; + box-shadow: 0 -2px 0 -1px $ui-gray-300; +} + +.empty-state-transparent { + align-items: center; + color: $grey; + background: transparent; + display: flex; + justify-content: center; + padding: $spacing-xxl 0; + box-shadow: none; +} + +.empty-state-content { + max-width: 320px; +} + +.empty-state-title { + color: $grey; + font-size: $size-4; + font-weight: $font-weight-semibold; + line-height: 1.2; + margin-bottom: $spacing-xs; +} + +.empty-state-subTitle { + font-size: $size-7; + margin-top: -10px; + padding-bottom: $spacing-s; +} + +.empty-state-message.has-border-bottom-light { + padding-bottom: $spacing-s; +} + +.empty-state-actions { + margin-top: $spacing-xs; + display: flex; + justify-content: space-between; + + a, + .link, + a:not(.button):not(.file-delete-button):not(.tag) { + color: $blue; + font-size: $size-8; + font-weight: $font-weight-semibold; + text-decoration: none; + } + + > * + * { + margin-left: $spacing-s; + margin-right: $spacing-s; + } +} + +.empty-state-icon > .hs-icon, +.empty-state-icon > .flight-icon { + float: left; + margin-right: $spacing-xs; +} diff --git a/ui/app/styles/components/env-banner.scss b/ui/app/styles/components/env-banner.scss new file mode 100644 index 0000000..f18e90c --- /dev/null +++ b/ui/app/styles/components/env-banner.scss @@ -0,0 +1,34 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.env-banner { + align-self: center; + border-radius: $size-1; + background: linear-gradient( + 135deg, + $blue, + hsl(271, 100%, 71%) + ); // only use case for purple in the app. define here instead of utils/color_var + animation: env-banner-color-rotate 8s infinite linear alternate; + color: $white; + margin-top: -20px; + margin-bottom: 6px; + + .hs-icon { + margin: 0; + } + + .notification { + background-color: transparent; + line-height: 1.66; + padding: 0 $spacing-s; + } +} + +@keyframes env-banner-color-rotate { + 100% { + filter: hue-rotate(105deg); + } +} diff --git a/ui/app/styles/components/features-selection.scss b/ui/app/styles/components/features-selection.scss new file mode 100644 index 0000000..ac5904d --- /dev/null +++ b/ui/app/styles/components/features-selection.scss @@ -0,0 +1,60 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +.feature-header { + font-size: $size-6; + font-weight: $font-weight-semibold; + color: $grey; +} + +.access-information { + display: flex; + padding: $size-8 0px; + font-size: $size-8; +} + +.feature-box { + box-shadow: $box-shadow; + border-radius: $radius; + padding: $size-8; + margin: $size-8 0; + + &.is-active { + box-shadow: 0 0 0 1px $grey-light; + } + + &.is-disabled { + background-color: $ui-gray-010; + color: $ui-gray-300; + } +} + +.feature-box label { + font-weight: $font-weight-semibold; + padding-left: $size-10; + + &::before { + top: 3px; + } + + &::after { + top: 5px; + } +} + +.feature-steps { + font-size: $size-8; + color: $grey; + line-height: 1.5; + margin-left: $size-3; + margin-top: $size-10; + + li::before { + // bullet + content: '\2022'; + position: relative; + right: $size-11; + } +} diff --git a/ui/app/styles/components/form-section.scss b/ui/app/styles/components/form-section.scss new file mode 100644 index 0000000..e351d9f --- /dev/null +++ b/ui/app/styles/components/form-section.scss @@ -0,0 +1,15 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.form-section { + padding: 1.75rem 0; + box-shadow: 0 -1px 0 0 rgba($black, 0.1); +} + +.field:first-child .form-section, +.box > .field > .field.form-section.string-list { + padding: 0; + box-shadow: none; +} diff --git a/ui/app/styles/components/global-flash.scss b/ui/app/styles/components/global-flash.scss new file mode 100644 index 0000000..1b5f77f --- /dev/null +++ b/ui/app/styles/components/global-flash.scss @@ -0,0 +1,24 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.global-flash { + bottom: 0; + left: $spacing-s; + margin: 10px; + max-width: $drawer-width; + position: fixed; + width: 95%; + z-index: 300; + + .message { + box-shadow: $box-shadow-high; + .message-body { + overflow-wrap: break-word; + word-wrap: break-word; + word-break: break-word; + display: inline-flex; + } + } +} diff --git a/ui/app/styles/components/hover-copy-button.scss b/ui/app/styles/components/hover-copy-button.scss new file mode 100644 index 0000000..bfe748a --- /dev/null +++ b/ui/app/styles/components/hover-copy-button.scss @@ -0,0 +1,30 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.has-copy-button { + position: relative; + color: $grey; +} +.hover-copy-button, +.hover-copy-button-static { + position: absolute; + top: 0.5rem; + right: 0.5rem; +} + +.hover-copy-button { + opacity: 0; + pointer-events: none; + transition: opacity $speed ease-in-out; + will-change: opacity; + z-index: 10; +} + +.has-copy-button:hover .hover-copy-button, +.has-copy-button:focus .hover-copy-button, +.hover-copy-button .copy-button:focus { + opacity: 1; + pointer-events: auto; +} diff --git a/ui/app/styles/components/icon.scss b/ui/app/styles/components/icon.scss new file mode 100644 index 0000000..b10a2f9 --- /dev/null +++ b/ui/app/styles/components/icon.scss @@ -0,0 +1,89 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.icon { + align-items: center; + display: inline-flex; + height: $size-4; + justify-content: center; + vertical-align: middle; + width: $size-4; + // override the display if .button.icon to .button's default display: inline-block. 
See manage namespaces icon button in the namespace picker + &.button { + display: inline-block; + } +} + +.icon-blue { + color: $blue; +} + +.flight-icon { + &.flight-icon-display-inline { + vertical-align: middle; + margin-left: $spacing-xxs; + margin-right: $spacing-xxs; + } +} + +.hs-icon { + flex: 0 0 auto; + display: inline-flex; + justify-content: center; + align-items: flex-start; + vertical-align: middle; + width: 16px; + height: 16px; + margin: 2px 4px; +} + +.hs-icon svg { + fill: currentColor; + flex: 1 1 0; +} + +.hs-icon-button-right { + margin-left: 0.25rem; + margin-right: -0.5rem; + align-items: center; +} + +.hs-icon-s { + width: 12px; + height: 12px; +} + +.hs-icon-l { + width: 20px; + height: 20px; +} + +.hs-icon-xlm { + width: 24px; + height: 24px; +} + +.hs-icon-xl { + width: 28px; + height: 28px; +} + +.hs-icon-xxl { + width: 32px; + height: 32px; +} + +// if using @stretched on FlightIcon there must be an explicit height set on the parent if used in a flexbox +// without height set the flexbox will scale out of proportion on Safari + +.brand-icon-large { + width: 62px; + height: 62px; +} + +.error-icon { + width: 48px; + height: 48px; +} diff --git a/ui/app/styles/components/info-table-row.scss b/ui/app/styles/components/info-table-row.scss new file mode 100644 index 0000000..9112fdd --- /dev/null +++ b/ui/app/styles/components/info-table-row.scss @@ -0,0 +1,115 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.info-table-row { + box-shadow: 0 1px 0 $grey-light; + margin: 0; + + &.has-no-shadow { + box-shadow: none; + } + + @include from($mobile) { + display: flex; + } + + &.thead { + box-shadow: 0 1px 0 $grey-light, 0 -1px 0 $grey-light; + margin: 0; + padding: 0 $size-6 0 0; + + .column { + padding: 0.5rem 0.75rem; + } + } + + .column { + align-self: center; + padding: $spacing-m; + + &.info-table-row-edit { + padding-bottom: 0.3rem; + padding-top: 0.3rem; + } + + textarea { + min-height: 35px; + } + + .helper-text { + font-weight: normal; + } + &.justify-right { + display: flex; + justify-content: right; + } + } + + .hs-icon { + margin-right: $size-11; + } + + .icon-true { + color: $green-500; + } + + .icon-false { + color: $ui-gray-300; + } + + a { + text-decoration: none; + } +} + +.info-table-row:not(.is-mobile) .column { + @include until($mobile) { + padding: 0; + } + + &:first-child { + padding-left: 0; + + @include until($mobile) { + padding: $size-8 0 0; + } + } + + &:last-child { + padding-right: 0; + + @include until($mobile) { + padding: 0 0 $size-8; + } + } +} + +.info-table-row-header { + margin: 0; + font-size: $size-8; + color: $grey; + font-weight: $font-weight-semibold; + + @include until($mobile) { + display: none; + } + .info-table-row:not(.is-mobile) .column:last-child { + padding-left: 0; + } +} + +.label-overflow { + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; + + // inline display necessary for nested elements so ellipsis shows + > div { + display: inline; + } + > div > span { + display: inline; + } +} diff --git a/ui/app/styles/components/init-illustration.scss b/ui/app/styles/components/init-illustration.scss new file mode 100644 index 0000000..c7f14ee --- /dev/null +++ b/ui/app/styles/components/init-illustration.scss @@ -0,0 +1,23 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +.init-box { + position: relative; + z-index: 10; +} +.init-illustration { + bottom: 0; + right: 0; + overflow: hidden; + position: absolute; + height: 200px; + width: 200px; +} +.init-illustration svg { + position: absolute; + right: -50px; + bottom: -50px; + opacity: 0.8; +} diff --git a/ui/app/styles/components/kmip-role-edit.scss b/ui/app/styles/components/kmip-role-edit.scss new file mode 100644 index 0000000..e878c19 --- /dev/null +++ b/ui/app/styles/components/kmip-role-edit.scss @@ -0,0 +1,14 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.kmip-role-allowed-operations { + @extend .box; + flex: 1 1 auto; + box-shadow: none; + padding: 0; +} +.kmip-role-allowed-operations .field { + margin-bottom: $spacing-xxs; +} diff --git a/ui/app/styles/components/known-secondaries-card.scss b/ui/app/styles/components/known-secondaries-card.scss new file mode 100644 index 0000000..cb21c7c --- /dev/null +++ b/ui/app/styles/components/known-secondaries-card.scss @@ -0,0 +1,23 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.selectable-card.secondaries { + grid-column: 2/3; + grid-row: 1/3; + + @include until($mobile) { + grid-column: 1/1; + grid-row: 1/1; + } + + .secondaries-table { + margin-bottom: $spacing-s; + } + + .link { + font-size: $size-7; + text-decoration: none; + } +} diff --git a/ui/app/styles/components/license-banners.scss b/ui/app/styles/components/license-banners.scss new file mode 100644 index 0000000..665beee --- /dev/null +++ b/ui/app/styles/components/license-banners.scss @@ -0,0 +1,11 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.license-banner-wrapper { + width: 100%; + max-width: 1344px; + margin: $spacing-l auto 0; + padding: 0 1.5rem; +} diff --git a/ui/app/styles/components/linkable-item.scss b/ui/app/styles/components/linkable-item.scss new file mode 100644 index 0000000..e531bc2 --- /dev/null +++ b/ui/app/styles/components/linkable-item.scss @@ -0,0 +1,17 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.linkable-item { + display: grid; + grid-template-columns: minmax(0, 1fr) 50px 35px; + gap: 1em 1em; +} + +.linkable-item-menu { + box-sizing: border-box; + grid-column: 3; + grid-row: 1; + align-self: center; +} diff --git a/ui/app/styles/components/linked-block.scss b/ui/app/styles/components/linked-block.scss new file mode 100644 index 0000000..a5b40ab --- /dev/null +++ b/ui/app/styles/components/linked-block.scss @@ -0,0 +1,12 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.linked-block { + cursor: pointer; +} + +.linked-block .columns { + @extend .is-flex-center; +} diff --git a/ui/app/styles/components/list-item-row.scss b/ui/app/styles/components/list-item-row.scss new file mode 100644 index 0000000..24a27ca --- /dev/null +++ b/ui/app/styles/components/list-item-row.scss @@ -0,0 +1,56 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +.list-item-row { + @extend .box; + @extend .is-sideless; + @extend .is-marginless; + padding-left: 0; + padding-right: 0; + + .ember-basic-dropdown-trigger { + display: inline-block; + } + + .center-inside-row { + width: 50%; + margin-left: auto; + font-size: $size-8; + font-weight: $font-weight-semibold; + color: $ui-gray-500; + } + + .center-display { + width: 50%; + margin-left: auto; + margin-right: auto; + } + + &.is-disabled { + opacity: 0.5; + } +} + +a.list-item-row, +.linked-block.list-item-row { + transition: box-shadow $speed, margin $speed, padding $speed; + will-change: box-shadow, margin, padding; + + &:hover, + &:focus, + &:active { + margin-left: #{-$size-9} !important; + margin-right: #{-$size-9} !important; + padding-left: $size-9; + padding-right: $size-9; + position: relative; + box-shadow: 0 2px 0 -1px $grey-light, 0 -2px 0 -1px $grey-light, $box-link-hover-shadow, + $box-shadow-middle; + } + + &.no-destination { + cursor: default; + } +} diff --git a/ui/app/styles/components/list-pagination.scss b/ui/app/styles/components/list-pagination.scss new file mode 100644 index 0000000..5f487c4 --- /dev/null +++ b/ui/app/styles/components/list-pagination.scss @@ -0,0 +1,200 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +// This file combines Bulma CSS with our own CSS that previously overrode Bulma. In the future we should adopt the HDS pagination. + +.pagination-previous[disabled], +.pagination-next[disabled], +.pagination-link[disabled], +.pagination-ellipsis[disabled] { + cursor: not-allowed; +} + +.pagination-previous, +.pagination-next, +.pagination-link, +.pagination-ellipsis, +.tabs { + user-select: none; +} + +.pagination-previous, +.pagination-next, +.pagination-link, +.pagination-ellipsis { + align-items: center; + box-shadow: none; + display: inline-flex; + font-size: $size-6; + justify-content: flex-start; + line-height: 1.5; + margin: $size-11; + padding-bottom: calc(0.5em - 1px); + padding-left: calc(0.75em - 1px); + padding-right: calc(0.75em - 1px); + padding-top: calc(0.5em - 1px); + position: relative; + vertical-align: top; +} + +.pagination-link.is-current { + color: $white; +} + +.pagination-list { + flex-grow: 1; + flex-shrink: 1; + justify-content: flex-start; + order: 1; +} + +.list-pagination { + @extend .has-slim-padding; + position: relative; + top: 1px; + background-color: $grey-lightest; + margin-bottom: $size-4; + + a { + text-decoration: none; + height: 1.5rem; + min-width: 1.5rem; + border: none; + } + a.pagination-link { + width: 3ch; + } + a:not(.is-current):hover { + text-decoration: underline; + color: $blue; + } + a.is-current { + background-color: $grey; + } + .pagination { + justify-content: center; + } + + .pagination-list { + flex-grow: 0; + flex-wrap: wrap; + + li { + list-style: none; + } + } + .pagination, + .pagination-list { + align-items: center; + display: flex; + justify-content: center; + text-align: center; + } + .pagination-ellipsis { + margin: 0; + padding-left: 0; + padding-right: 0; + } +} + +.list-pagination .pagination-previous, +.list-pagination .pagination-next { + @extend .button; + @extend .is-primary; + @extend .is-outlined; + @extend .is-compact; + background: $white; + border-color: $blue-500; + color: $blue-500; + max-width: 8rem; + + @include until($mobile) { + max-width: 2rem; + padding-left: 0; + padding-right: 0; + } + + .pagination-next-label, + .pagination-previous-label { + @include until($mobile) { + display: none; + } + 
} + + .icon { + height: 1em; + width: 1em; + vertical-align: middle; + + &:last-child:not(:first-child), + &:first-child:not(:last-child) { + margin: -0.1em 0 0; + } + } + + .button .icon { + margin: 0; + } +} + +.pagination-previous { + order: 1; +} + +.pagination-next { + order: 3; +} + +.pagination.is-centered { + &.pagination-previous { + order: 1; + } + + &.pagination-list { + justify-content: center; + order: 2; + } + + &.pagination-next { + order: 3; + } +} + +.pagination.is-right { + &.pagination-previous { + order: 1; + } + + &.pagination-next { + order: 2; + } + + &.pagination-list { + justify-content: flex-end; + order: 3; + } +} + +// responsive css +@media screen and (max-width: 768px) { + .pagination { + flex-wrap: wrap; + } + .pagination-previous, + .pagination-next { + flex-grow: 1; + flex-shrink: 1; + } + .pagination-list li { + flex-grow: 1; + flex-shrink: 1; + } +} + +.list-pagination .pagination-next { + @include until($mobile) { + order: 3; + } +} diff --git a/ui/app/styles/components/loader.scss b/ui/app/styles/components/loader.scss new file mode 100644 index 0000000..51495cf --- /dev/null +++ b/ui/app/styles/components/loader.scss @@ -0,0 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.loader-inner-page { + height: 60vh; +} diff --git a/ui/app/styles/components/login-form.scss b/ui/app/styles/components/login-form.scss new file mode 100644 index 0000000..a04229b --- /dev/null +++ b/ui/app/styles/components/login-form.scss @@ -0,0 +1,9 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.login-form { + box-shadow: $box-shadow, $box-shadow-high; + overflow: auto; +} diff --git a/ui/app/styles/components/masked-input.scss b/ui/app/styles/components/masked-input.scss new file mode 100644 index 0000000..03d7868 --- /dev/null +++ b/ui/app/styles/components/masked-input.scss @@ -0,0 +1,101 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +.masked-font { + color: $ui-gray-300; +} + +.masked-input { + display: flex; + align-items: center; +} + +.has-label .masked-input { + padding-top: $spacing-s; +} + +.has-padding { + padding: $size-10 $size-8; +} + +// we want to style the boxes the same everywhere so they +// need to be the same font and small +.masked-input.masked .masked-value { + font-size: 9px; + font-family: $family-sans; + line-height: 2.5; +} + +.masked-input.display-only .masked-value { + order: 1; +} + +// aligns the boxes on the input page +.masked-input.masked:not(.display-only) .masked-value { + line-height: 3; + border-radius: $radius 0 0 $radius; +} + +//override bulma's pre styling +.masked-input .display-only { + line-height: 1.5; + font-size: 1rem; + padding-top: 0; + padding-bottom: 0; + padding-left: $spacing-s; + background-color: transparent; +} + +.button.masked-input-toggle, +.button.copy-button, +.button.download-button { + min-width: $spacing-xl; + border-left: 0; + color: $grey; + box-shadow: 0 3px 1px 0px rgba(10, 10, 10, 0.12); +} + +.button.copy-button, +.button.download-button { + border-radius: 0; +} + +.button.masked-input-toggle { + border-radius: 0 $radius $radius 0; +} + +.display-only { + .button.masked-input-toggle, + .button.copy-button, + .button.download-button { + background: transparent; + height: auto; + line-height: 1rem; + min-width: $spacing-l; + border: 0; + box-shadow: none; + color: $grey-light; + padding-left: 0; + padding-right: 0; + + &:active, + &.is-active, + &:focus, + &:hover, + &:focus:not(:active) { + color: $blue; + border: 0; + box-shadow: none; + } + } +} + +.masked-input.masked .masked-value { + color: $grey-light; +} + +.masked-input .input:focus + .masked-input-toggle { + background: rgba($white, 0.95); +} diff --git a/ui/app/styles/components/modal-component.scss b/ui/app/styles/components/modal-component.scss new file mode 100644 index 0000000..fbc55fd --- /dev/null +++ b/ui/app/styles/components/modal-component.scss @@ -0,0 +1,161 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +.modal { + align-items: center; + bottom: 0; + left: 0; + right: 0; + top: 0; + display: none; + justify-content: center; + overflow: hidden; + position: fixed; + z-index: 20; + + &.is-active { + display: flex; + } +} + +.modal-background { + background: $ui-gray-100; + bottom: 0; + left: 0; + opacity: 0.9; + position: absolute; + right: 0; + top: 0; +} + +.modal-card { + box-shadow: $box-shadow-highest; + border: 1px solid $grey-light; + display: flex; + flex-direction: column; + max-height: calc(100vh - 70px); + margin-top: 60px; + min-width: calc(100vw * 0.3); + overflow: hidden; + position: relative; + + &-head { + border-radius: 0; + background-color: $grey-lightest; + border-bottom: 1px solid $grey-light; + display: flex; + justify-content: flex-start; + padding: 20px; + } + + &-foot { + border-radius: 0; + border: 0; + background-color: $white; + padding: 20px; + + .button:not(:last-child) { + margin-right: 10px; + } + } + + &-title.title { + margin: 0; + flex-grow: 1; + } + + .copy-text { + background-color: $grey-lightest; + padding: $spacing-s; + margin-bottom: $spacing-s; + + code { + overflow: scroll; + max-width: calc(100% - 36px); + background-color: inherit; + } + } + + .copy-close { + margin-top: $spacing-s; + } +} + +.modal-card-title.title { + display: flex; + align-items: center; +} + +.modal-card-body { + background-color: $white; + flex-grow: 1; + flex-shrink: 1; + overflow: auto; + padding: 20px; +} + +pre { + background-color: inherit; +} + +.is-highlight { + .modal-card-head { + background: $yellow-010; + border: 1px solid $yellow-100; + } + .modal-card-title { + color: $yellow-dark; + } +} + +.modal-confirm-section { + margin: $spacing-xl 0 $spacing-m; +} + +.modal-card-foot-outlined { + background: $ui-gray-050; + border-top: $base-border; +} + +.modal-radio-button { + display: flex; + align-items: baseline; + margin-bottom: $spacing-xs; + + input { + top: 2px; + } + + .helper-text { + margin-left: 10px; + } +} + +// customize spacing (.modal-card-body is restricted to padding: 20px) +.modal-card-custom { + background-color: white; + flex-grow: 1; + flex-shrink: 1; + overflow: auto; + + &.has-padding-m { + padding: $spacing-m; + } + + &.has-padding-btm-left { + padding-bottom: $spacing-m; + padding-left: $spacing-m; + } +} + +// responsive css +@media screen and (min-width: 769px), print { + .modal-card, + .modal-content { + margin: 0 auto; + max-height: calc(100vh - 40px); + width: 640px; + } +} diff --git a/ui/app/styles/components/namespace-picker.scss b/ui/app/styles/components/namespace-picker.scss new file mode 100644 index 0000000..26d30ab --- /dev/null +++ b/ui/app/styles/components/namespace-picker.scss @@ -0,0 +1,124 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +.namespace-picker { + position: relative; + color: var(--token-color-palette-neutral-300); + display: flex; + padding: $spacing-xxs $spacing-xs; + width: 100%; +} + +.namespace-picker.no-namespaces { + border: none; + padding-right: 0; +} + +.namespace-picker-trigger { + align-items: center; + display: flex; + flex: 1 1 auto; + height: 2rem; + justify-content: space-between; + margin-right: $spacing-xxs; +} + +.namespace-picker-content { + width: 250px; + max-height: 300px; + overflow: auto; + border-radius: $radius; + box-shadow: $box-shadow, $box-shadow-high; + + &.ember-basic-dropdown-content { + background: $white; + } +} + +.namespace-picker-content .level-left { + max-width: 210px; + overflow-wrap: break-word; + word-wrap: break-word; + word-break: break-word; +} + +.namespace-header-bar { + padding: $size-11 $size-9; + border-bottom: 1px solid rgba($black, 0.1); + font-weight: $font-weight-semibold; + min-height: 32px; +} + +.namespace-manage-link { + border-top: 1px solid rgba($black, 0.1); + + .level-left { + font-weight: $font-weight-bold; + font-size: 14px; + } + .level-right { + margin-right: 10px; + } +} + +.namespace-list { + position: relative; + overflow: hidden; +} + +.namespace-link { + color: $black; + text-decoration: none; + font-weight: $font-weight-semibold; + padding: $size-10 $size-9 $size-10 0; +} + +.namespace-link.is-current { + margin-top: $size-8; + margin-right: -$size-10; + + svg { + margin-top: 2px; + color: var(--token-color-border-strong); + } +} + +.leaf-panel { + padding: $size-11 $size-9; + transition: transform ease-in-out 250ms; + will-change: transform; + transform: translateX(0); + background: $white; + position: absolute; + top: 0; + left: 0; + right: 0; + bottom: 0; + z-index: 1; +} + +.leaf-panel-left { + transform: translateX(-$drawer-width); +} + +.leaf-panel-adding, +.leaf-panel-current { + position: relative; + & .namespace-link:last-child { + margin-bottom: 4px; + } +} + +.animated-list { + .leaf-panel-exiting, + .leaf-panel-adding { + transform: translateX($drawer-width); + z-index: 20; + } +} + +.leaf-panel-adding { + z-index: 100; +} diff --git a/ui/app/styles/components/namespace-reminder.scss b/ui/app/styles/components/namespace-reminder.scss new file mode 100644 index 0000000..462f92e --- /dev/null +++ b/ui/app/styles/components/namespace-reminder.scss @@ -0,0 +1,24 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.namespace-reminder { + color: $grey; + margin: 0 0 $size-6 0; +} + +.console-reminder p.namespace-reminder { + color: $grey; + font-family: $family-monospace; + font-size: $size-8; + margin: $spacing-xxs 0 0; + opacity: 0.7; + position: absolute; + + .tag { + background-color: $ui-gray-800; + color: $light-grey; + font-size: $size-9; + } +} diff --git a/ui/app/styles/components/navigate-input.scss b/ui/app/styles/components/navigate-input.scss new file mode 100644 index 0000000..c0e2010 --- /dev/null +++ b/ui/app/styles/components/navigate-input.scss @@ -0,0 +1,10 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.search-icon { + position: absolute; + top: 6px; + left: 2px; +} diff --git a/ui/app/styles/components/page-header.scss b/ui/app/styles/components/page-header.scss new file mode 100644 index 0000000..04c0ddf --- /dev/null +++ b/ui/app/styles/components/page-header.scss @@ -0,0 +1,44 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +.page-header { + padding-bottom: $size-10; + padding-top: $size-4; + + .level { + align-items: flex-end; + } + .level-left, + .level-right { + flex-grow: 1; + flex-shrink: 1; + + @include until($mobile) { + margin-top: 0.5rem; + } + } + .level-right { + @include from($mobile) { + justify-content: flex-end; + } + } + + .title { + margin-top: $size-2; + } + + .title-with-icon { + display: flex; + } + + .breadcrumb + .level .title { + margin-top: $size-4; + } + .title .icon { + height: auto; + vertical-align: -0.15em; + width: auto; + } +} diff --git a/ui/app/styles/components/popup-menu.scss b/ui/app/styles/components/popup-menu.scss new file mode 100644 index 0000000..6a1fc14 --- /dev/null +++ b/ui/app/styles/components/popup-menu.scss @@ -0,0 +1,161 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.popup-menu-content, +.ember-power-select-options { + border-radius: $radius; + margin: -2px 0 0 0; + + & > .box { + border-radius: $radius; + box-shadow: $box-shadow, $box-shadow-middle; + padding: 0; + position: relative; + width: 210px; + } + + &.is-wide > .box { + width: $drawer-width; + } + + .confirm-action span .button { + display: block; + margin: 0.25rem auto; + width: 95%; + } + + .menu { + padding: $size-11 0; + } + + button.link, + a, + .ember-power-select-option, + .ember-power-select-option[aria-current='true'], + .menu-item { + background: transparent; + box-shadow: none; + border: none; + display: block; + height: auto; + font-size: $size-7; + font-weight: $font-weight-semibold; + padding: $size-9 $size-8; + text-align: left; + text-decoration: none; + width: 100%; + } + + button.link, + .ember-power-select-option, + .ember-power-select-option[aria-current='true'], + a { + background-color: $white; + color: $grey-darkest; + + &:hover { + background-color: $ui-gray-050; + color: $ui-gray-900; + } + + &.active, + &.is-active { + background-color: transparent; + color: $blue; + } + + &.is-destroy { + color: $red; + + &:hover { + background-color: $red; + color: $white; + } + } + + &.disabled { + opacity: 0.5; + + &:hover { + background: transparent; + cursor: default; + } + } + } + + .menu-label { + color: $grey-dark; + font-size: $size-9; + font-weight: $font-weight-normal; + letter-spacing: 0; + margin: 0; + padding: $size-10 $size-8 0; + text-transform: uppercase; + } + + .menu-content { + padding: $size-10 $size-8; + } + + hr { + background-color: $ui-gray-200; + margin: 0; + } +} + +.popup-menu-content p { + box-shadow: none; + padding-top: $size-10; + font-weight: $font-weight-semibold; +} + +.popup-menu-content .level-left { + flex-shrink: 1; +} + +// when you need to limit the height and have dropdown scroll in child nav element +.popup-menu-content > nav.scroll { + height: 200px; + overflow-y: scroll; +} + +.list-item-row, +.info-table-row, +.wizard-dismiss-menu { + .popup-menu-trigger { + height: 2.5rem; + min-width: 0; + padding: 0; + width: 2.5rem; + } +} + +.ember-basic-dropdown-content { + background-color: transparent; + + &--left.popup-menu { + margin: 0px 0 0 -8px; + } + + &--below { + &.ember-basic-dropdown--transitioning-in { + animation: drop-fade-above 0.15s; + } + + &.ember-basic-dropdown--transitioning-out { + animation: drop-fade-above 0.15s reverse; + } + } + + &--above { + &.ember-basic-dropdown--transitioning-in { + animation: drop-fade-below 0.15s; + } + + &.ember-basic-dropdown--transitioning-out { + animation: drop-fade-below 0.15s reverse; + } + } +} diff --git 
a/ui/app/styles/components/radial-progress.scss b/ui/app/styles/components/radial-progress.scss new file mode 100644 index 0000000..1a06530 --- /dev/null +++ b/ui/app/styles/components/radial-progress.scss @@ -0,0 +1,17 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.radial-progress { + transform: rotate(-90deg) translateX(-20%); +} +.radial-progress circle { + stroke: rgba($grey-light, 0.5); + transition: stroke-dashoffset $speed ease-in; + will-change: stroke-dashoffset; + stroke-linecap: round; +} +.radial-progress circle.progress-fill { + stroke: $green; +} diff --git a/ui/app/styles/components/radio-card.scss b/ui/app/styles/components/radio-card.scss new file mode 100644 index 0000000..1bf70e2 --- /dev/null +++ b/ui/app/styles/components/radio-card.scss @@ -0,0 +1,109 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.radio-card-selector { + display: flex; + margin-bottom: $spacing-xs; +} +.radio-card { + box-shadow: $box-shadow-low; + flex: 1 1 25%; + flex-direction: column; + justify-content: space-between; + margin: $spacing-xs $spacing-m; + border: $base-border; + border-radius: $radius; + transition: all ease-in-out $speed; + max-width: 60%; + input[type='radio'] { + position: absolute; + z-index: 1; + opacity: 0; + } + + input[type='radio'] + span.dot { + border: 1px solid $grey-light; + border-radius: 50%; + cursor: pointer; + display: block; + height: 1rem; + width: 1rem; + flex-shrink: 0; + flex-grow: 0; + } + + input[type='radio']:checked + span.dot { + background: $blue; + border: 1px solid $blue; + box-shadow: inset 0 0 0 0.15rem $white; + } + input[type='radio']:focus + span.dot { + box-shadow: 0 0 10px 1px rgba($blue, 0.4), inset 0 0 0 0.15rem $white; + } + + &.is-disabled { + opacity: 0.6; + box-shadow: none; + } + + &.has-fixed-width { + width: 368px; + max-width: 368px; + } +} +.radio-card:first-child { + margin-left: 0; +} +.radio-card:last-child { + margin-right: 0; +} +.radio-card-container { + display: flex; + flex-direction: column; + height: 100%; +} +.radio-card-row { + display: flex; + flex: 1; + padding: $spacing-m; +} +.radio-card-icon { + color: $ui-gray-300; +} +.radio-card-message { + margin: $spacing-xxs; +} + +.radio-card-message-title { + font-weight: $font-weight-semibold; + font-size: $size-7; + margin-bottom: $spacing-xxs; +} +.radio-card-message-body { + line-height: 1.2; + color: $ui-gray-500; + font-size: $size-8; +} + +.radio-card-radio-row { + display: flex; + justify-content: center; + background: $ui-gray-050; + padding: $spacing-xs; +} + +.is-selected { + &.radio-card { + border-color: $blue-500; + background: $ui-gray-010; + box-shadow: $box-shadow-middle; + } + .radio-card-icon { + color: $black; + } + .radio-card-radio-row { + background: $blue-050; + } +} diff --git a/ui/app/styles/components/raft-join.scss b/ui/app/styles/components/raft-join.scss new file mode 100644 index 0000000..a2209f9 --- /dev/null +++ b/ui/app/styles/components/raft-join.scss @@ -0,0 +1,17 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +.raft-join .field { + margin-bottom: 0; +} +.raft-join .box.is-fullwidth { + padding-top: $spacing-s; + padding-bottom: $spacing-s; +} +.raft-join-unseal { + color: $orange; + font-size: $size-6; + display: inline-block; +} diff --git a/ui/app/styles/components/read-more.scss b/ui/app/styles/components/read-more.scss new file mode 100644 index 0000000..5348a27 --- /dev/null +++ b/ui/app/styles/components/read-more.scss @@ -0,0 +1,20 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.linkable-item-seemore { + grid-column: 2; + grid-row: 1; + align-self: baseline; +} + +.overflow-ellipsis { + box-sizing: border-box; + + &.is-closed { + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; + } +} diff --git a/ui/app/styles/components/regex-validator.scss b/ui/app/styles/components/regex-validator.scss new file mode 100644 index 0000000..ab9c68d --- /dev/null +++ b/ui/app/styles/components/regex-validator.scss @@ -0,0 +1,33 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.regex-label-wrapper { + display: flex; + align-items: flex-end; +} +.regex-label { + flex: 1 0 auto; +} +.regex-toggle { + flex: 0 1 auto; +} +.regex-group { + font-family: $family-monospace; + font-size: $size-8; + color: $ui-gray-600; +} +.regex-group-position { + background-color: $ui-gray-200; + border-radius: 3px; + padding-top: 5px; + padding-bottom: 4px; + margin-right: 4px; + span { + margin-left: 6px; + } +} +.regex-group-value { + margin-right: $spacing-m; +} diff --git a/ui/app/styles/components/replication-dashboard.scss b/ui/app/styles/components/replication-dashboard.scss new file mode 100644 index 0000000..68d5993 --- /dev/null +++ b/ui/app/styles/components/replication-dashboard.scss @@ -0,0 +1,136 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +.replication-dashboard { + box-shadow: none; + + .selectable-card { + line-height: normal; + + &:hover { + box-shadow: 0 0 0 1px rgba($grey-dark, 0.3); + } + + .toolbar-link { + color: $blue-500; + } + } + + .helper-text { + font-weight: $font-weight-normal; + } + + .title.is-6 { + margin-bottom: $spacing-xs; + } + + .reindexing-alert, + .syncing-alert { + margin-top: $spacing-xl; + } + + .selectable-card-container { + margin-top: $spacing-xl; + display: grid; + + &.primary, + .summary { + margin: 2rem 0 2rem 0; + grid-template-columns: 1fr 2fr; + + @include until($mobile) { + grid-template-columns: 1fr; + } + } + + &.secondary { + grid-template-columns: repeat(auto-fit, minmax(400px, 1fr)); + grid-gap: $spacing-xl; + } + + .card-container { + display: grid; + grid-gap: $spacing-s; + grid-template-columns: 1fr 1fr; + grid-template-rows: 0.2fr 0.2fr 0.2fr; + padding: $spacing-l; + line-height: 1.5; + + &.summary { + grid-template-rows: 0.2fr 1fr 0.2fr 1fr; + } + + &.has-error-border:hover { + box-shadow: none; + } + + @include until(1320px) { + // prevent an issue with the card descriptions wrapping and expanding height + min-height: 250px; + } + + .grid-item-top-left { + grid-column: 1 / span 1; + display: flex; + } + .grid-item-top-right { + grid-column: 2 / span 1; + justify-self: right; + } + .grid-item-left { + grid-column: 1/1; + grid-row: 2/2; + } + .grid-item-right { + grid-column: 2/2; + grid-row: 2/2; + } + + .grid-item-bottom-left { + grid-column: 1/1; + grid-row: 3/3; + display: flex; + align-items: center; + } + .grid-item-bottom-right { + grid-column: 2/2; + grid-row: 3/3; + } + + .grid-item-second-row { + grid-column: 1 / span 2; + grid-row: 2/2; + } + + .grid-item-third-row { + grid-column: 1 / span 2; + grid-row: 3/4; + + .empty-state { + padding: 0px 12px; + box-shadow: none; + } + } + .grid-item-bottom-row { + grid-column: 1 / span 2; + grid-row: 4/4; + } + } + + &.summary { + margin-bottom: $spacing-xl; + } + } + .summary-state { + padding-bottom: $spacing-xl; + border-bottom: 1px solid rgba($grey-dark, 0.3); + } + + // prevent double lines at the bottom of the dashboard + &.box { + padding-bottom: 0; + padding-top: 1px; // at least 1px so border still shows + } +} diff --git a/ui/app/styles/components/replication-mode-summary.scss b/ui/app/styles/components/replication-mode-summary.scss new file mode 100644 index 0000000..d1cb2cb --- /dev/null +++ b/ui/app/styles/components/replication-mode-summary.scss @@ -0,0 +1,16 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.replication-description { + flex-shrink: 1; + + .title { + margin-bottom: $spacing-xs; + } + + .detail-tags { + margin-bottom: $spacing-m; + } +} diff --git a/ui/app/styles/components/replication-page.scss b/ui/app/styles/components/replication-page.scss new file mode 100644 index 0000000..e81f480 --- /dev/null +++ b/ui/app/styles/components/replication-page.scss @@ -0,0 +1,10 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.replication-page { + .empty-state { + background: none; + } +} diff --git a/ui/app/styles/components/replication-primary-card.scss b/ui/app/styles/components/replication-primary-card.scss new file mode 100644 index 0000000..e0c9787 --- /dev/null +++ b/ui/app/styles/components/replication-primary-card.scss @@ -0,0 +1,20 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +.replication { + .selectable-card { + display: initial; + line-height: normal; + padding: $spacing-l; + + &:hover { + box-shadow: 0 0 0 1px rgba($grey-dark, 0.3); + } + + .card-title { + margin-bottom: 2rem; + } + } +} diff --git a/ui/app/styles/components/replication-summary.scss b/ui/app/styles/components/replication-summary.scss new file mode 100644 index 0000000..6ab7be6 --- /dev/null +++ b/ui/app/styles/components/replication-summary.scss @@ -0,0 +1,19 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.replication { + .toolbar { + border-top: 0px; + } + + .helper-text { + font-weight: normal; + } + + .float-right { + margin: $spacing-s 0 $spacing-l 0; + float: right; + } +} diff --git a/ui/app/styles/components/role-item.scss b/ui/app/styles/components/role-item.scss new file mode 100644 index 0000000..67c876b --- /dev/null +++ b/ui/app/styles/components/role-item.scss @@ -0,0 +1,9 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.role-item-details { + float: left; + margin-left: 8px; +} diff --git a/ui/app/styles/components/search-select.scss b/ui/app/styles/components/search-select.scss new file mode 100644 index 0000000..8d76144 --- /dev/null +++ b/ui/app/styles/components/search-select.scss @@ -0,0 +1,140 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.ember-power-select-dropdown { + background: transparent; + box-shadow: none; + overflow: visible; + + &.ember-power-select-dropdown.ember-basic-dropdown-content--below { + border: 0; + } +} + +.ember-power-select-trigger { + border: 0; + border-radius: $radius; + padding-right: 0; + + &--active { + outline-width: 3px; + outline-offset: -2px; + } +} + +.ember-power-select-trigger:focus, +.ember-power-select-trigger--active { + border: 0; +} + +.ember-power-select-status-icon { + display: none; +} + +.ember-power-select-search { + left: 0; + padding: 0; + position: absolute; + top: 0; + right: 0; + transform: translateY(-100%); + z-index: -1; + + &::after { + background: $white; + bottom: $spacing-xxs; + content: ''; + left: $spacing-xxs + $spacing-l; + position: absolute; + right: $spacing-xxs; + top: $spacing-xxs; + z-index: -1; + } +} + +.ember-power-select-search-input { + background: transparent; + border: 0; + padding: $spacing-xxs $spacing-s; + padding-left: $spacing-xxs + $spacing-l; +} + +div > .ember-power-select-options { + background: $white; + border: $base-border; + box-shadow: $box-shadow-middle; + margin: -4px $spacing-xs 0; + padding: $spacing-xxs 0; + + .ember-power-select-option, + .ember-power-select-option[aria-current='true'] { + align-items: center; + display: flex; + justify-content: space-between; + } + + .ember-power-select-option[aria-current='true'] { + background-color: $ui-gray-050; + color: $ui-gray-900; + } + + .ember-power-select-option--no-matches-message { + color: $grey; + font-size: $size-8; + font-weight: $font-weight-semibold; + text-transform: uppercase; + + &:hover, + &:focus { + background: transparent; + color: $grey; + } + } +} + +.search-select-list-item { + align-items: center; + display: flex; + padding: $spacing-xxs; + justify-content: space-between; + border-bottom: $light-border; + + .control .button { + color: $grey-light; + min-width: auto; + + &:hover, + &:focus { + color: $blue; + } + } +} + +.search-select-list-key { + color: $grey; + font-size: $size-8; +} + +.ember-power-select-dropdown.ember-basic-dropdown-content { + 
animation: none; + + .ember-power-select-options { + animation: drop-fade-above 0.15s; + } +} + +.search-select .search-icon { + position: absolute; + width: 20px; + top: 5px; +} + +.search-icon { + margin-top: 4px; +} + +.search-select.display-inherit { + display: inherit; +} diff --git a/ui/app/styles/components/selectable-card-container.scss b/ui/app/styles/components/selectable-card-container.scss new file mode 100644 index 0000000..8422993 --- /dev/null +++ b/ui/app/styles/components/selectable-card-container.scss @@ -0,0 +1,49 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.selectable-card-container { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(240px, 1fr)); + grid-template-rows: 1fr; + grid-gap: 2rem; + + &.one-card { + max-width: 33%; + min-width: 350px; + margin-left: auto; + margin-right: auto; + } +} + +.selectable-card-container.has-grid { + display: grid; + grid-template-columns: 2fr 1fr; + grid-template-rows: repeat(2, 1fr); + grid-gap: 2rem; + + @include until($mobile) { + grid-template-columns: 2fr; + } + + .selectable-card.is-grid-container { + display: grid; + grid-template-columns: 2fr 0.5fr; + grid-template-rows: 1fr 2fr 0.5fr; + padding: $spacing-l 0 14px $spacing-l; // modify bottom spacing to better align with other cards + } +} + +.selectable-card-container.has-grid.has-two-col-grid { + grid-template-columns: 2fr 2fr; + grid-template-rows: none; + + @include until($mobile) { + grid-template-columns: 1fr; + } +} +.selectable-card-container.has-grid.has-three-col-grid { + grid-template-columns: 1fr 1fr 1fr; + grid-template-rows: none; +} diff --git a/ui/app/styles/components/selectable-card.scss b/ui/app/styles/components/selectable-card.scss new file mode 100644 index 0000000..f60c5a1 --- /dev/null +++ b/ui/app/styles/components/selectable-card.scss @@ -0,0 +1,109 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +.selectable-card { + box-shadow: 0 0 0 1px rgba($grey-dark, 0.3); + display: flex; + justify-content: space-between; + padding: $spacing-l 0 $spacing-l $spacing-l; + line-height: 0; + + &.no-flex { + padding: $spacing-l; + display: initial; + line-height: initial; + + .title-number { + padding-top: $spacing-s; + } + + .search-label { + margin-bottom: -$spacing-xs; + } + } + + &:hover { + box-shadow: 0 0 0 1px $grey-light, $box-shadow-middle; + } + + > a { + text-decoration: none; + } + + .button { + &:disabled { + border-color: $ui-gray-300; + } + } + + .card-details { + grid-column-start: 2; + grid-row-start: 3; + align-self: center; + justify-self: right; + padding-right: $spacing-l; + } + + .change-metric { + justify-self: right; + padding-right: $spacing-l; + display: grid; + grid-template-columns: 1fr 2fr; + grid-template-rows: 1fr 1fr; + + .hs-icon { + color: $grey-light; + align-self: center; + justify-self: right; + } + + .amount-change { + align-self: center; + justify-self: center; + + font-weight: 500; + } + .item-c { + grid-column: 1 / span 2; + align-self: start; + justify-self: end; + + font-weight: $font-weight-semibold; + white-space: nowrap; + + @include until($mobile) { + overflow: hidden; + } + } + } + + .title-number { + color: $black; + font-size: 36px; + font-weight: 500; + line-height: 1.33; + } + + .vlt-table { + max-height: 200px; + overflow-y: auto; + } +} + +.selectable-card.is-rounded { + border-radius: $radius; +} + +.selectable-card.has-error-border { + box-shadow: none; +} + +.change-metric-icon.is-decrease { + transform: rotate(135deg); +} + +.change-metric-icon.is-increase { + transform: rotate(45deg); +} diff --git a/ui/app/styles/components/shamir-modal-flow.scss b/ui/app/styles/components/shamir-modal-flow.scss new file mode 100644 index 0000000..f7f70d5 --- /dev/null +++ b/ui/app/styles/components/shamir-modal-flow.scss @@ -0,0 +1,9 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.field-title { + font-weight: 700; + font-size: $size-7; +} diff --git a/ui/app/styles/components/shamir-progress.scss b/ui/app/styles/components/shamir-progress.scss new file mode 100644 index 0000000..a23734d --- /dev/null +++ b/ui/app/styles/components/shamir-progress.scss @@ -0,0 +1,17 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.shamir-progress { + .shamir-progress-progress { + display: inline-block; + margin-top: $size-10; + margin-right: $size-8; + } + .progress { + box-shadow: 0 0 0 4px $ui-gray-050; + margin-top: $size-10; + min-width: 90px; + } +} diff --git a/ui/app/styles/components/sidebar.scss b/ui/app/styles/components/sidebar.scss new file mode 100644 index 0000000..5e39671 --- /dev/null +++ b/ui/app/styles/components/sidebar.scss @@ -0,0 +1,52 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +.sidebar-user-menu { + align-self: center; + + .popup-menu-content { + .menu-label { + color: $black; + font-size: 14px; + font-weight: $font-weight-bold; + text-transform: unset; + } + .token-alert { + padding: $spacing-xs; + } + } + + .confirm-action-message p { + padding-top: $size-10; + font-weight: $font-weight-semibold; + color: $black; + } +} + +.link-status { + height: 40px; + display: flex; + justify-content: center; + align-items: center; + font-size: $size-7; + font-weight: $font-weight-semibold; + + &.connected { + background-color: var(--token-color-surface-action); + color: var(--token-color-foreground-action-active); + + a { + color: var(--token-color-foreground-action-active); + } + } + &.warning { + background-color: var(--token-color-surface-warning); + color: var(--token-color-palette-amber-300); + + a { + color: var(--token-color-palette-amber-300); + } + } +} diff --git a/ui/app/styles/components/splash-page.scss b/ui/app/styles/components/splash-page.scss new file mode 100644 index 0000000..48ca107 --- /dev/null +++ b/ui/app/styles/components/splash-page.scss @@ -0,0 +1,30 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.splash-page-logo { + padding: $spacing-xs $spacing-s $spacing-xs $spacing-l; + + @include from($mobile) { + padding-left: $spacing-xs; + } + + svg { + fill: $white; + height: 24px; + width: 72px; + + @include from($mobile) { + margin-left: $spacing-xs; + } + } +} + +.splash-page-container { + margin: $size-2 0; +} + +.splash-page-header { + padding: $size-6 0; +} diff --git a/ui/app/styles/components/stat-text.scss b/ui/app/styles/components/stat-text.scss new file mode 100644 index 0000000..5fbf8b4 --- /dev/null +++ b/ui/app/styles/components/stat-text.scss @@ -0,0 +1,110 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +.stat-text-container { + line-height: normal; + max-height: 100%; + display: flex; + flex-direction: column; + + &.l, + &.m { + .stat-label { + font-size: $size-5; + font-weight: $font-weight-semibold; + margin-bottom: $spacing-xxs; + line-height: inherit; + } + .stat-text { + font-size: $size-8; + font-weight: $font-weight-normal; + color: $ui-gray-700; + line-height: inherit; + } + .stat-value { + font-size: $size-3; + font-weight: $font-weight-normal; + margin-top: $spacing-s; + } + } + + &.s { + .stat-label { + font-size: $size-5; + font-weight: $font-weight-semibold; + line-height: inherit; + } + .stat-text { + font-size: $size-8; + font-weight: $font-weight-normal; + color: $ui-gray-700; + line-height: inherit; + } + .stat-value { + font-size: $size-5; + font-weight: $font-weight-normal; + margin-top: $spacing-s; + } + } + + &.l-no-subText { + .stat-label { + font-size: $size-5; + font-weight: $font-weight-semibold; + line-height: inherit; + } + .stat-text { + font-size: $size-8; + font-weight: $font-weight-normal; + color: $ui-gray-700; + line-height: inherit; + } + .stat-value { + font-size: $size-3; + font-weight: $font-weight-normal; + margin-top: $spacing-xxs; + } + } + + &.m-no-subText { + .stat-label { + font-size: $size-8; + font-weight: $font-weight-bold; + line-height: inherit; + } + .stat-text { + font-size: $size-8; + font-weight: $font-weight-normal; + color: $ui-gray-700; + line-height: inherit; + } + .stat-value { + font-size: $size-5; + font-weight: $font-weight-normal; + margin-top: $spacing-xxs; + } + } + + &.s-no-subText { + .stat-label { + font-size: $size-8; + font-weight: $font-weight-normal; + color: $ui-gray-500; + line-height: inherit; + } + .stat-text { + font-size: $size-8; + font-weight: $font-weight-normal; + color: $ui-gray-700; + line-height: inherit; + } + .stat-value { + font-size: $size-8; + font-weight: $font-weight-normal; + color: $ui-gray-800; + line-height: inherit; + } + } +} diff --git a/ui/app/styles/components/tabs-component.scss b/ui/app/styles/components/tabs-component.scss new file mode 100644 index 0000000..baf3fab --- /dev/null +++ b/ui/app/styles/components/tabs-component.scss @@ -0,0 +1,101 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +// This file defines the style for .tabs-container, .tabs and .tab + +.page-header + .tabs-container { + box-shadow: none; +} + +.tabs { + align-items: stretch; + box-shadow: inset 0 -1px 0 $grey-light; + display: flex; + justify-content: space-between; + overflow: hidden; + overflow-x: auto; + user-select: none; + white-space: nowrap; + + ul { + align-items: center; + display: flex; + justify-content: flex-start; + min-height: 3rem; + + > a { + &:focus { + box-shadow: none; + } + &.active, + &.active .tab { + border-color: $blue; + color: $blue; + } + } + li { + // solves for tools -> sub-tabs like "Unwrap data" -> "Data" + &.is-active { + border-bottom: 2px solid $blue; + color: $blue; + + > button { + color: $blue; + } + } + // solves for tabs on auth mounts & secrets engines + > a { + &.active { + color: $blue; + background-color: transparent; + border-bottom: 2px solid $blue; + } + } + } + } + + li { + &:focus { + box-shadow: none; + } + &.active a, + &.is-active a, + &.active .tab { + border-color: $blue; + color: $blue; + } + } + // important for auth tabs in active state, otherwise the border-bottom will not show. 
+ a { + align-items: center; + display: flex; + justify-content: center; + vertical-align: top; + } + + a, + .tab { + border-bottom: 2px solid transparent; + color: $grey; + font-weight: $font-weight-semibold; + padding: $size-6 $size-8 $size-8; + text-decoration: none; + transition: background-color $speed, border-color $speed; + + &:hover, + &:active { + border-color: $grey-light; + } + + &:hover { + background-color: $ui-gray-050; + color: $grey-darkest; + } + } + + .ember-basic-dropdown-trigger { + outline: none; + } +} diff --git a/ui/app/styles/components/text-file.scss b/ui/app/styles/components/text-file.scss new file mode 100644 index 0000000..081beb2 --- /dev/null +++ b/ui/app/styles/components/text-file.scss @@ -0,0 +1,19 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.text-file { + .has-icon-right { + display: flex; + width: 97%; + + .textarea { + line-height: inherit; + } + } + .button.masked-input-toggle, + .button.copy-button { + display: flex; + } +} diff --git a/ui/app/styles/components/token-expire-warning.scss b/ui/app/styles/components/token-expire-warning.scss new file mode 100644 index 0000000..ca43966 --- /dev/null +++ b/ui/app/styles/components/token-expire-warning.scss @@ -0,0 +1,22 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.token-expire-warning { + position: absolute; + z-index: 200; + display: flex; + justify-content: center; + box-shadow: $box-shadow-highest; + top: 1rem; + left: 1rem; + right: 1rem; +} +.token-expire-warning .content p { + padding-right: $size-6; +} +.token-expire-warning .message { + margin: 0; + width: 100%; +} diff --git a/ui/app/styles/components/tool-tip.scss b/ui/app/styles/components/tool-tip.scss new file mode 100644 index 0000000..94a029a --- /dev/null +++ b/ui/app/styles/components/tool-tip.scss @@ -0,0 +1,72 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +.tool-tip { + font-size: $size-7; + text-transform: none; + margin: 8px 0px 0 -4px; + + .box { + position: relative; + color: $white; + max-width: 200px; + background: $grey; + padding: 0.5rem; + line-height: 1.4; + } + + .fit-content { + max-width: fit-content; + } + + @include css-top-arrow(8px, $grey, 1px, $grey-dark, 20px); + &.ember-basic-dropdown-content--below.ember-basic-dropdown--transitioning-in { + animation: drop-fade-above 0.15s; + } + &.ember-basic-dropdown-content--below.ember-basic-dropdown--transitioning-out { + animation: drop-fade-above 0.15s reverse; + } + &.ember-basic-dropdown-content--above.ember-basic-dropdown--transitioning-in { + animation: drop-fade-below 0.15s; + } + &.ember-basic-dropdown-content--above.ember-basic-dropdown--transitioning-out { + animation: drop-fade-below 0.15s reverse; + } + &.smaller-font { + font-size: $size-8; + } +} + +.ember-basic-dropdown-content--left.tool-tip { + margin: 8px 0 0 -2px; + + &::before, + &::after { + right: auto; + left: $spacing-s; + } +} +.ember-basic-dropdown-content--right.tool-tip { + margin: 8px -11px; +} + +.ember-basic-dropdown-content--below.ember-basic-dropdown-content--left.tool-tip { + @include css-top-arrow(8px, $grey, 1px, $grey-dark, calc(100% - 20px)); +} +.ember-basic-dropdown-content--below.ember-basic-dropdown-content--right.tool-tip { + @include css-top-arrow(8px, $grey, 1px, $grey-dark, calc(100% - 20px)); +} +.ember-basic-dropdown-content--above.tool-tip { + @include css-bottom-arrow(8px, $grey, 1px, $grey-dark); + margin-top: -8px; +} +.ember-basic-dropdown-content--above.ember-basic-dropdown-content--right.tool-tip { + @include css-bottom-arrow(8px, $grey, 1px, $grey-dark, calc(100% - 20px)); +} + +.b-checkbox .tool-tip-trigger { + position: relative; + top: -3px; +} diff --git a/ui/app/styles/components/toolbar.scss b/ui/app/styles/components/toolbar.scss new file mode 100644 index 0000000..130d258 --- /dev/null +++ b/ui/app/styles/components/toolbar.scss @@ -0,0 +1,146 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +.tabs-container + .toolbar { + border-top: 0; +} + +.toolbar { + background-color: $ui-gray-010; + border: 1px solid $ui-gray-100; + border-bottom-color: $ui-gray-300; + border-top-color: $ui-gray-300; + position: relative; + + &::after { + background: linear-gradient(to right, $ui-gray-010, rgba($ui-gray-010, 0)); + bottom: 0; + content: ''; + position: absolute; + right: 0; + top: 0; + width: $spacing-xxs; + z-index: 2; + } + + .input, + .select { + min-width: 190px; + } +} + +.toolbar-label { + padding: $spacing-xs; + color: $grey; +} + +.toolbar-scroller { + align-items: center; + display: flex; + height: 44px; + justify-content: space-between; + overflow-x: auto; + width: 100%; + + @include from($mobile) { + padding: 0 $spacing-xxs; + } + + &::-webkit-scrollbar { + border-bottom: $base-border; + height: $spacing-xxs; + } + + &::-webkit-scrollbar-thumb { + background: $ui-gray-300; + border-radius: $spacing-xxs; + } +} + +.toolbar-filters, +.toolbar-actions { + align-items: center; + display: flex; + flex: 1; + white-space: nowrap; +} + +.toolbar-filters + .toolbar-actions { + @include until($mobile) { + border-left: $base-border; + margin-left: $spacing-xs; + padding-left: $spacing-xs; + } +} + +.toolbar-actions { + @include from($mobile) { + justify-content: flex-end; + } +} + +.toolbar-link { + @extend .button; + @extend .is-ghost; + @extend .has-icon-right; + border: 0; + color: $black; + transition: background-color $speed; + + &:hover:not(.disabled) { + background-color: $ui-gray-100; + border: 0; + color: $blue; + } + + &:active { + box-shadow: none; + } + + &.popup-menu-trigger { + height: 2.5rem; + padding: $size-10 $size-8; + } + + &.disabled { + opacity: 0.5; + cursor: default; + + &:focus { + box-shadow: none; + } + &:hover { + background: transparent; + } + } +} +a.disabled.toolbar-link { + color: $black; + background-color: $white; + cursor: not-allowed; + &:hover { + background-color: $ui-gray-100; + color: $blue; + } +} + +.toolbar-separator { + border-right: $light-border; + height: 32px; + margin: 0 $spacing-xs; + width: 0; +} + +.version-diff-toolbar { + display: flex; + align-items: baseline; + gap: $spacing-s; + + .diff-status { + display: flex; + direction: rtl; + align-items: center; + } +} diff --git a/ui/app/styles/components/transform-edit.scss b/ui/app/styles/components/transform-edit.scss new file mode 100644 index 0000000..4c189a3 --- /dev/null +++ b/ui/app/styles/components/transform-edit.scss @@ -0,0 +1,19 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.copy-text { + background: $ui-gray-010; + + & > code { + color: $ui-gray-800; + padding: 14px; + } +} +.transform-pattern-text div:not(:first-child) { + font-family: $family-monospace; +} +.transform-decode-formats:not(:last-child) { + margin-bottom: $spacing-s; +} diff --git a/ui/app/styles/components/transit-card.scss b/ui/app/styles/components/transit-card.scss new file mode 100644 index 0000000..f10d527 --- /dev/null +++ b/ui/app/styles/components/transit-card.scss @@ -0,0 +1,47 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +.transit-card-container { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(240px, 0.2fr)); + grid-template-rows: 1fr; + align-content: start; + grid-gap: 2rem; + margin-top: $spacing-l; +} + +.transit-card { + border-radius: $radius; + box-shadow: 0 0 0 1px rgba($grey-light, 0.4); + display: grid; + grid-template-columns: 0.45fr 2fr; + padding: $spacing-m; + border: none; + + .transit-icon { + justify-self: center; + } + + .transit-action-description { + font-family: $family-sans; + font-size: $size-8; + color: $grey; + } + + .title { + color: $grey; + font-size: $size-7; + margin-bottom: $spacing-xxs; + } + + &:hover { + box-shadow: 0 0 0 1px $blue-500, $box-shadow-middle; + background: $blue-010; + + .title { + color: initial; + } + } +} diff --git a/ui/app/styles/components/ttl-picker.scss b/ui/app/styles/components/ttl-picker.scss new file mode 100644 index 0000000..0a69524 --- /dev/null +++ b/ui/app/styles/components/ttl-picker.scss @@ -0,0 +1,28 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.ttl-show-picker { + padding: 0.5rem 0 1.6rem 2.4rem; +} + +.ttl-picker-label { + font-weight: bold; +} + +input.has-error, +input.has-error:focus, +input.has-error:hover { + border-color: $red-dark; +} + +.ttl-value-error { + margin-top: 0.3em; +} + +.description { + display: flex; + justify-content: flex-start; + align-content: center; +} diff --git a/ui/app/styles/components/ui-wizard.scss b/ui/app/styles/components/ui-wizard.scss new file mode 100644 index 0000000..c113d7d --- /dev/null +++ b/ui/app/styles/components/ui-wizard.scss @@ -0,0 +1,258 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +// This file is not being used. 
https://github.com/hashicorp/vault/pull/19220 +.ui-wizard-container { + display: flex; + flex-direction: column; + flex-grow: 1; +} + +.ui-wizard-container .app-content { + display: flex; + flex-direction: column; + flex-grow: 1; + transition: padding $speed; + will-change: padding; + padding: env(safe-area-inset-top) env(safe-area-inset-right) env(safe-area-inset-bottom) + env(safe-area-inset-left); +} + +.ui-wizard-container .app-content.wizard-open { + padding-right: 324px; + padding-right: calc(324px + env(safe-area-inset-right)); + + @include until($mobile) { + padding-right: 0; + padding-bottom: 50vh; + } +} + +.ui-wizard { + z-index: 300; + padding: $size-5; + width: $drawer-width; + background: $white; + box-shadow: $box-shadow, $box-shadow-highest; + position: fixed; + right: $size-8; + bottom: $size-8; + top: calc(#{4rem} + #{$size-8}); + overflow: auto; + + p { + line-height: 1.33; + } + + .dismiss-collapsed { + position: absolute; + top: $size-8; + right: $size-8; + color: $grey; + z-index: 30; + } + + @include until($mobile) { + box-shadow: $box-shadow, 0 0 20px rgba($black, 0.24); + bottom: 0; + left: 0; + right: 0; + top: 50%; + width: auto; + } + + .doc-link { + margin-top: $size-5; + display: block; + } + + pre code { + background: $ui-gray-050; + margin: $size-8 0; + } +} + +.wizard-header { + border-bottom: $light-border; + padding: 0 $size-4 $size-8 0; + margin: $size-4 0; + position: relative; + + @include until($mobile) { + margin-top: 0; + padding-top: 0; + } +} + +.wizard-dismiss-menu { + position: absolute; + right: 0; + top: -$size-11; + z-index: 10; +} + +.ui-wizard.collapsed { + animation: drop-fade-above $speed-slow; + color: $white; + background: $black; + bottom: auto; + box-shadow: $box-shadow-middle; + height: auto; + min-height: 0; + padding-bottom: $size-11; + position: fixed; + right: $size-8; + top: calc(#{4rem} + #{$size-8}); + + @include until($mobile) { + box-shadow: $box-shadow, 0 0 20px rgba($black, 0.24); + bottom: 0; + left: 0; + right: 0; + top: auto; + width: auto; + } + + .title { + color: $white; + } + + .wizard-header { + border-bottom: 0; + margin: 0 0 $size-10; + padding-top: 0; + } + + .wizard-dismiss-menu { + svg { + color: $white; + } + + &:hover svg { + color: $black; + } + } +} + +.wizard-divider-box { + background: none; + box-shadow: none; + margin: $size-8 0 0; + padding: 0 $size-8; + border-top: solid 1px $white; + border-image: linear-gradient(to right, $grey-dark, $grey) 1; + border-width: 1px 0 0; + button { + font-size: $size-7; + font-weight: $font-weight-semibold; + } +} + +.wizard-section:last-of-type { + margin-bottom: $size-5; +} + +.wizard-section button:not(:last-of-type) { + margin-bottom: $size-10; +} + +.wizard-details { + padding-top: $size-4; + margin-top: $size-4; + border-top: 1px solid $grey-light; +} + +.wizard-instructions { + margin: $size-4 0; +} + +.selection-summary { + display: flex; + align-items: center; + width: 100%; + justify-content: space-between; +} + +.time-estimate { + align-items: center; + color: $grey; + display: flex; + font-size: 12px; +} + +.progress-container { + align-items: center; + background: $white; + bottom: 0; + height: $wizard-progress-bar-height; + display: flex; + left: 0; + padding: 0; + position: absolute; + right: 0; + transform: translateY(50%); + width: 100%; +} + +.progress-bar { + background: $ui-gray-050; + box-shadow: inset $box-link-shadow; + display: flex; + height: $wizard-progress-bar-height; + position: relative; + width: 100%; +} + +.feature-progress-container { 
+ align-items: center; + flex: 1 0 auto; + padding: 0 ($wizard-progress-check-size / 4); + position: relative; +} + +.feature-progress { + background: $green; + border-radius: $wizard-progress-bar-height; + height: $wizard-progress-bar-height; +} + +.feature-check { + height: $wizard-progress-check-size; + left: $wizard-progress-check-size / 2; + position: absolute; + top: 50%; + transform: translate(-50%, -50%); + width: $wizard-progress-check-size; + z-index: 10; + margin: 0 !important; +} + +.feature-progress-container .feature-check { + left: 100%; +} + +.feature-progress-container:first-child { + padding-left: 0; + + .progress-bar, + .feature-progress { + border-radius: $wizard-progress-bar-height 0 0 $wizard-progress-bar-height; + } +} + +.feature-progress-container:first-child:last-child { + .progress-bar, + .feature-progress { + border-radius: $wizard-progress-bar-height; + } +} + +.incomplete-check { + color: $ui-gray-200; +} + +.completed-check { + color: $green; +} diff --git a/ui/app/styles/components/unseal-warning.scss b/ui/app/styles/components/unseal-warning.scss new file mode 100644 index 0000000..f40f320 --- /dev/null +++ b/ui/app/styles/components/unseal-warning.scss @@ -0,0 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.unseal-warning.message { + margin: -1px -1px 0; +} diff --git a/ui/app/styles/components/vault-loading.scss b/ui/app/styles/components/vault-loading.scss new file mode 100644 index 0000000..a038a96 --- /dev/null +++ b/ui/app/styles/components/vault-loading.scss @@ -0,0 +1,57 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +@keyframes vault-loading-animation { + 0%, + 70%, + 100% { + transform: scale3D(1, 1, 1); + } + + 35% { + transform: scale3D(0, 0, 1); + } +} + +#vault-loading { + polygon { + animation: vault-loading-animation 1.3s infinite ease-in-out; + transform-origin: 50% 50%; + fill: #dce2e9; + } + + .vault-loading-order-1 { + animation-delay: 0.1s; + } + + .vault-loading-order-2 { + animation-delay: 0.2s; + } + + .vault-loading-order-3 { + animation-delay: 0.3s; + } + + .vault-loading-order-4 { + animation-delay: 0.4s; + } +} + +#vault-loading-animated { + @media all and (-ms-high-contrast: none), (-ms-high-contrast: active) { + // For IE11 + display: none; + } +} + +#vault-loading-static { + display: none; + font-size: 9px; + + @media all and (-ms-high-contrast: none), (-ms-high-contrast: active) { + // For IE11 + display: block; + } +} diff --git a/ui/app/styles/components/vlt-radio.scss b/ui/app/styles/components/vlt-radio.scss new file mode 100644 index 0000000..972e63f --- /dev/null +++ b/ui/app/styles/components/vlt-radio.scss @@ -0,0 +1,44 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +.vlt-radio { + position: relative; + input[type='radio'] { + position: absolute; + z-index: 1; + opacity: 0; + } + + input[type='radio'] + label { + content: ''; + border: 1px solid $grey-light; + border-radius: 50%; + cursor: pointer; + display: inline-block; + margin: 0.25rem 0; + height: 1rem; + width: 1rem; + flex-shrink: 0; + flex-grow: 0; + position: relative; + left: 0; + top: 0.3rem; + } + + input[type='radio']:checked + label { + content: ''; + background: $blue; + border: 1px solid $blue; + box-shadow: inset 0 0 0 0.15rem $white; + position: relative; + left: 0; + } + input[type='radio']:focus + label { + content: ''; + box-shadow: 0 0 10px 1px rgba($blue, 0.4), inset 0 0 0 0.15rem $white; + position: relative; + left: 0; + } +} diff --git a/ui/app/styles/components/vlt-table.scss b/ui/app/styles/components/vlt-table.scss new file mode 100644 index 0000000..47a4d90 --- /dev/null +++ b/ui/app/styles/components/vlt-table.scss @@ -0,0 +1,63 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.vlt-table { + .is-collapsed { + visibility: collapse; + height: 0; + } + + &.sticky-header { + thead th { + position: sticky; + background: #fff; + box-shadow: 0 1px 0px 0px rgba($grey-dark, 0.3); + top: 0; + } + } + + table { + border-collapse: collapse; + border-spacing: 0; + } + + th, + td { + padding: $spacing-s; + } + + th { + color: $grey-dark; + font-weight: 500; + font-size: $size-8; + text-align: left; + vertical-align: top; + } + + tbody th { + font-size: $size-7; + } + + tr { + border-bottom: 1px solid $grey-light; + } + + td { + color: $grey-darkest; + } + + td.middle { + vertical-align: middle; + } + + td.no-padding { + padding: 0; + } + + code { + font-size: $size-7; + color: $black; + } +} diff --git a/ui/app/styles/core.scss b/ui/app/styles/core.scss new file mode 100644 index 0000000..3951d87 --- /dev/null +++ b/ui/app/styles/core.scss @@ -0,0 +1,129 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +// Variables +@import './utils/color_variables'; // colors need to come first. +@import './utils/box-shadow_variables'; +@import './utils/font_variables'; +@import './utils/size_variables'; + +// Element styling +@import './core/element-styling'; + +// Mixins +@import './utils/mixins'; +@import './utils/animations'; + +// Open-api styling +@import './utils/swagger'; + +// Core Styles: each file styles a class that is not associated with a component. Ex: box and not box-label. 
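+// For example, the bare .box class is styled by './core/box' below, while the
+// derived .box-label class ships with the component imports further down
+// ('./components/box-label').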
+@import './core/alert-banner'; +@import './core/box'; +@import './core/breadcrumb'; +@import './core/buttons'; +@import './core/charts'; +@import './core/checkbox-and-radio'; +@import './core/columns'; +@import './core/containers'; +@import './core/control'; +@import './core/field'; +@import './core/file'; +@import './core/footer'; +@import './core/inputs'; +@import './core/label'; +@import './core/level'; +@import './core/link'; +@import './core/lists'; +@import './core/menu'; +@import './core/message'; +@import './core/progress'; +@import './core/select'; +@import './core/switch'; +@import './core/tag'; +@import './core/title'; +@import './core/toggle'; + +// Helpers +@import './helper-classes/colors'; +@import './helper-classes/flexbox-and-grid'; +@import './helper-classes/layout'; +@import './helper-classes/general'; +@import './helper-classes/spacing'; +@import './helper-classes/typography'; + +// Component specific styling +@import './components/auth-buttons'; +@import './components/auth-form'; +@import './components/autocomplete-input'; +@import './components/b64-toggle'; +@import './components/box-label'; +@import './components/box-radio'; +@import './components/calendar-widget'; +@import './components/codemirror'; +@import './components/confirm'; +@import './components/console-ui-panel'; +@import './components/control-group'; +@import './components/diff-version-selector'; +@import './components/doc-link'; +@import './components/empty-state-component'; +@import './components/env-banner'; +@import './components/features-selection'; +@import './components/form-section'; +@import './components/global-flash'; +@import './components/hover-copy-button'; +@import './components/icon'; +@import './components/init-illustration'; +@import './components/info-table-row'; +@import './components/kmip-role-edit'; +@import './components/known-secondaries-card.scss'; +@import './components/license-banners'; +@import './components/linkable-item'; +@import './components/linked-block'; +@import './components/list-item-row'; +@import './components/list-pagination'; +@import './components/loader'; +@import './components/login-form'; +@import './components/masked-input'; +@import './components/modal-component.scss'; +@import './components/namespace-picker'; +@import './components/namespace-reminder'; +@import './components/navigate-input'; +@import './components/page-header'; +@import './components/popup-menu'; +@import './components/radio-card'; +@import './components/radial-progress'; +@import './components/raft-join'; +@import './components/read-more'; +@import './components/regex-validator'; +@import './components/replication-dashboard'; +@import './components/replication-mode-summary'; +@import './components/replication-page'; +@import './components/replication-primary-card'; +@import './components/replication-summary'; +@import './components/role-item'; +@import './components/search-select'; +@import './components/selectable-card'; +@import './components/selectable-card-container'; +// action-block extends selectable-card +@import './components/action-block'; +@import './components/shamir-modal-flow'; +@import './components/shamir-progress'; +@import './components/sidebar'; +@import './components/splash-page'; +@import './components/stat-text'; +@import './components/tabs-component'; +@import './components/text-file'; +@import './components/token-expire-warning'; +@import './components/toolbar'; +@import './components/tool-tip'; +@import './components/transform-edit'; +@import 
'./components/transit-card'; +@import './components/ttl-picker'; +@import './components/unseal-warning'; +// @import './components/ui-wizard'; // remove, see PR https://github.com/hashicorp/vault/pull/19220 +@import './components/vault-loading'; +@import './components/vlt-radio'; +@import './components/vlt-table'; diff --git a/ui/app/styles/core/alert-banner.scss b/ui/app/styles/core/alert-banner.scss new file mode 100644 index 0000000..57ed20c --- /dev/null +++ b/ui/app/styles/core/alert-banner.scss @@ -0,0 +1,11 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.alert-banner-message-body { + border: 0; + margin-top: $spacing-xxs; + + color: $black; +} diff --git a/ui/app/styles/core/box.scss b/ui/app/styles/core/box.scss new file mode 100644 index 0000000..b247dd7 --- /dev/null +++ b/ui/app/styles/core/box.scss @@ -0,0 +1,40 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.box { + background-color: $white; + border-radius: 0; + box-shadow: 0 0 0 1px rgba($grey-dark, 0.3); + color: $ui-gray-900; + display: block; + padding: $size-5; + + &:not(:last-child) { + margin-bottom: 1.5rem; + } + + &.is-fullwidth { + padding-left: 0; + padding-right: 0; + } + + &.no-padding-top { + padding-top: 0; + } + + &.has-slim-padding { + padding: 9px 0; + } + + &.no-top-shadow { + box-shadow: inset 0 -1px 0 0 rgba($black, 0.1); + } + + &.has-container { + box-shadow: 0 4px 4px rgba($black, 0.25); + border: $base-border; + padding: $spacing-l; + } +} diff --git a/ui/app/styles/core/breadcrumb.scss b/ui/app/styles/core/breadcrumb.scss new file mode 100644 index 0000000..eb43c39 --- /dev/null +++ b/ui/app/styles/core/breadcrumb.scss @@ -0,0 +1,75 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.breadcrumb { + display: flex; + user-select: text; + min-height: 1.5rem; + margin: 0; + overflow-x: auto; + white-space: nowrap; + + &:not(:last-child) { + margin: 0; + } + + ul, + ol { + align-items: center; + display: flex; + flex-grow: 1; + flex-shrink: 0; + justify-content: flex-start; + } + + li { + align-items: center; + display: flex; + + & + li::before { + display: none; + } + + &:first-child { + .sep { + margin-left: 0; + } + } + &.is-active a { + color: $grey-darkest; + cursor: default; + pointer-events: none; + } + } + + a { + align-items: center; + display: flex; + justify-content: center; + line-height: 1; + padding: 0 $size-11 0 0; + text-decoration: none; + + &:hover { + color: $blue; + } + } + + .sep { + display: inline-block; + color: transparent; + margin: 0.15rem 0.4rem 0 0.5rem; + overflow: hidden; + width: 0.5rem; + + &::before { + color: $blue; + content: '❮'; + font-size: 1rem; + line-height: 1; + opacity: 0.33; + } + } +} diff --git a/ui/app/styles/core/buttons.scss b/ui/app/styles/core/buttons.scss new file mode 100644 index 0000000..f084172 --- /dev/null +++ b/ui/app/styles/core/buttons.scss @@ -0,0 +1,323 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +.button { + align-items: center; + background-color: $grey-lightest; + border: 1px solid $grey-light; + border-radius: $radius; + box-shadow: $box-shadow-low; + cursor: pointer; + color: $ui-gray-800; + display: inline-block; + font-size: $size-8; + font-weight: $font-weight-semibold; + height: 2.5rem; + line-height: 1.6; + min-width: 6rem; + padding: $size-10 $size-8; + position: relative; + text-align: center; + text-decoration: none; + transition: background-color $speed, border-color $speed, box-shadow $speed, color $speed; + user-select: none; + vertical-align: middle; + white-space: nowrap; + + @keyframes spinAround { + from { + transform: rotate(0deg); + } + to { + transform: rotate(359deg); + } + } + + &:active, + &.is-active, + &:focus { + border-color: darken($grey-light, 10%); + box-shadow: $button-box-shadow-standard; + color: darken($grey-dark, 10%); + } + + &:focus:not(:active) { + box-shadow: 0 0 0 0.125em rgba(21, 99, 255, 0.25); + } + + &:disabled { + background-color: $white; + border-color: $ui-gray-050; + box-shadow: none; + cursor: not-allowed; + opacity: 0.5; + } + + // Button types: is-compact, is-danger, is-danger-outlined, is-flat, is-ghost, is-icon, is-loading, is-link, is-outline, is-primary, is-transparent. + + &.is-compact { + height: 2rem; + padding: $size-11 $size-8; + } + + &.is-danger { + background-color: $red-500; + border-color: darken($red-500, 2%); + color: $white; + text-shadow: 0 1px 1px rgba($black, 0.25); + + &:hover:not([disabled]) { + background-color: darken($red-500, 5%); + border-color: darken($red-500, 5%); + box-shadow: $box-shadow-middle; + } + + &:active, + &.is-active { + background-color: darken($red-500, 10%); + border-color: darken($red-500, 10%); + box-shadow: $box-shadow-middle; + } + + &:focus { + border-color: darken($red-500, 10%); + box-shadow: $button-box-shadow-standard; + } + } + + &.is-danger-outlined { + border: 1px solid $red-500; + color: $red-500; + } + + &.is-flat { + min-width: auto; + border: none; + box-shadow: none; + } + + &.is-ghost { + background-color: transparent; + border-color: transparent; + box-shadow: none; + color: $blue; + text-decoration: none; + + &:hover { + color: $blue-500; + background-color: $grey-lightest; + } + } + + &.is-icon { + padding: 0.25rem $size-3; + } + + &.is-loading { + color: transparent !important; + pointer-events: none; + &::after { + animation: spinAround 500ms infinite linear; + border: 2px solid $ui-gray-300; + border-radius: 290486px; + border-right-color: transparent; + border-top-color: transparent; + content: ''; + display: block; + height: 1em; + width: 1em; + left: calc(50% - (1em / 2)); + top: calc(50% - (1em / 2)); + position: absolute !important; + } + } + + &.is-link { + background-color: transparent; + border-color: transparent; + color: $blue; + text-decoration: none; + font-weight: $font-weight-semibold; + box-shadow: none; + min-width: 4rem; + } + + &.is-primary { + background-color: $blue; + border-color: darken($blue, 2%); + color: $white; + font-weight: $font-weight-bold; + text-shadow: 0 1px 1px rgba($black, 0.25); + + &:disabled { + background-color: $ui-gray-700; + border-color: transparent; + box-shadow: none; + } + + &:hover:not([disabled]) { + background-color: darken($blue, 5%); + border-color: darken($blue, 5%); + box-shadow: $box-shadow-middle; + } + + &:active, + &.is-active { + background-color: darken($blue, 10%); + border-color: darken($blue, 10%); + box-shadow: $box-shadow-middle; + } + + &:focus { + 
border-color: darken($blue, 10%); + box-shadow: $button-box-shadow-standard; + } + + &.is-loading::after { + border-color: transparent transparent $white $white; + } + + &.is-underlined { + &:active, + &.is-active { + background-color: transparent; + border-bottom: 2px solid darken($blue, 10%); + border-radius: unset; + color: darken($blue, 10%); + } + } + // is-primary.is-outlined the only is-outlined buttons are primary. + &.is-outlined { + background-color: transparent; + border-color: $blue; + color: $blue; + + &:hover, + &:focus { + background-color: transparent; + border-color: darken($blue, 10%); + color: $blue; + } + + &:active, + &.is-active { + background-color: transparent; + border-color: darken($blue, 10%); + color: darken($blue, 10%); + } + } + + &.is-outlined [disabled] { + background-color: transparent; + border-color: $ui-gray-700; + box-shadow: none; + color: $ui-gray-700; + } + } + + &.is-transparent { + color: currentColor; + background: none; + border: none; + box-shadow: none; + min-width: auto; + padding: 0; + } + // End of button types + + &.tool-tip-trigger { + color: $grey-dark; + min-width: auto; + padding: 0; + } + + &.has-icon-left, + &.has-icon-right { + .hs-icon { + height: 16px; + min-width: auto; + width: 16px; + } + } + + &.has-icon-left { + .hs-icon { + &, + &:first-child:last-child { + position: relative; + left: -$size-10; + } + } + } + + &.has-icon-right { + .hs-icon { + &, + &:first-child:last-child { + margin-left: $spacing-xxs; + margin-right: -$spacing-xxs; + } + } + } +} + +.button.icon { + box-sizing: border-box; + padding: 0 $size-11; + height: 24px; + width: 24px; + &, + & .icon { + min-width: 0; + } +} + +.button.auto-width, +.button .icon.auto-width { + width: auto; + min-width: auto; + margin: 0 !important; +} + +.button.next-feature-step { + background: $white; + border: 1px solid $grey-light; + border-radius: $radius; + box-shadow: none; + color: $blue; + display: flex; + height: auto; + line-height: 1.2; + justify-content: space-between; + text-align: left; + white-space: normal; + padding: $size-8; + width: 100%; +} + +a.button.disabled { + color: $white; + background-color: $grey-dark; + opacity: 0.5; + border-color: transparent; + box-shadow: none; + cursor: default; +} +.icon-button { + background: transparent; + padding: 0; + margin: 0; + border: none; + cursor: pointer; +} +.text-button { + padding: unset; + border: none; + background-color: inherit; + color: inherit; + font-size: inherit; + font-weight: inherit; + cursor: pointer; +} diff --git a/ui/app/styles/core/charts.scss b/ui/app/styles/core/charts.scss new file mode 100644 index 0000000..34a89de --- /dev/null +++ b/ui/app/styles/core/charts.scss @@ -0,0 +1,348 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +.chart-wrapper { + border: $light-border; + border-radius: $radius-large; + padding: $spacing-s $spacing-l; + margin-bottom: $spacing-m; +} + +// GRID LAYOUT // +.single-month-grid { + display: grid; + grid-template-columns: repeat(2, 1fr); + gap: 2em; + width: 100%; +} +.single-month-stats { + display: grid; + grid-template-columns: repeat(3, 1fr); + gap: 2em; + width: 100%; + margin-bottom: 1rem; +} +.single-month-section-title { + grid-column-start: 1; + grid-column-end: span col3-end; +} +.single-month-breakdown-entity { + grid-column-start: 1; +} +.single-month-breakdown-nonentity { + grid-column-start: 2; +} +.stacked-charts { + display: grid; + width: 100%; +} + +.single-chart-grid { + display: grid; + grid-template-columns: 1fr 0.3fr 3.7fr; + grid-template-rows: 0.5fr 1fr 1fr 1fr 0.25fr; + width: 100%; +} + +.dual-chart-grid { + display: grid; + grid-template-columns: repeat(6, 1fr); + grid-template-rows: 0.7fr 1fr 1fr 1fr 0.3fr; + width: 100%; +} + +.chart-header { + grid-column-start: 1; + grid-column-end: span col4-end; + grid-row-start: 1; + box-shadow: inset 0 -1px 0 $ui-gray-200; +} + +.has-header-link { + display: grid; + grid-template-columns: 4fr 1fr; + + .header-right { + text-align: right; + > button { + &:hover, + &:focus { + background-color: transparent; + background-color: darken($ui-gray-050, 5%); + border-color: darken($ui-gray-300, 5%); + } + } + } +} + +.chart-container-wide { + grid-column-start: 3; + grid-column-end: 4; + grid-row-start: 2; + grid-row-end: span 3; + justify-self: center; + height: 300px; + max-width: 700px; + + svg.chart { + width: 100%; + height: 100%; + } +} + +.chart-container-left { + grid-column-start: 1; + grid-column-end: 4; + grid-row-start: 2; + grid-row-end: 5; + padding-bottom: $spacing-xl; + margin-bottom: $spacing-s; + box-shadow: inset 0 -1px 0 $ui-gray-200; + + > h2 { + padding-left: 18px; + } + > p { + padding-left: 18px; + } +} + +.chart-container-right { + grid-column-start: 4; + grid-column-end: 8; + grid-row-start: 2; + grid-row-end: 5; + padding-bottom: $spacing-xl; + margin-bottom: $spacing-s; + box-shadow: inset 0 -1px 0 $ui-gray-200; + + > h2 { + padding-left: 18px; + } + > p { + padding-left: 18px; + } +} + +.chart-empty-state { + place-self: center stretch; + grid-row-end: span 3; + grid-column-end: span 3; + max-width: none; + padding-right: 20px; + padding-left: 20px; + display: flex; + + > div { + box-shadow: none !important; + } + + > div.empty-state { + white-space: nowrap; + align-self: stretch; + width: 100%; + } +} + +.chart-subTitle { + grid-column-start: 1; + grid-column-end: 3; + grid-row-start: 2; +} + +.data-details-top { + grid-column-start: 1; + grid-column-end: 3; + grid-row-start: 3; +} + +.data-details-bottom { + grid-column-start: 1; + grid-column-end: 3; + grid-row-start: 4; +} + +.timestamp { + grid-column-start: 1; + grid-column-end: 2; + grid-row-start: 5; + color: $ui-gray-500; + font-size: $size-9; + align-self: end; +} + +.legend-center { + grid-row-start: 5; + grid-column-start: 3; + grid-column-end: 5; + align-self: center; + justify-self: center; + font-size: $size-9; +} + +.legend-right { + grid-row-start: 4; + grid-column-start: 3; + grid-column-end: 3; + align-self: end; + justify-self: center; + font-size: $size-9; +} + +// FONT STYLES // + +h2.chart-title { + font-weight: $font-weight-bold; + font-size: $size-5; + line-height: $spacing-l; + margin-bottom: $spacing-xxs; +} + +p.chart-description { + color: $ui-gray-700; + font-size: 14px; + 
line-height: 18px; + margin-bottom: $spacing-xs; +} + +p.chart-subtext { + color: $ui-gray-500; + font-size: $size-8; + line-height: 16px; + margin-top: $spacing-xs; +} + +h3.data-details { + font-weight: $font-weight-bold; + font-size: $size-8; + line-height: 14px; + margin-bottom: $spacing-xs; +} + +p.data-details { + font-weight: $font-weight-normal; + font-size: $size-4; +} + +// MISC STYLES + +.light-dot { + background-color: $blue-100; + height: 10px; + width: 10px; + border-radius: 50%; + display: inline-block; +} + +.dark-dot { + background-color: $blue-500; + height: 10px; + width: 10px; + border-radius: 50%; + display: inline-block; +} + +.legend-label { + padding-left: $spacing-xs; + padding-right: $spacing-xl; +} + +.chart-tooltip { + background-color: $ui-gray-700; + color: white; + font-size: $size-9; + padding: 6px; + border-radius: $radius-large; + width: 140px; + + .bold { + font-weight: $font-weight-bold; + } + .line-chart { + width: 117px; + } + .vertical-chart { + text-align: center; + flex-wrap: nowrap; + width: fit-content; + } + .horizontal-chart { + padding: $spacing-s; + } +} + +.is-label-fit-content { + max-width: fit-content !important; +} + +.chart-tooltip-arrow { + width: 0; + height: 0; + border-left: 5px solid transparent; + border-right: 5px solid transparent; + border-top: 9px solid $ui-gray-700; + position: absolute; + opacity: 0.8; + bottom: -9px; + left: calc(50% - 5px); +} + +.has-grid { + g > text { + color: $ui-gray-500; + font-size: $size-9; + } + + g > line { + // TODO: mix-blend doesn't work in firefox browser? + mix-blend-mode: darken; + color: $ui-gray-300; + } +} + +.is-horizontal { + .tick > text { + font-weight: $font-weight-semibold; + font-size: $size-9; + } +} + +// RESPONSIVE STYLING // + +@media only screen and (max-width: 950px) { + .dual-chart-grid { + display: grid; + grid-template-columns: repeat(2, 1fr); + grid-template-rows: 0.2fr 0.75fr 0.75fr 0.2fr; + width: 100%; + } + + .chart-container-left { + grid-column-start: 1; + grid-column-end: 4; + grid-row-start: 2; + grid-row-end: 3; + margin-left: $spacing-xxl; + margin-right: $spacing-xxl; + } + .chart-container-right { + grid-column-start: 1; + grid-column-end: 4; + grid-row-start: 3; + grid-row-end: 4; + margin-left: $spacing-xxl; + margin-right: $spacing-xxl; + } + + .legend-center { + grid-column-start: 1; + grid-row-start: 4; + } + + .timestamp { + grid-column-start: 1; + grid-row-start: 4; + } +} diff --git a/ui/app/styles/core/checkbox-and-radio.scss b/ui/app/styles/core/checkbox-and-radio.scss new file mode 100644 index 0000000..bdf678e --- /dev/null +++ b/ui/app/styles/core/checkbox-and-radio.scss @@ -0,0 +1,124 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +// This file defines the styles for .checkbox, .radio and .b-checkboxes. The prefix "b" comes from Bulma. 
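+// A minimal usage sketch (assumed markup, not copied from a template): the
+// b-checkbox selectors below rely on the input being immediately followed by
+// its label, since the box and checkmark are drawn on label::before/::after.
+//
+//   <div class="b-checkbox">
+//     <input type="checkbox" id="enable-tls" />
+//     <label for="enable-tls">Enable TLS</label>
+//   </div>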
+ +@import '../sass-svg-uri/svg-uri'; + +// checkbox and radio styling +.checkbox, +.radio { + cursor: pointer; + display: inline-block; + line-height: 1.25; + position: relative; +} + +.checkbox input, +.radio input { + cursor: pointer; +} + +.checkbox:hover, +.radio:hover { + color: hsl(0, 0%, 21%); +} + +.checkbox[disabled], +.radio[disabled], +.checkbox input[disabled], +.radio input[disabled] { + color: $grey; + cursor: not-allowed; +} + +// radio only styling +.radio + .radio { + margin-left: 0.5em; +} + +// one-off checkbox class +.checkbox-help-text { + color: $ui-gray-700; + font-size: $size-7; + padding-left: 28px; +} + +// b-checkbox styling +.b-checkbox { + position: relative; +} + +.b-checkbox label::before { + background-color: $white; + border-radius: 3px; + border: 1px solid $ui-gray-300; + content: ''; + height: 17px; + left: 0; + position: absolute; + top: 1px; + transition: background 0.1s ease-in-out; + width: 17px; +} + +.b-checkbox input[type='checkbox']:checked + label::before { + border-color: $blue; +} + +.b-checkbox input[type='checkbox']:checked + label::after, +.b-checkbox input[type='radio']:checked + label::after { + font-family: $family-monospace; + /*checkmark from ionicons*/ + content: svg-uri( + '' + ); +} + +.b-checkbox input[type='checkbox'], +.b-checkbox input[type='radio'] { + cursor: pointer; + opacity: 0; + z-index: 1; +} + +.b-checkbox label::after { + color: $ui-gray-800; + font-size: 12px; + height: 16px; + left: 3px; + position: absolute; + top: 3px; + width: 16px; +} + +.b-checkbox label { + display: inline; + line-height: 1; + margin-left: 0.5rem; + padding-left: 5px; +} + +.b-checkbox input[type='checkbox']:focus + label::before, +.b-checkbox input[type='radio']:focus + label::before { + outline: thin dotted; + outline: 5px auto -webkit-focus-ring-color; + outline-offset: -2px; +} + +.b-checkbox input[type='checkbox']:disabled + label::before, +.b-checkbox input[type='radio']:disabled + label::before { + background-color: $ui-gray-100; + cursor: not-allowed; +} + +.b-checkbox input[type='checkbox']:disabled + label, +.b-checkbox input[type='radio']:disabled + label { + opacity: 0.65; +} + +.b-checkbox > .sub-text { + padding-left: 2rem; +} diff --git a/ui/app/styles/core/columns.scss b/ui/app/styles/core/columns.scss new file mode 100644 index 0000000..15b6a34 --- /dev/null +++ b/ui/app/styles/core/columns.scss @@ -0,0 +1,201 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+// This file defines the styles for .columns and .column
+
+// Columns (plural)
+.columns {
+  margin-left: -$size-9;
+  margin-right: -$size-9;
+  margin-top: -$size-9;
+
+  &:last-child {
+    margin-bottom: -$size-9;
+  }
+
+  &.is-centered {
+    justify-content: center;
+  }
+
+  &.is-gapless {
+    margin-left: 0;
+    margin-right: 0;
+    margin-top: 0;
+  }
+
+  &.is-gapless > .column {
+    margin: 0;
+    padding: 0 !important;
+  }
+
+  &.is-gapless:not(:last-child) {
+    margin-bottom: $size-4;
+  }
+
+  &.is-gapless:last-child {
+    margin-bottom: 0;
+  }
+
+  &.is-mobile {
+    display: flex;
+
+    > .column.is-1 {
+      flex: none;
+      width: 8.33333%;
+    }
+
+    > .column.is-3 {
+      flex: none;
+      width: 25%;
+    }
+
+    > .column.is-10 {
+      flex: none;
+      width: 83.33333%;
+    }
+
+    > .column.is-12 {
+      flex: none;
+      width: 100%;
+    }
+  }
+
+  &.is-multiline {
+    flex-wrap: wrap;
+  }
+
+  // columnGap does not take Sass vars, so the is-1 value (1 * 0.25rem) is hardcoded
+  &.is-variable.is-1 {
+    --columnGap: 0.25rem;
+  }
+
+  &.is-variable {
+    --columnGap: 0.75rem;
+    margin-left: calc(-1 * var(--columnGap));
+    margin-right: calc(-1 * var(--columnGap));
+  }
+
+  &.is-variable > .column {
+    padding-left: var(--columnGap);
+    padding-right: var(--columnGap);
+  }
+}
+
+// responsive css columns
+@media screen and (min-width: 769px), print {
+  .columns:not(.is-desktop) {
+    display: flex;
+  }
+  .columns.is-variable.is-7-tablet {
+    --columnGap: 1.75rem;
+  }
+}
+
+@media screen and (min-width: 1024px) {
+  .columns.is-desktop {
+    display: flex;
+  }
+}
+
+// Column (singular)
+.column {
+  display: block;
+  flex-basis: 0;
+  flex-grow: 1;
+  flex-shrink: 1;
+  padding: 0.75rem;
+
+  &.is-narrow {
+    flex: none;
+    width: unset;
+  }
+
+  &.is-1 {
+    flex: none;
+    width: 8.33333%;
+  }
+
+  &.is-3 {
+    flex: none;
+    width: 25%;
+  }
+
+  &.is-5,
+  &.is-5-tablet {
+    flex: none;
+    width: 41.66667%;
+  }
+}
+
+// responsive css column (order matters here because some columns have several of these classes and they need to override in the correct order).
+@media screen and (min-width: 769px), print {
+  .column {
+    &.is-one-quarter {
+      flex: none;
+      width: 25%;
+    }
+
+    &.is-half {
+      flex: none;
+      width: 50%;
+    }
+
+    &.is-two-thirds {
+      flex: none;
+      width: 66.6666%;
+    }
+
+    &.is-one-third {
+      flex: none;
+      width: 33.3333%;
+    }
+
+    &.is-5 {
+      flex: none;
+      width: 41.66667%;
+    }
+
+    &.is-6,
+    &.is-6-tablet {
+      flex: none;
+      width: 50%;
+    }
+    &.is-9 {
+      flex: none;
+      width: 75%;
+    }
+
+    &.is-7,
+    &.is-7-tablet {
+      flex: none;
+      width: 58.33333%;
+    }
+
+    &.is-12 {
+      flex: none;
+      width: 100%;
+    }
+
+    &.is-full,
+    &.is-full-tablet {
+      flex: none;
+      width: 100%;
+    }
+  }
+}
+
+@media screen and (max-width: 768px) {
+  .column.is-10-mobile {
+    flex: none;
+    width: 83.33333%;
+  }
+}
+
+@media screen and (min-width: 1024px) {
+  .column.is-4-desktop {
+    flex: none;
+    width: 33.33333%;
+  }
+}
diff --git a/ui/app/styles/core/containers.scss b/ui/app/styles/core/containers.scss
new file mode 100644
index 0000000..40a5cb7
--- /dev/null
+++ b/ui/app/styles/core/containers.scss
@@ -0,0 +1,44 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+// This file defines the styles for the larger container elements like page-container, section and container.
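+// Rough nesting sketch (assumed markup; the element names are illustrative):
+// the rules below expect these containers to wrap one another, which is why
+// .section > .container and .container > .columns get flex treatment.
+//
+//   <div class="page-container">
+//     <section class="section">
+//       <div class="container">
+//         <div class="columns">...</div>
+//       </div>
+//     </section>
+//   </div>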
+ +.ember-application > .ember-view { + display: flex; + flex-direction: column; +} + +.page-container { + min-height: 100vh; + display: flex; + flex-direction: column; + justify-content: flex-end; +} + +.section { + display: flex; + flex-grow: 1; + flex-direction: column; + padding: 0 $size-4; + + > .container { + display: flex; + flex-grow: 1; + flex-direction: column; + width: 100%; + + > .columns { + flex-grow: 1; + } + } +} + +.container { + flex-grow: 1; + margin: 0 auto; + max-width: 1024px; + position: relative; + width: auto; +} diff --git a/ui/app/styles/core/control.scss b/ui/app/styles/core/control.scss new file mode 100644 index 0000000..1ed3750 --- /dev/null +++ b/ui/app/styles/core/control.scss @@ -0,0 +1,64 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +// This file defines the style for .control + +.control.has-icons-left .icon, +.control.has-icons-right .icon { + height: 2.5rem; +} + +.control.has-icons-right .input { + padding-right: 2.25em; +} + +.control { + font-size: 1rem; + max-width: 100%; + position: relative; + text-align: left; + + // Modifiers + &.has-icons-left, + &.has-icons-right { + .input, + .select select { + padding-left: 2rem; + + &:focus, + &:active, + &.is-active { + & ~ .icon { + color: currentColor; + } + } + } + } + &.has-checkbox-right { + label.checkbox { + display: inline-flex; + height: 2.5em; + position: absolute; + top: 0; + right: 0; + justify-content: flex-end; + margin-left: auto; + } + .input, + .select select { + padding-right: 10em; + } + } + + &.is-align-end { + display: flex; + align-self: end; + } +} + +.control.is-expanded { + flex-grow: 1; + flex-shrink: 1; +} diff --git a/ui/app/styles/core/element-styling.scss b/ui/app/styles/core/element-styling.scss new file mode 100644 index 0000000..8b20299 --- /dev/null +++ b/ui/app/styles/core/element-styling.scss @@ -0,0 +1,167 @@ +/* These come from bulma */ +html, +body, +p, +ol, +ul, +li, +dl, +dt, +dd, +blockquote, +figure, +fieldset, +legend, +textarea, +pre, +iframe, +hr, +h1, +h2, +h3, +h4, +h5, +h6 { + margin: 0; + padding: 0; +} + +a:hover, +body, +pre, +strong, +table th { + color: $ui-gray-900; +} + +h1, +h2, +h3, +h4, +h5, +h6 { + font-size: 100%; + font-weight: normal; +} + +ul { + list-style: none; +} + +button, +input, +select, +textarea { + margin: 0; +} + +html { + box-sizing: border-box; +} + +* { + box-sizing: inherit; +} +*:before, +*:after { + box-sizing: inherit; +} + +img, +embed, +object { + max-width: 100%; +} + +html { + background-color: $white; + font-size: 14px; + -moz-osx-font-smoothing: grayscale; + -webkit-font-smoothing: antialiased; + min-width: 300px; + overflow-x: hidden; + overflow-y: scroll; + text-rendering: optimizeLegibility; + text-size-adjust: 100%; +} + +article, +aside, +figure, +footer, +header, +hgroup, +section { + display: block; +} + +body, +button, +input, +select, +textarea { + font-family: $family-sans; +} + +code, +pre { + -moz-osx-font-smoothing: auto; + -webkit-font-smoothing: inherit; + font-family: 'SFMono-Regular', Consolas, 'Liberation Mono', Menlo, Courier, monospace; + font-smooth: inherit; +} + +a strong, +pre code { + color: currentColor; +} + +body { + font-size: $size-6; + line-height: 1.5; +} + +a { + color: $blue-500; + cursor: pointer; + text-decoration: underline; + + &:hover, + &:active, + &:focus { + position: relative; + } +} + +a:hover { + color: $ui-gray-900; +} + +code { + background-color: transparent; + color: $ui-gray-700; + font-size: 1em; + font-weight: 400; + padding: 
0; +} + +hr { + background-color: $ui-gray-300; + border: none; + display: block; + height: 1px; + margin: 1rem 0; +} + +img { + height: auto; + max-width: 100%; +} + +form { + margin: 0; +} + +label { + cursor: pointer; +} diff --git a/ui/app/styles/core/field.scss b/ui/app/styles/core/field.scss new file mode 100644 index 0000000..93eecd3 --- /dev/null +++ b/ui/app/styles/core/field.scss @@ -0,0 +1,141 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +// This file defines the styles for .field, .field-body, .form-fieldset + +.field { + // cannot use :read-only selector because tag used for other purposes + &.is-readOnly { + background-color: $ui-gray-100; + cursor: not-allowed; + } + + &:not(:last-child) { + margin-bottom: $size-4; + } +} +// must come after field due to overriding the margin-bottom of not last-child +.field-body .field { + margin-bottom: 0; +} + +.field.has-addons { + flex-wrap: wrap; + .control { + .button, + .checkbox, + .input, + .select select { + border-radius: 0; + &:hover { + z-index: 2; + } + &:focus, + &:active, + &.is-active { + z-index: 3; + &:hover { + z-index: 4; + } + } + } + &:first-of-type { + flex-grow: 1; + .button, + .checkbox, + .input, + .select select { + border-bottom-left-radius: $radius; + border-top-left-radius: $radius; + } + } + &:last-child { + .button, + .checkbox, + .input, + .select select { + border-bottom-right-radius: $radius; + border-top-right-radius: $radius; + } + } + } + & > .label { + flex-grow: 1; + flex-shrink: 0; + width: 100%; + } +} + +fieldset.form-fieldset { + border: none; +} + +// field.is-grouped styles +.field.is-grouped { + display: flex; + justify-content: flex-start; + + > .control.is-expanded { + flex-grow: 1; + flex-shrink: 1; + } + + > .control { + flex-shrink: 0; + } + + > .control:not(:last-child) { + margin-bottom: 0; + margin-right: 0.75rem; + } + + > .control.is-expanded { + flex-grow: 1; + flex-shrink: 1; + } +} + +// responsive css +@media screen and (min-width: 769px), print { + .field.is-horizontal { + display: flex; + } + + .field-body { + display: flex; + flex-basis: 0; + flex-grow: 5; + flex-shrink: 1; + + > .field:not(:last-child) { + margin-right: $size-9; + } + .field:not(.is-narrow) { + flex-grow: 1; + } + } + + .field-label { + flex-basis: 0; + flex-grow: 1; + flex-shrink: 0; + margin-right: 1.5rem; + text-align: right; + + &.is-normal { + padding-top: 0.375em; + } + + &.is-medium { + font-size: 1.25rem; + padding-top: 0.375em; + } + + &.is-large { + font-size: 1.5rem; + padding-top: 0.375em; + } + } +} diff --git a/ui/app/styles/core/file.scss b/ui/app/styles/core/file.scss new file mode 100644 index 0000000..e08ddbc --- /dev/null +++ b/ui/app/styles/core/file.scss @@ -0,0 +1,99 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +// This file defines the styles for file, file-cta, file-delete-button, file-icon, file-input, file-label, file-name. + +.file { + user-select: none; +} + +.file-cta { + align-items: center; + border: $base-border; + border-radius: $radius; + display: inline-flex; + height: 2.25em; + justify-content: flex-start; + min-width: auto; + padding-bottom: calc(0.375em - 1px); + padding-left: 1em; + padding-right: 1em; + padding-top: calc(0.375em - 1px); + position: relative; + vertical-align: top; + white-space: nowrap; + + &.button { + height: $size-2; // in older parts of the code (Ex: shamir-modal-flow) this class is not a button. 
In newer parts of the code it is, and height needs to match the height of a button (2.5rem). + } + + &:disabled { + cursor: not-allowed; + } + + .icon:first-child:last-child { + display: inline-block; + margin-right: 0.1rem; + vertical-align: middle; + } +} + +.file-delete-button { + @extend .button; + @extend .is-transparent; + color: $grey; + position: absolute; + right: $spacing-xs; +} + +.file-icon { + align-items: center; + display: flex; + height: 1em; + justify-content: center; + margin-right: 0.5em; + width: 1em; +} + +.file-input { + height: 0.01em; + left: 0; + outline: 0; + position: absolute; + top: 0; + width: 0.01em; + visibility: hidden; +} + +.file-label { + align-items: stretch; + cursor: pointer; + display: flex; + justify-content: flex-start; + position: relative; +} + +.file-name { + @extend .input; + border: $base-border; + border-radius: $radius; + box-shadow: 0 4px 1px rgba(10, 10, 10, 0.06) inset; + display: block; + font-size: 1em; + line-height: 1.5; + overflow: hidden; + padding-bottom: calc(0.375em - 1px); + padding-left: 1em; + padding-right: 1em; + padding-top: calc(0.375em - 1px); + position: relative; + text-align: left; + text-overflow: ellipsis; + white-space: nowrap; + + &:disabled { + cursor: not-allowed; + } +} diff --git a/ui/app/styles/core/footer.scss b/ui/app/styles/core/footer.scss new file mode 100644 index 0000000..49aa90c --- /dev/null +++ b/ui/app/styles/core/footer.scss @@ -0,0 +1,20 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.footer { + background-color: transparent; + border-top: $base-border; + padding: $size-3 1.5rem; + margin-top: auto; + + span:not(:first-child) { + display: inline-block; + padding: 0 0.5rem; + + @include until($mobile) { + display: none; + } + } +} diff --git a/ui/app/styles/core/inputs.scss b/ui/app/styles/core/inputs.scss new file mode 100644 index 0000000..826abda --- /dev/null +++ b/ui/app/styles/core/inputs.scss @@ -0,0 +1,83 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +// This file defines the styles for .input, .textarea and .input-hint. 
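+// Hypothetical usage (an assumption, not taken from a template): .input-hint
+// appears to be helper text rendered after the field it describes, e.g.:
+//
+//   <input class="input" type="text" name="role" />
+//   <p class="input-hint">Leave blank to use the default role.</p>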
+ +.input, +.textarea { + align-items: center; + border-radius: $radius; + border: $base-border; + box-shadow: 0 4px 1px rgba($black, 0.06) inset; + color: $grey-dark; + display: inline-flex; + font-size: $size-6; + height: $size-2; + line-height: 1.5; + max-width: 100%; + padding-bottom: calc(0.375em - 1px); + padding-left: $size-8; + padding-right: $size-8; + padding-top: calc(0.375em - 1px); + width: 100%; + + &:focus, + &:active, + &.is-active, + &:focus-visible { + outline: none; + background-color: $white; + border-color: $blue-500 !important; + box-shadow: 0 0 0 0.125em rgba(21, 99, 255, 0.25); + } + + &:hover { + border-color: $ui-gray-300; + } + + @include until($desktop) { + font-size: $size-6; + } + + &::placeholder { + opacity: 0.5; + } +} + +.input[disabled], +.textarea[disabled] { + border-color: $grey-light; + background-color: $ui-gray-100; + color: $ui-gray-500; + cursor: not-allowed; + + &:hover { + border-color: $grey-light; + } +} + +.input[readonly], +.textarea[readonly] { + box-shadow: none; +} + +// textarea specific css +.textarea { + min-width: 100%; + padding-bottom: 0.625em; + padding-top: 0.625em; +} + +.textarea:not([rows]) { + max-height: 600px; + min-height: 120px; +} + +// custom input +.input-hint { + padding: 0 $size-9; + font-size: $size-8; + color: $grey; +} diff --git a/ui/app/styles/core/label.scss b/ui/app/styles/core/label.scss new file mode 100644 index 0000000..8e01e65 --- /dev/null +++ b/ui/app/styles/core/label.scss @@ -0,0 +1,26 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +// This file defines the style for .is-label + +.is-label { + color: $grey-darkest; + display: inline-block; + font-size: 14px; + font-weight: $font-weight-bold; + + &:not(:last-child) { + margin-bottom: 0.25rem; + } + + &::before, + &::after { + transform: translateY(0.2em); + } + + &::before { + border-color: $grey-light; + } +} diff --git a/ui/app/styles/core/level.scss b/ui/app/styles/core/level.scss new file mode 100644 index 0000000..712e3ff --- /dev/null +++ b/ui/app/styles/core/level.scss @@ -0,0 +1,97 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +.level { + align-items: center; + justify-content: space-between; +} + +.level code { + border-radius: $radius; +} + +.level:not(:last-child) { + @include vault-block; +} + +.level-left, +.level-right { + flex-basis: auto; + flex-grow: 0; + flex-shrink: 0; +} + +.level-right { + align-items: center; + justify-content: flex-end; +} + +.level-left { + align-items: center; + justify-content: flex-start; +} + +.level.is-mobile, +.level.is-mobile .level-left, +.level.is-mobile .level-right { + display: flex; +} + +.level.is-mobile .level-left + .level-right { + margin-top: 0; +} + +.level.is-mobile .level-item:not(:last-child) { + margin-bottom: 0; + margin-right: $size-9; +} + +.level.is-mobile .level-item:not(.is-narrow) { + flex-grow: 1; +} + +.level.has-padded-items { + .level-item { + flex: 0 1 auto; + } + .level-item { + padding-right: 1.5rem; + } + .level-item.is-fixed-25 { + flex-basis: 25%; + } +} + +.level-item { + align-items: center; + display: flex; + flex-basis: auto; + flex-grow: 0; + flex-shrink: 0; + justify-content: center; +} + +// responsive css +@media screen and (min-width: 769px), print { + .level { + display: flex; + } + .level > .level-item:not(.is-narrow) { + flex-grow: 1; + } + .level-left { + display: flex; + } + + .level-right { + display: flex; + } +} + +@media screen and (max-width: 768px) { + .level-item:not(:last-child) { + margin-bottom: $size-9; + } +} diff --git a/ui/app/styles/core/link.scss b/ui/app/styles/core/link.scss new file mode 100644 index 0000000..998573b --- /dev/null +++ b/ui/app/styles/core/link.scss @@ -0,0 +1,27 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.link { + background: transparent; + border: 0; + color: $blue; + cursor: pointer; + display: inline; + font: inherit; + line-height: normal; + margin: 0; + padding: 0; + font-weight: $font-weight-semibold; + user-select: text; + + &[disabled] { + opacity: 0.5; + cursor: default; + } +} +// NICE TO HAVE: replace all instances with helper "is-no-underline" +.link-plain { + text-decoration: none; +} diff --git a/ui/app/styles/core/lists.scss b/ui/app/styles/core/lists.scss new file mode 100644 index 0000000..824af1a --- /dev/null +++ b/ui/app/styles/core/lists.scss @@ -0,0 +1,30 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +// This file defines styles for list type items, like: .sep, .list-header and ul.bullet. + +.sep { + display: inline-flex; + align-items: center; + &:before { + font-size: $size-5; + color: $blue-500; + content: '|'; + position: relative; + } +} + +.list-header { + margin: $size-9 $size-9 0; + color: $grey; + font-size: $size-8; + font-weight: $font-weight-semibold; + text-transform: uppercase; +} + +ul.bullet { + list-style: disc; + padding-left: $spacing-m; +} diff --git a/ui/app/styles/core/menu.scss b/ui/app/styles/core/menu.scss new file mode 100644 index 0000000..b9a781d --- /dev/null +++ b/ui/app/styles/core/menu.scss @@ -0,0 +1,34 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +.menu { + font-size: $size-6; +} + +.menu a { + display: block; + padding: $size-10 $size-9; + text-decoration: none; +} + +.menu-label { + letter-spacing: 0.1em; + text-transform: uppercase; +} + +.menu-list { + line-height: 1.25; +} + +.column .menu-list a { + border-radius: 0; + border-right: 0 solid transparent; + font-weight: $font-weight-semibold; + + &:hover, + &.is-active { + color: $blue; + } +} diff --git a/ui/app/styles/core/message.scss b/ui/app/styles/core/message.scss new file mode 100644 index 0000000..0b4a26e --- /dev/null +++ b/ui/app/styles/core/message.scss @@ -0,0 +1,184 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.message { + background: $blue-010; + border: 1px solid $blue-100; + margin-bottom: $spacing-s; + padding: $spacing-s $spacing-m $spacing-m $spacing-s; + position: relative; + + &:not(:last-child) { + margin-bottom: $size-4; + } + + .message-icon { + color: $blue; + margin-right: $spacing-xs; + } + + .message-title { + color: $blue-500; + font-size: 16px; + font-weight: $font-weight-bold; + line-height: 1.25; + + .progress { + margin-left: $spacing-xs; + } + } + + .close-button { + background: transparent; + border: 0; + color: $grey; + cursor: pointer; + position: absolute; + right: $spacing-s; + top: $spacing-m; + } + + .close-button + .message-title { + padding-right: $spacing-m; + } + + .message-body { + border: 0; + padding: 1em 1.25em; + margin-top: $spacing-xxs; + } + + .message-body.pre { + white-space: pre-wrap; + } + // was p selector only, but padding was getting overridden by the message-body above + p.message-body { + font-size: $size-8; + border: 0; + padding: 0; + } + + .message-actions { + margin-top: $spacing-xs; + + a, + a:not(.button):not(.file-delete-button):not(.tag) { + color: $blue; + font-weight: $font-weight-semibold; + text-decoration: none; + } + + > * + * { + margin-left: $spacing-xs; + } + } + + // message types, see message-types.js + &.is-danger { + background: $red-010; + border: 1px solid $red-050; + + .message-body { + color: $red-700; + } + + .message-icon { + color: $red-500; + } + + .message-title { + color: $red-700; + } + } + + &.is-highlight { + background: $yellow-010; + border: 1px solid $yellow-100; + + .message-body { + color: $ui-gray-900; + } + + .message-icon { + color: $yellow-500; + } + + .message-title { + color: $orange-700; + } + } + + &.is-info { + background-color: $blue-010; + + .message-body { + color: $blue-700; + } + + .message-header { + background-color: $blue-500; + color: $white; + } + + .message-title { + color: $blue-700; + } + } + + &.is-success { + background: $green-010; + border: 1px solid $green-100; + + .message-body { + color: $green-900; + } + + .message-icon { + color: $green-500; + } + + .message-title { + color: $green-700; + } + } +} + +.message-inline { + display: flex; + align-items: center; + margin: 0 0 $spacing-l; + + .hs-icon { + margin: 0 $spacing-xxs 0 0; + } + + .p { + margin: 0; + } + + &.has-top { + margin-top: $size-6; + } + + &.size-small { + font-size: $size-8; + } + + &.padding-top { + padding-top: $size-8; + } + + &.is-marginless { + margin-bottom: 0; + } + + > p::first-letter { + text-transform: capitalize; + } +} + +.message.message-marginless { + margin: 0; +} diff --git a/ui/app/styles/core/progress.scss b/ui/app/styles/core/progress.scss new file mode 100644 index 0000000..aab1a9f --- /dev/null +++ b/ui/app/styles/core/progress.scss @@ -0,0 +1,86 @@ +/** + * Copyright (c) 
HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +.progress { + appearance: none; + background: $ui-gray-050; + border: none; + box-shadow: inset $box-link-shadow; + border-radius: $radius; + display: block; + height: $size-6; + overflow: hidden; + padding: 0; + margin-bottom: 0; + width: 100%; + + &.is-small { + height: 0.5rem; + } + &.is-narrow { + width: 30px; + } + &.is-medium { + height: $size-5; + width: 120px; + } +} + +// style the container in chrome +.progress[value]::-webkit-progress-bar { + background-color: #ededed; + box-shadow: inset $box-link-shadow; +} + +// style the bar in chrome +.progress[value]::-webkit-progress-value { + background-color: #4a4a4a; + border-radius: $radius; + transition: width 1s $easing; +} + +// style the bar in firefox +.progress[value]::-moz-progress-bar { + border-radius: $radius; + transition: width 1s $easing; +} + +.progress::-ms-fill { + background-color: #4a4a4a; + border: none; +} + +// is-success and is-info are the two types of progress bars we have +.progress.is-info::-webkit-progress-value { + background-color: #3e8ed0; +} + +.progress.is-info::-moz-progress-bar { + background-color: #3e8ed0; +} + +.progress.is-info::-ms-fill { + background-color: #3e8ed0; +} + +.progress.is-info:indeterminate { + background-image: linear-gradient(to right, #3e8ed0 30%, #ededed 30%); +} + +.progress.is-success::-webkit-progress-value { + background-color: $green-500; +} + +.progress.is-success::-moz-progress-bar { + background-color: $green-500; +} + +.progress.is-success::-ms-fill { + background-color: $green-500; +} + +.progress.is-success:indeterminate { + background-image: linear-gradient(to right, $green-500 30%, #ededed 30%); +} diff --git a/ui/app/styles/core/select.scss b/ui/app/styles/core/select.scss new file mode 100644 index 0000000..d07e952 --- /dev/null +++ b/ui/app/styles/core/select.scss @@ -0,0 +1,84 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +select { + width: 100%; + appearance: none; +} + +.select select { + background-color: $ui-gray-050; + box-shadow: 0 3px 1px rgba($black, 0.12); + border: 1px solid $grey-light; + border-radius: $radius; + color: $grey-dark; + cursor: pointer; + display: block; + font-size: 1em; + height: 2.5rem; + max-width: 100%; + outline: none; + padding-bottom: calc(0.375em - 1px); + padding-left: $size-8; + padding-right: $size-8; + padding-top: calc(0.375em - 1px); + text-rendering: auto !important; + + @include until($desktop) { + font-size: 1rem; + } + + &::placeholder { + opacity: 0.5; + } + + &:active, + &:focus, + &:focus-visible, + &.is-active { + border-color: $blue-500; + box-shadow: 0 0 0 0.125em rgba(21, 99, 255, 0.25); + } +} + +.select select:not([multiple]) { + padding-right: $size-2; +} + +.select select[disabled] { + border-color: $grey-light; + background-color: $ui-gray-100; + color: $ui-gray-500; + cursor: not-allowed; + user-select: none; + &:hover { + border-color: $grey-light; + } +} + +.select { + display: inline-block; + height: 2.5rem; + max-width: 100%; + position: relative; + vertical-align: top; +} + +.select::after { + content: svg-uri( + '' + ); + border: 0; + display: block; + height: 24px; + margin: 0; + pointer-events: none; + position: absolute; + right: $spacing-xs; + top: 50%; + transform: translateY(-50%); + width: 24px; + z-index: 4; +} diff --git a/ui/app/styles/core/switch.scss b/ui/app/styles/core/switch.scss new file mode 100644 index 0000000..dffc483 --- /dev/null +++ b/ui/app/styles/core/switch.scss @@ -0,0 +1,152 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +// This file defines the style for switch, with the nested classes: is-small, is-rounded, is-success + +.switch[type='checkbox'] { + display: inline-block; + outline: 0; + opacity: 0; + position: absolute; + user-select: none; + + + label { + align-items: center; + cursor: pointer; + display: inline-flex; + font-size: $size-6; + justify-content: flex-start; + line-height: 1.5; + padding-left: 3.5rem; + position: relative; + + &::before { + position: absolute; + display: block; + top: calc(50% - 1.5rem * 0.5); + left: 0; + width: $size-1; + height: $size-4; + border: 0.1rem solid transparent; + border-radius: $radius-large; + background: $ui-gray-300; + content: ''; + } + + &::after { + background: $white; + border-radius: $radius-large; + content: ''; + display: block; + height: $size-6; + left: $size-11; + position: absolute; + top: calc(50% - 1rem * 0.5); + transform: translate3d(0, 0, 0); + transition: all 0.25s ease-out; + width: $size-6; + } + + &:checked::after { + left: 1.625rem; + } + } +} + +// is-rounded +.switch[type='checkbox'].is-rounded { + + label { + &::before { + border-radius: 16px; + } + &::after { + border-radius: 50%; + } + } + &.is-small { + + label { + &::before { + border-radius: 16px; + } + &::after { + border-radius: 50%; + } + } + } +} +// is-small +.switch[type='checkbox'].is-small { + + label { + cursor: pointer; + display: inline-block; + font-size: $size-8; + font-weight: bold; + height: 18px; + padding-left: $size-8 * 2.5; + position: relative; + margin: 0 $size-11; + &::before { + border: 0.1rem solid transparent; + border-radius: $radius-large; + background: $ui-gray-300; + display: block; + content: ''; + height: $size-8; + position: absolute; + top: calc($size-8 / 5); + width: $size-8 * 2; + } + &::after { + background: $white; + border-radius: $radius-large; + content: ''; + 
display: block; + height: $size-8 * 0.8; + left: 0; + position: absolute; + top: calc($size-8 / 4); + transform: translateX(0.15rem); + transition: all 0.25s ease-out; + width: $size-8 * 0.8; + will-change: left; + + &:checked { + left: $size-5; + } + } + } + &:checked + label::after { + left: 0; + transform: translateX(($size-8 * 2) - ($size-8 * 0.94)); + } +} + +// is-success +.switch[type='checkbox'].is-success:checked + label::before { + background: $blue; +} + +// focus +.switch[type='checkbox']:focus + label { + box-shadow: 0 0 1px $blue; +} + +// disabled +.switch[type='checkbox'][disabled] { + cursor: not-allowed; +} + +.switch[type='checkbox'][disabled] + label { + opacity: 0.5; +} + +.switch[type='checkbox'][disabled] + label:hover { + cursor: not-allowed; +} + +// misc. +.field-body .switch[type='checkbox'] + label { + margin-top: 0.375em; +} diff --git a/ui/app/styles/core/tag.scss b/ui/app/styles/core/tag.scss new file mode 100644 index 0000000..5f55d71 --- /dev/null +++ b/ui/app/styles/core/tag.scss @@ -0,0 +1,72 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +// This file defines the style for .tag + +.tag:not(body) { + align-items: center; + background-color: $ui-gray-100; + border-radius: $radius; + color: $grey; + display: inline-flex; + font-size: $size-8; + font-weight: $font-weight-normal; + height: auto; + justify-content: center; + line-height: 1.5; + margin-right: $size-10; + padding: 0 $size-10; + white-space: nowrap; + vertical-align: middle; + + code { + color: $grey; + } + + .icon { + height: 12px; + margin-left: -0.25em; + margin-right: 0.25em; + min-width: 0; + width: 12px; + } + + &.has-extra-padding { + padding: $size-11 $spacing-xxs; + } +} + +.tag { + &.is-bold { + font-weight: bold; + } + + &.is-light { + background-color: whitesmoke; + color: rgba(0, 0, 0, 0.7); + } + + &.is-outlined { + background-color: transparent; + border: 1px solid $grey-light; + } + + &.is-transparent { + background-color: transparent; + } + + &.is-small { + height: auto; + } + + &.is-medium { + font-size: $size-6; + } + + &.is-success { + background-color: $green-500; + color: $white; + } +} diff --git a/ui/app/styles/core/title.scss b/ui/app/styles/core/title.scss new file mode 100644 index 0000000..00e4e76 --- /dev/null +++ b/ui/app/styles/core/title.scss @@ -0,0 +1,60 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +// This file styles the following styles: title, subtitle, form-section.title and title sizes + +.title:not(:last-child), +.subtitle:not(:last-child) { + display: block; + margin-bottom: 1rem; +} + +.title { + color: hsl(0, 0%, 21%); + font-size: 2rem; + font-weight: $font-weight-bold; + line-height: 1.125; + word-break: break-word; + + > a { + color: $black; + text-decoration: none; + } + + // title sizes + &.is-2 { + font-size: $size-2; + } + + &.is-3 { + font-size: $size-3; + } + + &.is-4 { + font-size: $size-4; + } + + &.is-5 { + font-size: $size-5; + } + + &.is-6 { + font-size: $size-6; + } + + &.is-7 { + font-size: $size-7; + } +} + +.form-section .title { + margin-bottom: $spacing-s; +} + +.is-subtitle-gray { + text-transform: uppercase; + font-size: $size-7; + color: $ui-gray-500; +} diff --git a/ui/app/styles/core/toggle.scss b/ui/app/styles/core/toggle.scss new file mode 100644 index 0000000..16b4b5d --- /dev/null +++ b/ui/app/styles/core/toggle.scss @@ -0,0 +1,112 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +/* COPIED FROM BULMA SWITCH */ + +.toggle[type='checkbox'] { + outline: 0; + user-select: none; + position: absolute; + margin-bottom: 0; + opacity: 0; + left: 0; +} +.toggle[type='checkbox'][disabled] { + cursor: not-allowed; +} +.toggle[type='checkbox'][disabled] + label { + opacity: 0.5; +} +.toggle[type='checkbox'][disabled] + label::before { + opacity: 0.5; +} +.toggle[type='checkbox'][disabled] + label::after { + opacity: 0.5; +} +.toggle[type='checkbox'][disabled] + label:hover { + cursor: not-allowed; +} +.toggle[type='checkbox'] + label { + position: relative; + display: inline-block; + padding-left: 3.5rem; + cursor: pointer; +} +.toggle[type='checkbox'] + label::before { + position: absolute; + display: block; + top: 0; + left: 0; + width: 3rem; + height: 1.5rem; + border: 0.1rem solid transparent; + border-radius: 0.75rem; + background: $ui-gray-300; + content: ''; +} +.toggle[type='checkbox'] + label::after { + display: block; + position: absolute; + top: 0.25rem; + left: 0.25rem; + width: 1rem; + height: 1rem; + transform: translate3d(0, 0, 0); + border-radius: 50%; + background: $white; + transition: all 0.25s $easing; + content: ''; +} +.toggle[type='checkbox']:checked + label::before { + background: $ui-gray-700; +} +.toggle[type='checkbox']:checked + label::after { + left: 1.625rem; +} + +/* CUSTOM */ +.toggle[type='checkbox'] { + &.is-small { + + label { + font-size: $size-7; + padding-left: $size-8 * 2.5; + margin: 0 0.25rem; + &::before { + top: calc($size-8 / 5); + height: $size-8; + width: $size-8 * 2; + } + &::after { + width: $size-8 * 0.8; + height: $size-8 * 0.8; + transform: translateX(0.15rem); + left: 0; + top: calc($size-8 / 4); + } + } + &:checked + label::after { + left: 0; + transform: translateX(($size-8 * 2) - ($size-8 * 0.94)); + } + } + + &.is-large { + width: 4.5rem; + height: 2.25rem; + } +} +.toggle[type='checkbox'].is-small + label::after { + will-change: left; +} +.toggle[type='checkbox']:focus + label { + box-shadow: 0 0 1px $blue; +} +.toggle[type='checkbox'].is-success:checked + label::before { + background: $blue; +} + +.toggle-label { + width: 100%; +} diff --git a/ui/app/styles/helper-classes/colors.scss b/ui/app/styles/helper-classes/colors.scss new file mode 100644 index 0000000..64f6467 --- /dev/null +++ b/ui/app/styles/helper-classes/colors.scss @@ -0,0 +1,81 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +/* This helper includes styles referencing background color, border color, and text color. */ + +// background colors +.has-background-white-bis { + background: $ui-gray-050; +} + +.has-background-gray-100 { + background-color: $ui-gray-100; +} + +.has-background-gray-200 { + background-color: $ui-gray-200; +} + +// borders +.has-border-top-light { + border-radius: 0; + border-top: 1px solid $grey-light; +} + +.has-border-bottom-light { + border-radius: 0; + border-bottom: 1px solid $grey-light; +} + +.has-error-border, +select.has-error-border { + border: 1px solid $red-500; +} + +// specifically for the SearchSelect dropdown. 
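// (A sketch of the markup this child combinator assumes — illustrative only:
//   <div class="dropdown-has-error-border"><div class="ember-basic-dropdown-trigger">…</div></div>
// — so the error border lands on the inner trigger element, not the wrapper.)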
+.dropdown-has-error-border > div.ember-basic-dropdown-trigger { + border: 1px solid $red-500; +} + +// text color +.has-text-grey-light { + color: $ui-gray-300 !important; +} + +.has-text-grey-400 { + color: $ui-gray-400; +} + +.has-text-grey { + color: $ui-gray-500 !important; +} + +.has-text-grey-dark { + color: $ui-gray-700 !important; +} + +.has-text-white { + color: $white !important; +} + +.has-text-black { + color: $black !important; +} + +.has-text-info { + color: $blue-500 !important; +} + +.has-text-success { + color: $green-500 !important; +} + +.has-text-highlight { + color: $yellow-500; +} + +.has-text-danger { + color: $red-500 !important; +} diff --git a/ui/app/styles/helper-classes/flexbox-and-grid.scss b/ui/app/styles/helper-classes/flexbox-and-grid.scss new file mode 100644 index 0000000..b2e3b56 --- /dev/null +++ b/ui/app/styles/helper-classes/flexbox-and-grid.scss @@ -0,0 +1,115 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +/* Helpers that define anything with the CSS flexbox or CSS grid. */ + +/* Flexbox helpers */ +.is-flex { + display: flex !important; +} + +.is-flex-column { + display: flex; + flex-direction: column; +} + +.is-flex-row { + display: flex; + flex-direction: row; +} + +// Alignment of the items +.is-flex-v-centered { + display: flex; + align-items: center; + align-self: center; + justify-content: center; +} + +.is-flex-center { + display: flex; + align-items: center; +} + +.is-flex-align-baseline { + display: flex; + align-items: baseline; +} + +// Justify-content +.is-centered { + justify-content: center; +} + +.is-flex-end { + display: flex !important; + justify-content: flex-end; +} + +.is-flex-start { + display: flex !important; + justify-content: flex-start; + + &.has-gap { + gap: $spacing-m; + } +} + +.is-flex-between, +.is-grouped-split { + display: flex; + justify-content: space-between !important; +} + +// Flex basis, grow, stretch and flow +.is-flex-full { + flex-basis: 100%; +} + +.is-flex-1 { + flex-grow: 1; + &.basis-0 { + flex-basis: 0; + } +} + +.is-no-flex-grow { + flex-grow: 0 !important; +} + +.is-flex-wrap { + flex-flow: row wrap; +} + +.is-flex-half { + flex: 50%; +} + +/* Flex Responsive */ +@media screen and (min-width: 769px), print { + .is-flex-v-centered-tablet { + display: flex; + align-items: center; + align-self: center; + justify-content: center; + } +} + +/* CSS GRID */ +.is-grid { + display: grid; +} + +.is-grid-3-columns { + grid-template-columns: repeat(3, 1fr); +} + +.is-medium-height { + height: 125px; +} + +.is-grid-column-span-3 { + grid-column-end: span 3; +} diff --git a/ui/app/styles/helper-classes/general.scss b/ui/app/styles/helper-classes/general.scss new file mode 100644 index 0000000..ff77e64 --- /dev/null +++ b/ui/app/styles/helper-classes/general.scss @@ -0,0 +1,100 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +/* This helper includes styles relating to: box-shadow, border, transition, pointer, grouped css in one helper, and screen-reader only.*/ + +// box-shadow helpers +.has-only-top-shadow { + box-shadow: 0 -1px 0 -1px $grey-light, 0 -2px 0 -1px $grey-light; +} + +.has-bottom-shadow { + box-shadow: $box-shadow !important; +} + +.is-shadowless { + box-shadow: none !important; +} + +.is-sideless { + box-shadow: 0 2px 0 -1px $grey-light, 0 -2px 0 -1px $grey-light; +} + +// this helper needs to go after is-sideless as they are often used together and is-bottomless needs to override is-sideless (no this is not ideal). 
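// Both helpers are single-class selectors with identical specificity, so the
// override relies purely on source order: on an element with
// class="is-sideless is-bottomless", the later declaration wins and only the
// top hairline (box-shadow: 0 -1px 0 0 $grey-light) is painted. A sketch of
// the cascade behaviour being depended on here, not an additional rule.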
+.is-bottomless { + box-shadow: 0 -1px 0 0 $grey-light; +} + +// border helpers +.has-default-border { + border: 1px solid $grey !important; +} + +.is-borderless { + border: none !important; +} + +// pointer helpers +.has-no-pointer { + pointer-events: none; +} + +.has-pointer { + cursor: pointer; +} + +// other +.has-background-transition { + transition: background-color ease-out $speed; +} + +.has-current-color-fill { + &, + & svg { + fill: currentColor; + } +} + +// large grouped css blocks +.is-hint { + color: $grey; + font-size: $size-8; + padding: $size-8 0; +} + +.is-optional { + color: $grey; + font-size: $size-8; + text-transform: lowercase; +} + +.is-word-break { + overflow-wrap: break-word; + word-wrap: break-word; + word-break: break-word; + white-space: pre-wrap; +} + +.truncate-second-line { + display: -webkit-box; + -webkit-line-clamp: 2; + -webkit-box-orient: vertical; + text-overflow: ellipsis; + overflow: hidden; +} + +// screen reader only +.sr-only { + border: 0; + clip-path: inset(50%); + clip: rect(0 0 0 0); + height: 1px; + margin: -1px; + overflow: hidden; + padding: 0px; + position: absolute; + white-space: nowrap; + width: 1px; +} diff --git a/ui/app/styles/helper-classes/layout.scss b/ui/app/styles/helper-classes/layout.scss new file mode 100644 index 0000000..1011ed0 --- /dev/null +++ b/ui/app/styles/helper-classes/layout.scss @@ -0,0 +1,90 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +/* This helper includes styles relating to: display, width, height, float, visibility, position, alignment. */ + +// display +.is-block { + display: block !important; +} + +.is-hidden { + display: none !important; +} + +.is-inline { + display: inline !important; +} + +.is-inline-block { + display: inline-block !important; +} + +// position +.is-in-bottom-right { + position: absolute; + bottom: 1rem; + right: 1rem; + z-index: 10; +} + +.is-relative { + position: relative; +} + +// visibility +.is-invisible { + visibility: hidden; +} + +// width and height +.is-fullwidth { + width: 100%; +} + +.is-auto-width { + width: auto; +} + +.is-min-width-0 { + min-width: 0; +} + +.is-medium-height { + height: 125px; +} + +// float +.is-pulled-left { + float: left !important; +} + +.is-pulled-right { + float: right !important; +} + +// alignment +.is-v-centered { + vertical-align: middle; +} + +// responsive css +@media screen and (min-width: 769px), print { + .is-hidden-tablet { + display: none !important; + } +} + +@media screen and (max-width: 768px) { + .is-hidden-mobile { + display: none !important; + } +} + +@media screen and (max-width: 1215px) { + .is-widescreen:not(.is-max-desktop) { + max-width: 1152px; + } +} diff --git a/ui/app/styles/helper-classes/spacing.scss b/ui/app/styles/helper-classes/spacing.scss new file mode 100644 index 0000000..5f8c500 --- /dev/null +++ b/ui/app/styles/helper-classes/spacing.scss @@ -0,0 +1,169 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +/* Helpers that define anything with the margin or padding. */ + +/* Notes +- these helpers are generally defined in px but some (if they use $size-xx) are using rems. 
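- for example, .has-short-padding (padding: $size-11 $size-5) is rem-based: 0.25rem and 1.25rem,
  i.e. 4px and 20px at the browser-default 16px root font size, while .has-padding-m uses the
  fixed $spacing-m (16px). A worked example for orientation; values come from _size_variables.scss.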
+*/ + +.is-paddingless { + padding: 0 !important; +} + +.has-short-padding { + padding: $size-11 $size-5; +} + +.has-tall-padding { + padding: $size-2; +} + +.has-side-padding-s { + padding-left: $spacing-s; + padding-right: $spacing-s; +} + +.has-padding-m { + padding: $spacing-m; +} + +.has-bottom-padding-s { + padding-bottom: $spacing-s; +} + +.has-bottom-padding-l { + padding-bottom: $spacing-l; +} + +.has-top-padding-s { + padding-top: $spacing-s; +} + +.has-top-padding-m { + padding-top: $spacing-m; +} + +.has-top-padding-l { + padding-top: $spacing-l; +} + +.has-left-padding-xs { + padding-left: $spacing-xs; +} + +.has-left-padding-s { + padding-left: $spacing-s; +} + +.has-left-padding-l { + padding-left: $spacing-l; +} + +.has-top-padding-xxl { + padding-top: $spacing-xxl; +} + +// All Margin helpers +.is-marginless { + margin: 0 !important; +} + +.has-top-bottom-margin { + margin: $size-5 0rem; +} + +.has-top-bottom-margin-xxs { + margin: $spacing-xxs 0; +} + +.has-top-bottom-margin-negative-m { + margin-top: -$spacing-m; + margin-bottom: -$spacing-m; +} + +.has-bottom-margin-xs { + margin-bottom: $spacing-xs !important; +} + +.has-bottom-margin-s { + margin-bottom: $spacing-s; +} + +.has-bottom-margin-m { + margin-bottom: $spacing-m; +} + +.has-bottom-margin-l { + margin-bottom: $spacing-l; +} + +.has-bottom-margin-xl { + margin-bottom: $spacing-xl; +} + +.has-bottom-margin-xxl { + margin-bottom: $spacing-xxl; +} + +.has-top-margin-s { + margin-top: $spacing-s; +} + +.has-top-margin-xs { + margin-top: $spacing-xs; +} + +.has-top-margin-m { + margin-top: $spacing-m; +} + +.has-top-margin-l { + margin-top: $spacing-l; +} + +.has-top-margin-xl { + margin-top: $spacing-xl; +} + +.has-top-margin-xxl { + margin-top: $spacing-xxl; +} + +.has-top-margin-negative-s { + margin-top: (-1 * $spacing-s); +} + +.has-left-margin-xxs { + margin-left: $spacing-xxs; +} + +.has-left-margin-xs { + margin-left: $spacing-xs; +} + +.has-left-margin-s { + margin-left: $spacing-s; +} + +.has-left-margin-m { + margin-left: $spacing-m; +} + +.has-left-margin-l { + margin-left: $spacing-l; +} + +.has-left-margin-xl { + margin-left: $spacing-xl; +} + +.has-right-margin-m { + margin-right: $spacing-m; +} + +.has-right-margin-l { + margin-right: $spacing-l; +} diff --git a/ui/app/styles/helper-classes/typography.scss b/ui/app/styles/helper-classes/typography.scss new file mode 100644 index 0000000..2b1ce30 --- /dev/null +++ b/ui/app/styles/helper-classes/typography.scss @@ -0,0 +1,110 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +/* This helper includes styles relating to: font-size, font-family, font-alignment, text-transform, text-weight, font-style, text-decoration. 
*/ + +// font size helpers +.is-size-4 { + font-size: $size-4 !important; +} + +.is-size-5 { + font-size: $size-5 !important; +} + +.is-size-6 { + font-size: $size-6 !important; +} + +.is-size-7 { + font-size: $size-7 !important; +} + +.is-size-8 { + font-size: $size-8 !important; +} + +.is-size-9 { + font-size: $size-9 !important; +} + +// Font weight +.has-font-weight-normal { + font-weight: $font-weight-normal; +} + +.has-text-weight-semibold { + font-weight: $font-weight-semibold !important; +} + +.has-text-weight-bold { + font-weight: $font-weight-bold !important; +} + +// Font family +.is-font-mono { + font-family: $family-monospace; +} + +.masked-font { + font-family: obscure, text-security-square; + letter-spacing: 2px; +} + +// Text decoration +.is-underline { + text-decoration: underline; +} +.is-no-underline { + text-decoration: none; +} + +// Text transformations +.is-lowercase { + text-transform: lowercase !important; +} + +.is-uppercase { + text-transform: uppercase !important; +} + +// Text alignment +.has-text-right { + text-align: right !important; +} + +.has-text-left { + text-align: left; +} + +.has-text-centered { + text-align: center !important; +} + +.has-line-height-1 { + line-height: 1; +} + +// Text color or styling +.is-help { + font-size: $size-8; + margin-top: $size-11; +} + +.help { + display: block; + font-size: 0.85714rem; + margin-top: $size-11; +} + +.sub-text { + color: $grey; + margin-bottom: $size-11; + font-size: $size-8; + + strong { + color: inherit; + } +} diff --git a/ui/app/styles/reset.scss b/ui/app/styles/reset.scss new file mode 100644 index 0000000..90a4e2a --- /dev/null +++ b/ui/app/styles/reset.scss @@ -0,0 +1,6 @@ +// reset for HDS +*, +*::before, +*::after { + box-sizing: border-box; +} diff --git a/ui/app/styles/utils/_box-shadow_variables.scss b/ui/app/styles/utils/_box-shadow_variables.scss new file mode 100644 index 0000000..55eb3f7 --- /dev/null +++ b/ui/app/styles/utils/_box-shadow_variables.scss @@ -0,0 +1,24 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +// TODO: lots of inconsistent variation on box-shadow usage. Fix. + +// box +$box-shadow: 0 0 0 1px rgba($black, 0.1); +$box-shadow-low: 0 5px 1px -2px rgba($black, 0.12), 0 3px 2px -1px rgba($black, 0); +$box-shadow-middle: 0 8px 4px -4px rgba($black, 0.1), 0 6px 8px -2px rgba($black, 0.05); +$box-shadow-high: 0 12px 5px -7px rgba($black, 0.08), 0 11px 10px -3px rgba($black, 0.1); +$box-shadow-highest: 0 16px 6px -10px rgba($black, 0.06), 0 16px 16px -4px rgba($black, 0.2); + +// box-link +$box-link-shadow: 0 0 0 1px $ui-gray-200; +$box-link-hover-shadow: 0 0 0 1px $ui-gray-300; + +// border +$base-border: 1px solid $ui-gray-300; +$light-border: 1px solid $ui-gray-200; + +// button specific +$button-box-shadow-standard: 0 3px 1px 0 rgba($black, 0.12); diff --git a/ui/app/styles/utils/_color_variables.scss b/ui/app/styles/utils/_color_variables.scss new file mode 100644 index 0000000..c3c2676 --- /dev/null +++ b/ui/app/styles/utils/_color_variables.scss @@ -0,0 +1,98 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +// UI Gray +$ui-gray-010: #fbfbfc; +$ui-gray-050: #f7f8fa; +$ui-gray-100: #ebeef2; +$ui-gray-200: #dce0e6; +$ui-gray-300: #bac1cc; +$ui-gray-400: #8e96a3; +$ui-gray-500: #6f7682; +$ui-gray-600: #626873; +$ui-gray-700: #525761; +$ui-gray-800: #373a42; +$ui-gray-900: #1f2124; +$grey-lightest: $ui-gray-050; +$grey-light: $ui-gray-300; +$grey: $ui-gray-500; +$grey-dark: $ui-gray-700; +$grey-darkest: $ui-gray-900; + +// Black +$black: hsl(0, 0%, 4%); // in hex: #0a0a0a + +// White +$white: hsl(0, 0%, 100%); + +// Blue +$blue-010: #fbfcff; +$blue-050: #f0f5ff; +$blue-100: #bfd4ff; +$blue-300: #5b92ff; +$blue-500: #1563ff; +$blue-700: #0e40a3; +$blue-900: #061b46; +$blue-lightest: $blue-050; +$blue-light: $blue-300; +$blue: $blue-500; +$blue-dark: $blue-700; +$blue-darkest: $blue-900; + +// Red +$red-010: #fdfafb; +$red-050: #f9ecee; +$red-100: #efc7cc; +$red-300: #db7d88; +$red-500: #c73445; +$red-700: #7f222c; +$red-900: #370f13; +$red-lightest: $red-050; +$red-light: $red-300; +$red: $red-500; +$red-dark: $red-700; +$red-darkest: $red-900; + +// Green +$green-010: #fafdfa; +$green-050: #ecf7ed; +$green-100: #c6e9c9; +$green-300: #7acc81; +$green-500: #2eb039; +$green-700: #1e7125; +$green-900: #0d3010; +$green-lightest: $green-050; +$green-light: $green-300; +$green: $green-500; +$green-dark: $green-700; +$green-darkest: $green-900; + +// Orange +$orange-010: #fffcfa; +$orange-050: #fef4ec; +$orange-100: #fde0c8; +$orange-300: #fbb77f; +$orange-500: #fa8f37; +$orange-700: #a05c23; +$orange-900: #45270f; +$orange-lightest: $orange-050; +$orange-light: $orange-300; +$orange: $orange-500; +$orange-dark: $orange-700; +$orange-darkest: $orange-900; + +// Yellow +$yellow-010: #fffdf6; +$yellow-050: #fffbed; +$yellow-100: #fdeeba; +$yellow-300: #fbd95e; +$yellow-500: #fac402; +$yellow-700: #a07d02; +$yellow-900: #453601; +$yellow-lightest: $yellow-050; +$yellow-light: $yellow-300; +$yellow: $yellow-500; +$yellow-dark: $yellow-700; +$yellow-darkest: $yellow-900; diff --git a/ui/app/styles/utils/_font_variables.scss b/ui/app/styles/utils/_font_variables.scss new file mode 100644 index 0000000..287b3e8 --- /dev/null +++ b/ui/app/styles/utils/_font_variables.scss @@ -0,0 +1,16 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +// Vars relating to font-family or font-weight. + +$family-sans: system-ui, -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', 'Ubuntu', + 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue', sans-serif; + +$family-monospace: 'SFMono-Regular', Consolas, 'Liberation Mono', Menlo, Courier, monospace; + +/* Font weight */ +$font-weight-normal: 400; +$font-weight-semibold: 600; +$font-weight-bold: 700; diff --git a/ui/app/styles/utils/_size_variables.scss b/ui/app/styles/utils/_size_variables.scss new file mode 100644 index 0000000..3a59e78 --- /dev/null +++ b/ui/app/styles/utils/_size_variables.scss @@ -0,0 +1,49 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +/* General sizing in rem values used largely for text sizing.*/ +$size-1: 3rem; // 48px, same as $spacing-xxl +$size-2: 2.5rem; // 40px +$size-3: calc(24 / 14) + 0rem; // ~1.714rem ~27px +$size-4: 1.5rem; // 24px same as $spacing-l +$size-5: 1.25rem; // 20px +$size-6: 1rem; // 16px same as $spacing-m +$size-7: calc(13 / 14) + 0rem; // ~.929rem ~15px +$size-8: calc(12 / 14) + 0rem; // ~.857rem ~13.7px +$size-9: 0.75rem; // 12px same as $spacing-s +$size-10: 0.5rem; // 8px same as $spacing-xs +$size-11: 0.25rem; // 4px same as spacing-xxs + +/* Spacing vars in px */ +$spacing-xxs: 4px; +$spacing-xs: 8px; +$spacing-s: 12px; +$spacing-m: 16px; +$spacing-l: 24px; +$spacing-xl: 36px; +$spacing-xxl: 48px; + +/* Border radius */ +$radius: 2px; +$radius-large: 4px; + +/* Responsiveness */ +// 960, 1152, and 1344 have been chosen because they are divisible by both 12 and 16 +$mobile: 769px; +// 960px container + 4rem +$desktop: 960px + (2 * 32px); + +/* Animations and transitions */ +$speed: 150ms; +$speed-slow: $speed * 2; +$easing: ease-out; + +/* Nav */ +$drawer-width: 300px; + +// Wizard +// Not being used: https://github.com/hashicorp/vault/pull/19220 +// $wizard-progress-bar-height: 6px; +// $wizard-progress-check-size: 16px; diff --git a/ui/app/styles/utils/animations.scss b/ui/app/styles/utils/animations.scss new file mode 100644 index 0000000..4073134 --- /dev/null +++ b/ui/app/styles/utils/animations.scss @@ -0,0 +1,41 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +@mixin keyframes($name) { + @-webkit-keyframes #{$name} { + @content; + } + @-moz-keyframes #{$name} { + @content; + } + @-ms-keyframes #{$name} { + @content; + } + @keyframes #{$name} { + @content; + } +} + +@include keyframes(drop-fade-below) { + 0% { + opacity: 0; + transform: translateY(-1rem); + } + 100% { + opacity: 1; + transform: translateY(0px); + } +} + +@include keyframes(drop-fade-above) { + 0% { + opacity: 0; + transform: translateY(1rem); + } + 100% { + opacity: 1; + transform: translateY(0px); + } +} diff --git a/ui/app/styles/utils/mixins.scss b/ui/app/styles/utils/mixins.scss new file mode 100644 index 0000000..c5fcef7 --- /dev/null +++ b/ui/app/styles/utils/mixins.scss @@ -0,0 +1,89 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +// This file defines the following mixins: css-arrow, css-top-arrow, css-bottom-arrow, from, until, and vault-block + +@mixin css-arrow( + $vertical-direction, + $size, + $color, + $border-width, + $border-color, + $left: 50%, + $left-offset: 0px +) { + & { + border: 1px solid $border-color; + } + + &:after, + &:before { + @if ($vertical-direction == 'top') { + bottom: 100%; + } @else { + top: 100%; + } + border: solid transparent; + content: ' '; + height: 0; + width: 0; + position: absolute; + pointer-events: none; + } + + &:after { + border-color: rgba($color, 0); + border-bottom-color: $color; + border-width: $size; + left: calc(#{$left} + #{$left-offset}); + margin-left: -$size; + } + &:before { + border-color: rgba($border-color, 0); + border-bottom-color: $border-color; + border-width: $size + round(1.41421356 * $border-width); + left: calc(#{$left} + #{$left-offset}); + margin-left: -($size + round(1.41421356 * $border-width)); + } + &:before, + &:after { + @if ($vertical-direction == 'bottom') { + transform: rotate(180deg); + } + } + + @at-root .ember-basic-dropdown-content--left#{&} { + &:after, + &:before { + left: auto; + right: calc(#{$left} + #{$left-offset} - #{$size}); + } + } +} + +@mixin css-top-arrow($size, $color, $border-width, $border-color, $left: 50%, $left-offset: 0px) { + @include css-arrow('top', $size, $color, $border-width, $border-color, $left, $left-offset); +} +@mixin css-bottom-arrow($size, $color, $border-width, $border-color, $left: 50%, $left-offset: 0px) { + @include css-arrow('bottom', $size, $color, $border-width, $border-color, $left, $left-offset); +} + +@mixin from($breakpoint) { + @media (min-width: $breakpoint) { + @content; + } +} + +@mixin until($breakpoint) { + @media (max-width: $breakpoint) { + @content; + } +} + +@mixin vault-block { + &:not(:last-child) { + margin-bottom: calc(5 / 14) + 0rem; + } +} diff --git a/ui/app/styles/utils/swagger.scss b/ui/app/styles/utils/swagger.scss new file mode 100644 index 0000000..e1ab095 --- /dev/null +++ b/ui/app/styles/utils/swagger.scss @@ -0,0 +1,11 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +// This file defines the scss for open-api-explorer. + +/* align list items with container */ +.swagger-ember .swagger-ui .wrapper { + padding: 0; +} diff --git a/ui/app/templates/application.hbs b/ui/app/templates/application.hbs new file mode 100644 index 0000000..26fb7cd --- /dev/null +++ b/ui/app/templates/application.hbs @@ -0,0 +1,5 @@ + +
+ {{outlet}} +
+
\ No newline at end of file diff --git a/ui/app/templates/components/alphabet-edit.hbs b/ui/app/templates/components/alphabet-edit.hbs new file mode 100644 index 0000000..0e764c7 --- /dev/null +++ b/ui/app/templates/components/alphabet-edit.hbs @@ -0,0 +1,128 @@ + + + + + +

+ {{#if (eq this.mode "create")}} + Create Alphabet + {{else if (eq this.mode "edit")}} + Edit Alphabet + {{else}} + Alphabet + {{this.model.id}} + {{/if}} +

+
+
+ +{{#if (eq this.mode "show")}} + + + {{#if this.capabilities.canDelete}} + +
+ {{/if}} + {{#if this.capabilities.canUpdate}} + + Edit alphabet + + {{/if}} +
+
+{{/if}} + +{{#if (or (eq this.mode "edit") (eq this.mode "create"))}} +
+
+ + + {{#each this.model.attrs as |attr|}} + {{#if (and (eq attr.name "name") (eq this.mode "edit"))}} + + {{#if attr.options.subText}} +

{{attr.options.subText}}

+ {{/if}} + + {{else}} + + {{/if}} + {{/each}} +
+
+
+ + + Cancel + +
+
+
+{{else}} + {{#if this.model.displayErrors}} +
+ +
+ {{/if}} +
+ {{#each this.model.attrs as |attr|}} + {{#if (eq attr.type "object")}} + + {{else if (eq attr.type "array")}} + + {{else}} + + {{/if}} + {{/each}} +
+{{/if}} \ No newline at end of file diff --git a/ui/app/templates/components/auth-button-auth0.hbs b/ui/app/templates/components/auth-button-auth0.hbs new file mode 100644 index 0000000..232dda0 --- /dev/null +++ b/ui/app/templates/components/auth-button-auth0.hbs @@ -0,0 +1,19 @@ +
+
+ {{! template-lint-disable no-forbidden-elements }} + + + + + + + + {{! template-lint-enable no-forbidden-elements }} +
+
+ Sign in with Auth0 +
+
\ No newline at end of file diff --git a/ui/app/templates/components/auth-button-gitlab.hbs b/ui/app/templates/components/auth-button-gitlab.hbs new file mode 100644 index 0000000..6992693 --- /dev/null +++ b/ui/app/templates/components/auth-button-gitlab.hbs @@ -0,0 +1,48 @@ +
+ {{! template-lint-disable no-forbidden-elements }} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + {{! template-lint-enable no-forbidden-elements }} +
+ Sign in with GitLab +
+
\ No newline at end of file diff --git a/ui/app/templates/components/auth-button-google.hbs b/ui/app/templates/components/auth-button-google.hbs new file mode 100644 index 0000000..06c7a54 --- /dev/null +++ b/ui/app/templates/components/auth-button-google.hbs @@ -0,0 +1,70 @@ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Sign in with Google +
+
\ No newline at end of file diff --git a/ui/app/templates/components/auth-config-form/config.hbs b/ui/app/templates/components/auth-config-form/config.hbs new file mode 100644 index 0000000..d60d737 --- /dev/null +++ b/ui/app/templates/components/auth-config-form/config.hbs @@ -0,0 +1,23 @@ +
+
+ + + {{#if this.model.attrs}} + {{#each this.model.attrs as |attr|}} + + {{/each}} + {{else if this.model.fieldGroups}} + + {{/if}} +
+
+ +
+
\ No newline at end of file diff --git a/ui/app/templates/components/auth-config-form/options.hbs b/ui/app/templates/components/auth-config-form/options.hbs new file mode 100644 index 0000000..dd9e551 --- /dev/null +++ b/ui/app/templates/components/auth-config-form/options.hbs @@ -0,0 +1,19 @@ +
+
+ + + {{#each this.model.tuneAttrs as |attr|}} + + {{/each}} +
+
+ +
+
\ No newline at end of file diff --git a/ui/app/templates/components/auth-form-options.hbs b/ui/app/templates/components/auth-form-options.hbs new file mode 100644 index 0000000..d774470 --- /dev/null +++ b/ui/app/templates/components/auth-form-options.hbs @@ -0,0 +1,29 @@ +{{#unless this.selectedAuthIsPath}} +
+ + {{#if this.isOpen}} +
+ +
+ +
+ +
+ {{/if}} +
+{{/unless}} \ No newline at end of file diff --git a/ui/app/templates/components/auth-form.hbs b/ui/app/templates/components/auth-form.hbs new file mode 100644 index 0000000..2753b1d --- /dev/null +++ b/ui/app/templates/components/auth-form.hbs @@ -0,0 +1,183 @@ +
+ {{#if (and this.waitingForOktaNumberChallenge (not this.cancelAuthForOktaNumberChallenge))}} + + {{else}} + {{#if this.hasMethodsWithPath}} + + {{/if}} +
+ +
+

{{this.selectedAuthBackend.path}}

+ + {{this.selectedAuthBackend.mountDescription}} + +
+ {{#if (or (not this.hasMethodsWithPath) (not this.selectedAuthIsPath))}} + +
+
+ {{else if (eq this.providerName "token")}} +
+ +
+ +
+
+ {{else}} +
+ +
+ +
+
+
+ +
+ +
+
+ {{/if}} + {{#if (not-eq this.selectedAuthBackend.type "token")}} + + {{/if}} + + {{#if (and this.delayAuthMessageReminder.isIdle this.showLoading)}} + + {{/if}} + + {{/if}} + + {{/if}} + \ No newline at end of file diff --git a/ui/app/templates/components/auth-jwt.hbs b/ui/app/templates/components/auth-jwt.hbs new file mode 100644 index 0000000..e178623 --- /dev/null +++ b/ui/app/templates/components/auth-jwt.hbs @@ -0,0 +1,62 @@ +
+
+ +
+ +
+ +
+ {{#unless this.isOIDC}} +
+ +
+ +
+
+ {{/unless}} +
+ {{yield}} +
+ +
\ No newline at end of file diff --git a/ui/app/templates/components/auth-method/configuration.hbs b/ui/app/templates/components/auth-method/configuration.hbs new file mode 100644 index 0000000..8287bb2 --- /dev/null +++ b/ui/app/templates/components/auth-method/configuration.hbs @@ -0,0 +1,17 @@ +
+ {{#each @model.attrs as |attr|}} + {{#if (eq attr.type "object")}} + + {{else}} + + {{/if}} + {{/each}} +
\ No newline at end of file diff --git a/ui/app/templates/components/b64-toggle.hbs b/ui/app/templates/components/b64-toggle.hbs new file mode 100644 index 0000000..dd31e66 --- /dev/null +++ b/ui/app/templates/components/b64-toggle.hbs @@ -0,0 +1,5 @@ +{{#if this.isBase64}} + Decode from base64 +{{else}} + Encode to base64 +{{/if}} \ No newline at end of file diff --git a/ui/app/templates/components/backend-configure.hbs b/ui/app/templates/components/backend-configure.hbs new file mode 100644 index 0000000..e69de29 diff --git a/ui/app/templates/components/block-error.hbs b/ui/app/templates/components/block-error.hbs new file mode 100644 index 0000000..92ba49f --- /dev/null +++ b/ui/app/templates/components/block-error.hbs @@ -0,0 +1,3 @@ +
+ {{yield}} +
\ No newline at end of file diff --git a/ui/app/templates/components/calendar-widget.hbs b/ui/app/templates/components/calendar-widget.hbs new file mode 100644 index 0000000..e282889 --- /dev/null +++ b/ui/app/templates/components/calendar-widget.hbs @@ -0,0 +1,119 @@ + + + {{date-format this.startDate "MMM yyyy"}} + - + {{date-format this.endDate "MMM yyyy"}} + + + + + {{#if this.showCalendar}} +
+
+ +

+ {{this.displayYear}} +

+ + {{#if this.tooltipTarget}} + {{! Component must be in curly bracket notation }} + {{! template-lint-disable no-curly-component-invocation }} + {{#modal-dialog + tagName="div" + tetherTarget=this.tooltipTarget + targetAttachment="top right" + attachment="top middle" + offset="150px 0" + }} +
+

+ {{this.tooltipText}} +

+
+
+ {{/modal-dialog}} + {{/if}} +
+
+ {{#each this.widgetMonths as |month|}} + + {{/each}} +
+
+ {{/if}} +
+
\ No newline at end of file diff --git a/ui/app/templates/components/clients/attribution.hbs b/ui/app/templates/components/clients/attribution.hbs new file mode 100644 index 0000000..cf57aac --- /dev/null +++ b/ui/app/templates/components/clients/attribution.hbs @@ -0,0 +1,126 @@ +{{! only show side-by-side horizontal bar charts if data is from a single, historical month }} +
+ + {{#if this.barChartTotalClients}} + {{#if @isHistoricalMonth}} +
+

New clients

+

{{this.chartText.newCopy}}

+ +
+ +
+

Total clients

+

{{this.chartText.totalCopy}}

+ +
+ {{else}} +
+ +
+
+

{{this.chartText.totalCopy}}

+
+ +
+

Top {{this.attributionBreakdown}}

+

{{this.topClientCounts.label}}

+
+ +
+

Clients in {{this.attributionBreakdown}}

+

{{format-number this.topClientCounts.clients}}

+
+ {{/if}} +
+ {{capitalize @chartLegend.0.label}} + {{capitalize @chartLegend.1.label}} +
+ {{else}} +
+ +
+ {{/if}} +
+ {{#if @responseTimestamp}} + Updated + {{date-format @responseTimestamp "MMM d yyyy, h:mm:ss aaa" withTimeZone=true}} + {{/if}} +
+
+ +{{! MODAL FOR CSV DOWNLOAD }} + + +
+ + + {{#if @upgradeExplanation}} +
+ + Your data contains an upgrade. + + Learn more here. + + +

{{@upgradeExplanation}}

+
+ {{/if}} +
+
\ No newline at end of file diff --git a/ui/app/templates/components/clients/config.hbs b/ui/app/templates/components/clients/config.hbs new file mode 100644 index 0000000..f87f765 --- /dev/null +++ b/ui/app/templates/components/clients/config.hbs @@ -0,0 +1,92 @@ +{{#if (eq @mode "edit")}} +
+
+ + {{#each @model.formFields as |attr|}} + {{#if (eq attr.name "enabled")}} + +

+ Enable or disable client tracking. Keep in mind that disabling tracking will delete the data for the current + month. +

+
+ + +
+ {{else}} + + {{/if}} + {{/each}} +
+
+
+ + + Cancel + +
+
+
+ + + +
+ + +
+
+{{else}} +
+ {{#each this.infoRows as |item|}} + + {{/each}} +
+{{/if}} \ No newline at end of file diff --git a/ui/app/templates/components/clients/dashboard.hbs b/ui/app/templates/components/clients/dashboard.hbs new file mode 100644 index 0000000..01642ce --- /dev/null +++ b/ui/app/templates/components/clients/dashboard.hbs @@ -0,0 +1,208 @@ +
+

+ This dashboard will surface Vault client usage over time. Clients represent a user or service that has authenticated to + Vault. Documentation is available + here. + Date queries are sent in UTC. +

+

+ {{this.versionText.label}} +

+
+ {{#if this.formattedStartDate}} +

{{this.formattedStartDate}}

+ + {{else}} + + {{/if}} +
+

+ {{this.versionText.description}} +

+ {{#if this.noActivityData}} + {{#if (eq @model.config.enabled "On")}} + + {{else}} + + {{#if @model.config.canEdit}} +

+ + Go to configuration + +

+ {{/if}} +
+ {{/if}} + {{else if this.errorObject}} + + {{else}} + {{#if (eq @model.config.enabled "Off")}} + + Tracking is currently disabled and data is not being collected. Historical data can be searched, but you will need to + + edit the configuration + + to enable tracking again. + + {{/if}} + {{#if (or this.totalUsageCounts this.hasAttributionData)}} +
+ FILTERS + + + + {{#if this.namespaceArray}} + + {{/if}} + {{#if (not (is-empty this.authMethodOptions))}} + + {{/if}} + + +
+ + {{#if (or this.upgradeDuringActivity this.startTimeDiscrepancy)}} + +
    + {{#if this.startTimeDiscrepancy}} +
{{this.startTimeDiscrepancy}}
+ {{/if}} + {{#if this.upgradeDuringActivity}} +
+ {{this.upgradeVersionAndDate}} + {{this.upgradeExplanation}} + + Learn more here. + +
+ {{/if}} +
+
+ {{/if}} + {{#if this.isLoadingQuery}} + + {{else}} + {{#if this.totalUsageCounts}} + {{#unless this.byMonthActivityData}} + {{! UsageStats render when viewing a single, historical month AND activity data predates new client breakdown (< v1.10.0) + or viewing the current month filtered down to auth method }} + + {{/unless}} + {{#if this.byMonthActivityData}} + + {{/if}} + {{#if this.hasAttributionData}} + + {{/if}} + {{#if this.hasMultipleMonthsData}} + + {{/if}} + {{/if}} + {{/if}} + {{else if (and (not @model.licenseStartTimestamp) (not this.startMonthTimestamp))}} + {{! Empty state for no billing/license start date }} + + {{else}} + + + + {{/if}} + {{/if}} + + {{! BILLING START DATE MODAL }} + + + + + +
\ No newline at end of file diff --git a/ui/app/templates/components/clients/error.hbs b/ui/app/templates/components/clients/error.hbs new file mode 100644 index 0000000..6eb1365 --- /dev/null +++ b/ui/app/templates/components/clients/error.hbs @@ -0,0 +1,25 @@ + + {{#if (eq @error.httpStatus 403)}} +

+ You must be granted permissions to view this page. Ask your administrator if you think you should have access to the + {{@error.path}} + endpoint. +

+ {{else}} +
    + {{#if @error.message}} +
{{@error.message}}
+
    + {{/if}} + {{#each @error.errors as |error|}} +
+ {{error}} +
+ {{/each}} +
+ {{/if}} +
\ No newline at end of file diff --git a/ui/app/templates/components/clients/horizontal-bar-chart.hbs b/ui/app/templates/components/clients/horizontal-bar-chart.hbs new file mode 100644 index 0000000..1aac955 --- /dev/null +++ b/ui/app/templates/components/clients/horizontal-bar-chart.hbs @@ -0,0 +1,29 @@ +{{#if @dataset}} + + +{{else}} + +{{/if}} +{{#if this.tooltipTarget}} + {{! Required to set tag name = div https://github.com/yapplabs/ember-modal-dialog/issues/290 }} + {{! Component must be in curly bracket notation }} + {{! template-lint-disable no-curly-component-invocation }} + {{#modal-dialog + tagName="div" + tetherTarget=this.tooltipTarget + targetAttachment="bottom middle" + attachment="bottom middle" + offset="35px 0" + }} +
+

{{this.tooltipText}}

+
+
+ {{/modal-dialog}} +{{/if}} \ No newline at end of file diff --git a/ui/app/templates/components/clients/line-chart.hbs b/ui/app/templates/components/clients/line-chart.hbs new file mode 100644 index 0000000..65433a9 --- /dev/null +++ b/ui/app/templates/components/clients/line-chart.hbs @@ -0,0 +1,38 @@ +{{#if @dataset}} + + +{{else}} + +{{/if}} + +{{! TOOLTIP }} + +{{#if this.tooltipTarget}} + {{! Required to set tag name = div https://github.com/yapplabs/ember-modal-dialog/issues/290 }} + {{! Component must be in curly bracket notation }} + {{! template-lint-disable no-curly-component-invocation }} + {{#modal-dialog + tagName="div" + tetherTarget=this.tooltipTarget + targetAttachment="bottom middle" + attachment="bottom middle" + offset="35px 0" + }} +
+

{{this.tooltipMonth}}

+

{{this.tooltipTotal}}

+

{{this.tooltipNew}}

+ {{#if this.tooltipUpgradeText}} +
+

{{this.tooltipUpgradeText}}

+ {{/if}} +
+
+ {{/modal-dialog}} +{{/if}} \ No newline at end of file diff --git a/ui/app/templates/components/clients/monthly-usage.hbs b/ui/app/templates/components/clients/monthly-usage.hbs new file mode 100644 index 0000000..307c6b0 --- /dev/null +++ b/ui/app/templates/components/clients/monthly-usage.hbs @@ -0,0 +1,47 @@ +
+
+

Vault usage

+

+ This data can be used to understand how many total clients are using Vault each month for this date range. +

+
+ +
+ +
+ +
+

Total monthly clients

+

+ Each client is counted once per month. This can help with capacity planning. +

+
+ +
+

Average total clients per month

+

+ {{format-number this.averageTotalClients}} +

+
+ +
+

Average new clients per month

+

+ {{format-number this.averageNewClients}} +

+
+ +
+ {{#if @responseTimestamp}} + Updated + {{date-format @responseTimestamp "MMM d yyyy, h:mm:ss aaa" withTimeZone=true}} + {{/if}} +
+ + {{#if @verticalBarChartData}} +
+ {{capitalize @chartLegend.0.label}} + {{capitalize @chartLegend.1.label}} +
+ {{/if}} +
\ No newline at end of file diff --git a/ui/app/templates/components/clients/running-total.hbs b/ui/app/templates/components/clients/running-total.hbs new file mode 100644 index 0000000..2a857cb --- /dev/null +++ b/ui/app/templates/components/clients/running-total.hbs @@ -0,0 +1,144 @@ +{{#if (gt @byMonthActivityData.length 1)}} +
+
+
+

Vault client counts

+

+ A client is any user or service that interacts with Vault. They are made up of entity clients and non-entity + clients. The total client count number is an important consideration for Vault billing. +

+
+ +
+ +
+ +
+

Running client total

+

The number of clients which interacted with Vault during this date range.

+
+
+

Entity clients

+

+ {{format-number this.entityClientData.runningTotal}} +

+
+ +
+

Non-entity clients

+

+ {{format-number this.nonEntityClientData.runningTotal}} +

+
+
+ +
+
+ +
+ +
+

New monthly clients

+

+ Clients which interacted with Vault for the first time during this date range, displayed per month. +

+
+ + {{#if this.hasAverageNewClients}} +
+

Average new entity clients per month

+

+ {{format-number this.entityClientData.averageNewClients}} +

+
+ +
+

Average new non-entity clients per month

+

+ {{format-number this.nonEntityClientData.averageNewClients}} +

+
+ {{/if}} + +
+ {{#if @responseTimestamp}} + Updated + {{date-format @responseTimestamp "MMM d yyyy, h:mm:ss aaa" withTimeZone=true}} + {{/if}} +
+ + {{#if this.hasAverageNewClients}} +
+ {{capitalize @chartLegend.0.label}} + {{capitalize @chartLegend.1.label}} +
+ {{/if}} +
+
+{{else}} + {{#if (and @isHistoricalMonth this.singleMonthData.new_clients.clients)}} +
+
+

Vault client counts

+

+ A client is any user or service that interacts with Vault. They are made up of entity clients and non-entity + clients. The total client count number is an important consideration for Vault billing. +

+
+
+
+ +
+
+ +
+
+ +
+
+
+
+ +
+
+ +
+
+ +
+
+
+ {{else}} + {{! This renders when either: + -> viewing the current month and all namespaces (no filters) + -> filtering by a namespace with no month over month data + if filtering by a mount with no month over month data in dashboard.hbs renders }} + + {{/if}} +{{/if}} \ No newline at end of file diff --git a/ui/app/templates/components/clients/usage-stats.hbs b/ui/app/templates/components/clients/usage-stats.hbs new file mode 100644 index 0000000..f993a7e --- /dev/null +++ b/ui/app/templates/components/clients/usage-stats.hbs @@ -0,0 +1,45 @@ +
+ + +
+
+ +
+
+ +
+
+ +
+
+
\ No newline at end of file diff --git a/ui/app/templates/components/clients/vertical-bar-chart.hbs b/ui/app/templates/components/clients/vertical-bar-chart.hbs new file mode 100644 index 0000000..01e0b7d --- /dev/null +++ b/ui/app/templates/components/clients/vertical-bar-chart.hbs @@ -0,0 +1,34 @@ +{{#if @dataset}} + + +{{else}} + +{{/if}} + +{{! TOOLTIP }} + +{{#if this.tooltipTarget}} + {{! Required to set tag name = div https://github.com/yapplabs/ember-modal-dialog/issues/290 }} + {{! Component must be in curly bracket notation }} + {{! template-lint-disable no-curly-component-invocation }} + {{#modal-dialog + tagName="div" + tetherTarget=this.tooltipTarget + targetAttachment="bottom middle" + attachment="bottom middle" + offset="10px 0" + }} +
+

{{this.tooltipTotal}}

+

{{this.entityClients}}

+

{{this.nonEntityClients}}

+
+
+ {{/modal-dialog}} +{{/if}} \ No newline at end of file diff --git a/ui/app/templates/components/configure-aws-secret.hbs b/ui/app/templates/components/configure-aws-secret.hbs new file mode 100644 index 0000000..d872ecf --- /dev/null +++ b/ui/app/templates/components/configure-aws-secret.hbs @@ -0,0 +1,154 @@ +
+ +
+ +{{#if (eq @tab "leases")}} +
+
+ + +

+ If you do not supply lease settings, we will use the default values in AWS. +

+
+ + +
+ +
+ +{{else}} +
+
+ +

+ Note: the client uses the official AWS SDK and will use the specified credentials, environment credentials, shared + file credentials, or IAM role/ECS task credentials in that order. +

+
+ +
+ +
+ +
+
+ +
+ +
+ +
+
+ + + {{#if this.showOptions}} +
+
+ +
+
+ +
+
+
+
+ +
+ +
+
+
+ +
+ +
+
+
+ {{/if}} + +
+ +
+ +{{/if}} \ No newline at end of file diff --git a/ui/app/templates/components/configure-ssh-secret.hbs b/ui/app/templates/components/configure-ssh-secret.hbs new file mode 100644 index 0000000..93a016d --- /dev/null +++ b/ui/app/templates/components/configure-ssh-secret.hbs @@ -0,0 +1,90 @@ +{{#if @configured}} +
+
+ +
+ +
+
+
+
+
+ + Copy + +
+
+ + Delete + +
+
+{{else}} +
+
+ +
+ +
+ +
+
+
+ +
+ +
+

+ {{#if this.textareaHelpText}} + {{this.textareaHelpText}} + {{else}} + Enter a base64-encoded key + {{/if}} +

+ {{else}} +
+
+
+ + + + + + + {{#if this.key.filename}} + + {{/if}} + +
+
+
+

+ {{#if this.fileHelpText}} + {{this.fileHelpText}} + {{else}} + Select a PGP key from your computer + {{/if}} +

+ {{/if}} +
\ No newline at end of file diff --git a/ui/app/templates/components/pgp-list.hbs b/ui/app/templates/components/pgp-list.hbs new file mode 100644 index 0000000..7381184 --- /dev/null +++ b/ui/app/templates/components/pgp-list.hbs @@ -0,0 +1,7 @@ +{{#each this.listData as |key index|}} + +{{else}} +

+ Enter a number of Key Shares to enter PGP keys. +

+{{/each}} \ No newline at end of file diff --git a/ui/app/templates/components/radial-progress.hbs b/ui/app/templates/components/radial-progress.hbs new file mode 100644 index 0000000..0bcd24b --- /dev/null +++ b/ui/app/templates/components/radial-progress.hbs @@ -0,0 +1,20 @@ + + \ No newline at end of file diff --git a/ui/app/templates/components/raft-join.hbs b/ui/app/templates/components/raft-join.hbs new file mode 100644 index 0000000..c00409e --- /dev/null +++ b/ui/app/templates/components/raft-join.hbs @@ -0,0 +1,55 @@ + +{{#if this.showJoinForm}} +
+

+ Join an existing Raft cluster +

+ +
+{{else}} + +
+

+ This server is configured to use Raft Storage. +
+
+ How do you want to get started? +

+
+ + + Join an existing Raft cluster +
+
+ + + Create a new Raft cluster +
+
+
+ +
+ +{{/if}} \ No newline at end of file diff --git a/ui/app/templates/components/raft-storage-overview.hbs b/ui/app/templates/components/raft-storage-overview.hbs new file mode 100644 index 0000000..b6bd618 --- /dev/null +++ b/ui/app/templates/components/raft-storage-overview.hbs @@ -0,0 +1,102 @@ + + +

+ Raft Storage +

+
+
+ + + + + Snapshots + + + + + + + + + + + + + + + + + + + + {{#each @model as |server|}} + + + + + + {{/each}} + +
AddressVoter
+ {{server.address}} + {{#if server.leader}} + Leader + {{/if}} + + + {{#if server.voter}} + + {{else}} + + {{/if}} + + + + + + +
\ No newline at end of file diff --git a/ui/app/templates/components/raft-storage-restore.hbs b/ui/app/templates/components/raft-storage-restore.hbs new file mode 100644 index 0000000..efbe9eb --- /dev/null +++ b/ui/app/templates/components/raft-storage-restore.hbs @@ -0,0 +1,67 @@ + + + + + +

+ Restore Snapshot +

+
+
+ +
+ + + {{#if this.isUploading}} +
+ +
+
+ +
+ {{else}} +
+ + +
+ + +

+ Bypass checks to ensure the AutoUnseal or Shamir keys are consistent with the snapshot data. +

+
+
+ + {{/if}} + \ No newline at end of file diff --git a/ui/app/templates/components/role-aws-edit.hbs b/ui/app/templates/components/role-aws-edit.hbs new file mode 100644 index 0000000..d2db6a9 --- /dev/null +++ b/ui/app/templates/components/role-aws-edit.hbs @@ -0,0 +1,105 @@ + + + + + +

+ {{#if (eq this.mode "create")}} + Create an AWS Role + {{else if (eq this.mode "edit")}} + Edit AWS Role + {{this.model.id}} + {{else}} + AWS Role + {{this.model.id}} + {{/if}} +

+
+
+ +{{#if (eq this.mode "show")}} + + + {{#if this.model.canDelete}} + + Delete role + +
+ {{/if}} + {{#if this.model.canGenerate}} + + Generate credentials + + {{/if}} + {{#if this.model.canEdit}} + + Edit role + + {{/if}} +
+
+{{/if}} + +{{#if (or (eq this.mode "edit") (eq this.mode "create"))}} +
+
+ + + {{#if (gt this.model.credentialTypes.length 1)}} + + {{/if}} + {{#each (if (eq this.mode "edit") (drop 1 (or this.model.fields (array))) this.model.fields) as |attr|}} + + {{/each}} +
+
+
+ + + Cancel + +
+
+
+{{else}} +
+ {{#if (gt this.model.credentialTypes.length 1)}} + + {{/if}} + {{#each this.model.fields as |attr|}} + {{#if (eq attr.name "policyDocument")}} + +
{{stringify (jsonify this.model.policyDocument)}}
+
+ {{else}} + + {{/if}} + {{/each}} +
+{{/if}} \ No newline at end of file diff --git a/ui/app/templates/components/role-ssh-edit.hbs b/ui/app/templates/components/role-ssh-edit.hbs new file mode 100644 index 0000000..bdcf551 --- /dev/null +++ b/ui/app/templates/components/role-ssh-edit.hbs @@ -0,0 +1,95 @@ + + + + + +

+ {{#if (eq this.mode "create")}} + Create an SSH role + {{else if (eq this.mode "edit")}} + Edit SSH role + {{else}} + SSH role + {{this.model.id}} + {{/if}} +

+
+
+ +{{#if (eq this.mode "show")}} + + + {{#if this.model.canDelete}} + + Delete role + +
+ {{/if}} + {{#if (eq this.model.keyType "otp")}} + + Generate Credential + + {{else}} + + Sign Keys + + {{/if}} + {{#if (or this.model.canUpdate this.model.canDelete)}} + + Edit role + + {{/if}} +
+
+{{/if}} + +{{#if (or (eq this.mode "edit") (eq this.mode "create"))}} +
+
+ + + +
+
+
+ + + Cancel + +
+
+
+{{else}} +
+ {{#each this.model.showFields as |attr|}} + {{#if (eq attr.type "object")}} + + {{else}} + + {{/if}} + {{/each}} +
+{{/if}} \ No newline at end of file diff --git a/ui/app/templates/components/secret-create-or-update.hbs b/ui/app/templates/components/secret-create-or-update.hbs new file mode 100644 index 0000000..3bb582f --- /dev/null +++ b/ui/app/templates/components/secret-create-or-update.hbs @@ -0,0 +1,260 @@ +{{#if (eq @mode "create")}} +
+
+ + + +

+ +

+ {{#if (get this.validationMessages "path")}} + + {{/if}} + {{#if @modelForData.isFolder}} +

+ The secret path may not end in + / +

+ {{/if}} + {{#if this.pathWhiteSpaceWarning}} +
+ +
+ {{/if}} +
+ {{#if @showAdvancedMode}} +
+ +
+ {{else}} +
+ + {{#each @secretData as |secret index|}} +
+
+ +
+
+ +
+
+ {{#if (eq @secretData.length (inc index))}} + + {{else}} + + {{/if}} +
+
+ {{#if this.validationMessages.key}} + + {{/if}} + {{/each}} +
+ {{/if}} + {{! must have UPDATE permissions to add secret metadata. Create only will not work }} + {{#if (and @isV2 @canUpdateSecretMetadata)}} + + {{#if this.showMetadata}} + + {{/if}} + {{/if}} +
+
+ +
+
+ + Cancel + +
+
+ +{{/if}} + +{{#if (eq @mode "edit")}} + {{! no metadata option because metadata is version agnostic }} +
+
+ + {{#if (eq @canReadSecretData false)}} + + {{/if}} + + {{#if this.isCreateNewVersionFromOldVersion}} +
+ +
+ {{/if}} + {{#if @showAdvancedMode}} +
+ +
+ {{else}} +
+ + {{#each @secretData as |secret index|}} +
+
+ +
+
+ +
+
+ {{#if (eq @secretData.length (inc index))}} + + {{else}} + + {{/if}} +
+
+ {{/each}} +
+ {{/if}} +
+
+
+
+ +
+
+ + Cancel + +
+
+
+
+{{/if}} \ No newline at end of file diff --git a/ui/app/templates/components/secret-delete-menu.hbs b/ui/app/templates/components/secret-delete-menu.hbs new file mode 100644 index 0000000..f9afe3f --- /dev/null +++ b/ui/app/templates/components/secret-delete-menu.hbs @@ -0,0 +1,145 @@ +{{#if @isV2}} + {{#if (and this.canUndeleteVersion @modelForData.deleted)}} + + {{/if}} + {{#if (and (not @modelForData.deleted) (not @modelForData.destroyed))}} + {{#if (or this.canDestroyVersion this.canDestroyAllVersions)}} + +
+ {{else}} + {{#if (or (and this.isLatestVersion this.canDeleteSecretData) this.canSoftDeleteSecretData)}} + + Delete + + {{/if}} + {{/if}} + {{/if}} + + + +
+ + +
+
+{{else if this.canDeleteSecretData}} + + Delete + +
+{{/if}} \ No newline at end of file diff --git a/ui/app/templates/components/secret-edit-metadata.hbs b/ui/app/templates/components/secret-edit-metadata.hbs new file mode 100644 index 0000000..feb15e4 --- /dev/null +++ b/ui/app/templates/components/secret-edit-metadata.hbs @@ -0,0 +1,52 @@ +
+
+

+ The options below are all version-agnostic; they apply to all versions of this secret. + {{if (eq @mode "create") "After the secret is created, this can be edited in the Metadata tab." ""}} +

+ {{#each @model.fields as |attr|}} + {{#if (eq attr.name "customMetadata")}} + + + + {{/if}} + {{#if (not-eq attr.name "customMetadata")}} + + {{/if}} + {{/each}} +
+ {{#if (not-eq @mode "create")}} +
+
+
+ +
+
+ + Cancel + +
+
+
+ {{/if}} +
\ No newline at end of file diff --git a/ui/app/templates/components/secret-edit-toolbar.hbs b/ui/app/templates/components/secret-edit-toolbar.hbs new file mode 100644 index 0000000..5748717 --- /dev/null +++ b/ui/app/templates/components/secret-edit-toolbar.hbs @@ -0,0 +1,110 @@ +{{! template-lint-configure simple-unless "warn" }} + + {{#unless (and (eq @mode "show") @isWriteWithoutRead)}} + + + JSON + + + {{/unless}} + + {{#if (eq @mode "show")}} + + {{/if}} + {{#if (and (eq @mode "show") @canUpdateSecretData)}} + {{#unless (and @isV2 (or @isWriteWithoutRead @modelForData.destroyed @modelForData.deleted))}} + + + Copy + + + + + + + {{/unless}} + {{/if}} + + {{#if (and (eq @mode "show") @isV2 (not @model.failedServerRead))}} + + {{/if}} + + {{#if (and (eq @mode "show") @canUpdateSecretData)}} + {{#let (concat "vault.cluster.secrets.backend." (if (eq @mode "show") "edit" "show")) as |targetRoute|}} + {{#if @isV2}} + + Create new version + + {{else}} + + Edit secret + + {{/if}} + {{/let}} + {{/if}} + + \ No newline at end of file diff --git a/ui/app/templates/components/secret-edit.hbs b/ui/app/templates/components/secret-edit.hbs new file mode 100644 index 0000000..78596e8 --- /dev/null +++ b/ui/app/templates/components/secret-edit.hbs @@ -0,0 +1,85 @@ +
+ + + + + +

+ {{#if (eq @mode "create")}} + Create secret + {{else if (and this.isV2 (eq @mode "edit"))}} + Create new version + {{else if (eq @mode "edit")}} + Edit secret + {{else}} + {{@key.id}} + {{/if}} +

+
+
+ {{! tabs for show only }} + {{#if (eq @mode "show")}} +
+ +
+ {{/if}} + + {{#if (and @model (not-eq this.secretData null))}} + + + {{#if (or (eq @mode "create") (eq @mode "edit"))}} + + {{else if (eq @mode "show")}} + + {{else}} + + {{/if}} + {{/if}} +
\ No newline at end of file diff --git a/ui/app/templates/components/secret-form-show.hbs b/ui/app/templates/components/secret-form-show.hbs new file mode 100644 index 0000000..4616b5d --- /dev/null +++ b/ui/app/templates/components/secret-form-show.hbs @@ -0,0 +1,88 @@ +{{#if (and @isV2 @modelForData.destroyed)}} + + + Learn more + + +{{else if (and @isV2 @modelForData.deleted)}} + + + Learn more + + +{{else if @isWriteWithoutRead}} + +{{else}} + {{#if @showAdvancedMode}} +
+ +
+ {{else}} +
+
+
+ Key +
+
+ Value +
+
+ {{#if @modelForData.createdTime}} + + + Version created + {{date-format @modelForData.createdTime "MMM dd, yyyy hh:mm a"}} + + +
+ {{@modelForData.createdTime}} +
+
+
+ {{/if}} +
+
+
+ {{#if @modelForData.secretKeyAndValue}} + {{#each @modelForData.secretKeyAndValue as |secret|}} + + {{#if secret.value}} + + {{else}} + + {{/if}} + + {{/each}} + {{else}} + {{! In the case of no key or value will still render }} + + + + {{/if}} + {{/if}} +{{/if}} \ No newline at end of file diff --git a/ui/app/templates/components/secret-link.hbs b/ui/app/templates/components/secret-link.hbs new file mode 100644 index 0000000..9246c9d --- /dev/null +++ b/ui/app/templates/components/secret-link.hbs @@ -0,0 +1,3 @@ + + {{yield}} + \ No newline at end of file diff --git a/ui/app/templates/components/secret-list/aws-role-item.hbs b/ui/app/templates/components/secret-list/aws-role-item.hbs new file mode 100644 index 0000000..220ae50 --- /dev/null +++ b/ui/app/templates/components/secret-list/aws-role-item.hbs @@ -0,0 +1,80 @@ + +
+
+ + +
+ {{if (eq @item.id " ") "(self)" (or @item.keyWithoutParent @item.id)}} +
+
+
+
+ + + + + +
+
+
\ No newline at end of file diff --git a/ui/app/templates/components/secret-list/database-list-item.hbs b/ui/app/templates/components/secret-list/database-list-item.hbs new file mode 100644 index 0000000..f382f74 --- /dev/null +++ b/ui/app/templates/components/secret-list/database-list-item.hbs @@ -0,0 +1,88 @@ + +
+
+ + +
+ {{if (eq @item.id " ") "(self)" (or @item.keyWithoutParent @item.id)}} + {{this.keyTypeValue}} +
+
+
+
+ + + +
+
+
\ No newline at end of file diff --git a/ui/app/templates/components/secret-list/item.hbs b/ui/app/templates/components/secret-list/item.hbs new file mode 100644 index 0000000..f57a391 --- /dev/null +++ b/ui/app/templates/components/secret-list/item.hbs @@ -0,0 +1,93 @@ + +
+
+ + {{#if (eq @backendModel.type "transit")}} + + {{else}} + + {{/if}} + {{if (eq @item.id " ") "(self)" (or @item.keyWithoutParent @item.id)}} + +
+
+ + + + + +
+
+
\ No newline at end of file diff --git a/ui/app/templates/components/secret-list/ssh-role-item.hbs b/ui/app/templates/components/secret-list/ssh-role-item.hbs new file mode 100644 index 0000000..028eebc --- /dev/null +++ b/ui/app/templates/components/secret-list/ssh-role-item.hbs @@ -0,0 +1,139 @@ + +
+
+ + +
+ {{if (eq @item.id " ") "(self)" (or @item.keyWithoutParent @item.id)}} +
+ {{@item.keyType}} + {{#if @item.zeroAddress}} + Zero-Address + {{/if}} +
+
+
+
+ {{#if (eq @backendType "ssh")}} + + + + + + {{/if}} +
+
+
\ No newline at end of file diff --git a/ui/app/templates/components/secret-list/transform-list-item.hbs b/ui/app/templates/components/secret-list/transform-list-item.hbs new file mode 100644 index 0000000..18e7703 --- /dev/null +++ b/ui/app/templates/components/secret-list/transform-list-item.hbs @@ -0,0 +1,70 @@ +{{#if (and @item.updatePath.canRead (not this.isBuiltin))}} + +
+
+ + + {{if (eq @item.id " ") "(self)" (or @item.keyWithoutParent @item.id)}} + +
+
+ {{#if (or @item.updatePath.canRead @item.updatePath.canUpdate)}} + + + + {{/if}} +
+
+
+{{else}} +
+
+
+ + {{#if this.isBuiltin}} + + + {{@item.id}} + + +
+ This is a built-in HashiCorp + {{@itemType}}. It can't be viewed or edited. +
+
+
+ {{else}} + {{@item.id}} + {{/if}} +
+
+
+{{/if}} \ No newline at end of file diff --git a/ui/app/templates/components/secret-list/transform-transformation-item.hbs b/ui/app/templates/components/secret-list/transform-transformation-item.hbs new file mode 100644 index 0000000..c6efb88 --- /dev/null +++ b/ui/app/templates/components/secret-list/transform-transformation-item.hbs @@ -0,0 +1,65 @@ +{{! CBS TODO do not let click if !canRead }} +{{#if (eq @options.item "transformation")}} + +
+
+ + + {{if (eq @item.id " ") "(self)" (or @item.keyWithoutParent @item.id)}} + +
+
+ {{#if (or @item.updatePath.canRead @item.updatePath.canUpdate)}} + + + + {{/if}} +
+
+
+{{else}} +
+
+
+ + {{if (eq @item.id " ") "(self)" (or @item.keyWithoutParent @item.id)}} +
+
+
+{{/if}} \ No newline at end of file diff --git a/ui/app/templates/components/secret-version-menu.hbs b/ui/app/templates/components/secret-version-menu.hbs new file mode 100644 index 0000000..a9509b9 --- /dev/null +++ b/ui/app/templates/components/secret-version-menu.hbs @@ -0,0 +1,63 @@ + + + Version + {{@version.version}} + + + + + + \ No newline at end of file diff --git a/ui/app/templates/components/section-tabs.hbs b/ui/app/templates/components/section-tabs.hbs new file mode 100644 index 0000000..50c47bd --- /dev/null +++ b/ui/app/templates/components/section-tabs.hbs @@ -0,0 +1,17 @@ +{{#let (tabs-for-auth-section this.model this.tabType this.paths) as |tabs|}} + {{#if tabs.length}} +
+ +
+ {{/if}} +{{/let}} \ No newline at end of file diff --git a/ui/app/templates/components/selectable-card.hbs b/ui/app/templates/components/selectable-card.hbs new file mode 100644 index 0000000..0bab334 --- /dev/null +++ b/ui/app/templates/components/selectable-card.hbs @@ -0,0 +1,31 @@ +{{! conditional to check if SelectableCard is a part of a CSS Grid, if yes return grid item class }} +{{#if this.gridContainer}} +
+
+

{{format-number this.total}}

+

{{@cardTitle}}

+

{{@subText}}

+
+ {{yield}} +
+{{else}} +
+
+

{{@cardTitle}}

+ + {{@actionText}} + {{#if @actionText}} + + {{/if}} + +
+

{{@subText}}

+

{{format-number this.total}}

+ {{yield}} +
+{{/if}} \ No newline at end of file diff --git a/ui/app/templates/components/shamir-progress.hbs b/ui/app/templates/components/shamir-progress.hbs new file mode 100644 index 0000000..91b731c --- /dev/null +++ b/ui/app/templates/components/shamir-progress.hbs @@ -0,0 +1,11 @@ +
+
+ + {{this.progress}}/{{this.threshold}} + keys provided + +
+
+ +
+
\ No newline at end of file diff --git a/ui/app/templates/components/splash-page.hbs b/ui/app/templates/components/splash-page.hbs new file mode 100644 index 0000000..7d36d29 --- /dev/null +++ b/ui/app/templates/components/splash-page.hbs @@ -0,0 +1,21 @@ +{{! bypass container styling }} +{{#if @hasAltContent}} + {{yield (hash altContent=(component "splash-page/splash-content"))}} +{{else}} +
+
+
+
+ {{yield (hash header=(component "splash-page/splash-header"))}} +
+
+ {{yield (hash sub-header=(component "splash-page/splash-header"))}} +
+ + {{yield (hash footer=(component "splash-page/splash-content"))}} +
+
+
+{{/if}} \ No newline at end of file diff --git a/ui/app/templates/components/token-expire-warning.hbs b/ui/app/templates/components/token-expire-warning.hbs new file mode 100644 index 0000000..6788fa4 --- /dev/null +++ b/ui/app/templates/components/token-expire-warning.hbs @@ -0,0 +1,16 @@ +{{#if (and this.showWarning (is-after (now interval=1000) @expirationDate))}} +
+ + + Reauthenticate + + +
+{{else}} + {{yield}} +{{/if}} \ No newline at end of file diff --git a/ui/app/templates/components/tool-actions-form.hbs b/ui/app/templates/components/tool-actions-form.hbs new file mode 100644 index 0000000..27781f0 --- /dev/null +++ b/ui/app/templates/components/tool-actions-form.hbs @@ -0,0 +1,63 @@ +
+ {{#if (eq this.selectedAction "hash")}} + {{! template-lint-disable no-passed-in-event-handlers }} + + {{! template-lint-enable no-passed-in-event-handlers }} + {{else if (eq this.selectedAction "random")}} + + {{else if (eq this.selectedAction "rewrap")}} + + {{else if (eq this.selectedAction "unwrap")}} + + {{else if (eq this.selectedAction "lookup")}} + + {{else if (eq this.selectedAction "wrap")}} + + {{else}} + + {{/if}} + \ No newline at end of file diff --git a/ui/app/templates/components/tool-hash.hbs b/ui/app/templates/components/tool-hash.hbs new file mode 100644 index 0000000..4d6d3b7 --- /dev/null +++ b/ui/app/templates/components/tool-hash.hbs @@ -0,0 +1,87 @@ + + +

+ Hash data +

+
+
+ +{{#if @sum}} +
+
+ +
+ +
+
+
+
+
+ + Copy + +
+
+ +
+
+{{else}} +
+ +
+ +
+ +
+
+
+ + Copy + +
+
+ +
+
+{{else}} +
+ +
+
+
+ +
+ +
+
+
+ +
+
+ +
+
+
+
+
+
+
+
+ +
+
+{{/if}} \ No newline at end of file diff --git a/ui/app/templates/components/tool-rewrap.hbs b/ui/app/templates/components/tool-rewrap.hbs new file mode 100644 index 0000000..e5914c7 --- /dev/null +++ b/ui/app/templates/components/tool-rewrap.hbs @@ -0,0 +1,63 @@ + + +

+ Rewrap token +

+
+
+ +{{#if @rewrap_token}} +
+
+ +
+ + {{#if this.validationError}} + + {{/if}} + {{else if (eq @attr.options.editType "password")}} + + {{else if (eq @attr.options.editType "json")}} + {{! JSON Editor }} + {{#let (get @model this.valuePath) as |value|}} + + {{#if @attr.options.allowReset}} + + {{/if}} + + {{/let}} + {{#if @attr.options.subText}} +

+ {{@attr.options.subText}} + {{#if @attr.options.docLink}} + + See our documentation + + for help. + {{/if}} +

+ {{/if}} + {{else}} + {{! Regular Text Input }} + + {{#if @attr.options.validationAttr}} + {{#if (and (get @model this.valuePath) (not (get @model @attr.options.validationAttr)))}} + + {{/if}} + {{/if}} + {{#if this.validationError}} + + {{/if}} + {{#if this.validationWarning}} + + {{/if}} + {{/if}} +
+ {{else if (eq @attr.type "boolean")}} +
+ + + {{#if @attr.options.subText}} +

+ {{@attr.options.subText}} + {{#if @attr.options.docLink}} + + Learn more here. + + {{/if}} +

+ {{/if}} +
+ {{else if (eq @attr.type "object")}} + + {{else if (eq @attr.options.editType "yield")}} + {{yield}} + {{/if}} +
\ No newline at end of file diff --git a/ui/lib/core/addon/components/form-field.js b/ui/lib/core/addon/components/form-field.js new file mode 100644 index 0000000..3473cae --- /dev/null +++ b/ui/lib/core/addon/components/form-field.js @@ -0,0 +1,187 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Component from '@glimmer/component'; +import { tracked } from '@glimmer/tracking'; +import { action } from '@ember/object'; +import { capitalize } from 'vault/helpers/capitalize'; +import { humanize } from 'vault/helpers/humanize'; +import { dasherize } from 'vault/helpers/dasherize'; +import { assert } from '@ember/debug'; +/** + * @module FormField + * `FormField` components are field elements associated with a particular model. + * + * @example + * ```js + * {{#each @model.fields as |attr|}} + * + * {{/each}} + * ``` + * example attr object: + * attr = { + * name: "foo", // name of attribute -- used to populate various fields and pull value from model + * options: { + * label: "Foo", // custom label to be shown, otherwise attr.name will be displayed + * defaultValue: "", // default value to display if model value is not present + * fieldValue: "bar", // used for value lookup on model over attr.name + * editType: "ttl", type of field to use. List of editTypes:boolean, file, json, kv, optionalText, mountAccessor, password, radio, regex, searchSelect, stringArray, textarea, ttl, yield. + * helpText: "This will be in a tooltip", + * readOnly: true + * }, + * type: "boolean" // type of attribute value -- string, boolean, etc. + * } + * @param {Object} attr - usually derived from ember model `attributes` lookup, and all members of `attr.options` are optional + * @param {Model} model - Ember Data model that `attr` is defined on + * @param {boolean} [disabled=false] - whether the field is disabled + * @param {boolean} [showHelpText=true] - whether to show the tooltip with help text from OpenAPI + * @param {string} [subText] - text to be displayed below the label + * @param {string} [mode] - used when editType is 'kv' + * @param {ModelValidations} [modelValidations] - Object of errors. If attr.name is in object and has error message display in AlertInline. + * @callback onChangeCallback + * @param {onChangeCallback} [onChange] - called whenever a value on the model changes via the component + * @callback onKeyUpCallback + * @param {onKeyUpCallback} [onKeyUp] - function passed through into MaskedInput to handle validation. It is also handled for certain form-field types here in the action handleKeyUp. + * + */ + +export default class FormFieldComponent extends Component { + emptyData = '{\n}'; + shouldHideLabel = [ + 'boolean', + 'file', + 'json', + 'kv', + 'mountAccessor', + 'optionalText', + 'regex', + 'searchSelect', + 'stringArray', + 'ttl', + ]; + @tracked showInput = false; + + constructor() { + super(...arguments); + const { attr, model } = this.args; + const valuePath = attr.options?.fieldValue || attr.name; + assert( + 'Form is attempting to modify an ID. 
Ember-data does not allow this.', + valuePath.toLowerCase() !== 'id' + ); + const modelValue = model[valuePath]; + this.showInput = !!modelValue; + } + + get hideLabel() { + const { type, options } = this.args.attr; + if (type === 'boolean' || type === 'object' || options?.isSectionHeader) { + return true; + } + // falsey values render a + return this.shouldHideLabel.includes(options?.editType); + } + + get disabled() { + return this.args.disabled || false; + } + get showHelpText() { + return this.args.showHelpText === false ? false : true; + } + get subText() { + return this.args.subText || ''; + } + // used in the label element next to the form element + get labelString() { + const label = this.args.attr.options?.label || ''; + if (label) { + return label; + } + if (this.args.attr.name) { + return capitalize([humanize([dasherize([this.args.attr.name])])]); + } + return ''; + } + // both the path to mutate on the model, and the path to read the value from + get valuePath() { + return this.args.attr.options?.fieldValue || this.args.attr.name; + } + get isReadOnly() { + const readonly = this.args.attr.options?.readOnly || false; + return readonly && this.args.mode === 'edit'; + } + get validationError() { + const validations = this.args.modelValidations || {}; + const state = validations[this.valuePath]; + return state && !state.isValid ? state.errors.join(' ') : null; + } + get validationWarning() { + const validations = this.args.modelValidations || {}; + const state = validations[this.valuePath]; + return state?.warnings?.length ? state.warnings.join(' ') : null; + } + + onChange() { + if (this.args.onChange) { + this.args.onChange(...arguments); + } + } + + @action + setFile(keyFile) { + const path = this.valuePath; + const { value } = keyFile; + this.args.model.set(path, value); + this.onChange(path, value); + } + @action + setAndBroadcast(value) { + this.args.model.set(this.valuePath, value); + this.onChange(this.valuePath, value); + } + @action + setAndBroadcastBool(trueVal, falseVal, event) { + const valueToSet = event.target.checked === true ? trueVal : falseVal; + this.setAndBroadcast(valueToSet); + } + @action + setAndBroadcastTtl(value) { + const alwaysSendValue = this.valuePath === 'expiry' || this.valuePath === 'safetyBuffer'; + const valueToSet = value.enabled === true || alwaysSendValue ? `${value.seconds}s` : 0; + this.setAndBroadcast(`${valueToSet}`); + } + @action + codemirrorUpdated(isString, value, codemirror) { + codemirror.performLint(); + const hasErrors = codemirror.state.lint.marked.length > 0; + const valToSet = isString ? value : JSON.parse(value); + + if (!hasErrors) { + this.args.model.set(this.valuePath, valToSet); + this.onChange(this.valuePath, valToSet); + } + } + @action + toggleShow() { + const value = !this.showInput; + this.showInput = value; + if (!value) { + this.setAndBroadcast(null); + } + } + @action + handleKeyUp(maybeEvent) { + const value = typeof maybeEvent === 'object' ? maybeEvent.target.value : maybeEvent; + if (!this.args.onKeyUp) { + return; + } + this.args.onKeyUp(this.valuePath, value); + } + @action + onChangeWithEvent(event) { + const prop = event.target.type === 'checkbox' ? 'checked' : 'value'; + this.setAndBroadcast(event.target[prop]); + } +} diff --git a/ui/lib/core/addon/components/form-save-buttons.js b/ui/lib/core/addon/components/form-save-buttons.js new file mode 100644 index 0000000..9bfa234 --- /dev/null +++ b/ui/lib/core/addon/components/form-save-buttons.js @@ -0,0 +1,38 @@ +/** + * Copyright (c) HashiCorp, Inc. 
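To make the FormField contract described in the JSDoc above concrete, here is a hedged sketch of an `attr` object (field names are illustrative, not taken from this diff), together with the TTL serialization that `setAndBroadcastTtl` performs:

```js
// Illustrative only - an attr shape matching the @module FormField docs above.
const attr = {
  name: 'ttl',
  type: 'string',
  options: {
    label: 'Time to live', // otherwise attr.name is dasherized/humanized/capitalized
    editType: 'ttl', // one of the listed editTypes; 'ttl' is in shouldHideLabel
    helpText: 'Shown in a tooltip',
  },
};

// setAndBroadcastTtl above then serializes a TTL widget value to a string:
// { enabled: true, seconds: 300 } -> '300s'; disabled (and not expiry/safetyBuffer) -> '0'.
```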
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Component from '@ember/component'; +import { computed } from '@ember/object'; +import layout from '../templates/components/form-save-buttons'; + +/** + * @module FormSaveButtons + * `FormSaveButtons` displays a button save and a cancel button at the bottom of a form. + * + * @example + * ```js + * + * ``` + * + * @param [saveButtonText="Save" {String}] - The text that will be rendered on the Save button. + * @param [cancelButtonText="Cancel" {String}] - The text that will be rendered on the Cancel button. + * @param [isSaving=false {Boolean}] - If the form is saving, this should be true. This will disable the save button and render a spinner on it; + * @param [cancelLinkParams=[] {Array}] - An array of arguments used to construct a link to navigate back to when the Cancel button is clicked. + * @param [onCancel=null {Function}] - If the form should call an action on cancel instead of route somewhere, the function can be passed using onCancel instead of passing an array to cancelLinkParams. + * @param [includeBox=true {Boolean}] - By default we include padding around the form with underlines. Passing this value as false will remove that padding. + * + */ + +export default Component.extend({ + layout, + tagName: '', + + cancelLink: computed('cancelLinkParams.[]', function () { + if (!Array.isArray(this.cancelLinkParams) || !this.cancelLinkParams.length) return; + const [route, ...models] = this.cancelLinkParams; + return { route, models }; + }), +}); diff --git a/ui/lib/core/addon/components/icon.hbs b/ui/lib/core/addon/components/icon.hbs new file mode 100644 index 0000000..3ee2056 --- /dev/null +++ b/ui/lib/core/addon/components/icon.hbs @@ -0,0 +1,7 @@ +{{#if this.isFlightIcon}} + +{{else}} + + {{svg-jar this.name}} + +{{/if}} \ No newline at end of file diff --git a/ui/lib/core/addon/components/icon.js b/ui/lib/core/addon/components/icon.js new file mode 100644 index 0000000..b9b3b6d --- /dev/null +++ b/ui/lib/core/addon/components/icon.js @@ -0,0 +1,48 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Component from '@glimmer/component'; +import { assert } from '@ember/debug'; +import flightIconMap from '@hashicorp/flight-icons/catalog.json'; +const flightIconNames = flightIconMap.assets.mapBy('iconName').uniq(); + +/** + * @module Icon + * `Icon` components are glyphs used to indicate important information. + * + * Flight icon documentation at https://flight-hashicorp.vercel.app/ + * + * @example + * ```js + * + * ``` + * @param {string} name=null - The name of the SVG to render inline. + * @param {string} [size=16] - size for flight icon, can be 16 or 24 + * + */ + +export default class Icon extends Component { + constructor(owner, args) { + super(owner, args); + assert('Icon component size argument must be either "16" or "24"', ['16', '24'].includes(this.size)); + } + + get size() { + return this.args.size || '16'; + } + + get name() { + return this.args.name || null; + } + + // favor flight icon set and fall back to structure icons if not found + get isFlightIcon() { + return this.name ? flightIconNames.includes(this.name) : false; + } + + get hsIconClass() { + return this.size === '24' ? 'hs-icon-xl' : 'hs-icon-l'; + } +} diff --git a/ui/lib/core/addon/components/info-table-item-array.hbs b/ui/lib/core/addon/components/info-table-item-array.hbs new file mode 100644 index 0000000..5a3f1e3 --- /dev/null +++ b/ui/lib/core/addon/components/info-table-item-array.hbs @@ -0,0 +1,76 @@ +{{! 
the class linkable-item is needed for the read-more component }} +
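A small sketch (route and model values invented) of how the `cancelLink` computed property in the FormSaveButtons component above unpacks `cancelLinkParams`:

```js
// Illustrative only: the first element is the route, the rest become models.
const cancelLinkParams = ['vault.cluster.secrets.backend.list-root', 'my-backend'];
const [route, ...models] = cancelLinkParams;
const cancelLink = { route, models };
// => { route: 'vault.cluster.secrets.backend.list-root', models: ['my-backend'] }
```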
+ {{#if @isLink}} +
+ {{#if this.fetchComplete}} + + {{#each this.displayArrayTruncated as |item|}} + {{#if (is-wildcard-string item)}} + {{#let (filter-wildcard item this.allOptions) as |wildcardCount|}} + {{item}} + + {{if (not-eq wildcardCount undefined) (concat "includes " wildcardCount)}} + {{if (eq wildcardCount 1) @wildcardLabel (pluralize @wildcardLabel)}} + + {{#if (eq this.displayArrayTruncated.lastObject item)}} + + View all {{lowercase @label}}. + + {{/if}} + {{/let}} + {{else}} + {{#if (is-array this.itemRoute)}} + + {{or (get this.itemNameById item) item}} + + {{else}} + + {{or (get this.itemNameById item) item}} + + {{/if}} + {{/if}} + {{#if (not-eq item this.displayArrayTruncated.lastObject)}} + ,  + {{/if}} + {{#unless this.doNotTruncate}} + {{#if (and (eq item this.displayArrayTruncated.lastObject) (gte @displayArray.length 10))}} + {{! dec is a math helper that decrements by 5 the length of the array ex: 11-5 = "and 6 others."}} + +  and + {{dec 5 @displayArray.length}} + others.  + + {{/if}} + {{#if (and (eq item this.displayArrayTruncated.lastObject) (gte @displayArray.length 10))}} + {{#if (is-array @rootRoute)}} + + View all {{lowercase @label}}. + + {{else}} + + View all {{lowercase @label}}. + + {{/if}} + {{/if}} + {{/unless}} + {{/each}} + + {{/if}} +
+ {{else}} + + {{if + (gte @displayArray.length 10) + (concat @displayArray ", and " (dec 5 @displayArray.length) " more.") + @displayArray + }} + + {{/if}} +
\ No newline at end of file diff --git a/ui/lib/core/addon/components/info-table-item-array.js b/ui/lib/core/addon/components/info-table-item-array.js new file mode 100644 index 0000000..2b78184 --- /dev/null +++ b/ui/lib/core/addon/components/info-table-item-array.js @@ -0,0 +1,93 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Component from '@glimmer/component'; +import { tracked } from '@glimmer/tracking'; +import { inject as service } from '@ember/service'; +import { action } from '@ember/object'; + +/** + * @module InfoTableItemArray + * The `InfoTableItemArray` component handles arrays in the info-table-row component. + * If an array has more than 10 items, then only 5 are displayed and a count of the total items is displayed next to the five. + * If isLink is true, a link can be set for the user to click on a specific array item. + * If a wildcard may appear in an item's string, then you can use the modelType and wildcardLabel parameters to + * return a wildcard count similar to what is done in the searchSelect component. + * + * @example + * ```js + * ." + @param {array} displayArray - The array of data to be displayed. (In InfoTableRow this comes from the @value arg.) If the array length > 10, and @doNotTruncate is false only 5 will show with a count of the number hidden. + * @param {boolean} [isLink] - Indicates if the item should contain a link-to component. Only setup for arrays, but this could be changed if needed. + * @param {string || array} [rootRoute="vault.cluster.secrets.backend.list-root"] - Tells what route the link should go to when selecting "view all". If the route requires more than one dynamic param, insert an array. + * @param {string || array} [itemRoute=vault.cluster.secrets.backend.show] - Tells what route the link should go to when selecting the individual item. If the route requires more than one dynamic param, insert an array. + * @param {string} [modelType] - Tells which model you want to query and set allOptions. Used in conjunction with isLink. + * @param {string} [wildcardLabel] - When you want the component to return a count on the model for options returned when using a wildcard, you must provide a label for the count, e.g. role. Should be singular. + * @param {string} [backend] - To specify which backend to point the link to. + * @param {boolean} [doNotTruncate=false] - Determines whether to show the View all "roles" link. Otherwise uses the ReadMore component's "See More" toggle + * @param {boolean} [renderItemName=false] - If true renders the item name instead of its id + */ +export default class InfoTableItemArray extends Component { + @service store; + @tracked allOptions = null; + @tracked itemNameById; // object is only created if renderItemName=true + @tracked fetchComplete = false; + + get rootRoute() { + return this.args.rootRoute || 'vault.cluster.secrets.backend.list-root'; + } + + get itemRoute() { + return this.args.itemRoute || 'vault.cluster.secrets.backend.show'; + } + + get doNotTruncate() { + return this.args.doNotTruncate || false; + } + + get displayArrayTruncated() { + const { displayArray } = this.args; + if (!displayArray) return null; + if (displayArray.length >= 10 && !this.args.doNotTruncate) { + // if the array is 10 or more in length, only display the first 5 + return displayArray.slice(0, 5); + } + return displayArray; + } + + @action async fetchOptions() { + if (this.args.isLink && this.args.modelType) { + const queryOptions = this.args.backend ?
{ backend: this.args.backend } : {}; + + const modelRecords = await this.store.query(this.args.modelType, queryOptions).catch((err) => { + if (err.httpStatus === 404) { + return []; + } else { + return null; + } + }); + + this.allOptions = modelRecords ? modelRecords.mapBy('id') : null; + if (this.args.renderItemName && modelRecords) { + modelRecords.forEach(({ id, name }) => { + // create key/value pair { item-id: item-name } for each record + this.itemNameById = { ...this.itemNameById, [id]: name }; + }); + } + } + this.fetchComplete = true; + } +} diff --git a/ui/lib/core/addon/components/info-table-row.hbs b/ui/lib/core/addon/components/info-table-row.hbs new file mode 100644 index 0000000..1ceb489 --- /dev/null +++ b/ui/lib/core/addon/components/info-table-row.hbs @@ -0,0 +1,109 @@ +{{#if (or (has-block) this.isVisible)}} +
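A hedged sketch of the truncation rule implemented by `displayArrayTruncated` above (sample data invented):

```js
// Arrays of 10 or more items are cut to the first 5 unless doNotTruncate is set.
const displayArray = Array.from({ length: 12 }, (_, i) => `role-${i}`);
const doNotTruncate = false;
const truncated =
  displayArray.length >= 10 && !doNotTruncate ? displayArray.slice(0, 5) : displayArray;
console.log(truncated); // ['role-0', 'role-1', 'role-2', 'role-3', 'role-4']
```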
+
+ {{#if @label}} + {{#if this.hasLabelOverflow}} + + + {{@label}} + + +
+ {{@label}} +
+
+
+ {{else}} + {{@label}} + {{/if}} + {{#if @helperText}} +
+ {{@helperText}} +
+ {{/if}} + {{else}} + + {{/if}} +
+
+ {{#if @addCopyButton}} +
+ + + +
+ {{/if}} + {{#if (has-block)}} + {{yield}} + {{else if this.valueIsBoolean}} + {{#if @value}} + + Yes + {{else}} + + No + {{/if}} + {{! @alwaysRender (this.isVisible) is still true }} + {{else if this.valueIsEmpty}} + {{#if @defaultShown}} + {{@defaultShown}} + {{else}} + + {{/if}} + {{else if @formatDate}} + {{date-format @value @formatDate}} + {{else if @formatTtl}} + {{this.formattedTtl}} + {{else}} + {{#if (eq @type "array")}} + + {{else}} + {{#if @tooltipText}} + + + {{this.value}} + + + +
+ {{@tooltipText}} +
+
+
+
+ {{else}} + {{@value}} + {{/if}} + {{/if}} + {{/if}} +
+
+{{/if}} \ No newline at end of file diff --git a/ui/lib/core/addon/components/info-table-row.js b/ui/lib/core/addon/components/info-table-row.js new file mode 100644 index 0000000..9e6b610 --- /dev/null +++ b/ui/lib/core/addon/components/info-table-row.js @@ -0,0 +1,84 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { typeOf } from '@ember/utils'; +import Component from '@glimmer/component'; +import { tracked } from '@glimmer/tracking'; +import { action } from '@ember/object'; +import { convertFromSeconds, largestUnitFromSeconds } from 'core/utils/duration-utils'; + +/** + * @module InfoTableRow + * `InfoTableRow` displays a label and a value in a table-row style manner. The component is responsive so + * that the value breaks under the label on smaller viewports. + * + * @example + * ```js + * + * ``` + * + * @param label=null {string} - The display name for the value. + * @param helperText=null {string} - Text to describe the value displayed beneath the label. + * @param value=null {any} - The data to be displayed - by default the content of the component will only show if there is a value. Also note that special handling is given to boolean values - they will render `Yes` for true and `No` for false. Overridden by block if exists + * @param [alwaysRender=false] {Boolean} - Indicates if the component content should always be rendered. When false, the value of `value` will be used to determine if the component should render. + * @param [defaultShown] {String} - Text that renders as value if alwaysRender=true. Eg. "Vault default" + * @param [tooltipText] {String} - Text if a tooltip should display over the value. + * @param [isTooltipCopyable] {Boolean} - Allows tooltip click to copy + * @param [type=array] {string} - The type of value being passed in. This is used when you want to trim an array. For example, if you have an array value that can be 15+ items long, this will trim it to show 5 and count how many more there are + * @param [isLink=true] {Boolean} - Passed through to InfoTableItemArray. Indicates if the item should contain a link-to component. Only setup for arrays, but this could be changed if needed. + * @param [modelType=null] {string} - Passed through to InfoTableItemArray. Tells what model you want data for the allOptions to be returned from. Used in conjunction with isLink. + * @param [queryParam] {String} - Passed through to InfoTableItemArray. If you want to specify a tab for the View All XX link to display. Ex: role + * @param [backend] {String} - Passed through to InfoTableItemArray. To specify which secrets backend to point the link to. Ex: transformation + * @param [viewAll] {String} - Passed through to InfoTableItemArray. Specify the word at the end of the View all link.
+ */ + +export default class InfoTableRowComponent extends Component { + @tracked + hasLabelOverflow = false; // is calculated and set in didInsertElement + + get isVisible() { + return this.args.alwaysRender || !this.valueIsEmpty; + } + + get valueIsBoolean() { + return typeOf(this.args.value) === 'boolean'; + } + + get valueIsEmpty() { + const { value } = this.args; + if (typeOf(value) === 'array' && value.length === 0) { + return true; + } + switch (value) { + case undefined: + return true; + case null: + return true; + case '': + return true; + default: + return false; + } + } + get formattedTtl() { + const { value } = this.args; + if (Number.isInteger(value)) { + const unit = largestUnitFromSeconds(value); + return `${convertFromSeconds(value, unit)}${unit}`; + } + return value; + } + + @action + calculateLabelOverflow(el) { + const labelDiv = el; + const labelText = el.querySelector('.is-label'); + if (labelDiv && labelText) { + if (labelText.offsetWidth > labelDiv.offsetWidth) { + this.hasLabelOverflow = true; + } + } + } +} diff --git a/ui/lib/core/addon/components/info-table.hbs b/ui/lib/core/addon/components/info-table.hbs new file mode 100644 index 0000000..f813395 --- /dev/null +++ b/ui/lib/core/addon/components/info-table.hbs @@ -0,0 +1,23 @@ + \ No newline at end of file diff --git a/ui/lib/core/addon/components/info-table.js b/ui/lib/core/addon/components/info-table.js new file mode 100644 index 0000000..39b5648 --- /dev/null +++ b/ui/lib/core/addon/components/info-table.js @@ -0,0 +1,29 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Component from '@glimmer/component'; + +/** + * @module InfoTable + * InfoTable components are a table with a single column and header. They are used to render a list of InfoTableRow components. + * + * @example + * ```js + * + * ``` + * @param {String} [title=Info Table] - The title of the table. Used for accessibility purposes. + * @param {String} header=null - The column header. + * @param {Array} items=null - An array of strings which will be used as the InfoTableRow value. + */ + +export default class InfoTable extends Component { + get title() { + return this.args.title || 'Info Table'; + } +} diff --git a/ui/lib/core/addon/components/info-tooltip.hbs b/ui/lib/core/addon/components/info-tooltip.hbs new file mode 100644 index 0000000..73a15c8 --- /dev/null +++ b/ui/lib/core/addon/components/info-tooltip.hbs @@ -0,0 +1,24 @@ + + + + + + +
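A sketch of what `formattedTtl` in InfoTableRow above produces for an integer value; the exact unit strings returned by the duration utils are an assumption here:

```js
import { convertFromSeconds, largestUnitFromSeconds } from 'core/utils/duration-utils';

const value = 86400; // seconds
if (Number.isInteger(value)) {
  const unit = largestUnitFromSeconds(value); // assumed to return 'd' for a day
  console.log(`${convertFromSeconds(value, unit)}${unit}`); // assumed output: '1d'
}
```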
+ {{yield}} +
+
+
+
\ No newline at end of file diff --git a/ui/lib/core/addon/components/info-tooltip.js b/ui/lib/core/addon/components/info-tooltip.js new file mode 100644 index 0000000..83831e8 --- /dev/null +++ b/ui/lib/core/addon/components/info-tooltip.js @@ -0,0 +1,27 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Component from '@glimmer/component'; +import { action } from '@ember/object'; + +/** + * @module JsonEditor + * + * @example + * ```js + * + * ``` + * + * @param {string} [verticalPosition] - vertical position specification (above, below) + * @param {string} [horizontalPosition] - horizontal position specification (center, auto-right) + + */ + +export default class InfoTooltip extends Component { + @action + preventSubmit(e) { + e.preventDefault(); + } +} diff --git a/ui/lib/core/addon/components/input-search.hbs b/ui/lib/core/addon/components/input-search.hbs new file mode 100644 index 0000000..22b35c7 --- /dev/null +++ b/ui/lib/core/addon/components/input-search.hbs @@ -0,0 +1,18 @@ +
+
+ {{#if @label}} + + {{/if}} + {{#if @subText}} +

{{@subText}}

+ {{/if}} + +
+
\ No newline at end of file diff --git a/ui/lib/core/addon/components/input-search.js b/ui/lib/core/addon/components/input-search.js new file mode 100644 index 0000000..0914684 --- /dev/null +++ b/ui/lib/core/addon/components/input-search.js @@ -0,0 +1,29 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Component from '@glimmer/component'; +import { action } from '@ember/object'; +import { tracked } from '@glimmer/tracking'; + +export default class inputSelect extends Component { + /* + * @public + * @param Function + * + * Function called when any of the inputs change + * + */ + @tracked searchInput = ''; + + constructor() { + super(...arguments); + this.searchInput = this.args?.initialValue; + } + + @action + inputChanged() { + this.args.onChange(this.searchInput); + } +} diff --git a/ui/lib/core/addon/components/json-editor.hbs b/ui/lib/core/addon/components/json-editor.hbs new file mode 100644 index 0000000..620057f --- /dev/null +++ b/ui/lib/core/addon/components/json-editor.hbs @@ -0,0 +1,48 @@ +
+ {{#if this.getShowToolbar}} +
+ + + + {{yield}} +
+ + + +
+
+
+ {{/if}} +
+ + {{#if @helpText}} +
+

{{@helpText}}

+
+ {{/if}} +
\ No newline at end of file diff --git a/ui/lib/core/addon/components/json-editor.js b/ui/lib/core/addon/components/json-editor.js new file mode 100644 index 0000000..831f32c --- /dev/null +++ b/ui/lib/core/addon/components/json-editor.js @@ -0,0 +1,50 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Component from '@glimmer/component'; +import { action } from '@ember/object'; + +/** + * @module JsonEditor + * + * @example + * ```js + * + * ``` + * + * @param {string} [title] - Name above codemirror view + * @param {string} value - a string that comes from codemirror; the value inside the codemirror display + * @param {Function} [valueUpdated] - action to perform when you edit the codemirror value. + * @param {Function} [onFocusOut] - action to perform when you focus out of codemirror. + * @param {string} [helpText] - helper text. + * @param {Object} [extraKeys] - Provides keyboard shortcut methods for things like saving on shift + enter. + * @param {Array} [gutters] - An array of CSS class names or class name / CSS string pairs, each of which defines a width (and optionally a background), and which will be used to draw the background of the gutters. + * @param {string} [mode] - The mode defined for styling. Right now we only import ruby, so mode must be ruby or it defaults to javascript. If you want another language you need to import it into the modifier. + * @param {Boolean} [readOnly] - Sets the view to readOnly, allowing for copying but no editing. It also hides the cursor. Defaults to false. + * @param {String} [theme] - Specify or customize the look via a named "theme" class in scss. + * @param {String} [value] - Value within the display. Generally, a json string. + * @param {String} [viewportMargin] - Size of viewport. Often set to "Infinity" to load/show all text regardless of length. + */ + +export default class JsonEditorComponent extends Component { + get getShowToolbar() { + return this.args.showToolbar === false ? false : true; + } + + @action + onUpdate(...args) { + if (!this.args.readOnly) { + // catching a situation in which the user is not readOnly and has not provided a valueUpdated function to the instance + this.args.valueUpdated(...args); + } + } + + @action + onFocus(...args) { + if (this.args.onFocusOut) { + this.args.onFocusOut(...args); + } + } +} diff --git a/ui/lib/core/addon/components/key-value-header.hbs b/ui/lib/core/addon/components/key-value-header.hbs new file mode 100644 index 0000000..28e048a --- /dev/null +++ b/ui/lib/core/addon/components/key-value-header.hbs @@ -0,0 +1,22 @@ + \ No newline at end of file diff --git a/ui/lib/core/addon/components/key-value-header.js b/ui/lib/core/addon/components/key-value-header.js new file mode 100644 index 0000000..b0d03fa --- /dev/null +++ b/ui/lib/core/addon/components/key-value-header.js @@ -0,0 +1,103 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import Component from '@glimmer/component'; +import utils from 'vault/lib/key-utils'; +import { encodePath } from 'vault/utils/path-encoding-helpers'; + +/** + * @module KeyValueHeader + * KeyValueHeader components show breadcrumbs for secret engines. + * + * @example + * ```js + + * ``` + * @param {string} [mode=null] - Used to set the currentPath. + * @param {string} [baseKey=null] - Used to generate the path backward. + * @param {string} [path=null] - The fallback path. + * @param {string} [root=null] - Used to set the secretPath.
+ * @param {boolean} [showCurrent=true] - Boolean to show the second part of the breadcrumb, ex: the secret's name. + * @param {boolean} [linkToPaths=true] - If true link to the path. + * @param {boolean} [isEngine=false] - Change the LinkTo if the path is coming from an engine. + */ + +export default class KeyValueHeader extends Component { + get showCurrent() { + return this.args.showCurrent || true; + } + + get linkToPaths() { + return this.args.linkToPaths || true; + } + + stripTrailingSlash(str) { + return str[str.length - 1] === '/' ? str.slice(0, -1) : str; + } + + get currentPath() { + if (!this.args.mode || this.showCurrent === false) { + return this.args.path; + } + return `vault.cluster.secrets.backend.${this.args.mode}`; + } + + get secretPath() { + const crumbs = []; + const root = this.args.root; + const baseKey = this.args.baseKey?.display || this.args.baseKey?.id; + const baseKeyModel = encodePath(this.args.baseKey?.id); + + if (root) { + crumbs.push(root); + } + + if (!baseKey) { + return crumbs; + } + + const path = this.args.path; + const currentPath = this.currentPath; + const showCurrent = this.showCurrent; + const ancestors = utils.ancestorKeysForKey(baseKey); + const parts = utils.keyPartsForKey(baseKey); + if (ancestors.length === 0) { + crumbs.push({ + label: baseKey, + text: this.stripTrailingSlash(baseKey), + path: currentPath, + model: baseKeyModel, + }); + + if (!showCurrent) { + crumbs.pop(); + } + + return crumbs; + } + + ancestors.forEach((ancestor, index) => { + crumbs.push({ + label: parts[index], + text: this.stripTrailingSlash(parts[index]), + path: path, + model: encodePath(ancestor), + }); + }); + + crumbs.push({ + label: utils.keyWithoutParentKey(baseKey), + text: this.stripTrailingSlash(utils.keyWithoutParentKey(baseKey)), + path: currentPath, + model: baseKeyModel, + }); + + if (!showCurrent) { + crumbs.pop(); + } + + return crumbs; + } +} diff --git a/ui/lib/core/addon/components/kv-object-editor.hbs b/ui/lib/core/addon/components/kv-object-editor.hbs new file mode 100644 index 0000000..5f63876 --- /dev/null +++ b/ui/lib/core/addon/components/kv-object-editor.hbs @@ -0,0 +1,71 @@ +
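A hedged sketch of the inputs that `secretPath` in KeyValueHeader above works from; the exact return shapes of the key-utils helpers are assumptions based on how they are called:

```js
import utils from 'vault/lib/key-utils';

const baseKey = 'app/config/db';
console.log(utils.ancestorKeysForKey(baseKey)); // assumed: ['app/', 'app/config/']
console.log(utils.keyPartsForKey(baseKey)); // assumed: ['app/', 'config/', 'db']
// Each ancestor becomes a crumb { label, text, path, model } with the model
// run through encodePath, and the final crumb links to currentPath.
```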
+ + {{#if @validationError}} +
+ +
+ {{/if}} + {{#if this.kvData}} + {{#each this.kvData as |row index|}} +
+
+ +
+
+ {{#if (has-block)}} + {{yield row this.kvData}} + {{else}} + +
+
+
+ + +
+
+ +
+ + Copy & Close + +
+ +{{/if}} \ No newline at end of file diff --git a/ui/lib/replication/addon/templates/mode/secondaries/config-create.hbs b/ui/lib/replication/addon/templates/mode/secondaries/config-create.hbs new file mode 100644 index 0000000..1918026 --- /dev/null +++ b/ui/lib/replication/addon/templates/mode/secondaries/config-create.hbs @@ -0,0 +1,21 @@ +
+

+ Create a path filter config for + {{this.model.config.id}} +

+
+
+ +
+
+ +
+
+ + Cancel + +
+
+ \ No newline at end of file diff --git a/ui/lib/replication/addon/templates/mode/secondaries/config-edit.hbs b/ui/lib/replication/addon/templates/mode/secondaries/config-edit.hbs new file mode 100644 index 0000000..51a1bc3 --- /dev/null +++ b/ui/lib/replication/addon/templates/mode/secondaries/config-edit.hbs @@ -0,0 +1,24 @@ + +
+

+ Edit path filter config for + {{this.model.config.id}} +

+
+
+ +
+
+
+ +
+
+ + Cancel + +
+
+
+ \ No newline at end of file diff --git a/ui/lib/replication/addon/templates/mode/secondaries/config-show.hbs b/ui/lib/replication/addon/templates/mode/secondaries/config-show.hbs new file mode 100644 index 0000000..504f7e6 --- /dev/null +++ b/ui/lib/replication/addon/templates/mode/secondaries/config-show.hbs @@ -0,0 +1,43 @@ + + + {{#if this.model.config.mode}} + + Edit config + + {{else}} + + Create config + + {{/if}} + + +
+

+ Mount filter config for + {{this.model.config.id}} +

+
+{{#if this.model.config.mode}} +
+ + +
+ {{#each this.model.config.paths as |path|}} +
+ {{path}} +
+ {{/each}} +
+
+
+{{else}} + +{{/if}} \ No newline at end of file diff --git a/ui/lib/replication/addon/templates/mode/secondaries/index.hbs b/ui/lib/replication/addon/templates/mode/secondaries/index.hbs new file mode 100644 index 0000000..6c4584e --- /dev/null +++ b/ui/lib/replication/addon/templates/mode/secondaries/index.hbs @@ -0,0 +1,79 @@ +{{#if this.model.replicationAttrs.isPrimary}} + + + {{#if this.model.replicationAttrs.knownSecondaries.length}} + {{#if this.model.canRevokeSecondary}} + + Revoke secondary + + {{/if}} + {{/if}} + {{#if this.model.canAddSecondary}} + + Add secondary + + {{/if}} + + + {{#if this.model.replicationAttrs.knownSecondaries.length}} + {{#each this.model.replicationAttrs.knownSecondaries as |secondary|}} +
+
+
+ {{secondary}} +
+
+ {{#if (or (eq this.replicationMode "performance") this.model.canRevokeSecondary)}} + + + + {{/if}} +
+
+
+
+ {{/each}} + {{else}} + + {{#if this.model.canAddSecondary}} + + Add secondary + + {{/if}} + + Learn more + + + {{/if}} +{{/if}} \ No newline at end of file diff --git a/ui/lib/replication/addon/templates/mode/secondaries/revoke.hbs b/ui/lib/replication/addon/templates/mode/secondaries/revoke.hbs new file mode 100644 index 0000000..e183e6a --- /dev/null +++ b/ui/lib/replication/addon/templates/mode/secondaries/revoke.hbs @@ -0,0 +1,40 @@ +
+

+ Revoke a secondary token +

+
+ +
+ +
+ +
+

+ The secondary id to revoke; given initially to generate a secondary token. +

+
+
+
+ + Revoke + +
+
+ {{#unless this.isRevoking}} + + Cancel + + {{/unless}} +
+
\ No newline at end of file diff --git a/ui/lib/replication/addon/utils/decode-config-from-jwt.js b/ui/lib/replication/addon/utils/decode-config-from-jwt.js new file mode 100644 index 0000000..c4ddca3 --- /dev/null +++ b/ui/lib/replication/addon/utils/decode-config-from-jwt.js @@ -0,0 +1,39 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { decodeString } from 'core/utils/b64'; + +/* + * @param token - Replication Secondary Activation Token + * @returns config Object if successful | undefined if not + * + */ +export default function (token) { + if (!token) { + return; + } + const tokenParts = token.split('.'); + // config is the second item in the JWT + let [, configB64] = tokenParts; + let config; + + if (tokenParts.length !== 3) { + return; + } + + // JWTs strip padding from their b64 parts. + // since we're converting to a typed array before + // decoding back to utf-8, we need to add any padding back + while (configB64.length % 4 !== 0) { + configB64 = configB64 + '='; + } + try { + config = JSON.parse(decodeString(configB64)); + } catch (e) { + // swallow error + } + + return config; +} diff --git a/ui/lib/replication/config/environment.js b/ui/lib/replication/config/environment.js new file mode 100644 index 0000000..35de50e --- /dev/null +++ b/ui/lib/replication/config/environment.js @@ -0,0 +1,16 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +/* eslint-env node */ +'use strict'; + +module.exports = function (environment) { + const ENV = { + modulePrefix: 'replication', + environment, + }; + + return ENV; +}; diff --git a/ui/lib/replication/index.js b/ui/lib/replication/index.js new file mode 100644 index 0000000..5260ac2 --- /dev/null +++ b/ui/lib/replication/index.js @@ -0,0 +1,23 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +/* eslint-env node */ +/* eslint-disable ember/avoid-leaking-state-in-ember-objects */ +/* eslint-disable node/no-extraneous-require */ +'use strict'; + +const EngineAddon = require('ember-engines/lib/engine-addon'); + +module.exports = EngineAddon.extend({ + name: 'replication', + + lazyLoading: { + enabled: true, + }, + + isDevelopingAddon() { + return true; + }, +}); diff --git a/ui/lib/replication/package.json b/ui/lib/replication/package.json new file mode 100644 index 0000000..108f883 --- /dev/null +++ b/ui/lib/replication/package.json @@ -0,0 +1,16 @@ +{ + "name": "replication", + "keywords": [ + "ember-addon", + "ember-engine" + ], + "dependencies": { + "ember-cli-htmlbars": "*", + "ember-cli-babel": "*" + }, + "ember-addon": { + "paths": [ + "../core" + ] + } +} diff --git a/ui/lib/service-worker-authenticated-download/index.js b/ui/lib/service-worker-authenticated-download/index.js new file mode 100644 index 0000000..c7a9c04 --- /dev/null +++ b/ui/lib/service-worker-authenticated-download/index.js @@ -0,0 +1,21 @@ +/** + * Copyright (c) HashiCorp, Inc. 
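Since the middle segment of a JWT is base64url-encoded without padding, `decode-config-from-jwt` above re-adds `=` until the length is a multiple of 4; a minimal sketch (token fragment invented):

```js
// Illustrative only - mirrors the while-loop in decode-config-from-jwt above.
let configB64 = 'eyJhZGRyIjoidmF1bHQifQ'; // hypothetical unpadded segment, 22 chars
while (configB64.length % 4 !== 0) {
  configB64 = configB64 + '='; // two '=' are appended here
}
// configB64 can now be base64-decoded and JSON.parsed into a config object.
```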
+ * SPDX-License-Identifier: MPL-2.0 + */ + +'use strict'; + +module.exports = { + name: require('./package').name, + + isDevelopingAddon() { + return true; + }, + + serverMiddleware({ app }) { + app.use((req, res, next) => { + res.setHeader('Service-Worker-Allowed', '/'); + next(); + }); + }, +}; diff --git a/ui/lib/service-worker-authenticated-download/package.json b/ui/lib/service-worker-authenticated-download/package.json new file mode 100644 index 0000000..e47a462 --- /dev/null +++ b/ui/lib/service-worker-authenticated-download/package.json @@ -0,0 +1,21 @@ +{ + "name": "service-worker-authenticated-download", + "keywords": [ + "ember-addon", + "ember-service-worker-plugin" + ], + "ember-addon": { + "before": [ + "serve-files-middleware", + "broccoli-serve-files", + "history-support-middleware", + "proxy-server-middleware" + ] + }, + "dependencies": { + "ember-cli-babel": "*", + "ember-auto-import": "*", + "ember-source": "*", + "ember-service-worker": "*" + } +} diff --git a/ui/lib/service-worker-authenticated-download/service-worker-registration/index.js b/ui/lib/service-worker-authenticated-download/service-worker-registration/index.js new file mode 100644 index 0000000..06630e6 --- /dev/null +++ b/ui/lib/service-worker-authenticated-download/service-worker-registration/index.js @@ -0,0 +1,13 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { addSuccessHandler } from 'ember-service-worker/service-worker-registration'; + +addSuccessHandler(function (registration) { + // attempt to unregister the service worker on unload because we're not doing any sort of caching + window.addEventListener('unload', function () { + registration.unregister(); + }); +}); diff --git a/ui/lib/service-worker-authenticated-download/service-worker/index.js b/ui/lib/service-worker-authenticated-download/service-worker/index.js new file mode 100644 index 0000000..ef719ff --- /dev/null +++ b/ui/lib/service-worker-authenticated-download/service-worker/index.js @@ -0,0 +1,56 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { createUrlRegEx, urlMatchesAnyPattern } from 'ember-service-worker/service-worker/url-utils'; + +var patterns = ['/v1/sys/storage/raft/snapshot']; +var REGEXES = patterns.map(createUrlRegEx); + +function sendMessage(message) { + return self.clients.matchAll({ includeUncontrolled: true, type: 'window' }).then(function (results) { + var client = results[0]; + return new Promise(function (resolve, reject) { + var messageChannel = new MessageChannel(); + messageChannel.port2.onmessage = function (event) { + if (event.data.error) { + reject(event.data.error); + } else { + resolve(event.data.token); + } + }; + + client.postMessage(message, [messageChannel.port1]); + }); + }); +} + +function authenticateRequest(request) { + // copy the request headers so we can mutate them + const headers = new Headers(request.headers); + + // get and set vault token so the request is authenticated + return sendMessage({ action: 'getToken' }).then(function (token) { + headers.set('X-Vault-Token', token); + + // continue the fetch with the new request + // that has the auth header + return fetch( + new Request(request.url, { + method: request.method, + headers, + }) + ); + }); +} + +self.addEventListener('fetch', function (fetchEvent) { + const request = fetchEvent.request; + + if (urlMatchesAnyPattern(request.url, REGEXES) && request.method === 'GET') { + return fetchEvent.respondWith(authenticateRequest(request)); + } else { + return fetchEvent.respondWith(fetch(request)); + } +}); diff --git a/ui/metadata.json b/ui/metadata.json new file mode 100644 index 0000000..3431708 --- /dev/null +++ b/ui/metadata.json @@ -0,0 +1,6 @@ +{ + "versions": { + "main": "https://vault-storybook.vercel.app/" + } +} + diff --git a/ui/mirage/config.js b/ui/mirage/config.js new file mode 100644 index 0000000..1ce4dea --- /dev/null +++ b/ui/mirage/config.js @@ -0,0 +1,24 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import ENV from 'vault/config/environment'; +import handlers from './handlers'; + +// remember to export handler name from mirage/handlers/index.js file + +export default function () { + this.namespace = 'v1'; + + // start ember in development running mirage -> yarn start:mirage handlerName + // if handler is not provided, general config will be used + // this is useful for feature development when a specific and limited config is required + const { handler } = ENV['ember-cli-mirage']; + const handlerName = handler in handlers ? handler : 'base'; + handlers[handlerName](this); + this.logging = false; // disables passthrough logging which spams the console + console.log(`⚙ Using ${handlerName} Mirage request handlers ⚙`); // eslint-disable-line + // passthrough all unhandled requests + this.passthrough(); +} diff --git a/ui/mirage/factories/configuration.js b/ui/mirage/factories/configuration.js new file mode 100644 index 0000000..df07dc8 --- /dev/null +++ b/ui/mirage/factories/configuration.js @@ -0,0 +1,31 @@ +/** + * Copyright (c) HashiCorp, Inc.
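The service worker above requests the Vault token from the page over a MessageChannel; a hedged sketch of a page-side responder (this handler is not part of the diff, and `readVaultToken` is a hypothetical lookup):

```js
// Illustrative page-side counterpart to sendMessage({ action: 'getToken' }).
navigator.serviceWorker.addEventListener('message', (event) => {
  if (event.data && event.data.action === 'getToken') {
    const port = event.ports[0]; // port1 of the worker's MessageChannel
    try {
      port.postMessage({ token: readVaultToken() }); // hypothetical token lookup
    } catch (error) {
      port.postMessage({ error: error.message }); // rejects the worker's promise
    }
  }
});
```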
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { Factory, trait } from 'ember-cli-mirage'; + +export default Factory.extend({ + auth: null, + data: null, // populated via traits + lease_duration: 0, + lease_id: '', + renewable: true, + request_id: '22068a49-a504-41ad-b5b0-1eac71659190', + warnings: null, + wrap_info: null, + + // add servers to test raft storage configuration + withRaft: trait({ + afterCreate(config, server) { + if (!config.data) { + config.data = { + config: { + index: 0, + servers: server.serializerOrRegistry.serialize(server.createList('server', 2)), + }, + }; + } + }, + }), +}); diff --git a/ui/mirage/factories/feature.js b/ui/mirage/factories/feature.js new file mode 100644 index 0000000..69935f6 --- /dev/null +++ b/ui/mirage/factories/feature.js @@ -0,0 +1,12 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { Factory } from 'ember-cli-mirage'; + +export default Factory.extend({ + feature_flags() { + return []; // VAULT_CLOUD_ADMIN_NAMESPACE + }, +}); diff --git a/ui/mirage/factories/kubernetes-config.js b/ui/mirage/factories/kubernetes-config.js new file mode 100644 index 0000000..1c99abf --- /dev/null +++ b/ui/mirage/factories/kubernetes-config.js @@ -0,0 +1,16 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { Factory } from 'ember-cli-mirage'; + +export default Factory.extend({ + kubernetes_host: 'https://192.168.99.100:8443', + kubernetes_ca_cert: + '-----BEGIN CERTIFICATE-----\nMIIDNTCCAh2gApGgAwIBAgIULNEk+01LpkDeJujfsAgIULNEkAgIULNEckApGgAwIBAg+01LpkDeJuj\n-----END CERTIFICATE-----', + disable_local_ca_jwt: true, + + // property used only for record lookup and filtered from response payload + path: null, +}); diff --git a/ui/mirage/factories/kubernetes-role.js b/ui/mirage/factories/kubernetes-role.js new file mode 100644 index 0000000..9b9f7c5 --- /dev/null +++ b/ui/mirage/factories/kubernetes-role.js @@ -0,0 +1,59 @@ +/** + * Copyright (c) HashiCorp, Inc. 
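A sketch of how the `withRaft` trait above might be exercised from an ember-cli-mirage test context (the `server` instance is assumed to be in scope):

```js
// Illustrative: afterCreate fills data.config with two serialized server records.
const config = server.create('configuration', 'withRaft');
console.log(config.data.config.index); // 0
console.log(config.data.config.servers.length); // 2
```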
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { Factory, trait } from 'ember-cli-mirage'; + +const generated_role_rules = `rules: +- apiGroups: [""] + resources: ["secrets", "services"] + verbs: ["get", "watch", "list", "create", "delete", "deletecollection", "patch", "update"] +`; +const name_template = '{{.FieldName | lowercase}}'; +const extra_annotations = { foo: 'bar', baz: 'qux' }; +const extra_labels = { foobar: 'baz', barbaz: 'foo' }; + +export default Factory.extend({ + name: (i) => `role-${i}`, + allowed_kubernetes_namespaces: '*', + allowed_kubernetes_namespace_selector: '', + token_max_ttl: 86400, + token_default_ttl: 600, + service_account_name: 'default', + kubernetes_role_name: '', + kubernetes_role_type: 'Role', + generated_role_rules: '', + name_template: '', + extra_annotations: null, + extra_labels: null, + + afterCreate(record) { + // only one of these three props can be defined + if (record.generated_role_rules) { + record.service_account_name = null; + record.kubernetes_role_name = null; + } else if (record.kubernetes_role_name) { + record.service_account_name = null; + record.generated_role_rules = null; + } else if (record.service_account_name) { + record.generated_role_rules = null; + record.kubernetes_role_name = null; + } + }, + withRoleName: trait({ + service_account_name: null, + generated_role_rules: null, + kubernetes_role_name: 'vault-k8s-secrets-role', + extra_annotations, + name_template, + }), + withRoleRules: trait({ + service_account_name: null, + kubernetes_role_name: null, + generated_role_rules, + extra_annotations, + extra_labels, + name_template, + }), +}); diff --git a/ui/mirage/factories/mfa-duo-method.js b/ui/mirage/factories/mfa-duo-method.js new file mode 100644 index 0000000..25d98b1 --- /dev/null +++ b/ui/mirage/factories/mfa-duo-method.js @@ -0,0 +1,24 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { Factory } from 'ember-cli-mirage'; + +export default Factory.extend({ + api_hostname: 'api-foobar.duosecurity.com', + mount_accessor: '', + name: '', // returned but cannot be set at this time + namespace_id: 'root', + pushinfo: '', + type: 'duo', + use_passcode: false, + username_template: '', + + afterCreate(record) { + if (record.name) { + console.warn('Endpoint ignored these unrecognized parameters: [name]'); // eslint-disable-line + record.name = ''; + } + }, +}); diff --git a/ui/mirage/factories/mfa-login-enforcement.js b/ui/mirage/factories/mfa-login-enforcement.js new file mode 100644 index 0000000..aaa0332 --- /dev/null +++ b/ui/mirage/factories/mfa-login-enforcement.js @@ -0,0 +1,51 @@ +/** + * Copyright (c) HashiCorp, Inc. 
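Per the `afterCreate` hook in the kubernetes-role factory above, only one of `generated_role_rules`, `kubernetes_role_name`, and `service_account_name` survives on a record; a hedged sketch using the `withRoleRules` trait (test context assumed):

```js
// Illustrative: the trait nulls the two competing fields.
const role = server.create('kubernetes-role', 'withRoleRules');
console.log(role.service_account_name); // null
console.log(role.kubernetes_role_name); // null
console.log(role.generated_role_rules.startsWith('rules:')); // true
```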
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { Factory } from 'ember-cli-mirage'; + +export default Factory.extend({ + auth_method_accessors: null, + auth_method_types: null, + identity_entity_ids: null, + identity_group_ids: null, + mfa_method_ids: null, + name: null, + namespace_id: 'root', + + afterCreate(record, server) { + // initialize arrays and stub some data if not provided + if (!record.name) { + // use random string for generated name + record.update('name', (Math.random() + 1).toString(36).substring(2)); + } + if (!record.mfa_method_ids) { + // aggregate all existing methods and choose a random one + const methods = ['Totp', 'Duo', 'Okta', 'Pingid'].reduce((methods, type) => { + const records = server.schema.db[`mfa${type}Methods`].where({}); + if (records.length) { + methods.push(...records); + } + return methods; + }, []); + // if no methods were found, create one since at least one is required for login enforcements + if (!methods.length) { + methods.push(server.create('mfa-totp-method')); + } + const method = methods.length ? methods[Math.floor(Math.random() * methods.length)] : null; + record.update('mfa_method_ids', method ? [method.id] : []); + } + const targets = { + auth_method_accessors: ['auth_userpass_bb95c2b1'], + auth_method_types: ['userpass'], + identity_group_ids: ['34db6b52-591e-bc22-8af0-4add5e167326'], + identity_entity_ids: ['f831667b-7392-7a1c-c0fc-33d48cb1c57d'], + }; + for (const key in targets) { + if (!record[key]) { + record.update(key, targets[key]); + } + } + }, +}); diff --git a/ui/mirage/factories/mfa-method.js b/ui/mirage/factories/mfa-method.js new file mode 100644 index 0000000..e807b3d --- /dev/null +++ b/ui/mirage/factories/mfa-method.js @@ -0,0 +1,17 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { Factory } from 'ember-cli-mirage'; + +export default Factory.extend({ + type: 'okta', + uses_passcode: false, + + afterCreate(mfaMethod) { + if (mfaMethod.type === 'totp') { + mfaMethod.uses_passcode = true; + } + }, +}); diff --git a/ui/mirage/factories/mfa-okta-method.js b/ui/mirage/factories/mfa-okta-method.js new file mode 100644 index 0000000..cd60f65 --- /dev/null +++ b/ui/mirage/factories/mfa-okta-method.js @@ -0,0 +1,23 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { Factory } from 'ember-cli-mirage'; + +export default Factory.extend({ + base_url: 'okta.com', + mount_accessor: '', + name: '', // returned but cannot be set at this time + namespace_id: 'root', + org_name: 'dev-foobar', + type: 'okta', + username_template: '', // returned but cannot be set at this time + + afterCreate(record) { + if (record.name) { + console.warn('Endpoint ignored these unrecognized parameters: [name]'); // eslint-disable-line + record.name = ''; + } + }, +});
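+ +// illustrative usage (not upstream code): passing a name triggers the afterCreate warning +// above and the name is reset, e.g. server.create('mfa-okta-method', { name: 'my-okta' }) +// logs the warning and yields a record whose name is '' diff --git a/ui/mirage/factories/mfa-pingid-method.js b/ui/mirage/factories/mfa-pingid-method.js new file mode 100644 index 0000000..3d3e63a --- /dev/null +++ b/ui/mirage/factories/mfa-pingid-method.js @@ -0,0 +1,17 @@ +/** + * Copyright (c) HashiCorp, Inc.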
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { Factory } from 'ember-cli-mirage'; + +export default Factory.extend({ + use_signature: true, + idp_url: 'https://foobar.pingidentity.com/pingid', + admin_url: 'https://foobar.pingidentity.com/pingid', + authenticator_url: 'https://authenticator.pingone.com/pingid/ppm', + org_alias: 'foobarbaz', + type: 'pingid', + username_template: '', + namespace_id: 'root', +}); diff --git a/ui/mirage/factories/mfa-totp-method.js b/ui/mirage/factories/mfa-totp-method.js new file mode 100644 index 0000000..86eb1b2 --- /dev/null +++ b/ui/mirage/factories/mfa-totp-method.js @@ -0,0 +1,27 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { Factory } from 'ember-cli-mirage'; + +export default Factory.extend({ + algorithm: 'SHA1', + digits: 6, + issuer: 'Vault', + key_size: 20, + max_validation_attempts: 5, + name: '', // returned but cannot be set at this time + namespace_id: 'root', + period: 30, + qr_size: 200, + skew: 1, + type: 'totp', + + afterCreate(record) { + if (record.name) { + console.warn('Endpoint ignored these unrecognized parameters: [name]'); // eslint-disable-line + record.name = ''; + } + }, +}); diff --git a/ui/mirage/factories/open-api-explorer.js b/ui/mirage/factories/open-api-explorer.js new file mode 100644 index 0000000..7f47b46 --- /dev/null +++ b/ui/mirage/factories/open-api-explorer.js @@ -0,0 +1,45 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { Factory } from 'ember-cli-mirage'; +/* eslint-disable ember/avoid-leaking-state-in-ember-objects */ +export default Factory.extend({ + openapi: '3.0.2', + info: { + title: 'HashiCorp Vault API', + description: 'HTTP API that gives you full access to Vault. All API routes are prefixed with `/v1/`.', + version: '1.0.0', + license: { + name: 'Mozilla Public License 2.0', + url: 'https://www.mozilla.org/en-US/MPL/2.0', + }, + }, + paths: { + '/auth/token/create': { + description: 'The token create path is used to create new tokens.', + post: { + summary: 'The token create path is used to create new tokens.', + tags: ['auth'], + responses: { + 200: { + description: 'OK', + }, + }, + }, + }, + '/secret/data/{path}': { + description: 'Location of a secret.', + post: { + summary: 'Location of a secret.', + tags: ['secret'], + responses: { + 200: { + description: 'OK', + }, + }, + }, + }, + }, +}); diff --git a/ui/mirage/factories/secret-engine.js b/ui/mirage/factories/secret-engine.js new file mode 100644 index 0000000..0039379 --- /dev/null +++ b/ui/mirage/factories/secret-engine.js @@ -0,0 +1,25 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { Factory } from 'ember-cli-mirage'; + +export default Factory.extend({ + path: 'foo/', + description: 'secret-engine generated by mirage', + local: true, + sealWrap: true, + // set in afterCreate + accessor: 'type_7f52940', + type: 'kv', + options: null, + + afterCreate(secretEngine) { + if (!secretEngine.options && ['generic', 'kv'].includes(secretEngine.type)) { + secretEngine.options = { + version: '2', + }; + } + }, +}); diff --git a/ui/mirage/factories/server.js b/ui/mirage/factories/server.js new file mode 100644 index 0000000..5e03f76 --- /dev/null +++ b/ui/mirage/factories/server.js @@ -0,0 +1,14 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { Factory } from 'ember-cli-mirage'; + +export default Factory.extend({ + address: '127.0.0.1', + node_id: (i) => `raft_node_${i}`, + protocol_version: '3', + voter: true, + leader: true, +}); diff --git a/ui/mirage/handlers/base.js b/ui/mirage/handlers/base.js new file mode 100644 index 0000000..ba27ea8 --- /dev/null +++ b/ui/mirage/handlers/base.js @@ -0,0 +1,87 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +// base handlers used in mirage config when a specific handler is not specified +const EXPIRY_DATE = '2021-05-12T23:20:50.52Z'; + +export default function (server) { + server.get('/sys/internal/ui/feature-flags', (db) => { + const featuresResponse = db.features.first(); + return { + data: { + feature_flags: featuresResponse ? featuresResponse.feature_flags : null, + }, + }; + }); + + server.get('/sys/health', function () { + return { + initialized: true, + sealed: false, + standby: false, + license: { + expiry: '2021-05-12T23:20:50.52Z', + state: 'stored', + }, + performance_standby: false, + replication_performance_mode: 'disabled', + replication_dr_mode: 'disabled', + server_time_utc: 1622562585, + version: '1.9.0+ent', + cluster_name: 'vault-cluster-e779cd7c', + cluster_id: '5f20f5ab-acea-0481-787e-71ec2ff5a60b', + last_wal: 121, + }; + }); + + server.get('/sys/license/status', function () { + return { + data: { + autoloading_used: false, + persisted_autoload: { + expiration_time: EXPIRY_DATE, + features: ['DR Replication', 'Namespaces', 'Lease Count Quotas', 'Automated Snapshots'], + license_id: '0eca7ef8-ebc0-f875-315e-3cc94a7870cf', + performance_standby_count: 0, + start_time: '2020-04-28T00:00:00Z', + }, + autoloaded: { + expiration_time: EXPIRY_DATE, + features: ['DR Replication', 'Namespaces', 'Lease Count Quotas', 'Automated Snapshots'], + license_id: '0eca7ef8-ebc0-f875-315e-3cc94a7870cf', + performance_standby_count: 0, + start_time: '2020-04-28T00:00:00Z', + }, + }, + }; + }); + + server.get('sys/namespaces', function () { + return { + data: { + keys: [ + 'ns1/', + 'ns2/', + 'ns3/', + 'ns4/', + 'ns5/', + 'ns6/', + 'ns7/', + 'ns8/', + 'ns9/', + 'ns10/', + 'ns11/', + 'ns12/', + 'ns13/', + 'ns14/', + 'ns15/', + 'ns16/', + 'ns17/', + 'ns18/', + ], + }, + }; + }); +} diff --git a/ui/mirage/handlers/clients.js b/ui/mirage/handlers/clients.js new file mode 100644 index 0000000..e54fe2a --- /dev/null +++ b/ui/mirage/handlers/clients.js @@ -0,0 +1,175 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { + isBefore, + startOfMonth, + endOfMonth, + addMonths, + subMonths, + differenceInCalendarMonths, + fromUnixTime, + isAfter, + formatRFC3339, +} from 'date-fns'; +import { parseAPITimestamp } from 'core/utils/date-formatters'; + +// Matches mocked date in client-dashboard-test file +const CURRENT_DATE = new Date('2023-01-13T14:15:00'); +const COUNTS_START = subMonths(CURRENT_DATE, 12); // pretend vault user started cluster 12 months ago +// for testing, we're in the middle of a license/billing period +const LICENSE_START = startOfMonth(subMonths(CURRENT_DATE, 6)); +// upgrade happened 1 month after license start +const UPGRADE_DATE = addMonths(LICENSE_START, 1); + +function getSum(array, key) { + return array.reduce((sum, { counts }) => sum + counts[key], 0); +} + +function getTotalCounts(array) { + return { + distinct_entities: getSum(array, 'entity_clients'), + entity_clients: getSum(array, 'entity_clients'), + non_entity_tokens: getSum(array, 'non_entity_clients'), + non_entity_clients: getSum(array, 'non_entity_clients'), + clients: getSum(array, 'clients'), + }; +} + +function randomBetween(min, max) { + return Math.floor(Math.random() * (max - min + 1) + min); +} + +function arrayOfCounts(max, arrayLength) { + var result = []; + var sum = 0; + for (var i = 0; i < arrayLength - 1; i++) { + result[i] = randomBetween(1, max - (arrayLength - i - 1) - sum); + sum += result[i]; + } + result[arrayLength - 1] = max - sum; + return result.sort((a, b) => b - a); +}
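+ +// worked example (illustrative, not upstream): arrayOfCounts(10, 3) yields three integers >= 1 +// that sum to 10, sorted descending -- e.g. something like [5, 3, 2] + +function generateNamespaceBlock(idx = 0, isLowerCounts = false, ns) { + const min = isLowerCounts ? 10 : 50; + const max = isLowerCounts ? 100 : 5000; + const nsBlock = { + namespace_id: ns?.namespace_id || (idx === 0 ? 'root' : Math.random().toString(36).slice(2, 7) + idx), + namespace_path: ns?.namespace_path || (idx === 0 ? 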
'' : `ns/${idx}`), + counts: {}, + }; + const mounts = []; + Array.from(Array(10)).forEach((mount, index) => { + const mountClients = randomBetween(min, max); + const [nonEntity, entity] = arrayOfCounts(mountClients, 2); + mounts.push({ + mount_path: `auth/authid${index}`, + counts: { + clients: mountClients, + entity_clients: entity, + non_entity_clients: nonEntity, + distinct_entities: entity, + non_entity_tokens: nonEntity, + }, + }); + }); + mounts.sort((a, b) => b.counts.clients - a.counts.clients); + nsBlock.mounts = mounts; + nsBlock.counts = getTotalCounts(mounts); + return nsBlock; +} + +function generateMonths(startDate, endDate, namespaces) { + const startDateObject = startOfMonth(parseAPITimestamp(startDate)); + const endDateObject = startOfMonth(parseAPITimestamp(endDate)); + const numberOfMonths = differenceInCalendarMonths(endDateObject, startDateObject) + 1; + const months = []; + if (isBefore(startDateObject, UPGRADE_DATE) && isBefore(endDateObject, UPGRADE_DATE)) { + // months block is empty if dates do not span an upgrade + return []; + } + for (let i = 0; i < numberOfMonths; i++) { + const month = addMonths(startDateObject, i); + const hasNoData = isBefore(month, UPGRADE_DATE); + if (hasNoData) { + months.push({ + timestamp: formatRFC3339(month), + counts: null, + namespaces: null, + new_clients: null, + }); + continue; + } + + const monthNs = namespaces.map((ns, idx) => generateNamespaceBlock(idx, true, ns)); + const newClients = namespaces.map((ns, idx) => generateNamespaceBlock(idx, true, ns)); + months.push({ + timestamp: formatRFC3339(month), + counts: getTotalCounts(monthNs), + namespaces: monthNs.sort((a, b) => b.counts.clients - a.counts.clients), + new_clients: { + counts: getTotalCounts(newClients), + namespaces: newClients.sort((a, b) => b.counts.clients - a.counts.clients), + }, + }); + } + return months; +} + +function generateActivityResponse(namespaces, startDate, endDate) { + return { + start_time: isAfter(new Date(startDate), COUNTS_START) ? 
startDate : formatRFC3339(COUNTS_START), + end_time: endDate, + by_namespace: namespaces.sort((a, b) => b.counts.clients - a.counts.clients), + months: generateMonths(startDate, endDate, namespaces), + total: getTotalCounts(namespaces), + }; +} + +export default function (server) { + server.get('sys/license/status', function () { + return { + request_id: 'my-license-request-id', + data: { + autoloaded: { + license_id: 'my-license-id', + start_time: formatRFC3339(LICENSE_START), + expiration_time: formatRFC3339(endOfMonth(addMonths(CURRENT_DATE, 6))), + }, + }, + }; + }); + + server.get('sys/internal/counters/config', function () { + return { + request_id: 'some-config-id', + data: { + default_report_months: 12, + enabled: 'default-enable', + queries_available: true, + retention_months: 24, + }, + }; + }); + + server.get('/sys/internal/counters/activity', (schema, req) => { + let { start_time, end_time } = req.queryParams; + // backend returns a timestamp if given unix time, so first convert to timestamp string here + if (!start_time.includes('T')) start_time = fromUnixTime(start_time).toISOString(); + if (!end_time.includes('T')) end_time = fromUnixTime(end_time).toISOString(); + const namespaces = Array.from(Array(12)).map((v, idx) => generateNamespaceBlock(idx)); + return { + request_id: 'some-activity-id', + lease_id: '', + renewable: false, + lease_duration: 0, + data: generateActivityResponse(namespaces, start_time, end_time), + wrap_info: null, + warnings: null, + auth: null, + }; + }); +} diff --git a/ui/mirage/handlers/db.js b/ui/mirage/handlers/db.js new file mode 100644 index 0000000..920bb3d --- /dev/null +++ b/ui/mirage/handlers/db.js @@ -0,0 +1,32 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +export default function (server) { + server.get('/database/static-roles', function () { + return { + data: { keys: ['dev-static', 'prod-static'] }, + }; + }); + + server.get('/database/static-roles/:rolename', function (db, req) { + if (req.params.rolename.includes('tester')) { + return new Response(400); + } + return { + data: { + rotation_statements: [ + '{ "db": "admin", "roles": [{ "role": "readWrite" }, {"role": "read", "db": "foo"}] }', + ], + db_name: 'connection', + username: 'alice', + rotation_period: '1h', + }, + }; + }); + + server.post('/database/rotate-role/:rolename', function () { + return new Response(204); + }); +} diff --git a/ui/mirage/handlers/hcp-link.js b/ui/mirage/handlers/hcp-link.js new file mode 100644 index 0000000..ad24955 --- /dev/null +++ b/ui/mirage/handlers/hcp-link.js @@ -0,0 +1,29 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import modifyPassthroughResponse from '../helpers/modify-passthrough-response'; + +export const statuses = [ + 'connected', + 'disconnected since 2022-09-21T11:25:02.196835-07:00; error: unable to establish a connection with HCP', + 'connecting since 2022-09-21T11:25:02.196835-07:00; error: unable to establish a connection with HCP', + 'connecting since 2022-09-21T11:25:02.196835-07:00; error: principal does not have the permission to register as a provider', + 'connecting since 2022-09-21T11:25:02.196835-07:00; error: could not obtain a token with the supplied credentials', +]; +let index = null; + +export default function (server) { + server.get('sys/seal-status', (schema, req) => { + // return next status from statuses array + if (index === null || index === statuses.length - 1) { + index = 0; + } else { + index++; + } + return modifyPassthroughResponse(req, { hcp_link_status: statuses[index] }); + }); + // enterprise only feature initially + server.get('sys/health', (schema, req) => modifyPassthroughResponse(req, { version: '1.12.0-dev1+ent' })); +} diff --git a/ui/mirage/handlers/index.js b/ui/mirage/handlers/index.js new file mode 100644 index 0000000..d7c652c --- /dev/null +++ b/ui/mirage/handlers/index.js @@ -0,0 +1,18 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +// add all handlers here +// individual lookup done in mirage config +import base from './base'; +import clients from './clients'; +import db from './db'; +import kms from './kms'; +import mfaConfig from './mfa-config'; +import mfaLogin from './mfa-login'; +import oidcConfig from './oidc-config'; +import hcpLink from './hcp-link'; +import kubernetes from './kubernetes'; + +export { base, clients, db, kms, mfaConfig, mfaLogin, oidcConfig, hcpLink, kubernetes }; diff --git a/ui/mirage/handlers/kms.js b/ui/mirage/handlers/kms.js new file mode 100644 index 0000000..3d71182 --- /dev/null +++ b/ui/mirage/handlers/kms.js @@ -0,0 +1,63 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +export default function (server) { + server.get('keymgmt/key?list=true', function () { + return { + data: { + keys: ['example-1', 'example-2', 'example-3'], + }, + }; + }); + + server.get('keymgmt/key/:name', function (_, request) { + const name = request.params.name; + return { + data: { + name, + deletion_allowed: false, + keys: { + 1: { + creation_time: '2020-11-02T15:54:58.768473-08:00', + public_key: '-----BEGIN PUBLIC KEY----- ... -----END PUBLIC KEY-----', + }, + 2: { + creation_time: '2020-11-04T16:58:47.591718-08:00', + public_key: '-----BEGIN PUBLIC KEY----- ... -----END PUBLIC KEY-----', + }, + }, + latest_version: 2, + min_enabled_version: 1, + type: 'rsa-2048', + }, + }; + }); + + server.get('keymgmt/key/:name/kms', function () { + return { + data: { + keys: ['example-kms'], + }, + }; + }); + + server.post('keymgmt/key/:name', function () { + return {}; + }); + + server.put('keymgmt/key/:name', function () { + return {}; + }); + + server.get('/keymgmt/kms/:provider/key', () => { + const keys = []; + let i = 1; + while (i <= 75) { + keys.push(`testkey-${i}`); + i++; + } + return { data: { keys } }; + }); +} diff --git a/ui/mirage/handlers/kubernetes.js b/ui/mirage/handlers/kubernetes.js new file mode 100644 index 0000000..a646519 --- /dev/null +++ b/ui/mirage/handlers/kubernetes.js @@ -0,0 +1,105 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { Response } from 'miragejs'; + +export default function (server) { + const getRecord = (schema, req, dbKey) => { + const { path, name } = req.params; + const findBy = dbKey === 'kubernetesConfigs' ? { path } : { name }; + const record = schema.db[dbKey].findBy(findBy); + if (record) { + delete record.path; + delete record.id; + } + return record ? { data: record } : new Response(404, {}, { errors: [] }); + }; + const createRecord = (req, key) => { + const data = JSON.parse(req.requestBody); + if (key === 'kubernetes-config') { + data.path = req.params.path; + } + server.create(key, data); + return new Response(204); + }; + const deleteRecord = (schema, req, dbKey) => { + const { name } = req.params; + const record = schema.db[dbKey].findBy({ name }); + if (record) { + schema.db[dbKey].remove(record.id); + } + return new Response(204); + }; + + server.get('/:path/config', (schema, req) => { + return getRecord(schema, req, 'kubernetesConfigs'); + }); + server.post('/:path/config', (schema, req) => { + return createRecord(req, 'kubernetes-config'); + }); + server.delete('/:path/config', (schema, req) => { + return deleteRecord(schema, req, 'kubernetesConfigs'); + }); + // endpoint for checking for environment variables necessary for inferred config + server.get('/:path/check', () => { + const response = {}; + const status = Math.random() > 0.5 ? 204 : 404; + if (status === 404) { + response.errors = [ + 'Missing environment variables: KUBERNETES_SERVICE_HOST, KUBERNETES_SERVICE_PORT_HTTPS', + ]; + } + return new Response(status, response); + }); + server.get('/:path/roles', (schema) => { + return { + data: { + keys: schema.db.kubernetesRoles.where({}).mapBy('name'), + }, + }; + }); + server.get('/:path/roles/:name', (schema, req) => { + return getRecord(schema, req, 'kubernetesRoles'); + }); + server.post('/:path/roles/:name', (schema, req) => { + return createRecord(req, 'kubernetes-role'); + }); + server.delete('/:path/roles/:name', (schema, req) => { + return deleteRecord(schema, req, 'kubernetesRoles'); + }); + server.post('/:path/creds/:role', (schema, req) => { + const { role } = req.params; + const record = schema.db.kubernetesRoles.findBy({ name: role }); + const data = JSON.parse(req.requestBody); + let errors; + if (!record) { + errors = [`role '${role}' does not exist`]; + } else if (!data.kubernetes_namespace) { + errors = ["'kubernetes_namespace' is required"]; + } + // creds cannot be fetched after creation so we don't need to store them + return errors + ? new Response(400, {}, { errors }) + : { + request_id: '58fefc6c-5195-c17a-94f2-8f889f3df57c', + lease_id: 'kubernetes/creds/default-role/aWczfcfJ7NKUdiirJrPXIs38', + renewable: false, + lease_duration: 3600, + data: { + service_account_name: 'default', + service_account_namespace: 'default', + service_account_token: 'eyJhbGciOiJSUzI1NiIsImtpZCI6Imlr', + }, + }; + }); + + server.get('/sys/internal/ui/mounts/kubernetes', () => ({ + data: { + accessor: 'kubernetes_9f846a87', + path: 'kubernetes/', + type: 'kubernetes', + }, + })); +} diff --git a/ui/mirage/handlers/mfa-config.js b/ui/mirage/handlers/mfa-config.js new file mode 100644 index 0000000..6771692 --- /dev/null +++ b/ui/mirage/handlers/mfa-config.js @@ -0,0 +1,206 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { Response } from 'miragejs'; + +export default function (server) { + const methods = ['totp', 'duo', 'okta', 'pingid']; + const required = { + totp: ['issuer'], + duo: ['secret_key', 'integration_key', 'api_hostname'], + okta: ['org_name', 'api_token'], + pingid: ['settings_file_base64'], + }; + + const validate = (type, data, cb) => { + if (!methods.includes(type)) { + return new Response(400, {}, { errors: [`Method ${type} not found`] }); + } + if (data) { + const missing = required[type].reduce((params, key) => { + if (!data[key]) { + params.push(key); + } + return params; + }, []); + if (missing.length) { + return new Response(400, {}, { errors: [`Missing required parameters: [${missing.join(', ')}]`] }); + } + } + return cb(); + }; + + const dbKeyFromType = (type) => `mfa${type.charAt(0).toUpperCase()}${type.slice(1)}Methods`; + + const generateListResponse = (schema, isMethod) => { + let records = []; + if (isMethod) { + methods.forEach((method) => { + records.addObjects(schema.db[dbKeyFromType(method)].where({})); + }); + } else { + records = schema.db.mfaLoginEnforcements.where({}); + } + // seed the db with a few records if none exist + if (!records.length) { + if (isMethod) { + methods.forEach((type) => { + records.push(server.create(`mfa-${type}-method`)); + }); + } else { + records = server.createList('mfa-login-enforcement', 4).toArray(); + } + } + const dataKey = isMethod ? 'id' : 'name'; + const data = records.reduce( + (resp, record) => { + resp.key_info[record[dataKey]] = record; + resp.keys.push(record[dataKey]); + return resp; + }, + { + key_info: {}, + keys: [], + } + ); + return { data }; + }; + + // list methods + server.get('/identity/mfa/method/', (schema) => { + return generateListResponse(schema, true); + }); + // fetch method by id + server.get('/identity/mfa/method/:id', (schema, { params: { id } }) => { + let record; + for (const method of methods) { + record = schema.db[dbKeyFromType(method)].find(id); + if (record) { + break; + } + } + // inconvenient when testing edit route to return a 404 on refresh since mirage memory is cleared + // flip this variable to test 404 state if needed + const shouldError = false; + // create a new record so data is always returned + if (!record && !shouldError) { + return { data: server.create('mfa-totp-method') }; + } + return !record ? 
new Response(404, {}, { errors: [] }) : { data: record }; + }); + // create method + server.post('/identity/mfa/method/:type', (schema, { params: { type }, requestBody }) => { + const data = JSON.parse(requestBody); + return validate(type, data, () => { + const record = server.create(`mfa-${type}-method`, data); + return { data: { method_id: record.id } }; + }); + }); + // update method + server.post('/identity/mfa/method/:type/:id', (schema, { params: { type, id }, requestBody }) => { + const data = JSON.parse(requestBody); + return validate(type, data, () => { + schema.db[dbKeyFromType(type)].update(id, data); + return {}; + }); + }); + // delete method + server.delete('/identity/mfa/method/:type/:id', (schema, { params: { type, id } }) => { + return validate(type, null, () => { + schema.db[dbKeyFromType(type)].remove(id); + return {}; + }); + }); + // list enforcements + server.get('/identity/mfa/login-enforcement', (schema) => { + return generateListResponse(schema); + }); + // fetch enforcement by name + server.get('/identity/mfa/login-enforcement/:name', (schema, { params: { name } }) => { + const record = schema.db.mfaLoginEnforcements.findBy({ name }); + // inconvenient when testing edit route to return a 404 on refresh since mirage memory is cleared + // flip this variable to test 404 state if needed + const shouldError = false; + // create a new record so data is always returned + if (!record && !shouldError) { + return { data: server.create('mfa-login-enforcement', { name }) }; + } + return !record ? new Response(404, {}, { errors: [] }) : { data: record }; + }); + // create/update enforcement + server.post('/identity/mfa/login-enforcement/:name', (schema, { params: { name }, requestBody }) => { + const data = JSON.parse(requestBody); + // at least one method id is required + if (!data.mfa_method_ids?.length) { + return new Response(400, {}, { errors: ['missing method ids'] }); + } + // at least one of the following targets is required + const required = [ + 'auth_method_accessors', + 'auth_method_types', + 'identity_group_ids', + 'identity_entity_ids', + ]; + let hasRequired = false; + for (const key of required) { + if (data[key]?.length) { + hasRequired = true; + break; + } + } + if (!hasRequired) { + return new Response( + 400, + {}, + { + errors: [ + 'One of auth_method_accessors, auth_method_types, identity_group_ids, identity_entity_ids must be specified', + ], + } + ); + } + if (schema.db.mfaLoginEnforcements.findBy({ name })) { + schema.db.mfaLoginEnforcements.update({ name }, data); + } else { + schema.db.mfaLoginEnforcements.insert(data); + } + return { ...data, id: data.name }; + }); + // delete enforcement + server.delete('/identity/mfa/login-enforcement/:name', (schema, { params: { name } }) => { + schema.db.mfaLoginEnforcements.remove({ name }); + return {}; + }); + // endpoints for target selection + server.get('/identity/group/id', () => ({ + data: { + key_info: { '34db6b52-591e-bc22-8af0-4add5e167326': { name: 'test-group' } }, + keys: ['34db6b52-591e-bc22-8af0-4add5e167326'], + }, + })); + server.get('/identity/group/id/:id', () => ({ + data: { + id: '34db6b52-591e-bc22-8af0-4add5e167326', + name: 'test-group', + }, + })); + server.get('/identity/entity/id', () => ({ + data: { + key_info: { 'f831667b-7392-7a1c-c0fc-33d48cb1c57d': { name: 'test-entity' } }, + keys: ['f831667b-7392-7a1c-c0fc-33d48cb1c57d'], + }, + })); + server.get('/identity/entity/id/:id', () => ({ + data: { + id: 'f831667b-7392-7a1c-c0fc-33d48cb1c57d', + name: 'test-entity', + }, + })); + 
server.get('/sys/auth', () => ({ + data: { + 'userpass/': { accessor: 'auth_userpass_bb95c2b1', type: 'userpass' }, + }, + })); +} diff --git a/ui/mirage/handlers/mfa-login.js b/ui/mirage/handlers/mfa-login.js new file mode 100644 index 0000000..b409e25 --- /dev/null +++ b/ui/mirage/handlers/mfa-login.js @@ -0,0 +1,159 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { Response } from 'miragejs'; +import Ember from 'ember'; +import fetch from 'fetch'; + +// initial auth response cache -- lookup by mfa_request_id key +const authResponses = {}; +// mfa requirement cache -- lookup by mfa_request_id key +const mfaRequirement = {}; + +// may be imported in tests when the validation request needs to be intercepted to make assertions prior to returning a response +// in that case it may be helpful to still use this validation logic to ensure the payload is as expected
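+// e.g. (illustrative sketch, not upstream code) -- in a test, import validationHandler from this module +// and wrap it to assert on the request payload before deferring to this shared logic: +// this.server.post('/sys/mfa/validate', (schema, req) => { +// assert.ok(JSON.parse(req.requestBody).mfa_request_id); +// return validationHandler(schema, req); +// }); +export const validationHandler = (schema, req) => { + try { + const { mfa_request_id, mfa_payload } = JSON.parse(req.requestBody); + const mfaRequest = mfaRequirement[mfa_request_id]; + + if (!mfaRequest) { + return new Response(404, {}, { errors: ['MFA Request ID not found'] }); + } + // validate request body + for (const constraintId in mfa_payload) { + // ensure ids were passed in map + const method = mfaRequest.methods.find(({ id }) => id === constraintId); + if (!method) { + return new Response(400, {}, { errors: [`Invalid MFA constraint id ${constraintId} passed in map`] }); + } + // test non-totp validation by rejecting all pingid requests + if (method.type === 'pingid') { + return new Response(403, {}, { errors: ['PingId MFA validation failed'] }); + } + // validate totp passcode + const passcode = mfa_payload[constraintId][0]; + if (method.uses_passcode) { + const expectedPasscode = method.type === 'duo' ? 'passcode=test' : 'test'; + if (passcode !== expectedPasscode) { + const error = + { + used: 'code already used; new code is available in 30 seconds', + limit: + 'maximum TOTP validation attempts 4 exceeded the allowed attempts 3. 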
Please try again in 15 seconds', + }[passcode] || 'failed to validate'; + console.log(error); // eslint-disable-line + return new Response(403, {}, { errors: [error] }); + } + } else if (passcode) { + // for okta and duo, reject if a passcode was provided + return new Response(400, {}, { errors: ['Passcode should only be provided for TOTP MFA type'] }); + } + } + return authResponses[mfa_request_id]; + } catch (error) { + console.log(error); // eslint-disable-line + return new Response(500, {}, { errors: ['Mirage Handler Error: /sys/mfa/validate'] }); + } +}; + +export default function (server) { + // generate different constraint scenarios and return mfa_requirement object + const generateMfaRequirement = (req, res) => { + const { user } = req.params; + // uses_passcode automatically set to true in factory for totp type + const m = (type, uses_passcode = false) => server.create('mfa-method', { type, uses_passcode }); + let mfa_constraints = {}; + let methods = []; // flat array of methods for easy lookup during validation + + function generator() { + const methods = []; + const constraintObj = [...arguments].reduce((obj, methodArray, index) => { + obj[`test_${index}`] = { any: methodArray }; + methods.push(...methodArray); + return obj; + }, {}); + return [constraintObj, methods]; + } + + if (user === 'mfa-a') { + [mfa_constraints, methods] = generator([m('totp')]); // 1 constraint 1 passcode + } else if (user === 'mfa-b') { + [mfa_constraints, methods] = generator([m('okta')]); // 1 constraint 1 non-passcode + } else if (user === 'mfa-c') { + [mfa_constraints, methods] = generator([m('totp'), m('duo', true)]); // 1 constraint 2 passcodes + } else if (user === 'mfa-d') { + [mfa_constraints, methods] = generator([m('okta'), m('duo')]); // 1 constraint 2 non-passcode + } else if (user === 'mfa-e') { + [mfa_constraints, methods] = generator([m('okta'), m('totp')]); // 1 constraint 1 passcode 1 non-passcode + } else if (user === 'mfa-f') { + [mfa_constraints, methods] = generator([m('totp')], [m('duo', true)]); // 2 constraints 1 passcode for each + } else if (user === 'mfa-g') { + [mfa_constraints, methods] = generator([m('okta')], [m('duo')]); // 2 constraints 1 non-passcode for each + } else if (user === 'mfa-h') { + [mfa_constraints, methods] = generator([m('totp')], [m('okta')]); // 2 constraints 1 passcode 1 non-passcode + } else if (user === 'mfa-i') { + [mfa_constraints, methods] = generator([m('okta'), m('totp')], [m('totp')]); // 2 constraints 1 passcode/1 non-passcode 1 non-passcode + } else if (user === 'mfa-j') { + [mfa_constraints, methods] = generator([m('pingid')]); // use to test push failures + } else if (user === 'mfa-k') { + [mfa_constraints, methods] = generator([m('duo', true)]); // test duo passcode and prepending passcode= to user input + } + const mfa_request_id = crypto.randomUUID(); + const mfa_requirement = { + mfa_request_id, + mfa_constraints, + }; + // cache mfa requests to test different validation scenarios + mfaRequirement[mfa_request_id] = { methods }; + // cache auth response to be returned later by sys/mfa/validate + authResponses[mfa_request_id] = { ...res }; + return mfa_requirement; + }; + // passthrough original request, cache response and return mfa stub + const passthroughLogin = async (schema, req) => { + // test totp not configured scenario + if (req.params.user === 'totp-na') { + return new Response(400, {}, { errors: ['TOTP mfa required but not configured'] }); + } + const mock = req.params.user ? 
req.params.user.includes('mfa') : null; + // bypass mfa for users that do not match type + if (!mock) { + req.passthrough(); + } else if (Ember.testing) { + // use root token in test environment + const res = await fetch('/v1/auth/token/lookup-self', { headers: { 'X-Vault-Token': 'root' } }); + if (res.status < 300) { + const json = await res.json(); + if (Ember.testing) { + json.auth = { + ...json.data, + policies: [], + metadata: { username: 'foobar' }, + }; + json.data = null; + } + return { auth: { mfa_requirement: generateMfaRequirement(req, json) } }; + } + return new Response(500, {}, { errors: ['Mirage error fetching root token in testing'] }); + } else { + const xhr = req.passthrough(); + xhr.onreadystatechange = () => { + if (xhr.readyState === 4 && xhr.status < 300) { + // XMLHttpRequest response prop only has a getter -- redefine as writable and set value + Object.defineProperty(xhr, 'response', { + writable: true, + value: JSON.stringify({ + auth: { mfa_requirement: generateMfaRequirement(req, JSON.parse(xhr.responseText)) }, + }), + }); + } + }; + } + }; + server.post('/auth/:method/login/:user', passthroughLogin); + + server.post('/sys/mfa/validate', validationHandler); +} diff --git a/ui/mirage/handlers/oidc-config.js b/ui/mirage/handlers/oidc-config.js new file mode 100644 index 0000000..11c3a3b --- /dev/null +++ b/ui/mirage/handlers/oidc-config.js @@ -0,0 +1,22 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +export default function (server) { + // ENTITY SEARCH SELECT + server.get('/identity/entity/id', () => ({ + data: { + key_info: { '1234-12345': { name: 'test-entity' } }, + keys: ['1234-12345'], + }, + })); + + // GROUP SEARCH SELECT + server.get('/identity/group/id', () => ({ + data: { + key_info: { 'abcdef-123': { name: 'test-group' } }, + keys: ['abcdef-123'], + }, + })); +} diff --git a/ui/mirage/helpers/modify-passthrough-response.js b/ui/mirage/helpers/modify-passthrough-response.js new file mode 100644 index 0000000..c6a794b --- /dev/null +++ b/ui/mirage/helpers/modify-passthrough-response.js @@ -0,0 +1,22 @@ +// passthrough request and modify response from server +// pass an object of response properties to override as the second arg +export default function (req, props = {}) { + return new Promise((resolve) => { + const xhr = req.passthrough(); + xhr.onreadystatechange = () => { + if (xhr.readyState === 4) { + if (xhr.status < 300) { + // XMLHttpRequest response prop only has a getter -- redefine as writable and set value + Object.defineProperty(xhr, 'response', { + writable: true, + value: JSON.stringify({ + ...JSON.parse(xhr.responseText), + ...props, + }), + }); + } + resolve(); + } + }; + }); +} diff --git a/ui/mirage/identity-managers/application.js b/ui/mirage/identity-managers/application.js new file mode 100644 index 0000000..a71912f --- /dev/null +++ b/ui/mirage/identity-managers/application.js @@ -0,0 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import IdentityManager from 'vault/utils/identity-manager'; +// to more closely match the Vault backend this will return UUIDs as identifiers for records in mirage +export default IdentityManager; diff --git a/ui/mirage/models/feature.js b/ui/mirage/models/feature.js new file mode 100644 index 0000000..cc93d19 --- /dev/null +++ b/ui/mirage/models/feature.js @@ -0,0 +1,10 @@ +/** + * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { Model } from 'ember-cli-mirage'; + +export default Model.extend({ + feature_flags: null, +}); diff --git a/ui/mirage/scenarios/default.js b/ui/mirage/scenarios/default.js new file mode 100644 index 0000000..cafa0e8 --- /dev/null +++ b/ui/mirage/scenarios/default.js @@ -0,0 +1,17 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import ENV from 'vault/config/environment'; +const { handler } = ENV['ember-cli-mirage']; +import kubernetesScenario from './kubernetes'; + +export default function (server) { + server.create('clients/config'); + server.create('feature', { feature_flags: ['SOME_FLAG', 'VAULT_CLOUD_ADMIN_NAMESPACE'] }); + + if (handler === 'kubernetes') { + kubernetesScenario(server); + } +} diff --git a/ui/mirage/scenarios/kubernetes.js b/ui/mirage/scenarios/kubernetes.js new file mode 100644 index 0000000..bc30ae7 --- /dev/null +++ b/ui/mirage/scenarios/kubernetes.js @@ -0,0 +1,13 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +export default function (server, shouldConfigureRoles = true) { + server.create('kubernetes-config', { path: 'kubernetes' }); + if (shouldConfigureRoles) { + server.create('kubernetes-role'); + server.create('kubernetes-role', 'withRoleName'); + server.create('kubernetes-role', 'withRoleRules'); + } +} diff --git a/ui/mirage/serializers/application.js b/ui/mirage/serializers/application.js new file mode 100644 index 0000000..4620ee5 --- /dev/null +++ b/ui/mirage/serializers/application.js @@ -0,0 +1,12 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { JSONAPISerializer } from 'ember-cli-mirage'; + +export default JSONAPISerializer.extend({ + typeKeyForModel(model) { + return model.modelName; + }, +}); diff --git a/ui/package.json b/ui/package.json new file mode 100644 index 0000000..d678437 --- /dev/null +++ b/ui/package.json @@ -0,0 +1,249 @@ +{ + "name": "vault", + "version": "0.0.0", + "description": "The official UI for Vault by HashiCorp", + "repository": "", + "author": "", + "directories": { + "doc": "doc", + "test": "tests" + }, + "scripts": { + "build": "ember build --environment=production && cp metadata.json ../http/web_ui/metadata.json", + "build:dev": "ember build", + "lint:fix": "npm-run-all --aggregate-output --continue-on-error --parallel lint:*:fix", + "lint:hbs": "ember-template-lint '**/*.hbs'", + "lint:hbs:quiet": "ember-template-lint '**/*.hbs' --quiet", + "lint:hbs:fix": "ember-template-lint . --fix", + "lint:js": "eslint . --cache", + "lint:js:quiet": "eslint . --cache --quiet", + "lint:js:fix": "eslint . 
--fix", + "fmt": "npm-run-all --aggregate-output --continue-on-error --parallel fmt:*", + "fmt:js": "prettier --config .prettierrc.js --write '{app,tests,config,lib}/**/*.js'", + "fmt:hbs": "prettier --config .prettierrc.js --write '**/*.hbs'", + "fmt:styles": "prettier --write app/styles/**/*.*", + "start": "VAULT_ADDR=http://localhost:8200; ember server --proxy=$VAULT_ADDR", + "start2": "ember server --proxy=http://localhost:8202 --port=4202", + "start:mirage": "start () { MIRAGE_DEV_HANDLER=$1 yarn run start; }; start", + "test": "npm-run-all lint:js:quiet lint:hbs:quiet && node scripts/start-vault.js", + "test:enos": "npm-run-all lint:js:quiet lint:hbs:quiet && node scripts/enos-test-ember.js", + "test:oss": "yarn run test -f='!enterprise'", + "test:quick": "node scripts/start-vault.js", + "test:quick-oss": "yarn test:quick -f='!enterprise'", + "types:declare": "declare () { yarn tsc $1 --declaration --allowJs --emitDeclarationOnly --experimentalDecorators --outDir $2; }; declare", + "vault": "VAULT_REDIRECT_ADDR=http://127.0.0.1:8200 vault server -log-level=error -dev -dev-root-token-id=root -dev-ha -dev-transactional", + "vault:cluster": "VAULT_REDIRECT_ADDR=http://127.0.0.1:8202 vault server -log-level=error -dev -dev-root-token-id=root -dev-listen-address=127.0.0.1:8202 -dev-ha -dev-transactional" + }, + "lint-staged": { + "*.js": [ + "prettier --config .prettierrc.js --write", + "eslint --quiet", + "git add" + ], + "*.hbs": [ + "prettier --config .prettierrc.js --write", + "ember-template-lint --quiet", + "git add" + ], + "*.scss": [ + "prettier --write", + "git add" + ] + }, + "devDependencies": { + "@babel/plugin-proposal-object-rest-spread": "^7.12.1", + "@babel/plugin-transform-block-scoping": "^7.12.1", + "@ember/legacy-built-in-components": "^0.4.1", + "@ember/optional-features": "^2.0.0", + "@ember/render-modifiers": "^1.0.2", + "@ember/test-helpers": "2.8.1", + "@ember/test-waiters": "^3.0.0", + "@glimmer/component": "^1.1.2", + "@glimmer/tracking": "^1.1.2", + "@hashicorp/ember-flight-icons": "^3.0.2", + "@hashicorp/structure-icons": "^1.3.0", + "@icholy/duration": "^5.1.0", + "@tsconfig/ember": "^1.0.1", + "@types/ember": "^4.0.2", + "@types/ember-data": "^4.4.6", + "@types/ember-data__adapter": "^4.0.1", + "@types/ember-data__model": "^4.0.0", + "@types/ember-data__serializer": "^4.0.1", + "@types/ember-data__store": "^4.0.2", + "@types/ember-qunit": "^5.0.2", + "@types/ember-resolver": "^5.0.13", + "@types/ember__application": "^4.0.4", + "@types/ember__array": "^4.0.3", + "@types/ember__component": "^4.0.11", + "@types/ember__controller": "^4.0.3", + "@types/ember__debug": "^4.0.3", + "@types/ember__destroyable": "^4.0.1", + "@types/ember__engine": "^4.0.4", + "@types/ember__error": "^4.0.1", + "@types/ember__object": "^4.0.5", + "@types/ember__polyfills": "^4.0.1", + "@types/ember__routing": "^4.0.12", + "@types/ember__runloop": "^4.0.2", + "@types/ember__service": "^4.0.1", + "@types/ember__string": "^3.0.10", + "@types/ember__template": "^4.0.1", + "@types/ember__test": "^4.0.1", + "@types/ember__test-helpers": "^2.8.2", + "@types/ember__utils": "^4.0.2", + "@types/qunit": "^2.19.3", + "@types/rsvp": "^4.0.4", + "@types/shell-quote": "^1.7.1", + "@typescript-eslint/eslint-plugin": "^5.19.0", + "@typescript-eslint/parser": "^5.19.0", + "asn1js": "^2.2.0", + "autosize": "^4.0.0", + "babel-eslint": "^10.1.0", + "babel-plugin-inline-json-import": "^0.3.2", + "base64-js": "^1.3.1", + "broccoli-asset-rev": "^3.0.0", + "broccoli-sri-hash": 
"meirish/broccoli-sri-hash#rooturl", + "codemirror": "^5.58.2", + "columnify": "^1.5.4", + "d3-axis": "^1.0.8", + "d3-ease": "^1.0.5", + "d3-scale": "^1.0.7", + "d3-selection": "^1.3.0", + "d3-time-format": "^2.1.1", + "d3-tip": "^0.9.1", + "d3-transition": "^1.2.0", + "date-fns": "^2.16.1", + "date-fns-tz": "^1.2.2", + "deepmerge": "^4.0.0", + "doctoc": "^2.2.0", + "dompurify": "^3.0.2", + "ember-auto-import": "2.4.2", + "ember-basic-dropdown": "6.0.1", + "ember-cli": "~4.4.0", + "ember-cli-autoprefixer": "^0.8.1", + "ember-cli-babel": "^7.26.11", + "ember-cli-clipboard": "0.16.0", + "ember-cli-content-security-policy": "2.0.3", + "ember-cli-dependency-checker": "^3.3.1", + "ember-cli-deprecation-workflow": "^2.1.0", + "ember-cli-flash": "4.0.0", + "ember-cli-htmlbars": "^6.0.1", + "ember-cli-inject-live-reload": "^2.1.0", + "ember-cli-mirage": "2.4.0", + "ember-cli-page-object": "1.17.10", + "ember-cli-sass": "11.0.1", + "ember-cli-sri": "meirish/ember-cli-sri#rooturl", + "ember-cli-string-helpers": "6.1.0", + "ember-cli-terser": "^4.0.2", + "ember-cli-typescript": "^5.2.1", + "ember-composable-helpers": "5.0.0", + "ember-concurrency": "2.3.4", + "ember-copy": "2.0.1", + "ember-d3": "^0.5.1", + "ember-data": "~4.5.0", + "ember-engines": "0.8.23", + "ember-export-application-global": "^2.0.1", + "ember-fetch": "^8.1.1", + "ember-inflector": "4.0.2", + "ember-load-initializers": "^2.1.2", + "ember-maybe-in-element": "^2.0.3", + "ember-modal-dialog": "^4.0.1", + "ember-modifier": "^3.1.0", + "ember-page-title": "^7.0.0", + "ember-power-select": "6.0.1", + "ember-qrcode-shim": "^0.4.0", + "ember-qunit": "6.0.0", + "ember-resolver": "^8.0.3", + "ember-responsive": "5.0.0", + "ember-router-helpers": "^0.4.0", + "ember-service-worker": "meirish/ember-service-worker#configurable-scope", + "ember-sinon": "^4.0.0", + "ember-source": "4.4.4", + "ember-svg-jar": "2.4.0", + "ember-template-lint": "4.8.0", + "ember-template-lint-plugin-prettier": "4.0.0", + "ember-test-selectors": "6.0.0", + "ember-tether": "^2.0.1", + "ember-truth-helpers": "3.0.0", + "ember-wormhole": "0.6.0", + "escape-string-regexp": "^2.0.0", + "eslint": "^7.32.0", + "eslint-config-prettier": "^8.5.0", + "eslint-plugin-compat": "4.0.2", + "eslint-plugin-ember": "^10.6.1", + "eslint-plugin-node": "^11.1.0", + "eslint-plugin-prettier": "^4.0.0", + "eslint-plugin-qunit": "^7.2.0", + "filesize": "^4.2.1", + "flat": "^6.0.1", + "jsondiffpatch": "^0.4.1", + "jsonlint": "^1.6.3", + "lint-staged": "^10.5.1", + "loader.js": "^4.7.0", + "normalize.css": "4.1.1", + "npm-run-all": "^4.1.5", + "pkijs": "^2.2.2", + "pretender": "^3.4.3", + "prettier": "2.6.2", + "prettier-eslint-cli": "^7.1.0", + "pvutils": "^1.0.17", + "qunit": "^2.19.1", + "qunit-dom": "^2.0.0", + "sass": "^1.58.3", + "sass-svg-uri": "^1.0.0", + "shell-quote": "^1.8.1", + "string.prototype.endswith": "^0.2.0", + "string.prototype.startswith": "^0.2.0", + "swagger-ui-dist": "^5.9.0", + "text-encoder-lite": "2.0.0", + "typescript": "^4.8.4", + "walk-sync": "^2.0.2", + "webpack": "5.73.0", + "xstate": "^3.3.3" + }, + "resolutions": { + "cryptiles": "^4.1.2", + "eslint-utils": "^1.4.1", + "ember-basic-dropdown": "6.0.1", + "growl": "^1.10.0", + "highlight.js": "^10.4.1", + "https-proxy-agent": "^2.2.3", + "ini": "^1.3.6", + "kind-of": "^6.0.3", + "minimatch": "^3.0.2", + "node-notifier": "^8.0.1", + "prismjs": "^1.21.0", + "qs": "^6.3.0", + "serialize-javascript": "^3.1.0", + "underscore": "^1.12.1", + "trim": "^0.0.3", + "xmlhttprequest-ssl": "^1.6.2" + }, + "engines": { + 
"node": "16" + }, + "ember": { + "edition": "octane" + }, + "private": true, + "ember-addon": { + "paths": [ + "lib/core", + "lib/css", + "lib/keep-gitkeep", + "lib/kmip", + "lib/kubernetes", + "lib/open-api-explorer", + "lib/pki", + "lib/replication", + "lib/service-worker-authenticated-download" + ] + }, + "dependencies": { + "@hashicorp/design-system-components": "2.3.1", + "handlebars": "4.7.7", + "highlight.js": "^10.4.1", + "node-notifier": "^8.0.1", + "uuid": "^9.0.0" + }, + "packageManager": "yarn@3.5.0" +} diff --git a/ui/public/android-sync.svg b/ui/public/android-sync.svg new file mode 100644 index 0000000..251025e --- /dev/null +++ b/ui/public/android-sync.svg @@ -0,0 +1,3 @@ + + + diff --git a/ui/public/console.svg b/ui/public/console.svg new file mode 100644 index 0000000..9c43c0c --- /dev/null +++ b/ui/public/console.svg @@ -0,0 +1,3 @@ + + + diff --git a/ui/public/duo.svg b/ui/public/duo.svg new file mode 100644 index 0000000..72a97e5 --- /dev/null +++ b/ui/public/duo.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/ui/public/eco/ad.svg b/ui/public/eco/ad.svg new file mode 100644 index 0000000..3c60f88 --- /dev/null +++ b/ui/public/eco/ad.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/alicloud.svg b/ui/public/eco/alicloud.svg new file mode 100644 index 0000000..fa908d2 --- /dev/null +++ b/ui/public/eco/alicloud.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/approle.svg b/ui/public/eco/approle.svg new file mode 100644 index 0000000..751811c --- /dev/null +++ b/ui/public/eco/approle.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/auth.svg b/ui/public/eco/auth.svg new file mode 100644 index 0000000..0890a58 --- /dev/null +++ b/ui/public/eco/auth.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/aws.svg b/ui/public/eco/aws.svg new file mode 100644 index 0000000..2b0b449 --- /dev/null +++ b/ui/public/eco/aws.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/azure.svg b/ui/public/eco/azure.svg new file mode 100644 index 0000000..3f1479c --- /dev/null +++ b/ui/public/eco/azure.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/cert.svg b/ui/public/eco/cert.svg new file mode 100644 index 0000000..f4562c2 --- /dev/null +++ b/ui/public/eco/cert.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/consul.svg b/ui/public/eco/consul.svg new file mode 100644 index 0000000..fda270c --- /dev/null +++ b/ui/public/eco/consul.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/cubbyhole.svg b/ui/public/eco/cubbyhole.svg new file mode 100644 index 0000000..e95f1b6 --- /dev/null +++ b/ui/public/eco/cubbyhole.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/database.svg b/ui/public/eco/database.svg new file mode 100644 index 0000000..110f2f1 --- /dev/null +++ b/ui/public/eco/database.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/gcp.svg b/ui/public/eco/gcp.svg new file mode 100644 index 0000000..44dd17b --- /dev/null +++ b/ui/public/eco/gcp.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/gcpkms.svg b/ui/public/eco/gcpkms.svg new file mode 100644 index 0000000..44dd17b --- /dev/null +++ b/ui/public/eco/gcpkms.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/generic.svg b/ui/public/eco/generic.svg new file mode 100644 index 0000000..39af7a0 --- /dev/null +++ b/ui/public/eco/generic.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/github.svg b/ui/public/eco/github.svg new file mode 100644 index 0000000..fdd4893 --- /dev/null +++ b/ui/public/eco/github.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/jwt.svg b/ui/public/eco/jwt.svg new file mode 100644 index 0000000..0890a58 --- /dev/null +++ b/ui/public/eco/jwt.svg @@ -0,0 +1 @@ + 
diff --git a/ui/public/eco/kmip.svg b/ui/public/eco/kmip.svg new file mode 100644 index 0000000..e95f1b6 --- /dev/null +++ b/ui/public/eco/kmip.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/kubernetes.svg b/ui/public/eco/kubernetes.svg new file mode 100644 index 0000000..b75d726 --- /dev/null +++ b/ui/public/eco/kubernetes.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/kv.svg b/ui/public/eco/kv.svg new file mode 100644 index 0000000..39af7a0 --- /dev/null +++ b/ui/public/eco/kv.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/ldap.svg b/ui/public/eco/ldap.svg new file mode 100644 index 0000000..0890a58 --- /dev/null +++ b/ui/public/eco/ldap.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/nomad.svg b/ui/public/eco/nomad.svg new file mode 100644 index 0000000..8b5efff --- /dev/null +++ b/ui/public/eco/nomad.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/oidc.svg b/ui/public/eco/oidc.svg new file mode 100644 index 0000000..0890a58 --- /dev/null +++ b/ui/public/eco/oidc.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/okta.svg b/ui/public/eco/okta.svg new file mode 100644 index 0000000..16d288e --- /dev/null +++ b/ui/public/eco/okta.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/pki.svg b/ui/public/eco/pki.svg new file mode 100644 index 0000000..5faba68 --- /dev/null +++ b/ui/public/eco/pki.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/rabbitmq.svg b/ui/public/eco/rabbitmq.svg new file mode 100644 index 0000000..f4f2712 --- /dev/null +++ b/ui/public/eco/rabbitmq.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/radius.svg b/ui/public/eco/radius.svg new file mode 100644 index 0000000..0890a58 --- /dev/null +++ b/ui/public/eco/radius.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/secrets.svg b/ui/public/eco/secrets.svg new file mode 100644 index 0000000..e95f1b6 --- /dev/null +++ b/ui/public/eco/secrets.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/ssh.svg b/ui/public/eco/ssh.svg new file mode 100644 index 0000000..30e7261 --- /dev/null +++ b/ui/public/eco/ssh.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/token.svg b/ui/public/eco/token.svg new file mode 100644 index 0000000..6f8be4c --- /dev/null +++ b/ui/public/eco/token.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/totp.svg b/ui/public/eco/totp.svg new file mode 100644 index 0000000..18996f7 --- /dev/null +++ b/ui/public/eco/totp.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/transform.svg b/ui/public/eco/transform.svg new file mode 100644 index 0000000..c7a59bb --- /dev/null +++ b/ui/public/eco/transform.svg @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + + + diff --git a/ui/public/eco/transit.svg b/ui/public/eco/transit.svg new file mode 100644 index 0000000..22c2801 --- /dev/null +++ b/ui/public/eco/transit.svg @@ -0,0 +1 @@ + diff --git a/ui/public/eco/userpass.svg b/ui/public/eco/userpass.svg new file mode 100644 index 0000000..22d10ca --- /dev/null +++ b/ui/public/eco/userpass.svg @@ -0,0 +1 @@ + diff --git a/ui/public/favicon.png b/ui/public/favicon.png new file mode 100644 index 0000000..e4f3428 Binary files /dev/null and b/ui/public/favicon.png differ diff --git a/ui/public/file-error.svg b/ui/public/file-error.svg new file mode 100644 index 0000000..97f8e13 --- /dev/null +++ b/ui/public/file-error.svg @@ -0,0 +1,3 @@ + + + diff --git a/ui/public/file-success.svg b/ui/public/file-success.svg new file mode 100644 index 0000000..779273d --- /dev/null +++ b/ui/public/file-success.svg @@ -0,0 +1,3 @@ + + + diff --git a/ui/public/fonts/obscure.woff b/ui/public/fonts/obscure.woff new file mode 100644 index 0000000..e714220 Binary files /dev/null 
and b/ui/public/fonts/obscure.woff differ
diff --git a/ui/public/fonts/obscure.woff2 b/ui/public/fonts/obscure.woff2
new file mode 100644
index 0000000..c2e583a
Binary files /dev/null and b/ui/public/fonts/obscure.woff2 differ
diff --git a/ui/public/fonts/text-security-square.woff2 b/ui/public/fonts/text-security-square.woff2
new file mode 100644
index 0000000..dcf8069
Binary files /dev/null and b/ui/public/fonts/text-security-square.woff2 differ
diff --git a/ui/public/hashicorp.svg b/ui/public/hashicorp.svg
new file mode 100644
index 0000000..1553261
--- /dev/null
+++ b/ui/public/hashicorp.svg
@@ -0,0 +1,3 @@
+ [SVG markup lost in extraction]
diff --git a/ui/public/images/mfa-landing.png b/ui/public/images/mfa-landing.png
new file mode 100644
index 0000000..7848f72
Binary files /dev/null and b/ui/public/images/mfa-landing.png differ
diff --git a/ui/public/images/oidc-landing.png b/ui/public/images/oidc-landing.png
new file mode 100644
index 0000000..0534a10
Binary files /dev/null and b/ui/public/images/oidc-landing.png differ
diff --git a/ui/public/images/pki-cross-sign.png b/ui/public/images/pki-cross-sign.png
new file mode 100644
index 0000000..7016466
Binary files /dev/null and b/ui/public/images/pki-cross-sign.png differ
diff --git a/ui/public/images/pki-rotate-root.png b/ui/public/images/pki-rotate-root.png
new file mode 100644
index 0000000..819e0f9
Binary files /dev/null and b/ui/public/images/pki-rotate-root.png differ
diff --git a/ui/public/images/pki-tidy.png b/ui/public/images/pki-tidy.png
new file mode 100644
index 0000000..320e905
Binary files /dev/null and b/ui/public/images/pki-tidy.png differ
diff --git a/ui/public/initialize.svg b/ui/public/initialize.svg
new file mode 100644
index 0000000..d8cc987
--- /dev/null
+++ b/ui/public/initialize.svg
@@ -0,0 +1,963 @@
+ [963 lines of SVG vector artwork lost in extraction]
diff --git a/ui/public/loop.svg b/ui/public/loop.svg
new file mode 100644
index 0000000..290bbb7
--- /dev/null
+++ b/ui/public/loop.svg
@@ -0,0 +1,3 @@
+ [SVG markup lost in extraction]
diff --git a/ui/public/okta.svg b/ui/public/okta.svg
new file mode 100644
index 0000000..6b6e890
--- /dev/null
+++ b/ui/public/okta.svg
@@ -0,0 +1,3 @@
+ [SVG markup lost in extraction]
diff --git a/ui/public/perf-replication.svg b/ui/public/perf-replication.svg
new file mode 100644
index 0000000..e673092
--- /dev/null
+++ b/ui/public/perf-replication.svg
@@ -0,0 +1,6 @@
+ [SVG markup lost in extraction]
diff --git a/ui/public/pingid.svg b/ui/public/pingid.svg
new file mode 100644
index 0000000..99b33fe
--- /dev/null
+++ b/ui/public/pingid.svg
@@ -0,0 +1,11 @@
+ [SVG markup lost in extraction]
diff --git a/ui/public/replication.svg b/ui/public/replication.svg
new file mode 100644
index 0000000..3d9fc03
--- /dev/null
+++ b/ui/public/replication.svg
@@ -0,0 +1,3 @@
+ [SVG markup lost in extraction]
diff --git a/ui/public/reply.svg b/ui/public/reply.svg
new file mode 100644
index 0000000..dcdf541
--- /dev/null
+++ b/ui/public/reply.svg
@@ -0,0 +1,3 @@
+ [SVG markup lost in extraction]
diff --git a/ui/public/robots.txt b/ui/public/robots.txt
new file mode 100644
index 0000000..217b42f
--- /dev/null
+++ b/ui/public/robots.txt
@@ -0,0 +1,3 @@
+# http://www.robotstxt.org
+User-agent: *
+Disallow: /
diff --git a/ui/public/stopwatch.svg b/ui/public/stopwatch.svg
new file mode 100644
index 0000000..10d4b40
--- /dev/null
+++ b/ui/public/stopwatch.svg
@@ -0,0 +1,3 @@
+ [SVG markup lost in extraction]
diff --git a/ui/public/tour.svg b/ui/public/tour.svg
new file mode 100644
index 0000000..e864d78
--- /dev/null
+++ b/ui/public/tour.svg
@@ -0,0 +1,4 @@
+ [SVG markup lost in extraction]
diff --git a/ui/public/vault-logo.svg b/ui/public/vault-logo.svg
new file mode 100644
index 0000000..6a7ec53
--- /dev/null
+++ b/ui/public/vault-logo.svg
@@ -0,0 +1,3 @@
+ [SVG markup lost in extraction]
diff --git a/ui/scripts/codemods/README.md b/ui/scripts/codemods/README.md
new file mode 100644
index 0000000..7e6d24f
--- /dev/null
+++ b/ui/scripts/codemods/README.md
@@ -0,0 +1,14 @@
+# Running Codemods
+
+The handlebars codemods use [ember-template-recast](https://github.com/ember-template-lint/ember-template-recast) and can be run with the following:
+
+- navigate to the UI directory of the Vault project
+- execute `npx ember-template-recast "**/*.hbs" -t ./path/to/transform-file.js`
+
+This will run the transform on all .hbs files within the ui directory which covers the app and all addons.
+The terminal will output the number of files processed as well as the number of changed, unchanged, skipped and errored files.
+It's a good idea to validate the output to ensure that the intended transforms have taken place.
+If there are issues with some of the files, simply revert the changes via git, tweak the codemod and run again.
+
+## Example
+`npx ember-template-recast "**/*.hbs" -t ./scripts/codemods/no-quoteless-attributes.js`
\ No newline at end of file
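For orientation before the transform files that follow: a minimal sketch of driving ember-template-recast programmatically rather than through the CLI. The `transform` export and the `env => visitor` plugin shape follow the library's documented API; the template string and the attribute rename are illustrative assumptions, not part of this changeset.

```js
// Minimal sketch: apply a visitor-based plugin to a template string.
const { transform } = require('ember-template-recast');

const template = '<BasicDropdown @class="popup" />';

const { code } = transform(template, () => ({
  ElementNode(node) {
    // hypothetical rename of the @class argument to a plain class attribute
    node.attributes.forEach((attr) => {
      if (attr.name === '@class') {
        attr.name = 'class';
      }
    });
  },
}));

console.log(code); // <BasicDropdown class="popup" />
```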
diff --git a/ui/scripts/codemods/dropdown-transform.js b/ui/scripts/codemods/dropdown-transform.js
new file mode 100644
index 0000000..43715da
--- /dev/null
+++ b/ui/scripts/codemods/dropdown-transform.js
@@ -0,0 +1,44 @@
+#!/usr/bin/env node
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+/* eslint-env node */
+
+/**
+ * Codemod to transform BasicDropdown and ToolTip trigger and content components.
+ * As of version 2 of ember-basic-dropdown, the yielded component names are now capitalized.
+ * In addition, splattributes are used and class must be passed as an attribute rather than an argument.
+ */
+
+module.exports = () => {
+  return {
+    ElementNode(node) {
+      // ensure we have the right parent node by first looking for BasicDropdown or ToolTip nodes
+      if (['BasicDropdown', 'ToolTip'].includes(node.tag)) {
+        node.children.forEach((child) => {
+          // capitalize trigger and content and transform attributes
+          if (child.type === 'ElementNode' && child.tag.match(/\.(content|trigger)/gi)) {
+            const { tag } = child;
+            const char = tag.charAt(tag.indexOf('.') + 1);
+            child.tag = tag.replace(char, char.toUpperCase());
+            // remove @ symbol from class and change @tagName to @htmlTag
+            // Content component does not use splattributes -- apply class with @defaultClass arg
+            child.attributes.forEach((attr) => {
+              if (attr.name.includes('class')) {
+                if (child.tag.includes('Content')) {
+                  attr.name = '@defaultClass';
+                } else if (attr.name === '@class') {
+                  attr.name = 'class';
+                }
+              } else if (attr.name.includes('tagName')) {
+                attr.name = '@htmlTag';
+              }
+            });
+          }
+        });
+      }
+    },
+  };
+};
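To make the dropdown transform concrete, here is a hypothetical before/after pair. The component markup is invented for illustration; only the rewrites mirror the visitor's actual behavior.

```js
// Before: yielded components are lowercase and take @class/@tagName args.
const before = `
<BasicDropdown as |dd|>
  <dd.trigger @class="toggle" @tagName="span">Open</dd.trigger>
  <dd.content @class="menu">Items</dd.content>
</BasicDropdown>`;

// After: trigger/content are capitalized, the trigger's @class becomes a
// plain class attribute, the content keeps its class via @defaultClass,
// and @tagName becomes @htmlTag.
const after = `
<BasicDropdown as |dd|>
  <dd.Trigger class="toggle" @htmlTag="span">Open</dd.Trigger>
  <dd.Content @defaultClass="menu">Items</dd.Content>
</BasicDropdown>`;
```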
diff --git a/ui/scripts/codemods/icon-transform.js b/ui/scripts/codemods/icon-transform.js
new file mode 100644
index 0000000..4c1b308
--- /dev/null
+++ b/ui/scripts/codemods/icon-transform.js
@@ -0,0 +1,75 @@
+#!/usr/bin/env node
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+/* eslint-env node */
+/**
+ * Codemod to transform the Icon component to the new API to accommodate FlightIcon.
+ * Example execution from the ui directory -> npx ember-template-recast ./templates -t ./scripts/codemods/icon-transform.js
+ * The above will run the transform on all files in the templates directory.
+ */
+
+module.exports = (env) => {
+  const {
+    syntax: { builders },
+  } = env;
+  const hsSizes = ['s', 'm', 'l', 'xlm', 'xl', 'xxl'];
+
+  // find attribute by name
+  const findAttribute = (attrs, name) => {
+    for (let i = 0; i < attrs.length; i++) {
+      if (attrs[i].name === name) {
+        return [attrs[i], i, attrs[i].value.chars];
+      }
+    }
+    return [];
+  };
+
+  // possibly a bug with ember-template-recast for multi-line components with attributes on their own lines:
+  // when removing an attribute, the one on the line below will jump to the same line as the previous one.
+  // This does not happen when removing the first attribute -- but doing so may add unnecessary quotes to the first shifted attribute
+  // example: class="{{foo}}" -> class=""{{foo}}""
+  const preserveFormatting = (attributes, removeIndex) => {
+    if (removeIndex > 0) {
+      // shift the location of the attributes that appear after the one being removed to preserve formatting
+      for (let i = attributes.length - 1; i > removeIndex; i--) {
+        attributes[i].loc = attributes[i - 1].loc;
+      }
+    }
+  };
+
+  // transform structure icon size letter to flight icon supported size
+  const transformSize = (attributes, attrName) => {
+    const [attr, attrIndex, value] = findAttribute(attributes, attrName);
+
+    if (hsSizes.includes(value)) {
+      if (['s', 'm', 'l'].includes(value)) {
+        // before removing the attribute, set the location of the remaining attributes
+        preserveFormatting(attributes, attrIndex);
+        // since 16 is the default in the component we can remove the attribute
+        attributes.splice(attrIndex, 1);
+      } else {
+        attr.value = builders.text('24');
+        // rename attribute
+        if (attrName === '@sizeClass') {
+          attr.name = '@size';
+        }
+      }
+    }
+  };
+
+  return {
+    ElementNode(node) {
+      if (node.tag === 'Icon') {
+        const { attributes } = node;
+        // the initial refactor of the component introduced a sizeClass attribute
+        // this can now be mapped to size and removed
+        transformSize(attributes, '@sizeClass');
+        // check for old component instances that may still have a letter for size value
+        transformSize(attributes, '@size');
+      }
+    },
+  };
+};
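A quick sketch of what the icon transform does in practice. The `@glyph` argument and the require paths are assumptions for illustration; the expected outputs follow the size mapping in the visitor above.

```js
// Run the transform in-process (normally this runs via the ember-template-recast CLI).
const { transform } = require('ember-template-recast');
const iconTransform = require('./icon-transform');

// 's', 'm', and 'l' map to the component's 16px default, so the attribute is dropped:
console.log(transform('<Icon @glyph="vault" @sizeClass="m" />', iconTransform).code);
// expected: <Icon @glyph="vault" />

// larger legacy sizes map to FlightIcon's 24, and @sizeClass is renamed to @size:
console.log(transform('<Icon @glyph="vault" @sizeClass="xl" />', iconTransform).code);
// expected: <Icon @glyph="vault" @size="24" />
```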
diff --git a/ui/scripts/codemods/inject-service.js b/ui/scripts/codemods/inject-service.js
new file mode 100644
index 0000000..206492e
--- /dev/null
+++ b/ui/scripts/codemods/inject-service.js
@@ -0,0 +1,149 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import babylonParser from './jscodeshift-babylon-parser';
+
+// use babylon parser with decorators-legacy plugin
+export const parser = babylonParser;
+
+// checks for access of specified service on this
+// injects service if not present and imports inject as service if needed
+// example usage - npx jscodeshift -t ./scripts/codemods/inject-service.js ./app/**/*.js --service=store
+// --service arg is required with name of service
+// pass -d for dry run (no files transformed)
+
+export default function transformer({ source }, api, { service }) {
+  if (!service) {
+    throw new Error('Missing service arg. Pass --service=store for example to script');
+  }
+  const j = api.jscodeshift;
+  const filterForService = (path) => {
+    return j(path.value).find(j.MemberExpression, {
+      object: {
+        type: 'ThisExpression',
+      },
+      property: {
+        name: service,
+      },
+    }).length;
+  };
+  const recastOptions = {
+    reuseWhitespace: false,
+    wrapColumn: 110,
+    quote: 'single',
+    trailingComma: true,
+  };
+  let didInjectService = false;
+
+  // find class bodies and filter down to ones that access service
+  const classesAccessingService = j(source).find(j.ClassBody).filter(filterForService);
+
+  if (classesAccessingService.length) {
+    // filter down to class bodies where service is not injected
+    const missingService = classesAccessingService.filter((path) => {
+      return !j(path.value)
+        .find(j.ClassProperty, {
+          key: {
+            name: service,
+          },
+        })
+        .filter((path) => {
+          // ensure service property belongs to service decorator
+          return path.value.decorators.find((path) => path.expression.name === 'service');
+        }).length;
+    });
+
+    if (missingService.length) {
+      // inject service
+      const serviceInjection = j.classProperty(j.identifier(`@service ${service}`), null);
+      // adding a decorator this way will force injection down to a new line and then add a new line
+      // leaving in just in case it's needed
+      // serviceInjection.decorators = [j.decorator(j.identifier('service'))];
+
+      source = missingService
+        .forEach((path) => {
+          path.value.body.unshift(serviceInjection);
+        })
+        .toSource();
+
+      didInjectService = true;
+    }
+  }
+
+  // find .extend object expressions and filter down to ones that access this[service]
+  const objectsAccessingService = j(source)
+    .find(j.CallExpression, {
+      callee: {
+        type: 'MemberExpression',
+        property: {
+          name: 'extend',
+        },
+      },
+    })
+    .filter(filterForService)
+    .find(j.ObjectExpression)
+    .filter((path) => {
+      // filter object expressions that belong to .extend
+      // otherwise service will also be injected in actions: { } block of component for example
+      const callee = path.parent.value.callee;
+      return callee && callee.property?.name === 'extend';
+    });
+
+  if (objectsAccessingService.length) {
+    // filter down to objects where service is not injected
+    const missingService = objectsAccessingService.filter((path) => {
+      return !j(path.value).find(j.ObjectProperty, {
+        key: {
+          name: service,
+        },
+        value: {
+          callee: {
+            name: 'service',
+          },
+        },
+      }).length;
+    });
+
+    if (missingService.length) {
+      // inject service
+      const serviceInjection = j.objectProperty(
+        j.identifier(service),
+        j.callExpression(j.identifier('service'), [])
+      );
+
+      source = missingService
+        .forEach((path) => {
+          path.value.properties.unshift(serviceInjection);
+        })
+        .toSource(recastOptions);
+
+      didInjectService = true;
+    }
+  }
+
+  // if service was injected here check if inject has been imported
+  if (didInjectService) {
+    const needsImport = !j(source).find(j.ImportSpecifier, {
+      imported: {
+        name: 'inject',
+      },
+    }).length;
+
+    if (needsImport) {
+      const injectionImport = j.importDeclaration(
+        [j.importSpecifier(j.identifier('inject'), j.identifier('service'))],
+        j.literal('@ember/service')
+      );
+
+      const imports = j(source).find(j.ImportDeclaration);
+      source = imports
+        .at(imports.length - 1)
+        .insertAfter(injectionImport)
+        .toSource(recastOptions);
+    }
+  }
+
+  return source;
+}
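As an illustration of the service-injection codemod, here is a hypothetical before/after for `--service=store`. The route class is invented; the inserted lines mirror what the transformer unshifts.

```js
// Before (hypothetical route that reads this.store without declaring it):
//
//   export default class SecretsRoute extends Route {
//     model() {
//       return this.store.query('secret', {});
//     }
//   }
//
// After running `npx jscodeshift -t ./scripts/codemods/inject-service.js app --service=store`,
// the class property is unshifted and the import is added if missing:

import Route from '@ember/routing/route';
import { inject as service } from '@ember/service';

export default class SecretsRoute extends Route {
  @service store;

  model() {
    return this.store.query('secret', {});
  }
}
```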
diff --git a/ui/scripts/codemods/jscodeshift-babylon-parser.js b/ui/scripts/codemods/jscodeshift-babylon-parser.js
new file mode 100644
index 0000000..42f3b5e
--- /dev/null
+++ b/ui/scripts/codemods/jscodeshift-babylon-parser.js
@@ -0,0 +1,51 @@
+#!/usr/bin/env node
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+/* eslint-env node */
+
+// Read more: https://github.com/facebook/jscodeshift#parser
+
+const babylon = require('@babel/parser');
+
+const parserConfig = {
+  sourceType: 'module',
+  allowImportExportEverywhere: true,
+  allowReturnOutsideFunction: true,
+  startLine: 1,
+  tokens: true,
+  plugins: [
+    ['flow', { all: true }],
+    'flowComments',
+    'jsx',
+    'asyncGenerators',
+    'bigInt',
+    'classProperties',
+    'classPrivateProperties',
+    'classPrivateMethods',
+    'decorators-legacy', // allows decorator to come before export statement
+    'doExpressions',
+    'dynamicImport',
+    'exportDefaultFrom',
+    'exportNamespaceFrom',
+    'functionBind',
+    'functionSent',
+    'importMeta',
+    'logicalAssignment',
+    'nullishCoalescingOperator',
+    'numericSeparator',
+    'objectRestSpread',
+    'optionalCatchBinding',
+    'optionalChaining',
+    ['pipelineOperator', { proposal: 'minimal' }],
+    'throwExpressions',
+  ],
+};
+
+export default {
+  parse: function (source) {
+    return babylon.parse(source, parserConfig);
+  },
+};
diff --git a/ui/scripts/codemods/linkto-with-on-modifier.js b/ui/scripts/codemods/linkto-with-on-modifier.js
new file mode 100644
index 0000000..dc8c4e9
--- /dev/null
+++ b/ui/scripts/codemods/linkto-with-on-modifier.js
@@ -0,0 +1,25 @@
+#!/usr/bin/env node
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+/* eslint-env node */
+
+// print to console all files that include LinkTo elements using the {{on}} modifier
+module.exports = (env) => {
+  let fileAlerted;
+  return {
+    ElementNode(node) {
+      if (node.tag === 'LinkTo') {
+        if (!fileAlerted) {
+          const usesModifier = node.modifiers.find((modifier) => modifier.path.original === 'on');
+          if (usesModifier) {
+            console.log(env.filePath); // eslint-disable-line
+            fileAlerted = true;
+          }
+        }
+      }
+    },
+  };
+};
diff --git a/ui/scripts/codemods/no-quoteless-attributes.js b/ui/scripts/codemods/no-quoteless-attributes.js
new file mode 100644
index 0000000..1e28a8b
--- /dev/null
+++ b/ui/scripts/codemods/no-quoteless-attributes.js
@@ -0,0 +1,29 @@
+#!/usr/bin/env node
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+/* eslint-env node */
+
+/**
+ * Codemod to convert a quoteless attribute or argument to a mustache statement
+ * eg. data-test-foo=true -> data-test-foo={{true}}
+ * eg. @isVisible=true -> @isVisible={{true}}
+ */
+
+module.exports = (env) => {
+  const { builders } = env.syntax;
+  return {
+    ElementNode({ attributes }) {
+      let i = 0;
+      while (i < attributes.length) {
+        const { type, chars } = attributes[i].value;
+        if (type === 'TextNode' && chars && !attributes[i].quoteType) {
+          attributes[i].value = builders.mustache(builders.path(attributes[i].value.chars));
+        }
+        i++;
+      }
+    },
+  };
+};
diff --git a/ui/scripts/codemods/transform-deprecated-args.js b/ui/scripts/codemods/transform-deprecated-args.js
new file mode 100644
index 0000000..352cced
--- /dev/null
+++ b/ui/scripts/codemods/transform-deprecated-args.js
@@ -0,0 +1,58 @@
+#!/usr/bin/env node
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+/* eslint-env node */
+/**
+ * Codemod to convert args to attributes for the Input and Textarea built-in components
+ * eg. <Input @id="foo" /> -> <Input id="foo" />
+ */
+
+module.exports = () => {
+  // partial list of deprecated arguments
+  // complete list used by linter found at:
+  // https://github.com/ember-template-lint/ember-template-lint/blob/master/lib/rules/no-unknown-arguments-for-builtin-components.js
+  const deprecatedArgs = [
+    '@id',
+    '@name',
+    '@autocomplete',
+    '@spellcheck',
+    '@disabled', // not deprecated for LinkTo
+    '@class',
+    '@placeholder',
+    '@wrap',
+    '@rows',
+    '@readonly',
+    '@step',
+    '@min',
+    '@pattern',
+  ];
+  return {
+    ElementNode(node) {
+      if (['Textarea', 'Input', 'LinkTo', 'ToolbarSecretLink', 'SecretLink'].includes(node.tag)) {
+        const attrs = node.attributes;
+        let i = 0;
+        while (i < attrs.length) {
+          // LinkTo takes disabled as a named arg; if it is set as a plain attribute the link will still work,
+          // so convert it back to @disabled
+          // ToolbarSecretLink wraps SecretLink and SecretLink wraps LinkTo, so include them as well
+          const disabledAsArg = ['LinkTo', 'SecretLink', 'ToolbarSecretLink'].includes(node.tag);
+          if (disabledAsArg && attrs[i].name === 'disabled') {
+            attrs[i].name = '@disabled';
+          }
+          const arg = deprecatedArgs.find((name) => {
+            return node.tag.includes('SecretLink') || (node.tag === 'LinkTo' && name === '@disabled')
+              ? false
+              : name === attrs[i].name;
+          });
+          if (arg) {
+            attrs[i].name = arg.slice(1);
+          }
+          i++;
+        }
+      }
+    },
+  };
+};
diff --git a/ui/scripts/enos-test-ember.js b/ui/scripts/enos-test-ember.js
new file mode 100755
index 0000000..1965c3c
--- /dev/null
+++ b/ui/scripts/enos-test-ember.js
@@ -0,0 +1,64 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0 + */ + +/* eslint-env node */ +/* eslint-disable no-console */ + +const testHelper = require('./test-helper'); + +(async function () { + try { + let unsealKeys = process.env.VAULT_UNSEAL_KEYS; + if (!unsealKeys) { + console.error( + 'Cannot run ember tests without unseal keys, please make sure to export the keys, in an env ' + + 'var named: VAULT_UNSEAL_KEYS' + ); + process.exit(1); + } else { + unsealKeys = JSON.parse(unsealKeys); + } + + const rootToken = process.env.VAULT_TOKEN; + if (!rootToken) { + console.error( + 'Cannot run ember tests without root token, please make sure to export the root token, in an env ' + + 'var named: VAULT_TOKEN' + ); + process.exit(1); + } + + testHelper.writeKeysFile(unsealKeys, rootToken); + } catch (error) { + console.log(error); + process.exit(1); + } + + const vaultAddr = process.env.VAULT_ADDR; + if (!vaultAddr) { + console.error( + 'Cannot run ember tests without the Vault Address, please make sure to export the vault address, in an env ' + + 'var named: VAULT_ADDR' + ); + process.exit(1); + } + + console.log('VAULT_ADDR=' + vaultAddr); + + try { + const testArgs = ['test', '-c', 'testem.enos.js']; + + if (process.env.TEST_FILTER && process.env.TEST_FILTER.length > 0) { + testArgs.push('-f=' + process.env.TEST_FILTER); + } + + await testHelper.run('ember', [...testArgs, ...process.argv.slice(2)], false); + } catch (error) { + console.log(error); + process.exit(1); + } finally { + process.exit(0); + } +})(); diff --git a/ui/scripts/gen-story-md.js b/ui/scripts/gen-story-md.js new file mode 100755 index 0000000..63a4593 --- /dev/null +++ b/ui/scripts/gen-story-md.js @@ -0,0 +1,45 @@ +#!/usr/bin/env node +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +/* eslint-disable */ +// run this script via yarn in the ui directory: +// yarn gen-story-md some-component +// +// or if the story is for a component in an in-repo-addon or an engine: +// yarn gen-story-md some-component name-of-engine + +const fs = require('fs'); +const jsdoc2md = require('jsdoc-to-markdown'); +var args = process.argv.slice(2); +const name = args[0]; +const addonOrEngine = args[1]; +const inputFile = addonOrEngine + ? `lib/${addonOrEngine}/addon/components/${name}.js` + : `app/components/${name}.js`; +const outputFile = addonOrEngine ? `lib/${addonOrEngine}/stories/${name}.md` : `stories/${name}.md`; + +const component = name + .split('-') + .map((word) => word.charAt(0).toUpperCase() + word.slice(1)) + .join(''); +const options = { + files: inputFile, + template: fs.readFileSync('./lib/story-md.hbs', 'utf8'), + 'example-lang': 'js', +}; +let md = jsdoc2md.renderSync(options); + +const pageBreakIndex = md.lastIndexOf('---'); //this is our last page break + +const seeLinks = `**See** +- [Uses of ${component}](https://github.com/hashicorp/vault/search?l=Handlebars&q=${component}+OR+${name}) +- [${component} Source Code](https://github.com/hashicorp/vault/blob/main/ui/${inputFile}) +`; +const generatedWarning = ` +`; +md = generatedWarning + md.slice(0, pageBreakIndex) + seeLinks + md.slice(pageBreakIndex); + +fs.writeFileSync(outputFile, md); diff --git a/ui/scripts/list-templates.js b/ui/scripts/list-templates.js new file mode 100755 index 0000000..37315ef --- /dev/null +++ b/ui/scripts/list-templates.js @@ -0,0 +1,22 @@ +#!/usr/bin/env node +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +/* eslint-disable */ + +// We need an array in this format for all of the files +//https://github.com/ember-template-lint/ember-cli-template-lint/blob/1bc03444ecf367473108cb28208cb3123199f950/.template-lintrc.js#L9 + +var walkSync = require('walk-sync'); +var templates = walkSync('app', { globs: ['**/*.hbs'] }); + +templates = templates.map((path) => { + // we want the relative path w/o the extension: + // 'app/templates/path/to/file/filename' + return `app/${path.replace(/\.hbs$/, '')}`; +}); + +// stringify because if we don't console won't output the full list lol +console.log(JSON.stringify(templates, null, 2)); diff --git a/ui/scripts/start-vault.js b/ui/scripts/start-vault.js new file mode 100755 index 0000000..f069f8d --- /dev/null +++ b/ui/scripts/start-vault.js @@ -0,0 +1,83 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +/* eslint-env node */ +/* eslint-disable no-console */ +/* eslint-disable no-process-exit */ +/* eslint-disable node/no-extraneous-require */ + +var readline = require('readline'); +const testHelper = require('./test-helper'); + +var output = ''; +var unseal, root, written, initError; + +async function processLines(input, eachLine = () => {}) { + const rl = readline.createInterface({ + input, + terminal: true, + }); + for await (const line of rl) { + eachLine(line); + } +} + +(async function () { + try { + const vault = testHelper.run( + 'vault', + [ + 'server', + '-dev', + '-dev-ha', + '-dev-transactional', + '-dev-root-token-id=root', + '-dev-listen-address=127.0.0.1:9200', + ], + false + ); + processLines(vault.stdout, function (line) { + if (written) { + output = null; + return; + } + output = output + line; + var unsealMatch = output.match(/Unseal Key: (.+)$/m); + if (unsealMatch && !unseal) { + unseal = [unsealMatch[1]]; + } + var rootMatch = output.match(/Root Token: (.+)$/m); + if (rootMatch && !root) { + root = rootMatch[1]; + } + var errorMatch = output.match(/Error initializing core: (.*)$/m); + if (errorMatch) { + initError = errorMatch[1]; + } + if (root && unseal && !written) { + testHelper.writeKeysFile(unseal, root); + written = true; + console.log('VAULT SERVER READY'); + } else if (initError) { + console.log('VAULT SERVER START FAILED'); + console.log( + 'If this is happening, run `export VAULT_LICENSE_PATH=/Users/username/license.hclic` to your valid local vault license filepath, or use OSS Vault' + ); + process.exit(1); + } + }); + try { + await testHelper.run('ember', ['test', ...process.argv.slice(2)]); + } catch (error) { + console.log(error); + process.exit(1); + } finally { + process.exit(0); + } + } catch (error) { + console.log(error); + process.exit(1); + } +})(); diff --git a/ui/scripts/test-helper.js b/ui/scripts/test-helper.js new file mode 100644 index 0000000..8ff4961 --- /dev/null +++ b/ui/scripts/test-helper.js @@ -0,0 +1,59 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +/* eslint-env node */ +/* eslint-disable no-console */ + +const fs = require('fs'); +const path = require('path'); +const chalk = require('chalk'); +const execa = require('execa'); + +/** + * Writes a vault keys file that can be imported in other scripts, that includes the unseal keys and the root token. + * @param unsealKeys an array of unseal keys, must contain at least one key + * @param rootToken the root token + * @param filePath optional file path, if not provided the default path of /tests/helpers/vault-keys.js + * will be used. 
+ */
+function writeKeysFile(unsealKeys, rootToken, filePath) {
+  if (filePath === undefined) {
+    filePath = path.join(process.cwd(), 'tests/helpers/vault-keys.js');
+  }
+  const keys = {};
+  keys.unsealKeys = unsealKeys;
+  keys.rootToken = rootToken;
+
+  fs.writeFile(filePath, `export default ${JSON.stringify(keys, null, 2)}`, (err) => {
+    if (err) throw err;
+  });
+}
+
+/**
+ * Runs the provided command and pipes the process's stdout and stderr to the terminal. Upon completion with
+ * success or error, the child process will be cleaned up.
+ * @param command some command to run
+ * @param args some arguments for the command to run
+ * @param shareStd if true the subprocess created by the command will share the stdout and stderr of the parent
+ * process
+ * @returns {*} The child_process for the executed command, which is also a Promise.
+ */
+function run(command, args = [], shareStd = true) {
+  console.log(chalk.dim('$ ' + command + ' ' + args.join(' ')));
+  // cleanup means that execa will handle stopping the subprocess
+  // inherit all of the stdin/out/err so that testem still works as if you were running it directly
+  if (shareStd) {
+    return execa(command, args, { cleanup: true, stdin: 'inherit', stdout: 'inherit', stderr: 'inherit' });
+  }
+  const p = execa(command, args, { cleanup: true });
+  p.stdout.pipe(process.stdout);
+  p.stderr.pipe(process.stderr);
+  return p;
+}
+
+module.exports = {
+  writeKeysFile: writeKeysFile,
+  run: run,
+};
diff --git a/ui/stories/pagination-controls.md b/ui/stories/pagination-controls.md
new file mode 100644
index 0000000..0d59633
--- /dev/null
+++ b/ui/stories/pagination-controls.md
@@ -0,0 +1,26 @@
+
+
+## PaginationControls
+PaginationControls components are used to paginate through item lists
+
+**Params**
+
+| Param | Type | Default | Description |
+| --- | --- | --- | --- |
+| total | number | | total number of items |
+| [startPage] | number | 1 | initial page number to select |
+| [size] | number | 15 | number of items to display per page |
+| onChange | function | | callback fired on page change |
+
+**Example**
+
+```js
+<PaginationControls @total={{100}} @startPage={{1}} @size={{15}} @onChange={{this.handlePageChange}} />
+```
+
+**See**
+
+- [Uses of PaginationControls](https://github.com/hashicorp/vault/search?l=Handlebars&q=PaginationControls+OR+pagination-controls)
+- [PaginationControls Source Code](https://github.com/hashicorp/vault/blob/master/ui/app/components/pagination-controls.js)
+
+---
diff --git a/ui/testem.enos.js b/ui/testem.enos.js
new file mode 100644
index 0000000..c76635c
--- /dev/null
+++ b/ui/testem.enos.js
@@ -0,0 +1,45 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+/* eslint-env node */
+
+'use strict';
+
+const vault_addr = process.env.VAULT_ADDR;
+console.log('VAULT_ADDR=' + vault_addr); // eslint-disable-line
+
+module.exports = {
+  test_page: 'tests/index.html?hidepassed',
+  tap_quiet_logs: true,
+  tap_failed_tests_only: true,
+  disable_watching: true,
+  launch_in_ci: ['Chrome'],
+  browser_start_timeout: 120,
+  browser_args: {
+    Chrome: {
+      ci: [
+        // --no-sandbox is needed when running Chrome inside a container
+        process.env.CI ?
'--no-sandbox' : null, + '--headless', + '--disable-dev-shm-usage', + '--disable-software-rasterizer', + '--mute-audio', + '--remote-debugging-port=0', + '--window-size=1440,900', + ].filter(Boolean), + }, + }, + proxies: { + '/v1': { + target: vault_addr, + }, + }, +}; + +if (process.env.CI) { + module.exports.reporter = 'xunit'; + module.exports.report_file = 'test-results/qunit/results.xml'; + module.exports.xunit_intermediate_output = true; +} diff --git a/ui/testem.js b/ui/testem.js new file mode 100644 index 0000000..81aa185 --- /dev/null +++ b/ui/testem.js @@ -0,0 +1,40 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +'use strict'; + +module.exports = { + test_page: 'tests/index.html?hidepassed', + tap_quiet_logs: true, + tap_failed_tests_only: true, + disable_watching: true, + launch_in_ci: ['Chrome'], + browser_start_timeout: 120, + browser_args: { + Chrome: { + ci: [ + // --no-sandbox is needed when running Chrome inside a container + process.env.CI ? '--no-sandbox' : null, + '--headless', + '--disable-dev-shm-usage', + '--disable-software-rasterizer', + '--mute-audio', + '--remote-debugging-port=0', + '--window-size=1440,900', + ].filter(Boolean), + }, + }, + proxies: { + '/v1': { + target: 'http://localhost:9200', + }, + }, +}; + +if (process.env.CI) { + module.exports.reporter = 'xunit'; + module.exports.report_file = 'test-results/qunit/results.xml'; + module.exports.xunit_intermediate_output = true; +} diff --git a/ui/tests/.eslintrc.js b/ui/tests/.eslintrc.js new file mode 100644 index 0000000..8c897a3 --- /dev/null +++ b/ui/tests/.eslintrc.js @@ -0,0 +1,19 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +/* eslint-disable no-undef */ +module.exports = { + env: { + embertest: true, + }, + globals: { + server: true, + $: true, + authLogout: false, + authLogin: false, + pollCluster: false, + mountSupportedSecretBackend: false, + }, +}; diff --git a/ui/tests/acceptance/access/identity/_shared-alias-tests.js b/ui/tests/acceptance/access/identity/_shared-alias-tests.js new file mode 100644 index 0000000..5cadcb9 --- /dev/null +++ b/ui/tests/acceptance/access/identity/_shared-alias-tests.js @@ -0,0 +1,109 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { currentRouteName, settled } from '@ember/test-helpers'; +import page from 'vault/tests/pages/access/identity/aliases/add'; +import aliasIndexPage from 'vault/tests/pages/access/identity/aliases/index'; +import aliasShowPage from 'vault/tests/pages/access/identity/aliases/show'; +import createItemPage from 'vault/tests/pages/access/identity/create'; +import showItemPage from 'vault/tests/pages/access/identity/show'; + +export const testAliasCRUD = async function (name, itemType, assert) { + if (itemType === 'groups') { + await createItemPage.createItem(itemType, 'external'); + await settled(); + } else { + await createItemPage.createItem(itemType); + await settled(); + } + let idRow = showItemPage.rows.filterBy('hasLabel').filterBy('rowLabel', 'ID')[0]; + const itemID = idRow.rowValue; + await page.visit({ item_type: itemType, id: itemID }); + await settled(); + await page.editForm.name(name).submit(); + await settled(); + assert.ok( + aliasShowPage.flashMessage.latestMessage.startsWith('Successfully saved'), + `${itemType}: shows a flash message` + ); + + idRow = aliasShowPage.rows.filterBy('hasLabel').filterBy('rowLabel', 'ID')[0]; + const aliasID = idRow.rowValue; + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.identity.aliases.show', + 'navigates to the correct route' + ); + assert.ok(aliasShowPage.nameContains(name), `${itemType}: renders the name on the show page`); + + await aliasIndexPage.visit({ item_type: itemType }); + await settled(); + assert.strictEqual( + aliasIndexPage.items.filterBy('name', name).length, + 1, + `${itemType}: lists the entity in the entity list` + ); + + const item = aliasIndexPage.items.filterBy('name', name)[0]; + await item.menu(); + await settled(); + await aliasIndexPage.delete(); + await settled(); + await aliasIndexPage.confirmDelete(); + await settled(); + assert.ok( + aliasIndexPage.flashMessage.latestMessage.startsWith('Successfully deleted'), + `${itemType}: shows flash message` + ); + + assert.strictEqual( + aliasIndexPage.items.filterBy('id', aliasID).length, + 0, + `${itemType}: the row is deleted` + ); +}; + +export const testAliasDeleteFromForm = async function (name, itemType, assert) { + if (itemType === 'groups') { + await createItemPage.createItem(itemType, 'external'); + await settled(); + } else { + await createItemPage.createItem(itemType); + await settled(); + } + let idRow = showItemPage.rows.filterBy('hasLabel').filterBy('rowLabel', 'ID')[0]; + const itemID = idRow.rowValue; + await page.visit({ item_type: itemType, id: itemID }); + await settled(); + await page.editForm.name(name).submit(); + await settled(); + idRow = aliasShowPage.rows.filterBy('hasLabel').filterBy('rowLabel', 'ID')[0]; + const aliasID = idRow.rowValue; + await aliasShowPage.edit(); + await settled(); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.identity.aliases.edit', + `${itemType}: navigates to edit on create` + ); + await page.editForm.delete(); + await page.editForm.waitForConfirm(); + await page.editForm.confirmDelete(); + await settled(); + assert.ok( + aliasIndexPage.flashMessage.latestMessage.startsWith('Successfully deleted'), + `${itemType}: shows flash message` + ); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.identity.aliases.index', + `${itemType}: navigates to list page on delete` + ); + assert.strictEqual( + aliasIndexPage.items.filterBy('id', aliasID).length, + 0, + `${itemType}: the row does not show in the list` + ); +}; diff 
--git a/ui/tests/acceptance/access/identity/_shared-tests.js b/ui/tests/acceptance/access/identity/_shared-tests.js new file mode 100644 index 0000000..b667e59 --- /dev/null +++ b/ui/tests/acceptance/access/identity/_shared-tests.js @@ -0,0 +1,87 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { settled, currentRouteName, click, waitUntil, find } from '@ember/test-helpers'; +import { selectChoose, clickTrigger } from 'ember-power-select/test-support/helpers'; +import page from 'vault/tests/pages/access/identity/create'; +import showPage from 'vault/tests/pages/access/identity/show'; +import indexPage from 'vault/tests/pages/access/identity/index'; + +export const testCRUD = async (name, itemType, assert) => { + await page.visit({ item_type: itemType }); + await settled(); + await page.editForm.name(name).submit(); + await settled(); + assert.ok( + showPage.flashMessage.latestMessage.startsWith('Successfully saved'), + `${itemType}: shows a flash message` + ); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.identity.show', + `${itemType}: navigates to show on create` + ); + assert.ok(showPage.nameContains(name), `${itemType}: renders the name on the show page`); + + await indexPage.visit({ item_type: itemType }); + await settled(); + assert.strictEqual( + indexPage.items.filterBy('name', name).length, + 1, + `${itemType}: lists the entity in the entity list` + ); + await indexPage.items.filterBy('name', name)[0].menu(); + await waitUntil(() => find('[data-test-item-delete]')); + await indexPage.delete(); + await settled(); + await indexPage.confirmDelete(); + await settled(); + assert.ok( + indexPage.flashMessage.latestMessage.startsWith('Successfully deleted'), + `${itemType}: shows flash message` + ); + assert.strictEqual(indexPage.items.filterBy('name', name).length, 0, `${itemType}: the row is deleted`); +}; + +export const testDeleteFromForm = async (name, itemType, assert) => { + await page.visit({ item_type: itemType }); + await settled(); + await page.editForm.name(name); + await page.editForm.metadataKey('hello'); + await page.editForm.metadataValue('goodbye'); + await clickTrigger('#policies'); + // first option should be "default" + await selectChoose('#policies', '.ember-power-select-option', 0); + await page.editForm.submit(); + await click('[data-test-tab-subnav="policies"]'); + assert.dom('.list-item-row').exists({ count: 1 }, 'One item is under policies'); + await click('[data-test-tab-subnav="metadata"]'); + assert.dom('.info-table-row').hasText('hello goodbye', 'Metadata shows on tab'); + await showPage.edit(); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.identity.edit', + `${itemType}: navigates to edit on create` + ); + await settled(); + await page.editForm.delete(); + await settled(); + await page.editForm.confirmDelete(); + await settled(); + assert.ok( + indexPage.flashMessage.latestMessage.startsWith('Successfully deleted'), + `${itemType}: shows flash message` + ); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.identity.index', + `${itemType}: navigates to list page on delete` + ); + assert.strictEqual( + indexPage.items.filterBy('name', name).length, + 0, + `${itemType}: the row does not show in the list` + ); +}; diff --git a/ui/tests/acceptance/access/identity/entities/aliases/create-test.js b/ui/tests/acceptance/access/identity/entities/aliases/create-test.js new file mode 100644 index 0000000..6f81e33 --- /dev/null +++ 
b/ui/tests/acceptance/access/identity/entities/aliases/create-test.js @@ -0,0 +1,34 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, skip, test } from 'qunit'; +import { settled } from '@ember/test-helpers'; +import { setupApplicationTest } from 'ember-qunit'; +import { testAliasCRUD, testAliasDeleteFromForm } from '../../_shared-alias-tests'; +import authPage from 'vault/tests/pages/auth'; + +module('Acceptance | /access/identity/entities/aliases/add', function (hooks) { + // TODO come back and figure out why this is failing. Seems to be a race condition + setupApplicationTest(hooks); + + hooks.beforeEach(async function () { + await authPage.login(); + return; + }); + + skip('it allows create, list, delete of an entity alias', async function (assert) { + assert.expect(6); + const name = `alias-${Date.now()}`; + await testAliasCRUD(name, 'entities', assert); + await settled(); + }); + + test('it allows delete from the edit form', async function (assert) { + assert.expect(4); + const name = `alias-${Date.now()}`; + await testAliasDeleteFromForm(name, 'entities', assert); + await settled(); + }); +}); diff --git a/ui/tests/acceptance/access/identity/entities/create-test.js b/ui/tests/acceptance/access/identity/entities/create-test.js new file mode 100644 index 0000000..86589f1 --- /dev/null +++ b/ui/tests/acceptance/access/identity/entities/create-test.js @@ -0,0 +1,40 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { currentRouteName } from '@ember/test-helpers'; +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import page from 'vault/tests/pages/access/identity/create'; +import { testCRUD, testDeleteFromForm } from '../_shared-tests'; +import authPage from 'vault/tests/pages/auth'; + +module('Acceptance | /access/identity/entities/create', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(function () { + return authPage.login(); + }); + + test('it visits the correct page', async function (assert) { + await page.visit({ item_type: 'entities' }); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.identity.create', + 'navigates to the correct route' + ); + }); + + test('it allows create, list, delete of an entity', async function (assert) { + assert.expect(6); + const name = `entity-${Date.now()}`; + await testCRUD(name, 'entities', assert); + }); + + test('it can be deleted from the edit form', async function (assert) { + assert.expect(6); + const name = `entity-${Date.now()}`; + await testDeleteFromForm(name, 'entities', assert); + }); +}); diff --git a/ui/tests/acceptance/access/identity/entities/index-test.js b/ui/tests/acceptance/access/identity/entities/index-test.js new file mode 100644 index 0000000..79dbbfe --- /dev/null +++ b/ui/tests/acceptance/access/identity/entities/index-test.js @@ -0,0 +1,36 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { currentRouteName } from '@ember/test-helpers'; +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import page from 'vault/tests/pages/access/identity/index'; +import authPage from 'vault/tests/pages/auth'; + +module('Acceptance | /access/identity/entities', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(function () { + return authPage.login(); + }); + + test('it renders the entities page', async function (assert) { + await page.visit({ item_type: 'entities' }); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.identity.index', + 'navigates to the correct route' + ); + }); + + test('it renders the groups page', async function (assert) { + await page.visit({ item_type: 'groups' }); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.identity.index', + 'navigates to the correct route' + ); + }); +}); diff --git a/ui/tests/acceptance/access/identity/groups/aliases/create-test.js b/ui/tests/acceptance/access/identity/groups/aliases/create-test.js new file mode 100644 index 0000000..486464a --- /dev/null +++ b/ui/tests/acceptance/access/identity/groups/aliases/create-test.js @@ -0,0 +1,35 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, skip, test } from 'qunit'; +import { settled } from '@ember/test-helpers'; +import { setupApplicationTest } from 'ember-qunit'; +import { testAliasCRUD, testAliasDeleteFromForm } from '../../_shared-alias-tests'; +import authPage from 'vault/tests/pages/auth'; + +module('Acceptance | /access/identity/groups/aliases/add', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(async function () { + await authPage.login(); + return; + }); + + skip('it allows create, list, delete of an entity alias', async function (assert) { + // TODO figure out what is wrong with this test + assert.expect(6); + const name = `alias-${Date.now()}`; + await testAliasCRUD(name, 'groups', assert); + await settled(); + }); + + test('it allows delete from the edit form', async function (assert) { + // TODO figure out what is wrong with this test + assert.expect(4); + const name = `alias-${Date.now()}`; + await testAliasDeleteFromForm(name, 'groups', assert); + await settled(); + }); +}); diff --git a/ui/tests/acceptance/access/identity/groups/create-test.js b/ui/tests/acceptance/access/identity/groups/create-test.js new file mode 100644 index 0000000..4e54bec --- /dev/null +++ b/ui/tests/acceptance/access/identity/groups/create-test.js @@ -0,0 +1,40 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import { currentRouteName } from '@ember/test-helpers';
+import { module, test } from 'qunit';
+import { setupApplicationTest } from 'ember-qunit';
+import page from 'vault/tests/pages/access/identity/create';
+import { testCRUD, testDeleteFromForm } from '../_shared-tests';
+import authPage from 'vault/tests/pages/auth';
+
+module('Acceptance | /access/identity/groups/create', function (hooks) {
+  setupApplicationTest(hooks);
+
+  hooks.beforeEach(function () {
+    return authPage.login();
+  });
+
+  test('it visits the correct page', async function (assert) {
+    await page.visit({ item_type: 'groups' });
+    assert.strictEqual(
+      currentRouteName(),
+      'vault.cluster.access.identity.create',
+      'navigates to the correct route'
+    );
+  });
+
+  test('it allows create, list, delete of a group', async function (assert) {
+    assert.expect(6);
+    const name = `group-${Date.now()}`;
+    await testCRUD(name, 'groups', assert);
+  });
+
+  test('it can be deleted from the group edit form', async function (assert) {
+    assert.expect(6);
+    const name = `group-${Date.now()}`;
+    await testDeleteFromForm(name, 'groups', assert);
+  });
+});
diff --git a/ui/tests/acceptance/access/methods-test.js b/ui/tests/acceptance/access/methods-test.js
new file mode 100644
index 0000000..b44d6f2
--- /dev/null
+++ b/ui/tests/acceptance/access/methods-test.js
@@ -0,0 +1,74 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import { currentRouteName } from '@ember/test-helpers';
+import { clickTrigger } from 'ember-power-select/test-support/helpers';
+import { module, test } from 'qunit';
+import { setupApplicationTest } from 'ember-qunit';
+import { create } from 'ember-cli-page-object';
+import page from 'vault/tests/pages/access/methods';
+import authEnable from 'vault/tests/pages/settings/auth/enable';
+import authPage from 'vault/tests/pages/auth';
+import ss from 'vault/tests/pages/components/search-select';
+import consoleClass from 'vault/tests/pages/components/console/ui-panel';
+
+import { v4 as uuidv4 } from 'uuid';
+
+const consoleComponent = create(consoleClass);
+const searchSelect = create(ss);
+
+module('Acceptance | auth-methods list view', function (hooks) {
+  setupApplicationTest(hooks);
+
+  hooks.beforeEach(function () {
+    this.uid = uuidv4();
+    return authPage.login();
+  });
+
+  test('it navigates to auth method', async function (assert) {
+    await page.visit();
+    assert.strictEqual(currentRouteName(), 'vault.cluster.access.methods', 'navigates to the correct route');
+    assert.ok(page.methodsLink.isActive, 'the first link is active');
+    assert.strictEqual(page.methodsLink.text, 'Authentication methods');
+  });
+
+  test('it filters by name and auth type', async function (assert) {
+    assert.expect(4);
+    const authPath1 = `userpass-1-${this.uid}`;
+    const authPath2 = `userpass-2-${this.uid}`;
+    const type = 'userpass';
+    await authEnable.visit();
+    await authEnable.enable(type, authPath1);
+    await authEnable.visit();
+    await authEnable.enable(type, authPath2);
+    await page.visit();
+
+    // filter by auth type
+    await clickTrigger('#filter-by-auth-type');
+    await searchSelect.options.objectAt(0).click();
+
+    const rows = document.querySelectorAll('[data-test-auth-backend-link]');
+    const rowsUserpass = Array.from(rows).filter((row) => row.innerText.includes('userpass'));
+
+    assert.strictEqual(rows.length, rowsUserpass.length, 'all rows returned are userpass');
+
+    // filter by name
+    await clickTrigger('#filter-by-auth-name');
+    const
firstItemToSelect = searchSelect.options.objectAt(0).text; + await searchSelect.options.objectAt(0).click(); + const singleRow = document.querySelectorAll('[data-test-auth-backend-link]'); + + assert.strictEqual(singleRow.length, 1, 'returns only one row'); + assert.dom(singleRow[0]).includesText(firstItemToSelect, 'shows the filtered by auth name'); + // clear filter by engine name + await searchSelect.deleteButtons.objectAt(1).click(); + const rowsAgain = document.querySelectorAll('[data-test-auth-backend-link]'); + assert.ok(rowsAgain.length > 1, 'filter has been removed'); + + // cleanup + await consoleComponent.runCommands([`delete sys/auth/${authPath1}`]); + await consoleComponent.runCommands([`delete sys/auth/${authPath2}`]); + }); +}); diff --git a/ui/tests/acceptance/access/namespaces/index-test.js b/ui/tests/acceptance/access/namespaces/index-test.js new file mode 100644 index 0000000..6a756a1 --- /dev/null +++ b/ui/tests/acceptance/access/namespaces/index-test.js @@ -0,0 +1,45 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { currentRouteName } from '@ember/test-helpers'; +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import page from 'vault/tests/pages/access/namespaces/index'; +import authPage from 'vault/tests/pages/auth'; +import logout from 'vault/tests/pages/logout'; + +module('Acceptance | Enterprise | /access/namespaces', function (hooks) { + setupApplicationTest(hooks); + setupMirage(hooks); + + hooks.beforeEach(function () { + return authPage.login(); + }); + + hooks.afterEach(function () { + return logout.visit(); + }); + + test('it navigates to namespaces page', async function (assert) { + assert.expect(1); + await page.visit(); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.namespaces.index', + 'navigates to the correct route' + ); + }); + + test('it should render correct number of namespaces', async function (assert) { + assert.expect(3); + await page.visit(); + const store = this.owner.lookup('service:store'); + // Default page size is 15 + assert.strictEqual(store.peekAll('namespace').length, 15, 'Store has 15 namespaces records'); + assert.dom('.list-item-row').exists({ count: 15 }); + assert.dom('[data-test-list-view-pagination]').exists(); + }); +}); diff --git a/ui/tests/acceptance/auth-list-test.js b/ui/tests/acceptance/auth-list-test.js new file mode 100644 index 0000000..cd106aa --- /dev/null +++ b/ui/tests/acceptance/auth-list-test.js @@ -0,0 +1,143 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+/* eslint qunit/no-conditional-assertions: "warn" */
+import { click, currentURL, fillIn, settled, visit, triggerKeyEvent, find, waitUntil } from '@ember/test-helpers';
+import { module, test } from 'qunit';
+import { setupApplicationTest } from 'ember-qunit';
+import { v4 as uuidv4 } from 'uuid';
+
+import authPage from 'vault/tests/pages/auth';
+import logout from 'vault/tests/pages/logout';
+import enablePage from 'vault/tests/pages/settings/auth/enable';
+import { supportedAuthBackends } from 'vault/helpers/supported-auth-backends';
+import { supportedManagedAuthBackends } from 'vault/helpers/supported-managed-auth-backends';
+import { create } from 'ember-cli-page-object';
+import consoleClass from 'vault/tests/pages/components/console/ui-panel';
+
+const consoleComponent = create(consoleClass);
+
+module('Acceptance | auth backend list', function (hooks) {
+  setupApplicationTest(hooks);
+
+  hooks.beforeEach(function () {
+    return authPage.login();
+  });
+
+  hooks.afterEach(function () {
+    return logout.visit();
+  });
+
+  test('userpass auth backend', async function (assert) {
+    let n = Math.random();
+    const path1 = `userpass-${++n}`;
+    const path2 = `userpass-${++n}`;
+    const user1 = 'user1';
+    const user2 = 'user2';
+
+    // enable the first userpass method with one username
+    await enablePage.enable('userpass', path1);
+    await settled();
+    await click('[data-test-save-config="true"]');
+
+    await visit(`/vault/access/${path1}/item/user/create`);
+    await waitUntil(() => find('[data-test-input="username"]') && find('[data-test-textarea]'));
+    await fillIn('[data-test-input="username"]', user1);
+    await triggerKeyEvent('[data-test-input="username"]', 'keyup', 65);
+    await fillIn('[data-test-textarea]', user1);
+    await triggerKeyEvent('[data-test-textarea]', 'keyup', 65);
+    await click('[data-test-save-config="true"]');
+
+    // enable the second userpass method with another username
+    await visit(`/vault/settings/auth/enable`);
+
+    await click('[data-test-mount-type="userpass"]');
+
+    await click('[data-test-mount-next]');
+
+    await fillIn('[data-test-input="path"]', path2);
+
+    await click('[data-test-mount-submit="true"]');
+
+    await click('[data-test-save-config="true"]');
+
+    await click(`[data-test-auth-backend-link="${path2}"]`);
+
+    await click('[data-test-entity-create-link="user"]');
+
+    await fillIn('[data-test-input="username"]', user2);
+    await triggerKeyEvent('[data-test-input="username"]', 'keyup', 65);
+    await fillIn('[data-test-textarea]', user2);
+    await triggerKeyEvent('[data-test-textarea]', 'keyup', 65);
+
+    await click('[data-test-save-config="true"]');
+
+    // confirm that the user was created. There was a bug where the apiPath was not being updated when toggling between auth routes
+    assert
+      .dom('[data-test-list-item-content]')
+      .hasText(user2, 'user just created shows in current auth list');
+
+    // confirm that the first auth method still shows user1. There was a bug where the list was not updated when toggling between auth routes
+    await visit(`/vault/access/${path1}/item/user`);
+
+    assert
+      .dom('[data-test-list-item-content]')
+      .hasText(user1, 'first user created shows in current auth list');
+  });
+
+  test('auth methods are linkable and link to correct view', async function (assert) {
+    assert.expect(16);
+    const uid = uuidv4();
+    await visit('/vault/access');
+
+    const supportManaged = supportedManagedAuthBackends();
+    const backends = supportedAuthBackends();
+    for (const backend of backends) {
+      const { type } = backend;
+      const path = `auth-list-${type}-${uid}`;
+      if (type !== 'token') {
+        await enablePage.enable(type, path);
+      }
+      await settled();
+      await visit('/vault/access');
+
+      // all auth methods should be linkable
+      await click(`[data-test-auth-backend-link="${type === 'token' ? type : path}"]`);
+      if (!supportManaged.includes(type)) {
+        assert.dom('[data-test-auth-section-tab]').exists({ count: 1 });
+        assert
+          .dom('[data-test-auth-section-tab]')
+          .hasText('Configuration', `only shows configuration tab for ${type} auth method`);
+        assert.dom('[data-test-doc-link] .doc-link').exists(`includes doc link for ${type} auth method`);
+      } else {
+        let expectedTabs = 2;
+        if (type === 'ldap' || type === 'okta') {
+          expectedTabs = 3;
+        }
+        assert
+          .dom('[data-test-auth-section-tab]')
+          .exists({ count: expectedTabs }, `has management tabs for ${type} auth method`);
+        // cleanup method
+        await consoleComponent.runCommands(`delete sys/auth/${path}`);
+      }
+    }
+  });
+
+  test('enterprise: token config within namespace', async function (assert) {
+    const ns = 'ns-wxyz';
+    await consoleComponent.runCommands(`write sys/namespaces/${ns} -f`);
+    await authPage.loginNs(ns);
+    // go directly to token configure route
+    await visit('/vault/settings/auth/configure/token/options');
+    await fillIn('[data-test-input="description"]', 'My custom description');
+    await click('[data-test-save-config="true"]');
+    assert.strictEqual(currentURL(), '/vault/access', 'successfully saves and navigates away');
+    await click('[data-test-auth-backend-link="token"]');
+    assert
+      .dom('[data-test-row-value="Description"]')
+      .hasText('My custom description', 'description was saved');
+    await consoleComponent.runCommands(`delete sys/namespaces/${ns}`);
+  });
+});
diff --git a/ui/tests/acceptance/auth-test.js b/ui/tests/acceptance/auth-test.js
new file mode 100644
index 0000000..5496b5c
--- /dev/null
+++ b/ui/tests/acceptance/auth-test.js
@@ -0,0 +1,110 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0 + */ + +/* eslint qunit/no-conditional-assertions: "warn" */ +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import sinon from 'sinon'; +import { click, currentURL, visit, waitUntil, find } from '@ember/test-helpers'; +import { supportedAuthBackends } from 'vault/helpers/supported-auth-backends'; +import authForm from '../pages/components/auth-form'; +import jwtForm from '../pages/components/auth-jwt'; +import { create } from 'ember-cli-page-object'; +import apiStub from 'vault/tests/helpers/noop-all-api-requests'; +import logout from 'vault/tests/pages/logout'; + +const component = create(authForm); +const jwtComponent = create(jwtForm); + +module('Acceptance | auth', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(function () { + this.clock = sinon.useFakeTimers({ + now: Date.now(), + shouldAdvanceTime: true, + }); + this.server = apiStub({ usePassthrough: true }); + return logout.visit(); + }); + + hooks.afterEach(function () { + this.clock.restore(); + this.server.shutdown(); + return logout.visit(); + }); + + test('auth query params', async function (assert) { + const backends = supportedAuthBackends(); + assert.expect(backends.length + 1); + await visit('/vault/auth'); + assert.strictEqual(currentURL(), '/vault/auth?with=token'); + for (const backend of backends.reverse()) { + await component.selectMethod(backend.type); + assert.strictEqual( + currentURL(), + `/vault/auth?with=${backend.type}`, + `has the correct URL for ${backend.type}` + ); + } + }); + + test('it clears token when changing selected auth method', async function (assert) { + await visit('/vault/auth'); + assert.strictEqual(currentURL(), '/vault/auth?with=token'); + await component.token('token').selectMethod('github'); + await component.selectMethod('token'); + assert.strictEqual(component.tokenValue, '', 'it clears the token value when toggling methods'); + }); + + test('it sends the right attributes when authenticating', async function (assert) { + assert.expect(8); + const backends = supportedAuthBackends(); + await visit('/vault/auth'); + for (const backend of backends.reverse()) { + await component.selectMethod(backend.type); + if (backend.type === 'github') { + await component.token('token'); + } + if (backend.type === 'jwt' || backend.type === 'oidc') { + await jwtComponent.role('test'); + } + await component.login(); + const lastRequest = this.server.passthroughRequests[this.server.passthroughRequests.length - 1]; + let body = JSON.parse(lastRequest.requestBody); + // Note: x-vault-token used to be lowercase prior to upgrade + if (backend.type === 'token') { + assert.ok( + Object.keys(lastRequest.requestHeaders).includes('X-Vault-Token'), + 'token uses vault token header' + ); + } else if (backend.type === 'github') { + assert.ok(Object.keys(body).includes('token'), 'GitHub includes token'); + } else if (backend.type === 'jwt' || backend.type === 'oidc') { + const authReq = this.server.passthroughRequests[this.server.passthroughRequests.length - 2]; + body = JSON.parse(authReq.requestBody); + assert.ok(Object.keys(body).includes('role'), `${backend.type} includes role`); + } else { + assert.ok(Object.keys(body).includes('password'), `${backend.type} includes password`); + } + } + }); + + test('it shows the push notification warning after submit', async function (assert) { + assert.expect(1); + + this.server.get('/v1/auth/token/lookup-self', async () => { + assert.ok( + await waitUntil(() => 
find('[data-test-auth-message="push"]')),
+        'shows push notification message'
+      );
+      return [204, { 'Content-Type': 'application/json' }, JSON.stringify({})];
+    });
+
+    await visit('/vault/auth');
+    await component.selectMethod('token');
+    await click('[data-test-auth-submit]');
+  });
+});
diff --git a/ui/tests/acceptance/aws-test.js b/ui/tests/acceptance/aws-test.js
new file mode 100644
index 0000000..ef46972
--- /dev/null
+++ b/ui/tests/acceptance/aws-test.js
@@ -0,0 +1,108 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import { click, fillIn, findAll, currentURL, find, settled, waitUntil } from '@ember/test-helpers';
+import { module, test } from 'qunit';
+import { setupApplicationTest } from 'ember-qunit';
+import { v4 as uuidv4 } from 'uuid';
+
+import authPage from 'vault/tests/pages/auth';
+import logout from 'vault/tests/pages/logout';
+import enablePage from 'vault/tests/pages/settings/mount-secret-backend';
+
+module('Acceptance | aws secret backend', function (hooks) {
+  setupApplicationTest(hooks);
+
+  hooks.beforeEach(function () {
+    this.uid = uuidv4();
+    return authPage.login();
+  });
+
+  hooks.afterEach(function () {
+    return logout.visit();
+  });
+
+  const POLICY = {
+    Version: '2012-10-17',
+    Statement: [
+      {
+        Effect: 'Allow',
+        Action: 'iam:*',
+        Resource: '*',
+      },
+    ],
+  };
+  test('aws backend', async function (assert) {
+    assert.expect(12);
+    const path = `aws-${this.uid}`;
+    const roleName = 'awsrole';
+
+    await enablePage.enable('aws', path);
+    await settled();
+    await click('[data-test-configuration-tab]');
+
+    await click('[data-test-secret-backend-configure]');
+
+    assert.strictEqual(currentURL(), `/vault/settings/secrets/configure/${path}`);
+    assert.ok(findAll('[data-test-aws-root-creds-form]').length, 'renders the empty root creds form');
+    assert.ok(findAll('[data-test-aws-link="root-creds"]').length, 'renders the root creds link');
+    assert.ok(findAll('[data-test-aws-link="leases"]').length, 'renders the leases config link');
+
+    await fillIn('[data-test-aws-input="accessKey"]', 'foo');
+    await fillIn('[data-test-aws-input="secretKey"]', 'bar');
+
+    await click('[data-test-aws-input="root-save"]');
+
+    assert.ok(
+      find('[data-test-flash-message]').textContent.trim(),
+      `The backend configuration saved successfully!`
+    );
+
+    await click('[data-test-aws-link="leases"]');
+
+    await click('[data-test-aws-input="lease-save"]');
+
+    assert.ok(
+      find('[data-test-flash-message]').textContent.trim(),
+      `The backend configuration saved successfully!`
+    );
+
+    await click('[data-test-backend-view-link]');
+
+    assert.strictEqual(currentURL(), `/vault/secrets/${path}/list`, `navigates to the roles list`);
+
+    await click('[data-test-secret-create]');
+
+    assert.ok(
+      find('[data-test-secret-header]').textContent.includes('AWS Role'),
+      `aws: renders the create page`
+    );
+
+    await fillIn('[data-test-input="name"]', roleName);
+
+    findAll('.CodeMirror')[0].CodeMirror.setValue(JSON.stringify(POLICY));
+
+    // save the role
+    await click('[data-test-role-aws-create]');
+    await waitUntil(() => currentURL() === `/vault/secrets/${path}/show/${roleName}`); // flaky without this
+    assert.strictEqual(
+      currentURL(),
+      `/vault/secrets/${path}/show/${roleName}`,
+      `aws: navigates to the show page on creation`
+    );
+
+    await click('[data-test-secret-root-link]');
+
+    assert.strictEqual(currentURL(), `/vault/secrets/${path}/list`);
+    assert.ok(findAll(`[data-test-secret-link="${roleName}"]`).length, `aws: role shows in the list`);
+
+    
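// Editor's note (not part of the upstream diff): the policy JSON above is
+    // entered by grabbing the CodeMirror instance off the rendered editor and
+    // calling setValue() directly, because fillIn() cannot reach CodeMirror's
+    // hidden textarea. A reusable helper along these lines (the name
+    // `setEditorValue` is illustrative, not an upstream utility) would be:
+    //
+    //   const setEditorValue = (value, index = 0) =>
+    //     findAll('.CodeMirror')[index].CodeMirror.setValue(value);
+    //   setEditorValue(JSON.stringify(POLICY));
+    //
+    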
//and delete + await click(`[data-test-secret-link="${roleName}"] [data-test-popup-menu-trigger]`); + await waitUntil(() => find(`[data-test-aws-role-delete="${roleName}"]`)); // flaky without + await click(`[data-test-aws-role-delete="${roleName}"]`); + await click(`[data-test-confirm-button]`); + assert.dom(`[data-test-secret-link="${roleName}"]`).doesNotExist(`aws: role is no longer in the list`); + }); +}); diff --git a/ui/tests/acceptance/client-dashboard-test.js b/ui/tests/acceptance/client-dashboard-test.js new file mode 100644 index 0000000..6da3987 --- /dev/null +++ b/ui/tests/acceptance/client-dashboard-test.js @@ -0,0 +1,393 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import sinon from 'sinon'; +import { visit, currentURL, click, findAll, find, settled } from '@ember/test-helpers'; +import { setupApplicationTest } from 'ember-qunit'; +import authPage from 'vault/tests/pages/auth'; +import { addMonths, formatRFC3339, startOfMonth, subMonths } from 'date-fns'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import ENV from 'vault/config/environment'; +import { SELECTORS, overrideResponse } from '../helpers/clients'; +import { create } from 'ember-cli-page-object'; +import ss from 'vault/tests/pages/components/search-select'; +import { clickTrigger } from 'ember-power-select/test-support/helpers'; +import { ARRAY_OF_MONTHS } from 'core/utils/date-formatters'; +import { formatNumber } from 'core/helpers/format-number'; +import timestamp from 'core/utils/timestamp'; + +const searchSelect = create(ss); + +const STATIC_NOW = new Date('2023-01-13T14:15:00'); +// for testing, we're in the middle of a license/billing period +const LICENSE_START = startOfMonth(subMonths(STATIC_NOW, 6)); // 2022-07-01 +// upgrade happened 1 month after license start +const UPGRADE_DATE = addMonths(LICENSE_START, 1); // 2022-08-01 + +module('Acceptance | client counts dashboard tab', function (hooks) { + setupApplicationTest(hooks); + setupMirage(hooks); + + hooks.before(function () { + sinon.stub(timestamp, 'now').callsFake(() => STATIC_NOW); + ENV['ember-cli-mirage'].handler = 'clients'; + }); + + hooks.beforeEach(function () { + this.store = this.owner.lookup('service:store'); + }); + + hooks.after(function () { + timestamp.now.restore(); + ENV['ember-cli-mirage'].handler = null; + }); + + hooks.beforeEach(function () { + return authPage.login(); + }); + + test('shows warning when config off, no data', async function (assert) { + assert.expect(4); + this.server.get('sys/internal/counters/activity', () => overrideResponse(204)); + this.server.get('sys/internal/counters/config', () => { + return { + request_id: 'some-config-id', + data: { + default_report_months: 12, + enabled: 'default-disable', + queries_available: false, + retention_months: 24, + }, + }; + }); + await visit('/vault/clients/dashboard'); + assert.strictEqual(currentURL(), '/vault/clients/dashboard'); + assert.dom(SELECTORS.dashboardActiveTab).hasText('Dashboard', 'dashboard tab is active'); + assert.dom(SELECTORS.emptyStateTitle).hasText('Data tracking is disabled'); + assert.dom(SELECTORS.filterBar).doesNotExist('Filter bar is hidden when no data available'); + }); + + test('shows empty state when config enabled and no data', async function (assert) { + assert.expect(4); + this.server.get('sys/internal/counters/activity', () => overrideResponse(204)); + this.server.get('sys/internal/counters/config', () => { + return { + request_id: 
'some-config-id', + data: { + default_report_months: 12, + enabled: 'default-enable', + retention_months: 24, + }, + }; + }); + await visit('/vault/clients/dashboard'); + assert.strictEqual(currentURL(), '/vault/clients/dashboard'); + assert.dom(SELECTORS.dashboardActiveTab).hasText('Dashboard', 'dashboard tab is active'); + assert.dom(SELECTORS.emptyStateTitle).hasTextContaining('No data received'); + assert.dom(SELECTORS.filterBar).doesNotExist('Does not show filter bar'); + }); + + test('visiting dashboard tab config on and data with mounts', async function (assert) { + assert.expect(8); + await visit('/vault/clients/dashboard'); + assert.strictEqual(currentURL(), '/vault/clients/dashboard'); + assert + .dom(SELECTORS.dateDisplay) + .hasText('July 2022', 'billing start month is correctly parsed from license'); + assert + .dom(SELECTORS.rangeDropdown) + .hasText(`Jul 2022 - Jan 2023`, 'Date range shows dates correctly parsed activity response'); + assert.dom(SELECTORS.attributionBlock).exists('Shows attribution area'); + assert.dom(SELECTORS.monthlyUsageBlock).exists('Shows monthly usage block'); + assert + .dom(SELECTORS.runningTotalMonthlyCharts) + .exists('Shows running totals with monthly breakdown charts'); + assert + .dom(find('[data-test-line-chart="x-axis-labels"] g.tick text')) + .hasText(`7/22`, 'x-axis labels start with billing start date'); + assert.strictEqual( + findAll('[data-test-line-chart="plot-point"]').length, + 6, + `line chart plots 6 points to match query` + ); + }); + + test('updates correctly when querying date ranges', async function (assert) { + assert.expect(27); + await visit('/vault/clients/dashboard'); + assert.strictEqual(currentURL(), '/vault/clients/dashboard'); + // query for single, historical month with no new counts + await click(SELECTORS.rangeDropdown); + await click('[data-test-show-calendar]'); + if (parseInt(find('[data-test-display-year]').innerText) > LICENSE_START.getFullYear()) { + await click('[data-test-previous-year]'); + } + await click(find(`[data-test-calendar-month=${ARRAY_OF_MONTHS[LICENSE_START.getMonth()]}]`)); + assert.dom('[data-test-usage-stats]').exists('total usage stats show'); + assert + .dom(SELECTORS.runningTotalMonthStats) + .doesNotExist('running total single month stat boxes do not show'); + assert + .dom(SELECTORS.runningTotalMonthlyCharts) + .doesNotExist('running total month over month charts do not show'); + assert.dom(SELECTORS.monthlyUsageBlock).doesNotExist('does not show monthly usage block'); + assert.dom(SELECTORS.attributionBlock).exists('attribution area shows'); + assert + .dom('[data-test-chart-container="new-clients"] [data-test-component="empty-state"]') + .exists('new client attribution has empty state'); + assert + .dom('[data-test-empty-state-subtext]') + .hasText('There are no new clients for this namespace during this time period. 
'); + assert.dom('[data-test-chart-container="total-clients"]').exists('total client attribution chart shows'); + + // reset to billing period + await click(SELECTORS.rangeDropdown); + await click('[data-test-current-billing-period]'); + + // change billing start to month/year of first upgrade + await click('[data-test-start-date-editor] button'); + await click(SELECTORS.monthDropdown); + await click(`[data-test-dropdown-month="${ARRAY_OF_MONTHS[UPGRADE_DATE.getMonth()]}"]`); + await click(SELECTORS.yearDropdown); + await click(`[data-test-dropdown-year="${UPGRADE_DATE.getFullYear()}"]`); + await click('[data-test-date-dropdown-submit]'); + assert.dom(SELECTORS.attributionBlock).exists('Shows attribution area'); + assert.dom(SELECTORS.monthlyUsageBlock).exists('Shows monthly usage block'); + assert + .dom(SELECTORS.runningTotalMonthlyCharts) + .exists('Shows running totals with monthly breakdown charts'); + assert + .dom(find('[data-test-line-chart="x-axis-labels"] g.tick text')) + .hasText(`8/22`, 'x-axis labels start with updated billing start month'); + assert.strictEqual( + findAll('[data-test-line-chart="plot-point"]').length, + 6, + `line chart plots 6 points to match query` + ); + + // query three months ago (Oct 2022) + await click(SELECTORS.rangeDropdown); + await click('[data-test-show-calendar]'); + await click('[data-test-previous-year]'); + await click(find(`[data-test-calendar-month="October"]`)); + + assert.dom(SELECTORS.attributionBlock).exists('Shows attribution area'); + assert.dom(SELECTORS.monthlyUsageBlock).exists('Shows monthly usage block'); + assert + .dom(SELECTORS.runningTotalMonthlyCharts) + .exists('Shows running totals with monthly breakdown charts'); + assert.strictEqual( + findAll('[data-test-line-chart="plot-point"]').length, + 3, + `line chart plots 3 points to match query` + ); + const xAxisLabels = findAll('[data-test-line-chart="x-axis-labels"] g.tick text'); + assert + .dom(xAxisLabels[xAxisLabels.length - 1]) + .hasText(`10/22`, 'x-axis labels end with queried end month'); + + // query for single, historical month (upgrade month) + await click(SELECTORS.rangeDropdown); + await click('[data-test-show-calendar]'); + assert.dom('[data-test-display-year]').hasText('2022'); + await click(find(`[data-test-calendar-month="August"]`)); + + assert.dom(SELECTORS.runningTotalMonthStats).exists('running total single month stat boxes show'); + assert + .dom(SELECTORS.runningTotalMonthlyCharts) + .doesNotExist('running total month over month charts do not show'); + assert.dom(SELECTORS.monthlyUsageBlock).doesNotExist('Does not show monthly usage block'); + assert.dom(SELECTORS.attributionBlock).exists('attribution area shows'); + assert.dom('[data-test-chart-container="new-clients"]').exists('new client attribution chart shows'); + assert.dom('[data-test-chart-container="total-clients"]').exists('total client attribution chart shows'); + + // reset to billing period + await click(SELECTORS.rangeDropdown); + await click('[data-test-current-billing-period]'); + // query month older than count start date + await click('[data-test-start-date-editor] button'); + await click(SELECTORS.monthDropdown); + await click(`[data-test-dropdown-month="${ARRAY_OF_MONTHS[LICENSE_START.getMonth()]}"]`); + await click(SELECTORS.yearDropdown); + await click(`[data-test-dropdown-year="${LICENSE_START.getFullYear() - 3}"]`); + await click('[data-test-date-dropdown-submit]'); + assert + .dom('[data-test-alert-banner="alert"]') + .hasTextContaining( + `We only have data from January 2022`, 
+        'warning banner displays that date queried was prior to count start date'
+      );
+  });
+
+  test('dashboard filters correctly with full data', async function (assert) {
+    assert.expect(21);
+    await visit('/vault/clients/dashboard');
+    assert.strictEqual(currentURL(), '/vault/clients/dashboard', 'clients/dashboard URL is correct');
+    assert.dom(SELECTORS.dashboardActiveTab).hasText('Dashboard', 'dashboard tab is active');
+    assert
+      .dom(SELECTORS.runningTotalMonthlyCharts)
+      .exists('Shows running totals with monthly breakdown charts');
+    assert.dom(SELECTORS.attributionBlock).exists('Shows attribution area');
+    assert.dom(SELECTORS.monthlyUsageBlock).exists('Shows monthly usage block');
+    const response = await this.store.peekRecord('clients/activity', 'some-activity-id');
+
+    // FILTER BY NAMESPACE
+    await clickTrigger();
+    await searchSelect.options.objectAt(0).click();
+    await settled();
+    const topNamespace = response.byNamespace[0];
+    const topMount = topNamespace.mounts[0];
+    assert.ok(true, 'Filter by first namespace');
+    assert.strictEqual(
+      find(SELECTORS.selectedNs).innerText.toLowerCase(),
+      topNamespace.label,
+      'selects top namespace'
+    );
+    assert.dom('[data-test-top-attribution]').includesText('Top auth method');
+    assert
+      .dom('[data-test-running-total-entity] p')
+      .includesText(`${formatNumber([topNamespace.entity_clients])}`, 'total entity clients is accurate');
+    assert
+      .dom('[data-test-running-total-nonentity] p')
+      .includesText(
+        `${formatNumber([topNamespace.non_entity_clients])}`,
+        'total non-entity clients is accurate'
+      );
+    assert
+      .dom('[data-test-attribution-clients] p')
+      .includesText(`${formatNumber([topMount.clients])}`, 'top attribution clients accurate');
+
+    // FILTER BY AUTH METHOD
+    await clickTrigger();
+    await searchSelect.options.objectAt(0).click();
+    await settled();
+    assert.ok(true, 'Filter by first auth method');
+    assert.strictEqual(
+      find(SELECTORS.selectedAuthMount).innerText.toLowerCase(),
+      topMount.label,
+      'selects top mount'
+    );
+    assert
+      .dom('[data-test-running-total-entity] p')
+      .includesText(`${formatNumber([topMount.entity_clients])}`, 'total entity clients is accurate');
+    assert
+      .dom('[data-test-running-total-nonentity] p')
+      .includesText(`${formatNumber([topMount.non_entity_clients])}`, 'total non-entity clients is accurate');
+    assert.dom(SELECTORS.attributionBlock).doesNotExist('Does not show attribution block');
+
+    await click('#namespace-search-select [data-test-selected-list-button="delete"]');
+    assert.ok(true, 'Remove namespace filter without first removing auth method filter');
+    assert.dom('[data-test-top-attribution]').includesText('Top namespace');
+    assert
+      .dom('[data-test-running-total-entity]')
+      .hasTextContaining(
+        `${formatNumber([response.total.entity_clients])}`,
+        'total entity clients is back to unfiltered value'
+      );
+    assert
+      .dom('[data-test-running-total-nonentity]')
+      .hasTextContaining(
+        `${formatNumber([response.total.non_entity_clients])}`,
+        'total non-entity clients is back to unfiltered value'
+      );
+    assert
+      .dom('[data-test-attribution-clients]')
+      .hasTextContaining(
+        `${formatNumber([topNamespace.clients])}`,
+        'top attribution clients back to unfiltered value'
+      );
+  });
+
+  test('shows warning if upgrade happened within license period', async function (assert) {
+    assert.expect(3);
+    this.server.get('sys/version-history', function () {
+      return {
+        data: {
+          keys: ['1.9.0', '1.9.1', '1.9.2', '1.10.1'],
+          key_info: {
+            '1.9.0': {
+              previous_version: null,
+ timestamp_installed: formatRFC3339(subMonths(UPGRADE_DATE, 4)), + }, + '1.9.1': { + previous_version: '1.9.0', + timestamp_installed: formatRFC3339(subMonths(UPGRADE_DATE, 3)), + }, + '1.9.2': { + previous_version: '1.9.1', + timestamp_installed: formatRFC3339(subMonths(UPGRADE_DATE, 2)), + }, + '1.10.1': { + previous_version: '1.9.2', + timestamp_installed: formatRFC3339(UPGRADE_DATE), + }, + }, + }, + }; + }); + await visit('/vault/clients/dashboard'); + assert.strictEqual(currentURL(), '/vault/clients/dashboard', 'clients/dashboard URL is correct'); + assert.dom(SELECTORS.dashboardActiveTab).hasText('Dashboard', 'dashboard tab is active'); + assert + .dom('[data-test-alert-banner="alert"]') + .hasTextContaining( + `Warning Vault was upgraded to 1.10.1 on Aug 1, 2022. We added monthly breakdowns and mount level attribution starting in 1.10, so keep that in mind when looking at the data. Learn more here.` + ); + }); + + test('Shows empty if license start date is current month', async function (assert) { + // TODO cmb update to reflect new behavior + const licenseStart = STATIC_NOW; + const licenseEnd = addMonths(licenseStart, 12); + this.server.get('sys/license/status', function () { + return { + request_id: 'my-license-request-id', + data: { + autoloaded: { + license_id: 'my-license-id', + start_time: formatRFC3339(licenseStart), + expiration_time: formatRFC3339(licenseEnd), + }, + }, + }; + }); + await visit('/vault/clients/dashboard'); + assert.strictEqual(currentURL(), '/vault/clients/dashboard', 'clients/dashboard URL is correct'); + assert.dom(SELECTORS.emptyStateTitle).doesNotExist('No data for this billing period'); + }); + + test('shows correct interface if no permissions on license', async function (assert) { + this.server.get('/sys/license/status', () => overrideResponse(403)); + await visit('/vault/clients/dashboard'); + assert.strictEqual(currentURL(), '/vault/clients/dashboard', 'clients/dashboard URL is correct'); + assert.dom(SELECTORS.dashboardActiveTab).hasText('Dashboard', 'dashboard tab is active'); + // Message changes depending on ent or OSS + assert.dom(SELECTORS.emptyStateTitle).exists('Empty state exists'); + assert.dom(SELECTORS.monthDropdown).exists('Dropdown exists to select month'); + assert.dom(SELECTORS.yearDropdown).exists('Dropdown exists to select year'); + }); + + test('shows error template if permissions denied querying activity response with no data', async function (assert) { + this.server.get('sys/license/status', () => overrideResponse(403)); + this.server.get('sys/version-history', () => overrideResponse(403)); + this.server.get('sys/internal/counters/config', () => overrideResponse(403)); + this.server.get('sys/internal/counters/activity', () => overrideResponse(403)); + + await visit('/vault/clients/dashboard'); + assert.strictEqual(currentURL(), '/vault/clients/dashboard', 'clients/dashboard URL is correct'); + assert + .dom(SELECTORS.emptyStateTitle) + .includesText('start date found', 'Empty state shows no billing start date'); + await click(SELECTORS.monthDropdown); + await click(this.element.querySelector('[data-test-month-list] button:not([disabled])')); + await click(SELECTORS.yearDropdown); + await click(this.element.querySelector('[data-test-year-list] button:not([disabled])')); + await click(SELECTORS.dateDropdownSubmit); + assert + .dom(SELECTORS.emptyStateTitle) + .hasText('You are not authorized', 'Empty state displays not authorized message'); + }); +}); diff --git a/ui/tests/acceptance/cluster-test.js 
b/ui/tests/acceptance/cluster-test.js new file mode 100644 index 0000000..26bb8fb --- /dev/null +++ b/ui/tests/acceptance/cluster-test.js @@ -0,0 +1,88 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { create } from 'ember-cli-page-object'; +import { settled, click, visit } from '@ember/test-helpers'; +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { v4 as uuidv4 } from 'uuid'; + +import authPage from 'vault/tests/pages/auth'; +import logout from 'vault/tests/pages/logout'; +import enablePage from 'vault/tests/pages/settings/auth/enable'; +import consoleClass from 'vault/tests/pages/components/console/ui-panel'; + +const consoleComponent = create(consoleClass); + +const tokenWithPolicy = async function (name, policy) { + await consoleComponent.runCommands([ + `write sys/policies/acl/${name} policy=${btoa(policy)}`, + `write -field=client_token auth/token/create policies=${name}`, + ]); + + return consoleComponent.lastLogOutput; +}; + +module('Acceptance | cluster', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(async function () { + await logout.visit(); + return authPage.login(); + }); + + test('hides nav item if user does not have permission', async function (assert) { + const deny_policies_policy = ` + path "sys/policies/*" { + capabilities = ["deny"] + }, + `; + + const userToken = await tokenWithPolicy('hide-policies-nav', deny_policies_policy); + await logout.visit(); + await authPage.login(userToken); + await visit('/vault/access'); + + assert.dom('[data-test-sidebar-nav-link="Policies"]').doesNotExist(); + await logout.visit(); + }); + + test('it hides mfa setup if user has not entityId (ex: is a root user)', async function (assert) { + const user = 'end-user'; + const password = 'mypassword'; + const path = `cluster-userpass-${uuidv4()}`; + + await enablePage.enable('userpass', path); + await consoleComponent.runCommands([`write auth/${path}/users/end-user password="${password}"`]); + + await logout.visit(); + await settled(); + await authPage.loginUsername(user, password, path); + await click('[data-test-user-menu-trigger]'); + assert.dom('[data-test-user-menu-item="mfa"]').exists(); + await logout.visit(); + + await authPage.login('root'); + await settled(); + await click('[data-test-user-menu-trigger]'); + assert.dom('[data-test-user-menu-item="mfa"]').doesNotExist(); + }); + + test('enterprise nav item links to first route that user has access to', async function (assert) { + const read_rgp_policy = ` + path "sys/policies/rgp" { + capabilities = ["read"] + }, + `; + + const userToken = await tokenWithPolicy('show-policies-nav', read_rgp_policy); + await logout.visit(); + await authPage.login(userToken); + await visit('/vault/access'); + + assert.dom('[data-test-sidebar-nav-link="Policies"]').hasAttribute('href', '/ui/vault/policies/rgp'); + await logout.visit(); + }); +}); diff --git a/ui/tests/acceptance/console-test.js b/ui/tests/acceptance/console-test.js new file mode 100644 index 0000000..b8ed7da --- /dev/null +++ b/ui/tests/acceptance/console-test.js @@ -0,0 +1,110 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { settled, waitUntil, click } from '@ember/test-helpers'; +import { create } from 'ember-cli-page-object'; +import { setupApplicationTest } from 'ember-qunit'; +import enginesPage from 'vault/tests/pages/secrets/backends'; +import authPage from 'vault/tests/pages/auth'; +import consoleClass from 'vault/tests/pages/components/console/ui-panel'; + +const consoleComponent = create(consoleClass); + +module('Acceptance | console', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(function () { + return authPage.login(); + }); + + test("refresh reloads the current route's data", async function (assert) { + await enginesPage.visit(); + await settled(); + const numEngines = enginesPage.rows.length; + await consoleComponent.toggle(); + await settled(); + for (const num of [1, 2, 3]) { + const inputString = `write sys/mounts/console-route-${num} type=kv`; + await consoleComponent.runCommands(inputString); + await settled(); + } + await consoleComponent.runCommands('refresh'); + await settled(); + assert.strictEqual(enginesPage.rows.length, numEngines + 3, 'new engines were added to the page'); + // Clean up + for (const num of [1, 2, 3]) { + const inputString = `delete sys/mounts/console-route-${num}`; + await consoleComponent.runCommands(inputString); + await settled(); + } + await consoleComponent.runCommands('refresh'); + await settled(); + assert.strictEqual(enginesPage.rows.length, numEngines, 'engines were removed from the page'); + }); + + test('fullscreen command expands the cli panel', async function (assert) { + await consoleComponent.toggle(); + await settled(); + await consoleComponent.runCommands('fullscreen'); + await settled(); + const consoleEle = document.querySelector('[data-test-component="console/ui-panel"]'); + // wait for the CSS transition to finish + await waitUntil(() => consoleEle.offsetHeight === window.innerHeight); + assert.strictEqual( + consoleEle.offsetHeight, + window.innerHeight, + 'fullscreen is the same height as the window' + ); + }); + + test('array output is correctly formatted', async function (assert) { + await consoleComponent.toggle(); + await settled(); + await consoleComponent.runCommands('read -field=policies /auth/token/lookup-self'); + await settled(); + const consoleOut = document.querySelector('.console-ui-output>pre'); + // wait for the CSS transition to finish + await waitUntil(() => consoleOut.innerText); + assert.notOk(consoleOut.innerText.includes('function(){')); + assert.strictEqual(consoleOut.innerText, '["root"]'); + }); + + test('number output is correctly formatted', async function (assert) { + await consoleComponent.toggle(); + await settled(); + await consoleComponent.runCommands('read -field=creation_time /auth/token/lookup-self'); + await settled(); + const consoleOut = document.querySelector('.console-ui-output>pre'); + // wait for the CSS transition to finish + await waitUntil(() => consoleOut.innerText); + assert.strictEqual(consoleOut.innerText.match(/^\d+$/).length, 1); + }); + + test('boolean output is correctly formatted', async function (assert) { + await consoleComponent.toggle(); + await settled(); + await consoleComponent.runCommands('read -field=orphan /auth/token/lookup-self'); + await settled(); + const consoleOut = document.querySelector('.console-ui-output>pre'); + // have to wrap in a later so that we can wait for the CSS transition to finish + await waitUntil(() => consoleOut.innerText); + 
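// Editor's note (not part of the upstream diff): String.prototype.match
+    // returns null when nothing matches, so the assertion below would throw a
+    // TypeError rather than fail cleanly if the console output were not a
+    // boolean literal. A more defensive variant (illustrative only) is:
+    //
+    //   assert.ok(/^(true|false)$/.test(consoleOut.innerText), 'output is a boolean literal');
+    //
+    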
assert.strictEqual(consoleOut.innerText.match(/^(true|false)$/g).length, 1); + }); + + test('it should open and close console panel', async function (assert) { + await click('[data-test-console-toggle]'); + assert.dom('[data-test-console-panel]').hasClass('panel-open', 'Sidebar button opens console panel'); + await click('[data-test-console-toggle]'); + assert + .dom('[data-test-console-panel]') + .doesNotHaveClass('panel-open', 'Sidebar button closes console panel'); + await click('[data-test-console-toggle]'); + await click('[data-test-console-panel-close]'); + assert + .dom('[data-test-console-panel]') + .doesNotHaveClass('panel-open', 'Console panel close button closes console panel'); + }); +}); diff --git a/ui/tests/acceptance/enterprise-control-groups-test.js b/ui/tests/acceptance/enterprise-control-groups-test.js new file mode 100644 index 0000000..f5307ac --- /dev/null +++ b/ui/tests/acceptance/enterprise-control-groups-test.js @@ -0,0 +1,228 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { settled, currentURL, currentRouteName, visit, waitUntil } from '@ember/test-helpers'; +import { module, test, skip } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { create } from 'ember-cli-page-object'; + +import { storageKey } from 'vault/services/control-group'; +import consoleClass from 'vault/tests/pages/components/console/ui-panel'; +import authForm from 'vault/tests/pages/components/auth-form'; +import controlGroup from 'vault/tests/pages/components/control-group'; +import controlGroupSuccess from 'vault/tests/pages/components/control-group-success'; +import authPage from 'vault/tests/pages/auth'; +import editPage from 'vault/tests/pages/secrets/backend/kv/edit-secret'; +import listPage from 'vault/tests/pages/secrets/backend/list'; + +const consoleComponent = create(consoleClass); +const authFormComponent = create(authForm); +const controlGroupComponent = create(controlGroup); +const controlGroupSuccessComponent = create(controlGroupSuccess); + +module('Acceptance | Enterprise | control groups', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(function () { + return authPage.login(); + }); + + const POLICY = ` + path "kv/foo" { + capabilities = ["create", "read", "update", "delete", "list"] + control_group = { + max_ttl = "24h" + factor "ops_manager" { + identity { + group_names = ["managers"] + approvals = 1 + } + } + } + } + + path "kv-v2-mount/data/foo" { + capabilities = ["create", "read", "update", "list"] + control_group = { + max_ttl = "24h" + factor "ops_manager" { + identity { + group_names = ["managers"] + approvals = 1 + } + } + } + } + + path "kv-v2-mount/*" { + capabilities = ["list"] + } + `; + + const AUTHORIZER_POLICY = ` + path "sys/control-group/authorize" { + capabilities = ["update"] + } + + path "sys/control-group/request" { + capabilities = ["update"] + } + `; + + const ADMIN_USER = 'authorizer'; + const ADMIN_PASSWORD = 'test'; + const setupControlGroup = async (context) => { + await visit('/vault/secrets'); + await consoleComponent.toggle(); + await settled(); + await consoleComponent.runCommands([ + //enable kv-v1 mount and write a secret + 'write sys/mounts/kv type=kv', + 'write kv/foo bar=baz', + + //enable userpass, create user and associated entity + 'write sys/auth/userpass type=userpass', + `write auth/userpass/users/${ADMIN_USER} password=${ADMIN_PASSWORD} policies=default`, + `write identity/entity name=${ADMIN_USER} policies=test`, + // write policies for 
control group + authorization + `write sys/policies/acl/kv-control-group policy=${btoa(POLICY)}`, + `write sys/policies/acl/authorizer policy=${btoa(AUTHORIZER_POLICY)}`, + // read out mount to get the accessor + 'read -field=accessor sys/internal/ui/mounts/auth/userpass', + ]); + await settled(); + const userpassAccessor = consoleComponent.lastTextOutput; + + await consoleComponent.runCommands([ + // lookup entity id for our authorizer + `write -field=id identity/lookup/entity name=${ADMIN_USER}`, + ]); + await settled(); + const authorizerEntityId = consoleComponent.lastTextOutput; + await consoleComponent.runCommands([ + // create alias for authorizor and add them to the managers group + `write identity/alias mount_accessor=${userpassAccessor} entity_id=${authorizerEntityId} name=${ADMIN_USER}`, + `write identity/group name=managers member_entity_ids=${authorizerEntityId} policies=authorizer`, + // create a token to request access to kv/foo + 'write -field=client_token auth/token/create policies=kv-control-group', + ]); + await settled(); + context.userToken = consoleComponent.lastLogOutput; + + await authPage.login(context.userToken); + await settled(); + return this; + }; + + const writeSecret = async function (backend, path, key, val) { + await listPage.visitRoot({ backend }); + await listPage.create(); + await editPage.createSecret(path, key, val); + }; + + test('for v2 secrets it redirects you if you try to navigate to a Control Group restricted path', async function (assert) { + await consoleComponent.runCommands([ + 'write sys/mounts/kv-v2-mount type=kv-v2', + 'delete kv-v2-mount/metadata/foo', + ]); + await writeSecret('kv-v2-mount', 'foo', 'bar', 'baz'); + await settled(); + await setupControlGroup(this); + await settled(); + await visit('/vault/secrets/kv-v2-mount/show/foo'); + + assert.ok( + await waitUntil(() => currentRouteName() === 'vault.cluster.access.control-group-accessor'), + 'redirects to access control group route' + ); + }); + + const workflow = async (assert, context, shouldStoreToken) => { + const url = '/vault/secrets/kv/show/foo'; + await setupControlGroup(context); + await settled(); + // as the requestor, go to the URL that's blocked by the control group + // and store the values + await visit(url); + + const accessor = controlGroupComponent.accessor; + const controlGroupToken = controlGroupComponent.token; + await authPage.logout(); + await settled(); + // log in as the admin, navigate to the accessor page, + // and authorize the control group request + await visit('/vault/auth?with=userpass'); + + await authFormComponent.username(ADMIN_USER); + await settled(); + await authFormComponent.password(ADMIN_PASSWORD); + await settled(); + await authFormComponent.login(); + await settled(); + await visit(`/vault/access/control-groups/${accessor}`); + + // putting here to help with flaky test + assert.dom('[data-test-authorize-button]').exists(); + await controlGroupComponent.authorize(); + await settled(); + assert.strictEqual(controlGroupComponent.bannerPrefix, 'Thanks!', 'text display changes'); + await settled(); + await authPage.logout(); + await settled(); + await authPage.login(context.userToken); + await settled(); + if (shouldStoreToken) { + localStorage.setItem( + storageKey(accessor, 'kv/foo'), + JSON.stringify({ + accessor, + token: controlGroupToken, + creation_path: 'kv/foo', + uiParams: { + url, + }, + }) + ); + await visit(`/vault/access/control-groups/${accessor}`); + + assert.ok(controlGroupSuccessComponent.showsNavigateMessage, 'shows user the 
navigate message'); + await controlGroupSuccessComponent.navigate(); + await settled(); + assert.strictEqual(currentURL(), url, 'successfully loads the target url'); + } else { + await visit(`/vault/access/control-groups/${accessor}`); + + await controlGroupSuccessComponent.token(controlGroupToken); + await settled(); + await controlGroupSuccessComponent.unwrap(); + await settled(); + assert.ok(controlGroupSuccessComponent.showsJsonViewer, 'shows the json viewer'); + } + }; + + skip('it allows the full flow to work without a saved token', async function (assert) { + await workflow(assert, this); + await settled(); + }); + + skip('it allows the full flow to work with a saved token', async function (assert) { + await workflow(assert, this, true); + await settled(); + }); + + test('it displays the warning in the console when making a request to a Control Group path', async function (assert) { + await setupControlGroup(this); + await settled(); + await consoleComponent.toggle(); + await settled(); + await consoleComponent.runCommands('read kv/foo'); + await settled(); + const output = consoleComponent.lastLogOutput; + assert.ok(output.includes('A Control Group was encountered at kv/foo')); + assert.ok(output.includes('The Control Group Token is')); + assert.ok(output.includes('The Accessor is')); + assert.ok(output.includes('Visit /ui/vault/access/control-groups/')); + }); +}); diff --git a/ui/tests/acceptance/enterprise-kmip-test.js b/ui/tests/acceptance/enterprise-kmip-test.js new file mode 100644 index 0000000..18df8b4 --- /dev/null +++ b/ui/tests/acceptance/enterprise-kmip-test.js @@ -0,0 +1,312 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { currentURL, currentRouteName, settled, fillIn, waitUntil, find } from '@ember/test-helpers'; +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { create } from 'ember-cli-page-object'; + +import consoleClass from 'vault/tests/pages/components/console/ui-panel'; +import authPage from 'vault/tests/pages/auth'; +import scopesPage from 'vault/tests/pages/secrets/backend/kmip/scopes'; +import rolesPage from 'vault/tests/pages/secrets/backend/kmip/roles'; +import credentialsPage from 'vault/tests/pages/secrets/backend/kmip/credentials'; +import mountSecrets from 'vault/tests/pages/settings/mount-secret-backend'; + +const uiConsole = create(consoleClass); + +const getRandomPort = () => { + let a = Math.floor(100000 + Math.random() * 900000); + a = String(a); + return a.substring(0, 4); +}; + +const mount = async (shouldConfig = true) => { + const now = Date.now(); + const path = `kmip-${now}`; + const addr = `127.0.0.1:${getRandomPort()}`; // use random port + await settled(); + const commands = shouldConfig + ? 
[`write sys/mounts/${path} type=kmip`, `write ${path}/config listen_addrs=${addr}`] + : [`write sys/mounts/${path} type=kmip`]; + await uiConsole.runCommands(commands); + await settled(); + const res = uiConsole.lastLogOutput; + if (res.includes('Error')) { + throw new Error(`Error mounting secrets engine: ${res}`); + } + return path; +}; + +const createScope = async () => { + const path = await mount(); + await settled(); + const scope = `scope-${Date.now()}`; + await settled(); + await uiConsole.runCommands([`write ${path}/scope/${scope} -force`]); + await settled(); + const res = uiConsole.lastLogOutput; + if (res.includes('Error')) { + throw new Error(`Error creating scope: ${res}`); + } + return { path, scope }; +}; + +const createRole = async () => { + const { path, scope } = await createScope(); + await settled(); + const role = `role-${Date.now()}`; + await uiConsole.runCommands([`write ${path}/scope/${scope}/role/${role} operation_all=true`]); + await settled(); + const res = uiConsole.lastLogOutput; + if (res.includes('Error')) { + throw new Error(`Error creating role: ${res}`); + } + return { path, scope, role }; +}; + +const generateCreds = async () => { + const { path, scope, role } = await createRole(); + await settled(); + await uiConsole.runCommands([ + `write ${path}/scope/${scope}/role/${role}/credential/generate format=pem -field=serial_number`, + ]); + const serial = uiConsole.lastLogOutput; + if (serial.includes('Error')) { + throw new Error(`Credential generation failed with error: ${serial}`); + } + return { path, scope, role, serial }; +}; +module('Acceptance | Enterprise | KMIP secrets', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(async function () { + await authPage.login(); + return; + }); + + test('it enables KMIP secrets engine', async function (assert) { + const path = `kmip-${Date.now()}`; + await mountSecrets.enable('kmip', path); + await settled(); + assert.strictEqual( + currentURL(), + `/vault/secrets/${path}/kmip/scopes`, + 'mounts and redirects to the kmip scopes page' + ); + assert.ok(scopesPage.isEmpty, 'renders empty state'); + }); + + test('it can configure a KMIP secrets engine', async function (assert) { + const path = await mount(false); + await scopesPage.visit({ backend: path }); + await settled(); + await scopesPage.configurationLink(); + await settled(); + assert.strictEqual( + currentURL(), + `/vault/secrets/${path}/kmip/configuration`, + 'configuration navigates to the config page' + ); + assert.ok(scopesPage.isEmpty, 'config page renders empty state'); + + await scopesPage.configureLink(); + await settled(); + assert.strictEqual( + currentURL(), + `/vault/secrets/${path}/kmip/configure`, + 'configuration navigates to the configure page' + ); + const addr = `127.0.0.1:${getRandomPort()}`; + await fillIn('[data-test-string-list-input="0"]', addr); + + await scopesPage.submit(); + await settled(); + assert.strictEqual( + currentURL(), + `/vault/secrets/${path}/kmip/configuration`, + 'redirects to configuration page after saving config' + ); + assert.notOk(scopesPage.isEmpty, 'configuration page no longer renders empty state'); + }); + + test('it can revoke from the credentials show page', async function (assert) { + const { path, scope, role, serial } = await generateCreds(); + await settled(); + await credentialsPage.visitDetail({ backend: path, scope, role, serial }); + await settled(); + await waitUntil(() => find('[data-test-confirm-action-trigger]')); + 
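// Editor's note (not part of the upstream diff): waitUntil() from
+    // @ember/test-helpers polls its callback until it returns a truthy value,
+    // failing after a default 1s timeout; that is why these KMIP tests wait for
+    // the trigger to render before asserting on it. A named helper (the name
+    // `waitForSelector` is illustrative, not upstream) could cut the repetition:
+    //
+    //   const waitForSelector = (selector) => waitUntil(() => find(selector), { timeout: 2000 });
+    //   await waitForSelector('[data-test-confirm-action-trigger]');
+    //
+    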
assert.dom('[data-test-confirm-action-trigger]').exists('delete button exists'); + await credentialsPage.delete().confirmDelete(); + await settled(); + + assert.strictEqual( + currentURL(), + `/vault/secrets/${path}/kmip/scopes/${scope}/roles/${role}/credentials`, + 'redirects to the credentials list' + ); + assert.ok(credentialsPage.isEmpty, 'renders an empty credentials page'); + }); + + test('it can create a scope', async function (assert) { + const path = await mount(this); + await scopesPage.visit({ backend: path }); + await settled(); + await scopesPage.createLink(); + await settled(); + assert.strictEqual( + currentURL(), + `/vault/secrets/${path}/kmip/scopes/create`, + 'navigates to the kmip scope create page' + ); + + // create scope + await scopesPage.scopeName('foo'); + await settled(); + await scopesPage.submit(); + await settled(); + assert.strictEqual( + currentURL(), + `/vault/secrets/${path}/kmip/scopes`, + 'navigates to the kmip scopes page after create' + ); + assert.strictEqual(scopesPage.listItemLinks.length, 1, 'renders a single scope'); + }); + + test('it can delete a scope from the list', async function (assert) { + const { path } = await createScope(); + await scopesPage.visit({ backend: path }); + await settled(); + // delete the scope + await scopesPage.listItemLinks.objectAt(0).menuToggle(); + await settled(); + await scopesPage.delete(); + await settled(); + await scopesPage.confirmDelete(); + await settled(); + assert.strictEqual(scopesPage.listItemLinks.length, 0, 'no scopes'); + assert.ok(scopesPage.isEmpty, 'renders the empty state'); + }); + + test('it can create a role', async function (assert) { + // moving create scope here to help with flaky test + const path = await mount(); + await settled(); + const scope = `scope-for-can-create-role`; + await settled(); + await uiConsole.runCommands([`write ${path}/scope/${scope} -force`]); + await settled(); + const res = uiConsole.lastLogOutput; + if (res.includes('Error')) { + throw new Error(`Error creating scope: ${res}`); + } + const role = `role-new-role`; + await rolesPage.visit({ backend: path, scope }); + await settled(); + assert.ok(rolesPage.isEmpty, 'renders the empty role page'); + await rolesPage.create(); + await settled(); + assert.strictEqual( + currentURL(), + `/vault/secrets/${path}/kmip/scopes/${scope}/roles/create`, + 'links to the role create form' + ); + + await rolesPage.roleName(role); + await settled(); + await rolesPage.submit(); + await settled(); + assert.strictEqual( + currentURL(), + `/vault/secrets/${path}/kmip/scopes/${scope}/roles`, + 'redirects to roles list' + ); + + assert.strictEqual(rolesPage.listItemLinks.length, 1, 'renders a single role'); + }); + + test('it can delete a role from the list', async function (assert) { + const { path, scope } = await createRole(); + await rolesPage.visit({ backend: path, scope }); + await settled(); + // delete the role + await rolesPage.listItemLinks.objectAt(0).menuToggle(); + await settled(); + await rolesPage.delete(); + await settled(); + await rolesPage.confirmDelete(); + await settled(); + assert.strictEqual(rolesPage.listItemLinks.length, 0, 'renders no roles'); + assert.ok(rolesPage.isEmpty, 'renders empty'); + }); + + test('it can delete a role from the detail page', async function (assert) { + const { path, scope, role } = await createRole(); + await settled(); + await rolesPage.visitDetail({ backend: path, scope, role }); + await settled(); + await waitUntil(() => find('[data-test-kmip-link-edit-role]')); + await 
rolesPage.detailEditLink(); + await settled(); + assert.strictEqual( + currentURL(), + `/vault/secrets/${path}/kmip/scopes/${scope}/roles/${role}/edit`, + 'navigates to role edit' + ); + await rolesPage.cancelLink(); + await settled(); + assert.strictEqual( + currentURL(), + `/vault/secrets/${path}/kmip/scopes/${scope}/roles/${role}`, + 'cancel navigates to role show' + ); + await rolesPage.delete().confirmDelete(); + await settled(); + assert.strictEqual( + currentURL(), + `/vault/secrets/${path}/kmip/scopes/${scope}/roles`, + 'redirects to the roles list' + ); + assert.ok(rolesPage.isEmpty, 'renders an empty roles page'); + }); + + test('it can create a credential', async function (assert) { + // TODO come back and figure out why issue here with test + const { path, scope, role } = await createRole(); + await credentialsPage.visit({ backend: path, scope, role }); + await settled(); + assert.ok(credentialsPage.isEmpty, 'renders empty creds page'); + await credentialsPage.generateCredentialsLink(); + await settled(); + assert.strictEqual( + currentURL(), + `/vault/secrets/${path}/kmip/scopes/${scope}/roles/${role}/credentials/generate`, + 'navigates to generate credentials' + ); + await credentialsPage.submit(); + await settled(); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backend.kmip.credentials.show', + 'generate redirects to the show page' + ); + await credentialsPage.backToRoleLink(); + await settled(); + assert.strictEqual(credentialsPage.listItemLinks.length, 1, 'renders a single credential'); + }); + + test('it can revoke a credential from the list', async function (assert) { + const { path, scope, role } = await generateCreds(); + await credentialsPage.visit({ backend: path, scope, role }); + // revoke the credentials + await settled(); + await credentialsPage.listItemLinks.objectAt(0).menuToggle(); + await settled(); + await credentialsPage.delete().confirmDelete(); + await settled(); + assert.strictEqual(credentialsPage.listItemLinks.length, 0, 'renders no credentials'); + assert.ok(credentialsPage.isEmpty, 'renders empty'); + }); +}); diff --git a/ui/tests/acceptance/enterprise-kmse-test.js b/ui/tests/acceptance/enterprise-kmse-test.js new file mode 100644 index 0000000..2b61129 --- /dev/null +++ b/ui/tests/acceptance/enterprise-kmse-test.js @@ -0,0 +1,50 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { click, fillIn } from '@ember/test-helpers'; +import authPage from 'vault/tests/pages/auth'; +import logout from 'vault/tests/pages/logout'; +import mountSecrets from 'vault/tests/pages/settings/mount-secret-backend'; +import { setupMirage } from 'ember-cli-mirage/test-support'; + +module('Acceptance | Enterprise | keymgmt', function (hooks) { + setupApplicationTest(hooks); + setupMirage(hooks); + + hooks.beforeEach(async function () { + await logout.visit(); + return authPage.login(); + }); + + test('it should add new key and distribute to provider', async function (assert) { + const path = `keymgmt-${Date.now()}`; + this.server.post(`/${path}/key/test-key`, () => ({})); + this.server.put(`/${path}/kms/test-keyvault/key/test-key`, () => ({})); + + await mountSecrets.enable('keymgmt', path); + await click('[data-test-secret-create]'); + await fillIn('[data-test-input="provider"]', 'azurekeyvault'); + await fillIn('[data-test-input="name"]', 'test-keyvault'); + await fillIn('[data-test-input="keyCollection"]', 'test-keycollection'); + await fillIn('[data-test-input="credentials.client_id"]', '123'); + await fillIn('[data-test-input="credentials.client_secret"]', '456'); + await fillIn('[data-test-input="credentials.tenant_id"]', '789'); + await click('[data-test-kms-provider-submit]'); + await click('[data-test-distribute-key]'); + await click('[data-test-component="search-select"] .ember-basic-dropdown-trigger'); + await fillIn('.ember-power-select-search-input', 'test-key'); + await click('.ember-power-select-option'); + await fillIn('[data-test-keymgmt-dist-keytype]', 'rsa-2048'); + await click('[data-test-operation="encrypt"]'); + await fillIn('[data-test-protection="hsm"]', 'hsm'); + + this.server.get(`/${path}/kms/test-keyvault/key`, () => ({ data: { keys: ['test-key'] } })); + await click('[data-test-secret-save]'); + await click('[data-test-kms-provider-tab="keys"] a'); + assert.dom('[data-test-secret-link="test-key"]').exists('Key is listed under keys tab of provider'); + }); +}); diff --git a/ui/tests/acceptance/enterprise-license-banner-test.js b/ui/tests/acceptance/enterprise-license-banner-test.js new file mode 100644 index 0000000..3f790b6 --- /dev/null +++ b/ui/tests/acceptance/enterprise-license-banner-test.js @@ -0,0 +1,109 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import sinon from 'sinon'; +import { visit } from '@ember/test-helpers'; +import { setupApplicationTest } from 'ember-qunit'; +import Pretender from 'pretender'; +import formatRFC3339 from 'date-fns/formatRFC3339'; +import { addDays, subDays } from 'date-fns'; +import timestamp from 'core/utils/timestamp'; + +const generateHealthResponse = (now, state) => { + let expiry; + switch (state) { + case 'expired': + expiry = subDays(now, 2); + break; + case 'expiring': + expiry = addDays(now, 10); + break; + default: + expiry = addDays(now, 33); + break; + } + return { + initialized: true, + sealed: false, + standby: false, + license: { + expiry_time: formatRFC3339(expiry), + state: 'stored', + }, + performance_standby: false, + replication_performance_mode: 'disabled', + replication_dr_mode: 'disabled', + server_time_utc: 1622562585, + version: '1.9.0+ent', + cluster_name: 'vault-cluster-e779cd7c', + cluster_id: '5f20f5ab-acea-0481-787e-71ec2ff5a60b', + last_wal: 121, + }; +}; + +module('Acceptance | Enterprise | License banner warnings', function (hooks) { + setupApplicationTest(hooks); + + hooks.before(function () { + sinon.stub(timestamp, 'now').callsFake(() => new Date('2018-04-03T14:15:30')); + }); + hooks.beforeEach(function () { + this.now = timestamp.now(); + }); + hooks.afterEach(function () { + this.server.shutdown(); + }); + hooks.after(function () { + timestamp.now.restore(); + }); + + test('it shows no license banner if license expires in > 30 days', async function (assert) { + const healthResp = generateHealthResponse(this.now); + this.server = new Pretender(function () { + this.get('/v1/sys/health', (response) => { + return [response, { 'Content-Type': 'application/json' }, JSON.stringify(healthResp)]; + }); + this.get('/v1/sys/internal/ui/feature-flags', this.passthrough); + this.get('/v1/sys/internal/ui/mounts', this.passthrough); + this.get('/v1/sys/seal-status', this.passthrough); + this.get('/v1/sys/license/features', this.passthrough); + }); + await visit('/vault/auth'); + assert.dom('[data-test-license-banner]').doesNotExist('license banner does not show'); + this.server.shutdown(); + }); + test('it shows license banner warning if license expires within 30 days', async function (assert) { + const healthResp = generateHealthResponse(this.now, 'expiring'); + this.server = new Pretender(function () { + this.get('/v1/sys/health', (response) => { + return [response, { 'Content-Type': 'application/json' }, JSON.stringify(healthResp)]; + }); + this.get('/v1/sys/internal/ui/feature-flags', this.passthrough); + this.get('/v1/sys/internal/ui/mounts', this.passthrough); + this.get('/v1/sys/seal-status', this.passthrough); + this.get('/v1/sys/license/features', this.passthrough); + }); + await visit('/vault/auth'); + assert.dom('[data-test-license-banner-warning]').exists('license warning shows'); + this.server.shutdown(); + }); + + test('it shows license banner alert if license has already expired', async function (assert) { + const healthResp = generateHealthResponse(this.now, 'expired'); + this.server = new Pretender(function () { + this.get('/v1/sys/health', (response) => { + return [response, { 'Content-Type': 'application/json' }, JSON.stringify(healthResp)]; + }); + this.get('/v1/sys/internal/ui/feature-flags', this.passthrough); + this.get('/v1/sys/internal/ui/mounts', this.passthrough); + this.get('/v1/sys/seal-status', this.passthrough); + this.get('/v1/sys/license/features', 
this.passthrough);
+    });
+    await visit('/vault/auth');
+    assert.dom('[data-test-license-banner-expired]').exists('expired license message shows');
+    this.server.shutdown();
+  });
+});
diff --git a/ui/tests/acceptance/enterprise-namespaces-test.js b/ui/tests/acceptance/enterprise-namespaces-test.js
new file mode 100644
index 0000000..63429fd
--- /dev/null
+++ b/ui/tests/acceptance/enterprise-namespaces-test.js
@@ -0,0 +1,91 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import { click, settled, visit, fillIn, currentURL } from '@ember/test-helpers';
+import { module, test } from 'qunit';
+import { setupApplicationTest } from 'ember-qunit';
+import { create } from 'ember-cli-page-object';
+import consoleClass from 'vault/tests/pages/components/console/ui-panel';
+import authPage from 'vault/tests/pages/auth';
+import logout from 'vault/tests/pages/logout';
+
+const shell = create(consoleClass);
+
+const createNS = async (name) => shell.runCommands(`write sys/namespaces/${name} -force`);
+
+module('Acceptance | Enterprise | namespaces', function (hooks) {
+  setupApplicationTest(hooks);
+
+  hooks.beforeEach(function () {
+    return authPage.login();
+  });
+
+  test('it clears namespaces when you log out', async function (assert) {
+    const ns = 'foo';
+    await createNS(ns);
+    await shell.runCommands(`write -field=client_token auth/token/create policies=default`);
+    const token = shell.lastLogOutput;
+    await logout.visit();
+    await authPage.login(token);
+    await click('[data-test-namespace-toggle]');
+    assert.dom('[data-test-current-namespace]').hasText('root', 'root renders as current namespace');
+    assert.dom('[data-test-namespace-link]').doesNotExist('Additional namespaces have been cleared');
+    await logout.visit();
+  });
+
+  test('it shows nested namespaces if you log in with a namespace starting with a /', async function (assert) {
+    assert.expect(5);
+
+    await click('[data-test-namespace-toggle]');
+
+    const nses = ['beep', 'boop', 'bop'];
+    for (const [i, ns] of nses.entries()) {
+      await createNS(ns);
+      await settled();
+      // the namespace path will include all of the namespaces up to this point
+      const targetNamespace = nses.slice(0, i + 1).join('/');
+      const url = `/vault/secrets?namespace=${targetNamespace}`;
+      // this is usually triggered when creating a ns in the form -- trigger a reload of the namespaces manually
+      await click('[data-test-refresh-namespaces]');
+      // check that the single namespace "beep" or "boop" not "beep/boop" shows in the toggle display
+      assert
+        .dom(`[data-test-namespace-link="${targetNamespace}"]`)
+        .hasText(ns, `shows the namespace ${ns} in the toggle component`);
+      // because qunit does not like page reloads, visiting the url directly instead of clicking on the namespace in the toggle
+      await visit(url);
+    }
+
+    await logout.visit();
+    await settled();
+    await authPage.visit({ namespace: '/beep/boop' });
+    await settled();
+    await authPage.tokenInput('root').submit();
+    await settled();
+    await click('[data-test-namespace-toggle]');
+
+    assert.dom('[data-test-current-namespace]').hasText('/beep/boop/', 'current namespace begins with a /');
+    assert
+      .dom('[data-test-namespace-link="beep/boop/bop"]')
+      .exists('renders the link to the nested namespace');
+  });
+
+  test('it shows the regular namespace toolbar when not managed', async function (assert) {
+    // This test is the opposite of the test in managed-namespace-test
+    await logout.visit();
+    assert.strictEqual(currentURL(), '/vault/auth?with=token', 'Does not redirect');
+    
assert.dom('[data-test-namespace-toolbar]').exists('Normal namespace toolbar exists'); + assert + .dom('[data-test-managed-namespace-toolbar]') + .doesNotExist('Managed namespace toolbar does not exist'); + assert.dom('input#namespace').hasAttribute('placeholder', '/ (Root)'); + await fillIn('input#namespace', '/foo'); + const encodedNamespace = encodeURIComponent('/foo'); + assert.strictEqual( + currentURL(), + `/vault/auth?namespace=${encodedNamespace}&with=token`, + 'Does not prepend root to namespace' + ); + }); +}); diff --git a/ui/tests/acceptance/enterprise-oidc-namespace-test.js b/ui/tests/acceptance/enterprise-oidc-namespace-test.js new file mode 100644 index 0000000..a28a387 --- /dev/null +++ b/ui/tests/acceptance/enterprise-oidc-namespace-test.js @@ -0,0 +1,91 @@ +import { visit, currentURL } from '@ember/test-helpers'; +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { create } from 'ember-cli-page-object'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import parseURL from 'core/utils/parse-url'; +import consoleClass from 'vault/tests/pages/components/console/ui-panel'; +import authPage from 'vault/tests/pages/auth'; + +const shell = create(consoleClass); + +const createNS = async (name) => { + await shell.runCommands(`write sys/namespaces/${name} -force`); +}; +const SELECTORS = { + authTab: (path) => `[data-test-auth-method="${path}"] a`, +}; + +module('Acceptance | Enterprise | oidc auth namespace test', function (hooks) { + setupApplicationTest(hooks); + setupMirage(hooks); + + hooks.beforeEach(async function () { + this.namespace = 'test-ns'; + this.rootOidc = 'root-oidc'; + this.nsOidc = 'ns-oidc'; + + this.server.post(`/auth/:path/config`, () => {}); + + this.enableOidc = (path, role = '') => { + return shell.runCommands([ + `write sys/auth/${path} type=oidc`, + `write auth/${path}/config default_role="${role}" oidc_discovery_url="https://example.com"`, + // show method as tab + `write sys/auth/${path}/tune listing_visibility="unauth"`, + ]); + }; + + this.disableOidc = (path) => shell.runCommands([`delete /sys/auth/${path}`]); + }); + + test('oidc: request is made to auth_url when a namespace is inputted', async function (assert) { + assert.expect(5); + + this.server.post(`/auth/${this.rootOidc}/oidc/auth_url`, (schema, req) => { + const { redirect_uri } = JSON.parse(req.requestBody); + const { pathname, search } = parseURL(redirect_uri); + assert.strictEqual( + pathname + search, + `/ui/vault/auth/${this.rootOidc}/oidc/callback`, + 'request made to auth_url when the login page is visited' + ); + }); + this.server.post(`/auth/${this.nsOidc}/oidc/auth_url`, (schema, req) => { + const { redirect_uri } = JSON.parse(req.requestBody); + const { pathname, search } = parseURL(redirect_uri); + assert.strictEqual( + pathname + search, + `/ui/vault/auth/${this.nsOidc}/oidc/callback?namespace=${this.namespace}`, + 'request made to correct auth_url when namespace is filled in' + ); + }); + + await authPage.login(); + // enable oidc in root namespace, without default role + await this.enableOidc(this.rootOidc); + // create child namespace to enable oidc + await createNS(this.namespace); + // enable oidc in child namespace with default role + await authPage.loginNs(this.namespace); + await this.enableOidc(this.nsOidc, `${this.nsOidc}-role`); + await authPage.logout(); + + await visit('/vault/auth'); + assert.dom(SELECTORS.authTab(this.rootOidc)).exists('renders oidc method tab for root'); + await 
authPage.namespaceInput(this.namespace); + assert.strictEqual( + currentURL(), + `/vault/auth?namespace=${this.namespace}&with=${this.nsOidc}%2F`, + 'url updates with namespace value' + ); + assert.dom(SELECTORS.authTab(this.nsOidc)).exists('renders oidc method tab for child namespace'); + + // disable methods to clean up test state for re-running + await authPage.login(); + await this.disableOidc(this.rootOidc); + await this.disableOidc(this.nsOidc); + await shell.runCommands([`delete /sys/auth/${this.namespace}`]); + await authPage.logout(); + }); +}); diff --git a/ui/tests/acceptance/enterprise-replication-test.js b/ui/tests/acceptance/enterprise-replication-test.js new file mode 100644 index 0000000..f72875f --- /dev/null +++ b/ui/tests/acceptance/enterprise-replication-test.js @@ -0,0 +1,387 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { clickTrigger } from 'ember-power-select/test-support/helpers'; +import { click, fillIn, findAll, currentURL, find, visit, settled, waitUntil } from '@ember/test-helpers'; +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import authPage from 'vault/tests/pages/auth'; +import { pollCluster } from 'vault/tests/helpers/poll-cluster'; +import { create } from 'ember-cli-page-object'; +import flashMessage from 'vault/tests/pages/components/flash-message'; +import ss from 'vault/tests/pages/components/search-select'; + +const searchSelect = create(ss); +const flash = create(flashMessage); + +const disableReplication = async (type, assert) => { + // disable replication for the given type ('dr' or 'performance') + await visit(`/vault/replication/${type}`); + + if (findAll('[data-test-replication-link="manage"]').length) { + await click('[data-test-replication-link="manage"]'); + + await click('[data-test-disable-replication] button'); + + const typeDisplay = type === 'dr' ? 'Disaster Recovery' : 'Performance'; + await fillIn('[data-test-confirmation-modal-input="Disable Replication?"]', typeDisplay); + await click('[data-test-confirm-button]'); + await settled(); // eslint-disable-line + + if (assert) { + // bypassing for now -- remove if tests pass reliably + // assert.strictEqual( + // flash.latestMessage, + // 'This cluster is having replication disabled. 
Vault will be unavailable for a brief period and will resume service shortly.', + // 'renders info flash when disabled' + // ); + assert.ok( + await waitUntil(() => currentURL() === '/vault/replication'), + 'redirects to the replication page' + ); + } + await settled(); + } +}; + +module('Acceptance | Enterprise | replication', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(async function () { + await authPage.login(); + await settled(); + await disableReplication('dr'); + await settled(); + await disableReplication('performance'); + await settled(); + }); + + hooks.afterEach(async function () { + await disableReplication('dr'); + await settled(); + await disableReplication('performance'); + await settled(); + }); + + test('replication', async function (assert) { + assert.expect(17); + const secondaryName = 'firstSecondary'; + const mode = 'deny'; + + // confirm unable to visit dr secondary details page when both replications are disabled + await visit('/vault/replication-dr-promote/details'); + + assert.dom('[data-test-component="empty-state"]').exists(); + assert + .dom('[data-test-empty-state-title]') + .includesText('Disaster Recovery secondary not set up', 'shows the correct title of the empty state'); + + assert + .dom('[data-test-empty-state-message]') + .hasText( + 'This cluster has not been enabled as a Disaster Recovery Secondary. You can do so by enabling replication and adding a secondary from the Disaster Recovery Primary.', + 'renders default message specific to when no replication is enabled' + ); + + await visit('/vault/replication'); + + assert.strictEqual(currentURL(), '/vault/replication'); + + // enable perf replication + await click('[data-test-replication-type-select="performance"]'); + + await fillIn('[data-test-replication-cluster-mode-select]', 'primary'); + + await click('[data-test-replication-enable]'); + + await pollCluster(this.owner); + + // confirm that the details dashboard shows + assert.ok(await waitUntil(() => find('[data-test-replication-dashboard]')), 'details dashboard is shown'); + + // add a secondary with a mount filter config + await click('[data-test-replication-link="secondaries"]'); + + await click('[data-test-secondary-add]'); + + await fillIn('[data-test-replication-secondary-id]', secondaryName); + + await click('#deny'); + await clickTrigger(); + const mountPath = searchSelect.options.objectAt(0).text; + await searchSelect.options.objectAt(0).click(); + await click('[data-test-secondary-add]'); + + await pollCluster(this.owner); + // click into the added secondary's mount filter config + await click('[data-test-replication-link="secondaries"]'); + + await click('[data-test-popup-menu-trigger]'); + + await click('[data-test-replication-path-filter-link]'); + + assert.strictEqual( + currentURL(), + `/vault/replication/performance/secondaries/config/show/${secondaryName}` + ); + assert.dom('[data-test-mount-config-mode]').includesText(mode, 'show page renders the correct mode'); + assert + .dom('[data-test-mount-config-paths]') + .includesText(mountPath, 'show page renders the correct mount path'); + + // delete config by choosing "no filter" in the edit screen + await click('[data-test-replication-link="edit-mount-config"]'); + + await click('#no-filtering'); + + await click('[data-test-config-save]'); + await settled(); // eslint-disable-line + + assert.strictEqual( + flash.latestMessage, + `The performance mount filter config for the secondary ${secondaryName} was successfully deleted.`, + 'renders success flash upon 
deletion' + ); + assert.strictEqual( + currentURL(), + `/vault/replication/performance/secondaries`, + 'redirects to the secondaries page' + ); + // nav back to details page and confirm secondary is in the known secondaries table + await click('[data-test-replication-link="details"]'); + + assert + .dom(`[data-test-secondaries=row-for-${secondaryName}]`) + .exists('shows a table row for the recently added secondary'); + + // nav to DR + await visit('/vault/replication/dr'); + + await fillIn('[data-test-replication-cluster-mode-select]', 'secondary'); + assert + .dom('[data-test-replication-enable]') + .isDisabled('dr secondary enable is disabled when other replication modes are on'); + + // disable performance replication + await disableReplication('performance', assert); + await settled(); + await pollCluster(this.owner); + + // enable dr replication + await visit('vault/replication/dr'); + + await fillIn('[data-test-replication-cluster-mode-select]', 'primary'); + await click('button[type="submit"]'); + + await pollCluster(this.owner); + await waitUntil(() => find('[data-test-empty-state-title]')); + // empty state inside of known secondaries table + assert + .dom('[data-test-empty-state-title]') + .includesText( + 'No known dr secondary clusters associated with this cluster', + 'shows the correct title of the empty state' + ); + + assert.ok( + find('[data-test-replication-title]').textContent.includes('Disaster Recovery'), + 'it displays the replication type correctly' + ); + assert.ok( + find('[data-test-replication-mode-display]').textContent.includes('primary'), + 'it displays the cluster mode correctly' + ); + + // add dr secondary + await click('[data-test-replication-link="secondaries"]'); + + await click('[data-test-secondary-add]'); + + await fillIn('[data-test-replication-secondary-id]', secondaryName); + + await click('[data-test-secondary-add]'); + + await pollCluster(this.owner); + await click('[data-test-replication-link="secondaries"]'); + + assert + .dom('[data-test-secondary-name]') + .includesText(secondaryName, 'it displays the secondary in the list of known secondaries'); + }); + + test('disabling dr primary when perf replication is enabled', async function (assert) { + await visit('vault/replication/performance'); + + // enable perf replication + await fillIn('[data-test-replication-cluster-mode-select]', 'primary'); + await click('[data-test-replication-enable]'); + + await pollCluster(this.owner); + + // enable dr replication + await visit('/vault/replication/dr'); + + await fillIn('[data-test-replication-cluster-mode-select]', 'primary'); + + await click('[data-test-replication-enable]'); + + await pollCluster(this.owner); + await visit('/vault/replication/dr/manage'); + + await click('[data-test-demote-replication] [data-test-replication-action-trigger]'); + + assert.ok(findAll('[data-test-demote-warning]').length, 'displays the demotion warning'); + }); + + test('navigating to dr secondary details page when dr secondary is not enabled', async function (assert) { + // enable dr replication + + await visit('/vault/replication/dr'); + + await fillIn('[data-test-replication-cluster-mode-select]', 'primary'); + await click('[data-test-replication-enable]'); + await settled(); // eslint-disable-line + await pollCluster(this.owner); + await visit('/vault/replication-dr-promote/details'); + + assert.dom('[data-test-component="empty-state"]').exists(); + assert + .dom('[data-test-empty-state-message]') + .hasText( + 'This Disaster Recovery secondary has not been enabled. 
You can do so from the Disaster Recovery Primary.', + 'renders message when replication is enabled' + ); + }); + + test('add secondary and navigate through token generation modal', async function (assert) { + const secondaryNameFirst = 'firstSecondary'; + const secondaryNameSecond = 'secondSecondary'; + await visit('/vault/replication'); + + // enable perf replication + await click('[data-test-replication-type-select="performance"]'); + + await fillIn('[data-test-replication-cluster-mode-select]', 'primary'); + await click('[data-test-replication-enable]'); + + await pollCluster(this.owner); + await settled(); + + // add a secondary with default TTL + await click('[data-test-replication-link="secondaries"]'); + + await click('[data-test-secondary-add]'); + + await fillIn('[data-test-replication-secondary-id]', secondaryNameFirst); + await click('[data-test-secondary-add]'); + + await pollCluster(this.owner); + await settled(); + const modalDefaultTtl = document.querySelector('[data-test-row-value="TTL"]').innerText; + // checks on secondary token modal + assert.dom('#modal-wormhole').exists(); + assert.strictEqual(modalDefaultTtl, '1800s', 'shows the correct TTL of 1800s'); + // click off the modal to make sure it can be dismissed by clicking the background, not just the copy-close button + await click('[data-test-modal-background="Copy your token"]'); + + // add another secondary not using the default ttl + await click('[data-test-secondary-add]'); + + await fillIn('[data-test-replication-secondary-id]', secondaryNameSecond); + await click('[data-test-toggle-input]'); + + await fillIn('[data-test-ttl-value]', 3); + await click('[data-test-secondary-add]'); + + await pollCluster(this.owner); + await settled(); + const modalTtl = document.querySelector('[data-test-row-value="TTL"]').innerText; + assert.strictEqual(modalTtl, '180s', 'shows the correct TTL of 180s'); + await click('[data-test-modal-background="Copy your token"]'); + + // confirm you were redirected to the secondaries page + assert.strictEqual( + currentURL(), + `/vault/replication/performance/secondaries`, + 'redirects to the secondaries page' + ); + assert + .dom('[data-test-secondary-name]') + .includesText(secondaryNameFirst, 'it displays the secondary in the list of secondaries'); + }); + + test('render performance and dr primary and navigate to details page', async function (assert) { + // enable perf primary replication + await visit('/vault/replication'); + await click('[data-test-replication-type-select="performance"]'); + + await fillIn('[data-test-replication-cluster-mode-select]', 'primary'); + await click('[data-test-replication-enable]'); + + await pollCluster(this.owner); + await settled(); + + await visit('/vault/replication'); + + assert + .dom(`[data-test-replication-summary-card]`) + .doesNotExist(`does not render replication summary card when both modes are not enabled as primary`); + + // enable DR primary replication + await click('[data-test-replication-promote-secondary]'); + await click('[data-test-replication-enable]'); + + await pollCluster(this.owner); + await settled(); + + // navigate using breadcrumbs back to replication.index + await click('[data-test-replication-breadcrumb]'); + + assert + .dom('[data-test-replication-summary-card]') + .exists({ count: 2 }, 'renders two replication-summary-card components'); + + // navigate to details page using the "Details" link + await click('[data-test-manage-link="Disaster Recovery"]'); + + assert + .dom('[data-test-selectable-card-container="primary"]') +
.exists('shows the correct card on the details dashboard'); + assert.strictEqual(currentURL(), '/vault/replication/dr'); + }); + + test('render performance secondary and navigate to the details page', async function (assert) { + // enable perf replication + await visit('/vault/replication'); + + await click('[data-test-replication-type-select="performance"]'); + + await fillIn('[data-test-replication-cluster-mode-select]', 'primary'); + await click('[data-test-replication-enable]'); + + await pollCluster(this.owner); + await settled(); + + // demote perf primary to a secondary + await click('[data-test-replication-link="manage"]'); + + // open demote modal + await click('[data-test-demote-replication] [data-test-replication-action-trigger]'); + + // enter confirmation text + await fillIn('[data-test-confirmation-modal-input="Demote to secondary?"]', 'Performance'); + // Click confirm button + await click('[data-test-confirm-button="Demote to secondary?"]'); + + await click('[data-test-replication-link="details"]'); + + assert.dom('[data-test-replication-dashboard]').exists(); + assert.dom('[data-test-selectable-card-container="secondary"]').exists(); + assert.ok( + find('[data-test-replication-mode-display]').textContent.includes('secondary'), + 'it displays the cluster mode correctly' + ); + }); +}); diff --git a/ui/tests/acceptance/enterprise-replication-unsupported-test.js b/ui/tests/acceptance/enterprise-replication-unsupported-test.js new file mode 100644 index 0000000..cfcb2a7 --- /dev/null +++ b/ui/tests/acceptance/enterprise-replication-unsupported-test.js @@ -0,0 +1,33 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import authPage from 'vault/tests/pages/auth'; +import { visit } from '@ember/test-helpers'; + +module('Acceptance | Enterprise | replication unsupported', function (hooks) { + setupApplicationTest(hooks); + setupMirage(hooks); + + hooks.beforeEach(async function () { + this.server.get('/sys/replication/status', function () { + return { + data: { + mode: 'unsupported', + }, + }; + }); + return authPage.login(); + }); + + test('replication page when unsupported', async function (assert) { + await visit('/vault/replication'); + assert + .dom('[data-test-replication-title]') + .hasText('Replication unsupported', 'it shows the unsupported view'); + }); +}); diff --git a/ui/tests/acceptance/enterprise-sidebar-nav-test.js b/ui/tests/acceptance/enterprise-sidebar-nav-test.js new file mode 100644 index 0000000..270efe6 --- /dev/null +++ b/ui/tests/acceptance/enterprise-sidebar-nav-test.js @@ -0,0 +1,67 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { click, currentURL, fillIn } from '@ember/test-helpers'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import authPage from 'vault/tests/pages/auth'; + +const link = (label) => `[data-test-sidebar-nav-link="${label}"]`; +const panel = (label) => `[data-test-sidebar-nav-panel="${label}"]`; + +module('Acceptance | Enterprise | sidebar navigation', function (hooks) { + setupApplicationTest(hooks); + setupMirage(hooks); + + hooks.beforeEach(function () { + return authPage.login(); + }); + + // common links are tested in the sidebar-nav test and will not be covered here + test('it should render enterprise only navigation links', async function (assert) { + assert.dom(panel('Cluster')).exists('Cluster nav panel renders'); + + await click(link('Replication')); + assert.strictEqual(currentURL(), '/vault/replication', 'Replication route renders'); + await click('[data-test-replication-enable]'); + + await click(link('Performance')); + assert.strictEqual( + currentURL(), + '/vault/replication/performance', + 'Replication performance route renders' + ); + + await click(link('Disaster Recovery')); + assert.strictEqual(currentURL(), '/vault/replication/dr', 'Replication dr route renders'); + // disable replication now that we have checked the links + await click('[data-test-replication-link="manage"]'); + await click('[data-test-replication-action-trigger]'); + await fillIn('[data-test-confirmation-modal-input="Disable Replication?"]', 'Disaster Recovery'); + await click('[data-test-confirm-button="Disable Replication?"]'); + + await click(link('Client count')); + assert.strictEqual(currentURL(), '/vault/clients/dashboard', 'Client counts route renders'); + + await click(link('License')); + assert.strictEqual(currentURL(), '/vault/license', 'License route renders'); + + await click(link('Access')); + await click(link('Control Groups')); + assert.strictEqual(currentURL(), '/vault/access/control-groups', 'Control groups route renders'); + + await click(link('Namespaces')); + assert.strictEqual(currentURL(), '/vault/access/namespaces?page=1', 'Namespaces route renders'); + + await click(link('Back to main navigation')); + await click(link('Policies')); + await click(link('Role-Governing Policies')); + assert.strictEqual(currentURL(), '/vault/policies/rgp', 'Role-Governing Policies route renders'); + + await click(link('Endpoint Governing Policies')); + assert.strictEqual(currentURL(), '/vault/policies/egp', 'Endpoint Governing Policies route renders'); + }); +}); diff --git a/ui/tests/acceptance/enterprise-transform-test.js b/ui/tests/acceptance/enterprise-transform-test.js new file mode 100644 index 0000000..6122473 --- /dev/null +++ b/ui/tests/acceptance/enterprise-transform-test.js @@ -0,0 +1,300 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { currentURL, click, settled } from '@ember/test-helpers'; +import { create } from 'ember-cli-page-object'; +import { typeInSearch, selectChoose, clickTrigger } from 'ember-power-select/test-support/helpers'; + +import authPage from 'vault/tests/pages/auth'; +import mountSecrets from 'vault/tests/pages/settings/mount-secret-backend'; +import transformationsPage from 'vault/tests/pages/secrets/backend/transform/transformations'; +import rolesPage from 'vault/tests/pages/secrets/backend/transform/roles'; +import templatesPage from 'vault/tests/pages/secrets/backend/transform/templates'; +import alphabetsPage from 'vault/tests/pages/secrets/backend/transform/alphabets'; +import searchSelect from 'vault/tests/pages/components/search-select'; + +const searchSelectComponent = create(searchSelect); + +const mount = async () => { + const path = `transform-${Date.now()}`; + await mountSecrets.enable('transform', path); + await settled(); + return path; +}; + +const newTransformation = async (backend, name, submit = false) => { + const transformationName = name || 'foo'; + await transformationsPage.visitCreate({ backend }); + await settled(); + await transformationsPage.name(transformationName); + await settled(); + await clickTrigger('#template'); + await selectChoose('#template', '.ember-power-select-option', 0); + await settled(); + // Don't automatically choose role because we might be testing that + if (submit) { + await transformationsPage.submit(); + await settled(); + } + return transformationName; +}; + +const newRole = async (backend, name) => { + const roleName = name || 'bar'; + await rolesPage.visitCreate({ backend }); + await settled(); + await rolesPage.name(roleName); + await settled(); + await clickTrigger('#transformations'); + await settled(); + await selectChoose('#transformations', '.ember-power-select-option', 0); + await settled(); + await rolesPage.submit(); + await settled(); + return roleName; +}; + +module('Acceptance | Enterprise | Transform secrets', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(function () { + return authPage.login(); + }); + + test('it enables Transform secrets engine and shows tabs', async function (assert) { + const backend = `transform-${Date.now()}`; + await mountSecrets.enable('transform', backend); + await settled(); + assert.strictEqual( + currentURL(), + `/vault/secrets/${backend}/list`, + 'mounts and redirects to the transformations list page' + ); + assert.ok(transformationsPage.isEmpty, 'renders empty state'); + assert + .dom('.active[data-test-secret-list-tab="Transformations"]') + .exists('Has Transformations tab which is active'); + assert.dom('[data-test-secret-list-tab="Roles"]').exists('Has Roles tab'); + assert.dom('[data-test-secret-list-tab="Templates"]').exists('Has Templates tab'); + assert.dom('[data-test-secret-list-tab="Alphabets"]').exists('Has Alphabets tab'); + }); + + test('it can create a transformation and add itself to the role attached', async function (assert) { + const backend = await mount(); + const transformationName = 'foo'; + const roleName = 'foo-role'; + await settled(); + await transformationsPage.createLink({ backend }); + await settled(); + assert.strictEqual( + currentURL(), + `/vault/secrets/${backend}/create`, + 'redirects to create transformation page' + ); + await transformationsPage.name(transformationName); + await settled(); + 
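// the type field controls which type-specific inputs render: fpe shows tweak_source, masking swaps in masking_character +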
assert.dom('[data-test-input="type"]').hasValue('fpe', 'Has type FPE by default'); + assert.dom('[data-test-input="tweak_source"]').exists('Shows tweak source when FPE'); + await transformationsPage.type('masking'); + await settled(); + assert + .dom('[data-test-input="masking_character"]') + .exists('Shows masking character input when changed to masking type'); + assert.dom('[data-test-input="tweak_source"]').doesNotExist('Does not show tweak source when masking'); + await clickTrigger('#template'); + await settled(); + assert.strictEqual(searchSelectComponent.options.length, 2, 'list shows two builtin options by default'); + await selectChoose('#template', '.ember-power-select-option', 0); + await settled(); + + await clickTrigger('#allowed_roles'); + await settled(); + await typeInSearch(roleName); + await settled(); + await selectChoose('#allowed_roles', '.ember-power-select-option', 0); + await settled(); + await transformationsPage.submit(); + await settled(); + assert.strictEqual( + currentURL(), + `/vault/secrets/${backend}/show/${transformationName}`, + 'redirects to show transformation page after submit' + ); + await click(`[data-test-secret-breadcrumb="${backend}"]`); + assert.strictEqual( + currentURL(), + `/vault/secrets/${backend}/list`, + 'Links back to list view from breadcrumb' + ); + }); + + test('it can create a role and add itself to the transformation attached', async function (assert) { + const roleName = 'my-role'; + const backend = await mount(); + // create transformation without role + await newTransformation(backend, 'a-transformation', true); + await click(`[data-test-secret-breadcrumb="${backend}"]`); + assert.strictEqual( + currentURL(), + `/vault/secrets/${backend}/list`, + 'Links back to list view from breadcrumb' + ); + await click('[data-test-secret-list-tab="Roles"]'); + assert.strictEqual(currentURL(), `/vault/secrets/${backend}/list?tab=role`, 'links to role list page'); + // create role with transformation attached + await rolesPage.createLink(); + assert.strictEqual( + currentURL(), + `/vault/secrets/${backend}/create?itemType=role`, + 'redirects to create role page' + ); + await rolesPage.name(roleName); + await clickTrigger('#transformations'); + assert.strictEqual(searchSelectComponent.options.length, 1, 'lists the transformation'); + await selectChoose('#transformations', '.ember-power-select-option', 0); + await rolesPage.submit(); + await settled(); + assert.strictEqual( + currentURL(), + `/vault/secrets/${backend}/show/role/${roleName}`, + 'redirects to show role page after submit' + ); + await click(`[data-test-secret-breadcrumb="${backend}"]`); + assert.strictEqual( + currentURL(), + `/vault/secrets/${backend}/list?tab=role`, + 'Links back to role list view from breadcrumb' + ); + }); + + test('it adds a role to a transformation when added to a role', async function (assert) { + const roleName = 'role-test'; + const backend = await mount(); + const transformation = await newTransformation(backend, 'b-transformation', true); + await newRole(backend, roleName); + await transformationsPage.visitShow({ backend, id: transformation }); + await settled(); + assert.dom('[data-test-row-value="Allowed roles"]').hasText(roleName); + }); + + test('it shows a message if an update fails after save', async function (assert) { + const roleName = 'role-remove'; + const backend = await mount(); + // Create transformation + const transformation = await newTransformation(backend, 'c-transformation', true); + // create role + await newRole(backend, roleName); + 
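// verify the role was added to the transformation's allowed roles before editing +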
await settled(); + await transformationsPage.visitShow({ backend, id: transformation }); + assert.dom('[data-test-row-value="Allowed roles"]').hasText(roleName); + // Edit transformation + await click('[data-test-edit-link]'); + assert.dom('.modal.is-active').exists('Confirmation modal appears'); + await rolesPage.modalConfirm(); + await settled(); + assert.strictEqual( + currentURL(), + `/vault/secrets/${backend}/edit/${transformation}`, + 'Correctly links to edit page for secret' + ); + // remove role + await settled(); + await click('#allowed_roles [data-test-selected-list-button="delete"]'); + + await transformationsPage.save(); + await settled(); + assert.dom('.flash-message.is-info').exists('Shows info message since role could not be updated'); + assert.strictEqual( + currentURL(), + `/vault/secrets/${backend}/show/${transformation}`, + 'Correctly links to show page for secret' + ); + assert + .dom('[data-test-row-value="Allowed roles"]') + .doesNotExist('Allowed roles are no longer on the transformation'); + }); + + test('it allows creation and edit of a template', async function (assert) { + const templateName = 'my-template'; + const backend = await mount(); + await click('[data-test-secret-list-tab="Templates"]'); + + assert.strictEqual( + currentURL(), + `/vault/secrets/${backend}/list?tab=template`, + 'links to template list page' + ); + await settled(); + await templatesPage.createLink(); + assert.strictEqual( + currentURL(), + `/vault/secrets/${backend}/create?itemType=template`, + 'redirects to create template page' + ); + await templatesPage.name(templateName); + await templatesPage.pattern(`(\\d{4})`); + await clickTrigger('#alphabet'); + await settled(); + assert.ok(searchSelectComponent.options.length > 0, 'lists built-in alphabets'); + await selectChoose('#alphabet', '.ember-power-select-option', 0); + assert.dom('#alphabet .ember-power-select-trigger').doesNotExist('Alphabet input no longer searchable'); + await templatesPage.submit(); + await settled(); + assert.strictEqual( + currentURL(), + `/vault/secrets/${backend}/show/template/${templateName}`, + 'redirects to show template page after submit' + ); + await templatesPage.editLink(); + await settled(); + assert.strictEqual( + currentURL(), + `/vault/secrets/${backend}/edit/template/${templateName}`, + 'Links to template edit page' + ); + await settled(); + assert.dom('[data-test-input="name"]').hasAttribute('readonly'); + }); + + test('it allows creation and edit of an alphabet', async function (assert) { + const alphabetName = 'vowels-only'; + const backend = await mount(); + await click('[data-test-secret-list-tab="Alphabets"]'); + + assert.strictEqual( + currentURL(), + `/vault/secrets/${backend}/list?tab=alphabet`, + 'links to alphabet list page' + ); + await alphabetsPage.createLink(); + await settled(); + assert.strictEqual( + currentURL(), + `/vault/secrets/${backend}/create?itemType=alphabet`, + 'redirects to create alphabet page' + ); + await alphabetsPage.name(alphabetName); + await alphabetsPage.alphabet('aeiou'); + await alphabetsPage.submit(); + await settled(); + assert.strictEqual( + currentURL(), + `/vault/secrets/${backend}/show/alphabet/${alphabetName}`, + 'redirects to show alphabet page after submit' + ); + assert.dom('[data-test-row-value="Name"]').hasText(alphabetName); + assert.dom('[data-test-row-value="Alphabet"]').hasText('aeiou'); + await alphabetsPage.editLink(); + await settled(); + assert.strictEqual( + currentURL(), + `/vault/secrets/${backend}/edit/alphabet/${alphabetName}`, + 'Links 
to alphabet edit page' + ); + assert.dom('[data-test-input="name"]').hasAttribute('readonly'); + }); +}); diff --git a/ui/tests/acceptance/init-test.js b/ui/tests/acceptance/init-test.js new file mode 100644 index 0000000..a10169b --- /dev/null +++ b/ui/tests/acceptance/init-test.js @@ -0,0 +1,137 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; + +import initPage from 'vault/tests/pages/init'; +import Pretender from 'pretender'; + +const HEALTH_RESPONSE = { + initialized: false, + sealed: true, + standby: true, + performance_standby: false, + replication_performance_mode: 'unknown', + replication_dr_mode: 'unknown', + server_time_utc: 1538066726, + version: '1.13.0-dev1', +}; + +const CLOUD_SEAL_RESPONSE = { + keys: [], + keys_base64: [], + recovery_keys: [ + '1659986a8d56b998b175b6e259998f3c064c061d256c2a331681b8d122fedf0db4', + '4d34c58f56e4f077e3b74f9e8db2850fc251ac3f16e952441301eedc462addeb84', + '3b3cbdf4b2f5ac1e809ff1bb72fd9778e460856561728a871a9370345bd52e97f4', + 'aa99b46e2ed5d837ee9824b7894b24987be2f32c81ab9ff5ce9e07d2012eaf4158', + 'c2bf6d71d8db8ae09b26177ed393ecb274740fe9ab51884eaa00ac113a74c08ba7', + ], + recovery_keys_base64: [ + 'FlmYao1WuZixdbbiWZmPPAZMBh0lbCozFoG40SL+3w20', + 'TTTFj1bk8Hfjt0+ejbKFD8JRrD8W6VJEEwHu3EYq3euE', + 'Ozy99LL1rB6An/G7cv2XeORghWVhcoqHGpNwNFvVLpf0', + 'qpm0bi7V2DfumCS3iUskmHvi8yyBq5/1zp4H0gEur0FY', + 'wr9tcdjbiuCbJhd+05PssnR0D+mrUYhOqgCsETp0wIun', + ], + root_token: '48dF3Drr1jl4ayM0jcHrN4NC', +}; +const SEAL_RESPONSE = { + keys: [ + '1659986a8d56b998b175b6e259998f3c064c061d256c2a331681b8d122fedf0db4', + '4d34c58f56e4f077e3b74f9e8db2850fc251ac3f16e952441301eedc462addeb84', + '3b3cbdf4b2f5ac1e809ff1bb72fd9778e460856561728a871a9370345bd52e97f4', + ], + keys_base64: [ + 'FlmYao1WuZixdbbiWZmPPAZMBh0lbCozFoG40SL+3w20', + 'TTTFj1bk8Hfjt0+ejbKFD8JRrD8W6VJEEwHu3EYq3euE', + 'Ozy99LL1rB6An/G7cv2XeORghWVhcoqHGpNwNFvVLpf0', + ], + root_token: '48dF3Drr1jl4ayM0jcHrN4NC', +}; + +const CLOUD_SEAL_STATUS_RESPONSE = { + type: 'awskms', + sealed: true, + initialized: false, +}; +const SEAL_STATUS_RESPONSE = { + type: 'shamir', + sealed: true, + initialized: false, +}; + +const assertRequest = (req, assert, isCloud) => { + const json = JSON.parse(req.requestBody); + for (const key of ['recovery_shares', 'recovery_threshold']) { + assert[isCloud ? 'ok' : 'notOk']( + json[key], + `requestBody ${isCloud ? 'includes' : 'does not include'} cloud seal specific attribute: ${key}` + ); + } + for (const key of ['secret_shares', 'secret_threshold']) { + assert[isCloud ? 'notOk' : 'ok']( + json[key], + `requestBody ${isCloud ? 
'does not include' : 'includes'} shamir specific attribute: ${key}` + ); + } +}; + +module('Acceptance | init', function (hooks) { + setupApplicationTest(hooks); + + const setInitResponse = (server, resp) => { + server.put('/v1/sys/init', () => { + return [200, { 'Content-Type': 'application/json' }, JSON.stringify(resp)]; + }); + }; + const setStatusResponse = (server, resp) => { + server.get('/v1/sys/seal-status', () => { + return [200, { 'Content-Type': 'application/json' }, JSON.stringify(resp)]; + }); + }; + hooks.beforeEach(function () { + this.server = new Pretender(); + this.server.get('/v1/sys/health', () => { + return [200, { 'Content-Type': 'application/json' }, JSON.stringify(HEALTH_RESPONSE)]; + }); + this.server.get('/v1/sys/internal/ui/feature-flags', this.server.passthrough); + }); + + hooks.afterEach(function () { + this.server.shutdown(); + }); + + test('cloud seal init', async function (assert) { + assert.expect(6); + + setInitResponse(this.server, CLOUD_SEAL_RESPONSE); + setStatusResponse(this.server, CLOUD_SEAL_STATUS_RESPONSE); + + await initPage.init(5, 3); + + assert.strictEqual( + initPage.keys.length, + CLOUD_SEAL_RESPONSE.recovery_keys.length, + 'shows all of the recovery keys' + ); + assert.strictEqual(initPage.buttonText, 'Continue to Authenticate', 'links to authenticate'); + assertRequest(this.server.handledRequests.findBy('url', '/v1/sys/init'), assert, true); + }); + + test('shamir seal init', async function (assert) { + assert.expect(6); + + setInitResponse(this.server, SEAL_RESPONSE); + setStatusResponse(this.server, SEAL_STATUS_RESPONSE); + + await initPage.init(3, 2); + + assert.strictEqual(initPage.keys.length, SEAL_RESPONSE.keys.length, 'shows all of the unseal keys'); + assert.strictEqual(initPage.buttonText, 'Continue to Unseal', 'links to unseal'); + assertRequest(this.server.handledRequests.findBy('url', '/v1/sys/init'), assert, false); + }); +}); diff --git a/ui/tests/acceptance/jwt-auth-method-test.js b/ui/tests/acceptance/jwt-auth-method-test.js new file mode 100644 index 0000000..166f0f9 --- /dev/null +++ b/ui/tests/acceptance/jwt-auth-method-test.js @@ -0,0 +1,98 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { click, visit, fillIn } from '@ember/test-helpers'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import sinon from 'sinon'; +import { Response } from 'miragejs'; +import { ERROR_JWT_LOGIN } from 'vault/components/auth-jwt'; + +module('Acceptance | jwt auth method', function (hooks) { + setupApplicationTest(hooks); + setupMirage(hooks); + + hooks.beforeEach(function () { + localStorage.clear(); // ensure that a token isn't stored, otherwise visit('/vault/auth') will redirect to secrets + this.stub = sinon.stub(); + this.server.post( + '/auth/:path/oidc/auth_url', + () => + new Response( + 400, + { 'Content-Type': 'application/json' }, + JSON.stringify({ errors: [ERROR_JWT_LOGIN] }) + ) + ); + this.server.get('/auth/foo/oidc/callback', () => ({ + auth: { client_token: 'root' }, + })); + }); + + test('it works correctly with default name and no role', async function (assert) { + assert.expect(6); + this.server.post('/auth/jwt/login', (schema, req) => { + const { jwt, role } = JSON.parse(req.requestBody); + assert.ok(true, 'request made to auth/jwt/login after submit'); + assert.strictEqual(jwt, 'my-test-jwt-token', 'JWT token is sent in body'); + assert.strictEqual(role, undefined, 'role is not sent in body when not filled in'); + req.passthrough(); + }); + await visit('/vault/auth'); + await fillIn('[data-test-select="auth-method"]', 'jwt'); + assert.dom('[data-test-role]').exists({ count: 1 }, 'Role input exists'); + assert.dom('[data-test-jwt]').exists({ count: 1 }, 'JWT input exists'); + await fillIn('[data-test-jwt]', 'my-test-jwt-token'); + await click('[data-test-auth-submit]'); + assert.dom('[data-test-error]').exists('Failed login'); + }); + + test('it works correctly with default name and a role', async function (assert) { + assert.expect(7); + this.server.post('/auth/jwt/login', (schema, req) => { + const { jwt, role } = JSON.parse(req.requestBody); + assert.ok(true, 'request made to auth/jwt/login after login'); + assert.strictEqual(jwt, 'my-test-jwt-token', 'JWT token is sent in body'); + assert.strictEqual(role, 'some-role', 'role is sent in the body when filled in'); + req.passthrough(); + }); + await visit('/vault/auth'); + await fillIn('[data-test-select="auth-method"]', 'jwt'); + assert.dom('[data-test-role]').exists({ count: 1 }, 'Role input exists'); + assert.dom('[data-test-jwt]').exists({ count: 1 }, 'JWT input exists'); + await fillIn('[data-test-role]', 'some-role'); + await fillIn('[data-test-jwt]', 'my-test-jwt-token'); + assert.dom('[data-test-jwt]').exists({ count: 1 }, 'JWT input exists'); + await click('[data-test-auth-submit]'); + assert.dom('[data-test-error]').exists('Failed login'); + }); + + test('it works correctly with custom endpoint and a role', async function (assert) { + assert.expect(6); + this.server.get('/sys/internal/ui/mounts', () => ({ + data: { + auth: { + 'test-jwt/': { description: '', options: {}, type: 'jwt' }, + }, + }, + })); + this.server.post('/auth/test-jwt/login', (schema, req) => { + const { jwt, role } = JSON.parse(req.requestBody); + assert.ok(true, 'request made to auth/test-jwt/login after login'); + assert.strictEqual(jwt, 'my-test-jwt-token', 'JWT token is sent in body'); + assert.strictEqual(role, 'some-role', 'role is sent in body when filled in'); + req.passthrough(); + }); + await visit('/vault/auth'); + await click('[data-test-auth-method-link="jwt"]'); +
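// the custom mount should render the same role and JWT inputs as the default jwt mount +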
assert.dom('[data-test-role]').exists({ count: 1 }, 'Role input exists'); + assert.dom('[data-test-jwt]').exists({ count: 1 }, 'JWT input exists'); + await fillIn('[data-test-role]', 'some-role'); + await fillIn('[data-test-jwt]', 'my-test-jwt-token'); + await click('[data-test-auth-submit]'); + assert.dom('[data-test-error]').exists('Failed login'); + }); +}); diff --git a/ui/tests/acceptance/leases-test.js b/ui/tests/acceptance/leases-test.js new file mode 100644 index 0000000..de306d6 --- /dev/null +++ b/ui/tests/acceptance/leases-test.js @@ -0,0 +1,119 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { click, currentRouteName, visit } from '@ember/test-helpers'; +// TESTS HERE ARE SKIPPED +// running vault with -dev-leased-kv flag lets you run some of these tests +// but generating leases programmatically is currently difficult +// +// TODO revisit this when it's easier to create leases + +import { module, skip } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { v4 as uuidv4 } from 'uuid'; +import secretList from 'vault/tests/pages/secrets/backend/list'; +import secretEdit from 'vault/tests/pages/secrets/backend/kv/edit-secret'; +import mountSecrets from 'vault/tests/pages/settings/mount-secret-backend'; +import authPage from 'vault/tests/pages/auth'; +import logout from 'vault/tests/pages/logout'; + +module('Acceptance | leases', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(async function () { + await authPage.login(); + this.enginePath = `kv-for-lease-${uuidv4()}`; + // need a version 1 mount for leased secrets here + return mountSecrets.visit().path(this.enginePath).type('kv').version(1).submit(); + }); + + hooks.afterEach(function () { + return logout.visit(); + }); + + const createSecret = async (context, isRenewable) => { + context.name = `secret-${uuidv4()}`; + await secretList.visitRoot({ backend: context.enginePath }); + await secretList.create(); + if (isRenewable) { + await secretEdit.createSecret(context.name, 'ttl', '30h'); + } else { + await secretEdit.createSecret(context.name, 'foo', 'bar'); + } + }; + + const navToDetail = async (context) => { + await visit('/vault/access/leases/'); + await click(`[data-test-lease-link="${context.enginePath}/"]`); + await click(`[data-test-lease-link="${context.enginePath}/${context.name}/"]`); + await click(`[data-test-lease-link]:eq(0)`); + }; + + skip('it renders the show page', async function (assert) { + await createSecret(this); + await navToDetail(this); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.leases.show', + 'a lease for the secret is in the list' + ); + assert + .dom('[data-test-lease-renew-picker]') + .doesNotExist('non-renewable lease does not render a renew picker'); + }); + + // skip for now until we find an easy way to generate a renewable lease + skip('it renders the show page with a picker', async function (assert) { + await createSecret(this, true); + await navToDetail(this); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.leases.show', + 'a lease for the secret is in the list' + ); + assert + .dom('[data-test-lease-renew-picker]') + .exists({ count: 1 }, 'renewable lease renders a renew picker'); + }); + + skip('it removes leases upon revocation', async function (assert) { + await createSecret(this); + await navToDetail(this); + await click('[data-test-lease-revoke] button'); + await click('[data-test-confirm-button]'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.leases.list-root', + 'it navigates back to the leases root on revocation' + ); + await click(`[data-test-lease-link="${this.enginePath}/"]`); + await click('[data-test-lease-link="data/"]'); + assert + .dom(`[data-test-lease-link="${this.enginePath}/data/${this.name}/"]`) + .doesNotExist('link to the lease was removed with revocation'); + }); + + skip('it removes branches when a prefix is revoked', async function (assert) { + await createSecret(this); + await visit(`/vault/access/leases/list/${this.enginePath}`); + await click('[data-test-lease-revoke-prefix] button'); + await click('[data-test-confirm-button]'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.leases.list-root', + 'it navigates back to the leases root on revocation' + ); + assert + .dom(`[data-test-lease-link="${this.enginePath}/"]`) + .doesNotExist('link to the prefix was removed with revocation'); + }); + + skip('lease not found', async function (assert) { + await visit('/vault/access/leases/show/not-found'); + assert + .dom('[data-test-lease-error]') + .hasText('not-found is not a valid lease ID', 'it shows an error when the lease is not found'); + }); +}); diff --git a/ui/tests/acceptance/managed-namespace-test.js b/ui/tests/acceptance/managed-namespace-test.js new file mode 100644 index 0000000..6f6bb40 --- /dev/null +++ b/ui/tests/acceptance/managed-namespace-test.js @@ -0,0 +1,86 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { currentURL, visit, fillIn } from '@ember/test-helpers'; +import { setupApplicationTest } from 'ember-qunit'; +import Pretender from 'pretender'; +import logout from 'vault/tests/pages/logout'; +import { getManagedNamespace } from 'vault/routes/vault/cluster'; + +const FEATURE_FLAGS_RESPONSE = { + feature_flags: ['VAULT_CLOUD_ADMIN_NAMESPACE'], +}; + +module('Acceptance | Enterprise | Managed namespace root', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(function () { + /** + * Since the features are fetched on application load, + * we have to populate them on the beforeEach hook because + * the fetch won't trigger again within the tests + */ + this.server = new Pretender(function () { + this.get('/v1/sys/internal/ui/feature-flags', () => { + return [200, { 'Content-Type': 'application/json' }, JSON.stringify(FEATURE_FLAGS_RESPONSE)]; + }); + this.get('/v1/sys/health', this.passthrough); + this.get('/v1/sys/seal-status', this.passthrough); + this.get('/v1/sys/license/features', this.passthrough); + this.get('/v1/sys/internal/ui/mounts', this.passthrough); + }); + }); + + hooks.afterEach(function () { + this.server.shutdown(); + }); + + test('it shows the managed namespace toolbar when feature flag exists', async function (assert) { + await logout.visit(); + await visit('/vault/auth'); + assert.ok(currentURL().startsWith('/vault/auth'), 'Redirected to auth'); + assert.ok(currentURL().includes('?namespace=admin'), 'with base namespace'); + assert.dom('[data-test-namespace-toolbar]').doesNotExist('Normal namespace toolbar does not exist'); + assert.dom('[data-test-managed-namespace-toolbar]').exists('Managed namespace toolbar exists'); + assert.dom('[data-test-managed-namespace-root]').hasText('/admin', 'Shows /admin namespace prefix'); + assert.dom('input#namespace').hasAttribute('placeholder', '/ (Default)'); + await fillIn('input#namespace', '/foo'); + const encodedNamespace = encodeURIComponent('admin/foo'); + assert.strictEqual( + currentURL(), +
`/vault/auth?namespace=${encodedNamespace}&with=token`, + 'Correctly prepends root to namespace' + ); + }); + + test('getManagedNamespace helper works as expected', function (assert) { + let managedNs = getManagedNamespace(null, 'admin'); + assert.strictEqual(managedNs, 'admin', 'returns root ns when no namespace present'); + managedNs = getManagedNamespace('admin/', 'admin'); + assert.strictEqual(managedNs, 'admin', 'returns root ns when matches passed ns'); + managedNs = getManagedNamespace('adminfoo/', 'admin'); + assert.strictEqual( + managedNs, + 'admin/adminfoo/', + 'appends passed namespace to root even if it matches without slashes' + ); + managedNs = getManagedNamespace('admin/foo/', 'admin'); + assert.strictEqual(managedNs, 'admin/foo/', 'returns passed namespace if it starts with root and /'); + }); + + test('it redirects to root prefixed ns when non-root passed', async function (assert) { + await logout.visit(); + await visit('/vault/auth?namespace=admindev'); + assert.ok(currentURL().startsWith('/vault/auth'), 'Redirected to auth'); + assert.ok( + currentURL().includes(`?namespace=${encodeURIComponent('admin/admindev')}`), + 'with appended namespace' + ); + + assert.dom('[data-test-managed-namespace-root]').hasText('/admin', 'Shows /admin namespace prefix'); + assert.dom('input#namespace').hasValue('/admindev', 'Input has /admindev value'); + }); +}); diff --git a/ui/tests/acceptance/mfa-login-enforcement-test.js b/ui/tests/acceptance/mfa-login-enforcement-test.js new file mode 100644 index 0000000..bd8e05e --- /dev/null +++ b/ui/tests/acceptance/mfa-login-enforcement-test.js @@ -0,0 +1,220 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { click, currentRouteName, fillIn, visit } from '@ember/test-helpers'; +import authPage from 'vault/tests/pages/auth'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import ENV from 'vault/config/environment'; + +module('Acceptance | mfa-login-enforcement', function (hooks) { + setupApplicationTest(hooks); + setupMirage(hooks); + + hooks.before(function () { + ENV['ember-cli-mirage'].handler = 'mfaConfig'; + }); + hooks.beforeEach(function () { + return authPage.login(); + }); + hooks.after(function () { + ENV['ember-cli-mirage'].handler = null; + }); + + test('it should create login enforcement', async function (assert) { + await visit('/ui/vault/access'); + await click('[data-test-sidebar-nav-link="Multi-factor authentication"]'); + await click('[data-test-tab="enforcements"]'); + await click('[data-test-enforcement-create]'); + + assert.dom('[data-test-mleh-title]').hasText('New enforcement', 'Title renders'); + await click('[data-test-mlef-save]'); + assert + .dom('[data-test-inline-error-message]') + .exists({ count: 3 }, 'Validation error messages are displayed'); + + await click('[data-test-mlef-cancel]'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.mfa.enforcements.index', + 'Cancel transitions to enforcements list' + ); + await click('[data-test-enforcement-create]'); + await click('.breadcrumb a'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.mfa.enforcements.index', + 'Breadcrumb transitions to enforcements list' + ); + await click('[data-test-enforcement-create]'); + + await fillIn('[data-test-mlef-input="name"]', 'foo'); + await click('[data-test-component="search-select"] .ember-basic-dropdown-trigger'); + await
click('.ember-power-select-option'); + await fillIn('[data-test-mount-accessor-select]', 'auth_userpass_bb95c2b1'); + await click('[data-test-mlef-add-target]'); + await click('[data-test-mlef-save]'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.mfa.enforcements.enforcement.index', + 'Route transitions to enforcement on save success' + ); + }); + + test('it should list login enforcements', async function (assert) { + await visit('/vault/access/mfa/enforcements'); + assert.dom('[data-test-tab="enforcements"]').hasClass('active', 'Enforcements tab is active'); + assert.dom('.toolbar-link').exists({ count: 1 }, 'Correct number of toolbar links render'); + assert + .dom('[data-test-enforcement-create]') + .includesText('New enforcement', 'New enforcement link renders'); + + await click('[data-test-enforcement-create]'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.mfa.enforcements.create', + 'New enforcement link transitions to create route' + ); + await click('[data-test-mlef-cancel]'); + + const enforcements = this.server.db.mfaLoginEnforcements.where({}); + const item = enforcements[0]; + assert.dom('[data-test-list-item]').exists({ count: enforcements.length }, 'Enforcements list renders'); + assert + .dom(`[data-test-list-item="${item.name}"] svg`) + .hasClass('flight-icon-lock', 'Lock icon renders for list item'); + assert.dom(`[data-test-list-item-title="${item.name}"]`).hasText(item.name, 'Enforcement name renders'); + + await click('[data-test-popup-menu-trigger]'); + await click('[data-test-list-item-link="details"]'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.mfa.enforcements.enforcement.index', + 'Details more menu action transitions to enforcement route' + ); + await click('.breadcrumb a'); + await click('[data-test-popup-menu-trigger]'); + await click('[data-test-list-item-link="edit"]'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.mfa.enforcements.enforcement.edit', + 'Edit more menu action transitions to enforcement edit route' + ); + }); + + test('it should display login enforcement', async function (assert) { + await visit('/vault/access/mfa/enforcements'); + const enforcement = this.server.db.mfaLoginEnforcements.where({})[0]; + await click(`[data-test-list-item="${enforcement.name}"]`); + // heading + assert.dom('h1').includesText(enforcement.name, 'Name renders in title'); + assert.dom('h1 svg').hasClass('flight-icon-lock', 'Lock icon renders in title'); + assert.dom('[data-test-tab="targets"]').hasClass('active', 'Targets tab is active by default'); + assert.dom('[data-test-target]').exists({ count: 4 }, 'Targets render in list'); + // targets tab + const targets = { + accessor: ['userpass/', 'auth_userpass_bb95c2b1', '/ui/vault/access/userpass'], + method: ['userpass', 'All userpass mounts (1)'], + entity: [ + 'test-entity', + 'f831667b-7392-7a1c-c0fc-33d48cb1c57d', + '/ui/vault/access/identity/entities/f831667b-7392-7a1c-c0fc-33d48cb1c57d/details', + ], + group: [ + 'test-group', + '34db6b52-591e-bc22-8af0-4add5e167326', + '/ui/vault/access/identity/groups/34db6b52-591e-bc22-8af0-4add5e167326/details', + ], + }; + for (const key in targets) { + const t = targets[key]; + const selector = `[data-test-target="${t[0]}"]`; + assert.dom(selector).includesText(`${t[0]} ${t[1]}`, `Target text renders for ${key} type`); + if (key !== 'method') { + await click(`${selector} [data-test-popup-menu-trigger]`); + assert + .dom(`[data-test-target-link="${t[0]}"]`) + .hasAttribute('href', 
t[2], `Details link renders for ${key} type`); + } else { + assert.dom(`${selector} [data-test-popup-menu-trigger]`).doesNotExist('Method type has no link'); + } + } + // methods tab + await click('[data-test-tab="methods"]'); + assert.dom('[data-test-tab="methods"]').hasClass('active', 'Methods tab is active'); + const method = this.owner.lookup('service:store').peekRecord('mfa-method', enforcement.mfa_method_ids[0]); + assert + .dom(`[data-test-mfa-method-list-item="${method.id}"]`) + .includesText( + `${method.name} ${method.id} Namespace: ${method.namespace_id}`, + 'Method list item renders' + ); + await click('[data-test-popup-menu-trigger]'); + assert + .dom(`[data-test-mfa-method-menu-link="details"]`) + .hasAttribute('href', `/ui/vault/access/mfa/methods/${method.id}`, `Details link renders for method`); + assert + .dom(`[data-test-mfa-method-menu-link="edit"]`) + .hasAttribute('href', `/ui/vault/access/mfa/methods/${method.id}/edit`, `Edit link renders for method`); + // toolbar + assert + .dom('[data-test-enforcement-edit]') + .hasAttribute( + 'href', + `/ui/vault/access/mfa/enforcements/${enforcement.name}/edit`, + 'Toolbar edit action has link to edit route' + ); + await click('[data-test-enforcement-delete]'); + assert.dom('[data-test-confirm-button]').isDisabled('Delete button disabled with no confirmation'); + await fillIn('[data-test-confirmation-modal-input]', enforcement.name); + await click('[data-test-confirm-button]'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.mfa.enforcements.index', + 'Route transitions to enforcements list on delete success' + ); + }); + + test('it should edit login enforcement', async function (assert) { + await visit('/vault/access/mfa/enforcements'); + const enforcement = this.server.db.mfaLoginEnforcements.where({})[0]; + await click('[data-test-popup-menu-trigger]'); + await click('[data-test-list-item-link="edit"]'); + + assert.dom('h1').hasText('Update enforcement', 'Title renders'); + assert.dom('[data-test-mlef-input="name"]').hasValue(enforcement.name, 'Name input is populated'); + assert.dom('[data-test-mlef-input="name"]').isDisabled('Name is disabled and cannot be changed'); + + const method = this.owner.lookup('service:store').peekRecord('mfa-method', enforcement.mfa_method_ids[0]); + assert + .dom('[data-test-selected-option]') + .includesText(`${method.name} ${method.id}`, 'Selected mfa method renders'); + assert + .dom('[data-test-mlef-target="Authentication mount"]') + .includesText('Authentication mount auth_userpass_bb95c2b1', 'Accessor target populates'); + assert + .dom('[data-test-mlef-target="Authentication method"]') + .includesText('Authentication method userpass', 'Method target populates'); + assert + .dom('[data-test-mlef-target="Group"]') + .includesText('Group test-group 34db6b52-591e-bc22-8af0-4add5e167326', 'Group target populates'); + assert + .dom('[data-test-mlef-target="Entity"]') + .includesText('Entity test-entity f831667b-7392-7a1c-c0fc-33d48cb1c57d', 'Entity target populates'); + + await click('[data-test-mlef-remove-target="Entity"]'); + await click('[data-test-mlef-remove-target="Group"]'); + await click('[data-test-mlef-remove-target="Authentication method"]'); + await click('[data-test-mlef-save]'); + + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.mfa.enforcements.enforcement.index', + 'Route transitions to enforcement on save success' + ); + assert.dom('[data-test-target]').exists({ count: 1 }, 'Targets were successfully removed on save'); + }); +}); diff 
--git a/ui/tests/acceptance/mfa-login-test.js b/ui/tests/acceptance/mfa-login-test.js new file mode 100644 index 0000000..e30ca59 --- /dev/null +++ b/ui/tests/acceptance/mfa-login-test.js @@ -0,0 +1,190 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { click, currentRouteName, fillIn, visit, waitUntil, find } from '@ember/test-helpers'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import ENV from 'vault/config/environment'; +import { validationHandler } from '../../mirage/handlers/mfa-login'; + +module('Acceptance | mfa-login', function (hooks) { + setupApplicationTest(hooks); + setupMirage(hooks); + + hooks.before(function () { + ENV['ember-cli-mirage'].handler = 'mfaLogin'; + }); + hooks.beforeEach(function () { + this.select = async (select = 0, option = 1) => { + const selector = `[data-test-mfa-select="${select}"]`; + const value = this.element.querySelector(`${selector} option:nth-child(${option + 1})`).value; + await fillIn(`${selector} select`, value); + }; + }); + hooks.after(function () { + ENV['ember-cli-mirage'].handler = null; + }); + + const login = async (user) => { + await visit('/vault/auth'); + await fillIn('[data-test-select="auth-method"]', 'userpass'); + await fillIn('[data-test-username]', user); + await fillIn('[data-test-password]', 'test'); + await click('[data-test-auth-submit]'); + }; + const didLogin = (assert) => { + assert.strictEqual(currentRouteName(), 'vault.cluster.secrets.backends', 'Route transitions after login'); + }; + const validate = async (multi) => { + await fillIn('[data-test-mfa-passcode="0"]', 'test'); + if (multi) { + await fillIn('[data-test-mfa-passcode="1"]', 'test'); + } + await click('[data-test-mfa-validate]'); + }; + + test('it should handle single mfa constraint with passcode method', async function (assert) { + assert.expect(4); + await login('mfa-a'); + assert + .dom('[data-test-mfa-description]') + .includesText( + 'Enter your authentication code to log in.', + 'Mfa form displays with correct description' + ); + assert.dom('[data-test-mfa-select]').doesNotExist('Select is hidden for single method'); + assert.dom('[data-test-mfa-passcode]').exists({ count: 1 }, 'Single passcode input renders'); + await validate(); + didLogin(assert); + }); + + test('it should handle single mfa constraint with push method', async function (assert) { + assert.expect(6); + + server.post('/sys/mfa/validate', async (schema, req) => { + await waitUntil(() => find('[data-test-mfa-description]')); + assert + .dom('[data-test-mfa-description]') + .hasText( + 'Multi-factor authentication is enabled for your account.', + 'Mfa form displays with correct description' + ); + assert.dom('[data-test-mfa-label]').hasText('Okta push notification', 'Correct method renders'); + assert + .dom('[data-test-mfa-push-instruction]') + .hasText('Check device for push notification', 'Push notification instruction renders'); + assert.dom('[data-test-mfa-validate]').isDisabled('Button is disabled while validating'); + assert + .dom('[data-test-mfa-validate]') + .hasClass('is-loading', 'Loading class applied to button while validating'); + return validationHandler(schema, req); + }); + + await login('mfa-b'); + didLogin(assert); + }); + + test('it should handle single mfa constraint with 2 passcode methods', async function (assert) { + assert.expect(4); + await login('mfa-c'); + assert + .dom('[data-test-mfa-description]') + 
.includesText('Select the MFA method you wish to use.', 'Mfa form displays with correct description'); + assert + .dom('[data-test-mfa-select]') + .exists({ count: 1 }, 'Select renders for single constraint with multiple methods'); + assert.dom('[data-test-mfa-passcode]').doesNotExist('Passcode input hidden until selection is made'); + await this.select(); + await validate(); + didLogin(assert); + }); + + test('it should handle single mfa constraint with 2 push methods', async function (assert) { + assert.expect(1); + await login('mfa-d'); + await this.select(); + await click('[data-test-mfa-validate]'); + didLogin(assert); + }); + + test('it should handle single mfa constraint with 1 passcode and 1 push method', async function (assert) { + assert.expect(3); + await login('mfa-e'); + await this.select(0, 2); + assert.dom('[data-test-mfa-passcode]').exists('Passcode input renders'); + await this.select(); + assert.dom('[data-test-mfa-passcode]').doesNotExist('Passcode input is hidden for push method'); + await click('[data-test-mfa-validate]'); + didLogin(assert); + }); + + test('it should handle multiple mfa constraints with 1 passcode method each', async function (assert) { + assert.expect(3); + await login('mfa-f'); + assert + .dom('[data-test-mfa-description]') + .includesText( + 'Two methods are required for successful authentication.', + 'Mfa form displays with correct description' + ); + assert.dom('[data-test-mfa-select]').doesNotExist('Selects do not render for single methods'); + await validate(true); + didLogin(assert); + }); + + test('it should handle multiple mfa constraints with 1 push method each', async function (assert) { + assert.expect(1); + await login('mfa-g'); + didLogin(assert); + }); + + test('it should handle multiple mfa constraints with 1 passcode and 1 push method', async function (assert) { + assert.expect(4); + await login('mfa-h'); + assert + .dom('[data-test-mfa-description]') + .includesText( + 'Two methods are required for successful authentication.', + 'Mfa form displays with correct description' + ); + assert.dom('[data-test-mfa-select]').doesNotExist('Select is hidden for single method'); + assert.dom('[data-test-mfa-passcode]').exists({ count: 1 }, 'Passcode input renders'); + await validate(); + didLogin(assert); + }); + + test('it should handle multiple mfa constraints with multiple mixed methods', async function (assert) { + assert.expect(2); + await login('mfa-i'); + assert + .dom('[data-test-mfa-description]') + .includesText( + 'Two methods are required for successful authentication.', + 'Mfa form displays with correct description' + ); + await this.select(); + await fillIn('[data-test-mfa-passcode="1"]', 'test'); + await click('[data-test-mfa-validate]'); + didLogin(assert); + }); + + test('it should render unauthorized message for push failure', async function (assert) { + await login('mfa-j'); + assert.dom('[data-test-auth-form]').doesNotExist('Auth form hidden when mfa fails'); + assert.dom('[data-test-empty-state-title]').hasText('Unauthorized', 'Error title renders'); + assert + .dom('[data-test-empty-state-subText]') + .hasText('PingId MFA validation failed', 'Error message from server renders'); + assert + .dom('[data-test-empty-state-message]') + .hasText( + 'Multi-factor authentication is required, but failed. 
Go back and try again, or contact your administrator.', + 'Error description renders' + ); + await click('[data-test-mfa-error] button'); + assert.dom('[data-test-auth-form]').exists('Auth form renders after mfa error dismissal'); + }); +}); diff --git a/ui/tests/acceptance/mfa-method-test.js b/ui/tests/acceptance/mfa-method-test.js new file mode 100644 index 0000000..400b69b --- /dev/null +++ b/ui/tests/acceptance/mfa-method-test.js @@ -0,0 +1,309 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { click, currentRouteName, currentURL, fillIn, visit } from '@ember/test-helpers'; +import authPage from 'vault/tests/pages/auth'; +import logout from 'vault/tests/pages/logout'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import ENV from 'vault/config/environment'; +import { Response } from 'miragejs'; +import { underscore } from '@ember/string'; + +module('Acceptance | mfa-method', function (hooks) { + setupApplicationTest(hooks); + setupMirage(hooks); + + hooks.before(function () { + ENV['ember-cli-mirage'].handler = 'mfaConfig'; + }); + hooks.beforeEach(async function () { + this.store = this.owner.lookup('service:store'); + this.getMethods = () => + ['Totp', 'Duo', 'Okta', 'Pingid'].reduce((methods, type) => { + methods.addObjects(this.server.db[`mfa${type}Methods`].where({})); + return methods; + }, []); + await logout.visit(); + return authPage.login(); + }); + hooks.after(function () { + ENV['ember-cli-mirage'].handler = null; + }); + + test('it should display landing page when no methods exist', async function (assert) { + this.server.get('/identity/mfa/method/', () => new Response(404, {}, { errors: [] })); + await visit('/vault/access/mfa/methods'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.mfa.index', + 'Route redirects to mfa index when no methods exist' + ); + await click('[data-test-mfa-configure]'); + assert.strictEqual(currentRouteName(), 'vault.cluster.access.mfa.methods.create'); + }); + + test('it should list methods', async function (assert) { + await visit('/vault/access/mfa'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.mfa.methods.index', + 'Parent route redirects to methods when some exist' + ); + assert.dom('[data-test-tab="methods"]').hasClass('active', 'Methods tab is active'); + assert.dom('.toolbar-link').exists({ count: 1 }, 'Correct number of toolbar links render'); + assert.dom('[data-test-mfa-method-create]').includesText('New MFA method', 'New mfa link renders'); + + await click('[data-test-mfa-method-create]'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.mfa.methods.create', + 'New method link transitions to create route' + ); + await click('.breadcrumb a'); + + const methods = this.getMethods(); + const model = this.store.peekRecord('mfa-method', methods[0].id); + assert.dom('[data-test-mfa-method-list-item]').exists({ count: methods.length }, 'Methods list renders'); + assert.dom(`[data-test-mfa-method-list-icon="${model.type}"]`).exists('Icon renders for list item'); + assert + .dom(`[data-test-mfa-method-list-item="${model.id}"]`) + .includesText( + `${model.name} ${model.id} Namespace: ${model.namespace_id}`, + 'Copy renders for list item' + ); + + await click('[data-test-popup-menu-trigger]'); + await click('[data-test-mfa-method-menu-link="details"]'); + assert.strictEqual( + currentRouteName(), + 
'vault.cluster.access.mfa.methods.method.index', + 'Details more menu action transitions to method route' + ); + await click('.breadcrumb a'); + await click('[data-test-popup-menu-trigger]'); + await click('[data-test-mfa-method-menu-link="edit"]'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.mfa.methods.method.edit', + 'Edit more menu action transitions to method edit route' + ); + }); + + test('it should display method details', async function (assert) { + // ensure methods are tied to an enforcement + this.server.get('/identity/mfa/login-enforcement', () => { + const record = this.server.create('mfa-login-enforcement', { + mfa_method_ids: this.getMethods().mapBy('id'), + }); + return { + data: { + key_info: { [record.name]: record }, + keys: [record.name], + }, + }; + }); + await visit('/vault/access/mfa/methods'); + await click('[data-test-mfa-method-list-item]'); + assert.dom('[data-test-tab="config"]').hasClass('active', 'Configuration tab is active by default'); + assert + .dom('[data-test-confirm-action-trigger]') + .isDisabled('Delete toolbar action disabled when method is attached to an enforcement'); + + const fields = [ + ['Issuer', 'Period', 'Key size', 'QR size', 'Algorithm', 'Digits', 'Skew', 'Max validation attempts'], + ['Duo API hostname', 'Passcode reminder'], + ['Organization name', 'Base URL'], + ['Use signature', 'Idp url', 'Admin url', 'Authenticator url', 'Org alias'], + ]; + for (const [index, labels] of fields.entries()) { + if (index) { + await click(`[data-test-mfa-method-list-item]:nth-of-type(${index + 2})`); + } + const url = currentURL(); + const id = url.slice(url.lastIndexOf('/') + 1); + const model = this.store.peekRecord('mfa-method', id); + + labels.forEach((label) => { + assert.dom(`[data-test-row-label="${label}"]`).hasText(label, `${label} field label renders`); + const key = + { + 'Duo API hostname': 'api_hostname', + 'Passcode reminder': 'use_passcode', + 'Organization name': 'org_name', + }[label] || underscore(label); + const value = typeof model[key] === 'boolean' ? (model[key] ? 
'Yes' : 'No') : model[key].toString(); + assert.dom(`[data-test-value-div="${label}"]`).hasText(value, `${label} value renders`); + }); + await click('.breadcrumb a'); + } + + await click('[data-test-mfa-method-list-item]'); + await click('[data-test-mfa-method-edit]'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.mfa.methods.method.edit', + 'Toolbar action transitions to edit route' + ); + }); + + test('it should delete method that is not associated with any login enforcements', async function (assert) { + this.server.get('/identity/mfa/login-enforcement', () => new Response(404, {}, { errors: [] })); + + await visit('/vault/access/mfa/methods'); + const methodCount = this.element.querySelectorAll('[data-test-mfa-method-list-item]').length; + await click('[data-test-mfa-method-list-item]'); + await click('[data-test-confirm-action-trigger]'); + await click('[data-test-confirm-button]'); + assert.dom('[data-test-mfa-method-list-item]').exists({ count: methodCount - 1 }, 'Method was deleted'); + }); + + test('it should create methods', async function (assert) { + assert.expect(12); + + await visit('/vault/access/mfa/methods'); + const methodCount = this.element.querySelectorAll('[data-test-mfa-method-list-item]').length; + + const methods = [ + { type: 'totp', required: ['issuer'] }, + { type: 'duo', required: ['secret_key', 'integration_key', 'api_hostname'] }, + { type: 'okta', required: ['org_name', 'api_token'] }, + { type: 'pingid', required: ['settings_file_base64'] }, + ]; + for (const [index, method] of methods.entries()) { + const { type, required } = method; + await click('[data-test-mfa-method-create]'); + await click(`[data-test-radio-card="${method.type}"]`); + await click('[data-test-mfa-create-next]'); + await click('[data-test-mleh-radio="skip"]'); + await click('[data-test-mfa-create-save]'); + assert + .dom('[data-test-inline-error-message]') + .exists({ count: required.length }, `Required field validations display for ${type}`); + + for (const [i, field] of required.entries()) { + let inputType = 'input'; + // this is less than ideal, but updating the test selectors in masked-input breaks a bunch of tests + // instead, add the field name to the masked-input textarea's data-test attribute for selection + if (['secret_key', 'integration_key'].includes(field)) { + inputType = 'textarea'; + const textareas = this.element.querySelectorAll('[data-test-textarea]'); + textareas[i].setAttribute('data-test-textarea', field); + } + await fillIn(`[data-test-${inputType}="${field}"]`, 'foo'); + } + await click('[data-test-mfa-create-save]'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.mfa.methods.method.index', + `${type} method is displayed on save` + ); + await click('.breadcrumb a'); + assert + .dom('[data-test-mfa-method-list-item]') + .exists({ count: methodCount + index + 1 }, `List updates with new ${type} method`); + } + }); + + test('it should create method with new enforcement', async function (assert) { + await visit('/vault/access/mfa/methods/create'); + await click('[data-test-radio-card="totp"]'); + await click('[data-test-mfa-create-next]'); + await fillIn('[data-test-input="issuer"]', 'foo'); + await fillIn('[data-test-mlef-input="name"]', 'bar'); + await fillIn('[data-test-mount-accessor-select]', 'auth_userpass_bb95c2b1'); + await click('[data-test-mlef-add-target]'); + await click('[data-test-mfa-create-save]'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.mfa.methods.method.index', + 'Route transitions to method on 
save' + ); + await click('[data-test-tab="enforcements"]'); + assert.dom('[data-test-list-item]').hasText('bar', 'Enforcement is listed in method view'); + await click('[data-test-sidebar-nav-link="Multi-factor authentication"]'); + await click('[data-test-tab="enforcements"]'); + assert.dom('[data-test-list-item="bar"]').hasText('bar', 'Enforcement is listed in enforcements view'); + await click('[data-test-list-item="bar"]'); + await click('[data-test-tab="methods"]'); + assert + .dom('[data-test-mfa-method-list-item]') + .includesText('TOTP', 'TOTP method is listed in enforcement view'); + }); + + test('it should create method and add it to existing enforcement', async function (assert) { + await visit('/vault/access/mfa/methods/create'); + await click('[data-test-radio-card="totp"]'); + await click('[data-test-mfa-create-next]'); + await fillIn('[data-test-input="issuer"]', 'foo'); + await click('[data-test-mleh-radio="existing"]'); + await click('[data-test-component="search-select"] .ember-basic-dropdown-trigger'); + const enforcement = this.element.querySelector('.ember-power-select-option'); + const name = enforcement.children[0].textContent.trim(); + await click(enforcement); + await click('[data-test-mfa-create-save]'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.mfa.methods.method.index', + 'Route transitions to method on save' + ); + await click('[data-test-tab="enforcements"]'); + assert.dom('[data-test-list-item]').hasText(name, 'Enforcement is listed in method view'); + }); + + test('it should edit methods', async function (assert) { + await visit('/vault/access/mfa/methods'); + const id = this.element.querySelector('[data-test-mfa-method-list-item] .tag').textContent.trim(); + const model = this.store.peekRecord('mfa-method', id); + await click('[data-test-mfa-method-list-item] .ember-basic-dropdown-trigger'); + await click('[data-test-mfa-method-menu-link="edit"]'); + + const keys = ['issuer', 'period', 'key_size', 'qr_size', 'algorithm', 'digits', 'skew']; + keys.forEach((key) => { + if (key === 'period') { + assert + .dom('[data-test-ttl-value="Period"]') + .hasValue(model.period.toString(), 'Period form field is populated with model value'); + assert.dom('[data-test-select="ttl-unit"]').hasValue('s', 'Correct time unit is shown for period'); + } else if (key === 'algorithm' || key === 'digits' || key === 'skew') { + const radioElem = this.element.querySelector(`input[name=${key}]:checked`); + assert + .dom(radioElem) + .hasValue(model[key].toString(), `${key} form field is populated with model value`); + } else { + assert + .dom(`[data-test-input="${key}"]`) + .hasValue(model[key].toString(), `${key} form field is populated with model value`); + } + }); + + await fillIn('[data-test-input="issuer"]', 'foo'); + const SHA1radioBtn = this.element.querySelectorAll('input[name=algorithm]')[0]; + await click(SHA1radioBtn); + await fillIn('[data-test-input="max_validation_attempts"]', 10); + await click('[data-test-mfa-method-save]'); + await fillIn('[data-test-confirmation-modal-input]', model.type); + await click('[data-test-confirm-button]'); + + assert.dom('[data-test-row-value="Issuer"]').hasText('foo', 'Issuer field is updated'); + assert.dom('[data-test-row-value="Algorithm"]').hasText('SHA1', 'Algorithm field is updated'); + assert + .dom('[data-test-row-value="Max validation attempts"]') + .hasText('10', 'Max validation attempts field is updated'); + }); + + test('it should navigate to enforcements create route from method enforcement tab', 
async function (assert) { + await visit('/vault/access/mfa/methods'); + await click('[data-test-mfa-method-list-item]'); + await click('[data-test-tab="enforcements"]'); + await click('[data-test-enforcement-create]'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.mfa.enforcements.create', + 'Navigates to enforcements create route from toolbar action' + ); + }); +}); diff --git a/ui/tests/acceptance/mfa-setup-test.js b/ui/tests/acceptance/mfa-setup-test.js new file mode 100644 index 0000000..9e7f08d --- /dev/null +++ b/ui/tests/acceptance/mfa-setup-test.js @@ -0,0 +1,101 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { create } from 'ember-cli-page-object'; +import { v4 as uuidv4 } from 'uuid'; +import { setupApplicationTest } from 'ember-qunit'; +import { click, fillIn } from '@ember/test-helpers'; +import authPage from 'vault/tests/pages/auth'; +import enablePage from 'vault/tests/pages/settings/auth/enable'; +import consoleClass from 'vault/tests/pages/components/console/ui-panel'; +import { setupMirage } from 'ember-cli-mirage/test-support'; + +const consoleComponent = create(consoleClass); +const USER = 'end-user'; +const PASSWORD = 'mypassword'; +const POLICY_NAME = 'identity_policy'; + +const writePolicy = async function (path) { + await enablePage.enable('userpass', path); + const identityPolicy = `path "identity/*" { + capabilities = ["create", "read", "update", "delete", "list"] + }`; + await consoleComponent.runCommands([ + `write sys/policies/acl/${POLICY_NAME} policy=${btoa(identityPolicy)}`, + ]); +}; + +const writeUserWithPolicy = async function (path) { + await consoleComponent.runCommands([ + `write auth/${path}/users/${USER} password=${PASSWORD} policies=${POLICY_NAME}`, + ]); +}; + +const setupUser = async function (path) { + await writePolicy(path); + await writeUserWithPolicy(path); + await click('[data-test-save-config="true"]'); +}; + +module('Acceptance | mfa-setup', function (hooks) { + setupApplicationTest(hooks); + setupMirage(hooks); + + hooks.beforeEach(async function () { + const path = `userpass-${uuidv4()}`; + await authPage.login(); + await setupUser(path); + await authPage.logout(); + await authPage.loginUsername(USER, PASSWORD, path); + await click('[data-test-user-menu-trigger]'); + await click('[data-test-user-menu-item="mfa"]'); + }); + + test('it should login through MFA and post to generate and be able to restart the setup', async function (assert) { + assert.expect(5); + // the network requests required in this test + this.server.post('/identity/mfa/method/totp/generate', (scheme, req) => { + const json = JSON.parse(req.requestBody); + assert.strictEqual(json.method_id, '123', 'sends the UUID value'); + return { + data: { + barcode: + 
'iVBORw0KGgoAAAANSUhEUgAAAMgAAADIEAAAAADYoy0BAAAGbUlEQVR4nOydUW7kOAxEk0Xuf+VZzABepAXTrKLUmMrivY8AacuSkgJFSyTdX79+fUAQ//ztCcArX79/fH7Oblat6+q/a7+2c++r5qX+fdU4av/rvF1+34+FhIEgYSBIGF/ff3F9gtuuW4u7Nbi6v1v7q36nT5i7PumpPywkDAQJA0HC+Lr7sFoj3ef0bj/h+gR13PX3ype4+4tufHWe1XgfWEgeCBIGgoRx60OmVGute/aj+oaq/a5vWXHnswMWEgaChIEgYRz1IRfdc7e65rrP/9Var/oKN47yjmgrFhIGgoSBIGHc+pDdOMGpOMPanxprX8+qVF/S+aBqXh3O/wMLCQNBwkCQMF58yDSf6KJbqzsf4LZf5z3tz92nqD5l8v/EQsJAkDAQJIw/PuRdGfDdc37X/sI9g+rG7/7eqS/r5qOAhYSBIGEgSBgv9SFufUN3xqSeHU2f36fxCjVuosbaT9ajYCFhIEgYCBLG7VnW9Axomv+krunV9RX3jErFzQ+bjIuFhIEgYSBIGLc1htMzp1P14ru+rPM9Fe5+5FRM/ft4WEgYCBIGgoTxGFPv6j2qWr21v2lsXPUF0zOrFfUMa/ouFsWnYiFhIEgYCBLG47tOurVvWoe+G5verT85lUOgnpk5NZZYSBgIEgaChHGbl+XGvt19htrvivu8r67t3bynOb/rvJRxsZAwECQMBAnj8/v6peY5vTtWvZsn5tYAuvld7j7M8ZFYSBgIEgaChPF5t85On9/d+MDKbr7V6TqXaTxE3UexD/kBIEgYCBLG4/eHTHNV1Rxg9Qyp69dl+nepuctVewUsJAwECQNBwrDelzWNHVf3d3T1J7vz7eY1zSFW+78DCwkDQcJAkDBuz7LKxhv5RnecWnvds7fqczWvTL3ezfPucywkDAQJA0HCePwOKrcOY7pPmPY/9R0V3b5nWje/tnsCCwkDQcJAkDCsfch/N23uR9wYtHt9WtNYXVfnTV7W/xAECQNBwrh95+LK9Pl8ty59N/9q6juq+3f3Icr8sJAwECQMBAnjzz7EfV6uUJ/Tp7m40/lM4xZdf9P6lWoc9iGBIEgYCBLGY43htP5cbbfinn3t5mPtnoW581H6x0LCQJAwECSMx+9Td3Nhq+vqPketU3Hn456Vdfd1uGde5GUFgyBhIEgYo3e/T2sCq89P1berqL6rus+NozjXsZAwECQMBAnjaDzkYpqf5D7nn5rXev1d8ZBuHPYhgSBIGAgSxuP3GLpxiAr1jGnXl53ygV1/p+I2d/dhIWEgSBgIEoZVY9hdP/UeqmkeleoDu3FdX+S2fzrLw0LCQJAwECSMl+8PmT7vV9fVM6Fu7b9wY+In6jUUqvmr8Rv2IcEgSBgIEsZtjeE0HrAba76o9gvdeN3v1TjT3GF13ur97EMCQZAwECQM6zuoLqb1H11/p3OG3x3DP1VPz1lWMAgSBoKE8fju92m9xLQW8XTdd9W+OkOb3t+1q+Z7BxYSBoKEgSBhPH6PYYXqY9TP5cmavmVa57KON53npF4GCwkDQcJAkDBu4yG7NYHTnN1p/7ux8Kr/U/si5+wPCwkDQcJAkDBu60Mupmv4tHbP7d/NAeg4ldOr+tA7sJAwECQMBAlDqlOf7ktOxaJP1ZOvTPOvOtz/w3ewkDAQJAwECeM2L2t65uSeTbk1f7txmq7fUz6waq+AhYSBIGEgSBiP7+29cPOZujXVrSdR1/jd3GC3JrJrp47//X4sJAwECQNBwnh818lFVz++tpvGMab7BXWNrzhVT7Ib4//AQvJAkDAQJIyXeMiKG0fY9R3T+EOFmydVtTs1XyVOg4WEgSBhIEgYL+/LUtmt7ZvGD9x3iXRnSt381Hm583nqBwsJA0HCQJAwXupDXLozLrcuvXvur67v1pOovk3dj6hnbnfzxELCQJAwECSM2+8x7HDX8Op+t76iQvUd1Ty6+9zc32ku8QcWkgeChIEgYUgx9YvdM69qHDeu0s2z6mfqM9R8rt0zPfYhgSBIGAgShpTb+y52fYQbl1nbuXlm1efqPkXxaVhIGAgSBoKEcdSHnM7Vre5zx1Ovu59PaxLJy/pBIEgYCBLG47vfVdw68hV3LXfnNZ2Put/YnQ9nWcEgSBgIEsbjOxdVujOad7wT5MR907OobvxpbsIHFpIHgoSBIGHcfn8I/D2wkDD+DQAA//8FNJPArbdKOwAAAABJRU5ErkJggg==', + url: 'otpauth://totp/Vault:26606dbe-d8ea-82ca-41b0-1250a4484079?algorithm=SHA1&digits=6&issuer=Vault&period=30&secret=FID3WRPRRADQDN3CGPVVOLKCXTZZPSML', + lease_duration: 0, + }, + }; + }); + this.server.post('/identity/mfa/method/totp/admin-destroy', (scheme, req) => { + const json = JSON.parse(req.requestBody); + assert.strictEqual(json.method_id, '123', 'sends the UUID value'); + // returns nothing + return {}; + }); + await fillIn('[data-test-input="uuid"]', 123); + await click('[data-test-verify]'); + assert.dom('[data-test-qrcode]').exists('the qrCode is shown.'); + assert.dom('[data-test-mfa-enabled-warning]').doesNotExist('warning does not show.'); + await click('[data-test-restart]'); + assert.dom('[data-test-step-one]').exists('back to step one.'); + }); + + test('it should show a warning if you enter in the same UUID without restarting the setup', async function (assert) { + assert.expect(2); + // the network requests required in this test + this.server.post('/identity/mfa/method/totp/generate', () => { + return { + data: null, + warnings: ['Entity already has a secret for MFA method “”'], + }; + }); + + await fillIn('[data-test-input="uuid"]', 123); + await click('[data-test-verify]'); + assert.dom('[data-test-qrcode]').doesNotExist('the qrCode 
is not shown.'); + assert.dom('[data-test-mfa-enabled-warning]').exists('the mfa-enabled warning shows.'); + }); +}); diff --git a/ui/tests/acceptance/not-found-test.js b/ui/tests/acceptance/not-found-test.js new file mode 100644 index 0000000..9375273 --- /dev/null +++ b/ui/tests/acceptance/not-found-test.js @@ -0,0 +1,53 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { visit } from '@ember/test-helpers'; +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import authPage from 'vault/tests/pages/auth'; +import logout from 'vault/tests/pages/logout'; +import Ember from 'ember'; + +let adapterException; + +module('Acceptance | not-found', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(function () { + adapterException = Ember.Test.adapter.exception; + Ember.Test.adapter.exception = () => {}; + return authPage.login(); + }); + + hooks.afterEach(function () { + Ember.Test.adapter.exception = adapterException; + return logout.visit(); + }); + + test('top-level not-found', async function (assert) { + await visit('/404'); + assert + .dom('[data-test-error-description]') + .hasText( + 'Sorry, we were unable to find any content at that URL. Double check it or go back home.', + 'renders cluster error template' + ); + }); + + test('vault route not-found', async function (assert) { + await visit('/vault/404'); + assert.dom('[data-test-not-found]').exists('renders the not found component'); + }); + + test('cluster route not-found', async function (assert) { + await visit('/vault/secrets/secret/404/show'); + assert.dom('[data-test-not-found]').exists('renders the not found component'); + }); + + test('secret not-found', async function (assert) { + await visit('/vault/secrets/secret/show/404'); + assert.dom('[data-test-secret-not-found]').exists('renders the message about the secret not being found'); + }); +}); diff --git a/ui/tests/acceptance/oidc-auth-method-test.js b/ui/tests/acceptance/oidc-auth-method-test.js new file mode 100644 index 0000000..96a7d99 --- /dev/null +++ b/ui/tests/acceptance/oidc-auth-method-test.js @@ -0,0 +1,107 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { click, fillIn, find, waitUntil } from '@ember/test-helpers'; +import authPage from 'vault/tests/pages/auth'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import { fakeWindow, buildMessage } from '../helpers/oidc-window-stub'; +import sinon from 'sinon'; +import { later, _cancelTimers as cancelTimers } from '@ember/runloop'; + +module('Acceptance | oidc auth method', function (hooks) { + setupApplicationTest(hooks); + setupMirage(hooks); + + hooks.beforeEach(function () { + this.openStub = sinon.stub(window, 'open').callsFake(() => fakeWindow.create()); + // OIDC tests fail when using fake timestamps, so we use the real timestamp.now here + this.server.post('/auth/oidc/oidc/auth_url', () => ({ + data: { auth_url: 'http://example.com' }, + })); + this.server.get('/auth/foo/oidc/callback', () => ({ + auth: { client_token: 'root' }, + })); + // ensure clean state + localStorage.removeItem('selectedAuth'); + }); + + hooks.afterEach(function () { + this.openStub.restore(); + }); + + test('it should login with oidc when selected from auth methods dropdown', async function (assert) { + assert.expect(1); + + this.server.get('/auth/token/lookup-self', (schema, req) => { + assert.ok(true, 'request made to auth/token/lookup-self after oidc callback'); + req.passthrough(); + }); + authPage.logout(); + // select from dropdown or click auth path tab + await waitUntil(() => find('[data-test-select="auth-method"]')); + await fillIn('[data-test-select="auth-method"]', 'oidc'); + later(() => { + window.postMessage(buildMessage().data, window.origin); + cancelTimers(); + }, 50); + await click('[data-test-auth-submit]'); + }); + + test('it should login with oidc from listed auth mount tab', async function (assert) { + assert.expect(3); + + this.server.get('/sys/internal/ui/mounts', () => ({ + data: { + auth: { + 'test-path/': { description: '', options: {}, type: 'oidc' }, + }, + }, + })); + // this request is fired twice -- total assertion count should be 3 rather than 2 + // JLR TODO - auth-jwt: verify whether additional request is necessary, especially when glimmerizing component + // look into whether didReceiveAttrs is necessary to trigger this request + this.server.post('/auth/test-path/oidc/auth_url', () => { + assert.ok(true, 'auth_url request made to correct non-standard mount path'); + return { data: { auth_url: 'http://example.com' } }; + }); + // there was a bug that would result in the /auth/:path/login endpoint being hit with an empty payload rather than lookup-self + // ensure that the correct endpoint is hit after the oidc callback + this.server.get('/auth/token/lookup-self', (schema, req) => { + assert.ok(true, 'request made to auth/token/lookup-self after oidc callback'); + req.passthrough(); + }); + + authPage.logout(); + // select from dropdown or click auth path tab + await waitUntil(() => find('[data-test-auth-method-link="oidc"]')); + await click('[data-test-auth-method-link="oidc"]'); + later(() => { + window.postMessage(buildMessage().data, window.origin); + cancelTimers(); + }, 50); + await click('[data-test-auth-submit]'); + }); + + // coverage for bug where token was selected as auth method for oidc and jwt + test('it should populate oidc auth method on logout', async function (assert) { + authPage.logout(); + // select from dropdown or click auth path tab + await waitUntil(() => find('[data-test-select="auth-method"]')); + await 
fillIn('[data-test-select="auth-method"]', 'oidc'); + later(() => { + window.postMessage(buildMessage().data, window.origin); + cancelTimers(); + }, 50); + await click('[data-test-auth-submit]'); + await waitUntil(() => find('[data-test-user-menu-trigger]')); + await click('[data-test-user-menu-trigger]'); + await click('#logout'); + assert + .dom('[data-test-select="auth-method"]') + .hasValue('oidc', 'Previous auth method selected on logout'); + }); +}); diff --git a/ui/tests/acceptance/oidc-config/clients-assignments-test.js b/ui/tests/acceptance/oidc-config/clients-assignments-test.js new file mode 100644 index 0000000..b774dad --- /dev/null +++ b/ui/tests/acceptance/oidc-config/clients-assignments-test.js @@ -0,0 +1,360 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { visit, currentURL, click, fillIn, findAll, currentRouteName } from '@ember/test-helpers'; +import { setupApplicationTest } from 'ember-qunit'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import ENV from 'vault/config/environment'; +import authPage from 'vault/tests/pages/auth'; +import logout from 'vault/tests/pages/logout'; +import { create } from 'ember-cli-page-object'; +import { clickTrigger } from 'ember-power-select/test-support/helpers'; +import ss from 'vault/tests/pages/components/search-select'; +import fm from 'vault/tests/pages/components/flash-message'; +import { + OIDC_BASE_URL, // -> '/vault/access/oidc' + SELECTORS, + clearRecord, + overrideCapabilities, + overrideMirageResponse, + ASSIGNMENT_LIST_RESPONSE, + ASSIGNMENT_DATA_RESPONSE, +} from 'vault/tests/helpers/oidc-config'; +const searchSelect = create(ss); +const flashMessage = create(fm); + +module('Acceptance | oidc-config clients and assignments', function (hooks) { + setupApplicationTest(hooks); + setupMirage(hooks); + + hooks.before(function () { + ENV['ember-cli-mirage'].handler = 'oidcConfig'; + }); + + hooks.beforeEach(async function () { + this.store = await this.owner.lookup('service:store'); + return authPage.login(); + }); + + hooks.afterEach(function () { + return logout.visit(); + }); + + hooks.after(function () { + ENV['ember-cli-mirage'].handler = null; + }); + + test('it renders only allow_all when no assignments are configured', async function (assert) { + assert.expect(3); + + //* clear out test state + await clearRecord(this.store, 'oidc/assignment', 'test-assignment'); + + await visit(OIDC_BASE_URL + '/assignments'); + assert.strictEqual(currentURL(), '/vault/access/oidc/assignments'); + assert.dom('[data-test-tab="assignments"]').hasClass('active', 'assignments tab is active'); + assert + .dom('[data-test-oidc-assignment-linked-block="allow_all"]') + .hasClass('is-disabled', 'renders default allow all assignment and is disabled.'); + }); + + test('it renders empty state when no clients are configured', async function (assert) { + assert.expect(5); + this.server.get('/identity/oidc/client', () => overrideMirageResponse(404)); + + await visit(OIDC_BASE_URL); + assert.strictEqual(currentURL(), '/vault/access/oidc'); + assert.dom('h1.title.is-3').hasText('OIDC Provider'); + assert.dom(SELECTORS.oidcHeader).hasText( + `Configure Vault to act as an OIDC identity provider, and offer Vault’s various authentication + methods and source of identity to any client applications. 
Learn more Create your first app`, + 'renders call to action header when no clients are configured' + ); + assert.dom('[data-test-oidc-landing]').exists('landing page renders when no clients are configured'); + assert + .dom(SELECTORS.oidcLandingImg) + .hasAttribute('src', '/ui/images/oidc-landing.png', 'renders landing image when no clients are configured'); + }); + + test('it creates an assignment inline, creates a client, updates client to limit access, deletes client', async function (assert) { + assert.expect(22); + + //* clear out test state + await clearRecord(this.store, 'oidc/client', 'test-app'); + await clearRecord(this.store, 'oidc/client', 'my-webapp'); // created by oidc-provider-test + await clearRecord(this.store, 'oidc/assignment', 'assignment-inline'); + + // create a client with allow all access + await visit(OIDC_BASE_URL + '/clients/create'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.clients.create', + 'navigates to create form' + ); + await fillIn('[data-test-input="name"]', 'test-app'); + await click('[data-test-toggle-group="More options"]'); + // toggle ttls to false, testing it sets correct default duration + await click('[data-test-input="idTokenTtl"]'); + await click('[data-test-input="accessTokenTtl"]'); + await click(SELECTORS.clientSaveButton); + assert.strictEqual( + flashMessage.latestMessage, + 'Successfully created the application test-app.', + 'renders success flash upon client creation' + ); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.clients.client.details', + 'navigates to client details view after save' + ); + // assert default values in details view are correct + assert.dom('[data-test-value-div="Assignment"]').hasText('allow_all', 'client allows all assignments'); + assert.dom('[data-test-value-div="Type"]').hasText('confidential', 'type defaults to confidential'); + assert + .dom('[data-test-value-div="Key"] a') + .hasText('default', 'client uses default key and renders a link'); + assert + .dom('[data-test-value-div="Client ID"] [data-test-copy-button]') + .exists('client ID exists and has copy button'); + assert + .dom('[data-test-value-div="Client Secret"] [data-test-copy-button]') + .exists('client secret exists and has copy button'); + assert + .dom('[data-test-value-div="ID Token TTL"]') + .hasText('1 day', 'ID token ttl toggled off sets default of 24h'); + assert + .dom('[data-test-value-div="Access Token TTL"]') + .hasText('1 day', 'access token ttl toggled off sets default of 24h'); + + // edit client + await click(SELECTORS.clientDetailsTab); + await click(SELECTORS.clientEditButton); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.clients.client.edit', + 'navigates to edit page from details' + ); + await fillIn('[data-test-input="redirectUris"] [data-test-string-list-input="0"]', 'some-url.com'); + + // limit access & create new assignment inline + await click('[data-test-oidc-radio="limited"]'); + await clickTrigger(); + await fillIn('.ember-power-select-search input', 'assignment-inline'); + await searchSelect.options.objectAt(0).click(); + await click('[data-test-search-select="entities"] .ember-basic-dropdown-trigger'); + await searchSelect.options.objectAt(0).click(); + await click('[data-test-search-select="groups"] .ember-basic-dropdown-trigger'); + await searchSelect.options.objectAt(0).click(); + await click(SELECTORS.assignmentSaveButton); + assert.strictEqual( + flashMessage.latestMessage, + 'Successfully created the assignment assignment-inline.', 
+ 'renders success flash upon assignment creation' + ); + await click(SELECTORS.clientSaveButton); + assert.strictEqual( + flashMessage.latestMessage, + 'Successfully updated the application test-app.', + 'renders success flash upon client updating' + ); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.clients.client.details', + 'navigates back to details on update' + ); + assert.dom('[data-test-value-div="Redirect URI"]').hasText('some-url.com', 'shows updated attribute'); + assert + .dom('[data-test-value-div="Assignment"]') + .hasText('assignment-inline', 'updated to limited assignment'); + + // edit back to allow_all + await click(SELECTORS.clientEditButton); + assert.dom(SELECTORS.clientSaveButton).hasText('Update', 'form button renders correct text'); + await click('[data-test-oidc-radio="allow-all"]'); + await click(SELECTORS.clientSaveButton); + assert + .dom('[data-test-value-div="Assignment"]') + .hasText('allow_all', 'client updated to allow all assignments'); + + // create another client + await visit(OIDC_BASE_URL + '/clients/create'); + await fillIn('[data-test-input="name"]', 'app-to-delete'); + await click(SELECTORS.clientSaveButton); + + // immediately delete client, test transition + await click(SELECTORS.clientDeleteButton); + await click(SELECTORS.confirmActionButton); + assert.strictEqual( + flashMessage.latestMessage, + 'Application deleted successfully', + 'renders success flash upon deleting client' + ); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.clients.index', + 'navigates back to list view after delete' + ); + // delete last client + await click('[data-test-oidc-client-linked-block]'); + assert.strictEqual(currentRouteName(), 'vault.cluster.access.oidc.clients.client.details'); + await click(SELECTORS.clientDeleteButton); + await click(SELECTORS.confirmActionButton); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.index', + 'redirects to call to action if only existing client is deleted' + ); + //* clean up test state + await clearRecord(this.store, 'oidc/assignment', 'assignment-inline'); + }); + + test('it creates, updates, and deletes an assignment', async function (assert) { + assert.expect(14); + await visit(OIDC_BASE_URL + '/assignments'); + + //* ensure clean test state + await clearRecord(this.store, 'oidc/assignment', 'test-assignment'); + + // create a new assignment + await click(SELECTORS.assignmentCreateButton); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.assignments.create', + 'navigates to create form' + ); + assert.dom('[data-test-oidc-assignment-title]').hasText('Create assignment', 'Form title renders'); + await fillIn('[data-test-input="name"]', 'test-assignment'); + await click('[data-test-component="search-select"]#entities .ember-basic-dropdown-trigger'); + await click('.ember-power-select-option'); + await click(SELECTORS.assignmentSaveButton); + assert.strictEqual( + flashMessage.latestMessage, + 'Successfully created the assignment test-assignment.', + 'renders success flash upon creating the assignment' + ); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.assignments.assignment.details', + 'navigates to the assignments detail view after save' + ); + + // assert default values in assignment details view are correct + assert.dom('[data-test-value-div="Name"]').hasText('test-assignment'); + assert.dom('[data-test-value-div="Entities"]').hasText('test-entity', 'shows the entity name.'); + + // edit assignment + 
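+ // the edit flow below adds a group to the existing assignment, saves, and verifies both targets render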
await click(SELECTORS.assignmentEditButton); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.assignments.assignment.edit', + 'navigates to the assignment edit page from details' + ); + assert.dom('[data-test-oidc-assignment-title]').hasText('Edit assignment', 'Form title renders'); + await click('[data-test-component="search-select"]#groups .ember-basic-dropdown-trigger'); + await click('.ember-power-select-option'); + assert.dom('[data-test-oidc-assignment-save]').hasText('Update'); + await click(SELECTORS.assignmentSaveButton); + assert.strictEqual( + flashMessage.latestMessage, + 'Successfully updated the assignment test-assignment.', + 'renders success flash upon updating the assignment' + ); + + assert.dom('[data-test-value-div="Entities"]').hasText('test-entity', 'it still shows the entity name.'); + assert.dom('[data-test-value-div="Groups"]').hasText('test-group', 'shows the updated group name.'); + + // delete the assignment + await click(SELECTORS.assignmentDeleteButton); + await click(SELECTORS.confirmActionButton); + assert.strictEqual( + flashMessage.latestMessage, + 'Assignment deleted successfully', + 'renders success flash upon deleting assignment' + ); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.assignments.index', + 'navigates back to assignment list view after delete' + ); + }); + + test('it navigates to and from an assignment from the list view', async function (assert) { + assert.expect(6); + this.server.get('/identity/oidc/assignment', () => + overrideMirageResponse(null, ASSIGNMENT_LIST_RESPONSE) + ); + this.server.get('/identity/oidc/assignment/test-assignment', () => + overrideMirageResponse(null, ASSIGNMENT_DATA_RESPONSE) + ); + await visit(OIDC_BASE_URL + '/assignments'); + assert + .dom('[data-test-oidc-assignment-linked-block="test-assignment"]') + .exists('displays linked block for test-assignment'); + + await click(SELECTORS.assignmentCreateButton); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.assignments.create', + 'assignments index toolbar navigates to create form' + ); + await click(SELECTORS.assignmentCancelButton); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.assignments.index', + 'create form navigates back to assignment index on cancel' + ); + + await click('[data-test-popup-menu-trigger]'); + await click('[data-test-oidc-assignment-menu-link="edit"]'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.assignments.assignment.edit', + 'linked block popup menu navigates to edit' + ); + await click(SELECTORS.assignmentCancelButton); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.assignments.assignment.details', + 'edit form navigates back to assignment details on cancel' + ); + // navigate to details from index page + await visit('/vault/access/oidc/assignments'); + await click('[data-test-popup-menu-trigger]'); + await click('[data-test-oidc-assignment-menu-link="details"]'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.assignments.assignment.details', + 'popup menu navigates to assignment details' + ); + }); + + test('it hides assignment delete and edit when no permission', async function (assert) { + assert.expect(5); + this.server.get('/identity/oidc/assignment', () => + overrideMirageResponse(null, ASSIGNMENT_LIST_RESPONSE) + ); + this.server.get('/identity/oidc/assignment/test-assignment', () => + overrideMirageResponse(null, ASSIGNMENT_DATA_RESPONSE) + ); + 
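+ // stub capabilities-self so the token appears to have read-only access to this assignment; + // overrideCapabilities presumably builds a capabilities-self style payload for the given path, + // e.g. { data: { capabilities: ['read'] } } (see vault/tests/helpers/oidc-config)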
this.server.post('/sys/capabilities-self', () => + overrideCapabilities(OIDC_BASE_URL + '/assignment/test-assignment', ['read']) + ); + + await visit(OIDC_BASE_URL + '/assignments'); + await click('[data-test-oidc-assignment-linked-block="test-assignment"]'); + assert + .dom('[data-test-oidc-assignment-title]') + .hasText('test-assignment', 'renders assignment name as title'); + assert.dom(SELECTORS.assignmentDetailsTab).hasClass('active', 'details tab is active'); + assert.dom(SELECTORS.assignmentDeleteButton).doesNotExist('delete option is hidden'); + assert.dom(SELECTORS.assignmentEditButton).doesNotExist('edit button is hidden'); + assert.strictEqual( + findAll('[data-test-component="info-table-row"]').length, + 3, + 'renders all assignment info rows' + ); + }); +}); diff --git a/ui/tests/acceptance/oidc-config/clients-keys-test.js b/ui/tests/acceptance/oidc-config/clients-keys-test.js new file mode 100644 index 0000000..2e97047 --- /dev/null +++ b/ui/tests/acceptance/oidc-config/clients-keys-test.js @@ -0,0 +1,318 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { visit, click, fillIn, findAll, currentRouteName } from '@ember/test-helpers'; +import { setupApplicationTest } from 'ember-qunit'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import ENV from 'vault/config/environment'; +import authPage from 'vault/tests/pages/auth'; +import logout from 'vault/tests/pages/logout'; +import { create } from 'ember-cli-page-object'; +import { clickTrigger, selectChoose } from 'ember-power-select/test-support/helpers'; +import ss from 'vault/tests/pages/components/search-select'; +import fm from 'vault/tests/pages/components/flash-message'; +import { + OIDC_BASE_URL, // -> '/vault/access/oidc' + SELECTORS, + clearRecord, + overrideCapabilities, + overrideMirageResponse, + CLIENT_LIST_RESPONSE, + CLIENT_DATA_RESPONSE, +} from 'vault/tests/helpers/oidc-config'; +const searchSelect = create(ss); +const flashMessage = create(fm); + +// to stay consistent with backend verbiage, 'applications' are referred to as 'clients' +// throughout the codebase; the term 'applications' only appears in the UI + +module('Acceptance | oidc-config clients and keys', function (hooks) { + setupApplicationTest(hooks); + setupMirage(hooks); + + hooks.before(function () { + ENV['ember-cli-mirage'].handler = 'oidcConfig'; + }); + + hooks.beforeEach(async function () { + this.store = await this.owner.lookup('service:store'); + return authPage.login(); + }); + + hooks.afterEach(function () { + return logout.visit(); + }); + + hooks.after(function () { + ENV['ember-cli-mirage'].handler = null; + }); + + test('it creates a key, signs a client and edits key access to only that client', async function (assert) { + assert.expect(21); + + //* start with clean test state + await clearRecord(this.store, 'oidc/client', 'client-with-test-key'); + await clearRecord(this.store, 'oidc/client', 'client-with-default-key'); + await clearRecord(this.store, 'oidc/key', 'test-key'); + + // create client with default key + await visit(OIDC_BASE_URL + '/clients/create'); + await fillIn('[data-test-input="name"]', 'client-with-default-key'); + await click(SELECTORS.clientSaveButton); + + // check reroute from oidc index to clients index when a client exists + await visit(OIDC_BASE_URL); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.clients.index', + 'redirects to clients index route when clients exist' + ); + 
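+ // sanity check: with at least one client created, the tab bar and clients list should render before moving on to keys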
assert.dom('[data-test-tab="clients"]').hasClass('active', 'clients tab is active'); + assert + .dom('[data-test-oidc-client-linked-block]') + .hasTextContaining('client-with-default-key', 'displays linked block for client'); + + // navigate to keys + await click('[data-test-tab="keys"]'); + assert.dom('[data-test-tab="keys"]').hasClass('active', 'keys tab is active'); + assert.strictEqual(currentRouteName(), 'vault.cluster.access.oidc.keys.index'); + assert + .dom('[data-test-oidc-key-linked-block="default"]') + .hasText('default', 'index page lists default key'); + + // navigate to default key details from pop-up menu + await click('[data-test-popup-menu-trigger]'); + await click('[data-test-oidc-key-menu-link="details"]'); + assert.dom(SELECTORS.keyDeleteButton).isDisabled('delete button is disabled for default key'); + await click(SELECTORS.keyEditButton); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.keys.key.edit', + 'navigates to edit from key details' + ); + await click(SELECTORS.keyCancelButton); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.keys.key.details', + 'key edit form navigates back to details on cancel' + ); + await click(SELECTORS.keyClientsTab); + assert + .dom('[data-test-oidc-client-linked-block="client-with-default-key"]') + .exists('lists correct app with default'); + + // create a new key + await click('[data-test-breadcrumb-link="oidc-keys"]'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.keys.index', + 'keys breadcrumb navigates back to list view' + ); + await click('[data-test-oidc-key-create]'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.keys.create', + 'navigates to key create form' + ); + await fillIn('[data-test-input="name"]', 'test-key'); + await click(SELECTORS.keySaveButton); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.keys.key.details', + 'navigates to key details after save' + ); + + // create client with test-key + await visit(OIDC_BASE_URL + '/clients'); + await click('[data-test-oidc-client-create]'); + await fillIn('[data-test-input="name"]', 'client-with-test-key'); + await click('[data-test-toggle-group="More options"]'); + await click('[data-test-component="search-select"] [data-test-icon="trash"]'); + await clickTrigger('#key'); + await selectChoose('[data-test-component="search-select"]#key', 'test-key'); + await click(SELECTORS.clientSaveButton); + + // edit key and limit applications + await visit(OIDC_BASE_URL + '/keys'); + await click('[data-test-oidc-key-linked-block="test-key"] [data-test-popup-menu-trigger]'); + await click('[data-test-oidc-key-menu-link="edit"]'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.keys.key.edit', + 'key linked block popup menu navigates to edit' + ); + await click('[data-test-oidc-radio="limited"]'); + await clickTrigger(); + assert.strictEqual(searchSelect.options.length, 1, 'dropdown has only application that uses this key'); + assert + .dom('.ember-power-select-option') + .hasTextContaining('client-with-test-key', 'dropdown renders correct application'); + await searchSelect.options.objectAt(0).click(); + await click(SELECTORS.keySaveButton); + assert.strictEqual( + flashMessage.latestMessage, + 'Successfully updated the key test-key.', + 'renders success flash upon key updating' + ); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.keys.key.details', + 'navigates back to details on update' + ); + await 
click(SELECTORS.keyClientsTab); + assert + .dom('[data-test-oidc-client-linked-block="client-with-test-key"]') + .exists('lists client-with-test-key'); + assert.strictEqual(findAll('[data-test-oidc-client-linked-block]').length, 1, 'it lists only one client'); + + // edit back to allow all + await click(SELECTORS.keyDetailsTab); + await click(SELECTORS.keyEditButton); + await click('[data-test-oidc-radio="allow-all"]'); + await click(SELECTORS.keySaveButton); + await click(SELECTORS.keyClientsTab); + assert.notEqual( + findAll('[data-test-oidc-client-linked-block]').length, + 1, + 'more than one client appears in key applications tab' + ); + + //* clean up test state + await clearRecord(this.store, 'oidc/client', 'client-with-test-key'); + await clearRecord(this.store, 'oidc/client', 'client-with-default-key'); + await clearRecord(this.store, 'oidc/key', 'test-key'); + }); + + test('it creates, rotates and deletes a key', async function (assert) { + assert.expect(10); + // mock client list so OIDC url does not redirect to landing page + this.server.get('/identity/oidc/client', () => overrideMirageResponse(null, CLIENT_LIST_RESPONSE)); + this.server.post('/identity/oidc/key/test-key/rotate', (schema, req) => { + const json = JSON.parse(req.requestBody); + assert.strictEqual(json.verification_ttl, 86400, 'request made with correct args to the rotate endpoint'); + }); + + //* clear out test state + await clearRecord(this.store, 'oidc/key', 'test-key'); + + // create a new key + await visit(OIDC_BASE_URL + '/keys/create'); + await fillIn('[data-test-input="name"]', 'test-key'); + // toggle ttls to false, testing it sets correct default duration + await click('[data-test-input="rotationPeriod"]'); + await click('[data-test-input="verificationTtl"]'); + assert + .dom('[data-test-oidc-radio="limited"] input') + .isDisabled('limiting access radio button is disabled on create'); + assert + .dom('[data-test-oidc-radio="limited"]') + .hasClass('is-disabled', 'limited radio button label has disabled class'); + await click(SELECTORS.keySaveButton); + assert.strictEqual( + flashMessage.latestMessage, + 'Successfully created the key test-key.', + 'renders success flash upon key creation' + ); + + // assert default values in details view are correct + assert.dom('[data-test-value-div="Algorithm"]').hasText('RS256', 'defaults to RS256 algorithm'); + assert + .dom('[data-test-value-div="Rotation period"]') + .hasText('1 day', 'when toggled off rotation period defaults to 1 day'); + assert + .dom('[data-test-value-div="Verification TTL"]') + .hasText('1 day', 'when toggled off verification ttl defaults to 1 day'); + + // rotate key + await click(SELECTORS.keyDetailsTab); + await click(SELECTORS.keyRotateButton); + await click(SELECTORS.confirmActionButton); + assert.strictEqual( + flashMessage.latestMessage, + 'Success: test-key connection was rotated.', + 'renders success flash upon key rotation' + ); + // delete + await click(SELECTORS.keyDeleteButton); + await click(SELECTORS.confirmActionButton); + assert.strictEqual( + flashMessage.latestMessage, + 'Key deleted successfully', + 'success flash message renders after deleting key' + ); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.keys.index', + 'navigates back to list view after delete' + ); + }); + + test('it renders client details and providers', async function (assert) { + assert.expect(8); + this.server.get('/identity/oidc/client', () => overrideMirageResponse(null, CLIENT_LIST_RESPONSE)); + 
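+ // stub the single-client read so the details view renders the mocked test-app record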
this.server.get('/identity/oidc/client/test-app', () => + overrideMirageResponse(null, CLIENT_DATA_RESPONSE) + ); + await visit(OIDC_BASE_URL); + await click('[data-test-oidc-client-linked-block]'); + assert.dom('[data-test-oidc-client-header]').hasText('test-app', 'renders application name as title'); + assert.dom(SELECTORS.clientDetailsTab).hasClass('active', 'details tab is active'); + assert.dom(SELECTORS.clientDeleteButton).exists('toolbar renders delete option'); + assert.dom(SELECTORS.clientEditButton).exists('toolbar renders edit button'); + assert.strictEqual(findAll('[data-test-component="info-table-row"]').length, 9, 'renders all info rows'); + + await click(SELECTORS.clientProvidersTab); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.clients.client.providers', + 'navigates to client providers route' + ); + assert.dom(SELECTORS.clientProvidersTab).hasClass('active', 'providers tab is active'); + assert.dom('[data-test-oidc-provider-linked-block="default"]').exists('lists default provider'); + }); + + test('it hides delete and edit client when no permission', async function (assert) { + assert.expect(5); + this.server.get('/identity/oidc/client', () => overrideMirageResponse(null, CLIENT_LIST_RESPONSE)); + this.server.get('/identity/oidc/client/test-app', () => + overrideMirageResponse(null, CLIENT_DATA_RESPONSE) + ); + this.server.post('/sys/capabilities-self', () => + overrideCapabilities(OIDC_BASE_URL + '/client/test-app', ['read']) + ); + + await visit(OIDC_BASE_URL); + await click('[data-test-oidc-client-linked-block]'); + assert.dom('[data-test-oidc-client-header]').hasText('test-app', 'renders application name as title'); + assert.dom(SELECTORS.clientDetailsTab).hasClass('active', 'details tab is active'); + assert.dom(SELECTORS.clientDeleteButton).doesNotExist('delete option is hidden'); + assert.dom(SELECTORS.clientEditButton).doesNotExist('edit button is hidden'); + assert.strictEqual(findAll('[data-test-component="info-table-row"]').length, 9, 'renders all info rows'); + }); + + test('it hides delete and edit key when no permission', async function (assert) { + assert.expect(4); + this.server.get('/identity/oidc/keys', () => overrideMirageResponse(null, { keys: ['test-key'] })); + this.server.get('/identity/oidc/key/test-key', () => + overrideMirageResponse(null, { + algorithm: 'RS256', + allowed_client_ids: ['*'], + rotation_period: 86400, + verification_ttl: 86400, + }) + ); + this.server.post('/sys/capabilities-self', () => + overrideCapabilities(OIDC_BASE_URL + '/key/test-key', ['read']) + ); + + await visit(OIDC_BASE_URL + '/keys'); + await click('[data-test-oidc-key-linked-block]'); + assert.dom(SELECTORS.keyDetailsTab).hasClass('active', 'details tab is active'); + assert.dom(SELECTORS.keyDeleteButton).doesNotExist('delete option is hidden'); + assert.dom(SELECTORS.keyEditButton).doesNotExist('edit button is hidden'); + assert.strictEqual(findAll('[data-test-component="info-table-row"]').length, 4, 'renders all info rows'); + }); +}); diff --git a/ui/tests/acceptance/oidc-config/providers-scopes-test.js b/ui/tests/acceptance/oidc-config/providers-scopes-test.js new file mode 100644 index 0000000..8454530 --- /dev/null +++ b/ui/tests/acceptance/oidc-config/providers-scopes-test.js @@ -0,0 +1,410 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { visit, currentURL, click, fillIn, findAll, currentRouteName } from '@ember/test-helpers'; +import { setupApplicationTest } from 'ember-qunit'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import ENV from 'vault/config/environment'; +import authPage from 'vault/tests/pages/auth'; +import logout from 'vault/tests/pages/logout'; +import { create } from 'ember-cli-page-object'; +import { clickTrigger, selectChoose } from 'ember-power-select/test-support/helpers'; +import ss from 'vault/tests/pages/components/search-select'; +import fm from 'vault/tests/pages/components/flash-message'; +import { + OIDC_BASE_URL, + SELECTORS, + CLIENT_LIST_RESPONSE, + SCOPE_LIST_RESPONSE, + SCOPE_DATA_RESPONSE, + PROVIDER_LIST_RESPONSE, + PROVIDER_DATA_RESPONSE, + clearRecord, + overrideCapabilities, + overrideMirageResponse, +} from 'vault/tests/helpers/oidc-config'; +const searchSelect = create(ss); +const flashMessage = create(fm); + +// OIDC_BASE_URL = '/vault/access/oidc' + +module('Acceptance | oidc-config providers and scopes', function (hooks) { + setupApplicationTest(hooks); + setupMirage(hooks); + + hooks.before(function () { + ENV['ember-cli-mirage'].handler = 'oidcConfig'; + }); + + hooks.beforeEach(async function () { + this.store = await this.owner.lookup('service:store'); + // mock client list so OIDC BASE URL does not redirect to landing call-to-action image + this.server.get('/identity/oidc/client', () => overrideMirageResponse(null, CLIENT_LIST_RESPONSE)); + return authPage.login(); + }); + + hooks.afterEach(function () { + return logout.visit(); + }); + + hooks.after(function () { + ENV['ember-cli-mirage'].handler = null; + }); + + // LIST SCOPES EMPTY + test('it navigates to scopes list view and renders empty state when no scopes are configured', async function (assert) { + assert.expect(4); + this.server.get('/identity/oidc/scope', () => overrideMirageResponse(404)); + await visit(OIDC_BASE_URL); + await click('[data-test-tab="scopes"]'); + assert.strictEqual(currentURL(), '/vault/access/oidc/scopes'); + assert.dom('[data-test-tab="scopes"]').hasClass('active', 'scopes tab is active'); + assert + .dom(SELECTORS.scopeEmptyState) + .hasText( + `No scopes yet Use scope to define identity information about the authenticated user. Learn more. 
Create scope`, + 'renders empty state when no scopes are configured' + ); + assert + .dom(SELECTORS.scopeCreateButtonEmptyState) + .hasAttribute('href', '/ui/vault/access/oidc/scopes/create', 'empty state renders create scope link'); + }); + + // LIST SCOPE EXIST + test('it renders scope list when scopes exist', async function (assert) { + assert.expect(11); + this.server.get('/identity/oidc/scope', () => overrideMirageResponse(null, SCOPE_LIST_RESPONSE)); + this.server.get('/identity/oidc/scope/test-scope', () => + overrideMirageResponse(null, SCOPE_DATA_RESPONSE) + ); + await visit(OIDC_BASE_URL + '/scopes'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.scopes.index', + 'redirects to scopes index route when scopes exist' + ); + assert + .dom('[data-test-oidc-scope-linked-block="test-scope"]') + .exists('displays linked block for test scope'); + + // navigates to/from create, edit, detail views from list view + await click(SELECTORS.scopeCreateButton); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.scopes.create', + 'scope index toolbar navigates to create form' + ); + await click(SELECTORS.scopeCancelButton); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.scopes.index', + 'create form navigates back to index on cancel' + ); + + await click('[data-test-popup-menu-trigger]'); + await click('[data-test-oidc-scope-menu-link="edit"]'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.scopes.scope.edit', + 'linked block popup menu navigates to edit' + ); + await click(SELECTORS.scopeCancelButton); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.scopes.scope.details', + 'scope edit form navigates back to details on cancel' + ); + + // navigate to details from index page + await click('[data-test-breadcrumb-link="oidc-scopes"]'); + await click('[data-test-popup-menu-trigger]'); + await click('[data-test-oidc-scope-menu-link="details"]'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.scopes.scope.details', + 'popup menu navigates to details' + ); + // check that details tab has all the information + assert.dom(SELECTORS.scopeDetailsTab).hasClass('active', 'details tab is active'); + assert.dom(SELECTORS.scopeDeleteButton).exists('toolbar renders delete option'); + assert.dom(SELECTORS.scopeEditButton).exists('toolbar renders edit button'); + assert.strictEqual(findAll('[data-test-component="info-table-row"]').length, 2, 'renders all info rows'); + }); + + // ERROR DELETING SCOPE + test('it throws an error when trying to delete a scope that is currently associated with a provider', async function (assert) { + assert.expect(3); + this.server.get('/identity/oidc/scope', () => overrideMirageResponse(null, SCOPE_LIST_RESPONSE)); + this.server.get('/identity/oidc/scope/test-scope', () => + overrideMirageResponse(null, SCOPE_DATA_RESPONSE) + ); + this.server.get('/identity/oidc/provider', () => overrideMirageResponse(null, PROVIDER_LIST_RESPONSE)); + this.server.get('/identity/oidc/provider/test-provider', () => + overrideMirageResponse(null, PROVIDER_DATA_RESPONSE) + ); + // throw error when trying to delete test-scope since it is associated with test-provider + this.server.delete( + '/identity/oidc/scope/test-scope', + () => ({ + errors: [ + 'unable to delete scope "test-scope" because it is currently referenced by these providers: test-provider', + ], + }), + 400 + ); + await visit(OIDC_BASE_URL + '/scopes'); + await
click('[data-test-oidc-scope-linked-block="test-scope"]'); + assert.dom('[data-test-oidc-scope-header]').hasText('test-scope', 'renders scope name'); + assert.dom(SELECTORS.scopeDetailsTab).hasClass('active', 'details tab is active'); + + // try to delete scope + await click(SELECTORS.scopeDeleteButton); + await click(SELECTORS.confirmActionButton); + assert.strictEqual( + flashMessage.latestMessage, + 'unable to delete scope "test-scope" because it is currently referenced by these providers: test-provider', + 'renders error flash upon scope deletion' + ); + }); + + // CRUD SCOPE + CRUD PROVIDER + test('it creates a scope, and creates a provider with that scope', async function (assert) { + assert.expect(28); + + //* clear out test state + await clearRecord(this.store, 'oidc/scope', 'test-scope'); + await clearRecord(this.store, 'oidc/provider', 'test-provider'); + + // create a new scope + await visit(OIDC_BASE_URL + '/scopes/create'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.scopes.create', + 'navigates to create form' + ); + await fillIn('[data-test-input="name"]', 'test-scope'); + await fillIn('[data-test-input="description"]', 'this is a test'); + await fillIn('[data-test-component="code-mirror-modifier"] textarea', SCOPE_DATA_RESPONSE.template); + await click(SELECTORS.scopeSaveButton); + assert.strictEqual( + flashMessage.latestMessage, + 'Successfully created the scope test-scope.', + 'renders success flash upon scope creation' + ); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.scopes.scope.details', + 'navigates to scope detail view after save' + ); + assert.dom(SELECTORS.scopeDetailsTab).hasClass('active', 'scope details tab is active'); + assert.dom('[data-test-value-div="Name"]').hasText('test-scope', 'has correct created name'); + assert + .dom('[data-test-value-div="Description"]') + .hasText('this is a test', 'has correct created description'); + + // edit scope + await click(SELECTORS.scopeEditButton); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.scopes.scope.edit', + 'navigates to edit page from details' + ); + await fillIn('[data-test-input="description"]', 'this is an edit test'); + await click(SELECTORS.scopeSaveButton); + assert.strictEqual( + flashMessage.latestMessage, + 'Successfully updated the scope test-scope.', + 'renders success flash upon scope updating' + ); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.scopes.scope.details', + 'navigates back to scope details on update' + ); + assert + .dom('[data-test-value-div="Description"]') + .hasText('this is an edit test', 'has correct edited description'); + + // create a provider using test-scope + await click('[data-test-breadcrumb-link="oidc-scopes"]'); + await click('[data-test-tab="providers"]'); + assert.dom('[data-test-tab="providers"]').hasClass('active', 'providers tab is active'); + await click('[data-test-oidc-provider-create]'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.providers.create', + 'navigates to provider create form' + ); + await fillIn('[data-test-input="name"]', 'test-provider'); + await clickTrigger('#scopesSupported'); + await selectChoose('#scopesSupported', 'test-scope'); + await click(SELECTORS.providerSaveButton); + assert.strictEqual( + flashMessage.latestMessage, + 'Successfully created the OIDC provider test-provider.', + 'renders success flash upon provider creation' + ); + assert.strictEqual( + currentRouteName(), + 
'vault.cluster.access.oidc.providers.provider.details', + 'navigates to provider detail view after save' + ); + + // assert default values in details view are correct + assert.dom('[data-test-value-div="Issuer URL"]').hasTextContaining('http://', 'issuer includes scheme'); + assert + .dom('[data-test-value-div="Issuer URL"]') + .hasTextContaining('identity/oidc/provider/test', 'issuer path populates correctly'); + assert + .dom('[data-test-value-div="Scopes"] a') + .hasAttribute('href', '/ui/vault/access/oidc/scopes/test-scope/details', 'lists scopes as links'); + + // check provider's application list view + await click(SELECTORS.providerClientsTab); + assert.strictEqual( + findAll('[data-test-oidc-client-linked-block]').length, + 2, + 'all applications appear in provider applications tab' + ); + + // edit and limit applications + await click(SELECTORS.providerDetailsTab); + await click(SELECTORS.providerEditButton); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.providers.provider.edit', + 'navigates to provider edit page from details' + ); + await click('[data-test-oidc-radio="limited"]'); + await click('[data-test-component="search-select"]#allowedClientIds .ember-basic-dropdown-trigger'); + await fillIn('.ember-power-select-search input', 'test-app'); + await searchSelect.options.objectAt(0).click(); + await click(SELECTORS.providerSaveButton); + assert.strictEqual( + flashMessage.latestMessage, + 'Successfully updated the OIDC provider test-provider.', + 'renders success flash upon provider updating' + ); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.providers.provider.details', + 'navigates back to provider details after updating' + ); + const providerModel = this.store.peekRecord('oidc/provider', 'test-provider'); + assert.propEqual( + providerModel.allowedClientIds, + ['whaT7KB0C3iBH1l3rXhd5HPf0n6vXU0s'], + 'provider saves client_id (not id or name) in allowed_client_ids param' + ); + await click(SELECTORS.providerClientsTab); + assert + .dom('[data-test-oidc-client-linked-block]') + .hasTextContaining('test-app', 'list of applications is just test-app'); + + // edit back to allow all + await click(SELECTORS.providerDetailsTab); + await click(SELECTORS.providerEditButton); + await click('[data-test-oidc-radio="allow-all"]'); + await click(SELECTORS.providerSaveButton); + await click(SELECTORS.providerClientsTab); + assert.strictEqual( + findAll('[data-test-oidc-client-linked-block]').length, + 2, + 'all applications appear in provider applications tab' + ); + + // delete + await click(SELECTORS.providerDetailsTab); + await click(SELECTORS.providerDeleteButton); + await click(SELECTORS.confirmActionButton); + assert.strictEqual( + flashMessage.latestMessage, + 'Provider deleted successfully', + 'success flash message renders after deleting provider' + ); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.providers.index', + 'navigates back to list view after delete' + ); + + // delete scope + await visit(OIDC_BASE_URL + '/scopes/test-scope/details'); + await click(SELECTORS.scopeDeleteButton); + await click(SELECTORS.confirmActionButton); + assert.strictEqual( + flashMessage.latestMessage, + 'Scope deleted successfully', + 'renders success flash upon deleting scope' + ); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.scopes.index', + 'navigates back to list view after delete' + ); + }); + + // LIST PROVIDERS + test('it lists default provider and navigates to details', async function 
(assert) { + assert.expect(7); + await visit(OIDC_BASE_URL); + await click('[data-test-tab="providers"]'); + assert.dom('[data-test-tab="providers"]').hasClass('active', 'providers tab is active'); + assert.strictEqual(currentURL(), '/vault/access/oidc/providers'); + assert + .dom('[data-test-oidc-provider-linked-block="default"]') + .exists('index page lists default provider'); + await click('[data-test-popup-menu-trigger]'); + + await click('[data-test-oidc-provider-menu-link="edit"]'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.providers.provider.edit', + 'provider linked block popup menu navigates to edit' + ); + await click(SELECTORS.providerCancelButton); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.providers.provider.details', + 'provider edit form navigates back to details on cancel' + ); + + // navigate to details from index page + await click('[data-test-breadcrumb-link="oidc-providers"]'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.access.oidc.providers.index', + 'providers breadcrumb navigates back to list view' + ); + await click('[data-test-oidc-provider-linked-block="default"] [data-test-popup-menu-trigger]'); + await click('[data-test-oidc-provider-menu-link="details"]'); + assert.dom(SELECTORS.providerDeleteButton).isDisabled('delete button is disabled for default provider'); + }); + + // PROVIDER DELETE + EDIT PERMISSIONS + test('it hides delete and edit for a provider when no permission', async function (assert) { + assert.expect(3); + this.server.get('/identity/oidc/providers', () => + overrideMirageResponse(null, { providers: ['test-provider'] }) + ); + this.server.get('/identity/oidc/provider/test-provider', () => + overrideMirageResponse(null, { + allowed_client_ids: ['*'], + issuer: 'http://127.0.0.1:8200/v1/identity/oidc/provider/test-provider', + scopes_supported: ['test-scope'], + }) + ); + this.server.post('/sys/capabilities-self', () => + overrideCapabilities(OIDC_BASE_URL + '/provider/test-provider', ['read']) + ); + + await visit(OIDC_BASE_URL + '/providers'); + await click('[data-test-oidc-provider-linked-block]'); + assert.dom(SELECTORS.providerDetailsTab).hasClass('active', 'details tab is active'); + assert.dom(SELECTORS.providerDeleteButton).doesNotExist('delete option is hidden'); + assert.dom(SELECTORS.providerEditButton).doesNotExist('edit button is hidden'); + }); +}); diff --git a/ui/tests/acceptance/oidc-provider-test.js b/ui/tests/acceptance/oidc-provider-test.js new file mode 100644 index 0000000..8955287 --- /dev/null +++ b/ui/tests/acceptance/oidc-provider-test.js @@ -0,0 +1,222 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { create } from 'ember-cli-page-object'; +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { v4 as uuidv4 } from 'uuid'; + +import authPage from 'vault/tests/pages/auth'; +import logout from 'vault/tests/pages/logout'; +import authForm from 'vault/tests/pages/components/auth-form'; +import enablePage from 'vault/tests/pages/settings/auth/enable'; +import consoleClass from 'vault/tests/pages/components/console/ui-panel'; +import { visit, settled, currentURL } from '@ember/test-helpers'; +import { clearRecord } from 'vault/tests/helpers/oidc-config'; +const consoleComponent = create(consoleClass); +const authFormComponent = create(authForm); + +const OIDC_USER = 'end-user'; +const USER_PASSWORD = 'mypassword'; +const OIDC_POLICY = `path "identity/oidc/provider/+/userinfo" { + capabilities = ["read", "update"] +}`; +const USER_TOKEN_TEMPLATE = `{ + "username": {{identity.entity.aliases.$USERPASS_ACCESSOR.name}}, + "contact": { + "email": {{identity.entity.metadata.email}}, + "phone_number": {{identity.entity.metadata.phone_number}} + } +}`; +const GROUP_TOKEN_TEMPLATE = `{ + "groups": {{identity.entity.groups.names}} +}`; +const oidcEntity = async function (name, policy) { + await consoleComponent.runCommands([ + `write sys/policies/acl/${name} policy=${window.btoa(policy)}`, + `write identity/entity name="${OIDC_USER}" policies="${name}" metadata="email=vault@hashicorp.com" metadata="phone_number=123-456-7890"`, + `read -field=id identity/entity/name/${OIDC_USER}`, + ]); + return consoleComponent.lastLogOutput; +}; + +const oidcGroup = async function (entityId) { + await consoleComponent.runCommands([ + `write identity/group name="engineering" member_entity_ids="${entityId}"`, + `read -field=id identity/group/name/engineering`, + ]); + return consoleComponent.lastLogOutput; +}; + +const authAccessor = async function (path = 'userpass') { + await enablePage.enable('userpass', path); + await consoleComponent.runCommands([ + `write auth/${path}/users/end-user password="${USER_PASSWORD}"`, + `read -field=accessor sys/internal/ui/mounts/auth/${path}`, + ]); + return consoleComponent.lastLogOutput; +}; + +const entityAlias = async function (entityId, accessor, groupId) { + const userTokenTemplate = btoa(USER_TOKEN_TEMPLATE); + const groupTokenTemplate = btoa(GROUP_TOKEN_TEMPLATE); + + await consoleComponent.runCommands([ + `write identity/entity-alias name="end-user" canonical_id="${entityId}" mount_accessor="${accessor}"`, + `write identity/oidc/key/sigkey allowed_client_ids="*"`, + `write identity/oidc/assignment/my-assignment group_ids="${groupId}" entity_ids="${entityId}"`, + `write identity/oidc/scope/user description="scope for user metadata" template="${userTokenTemplate}"`, + `write identity/oidc/scope/groups description="scope for groups" template="${groupTokenTemplate}"`, + ]); + return consoleComponent.lastLogOutput.includes('Success'); +}; +const setupWebapp = async function (redirect) { + const webappName = 'my-webapp'; + await consoleComponent.runCommands([ + `write identity/oidc/client/${webappName} redirect_uris="${redirect}" assignments="my-assignment" key="sigkey" id_token_ttl="30m" access_token_ttl="1h"`, + `read -field=client_id identity/oidc/client/${webappName}`, + ]); + const output = consoleComponent.lastLogOutput; + if (output.includes('error occurred')) { + throw new Error(`OIDC setup failed: ${output}`); + } + return output; +}; +const setupProvider = async function 
(clientId) { + const providerName = `my-provider`; + await consoleComponent.runCommands( + `write identity/oidc/provider/${providerName} allowed_client_ids="${clientId}" scopes="user,groups"` + ); + return providerName; +}; + +const getAuthzUrl = (providerName, redirect, clientId, params) => { + const queryParams = { + client_id: clientId, + nonce: 'abc123', + redirect_uri: encodeURIComponent(redirect), + response_type: 'code', + scope: 'openid', + state: 'foobar', + ...params, + }; + const queryString = Object.keys(queryParams).reduce((prev, key, idx) => { + if (idx === 0) { + return `${prev}${key}=${queryParams[key]}`; + } + return `${prev}&${key}=${queryParams[key]}`; + }, '?'); + return `/vault/identity/oidc/provider/${providerName}/authorize${queryString}`; +}; + +const setupOidc = async function (uid) { + const callback = 'http://127.0.0.1:8251/callback'; + const entityId = await oidcEntity('oidc', OIDC_POLICY); + const groupId = await oidcGroup(entityId); + const authMethodPath = `oidc-userpass-${uid}`; + const accessor = await authAccessor(authMethodPath); + await entityAlias(entityId, accessor, groupId); + const clientId = await setupWebapp(callback); + const providerName = await setupProvider(clientId); + return { + providerName, + callback, + clientId, + authMethodPath, + }; +}; + +module('Acceptance | oidc provider', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(async function () { + this.uid = uuidv4(); + this.store = await this.owner.lookup('service:store'); + await logout.visit(); + return authPage.login(); + }); + + test('OIDC Provider logs in and redirects correctly', async function (assert) { + const { providerName, callback, clientId, authMethodPath } = await setupOidc(this.uid); + + await logout.visit(); + await settled(); + const url = getAuthzUrl(providerName, callback, clientId); + await visit(url); + + assert.ok(currentURL().startsWith('/vault/auth'), 'redirects to auth when no token'); + assert.ok( + currentURL().includes(`redirect_to=${encodeURIComponent(url)}`), + 'encodes url for the query param' + ); + assert.dom('[data-test-auth-logo]').exists('Vault logo exists on auth page'); + assert + .dom('[data-test-auth-helptext]') + .hasText( + 'Once you log in, you will be redirected back to your application. 
If you require login credentials, contact your administrator.', + 'Has updated text for client authorization flow' + ); + await authFormComponent.selectMethod(authMethodPath); + await authFormComponent.username(OIDC_USER); + await authFormComponent.password(USER_PASSWORD); + await authFormComponent.login(); + await settled(); + assert.strictEqual(currentURL(), url, 'URL is as expected after login'); + assert.dom('[data-test-oidc-redirect]').exists('redirect text exists'); + assert + .dom('[data-test-oidc-redirect]') + .hasTextContaining(`${callback}?code=`, 'Successful redirect to callback'); + + //* clean up test state + await clearRecord(this.store, 'oidc/client', 'my-webapp'); + await clearRecord(this.store, 'oidc/provider', 'my-provider'); + }); + + test('OIDC Provider redirects to auth if current token and prompt = login', async function (assert) { + const { providerName, callback, clientId, authMethodPath } = await setupOidc(this.uid); + await settled(); + const url = getAuthzUrl(providerName, callback, clientId, { prompt: 'login' }); + await visit(url); + + assert.ok(currentURL().startsWith('/vault/auth'), 'redirects to auth even with a current token because prompt=login'); + assert.notOk( + currentURL().includes('prompt=login'), + 'URL params no longer include prompt=login after redirect' + ); + await authFormComponent.selectMethod(authMethodPath); + await authFormComponent.username(OIDC_USER); + await authFormComponent.password(USER_PASSWORD); + await authFormComponent.login(); + await settled(); + assert + .dom('[data-test-oidc-redirect]') + .hasTextContaining(`${callback}?code=`, 'Successful redirect to callback'); + + //* clean up test state + await clearRecord(this.store, 'oidc/client', 'my-webapp'); + await clearRecord(this.store, 'oidc/provider', 'my-provider'); + }); + + test('OIDC Provider shows consent form when prompt = consent', async function (assert) { + const { providerName, callback, clientId, authMethodPath } = await setupOidc(this.uid); + const url = getAuthzUrl(providerName, callback, clientId, { prompt: 'consent' }); + await logout.visit(); + await authFormComponent.selectMethod(authMethodPath); + await authFormComponent.username(OIDC_USER); + await authFormComponent.password(USER_PASSWORD); + await authFormComponent.login(); + await visit(url); + + assert.notOk( + currentURL().startsWith('/vault/auth'), + 'Does not redirect to auth because user is already logged in' + ); + assert.dom('[data-test-consent-form]').exists('Consent form exists'); + + //* clean up test state + await clearRecord(this.store, 'oidc/client', 'my-webapp'); + await clearRecord(this.store, 'oidc/provider', 'my-provider'); + }); +}); diff --git a/ui/tests/acceptance/pki/pki-action-forms-test.js b/ui/tests/acceptance/pki/pki-action-forms-test.js new file mode 100644 index 0000000..e4b2c89 --- /dev/null +++ b/ui/tests/acceptance/pki/pki-action-forms-test.js @@ -0,0 +1,298 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { click, currentURL, fillIn, typeIn, visit } from '@ember/test-helpers'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import { v4 as uuidv4 } from 'uuid'; + +import authPage from 'vault/tests/pages/auth'; +import logout from 'vault/tests/pages/logout'; +import enablePage from 'vault/tests/pages/settings/mount-secret-backend'; +import { runCommands } from 'vault/tests/helpers/pki/pki-run-commands'; +import { SELECTORS as S } from 'vault/tests/helpers/pki/workflow'; +import { issuerPemBundle } from 'vault/tests/helpers/pki/values'; + +module('Acceptance | pki action forms test', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(async function () { + await authPage.login(); + // Setup PKI engine + const mountPath = `pki-workflow-${uuidv4()}`; + await enablePage.enable('pki', mountPath); + this.mountPath = mountPath; + await logout.visit(); + }); + + hooks.afterEach(async function () { + await logout.visit(); + await authPage.login(); + // Cleanup engine + await runCommands([`delete sys/mounts/${this.mountPath}`]); + await logout.visit(); + }); + + module('import', function (hooks) { + setupMirage(hooks); + + hooks.beforeEach(function () { + this.pemBundle = issuerPemBundle; + }); + + test('happy path', async function (assert) { + await authPage.login(this.pkiAdminToken); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/overview`); + await click(S.emptyStateLink); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/configuration/create`); + assert.dom(S.configuration.title).hasText('Configure PKI'); + assert.dom(S.configuration.emptyState).exists({ count: 1 }, 'Shows empty state by default'); + await click(S.configuration.optionByKey('import')); + assert.dom(S.configuration.emptyState).doesNotExist(); + // Submit before filling out form shows an error + await click('[data-test-pki-import-pem-bundle]'); + assert.dom('[data-test-alert-banner="alert"]').hasText('Error please upload your PEM bundle'); + // Fill in form data + await click('[data-test-text-toggle]'); + await fillIn('[data-test-text-file-textarea]', this.pemBundle); + await click('[data-test-pki-import-pem-bundle]'); + + assert.strictEqual( + currentURL(), + `/vault/secrets/${this.mountPath}/pki/configuration/create`, + 'stays on page on success' + ); + assert.dom(S.configuration.title).hasText('View imported items'); + assert.dom(S.configuration.importForm).doesNotExist('import form is hidden after save'); + assert.dom(S.configuration.importMapping).exists('import mapping is shown after save'); + await click('[data-test-done]'); + assert.strictEqual( + currentURL(), + `/vault/secrets/${this.mountPath}/pki/overview`, + 'redirects to overview when done' + ); + }); + test('with many imports', async function (assert) { + this.server.post(`${this.mountPath}/config/ca`, () => { + return { + request_id: 'some-config-id', + data: { + imported_issuers: ['my-imported-issuer', 'imported2'], + imported_keys: ['my-imported-key', 'imported3'], + mapping: { + 'my-imported-issuer': 'my-imported-key', + imported2: '', + }, + }, + }; + }); + await authPage.login(this.pkiAdminToken); + await visit(`/vault/secrets/${this.mountPath}/pki/configuration/create`); + await click(S.configuration.optionByKey('import')); + 
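+ // NOTE: the Mirage POST stub above mimics Vault's import response shape: `mapping` pairs each imported issuer with its key, and an empty string means the issuer arrived without a matching key. A minimal sketch of a reusable stub helper (hypothetical, not an existing helper in this repo) might be:
+ //   const stubCaImport = (issuers, keys, mapping) => ({
+ //     request_id: 'some-config-id',
+ //     data: { imported_issuers: issuers, imported_keys: keys, mapping },
+ //   });
+ //   this.server.post(`${this.mountPath}/config/ca`, () => stubCaImport(['my-imported-issuer'], ['my-imported-key'], { 'my-imported-issuer': 'my-imported-key' }));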
assert.dom(S.configuration.importForm).exists('import form is shown'); + await click('[data-test-text-toggle]'); + await fillIn('[data-test-text-file-textarea]', this.pemBundle); + await click('[data-test-pki-import-pem-bundle]'); + + assert.strictEqual( + currentURL(), + `/vault/secrets/${this.mountPath}/pki/configuration/create`, + 'stays on page on success' + ); + assert.dom(S.configuration.title).hasText('View imported items'); + assert.dom(S.configuration.importForm).doesNotExist('import form is hidden after save'); + assert.dom(S.configuration.importMapping).exists('import mapping is shown after save'); + assert.dom(S.configuration.importedIssuer).hasText('my-imported-issuer', 'Issuer value is displayed'); + assert.dom(S.configuration.importedKey).hasText('my-imported-key', 'Key value is displayed'); + await click('[data-test-done]'); + assert.strictEqual( + currentURL(), + `/vault/secrets/${this.mountPath}/pki/overview`, + 'redirects to overview when done' + ); + }); + test('shows imported items when keys is empty', async function (assert) { + this.server.post(`${this.mountPath}/config/ca`, () => { + return { + request_id: 'some-config-id', + data: { + imported_issuers: ['my-imported-issuer', 'my-imported-issuer2'], + imported_keys: null, + mapping: { + 'my-imported-issuer': '', + 'my-imported-issuer2': '', + }, + }, + }; + }); + await authPage.login(this.pkiAdminToken); + await visit(`/vault/secrets/${this.mountPath}/pki/configuration/create`); + await click(S.configuration.optionByKey('import')); + assert.dom(S.configuration.importForm).exists('import form is shown'); + await click('[data-test-text-toggle]'); + await fillIn('[data-test-text-file-textarea]', this.pemBundle); + await click('[data-test-pki-import-pem-bundle]'); + + assert.dom(S.configuration.importForm).doesNotExist('import form is hidden after save'); + assert.dom(S.configuration.importMapping).exists('import mapping is shown after save'); + assert.dom(S.configuration.importedIssuer).hasText('my-imported-issuer', 'Issuer value is displayed'); + assert.dom(S.configuration.importedKey).hasText('None', 'Shows placeholder value for key'); + }); + test('shows None for imported items if nothing new imported', async function (assert) { + this.server.post(`${this.mountPath}/config/ca`, () => { + return { + request_id: 'some-config-id', + data: { + imported_issuers: null, + imported_keys: null, + mapping: {}, + }, + }; + }); + await authPage.login(this.pkiAdminToken); + await visit(`/vault/secrets/${this.mountPath}/pki/configuration/create`); + await click(S.configuration.optionByKey('import')); + assert.dom(S.configuration.importForm).exists('import form is shown'); + await click('[data-test-text-toggle]'); + await fillIn('[data-test-text-file-textarea]', this.pemBundle); + await click('[data-test-pki-import-pem-bundle]'); + + assert.dom(S.configuration.importForm).doesNotExist('import form is hidden after save'); + assert.dom(S.configuration.importMapping).exists('import mapping is shown after save'); + assert.dom(S.configuration.importedIssuer).hasText('None', 'Shows placeholder value for issuer'); + assert.dom(S.configuration.importedKey).hasText('None', 'Shows placeholder value for key'); + }); + }); + + module('generate root', function () { + test('happy path', async function (assert) { + const commonName = 'my-common-name'; + const issuerName = 'my-first-issuer'; + const keyName = 'my-first-key'; + await authPage.login(); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + 
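+ // NOTE: the generate-root form below is filled with a mix of @ember/test-helpers helpers: `fillIn` sets an input's value in one shot (firing a single input and change event), while `typeIn` types character by character and fires per-key events; presumably the name inputs rely on keystroke handling. Illustrative only:
+ //   await fillIn(S.configuration.typeField, 'internal'); // one-shot value set
+ //   await typeIn(S.configuration.inputByName('commonName'), commonName); // fires keydown/keypress/input/keyup per character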
assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/overview`); + await click(S.emptyStateLink); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/configuration/create`); + assert.dom(S.configuration.title).hasText('Configure PKI'); + assert.dom(S.configuration.emptyState).exists({ count: 1 }, 'Shows empty state by default'); + await click(S.configuration.optionByKey('generate-root')); + assert.dom(S.configuration.emptyState).doesNotExist(); + // The URLs section is populated based on params returned from OpenAPI. This test will break when + // the backend adds fields. We should update the count accordingly. + assert.dom(S.configuration.urlField).exists({ count: 4 }); + // Fill in form + await fillIn(S.configuration.typeField, 'internal'); + await typeIn(S.configuration.inputByName('commonName'), commonName); + await typeIn(S.configuration.inputByName('issuerName'), issuerName); + await click(S.configuration.keyParamsGroupToggle); + await typeIn(S.configuration.inputByName('keyName'), keyName); + await click(S.configuration.generateRootSave); + + assert.strictEqual( + currentURL(), + `/vault/secrets/${this.mountPath}/pki/configuration/create`, + 'stays on page on success' + ); + assert.dom(S.configuration.title).hasText('View root certificate'); + assert.dom('[data-test-alert-banner="alert"]').doesNotExist('no private key warning'); + assert.dom(S.configuration.title).hasText('View root certificate', 'Updates title on page'); + assert.dom(S.configuration.saved.certificate).hasClass('allow-copy', 'copyable certificate is masked'); + assert.dom(S.configuration.saved.issuerName).hasText(issuerName); + assert.dom(S.configuration.saved.issuerLink).exists('Issuer link exists'); + assert.dom(S.configuration.saved.keyLink).exists('Key link exists'); + assert.dom(S.configuration.saved.keyName).hasText(keyName); + assert.dom('[data-test-done]').exists('Done button exists'); + // Check that linked issuer has correct common name + await click(S.configuration.saved.issuerLink); + assert.dom(S.issuerDetails.valueByName('Common name')).hasText(commonName); + }); + test('type=exported', async function (assert) { + const commonName = 'my-exported-name'; + await authPage.login(); + await visit(`/vault/secrets/${this.mountPath}/pki/configuration/create`); + await click(S.configuration.optionByKey('generate-root')); + // Fill in form + await fillIn(S.configuration.typeField, 'exported'); + await typeIn(S.configuration.inputByName('commonName'), commonName); + await click(S.configuration.generateRootSave); + + assert.strictEqual( + currentURL(), + `/vault/secrets/${this.mountPath}/pki/configuration/create`, + 'stays on page on success' + ); + assert.dom(S.configuration.title).hasText('View root certificate'); + assert + .dom('[data-test-alert-banner="alert"]') + .hasText('Next steps The private_key is only available once. 
Make sure you copy and save it now.'); + assert.dom(S.configuration.title).hasText('View root certificate', 'Updates title on page'); + assert + .dom(S.configuration.saved.certificate) + .hasClass('allow-copy', 'copyable masked certificate exists'); + assert + .dom(S.configuration.saved.issuerName) + .doesNotExist('Issuer name not shown because it was not named'); + assert.dom(S.configuration.saved.issuerLink).exists('Issuer link exists'); + assert.dom(S.configuration.saved.keyLink).exists('Key link exists'); + assert + .dom(S.configuration.saved.privateKey) + .hasClass('allow-copy', 'copyable masked private key exists'); + assert.dom(S.configuration.saved.keyName).doesNotExist('Key name not shown because it was not named'); + assert.dom('[data-test-done]').exists('Done button exists'); + // Check that linked issuer has correct common name + await click(S.configuration.saved.issuerLink); + assert.dom(S.issuerDetails.valueByName('Common name')).hasText(commonName); + }); + }); + + module('generate CSR', function () { + test('happy path', async function (assert) { + await authPage.login(); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + await click(S.emptyStateLink); + assert.dom(S.configuration.title).hasText('Configure PKI'); + await click(S.configuration.optionByKey('generate-csr')); + await fillIn(S.configuration.typeField, 'internal'); + await fillIn(S.configuration.inputByName('commonName'), 'my-common-name'); + await click('[data-test-save]'); + assert.dom(S.configuration.title).hasText('View generated CSR'); + assert.dom(S.configuration.csrDetails).exists('renders CSR details after save'); + await click('[data-test-done]'); + assert.strictEqual( + currentURL(), + `/vault/secrets/${this.mountPath}/pki/overview`, + 'Transitions to overview after viewing csr details' + ); + }); + test('type = exported', async function (assert) { + await authPage.login(); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + await click(S.emptyStateLink); + await click(S.configuration.optionByKey('generate-csr')); + await fillIn(S.configuration.typeField, 'exported'); + await fillIn(S.configuration.inputByName('commonName'), 'my-common-name'); + await click('[data-test-save]'); + assert.dom(S.configuration.csrDetails).exists('renders CSR details after save'); + assert.dom(S.configuration.title).hasText('View generated CSR'); + assert + .dom('[data-test-alert-banner="alert"]') + .hasText( + 'Next steps Copy the CSR below for a parent issuer to sign and then import the signed certificate back into this mount. The private_key is only available once. Make sure you copy and save it now.' + ); + assert + .dom(S.configuration.saved.privateKey) + .hasClass('allow-copy', 'copyable masked private key exists'); + await click('[data-test-done]'); + assert.strictEqual( + currentURL(), + `/vault/secrets/${this.mountPath}/pki/overview`, + 'Transitions to overview after viewing csr details' + ); + }); + }); +}); diff --git a/ui/tests/acceptance/pki/pki-configuration-test.js b/ui/tests/acceptance/pki/pki-configuration-test.js new file mode 100644 index 0000000..f220367 --- /dev/null +++ b/ui/tests/acceptance/pki/pki-configuration-test.js @@ -0,0 +1,244 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import { click, currentURL, fillIn, visit, isSettled, waitUntil, find } from '@ember/test-helpers'; +import { v4 as uuidv4 } from 'uuid'; + +import authPage from 'vault/tests/pages/auth'; +import logout from 'vault/tests/pages/logout'; +import enablePage from 'vault/tests/pages/settings/mount-secret-backend'; +import { runCommands } from 'vault/tests/helpers/pki/pki-run-commands'; +import { SELECTORS } from 'vault/tests/helpers/pki/workflow'; +import { issuerPemBundle } from 'vault/tests/helpers/pki/values'; + +module('Acceptance | pki configuration test', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(async function () { + this.pemBundle = issuerPemBundle; + await authPage.login(); + // Setup PKI engine + const mountPath = `pki-workflow-${uuidv4()}`; + await enablePage.enable('pki', mountPath); + this.mountPath = mountPath; + await logout.visit(); + }); + + hooks.afterEach(async function () { + await logout.visit(); + await authPage.login(); + // Cleanup engine + await runCommands([`delete sys/mounts/${this.mountPath}`]); + await logout.visit(); + }); + + module('delete all issuers modal and empty states', function (hooks) { + setupMirage(hooks); + + test('it shows the delete all issuers modal', async function (assert) { + await authPage.login(this.pkiAdminToken); + await visit(`/vault/secrets/${this.mountPath}/pki/configuration`); + await click(SELECTORS.configuration.configureButton); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/configuration/create`); + await isSettled(); + await click(SELECTORS.configuration.generateRootOption); + await fillIn(SELECTORS.configuration.typeField, 'exported'); + await fillIn(SELECTORS.configuration.generateRootCommonNameField, 'issuer-common-0'); + await fillIn(SELECTORS.configuration.generateRootIssuerNameField, 'issuer-0'); + await click(SELECTORS.configuration.generateRootSave); + await click(SELECTORS.configuration.doneButton); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/overview`); + await isSettled(); + await click(SELECTORS.configTab); + await isSettled(); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/configuration`); + await click(SELECTORS.configuration.issuerLink); + await isSettled(); + assert.dom(SELECTORS.configuration.deleteAllIssuerModal).exists(); + await fillIn(SELECTORS.configuration.deleteAllIssuerInput, 'delete-all'); + await click(SELECTORS.configuration.deleteAllIssuerButton); + assert.dom(SELECTORS.configuration.deleteAllIssuerModal).doesNotExist(); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/configuration`); + }); + + test('it shows the correct empty state message if certificates exists after delete all issuers', async function (assert) { + await authPage.login(this.pkiAdminToken); + await visit(`/vault/secrets/${this.mountPath}/pki/configuration`); + await click(SELECTORS.configuration.configureButton); + assert.strictEqual( + currentURL(), + `/vault/secrets/${this.mountPath}/pki/configuration/create`, + 'goes to pki configure page' + ); + await click(SELECTORS.configuration.generateRootOption); + await fillIn(SELECTORS.configuration.typeField, 'exported'); + await fillIn(SELECTORS.configuration.generateRootCommonNameField, 'issuer-common-0'); + await 
fillIn(SELECTORS.configuration.generateRootIssuerNameField, 'issuer-0'); + await click(SELECTORS.configuration.generateRootSave); + await click(SELECTORS.configuration.doneButton); + assert.strictEqual( + currentURL(), + `/vault/secrets/${this.mountPath}/pki/overview`, + 'goes to overview page' + ); + await click(SELECTORS.configTab); + assert.strictEqual( + currentURL(), + `/vault/secrets/${this.mountPath}/pki/configuration`, + 'goes to configuration page' + ); + await click(SELECTORS.configuration.issuerLink); + assert.dom(SELECTORS.configuration.deleteAllIssuerModal).exists(); + await fillIn(SELECTORS.configuration.deleteAllIssuerInput, 'delete-all'); + await click(SELECTORS.configuration.deleteAllIssuerButton); + await isSettled(); + assert + .dom(SELECTORS.configuration.deleteAllIssuerModal) + .doesNotExist('delete all issuers modal closes'); + assert.strictEqual( + currentURL(), + `/vault/secrets/${this.mountPath}/pki/configuration`, + 'is still on configuration page' + ); + await isSettled(); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + await waitUntil(() => currentURL() === `/vault/secrets/${this.mountPath}/pki/overview`); + await isSettled(); + assert.strictEqual( + currentURL(), + `/vault/secrets/${this.mountPath}/pki/overview`, + 'goes to overview page' + ); + assert + .dom(SELECTORS.emptyStateMessage) + .hasText( + "This PKI mount hasn't yet been configured with a certificate issuer. There are existing certificates. Use the CLI to perform any operations with them until an issuer is configured." + ); + + await visit(`/vault/secrets/${this.mountPath}/pki/roles`); + await isSettled(); + assert + .dom(SELECTORS.emptyStateMessage) + .hasText("This PKI mount hasn't yet been configured with a certificate issuer."); + + await visit(`/vault/secrets/${this.mountPath}/pki/issuers`); + await isSettled(); + assert + .dom(SELECTORS.emptyStateMessage) + .hasText("This PKI mount hasn't yet been configured with a certificate issuer."); + + await visit(`/vault/secrets/${this.mountPath}/pki/keys`); + await isSettled(); + assert + .dom(SELECTORS.emptyStateMessage) + .hasText("This PKI mount hasn't yet been configured with a certificate issuer."); + + await visit(`/vault/secrets/${this.mountPath}/pki/certificates`); + await isSettled(); + assert + .dom(SELECTORS.emptyStateMessage) + .hasText( + "This PKI mount hasn't yet been configured with a certificate issuer. There are existing certificates. Use the CLI to perform any operations with them until an issuer is configured." 
+ ); + }); + + test('it shows the correct empty state message if roles and certificates exists after delete all issuers', async function (assert) { + await authPage.login(this.pkiAdminToken); + await visit(`/vault/secrets/${this.mountPath}/pki/configuration`); + await click(SELECTORS.configuration.configureButton); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/configuration/create`); + await click(SELECTORS.configuration.generateRootOption); + await fillIn(SELECTORS.configuration.typeField, 'exported'); + await fillIn(SELECTORS.configuration.generateRootCommonNameField, 'issuer-common-0'); + await fillIn(SELECTORS.configuration.generateRootIssuerNameField, 'issuer-0'); + await click(SELECTORS.configuration.generateRootSave); + await click(SELECTORS.configuration.doneButton); + await runCommands([ + `write ${this.mountPath}/roles/some-role \ + issuer_ref="default" \ + allowed_domains="example.com" \ + allow_subdomains=true \ + max_ttl="720h"`, + ]); + await runCommands([`write ${this.mountPath}/root/generate/internal common_name="Hashicorp Test"`]); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/overview`); + await click(SELECTORS.configTab); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/configuration`); + await click(SELECTORS.configuration.issuerLink); + assert.dom(SELECTORS.configuration.deleteAllIssuerModal).exists(); + await fillIn(SELECTORS.configuration.deleteAllIssuerInput, 'delete-all'); + await click(SELECTORS.configuration.deleteAllIssuerButton); + await isSettled(); + assert.dom(SELECTORS.configuration.deleteAllIssuerModal).doesNotExist(); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/configuration`); + await isSettled(); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + await waitUntil(() => currentURL() === `/vault/secrets/${this.mountPath}/pki/overview`); + await isSettled(); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/overview`); + assert + .dom(SELECTORS.emptyStateMessage) + .hasText( + "This PKI mount hasn't yet been configured with a certificate issuer. There are existing roles and certificates. Use the CLI to perform any operations with them until an issuer is configured." + ); + + await visit(`/vault/secrets/${this.mountPath}/pki/roles`); + await isSettled(); + assert + .dom(SELECTORS.emptyStateMessage) + .hasText( + "This PKI mount hasn't yet been configured with a certificate issuer. There are existing roles. Use the CLI to perform any operations with them until an issuer is configured." + ); + + await visit(`/vault/secrets/${this.mountPath}/pki/issuers`); + await isSettled(); + assert + .dom(SELECTORS.emptyStateMessage) + .hasText("This PKI mount hasn't yet been configured with a certificate issuer."); + + await visit(`/vault/secrets/${this.mountPath}/pki/keys`); + await isSettled(); + assert + .dom(SELECTORS.emptyStateMessage) + .hasText("This PKI mount hasn't yet been configured with a certificate issuer."); + + await visit(`/vault/secrets/${this.mountPath}/pki/certificates`); + await isSettled(); + assert + .dom(SELECTORS.emptyStateMessage) + .hasText( + "This PKI mount hasn't yet been configured with a certificate issuer. There are existing certificates. Use the CLI to perform any operations with them until an issuer is configured." 
+ ); + }); + + // test coverage for ed25519 certs not displaying because the verify() function errors + test('it generates and displays a root issuer of key type = ed25519', async function (assert) { + assert.expect(4); + await authPage.login(this.pkiAdminToken); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + await click(SELECTORS.issuersTab); + await click(SELECTORS.generateIssuerDropdown); + await click(SELECTORS.generateIssuerRoot); + await fillIn(SELECTORS.configuration.inputByName('type'), 'internal'); + await fillIn(SELECTORS.configuration.inputByName('commonName'), 'my-certificate'); + await click(SELECTORS.configuration.keyParamsGroupToggle); + await fillIn(SELECTORS.configuration.inputByName('keyType'), 'ed25519'); + await click(SELECTORS.configuration.generateRootSave); + + const issuerId = find(SELECTORS.configuration.saved.issuerLink).innerHTML; + await visit(`/vault/secrets/${this.mountPath}/pki/issuers`); + assert.dom(SELECTORS.issuerListItem(issuerId)).exists(); + assert + .dom('[data-test-common-name="0"]') + .hasText('my-certificate', 'parses certificate metadata in the list view'); + await click(SELECTORS.issuerListItem(issuerId)); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/issuers/${issuerId}/details`); + assert.dom(SELECTORS.configuration.saved.commonName).exists('renders issuer details'); + }); + }); +}); diff --git a/ui/tests/acceptance/pki/pki-cross-sign-test.js b/ui/tests/acceptance/pki/pki-cross-sign-test.js new file mode 100644 index 0000000..f20d9fc --- /dev/null +++ b/ui/tests/acceptance/pki/pki-cross-sign-test.js @@ -0,0 +1,111 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { visit, click, fillIn, find } from '@ember/test-helpers'; +import { setupApplicationTest } from 'vault/tests/helpers'; +import { v4 as uuidv4 } from 'uuid'; + +import authPage from 'vault/tests/pages/auth'; +import logout from 'vault/tests/pages/logout'; +import enablePage from 'vault/tests/pages/settings/mount-secret-backend'; +import { runCommands } from 'vault/tests/helpers/pki/pki-run-commands'; +import { SELECTORS } from 'vault/tests/helpers/pki/pki-issuer-cross-sign'; +import { verifyCertificates } from 'vault/utils/parse-pki-cert'; +module('Acceptance | pki/pki cross sign', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(async function () { + await authPage.login(); + this.parentMountPath = `parent-mount-${uuidv4()}`; + this.oldParentIssuerName = 'old-parent-issuer'; // old parent issuer we're transferring from + this.parentIssuerName = 'new-parent-issuer'; // issuer where cross-signing action will begin + this.intMountPath = `intermediate-mount-${uuidv4()}`; // first input box in cross-signing page + this.intIssuerName = 'my-intermediate-issuer'; // second input box in cross-signing page + this.newlySignedIssuer = 'my-newly-signed-int'; // third input + await enablePage.enable('pki', this.parentMountPath); + await enablePage.enable('pki', this.intMountPath); + + await runCommands([ + `write "${this.parentMountPath}/root/generate/internal" common_name="Long-Lived Root X1" ttl=8960h issuer_name="${this.oldParentIssuerName}"`, + `write "${this.parentMountPath}/root/generate/internal" common_name="Long-Lived Root X2" ttl=8960h issuer_name="${this.parentIssuerName}"`, + `write "${this.parentMountPath}/config/issuers" default="${this.parentIssuerName}"`, + ]); + }); + + hooks.afterEach(async function () { + // Cleanup engine + await 
runCommands([`delete sys/mounts/${this.intMountPath}`]); + await runCommands([`delete sys/mounts/${this.parentMountPath}`]); + await logout.visit(); + }); + + test('it cross-signs an issuer', async function (assert) { + // configure parent and intermediate mounts to make them cross-signable + await visit(`/vault/secrets/${this.intMountPath}/pki/configuration/create`); + await click(SELECTORS.configure.optionByKey('generate-csr')); + await fillIn(SELECTORS.inputByName('type'), 'internal'); + await fillIn(SELECTORS.inputByName('commonName'), 'Short-Lived Int R1'); + await click('[data-test-save]'); + const csr = find(SELECTORS.copyButton('CSR')).getAttribute('data-clipboard-text'); + await visit(`vault/secrets/${this.parentMountPath}/pki/issuers/${this.oldParentIssuerName}/sign`); + await fillIn(SELECTORS.inputByName('csr'), csr); + await fillIn(SELECTORS.inputByName('format'), 'pem_bundle'); + await click('[data-test-pki-sign-intermediate-save]'); + const pemBundle = find(SELECTORS.copyButton('CA Chain')) + .getAttribute('data-clipboard-text') + .replace(/,/, '\n'); + await visit(`vault/secrets/${this.intMountPath}/pki/configuration/create`); + await click(SELECTORS.configure.optionByKey('import')); + await click('[data-test-text-toggle]'); + await fillIn('[data-test-text-file-textarea]', pemBundle); + await click(SELECTORS.configure.importSubmit); + await visit(`vault/secrets/${this.intMountPath}/pki/issuers`); + await click('[data-test-is-default]'); + // name default issuer of intermediate + const oldIntIssuerId = find(SELECTORS.rowValue('Issuer ID')).innerText; + const oldIntCert = find(SELECTORS.copyButton('Certificate')).getAttribute('data-clipboard-text'); + await click(SELECTORS.details.configure); + await fillIn(SELECTORS.inputByName('issuerName'), this.intIssuerName); + await click('[data-test-save]'); + + // perform cross-sign + await visit(`vault/secrets/${this.parentMountPath}/pki/issuers/${this.parentIssuerName}/cross-sign`); + await fillIn(SELECTORS.objectListInput('intermediateMount'), this.intMountPath); + await fillIn(SELECTORS.objectListInput('intermediateIssuer'), this.intIssuerName); + await fillIn(SELECTORS.objectListInput('newCrossSignedIssuer'), this.newlySignedIssuer); + await click(SELECTORS.submitButton); + assert + .dom(`${SELECTORS.signedIssuerCol('intermediateMount')} a`) + .hasAttribute('href', `/ui/vault/secrets/${this.intMountPath}/pki/overview`); + assert + .dom(`${SELECTORS.signedIssuerCol('intermediateIssuer')} a`) + .hasAttribute('href', `/ui/vault/secrets/${this.intMountPath}/pki/issuers/${oldIntIssuerId}/details`); + + // get certificate data of newly signed issuer + await click(`${SELECTORS.signedIssuerCol('newCrossSignedIssuer')} a`); + const newIntCert = find(SELECTORS.copyButton('Certificate')).getAttribute('data-clipboard-text'); + + // verify cross-sign was accurate by creating a role to issue a leaf certificate + const myRole = 'some-role'; + await runCommands([ + `write ${this.intMountPath}/roles/${myRole} \ + issuer_ref=${this.newlySignedIssuer}\ + allow_any_name=true \ + max_ttl="720h"`, + ]); + await visit(`vault/secrets/${this.intMountPath}/pki/roles/${myRole}/generate`); + await fillIn(SELECTORS.inputByName('commonName'), 'my-leaf'); + await fillIn('[data-test-ttl-value="TTL"]', '3600'); + await click('[data-test-pki-generate-button]'); + const myLeafCert = find(SELECTORS.copyButton('Certificate')).getAttribute('data-clipboard-text'); + + // see comments in utils/parse-pki-cert.js for step-by-step explanation of verifyCertificates 
method + assert.true( + await verifyCertificates(oldIntCert, newIntCert, myLeafCert), + 'the leaf certificate validates against both intermediate certificates' + ); + }); +}); diff --git a/ui/tests/acceptance/pki/pki-engine-route-cleanup-test.js b/ui/tests/acceptance/pki/pki-engine-route-cleanup-test.js new file mode 100644 index 0000000..9dde991 --- /dev/null +++ b/ui/tests/acceptance/pki/pki-engine-route-cleanup-test.js @@ -0,0 +1,406 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { v4 as uuidv4 } from 'uuid'; + +import authPage from 'vault/tests/pages/auth'; +import logout from 'vault/tests/pages/logout'; +import enablePage from 'vault/tests/pages/settings/mount-secret-backend'; +import { click, currentURL, fillIn, visit } from '@ember/test-helpers'; +import { runCommands } from 'vault/tests/helpers/pki/pki-run-commands'; +import { SELECTORS } from 'vault/tests/helpers/pki/workflow'; + +/** + * This test module should test that dirty route models are cleaned up when the user leaves the page + */ +module('Acceptance | pki engine route cleanup test', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(async function () { + this.store = this.owner.lookup('service:store'); + await authPage.login(); + // Setup PKI engine + const mountPath = `pki-workflow-${uuidv4()}`; + await enablePage.enable('pki', mountPath); + this.mountPath = mountPath; + await logout.visit(); + }); + + hooks.afterEach(async function () { + await logout.visit(); + await authPage.login(); + // Cleanup engine + await runCommands([`delete sys/mounts/${this.mountPath}`]); + await logout.visit(); + }); + + module('configuration', function () { + test('create config', async function (assert) { + let configs, urls, config; + await authPage.login(this.pkiAdminToken); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + await click(SELECTORS.emptyStateLink); + configs = this.store.peekAll('pki/action'); + urls = this.store.peekRecord('pki/config/urls', this.mountPath); + config = configs.objectAt(0); + assert.strictEqual(configs.length, 1, 'One config model present'); + assert.false(urls.hasDirtyAttributes, 'URLs is loaded from endpoint'); + assert.true(config.hasDirtyAttributes, 'Config model is dirty'); + + // Cancel button rolls it back + await click(SELECTORS.configuration.cancelButton); + configs = this.store.peekAll('pki/action'); + urls = this.store.peekRecord('pki/config/urls', this.mountPath); + assert.strictEqual(configs.length, 0, 'config model is rolled back on cancel'); + assert.strictEqual(urls.id, this.mountPath, 'Urls still exists on exit'); + + await click(SELECTORS.emptyStateLink); + configs = this.store.peekAll('pki/action'); + urls = this.store.peekRecord('pki/config/urls', this.mountPath); + config = configs.objectAt(0); + assert.strictEqual(configs.length, 1, 'One config model present'); + assert.false(urls.hasDirtyAttributes, 'URLs is loaded from endpoint'); + assert.true(config.hasDirtyAttributes, 'Config model is dirty'); + + // Exit page via link rolls it back + await click(SELECTORS.overviewBreadcrumb); + configs = this.store.peekAll('pki/action'); + urls = this.store.peekRecord('pki/config/urls', this.mountPath); + assert.strictEqual(configs.length, 0, 'config model is rolled back on cancel'); + assert.strictEqual(urls.id, this.mountPath, 'Urls still exists on exit'); + }); + }); + + module('role routes', function (hooks) { + 
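+ // These tests assert that records left dirty by create/edit forms are cleaned up when the user exits via the cancel button or a breadcrumb. A sketch of the route-level cleanup they imply (illustrative only, not the actual app code):
+ //   willTransition() {
+ //     const model = this.controller.model;
+ //     // rollbackAttributes() discards unsaved edits; a new, never-saved record is also removed from the store
+ //     if (model && model.hasDirtyAttributes) model.rollbackAttributes();
+ //   }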
hooks.beforeEach(async function () { + await authPage.login(); + // Configure PKI + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + await click(SELECTORS.emptyStateLink); + await click(SELECTORS.configuration.optionByKey('generate-root')); + await fillIn(SELECTORS.configuration.typeField, 'internal'); + await fillIn(SELECTORS.configuration.inputByName('commonName'), 'my-root-cert'); + await click(SELECTORS.configuration.generateRootSave); + await logout.visit(); + }); + + test('create role exit via cancel', async function (assert) { + let roles; + await authPage.login(); + // Create role + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + await click(SELECTORS.rolesTab); + roles = this.store.peekAll('pki/role'); + assert.strictEqual(roles.length, 0, 'No roles exist yet'); + await click(SELECTORS.createRoleLink); + roles = this.store.peekAll('pki/role'); + const role = roles.objectAt(0); + assert.strictEqual(roles.length, 1, 'New role exists'); + assert.true(role.isNew, 'Role is new model'); + await click(SELECTORS.roleForm.roleCancelButton); + roles = this.store.peekAll('pki/role'); + assert.strictEqual(roles.length, 0, 'Role is removed from store'); + }); + test('create role exit via breadcrumb', async function (assert) { + let roles; + await authPage.login(); + // Create role + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + await click(SELECTORS.rolesTab); + roles = this.store.peekAll('pki/role'); + assert.strictEqual(roles.length, 0, 'No roles exist yet'); + await click(SELECTORS.createRoleLink); + roles = this.store.peekAll('pki/role'); + const role = roles.objectAt(0); + assert.strictEqual(roles.length, 1, 'New role exists'); + assert.true(role.isNew, 'Role is new model'); + await click(SELECTORS.overviewBreadcrumb); + roles = this.store.peekAll('pki/role'); + assert.strictEqual(roles.length, 0, 'Role is removed from store'); + }); + test('edit role', async function (assert) { + let roles, role; + const roleId = 'workflow-edit-role'; + await authPage.login(); + // Create role + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + await click(SELECTORS.rolesTab); + roles = this.store.peekAll('pki/role'); + assert.strictEqual(roles.length, 0, 'No roles exist yet'); + await click(SELECTORS.createRoleLink); + await fillIn(SELECTORS.roleForm.roleName, roleId); + await click(SELECTORS.roleForm.roleCreateButton); + assert.dom('[data-test-value-div="Role name"]').hasText(roleId, 'Shows correct role after create'); + roles = this.store.peekAll('pki/role'); + role = roles.objectAt(0); + assert.strictEqual(roles.length, 1, 'Role is created'); + assert.false(role.hasDirtyAttributes, 'Role no longer has dirty attributes'); + + // Edit role + await click(SELECTORS.editRoleLink); + await click(SELECTORS.roleForm.issuerRefToggle); + await fillIn(SELECTORS.roleForm.issuerRefSelect, 'foobar'); + role = this.store.peekRecord('pki/role', roleId); + assert.true(role.hasDirtyAttributes, 'Role has dirty attrs'); + // Exit page via cancel button + await click(SELECTORS.roleForm.roleCancelButton); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/roles/${roleId}/details`); + role = this.store.peekRecord('pki/role', roleId); + assert.false(role.hasDirtyAttributes, 'Role dirty attrs have been rolled back'); + + // Edit again + await click(SELECTORS.editRoleLink); + await click(SELECTORS.roleForm.issuerRefToggle); + await fillIn(SELECTORS.roleForm.issuerRefSelect, 'foobar2'); + role = this.store.peekRecord('pki/role', roleId); + 
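+ // NOTE: `peekRecord` (like `peekAll` above) reads only from the store's local cache and never triggers a network request, so these assertions observe dirty-state bookkeeping without re-fetching. For example: `this.store.peekRecord('pki/role', roleId)` returns null when the record is not cached.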
assert.true(role.hasDirtyAttributes, 'Role has dirty attrs'); + // Exit page via breadcrumbs + await click(SELECTORS.overviewBreadcrumb); + role = this.store.peekRecord('pki/role', roleId); + assert.false(role.hasDirtyAttributes, 'Role dirty attrs have been rolled back'); + }); + }); + + module('issuer routes', function () { + test('import issuer exit via cancel', async function (assert) { + let issuers; + await authPage.login(); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + await click(SELECTORS.issuersTab); + issuers = this.store.peekAll('pki/issuer'); + assert.strictEqual(issuers.length, 0, 'No issuer models exist yet'); + await click(SELECTORS.importIssuerLink); + issuers = this.store.peekAll('pki/action'); + assert.strictEqual(issuers.length, 1, 'Action model created'); + const issuer = issuers.objectAt(0); + assert.true(issuer.hasDirtyAttributes, 'Action has dirty attrs'); + assert.true(issuer.isNew, 'Action is new'); + // Exit + await click('[data-test-pki-ca-cert-cancel]'); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/issuers`); + issuers = this.store.peekAll('pki/action'); + assert.strictEqual(issuers.length, 0, 'Action is removed from store'); + }); + test('import issuer exit via breadcrumb', async function (assert) { + let issuers; + await authPage.login(); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + await click(SELECTORS.issuersTab); + issuers = this.store.peekAll('pki/issuer'); + assert.strictEqual(issuers.length, 0, 'No issuers exist yet'); + await click(SELECTORS.importIssuerLink); + issuers = this.store.peekAll('pki/action'); + assert.strictEqual(issuers.length, 1, 'Action model created'); + const issuer = issuers.objectAt(0); + assert.true(issuer.hasDirtyAttributes, 'Action model has dirty attrs'); + assert.true(issuer.isNew, 'Action model is new'); + // Exit + await click(SELECTORS.overviewBreadcrumb); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/overview`); + issuers = this.store.peekAll('pki/action'); + assert.strictEqual(issuers.length, 0, 'Issuer is removed from store'); + }); + test('generate root exit via cancel', async function (assert) { + let actions; + await authPage.login(); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + await click(SELECTORS.issuersTab); + actions = this.store.peekAll('pki/action'); + assert.strictEqual(actions.length, 0, 'No actions exist yet'); + await click(SELECTORS.generateIssuerDropdown); + await click(SELECTORS.generateIssuerRoot); + actions = this.store.peekAll('pki/action'); + assert.strictEqual(actions.length, 1, 'Action model for generate-root created'); + const action = actions.objectAt(0); + assert.true(action.hasDirtyAttributes, 'Action has dirty attrs'); + assert.true(action.isNew, 'Action is new'); + assert.strictEqual(action.actionType, 'generate-root', 'Action type is correct'); + // Exit + await click(SELECTORS.configuration.generateRootCancel); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/issuers`); + actions = this.store.peekAll('pki/action'); + assert.strictEqual(actions.length, 0, 'Action is removed from store'); + }); + test('generate root exit via breadcrumb', async function (assert) { + let actions; + await authPage.login(); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + await click(SELECTORS.issuersTab); + actions = this.store.peekAll('pki/action'); + assert.strictEqual(actions.length, 0, 'No actions exist yet'); + await 
click(SELECTORS.generateIssuerDropdown);
+      await click(SELECTORS.generateIssuerRoot);
+      actions = this.store.peekAll('pki/action');
+      assert.strictEqual(actions.length, 1, 'Action model for generate-root created');
+      const action = actions.objectAt(0);
+      assert.true(action.hasDirtyAttributes, 'Action has dirty attrs');
+      assert.true(action.isNew, 'Action is new');
+      assert.strictEqual(action.actionType, 'generate-root');
+      // Exit
+      await click(SELECTORS.overviewBreadcrumb);
+      assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/overview`);
+      actions = this.store.peekAll('pki/action');
+      assert.strictEqual(actions.length, 0, 'Action is removed from store');
+    });
+    test('generate intermediate csr exit via cancel', async function (assert) {
+      let actions;
+      await authPage.login();
+      await visit(`/vault/secrets/${this.mountPath}/pki/overview`);
+      await click(SELECTORS.issuersTab);
+      actions = this.store.peekAll('pki/action');
+      assert.strictEqual(actions.length, 0, 'No actions exist yet');
+      await click(SELECTORS.generateIssuerDropdown);
+      await click(SELECTORS.generateIssuerIntermediate);
+      actions = this.store.peekAll('pki/action');
+      assert.strictEqual(actions.length, 1, 'Action model for generate-csr created');
+      const action = actions.objectAt(0);
+      assert.true(action.hasDirtyAttributes, 'Action has dirty attrs');
+      assert.true(action.isNew, 'Action is new');
+      assert.strictEqual(action.actionType, 'generate-csr');
+      // Exit
+      await click('[data-test-cancel]');
+      assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/issuers`);
+      actions = this.store.peekAll('pki/action');
+      assert.strictEqual(actions.length, 0, 'Action is removed from store');
+    });
+    test('generate intermediate csr exit via breadcrumb', async function (assert) {
+      let actions;
+      await authPage.login();
+      await visit(`/vault/secrets/${this.mountPath}/pki/overview`);
+      await click(SELECTORS.issuersTab);
+      actions = this.store.peekAll('pki/action');
+      assert.strictEqual(actions.length, 0, 'No actions exist yet');
+      await click(SELECTORS.generateIssuerDropdown);
+      await click(SELECTORS.generateIssuerIntermediate);
+      actions = this.store.peekAll('pki/action');
+      assert.strictEqual(actions.length, 1, 'Action model for generate-csr created');
+      const action = actions.objectAt(0);
+      assert.true(action.hasDirtyAttributes, 'Action has dirty attrs');
+      assert.true(action.isNew, 'Action is new');
+      assert.strictEqual(action.actionType, 'generate-csr');
+      // Exit
+      await click(SELECTORS.overviewBreadcrumb);
+      assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/overview`);
+      actions = this.store.peekAll('pki/action');
+      assert.strictEqual(actions.length, 0, 'Action is removed from store');
+    });
+    test('edit issuer exit', async function (assert) {
+      let issuers, issuer;
+      await authPage.login();
+      await visit(`/vault/secrets/${this.mountPath}/pki/overview`);
+      await click(SELECTORS.emptyStateLink);
+      await click(SELECTORS.configuration.optionByKey('generate-root'));
+      await fillIn(SELECTORS.configuration.typeField, 'internal');
+      await fillIn(SELECTORS.configuration.inputByName('commonName'), 'my-root-cert');
+      await click(SELECTORS.configuration.generateRootSave);
+      // Go to list view so we fetch all the issuers
+      await visit(`/vault/secrets/${this.mountPath}/pki/issuers`);
+
+      issuers = this.store.peekAll('pki/issuer');
+      const issuerId = issuers.objectAt(0).id;
+      assert.strictEqual(issuers.length, 1, 'Issuer exists on model in list');
+      await
visit(`/vault/secrets/${this.mountPath}/pki/issuers/${issuerId}/details`);
+      await click(SELECTORS.issuerDetails.configure);
+      issuer = this.store.peekRecord('pki/issuer', issuerId);
+      assert.false(issuer.hasDirtyAttributes, 'Model not dirty');
+      await fillIn('[data-test-input="issuerName"]', 'foobar');
+      assert.true(issuer.hasDirtyAttributes, 'Model is dirty');
+      await click(SELECTORS.overviewBreadcrumb);
+      issuers = this.store.peekAll('pki/issuer');
+      assert.strictEqual(issuers.length, 1, 'Issuer exists on model in overview');
+      issuer = this.store.peekRecord('pki/issuer', issuerId);
+      assert.false(issuer.hasDirtyAttributes, 'Dirty attrs were rolled back');
+    });
+  });
+
+  module('key routes', function (hooks) {
+    hooks.beforeEach(async function () {
+      await authPage.login();
+      // Configure PKI -- key creation not allowed unless configured
+      await visit(`/vault/secrets/${this.mountPath}/pki/overview`);
+      await click(SELECTORS.emptyStateLink);
+      await click(SELECTORS.configuration.optionByKey('generate-root'));
+      await fillIn(SELECTORS.configuration.typeField, 'internal');
+      await fillIn(SELECTORS.configuration.inputByName('commonName'), 'my-root-cert');
+      await click(SELECTORS.configuration.generateRootSave);
+      await logout.visit();
+    });
+    test('create key exit', async function (assert) {
+      let keys, key;
+      await authPage.login();
+      await visit(`/vault/secrets/${this.mountPath}/pki/overview`);
+      await click(SELECTORS.keysTab);
+      keys = this.store.peekAll('pki/key');
+      const configKeyId = keys.objectAt(0).id;
+      assert.strictEqual(keys.length, 1, 'One key exists from config');
+      // Create key
+      await click(SELECTORS.keyPages.generateKey);
+      keys = this.store.peekAll('pki/key');
+      key = keys.objectAt(1);
+      assert.strictEqual(keys.length, 2, 'New key exists');
+      assert.true(key.isNew, 'Key is new model');
+      // Exit
+      await click(SELECTORS.keyForm.keyCancelButton);
+      keys = this.store.peekAll('pki/key');
+      assert.strictEqual(keys.length, 1, 'Second key is removed from store');
+      assert.strictEqual(keys.objectAt(0).id, configKeyId);
+      assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/keys`, 'url is correct');
+
+      // Create again
+      await click(SELECTORS.keyPages.generateKey);
+      assert.strictEqual(keys.length, 2, 'New key exists');
+      keys = this.store.peekAll('pki/key');
+      key = keys.objectAt(1);
+      assert.true(key.isNew, 'Key is new model');
+      // Exit
+      await click(SELECTORS.overviewBreadcrumb);
+      assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/overview`, 'url is correct');
+      keys = this.store.peekAll('pki/key');
+      assert.strictEqual(keys.length, 1, 'Key is removed from store');
+    });
+    test('edit key exit', async function (assert) {
+      let keys, key;
+      await authPage.login();
+      await visit(`/vault/secrets/${this.mountPath}/pki/overview`);
+      await click(SELECTORS.keysTab);
+      keys = this.store.peekAll('pki/key');
+      assert.strictEqual(keys.length, 1, 'One key from config exists');
+      assert.dom('.list-item-row').exists({ count: 1 }, 'single row for key');
+      await click('.list-item-row');
+      // Edit
+      await click(SELECTORS.keyPages.keyEditLink);
+      await fillIn(SELECTORS.keyForm.keyNameInput, 'foobar');
+      keys = this.store.peekAll('pki/key');
+      key = keys.objectAt(0);
+      assert.true(key.hasDirtyAttributes, 'Key model is dirty');
+      // Exit
+      await click(SELECTORS.keyForm.keyCancelButton);
+      assert.strictEqual(
+        currentURL(),
+        `/vault/secrets/${this.mountPath}/pki/keys/${key.id}/details`,
+        'url is correct'
+      );
+      keys = this.store.peekAll('pki/key');
+
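      // peekAll/peekRecord read only from the local store (no network request), so these
+      // assertions observe exactly what the route's cleanup left behind.
+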
assert.strictEqual(keys.length, 1, 'Key list has 1'); + assert.false(key.hasDirtyAttributes, 'Key dirty attrs have been rolled back'); + + // Edit again + await click(SELECTORS.keyPages.keyEditLink); + await fillIn(SELECTORS.keyForm.keyNameInput, 'foobar'); + keys = this.store.peekAll('pki/key'); + key = keys.objectAt(0); + assert.true(key.hasDirtyAttributes, 'Key model is dirty'); + + // Exit via breadcrumb + await click(SELECTORS.overviewBreadcrumb); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/overview`, 'url is correct'); + keys = this.store.peekAll('pki/key'); + assert.strictEqual(keys.length, 1, 'Key list has 1'); + assert.false(key.hasDirtyAttributes, 'Key dirty attrs have been rolled back'); + }); + }); +}); diff --git a/ui/tests/acceptance/pki/pki-engine-workflow-test.js b/ui/tests/acceptance/pki/pki-engine-workflow-test.js new file mode 100644 index 0000000..4927f08 --- /dev/null +++ b/ui/tests/acceptance/pki/pki-engine-workflow-test.js @@ -0,0 +1,517 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { v4 as uuidv4 } from 'uuid'; + +import authPage from 'vault/tests/pages/auth'; +import logout from 'vault/tests/pages/logout'; +import enablePage from 'vault/tests/pages/settings/mount-secret-backend'; +import { click, currentURL, fillIn, find, isSettled, visit } from '@ember/test-helpers'; +import { SELECTORS } from 'vault/tests/helpers/pki/workflow'; +import { adminPolicy, readerPolicy, updatePolicy } from 'vault/tests/helpers/policy-generator/pki'; +import { tokenWithPolicy, runCommands } from 'vault/tests/helpers/pki/pki-run-commands'; +import { unsupportedPem } from 'vault/tests/helpers/pki/values'; + +/** + * This test module should test the PKI workflow, including: + * - link between pages and confirm that the url is as expected + * - log in as user with a policy and ensure expected UI elements are shown/hidden + */ +module('Acceptance | pki workflow', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(async function () { + await authPage.login(); + // Setup PKI engine + const mountPath = `pki-workflow-${uuidv4()}`; + await enablePage.enable('pki', mountPath); + this.mountPath = mountPath; + await logout.visit(); + }); + + hooks.afterEach(async function () { + await logout.visit(); + await authPage.login(); + // Cleanup engine + await runCommands([`delete sys/mounts/${this.mountPath}`]); + await logout.visit(); + }); + + test('empty state messages are correct when PKI not configured', async function (assert) { + assert.expect(21); + const assertEmptyState = (assert, resource) => { + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/${resource}`); + assert + .dom(SELECTORS.emptyStateTitle) + .hasText( + 'PKI not configured', + `${resource} index renders correct empty state title when PKI not configured` + ); + assert.dom(SELECTORS.emptyStateLink).hasText('Configure PKI'); + assert + .dom(SELECTORS.emptyStateMessage) + .hasText( + `This PKI mount hasn't yet been configured with a certificate issuer.`, + `${resource} index empty state message correct when PKI not configured` + ); + }; + await authPage.login(this.pkiAdminToken); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/overview`); + + await click(SELECTORS.rolesTab); + assertEmptyState(assert, 'roles'); + + await 
click(SELECTORS.issuersTab); + assertEmptyState(assert, 'issuers'); + + await click(SELECTORS.certsTab); + assertEmptyState(assert, 'certificates'); + await click(SELECTORS.keysTab); + assertEmptyState(assert, 'keys'); + await click(SELECTORS.tidyTab); + assertEmptyState(assert, 'tidy'); + }); + + module('roles', function (hooks) { + hooks.beforeEach(async function () { + await authPage.login(); + // Setup role-specific items + await runCommands([ + `write ${this.mountPath}/roles/some-role \ + issuer_ref="default" \ + allowed_domains="example.com" \ + allow_subdomains=true \ + max_ttl="720h"`, + ]); + await runCommands([`write ${this.mountPath}/root/generate/internal common_name="Hashicorp Test"`]); + const pki_admin_policy = adminPolicy(this.mountPath, 'roles'); + const pki_reader_policy = readerPolicy(this.mountPath, 'roles'); + const pki_editor_policy = updatePolicy(this.mountPath, 'roles'); + this.pkiRoleReader = await tokenWithPolicy('pki-reader', pki_reader_policy); + this.pkiRoleEditor = await tokenWithPolicy('pki-editor', pki_editor_policy); + this.pkiAdminToken = await tokenWithPolicy('pki-admin', pki_admin_policy); + await logout.visit(); + }); + + test('shows correct items if user has all permissions', async function (assert) { + await authPage.login(this.pkiAdminToken); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/overview`); + assert.dom(SELECTORS.rolesTab).exists('Roles tab is present'); + await click(SELECTORS.rolesTab); + assert.dom(SELECTORS.createRoleLink).exists({ count: 1 }, 'Create role link is rendered'); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/roles`); + assert.dom('.linked-block').exists({ count: 1 }, 'One role is in list'); + await click('.linked-block'); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/roles/some-role/details`); + + assert.dom(SELECTORS.generateCertLink).exists('Generate cert link is shown'); + await click(SELECTORS.generateCertLink); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/roles/some-role/generate`); + + // Go back to details and test all the links + await visit(`/vault/secrets/${this.mountPath}/pki/roles/some-role/details`); + assert.dom(SELECTORS.signCertLink).exists('Sign cert link is shown'); + await click(SELECTORS.signCertLink); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/roles/some-role/sign`); + + await visit(`/vault/secrets/${this.mountPath}/pki/roles/some-role/details`); + assert.dom(SELECTORS.editRoleLink).exists('Edit link is shown'); + await click(SELECTORS.editRoleLink); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/roles/some-role/edit`); + + await visit(`/vault/secrets/${this.mountPath}/pki/roles/some-role/details`); + assert.dom(SELECTORS.deleteRoleButton).exists('Delete role button is shown'); + await click(`${SELECTORS.deleteRoleButton} [data-test-confirm-action-trigger]`); + await click(`[data-test-confirm-button]`); + assert.strictEqual( + currentURL(), + `/vault/secrets/${this.mountPath}/pki/roles`, + 'redirects to roles list after deletion' + ); + }); + + test('it does not show toolbar items the user does not have permission to see', async function (assert) { + await authPage.login(this.pkiRoleReader); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + assert.dom(SELECTORS.rolesTab).exists('Roles tab is present'); + await click(SELECTORS.rolesTab); + 
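      // The token used above comes from the readerPolicy helper; a plausible sketch of the
+      // HCL it produces (the helper's exact output may differ):
+      //
+      //   path "<mountPath>/roles" { capabilities = ["read", "list"] }
+      //   path "<mountPath>/roles/*" { capabilities = ["read"] }
+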
assert.dom(SELECTORS.createRoleLink).exists({ count: 1 }, 'Create role link is rendered'); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/roles`); + assert.dom('.linked-block').exists({ count: 1 }, 'One role is in list'); + await click('.linked-block'); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/roles/some-role/details`); + assert.dom(SELECTORS.deleteRoleButton).doesNotExist('Delete role button is not shown'); + assert.dom(SELECTORS.generateCertLink).doesNotExist('Generate cert link is not shown'); + assert.dom(SELECTORS.signCertLink).doesNotExist('Sign cert link is not shown'); + assert.dom(SELECTORS.editRoleLink).doesNotExist('Edit link is not shown'); + }); + + test('it shows correct toolbar items for the user policy', async function (assert) { + await authPage.login(this.pkiRoleEditor); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + assert.dom(SELECTORS.rolesTab).exists('Roles tab is present'); + await click(SELECTORS.rolesTab); + assert.dom(SELECTORS.createRoleLink).exists({ count: 1 }, 'Create role link is rendered'); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/roles`); + assert.dom('.linked-block').exists({ count: 1 }, 'One role is in list'); + await click('.linked-block'); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/roles/some-role/details`); + assert.dom(SELECTORS.deleteRoleButton).doesNotExist('Delete role button is not shown'); + assert.dom(SELECTORS.generateCertLink).exists('Generate cert link is shown'); + assert.dom(SELECTORS.signCertLink).exists('Sign cert link is shown'); + assert.dom(SELECTORS.editRoleLink).exists('Edit link is shown'); + await click(SELECTORS.editRoleLink); + assert.strictEqual( + currentURL(), + `/vault/secrets/${this.mountPath}/pki/roles/some-role/edit`, + 'Links to edit view' + ); + await click(SELECTORS.roleForm.roleCancelButton); + assert.strictEqual( + currentURL(), + `/vault/secrets/${this.mountPath}/pki/roles/some-role/details`, + 'Cancel from edit goes to details' + ); + await click(SELECTORS.generateCertLink); + assert.strictEqual( + currentURL(), + `/vault/secrets/${this.mountPath}/pki/roles/some-role/generate`, + 'Generate cert button goes to generate page' + ); + await click(SELECTORS.generateCertForm.cancelButton); + assert.strictEqual( + currentURL(), + `/vault/secrets/${this.mountPath}/pki/roles/some-role/details`, + 'Cancel from generate goes to details' + ); + }); + + test('create role happy path', async function (assert) { + const roleName = 'another-role'; + await authPage.login(this.pkiAdminToken); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/overview`); + assert.dom(SELECTORS.rolesTab).exists('Roles tab is present'); + await click(SELECTORS.rolesTab); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/roles`); + await click(SELECTORS.createRoleLink); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/roles/create`); + assert.dom(SELECTORS.breadcrumbContainer).exists({ count: 1 }, 'breadcrumbs are rendered'); + assert.dom(SELECTORS.breadcrumbs).exists({ count: 4 }, 'Shows 4 breadcrumbs'); + assert.dom(SELECTORS.pageTitle).hasText('Create a PKI role'); + + await fillIn(SELECTORS.roleForm.roleName, roleName); + await click(SELECTORS.roleForm.roleCreateButton); + + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/roles/${roleName}/details`); + 
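      // The 4 breadcrumbs asserted below are presumably secrets -> engine mount -> roles ->
+      // role name, mirroring the trail asserted on the create form above.
+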
assert.dom(SELECTORS.breadcrumbs).exists({ count: 4 }, 'Shows 4 breadcrumbs');
+      assert.dom(SELECTORS.pageTitle).hasText(`PKI Role ${roleName}`);
+    });
+  });
+
+  module('keys', function (hooks) {
+    hooks.beforeEach(async function () {
+      await authPage.login();
+      // base config pki so empty state doesn't show
+      await runCommands([`write ${this.mountPath}/root/generate/internal common_name="Hashicorp Test"`]);
+      const pki_admin_policy = adminPolicy(this.mountPath);
+      const pki_reader_policy = readerPolicy(this.mountPath, 'keys', true);
+      const pki_editor_policy = updatePolicy(this.mountPath, 'keys');
+      this.pkiKeyReader = await tokenWithPolicy('pki-reader', pki_reader_policy);
+      this.pkiKeyEditor = await tokenWithPolicy('pki-editor', pki_editor_policy);
+      this.pkiAdminToken = await tokenWithPolicy('pki-admin', pki_admin_policy);
+      await logout.visit();
+    });
+
+    test('shows correct items if user has all permissions', async function (assert) {
+      await authPage.login(this.pkiAdminToken);
+      await visit(`/vault/secrets/${this.mountPath}/pki/overview`);
+      assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/overview`);
+      await click(SELECTORS.keysTab);
+      // index page
+      assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/keys`);
+      assert
+        .dom(SELECTORS.keyPages.importKey)
+        .hasAttribute(
+          'href',
+          `/ui/vault/secrets/${this.mountPath}/pki/keys/import`,
+          'import link renders with correct url'
+        );
+      let keyId = find(SELECTORS.keyPages.keyId).innerText;
+      assert.dom('.linked-block').exists({ count: 1 }, 'One key is in list');
+      await click('.linked-block');
+      // details page
+      assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/keys/${keyId}/details`);
+      assert.dom(SELECTORS.keyPages.downloadButton).doesNotExist('does not render download button for private key');
+
+      // edit page
+      await click(SELECTORS.keyPages.keyEditLink);
+      assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/keys/${keyId}/edit`);
+      await click(SELECTORS.keyForm.keyCancelButton);
+      assert.strictEqual(
+        currentURL(),
+        `/vault/secrets/${this.mountPath}/pki/keys/${keyId}/details`,
+        'navigates back to details on cancel'
+      );
+      await visit(`/vault/secrets/${this.mountPath}/pki/keys/${keyId}/edit`);
+      await fillIn(SELECTORS.keyForm.keyNameInput, 'test-key');
+      await click(SELECTORS.keyForm.keyCreateButton);
+      assert.strictEqual(
+        currentURL(),
+        `/vault/secrets/${this.mountPath}/pki/keys/${keyId}/details`,
+        'navigates to details after save'
+      );
+      assert.dom(SELECTORS.keyPages.keyNameValue).hasText('test-key', 'updates key name');
+
+      // key generate and delete navigation
+      await visit(`/vault/secrets/${this.mountPath}/pki/keys`);
+      await click(SELECTORS.keyPages.generateKey);
+      assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/keys/create`);
+      await fillIn(SELECTORS.keyForm.typeInput, 'exported');
+      await fillIn(SELECTORS.keyForm.keyTypeInput, 'rsa');
+      await click(SELECTORS.keyForm.keyCreateButton);
+      keyId = find(SELECTORS.keyPages.keyIdValue).innerText;
+      assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/keys/${keyId}/details`);
+
+      assert
+        .dom(SELECTORS.alertBanner)
+        .hasText(
+          'Next steps This private key material will only be available once. 
Copy or download it now.',
+          'renders banner to save private key'
+        );
+      assert.dom(SELECTORS.keyPages.downloadButton).exists('renders download button');
+      await click(SELECTORS.keyPages.keyDeleteButton);
+      await click(SELECTORS.keyPages.confirmDelete);
+      assert.strictEqual(
+        currentURL(),
+        `/vault/secrets/${this.mountPath}/pki/keys`,
+        'navigates back to key list view on delete'
+      );
+    });
+
+    test('it hides correct actions for user with read policy', async function (assert) {
+      await authPage.login(this.pkiKeyReader);
+      await visit(`/vault/secrets/${this.mountPath}/pki/overview`);
+      await click(SELECTORS.keysTab);
+      assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/keys`);
+      await isSettled();
+      assert.dom(SELECTORS.keyPages.importKey).doesNotExist();
+      assert.dom(SELECTORS.keyPages.generateKey).doesNotExist();
+      assert.dom('.linked-block').exists({ count: 1 }, 'One key is in list');
+      const keyId = find(SELECTORS.keyPages.keyId).innerText;
+      await click(SELECTORS.keyPages.popupMenuTrigger);
+      assert.dom(SELECTORS.keyPages.popupMenuEdit).hasClass('disabled', 'popup menu edit link is disabled');
+      await click(SELECTORS.keyPages.popupMenuDetails);
+      assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/keys/${keyId}/details`);
+      assert.dom(SELECTORS.keyPages.keyDeleteButton).doesNotExist('Delete key button is not shown');
+      assert.dom(SELECTORS.keyPages.keyEditLink).doesNotExist('Edit key button does not render');
+    });
+
+    test('it shows correct toolbar items for the user with update policy', async function (assert) {
+      await authPage.login(this.pkiKeyEditor);
+      await visit(`/vault/secrets/${this.mountPath}/pki/overview`);
+      await click(SELECTORS.keysTab);
+      assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/keys`);
+      await isSettled();
+      assert.dom(SELECTORS.keyPages.importKey).exists('import action exists');
+      assert.dom(SELECTORS.keyPages.generateKey).exists('generate action exists');
+      assert.dom('.linked-block').exists({ count: 1 }, 'One key is in list');
+      const keyId = find(SELECTORS.keyPages.keyId).innerText;
+      await click(SELECTORS.keyPages.popupMenuTrigger);
+      assert
+        .dom(SELECTORS.keyPages.popupMenuEdit)
+        .doesNotHaveClass('disabled', 'popup menu edit link is not disabled');
+      await click('.linked-block');
+      assert.dom(SELECTORS.keyPages.keyDeleteButton).doesNotExist('Delete key button is not shown');
+      await click(SELECTORS.keyPages.keyEditLink);
+      assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/keys/${keyId}/edit`);
+      assert.dom(SELECTORS.keyPages.title).hasText('Edit key');
+      await click(SELECTORS.keyForm.keyCancelButton);
+      assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/keys/${keyId}/details`);
+    });
+  });
+
+  module('issuers', function (hooks) {
+    hooks.beforeEach(async function () {
+      await authPage.login();
+      // Configure engine with a default issuer
+      await runCommands([
+        `write ${this.mountPath}/root/generate/internal common_name="Hashicorp Test" name="Hashicorp Test"`,
+      ]);
+      await logout.visit();
+    });
+    test('lists the correct issuer metadata info', async function (assert) {
+      assert.expect(6);
+      await authPage.login(this.pkiAdminToken);
+      await visit(`/vault/secrets/${this.mountPath}/pki/overview`);
+      assert.dom(SELECTORS.issuersTab).exists('Issuers tab is present');
+      await click(SELECTORS.issuersTab);
+      assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/issuers`);
+      assert.dom('.linked-block').exists({ count: 1 }, 'One issuer is in 
list'); + assert.dom('[data-test-is-root-tag="0"]').hasText('root'); + assert.dom('[data-test-serial-number="0"]').exists({ count: 1 }, 'displays serial number tag'); + assert.dom('[data-test-common-name="0"]').exists({ count: 1 }, 'displays cert common name tag'); + }); + test('lists the correct issuer metadata info when user has only read permission', async function (assert) { + assert.expect(2); + await authPage.login(); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + await click(SELECTORS.issuersTab); + await click(SELECTORS.issuerPopupMenu); + await click(SELECTORS.issuerPopupDetails); + const issuerId = find(SELECTORS.issuerDetails.valueByName('Issuer ID')).innerText; + const pki_issuer_denied_policy = ` + path "${this.mountPath}/*" { + capabilities = ["create", "read", "update", "delete", "list"] + }, + path "${this.mountPath}/issuer/${issuerId}" { + capabilities = ["deny"] + } + `; + this.token = await tokenWithPolicy('pki-issuer-denied-policy', pki_issuer_denied_policy); + await logout.visit(); + await authPage.login(this.token); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + await click(SELECTORS.issuersTab); + assert.dom('[data-test-serial-number="0"]').exists({ count: 1 }, 'displays serial number tag'); + assert.dom('[data-test-common-name="0"]').doesNotExist('does not display cert common name tag'); + }); + + test('details view renders correct number of info items', async function (assert) { + assert.expect(13); + await authPage.login(this.pkiAdminToken); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + assert.dom(SELECTORS.issuersTab).exists('Issuers tab is present'); + await click(SELECTORS.issuersTab); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/issuers`); + assert.dom('.linked-block').exists({ count: 1 }, 'One issuer is in list'); + await click('.linked-block'); + assert.ok( + currentURL().match(`/vault/secrets/${this.mountPath}/pki/issuers/.+/details`), + `/vault/secrets/${this.mountPath}/pki/issuers/my-issuer/details` + ); + assert.dom(SELECTORS.issuerDetails.title).hasText('View issuer certificate'); + ['Certificate', 'CA Chain', 'Common name', 'Issuer name', 'Issuer ID', 'Default key ID'].forEach( + (label) => { + assert + .dom(`${SELECTORS.issuerDetails.defaultGroup} ${SELECTORS.issuerDetails.valueByName(label)}`) + .exists({ count: 1 }, `${label} value rendered`); + } + ); + assert + .dom(`${SELECTORS.issuerDetails.urlsGroup} ${SELECTORS.issuerDetails.row}`) + .exists({ count: 3 }, 'Renders 3 info table items under URLs group'); + assert.dom(SELECTORS.issuerDetails.groupTitle).exists({ count: 1 }, 'only 1 group title rendered'); + }); + + test('toolbar links navigate to expected routes', async function (assert) { + await authPage.login(this.pkiAdminToken); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + await click(SELECTORS.issuersTab); + await click(SELECTORS.issuerPopupMenu); + await click(SELECTORS.issuerPopupDetails); + + const issuerId = find(SELECTORS.issuerDetails.valueByName('Issuer ID')).innerText; + assert.strictEqual( + currentURL(), + `/vault/secrets/${this.mountPath}/pki/issuers/${issuerId}/details`, + 'it navigates to details route' + ); + assert + .dom(SELECTORS.issuerDetails.crossSign) + .hasAttribute('href', `/ui/vault/secrets/${this.mountPath}/pki/issuers/${issuerId}/cross-sign`); + assert + .dom(SELECTORS.issuerDetails.signIntermediate) + .hasAttribute('href', `/ui/vault/secrets/${this.mountPath}/pki/issuers/${issuerId}/sign`); + assert + 
.dom(SELECTORS.issuerDetails.configure)
+        .hasAttribute('href', `/ui/vault/secrets/${this.mountPath}/pki/issuers/${issuerId}/edit`);
+      await click(SELECTORS.issuerDetails.rotateRoot);
+      assert.dom(find(SELECTORS.issuerDetails.rotateModal).parentElement).hasClass('is-active');
+      await click(SELECTORS.issuerDetails.rotateModalGenerate);
+      assert.strictEqual(
+        currentURL(),
+        `/vault/secrets/${this.mountPath}/pki/issuers/${issuerId}/rotate-root`,
+        'it navigates to root rotate form'
+      );
+      assert
+        .dom('[data-test-input="commonName"]')
+        .hasValue('Hashicorp Test', 'form prefilled with parent issuer cn');
+    });
+  });
+
+  module('rotate', function (hooks) {
+    hooks.beforeEach(async function () {
+      await authPage.login();
+      await runCommands([`write ${this.mountPath}/root/generate/internal issuer_name="existing-issuer"`]);
+      await logout.visit();
+    });
+    test('it renders a warning banner when parent issuer has unsupported OIDs', async function (assert) {
+      await authPage.login();
+      await visit(`/vault/secrets/${this.mountPath}/pki/configuration/create`);
+      await click(SELECTORS.configuration.optionByKey('import'));
+      await click('[data-test-text-toggle]');
+      await fillIn('[data-test-text-file-textarea]', unsupportedPem);
+      await click(SELECTORS.configuration.importSubmit);
+      const issuerId = find(SELECTORS.configuration.importedIssuer).innerText;
+      await click(`${SELECTORS.configuration.importedIssuer} a`);
+
+      // navigating directly to route because the rotate button is not visible for non-root issuers
+      // but we're just testing that route model was parsed and passed as expected
+      await visit(`/vault/secrets/${this.mountPath}/pki/issuers/${issuerId}/rotate-root`);
+      assert
+        .dom('[data-test-warning-banner]')
+        .hasTextContaining(
+          'Not all of the certificate values could be parsed and transferred to new root',
+          'it renders warning banner'
+        );
+      assert.dom('[data-test-input="commonName"]').hasValue('fancy-cert-unsupported-subj-and-ext-oids');
+      await fillIn('[data-test-input="issuerName"]', 'existing-issuer');
+      await click('[data-test-pki-rotate-root-save]');
+      assert
+        .dom('[data-test-error-banner]')
+        .hasText('Error issuer name already in use', 'it renders error banner');
+    });
+  });
+
+  module('config', function (hooks) {
+    hooks.beforeEach(async function () {
+      await authPage.login();
+      await runCommands([`write ${this.mountPath}/root/generate/internal issuer_name="existing-issuer"`]);
+      const mixed_config_policy = `
+        ${adminPolicy(this.mountPath)}
+        ${readerPolicy(this.mountPath, 'config/cluster')}
+      `;
+      this.mixedConfigCapabilities = await tokenWithPolicy('pki-reader', mixed_config_policy);
+      await logout.visit();
+    });
+
+    test('it updates config when user only has permission to some endpoints', async function (assert) {
+      await authPage.login(this.mixedConfigCapabilities);
+      await visit(`/vault/secrets/${this.mountPath}/pki/configuration/edit`);
+      assert
+        .dom(`${SELECTORS.configEdit.configEditSection} [data-test-component="empty-state"]`)
+        .hasText(
+          `You do not have permission to set this mount's the cluster config Ask your administrator if you think you should have access to: POST /${this.mountPath}/config/cluster`
+        );
+      assert.dom(SELECTORS.configEdit.acmeEditSection).exists();
+      assert.dom(SELECTORS.configEdit.urlsEditSection).exists();
+      assert.dom(SELECTORS.configEdit.crlEditSection).exists();
+      assert.dom(`${SELECTORS.configEdit.acmeEditSection} [data-test-component="empty-state"]`).doesNotExist();
+      assert.dom(`${SELECTORS.configEdit.urlsEditSection} 
[data-test-component="empty-state"]`).doesNotExist(); + assert.dom(`${SELECTORS.crlEditSection} [data-test-component="empty-state"]`).doesNotExist(); + await click(SELECTORS.configEdit.crlToggleInput('expiry')); + await click(SELECTORS.configEdit.saveButton); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/configuration`); + assert + .dom('[data-test-value-div="CRL building"]') + .hasText('Disabled', 'Successfully saves config with partial permission'); + }); + }); +}); diff --git a/ui/tests/acceptance/pki/pki-overview-test.js b/ui/tests/acceptance/pki/pki-overview-test.js new file mode 100644 index 0000000..fcdf2e4 --- /dev/null +++ b/ui/tests/acceptance/pki/pki-overview-test.js @@ -0,0 +1,126 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import authPage from 'vault/tests/pages/auth'; +import logout from 'vault/tests/pages/logout'; +import enablePage from 'vault/tests/pages/settings/mount-secret-backend'; +import { click, currentURL, currentRouteName, visit } from '@ember/test-helpers'; +import { SELECTORS } from 'vault/tests/helpers/pki/overview'; +import { tokenWithPolicy, runCommands } from 'vault/tests/helpers/pki/pki-run-commands'; + +module('Acceptance | pki overview', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(async function () { + await authPage.login(); + // Setup PKI engine + const mountPath = `pki`; + await enablePage.enable('pki', mountPath); + this.mountPath = mountPath; + await runCommands([`write ${this.mountPath}/root/generate/internal common_name="Hashicorp Test"`]); + const pki_admin_policy = ` + path "${this.mountPath}/*" { + capabilities = ["create", "read", "update", "delete", "list"] + }, + `; + const pki_issuers_list_policy = ` + path "${this.mountPath}/issuers" { + capabilities = ["list"] + }, + `; + const pki_roles_list_policy = ` + path "${this.mountPath}/roles" { + capabilities = ["list"] + }, + `; + + this.pkiRolesList = await tokenWithPolicy('pki-roles-list', pki_roles_list_policy); + this.pkiIssuersList = await tokenWithPolicy('pki-issuers-list', pki_issuers_list_policy); + this.pkiAdminToken = await tokenWithPolicy('pki-admin', pki_admin_policy); + await logout.visit(); + }); + + hooks.afterEach(async function () { + await logout.visit(); + await authPage.login(); + // Cleanup engine + await runCommands([`delete sys/mounts/${this.mountPath}`]); + await logout.visit(); + }); + + test('navigates to view issuers when link is clicked on issuer card', async function (assert) { + await authPage.login(this.pkiAdminToken); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + assert.dom(SELECTORS.issuersCardTitle).hasText('Issuers'); + assert.dom(SELECTORS.issuersCardOverviewNum).hasText('1'); + await click(SELECTORS.issuersCardLink); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/issuers`); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + }); + + test('navigates to view roles when link is clicked on roles card', async function (assert) { + await authPage.login(this.pkiAdminToken); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + assert.dom(SELECTORS.rolesCardTitle).hasText('Roles'); + assert.dom(SELECTORS.rolesCardOverviewNum).hasText('0'); + await click(SELECTORS.rolesCardLink); + assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/roles`); + await runCommands([ + `write 
${this.mountPath}/roles/some-role \ + issuer_ref="default" \ + allowed_domains="example.com" \ + allow_subdomains=true \ + max_ttl="720h"`, + ]); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + assert.dom(SELECTORS.rolesCardOverviewNum).hasText('1'); + }); + + test('hides roles card if user does not have permissions', async function (assert) { + await authPage.login(this.pkiIssuersList); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + assert.dom(SELECTORS.rolesCardTitle).doesNotExist('Roles card does not exist'); + assert.dom(SELECTORS.issuersCardTitle).exists('Issuers card exists'); + }); + + test('navigates to generate certificate page for Issue Certificates card', async function (assert) { + await authPage.login(this.pkiAdminToken); + await runCommands([ + `write ${this.mountPath}/roles/some-role \ + issuer_ref="default" \ + allowed_domains="example.com" \ + allow_subdomains=true \ + max_ttl="720h"`, + ]); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + await click(SELECTORS.issueCertificatePowerSearch); + await click(SELECTORS.firstPowerSelectOption); + await click(SELECTORS.issueCertificateButton); + assert.strictEqual(currentRouteName(), 'vault.cluster.secrets.backend.pki.roles.role.generate'); + }); + + test('navigates to certificate details page for View Certificates card', async function (assert) { + await authPage.login(this.pkiAdminToken); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + await click(SELECTORS.viewCertificatePowerSearch); + await click(SELECTORS.firstPowerSelectOption); + await click(SELECTORS.viewCertificateButton); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backend.pki.certificates.certificate.details' + ); + }); + + test('navigates to issuer details page for View Issuer card', async function (assert) { + await authPage.login(this.pkiAdminToken); + await visit(`/vault/secrets/${this.mountPath}/pki/overview`); + await click(SELECTORS.viewIssuerPowerSearch); + await click(SELECTORS.firstPowerSelectOption); + await click(SELECTORS.viewIssuerButton); + assert.strictEqual(currentRouteName(), 'vault.cluster.secrets.backend.pki.issuers.issuer.details'); + }); +}); diff --git a/ui/tests/acceptance/pki/pki-tidy-test.js b/ui/tests/acceptance/pki/pki-tidy-test.js new file mode 100644 index 0000000..4d6907c --- /dev/null +++ b/ui/tests/acceptance/pki/pki-tidy-test.js @@ -0,0 +1,181 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import { module, test } from 'qunit';
+import { setupApplicationTest } from 'ember-qunit';
+import { click, currentRouteName, fillIn, visit } from '@ember/test-helpers';
+
+import { setupMirage } from 'ember-cli-mirage/test-support';
+import { v4 as uuidv4 } from 'uuid';
+
+import authPage from 'vault/tests/pages/auth';
+import logout from 'vault/tests/pages/logout';
+import enablePage from 'vault/tests/pages/settings/mount-secret-backend';
+import { runCommands } from 'vault/tests/helpers/pki/pki-run-commands';
+import { SELECTORS } from 'vault/tests/helpers/pki/page/pki-tidy';
+
+module('Acceptance | pki tidy', function (hooks) {
+  setupApplicationTest(hooks);
+  setupMirage(hooks);
+
+  hooks.beforeEach(async function () {
+    await authPage.login();
+    // Setup PKI engine
+    const mountPath = `pki-workflow-${uuidv4()}`;
+    await enablePage.enable('pki', mountPath);
+    this.mountPath = mountPath;
+    await runCommands([
+      `write ${this.mountPath}/root/generate/internal common_name="Hashicorp Test" name="Hashicorp Test"`,
+    ]);
+    await logout.visit();
+  });
+
+  hooks.afterEach(async function () {
+    await logout.visit();
+    await authPage.login();
+    // Cleanup engine
+    await runCommands([`delete sys/mounts/${this.mountPath}`]);
+    await logout.visit();
+  });
+
+  test('it configures a manual tidy operation and shows its details and tidy states', async function (assert) {
+    await authPage.login(this.pkiAdminToken);
+    await visit(`/vault/secrets/${this.mountPath}/pki/tidy`);
+    await click(SELECTORS.tidyEmptyStateConfigure);
+    assert.dom(SELECTORS.tidyConfigureModal.configureTidyModal).exists('Configure tidy modal exists');
+    assert.dom(SELECTORS.tidyConfigureModal.tidyModalAutoButton).exists('Configure auto tidy button exists');
+    assert
+      .dom(SELECTORS.tidyConfigureModal.tidyModalManualButton)
+      .exists('Configure manual tidy button exists');
+    await click(SELECTORS.tidyConfigureModal.tidyModalManualButton);
+    assert.dom(SELECTORS.tidyForm.tidyFormName('manual')).exists('Manual tidy form exists');
+    await click(SELECTORS.tidyForm.inputByAttr('tidyCertStore'));
+    await fillIn(SELECTORS.tidyForm.tidyPauseDuration, '10');
+    await click(SELECTORS.tidyForm.tidySave);
+    await click(SELECTORS.cancelTidyAction);
+    assert.dom(SELECTORS.cancelTidyModalBackground).exists('Confirm cancel tidy modal exists');
+    await click(SELECTORS.tidyConfigureModal.tidyModalCancelButton);
+    // we can't properly test the background refresh fetching of tidy status in tests, so stub the response
+    this.server.get(`${this.mountPath}/tidy-status`, () => {
+      return {
+        request_id: 'dba2d42d-1a6e-1551-80f8-4ddb364ede4b',
+        lease_id: '',
+        renewable: false,
+        lease_duration: 0,
+        data: {
+          acme_account_deleted_count: 0,
+          acme_account_revoked_count: 0,
+          acme_account_safety_buffer: 2592000,
+          acme_orders_deleted_count: 0,
+          cert_store_deleted_count: 0,
+          cross_revoked_cert_deleted_count: 0,
+          current_cert_store_count: null,
+          current_revoked_cert_count: null,
+          error: null,
+          internal_backend_uuid: '964a41f7-a159-53aa-d62e-fc1914e4a7e1',
+          issuer_safety_buffer: 31536000,
+          last_auto_tidy_finished: '2023-05-19T10:27:11.721825-07:00',
+          message: 'Tidying certificate store: checking entry 0 of 1',
+          missing_issuer_cert_count: 0,
+          pause_duration: '1m40s',
+          revocation_queue_deleted_count: 0,
+          revocation_queue_safety_buffer: 36000,
+          revoked_cert_deleted_count: 0,
+          safety_buffer: 2073600,
+          state: 'Cancelled',
+          tidy_acme: false,
+          tidy_cert_store: true,
+          tidy_cross_cluster_revoked_certs: false,
+
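          // `state: 'Cancelled'` above, together with the flags and timestamps below, is what
+          // drives the "Tidy operation cancelled" banner asserted at the end of this test.
+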
tidy_expired_issuers: false, + tidy_move_legacy_ca_bundle: false, + tidy_revocation_queue: false, + tidy_revoked_cert_issuer_associations: false, + tidy_revoked_certs: false, + time_finished: '2023-05-19T10:28:51.733092-07:00', + time_started: '2023-05-19T10:27:11.721846-07:00', + total_acme_account_count: 0, + }, + wrap_info: null, + warnings: null, + auth: null, + }; + }); + await visit(`/vault/secrets/${this.mountPath}/pki/configuration`); + await visit(`/vault/secrets/${this.mountPath}/pki/tidy`); + assert.dom(SELECTORS.hdsAlertTitle).hasText('Tidy operation cancelled'); + assert + .dom(SELECTORS.hdsAlertDescription) + .hasText( + 'Your tidy operation has been cancelled. If this was a mistake configure and run another tidy operation.' + ); + assert.dom(SELECTORS.alertUpdatedAt).exists(); + }); + + test('it configures an auto tidy operation and shows its details', async function (assert) { + await authPage.login(this.pkiAdminToken); + await visit(`/vault/secrets/${this.mountPath}/pki/tidy`); + await click(SELECTORS.tidyEmptyStateConfigure); + assert.dom(SELECTORS.tidyConfigureModal.configureTidyModal).exists('Configure tidy modal exists'); + assert.dom(SELECTORS.tidyConfigureModal.tidyModalAutoButton).exists('Configure auto tidy button exists'); + assert + .dom(SELECTORS.tidyConfigureModal.tidyModalManualButton) + .exists('Configure manual tidy button exists'); + await click(SELECTORS.tidyConfigureModal.tidyModalAutoButton); + assert.dom(SELECTORS.tidyForm.tidyFormName('auto')).exists('Auto tidy form exists'); + await click(SELECTORS.tidyForm.tidyCancel); + assert.strictEqual(currentRouteName(), 'vault.cluster.secrets.backend.pki.tidy.index'); + await click(SELECTORS.tidyEmptyStateConfigure); + await click(SELECTORS.tidyConfigureModal.tidyModalAutoButton); + assert.dom(SELECTORS.tidyForm.tidyFormName('auto')).exists('Auto tidy form exists'); + await click(SELECTORS.tidyForm.toggleLabel('Automatic tidy disabled')); + assert + .dom(SELECTORS.tidyForm.tidySectionHeader('ACME operations')) + .exists('Auto tidy form enabled shows ACME operations field'); + await click(SELECTORS.tidyForm.inputByAttr('tidyCertStore')); + await click(SELECTORS.tidyForm.tidySave); + assert.strictEqual(currentRouteName(), 'vault.cluster.secrets.backend.pki.tidy.auto.index'); + await click(SELECTORS.tidyForm.editAutoTidyButton); + assert.strictEqual(currentRouteName(), 'vault.cluster.secrets.backend.pki.tidy.auto.configure'); + await click(SELECTORS.tidyForm.inputByAttr('tidyRevokedCerts')); + await click(SELECTORS.tidyForm.tidySave); + assert.strictEqual(currentRouteName(), 'vault.cluster.secrets.backend.pki.tidy.auto.index'); + }); + + test('it opens a tidy modal when the user clicks on the tidy toolbar action', async function (assert) { + await authPage.login(this.pkiAdminToken); + await visit(`/vault/secrets/${this.mountPath}/pki/tidy`); + await click(SELECTORS.tidyConfigureModal.tidyOptionsModal); + assert.dom(SELECTORS.tidyConfigureModal.configureTidyModal).exists('Configure tidy modal exists'); + assert.dom(SELECTORS.tidyConfigureModal.tidyModalAutoButton).exists('Configure auto tidy button exists'); + assert + .dom(SELECTORS.tidyConfigureModal.tidyModalManualButton) + .exists('Configure manual tidy button exists'); + await click(SELECTORS.tidyConfigureModal.tidyModalCancelButton); + assert.dom(SELECTORS.tidyEmptyState).exists(); + }); + + test('it should show correct toolbar action depending on whether auto tidy is enabled', async function (assert) { + await authPage.login(this.pkiAdminToken); + await 
visit(`/vault/secrets/${this.mountPath}/pki/tidy`); + assert + .dom(SELECTORS.tidyConfigureModal.tidyOptionsModal) + .exists('Configure tidy modal options button exists'); + await click(SELECTORS.tidyConfigureModal.tidyOptionsModal); + assert.dom(SELECTORS.tidyConfigureModal.configureTidyModal).exists('Configure tidy modal exists'); + await click(SELECTORS.tidyConfigureModal.tidyOptionsModal); + await click(SELECTORS.tidyConfigureModal.tidyModalAutoButton); + await click(SELECTORS.tidyForm.toggleLabel('Automatic tidy disabled')); + await click(SELECTORS.tidyForm.inputByAttr('tidyCertStore')); + await click(SELECTORS.tidyForm.inputByAttr('tidyRevokedCerts')); + await click(SELECTORS.tidyForm.tidySave); + await visit(`/vault/secrets/${this.mountPath}/pki/tidy`); + assert + .dom(SELECTORS.manualTidyToolbar) + .exists('Manual tidy toolbar action exists if auto tidy is configured'); + assert + .dom(SELECTORS.autoTidyToolbar) + .exists('Auto tidy toolbar action exists if auto tidy is configured'); + }); +}); diff --git a/ui/tests/acceptance/policies-acl-old-test.js b/ui/tests/acceptance/policies-acl-old-test.js new file mode 100644 index 0000000..6ae3571 --- /dev/null +++ b/ui/tests/acceptance/policies-acl-old-test.js @@ -0,0 +1,90 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { click, fillIn, find, currentURL, waitUntil } from '@ember/test-helpers'; +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { v4 as uuidv4 } from 'uuid'; + +import page from 'vault/tests/pages/policies/index'; +import authPage from 'vault/tests/pages/auth'; + +module('Acceptance | policies (old)', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(function () { + this.uid = uuidv4(); + return authPage.login(); + }); + + test('policies', async function (assert) { + const policyString = 'path "*" { capabilities = ["update"]}'; + const policyName = `Policy test ${this.uid}`; + const policyLower = policyName.toLowerCase(); + + await page.visit({ type: 'acl' }); + // new policy creation + await click('[data-test-policy-create-link]'); + + await fillIn('[data-test-policy-input="name"]', policyName); + await click('[data-test-policy-save]'); + assert + .dom('[data-test-error]') + .hasText(`Error 'policy' parameter not supplied or empty`, 'renders error message on save'); + find('.CodeMirror').CodeMirror.setValue(policyString); + await click('[data-test-policy-save]'); + + await waitUntil(() => currentURL() === `/vault/policy/acl/${encodeURIComponent(policyLower)}`); + assert.strictEqual( + currentURL(), + `/vault/policy/acl/${encodeURIComponent(policyLower)}`, + 'navigates to policy show on successful save' + ); + assert.dom('[data-test-policy-name]').hasText(policyLower, 'displays the policy name on the show page'); + assert.dom('[data-test-flash-message].is-info').doesNotExist('no flash message is displayed on save'); + await click('[data-test-policy-list-link]'); + + assert + .dom(`[data-test-policy-link="${policyLower}"]`) + .exists({ count: 1 }, 'new policy shown in the list'); + + // policy deletion + await click(`[data-test-policy-link="${policyLower}"]`); + + await click('[data-test-policy-edit-toggle]'); + + await click('[data-test-policy-delete] button'); + + await click('[data-test-confirm-button]'); + await waitUntil(() => currentURL() === `/vault/policies/acl`); + assert.strictEqual( + currentURL(), + `/vault/policies/acl`, + 'navigates to policy list on successful deletion' + ); + assert + 
.dom(`[data-test-policy-item="${policyLower}"]`) + .doesNotExist('deleted policy is not shown in the list'); + }); + + // https://github.com/hashicorp/vault/issues/4395 + test('it properly fetches policies when the name ends in a ,', async function (assert) { + const policyString = 'path "*" { capabilities = ["update"]}'; + const policyName = `${this.uid}-policy-symbol,.`; + + await page.visit({ type: 'acl' }); + // new policy creation + await click('[data-test-policy-create-link]'); + + await fillIn('[data-test-policy-input="name"]', policyName); + find('.CodeMirror').CodeMirror.setValue(policyString); + await click('[data-test-policy-save]'); + assert.ok( + await waitUntil(() => currentURL() === `/vault/policy/acl/${policyName}`), + 'navigates to policy show on successful save' + ); + assert.dom('[data-test-policy-edit-toggle]').exists({ count: 1 }, 'shows the edit toggle'); + }); +}); diff --git a/ui/tests/acceptance/policies-test.js b/ui/tests/acceptance/policies-test.js new file mode 100644 index 0000000..5e545bb --- /dev/null +++ b/ui/tests/acceptance/policies-test.js @@ -0,0 +1,33 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { currentURL, currentRouteName, visit } from '@ember/test-helpers'; +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import authPage from 'vault/tests/pages/auth'; + +module('Acceptance | policies', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(function () { + return authPage.login(); + }); + + test('it redirects to acls on unknown policy type', async function (assert) { + await visit('/vault/policy/foo/default'); + assert.strictEqual(currentRouteName(), 'vault.cluster.policies.index'); + assert.strictEqual(currentURL(), '/vault/policies/acl'); + + await visit('/vault/policy/foo/default/edit'); + assert.strictEqual(currentRouteName(), 'vault.cluster.policies.index'); + assert.strictEqual(currentURL(), '/vault/policies/acl'); + }); + + test('it redirects to acls on index navigation', async function (assert) { + await visit('/vault/policy/acl'); + assert.strictEqual(currentRouteName(), 'vault.cluster.policies.index'); + assert.strictEqual(currentURL(), '/vault/policies/acl'); + }); +}); diff --git a/ui/tests/acceptance/policies/index-test.js b/ui/tests/acceptance/policies/index-test.js new file mode 100644 index 0000000..feb8eda --- /dev/null +++ b/ui/tests/acceptance/policies/index-test.js @@ -0,0 +1,61 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { currentURL, currentRouteName, settled, find, findAll, click } from '@ember/test-helpers'; +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { create } from 'ember-cli-page-object'; + +import page from 'vault/tests/pages/policies/index'; +import authPage from 'vault/tests/pages/auth'; +import consoleClass from 'vault/tests/pages/components/console/ui-panel'; + +const consoleComponent = create(consoleClass); + +module('Acceptance | policies/acl', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(function () { + return authPage.login(); + }); + + test('it lists default and root acls', async function (assert) { + await page.visit({ type: 'acl' }); + await settled(); + assert.strictEqual(currentURL(), '/vault/policies/acl'); + assert.ok(page.findPolicyByName('default'), 'default policy shown in the list'); + if (find('nav.pagination')) { + // Root ACL is always last in the list + const paginationLinks = findAll('.pagination-link'); + await click(paginationLinks[paginationLinks.length - 1]); + } + assert.ok(page.findPolicyByName('root'), 'root policy shown in the list'); + }); + + test('it navigates to show when clicking on the link', async function (assert) { + await page.visit({ type: 'acl' }); + await settled(); + await page.findPolicyByName('default').click(); + await settled(); + assert.strictEqual(currentRouteName(), 'vault.cluster.policy.show'); + assert.strictEqual(currentURL(), '/vault/policy/acl/default'); + }); + + test('it allows deletion of policies with dots in names', async function (assert) { + const POLICY = 'path "*" { capabilities = ["list"]}'; + const policyName = 'list.policy'; + await consoleComponent.runCommands([`write sys/policies/acl/${policyName} policy=${btoa(POLICY)}`]); + await settled(); + await page.visit({ type: 'acl' }); + await settled(); + const policy = page.row.filterBy('name', policyName)[0]; + assert.ok(policy, 'policy is shown in the list'); + await policy.menu(); + await settled(); + await page.delete().confirmDelete(); + await settled(); + assert.notOk(page.findPolicyByName(policyName), 'policy is deleted successfully'); + }); +}); diff --git a/ui/tests/acceptance/policy-test.js b/ui/tests/acceptance/policy-test.js new file mode 100644 index 0000000..4df2580 --- /dev/null +++ b/ui/tests/acceptance/policy-test.js @@ -0,0 +1,28 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { currentURL, currentRouteName, visit } from '@ember/test-helpers'; +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import authPage from 'vault/tests/pages/auth'; +import logout from 'vault/tests/pages/logout'; + +module('Acceptance | policies', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(function () { + return authPage.login(); + }); + + hooks.afterEach(function () { + return logout.visit(); + }); + + test('it redirects to acls with unknown policy type', async function (assert) { + await visit('/vault/policies/foo'); + assert.strictEqual(currentRouteName(), 'vault.cluster.policies.index'); + assert.strictEqual(currentURL(), '/vault/policies/acl'); + }); +}); diff --git a/ui/tests/acceptance/policy/edit-test.js b/ui/tests/acceptance/policy/edit-test.js new file mode 100644 index 0000000..b67e03e --- /dev/null +++ b/ui/tests/acceptance/policy/edit-test.js @@ -0,0 +1,37 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { currentURL } from '@ember/test-helpers'; +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import page from 'vault/tests/pages/policy/edit'; +import authPage from 'vault/tests/pages/auth'; + +module('Acceptance | policy/acl/:name/edit', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(function () { + return authPage.login(); + }); + + test('it redirects to list if navigating to root', async function (assert) { + await page.visit({ type: 'acl', name: 'root' }); + assert.strictEqual( + currentURL(), + '/vault/policies/acl', + 'navigation to root show redirects you to policy list' + ); + }); + + test('it does not show delete for default policy', async function (assert) { + await page.visit({ type: 'acl', name: 'default' }); + assert.notOk(page.deleteIsPresent, 'there is no delete button'); + }); + + test('it navigates to show when the toggle is clicked', async function (assert) { + await page.visit({ type: 'acl', name: 'default' }).toggleEdit(); + assert.strictEqual(currentURL(), '/vault/policy/acl/default', 'toggle navigates from edit to show'); + }); +}); diff --git a/ui/tests/acceptance/policy/show-test.js b/ui/tests/acceptance/policy/show-test.js new file mode 100644 index 0000000..6b1032d --- /dev/null +++ b/ui/tests/acceptance/policy/show-test.js @@ -0,0 +1,32 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { currentURL } from '@ember/test-helpers'; +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import page from 'vault/tests/pages/policy/show'; +import authPage from 'vault/tests/pages/auth'; + +module('Acceptance | policy/acl/:name', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(function () { + return authPage.login(); + }); + + test('it redirects to list if navigating to root', async function (assert) { + await page.visit({ type: 'acl', name: 'root' }); + assert.strictEqual( + currentURL(), + '/vault/policies/acl', + 'navigation to root show redirects you to policy list' + ); + }); + + test('it navigates to edit when the toggle is clicked', async function (assert) { + await page.visit({ type: 'acl', name: 'default' }).toggleEdit(); + assert.strictEqual(currentURL(), '/vault/policy/acl/default/edit', 'toggle navigates to edit page'); + }); +}); diff --git a/ui/tests/acceptance/raft-storage-test.js b/ui/tests/acceptance/raft-storage-test.js new file mode 100644 index 0000000..1209d76 --- /dev/null +++ b/ui/tests/acceptance/raft-storage-test.js @@ -0,0 +1,78 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import { click, visit } from '@ember/test-helpers'; +import authPage from 'vault/tests/pages/auth'; +import logout from 'vault/tests/pages/logout'; + +module('Acceptance | raft storage', function (hooks) { + setupApplicationTest(hooks); + setupMirage(hooks); + + hooks.beforeEach(async function () { + this.config = this.server.create('configuration', 'withRaft'); + this.server.get('/sys/internal/ui/resultant-acl', () => + this.server.create('configuration', { data: { root: true } }) + ); + this.server.get('/sys/license/features', () => ({})); + await authPage.login(); + }); + hooks.afterEach(function () { + return logout.visit(); + }); + + test('it should render correct number of raft peers', async function (assert) { + assert.expect(3); + + let didRemovePeer = false; + this.server.get('/sys/storage/raft/configuration', () => { + if (didRemovePeer) { + this.config.data.config.servers.pop(); + } else { + // consider peer removed by external means (cli) after initial request + didRemovePeer = true; + } + return this.config; + }); + + await visit('/vault/storage/raft'); + assert.dom('[data-raft-row]').exists({ count: 2 }, '2 raft peers render in table'); + // leave route and return to trigger config fetch + await visit('/vault/secrets'); + await visit('/vault/storage/raft'); + const store = this.owner.lookup('service:store'); + assert.strictEqual( + store.peekAll('server').length, + 2, + 'Store contains 2 server records since remove peer was triggered externally' + ); + assert.dom('[data-raft-row]').exists({ count: 1 }, 'Only raft nodes from response are rendered'); + }); + + test('it should remove raft peer', async function (assert) { + assert.expect(3); + + this.server.get('/sys/storage/raft/configuration', () => this.config); + this.server.post('/sys/storage/raft/remove-peer', (schema, req) => { + const body = JSON.parse(req.requestBody); + assert.strictEqual( + body.server_id, + this.config.data.config.servers[1].node_id, + 'Remove peer request made with node id' + ); + return {}; + }); + + await visit('/vault/storage/raft'); + assert.dom('[data-raft-row]').exists({ count: 2 }, '2 raft peers render in table'); + await click('[data-raft-row]:nth-child(2) [data-test-popup-menu-trigger]'); + await click('[data-test-confirm-action-trigger]'); + await click('[data-test-confirm-button]'); + assert.dom('[data-raft-row]').exists({ count: 1 }, 'Raft peer successfully removed'); + }); +}); diff --git a/ui/tests/acceptance/redirect-to-test.js b/ui/tests/acceptance/redirect-to-test.js new file mode 100644 index 0000000..a5f1680 --- /dev/null +++ b/ui/tests/acceptance/redirect-to-test.js @@ -0,0 +1,100 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import { currentURL, visit as _visit, settled } from '@ember/test-helpers';
+import { module, test } from 'qunit';
+import { setupApplicationTest } from 'ember-qunit';
+import { create } from 'ember-cli-page-object';
+import auth from 'vault/tests/pages/auth';
+import consoleClass from 'vault/tests/pages/components/console/ui-panel';
+
+const visit = async (url) => {
+ try {
+ await _visit(url);
+ } catch (e) {
+ if (e.message !== 'TransitionAborted') {
+ throw e;
+ }
+ }
+
+ await settled();
+};
+
+const consoleComponent = create(consoleClass);
+
+const wrappedAuth = async () => {
+ await consoleComponent.runCommands(`write -field=token auth/token/create policies=default -wrap-ttl=5m`);
+ await settled();
+ // because of a flaky test, capture the token using a DOM selector instead of the page object
+ const token = document.querySelector('[data-test-component="console/log-text"] pre').textContent;
+ if (token.includes('Error')) {
+ throw new Error(`Error creating wrapped token: ${token}`);
+ }
+ return token;
+};
+
+const setupWrapping = async () => {
+ await auth.logout();
+ await settled();
+ await auth.visit();
+ await settled();
+ await auth.tokenInput('root').submit();
+ await settled();
+ const wrappedToken = await wrappedAuth();
+ return wrappedToken;
+};
+module('Acceptance | redirect_to query param functionality', function (hooks) {
+ setupApplicationTest(hooks);
+
+ hooks.beforeEach(function () {
+ // normally we'd use the auth.logout helper to visit the route and reset the app, but in this case that
+ // also routes us to the auth page, and then all of the transitions from the auth page get redirected back
+ // to the auth page, resulting in no redirect_to query param being set
+ localStorage.clear();
+ });
+ test('redirect to a route after authentication', async function (assert) {
+ const url = '/vault/secrets/secret/create';
+ await visit(url);
+ assert.ok(
+ currentURL().includes(`redirect_to=${encodeURIComponent(url)}`),
+ 'encodes url for the query param'
+ );
+ // the login method on this page does another visit call that we don't want here
+ await auth.tokenInput('root').submit();
+ await settled();
+ assert.strictEqual(currentURL(), url, 'navigates to the redirect_to url after auth');
+ });
+
+ test('redirect from root does not include redirect_to', async function (assert) {
+ const url = '/';
+ await visit(url);
+ assert.ok(currentURL().indexOf('redirect_to') < 0, 'there is no redirect_to query param');
+ });
+
+ test('redirect to a route after authentication with a query param', async function (assert) {
+ const url = '/vault/secrets/secret/create?initialKey=hello';
+ await visit(url);
+ assert.ok(
+ currentURL().includes(`?redirect_to=${encodeURIComponent(url)}`),
+ 'encodes url for the query param'
+ );
+ await auth.tokenInput('root').submit();
+ await settled();
+ assert.strictEqual(currentURL(), url, 'navigates to the redirect_to with the query param after auth');
+ });
+
+ test('redirect to logout with wrapped token authenticates you', async function (assert) {
+ const wrappedToken = await setupWrapping();
+ const url = '/vault/secrets/cubbyhole/create';
+
+ await auth.logout({
+ redirect_to: url,
+ wrapped_token: wrappedToken,
+ });
+ await settled();
+
+ assert.strictEqual(currentURL(), url, 'authenticates then navigates to the redirect_to url after auth');
+ });
+}); diff --git a/ui/tests/acceptance/secrets/backend/alicloud/secret-test.js b/ui/tests/acceptance/secrets/backend/alicloud/secret-test.js new 
file mode 100644 index 0000000..6e7306a --- /dev/null +++ b/ui/tests/acceptance/secrets/backend/alicloud/secret-test.js @@ -0,0 +1,40 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { currentRouteName, settled } from '@ember/test-helpers'; +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { v4 as uuidv4 } from 'uuid'; + +import mountSecrets from 'vault/tests/pages/settings/mount-secret-backend'; +import backendsPage from 'vault/tests/pages/secrets/backends'; +import authPage from 'vault/tests/pages/auth'; + +module('Acceptance | alicloud/enable', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(function () { + this.uid = uuidv4(); + return authPage.login(); + }); + + test('enable alicloud', async function (assert) { + const enginePath = `alicloud-${this.uid}`; + await mountSecrets.visit(); + await settled(); + await mountSecrets.selectType('alicloud'); + await settled(); + await mountSecrets.next().path(enginePath).submit(); + await settled(); + + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backends', + 'redirects to the backends page' + ); + await settled(); + assert.ok(backendsPage.rows.filterBy('path', `${enginePath}/`)[0], 'shows the alicloud engine'); + }); +}); diff --git a/ui/tests/acceptance/secrets/backend/cubbyhole/secret-test.js b/ui/tests/acceptance/secrets/backend/cubbyhole/secret-test.js new file mode 100644 index 0000000..d50ddcd --- /dev/null +++ b/ui/tests/acceptance/secrets/backend/cubbyhole/secret-test.js @@ -0,0 +1,52 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { currentRouteName, settled } from '@ember/test-helpers'; +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { v4 as uuidv4 } from 'uuid'; + +import editPage from 'vault/tests/pages/secrets/backend/kv/edit-secret'; +import showPage from 'vault/tests/pages/secrets/backend/kv/show'; +import listPage from 'vault/tests/pages/secrets/backend/list'; +import apiStub from 'vault/tests/helpers/noop-all-api-requests'; +import authPage from 'vault/tests/pages/auth'; + +module('Acceptance | secrets/cubbyhole/create', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(function () { + this.uid = uuidv4(); + this.server = apiStub({ usePassthrough: true }); + return authPage.login(); + }); + + hooks.afterEach(function () { + this.server.shutdown(); + }); + + test('it creates and can view a secret with the cubbyhole backend', async function (assert) { + const kvPath = `cubbyhole-kv-${this.uid}`; + await listPage.visitRoot({ backend: 'cubbyhole' }); + await settled(); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backend.list-root', + 'navigates to the list page' + ); + + await listPage.create(); + await settled(); + await editPage.createSecret(kvPath, 'foo', 'bar'); + await settled(); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backend.show', + 'redirects to the show page' + ); + assert.dom('[data-test-created-time]').hasText('', 'it does not render created time if blank'); + assert.ok(showPage.editIsPresent, 'shows the edit button'); + }); +}); diff --git a/ui/tests/acceptance/secrets/backend/database/secret-test.js b/ui/tests/acceptance/secrets/backend/database/secret-test.js new file mode 100644 index 0000000..0d80def --- /dev/null +++ b/ui/tests/acceptance/secrets/backend/database/secret-test.js @@ -0,0 +1,569 @@ +/** + * 
Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import { module, test } from 'qunit';
+import { setupApplicationTest } from 'ember-qunit';
+import { currentURL, settled, click, visit, fillIn, typeIn } from '@ember/test-helpers';
+import { create } from 'ember-cli-page-object';
+import { selectChoose, clickTrigger } from 'ember-power-select/test-support/helpers';
+
+import mountSecrets from 'vault/tests/pages/settings/mount-secret-backend';
+import connectionPage from 'vault/tests/pages/secrets/backend/database/connection';
+import rolePage from 'vault/tests/pages/secrets/backend/database/role';
+import apiStub from 'vault/tests/helpers/noop-all-api-requests';
+import authPage from 'vault/tests/pages/auth';
+import logout from 'vault/tests/pages/logout';
+import consoleClass from 'vault/tests/pages/components/console/ui-panel';
+import searchSelect from 'vault/tests/pages/components/search-select';
+
+const searchSelectComponent = create(searchSelect);
+
+const consoleComponent = create(consoleClass);
+
+const MODEL = {
+ engineType: 'database',
+ id: 'database-name',
+};
+
+const mount = async () => {
+ const path = `database-${Date.now()}`;
+ await mountSecrets.enable('database', path);
+ await settled();
+ return path;
+};
+
+const newConnection = async (
+ backend,
+ plugin = 'mongodb-database-plugin',
+ // use a fixed test path; the generated connection name below is not in scope for parameter defaults
+ connectionUrl = `mongodb://127.0.0.1:4321/test`
+) => {
+ const name = `connection-${Date.now()}`;
+ await connectionPage.visitCreate({ backend });
+ await connectionPage.dbPlugin(plugin);
+ await connectionPage.name(name);
+ await connectionPage.connectionUrl(connectionUrl);
+ await connectionPage.toggleVerify();
+ await connectionPage.save();
+ await connectionPage.enable();
+ return name;
+};
+
+const connectionTests = [
+ {
+ name: 'elasticsearch-connection',
+ plugin: 'elasticsearch-database-plugin',
+ elasticUser: 'username',
+ elasticPassword: 'password',
+ url: 'http://127.0.0.1:9200',
+ requiredFields: async (assert, name) => {
+ assert.dom('[data-test-input="username"]').exists(`Username field exists for ${name}`);
+ assert.dom('[data-test-input="password"]').exists(`Password field exists for ${name}`);
+ assert.dom('[data-test-input="ca_cert"]').exists(`CA certificate field exists for ${name}`);
+ assert.dom('[data-test-input="ca_path"]').exists(`CA path field exists for ${name}`);
+ assert.dom('[data-test-input="client_cert"]').exists(`Client certificate field exists for ${name}`);
+ assert.dom('[data-test-input="client_key"]').exists(`Client key field exists for ${name}`);
+ assert.dom('[data-test-input="tls_server_name"]').exists(`TLS server name field exists for ${name}`);
+ assert.dom('[data-test-input="insecure"]').exists(`Insecure checkbox exists for ${name}`);
+ assert
+ .dom('[data-test-toggle-input="show-username_template"]')
+ .exists(`Username template toggle exists for ${name}`);
+ },
+ },
+ {
+ name: 'mongodb-connection',
+ plugin: 'mongodb-database-plugin',
+ url: `mongodb://127.0.0.1:4321/test`,
+ requiredFields: async (assert, name) => {
+ assert.dom('[data-test-input="username"]').exists(`Username field exists for ${name}`);
+ assert.dom('[data-test-input="password"]').exists(`Password field exists for ${name}`);
+ assert.dom('[data-test-input="write_concern"]').exists(`Write concern field exists for ${name}`);
+ assert.dom('[data-test-toggle-group="TLS options"]').exists('TLS options toggle exists');
+ assert
+ .dom('[data-test-input="root_rotation_statements"]')
+ .exists(`Root rotation statements exists for ${name}`);
+ },
+ 
}, + { + name: 'mssql-connection', + plugin: 'mssql-database-plugin', + url: `mssql://127.0.0.1:4321/test`, + requiredFields: async (assert, name) => { + assert.dom('[data-test-input="username"]').exists(`Username field exists for ${name}`); + assert.dom('[data-test-input="password"]').exists(`Password field exists for ${name}`); + assert + .dom('[data-test-input="max_open_connections"]') + .exists(`Max open connections exists for ${name}`); + assert + .dom('[data-test-input="max_idle_connections"]') + .exists(`Max idle connections exists for ${name}`); + assert + .dom('[data-test-input="max_connection_lifetime"]') + .exists(`Max connection lifetime exists for ${name}`); + assert + .dom('[data-test-input="root_rotation_statements"]') + .exists(`Root rotation statements exists for ${name}`); + }, + }, + { + name: 'mysql-connection', + plugin: 'mysql-database-plugin', + url: `{{username}}:{{password}}@tcp(127.0.0.1:3306)/test`, + requiredFields: async (assert, name) => { + assert.dom('[data-test-input="username"]').exists(`Username field exists for ${name}`); + assert.dom('[data-test-input="password"]').exists(`Password field exists for ${name}`); + assert + .dom('[data-test-input="max_open_connections"]') + .exists(`Max open connections exists for ${name}`); + assert + .dom('[data-test-input="max_idle_connections"]') + .exists(`Max idle connections exists for ${name}`); + assert + .dom('[data-test-input="max_connection_lifetime"]') + .exists(`Max connection lifetime exists for ${name}`); + assert.dom('[data-test-toggle-group="TLS options"]').exists('TLS options toggle exists'); + assert + .dom('[data-test-input="root_rotation_statements"]') + .exists(`Root rotation statements exists for ${name}`); + }, + }, + { + name: 'mysql-aurora-connection', + plugin: 'mysql-aurora-database-plugin', + url: `{{username}}:{{password}}@tcp(127.0.0.1:3306)/test`, + requiredFields: async (assert, name) => { + assert.dom('[data-test-input="username"]').exists(`Username field exists for ${name}`); + assert.dom('[data-test-input="password"]').exists(`Password field exists for ${name}`); + assert + .dom('[data-test-input="max_open_connections"]') + .exists(`Max open connections exists for ${name}`); + assert + .dom('[data-test-input="max_idle_connections"]') + .exists(`Max idle connections exists for ${name}`); + assert + .dom('[data-test-input="max_connection_lifetime"]') + .exists(`Max connection lifetime exists for ${name}`); + assert.dom('[data-test-toggle-group="TLS options"]').exists('TLS options toggle exists'); + assert + .dom('[data-test-input="root_rotation_statements"]') + .exists(`Root rotation statements exists for ${name}`); + }, + }, + { + name: 'mysql-rds-connection', + plugin: 'mysql-rds-database-plugin', + url: `{{username}}:{{password}}@tcp(127.0.0.1:3306)/test`, + requiredFields: async (assert, name) => { + assert.dom('[data-test-input="username"]').exists(`Username field exists for ${name}`); + assert.dom('[data-test-input="password"]').exists(`Password field exists for ${name}`); + assert + .dom('[data-test-input="max_open_connections"]') + .exists(`Max open connections exists for ${name}`); + assert + .dom('[data-test-input="max_idle_connections"]') + .exists(`Max idle connections exists for ${name}`); + assert + .dom('[data-test-input="max_connection_lifetime"]') + .exists(`Max connection lifetime exists for ${name}`); + assert.dom('[data-test-toggle-group="TLS options"]').exists('TLS options toggle exists'); + assert + .dom('[data-test-input="root_rotation_statements"]') + .exists(`Root 
rotation statements exists for ${name}`);
+ },
+ },
+ {
+ name: 'mysql-legacy-connection',
+ plugin: 'mysql-legacy-database-plugin',
+ url: `{{username}}:{{password}}@tcp(127.0.0.1:3306)/test`,
+ requiredFields: async (assert, name) => {
+ assert.dom('[data-test-input="username"]').exists(`Username field exists for ${name}`);
+ assert.dom('[data-test-input="password"]').exists(`Password field exists for ${name}`);
+ assert
+ .dom('[data-test-input="max_open_connections"]')
+ .exists(`Max open connections exists for ${name}`);
+ assert
+ .dom('[data-test-input="max_idle_connections"]')
+ .exists(`Max idle connections exists for ${name}`);
+ assert
+ .dom('[data-test-input="max_connection_lifetime"]')
+ .exists(`Max connection lifetime exists for ${name}`);
+ assert.dom('[data-test-toggle-group="TLS options"]').exists('TLS options toggle exists');
+ assert
+ .dom('[data-test-input="root_rotation_statements"]')
+ .exists(`Root rotation statements exists for ${name}`);
+ },
+ },
+ {
+ name: 'postgresql-connection',
+ plugin: 'postgresql-database-plugin',
+ url: `postgresql://{{username}}:{{password}}@localhost:5432/postgres?sslmode=disable`,
+ requiredFields: async (assert, name) => {
+ assert.dom('[data-test-input="username"]').exists(`Username field exists for ${name}`);
+ assert.dom('[data-test-input="password"]').exists(`Password field exists for ${name}`);
+ assert
+ .dom('[data-test-input="max_open_connections"]')
+ .exists(`Max open connections exists for ${name}`);
+ assert
+ .dom('[data-test-input="max_idle_connections"]')
+ .exists(`Max idle connections exists for ${name}`);
+ assert
+ .dom('[data-test-input="max_connection_lifetime"]')
+ .exists(`Max connection lifetime exists for ${name}`);
+ assert
+ .dom('[data-test-input="root_rotation_statements"]')
+ .exists(`Root rotation statements exists for ${name}`);
+ assert
+ .dom('[data-test-toggle-input="show-username_template"]')
+ .exists(`Username template toggle exists for ${name}`);
+ },
+ },
+ // keep oracle as the last DB because it is skipped in some tests (line 285); the UI doesn't return to the empty state after it
+ {
+ name: 'oracle-connection',
+ plugin: 'vault-plugin-database-oracle',
+ url: `{{username}}/{{password}}@localhost:1521/OraDoc.localhost`,
+ requiredFields: async (assert, name) => {
+ assert.dom('[data-test-input="username"]').exists(`Username field exists for ${name}`);
+ assert.dom('[data-test-input="password"]').exists(`Password field exists for ${name}`);
+ assert
+ .dom('[data-test-input="max_open_connections"]')
+ .exists(`Max open connections exists for ${name}`);
+ assert
+ .dom('[data-test-input="max_idle_connections"]')
+ .exists(`Max idle connections exists for ${name}`);
+ assert
+ .dom('[data-test-input="max_connection_lifetime"]')
+ .exists(`Max connection lifetime exists for ${name}`);
+ assert
+ .dom('[data-test-input="root_rotation_statements"]')
+ .exists(`Root rotation statements exists for ${name}`);
+ assert
+ .dom('[data-test-alert-banner="alert"]')
+ .hasTextContaining(
+ `Warning Please ensure that your Oracle plugin has the default name of vault-plugin-database-oracle. Custom naming is not supported in the UI at this time. If the plugin is already named vault-plugin-database-oracle, disregard this warning.`,
+ 'warning banner displays about the Oracle plugin name.'
+ ); + }, + }, +]; + +module('Acceptance | secrets/database/*', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(async function () { + this.server = apiStub({ usePassthrough: true }); + return authPage.login(); + }); + hooks.afterEach(function () { + this.server.shutdown(); + }); + + test('can enable the database secrets engine', async function (assert) { + const backend = `database-${Date.now()}`; + await mountSecrets.enable('database', backend); + await settled(); + assert.strictEqual( + currentURL(), + `/vault/secrets/${backend}/list`, + 'Mounts and redirects to connection list page' + ); + assert.dom('[data-test-component="empty-state"]').exists('Empty state exists'); + assert + .dom('.active[data-test-secret-list-tab="Connections"]') + .exists('Has Connections tab which is active'); + await click('[data-test-tab="overview"]'); + assert.strictEqual(currentURL(), `/vault/secrets/${backend}/overview`, 'Tab links to overview page'); + assert.dom('[data-test-component="empty-state"]').exists('Empty state also exists on overview page'); + assert.dom('[data-test-secret-list-tab="Roles"]').exists('Has Roles tab'); + }); + + test('Connection create and edit form for each plugin', async function (assert) { + assert.expect(161); + const backend = await mount(); + for (const testCase of connectionTests) { + await connectionPage.visitCreate({ backend }); + assert.strictEqual(currentURL(), `/vault/secrets/${backend}/create`, 'Correct creation URL'); + assert + .dom('[data-test-empty-state-title]') + .hasText('No plugin selected', 'No plugin is selected by default and empty state shows'); + await connectionPage.dbPlugin(testCase.plugin); + assert.dom('[data-test-empty-state]').doesNotExist('Empty state goes away after plugin selected'); + await connectionPage.name(testCase.name); + if (testCase.plugin === 'elasticsearch-database-plugin') { + await connectionPage.url(testCase.url); + await connectionPage.username(testCase.elasticUser); + await connectionPage.password(testCase.elasticPassword); + } else { + await connectionPage.connectionUrl(testCase.url); + } + // skip adding oracle db connection since plugin doesn't exist + if (testCase.plugin === 'vault-plugin-database-oracle') { + testCase.requiredFields(assert, testCase.name); + continue; + } + testCase.requiredFields(assert, testCase.name); + await connectionPage.toggleVerify(); + await connectionPage.save(); + await settled(); + assert + .dom('.modal.is-active .title') + .hasText('Rotate your root credentials?', 'Modal appears asking to rotate root credentials'); + await connectionPage.enable(); + assert.ok( + currentURL().startsWith(`/vault/secrets/${backend}/show/${testCase.name}`), + `Saves connection and takes you to show page for ${testCase.name}` + ); + assert + .dom(`[data-test-row-value="Password"]`) + .doesNotExist(`Does not show Password value on show page for ${testCase.name}`); + await connectionPage.edit(); + assert.ok( + currentURL().startsWith(`/vault/secrets/${backend}/edit/${testCase.name}`), + `Edit connection button and takes you to edit page for ${testCase.name}` + ); + assert.dom(`[data-test-input="name"]`).hasAttribute('readonly'); + assert.dom(`[data-test-input="plugin_name"]`).hasAttribute('readonly'); + assert.dom('[data-test-input="password"]').doesNotExist('Password is not displayed on edit form'); + assert.dom('[data-test-toggle-input="show-password"]').exists('Update password toggle exists'); + await connectionPage.toggleVerify(); + await connectionPage.save(); + // click "Add Role" + await 
connectionPage.addRole();
+ await settled();
+ assert.strictEqual(
+ searchSelectComponent.selectedOptions[0].text,
+ testCase.name,
+ 'Database connection is pre-selected on the form'
+ );
+ await click('[data-test-secret-breadcrumb]');
+ }
+ });
+
+ test('Can create and delete a connection', async function (assert) {
+ const backend = await mount();
+ const connectionDetails = {
+ plugin: 'mongodb-database-plugin',
+ id: 'horses-db',
+ fields: [
+ { label: 'Connection name', name: 'name', value: 'horses-db' },
+ { label: 'Connection URL', name: 'connection_url', value: 'mongodb://127.0.0.1:235/horses' },
+ { label: 'Username', name: 'username', value: 'user', hideOnShow: true },
+ { label: 'Password', name: 'password', password: 'so-secure', hideOnShow: true },
+ { label: 'Write concern', name: 'write_concern' },
+ ],
+ };
+ assert.strictEqual(
+ currentURL(),
+ `/vault/secrets/${backend}/list`,
+ 'Mounts and redirects to connection list page'
+ );
+ await connectionPage.createLink();
+ assert.strictEqual(currentURL(), `/vault/secrets/${backend}/create`, 'Create link goes to create page');
+ assert
+ .dom('[data-test-empty-state-title]')
+ .hasText('No plugin selected', 'No plugin is selected by default and empty state shows');
+ await connectionPage.dbPlugin(connectionDetails.plugin);
+ assert.dom('[data-test-empty-state]').doesNotExist('Empty state goes away after plugin selected');
+ // use a for...of loop so each fillIn is actually awaited (an async forEach callback is not)
+ for (const { name, value } of connectionDetails.fields) {
+ assert
+ .dom(`[data-test-input="${name}"]`)
+ .exists(`Field ${name} exists for ${connectionDetails.plugin}`);
+ if (value) {
+ await fillIn(`[data-test-input="${name}"]`, value);
+ }
+ }
+ // uncheck verify for the save step to work
+ await connectionPage.toggleVerify();
+ await connectionPage.save();
+ await settled();
+ assert
+ .dom('.modal.is-active .title')
+ .hasText('Rotate your root credentials?', 'Modal appears asking to rotate root credentials');
+ await connectionPage.enable();
+ assert.strictEqual(
+ currentURL(),
+ `/vault/secrets/${backend}/show/${connectionDetails.id}`,
+ 'Saves connection and takes you to show page'
+ );
+ connectionDetails.fields.forEach(({ label, name, value, hideOnShow }) => {
+ if (hideOnShow) {
+ assert
+ .dom(`[data-test-row-value="${label}"]`)
+ .doesNotExist(`Does not show ${name} value on show page for ${connectionDetails.plugin}`);
+ } else if (!value) {
+ assert.dom(`[data-test-row-value="${label}"]`).hasText('Default');
+ } else {
+ assert.dom(`[data-test-row-value="${label}"]`).hasText(value);
+ }
+ });
+ await connectionPage.delete();
+ assert
+ .dom('.modal.is-active .title')
+ .hasText('Delete connection?', 'Modal appears asking to confirm delete action');
+ await fillIn('[data-test-confirmation-modal-input="Delete connection?"]', connectionDetails.id);
+ await click('[data-test-confirm-button]');
+
+ assert.strictEqual(currentURL(), `/vault/secrets/${backend}/list`, 'Redirects to connection list page');
+ assert
+ .dom('[data-test-empty-state-title]')
+ .hasText('No connections in this backend', 'No connections listed because it was deleted');
+ });
+
+ test('buttons show up for managing connection', async function (assert) {
+ const backend = await mount();
+ const connection = await newConnection(backend);
+ await connectionPage.visitShow({ backend, id: connection });
+ assert
+ .dom('[data-test-database-connection-delete]')
+ .hasText('Delete connection', 'Delete connection button exists with correct text');
+ assert
+ .dom('[data-test-database-connection-reset]')
+ .hasText('Reset connection', 'Reset button 
exists with correct text'); + assert.dom('[data-test-secret-create]').hasText('Add role', 'Add role button exists with correct text'); + assert.dom('[data-test-edit-link]').hasText('Edit configuration', 'Edit button exists with correct text'); + const CONNECTION_VIEW_ONLY = ` + path "${backend}/*" { + capabilities = ["deny"] + } + path "${backend}/config" { + capabilities = ["list"] + } + path "${backend}/config/*" { + capabilities = ["read"] + } + `; + await consoleComponent.runCommands([ + `write sys/mounts/${backend} type=database`, + `write sys/policies/acl/test-policy policy=${btoa(CONNECTION_VIEW_ONLY)}`, + 'write -field=client_token auth/token/create policies=test-policy ttl=1h', + ]); + const token = consoleComponent.lastTextOutput; + await logout.visit(); + await authPage.login(token); + await connectionPage.visitShow({ backend, id: connection }); + assert.strictEqual( + currentURL(), + `/vault/secrets/${backend}/show/${connection}`, + 'Allows reading connection' + ); + assert + .dom('[data-test-database-connection-delete]') + .doesNotExist('Delete button does not show due to permissions'); + assert + .dom('[data-test-database-connection-reset]') + .doesNotExist('Reset button does not show due to permissions'); + assert.dom('[data-test-secret-create]').doesNotExist('Add role button does not show due to permissions'); + assert.dom('[data-test-edit-link]').doesNotExist('Edit button does not show due to permissions'); + await visit(`/vault/secrets/${backend}/overview`); + assert.dom('[data-test-selectable-card="Connections"]').exists('Connections card exists on overview'); + assert + .dom('[data-test-selectable-card="Roles"]') + .doesNotExist('Roles card does not exist on overview w/ policy'); + assert.dom('.title-number').hasText('1', 'Lists the correct number of connections'); + // confirm get credentials card is an option to select. Regression bug. 
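+ // (assumption: '.ember-text-field' is the overview page's role search input;
+ // typing any value should still leave the Get credentials card enabled for this token)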
+ await typeIn('.ember-text-field', 'blah');
+ assert.dom('[data-test-get-credentials]').isEnabled();
+ });
+
+ test('connection_url must be decoded', async function (assert) {
+ const backend = await mount();
+ const connection = await newConnection(
+ backend,
+ 'mongodb-database-plugin',
+ '{{username}}/{{password}}@oracle-xe:1521/XEPDB1'
+ );
+ await connectionPage.visitShow({ backend, id: connection });
+ assert
+ .dom('[data-test-row-value="Connection URL"]')
+ .hasText('{{username}}/{{password}}@oracle-xe:1521/XEPDB1');
+ });
+
+ test('Role create form', async function (assert) {
+ const backend = await mount();
+ // Connection needed for role fields
+ await newConnection(backend);
+ await rolePage.visitCreate({ backend });
+ await rolePage.name('bar');
+ assert
+ .dom('[data-test-component="empty-state"]')
+ .exists({ count: 2 }, 'Two empty states exist before selections made');
+ await clickTrigger('#database');
+ assert.strictEqual(searchSelectComponent.options.length, 1, 'list shows existing connections so far');
+ await selectChoose('#database', '.ember-power-select-option', 0);
+ assert
+ .dom('[data-test-component="empty-state"]')
+ .exists({ count: 2 }, 'Two empty states exist before selections made');
+ await rolePage.roleType('static');
+ assert.dom('[data-test-component="empty-state"]').doesNotExist('Empty states go away');
+ assert.dom('[data-test-input="username"]').exists('Username field appears for static role');
+ assert
+ .dom('[data-test-toggle-input="Rotation period"]')
+ .exists('Rotation period field appears for static role');
+ await rolePage.roleType('dynamic');
+ assert
+ .dom('[data-test-toggle-input="Generated credentials’s Time-to-Live (TTL)"]')
+ .exists('TTL field exists for dynamic');
+ assert
+ .dom('[data-test-toggle-input="Generated credentials’s maximum Time-to-Live (Max TTL)"]')
+ .exists('Max TTL field exists for dynamic');
+ // Real connection (actual running db) required to save role, so we aren't testing that flow yet
+ });
+
+ test('root and limited access', async function (assert) {
+ this.set('model', MODEL);
+ const backend = 'database';
+ const NO_ROLES_POLICY = `
+ path "database/roles/*" {
+ capabilities = ["delete"]
+ }
+ path "database/static-roles/*" {
+ capabilities = ["delete"]
+ }
+ path "database/config/*" {
+ capabilities = ["list", "create", "read", "update"]
+ }
+ path "database/creds/*" {
+ capabilities = ["list", "create", "read", "update"]
+ }
+ `;
+ await consoleComponent.runCommands([
+ `write sys/mounts/${backend} type=database`,
+ `write sys/policies/acl/test-policy policy=${btoa(NO_ROLES_POLICY)}`,
+ 'write -field=client_token auth/token/create policies=test-policy ttl=1h',
+ ]);
+ const token = consoleComponent.lastTextOutput;
+
+ // test root user flow
+ await settled();
+
+ // await click('[data-test-secret-backend-row="database"]');
+ // skipping the click because occasionally it shows up on the second page and cannot be found
+ await visit(`/vault/secrets/database/overview`);
+
+ assert.dom('[data-test-component="empty-state"]').exists('renders empty state');
+ assert.dom('[data-test-secret-list-tab="Connections"]').exists('renders connections tab');
+ assert.dom('[data-test-secret-list-tab="Roles"]').exists('renders roles tab');
+
+ await click('[data-test-secret-create="connections"]');
+ assert.strictEqual(currentURL(), '/vault/secrets/database/create');
+
+ // Login with restricted policy
+ await logout.visit();
+ await authPage.login(token);
+ await settled();
+ // skipping the click because occasionally it shows up on the second page and cannot be found
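+ // with the restricted test-policy token (no list/read on roles), only connection UI should render below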
+ await visit(`/vault/secrets/database/overview`);
+ assert.dom('[data-test-tab="overview"]').exists('renders overview tab');
+ assert.dom('[data-test-secret-list-tab="Connections"]').exists('renders connections tab');
+ assert
+ .dom('[data-test-secret-list-tab="Roles"]')
+ .doesNotExist(`does not show the roles tab because it does not have permissions`);
+ assert
+ .dom('[data-test-selectable-card="Connections"]')
+ .exists({ count: 1 }, 'renders only the connection card');
+
+ await click('[data-test-action-text="Configure new"]');
+ assert.strictEqual(currentURL(), '/vault/secrets/database/create?itemType=connection');
+ });
+});
diff --git a/ui/tests/acceptance/secrets/backend/engines-test.js b/ui/tests/acceptance/secrets/backend/engines-test.js
new file mode 100644
index 0000000..60fd9a2
--- /dev/null
+++ b/ui/tests/acceptance/secrets/backend/engines-test.js
@@ -0,0 +1,116 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import { currentRouteName, settled } from '@ember/test-helpers';
+import { clickTrigger } from 'ember-power-select/test-support/helpers';
+import { create } from 'ember-cli-page-object';
+import { module, test } from 'qunit';
+import consoleClass from 'vault/tests/pages/components/console/ui-panel';
+import { setupApplicationTest } from 'ember-qunit';
+import { v4 as uuidv4 } from 'uuid';
+
+import mountSecrets from 'vault/tests/pages/settings/mount-secret-backend';
+import backendsPage from 'vault/tests/pages/secrets/backends';
+import authPage from 'vault/tests/pages/auth';
+import ss from 'vault/tests/pages/components/search-select';
+
+const consoleComponent = create(consoleClass);
+const searchSelect = create(ss);
+
+module('Acceptance | secret-engine list view', function (hooks) {
+ setupApplicationTest(hooks);
+
+ hooks.beforeEach(function () {
+ this.uid = uuidv4();
+ return authPage.login();
+ });
+
+ test('it allows you to disable an engine', async function (assert) {
+ // first mount an engine so we can disable it.
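+ // the uuidv4 suffix from beforeEach keeps the mount path unique so reruns don't collide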
+ const enginePath = `alicloud-disable-${this.uid}`; + await mountSecrets.enable('alicloud', enginePath); + await settled(); + assert.ok(backendsPage.rows.filterBy('path', `${enginePath}/`)[0], 'shows the mounted engine'); + + await backendsPage.visit(); + await settled(); + const row = backendsPage.rows.filterBy('path', `${enginePath}/`)[0]; + await row.menu(); + await settled(); + await backendsPage.disableButton(); + await settled(); + await backendsPage.confirmDisable(); + await settled(); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backends', + 'redirects to the backends page' + ); + assert.strictEqual( + backendsPage.rows.filterBy('path', `${enginePath}/`).length, + 0, + 'does not show the disabled engine' + ); + }); + + test('it adds disabled css styling to unsupported secret engines', async function (assert) { + assert.expect(2); + // first mount engine that is not supported + const enginePath = `nomad-${this.uid}`; + + await mountSecrets.enable('nomad', enginePath); + await settled(); + await backendsPage.visit(); + await settled(); + + const rows = document.querySelectorAll('[data-test-auth-backend-link]'); + const rowUnsupported = Array.from(rows).filter((row) => row.innerText.includes('nomad')); + const rowSupported = Array.from(rows).filter((row) => row.innerText.includes('cubbyhole')); + assert + .dom(rowUnsupported[0]) + .doesNotHaveClass( + 'linked-block', + `the linked-block class is not added to unsupported engines, which effectively disables it.` + ); + assert.dom(rowSupported[0]).hasClass('linked-block', `linked-block class is added to supported engines.`); + + // cleanup + await consoleComponent.runCommands([`delete sys/mounts/${enginePath}`]); + }); + + test('it filters by name and engine type', async function (assert) { + assert.expect(4); + const enginePath1 = `aws-1-${this.uid}`; + const enginePath2 = `aws-2-${this.uid}`; + + await mountSecrets.enable('aws', enginePath1); + await mountSecrets.enable('aws', enginePath2); + await backendsPage.visit(); + await settled(); + // filter by type + await clickTrigger('#filter-by-engine-type'); + await searchSelect.options.objectAt(0).click(); + + const rows = document.querySelectorAll('[data-test-auth-backend-link]'); + const rowsAws = Array.from(rows).filter((row) => row.innerText.includes('aws')); + + assert.strictEqual(rows.length, rowsAws.length, 'all rows returned are aws'); + // filter by name + await clickTrigger('#filter-by-engine-name'); + const firstItemToSelect = searchSelect.options.objectAt(0).text; + await searchSelect.options.objectAt(0).click(); + const singleRow = document.querySelectorAll('[data-test-auth-backend-link]'); + assert.strictEqual(singleRow.length, 1, 'returns only one row'); + assert.dom(singleRow[0]).includesText(firstItemToSelect, 'shows the filtered by name engine'); + // clear filter by engine name + await searchSelect.deleteButtons.objectAt(1).click(); + const rowsAgain = document.querySelectorAll('[data-test-auth-backend-link]'); + assert.ok(rowsAgain.length > 1, 'filter has been removed'); + + // cleanup + await consoleComponent.runCommands([`delete sys/mounts/${enginePath1}`]); + await consoleComponent.runCommands([`delete sys/mounts/${enginePath2}`]); + }); +}); diff --git a/ui/tests/acceptance/secrets/backend/gcpkms/secrets-test.js b/ui/tests/acceptance/secrets/backend/gcpkms/secrets-test.js new file mode 100644 index 0000000..28acf35 --- /dev/null +++ b/ui/tests/acceptance/secrets/backend/gcpkms/secrets-test.js @@ -0,0 +1,38 @@ +/** + * Copyright (c) 
HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { currentRouteName, settled } from '@ember/test-helpers'; +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { v4 as uuidv4 } from 'uuid'; + +import mountSecrets from 'vault/tests/pages/settings/mount-secret-backend'; +import backendsPage from 'vault/tests/pages/secrets/backends'; +import authPage from 'vault/tests/pages/auth'; + +module('Acceptance | gcpkms/enable', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(function () { + this.uid = uuidv4(); + return authPage.login(); + }); + + test('enable gcpkms', async function (assert) { + // Error: Cannot call `visit` without having first called `setupApplicationContext`. + const enginePath = `gcpkms-${this.uid}`; + await mountSecrets.visit(); + await settled(); + await mountSecrets.selectType('gcpkms'); + await mountSecrets.next().path(enginePath).submit(); + await settled(); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backends', + 'redirects to the backends page' + ); + assert.ok(backendsPage.rows.filterBy('path', `${enginePath}/`)[0], 'shows the gcpkms engine'); + }); +}); diff --git a/ui/tests/acceptance/secrets/backend/generic/secret-test.js b/ui/tests/acceptance/secrets/backend/generic/secret-test.js new file mode 100644 index 0000000..cdec0e8 --- /dev/null +++ b/ui/tests/acceptance/secrets/backend/generic/secret-test.js @@ -0,0 +1,80 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { currentRouteName } from '@ember/test-helpers'; +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { v4 as uuidv4 } from 'uuid'; + +import editPage from 'vault/tests/pages/secrets/backend/kv/edit-secret'; +import showPage from 'vault/tests/pages/secrets/backend/kv/show'; +import listPage from 'vault/tests/pages/secrets/backend/list'; +import consolePanel from 'vault/tests/pages/components/console/ui-panel'; +import authPage from 'vault/tests/pages/auth'; + +import { create } from 'ember-cli-page-object'; + +import apiStub from 'vault/tests/helpers/noop-all-api-requests'; + +const cli = create(consolePanel); + +module('Acceptance | secrets/generic/create', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(function () { + this.uid = uuidv4(); + this.server = apiStub({ usePassthrough: true }); + return authPage.login(); + }); + + hooks.afterEach(function () { + this.server.shutdown(); + }); + + test('it creates and can view a secret with the generic backend', async function (assert) { + const path = `generic-${this.uid}`; + const kvPath = `generic-kv-${this.uid}`; + await cli.runCommands([`write sys/mounts/${path} type=generic`, `write ${path}/foo bar=baz`]); + await listPage.visitRoot({ backend: path }); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backend.list-root', + 'navigates to the list page' + ); + assert.strictEqual(listPage.secrets.length, 1, 'lists one secret in the backend'); + + await listPage.create(); + await editPage.createSecret(kvPath, 'foo', 'bar'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backend.show', + 'redirects to the show page' + ); + assert.ok(showPage.editIsPresent, 'shows the edit button'); + }); + + test('upgrading generic to version 2 lists all existing secrets, and CRUD continues to work', async function (assert) { + const path = `generic-${this.uid}`; + const kvPath = `generic-kv-${this.uid}`; + await 
cli.runCommands([ + `write sys/mounts/${path} type=generic`, + `write ${path}/foo bar=baz`, + // upgrade to version 2 generic mount + `write sys/mounts/${path}/tune options=version=2`, + ]); + await listPage.visitRoot({ backend: path }); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backend.list-root', + 'navigates to the list page' + ); + assert.strictEqual(listPage.secrets.length, 1, 'lists the old secret in the backend'); + + await listPage.create(); + await editPage.createSecret(kvPath, 'foo', 'bar'); + await listPage.visitRoot({ backend: path }); + assert.strictEqual(listPage.secrets.length, 2, 'lists two secrets in the backend'); + }); +}); diff --git a/ui/tests/acceptance/secrets/backend/kubernetes/configuration-test.js b/ui/tests/acceptance/secrets/backend/kubernetes/configuration-test.js new file mode 100644 index 0000000..781b97e --- /dev/null +++ b/ui/tests/acceptance/secrets/backend/kubernetes/configuration-test.js @@ -0,0 +1,82 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import kubernetesScenario from 'vault/mirage/scenarios/kubernetes'; +import ENV from 'vault/config/environment'; +import authPage from 'vault/tests/pages/auth'; +import { visit, click, currentRouteName } from '@ember/test-helpers'; +import { Response } from 'miragejs'; + +module('Acceptance | kubernetes | configuration', function (hooks) { + setupApplicationTest(hooks); + setupMirage(hooks); + + hooks.before(function () { + ENV['ember-cli-mirage'].handler = 'kubernetes'; + }); + + hooks.beforeEach(function () { + kubernetesScenario(this.server); + this.visitConfiguration = () => { + return visit('/vault/secrets/kubernetes/kubernetes/configuration'); + }; + this.validateRoute = (assert, route, message) => { + assert.strictEqual(currentRouteName(), `vault.cluster.secrets.backend.kubernetes.${route}`, message); + }; + return authPage.login(); + }); + + hooks.after(function () { + ENV['ember-cli-mirage'].handler = null; + }); + + test('it should transition to configure page on Edit Configuration click from toolbar', async function (assert) { + assert.expect(1); + await this.visitConfiguration(); + await click('[data-test-toolbar-config-action]'); + this.validateRoute(assert, 'configure', 'Transitions to Configure route on click'); + }); + + test('it should transition to the configuration page on Save click in Configure', async function (assert) { + assert.expect(1); + await this.visitConfiguration(); + await click('[data-test-toolbar-config-action]'); + await click('[data-test-config-save]'); + await click('[data-test-config-confirm]'); + this.validateRoute(assert, 'configuration', 'Transitions to Configuration route on click'); + }); + + test('it should transition to the configuration page on Cancel click in Configure', async function (assert) { + assert.expect(1); + await this.visitConfiguration(); + await click('[data-test-toolbar-config-action]'); + await click('[data-test-config-cancel]'); + this.validateRoute(assert, 'configuration', 'Transitions to Configuration route on click'); + }); + + test('it should transition to error route on config fetch error other than 404', async function (assert) { + this.server.get('/kubernetes/config', () => new Response(403)); + await this.visitConfiguration(); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backend.kubernetes.error', + 
'Transitions to error route on config fetch error' + ); + }); + + test('it should not transition to error route on config fetch 404', async function (assert) { + this.server.get('/kubernetes/config', () => new Response(404)); + await this.visitConfiguration(); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backend.kubernetes.configuration', + 'Transitions to configuration route on fetch 404' + ); + assert.dom('[data-test-empty-state-title]').hasText('Kubernetes not configured', 'Config cta renders'); + }); +}); diff --git a/ui/tests/acceptance/secrets/backend/kubernetes/credentials-test.js b/ui/tests/acceptance/secrets/backend/kubernetes/credentials-test.js new file mode 100644 index 0000000..05e9e11 --- /dev/null +++ b/ui/tests/acceptance/secrets/backend/kubernetes/credentials-test.js @@ -0,0 +1,82 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import kubernetesScenario from 'vault/mirage/scenarios/kubernetes'; +import ENV from 'vault/config/environment'; +import authPage from 'vault/tests/pages/auth'; +import { fillIn, visit, click, currentRouteName } from '@ember/test-helpers'; + +module('Acceptance | kubernetes | credentials', function (hooks) { + setupApplicationTest(hooks); + setupMirage(hooks); + + hooks.before(function () { + ENV['ember-cli-mirage'].handler = 'kubernetes'; + }); + hooks.beforeEach(function () { + kubernetesScenario(this.server); + this.visitRoleCredentials = () => { + return visit('/vault/secrets/kubernetes/kubernetes/roles/role-0/credentials'); + }; + this.validateRoute = (assert, route, message) => { + assert.strictEqual(currentRouteName(), `vault.cluster.secrets.backend.kubernetes.${route}`, message); + }; + return authPage.login(); + }); + hooks.after(function () { + ENV['ember-cli-mirage'].handler = null; + }); + + test('it should have correct breadcrumb links in credentials view', async function (assert) { + assert.expect(3); + await this.visitRoleCredentials(); + await click('[data-test-breadcrumbs] li:nth-child(3) a'); + this.validateRoute(assert, 'roles.role.details', 'Transitions to role details route on breadcrumb click'); + await this.visitRoleCredentials(); + await click('[data-test-breadcrumbs] li:nth-child(2) a'); + this.validateRoute(assert, 'roles.index', 'Transitions to roles route on breadcrumb click'); + await this.visitRoleCredentials(); + await click('[data-test-breadcrumbs] li:nth-child(1) a'); + this.validateRoute(assert, 'overview', 'Transitions to overview route on breadcrumb click'); + }); + + test('it should transition to role details view on Back click', async function (assert) { + assert.expect(1); + await this.visitRoleCredentials(); + await click('[data-test-generate-credentials-back]'); + + await this.validateRoute(assert, 'roles.role.details', 'Transitions to role details on Back click'); + }); + + test('it should transition to role details view on Done click', async function (assert) { + assert.expect(1); + await this.visitRoleCredentials(); + this.server.post('/kubernetes-test/creds/role-0', () => { + assert.ok('POST request made to generate credentials'); + return { + request_id: '58fefc6c-5195-c17a-94f2-8f889f3df57c', + lease_id: 'kubernetes/creds/default-role/aWczfcfJ7NKUdiirJrPXIs38', + renewable: false, + lease_duration: 3600, + data: { + service_account_name: 'default', + service_account_namespace: 'default', + 
service_account_token: 'eyJhbGciOiJSUzI1NiIsImtpZCI6Imlr', + }, + }; + }); + await fillIn('[data-test-kubernetes-namespace]', 'kubernetes-test'); + await click('[data-test-toggle-input]'); + await click('[data-test-toggle-input="Time-to-Live (TTL)"]'); + await fillIn('[data-test-ttl-value="Time-to-Live (TTL)"]', 2); + await click('[data-test-generate-credentials-button]'); + await click('[data-test-generate-credentials-done]'); + + await this.validateRoute(assert, 'roles.role.details', 'Transitions to role details on Done click'); + }); +}); diff --git a/ui/tests/acceptance/secrets/backend/kubernetes/overview-test.js b/ui/tests/acceptance/secrets/backend/kubernetes/overview-test.js new file mode 100644 index 0000000..42e18ed --- /dev/null +++ b/ui/tests/acceptance/secrets/backend/kubernetes/overview-test.js @@ -0,0 +1,70 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import kubernetesScenario from 'vault/mirage/scenarios/kubernetes'; +import ENV from 'vault/config/environment'; +import authPage from 'vault/tests/pages/auth'; +import { visit, click, currentRouteName } from '@ember/test-helpers'; +import { selectChoose } from 'ember-power-select/test-support'; +import { SELECTORS } from 'vault/tests/helpers/kubernetes/overview'; + +module('Acceptance | kubernetes | overview', function (hooks) { + setupApplicationTest(hooks); + setupMirage(hooks); + + hooks.before(function () { + ENV['ember-cli-mirage'].handler = 'kubernetes'; + }); + hooks.beforeEach(function () { + this.createScenario = (shouldConfigureRoles = true) => + shouldConfigureRoles ? kubernetesScenario(this.server) : kubernetesScenario(this.server, false); + + this.visitOverview = () => { + return visit('/vault/secrets/kubernetes/kubernetes/overview'); + }; + this.validateRoute = (assert, route, message) => { + assert.strictEqual(currentRouteName(), `vault.cluster.secrets.backend.kubernetes.${route}`, message); + }; + return authPage.login(); + }); + hooks.after(function () { + ENV['ember-cli-mirage'].handler = null; + }); + + test('it should transition to configuration page during empty state', async function (assert) { + assert.expect(1); + await this.visitOverview(); + await click('[data-test-component="empty-state"] a'); + this.validateRoute(assert, 'configure', 'Transitions to Configure route on click'); + }); + + test('it should transition to view roles', async function (assert) { + assert.expect(1); + this.createScenario(); + await this.visitOverview(); + await click(SELECTORS.rolesCardLink); + this.validateRoute(assert, 'roles.index', 'Transitions to roles route on View Roles click'); + }); + + test('it should transition to create roles', async function (assert) { + assert.expect(1); + this.createScenario(false); + await this.visitOverview(); + await click(SELECTORS.rolesCardLink); + this.validateRoute(assert, 'roles.create', 'Transitions to roles route on Create Roles click'); + }); + + test('it should transition to generate credentials', async function (assert) { + assert.expect(1); + await this.createScenario(); + await this.visitOverview(); + await selectChoose('.search-select', 'role-0'); + await click('[data-test-generate-credential-button]'); + this.validateRoute(assert, 'roles.role.credentials', 'Transitions to roles route on Generate click'); + }); +}); diff --git 
a/ui/tests/acceptance/secrets/backend/kubernetes/roles-test.js b/ui/tests/acceptance/secrets/backend/kubernetes/roles-test.js new file mode 100644 index 0000000..efcba9f --- /dev/null +++ b/ui/tests/acceptance/secrets/backend/kubernetes/roles-test.js @@ -0,0 +1,129 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import kubernetesScenario from 'vault/mirage/scenarios/kubernetes'; +import ENV from 'vault/config/environment'; +import authPage from 'vault/tests/pages/auth'; +import { fillIn, visit, currentURL, click, currentRouteName } from '@ember/test-helpers'; + +module('Acceptance | kubernetes | roles', function (hooks) { + setupApplicationTest(hooks); + setupMirage(hooks); + + hooks.before(function () { + ENV['ember-cli-mirage'].handler = 'kubernetes'; + }); + hooks.beforeEach(function () { + kubernetesScenario(this.server); + this.visitRoles = () => { + return visit('/vault/secrets/kubernetes/kubernetes/roles'); + }; + this.validateRoute = (assert, route, message) => { + assert.strictEqual(currentRouteName(), `vault.cluster.secrets.backend.kubernetes.${route}`, message); + }; + return authPage.login(); + }); + hooks.after(function () { + ENV['ember-cli-mirage'].handler = null; + }); + + test('it should filter roles', async function (assert) { + await this.visitRoles(); + assert.dom('[data-test-list-item-link]').exists({ count: 3 }, 'Roles list renders'); + await fillIn('[data-test-component="navigate-input"]', '1'); + assert.dom('[data-test-list-item-link]').exists({ count: 1 }, 'Filtered roles list renders'); + assert.ok(currentURL().includes('pageFilter=1'), 'pageFilter query param value is set'); + }); + + test('it should link to role details on list item click', async function (assert) { + assert.expect(1); + await this.visitRoles(); + await click('[data-test-list-item-link]'); + this.validateRoute(assert, 'roles.role.details', 'Transitions to details route on list item click'); + }); + + test('it should have correct breadcrumb links in role details view', async function (assert) { + assert.expect(2); + await this.visitRoles(); + await click('[data-test-list-item-link]'); + await click('[data-test-breadcrumbs] li:nth-child(2) a'); + this.validateRoute(assert, 'roles.index', 'Transitions to roles route on breadcrumb click'); + await click('[data-test-list-item-link]'); + await click('[data-test-breadcrumbs] li:nth-child(1) a'); + this.validateRoute(assert, 'overview', 'Transitions to overview route on breadcrumb click'); + }); + + test('it should have functional list item menu', async function (assert) { + assert.expect(3); + await this.visitRoles(); + for (const action of ['details', 'edit', 'delete']) { + await click('[data-test-list-item-popup] button'); + await click(`[data-test-${action}]`); + if (action === 'delete') { + await click('[data-test-confirm-button]'); + assert.dom('[data-test-list-item-link]').exists({ count: 2 }, 'Deleted role removed from list'); + } else { + this.validateRoute( + assert, + `roles.role.${action}`, + `Transitions to ${action} route on menu action click` + ); + const selector = + action === 'details' ? 
'[data-test-breadcrumbs] li:nth-child(2) a' : '[data-test-cancel]'; + await click(selector); + } + } + }); + + test('it should create role', async function (assert) { + assert.expect(2); + await this.visitRoles(); + await click('[data-test-toolbar-roles-action]'); + await click('[data-test-radio-card="basic"]'); + await fillIn('[data-test-input="name"]', 'new-test-role'); + await fillIn('[data-test-input="serviceAccountName"]', 'default'); + await fillIn('[data-test-input="allowedKubernetesNamespaces"]', '*'); + await click('[data-test-save]'); + this.validateRoute(assert, 'roles.role.details', 'Transitions to details route on save success'); + await click('[data-test-breadcrumbs] li:nth-child(2) a'); + assert.dom('[data-test-role="new-test-role"]').exists('New role renders in list'); + }); + + test('it should have functional toolbar actions in details view', async function (assert) { + assert.expect(3); + await this.visitRoles(); + await click('[data-test-list-item-link]'); + await click('[data-test-generate-credentials]'); + this.validateRoute(assert, 'roles.role.credentials', 'Transitions to credentials route'); + await click('[data-test-breadcrumbs] li:nth-child(3) a'); + await click('[data-test-edit]'); + this.validateRoute(assert, 'roles.role.edit', 'Transitions to edit route'); + await click('[data-test-cancel]'); + await click('[data-test-list-item-link]'); + await click('[data-test-delete] button'); + await click('[data-test-confirm-button]'); + assert + .dom('[data-test-list-item-link]') + .exists({ count: 2 }, 'Transitions to roles route and deleted role removed from list'); + }); + + test('it should generate credentials for role', async function (assert) { + assert.expect(1); + await this.visitRoles(); + await click('[data-test-list-item-link]'); + await click('[data-test-generate-credentials]'); + await fillIn('[data-test-kubernetes-namespace]', 'test-namespace'); + await click('[data-test-generate-credentials-button]'); + await click('[data-test-generate-credentials-done]'); + this.validateRoute( + assert, + 'roles.role.details', + 'Transitions to details route when done generating credentials' + ); + }); +}); diff --git a/ui/tests/acceptance/secrets/backend/kv/breadcrumbs-test.js b/ui/tests/acceptance/secrets/backend/kv/breadcrumbs-test.js new file mode 100644 index 0000000..d7c4814 --- /dev/null +++ b/ui/tests/acceptance/secrets/backend/kv/breadcrumbs-test.js @@ -0,0 +1,30 @@ +import { create } from 'ember-cli-page-object'; +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { click, currentURL, fillIn, find, visit, waitUntil } from '@ember/test-helpers'; +import authPage from 'vault/tests/pages/auth'; +import consoleClass from 'vault/tests/pages/components/console/ui-panel'; + +const consolePanel = create(consoleClass); + +module('Acceptance | kv | breadcrumbs', function (hooks) { + setupApplicationTest(hooks); + + test('it should route back to parent path from metadata tab', async function (assert) { + await authPage.login(); + await consolePanel.runCommands(['delete sys/mounts/kv', 'write sys/mounts/kv type=kv-v2']); + await visit('/vault/secrets/kv/list'); + await click('[data-test-secret-create]'); + await fillIn('[data-test-secret-path]', 'foo/bar'); + await click('[data-test-secret-save]'); + await waitUntil(() => find('[data-test-secret-metadata-tab]')); + await click('[data-test-secret-metadata-tab]'); + await click('[data-test-secret-breadcrumb="foo"]'); + assert.strictEqual( + currentURL(), + 
'/vault/secrets/kv/list/foo/', + 'Routes back to list view on breadcrumb click' + ); + await consolePanel.runCommands(['delete sys/mounts/kv']); + }); +}); diff --git a/ui/tests/acceptance/secrets/backend/kv/diff-test.js b/ui/tests/acceptance/secrets/backend/kv/diff-test.js new file mode 100644 index 0000000..d5ba2e3 --- /dev/null +++ b/ui/tests/acceptance/secrets/backend/kv/diff-test.js @@ -0,0 +1,73 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { click, settled, fillIn } from '@ember/test-helpers'; +import { create } from 'ember-cli-page-object'; +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import editPage from 'vault/tests/pages/secrets/backend/kv/edit-secret'; +import listPage from 'vault/tests/pages/secrets/backend/list'; +import apiStub from 'vault/tests/helpers/noop-all-api-requests'; +import authPage from 'vault/tests/pages/auth'; +import consoleClass from 'vault/tests/pages/components/console/ui-panel'; + +const consoleComponent = create(consoleClass); + +module('Acceptance | kv2 diff view', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(async function () { + this.server = apiStub({ usePassthrough: true }); + return authPage.login(); + }); + + hooks.afterEach(function () { + this.server.shutdown(); + }); + + test('it shows correct diff status based on versions', async function (assert) { + const secretPath = `my-secret`; + + await consoleComponent.runCommands([ + `write sys/mounts/secret type=kv options=version=2`, + // delete any kv previously written here so that tests can be re-run + `delete secret/metadata/${secretPath}`, + 'write -field=client_token auth/token/create policies=kv-v2-degrade', + ]); + + await listPage.visitRoot({ backend: 'secret' }); + await settled(); + await listPage.create(); + await settled(); + await editPage.createSecret(secretPath, 'version1', 'hello'); + await settled(); + await click('[data-test-popup-menu-trigger="version"]'); + + assert.dom('[data-test-view-diff]').doesNotExist('does not show diff view with only one version'); + // add another version + await click('[data-test-secret-edit="true"]'); + + const secondKey = document.querySelectorAll('[data-test-secret-key]')[1]; + const secondValue = document.querySelectorAll('.masked-value')[1]; + await fillIn(secondKey, 'version2'); + await fillIn(secondValue, 'world!'); + await click('[data-test-secret-save]'); + + await click('[data-test-popup-menu-trigger="version"]'); + + assert.dom('[data-test-view-diff]').exists('does show diff view with two versions'); + + await click('[data-test-view-diff]'); + + const diffBetweenVersion2and1 = document.querySelector('.jsondiffpatch-added').innerText; + assert.strictEqual(diffBetweenVersion2and1, 'version2"world!"', 'shows the correct added part'); + + await click('[data-test-popup-menu-trigger="right-version"]'); + + await click('[data-test-rightSide-version="2"]'); + + assert.dom('.diff-status').exists('shows States Match'); + }); +}); diff --git a/ui/tests/acceptance/secrets/backend/kv/secret-test.js b/ui/tests/acceptance/secrets/backend/kv/secret-test.js new file mode 100644 index 0000000..356b271 --- /dev/null +++ b/ui/tests/acceptance/secrets/backend/kv/secret-test.js @@ -0,0 +1,1084 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { + click, + visit, + settled, + currentURL, + currentRouteName, + fillIn, + triggerKeyEvent, + typeIn, +} from '@ember/test-helpers'; +import { create } from 'ember-cli-page-object'; +import { module, skip, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { v4 as uuidv4 } from 'uuid'; + +import editPage from 'vault/tests/pages/secrets/backend/kv/edit-secret'; +import showPage from 'vault/tests/pages/secrets/backend/kv/show'; +import listPage from 'vault/tests/pages/secrets/backend/list'; + +import mountSecrets from 'vault/tests/pages/settings/mount-secret-backend'; +import apiStub from 'vault/tests/helpers/noop-all-api-requests'; +import authPage from 'vault/tests/pages/auth'; +import logout from 'vault/tests/pages/logout'; +import consoleClass from 'vault/tests/pages/components/console/ui-panel'; +import enablePage from 'vault/tests/pages/settings/mount-secret-backend'; + +const consoleComponent = create(consoleClass); + +const writeSecret = async function (backend, path, key, val) { + await listPage.visitRoot({ backend }); + await listPage.create(); + return editPage.createSecret(path, key, val); +}; + +const deleteEngine = async function (enginePath, assert) { + await logout.visit(); + await authPage.login(); + await consoleComponent.runCommands([`delete sys/mounts/${enginePath}`]); + const response = consoleComponent.lastLogOutput; + assert.strictEqual( + response, + `Success! Data deleted (if it existed) at: sys/mounts/${enginePath}`, + 'Engine successfully deleted' + ); +}; + +const mountEngineGeneratePolicyToken = async (enginePath, secretPath, policy, version = 2) => { + await consoleComponent.runCommands([ + // delete any kv previously written here so that tests can be re-run + `delete ${enginePath}/metadata/${secretPath}`, + // delete any previous mount with same name + `delete sys/mounts/${enginePath}`, + // mount engine and generate policy + `write sys/mounts/${enginePath} type=kv options=version=${version}`, + `write sys/policies/acl/kv-v2-test-policy policy=${btoa(policy)}`, + 'write -field=client_token auth/token/create policies=kv-v2-test-policy', + ]); + return consoleComponent.lastLogOutput; +}; + +module('Acceptance | secrets/secret/create, read, delete', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(async function () { + this.uid = uuidv4(); + this.server = apiStub({ usePassthrough: true }); + await authPage.login(); + }); + + hooks.afterEach(async function () { + this.server.shutdown(); + await logout.visit(); + }); + + test('it creates a secret and redirects', async function (assert) { + assert.expect(5); + const secretPath = `kv-path-${this.uid}`; + const path = `kv-engine-${this.uid}`; + await enablePage.enable('kv', path); + await listPage.visitRoot({ backend: path }); + await settled(); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backend.list-root', + 'navigates to the list page' + ); + await listPage.create(); + await settled(); + await editPage.toggleMetadata(); + await settled(); + assert.ok(editPage.hasMetadataFields, 'shows the metadata form'); + await editPage.createSecret(secretPath, 'foo', 'bar'); + await settled(); + + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backend.show', + 'redirects to the show page' + ); + assert.ok(showPage.editIsPresent, 'shows the edit button'); + await deleteEngine(path, assert); + }); + + test('it can create a secret when check-and-set is required', async function 
(assert) { + assert.expect(3); + const enginePath = `kv-secret-${this.uid}`; + const secretPath = 'foo/bar'; + await mountSecrets.visit(); + await mountSecrets.enable('kv', enginePath); + await consoleComponent.runCommands(`write ${enginePath}/config cas_required=true`); + await writeSecret(enginePath, secretPath, 'foo', 'bar'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backend.show', + 'redirects to the show page' + ); + assert.ok(showPage.editIsPresent, 'shows the edit button'); + await deleteEngine(enginePath, assert); + }); + + test('it can create a secret with a non default max version and add metadata', async function (assert) { + assert.expect(4); + const enginePath = `kv-secret-${this.uid}`; + const secretPath = 'maxVersions'; + const maxVersions = 101; + await mountSecrets.visit(); + await mountSecrets.enable('kv', enginePath); + await settled(); + await editPage.startCreateSecret(); + await editPage.path(secretPath); + await editPage.toggleMetadata(); + await settled(); + await editPage.maxVersion(maxVersions); + await settled(); + await editPage.save(); + await settled(); + await editPage.metadataTab(); + await settled(); + const savedMaxVersions = Number( + document.querySelector('[data-test-value-div="Maximum versions"]').innerText + ); + assert.strictEqual( + maxVersions, + savedMaxVersions, + 'max_version displays the saved number set when creating the secret' + ); + // add metadata + await click('[data-test-add-custom-metadata]'); + await fillIn('[data-test-kv-key]', 'key'); + await fillIn('[data-test-kv-value]', 'value'); + await click('[data-test-save-metadata]'); + const key = document.querySelector('[data-test-row-label="key"]').innerText; + const value = document.querySelector('[data-test-row-value="key"]').innerText; + assert.strictEqual(key, 'key', 'metadata key displays after adding it.'); + assert.strictEqual(value, 'value', 'metadata value displays after adding it.'); + await deleteEngine(enginePath, assert); + }); + + skip('it can handle validation on custom metadata', async function (assert) { + assert.expect(3); + const enginePath = `kv-secret-${this.uid}`; + const secretPath = 'customMetadataValidations'; + + await mountSecrets.visit(); + await mountSecrets.enable('kv', enginePath); + await settled(); + await editPage.startCreateSecret(); + await editPage.path(secretPath); + await editPage.toggleMetadata(); + await settled(); + await typeIn('[data-test-kv-value]', 'invalid\\/'); + assert + .dom('[data-test-inline-error-message]') + .hasText('Custom values cannot contain a backward slash.', 'will not allow backward slash in value.'); + await fillIn('[data-test-kv-value]', ''); // clear previous contents + await typeIn('[data-test-kv-value]', 'removed!'); + assert.dom('[data-test-inline-error-message]').doesNotExist('inline error goes away'); + await click('[data-test-secret-save]'); + assert + .dom('[data-test-error]') + .includesText( + 'custom_metadata validation failed: length of key', + 'shows API error that is not captured by validation' + ); + await deleteEngine(enginePath, assert); + }); + + test('it can mount a KV 2 secret engine with config metadata', async function (assert) { + assert.expect(4); + const enginePath = `kv-secret-${this.uid}`; + const maxVersion = '101'; + await mountSecrets.visit(); + await click('[data-test-mount-type="kv"]'); + + await click('[data-test-mount-next]'); + + await fillIn('[data-test-input="path"]', enginePath); + await fillIn('[data-test-input="maxVersions"]', maxVersion); + await 
click('[data-test-input="casRequired"]'); + await click('[data-test-toggle-label="Automate secret deletion"]'); + await fillIn('[data-test-select="ttl-unit"]', 's'); + await fillIn('[data-test-ttl-value="Automate secret deletion"]', '1'); + await click('[data-test-mount-submit="true"]'); + + await click('[data-test-configuration-tab]'); + + const cas = document.querySelector('[data-test-value-div="Require Check and Set"]').innerText; + const deleteVersionAfter = document.querySelector( + '[data-test-value-div="Automate secret deletion"]' + ).innerText; + const savedMaxVersion = document.querySelector( + '[data-test-value-div="Maximum number of versions"]' + ).innerText; + + assert.strictEqual( + maxVersion, + savedMaxVersion, + 'displays the max version set when configuring the secret-engine' + ); + assert.strictEqual(cas.trim(), 'Yes', 'displays the cas set when configuring the secret-engine'); + assert.strictEqual( + deleteVersionAfter.trim(), + '1s', + 'displays the delete version after set when configuring the secret-engine' + ); + await deleteEngine(enginePath, assert); + }); + + test('it can create a secret and metadata can be created and edited', async function (assert) { + assert.expect(2); + const enginePath = `kv-secret-${this.uid}`; + const secretPath = 'metadata'; + const maxVersions = 101; + await mountSecrets.visit(); + await mountSecrets.enable('kv', enginePath); + await settled(); + await editPage.startCreateSecret(); + await editPage.path(secretPath); + await editPage.toggleMetadata(); + await settled(); + await fillIn('[data-test-input="maxVersions"]', maxVersions); + + await editPage.save(); + await settled(); + await editPage.metadataTab(); + await settled(); + const savedMaxVersions = Number(document.querySelectorAll('[data-test-value-div]')[0].innerText); + assert.strictEqual( + maxVersions, + savedMaxVersions, + 'max_version displays the saved number set when creating the secret' + ); + await deleteEngine(enginePath, assert); + }); + + test('it shows validation errors', async function (assert) { + assert.expect(5); + const enginePath = `kv-secret-${this.uid}`; + const secretPath = 'not-duplicate'; + await mountSecrets.visit(); + await mountSecrets.enable('kv', enginePath); + await settled(); + await editPage.startCreateSecret(); + await typeIn('[data-test-secret-path="true"]', 'beep'); + assert + .dom('[data-test-inline-error-message]') + .hasText( + 'A secret with this path already exists.', + 'when duplicate path it shows correct error message' + ); + + await editPage.toggleMetadata(); + await settled(); + await typeIn('[data-test-input="maxVersions"]', 'abc'); + assert + .dom('[data-test-input="maxVersions"]') + .hasClass('has-error-border', 'shows border error on input with error'); + assert.dom('[data-test-secret-save]').isNotDisabled('Save button is not disabled'); + await fillIn('[data-test-input="maxVersions"]', 20); // fillIn replaces the text, whereas typeIn only adds to it.
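// Editor's note (illustrative sketch, not part of the upstream diff): the fillIn/typeIn
// distinction the comment above relies on, in isolation. Both helpers come from
// @ember/test-helpers and settle before resolving; the selector is the one under test here.
//
//   await typeIn('[data-test-input="maxVersions"]', 'abc'); // types into the existing value: appends
//   await fillIn('[data-test-input="maxVersions"]', '20');  // replaces the whole value: now '20'
//
// typeIn also fires keydown/keypress/keyup per character, while fillIn only fires
// input/change events, which appears to be why the explicit triggerKeyEvent('...', 'keyup', ...)
// calls below are still needed after fillIn-style updates.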
+ await triggerKeyEvent('[data-test-input="maxVersions"]', 'keyup', 65); + await editPage.path(secretPath); + await triggerKeyEvent('[data-test-secret-path="true"]', 'keyup', 65); + await click('[data-test-secret-save]'); + assert.strictEqual( + currentURL(), + `/vault/secrets/${enginePath}/show/${secretPath}`, + 'navigates to show secret' + ); + await deleteEngine(enginePath, assert); + }); + + test('it navigates to version history and to a specific version', async function (assert) { + assert.expect(6); + const enginePath = `kv-secret-${this.uid}`; + const secretPath = `specific-version`; + await mountSecrets.visit(); + await mountSecrets.enable('kv', enginePath); + await settled(); + await listPage.visitRoot({ backend: enginePath }); + await settled(); + await listPage.create(); + await settled(); + await editPage.createSecret(secretPath, 'foo', 'bar'); + await click('[data-test-popup-menu-trigger="version"]'); + + assert.dom('[data-test-created-time]').includesText('Version created ', 'shows version created time'); + + await click('[data-test-version-history]'); + + assert + .dom('[data-test-list-item-content]') + .includesText('Version 1 Current', 'shows version one data on the version history as current'); + assert.dom('[data-test-list-item-content]').exists({ count: 1 }, 'renders a single version'); + + await click('.linked-block'); + await click('button.button.masked-input-toggle'); + assert.dom('[data-test-masked-input]').hasText('bar', 'renders secret on the secret version show page'); + assert.strictEqual( + currentURL(), + `/vault/secrets/${enginePath}/show/${secretPath}?version=1`, + 'redirects to the show page with queryParam version=1' + ); + await deleteEngine(enginePath, assert); + }); + + test('version 1 performs the correct capabilities lookup and does not show metadata tab', async function (assert) { + assert.expect(4); + const enginePath = `kv-secret-${this.uid}`; + const secretPath = 'foo/bar'; + // mount version 1 engine + await mountSecrets.visit(); + await mountSecrets.selectType('kv'); + await mountSecrets.next().path(enginePath).toggleOptions().version(1).submit(); + await listPage.create(); + await editPage.createSecret(secretPath, 'foo', 'bar'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backend.show', + 'redirects to the show page' + ); + assert.ok(showPage.editIsPresent, 'shows the edit button'); + // check for metadata tab should not exist on KV version 1 + assert.dom('[data-test-secret-metadata-tab]').doesNotExist('does not show metadata tab'); + await deleteEngine(enginePath, assert); + }); + + // https://github.com/hashicorp/vault/issues/5960 + test('version 1: nested paths creation maintains ability to navigate the tree', async function (assert) { + assert.expect(6); + const enginePath = `kv-secret-${this.uid}`; + const secretPath = '1/2/3/4'; + // mount version 1 engine + await mountSecrets.visit(); + await mountSecrets.selectType('kv'); + await mountSecrets.next().path(enginePath).toggleOptions().version(1).submit(); + await listPage.create(); + await editPage.createSecret(secretPath, 'foo', 'bar'); + + // setup an ancestor for when we delete + await listPage.visitRoot({ backend: enginePath }); + await listPage.secrets.filterBy('text', '1/')[0].click(); + await listPage.create(); + await editPage.createSecret('1/2', 'foo', 'bar'); + + // lol we have to do this because ember-cli-page-object doesn't like *'s in visitable + await listPage.visitRoot({ backend: enginePath }); + await listPage.secrets.filterBy('text', 
'1/')[0].click(); + await listPage.secrets.filterBy('text', '2/')[0].click(); + await listPage.secrets.filterBy('text', '3/')[0].click(); + await listPage.create(); + + await editPage.createSecret(secretPath + 'a', 'foo', 'bar'); + await listPage.visitRoot({ backend: enginePath }); + await listPage.secrets.filterBy('text', '1/')[0].click(); + await listPage.secrets.filterBy('text', '2/')[0].click(); + const secretLink = listPage.secrets.filterBy('text', '3/')[0]; + assert.ok(secretLink, 'link to the 3/ branch displays properly'); + + await listPage.secrets.filterBy('text', '3/')[0].click(); + await listPage.secrets.objectAt(0).menuToggle(); + await settled(); + await listPage.delete(); + await listPage.confirmDelete(); + await settled(); + assert.strictEqual(currentRouteName(), 'vault.cluster.secrets.backend.list'); + assert.strictEqual(currentURL(), `/vault/secrets/${enginePath}/list/1/2/3/`, 'remains on the page'); + + await listPage.secrets.objectAt(0).menuToggle(); + await listPage.delete(); + await listPage.confirmDelete(); + await settled(); + assert.strictEqual(currentRouteName(), 'vault.cluster.secrets.backend.list'); + assert.strictEqual( + currentURL(), + `/vault/secrets/${enginePath}/list/1/`, + 'navigates to the ancestor created earlier' + ); + await deleteEngine(enginePath, assert); + }); + + test('first level secrets redirect properly upon deletion', async function (assert) { + assert.expect(2); + const enginePath = `kv-secret-${this.uid}`; + const secretPath = 'test'; + // mount version 1 engine + await mountSecrets.visit(); + await mountSecrets.selectType('kv'); + await mountSecrets.next().path(enginePath).toggleOptions().version(1).submit(); + await listPage.create(); + await editPage.createSecret(secretPath, 'foo', 'bar'); + await showPage.deleteSecretV1(); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backend.list-root', + 'redirected to the list page on delete' + ); + await deleteEngine(enginePath, assert); + }); + + // https://github.com/hashicorp/vault/issues/5994 + test('version 1: key named keys', async function (assert) { + assert.expect(2); + await consoleComponent.runCommands([ + 'vault write sys/mounts/test type=kv', + 'refresh', + 'vault write test/a keys=a keys=b', + ]); + await showPage.visit({ backend: 'test', id: 'a' }); + assert.ok(showPage.editIsPresent, 'renders the page properly'); + await deleteEngine('test', assert); + }); + + test('it redirects to the path ending in / for list pages', async function (assert) { + assert.expect(3); + const secretPath = `foo/bar/kv-list-${this.uid}`; + await consoleComponent.runCommands(['vault write sys/mounts/secret type=kv']); + await listPage.visitRoot({ backend: 'secret' }); + await listPage.create(); + await editPage.createSecret(secretPath, 'foo', 'bar'); + await settled(); + // use visit helper here because ids with / in them get encoded + await visit('/vault/secrets/secret/list/foo/bar'); + assert.strictEqual(currentRouteName(), 'vault.cluster.secrets.backend.list'); + assert.ok(currentURL().endsWith('/'), 'redirects to the path ending in a slash'); + await deleteEngine('secret', assert); + }); + + test('it can edit via the JSON input', async function (assert) { + assert.expect(4); + const content = JSON.stringify({ foo: 'fa', bar: 'boo' }); + const secretPath = `kv-json-${this.uid}`; + await consoleComponent.runCommands(['vault write sys/mounts/secret type=kv']); + await listPage.visitRoot({ backend: 'secret' }); + await listPage.create(); + await editPage.path(secretPath).toggleJSON(); 
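// Editor's note (illustrative sketch, not part of the upstream diff): the JSON editor in
// the next test is CodeMirror-backed rather than a native input, so fillIn cannot target it.
// CodeMirror 5 hangs the live editor instance off its wrapper element, which is what the
// following lines rely on; assuming that version, driving it from a test looks like
// (the names cm/current are hypothetical):
//
//   const cm = document.querySelector('.CodeMirror').CodeMirror;
//   cm.setValue(JSON.stringify({ foo: 'fa', bar: 'boo' }, null, 2)); // replace editor contents
//   const current = cm.getValue(); // read the contents back as a string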
+ const instance = document.querySelector('.CodeMirror').CodeMirror; + instance.setValue(content); + await editPage.save(); + + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backend.show', + 'redirects to the show page' + ); + assert.ok(showPage.editIsPresent, 'shows the edit button'); + const savedInstance = document.querySelector('.CodeMirror').CodeMirror; + assert.strictEqual( + savedInstance.options.value, + JSON.stringify({ bar: 'boo', foo: 'fa' }, null, 2), + 'saves the content' + ); + await deleteEngine('secret', assert); + }); + + test('paths are properly encoded', async function (assert) { + const backend = `kv-encoding-${this.uid}`; + const paths = [ + '(', + ')', + '"', + //"'", + '!', + '#', + '$', + '&', + '*', + '+', + '@', + '{', + '|', + '}', + '~', + '[', + '\\', + ']', + '^', + '_', + ].map((char) => `${char}some`); + assert.expect(paths.length * 2 + 1); + const secretPath = '2'; + const commands = paths.map((path) => `write '${backend}/${path}/${secretPath}' 3=4`); + await consoleComponent.runCommands([`write sys/mounts/${backend} type=kv`, ...commands]); + for (const path of paths) { + await listPage.visit({ backend, id: path }); + assert.ok(listPage.secrets.filterBy('text', '2')[0], `${path}: secret is displayed properly`); + await listPage.secrets.filterBy('text', '2')[0].click(); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backend.show', + `${path}: show page renders correctly` + ); + } + await deleteEngine(backend, assert); + }); + + test('create secret with space shows version data and shows space warning', async function (assert) { + assert.expect(4); + const enginePath = `kv-engine-${this.uid}`; + const secretPath = 'space space'; + // mount version 2 + await mountSecrets.visit(); + await mountSecrets.selectType('kv'); + await mountSecrets.next().path(enginePath).submit(); + await settled(); + await listPage.create(); + await editPage.createSecretDontSave(secretPath, 'foo', 'bar'); + // to trigger the warning we need to hit keyup on the secret path + await triggerKeyEvent('[data-test-secret-path="true"]', 'keyup', 65); + + assert.dom('[data-test-whitespace-warning]').exists('renders warning about there being a space'); + await settled(); + await click('[data-test-secret-save]'); + + await click('[data-test-popup-menu-trigger="version"]'); + + await click('[data-test-version-history]'); + + assert.dom('[data-test-list-item-content]').exists('renders the version and not an error state'); + // click on version + await click('[data-test-popup-menu-trigger="true"]'); + await click('[data-test-version]'); + + // perform encode function that should be done by the encodePath + const encodedSecretPath = secretPath.replace(/ /g, '%20'); + assert.strictEqual(currentURL(), `/vault/secrets/${enginePath}/show/${encodedSecretPath}?version=1`); + await deleteEngine(enginePath, assert); + }); + + test('UI handles secret with % in path correctly', async function (assert) { + assert.expect(7); + const enginePath = `kv-engine-${this.uid}`; + const secretPath = 'per%cent/%fu ll'; + const [firstPath, secondPath] = secretPath.split('/'); + const commands = [`write sys/mounts/${enginePath} type=kv`, `write '${enginePath}/${secretPath}' 3=4`]; + await consoleComponent.runCommands(commands); + await listPage.visitRoot({ backend: enginePath }); + assert.dom(`[data-test-secret-link="${firstPath}/"]`).exists('First section item exists'); + await click(`[data-test-secret-link="${firstPath}/"]`); + + assert.strictEqual( + currentURL(),
`/vault/secrets/${enginePath}/list/${encodeURIComponent(firstPath)}/`, + 'First part of path is encoded in URL' + ); + assert.dom(`[data-test-secret-link="${secretPath}"]`).exists('Link to secret exists'); + await click(`[data-test-secret-link="${secretPath}"]`); + assert.strictEqual( + currentURL(), + `/vault/secrets/${enginePath}/show/${encodeURIComponent(firstPath)}/${encodeURIComponent(secondPath)}`, + 'secret path is encoded in URL' + ); + assert.dom('h1').hasText(secretPath, 'Path renders correctly on show page'); + await click(`[data-test-secret-breadcrumb="${firstPath}"]`); + assert.strictEqual( + currentURL(), + `/vault/secrets/${enginePath}/list/${encodeURIComponent(firstPath)}/`, + 'Breadcrumb link encodes correctly' + ); + await deleteEngine(enginePath, assert); + }); + + // the web cli does not handle a quote as part of a path, so we test it here via the UI + test('creating a secret with a single or double quote works properly', async function (assert) { + assert.expect(5); + const backend = `kv-quotes-${this.uid}`; + await consoleComponent.runCommands(`write sys/mounts/${backend} type=kv`); + const paths = ["'some", '"some']; + for (const path of paths) { + await listPage.visitRoot({ backend }); + await listPage.create(); + await editPage.createSecret(`${path}/2`, 'foo', 'bar'); + await listPage.visit({ backend, id: path }); + assert.ok(listPage.secrets.filterBy('text', '2')[0], `${path}: secret is displayed properly`); + await listPage.secrets.filterBy('text', '2')[0].click(); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backend.show', + `${path}: show page renders correctly` + ); + } + await deleteEngine(backend, assert); + }); + + test('filter clears on nav', async function (assert) { + assert.expect(5); + const backend = 'test'; + await consoleComponent.runCommands([ + 'vault write sys/mounts/test type=kv', + 'refresh', + 'vault write test/filter/foo keys=a keys=b', + 'vault write test/filter/foo1 keys=a keys=b', + 'vault write test/filter/foo2 keys=a keys=b', + ]); + await listPage.visit({ backend, id: 'filter' }); + assert.strictEqual(listPage.secrets.length, 3, 'renders three secrets'); + await listPage.filterInput('filter/foo1'); + assert.strictEqual(listPage.secrets.length, 1, 'renders only one secret'); + await listPage.secrets.objectAt(0).click(); + await showPage.breadcrumbs.filterBy('text', 'filter')[0].click(); + assert.strictEqual(listPage.secrets.length, 3, 'renders three secrets'); + assert.strictEqual(listPage.filterInputValue, 'filter/', 'pageFilter has been reset'); + await deleteEngine(backend, assert); + }); + + // All policy tests below this line + test('version 2 with restricted policy still allows creation and does not show metadata tab', async function (assert) { + assert.expect(4); + const enginePath = 'dont-show-metadata-tab'; + const secretPath = 'dont-show-metadata-tab-secret-path'; + const V2_POLICY = ` + path "${enginePath}/metadata/*" { + capabilities = ["list"] + } + path "${enginePath}/data/${secretPath}" { + capabilities = ["create", "read", "update"] + } + `; + const userToken = await mountEngineGeneratePolicyToken(enginePath, secretPath, V2_POLICY); + await logout.visit(); + await authPage.login(userToken); + + await writeSecret(enginePath, secretPath, 'foo', 'bar'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backend.show', + 'redirects to the show page' + ); + assert.ok(showPage.editIsPresent, 'shows the edit button'); + //check for metadata tab which should not show because you don't 
have read capabilities + assert.dom('[data-test-secret-metadata-tab]').doesNotExist('does not show metadata tab'); + await deleteEngine(enginePath, assert); + }); + + test('version 2 with no access to data but access to metadata shows metadata tab', async function (assert) { + assert.expect(5); + const enginePath = 'kv-metadata-access-only'; + const secretPath = 'nested/kv-metadata-access-only-secret-name'; + const V2_POLICY = ` + path "${enginePath}/metadata/nested/*" { + capabilities = ["read", "update"] + } + `; + + const userToken = await mountEngineGeneratePolicyToken(enginePath, secretPath, V2_POLICY); + await writeSecret(enginePath, secretPath, 'foo', 'bar'); + await logout.visit(); + await authPage.login(userToken); + await settled(); + await visit(`/vault/secrets/${enginePath}/show/${secretPath}`); + assert.dom('[data-test-empty-state-title]').hasText('You do not have permission to read this secret.'); + assert.dom('[data-test-secret-metadata-tab]').exists('Metadata tab exists'); + await editPage.metadataTab(); + await settled(); + assert.dom('[data-test-empty-state-title]').hasText('No custom metadata'); + assert.dom('[data-test-add-custom-metadata]').exists('it shows link to edit metadata'); + + await deleteEngine(enginePath, assert); + }); + + test('version 2: with metadata no read or list but with delete access and full access to the data endpoint', async function (assert) { + assert.expect(12); + const enginePath = 'no-metadata-read'; + const secretPath = 'no-metadata-read-secret-name'; + const V2_POLICY_NO_LIST = ` + path "${enginePath}/metadata/*" { + capabilities = ["update","delete"] + } + path "${enginePath}/data/*" { + capabilities = ["create", "read", "update", "delete"] + } + `; + const userToken = await mountEngineGeneratePolicyToken(enginePath, secretPath, V2_POLICY_NO_LIST); + await listPage.visitRoot({ backend: enginePath }); + // confirm they see an empty state and not the get-credentials card + assert.dom('[data-test-empty-state-title]').hasText('No secrets in this backend'); + await settled(); + await listPage.create(); + await settled(); + await editPage.createSecretWithMetadata(secretPath, 'secret-key', 'secret-value', 101); + await settled(); + await logout.visit(); + await settled(); + await authPage.login(userToken); + await settled(); + // test that the metadata tab is there with a no-read-access message and no ability to edit.
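// Editor's note (hedged summary, not part of the upstream diff): the policy fixtures in
// these tests lean on KV v2 splitting one logical secret across several API prefixes,
// so capabilities are granted per prefix rather than per secret; roughly:
//
//   path "<mount>/data/<secret>"     { capabilities = ["create", "read", "update"] } // the secret versions themselves
//   path "<mount>/metadata/<secret>" { capabilities = ["list", "read", "delete"] }   // key listing, custom metadata, version history
//   path "<mount>/delete/<secret>"   { capabilities = ["update"] }                   // soft-delete specific versions
//   path "<mount>/undelete/<secret>" { capabilities = ["update"] }                   // restore soft-deleted versions
//   path "<mount>/destroy/<secret>"  { capabilities = ["update"] }                   // permanently remove version data
//
// That split is why a token here can, for example, write data it is never allowed to read back.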
+ await click(`[data-test-auth-backend-link=${enginePath}]`); + assert + .dom('[data-test-get-credentials]') + .exists( + 'They do not have list access so when logged in under the restricted policy they see the get-credentials-card' + ); + + await visit(`/vault/secrets/${enginePath}/show/${secretPath}`); + + assert + .dom('[data-test-value-div="secret-key"]') + .exists('secret view page and info table row with secret-key value'); + + // Create new version + assert.dom('[data-test-secret-edit]').doesNotHaveClass('disabled', 'Create new version is not disabled'); + await click('[data-test-secret-edit]'); + + // create new version should not include version in the URL + assert.strictEqual( + currentURL(), + `/vault/secrets/${enginePath}/edit/${secretPath}`, + 'edit route does not include version query param' + ); + // Update key + await editPage.secretKey('newKey'); + await editPage.secretValue('some-value'); + await editPage.save(); + assert.dom('[data-test-value-div="newKey"]').exists('Info row table exists at newKey'); + + // check metadata tab + await click('[data-test-secret-metadata-tab]'); + + assert + .dom('[data-test-empty-state-message]') + .hasText( + 'In order to edit secret metadata access, the UI requires read permissions; otherwise, data may be deleted. Edits can still be made via the API and CLI.' + ); + // destroy the version + await click('[data-test-secret-tab]'); + + await click('[data-test-delete-open-modal]'); + + assert.dom('.modal.is-active').exists('Modal appears'); + assert.dom('[data-test-delete-modal="destroy-all-versions"]').exists(); // we have an if (Ember.testing) catch in the delete action because it breaks things in testing + // we can however destroy the versions + await click('#destroy-all-versions'); + + await click('[data-test-modal-delete]'); + + assert.strictEqual(currentURL(), `/vault/secrets/${enginePath}/list`, 'brings you back to the list page'); + await visit(`/vault/secrets/${enginePath}/show/${secretPath}`); + + assert.dom('[data-test-secret-not-found]').exists('secret no longer found'); + await deleteEngine(enginePath, assert); + }); + + // KV delete operations testing + test('version 2 with policy with destroy capabilities shows modal', async function (assert) { + assert.expect(5); + const enginePath = 'kv-v2-destroy-capabilities'; + const secretPath = 'kv-v2-destroy-capabilities-secret-path'; + const V2_POLICY = ` + path "${enginePath}/destroy/*" { + capabilities = ["update"] + } + path "${enginePath}/metadata/*" { + capabilities = ["list", "update", "delete"] + } + path "${enginePath}/data/${secretPath}" { + capabilities = ["create", "read", "update"] + } + `; + const userToken = await mountEngineGeneratePolicyToken(enginePath, secretPath, V2_POLICY); + await logout.visit(); + await authPage.login(userToken); + + await writeSecret(enginePath, secretPath, 'foo', 'bar'); + await click('[data-test-delete-open-modal]'); + + assert.dom('[data-test-delete-modal="destroy-version"]').exists('destroy this version option shows'); + assert.dom('[data-test-delete-modal="destroy-all-versions"]').exists('destroy all versions option shows'); + assert.dom('[data-test-delete-modal="delete-version"]').doesNotExist('delete version does not show'); + + // because destroy requires a page refresh (which would make the test suite run in a loop), this action is caught in Ember testing and does not refresh. + // therefore, to show the new state change after the modal closes, we jump to the metadata tab and then back.
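// Editor's note (hedged, not part of the upstream diff): settled() from @ember/test-helpers
// resolves once the run loop and registered test waiters are idle, and helpers like
// click/fillIn already await it internally. The explicit, eslint-disabled settled() calls
// below are an extra guard for the modal's destroy side effects, which the helpers cannot
// observe; roughly:
//
//   await click('#destroy-version'); // the helper settles its own event dispatch
//   await settled();                 // belt-and-braces wait for follow-on async work
//
// so the apparent duplication is deliberate rather than an oversight.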
+ await click('#destroy-version'); + await settled(); // eslint-disable-line + await click('[data-test-modal-delete]'); + await settled(); // eslint-disable-line + await click('[data-test-secret-metadata-tab]'); + await settled(); // eslint-disable-line + await click('[data-test-secret-tab]'); + await settled(); // eslint-disable-line + assert + .dom('[data-test-empty-state-title]') + .includesText('Version 1 of this secret has been permanently destroyed'); + await deleteEngine(enginePath, assert); + }); + + test('version 2 with policy with only delete option does not show modal and undelete is an option', async function (assert) { + assert.expect(5); + const enginePath = 'kv-v2-only-delete'; + const secretPath = 'kv-v2-only-delete-secret-path'; + const V2_POLICY = ` + path "${enginePath}/delete/*" { + capabilities = ["update"] + } + path "${enginePath}/undelete/*" { + capabilities = ["update"] + } + path "${enginePath}/metadata/*" { + capabilities = ["list","read","create","update"] + } + path "${enginePath}/data/${secretPath}" { + capabilities = ["create", "read"] + } + `; + const userToken = await mountEngineGeneratePolicyToken(enginePath, secretPath, V2_POLICY); + await logout.visit(); + await authPage.login(userToken); + await writeSecret(enginePath, secretPath, 'foo', 'bar'); + assert.dom('[data-test-delete-open-modal]').doesNotExist('delete version does not show'); + assert.dom('[data-test-secret-v2-delete="true"]').exists('drop down delete shows'); + await showPage.deleteSecretV2(); + // unable to reload page in test scenario so going to list and back to secret to confirm deletion + const url = `/vault/secrets/${enginePath}/list`; + await visit(url); + + await click(`[data-test-secret-link="${secretPath}"]`); + await settled(); // eslint-disable-line + assert.dom('[data-test-component="empty-state"]').exists('secret has been deleted'); + assert.dom('[data-test-secret-undelete]').exists('undelete button shows'); + await deleteEngine(enginePath, assert); + }); + + test('version 2: policy includes "delete" capability for secret path but does not have "update" to /delete endpoint', async function (assert) { + assert.expect(4); + const enginePath = 'kv-v2-soft-delete-only'; + const secretPath = 'kv-v2-delete-capability-not-path'; + const policy = ` + path "${enginePath}/data/${secretPath}" { capabilities = ["create","read","update","delete","list"] } + path "${enginePath}/metadata/*" { capabilities = ["create","update","delete","list","read"] } + path "${enginePath}/undelete/*" { capabilities = ["update"] } + `; + const userToken = await mountEngineGeneratePolicyToken(enginePath, secretPath, policy); + await logout.visit(); + await authPage.login(userToken); + await writeSecret(enginePath, secretPath, 'foo', 'bar'); + // create multiple versions + await click('[data-test-secret-edit]'); + await editPage.editSecret('foo2', 'bar2'); + await click('[data-test-secret-edit]'); + await editPage.editSecret('foo3', 'bar3'); + // delete oldest version + await click('[data-test-popup-menu-trigger="version"]'); + await click('[data-test-version-dropdown-link="1"]'); + await click('[data-test-delete-open-modal]'); + assert + .dom('[data-test-type-select="delete-version"]') + .hasText('Delete latest version', 'modal reads that it will delete latest version'); + await click('input#delete-version'); + await click('[data-test-modal-delete]'); + await visit(`/vault/secrets/${enginePath}/show/${secretPath}?version=3`); + assert + .dom('[data-test-empty-state-title]') + .hasText( + 'Version 3 of this 
secret has been deleted', + 'empty state renders latest version has been deleted' + ); + await visit(`/vault/secrets/${enginePath}/show/${secretPath}?version=1`); + assert.dom('[data-test-delete-open-modal]').hasText('Delete', 'version 1 has not been deleted'); + await deleteEngine(enginePath, assert); + }); + + test('version 2: policy has "update" to /delete endpoint but not "delete" capability for secret path', async function (assert) { + assert.expect(5); + const enginePath = 'kv-v2-can-delete-version'; + const secretPath = 'kv-v2-delete-path-not-capability'; + const policy = ` + path "${enginePath}/data/${secretPath}" { capabilities = ["create","read","update","list"] } + path "${enginePath}/metadata/*" { capabilities = ["create","update","delete","list","read"] } + path "${enginePath}/undelete/*" { capabilities = ["update"] } + path "${enginePath}/delete/*" { capabilities = ["update"] } + `; + const userToken = await mountEngineGeneratePolicyToken(enginePath, secretPath, policy); + await logout.visit(); + await authPage.login(userToken); + await writeSecret(enginePath, secretPath, 'foo', 'bar'); + // create multiple versions + await click('[data-test-secret-edit]'); + await editPage.editSecret('foo2', 'bar2'); + await click('[data-test-secret-edit]'); + await editPage.editSecret('foo3', 'bar3'); + // delete oldest version + await click('[data-test-popup-menu-trigger="version"]'); + await click('[data-test-version-dropdown-link="1"]'); + await click('[data-test-delete-open-modal]'); + assert + .dom('[data-test-type-select="delete-version"]') + .hasText('Delete this version', 'delete option refers to "this" version'); + assert + .dom('[data-test-delete-modal="delete-version"]') + .hasTextContaining('Version 1', 'modal reads that it will delete version 1'); + await click('input#delete-version'); + await click('[data-test-modal-delete]'); + await visit(`/vault/secrets/${enginePath}/show/${secretPath}?version=3`); + assert.dom('[data-test-delete-open-modal]').hasText('Delete', 'latest version (3) has not been deleted'); + await visit(`/vault/secrets/${enginePath}/show/${secretPath}?version=1`); + assert + .dom('[data-test-empty-state-title]') + .hasText( + 'Version 1 of this secret has been deleted', + 'empty state renders oldest version (1) has been deleted' + ); + await deleteEngine(enginePath, assert); + }); + + test('version 2 with path forward slash will show delete button', async function (assert) { + assert.expect(2); + const enginePath = 'kv-v2-forward-slash'; + const secretPath = 'forward/slash'; + const V2_POLICY = ` + path "${enginePath}/delete/${secretPath}" { + capabilities = ["update"] + } + path "${enginePath}/metadata/*" { + capabilities = ["list","read","create","update"] + } + path "${enginePath}/data/${secretPath}" { + capabilities = ["create", "read"] + } + `; + const userToken = await mountEngineGeneratePolicyToken(enginePath, secretPath, V2_POLICY); + await logout.visit(); + await authPage.login(userToken); + await writeSecret(enginePath, secretPath, 'foo', 'bar'); + assert.dom('[data-test-secret-v2-delete="true"]').exists('drop down delete shows'); + await deleteEngine(enginePath, assert); + }); + + test('version 2 with engine with forward slash will show delete button', async function (assert) { + assert.expect(2); + const enginePath = 'forward/slash'; + const secretPath = 'secret-name'; + const V2_POLICY = ` + path "${enginePath}/delete/${secretPath}" { + capabilities = ["update"] + } + path "${enginePath}/metadata/*" { + capabilities = 
["list","read","create","update"] + } + path "${enginePath}/data/*" { + capabilities = ["create", "read"] + } + `; + const userToken = await mountEngineGeneratePolicyToken(enginePath, secretPath, V2_POLICY); + await logout.visit(); + await authPage.login(userToken); + await writeSecret(enginePath, secretPath, 'foo', 'bar'); + assert.dom('[data-test-secret-v2-delete="true"]').exists('drop down delete shows'); + await deleteEngine(enginePath, assert); + }); + + const setupNoRead = async function (backend, canReadMeta = false) { + const V2_WRITE_ONLY_POLICY = ` + path "${backend}/+/+" { + capabilities = ["create", "update", "list"] + } + path "${backend}/+" { + capabilities = ["list"] + } + `; + + const V2_WRITE_WITH_META_READ_POLICY = ` + path "${backend}/+/+" { + capabilities = ["create", "update", "list"] + } + path "${backend}/metadata/+" { + capabilities = ["read"] + } + path "${backend}/+" { + capabilities = ["list"] + } + `; + const V1_WRITE_ONLY_POLICY = ` + path "${backend}/+" { + capabilities = ["create", "update", "list"] + } + `; + + const version = backend === 'kv-v2' ? 2 : 1; + let policy; + if (backend === 'kv-v2' && canReadMeta) { + policy = V2_WRITE_WITH_META_READ_POLICY; + } else if (backend === 'kv-v2') { + policy = V2_WRITE_ONLY_POLICY; + } else if (backend === 'kv-v1') { + policy = V1_WRITE_ONLY_POLICY; + } + + return await mountEngineGeneratePolicyToken(backend, 'nonexistent-secret', policy, version); + }; + test('write without read: version 2', async function (assert) { + assert.expect(5); + const backend = 'kv-v2'; + const userToken = await setupNoRead(backend); + await writeSecret(backend, 'secret', 'foo', 'bar'); + await logout.visit(); + await authPage.login(userToken); + + await showPage.visit({ backend, id: 'secret' }); + assert.ok(showPage.noReadIsPresent, 'shows no read empty state'); + assert.ok(showPage.editIsPresent, 'shows the edit button'); + + await editPage.visitEdit({ backend, id: 'secret' }); + assert.notOk(editPage.hasMetadataFields, 'hides the metadata form'); + + await editPage.editSecret('bar', 'baz'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backend.show', + 'redirects to the show page' + ); + await deleteEngine(backend, assert); + }); + + test('write without read: version 2 with metadata read', async function (assert) { + assert.expect(5); + const backend = 'kv-v2'; + const userToken = await setupNoRead(backend, true); + await writeSecret(backend, 'secret', 'foo', 'bar'); + await logout.visit(); + await authPage.login(userToken); + + await showPage.visit({ backend, id: 'secret' }); + assert.ok(showPage.noReadIsPresent, 'shows no read empty state'); + assert.ok(showPage.editIsPresent, 'shows the edit button'); + + await editPage.visitEdit({ backend, id: 'secret' }); + assert + .dom('[data-test-warning-no-read-permissions]') + .exists('shows custom warning instead of default API warning about permissions'); + + await editPage.editSecret('bar', 'baz'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backend.show', + 'redirects to the show page' + ); + await deleteEngine(backend, assert); + }); + + test('write without read: version 1', async function (assert) { + assert.expect(4); + const backend = 'kv-v1'; + const userToken = await setupNoRead(backend); + await writeSecret(backend, 'secret', 'foo', 'bar'); + await logout.visit(); + await authPage.login(userToken); + + await showPage.visit({ backend, id: 'secret' }); + assert.ok(showPage.noReadIsPresent, 'shows no read empty state'); + 
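// Editor's note (hedged, not part of the upstream diff): the "+" segments in the
// setupNoRead policies above are Vault's single-segment path wildcard, distinct from "*":
//
//   path "kv-v2/+/+" { capabilities = ["create", "update", "list"] } // kv-v2/data/secret, kv-v2/metadata/secret, ...
//   path "kv-v2/+"   { capabilities = ["list"] }                     // exactly one segment under the mount
//
// "*" is only valid at the end of a path and greedily matches any remaining suffix, so the
// "+" form keeps the write-only token scoped to top-level secret names.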
assert.ok(showPage.editIsPresent, 'shows the edit button'); + + await editPage.visitEdit({ backend, id: 'secret' }); + await editPage.editSecret('bar', 'baz'); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backend.show', + 'redirects to the show page' + ); + await deleteEngine(backend, assert); + }); +}); diff --git a/ui/tests/acceptance/secrets/backend/ssh/role-test.js b/ui/tests/acceptance/secrets/backend/ssh/role-test.js new file mode 100644 index 0000000..b2dbfaf --- /dev/null +++ b/ui/tests/acceptance/secrets/backend/ssh/role-test.js @@ -0,0 +1,112 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { currentRouteName, settled } from '@ember/test-helpers'; +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { v4 as uuidv4 } from 'uuid'; + +import editPage from 'vault/tests/pages/secrets/backend/ssh/edit-role'; +import showPage from 'vault/tests/pages/secrets/backend/ssh/show'; +import generatePage from 'vault/tests/pages/secrets/backend/ssh/generate-otp'; +import listPage from 'vault/tests/pages/secrets/backend/list'; +import enablePage from 'vault/tests/pages/settings/mount-secret-backend'; +import authPage from 'vault/tests/pages/auth'; + +module('Acceptance | secrets/ssh', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(function () { + this.uid = uuidv4(); + return authPage.login(); + }); + + const mountAndNav = async (uid) => { + const path = `ssh-${uid}`; + await enablePage.enable('ssh', path); + await settled(); + await editPage.visitRoot({ backend: path }); + await settled(); + return path; + }; + + test('it creates a role and redirects', async function (assert) { + assert.expect(5); + const path = await mountAndNav(this.uid); + await editPage.createOTPRole('role'); + await settled(); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backend.show', + 'redirects to the show page' + ); + assert.ok(showPage.generateIsPresent, 'shows the generate button'); + + await showPage.visit({ backend: path, id: 'role' }); + await settled(); + await showPage.generate(); + await settled(); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backend.credentials', + 'navs to the credentials page' + ); + + await listPage.visitRoot({ backend: path }); + await settled(); + assert.strictEqual(listPage.secrets.length, 1, 'shows role in the list'); + const secret = listPage.secrets.objectAt(0); + await secret.menuToggle(); + assert.ok(listPage.menuItems.length > 0, 'shows links in the menu'); + }); + + test('it deletes a role', async function (assert) { + assert.expect(2); + const path = await mountAndNav(this.uid); + await editPage.createOTPRole('role'); + await settled(); + await showPage.visit({ backend: path, id: 'role' }); + await settled(); + await showPage.deleteRole(); + await settled(); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backend.list-root', + 'redirects to list page' + ); + assert.ok(listPage.backendIsEmpty, 'no roles listed'); + }); + + test('it generates an OTP', async function (assert) { + assert.expect(6); + const path = await mountAndNav(this.uid); + await editPage.createOTPRole('role'); + await settled(); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backend.show', + 'redirects to the show page' + ); + assert.ok(showPage.generateIsPresent, 'shows the generate button'); + + await showPage.visit({ backend: path, id: 'role' }); + await settled(); + await 
showPage.generate(); + await settled(); + assert.strictEqual( + currentRouteName(), + 'vault.cluster.secrets.backend.credentials', + 'navs to the credentials page' + ); + + await generatePage.generateOTP(); + await settled(); + assert.ok(generatePage.warningIsPresent, 'shows warning'); + await generatePage.back(); + await settled(); + assert.ok(generatePage.userIsPresent, 'clears generate, shows user input'); + assert.ok(generatePage.ipIsPresent, 'clears generate, shows ip input'); + }); +}); diff --git a/ui/tests/acceptance/settings-test.js b/ui/tests/acceptance/settings-test.js new file mode 100644 index 0000000..f4210aa --- /dev/null +++ b/ui/tests/acceptance/settings-test.js @@ -0,0 +1,58 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { currentURL, find, visit, settled } from '@ember/test-helpers'; +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { v4 as uuidv4 } from 'uuid'; + +import backendListPage from 'vault/tests/pages/secrets/backends'; +import mountSecrets from 'vault/tests/pages/settings/mount-secret-backend'; +import authPage from 'vault/tests/pages/auth'; +import logout from 'vault/tests/pages/logout'; + +module('Acceptance | settings', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(function () { + this.uid = uuidv4(); + return authPage.login(); + }); + + hooks.afterEach(function () { + return logout.visit(); + }); + + test('settings', async function (assert) { + const type = 'consul'; + const path = `settings-path-${this.uid}`; + + // mount unsupported backend + await visit('/vault/settings/mount-secret-backend'); + + assert.strictEqual(currentURL(), '/vault/settings/mount-secret-backend'); + + await mountSecrets.selectType(type); + await mountSecrets + .next() + .path(path) + .toggleOptions() + .enableDefaultTtl() + .defaultTTLUnit('s') + .defaultTTLVal(100) + .submit(); + await settled(); + assert.ok( + find('[data-test-flash-message]').textContent.trim(), + `Successfully mounted '${type}' at '${path}'!` + ); + await settled(); + assert.strictEqual(currentURL(), `/vault/secrets`, 'redirects to secrets page'); + const row = backendListPage.rows.filterBy('path', path + '/')[0]; + await row.menu(); + await backendListPage.configLink(); + assert.strictEqual(currentURL(), `/vault/secrets/${path}/configuration`, 'navigates to the config page'); + }); +}); diff --git a/ui/tests/acceptance/settings/auth/configure/index-test.js b/ui/tests/acceptance/settings/auth/configure/index-test.js new file mode 100644 index 0000000..ce04431 --- /dev/null +++ b/ui/tests/acceptance/settings/auth/configure/index-test.js @@ -0,0 +1,48 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { currentURL, currentRouteName } from '@ember/test-helpers'; +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { v4 as uuidv4 } from 'uuid'; + +import enablePage from 'vault/tests/pages/settings/auth/enable'; +import page from 'vault/tests/pages/settings/auth/configure/index'; +import authPage from 'vault/tests/pages/auth'; + +module('Acceptance | settings/auth/configure', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(function () { + this.uid = uuidv4(); + return authPage.login(); + }); + + test('it redirects to section options when there are no other sections', async function (assert) { + const path = `approle-config-${this.uid}`; + const type = 'approle'; + await enablePage.enable(type, path); + await page.visit({ path }); + assert.strictEqual(currentRouteName(), 'vault.cluster.settings.auth.configure.section'); + assert.strictEqual( + currentURL(), + `/vault/settings/auth/configure/${path}/options`, + 'loads the options route' + ); + }); + + test('it redirects to the first section', async function (assert) { + const path = `aws-redirect-${this.uid}`; + const type = 'aws'; + await enablePage.enable(type, path); + await page.visit({ path }); + assert.strictEqual(currentRouteName(), 'vault.cluster.settings.auth.configure.section'); + assert.strictEqual( + currentURL(), + `/vault/settings/auth/configure/${path}/client`, + 'loads the first section for the type of auth method' + ); + }); +}); diff --git a/ui/tests/acceptance/settings/auth/configure/section-test.js b/ui/tests/acceptance/settings/auth/configure/section-test.js new file mode 100644 index 0000000..24617ff --- /dev/null +++ b/ui/tests/acceptance/settings/auth/configure/section-test.js @@ -0,0 +1,74 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { create } from 'ember-cli-page-object'; +import { fillIn } from '@ember/test-helpers'; +import { v4 as uuidv4 } from 'uuid'; + +import enablePage from 'vault/tests/pages/settings/auth/enable'; +import page from 'vault/tests/pages/settings/auth/configure/section'; +import indexPage from 'vault/tests/pages/settings/auth/configure/index'; +import apiStub from 'vault/tests/helpers/noop-all-api-requests'; +import consolePanel from 'vault/tests/pages/components/console/ui-panel'; +import authPage from 'vault/tests/pages/auth'; + +const cli = create(consolePanel); + +module('Acceptance | settings/auth/configure/section', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(function () { + this.uid = uuidv4(); + this.server = apiStub({ usePassthrough: true }); + return authPage.login(); + }); + + hooks.afterEach(function () { + this.server.shutdown(); + }); + + test('it can save options', async function (assert) { + const path = `approle-save-${this.uid}`; + const type = 'approle'; + const section = 'options'; + await enablePage.enable(type, path); + await page.visit({ path, section }); + await page.fillInTextarea('description', 'This is AppRole!'); + assert + .dom('[data-test-input="config.tokenType"]') + .hasValue('default-service', 'as default the token type selected is default-service.'); + await fillIn('[data-test-input="config.tokenType"]', 'batch'); + await page.save(); + assert.strictEqual( + page.flash.latestMessage, + `The configuration was saved successfully.`, + 'success flash shows' + ); + const tuneRequest = this.server.passthroughRequests.filterBy( + 'url', + `/v1/sys/mounts/auth/${path}/tune` + )[0]; + const keys = Object.keys(JSON.parse(tuneRequest.requestBody)); + const token_type = JSON.parse(tuneRequest.requestBody).token_type; + assert.strictEqual(token_type, 'batch', 'passes new token type'); + assert.ok(keys.includes('default_lease_ttl'), 'passes default_lease_ttl on tune'); + assert.ok(keys.includes('max_lease_ttl'), 'passes max_lease_ttl on tune'); + assert.ok(keys.includes('description'), 'passes updated description on tune'); + }); + + for (const type of ['aws', 'azure', 'gcp', 'github', 'kubernetes']) { + test(`it shows tabs for auth method: ${type}`, async function (assert) { + const path = `${type}-showtab-${this.uid}`; + await cli.consoleInput(`write sys/auth/${path} type=${type}`); + await cli.enter(); + await indexPage.visit({ path }); + // aws has 4 tabs, the others will have 'Configuration' and 'Method Options' tabs + const numTabs = type === 'aws' ? 4 : 2; + assert.strictEqual(page.tabs.length, numTabs, 'shows correct number of tabs'); + }); + } +}); diff --git a/ui/tests/acceptance/settings/auth/enable-test.js b/ui/tests/acceptance/settings/auth/enable-test.js new file mode 100644 index 0000000..02ed844 --- /dev/null +++ b/ui/tests/acceptance/settings/auth/enable-test.js @@ -0,0 +1,45 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import { currentRouteName, settled } from '@ember/test-helpers';
+import { module, test } from 'qunit';
+import { setupApplicationTest } from 'ember-qunit';
+import { v4 as uuidv4 } from 'uuid';
+
+import page from 'vault/tests/pages/settings/auth/enable';
+import listPage from 'vault/tests/pages/access/methods';
+import authPage from 'vault/tests/pages/auth';
+
+module('Acceptance | settings/auth/enable', function (hooks) {
+  setupApplicationTest(hooks);
+
+  hooks.beforeEach(function () {
+    this.uid = uuidv4();
+    return authPage.login();
+  });
+
+  test('it mounts and redirects', async function (assert) {
+    // always force the new mount to the top of the list
+    const path = `aaa-approle-${this.uid}`;
+    const type = 'approle';
+    await page.visit();
+    assert.strictEqual(currentRouteName(), 'vault.cluster.settings.auth.enable');
+    await page.enable(type, path);
+    await settled();
+    assert.strictEqual(
+      page.flash.latestMessage,
+      `Successfully mounted the ${type} auth method at ${path}.`,
+      'success flash shows'
+    );
+    assert.strictEqual(
+      currentRouteName(),
+      'vault.cluster.settings.auth.configure.section',
+      'redirects to the auth config page'
+    );
+
+    await listPage.visit();
+    assert.ok(listPage.findLinkById(path), 'mount is present in the list');
+  });
+});
diff --git a/ui/tests/acceptance/settings/configure-secret-backends/configure-ssh-secret-test.js b/ui/tests/acceptance/settings/configure-secret-backends/configure-ssh-secret-test.js
new file mode 100644
index 0000000..5b35bbd
--- /dev/null
+++ b/ui/tests/acceptance/settings/configure-secret-backends/configure-ssh-secret-test.js
@@ -0,0 +1,48 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import { click, settled, visit } from '@ember/test-helpers';
+import { module, test } from 'qunit';
+import { setupApplicationTest } from 'ember-qunit';
+import { v4 as uuidv4 } from 'uuid';
+
+import authPage from 'vault/tests/pages/auth';
+import enablePage from 'vault/tests/pages/settings/mount-secret-backend';
+import { create } from 'ember-cli-page-object';
+import fm from 'vault/tests/pages/components/flash-message';
+const flashMessage = create(fm);
+const SELECTORS = {
+  generateSigningKey: '[data-test-ssh-input="generate-signing-key-checkbox"]',
+  saveConfig: '[data-test-ssh-input="configure-submit"]',
+  publicKey: '[data-test-ssh-input="public-key"]',
+};
+module('Acceptance | settings/configure/secrets/ssh', function (hooks) {
+  setupApplicationTest(hooks);
+
+  hooks.beforeEach(function () {
+    this.uid = uuidv4();
+    return authPage.login();
+  });
+
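+  // For context (sketch): saving this form drives the SSH engine's CA config
+  // endpoint, roughly
+  //
+  //   POST /v1/<path>/config/ca   { "generate_signing_key": true }
+  //
+  // With the checkbox unchecked and no public_key supplied, the backend
+  // rejects the save with "missing public_key", which the flash assertion
+  // below depends on.
+  test('it configures ssh ca', async function (assert) {
+    const path = `ssh-configure-${this.uid}`;
+    await enablePage.enable('ssh', path);
+    await settled();
+    await visit(`/vault/settings/secrets/configure/${path}`);
+    await settled();
+    assert.dom(SELECTORS.generateSigningKey).isChecked('generate_signing_key defaults to true');
+    await click(SELECTORS.generateSigningKey);
+    await click(SELECTORS.saveConfig);
+    assert.strictEqual(
+      flashMessage.latestMessage,
+      'missing public_key',
+      'renders warning flash message for failed save'
+    );
+    await click(SELECTORS.generateSigningKey);
+    await click(SELECTORS.saveConfig);
+    assert.dom(SELECTORS.publicKey).exists('renders public key after saving config');
+  });
+});
diff --git a/ui/tests/acceptance/settings/mount-secret-backend-test.js b/ui/tests/acceptance/settings/mount-secret-backend-test.js
new file mode 100644
index 0000000..12c3ef6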
--- /dev/null
+++ b/ui/tests/acceptance/settings/mount-secret-backend-test.js
@@ -0,0 +1,199 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import { currentRouteName, currentURL, settled } from '@ember/test-helpers';
+import { module, test } from 'qunit';
+import { setupApplicationTest } from 'ember-qunit';
+import { v4 as uuidv4 } from 'uuid';
+
+import { create } from 'ember-cli-page-object';
+import page from 'vault/tests/pages/settings/mount-secret-backend';
+import configPage from 'vault/tests/pages/secrets/backend/configuration';
+import authPage from 'vault/tests/pages/auth';
+import consoleClass from 'vault/tests/pages/components/console/ui-panel';
+import logout from 'vault/tests/pages/logout';
+import mountSecrets from 'vault/tests/pages/settings/mount-secret-backend';
+import { allEngines } from 'vault/helpers/mountable-secret-engines';
+
+const consoleComponent = create(consoleClass);
+
+module('Acceptance | settings/mount-secret-backend', function (hooks) {
+  setupApplicationTest(hooks);
+
+  hooks.beforeEach(function () {
+    this.uid = uuidv4();
+    return authPage.login();
+  });
+
+  test('it sets the ttl correctly when mounting', async function (assert) {
+    // always force the new mount to the top of the list
+    const path = `mount-kv-${this.uid}`;
+    const defaultTTLHours = 100;
+    const maxTTLHours = 300;
+
+    await page.visit();
+
+    assert.strictEqual(currentRouteName(), 'vault.cluster.settings.mount-secret-backend');
+    await page.selectType('kv');
+    await page
+      .next()
+      .path(path)
+      .toggleOptions()
+      .enableDefaultTtl()
+      .defaultTTLUnit('h')
+      .defaultTTLVal(defaultTTLHours)
+      .enableMaxTtl()
+      .maxTTLUnit('h')
+      .maxTTLVal(maxTTLHours)
+      .submit();
+    await configPage.visit({ backend: path });
+    assert.strictEqual(configPage.defaultTTL, `${defaultTTLHours}h`, 'shows the proper TTL');
+    assert.strictEqual(configPage.maxTTL, `${maxTTLHours}h`, 'shows the proper max TTL');
+  });
+
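+  // For reference (sketch): the TTLs chosen in these tests are sent to the
+  // mount endpoint as config, roughly
+  //
+  //   POST /v1/sys/mounts/<path>
+  //   { "type": "kv", "config": { "default_lease_ttl": "100h", "max_lease_ttl": "300h" } }
+  test('it sets the ttl when enabled then disabled', async function (assert) {
+    // always force the new mount to the top of the list
+    const path = `mount-kv-${this.uid}`;
+    const maxTTLHours = 300;
+
+    await page.visit();
+
+    assert.strictEqual(currentRouteName(), 'vault.cluster.settings.mount-secret-backend');
+    await page.selectType('kv');
+    await page
+      .next()
+      .path(path)
+      .toggleOptions()
+      // toggling the default TTL twice turns it on and then back off
+      .enableDefaultTtl()
+      .enableDefaultTtl()
+      .enableMaxTtl()
+      .maxTTLUnit('h')
+      .maxTTLVal(maxTTLHours)
+      .submit();
+    await configPage.visit({ backend: path });
+    assert.strictEqual(configPage.defaultTTL, '0s', 'shows the proper TTL');
+    assert.strictEqual(configPage.maxTTL, `${maxTTLHours}h`, 'shows the proper max TTL');
+  });
+
+  test('it sets the max ttl after pki chosen, resets after', async function (assert) {
+    await page.visit();
+    assert.strictEqual(currentRouteName(), 'vault.cluster.settings.mount-secret-backend');
+    await page.selectType('pki');
+    await page.next();
+    assert.dom('[data-test-input="maxLeaseTtl"]').exists();
+    assert
+      .dom('[data-test-input="maxLeaseTtl"] [data-test-ttl-toggle]')
+      .isChecked('Toggle is checked by default');
+    assert.dom('[data-test-input="maxLeaseTtl"] [data-test-ttl-value]').hasValue('3650');
+    assert.dom('[data-test-input="maxLeaseTtl"] [data-test-select="ttl-unit"]').hasValue('d');
+
+    // Go back and choose a different type
+    await page.back();
+    await page.selectType('database');
+    await page.next();
+    assert.dom('[data-test-input="maxLeaseTtl"]').exists();
+    assert
+      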
.dom('[data-test-input="maxLeaseTtl"] [data-test-ttl-toggle]') + .isNotChecked('Toggle is unchecked by default'); + await page.enableMaxTtl(); + assert.dom('[data-test-input="maxLeaseTtl"] [data-test-ttl-value]').hasValue(''); + assert.dom('[data-test-input="maxLeaseTtl"] [data-test-select="ttl-unit"]').hasValue('s'); + }); + + test('it throws error if setting duplicate path name', async function (assert) { + const path = `kv-duplicate`; + + await consoleComponent.runCommands([ + // delete any kv-duplicate previously written here so that tests can be re-run + `delete sys/mounts/${path}`, + ]); + + await page.visit(); + + assert.strictEqual(currentRouteName(), 'vault.cluster.settings.mount-secret-backend'); + await page.selectType('kv'); + await page.next().path(path).submit(); + await page.secretList(); + await settled(); + await page.enableEngine(); + await page.selectType('kv'); + await page.next().path(path).submit(); + assert.dom('[data-test-alert-banner="alert"]').containsText(`path is already in use at ${path}`); + assert.strictEqual(currentRouteName(), 'vault.cluster.settings.mount-secret-backend'); + + await page.secretList(); + await settled(); + assert + .dom(`[data-test-auth-backend-link=${path}]`) + .exists({ count: 1 }, 'renders only one instance of the engine'); + }); + + test('version 2 with no update to config endpoint still allows mount of secret engine', async function (assert) { + const enginePath = `kv-noUpdate-${this.uid}`; + const V2_POLICY = ` + path "${enginePath}/*" { + capabilities = ["list","create","read","sudo","delete"] + } + path "sys/mounts/*" + { + capabilities = ["create", "read", "update", "delete", "list", "sudo"] + } + + # List existing secrets engines. + path "sys/mounts" + { + capabilities = ["read"] + } + # Allow page to load after mount + path "sys/internal/ui/mounts/${enginePath}" { + capabilities = ["read"] + } + `; + await consoleComponent.runCommands([ + // delete any previous mount with same name + `delete sys/mounts/${enginePath}`, + `write sys/policies/acl/kv-v2-degrade policy=${btoa(V2_POLICY)}`, + 'write -field=client_token auth/token/create policies=kv-v2-degrade', + ]); + await settled(); + const userToken = consoleComponent.lastLogOutput; + await logout.visit(); + await authPage.login(userToken); + // create the engine + await mountSecrets.visit(); + await mountSecrets.selectType('kv'); + await mountSecrets.next().path(enginePath).setMaxVersion(101).submit(); + await settled(); + assert + .dom('[data-test-flash-message]') + .containsText( + `You do not have access to the config endpoint. 
The secret engine was mounted, but the configuration settings were not saved.` + ); + assert.strictEqual( + currentURL(), + `/vault/secrets/${enginePath}/list`, + 'After mounting, redirects to secrets list page' + ); + await configPage.visit({ backend: enginePath }); + await settled(); + assert.dom('[data-test-row-value="Maximum number of versions"]').hasText('Not set'); + }); + + test('it should transition to engine route on success if defined in mount config', async function (assert) { + await consoleComponent.runCommands([ + // delete any previous mount with same name + `delete sys/mounts/kubernetes`, + ]); + await mountSecrets.visit(); + await mountSecrets.selectType('kubernetes'); + await mountSecrets.next().path('kubernetes').submit(); + const { engineRoute } = allEngines().findBy('type', 'kubernetes'); + assert.strictEqual( + currentRouteName(), + `vault.cluster.secrets.backend.${engineRoute}`, + 'Transitions to engine route on mount success' + ); + }); +}); diff --git a/ui/tests/acceptance/sidebar-nav-test.js b/ui/tests/acceptance/sidebar-nav-test.js new file mode 100644 index 0000000..f68c995 --- /dev/null +++ b/ui/tests/acceptance/sidebar-nav-test.js @@ -0,0 +1,122 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { click, currentURL } from '@ember/test-helpers'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import authPage from 'vault/tests/pages/auth'; +import modifyPassthroughResponse from 'vault/mirage/helpers/modify-passthrough-response'; + +const link = (label) => `[data-test-sidebar-nav-link="${label}"]`; +const panel = (label) => `[data-test-sidebar-nav-panel="${label}"]`; + +module('Acceptance | sidebar navigation', function (hooks) { + setupApplicationTest(hooks); + setupMirage(hooks); + + hooks.beforeEach(function () { + // set storage_type to raft to test link + this.server.get('/sys/seal-status', (schema, req) => { + return modifyPassthroughResponse(req, { storage_type: 'raft' }); + }); + this.server.get('/sys/storage/raft/configuration', () => this.server.create('configuration', 'withRaft')); + + return authPage.login(); + }); + + test('it should link to correct routes at the cluster level', async function (assert) { + assert.expect(10); + + assert.dom(panel('Cluster')).exists('Cluster nav panel renders'); + + const subNavs = [ + { label: 'Access', route: 'access' }, + { label: 'Policies', route: 'policies/acl' }, + { label: 'Tools', route: 'tools/wrap' }, + ]; + + for (const subNav of subNavs) { + const { label, route } = subNav; + await click(link(label)); + assert.strictEqual(currentURL(), `/vault/${route}`, `${label} route renders`); + assert.dom(panel(label)).exists(`${label} nav panel renders`); + await click(link('Back to main navigation')); + } + + const links = [ + { label: 'Raft Storage', route: '/vault/storage/raft' }, + { label: 'Seal Vault', route: '/vault/settings/seal' }, + { label: 'Secrets engines', route: '/vault/secrets' }, + ]; + + for (const l of links) { + await click(link(l.label)); + assert.strictEqual(currentURL(), l.route, `${l.label} route renders`); + } + }); + + test('it should link to correct routes at the access level', async function (assert) { + assert.expect(7); + + await click(link('Access')); + assert.dom(panel('Access')).exists('Access nav panel renders'); + + const links = [ + { label: 'Multi-factor authentication', route: '/vault/access/mfa' }, + { label: 'OIDC provider', 
route: '/vault/access/oidc' }, + { label: 'Groups', route: '/vault/access/identity/groups' }, + { label: 'Entities', route: '/vault/access/identity/entities' }, + { label: 'Leases', route: '/vault/access/leases/list' }, + { label: 'Authentication methods', route: '/vault/access' }, + ]; + + for (const l of links) { + await click(link(l.label)); + assert.ok(currentURL().includes(l.route), `${l.label} route renders`); + } + }); + + test('it should link to correct routes at the policies level', async function (assert) { + assert.expect(2); + + await click(link('Policies')); + assert.dom(panel('Policies')).exists('Access nav panel renders'); + + await click(link('ACL Policies')); + assert.strictEqual(currentURL(), '/vault/policies/acl', 'ACL Policies route renders'); + }); + + test('it should link to correct routes at the tools level', async function (assert) { + assert.expect(7); + + await click(link('Tools')); + assert.dom(panel('Tools')).exists('Access nav panel renders'); + + const links = [ + { label: 'Wrap', route: '/vault/tools/wrap' }, + { label: 'Lookup', route: '/vault/tools/lookup' }, + { label: 'Unwrap', route: '/vault/tools/unwrap' }, + { label: 'Rewrap', route: '/vault/tools/rewrap' }, + { label: 'Random', route: '/vault/tools/random' }, + { label: 'Hash', route: '/vault/tools/hash' }, + ]; + + for (const l of links) { + await click(link(l.label)); + assert.strictEqual(currentURL(), l.route, `${l.label} route renders`); + } + }); + + test('it should display access nav when mounting and configuring auth methods', async function (assert) { + await click(link('Access')); + await click('[data-test-auth-enable]'); + assert.dom('[data-test-sidebar-nav-panel="Access"]').exists('Access nav panel renders'); + await click(link('Authentication methods')); + await click('[data-test-auth-backend-link="token"]'); + await click('[data-test-configure-link]'); + assert.dom('[data-test-sidebar-nav-panel="Access"]').exists('Access nav panel renders'); + }); +}); diff --git a/ui/tests/acceptance/ssh-test.js b/ui/tests/acceptance/ssh-test.js new file mode 100644 index 0000000..461349f --- /dev/null +++ b/ui/tests/acceptance/ssh-test.js @@ -0,0 +1,176 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { click, fillIn, findAll, currentURL, find, settled, waitUntil } from '@ember/test-helpers'; +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { v4 as uuidv4 } from 'uuid'; + +import authPage from 'vault/tests/pages/auth'; +import enablePage from 'vault/tests/pages/settings/mount-secret-backend'; + +module('Acceptance | ssh secret backend', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(function () { + this.uid = uuidv4(); + return authPage.login(); + }); + + const PUB_KEY = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCn9p5dHNr4aU4R2W7ln+efzO5N2Cdv/SXk6zbCcvhWcblWMjkXf802B0PbKvf6cJIzM/Xalb3qz1cK+UUjCSEAQWefk6YmfzbOikfc5EHaSKUqDdE+HlsGPvl42rjCr28qYfuYh031YfwEQGEAIEypo7OyAj+38NLbHAQxDxuaReee1YCOV5rqWGtEgl2VtP5kG+QEBza4ZfeglS85f/GGTvZC4Jq1GX+wgmFxIPnd6/mUXa4ecoR0QMfOAzzvPm4ajcNCQORfHLQKAcmiBYMiyQJoU+fYpi9CJGT1jWTmR99yBkrSg6yitI2qqXyrpwAbhNGrM0Fw0WpWxh66N9Xp meirish@Macintosh-3.local`; + + const ROLES = [ + { + type: 'ca', + name: 'carole', + async fillInCreate() { + await click('[data-test-input="allowUserCertificates"]'); + }, + async fillInGenerate() { + await fillIn('[data-test-input="publicKey"]', PUB_KEY); + await click('[data-test-toggle-button]'); + + await click('[data-test-toggle-label="TTL"]'); + await fillIn('[data-test-select="ttl-unit"]', 'm'); + + document.querySelector('[data-test-ttl-value="TTL"]').value = 30; + }, + assertBeforeGenerate(assert) { + assert.dom('[data-test-form-field-from-model]').exists('renders the FormFieldFromModel'); + const value = document.querySelector('[data-test-ttl-value="TTL"]').value; + // confirms that the actions are correctly being passed down to the FormFieldFromModel component + assert.strictEqual(value, '30', 'renders action updateTtl'); + }, + assertAfterGenerate(assert, sshPath) { + assert.strictEqual( + currentURL(), + `/vault/secrets/${sshPath}/sign/${this.name}`, + 'ca sign url is correct' + ); + assert.dom('[data-test-row-label="Signed key"]').exists({ count: 1 }, 'renders the signed key'); + assert + .dom('[data-test-row-value="Signed key"]') + .exists({ count: 1 }, "renders the signed key's value"); + assert.dom('[data-test-row-label="Serial number"]').exists({ count: 1 }, 'renders the serial'); + assert.dom('[data-test-row-value="Serial number"]').exists({ count: 1 }, 'renders the serial value'); + }, + }, + { + type: 'otp', + name: 'otprole', + async fillInCreate() { + await fillIn('[data-test-input="defaultUser"]', 'admin'); + await click('[data-test-toggle-group="Options"]'); + await fillIn('[data-test-input="cidrList"]', '1.2.3.4/32'); + }, + async fillInGenerate() { + await fillIn('[data-test-input="username"]', 'admin'); + await fillIn('[data-test-input="ip"]', '1.2.3.4'); + }, + assertAfterGenerate(assert, sshPath) { + assert.strictEqual( + currentURL(), + `/vault/secrets/${sshPath}/credentials/${this.name}`, + 'otp credential url is correct' + ); + assert.dom('[data-test-row-label="Key"]').exists({ count: 1 }, 'renders the key'); + assert.dom('[data-test-masked-input]').exists({ count: 1 }, 'renders mask for key value'); + assert.dom('[data-test-row-label="Port"]').exists({ count: 1 }, 'renders the port'); + assert.dom('[data-test-row-value="Port"]').exists({ count: 1 }, "renders the port's value"); + }, + }, + ]; + test('ssh backend', async function (assert) { + assert.expect(28); + const sshPath = `ssh-${this.uid}`; + + await enablePage.enable('ssh', sshPath); + await settled(); + await 
click('[data-test-configuration-tab]'); + + await click('[data-test-secret-backend-configure]'); + + assert.strictEqual(currentURL(), `/vault/settings/secrets/configure/${sshPath}`); + assert.ok(findAll('[data-test-ssh-configure-form]').length, 'renders the empty configuration form'); + + // default has generate CA checked so we just submit the form + await click('[data-test-ssh-input="configure-submit"]'); + + assert.ok( + await waitUntil(() => findAll('[data-test-ssh-input="public-key"]').length), + 'a public key is fetched' + ); + await click('[data-test-backend-view-link]'); + + assert.strictEqual(currentURL(), `/vault/secrets/${sshPath}/list`, `redirects to ssh index`); + + for (const role of ROLES) { + // create a role + await click('[ data-test-secret-create]'); + + assert.ok( + find('[data-test-secret-header]').textContent.includes('SSH role'), + `${role.type}: renders the create page` + ); + + await fillIn('[data-test-input="name"]', role.name); + await fillIn('[data-test-input="keyType"]', role.type); + await role.fillInCreate(); + await settled(); + + // save the role + await click('[data-test-role-ssh-create]'); + await waitUntil(() => currentURL() === `/vault/secrets/${sshPath}/show/${role.name}`); // flaky without this + assert.strictEqual( + currentURL(), + `/vault/secrets/${sshPath}/show/${role.name}`, + `${role.type}: navigates to the show page on creation` + ); + + // sign a key with this role + await click('[data-test-backend-credentials]'); + + await role.fillInGenerate(); + if (role.type === 'ca') { + await settled(); + role.assertBeforeGenerate(assert); + } + + // generate creds + await click('[data-test-secret-generate]'); + await settled(); // eslint-disable-line + role.assertAfterGenerate(assert, sshPath); + + // click the "Back" button + await click('[data-test-secret-generate-back]'); + + assert.ok( + findAll('[data-test-secret-generate-form]').length, + `${role.type}: back takes you back to the form` + ); + + await click('[data-test-secret-generate-cancel]'); + + assert.strictEqual( + currentURL(), + `/vault/secrets/${sshPath}/list`, + `${role.type}: cancel takes you to ssh index` + ); + assert.ok( + findAll(`[data-test-secret-link="${role.name}"]`).length, + `${role.type}: role shows in the list` + ); + + //and delete + await click(`[data-test-secret-link="${role.name}"] [data-test-popup-menu-trigger]`); + await waitUntil(() => find('[data-test-ssh-role-delete]')); // flaky without + await click(`[data-test-ssh-role-delete]`); + await click(`[data-test-confirm-button]`); + assert + .dom(`[data-test-secret-link="${role.name}"]`) + .doesNotExist(`${role.type}: role is no longer in the list`); + } + }); +}); diff --git a/ui/tests/acceptance/tools-test.js b/ui/tests/acceptance/tools-test.js new file mode 100644 index 0000000..6ed46e3 --- /dev/null +++ b/ui/tests/acceptance/tools-test.js @@ -0,0 +1,174 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { click, fillIn, find, findAll, currentURL, visit, settled, waitUntil } from '@ember/test-helpers'; +import Pretender from 'pretender'; +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { toolsActions } from 'vault/helpers/tools-actions'; +import authPage from 'vault/tests/pages/auth'; +import logout from 'vault/tests/pages/logout'; +import { capitalize } from '@ember/string'; + +module('Acceptance | tools', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(function () { + return authPage.login(); + }); + + hooks.afterEach(function () { + return logout.visit(); + }); + + const DATA_TO_WRAP = JSON.stringify({ tools: 'tests' }); + const TOOLS_ACTIONS = toolsActions(); + + /* + data-test-tools-input="wrapping-token" + data-test-tools-input="rewrapped-token" + data-test-tools="token-lookup-row" + data-test-sidebar-nav-link=supportedAction + */ + + var createTokenStore = () => { + let token; + return { + set(val) { + token = val; + }, + get() { + return token; + }, + }; + }; + test('tools functionality', async function (assert) { + var tokenStore = createTokenStore(); + await visit('/vault/tools'); + + assert.strictEqual(currentURL(), '/vault/tools/wrap', 'forwards to the first action'); + TOOLS_ACTIONS.forEach((action) => { + assert.dom(`[data-test-sidebar-nav-link="${capitalize(action)}"]`).exists(`${action} link renders`); + }); + + const { CodeMirror } = await waitUntil(() => find('.CodeMirror')); + CodeMirror.setValue(DATA_TO_WRAP); + + // wrap + await click('[data-test-tools-submit]'); + const wrappedToken = await waitUntil(() => find('[data-test-tools-input="wrapping-token"]')); + tokenStore.set(wrappedToken.value); + assert + .dom('[data-test-tools-input="wrapping-token"]') + .hasValue(wrappedToken.value, 'has a wrapping token'); + + //lookup + await click('[data-test-sidebar-nav-link="Lookup"]'); + + await fillIn('[data-test-tools-input="wrapping-token"]', tokenStore.get()); + await click('[data-test-tools-submit]'); + await waitUntil(() => findAll('[data-test-tools="token-lookup-row"]').length >= 3); + const rows = findAll('[data-test-tools="token-lookup-row"]'); + assert.dom(rows[0]).hasText(/Creation path/, 'show creation path row'); + assert.dom(rows[1]).hasText(/Creation time/, 'show creation time row'); + assert.dom(rows[2]).hasText(/Creation TTL/, 'show creation ttl row'); + + //rewrap + await click('[data-test-sidebar-nav-link="Rewrap"]'); + + await fillIn('[data-test-tools-input="wrapping-token"]', tokenStore.get()); + await click('[data-test-tools-submit]'); + const rewrappedToken = await waitUntil(() => find('[data-test-tools-input="rewrapped-token"]')); + assert.ok(rewrappedToken.value, 'has a new re-wrapped token'); + assert.notEqual(rewrappedToken.value, tokenStore.get(), 're-wrapped token is not the wrapped token'); + tokenStore.set(rewrappedToken.value); + await settled(); + + //unwrap + await click('[data-test-sidebar-nav-link="Unwrap"]'); + + await fillIn('[data-test-tools-input="wrapping-token"]', tokenStore.get()); + await click('[data-test-tools-submit]'); + assert.deepEqual( + JSON.parse(CodeMirror.getValue()), + JSON.parse(DATA_TO_WRAP), + 'unwrapped data equals input data' + ); + const buttonDetails = await waitUntil(() => find('[data-test-button-details]')); + await click(buttonDetails); + await click('[data-test-button-data]'); + assert.dom('.CodeMirror').exists(); + + //random + await click('[data-test-sidebar-nav-link="Random"]'); + + 
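// The UI's default of 32 bytes matches the sys/tools/random API default;
+      // for reference (sketch), the form posts roughly
+      //   POST /v1/sys/tools/random  { "bytes": 32, "format": "base64" }
+      // and the response's random_bytes value feeds the assertions below.
+      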
assert.dom('[data-test-tools-input="bytes"]').hasValue('32', 'defaults to 32 bytes'); + await click('[data-test-tools-submit]'); + const randomBytes = await waitUntil(() => find('[data-test-tools-input="random-bytes"]')); + assert.ok(randomBytes.value, 'shows the returned value of random bytes'); + + //hash + await click('[data-test-sidebar-nav-link="Hash"]'); + + await fillIn('[data-test-tools-input="hash-input"]', 'foo'); + await click('[data-test-transit-b64-toggle="input"]'); + + await click('[data-test-tools-submit]'); + let sumInput = await waitUntil(() => find('[data-test-tools-input="sum"]')); + assert + .dom(sumInput) + .hasValue('LCa0a2j/xo/5m0U8HTBBNBNCLXBkg7+g+YpeiGJm564=', 'hashes the data, encodes input'); + await click('[data-test-tools-back]'); + + await fillIn('[data-test-tools-input="hash-input"]', 'e2RhdGE6ImZvbyJ9'); + + await click('[data-test-tools-submit]'); + sumInput = await waitUntil(() => find('[data-test-tools-input="sum"]')); + assert + .dom(sumInput) + .hasValue('JmSi2Hhbgu2WYOrcOyTqqMdym7KT3sohCwAwaMonVrc=', 'hashes the data, passes b64 input through'); + }); + + const AUTH_RESPONSE = { + request_id: '39802bc4-235c-2f0b-87f3-ccf38503ac3e', + lease_id: '', + renewable: false, + lease_duration: 0, + data: null, + wrap_info: null, + warnings: null, + auth: { + client_token: 'ecfc2758-588e-981d-50f4-a25883bbf03c', + accessor: '6299780b-f2b2-1a3f-7b83-9d3d67629249', + policies: ['root'], + metadata: null, + lease_duration: 0, + renewable: false, + entity_id: '', + }, + }; + + test('ensure unwrap with auth block works properly', async function (assert) { + this.server = new Pretender(function () { + this.post('/v1/sys/wrapping/unwrap', (response) => { + return [response, { 'Content-Type': 'application/json' }, JSON.stringify(AUTH_RESPONSE)]; + }); + }); + await visit('/vault/tools'); + + //unwrap + await click('[data-test-sidebar-nav-link="Unwrap"]'); + + await fillIn('[data-test-tools-input="wrapping-token"]', 'sometoken'); + await click('[data-test-tools-submit]'); + + assert.deepEqual( + JSON.parse(findAll('.CodeMirror')[0].CodeMirror.getValue()), + AUTH_RESPONSE.auth, + 'unwrapped data equals input data' + ); + this.server.shutdown(); + }); +}); diff --git a/ui/tests/acceptance/transit-test.js b/ui/tests/acceptance/transit-test.js new file mode 100644 index 0000000..98202f5 --- /dev/null +++ b/ui/tests/acceptance/transit-test.js @@ -0,0 +1,370 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { click, fillIn, find, currentURL, settled, visit, waitUntil, findAll } from '@ember/test-helpers'; +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { v4 as uuidv4 } from 'uuid'; + +import { encodeString } from 'vault/utils/b64'; +import authPage from 'vault/tests/pages/auth'; +import enablePage from 'vault/tests/pages/settings/mount-secret-backend'; +import secretListPage from 'vault/tests/pages/secrets/backend/list'; + +const keyTypes = [ + { + name: (ts) => `aes-${ts}`, + type: 'aes128-gcm96', + exportable: true, + supportsEncryption: true, + }, + { + name: (ts) => `aes-convergent-${ts}`, + type: 'aes128-gcm96', + convergent: true, + supportsEncryption: true, + }, + { + name: (ts) => `aes-${ts}`, + type: 'aes256-gcm96', + exportable: true, + supportsEncryption: true, + }, + { + name: (ts) => `aes-convergent-${ts}`, + type: 'aes256-gcm96', + convergent: true, + supportsEncryption: true, + }, + { + name: (ts) => `chacha-${ts}`, + type: 'chacha20-poly1305', + exportable: true, + supportsEncryption: true, + }, + { + name: (ts) => `chacha-convergent-${ts}`, + type: 'chacha20-poly1305', + convergent: true, + supportsEncryption: true, + autoRotate: true, + }, + { + name: (ts) => `ecdsa-${ts}`, + type: 'ecdsa-p256', + exportable: true, + supportsSigning: true, + }, + { + name: (ts) => `ecdsa-${ts}`, + type: 'ecdsa-p384', + exportable: true, + supportsSigning: true, + }, + { + name: (ts) => `ecdsa-${ts}`, + type: 'ecdsa-p521', + exportable: true, + supportsSigning: true, + }, + { + name: (ts) => `ed25519-${ts}`, + type: 'ed25519', + derived: true, + supportsSigning: true, + }, + { + name: (ts) => `rsa-2048-${ts}`, + type: `rsa-2048`, + supportsSigning: true, + supportsEncryption: true, + }, + { + name: (ts) => `rsa-3072-${ts}`, + type: `rsa-3072`, + supportsSigning: true, + supportsEncryption: true, + }, + { + name: (ts) => `rsa-4096-${ts}`, + type: `rsa-4096`, + supportsSigning: true, + supportsEncryption: true, + autoRotate: true, + }, +]; + +const generateTransitKey = async function (key, now) { + const name = key.name(now); + await click('[data-test-secret-create]'); + + await fillIn('[data-test-transit-key-name]', name); + await fillIn('[data-test-transit-key-type]', key.type); + if (key.exportable) { + await click('[data-test-transit-key-exportable]'); + } + if (key.derived) { + await click('[data-test-transit-key-derived]'); + } + if (key.convergent) { + await click('[data-test-transit-key-convergent-encryption]'); + } + if (key.autoRotate) { + await click('[data-test-toggle-label="Auto-rotation period"]'); + } + await click('[data-test-transit-key-create]'); + await settled(); // eslint-disable-line + // link back to the list + await click('[data-test-secret-root-link]'); + + return name; +}; + +const testConvergentEncryption = async function (assert, keyName) { + const tests = [ + // raw bytes for plaintext and context + { + plaintext: 'NaXud2QW7KjyK6Me9ggh+zmnCeBGdG93LQED49PtoOI=', + context: 'nqR8LiVgNh/lwO2rArJJE9F9DMhh0lKo4JX9DAAkCDw=', + encodePlaintext: false, + encodeContext: false, + assertAfterEncrypt: (key) => { + assert.dom('.modal.is-active').exists(`${key}: Modal opens after encrypt`); + assert.ok( + /vault:/.test(find('[data-test-encrypted-value="ciphertext"]').innerText), + `${key}: ciphertext shows a vault-prefixed ciphertext` + ); + }, + assertBeforeDecrypt: (key) => { + assert.dom('.modal.is-active').doesNotExist(`${key}: Modal not open before decrypt`); + assert + 
.dom('[data-test-transit-input="context"]') + .hasValue( + 'nqR8LiVgNh/lwO2rArJJE9F9DMhh0lKo4JX9DAAkCDw=', + `${key}: the ui shows the base64-encoded context` + ); + }, + + assertAfterDecrypt: (key) => { + assert.dom('.modal.is-active').exists(`${key}: Modal opens after decrypt`); + assert.strictEqual( + find('[data-test-encrypted-value="plaintext"]').innerText, + 'NaXud2QW7KjyK6Me9ggh+zmnCeBGdG93LQED49PtoOI=', + `${key}: the ui shows the base64-encoded plaintext` + ); + }, + }, + // raw bytes for plaintext, string for context + { + plaintext: 'NaXud2QW7KjyK6Me9ggh+zmnCeBGdG93LQED49PtoOI=', + context: encodeString('context'), + encodePlaintext: false, + encodeContext: false, + assertAfterEncrypt: (key) => { + assert.dom('.modal.is-active').exists(`${key}: Modal opens after encrypt`); + assert.ok( + /vault:/.test(find('[data-test-encrypted-value="ciphertext"]').innerText), + `${key}: ciphertext shows a vault-prefixed ciphertext` + ); + }, + assertBeforeDecrypt: (key) => { + assert.dom('.modal.is-active').doesNotExist(`${key}: Modal not open before decrypt`); + assert + .dom('[data-test-transit-input="context"]') + .hasValue(encodeString('context'), `${key}: the ui shows the input context`); + }, + assertAfterDecrypt: (key) => { + assert.dom('.modal.is-active').exists(`${key}: Modal opens after decrypt`); + assert.strictEqual( + find('[data-test-encrypted-value="plaintext"]').innerText, + 'NaXud2QW7KjyK6Me9ggh+zmnCeBGdG93LQED49PtoOI=', + `${key}: the ui shows the base64-encoded plaintext` + ); + }, + }, + // base64 input + { + plaintext: encodeString('This is the secret'), + context: encodeString('context'), + encodePlaintext: false, + encodeContext: false, + assertAfterEncrypt: (key) => { + assert.dom('.modal.is-active').exists(`${key}: Modal opens after encrypt`); + assert.ok( + /vault:/.test(find('[data-test-encrypted-value="ciphertext"]').innerText), + `${key}: ciphertext shows a vault-prefixed ciphertext` + ); + }, + assertBeforeDecrypt: (key) => { + assert.dom('.modal.is-active').doesNotExist(`${key}: Modal not open before decrypt`); + assert + .dom('[data-test-transit-input="context"]') + .hasValue(encodeString('context'), `${key}: the ui shows the input context`); + }, + assertAfterDecrypt: (key) => { + assert.dom('.modal.is-active').exists(`${key}: Modal opens after decrypt`); + assert.strictEqual( + find('[data-test-encrypted-value="plaintext"]').innerText, + encodeString('This is the secret'), + `${key}: the ui decodes plaintext` + ); + }, + }, + + // string input + { + plaintext: 'There are many secrets 🤐', + context: 'secret 2', + encodePlaintext: true, + encodeContext: true, + assertAfterEncrypt: (key) => { + assert.dom('.modal.is-active').exists(`${key}: Modal opens after encrypt`); + assert.ok( + /vault:/.test(find('[data-test-encrypted-value="ciphertext"]').innerText), + `${key}: ciphertext shows a vault-prefixed ciphertext` + ); + }, + assertBeforeDecrypt: (key) => { + assert.dom('.modal.is-active').doesNotExist(`${key}: Modal not open before decrypt`); + assert + .dom('[data-test-transit-input="context"]') + .hasValue(encodeString('secret 2'), `${key}: the ui shows the encoded context`); + }, + assertAfterDecrypt: (key) => { + assert.dom('.modal.is-active').exists(`${key}: Modal opens after decrypt`); + assert.strictEqual( + find('[data-test-encrypted-value="plaintext"]').innerText, + encodeString('There are many secrets 🤐'), + `${key}: the ui decodes plaintext` + ); + }, + }, + ]; + + for (const testCase of tests) { + await 
click('[data-test-transit-action-link="encrypt"]');
+
+    find('#plaintext-control .CodeMirror').CodeMirror.setValue(testCase.plaintext);
+    await fillIn('[data-test-transit-input="context"]', testCase.context);
+
+    if (!testCase.encodePlaintext) {
+      // If value is already encoded, check the box
+      await click('input[data-test-transit-input="encodedBase64"]');
+    }
+    if (testCase.encodeContext) {
+      await click('[data-test-transit-b64-toggle="context"]');
+    }
+    assert.dom('.modal.is-active').doesNotExist(`${keyName}: is not open before encrypt`);
+    await click('[data-test-button-encrypt]');
+
+    if (testCase.assertAfterEncrypt) {
+      await settled();
+      testCase.assertAfterEncrypt(keyName);
+    }
+    // store ciphertext for decryption step
+    const copiedCiphertext = find('[data-test-encrypted-value="ciphertext"]').innerText;
+    await click('.modal.is-active [data-test-modal-background]');
+
+    assert.dom('.modal.is-active').doesNotExist(`${keyName}: Modal closes after background clicked`);
+    await click('[data-test-transit-action-link="decrypt"]');
+
+    if (testCase.assertBeforeDecrypt) {
+      await settled();
+      testCase.assertBeforeDecrypt(keyName);
+    }
+    find('#ciphertext-control .CodeMirror').CodeMirror.setValue(copiedCiphertext);
+    await click('[data-test-button-decrypt]');
+
+    if (testCase.assertAfterDecrypt) {
+      await settled();
+      testCase.assertAfterDecrypt(keyName);
+    }
+
+    await click('.modal.is-active [data-test-modal-background]');
+
+    assert.dom('.modal.is-active').doesNotExist(`${keyName}: Modal closes after background clicked`);
+  }
+};
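+// For reference, the encrypt/decrypt flow exercised above maps onto the
+// transit API roughly as follows (sketch; <mount> and <key> vary per test):
+//
+//   POST /v1/<mount>/encrypt/<key>  { "plaintext": "<b64>", "context": "<b64>" }
+//   POST /v1/<mount>/decrypt/<key>  { "ciphertext": "vault:v1:...", "context": "<b64>" }
+//
+// Convergent keys require the same context on encrypt and decrypt, which is
+// why every test case above supplies one.
+module('Acceptance | transit', function (hooks) {
+  setupApplicationTest(hooks);
+
+  hooks.beforeEach(async function () {
+    const uid = uuidv4();
+    await authPage.login();
+    await settled();
+    this.uid = uid;
+    this.path = `transit-${uid}`;
+
+    await enablePage.enable('transit', `transit-${uid}`);
+    await settled();
+  });
+
+  test(`transit backend: list menu`, async function (assert) {
+    await generateTransitKey(keyTypes[0], this.uid);
+    await secretListPage.secrets.objectAt(0).menuToggle();
+    await settled();
+    assert.strictEqual(secretListPage.menuItems.length, 2, 'shows 2 items in the menu');
+  });
+  for (const key of keyTypes) {
+    test(`transit backend: ${key.type}`, async function (assert) {
+      assert.expect(key.convergent ? 43 : 7);
+      const name = await generateTransitKey(key, this.uid);
+      await visit(`vault/secrets/${this.path}/show/${name}`);
+
+      const expectedRotateValue = key.autoRotate ? '30 days' : 'Key will not be automatically rotated';
+      assert
+        .dom('[data-test-row-value="Auto-rotation period"]')
+        .hasText(expectedRotateValue, 'Has expected auto rotate value');
+
+      await click('[data-test-transit-link="versions"]');
+      // wait for capabilities
+
+      assert.dom('[data-test-transit-key-version-row]').exists({ count: 1 }, `${name}: only one key version`);
+      await waitUntil(() => find('[data-test-confirm-action-trigger]'));
+      await click('[data-test-confirm-action-trigger]');
+
+      await click('[data-test-confirm-button]');
+      // wait for rotate call
+      await waitUntil(() => findAll('[data-test-transit-key-version-row]').length >= 2);
+      assert
+        .dom('[data-test-transit-key-version-row]')
+        .exists({ count: 2 }, `${name}: two key versions after rotate`);
+      await click('[data-test-transit-key-actions-link]');
+
+      assert.ok(
+        currentURL().startsWith(`/vault/secrets/${this.path}/show/${name}?tab=actions`),
+        `${name}: navigates to transit actions`
+      );
+
+      const keyAction = key.supportsEncryption ? 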
'encrypt' : 'sign'; + const actionTitle = find(`[data-test-transit-action-title=${keyAction}]`).innerText.toLowerCase(); + + assert.true( + actionTitle.includes(keyAction), + `shows a card with title that links to the ${name} transit action` + ); + + await click(`[data-test-transit-card=${keyAction}]`); + + assert + .dom('[data-test-transit-key-version-select]') + .exists(`${name}: the rotated key allows you to select versions`); + if (key.exportable) { + assert + .dom('[data-test-transit-action-link="export"]') + .exists(`${name}: exportable key has a link to export action`); + } else { + assert + .dom('[data-test-transit-action-link="export"]') + .doesNotExist(`${name}: non-exportable key does not link to export action`); + } + if (key.convergent && key.supportsEncryption) { + await testConvergentEncryption(assert, name); + await settled(); + } + await settled(); + }); + } +}); diff --git a/ui/tests/acceptance/unseal-test.js b/ui/tests/acceptance/unseal-test.js new file mode 100644 index 0000000..8663224 --- /dev/null +++ b/ui/tests/acceptance/unseal-test.js @@ -0,0 +1,54 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { click, currentRouteName, currentURL, fillIn, settled, visit } from '@ember/test-helpers'; +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import VAULT_KEYS from 'vault/tests/helpers/vault-keys'; +import authPage from 'vault/tests/pages/auth'; +import logout from 'vault/tests/pages/logout'; +import { pollCluster } from 'vault/tests/helpers/poll-cluster'; + +const { unsealKeys } = VAULT_KEYS; + +module('Acceptance | unseal', function (hooks) { + setupApplicationTest(hooks); + + hooks.beforeEach(function () { + return authPage.login(); + }); + + hooks.afterEach(function () { + return logout.visit(); + }); + + test('seal then unseal', async function (assert) { + await visit('/vault/settings/seal'); + + assert.strictEqual(currentURL(), '/vault/settings/seal'); + + // seal + await click('[data-test-seal] button'); + + await click('[data-test-confirm-button]'); + + await pollCluster(this.owner); + await settled(); + assert.strictEqual(currentURL(), '/vault/unseal', 'vault is on the unseal page'); + + // unseal + for (const key of unsealKeys) { + await fillIn('[data-test-shamir-input]', key); + + await click('button[type="submit"]'); + + await pollCluster(this.owner); + await settled(); + } + + assert.dom('[data-test-cluster-status]').doesNotExist('ui does not show sealed warning'); + assert.strictEqual(currentRouteName(), 'vault.cluster.auth', 'vault is ready to authenticate'); + }); +}); diff --git a/ui/tests/acceptance/wrapped-token-test.js b/ui/tests/acceptance/wrapped-token-test.js new file mode 100644 index 0000000..fe6b7f1 --- /dev/null +++ b/ui/tests/acceptance/wrapped-token-test.js @@ -0,0 +1,60 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupApplicationTest } from 'ember-qunit'; +import { settled, currentURL, visit } from '@ember/test-helpers'; +import { create } from 'ember-cli-page-object'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import auth from 'vault/tests/pages/auth'; +import consoleClass from 'vault/tests/pages/components/console/ui-panel'; + +const consoleComponent = create(consoleClass); + +const wrappedAuth = async () => { + await consoleComponent.runCommands(`write -field=token auth/token/create policies=default -wrap-ttl=3m`); + await settled(); + return consoleComponent.lastLogOutput; +}; + +const setupWrapping = async () => { + await auth.logout(); + await settled(); + await auth.visit(); + await settled(); + await auth.tokenInput('root').submit(); + await settled(); + const token = await wrappedAuth(); + await auth.logout(); + await settled(); + return token; +}; +module('Acceptance | wrapped_token query param functionality', function (hooks) { + setupApplicationTest(hooks); + setupMirage(hooks); + + test('it authenticates you if the query param is present', async function (assert) { + const token = await setupWrapping(); + await auth.visit({ wrapped_token: token }); + await settled(); + assert.strictEqual(currentURL(), '/vault/secrets', 'authenticates and redirects to home'); + }); + + test('it authenticates when used with the with=token query param', async function (assert) { + const token = await setupWrapping(); + await auth.visit({ wrapped_token: token, with: 'token' }); + await settled(); + assert.strictEqual(currentURL(), '/vault/secrets', 'authenticates and redirects to home'); + }); + + test('it should authenticate when hitting logout url with wrapped_token when logged out', async function (assert) { + this.server.post('/sys/wrapping/unwrap', () => { + return { auth: { client_token: 'root' } }; + }); + + await visit(`/vault/logout?wrapped_token=1234`); + assert.strictEqual(currentURL(), '/vault/secrets', 'authenticates and redirects to home'); + }); +}); diff --git a/ui/tests/helpers/clients.js b/ui/tests/helpers/clients.js new file mode 100644 index 0000000..1f463ac --- /dev/null +++ b/ui/tests/helpers/clients.js @@ -0,0 +1,81 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import { Response } from 'miragejs';
+
+/** Scenarios
+  Config off, no data
+  Config on, no data
+  Config on, with data
+  Filtering (data with mounts)
+  Filtering (data without mounts)
+  * -- HISTORY ONLY --
+  Filtering different date ranges (hist only)
+  Upgrade warning
+  No permissions for license
+  Version
+  queries available
+  queries unavailable
+  License start date this month
+*/
+export const SELECTORS = {
+  dashboardActiveTab: '.active[data-test-dashboard]',
+  emptyStateTitle: '[data-test-empty-state-title]',
+  usageStats: '[data-test-usage-stats]',
+  dateDisplay: '[data-test-date-display]',
+  attributionBlock: '[data-test-clients-attribution]',
+  filterBar: '[data-test-clients-filter-bar]',
+  rangeDropdown: '[data-test-calendar-widget-trigger]',
+  monthDropdown: '[data-test-popup-menu-trigger="month"]',
+  yearDropdown: '[data-test-popup-menu-trigger="year"]',
+  dateDropdownSubmit: '[data-test-date-dropdown-submit]',
+  runningTotalMonthStats: '[data-test-running-total="single-month-stats"]',
+  runningTotalMonthlyCharts: '[data-test-running-total="monthly-charts"]',
+  monthlyUsageBlock: '[data-test-monthly-usage]',
+  selectedAuthMount: 'div#auth-method-search-select [data-test-selected-option] div',
+  selectedNs: 'div#namespace-search-select [data-test-selected-option] div',
+};
+
+export const CHART_ELEMENTS = {
+  entityClientDataBars: '[data-test-group="entity_clients"]',
+  nonEntityDataBars: '[data-test-group="non_entity_clients"]',
+  yLabels: '[data-test-group="y-labels"]',
+  actionBars: '[data-test-group="action-bars"]',
+  labelActionBars: '[data-test-group="label-action-bars"]',
+  totalValues: '[data-test-group="total-values"]',
+};
+
+export function sendResponse(data, httpStatus = 200) {
+  if (httpStatus === 403) {
+    return [
+      httpStatus,
+      { 'Content-Type': 'application/json' },
+      JSON.stringify({ errors: ['permission denied'] }),
+    ];
+  }
+  if (httpStatus === 204) {
+    // the /activity endpoint returns 204 when there is no data, while
+    // /activity/monthly returns 200 with zero values when there is no data
+    return [httpStatus, { 'Content-Type': 'application/json' }];
+  }
+  return [httpStatus, { 'Content-Type': 'application/json' }, JSON.stringify(data)];
+}
+
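+// Usage sketch (illustrative): `sendResponse` returns the [status, headers, body]
+// tuple Pretender handlers expect, while `overrideResponse` returns a mirage
+// Response for use with setupMirage, e.g.
+//
+//   this.server.get('/sys/internal/counters/activity', () => overrideResponse(403));
+export function overrideResponse(httpStatus, data) {
+  if (httpStatus === 403) {
+    return new Response(
+      403,
+      { 'Content-Type': 'application/json' },
+      JSON.stringify({ errors: ['permission denied'] })
+    );
+  }
+  // the /activity endpoint returns 204 when there is no data, while
+  // /activity/monthly returns 200 with zero values when there is no data
+  if (httpStatus === 204) {
+    return new Response(204, { 'Content-Type': 'application/json' });
+  }
+  return new Response(200, { 'Content-Type': 'application/json' }, JSON.stringify(data));
+}
diff --git a/ui/tests/helpers/codemirror.js b/ui/tests/helpers/codemirror.js
new file mode 100644
index 0000000..0c01c77
--- /dev/null
+++ b/ui/tests/helpers/codemirror.js
@@ -0,0 +1,20 @@
+/**
+ * Copyright (c) HashiCorp, Inc.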
+ * SPDX-License-Identifier: MPL-2.0 + */ + +const invariant = (truthy, error) => { + if (!truthy) throw new Error(error); +}; + +export default function (context, selector) { + const cmService = context.owner.lookup('service:code-mirror'); + + const element = document.querySelector(selector); + invariant(element, `Selector ${selector} matched no elements`); + + const cm = cmService.instanceFor(element.id); + invariant(cm, `No registered CodeMirror instance for ${selector}`); + + return cm; +} diff --git a/ui/tests/helpers/components/sidebar-nav.js b/ui/tests/helpers/components/sidebar-nav.js new file mode 100644 index 0000000..874cd9a --- /dev/null +++ b/ui/tests/helpers/components/sidebar-nav.js @@ -0,0 +1,27 @@ +import { allFeatures } from 'vault/helpers/all-features'; +import sinon from 'sinon'; + +export const stubFeaturesAndPermissions = (owner, isEnterprise = false, setCluster = false) => { + const permissions = owner.lookup('service:permissions'); + const hasNavPermission = sinon.stub(permissions, 'hasNavPermission'); + hasNavPermission.returns(true); + sinon.stub(permissions, 'navPathParams'); + + const version = owner.lookup('service:version'); + const features = sinon.stub(version, 'features'); + features.value(allFeatures()); + sinon.stub(version, 'isEnterprise').value(isEnterprise); + + const auth = owner.lookup('service:auth'); + sinon.stub(auth, 'authData').value({}); + + if (setCluster) { + owner.lookup('service:currentCluster').setCluster({ + id: 'foo', + anyReplicationEnabled: true, + usingRaft: true, + }); + } + + return { hasNavPermission, features }; +}; diff --git a/ui/tests/helpers/components/ttl-picker.js b/ui/tests/helpers/components/ttl-picker.js new file mode 100644 index 0000000..a600022 --- /dev/null +++ b/ui/tests/helpers/components/ttl-picker.js @@ -0,0 +1,19 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +const selectors = { + ttlFormGroup: '[data-test-ttl-inputs]', + toggle: '[data-test-ttl-toggle]', + toggleByLabel: (label) => `[data-test-ttl-toggle="${label}"]`, + label: '[data-test-ttl-form-label]', + subtext: '[data-test-ttl-form-subtext]', + tooltipTrigger: `[data-test-tooltip-trigger]`, + ttlValue: '[data-test-ttl-value]', + ttlUnit: '[data-test-select="ttl-unit"]', + valueInputByLabel: (label) => `[data-test-ttl-value="${label}"]`, + unitInputByLabel: (label) => `[data-test-ttl-unit="${label}"] [data-test-select="ttl-unit"]`, +}; + +export default selectors; diff --git a/ui/tests/helpers/flash-message.js b/ui/tests/helpers/flash-message.js new file mode 100644 index 0000000..f98432d --- /dev/null +++ b/ui/tests/helpers/flash-message.js @@ -0,0 +1,8 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import FlashObject from 'ember-cli-flash/flash/object'; + +FlashObject.reopen({ init() {} }); diff --git a/ui/tests/helpers/index.js b/ui/tests/helpers/index.js new file mode 100644 index 0000000..a223051 --- /dev/null +++ b/ui/tests/helpers/index.js @@ -0,0 +1,47 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { + setupApplicationTest as upstreamSetupApplicationTest, + setupRenderingTest as upstreamSetupRenderingTest, + setupTest as upstreamSetupTest, +} from 'ember-qunit'; + +// This file exists to provide wrappers around ember-qunit's / ember-mocha's +// test setup functions. This way, you can easily extend the setup that is +// needed per test type. 
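+//
+// Usage sketch (illustrative): a test file opts into the shared setup by
+// importing from this module instead of ember-qunit directly, e.g.
+//
+//   import { setupRenderingTest } from 'vault/tests/helpers';
+//
+//   module('Integration | Component | example', function (hooks) {
+//     setupRenderingTest(hooks);
+//   });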
+ +function setupApplicationTest(hooks, options) { + upstreamSetupApplicationTest(hooks, options); + + // Additional setup for application tests can be done here. + // + // For example, if you need an authenticated session for each + // application test, you could do: + // + // hooks.beforeEach(async function () { + // await authenticateSession(); // ember-simple-auth + // }); + // + // This is also a good place to call test setup functions coming + // from other addons: + // + // setupIntl(hooks); // ember-intl + // setupMirage(hooks); // ember-cli-mirage +} + +function setupRenderingTest(hooks, options) { + upstreamSetupRenderingTest(hooks, options); + + // Additional setup for rendering tests can be done here. +} + +function setupTest(hooks, options) { + upstreamSetupTest(hooks, options); + + // Additional setup for unit tests can be done here. +} + +export { setupApplicationTest, setupRenderingTest, setupTest }; diff --git a/ui/tests/helpers/kubernetes/overview.js b/ui/tests/helpers/kubernetes/overview.js new file mode 100644 index 0000000..49b647b --- /dev/null +++ b/ui/tests/helpers/kubernetes/overview.js @@ -0,0 +1,17 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +export const SELECTORS = { + rolesCardTitle: '[data-test-selectable-card="Roles"] .title', + rolesCardSubTitle: '[data-test-selectable-card-container="Roles"] p', + rolesCardLink: '[data-test-selectable-card="Roles"] a', + rolesCardNumRoles: '[data-test-roles-card-overview-num]', + generateCredentialsCardTitle: '[data-test-selectable-card="Generate credentials"] .title', + generateCredentialsCardSubTitle: '[data-test-selectable-card-container="Generate credentials"] p', + generateCredentialsCardButton: '[data-test-generate-credential-button]', + emptyStateTitle: '.empty-state .empty-state-title', + emptyStateMessage: '.empty-state .empty-state-message', + emptyStateActionText: '.empty-state .empty-state-actions', +}; diff --git a/ui/tests/helpers/mirage-to-models.js b/ui/tests/helpers/mirage-to-models.js new file mode 100644 index 0000000..cf7576a --- /dev/null +++ b/ui/tests/helpers/mirage-to-models.js @@ -0,0 +1,17 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { getContext } from '@ember/test-helpers'; + +export default (data) => { + const context = getContext(); + const store = context.owner.lookup('service:store'); + const modelName = Array.isArray(data) ? data[0].modelName : data.modelName; + const json = context.server.serializerOrRegistry.serialize(data); + store.push(json); + return Array.isArray(data) + ? data.map(({ id }) => store.peekRecord(modelName, id)) + : store.peekRecord(modelName, data.id); +}; diff --git a/ui/tests/helpers/noop-all-api-requests.js b/ui/tests/helpers/noop-all-api-requests.js new file mode 100644 index 0000000..fbf0dd2 --- /dev/null +++ b/ui/tests/helpers/noop-all-api-requests.js @@ -0,0 +1,23 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import Pretender from 'pretender';
+import { noopStub } from './stubs';
+
+/**
+ * DEPRECATED: prefer to use `setupMirage` along with stubs in vault/tests/helpers/stubs
+ */
+export default function (options = { usePassthrough: false }) {
+  return new Pretender(function () {
+    // every /v1 request is either answered by a no-op stub or, when
+    // usePassthrough is set, passed through to the real backend
+    let fn = noopStub();
+    if (options.usePassthrough) {
+      fn = this.passthrough;
+    }
+    this.post('/v1/**', fn);
+    this.put('/v1/**', fn);
+    this.get('/v1/**', fn);
+    this.delete('/v1/**', fn);
+  });
+}
diff --git a/ui/tests/helpers/oidc-config.js b/ui/tests/helpers/oidc-config.js
new file mode 100644
index 0000000..03ae0d2
--- /dev/null
+++ b/ui/tests/helpers/oidc-config.js
@@ -0,0 +1,184 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import { Response } from 'miragejs';
+
+export const OIDC_BASE_URL = `/vault/access/oidc`;
+
+export const SELECTORS = {
+  oidcHeader: '[data-test-oidc-header]',
+  oidcClientCreateButton: '[data-test-oidc-configure]',
+  oidcRouteTabs: '[data-test-oidc-tabs]',
+  oidcLandingImg: '[data-test-oidc-img]',
+  confirmActionButton: '[data-test-confirm-button="true"]',
+  inlineAlert: '[data-test-inline-alert]',
+  // client route
+  clientSaveButton: '[data-test-oidc-client-save]',
+  clientCancelButton: '[data-test-oidc-client-cancel]',
+  clientDeleteButton: '[data-test-oidc-client-delete] button',
+  clientEditButton: '[data-test-oidc-client-edit]',
+  clientDetailsTab: '[data-test-oidc-client-details]',
+  clientProvidersTab: '[data-test-oidc-client-providers]',
+
+  // assignment route
+  assignmentSaveButton: '[data-test-oidc-assignment-save]',
+  assignmentCreateButton: '[data-test-oidc-assignment-create]',
+  assignmentEditButton: '[data-test-oidc-assignment-edit]',
+  assignmentDeleteButton: '[data-test-oidc-assignment-delete] button',
+  assignmentCancelButton: '[data-test-oidc-assignment-cancel]',
+  assignmentDetailsTab: '[data-test-oidc-assignment-details]',
+
+  // scope routes
+  scopeSaveButton: '[data-test-oidc-scope-save]',
+  scopeCancelButton: '[data-test-oidc-scope-cancel]',
+  scopeDeleteButton: '[data-test-oidc-scope-delete] button',
+  scopeEditButton: '[data-test-oidc-scope-edit]',
+  scopeDetailsTab: '[data-test-oidc-scope-details]',
+  scopeEmptyState: '[data-test-oidc-scope-empty-state]',
+  scopeCreateButtonEmptyState: '[data-test-oidc-scope-create-empty-state]',
+  scopeCreateButton: '[data-test-oidc-scope-create]',
+
+  // key route
+  keySaveButton: '[data-test-oidc-key-save]',
+  keyCancelButton: '[data-test-oidc-key-cancel]',
+  keyDeleteButton: '[data-test-oidc-key-delete] button',
+  keyEditButton: '[data-test-oidc-key-edit]',
+  keyRotateButton: '[data-test-oidc-key-rotate] button',
+  keyDetailsTab: '[data-test-oidc-key-details]',
+  keyClientsTab: '[data-test-oidc-key-clients]',
+
+  // provider route
+  providerSaveButton: '[data-test-oidc-provider-save]',
+  providerCancelButton: '[data-test-oidc-provider-cancel]',
+  providerDeleteButton: '[data-test-oidc-provider-delete] button',
+  providerEditButton: '[data-test-oidc-provider-edit]',
+  providerDetailsTab: '[data-test-oidc-provider-details]',
+  providerClientsTab: '[data-test-oidc-provider-clients]',
+};
+
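+// Usage sketch (illustrative): stub a missing record from a mirage route
+// handler, e.g.
+//
+//   this.server.get('/identity/oidc/key/:name', () => overrideMirageResponse(404));
+export function overrideMirageResponse(httpStatus, data) {
+  if (httpStatus === 403) {
+    return new Response(
+      403,
+      { 'Content-Type': 'application/json' },
+      JSON.stringify({ errors: ['permission denied'] })
+    );
+  }
+  if (httpStatus === 404) {
+    return new Response(404, { 'Content-Type': 'application/json' });
+  }
+  if (httpStatus === 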
200) { + return new Response(200, { 'Content-Type': 'application/json' }, JSON.stringify(data)); + } + return { + request_id: crypto.randomUUID(), + lease_id: '', + renewable: false, + lease_duration: 0, + wrap_info: null, + warnings: null, + auth: null, + data: { ...data }, + }; +} + +export function overrideCapabilities(requestPath, capabilitiesArray) { + // sample of capabilitiesArray: ['read', 'update'] + return { + request_id: '40f7e44d-af5c-9b60-bd20-df72eb17e294', + lease_id: '', + renewable: false, + lease_duration: 0, + data: { + capabilities: capabilitiesArray, + [requestPath]: capabilitiesArray, + }, + wrap_info: null, + warnings: null, + auth: null, + }; +} + +export async function clearRecord(store, modelType, id) { + await store + .findRecord(modelType, id) + .then((model) => { + deleteModelRecord(model); + }) + .catch(() => { + // swallow error + }); +} + +const deleteModelRecord = async (model) => { + await model.destroyRecord(); +}; + +// MOCK RESPONSES: + +export const CLIENT_LIST_RESPONSE = { + keys: ['test-app', 'app-1'], + key_info: { + 'test-app': { + assignments: ['allow_all'], + client_id: 'whaT7KB0C3iBH1l3rXhd5HPf0n6vXU0s', + client_secret: 'hvo_secret_nkJSTu2NVYqylXwFbFijsTxJHg4Ic4gqSJw7uOZ4FbSXcObngDkKoVsvyndrf2O8', + client_type: 'confidential', + id_token_ttl: 0, + key: 'default', + redirect_uris: [], + }, + 'app-1': { + assignments: ['allow_all'], + client_id: 'HkmsTA4GG17j0Djy4EUAB2VAyzuLVewg', + client_secret: 'hvo_secret_g3f30MxAJWLXhhrCejbG4zY3O4LEHhEIO24aMy181AYKnfQtWTVV924ZmnlpUFUw', + client_type: 'confidential', + id_token_ttl: 0, + key: 'test-key', + redirect_uris: [], + }, + }, +}; + +export const CLIENT_DATA_RESPONSE = { + access_token_ttl: 0, + assignments: ['allow_all'], + client_id: 'whaT7KB0C3iBH1l3rXhd5HPf0n6vXU0s', + client_secret: 'hvo_secret_nkJSTu2NVYqylXwFbFijsTxJHg4Ic4gqSJw7uOZ4FbSXcObngDkKoVsvyndrf2O8', + client_type: 'confidential', + id_token_ttl: 0, + key: 'default', + redirect_uris: [], +}; + +export const ASSIGNMENT_LIST_RESPONSE = { + keys: ['allow_all', 'test-assignment'], +}; + +export const ASSIGNMENT_DATA_RESPONSE = { + group_ids: ['262ca5b9-7b69-0a84-446a-303dc7d778af'], + entity_ids: ['b6094ac6-baf4-6520-b05a-2bd9f07c66da'], +}; + +export const SCOPE_LIST_RESPONSE = { + keys: ['test-scope'], +}; + +export const SCOPE_DATA_RESPONSE = { + description: 'this is a test', + template: `{ + "groups": {{identity.entity.groups.names}} + }`, +}; + +export const PROVIDER_LIST_RESPONSE = { + keys: ['test-provider'], +}; + +export const PROVIDER_DATA_RESPONSE = { + allowed_client_ids: ['*'], + issuer: '', + scopes_supported: ['test-scope'], +}; diff --git a/ui/tests/helpers/oidc-window-stub.js b/ui/tests/helpers/oidc-window-stub.js new file mode 100644 index 0000000..17d7fb0 --- /dev/null +++ b/ui/tests/helpers/oidc-window-stub.js @@ -0,0 +1,38 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import EmberObject, { computed } from '@ember/object'; +import Evented from '@ember/object/evented'; + +export const fakeWindow = EmberObject.extend(Evented, { + init() { + this._super(...arguments); + this.on('close', () => { + this.set('closed', true); + }); + }, + screen: computed(function () { + return { + height: 600, + width: 500, + }; + }), + origin: 'https://my-vault.com', + closed: false, + open() {}, + close() {}, +}); + +export const buildMessage = (opts) => ({ + isTrusted: true, + origin: 'https://my-vault.com', + data: { + source: 'oidc-callback', + path: 'foo', + state: 'state', + code: 'code', + }, + ...opts, +}); diff --git a/ui/tests/helpers/pki.js b/ui/tests/helpers/pki.js new file mode 100644 index 0000000..4ee4261 --- /dev/null +++ b/ui/tests/helpers/pki.js @@ -0,0 +1,23 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +export const SELECTORS = { + caChain: '[data-test-value-div="CA chain"] [data-test-masked-input]', + certificate: '[data-test-value-div="Certificate"] [data-test-masked-input]', + commonName: '[data-test-row-value="Common name"]', + csr: '[data-test-value-div="CSR"] [data-test-masked-input]', + expiryDate: '[data-test-row-value="Expiration date"]', + issueDate: '[data-test-row-value="Issue date"]', + issuingCa: '[data-test-value-div="Issuing CA"] [data-test-masked-input]', + privateKey: '[data-test-value-div="Private key"] [data-test-masked-input]', + revocationTime: '[data-test-row-value="Revocation time"]', + serialNumber: '[data-test-row-value="Serial number"]', +}; + +export const STANDARD_META = { + total: 2, + currentPage: 1, + pageSize: 100, +}; diff --git a/ui/tests/helpers/pki/overview.js b/ui/tests/helpers/pki/overview.js new file mode 100644 index 0000000..78329cd --- /dev/null +++ b/ui/tests/helpers/pki/overview.js @@ -0,0 +1,27 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +export const SELECTORS = { + issuersCardTitle: '[data-test-selectable-card-container="Issuers"] h3', + issuersCardSubtitle: '[data-test-selectable-card-container="Issuers"] p', + issuersCardLink: '[data-test-selectable-card-container="Issuers"] a', + issuersCardOverviewNum: '[data-test-selectable-card-container="Issuers"] .title-number', + rolesCardTitle: '[data-test-selectable-card-container="Roles"] h3', + rolesCardSubtitle: '[data-test-selectable-card-container="Roles"] p', + rolesCardLink: '[data-test-selectable-card-container="Roles"] a', + rolesCardOverviewNum: '[data-test-selectable-card-container="Roles"] .title-number', + issueCertificate: '[data-test-selectable-card-container="Issue certificate"] h3', + issueCertificateInput: '[data-test-issue-certificate-input]', + issueCertificatePowerSearch: '[data-test-issue-certificate-input] span', + issueCertificateButton: '[data-test-issue-certificate-button]', + viewCertificate: '[data-test-selectable-card-container="View certificate"] h3', + viewCertificateInput: '[data-test-view-certificate-input]', + viewCertificatePowerSearch: '[data-test-view-certificate-input] span', + viewCertificateButton: '[data-test-view-certificate-button]', + viewIssuerInput: '[data-test-issue-issuer-input]', + viewIssuerPowerSearch: '[data-test-issue-issuer-input] span', + viewIssuerButton: '[data-test-view-issuer-button]', + firstPowerSelectOption: '[data-option-index="0"]', +}; diff --git a/ui/tests/helpers/pki/page/pki-configuration-edit.js b/ui/tests/helpers/pki/page/pki-configuration-edit.js new file mode 100644 index 0000000..f7f4c3e --- /dev/null +++ b/ui/tests/helpers/pki/page/pki-configuration-edit.js @@ -0,0 +1,25 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +export const SELECTORS = { + errorBanner: '[data-test-error-banner]', + acmeEditSection: '[data-test-acme-edit-section]', + configEditSection: '[data-test-cluster-config-edit-section]', + configInput: (attr) => `[data-test-input="${attr}"]`, + stringListInput: (attr) => `[data-test-input="${attr}"] [data-test-string-list-input="0"]`, + urlsEditSection: '[data-test-urls-edit-section]', + urlFieldInput: (attr) => `[data-test-input="${attr}"] textarea`, + urlFieldLabel: (attr) => `[data-test-input="${attr}"] label`, + crlEditSection: '[data-test-crl-edit-section]', + crlToggleInput: (attr) => `[data-test-input="${attr}"] input`, + crlTtlInput: (attr) => `[data-test-ttl-value="${attr}"]`, + crlFieldLabel: (attr) => `[data-test-input="${attr}"] label`, + saveButton: '[data-test-configuration-edit-save]', + cancelButton: '[data-test-configuration-edit-cancel]', + validationAlert: '[data-test-configuration-edit-validation-alert]', + deleteButton: (attr) => `[data-test-input="${attr}"] [data-test-string-list-button="delete"]`, + groupHeader: (group) => `[data-test-crl-header="${group}"]`, + checkboxInput: (attr) => `[data-test-input="${attr}"]`, +}; diff --git a/ui/tests/helpers/pki/page/pki-keys.js b/ui/tests/helpers/pki/page/pki-keys.js new file mode 100644 index 0000000..fbbd9c1 --- /dev/null +++ b/ui/tests/helpers/pki/page/pki-keys.js @@ -0,0 +1,25 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +export const SELECTORS = { + // key index + importKey: '[data-test-pki-key-import]', + generateKey: '[data-test-pki-key-generate]', + keyId: '[data-test-key="id"]', + keyName: '[data-test-key="name"]', + popupMenuTrigger: '[data-test-popup-menu-trigger]', + popupMenuDetails: '[data-test-key-menu-link="details"]', + popupMenuEdit: '[data-test-key-menu-link="edit"]', + // key details + title: '[data-test-key-details-title]', + keyIdValue: '[data-test-value-div="Key ID"]', + keyNameValue: '[data-test-value-div="Key name"]', + keyTypeValue: '[data-test-value-div="Key type"]', + keyBitsValue: '[data-test-value-div="Key bits"]', + keyDeleteButton: '[data-test-pki-key-delete] button', + downloadButton: '[data-test-download-button]', + keyEditLink: '[data-test-pki-key-edit]', + confirmDelete: '[data-test-confirm-button]', +}; diff --git a/ui/tests/helpers/pki/page/pki-role-details.js b/ui/tests/helpers/pki/page/pki-role-details.js new file mode 100644 index 0000000..88f5b48 --- /dev/null +++ b/ui/tests/helpers/pki/page/pki-role-details.js @@ -0,0 +1,12 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +export const SELECTORS = { + issuerLabel: '[data-test-row-label="Issuer"]', + noStoreValue: '[data-test-value-div="Store in storage backend"]', + keyUsageValue: '[data-test-value-div="Key usage"]', + extKeyUsageValue: '[data-test-value-div="Ext key usage"]', + customTtlValue: '[data-test-value-div="Issued certificates expire after"]', +}; diff --git a/ui/tests/helpers/pki/page/pki-tidy-form.js b/ui/tests/helpers/pki/page/pki-tidy-form.js new file mode 100644 index 0000000..1a877f5 --- /dev/null +++ b/ui/tests/helpers/pki/page/pki-tidy-form.js @@ -0,0 +1,18 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +export const SELECTORS = { + tidyFormName: (attr) => `[data-test-tidy-form="${attr}"]`, + inputByAttr: (attr) => `[data-test-input="${attr}"]`, + toggleInput: (attr) => `[data-test-input="${attr}"] input`, + intervalDuration: '[data-test-ttl-value="Automatic tidy enabled"]', + acmeAccountSafetyBuffer: '[data-test-ttl-value="Tidy ACME enabled"]', + toggleLabel: (label) => `[data-test-toggle-label="${label}"]`, + tidySectionHeader: (header) => `[data-test-tidy-header="${header}"]`, + tidySave: '[data-test-pki-tidy-button]', + tidyCancel: '[data-test-pki-tidy-cancel]', + tidyPauseDuration: '[data-test-ttl-value="Pause duration"]', + editAutoTidyButton: '[data-test-pki-edit-tidy-auto-link]', +}; diff --git a/ui/tests/helpers/pki/page/pki-tidy.js b/ui/tests/helpers/pki/page/pki-tidy.js new file mode 100644 index 0000000..783efc8 --- /dev/null +++ b/ui/tests/helpers/pki/page/pki-tidy.js @@ -0,0 +1,30 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ +import { SELECTORS as TIDY_FORM } from './pki-tidy-form'; + +export const SELECTORS = { + hdsAlertTitle: '[data-test-hds-alert-title]', + hdsAlertDescription: '[data-test-hds-alert-description]', + alertUpdatedAt: '[data-test-hds-alert-updated-at]', + cancelTidyAction: '[data-test-cancel-tidy-action]', + hdsAlertButtonText: '[data-test-cancel-tidy-action] .hds-button__text', + timeStartedRow: '[data-test-value-div="Time started"]', + timeFinishedRow: '[data-test-value-div="Time finished"]', + cancelTidyModalBackground: '[data-test-modal-background="Cancel tidy?"]', + tidyEmptyStateConfigure: '[data-test-tidy-empty-state-configure]', + manualTidyToolbar: '[data-test-pki-manual-tidy-config]', + autoTidyToolbar: '[data-test-pki-auto-tidy-config]', + tidyConfigureModal: { + configureTidyModal: '[data-test-modal-background="Tidy this mount"]', + tidyModalAutoButton: '[data-test-tidy-modal-auto-button]', + tidyModalManualButton: '[data-test-tidy-modal-manual-button]', + tidyModalCancelButton: '[data-test-tidy-modal-cancel-button]', + tidyOptionsModal: '[data-test-pki-tidy-options-modal]', + }, + tidyEmptyState: '[data-test-component="empty-state"]', + tidyForm: { + ...TIDY_FORM, + }, +}; diff --git a/ui/tests/helpers/pki/pki-configure-create.js b/ui/tests/helpers/pki/pki-configure-create.js new file mode 100644 index 0000000..374ffdd --- /dev/null +++ b/ui/tests/helpers/pki/pki-configure-create.js @@ -0,0 +1,30 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { SELECTORS as GENERATE_ROOT } from './pki-generate-root'; + +export const SELECTORS = { + // page::pki-configure-create + breadcrumbContainer: '[data-test-breadcrumbs]', + title: '[data-test-pki-engine-page-title]', + option: '[data-test-pki-config-option]', + optionByKey: (key) => `[data-test-pki-config-option="${key}"]`, + cancelButton: '[data-test-pki-config-cancel]', + saveButton: '[data-test-pki-config-save]', + doneButton: '[data-test-done]', + configureButton: '[data-test-configure-pki-button]', + // pki-generate-root + ...GENERATE_ROOT, + generateRootOption: '[data-test-pki-config-option="generate-root"]', + // pki-ca-cert-import + importForm: '[data-test-pki-import-pem-bundle-form]', + importSubmit: '[data-test-pki-import-pem-bundle]', + importSectionLabel: '[data-test-import-section-label]', + importMapping: '[data-test-imported-bundle-mapping]', + importedIssuer: '[data-test-imported-issuer]', + importedKey: '[data-test-imported-key]', + // generate-intermediate + csrDetails: '[data-test-generate-csr-result]', +}; diff --git a/ui/tests/helpers/pki/pki-delete-all-issuers.js b/ui/tests/helpers/pki/pki-delete-all-issuers.js new file mode 100644 index 0000000..397f71a --- /dev/null +++ b/ui/tests/helpers/pki/pki-delete-all-issuers.js @@ -0,0 +1,11 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +export const SELECTORS = { + issuerLink: '[data-test-delete-all-issuers-link]', + deleteAllIssuerModal: '[data-test-modal-background="Delete All Issuers?"]', + deleteAllIssuerInput: '[data-test-confirmation-modal-input="Delete All Issuers?"]', + deleteAllIssuerButton: '[data-test-confirm-button="Delete All Issuers?"]', +}; diff --git a/ui/tests/helpers/pki/pki-generate-root.js b/ui/tests/helpers/pki/pki-generate-root.js new file mode 100644 index 0000000..4e16f72 --- /dev/null +++ b/ui/tests/helpers/pki/pki-generate-root.js @@ -0,0 +1,35 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+export const SELECTORS = {
+  mainSectionTitle: '[data-test-generate-root-title="Root parameters"]',
+  urlSectionTitle: '[data-test-generate-root-title="Issuer URLs"]',
+  keyParamsGroupToggle: '[data-test-toggle-group="Key parameters"]',
+  sanGroupToggle: '[data-test-toggle-group="Subject Alternative Name (SAN) Options"]',
+  additionalGroupToggle: '[data-test-toggle-group="Additional subject fields"]',
+  toggleGroupDescription: '[data-test-toggle-group-description]',
+  formField: '[data-test-field]',
+  typeField: '[data-test-input="type"]',
+  inputByName: (name) => `[data-test-input="${name}"]`,
+  fieldByName: (name) => `[data-test-field="${name}"]`,
+  generateRootSave: '[data-test-pki-generate-root-save]',
+  generateRootCancel: '[data-test-pki-generate-root-cancel]',
+  generateRootCommonNameField: '[data-test-input="commonName"]',
+  generateRootIssuerNameField: '[data-test-input="issuerName"]',
+  formInvalidError: '[data-test-pki-generate-root-validation-error]',
+  urlsSection: '[data-test-urls-section]',
+  urlField: '[data-test-urls-section] [data-test-input]',
+  // Shown values after save
+  saved: {
+    certificate: '[data-test-value-div="Certificate"] [data-test-masked-input]',
+    commonName: '[data-test-row-value="Common name"]',
+    issuerName: '[data-test-row-value="Issuer name"]',
+    issuerLink: '[data-test-value-div="Issuer ID"] a',
+    keyName: '[data-test-row-value="Key name"]',
+    keyLink: '[data-test-value-div="Key ID"] a',
+    privateKey: '[data-test-value-div="Private key"] [data-test-masked-input]',
+    serialNumber: '[data-test-row-value="Serial number"]',
+  },
+};
diff --git a/ui/tests/helpers/pki/pki-issuer-cross-sign.js b/ui/tests/helpers/pki/pki-issuer-cross-sign.js
new file mode 100644
index 0000000..86700e9
--- /dev/null
+++ b/ui/tests/helpers/pki/pki-issuer-cross-sign.js
@@ -0,0 +1,23 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import { SELECTORS as CONFIGURE } from './pki-configure-create';
+import { SELECTORS as DETAILS } from './pki-issuer-details';
+
+export const SELECTORS = {
+  objectListInput: (key, row = 0) => `[data-test-object-list-input="${key}-${row}"]`,
+  inputByName: (name) => `[data-test-input="${name}"]`,
+  addRow: '[data-test-object-list-add-button]',
+  submitButton: '[data-test-cross-sign-submit]',
+  cancelButton: '[data-test-cross-sign-cancel]',
+  statusCount: '[data-test-cross-sign-status-count]',
+  signedIssuerRow: (row = 0) => `[data-test-info-table-row="${row}"]`,
+  signedIssuerCol: (attr) => `[data-test-info-table-column="${attr}"]`,
+  // for cross-signing acceptance tests
+  configure: { ...CONFIGURE },
+  details: { ...DETAILS },
+  rowValue: (attr) => `[data-test-row-value="${attr}"]`,
+  copyButton: (attr) => `[data-test-value-div="${attr}"] [data-test-copy-button]`,
+};
diff --git a/ui/tests/helpers/pki/pki-issuer-details.js b/ui/tests/helpers/pki/pki-issuer-details.js
new file mode 100644
index 0000000..8235ac8
--- /dev/null
+++ b/ui/tests/helpers/pki/pki-issuer-details.js
@@ -0,0 +1,20 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0 + */ + +export const SELECTORS = { + configure: '[data-test-pki-issuer-configure]', + crossSign: '[data-test-pki-issuer-cross-sign]', + defaultGroup: '[data-test-details-group="default"]', + download: '[data-test-issuer-download]', + groupTitle: '[data-test-group-title]', + parsingAlertBanner: '[data-test-parsing-error-alert-banner]', + rotateModal: '[data-test-modal-background="Rotate this root"]', + rotateModalGenerate: '[data-test-root-rotate-step-one]', + rotateRoot: '[data-test-pki-issuer-rotate-root]', + row: '[data-test-component="info-table-row"]', + signIntermediate: '[data-test-pki-issuer-sign-int]', + urlsGroup: '[data-test-details-group="Issuer URLs"]', + valueByName: (name) => `[data-test-value-div="${name}"]`, +}; diff --git a/ui/tests/helpers/pki/pki-key-form.js b/ui/tests/helpers/pki/pki-key-form.js new file mode 100644 index 0000000..71ecd80 --- /dev/null +++ b/ui/tests/helpers/pki/pki-key-form.js @@ -0,0 +1,15 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +export const SELECTORS = { + keyCreateButton: '[data-test-pki-key-save]', + keyCancelButton: '[data-test-pki-key-cancel]', + keyNameInput: '[data-test-input="keyName"]', + typeInput: '[data-test-input="type"]', + keyTypeInput: '[data-test-input="keyType"]', + keyBitsInput: '[data-test-input="keyBits"]', + validationError: '[data-test-pki-key-validation-error]', + fieldErrorByName: (name) => `[data-test-field-validation="${name}"]`, +}; diff --git a/ui/tests/helpers/pki/pki-not-valid-after-form.js b/ui/tests/helpers/pki/pki-not-valid-after-form.js new file mode 100644 index 0000000..6a22272 --- /dev/null +++ b/ui/tests/helpers/pki/pki-not-valid-after-form.js @@ -0,0 +1,15 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +export const SELECTORS = { + radioTtl: '[data-test-radio-button="ttl"]', + radioTtlLabel: '[data-test-radio-label="ttl"]', + radioDate: '[data-test-radio-button="not_after"]', + radioDateLabel: '[data-test-radio-label="specificDate"]', + ttlForm: '[data-test-ttl-inputs]', + ttlTimeInput: '[data-test-ttl-value="TTL"]', + ttlUnitInput: '[data-test-select="ttl-unit"]', + dateInput: '[data-test-input="not_after"]', +}; diff --git a/ui/tests/helpers/pki/pki-role-form.js b/ui/tests/helpers/pki/pki-role-form.js new file mode 100644 index 0000000..605ee22 --- /dev/null +++ b/ui/tests/helpers/pki/pki-role-form.js @@ -0,0 +1,36 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +export const PKI_BASE_URL = `/vault/cluster/secrets/backend/pki/roles`; + +export const SELECTORS = { + roleName: '[data-test-input="name"]', + issuerRef: '[data-test-input="issuerRef"]', + issuerRefSelect: '[data-test-select="issuerRef"]', + issuerRefToggle: '[data-test-toggle-label="issuerRef-toggle"]', + customTtl: '[data-test-field="customTtl"]', + backdateValidity: '[data-test-ttl-value="Backdate validity"]', + maxTtl: '[data-test-toggle-label="Max TTL"]', + generateLease: '[data-test-field="generateLease"]', + noStore: '[data-test-field="noStore"]', + addBasicConstraints: '[data-test-input="addBasicConstraints"]', + domainHandling: '[data-test-toggle-group="Domain handling"]', + keyParams: '[data-test-toggle-group="Key parameters"]', + keyType: '[data-test-input="keyType"]', + keyBits: '[data-test-input="keyBits"]', + signatureBits: '[data-test-input="signatureBits"]', + keyUsage: '[data-test-toggle-group="Key usage"]', + extKeyUsageOids: '[data-test-input="extKeyUsageOids"]', + digitalSignature: '[data-test-checkbox="DigitalSignature"]', + keyAgreement: '[data-test-checkbox="KeyAgreement"]', + keyEncipherment: '[data-test-checkbox="KeyEncipherment"]', + any: '[data-test-checkbox="Any"]', + serverAuth: '[data-test-checkbox="ServerAuth"]', + policyIdentifiers: '[data-test-toggle-group="Policy identifiers"]', + san: '[data-test-toggle-group="Subject Alternative Name (SAN) Options"]', + additionalSubjectFields: '[data-test-toggle-group="Additional subject fields"]', + roleCreateButton: '[data-test-pki-role-save]', + roleCancelButton: '[data-test-pki-role-cancel]', +}; diff --git a/ui/tests/helpers/pki/pki-role-generate.js b/ui/tests/helpers/pki/pki-role-generate.js new file mode 100644 index 0000000..e7af1b2 --- /dev/null +++ b/ui/tests/helpers/pki/pki-role-generate.js @@ -0,0 +1,19 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +export const SELECTORS = { + form: '[data-test-pki-generate-cert-form]', + commonNameField: '[data-test-input="commonName"]', + optionsToggle: '[data-test-toggle-group="Subject Alternative Name (SAN) Options"]', + generateButton: '[data-test-pki-generate-button]', + cancelButton: '[data-test-pki-generate-cancel]', + downloadButton: '[data-test-pki-cert-download-button]', + revokeButton: '[data-test-pki-cert-revoke-button]', + serialNumber: '[data-test-value-div="Serial number"]', + certificate: '[data-test-value-div="Certificate"]', + inlineAlert: '[data-test-alert]', + commonNameInlineError: '[data-test-field="commonName"] [data-test-inline-alert]', + commonNameErrorBorder: '[data-test-input="commonName"]', +}; diff --git a/ui/tests/helpers/pki/pki-run-commands.js b/ui/tests/helpers/pki/pki-run-commands.js new file mode 100644 index 0000000..c99748d --- /dev/null +++ b/ui/tests/helpers/pki/pki-run-commands.js @@ -0,0 +1,36 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import consoleClass from 'vault/tests/pages/components/console/ui-panel'; +import { create } from 'ember-cli-page-object'; + +const consoleComponent = create(consoleClass); + +export const tokenWithPolicy = async function (name, policy) { + await consoleComponent.runCommands([ + `write sys/policies/acl/${name} policy=${btoa(policy)}`, + `write -field=client_token auth/token/create policies=${name}`, + ]); + return consoleComponent.lastLogOutput; +}; + +export const runCommands = async function (commands) { + try { + await consoleComponent.runCommands(commands); + const res = consoleComponent.lastLogOutput; + if (res.includes('Error')) { + throw new Error(res); + } + return res; + } catch (error) { + // eslint-disable-next-line no-console + console.error( + `The following occurred when trying to run the command(s):\n ${commands.join('\n')} \n\n ${ + consoleComponent.lastLogOutput + }` + ); + throw error; + } +}; diff --git a/ui/tests/helpers/pki/values.js b/ui/tests/helpers/pki/values.js new file mode 100644 index 0000000..7c945b6 --- /dev/null +++ b/ui/tests/helpers/pki/values.js @@ -0,0 +1,185 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +// Expires Jan 10, 2033 +export const rootPem = `-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIUTBbQcZijQsmd0rjd6COikPsrGyowDQYJKoZIhvcNAQEL +BQAwFDESMBAGA1UEAxMJdGVzdC1yb290MB4XDTIzMDEyMDE3NTcxMloXDTIzMDIy +MTE3NTc0MlowFDESMBAGA1UEAxMJdGVzdC1yb290MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAlUHfvQLsocXtvwRCpTnXGzwMCD+3KKK7y1+SUCgpAD9Y +RV2xLAbqh0iK3x2WI4+Pek1Ub6dYaWczzBob6wRq9iFB72uLPpbL8yRf+tc1egmP +wwJQS9qidb1hcSi4p6x/JwOpr2v2PDqJPDoHrfaHeJgCuBGS00qUFH7oHQz9Usim +lHjIbVNF3Qa1Hq2bgwkZmRjRn3Bez/xy3YEiQ41GTicUBqY4NAGWuS1LiHyEUW81 +iQ+1iGlbpuAL4H7lpKmrhv1xZXEsF9vNL6H0Y7kjjAImTQnmo+ozcArnKnwh2wmS +f/TrVnN4RRc8dvN8P8nWvVsJYK/D40yc7YMljIESKQIDAQABo4HEMIHBMA4GA1Ud +DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBT6rcf5twb19wLL +JjhPOVywd41d2jAfBgNVHSMEGDAWgBT6rcf5twb19wLLJjhPOVywd41d2jArBggr +BgEFBQcBAQQfMB0wGwYIKwYBBQUHMAKGD2Z1bmZvcmVjYXN0LmNvbTAUBgNVHREE +DTALggl0ZXN0LXJvb3QwGwYDVR0fBBQwEjAQoA6gDIYKZ29vZ2xlLmNvbTANBgkq +hkiG9w0BAQsFAAOCAQEAjG7km+QsIuW7KNY3h8YHJZhdr+tIx57k9tUR4w1+d8QI +t44FTdCYdN8n89lsFK9bONZatd0LY3qqcOARE2ni0Hg/zV9u8TTVKTKAOOx8zBd1 +TnwzhXb8mssqnXK9lcECexuWf/s5lkyHjcWOuzNVI0PohrX9tGZwdzsZEgH4Y49i +o8I9DD+uBHknwByRLXSDmgggwgOYsyTg/IfYoHlLHDD3CaOpkCvUCZvM9bI7nrlx +2GByQ/WDT4ArAHcf+Z1iaSIbV6WG6QWoPsu2/WKybcuN2fznaXtJMwgRl50BUv2h +DU3c2oZTc0mPYGft6U8mVwLqfYTcEduGidTLAQPE5w== +-----END CERTIFICATE-----`; + +export const issuerPemBundle = ` +-----BEGIN CERTIFICATE----- +MIIDRTCCAi2gAwIBAgIUdKagCL6TnN5xLkwhPbNY8JEcY0YwDQYJKoZIhvcNAQEL +BQAwGzEZMBcGA1UEAxMQd3d3LnRlc3QtaW50LmNvbTAeFw0yMzAxMDkxOTA1NTBa +Fw0yMzAyMTAxOTA2MjBaMBsxGTAXBgNVBAMTEHd3dy50ZXN0LWludC5jb20wggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCfd5o9JfyRAXH+E1vE2U0xjSqs +A/cxDqsDXRHBnNJvzAa+7gPKXCDQZbr6chjxLXpP6Bv2/O+dZHq1fo/f6q9PDDGW +JYIluwbACpe7W1UB7q9xFkZg85yQsNYokGZlwv/AMGpFBxDwVlNGL+4fxvFTv7uF +mIlDzSIPrzByyCrqAFMNNqNwlAerDt/C6DMZae/rTGXIXsTfUpxPy21bzkeA+70I +YCV1ffK8UnAeBYNUJ+v8+XgTQ5KhRyQ+fscUkO3T2s6f3O9Q2sWxswkf2YmZB+V1 +cTZ5w6hqiuFdBXz7GRnACi1/gbWbaExQTJRplArFwIHka7dqJh8tYkXDjai3AgMB +AAGjgYAwfjAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQU68/xXIgvsleKkuA8clK/6YslB/IwHwYDVR0jBBgwFoAU68/xXIgvsleKkuA8 +clK/6YslB/IwGwYDVR0RBBQwEoIQd3d3LnRlc3QtaW50LmNvbTANBgkqhkiG9w0B +AQsFAAOCAQEAWSff0BH3SJv/XqwN/flqc1CVzOios72/IJ+KBBv0AzFCZ8wJPi+c 
+hH1bw7tqi01Bgh595TctogDFN1b6pjN+jrlIP4N+FF9Moj79Q+jHQMnuJomyPuI7 +i07vqUcxgSmvEBBWOWS+/vxe6TfWDg18nyPf127CWQN8IHTo1f/GavX+XmRve6XT +EWoqcQshEk9i87oqCbaT7B40jgjTAd1r4Cc6P4s1fAGPt9e9eqMj13kTyVDNuCoD +FSZYalrlkASpg+c9oDQIh2MikGQINXHv/zIEHOW93siKMWeA4ni6phHtMg/p5eJt +SxnVZsSzj8QLy2uwX1AADR0QUvJzMxptyA== +-----END CERTIFICATE----- +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAn3eaPSX8kQFx/hNbxNlNMY0qrAP3MQ6rA10RwZzSb8wGvu4D +ylwg0GW6+nIY8S16T+gb9vzvnWR6tX6P3+qvTwwxliWCJbsGwAqXu1tVAe6vcRZG +YPOckLDWKJBmZcL/wDBqRQcQ8FZTRi/uH8bxU7+7hZiJQ80iD68wcsgq6gBTDTaj +cJQHqw7fwugzGWnv60xlyF7E31KcT8ttW85HgPu9CGAldX3yvFJwHgWDVCfr/Pl4 +E0OSoUckPn7HFJDt09rOn9zvUNrFsbMJH9mJmQfldXE2ecOoaorhXQV8+xkZwAot +f4G1m2hMUEyUaZQKxcCB5Gu3aiYfLWJFw42otwIDAQABAoIBADC+vZ4Ne4vTtkWl +Izsj9Y29Chs0xx3uzuWjUGcvib/0zOcWGICF8t3hCuu9btRiQ24jlFDGdnRVH5FV +E6OtuFLgdlPgOU1RQzn2wvTZcT26+VQHLBI8xVIRTBVwNmzK06Sq6AEbrNjaenAM +/KwoAuLHzAmFXAgmr0++DIA5oayPWyi5IoyFO7EoRv79Xz5LWfu5j8CKOFXmI5MT +vEVYM6Gb2xHRa2Ng0SJ4VzwC09GcXlHKRAz+CubJuncvjbcM/EryvexozKkUq4XA +KqGr9xxdZ4XDlo3Rj9S9P9JaOin0I1mwwz6p+iwMF0zr+/ldjE4oPBdB1PUgSJ7j +2CZcS1kCgYEAwIZ3UsMIXqkMlkMz/7nu2sqzV3EgQjY5QRoz98ligKg4fhYKz+K4 +yXvJrRyLkwEBaPdLppCZbs4xsuuv3jiqUHV5n7sfpUA5HVKkKh6XY7jnszbqV732 +iB1mQVEjzM92/amew2hDKLGQDW0nglrg6uV+bx0Lnp6Glahr8NOAyk0CgYEA1Ar3 +jTqTkU+NQX7utlxx0HPVL//JH/erp/Gnq9fN8dZhK/yjwX5savUlNHpgePoXf1pE +lgi21/INQsvp7O2AUKuj96k+jBHQ0SS58AQGFv8iNDkLE57N74vCO6+Xdi1rHj/Y +7jglr00box/7SOmvb4SZz2o0jm0Ejsg2M0aBuRMCgYEAgTB6F34qOqMDgD1eQka5 +QfXs/Es8E1Ihf08e+jIXuC+poOoXnUINL56ySUizXBS7pnzzNbUoUFNqxB4laF/r +4YvC7m15ocED0mpnIKBghBlK2VaLUA93xAS+XiwdcszwkuzkTUnEbyUfffL2JSHo +dZdEDTmXV3wW4Ywfyn2Sma0CgYAeNNG/FLEg6iw9QE/ROqob/+RGyjFklGunqQ0x +tbRo1xlQotTRI6leMz3xk91aXoYqZjmPBf7GFH0/Hr1cOxkkZM8e4MVAPul4Ybr7 +LheP/xhoSBgD24OKtGYfCoyRETdJP98vUGBN8LYXLt8lK+UKBeHDYmXKRE156ZuP +AmRIcQKBgFvp+xMoyAsBeOlTjVDZ0mTnFh1yp8f7N3yXdHPpFShwjXjlqLmLO5RH +mZAvaH0Ux/wCfvwHhdC46jBrs9S4zLBvj3+44NYOzvz2dBWP/5MuXgzFe30h9Yd0 +zUlyEaWm0jY2Ylzax8ECKRL0td2bv36vxOYtTax8MSB15szsnPJ+ +-----END RSA PRIVATE KEY----- +`; + +export const csr = `-----BEGIN CERTIFICATE REQUEST----- +MIICdDCCAVwCAQAwDjEMMAoGA1UEAxMDbG9sMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA4Dz2b/nAP/M6bqyk5mctqqYAAcoME//xPBy0wREHuZ776Pu4 +l45kDL3dPXiY8U2P9pn8WIr2KpLK6oWUfSsiG2P082bpWDL20UymkWqDhhrA4unf +ZRq68UIDbcetlLw15YKnlNdvNZ7Qr8Se8KV0YGR/wFqI7QfS6VE3lhxZWEBUayI0 +egqOuDbXAcZTON1AZ92/F+WFSbc43iYdDk16XfAPFKhtvLr6zQQuzebAb7HG04Hc +GhRskixxyJ8XY6XUplfsa1HcpUXE4f1GeUvq3g6ltVCSJ0p7qI9FFjV4t+DCLVVV +LnwHUi9Vzz6i2wjMt7P6+gHR+RrOWBgRMn38fwIDAQABoCEwHwYJKoZIhvcNAQkO +MRIwEDAOBgNVHREEBzAFggNsb2wwDQYJKoZIhvcNAQELBQADggEBAAm3AHQ1ctdV +8HCrMOXGVLgI2cB1sFd6VYVxPBxIk812Y4wyO8Q6POE5VZNTIgMcSeIaFu5lgHNL +Peeb54F+zEa+OJYkcWgCAX5mY/0HoML4p2bxFTSjllSpcX7ktjq4IEIY/LRpqSgc +jgZHHRwanFfkeIOhN4Q5qJWgBPNhDAcNPE7T0M/4mxqYDqMSJvMYmC67hq1UOOug +/QVDUDJRC1C0aDw9if+DbG/bt1V6HpMQhDIEUjzfu4zG8pcag3cJpOA8JhW1hnG0 +XA2ZOCA7s34/szr2FczXtIoKiYmv3UzPyO9/4mc0Q2+/nR4CG8NU9WW/XJCne9ID +elRplAzrMF4= +-----END CERTIFICATE REQUEST-----`; + +export const csr2 = `-----BEGIN CERTIFICATE REQUEST----- +MIIChDCCAWwCAQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCuW9C58M1wO0vdGmtLcJbbCkKyfsHJJae1j4LL +xdGqs1j9UKD66UALSzZEeMCBdtTNNzThAgYJqCSA5swqpbRf6WZ3K/X7oHbfcrHi +SAm8v/0QsJDF5Rphiy6wyggaoaHEsbSp83kYy9r+h48vFW5Dr8UvJTsp5kdRn31L +bTHr56iqOaHQbu6hDj4Ompg/0OElPH1tV2X947o8timR+L89utZzR+d8x/eeTdPl +H7TEkMEomRvt7NTRHGYRsm3Gzq4AA6PalzIxzwJrNgXfJDutNn/QwcVd5sImwYCO +GaHsOvGfc02w+Vqqva9EOEQSr6B90kA+vc30I6uSiugzV9TFAgMBAAGgKTAnBgkq 
+hkiG9w0BCQ4xGjAYMBYGA1UdEQQPMA2CC2V4YW1wbGUuY29tMA0GCSqGSIb3DQEB +CwUAA4IBAQAjm6JTU7axU6TzLlXlOp7hZ4+nep2/8vvJ9EOXzL8x/qtTTizctdG9 +Op70gywoUxAS2tatwa4fmW9DbA2eGiLU+Ibj/5b0Veq5DQdp1Qg3MLBP/+AcM/7m +rrgA9MhkpQahXCj4vXo6NeXYaTh6Jo/s8C9h3WxTD6ptDMiaPFcEuWcx0e3AjjH0 +pe7k9/MfB2wLfQ7+5wee/tCFWZN4tk8YfjQeQA1extXYKM/f8eu3Z/wjbbMOVpwb +xst+VTY7X9T8cU/hjDEoNG677meI+W5MgiwX0rxTpoz991fqr3vp7PELYj3GMyii +D1YfvqXieNij4UrduRqCXj1m8SVZlM+X +-----END CERTIFICATE REQUEST-----`; + +export const componentPemBundle = `-----BEGIN CERTIFICATE----- +MIIDGjCCAgKgAwIBAgIUFvnhb2nQ8+KNS3SzjlfYDMHGIRgwDQYJKoZIhvcNAQEL +BQAwDTELMAkGA1UEAxMCZmEwHhcNMTgwMTEwMTg1NDI5WhcNMTgwMjExMTg1NDU5 +WjANMQswCQYDVQQDEwJmYTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AN2VtBn6EMlA4aYre/xoKHxlgNDxJnfSQWfs6yF/K201qPnt4QF9AXChatbmcKVn +OaURq+XEJrGVgF/u2lSos3NRZdhWVe8o3/sOetsGxcrd0gXAieOSmkqJjp27bYdl +uY3WsxhyiPvdfS6xz39OehsK/YCB6qCzwB4eEfSKqbkvfDL9sLlAiOlaoHC9pczf +6/FANKp35UDwInSwmq5vxGbnWk9zMkh5Jq6hjOWHZnVc2J8J49PYvkIM8uiHDgOE +w71T2xM5plz6crmZnxPCOcTKIdF7NTEP2lUfiqc9lONV9X1Pi4UclLPHJf5bwTmn +JaWgbKeY+IlF61/mgxzhC7cCAwEAAaNyMHAwDgYDVR0PAQH/BAQDAgEGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFLDtc6+HZN2lv60JSDAZq3+IHoq7MB8GA1Ud +IwQYMBaAFLDtc6+HZN2lv60JSDAZq3+IHoq7MA0GA1UdEQQGMASCAmZhMA0GCSqG +SIb3DQEBCwUAA4IBAQDVt6OddTV1MB0UvF5v4zL1bEB9bgXvWx35v/FdS+VGn/QP +cC2c4ZNukndyHhysUEPdqVg4+up1aXm4eKXzNmGMY/ottN2pEhVEWQyoIIA1tH0e +8Kv/bysYpHZKZuoGg5+mdlHS2p2Dh2bmYFyBLJ8vaeP83NpTs2cNHcmEvWh/D4UN +UmYDODRN4qh9xYruKJ8i89iMGQfbdcq78dCC4JwBIx3bysC8oF4lqbTYoYNVTnAi +LVqvLdHycEOMlqV0ecq8uMLhPVBalCmIlKdWNQFpXB0TQCsn95rCCdi7ZTsYk5zv +Q4raFvQrZth3Cz/X5yPTtQL78oBYrmHzoQKDFJ2z +-----END CERTIFICATE-----`; + +// for parse-pki-cert tests: +// certificate contains all allowable params +export const loadedCert = `-----BEGIN CERTIFICATE-----\nMIIE7TCCA9WgAwIBAgIULcrWXSz3/kG81EgBo0A4Zt+ZgkYwDQYJKoZIhvcNAQEL\nBQAwga0xDzANBgNVBAYTBkZyYW5jZTESMBAGA1UECBMJQ2hhbXBhZ25lMQ4wDAYD\nVQQHEwVQYXJpczETMBEGA1UECRMKMjM0IHNlc2FtZTEPMA0GA1UEERMGMTIzNDU2\nMQ8wDQYDVQQKEwZXaWRnZXQxEDAOBgNVBAsTB0ZpbmFuY2UxGDAWBgNVBAMTD2Nv\nbW1vbi1uYW1lLmNvbTETMBEGA1UEBRMKY2VyZWFsMTI5MjAeFw0yMzAyMDMxNzI3\nMzNaFw0yMzAzMDcxNzI4MDNaMIGtMQ8wDQYDVQQGEwZGcmFuY2UxEjAQBgNVBAgT\nCUNoYW1wYWduZTEOMAwGA1UEBxMFUGFyaXMxEzARBgNVBAkTCjIzNCBzZXNhbWUx\nDzANBgNVBBETBjEyMzQ1NjEPMA0GA1UEChMGV2lkZ2V0MRAwDgYDVQQLEwdGaW5h\nbmNlMRgwFgYDVQQDEw9jb21tb24tbmFtZS5jb20xEzARBgNVBAUTCmNlcmVhbDEy\nOTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC8NO7LXHp28SzOqmQv\nns4fGogKydEklWG4JEN3pM+k9nTyEgA8DFhtSLvcqF0cqhEydw4FVU+LEUGySUer\nmM4VNl9qglFBgmYE8TNgWkUw9ZP6MNgx13I8zXTXOIDj0iwXks02x8451oPbqqdq\nXsCc4vSP7BPwQOjc0C56c54zyRC1zFm9jlh+As0QinuYcjFjVabCku6JSYc4kunh\nz7derU9cURUxB5/ja9zC7jGS8tg4XUWdUkbj1O/krEWfjQx9Kj8aEU1gFfAvW/Bd\nIqgAlHATYN6i8HDmAmdGty9zLht9wUgnAtVh3lK3939h/rI0qCLV6N/RjCC7csnz\n9I67AgMBAAGjggEBMIH+MA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/\nAgERMB0GA1UdDgQWBBSSdKle0wMGy0jvPmcoDanGduhLqzAfBgNVHSMEGDAWgBSS\ndKle0wMGy0jvPmcoDanGduhLqzAuBgNVHR4BAf8EJDAioCAwDoIMZG5zbmFtZTEu\nY29tMA6CDGRzbm5hbWUyLmNvbTBoBgNVHREEYTBfoB0GCCsBBAEFCQIGoBEMD3Nv\nbWUtdXRmLXN0cmluZ4IIYWx0bmFtZTGCCGFsdG5hbWUyhwTAngEmhxASNA/SViEA\nAQCJAAAAAEUAhgh0ZXN0dXJpMYYIdGVzdHVyaTIwDQYJKoZIhvcNAQELBQADggEB\nAAQukDwIg01QLQK2MQqjePNZlJleKLMK9LiabyGqc7u4bgmX3gYIrH7uopvO5cIv\nvqxcVBATQ6ez29t5MagzDu1+vnwE8fQhRoe0sp5TRLiGSlBJf53+0Wb3vbaOT0Fx\n/FFK0f2wHqYv3h/CTxu8YxDY4DwCRTPJ2KfTvT85BXtTUlzKIp1ytALSKcz0Owoe\neQPtQUdi8UHef8uHuWbk7DftMXojXbCqtHQdS3Rrl9zyc+Ds67flb5hKEseQZRgw\ntPtAIxhjSfZPTjl/3aasCBikESdeS8IOxIXL1bGun0xWnIBBc9uRe8hpdPjZj7Eh\nIt7ucIzFep0DLWCeQrAHeqo=\n-----END 
CERTIFICATE-----`; +// use_pss = true +export const pssTrueCert = `-----BEGIN CERTIFICATE-----\nMIIDqTCCAl2gAwIBAgIUVY2PTRZl1t/fjfyEwrG4HvGjYekwQQYJKoZIhvcNAQEK\nMDSgDzANBglghkgBZQMEAgEFAKEcMBoGCSqGSIb3DQEBCDANBglghkgBZQMEAgEF\nAKIDAgEgMBoxGDAWBgNVBAMTD2NvbW1vbi1uYW1lLmNvbTAeFw0yMzAxMjEwMTA3\nNDBaFw0yMzAyMjIwMTA4MTBaMBoxGDAWBgNVBAMTD2NvbW1vbi1uYW1lLmNvbTCC\nASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANlG6DuZ4B6Tv8u8rI+pUvQv\n2E7dFCu+i1YAbEJSnuAQ9XFUG5Uf3uHB8AFrOKRBAaFdBV4hKvBpfvMj3jl93d0b\nHdHeIM+sancDwpexpLvSW4yDpbIhAnkYzbUYgZyEAJeIgq/4ufT77TCK8XIzDywD\nhXZtDJkc6w3mm6hiqEQXLKnDQTfKLK8Fbsq4OuQ4vO5VIJrVZ1gKemDs7W/9WIzp\n0iSjzcIfWnUy1Dpk+AF8HhXok8CbhHfOGgbQZ6DcXOIJeb4XarJ9sgLJNAuhdcHR\ngP0TkPiOewbBG9Ish1p3F+pkI3vjQk4cghmilAuEkMc2NCNNy6q1bwSELVQnMiMC\nAwEAAaN/MH0wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O\nBBYEFAsrMoFu6tt1pybxx9ln6w5QK/2tMB8GA1UdIwQYMBaAFAsrMoFu6tt1pybx\nx9ln6w5QK/2tMBoGA1UdEQQTMBGCD2NvbW1vbi1uYW1lLmNvbTBBBgkqhkiG9w0B\nAQowNKAPMA0GCWCGSAFlAwQCAQUAoRwwGgYJKoZIhvcNAQEIMA0GCWCGSAFlAwQC\nAQUAogMCASADggEBAFh+PMwEmxaZR6OtfB0Uvw2vA7Oodmm3W0bYjQlEz8U+Q+JZ\ncIPa4VnRy1QALmKbPCbRApA/gcWzIwtzo1JhLtcDINg2Tl0nj4WvgpIvj0/lQNMq\nmwP7G/K4PyJTv3+y5XwVfepZAZITB0w5Sg5dLC6HP8AGVIaeb3hGNHYvPlE+pbT+\njL0xxzFjOorWoy5fxbWoVyVv9iZ4j0zRnbkYHIi3d8g56VV6Rbyw4WJt6p87lmQ8\n0wbiJTtuew/0Rpuc3PEcR9XfB5ct8bvaGGTSTwh6JQ33ohKKAKjbBNmhBDSP1thQ\n2mTkms/mbDRaTiQKHZx25TmOlLN5Ea1TSS0K6yw=\n-----END CERTIFICATE-----`; +// only has common name +export const skeletonCert = `-----BEGIN CERTIFICATE-----\nMIIDQTCCAimgAwIBAgIUVQy58VgdVpAK9c8SfS31idSv6FUwDQYJKoZIhvcNAQEL\nBQAwGjEYMBYGA1UEAxMPY29tbW9uLW5hbWUuY29tMB4XDTIzMDEyMTAxMjAyOVoX\nDTIzMDIyMjAxMjA1OVowGjEYMBYGA1UEAxMPY29tbW9uLW5hbWUuY29tMIIBIjAN\nBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2UboO5ngHpO/y7ysj6lS9C/YTt0U\nK76LVgBsQlKe4BD1cVQblR/e4cHwAWs4pEEBoV0FXiEq8Gl+8yPeOX3d3Rsd0d4g\nz6xqdwPCl7Gku9JbjIOlsiECeRjNtRiBnIQAl4iCr/i59PvtMIrxcjMPLAOFdm0M\nmRzrDeabqGKoRBcsqcNBN8osrwVuyrg65Di87lUgmtVnWAp6YOztb/1YjOnSJKPN\nwh9adTLUOmT4AXweFeiTwJuEd84aBtBnoNxc4gl5vhdqsn2yAsk0C6F1wdGA/ROQ\n+I57BsEb0iyHWncX6mQje+NCThyCGaKUC4SQxzY0I03LqrVvBIQtVCcyIwIDAQAB\no38wfTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU\nCysygW7q23WnJvHH2WfrDlAr/a0wHwYDVR0jBBgwFoAUCysygW7q23WnJvHH2Wfr\nDlAr/a0wGgYDVR0RBBMwEYIPY29tbW9uLW5hbWUuY29tMA0GCSqGSIb3DQEBCwUA\nA4IBAQDPco+FIHXczf0HTwFAmIVu4HKaeIwDsVPxoUqqWEix8AyCsB5uqpKZasby\nedlrdBohM4dnoV+VmV0de04y95sdo3Ot60hm/czLog3tHg4o7AmfA7saS+5hCL1M\nCJWqoJHRFo0hOWJHpLJRWz5DqRZWspASoVozLOYyjRD+tNBjO5hK4FtaG6eri38t\nOpTt7sdInVODlntpNuuCVprPpHGj4kPOcViQULoFQq5fwyadpdjqSXmEGlt0to5Y\nMbTb4Jhj0HywgO53BUUmMzzY9idXh/8A7ThrM5LtqhxaYHLVhyeo+5e0mgiXKp+n\nQ8Uh4TNNTCvOUlAHycZNaxYTlEPn\n-----END CERTIFICATE-----`; +// contains unsupported subject and extension OIDs +export const unsupportedOids = `-----BEGIN 
CERTIFICATE-----\nMIIEjDCCA3SgAwIBAgIUD4EeORgh/i+ZZFOk8KsGKQPWsoIwDQYJKoZIhvcNAQEL\nBQAwgZIxMTAvBgNVBAMMKGZhbmN5LWNlcnQtdW5zdXBwb3J0ZWQtc3Viai1hbmQt\nZXh0LW9pZHMxCzAJBgNVBAYTAlVTMQ8wDQYDVQQIDAZLYW5zYXMxDzANBgNVBAcM\nBlRvcGVrYTESMBAGA1UECgwJQWNtZSwgSW5jMRowGAYJKoZIhvcNAQkBFgtmb29A\nYmFyLmNvbTAeFw0yMzAxMjMxODQ3MjNaFw0zMzAxMjAxODQ3MjNaMIGSMTEwLwYD\nVQQDDChmYW5jeS1jZXJ0LXVuc3VwcG9ydGVkLXN1YmotYW5kLWV4dC1vaWRzMQsw\nCQYDVQQGEwJVUzEPMA0GA1UECAwGS2Fuc2FzMQ8wDQYDVQQHDAZUb3Bla2ExEjAQ\nBgNVBAoMCUFjbWUsIEluYzEaMBgGCSqGSIb3DQEJARYLZm9vQGJhci5jb20wggEi\nMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDyYH5qS7krfZ2tA5uZsY2qXbTb\ntGNG1BsyDhZ/qqVlQybjDsHJZwNUbpfhBcCLaKyAwH1R9n54NOOOn6bYgfKWTgy3\nL7224YDAqYe7Y/GPjgI2MRvRfn6t2xzQxtJ0l0k8LeyNcwhiqYLQyOOfDdc127fm\nW40r2nmhLpH0i9e2I/YP1HQ+ldVgVBqeUTntgVSBfrQF56v9mAcvvHEa5sdHqmX4\nJ2lhWTnx9jqb7NZxCem76BlX1Gt5TpP3Ym2ZFVQI9fuPK4O8JVhk1KBCmIgR3Ft+\nPpFUs/c41EMunKJNzveYrInSDScaC6voIJpK23nMAiM1HckLfUUc/4UojD+VAgMB\nAAGjgdcwgdQwHQYDVR0OBBYEFH7tt4enejKTZtYjUKUUx6PXyzlgMB8GA1UdIwQY\nMBaAFH7tt4enejKTZtYjUKUUx6PXyzlgMA4GA1UdDwEB/wQEAwIFoDAgBgNVHSUB\nAf8EFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwEgYDVR0TAQH/BAgwBgEB/wIBCjBM\nBgNVHREERTBDhwTAngEmhgx1cmlTdXBwb3J0ZWSCEWRucy1OYW1lU3VwcG9ydGVk\noBoGAyoDBKATDBFleGFtcGxlIG90aGVybmFtZTANBgkqhkiG9w0BAQsFAAOCAQEA\nP6ckVJgbcJue+MK3RVDuG+Mh7dl89ynC7NwpQFRjLVZQuoMHZT/dcLlVeFejVXu5\nR+IPLmQU6NV7JAmy4zGap8awf12QTy3g410ecrSF94WWlu8bPoekfUnnP+kfzLPH\nCUAkRKxWDSRKX5C8cMMxacVBBaBIayuusLcHkHmxLLDw34PFzyz61gtZOJq7JYnD\nhU9YsNh6bCDmnBDBsDMOI7h8lBRQwTiWVoSD9YNVvFiY29YvFbJQGdh+pmBtf7E+\n1B/0t5NbvqlQSbhMM0QgYFhuCxr3BGNob7kRjgW4i+oh+Nc5ptA5q70QMaYudqRS\nd8SYWhRdxmH3qcHNPcR1iw==\n-----END CERTIFICATE-----`; +// unsupportedPem is same cert as above, formatted differently +export const unsupportedPem = ` +-----BEGIN CERTIFICATE----- +MIIEjDCCA3SgAwIBAgIUD4EeORgh/i+ZZFOk8KsGKQPWsoIwDQYJKoZIhvcNAQEL +BQAwgZIxMTAvBgNVBAMMKGZhbmN5LWNlcnQtdW5zdXBwb3J0ZWQtc3Viai1hbmQt +ZXh0LW9pZHMxCzAJBgNVBAYTAlVTMQ8wDQYDVQQIDAZLYW5zYXMxDzANBgNVBAcM +BlRvcGVrYTESMBAGA1UECgwJQWNtZSwgSW5jMRowGAYJKoZIhvcNAQkBFgtmb29A +YmFyLmNvbTAeFw0yMzAxMjMxODQ3MjNaFw0zMzAxMjAxODQ3MjNaMIGSMTEwLwYD +VQQDDChmYW5jeS1jZXJ0LXVuc3VwcG9ydGVkLXN1YmotYW5kLWV4dC1vaWRzMQsw +CQYDVQQGEwJVUzEPMA0GA1UECAwGS2Fuc2FzMQ8wDQYDVQQHDAZUb3Bla2ExEjAQ +BgNVBAoMCUFjbWUsIEluYzEaMBgGCSqGSIb3DQEJARYLZm9vQGJhci5jb20wggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDyYH5qS7krfZ2tA5uZsY2qXbTb +tGNG1BsyDhZ/qqVlQybjDsHJZwNUbpfhBcCLaKyAwH1R9n54NOOOn6bYgfKWTgy3 +L7224YDAqYe7Y/GPjgI2MRvRfn6t2xzQxtJ0l0k8LeyNcwhiqYLQyOOfDdc127fm +W40r2nmhLpH0i9e2I/YP1HQ+ldVgVBqeUTntgVSBfrQF56v9mAcvvHEa5sdHqmX4 +J2lhWTnx9jqb7NZxCem76BlX1Gt5TpP3Ym2ZFVQI9fuPK4O8JVhk1KBCmIgR3Ft+ +PpFUs/c41EMunKJNzveYrInSDScaC6voIJpK23nMAiM1HckLfUUc/4UojD+VAgMB +AAGjgdcwgdQwHQYDVR0OBBYEFH7tt4enejKTZtYjUKUUx6PXyzlgMB8GA1UdIwQY +MBaAFH7tt4enejKTZtYjUKUUx6PXyzlgMA4GA1UdDwEB/wQEAwIFoDAgBgNVHSUB +Af8EFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwEgYDVR0TAQH/BAgwBgEB/wIBCjBM +BgNVHREERTBDhwTAngEmhgx1cmlTdXBwb3J0ZWSCEWRucy1OYW1lU3VwcG9ydGVk +oBoGAyoDBKATDBFleGFtcGxlIG90aGVybmFtZTANBgkqhkiG9w0BAQsFAAOCAQEA +P6ckVJgbcJue+MK3RVDuG+Mh7dl89ynC7NwpQFRjLVZQuoMHZT/dcLlVeFejVXu5 +R+IPLmQU6NV7JAmy4zGap8awf12QTy3g410ecrSF94WWlu8bPoekfUnnP+kfzLPH +CUAkRKxWDSRKX5C8cMMxacVBBaBIayuusLcHkHmxLLDw34PFzyz61gtZOJq7JYnD +hU9YsNh6bCDmnBDBsDMOI7h8lBRQwTiWVoSD9YNVvFiY29YvFbJQGdh+pmBtf7E+ +1B/0t5NbvqlQSbhMM0QgYFhuCxr3BGNob7kRjgW4i+oh+Nc5ptA5q70QMaYudqRS +d8SYWhRdxmH3qcHNPcR1iw== +-----END CERTIFICATE-----`; +export const certWithoutCN = `-----BEGIN 
CERTIFICATE-----\nMIIDUDCCAjigAwIBAgIUEUpM5i7XMd/imZkR9XvonMaqPyYwDQYJKoZIhvcNAQEL\nBQAwHDEaMBgGCSqGSIb3DQEJARYLZm9vQGJhci5jb20wHhcNMjMwMTIzMjMyODEw\nWhcNMzMwMTIwMjMyODEwWjAcMRowGAYJKoZIhvcNAQkBFgtmb29AYmFyLmNvbTCC\nASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAPGSdeqLICZcoUzxk88F8Tp+\nVNI+mS74L8pHyb9ZNZfeXPo0E9L5pi+KKI7rkxAtBGUecG1ENSxDDK9p6XZhWHSU\nZ6bdjOsjcIlfiM+1hhtDclIVxIDnz2Jt1/Vmnm8DXwdwVATWiFLTnfm288deNwsT\npl0ehAR3BadkZvteC6t+giEw/4qm1/FP53GEBOQeUWJDZRvtL37rdx4joFv3cR4w\nV0dukOjc5AGXtIOorO145OSZj8s7RsW3pfGcFUcOg7/flDxfK1UqFflQa7veLvKa\nWE/fOMyB/711QjSkTuQ5Rw3Rf9Fr2pqVJQgElTIW1SKaX5EJTB9mtGB34UqUXtsC\nAwEAAaOBiTCBhjAdBgNVHQ4EFgQUyhFP/fm+798mErPD5VQvEaAZQrswHwYDVR0j\nBBgwFoAUyhFP/fm+798mErPD5VQvEaAZQrswDgYDVR0PAQH/BAQDAgWgMCAGA1Ud\nJQEB/wQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEK\nMA0GCSqGSIb3DQEBCwUAA4IBAQCishzVkhuSAtqxgsZdYzBs3GpakGIio5zReW27\n6dk96hYCbbe4K3DtcFbRD1B8t6aTJlHxkFRaOWErSXu9WP3fUhIDNRE64Qsrg1zk\n3Km430qBlorXmTp6xhYHQfY5bn5rT2YY7AmaYIlIFxRhod43i5GDbBP+e+d/vTqR\nv1AJflYofeR4LeATP64B6a4R+QQVoxI43+pyH3ka+nRHwJBR9h8SMtJoqBy7x9pl\nYlBDa8lSn05doA3+e03VIzitvBBWI4oX1XB0tShSLk6YJXayIwe0ZNVvfYLIRKCp\nb4DUwChYzG/FwFSssUAqzVFhu3i+uU3Z47bsLVm0R5m7hLiZ\n-----END CERTIFICATE-----`; + +// CROSS-SIGNING: +export const newCSR = { + common_name: 'Short-Lived Int R1', + csr: `-----BEGIN CERTIFICATE REQUEST-----\nMIICYjCCAUoCAQAwHTEbMBkGA1UEAxMSU2hvcnQtTGl2ZWQgSW50IFIxMIIBIjAN\nBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqsvFU7lzt06n1w6BL+Waf9zd+Z3G\n90Kv0HAksoLaWYinhkxNIUTU8ar9HLa2WV4EoJbNq91Hn+jFc2SYEXtRV+jm0kEh\nz4C4AoQ4D0s83JcYNssiNbVA04wa5ovD0iA/pzwVz8TnJSfAAuZ3vXFlyIbQ3ESo\nzt9hGjo/JOpoBh67E7xkuzw4lnC2rXGHdh9pk1Di+wqREnKU4nuhDLnTC/LL+Mkm\n07F1aMAW3Z/PWFmmsDJHMhZnaYo2LGCwU4A0U1ED0XpwflobVbkzZDmCXOPEI8UX\nG6VcL36zWnzEQnlZKN91MAa+s0E4z40KHKVSblSkjYD1K6n0y787ic2mDwIDAQAB\noAAwDQYJKoZIhvcNAQELBQADggEBAFQtiJaRfaQS3jHr7aFeszB/JmDRQiOoML3g\nhA3EcVd2rvDjiqikwD9EFdLTJyYJfb+9yiKDJqB7Fw2GPSrFxrd+jC9qZRI3VEWK\n8VdflLbruc1FcqJcE/0z2hWa11eud1bMLq8U6AfxNHL4r4ukrp2D5elrdsrDnhZj\nwMi3FtEFd4RZVaWZYVmWcQTeH7Zz/LYwkVDgBuvC+SOCaNNo/dCurkAAoxw8obBj\n1FS2F/3oHQxMui8vS8j6sMWMPZ5D3Q0xSC3HBUNoI2ZC77Mxn9yfj6ianUXKOOlf\nQMRaPBVajxZm9ovV64QKr+7HK7W7U/fNEqvoKBUDCqEuWmSsxMk=\n-----END CERTIFICATE REQUEST-----`, +}; + +export const oldParentIssuerCert = `-----BEGIN CERTIFICATE-----\nMIIDKzCCAhOgAwIBAgIUMCEF+bzBC4NQIWjE1sv/RbnYfUgwDQYJKoZIhvcNAQEL\nBQAwHTEbMBkGA1UEAxMSTG9uZy1MaXZlZCBSb290IFgxMB4XDTIzMDEyNTAwMjQz\nM1oXDTIzMDIyNjAwMjUwM1owHTEbMBkGA1UEAxMSTG9uZy1MaXZlZCBSb290IFgx\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0zrpJvjWcBV49Eor+zfh\nGW40xH6PcPSpzWGCCFiMPFwKrBSjuGRwwkLsXU7u2P15jIV/IU2kPS+WOW+EIe0x\ns5X2SoujZGOmM6du/6HIo9lz9yjb5G1SHdv/e65Q45QWb6wQcuO4axffvPzmAU9L\nQcunEF4g3rCz4cHYumi0osybbwR45z+8owNhykdbu7AwV0Cyz3C/lT1wxDxbFr0Y\n1NEjQ8AF4oRzqkmGoLp6ixDxp8zMpOlKWWYem1mx0RbqlwLP7khiS5YKi8+j8aog\nOhHA/W4i+ihrBzkv4GtOSdkhJz5qacifydUXtJ7SmvYs9Fi+hFgw61sw23ywbr3+\nywIDAQABo2MwYTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV\nHQ4EFgQUQsdYFMtsNMYNDIhZHMd77kcLLi8wHwYDVR0jBBgwFoAUQsdYFMtsNMYN\nDIhZHMd77kcLLi8wDQYJKoZIhvcNAQELBQADggEBAFNKTnNUzjZGHpXVK9Go8k/i\nVMNBktjGp58z+EN32TJnq/tOW1eVswUmq71S3R16Iho4XZDZVchuK+zhqSwlAmgM\no1vs6L5IJ0rVZcLZpysxFtawlbA362zBOX0F7tqStdEeBWaXw6J+MQ26xAPgHjXo\nc3fqqNWGbrOPt1uFoXWD+0Bg8M90a7OT0ijubh/PcuCe1yF9G2BqRQruB05gZiHl\n0NGbUka1ntD/lxYfLeSnp+FHJVDrcAHwPhKQS8HHr/ZBjKEGY8In+JIi/KBV/M8b\nGeW2k5odl6r2UIR6PWSei1WKKHe09WzO7rGJaN6uKLP14c0nSF3/q+AQY3m+tPY=\n-----END CERTIFICATE-----\n`; +export const parentIssuerCert = `-----BEGIN 
CERTIFICATE-----\nMIIDKzCCAhOgAwIBAgIUBxLeuD3K0hF5dGpaEgZqytTN3lswDQYJKoZIhvcNAQEL\nBQAwHTEbMBkGA1UEAxMSTG9uZy1MaXZlZCBSb290IFgyMB4XDTIzMDEyNTAwMjQz\nM1oXDTIzMDIyNjAwMjUwM1owHTEbMBkGA1UEAxMSTG9uZy1MaXZlZCBSb290IFgy\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuqkwN4m5dFLwFi0iYs4r\nTO4HWzloF4yCOaNfVksh1cOafVu9vjFwOWgHZFe6h8BOn6biKdFtvGTyIzlMHe5t\nyFmec9pfjX243bH9Ev4n2RTMKs818g9LdoZT6SI7DxHEu3yuHBg9TM87+GB+dA1V\nkRsK5hgtNCSMdgFSljM169sYbNilpk8M7O2hr+AmgRi0c1nUEPCe4JAr0Zv8iweJ\ntFRVHiQJXD9WIVxaWVxqWFsHoXseZS7H76RSdf4jNfENmBguHZMAPhtqlc/pMan8\nu0IJETWjWENn+WYC7DnnfQtNqyebU2LdT3oKO8tELqITygjT2tCS1Zavmsy69VY0\nYwIDAQABo2MwYTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV\nHQ4EFgQUxgchIBo+1F++IFW0F586I5QDFGYwHwYDVR0jBBgwFoAUxgchIBo+1F++\nIFW0F586I5QDFGYwDQYJKoZIhvcNAQELBQADggEBAI6DdnW8q/FqGqk/Y0k7iUrZ\nYkfMRlss6uBTzLev53eXqFIJ3+EFVfV+ohDEedlYYm2QCELzQcJSR7Q2I22PQj8X\nTO0yqk6LOCMv/4yiDhF4D+haiDU4joq5GX1dpFdlNSQ5fJmnLKu8HYbOhbwUo4ns\n4yGzIMulZR1Zqf/HGEOCYPDQ0ZHucmHn7uGhmV+kgYGoKVEZ8XxfmyNPKuwTAUHL\nfInPJZtbxXTVmiWWy3iraeI4XcUvaD0JtVnsVphYrqrSZ60DjgFsjiyenxePGHXf\nYXV9HIS6OXlvWhJKlSINOTv9fAa+e+JtK7frdvxJNHoTG34PiGXfOV2swTvLJQo=\n-----END CERTIFICATE-----\n`; +export const intIssuerCert = `-----BEGIN CERTIFICATE-----\nMIIDKzCCAhOgAwIBAgIUPt5VyO6gyA4hVaMkdpNyBlP+I64wDQYJKoZIhvcNAQEL\nBQAwHTEbMBkGA1UEAxMSTG9uZy1MaXZlZCBSb290IFgxMB4XDTIzMDEyNTAwMjQz\nM1oXDTIzMDIyNjAwMjUwM1owHTEbMBkGA1UEAxMSU2hvcnQtTGl2ZWQgSW50IFIx\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqsvFU7lzt06n1w6BL+Wa\nf9zd+Z3G90Kv0HAksoLaWYinhkxNIUTU8ar9HLa2WV4EoJbNq91Hn+jFc2SYEXtR\nV+jm0kEhz4C4AoQ4D0s83JcYNssiNbVA04wa5ovD0iA/pzwVz8TnJSfAAuZ3vXFl\nyIbQ3ESozt9hGjo/JOpoBh67E7xkuzw4lnC2rXGHdh9pk1Di+wqREnKU4nuhDLnT\nC/LL+Mkm07F1aMAW3Z/PWFmmsDJHMhZnaYo2LGCwU4A0U1ED0XpwflobVbkzZDmC\nXOPEI8UXG6VcL36zWnzEQnlZKN91MAa+s0E4z40KHKVSblSkjYD1K6n0y787ic2m\nDwIDAQABo2MwYTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV\nHQ4EFgQUkBK+oGpo5DNj2pCKoUE08WFOxQUwHwYDVR0jBBgwFoAUQsdYFMtsNMYN\nDIhZHMd77kcLLi8wDQYJKoZIhvcNAQELBQADggEBAIf4Bp/NYftiN8LmQrVzPWAe\nc4Bxm/NFFtkwQEvFhndMN68MUyXa5yxAdnYAHN+fRpYPxbjoZNXjW/jx3Kjft44r\ntyNGrrkjR80TI9FbL53nN7hLtZQdizsQD0Wype4Q1JOIxYw2Wd5Hr/PVPrJZ3PGg\nwNeI5IRu/cVbVT/vkRaHqYSwpa+V2cZTaEk6h62KPaKu3ui+omoeitU6qXHOysXQ\nrdGkJl/x831sIKmN0dMiGeoJdHGAr/E2f3ijKbVPsjIxZbm2SSumldOFYWn9cNYD\nI6sizFH976Wpde/GRIvBIzJnlK3xgfy0D9AUvwKyt75PVEnshc9tlhxoSVlKaUE=\n-----END CERTIFICATE-----\n`; +export const newlySignedCert = `-----BEGIN 
CERTIFICATE-----\nMIIDKzCCAhOgAwIBAgIUKapKK5Coau2sfIJgqA9jcC6BkWIwDQYJKoZIhvcNAQEL\nBQAwHTEbMBkGA1UEAxMSTG9uZy1MaXZlZCBSb290IFgyMB4XDTIzMDEyNTIyMjky\nNVoXDTIzMDIyNjIyMjk1NVowHTEbMBkGA1UEAxMSU2hvcnQtTGl2ZWQgSW50IFIx\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqsvFU7lzt06n1w6BL+Wa\nf9zd+Z3G90Kv0HAksoLaWYinhkxNIUTU8ar9HLa2WV4EoJbNq91Hn+jFc2SYEXtR\nV+jm0kEhz4C4AoQ4D0s83JcYNssiNbVA04wa5ovD0iA/pzwVz8TnJSfAAuZ3vXFl\nyIbQ3ESozt9hGjo/JOpoBh67E7xkuzw4lnC2rXGHdh9pk1Di+wqREnKU4nuhDLnT\nC/LL+Mkm07F1aMAW3Z/PWFmmsDJHMhZnaYo2LGCwU4A0U1ED0XpwflobVbkzZDmC\nXOPEI8UXG6VcL36zWnzEQnlZKN91MAa+s0E4z40KHKVSblSkjYD1K6n0y787ic2m\nDwIDAQABo2MwYTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV\nHQ4EFgQUkBK+oGpo5DNj2pCKoUE08WFOxQUwHwYDVR0jBBgwFoAUxgchIBo+1F++\nIFW0F586I5QDFGYwDQYJKoZIhvcNAQELBQADggEBAJaems1vgEjxgb3d1y9PYxzN\nLZbuf/+0+BCVa9k4bEsbuhXhEecFdIi2OKS6fabeoEOF97Gvqrgc+LEpNsU6lIRA\nkJ/nHe0CD2hf0aBQsGsOllYy/4QnrPlbowb4KizPknEMWdGcvfnlzzOJzo4/UuMk\nMZ9vn2GrINzfml/sLocOzP/MsPd8bBhXI2Emh2O9tJ4+zeHLhEzcM1gdNk8pp+wP\nEOks0EcN4UBkpEnDZcDTJVgp9XpWy19EEGqsxjBq6rlpIvPW8XHoH1jZSGY1KWBJ\nRGtDcGugwTxO9jYHz/a1qu4BVt5FFcb0L3IOvcr+3QCCeiJQHcVY8QRbO9M4AQk=\n-----END CERTIFICATE-----\n`; +// both certs generated with key type ed25519 +export const unsupportedSignatureRoot = `-----BEGIN CERTIFICATE-----\nMIIBXTCCAQ+gAwIBAgIUcp9CkzsU5Pkv2ZJO8Gp+tJrzuJYwBQYDK2VwMBIxEDAO\nBgNVBAMTB215LXJvb3QwHhcNMjMwNzE4MTYyNzQ3WhcNMjMwODE5MTYyODE3WjAS\nMRAwDgYDVQQDEwdteS1yb290MCowBQYDK2VwAyEAmZ+By07QvgAEX1HRjhltJlgK\nA8il2LYUpH0uw7f2lXCjdzB1MA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTAD\nAQH/MB0GA1UdDgQWBBTAcYaOaiKhDmYqSe6vg/lAtYspkDAfBgNVHSMEGDAWgBTA\ncYaOaiKhDmYqSe6vg/lAtYspkDASBgNVHREECzAJggdteS1yb290MAUGAytlcANB\nAG9xXZnKNEXRyfa91hm9S80PwlwIMh4MkWetwfPBn3M74cHzDK1okANmweca4RRq\nQHDPT7shx3CuosvL2Ori/ws=\n-----END CERTIFICATE-----`; +export const unsupportedSignatureInt = `-----BEGIN CERTIFICATE-----\nMIICfTCCAWWgAwIBAgIUei2XIhhsP1/ytDciEGfA1C7t/sMwDQYJKoZIhvcNAQEL\nBQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMjMwNzE4MTg1NDA3WhcNMjMw\nODE5MTg1NDM3WjASMRAwDgYDVQQDEwdpbnQtY3NyMCowBQYDK2VwAyEAa9vHnJA3\nnzA/fYiTUg8EhomjMtVp5O2c01nQRXEv72OjgcAwgb0wDgYDVR0PAQH/BAQDAgEG\nMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFGtjjUwrRGmFmYBHrUE38tSxvVM3\nMB8GA1UdIwQYMBaAFNng9+uArFyIUcD23XdvCSIfYiDPMEYGCCsGAQUFBwEBBDow\nODAZBggrBgEFBQcwAoYNaGFzaGljb3JwLmNvbTAbBggrBgEFBQcwAoYPdmF1bHRw\ncm9qZWN0LmlvMBIGA1UdEQQLMAmCB2ludC1jc3IwDQYJKoZIhvcNAQELBQADggEB\nAAOSNgZjesJG4BgLU8jQmOO7n6W8WcR+dT+ELDC1nLlEZ2BJCDSXXUX8AihIHKxn\nA9W4slABUacyJlAZo/o/wcxyfbA6PUXmHnoqEPZ3zXMwuLN/iRW7/uQvI6TIwnpH\nXETFARLmK8cfGgbhi24STkHTF4ljczkOab7sTUQTHELlo+F2gNtmgnyaBFCGUYor\nX1pkMBcBa9BWRsfhy8E+tBVVUrNNUddwzC/5nMLqT8XqENMndDoG7eeT9Ex6otZy\nzURkcq09FtcmyY2RBYkV4UzyHN7cESMIk/J33ZCNAfHaDGuOqTy5nYU5fTtjJcit\nwEcWiSesrKPCletBpuMpgiU=\n-----END CERTIFICATE-----\n`; diff --git a/ui/tests/helpers/pki/workflow.js b/ui/tests/helpers/pki/workflow.js new file mode 100644 index 0000000..679aec0 --- /dev/null +++ b/ui/tests/helpers/pki/workflow.js @@ -0,0 +1,81 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { SELECTORS as ROLEFORM } from './pki-role-form'; +import { SELECTORS as GENERATECERT } from './pki-role-generate'; +import { SELECTORS as KEYFORM } from './pki-key-form'; +import { SELECTORS as KEYPAGES } from './page/pki-keys'; +import { SELECTORS as ISSUERDETAILS } from './pki-issuer-details'; +import { SELECTORS as CONFIGURATION } from './pki-configure-create'; +import { SELECTORS as DELETE } from './pki-delete-all-issuers'; +import { SELECTORS as TIDY } from './page/pki-tidy-form'; +import { SELECTORS as CONFIGEDIT } from './page/pki-configuration-edit'; +import { SELECTORS as GENROOT } from './pki-generate-root'; + +export const SELECTORS = { + breadcrumbContainer: '[data-test-breadcrumbs]', + breadcrumbs: '[data-test-breadcrumbs] li', + overviewBreadcrumb: '[data-test-breadcrumbs] li:nth-of-type(2) > a', + pageTitle: '[data-test-pki-role-page-title]', + alertBanner: '[data-test-alert-banner="alert"]', + emptyState: '[data-test-component="empty-state"]', + emptyStateTitle: '[data-test-empty-state-title]', + emptyStateLink: '.empty-state-actions a', + emptyStateMessage: '[data-test-empty-state-message]', + // TABS + overviewTab: '[data-test-secret-list-tab="Overview"]', + rolesTab: '[data-test-secret-list-tab="Roles"]', + issuersTab: '[data-test-secret-list-tab="Issuers"]', + certsTab: '[data-test-secret-list-tab="Certificates"]', + keysTab: '[data-test-secret-list-tab="Keys"]', + tidyTab: '[data-test-secret-list-tab="Tidy"]', + configTab: '[data-test-secret-list-tab="Configuration"]', + // ROLES + deleteRoleButton: '[data-test-pki-role-delete]', + generateCertLink: '[data-test-pki-role-generate-cert]', + signCertLink: '[data-test-pki-role-sign-cert]', + editRoleLink: '[data-test-pki-role-edit-link]', + createRoleLink: '[data-test-pki-role-create-link]', + roleForm: { + ...ROLEFORM, + }, + generateCertForm: { + ...GENERATECERT, + }, + // KEYS + keyForm: { + ...KEYFORM, + }, + keyPages: { + ...KEYPAGES, + }, + // ISSUERS + issuerListItem: (id) => `[data-test-issuer-list="${id}"]`, + importIssuerLink: '[data-test-generate-issuer="import"]', + generateIssuerDropdown: '[data-test-issuer-generate-dropdown]', + generateIssuerRoot: '[data-test-generate-issuer="root"]', + generateIssuerIntermediate: '[data-test-generate-issuer="intermediate"]', + issuerPopupMenu: '[data-test-popup-menu-trigger]', + issuerPopupDetails: '[data-test-popup-menu-details] a', + issuerDetails: { + title: '[data-test-pki-issuer-page-title]', + ...ISSUERDETAILS, + }, + // CONFIGURATION + configuration: { + title: '[data-test-pki-configuration-page-title]', + emptyState: '[data-test-configuration-empty-state]', + pkiBetaBanner: '[data-test-pki-configuration-banner]', + pkiBetaBannerLink: '[data-test-pki-configuration-banner] a', + ...CONFIGURATION, + ...DELETE, + ...TIDY, + ...GENROOT, + }, + // EDIT CONFIGURATION + configEdit: { + ...CONFIGEDIT, + }, +}; diff --git a/ui/tests/helpers/policy-generator/pki.js b/ui/tests/helpers/policy-generator/pki.js new file mode 100644 index 0000000..f1630b5 --- /dev/null +++ b/ui/tests/helpers/policy-generator/pki.js @@ -0,0 +1,57 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { singularize } from 'ember-inflector'; + +export const adminPolicy = (mountPath) => { + return ` + path "${mountPath}/*" { + capabilities = ["create", "read", "update", "delete", "list"] + }, + `; +}; + +// keys require singularized paths for GET +export const readerPolicy = (mountPath, resource) => { + return ` + path "${mountPath}/${resource}" { + capabilities = ["read", "list"] + }, + path "${mountPath}/${resource}/*" { + capabilities = ["read", "list"] + }, + path "${mountPath}/${singularize(resource)}" { + capabilities = ["read", "list"] + }, + path "${mountPath}/${singularize(resource)}/*" { + capabilities = ["read", "list"] + }, + `; +}; +export const updatePolicy = (mountPath, resource) => { + return ` + path "${mountPath}/${resource}" { + capabilities = ["read", "list"] + }, + path "${mountPath}/${resource}/*" { + capabilities = ["read", "update"] + }, + path "${mountPath}/${singularize(resource)}/*" { + capabilities = ["read", "update"] + }, + path "${mountPath}/issue/*" { + capabilities = ["update"] + }, + path "${mountPath}/generate/*" { + capabilities = ["update"] + }, + path "${mountPath}/import" { + capabilities = ["update"] + }, + path "${mountPath}/sign/*" { + capabilities = ["update"] + }, + `; +}; diff --git a/ui/tests/helpers/poll-cluster.js b/ui/tests/helpers/poll-cluster.js new file mode 100644 index 0000000..a4bb708 --- /dev/null +++ b/ui/tests/helpers/poll-cluster.js @@ -0,0 +1,12 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { settled } from '@ember/test-helpers'; + +export async function pollCluster(owner) { + const store = owner.lookup('service:store'); + await store.peekAll('cluster').firstObject.reload(); + await settled(); +} diff --git a/ui/tests/helpers/stubs.js b/ui/tests/helpers/stubs.js new file mode 100644 index 0000000..ece48ed --- /dev/null +++ b/ui/tests/helpers/stubs.js @@ -0,0 +1,59 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+export function capabilitiesStub(requestPath, capabilitiesArray) {
+  // sample of capabilitiesArray: ['read', 'update']
+  return {
+    [requestPath]: capabilitiesArray,
+    capabilities: capabilitiesArray,
+    request_id: '40f7e44d-af5c-9b60-bd20-df72eb17e294',
+    lease_id: '',
+    renewable: false,
+    lease_duration: 0,
+    data: {
+      [requestPath]: capabilitiesArray,
+      capabilities: capabilitiesArray,
+    },
+    wrap_info: null,
+    warnings: null,
+    auth: null,
+  };
+}
+
+export const noopStub = (response) => {
+  return function () {
+    return [response, { 'Content-Type': 'application/json' }, JSON.stringify({})];
+  };
+};
+
+/**
+ * allowAllCapabilitiesStub mocks the response from capabilities-self
+ * that allows the user to do any action (root user)
+ * Example usage assuming setupMirage(hooks) was called:
+ * this.server.post('/sys/capabilities-self', allowAllCapabilitiesStub(['read']));
+ */
+export function allowAllCapabilitiesStub(capabilitiesList = ['root']) {
+  return function (_, { requestBody }) {
+    const { paths } = JSON.parse(requestBody);
+    const specificCapabilities = paths.reduce((obj, path) => {
+      return {
+        ...obj,
+        [path]: capabilitiesList,
+      };
+    }, {});
+    return {
+      ...specificCapabilities,
+      capabilities: capabilitiesList,
+      request_id: 'mirage-795dc9e1-0321-9ac6-71fc',
+      lease_id: '',
+      renewable: false,
+      lease_duration: 0,
+      data: { ...specificCapabilities, capabilities: capabilitiesList },
+      wrap_info: null,
+      warnings: null,
+      auth: null,
+    };
+  };
+}
diff --git a/ui/tests/helpers/wait-for-error.js b/ui/tests/helpers/wait-for-error.js
new file mode 100644
index 0000000..5ebda46
--- /dev/null
+++ b/ui/tests/helpers/wait-for-error.js
@@ -0,0 +1,20 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import { waitUntil } from '@ember/test-helpers';
+import Ember from 'ember';
+
+export default function waitForError(opts) {
+  const orig = Ember.onerror;
+
+  let error = null;
+  Ember.onerror = (err) => {
+    error = err;
+  };
+
+  return waitUntil(() => error, opts).finally(() => {
+    Ember.onerror = orig;
+  });
+}
diff --git a/ui/tests/index.html b/ui/tests/index.html
new file mode 100644
index 0000000..37974a1
--- /dev/null
+++ b/ui/tests/index.html
@@ -0,0 +1,41 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=edge" />
+    <title>Vault Tests</title>
+    <meta name="description" content="" />
+    <meta name="viewport" content="width=device-width, initial-scale=1" />
+
+    {{content-for "head"}} {{content-for "test-head"}}
+
+    <link rel="stylesheet" href="{{rootURL}}assets/vendor.css" />
+    <link rel="stylesheet" href="{{rootURL}}assets/vault.css" />
+    <link rel="stylesheet" href="{{rootURL}}assets/test-support.css" />
+
+    {{content-for "head-footer"}} {{content-for "test-head-footer"}}
+  </head>
+  <body>
+    {{content-for "body"}} {{content-for "test-body"}}
+
+    <div id="qunit"></div>
+    <div id="qunit-fixture">
+      <div id="ember-testing-container">
+        <div id="ember-testing"></div>
+      </div>
+    </div>
+
+    <script src="/testem.js" integrity="" data-embroider-ignore></script>
+    <script src="{{rootURL}}assets/vendor.js"></script>
+    <script src="{{rootURL}}assets/test-support.js"></script>
+    <script src="{{rootURL}}assets/vault.js"></script>
+    <script src="{{rootURL}}assets/tests.js"></script>
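+    <!-- NOTE: the tags in this file were lost in extraction and are restored here from
+         the standard ember-cli tests/index.html layout, so attribute details are assumed.
+         #qunit hosts the QUnit runner UI; acceptance tests boot the application inside
+         #ember-testing within #ember-testing-container. -->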
+ + + + + + + + {{content-for "body-footer"}} {{content-for "test-body-footer"}} + + diff --git a/ui/tests/integration/.gitkeep b/ui/tests/integration/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/ui/tests/integration/components/alert-inline-test.js b/ui/tests/integration/components/alert-inline-test.js new file mode 100644 index 0000000..c0aad24 --- /dev/null +++ b/ui/tests/integration/components/alert-inline-test.js @@ -0,0 +1,89 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render, settled, find, waitUntil } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; + +module('Integration | Component | alert-inline', function (hooks) { + setupRenderingTest(hooks); + + hooks.beforeEach(function () { + this.set('message', 'some very important alert'); + this.set('type', 'warning'); + }); + + test('it renders alert message with correct class args', async function (assert) { + await render(hbs` + + `); + assert.dom('[data-test-inline-error-message]').hasText('some very important alert'); + assert + .dom('[data-test-inline-alert]') + .hasAttribute('class', 'message-inline padding-top is-marginless size-small'); + }); + + test('it yields to block text', async function (assert) { + await render(hbs` + + A much more important alert + + `); + assert.dom('[data-test-inline-error-message]').hasText('A much more important alert'); + }); + + test('it renders correctly for type=danger', async function (assert) { + this.set('type', 'danger'); + await render(hbs` + + `); + assert + .dom('[data-test-inline-error-message]') + .hasAttribute('class', 'has-text-danger', 'has danger text'); + assert.dom('[data-test-icon="x-square-fill"]').exists('danger icon exists'); + }); + + test('it renders correctly for type=warning', async function (assert) { + await render(hbs` + + `); + assert.dom('[data-test-inline-error-message]').doesNotHaveAttribute('class', 'does not have styled text'); + assert.dom('[data-test-icon="alert-triangle-fill"]').exists('warning icon exists'); + }); + + test('it mimics loading when message changes', async function (assert) { + await render(hbs` + + `); + assert + .dom('[data-test-inline-error-message]') + .hasText('some very important alert', 'it renders original message'); + + this.set('message', 'some changed alert!!!'); + await waitUntil(() => find('[data-test-icon="loading"]')); + assert.ok(find('[data-test-icon="loading"]'), 'it shows loading icon when message changes'); + await settled(); + assert + .dom('[data-test-inline-error-message]') + .hasText('some changed alert!!!', 'it shows updated message'); + }); +}); diff --git a/ui/tests/integration/components/alert-popup-test.js b/ui/tests/integration/components/alert-popup-test.js new file mode 100644 index 0000000..25f476e --- /dev/null +++ b/ui/tests/integration/components/alert-popup-test.js @@ -0,0 +1,58 @@ +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'vault/tests/helpers'; +import { render } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; +import { click } from '@ember/test-helpers'; + +module('Integration | Component | alert-popup', function (hooks) { + setupRenderingTest(hooks); + + hooks.beforeEach(function () { + this.set('message', 'some very important alert'); + this.set('type', 'warning'); + this.set('close', () => this.set('closed', true)); + }); + + test('it renders the alert popup input', 
async function (assert) { + await render(hbs` + + `); + + assert.dom(this.element).hasText('Warning some very important alert'); + }); + + test('it invokes the close action', async function (assert) { + assert.expect(1); + + await render(hbs` + + `); + await click('.close-button'); + + assert.true(this.closed); + }); + + test('it renders the alert popup with different colors based on types', async function (assert) { + await render(hbs` + + `); + + assert.dom('.message').hasClass('is-highlight'); + + this.set('type', 'info'); + + await render(hbs` + + `); + + assert.dom('.message').hasClass('is-info'); + + this.set('type', 'danger'); + + await render(hbs` + + `); + + assert.dom('.message').hasClass('is-danger'); + }); +}); diff --git a/ui/tests/integration/components/auth-config-form/options-test.js b/ui/tests/integration/components/auth-config-form/options-test.js new file mode 100644 index 0000000..24ddaf6 --- /dev/null +++ b/ui/tests/integration/components/auth-config-form/options-test.js @@ -0,0 +1,58 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { resolve } from 'rsvp'; +import EmberObject from '@ember/object'; +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render, settled } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; +import sinon from 'sinon'; +import { create } from 'ember-cli-page-object'; +import authConfigForm from 'vault/tests/pages/components/auth-config-form/options'; + +const component = create(authConfigForm); + +module('Integration | Component | auth-config-form options', function (hooks) { + setupRenderingTest(hooks); + + hooks.beforeEach(function () { + this.owner.lookup('service:flash-messages').registerTypes(['success']); + this.router = this.owner.lookup('service:router'); + this.router.reopen({ + transitionTo() { + return { + followRedirects() { + return resolve(); + }, + }; + }, + replaceWith() { + return resolve(); + }, + }); + }); + + test('it submits data correctly', async function (assert) { + assert.expect(1); + const model = EmberObject.create({ + tune() { + return resolve(); + }, + config: { + serialize() { + return {}; + }, + }, + }); + sinon.spy(model.config, 'serialize'); + this.set('model', model); + await render(hbs`{{auth-config-form/options model=this.model}}`); + component.save(); + return settled().then(() => { + assert.ok(model.config.serialize.calledOnce); + }); + }); +}); diff --git a/ui/tests/integration/components/auth-form-test.js b/ui/tests/integration/components/auth-form-test.js new file mode 100644 index 0000000..b8cd73d --- /dev/null +++ b/ui/tests/integration/components/auth-form-test.js @@ -0,0 +1,354 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { later, _cancelTimers as cancelTimers } from '@ember/runloop'; +import EmberObject from '@ember/object'; +import { resolve } from 'rsvp'; +import Service from '@ember/service'; +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render, settled } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; +import sinon from 'sinon'; +import Pretender from 'pretender'; +import { create } from 'ember-cli-page-object'; +import authForm from '../../pages/components/auth-form'; +import { validate } from 'uuid'; + +const component = create(authForm); + +const workingAuthService = Service.extend({ + authenticate() { + return resolve({}); + }, + handleError() {}, + setLastFetch() {}, +}); + +const routerService = Service.extend({ + transitionTo() { + return { + followRedirects() { + return resolve(); + }, + }; + }, +}); + +module('Integration | Component | auth form', function (hooks) { + setupRenderingTest(hooks); + + hooks.beforeEach(function () { + this.owner.register('service:router', routerService); + this.router = this.owner.lookup('service:router'); + }); + + const CSP_ERR_TEXT = `Error This is a standby Vault node but can't communicate with the active node via request forwarding. Sign in at the active node to use the Vault UI.`; + test('it renders error on CSP violation', async function (assert) { + assert.expect(2); + this.set('cluster', EmberObject.create({ standby: true })); + this.set('selectedAuth', 'token'); + await render(hbs`{{auth-form cluster=this.cluster selectedAuth=this.selectedAuth}}`); + assert.false(component.errorMessagePresent, false); + this.owner.lookup('service:csp-event').events.addObject({ violatedDirective: 'connect-src' }); + await settled(); + assert.strictEqual(component.errorText, CSP_ERR_TEXT); + }); + + test('it renders with vault style errors', async function (assert) { + assert.expect(1); + const server = new Pretender(function () { + this.get('/v1/auth/**', () => { + return [ + 400, + { 'Content-Type': 'application/json' }, + JSON.stringify({ + errors: ['Not allowed'], + }), + ]; + }); + this.get('/v1/sys/internal/ui/mounts', this.passthrough); + }); + + this.set('cluster', EmberObject.create({})); + this.set('selectedAuth', 'token'); + await render(hbs`{{auth-form cluster=this.cluster selectedAuth=this.selectedAuth}}`); + return component.login().then(() => { + assert.strictEqual(component.errorText, 'Error Authentication failed: Not allowed'); + server.shutdown(); + }); + }); + + test('it renders AdapterError style errors', async function (assert) { + assert.expect(1); + const server = new Pretender(function () { + this.get('/v1/auth/**', () => { + return [400, { 'Content-Type': 'application/json' }]; + }); + this.get('/v1/sys/internal/ui/mounts', this.passthrough); + }); + + this.set('cluster', EmberObject.create({})); + this.set('selectedAuth', 'token'); + await render(hbs`{{auth-form cluster=this.cluster selectedAuth=this.selectedAuth}}`); + // returns null because test does not return details of failed network request. On the app it will return the details of the error instead of null. 
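/* Editor's note: illustrative sketch, not part of the upstream diff.
 * The Pretender stub above answers with a 400 and no JSON body, which is
 * why the adapter surfaces `null` here. A body-bearing stub, as in the
 * "vault style errors" test earlier in this file, would surface the error
 * strings instead:
 *
 *   const errServer = new Pretender(function () {
 *     this.get('/v1/auth/**', () => [
 *       400,
 *       { 'Content-Type': 'application/json' },
 *       JSON.stringify({ errors: ['permission denied'] }),
 *     ]);
 *   });
 *   // component.errorText would then read
 *   // 'Error Authentication failed: permission denied'
 */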
+ return component.login().then(() => { + assert.strictEqual(component.errorText, 'Error Authentication failed: null'); + server.shutdown(); + }); + }); + + test('it renders no tabs when no methods are passed', async function (assert) { + const methods = { + 'approle/': { + type: 'approle', + }, + }; + const server = new Pretender(function () { + this.get('/v1/sys/internal/ui/mounts', () => { + return [200, { 'Content-Type': 'application/json' }, JSON.stringify({ data: { auth: methods } })]; + }); + }); + await render(hbs``); + + assert.strictEqual(component.tabs.length, 0, 'renders a tab for every backend'); + server.shutdown(); + }); + + test('it renders all the supported methods and Other tab when methods are present', async function (assert) { + const methods = { + 'foo/': { + type: 'userpass', + }, + 'approle/': { + type: 'approle', + }, + }; + const server = new Pretender(function () { + this.get('/v1/sys/internal/ui/mounts', () => { + return [200, { 'Content-Type': 'application/json' }, JSON.stringify({ data: { auth: methods } })]; + }); + }); + + this.set('cluster', EmberObject.create({})); + await render(hbs`{{auth-form cluster=this.cluster }}`); + + assert.strictEqual(component.tabs.length, 2, 'renders a tab for userpass and Other'); + assert.strictEqual(component.tabs.objectAt(0).name, 'foo', 'uses the path in the label'); + assert.strictEqual(component.tabs.objectAt(1).name, 'Other', 'second tab is the Other tab'); + server.shutdown(); + }); + + test('it renders the description', async function (assert) { + const methods = { + 'approle/': { + type: 'userpass', + description: 'app description', + }, + }; + const server = new Pretender(function () { + this.get('/v1/sys/internal/ui/mounts', () => { + return [200, { 'Content-Type': 'application/json' }, JSON.stringify({ data: { auth: methods } })]; + }); + }); + this.set('cluster', EmberObject.create({})); + await render(hbs`{{auth-form cluster=this.cluster }}`); + + assert.strictEqual( + component.descriptionText, + 'app description', + 'renders a description for auth methods' + ); + server.shutdown(); + }); + + test('it calls authenticate with the correct path', async function (assert) { + this.owner.unregister('service:auth'); + this.owner.register('service:auth', workingAuthService); + this.auth = this.owner.lookup('service:auth'); + const authSpy = sinon.spy(this.auth, 'authenticate'); + const methods = { + 'foo/': { + type: 'userpass', + }, + }; + const server = new Pretender(function () { + this.get('/v1/sys/internal/ui/mounts', () => { + return [200, { 'Content-Type': 'application/json' }, JSON.stringify({ data: { auth: methods } })]; + }); + }); + + this.set('cluster', EmberObject.create({})); + this.set('selectedAuth', 'foo/'); + await render(hbs`{{auth-form cluster=this.cluster selectedAuth=this.selectedAuth}}`); + await component.login(); + + await settled(); + assert.ok(authSpy.calledOnce, 'a call to authenticate was made'); + const { data } = authSpy.getCall(0).args[0]; + assert.strictEqual(data.path, 'foo', 'uses the id for the path'); + authSpy.restore(); + server.shutdown(); + }); + + test('it renders no tabs when no supported methods are present in passed methods', async function (assert) { + const methods = { + 'approle/': { + type: 'approle', + }, + }; + const server = new Pretender(function () { + this.get('/v1/sys/internal/ui/mounts', () => { + return [200, { 'Content-Type': 'application/json' }, JSON.stringify({ data: { auth: methods } })]; + }); + }); + this.set('cluster', EmberObject.create({})); + await 
render(hbs``); + + server.shutdown(); + assert.strictEqual(component.tabs.length, 0, 'renders a tab for every backend'); + }); + + test('it makes a request to unwrap if passed a wrappedToken and logs in', async function (assert) { + this.owner.register('service:auth', workingAuthService); + this.auth = this.owner.lookup('service:auth'); + const authSpy = sinon.spy(this.auth, 'authenticate'); + const server = new Pretender(function () { + this.post('/v1/sys/wrapping/unwrap', () => { + return [ + 200, + { 'content-type': 'application/json' }, + JSON.stringify({ + auth: { + client_token: '12345', + }, + }), + ]; + }); + }); + + const wrappedToken = '54321'; + this.set('wrappedToken', wrappedToken); + this.set('cluster', EmberObject.create({})); + await render(hbs``); + later(() => cancelTimers(), 50); + await settled(); + assert.strictEqual( + server.handledRequests[0].url, + '/v1/sys/wrapping/unwrap', + 'makes call to unwrap the token' + ); + assert.strictEqual( + server.handledRequests[0].requestHeaders['X-Vault-Token'], + wrappedToken, + 'uses passed wrapped token for the unwrap' + ); + assert.ok(authSpy.calledOnce, 'a call to authenticate was made'); + server.shutdown(); + authSpy.restore(); + }); + + test('it shows an error if unwrap errors', async function (assert) { + const server = new Pretender(function () { + this.post('/v1/sys/wrapping/unwrap', () => { + return [ + 400, + { 'Content-Type': 'application/json' }, + JSON.stringify({ + errors: ['There was an error unwrapping!'], + }), + ]; + }); + }); + + this.set('wrappedToken', '54321'); + await render(hbs`{{auth-form cluster=this.cluster wrappedToken=this.wrappedToken}}`); + later(() => cancelTimers(), 50); + + await settled(); + assert.strictEqual( + component.errorText, + 'Error Token unwrap failed: There was an error unwrapping!', + 'shows the error' + ); + server.shutdown(); + }); + + test('it should retain oidc role when mount path is changed', async function (assert) { + assert.expect(1); + + const auth_url = 'http://dev-foo-bar.com'; + const server = new Pretender(function () { + this.post('/v1/auth/:path/oidc/auth_url', (req) => { + const { role, redirect_uri } = JSON.parse(req.requestBody); + const goodRequest = + req.params.path === 'foo-oidc' && + role === 'foo' && + redirect_uri.includes('/auth/foo-oidc/oidc/callback'); + + return [ + goodRequest ? 200 : 400, + { 'Content-Type': 'application/json' }, + JSON.stringify( + goodRequest ? 
{ data: { auth_url } } : { errors: [`role "${role}" could not be found`] } + ), + ]; + }); + this.get('/v1/sys/internal/ui/mounts', this.passthrough); + }); + + window.open = (url) => { + assert.strictEqual(url, auth_url, 'auth_url is returned when required params are passed'); + }; + + this.owner.lookup('service:router').reopen({ + urlFor(route, { auth_path }) { + return `/auth/${auth_path}/oidc/callback`; + }, + }); + + this.set('cluster', EmberObject.create({})); + await render(hbs``); + + await component.selectMethod('oidc'); + await component.oidcRole('foo'); + await component.oidcMoreOptions(); + await component.oidcMountPath('foo-oidc'); + await component.login(); + + server.shutdown(); + }); + + test('it should set nonce value as uuid for okta method type', async function (assert) { + assert.expect(1); + + const server = new Pretender(function () { + this.post('/v1/auth/okta/login/foo', (req) => { + const { nonce } = JSON.parse(req.requestBody); + assert.true(validate(nonce), 'Nonce value passed as uuid for okta login'); + return [ + 200, + { 'content-type': 'application/json' }, + JSON.stringify({ + auth: { + client_token: '12345', + }, + }), + ]; + }); + this.get('/v1/sys/internal/ui/mounts', this.passthrough); + }); + + this.set('cluster', EmberObject.create({})); + await render(hbs``); + + await component.selectMethod('okta'); + await component.username('foo'); + await component.password('bar'); + await component.login(); + + server.shutdown(); + }); +}); diff --git a/ui/tests/integration/components/auth-jwt-test.js b/ui/tests/integration/components/auth-jwt-test.js new file mode 100644 index 0000000..6c114d9 --- /dev/null +++ b/ui/tests/integration/components/auth-jwt-test.js @@ -0,0 +1,257 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { _cancelTimers as cancelTimers } from '@ember/runloop'; +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render, settled, waitUntil } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; +import sinon from 'sinon'; +import Pretender from 'pretender'; +import { resolve } from 'rsvp'; +import { create } from 'ember-cli-page-object'; +import form from '../../pages/components/auth-jwt'; +import { ERROR_WINDOW_CLOSED, ERROR_MISSING_PARAMS, ERROR_JWT_LOGIN } from 'vault/components/auth-jwt'; +import { fakeWindow, buildMessage } from '../../helpers/oidc-window-stub'; + +const component = create(form); +const windows = []; + +fakeWindow.reopen({ + init() { + this._super(...arguments); + windows.push(this); + }, + open() { + return fakeWindow.create(); + }, + close() { + windows.forEach((w) => w.trigger('close')); + }, +}); + +const OIDC_AUTH_RESPONSE = { + auth: { + client_token: 'token', + }, +}; + +const renderIt = async (context, path = 'jwt') => { + const handler = (data, e) => { + if (e && e.preventDefault) e.preventDefault(); + return resolve(); + }; + const fake = fakeWindow.create(); + context.set('window', fake); + context.set('handler', sinon.spy(handler)); + context.set('roleName', ''); + context.set('selectedAuthPath', path); + await render(hbs` + + `); +}; +module('Integration | Component | auth jwt', function (hooks) { + setupRenderingTest(hooks); + + hooks.beforeEach(function () { + this.openSpy = sinon.spy(fakeWindow.proto(), 'open'); + this.owner.lookup('service:router').reopen({ + urlFor() { + return 'http://example.com'; + }, + }); + this.server = new Pretender(function () { + 
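/* Editor's note: illustrative sketch, not part of the upstream diff.
 * Pretender exposes dynamic segments such as `:path` on `request.params`,
 * which is how the auth_url stub below branches per mount path and per
 * role. The mechanism in isolation:
 *
 *   this.post('/v1/auth/:path/oidc/auth_url', (request) => {
 *     const mount = request.params.path; // 'jwt', 'foo', ...
 *     const { role } = JSON.parse(request.requestBody);
 *     return [
 *       200,
 *       { 'Content-Type': 'application/json' },
 *       JSON.stringify({ data: { auth_url: `http://example.com/${mount}/${role}` } }),
 *     ];
 *   });
 */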
this.get('/v1/auth/:path/oidc/callback', function () { + return [200, { 'Content-Type': 'application/json' }, JSON.stringify(OIDC_AUTH_RESPONSE)]; + }); + this.post('/v1/auth/:path/oidc/auth_url', (request) => { + const { role } = JSON.parse(request.requestBody); + if (['test', 'okta', 'bar'].includes(role)) { + const auth_url = role === 'test' ? 'http://example.com' : role === 'okta' ? 'http://okta.com' : ''; + return [ + 200, + { 'Content-Type': 'application/json' }, + JSON.stringify({ + data: { auth_url }, + }), + ]; + } + const errors = role === 'foo' ? ['role "foo" could not be found'] : [ERROR_JWT_LOGIN]; + return [400, { 'Content-Type': 'application/json' }, JSON.stringify({ errors })]; + }); + }); + }); + + hooks.afterEach(function () { + this.openSpy.restore(); + this.server.shutdown(); + }); + + test('it renders the yield', async function (assert) { + await render(hbs`Hello!`); + assert.strictEqual(component.yieldContent, 'Hello!', 'yields properly'); + }); + + test('jwt: it renders and makes auth_url requests', async function (assert) { + await renderIt(this); + await settled(); + assert.ok(component.jwtPresent, 'renders jwt field'); + assert.ok(component.rolePresent, 'renders jwt field'); + assert.strictEqual(this.server.handledRequests.length, 1, 'request to the default path is made'); + assert.strictEqual(this.server.handledRequests[0].url, '/v1/auth/jwt/oidc/auth_url'); + this.set('selectedAuthPath', 'foo'); + await settled(); + assert.strictEqual(this.server.handledRequests.length, 2, 'a second request was made'); + assert.strictEqual( + this.server.handledRequests[1].url, + '/v1/auth/foo/oidc/auth_url', + 'requests when path is set' + ); + }); + + test('jwt: it calls passed action on login', async function (assert) { + await renderIt(this); + await component.login(); + assert.ok(this.handler.calledOnce); + }); + + test('oidc: test role: it renders', async function (assert) { + await renderIt(this); + await settled(); + this.set('selectedAuthPath', 'foo'); + await component.role('test'); + await settled(); + assert.notOk(component.jwtPresent, 'does not show jwt input for OIDC type login'); + assert.strictEqual(component.loginButtonText, 'Sign in with OIDC Provider'); + + await component.role('okta'); + // 1 for initial render, 1 for each time role changed = 3 + assert.strictEqual(this.server.handledRequests.length, 4, 'fetches the auth_url when the path changes'); + assert.strictEqual( + component.loginButtonText, + 'Sign in with Okta', + 'recognizes auth methods with certain urls' + ); + }); + + test('oidc: it calls window.open popup window on login', async function (assert) { + await renderIt(this); + this.set('selectedAuthPath', 'foo'); + await component.role('test'); + component.login(); + await waitUntil(() => { + return this.openSpy.calledOnce; + }); + cancelTimers(); + const call = this.openSpy.getCall(0); + assert.deepEqual( + call.args, + ['http://example.com', 'vaultOIDCWindow', 'width=500,height=600,resizable,scrollbars=yes,top=0,left=0'], + 'called with expected args' + ); + }); + + test('oidc: it calls error handler when popup is closed', async function (assert) { + await renderIt(this); + this.set('selectedAuthPath', 'foo'); + await component.role('test'); + component.login(); + await waitUntil(() => { + return this.openSpy.calledOnce; + }); + this.window.close(); + await settled(); + assert.strictEqual(this.error, ERROR_WINDOW_CLOSED, 'calls onError with error string'); + }); + + test('oidc: shows error when message posted with state key, wrong params', async 
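/* Editor's note: illustrative sketch, not part of the upstream diff.
 * The message-event tests in this module (this one and the "fails
 * silently" cases below) drive the component through
 * `this.window.trigger('message', buildMessage(...))`. They assume a
 * listener that validates the event before exchanging the OIDC callback
 * params, roughly like the following (the handler and exchange names are
 * hypothetical):
 *
 *   function onMessage(event) {
 *     if (!event.isTrusted) return;                // "not trusted" case
 *     if (event.origin !== window.origin) return;  // "hackerz.com" case
 *     if (event.data.source !== 'oidc-callback') return;
 *     const { path, state, code } = event.data;
 *     if (!path || !state || !code) return onError(ERROR_MISSING_PARAMS);
 *     exchangeOIDCParams({ path, state, code });   // hypothetical helper
 *   }
 */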
function (assert) { + await renderIt(this); + this.set('selectedAuthPath', 'foo'); + await component.role('test'); + component.login(); + await waitUntil(() => { + return this.openSpy.calledOnce; + }); + this.window.trigger( + 'message', + buildMessage({ data: { source: 'oidc-callback', state: 'state', foo: 'bar' } }) + ); + cancelTimers(); + assert.strictEqual(this.error, ERROR_MISSING_PARAMS, 'calls onError with params missing error'); + }); + + test('oidc: storage event fires with state key, correct params', async function (assert) { + await renderIt(this); + this.set('selectedAuthPath', 'foo'); + await component.role('test'); + component.login(); + await waitUntil(() => { + return this.openSpy.calledOnce; + }); + this.window.trigger('message', buildMessage()); + await settled(); + assert.ok(this.handler.withArgs(null, null, 'token').calledOnce, 'calls the onSubmit handler with token'); + }); + + test('oidc: fails silently when event origin does not match window origin', async function (assert) { + await renderIt(this); + this.set('selectedAuthPath', 'foo'); + await component.role('test'); + component.login(); + await waitUntil(() => { + return this.openSpy.calledOnce; + }); + this.window.trigger('message', buildMessage({ origin: 'http://hackerz.com' })); + cancelTimers(); + await settled(); + assert.notOk(this.handler.called, 'should not call the submit handler'); + }); + + test('oidc: fails silently when event is not trusted', async function (assert) { + await renderIt(this); + this.set('selectedAuthPath', 'foo'); + await component.role('test'); + component.login(); + await waitUntil(() => { + return this.openSpy.calledOnce; + }); + this.window.trigger('message', buildMessage({ isTrusted: false })); + cancelTimers(); + await settled(); + assert.notOk(this.handler.called, 'should not call the submit handler'); + }); + + test('oidc: it should trigger error callback when role is not found', async function (assert) { + await renderIt(this, 'oidc'); + await component.role('foo'); + await component.login(); + assert.strictEqual( + this.error, + 'Invalid role. Please try again.', + 'Error message is returned when role is not found' + ); + }); + + test('oidc: it should trigger error callback when role is returned without auth_url', async function (assert) { + await renderIt(this, 'oidc'); + await component.role('bar'); + await component.login(); + assert.strictEqual( + this.error, + 'Missing auth_url. Please check that allowed_redirect_uris for the role include this mount path.', + 'Error message is returned when role is returned without auth_url' + ); + }); +}); diff --git a/ui/tests/integration/components/autocomplete-input-test.js b/ui/tests/integration/components/autocomplete-input-test.js new file mode 100644 index 0000000..051dd17 --- /dev/null +++ b/ui/tests/integration/components/autocomplete-input-test.js @@ -0,0 +1,92 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { click, fillIn, triggerEvent, typeIn, render } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; + +module('Integration | Component | autocomplete-input', function (hooks) { + setupRenderingTest(hooks); + + test('it should render label', async function (assert) { + await render( + hbs` + ` + ); + + assert.dom('label').doesNotExist('Label is hidden when not provided'); + this.setProperties({ + label: 'Some label', + subText: 'Some description', + }); + assert.dom('label').hasText('Some label', 'Label renders'); + assert.dom('[data-test-label-subtext]').hasText('Some description', 'Sub text renders'); + }); + + test('it should function as standard input', async function (assert) { + assert.expect(3); + const changeValue = 'foo bar'; + this.value = 'test'; + this.placeholder = 'text goes here'; + this.onChange = (value) => assert.strictEqual(value, changeValue, 'Value sent in onChange callback'); + + await render( + hbs` + ` + ); + + assert.dom('input').hasAttribute('placeholder', this.placeholder, 'Input placeholder renders'); + assert.dom('input').hasValue(this.value, 'Initial input value renders'); + await fillIn('input', changeValue); + }); + + test('it should trigger dropdown', async function (assert) { + await render( + hbs` + ` + ); + + await typeIn('input', '$'); + await triggerEvent('input', 'input', { data: '$' }); // simulate InputEvent for data prop with character pressed + assert.dom('.autocomplete-input-option').doesNotExist('Trigger does not open dropdown with no options'); + + this.set('options', [ + { label: 'Foo', value: '$foo' }, + { label: 'Bar', value: 'bar' }, + ]); + await triggerEvent('input', 'input', { data: '$' }); + const options = this.element.querySelectorAll('.autocomplete-input-option'); + options.forEach((o, index) => { + assert.dom(o).hasText(this.options[index].label, 'Label renders for option'); + }); + + await click(options[0]); + assert.dom('input').isFocused('Focus is returned to input after selecting option'); + assert + .dom('input') + .hasValue('$foo', 'Value is updated correctly. Trigger character is not prepended to value.'); + + await typeIn('input', '-$'); + await triggerEvent('input', 'input', { data: '$' }); + await click('.autocomplete-input-option:last-child'); + assert + .dom('input') + .hasValue('$foo-$bar', 'Value is updated correctly. Trigger character is prepended to option.'); + assert.strictEqual(this.value, '$foo-$bar', 'Value prop is updated correctly onChange'); + }); +}); diff --git a/ui/tests/integration/components/b64-toggle-test.js b/ui/tests/integration/components/b64-toggle-test.js new file mode 100644 index 0000000..6a9a6f8 --- /dev/null +++ b/ui/tests/integration/components/b64-toggle-test.js @@ -0,0 +1,55 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render, click, find } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; + +module('Integration | Component | b64 toggle', function (hooks) { + setupRenderingTest(hooks); + + test('it renders', async function (assert) { + await render(hbs`{{b64-toggle}}`); + assert.dom('button').exists({ count: 1 }); + }); + + test('it toggles encoding on the passed string', async function (assert) { + this.set('value', 'value'); + await render(hbs`{{b64-toggle value=this.value}}`); + await click('button'); + assert.strictEqual(this.value, btoa('value'), 'encodes to base64'); + await click('button'); + assert.strictEqual(this.value, 'value', 'decodes from base64'); + }); + + test('it toggles encoding starting with base64', async function (assert) { + this.set('value', btoa('value')); + await render(hbs`{{b64-toggle value=this.value initialEncoding='base64'}}`); + assert.ok(find('button').textContent.includes('Decode'), 'renders as on when in b64 mode'); + await click('button'); + assert.strictEqual(this.value, 'value', 'decodes from base64'); + }); + + test('it detects changes to value after encoding', async function (assert) { + this.set('value', btoa('value')); + await render(hbs`{{b64-toggle value=this.value initialEncoding='base64'}}`); + assert.ok(find('button').textContent.includes('Decode'), 'renders as on when in b64 mode'); + this.set('value', btoa('value') + '='); + assert.ok(find('button').textContent.includes('Encode'), 'toggles off since value has changed'); + this.set('value', btoa('value')); + assert.ok( + find('button').textContent.includes('Decode'), + 'toggles on since value is equal to the original' + ); + }); + + test('it does not toggle when the value is empty', async function (assert) { + this.set('value', ''); + await render(hbs`{{b64-toggle value=this.value}}`); + await click('button'); + assert.ok(find('button').textContent.includes('Encode')); + }); +}); diff --git a/ui/tests/integration/components/box-radio-test.js b/ui/tests/integration/components/box-radio-test.js new file mode 100644 index 0000000..9cf26bb --- /dev/null +++ b/ui/tests/integration/components/box-radio-test.js @@ -0,0 +1,55 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import sinon from 'sinon'; +import { render, click } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; + +module('Integration | Component | box-radio', function (hooks) { + setupRenderingTest(hooks); + + hooks.beforeEach(function () { + this.set('type', 'aws'); + this.set('displayName', 'An Option'); + this.set('mountType', ''); + this.set('disabled', false); + }); + + test('it renders', async function (assert) { + const spy = sinon.spy(); + this.set('onRadioChange', spy); + await render(hbs``); + + assert.dom(this.element).hasText('An Option', 'shows the display name of the option'); + assert.dom('.tooltip').doesNotExist('tooltip does not exist when disabled is false'); + await click('[data-test-mount-type="aws"]'); + assert.ok(spy.calledOnce, 'calls the radio change function when option clicked'); + }); + + test('it renders correctly when disabled', async function (assert) { + const spy = sinon.spy(); + this.set('onRadioChange', spy); + await render(hbs``); + + assert.dom(this.element).hasText('An Option', 'shows the display name of the option'); + assert.dom('.ember-basic-dropdown-trigger').exists('tooltip exists'); + await click('[data-test-mount-type="aws"]'); + assert.ok(spy.notCalled, 'does not call the radio change function when option is clicked'); + }); +}); diff --git a/ui/tests/integration/components/calendar-widget-test.js b/ui/tests/integration/components/calendar-widget-test.js new file mode 100644 index 0000000..9a9c2d6 --- /dev/null +++ b/ui/tests/integration/components/calendar-widget-test.js @@ -0,0 +1,257 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render, click } from '@ember/test-helpers'; +import sinon from 'sinon'; +import hbs from 'htmlbars-inline-precompile'; +import calendarDropdown from 'vault/tests/pages/components/calendar-widget'; +import { ARRAY_OF_MONTHS } from 'core/utils/date-formatters'; +import { subMonths, subYears } from 'date-fns'; +import timestamp from 'core/utils/timestamp'; + +module('Integration | Component | calendar-widget', function (hooks) { + setupRenderingTest(hooks); + + hooks.before(function () { + sinon.stub(timestamp, 'now').callsFake(() => new Date('2018-04-03T14:15:30')); + }); + hooks.beforeEach(function () { + const CURRENT_DATE = timestamp.now(); + this.set('currentDate', CURRENT_DATE); + this.set('calendarStartDate', subMonths(CURRENT_DATE, 12)); + this.set('calendarEndDate', CURRENT_DATE); + this.set('startTimestamp', subMonths(CURRENT_DATE, 12).toISOString()); + this.set('endTimestamp', CURRENT_DATE.toISOString()); + this.set('handleClientActivityQuery', sinon.spy()); + }); + hooks.after(function () { + timestamp.now.restore(); + }); + + test('it renders and disables correct months when start date is 12 months ago', async function (assert) { + assert.expect(14); + await render(hbs` + + `); + assert + .dom('[data-test-calendar-widget-trigger]') + .hasText(`Apr 2017 - Apr 2018`, 'renders and formats start and end dates'); + await calendarDropdown.openCalendar(); + assert.ok(calendarDropdown.showsCalendar, 'renders the calendar component'); + // assert months in current year are disabled/enabled correctly + const enabledMonths = ['January', 'February', 'March', 'April']; + ARRAY_OF_MONTHS.forEach(function (month) { + if 
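/* Editor's note: illustrative sketch, not part of the upstream diff.
 * Stubbing the shared timestamp util (hooks.before above) pins "now", so
 * fixtures derived with date-fns stay deterministic no matter when the
 * suite actually runs:
 *
 *   sinon.stub(timestamp, 'now').callsFake(() => new Date('2018-04-03T14:15:30'));
 *   timestamp.now();                // always 2018-04-03T14:15:30
 *   subMonths(timestamp.now(), 12); // always April 2017, hence "Apr 2017 - Apr 2018"
 *   // restore in hooks.after so other modules see real time again:
 *   timestamp.now.restore();
 */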
(enabledMonths.includes(month)) { + assert + .dom(`[data-test-calendar-month="${month}"]`) + .doesNotHaveClass('is-readOnly', `${month} is enabled`); + } else { + assert.dom(`[data-test-calendar-month="${month}"]`).hasClass('is-readOnly', `${month} is read only`); + } + }); + }); + + test('it renders and disables months before start timestamp', async function (assert) { + await render(hbs` + + `); + + await calendarDropdown.openCalendar(); + assert.dom('[data-test-next-year]').isDisabled('Future year is disabled'); + await calendarDropdown.clickPreviousYear(); + assert + .dom('[data-test-display-year]') + .hasText(`${subYears(this.currentDate, 1).getFullYear()}`, 'shows the previous year'); + assert.dom('[data-test-previous-year]').isDisabled('disables previous year'); + + // assert months in previous year are disabled/enabled correctly + const disabledMonths = ['January', 'February', 'March']; + ARRAY_OF_MONTHS.forEach(function (month) { + if (disabledMonths.includes(month)) { + assert.dom(`[data-test-calendar-month="${month}"]`).hasClass('is-readOnly', `${month} is read only`); + } else { + assert + .dom(`[data-test-calendar-month="${month}"]`) + .doesNotHaveClass('is-readOnly', `${month} is enabled`); + } + }); + }); + + test('it calls parent callback with correct arg when clicking "Current billing period"', async function (assert) { + await render(hbs` + + `); + await calendarDropdown.menuToggle(); + await calendarDropdown.clickCurrentBillingPeriod(); + assert.propEqual( + this.handleClientActivityQuery.args[0][0], + { dateType: 'reset' }, + 'it calls parent function with reset dateType' + ); + }); + + test('it calls parent callback with correct arg when clicking "Current month"', async function (assert) { + await render(hbs` + + `); + await calendarDropdown.menuToggle(); + await calendarDropdown.clickCurrentMonth(); + assert.propEqual( + this.handleClientActivityQuery.args[0][0], + { dateType: 'currentMonth' }, + 'it calls parent function with currentMoth dateType' + ); + }); + + test('it calls parent callback with correct arg when selecting a month', async function (assert) { + await render(hbs` + + `); + await calendarDropdown.openCalendar(); + await click(`[data-test-calendar-month="April"`); + assert.propEqual( + this.handleClientActivityQuery.lastCall.lastArg, + { + dateType: 'endDate', + monthIdx: 3, + monthName: 'April', + year: 2018, + }, + 'it calls parent function with end date (current) month/year' + ); + + await calendarDropdown.openCalendar(); + await calendarDropdown.clickPreviousYear(); + await click(`[data-test-calendar-month="March"]`); + assert.propEqual( + this.handleClientActivityQuery.lastCall.lastArg, + { + dateType: 'endDate', + monthIdx: 2, + monthName: 'March', + year: 2017, + }, + 'it calls parent function with selected start date month/year' + ); + }); + + test('it disables correct months when start date 6 months ago', async function (assert) { + this.set('calendarStartDate', subMonths(this.currentDate, 6)); // Nov 3, 2017 + this.set('startTimestamp', subMonths(this.currentDate, 6).toISOString()); + await render(hbs` + + `); + + await calendarDropdown.openCalendar(); + assert.dom('[data-test-next-year]').isDisabled('Future year is disabled'); + + // Check start year disables correct months + await calendarDropdown.clickPreviousYear(); + assert.dom('[data-test-previous-year]').isDisabled('previous year is disabled'); + const prevYearEnabled = ['October', 'November', 'December']; + ARRAY_OF_MONTHS.forEach(function (month) { + if 
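/* Editor's note: illustrative sketch, not part of the upstream diff.
 * The ARRAY_OF_MONTHS loops in these tests check the widget's read-only
 * rule: a month is selectable only if it falls inside the start/end
 * window. A minimal predicate expressing that assumption:
 *
 *   const isSelectable = (year, monthIdx, start, end) => {
 *     const first = new Date(start.getFullYear(), start.getMonth());
 *     const last = new Date(end.getFullYear(), end.getMonth());
 *     const candidate = new Date(year, monthIdx);
 *     return candidate >= first && candidate <= last;
 *   };
 *   // e.g. isSelectable(2017, 0, new Date(2017, 3), new Date(2018, 3))
 *   // is false, so January 2017 renders with the is-readOnly class
 */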
(prevYearEnabled.includes(month)) { + assert + .dom(`[data-test-calendar-month="${month}"]`) + .doesNotHaveClass('is-readOnly', `${month} is enabled`); + } else { + assert.dom(`[data-test-calendar-month="${month}"]`).hasClass('is-readOnly', `${month} is read only`); + } + }); + + // Check end year disables correct months + await click('[data-test-next-year]'); + const currYearEnabled = ['January', 'February', 'March', 'April']; + ARRAY_OF_MONTHS.forEach(function (month) { + if (currYearEnabled.includes(month)) { + assert + .dom(`[data-test-calendar-month="${month}"]`) + .doesNotHaveClass('is-readOnly', `${month} is enabled`); + } else { + assert.dom(`[data-test-calendar-month="${month}"]`).hasClass('is-readOnly', `${month} is read only`); + } + }); + }); + + test('it disables correct months when start date 36 months ago', async function (assert) { + this.set('calendarStartDate', subMonths(this.currentDate, 36)); // April 3 2015 + this.set('startTimestamp', subMonths(this.currentDate, 36).toISOString()); + await render(hbs` + + `); + + await calendarDropdown.openCalendar(); + assert.dom('[data-test-next-year]').isDisabled('Future year is disabled'); + + for (const year of [2017, 2016, 2015]) { + await calendarDropdown.clickPreviousYear(); + assert.dom('[data-test-display-year]').hasText(year.toString()); + } + + assert.dom('[data-test-previous-year]').isDisabled('previous year is disabled'); + assert.dom('[data-test-next-year]').isEnabled('next year is enabled'); + + assert.dom('.calendar-widget .is-readOnly').exists('Some months disabled'); + + const disabledMonths = ['January', 'February', 'March']; + ARRAY_OF_MONTHS.forEach(function (month) { + if (disabledMonths.includes(month)) { + assert.dom(`[data-test-calendar-month="${month}"]`).hasClass('is-readOnly', `${month} is read only`); + } else { + assert + .dom(`[data-test-calendar-month="${month}"]`) + .doesNotHaveClass('is-readOnly', `${month} is enabled`); + } + }); + + await click('[data-test-next-year]'); + assert.dom('.calendar-widget .is-readOnly').doesNotExist('All months enabled for 2016'); + await click('[data-test-next-year]'); + assert.dom('.calendar-widget .is-readOnly').doesNotExist('All months enabled for 2017'); + await click('[data-test-next-year]'); + assert.dom('.calendar-widget .is-readOnly').exists('Some months disabled for 2018'); + + const enabledMonths = ['January', 'February', 'March', 'April']; + ARRAY_OF_MONTHS.forEach(function (month) { + if (enabledMonths.includes(month)) { + assert + .dom(`[data-test-calendar-month="${month}"]`) + .doesNotHaveClass('is-readOnly', `${month} is enabled`); + } else { + assert.dom(`[data-test-calendar-month="${month}"]`).hasClass('is-readOnly', `${month} is read only`); + } + }); + }); +}); diff --git a/ui/tests/integration/components/checkbox-grid-test.js b/ui/tests/integration/components/checkbox-grid-test.js new file mode 100644 index 0000000..d31b7eb --- /dev/null +++ b/ui/tests/integration/components/checkbox-grid-test.js @@ -0,0 +1,59 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render, click } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; +import Sinon from 'sinon'; + +module('Integration | Component | checkbox-grid', function (hooks) { + setupRenderingTest(hooks); + + hooks.beforeEach(function () { + this.name = 'fooBar'; + this.label = 'Foo bar'; + this.fields = [ + { key: 'abc', label: 'All Bears Cry' }, + { key: 'def', label: 'Dark Eel Feelings' }, + ]; + + this.onChange = Sinon.spy(); + }); + + test('it renders with minimum inputs', async function (assert) { + const changeSpy = Sinon.spy(); + this.set('onChange', changeSpy); + await render( + hbs`` + ); + + assert.dom('[data-test-checkbox]').exists({ count: 2 }, 'One checkbox is rendered for each field'); + assert.dom('[data-test-checkbox]').isNotChecked('no fields are checked by default'); + await click('[data-test-checkbox="abc"]'); + assert.ok(changeSpy.calledOnceWithExactly('fooBar', ['abc'])); + }); + + test('it renders with values set', async function (assert) { + const changeSpy = Sinon.spy(); + this.set('onChange', changeSpy); + this.set('currentValue', ['abc']); + await render( + hbs`` + ); + + assert.dom('[data-test-checkbox]').exists({ count: 2 }, 'One checkbox is rendered for each field'); + assert.dom('[data-test-checkbox="abc"]').isChecked('abc field is checked on load'); + assert.dom('[data-test-checkbox="def"]').isNotChecked('def field is unchecked on load'); + await click('[data-test-checkbox="abc"]'); + assert.ok(changeSpy.calledOnceWithExactly('fooBar', []), 'Sends correct payload when unchecking'); + await click('[data-test-checkbox="def"]'); + await click('[data-test-checkbox="abc"]'); + assert.ok( + changeSpy.calledWithExactly('fooBar', ['def', 'abc']), + 'sends correct payload with multiple checked' + ); + }); +}); diff --git a/ui/tests/integration/components/chevron-test.js b/ui/tests/integration/components/chevron-test.js new file mode 100644 index 0000000..ec8f096 --- /dev/null +++ b/ui/tests/integration/components/chevron-test.js @@ -0,0 +1,37 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; +import waitForError from 'vault/tests/helpers/wait-for-error'; + +module('Integration | Component | chevron', function (hooks) { + setupRenderingTest(hooks); + + test('it renders', async function (assert) { + // Set any properties with this.set('myProperty', 'value'); + // Handle any actions with this.set('myAction', function(val) { ... 
}); + + await render(hbs``); + assert.dom('.flight-icon').exists('renders'); + + await render(hbs``); + assert.dom('.flight-icon').hasClass('hs-icon-button-right', 'renders'); + + await render(hbs``); + assert.dom('.flight-icon').doesNotHaveClass('hs-icon-button-right', 'renders'); + + const promise = waitForError(); + render(hbs``); + const err = await promise; + + assert.ok( + err.message.includes('The direction property of Chevron'), + 'asserts about unsupported direction' + ); + }); +}); diff --git a/ui/tests/integration/components/clients/attribution-test.js b/ui/tests/integration/components/clients/attribution-test.js new file mode 100644 index 0000000..76efdca --- /dev/null +++ b/ui/tests/integration/components/clients/attribution-test.js @@ -0,0 +1,241 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import sinon from 'sinon'; +import { setupRenderingTest } from 'ember-qunit'; +import { render } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; +import { endOfMonth, formatRFC3339 } from 'date-fns'; +import { click } from '@ember/test-helpers'; +import subMonths from 'date-fns/subMonths'; +import timestamp from 'core/utils/timestamp'; + +module('Integration | Component | clients/attribution', function (hooks) { + setupRenderingTest(hooks); + + hooks.before(function () { + sinon.stub(timestamp, 'now').callsFake(() => new Date('2018-04-03T14:15:30')); + }); + hooks.beforeEach(function () { + const mockNow = timestamp.now(); + this.mockNow = mockNow; + this.set('startTimestamp', formatRFC3339(subMonths(mockNow, 6))); + this.set('timestamp', formatRFC3339(mockNow)); + this.set('selectedNamespace', null); + this.set('chartLegend', [ + { label: 'entity clients', key: 'entity_clients' }, + { label: 'non-entity clients', key: 'non_entity_clients' }, + ]); + this.set('totalUsageCounts', { clients: 15, entity_clients: 10, non_entity_clients: 5 }); + this.set('totalClientAttribution', [ + { label: 'second', clients: 10, entity_clients: 7, non_entity_clients: 3 }, + { label: 'first', clients: 5, entity_clients: 3, non_entity_clients: 2 }, + ]); + this.set('totalMountsData', { clients: 5, entity_clients: 3, non_entity_clients: 2 }); + this.set('namespaceMountsData', [ + { label: 'auth1/', clients: 3, entity_clients: 2, non_entity_clients: 1 }, + { label: 'auth2/', clients: 2, entity_clients: 1, non_entity_clients: 1 }, + ]); + }); + hooks.after(function () { + timestamp.now.restore(); + }); + + test('it renders empty state with no data', async function (assert) { + await render(hbs` + + + `); + + assert.dom('[data-test-component="empty-state"]').exists(); + assert.dom('[data-test-empty-state-title]').hasText('No data found'); + assert.dom('[data-test-attribution-description]').hasText('There is a problem gathering data'); + assert.dom('[data-test-attribution-export-button]').doesNotExist(); + assert.dom('[data-test-attribution-timestamp]').doesNotHaveTextContaining('Updated'); + }); + + test('it renders with data for namespaces', async function (assert) { + await render(hbs` + + + `); + + assert.dom('[data-test-component="empty-state"]').doesNotExist(); + assert.dom('[data-test-horizontal-bar-chart]').exists('chart displays'); + assert.dom('[data-test-attribution-export-button]').exists(); + assert + .dom('[data-test-attribution-description]') + .hasText( + 'This data shows the top ten namespaces by client count and can be used to understand where clients are originating. 
Namespaces are identified by path. To see all namespaces, export this data.' + ); + assert + .dom('[data-test-attribution-subtext]') + .hasText( + 'The total clients in the namespace for this date range. This number is useful for identifying overall usage volume.' + ); + assert.dom('[data-test-top-attribution]').includesText('namespace').includesText('second'); + assert.dom('[data-test-attribution-clients]').includesText('namespace').includesText('10'); + }); + + test('it renders two charts and correct text for single, historical month', async function (assert) { + this.start = formatRFC3339(subMonths(this.mockNow, 1)); + this.end = formatRFC3339(subMonths(endOfMonth(this.mockNow), 1)); + await render(hbs` + + + `); + assert + .dom('[data-test-attribution-description]') + .includesText( + 'This data shows the top ten namespaces by client count and can be used to understand where clients are originating. Namespaces are identified by path. To see all namespaces, export this data.', + 'renders correct auth attribution description' + ); + assert + .dom('[data-test-chart-container="total-clients"] .chart-description') + .includesText( + 'The total clients in the namespace for this month. This number is useful for identifying overall usage volume.', + 'renders total monthly namespace text' + ); + assert + .dom('[data-test-chart-container="new-clients"] .chart-description') + .includesText( + 'The new clients in the namespace for this month. This aids in understanding which namespaces create and use new clients.', + 'renders new monthly namespace text' + ); + this.set('selectedNamespace', 'second'); + + assert + .dom('[data-test-attribution-description]') + .includesText( + 'This data shows the top ten authentication methods by client count within this namespace, and can be used to understand where clients are originating. Authentication methods are organized by path.', + 'renders correct auth attribution description' + ); + assert + .dom('[data-test-chart-container="total-clients"] .chart-description') + .includesText( + 'The total clients used by the auth method for this month. This number is useful for identifying overall usage volume.', + 'renders total monthly auth method text' + ); + assert + .dom('[data-test-chart-container="new-clients"] .chart-description') + .includesText( + 'The new clients used by the auth method for this month. 
This aids in understanding which auth methods create and use new clients.', + 'renders new monthly auth method text' + ); + }); + + test('it renders single chart for current month', async function (assert) { + await render(hbs` + + + `); + assert + .dom('[data-test-chart-container="single-chart"]') + .exists('renders single chart with total clients'); + assert + .dom('[data-test-attribution-subtext]') + .hasTextContaining('this month', 'renders total monthly namespace text'); + }); + + test('it renders single chart and correct text for for date range', async function (assert) { + await render(hbs` + + + `); + + assert + .dom('[data-test-chart-container="single-chart"]') + .exists('renders single chart with total clients'); + assert + .dom('[data-test-attribution-subtext]') + .hasTextContaining('date range', 'renders total monthly namespace text'); + }); + + test('it renders with data for selected namespace auth methods for a date range', async function (assert) { + this.set('selectedNamespace', 'second'); + await render(hbs` + + + `); + + assert.dom('[data-test-component="empty-state"]').doesNotExist(); + assert.dom('[data-test-horizontal-bar-chart]').exists('chart displays'); + assert.dom('[data-test-attribution-export-button]').exists(); + assert + .dom('[data-test-attribution-description]') + .hasText( + 'This data shows the top ten authentication methods by client count within this namespace, and can be used to understand where clients are originating. Authentication methods are organized by path.' + ); + assert + .dom('[data-test-attribution-subtext]') + .hasText( + 'The total clients used by the auth method for this date range. This number is useful for identifying overall usage volume.' + ); + assert.dom('[data-test-top-attribution]').includesText('auth method').includesText('auth1/'); + assert.dom('[data-test-attribution-clients]').includesText('auth method').includesText('3'); + }); + + test('it renders modal', async function (assert) { + await render(hbs` + + + `); + await click('[data-test-attribution-export-button]'); + assert.dom('.modal.is-active .title').hasText('Export attribution data', 'modal appears to export csv'); + assert.dom('.modal.is-active').includesText('June 2022 - December 2022'); + }); +}); diff --git a/ui/tests/integration/components/clients/config-test.js b/ui/tests/integration/components/clients/config-test.js new file mode 100644 index 0000000..65e8ea3 --- /dev/null +++ b/ui/tests/integration/components/clients/config-test.js @@ -0,0 +1,172 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render, find, click, fillIn } from '@ember/test-helpers'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import hbs from 'htmlbars-inline-precompile'; +import sinon from 'sinon'; + +module('Integration | Component | client count config', function (hooks) { + setupRenderingTest(hooks); + setupMirage(hooks); + + hooks.beforeEach(function () { + this.router = this.owner.lookup('service:router'); + this.transitionStub = sinon.stub(this.router, 'transitionTo'); + const store = this.owner.lookup('service:store'); + this.createModel = (enabled = 'enable', reporting_enabled = false, minimum_retention_months = 0) => { + store.pushPayload('clients/config', { + modelName: 'clients/config', + id: 'foo', + data: { + enabled, + reporting_enabled, + minimum_retention_months, + retention_months: 24, + }, + }); + this.model = store.peekRecord('clients/config', 'foo'); + }; + }); + + test('it shows the table with the correct rows by default', async function (assert) { + this.createModel(); + + await render(hbs``); + + assert.dom('[data-test-clients-config-table]').exists('Clients config table exists'); + const rows = document.querySelectorAll('.info-table-row'); + assert.strictEqual(rows.length, 2, 'renders 2 info table rows'); + assert.ok( + find('[data-test-row-value="Usage data collection"]').textContent.includes('On'), + 'Enabled value matches model' + ); + assert.ok( + find('[data-test-row-value="Retention period"]').textContent.includes('24'), + 'Retention period value matches model' + ); + }); + + test('it should function in edit mode when reporting is disabled', async function (assert) { + assert.expect(13); + + this.server.put('/sys/internal/counters/config', (schema, req) => { + const { enabled, retention_months } = JSON.parse(req.requestBody); + const expected = { enabled: 'enable', retention_months: 5 }; + assert.deepEqual(expected, { enabled, retention_months }, 'Correct data sent in PUT request'); + return {}; + }); + + this.createModel('disable'); + + await render(hbs` + + + `); + + assert.dom('[data-test-input="enabled"]').isNotChecked('Data collection checkbox is not checked'); + assert + .dom('label[for="enabled"]') + .hasText('Data collection is off', 'Correct label renders when data collection is off'); + assert.dom('[data-test-input="retentionMonths"]').hasValue('24', 'Retention months render'); + + await click('[data-test-input="enabled"]'); + await fillIn('[data-test-input="retentionMonths"]', -3); + await click('[data-test-clients-config-save]'); + assert + .dom('[data-test-inline-error-message]') + .hasText( + 'Retention period must be greater than or equal to 0.', + 'Validation error shows for incorrect retention period' + ); + + await fillIn('[data-test-input="retentionMonths"]', 5); + await click('[data-test-clients-config-save]'); + assert.dom('.modal.is-active').exists('Modal renders'); + assert + .dom('[data-test-modal-title] span') + .hasText('Turn usage tracking on?', 'Correct modal title renders'); + assert.dom('[data-test-clients-config-modal="on"]').exists('Correct modal description block renders'); + + await click('[data-test-clients-config-modal="continue"]'); + assert.ok( + this.transitionStub.calledWith('vault.cluster.clients.config'), + 'Route transitions correctly on save success' + ); + + await click('[data-test-input="enabled"]'); + await click('[data-test-clients-config-save]'); + 
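/* Editor's note: illustrative sketch, not part of the upstream diff.
 * The `this.server.put` handler registered at the top of this test doubles
 * as the assertion on the outgoing payload; with Mirage, the handler sees
 * the serialized request body:
 *
 *   this.server.put('/sys/internal/counters/config', (schema, req) => {
 *     const { enabled, retention_months } = JSON.parse(req.requestBody);
 *     assert.deepEqual({ enabled, retention_months }, { enabled: 'enable', retention_months: 5 });
 *     return {};
 *   });
 *
 * Remember to count these handler assertions in assert.expect(), since
 * they only run when the request is actually made.
 */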
assert.dom('.modal.is-active').exists('Modal renders'); + assert + .dom('[data-test-modal-title] span') + .hasText('Turn usage tracking off?', 'Correct modal title renders'); + assert.dom('[data-test-clients-config-modal="off"]').exists('Correct modal description block renders'); + + await click('[data-test-clients-config-modal="cancel"]'); + assert.dom('.modal.is-active').doesNotExist('Modal is hidden on cancel'); + }); + + test('it should function in edit mode when reporting is enabled', async function (assert) { + assert.expect(6); + + this.server.put('/sys/internal/counters/config', (schema, req) => { + const { enabled, retention_months } = JSON.parse(req.requestBody); + const expected = { enabled: 'enable', retention_months: 48 }; + assert.deepEqual(expected, { enabled, retention_months }, 'Correct data sent in PUT request'); + return {}; + }); + + this.createModel('enable', true, 24); + + await render(hbs` + + + `); + + assert.dom('[data-test-input="enabled"]').isChecked('Data collection input is checked'); + assert + .dom('[data-test-input="enabled"]') + .isDisabled('Data collection input disabled when reporting is enabled'); + assert + .dom('label[for="enabled"]') + .hasText('Data collection is on', 'Correct label renders when data collection is on'); + assert.dom('[data-test-input="retentionMonths"]').hasValue('24', 'Retention months render'); + + await fillIn('[data-test-input="retentionMonths"]', 5); + await click('[data-test-clients-config-save]'); + assert + .dom('[data-test-inline-error-message]') + .hasText( + 'Retention period must be greater than or equal to 24.', + 'Validation error shows for incorrect retention period' + ); + + await fillIn('[data-test-input="retentionMonths"]', 48); + await click('[data-test-clients-config-save]'); + }); + + test('it should not show modal when data collection is not changed', async function (assert) { + assert.expect(1); + + this.server.put('/sys/internal/counters/config', (schema, req) => { + const { enabled, retention_months } = JSON.parse(req.requestBody); + const expected = { enabled: 'enable', retention_months: 5 }; + assert.deepEqual(expected, { enabled, retention_months }, 'Correct data sent in PUT request'); + return {}; + }); + + this.createModel(); + + await render(hbs` + + + `); + + await fillIn('[data-test-input="retentionMonths"]', 5); + await click('[data-test-clients-config-save]'); + }); +}); diff --git a/ui/tests/integration/components/clients/horizontal-bar-chart-test.js b/ui/tests/integration/components/clients/horizontal-bar-chart-test.js new file mode 100644 index 0000000..31f1c4d --- /dev/null +++ b/ui/tests/integration/components/clients/horizontal-bar-chart-test.js @@ -0,0 +1,89 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { findAll, render, triggerEvent } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; + +module('Integration | Component | clients/horizontal-bar-chart', function (hooks) { + setupRenderingTest(hooks); + hooks.beforeEach(function () { + this.set('chartLegend', [ + { label: 'entity clients', key: 'entity_clients' }, + { label: 'non-entity clients', key: 'non_entity_clients' }, + ]); + }); + + test('it renders chart and tooltip', async function (assert) { + const totalObject = { clients: 5, entity_clients: 2, non_entity_clients: 3 }; + const dataArray = [ + { label: 'second', clients: 3, entity_clients: 1, non_entity_clients: 2 }, + { label: 'first', clients: 2, entity_clients: 1, non_entity_clients: 1 }, + ]; + this.set('totalCounts', totalObject); + this.set('totalClientAttribution', dataArray); + + await render(hbs` + `); + + assert.dom('[data-test-horizontal-bar-chart]').exists(); + const dataBars = findAll('[data-test-horizontal-bar-chart] rect.data-bar'); + const actionBars = findAll('[data-test-horizontal-bar-chart] rect.action-bar'); + + assert.strictEqual(actionBars.length, dataArray.length, 'renders correct number of hover bars'); + assert.strictEqual(dataBars.length, dataArray.length * 2, 'renders correct number of data bars'); + + const textLabels = this.element.querySelectorAll('[data-test-horizontal-bar-chart] .tick text'); + const textTotals = this.element.querySelectorAll('[data-test-horizontal-bar-chart] text.total-value'); + textLabels.forEach((label, index) => { + assert.dom(label).hasText(dataArray[index].label, 'label renders correct text'); + }); + textTotals.forEach((label, index) => { + assert.dom(label).hasText(`${dataArray[index].clients}`, 'total value renders correct number'); + }); + for (const [i, bar] of actionBars.entries()) { + const percent = Math.round((dataArray[i].clients / totalObject.clients) * 100); + await triggerEvent(bar, 'mouseover'); + const tooltip = document.querySelector('.ember-modal-dialog'); + assert.dom(tooltip).includesText(`${percent}%`, 'tooltip renders correct percentage'); + } + }); + + test('it renders data with a large range', async function (assert) { + const totalObject = { clients: 5929393, entity_clients: 1391997, non_entity_clients: 4537396 }; + const dataArray = [ + { label: 'second', clients: 5929093, entity_clients: 1391896, non_entity_clients: 4537100 }, + { label: 'first', clients: 300, entity_clients: 101, non_entity_clients: 296 }, + ]; + this.set('totalCounts', totalObject); + this.set('totalClientAttribution', dataArray); + + await render(hbs` + `); + + assert.dom('[data-test-horizontal-bar-chart]').exists(); + const dataBars = findAll('[data-test-horizontal-bar-chart] rect.data-bar'); + const actionBars = findAll('[data-test-horizontal-bar-chart] rect.action-bar'); + + assert.strictEqual(actionBars.length, dataArray.length, 'renders correct number of hover bars'); + assert.strictEqual(dataBars.length, dataArray.length * 2, 'renders correct number of data bars'); + + for (const [i, bar] of actionBars.entries()) { + const percent = Math.round((dataArray[i].clients / totalObject.clients) * 100); + await triggerEvent(bar, 'mouseover'); + const tooltip = document.querySelector('.ember-modal-dialog'); + assert.dom(tooltip).includesText(`${percent}%`, 'tooltip renders correct percentage'); + } + }); +}); diff --git 
a/ui/tests/integration/components/clients/line-chart-test.js b/ui/tests/integration/components/clients/line-chart-test.js new file mode 100644 index 0000000..723df5d --- /dev/null +++ b/ui/tests/integration/components/clients/line-chart-test.js @@ -0,0 +1,226 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import sinon from 'sinon'; +import { setupRenderingTest } from 'ember-qunit'; +import { find, render, findAll, triggerEvent } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; +import { format, formatRFC3339, subMonths } from 'date-fns'; +import { formatChartDate } from 'core/utils/date-formatters'; +import timestamp from 'core/utils/timestamp'; + +module('Integration | Component | clients/line-chart', function (hooks) { + setupRenderingTest(hooks); + hooks.before(function () { + sinon.stub(timestamp, 'now').callsFake(() => new Date('2018-04-03T14:15:30')); + }); + hooks.beforeEach(function () { + this.set('xKey', 'foo'); + this.set('yKey', 'bar'); + this.set('dataset', [ + { + foo: 1, + bar: 4, + }, + { + foo: 2, + bar: 8, + }, + { + foo: 3, + bar: 14, + }, + { + foo: 4, + bar: 10, + }, + ]); + }); + hooks.after(function () { + timestamp.now.restore(); + }); + + test('it renders', async function (assert) { + await render(hbs` +
<Clients::LineChart @dataset={{this.dataset}} @xKey={{this.xKey}} @yKey={{this.yKey}} />
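+ {{!-- @xKey/@yKey name the dataset properties plotted on each axis; the x-axis label assertions below read dataset[xKey] directly --}}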
+ `); + + assert.dom('[data-test-line-chart]').exists('Chart is rendered'); + assert + .dom('[data-test-line-chart="plot-point"]') + .exists({ count: this.dataset.length }, `renders ${this.dataset.length} plot points`); + + findAll('[data-test-line-chart="x-axis-labels"] text').forEach((e, i) => { + assert + .dom(e) + .hasText(`${this.dataset[i][this.xKey]}`, `renders x-axis label: ${this.dataset[i][this.xKey]}`); + }); + assert.dom(find('[data-test-line-chart="y-axis-labels"] text')).hasText('0', `y-axis starts at 0`); + }); + + test('it renders upgrade data', async function (assert) { + const now = timestamp.now(); + this.set('dataset', [ + { + foo: format(subMonths(now, 4), 'M/yy'), + bar: 4, + }, + { + foo: format(subMonths(now, 3), 'M/yy'), + bar: 8, + }, + { + foo: format(subMonths(now, 2), 'M/yy'), + bar: 14, + }, + { + foo: format(subMonths(now, 1), 'M/yy'), + bar: 10, + }, + ]); + this.set('upgradeData', [ + { + id: '1.10.1', + previousVersion: '1.9.2', + timestampInstalled: formatRFC3339(subMonths(now, 2)), + }, + ]); + await render(hbs` +
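{{!-- the upgrade entry is installed two months before now, lining up with the third data point, which the final assertion expects to be highlighted --}} +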
<Clients::LineChart @dataset={{this.dataset}} @xKey={{this.xKey}} @yKey={{this.yKey}} @upgradeData={{this.upgradeData}} />
+ `); + assert.dom('[data-test-line-chart]').exists('Chart is rendered'); + assert + .dom('[data-test-line-chart="plot-point"]') + .exists({ count: this.dataset.length }, `renders ${this.dataset.length} plot points`); + assert + .dom(find(`[data-test-line-chart="upgrade-${this.dataset[2][this.xKey]}"]`)) + .hasStyle({ opacity: '1' }, `upgrade data point ${this.dataset[2][this.xKey]} has yellow highlight`); + }); + + test('it renders tooltip', async function (assert) { + const now = timestamp.now(); + const tooltipData = [ + { + month: format(subMonths(now, 4), 'M/yy'), + clients: 4, + new_clients: { + clients: 0, + }, + }, + { + month: format(subMonths(now, 3), 'M/yy'), + clients: 8, + new_clients: { + clients: 4, + }, + }, + { + month: format(subMonths(now, 2), 'M/yy'), + clients: 14, + new_clients: { + clients: 6, + }, + }, + { + month: format(subMonths(now, 1), 'M/yy'), + clients: 20, + new_clients: { + clients: 4, + }, + }, + ]; + this.set('dataset', tooltipData); + this.set('upgradeData', [ + { + id: '1.10.1', + previousVersion: '1.9.2', + timestampInstalled: formatRFC3339(subMonths(now, 2)), + }, + ]); + await render(hbs` +
<Clients::LineChart @dataset={{this.dataset}} @upgradeData={{this.upgradeData}} @xKey="month" @yKey="clients" />
+ `); + + const tooltipHoverCircles = findAll('[data-test-line-chart] circle.hover-circle'); + for (const [i, bar] of tooltipHoverCircles.entries()) { + await triggerEvent(bar, 'mouseover'); + const tooltip = document.querySelector('.ember-modal-dialog'); + const { month, clients, new_clients } = tooltipData[i]; + assert + .dom(tooltip) + .includesText( + `${formatChartDate(month)} ${clients} total clients ${new_clients.clients} new clients`, + `tooltip text is correct for ${month}` + ); + } + }); + + test('it fails gracefully when upgradeData is an object', async function (assert) { + this.set('upgradeData', { some: 'object' }); + await render(hbs` +
<Clients::LineChart @dataset={{this.dataset}} @xKey={{this.xKey}} @yKey={{this.yKey}} @upgradeData={{this.upgradeData}} />
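+ {{!-- upgradeData is deliberately a plain object here; the chart is expected to ignore any value that is not an array --}}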
+ `); + + assert + .dom('[data-test-line-chart="plot-point"]') + .exists({ count: this.dataset.length }, 'chart still renders when upgradeData is not an array'); + }); + + test('it fails gracefully when upgradeData has incorrect key names', async function (assert) { + this.set('upgradeData', [{ incorrect: 'key names' }]); + await render(hbs` +
<Clients::LineChart @dataset={{this.dataset}} @xKey={{this.xKey}} @yKey={{this.yKey}} @upgradeData={{this.upgradeData}} />
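+ {{!-- valid upgrade entries carry id, previousVersion and timestampInstalled; entries missing those keys should be skipped rather than break rendering --}}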
+ `); + + assert + .dom('[data-test-line-chart="plot-point"]') + .exists({ count: this.dataset.length }, 'chart still renders when upgradeData has incorrect keys'); + }); + + test('it renders empty state when no dataset', async function (assert) { + await render(hbs` +
+ +
+ `); + + assert.dom('[data-test-component="empty-state"]').exists('renders empty state when no data'); + assert + .dom('[data-test-empty-state-subtext]') + .hasText( + `this is a custom message to explain why you're not seeing a line chart`, + 'custom message renders' + ); + }); +}); diff --git a/ui/tests/integration/components/clients/monthly-usage-test.js b/ui/tests/integration/components/clients/monthly-usage-test.js new file mode 100644 index 0000000..8efc7bf --- /dev/null +++ b/ui/tests/integration/components/clients/monthly-usage-test.js @@ -0,0 +1,1487 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import sinon from 'sinon'; +import { setupRenderingTest } from 'ember-qunit'; +import { render } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; +import { formatRFC3339 } from 'date-fns'; +import { findAll } from '@ember/test-helpers'; +import { calculateAverage } from 'vault/utils/chart-helpers'; +import { formatNumber } from 'core/helpers/format-number'; +import timestamp from 'core/utils/timestamp'; + +module('Integration | Component | clients/monthly-usage', function (hooks) { + setupRenderingTest(hooks); + const DATASET = [ + { + month: '8/21', + timestamp: '2021-08-01T00:00:00Z', + counts: null, + namespaces: [], + new_clients: { + month: '8/21', + namespaces: [], + }, + namespaces_by_key: {}, + }, + { + month: '9/21', + clients: 19251, + entity_clients: 10713, + non_entity_clients: 8538, + namespaces: [ + { + label: 'root', + clients: 4852, + entity_clients: 3108, + non_entity_clients: 1744, + mounts: [ + { + label: 'path-3-with-over-18-characters', + clients: 1598, + entity_clients: 687, + non_entity_clients: 911, + }, + { + label: 'path-1', + clients: 1429, + entity_clients: 981, + non_entity_clients: 448, + }, + { + label: 'path-4-with-over-18-characters', + clients: 965, + entity_clients: 720, + non_entity_clients: 245, + }, + { + label: 'path-2', + clients: 860, + entity_clients: 720, + non_entity_clients: 140, + }, + ], + }, + { + label: 'test-ns-2/', + clients: 4702, + entity_clients: 3057, + non_entity_clients: 1645, + mounts: [ + { + label: 'path-3-with-over-18-characters', + clients: 1686, + entity_clients: 926, + non_entity_clients: 760, + }, + { + label: 'path-4-with-over-18-characters', + clients: 1525, + entity_clients: 789, + non_entity_clients: 736, + }, + { + label: 'path-2', + clients: 905, + entity_clients: 849, + non_entity_clients: 56, + }, + { + label: 'path-1', + clients: 586, + entity_clients: 493, + non_entity_clients: 93, + }, + ], + }, + { + label: 'test-ns-1/', + clients: 4569, + entity_clients: 1871, + non_entity_clients: 2698, + mounts: [ + { + label: 'path-4-with-over-18-characters', + clients: 1534, + entity_clients: 619, + non_entity_clients: 915, + }, + { + label: 'path-3-with-over-18-characters', + clients: 1528, + entity_clients: 589, + non_entity_clients: 939, + }, + { + label: 'path-1', + clients: 828, + entity_clients: 612, + non_entity_clients: 216, + }, + { + label: 'path-2', + clients: 679, + entity_clients: 51, + non_entity_clients: 628, + }, + ], + }, + { + label: 'test-ns-2-with-namespace-length-over-18-characters/', + clients: 3771, + entity_clients: 2029, + non_entity_clients: 1742, + mounts: [ + { + label: 'path-3-with-over-18-characters', + clients: 1249, + entity_clients: 793, + non_entity_clients: 456, + }, + { + label: 'path-1', + clients: 1046, + entity_clients: 444, + non_entity_clients: 602, + }, + { + label: 'path-2', + 
clients: 930, + entity_clients: 277, + non_entity_clients: 653, + }, + { + label: 'path-4-with-over-18-characters', + clients: 546, + entity_clients: 515, + non_entity_clients: 31, + }, + ], + }, + { + label: 'test-ns-1-with-namespace-length-over-18-characters/', + clients: 1357, + entity_clients: 648, + non_entity_clients: 709, + mounts: [ + { + label: 'path-1', + clients: 613, + entity_clients: 23, + non_entity_clients: 590, + }, + { + label: 'path-3-with-over-18-characters', + clients: 543, + entity_clients: 465, + non_entity_clients: 78, + }, + { + label: 'path-2', + clients: 146, + entity_clients: 141, + non_entity_clients: 5, + }, + { + label: 'path-4-with-over-18-characters', + clients: 55, + entity_clients: 19, + non_entity_clients: 36, + }, + ], + }, + ], + namespaces_by_key: { + root: { + month: '9/21', + clients: 4852, + entity_clients: 3108, + non_entity_clients: 1744, + new_clients: { + month: '9/21', + label: 'root', + clients: 2525, + entity_clients: 1315, + non_entity_clients: 1210, + }, + mounts_by_key: { + 'path-3-with-over-18-characters': { + month: '9/21', + label: 'path-3-with-over-18-characters', + clients: 1598, + entity_clients: 687, + non_entity_clients: 911, + new_clients: { + month: '9/21', + label: 'path-3-with-over-18-characters', + clients: 1055, + entity_clients: 257, + non_entity_clients: 798, + }, + }, + 'path-1': { + month: '9/21', + label: 'path-1', + clients: 1429, + entity_clients: 981, + non_entity_clients: 448, + new_clients: { + month: '9/21', + label: 'path-1', + clients: 543, + entity_clients: 340, + non_entity_clients: 203, + }, + }, + 'path-4-with-over-18-characters': { + month: '9/21', + label: 'path-4-with-over-18-characters', + clients: 965, + entity_clients: 720, + non_entity_clients: 245, + new_clients: { + month: '9/21', + label: 'path-4-with-over-18-characters', + clients: 136, + entity_clients: 7, + non_entity_clients: 129, + }, + }, + 'path-2': { + month: '9/21', + label: 'path-2', + clients: 860, + entity_clients: 720, + non_entity_clients: 140, + new_clients: { + month: '9/21', + label: 'path-2', + clients: 791, + entity_clients: 711, + non_entity_clients: 80, + }, + }, + }, + }, + 'test-ns-2/': { + month: '9/21', + clients: 4702, + entity_clients: 3057, + non_entity_clients: 1645, + new_clients: { + month: '9/21', + label: 'test-ns-2/', + clients: 1537, + entity_clients: 662, + non_entity_clients: 875, + }, + mounts_by_key: { + 'path-3-with-over-18-characters': { + month: '9/21', + label: 'path-3-with-over-18-characters', + clients: 1686, + entity_clients: 926, + non_entity_clients: 760, + new_clients: { + month: '9/21', + label: 'path-3-with-over-18-characters', + clients: 520, + entity_clients: 13, + non_entity_clients: 507, + }, + }, + 'path-4-with-over-18-characters': { + month: '9/21', + label: 'path-4-with-over-18-characters', + clients: 1525, + entity_clients: 789, + non_entity_clients: 736, + new_clients: { + month: '9/21', + label: 'path-4-with-over-18-characters', + clients: 499, + entity_clients: 197, + non_entity_clients: 302, + }, + }, + 'path-2': { + month: '9/21', + label: 'path-2', + clients: 905, + entity_clients: 849, + non_entity_clients: 56, + new_clients: { + month: '9/21', + label: 'path-2', + clients: 398, + entity_clients: 370, + non_entity_clients: 28, + }, + }, + 'path-1': { + month: '9/21', + label: 'path-1', + clients: 586, + entity_clients: 493, + non_entity_clients: 93, + new_clients: { + month: '9/21', + label: 'path-1', + clients: 120, + entity_clients: 82, + non_entity_clients: 38, + }, + }, + }, + }, 
+ 'test-ns-1/': { + month: '9/21', + clients: 4569, + entity_clients: 1871, + non_entity_clients: 2698, + new_clients: { + month: '9/21', + label: 'test-ns-1/', + clients: 2712, + entity_clients: 879, + non_entity_clients: 1833, + }, + mounts_by_key: { + 'path-4-with-over-18-characters': { + month: '9/21', + label: 'path-4-with-over-18-characters', + clients: 1534, + entity_clients: 619, + non_entity_clients: 915, + new_clients: { + month: '9/21', + label: 'path-4-with-over-18-characters', + clients: 740, + entity_clients: 39, + non_entity_clients: 701, + }, + }, + 'path-3-with-over-18-characters': { + month: '9/21', + label: 'path-3-with-over-18-characters', + clients: 1528, + entity_clients: 589, + non_entity_clients: 939, + new_clients: { + month: '9/21', + label: 'path-3-with-over-18-characters', + clients: 1250, + entity_clients: 536, + non_entity_clients: 714, + }, + }, + 'path-1': { + month: '9/21', + label: 'path-1', + clients: 828, + entity_clients: 612, + non_entity_clients: 216, + new_clients: { + month: '9/21', + label: 'path-1', + clients: 463, + entity_clients: 283, + non_entity_clients: 180, + }, + }, + 'path-2': { + month: '9/21', + label: 'path-2', + clients: 679, + entity_clients: 51, + non_entity_clients: 628, + new_clients: { + month: '9/21', + label: 'path-2', + clients: 259, + entity_clients: 21, + non_entity_clients: 238, + }, + }, + }, + }, + 'test-ns-2-with-namespace-length-over-18-characters/': { + month: '9/21', + clients: 3771, + entity_clients: 2029, + non_entity_clients: 1742, + new_clients: { + month: '9/21', + label: 'test-ns-2-with-namespace-length-over-18-characters/', + clients: 2087, + entity_clients: 902, + non_entity_clients: 1185, + }, + mounts_by_key: { + 'path-3-with-over-18-characters': { + month: '9/21', + label: 'path-3-with-over-18-characters', + clients: 1249, + entity_clients: 793, + non_entity_clients: 456, + new_clients: { + month: '9/21', + label: 'path-3-with-over-18-characters', + clients: 472, + entity_clients: 260, + non_entity_clients: 212, + }, + }, + 'path-1': { + month: '9/21', + label: 'path-1', + clients: 1046, + entity_clients: 444, + non_entity_clients: 602, + new_clients: { + month: '9/21', + label: 'path-1', + clients: 775, + entity_clients: 349, + non_entity_clients: 426, + }, + }, + 'path-2': { + month: '9/21', + label: 'path-2', + clients: 930, + entity_clients: 277, + non_entity_clients: 653, + new_clients: { + month: '9/21', + label: 'path-2', + clients: 632, + entity_clients: 90, + non_entity_clients: 542, + }, + }, + 'path-4-with-over-18-characters': { + month: '9/21', + label: 'path-4-with-over-18-characters', + clients: 546, + entity_clients: 515, + non_entity_clients: 31, + new_clients: { + month: '9/21', + label: 'path-4-with-over-18-characters', + clients: 208, + entity_clients: 203, + non_entity_clients: 5, + }, + }, + }, + }, + 'test-ns-1-with-namespace-length-over-18-characters/': { + month: '9/21', + clients: 1357, + entity_clients: 648, + non_entity_clients: 709, + new_clients: { + month: '9/21', + label: 'test-ns-1-with-namespace-length-over-18-characters/', + clients: 560, + entity_clients: 189, + non_entity_clients: 371, + }, + mounts_by_key: { + 'path-1': { + month: '9/21', + label: 'path-1', + clients: 613, + entity_clients: 23, + non_entity_clients: 590, + new_clients: { + month: '9/21', + label: 'path-1', + clients: 318, + entity_clients: 12, + non_entity_clients: 306, + }, + }, + 'path-3-with-over-18-characters': { + month: '9/21', + label: 'path-3-with-over-18-characters', + clients: 543, + 
entity_clients: 465, + non_entity_clients: 78, + new_clients: { + month: '9/21', + label: 'path-3-with-over-18-characters', + clients: 126, + entity_clients: 89, + non_entity_clients: 37, + }, + }, + 'path-2': { + month: '9/21', + label: 'path-2', + clients: 146, + entity_clients: 141, + non_entity_clients: 5, + new_clients: { + month: '9/21', + label: 'path-2', + clients: 76, + entity_clients: 75, + non_entity_clients: 1, + }, + }, + 'path-4-with-over-18-characters': { + month: '9/21', + label: 'path-4-with-over-18-characters', + clients: 55, + entity_clients: 19, + non_entity_clients: 36, + new_clients: { + month: '9/21', + label: 'path-4-with-over-18-characters', + clients: 40, + entity_clients: 13, + non_entity_clients: 27, + }, + }, + }, + }, + }, + new_clients: { + month: '9/21', + clients: 9421, + entity_clients: 3947, + non_entity_clients: 5474, + namespaces: [ + { + label: 'test-ns-1/', + clients: 2712, + entity_clients: 879, + non_entity_clients: 1833, + mounts: [ + { + label: 'path-3-with-over-18-characters', + clients: 1250, + entity_clients: 536, + non_entity_clients: 714, + }, + { + label: 'path-4-with-over-18-characters', + clients: 740, + entity_clients: 39, + non_entity_clients: 701, + }, + { + label: 'path-1', + clients: 463, + entity_clients: 283, + non_entity_clients: 180, + }, + { + label: 'path-2', + clients: 259, + entity_clients: 21, + non_entity_clients: 238, + }, + ], + }, + { + label: 'root', + clients: 2525, + entity_clients: 1315, + non_entity_clients: 1210, + mounts: [ + { + label: 'path-3-with-over-18-characters', + clients: 1055, + entity_clients: 257, + non_entity_clients: 798, + }, + { + label: 'path-2', + clients: 791, + entity_clients: 711, + non_entity_clients: 80, + }, + { + label: 'path-1', + clients: 543, + entity_clients: 340, + non_entity_clients: 203, + }, + { + label: 'path-4-with-over-18-characters', + clients: 136, + entity_clients: 7, + non_entity_clients: 129, + }, + ], + }, + { + label: 'test-ns-2-with-namespace-length-over-18-characters/', + clients: 2087, + entity_clients: 902, + non_entity_clients: 1185, + mounts: [ + { + label: 'path-1', + clients: 775, + entity_clients: 349, + non_entity_clients: 426, + }, + { + label: 'path-2', + clients: 632, + entity_clients: 90, + non_entity_clients: 542, + }, + { + label: 'path-3-with-over-18-characters', + clients: 472, + entity_clients: 260, + non_entity_clients: 212, + }, + { + label: 'path-4-with-over-18-characters', + clients: 208, + entity_clients: 203, + non_entity_clients: 5, + }, + ], + }, + { + label: 'test-ns-2/', + clients: 1537, + entity_clients: 662, + non_entity_clients: 875, + mounts: [ + { + label: 'path-3-with-over-18-characters', + clients: 520, + entity_clients: 13, + non_entity_clients: 507, + }, + { + label: 'path-4-with-over-18-characters', + clients: 499, + entity_clients: 197, + non_entity_clients: 302, + }, + { + label: 'path-2', + clients: 398, + entity_clients: 370, + non_entity_clients: 28, + }, + { + label: 'path-1', + clients: 120, + entity_clients: 82, + non_entity_clients: 38, + }, + ], + }, + { + label: 'test-ns-1-with-namespace-length-over-18-characters/', + clients: 560, + entity_clients: 189, + non_entity_clients: 371, + mounts: [ + { + label: 'path-1', + clients: 318, + entity_clients: 12, + non_entity_clients: 306, + }, + { + label: 'path-3-with-over-18-characters', + clients: 126, + entity_clients: 89, + non_entity_clients: 37, + }, + { + label: 'path-2', + clients: 76, + entity_clients: 75, + non_entity_clients: 1, + }, + { + label: 
'path-4-with-over-18-characters', + clients: 40, + entity_clients: 13, + non_entity_clients: 27, + }, + ], + }, + ], + }, + }, + { + month: '10/21', + clients: 19417, + entity_clients: 10105, + non_entity_clients: 9312, + namespaces: [ + { + label: 'root', + clients: 4835, + entity_clients: 2364, + non_entity_clients: 2471, + mounts: [ + { + label: 'path-3-with-over-18-characters', + clients: 1797, + entity_clients: 883, + non_entity_clients: 914, + }, + { + label: 'path-1', + clients: 1501, + entity_clients: 663, + non_entity_clients: 838, + }, + { + label: 'path-2', + clients: 1461, + entity_clients: 800, + non_entity_clients: 661, + }, + { + label: 'path-4-with-over-18-characters', + clients: 76, + entity_clients: 18, + non_entity_clients: 58, + }, + ], + }, + { + label: 'test-ns-2/', + clients: 4027, + entity_clients: 1692, + non_entity_clients: 2335, + mounts: [ + { + label: 'path-4-with-over-18-characters', + clients: 1223, + entity_clients: 820, + non_entity_clients: 403, + }, + { + label: 'path-3-with-over-18-characters', + clients: 1110, + entity_clients: 111, + non_entity_clients: 999, + }, + { + label: 'path-1', + clients: 1034, + entity_clients: 462, + non_entity_clients: 572, + }, + { + label: 'path-2', + clients: 660, + entity_clients: 299, + non_entity_clients: 361, + }, + ], + }, + { + label: 'test-ns-2-with-namespace-length-over-18-characters/', + clients: 3924, + entity_clients: 2132, + non_entity_clients: 1792, + mounts: [ + { + label: 'path-3-with-over-18-characters', + clients: 1411, + entity_clients: 765, + non_entity_clients: 646, + }, + { + label: 'path-2', + clients: 1205, + entity_clients: 382, + non_entity_clients: 823, + }, + { + label: 'path-1', + clients: 884, + entity_clients: 850, + non_entity_clients: 34, + }, + { + label: 'path-4-with-over-18-characters', + clients: 424, + entity_clients: 135, + non_entity_clients: 289, + }, + ], + }, + { + label: 'test-ns-1-with-namespace-length-over-18-characters/', + clients: 3639, + entity_clients: 2314, + non_entity_clients: 1325, + mounts: [ + { + label: 'path-1', + clients: 1062, + entity_clients: 781, + non_entity_clients: 281, + }, + { + label: 'path-4-with-over-18-characters', + clients: 1021, + entity_clients: 609, + non_entity_clients: 412, + }, + { + label: 'path-2', + clients: 849, + entity_clients: 426, + non_entity_clients: 423, + }, + { + label: 'path-3-with-over-18-characters', + clients: 707, + entity_clients: 498, + non_entity_clients: 209, + }, + ], + }, + { + label: 'test-ns-1/', + clients: 2992, + entity_clients: 1603, + non_entity_clients: 1389, + mounts: [ + { + label: 'path-1', + clients: 1140, + entity_clients: 480, + non_entity_clients: 660, + }, + { + label: 'path-4-with-over-18-characters', + clients: 1058, + entity_clients: 651, + non_entity_clients: 407, + }, + { + label: 'path-2', + clients: 575, + entity_clients: 416, + non_entity_clients: 159, + }, + { + label: 'path-3-with-over-18-characters', + clients: 219, + entity_clients: 56, + non_entity_clients: 163, + }, + ], + }, + ], + namespaces_by_key: { + root: { + month: '10/21', + clients: 4835, + entity_clients: 2364, + non_entity_clients: 2471, + new_clients: { + month: '10/21', + label: 'root', + clients: 1732, + entity_clients: 586, + non_entity_clients: 1146, + }, + mounts_by_key: { + 'path-3-with-over-18-characters': { + month: '10/21', + label: 'path-3-with-over-18-characters', + clients: 1797, + entity_clients: 883, + non_entity_clients: 914, + new_clients: { + month: '10/21', + label: 'path-3-with-over-18-characters', + clients: 
907, + entity_clients: 192, + non_entity_clients: 715, + }, + }, + 'path-1': { + month: '10/21', + label: 'path-1', + clients: 1501, + entity_clients: 663, + non_entity_clients: 838, + new_clients: { + month: '10/21', + label: 'path-1', + clients: 276, + entity_clients: 202, + non_entity_clients: 74, + }, + }, + 'path-2': { + month: '10/21', + label: 'path-2', + clients: 1461, + entity_clients: 800, + non_entity_clients: 661, + new_clients: { + month: '10/21', + label: 'path-2', + clients: 502, + entity_clients: 189, + non_entity_clients: 313, + }, + }, + 'path-4-with-over-18-characters': { + month: '10/21', + label: 'path-4-with-over-18-characters', + clients: 76, + entity_clients: 18, + non_entity_clients: 58, + new_clients: { + month: '10/21', + label: 'path-4-with-over-18-characters', + clients: 47, + entity_clients: 3, + non_entity_clients: 44, + }, + }, + }, + }, + 'test-ns-2/': { + month: '10/21', + clients: 4027, + entity_clients: 1692, + non_entity_clients: 2335, + new_clients: { + month: '10/21', + label: 'test-ns-2/', + clients: 2301, + entity_clients: 678, + non_entity_clients: 1623, + }, + mounts_by_key: { + 'path-4-with-over-18-characters': { + month: '10/21', + label: 'path-4-with-over-18-characters', + clients: 1223, + entity_clients: 820, + non_entity_clients: 403, + new_clients: { + month: '10/21', + label: 'path-4-with-over-18-characters', + clients: 602, + entity_clients: 212, + non_entity_clients: 390, + }, + }, + 'path-3-with-over-18-characters': { + month: '10/21', + label: 'path-3-with-over-18-characters', + clients: 1110, + entity_clients: 111, + non_entity_clients: 999, + new_clients: { + month: '10/21', + label: 'path-3-with-over-18-characters', + clients: 440, + entity_clients: 7, + non_entity_clients: 433, + }, + }, + 'path-1': { + month: '10/21', + label: 'path-1', + clients: 1034, + entity_clients: 462, + non_entity_clients: 572, + new_clients: { + month: '10/21', + label: 'path-1', + clients: 980, + entity_clients: 454, + non_entity_clients: 526, + }, + }, + 'path-2': { + month: '10/21', + label: 'path-2', + clients: 660, + entity_clients: 299, + non_entity_clients: 361, + new_clients: { + month: '10/21', + label: 'path-2', + clients: 279, + entity_clients: 5, + non_entity_clients: 274, + }, + }, + }, + }, + 'test-ns-2-with-namespace-length-over-18-characters/': { + month: '10/21', + clients: 3924, + entity_clients: 2132, + non_entity_clients: 1792, + new_clients: { + month: '10/21', + label: 'test-ns-2-with-namespace-length-over-18-characters/', + clients: 1561, + entity_clients: 1225, + non_entity_clients: 336, + }, + mounts_by_key: { + 'path-3-with-over-18-characters': { + month: '10/21', + label: 'path-3-with-over-18-characters', + clients: 1411, + entity_clients: 765, + non_entity_clients: 646, + new_clients: { + month: '10/21', + label: 'path-3-with-over-18-characters', + clients: 948, + entity_clients: 660, + non_entity_clients: 288, + }, + }, + 'path-2': { + month: '10/21', + label: 'path-2', + clients: 1205, + entity_clients: 382, + non_entity_clients: 823, + new_clients: { + month: '10/21', + label: 'path-2', + clients: 305, + entity_clients: 289, + non_entity_clients: 16, + }, + }, + 'path-1': { + month: '10/21', + label: 'path-1', + clients: 884, + entity_clients: 850, + non_entity_clients: 34, + new_clients: { + month: '10/21', + label: 'path-1', + clients: 230, + entity_clients: 207, + non_entity_clients: 23, + }, + }, + 'path-4-with-over-18-characters': { + month: '10/21', + label: 'path-4-with-over-18-characters', + clients: 424, + 
entity_clients: 135, + non_entity_clients: 289, + new_clients: { + month: '10/21', + label: 'path-4-with-over-18-characters', + clients: 78, + entity_clients: 69, + non_entity_clients: 9, + }, + }, + }, + }, + 'test-ns-1-with-namespace-length-over-18-characters/': { + month: '10/21', + clients: 3639, + entity_clients: 2314, + non_entity_clients: 1325, + new_clients: { + month: '10/21', + label: 'test-ns-1-with-namespace-length-over-18-characters/', + clients: 1245, + entity_clients: 710, + non_entity_clients: 535, + }, + mounts_by_key: { + 'path-1': { + month: '10/21', + label: 'path-1', + clients: 1062, + entity_clients: 781, + non_entity_clients: 281, + new_clients: { + month: '10/21', + label: 'path-1', + clients: 288, + entity_clients: 63, + non_entity_clients: 225, + }, + }, + 'path-4-with-over-18-characters': { + month: '10/21', + label: 'path-4-with-over-18-characters', + clients: 1021, + entity_clients: 609, + non_entity_clients: 412, + new_clients: { + month: '10/21', + label: 'path-4-with-over-18-characters', + clients: 440, + entity_clients: 323, + non_entity_clients: 117, + }, + }, + 'path-2': { + month: '10/21', + label: 'path-2', + clients: 849, + entity_clients: 426, + non_entity_clients: 423, + new_clients: { + month: '10/21', + label: 'path-2', + clients: 339, + entity_clients: 308, + non_entity_clients: 31, + }, + }, + 'path-3-with-over-18-characters': { + month: '10/21', + label: 'path-3-with-over-18-characters', + clients: 707, + entity_clients: 498, + non_entity_clients: 209, + new_clients: { + month: '10/21', + label: 'path-3-with-over-18-characters', + clients: 178, + entity_clients: 16, + non_entity_clients: 162, + }, + }, + }, + }, + 'test-ns-1/': { + month: '10/21', + clients: 2992, + entity_clients: 1603, + non_entity_clients: 1389, + new_clients: { + month: '10/21', + label: 'test-ns-1/', + clients: 820, + entity_clients: 356, + non_entity_clients: 464, + }, + mounts_by_key: { + 'path-1': { + month: '10/21', + label: 'path-1', + clients: 1140, + entity_clients: 480, + non_entity_clients: 660, + new_clients: { + month: '10/21', + label: 'path-1', + clients: 239, + entity_clients: 30, + non_entity_clients: 209, + }, + }, + 'path-4-with-over-18-characters': { + month: '10/21', + label: 'path-4-with-over-18-characters', + clients: 1058, + entity_clients: 651, + non_entity_clients: 407, + new_clients: { + month: '10/21', + label: 'path-4-with-over-18-characters', + clients: 256, + entity_clients: 63, + non_entity_clients: 193, + }, + }, + 'path-2': { + month: '10/21', + label: 'path-2', + clients: 575, + entity_clients: 416, + non_entity_clients: 159, + new_clients: { + month: '10/21', + label: 'path-2', + clients: 259, + entity_clients: 245, + non_entity_clients: 14, + }, + }, + 'path-3-with-over-18-characters': { + month: '10/21', + label: 'path-3-with-over-18-characters', + clients: 219, + entity_clients: 56, + non_entity_clients: 163, + new_clients: { + month: '10/21', + label: 'path-3-with-over-18-characters', + clients: 66, + entity_clients: 18, + non_entity_clients: 48, + }, + }, + }, + }, + }, + new_clients: { + month: '10/21', + clients: 7659, + entity_clients: 3555, + non_entity_clients: 4104, + namespaces: [ + { + label: 'test-ns-2/', + clients: 2301, + entity_clients: 678, + non_entity_clients: 1623, + mounts: [ + { + label: 'path-1', + clients: 980, + entity_clients: 454, + non_entity_clients: 526, + }, + { + label: 'path-4-with-over-18-characters', + clients: 602, + entity_clients: 212, + non_entity_clients: 390, + }, + { + label: 
'path-3-with-over-18-characters', + clients: 440, + entity_clients: 7, + non_entity_clients: 433, + }, + { + label: 'path-2', + clients: 279, + entity_clients: 5, + non_entity_clients: 274, + }, + ], + }, + { + label: 'root', + clients: 1732, + entity_clients: 586, + non_entity_clients: 1146, + mounts: [ + { + label: 'path-3-with-over-18-characters', + clients: 907, + entity_clients: 192, + non_entity_clients: 715, + }, + { + label: 'path-2', + clients: 502, + entity_clients: 189, + non_entity_clients: 313, + }, + { + label: 'path-1', + clients: 276, + entity_clients: 202, + non_entity_clients: 74, + }, + { + label: 'path-4-with-over-18-characters', + clients: 47, + entity_clients: 3, + non_entity_clients: 44, + }, + ], + }, + { + label: 'test-ns-2-with-namespace-length-over-18-characters/', + clients: 1561, + entity_clients: 1225, + non_entity_clients: 336, + mounts: [ + { + label: 'path-3-with-over-18-characters', + clients: 948, + entity_clients: 660, + non_entity_clients: 288, + }, + { + label: 'path-2', + clients: 305, + entity_clients: 289, + non_entity_clients: 16, + }, + { + label: 'path-1', + clients: 230, + entity_clients: 207, + non_entity_clients: 23, + }, + { + label: 'path-4-with-over-18-characters', + clients: 78, + entity_clients: 69, + non_entity_clients: 9, + }, + ], + }, + { + label: 'test-ns-1-with-namespace-length-over-18-characters/', + clients: 1245, + entity_clients: 710, + non_entity_clients: 535, + mounts: [ + { + label: 'path-4-with-over-18-characters', + clients: 440, + entity_clients: 323, + non_entity_clients: 117, + }, + { + label: 'path-2', + clients: 339, + entity_clients: 308, + non_entity_clients: 31, + }, + { + label: 'path-1', + clients: 288, + entity_clients: 63, + non_entity_clients: 225, + }, + { + label: 'path-3-with-over-18-characters', + clients: 178, + entity_clients: 16, + non_entity_clients: 162, + }, + ], + }, + { + label: 'test-ns-1/', + clients: 820, + entity_clients: 356, + non_entity_clients: 464, + mounts: [ + { + label: 'path-2', + clients: 259, + entity_clients: 245, + non_entity_clients: 14, + }, + { + label: 'path-4-with-over-18-characters', + clients: 256, + entity_clients: 63, + non_entity_clients: 193, + }, + { + label: 'path-1', + clients: 239, + entity_clients: 30, + non_entity_clients: 209, + }, + { + label: 'path-3-with-over-18-characters', + clients: 66, + entity_clients: 18, + non_entity_clients: 48, + }, + ], + }, + ], + }, + }, + ]; + hooks.before(function () { + sinon.stub(timestamp, 'now').callsFake(() => new Date('2018-04-03T14:15:30')); + }); + hooks.beforeEach(function () { + this.set('timestamp', formatRFC3339(timestamp.now())); + this.set('isDateRange', true); + this.set('chartLegend', [ + { label: 'entity clients', key: 'entity_clients' }, + { label: 'non-entity clients', key: 'non_entity_clients' }, + ]); + this.set('byMonthActivityData', DATASET); + }); + hooks.after(function () { + timestamp.now.restore(); + }); + + test('it renders empty state with no data', async function (assert) { + await render(hbs` + + + `); + assert.dom('[data-test-monthly-usage]').exists('monthly usage component renders'); + assert.dom('[data-test-component="empty-state"]').exists(); + assert.dom('[data-test-empty-state-subtext]').hasText('No data to display'); + assert.dom('[data-test-monthly-usage-average-total] p.data-details').hasText('0', 'average total is 0'); + assert.dom('[data-test-monthly-usage-average-new] p.data-details').hasText('0', 'average new is 0'); + assert.dom('[data-test-vertical-bar-chart]').doesNotExist('vertical 
bar chart does not render'); + assert.dom('[data-test-monthly-usage-legend]').doesNotExist('legend does not exist'); + assert.dom('[data-test-monthly-usage-timestamp]').exists('renders timestamp'); + }); + + test('it renders with month over month activity data', async function (assert) { + const expectedTotal = formatNumber([calculateAverage(DATASET, 'clients')]); + const expectedNew = formatNumber([ + calculateAverage( + DATASET?.map((d) => d.new_clients), + 'clients' + ), + ]); + await render(hbs` + + + `); + assert.dom('[data-test-monthly-usage]').exists('monthly usage component renders'); + assert.dom('[data-test-component="empty-state"]').doesNotExist(); + assert.dom('[data-test-vertical-bar-chart]').exists('vertical bar chart displays'); + assert.dom('[data-test-monthly-usage-legend]').exists('renders vertical bar chart legend'); + assert.dom('[data-test-monthly-usage-timestamp]').exists('renders timestamp'); + + findAll('[data-test-vertical-chart="x-axis-labels"] text').forEach((e, i) => { + assert.dom(e).hasText(`${DATASET[i].month}`, `renders x-axis label: ${DATASET[i].month}`); + }); + assert + .dom('[data-test-vertical-chart="data-bar"]') + .exists( + { count: DATASET.filter((m) => m.counts !== null).length * 2 }, + 'renders correct number of data bars' + ); + assert + .dom('[data-test-monthly-usage-average-total] p.data-details') + .hasText(`${expectedTotal}`, `renders correct total average ${expectedTotal}`); + assert + .dom('[data-test-monthly-usage-average-new] p.data-details') + .hasText(`${expectedNew}`, `renders correct new average ${expectedNew}`); + }); +}); diff --git a/ui/tests/integration/components/clients/running-total-test.js b/ui/tests/integration/components/clients/running-total-test.js new file mode 100644 index 0000000..3e71afa --- /dev/null +++ b/ui/tests/integration/components/clients/running-total-test.js @@ -0,0 +1,1596 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import sinon from 'sinon'; +import { setupRenderingTest } from 'ember-qunit'; +import { render } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; +import { formatRFC3339 } from 'date-fns'; +import { findAll } from '@ember/test-helpers'; +import { calculateAverage } from 'vault/utils/chart-helpers'; +import { formatNumber } from 'core/helpers/format-number'; +import timestamp from 'core/utils/timestamp'; + +module('Integration | Component | clients/running-total', function (hooks) { + setupRenderingTest(hooks); + const MONTHLY_ACTIVITY = [ + { + month: '8/21', + timestamp: '2021-08-01T00:00:00Z', + counts: null, + namespaces: [], + new_clients: { + month: '8/21', + namespaces: [], + }, + namespaces_by_key: {}, + }, + { + month: '9/21', + clients: 19251, + entity_clients: 10713, + non_entity_clients: 8538, + namespaces: [ + { + label: 'root', + clients: 4852, + entity_clients: 3108, + non_entity_clients: 1744, + mounts: [ + { + label: 'path-3-with-over-18-characters', + clients: 1598, + entity_clients: 687, + non_entity_clients: 911, + }, + { + label: 'path-1', + clients: 1429, + entity_clients: 981, + non_entity_clients: 448, + }, + { + label: 'path-4-with-over-18-characters', + clients: 965, + entity_clients: 720, + non_entity_clients: 245, + }, + { + label: 'path-2', + clients: 860, + entity_clients: 720, + non_entity_clients: 140, + }, + ], + }, + { + label: 'test-ns-2/', + clients: 4702, + entity_clients: 3057, + non_entity_clients: 1645, + mounts: [ + { + label: 'path-3-with-over-18-characters', + clients: 1686, + entity_clients: 926, + non_entity_clients: 760, + }, + { + label: 'path-4-with-over-18-characters', + clients: 1525, + entity_clients: 789, + non_entity_clients: 736, + }, + { + label: 'path-2', + clients: 905, + entity_clients: 849, + non_entity_clients: 56, + }, + { + label: 'path-1', + clients: 586, + entity_clients: 493, + non_entity_clients: 93, + }, + ], + }, + { + label: 'test-ns-1/', + clients: 4569, + entity_clients: 1871, + non_entity_clients: 2698, + mounts: [ + { + label: 'path-4-with-over-18-characters', + clients: 1534, + entity_clients: 619, + non_entity_clients: 915, + }, + { + label: 'path-3-with-over-18-characters', + clients: 1528, + entity_clients: 589, + non_entity_clients: 939, + }, + { + label: 'path-1', + clients: 828, + entity_clients: 612, + non_entity_clients: 216, + }, + { + label: 'path-2', + clients: 679, + entity_clients: 51, + non_entity_clients: 628, + }, + ], + }, + { + label: 'test-ns-2-with-namespace-length-over-18-characters/', + clients: 3771, + entity_clients: 2029, + non_entity_clients: 1742, + mounts: [ + { + label: 'path-3-with-over-18-characters', + clients: 1249, + entity_clients: 793, + non_entity_clients: 456, + }, + { + label: 'path-1', + clients: 1046, + entity_clients: 444, + non_entity_clients: 602, + }, + { + label: 'path-2', + clients: 930, + entity_clients: 277, + non_entity_clients: 653, + }, + { + label: 'path-4-with-over-18-characters', + clients: 546, + entity_clients: 515, + non_entity_clients: 31, + }, + ], + }, + { + label: 'test-ns-1-with-namespace-length-over-18-characters/', + clients: 1357, + entity_clients: 648, + non_entity_clients: 709, + mounts: [ + { + label: 'path-1', + clients: 613, + entity_clients: 23, + non_entity_clients: 590, + }, + { + label: 'path-3-with-over-18-characters', + clients: 543, + entity_clients: 465, + non_entity_clients: 78, + }, + { + label: 'path-2', + clients: 146, + 
entity_clients: 141, + non_entity_clients: 5, + }, + { + label: 'path-4-with-over-18-characters', + clients: 55, + entity_clients: 19, + non_entity_clients: 36, + }, + ], + }, + ], + namespaces_by_key: { + root: { + month: '9/21', + clients: 4852, + entity_clients: 3108, + non_entity_clients: 1744, + new_clients: { + month: '9/21', + label: 'root', + clients: 2525, + entity_clients: 1315, + non_entity_clients: 1210, + }, + mounts_by_key: { + 'path-3-with-over-18-characters': { + month: '9/21', + label: 'path-3-with-over-18-characters', + clients: 1598, + entity_clients: 687, + non_entity_clients: 911, + new_clients: { + month: '9/21', + label: 'path-3-with-over-18-characters', + clients: 1055, + entity_clients: 257, + non_entity_clients: 798, + }, + }, + 'path-1': { + month: '9/21', + label: 'path-1', + clients: 1429, + entity_clients: 981, + non_entity_clients: 448, + new_clients: { + month: '9/21', + label: 'path-1', + clients: 543, + entity_clients: 340, + non_entity_clients: 203, + }, + }, + 'path-4-with-over-18-characters': { + month: '9/21', + label: 'path-4-with-over-18-characters', + clients: 965, + entity_clients: 720, + non_entity_clients: 245, + new_clients: { + month: '9/21', + label: 'path-4-with-over-18-characters', + clients: 136, + entity_clients: 7, + non_entity_clients: 129, + }, + }, + 'path-2': { + month: '9/21', + label: 'path-2', + clients: 860, + entity_clients: 720, + non_entity_clients: 140, + new_clients: { + month: '9/21', + label: 'path-2', + clients: 791, + entity_clients: 711, + non_entity_clients: 80, + }, + }, + }, + }, + 'test-ns-2/': { + month: '9/21', + clients: 4702, + entity_clients: 3057, + non_entity_clients: 1645, + new_clients: { + month: '9/21', + label: 'test-ns-2/', + clients: 1537, + entity_clients: 662, + non_entity_clients: 875, + }, + mounts_by_key: { + 'path-3-with-over-18-characters': { + month: '9/21', + label: 'path-3-with-over-18-characters', + clients: 1686, + entity_clients: 926, + non_entity_clients: 760, + new_clients: { + month: '9/21', + label: 'path-3-with-over-18-characters', + clients: 520, + entity_clients: 13, + non_entity_clients: 507, + }, + }, + 'path-4-with-over-18-characters': { + month: '9/21', + label: 'path-4-with-over-18-characters', + clients: 1525, + entity_clients: 789, + non_entity_clients: 736, + new_clients: { + month: '9/21', + label: 'path-4-with-over-18-characters', + clients: 499, + entity_clients: 197, + non_entity_clients: 302, + }, + }, + 'path-2': { + month: '9/21', + label: 'path-2', + clients: 905, + entity_clients: 849, + non_entity_clients: 56, + new_clients: { + month: '9/21', + label: 'path-2', + clients: 398, + entity_clients: 370, + non_entity_clients: 28, + }, + }, + 'path-1': { + month: '9/21', + label: 'path-1', + clients: 586, + entity_clients: 493, + non_entity_clients: 93, + new_clients: { + month: '9/21', + label: 'path-1', + clients: 120, + entity_clients: 82, + non_entity_clients: 38, + }, + }, + }, + }, + 'test-ns-1/': { + month: '9/21', + clients: 4569, + entity_clients: 1871, + non_entity_clients: 2698, + new_clients: { + month: '9/21', + label: 'test-ns-1/', + clients: 2712, + entity_clients: 879, + non_entity_clients: 1833, + }, + mounts_by_key: { + 'path-4-with-over-18-characters': { + month: '9/21', + label: 'path-4-with-over-18-characters', + clients: 1534, + entity_clients: 619, + non_entity_clients: 915, + new_clients: { + month: '9/21', + label: 'path-4-with-over-18-characters', + clients: 740, + entity_clients: 39, + non_entity_clients: 701, + }, + }, + 
'path-3-with-over-18-characters': { + month: '9/21', + label: 'path-3-with-over-18-characters', + clients: 1528, + entity_clients: 589, + non_entity_clients: 939, + new_clients: { + month: '9/21', + label: 'path-3-with-over-18-characters', + clients: 1250, + entity_clients: 536, + non_entity_clients: 714, + }, + }, + 'path-1': { + month: '9/21', + label: 'path-1', + clients: 828, + entity_clients: 612, + non_entity_clients: 216, + new_clients: { + month: '9/21', + label: 'path-1', + clients: 463, + entity_clients: 283, + non_entity_clients: 180, + }, + }, + 'path-2': { + month: '9/21', + label: 'path-2', + clients: 679, + entity_clients: 51, + non_entity_clients: 628, + new_clients: { + month: '9/21', + label: 'path-2', + clients: 259, + entity_clients: 21, + non_entity_clients: 238, + }, + }, + }, + }, + 'test-ns-2-with-namespace-length-over-18-characters/': { + month: '9/21', + clients: 3771, + entity_clients: 2029, + non_entity_clients: 1742, + new_clients: { + month: '9/21', + label: 'test-ns-2-with-namespace-length-over-18-characters/', + clients: 2087, + entity_clients: 902, + non_entity_clients: 1185, + }, + mounts_by_key: { + 'path-3-with-over-18-characters': { + month: '9/21', + label: 'path-3-with-over-18-characters', + clients: 1249, + entity_clients: 793, + non_entity_clients: 456, + new_clients: { + month: '9/21', + label: 'path-3-with-over-18-characters', + clients: 472, + entity_clients: 260, + non_entity_clients: 212, + }, + }, + 'path-1': { + month: '9/21', + label: 'path-1', + clients: 1046, + entity_clients: 444, + non_entity_clients: 602, + new_clients: { + month: '9/21', + label: 'path-1', + clients: 775, + entity_clients: 349, + non_entity_clients: 426, + }, + }, + 'path-2': { + month: '9/21', + label: 'path-2', + clients: 930, + entity_clients: 277, + non_entity_clients: 653, + new_clients: { + month: '9/21', + label: 'path-2', + clients: 632, + entity_clients: 90, + non_entity_clients: 542, + }, + }, + 'path-4-with-over-18-characters': { + month: '9/21', + label: 'path-4-with-over-18-characters', + clients: 546, + entity_clients: 515, + non_entity_clients: 31, + new_clients: { + month: '9/21', + label: 'path-4-with-over-18-characters', + clients: 208, + entity_clients: 203, + non_entity_clients: 5, + }, + }, + }, + }, + 'test-ns-1-with-namespace-length-over-18-characters/': { + month: '9/21', + clients: 1357, + entity_clients: 648, + non_entity_clients: 709, + new_clients: { + month: '9/21', + label: 'test-ns-1-with-namespace-length-over-18-characters/', + clients: 560, + entity_clients: 189, + non_entity_clients: 371, + }, + mounts_by_key: { + 'path-1': { + month: '9/21', + label: 'path-1', + clients: 613, + entity_clients: 23, + non_entity_clients: 590, + new_clients: { + month: '9/21', + label: 'path-1', + clients: 318, + entity_clients: 12, + non_entity_clients: 306, + }, + }, + 'path-3-with-over-18-characters': { + month: '9/21', + label: 'path-3-with-over-18-characters', + clients: 543, + entity_clients: 465, + non_entity_clients: 78, + new_clients: { + month: '9/21', + label: 'path-3-with-over-18-characters', + clients: 126, + entity_clients: 89, + non_entity_clients: 37, + }, + }, + 'path-2': { + month: '9/21', + label: 'path-2', + clients: 146, + entity_clients: 141, + non_entity_clients: 5, + new_clients: { + month: '9/21', + label: 'path-2', + clients: 76, + entity_clients: 75, + non_entity_clients: 1, + }, + }, + 'path-4-with-over-18-characters': { + month: '9/21', + label: 'path-4-with-over-18-characters', + clients: 55, + entity_clients: 19, + 
non_entity_clients: 36, + new_clients: { + month: '9/21', + label: 'path-4-with-over-18-characters', + clients: 40, + entity_clients: 13, + non_entity_clients: 27, + }, + }, + }, + }, + }, + new_clients: { + month: '9/21', + clients: 9421, + entity_clients: 3947, + non_entity_clients: 5474, + namespaces: [ + { + label: 'test-ns-1/', + clients: 2712, + entity_clients: 879, + non_entity_clients: 1833, + mounts: [ + { + label: 'path-3-with-over-18-characters', + clients: 1250, + entity_clients: 536, + non_entity_clients: 714, + }, + { + label: 'path-4-with-over-18-characters', + clients: 740, + entity_clients: 39, + non_entity_clients: 701, + }, + { + label: 'path-1', + clients: 463, + entity_clients: 283, + non_entity_clients: 180, + }, + { + label: 'path-2', + clients: 259, + entity_clients: 21, + non_entity_clients: 238, + }, + ], + }, + { + label: 'root', + clients: 2525, + entity_clients: 1315, + non_entity_clients: 1210, + mounts: [ + { + label: 'path-3-with-over-18-characters', + clients: 1055, + entity_clients: 257, + non_entity_clients: 798, + }, + { + label: 'path-2', + clients: 791, + entity_clients: 711, + non_entity_clients: 80, + }, + { + label: 'path-1', + clients: 543, + entity_clients: 340, + non_entity_clients: 203, + }, + { + label: 'path-4-with-over-18-characters', + clients: 136, + entity_clients: 7, + non_entity_clients: 129, + }, + ], + }, + { + label: 'test-ns-2-with-namespace-length-over-18-characters/', + clients: 2087, + entity_clients: 902, + non_entity_clients: 1185, + mounts: [ + { + label: 'path-1', + clients: 775, + entity_clients: 349, + non_entity_clients: 426, + }, + { + label: 'path-2', + clients: 632, + entity_clients: 90, + non_entity_clients: 542, + }, + { + label: 'path-3-with-over-18-characters', + clients: 472, + entity_clients: 260, + non_entity_clients: 212, + }, + { + label: 'path-4-with-over-18-characters', + clients: 208, + entity_clients: 203, + non_entity_clients: 5, + }, + ], + }, + { + label: 'test-ns-2/', + clients: 1537, + entity_clients: 662, + non_entity_clients: 875, + mounts: [ + { + label: 'path-3-with-over-18-characters', + clients: 520, + entity_clients: 13, + non_entity_clients: 507, + }, + { + label: 'path-4-with-over-18-characters', + clients: 499, + entity_clients: 197, + non_entity_clients: 302, + }, + { + label: 'path-2', + clients: 398, + entity_clients: 370, + non_entity_clients: 28, + }, + { + label: 'path-1', + clients: 120, + entity_clients: 82, + non_entity_clients: 38, + }, + ], + }, + { + label: 'test-ns-1-with-namespace-length-over-18-characters/', + clients: 560, + entity_clients: 189, + non_entity_clients: 371, + mounts: [ + { + label: 'path-1', + clients: 318, + entity_clients: 12, + non_entity_clients: 306, + }, + { + label: 'path-3-with-over-18-characters', + clients: 126, + entity_clients: 89, + non_entity_clients: 37, + }, + { + label: 'path-2', + clients: 76, + entity_clients: 75, + non_entity_clients: 1, + }, + { + label: 'path-4-with-over-18-characters', + clients: 40, + entity_clients: 13, + non_entity_clients: 27, + }, + ], + }, + ], + }, + }, + { + month: '10/21', + clients: 19417, + entity_clients: 10105, + non_entity_clients: 9312, + namespaces: [ + { + label: 'root', + clients: 4835, + entity_clients: 2364, + non_entity_clients: 2471, + mounts: [ + { + label: 'path-3-with-over-18-characters', + clients: 1797, + entity_clients: 883, + non_entity_clients: 914, + }, + { + label: 'path-1', + clients: 1501, + entity_clients: 663, + non_entity_clients: 838, + }, + { + label: 'path-2', + clients: 1461, + 
entity_clients: 800, + non_entity_clients: 661, + }, + { + label: 'path-4-with-over-18-characters', + clients: 76, + entity_clients: 18, + non_entity_clients: 58, + }, + ], + }, + { + label: 'test-ns-2/', + clients: 4027, + entity_clients: 1692, + non_entity_clients: 2335, + mounts: [ + { + label: 'path-4-with-over-18-characters', + clients: 1223, + entity_clients: 820, + non_entity_clients: 403, + }, + { + label: 'path-3-with-over-18-characters', + clients: 1110, + entity_clients: 111, + non_entity_clients: 999, + }, + { + label: 'path-1', + clients: 1034, + entity_clients: 462, + non_entity_clients: 572, + }, + { + label: 'path-2', + clients: 660, + entity_clients: 299, + non_entity_clients: 361, + }, + ], + }, + { + label: 'test-ns-2-with-namespace-length-over-18-characters/', + clients: 3924, + entity_clients: 2132, + non_entity_clients: 1792, + mounts: [ + { + label: 'path-3-with-over-18-characters', + clients: 1411, + entity_clients: 765, + non_entity_clients: 646, + }, + { + label: 'path-2', + clients: 1205, + entity_clients: 382, + non_entity_clients: 823, + }, + { + label: 'path-1', + clients: 884, + entity_clients: 850, + non_entity_clients: 34, + }, + { + label: 'path-4-with-over-18-characters', + clients: 424, + entity_clients: 135, + non_entity_clients: 289, + }, + ], + }, + { + label: 'test-ns-1-with-namespace-length-over-18-characters/', + clients: 3639, + entity_clients: 2314, + non_entity_clients: 1325, + mounts: [ + { + label: 'path-1', + clients: 1062, + entity_clients: 781, + non_entity_clients: 281, + }, + { + label: 'path-4-with-over-18-characters', + clients: 1021, + entity_clients: 609, + non_entity_clients: 412, + }, + { + label: 'path-2', + clients: 849, + entity_clients: 426, + non_entity_clients: 423, + }, + { + label: 'path-3-with-over-18-characters', + clients: 707, + entity_clients: 498, + non_entity_clients: 209, + }, + ], + }, + { + label: 'test-ns-1/', + clients: 2992, + entity_clients: 1603, + non_entity_clients: 1389, + mounts: [ + { + label: 'path-1', + clients: 1140, + entity_clients: 480, + non_entity_clients: 660, + }, + { + label: 'path-4-with-over-18-characters', + clients: 1058, + entity_clients: 651, + non_entity_clients: 407, + }, + { + label: 'path-2', + clients: 575, + entity_clients: 416, + non_entity_clients: 159, + }, + { + label: 'path-3-with-over-18-characters', + clients: 219, + entity_clients: 56, + non_entity_clients: 163, + }, + ], + }, + ], + namespaces_by_key: { + root: { + month: '10/21', + clients: 4835, + entity_clients: 2364, + non_entity_clients: 2471, + new_clients: { + month: '10/21', + label: 'root', + clients: 1732, + entity_clients: 586, + non_entity_clients: 1146, + }, + mounts_by_key: { + 'path-3-with-over-18-characters': { + month: '10/21', + label: 'path-3-with-over-18-characters', + clients: 1797, + entity_clients: 883, + non_entity_clients: 914, + new_clients: { + month: '10/21', + label: 'path-3-with-over-18-characters', + clients: 907, + entity_clients: 192, + non_entity_clients: 715, + }, + }, + 'path-1': { + month: '10/21', + label: 'path-1', + clients: 1501, + entity_clients: 663, + non_entity_clients: 838, + new_clients: { + month: '10/21', + label: 'path-1', + clients: 276, + entity_clients: 202, + non_entity_clients: 74, + }, + }, + 'path-2': { + month: '10/21', + label: 'path-2', + clients: 1461, + entity_clients: 800, + non_entity_clients: 661, + new_clients: { + month: '10/21', + label: 'path-2', + clients: 502, + entity_clients: 189, + non_entity_clients: 313, + }, + }, + 'path-4-with-over-18-characters': 
{ + month: '10/21', + label: 'path-4-with-over-18-characters', + clients: 76, + entity_clients: 18, + non_entity_clients: 58, + new_clients: { + month: '10/21', + label: 'path-4-with-over-18-characters', + clients: 47, + entity_clients: 3, + non_entity_clients: 44, + }, + }, + }, + }, + 'test-ns-2/': { + month: '10/21', + clients: 4027, + entity_clients: 1692, + non_entity_clients: 2335, + new_clients: { + month: '10/21', + label: 'test-ns-2/', + clients: 2301, + entity_clients: 678, + non_entity_clients: 1623, + }, + mounts_by_key: { + 'path-4-with-over-18-characters': { + month: '10/21', + label: 'path-4-with-over-18-characters', + clients: 1223, + entity_clients: 820, + non_entity_clients: 403, + new_clients: { + month: '10/21', + label: 'path-4-with-over-18-characters', + clients: 602, + entity_clients: 212, + non_entity_clients: 390, + }, + }, + 'path-3-with-over-18-characters': { + month: '10/21', + label: 'path-3-with-over-18-characters', + clients: 1110, + entity_clients: 111, + non_entity_clients: 999, + new_clients: { + month: '10/21', + label: 'path-3-with-over-18-characters', + clients: 440, + entity_clients: 7, + non_entity_clients: 433, + }, + }, + 'path-1': { + month: '10/21', + label: 'path-1', + clients: 1034, + entity_clients: 462, + non_entity_clients: 572, + new_clients: { + month: '10/21', + label: 'path-1', + clients: 980, + entity_clients: 454, + non_entity_clients: 526, + }, + }, + 'path-2': { + month: '10/21', + label: 'path-2', + clients: 660, + entity_clients: 299, + non_entity_clients: 361, + new_clients: { + month: '10/21', + label: 'path-2', + clients: 279, + entity_clients: 5, + non_entity_clients: 274, + }, + }, + }, + }, + 'test-ns-2-with-namespace-length-over-18-characters/': { + month: '10/21', + clients: 3924, + entity_clients: 2132, + non_entity_clients: 1792, + new_clients: { + month: '10/21', + label: 'test-ns-2-with-namespace-length-over-18-characters/', + clients: 1561, + entity_clients: 1225, + non_entity_clients: 336, + }, + mounts_by_key: { + 'path-3-with-over-18-characters': { + month: '10/21', + label: 'path-3-with-over-18-characters', + clients: 1411, + entity_clients: 765, + non_entity_clients: 646, + new_clients: { + month: '10/21', + label: 'path-3-with-over-18-characters', + clients: 948, + entity_clients: 660, + non_entity_clients: 288, + }, + }, + 'path-2': { + month: '10/21', + label: 'path-2', + clients: 1205, + entity_clients: 382, + non_entity_clients: 823, + new_clients: { + month: '10/21', + label: 'path-2', + clients: 305, + entity_clients: 289, + non_entity_clients: 16, + }, + }, + 'path-1': { + month: '10/21', + label: 'path-1', + clients: 884, + entity_clients: 850, + non_entity_clients: 34, + new_clients: { + month: '10/21', + label: 'path-1', + clients: 230, + entity_clients: 207, + non_entity_clients: 23, + }, + }, + 'path-4-with-over-18-characters': { + month: '10/21', + label: 'path-4-with-over-18-characters', + clients: 424, + entity_clients: 135, + non_entity_clients: 289, + new_clients: { + month: '10/21', + label: 'path-4-with-over-18-characters', + clients: 78, + entity_clients: 69, + non_entity_clients: 9, + }, + }, + }, + }, + 'test-ns-1-with-namespace-length-over-18-characters/': { + month: '10/21', + clients: 3639, + entity_clients: 2314, + non_entity_clients: 1325, + new_clients: { + month: '10/21', + label: 'test-ns-1-with-namespace-length-over-18-characters/', + clients: 1245, + entity_clients: 710, + non_entity_clients: 535, + }, + mounts_by_key: { + 'path-1': { + month: '10/21', + label: 'path-1', + clients: 
1062, + entity_clients: 781, + non_entity_clients: 281, + new_clients: { + month: '10/21', + label: 'path-1', + clients: 288, + entity_clients: 63, + non_entity_clients: 225, + }, + }, + 'path-4-with-over-18-characters': { + month: '10/21', + label: 'path-4-with-over-18-characters', + clients: 1021, + entity_clients: 609, + non_entity_clients: 412, + new_clients: { + month: '10/21', + label: 'path-4-with-over-18-characters', + clients: 440, + entity_clients: 323, + non_entity_clients: 117, + }, + }, + 'path-2': { + month: '10/21', + label: 'path-2', + clients: 849, + entity_clients: 426, + non_entity_clients: 423, + new_clients: { + month: '10/21', + label: 'path-2', + clients: 339, + entity_clients: 308, + non_entity_clients: 31, + }, + }, + 'path-3-with-over-18-characters': { + month: '10/21', + label: 'path-3-with-over-18-characters', + clients: 707, + entity_clients: 498, + non_entity_clients: 209, + new_clients: { + month: '10/21', + label: 'path-3-with-over-18-characters', + clients: 178, + entity_clients: 16, + non_entity_clients: 162, + }, + }, + }, + }, + 'test-ns-1/': { + month: '10/21', + clients: 2992, + entity_clients: 1603, + non_entity_clients: 1389, + new_clients: { + month: '10/21', + label: 'test-ns-1/', + clients: 820, + entity_clients: 356, + non_entity_clients: 464, + }, + mounts_by_key: { + 'path-1': { + month: '10/21', + label: 'path-1', + clients: 1140, + entity_clients: 480, + non_entity_clients: 660, + new_clients: { + month: '10/21', + label: 'path-1', + clients: 239, + entity_clients: 30, + non_entity_clients: 209, + }, + }, + 'path-4-with-over-18-characters': { + month: '10/21', + label: 'path-4-with-over-18-characters', + clients: 1058, + entity_clients: 651, + non_entity_clients: 407, + new_clients: { + month: '10/21', + label: 'path-4-with-over-18-characters', + clients: 256, + entity_clients: 63, + non_entity_clients: 193, + }, + }, + 'path-2': { + month: '10/21', + label: 'path-2', + clients: 575, + entity_clients: 416, + non_entity_clients: 159, + new_clients: { + month: '10/21', + label: 'path-2', + clients: 259, + entity_clients: 245, + non_entity_clients: 14, + }, + }, + 'path-3-with-over-18-characters': { + month: '10/21', + label: 'path-3-with-over-18-characters', + clients: 219, + entity_clients: 56, + non_entity_clients: 163, + new_clients: { + month: '10/21', + label: 'path-3-with-over-18-characters', + clients: 66, + entity_clients: 18, + non_entity_clients: 48, + }, + }, + }, + }, + }, + new_clients: { + month: '10/21', + clients: 7659, + entity_clients: 3555, + non_entity_clients: 4104, + namespaces: [ + { + label: 'test-ns-2/', + clients: 2301, + entity_clients: 678, + non_entity_clients: 1623, + mounts: [ + { + label: 'path-1', + clients: 980, + entity_clients: 454, + non_entity_clients: 526, + }, + { + label: 'path-4-with-over-18-characters', + clients: 602, + entity_clients: 212, + non_entity_clients: 390, + }, + { + label: 'path-3-with-over-18-characters', + clients: 440, + entity_clients: 7, + non_entity_clients: 433, + }, + { + label: 'path-2', + clients: 279, + entity_clients: 5, + non_entity_clients: 274, + }, + ], + }, + { + label: 'root', + clients: 1732, + entity_clients: 586, + non_entity_clients: 1146, + mounts: [ + { + label: 'path-3-with-over-18-characters', + clients: 907, + entity_clients: 192, + non_entity_clients: 715, + }, + { + label: 'path-2', + clients: 502, + entity_clients: 189, + non_entity_clients: 313, + }, + { + label: 'path-1', + clients: 276, + entity_clients: 202, + non_entity_clients: 74, + }, + { + label: 
'path-4-with-over-18-characters', + clients: 47, + entity_clients: 3, + non_entity_clients: 44, + }, + ], + }, + { + label: 'test-ns-2-with-namespace-length-over-18-characters/', + clients: 1561, + entity_clients: 1225, + non_entity_clients: 336, + mounts: [ + { + label: 'path-3-with-over-18-characters', + clients: 948, + entity_clients: 660, + non_entity_clients: 288, + }, + { + label: 'path-2', + clients: 305, + entity_clients: 289, + non_entity_clients: 16, + }, + { + label: 'path-1', + clients: 230, + entity_clients: 207, + non_entity_clients: 23, + }, + { + label: 'path-4-with-over-18-characters', + clients: 78, + entity_clients: 69, + non_entity_clients: 9, + }, + ], + }, + { + label: 'test-ns-1-with-namespace-length-over-18-characters/', + clients: 1245, + entity_clients: 710, + non_entity_clients: 535, + mounts: [ + { + label: 'path-4-with-over-18-characters', + clients: 440, + entity_clients: 323, + non_entity_clients: 117, + }, + { + label: 'path-2', + clients: 339, + entity_clients: 308, + non_entity_clients: 31, + }, + { + label: 'path-1', + clients: 288, + entity_clients: 63, + non_entity_clients: 225, + }, + { + label: 'path-3-with-over-18-characters', + clients: 178, + entity_clients: 16, + non_entity_clients: 162, + }, + ], + }, + { + label: 'test-ns-1/', + clients: 820, + entity_clients: 356, + non_entity_clients: 464, + mounts: [ + { + label: 'path-2', + clients: 259, + entity_clients: 245, + non_entity_clients: 14, + }, + { + label: 'path-4-with-over-18-characters', + clients: 256, + entity_clients: 63, + non_entity_clients: 193, + }, + { + label: 'path-1', + clients: 239, + entity_clients: 30, + non_entity_clients: 209, + }, + { + label: 'path-3-with-over-18-characters', + clients: 66, + entity_clients: 18, + non_entity_clients: 48, + }, + ], + }, + ], + }, + }, + ]; + const NEW_ACTIVITY = MONTHLY_ACTIVITY.map((d) => d.new_clients); + const TOTAL_USAGE_COUNTS = { + clients: 38668, + entity_clients: 20818, + non_entity_clients: 17850, + }; + hooks.before(function () { + sinon.stub(timestamp, 'now').callsFake(() => new Date('2018-04-03T14:15:30')); + }); + hooks.beforeEach(function () { + this.set('timestamp', formatRFC3339(timestamp.now())); + this.set('chartLegend', [ + { label: 'entity clients', key: 'entity_clients' }, + { label: 'non-entity clients', key: 'non_entity_clients' }, + ]); + }); + hooks.after(function () { + timestamp.now.restore(); + }); + + test('it renders with full monthly activity data', async function (assert) { + this.set('byMonthActivityData', MONTHLY_ACTIVITY); + this.set('totalUsageCounts', TOTAL_USAGE_COUNTS); + const expectedTotalEntity = formatNumber([TOTAL_USAGE_COUNTS.entity_clients]); + const expectedTotalNonEntity = formatNumber([TOTAL_USAGE_COUNTS.non_entity_clients]); + const expectedNewEntity = formatNumber([calculateAverage(NEW_ACTIVITY, 'entity_clients')]); + const expectedNewNonEntity = formatNumber([calculateAverage(NEW_ACTIVITY, 'non_entity_clients')]); + + await render(hbs` + + + `); + + assert.dom('[data-test-running-total]').exists('running total component renders'); + assert.dom('[data-test-line-chart]').exists('line chart renders'); + assert.dom('[data-test-vertical-bar-chart]').exists('vertical bar chart renders'); + assert.dom('[data-test-running-total-legend]').exists('legend renders'); + assert.dom('[data-test-running-total-timestamp]').exists('renders timestamp'); + assert + .dom('[data-test-running-total-entity] p.data-details') + .hasText(`${expectedTotalEntity}`, `renders correct total average 
${expectedTotalEntity}`); + assert + .dom('[data-test-running-total-nonentity] p.data-details') + .hasText(`${expectedTotalNonEntity}`, `renders correct total average ${expectedTotalNonEntity}`); + assert + .dom('[data-test-running-new-entity] p.data-details') + .hasText(`${expectedNewEntity}`, `renders correct new average ${expectedNewEntity}`); + assert + .dom('[data-test-running-new-nonentity] p.data-details') + .hasText(`${expectedNewNonEntity}`, `renders correct new average ${expectedNewNonEntity}`); + + // assert line chart is correct + findAll('[data-test-line-chart="x-axis-labels"] text').forEach((e, i) => { + assert + .dom(e) + .hasText( + `${MONTHLY_ACTIVITY[i].month}`, + `renders x-axis labels for line chart: ${MONTHLY_ACTIVITY[i].month}` + ); + }); + assert + .dom('[data-test-line-chart="plot-point"]') + .exists( + { count: MONTHLY_ACTIVITY.filter((m) => m.counts !== null).length }, + 'renders correct number of plot points' + ); + + // assert bar chart is correct + findAll('[data-test-vertical-chart="x-axis-labels"] text').forEach((e, i) => { + assert + .dom(e) + .hasText( + `${MONTHLY_ACTIVITY[i].month}`, + `renders x-axis labels for bar chart: ${MONTHLY_ACTIVITY[i].month}` + ); + }); + assert + .dom('[data-test-vertical-chart="data-bar"]') + .exists( + { count: MONTHLY_ACTIVITY.filter((m) => m.counts !== null).length * 2 }, + 'renders correct number of data bars' + ); + }); + + test('it renders with no new monthly data', async function (assert) { + const monthlyWithoutNew = MONTHLY_ACTIVITY.map((d) => ({ ...d, new_clients: { month: d.month } })); + this.set('byMonthActivityData', monthlyWithoutNew); + this.set('totalUsageCounts', TOTAL_USAGE_COUNTS); + const expectedTotalEntity = formatNumber([TOTAL_USAGE_COUNTS.entity_clients]); + const expectedTotalNonEntity = formatNumber([TOTAL_USAGE_COUNTS.non_entity_clients]); + + await render(hbs` + + `); + assert.dom('[data-test-running-total]').exists('running total component renders'); + assert.dom('[data-test-line-chart]').exists('line chart renders'); + assert.dom('[data-test-vertical-bar-chart]').doesNotExist('vertical bar chart does not render'); + assert.dom('[data-test-running-total-legend]').doesNotExist('legend does not render'); + assert.dom('[data-test-component="empty-state"]').exists('renders empty state'); + assert.dom('[data-test-empty-state-title]').hasText('No new clients'); + assert.dom('[data-test-running-total-timestamp]').exists('renders timestamp'); + assert + .dom('[data-test-running-total-entity] p.data-details') + .hasText(`${expectedTotalEntity}`, `renders correct total average ${expectedTotalEntity}`); + assert + .dom('[data-test-running-total-nonentity] p.data-details') + .hasText(`${expectedTotalNonEntity}`, `renders correct total average ${expectedTotalNonEntity}`); + assert + .dom('[data-test-running-new-entity] p.data-details') + .doesNotExist('new client counts do not exist'); + assert + .dom('[data-test-running-new-nonentity] p.data-details') + .doesNotExist('average new client counts do not exist'); + }); + + test('it renders with single historical month data', async function (assert) { + const singleMonth = MONTHLY_ACTIVITY[MONTHLY_ACTIVITY.length - 1]; + const singleMonthNew = NEW_ACTIVITY[NEW_ACTIVITY.length - 1]; + this.set('singleMonth', [singleMonth]); + const expectedTotalClients = formatNumber([singleMonth.clients]); + const expectedTotalEntity = formatNumber([singleMonth.entity_clients]); + const expectedTotalNonEntity = formatNumber([singleMonth.non_entity_clients]); + const 
expectedNewClients = formatNumber([singleMonthNew.clients]); + const expectedNewEntity = formatNumber([singleMonthNew.entity_clients]); + const expectedNewNonEntity = formatNumber([singleMonthNew.non_entity_clients]); + + await render(hbs` + + `); + assert.dom('[data-test-running-total]').exists('running total component renders'); + assert.dom('[data-test-line-chart]').doesNotExist('line chart does not render'); + assert.dom('[data-test-vertical-bar-chart]').doesNotExist('vertical bar chart does not render'); + assert.dom('[data-test-running-total-legend]').doesNotExist('legend does not render'); + assert.dom('[data-test-running-total-timestamp]').doesNotExist('does not render timestamp'); + assert.dom('[data-test-stat-text-container]').exists({ count: 6 }, 'renders stat text containers'); + assert + .dom('[data-test-new] [data-test-stat-text-container="New clients"] div.stat-value') + .hasText(`${expectedNewClients}`, `renders correct total new clients: ${expectedNewClients}`); + assert + .dom('[data-test-new] [data-test-stat-text-container="Entity clients"] div.stat-value') + .hasText(`${expectedNewEntity}`, `renders correct total new entity: ${expectedNewEntity}`); + assert + .dom('[data-test-new] [data-test-stat-text-container="Non-entity clients"] div.stat-value') + .hasText(`${expectedNewNonEntity}`, `renders correct total new non-entity: ${expectedNewNonEntity}`); + assert + .dom('[data-test-total] [data-test-stat-text-container="Total monthly clients"] div.stat-value') + .hasText(`${expectedTotalClients}`, `renders correct total clients: ${expectedTotalClients}`); + assert + .dom('[data-test-total] [data-test-stat-text-container="Entity clients"] div.stat-value') + .hasText(`${expectedTotalEntity}`, `renders correct total entity: ${expectedTotalEntity}`); + assert + .dom('[data-test-total] [data-test-stat-text-container="Non-entity clients"] div.stat-value') + .hasText(`${expectedTotalNonEntity}`, `renders correct total non-entity: ${expectedTotalNonEntity}`); + }); +}); diff --git a/ui/tests/integration/components/clients/usage-stats-test.js b/ui/tests/integration/components/clients/usage-stats-test.js new file mode 100644 index 0000000..f776975 --- /dev/null +++ b/ui/tests/integration/components/clients/usage-stats-test.js @@ -0,0 +1,55 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; + +module('Integration | Component | clients/usage-stats', function (hooks) { + setupRenderingTest(hooks); + + test('it renders defaults', async function (assert) { + await render(hbs``); + + assert.dom('[data-test-stat-text]').exists({ count: 3 }, 'Renders 3 Stat texts even with no data passed'); + assert.dom('[data-test-stat-text="total-clients"]').exists('Total clients exists'); + assert.dom('[data-test-stat-text="total-clients"] .stat-value').hasText('-', 'renders dash when no data'); + assert.dom('[data-test-stat-text="entity-clients"]').exists('Entity clients exists'); + assert + .dom('[data-test-stat-text="entity-clients"] .stat-value') + .hasText('-', 'renders dash when no data'); + assert.dom('[data-test-stat-text="non-entity-clients"]').exists('Non entity clients exists'); + assert + .dom('[data-test-stat-text="non-entity-clients"] .stat-value') + .hasText('-', 'renders dash when no data'); + assert + .dom('a') + .hasAttribute('href', 'https://developer.hashicorp.com/vault/tutorials/monitoring/usage-metrics'); + }); + + test('it renders with data', async function (assert) { + this.set('counts', { + clients: 17, + entity_clients: 7, + non_entity_clients: 10, + }); + await render(hbs``); + + assert.dom('[data-test-stat-text]').exists({ count: 3 }, 'Renders 3 Stat texts when data is passed'); + assert.dom('[data-test-stat-text="total-clients"]').exists('Total clients exists'); + assert + .dom('[data-test-stat-text="total-clients"] .stat-value') + .hasText('17', 'Total clients shows passed value'); + assert.dom('[data-test-stat-text="entity-clients"]').exists('Entity clients exists'); + assert + .dom('[data-test-stat-text="entity-clients"] .stat-value') + .hasText('7', 'entity clients shows passed value'); + assert.dom('[data-test-stat-text="non-entity-clients"]').exists('Non entity clients exists'); + assert + .dom('[data-test-stat-text="non-entity-clients"] .stat-value') + .hasText('10', 'non entity clients shows passed value'); + }); +}); diff --git a/ui/tests/integration/components/clients/vertical-bar-chart-test.js b/ui/tests/integration/components/clients/vertical-bar-chart-test.js new file mode 100644 index 0000000..2300b91 --- /dev/null +++ b/ui/tests/integration/components/clients/vertical-bar-chart-test.js @@ -0,0 +1,112 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render, findAll, find, triggerEvent } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; + +module('Integration | Component | clients/vertical-bar-chart', function (hooks) { + setupRenderingTest(hooks); + hooks.beforeEach(function () { + this.set('chartLegend', [ + { label: 'entity clients', key: 'entity_clients' }, + { label: 'non-entity clients', key: 'non_entity_clients' }, + ]); + }); + + test('it renders chart and tooltip for total clients', async function (assert) { + const barChartData = [ + { month: 'january', clients: 141, entity_clients: 91, non_entity_clients: 50, new_clients: 5 }, + { month: 'february', clients: 251, entity_clients: 101, non_entity_clients: 150, new_clients: 5 }, + ]; + this.set('barChartData', barChartData); + + await render(hbs` 
+ +
+ `); + const tooltipHoverBars = findAll('[data-test-vertical-bar-chart] rect.tooltip-rect'); + assert.dom('[data-test-vertical-bar-chart]').exists('renders chart'); + assert + .dom('[data-test-vertical-chart="data-bar"]') + .exists({ count: barChartData.length * 2 }, 'renders correct number of bars'); // multiply length by 2 because bars are stacked + + assert.dom(find('[data-test-vertical-chart="y-axis-labels"] text')).hasText('0', `y-axis starts at 0`); + findAll('[data-test-vertical-chart="x-axis-labels"] text').forEach((e, i) => { + assert.dom(e).hasText(`${barChartData[i].month}`, `renders x-axis label: ${barChartData[i].month}`); + }); + + for (const [i, bar] of tooltipHoverBars.entries()) { + await triggerEvent(bar, 'mouseover'); + const tooltip = document.querySelector('.ember-modal-dialog'); + assert + .dom(tooltip) + .includesText( + `${barChartData[i].clients} total clients ${barChartData[i].entity_clients} entity clients ${barChartData[i].non_entity_clients} non-entity clients`, + 'tooltip text is correct' + ); + } + }); + + test('it renders chart and tooltip for new clients', async function (assert) { + const barChartData = [ + { month: 'january', entity_clients: 91, non_entity_clients: 50, clients: 0 }, + { month: 'february', entity_clients: 101, non_entity_clients: 150, clients: 110 }, + ]; + this.set('barChartData', barChartData); + + await render(hbs` +
+ +
+ `); + + const tooltipHoverBars = findAll('[data-test-vertical-bar-chart] rect.tooltip-rect'); + assert.dom('[data-test-vertical-bar-chart]').exists('renders chart'); + assert + .dom('[data-test-vertical-chart="data-bar"]') + .exists({ count: barChartData.length * 2 }, 'renders correct number of bars'); // multiply length by 2 because bars are stacked + + assert.dom(find('[data-test-vertical-chart="y-axis-labels"] text')).hasText('0', `y-axis starts at 0`); + findAll('[data-test-vertical-chart="x-axis-labels"] text').forEach((e, i) => { + assert.dom(e).hasText(`${barChartData[i].month}`, `renders x-axis label: ${barChartData[i].month}`); + }); + + for (const [i, bar] of tooltipHoverBars.entries()) { + await triggerEvent(bar, 'mouseover'); + const tooltip = document.querySelector('.ember-modal-dialog'); + assert + .dom(tooltip) + .includesText( + `${barChartData[i].clients} new clients ${barChartData[i].entity_clients} entity clients ${barChartData[i].non_entity_clients} non-entity clients`, + 'tooltip text is correct' + ); + } + }); + + test('it renders empty state when no dataset', async function (assert) { + await render(hbs` +
+ +
+ `); + + assert.dom('[data-test-component="empty-state"]').exists('renders empty state when no data'); + assert + .dom('[data-test-empty-state-subtext]') + .hasText( + `this is a custom message to explain why you're not seeing a vertical bar chart`, + 'custom message renders' + ); + }); +}); diff --git a/ui/tests/integration/components/confirm-action-test.js b/ui/tests/integration/components/confirm-action-test.js new file mode 100644 index 0000000..9b30c33 --- /dev/null +++ b/ui/tests/integration/components/confirm-action-test.js @@ -0,0 +1,58 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render, click } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; +import sinon from 'sinon'; + +module('Integration | Component | confirm-action', function (hooks) { + setupRenderingTest(hooks); + + test('it renders and on click shows the correct icon', async function (assert) { + const confirmAction = sinon.spy(); + this.set('onConfirm', confirmAction); + await render(hbs` + + DELETE + + `); + assert.dom('[data-test-icon="chevron-down"]').exists('Icon is pointing down'); + await click('[data-test-confirm-action-trigger="true"]'); + assert.dom('[data-test-icon="chevron-up"]').exists('Icon is now pointing up'); + assert.dom('[data-test-confirm-action-title]').hasText('Delete this?'); + }); + + test('it closes the confirmation modal on successful delete', async function (assert) { + const confirmAction = sinon.spy(); + this.set('onConfirm', confirmAction); + await render(hbs` + + DELETE + + `); + await click('[data-test-confirm-action-trigger="true"]'); + await click('[data-test-confirm-cancel-button="true"]'); + // assert that after CANCEL the icon button is pointing down. + assert.dom('[data-test-icon="chevron-down"]').exists('Icon is pointing down after clicking cancel'); + // open the modal again to test the DELETE action + await click('[data-test-confirm-action-trigger="true"]'); + await click('[data-test-confirm-button="true"]'); + assert + .dom('[data-test-icon="chevron-down"]') + .exists('Icon is pointing down after executing the Delete action'); + assert.true(confirmAction.called, 'calls the action when Delete is pressed'); + assert + .dom('[data-test-confirm-action-title]') + .doesNotExist('it has closed the confirm content and does not show the title'); + }); +}); diff --git a/ui/tests/integration/components/confirm-test.js b/ui/tests/integration/components/confirm-test.js new file mode 100644 index 0000000..f172f5b --- /dev/null +++ b/ui/tests/integration/components/confirm-test.js @@ -0,0 +1,107 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render, click } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; +import sinon from 'sinon'; + +module('Integration | Component | Confirm', function (hooks) { + setupRenderingTest(hooks); + + hooks.beforeEach(function () { + this.set('id', 'foo'); + this.set('title', 'Are you sure?'); + this.set('message', 'You will not be able to recover this item later.'); + this.set('triggerText', 'Click me!'); + this.set('onConfirm', sinon.spy()); + }); + + test('it renders', async function (assert) { + await render(hbs` + + + + `); + + assert.dom('.confirm-wrapper').exists(); + assert.dom('.confirm').containsText(this.triggerText); + }); + + test('does not show the confirmation message until it is triggered', async function (assert) { + await render(hbs` + + + + `); + assert.dom('.confirm-overlay').doesNotContainText(this.message); + + await click('[data-test-confirm-action-trigger]'); + + assert.dom('.confirm-overlay').containsText(this.title); + assert.dom('.confirm-overlay').containsText(this.message); + }); + + test('it calls onConfirm when the confirm button is clicked', async function (assert) { + await render(hbs` + + + + `); + await click('[data-test-confirm-action-trigger]'); + await click('[data-test-confirm-button=true]'); + + assert.ok(this.onConfirm.calledOnce); + }); + + test('it shows only the active triggers message', async function (assert) { + await render(hbs` + + + + + `); + + await click(`[data-test-confirm-action-trigger=${this.id}]`); + assert.dom('.confirm-overlay').containsText(this.title); + assert.dom('.confirm-overlay').containsText(this.message); + + await click('[data-test-confirm-cancel-button]'); + + await click("[data-test-confirm-action-trigger='bar']"); + assert.dom('.confirm-overlay').containsText('Wow'); + assert.dom('.confirm-overlay').containsText('Bazinga!'); + }); +}); diff --git a/ui/tests/integration/components/confirmation-modal-test.js b/ui/tests/integration/components/confirmation-modal-test.js new file mode 100644 index 0000000..76d79e1 --- /dev/null +++ b/ui/tests/integration/components/confirmation-modal-test.js @@ -0,0 +1,46 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import sinon from 'sinon'; +import { click, fillIn, render } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; + +module('Integration | Component | confirmation-modal', function (hooks) { + setupRenderingTest(hooks); + + test('it renders with disabled confirmation button until input matches', async function (assert) { + const confirmAction = sinon.spy(); + const closeAction = sinon.spy(); + this.set('onConfirm', confirmAction); + this.set('onClose', closeAction); + await render(hbs` + + + `); + + assert.dom('[data-test-confirm-button]').isDisabled(); + assert.dom('[data-test-modal-div]').hasAttribute('class', 'modal is-highlight is-active'); + assert.dom('[data-test-confirm-button]').hasText('Plz Continue', 'Confirm button has specified value'); + assert + .dom('[data-test-modal-title]') + .hasStyle({ color: 'rgb(160, 125, 2)' }, 'title exists with warning header'); + await fillIn('[data-test-confirmation-modal-input="Confirmation Modal"]', 'Destructive Thing'); + assert.dom('[data-test-confirm-button="Confirmation Modal"]').isNotDisabled(); + + await click('[data-test-cancel-button]'); + assert.true(closeAction.called, 'executes passed in onClose function'); + await click('[data-test-confirm-button]'); + assert.true(confirmAction.called, 'executes passed in onConfirm function'); + }); +}); diff --git a/ui/tests/integration/components/console/log-command-test.js b/ui/tests/integration/components/console/log-command-test.js new file mode 100644 index 0000000..b3c0740 --- /dev/null +++ b/ui/tests/integration/components/console/log-command-test.js @@ -0,0 +1,21 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; + +module('Integration | Component | console/log command', function (hooks) { + setupRenderingTest(hooks); + + test('it renders', async function (assert) { + const commandText = 'list this/path'; + this.set('content', commandText); + + await render(hbs``); + assert.dom('.console-ui-command').includesText(commandText); + }); +}); diff --git a/ui/tests/integration/components/console/log-error-test.js b/ui/tests/integration/components/console/log-error-test.js new file mode 100644 index 0000000..36c65be --- /dev/null +++ b/ui/tests/integration/components/console/log-error-test.js @@ -0,0 +1,20 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; + +module('Integration | Component | console/log error', function (hooks) { + setupRenderingTest(hooks); + + test('it renders', async function (assert) { + const errorText = 'Error deleting at: sys/foo. 
URL: v1/sys/foo Code: 404'; + this.set('content', errorText); + await render(hbs`{{console/log-error content=this.content}}`); + assert.dom('pre').includesText(errorText); + }); +}); diff --git a/ui/tests/integration/components/console/log-json-test.js b/ui/tests/integration/components/console/log-json-test.js new file mode 100644 index 0000000..e759c07 --- /dev/null +++ b/ui/tests/integration/components/console/log-json-test.js @@ -0,0 +1,30 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render, find } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; + +module('Integration | Component | console/log json', function (hooks) { + setupRenderingTest(hooks); + + hooks.beforeEach(function () { + this.codeMirror = this.owner.lookup('service:code-mirror'); + }); + + test('it renders', async function (assert) { + // Set any properties with this.set('myProperty', 'value'); + // Handle any actions with this.on('myAction', function(val) { ... }); + const objectContent = { one: 'two', three: 'four', seven: { five: 'six' }, eight: [5, 6] }; + const expectedText = JSON.stringify(objectContent, null, 2); + + this.set('content', objectContent); + + await render(hbs`{{console/log-json content=this.content}}`); + const instance = find('[data-test-component=code-mirror-modifier]').innerText; + assert.strictEqual(instance, expectedText); + }); +}); diff --git a/ui/tests/integration/components/console/log-list-test.js b/ui/tests/integration/components/console/log-list-test.js new file mode 100644 index 0000000..e47ac13 --- /dev/null +++ b/ui/tests/integration/components/console/log-list-test.js @@ -0,0 +1,26 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; + +module('Integration | Component | console/log list', function (hooks) { + setupRenderingTest(hooks); + + test('it renders', async function (assert) { + // Set any properties with this.set('myProperty', 'value'); + // Handle any actions with this.on('myAction', function(val) { ... }); + const listContent = { keys: ['one', 'two'] }; + const expectedText = 'Keys one two'; + + this.set('content', listContent); + + await render(hbs`{{console/log-list content=this.content}}`); + + assert.dom('pre').includesText(`${expectedText}`); + }); +}); diff --git a/ui/tests/integration/components/console/log-object-test.js b/ui/tests/integration/components/console/log-object-test.js new file mode 100644 index 0000000..b364311 --- /dev/null +++ b/ui/tests/integration/components/console/log-object-test.js @@ -0,0 +1,25 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; +import { stringifyObjectValues } from 'vault/components/console/log-object'; + +module('Integration | Component | console/log object', function (hooks) { + setupRenderingTest(hooks); + + test('it renders', async function (assert) { + const objectContent = { one: 'two', three: 'four', seven: { five: 'six' }, eight: [5, 6] }; + const data = { one: 'two', three: 'four', seven: { five: 'six' }, eight: [5, 6] }; + // stringifyObjectValues mutates `data` in place (nested values become JSON strings); expectedText mirrors that output + stringifyObjectValues(data); + const expectedText = 'Key Value one two three four seven {"five":"six"} eight [5,6]'; + this.set('content', objectContent); + + await render(hbs`{{console/log-object content=this.content}}`); + assert.dom('pre').includesText(expectedText); + }); +}); diff --git a/ui/tests/integration/components/console/log-text-test.js b/ui/tests/integration/components/console/log-text-test.js new file mode 100644 index 0000000..c27bdc4 --- /dev/null +++ b/ui/tests/integration/components/console/log-text-test.js @@ -0,0 +1,24 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; + +module('Integration | Component | console/log text', function (hooks) { + setupRenderingTest(hooks); + + test('it renders', async function (assert) { + // Set any properties with this.set('myProperty', 'value'); + // Handle any actions with this.on('myAction', function(val) { ... }); + const text = 'Success! You did a thing!'; + this.set('content', text); + + await render(hbs`{{console/log-text content=this.content}}`); + + assert.dom('pre').includesText(text); + }); +}); diff --git a/ui/tests/integration/components/console/ui-panel-test.js b/ui/tests/integration/components/console/ui-panel-test.js new file mode 100644 index 0000000..d64f12d --- /dev/null +++ b/ui/tests/integration/components/console/ui-panel-test.js @@ -0,0 +1,109 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render, settled } from '@ember/test-helpers'; +import { create } from 'ember-cli-page-object'; +import uiPanel from 'vault/tests/pages/components/console/ui-panel'; +import hbs from 'htmlbars-inline-precompile'; + +const component = create(uiPanel); + +module('Integration | Component | console/ui panel', function (hooks) { + setupRenderingTest(hooks); + + test('it renders', async function (assert) { + await render(hbs`{{console/ui-panel}}`); + + assert.ok(component.hasInput); + }); + + test('it clears console input on enter', async function (assert) { + await render(hbs`{{console/ui-panel}}`); + + await component.runCommands('list this/thing/here'); + await settled(); + assert.strictEqual(component.consoleInputValue, '', 'empties input field on enter'); + }); + + test('it clears the log when using clear command', async function (assert) { + await render(hbs`{{console/ui-panel}}`); + + await component.runCommands(['list this/thing/here', 'list this/other/thing', 'read another/thing']); + await settled(); + assert.notEqual(component.logOutput, '', 'there is output in the log'); + + await component.runCommands('clear'); + await settled(); + await component.up(); + await settled(); + assert.strictEqual(component.logOutput, '', 'clears the output log'); + assert.strictEqual( + component.consoleInputValue, + 'clear', + 'populates console input with previous command on up after enter' + ); + }); + + test('it adds command to history on enter', async function (assert) { + await render(hbs`{{console/ui-panel}}`); + + await component.runCommands('list this/thing/here'); + await settled(); + await component.up(); + await settled(); + assert.strictEqual( + component.consoleInputValue, + 'list this/thing/here', + 'populates console input with previous command on up after enter' + ); + await component.down(); + await settled(); + assert.strictEqual(component.consoleInputValue, '', 'populates console input with next command on down'); + }); + + test('it cycles through history with more than one command', async function (assert) { + await render(hbs`{{console/ui-panel}}`); + + await component.runCommands(['list this/thing/here', 'read that/thing/there', 'qwerty']); + await settled(); + await component.up(); + await settled(); + assert.strictEqual( + component.consoleInputValue, + 'qwerty', + 'populates console input with previous command on up after enter' + ); + await component.up(); + await settled(); + assert.strictEqual( + component.consoleInputValue, + 'read that/thing/there', + 'populates console input with previous command on up' + ); + await component.up(); + await settled(); + assert.strictEqual( + component.consoleInputValue, + 'list this/thing/here', + 'populates console input with previous command on up' + ); + await component.up(); + await settled(); + assert.strictEqual( + component.consoleInputValue, + 'qwerty', + 'populates console input with initial command if cycled through all previous commands' + ); + await component.down(); + await settled(); + assert.strictEqual( + component.consoleInputValue, + '', + 'clears console input if down pressed after history is on most recent command' + ); + }); +}); diff --git a/ui/tests/integration/components/control-group-success-test.js b/ui/tests/integration/components/control-group-success-test.js new file mode 100644 index 0000000..857c912 --- /dev/null +++ 
b/ui/tests/integration/components/control-group-success-test.js @@ -0,0 +1,87 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { later, run, _cancelTimers as cancelTimers } from '@ember/runloop'; +import { resolve } from 'rsvp'; +import Service from '@ember/service'; +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render, settled } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; +import sinon from 'sinon'; +import { create } from 'ember-cli-page-object'; +import controlGroupSuccess from '../../pages/components/control-group-success'; + +const component = create(controlGroupSuccess); + +const controlGroupService = Service.extend({ + deleteControlGroupToken: sinon.stub(), + markTokenForUnwrap: sinon.stub(), +}); + +const storeService = Service.extend({ + adapterFor() { + return { + toolAction() { + return resolve({ data: { foo: 'bar' } }); + }, + }; + }, +}); + +module('Integration | Component | control group success', function (hooks) { + setupRenderingTest(hooks); + + hooks.beforeEach(function () { + run(() => { + this.owner.unregister('service:store'); + this.owner.register('service:control-group', controlGroupService); + this.controlGroup = this.owner.lookup('service:control-group'); + this.owner.register('service:store', storeService); + this.router = this.owner.lookup('service:router'); + this.router.reopen({ + transitionTo: sinon.stub().returns(resolve()), + }); + }); + }); + + const MODEL = { + approved: false, + requestPath: 'foo/bar', + id: 'accessor', + requestEntity: { id: 'requestor', name: 'entity8509' }, + reload: sinon.stub(), + }; + test('render with saved token', async function (assert) { + assert.expect(3); + const response = { + uiParams: { url: '/foo' }, + token: 'token', + }; + this.set('model', MODEL); + this.set('response', response); + await render(hbs`{{control-group-success model=this.model controlGroupResponse=this.response }}`); + assert.ok(component.showsNavigateMessage, 'shows unwrap message'); + await component.navigate(); + later(() => cancelTimers(), 50); + return settled().then(() => { + assert.ok(this.controlGroup.markTokenForUnwrap.calledOnce, 'marks token for unwrap'); + assert.ok(this.router.transitionTo.calledOnce, 'calls router transition'); + }); + }); + + test('render without token', async function (assert) { + assert.expect(2); + this.set('model', MODEL); + await render(hbs`{{control-group-success model=this.model}}`); + assert.ok(component.showsUnwrapForm, 'shows unwrap form'); + await component.token('token'); + component.unwrap(); + later(() => cancelTimers(), 50); + return settled().then(() => { + assert.ok(component.showsJsonViewer, 'shows unwrapped data'); + }); + }); +}); diff --git a/ui/tests/integration/components/control-group-test.js b/ui/tests/integration/components/control-group-test.js new file mode 100644 index 0000000..49a0b17 --- /dev/null +++ b/ui/tests/integration/components/control-group-test.js @@ -0,0 +1,187 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import Service from '@ember/service'; +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; +import sinon from 'sinon'; +import { create } from 'ember-cli-page-object'; +import controlGroup from '../../pages/components/control-group'; + +const component = create(controlGroup); + +const controlGroupService = Service.extend({ + init() { + this._super(...arguments); + this.set('wrapInfo', null); + }, + wrapInfoForAccessor() { + return this.wrapInfo; + }, +}); + +const authService = Service.extend(); + +module('Integration | Component | control group', function (hooks) { + setupRenderingTest(hooks); + + hooks.beforeEach(function () { + this.owner.register('service:auth', authService); + this.owner.register('service:control-group', controlGroupService); + this.controlGroup = this.owner.lookup('service:controlGroup'); + this.auth = this.owner.lookup('service:auth'); + }); + + const setup = (modelData = {}, authData = {}) => { + const modelDefaults = { + approved: false, + requestPath: 'foo/bar', + id: 'accessor', + requestEntity: { id: 'requestor', name: 'entity8509' }, + reload: sinon.stub(), + }; + const authDataDefaults = { entity_id: 'requestor' }; + + return { + model: { + ...modelDefaults, + ...modelData, + }, + authData: { + ...authDataDefaults, + ...authData, + }, + }; + }; + + test('requestor rendering', async function (assert) { + const { model, authData } = setup(); + this.set('model', model); + this.set('auth.authData', authData); + await render(hbs`{{control-group model=this.model}}`); + assert.ok(component.showsAccessorCallout, 'shows accessor callout'); + assert.strictEqual(component.bannerPrefix, 'Locked'); + assert.strictEqual(component.bannerText, 'The path you requested is locked by a Control Group'); + assert.strictEqual(component.requestorText, `You are requesting access to ${model.requestPath}`); + assert.false(component.showsTokenText, 'does not show token message when there is no token'); + assert.ok(component.showsRefresh, 'shows refresh button'); + assert.ok(component.authorizationText, 'Awaiting authorization.'); + }); + + test('requestor rendering: with token', async function (assert) { + const { model, authData } = setup(); + this.set('model', model); + this.set('auth.authData', authData); + this.set('controlGroup.wrapInfo', { token: 'token' }); + await render(hbs`{{control-group model=this.model}}`); + assert.true(component.showsTokenText, 'shows token message'); + assert.strictEqual(component.token, 'token', 'shows token value'); + }); + + test('requestor rendering: some approvals', async function (assert) { + const { model, authData } = setup({ authorizations: [{ name: 'manager 1' }, { name: 'manager 2' }] }); + this.set('model', model); + this.set('auth.authData', authData); + await render(hbs`{{control-group model=this.model}}`); + assert.ok(component.authorizationText, 'Already approved by manager 1, manager 2'); + }); + + test('requestor rendering: approved with no token', async function (assert) { + const { model, authData } = setup({ approved: true }); + this.set('model', model); + this.set('auth.authData', authData); + await render(hbs`{{control-group model=this.model}}`); + + assert.strictEqual(component.bannerPrefix, 'Success!'); + assert.strictEqual(component.bannerText, 'You have been given authorization'); + assert.false(component.showsTokenText, 'does not show token message when 
there is no token'); + assert.notOk(component.showsRefresh, 'does not show refresh button'); + assert.ok(component.showsSuccessComponent, 'renders control group success'); + }); + + test('requestor rendering: approved with token', async function (assert) { + const { model, authData } = setup({ approved: true }); + this.set('model', model); + this.set('auth.authData', authData); + this.set('controlGroup.wrapInfo', { token: 'token' }); + await render(hbs`{{control-group model=this.model}}`); + assert.true(component.showsTokenText, 'shows token'); + assert.notOk(component.showsRefresh, 'does not show refresh button'); + assert.ok(component.showsSuccessComponent, 'renders control group success'); + }); + + test('authorizer rendering', async function (assert) { + const { model, authData } = setup({ canAuthorize: true }, { entity_id: 'manager' }); + + this.set('model', model); + this.set('auth.authData', authData); + await render(hbs`{{control-group model=this.model}}`); + + assert.strictEqual(component.bannerPrefix, 'Locked'); + assert.strictEqual( + component.bannerText, + 'Someone is requesting access to a path locked by a Control Group' + ); + assert.strictEqual( + component.requestorText, + `${model.requestEntity.name} is requesting access to ${model.requestPath}` + ); + assert.false(component.showsTokenText, 'does not show token message when there is no token'); + + assert.ok(component.showsAuthorize, 'shows authorize button'); + }); + + test('authorizer rendering: authorized', async function (assert) { + const { model, authData } = setup( + { canAuthorize: true, authorizations: [{ id: 'manager', name: 'manager' }] }, + { entity_id: 'manager' } + ); + + this.set('model', model); + this.set('auth.authData', authData); + await render(hbs`{{control-group model=this.model}}`); + + assert.strictEqual(component.bannerPrefix, 'Thanks!'); + assert.strictEqual(component.bannerText, 'You have given authorization'); + assert.ok(component.showsBackLink, 'back link is visible'); + }); + + test('authorizer rendering: authorized and success', async function (assert) { + const { model, authData } = setup( + { approved: true, canAuthorize: true, authorizations: [{ id: 'manager', name: 'manager' }] }, + { entity_id: 'manager' } + ); + + this.set('model', model); + this.set('auth.authData', authData); + await render(hbs`{{control-group model=this.model}}`); + + assert.strictEqual(component.bannerPrefix, 'Thanks!'); + assert.strictEqual(component.bannerText, 'You have given authorization'); + assert.ok(component.showsBackLink, 'back link is visible'); + assert.strictEqual( + component.requestorText, + `${model.requestEntity.name} is authorized to access ${model.requestPath}` + ); + assert.notOk(component.showsSuccessComponent, 'does not render control group success'); + }); + + test('third-party: success', async function (assert) { + const { model, authData } = setup( + { approved: true, canAuthorize: true, authorizations: [{ id: 'foo', name: 'foo' }] }, + { entity_id: 'manager' } + ); + + this.set('model', model); + this.set('auth.authData', authData); + await render(hbs`{{control-group model=this.model}}`); + assert.strictEqual(component.bannerPrefix, 'Success!'); + assert.strictEqual(component.bannerText, 'This Control Group has been authorized'); + assert.ok(component.showsBackLink, 'back link is visible'); + assert.notOk(component.showsSuccessComponent, 'does not render control group success'); + }); +}); diff --git a/ui/tests/integration/components/database-role-edit-test.js 
b/ui/tests/integration/components/database-role-edit-test.js new file mode 100644 index 0000000..d0e9a43 --- /dev/null +++ b/ui/tests/integration/components/database-role-edit-test.js @@ -0,0 +1,50 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import { capabilitiesStub } from 'vault/tests/helpers/stubs'; + +module('Integration | Component | database-role-edit', function (hooks) { + setupRenderingTest(hooks); + setupMirage(hooks); + + hooks.beforeEach(function () { + this.store = this.owner.lookup('service:store'); + this.store.pushPayload('database-role', { + modelName: 'database/role', + database: ['my-mongodb-database'], + backend: 'database', + type: 'static', + name: 'my-static-role', + id: 'my-static-role', + }); + this.store.pushPayload('database-role', { + modelName: 'database/role', + database: ['my-mongodb-database'], + backend: 'database', + type: 'dynamic', + name: 'my-dynamic-role', + id: 'my-dynamic-role', + }); + this.modelStatic = this.store.peekRecord('database/role', 'my-static-role'); + this.modelDynamic = this.store.peekRecord('database/role', 'my-dynamic-role'); + }); + + test('it should show Get credentials button when a user has the correct policy', async function (assert) { + this.server.post('/sys/capabilities-self', capabilitiesStub('database/static-creds/my-role', ['read'])); + await render(hbs``); + assert.dom('[data-test-database-role-creds="static"]').exists('Get credentials button exists'); + }); + + test('it should show Generate credentials button when a user has the correct policy', async function (assert) { + this.server.post('/sys/capabilities-self', capabilitiesStub('database/creds/my-role', ['read'])); + await render(hbs``); + assert.dom('[data-test-database-role-creds="dynamic"]').exists('Generate credentials button exists'); + }); +}); diff --git a/ui/tests/integration/components/database-role-setting-form-test.js b/ui/tests/integration/components/database-role-setting-form-test.js new file mode 100644 index 0000000..39bfbb9 --- /dev/null +++ b/ui/tests/integration/components/database-role-setting-form-test.js @@ -0,0 +1,165 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import EmberObject from '@ember/object'; +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; + +const testCases = [ + { + // default case should show all possible fields for each type + pluginType: '', + staticRoleFields: ['name', 'username', 'rotation_period', 'rotation_statements'], + dynamicRoleFields: [ + 'name', + 'default_ttl', + 'max_ttl', + 'creation_statements', + 'revocation_statements', + 'rollback_statements', + 'renew_statements', + ], + }, + { + pluginType: 'elasticsearch-database-plugin', + staticRoleFields: ['username', 'rotation_period'], + dynamicRoleFields: ['creation_statement', 'default_ttl', 'max_ttl'], + }, + { + pluginType: 'mongodb-database-plugin', + staticRoleFields: ['username', 'rotation_period'], + dynamicRoleFields: ['creation_statement', 'revocation_statement', 'default_ttl', 'max_ttl'], + statementsHidden: true, + }, + { + pluginType: 'mssql-database-plugin', + staticRoleFields: ['username', 'rotation_period'], + dynamicRoleFields: ['creation_statements', 'revocation_statements', 'default_ttl', 'max_ttl'], + }, + { + pluginType: 'mysql-database-plugin', + staticRoleFields: ['username', 'rotation_period'], + dynamicRoleFields: ['creation_statements', 'revocation_statements', 'default_ttl', 'max_ttl'], + }, + { + pluginType: 'mysql-aurora-database-plugin', + staticRoleFields: ['username', 'rotation_period'], + dynamicRoleFields: ['creation_statements', 'revocation_statements', 'default_ttl', 'max_ttl'], + }, + { + pluginType: 'mysql-rds-database-plugin', + staticRoleFields: ['username', 'rotation_period'], + dynamicRoleFields: ['creation_statements', 'revocation_statements', 'default_ttl', 'max_ttl'], + }, + { + pluginType: 'mysql-legacy-database-plugin', + staticRoleFields: ['username', 'rotation_period'], + dynamicRoleFields: ['creation_statements', 'revocation_statements', 'default_ttl', 'max_ttl'], + }, + { + pluginType: 'vault-plugin-database-oracle', + staticRoleFields: ['username', 'rotation_period'], + dynamicRoleFields: ['creation_statements', 'revocation_statements', 'default_ttl', 'max_ttl'], + }, +]; + +// used to calculate checks that fields do NOT show up +const ALL_ATTRS = [ + { name: 'default_ttl', type: 'string', options: {} }, + { name: 'max_ttl', type: 'string', options: {} }, + { name: 'username', type: 'string', options: {} }, + { name: 'rotation_period', type: 'string', options: {} }, + { name: 'creation_statements', type: 'string', options: {} }, + { name: 'creation_statement', type: 'string', options: {} }, + { name: 'revocation_statements', type: 'string', options: {} }, + { name: 'revocation_statement', type: 'string', options: {} }, + { name: 'rotation_statements', type: 'string', options: {} }, + { name: 'rollback_statements', type: 'string', options: {} }, + { name: 'renew_statements', type: 'string', options: {} }, +]; +const getFields = (nameArray) => { + const show = ALL_ATTRS.filter((attr) => nameArray.indexOf(attr.name) >= 0); + const hide = ALL_ATTRS.filter((attr) => nameArray.indexOf(attr.name) < 0); + return { show, hide }; +}; + +module('Integration | Component | database-role-setting-form', function (hooks) { + setupRenderingTest(hooks); + + hooks.beforeEach(function () { + this.set( + 'model', + EmberObject.create({ + // attrs is not its own set value b/c ember hates arrays as args + attrs: ALL_ATTRS, + }) + ); + }); + + test('it shows empty 
states when no roleType passed in', async function (assert) { + await render(hbs``); + assert.dom('[data-test-component="empty-state"]').exists({ count: 2 }, 'Two empty states exist'); + }); + + test('it shows appropriate fields based on roleType and db plugin', async function (assert) { + this.set('roleType', 'static'); + this.set('dbType', ''); + await render(hbs` + + `); + assert.dom('[data-test-component="empty-state"]').doesNotExist('Does not show empty states'); + for (const testCase of testCases) { + const staticFields = getFields(testCase.staticRoleFields); + const dynamicFields = getFields(testCase.dynamicRoleFields); + this.set('dbType', testCase.pluginType); + this.set('roleType', 'static'); + staticFields.show.forEach((attr) => { + assert + .dom(`[data-test-input="${attr.name}"]`) + .exists( + `${attr.name} attribute exists on static role for ${testCase.pluginType || 'default'} db type` + ); + }); + staticFields.hide.forEach((attr) => { + assert + .dom(`[data-test-input="${attr.name}"]`) + .doesNotExist( + `${attr.name} attribute does not exist on static role for ${ + testCase.pluginType || 'default' + } db type` + ); + }); + if (testCase.statementsHidden) { + assert + .dom('[data-test-statements-section]') + .doesNotExist(`Statements section is hidden for static ${testCase.pluginType} role`); + } + this.set('roleType', 'dynamic'); + dynamicFields.show.forEach((attr) => { + assert + .dom(`[data-test-input="${attr.name}"]`) + .exists( + `${attr.name} attribute exists on dynamic role for ${testCase.pluginType || 'default'} db type` + ); + }); + dynamicFields.hide.forEach((attr) => { + assert + .dom(`[data-test-input="${attr.name}"]`) + .doesNotExist( + `${attr.name} attribute does not exist on dynamic role for ${ + testCase.pluginType || 'default' + } db type` + ); + }); + } + }); +}); diff --git a/ui/tests/integration/components/date-dropdown-test.js b/ui/tests/integration/components/date-dropdown-test.js new file mode 100644 index 0000000..5d44569 --- /dev/null +++ b/ui/tests/integration/components/date-dropdown-test.js @@ -0,0 +1,186 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import sinon from 'sinon'; +import { setupRenderingTest } from 'ember-qunit'; +import { click, render } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; +import { ARRAY_OF_MONTHS } from 'core/utils/date-formatters'; +import timestamp from 'core/utils/timestamp'; + +const SELECTORS = { + monthDropdown: '[data-test-popup-menu-trigger="month"]', + specificMonth: (m) => `[data-test-dropdown-month="${m}"]`, + yearDropdown: '[data-test-popup-menu-trigger="year"]', + specificYear: (y) => `[data-test-dropdown-year="${y}"]`, + + submitButton: '[data-test-date-dropdown-submit]', + cancelButton: '[data-test-date-dropdown-cancel]', + monthOptions: '[data-test-month-list] button', +}; + +module('Integration | Component | date-dropdown', function (hooks) { + setupRenderingTest(hooks); + + hooks.before(function () { + sinon.stub(timestamp, 'now').callsFake(() => new Date('2018-04-03T14:15:30')); + }); + hooks.after(function () { + timestamp.now.restore(); + }); + + test('it renders dropdown', async function (assert) { + await render(hbs` +
+ +
+ `); + assert.dom(SELECTORS.submitButton).hasText('Submit', 'button renders default text'); + assert.dom(SELECTORS.cancelButton).doesNotExist('it does not render cancel button by default'); + }); + + test('it fires off cancel callback', async function (assert) { + assert.expect(2); + const onCancel = () => { + assert.ok('fires onCancel callback'); + }; + this.set('onCancel', onCancel); + await render(hbs` +
+ +
+ `); + assert.dom(SELECTORS.submitButton).hasText('Save', 'button renders passed in text'); + await click(SELECTORS.cancelButton); + }); + + test('it renders dropdown and selects month and year', async function (assert) { + assert.expect(26); + const parentAction = (args) => { + assert.propEqual( + args, + { + dateType: 'start', + monthIdx: 1, + monthName: 'February', + year: 2016, + }, + 'sends correct args to parent' + ); + }; + this.set('parentAction', parentAction); + + await render(hbs` +
+ +
+ `); + assert.dom(SELECTORS.submitButton).isDisabled('button is disabled when no month or year selected'); + + await click(SELECTORS.monthDropdown); + + assert.dom(SELECTORS.monthOptions).exists({ count: 12 }, 'dropdown has 12 months'); + ARRAY_OF_MONTHS.forEach((month) => { + assert.dom(SELECTORS.specificMonth(month)).hasText(`${month}`, `dropdown includes ${month}`); + }); + + await click(SELECTORS.specificMonth('February')); + assert.dom(SELECTORS.monthDropdown).hasText('February', 'dropdown shows selected month'); + assert.dom('.ember-basic-dropdown-content').doesNotExist('dropdown closes after selecting month'); + + await click(SELECTORS.yearDropdown); + + assert.dom('[data-test-dropdown-year]').exists({ count: 5 }, 'dropdown has 5 years'); + for (const year of [2018, 2017, 2016, 2015, 2014]) { + assert.dom(SELECTORS.specificYear(year)).exists(); + } + + await click('[data-test-dropdown-year="2016"]'); + assert.dom(SELECTORS.yearDropdown).hasText(`2016`, `dropdown shows selected year`); + assert.dom('.ember-basic-dropdown-content').doesNotExist('dropdown closes after selecting year'); + assert.dom(SELECTORS.submitButton).isNotDisabled('button enabled when month and year selected'); + + await click(SELECTORS.submitButton); + }); + + test('selecting month first: current year enabled when current month selected', async function (assert) { + assert.expect(5); + await render(hbs` +
+ +
+ `); + // select current month + await click(SELECTORS.monthDropdown); + await click(SELECTORS.specificMonth('January')); + await click(SELECTORS.yearDropdown); + // all years should be selectable + for (const year of [2018, 2017, 2016, 2015, 2014]) { + assert.dom(SELECTORS.specificYear(year)).isNotDisabled(`year ${year} is selectable`); + } + }); + + test('selecting month first: it disables current year when future months selected', async function (assert) { + assert.expect(5); + await render(hbs` +
+ +
+ `); + + // select future month + await click(SELECTORS.monthDropdown); + await click(SELECTORS.specificMonth('June')); + await click(SELECTORS.yearDropdown); + + assert.dom(SELECTORS.specificYear(2018)).isDisabled(`current year is disabled`); + // previous years should be selectable + for (const year of [2017, 2016, 2015, 2014]) { + assert.dom(SELECTORS.specificYear(year)).isNotDisabled(`year ${year} is selectable`); + } + }); + + test('selecting year first: it disables future months when current year selected', async function (assert) { + assert.expect(12); + await render(hbs` +
+ +
+ `); + await click(SELECTORS.yearDropdown); + await click(SELECTORS.specificYear(2018)); + await click(SELECTORS.monthDropdown); + + const expectedSelectable = ['January', 'February', 'March', 'April']; + ARRAY_OF_MONTHS.forEach((month) => { + if (expectedSelectable.includes(month)) { + assert.dom(SELECTORS.specificMonth(month)).isNotDisabled(`${month} is selectable for current year`); + } else { + assert.dom(SELECTORS.specificMonth(month)).isDisabled(`${month} is disabled for current year`); + } + }); + }); + + test('selecting year first: it enables all months when past year is selected', async function (assert) { + assert.expect(12); + await render(hbs` +
+ +
+ `); + + await click(SELECTORS.yearDropdown); + await click(SELECTORS.specificYear(2017)); + await click(SELECTORS.monthDropdown); + + ARRAY_OF_MONTHS.forEach((month) => { + assert.dom(SELECTORS.specificMonth(month)).isNotDisabled(`${month} is selectable for previous year`); + }); + }); +}); diff --git a/ui/tests/integration/components/diff-version-selector-test.js b/ui/tests/integration/components/diff-version-selector-test.js new file mode 100644 index 0000000..3ecce59 --- /dev/null +++ b/ui/tests/integration/components/diff-version-selector-test.js @@ -0,0 +1,43 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import EmberObject from '@ember/object'; +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render, click } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; + +const VERSIONS = [ + { + version: 2, + }, + { + version: 1, + }, +]; + +module('Integration | Component | diff-version-selector', function (hooks) { + setupRenderingTest(hooks); + + test('it renders', async function (assert) { + this.set( + 'model', + EmberObject.create({ + currentVersion: 2, + versions: VERSIONS, + }) + ); + await render(hbs``); + const leftSideVersion = document + .querySelector('[data-test-popup-menu-trigger="left-version"]') + .innerText.trim(); + assert.strictEqual(leftSideVersion, 'Version 2', 'left side toolbar defaults to currentVersion'); + + await click('[data-test-popup-menu-trigger="left-version"]'); + + assert.dom('[data-test-leftSide-version="1"]').exists('leftside shows both versions'); + assert.dom('[data-test-leftSide-version="2"]').exists('leftside shows both versions'); + }); +}); diff --git a/ui/tests/integration/components/download-button-test.js b/ui/tests/integration/components/download-button-test.js new file mode 100644 index 0000000..b31c6c3 --- /dev/null +++ b/ui/tests/integration/components/download-button-test.js @@ -0,0 +1,110 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { click, render, resetOnerror, setupOnerror } from '@ember/test-helpers'; +import { isPresent } from 'ember-cli-page-object'; +import hbs from 'htmlbars-inline-precompile'; +import sinon from 'sinon'; + +const SELECTORS = { + button: '[data-test-download-button]', + icon: '[data-test-icon="download"]', +}; +module('Integration | Component | download button', function (hooks) { + setupRenderingTest(hooks); + + hooks.beforeEach(function () { + const downloadService = this.owner.lookup('service:download'); + this.downloadSpy = sinon.stub(downloadService, 'miscExtension'); + + this.data = 'my data to download'; + this.filename = 'my special file'; + this.extension = 'csv'; + }); + + test('it renders', async function (assert) { + await render(hbs` + + + Download + + `); + assert.dom(SELECTORS.button).hasClass('button'); + assert.ok(isPresent(SELECTORS.icon), 'renders yielded icon'); + assert.dom(SELECTORS.button).hasTextContaining('Download', 'renders yielded text'); + }); + + test('it downloads with defaults when only passed @data arg', async function (assert) { + assert.expect(3); + + await render(hbs` + + Download + + `); + await click(SELECTORS.button); + const [filename, content, extension] = this.downloadSpy.getCall(0).args; + assert.ok(filename.includes('Z'), 'filename defaults to ISO string'); + assert.strictEqual(content, this.data, 'called with correct data'); + assert.strictEqual(extension, 'txt', 'called with default extension'); + }); + + test('it calls download service with passed in args', async function (assert) { + assert.expect(3); + + await render(hbs` + + Download + + `); + + await click(SELECTORS.button); + const [filename, content, extension] = this.downloadSpy.getCall(0).args; + assert.ok(filename.includes(`${this.filename}-`), 'filename added to ISO string'); + assert.strictEqual(content, this.data, 'called with correct data'); + assert.strictEqual(extension, this.extension, 'called with passed in extension'); + }); + + test('it sets download content with arg passed to fetchData', async function (assert) { + assert.expect(3); + this.fetchData = () => 'this is fetched data from a parent function'; + await render(hbs` + + Download + + `); + + await click(SELECTORS.button); + const [filename, content, extension] = this.downloadSpy.getCall(0).args; + assert.ok(filename.includes('Z'), 'filename defaults to ISO string'); + assert.strictEqual(content, this.fetchData(), 'called with fetched data'); + assert.strictEqual(extension, 'txt', 'called with default extension'); + }); + + test('it throws error when both data and fetchData are passed as args', async function (assert) { + assert.expect(1); + setupOnerror((error) => { + assert.strictEqual( + error.message, + 'Assertion Failed: Only pass either @data or @fetchData, passing both means @data will be overwritten by the return value of @fetchData', + 'throws error with incorrect args' + ); + }); + this.fetchData = () => 'this is fetched data from a parent function'; + await render(hbs` + + `); + resetOnerror(); + }); +}); diff --git a/ui/tests/integration/components/edit-form-kmip-role-test.js b/ui/tests/integration/components/edit-form-kmip-role-test.js new file mode 100644 index 0000000..f92ba4f --- /dev/null +++ b/ui/tests/integration/components/edit-form-kmip-role-test.js @@ -0,0 +1,238 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { later, run, _cancelTimers as cancelTimers } from '@ember/runloop'; +import { resolve } from 'rsvp'; +import EmberObject, { computed } from '@ember/object'; +import Service from '@ember/service'; +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { click, render, settled } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; +import sinon from 'sinon'; +import { setupEngine } from 'ember-engines/test-support'; +import { COMPUTEDS } from 'vault/models/kmip/role'; + +const flash = Service.extend({ + success: sinon.stub(), +}); +const namespace = Service.extend({}); + +const fieldToCheckbox = (field) => ({ name: field, type: 'boolean' }); + +const createModel = (options) => { + const model = EmberObject.extend(COMPUTEDS, { + /* eslint-disable ember/avoid-leaking-state-in-ember-objects */ + newFields: [ + 'role', + 'operationActivate', + 'operationAddAttribute', + 'operationAll', + 'operationCreate', + 'operationDestroy', + 'operationDiscoverVersion', + 'operationGet', + 'operationGetAttributes', + 'operationLocate', + 'operationNone', + 'operationRekey', + 'operationRevoke', + 'tlsClientKeyBits', + 'tlsClientKeyType', + 'tlsClientTtl', + ], + fields: computed('operationFields', function () { + return this.operationFields.map(fieldToCheckbox); + }), + destroyRecord() { + return resolve(); + }, + save() { + return resolve(); + }, + rollbackAttributes() {}, + }); + return model.create({ + ...options, + }); +}; + +module('Integration | Component | edit form kmip role', function (hooks) { + setupRenderingTest(hooks); + setupEngine(hooks, 'kmip'); + + hooks.beforeEach(function () { + this.context = { owner: this.engine }; // this.engine set by setupEngine + run(() => { + this.engine.unregister('service:flash-messages'); + this.engine.register('service:flash-messages', flash); + this.engine.register('service:namespace', namespace); + }); + }); + + test('it renders: new model', async function (assert) { + assert.expect(3); + const model = createModel({ isNew: true }); + this.set('model', model); + this.onSave = ({ model }) => { + assert.false(model.operationNone, 'callback fires with operationNone as false'); + assert.true(model.operationAll, 'callback fires with operationAll as true'); + }; + await render(hbs``, this.context); + + assert.dom('[data-test-input="operationAll"]').isChecked('sets operationAll'); + await click('[data-test-edit-form-submit]'); + }); + + test('it renders: operationAll', async function (assert) { + assert.expect(3); + const model = createModel({ operationAll: true }); + this.set('model', model); + this.onSave = ({ model }) => { + assert.false(model.operationNone, 'callback fires with operationNone as false'); + assert.true(model.operationAll, 'callback fires with operationAll as true'); + }; + await render(hbs``, this.context); + assert.dom('[data-test-input="operationAll"]').isChecked('sets operationAll'); + await click('[data-test-edit-form-submit]'); + }); + + test('it renders: operationNone', async function (assert) { + assert.expect(2); + const model = createModel({ operationNone: true, operationAll: undefined }); + this.set('model', model); + + this.onSave = ({ model }) => { + assert.true(model.operationNone, 'callback fires with operationNone as true'); + }; + await render(hbs``, this.context); + assert.dom('[data-test-input="operationNone"]').isNotChecked('sets operationNone'); + await click('[data-test-edit-form-submit]'); + }); + + test('it 
renders: choose operations', async function (assert) { + assert.expect(3); + const model = createModel({ operationGet: true }); + this.set('model', model); + this.onSave = ({ model }) => { + assert.false(model.operationNone, 'callback fires with operationNone as false'); + }; + await render(hbs``, this.context); + + assert.dom('[data-test-input="operationNone"]').isChecked('sets operationNone'); + assert.dom('[data-test-input="operationAll"]').isNotChecked('sets operationAll'); + await click('[data-test-edit-form-submit]'); + }); + + test('it saves operationNone=true when unchecking operationAll box', async function (assert) { + assert.expect(15); + const model = createModel({ isNew: true }); + this.set('model', model); + this.onSave = ({ model }) => { + assert.true(model.operationNone, 'callback fires with operationNone as true'); + assert.false(model.operationAll, 'callback fires with operationAll as false'); + }; + + await render(hbs``, this.context); + await click('[data-test-input="operationAll"]'); + for (const field of model.fields) { + const { name } = field; + if (name === 'operationNone') continue; + assert.dom(`[data-test-input="${name}"]`).isNotChecked(`${name} is unchecked`); + } + + assert.dom('[data-test-input="operationAll"]').isNotChecked('sets operationAll'); + assert + .dom('[data-test-input="operationNone"]') + .isChecked('operationNone toggle is true which means allow operations'); + await click('[data-test-edit-form-submit]'); + }); + + const savingTests = [ + [ + 'setting operationAll', + { operationNone: true, operationGet: true }, + 'operationNone', + { + operationAll: true, + operationNone: false, + operationGet: true, + }, + { + operationGet: null, + operationNone: false, + }, + 5, + ], + [ + 'setting operationNone', + { operationAll: true, operationCreate: true }, + 'operationNone', + { + operationAll: false, + operationNone: true, + operationCreate: true, + }, + { + operationNone: true, + operationCreate: null, + operationAll: false, + }, + 6, + ], + + [ + 'setting choose, and selecting an additional item', + { operationAll: true, operationGet: true, operationCreate: true }, + 'operationAll,operationDestroy', + { + operationAll: false, + operationCreate: true, + operationGet: true, + }, + { + operationGet: true, + operationCreate: true, + operationDestroy: true, + operationAll: false, + }, + 7, + ], + ]; + for (const testCase of savingTests) { + const [name, initialState, displayClicks, stateBeforeSave, stateAfterSave, assertionCount] = testCase; + test(name, async function (assert) { + assert.expect(assertionCount); + const model = createModel(initialState); + this.set('model', model); + const clickTargets = displayClicks.split(','); + await render(hbs``, this.context); + + for (const clickTarget of clickTargets) { + await click(`label[for=${clickTarget}]`); + } + for (const beforeStateKey of Object.keys(stateBeforeSave)) { + assert.strictEqual( + model.get(beforeStateKey), + stateBeforeSave[beforeStateKey], + `sets ${beforeStateKey}` + ); + } + + click('[data-test-edit-form-submit]'); + + later(() => cancelTimers(), 50); + return settled().then(() => { + for (const afterStateKey of Object.keys(stateAfterSave)) { + assert.strictEqual( + model.get(afterStateKey), + stateAfterSave[afterStateKey], + `sets ${afterStateKey} on save` + ); + } + }); + }); + } +}); diff --git a/ui/tests/integration/components/edit-form-test.js b/ui/tests/integration/components/edit-form-test.js new file mode 100644 index 0000000..6e54781 --- /dev/null +++ 
b/ui/tests/integration/components/edit-form-test.js
@@ -0,0 +1,79 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import { later, run, _cancelTimers as cancelTimers } from '@ember/runloop';
+import { resolve } from 'rsvp';
+import EmberObject from '@ember/object';
+import Service from '@ember/service';
+import { module, test } from 'qunit';
+import { setupRenderingTest } from 'ember-qunit';
+import { render, settled } from '@ember/test-helpers';
+import hbs from 'htmlbars-inline-precompile';
+import sinon from 'sinon';
+import { create } from 'ember-cli-page-object';
+import editForm from 'vault/tests/pages/components/edit-form';
+
+const component = create(editForm);
+
+const flash = Service.extend({
+  success: sinon.stub(),
+});
+
+const createModel = (canDelete = true) => {
+  return EmberObject.create({
+    fields: [
+      { name: 'one', type: 'string' },
+      { name: 'two', type: 'boolean' },
+    ],
+    canDelete,
+    destroyRecord() {
+      return resolve();
+    },
+    save() {
+      return resolve();
+    },
+    rollbackAttributes() {},
+  });
+};
+
+module('Integration | Component | edit form', function (hooks) {
+  setupRenderingTest(hooks);
+
+  hooks.beforeEach(function () {
+    run(() => {
+      this.owner.unregister('service:flash-messages');
+      this.owner.register('service:flash-messages', flash);
+    });
+  });
+
+  test('it renders', async function (assert) {
+    this.set('model', createModel());
+    await render(hbs`{{edit-form model=this.model}}`);
+
+    assert.strictEqual(component.fields.length, 2, 'renders both fields');
+  });
+
+  test('it calls flash message fns on save', async function (assert) {
+    assert.expect(4);
+    const onSave = () => {
+      return resolve();
+    };
+    this.set('model', createModel());
+    this.set('onSave', onSave);
+    const saveSpy = sinon.spy(this, 'onSave');
+
+    await render(hbs`{{edit-form model=this.model onSave=this.onSave}}`);
+
+    // submit without awaiting, then cancel trailing runloop timers so settled() can resolve
+    component.submit();
+    later(() => cancelTimers(), 50);
+    return settled().then(() => {
+      assert.ok(saveSpy.calledOnce, 'calls passed onSave');
+      assert.strictEqual(saveSpy.getCall(0).args[0].saveType, 'save');
+      assert.deepEqual(saveSpy.getCall(0).args[0].model, this.model, 'passes model to onSave');
+      const flash = this.owner.lookup('service:flash-messages');
+      assert.strictEqual(flash.success.callCount, 1, 'calls flash message success');
+    });
+  });
+});
diff --git a/ui/tests/integration/components/empty-state-test.js b/ui/tests/integration/components/empty-state-test.js
new file mode 100644
index 0000000..e3cef83
--- /dev/null
+++ b/ui/tests/integration/components/empty-state-test.js
@@ -0,0 +1,38 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import { module, test } from 'qunit';
+import { setupRenderingTest } from 'ember-qunit';
+import { render } from '@ember/test-helpers';
+import hbs from 'htmlbars-inline-precompile';
+
+module('Integration | Component | empty-state', function (hooks) {
+  setupRenderingTest(hooks);
+
+  test('it renders', async function (assert) {
+    // Set any properties with this.set('myProperty', 'value');
+    // Handle any actions with this.set('myAction', function(val) { ...
}); + + await render(hbs`{{empty-state}}`); + + assert.dom(this.element).hasText(''); + + // Template block usage: + await render(hbs` + {{#empty-state + title="Empty State Title" + message="This is the empty state message" + }} + Actions Link + {{/empty-state}} + `); + + assert.dom('.empty-state-title').hasText('Empty State Title', 'renders empty state title'); + assert + .dom('.empty-state-message') + .hasText('This is the empty state message', 'renders empty state message'); + assert.dom('.empty-state-actions').hasText('Actions Link', 'renders empty state actions'); + }); +}); diff --git a/ui/tests/integration/components/features-selection-test.js b/ui/tests/integration/components/features-selection-test.js new file mode 100644 index 0000000..8cc67cc --- /dev/null +++ b/ui/tests/integration/components/features-selection-test.js @@ -0,0 +1,58 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render } from '@ember/test-helpers'; +import { create } from 'ember-cli-page-object'; +import featuresSelection from 'vault/tests/pages/components/wizard/features-selection'; +import hbs from 'htmlbars-inline-precompile'; +import Service from '@ember/service'; + +const component = create(featuresSelection); + +const permissionsService = Service.extend({ + hasPermission(path) { + // This enables the Secrets and Authentication wizard items and disables the others. + const allowedPaths = ['sys/mounts/example', 'sys/auth', 'sys/auth/foo', 'sys/wrapping/wrap']; + if (allowedPaths.includes(path)) { + return true; + } + return false; + }, +}); + +module('Integration | Component | features-selection', function (hooks) { + setupRenderingTest(hooks); + + hooks.beforeEach(function () { + this.owner.register('service:permissions', permissionsService); + }); + + test('it disables and enables wizard items according to user permissions', async function (assert) { + assert.expect(4); + const enabled = { Secrets: true, Authentication: true, Policies: false, Tools: false }; + await render(hbs``); + + component.wizardItems.forEach((i) => { + assert.strictEqual( + i.hasDisabledTooltip, + !enabled[i.text], + 'shows a tooltip only when the wizard item is not enabled' + ); + }); + }); + + test('it disables the start button if no wizard items are checked', async function (assert) { + await render(hbs``); + assert.true(component.hasDisabledStartButton); + }); + + test('it enables the start button when user has permission and wizard items are checked', async function (assert) { + await render(hbs``); + await component.selectSecrets(); + assert.false(component.hasDisabledStartButton); + }); +}); diff --git a/ui/tests/integration/components/form-error-test.js b/ui/tests/integration/components/form-error-test.js new file mode 100644 index 0000000..c847c3f --- /dev/null +++ b/ui/tests/integration/components/form-error-test.js @@ -0,0 +1,31 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; + +module('Integration | Component | form-error', function (hooks) { + setupRenderingTest(hooks); + + test('it renders', async function (assert) { + // Set any properties with this.set('myProperty', 'value'); + // Handle any actions with this.set('myAction', function(val) { ... 
}); + + await render(hbs`{{form-error}}`); + + assert.dom(this.element).hasText(''); + + // Template block usage: + await render(hbs` + {{#form-error}} + template block text + {{/form-error}} + `); + + assert.dom(this.element).hasText('template block text'); + }); +}); diff --git a/ui/tests/integration/components/form-field-label-test.js b/ui/tests/integration/components/form-field-label-test.js new file mode 100644 index 0000000..3b45c16 --- /dev/null +++ b/ui/tests/integration/components/form-field-label-test.js @@ -0,0 +1,51 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; +import { click } from '@ember/test-helpers'; + +module('Integration | Component | form-field-label', function (hooks) { + setupRenderingTest(hooks); + + test('it renders', async function (assert) { + this.setProperties({ + label: 'Test Label', + helpText: null, + subText: null, + docLink: null, + }); + + await render(hbs` + + `); + + assert.dom('label').hasAttribute('for', 'some-input', 'Attributes passed to label element'); + assert.dom('label').hasText(this.label, 'Label text renders'); + assert.dom('[data-test-help-text]').doesNotExist('Help text hidden when not provided'); + assert.dom('.sub-text').doesNotExist('Sub text hidden when not provided'); + this.setProperties({ + helpText: 'More info', + subText: 'Some description', + }); + await click('[data-test-tool-tip-trigger]'); + assert.dom('[data-test-help-text]').hasText(this.helpText, 'Help text renders in tooltip'); + assert.dom('.sub-text').hasText(this.subText, 'Sub text renders'); + assert.dom('a').doesNotExist('docLink hidden when not provided'); + this.set('docLink', '/doc/path'); + assert.dom('.sub-text').includesText('See our documentation for help', 'Doc link text renders'); + assert + .dom('a') + .hasAttribute('href', 'https://developer.hashicorp.com' + this.docLink, 'Doc link renders'); + }); +}); diff --git a/ui/tests/integration/components/form-field-test.js b/ui/tests/integration/components/form-field-test.js new file mode 100644 index 0000000..6209bcc --- /dev/null +++ b/ui/tests/integration/components/form-field-test.js @@ -0,0 +1,230 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import EmberObject from '@ember/object'; +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render, click, fillIn } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; +import { create } from 'ember-cli-page-object'; +import sinon from 'sinon'; +import formFields from '../../pages/components/form-field'; + +const component = create(formFields); + +module('Integration | Component | form field', function (hooks) { + setupRenderingTest(hooks); + + const createAttr = (name, type, options) => { + return { + name, + type, + options, + }; + }; + + const setup = async function (attr) { + // ember sets model attrs from the defaultValue key, mimicking that behavior here + const model = EmberObject.create({ [attr.name]: attr.options?.defaultValue }); + const spy = sinon.spy(); + this.set('onChange', spy); + this.set('model', model); + this.set('attr', attr); + await render(hbs``); + return [model, spy]; + }; + + test('it renders', async function (assert) { + const model = EmberObject.create({}); + this.attr = { name: 'foo' }; + this.model = model; + await render(hbs``); + assert.strictEqual(component.fields.objectAt(0).labelText[0], 'Foo', 'renders a label'); + assert.notOk(component.hasInput, 'renders only the label'); + }); + + test('it renders: string', async function (assert) { + const [model, spy] = await setup.call(this, createAttr('foo', 'string', { defaultValue: 'default' })); + assert.strictEqual(component.fields.objectAt(0).labelText[0], 'Foo', 'renders a label'); + assert.strictEqual(component.fields.objectAt(0).inputValue, 'default', 'renders default value'); + assert.ok(component.hasInput, 'renders input for string'); + await component.fields.objectAt(0).input('bar').change(); + + assert.strictEqual(model.get('foo'), 'bar'); + assert.ok(spy.calledWith('foo', 'bar'), 'onChange called with correct args'); + }); + + test('it renders: boolean', async function (assert) { + const [model, spy] = await setup.call(this, createAttr('foo', 'boolean', { defaultValue: false })); + assert.strictEqual(component.fields.objectAt(0).labelText[0], 'Foo', 'renders a label'); + assert.notOk(component.fields.objectAt(0).inputChecked, 'renders default value'); + assert.ok(component.hasCheckbox, 'renders a checkbox for boolean'); + await component.fields.objectAt(0).clickLabel(); + + assert.true(model.get('foo')); + assert.ok(spy.calledWith('foo', true), 'onChange called with correct args'); + }); + + test('it renders: number', async function (assert) { + const [model, spy] = await setup.call(this, createAttr('foo', 'number', { defaultValue: 5 })); + assert.strictEqual(component.fields.objectAt(0).labelText[0], 'Foo', 'renders a label'); + assert.strictEqual(component.fields.objectAt(0).inputValue, '5', 'renders default value'); + assert.ok(component.hasInput, 'renders input for number'); + await component.fields.objectAt(0).input(8).change(); + + assert.strictEqual(model.get('foo'), '8'); + assert.ok(spy.calledWith('foo', '8'), 'onChange called with correct args'); + }); + + test('it renders: object', async function (assert) { + await setup.call(this, createAttr('foo', 'object')); + assert.strictEqual(component.fields.objectAt(0).labelText[0], 'Foo', 'renders a label'); + assert.ok(component.hasJSONEditor, 'renders the json editor'); + }); + + test('it renders: string as json with clear button', async function (assert) { + await setup.call(this, createAttr('foo', 'string', { editType: 'json', 
allowReset: true }));
+    assert.strictEqual(component.fields.objectAt(0).labelText[0], 'Foo', 'renders a label');
+    assert.ok(component.hasJSONEditor, 'renders the json editor');
+    assert.ok(component.hasJSONClearButton, 'renders button that will clear the JSON value');
+  });
+
+  test('it renders: editType textarea', async function (assert) {
+    const [model, spy] = await setup.call(
+      this,
+      createAttr('foo', 'string', { defaultValue: 'goodbye', editType: 'textarea' })
+    );
+    assert.strictEqual(component.fields.objectAt(0).labelText[0], 'Foo', 'renders a label');
+    assert.ok(component.hasTextarea, 'renders a textarea');
+    assert.strictEqual(component.fields.objectAt(0).textareaValue, 'goodbye', 'renders default value');
+    await component.fields.objectAt(0).textarea('hello');
+
+    assert.strictEqual(model.get('foo'), 'hello');
+    assert.ok(spy.calledWith('foo', 'hello'), 'onChange called with correct args');
+  });
+
+  test('it renders: editType file', async function (assert) {
+    await setup.call(this, createAttr('foo', 'string', { editType: 'file' }));
+    assert.ok(component.hasTextFile, 'renders the text-file component');
+    await click('[data-test-text-toggle]');
+    await fillIn('[data-test-text-file-textarea]', 'hello world');
+    assert.dom('[data-test-text-file-textarea]').hasClass('masked-font');
+    await click('[data-test-button="toggle-masked"]');
+    assert.dom('[data-test-text-file-textarea]').doesNotHaveClass('masked-font');
+  });
+
+  test('it renders: editType ttl', async function (assert) {
+    const [model, spy] = await setup.call(this, createAttr('foo', null, { editType: 'ttl' }));
+    assert.ok(component.hasTTLPicker, 'renders the ttl-picker component');
+    await component.fields.objectAt(0).toggleTtl();
+    await component.fields.objectAt(0).select('h').change();
+    await component.fields.objectAt(0).ttlTime('3');
+    const expectedSeconds = `${3 * 3600}s`;
+    assert.strictEqual(model.get('foo'), expectedSeconds);
+    assert.ok(spy.calledWith('foo', expectedSeconds), 'onChange called with correct args');
+  });
+
+  test('it renders: editType ttl without toggle', async function (assert) {
+    const [model, spy] = await setup.call(
+      this,
+      createAttr('foo', null, { editType: 'ttl', hideToggle: true })
+    );
+    await component.fields.objectAt(0).select('h').change();
+    await component.fields.objectAt(0).ttlTime('3');
+    const expectedSeconds = `${3 * 3600}s`;
+    assert.strictEqual(model.get('foo'), expectedSeconds);
+    assert.ok(spy.calledWith('foo', expectedSeconds), 'onChange called with correct args');
+  });
+
+  test('it renders: radio buttons for possible values', async function (assert) {
+    const [model, spy] = await setup.call(
+      this,
+      createAttr('foo', null, { editType: 'radio', possibleValues: ['SHA1', 'SHA256'] })
+    );
+    assert.ok(component.hasRadio, 'renders radio buttons');
+    const selectedValue = 'SHA256';
+    await component.selectRadioInput(selectedValue);
+    assert.strictEqual(model.get('foo'), selectedValue);
+    assert.ok(spy.calledWith('foo', selectedValue), 'onChange called with correct args');
+  });
+
+  test('it renders: editType stringArray', async function (assert) {
+    const [model, spy] = await setup.call(this, createAttr('foo', 'string', { editType: 'stringArray' }));
+    assert.ok(component.hasStringList, 'renders the string-list component');
+
+    await component.fields.objectAt(0).textarea('array').change();
+    assert.deepEqual(model.get('foo'), ['array'], 'sets the value on the model');
+    assert.deepEqual(spy.args[0], ['foo', ['array']], 'onChange called with correct args');
+  });
+
+  
test('it renders: sensitive', async function (assert) { + const [model, spy] = await setup.call(this, createAttr('password', 'string', { sensitive: true })); + assert.ok(component.hasMaskedInput, 'renders the masked-input component'); + await component.fields.objectAt(0).textarea('secret'); + assert.strictEqual(model.get('password'), 'secret'); + assert.ok(spy.calledWith('password', 'secret'), 'onChange called with correct args'); + }); + + test('it uses a passed label', async function (assert) { + await setup.call(this, createAttr('foo', 'string', { label: 'Not Foo' })); + assert.strictEqual( + component.fields.objectAt(0).labelText[0], + 'Not Foo', + 'renders the label from options' + ); + }); + + test('it renders a help tooltip', async function (assert) { + await setup.call(this, createAttr('foo', 'string', { helpText: 'Here is some help text' })); + await component.tooltipTrigger(); + assert.ok(component.hasTooltip, 'renders the tooltip component'); + }); + + test('it should not expand and toggle ttl when default 0s value is present', async function (assert) { + assert.expect(2); + + this.setProperties({ + model: EmberObject.create({ foo: '0s' }), + attr: createAttr('foo', null, { editType: 'ttl' }), + onChange: () => {}, + }); + + await render(hbs``); + assert + .dom('[data-test-toggle-input="Foo"]') + .isNotChecked('Toggle is initially unchecked when given default value'); + assert.dom('[data-test-ttl-picker-group="Foo"]').doesNotExist('Ttl input is hidden'); + }); + + test('it should toggle and expand ttl when initial non default value is provided', async function (assert) { + assert.expect(2); + + this.setProperties({ + model: EmberObject.create({ foo: '1s' }), + attr: createAttr('foo', null, { editType: 'ttl' }), + onChange: () => {}, + }); + + await render(hbs``); + assert.dom('[data-test-toggle-input="Foo"]').isChecked('Toggle is initially checked when given value'); + assert.dom('[data-test-ttl-value="Foo"]').hasValue('1', 'Ttl input displays with correct value'); + }); + + test('it should show validation warning', async function (assert) { + const model = this.owner.lookup('service:store').createRecord('auth-method'); + model.path = 'foo bar'; + this.validations = model.validate().state; + this.setProperties({ + model, + attr: createAttr('path', 'string'), + onChange: () => {}, + }); + + await render( + hbs`` + ); + assert.dom('[data-test-validation-warning]').exists('Validation warning renders'); + }); +}); diff --git a/ui/tests/integration/components/get-credentials-card-test.js b/ui/tests/integration/components/get-credentials-card-test.js new file mode 100644 index 0000000..10eb11a --- /dev/null +++ b/ui/tests/integration/components/get-credentials-card-test.js @@ -0,0 +1,141 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import Service from '@ember/service'; +import { click, find, render, typeIn } from '@ember/test-helpers'; +import { selectChoose, clickTrigger } from 'ember-power-select/test-support/helpers'; +import hbs from 'htmlbars-inline-precompile'; +import sinon from 'sinon'; + +const TITLE = 'Get Credentials'; +const SEARCH_LABEL = 'Role to use'; + +const storeService = Service.extend({ + query(modelType) { + return new Promise((resolve, reject) => { + switch (modelType) { + case 'database/role': + resolve([{ id: 'my-role', backend: 'database' }]); + break; + default: + reject({ httpStatus: 404, message: 'not found' }); + break; + } + reject({ httpStatus: 404, message: 'not found' }); + }); + }, +}); + +module('Integration | Component | get-credentials-card', function (hooks) { + setupRenderingTest(hooks); + + hooks.beforeEach(function () { + this.router = this.owner.lookup('service:router'); + this.router.transitionTo = sinon.stub(); + + this.owner.unregister('service:store'); + this.owner.register('service:store', storeService); + this.set('title', TITLE); + this.set('searchLabel', SEARCH_LABEL); + }); + + hooks.afterEach(function () { + this.router.transitionTo.reset(); + }); + + test('it shows a disabled button when no item is selected', async function (assert) { + await render(hbs``); + assert.dom('[data-test-get-credentials]').isDisabled(); + assert.dom('[data-test-get-credentials]').hasText('Get credentials', 'Button has default text'); + }); + + test('it shows button that can be clicked to credentials route when an item is selected', async function (assert) { + const models = ['database/role']; + this.set('models', models); + await render( + hbs`` + ); + assert + .dom('[data-test-component="search-select"]#search-input-role') + .exists('renders search select component by default'); + assert + .dom('[data-test-component="search-select"]#search-input-role') + .hasText('Search for a role...', 'renders placeholder text passed to search select'); + await clickTrigger(); + await selectChoose('', 'my-role'); + assert.dom('[data-test-get-credentials]').isEnabled(); + await click('[data-test-get-credentials]'); + assert.propEqual( + this.router.transitionTo.lastCall.args, + ['vault.cluster.secrets.backend.credentials', 'my-role'], + 'transitionTo is called with correct route and role name' + ); + }); + + test('it renders input search field when renderInputSearch=true and shows placeholder text', async function (assert) { + await render( + hbs`` + ); + assert + .dom('[data-test-component="search-select"]') + .doesNotExist('does not render search select component'); + assert.strictEqual( + find('[data-test-search-roles] input').placeholder, + 'secret/', + 'renders placeholder text passed to search input' + ); + await typeIn('[data-test-search-roles] input', 'test'); + assert.dom('[data-test-get-credentials]').isEnabled('submit button enables after typing input text'); + assert.dom('[data-test-get-credentials]').hasText('View secret', 'Button has view secret CTA'); + await click('[data-test-get-credentials]'); + assert.propEqual( + this.router.transitionTo.lastCall.args, + ['vault.cluster.secrets.backend.show', 'test'], + 'transitionTo is called with correct route and secret name' + ); + }); + + test('it prefills input if initialValue has value', async function (assert) { + await render( + hbs`` + ); + assert + .dom('[data-test-component="search-select"]') + 
.doesNotExist('does not render search select component'); + assert.dom('[data-test-search-roles] input').hasValue('hello/', 'pre-fills search input'); + assert.dom('[data-test-get-credentials]').isEnabled('submit button is enabled at render'); + assert.dom('[data-test-get-credentials]').hasText('View list', 'Button has list CTA'); + await typeIn('[data-test-search-roles] input', 'test'); + assert + .dom('[data-test-get-credentials]') + .hasText('View secret', 'Button has view secret CTA after input'); + await click('[data-test-get-credentials]'); + assert.propEqual( + this.router.transitionTo.lastCall.args, + ['vault.cluster.secrets.backend.show', 'hello/test'], + 'transitionTo is called with correct route and secret name' + ); + }); + + test('it goes to list route if input ends in / and type=secret', async function (assert) { + await render( + hbs`` + ); + assert + .dom('[data-test-component="search-select"]') + .doesNotExist('does not render search select component'); + await typeIn('[data-test-search-roles] input', 'test/'); + assert.dom('[data-test-get-credentials]').hasText('View list', 'submit button has list CTA'); + assert.dom('[data-test-get-credentials]').isEnabled('submit button is enabled at render'); + await click('[data-test-get-credentials]'); + assert.propEqual( + this.router.transitionTo.lastCall.args, + ['vault.cluster.secrets.backend.list', 'test/'], + 'transitionTo is called with correct route and secret name' + ); + }); +}); diff --git a/ui/tests/integration/components/hover-copy-button-test.js b/ui/tests/integration/components/hover-copy-button-test.js new file mode 100644 index 0000000..568fd81 --- /dev/null +++ b/ui/tests/integration/components/hover-copy-button-test.js @@ -0,0 +1,43 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render, settled } from '@ember/test-helpers'; +import { create } from 'ember-cli-page-object'; +import hbs from 'htmlbars-inline-precompile'; +import copyButton from 'vault/tests/pages/components/hover-copy-button'; +const component = create(copyButton); + +module('Integration | Component | hover copy button', function (hooks) { + setupRenderingTest(hooks); + + // ember-cli-clipboard helpers don't like the new style + test('it shows success message in tooltip', async function (assert) { + await render(hbs` +
+ +
+  `);
+    await component.focusContainer();
+    await settled();
+    assert.ok(component.buttonIsVisible);
+    await component.mouseEnter();
+    await settled();
+    assert.strictEqual(component.tooltipText, 'Copy', 'shows copy');
+  });
+
+  test('it has the correct class when alwaysShow is true', async function (assert) {
+    await render(hbs`
+
+  `);
+    await render(hbs`{{hover-copy-button alwaysShow=true copyValue=this.copyValue}}`);
+    assert.ok(component.buttonIsVisible);
+    assert.ok(component.wrapperClass.includes('hover-copy-button-static'));
+  });
+});
diff --git a/ui/tests/integration/components/icon-test.js b/ui/tests/integration/components/icon-test.js
new file mode 100644
index 0000000..ccb58a1
--- /dev/null
+++ b/ui/tests/integration/components/icon-test.js
@@ -0,0 +1,61 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: MPL-2.0
+ */
+
+import { module, test } from 'qunit';
+import { setupRenderingTest } from 'ember-qunit';
+import { render } from '@ember/test-helpers';
+import hbs from 'htmlbars-inline-precompile';
+import waitForError from 'vault/tests/helpers/wait-for-error';
+
+module('Integration | Component | icon', function (hooks) {
+  setupRenderingTest(hooks);
+
+  test('it renders', async function (assert) {
+    await render(hbs``);
+    assert.dom('.i-con').exists('renders');
+
+    await render(hbs``);
+    assert.dom('.vault-logo').exists('inlines the SVG');
+    assert.dom('.hs-icon').hasClass('hs-icon-l', 'Default hs class applied');
+
+    await render(hbs`